diff --git a/.mailmap b/.mailmap index 5273cfd70ad6..c7b10caecc4e 100644 --- a/.mailmap +++ b/.mailmap @@ -68,6 +68,8 @@ Jacob Shin James Bottomley James Bottomley James E Wilson +James Hogan +James Hogan James Ketrenos Javi Merino diff --git a/CREDITS b/CREDITS index 5d09c26d69cd..9fbd2c77b546 100644 --- a/CREDITS +++ b/CREDITS @@ -2090,7 +2090,7 @@ S: Kuala Lumpur, Malaysia N: Mohit Kumar D: ST Microelectronics SPEAr13xx PCI host bridge driver -D: Synopsys Designware PCI host bridge driver +D: Synopsys DesignWare PCI host bridge driver N: Gabor Kuti E: seasons@falcon.sch.bme.hu @@ -2606,11 +2606,9 @@ E: tmolina@cablespeed.com D: bug fixes, documentation, minor hackery N: Paul Moore -E: paul.moore@hp.com -D: NetLabel author -S: Hewlett-Packard -S: 110 Spit Brook Road -S: Nashua, NH 03062 +E: paul@paul-moore.com +W: http://www.paul-moore.com +D: NetLabel, SELinux, audit N: James Morris E: jmorris@namei.org diff --git a/Documentation/ABI/stable/sysfs-driver-dma-ioatdma b/Documentation/ABI/stable/sysfs-driver-dma-ioatdma new file mode 100644 index 000000000000..420c1d09e42f --- /dev/null +++ b/Documentation/ABI/stable/sysfs-driver-dma-ioatdma @@ -0,0 +1,30 @@ +What: sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dmachan/quickdata/cap +Date: December 3, 2009 +KernelVersion: 2.6.32 +Contact: dmaengine@vger.kernel.org +Description: Capabilities the DMA supports.Currently there are DMA_PQ, DMA_PQ_VAL, + DMA_XOR,DMA_XOR_VAL,DMA_INTERRUPT. + +What: sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dmachan/quickdata/ring_active +Date: December 3, 2009 +KernelVersion: 2.6.32 +Contact: dmaengine@vger.kernel.org +Description: The number of descriptors active in the ring. + +What: sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dmachan/quickdata/ring_size +Date: December 3, 2009 +KernelVersion: 2.6.32 +Contact: dmaengine@vger.kernel.org +Description: Descriptor ring size, total number of descriptors available. + +What: sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dmachan/quickdata/version +Date: December 3, 2009 +KernelVersion: 2.6.32 +Contact: dmaengine@vger.kernel.org +Description: Version of ioatdma device. + +What: sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dmachan/quickdata/intr_coalesce +Date: August 8, 2017 +KernelVersion: 4.14 +Contact: dmaengine@vger.kernel.org +Description: Tune-able interrupt delay value per channel basis. diff --git a/Documentation/ABI/testing/ppc-memtrace b/Documentation/ABI/testing/ppc-memtrace new file mode 100644 index 000000000000..2e8b93741270 --- /dev/null +++ b/Documentation/ABI/testing/ppc-memtrace @@ -0,0 +1,45 @@ +What: /sys/kernel/debug/powerpc/memtrace +Date: Aug 2017 +KernelVersion: 4.14 +Contact: linuxppc-dev@lists.ozlabs.org +Description: This folder contains the relevant debugfs files for the + hardware trace macro to use. CONFIG_PPC64_HARDWARE_TRACING + must be set. + +What: /sys/kernel/debug/powerpc/memtrace/enable +Date: Aug 2017 +KernelVersion: 4.14 +Contact: linuxppc-dev@lists.ozlabs.org +Description: Write an integer containing the size in bytes of the memory + you want removed from each NUMA node to this file - it must be + aligned to the memblock size. This amount of RAM will be removed + from the kernel mappings and the following debugfs files will be + created. This can only be successfully done once per boot. Once + memory is successfully removed from each node, the following + files are created. 
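As a rough usage sketch for the enable file documented above (the 1 GiB value and the standard debugfs mount point are assumptions, not part of this patch)::

	# remove 1 GiB (must be memblock-aligned) from each NUMA node
	echo 1073741824 > /sys/kernel/debug/powerpc/memtrace/enable
	# per-node directories with size/start/trace now appear
	ls /sys/kernel/debug/powerpc/memtrace/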
+ +What: /sys/kernel/debug/powerpc/memtrace/ +Date: Aug 2017 +KernelVersion: 4.14 +Contact: linuxppc-dev@lists.ozlabs.org +Description: This directory contains information about the removed memory + from the specific NUMA node. + +What: /sys/kernel/debug/powerpc/memtrace//size +Date: Aug 2017 +KernelVersion: 4.14 +Contact: linuxppc-dev@lists.ozlabs.org +Description: This contains the size of the memory removed from the node. + +What: /sys/kernel/debug/powerpc/memtrace//start +Date: Aug 2017 +KernelVersion: 4.14 +Contact: linuxppc-dev@lists.ozlabs.org +Description: This contains the start address of the removed memory. + +What: /sys/kernel/debug/powerpc/memtrace//trace +Date: Aug 2017 +KernelVersion: 4.14 +Contact: linuxppc-dev@lists.ozlabs.org +Description: This is where the hardware trace macro will output the trace + it generates. diff --git a/Documentation/ABI/testing/procfs-smaps_rollup b/Documentation/ABI/testing/procfs-smaps_rollup new file mode 100644 index 000000000000..0a54ed0d63c9 --- /dev/null +++ b/Documentation/ABI/testing/procfs-smaps_rollup @@ -0,0 +1,31 @@ +What: /proc/pid/smaps_rollup +Date: August 2017 +Contact: Daniel Colascione +Description: + This file provides pre-summed memory information for a + process. The format is identical to /proc/pid/smaps, + except instead of an entry for each VMA in a process, + smaps_rollup has a single entry (tagged "[rollup]") + for which each field is the sum of the corresponding + fields from all the maps in /proc/pid/smaps. + For more details, see the procfs man page. + + Typical output looks like this: + + 00100000-ff709000 ---p 00000000 00:00 0 [rollup] + Rss: 884 kB + Pss: 385 kB + Shared_Clean: 696 kB + Shared_Dirty: 0 kB + Private_Clean: 120 kB + Private_Dirty: 68 kB + Referenced: 884 kB + Anonymous: 68 kB + LazyFree: 0 kB + AnonHugePages: 0 kB + ShmemPmdMapped: 0 kB + Shared_Hugetlb: 0 kB + Private_Hugetlb: 0 kB + Swap: 0 kB + SwapPss: 0 kB + Locked: 385 kB diff --git a/Documentation/ABI/testing/sysfs-block-zram b/Documentation/ABI/testing/sysfs-block-zram index 451b6d882b2c..c1513c756af1 100644 --- a/Documentation/ABI/testing/sysfs-block-zram +++ b/Documentation/ABI/testing/sysfs-block-zram @@ -90,3 +90,11 @@ Description: device's debugging info useful for kernel developers. Its format is not documented intentionally and may change anytime without any notice. + +What: /sys/block/zram/backing_dev +Date: June 2017 +Contact: Minchan Kim +Description: + The backing_dev file is read-write and set up backing + device for zram to write incompressible pages. + For using, user should enable CONFIG_ZRAM_WRITEBACK. diff --git a/Documentation/ABI/testing/sysfs-bus-iio-lptimer-stm32 b/Documentation/ABI/testing/sysfs-bus-iio-lptimer-stm32 new file mode 100644 index 000000000000..ad2cc63e4bf8 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-iio-lptimer-stm32 @@ -0,0 +1,57 @@ +What: /sys/bus/iio/devices/iio:deviceX/in_count0_preset +KernelVersion: 4.13 +Contact: fabrice.gasnier@st.com +Description: + Reading returns the current preset value. Writing sets the + preset value. Encoder counts continuously from 0 to preset + value, depending on direction (up/down). + +What: /sys/bus/iio/devices/iio:deviceX/in_count_quadrature_mode_available +KernelVersion: 4.13 +Contact: fabrice.gasnier@st.com +Description: + Reading returns the list possible quadrature modes. 
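A hedged sketch of exercising these counter attributes from user space (the iio:device0 name and the preset value are illustrative assumptions)::

	cd /sys/bus/iio/devices/iio:device0
	cat in_count_quadrature_mode_available
	echo quadrature > in_count0_quadrature_mode
	# encoder counts continuously from 0 to this preset value
	echo 65535 > in_count0_preset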
+ +What: /sys/bus/iio/devices/iio:deviceX/in_count0_quadrature_mode +KernelVersion: 4.13 +Contact: fabrice.gasnier@st.com +Description: + Configure the device counter quadrature modes: + - non-quadrature: + Encoder IN1 input servers as the count input (up + direction). + - quadrature: + Encoder IN1 and IN2 inputs are mixed to get direction + and count. + +What: /sys/bus/iio/devices/iio:deviceX/in_count_polarity_available +KernelVersion: 4.13 +Contact: fabrice.gasnier@st.com +Description: + Reading returns the list possible active edges. + +What: /sys/bus/iio/devices/iio:deviceX/in_count0_polarity +KernelVersion: 4.13 +Contact: fabrice.gasnier@st.com +Description: + Configure the device encoder/counter active edge: + - rising-edge + - falling-edge + - both-edges + + In non-quadrature mode, device counts up on active edge. + In quadrature mode, encoder counting scenarios are as follows: + ---------------------------------------------------------------- + | Active | Level on | IN1 signal | IN2 signal | + | edge | opposite |------------------------------------------ + | | signal | Rising | Falling | Rising | Falling | + ---------------------------------------------------------------- + | Rising | High -> | Down | - | Up | - | + | edge | Low -> | Up | - | Down | - | + ---------------------------------------------------------------- + | Falling | High -> | - | Up | - | Down | + | edge | Low -> | - | Down | - | Up | + ---------------------------------------------------------------- + | Both | High -> | Down | Up | Up | Down | + | edges | Low -> | Up | Down | Down | Up | + ---------------------------------------------------------------- diff --git a/Documentation/ABI/testing/sysfs-firmware-opal-powercap b/Documentation/ABI/testing/sysfs-firmware-opal-powercap new file mode 100644 index 000000000000..c9b66ec4f165 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-firmware-opal-powercap @@ -0,0 +1,31 @@ +What: /sys/firmware/opal/powercap +Date: August 2017 +Contact: Linux for PowerPC mailing list +Description: Powercap directory for Powernv (P8, P9) servers + + Each folder in this directory contains a + power-cappable component. + +What: /sys/firmware/opal/powercap/system-powercap + /sys/firmware/opal/powercap/system-powercap/powercap-min + /sys/firmware/opal/powercap/system-powercap/powercap-max + /sys/firmware/opal/powercap/system-powercap/powercap-current +Date: August 2017 +Contact: Linux for PowerPC mailing list +Description: System powercap directory and attributes applicable for + Powernv (P8, P9) servers + + This directory provides powercap information. It + contains below sysfs attributes: + + - powercap-min : This file provides the minimum + possible powercap in Watt units + + - powercap-max : This file provides the maximum + possible powercap in Watt units + + - powercap-current : This file provides the current + powercap set on the system. Writing to this file + creates a request for setting a new-powercap. The + powercap requested must be between powercap-min + and powercap-max. 
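For illustration, the attributes above might be used as follows (the 2000 W request is only an example; any value between powercap-min and powercap-max is valid)::

	cat /sys/firmware/opal/powercap/system-powercap/powercap-min
	cat /sys/firmware/opal/powercap/system-powercap/powercap-max
	# request a new system power cap, in Watts
	echo 2000 > /sys/firmware/opal/powercap/system-powercap/powercap-current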
diff --git a/Documentation/ABI/testing/sysfs-firmware-opal-psr b/Documentation/ABI/testing/sysfs-firmware-opal-psr
new file mode 100644
index 000000000000..cc2ece70e365
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-firmware-opal-psr
@@ -0,0 +1,18 @@
+What:		/sys/firmware/opal/psr
+Date:		August 2017
+Contact:	Linux for PowerPC mailing list
+Description:	Power-Shift-Ratio directory for Powernv (P9) servers
+
+		Power-Shift-Ratio allows providing hints to the firmware
+		to shift/throttle power between different entities in
+		the system. Each attribute in this directory indicates
+		a settable PSR.
+
+What:		/sys/firmware/opal/psr/cpu_to_gpu_X
+Date:		August 2017
+Contact:	Linux for PowerPC mailing list
+Description:	PSR sysfs attributes for Powernv P9 servers
+
+		Power-Shift-Ratio between CPU and GPU for a given chip
+		with chip-id X. This file gives the ratio (0-100)
+		which is used by OCC for power-capping.
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index 84c606fb3ca4..11b7f4ebea7c 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -57,6 +57,15 @@ Contact:	"Jaegeuk Kim"
 Description:
		Controls the issue rate of small discard commands.
 
+What:		/sys/fs/f2fs//discard_granularity
+Date:		July 2017
+Contact:	"Chao Yu"
+Description:
+		Controls discard granularity of the inner discard thread; the
+		thread will not issue discards smaller than this granularity.
+		The unit size is one block; only values in the range
+		[1, 512] are currently supported.
+
 What:		/sys/fs/f2fs//max_victim_search
 Date:		January 2014
 Contact:	"Jaegeuk Kim"
@@ -130,3 +139,15 @@ Date:		June 2017
 Contact:	"Chao Yu"
 Description:
		Controls current reserved blocks in system.
+
+What:		/sys/fs/f2fs//gc_urgent
+Date:		August 2017
+Contact:	"Jaegeuk Kim"
+Description:
+		Do background GC aggressively
+
+What:		/sys/fs/f2fs//gc_urgent_sleep_time
+Date:		August 2017
+Contact:	"Jaegeuk Kim"
+Description:
+		Controls sleep time of GC urgent mode
diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-swap b/Documentation/ABI/testing/sysfs-kernel-mm-swap
new file mode 100644
index 000000000000..587db52084c7
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-kernel-mm-swap
@@ -0,0 +1,26 @@
+What:		/sys/kernel/mm/swap/
+Date:		August 2017
+Contact:	Linux memory management mailing list
+Description:	Interface for swapping
+
+What:		/sys/kernel/mm/swap/vma_ra_enabled
+Date:		August 2017
+Contact:	Linux memory management mailing list
+Description:	Enable/disable VMA based swap readahead.
+
+		If set to true, the VMA based swap readahead algorithm
+		will be used for swappable anonymous pages mapped in a
+		VMA, and the global swap readahead algorithm will still
+		be used for other users such as tmpfs. If set to
+		false, the global swap readahead algorithm will be
+		used for all swappable pages.
+
+What:		/sys/kernel/mm/swap/vma_ra_max_order
+Date:		August 2017
+Contact:	Linux memory management mailing list
+Description:	The max readahead size, as an order, for VMA based swap readahead
+
+		The VMA based swap readahead algorithm will read ahead at
+		most 1 << max_order pages for each readahead. The
+		real readahead size for each readahead will be scaled
+		according to the estimation algorithm.
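A brief sketch of how these two knobs might be tuned together (the order value 3 is an arbitrary illustration)::

	# use the VMA based readahead algorithm for anonymous pages
	echo true > /sys/kernel/mm/swap/vma_ra_enabled
	# cap each readahead at 1 << 3 = 8 pages
	echo 3 > /sys/kernel/mm/swap/vma_ra_max_order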
diff --git a/Documentation/ABI/testing/sysfs-power b/Documentation/ABI/testing/sysfs-power index f523e5a3ac33..a1d1612f3651 100644 --- a/Documentation/ABI/testing/sysfs-power +++ b/Documentation/ABI/testing/sysfs-power @@ -127,7 +127,7 @@ Description: What; /sys/power/pm_trace_dev_match Date: October 2010 -Contact: James Hogan +Contact: James Hogan Description: The /sys/power/pm_trace_dev_match file contains the name of the device associated with the last PM event point saved in the RTC @@ -273,3 +273,15 @@ Description: This output is useful for system wakeup diagnostics of spurious wakeup interrupts. + +What: /sys/power/pm_debug_messages +Date: July 2017 +Contact: Rafael J. Wysocki +Description: + The /sys/power/pm_debug_messages file controls the printing + of debug messages from the system suspend/hiberbation + infrastructure to the kernel log. + + Writing a "1" to this file enables the debug messages and + writing a "0" (default) to it disables them. Reads from + this file return the current value. diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt index 45b29326d719..ac66ae2509a9 100644 --- a/Documentation/DMA-API.txt +++ b/Documentation/DMA-API.txt @@ -515,14 +515,15 @@ API at all. :: void * - dma_alloc_noncoherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag) + dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, + gfp_t flag, unsigned long attrs) -Identical to dma_alloc_coherent() except that the platform will -choose to return either consistent or non-consistent memory as it sees -fit. By using this API, you are guaranteeing to the platform that you -have all the correct and necessary sync points for this memory in the -driver should it choose to return non-consistent memory. +Identical to dma_alloc_coherent() except that when the +DMA_ATTR_NON_CONSISTENT flags is passed in the attrs argument, the +platform will choose to return either consistent or non-consistent memory +as it sees fit. By using this API, you are guaranteeing to the platform +that you have all the correct and necessary sync points for this memory +in the driver should it choose to return non-consistent memory. Note: where the platform can return consistent memory, it will guarantee that the sync points become nops. @@ -535,12 +536,13 @@ that simply cannot make consistent memory. :: void - dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr, - dma_addr_t dma_handle) + dma_free_attrs(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t dma_handle, unsigned long attrs) -Free memory allocated by the nonconsistent API. All parameters must -be identical to those passed in (and returned by -dma_alloc_noncoherent()). +Free memory allocated by the dma_alloc_attrs(). All parameters common +parameters must identical to those otherwise passed to dma_fre_coherent, +and the attrs argument must be identical to the attrs passed to +dma_alloc_attrs(). :: @@ -564,8 +566,8 @@ memory or doing partial flushes. dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction) -Do a partial sync of memory that was allocated by -dma_alloc_noncoherent(), starting at virtual address vaddr and +Do a partial sync of memory that was allocated by dma_alloc_attrs() with +the DMA_ATTR_NON_CONSISTENT flag starting at virtual address vaddr and continuing on for size. Again, you *must* observe the cache line boundaries when doing this. @@ -590,34 +592,11 @@ size is the size of the area (must be multiples of PAGE_SIZE). 
flags can be ORed together and are: -- DMA_MEMORY_MAP - request that the memory returned from - dma_alloc_coherent() be directly writable. - -- DMA_MEMORY_IO - request that the memory returned from - dma_alloc_coherent() be addressable using read()/write()/memcpy_toio() etc. - -One or both of these flags must be present. - -- DMA_MEMORY_INCLUDES_CHILDREN - make the declared memory be allocated by - dma_alloc_coherent of any child devices of this one (for memory residing - on a bridge). - - DMA_MEMORY_EXCLUSIVE - only allocate memory from the declared regions. Do not allow dma_alloc_coherent() to fall back to system memory when it's out of memory in the declared region. -The return value will be either DMA_MEMORY_MAP or DMA_MEMORY_IO and -must correspond to a passed in flag (i.e. no returning DMA_MEMORY_IO -if only DMA_MEMORY_MAP were passed in) for success or zero for -failure. - -Note, for DMA_MEMORY_IO returns, all subsequent memory returned by -dma_alloc_coherent() may no longer be accessed directly, but instead -must be accessed using the correct bus functions. If your driver -isn't prepared to handle this contingency, it should not specify -DMA_MEMORY_IO in the input flags. - -As a simplification for the platforms, only **one** such region of +As a simplification for the platforms, only *one* such region of memory may be declared per device. For reasons of efficiency, most platforms choose to track the declared diff --git a/Documentation/admin-guide/LSM/tomoyo.rst b/Documentation/admin-guide/LSM/tomoyo.rst index a5947218fa64..e2d6b6e15082 100644 --- a/Documentation/admin-guide/LSM/tomoyo.rst +++ b/Documentation/admin-guide/LSM/tomoyo.rst @@ -9,8 +9,8 @@ TOMOYO is a name-based MAC extension (LSM module) for the Linux kernel. LiveCD-based tutorials are available at -http://tomoyo.sourceforge.jp/1.7/1st-step/ubuntu10.04-live/ -http://tomoyo.sourceforge.jp/1.7/1st-step/centos5-live/ +http://tomoyo.sourceforge.jp/1.8/ubuntu12.04-live.html +http://tomoyo.sourceforge.jp/1.8/centos6-live.html Though these tutorials use non-LSM version of TOMOYO, they are useful for you to know what TOMOYO is. @@ -21,35 +21,35 @@ How to enable TOMOYO? Build the kernel with ``CONFIG_SECURITY_TOMOYO=y`` and pass ``security=tomoyo`` on kernel's command line. -Please see http://tomoyo.sourceforge.jp/2.3/ for details. +Please see http://tomoyo.osdn.jp/2.5/ for details. Where is documentation? ======================= User <-> Kernel interface documentation is available at -http://tomoyo.sourceforge.jp/2.3/policy-reference.html . +http://tomoyo.osdn.jp/2.5/policy-specification/index.html . Materials we prepared for seminars and symposiums are available at -http://sourceforge.jp/projects/tomoyo/docs/?category_id=532&language_id=1 . +http://osdn.jp/projects/tomoyo/docs/?category_id=532&language_id=1 . Below lists are chosen from three aspects. What is TOMOYO? TOMOYO Linux Overview - http://sourceforge.jp/projects/tomoyo/docs/lca2009-takeda.pdf + http://osdn.jp/projects/tomoyo/docs/lca2009-takeda.pdf TOMOYO Linux: pragmatic and manageable security for Linux - http://sourceforge.jp/projects/tomoyo/docs/freedomhectaipei-tomoyo.pdf + http://osdn.jp/projects/tomoyo/docs/freedomhectaipei-tomoyo.pdf TOMOYO Linux: A Practical Method to Understand and Protect Your Own Linux Box - http://sourceforge.jp/projects/tomoyo/docs/PacSec2007-en-no-demo.pdf + http://osdn.jp/projects/tomoyo/docs/PacSec2007-en-no-demo.pdf What can TOMOYO do? 
Deep inside TOMOYO Linux - http://sourceforge.jp/projects/tomoyo/docs/lca2009-kumaneko.pdf + http://osdn.jp/projects/tomoyo/docs/lca2009-kumaneko.pdf The role of "pathname based access control" in security. - http://sourceforge.jp/projects/tomoyo/docs/lfj2008-bof.pdf + http://osdn.jp/projects/tomoyo/docs/lfj2008-bof.pdf History of TOMOYO? Realities of Mainlining - http://sourceforge.jp/projects/tomoyo/docs/lfj2008.pdf + http://osdn.jp/projects/tomoyo/docs/lfj2008.pdf What is future plan? ==================== @@ -60,6 +60,6 @@ multiple LSM modules at the same time. We feel sorry that you have to give up SELinux/SMACK/AppArmor etc. when you want to use TOMOYO. We hope that LSM becomes stackable in future. Meanwhile, you can use non-LSM -version of TOMOYO, available at http://tomoyo.sourceforge.jp/1.7/ . +version of TOMOYO, available at http://tomoyo.osdn.jp/1.8/ . LSM version of TOMOYO is a subset of non-LSM version of TOMOYO. We are planning to port non-LSM version's functionalities to LSM versions. diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 6996b7727b85..05496622b4ef 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -2764,6 +2764,15 @@ If the dependencies are under your control, you can turn on cpu0_hotplug. + nps_mtm_hs_ctr= [KNL,ARC] + This parameter sets the maximum duration, in + cycles, each HW thread of the CTOP can run + without interruptions, before HW switches it. + The actual maximum duration is 16 times this + parameter's value. + Format: integer between 1 and 255 + Default: 255 + nptcg= [IA-64] Override max number of concurrent global TLB purges which is reported from either PAL_VM_SUMMARY or SAL PALO. @@ -2783,7 +2792,7 @@ Allowed values are enable and disable numa_zonelist_order= [KNL, BOOT] Select zonelist order for NUMA. - one of ['zone', 'node', 'default'] can be specified + 'node', 'default' can be specified This can be set from sysctl after boot. See Documentation/sysctl/vm.txt for details. diff --git a/Documentation/admin-guide/pm/cpufreq.rst b/Documentation/admin-guide/pm/cpufreq.rst index 7af83a92d2d6..47153e64dfb5 100644 --- a/Documentation/admin-guide/pm/cpufreq.rst +++ b/Documentation/admin-guide/pm/cpufreq.rst @@ -479,14 +479,6 @@ This governor exposes the following tunables: # echo `$(($(cat cpuinfo_transition_latency) * 750 / 1000)) > ondemand/sampling_rate - -``min_sampling_rate`` - The minimum value of ``sampling_rate``. - - Equal to 10000 (10 ms) if :c:macro:`CONFIG_NO_HZ_COMMON` and - :c:data:`tick_nohz_active` are both set or to 20 times the value of - :c:data:`jiffies` in microseconds otherwise. - ``up_threshold`` If the estimated CPU load is above this value (in percent), the governor will set the frequency to the maximum value allowed for the policy. diff --git a/Documentation/admin-guide/pm/index.rst b/Documentation/admin-guide/pm/index.rst index 7f148f76f432..49237ac73442 100644 --- a/Documentation/admin-guide/pm/index.rst +++ b/Documentation/admin-guide/pm/index.rst @@ -5,12 +5,6 @@ Power Management .. toctree:: :maxdepth: 2 - cpufreq - intel_pstate - -.. 
only:: subproject and html - - Indices - ======= - - * :ref:`genindex` + strategies + system-wide + working-state diff --git a/Documentation/admin-guide/pm/intel_pstate.rst b/Documentation/admin-guide/pm/intel_pstate.rst index 1d6249825efc..d2b6fda3d67b 100644 --- a/Documentation/admin-guide/pm/intel_pstate.rst +++ b/Documentation/admin-guide/pm/intel_pstate.rst @@ -167,35 +167,17 @@ is set. ``powersave`` ............. -Without HWP, this P-state selection algorithm generally depends on the -processor model and/or the system profile setting in the ACPI tables and there -are two variants of it. - -One of them is used with processors from the Atom line and (regardless of the -processor model) on platforms with the system profile in the ACPI tables set to -"mobile" (laptops mostly), "tablet", "appliance PC", "desktop", or -"workstation". It is also used with processors supporting the HWP feature if -that feature has not been enabled (that is, with the ``intel_pstate=no_hwp`` -argument in the kernel command line). It is similar to the algorithm +Without HWP, this P-state selection algorithm is similar to the algorithm implemented by the generic ``schedutil`` scaling governor except that the utilization metric used by it is based on numbers coming from feedback registers of the CPU. It generally selects P-states proportional to the -current CPU utilization, so it is referred to as the "proportional" algorithm. +current CPU utilization. -The second variant of the ``powersave`` P-state selection algorithm, used in all -of the other cases (generally, on processors from the Core line, so it is -referred to as the "Core" algorithm), is based on the values read from the APERF -and MPERF feedback registers and the previously requested target P-state. -It does not really take CPU utilization into account explicitly, but as a rule -it causes the CPU P-state to ramp up very quickly in response to increased -utilization which is generally desirable in server environments. - -Regardless of the variant, this algorithm is run by the driver's utilization -update callback for the given CPU when it is invoked by the CPU scheduler, but -not more often than every 10 ms (that can be tweaked via ``debugfs`` in `this -particular case `_). Like in the ``performance`` -case, the hardware configuration is not touched if the new P-state turns out to -be the same as the current one. +This algorithm is run by the driver's utilization update callback for the +given CPU when it is invoked by the CPU scheduler, but not more often than +every 10 ms. Like in the ``performance`` case, the hardware configuration +is not touched if the new P-state turns out to be the same as the current +one. This is the default P-state selection algorithm if the :c:macro:`CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE` kernel configuration option @@ -720,34 +702,7 @@ P-state is called, the ``ftrace`` filter can be set to to gnome-shell-3409 [001] ..s. 2537.650850: intel_pstate_set_pstate <-intel_pstate_timer_func -0 [000] ..s. 2537.654843: intel_pstate_set_pstate <-intel_pstate_timer_func -Tuning Interface in ``debugfs`` -------------------------------- - -The ``powersave`` algorithm provided by ``intel_pstate`` for `the Core line of -processors in the active mode `_ is based on a `PID controller`_ -whose parameters were chosen to address a number of different use cases at the -same time. 
However, it still is possible to fine-tune it to a specific workload -and the ``debugfs`` interface under ``/sys/kernel/debug/pstate_snb/`` is -provided for this purpose. [Note that the ``pstate_snb`` directory will be -present only if the specific P-state selection algorithm matching the interface -in it actually is in use.] - -The following files present in that directory can be used to modify the PID -controller parameters at run time: - -| ``deadband`` -| ``d_gain_pct`` -| ``i_gain_pct`` -| ``p_gain_pct`` -| ``sample_rate_ms`` -| ``setpoint`` - -Note, however, that achieving desirable results this way generally requires -expert-level understanding of the power vs performance tradeoff, so extra care -is recommended when attempting to do that. - .. _LCEU2015: http://events.linuxfoundation.org/sites/events/files/slides/LinuxConEurope_2015.pdf .. _SDM: http://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-software-developer-system-programming-manual-325384.html .. _ACPI specification: http://www.uefi.org/sites/default/files/resources/ACPI_6_1.pdf -.. _PID controller: https://en.wikipedia.org/wiki/PID_controller diff --git a/Documentation/admin-guide/pm/sleep-states.rst b/Documentation/admin-guide/pm/sleep-states.rst new file mode 100644 index 000000000000..1e5c0f00cb2f --- /dev/null +++ b/Documentation/admin-guide/pm/sleep-states.rst @@ -0,0 +1,245 @@ +=================== +System Sleep States +=================== + +:: + + Copyright (c) 2017 Intel Corp., Rafael J. Wysocki + +Sleep states are global low-power states of the entire system in which user +space code cannot be executed and the overall system activity is significantly +reduced. + + +Sleep States That Can Be Supported +================================== + +Depending on its configuration and the capabilities of the platform it runs on, +the Linux kernel can support up to four system sleep states, includig +hibernation and up to three variants of system suspend. The sleep states that +can be supported by the kernel are listed below. + +.. _s2idle: + +Suspend-to-Idle +--------------- + +This is a generic, pure software, light-weight variant of system suspend (also +referred to as S2I or S2Idle). It allows more energy to be saved relative to +runtime idle by freezing user space, suspending the timekeeping and putting all +I/O devices into low-power states (possibly lower-power than available in the +working state), such that the processors can spend time in their deepest idle +states while the system is suspended. + +The system is woken up from this state by in-band interrupts, so theoretically +any devices that can cause interrupts to be generated in the working state can +also be set up as wakeup devices for S2Idle. + +This state can be used on platforms without support for :ref:`standby ` +or :ref:`suspend-to-RAM `, or it can be used in addition to any of the +deeper system suspend variants to provide reduced resume latency. It is always +supported if the :c:macro:`CONFIG_SUSPEND` kernel configuration option is set. + +.. _standby: + +Standby +------- + +This state, if supported, offers moderate, but real, energy savings, while +providing a relatively straightforward transition back to the working state. No +operating state is lost (the system core logic retains power), so the system can +go back to where it left off easily enough. 
+ +In addition to freezing user space, suspending the timekeeping and putting all +I/O devices into low-power states, which is done for :ref:`suspend-to-idle +` too, nonboot CPUs are taken offline and all low-level system functions +are suspended during transitions into this state. For this reason, it should +allow more energy to be saved relative to :ref:`suspend-to-idle `, but +the resume latency will generally be greater than for that state. + +The set of devices that can wake up the system from this state usually is +reduced relative to :ref:`suspend-to-idle ` and it may be necessary to +rely on the platform for setting up the wakeup functionality as appropriate. + +This state is supported if the :c:macro:`CONFIG_SUSPEND` kernel configuration +option is set and the support for it is registered by the platform with the +core system suspend subsystem. On ACPI-based systems this state is mapped to +the S1 system state defined by ACPI. + +.. _s2ram: + +Suspend-to-RAM +-------------- + +This state (also referred to as STR or S2RAM), if supported, offers significant +energy savings as everything in the system is put into a low-power state, except +for memory, which should be placed into the self-refresh mode to retain its +contents. All of the steps carried out when entering :ref:`standby ` +are also carried out during transitions to S2RAM. Additional operations may +take place depending on the platform capabilities. In particular, on ACPI-based +systems the kernel passes control to the platform firmware (BIOS) as the last +step during S2RAM transitions and that usually results in powering down some +more low-level components that are not directly controlled by the kernel. + +The state of devices and CPUs is saved and held in memory. All devices are +suspended and put into low-power states. In many cases, all peripheral buses +lose power when entering S2RAM, so devices must be able to handle the transition +back to the "on" state. + +On ACPI-based systems S2RAM requires some minimal boot-strapping code in the +platform firmware to resume the system from it. This may be the case on other +platforms too. + +The set of devices that can wake up the system from S2RAM usually is reduced +relative to :ref:`suspend-to-idle ` and :ref:`standby ` and it +may be necessary to rely on the platform for setting up the wakeup functionality +as appropriate. + +S2RAM is supported if the :c:macro:`CONFIG_SUSPEND` kernel configuration option +is set and the support for it is registered by the platform with the core system +suspend subsystem. On ACPI-based systems it is mapped to the S3 system state +defined by ACPI. + +.. _hibernation: + +Hibernation +----------- + +This state (also referred to as Suspend-to-Disk or STD) offers the greatest +energy savings and can be used even in the absence of low-level platform support +for system suspend. However, it requires some low-level code for resuming the +system to be present for the underlying CPU architecture. + +Hibernation is significantly different from any of the system suspend variants. +It takes three system state changes to put it into hibernation and two system +state changes to resume it. + +First, when hibernation is triggered, the kernel stops all system activity and +creates a snapshot image of memory to be written into persistent storage. 
Next, +the system goes into a state in which the snapshot image can be saved, the image +is written out and finally the system goes into the target low-power state in +which power is cut from almost all of its hardware components, including memory, +except for a limited set of wakeup devices. + +Once the snapshot image has been written out, the system may either enter a +special low-power state (like ACPI S4), or it may simply power down itself. +Powering down means minimum power draw and it allows this mechanism to work on +any system. However, entering a special low-power state may allow additional +means of system wakeup to be used (e.g. pressing a key on the keyboard or +opening a laptop lid). + +After wakeup, control goes to the platform firmware that runs a boot loader +which boots a fresh instance of the kernel (control may also go directly to +the boot loader, depending on the system configuration, but anyway it causes +a fresh instance of the kernel to be booted). That new instance of the kernel +(referred to as the ``restore kernel``) looks for a hibernation image in +persistent storage and if one is found, it is loaded into memory. Next, all +activity in the system is stopped and the restore kernel overwrites itself with +the image contents and jumps into a special trampoline area in the original +kernel stored in the image (referred to as the ``image kernel``), which is where +the special architecture-specific low-level code is needed. Finally, the +image kernel restores the system to the pre-hibernation state and allows user +space to run again. + +Hibernation is supported if the :c:macro:`CONFIG_HIBERNATION` kernel +configuration option is set. However, this option can only be set if support +for the given CPU architecture includes the low-level code for system resume. + + +Basic ``sysfs`` Interfaces for System Suspend and Hibernation +============================================================= + +The following files located in the :file:`/sys/power/` directory can be used by +user space for sleep states control. + +``state`` + This file contains a list of strings representing sleep states supported + by the kernel. Writing one of these strings into it causes the kernel + to start a transition of the system into the sleep state represented by + that string. + + In particular, the strings "disk", "freeze" and "standby" represent the + :ref:`hibernation `, :ref:`suspend-to-idle ` and + :ref:`standby ` sleep states, respectively. The string "mem" + is interpreted in accordance with the contents of the ``mem_sleep`` file + described below. + + If the kernel does not support any system sleep states, this file is + not present. + +``mem_sleep`` + This file contains a list of strings representing supported system + suspend variants and allows user space to select the variant to be + associated with the "mem" string in the ``state`` file described above. + + The strings that may be present in this file are "s2idle", "shallow" + and "deep". The string "s2idle" always represents :ref:`suspend-to-idle + ` and, by convention, "shallow" and "deep" represent + :ref:`standby ` and :ref:`suspend-to-RAM `, + respectively. + + Writing one of the listed strings into this file causes the system + suspend variant represented by it to be associated with the "mem" string + in the ``state`` file. The string representing the suspend variant + currently associated with the "mem" string in the ``state`` file + is listed in square brackets. 
+ + If the kernel does not support system suspend, this file is not present. + +``disk`` + This file contains a list of strings representing different operations + that can be carried out after the hibernation image has been saved. The + possible options are as follows: + + ``platform`` + Put the system into a special low-power state (e.g. ACPI S4) to + make additional wakeup options available and possibly allow the + platform firmware to take a simplified initialization path after + wakeup. + + ``shutdown`` + Power off the system. + + ``reboot`` + Reboot the system (useful for diagnostics mostly). + + ``suspend`` + Hybrid system suspend. Put the system into the suspend sleep + state selected through the ``mem_sleep`` file described above. + If the system is successfully woken up from that state, discard + the hibernation image and continue. Otherwise, use the image + to restore the previous state of the system. + + ``test_resume`` + Diagnostic operation. Load the image as though the system had + just woken up from hibernation and the currently running kernel + instance was a restore kernel and follow up with full system + resume. + + Writing one of the listed strings into this file causes the option + represented by it to be selected. + + The currently selected option is shown in square brackets which means + that the operation represented by it will be carried out after creating + and saving the image next time hibernation is triggered by writing + ``disk`` to :file:`/sys/power/state`. + + If the kernel does not support hibernation, this file is not present. + +According to the above, there are two ways to make the system go into the +:ref:`suspend-to-idle ` state. The first one is to write "freeze" +directly to :file:`/sys/power/state`. The second one is to write "s2idle" to +:file:`/sys/power/mem_sleep` and then to write "mem" to +:file:`/sys/power/state`. Likewise, there are two ways to make the system go +into the :ref:`standby ` state (the strings to write to the control +files in that case are "standby" or "shallow" and "mem", respectively) if that +state is supported by the platform. However, there is only one way to make the +system go into the :ref:`suspend-to-RAM ` state (write "deep" into +:file:`/sys/power/mem_sleep` and "mem" into :file:`/sys/power/state`). + +The default suspend variant (ie. the one to be used without writing anything +into :file:`/sys/power/mem_sleep`) is either "deep" (on the majority of systems +supporting :ref:`suspend-to-RAM `) or "s2idle", but it can be overridden +by the value of the "mem_sleep_default" parameter in the kernel command line. +On some ACPI-based systems, depending on the information in the ACPI tables, the +default may be "s2idle" even if :ref:`suspend-to-RAM ` is supported. diff --git a/Documentation/admin-guide/pm/strategies.rst b/Documentation/admin-guide/pm/strategies.rst new file mode 100644 index 000000000000..afe4d3f831fe --- /dev/null +++ b/Documentation/admin-guide/pm/strategies.rst @@ -0,0 +1,52 @@ +=========================== +Power Management Strategies +=========================== + +:: + + Copyright (c) 2017 Intel Corp., Rafael J. Wysocki + +The Linux kernel supports two major high-level power management strategies. + +One of them is based on using global low-power states of the whole system in +which user space code cannot be executed and the overall system activity is +significantly reduced, referred to as :doc:`sleep states `. 
The +kernel puts the system into one of these states when requested by user space +and the system stays in it until a special signal is received from one of +designated devices, triggering a transition to the ``working state`` in which +user space code can run. Because sleep states are global and the whole system +is affected by the state changes, this strategy is referred to as the +:doc:`system-wide power management `. + +The other strategy, referred to as the :doc:`working-state power management +`, is based on adjusting the power states of individual hardware +components of the system, as needed, in the working state. In consequence, if +this strategy is in use, the working state of the system usually does not +correspond to any particular physical configuration of it, but can be treated as +a metastate covering a range of different power states of the system in which +the individual components of it can be either ``active`` (in use) or +``inactive`` (idle). If they are active, they have to be in power states +allowing them to process data and to be accessed by software. In turn, if they +are inactive, ideally, they should be in low-power states in which they may not +be accessible. + +If all of the system components are active, the system as a whole is regarded as +"runtime active" and that situation typically corresponds to the maximum power +draw (or maximum energy usage) of it. If all of them are inactive, the system +as a whole is regarded as "runtime idle" which may be very close to a sleep +state from the physical system configuration and power draw perspective, but +then it takes much less time and effort to start executing user space code than +for the same system in a sleep state. However, transitions from sleep states +back to the working state can only be started by a limited set of devices, so +typically the system can spend much more time in a sleep state than it can be +runtime idle in one go. For this reason, systems usually use less energy in +sleep states than when they are runtime idle most of the time. + +Moreover, the two power management strategies address different usage scenarios. +Namely, if the user indicates that the system will not be in use going forward, +for example by closing its lid (if the system is a laptop), it probably should +go into a sleep state at that point. On the other hand, if the user simply goes +away from the laptop keyboard, it probably should stay in the working state and +use the working-state power management in case it becomes idle, because the user +may come back to it at any time and then may want the system to be immediately +accessible. diff --git a/Documentation/admin-guide/pm/system-wide.rst b/Documentation/admin-guide/pm/system-wide.rst new file mode 100644 index 000000000000..0c81e4c5de39 --- /dev/null +++ b/Documentation/admin-guide/pm/system-wide.rst @@ -0,0 +1,8 @@ +============================ +System-Wide Power Management +============================ + +.. toctree:: + :maxdepth: 2 + + sleep-states diff --git a/Documentation/admin-guide/pm/working-state.rst b/Documentation/admin-guide/pm/working-state.rst new file mode 100644 index 000000000000..fa01bf083dfe --- /dev/null +++ b/Documentation/admin-guide/pm/working-state.rst @@ -0,0 +1,9 @@ +============================== +Working-State Power Management +============================== + +.. 
toctree:: + :maxdepth: 2 + + cpufreq + intel_pstate diff --git a/Documentation/block/bfq-iosched.txt b/Documentation/block/bfq-iosched.txt index 05e2822a80b3..3d6951d63489 100644 --- a/Documentation/block/bfq-iosched.txt +++ b/Documentation/block/bfq-iosched.txt @@ -16,14 +16,16 @@ throughput. So, when needed for achieving a lower latency, BFQ builds schedules that may lead to a lower throughput. If your main or only goal, for a given device, is to achieve the maximum-possible throughput at all times, then do switch off all low-latency heuristics -for that device, by setting low_latency to 0. Full details in Section 3. +for that device, by setting low_latency to 0. See Section 3 for +details on how to configure BFQ for the desired tradeoff between +latency and throughput, or on how to maximize throughput. On average CPUs, the current version of BFQ can handle devices performing at most ~30K IOPS; at most ~50 KIOPS on faster CPUs. As a reference, 30-50 KIOPS correspond to very high bandwidths with sequential I/O (e.g., 8-12 GB/s if I/O requests are 256 KB large), and -to 120-200 MB/s with 4KB random I/O. BFQ has not yet been tested on -multi-queue devices. +to 120-200 MB/s with 4KB random I/O. BFQ is currently being tested on +multi-queue devices too. The table of contents follow. Impatients can just jump to Section 3. @@ -33,7 +35,7 @@ CONTENTS 1-1 Personal systems 1-2 Server systems 2. How does BFQ work? -3. What are BFQ's tunable? +3. What are BFQ's tunables and how to properly configure BFQ? 4. BFQ group scheduling 4-1 Service guarantees provided 4-2 Interface @@ -145,19 +147,28 @@ plus a lot of code, are borrowed from CFQ. contrast, BFQ may idle the device for a short time interval, giving the process the chance to go on being served if it issues a new request in time. Device idling typically boosts the - throughput on rotational devices, if processes do synchronous - and sequential I/O. In addition, under BFQ, device idling is - also instrumental in guaranteeing the desired throughput - fraction to processes issuing sync requests (see the description - of the slice_idle tunable in this document, or [1, 2], for more - details). + throughput on rotational devices and on non-queueing flash-based + devices, if processes do synchronous and sequential I/O. In + addition, under BFQ, device idling is also instrumental in + guaranteeing the desired throughput fraction to processes + issuing sync requests (see the description of the slice_idle + tunable in this document, or [1, 2], for more details). - With respect to idling for service guarantees, if several processes are competing for the device at the same time, but - all processes (and groups, after the following commit) have - the same weight, then BFQ guarantees the expected throughput - distribution without ever idling the device. Throughput is - thus as high as possible in this common scenario. + all processes and groups have the same weight, then BFQ + guarantees the expected throughput distribution without ever + idling the device. Throughput is thus as high as possible in + this common scenario. + + - On flash-based storage with internal queueing of commands + (typically NCQ), device idling happens to be always detrimental + for throughput. So, with these devices, BFQ performs idling + only when strictly needed for service guarantees, i.e., for + guaranteeing low latency or fairness. In these cases, overall + throughput may be sub-optimal. 
No solution currently exists to + provide both strong service guarantees and optimal throughput + on devices with internal queueing. - If low-latency mode is enabled (default configuration), BFQ executes some special heuristics to detect interactive and soft @@ -191,10 +202,7 @@ plus a lot of code, are borrowed from CFQ. - Queues are scheduled according to a variant of WF2Q+, named B-WF2Q+, and implemented using an augmented rb-tree to preserve an O(log N) overall complexity. See [2] for more details. B-WF2Q+ is - also ready for hierarchical scheduling. However, for a cleaner - logical breakdown, the code that enables and completes - hierarchical support is provided in the next commit, which focuses - exactly on this feature. + also ready for hierarchical scheduling, details in Section 4. - B-WF2Q+ guarantees a tight deviation with respect to an ideal, perfectly fair, and smooth service. In particular, B-WF2Q+ @@ -249,13 +257,24 @@ plus a lot of code, are borrowed from CFQ. the Idle class, to prevent it from starving. -3. What are BFQ's tunable? -========================== +3. What are BFQ's tunables and how to properly configure BFQ? +============================================================= -The tunables back_seek-max, back_seek_penalty, fifo_expire_async and -fifo_expire_sync below are the same as in CFQ. Their description is -just copied from that for CFQ. Some considerations in the description -of slice_idle are copied from CFQ too. +Most BFQ tunables affect service guarantees (basically latency and +fairness) and throughput. For full details on how to choose the +desired tradeoff between service guarantees and throughput, see the +parameters slice_idle, strict_guarantees and low_latency. For details +on how to maximise throughput, see slice_idle, timeout_sync and +max_budget. The other performance-related parameters have been +inherited from, and have been preserved mostly for compatibility with +CFQ. So far, no performance improvement has been reported after +changing the latter parameters in BFQ. + +In particular, the tunables back_seek-max, back_seek_penalty, +fifo_expire_async and fifo_expire_sync below are the same as in +CFQ. Their description is just copied from that for CFQ. Some +considerations in the description of slice_idle are copied from CFQ +too. per-process ioprio and weight ----------------------------- @@ -285,15 +304,17 @@ number of seeks and see improved throughput. Setting slice_idle to 0 will remove all the idling on queues and one should see an overall improved throughput on faster storage devices -like multiple SATA/SAS disks in hardware RAID configuration. +like multiple SATA/SAS disks in hardware RAID configuration, as well +as flash-based storage with internal command queueing (and +parallelism). So depending on storage and workload, it might be useful to set slice_idle=0. In general for SATA/SAS disks and software RAID of SATA/SAS disks keeping slice_idle enabled should be useful. For any configurations where there are multiple spindles behind single LUN -(Host based hardware RAID controller or for storage arrays), setting -slice_idle=0 might end up in better throughput and acceptable -latencies. +(Host based hardware RAID controller or for storage arrays), or with +flash-based fast storage, setting slice_idle=0 might end up in better +throughput and acceptable latencies. Idling is however necessary to have service guarantees enforced in case of differentiated weights or differentiated I/O-request lengths. 
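For example, on flash-based storage with internal command queueing one might disable idling as discussed above (sdX is a placeholder device name, and the iosched directory is only present while BFQ is the active scheduler for that device)::

	echo 0 > /sys/block/sdX/queue/iosched/slice_idle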
@@ -312,13 +333,14 @@ There is an important flipside for idling: apart from the above cases where it is beneficial also for throughput, idling can severely impact throughput. One important case is random workload. Because of this issue, BFQ tends to avoid idling as much as possible, when it is not -beneficial also for throughput. As a consequence of this behavior, and -of further issues described for the strict_guarantees tunable, -short-term service guarantees may be occasionally violated. And, in -some cases, these guarantees may be more important than guaranteeing -maximum throughput. For example, in video playing/streaming, a very -low drop rate may be more important than maximum throughput. In these -cases, consider setting the strict_guarantees parameter. +beneficial also for throughput (as detailed in Section 2). As a +consequence of this behavior, and of further issues described for the +strict_guarantees tunable, short-term service guarantees may be +occasionally violated. And, in some cases, these guarantees may be +more important than guaranteeing maximum throughput. For example, in +video playing/streaming, a very low drop rate may be more important +than maximum throughput. In these cases, consider setting the +strict_guarantees parameter. strict_guarantees ----------------- @@ -420,6 +442,13 @@ The default value is 0, which enables auto-tuning: BFQ sets max_budget to the maximum number of sectors that can be served during timeout_sync, according to the estimated peak rate. +For specific devices, some users have occasionally reported to have +reached a higher throughput by setting max_budget explicitly, i.e., by +setting max_budget to a higher value than 0. In particular, they have +set max_budget to higher values than those to which BFQ would have set +it with auto-tuning. An alternative way to achieve this goal is to +just increase the value of timeout_sync, leaving max_budget equal to 0. + weights ------- @@ -427,51 +456,6 @@ Read-only parameter, used to show the weights of the currently active BFQ queues. -wr_ tunables ------------- - -BFQ exports a few parameters to control/tune the behavior of -low-latency heuristics. - -wr_coeff - -Factor by which the weight of a weight-raised queue is multiplied. If -the queue is deemed soft real-time, then the weight is further -multiplied by an additional, constant factor. - -wr_max_time - -Maximum duration of a weight-raising period for an interactive task -(ms). If set to zero (default value), then this value is computed -automatically, as a function of the peak rate of the device. In any -case, when the value of this parameter is read, it always reports the -current duration, regardless of whether it has been set manually or -computed automatically. - -wr_max_softrt_rate - -Maximum service rate below which a queue is deemed to be associated -with a soft real-time application, and is then weight-raised -accordingly (sectors/sec). - -wr_min_idle_time - -Minimum idle period after which interactive weight-raising may be -reactivated for a queue (in ms). - -wr_rt_max_time - -Maximum weight-raising duration for soft real-time queues (in ms). The -start time from which this duration is considered is automatically -moved forward if the queue is detected to be still soft real-time -before the current soft real-time weight-raising period finishes. - -wr_min_inter_arr_async - -Minimum period between I/O request arrivals after which weight-raising -may be reactivated for an already busy async queue (in ms). - - 4. 
Group scheduling with BFQ ============================ diff --git a/Documentation/blockdev/cciss.txt b/Documentation/blockdev/cciss.txt deleted file mode 100644 index 3a5477cc456e..000000000000 --- a/Documentation/blockdev/cciss.txt +++ /dev/null @@ -1,194 +0,0 @@ -This driver is for Compaq's SMART Array Controllers. - -Supported Cards: ----------------- - -This driver is known to work with the following cards: - - * SA 5300 - * SA 5i - * SA 532 - * SA 5312 - * SA 641 - * SA 642 - * SA 6400 - * SA 6400 U320 Expansion Module - * SA 6i - * SA P600 - * SA P800 - * SA E400 - * SA P400i - * SA E200 - * SA E200i - * SA E500 - * SA P700m - * SA P212 - * SA P410 - * SA P410i - * SA P411 - * SA P812 - * SA P712m - * SA P711m - -Detecting drive failures: -------------------------- - -To get the status of logical volumes and to detect physical drive -failures, you can use the cciss_vol_status program found here: -http://cciss.sourceforge.net/#cciss_utils - -Device Naming: --------------- - -If nodes are not already created in the /dev/cciss directory, run as root: - -# cd /dev -# ./MAKEDEV cciss - -You need some entries in /dev for the cciss device. The MAKEDEV script -can make device nodes for you automatically. Currently the device setup -is as follows: - -Major numbers: - 104 cciss0 - 105 cciss1 - 106 cciss2 - 105 cciss3 - 108 cciss4 - 109 cciss5 - 110 cciss6 - 111 cciss7 - -Minor numbers: - b7 b6 b5 b4 b3 b2 b1 b0 - |----+----| |----+----| - | | - | +-------- Partition ID (0=wholedev, 1-15 partition) - | - +-------------------- Logical Volume number - -The device naming scheme is: -/dev/cciss/c0d0 Controller 0, disk 0, whole device -/dev/cciss/c0d0p1 Controller 0, disk 0, partition 1 -/dev/cciss/c0d0p2 Controller 0, disk 0, partition 2 -/dev/cciss/c0d0p3 Controller 0, disk 0, partition 3 - -/dev/cciss/c1d1 Controller 1, disk 1, whole device -/dev/cciss/c1d1p1 Controller 1, disk 1, partition 1 -/dev/cciss/c1d1p2 Controller 1, disk 1, partition 2 -/dev/cciss/c1d1p3 Controller 1, disk 1, partition 3 - -CCISS simple mode support -------------------------- - -The "cciss_simple_mode=1" boot parameter may be used to prevent the driver -from putting the controller into "performant" mode. The difference is that -with simple mode, each command completion requires an interrupt, while with -"performant mode" (the default, and ordinarily better performing) it is -possible to have multiple command completions indicated by a single -interrupt. - -SCSI tape drive and medium changer support ------------------------------------------- - -SCSI sequential access devices and medium changer devices are supported and -appropriate device nodes are automatically created. (e.g. -/dev/st0, /dev/st1, etc. See the "st" man page for more details.) -You must enable "SCSI tape drive support for Smart Array 5xxx" and -"SCSI support" in your kernel configuration to be able to use SCSI -tape drives with your Smart Array 5xxx controller. - -Additionally, note that the driver will engage the SCSI core at init -time if any tape drives or medium changers are detected. The driver may -also be directed to dynamically engage the SCSI core via the /proc filesystem -entry which the "block" side of the driver creates as -/proc/driver/cciss/cciss* at runtime. This is best done via a script. - -For example: - - for x in /proc/driver/cciss/cciss[0-9]* - do - echo "engage scsi" > $x - done - -Once the SCSI core is engaged by the driver, it cannot be disengaged -(except by unloading the driver, if it happens to be linked as a module.) 
- -Note also that if no sequential access devices or medium changers are -detected, the SCSI core will not be engaged by the action of the above -script. - -Hot plug support for SCSI tape drives -------------------------------------- - -Hot plugging of SCSI tape drives is supported, with some caveats. -The cciss driver must be informed that changes to the SCSI bus -have been made. This may be done via the /proc filesystem. -For example: - - echo "rescan" > /proc/scsi/cciss0/1 - -This causes the driver to query the adapter about changes to the -physical SCSI buses and/or fibre channel arbitrated loop and the -driver to make note of any new or removed sequential access devices -or medium changers. The driver will output messages indicating what -devices have been added or removed and the controller, bus, target and -lun used to address the device. It then notifies the SCSI mid layer -of these changes. - -Note that the naming convention of the /proc filesystem entries -contains a number in addition to the driver name. (E.g. "cciss0" -instead of just "cciss" which you might expect.) - -Note: ONLY sequential access devices and medium changers are presented -as SCSI devices to the SCSI mid layer by the cciss driver. Specifically, -physical SCSI disk drives are NOT presented to the SCSI mid layer. The -physical SCSI disk drives are controlled directly by the array controller -hardware and it is important to prevent the kernel from attempting to directly -access these devices too, as if the array controller were merely a SCSI -controller in the same way that we are allowing it to access SCSI tape drives. - -SCSI error handling for tape drives and medium changers -------------------------------------------------------- - -The linux SCSI mid layer provides an error handling protocol which -kicks into gear whenever a SCSI command fails to complete within a -certain amount of time (which can vary depending on the command). -The cciss driver participates in this protocol to some extent. The -normal protocol is a four step process. First the device is told -to abort the command. If that doesn't work, the device is reset. -If that doesn't work, the SCSI bus is reset. If that doesn't work -the host bus adapter is reset. Because the cciss driver is a block -driver as well as a SCSI driver and only the tape drives and medium -changers are presented to the SCSI mid layer, and unlike more -straightforward SCSI drivers, disk i/o continues through the block -side during the SCSI error recovery process, the cciss driver only -implements the first two of these actions, aborting the command, and -resetting the device. Additionally, most tape drives will not oblige -in aborting commands, and sometimes it appears they will not even -obey a reset command, though in most circumstances they will. In -the case that the command cannot be aborted and the device cannot be -reset, the device will be set offline. - -In the event the error handling code is triggered and a tape drive is -successfully reset or the tardy command is successfully aborted, the -tape drive may still not allow i/o to continue until some command -is issued which positions the tape to a known position. Typically you -must rewind the tape (by issuing "mt -f /dev/st0 rewind" for example) -before i/o can proceed again to a tape drive which was reset. - -There is a cciss_tape_cmds module parameter which can be used to make cciss -allocate more commands for use by tape drives. 
Ordinarily only a few commands -(6) are allocated for tape drives because tape drives are slow and -infrequently used and the primary purpose of Smart Array controllers is to -act as a RAID controller for disk drives, so the vast majority of commands -are allocated for disk devices. However, if you have more than a few tape -drives attached to a smart array, the default number of commands may not be -enough (for example, if you have 8 tape drives, you could only rewind 6 -at one time with the default number of commands.) The cciss_tape_cmds module -parameter allows more commands (up to 16 more) to be allocated for use by -tape drives. For example: - - insmod cciss.ko cciss_tape_cmds=16 - -Or, as a kernel boot parameter passed in via grub: cciss.cciss_tape_cmds=8 diff --git a/Documentation/blockdev/zram.txt b/Documentation/blockdev/zram.txt index 4fced8a21307..257e65714c6a 100644 --- a/Documentation/blockdev/zram.txt +++ b/Documentation/blockdev/zram.txt @@ -168,6 +168,7 @@ max_comp_streams RW the number of possible concurrent compress operations comp_algorithm RW show and change the compression algorithm compact WO trigger memory compaction debug_stat RO this file is used for zram debugging purposes +backing_dev RW set up backend storage for zram to write out User space is advised to use the following files to read the device statistics. @@ -231,5 +232,15 @@ line of text and contains the following stats separated by whitespace: resets the disksize to zero. You must set the disksize again before reusing the device. +* Optional Feature + += writeback + +With incompressible pages, there is no memory saving with zram. +Instead, with CONFIG_ZRAM_WRITEBACK, zram can write incompressible page +to backing storage rather than keeping it in memory. +User should set up backing device via /sys/block/zramX/backing_dev +before disksize setting. + Nitin Gupta ngupta@vflare.org diff --git a/Documentation/cgroup-v2.txt b/Documentation/cgroup-v2.txt index bde177103567..dc44785dc0fa 100644 --- a/Documentation/cgroup-v2.txt +++ b/Documentation/cgroup-v2.txt @@ -18,7 +18,9 @@ v1 is available under Documentation/cgroup-v1/. 1-2. What is cgroup? 2. Basic Operations 2-1. Mounting - 2-2. Organizing Processes + 2-2. Organizing Processes and Threads + 2-2-1. Processes + 2-2-2. Threads 2-3. [Un]populated Notification 2-4. Controlling Controllers 2-4-1. Enabling and Disabling @@ -167,8 +169,11 @@ cgroup v2 currently supports the following mount options. Delegation section for details. -Organizing Processes --------------------- +Organizing Processes and Threads +-------------------------------- + +Processes +~~~~~~~~~ Initially, only the root cgroup exists to which all processes belong. A child cgroup can be created by creating a sub-directory:: @@ -219,6 +224,105 @@ is removed subsequently, " (deleted)" is appended to the path:: 0::/test-cgroup/test-cgroup-nested (deleted) +Threads +~~~~~~~ + +cgroup v2 supports thread granularity for a subset of controllers to +support use cases requiring hierarchical resource distribution across +the threads of a group of processes. By default, all threads of a +process belong to the same cgroup, which also serves as the resource +domain to host resource consumptions which are not specific to a +process or thread. The thread mode allows threads to be spread across +a subtree while still maintaining the common resource domain for them. + +Controllers which support thread mode are called threaded controllers. +The ones which don't are called domain controllers. 
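As an illustration (an editorial sketch, not part of the patch itself): the shell session below shows how a threaded subtree might be assembled using the interface files described in the rest of this section. The cgroup names "svc", "worker-a" and "worker-b" and the thread ID $TID are hypothetical, and "svc" is assumed to be a cgroup2 cgroup that satisfies the conditions for enabling thread mode listed below::

  # cd /sys/fs/cgroup/svc
  # mkdir worker-a worker-b
  # echo threaded > worker-a/cgroup.type
  # echo threaded > worker-b/cgroup.type
  # echo $TID > worker-a/cgroup.threads

Writing "threaded" to "cgroup.type" turns the children into threaded cgroups and makes "svc" their threaded domain; the last command then moves a single thread of a process running in "svc" into "worker-a" while the rest of that process stays where it is.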
+ +Marking a cgroup threaded makes it join the resource domain of its +parent as a threaded cgroup. The parent may be another threaded +cgroup whose resource domain is further up in the hierarchy. The root +of a threaded subtree, that is, the nearest ancestor which is not +threaded, is called threaded domain or thread root interchangeably and +serves as the resource domain for the entire subtree. + +Inside a threaded subtree, threads of a process can be put in +different cgroups and are not subject to the no internal process +constraint - threaded controllers can be enabled on non-leaf cgroups +whether they have threads in them or not. + +As the threaded domain cgroup hosts all the domain resource +consumptions of the subtree, it is considered to have internal +resource consumptions whether there are processes in it or not and +can't have populated child cgroups which aren't threaded. Because the +root cgroup is not subject to the no internal process constraint, it can +serve both as a threaded domain and a parent to domain cgroups. + +The current operation mode or type of the cgroup is shown in the +"cgroup.type" file which indicates whether the cgroup is a normal +domain, a domain which is serving as the domain of a threaded subtree, +or a threaded cgroup. + +On creation, a cgroup is always a domain cgroup and can be made +threaded by writing "threaded" to the "cgroup.type" file. The +operation is one-way:: + + # echo threaded > cgroup.type + +Once threaded, the cgroup can't be made a domain again. To enable the +thread mode, the following conditions must be met. + +- As the cgroup will join the parent's resource domain, the parent + must either be a valid (threaded) domain or a threaded cgroup. + +- When the parent is an unthreaded domain, it must not have any domain + controllers enabled or populated domain children. The root is + exempt from this requirement. + +Topology-wise, a cgroup can be in an invalid state. Please consider +the following topology:: + + A (threaded domain) - B (threaded) - C (domain, just created) + +C is created as a domain but isn't connected to a parent which can +host child domains. C can't be used until it is turned into a +threaded cgroup. The "cgroup.type" file will report "domain (invalid)" in +these cases. Operations which fail due to invalid topology use +EOPNOTSUPP as the errno. + +A domain cgroup is turned into a threaded domain when one of its child +cgroups becomes threaded or threaded controllers are enabled in the +"cgroup.subtree_control" file while there are processes in the cgroup. +A threaded domain reverts to a normal domain when the conditions +clear. + +When read, "cgroup.threads" contains the list of the thread IDs of all +threads in the cgroup. Except that the operations are per-thread +instead of per-process, "cgroup.threads" has the same format and +behaves the same way as "cgroup.procs". While "cgroup.threads" can be +written to in any cgroup, as it can only move threads inside the same +threaded domain, its operations are confined inside each threaded +subtree. + +The threaded domain cgroup serves as the resource domain for the whole +subtree, and, while the threads can be scattered across the subtree, +all the processes are considered to be in the threaded domain cgroup. +"cgroup.procs" in a threaded domain cgroup contains the PIDs of all +processes in the subtree and is not readable in the subtree proper.
+However, "cgroup.procs" can be written to from anywhere in the subtree +to migrate all threads of the matching process to the cgroup. + +Only threaded controllers can be enabled in a threaded subtree. When +a threaded controller is enabled inside a threaded subtree, it only +accounts for and controls resource consumptions associated with the +threads in the cgroup and its descendants. All consumptions which +aren't tied to a specific thread belong to the threaded domain cgroup. + +Because a threaded subtree is exempt from no internal process +constraint, a threaded controller must be able to handle competition +between threads in a non-leaf cgroup and its child cgroups. Each +threaded controller defines how such competitions are handled. + + [Un]populated Notification -------------------------- @@ -302,15 +406,15 @@ disabled if one or more children have it enabled. No Internal Process Constraint ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Non-root cgroups can only distribute resources to their children when -they don't have any processes of their own. In other words, only -cgroups which don't contain any processes can have controllers enabled -in their "cgroup.subtree_control" files. +Non-root cgroups can distribute domain resources to their children +only when they don't have any processes of their own. In other words, +only domain cgroups which don't contain any processes can have domain +controllers enabled in their "cgroup.subtree_control" files. -This guarantees that, when a controller is looking at the part of the -hierarchy which has it enabled, processes are always only on the -leaves. This rules out situations where child cgroups compete against -internal processes of the parent. +This guarantees that, when a domain controller is looking at the part +of the hierarchy which has it enabled, processes are always only on +the leaves. This rules out situations where child cgroups compete +against internal processes of the parent. The root cgroup is exempt from this restriction. Root contains processes and anonymous resource consumption which can't be associated @@ -334,10 +438,10 @@ Model of Delegation ~~~~~~~~~~~~~~~~~~~ A cgroup can be delegated in two ways. First, to a less privileged -user by granting write access of the directory and its "cgroup.procs" -and "cgroup.subtree_control" files to the user. Second, if the -"nsdelegate" mount option is set, automatically to a cgroup namespace -on namespace creation. +user by granting write access of the directory and its "cgroup.procs", +"cgroup.threads" and "cgroup.subtree_control" files to the user. +Second, if the "nsdelegate" mount option is set, automatically to a +cgroup namespace on namespace creation. Because the resource control interface files in a given directory control the distribution of the parent's resources, the delegatee @@ -644,6 +748,29 @@ Core Interface Files All cgroup core files are prefixed with "cgroup." + cgroup.type + + A read-write single value file which exists on non-root + cgroups. + + When read, it indicates the current type of the cgroup, which + can be one of the following values. + + - "domain" : A normal valid domain cgroup. + + - "domain threaded" : A threaded domain cgroup which is + serving as the root of a threaded subtree. + + - "domain invalid" : A cgroup which is in an invalid state. + It can't be populated or have controllers enabled. It may + be allowed to become a threaded cgroup. + + - "threaded" : A threaded cgroup which is a member of a + threaded subtree. 
+ + A cgroup can be turned into a threaded cgroup by writing + "threaded" to this file. + cgroup.procs A read-write new-line separated values file which exists on all cgroups. @@ -658,9 +785,6 @@ All cgroup core files are prefixed with "cgroup." the PID to the cgroup. The writer should match all of the following conditions. - - Its euid is either root or must match either uid or suid of - the target process. - - It must have write access to the "cgroup.procs" file. - It must have write access to the "cgroup.procs" file of the @@ -669,6 +793,35 @@ All cgroup core files are prefixed with "cgroup." When delegating a sub-hierarchy, write access to this file should be granted along with the containing directory. + In a threaded cgroup, reading this file fails with EOPNOTSUPP + as all the processes belong to the thread root. Writing is + supported and moves every thread of the process to the cgroup. + + cgroup.threads + A read-write new-line separated values file which exists on + all cgroups. + + When read, it lists the TIDs of all threads which belong to + the cgroup one-per-line. The TIDs are not ordered and the + same TID may show up more than once if the thread got moved to + another cgroup and then back or the TID got recycled while + reading. + + A TID can be written to migrate the thread associated with the + TID to the cgroup. The writer should match all of the + following conditions. + + - It must have write access to the "cgroup.threads" file. + + - The cgroup that the thread is currently in must be in the + same resource domain as the destination cgroup. + + - It must have write access to the "cgroup.procs" file of the + common ancestor of the source and destination cgroups. + + When delegating a sub-hierarchy, write access to this file + should be granted along with the containing directory. + cgroup.controllers A read-only space separated values file which exists on all cgroups. @@ -701,6 +854,38 @@ All cgroup core files are prefixed with "cgroup." 1 if the cgroup or its descendants contains any live processes; otherwise, 0. + cgroup.max.descendants + A read-write single value file. The default is "max". + + Maximum allowed number of descendant cgroups. + If the actual number of descendants is equal to or larger, + an attempt to create a new cgroup in the hierarchy will fail. + + cgroup.max.depth + A read-write single value file. The default is "max". + + Maximum allowed descendant depth below the current cgroup. + If the actual descendant depth is equal to or larger, + an attempt to create a new child cgroup will fail. + + cgroup.stat + A read-only flat-keyed file with the following entries: + + nr_descendants + Total number of visible descendant cgroups. + + nr_dying_descendants + Total number of dying descendant cgroups. A cgroup becomes + dying after being deleted by a user. The cgroup will remain + in the dying state for an undefined amount of time (which can depend + on system load) before being completely destroyed. + + A process can't enter a dying cgroup under any circumstances, + and a dying cgroup can't revive. + + A dying cgroup can consume system resources not exceeding + the limits that were active at the moment of cgroup deletion. + Controllers =========== diff --git a/Documentation/conf.py b/Documentation/conf.py index f9054ab60cb1..63857d33778c 100644 --- a/Documentation/conf.py +++ b/Documentation/conf.py @@ -271,10 +271,29 @@ latex_elements = { # Additional stuff for the LaTeX preamble.
'preamble': ''' - \\usepackage{ifthen} + % Use some font with UTF-8 support with XeLaTeX + \\usepackage{fontspec} + \\setsansfont{DejaVu Serif} + \\setromanfont{DejaVu Sans} + \\setmonofont{DejaVu Sans Mono} - % Allow generate some pages in landscape - \\usepackage{lscape} + ''' +} + +# Fix reference escape troubles with Sphinx 1.4.x +if major == 1 and minor > 3: + latex_elements['preamble'] += '\\renewcommand*{\\DUrole}[2]{ #2 }\n' + +if major == 1 and minor <= 4: + latex_elements['preamble'] += '\\usepackage[margin=0.5in, top=1in, bottom=1in]{geometry}' +elif major == 1 and (minor > 5 or (minor == 5 and patch >= 3)): + latex_elements['sphinxsetup'] = 'hmargin=0.5in, vmargin=1in' + latex_elements['preamble'] += '\\fvset{fontsize=auto}\n' + +# Customize notice background colors on Sphinx < 1.6: +if major == 1 and minor < 6: + latex_elements['preamble'] += ''' + \\usepackage{ifthen} % Put notes in color and let them be inside a table \\definecolor{NoteColor}{RGB}{204,255,255} @@ -325,27 +344,26 @@ latex_elements = { } \\makeatother - % Use some font with UTF-8 support with XeLaTeX - \\usepackage{fontspec} - \\setsansfont{DejaVu Serif} - \\setromanfont{DejaVu Sans} - \\setmonofont{DejaVu Sans Mono} - - % To allow adjusting table sizes - \\usepackage{adjustbox} - ''' -} -# Fix reference escape troubles with Sphinx 1.4.x -if major == 1 and minor > 3: - latex_elements['preamble'] += '\\renewcommand*{\\DUrole}[2]{ #2 }\n' - -if major == 1 and minor <= 4: - latex_elements['preamble'] += '\\usepackage[margin=0.5in, top=1in, bottom=1in]{geometry}' -elif major == 1 and (minor > 5 or (minor == 5 and patch >= 3)): - latex_elements['sphinxsetup'] = 'hmargin=0.5in, vmargin=1in' - latex_elements['preamble'] += '\\fvset{fontsize=auto}\n' +# With Sphinx 1.6, it is possible to change the Bg color directly +# by using: +# \definecolor{sphinxnoteBgColor}{RGB}{204,255,255} +# \definecolor{sphinxwarningBgColor}{RGB}{255,204,204} +# \definecolor{sphinxattentionBgColor}{RGB}{255,255,204} +# \definecolor{sphinximportantBgColor}{RGB}{192,255,204} +# +# However, it require to use sphinx heavy box with: +# +# \renewenvironment{sphinxlightbox} {% +# \\begin{sphinxheavybox} +# } +# \\end{sphinxheavybox} +# } +# +# Unfortunately, the implementation is buggy: if a note is inside a +# table, it isn't displayed well. So, for now, let's use boring +# black and white notes. # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, diff --git a/Documentation/core-api/workqueue.rst b/Documentation/core-api/workqueue.rst index ffdec94fbca1..00a5ba51e63f 100644 --- a/Documentation/core-api/workqueue.rst +++ b/Documentation/core-api/workqueue.rst @@ -39,8 +39,8 @@ up. Although MT wq wasted a lot of resource, the level of concurrency provided was unsatisfactory. The limitation was common to both ST and MT wq albeit less severe on MT. Each wq maintained its own separate -worker pool. A MT wq could provide only one execution context per CPU -while a ST wq one for the whole system. Work items had to compete for +worker pool. An MT wq could provide only one execution context per CPU +while an ST wq one for the whole system. Work items had to compete for those very limited execution contexts leading to various problems including proneness to deadlocks around the single execution context. @@ -151,7 +151,7 @@ Application Programming Interface (API) ``alloc_workqueue()`` allocates a wq. The original ``create_*workqueue()`` functions are deprecated and scheduled for -removal. 
``alloc_workqueue()`` takes three arguments - @``name``, +removal. ``alloc_workqueue()`` takes three arguments - ``@name``, ``@flags`` and ``@max_active``. ``@name`` is the name of the wq and also used as the name of the rescuer thread if there is one. @@ -197,7 +197,7 @@ resources, scheduled and executed. served by worker threads with elevated nice level. Note that normal and highpri worker-pools don't interact with - each other. Each maintain its separate pool of workers and + each other. Each maintains its separate pool of workers and implements concurrency management among its workers. ``WQ_CPU_INTENSIVE`` @@ -243,11 +243,15 @@ throttling the number of active work items, specifying '0' is recommended. Some users depend on the strict execution ordering of ST wq. The -combination of ``@max_active`` of 1 and ``WQ_UNBOUND`` is used to -achieve this behavior. Work items on such wq are always queued to the -unbound worker-pools and only one work item can be active at any given +combination of ``@max_active`` of 1 and ``WQ_UNBOUND`` used to +achieve this behavior. Work items on such wq were always queued to the +unbound worker-pools and only one work item could be active at any given time thus achieving the same ordering property as ST wq. +In the current implementation, the above configuration only guarantees +ST behavior within a given NUMA node. Instead, ``alloc_ordered_workqueue()`` should +be used to achieve system-wide ST behavior. + Example Execution Scenarios =========================== diff --git a/Documentation/cpu-freq/index.txt b/Documentation/cpu-freq/index.txt index 03a7cee6ac73..c15e75386a05 100644 --- a/Documentation/cpu-freq/index.txt +++ b/Documentation/cpu-freq/index.txt @@ -32,8 +32,6 @@ cpufreq-stats.txt - General description of sysfs cpufreq stats. index.txt - File index, Mailing list and Links (this document) -intel-pstate.txt - Intel pstate cpufreq driver specific file. - pcc-cpufreq.txt - PCC cpufreq driver specific file. diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt index 4a0a7469fdd7..32df07e29f68 100644 --- a/Documentation/device-mapper/dm-raid.txt +++ b/Documentation/device-mapper/dm-raid.txt @@ -344,3 +344,4 @@ Version History (wrong raid10_copies/raid10_format sequence) 1.11.1 Add raid4/5/6 journal write-back support via journal_mode option 1.12.1 fix for MD deadlock between mddev_suspend() and md_write_start() available +1.13.0 Fix dev_health status at end of "recover" (was 'a', now 'A') diff --git a/Documentation/devicetree/bindings/arc/hsdk.txt b/Documentation/devicetree/bindings/arc/hsdk.txt new file mode 100644 index 000000000000..be50654bbf61 --- /dev/null +++ b/Documentation/devicetree/bindings/arc/hsdk.txt @@ -0,0 +1,7 @@ +Synopsys DesignWare ARC HS Development Kit Device Tree Bindings +--------------------------------------------------------------------------- + +ARC HSDK Board with quad-core ARC HS38x4 in silicon. + +Required root node properties: + - compatible = "snps,hsdk"; diff --git a/Documentation/devicetree/bindings/arm/amlogic.txt b/Documentation/devicetree/bindings/arm/amlogic.txt index 0fff40a6330d..4e4bc0bae597 100644 --- a/Documentation/devicetree/bindings/arm/amlogic.txt +++ b/Documentation/devicetree/bindings/arm/amlogic.txt @@ -1,6 +1,18 @@ Amlogic MesonX device tree bindings ------------------------------------------- +Work in progress statement: + +Device tree files and bindings applying to Amlogic SoCs and boards are +considered "unstable".
Any Amlogic device tree binding may change at +any time. Be sure to use a device tree binary and a kernel image +generated from the same source tree. + +Please refer to Documentation/devicetree/bindings/ABI.txt for a definition of a +stable binding/ABI. + +--------------------------------------------------------------- + Boards with the Amlogic Meson6 SoC shall have the following properties: Required root node property: compatible: "amlogic,meson6" @@ -61,3 +73,32 @@ Board compatible values (alphabetically, grouped by SoC): - "amlogic,q201" (Meson gxm s912) - "kingnovel,r-box-pro" (Meson gxm S912) - "nexbox,a1" (Meson gxm s912) + +Amlogic Meson Firmware registers Interface +------------------------------------------ + +The Meson SoCs have a register bank with status and data shared with the +secure firmware. + +Required properties: + - compatible: For Meson GX SoCs, must be "amlogic,meson-gx-ao-secure", "syscon" + +Properties should indentify components of this register interface : + +Meson GX SoC Information +------------------------ +A firmware register encodes the SoC type, package and revision information on +the Meson GX SoCs. +If present, the following property should be added : + +Optional properties: + - amlogic,has-chip-id: If present, the interface gives the current SoC version. + +Example +------- + +ao-secure@140 { + compatible = "amlogic,meson-gx-ao-secure", "syscon"; + reg = <0x0 0x140 0x0 0x140>; + amlogic,has-chip-id; +}; diff --git a/Documentation/devicetree/bindings/arm/arch_timer.txt b/Documentation/devicetree/bindings/arm/arch_timer.txt index e926aea1147d..68301b77e854 100644 --- a/Documentation/devicetree/bindings/arm/arch_timer.txt +++ b/Documentation/devicetree/bindings/arm/arch_timer.txt @@ -108,6 +108,5 @@ Example: frame-number = <1> interrupts = <0 15 0x8>; reg = <0xf0003000 0x1000>; - status = "disabled"; }; }; diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm2835.txt b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm2835.txt index 9c97de23919a..3e3efa046ac5 100644 --- a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm2835.txt +++ b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm2835.txt @@ -42,6 +42,10 @@ Raspberry Pi Zero Required root node properties: compatible = "raspberrypi,model-zero", "brcm,bcm2835"; +Raspberry Pi Zero W +Required root node properties: +compatible = "raspberrypi,model-zero-w", "brcm,bcm2835"; + Generic BCM2835 board Required root node properties: compatible = "brcm,bcm2835"; diff --git a/Documentation/devicetree/bindings/arm/bhf.txt b/Documentation/devicetree/bindings/arm/bhf.txt new file mode 100644 index 000000000000..886b503caf9c --- /dev/null +++ b/Documentation/devicetree/bindings/arm/bhf.txt @@ -0,0 +1,6 @@ +Beckhoff Automation Platforms Device Tree Bindings +-------------------------------------------------- + +CX9020 Embedded PC +Required root node properties: + - compatible = "bhf,cx9020", "fsl,imx53"; diff --git a/Documentation/devicetree/bindings/arm/cpus.txt b/Documentation/devicetree/bindings/arm/cpus.txt index a44253cad269..b92f12bd5244 100644 --- a/Documentation/devicetree/bindings/arm/cpus.txt +++ b/Documentation/devicetree/bindings/arm/cpus.txt @@ -200,6 +200,7 @@ described below. 
"arm,realview-smp" "brcm,bcm11351-cpu-method" "brcm,bcm23550" + "brcm,bcm2836-smp" "brcm,bcm-nsp-smp" "brcm,brahma-b15" "marvell,armada-375-smp" diff --git a/Documentation/devicetree/bindings/arm/marvell/armada-8kp.txt b/Documentation/devicetree/bindings/arm/marvell/armada-8kp.txt new file mode 100644 index 000000000000..f3e9624534c6 --- /dev/null +++ b/Documentation/devicetree/bindings/arm/marvell/armada-8kp.txt @@ -0,0 +1,15 @@ +Marvell Armada 8KPlus Platforms Device Tree Bindings +---------------------------------------------------- + +Boards using a SoC of the Marvell Armada 8KP families must carry +the following root node property: + + - compatible, with one of the following values: + + - "marvell,armada-8080", "marvell,armada-ap810-octa", "marvell,armada-ap810" + when the SoC being used is the Armada 8080 + +Example: + +compatible = "marvell,armada-8080-db", "marvell,armada-8080", + "marvell,armada-ap810-octa", "marvell,armada-ap810" diff --git a/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt b/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt index 171d02cadea4..29cdbae6c5ac 100644 --- a/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt +++ b/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt @@ -183,7 +183,6 @@ cpm_syscon0: system-controller@440000 { gpio-controller; #gpio-cells = <2>; gpio-ranges = <&cpm_pinctrl 0 0 32>; - status = "disabled"; }; }; diff --git a/Documentation/devicetree/bindings/arm/mediatek.txt b/Documentation/devicetree/bindings/arm/mediatek.txt index da7bd138e6f2..91d517849483 100644 --- a/Documentation/devicetree/bindings/arm/mediatek.txt +++ b/Documentation/devicetree/bindings/arm/mediatek.txt @@ -1,12 +1,12 @@ -MediaTek mt65xx, mt67xx & mt81xx Platforms Device Tree Bindings +MediaTek SoC based Platforms Device Tree Bindings -Boards with a MediaTek mt65xx/mt67xx/mt81xx SoC shall have the -following property: +Boards with a MediaTek SoC shall have the following property: Required root node property: compatible: Must contain one of "mediatek,mt2701" + "mediatek,mt2712" "mediatek,mt6580" "mediatek,mt6589" "mediatek,mt6592" @@ -14,7 +14,8 @@ compatible: Must contain one of "mediatek,mt6795" "mediatek,mt6797" "mediatek,mt7622" - "mediatek,mt7623" + "mediatek,mt7623" which is referred to MT7623N SoC + "mediatek,mt7623a" "mediatek,mt8127" "mediatek,mt8135" "mediatek,mt8173" @@ -25,6 +26,9 @@ Supported boards: - Evaluation board for MT2701: Required root node properties: - compatible = "mediatek,mt2701-evb", "mediatek,mt2701"; +- Evaluation board for MT2712: + Required root node properties: + - compatible = "mediatek,mt2712-evb", "mediatek,mt2712"; - Evaluation board for MT6580: Required root node properties: - compatible = "mediatek,mt6580-evbp1", "mediatek,mt6580"; @@ -46,9 +50,11 @@ Supported boards: - Reference board variant 1 for MT7622: Required root node properties: - compatible = "mediatek,mt7622-rfb1", "mediatek,mt7622"; -- Evaluation board for MT7623: +- Reference board for MT7623n with NAND: Required root node properties: - - compatible = "mediatek,mt7623-evb", "mediatek,mt7623"; + - compatible = "mediatek,mt7623n-rfb-nand", "mediatek,mt7623"; +- Bananapi BPI-R2 board: + - compatible = "bananapi,bpi-r2", "mediatek,mt7623"; - MTK mt8127 tablet moose EVB: Required root node properties: - compatible = "mediatek,mt8127-moose", "mediatek,mt8127"; diff --git a/Documentation/devicetree/bindings/arm/omap/omap.txt 
b/Documentation/devicetree/bindings/arm/omap/omap.txt index 8219b2c6bb29..2ecc712bf707 100644 --- a/Documentation/devicetree/bindings/arm/omap/omap.txt +++ b/Documentation/devicetree/bindings/arm/omap/omap.txt @@ -80,6 +80,9 @@ SoCs: - OMAP5432 compatible = "ti,omap5432", "ti,omap5" +- DRA762 + compatible = "ti,dra762", "ti,dra7" + - DRA742 compatible = "ti,dra742", "ti,dra74", "ti,dra7" @@ -154,6 +157,9 @@ Boards: - AM335X phyCORE-AM335x: Development kit compatible = "phytec,am335x-pcm-953", "phytec,am335x-phycore-som", "ti,am33xx" +- AM335X UC-8100-ME-T: Communication-centric industrial computing platform + compatible = "moxa,uc-8100-me-t", "ti,am33xx"; + - OMAP5 EVM : Evaluation Module compatible = "ti,omap5-evm", "ti,omap5" @@ -184,6 +190,9 @@ Boards: - AM5718 IDK compatible = "ti,am5718-idk", "ti,am5718", "ti,dra7" +- DRA762 EVM: Software Development Board for DRA762 + compatible = "ti,dra76-evm", "ti,dra762", "ti,dra7" + - DRA742 EVM: Software Development Board for DRA742 compatible = "ti,dra7-evm", "ti,dra742", "ti,dra74", "ti,dra7" diff --git a/Documentation/devicetree/bindings/arm/qcom.txt b/Documentation/devicetree/bindings/arm/qcom.txt index 028d16e72186..0ed4d39d7fe1 100644 --- a/Documentation/devicetree/bindings/arm/qcom.txt +++ b/Documentation/devicetree/bindings/arm/qcom.txt @@ -25,6 +25,7 @@ The 'SoC' element must be one of the following strings: msm8994 msm8996 mdm9615 + ipq8074 The 'board' element must be one of the following strings: @@ -33,6 +34,7 @@ The 'board' element must be one of the following strings: dragonboard mtp sbc + hk01 The 'soc_version' and 'board_version' elements take the form of v. where the minor number may be omitted when it's zero, i.e. v1.0 is the same diff --git a/Documentation/devicetree/bindings/arm/rockchip.txt b/Documentation/devicetree/bindings/arm/rockchip.txt index 11c0ac4a2d56..b003148e2945 100644 --- a/Documentation/devicetree/bindings/arm/rockchip.txt +++ b/Documentation/devicetree/bindings/arm/rockchip.txt @@ -134,6 +134,10 @@ Rockchip platforms device tree bindings Required root node properties: - compatible = "phytec,rk3288-pcm-947", "phytec,rk3288-phycore-som", "rockchip,rk3288"; +- Pine64 Rock64 board: + Required root node properties: + - compatible = "pine64,rock64", "rockchip,rk3328"; + - Rockchip PX3 Evaluation board: Required root node properties: - compatible = "rockchip,px3-evb", "rockchip,px3", "rockchip,rk3188"; @@ -173,6 +177,14 @@ Rockchip platforms device tree bindings Required root node properties: - compatible = "rockchip,rk3399-evb", "rockchip,rk3399"; +- Rockchip RK3399 Sapphire Excavator board: + Required root node properties: + - compatible = "rockchip,rk3399-sapphire-excavator", "rockchip,rk3399"; + +- Theobroma Systems RK3399-Q7 Haikou Baseboard: + Required root node properties: + - compatible = "tsd,rk3399-q7-haikou", "rockchip,rk3399"; + - Tronsmart Orion R68 Meta Required root node properties: - compatible = "tronsmart,orion-r68-meta", "rockchip,rk3368"; diff --git a/Documentation/devicetree/bindings/arm/shmobile.txt b/Documentation/devicetree/bindings/arm/shmobile.txt index 1a671e329864..ae75cb3b1331 100644 --- a/Documentation/devicetree/bindings/arm/shmobile.txt +++ b/Documentation/devicetree/bindings/arm/shmobile.txt @@ -39,6 +39,8 @@ SoCs: compatible = "renesas,r8a7795" - R-Car M3-W (R8A77960) compatible = "renesas,r8a7796" + - R-Car D3 (R8A77995) + compatible = "renesas,r8a77995" Boards: @@ -53,6 +55,8 @@ Boards: compatible = "renesas,blanche", "renesas,r8a7792" - BOCK-W compatible = "renesas,bockw", 
"renesas,r8a7778" + - Draak (RTP0RC77995SEB0010S) + compatible = "renesas,draak", "renesas,r8a77995" - Genmai (RTK772100BC00000BR) compatible = "renesas,genmai", "renesas,r7s72100" - GR-Peach (X28A-M01-E/F) @@ -64,6 +68,10 @@ Boards: compatible = "renesas,h3ulcb", "renesas,r8a7795"; - Henninger compatible = "renesas,henninger", "renesas,r8a7791" + - iWave Systems RZ/G1E SODIMM SOM Development Platform (iW-RainboW-G22D) + compatible = "iwave,g22d", "iwave,g22m", "renesas,r8a7745" + - iWave Systems RZ/G1E SODIMM System On Module (iW-RainboW-G22M-SM) + compatible = "iwave,g22m", "renesas,r8a7745" - iWave Systems RZ/G1M Qseven Development Platform (iW-RainboW-G20D-Qseven) compatible = "iwave,g20d", "iwave,g20m", "renesas,r8a7743" - iWave Systems RZ/G1M Qseven System On Module (iW-RainboW-G20M-Qseven) diff --git a/Documentation/devicetree/bindings/ata/ahci-mtk.txt b/Documentation/devicetree/bindings/ata/ahci-mtk.txt new file mode 100644 index 000000000000..d2aa696b161b --- /dev/null +++ b/Documentation/devicetree/bindings/ata/ahci-mtk.txt @@ -0,0 +1,51 @@ +MediaTek Serial ATA controller + +Required properties: + - compatible : Must be "mediatek,-ahci", "mediatek,mtk-ahci". + When using "mediatek,mtk-ahci" compatible strings, you + need SoC specific ones in addition, one of: + - "mediatek,mt7622-ahci" + - reg : Physical base addresses and length of register sets. + - interrupts : Interrupt associated with the SATA device. + - interrupt-names : Associated name must be: "hostc". + - clocks : A list of phandle and clock specifier pairs, one for each + entry in clock-names. + - clock-names : Associated names must be: "ahb", "axi", "asic", "rbc", "pm". + - phys : A phandle and PHY specifier pair for the PHY port. + - phy-names : Associated name must be: "sata-phy". + - ports-implemented : See ./ahci-platform.txt for details. + +Optional properties: + - power-domains : A phandle and power domain specifier pair to the power + domain which is responsible for collapsing and restoring + power to the peripheral. + - resets : Must contain an entry for each entry in reset-names. + See ../reset/reset.txt for details. + - reset-names : Associated names must be: "axi", "sw", "reg". + - mediatek,phy-mode : A phandle to the system controller, used to enable + SATA function. 
+ +Example: + + sata: sata@1a200000 { + compatible = "mediatek,mt7622-ahci", + "mediatek,mtk-ahci"; + reg = <0 0x1a200000 0 0x1100>; + interrupts = ; + interrupt-names = "hostc"; + clocks = <&pciesys CLK_SATA_AHB_EN>, + <&pciesys CLK_SATA_AXI_EN>, + <&pciesys CLK_SATA_ASIC_EN>, + <&pciesys CLK_SATA_RBC_EN>, + <&pciesys CLK_SATA_PM_EN>; + clock-names = "ahb", "axi", "asic", "rbc", "pm"; + phys = <&u3port1 PHY_TYPE_SATA>; + phy-names = "sata-phy"; + ports-implemented = <0x1>; + power-domains = <&scpsys MT7622_POWER_DOMAIN_HIF0>; + resets = <&pciesys MT7622_SATA_AXI_BUS_RST>, + <&pciesys MT7622_SATA_PHY_SW_RST>, + <&pciesys MT7622_SATA_PHY_REG_RST>; + reset-names = "axi", "sw", "reg"; + mediatek,phy-mode = <&pciesys>; + }; diff --git a/Documentation/devicetree/bindings/ata/apm-xgene.txt b/Documentation/devicetree/bindings/ata/apm-xgene.txt index a668f0e7d001..02e690a675db 100644 --- a/Documentation/devicetree/bindings/ata/apm-xgene.txt +++ b/Documentation/devicetree/bindings/ata/apm-xgene.txt @@ -57,7 +57,6 @@ Example: <0x0 0x1f227000 0x0 0x1000>; interrupts = <0x0 0x87 0x4>; dma-coherent; - status = "ok"; clocks = <&sataclk 0>; phys = <&phy2 0>; phy-names = "sata-phy"; @@ -72,7 +71,6 @@ Example: <0x0 0x1f237000 0x0 0x1000>; interrupts = <0x0 0x88 0x4>; dma-coherent; - status = "ok"; clocks = <&sataclk 0>; phys = <&phy3 0>; phy-names = "sata-phy"; diff --git a/Documentation/devicetree/bindings/ata/imx-pata.txt b/Documentation/devicetree/bindings/ata/imx-pata.txt index e38d73414b0d..f1172f00188a 100644 --- a/Documentation/devicetree/bindings/ata/imx-pata.txt +++ b/Documentation/devicetree/bindings/ata/imx-pata.txt @@ -13,5 +13,4 @@ Example: reg = <0x83fe0000 0x4000>; interrupts = <70>; clocks = <&clks 161>; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/bus/mvebu-mbus.txt b/Documentation/devicetree/bindings/bus/mvebu-mbus.txt index fa6cde41b460..f2ab7fd013bd 100644 --- a/Documentation/devicetree/bindings/bus/mvebu-mbus.txt +++ b/Documentation/devicetree/bindings/bus/mvebu-mbus.txt @@ -227,7 +227,6 @@ See the example below, where a more complete device tree is shown: }; devbus-bootcs { - status = "okay"; ranges = <0 MBUS_ID(0x01, 0x2f) 0 0x8000000>; /* NOR */ @@ -240,7 +239,6 @@ See the example below, where a more complete device tree is shown: pcie-controller { compatible = "marvell,armada-xp-pcie"; - status = "okay"; device_type = "pci"; #address-cells = <3>; @@ -258,7 +256,6 @@ See the example below, where a more complete device tree is shown: pcie@1,0 { /* Port 0, Lane 0 */ - status = "okay"; }; }; diff --git a/Documentation/devicetree/bindings/bus/nvidia,tegra20-gmi.txt b/Documentation/devicetree/bindings/bus/nvidia,tegra20-gmi.txt index 83b0e54f727c..3e21eb822811 100644 --- a/Documentation/devicetree/bindings/bus/nvidia,tegra20-gmi.txt +++ b/Documentation/devicetree/bindings/bus/nvidia,tegra20-gmi.txt @@ -84,7 +84,6 @@ gmi@70090000 { reset-names = "gmi"; ranges = <4 0 0xd0000000 0xfffffff>; - status = "okay"; bus@4,0 { compatible = "simple-bus"; @@ -121,7 +120,6 @@ gmi@70090000 { reset-names = "gmi"; ranges = <4 0 0xd0000000 0xfffffff>; - status = "okay"; can@4,0 { reg = <4 0 0x100>; diff --git a/Documentation/devicetree/bindings/bus/nvidia,tegra210-aconnect.txt b/Documentation/devicetree/bindings/bus/nvidia,tegra210-aconnect.txt index 7ff13be1750b..3108d03802ee 100644 --- a/Documentation/devicetree/bindings/bus/nvidia,tegra210-aconnect.txt +++ b/Documentation/devicetree/bindings/bus/nvidia,tegra210-aconnect.txt @@ -33,7 +33,6 @@ Example: #size-cells = <1>; 
ranges = <0x702c0000 0x0 0x702c0000 0x00040000>; - status = "disabled"; child1 { ... diff --git a/Documentation/devicetree/bindings/chosen.txt b/Documentation/devicetree/bindings/chosen.txt index dee3f5d9df26..e3b13ea7d2ae 100644 --- a/Documentation/devicetree/bindings/chosen.txt +++ b/Documentation/devicetree/bindings/chosen.txt @@ -5,9 +5,31 @@ The chosen node does not represent a real device, but serves as a place for passing data between firmware and the operating system, like boot arguments. Data in the chosen node does not represent the hardware. +The following properties are recognized: -stdout-path property --------------------- + +kaslr-seed +----------- + +This property is used when booting with CONFIG_RANDOMIZE_BASE as the +entropy used to randomize the kernel image base address location. Since +it is used directly, this value is intended only for KASLR, and should +not be used for other purposes (as it may leak information about KASLR +offsets). It is parsed as a u64 value, e.g. + +/ { + chosen { + kaslr-seed = <0xfeedbeef 0xc0def00d>; + }; +}; + +Note that if this property is set from UEFI (or a bootloader in EFI +mode) when EFI_RNG_PROTOCOL is supported, it will be overwritten by +the Linux EFI stub (which will populate the property itself, using +EFI_RNG_PROTOCOL). + +stdout-path +----------- Device trees may specify the device to be used for boot console output with a stdout-path property under /chosen, as described in the Devicetree diff --git a/Documentation/devicetree/bindings/clock/alphascale,acc.txt b/Documentation/devicetree/bindings/clock/alphascale,acc.txt index 62e67e883e76..b3205b21c9d0 100644 --- a/Documentation/devicetree/bindings/clock/alphascale,acc.txt +++ b/Documentation/devicetree/bindings/clock/alphascale,acc.txt @@ -102,7 +102,6 @@ uart4: serial@80010000 { reg = <0x80010000 0x4000>; clocks = <&acc CLKID_SYS_UART4>, <&acc CLKID_AHB_UART4>; interrupts = <19>; - status = "disabled"; }; Clock consumer with only one, _AHB_ sink. diff --git a/Documentation/devicetree/bindings/clock/amlogic,gxbb-aoclkc.txt b/Documentation/devicetree/bindings/clock/amlogic,gxbb-aoclkc.txt index a55d31b48d6e..786dc39ca904 100644 --- a/Documentation/devicetree/bindings/clock/amlogic,gxbb-aoclkc.txt +++ b/Documentation/devicetree/bindings/clock/amlogic,gxbb-aoclkc.txt @@ -5,9 +5,11 @@ controllers within the Always-On part of the SoC. Required Properties: -- compatible: should be "amlogic,gxbb-aoclkc" -- reg: physical base address of the clock controller and length of memory - mapped region. +- compatible: value should be different for each SoC family as : + - GXBB (S905) : "amlogic,meson-gxbb-aoclkc" + - GXL (S905X, S905D) : "amlogic,meson-gxl-aoclkc" + - GXM (S912) : "amlogic,meson-gxm-aoclkc" + followed by the common "amlogic,meson-gx-aoclkc" - #clock-cells: should be 1. @@ -23,14 +25,22 @@ to specify the reset which they consume. All available resets are defined as preprocessor macros in the dt-bindings/reset/gxbb-aoclkc.h header and can be used in device tree sources. +Parent node should have the following properties : +- compatible: "amlogic,meson-gx-ao-sysctrl", "syscon", "simple-mfd" +- reg: base address and size of the AO system control register space. 
+ Example: AO Clock controller node: - clkc_AO: clock-controller@040 { - compatible = "amlogic,gxbb-aoclkc"; - reg = <0x0 0x040 0x0 0x4>; +ao_sysctrl: sys-ctrl@0 { + compatible = "amlogic,meson-gx-ao-sysctrl", "syscon", "simple-mfd"; + reg = <0x0 0x0 0x0 0x100>; + + clkc_AO: clock-controller { + compatible = "amlogic,meson-gxbb-aoclkc", "amlogic,meson-gx-aoclkc"; #clock-cells = <1>; #reset-cells = <1>; }; +}; Example: UART controller node that consumes the clock and reset generated by the clock controller: @@ -41,5 +51,4 @@ Example: UART controller node that consumes the clock and reset generated interrupts = <0 90 1>; clocks = <&clkc_AO CLKID_AO_UART1>; resets = <&clkc_AO RESET_AO_UART1>; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/clock/amlogic,gxbb-clkc.txt b/Documentation/devicetree/bindings/clock/amlogic,gxbb-clkc.txt index a09d627b5508..924040769186 100644 --- a/Documentation/devicetree/bindings/clock/amlogic,gxbb-clkc.txt +++ b/Documentation/devicetree/bindings/clock/amlogic,gxbb-clkc.txt @@ -33,5 +33,4 @@ Example: UART controller node that consumes the clock generated by the clock reg = <0xc81004c0 0x14>; interrupts = <0 90 1>; clocks = <&clkc CLKID_CLK81>; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/clock/amlogic,meson8b-clkc.txt b/Documentation/devicetree/bindings/clock/amlogic,meson8b-clkc.txt index 606da38c0959..b455c5aa9139 100644 --- a/Documentation/devicetree/bindings/clock/amlogic,meson8b-clkc.txt +++ b/Documentation/devicetree/bindings/clock/amlogic,meson8b-clkc.txt @@ -16,18 +16,25 @@ Required Properties: mapped region. - #clock-cells: should be 1. +- #reset-cells: should be 1. Each clock is assigned an identifier and client nodes can use this identifier to specify the clock which they consume. All available clocks are defined as preprocessor macros in the dt-bindings/clock/meson8b-clkc.h header and can be used in device tree sources. +Similarly a preprocessor macro for each reset line is defined in +dt-bindings/reset/amlogic,meson8b-clkc-reset.h (which can be used from the +device tree sources). + + Example: Clock controller node: clkc: clock-controller@c1104000 { - #clock-cells = <1>; compatible = "amlogic,meson8b-clkc"; reg = <0xc1108000 0x4>, <0xc1104000 0x460>; + #clock-cells = <1>; + #reset-cells = <1>; }; @@ -39,5 +46,4 @@ Example: UART controller node that consumes the clock generated by the clock reg = <0xc81004c0 0x14>; interrupts = <0 90 1>; clocks = <&clkc CLKID_CLK81>; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/clock/at91-clock.txt b/Documentation/devicetree/bindings/clock/at91-clock.txt index 5f3ad65daf69..51c259a92d02 100644 --- a/Documentation/devicetree/bindings/clock/at91-clock.txt +++ b/Documentation/devicetree/bindings/clock/at91-clock.txt @@ -81,6 +81,16 @@ Required properties: "atmel,sama5d2-clk-generated": at91 generated clock + "atmel,sama5d2-clk-audio-pll-frac": + at91 audio fractional pll + + "atmel,sama5d2-clk-audio-pll-pad": + at91 audio pll CLK_AUDIO output pin + + "atmel,sama5d2-clk-audio-pll-pmc" + at91 audio pll output on AUDIOPLLCLK that feeds the PMC + and can be used by peripheral clock or generic clock + Required properties for SCKC node: - reg : defines the IO memory reserved for the SCKC. - #size-cells : shall be 0 (reg is used to encode clk id). 
diff --git a/Documentation/devicetree/bindings/clock/brcm,kona-ccu.txt b/Documentation/devicetree/bindings/clock/brcm,kona-ccu.txt index 5286e260fcae..8e5a7d868557 100644 --- a/Documentation/devicetree/bindings/clock/brcm,kona-ccu.txt +++ b/Documentation/devicetree/bindings/clock/brcm,kona-ccu.txt @@ -46,7 +46,6 @@ Device tree example: uart@3e002000 { compatible = "brcm,bcm11351-dw-apb-uart", "snps,dw-apb-uart"; - status = "disabled"; reg = <0x3e002000 0x1000>; clocks = <&slave_ccu BCM281XX_SLAVE_CCU_UARTB3>; interrupts = ; diff --git a/Documentation/devicetree/bindings/clock/exynos5433-clock.txt b/Documentation/devicetree/bindings/clock/exynos5433-clock.txt index 1dc80f8811fe..fe885abc9cb4 100644 --- a/Documentation/devicetree/bindings/clock/exynos5433-clock.txt +++ b/Documentation/devicetree/bindings/clock/exynos5433-clock.txt @@ -465,5 +465,4 @@ Example 3: UART controller node that consumes the clock generated by the clock clock-names = "uart", "clk_uart_baud0"; pinctrl-names = "default"; pinctrl-0 = <&uart0_bus>; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/clock/hi3660-clock.txt b/Documentation/devicetree/bindings/clock/hi3660-clock.txt index cc9b86c35758..0035a7ecaf20 100644 --- a/Documentation/devicetree/bindings/clock/hi3660-clock.txt +++ b/Documentation/devicetree/bindings/clock/hi3660-clock.txt @@ -38,5 +38,4 @@ Examples: clocks = <&crg_ctrl HI3660_CLK_MUX_UART0>, <&crg_ctrl HI3660_PCLK>; clock-names = "uartclk", "apb_pclk"; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/clock/hix5hd2-clock.txt b/Documentation/devicetree/bindings/clock/hix5hd2-clock.txt index 7894a64887cb..4733e58e491b 100644 --- a/Documentation/devicetree/bindings/clock/hix5hd2-clock.txt +++ b/Documentation/devicetree/bindings/clock/hix5hd2-clock.txt @@ -27,5 +27,4 @@ Examples: interrupts = <0 49 4>; clocks = <&clock HIX5HD2_FIXED_83M>; clock-names = "apb_pclk"; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/clock/idt,versaclock5.txt b/Documentation/devicetree/bindings/clock/idt,versaclock5.txt index 53d7e50ed875..05a245c9df08 100644 --- a/Documentation/devicetree/bindings/clock/idt,versaclock5.txt +++ b/Documentation/devicetree/bindings/clock/idt,versaclock5.txt @@ -1,24 +1,32 @@ -Binding for IDT VersaClock5 programmable i2c clock generator. +Binding for IDT VersaClock 5,6 programmable i2c clock generators. -The IDT VersaClock5 are programmable i2c clock generators providing -from 3 to 12 output clocks. +The IDT VersaClock 5 and VersaClock 6 are programmable i2c clock +generators providing from 3 to 12 output clocks. ==I2C device node== Required properties: -- compatible: shall be one of "idt,5p49v5923" , "idt,5p49v5933" , - "idt,5p49v5935". +- compatible: shall be one of + "idt,5p49v5923" + "idt,5p49v5925" + "idt,5p49v5933" + "idt,5p49v5935" + "idt,5p49v6901" - reg: i2c device address, shall be 0x68 or 0x6a. - #clock-cells: from common clock binding; shall be set to 1. - clocks: from common clock binding; list of parent clock handles, - - 5p49v5923: (required) either or both of XTAL or CLKIN + - 5p49v5923 and + 5p49v5925 and + 5p49v6901: (required) either or both of XTAL or CLKIN reference clock. - 5p49v5933 and - 5p49v5935: (optional) property not present (internal Xtal used) or CLKIN reference clock. - clock-names: from common clock binding; clock input names, can be - - 5p49v5923: (required) either or both of "xin", "clkin". + - 5p49v5923 and + 5p49v5925 and + 5p49v6901: (required) either or both of "xin", "clkin". 
- 5p49v5933 and - 5p49v5935: (optional) property not present or "clkin". @@ -37,6 +45,7 @@ clock specifier, the following mapping applies: 1 -- OUT1 2 -- OUT4 +5P49V5925 and 5P49V5935: 0 -- OUT0_SEL_I2CB 1 -- OUT1 @@ -44,6 +53,13 @@ clock specifier, the following mapping applies: 3 -- OUT3 4 -- OUT4 +5P49V6901: + 0 -- OUT0_SEL_I2CB + 1 -- OUT1 + 2 -- OUT2 + 3 -- OUT3 + 4 -- OUT4 + ==Example== /* 25MHz reference crystal */ diff --git a/Documentation/devicetree/bindings/clock/imx21-clock.txt b/Documentation/devicetree/bindings/clock/imx21-clock.txt index c3b0db437c48..806f63d628bd 100644 --- a/Documentation/devicetree/bindings/clock/imx21-clock.txt +++ b/Documentation/devicetree/bindings/clock/imx21-clock.txt @@ -24,5 +24,4 @@ Examples: clocks = <&clks IMX21_CLK_UART1_IPG_GATE>, <&clks IMX21_CLK_PER1>; clock-names = "ipg", "per"; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/clock/imx23-clock.txt b/Documentation/devicetree/bindings/clock/imx23-clock.txt index 5083c0b834b2..8385348d3bd9 100644 --- a/Documentation/devicetree/bindings/clock/imx23-clock.txt +++ b/Documentation/devicetree/bindings/clock/imx23-clock.txt @@ -67,5 +67,4 @@ auart0: serial@8006c000 { reg = <0x8006c000 0x2000>; interrupts = <24 25 23>; clocks = <&clks 32>; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/clock/imx25-clock.txt b/Documentation/devicetree/bindings/clock/imx25-clock.txt index ba6b312ff8a5..f8135ea9ca4e 100644 --- a/Documentation/devicetree/bindings/clock/imx25-clock.txt +++ b/Documentation/devicetree/bindings/clock/imx25-clock.txt @@ -157,5 +157,4 @@ uart1: serial@43f90000 { interrupts = <45>; clocks = <&clks 79>, <&clks 50>; clock-names = "ipg", "per"; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/clock/imx27-clock.txt b/Documentation/devicetree/bindings/clock/imx27-clock.txt index cc05de9ec393..4c95c048d3b2 100644 --- a/Documentation/devicetree/bindings/clock/imx27-clock.txt +++ b/Documentation/devicetree/bindings/clock/imx27-clock.txt @@ -24,5 +24,4 @@ Examples: clocks = <&clks IMX27_CLK_UART1_IPG_GATE>, <&clks IMX27_CLK_PER1_GATE>; clock-names = "ipg", "per"; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/clock/imx28-clock.txt b/Documentation/devicetree/bindings/clock/imx28-clock.txt index e6587af62ff0..d84a37d2885f 100644 --- a/Documentation/devicetree/bindings/clock/imx28-clock.txt +++ b/Documentation/devicetree/bindings/clock/imx28-clock.txt @@ -90,5 +90,4 @@ auart0: serial@8006a000 { reg = <0x8006a000 0x2000>; interrupts = <112 70 71>; clocks = <&clks 45>; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/clock/imx31-clock.txt b/Documentation/devicetree/bindings/clock/imx31-clock.txt index 8163d565f697..0a291090e562 100644 --- a/Documentation/devicetree/bindings/clock/imx31-clock.txt +++ b/Documentation/devicetree/bindings/clock/imx31-clock.txt @@ -87,5 +87,4 @@ uart1: serial@43f90000 { interrupts = <45>; clocks = <&clks 10>, <&clks 30>; clock-names = "ipg", "per"; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/clock/imx5-clock.txt b/Documentation/devicetree/bindings/clock/imx5-clock.txt index cadc4d29ada6..a24ca9e582d2 100644 --- a/Documentation/devicetree/bindings/clock/imx5-clock.txt +++ b/Documentation/devicetree/bindings/clock/imx5-clock.txt @@ -25,5 +25,4 @@ can1: can@53fc8000 { interrupts = <82>; clocks = <&clks IMX5_CLK_CAN1_IPG_GATE>, <&clks IMX5_CLK_CAN1_SERIAL_GATE>; clock-names = "ipg", "per"; - status = "disabled"; }; diff --git 
a/Documentation/devicetree/bindings/clock/imx6q-clock.txt b/Documentation/devicetree/bindings/clock/imx6q-clock.txt index 9252912a5b0e..aa0a4d423ef5 100644 --- a/Documentation/devicetree/bindings/clock/imx6q-clock.txt +++ b/Documentation/devicetree/bindings/clock/imx6q-clock.txt @@ -27,5 +27,4 @@ uart1: serial@02020000 { interrupts = <0 26 0x04>; clocks = <&clks IMX6QDL_CLK_UART_IPG>, <&clks IMX6QDL_CLK_UART_SERIAL>; clock-names = "ipg", "per"; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/clock/mt8173-cpu-dvfs.txt b/Documentation/devicetree/bindings/clock/mt8173-cpu-dvfs.txt deleted file mode 100644 index 52b457c23eed..000000000000 --- a/Documentation/devicetree/bindings/clock/mt8173-cpu-dvfs.txt +++ /dev/null @@ -1,83 +0,0 @@ -Device Tree Clock bindins for CPU DVFS of Mediatek MT8173 SoC - -Required properties: -- clocks: A list of phandle + clock-specifier pairs for the clocks listed in clock names. -- clock-names: Should contain the following: - "cpu" - The multiplexer for clock input of CPU cluster. - "intermediate" - A parent of "cpu" clock which is used as "intermediate" clock - source (usually MAINPLL) when the original CPU PLL is under - transition and not stable yet. - Please refer to Documentation/devicetree/bindings/clk/clock-bindings.txt for - generic clock consumer properties. -- proc-supply: Regulator for Vproc of CPU cluster. - -Optional properties: -- sram-supply: Regulator for Vsram of CPU cluster. When present, the cpufreq driver - needs to do "voltage tracking" to step by step scale up/down Vproc and - Vsram to fit SoC specific needs. When absent, the voltage scaling - flow is handled by hardware, hence no software "voltage tracking" is - needed. - -Example: --------- - cpu0: cpu@0 { - device_type = "cpu"; - compatible = "arm,cortex-a53"; - reg = <0x000>; - enable-method = "psci"; - cpu-idle-states = <&CPU_SLEEP_0>; - clocks = <&infracfg CLK_INFRA_CA53SEL>, - <&apmixedsys CLK_APMIXED_MAINPLL>; - clock-names = "cpu", "intermediate"; - }; - - cpu1: cpu@1 { - device_type = "cpu"; - compatible = "arm,cortex-a53"; - reg = <0x001>; - enable-method = "psci"; - cpu-idle-states = <&CPU_SLEEP_0>; - clocks = <&infracfg CLK_INFRA_CA53SEL>, - <&apmixedsys CLK_APMIXED_MAINPLL>; - clock-names = "cpu", "intermediate"; - }; - - cpu2: cpu@100 { - device_type = "cpu"; - compatible = "arm,cortex-a57"; - reg = <0x100>; - enable-method = "psci"; - cpu-idle-states = <&CPU_SLEEP_0>; - clocks = <&infracfg CLK_INFRA_CA57SEL>, - <&apmixedsys CLK_APMIXED_MAINPLL>; - clock-names = "cpu", "intermediate"; - }; - - cpu3: cpu@101 { - device_type = "cpu"; - compatible = "arm,cortex-a57"; - reg = <0x101>; - enable-method = "psci"; - cpu-idle-states = <&CPU_SLEEP_0>; - clocks = <&infracfg CLK_INFRA_CA57SEL>, - <&apmixedsys CLK_APMIXED_MAINPLL>; - clock-names = "cpu", "intermediate"; - }; - - &cpu0 { - proc-supply = <&mt6397_vpca15_reg>; - }; - - &cpu1 { - proc-supply = <&mt6397_vpca15_reg>; - }; - - &cpu2 { - proc-supply = <&da9211_vcpu_reg>; - sram-supply = <&mt6397_vsramca7_reg>; - }; - - &cpu3 { - proc-supply = <&da9211_vcpu_reg>; - sram-supply = <&mt6397_vsramca7_reg>; - }; diff --git a/Documentation/devicetree/bindings/clock/nvidia,tegra124-dfll.txt b/Documentation/devicetree/bindings/clock/nvidia,tegra124-dfll.txt index 63f9d8277d48..dff236f524a7 100644 --- a/Documentation/devicetree/bindings/clock/nvidia,tegra124-dfll.txt +++ b/Documentation/devicetree/bindings/clock/nvidia,tegra124-dfll.txt @@ -66,7 +66,6 @@ clock@70110000 { #clock-cells = <0>; clock-output-names = 
"dfllCPU_out"; vdd-cpu-supply = <&vdd_cpu>; - status = "okay"; nvidia,sample-rate = <12500>; nvidia,droop-ctrl = <0x00000f00>; diff --git a/Documentation/devicetree/bindings/clock/pxa-clock.txt b/Documentation/devicetree/bindings/clock/pxa-clock.txt index 4b4a9024bd99..8f67239411fe 100644 --- a/Documentation/devicetree/bindings/clock/pxa-clock.txt +++ b/Documentation/devicetree/bindings/clock/pxa-clock.txt @@ -12,5 +12,4 @@ Examples: pxa2xx_clks: pxa2xx_clks@41300004 { compatible = "marvell,pxa-clocks"; #clock-cells = <1>; - status = "okay"; }; diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt b/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt index 0cd894f987a3..316e13686568 100644 --- a/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt +++ b/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt @@ -22,6 +22,7 @@ Required Properties: - "renesas,r8a7794-cpg-mssr" for the r8a7794 SoC (R-Car E2) - "renesas,r8a7795-cpg-mssr" for the r8a7795 SoC (R-Car H3) - "renesas,r8a7796-cpg-mssr" for the r8a7796 SoC (R-Car M3-W) + - "renesas,r8a77995-cpg-mssr" for the r8a77995 SoC (R-Car D3) - reg: Base address and length of the memory resource used by the CPG/MSSR block @@ -30,7 +31,7 @@ Required Properties: clock-names - clock-names: List of external parent clock names. Valid names are: - "extal" (r8a7743, r8a7745, r8a7790, r8a7791, r8a7792, r8a7793, r8a7794, - r8a7795, r8a7796) + r8a7795, r8a7796, r8a77995) - "extalr" (r8a7795, r8a7796) - "usb_extal" (r8a7743, r8a7745, r8a7790, r8a7791, r8a7793, r8a7794) @@ -81,5 +82,4 @@ Examples dma-names = "tx", "rx"; power-domains = <&cpg>; resets = <&cpg 310>; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/clock/renesas,r8a7778-cpg-clocks.txt b/Documentation/devicetree/bindings/clock/renesas,r8a7778-cpg-clocks.txt index e4cdaf1cb333..7cc4c0330b53 100644 --- a/Documentation/devicetree/bindings/clock/renesas,r8a7778-cpg-clocks.txt +++ b/Documentation/devicetree/bindings/clock/renesas,r8a7778-cpg-clocks.txt @@ -44,5 +44,4 @@ Examples interrupts = <0 87 IRQ_TYPE_LEVEL_HIGH>; clocks = <&mstp3_clks R8A7778_CLK_SDHI0>; power-domains = <&cpg_clocks>; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt b/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt new file mode 100644 index 000000000000..e96e085271c1 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt @@ -0,0 +1,55 @@ +* Renesas R-Car USB 2.0 clock selector + +This file provides information on what the device node for the R-Car USB 2.0 +clock selector. + +If you connect an external clock to the USB_EXTAL pin only, you should set +the clock rate to "usb_extal" node only. +If you connect an oscillator to both the USB_XTAL and USB_EXTAL, this module +is not needed because this is default setting. (Of course, you can set the +clock rates to both "usb_extal" and "usb_xtal" nodes. + +Case 1: An external clock connects to R-Car SoC + +----------+ +--- R-Car ---------------------+ + |External |---|USB_EXTAL ---> all usb channels| + |clock | |USB_XTAL | + +----------+ +-------------------------------+ +In this case, we need this driver with "usb_extal" clock. + +Case 2: An oscillator connects to R-Car SoC + +----------+ +--- R-Car ---------------------+ + |Oscillator|---|USB_EXTAL -+-> all usb channels| + | |---|USB_XTAL --+ | + +----------+ +-------------------------------+ +In this case, we don't need this selector. 
+ +Required properties: +- compatible: "renesas,r8a7795-rcar-usb2-clock-sel" if the device is a part of + an R8A7795 SoC. + "renesas,r8a7796-rcar-usb2-clock-sel" if the device is a part of + an R8A7796 SoC. + "renesas,rcar-gen3-usb2-clock-sel" for a generic R-Car Gen3 + compatible device. + + When compatible with the generic version, nodes must list the + SoC-specific version corresponding to the platform first + followed by the generic version. + +- reg: offset and length of the USB 2.0 clock selector register block. +- clocks: A list of phandles and specifier pairs. +- clock-names: Name of the clocks. + - The functional clock must be "ehci_ohci" + - The USB_EXTAL clock pin must be "usb_extal" + - The USB_XTAL clock pin must be "usb_xtal" +- #clock-cells: Must be 0 + +Example (R-Car H3): + + usb2_clksel: clock-controller@e6590630 { + compatible = "renesas,r8a77950-rcar-usb2-clock-sel", + "renesas,rcar-gen3-usb2-clock-sel"; + reg = <0 0xe6590630 0 0x02>; + clocks = <&cpg CPG_MOD 703>, <&usb_extal>, <&usb_xtal>; + clock-names = "ehci_ohci", "usb_extal", "usb_xtal"; + #clock-cells = <0>; + }; diff --git a/Documentation/devicetree/bindings/clock/renesas,rz-cpg-clocks.txt b/Documentation/devicetree/bindings/clock/renesas,rz-cpg-clocks.txt index bb51a33a1fbf..bb5d942075fb 100644 --- a/Documentation/devicetree/bindings/clock/renesas,rz-cpg-clocks.txt +++ b/Documentation/devicetree/bindings/clock/renesas,rz-cpg-clocks.txt @@ -50,5 +50,4 @@ Examples clocks = <&mstp3_clks R7S72100_CLK_MTU2>; clock-names = "fck"; power-domains = <&cpg_clocks>; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/clock/rockchip,rk3128-cru.txt b/Documentation/devicetree/bindings/clock/rockchip,rk3128-cru.txt index 455a9a00a623..6f8744fd301b 100644 --- a/Documentation/devicetree/bindings/clock/rockchip,rk3128-cru.txt +++ b/Documentation/devicetree/bindings/clock/rockchip,rk3128-cru.txt @@ -1,12 +1,14 @@ -* Rockchip RK3128 Clock and Reset Unit +* Rockchip RK3126/RK3128 Clock and Reset Unit -The RK3128 clock controller generates and supplies clock to various +The RK3126/RK3128 clock controller generates and supplies clock to various controllers within the SoC and also implements a reset controller for SoC peripherals. Required Properties: -- compatible: should be "rockchip,rk3128-cru" +- compatible: should be "rockchip,rk3126-cru" or "rockchip,rk3128-cru" + "rockchip,rk3126-cru" - controller compatible with RK3126 SoC. + "rockchip,rk3128-cru" - controller compatible with RK3128 SoC. - reg: physical base address of the controller and length of memory mapped region. - #clock-cells: should be 1. 
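For reference, a minimal clock-controller node consistent with the updated RK3126/RK3128 compatible list could look like the sketch below; the unit address and the #reset-cells value are illustrative assumptions, not values taken from the hunk above:

	cru: clock-controller@20000000 {
		compatible = "rockchip,rk3126-cru";
		/* placeholder base address for the SoC's CRU register region */
		reg = <0x20000000 0x1000>;
		#clock-cells = <1>;
		/* the CRU also acts as a reset controller; a one-cell specifier is assumed */
		#reset-cells = <1>;
	};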
diff --git a/Documentation/devicetree/bindings/clock/samsung,s3c2410-clock.txt b/Documentation/devicetree/bindings/clock/samsung,s3c2410-clock.txt index 822505e715ae..2632d3f13004 100644 --- a/Documentation/devicetree/bindings/clock/samsung,s3c2410-clock.txt +++ b/Documentation/devicetree/bindings/clock/samsung,s3c2410-clock.txt @@ -46,5 +46,4 @@ Example: UART controller node that consumes the clock generated by the clock interrupts = <1 23 3 4>, <1 23 4 4>; clock-names = "uart", "clk_uart_baud2"; clocks = <&clocks PCLK_UART0>, <&clocks PCLK_UART0>; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/clock/samsung,s3c2412-clock.txt b/Documentation/devicetree/bindings/clock/samsung,s3c2412-clock.txt index 2b430960ba47..21a8c23e658f 100644 --- a/Documentation/devicetree/bindings/clock/samsung,s3c2412-clock.txt +++ b/Documentation/devicetree/bindings/clock/samsung,s3c2412-clock.txt @@ -46,5 +46,4 @@ Example: UART controller node that consumes the clock generated by the clock clock-names = "uart", "clk_uart_baud2", "clk_uart_baud3"; clocks = <&clocks PCLK_UART0>, <&clocks PCLK_UART0>, <&clocks SCLK_UART>; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/clock/samsung,s3c2443-clock.txt b/Documentation/devicetree/bindings/clock/samsung,s3c2443-clock.txt index e67bb05478af..985c0f574e9a 100644 --- a/Documentation/devicetree/bindings/clock/samsung,s3c2443-clock.txt +++ b/Documentation/devicetree/bindings/clock/samsung,s3c2443-clock.txt @@ -52,5 +52,4 @@ Example: UART controller node that consumes the clock generated by the clock "clk_uart_baud3"; clocks = <&clocks PCLK_UART0>, <&clocks PCLK_UART0>, <&clocks SCLK_UART>; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/clock/samsung,s3c64xx-clock.txt b/Documentation/devicetree/bindings/clock/samsung,s3c64xx-clock.txt index fa171dc4bd3c..872ee8e0f041 100644 --- a/Documentation/devicetree/bindings/clock/samsung,s3c64xx-clock.txt +++ b/Documentation/devicetree/bindings/clock/samsung,s3c64xx-clock.txt @@ -73,5 +73,4 @@ Example: UART controller node that consumes the clock generated by the clock "clk_uart_baud3"; clocks = <&clock PCLK_UART0>, <&clocks PCLK_UART0>, <&clock SCLK_UART>; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/clock/samsung,s5pv210-clock.txt b/Documentation/devicetree/bindings/clock/samsung,s5pv210-clock.txt index effd9401c133..15b48e20a061 100644 --- a/Documentation/devicetree/bindings/clock/samsung,s5pv210-clock.txt +++ b/Documentation/devicetree/bindings/clock/samsung,s5pv210-clock.txt @@ -74,5 +74,4 @@ Example: UART controller node that consumes the clock generated by the clock "clk_uart_baud1"; clocks = <&clocks UART0>, <&clocks UART0>, <&clocks SCLK_UART0>; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/clock/silabs,si5351.txt b/Documentation/devicetree/bindings/clock/silabs,si5351.txt index 28b28309f535..a6c4ef343b44 100644 --- a/Documentation/devicetree/bindings/clock/silabs,si5351.txt +++ b/Documentation/devicetree/bindings/clock/silabs,si5351.txt @@ -12,7 +12,11 @@ generators can be found in [1]. ==I2C device node== Required properties: -- compatible: shall be one of "silabs,si5351{a,a-msop,b,c}". +- compatible: shall be one of the following: + "silabs,si5351a" - Si5351a, QFN20 package + "silabs,si5351a-msop" - Si5351a, MSOP10 package + "silabs,si5351b" - Si5351b, QFN20 package + "silabs,si5351c" - Si5351c, QFN20 package - reg: i2c device address, shall be 0x60 or 0x61. 
- #clock-cells: from common clock binding; shall be set to 1. - clocks: from common clock binding; list of parent clock diff --git a/Documentation/devicetree/bindings/clock/snps,hsdk-pll-clock.txt b/Documentation/devicetree/bindings/clock/snps,hsdk-pll-clock.txt new file mode 100644 index 000000000000..c56c7553c730 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/snps,hsdk-pll-clock.txt @@ -0,0 +1,28 @@ +Binding for the HSDK Generic PLL clock + +This binding uses the common clock binding[1]. + +[1] Documentation/devicetree/bindings/clock/clock-bindings.txt + +Required properties: +- compatible: should be "snps,hsdk--pll-clock" + "snps,hsdk-core-pll-clock" + "snps,hsdk-gp-pll-clock" + "snps,hsdk-hdmi-pll-clock" +- reg : should contain base register location and length. +- clocks: shall be the input parent clock phandle for the PLL. +- #clock-cells: from common clock binding; Should always be set to 0. + +Example: + input_clk: input-clk { + clock-frequency = <33333333>; + compatible = "fixed-clock"; + #clock-cells = <0>; + }; + + cpu_clk: cpu-clk@0 { + compatible = "snps,hsdk-core-pll-clock"; + reg = <0x00 0x10>; + #clock-cells = <0>; + clocks = <&input_clk>; + }; diff --git a/Documentation/devicetree/bindings/clock/snps,pll-clock.txt b/Documentation/devicetree/bindings/clock/snps,pll-clock.txt new file mode 100644 index 000000000000..11fe4876612c --- /dev/null +++ b/Documentation/devicetree/bindings/clock/snps,pll-clock.txt @@ -0,0 +1,28 @@ +Binding for the AXS10X Generic PLL clock + +This binding uses the common clock binding[1]. + +[1] Documentation/devicetree/bindings/clock/clock-bindings.txt + +Required properties: +- compatible: should be "snps,axs10x--pll-clock" + "snps,axs10x-arc-pll-clock" + "snps,axs10x-pgu-pll-clock" +- reg: should always contain 2 pairs address - length: first for PLL config +registers and second for corresponding LOCK CGU register. +- clocks: shall be the input parent clock phandle for the PLL. +- #clock-cells: from common clock binding; Should always be set to 0. + +Example: + input-clk: input-clk { + clock-frequency = <33333333>; + compatible = "fixed-clock"; + #clock-cells = <0>; + }; + + core-clk: core-clk@80 { + compatible = "snps,axs10x-arc-pll-clock"; + reg = <0x80 0x10>, <0x100 0x10>; + #clock-cells = <0>; + clocks = <&input-clk>; + }; diff --git a/Documentation/devicetree/bindings/clock/st,stm32h7-rcc.txt b/Documentation/devicetree/bindings/clock/st,stm32h7-rcc.txt new file mode 100644 index 000000000000..cac24ee10b72 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/st,stm32h7-rcc.txt @@ -0,0 +1,71 @@ +STMicroelectronics STM32H7 Reset and Clock Controller +===================================================== + +The RCC IP is both a reset and a clock controller. + +Please refer to clock-bindings.txt for common clock controller binding usage. +Please also refer to reset.txt for common reset controller binding usage. + +Required properties: +- compatible: Should be: + "st,stm32h743-rcc" + +- reg: should be register base and length as documented in the + datasheet + +- #reset-cells: 1, see below + +- #clock-cells : from common clock binding; shall be set to 1 + +- clocks: External oscillator clock phandle + - high speed external clock signal (HSE) + - low speed external clock signal (LSE) + - external I2S clock (I2S_CKIN) + +Optional properties: +- st,syscfg: phandle for pwrcfg, mandatory to disable/enable backup domain + write protection (RTC clock). 
+ +Example: + + rcc: reset-clock-controller@58024400 { + compatible = "st,stm32h743-rcc", "st,stm32-rcc"; + reg = <0x58024400 0x400>; + #reset-cells = <1>; + #clock-cells = <1>; + clocks = <&clk_hse>, <&clk_lse>, <&clk_i2s_ckin>; + + st,syscfg = <&pwrcfg>; +}; + +The peripheral clock consumer should specify the desired clock by +having the clock ID in its "clocks" phandle cell. + +Example: + + timer5: timer@40000c00 { + compatible = "st,stm32-timer"; + reg = <0x40000c00 0x400>; + interrupts = <50>; + clocks = <&rcc TIM5_CK>; + }; + +Specifying softreset control of devices +======================================= + +Device nodes should specify the reset channel required in their "resets" +property, containing a phandle to the reset device node and an index specifying +which channel to use. +The index is the bit number within the RCC registers bank, starting from RCC +base address. +It is calculated as: index = register_offset / 4 * 32 + bit_offset. +Where bit_offset is the bit offset within the register. + +For example, for CRC reset: + crc = AHB4RSTR_offset / 4 * 32 + CRCRST_bit_offset = 0x88 / 4 * 32 + 19 = 1107 + +Example: + + timer2 { + resets = <&rcc STM32H7_APB1L_RESET(TIM2)>; + }; diff --git a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt index df9fad58facd..7eda08eb8a1e 100644 --- a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt +++ b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt @@ -3,18 +3,24 @@ Allwinner Clock Control Unit Binding Required properties : - compatible: must contain one of the following compatibles: + - "allwinner,sun4i-a10-ccu" + - "allwinner,sun5i-a10s-ccu" + - "allwinner,sun5i-a13-ccu" - "allwinner,sun6i-a31-ccu" + - "allwinner,sun7i-a20-ccu" - "allwinner,sun8i-a23-ccu" - "allwinner,sun8i-a33-ccu" - "allwinner,sun8i-a83t-ccu" - "allwinner,sun8i-a83t-r-ccu" - "allwinner,sun8i-h3-ccu" - "allwinner,sun8i-h3-r-ccu" ++ - "allwinner,sun8i-r40-ccu" - "allwinner,sun8i-v3s-ccu" - "allwinner,sun9i-a80-ccu" - "allwinner,sun50i-a64-ccu" - "allwinner,sun50i-a64-r-ccu" - "allwinner,sun50i-h5-ccu" + - "nextthing,gr8-ccu" - reg: Must contain the registers base address and length - clocks: phandle to the oscillators feeding the CCU. Two are needed: diff --git a/Documentation/devicetree/bindings/clock/ti,sci-clk.txt b/Documentation/devicetree/bindings/clock/ti,sci-clk.txt index 1e884c40ab50..4e59dc6b1778 100644 --- a/Documentation/devicetree/bindings/clock/ti,sci-clk.txt +++ b/Documentation/devicetree/bindings/clock/ti,sci-clk.txt @@ -14,10 +14,9 @@ Required properties: - compatible: Must be "ti,k2g-sci-clk" - #clock-cells: Shall be 2. In clock consumers, this cell represents the device ID and clock ID - exposed by the PM firmware. The assignments can be found in the header - files .h> (which covers the device IDs) and - .h> (which covers the clock IDs), where - is the SoC involved, for example 'k2g'. + exposed by the PM firmware. 
The list of valid values for the device IDs + and clocks IDs for 66AK2G SoC are documented at + http://processors.wiki.ti.com/index.php/TISCI#66AK2G02_Data Examples: -------- diff --git a/Documentation/devicetree/bindings/clock/ti/dra7-atl.txt b/Documentation/devicetree/bindings/clock/ti/dra7-atl.txt index 585e8c191f50..10f7047755f3 100644 --- a/Documentation/devicetree/bindings/clock/ti/dra7-atl.txt +++ b/Documentation/devicetree/bindings/clock/ti/dra7-atl.txt @@ -81,13 +81,11 @@ atl: atl@4843c000 { <&atl_clkin2_ck>, <&atl_clkin3_ck>; clocks = <&atl_gfclk_mux>; clock-names = "fck"; - status = "disabled"; }; #include &atl { - status = "okay"; atl2 { bws = ; diff --git a/Documentation/devicetree/bindings/clock/uniphier-clock.txt b/Documentation/devicetree/bindings/clock/uniphier-clock.txt index 812163060fa3..7b5f602765fe 100644 --- a/Documentation/devicetree/bindings/clock/uniphier-clock.txt +++ b/Documentation/devicetree/bindings/clock/uniphier-clock.txt @@ -6,7 +6,6 @@ System clock Required properties: - compatible: should be one of the following: - "socionext,uniphier-sld3-clock" - for sLD3 SoC. "socionext,uniphier-ld4-clock" - for LD4 SoC. "socionext,uniphier-pro4-clock" - for Pro4 SoC. "socionext,uniphier-sld8-clock" - for sLD8 SoC. @@ -14,6 +13,7 @@ Required properties: "socionext,uniphier-pxs2-clock" - for PXs2/LD6b SoC. "socionext,uniphier-ld11-clock" - for LD11 SoC. "socionext,uniphier-ld20-clock" - for LD20 SoC. + "socionext,uniphier-pxs3-clock" - for PXs3 SoC - #clock-cells: should be 1. Example: @@ -48,7 +48,6 @@ Media I/O (MIO) clock, SD clock Required properties: - compatible: should be one of the following: - "socionext,uniphier-sld3-mio-clock" - for sLD3 SoC. "socionext,uniphier-ld4-mio-clock" - for LD4 SoC. "socionext,uniphier-pro4-mio-clock" - for Pro4 SoC. "socionext,uniphier-sld8-mio-clock" - for sLD8 SoC. @@ -56,6 +55,7 @@ Required properties: "socionext,uniphier-pxs2-sd-clock" - for PXs2/LD6b SoC. "socionext,uniphier-ld11-mio-clock" - for LD11 SoC. "socionext,uniphier-ld20-sd-clock" - for LD20 SoC. + "socionext,uniphier-pxs3-sd-clock" - for PXs3 SoC - #clock-cells: should be 1. Example: @@ -82,11 +82,9 @@ Provided clocks: 8: USB2 ch0 host 9: USB2 ch1 host 10: USB2 ch2 host -11: USB2 ch3 host 12: USB2 ch0 PHY 13: USB2 ch1 PHY 14: USB2 ch2 PHY -15: USB2 ch3 PHY Peripheral clock @@ -94,7 +92,6 @@ Peripheral clock Required properties: - compatible: should be one of the following: - "socionext,uniphier-sld3-peri-clock" - for sLD3 SoC. "socionext,uniphier-ld4-peri-clock" - for LD4 SoC. "socionext,uniphier-pro4-peri-clock" - for Pro4 SoC. "socionext,uniphier-sld8-peri-clock" - for sLD8 SoC. @@ -102,6 +99,7 @@ Required properties: "socionext,uniphier-pxs2-peri-clock" - for PXs2/LD6b SoC. "socionext,uniphier-ld11-peri-clock" - for LD11 SoC. "socionext,uniphier-ld20-peri-clock" - for LD20 SoC. + "socionext,uniphier-pxs3-peri-clock" - for PXs3 SoC - #clock-cells: should be 1. 
Example: diff --git a/Documentation/devicetree/bindings/clock/zx296702-clk.txt b/Documentation/devicetree/bindings/clock/zx296702-clk.txt index 750442b65505..e85ecb510d56 100644 --- a/Documentation/devicetree/bindings/clock/zx296702-clk.txt +++ b/Documentation/devicetree/bindings/clock/zx296702-clk.txt @@ -31,5 +31,4 @@ uart0: serial@0x09405000 { reg = <0x09405000 0x1000>; interrupts = ; clocks = <&lsp1clk ZX296702_UART0_PCLK>; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/clock/zx296718-clk.txt b/Documentation/devicetree/bindings/clock/zx296718-clk.txt index 4ad703808407..3a46bf0b2540 100644 --- a/Documentation/devicetree/bindings/clock/zx296718-clk.txt +++ b/Documentation/devicetree/bindings/clock/zx296718-clk.txt @@ -34,5 +34,4 @@ usbphy0:usb-phy0 { #phy-cells = <0>; clocks = <&topclk USB20_PHY_CLK>; clock-names = "phyclk"; - status = "okay"; }; diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek.txt b/Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek.txt new file mode 100644 index 000000000000..f6403089edcf --- /dev/null +++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek.txt @@ -0,0 +1,247 @@ +Binding for MediaTek's CPUFreq driver +===================================== + +Required properties: +- clocks: A list of phandle + clock-specifier pairs for the clocks listed in clock names. +- clock-names: Should contain the following: + "cpu" - The multiplexer for clock input of CPU cluster. + "intermediate" - A parent of "cpu" clock which is used as "intermediate" clock + source (usually MAINPLL) when the original CPU PLL is under + transition and not stable yet. + Please refer to Documentation/devicetree/bindings/clk/clock-bindings.txt for + generic clock consumer properties. +- operating-points-v2: Please refer to Documentation/devicetree/bindings/opp/opp.txt + for detail. +- proc-supply: Regulator for Vproc of CPU cluster. + +Optional properties: +- sram-supply: Regulator for Vsram of CPU cluster. When present, the cpufreq driver + needs to do "voltage tracking" to step by step scale up/down Vproc and + Vsram to fit SoC specific needs. When absent, the voltage scaling + flow is handled by hardware, hence no software "voltage tracking" is + needed. +- #cooling-cells: +- cooling-min-level: +- cooling-max-level: + Please refer to Documentation/devicetree/bindings/thermal/thermal.txt + for detail. 
+ +Example 1 (MT7623 SoC): + + cpu_opp_table: opp_table { + compatible = "operating-points-v2"; + opp-shared; + + opp-598000000 { + opp-hz = /bits/ 64 <598000000>; + opp-microvolt = <1050000>; + }; + + opp-747500000 { + opp-hz = /bits/ 64 <747500000>; + opp-microvolt = <1050000>; + }; + + opp-1040000000 { + opp-hz = /bits/ 64 <1040000000>; + opp-microvolt = <1150000>; + }; + + opp-1196000000 { + opp-hz = /bits/ 64 <1196000000>; + opp-microvolt = <1200000>; + }; + + opp-1300000000 { + opp-hz = /bits/ 64 <1300000000>; + opp-microvolt = <1300000>; + }; + }; + + cpu0: cpu@0 { + device_type = "cpu"; + compatible = "arm,cortex-a7"; + reg = <0x0>; + clocks = <&infracfg CLK_INFRA_CPUSEL>, + <&apmixedsys CLK_APMIXED_MAINPLL>; + clock-names = "cpu", "intermediate"; + operating-points-v2 = <&cpu_opp_table>; + #cooling-cells = <2>; + cooling-min-level = <0>; + cooling-max-level = <7>; + }; + cpu@1 { + device_type = "cpu"; + compatible = "arm,cortex-a7"; + reg = <0x1>; + operating-points-v2 = <&cpu_opp_table>; + }; + cpu@2 { + device_type = "cpu"; + compatible = "arm,cortex-a7"; + reg = <0x2>; + operating-points-v2 = <&cpu_opp_table>; + }; + cpu@3 { + device_type = "cpu"; + compatible = "arm,cortex-a7"; + reg = <0x3>; + operating-points-v2 = <&cpu_opp_table>; + }; + +Example 2 (MT8173 SoC): + cpu_opp_table_a: opp_table_a { + compatible = "operating-points-v2"; + opp-shared; + + opp-507000000 { + opp-hz = /bits/ 64 <507000000>; + opp-microvolt = <859000>; + }; + + opp-702000000 { + opp-hz = /bits/ 64 <702000000>; + opp-microvolt = <908000>; + }; + + opp-1001000000 { + opp-hz = /bits/ 64 <1001000000>; + opp-microvolt = <983000>; + }; + + opp-1105000000 { + opp-hz = /bits/ 64 <1105000000>; + opp-microvolt = <1009000>; + }; + + opp-1183000000 { + opp-hz = /bits/ 64 <1183000000>; + opp-microvolt = <1028000>; + }; + + opp-1404000000 { + opp-hz = /bits/ 64 <1404000000>; + opp-microvolt = <1083000>; + }; + + opp-1508000000 { + opp-hz = /bits/ 64 <1508000000>; + opp-microvolt = <1109000>; + }; + + opp-1573000000 { + opp-hz = /bits/ 64 <1573000000>; + opp-microvolt = <1125000>; + }; + }; + + cpu_opp_table_b: opp_table_b { + compatible = "operating-points-v2"; + opp-shared; + + opp-507000000 { + opp-hz = /bits/ 64 <507000000>; + opp-microvolt = <828000>; + }; + + opp-702000000 { + opp-hz = /bits/ 64 <702000000>; + opp-microvolt = <867000>; + }; + + opp-1001000000 { + opp-hz = /bits/ 64 <1001000000>; + opp-microvolt = <927000>; + }; + + opp-1209000000 { + opp-hz = /bits/ 64 <1209000000>; + opp-microvolt = <968000>; + }; + + opp-1404000000 { + opp-hz = /bits/ 64 <1007000000>; + opp-microvolt = <1028000>; + }; + + opp-1612000000 { + opp-hz = /bits/ 64 <1612000000>; + opp-microvolt = <1049000>; + }; + + opp-1807000000 { + opp-hz = /bits/ 64 <1807000000>; + opp-microvolt = <1089000>; + }; + + opp-1989000000 { + opp-hz = /bits/ 64 <1989000000>; + opp-microvolt = <1125000>; + }; + }; + + cpu0: cpu@0 { + device_type = "cpu"; + compatible = "arm,cortex-a53"; + reg = <0x000>; + enable-method = "psci"; + cpu-idle-states = <&CPU_SLEEP_0>; + clocks = <&infracfg CLK_INFRA_CA53SEL>, + <&apmixedsys CLK_APMIXED_MAINPLL>; + clock-names = "cpu", "intermediate"; + operating-points-v2 = <&cpu_opp_table_a>; + }; + + cpu1: cpu@1 { + device_type = "cpu"; + compatible = "arm,cortex-a53"; + reg = <0x001>; + enable-method = "psci"; + cpu-idle-states = <&CPU_SLEEP_0>; + clocks = <&infracfg CLK_INFRA_CA53SEL>, + <&apmixedsys CLK_APMIXED_MAINPLL>; + clock-names = "cpu", "intermediate"; + operating-points-v2 = <&cpu_opp_table_a>; + }; + + 
cpu2: cpu@100 { + device_type = "cpu"; + compatible = "arm,cortex-a57"; + reg = <0x100>; + enable-method = "psci"; + cpu-idle-states = <&CPU_SLEEP_0>; + clocks = <&infracfg CLK_INFRA_CA57SEL>, + <&apmixedsys CLK_APMIXED_MAINPLL>; + clock-names = "cpu", "intermediate"; + operating-points-v2 = <&cpu_opp_table_b>; + }; + + cpu3: cpu@101 { + device_type = "cpu"; + compatible = "arm,cortex-a57"; + reg = <0x101>; + enable-method = "psci"; + cpu-idle-states = <&CPU_SLEEP_0>; + clocks = <&infracfg CLK_INFRA_CA57SEL>, + <&apmixedsys CLK_APMIXED_MAINPLL>; + clock-names = "cpu", "intermediate"; + operating-points-v2 = <&cpu_opp_table_b>; + }; + + &cpu0 { + proc-supply = <&mt6397_vpca15_reg>; + }; + + &cpu1 { + proc-supply = <&mt6397_vpca15_reg>; + }; + + &cpu2 { + proc-supply = <&da9211_vcpu_reg>; + sram-supply = <&mt6397_vsramca7_reg>; + }; + + &cpu3 { + proc-supply = <&da9211_vcpu_reg>; + sram-supply = <&mt6397_vsramca7_reg>; + }; diff --git a/Documentation/devicetree/bindings/crypto/artpec6-crypto.txt b/Documentation/devicetree/bindings/crypto/artpec6-crypto.txt new file mode 100644 index 000000000000..d9cca4875bd6 --- /dev/null +++ b/Documentation/devicetree/bindings/crypto/artpec6-crypto.txt @@ -0,0 +1,16 @@ +Axis crypto engine with PDMA interface. + +Required properties: +- compatible : Should be one of the following strings: + "axis,artpec6-crypto" for the version in the Axis ARTPEC-6 SoC + "axis,artpec7-crypto" for the version in the Axis ARTPEC-7 SoC. +- reg: Base address and size for the PDMA register area. +- interrupts: Interrupt handle for the PDMA interrupt line. + +Example: + +crypto@f4264000 { + compatible = "axis,artpec6-crypto"; + reg = <0xf4264000 0x1000>; + interrupts = ; +}; diff --git a/Documentation/devicetree/bindings/crypto/atmel-crypto.txt b/Documentation/devicetree/bindings/crypto/atmel-crypto.txt index f2aab3dc2b52..7de1a9674c70 100644 --- a/Documentation/devicetree/bindings/crypto/atmel-crypto.txt +++ b/Documentation/devicetree/bindings/crypto/atmel-crypto.txt @@ -66,3 +66,16 @@ sha@f8034000 { dmas = <&dma1 2 17>; dma-names = "tx"; }; + +* Elliptic Curve Cryptography (I2C) + +Required properties: +- compatible : must be "atmel,atecc508a". +- reg: I2C bus address of the device. +- clock-frequency: must be present in the i2c controller node. 
+ +Example: +atecc508a@C0 { + compatible = "atmel,atecc508a"; + reg = <0xC0>; +}; diff --git a/Documentation/devicetree/bindings/crypto/fsl-dcp.txt b/Documentation/devicetree/bindings/crypto/fsl-dcp.txt index 6949e50f1f16..76a0b4e80e83 100644 --- a/Documentation/devicetree/bindings/crypto/fsl-dcp.txt +++ b/Documentation/devicetree/bindings/crypto/fsl-dcp.txt @@ -13,5 +13,4 @@ dcp@80028000 { compatible = "fsl,imx28-dcp", "fsl,imx23-dcp"; reg = <0x80028000 0x2000>; interrupts = <52 53>; - status = "okay"; }; diff --git a/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt b/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt index 941bb6a6fb13..fbc07d12322f 100644 --- a/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt +++ b/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt @@ -23,5 +23,4 @@ Example: interrupt-names = "mem", "ring0", "ring1", "ring2", "ring3", "eip"; clocks = <&cpm_syscon0 1 26>; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/crypto/marvell-cesa.txt b/Documentation/devicetree/bindings/crypto/marvell-cesa.txt index c6c6a4a045bd..28d3f2496b89 100644 --- a/Documentation/devicetree/bindings/crypto/marvell-cesa.txt +++ b/Documentation/devicetree/bindings/crypto/marvell-cesa.txt @@ -41,5 +41,4 @@ Examples: clock-names = "cesa0", "cesa1"; marvell,crypto-srams = <&crypto_sram0>, <&crypto_sram1>; marvell,crypto-sram-size = <0x600>; - status = "okay"; }; diff --git a/Documentation/devicetree/bindings/crypto/mv_cesa.txt b/Documentation/devicetree/bindings/crypto/mv_cesa.txt index c0c35f00335b..d9b92e2f3138 100644 --- a/Documentation/devicetree/bindings/crypto/mv_cesa.txt +++ b/Documentation/devicetree/bindings/crypto/mv_cesa.txt @@ -29,5 +29,4 @@ Examples: interrupts = <22>; marvell,crypto-srams = <&crypto_sram>; marvell,crypto-sram-size = <0x600>; - status = "okay"; }; diff --git a/Documentation/devicetree/bindings/crypto/rockchip-crypto.txt b/Documentation/devicetree/bindings/crypto/rockchip-crypto.txt index 096df34b11c1..5e2ba385b8c9 100644 --- a/Documentation/devicetree/bindings/crypto/rockchip-crypto.txt +++ b/Documentation/devicetree/bindings/crypto/rockchip-crypto.txt @@ -25,5 +25,4 @@ Examples: clock-names = "aclk", "hclk", "sclk", "apb_pclk"; resets = <&cru SRST_CRYPTO>; reset-names = "crypto-rst"; - status = "okay"; }; diff --git a/Documentation/devicetree/bindings/crypto/st,stm32-hash.txt b/Documentation/devicetree/bindings/crypto/st,stm32-hash.txt new file mode 100644 index 000000000000..04fc246f02f7 --- /dev/null +++ b/Documentation/devicetree/bindings/crypto/st,stm32-hash.txt @@ -0,0 +1,30 @@ +* STMicroelectronics STM32 HASH + +Required properties: +- compatible: Should contain entries for this and backward compatible + HASH versions: + - "st,stm32f456-hash" for stm32 F456. + - "st,stm32f756-hash" for stm32 F756. +- reg: The address and length of the peripheral registers space +- interrupts: the interrupt specifier for the HASH +- clocks: The input clock of the HASH instance + +Optional properties: +- resets: The input reset of the HASH instance +- dmas: DMA specifiers for the HASH. See the DMA client binding, + Documentation/devicetree/bindings/dma/dma.txt +- dma-names: DMA request name. Should be "in" if a dma is present. 
+- dma-maxburst: Set number of maximum dma burst supported + +Example: + +hash1: hash@50060400 { + compatible = "st,stm32f756-hash"; + reg = <0x50060400 0x400>; + interrupts = <80>; + clocks = <&rcc 0 STM32F7_AHB2_CLOCK(HASH)>; + resets = <&rcc STM32F7_AHB2_RESET(HASH)>; + dmas = <&dma2 7 2 0x400 0x0>; + dma-names = "in"; + dma-maxburst = <0>; +}; diff --git a/Documentation/devicetree/bindings/devfreq/event/rockchip-dfi.txt b/Documentation/devicetree/bindings/devfreq/event/rockchip-dfi.txt index f2233138eba9..001dd63979a9 100644 --- a/Documentation/devicetree/bindings/devfreq/event/rockchip-dfi.txt +++ b/Documentation/devicetree/bindings/devfreq/event/rockchip-dfi.txt @@ -15,5 +15,4 @@ Example: rockchip,pmu = <&pmugrf>; clocks = <&cru PCLK_DDR_MON>; clock-names = "pclk_ddr_mon"; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/devfreq/rk3399_dmc.txt b/Documentation/devicetree/bindings/devfreq/rk3399_dmc.txt index 7a9e8603c150..d6d2833482c9 100644 --- a/Documentation/devicetree/bindings/devfreq/rk3399_dmc.txt +++ b/Documentation/devicetree/bindings/devfreq/rk3399_dmc.txt @@ -205,5 +205,4 @@ Example: rockchip,phy_lpddr4_ck_cs_drv = ; rockchip,phy_lpddr4_dq_drv = ; rockchip,phy_lpddr4_odt = ; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/display/atmel,lcdc.txt b/Documentation/devicetree/bindings/display/atmel,lcdc.txt index ecb8da063d07..1a21202778ee 100644 --- a/Documentation/devicetree/bindings/display/atmel,lcdc.txt +++ b/Documentation/devicetree/bindings/display/atmel,lcdc.txt @@ -34,7 +34,6 @@ Example: pinctrl-names = "default"; pinctrl-0 = <&pinctrl_fb>; display = <&display0>; - status = "okay"; #address-cells = <1>; #size-cells = <1>; diff --git a/Documentation/devicetree/bindings/display/atmel/hlcdc-dc.txt b/Documentation/devicetree/bindings/display/atmel/hlcdc-dc.txt index ec94468b35be..82f2acb3d374 100644 --- a/Documentation/devicetree/bindings/display/atmel/hlcdc-dc.txt +++ b/Documentation/devicetree/bindings/display/atmel/hlcdc-dc.txt @@ -23,7 +23,6 @@ Example: interrupts = <36 IRQ_TYPE_LEVEL_HIGH 0>; clocks = <&lcdc_clk>, <&lcdck>, <&clk32k>; clock-names = "periph_clk","sys_clk", "slow_clk"; - status = "disabled"; hlcdc-display-controller { compatible = "atmel,hlcdc-display-controller"; diff --git a/Documentation/devicetree/bindings/display/bridge/megachips-stdpxxxx-ge-b850v3-fw.txt b/Documentation/devicetree/bindings/display/bridge/megachips-stdpxxxx-ge-b850v3-fw.txt index 7baa6582517e..aacc8b92968c 100644 --- a/Documentation/devicetree/bindings/display/bridge/megachips-stdpxxxx-ge-b850v3-fw.txt +++ b/Documentation/devicetree/bindings/display/bridge/megachips-stdpxxxx-ge-b850v3-fw.txt @@ -33,7 +33,6 @@ stdp2690-ge-b850v3-fw required properties: Example: &mux2_i2c2 { - status = "okay"; clock-frequency = <100000>; stdp4028@73 { diff --git a/Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.txt b/Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.txt index 81b68580e199..b1a8929c2536 100644 --- a/Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.txt +++ b/Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.txt @@ -13,6 +13,7 @@ Required properties: - compatible : Shall contain one or more of - "renesas,r8a7795-hdmi" for R8A7795 (R-Car H3) compatible HDMI TX + - "renesas,r8a7796-hdmi" for R8A7796 (R-Car M3-W) compatible HDMI TX - "renesas,rcar-gen3-hdmi" for the generic R-Car Gen3 compatible HDMI TX When compatible with generic versions, nodes must list the SoC-specific @@ 
-43,7 +44,6 @@ Example: clocks = <&cpg CPG_CORE R8A7795_CLK_S0D4>, <&cpg CPG_MOD 729>; clock-names = "iahb", "isfr"; power-domains = <&sysc R8A7795_PD_ALWAYS_ON>; - status = "disabled"; ports { #address-cells = <1>; diff --git a/Documentation/devicetree/bindings/display/fsl,tcon.txt b/Documentation/devicetree/bindings/display/fsl,tcon.txt index 6fa4ab668db5..475008747801 100644 --- a/Documentation/devicetree/bindings/display/fsl,tcon.txt +++ b/Documentation/devicetree/bindings/display/fsl,tcon.txt @@ -14,5 +14,4 @@ timing-controller@4003d000 { reg = <0x4003d000 0x1000>; clocks = <&clks VF610_CLK_TCON0>; clock-names = "ipg"; - status = "okay"; }; diff --git a/Documentation/devicetree/bindings/display/imx/fsl-imx-drm.txt b/Documentation/devicetree/bindings/display/imx/fsl-imx-drm.txt index fa01db7eb66c..f79854783c2c 100644 --- a/Documentation/devicetree/bindings/display/imx/fsl-imx-drm.txt +++ b/Documentation/devicetree/bindings/display/imx/fsl-imx-drm.txt @@ -116,7 +116,7 @@ Parallel display support Required properties: - compatible: Should be "fsl,imx-parallel-display" Optional properties: -- interface_pix_fmt: How this display is connected to the +- interface-pix-fmt: How this display is connected to the display interface. Currently supported types: "rgb24", "rgb565", "bgr666" and "lvds666". - edid: verbatim EDID data block describing attached display. diff --git a/Documentation/devicetree/bindings/display/marvell,pxa2xx-lcdc.txt b/Documentation/devicetree/bindings/display/marvell,pxa2xx-lcdc.txt index 309c47f25b87..f79641bd5f18 100644 --- a/Documentation/devicetree/bindings/display/marvell,pxa2xx-lcdc.txt +++ b/Documentation/devicetree/bindings/display/marvell,pxa2xx-lcdc.txt @@ -23,7 +23,6 @@ Example: reg = <0x44000000 0x10000>; interrupts = <17>; clocks = <&clks CLK_LCD>; - status = "okay"; port { lcdc_out: endpoint { diff --git a/Documentation/devicetree/bindings/display/panel/innolux,p079zca.txt b/Documentation/devicetree/bindings/display/panel/innolux,p079zca.txt index 5c70a8380e58..d0f55161579a 100644 --- a/Documentation/devicetree/bindings/display/panel/innolux,p079zca.txt +++ b/Documentation/devicetree/bindings/display/panel/innolux,p079zca.txt @@ -18,6 +18,5 @@ Example: power-supply = <...>; backlight = <&backlight>; enable-gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>; - status = "okay"; }; }; diff --git a/Documentation/devicetree/bindings/display/renesas,du.txt b/Documentation/devicetree/bindings/display/renesas,du.txt index c6cb96a4fa93..4bbd1e9bf3be 100644 --- a/Documentation/devicetree/bindings/display/renesas,du.txt +++ b/Documentation/devicetree/bindings/display/renesas,du.txt @@ -36,8 +36,10 @@ Required Properties: When supplied they must be named "dclkin.x" with "x" being the input clock numerical index. - - vsps: A list of phandles to the VSP nodes that handle the memory - interfaces for the DU channels. + - vsps: A list of phandle and channel index tuples to the VSPs that handle + the memory interfaces for the DU channels. The phandle identifies the VSP + instance that serves the DU channel, and the channel index identifies the + LIF instance in that VSP. Required nodes: @@ -59,24 +61,24 @@ corresponding to each DU output. 
R8A7796 (M3-W) DPAD HDMI LVDS - -Example: R8A7790 (R-Car H2) DU +Example: R8A7795 (R-Car H3) ES2.0 DU - du: du@feb00000 { - compatible = "renesas,du-r8a7790"; - reg = <0 0xfeb00000 0 0x70000>, - <0 0xfeb90000 0 0x1c>, - <0 0xfeb94000 0 0x1c>; - reg-names = "du", "lvds.0", "lvds.1"; - interrupt-parent = <&gic>; - interrupts = <0 256 IRQ_TYPE_LEVEL_HIGH>, - <0 268 IRQ_TYPE_LEVEL_HIGH>, - <0 269 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&mstp7_clks R8A7790_CLK_DU0>, - <&mstp7_clks R8A7790_CLK_DU1>, - <&mstp7_clks R8A7790_CLK_DU2>, - <&mstp7_clks R8A7790_CLK_LVDS0>, - <&mstp7_clks R8A7790_CLK_LVDS1>; - clock-names = "du.0", "du.1", "du.2", "lvds.0", "lvds.1"; + du: display@feb00000 { + compatible = "renesas,du-r8a7795"; + reg = <0 0xfeb00000 0 0x80000>, + <0 0xfeb90000 0 0x14>; + reg-names = "du", "lvds.0"; + interrupts = , + , + , + ; + clocks = <&cpg CPG_MOD 724>, + <&cpg CPG_MOD 723>, + <&cpg CPG_MOD 722>, + <&cpg CPG_MOD 721>, + <&cpg CPG_MOD 727>; + clock-names = "du.0", "du.1", "du.2", "du.3", "lvds.0"; + vsps = <&vspd0 0>, <&vspd1 0>, <&vspd2 0>, <&vspd0 1>; ports { #address-cells = <1>; @@ -89,12 +91,19 @@ Example: R8A7790 (R-Car H2) DU }; port@1 { reg = <1>; - du_out_lvds0: endpoint { + du_out_hdmi0: endpoint { + remote-endpoint = <&dw_hdmi0_in>; }; }; port@2 { reg = <2>; - du_out_lvds1: endpoint { + du_out_hdmi1: endpoint { + remote-endpoint = <&dw_hdmi1_in>; + }; + }; + port@3 { + reg = <3>; + du_out_lvds0: endpoint { }; }; }; diff --git a/Documentation/devicetree/bindings/display/rockchip/analogix_dp-rockchip.txt b/Documentation/devicetree/bindings/display/rockchip/analogix_dp-rockchip.txt index 47665a12786f..43561584c13a 100644 --- a/Documentation/devicetree/bindings/display/rockchip/analogix_dp-rockchip.txt +++ b/Documentation/devicetree/bindings/display/rockchip/analogix_dp-rockchip.txt @@ -59,7 +59,6 @@ Example: pinctrl-names = "default"; pinctrl-0 = <&edp_hpd>; - status = "disabled"; ports { #address-cells = <1>; diff --git a/Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt b/Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt index fad8b7619647..adc94fc3c9f8 100644 --- a/Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt +++ b/Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt @@ -46,7 +46,6 @@ hdmi: hdmi@ff980000 { interrupts = ; clocks = <&cru PCLK_HDMI_CTRL>, <&cru SCLK_HDMI_HDCP>; clock-names = "iahb", "isfr"; - status = "disabled"; ports { hdmi_in: port { #address-cells = <1>; diff --git a/Documentation/devicetree/bindings/display/rockchip/dw_mipi_dsi_rockchip.txt b/Documentation/devicetree/bindings/display/rockchip/dw_mipi_dsi_rockchip.txt index 543b07435f4f..6bb59ab39f2f 100644 --- a/Documentation/devicetree/bindings/display/rockchip/dw_mipi_dsi_rockchip.txt +++ b/Documentation/devicetree/bindings/display/rockchip/dw_mipi_dsi_rockchip.txt @@ -36,7 +36,6 @@ Example: resets = <&cru SRST_MIPIDSI0>; reset-names = "apb"; rockchip,grf = <&grf>; - status = "okay"; ports { #address-cells = <1>; @@ -65,6 +64,5 @@ Example: pinctrl-names = "default"; pinctrl-0 = <&lcd_en>; backlight = <&backlight>; - status = "okay"; }; }; diff --git a/Documentation/devicetree/bindings/display/rockchip/inno_hdmi-rockchip.txt b/Documentation/devicetree/bindings/display/rockchip/inno_hdmi-rockchip.txt index 8096a29f9776..cec21714f0e0 100644 --- a/Documentation/devicetree/bindings/display/rockchip/inno_hdmi-rockchip.txt +++ b/Documentation/devicetree/bindings/display/rockchip/inno_hdmi-rockchip.txt @@ -25,7 +25,6 @@ hdmi: 
hdmi@20034000 { clock-names = "pclk"; pinctrl-names = "default"; pinctrl-0 = <&hdmi_ctl>; - status = "disabled"; hdmi_in: port { #address-cells = <1>; diff --git a/Documentation/devicetree/bindings/display/simple-framebuffer-sunxi.txt b/Documentation/devicetree/bindings/display/simple-framebuffer-sunxi.txt index c46ba641a1df..a9168ae6946c 100644 --- a/Documentation/devicetree/bindings/display/simple-framebuffer-sunxi.txt +++ b/Documentation/devicetree/bindings/display/simple-framebuffer-sunxi.txt @@ -28,6 +28,5 @@ chosen { allwinner,pipeline = "de_be0-lcd0-hdmi"; clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 43>, <&ahb_gates 44>; - status = "disabled"; }; }; diff --git a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt index 2ee6ff0ef98e..92441086caba 100644 --- a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt +++ b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt @@ -278,7 +278,6 @@ hdmi: hdmi@01c16000 { <&dma SUN4I_DMA_NORMAL 16>, <&dma SUN4I_DMA_DEDICATED 24>; dma-names = "ddc-tx", "ddc-rx", "audio-tx"; - status = "disabled"; ports { #address-cells = <1>; diff --git a/Documentation/devicetree/bindings/dma/fsl-edma.txt b/Documentation/devicetree/bindings/dma/fsl-edma.txt index 191d7bd8a6fe..97e213e07660 100644 --- a/Documentation/devicetree/bindings/dma/fsl-edma.txt +++ b/Documentation/devicetree/bindings/dma/fsl-edma.txt @@ -72,5 +72,4 @@ sai2: sai@40031000 { dma-names = "tx", "rx"; dmas = <&edma0 0 21>, <&edma0 0 20>; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/dma/mv-xor.txt b/Documentation/devicetree/bindings/dma/mv-xor.txt index c075f5988135..0ffb4d8766a8 100644 --- a/Documentation/devicetree/bindings/dma/mv-xor.txt +++ b/Documentation/devicetree/bindings/dma/mv-xor.txt @@ -30,7 +30,6 @@ xor@d0060900 { reg = <0xd0060900 0x100 0xd0060b00 0x100>; clocks = <&coreclk 0>; - status = "okay"; xor00 { interrupts = <51>; diff --git a/Documentation/devicetree/bindings/dma/qcom_adm.txt b/Documentation/devicetree/bindings/dma/qcom_adm.txt index 9bcab9115982..9d3b2f917b7b 100644 --- a/Documentation/devicetree/bindings/dma/qcom_adm.txt +++ b/Documentation/devicetree/bindings/dma/qcom_adm.txt @@ -48,7 +48,6 @@ Each dmas request consists of 3 cells: Example: spi4: spi@1a280000 { - status = "ok"; spi-max-frequency = <50000000>; pinctrl-0 = <&spi_pins>; diff --git a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt index 79a204d50234..891db41e9420 100644 --- a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt +++ b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt @@ -25,6 +25,7 @@ Required Properties: - "renesas,dmac-r8a7794" (R-Car E2) - "renesas,dmac-r8a7795" (R-Car H3) - "renesas,dmac-r8a7796" (R-Car M3-W) + - "renesas,dmac-r8a77970" (R-Car V3M) - reg: base address and length of the registers block for the DMAC diff --git a/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt index e7780a186a36..1be6941ac1e5 100644 --- a/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt +++ b/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt @@ -8,6 +8,7 @@ Required Properties: - "renesas,r8a7793-usb-dmac" (R-Car M2-N) - "renesas,r8a7794-usb-dmac" (R-Car E2) - "renesas,r8a7795-usb-dmac" (R-Car H3) + - "renesas,r8a7796-usb-dmac" (R-Car M3-W) - reg: base address and length of the registers block for the 
DMAC - interrupts: interrupt specifiers for the DMAC, one for each entry in interrupt-names. diff --git a/Documentation/devicetree/bindings/dma/snps-dma.txt b/Documentation/devicetree/bindings/dma/snps-dma.txt index 4775c66f4508..a122723907ac 100644 --- a/Documentation/devicetree/bindings/dma/snps-dma.txt +++ b/Documentation/devicetree/bindings/dma/snps-dma.txt @@ -63,7 +63,6 @@ Example: compatible = "arm,pl011", "arm,primecell"; reg = <0xe0000000 0x1000>; interrupts = <0 35 0x4>; - status = "disabled"; dmas = <&dmahost 12 0 1>, <&dmahost 13 0 1 0>; dma-names = "rx", "rx"; diff --git a/Documentation/devicetree/bindings/dma/st_fdma.txt b/Documentation/devicetree/bindings/dma/st_fdma.txt index 495d853c569b..52cfec9e77ad 100644 --- a/Documentation/devicetree/bindings/dma/st_fdma.txt +++ b/Documentation/devicetree/bindings/dma/st_fdma.txt @@ -69,7 +69,6 @@ Example: sti_uni_player2: sti-uni-player@2 { compatible = "st,sti-uni-player"; - status = "disabled"; #sound-dai-cells = <0>; st,syscfg = <&syscfg_core>; clocks = <&clk_s_d0_flexgen CLK_PCM_2>; diff --git a/Documentation/devicetree/bindings/dma/ste-dma40.txt b/Documentation/devicetree/bindings/dma/ste-dma40.txt index 95800ab37bb0..aa7dbd565ad0 100644 --- a/Documentation/devicetree/bindings/dma/ste-dma40.txt +++ b/Documentation/devicetree/bindings/dma/ste-dma40.txt @@ -135,5 +135,4 @@ Example: <&dma 13 0 0x0>; /* Logical - MemToDev */ dma-names = "rx", "rx"; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/dma/sun4i-dma.txt b/Documentation/devicetree/bindings/dma/sun4i-dma.txt index f1634a27a830..3b484380c56a 100644 --- a/Documentation/devicetree/bindings/dma/sun4i-dma.txt +++ b/Documentation/devicetree/bindings/dma/sun4i-dma.txt @@ -40,7 +40,6 @@ Example: clock-names = "ahb", "mod"; dmas = <&dma 1 29>, <&dma 1 28>; dma-names = "rx", "tx"; - status = "disabled"; #address-cells = <1>; #size-cells = <0>; }; diff --git a/Documentation/devicetree/bindings/dma/sun6i-dma.txt b/Documentation/devicetree/bindings/dma/sun6i-dma.txt index 6b267045f522..98fbe1a5c6dd 100644 --- a/Documentation/devicetree/bindings/dma/sun6i-dma.txt +++ b/Documentation/devicetree/bindings/dma/sun6i-dma.txt @@ -9,6 +9,7 @@ Required properties: "allwinner,sun8i-a23-dma" "allwinner,sun8i-a83t-dma" "allwinner,sun8i-h3-dma" + "allwinner,sun8i-v3s-dma" - reg: Should contain the registers base address and length - interrupts: Should contain a reference to the interrupt used by this device - clocks: Should contain a reference to the parent AHB clock diff --git a/Documentation/devicetree/bindings/dma/ti-dma-crossbar.txt b/Documentation/devicetree/bindings/dma/ti-dma-crossbar.txt index aead5869a28d..b849a1ed389d 100644 --- a/Documentation/devicetree/bindings/dma/ti-dma-crossbar.txt +++ b/Documentation/devicetree/bindings/dma/ti-dma-crossbar.txt @@ -62,7 +62,6 @@ uart1: serial@4806a000 { interrupts-extended = <&gic GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>; ti,hwmods = "uart1"; clock-frequency = <48000000>; - status = "disabled"; /* Requesting crossbar input 49 and 50 */ dmas = <&sdma_xbar 49>, <&sdma_xbar 50>; dma-names = "tx", "rx"; diff --git a/Documentation/devicetree/bindings/dma/ti-edma.txt b/Documentation/devicetree/bindings/dma/ti-edma.txt index 18090e7226b4..41f0c1a07c56 100644 --- a/Documentation/devicetree/bindings/dma/ti-edma.txt +++ b/Documentation/devicetree/bindings/dma/ti-edma.txt @@ -9,7 +9,12 @@ execute the actual DMA tansfer. 
eDMA3 Channel Controller Required properties: -- compatible: "ti,edma3-tpcc" for the channel controller(s) +-------------------- +- compatible: Should be: + - "ti,edma3-tpcc" for the channel controller(s) on OMAP, + AM33xx and AM43xx SoCs. + - "ti,k2g-edma3-tpcc", "ti,edma3-tpcc" for the + channel controller(s) on 66AK2G. - #dma-cells: Should be set to <2>. The first number is the DMA request number and the second is the TC the channel is serviced on. - reg: Memory map of eDMA CC @@ -19,8 +24,19 @@ Required properties: - ti,tptcs: List of TPTCs associated with the eDMA in the following form: <&tptc_phandle TC_priority_number>. The highest priority is 0. +SoC-specific Required properties: +-------------------------------- +The following are mandatory properties for OMAP, AM33xx and AM43xx SoCs only: +- ti,hwmods: Name of the hwmods associated to the eDMA CC. + +The following are mandatory properties for 66AK2G SoCs only: +- power-domains:Should contain a phandle to a PM domain provider node + and an args specifier containing the device id + value. This property is as per the binding, + Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt + Optional properties: -- ti,hwmods: Name of the hwmods associated to the eDMA CC +------------------- - ti,edma-memcpy-channels: List of channels allocated to be used for memcpy, iow these channels will be SW triggered channels. See example. - ti,edma-reserved-slot-ranges: PaRAM slot ranges which should not be used by @@ -31,17 +47,34 @@ Optional properties: eDMA3 Transfer Controller Required properties: -- compatible: "ti,edma3-tptc" for the transfer controller(s) +-------------------- +- compatible: Should be: + - "ti,edma3-tptc" for the transfer controller(s) on OMAP, + AM33xx and AM43xx SoCs. + - "ti,k2g-edma3-tptc", "ti,edma3-tptc" for the + transfer controller(s) on 66AK2G. - reg: Memory map of eDMA TC - interrupts: Interrupt number for TCerrint. +SoC-specific Required properties: +-------------------------------- +The following are mandatory properties for OMAP, AM33xx and AM43xx SoCs only: +- ti,hwmods: Name of the hwmods associated to the eDMA TC. + +The following are mandatory properties for 66AK2G SoCs only: +- power-domains:Should contain a phandle to a PM domain provider node + and an args specifier containing the device id + value. This property is as per the binding, + Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt + Optional properties: -- ti,hwmods: Name of the hwmods associated to the given eDMA TC +------------------- - interrupt-names: "edma3_tcerrint" ------------------------------------------------------------------------------ -Example: +Examples: +1. edma: edma@49000000 { compatible = "ti,edma3-tpcc"; ti,hwmods = "tpcc"; @@ -102,13 +135,64 @@ mcasp0: mcasp@48038000 { reg-names = "mpu", "dat"; interrupts = <80>, <81>; interrupt-names = "tx", "rx"; - status = "disabled"; /* DMA channels 8 and 9 executed on eDMA TC2 - high priority queue */ dmas = <&edma 8 2>, <&edma 9 2>; dma-names = "tx", "rx"; }; +2. +edma1: edma@02728000 { + compatible = "ti,k2g-edma3-tpcc", "ti,edma3-tpcc"; + reg = <0x02728000 0x8000>; + reg-names = "edma3_cc"; + interrupts = , + , + ; + interrupt-names = "edma3_ccint", "emda3_mperr", + "edma3_ccerrint"; + dma-requests = <64>; + #dma-cells = <2>; + + ti,tptcs = <&edma1_tptc0 7>, <&edma1_tptc1 0>; + + /* + * memcpy is disabled, can be enabled with: + * ti,edma-memcpy-channels = <12 13 14 15>; + * for example. 
+ */ + + power-domains = <&k2g_pds 0x4f>; +}; + +edma1_tptc0: tptc@027b0000 { + compatible = "ti,k2g-edma3-tptc", "ti,edma3-tptc"; + reg = <0x027b0000 0x400>; + power-domains = <&k2g_pds 0x4f>; +}; + +edma1_tptc1: tptc@027b8000 { + compatible = "ti, k2g-edma3-tptc", "ti,edma3-tptc"; + reg = <0x027b8000 0x400>; + power-domains = <&k2g_pds 0x4f>; +}; + +mmc0: mmc@23000000 { + compatible = "ti,k2g-hsmmc", "ti,omap4-hsmmc"; + reg = <0x23000000 0x400>; + interrupts = ; + dmas = <&edma1 24 0>, <&edma1 25 0>; + dma-names = "tx", "rx"; + bus-width = <4>; + ti,needs-special-reset; + no-1-8-v; + max-frequency = <96000000>; + power-domains = <&k2g_pds 0xb>; + clocks = <&k2g_clks 0xb 1>, <&k2g_clks 0xb 2>; + clock-names = "fck", "mmchsdb_fck"; + status = "disabled"; +}; + ------------------------------------------------------------------------------ DEPRECATED binding, new DTS files must use the ti,edma3-tpcc/ti,edma3-tptc binding. diff --git a/Documentation/devicetree/bindings/eeprom/eeprom.txt b/Documentation/devicetree/bindings/eeprom/eeprom.txt index 5696eb508e95..afc04589eadf 100644 --- a/Documentation/devicetree/bindings/eeprom/eeprom.txt +++ b/Documentation/devicetree/bindings/eeprom/eeprom.txt @@ -16,8 +16,12 @@ Required properties: "renesas,r1ex24002" + The following manufacturers values have been deprecated: + "at", "at24" + If there is no specific driver for , a generic - driver based on is selected. Possible types are: + device with and manufacturer "atmel" should be used. + Possible types are: "24c00", "24c01", "24c02", "24c04", "24c08", "24c16", "24c32", "24c64", "24c128", "24c256", "24c512", "24c1024", "spd" diff --git a/Documentation/devicetree/bindings/fpga/xilinx-slave-serial.txt b/Documentation/devicetree/bindings/fpga/xilinx-slave-serial.txt index 9766f7472f51..cfa4ed42b62f 100644 --- a/Documentation/devicetree/bindings/fpga/xilinx-slave-serial.txt +++ b/Documentation/devicetree/bindings/fpga/xilinx-slave-serial.txt @@ -31,7 +31,6 @@ Example for full FPGA configuration: cell-index = <1>; interrupts = <92>; clocks = <&coreclk 0>; - status = "okay"; fpga_mgr_spi: fpga-mgr@0 { compatible = "xlnx,fpga-slave-serial"; diff --git a/Documentation/devicetree/bindings/gpio/gpio-mpc8xxx.txt b/Documentation/devicetree/bindings/gpio/gpio-mpc8xxx.txt index 4b6cc632ca5c..69d46162d0f5 100644 --- a/Documentation/devicetree/bindings/gpio/gpio-mpc8xxx.txt +++ b/Documentation/devicetree/bindings/gpio/gpio-mpc8xxx.txt @@ -23,7 +23,6 @@ gpio0: gpio@1100 { #gpio-cells = <2>; reg = <0x1100 0x080>; interrupts = <78 0x8>; - status = "okay"; }; Example of gpio-controller node for a ls2080a SoC: diff --git a/Documentation/devicetree/bindings/gpio/spear_spics.txt b/Documentation/devicetree/bindings/gpio/spear_spics.txt index 96c37eb15075..dd04d96e6ff1 100644 --- a/Documentation/devicetree/bindings/gpio/spear_spics.txt +++ b/Documentation/devicetree/bindings/gpio/spear_spics.txt @@ -42,7 +42,6 @@ spics: spics@e0700000{ spi0: spi@e0100000 { - status = "okay"; num-cs = <3>; cs-gpios = <&gpio1 7 0>, <&spics 0>, <&spics 1>; diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-midgard.txt b/Documentation/devicetree/bindings/gpu/arm,mali-midgard.txt index 5aa5926029ee..039219df05c5 100644 --- a/Documentation/devicetree/bindings/gpu/arm,mali-midgard.txt +++ b/Documentation/devicetree/bindings/gpu/arm,mali-midgard.txt @@ -17,6 +17,7 @@ Required properties: * which must be preceded by one of the following vendor specifics: + "amlogic,meson-gxm-mali" + "rockchip,rk3288-mali" + + "rockchip,rk3399-mali" - reg : 
Physical base address of the device and length of the register area. diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-utgard.txt b/Documentation/devicetree/bindings/gpu/arm,mali-utgard.txt index 2b6243e730f6..b4ebd56d03f3 100644 --- a/Documentation/devicetree/bindings/gpu/arm,mali-utgard.txt +++ b/Documentation/devicetree/bindings/gpu/arm,mali-utgard.txt @@ -10,6 +10,7 @@ Required properties: * And, optionally, one of the vendor specific compatible: + allwinner,sun4i-a10-mali + allwinner,sun7i-a20-mali + + allwinner,sun50i-h5-mali + amlogic,meson-gxbb-mali + amlogic,meson-gxl-mali + stericsson,db8500-mali @@ -58,6 +59,10 @@ to specify one more vendor-specific compatible, among: Required properties: * resets: phandle to the reset line for the GPU + - allwinner,sun50i-h5-mali + Required properties: + * resets: phandle to the reset line for the GPU + - stericsson,db8500-mali Required properties: * interrupt-names and interrupts: diff --git a/Documentation/devicetree/bindings/gpu/nvidia,gk20a.txt b/Documentation/devicetree/bindings/gpu/nvidia,gk20a.txt index b7e4c7444510..f32bbba4d3bc 100644 --- a/Documentation/devicetree/bindings/gpu/nvidia,gk20a.txt +++ b/Documentation/devicetree/bindings/gpu/nvidia,gk20a.txt @@ -51,7 +51,6 @@ Example for GK20A: resets = <&tegra_car 184>; reset-names = "gpu"; iommus = <&mc TEGRA_SWGROUP_GPU>; - status = "disabled"; }; Example for GM20B: @@ -70,7 +69,6 @@ Example for GM20B: resets = <&tegra_car 184>; reset-names = "gpu"; iommus = <&mc TEGRA_SWGROUP_GPU>; - status = "disabled"; }; Example for GP10B: @@ -89,5 +87,4 @@ Example for GP10B: reset-names = "gpu"; power-domains = <&bpmp TEGRA186_POWER_DOMAIN_GPU>; iommus = <&smmu TEGRA186_SID_GPU>; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/gpu/samsung-g2d.txt b/Documentation/devicetree/bindings/gpu/samsung-g2d.txt index c4f358dafdaa..1e7959332dbc 100644 --- a/Documentation/devicetree/bindings/gpu/samsung-g2d.txt +++ b/Documentation/devicetree/bindings/gpu/samsung-g2d.txt @@ -24,5 +24,4 @@ Example: interrupts = <0 89 0>; clocks = <&clock 177>, <&clock 277>; clock-names = "sclk_fimg2d", "fimg2d"; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/hsi/omap-ssi.txt b/Documentation/devicetree/bindings/hsi/omap-ssi.txt index f26625e42693..b8eca3c7810d 100644 --- a/Documentation/devicetree/bindings/hsi/omap-ssi.txt +++ b/Documentation/devicetree/bindings/hsi/omap-ssi.txt @@ -92,6 +92,5 @@ ssi-controller@48058000 { interrupts = <69>, <70>; - status = "disabled"; /* second port is not used on N900 */ } } diff --git a/Documentation/devicetree/bindings/i2c/i2c-altera.txt b/Documentation/devicetree/bindings/i2c/i2c-altera.txt new file mode 100644 index 000000000000..767664f448ec --- /dev/null +++ b/Documentation/devicetree/bindings/i2c/i2c-altera.txt @@ -0,0 +1,39 @@ +* Altera I2C Controller +* This is Altera's synthesizable logic block I2C Controller for use +* in Altera's FPGAs. + +Required properties : + - compatible : should be "altr,softip-i2c-v1.0" + - reg : Offset and length of the register set for the device + - interrupts : where IRQ is the interrupt number. + - clocks : phandle to input clock. + - #address-cells = <1>; + - #size-cells = <0>; + +Recommended properties : + - clock-frequency : desired I2C bus clock frequency in Hz. + +Optional properties : + - fifo-size : Size of the RX and TX FIFOs in bytes. 
+ - Child nodes conforming to i2c bus binding + +Example : + + i2c@100080000 { + compatible = "altr,softip-i2c-v1.0"; + reg = <0x00000001 0x00080000 0x00000040>; + interrupt-parent = <&intc>; + interrupts = <0 43 4>; + clocks = <&clk_0>; + clock-frequency = <100000>; + #address-cells = <1>; + #size-cells = <0>; + fifo-size = <4>; + + eeprom@51 { + compatible = "atmel,24c32"; + reg = <0x51>; + pagesize = <32>; + }; + }; + diff --git a/Documentation/devicetree/bindings/i2c/i2c-cbus-gpio.txt b/Documentation/devicetree/bindings/i2c/i2c-cbus-gpio.txt index 8ce9cd2855b5..c143948b2a37 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-cbus-gpio.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-cbus-gpio.txt @@ -20,8 +20,8 @@ i2c@0 { #address-cells = <1>; #size-cells = <0>; - retu-mfd: retu@1 { - compatible = "retu-mfd"; + retu: retu@1 { + compatible = "nokia,retu"; reg = <0x1>; }; }; diff --git a/Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.txt b/Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.txt index 7ce23ac61308..81b5d55086fa 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.txt @@ -102,7 +102,6 @@ And for clarification, here are the snipplets for the i2c-parents: #address-cells = <1>; #size-cells = <0>; compatible = "i2c-gpio"; - status = "disabled"; gpios = <&gpio5 6 GPIO_ACTIVE_HIGH /* sda */ &gpio5 5 GPIO_ACTIVE_HIGH /* scl */ >; diff --git a/Documentation/devicetree/bindings/i2c/i2c-efm32.txt b/Documentation/devicetree/bindings/i2c/i2c-efm32.txt index 50b25c3da186..3b30e54ae3c7 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-efm32.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-efm32.txt @@ -22,7 +22,6 @@ Example: interrupts = <9>; clocks = <&cmu clk_HFPERCLKI2C0>; clock-frequency = <100000>; - status = "ok"; energymicro,location = <3>; eeprom@50 { diff --git a/Documentation/devicetree/bindings/i2c/i2c-mtk.txt b/Documentation/devicetree/bindings/i2c/i2c-mtk.txt index bd5a7befd951..ff7bf37deb43 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-mtk.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-mtk.txt @@ -1,14 +1,15 @@ -* Mediatek's I2C controller +* MediaTek's I2C controller -The Mediatek's I2C controller is used to interface with I2C devices. +The MediaTek's I2C controller is used to interface with I2C devices. Required properties: - compatible: value should be either of the following. - "mediatek,mt2701-i2c", "mediatek,mt6577-i2c": for Mediatek mt2701 - "mediatek,mt6577-i2c": for i2c compatible with mt6577. - "mediatek,mt6589-i2c": for i2c compatible with mt6589. - "mediatek,mt7623-i2c", "mediatek,mt6577-i2c": for i2c compatible with mt7623. - "mediatek,mt8173-i2c": for i2c compatible with mt8173. + "mediatek,mt2701-i2c", "mediatek,mt6577-i2c": for MediaTek MT2701 + "mediatek,mt6577-i2c": for MediaTek MT6577 + "mediatek,mt6589-i2c": for MediaTek MT6589 + "mediatek,mt7622-i2c": for MediaTek MT7622 + "mediatek,mt7623-i2c", "mediatek,mt6577-i2c": for MediaTek MT7623 + "mediatek,mt8173-i2c": for MediaTek MT8173 - reg: physical base address of the controller and dma base, length of memory mapped region. - interrupts: interrupt number to the cpu. 
diff --git a/Documentation/devicetree/bindings/i2c/i2c-rcar.txt b/Documentation/devicetree/bindings/i2c/i2c-rcar.txt index 2b8bd33dbf8d..cad39aee9f73 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-rcar.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-rcar.txt @@ -2,6 +2,8 @@ I2C for R-Car platforms Required properties: - compatible: + "renesas,i2c-r8a7743" if the device is a part of a R8A7743 SoC. + "renesas,i2c-r8a7745" if the device is a part of a R8A7745 SoC. "renesas,i2c-r8a7778" if the device is a part of a R8A7778 SoC. "renesas,i2c-r8a7779" if the device is a part of a R8A7779 SoC. "renesas,i2c-r8a7790" if the device is a part of a R8A7790 SoC. @@ -12,7 +14,8 @@ Required properties: "renesas,i2c-r8a7795" if the device is a part of a R8A7795 SoC. "renesas,i2c-r8a7796" if the device is a part of a R8A7796 SoC. "renesas,rcar-gen1-i2c" for a generic R-Car Gen1 compatible device. - "renesas,rcar-gen2-i2c" for a generic R-Car Gen2 compatible device. + "renesas,rcar-gen2-i2c" for a generic R-Car Gen2 or RZ/G1 compatible + device. "renesas,rcar-gen3-i2c" for a generic R-Car Gen3 compatible device. "renesas,i2c-rcar" (deprecated) diff --git a/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt b/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt index e18445d0980c..22f2eeb2c4c9 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt @@ -7,6 +7,7 @@ Required properties : - reg : Offset and length of the register set for the device - compatible: should be one of the following: + - "rockchip,rv1108-i2c": for rv1108 - "rockchip,rk3066-i2c": for rk3066 - "rockchip,rk3188-i2c": for rk3188 - "rockchip,rk3228-i2c": for rk3228 diff --git a/Documentation/devicetree/bindings/i2c/i2c-sh_mobile.txt b/Documentation/devicetree/bindings/i2c/i2c-sh_mobile.txt index ae9c2a735f39..224390999e81 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-sh_mobile.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-sh_mobile.txt @@ -4,6 +4,8 @@ Required properties: - compatible : - "renesas,iic-r8a73a4" (R-Mobile APE6) - "renesas,iic-r8a7740" (R-Mobile A1) + - "renesas,iic-r8a7743" (RZ/G1M) + - "renesas,iic-r8a7745" (RZ/G1E) - "renesas,iic-r8a7790" (R-Car H2) - "renesas,iic-r8a7791" (R-Car M2-W) - "renesas,iic-r8a7792" (R-Car V2H) @@ -12,7 +14,8 @@ Required properties: - "renesas,iic-r8a7795" (R-Car H3) - "renesas,iic-r8a7796" (R-Car M3-W) - "renesas,iic-sh73a0" (SH-Mobile AG5) - - "renesas,rcar-gen2-iic" (generic R-Car Gen2 compatible device) + - "renesas,rcar-gen2-iic" (generic R-Car Gen2 or RZ/G1 + compatible device) - "renesas,rcar-gen3-iic" (generic R-Car Gen3 compatible device) - "renesas,rmobile-iic" (generic device) diff --git a/Documentation/devicetree/bindings/i2c/i2c-sprd.txt b/Documentation/devicetree/bindings/i2c/i2c-sprd.txt new file mode 100644 index 000000000000..60b7cda15dd2 --- /dev/null +++ b/Documentation/devicetree/bindings/i2c/i2c-sprd.txt @@ -0,0 +1,31 @@ +I2C for Spreadtrum platforms + +Required properties: +- compatible: Should be "sprd,sc9860-i2c". +- reg: Specify the physical base address of the controller and length + of memory mapped region. +- interrupts: Should contain I2C interrupt. +- clock-names: Should contain following entries: + "i2c" for I2C clock, + "source" for I2C source (parent) clock, + "enable" for I2C module enable clock. +- clocks: Should contain a clock specifier for each entry in clock-names. +- clock-frequency: Constains desired I2C bus clock frequency in Hz. 
+- #address-cells: Should be 1 to describe address cells for I2C device address. +- #size-cells: Should be 0 means no size cell for I2C device address. + +Optional properties: +- Child nodes conforming to I2C bus binding + +Examples: +i2c0: i2c@70500000 { + compatible = "sprd,sc9860-i2c"; + reg = <0 0x70500000 0 0x1000>; + interrupts = ; + clock-names = "i2c", "source", "enable"; + clocks = <&clk_i2c3>, <&ext_26m>, <&clk_ap_apb_gates 11>; + clock-frequency = <400000>; + #address-cells = <1>; + #size-cells = <0>; +}; + diff --git a/Documentation/devicetree/bindings/i2c/i2c-stm32.txt b/Documentation/devicetree/bindings/i2c/i2c-stm32.txt index 78eaf7b718ed..3b5489966634 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-stm32.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-stm32.txt @@ -1,7 +1,9 @@ * I2C controller embedded in STMicroelectronics STM32 I2C platform Required properties : -- compatible : Must be "st,stm32f4-i2c" +- compatible : Must be one of the following + - "st,stm32f4-i2c" + - "st,stm32f7-i2c" - reg : Offset and length of the register set for the device - interrupts : Must contain the interrupt id for I2C event and then the interrupt id for I2C error. @@ -14,8 +16,16 @@ Required properties : Optional properties : - clock-frequency : Desired I2C bus clock frequency in Hz. If not specified, - the default 100 kHz frequency will be used. As only Normal and Fast modes - are supported, possible values are 100000 and 400000. + the default 100 kHz frequency will be used. + For STM32F4 SoC Standard-mode and Fast-mode are supported, possible values are + 100000 and 400000. + For STM32F7 SoC, Standard-mode, Fast-mode and Fast-mode Plus are supported, + possible values are 100000, 400000 and 1000000. +- i2c-scl-rising-time-ns : Only for STM32F7, I2C SCL Rising time for the board + (default: 25) +- i2c-scl-falling-time-ns : Only for STM32F7, I2C SCL Falling time for the board + (default: 10) + I2C Timings are derived from these 2 values Example : @@ -31,3 +41,16 @@ Example : pinctrl-0 = <&i2c1_sda_pin>, <&i2c1_scl_pin>; pinctrl-names = "default"; }; + + i2c@40005400 { + compatible = "st,stm32f7-i2c"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x40005400 0x400>; + interrupts = <31>, + <32>; + resets = <&rcc STM32F7_APB1_RESET(I2C1)>; + clocks = <&rcc 1 CLK_I2C1>; + pinctrl-0 = <&i2c1_sda_pin>, <&i2c1_scl_pin>; + pinctrl-names = "default"; + }; diff --git a/Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.txt b/Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.txt index 656716b72cc4..f64064f8bdc2 100644 --- a/Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.txt +++ b/Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.txt @@ -71,5 +71,4 @@ Example: reset-names = "i2c"; dmas = <&apbdma 16>, <&apbdma 16>; dma-names = "rx", "tx"; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/iio/adc/brcm,iproc-static-adc.txt b/Documentation/devicetree/bindings/iio/adc/brcm,iproc-static-adc.txt index caaaed765ce4..7b1b1e4086d4 100644 --- a/Documentation/devicetree/bindings/iio/adc/brcm,iproc-static-adc.txt +++ b/Documentation/devicetree/bindings/iio/adc/brcm,iproc-static-adc.txt @@ -37,5 +37,4 @@ For example: clocks = <&asiu_clks BCM_CYGNUS_ASIU_ADC_CLK>; clock-names = "tsc_clk"; interrupts = ; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/iio/adc/lpc1850-adc.txt b/Documentation/devicetree/bindings/iio/adc/lpc1850-adc.txt index 0bcae5140bc5..9ada5abd45fa 100644 --- 
a/Documentation/devicetree/bindings/iio/adc/lpc1850-adc.txt +++ b/Documentation/devicetree/bindings/iio/adc/lpc1850-adc.txt @@ -17,5 +17,4 @@ adc0: adc@400e3000 { clocks = <&ccu1 CLK_APB3_ADC0>; vref-supply = <®_vdda>; resets = <&rgu 40>; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/iio/counter/stm32-lptimer-cnt.txt b/Documentation/devicetree/bindings/iio/counter/stm32-lptimer-cnt.txt new file mode 100644 index 000000000000..a04aa5c04103 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/counter/stm32-lptimer-cnt.txt @@ -0,0 +1,27 @@ +STMicroelectronics STM32 Low-Power Timer quadrature encoder and counter + +STM32 Low-Power Timer provides several counter modes. It can be used as: +- quadrature encoder to detect angular position and direction of rotary + elements, from IN1 and IN2 input signals. +- simple counter from IN1 input signal. + +Must be a sub-node of an STM32 Low-Power Timer device tree node. +See ../mfd/stm32-lptimer.txt for details about the parent node. + +Required properties: +- compatible: Must be "st,stm32-lptimer-counter". +- pinctrl-names: Set to "default". +- pinctrl-0: List of phandles pointing to pin configuration nodes, + to set IN1/IN2 pins in mode of operation for Low-Power + Timer input on external pin. + +Example: + timer@40002400 { + compatible = "st,stm32-lptimer"; + ... + counter { + compatible = "st,stm32-lptimer-counter"; + pinctrl-names = "default"; + pinctrl-0 = <&lptim1_in_pins>; + }; + }; diff --git a/Documentation/devicetree/bindings/iio/dac/lpc1850-dac.txt b/Documentation/devicetree/bindings/iio/dac/lpc1850-dac.txt index 7d6647d4af5e..42db783c4e75 100644 --- a/Documentation/devicetree/bindings/iio/dac/lpc1850-dac.txt +++ b/Documentation/devicetree/bindings/iio/dac/lpc1850-dac.txt @@ -16,5 +16,4 @@ dac: dac@400e1000 { clocks = <&ccu1 CLK_APB3_DAC>; vref-supply = <®_vdda>; resets = <&rgu 42>; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/iio/timer/stm32-lptimer-trigger.txt b/Documentation/devicetree/bindings/iio/timer/stm32-lptimer-trigger.txt new file mode 100644 index 000000000000..85e6806b17d7 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/timer/stm32-lptimer-trigger.txt @@ -0,0 +1,23 @@ +STMicroelectronics STM32 Low-Power Timer Trigger + +STM32 Low-Power Timer provides trigger source (LPTIM output) that can be used +by STM32 internal ADC and/or DAC. + +Must be a sub-node of an STM32 Low-Power Timer device tree node. +See ../mfd/stm32-lptimer.txt for details about the parent node. + +Required properties: +- compatible: Must be "st,stm32-lptimer-trigger". +- reg: Identify trigger hardware block. Must be 0, 1 or 2 + respectively for lptimer1, lptimer2 or lptimer3 + trigger output. + +Example: + timer@40002400 { + compatible = "st,stm32-lptimer"; + ... + trigger@0 { + compatible = "st,stm32-lptimer-trigger"; + reg = <0>; + }; + }; diff --git a/Documentation/devicetree/bindings/input/atmel,maxtouch.txt b/Documentation/devicetree/bindings/input/atmel,maxtouch.txt index 1852906517ab..23e3abc3fdef 100644 --- a/Documentation/devicetree/bindings/input/atmel,maxtouch.txt +++ b/Documentation/devicetree/bindings/input/atmel,maxtouch.txt @@ -22,6 +22,8 @@ Optional properties for main touchpad device: experiment to determine which bit corresponds to which input. Use KEY_RESERVED for unused padding values. 
+- reset-gpios: GPIO specifier for the touchscreen's reset pin (active low) + Example: touch@4b { diff --git a/Documentation/devicetree/bindings/input/brcm,bcm-keypad.txt b/Documentation/devicetree/bindings/input/brcm,bcm-keypad.txt index b77f50bd6403..262deab73588 100644 --- a/Documentation/devicetree/bindings/input/brcm,bcm-keypad.txt +++ b/Documentation/devicetree/bindings/input/brcm,bcm-keypad.txt @@ -72,7 +72,6 @@ Example: /* Required Board specific properties */ keypad,num-rows = <5>; keypad,num-columns = <5>; - status = "okay"; linux,keymap = ; + }; + + vibrator_enable_pin: pinmux_vibrator_enable_pin { + pinctrl-single,pins = < + OMAP4_IOPAD(0X1d0, PIN_OUTPUT | MUX_MODE1) /* dmtimer9_pwm_evt (gpio_28) */ + >; + }; +}; + +/ { + pwm8: dmtimer-pwm { + pinctrl-names = "default"; + pinctrl-0 = <&vibrator_direction_pin>; + + compatible = "ti,omap-dmtimer-pwm"; + #pwm-cells = <3>; + ti,timers = <&timer8>; + ti,clock-source = <0x01>; + }; + + pwm9: dmtimer-pwm { + pinctrl-names = "default"; + pinctrl-0 = <&vibrator_enable_pin>; + + compatible = "ti,omap-dmtimer-pwm"; + #pwm-cells = <3>; + ti,timers = <&timer9>; + ti,clock-source = <0x01>; + }; + + vibrator { + compatible = "pwm-vibrator"; + pwms = <&pwm8 0 1000000000 0>, + <&pwm9 0 1000000000 0>; + pwm-names = "enable", "direction"; + direction-duty-cycle-ns = <1000000000>; + }; +}; diff --git a/Documentation/devicetree/bindings/input/ti,drv260x.txt b/Documentation/devicetree/bindings/input/ti,drv260x.txt index ee09c8f4474a..4c5312eaaa85 100644 --- a/Documentation/devicetree/bindings/input/ti,drv260x.txt +++ b/Documentation/devicetree/bindings/input/ti,drv260x.txt @@ -43,7 +43,7 @@ haptics: haptics@5a { mode = ; library-sel = ; vib-rated-mv = <3200>; - vib-overdriver-mv = <3200>; + vib-overdrive-mv = <3200>; } For more product information please see the link below: diff --git a/Documentation/devicetree/bindings/input/touchscreen/colibri-vf50-ts.txt b/Documentation/devicetree/bindings/input/touchscreen/colibri-vf50-ts.txt index 9d9e930f3251..df531b5b6a0d 100644 --- a/Documentation/devicetree/bindings/input/touchscreen/colibri-vf50-ts.txt +++ b/Documentation/devicetree/bindings/input/touchscreen/colibri-vf50-ts.txt @@ -32,5 +32,4 @@ Example: pinctrl-1 = <&pinctrl_touchctrl_default>; pinctrl-2 = <&pinctrl_touchctrl_gpios>; vf50-ts-min-pressure = <200>; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/input/touchscreen/imx6ul_tsc.txt b/Documentation/devicetree/bindings/input/touchscreen/imx6ul_tsc.txt index d4927c202aef..e67e58b61706 100644 --- a/Documentation/devicetree/bindings/input/touchscreen/imx6ul_tsc.txt +++ b/Documentation/devicetree/bindings/input/touchscreen/imx6ul_tsc.txt @@ -35,5 +35,4 @@ Example: measure-delay-time = <0xfff>; pre-charge-time = <0xffff>; touchscreen-average-samples = <32>; - status = "okay"; }; diff --git a/Documentation/devicetree/bindings/interrupt-controller/mediatek,sysirq.txt b/Documentation/devicetree/bindings/interrupt-controller/mediatek,sysirq.txt index 11cc87aeb276..07bf0b9a5139 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/mediatek,sysirq.txt +++ b/Documentation/devicetree/bindings/interrupt-controller/mediatek,sysirq.txt @@ -17,6 +17,7 @@ Required properties: "mediatek,mt6582-sysirq", "mediatek,mt6577-sysirq": for MT6582 "mediatek,mt6580-sysirq", "mediatek,mt6577-sysirq": for MT6580 "mediatek,mt6577-sysirq": for MT6577 + "mediatek,mt2712-sysirq", "mediatek,mt6577-sysirq": for MT2712 "mediatek,mt2701-sysirq", "mediatek,mt6577-sysirq": for MT2701 - 
interrupt-controller : Identifies the node as an interrupt controller - #interrupt-cells : Use the same format as specified by GIC in arm,gic.txt. diff --git a/Documentation/devicetree/bindings/iommu/qcom,iommu.txt b/Documentation/devicetree/bindings/iommu/qcom,iommu.txt new file mode 100644 index 000000000000..b2641ceb2b40 --- /dev/null +++ b/Documentation/devicetree/bindings/iommu/qcom,iommu.txt @@ -0,0 +1,121 @@ +* QCOM IOMMU v1 Implementation + +Qualcomm "B" family devices which are not compatible with arm-smmu have +a similar looking IOMMU but without access to the global register space, +and optionally requiring additional configuration to route context irqs +to non-secure vs secure interrupt line. + +** Required properties: + +- compatible : Should be one of: + + "qcom,msm8916-iommu" + + Followed by "qcom,msm-iommu-v1". + +- clock-names : Should be a pair of "iface" (required for IOMMUs + register group access) and "bus" (required for + the IOMMUs underlying bus access). + +- clocks : Phandles for respective clocks described by + clock-names. + +- #address-cells : must be 1. + +- #size-cells : must be 1. + +- #iommu-cells : Must be 1. Index identifies the context-bank #. + +- ranges : Base address and size of the iommu context banks. + +- qcom,iommu-secure-id : secure-id. + +- List of sub-nodes, one per translation context bank. Each sub-node + has the following required properties: + + - compatible : Should be one of: + - "qcom,msm-iommu-v1-ns" : non-secure context bank + - "qcom,msm-iommu-v1-sec" : secure context bank + - reg : Base address and size of context bank within the iommu + - interrupts : The context fault irq. + +** Optional properties: + +- reg : Base address and size of the SMMU local base, should + be only specified if the iommu requires configuration + for routing of context bank irq's to secure vs non- + secure lines. (Ie. if the iommu contains secure + context banks) + + +** Examples: + + apps_iommu: iommu@1e20000 { + #address-cells = <1>; + #size-cells = <1>; + #iommu-cells = <1>; + compatible = "qcom,msm8916-iommu", "qcom,msm-iommu-v1"; + ranges = <0 0x1e20000 0x40000>; + reg = <0x1ef0000 0x3000>; + clocks = <&gcc GCC_SMMU_CFG_CLK>, + <&gcc GCC_APSS_TCU_CLK>; + clock-names = "iface", "bus"; + qcom,iommu-secure-id = <17>; + + // mdp_0: + iommu-ctx@4000 { + compatible = "qcom,msm-iommu-v1-ns"; + reg = <0x4000 0x1000>; + interrupts = ; + }; + + // venus_ns: + iommu-ctx@5000 { + compatible = "qcom,msm-iommu-v1-sec"; + reg = <0x5000 0x1000>; + interrupts = ; + }; + }; + + gpu_iommu: iommu@1f08000 { + #address-cells = <1>; + #size-cells = <1>; + #iommu-cells = <1>; + compatible = "qcom,msm8916-iommu", "qcom,msm-iommu-v1"; + ranges = <0 0x1f08000 0x10000>; + clocks = <&gcc GCC_SMMU_CFG_CLK>, + <&gcc GCC_GFX_TCU_CLK>; + clock-names = "iface", "bus"; + qcom,iommu-secure-id = <18>; + + // gfx3d_user: + iommu-ctx@1000 { + compatible = "qcom,msm-iommu-v1-ns"; + reg = <0x1000 0x1000>; + interrupts = ; + }; + + // gfx3d_priv: + iommu-ctx@2000 { + compatible = "qcom,msm-iommu-v1-ns"; + reg = <0x2000 0x1000>; + interrupts = ; + }; + }; + + ... + + venus: video-codec@1d00000 { + ... + iommus = <&apps_iommu 5>; + }; + + mdp: mdp@1a01000 { + ... + iommus = <&apps_iommu 4>; + }; + + gpu@01c00000 { + ... 
+ iommus = <&gpu_iommu 1>, <&gpu_iommu 2>; + }; diff --git a/Documentation/devicetree/bindings/iommu/rockchip,iommu.txt b/Documentation/devicetree/bindings/iommu/rockchip,iommu.txt index 9a55ac3735e5..2098f7732264 100644 --- a/Documentation/devicetree/bindings/iommu/rockchip,iommu.txt +++ b/Documentation/devicetree/bindings/iommu/rockchip,iommu.txt @@ -15,6 +15,11 @@ Required properties: to associate with its master device. See: Documentation/devicetree/bindings/iommu/iommu.txt +Optional properties: +- rockchip,disable-mmu-reset : Don't use the mmu reset operation. + Some mmu instances may produce unexpected results + when the reset operation is used. + Example: vopl_mmu: iommu@ff940300 { diff --git a/Documentation/devicetree/bindings/leds/ams,as3645a.txt b/Documentation/devicetree/bindings/leds/ams,as3645a.txt new file mode 100644 index 000000000000..fdc40e354a64 --- /dev/null +++ b/Documentation/devicetree/bindings/leds/ams,as3645a.txt @@ -0,0 +1,79 @@ +Analog devices AS3645A device tree bindings + +The AS3645A flash LED controller can drive two LEDs, one high current +flash LED and one indicator LED. The high current flash LED can be +used in torch mode as well. + +Ranges below noted as [a, b] are closed ranges between a and b, i.e. a +and b are included in the range. + +Please also see common.txt in the same directory. + + +Required properties +=================== + +compatible : Must be "ams,as3645a". +reg : The I2C address of the device. Typically 0x30. +#address-cells : 1 +#size-cells : 0 + + +Required properties of the flash child node (0) +=============================================== + +reg: 0 +flash-timeout-us: Flash timeout in microseconds. The value must be in + the range [100000, 850000] and divisible by 50000. +flash-max-microamp: Maximum flash current in microamperes. Has to be + in the range between [200000, 500000] and + divisible by 20000. +led-max-microamp: Maximum torch (assist) current in microamperes. The + value must be in the range between [20000, 160000] and + divisible by 20000. +ams,input-max-microamp: Maximum flash controller input current. The + value must be in the range [1250000, 2000000] + and divisible by 50000. + + +Optional properties of the flash child node +=========================================== + +label : The label of the flash LED. + + +Required properties of the indicator child node (1) +=================================================== + +reg: 1 +led-max-microamp: Maximum indicator current. The allowed values are + 2500, 5000, 7500 and 10000. + +Optional properties of the indicator child node +=============================================== + +label : The label of the indicator LED. + + +Example +======= + + as3645a@30 { + #address-cells: 1 + #size-cells: 0 + reg = <0x30>; + compatible = "ams,as3645a"; + flash@0 { + reg = <0x0>; + flash-timeout-us = <150000>; + flash-max-microamp = <320000>; + led-max-microamp = <60000>; + ams,input-max-microamp = <1750000>; + label = "as3645a:flash"; + }; + indicator@1 { + reg = <0x1>; + led-max-microamp = <10000>; + label = "as3645a:indicator"; + }; + }; diff --git a/Documentation/devicetree/bindings/leds/irled/gpio-ir-tx.txt b/Documentation/devicetree/bindings/leds/irled/gpio-ir-tx.txt new file mode 100644 index 000000000000..cbe8dfd29715 --- /dev/null +++ b/Documentation/devicetree/bindings/leds/irled/gpio-ir-tx.txt @@ -0,0 +1,14 @@ +Device tree bindings for IR LED connected through gpio pin which is used as +remote controller transmitter. + +Required properties: + - compatible: should be "gpio-ir-tx". 
+ - gpios : Should specify the IR LED GPIO, see "gpios property" in + Documentation/devicetree/bindings/gpio/gpio.txt. Active low LEDs + should be indicated using flags in the GPIO specifier. + +Example: + irled@0 { + compatible = "gpio-ir-tx"; + gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>; + }; diff --git a/Documentation/devicetree/bindings/leds/irled/pwm-ir-tx.txt b/Documentation/devicetree/bindings/leds/irled/pwm-ir-tx.txt new file mode 100644 index 000000000000..66e5672c2e3d --- /dev/null +++ b/Documentation/devicetree/bindings/leds/irled/pwm-ir-tx.txt @@ -0,0 +1,13 @@ +Device tree bindings for IR LED connected through pwm pin which is used as +remote controller transmitter. + +Required properties: + - compatible: should be "pwm-ir-tx". + - pwms : PWM property to point to the PWM device (phandle)/port (id) + and to specify the period time to be used: <&phandle id period_ns>; + +Example: + irled { + compatible = "pwm-ir-tx"; + pwms = <&pwm0 0 10000000>; + }; diff --git a/Documentation/devicetree/bindings/leds/leds-gpio.txt b/Documentation/devicetree/bindings/leds/leds-gpio.txt index 76535ca37120..a48dda268f81 100644 --- a/Documentation/devicetree/bindings/leds/leds-gpio.txt +++ b/Documentation/devicetree/bindings/leds/leds-gpio.txt @@ -18,6 +18,9 @@ LED sub-node properties: see Documentation/devicetree/bindings/leds/common.txt - retain-state-suspended: (optional) The suspend state can be retained.Such as charge-led gpio. +- retain-state-shutdown: (optional) Retain the state of the LED on shutdown. + Useful in BMC systems, for example when the BMC is rebooted while the host + remains up. - panic-indicator : (optional) see Documentation/devicetree/bindings/leds/common.txt diff --git a/Documentation/devicetree/bindings/leds/leds-pca955x.txt b/Documentation/devicetree/bindings/leds/leds-pca955x.txt new file mode 100644 index 000000000000..7984efb767b4 --- /dev/null +++ b/Documentation/devicetree/bindings/leds/leds-pca955x.txt @@ -0,0 +1,88 @@ +* NXP - pca955x LED driver + +The PCA955x family of chips are I2C LED blinkers whose pins not used +to control LEDs can be used as general purpose I/Os. The GPIO pins can +be input or output, and output pins can also be pulse-width controlled. + +Required properties: +- compatible : should be one of : + "nxp,pca9550" + "nxp,pca9551" + "nxp,pca9552" + "nxp,pca9553" +- #address-cells: must be 1 +- #size-cells: must be 0 +- reg: I2C slave address. depends on the model. + +Optional properties: +- gpio-controller: allows pins to be used as GPIOs. +- #gpio-cells: must be 2. +- gpio-line-names: define the names of the GPIO lines + +LED sub-node properties: +- reg : number of LED line. 
+ from 0 to 1 for the pca9550 + from 0 to 7 for the pca9551 + from 0 to 15 for the pca9552 + from 0 to 3 for the pca9553 +- type: (optional) either + PCA9532_TYPE_NONE + PCA9532_TYPE_LED + PCA9532_TYPE_GPIO + see dt-bindings/leds/leds-pca955x.h (default to LED) +- label : (optional) + see Documentation/devicetree/bindings/leds/common.txt +- linux,default-trigger : (optional) + see Documentation/devicetree/bindings/leds/common.txt + +Examples: + +pca9552: pca9552@60 { + compatible = "nxp,pca9552"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x60>; + + gpio-controller; + #gpio-cells = <2>; + gpio-line-names = "GPIO12", "GPIO13", "GPIO14", "GPIO15"; + + gpio@12 { + reg = <12>; + type = ; + }; + gpio@13 { + reg = <13>; + type = ; + }; + gpio@14 { + reg = <14>; + type = ; + }; + gpio@15 { + reg = <15>; + type = ; + }; + + led@0 { + label = "red:power"; + linux,default-trigger = "default-on"; + reg = <0>; + type = ; + }; + led@1 { + label = "green:power"; + reg = <1>; + type = ; + }; + led@2 { + label = "pca9552:yellow"; + reg = <2>; + type = ; + }; + led@3 { + label = "pca9552:white"; + reg = <3>; + type = ; + }; +}; diff --git a/Documentation/devicetree/bindings/media/i2c/adv748x.txt b/Documentation/devicetree/bindings/media/i2c/adv748x.txt new file mode 100644 index 000000000000..21ffb5ed8183 --- /dev/null +++ b/Documentation/devicetree/bindings/media/i2c/adv748x.txt @@ -0,0 +1,95 @@ +* Analog Devices ADV748X video decoder with HDMI receiver + +The ADV7481 and ADV7482 are multi format video decoders with an integrated +HDMI receiver. They can output CSI-2 on two independent outputs TXA and TXB +from three input sources HDMI, analog and TTL. + +Required Properties: + + - compatible: Must contain one of the following + - "adi,adv7481" for the ADV7481 + - "adi,adv7482" for the ADV7482 + + - reg: I2C slave address + +Optional Properties: + + - interrupt-names: Should specify the interrupts as "intrq1", "intrq2" and/or + "intrq3". All interrupts are optional. The "intrq3" interrupt + is only available on the adv7481 + - interrupts: Specify the interrupt lines for the ADV748x + +The device node must contain one 'port' child node per device input and output +port, in accordance with the video interface bindings defined in +Documentation/devicetree/bindings/media/video-interfaces.txt. The port nodes +are numbered as follows. + + Name Type Port + --------------------------------------- + AIN0 sink 0 + AIN1 sink 1 + AIN2 sink 2 + AIN3 sink 3 + AIN4 sink 4 + AIN5 sink 5 + AIN6 sink 6 + AIN7 sink 7 + HDMI sink 8 + TTL sink 9 + TXA source 10 + TXB source 11 + +The digital output port nodes must contain at least one endpoint. + +Ports are optional if they are not connected to anything at the hardware level. 
+ +Example: + + video-receiver@70 { + compatible = "adi,adv7482"; + reg = <0x70>; + + #address-cells = <1>; + #size-cells = <0>; + + interrupt-parent = <&gpio6>; + interrupt-names = "intrq1", "intrq2"; + interrupts = <30 IRQ_TYPE_LEVEL_LOW>, + <31 IRQ_TYPE_LEVEL_LOW>; + + port@7 { + reg = <7>; + + adv7482_ain7: endpoint { + remote-endpoint = <&cvbs_in>; + }; + }; + + port@8 { + reg = <8>; + + adv7482_hdmi: endpoint { + remote-endpoint = <&hdmi_in>; + }; + }; + + port@10 { + reg = <10>; + + adv7482_txa: endpoint { + clock-lanes = <0>; + data-lanes = <1 2 3 4>; + remote-endpoint = <&csi40_in>; + }; + }; + + port@11 { + reg = <11>; + + adv7482_txb: endpoint { + clock-lanes = <0>; + data-lanes = <1>; + remote-endpoint = <&csi20_in>; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/media/i2c/dongwoon,dw9714.txt b/Documentation/devicetree/bindings/media/i2c/dongwoon,dw9714.txt new file mode 100644 index 000000000000..b88dcdd41def --- /dev/null +++ b/Documentation/devicetree/bindings/media/i2c/dongwoon,dw9714.txt @@ -0,0 +1,9 @@ +Dongwoon Anatech DW9714 camera voice coil lens driver + +DW9174 is a 10-bit DAC with current sink capability. It is intended +for driving voice coil lenses in camera modules. + +Mandatory properties: + +- compatible: "dongwoon,dw9714" +- reg: I²C slave address diff --git a/Documentation/devicetree/bindings/media/meson-ao-cec.txt b/Documentation/devicetree/bindings/media/meson-ao-cec.txt new file mode 100644 index 000000000000..8671bdb08080 --- /dev/null +++ b/Documentation/devicetree/bindings/media/meson-ao-cec.txt @@ -0,0 +1,28 @@ +* Amlogic Meson AO-CEC driver + +The Amlogic Meson AO-CEC module is present is Amlogic SoCs and its purpose is +to handle communication between HDMI connected devices over the CEC bus. + +Required properties: + - compatible : value should be following + "amlogic,meson-gx-ao-cec" + + - reg : Physical base address of the IP registers and length of memory + mapped region. + + - interrupts : AO-CEC interrupt number to the CPU. + - clocks : from common clock binding: handle to AO-CEC clock. + - clock-names : from common clock binding: must contain "core", + corresponding to entry in the clocks property. + - hdmi-phandle: phandle to the HDMI controller + +Example: + +cec_AO: cec@100 { + compatible = "amlogic,meson-gx-ao-cec"; + reg = <0x0 0x00100 0x0 0x14>; + interrupts = ; + clocks = <&clkc_AO CLKID_AO_CEC_32K>; + clock-names = "core"; + hdmi-phandle = <&hdmi_tx>; +}; diff --git a/Documentation/devicetree/bindings/media/mtk-cir.txt b/Documentation/devicetree/bindings/media/mtk-cir.txt index 2be2005577d6..5e18087ce11f 100644 --- a/Documentation/devicetree/bindings/media/mtk-cir.txt +++ b/Documentation/devicetree/bindings/media/mtk-cir.txt @@ -2,10 +2,14 @@ Device-Tree bindings for Mediatek consumer IR controller found in Mediatek SoC family Required properties: -- compatible : "mediatek,mt7623-cir" +- compatible : Should be + "mediatek,mt7623-cir": for MT7623 SoC + "mediatek,mt7622-cir": for MT7622 SoC - clocks : list of clock specifiers, corresponding to entries in clock-names property; -- clock-names : should contain "clk" entries; +- clock-names : should contain + - "clk" entries: for MT7623 SoC + - "clk", "bus" entries: for MT7622 SoC - interrupts : should contain IR IRQ number; - reg : should contain IO map address for IR. 
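For the MT7622 entries described above, a minimal consumer IR node sketch may help; it only illustrates the binding as documented, and the unit address, interrupt specifier and clock phandles below are placeholder assumptions rather than values taken from this patch:

	cir: cir@10009000 {
		compatible = "mediatek,mt7622-cir";
		reg = <0x10009000 0x1000>;		/* placeholder IO map address */
		interrupts = <0 175 4>;			/* placeholder IR IRQ specifier */
		clocks = <&ir_clk>, <&ir_bus_clk>;	/* placeholder clock phandles */
		clock-names = "clk", "bus";		/* MT7622 requires both entries */
	};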
diff --git a/Documentation/devicetree/bindings/media/pxa-camera.txt b/Documentation/devicetree/bindings/media/pxa-camera.txt index 11f5b5d51af8..bc03ec096269 100644 --- a/Documentation/devicetree/bindings/media/pxa-camera.txt +++ b/Documentation/devicetree/bindings/media/pxa-camera.txt @@ -24,7 +24,6 @@ Example: clock-frequency = <50000000>; clock-output-names = "qci_mclk"; - status = "okay"; port { #address-cells = <1>; diff --git a/Documentation/devicetree/bindings/media/qcom,camss.txt b/Documentation/devicetree/bindings/media/qcom,camss.txt new file mode 100644 index 000000000000..cadecebc73f7 --- /dev/null +++ b/Documentation/devicetree/bindings/media/qcom,camss.txt @@ -0,0 +1,197 @@ +Qualcomm Camera Subsystem + +* Properties + +- compatible: + Usage: required + Value type: + Definition: Should contain: + - "qcom,msm8916-camss" +- reg: + Usage: required + Value type: + Definition: Register ranges as listed in the reg-names property. +- reg-names: + Usage: required + Value type: + Definition: Should contain the following entries: + - "csiphy0" + - "csiphy0_clk_mux" + - "csiphy1" + - "csiphy1_clk_mux" + - "csid0" + - "csid1" + - "ispif" + - "csi_clk_mux" + - "vfe0" +- interrupts: + Usage: required + Value type: + Definition: Interrupts as listed in the interrupt-names property. +- interrupt-names: + Usage: required + Value type: + Definition: Should contain the following entries: + - "csiphy0" + - "csiphy1" + - "csid0" + - "csid1" + - "ispif" + - "vfe0" +- power-domains: + Usage: required + Value type: + Definition: A phandle and power domain specifier pairs to the + power domain which is responsible for collapsing + and restoring power to the peripheral. +- clocks: + Usage: required + Value type: + Definition: A list of phandle and clock specifier pairs as listed + in clock-names property. +- clock-names: + Usage: required + Value type: + Definition: Should contain the following entries: + - "camss_top_ahb" + - "ispif_ahb" + - "csiphy0_timer" + - "csiphy1_timer" + - "csi0_ahb" + - "csi0" + - "csi0_phy" + - "csi0_pix" + - "csi0_rdi" + - "csi1_ahb" + - "csi1" + - "csi1_phy" + - "csi1_pix" + - "csi1_rdi" + - "camss_ahb" + - "camss_vfe_vfe" + - "camss_csi_vfe" + - "iface" + - "bus" +- vdda-supply: + Usage: required + Value type: + Definition: A phandle to voltage supply for CSI2. +- iommus: + Usage: required + Value type: + Definition: A list of phandle and IOMMU specifier pairs. + +* Nodes + +- ports: + Usage: required + Definition: As described in video-interfaces.txt in same directory. + Properties: + - reg: + Usage: required + Value type: + Definition: Selects CSI2 PHY interface - PHY0 or PHY1. + Endpoint node properties: + - clock-lanes: + Usage: required + Value type: + Definition: The physical clock lane index. The value + must always be <1> as the physical clock + lane is lane 1. + - data-lanes: + Usage: required + Value type: + Definition: An array of physical data lanes indexes. + Position of an entry determines the logical + lane number, while the value of an entry + indicates physical lane index. Lane swapping + is supported. 
+ +* An Example + + camss: camss@1b00000 { + compatible = "qcom,msm8916-camss"; + reg = <0x1b0ac00 0x200>, + <0x1b00030 0x4>, + <0x1b0b000 0x200>, + <0x1b00038 0x4>, + <0x1b08000 0x100>, + <0x1b08400 0x100>, + <0x1b0a000 0x500>, + <0x1b00020 0x10>, + <0x1b10000 0x1000>; + reg-names = "csiphy0", + "csiphy0_clk_mux", + "csiphy1", + "csiphy1_clk_mux", + "csid0", + "csid1", + "ispif", + "csi_clk_mux", + "vfe0"; + interrupts = , + , + , + , + , + ; + interrupt-names = "csiphy0", + "csiphy1", + "csid0", + "csid1", + "ispif", + "vfe0"; + power-domains = <&gcc VFE_GDSC>; + clocks = <&gcc GCC_CAMSS_TOP_AHB_CLK>, + <&gcc GCC_CAMSS_ISPIF_AHB_CLK>, + <&gcc GCC_CAMSS_CSI0PHYTIMER_CLK>, + <&gcc GCC_CAMSS_CSI1PHYTIMER_CLK>, + <&gcc GCC_CAMSS_CSI0_AHB_CLK>, + <&gcc GCC_CAMSS_CSI0_CLK>, + <&gcc GCC_CAMSS_CSI0PHY_CLK>, + <&gcc GCC_CAMSS_CSI0PIX_CLK>, + <&gcc GCC_CAMSS_CSI0RDI_CLK>, + <&gcc GCC_CAMSS_CSI1_AHB_CLK>, + <&gcc GCC_CAMSS_CSI1_CLK>, + <&gcc GCC_CAMSS_CSI1PHY_CLK>, + <&gcc GCC_CAMSS_CSI1PIX_CLK>, + <&gcc GCC_CAMSS_CSI1RDI_CLK>, + <&gcc GCC_CAMSS_AHB_CLK>, + <&gcc GCC_CAMSS_VFE0_CLK>, + <&gcc GCC_CAMSS_CSI_VFE0_CLK>, + <&gcc GCC_CAMSS_VFE_AHB_CLK>, + <&gcc GCC_CAMSS_VFE_AXI_CLK>; + clock-names = "camss_top_ahb", + "ispif_ahb", + "csiphy0_timer", + "csiphy1_timer", + "csi0_ahb", + "csi0", + "csi0_phy", + "csi0_pix", + "csi0_rdi", + "csi1_ahb", + "csi1", + "csi1_phy", + "csi1_pix", + "csi1_rdi", + "camss_ahb", + "camss_vfe_vfe", + "camss_csi_vfe", + "iface", + "bus"; + vdda-supply = <&pm8916_l2>; + iommus = <&apps_iommu 3>; + ports { + #address-cells = <1>; + #size-cells = <0>; + port@0 { + reg = <0>; + csiphy0_ep: endpoint { + clock-lanes = <1>; + data-lanes = <0 2>; + remote-endpoint = <&ov5645_ep>; + }; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/media/renesas,drif.txt b/Documentation/devicetree/bindings/media/renesas,drif.txt index 39516b94c28f..0d8974aa8b38 100644 --- a/Documentation/devicetree/bindings/media/renesas,drif.txt +++ b/Documentation/devicetree/bindings/media/renesas,drif.txt @@ -40,6 +40,7 @@ To summarize, Required properties of an internal channel: ------------------------------------------- - compatible: "renesas,r8a7795-drif" if DRIF controller is a part of R8A7795 SoC. + "renesas,r8a7796-drif" if DRIF controller is a part of R8A7796 SoC. "renesas,rcar-gen3-drif" for a generic R-Car Gen3 compatible device. 
When compatible with the generic version, nodes must list the diff --git a/Documentation/devicetree/bindings/media/s5p-cec.txt b/Documentation/devicetree/bindings/media/s5p-cec.txt index 1b1a10ba48ce..6f3756da900f 100644 --- a/Documentation/devicetree/bindings/media/s5p-cec.txt +++ b/Documentation/devicetree/bindings/media/s5p-cec.txt @@ -33,5 +33,4 @@ hdmicec: cec@100B0000 { hdmi-phandle = <&hdmi>; pinctrl-names = "default"; pinctrl-0 = <&hdmi_cec>; - status = "okay"; }; diff --git a/Documentation/devicetree/bindings/media/samsung-fimc.txt b/Documentation/devicetree/bindings/media/samsung-fimc.txt index 922d6f8e74be..e4e15d8d7521 100644 --- a/Documentation/devicetree/bindings/media/samsung-fimc.txt +++ b/Documentation/devicetree/bindings/media/samsung-fimc.txt @@ -166,7 +166,6 @@ Example: clock-output-names = "cam_a_clkout", "cam_b_clkout"; pinctrl-names = "default"; pinctrl-0 = <&cam_port_a_clk_active>; - status = "okay"; #address-cells = <1>; #size-cells = <1>; @@ -189,7 +188,6 @@ Example: compatible = "samsung,exynos4210-fimc"; reg = <0x11800000 0x1000>; interrupts = <0 85 0>; - status = "okay"; }; csis_0: csis@11880000 { diff --git a/Documentation/devicetree/bindings/media/stih407-c8sectpfe.txt b/Documentation/devicetree/bindings/media/stih407-c8sectpfe.txt index cc51b1fd6e0c..6af3fc210ecc 100644 --- a/Documentation/devicetree/bindings/media/stih407-c8sectpfe.txt +++ b/Documentation/devicetree/bindings/media/stih407-c8sectpfe.txt @@ -52,7 +52,6 @@ Example: c8sectpfe@08a20000 { compatible = "st,stih407-c8sectpfe"; - status = "okay"; reg = <0x08a20000 0x10000>, <0x08a00000 0x4000>; reg-names = "stfe", "stfe-ram"; interrupts = , ; diff --git a/Documentation/devicetree/bindings/media/ti,da850-vpif.txt b/Documentation/devicetree/bindings/media/ti,da850-vpif.txt index df7182a63e59..e47c7ccc57f1 100644 --- a/Documentation/devicetree/bindings/media/ti,da850-vpif.txt +++ b/Documentation/devicetree/bindings/media/ti,da850-vpif.txt @@ -59,7 +59,6 @@ I2C-connected TVP5147 decoder: tvp5147@5d { compatible = "ti,tvp5147"; reg = <0x5d>; - status = "okay"; port { composite_in: endpoint { diff --git a/Documentation/devicetree/bindings/media/video-interfaces.txt b/Documentation/devicetree/bindings/media/video-interfaces.txt index 9cd2a369125d..852041a7480c 100644 --- a/Documentation/devicetree/bindings/media/video-interfaces.txt +++ b/Documentation/devicetree/bindings/media/video-interfaces.txt @@ -76,6 +76,11 @@ Optional endpoint properties mode horizontal and vertical synchronization signals are provided to the slave device (data source) by the master device (data sink). In the master mode the data source device is also the source of the synchronization signals. +- bus-type: data bus type. Possible values are: + 0 - autodetect based on other properties (MIPI CSI-2 D-PHY, parallel or Bt656) + 1 - MIPI CSI-2 C-PHY + 2 - MIPI CSI1 + 3 - CCP2 - bus-width: number of data lines actively used, valid for the parallel busses. - data-shift: on the parallel data busses, if bus-width is used to specify the number of data lines, data-shift can be used to specify which data lines are @@ -112,7 +117,8 @@ Optional endpoint properties should be the combined length of data-lanes and clock-lanes properties. If the lane-polarities property is omitted, the value must be interpreted as 0 (normal). This property is valid for serial busses only. - +- strobe: Whether the clock signal is used as clock (0) or strobe (1). Used + with CCP2, for instance. 
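To illustrate the bus-type and strobe properties added above, an endpoint describing a CCP2 bus running in strobe mode could look like the sketch below; the node label, lane numbers and remote-endpoint phandle are placeholders, not part of any existing binding:

	sensor_out: endpoint {
		bus-type = <3>;			/* CCP2 */
		strobe = <1>;			/* clock signal used as strobe */
		clock-lanes = <0>;
		data-lanes = <1>;
		remote-endpoint = <&ccp2_in>;	/* placeholder phandle */
	};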
Example ------- diff --git a/Documentation/devicetree/bindings/media/zx-irdec.txt b/Documentation/devicetree/bindings/media/zx-irdec.txt new file mode 100644 index 000000000000..295b9fab593e --- /dev/null +++ b/Documentation/devicetree/bindings/media/zx-irdec.txt @@ -0,0 +1,14 @@ +IR Decoder (IRDEC) on ZTE ZX family SoCs + +Required properties: + - compatible: Should be "zte,zx296718-irdec". + - reg: Physical base address and length of IRDEC registers. + - interrupts: Interrupt number of IRDEC. + +Exmaples: + + irdec: ir-decoder@111000 { + compatible = "zte,zx296718-irdec"; + reg = <0x111000 0x1000>; + interrupts = ; + }; diff --git a/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt b/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt index 21277a56e94c..ddf46b8856a5 100644 --- a/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt +++ b/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt @@ -15,6 +15,9 @@ Required properties: the register. - "smi" : It's the clock for transfer data and command. +Required property for mt2701: +- mediatek,larb-id :the hardware id of this larb. + Example: larb1: larb@16010000 { compatible = "mediatek,mt8173-smi-larb"; @@ -25,3 +28,15 @@ Example: <&vdecsys CLK_VDEC_LARB_CKEN>; clock-names = "apb", "smi"; }; + +Example for mt2701: + larb0: larb@14010000 { + compatible = "mediatek,mt2701-smi-larb"; + reg = <0 0x14010000 0 0x1000>; + mediatek,smi = <&smi_common>; + mediatek,larb-id = <0>; + clocks = <&mmsys CLK_MM_SMI_LARB0>, + <&mmsys CLK_MM_SMI_LARB0>; + clock-names = "apb", "smi"; + power-domains = <&scpsys MT2701_POWER_DOMAIN_DISP>; + }; diff --git a/Documentation/devicetree/bindings/memory-controllers/mvebu-devbus.txt b/Documentation/devicetree/bindings/memory-controllers/mvebu-devbus.txt index 1ee3bc09f319..8b9388cc1ccc 100644 --- a/Documentation/devicetree/bindings/memory-controllers/mvebu-devbus.txt +++ b/Documentation/devicetree/bindings/memory-controllers/mvebu-devbus.txt @@ -130,7 +130,6 @@ The reg property implicitly specifies the chip select as this: Example: devbus-bootcs@d0010400 { - status = "okay"; ranges = <0 0xf0000000 0x1000000>; /* @addr 0xf0000000, size 0x1000000 */ #address-cells = <1>; #size-cells = <1>; diff --git a/Documentation/devicetree/bindings/mfd/act8945a.txt b/Documentation/devicetree/bindings/mfd/act8945a.txt index 462819ac3da8..e6f168db6c72 100644 --- a/Documentation/devicetree/bindings/mfd/act8945a.txt +++ b/Documentation/devicetree/bindings/mfd/act8945a.txt @@ -12,7 +12,6 @@ Example: pmic@5b { compatible = "active-semi,act8945a"; reg = <0x5b>; - status = "okay"; active-semi,vsel-high; @@ -79,6 +78,5 @@ Example: active-semi,input-voltage-threshold-microvolt = <6600>; active-semi,precondition-timeout = <40>; active-semi,total-timeout = <3>; - status = "okay"; }; }; diff --git a/Documentation/devicetree/bindings/mfd/atmel-hlcdc.txt b/Documentation/devicetree/bindings/mfd/atmel-hlcdc.txt index eec40be7f79a..3f643ef121ff 100644 --- a/Documentation/devicetree/bindings/mfd/atmel-hlcdc.txt +++ b/Documentation/devicetree/bindings/mfd/atmel-hlcdc.txt @@ -25,7 +25,6 @@ Example: clocks = <&lcdc_clk>, <&lcdck>, <&clk32k>; clock-names = "periph_clk","sys_clk", "slow_clk"; interrupts = <36 IRQ_TYPE_LEVEL_HIGH 0>; - status = "disabled"; hlcdc-display-controller { compatible = "atmel,hlcdc-display-controller"; diff --git a/Documentation/devicetree/bindings/mfd/atmel-smc.txt b/Documentation/devicetree/bindings/mfd/atmel-smc.txt index 
26eeed373934..1103ce2030fb 100644 --- a/Documentation/devicetree/bindings/mfd/atmel-smc.txt +++ b/Documentation/devicetree/bindings/mfd/atmel-smc.txt @@ -8,6 +8,7 @@ Required properties: - compatible: Should be one of the following "atmel,at91sam9260-smc", "syscon" "atmel,sama5d3-smc", "syscon" + "atmel,sama5d2-smc", "syscon" - reg: Contains offset/length value of the SMC memory region. diff --git a/Documentation/devicetree/bindings/mfd/axp20x.txt b/Documentation/devicetree/bindings/mfd/axp20x.txt index aca09af66514..9455503b0299 100644 --- a/Documentation/devicetree/bindings/mfd/axp20x.txt +++ b/Documentation/devicetree/bindings/mfd/axp20x.txt @@ -7,7 +7,14 @@ axp209 (X-Powers) axp221 (X-Powers) axp223 (X-Powers) axp803 (X-Powers) +axp806 (X-Powers) axp809 (X-Powers) +axp813 (X-Powers) + +The AXP813 is 2 chips packaged into 1. The 2 chips do not share anything +other than the packaging. Pins are routed separately. As such they should +be treated as separate entities. The other half is an AC100 RTC/codec +combo chip. Please see ./ac100.txt for its bindings. Required properties: - compatible: should be one of: @@ -19,6 +26,7 @@ Required properties: * "x-powers,axp803" * "x-powers,axp806" * "x-powers,axp809" + * "x-powers,axp813" - reg: The I2C slave address or RSB hardware address for the AXP chip - interrupt-parent: The parent interrupt controller - interrupts: SoC NMI / GPIO interrupt connected to the PMIC's IRQ pin @@ -28,12 +36,14 @@ Required properties: Optional properties: - x-powers,dcdc-freq: defines the work frequency of DC-DC in KHz AXP152/20X: range: 750-1875, Default: 1.5 MHz - AXP22X/80X: range: 1800-4050, Default: 3 MHz + AXP22X/8XX: range: 1800-4050, Default: 3 MHz -- x-powers,drive-vbus-en: axp221 / axp223 only boolean, set this when the - N_VBUSEN pin is used as an output pin to control an external - regulator to drive the OTG VBus, rather then as an input pin - which signals whether the board is driving OTG VBus or not. +- x-powers,drive-vbus-en: boolean, set this when the N_VBUSEN pin is + used as an output pin to control an external + regulator to drive the OTG VBus, rather then + as an input pin which signals whether the + board is driving OTG VBus or not. + (axp221 / axp223 / axp813 only) - x-powers,master-mode: Boolean (axp806 only). Set this when the PMIC is wired for master mode. The default is slave mode. 
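A minimal AXP813 node consistent with the properties above might look like the following sketch; the RSB hardware address and interrupt wiring are illustrative assumptions only:

	axp813: pmic@3a3 {
		compatible = "x-powers,axp813";
		reg = <0x3a3>;				/* placeholder RSB hardware address */
		interrupt-parent = <&r_intc>;		/* placeholder NMI/GPIO interrupt parent */
		interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
		interrupt-controller;
		#interrupt-cells = <1>;
		x-powers,drive-vbus-en;			/* N_VBUSEN drives an external OTG VBus regulator */
	};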
@@ -171,6 +181,36 @@ LDO_IO1 : LDO : ips-supply : GPIO 1 RTC_LDO : LDO : ips-supply : always on SW : On/Off Switch : swin-supply +AXP813 regulators, type, and corresponding input supply names: + +Regulator Type Supply Name Notes +--------- ---- ----------- ----- +DCDC1 : DC-DC buck : vin1-supply +DCDC2 : DC-DC buck : vin2-supply : poly-phase capable +DCDC3 : DC-DC buck : vin3-supply : poly-phase capable +DCDC4 : DC-DC buck : vin4-supply +DCDC5 : DC-DC buck : vin5-supply : poly-phase capable +DCDC6 : DC-DC buck : vin6-supply : poly-phase capable +DCDC7 : DC-DC buck : vin7-supply +ALDO1 : LDO : aldoin-supply : shared supply +ALDO2 : LDO : aldoin-supply : shared supply +ALDO3 : LDO : aldoin-supply : shared supply +DLDO1 : LDO : dldoin-supply : shared supply +DLDO2 : LDO : dldoin-supply : shared supply +DLDO3 : LDO : dldoin-supply : shared supply +DLDO4 : LDO : dldoin-supply : shared supply +ELDO1 : LDO : eldoin-supply : shared supply +ELDO2 : LDO : eldoin-supply : shared supply +ELDO3 : LDO : eldoin-supply : shared supply +FLDO1 : LDO : fldoin-supply : shared supply +FLDO2 : LDO : fldoin-supply : shared supply +FLDO3 : LDO : fldoin-supply : shared supply +LDO_IO0 : LDO : ips-supply : GPIO 0 +LDO_IO1 : LDO : ips-supply : GPIO 1 +RTC_LDO : LDO : ips-supply : always on +SW : On/Off Switch : swin-supply +DRIVEVBUS : Enable output : drivevbus-supply : external regulator + Example: axp209: pmic@34 { diff --git a/Documentation/devicetree/bindings/mfd/bd9571mwv.txt b/Documentation/devicetree/bindings/mfd/bd9571mwv.txt new file mode 100644 index 000000000000..9ab216a851d5 --- /dev/null +++ b/Documentation/devicetree/bindings/mfd/bd9571mwv.txt @@ -0,0 +1,49 @@ +* ROHM BD9571MWV Power Management Integrated Circuit (PMIC) bindings + +Required properties: + - compatible : Should be "rohm,bd9571mwv". + - reg : I2C slave address. + - interrupt-parent : Phandle to the parent interrupt controller. + - interrupts : The interrupt line the device is connected to. + - interrupt-controller : Marks the device node as an interrupt controller. + - #interrupt-cells : The number of cells to describe an IRQ, should be 2. + The first cell is the IRQ number. + The second cell is the flags, encoded as trigger + masks from ../interrupt-controller/interrupts.txt. + - gpio-controller : Marks the device node as a GPIO Controller. + - #gpio-cells : Should be two. The first cell is the pin number and + the second cell is used to specify flags. + See ../gpio/gpio.txt for more information. + - regulators: : List of child nodes that specify the regulator + initialization data. Child nodes must be named + after their hardware counterparts: + - vd09 + - vd18 + - vd25 + - vd33 + - dvfs + Each child node is defined using the standard + binding for regulators. 
+ +Example: + + pmic: pmic@30 { + compatible = "rohm,bd9571mwv"; + reg = <0x30>; + interrupt-parent = <&gpio2>; + interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + interrupt-controller; + #interrupt-cells = <2>; + gpio-controller; + #gpio-cells = <2>; + + regulators { + dvfs: dvfs { + regulator-name = "dvfs"; + regulator-min-microvolt = <750000>; + regulator-max-microvolt = <1030000>; + regulator-boot-on; + regulator-always-on; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/mfd/da9052-i2c.txt b/Documentation/devicetree/bindings/mfd/da9052-i2c.txt index 9554292dc6cb..07c69c0c6624 100644 --- a/Documentation/devicetree/bindings/mfd/da9052-i2c.txt +++ b/Documentation/devicetree/bindings/mfd/da9052-i2c.txt @@ -4,6 +4,14 @@ Required properties: - compatible : Should be "dlg,da9052", "dlg,da9053-aa", "dlg,da9053-ab", or "dlg,da9053-bb" +Optional properties: +- dlg,tsi-as-adc : Boolean, if set the X+, X-, Y+, Y- touchscreen + input lines are used as general purpose analogue + input. +- tsiref-supply: Phandle to the regulator, which provides the reference + voltage for the TSIREF pin. Must be provided when the + touchscreen pins are used for ADC purposes. + Sub-nodes: - regulators : Contain the regulator nodes. The DA9052/53 regulators are bound using their names as listed below: @@ -29,7 +37,6 @@ Sub-nodes: Examples: i2c@63fc8000 { /* I2C1 */ - status = "okay"; pmic: dialog@48 { compatible = "dlg,da9053-aa"; diff --git a/Documentation/devicetree/bindings/mfd/mc13xxx.txt b/Documentation/devicetree/bindings/mfd/mc13xxx.txt index 8aba48821a85..39ba4146769d 100644 --- a/Documentation/devicetree/bindings/mfd/mc13xxx.txt +++ b/Documentation/devicetree/bindings/mfd/mc13xxx.txt @@ -116,7 +116,6 @@ ecspi@70010000 { /* ECSPI1 */ fsl,spi-num-chipselects = <2>; cs-gpios = <&gpio4 24 0>, /* GPIO4_24 */ <&gpio4 25 0>; /* GPIO4_25 */ - status = "okay"; pmic: mc13892@0 { #address-cells = <1>; diff --git a/Documentation/devicetree/bindings/mfd/mxs-lradc.txt b/Documentation/devicetree/bindings/mfd/mxs-lradc.txt index 555fb117d4fa..755cbef0647d 100644 --- a/Documentation/devicetree/bindings/mfd/mxs-lradc.txt +++ b/Documentation/devicetree/bindings/mfd/mxs-lradc.txt @@ -26,7 +26,6 @@ Example for i.MX23 SoC: compatible = "fsl,imx23-lradc"; reg = <0x80050000 0x2000>; interrupts = <36 37 38 39 40 41 42 43 44>; - status = "okay"; fsl,lradc-touchscreen-wires = <4>; fsl,ave-ctrl = <4>; fsl,ave-delay = <2>; @@ -39,7 +38,6 @@ Example for i.MX28 SoC: compatible = "fsl,imx28-lradc"; reg = <0x80050000 0x2000>; interrupts = <10 14 15 16 17 18 19 20 21 22 23 24 25>; - status = "okay"; fsl,lradc-touchscreen-wires = <5>; fsl,ave-ctrl = <4>; fsl,ave-delay = <2>; diff --git a/Documentation/devicetree/bindings/mfd/retu.txt b/Documentation/devicetree/bindings/mfd/retu.txt new file mode 100644 index 000000000000..876242394a16 --- /dev/null +++ b/Documentation/devicetree/bindings/mfd/retu.txt @@ -0,0 +1,25 @@ +* Device tree bindings for Nokia Retu and Tahvo multi-function device + +Retu and Tahvo are a multi-function devices found on Nokia Internet +Tablets (770, N800 and N810). The Retu chip provides watchdog timer +and power button control functionalities while Tahvo chip provides +USB transceiver functionality. + +Required properties: +- compatible: "nokia,retu" or "nokia,tahvo" +- reg: Specifies the CBUS slave address of the ASIC chip +- interrupts: The interrupt line the device is connected to +- interrupt-parent: The parent interrupt controller + +Example: + +cbus0 { + compatible = "i2c-cbus-gpio"; + ... 
+ retu: retu@1 { + compatible = "nokia,retu"; + interrupt-parent = <&gpio4>; + interrupts = <12 IRQ_TYPE_EDGE_RISING>; + reg = <0x1>; + }; +}; diff --git a/Documentation/devicetree/bindings/mfd/rk808.txt b/Documentation/devicetree/bindings/mfd/rk808.txt index 9636ae8d8d41..91b65227afeb 100644 --- a/Documentation/devicetree/bindings/mfd/rk808.txt +++ b/Documentation/devicetree/bindings/mfd/rk808.txt @@ -1,11 +1,14 @@ RK8XX Power Management Integrated Circuit The rk8xx family current members: +rk805 rk808 rk818 Required properties: -- compatible: "rockchip,rk808", "rockchip,rk818" +- compatible: "rockchip,rk805" +- compatible: "rockchip,rk808" +- compatible: "rockchip,rk818" - reg: I2C slave address - interrupt-parent: The parent interrupt controller. - interrupts: the interrupt outputs of the controller. @@ -18,6 +21,14 @@ Optional properties: - rockchip,system-power-controller: Telling whether or not this pmic is controlling the system power. +Optional RK805 properties: +- vcc1-supply: The input supply for DCDC_REG1 +- vcc2-supply: The input supply for DCDC_REG2 +- vcc3-supply: The input supply for DCDC_REG3 +- vcc4-supply: The input supply for DCDC_REG4 +- vcc5-supply: The input supply for LDO_REG1 and LDO_REG2 +- vcc6-supply: The input supply for LDO_REG3 + Optional RK808 properties: - vcc1-supply: The input supply for DCDC_REG1 - vcc2-supply: The input supply for DCDC_REG2 @@ -56,6 +67,15 @@ by a child node of the 'regulators' node. /* standard regulator bindings here */ }; +Following regulators of the RK805 PMIC regulators are supported. Note that +the 'n' in regulator name, as in DCDC_REGn or LDOn, represents the DCDC or LDO +number as described in RK805 datasheet. + + - DCDC_REGn + - valid values for n are 1 to 4. + - LDO_REGn + - valid values for n are 1 to 3 + Following regulators of the RK808 PMIC block are supported. Note that the 'n' in regulator name, as in DCDC_REGn or LDOn, represents the DCDC or LDO number as described in RK808 datasheet. diff --git a/Documentation/devicetree/bindings/mfd/samsung,exynos5433-lpass.txt b/Documentation/devicetree/bindings/mfd/samsung,exynos5433-lpass.txt index df664018c148..d759da606f75 100644 --- a/Documentation/devicetree/bindings/mfd/samsung,exynos5433-lpass.txt +++ b/Documentation/devicetree/bindings/mfd/samsung,exynos5433-lpass.txt @@ -57,7 +57,6 @@ audio-subsystem { clock-names = "iis", "i2s_opclk0", "i2s_opclk1"; pinctrl-names = "default"; pinctrl-0 = <&i2s0_bus>; - status = "disabled"; }; serial_3: serial@11460000 { @@ -69,6 +68,5 @@ audio-subsystem { clock-names = "uart", "clk_uart_baud0"; pinctrl-names = "default"; pinctrl-0 = <&uart_aud_bus>; - status = "disabled"; }; }; diff --git a/Documentation/devicetree/bindings/mfd/stm32-lptimer.txt b/Documentation/devicetree/bindings/mfd/stm32-lptimer.txt new file mode 100644 index 000000000000..2a9ff29db9c9 --- /dev/null +++ b/Documentation/devicetree/bindings/mfd/stm32-lptimer.txt @@ -0,0 +1,48 @@ +STMicroelectronics STM32 Low-Power Timer + +The STM32 Low-Power Timer (LPTIM) is a 16-bit timer that provides several +functions: +- PWM output (with programmable prescaler, configurable polarity) +- Quadrature encoder, counter +- Trigger source for STM32 ADC/DAC (LPTIM_OUT) + +Required properties: +- compatible: Must be "st,stm32-lptimer". +- reg: Offset and length of the device's register set. +- clocks: Phandle to the clock used by the LP Timer module. +- clock-names: Must be "mux". +- #address-cells: Should be '<1>'. +- #size-cells: Should be '<0>'. 
+ +Optional subnodes: +- pwm: See ../pwm/pwm-stm32-lp.txt +- counter: See ../iio/timer/stm32-lptimer-cnt.txt +- trigger: See ../iio/timer/stm32-lptimer-trigger.txt + +Example: + + timer@40002400 { + compatible = "st,stm32-lptimer"; + reg = <0x40002400 0x400>; + clocks = <&timer_clk>; + clock-names = "mux"; + #address-cells = <1>; + #size-cells = <0>; + + pwm { + compatible = "st,stm32-pwm-lp"; + pinctrl-names = "default"; + pinctrl-0 = <&lppwm1_pins>; + }; + + trigger@0 { + compatible = "st,stm32-lptimer-trigger"; + reg = <0>; + }; + + counter { + compatible = "st,stm32-lptimer-counter"; + pinctrl-names = "default"; + pinctrl-0 = <&lptim1_in_pins>; + }; + }; diff --git a/Documentation/devicetree/bindings/mfd/tps6105x.txt b/Documentation/devicetree/bindings/mfd/tps6105x.txt new file mode 100644 index 000000000000..93602c7a19c8 --- /dev/null +++ b/Documentation/devicetree/bindings/mfd/tps6105x.txt @@ -0,0 +1,17 @@ +* Device tree bindings for TI TPS61050/61052 Boost Converters + +The TPS61050/TPS61052 is a high-power "white LED driver". The +device provides LED, GPIO and regulator functionalities. + +Required properties: +- compatible: "ti,tps61050" or "ti,tps61052" +- reg: Specifies the I2C slave address + +Example: + +i2c0 { + tps61052@33 { + compatible = "ti,tps61052"; + reg = <0x33>; + }; +}; diff --git a/Documentation/devicetree/bindings/mfd/zii,rave-sp.txt b/Documentation/devicetree/bindings/mfd/zii,rave-sp.txt new file mode 100644 index 000000000000..088eff9ddb78 --- /dev/null +++ b/Documentation/devicetree/bindings/mfd/zii,rave-sp.txt @@ -0,0 +1,39 @@ +Zodiac Inflight Innovations RAVE Supervisory Processor + +The RAVE Supervisory Processor communicates with the SoC over UART. It is +expected that its Device Tree node is specified as a child of the node +corresponding to the UART controller used for communication. + +Required parent device properties: + + - compatible: Should be one of: + - "zii,rave-sp-niu" + - "zii,rave-sp-mezz" + - "zii,rave-sp-esb" + - "zii,rave-sp-rdu1" + - "zii,rave-sp-rdu2" + + - current-speed: Should be set to the baud rate the SP device is using + +RAVE SP consists of the following sub-devices: + +Device Description +------ ----------- +rave-sp-wdt : Watchdog +rave-sp-nvmem : Interface to onboard EEPROM +rave-sp-backlight : Display backlight +rave-sp-hwmon : Interface to onboard hardware sensors +rave-sp-leds : Interface to onboard LEDs +rave-sp-input : Interface to onboard power button + +Example of usage: + + rdu { + compatible = "zii,rave-sp-rdu2"; + current-speed = <1000000>; + + watchdog { + compatible = "zii,rave-sp-watchdog"; + }; + }; + diff --git a/Documentation/devicetree/bindings/mips/lantiq/fpi-bus.txt b/Documentation/devicetree/bindings/mips/lantiq/fpi-bus.txt new file mode 100644 index 000000000000..0a2df4338332 --- /dev/null +++ b/Documentation/devicetree/bindings/mips/lantiq/fpi-bus.txt @@ -0,0 +1,31 @@ +Lantiq XWAY SoC FPI BUS binding +============================ + + +------------------------------------------------------------------------------- +Required properties: +- compatible : Should be one of + "lantiq,xrx200-fpi" +- reg : The address and length of the XBAR + configuration register. + Address and length of the FPI bus itself.
+- lantiq,rcu : A phandle to the RCU syscon +- lantiq,offset-endianness : Offset of the endianness configuration + register + +------------------------------------------------------------------------------- +Example for the FPI on the xrx200 SoCs: + fpi@10000000 { + compatible = "lantiq,xrx200-fpi"; + ranges = <0x0 0x10000000 0xf000000>; + reg = <0x1f400000 0x1000>, + <0x10000000 0xf000000>; + lantiq,rcu = <&rcu0>; + lantiq,offset-endianness = <0x4c>; + #address-cells = <1>; + #size-cells = <1>; + + gptu@e100a00 { + ...... + }; + }; diff --git a/Documentation/devicetree/bindings/mips/lantiq/rcu-gphy.txt b/Documentation/devicetree/bindings/mips/lantiq/rcu-gphy.txt new file mode 100644 index 000000000000..a0c19bd1ce66 --- /dev/null +++ b/Documentation/devicetree/bindings/mips/lantiq/rcu-gphy.txt @@ -0,0 +1,36 @@ +Lantiq XWAY SoC GPHY binding +============================ + +This binding describes a software-defined ethernet PHY, provided by the RCU +module on newer Lantiq XWAY SoCs (xRX200 and newer). + +------------------------------------------------------------------------------- +Required properties: +- compatible : Should be one of + "lantiq,xrx200a1x-gphy" + "lantiq,xrx200a2x-gphy" + "lantiq,xrx300-gphy" + "lantiq,xrx330-gphy" +- reg : Address of the GPHY FW load address register +- resets : Must reference the RCU GPHY reset bit +- reset-names : One entry, value must be "gphy" or optional "gphy2" +- clocks : A reference to the (PMU) GPHY clock gate + +Optional properties: +- lantiq,gphy-mode : GPHY_MODE_GE (default) or GPHY_MODE_FE as defined in + + + +------------------------------------------------------------------------------- +Example for the GPHYs on the xRX200 SoCs: + +#include + gphy0: gphy@20 { + compatible = "lantiq,xrx200a2x-gphy"; + reg = <0x20 0x4>; + + resets = <&reset0 31 30>, <&reset1 7 7>; + reset-names = "gphy", "gphy2"; + clocks = <&pmu0 XRX200_PMU_GATE_GPHY>; + lantiq,gphy-mode = ; + }; diff --git a/Documentation/devicetree/bindings/mips/lantiq/rcu.txt b/Documentation/devicetree/bindings/mips/lantiq/rcu.txt new file mode 100644 index 000000000000..a086f1e1cdd7 --- /dev/null +++ b/Documentation/devicetree/bindings/mips/lantiq/rcu.txt @@ -0,0 +1,89 @@ +Lantiq XWAY SoC RCU binding +=========================== + +This binding describes the RCU (reset controller unit) multifunction device, +where each sub-device has its own set of registers. + +The RCU register range is used for multiple purposes. Mostly one device +uses one or multiple registers exclusively, but for some registers some +bits are for one driver and some other bits are for a different driver. +With this patch all accesses to the RCU registers will go through +syscon.
+ + +------------------------------------------------------------------------------- +Required properties: +- compatible : The first and second values must be: + "lantiq,xrx200-rcu", "simple-mfd", "syscon" +- reg : The address and length of the system control registers + + +------------------------------------------------------------------------------- +Example of the RCU bindings on a xRX200 SoC: + rcu0: rcu@203000 { + compatible = "lantiq,xrx200-rcu", "simple-mfd", "syscon"; + reg = <0x203000 0x100>; + ranges = <0x0 0x203000 0x100>; + big-endian; + + gphy0: gphy@20 { + compatible = "lantiq,xrx200a2x-gphy"; + reg = <0x20 0x4>; + + resets = <&reset0 31 30>, <&reset1 7 7>; + reset-names = "gphy", "gphy2"; + lantiq,gphy-mode = ; + }; + + gphy1: gphy@68 { + compatible = "lantiq,xrx200a2x-gphy"; + reg = <0x68 0x4>; + + resets = <&reset0 29 28>, <&reset1 6 6>; + reset-names = "gphy", "gphy2"; + lantiq,gphy-mode = ; + }; + + reset0: reset-controller@10 { + compatible = "lantiq,xrx200-reset"; + reg = <0x10 4>, <0x14 4>; + + #reset-cells = <2>; + }; + + reset1: reset-controller@48 { + compatible = "lantiq,xrx200-reset"; + reg = <0x48 4>, <0x24 4>; + + #reset-cells = <2>; + }; + + usb_phy0: usb2-phy@18 { + compatible = "lantiq,xrx200-usb2-phy"; + reg = <0x18 4>, <0x38 4>; + status = "disabled"; + + resets = <&reset1 4 4>, <&reset0 4 4>; + reset-names = "phy", "ctrl"; + #phy-cells = <0>; + }; + + usb_phy1: usb2-phy@34 { + compatible = "lantiq,xrx200-usb2-phy"; + reg = <0x34 4>, <0x3C 4>; + status = "disabled"; + + resets = <&reset1 5 4>, <&reset0 4 4>; + reset-names = "phy", "ctrl"; + #phy-cells = <0>; + }; + + reboot@10 { + compatible = "syscon-reboot"; + reg = <0x10 4>; + + regmap = <&rcu0>; + offset = <0x10>; + mask = <0x40000000>; + }; + }; diff --git a/Documentation/devicetree/bindings/mips/ni.txt b/Documentation/devicetree/bindings/mips/ni.txt new file mode 100644 index 000000000000..722bf2d62da9 --- /dev/null +++ b/Documentation/devicetree/bindings/mips/ni.txt @@ -0,0 +1,7 @@ +National Instruments MIPS platforms + +required root node properties: + - compatible: must be "ni,169445" + +CPU Nodes + - compatible: must be "mti,mips14KEc" diff --git a/Documentation/devicetree/bindings/mips/ralink.txt b/Documentation/devicetree/bindings/mips/ralink.txt index b35a8d04f8b6..a16e8d7fe56c 100644 --- a/Documentation/devicetree/bindings/mips/ralink.txt +++ b/Documentation/devicetree/bindings/mips/ralink.txt @@ -15,3 +15,4 @@ value must be one of the following values: ralink,rt5350-soc ralink,mt7620a-soc ralink,mt7620n-soc + ralink,mt7628a-soc diff --git a/Documentation/devicetree/bindings/misc/atmel-ssc.txt b/Documentation/devicetree/bindings/misc/atmel-ssc.txt index f8629bb73945..f9fb412642fe 100644 --- a/Documentation/devicetree/bindings/misc/atmel-ssc.txt +++ b/Documentation/devicetree/bindings/misc/atmel-ssc.txt @@ -47,5 +47,4 @@ ssc0: ssc@f0010000 { dma-names = "tx", "rx"; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_ssc0_tx &pinctrl_ssc0_rx>; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt b/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt index 49df630bd44f..60481bfc3d31 100644 --- a/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt +++ b/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt @@ -74,5 +74,4 @@ Example: phys = <&emmc_phy>; phy-names = "phy_arasan"; #clock-cells = <0>; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/mmc/davinci_mmc.txt b/Documentation/devicetree/bindings/mmc/davinci_mmc.txt index 
e5a0140b2381..516fb0143d4c 100644 --- a/Documentation/devicetree/bindings/mmc/davinci_mmc.txt +++ b/Documentation/devicetree/bindings/mmc/davinci_mmc.txt @@ -24,7 +24,6 @@ mmc0: mmc@1c40000 { compatible = "ti,da830-mmc", reg = <0x40000 0x1000>; interrupts = <16>; - status = "okay"; bus-width = <4>; max-frequency = <50000000>; dmas = <&edma 16 diff --git a/Documentation/devicetree/bindings/mmc/fsl-imx-mmc.txt b/Documentation/devicetree/bindings/mmc/fsl-imx-mmc.txt index db442355cd24..184ccffe2739 100644 --- a/Documentation/devicetree/bindings/mmc/fsl-imx-mmc.txt +++ b/Documentation/devicetree/bindings/mmc/fsl-imx-mmc.txt @@ -20,5 +20,4 @@ sdhci1: sdhci@10014000 { dma-names = "rx-tx"; bus-width = <4>; cd-gpios = <&gpio3 29>; - status = "okay"; }; diff --git a/Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt b/Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt index b878a1e305af..ed1456f5c94d 100644 --- a/Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt +++ b/Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt @@ -16,11 +16,13 @@ Required Properties: - clocks: Array of clocks required for SDHC. - Require at least input clock for Xenon IP core. + Require at least input clock for Xenon IP core. For Armada AP806 and + CP110, the AXI clock is also mandatory. - clock-names: Array of names corresponding to clocks property. The input clock for Xenon IP core should be named as "core". + The input clock for the AXI bus must be named as "axi". - reg: * For "marvell,armada-3700-sdhci", two register areas. @@ -106,8 +108,8 @@ Example: compatible = "marvell,armada-ap806-sdhci"; reg = <0xaa0000 0x1000>; interrupts = - clocks = <&emmc_clk>; - clock-names = "core"; + clocks = <&emmc_clk>,<&axi_clk>; + clock-names = "core", "axi"; bus-width = <4>; marvell,xenon-phy-slow-mode; marvell,xenon-tun-count = <11>; @@ -126,8 +128,8 @@ Example: interrupts = vqmmc-supply = <&sd_vqmmc_regulator>; vmmc-supply = <&sd_vmmc_regulator>; - clocks = <&sdclk>; - clock-names = "core"; + clocks = <&sdclk>, <&axi_clk>; + clock-names = "core", "axi"; bus-width = <4>; marvell,xenon-tun-count = <9>; }; diff --git a/Documentation/devicetree/bindings/mmc/mmc-card.txt b/Documentation/devicetree/bindings/mmc/mmc-card.txt index a70fcd65b9ea..8d2d71758907 100644 --- a/Documentation/devicetree/bindings/mmc/mmc-card.txt +++ b/Documentation/devicetree/bindings/mmc/mmc-card.txt @@ -21,7 +21,6 @@ Example: vmmc-supply = <®_vcc3v3>; bus-width = <8>; non-removable; - status = "okay"; mmccard: mmccard@0 { reg = <0>; diff --git a/Documentation/devicetree/bindings/mmc/mmc.txt b/Documentation/devicetree/bindings/mmc/mmc.txt index c7f4a0ec48ed..b32ade645ad9 100644 --- a/Documentation/devicetree/bindings/mmc/mmc.txt +++ b/Documentation/devicetree/bindings/mmc/mmc.txt @@ -153,7 +153,6 @@ mmc3: mmc@01c12000 { bus-width = <4>; non-removable; mmc-pwrseq = <&sdhci0_pwrseq> - status = "okay"; brcmf: bcrmf@1 { reg = <1>; diff --git a/Documentation/devicetree/bindings/mmc/orion-sdio.txt b/Documentation/devicetree/bindings/mmc/orion-sdio.txt index 84f0ebd67a13..10f0818a34c5 100644 --- a/Documentation/devicetree/bindings/mmc/orion-sdio.txt +++ b/Documentation/devicetree/bindings/mmc/orion-sdio.txt @@ -13,5 +13,4 @@ Example: reg = <0xd00d4000 0x200>; interrupts = <54>; clocks = <&gateclk 17>; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/mmc/renesas,mmcif.txt b/Documentation/devicetree/bindings/mmc/renesas,mmcif.txt index c32dc5a9dbe6..5ff1e12c655a 100644 --- 
a/Documentation/devicetree/bindings/mmc/renesas,mmcif.txt +++ b/Documentation/devicetree/bindings/mmc/renesas,mmcif.txt @@ -11,6 +11,8 @@ Required properties: - "renesas,mmcif-r7s72100" for the MMCIF found in r7s72100 SoCs - "renesas,mmcif-r8a73a4" for the MMCIF found in r8a73a4 SoCs - "renesas,mmcif-r8a7740" for the MMCIF found in r8a7740 SoCs + - "renesas,mmcif-r8a7743" for the MMCIF found in r8a7743 SoCs + - "renesas,mmcif-r8a7745" for the MMCIF found in r8a7745 SoCs - "renesas,mmcif-r8a7778" for the MMCIF found in r8a7778 SoCs - "renesas,mmcif-r8a7790" for the MMCIF found in r8a7790 SoCs - "renesas,mmcif-r8a7791" for the MMCIF found in r8a7791 SoCs @@ -21,7 +23,7 @@ Required properties: - interrupts: Some SoCs have only 1 shared interrupt, while others have either 2 or 3 individual interrupts (error, int, card detect). Below is the number of interrupts for each SoC: - 1: r8a73a4, r8a7778, r8a7790, r8a7791, r8a7793, r8a7794 + 1: r8a73a4, r8a7743, r8a7745, r8a7778, r8a7790, r8a7791, r8a7793, r8a7794 2: r8a7740, sh73a0 3: r7s72100 diff --git a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt index 49ed3ad2524a..c6558785e61b 100644 --- a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt +++ b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt @@ -15,6 +15,7 @@ Required Properties: - "rockchip,rk3288-dw-mshc": for Rockchip RK3288 - "rockchip,rv1108-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RV1108 - "rockchip,rk3036-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3036 + - "rockchip,rk3228-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK322x - "rockchip,rk3328-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3328 - "rockchip,rk3368-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3368 - "rockchip,rk3399-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3399 diff --git a/Documentation/devicetree/bindings/mmc/sdhci-st.txt b/Documentation/devicetree/bindings/mmc/sdhci-st.txt index 230fd696eb92..e35645598315 100644 --- a/Documentation/devicetree/bindings/mmc/sdhci-st.txt +++ b/Documentation/devicetree/bindings/mmc/sdhci-st.txt @@ -63,7 +63,6 @@ Example: mmc0: sdhci@fe81e000 { compatible = "st,sdhci"; - status = "disabled"; reg = <0xfe81e000 0x1000>; interrupts = ; interrupt-names = "mmcirq"; @@ -77,7 +76,6 @@ mmc0: sdhci@fe81e000 { mmc1: sdhci@09080000 { compatible = "st,sdhci-stih407", "st,sdhci"; - status = "disabled"; reg = <0x09080000 0x7ff>; reg-names = "mmc"; interrupts = ; @@ -94,7 +92,6 @@ mmc1: sdhci@09080000 { mmc0: sdhci@09060000 { compatible = "st,sdhci-stih407", "st,sdhci"; - status = "disabled"; reg = <0x09060000 0x7ff>, <0x9061008 0x20>; reg-names = "mmc", "top-mmc-delay"; interrupts = ; diff --git a/Documentation/devicetree/bindings/mmc/sunxi-mmc.txt b/Documentation/devicetree/bindings/mmc/sunxi-mmc.txt index 7d53a799f140..63b57e2a10fb 100644 --- a/Documentation/devicetree/bindings/mmc/sunxi-mmc.txt +++ b/Documentation/devicetree/bindings/mmc/sunxi-mmc.txt @@ -12,6 +12,7 @@ Required properties: * "allwinner,sun4i-a10-mmc" * "allwinner,sun5i-a13-mmc" * "allwinner,sun7i-a20-mmc" + * "allwinner,sun8i-a83t-emmc" * "allwinner,sun9i-a80-mmc" * "allwinner,sun50i-a64-emmc" * "allwinner,sun50i-a64-mmc" diff --git a/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt b/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt index 0e026c151c1c..3a4ac401e6f9 100644 --- a/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt +++ 
b/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt @@ -1,33 +1,55 @@ -* TI Highspeed MMC host controller for OMAP +* TI Highspeed MMC host controller for OMAP and 66AK2G family. -The Highspeed MMC Host Controller on TI OMAP family +The Highspeed MMC Host Controller on TI OMAP and 66AK2G family provides an interface for MMC, SD, and SDIO types of memory cards. This file documents differences between the core properties described by mmc.txt and the properties used by the omap_hsmmc driver. Required properties: +-------------------- - compatible: Should be "ti,omap2-hsmmc", for OMAP2 controllers Should be "ti,omap3-hsmmc", for OMAP3 controllers Should be "ti,omap3-pre-es3-hsmmc" for OMAP3 controllers pre ES3.0 Should be "ti,omap4-hsmmc", for OMAP4 controllers Should be "ti,am33xx-hsmmc", for AM335x controllers -- ti,hwmods: Must be "mmc", n is controller instance starting 1 + Should be "ti,k2g-hsmmc", "ti,omap4-hsmmc" for 66AK2G controllers. + +SoC specific required properties: +--------------------------------- +The following are mandatory properties for OMAPs, AM33xx and AM43xx SoCs only: +- ti,hwmods: Must be "mmc", n is controller instance starting 1. + +The following are mandatory properties for 66AK2G SoCs only: +- power-domains: Should contain a phandle to a PM domain provider node + and an args specifier containing the MMC device id + value. This property is as per the binding, + Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt +- clocks: Must contain an entry for each entry in clock-names. Should + be defined as per the appropriate clock bindings consumer + usage in Documentation/devicetree/bindings/clock/ti,sci-clk.txt +- clock-names: Shall be "fck" for the functional clock, + and "mmchsdb_fck" for the debounce clock. + Optional properties: -ti,dual-volt: boolean, supports dual voltage cards --supply: phandle to the regulator device tree node -"supply-name" examples are "vmmc", "vmmc_aux"(deprecated)/"vqmmc" etc -ti,non-removable: non-removable slot (like eMMC) -ti,needs-special-reset: Requires a special softreset sequence -ti,needs-special-hs-handling: HSMMC IP needs special setting for handling High Speed -dmas: List of DMA specifiers with the controller specific format -as described in the generic DMA client binding. A tx and rx -specifier is required. -dma-names: List of DMA request names. These strings correspond -1:1 with the DMA specifiers listed in dmas. The string naming is -to be "rx" and "tx" for RX and TX DMA requests, respectively. +-------------------- +- ti,dual-volt: boolean, supports dual voltage cards +- -supply: phandle to the regulator device tree node + "supply-name" examples are "vmmc", + "vmmc_aux"(deprecated)/"vqmmc" etc +- ti,non-removable: non-removable slot (like eMMC) +- ti,needs-special-reset: Requires a special softreset sequence +- ti,needs-special-hs-handling: HSMMC IP needs special setting + for handling High Speed +- dmas: List of DMA specifiers with the controller specific + format as described in the generic DMA client + binding. A tx and rx specifier is required. +- dma-names: List of DMA request names. These strings correspond + 1:1 with the DMA specifiers listed in dmas. + The string naming is to be "rx" and "tx" for + RX and TX DMA requests, respectively.
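For the 66AK2G case, a minimal node sketch is given below. The &k2g_pds and &k2g_clks provider phandles, the device id 0xb, the unit address and the interrupt specifier are illustrative assumptions, not values taken from this document:

	mmc0: mmc@23000000 {
		compatible = "ti,k2g-hsmmc", "ti,omap4-hsmmc";
		reg = <0x23000000 0x400>;
		interrupts = <GIC_SPI 96 IRQ_TYPE_EDGE_RISING>;	/* assumed interrupt specifier */
		power-domains = <&k2g_pds 0xb>;			/* assumed TI-SCI PM domain provider + MMC device id */
		clocks = <&k2g_clks 0xb 1>, <&k2g_clks 0xb 2>;	/* assumed TI-SCI clock specifiers */
		clock-names = "fck", "mmchsdb_fck";
		bus-width = <4>;
	};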
Examples: diff --git a/Documentation/devicetree/bindings/mmc/tmio_mmc.txt b/Documentation/devicetree/bindings/mmc/tmio_mmc.txt index 4fd8b7acc510..54ef642f23a0 100644 --- a/Documentation/devicetree/bindings/mmc/tmio_mmc.txt +++ b/Documentation/devicetree/bindings/mmc/tmio_mmc.txt @@ -15,6 +15,8 @@ Required properties: "renesas,sdhi-r7s72100" - SDHI IP on R7S72100 SoC "renesas,sdhi-r8a73a4" - SDHI IP on R8A73A4 SoC "renesas,sdhi-r8a7740" - SDHI IP on R8A7740 SoC + "renesas,sdhi-r8a7743" - SDHI IP on R8A7743 SoC + "renesas,sdhi-r8a7745" - SDHI IP on R8A7745 SoC "renesas,sdhi-r8a7778" - SDHI IP on R8A7778 SoC "renesas,sdhi-r8a7779" - SDHI IP on R8A7779 SoC "renesas,sdhi-r8a7790" - SDHI IP on R8A7790 SoC @@ -33,10 +35,8 @@ Required properties: If 2 clocks are specified by the hardware, you must name them as "core" and "cd". If the controller only has 1 clock, naming is not required. - Below is the number clocks for each supported SoC: - 1: SH73A0, R8A73A4, R8A7740, R8A7778, R8A7779, R8A7790 - R8A7791, R8A7792, R8A7793, R8A7794, R8A7795, R8A7796 - 2: R7S72100 + Devices which have more than 1 clock are listed below: + 2: R7S72100 Optional properties: - toshiba,mmc-wrprotect-disable: write-protect detection is unavailable diff --git a/Documentation/devicetree/bindings/mmc/zx-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/zx-dw-mshc.txt index 906819a90c2b..0f59bd5361f5 100644 --- a/Documentation/devicetree/bindings/mmc/zx-dw-mshc.txt +++ b/Documentation/devicetree/bindings/mmc/zx-dw-mshc.txt @@ -28,5 +28,4 @@ Example: max-frequency = <50000000>; cap-sdio-irq; cap-sd-highspeed; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/mtd/atmel-quadspi.txt b/Documentation/devicetree/bindings/mtd/atmel-quadspi.txt index 489807005eda..b93c1e2f25dd 100644 --- a/Documentation/devicetree/bindings/mtd/atmel-quadspi.txt +++ b/Documentation/devicetree/bindings/mtd/atmel-quadspi.txt @@ -24,7 +24,6 @@ spi@f0020000 { #size-cells = <0>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_spi0_default>; - status = "okay"; m25p80@0 { ... 
diff --git a/Documentation/devicetree/bindings/mtd/mtk-quadspi.txt b/Documentation/devicetree/bindings/mtd/mtk-quadspi.txt index 5ded66ad7aef..840f9405dcf0 100644 --- a/Documentation/devicetree/bindings/mtd/mtk-quadspi.txt +++ b/Documentation/devicetree/bindings/mtd/mtk-quadspi.txt @@ -37,7 +37,6 @@ nor_flash: spi@1100d000 { clock-names = "spi", "sf"; #address-cells = <1>; #size-cells = <0>; - status = "disabled"; flash@0 { compatible = "jedec,spi-nor"; diff --git a/Documentation/devicetree/bindings/mtd/qcom_nandc.txt b/Documentation/devicetree/bindings/mtd/qcom_nandc.txt index 70dd5118a324..73d336befa08 100644 --- a/Documentation/devicetree/bindings/mtd/qcom_nandc.txt +++ b/Documentation/devicetree/bindings/mtd/qcom_nandc.txt @@ -1,11 +1,20 @@ * Qualcomm NAND controller Required properties: -- compatible: should be "qcom,ipq806x-nand" +- compatible: must be one of the following: + * "qcom,ipq806x-nand" - for EBI2 NAND controller being used in IPQ806x + SoC and it uses ADM DMA + * "qcom,ipq4019-nand" - for QPIC NAND controller v1.4.0 being used in + IPQ4019 SoC and it uses BAM DMA + * "qcom,ipq8074-nand" - for QPIC NAND controller v1.5.0 being used in + IPQ8074 SoC and it uses BAM DMA + - reg: MMIO address range - clocks: must contain core clock and always on clock - clock-names: must contain "core" for the core clock and "aon" for the always on clock + +EBI2 specific properties: - dmas: DMA specifier, consisting of a phandle to the ADM DMA controller node and the channel number to be used for NAND. Refer to dma.txt and qcom_adm.txt for more details @@ -16,6 +25,12 @@ Required properties: - qcom,data-crci: must contain the ADM data type CRCI block instance number specified for the NAND controller on the given platform + +QPIC specific properties: +- dmas: DMA specifier, consisting of a phandle to the BAM DMA + and the channel number to be used for NAND. Refer to + dma.txt, qcom_bam_dma.txt for more details +- dma-names: must contain all 3 channel names : "tx", "rx", "cmd" - #address-cells: <1> - subnodes give the chip-select number - #size-cells: <0> @@ -26,7 +41,6 @@ chip-selects which (may) contain NAND flash chips. Their properties are as follows. Required properties: -- compatible: should contain "qcom,nandcs" - reg: a single integer representing the chip-select number (e.g., 0, 1, 2, etc.) - #address-cells: see partition.txt @@ -43,8 +57,8 @@ partition.txt for more detail. 
Example: -nand@1ac00000 { - compatible = "qcom,ebi2-nandc"; +nand-controller@1ac00000 { + compatible = "qcom,ipq806x-nand"; reg = <0x1ac00000 0x800>; clocks = <&gcc EBI2_CLK>, @@ -59,8 +73,7 @@ nand@1ac00000 { #address-cells = <1>; #size-cells = <0>; - nandcs@0 { - compatible = "qcom,nandcs"; + nand@0 { reg = <0>; nand-ecc-strength = <4>; @@ -84,3 +97,43 @@ nand@1ac00000 { }; }; }; + +nand-controller@79b0000 { + compatible = "qcom,ipq4019-nand"; + reg = <0x79b0000 0x1000>; + + clocks = <&gcc GCC_QPIC_CLK>, + <&gcc GCC_QPIC_AHB_CLK>; + clock-names = "core", "aon"; + + dmas = <&qpicbam 0>, + <&qpicbam 1>, + <&qpicbam 2>; + dma-names = "tx", "rx", "cmd"; + + #address-cells = <1>; + #size-cells = <0>; + + nand@0 { + reg = <0>; + nand-ecc-strength = <4>; + nand-ecc-step-size = <512>; + nand-bus-width = <8>; + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + partition@0 { + label = "boot-nand"; + reg = <0 0x58a0000>; + }; + + partition@58a0000 { + label = "fs-nand"; + reg = <0x58a0000 0x4000000>; + }; + }; + }; +}; diff --git a/Documentation/devicetree/bindings/mtd/st-fsm.txt b/Documentation/devicetree/bindings/mtd/st-fsm.txt index c2489391c437..54cef9ef3083 100644 --- a/Documentation/devicetree/bindings/mtd/st-fsm.txt +++ b/Documentation/devicetree/bindings/mtd/st-fsm.txt @@ -21,6 +21,5 @@ Example: st,syscfg = <&syscfg_rear>; st,boot-device-reg = <0x958>; st,boot-device-spi = <0x1a>; - status = "okay"; }; diff --git a/Documentation/devicetree/bindings/mtd/sunxi-nand.txt b/Documentation/devicetree/bindings/mtd/sunxi-nand.txt index f322f56aef74..a37c67bcb43b 100644 --- a/Documentation/devicetree/bindings/mtd/sunxi-nand.txt +++ b/Documentation/devicetree/bindings/mtd/sunxi-nand.txt @@ -41,7 +41,6 @@ nfc: nand@01c03000 { #size-cells = <0>; pinctrl-names = "default"; pinctrl-0 = <&nand_pins_a &nand_cs0_pins_a &nand_rb0_pins_a>; - status = "okay"; nand@0 { reg = <0>; diff --git a/Documentation/devicetree/bindings/net/anarion-gmac.txt b/Documentation/devicetree/bindings/net/anarion-gmac.txt new file mode 100644 index 000000000000..fe678965ae69 --- /dev/null +++ b/Documentation/devicetree/bindings/net/anarion-gmac.txt @@ -0,0 +1,25 @@ +* Adaptrum Anarion ethernet controller + +This device is a platform glue layer for stmmac. +Please see stmmac.txt for the other unchanged properties. + +Required properties: + - compatible: Should be "adaptrum,anarion-gmac", "snps,dwmac" + - phy-mode: Should be "rgmii". Other modes are not currently supported. 
+ + +Examples: + + gmac1: ethernet@f2014000 { + compatible = "adaptrum,anarion-gmac", "snps,dwmac"; + reg = <0xf2014000 0x4000>, <0xf2018100 8>; + + interrupt-parent = <&core_intc>; + interrupts = <21>; + interrupt-names = "macirq"; + + clocks = <&core_clk>; + clock-names = "stmmaceth"; + + phy-mode = "rgmii"; + }; diff --git a/Documentation/devicetree/bindings/net/brcm,amac.txt b/Documentation/devicetree/bindings/net/brcm,amac.txt index ad16c1f481f7..0bfad656a9ff 100644 --- a/Documentation/devicetree/bindings/net/brcm,amac.txt +++ b/Documentation/devicetree/bindings/net/brcm,amac.txt @@ -27,5 +27,4 @@ amac0: ethernet@18022000 { <0x18110000 0x1000>; reg-names = "amac_base", "idm_base"; interrupts = ; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/net/broadcom-bluetooth.txt b/Documentation/devicetree/bindings/net/broadcom-bluetooth.txt new file mode 100644 index 000000000000..4194ff7e6ee6 --- /dev/null +++ b/Documentation/devicetree/bindings/net/broadcom-bluetooth.txt @@ -0,0 +1,35 @@ +Broadcom Bluetooth Chips +--------------------- + +This documents the binding structure and common properties for serial +attached Broadcom devices. + +Serial attached Broadcom devices shall be a child node of the host UART +device the slave device is attached to. + +Required properties: + + - compatible: should contain one of the following: + * "brcm,bcm43438-bt" + +Optional properties: + + - max-speed: see Documentation/devicetree/bindings/serial/slave-device.txt + - shutdown-gpios: GPIO specifier, used to enable the BT module + - device-wakeup-gpios: GPIO specifier, used to wakeup the controller + - host-wakeup-gpios: GPIO specifier, used to wakeup the host processor + - clocks: clock specifier if external clock provided to the controller + - clock-names: should be "extclk" + + +Example: + +&uart2 { + pinctrl-names = "default"; + pinctrl-0 = <&uart2_pins>; + + bluetooth { + compatible = "brcm,bcm43438-bt"; + max-speed = <921600>; + }; +}; diff --git a/Documentation/devicetree/bindings/net/btusb.txt b/Documentation/devicetree/bindings/net/btusb.txt index 01fa2d4188d4..9c5e663fa1af 100644 --- a/Documentation/devicetree/bindings/net/btusb.txt +++ b/Documentation/devicetree/bindings/net/btusb.txt @@ -29,7 +29,6 @@ Example: Following example uses irq pin number 3 of gpio0 for out of band wake-on-bt: &usb_host1_ehci { - status = "okay"; #address-cells = <1>; #size-cells = <0>; diff --git a/Documentation/devicetree/bindings/net/can/c_can.txt b/Documentation/devicetree/bindings/net/can/c_can.txt index 5a1d8b0c39e9..2d504256b0d8 100644 --- a/Documentation/devicetree/bindings/net/can/c_can.txt +++ b/Documentation/devicetree/bindings/net/can/c_can.txt @@ -11,9 +11,20 @@ Required properties: - interrupts : property with a value describing the interrupt number -Optional properties: +The following are mandatory properties for DRA7x, AM33xx and AM43xx SoCs only: - ti,hwmods : Must be "d_can" or "c_can", n being the instance number + +The following are mandatory properties for Keystone 2 66AK2G SoCs only: +- power-domains : Should contain a phandle to a PM domain provider node + and an args specifier containing the DCAN device id + value. This property is as per the binding, + Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt +- clocks : CAN functional clock phandle. 
This property is as per the + binding, + Documentation/devicetree/bindings/clock/ti,sci-clk.txt + +Optional properties: - syscon-raminit : Handle to system control region that contains the RAMINIT register, register offset to the RAMINIT register and the CAN instance number (0 offset). diff --git a/Documentation/devicetree/bindings/net/can/m_can.txt b/Documentation/devicetree/bindings/net/can/m_can.txt index 9e331777c203..78138333ff7a 100644 --- a/Documentation/devicetree/bindings/net/can/m_can.txt +++ b/Documentation/devicetree/bindings/net/can/m_can.txt @@ -56,7 +56,6 @@ m_can1: can@020e8000 { <&clks IMX6SX_CLK_CANFD>; clock-names = "hclk", "cclk"; bosch,mram-cfg = <0x0 0 0 32 0 0 0 1>; - status = "disabled"; }; Board dts: diff --git a/Documentation/devicetree/bindings/net/dsa/ksz.txt b/Documentation/devicetree/bindings/net/dsa/ksz.txt index 0ab8b39d0b30..fd23904ac68e 100644 --- a/Documentation/devicetree/bindings/net/dsa/ksz.txt +++ b/Documentation/devicetree/bindings/net/dsa/ksz.txt @@ -24,7 +24,6 @@ Ethernet switch connected via SPI to the host, CPU port wired to eth0: pinctrl-0 = <&pinctrl_spi_ksz>; cs-gpios = <&pioC 25 0>; id = <1>; - status = "okay"; ksz9477: ksz9477@0 { compatible = "microchip,ksz9477"; @@ -34,7 +33,6 @@ Ethernet switch connected via SPI to the host, CPU port wired to eth0: spi-cpha; spi-cpol; - status = "okay"; ports { #address-cells = <1>; #size-cells = <0>; diff --git a/Documentation/devicetree/bindings/net/dsa/lan9303.txt b/Documentation/devicetree/bindings/net/dsa/lan9303.txt index 04f2965a4467..4448d063ddf6 100644 --- a/Documentation/devicetree/bindings/net/dsa/lan9303.txt +++ b/Documentation/devicetree/bindings/net/dsa/lan9303.txt @@ -27,7 +27,6 @@ Example: I2C managed mode: master: masterdevice@X { - status = "okay"; fixed-link { /* RMII fixed link to LAN9303 */ speed = <100>; @@ -38,7 +37,6 @@ I2C managed mode: switch: switch@a { compatible = "smsc,lan9303-i2c"; reg = <0xa>; - status = "okay"; reset-gpios = <&gpio7 6 GPIO_ACTIVE_LOW>; reset-duration = <200>; @@ -67,7 +65,6 @@ I2C managed mode: MDIO managed mode: master: masterdevice@X { - status = "okay"; phy-handle = <&switch>; mdio { diff --git a/Documentation/devicetree/bindings/net/ethernet.txt b/Documentation/devicetree/bindings/net/ethernet.txt index 7da86f22a13b..2974e63ba311 100644 --- a/Documentation/devicetree/bindings/net/ethernet.txt +++ b/Documentation/devicetree/bindings/net/ethernet.txt @@ -1,5 +1,9 @@ The following properties are common to the Ethernet controllers: +NOTE: All 'phy*' properties documented below are Ethernet specific. For the +generic PHY 'phys' property, see +Documentation/devicetree/bindings/phy/phy-bindings.txt. 
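As a hedged illustration of the note above (the node name, compatible string and phandles are assumptions, not taken from any real binding), an Ethernet MAC node may carry both the Ethernet-specific phy-handle and the generic 'phys' property side by side:

	ethernet@70000 {
		compatible = "vendor,example-mac";	/* placeholder compatible */
		phy-handle = <&ethphy0>;		/* Ethernet-specific: the attached Ethernet PHY */
		phy-mode = "rgmii-id";
		phys = <&comphy1 0>;			/* generic PHY binding, e.g. a SerDes lane */
		phy-names = "serdes";
	};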
+ - local-mac-address: array of 6 bytes, specifies the MAC address that was assigned to the network device; - mac-address: array of 6 bytes, specifies the MAC address that was last used by diff --git a/Documentation/devicetree/bindings/net/ftgmac100.txt b/Documentation/devicetree/bindings/net/ftgmac100.txt index c1ce1680246f..72e7aaf7242e 100644 --- a/Documentation/devicetree/bindings/net/ftgmac100.txt +++ b/Documentation/devicetree/bindings/net/ftgmac100.txt @@ -30,6 +30,5 @@ Example: compatible = "aspeed,ast2500-mac", "faraday,ftgmac100"; reg = <0x1e660000 0x180>; interrupts = <2>; - status = "okay"; use-ncsi; }; diff --git a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt index ae4234ca4ee4..bedcfd5a52cd 100644 --- a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt +++ b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt @@ -41,7 +41,6 @@ ethernet@70000 { interrupts = <8>; clocks = <&gate_clk 4>; tx-csum-limit = <9800> - status = "okay"; phy = <&phy0>; phy-mode = "rgmii-id"; buffer-manager = <&bm>; diff --git a/Documentation/devicetree/bindings/net/marvell-bt-8xxx.txt b/Documentation/devicetree/bindings/net/marvell-bt-8xxx.txt index 9be1059ff03f..3d27c68613a6 100644 --- a/Documentation/devicetree/bindings/net/marvell-bt-8xxx.txt +++ b/Documentation/devicetree/bindings/net/marvell-bt-8xxx.txt @@ -44,7 +44,6 @@ Example for SDIO device follows (calibration data is also available in below example). &mmc3 { - status = "okay"; vmmc-supply = <&wlan_en_reg>; bus-width = <4>; cap-power-off-card; @@ -70,7 +69,6 @@ below example). Example for USB device: &usb_host1_ohci { - status = "okay"; #address-cells = <1>; #size-cells = <0>; diff --git a/Documentation/devicetree/bindings/net/marvell-neta-bm.txt b/Documentation/devicetree/bindings/net/marvell-neta-bm.txt index c1b1d7c3bde1..07b31050dbe5 100644 --- a/Documentation/devicetree/bindings/net/marvell-neta-bm.txt +++ b/Documentation/devicetree/bindings/net/marvell-neta-bm.txt @@ -31,7 +31,6 @@ bm: bm@c8000 { reg = <0xc8000 0xac>; clocks = <&gateclk 13>; internal-mem = <&bm_bppi>; - status = "okay"; pool2,capacity = <4096>; pool1,pkt-size = <512>; }; @@ -45,5 +44,4 @@ bm_bppi: bm-bppi { #address-cells = <1>; #size-cells = <1>; clocks = <&gateclk 13>; - status = "okay"; }; diff --git a/Documentation/devicetree/bindings/net/marvell-pp2.txt b/Documentation/devicetree/bindings/net/marvell-pp2.txt index 6b4956beff8c..1814fa13f6ab 100644 --- a/Documentation/devicetree/bindings/net/marvell-pp2.txt +++ b/Documentation/devicetree/bindings/net/marvell-pp2.txt @@ -21,8 +21,9 @@ Required properties: - main controller clock (for both armada-375-pp2 and armada-7k-pp2) - GOP clock (for both armada-375-pp2 and armada-7k-pp2) - MG clock (only for armada-7k-pp2) -- clock-names: names of used clocks, must be "pp_clk", "gop_clk" and - "mg_clk" (the latter only for armada-7k-pp2). + - AXI clock (only for armada-7k-pp2) +- clock-names: names of used clocks, must be "pp_clk", "gop_clk", "mg_clk" + and "axi_clk" (the 2 latter only for armada-7k-pp2). The ethernet ports are represented by subnodes. At least one port is required. @@ -41,6 +42,11 @@ Optional properties (port): - marvell,loopback: port is loopback mode - phy: a phandle to a phy node defining the PHY address (as the reg property, a single integer). +- interrupt-names: if more than a single interrupt for rx is given, must + be the name associated to the interrupts listed. 
Valid + names are: "tx-cpu0", "tx-cpu1", "tx-cpu2", "tx-cpu3", + "rx-shared", "link". +- marvell,system-controller: a phandle to the system controller. Example for marvell,armada-375-pp2: @@ -52,12 +58,10 @@ ethernet@f0000 { <0xc5000 0x100>; clocks = <&gateclk 3>, <&gateclk 19>; clock-names = "pp_clk", "gop_clk"; - status = "okay"; eth0: eth0@c4000 { interrupts = ; port-id = <0>; - status = "okay"; phy = <&phy0>; phy-mode = "gmii"; }; @@ -65,7 +69,6 @@ ethernet@f0000 { eth1: eth1@c5000 { interrupts = ; port-id = <1>; - status = "okay"; phy = <&phy3>; phy-mode = "gmii"; }; @@ -76,23 +79,42 @@ Example for marvell,armada-7k-pp2: cpm_ethernet: ethernet@0 { compatible = "marvell,armada-7k-pp22"; reg = <0x0 0x100000>, <0x129000 0xb000>; - clocks = <&cpm_syscon0 1 3>, <&cpm_syscon0 1 9>, <&cpm_syscon0 1 5>; - clock-names = "pp_clk", "gop_clk", "gp_clk"; + clocks = <&cpm_syscon0 1 3>, <&cpm_syscon0 1 9>, + <&cpm_syscon0 1 5>, <&cpm_syscon0 1 18>; + clock-names = "pp_clk", "gop_clk", "gp_clk", "axi_clk"; eth0: eth0 { - interrupts = ; + interrupts = , + , + , + , + ; + interrupt-names = "tx-cpu0", "tx-cpu1", "tx-cpu2", + "tx-cpu3", "rx-shared"; port-id = <0>; gop-port-id = <0>; }; eth1: eth1 { - interrupts = ; + interrupts = , + , + , + , + ; + interrupt-names = "tx-cpu0", "tx-cpu1", "tx-cpu2", + "tx-cpu3", "rx-shared"; port-id = <1>; gop-port-id = <2>; }; eth2: eth2 { - interrupts = ; + interrupts = , + , + , + , + ; + interrupt-names = "tx-cpu0", "tx-cpu1", "tx-cpu2", + "tx-cpu3", "rx-shared"; port-id = <2>; gop-port-id = <3>; }; diff --git a/Documentation/devicetree/bindings/net/mediatek-net.txt b/Documentation/devicetree/bindings/net/mediatek-net.txt index c7194e87d5f4..214eaa9a6683 100644 --- a/Documentation/devicetree/bindings/net/mediatek-net.txt +++ b/Documentation/devicetree/bindings/net/mediatek-net.txt @@ -7,24 +7,32 @@ have dual GMAC each represented by a child node.. * Ethernet controller node Required properties: -- compatible: Should be "mediatek,mt2701-eth" +- compatible: Should be + "mediatek,mt2701-eth": for MT2701 SoC + "mediatek,mt7623-eth", "mediatek,mt2701-eth": for MT7623 SoC + "mediatek,mt7622-eth": for MT7622 SoC - reg: Address and length of the register set for the device - interrupts: Should contain the three frame engines interrupts in numeric order. These are fe_int0, fe_int1 and fe_int2. - clocks: the clock used by the core - clock-names: the names of the clock listed in the clocks property. These are - "ethif", "esw", "gp2", "gp1" + "ethif", "esw", "gp2", "gp1" : For MT2701 and MT7623 SoC + "ethif", "esw", "gp0", "gp1", "gp2", "sgmii_tx250m", "sgmii_rx250m", + "sgmii_cdr_ref", "sgmii_cdr_fb", "sgmii_ck", "eth2pll" : For MT7622 SoC - power-domains: phandle to the power domain that the ethernet is part of -- resets: Should contain a phandle to the ethsys reset signal -- reset-names: Should contain the reset signal name "eth" +- resets: Should contain phandles to the ethsys reset signals +- reset-names: Should contain the names of reset signal listed in the resets + property + These are "fe", "gmac" and "ppe" - mediatek,ethsys: phandle to the syscon node that handles the port setup +- mediatek,sgmiisys: phandle to the syscon node that handles the SGMII setup + which is required for those SoCs equipped with SGMII such as MT7622 SoC. 
- mediatek,pctl: phandle to the syscon node that handles the ports slew rate and driver current Optional properties: - interrupt-parent: Should be the phandle for the interrupt controller that services interrupts for this device - * Ethernet MAC node Required properties: diff --git a/Documentation/devicetree/bindings/net/meson-dwmac.txt b/Documentation/devicetree/bindings/net/meson-dwmac.txt index 0703ad3f3c1e..354dd9896bb5 100644 --- a/Documentation/devicetree/bindings/net/meson-dwmac.txt +++ b/Documentation/devicetree/bindings/net/meson-dwmac.txt @@ -66,5 +66,4 @@ Example for GXBB: <&clkc CLKID_MPLL2>; clock-names = "stmmaceth", "clkin0", "clkin1"; phy-mode = "rgmii"; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/net/micrel-ksz90x1.txt b/Documentation/devicetree/bindings/net/micrel-ksz90x1.txt index c35b5b428a7f..42a248301615 100644 --- a/Documentation/devicetree/bindings/net/micrel-ksz90x1.txt +++ b/Documentation/devicetree/bindings/net/micrel-ksz90x1.txt @@ -69,7 +69,6 @@ Examples: }; }; ethernet@70000 { - status = "okay"; phy = <&phy0>; phy-mode = "rgmii-id"; }; diff --git a/Documentation/devicetree/bindings/net/microchip,enc28j60.txt b/Documentation/devicetree/bindings/net/microchip,enc28j60.txt index 1dc3bc75539d..44dff53d4dda 100644 --- a/Documentation/devicetree/bindings/net/microchip,enc28j60.txt +++ b/Documentation/devicetree/bindings/net/microchip,enc28j60.txt @@ -33,7 +33,6 @@ Example (for NXP i.MX28 with pin control stuff for GPIO irq): compatible = "fsl,imx28-spi"; pinctrl-names = "default"; pinctrl-0 = <&spi2_pins_b &spi2_sck_cfg>; - status = "okay"; enc28j60: ethernet@0 { compatible = "microchip,enc28j60"; diff --git a/Documentation/devicetree/bindings/net/nfc/nfcmrvl.txt b/Documentation/devicetree/bindings/net/nfc/nfcmrvl.txt index 76df9173825a..c9b35251bb20 100644 --- a/Documentation/devicetree/bindings/net/nfc/nfcmrvl.txt +++ b/Documentation/devicetree/bindings/net/nfc/nfcmrvl.txt @@ -25,7 +25,6 @@ Optional I2C-based chip specific properties: Example (for ARM-based BeagleBoard Black with 88W8887 on UART5): &uart5 { - status = "okay"; nfcmrvluart: nfcmrvluart@5 { compatible = "marvell,nfc-uart"; @@ -41,7 +40,6 @@ Example (for ARM-based BeagleBoard Black with 88W8887 on UART5): Example (for ARM-based BeagleBoard Black with 88W8887 on I2C1): &i2c1 { - status = "okay"; clock-frequency = <400000>; nfcmrvli2c0: i2c@1 { diff --git a/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt b/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt index 5b6cd9b3f628..92486733df71 100644 --- a/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt +++ b/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt @@ -17,7 +17,6 @@ Example (for ARM-based BeagleBone with NPC100 NFC controller on I2C2): &i2c2 { - status = "okay"; npc100: npc100@29 { diff --git a/Documentation/devicetree/bindings/net/nfc/pn533-i2c.txt b/Documentation/devicetree/bindings/net/nfc/pn533-i2c.txt index 1aea822d4530..122460e42e3c 100644 --- a/Documentation/devicetree/bindings/net/nfc/pn533-i2c.txt +++ b/Documentation/devicetree/bindings/net/nfc/pn533-i2c.txt @@ -15,7 +15,6 @@ Example (for ARM-based BeagleBone with PN532 on I2C2): &i2c2 { - status = "okay"; pn532: pn532@24 { diff --git a/Documentation/devicetree/bindings/net/nfc/pn544.txt b/Documentation/devicetree/bindings/net/nfc/pn544.txt index dab69f36167c..538a86f7b2b0 100644 --- a/Documentation/devicetree/bindings/net/nfc/pn544.txt +++ b/Documentation/devicetree/bindings/net/nfc/pn544.txt @@ -17,7 +17,6 @@ Example (for ARM-based 
BeagleBone with PN544 on I2C2): &i2c2 { - status = "okay"; pn544: pn544@28 { diff --git a/Documentation/devicetree/bindings/net/nfc/s3fwrn5.txt b/Documentation/devicetree/bindings/net/nfc/s3fwrn5.txt index fb1e75facf1b..ed5b3eaadb39 100644 --- a/Documentation/devicetree/bindings/net/nfc/s3fwrn5.txt +++ b/Documentation/devicetree/bindings/net/nfc/s3fwrn5.txt @@ -12,7 +12,6 @@ Required properties: Example: &hsi2c_4 { - status = "okay"; s3fwrn5@27 { compatible = "samsung,s3fwrn5-i2c"; diff --git a/Documentation/devicetree/bindings/net/nfc/st-nci-i2c.txt b/Documentation/devicetree/bindings/net/nfc/st-nci-i2c.txt index 263732e8879f..b46d473be425 100644 --- a/Documentation/devicetree/bindings/net/nfc/st-nci-i2c.txt +++ b/Documentation/devicetree/bindings/net/nfc/st-nci-i2c.txt @@ -20,7 +20,6 @@ Example (for ARM-based BeagleBoard xM with ST21NFCB on I2C2): &i2c2 { - status = "okay"; st21nfcb: st21nfcb@8 { diff --git a/Documentation/devicetree/bindings/net/nfc/st-nci-spi.txt b/Documentation/devicetree/bindings/net/nfc/st-nci-spi.txt index 711ca85a363d..54ce8e7ac681 100644 --- a/Documentation/devicetree/bindings/net/nfc/st-nci-spi.txt +++ b/Documentation/devicetree/bindings/net/nfc/st-nci-spi.txt @@ -19,7 +19,6 @@ Example (for ARM-based BeagleBoard xM with ST21NFCB on SPI4): &mcspi4 { - status = "okay"; st21nfcb: st21nfcb@0 { diff --git a/Documentation/devicetree/bindings/net/nfc/st21nfca.txt b/Documentation/devicetree/bindings/net/nfc/st21nfca.txt index 7bb2e213d6f9..5ee9440fa9ad 100644 --- a/Documentation/devicetree/bindings/net/nfc/st21nfca.txt +++ b/Documentation/devicetree/bindings/net/nfc/st21nfca.txt @@ -20,7 +20,6 @@ Example (for ARM-based BeagleBoard xM with ST21NFCA on I2C2): &i2c2 { - status = "okay"; st21nfca: st21nfca@1 { diff --git a/Documentation/devicetree/bindings/net/nfc/st95hf.txt b/Documentation/devicetree/bindings/net/nfc/st95hf.txt index ea3178bc9ddd..08a202e00d47 100644 --- a/Documentation/devicetree/bindings/net/nfc/st95hf.txt +++ b/Documentation/devicetree/bindings/net/nfc/st95hf.txt @@ -35,12 +35,10 @@ spi@9840000 { #address-cells = <1>; #size-cells = <0>; cs-gpios = <&pio0 4>; - status = "okay"; st95hf@0{ reg = <0>; compatible = "st,st95hf"; - status = "okay"; spi-max-frequency = <1000000>; enable-gpio = <&pio4 0>; interrupt-parent = <&pio0>; diff --git a/Documentation/devicetree/bindings/net/nfc/trf7970a.txt b/Documentation/devicetree/bindings/net/nfc/trf7970a.txt index 60c833d62181..5ca9362ef127 100644 --- a/Documentation/devicetree/bindings/net/nfc/trf7970a.txt +++ b/Documentation/devicetree/bindings/net/nfc/trf7970a.txt @@ -23,7 +23,6 @@ Optional SoC Specific Properties: Example (for ARM-based BeagleBone with TRF7970A on SPI1): &spi1 { - status = "okay"; nfc@0 { compatible = "ti,trf7970a"; @@ -41,6 +40,5 @@ Example (for ARM-based BeagleBone with TRF7970A on SPI1): irq-status-read-quirk; en2-rf-quirk; clock-frequency = <27120000>; - status = "okay"; }; }; diff --git a/Documentation/devicetree/bindings/net/oxnas-dwmac.txt b/Documentation/devicetree/bindings/net/oxnas-dwmac.txt index df0534e2eda1..d7117a22fd87 100644 --- a/Documentation/devicetree/bindings/net/oxnas-dwmac.txt +++ b/Documentation/devicetree/bindings/net/oxnas-dwmac.txt @@ -35,5 +35,4 @@ etha: ethernet@40400000 { /* Regmap for sys registers */ oxsemi,sys-ctrl = <&sys>; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/net/phy.txt b/Documentation/devicetree/bindings/net/phy.txt index b55857696fc3..77d0b2a61ffa 100644 --- a/Documentation/devicetree/bindings/net/phy.txt +++ 
b/Documentation/devicetree/bindings/net/phy.txt @@ -2,11 +2,7 @@ PHY nodes Required properties: - - interrupts : where a is the interrupt number and b is a - field that represents an encoding of the sense and level - information for the interrupt. This should be encoded based on - the information in section 2) depending on the type of interrupt - controller you have. + - interrupts : interrupt specifier for the sole interrupt. - interrupt-parent : the phandle for the interrupt controller that services interrupts for this device. - reg : The ID number for the phy, usually a small integer @@ -52,11 +48,16 @@ Optional Properties: Mark the corresponding energy efficient ethernet mode as broken and request the ethernet to stop advertising it. +- phy-is-integrated: If set, indicates that the PHY is integrated into the same + physical package as the Ethernet MAC. If needed, muxers should be configured + to ensure the integrated PHY is used. The absence of this property indicates + the muxers should be configured so that the external PHY is used. + Example: ethernet-phy@0 { compatible = "ethernet-phy-id0141.0e90", "ethernet-phy-ieee802.3-c22"; - interrupt-parent = <40000>; - interrupts = <35 1>; + interrupt-parent = <&PIC>; + interrupts = <35 IRQ_TYPE_EDGE_RISING>; reg = <0>; }; diff --git a/Documentation/devicetree/bindings/net/qca,qca7000.txt b/Documentation/devicetree/bindings/net/qca,qca7000.txt index 6d9efb2eb9a5..3987846b3fd3 100644 --- a/Documentation/devicetree/bindings/net/qca,qca7000.txt +++ b/Documentation/devicetree/bindings/net/qca,qca7000.txt @@ -41,7 +41,6 @@ ssp2: spi@80014000 { compatible = "fsl,imx28-spi"; pinctrl-names = "default"; pinctrl-0 = <&spi2_pins_a>; - status = "okay"; qca7000: ethernet@0 { compatible = "qca,qca7000"; @@ -78,7 +77,6 @@ auart0: serial@8006a000 { reg = <0x8006a000 0x2000>; pinctrl-names = "default"; pinctrl-0 = <&auart0_2pins_a>; - status = "okay"; qca7000: ethernet { compatible = "qca,qca7000"; diff --git a/Documentation/devicetree/bindings/net/renesas,ravb.txt b/Documentation/devicetree/bindings/net/renesas,ravb.txt index b519503be51a..16723535e1aa 100644 --- a/Documentation/devicetree/bindings/net/renesas,ravb.txt +++ b/Documentation/devicetree/bindings/net/renesas,ravb.txt @@ -4,19 +4,25 @@ This file provides information on what the device node for the Ethernet AVB interface contains. Required properties: -- compatible: "renesas,etheravb-r8a7790" if the device is a part of R8A7790 SoC. - "renesas,etheravb-r8a7791" if the device is a part of R8A7791 SoC. - "renesas,etheravb-r8a7792" if the device is a part of R8A7792 SoC. - "renesas,etheravb-r8a7793" if the device is a part of R8A7793 SoC. - "renesas,etheravb-r8a7794" if the device is a part of R8A7794 SoC. - "renesas,etheravb-r8a7795" if the device is a part of R8A7795 SoC. - "renesas,etheravb-r8a7796" if the device is a part of R8A7796 SoC. - "renesas,etheravb-rcar-gen2" for generic R-Car Gen 2 compatible interface. - "renesas,etheravb-rcar-gen3" for generic R-Car Gen 3 compatible interface. +- compatible: Must contain one or more of the following: + - "renesas,etheravb-r8a7743" for the R8A7743 SoC. + - "renesas,etheravb-r8a7745" for the R8A7745 SoC. + - "renesas,etheravb-r8a7790" for the R8A7790 SoC. + - "renesas,etheravb-r8a7791" for the R8A7791 SoC. + - "renesas,etheravb-r8a7792" for the R8A7792 SoC. + - "renesas,etheravb-r8a7793" for the R8A7793 SoC. + - "renesas,etheravb-r8a7794" for the R8A7794 SoC. + - "renesas,etheravb-rcar-gen2" as a fallback for the above + R-Car Gen2 and RZ/G1 devices. 
- When compatible with the generic version, nodes must list the - SoC-specific version corresponding to the platform first - followed by the generic version. + - "renesas,etheravb-r8a7795" for the R8A7795 SoC. + - "renesas,etheravb-r8a7796" for the R8A7796 SoC. + - "renesas,etheravb-rcar-gen3" as a fallback for the above + R-Car Gen3 devices. + + When compatible with the generic version, nodes must list the + SoC-specific version corresponding to the platform first followed by + the generic version. - reg: offset and length of (1) the register block and (2) the stream buffer. - interrupts: A list of interrupt-specifiers, one for each entry in diff --git a/Documentation/devicetree/bindings/net/rockchip-dwmac.txt b/Documentation/devicetree/bindings/net/rockchip-dwmac.txt index 8f427550720a..9c16ee2965a2 100644 --- a/Documentation/devicetree/bindings/net/rockchip-dwmac.txt +++ b/Documentation/devicetree/bindings/net/rockchip-dwmac.txt @@ -4,12 +4,14 @@ The device node has following properties. Required properties: - compatible: should be "rockchip,-gamc" + "rockchip,rk3128-gmac": found on RK312x SoCs "rockchip,rk3228-gmac": found on RK322x SoCs "rockchip,rk3288-gmac": found on RK3288 SoCs "rockchip,rk3328-gmac": found on RK3328 SoCs "rockchip,rk3366-gmac": found on RK3366 SoCs "rockchip,rk3368-gmac": found on RK3368 SoCs "rockchip,rk3399-gmac": found on RK3399 SoCs + "rockchip,rv1108-gmac": found on RV1108 SoCs - reg: addresses and length of the register sets for the device. - interrupts: Should contain the GMAC interrupts. - interrupt-names: Should contain the interrupt names "macirq". @@ -70,5 +72,4 @@ gmac: ethernet@ff290000 { tx_delay = <0x30>; rx_delay = <0x10>; - status = "ok"; }; diff --git a/Documentation/devicetree/bindings/net/sff,sfp.txt b/Documentation/devicetree/bindings/net/sff,sfp.txt new file mode 100644 index 000000000000..60e970ce10ee --- /dev/null +++ b/Documentation/devicetree/bindings/net/sff,sfp.txt @@ -0,0 +1,76 @@ +Small Form Factor (SFF) Committee Small Form-factor Pluggable (SFP) +Transceiver + +Required properties: + +- compatible : must be "sff,sfp" + +Optional Properties: + +- i2c-bus : phandle of an I2C bus controller for the SFP two wire serial + interface + +- mod-def0-gpios : GPIO phandle and a specifier of the MOD-DEF0 (AKA Mod_ABS) + module presence input gpio signal, active (module absent) high + +- los-gpios : GPIO phandle and a specifier of the Receiver Loss of Signal + Indication input gpio signal, active (signal lost) high + +- tx-fault-gpios : GPIO phandle and a specifier of the Module Transmitter + Fault input gpio signal, active (fault condition) high + +- tx-disable-gpios : GPIO phandle and a specifier of the Transmitter Disable + output gpio signal, active (Tx disable) high + +- rate-select0-gpios : GPIO phandle and a specifier of the Rx Signaling Rate + Select (AKA RS0) output gpio signal, low: low Rx rate, high: high Rx rate + +- rate-select1-gpios : GPIO phandle and a specifier of the Tx Signaling Rate + Select (AKA RS1) output gpio signal (SFP+ only), low: low Tx rate, high: + high Tx rate + +Example #1: Direct serdes to SFP connection + +sfp_eth3: sfp-eth3 { + compatible = "sff,sfp"; + i2c-bus = <&sfp_1g_i2c>; + los-gpios = <&cpm_gpio2 22 GPIO_ACTIVE_HIGH>; + mod-def0-gpios = <&cpm_gpio2 21 GPIO_ACTIVE_LOW>; + pinctrl-names = "default"; + pinctrl-0 = <&cpm_sfp_1g_pins &cps_sfp_1g_pins>; + tx-disable-gpios = <&cps_gpio1 24 GPIO_ACTIVE_HIGH>; + tx-fault-gpios = <&cpm_gpio2 19 GPIO_ACTIVE_HIGH>; +}; + +&cps_emac3 { + phy-names = "comphy"; + phys 
= <&cps_comphy5 0>; + sfp = <&sfp_eth3>; +}; + +Example #2: Serdes to PHY to SFP connection + +sfp_eth0: sfp-eth0 { + compatible = "sff,sfp"; + i2c-bus = <&sfpp0_i2c>; + los-gpios = <&cps_gpio1 28 GPIO_ACTIVE_HIGH>; + mod-def0-gpios = <&cps_gpio1 27 GPIO_ACTIVE_LOW>; + pinctrl-names = "default"; + pinctrl-0 = <&cps_sfpp0_pins>; + tx-disable-gpios = <&cps_gpio1 29 GPIO_ACTIVE_HIGH>; + tx-fault-gpios = <&cps_gpio1 26 GPIO_ACTIVE_HIGH>; +}; + +p0_phy: ethernet-phy@0 { + compatible = "ethernet-phy-ieee802.3-c45"; + pinctrl-names = "default"; + pinctrl-0 = <&cpm_phy0_pins &cps_phy0_pins>; + reg = <0>; + interrupt = <&cpm_gpio2 18 IRQ_TYPE_EDGE_FALLING>; + sfp = <&sfp_eth0>; +}; + +&cpm_eth0 { + phy = <&p0_phy>; + phy-mode = "10gbase-kr"; +}; diff --git a/Documentation/devicetree/bindings/net/smsc-lan87xx.txt b/Documentation/devicetree/bindings/net/smsc-lan87xx.txt index 974edd5c85cc..8b7c719b0bb9 100644 --- a/Documentation/devicetree/bindings/net/smsc-lan87xx.txt +++ b/Documentation/devicetree/bindings/net/smsc-lan87xx.txt @@ -15,7 +15,6 @@ smsc phy with disabled energy detect mode on an am335x based board. pinctrl-names = "default", "sleep"; pinctrl-0 = <&davinci_mdio_default>; pinctrl-1 = <&davinci_mdio_sleep>; - status = "okay"; ethernetphy0: ethernet-phy@0 { reg = <0>; diff --git a/Documentation/devicetree/bindings/net/socfpga-dwmac.txt b/Documentation/devicetree/bindings/net/socfpga-dwmac.txt index 2e68a3cd8513..b30d04b54ee9 100644 --- a/Documentation/devicetree/bindings/net/socfpga-dwmac.txt +++ b/Documentation/devicetree/bindings/net/socfpga-dwmac.txt @@ -40,7 +40,6 @@ gmii_to_sgmii_converter: phy@0x100000240 { gmac0: ethernet@ff700000 { compatible = "altr,socfpga-stmmac", "snps,dwmac-3.70a", "snps,dwmac"; altr,sysmgr-syscon = <&sysmgr 0x60 0>; - status = "disabled"; reg = <0xff700000 0x2000>; interrupts = <0 115 4>; interrupt-names = "macirq"; diff --git a/Documentation/devicetree/bindings/net/sti-dwmac.txt b/Documentation/devicetree/bindings/net/sti-dwmac.txt index d05c1e1fd9b6..062c5174add3 100644 --- a/Documentation/devicetree/bindings/net/sti-dwmac.txt +++ b/Documentation/devicetree/bindings/net/sti-dwmac.txt @@ -34,7 +34,6 @@ Example: ethernet0: dwmac@9630000 { device_type = "network"; - status = "disabled"; compatible = "st,stih407-dwmac", "snps,dwmac", "snps,dwmac-3.710"; reg = <0x9630000 0x8000>; reg-names = "stmmaceth"; diff --git a/Documentation/devicetree/bindings/net/stm32-dwmac.txt b/Documentation/devicetree/bindings/net/stm32-dwmac.txt index c35afb7e956a..489dbcb66c5a 100644 --- a/Documentation/devicetree/bindings/net/stm32-dwmac.txt +++ b/Documentation/devicetree/bindings/net/stm32-dwmac.txt @@ -18,7 +18,6 @@ Example: ethernet@40028000 { compatible = "st,stm32-dwmac", "snps,dwmac-3.50a"; - status = "disabled"; reg = <0x40028000 0x8000>; reg-names = "stmmaceth"; interrupts = <0 61 0>, <0 62 0>; diff --git a/Documentation/devicetree/bindings/net/wireless/brcm,bcm43xx-fmac.txt b/Documentation/devicetree/bindings/net/wireless/brcm,bcm43xx-fmac.txt index 590f622188de..b2bd4704f859 100644 --- a/Documentation/devicetree/bindings/net/wireless/brcm,bcm43xx-fmac.txt +++ b/Documentation/devicetree/bindings/net/wireless/brcm,bcm43xx-fmac.txt @@ -29,7 +29,6 @@ mmc3: mmc@01c12000 { vmmc-supply = <®_vmmc3>; bus-width = <4>; non-removable; - status = "okay"; brcmf: wifi@1 { reg = <1>; diff --git a/Documentation/devicetree/bindings/net/wireless/esp,esp8089.txt b/Documentation/devicetree/bindings/net/wireless/esp,esp8089.txt index 19331bb4ff6e..6830c4786f8a 100644 --- 
a/Documentation/devicetree/bindings/net/wireless/esp,esp8089.txt +++ b/Documentation/devicetree/bindings/net/wireless/esp,esp8089.txt @@ -21,7 +21,6 @@ Example: mmc-pwrseq = <&wifi_pwrseq>; bus-width = <4>; non-removable; - status = "okay"; esp8089: sdio_wifi@1 { compatible = "esp,esp8089"; diff --git a/Documentation/devicetree/bindings/net/wireless/marvell-8xxx.txt b/Documentation/devicetree/bindings/net/wireless/marvell-8xxx.txt index 0854451ff91d..59de8646862d 100644 --- a/Documentation/devicetree/bindings/net/wireless/marvell-8xxx.txt +++ b/Documentation/devicetree/bindings/net/wireless/marvell-8xxx.txt @@ -48,7 +48,6 @@ IRQ pin 38 is used as system wakeup source interrupt. wakeup pin 3 is configured so that firmware can wakeup host using this device side pin. &mmc3 { - status = "okay"; vmmc-supply = <&wlan_en_reg>; mmc-pwrseq = <&wifi_pwrseq>; bus-width = <4>; diff --git a/Documentation/devicetree/bindings/net/wireless/ti,wlcore.txt b/Documentation/devicetree/bindings/net/wireless/ti,wlcore.txt index 7b2cbb14113e..f42f6b0f1bc7 100644 --- a/Documentation/devicetree/bindings/net/wireless/ti,wlcore.txt +++ b/Documentation/devicetree/bindings/net/wireless/ti,wlcore.txt @@ -31,7 +31,6 @@ clock, new bindings (for parsing the clock nodes) have to be added. Example: &mmc3 { - status = "okay"; vmmc-supply = <&wlan_en_reg>; bus-width = <4>; cap-power-off-card; diff --git a/Documentation/devicetree/bindings/net/xilinx_axienet.txt b/Documentation/devicetree/bindings/net/xilinx_axienet.txt new file mode 100644 index 000000000000..38f9ec076743 --- /dev/null +++ b/Documentation/devicetree/bindings/net/xilinx_axienet.txt @@ -0,0 +1,55 @@ +XILINX AXI ETHERNET Device Tree Bindings +-------------------------------------------------------- + +Also called AXI 1G/2.5G Ethernet Subsystem, the xilinx axi ethernet IP core +provides connectivity to an external ethernet PHY supporting different +interfaces: MII, GMII, RGMII, SGMII, 1000BaseX. It also includes two +segments of memory for buffering TX and RX, as well as the capability of +offloading TX/RX checksum calculation off the processor. + +Management configuration is done through the AXI interface, while payload is +sent and received through means of an AXI DMA controller. This driver +includes the DMA driver code, so this driver is incompatible with AXI DMA +driver. + +For more details about mdio please refer phy.txt file in the same directory. + +Required properties: +- compatible : Must be one of "xlnx,axi-ethernet-1.00.a", + "xlnx,axi-ethernet-1.01.a", "xlnx,axi-ethernet-2.01.a" +- reg : Address and length of the IO space. +- interrupts : Should be a list of two interrupt, TX and RX. +- phy-handle : Should point to the external phy device. + See ethernet.txt file in the same directory. +- xlnx,rxmem : Set to allocated memory buffer for Rx/Tx in the hardware + +Optional properties: +- phy-mode : See ethernet.txt +- xlnx,phy-type : Deprecated, do not use, but still accepted in preference + to phy-mode. 
+- xlnx,txcsum : 0 or empty for disabling TX checksum offload, + 1 to enable partial TX checksum offload, + 2 to enable full TX checksum offload +- xlnx,rxcsum : Same values as xlnx,txcsum but for RX checksum offload + +Example: + axi_ethernet_eth: ethernet@40c00000 { + compatible = "xlnx,axi-ethernet-1.00.a"; + device_type = "network"; + interrupt-parent = <µblaze_0_axi_intc>; + interrupts = <2 0>; + phy-mode = "mii"; + reg = <0x40c00000 0x40000>; + xlnx,rxcsum = <0x2>; + xlnx,rxmem = <0x800>; + xlnx,txcsum = <0x2>; + phy-handle = <&phy0>; + axi_ethernetlite_0_mdio: mdio { + #address-cells = <1>; + #size-cells = <0>; + phy0: phy@0 { + device_type = "ethernet-phy"; + reg = <1>; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/nvmem/mtk-efuse.txt b/Documentation/devicetree/bindings/nvmem/mtk-efuse.txt index 74cf52908a6c..0668c45a156d 100644 --- a/Documentation/devicetree/bindings/nvmem/mtk-efuse.txt +++ b/Documentation/devicetree/bindings/nvmem/mtk-efuse.txt @@ -3,7 +3,10 @@ This binding is intended to represent MTK-EFUSE which is found in most Mediatek SOCs. Required properties: -- compatible: should be "mediatek,mt8173-efuse" or "mediatek,efuse" +- compatible: should be + "mediatek,mt7622-efuse", "mediatek,efuse": for MT7622 + "mediatek,mt7623-efuse", "mediatek,efuse": for MT7623 + "mediatek,mt8173-efuse" or "mediatek,efuse": for MT8173 - reg: Should contain registers location and length = Data cells = diff --git a/Documentation/devicetree/bindings/nvmem/mxs-ocotp.txt b/Documentation/devicetree/bindings/nvmem/mxs-ocotp.txt index daebce9e6b07..372c72fd64dc 100644 --- a/Documentation/devicetree/bindings/nvmem/mxs-ocotp.txt +++ b/Documentation/devicetree/bindings/nvmem/mxs-ocotp.txt @@ -21,5 +21,4 @@ Example for i.MX28: #size-cells = <1>; reg = <0x8002c000 0x2000>; clocks = <&clks 25>; - status = "okay"; }; diff --git a/Documentation/devicetree/bindings/opp/opp.txt b/Documentation/devicetree/bindings/opp/opp.txt index e36d261b9ba6..9d733af26be7 100644 --- a/Documentation/devicetree/bindings/opp/opp.txt +++ b/Documentation/devicetree/bindings/opp/opp.txt @@ -464,7 +464,6 @@ Example 5: opp-supported-hw opp_table { compatible = "operating-points-v2"; - status = "okay"; opp-shared; opp-600000000 { diff --git a/Documentation/devicetree/bindings/pci/83xx-512x-pci.txt b/Documentation/devicetree/bindings/pci/83xx-512x-pci.txt index 35a465362408..b9165b72473c 100644 --- a/Documentation/devicetree/bindings/pci/83xx-512x-pci.txt +++ b/Documentation/devicetree/bindings/pci/83xx-512x-pci.txt @@ -1,11 +1,11 @@ * Freescale 83xx and 512x PCI bridges -Freescale 83xx and 512x SOCs include the same pci bridge core. +Freescale 83xx and 512x SOCs include the same PCI bridge core. 
83xx/512x specific notes: - reg: should contain two address length tuples - The first is for the internal pci bridge registers - The second is for the pci config space access registers + The first is for the internal PCI bridge registers + The second is for the PCI config space access registers Example (MPC8313ERDB) pci0: pci@e0008500 { diff --git a/Documentation/devicetree/bindings/pci/aardvark-pci.txt b/Documentation/devicetree/bindings/pci/aardvark-pci.txt index bbcd9f4c501f..310ef7145c47 100644 --- a/Documentation/devicetree/bindings/pci/aardvark-pci.txt +++ b/Documentation/devicetree/bindings/pci/aardvark-pci.txt @@ -33,7 +33,6 @@ Example: pcie0: pcie@d0070000 { compatible = "marvell,armada-3700-pcie"; device_type = "pci"; - status = "disabled"; reg = <0 0xd0070000 0 0x20000>; #address-cells = <3>; #size-cells = <2>; diff --git a/Documentation/devicetree/bindings/pci/altera-pcie.txt b/Documentation/devicetree/bindings/pci/altera-pcie.txt index 2951a6a50704..495880193adc 100644 --- a/Documentation/devicetree/bindings/pci/altera-pcie.txt +++ b/Documentation/devicetree/bindings/pci/altera-pcie.txt @@ -7,21 +7,21 @@ Required properties: "Txs": TX slave port region "Cra": Control register access region - interrupt-parent: interrupt source phandle. -- interrupts: specifies the interrupt source of the parent interrupt controller. - The format of the interrupt specifier depends on the parent interrupt - controller. +- interrupts: specifies the interrupt source of the parent interrupt + controller. The format of the interrupt specifier depends + on the parent interrupt controller. - device_type: must be "pci" - #address-cells: set to <3> -- #size-cells: set to <2> +- #size-cells: set to <2> - #interrupt-cells: set to <1> -- ranges: describes the translation of addresses for root ports and standard - PCI regions. +- ranges: describes the translation of addresses for root ports and + standard PCI regions. - interrupt-map-mask and interrupt-map: standard PCI properties to define the mapping of the PCIe interface to interrupt numbers. Optional properties: -- msi-parent: Link to the hardware entity that serves as the MSI controller for this PCIe - controller. +- msi-parent: Link to the hardware entity that serves as the MSI controller + for this PCIe controller. - bus-range: PCI bus numbers covered Example @@ -45,5 +45,5 @@ Example <0 0 0 3 &pcie_0 3>, <0 0 0 4 &pcie_0 4>; ranges = <0x82000000 0x00000000 0x00000000 0xc0000000 0x00000000 0x10000000 - 0x82000000 0x00000000 0x10000000 0xd0000000 0x00000000 0x10000000>; + 0x82000000 0x00000000 0x10000000 0xd0000000 0x00000000 0x10000000>; }; diff --git a/Documentation/devicetree/bindings/pci/axis,artpec6-pcie.txt b/Documentation/devicetree/bindings/pci/axis,artpec6-pcie.txt index 5ecaea1e6eee..4e4aee4439ea 100644 --- a/Documentation/devicetree/bindings/pci/axis,artpec6-pcie.txt +++ b/Documentation/devicetree/bindings/pci/axis,artpec6-pcie.txt @@ -6,7 +6,7 @@ and thus inherits all the common properties defined in designware-pcie.txt. Required properties: - compatible: "axis,artpec6-pcie", "snps,dw-pcie" - reg: base addresses and lengths of the PCIe controller (DBI), - the phy controller, and configuration address space. + the PHY controller, and configuration address space. 
- reg-names: Must include the following entries: - "dbi" - "phy" diff --git a/Documentation/devicetree/bindings/pci/designware-pcie.txt b/Documentation/devicetree/bindings/pci/designware-pcie.txt index b2480dd38c11..1da7ade3183c 100644 --- a/Documentation/devicetree/bindings/pci/designware-pcie.txt +++ b/Documentation/devicetree/bindings/pci/designware-pcie.txt @@ -1,4 +1,4 @@ -* Synopsys Designware PCIe interface +* Synopsys DesignWare PCIe interface Required properties: - compatible: should contain "snps,dw-pcie" to identify the core. @@ -17,29 +17,27 @@ RC mode: properties to define the mapping of the PCIe interface to interrupt numbers. EP mode: -- num-ib-windows: number of inbound address translation - windows -- num-ob-windows: number of outbound address translation - windows +- num-ib-windows: number of inbound address translation windows +- num-ob-windows: number of outbound address translation windows Optional properties: - num-lanes: number of lanes to use (this property should be specified unless the link is brought already up in BIOS) -- reset-gpio: gpio pin number of power good signal +- reset-gpio: GPIO pin number of power good signal - clocks: Must contain an entry for each entry in clock-names. See ../clocks/clock-bindings.txt for details. - clock-names: Must include the following entries: - "pcie" - "pcie_bus" RC mode: -- num-viewport: number of view ports configured in - hardware. If a platform does not specify it, the driver assumes 2. -- bus-range: PCI bus numbers covered (it is recommended - for new devicetrees to specify this property, to keep backwards - compatibility a range of 0x00-0xff is assumed if not present) +- num-viewport: number of view ports configured in hardware. If a platform + does not specify it, the driver assumes 2. +- bus-range: PCI bus numbers covered (it is recommended for new devicetrees + to specify this property, to keep backwards compatibility a range of + 0x00-0xff is assumed if not present) + EP mode: -- max-functions: maximum number of functions that can be - configured +- max-functions: maximum number of functions that can be configured Example configuration: diff --git a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt index cf92d3ba5a26..7b1e48bf172b 100644 --- a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt +++ b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt @@ -1,6 +1,6 @@ * Freescale i.MX6 PCIe interface -This PCIe host controller is based on the Synopsis Designware PCIe IP +This PCIe host controller is based on the Synopsys DesignWare PCIe IP and thus inherits all the common properties defined in designware-pcie.txt. Required properties: diff --git a/Documentation/devicetree/bindings/pci/hisilicon-pcie.txt b/Documentation/devicetree/bindings/pci/hisilicon-pcie.txt index a339dbb15493..bdb7ab39d2d7 100644 --- a/Documentation/devicetree/bindings/pci/hisilicon-pcie.txt +++ b/Documentation/devicetree/bindings/pci/hisilicon-pcie.txt @@ -1,7 +1,7 @@ HiSilicon Hip05 and Hip06 PCIe host bridge DT description -HiSilicon PCIe host controller is based on Designware PCI core. -It shares common functions with PCIe Designware core driver and inherits +HiSilicon PCIe host controller is based on the Synopsys DesignWare PCI core. +It shares common functions with the PCIe DesignWare core driver and inherits common properties defined in Documentation/devicetree/bindings/pci/designware-pci.txt. 
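As a minimal illustration of the common DesignWare RC properties discussed above (num-lanes, num-viewport, bus-range and the "pcie"/"pcie_bus" clocks), which are shared by the derivative controllers in this series including the HiSilicon ports, a host-bridge node might look like the sketch below; every address, interrupt specifier and phandle here is a placeholder rather than a value taken from a real SoC:

	pcie: pcie@40000000 {
		compatible = "snps,dw-pcie";
		reg = <0x40000000 0x1000>, <0x40100000 0x80000>;
		reg-names = "dbi", "config";
		device_type = "pci";
		#address-cells = <3>;
		#size-cells = <2>;
		ranges = <0x82000000 0 0x40200000 0x40200000 0 0x00100000>;
		bus-range = <0x00 0xff>;	/* recommended for new devicetrees */
		num-lanes = <1>;
		num-viewport = <4>;		/* the driver assumes 2 if absent */
		clocks = <&clk_pcie>, <&clk_pcie_bus>;
		clock-names = "pcie", "pcie_bus";
		#interrupt-cells = <1>;
		interrupt-map-mask = <0 0 0 7>;
		interrupt-map = <0 0 0 1 &gic 0 100 4>,
				<0 0 0 2 &gic 0 101 4>,
				<0 0 0 3 &gic 0 102 4>,
				<0 0 0 4 &gic 0 103 4>;
	};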
@@ -40,7 +40,6 @@ Hip05 Example (note that Hip06 is the same except compatible): 0x0 0 0 2 &mbigen_pcie 2 11 0x0 0 0 3 &mbigen_pcie 3 12 0x0 0 0 4 &mbigen_pcie 4 13>; - status = "ok"; }; HiSilicon Hip06/Hip07 PCIe host bridge DT (almost-ECAM) description. @@ -83,5 +82,4 @@ Example: 0x0 0 0 2 &mbigen_pcie0 650 4 0x0 0 0 3 &mbigen_pcie0 650 4 0x0 0 0 4 &mbigen_pcie0 650 4>; - status = "ok"; }; diff --git a/Documentation/devicetree/bindings/pci/kirin-pcie.txt b/Documentation/devicetree/bindings/pci/kirin-pcie.txt index 68ffa0fbcd73..6e217c63123d 100644 --- a/Documentation/devicetree/bindings/pci/kirin-pcie.txt +++ b/Documentation/devicetree/bindings/pci/kirin-pcie.txt @@ -1,8 +1,8 @@ HiSilicon Kirin SoCs PCIe host DT description -Kirin PCIe host controller is based on Designware PCI core. -It shares common functions with PCIe Designware core driver -and inherits common properties defined in +Kirin PCIe host controller is based on the Synopsys DesignWare PCI core. +It shares common functions with the PCIe DesignWare core driver and +inherits common properties defined in Documentation/devicetree/bindings/pci/designware-pci.txt. Additional properties are described here: @@ -16,7 +16,7 @@ Required properties "apb": apb Ctrl register defined by Kirin; "phy": apb PHY register defined by Kirin; "config": PCIe configuration space registers. -- reset-gpios: The gpio to generate PCIe perst assert and deassert signal. +- reset-gpios: The GPIO to generate PCIe PERST# assert and deassert signal. Optional properties: diff --git a/Documentation/devicetree/bindings/pci/layerscape-pci.txt b/Documentation/devicetree/bindings/pci/layerscape-pci.txt index ee1c72d5162e..c0484da0f20d 100644 --- a/Documentation/devicetree/bindings/pci/layerscape-pci.txt +++ b/Documentation/devicetree/bindings/pci/layerscape-pci.txt @@ -15,8 +15,10 @@ Required properties: - compatible: should contain the platform identifier such as: "fsl,ls1021a-pcie", "snps,dw-pcie" "fsl,ls2080a-pcie", "fsl,ls2085a-pcie", "snps,dw-pcie" + "fsl,ls2088a-pcie" + "fsl,ls1088a-pcie" "fsl,ls1046a-pcie" -- reg: base addresses and lengths of the PCIe controller +- reg: base addresses and lengths of the PCIe controller register blocks. - interrupts: A list of interrupt outputs of the controller. Must contain an entry for each entry in the interrupt-names property. - interrupt-names: Must include the following entries: diff --git a/Documentation/devicetree/bindings/pci/mediatek,mt7623-pcie.txt b/Documentation/devicetree/bindings/pci/mediatek,mt7623-pcie.txt deleted file mode 100644 index fe80dda9bf73..000000000000 --- a/Documentation/devicetree/bindings/pci/mediatek,mt7623-pcie.txt +++ /dev/null @@ -1,130 +0,0 @@ -MediaTek Gen2 PCIe controller which is available on MT7623 series SoCs - -PCIe subsys supports single root complex (RC) with 3 Root Ports. Each root -ports supports a Gen2 1-lane Link and has PIPE interface to PHY. - -Required properties: -- compatible: Should contain "mediatek,mt7623-pcie". -- device_type: Must be "pci" -- reg: Base addresses and lengths of the PCIe controller. -- #address-cells: Address representation for root ports (must be 3) -- #size-cells: Size representation for root ports (must be 2) -- #interrupt-cells: Size representation for interrupts (must be 1) -- interrupt-map-mask and interrupt-map: Standard PCI IRQ mapping properties - Please refer to the standard PCI bus binding document for a more detailed - explanation. -- clocks: Must contain an entry for each entry in clock-names. 
- See ../clocks/clock-bindings.txt for details. -- clock-names: Must include the following entries: - - free_ck :for reference clock of PCIe subsys - - sys_ck0 :for clock of Port0 - - sys_ck1 :for clock of Port1 - - sys_ck2 :for clock of Port2 -- resets: Must contain an entry for each entry in reset-names. - See ../reset/reset.txt for details. -- reset-names: Must include the following entries: - - pcie-rst0 :port0 reset - - pcie-rst1 :port1 reset - - pcie-rst2 :port2 reset -- phys: List of PHY specifiers (used by generic PHY framework). -- phy-names : Must be "pcie-phy0", "pcie-phy1", "pcie-phyN".. based on the - number of PHYs as specified in *phys* property. -- power-domains: A phandle and power domain specifier pair to the power domain - which is responsible for collapsing and restoring power to the peripheral. -- bus-range: Range of bus numbers associated with this controller. -- ranges: Ranges for the PCI memory and I/O regions. - -In addition, the device tree node must have sub-nodes describing each -PCIe port interface, having the following mandatory properties: - -Required properties: -- device_type: Must be "pci" -- reg: Only the first four bytes are used to refer to the correct bus number - and device number. -- #address-cells: Must be 3 -- #size-cells: Must be 2 -- #interrupt-cells: Must be 1 -- interrupt-map-mask and interrupt-map: Standard PCI IRQ mapping properties - Please refer to the standard PCI bus binding document for a more detailed - explanation. -- ranges: Sub-ranges distributed from the PCIe controller node. An empty - property is sufficient. -- num-lanes: Number of lanes to use for this port. - -Examples: - - hifsys: syscon@1a000000 { - compatible = "mediatek,mt7623-hifsys", - "mediatek,mt2701-hifsys", - "syscon"; - reg = <0 0x1a000000 0 0x1000>; - #clock-cells = <1>; - #reset-cells = <1>; - }; - - pcie: pcie-controller@1a140000 { - compatible = "mediatek,mt7623-pcie"; - device_type = "pci"; - reg = <0 0x1a140000 0 0x1000>, /* PCIe shared registers */ - <0 0x1a142000 0 0x1000>, /* Port0 registers */ - <0 0x1a143000 0 0x1000>, /* Port1 registers */ - <0 0x1a144000 0 0x1000>; /* Port2 registers */ - #address-cells = <3>; - #size-cells = <2>; - #interrupt-cells = <1>; - interrupt-map-mask = <0xf800 0 0 0>; - interrupt-map = <0x0000 0 0 0 &sysirq GIC_SPI 193 IRQ_TYPE_LEVEL_LOW>, - <0x0800 0 0 0 &sysirq GIC_SPI 194 IRQ_TYPE_LEVEL_LOW>, - <0x1000 0 0 0 &sysirq GIC_SPI 195 IRQ_TYPE_LEVEL_LOW>; - clocks = <&topckgen CLK_TOP_ETHIF_SEL>, - <&hifsys CLK_HIFSYS_PCIE0>, - <&hifsys CLK_HIFSYS_PCIE1>, - <&hifsys CLK_HIFSYS_PCIE2>; - clock-names = "free_ck", "sys_ck0", "sys_ck1", "sys_ck2"; - resets = <&hifsys MT2701_HIFSYS_PCIE0_RST>, - <&hifsys MT2701_HIFSYS_PCIE1_RST>, - <&hifsys MT2701_HIFSYS_PCIE2_RST>; - reset-names = "pcie-rst0", "pcie-rst1", "pcie-rst2"; - phys = <&pcie0_phy>, <&pcie1_phy>, <&pcie2_phy>; - phy-names = "pcie-phy0", "pcie-phy1", "pcie-phy2"; - power-domains = <&scpsys MT2701_POWER_DOMAIN_HIF>; - bus-range = <0x00 0xff>; - ranges = <0x81000000 0 0x1a160000 0 0x1a160000 0 0x00010000 /* I/O space */ - 0x83000000 0 0x60000000 0 0x60000000 0 0x10000000>; /* memory space */ - - pcie@0,0 { - device_type = "pci"; - reg = <0x0000 0 0 0 0>; - #address-cells = <3>; - #size-cells = <2>; - #interrupt-cells = <1>; - interrupt-map-mask = <0 0 0 0>; - interrupt-map = <0 0 0 0 &sysirq GIC_SPI 193 IRQ_TYPE_LEVEL_LOW>; - ranges; - num-lanes = <1>; - }; - - pcie@1,0 { - device_type = "pci"; - reg = <0x0800 0 0 0 0>; - #address-cells = <3>; - #size-cells = <2>; - 
#interrupt-cells = <1>; - interrupt-map-mask = <0 0 0 0>; - interrupt-map = <0 0 0 0 &sysirq GIC_SPI 194 IRQ_TYPE_LEVEL_LOW>; - ranges; - num-lanes = <1>; - }; - - pcie@2,0 { - device_type = "pci"; - reg = <0x1000 0 0 0 0>; - #address-cells = <3>; - #size-cells = <2>; - #interrupt-cells = <1>; - interrupt-map-mask = <0 0 0 0>; - interrupt-map = <0 0 0 0 &sysirq GIC_SPI 195 IRQ_TYPE_LEVEL_LOW>; - ranges; - num-lanes = <1>; - }; - }; diff --git a/Documentation/devicetree/bindings/pci/mediatek-pcie.txt b/Documentation/devicetree/bindings/pci/mediatek-pcie.txt new file mode 100644 index 000000000000..3a6ce55dd310 --- /dev/null +++ b/Documentation/devicetree/bindings/pci/mediatek-pcie.txt @@ -0,0 +1,284 @@ +MediaTek Gen2 PCIe controller + +Required properties: +- compatible: Should contain one of the following strings: + "mediatek,mt2701-pcie" + "mediatek,mt2712-pcie" + "mediatek,mt7622-pcie" + "mediatek,mt7623-pcie" +- device_type: Must be "pci" +- reg: Base addresses and lengths of the PCIe subsys and root ports. +- reg-names: Names of the above areas to use during resource lookup. +- #address-cells: Address representation for root ports (must be 3) +- #size-cells: Size representation for root ports (must be 2) +- clocks: Must contain an entry for each entry in clock-names. + See ../clocks/clock-bindings.txt for details. +- clock-names: + Mandatory entries: + - sys_ckN :transaction layer and data link layer clock + Required entries for MT2701/MT7623: + - free_ck :for reference clock of PCIe subsys + Required entries for MT2712/MT7622: + - ahb_ckN :AHB slave interface operating clock for CSR access and RC + initiated MMIO access + Required entries for MT7622: + - axi_ckN :application layer MMIO channel operating clock + - aux_ckN :pe2_mac_bridge and pe2_mac_core operating clock when + pcie_mac_ck/pcie_pipe_ck is turned off + - obff_ckN :OBFF functional block operating clock + - pipe_ckN :LTSSM and PHY/MAC layer operating clock + where N starting from 0 to one less than the number of root ports. +- phys: List of PHY specifiers (used by generic PHY framework). +- phy-names : Must be "pcie-phy0", "pcie-phy1", "pcie-phyN".. based on the + number of PHYs as specified in *phys* property. +- power-domains: A phandle and power domain specifier pair to the power domain + which is responsible for collapsing and restoring power to the peripheral. +- bus-range: Range of bus numbers associated with this controller. +- ranges: Ranges for the PCI memory and I/O regions. + +Required properties for MT7623/MT2701: +- #interrupt-cells: Size representation for interrupts (must be 1) +- interrupt-map-mask and interrupt-map: Standard PCI IRQ mapping properties + Please refer to the standard PCI bus binding document for a more detailed + explanation. +- resets: Must contain an entry for each entry in reset-names. + See ../reset/reset.txt for details. +- reset-names: Must be "pcie-rst0", "pcie-rst1", "pcie-rstN".. based on the + number of root ports. + +Required properties for MT2712/MT7622: +-interrupts: A list of interrupt outputs of the controller, must have one + entry for each PCIe port + +In addition, the device tree node must have sub-nodes describing each +PCIe port interface, having the following mandatory properties: + +Required properties: +- device_type: Must be "pci" +- reg: Only the first four bytes are used to refer to the correct bus number + and device number. 
+- #address-cells: Must be 3 +- #size-cells: Must be 2 +- #interrupt-cells: Must be 1 +- interrupt-map-mask and interrupt-map: Standard PCI IRQ mapping properties + Please refer to the standard PCI bus binding document for a more detailed + explanation. +- ranges: Sub-ranges distributed from the PCIe controller node. An empty + property is sufficient. +- num-lanes: Number of lanes to use for this port. + +Examples for MT7623: + + hifsys: syscon@1a000000 { + compatible = "mediatek,mt7623-hifsys", + "mediatek,mt2701-hifsys", + "syscon"; + reg = <0 0x1a000000 0 0x1000>; + #clock-cells = <1>; + #reset-cells = <1>; + }; + + pcie: pcie-controller@1a140000 { + compatible = "mediatek,mt7623-pcie"; + device_type = "pci"; + reg = <0 0x1a140000 0 0x1000>, /* PCIe shared registers */ + <0 0x1a142000 0 0x1000>, /* Port0 registers */ + <0 0x1a143000 0 0x1000>, /* Port1 registers */ + <0 0x1a144000 0 0x1000>; /* Port2 registers */ + reg-names = "subsys", "port0", "port1", "port2"; + #address-cells = <3>; + #size-cells = <2>; + #interrupt-cells = <1>; + interrupt-map-mask = <0xf800 0 0 0>; + interrupt-map = <0x0000 0 0 0 &sysirq GIC_SPI 193 IRQ_TYPE_LEVEL_LOW>, + <0x0800 0 0 0 &sysirq GIC_SPI 194 IRQ_TYPE_LEVEL_LOW>, + <0x1000 0 0 0 &sysirq GIC_SPI 195 IRQ_TYPE_LEVEL_LOW>; + clocks = <&topckgen CLK_TOP_ETHIF_SEL>, + <&hifsys CLK_HIFSYS_PCIE0>, + <&hifsys CLK_HIFSYS_PCIE1>, + <&hifsys CLK_HIFSYS_PCIE2>; + clock-names = "free_ck", "sys_ck0", "sys_ck1", "sys_ck2"; + resets = <&hifsys MT2701_HIFSYS_PCIE0_RST>, + <&hifsys MT2701_HIFSYS_PCIE1_RST>, + <&hifsys MT2701_HIFSYS_PCIE2_RST>; + reset-names = "pcie-rst0", "pcie-rst1", "pcie-rst2"; + phys = <&pcie0_phy PHY_TYPE_PCIE>, <&pcie1_phy PHY_TYPE_PCIE>, + <&pcie2_phy PHY_TYPE_PCIE>; + phy-names = "pcie-phy0", "pcie-phy1", "pcie-phy2"; + power-domains = <&scpsys MT2701_POWER_DOMAIN_HIF>; + bus-range = <0x00 0xff>; + ranges = <0x81000000 0 0x1a160000 0 0x1a160000 0 0x00010000 /* I/O space */ + 0x83000000 0 0x60000000 0 0x60000000 0 0x10000000>; /* memory space */ + + pcie@0,0 { + device_type = "pci"; + reg = <0x0000 0 0 0 0>; + #address-cells = <3>; + #size-cells = <2>; + #interrupt-cells = <1>; + interrupt-map-mask = <0 0 0 0>; + interrupt-map = <0 0 0 0 &sysirq GIC_SPI 193 IRQ_TYPE_LEVEL_LOW>; + ranges; + num-lanes = <1>; + }; + + pcie@1,0 { + device_type = "pci"; + reg = <0x0800 0 0 0 0>; + #address-cells = <3>; + #size-cells = <2>; + #interrupt-cells = <1>; + interrupt-map-mask = <0 0 0 0>; + interrupt-map = <0 0 0 0 &sysirq GIC_SPI 194 IRQ_TYPE_LEVEL_LOW>; + ranges; + num-lanes = <1>; + }; + + pcie@2,0 { + device_type = "pci"; + reg = <0x1000 0 0 0 0>; + #address-cells = <3>; + #size-cells = <2>; + #interrupt-cells = <1>; + interrupt-map-mask = <0 0 0 0>; + interrupt-map = <0 0 0 0 &sysirq GIC_SPI 195 IRQ_TYPE_LEVEL_LOW>; + ranges; + num-lanes = <1>; + }; + }; + +Examples for MT2712: + pcie: pcie@11700000 { + compatible = "mediatek,mt2712-pcie"; + device_type = "pci"; + reg = <0 0x11700000 0 0x1000>, + <0 0x112ff000 0 0x1000>; + reg-names = "port0", "port1"; + #address-cells = <3>; + #size-cells = <2>; + interrupts = , + ; + clocks = <&topckgen CLK_TOP_PE2_MAC_P0_SEL>, + <&topckgen CLK_TOP_PE2_MAC_P1_SEL>, + <&pericfg CLK_PERI_PCIE0>, + <&pericfg CLK_PERI_PCIE1>; + clock-names = "sys_ck0", "sys_ck1", "ahb_ck0", "ahb_ck1"; + phys = <&pcie0_phy PHY_TYPE_PCIE>, <&pcie1_phy PHY_TYPE_PCIE>; + phy-names = "pcie-phy0", "pcie-phy1"; + bus-range = <0x00 0xff>; + ranges = <0x82000000 0 0x20000000 0x0 0x20000000 0 0x10000000>; + + pcie0: pcie@0,0 { + device_type = 
"pci"; + reg = <0x0000 0 0 0 0>; + #address-cells = <3>; + #size-cells = <2>; + #interrupt-cells = <1>; + ranges; + num-lanes = <1>; + interrupt-map-mask = <0 0 0 7>; + interrupt-map = <0 0 0 1 &pcie_intc0 0>, + <0 0 0 2 &pcie_intc0 1>, + <0 0 0 3 &pcie_intc0 2>, + <0 0 0 4 &pcie_intc0 3>; + pcie_intc0: interrupt-controller { + interrupt-controller; + #address-cells = <0>; + #interrupt-cells = <1>; + }; + }; + + pcie1: pcie@1,0 { + device_type = "pci"; + reg = <0x0800 0 0 0 0>; + #address-cells = <3>; + #size-cells = <2>; + #interrupt-cells = <1>; + ranges; + num-lanes = <1>; + interrupt-map-mask = <0 0 0 7>; + interrupt-map = <0 0 0 1 &pcie_intc1 0>, + <0 0 0 2 &pcie_intc1 1>, + <0 0 0 3 &pcie_intc1 2>, + <0 0 0 4 &pcie_intc1 3>; + pcie_intc1: interrupt-controller { + interrupt-controller; + #address-cells = <0>; + #interrupt-cells = <1>; + }; + }; + }; + +Examples for MT7622: + pcie: pcie@1a140000 { + compatible = "mediatek,mt7622-pcie"; + device_type = "pci"; + reg = <0 0x1a140000 0 0x1000>, + <0 0x1a143000 0 0x1000>, + <0 0x1a145000 0 0x1000>; + reg-names = "subsys", "port0", "port1"; + #address-cells = <3>; + #size-cells = <2>; + interrupts = , + ; + clocks = <&pciesys CLK_PCIE_P0_MAC_EN>, + <&pciesys CLK_PCIE_P1_MAC_EN>, + <&pciesys CLK_PCIE_P0_AHB_EN>, + <&pciesys CLK_PCIE_P1_AHB_EN>, + <&pciesys CLK_PCIE_P0_AUX_EN>, + <&pciesys CLK_PCIE_P1_AUX_EN>, + <&pciesys CLK_PCIE_P0_AXI_EN>, + <&pciesys CLK_PCIE_P1_AXI_EN>, + <&pciesys CLK_PCIE_P0_OBFF_EN>, + <&pciesys CLK_PCIE_P1_OBFF_EN>, + <&pciesys CLK_PCIE_P0_PIPE_EN>, + <&pciesys CLK_PCIE_P1_PIPE_EN>; + clock-names = "sys_ck0", "sys_ck1", "ahb_ck0", "ahb_ck1", + "aux_ck0", "aux_ck1", "axi_ck0", "axi_ck1", + "obff_ck0", "obff_ck1", "pipe_ck0", "pipe_ck1"; + phys = <&pcie0_phy PHY_TYPE_PCIE>, <&pcie1_phy PHY_TYPE_PCIE>; + phy-names = "pcie-phy0", "pcie-phy1"; + power-domains = <&scpsys MT7622_POWER_DOMAIN_HIF0>; + bus-range = <0x00 0xff>; + ranges = <0x82000000 0 0x20000000 0x0 0x20000000 0 0x10000000>; + + pcie0: pcie@0,0 { + device_type = "pci"; + reg = <0x0000 0 0 0 0>; + #address-cells = <3>; + #size-cells = <2>; + #interrupt-cells = <1>; + ranges; + num-lanes = <1>; + interrupt-map-mask = <0 0 0 7>; + interrupt-map = <0 0 0 1 &pcie_intc0 0>, + <0 0 0 2 &pcie_intc0 1>, + <0 0 0 3 &pcie_intc0 2>, + <0 0 0 4 &pcie_intc0 3>; + pcie_intc0: interrupt-controller { + interrupt-controller; + #address-cells = <0>; + #interrupt-cells = <1>; + }; + }; + + pcie1: pcie@1,0 { + device_type = "pci"; + reg = <0x0800 0 0 0 0>; + #address-cells = <3>; + #size-cells = <2>; + #interrupt-cells = <1>; + ranges; + num-lanes = <1>; + interrupt-map-mask = <0 0 0 7>; + interrupt-map = <0 0 0 1 &pcie_intc1 0>, + <0 0 0 2 &pcie_intc1 1>, + <0 0 0 3 &pcie_intc1 2>, + <0 0 0 4 &pcie_intc1 3>; + pcie_intc1: interrupt-controller { + interrupt-controller; + #address-cells = <0>; + #interrupt-cells = <1>; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/pci/mvebu-pci.txt b/Documentation/devicetree/bindings/pci/mvebu-pci.txt index 2de6f65ecfb1..6173af6885f8 100644 --- a/Documentation/devicetree/bindings/pci/mvebu-pci.txt +++ b/Documentation/devicetree/bindings/pci/mvebu-pci.txt @@ -77,7 +77,7 @@ and the following optional properties: - marvell,pcie-lane: the physical PCIe lane number, for ports having multiple lanes. If this property is not found, we assume that the value is 0. 
-- reset-gpios: optional gpio to PERST# +- reset-gpios: optional GPIO to PERST# - reset-delay-us: delay in us to wait after reset de-assertion, if not specified will default to 100ms, as required by the PCIe specification. @@ -85,7 +85,6 @@ Example: pcie-controller { compatible = "marvell,armada-xp-pcie"; - status = "disabled"; device_type = "pci"; #address-cells = <3>; @@ -147,7 +146,6 @@ pcie-controller { /* wait 20ms for device settle after reset deassertion */ reset-delay-us = <20000>; clocks = <&gateclk 5>; - status = "disabled"; }; pcie@2,0 { @@ -164,7 +162,6 @@ pcie-controller { marvell,pcie-port = <0>; marvell,pcie-lane = <1>; clocks = <&gateclk 6>; - status = "disabled"; }; pcie@3,0 { @@ -181,7 +178,6 @@ pcie-controller { marvell,pcie-port = <0>; marvell,pcie-lane = <2>; clocks = <&gateclk 7>; - status = "disabled"; }; pcie@4,0 { @@ -198,7 +194,6 @@ pcie-controller { marvell,pcie-port = <0>; marvell,pcie-lane = <3>; clocks = <&gateclk 8>; - status = "disabled"; }; pcie@5,0 { @@ -215,7 +210,6 @@ pcie-controller { marvell,pcie-port = <1>; marvell,pcie-lane = <0>; clocks = <&gateclk 9>; - status = "disabled"; }; pcie@6,0 { @@ -232,7 +226,6 @@ pcie-controller { marvell,pcie-port = <1>; marvell,pcie-lane = <1>; clocks = <&gateclk 10>; - status = "disabled"; }; pcie@7,0 { @@ -249,7 +242,6 @@ pcie-controller { marvell,pcie-port = <1>; marvell,pcie-lane = <2>; clocks = <&gateclk 11>; - status = "disabled"; }; pcie@8,0 { @@ -266,7 +258,6 @@ pcie-controller { marvell,pcie-port = <1>; marvell,pcie-lane = <3>; clocks = <&gateclk 12>; - status = "disabled"; }; pcie@9,0 { @@ -283,10 +274,9 @@ pcie-controller { marvell,pcie-port = <2>; marvell,pcie-lane = <0>; clocks = <&gateclk 26>; - status = "disabled"; }; - pcie@10,0 { + pcie@a,0 { device_type = "pci"; assigned-addresses = <0x82005000 0 0x82000 0 0x2000>; reg = <0x5000 0 0 0 0>; @@ -300,6 +290,5 @@ pcie-controller { marvell,pcie-port = <3>; marvell,pcie-lane = <0>; clocks = <&gateclk 27>; - status = "disabled"; }; }; diff --git a/Documentation/devicetree/bindings/pci/pci-armada8k.txt b/Documentation/devicetree/bindings/pci/pci-armada8k.txt index 598533a57d79..c1e4c3d10a74 100644 --- a/Documentation/devicetree/bindings/pci/pci-armada8k.txt +++ b/Documentation/devicetree/bindings/pci/pci-armada8k.txt @@ -1,6 +1,6 @@ * Marvell Armada 7K/8K PCIe interface -This PCIe host controller is based on the Synopsis Designware PCIe IP +This PCIe host controller is based on the Synopsys DesignWare PCIe IP and thus inherits all the common properties defined in designware-pcie.txt. Required properties: @@ -34,5 +34,4 @@ Example: interrupts = ; num-lanes = <1>; clocks = <&cpm_syscon0 1 13>; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/pci/pci-keystone.txt b/Documentation/devicetree/bindings/pci/pci-keystone.txt index d08a4d51108f..7e05487544ed 100644 --- a/Documentation/devicetree/bindings/pci/pci-keystone.txt +++ b/Documentation/devicetree/bindings/pci/pci-keystone.txt @@ -1,12 +1,12 @@ TI Keystone PCIe interface -Keystone PCI host Controller is based on Designware PCI h/w version 3.65. -It shares common functions with PCIe Designware core driver and inherit -common properties defined in +Keystone PCI host Controller is based on the Synopsys DesignWare PCI +hardware version 3.65. 
It shares common functions with the PCIe DesignWare +core driver and inherits common properties defined in Documentation/devicetree/bindings/pci/designware-pci.txt Please refer to Documentation/devicetree/bindings/pci/designware-pci.txt -for the details of Designware DT bindings. Additional properties are +for the details of DesignWare DT bindings. Additional properties are described here as well as properties that are not applicable. Required Properties:- @@ -52,13 +52,12 @@ pcie_intc: Interrupt controller device node for Legacy IRQ chip }; Optional properties:- - phys: phandle to Generic Keystone SerDes phy for PCI - phy-names: name of the Generic Keystine SerDes phy for PCI + phys: phandle to generic Keystone SerDes PHY for PCI + phy-names: name of the generic Keystone SerDes PHY for PCI - If boot loader already does PCI link establishment, then phys and phy-names shouldn't be present. interrupts: platform interrupt for error interrupts. -Designware DT Properties not applicable for Keystone PCI +DesignWare DT Properties not applicable for Keystone PCI 1. pcie_bus clock-names not used. Instead, a phandle to phys is used. - diff --git a/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt b/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt index 07a75094c5a8..3d038638612b 100644 --- a/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt +++ b/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt @@ -6,11 +6,14 @@ AHB. There is one bridge instance per USB port connected to the internal OHCI and EHCI controllers. Required properties: -- compatible: "renesas,pci-r8a7790" for the R8A7790 SoC; +- compatible: "renesas,pci-r8a7743" for the R8A7743 SoC; + "renesas,pci-r8a7745" for the R8A7745 SoC; + "renesas,pci-r8a7790" for the R8A7790 SoC; "renesas,pci-r8a7791" for the R8A7791 SoC; "renesas,pci-r8a7793" for the R8A7793 SoC; "renesas,pci-r8a7794" for the R8A7794 SoC; - "renesas,pci-rcar-gen2" for a generic R-Car Gen2 compatible device + "renesas,pci-rcar-gen2" for a generic R-Car Gen2 or + RZ/G1 compatible device. 
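To make the compatible-ordering rule restated just below concrete (SoC-specific string first, generic R-Car Gen2/RZ/G1 fallback second), an R8A7743 node would begin as in the sketch that follows; the label, unit address and register values are illustrative placeholders only:

	pci0: pci@ee090000 {
		compatible = "renesas,pci-r8a7743", "renesas,pci-rcar-gen2";
		device_type = "pci";
		reg = <0 0xee090000 0 0xc00>, <0 0xee080000 0 0x1100>;
		/* interrupts, clocks, bus-range, ranges and the USB
		 * sub-nodes follow as described by the rest of this
		 * binding; they are omitted from this sketch.
		 */
	};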
When compatible with the generic version, nodes must list the diff --git a/Documentation/devicetree/bindings/pci/pci.txt b/Documentation/devicetree/bindings/pci/pci.txt index 50f9e2ca5b13..c77981c5dd18 100644 --- a/Documentation/devicetree/bindings/pci/pci.txt +++ b/Documentation/devicetree/bindings/pci/pci.txt @@ -1,12 +1,12 @@ PCI bus bridges have standardized Device Tree bindings: PCI Bus Binding to: IEEE Std 1275-1994 -http://www.firmware.org/1275/bindings/pci/pci2_1.pdf +http://www.devicetree.org/open-firmware/bindings/pci/pci2_1.pdf And for the interrupt mapping part: Open Firmware Recommended Practice: Interrupt Mapping -http://www.firmware.org/1275/practice/imap/imap0_9d.pdf +http://www.devicetree.org/open-firmware/practice/imap/imap0_9d.pdf Additionally to the properties specified in the above standards a host bridge driver implementation may support the following properties: diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie.txt b/Documentation/devicetree/bindings/pci/qcom,pcie.txt index 9d418b71774f..3c9d321b3d3b 100644 --- a/Documentation/devicetree/bindings/pci/qcom,pcie.txt +++ b/Documentation/devicetree/bindings/pci/qcom,pcie.txt @@ -9,6 +9,7 @@ - "qcom,pcie-apq8084" for apq8084 - "qcom,pcie-msm8996" for msm8996 or apq8096 - "qcom,pcie-ipq4019" for ipq4019 + - "qcom,pcie-ipq8074" for ipq8074 - reg: Usage: required @@ -20,7 +21,7 @@ Value type: Definition: Must include the following entries - "parf" Qualcomm specific registers - - "dbi" Designware PCIe registers + - "dbi" DesignWare PCIe registers - "elbi" External local bus interface registers - "config" PCIe configuration space @@ -105,6 +106,16 @@ - "bus_master" Master AXI clock - "bus_slave" Slave AXI clock +- clock-names: + Usage: required for ipq8074 + Value type: + Definition: Should contain the following entries + - "iface" PCIe to SysNOC BIU clock + - "axi_m" AXI Master clock + - "axi_s" AXI Slave clock + - "ahb" AHB clock + - "aux" Auxiliary clock + - resets: Usage: required Value type: @@ -144,6 +155,18 @@ - "ahb" AHB reset - "phy_ahb" PHY AHB reset +- reset-names: + Usage: required for ipq8074 + Value type: + Definition: Should contain the following entries + - "pipe" PIPE reset + - "sleep" Sleep reset + - "sticky" Core Sticky reset + - "axi_m" AXI Master reset + - "axi_s" AXI Slave reset + - "ahb" AHB Reset + - "axi_m_sticky" AXI Master Sticky reset + - power-domains: Usage: required for apq8084 and msm8996/apq8096 Value type: @@ -180,7 +203,7 @@ - -gpios: Usage: optional Value type: - Definition: List of phandle and gpio specifier pairs. Should contain + Definition: List of phandle and GPIO specifier pairs. Should contain - "perst-gpios" PCIe endpoint reset signal line - "wake-gpios" PCIe endpoint wake signal line diff --git a/Documentation/devicetree/bindings/pci/ralink,rt3883-pci.txt b/Documentation/devicetree/bindings/pci/ralink,rt3883-pci.txt index 8e0a1eb0acbb..a04ab1b76211 100644 --- a/Documentation/devicetree/bindings/pci/ralink,rt3883-pci.txt +++ b/Documentation/devicetree/bindings/pci/ralink,rt3883-pci.txt @@ -71,7 +71,7 @@ - interrupt-map: standard PCI properties to define the mapping of the PCI interface to interrupt numbers. - The PCI host bridge node migh have additional sub-nodes representing + The PCI host bridge node might have additional sub-nodes representing the onboard PCI devices/PCI slots. 
Each such sub-node must have the following mandatory properties: diff --git a/Documentation/devicetree/bindings/pci/rcar-pci.txt b/Documentation/devicetree/bindings/pci/rcar-pci.txt index bd27428dda61..76ba3a61d1a3 100644 --- a/Documentation/devicetree/bindings/pci/rcar-pci.txt +++ b/Documentation/devicetree/bindings/pci/rcar-pci.txt @@ -14,7 +14,7 @@ compatible: "renesas,pcie-r8a7779" for the R8A7779 SoC; SoC-specific version corresponding to the platform first followed by the generic version. -- reg: base address and length of the pcie controller registers. +- reg: base address and length of the PCIe controller registers. - #address-cells: set to <3> - #size-cells: set to <2> - bus-range: PCI bus numbers covered @@ -25,15 +25,14 @@ compatible: "renesas,pcie-r8a7779" for the R8A7779 SoC; source for hardware related interrupts (e.g. link speed change). - #interrupt-cells: set to <1> - interrupt-map-mask and interrupt-map: standard PCI properties - to define the mapping of the PCIe interface to interrupt - numbers. + to define the mapping of the PCIe interface to interrupt numbers. - clocks: from common clock binding: clock specifiers for the PCIe controller and PCIe bus clocks. - clock-names: from common clock binding: should be "pcie" and "pcie_bus". Example: -SoC specific DT Entry: +SoC-specific DT Entry: pcie: pcie@fe000000 { compatible = "renesas,pcie-r8a7791", "renesas,pcie-rcar-gen2"; @@ -54,5 +53,4 @@ SoC specific DT Entry: interrupt-map = <0 0 0 0 &gic 0 116 4>; clocks = <&mstp3_clks R8A7791_CLK_PCIE>, <&pcie_bus_clk>; clock-names = "pcie", "pcie_bus"; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/pci/rockchip-pcie.txt b/Documentation/devicetree/bindings/pci/rockchip-pcie.txt index 1453a734c2f5..af34c65773fd 100644 --- a/Documentation/devicetree/bindings/pci/rockchip-pcie.txt +++ b/Documentation/devicetree/bindings/pci/rockchip-pcie.txt @@ -19,8 +19,6 @@ Required properties: - "pm" - msi-map: Maps a Requester ID to an MSI controller and associated msi-specifier data. See ./pci-msi.txt -- phys: From PHY bindings: Phandle for the Generic PHY for PCIe. -- phy-names: MUST be "pcie-phy". - interrupts: Three interrupt entries must be specified. - interrupt-names: Must include the following names - "sys" @@ -42,11 +40,24 @@ Required properties: interrupt source. The value must be 1. - interrupt-map-mask and interrupt-map: standard PCI properties +Required properties for legacy PHY model (deprecated): +- phys: From PHY bindings: Phandle for the Generic PHY for PCIe. +- phy-names: MUST be "pcie-phy". + +Required properties for per-lane PHY model (preferred): +- phys: Must contain an phandle to a PHY for each entry in phy-names. +- phy-names: Must include 4 entries for all 4 lanes even if some of + them won't be used for your cases. Entries are of the form "pcie-phy-N": + where N ranges from 0 to 3. + (see example below and you MUST also refer to ../phy/rockchip-pcie-phy.txt + for changing the #phy-cells of phy node to support it) + Optional Property: - aspm-no-l0s: RC won't support ASPM L0s. This property is needed if using 24MHz OSC for RC's PHY. -- ep-gpios: contain the entry for pre-reset gpio +- ep-gpios: contain the entry for pre-reset GPIO - num-lanes: number of lanes to use +- vpcie12v-supply: The phandle to the 12v regulator to use for PCIe. - vpcie3v3-supply: The phandle to the 3.3v regulator to use for PCIe. - vpcie1v8-supply: The phandle to the 1.8v regulator to use for PCIe. - vpcie0v9-supply: The phandle to the 0.9v regulator to use for PCIe. 
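A board-level fragment wiring up the optional properties listed above (including the newly documented vpcie12v-supply) might look like the following; the regulator and GPIO labels are illustrative and not taken from a real board file:

	&pcie0 {
		ep-gpios = <&gpio2 4 GPIO_ACTIVE_HIGH>;
		num-lanes = <4>;
		vpcie12v-supply = <&vcc12v_pcie>;
		vpcie3v3-supply = <&vcc3v3_pcie>;
		vpcie1v8-supply = <&vcc1v8_pcie>;
		vpcie0v9-supply = <&vcc0v9_pcie>;
	};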
@@ -95,6 +106,7 @@ pcie0: pcie@f8000000 { <&cru SRST_PCIE_PM>, <&cru SRST_P_PCIE>, <&cru SRST_A_PCIE>; reset-names = "core", "mgmt", "mgmt-sticky", "pipe", "pm", "pclk", "aclk"; + /* deprecated legacy PHY model */ phys = <&pcie_phy>; phy-names = "pcie-phy"; pinctrl-names = "default"; @@ -111,3 +123,13 @@ pcie0: pcie@f8000000 { #interrupt-cells = <1>; }; }; + +pcie0: pcie@f8000000 { + ... + + /* preferred per-lane PHY model */ + phys = <&pcie_phy 0>, <&pcie_phy 1>, <&pcie_phy 2>, <&pcie_phy 3>; + phy-names = "pcie-phy-0", "pcie-phy-1", "pcie-phy-2", "pcie-phy-3"; + + ... +}; diff --git a/Documentation/devicetree/bindings/pci/samsung,exynos5440-pcie.txt b/Documentation/devicetree/bindings/pci/samsung,exynos5440-pcie.txt index 7d3b09474657..34a11bfbfb60 100644 --- a/Documentation/devicetree/bindings/pci/samsung,exynos5440-pcie.txt +++ b/Documentation/devicetree/bindings/pci/samsung,exynos5440-pcie.txt @@ -1,29 +1,29 @@ * Samsung Exynos 5440 PCIe interface -This PCIe host controller is based on the Synopsis Designware PCIe IP +This PCIe host controller is based on the Synopsys DesignWare PCIe IP and thus inherits all the common properties defined in designware-pcie.txt. Required properties: - compatible: "samsung,exynos5440-pcie" -- reg: base addresses and lengths of the pcie controller, - the phy controller, additional register for the phy controller. - (Registers for the phy controller are DEPRECATED. +- reg: base addresses and lengths of the PCIe controller, + the PHY controller, additional register for the PHY controller. + (Registers for the PHY controller are DEPRECATED. Use the PHY framework.) - reg-names : First name should be set to "elbi". - And use the "config" instead of getting the confgiruation address space + And use the "config" instead of getting the configuration address space from "ranges". - NOTE: When use the "config" property, reg-names must be set. + NOTE: When using the "config" property, reg-names must be set. - interrupts: A list of interrupt outputs for level interrupt, pulse interrupt, special interrupt. -- phys: From PHY binding. Phandle for the Generic PHY. +- phys: From PHY binding. Phandle for the generic PHY. Refer to Documentation/devicetree/bindings/phy/samsung-phy.txt -Other common properties refer to - Documentation/devicetree/binding/pci/designware-pcie.txt +For other common properties, refer to + Documentation/devicetree/bindings/pci/designware-pcie.txt Example: -SoC specific DT Entry: +SoC-specific DT Entry: pcie@290000 { compatible = "samsung,exynos5440-pcie", "snps,dw-pcie"; @@ -83,7 +83,7 @@ With using PHY framework: ... }; -Board specific DT Entry: +Board-specific DT Entry: pcie@290000 { reset-gpio = <&pin_ctrl 5 0>; diff --git a/Documentation/devicetree/bindings/pci/spear13xx-pcie.txt b/Documentation/devicetree/bindings/pci/spear13xx-pcie.txt index 49ea76da7718..d5a14f5dad46 100644 --- a/Documentation/devicetree/bindings/pci/spear13xx-pcie.txt +++ b/Documentation/devicetree/bindings/pci/spear13xx-pcie.txt @@ -1,12 +1,12 @@ SPEAr13XX PCIe DT detail: ================================ -SPEAr13XX uses synopsis designware PCIe controller and ST MiPHY as phy +SPEAr13XX uses the Synopsys DesignWare PCIe controller and ST MiPHY as PHY controller. Required properties: -- compatible : should be "st,spear1340-pcie", "snps,dw-pcie". -- phys : phandle to phy node associated with pcie controller +- compatible : should be "st,spear1340-pcie", "snps,dw-pcie". 
+- phys : phandle to PHY node associated with PCIe controller - phy-names : must be "pcie-phy" - All other definitions as per generic PCI bindings diff --git a/Documentation/devicetree/bindings/pci/ti-pci.txt b/Documentation/devicetree/bindings/pci/ti-pci.txt index 6a07c96227e0..7f7af3044016 100644 --- a/Documentation/devicetree/bindings/pci/ti-pci.txt +++ b/Documentation/devicetree/bindings/pci/ti-pci.txt @@ -1,6 +1,6 @@ TI PCI Controllers -PCIe Designware Controller +PCIe DesignWare Controller - compatible: Should be "ti,dra7-pcie" for RC Should be "ti,dra7-pcie-ep" for EP - phys : list of PHY specifiers (used by generic PHY framework) @@ -13,7 +13,7 @@ PCIe Designware Controller HOST MODE ========= - reg : Two register ranges as listed in the reg-names property - - reg-names : The first entry must be "ti-conf" for the TI specific registers + - reg-names : The first entry must be "ti-conf" for the TI-specific registers The second entry must be "rc-dbics" for the DesignWare PCIe registers The third entry must be "config" for the PCIe configuration space @@ -30,7 +30,7 @@ HOST MODE DEVICE MODE =========== - reg : Four register ranges as listed in the reg-names property - - reg-names : "ti-conf" for the TI specific registers + - reg-names : "ti-conf" for the TI-specific registers "ep_dbics" for the standard configuration registers as they are locally accessed within the DIF CS space "ep_dbics2" for the standard configuration registers as @@ -46,7 +46,7 @@ DEVICE MODE access. Optional Property: - - gpios : Should be added if a gpio line is required to drive PERST# line + - gpios : Should be added if a GPIO line is required to drive PERST# line NOTE: Two DT nodes may be added for each PCI controller; one for host mode and another for device mode. So in order for PCI to diff --git a/Documentation/devicetree/bindings/pci/versatile.txt b/Documentation/devicetree/bindings/pci/versatile.txt index ebd1e7d0403e..0a702b13d2ac 100644 --- a/Documentation/devicetree/bindings/pci/versatile.txt +++ b/Documentation/devicetree/bindings/pci/versatile.txt @@ -5,7 +5,7 @@ PCI host controller found on the ARM Versatile PB board's FPGA. Required properties: - compatible: should contain "arm,versatile-pci" to identify the Versatile PCI controller. -- reg: base addresses and lengths of the pci controller. There must be 3 +- reg: base addresses and lengths of the PCI controller. There must be 3 entries: - Versatile-specific registers - Self Config space diff --git a/Documentation/devicetree/bindings/pci/xgene-pci-msi.txt b/Documentation/devicetree/bindings/pci/xgene-pci-msi.txt index 36d881c8e6d4..85d9b95234f7 100644 --- a/Documentation/devicetree/bindings/pci/xgene-pci-msi.txt +++ b/Documentation/devicetree/bindings/pci/xgene-pci-msi.txt @@ -4,7 +4,7 @@ Required properties: - compatible: should be "apm,xgene1-msi" to identify X-Gene v1 PCIe MSI controller block. -- msi-controller: indicates that this is X-Gene v1 PCIe MSI controller node +- msi-controller: indicates that this is an X-Gene v1 PCIe MSI controller node - reg: physical base address (0x79000000) and length (0x900000) for controller registers. These registers include the MSI termination address and data registers as well as the MSI interrupt status registers. @@ -13,7 +13,8 @@ Required properties: interrupt number 0x10 to 0x1f. 
- interrupt-names: not required -Each PCIe node needs to have property msi-parent that points to msi controller node +Each PCIe node needs to have property msi-parent that points to an MSI +controller node Examples: @@ -44,7 +45,6 @@ SoC DTSI: + PCIe controller node with msi-parent property pointing to MSI node: pcie0: pcie@1f2b0000 { - status = "disabled"; device_type = "pci"; compatible = "apm,xgene-storm-pcie", "apm,xgene-pcie"; #interrupt-cells = <1>; diff --git a/Documentation/devicetree/bindings/pci/xgene-pci.txt b/Documentation/devicetree/bindings/pci/xgene-pci.txt index 1070b068c7c6..6fd2decfa66c 100644 --- a/Documentation/devicetree/bindings/pci/xgene-pci.txt +++ b/Documentation/devicetree/bindings/pci/xgene-pci.txt @@ -8,7 +8,7 @@ Required properties: property. - reg-names: Must include the following entries: "csr": controller configuration registers. - "cfg": pcie configuration space registers. + "cfg": PCIe configuration space registers. - #address-cells: set to <3> - #size-cells: set to <2> - ranges: ranges for the outbound memory, I/O regions. @@ -21,11 +21,11 @@ Required properties: Optional properties: - status: Either "ok" or "disabled". -- dma-coherent: Present if dma operations are coherent +- dma-coherent: Present if DMA operations are coherent Example: -SoC specific DT Entry: +SoC-specific DT Entry: pcie0: pcie@1f2b0000 { status = "disabled"; @@ -51,7 +51,7 @@ SoC specific DT Entry: }; -Board specific DT Entry: +Board-specific DT Entry: &pcie0 { status = "ok"; }; diff --git a/Documentation/devicetree/bindings/pci/xilinx-nwl-pcie.txt b/Documentation/devicetree/bindings/pci/xilinx-nwl-pcie.txt index 3259798a1192..01bf7fdf4c19 100644 --- a/Documentation/devicetree/bindings/pci/xilinx-nwl-pcie.txt +++ b/Documentation/devicetree/bindings/pci/xilinx-nwl-pcie.txt @@ -15,9 +15,9 @@ Required properties: - device_type: must be "pci" - interrupts: Should contain NWL PCIe interrupt - interrupt-names: Must include the following entries: - "msi1, msi0": interrupt asserted when MSI is received + "msi1, msi0": interrupt asserted when an MSI is received "intx": interrupt asserted when a legacy interrupt is received - "misc": interrupt asserted when miscellaneous is received + "misc": interrupt asserted when miscellaneous interrupt is received - interrupt-map-mask and interrupt-map: standard PCI properties to define the mapping of the PCI interface to interrupt numbers. 
- ranges: ranges for the PCI memory regions (I/O space region is not @@ -26,7 +26,8 @@ Required properties: detailed explanation - msi-controller: indicates that this is MSI controller node - msi-parent: MSI parent of the root complex itself -- legacy-interrupt-controller: Interrupt controller device node for Legacy interrupts +- legacy-interrupt-controller: Interrupt controller device node for Legacy + interrupts - interrupt-controller: identifies the node as an interrupt controller - #interrupt-cells: should be set to 1 - #address-cells: specifies the number of cells needed to encode an diff --git a/Documentation/devicetree/bindings/phy/apm-xgene-phy.txt b/Documentation/devicetree/bindings/phy/apm-xgene-phy.txt index 5f3a65a9dd88..e1bb12711fbf 100644 --- a/Documentation/devicetree/bindings/phy/apm-xgene-phy.txt +++ b/Documentation/devicetree/bindings/phy/apm-xgene-phy.txt @@ -61,19 +61,16 @@ Example: compatible = "apm,xgene-phy"; reg = <0x0 0x1f21a000 0x0 0x100>; #phy-cells = <1>; - status = "disabled"; }; phy2: phy@1f22a000 { compatible = "apm,xgene-phy"; reg = <0x0 0x1f22a000 0x0 0x100>; #phy-cells = <1>; - status = "ok"; }; phy3: phy@1f23a000 { compatible = "apm,xgene-phy"; reg = <0x0 0x1f23a000 0x0 0x100>; #phy-cells = <1>; - status = "ok"; }; diff --git a/Documentation/devicetree/bindings/phy/keystone-usb-phy.txt b/Documentation/devicetree/bindings/phy/keystone-usb-phy.txt index f37b3a86341d..300830dda0bf 100644 --- a/Documentation/devicetree/bindings/phy/keystone-usb-phy.txt +++ b/Documentation/devicetree/bindings/phy/keystone-usb-phy.txt @@ -16,5 +16,4 @@ usb_phy: usb_phy@2620738 { #address-cells = <1>; #size-cells = <1>; reg = <0x2620738 32>; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/phy/phy-bindings.txt b/Documentation/devicetree/bindings/phy/phy-bindings.txt index 1293c321754c..a403b81d0679 100644 --- a/Documentation/devicetree/bindings/phy/phy-bindings.txt +++ b/Documentation/devicetree/bindings/phy/phy-bindings.txt @@ -34,7 +34,9 @@ PHY user node ============= Required Properties: -phys : the phandle for the PHY device (used by the PHY subsystem) +phys : the phandle for the PHY device (used by the PHY subsystem; not to be + confused with the Ethernet specific 'phy' and 'phy-handle' properties, + see Documentation/devicetree/bindings/net/ethernet.txt for these) phy-names : the names of the PHY corresponding to the PHYs present in the *phys* phandle diff --git a/Documentation/devicetree/bindings/phy/phy-lantiq-rcu-usb2.txt b/Documentation/devicetree/bindings/phy/phy-lantiq-rcu-usb2.txt new file mode 100644 index 000000000000..643948b6b576 --- /dev/null +++ b/Documentation/devicetree/bindings/phy/phy-lantiq-rcu-usb2.txt @@ -0,0 +1,40 @@ +Lantiq XWAY SoC RCU USB 1.1/2.0 PHY binding +=========================================== + +This binding describes the USB PHY hardware provided by the RCU module on the +Lantiq XWAY SoCs. + +This node has to be a sub node of the Lantiq RCU block. + +------------------------------------------------------------------------------- +Required properties (controller (parent) node): +- compatible : Should be one of + "lantiq,ase-usb2-phy" + "lantiq,danube-usb2-phy" + "lantiq,xrx100-usb2-phy" + "lantiq,xrx200-usb2-phy" + "lantiq,xrx300-usb2-phy" +- reg : Defines the following sets of registers in the parent + syscon device + - Offset of the USB PHY configuration register + - Offset of the USB Analog configuration + register (only for xrx200 and xrx200) +- clocks : References to the (PMU) "phy" clk gate. 
+- clock-names : Must be "phy" +- resets : References to the RCU USB configuration reset bits. +- reset-names : Must be one of the following: + "phy" (optional) + "ctrl" (shared) + +------------------------------------------------------------------------------- +Example for the USB PHYs on an xRX200 SoC: + usb_phy0: usb2-phy@18 { + compatible = "lantiq,xrx200-usb2-phy"; + reg = <0x18 4>, <0x38 4>; + + clocks = <&pmu PMU_GATE_USB0_PHY>; + clock-names = "phy"; + resets = <&reset1 4 4>, <&reset0 4 4>; + reset-names = "phy", "ctrl"; + #phy-cells = <0>; + }; diff --git a/Documentation/devicetree/bindings/phy/phy-mtk-tphy.txt b/Documentation/devicetree/bindings/phy/phy-mtk-tphy.txt index faf18084a33a..41e09ed2ca70 100644 --- a/Documentation/devicetree/bindings/phy/phy-mtk-tphy.txt +++ b/Documentation/devicetree/bindings/phy/phy-mtk-tphy.txt @@ -51,14 +51,12 @@ u3phy: usb-phy@11290000 { #address-cells = <2>; #size-cells = <2>; ranges; - status = "okay"; u2port0: usb-phy@11290800 { reg = <0 0x11290800 0 0x100>; clocks = <&apmixedsys CLK_APMIXED_REF2USB_TX>; clock-names = "ref"; #phy-cells = <1>; - status = "okay"; }; u3port0: usb-phy@11290900 { @@ -66,7 +64,6 @@ u3phy: usb-phy@11290000 { clocks = <&clk26m>; clock-names = "ref"; #phy-cells = <1>; - status = "okay"; }; u2port1: usb-phy@11291000 { @@ -74,7 +71,6 @@ u3phy: usb-phy@11290000 { clocks = <&apmixedsys CLK_APMIXED_REF2USB_TX>; clock-names = "ref"; #phy-cells = <1>; - status = "okay"; }; }; diff --git a/Documentation/devicetree/bindings/phy/phy-mvebu-comphy.txt b/Documentation/devicetree/bindings/phy/phy-mvebu-comphy.txt new file mode 100644 index 000000000000..bfcf80341657 --- /dev/null +++ b/Documentation/devicetree/bindings/phy/phy-mvebu-comphy.txt @@ -0,0 +1,43 @@ +mvebu comphy driver +------------------- + +A comphy controller can be found on Marvell Armada 7k/8k on the CP110. It +provides a number of shared PHYs used by various interfaces (network, sata, +usb, PCIe...). + +Required properties: + +- compatible: should be "marvell,comphy-cp110" +- reg: should contain the comphy register location and length. +- marvell,system-controller: should contain a phandle to the + system controller node. +- #address-cells: should be 1. +- #size-cells: should be 0. + +A sub-node is required for each comphy lane provided by the comphy. + +Required properties (child nodes): + +- reg: comphy lane number. +- #phy-cells : from the generic phy bindings, must be 1. Defines the + input port to use for a given comphy lane. 
+ +Example: + + cpm_comphy: phy@120000 { + compatible = "marvell,comphy-cp110"; + reg = <0x120000 0x6000>; + marvell,system-controller = <&cpm_syscon0>; + #address-cells = <1>; + #size-cells = <0>; + + cpm_comphy0: phy@0 { + reg = <0>; + #phy-cells = <1>; + }; + + cpm_comphy1: phy@1 { + reg = <1>; + #phy-cells = <1>; + }; + }; diff --git a/Documentation/devicetree/bindings/phy/phy-mvebu.txt b/Documentation/devicetree/bindings/phy/phy-mvebu.txt index f95b6260a3b3..64afdd13d91d 100644 --- a/Documentation/devicetree/bindings/phy/phy-mvebu.txt +++ b/Documentation/devicetree/bindings/phy/phy-mvebu.txt @@ -18,7 +18,6 @@ Example: clocks = <&gate_clk 15>; clock-names = "sata"; #phy-cells = <0>; - status = "ok"; }; Armada 375 USB cluster diff --git a/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt b/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt index a67ef2a3874f..074a7b3b0425 100644 --- a/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt +++ b/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt @@ -67,14 +67,12 @@ grf: syscon@ff770000 { , ; interrupt-names = "otg-id", "otg-bvalid", "linestate"; - status = "okay"; }; u2phy_host: host-port { #phy-cells = <0>; interrupts = ; interrupt-names = "linestate"; - status = "okay"; }; }; }; diff --git a/Documentation/devicetree/bindings/phy/qcom-dwc3-usb-phy.txt b/Documentation/devicetree/bindings/phy/qcom-dwc3-usb-phy.txt index 86f2dbe07ed4..a1697c27aecd 100644 --- a/Documentation/devicetree/bindings/phy/qcom-dwc3-usb-phy.txt +++ b/Documentation/devicetree/bindings/phy/qcom-dwc3-usb-phy.txt @@ -25,7 +25,6 @@ Example: clock-names = "ref"; #phy-cells = <0>; - status = "ok"; }; phy@100f8830 { @@ -35,5 +34,4 @@ Example: clock-names = "ref"; #phy-cells = <0>; - status = "ok"; }; diff --git a/Documentation/devicetree/bindings/phy/rockchip-pcie-phy.txt b/Documentation/devicetree/bindings/phy/rockchip-pcie-phy.txt index 0f6222a672ce..b496042f1f44 100644 --- a/Documentation/devicetree/bindings/phy/rockchip-pcie-phy.txt +++ b/Documentation/devicetree/bindings/phy/rockchip-pcie-phy.txt @@ -3,7 +3,6 @@ Rockchip PCIE PHY Required properties: - compatible: rockchip,rk3399-pcie-phy - - #phy-cells: must be 0 - clocks: Must contain an entry in clock-names. See ../clocks/clock-bindings.txt for details. - clock-names: Must be "refclk" @@ -11,6 +10,12 @@ Required properties: See ../reset/reset.txt for details. 
- reset-names: Must be "phy" +Required properties for legacy PHY mode (deprecated): + - #phy-cells: must be 0 + +Required properties for per-lane PHY mode (preferred): + - #phy-cells: must be 1 + Example: grf: syscon@ff770000 { diff --git a/Documentation/devicetree/bindings/phy/samsung-phy.txt b/Documentation/devicetree/bindings/phy/samsung-phy.txt index ab80bfe31cb3..1c40ccd40ce4 100644 --- a/Documentation/devicetree/bindings/phy/samsung-phy.txt +++ b/Documentation/devicetree/bindings/phy/samsung-phy.txt @@ -82,7 +82,6 @@ usbphy: phy@125b0000 { reg = <0x125b0000 0x100>; clocks = <&clock 305>, <&clock 2>; clock-names = "phy", "ref"; - status = "okay"; #phy-cells = <1>; samsung,sysreg-phandle = <&sys_reg>; samsung,pmureg-phandle = <&pmu_reg>; diff --git a/Documentation/devicetree/bindings/phy/sun9i-usb-phy.txt b/Documentation/devicetree/bindings/phy/sun9i-usb-phy.txt index 1cca85c709d1..f9853156e311 100644 --- a/Documentation/devicetree/bindings/phy/sun9i-usb-phy.txt +++ b/Documentation/devicetree/bindings/phy/sun9i-usb-phy.txt @@ -33,6 +33,5 @@ Example: clock-names = "hsic_480M", "hsic_12M", "phy"; resets = <&usb_phy_clk 18>, <&usb_phy_clk 19>; reset-names = "hsic", "phy"; - status = "disabled"; #phy-cells = <0>; }; diff --git a/Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt index 590e60378be3..3e23fece99da 100644 --- a/Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt +++ b/Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt @@ -148,5 +148,4 @@ dbgu: serial@fffff200 { interrupts = <1 4 7>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_dbgu>; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imx-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/fsl,imx-pinctrl.txt index 9fde25f1401a..42d74f8a1bcc 100644 --- a/Documentation/devicetree/bindings/pinctrl/fsl,imx-pinctrl.txt +++ b/Documentation/devicetree/bindings/pinctrl/fsl,imx-pinctrl.txt @@ -61,7 +61,6 @@ Examples: usdhc@0219c000 { /* uSDHC4 */ non-removable; vmmc-supply = <®_3p3v>; - status = "okay"; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc4_1>; }; diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imx7d-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/fsl,imx7d-pinctrl.txt index 8c5d27c5b562..6666277c3acb 100644 --- a/Documentation/devicetree/bindings/pinctrl/fsl,imx7d-pinctrl.txt +++ b/Documentation/devicetree/bindings/pinctrl/fsl,imx7d-pinctrl.txt @@ -61,7 +61,6 @@ iomuxc-lpsr controller and SDA pad from iomuxc controller as: i2c1: i2c@30a20000 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c1_1 &pinctrl_i2c1_2>; - status = "okay"; }; iomuxc-lpsr@302c0000 { diff --git a/Documentation/devicetree/bindings/pinctrl/nvidia,tegra124-dpaux-padctl.txt b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra124-dpaux-padctl.txt index f2abdaee9022..e0e886b73527 100644 --- a/Documentation/devicetree/bindings/pinctrl/nvidia,tegra124-dpaux-padctl.txt +++ b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra124-dpaux-padctl.txt @@ -56,5 +56,4 @@ Example: pinctrl-0 = <&state_dpaux_i2c>; pinctrl-1 = <&state_dpaux_off>; pinctrl-names = "default", "idle"; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/pinctrl/oxnas,pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/oxnas,pinctrl.txt index 09e81a95bbfd..b1159434f593 100644 --- a/Documentation/devicetree/bindings/pinctrl/oxnas,pinctrl.txt +++ 
b/Documentation/devicetree/bindings/pinctrl/oxnas,pinctrl.txt @@ -50,7 +50,6 @@ uart2: serial@900000 { reg-io-width = <1>; current-speed = <115200>; no-loopback-test; - status = "disabled"; resets = <&reset 22>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_uart2>; diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-rk805.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-rk805.txt new file mode 100644 index 000000000000..eee3dc260934 --- /dev/null +++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-rk805.txt @@ -0,0 +1,63 @@ +Pin control driver for the RK805 power management IC. + +RK805 has 2 pins which can be configured as GPIO output only. + +Please refer to pinctrl-bindings.txt in this directory +for details of the common pinctrl bindings used by client devices, +including the meaning of the phrase "pin configuration node". + +Optional Pinmux properties: +-------------------------- +The following properties are required if a default pin configuration is needed +at boot. +- pinctrl-names: A pinctrl state name, as per the common pinctrl bindings. +- pinctrl[0...n]: Properties containing the phandles of the pinctrl states, as + per the common pinctrl bindings. + +The pin configurations are defined as children of the pinctrl states node. Each +sub-node has the following properties: + +Required properties: +------------------ +- #gpio-cells: Should be two. The first cell is the pin number and the + second is the GPIO flags. + +- gpio-controller: Marks the device node as a GPIO controller. + +- pins: List of pins. Valid values of the pins property are: gpio0, gpio1. + +The first two properties must be added in the RK805 PMIC node, documented in +Documentation/devicetree/bindings/mfd/rk808.txt + +Optional properties: +------------------- +The following optional properties are defined by the generic pinmux DT bindings. +Absence of a property will leave the configuration +at its default. + function, + output-low, + output-high. + +Valid values for the function property are: gpio. + +There are no additional customised properties for any GPIO. + +Example: +-------- +rk805: rk805@18 { + compatible = "rockchip,rk805"; + ... 
+ gpio-controller; + #gpio-cells = <2>; + + pinctrl-names = "default"; + pinctrl-0 = <&pmic_int_l>, <&rk805_default>; + + rk805_default: pinmux { + gpio01 { + pins = "gpio0", "gpio1"; + function = "gpio"; + output-high; + }; + }; +}; diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-zx.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-zx.txt index e219849b21ca..39170f372599 100644 --- a/Documentation/devicetree/bindings/pinctrl/pinctrl-zx.txt +++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-zx.txt @@ -81,5 +81,4 @@ pmm: pin-controller@1462000 { &vga { pinctrl-names = "default"; pinctrl-0 = <&vga_pins>; - status = "okay"; }; diff --git a/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt index f4d127df980d..9b4f8041c36a 100644 --- a/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt +++ b/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt @@ -112,7 +112,7 @@ Examples Example 1: SH73A0 (SH-Mobile AG5) pin controller node - pfc: pfc@e6050000 { + pfc: pin-controller@e6050000 { compatible = "renesas,pfc-sh73a0"; reg = <0xe6050000 0x8000>, <0xe605801c 0x1c>; @@ -173,5 +173,4 @@ Example 4: KZM-A9-GT (SH-Mobile AG5) default pin state for the MMCIF device bus-width = <8>; vmmc-supply = <®_1p8v>; - status = "okay"; }; diff --git a/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt index 58b7921b4fed..4864e3a74de3 100644 --- a/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt +++ b/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt @@ -121,7 +121,6 @@ uart2: serial@20064000 { reg-shift = <2>; reg-io-width = <1>; clocks = <&mux_uart2>; - status = "okay"; pinctrl-names = "default"; pinctrl-0 = <&uart2_xfer>; diff --git a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt index d907a74f8dc0..33e3d3c47552 100644 --- a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt +++ b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt @@ -180,5 +180,4 @@ pin-controller { &usart1 { pinctrl-0 = <&usart1_pins_a>; pinctrl-names = "default"; - status = "okay"; }; diff --git a/Documentation/devicetree/bindings/power/power-controller.txt b/Documentation/devicetree/bindings/power/power-controller.txt index 4f7a3bc9c407..e45affea8078 100644 --- a/Documentation/devicetree/bindings/power/power-controller.txt +++ b/Documentation/devicetree/bindings/power/power-controller.txt @@ -13,6 +13,5 @@ Example: act8846: act8846@5 { compatible = "active-semi,act8846"; - status = "okay"; system-power-controller; } diff --git a/Documentation/devicetree/bindings/power/renesas,apmu.txt b/Documentation/devicetree/bindings/power/renesas,apmu.txt index 84404c9edff7..af21502e939c 100644 --- a/Documentation/devicetree/bindings/power/renesas,apmu.txt +++ b/Documentation/devicetree/bindings/power/renesas,apmu.txt @@ -1,12 +1,13 @@ DT bindings for the Renesas Advanced Power Management Unit -Renesas R-Car line of SoCs utilize one or more APMU hardware units +Renesas R-Car and RZ/G1 SoCs utilize one or more APMU hardware units for CPU core power domain control including SMP boot and CPU Hotplug. Required properties: - compatible: Should be "renesas,-apmu", "renesas,apmu" as fallback. 
Examples with soctypes are: + - "renesas,r8a7743-apmu" (RZ/G1M) - "renesas,r8a7790-apmu" (R-Car H2) - "renesas,r8a7791-apmu" (R-Car M2-W) - "renesas,r8a7792-apmu" (R-Car V2H) diff --git a/Documentation/devicetree/bindings/power/renesas,rcar-sysc.txt b/Documentation/devicetree/bindings/power/renesas,rcar-sysc.txt index d91715bc8d52..98cc8c09d02d 100644 --- a/Documentation/devicetree/bindings/power/renesas,rcar-sysc.txt +++ b/Documentation/devicetree/bindings/power/renesas,rcar-sysc.txt @@ -17,6 +17,7 @@ Required properties: - "renesas,r8a7794-sysc" (R-Car E2) - "renesas,r8a7795-sysc" (R-Car H3) - "renesas,r8a7796-sysc" (R-Car M3-W) + - "renesas,r8a77995-sysc" (R-Car D3) - reg: Address start and address range for the device. - #power-domain-cells: Must be 1. diff --git a/Documentation/devicetree/bindings/power/reset/st-reset.txt b/Documentation/devicetree/bindings/power/reset/st-reset.txt index 83734dc3a389..b63948737d80 100644 --- a/Documentation/devicetree/bindings/power/reset/st-reset.txt +++ b/Documentation/devicetree/bindings/power/reset/st-reset.txt @@ -8,5 +8,4 @@ Example node: restart { compatible = "st,stih407-restart"; st,syscfg = <&syscfg_sbc_reg>; - status = "okay"; }; diff --git a/Documentation/devicetree/bindings/power/rockchip-io-domain.txt b/Documentation/devicetree/bindings/power/rockchip-io-domain.txt index 43c21fb04564..4a4766e9c254 100644 --- a/Documentation/devicetree/bindings/power/rockchip-io-domain.txt +++ b/Documentation/devicetree/bindings/power/rockchip-io-domain.txt @@ -39,6 +39,8 @@ Required properties: - "rockchip,rk3368-pmu-io-voltage-domain" for rk3368 pmu-domains - "rockchip,rk3399-io-voltage-domain" for rk3399 - "rockchip,rk3399-pmu-io-voltage-domain" for rk3399 pmu-domains + - "rockchip,rv1108-io-voltage-domain" for rv1108 + - "rockchip,rv1108-pmu-io-voltage-domain" for rv1108 pmu-domains Deprecated properties: - rockchip,grf: phandle to the syscon managing the "general register files" diff --git a/Documentation/devicetree/bindings/power/supply/act8945a-charger.txt b/Documentation/devicetree/bindings/power/supply/act8945a-charger.txt index de78d761ce44..b86ecada4f84 100644 --- a/Documentation/devicetree/bindings/power/supply/act8945a-charger.txt +++ b/Documentation/devicetree/bindings/power/supply/act8945a-charger.txt @@ -29,7 +29,6 @@ Example: pmic@5b { compatible = "active-semi,act8945a"; reg = <0x5b>; - status = "okay"; charger { compatible = "active-semi,act8945a-charger"; @@ -43,6 +42,5 @@ Example: active-semi,input-voltage-threshold-microvolt = <6600>; active-semi,precondition-timeout = <40>; active-semi,total-timeout = <3>; - status = "okay"; }; }; diff --git a/Documentation/devicetree/bindings/power/supply/bq24190.txt b/Documentation/devicetree/bindings/power/supply/bq24190.txt new file mode 100644 index 000000000000..9e517d307070 --- /dev/null +++ b/Documentation/devicetree/bindings/power/supply/bq24190.txt @@ -0,0 +1,51 @@ +TI BQ24190 Li-Ion Battery Charger + +Required properties: +- compatible: contains one of the following: + * "ti,bq24190" + * "ti,bq24192i" +- reg: integer, I2C address of the charger. +- interrupts[-extended]: configuration for charger INT pin. + +Optional properties: +- monitored-battery: phandle of battery characteristics devicetree node + The charger uses the following battery properties: + + precharge-current-microamp: maximum charge current during precharge + phase (typically 20% of battery capacity). 
+ + charge-term-current-microamp: a charge cycle terminates when the + battery voltage is above the recharge threshold, and the current is below + this setting (typically 10% of battery capacity). + See also Documentation/devicetree/bindings/power/supply/battery.txt +- ti,system-minimum-microvolt: when power is connected and the battery is below + minimum system voltage, the system will be regulated above this setting. + +Notes: +- Some circuit boards wire the chip's "OTG" pin high (enabling 500mA default + charge current on USB SDP ports, among other features). To simulate this on + boards that wire the pin to a GPIO, set a gpio-hog. + +Example: + + bat: battery { + compatible = "simple-battery"; + precharge-current-microamp = <256000>; + charge-term-current-microamp = <128000>; + // etc. + }; + + bq24190: charger@6a { + compatible = "ti,bq24190"; + reg = <0x6a>; + interrupts-extended = <&gpiochip 10 IRQ_TYPE_EDGE_FALLING>; + monitored-battery = <&bat>; + ti,system-minimum-microvolt = <3200000>; + }; + + &twl_gpio { + otg { + gpio-hog; + gpios = <6 0>; + output-high; + line-name = "otg-gpio"; + }; + }; diff --git a/Documentation/devicetree/bindings/power/supply/ltc2941.txt b/Documentation/devicetree/bindings/power/supply/ltc2941.txt index a9d7aa60558b..3b9ba147b041 100644 --- a/Documentation/devicetree/bindings/power/supply/ltc2941.txt +++ b/Documentation/devicetree/bindings/power/supply/ltc2941.txt @@ -1,13 +1,14 @@ -binding for LTC2941 and LTC2943 battery gauges +binding for LTC2941, LTC2942, LTC2943 and LTC2944 battery gauges -Both the LTC2941 and LTC2943 measure battery capacity. -The LTC2943 is compatible with the LTC2941, it adds voltage and -temperature monitoring, and uses a slightly different conversion -formula for the charge counter. +All chips measure battery capacity. +The LTC2942 is pin compatible with the LTC2941; it adds voltage and +temperature monitoring, and is detected at runtime. The LTC2943 and LTC2944 +are software compatible, use a slightly different conversion formula +for the charge counter and add voltage, current and temperature monitoring. Required properties: -- compatible: Should contain "lltc,ltc2941" or "lltc,ltc2943" which also - indicates the type of I2C chip attached. +- compatible: Should contain "lltc,ltc2941", "lltc,ltc2942", "lltc,ltc2943" + or "lltc,ltc2944" which also indicates the type of I2C chip attached. - reg: The 7-bit I2C address. - lltc,resistor-sense: The sense resistor value in milli-ohms. 
Can be a 32-bit negative value when the battery has been connected to the wrong end of the diff --git a/Documentation/devicetree/bindings/power/supply/max8903-charger.txt b/Documentation/devicetree/bindings/power/supply/max8903-charger.txt index f0f4e12b076e..bab947fef025 100644 --- a/Documentation/devicetree/bindings/power/supply/max8903-charger.txt +++ b/Documentation/devicetree/bindings/power/supply/max8903-charger.txt @@ -21,5 +21,4 @@ Example: flt-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>; chg-gpios = <&gpio3 15 GPIO_ACTIVE_LOW>; cen-gpios = <&gpio2 5 GPIO_ACTIVE_LOW>; - status = "okay"; }; diff --git a/Documentation/devicetree/bindings/power/supply/maxim,max14656.txt b/Documentation/devicetree/bindings/power/supply/maxim,max14656.txt index e03e85ae6572..d6e8dfd0a581 100644 --- a/Documentation/devicetree/bindings/power/supply/maxim,max14656.txt +++ b/Documentation/devicetree/bindings/power/supply/maxim,max14656.txt @@ -12,7 +12,6 @@ Example: clock-frequency = <50000>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c2>; - status = "okay"; max14656@35 { compatible = "maxim,max14656"; diff --git a/Documentation/devicetree/bindings/power/wakeup-source.txt b/Documentation/devicetree/bindings/power/wakeup-source.txt index 963c6dfd484d..3c81f78b5c27 100644 --- a/Documentation/devicetree/bindings/power/wakeup-source.txt +++ b/Documentation/devicetree/bindings/power/wakeup-source.txt @@ -20,13 +20,12 @@ List of legacy properties and respective binding document 1. "enable-sdio-wakeup" Documentation/devicetree/bindings/mmc/mmc.txt 2. "gpio-key,wakeup" Documentation/devicetree/bindings/input/gpio-keys{,-polled}.txt 3. "has-tpo" Documentation/devicetree/bindings/rtc/rtc-opal.txt -4. "isil,irq2-can-wakeup-machine" Documentation/devicetree/bindings/rtc/isil,isl12057.txt -5. "linux,wakeup" Documentation/devicetree/bindings/input/gpio-matrix-keypad.txt +4. "linux,wakeup" Documentation/devicetree/bindings/input/gpio-matrix-keypad.txt Documentation/devicetree/bindings/mfd/tc3589x.txt Documentation/devicetree/bindings/input/ads7846.txt -6. "linux,keypad-wakeup" Documentation/devicetree/bindings/input/qcom,pm8xxx-keypad.txt -7. "linux,input-wakeup" Documentation/devicetree/bindings/input/samsung-keypad.txt -8. "nvidia,wakeup-source" Documentation/devicetree/bindings/input/nvidia,tegra20-kbc.txt +5. "linux,keypad-wakeup" Documentation/devicetree/bindings/input/qcom,pm8xxx-keypad.txt +6. "linux,input-wakeup" Documentation/devicetree/bindings/input/samsung-keypad.txt +7. "nvidia,wakeup-source" Documentation/devicetree/bindings/input/nvidia,tegra20-kbc.txt Examples -------- diff --git a/Documentation/devicetree/bindings/powerpc/ibm,vas.txt b/Documentation/devicetree/bindings/powerpc/ibm,vas.txt new file mode 100644 index 000000000000..bf11d2faf7b8 --- /dev/null +++ b/Documentation/devicetree/bindings/powerpc/ibm,vas.txt @@ -0,0 +1,22 @@ +* IBM Powerpc Virtual Accelerator Switchboard (VAS) + +VAS is a hardware mechanism that allows kernel subsystems and user processes +to directly submit compression and other requests to Nest accelerators (NX) +or other coprocessors functions. + +Required properties: +- compatible : should be "ibm,vas". 
+- ibm,vas-id : A unique identifier for each instance of VAS in the system +- reg : Should contain 4 pairs of 64-bit fields specifying the Hypervisor + window context start and length, OS/User window context start and length, + "Paste address" start and length, "Paste window id" start bit and number + of bits) + +Example: + + vas@6019100000000 { + compatible = "ibm,vas", "ibm,power9-vas"; + reg = <0x6019100000000 0x2000000 0x6019000000000 0x100000000 0x8000000000000 0x100000000 0x20 0x10>; + name = "vas"; + ibm,vas-id = <0x1>; + }; diff --git a/Documentation/devicetree/bindings/powerpc/opal/sensor-groups.txt b/Documentation/devicetree/bindings/powerpc/opal/sensor-groups.txt new file mode 100644 index 000000000000..6ad881cbffda --- /dev/null +++ b/Documentation/devicetree/bindings/powerpc/opal/sensor-groups.txt @@ -0,0 +1,27 @@ +IBM OPAL Sensor Groups Binding +------------------------------- + +Node: /ibm,opal/sensor-groups + +Description: Contains sensor groups available in the Powernv P9 +servers. Each child node indicates a sensor group. + +- compatible : Should be "ibm,opal-sensor-group" + +Each child node contains below properties: + +- type : String to indicate the type of sensor-group + +- sensor-group-id: Abstract unique identifier provided by firmware of + type which is used for sensor-group + operations like clearing the min/max history of all + sensors belonging to the group. + +- ibm,chip-id : Chip ID + +- sensors : Phandle array of child nodes of /ibm,opal/sensor/ + belonging to this group + +- ops : Array of opal-call numbers indicating available operations on + sensor groups like clearing min/max, enabling/disabling sensor + group. diff --git a/Documentation/devicetree/bindings/pps/pps-gpio.txt b/Documentation/devicetree/bindings/pps/pps-gpio.txt index 40bf9c3564a5..0de23b793657 100644 --- a/Documentation/devicetree/bindings/pps/pps-gpio.txt +++ b/Documentation/devicetree/bindings/pps/pps-gpio.txt @@ -13,8 +13,12 @@ Optional properties: Example: pps { - compatible = "pps-gpio"; - gpios = <&gpio2 6 0>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_pps>; + gpios = <&gpio1 26 GPIO_ACTIVE_HIGH>; assert-falling-edge; + + compatible = "pps-gpio"; + status = "okay"; }; diff --git a/Documentation/devicetree/bindings/ptp/brcm,ptp-dte.txt b/Documentation/devicetree/bindings/ptp/brcm,ptp-dte.txt index 7c04e22a5d6a..6b1075ee8a30 100644 --- a/Documentation/devicetree/bindings/ptp/brcm,ptp-dte.txt +++ b/Documentation/devicetree/bindings/ptp/brcm,ptp-dte.txt @@ -16,5 +16,4 @@ Example: ptp: ptp-dte@180af650 { compatible = "brcm,iproc-ptp-dte", "brcm,ptp-dte"; reg = <0x180af650 0x10>; - status = "okay"; }; diff --git a/Documentation/devicetree/bindings/pwm/pwm-bcm2835.txt b/Documentation/devicetree/bindings/pwm/pwm-bcm2835.txt index cf573e85b11d..8cf87d1bfca5 100644 --- a/Documentation/devicetree/bindings/pwm/pwm-bcm2835.txt +++ b/Documentation/devicetree/bindings/pwm/pwm-bcm2835.txt @@ -6,7 +6,7 @@ Required properties: - clocks: This clock defines the base clock frequency of the PWM hardware system, the period and the duty_cycle of the PWM signal is a multiple of the base period. -- #pwm-cells: Should be 2. See pwm.txt in this directory for a description of +- #pwm-cells: Should be 3. See pwm.txt in this directory for a description of the cells format. 
Examples: @@ -15,7 +15,7 @@ pwm@2020c000 { compatible = "brcm,bcm2835-pwm"; reg = <0x2020c000 0x28>; clocks = <&clk_pwm>; - #pwm-cells = <2>; + #pwm-cells = <3>; }; clocks { diff --git a/Documentation/devicetree/bindings/pwm/pwm-mediatek.txt b/Documentation/devicetree/bindings/pwm/pwm-mediatek.txt index 54c59b0560ad..ef8bd3cb67ab 100644 --- a/Documentation/devicetree/bindings/pwm/pwm-mediatek.txt +++ b/Documentation/devicetree/bindings/pwm/pwm-mediatek.txt @@ -2,6 +2,8 @@ MediaTek PWM controller Required properties: - compatible: should be "mediatek,-pwm": + - "mediatek,mt2712-pwm": found on mt2712 SoC. + - "mediatek,mt7622-pwm": found on mt7622 SoC. - "mediatek,mt7623-pwm": found on mt7623 SoC. - reg: physical base address and length of the controller's registers. - #pwm-cells: must be 2. See pwm.txt in this directory for a description of @@ -10,7 +12,9 @@ Required properties: - clock-names: must contain the following: - "top": the top clock generator - "main": clock used by the PWM core - - "pwm1-5": the five per PWM clocks + - "pwm1-8": the eight per PWM clocks for mt2712 + - "pwm1-6": the six per PWM clocks for mt7622 + - "pwm1-5": the five per PWM clocks for mt7623 - pinctrl-names: Must contain a "default" entry. - pinctrl-0: One property must exist for each entry in pinctrl-names. See pinctrl/pinctrl-bindings.txt for details of the property values. diff --git a/Documentation/devicetree/bindings/pwm/pwm-meson.txt b/Documentation/devicetree/bindings/pwm/pwm-meson.txt index 5b07bebbf6f7..1ee81321c35e 100644 --- a/Documentation/devicetree/bindings/pwm/pwm-meson.txt +++ b/Documentation/devicetree/bindings/pwm/pwm-meson.txt @@ -19,7 +19,6 @@ Example: compatible = "amlogic,meson-gxbb-pwm"; reg = <0x0 0x08550 0x0 0x10>; #pwm-cells = <3>; - status = "disabled"; clocks = <&xtal>, <&xtal>; clock-names = "clkin0", "clkin1"; } diff --git a/Documentation/devicetree/bindings/pwm/pwm-rockchip.txt b/Documentation/devicetree/bindings/pwm/pwm-rockchip.txt index b8be3d09ee26..2c5e52a5bede 100644 --- a/Documentation/devicetree/bindings/pwm/pwm-rockchip.txt +++ b/Documentation/devicetree/bindings/pwm/pwm-rockchip.txt @@ -3,10 +3,17 @@ Rockchip PWM controller Required properties: - compatible: should be "rockchip,-pwm" "rockchip,rk2928-pwm": found on RK29XX,RK3066 and RK3188 SoCs - "rockchip,rk3288-pwm": found on RK3288 SoC + "rockchip,rk3288-pwm": found on RK3288 SOC + "rockchip,rv1108-pwm", "rockchip,rk3288-pwm": found on RV1108 SoC "rockchip,vop-pwm": found integrated in VOP on RK3288 SoC - reg: physical base address and length of the controller's registers - - clocks: phandle and clock specifier of the PWM reference clock + - clocks: See ../clock/clock-bindings.txt + - For older hardware (rk2928, rk3066, rk3188, rk3228, rk3288, rk3399): + - There is one clock that's used both to derive the functional clock + for the device and as the bus clock. + - For newer hardware (rk3328 and future socs): specified by name + - "pwm": This is used to derive the functional clock. + - "pclk": This is the APB bus clock. - #pwm-cells: must be 2 (rk2928) or 3 (rk3288). See pwm.txt in this directory for a description of the cell format. diff --git a/Documentation/devicetree/bindings/pwm/pwm-stm32-lp.txt b/Documentation/devicetree/bindings/pwm/pwm-stm32-lp.txt new file mode 100644 index 000000000000..f8338d11fd2b --- /dev/null +++ b/Documentation/devicetree/bindings/pwm/pwm-stm32-lp.txt @@ -0,0 +1,24 @@ +STMicroelectronics STM32 Low-Power Timer PWM + +STM32 Low-Power Timer provides single channel PWM. 
+ +Must be a sub-node of an STM32 Low-Power Timer device tree node. +See ../mfd/stm32-lptimer.txt for details about the parent node. + +Required parameters: +- compatible: Must be "st,stm32-pwm-lp". + +Optional properties: +- pinctrl-names: Set to "default". +- pinctrl-0: Phandle pointing to pin configuration node for PWM. + +Example: + timer@40002400 { + compatible = "st,stm32-lptimer"; + ... + pwm { + compatible = "st,stm32-pwm-lp"; + pinctrl-names = "default"; + pinctrl-0 = <&lppwm1_pins>; + }; + }; diff --git a/Documentation/devicetree/bindings/pwm/pwm-sun4i.txt b/Documentation/devicetree/bindings/pwm/pwm-sun4i.txt index f1cbeefb3087..c5171660eaf9 100644 --- a/Documentation/devicetree/bindings/pwm/pwm-sun4i.txt +++ b/Documentation/devicetree/bindings/pwm/pwm-sun4i.txt @@ -19,5 +19,4 @@ Example: reg = <0x01c20e00 0xc>; clocks = <&osc24M>; #pwm-cells = <3>; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/pwm/pwm-tiecap.txt b/Documentation/devicetree/bindings/pwm/pwm-tiecap.txt index 8007e839a716..06a363d9ccef 100644 --- a/Documentation/devicetree/bindings/pwm/pwm-tiecap.txt +++ b/Documentation/devicetree/bindings/pwm/pwm-tiecap.txt @@ -6,6 +6,7 @@ Required properties: for am4372 - compatible = "ti,am4372-ecap", "ti,am3352-ecap", "ti,am33xx-ecap"; for da850 - compatible = "ti,da850-ecap", "ti,am3352-ecap", "ti,am33xx-ecap"; for dra746 - compatible = "ti,dra746-ecap", "ti,am3352-ecap"; + for 66ak2g - compatible = "ti,k2g-ecap", "ti,am3352-ecap"; - #pwm-cells: should be 3. See pwm.txt in this directory for a description of the cells format. The PWM channel index ranges from 0 to 4. The only third cell flag supported by this binding is PWM_POLARITY_INVERTED. diff --git a/Documentation/devicetree/bindings/pwm/pwm-tipwmss.txt b/Documentation/devicetree/bindings/pwm/pwm-tipwmss.txt index 1a5d7b71db89..4633697fbda1 100644 --- a/Documentation/devicetree/bindings/pwm/pwm-tipwmss.txt +++ b/Documentation/devicetree/bindings/pwm/pwm-tipwmss.txt @@ -26,7 +26,6 @@ epwmss0: epwmss@48300000 { /* PWMSS for am33xx */ ti,hwmods = "epwmss0"; #address-cells = <1>; #size-cells = <1>; - status = "disabled"; ranges = <0x48300100 0x48300100 0x80 /* ECAP */ 0x48300180 0x48300180 0x80 /* EQEP */ 0x48300200 0x48300200 0x80>; /* EHRPWM */ @@ -40,7 +39,6 @@ epwmss0: epwmss@48300000 { /* PWMSS for am4372 */ ti,hwmods = "epwmss0"; #address-cells = <1>; #size-cells = <1>; - status = "disabled"; ranges = <0x48300100 0x48300100 0x80 /* ECAP */ 0x48300180 0x48300180 0x80 /* EQEP */ 0x48300200 0x48300200 0x80>; /* EHRPWM */ diff --git a/Documentation/devicetree/bindings/pwm/pwm-zx.txt b/Documentation/devicetree/bindings/pwm/pwm-zx.txt new file mode 100644 index 000000000000..a6bcc75c9164 --- /dev/null +++ b/Documentation/devicetree/bindings/pwm/pwm-zx.txt @@ -0,0 +1,22 @@ +ZTE ZX PWM controller + +Required properties: + - compatible: Should be "zte,zx296718-pwm". + - reg: Physical base address and length of the controller's registers. + - clocks : The phandle and specifier referencing the controller's clocks. + - clock-names: "pclk" for PCLK, "wclk" for WCLK to the PWM controller. The + PCLK is for register access, while WCLK is the reference clock for + calculating period and duty cycles. + - #pwm-cells: Should be 3. See pwm.txt in this directory for a description of + the cells format. 
+ +Example: + + pwm: pwm@1439000 { + compatible = "zte,zx296718-pwm"; + reg = <0x1439000 0x1000>; + clocks = <&lsp1crm LSP1_PWM_PCLK>, + <&lsp1crm LSP1_PWM_WCLK>; + clock-names = "pclk", "wclk"; + #pwm-cells = <3>; + }; diff --git a/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.txt b/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.txt index b067e84a94b5..1aadc804dae4 100644 --- a/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.txt +++ b/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.txt @@ -6,7 +6,6 @@ Required Properties: - "renesas,tpu-r8a73a4": for R8A77A4 (R-Mobile APE6) compatible PWM controller. - "renesas,tpu-r8a7740": for R8A7740 (R-Mobile A1) compatible PWM controller. - "renesas,tpu-r8a7790": for R8A7790 (R-Car H2) compatible PWM controller. - - "renesas,tpu-sh7372": for SH7372 (SH-Mobile AP4) compatible PWM controller. - "renesas,tpu": for generic R-Car TPU PWM controller. - reg: Base address and length of each memory resource used by the PWM diff --git a/Documentation/devicetree/bindings/regulator/act8865-regulator.txt b/Documentation/devicetree/bindings/regulator/act8865-regulator.txt index 6067d9830d07..3ae9f1088845 100644 --- a/Documentation/devicetree/bindings/regulator/act8865-regulator.txt +++ b/Documentation/devicetree/bindings/regulator/act8865-regulator.txt @@ -52,7 +52,6 @@ Example: compatible = "active-semi,act8865"; reg = <0x5b>; active-semi,vsel-high; - status = "disabled"; regulators { vcc_1v8_reg: DCDC_REG1 { diff --git a/Documentation/devicetree/bindings/regulator/act8945a-regulator.txt b/Documentation/devicetree/bindings/regulator/act8945a-regulator.txt index 5c80a7779552..ac955dea00d1 100644 --- a/Documentation/devicetree/bindings/regulator/act8945a-regulator.txt +++ b/Documentation/devicetree/bindings/regulator/act8945a-regulator.txt @@ -23,7 +23,6 @@ Example: pmic@5b { compatible = "active-semi,act8945a"; reg = <0x5b>; - status = "okay"; active-semi,vsel-high; diff --git a/Documentation/devicetree/bindings/remoteproc/imx-rproc.txt b/Documentation/devicetree/bindings/remoteproc/imx-rproc.txt new file mode 100644 index 000000000000..fbcefd965dc4 --- /dev/null +++ b/Documentation/devicetree/bindings/remoteproc/imx-rproc.txt @@ -0,0 +1,33 @@ +NXP iMX6SX/iMX7D Co-Processor Bindings +---------------------------------------- + +This binding provides support for ARM Cortex M4 Co-processor found on some +NXP iMX SoCs. + +Required properties: +- compatible Should be one of: + "fsl,imx7d-cm4" + "fsl,imx6sx-cm4" +- clocks Clock for co-processor (See: ../clock/clock-bindings.txt) +- syscon Phandle to syscon block which provide access to + System Reset Controller + +Optional properties: +- memory-region list of phandels to the reserved memory regions. + (See: ../reserved-memory/reserved-memory.txt) + +Example: + m4_reserved_sysmem1: cm4@80000000 { + reg = <0x80000000 0x80000>; + }; + + m4_reserved_sysmem2: cm4@81000000 { + reg = <0x81000000 0x80000>; + }; + + imx7d-cm4 { + compatible = "fsl,imx7d-cm4"; + memory-region = <&m4_reserved_sysmem1>, <&m4_reserved_sysmem2>; + syscon = <&src>; + clocks = <&clks IMX7D_ARM_M4_ROOT_CLK>; + }; diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,adsp.txt b/Documentation/devicetree/bindings/remoteproc/qcom,adsp.txt index 75ad7b8df0b1..728e4193f7a6 100644 --- a/Documentation/devicetree/bindings/remoteproc/qcom,adsp.txt +++ b/Documentation/devicetree/bindings/remoteproc/qcom,adsp.txt @@ -63,9 +63,10 @@ on the Qualcomm ADSP Hexagon core. 
= SUBNODES -The adsp node may have an subnode named "smd-edge" that describes the SMD edge, -channels and devices related to the ADSP. See ../soc/qcom/qcom,smd.txt for -details on how to describe the SMD edge. +The adsp node may have an subnode named either "smd-edge" or "glink-edge" that +describes the communication edge, channels and devices related to the ADSP. +See ../soc/qcom/qcom,smd.txt and ../soc/qcom/qcom,glink.txt for details on how +to describe these. = EXAMPLE diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt b/Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt index 92347fe6890e..7ff3f7903f26 100644 --- a/Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt +++ b/Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt @@ -90,6 +90,11 @@ the memory regions used by the Hexagon firmware. Each sub-node must contain: Value type: Definition: reference to the reserved-memory for the region +The Hexagon node may also have an subnode named either "smd-edge" or +"glink-edge" that describes the communication edge, channels and devices +related to the Hexagon. See ../soc/qcom/qcom,smd.txt and +../soc/qcom/qcom,glink.txt for details on how to describe these. + = EXAMPLE The following example describes the resources needed to boot control the Hexagon, as it is found on MSM8974 boards. diff --git a/Documentation/devicetree/bindings/remoteproc/ti,davinci-rproc.txt b/Documentation/devicetree/bindings/remoteproc/ti,davinci-rproc.txt new file mode 100644 index 000000000000..e44a97e21164 --- /dev/null +++ b/Documentation/devicetree/bindings/remoteproc/ti,davinci-rproc.txt @@ -0,0 +1,86 @@ +TI Davinci DSP devices +======================= + +Binding status: Unstable - Subject to changes for DT representation of clocks + and resets + +The TI Davinci family of SoCs usually contains a TI DSP Core sub-system that +is used to offload some of the processor-intensive tasks or algorithms, for +achieving various system level goals. + +The processor cores in the sub-system usually contain additional sub-modules +like L1 and/or L2 caches/SRAMs, an Interrupt Controller, an external memory +controller, a dedicated local power/sleep controller etc. The DSP processor +core used in Davinci SoCs is usually a C674x DSP CPU. + +DSP Device Node: +================ +Each DSP Core sub-system is represented as a single DT node. + +Required properties: +-------------------- +The following are the mandatory properties: + +- compatible: Should be one of the following, + "ti,da850-dsp" for DSPs on OMAP-L138 SoCs + +- reg: Should contain an entry for each value in 'reg-names'. + Each entry should have the memory region's start address + and the size of the region, the representation matching + the parent node's '#address-cells' and '#size-cells' values. + +- reg-names: Should contain strings with the following names, each + representing a specific internal memory region or a + specific register space, + "l2sram", "l1pram", "l1dram", "host1cfg", "chipsig_base" + +- interrupts: Should contain the interrupt number used to receive the + interrupts from the DSP. The value should follow the + interrupt-specifier format as dictated by the + 'interrupt-parent' node. + +- memory-region: phandle to the reserved memory node to be associated + with the remoteproc device. 
The reserved memory node + can be a CMA memory node, and should be defined as + per the bindings in + Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt + +Optional properties: +-------------------- +- interrupt-parent: phandle to the interrupt controller node. This property + is needed if the device node hierarchy doesn't have an + interrupt controller. + + +Example: +-------- + + /* DSP Reserved Memory node */ + reserved-memory { + #address-cells = <1>; + #size-cells = <1>; + ranges; + + dsp_memory_region: dsp-memory@c3000000 { + compatible = "shared-dma-pool"; + reg = <0xc3000000 0x1000000>; + reusable; + }; + }; + + /* DSP node */ + { + dsp: dsp@11800000 { + compatible = "ti,da850-dsp"; + reg = <0x11800000 0x40000>, + <0x11e00000 0x8000>, + <0x11f00000 0x8000>, + <0x01c14044 0x4>, + <0x01c14174 0x8>; + reg-names = "l2sram", "l1pram", "l1dram", "host1cfg", + "chipsig"; + interrupt-parent = <&intc>; + interrupts = <28>; + memory-region = <&dsp_memory_region>; + }; + }; diff --git a/Documentation/devicetree/bindings/remoteproc/ti,keystone-rproc.txt b/Documentation/devicetree/bindings/remoteproc/ti,keystone-rproc.txt index 2aac1aa4123d..1eb72874130b 100644 --- a/Documentation/devicetree/bindings/remoteproc/ti,keystone-rproc.txt +++ b/Documentation/devicetree/bindings/remoteproc/ti,keystone-rproc.txt @@ -26,6 +26,7 @@ The following are the mandatory properties: "ti,k2hk-dsp" for DSPs on Keystone 2 66AK2H/K SoCs "ti,k2l-dsp" for DSPs on Keystone 2 66AK2L SoCs "ti,k2e-dsp" for DSPs on Keystone 2 66AK2E SoCs + "ti,k2g-dsp" for DSPs on Keystone 2 66AK2G SoCs - reg: Should contain an entry for each value in 'reg-names'. Each entry should have the memory region's start address @@ -37,20 +38,18 @@ The following are the mandatory properties: should be defined in this order, "l2sram", "l1pram", "l1dram" -- clocks: Should contain the device's input clock, and should be - defined as per the bindings in, - Documentation/devicetree/bindings/clock/keystone-gate.txt - - ti,syscon-dev: Should be a pair of the phandle to the Keystone Device State Control node, and the register offset of the DSP boot address register within that node's address space. - resets: Should contain the phandle to the reset controller node managing the resets for this device, and a reset - specifier. Please refer to the following reset bindings - for the reset argument specifier as per SoC, + specifier. 
Please refer to either of the following reset + bindings for the reset argument specifier as per SoC, Documentation/devicetree/bindings/reset/ti-syscon-reset.txt - for 66AK2HK/66AK2L/66AK2E SoCs + for 66AK2HK/66AK2L/66AK2E SoCs or, + Documentation/devicetree/bindings/reset/ti,sci-reset.txt + for 66AK2G SoCs - interrupt-parent: Should contain a phandle to the Keystone 2 IRQ controller IP node that is used by the ARM CorePac processor to @@ -75,6 +74,22 @@ The following are the mandatory properties: The gpio device to be used is as per the bindings in, Documentation/devicetree/bindings/gpio/gpio-dsp-keystone.txt +SoC-specific Required properties: +--------------------------------- +The following are mandatory properties for Keystone 2 66AK2HK, 66AK2L and 66AK2E +SoCs only: + +- clocks: Should contain the device's input clock, and should be + defined as per the bindings in, + Documentation/devicetree/bindings/clock/keystone-gate.txt + +The following are mandatory properties for Keystone 2 66AK2G SoCs only: + +- power-domains: Should contain a phandle to a PM domain provider node + and an args specifier containing the DSP device id + value. This property is as per the binding, + Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt + Optional properties: -------------------- @@ -85,8 +100,10 @@ Optional properties: Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt -Example: --------- +Examples: +--------- + +1. /* 66AK2H/K DSP aliases */ aliases { rproc0 = &dsp0; @@ -131,3 +148,41 @@ Example: }; }; + +2. + /* 66AK2G DSP alias */ + aliases { + rproc0 = &dsp0; + }; + + /* 66AK2G DSP memory node */ + reserved-memory { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + dsp_common_memory: dsp-common-memory@81f800000 { + compatible = "shared-dma-pool"; + reg = <0x00000008 0x1f800000 0x00000000 0x800000>; + reusable; + }; + }; + + /* 66AK2G DSP node */ + soc { + dsp0: dsp@10800000 { + compatible = "ti,k2g-dsp"; + reg = <0x10800000 0x00100000>, + <0x10e00000 0x00008000>, + <0x10f00000 0x00008000>; + reg-names = "l2sram", "l1pram", "l1dram"; + power-domains = <&k2g_pds 0x0046>; + ti,syscon-dev = <&devctrl 0x40>; + resets = <&k2g_reset 0x0046 0x1>; + interrupt-parent = <&kirq0>; + interrupts = <0 8>; + interrupt-names = "vring", "exception"; + kick-gpios = <&dspgpio0 27 0>; + memory-region = <&dsp_common_memory>; + }; + }; diff --git a/Documentation/devicetree/bindings/reset/lantiq,reset.txt b/Documentation/devicetree/bindings/reset/lantiq,reset.txt new file mode 100644 index 000000000000..c6aef36b7d15 --- /dev/null +++ b/Documentation/devicetree/bindings/reset/lantiq,reset.txt @@ -0,0 +1,30 @@ +Lantiq XWAY SoC RCU reset controller binding +============================================ + +This binding describes a reset-controller found on the RCU module on Lantiq +XWAY SoCs. + +This node has to be a sub node of the Lantiq RCU block. + +------------------------------------------------------------------------------- +Required properties: +- compatible : Should be one of + "lantiq,danube-reset" + "lantiq,xrx200-reset" +- reg : Defines the following sets of registers in the parent + syscon device + - Offset of the reset set register + - Offset of the reset status register +- #reset-cells : Specifies the number of cells needed to encode the + reset line, should be 2. + The first cell takes the reset set bit and the + second cell takes the status bit. 
+ +------------------------------------------------------------------------------- +Example for the reset-controllers on the xRX200 SoCs: + reset0: reset-controller@10 { + compatible = "lantiq,xrx200-reset"; + reg <0x10 0x04>, <0x14 0x04>; + + #reset-cells = <2>; + }; diff --git a/Documentation/devicetree/bindings/reset/nxp,lpc1850-rgu.txt b/Documentation/devicetree/bindings/reset/nxp,lpc1850-rgu.txt index b4e96a278445..05d5be48dae4 100644 --- a/Documentation/devicetree/bindings/reset/nxp,lpc1850-rgu.txt +++ b/Documentation/devicetree/bindings/reset/nxp,lpc1850-rgu.txt @@ -80,5 +80,4 @@ mac: ethernet@40010000 { clock-names = "stmmaceth"; resets = <&rgu 22>; reset-names = "stmmaceth"; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/reset/renesas,rst.txt b/Documentation/devicetree/bindings/reset/renesas,rst.txt index fe5e0f37b3c9..e5a03ffe04fb 100644 --- a/Documentation/devicetree/bindings/reset/renesas,rst.txt +++ b/Documentation/devicetree/bindings/reset/renesas,rst.txt @@ -26,6 +26,7 @@ Required properties: - "renesas,r8a7794-rst" (R-Car E2) - "renesas,r8a7795-rst" (R-Car H3) - "renesas,r8a7796-rst" (R-Car M3-W) + - "renesas,r8a77995-rst" (R-Car D3) - reg: Address start and address range for the device. diff --git a/Documentation/devicetree/bindings/reset/snps,hsdk-reset.txt b/Documentation/devicetree/bindings/reset/snps,hsdk-reset.txt new file mode 100644 index 000000000000..830069b1c37c --- /dev/null +++ b/Documentation/devicetree/bindings/reset/snps,hsdk-reset.txt @@ -0,0 +1,28 @@ +Binding for the Synopsys HSDK reset controller + +This binding uses the common reset binding[1]. + +[1] Documentation/devicetree/bindings/reset/reset.txt + +Required properties: +- compatible: should be "snps,hsdk-reset". +- reg: should always contain 2 pairs address - length: first for reset + configuration register and second for corresponding SW reset and status bits + register. +- #reset-cells: from common reset binding; Should always be set to 1. + +Example: + reset: reset@880 { + compatible = "snps,hsdk-reset"; + #reset-cells = <1>; + reg = <0x8A0 0x4>, <0xFF0 0x4>; + }; + +Specifying reset lines connected to IP modules: + ethernet@.... { + .... + resets = <&reset HSDK_V1_ETH_RESET>; + .... + }; + +The index could be found in diff --git a/Documentation/devicetree/bindings/reset/uniphier-reset.txt b/Documentation/devicetree/bindings/reset/uniphier-reset.txt index 83ab0f599c40..68a6f487c409 100644 --- a/Documentation/devicetree/bindings/reset/uniphier-reset.txt +++ b/Documentation/devicetree/bindings/reset/uniphier-reset.txt @@ -6,7 +6,6 @@ System reset Required properties: - compatible: should be one of the following: - "socionext,uniphier-sld3-reset" - for sLD3 SoC "socionext,uniphier-ld4-reset" - for LD4 SoC "socionext,uniphier-pro4-reset" - for Pro4 SoC "socionext,uniphier-sld8-reset" - for sLD8 SoC @@ -37,7 +36,6 @@ Media I/O (MIO) reset, SD reset Required properties: - compatible: should be one of the following: - "socionext,uniphier-sld3-mio-reset" - for sLD3 SoC "socionext,uniphier-ld4-mio-reset" - for LD4 SoC "socionext,uniphier-pro4-mio-reset" - for Pro4 SoC "socionext,uniphier-sld8-mio-reset" - for sLD8 SoC @@ -92,3 +90,28 @@ Example: other nodes ... }; + + +Analog signal amplifier reset +----------------------------- + +Required properties: +- compatible: should be one of the following: + "socionext,uniphier-ld11-adamv-reset" - for LD11 SoC + "socionext,uniphier-ld20-adamv-reset" - for LD20 SoC +- #reset-cells: should be 1. 
+ +Example: + + adamv@57920000 { + compatible = "socionext,uniphier-ld11-adamv", + "simple-mfd", "syscon"; + reg = <0x57920000 0x1000>; + + adamv_rst: reset { + compatible = "socionext,uniphier-ld11-adamv-reset"; + #reset-cells = <1>; + }; + + other nodes ... + }; diff --git a/Documentation/devicetree/bindings/rng/imx-rngc.txt b/Documentation/devicetree/bindings/rng/imx-rngc.txt new file mode 100644 index 000000000000..93c7174a7bed --- /dev/null +++ b/Documentation/devicetree/bindings/rng/imx-rngc.txt @@ -0,0 +1,21 @@ +Freescale RNGC (Random Number Generator Version C) + +The driver also supports version B, which is mostly compatible +to version C. + +Required properties: +- compatible : should be one of + "fsl,imx25-rngb" + "fsl,imx35-rngc" +- reg : offset and length of the register set of this block +- interrupts : the interrupt number for the RNGC block +- clocks : the RNGC clk source + +Example: + +rng@53fb0000 { + compatible = "fsl,imx25-rngb"; + reg = <0x53fb0000 0x4000>; + interrupts = <22>; + clocks = <&trng_clk>; +}; diff --git a/Documentation/devicetree/bindings/rtc/google,goldfish-rtc.txt b/Documentation/devicetree/bindings/rtc/google,goldfish-rtc.txt new file mode 100644 index 000000000000..634312dd95ca --- /dev/null +++ b/Documentation/devicetree/bindings/rtc/google,goldfish-rtc.txt @@ -0,0 +1,17 @@ +Android Goldfish RTC + +Android Goldfish RTC device used by Android emulator. + +Required properties: + +- compatible : should contain "google,goldfish-rtc" +- reg : +- interrupts : + +Example: + + goldfish_timer@9020000 { + compatible = "google,goldfish-rtc"; + reg = <0x9020000 0x1000>; + interrupts = <0x3>; + }; diff --git a/Documentation/devicetree/bindings/rtc/isil,isl12057.txt b/Documentation/devicetree/bindings/rtc/isil,isl12057.txt index cf83e0940302..fbbdd92e5af9 100644 --- a/Documentation/devicetree/bindings/rtc/isil,isl12057.txt +++ b/Documentation/devicetree/bindings/rtc/isil,isl12057.txt @@ -24,7 +24,6 @@ Optional properties: - "wakeup-source": mark the chip as a wakeup source, independently of the availability of an IRQ line connected to the SoC. - (Legacy property supported: "isil,irq2-can-wakeup-machine") - "interrupt-parent", "interrupts": for passing the interrupt line of the SoC connected to IRQ#2 of the RTC chip. 
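For reference, a minimal sketch of an ISL12057 node using the "wakeup-source" property discussed above. The 0x68 slave address and the compatible string come from the binding itself; the parent I2C bus, the GPIO interrupt line and its trigger type are illustrative assumptions, not taken from this patch:

	isl12057: rtc@68 {
		compatible = "isil,isl12057";
		reg = <0x68>;
		/* interrupt routing below is a placeholder, adapt to the board */
		interrupt-parent = <&gpio1>;
		interrupts = <5 IRQ_TYPE_EDGE_FALLING>;
		wakeup-source;
	};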
diff --git a/Documentation/devicetree/bindings/rtc/realtek,rtd119x.txt b/Documentation/devicetree/bindings/rtc/realtek,rtd119x.txt new file mode 100644 index 000000000000..bbf1ccb5df31 --- /dev/null +++ b/Documentation/devicetree/bindings/rtc/realtek,rtd119x.txt @@ -0,0 +1,16 @@ +Realtek RTD129x Real-Time Clock +=============================== + +Required properties: +- compatible : Should be "realtek,rtd1295-rtc" +- reg : Specifies the physical base address and size +- clocks : Specifies the clock gate + + +Example: + + rtc@9801b600 { + compatible = "realtek,rtd1295-rtc"; + reg = <0x9801b600 0x100>; + clocks = <&clkc RTD1295_CLK_EN_MISC_RTC>; + }; diff --git a/Documentation/devicetree/bindings/rtc/st,stm32-rtc.txt b/Documentation/devicetree/bindings/rtc/st,stm32-rtc.txt index 0a4c371a9b7a..a66692a08ace 100644 --- a/Documentation/devicetree/bindings/rtc/st,stm32-rtc.txt +++ b/Documentation/devicetree/bindings/rtc/st,stm32-rtc.txt @@ -45,5 +45,4 @@ Example: interrupts = <17 1>; interrupt-names = "alarm"; st,syscfg = <&pwrcfg>; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/rtc/sun6i-rtc.txt b/Documentation/devicetree/bindings/rtc/sun6i-rtc.txt index 945934918b71..d5e26d313f62 100644 --- a/Documentation/devicetree/bindings/rtc/sun6i-rtc.txt +++ b/Documentation/devicetree/bindings/rtc/sun6i-rtc.txt @@ -10,7 +10,7 @@ Required properties: Required properties for new device trees - clocks : phandle to the 32kHz external oscillator -- clock-output-names : name of the LOSC clock created +- clock-output-names : names of the LOSC and its external output clocks created - #clock-cells : must be equals to 1. The RTC provides two clocks: the LOSC and its external output, with index 0 and 1 respectively. @@ -21,7 +21,7 @@ rtc: rtc@01f00000 { compatible = "allwinner,sun6i-a31-rtc"; reg = <0x01f00000 0x54>; interrupts = <0 40 4>, <0 41 4>; - clock-output-names = "osc32k"; + clock-output-names = "osc32k", "osc32k-out"; clocks = <&ext_osc32k>; #clock-cells = <1>; }; diff --git a/Documentation/devicetree/bindings/scsi/hisilicon-sas.txt b/Documentation/devicetree/bindings/scsi/hisilicon-sas.txt index 2a42a323fa1a..b6a869f97715 100644 --- a/Documentation/devicetree/bindings/scsi/hisilicon-sas.txt +++ b/Documentation/devicetree/bindings/scsi/hisilicon-sas.txt @@ -85,5 +85,4 @@ Example: <366 1>,<367 1>/* cq30-31 */ <376 4>,/* fatal ecc */ <381 4>;/* fatal axi */ - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/security/tpm/st33zp24-i2c.txt b/Documentation/devicetree/bindings/security/tpm/st33zp24-i2c.txt index 3ad115efed1e..6a4e0d30d8c4 100644 --- a/Documentation/devicetree/bindings/security/tpm/st33zp24-i2c.txt +++ b/Documentation/devicetree/bindings/security/tpm/st33zp24-i2c.txt @@ -19,7 +19,6 @@ Example (for ARM-based BeagleBoard xM with ST33ZP24 on I2C2): &i2c2 { - status = "okay"; st33zp24: st33zp24@13 { diff --git a/Documentation/devicetree/bindings/security/tpm/st33zp24-spi.txt b/Documentation/devicetree/bindings/security/tpm/st33zp24-spi.txt index 158b0165e01c..604dce901b60 100644 --- a/Documentation/devicetree/bindings/security/tpm/st33zp24-spi.txt +++ b/Documentation/devicetree/bindings/security/tpm/st33zp24-spi.txt @@ -18,7 +18,6 @@ Example (for ARM-based BeagleBoard xM with ST33ZP24 on SPI4): &mcspi4 { - status = "okay"; st33zp24@0 { diff --git a/Documentation/devicetree/bindings/security/tpm/tpm-i2c.txt b/Documentation/devicetree/bindings/security/tpm/tpm-i2c.txt index 8cb638b7e89c..a65d7b71e81a 100644 --- 
a/Documentation/devicetree/bindings/security/tpm/tpm-i2c.txt +++ b/Documentation/devicetree/bindings/security/tpm/tpm-i2c.txt @@ -8,6 +8,12 @@ Required properties: the firmware event log - linux,sml-size : size of the memory allocated for the firmware event log +Optional properties: + +- powered-while-suspended: present when the TPM is left powered on between + suspend and resume (makes the suspend/resume + callbacks do nothing). + Example (for OpenPower Systems with Nuvoton TPM 2.0 on I2C) ---------------------------------------------------------- @@ -17,5 +23,4 @@ tpm@57 { compatible = "nuvoton,npct650", "nuvoton,npct601"; linux,sml-base = <0x7f 0xfd450000>; linux,sml-size = <0x10000>; - status = "okay"; }; diff --git a/Documentation/devicetree/bindings/security/tpm/tpm_tis_spi.txt b/Documentation/devicetree/bindings/security/tpm/tpm_tis_spi.txt index 85741cd468cc..b800667da92b 100644 --- a/Documentation/devicetree/bindings/security/tpm/tpm_tis_spi.txt +++ b/Documentation/devicetree/bindings/security/tpm/tpm_tis_spi.txt @@ -13,7 +13,6 @@ Example (for ARM-based BeagleBoard xM with TPM_TIS on SPI4): &mcspi4 { - status = "okay"; tpm_tis@0 { diff --git a/Documentation/devicetree/bindings/serial/arc-uart.txt b/Documentation/devicetree/bindings/serial/arc-uart.txt index 5cae2eb686f8..256cc150ca7e 100644 --- a/Documentation/devicetree/bindings/serial/arc-uart.txt +++ b/Documentation/devicetree/bindings/serial/arc-uart.txt @@ -15,7 +15,6 @@ arcuart0: serial@c0fc1000 { interrupts = <5>; clock-frequency = <80000000>; current-speed = <115200>; - status = "okay"; }; Note: Each port should have an alias correctly numbered in "aliases" node. diff --git a/Documentation/devicetree/bindings/serial/axis,etraxfs-uart.txt b/Documentation/devicetree/bindings/serial/axis,etraxfs-uart.txt index 51b3c9e80ad9..048c3818c826 100644 --- a/Documentation/devicetree/bindings/serial/axis,etraxfs-uart.txt +++ b/Documentation/devicetree/bindings/serial/axis,etraxfs-uart.txt @@ -15,7 +15,6 @@ serial@b00260000 { compatible = "axis,etraxfs-uart"; reg = <0xb0026000 0x1000>; interrupts = <68>; - status = "disabled"; dtr-gpios = <&sysgpio 0 GPIO_ACTIVE_LOW>; dsr-gpios = <&sysgpio 1 GPIO_ACTIVE_LOW>; rng-gpios = <&sysgpio 2 GPIO_ACTIVE_LOW>; diff --git a/Documentation/devicetree/bindings/serial/mtk-uart.txt b/Documentation/devicetree/bindings/serial/mtk-uart.txt index b6cf384597e1..f73abff3de43 100644 --- a/Documentation/devicetree/bindings/serial/mtk-uart.txt +++ b/Documentation/devicetree/bindings/serial/mtk-uart.txt @@ -3,6 +3,7 @@ Required properties: - compatible should contain: * "mediatek,mt2701-uart" for MT2701 compatible UARTS + * "mediatek,mt2712-uart" for MT2712 compatible UARTS * "mediatek,mt6580-uart" for MT6580 compatible UARTS * "mediatek,mt6582-uart" for MT6582 compatible UARTS * "mediatek,mt6589-uart" for MT6589 compatible UARTS diff --git a/Documentation/devicetree/bindings/serial/nvidia,tegra20-hsuart.txt b/Documentation/devicetree/bindings/serial/nvidia,tegra20-hsuart.txt index c93a2d1c1a65..d7edf732eb7f 100644 --- a/Documentation/devicetree/bindings/serial/nvidia,tegra20-hsuart.txt +++ b/Documentation/devicetree/bindings/serial/nvidia,tegra20-hsuart.txt @@ -33,5 +33,4 @@ serial@70006000 { reset-names = "serial"; dmas = <&apbdma 8>, <&apbdma 8>; dma-names = "rx", "tx"; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/serial/qcom,msm-uartdm.txt b/Documentation/devicetree/bindings/serial/qcom,msm-uartdm.txt index d5f73b8f614f..9d098cf73b53 100644 --- 
a/Documentation/devicetree/bindings/serial/qcom,msm-uartdm.txt +++ b/Documentation/devicetree/bindings/serial/qcom,msm-uartdm.txt @@ -72,13 +72,10 @@ Examples: }; uarta: serial@12490000 { - status = "ok"; }; uartb: serial@16340000 { - status = "ok"; }; uartc: serial@1a240000 { - status = "ok"; }; diff --git a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt index 4fc96946f81d..cf504d0380ae 100644 --- a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt +++ b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt @@ -41,6 +41,8 @@ Required properties: - "renesas,hscif-r8a7795" for R8A7795 (R-Car H3) HSCIF compatible UART. - "renesas,scif-r8a7796" for R8A7796 (R-Car M3-W) SCIF compatible UART. - "renesas,hscif-r8a7796" for R8A7796 (R-Car M3-W) HSCIF compatible UART. + - "renesas,scif-r8a77970" for R8A77970 (R-Car V3M) SCIF compatible UART. + - "renesas,hscif-r8a77970" for R8A77970 (R-Car V3M) HSCIF compatible UART. - "renesas,scif-r8a77995" for R8A77995 (R-Car D3) SCIF compatible UART. - "renesas,hscif-r8a77995" for R8A77995 (R-Car D3) HSCIF compatible UART. - "renesas,scifa-sh73a0" for SH73A0 (SH-Mobile AG5) SCIFA compatible UART. diff --git a/Documentation/devicetree/bindings/serial/serial.txt b/Documentation/devicetree/bindings/serial/serial.txt index b542a0ecf06e..863c2893759e 100644 --- a/Documentation/devicetree/bindings/serial/serial.txt +++ b/Documentation/devicetree/bindings/serial/serial.txt @@ -43,7 +43,6 @@ Examples: rng-gpios = <&gpio2 25 GPIO_ACTIVE_LOW>; cts-gpios = <&gpio0 12 GPIO_ACTIVE_LOW>; rts-gpios = <&gpio0 13 GPIO_ACTIVE_LOW>; - status = "okay"; }; scifa4: serial@e6c80000 { @@ -54,5 +53,4 @@ Examples: clock-names = "fck"; power-domains = <&pd_a3sp>; uart-has-rtscts; - status = "okay"; }; diff --git a/Documentation/devicetree/bindings/serio/allwinner,sun4i-ps2.txt b/Documentation/devicetree/bindings/serio/allwinner,sun4i-ps2.txt index 362a76925bcd..f311472990a7 100644 --- a/Documentation/devicetree/bindings/serio/allwinner,sun4i-ps2.txt +++ b/Documentation/devicetree/bindings/serio/allwinner,sun4i-ps2.txt @@ -19,5 +19,4 @@ Example: reg = <0x01c2a000 0x400>; interrupts = <0 62 4>; clocks = <&apb1_gates 6>; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/serio/ps2-gpio.txt b/Documentation/devicetree/bindings/serio/ps2-gpio.txt new file mode 100644 index 000000000000..7b7bc9cdf986 --- /dev/null +++ b/Documentation/devicetree/bindings/serio/ps2-gpio.txt @@ -0,0 +1,23 @@ +Device-Tree binding for ps/2 gpio device + +Required properties: + - compatible = "ps2-gpio" + - data-gpios: the data pin + - clk-gpios: the clock pin + - interrupts: Should trigger on the falling edge of the clock line. + +Optional properties: + - write-enable: Indicates whether write function is provided + to serio device. Possibly providing the write fn will not work, because + of the tough timing requirements. + +Example nodes: + +ps2@0 { + compatible = "ps2-gpio"; + interrupt-parent = <&gpio>; + interrupts = <23 IRQ_TYPE_EDGE_FALLING>; + data-gpios = <&gpio 24 GPIO_ACTIVE_HIGH>; + clk-gpios = <&gpio 23 GPIO_ACTIVE_HIGH>; + write-enable; +}; diff --git a/Documentation/devicetree/bindings/soc/mediatek/scpsys.txt b/Documentation/devicetree/bindings/soc/mediatek/scpsys.txt index b1d165b4d4b3..40056f7990f8 100644 --- a/Documentation/devicetree/bindings/soc/mediatek/scpsys.txt +++ b/Documentation/devicetree/bindings/soc/mediatek/scpsys.txt @@ -12,11 +12,13 @@ power/power_domain.txt. 
It provides the power domains defined in - include/dt-bindings/power/mt8173-power.h - include/dt-bindings/power/mt6797-power.h - include/dt-bindings/power/mt2701-power.h +- include/dt-bindings/power/mt7622-power.h Required properties: - compatible: Should be one of: - "mediatek,mt2701-scpsys" - "mediatek,mt6797-scpsys" + - "mediatek,mt7622-scpsys" - "mediatek,mt8173-scpsys" - #power-domain-cells: Must be 1 - reg: Address range of the SCPSYS unit @@ -26,6 +28,7 @@ Required properties: enabled before enabling certain power domains. Required clocks for MT2701: "mm", "mfg", "ethif" Required clocks for MT6797: "mm", "mfg", "vdec" + Required clocks for MT7622: "hif_sel" Required clocks for MT8173: "mm", "mfg", "venc", "venc_lt" Optional properties: diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt index 50fc20c6ce91..b277eca861f7 100644 --- a/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt +++ b/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt @@ -1,11 +1,12 @@ -Qualcomm RPM GLINK binding +Qualcomm GLINK edge binding -This binding describes the Qualcomm RPM GLINK, a fifo based mechanism for -communication with the Resource Power Management system on various Qualcomm -platforms. +This binding describes a Qualcomm GLINK edge, a fifo based mechanism for +communication between subsystem-pairs on various Qualcomm platforms. Two types +of edges can be described by the binding; the GLINK RPM edge and a SMEM based +edge. - compatible: - Usage: required + Usage: required for glink-rpm Value type: Definition: must be "qcom,glink-rpm" @@ -16,7 +17,7 @@ platforms. signal this processor about communication related events - qcom,rpm-msg-ram: - Usage: required + Usage: required for glink-rpm Value type: Definition: handle to RPM message memory resource diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,gsbi.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,gsbi.txt index 2f5ede39bea2..fe1855f09dcc 100644 --- a/Documentation/devicetree/bindings/soc/qcom/qcom,gsbi.txt +++ b/Documentation/devicetree/bindings/soc/qcom/qcom,gsbi.txt @@ -78,7 +78,6 @@ Example for APQ8064: interrupts = <0 152 0x0>; clocks = <&gcc GSBI4_UART_CLK>, <&gcc GSBI4_H_CLK>; clock-names = "core", "iface"; - status = "ok"; }; }; diff --git a/Documentation/devicetree/bindings/soc/rockchip/grf.txt b/Documentation/devicetree/bindings/soc/rockchip/grf.txt index cc9f05d3cbc1..7dc5ce858a0e 100644 --- a/Documentation/devicetree/bindings/soc/rockchip/grf.txt +++ b/Documentation/devicetree/bindings/soc/rockchip/grf.txt @@ -21,6 +21,7 @@ Required Properties: - "rockchip,rk3328-grf", "syscon": for rk3328 - "rockchip,rk3368-grf", "syscon": for rk3368 - "rockchip,rk3399-grf", "syscon": for rk3399 + - "rockchip,rv1108-grf", "syscon": for rv1108 - compatible: PMUGRF should be one of the following: - "rockchip,rk3368-pmugrf", "syscon": for rk3368 - "rockchip,rk3399-pmugrf", "syscon": for rk3399 @@ -28,6 +29,8 @@ Required Properties: - "rockchip,rk3288-sgrf", "syscon": for rk3288 - compatible: USB2PHYGRF should be one of the followings - "rockchip,rk3328-usb2phy-grf", "syscon": for rk3328 +- compatible: USBGRF should be one of the following + - "rockchip,rv1108-usbgrf", "syscon": for rv1108 - reg: physical base address of the controller and length of memory mapped region. 
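For reference, a syscon node using the newly listed rv1108 compatible follows the same pattern as the existing GRF nodes. The sketch below is illustrative only; the register base and size are placeholders rather than values taken from this patch:

	grf: syscon@10300000 {
		compatible = "rockchip,rv1108-grf", "syscon";
		/* placeholder address range, check the SoC dtsi/manual */
		reg = <0x10300000 0x1000>;
	};

The USBGRF block would be declared the same way, only with the "rockchip,rv1108-usbgrf", "syscon" compatible string and its own register range.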
diff --git a/Documentation/devicetree/bindings/soc/rockchip/power_domain.txt b/Documentation/devicetree/bindings/soc/rockchip/power_domain.txt index 01bfb6745fbd..301d2a9bc1b8 100644 --- a/Documentation/devicetree/bindings/soc/rockchip/power_domain.txt +++ b/Documentation/devicetree/bindings/soc/rockchip/power_domain.txt @@ -7,6 +7,7 @@ Required properties for power domain controller: - compatible: Should be one of the following. "rockchip,rk3288-power-controller" - for RK3288 SoCs. "rockchip,rk3328-power-controller" - for RK3328 SoCs. + "rockchip,rk3366-power-controller" - for RK3366 SoCs. "rockchip,rk3368-power-controller" - for RK3368 SoCs. "rockchip,rk3399-power-controller" - for RK3399 SoCs. - #power-domain-cells: Number of cells in a power-domain specifier. @@ -18,6 +19,7 @@ Required properties for power domain sub nodes: - reg: index of the power domain, should use macros in: "include/dt-bindings/power/rk3288-power.h" - for RK3288 type power domain. "include/dt-bindings/power/rk3328-power.h" - for RK3328 type power domain. + "include/dt-bindings/power/rk3366-power.h" - for RK3366 type power domain. "include/dt-bindings/power/rk3368-power.h" - for RK3368 type power domain. "include/dt-bindings/power/rk3399-power.h" - for RK3399 type power domain. - clocks (optional): phandles to clocks which need to be enabled while power domain @@ -93,6 +95,7 @@ power domain to use. The index should use macros in: "include/dt-bindings/power/rk3288-power.h" - for rk3288 type power domain. "include/dt-bindings/power/rk3328-power.h" - for rk3328 type power domain. + "include/dt-bindings/power/rk3366-power.h" - for rk3366 type power domain. "include/dt-bindings/power/rk3368-power.h" - for rk3368 type power domain. "include/dt-bindings/power/rk3399-power.h" - for rk3399 type power domain. diff --git a/Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt b/Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt index c705db07d820..66e6265fb0aa 100644 --- a/Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt +++ b/Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt @@ -46,12 +46,13 @@ Required Properties: - power-domains: phandle pointing to the corresponding PM domain node and an ID representing the device. -See dt-bindings/genpd/k2g.h for the list of valid identifiers for k2g. +See http://processors.wiki.ti.com/index.php/TISCI#66AK2G02_Data for the list +of valid identifiers for k2g. Example (K2G): -------------------- uart0: serial@02530c00 { compatible = "ns16550a"; ... - power-domains = <&k2g_pds K2G_DEV_UART0>; + power-domains = <&k2g_pds 0x002c>; }; diff --git a/Documentation/devicetree/bindings/sound/armada-370db-audio.txt b/Documentation/devicetree/bindings/sound/armada-370db-audio.txt index bf984d238620..953c092db72f 100644 --- a/Documentation/devicetree/bindings/sound/armada-370db-audio.txt +++ b/Documentation/devicetree/bindings/sound/armada-370db-audio.txt @@ -23,5 +23,4 @@ Example: compatible = "marvell,a370db-audio"; marvell,audio-controller = <&audio_controller>; marvell,audio-codec = <&audio_codec &spdif_out &spdif_in>; - status = "okay"; }; diff --git a/Documentation/devicetree/bindings/sound/atmel-classd.txt b/Documentation/devicetree/bindings/sound/atmel-classd.txt index 549e701cb7a1..898551076382 100644 --- a/Documentation/devicetree/bindings/sound/atmel-classd.txt +++ b/Documentation/devicetree/bindings/sound/atmel-classd.txt @@ -13,13 +13,11 @@ Required properties: Must be "tx". - clock-names Tuple listing input clock names. 
- Required elements: "pclk", "gclk" and "aclk". + Required elements: "pclk" and "gclk". - clocks Please refer to clock-bindings.txt. - assigned-clocks Should be <&classd_gclk>. -- assigned-clock-parents - Should be <&audio_pll_pmc>. Optional properties: - pinctrl-names, pinctrl-0 @@ -45,10 +43,9 @@ classd: classd@fc048000 { (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) | AT91_XDMAC_DT_PERID(47))>; dma-names = "tx"; - clocks = <&classd_clk>, <&classd_gclk>, <&audio_pll_pmc>; - clock-names = "pclk", "gclk", "aclk"; + clocks = <&classd_clk>, <&classd_gclk>; + clock-names = "pclk", "gclk"; assigned-clocks = <&classd_gclk>; - assigned-clock-parents = <&audio_pll_pmc>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_classd_default>; diff --git a/Documentation/devicetree/bindings/sound/axentia,tse850-pcm5142.txt b/Documentation/devicetree/bindings/sound/axentia,tse850-pcm5142.txt index fdb25b492514..9d049d4bfd58 100644 --- a/Documentation/devicetree/bindings/sound/axentia,tse850-pcm5142.txt +++ b/Documentation/devicetree/bindings/sound/axentia,tse850-pcm5142.txt @@ -45,7 +45,6 @@ Example: &ssc0 { #sound-dai-cells = <0>; - status = "okay"; }; &i2c { diff --git a/Documentation/devicetree/bindings/sound/brcm,cygnus-audio.txt b/Documentation/devicetree/bindings/sound/brcm,cygnus-audio.txt index b139e66d2a11..630bf7c0344d 100644 --- a/Documentation/devicetree/bindings/sound/brcm,cygnus-audio.txt +++ b/Documentation/devicetree/bindings/sound/brcm,cygnus-audio.txt @@ -47,21 +47,17 @@ Example: ssp0: ssp_port@0 { reg = <0>; - status = "okay"; }; ssp1: ssp_port@1 { reg = <1>; - status = "disabled"; }; ssp2: ssp_port@2 { reg = <2>; - status = "disabled"; }; spdif: spdif_port@3 { reg = <3>; - status = "disabled"; }; }; diff --git a/Documentation/devicetree/bindings/sound/cs43130.txt b/Documentation/devicetree/bindings/sound/cs43130.txt new file mode 100644 index 000000000000..8b1dd5aeb004 --- /dev/null +++ b/Documentation/devicetree/bindings/sound/cs43130.txt @@ -0,0 +1,67 @@ +CS43130 DAC + +Required properties: + + - compatible : "cirrus,cs43130", "cirrus,cs4399", "cirrus,cs43131", + "cirrus,cs43198" + + - reg : the I2C address of the device for I2C + + - VA-supply, VP-supply, VL-supply, VCP-supply, VD-supply: + power supplies for the device, as covered in + Documentation/devicetree/bindings/regulator/regulator.txt. + + +Optional properties: + + - reset-gpios : Active low GPIO used to reset the device + + - cirrus,xtal-ibias: + When external MCLK is generated by external crystal + oscillator, CS43130 can be used to provide bias current + for external crystal. Amount of bias current sent is + set as: + 1 = 7.5uA + 2 = 12.5uA + 3 = 15uA + + - cirrus,dc-measure: + Boolean, define to enable headphone DC impedance measurement. + + - cirrus,ac-measure: + Boolean, define to enable headphone AC impedance measurement. + DC impedance must also be enabled for AC impedance measurement. + + - cirrus,dc-threshold: + Define 2 DC impedance thresholds in ohms for HP output control. + Default values are 50 and 120 Ohms. + + - cirrus,ac-freq: + Define the frequencies at which to measure HP AC impedance. + Only used if "cirrus,dc-measure" is defined. + Exactly 10 frequencies must be defined. + If this properties is undefined, by default, + following frequencies are used: + <24 43 93 200 431 928 2000 4309 9283 20000> + The above frequencies are logarithmically equally spaced. + Log base is 10. 
+ +Example: + +cs43130: audio-codec@30 { + compatible = "cirrus,cs43130"; + reg = <0x30>; + reset-gpios = <&axi_gpio 54 0>; + VA-supply = <&dummy_vreg>; + VP-supply = <&dummy_vreg>; + VL-supply = <&dummy_vreg>; + VCP-supply = <&dummy_vreg>; + VD-supply = <&dummy_vreg>; + cirrus,xtal-ibias = <2>; + interrupt-parent = <&gpio0>; + interrupts = <55 8>; + cirrus,dc-measure; + cirrus,ac-measure; + cirrus,dc-threshold = /bits/ 16 <20 100>; + cirrus,ac-freq = /bits/ 16 <24 43 93 200 431 928 2000 4309 9283 20000>; +}; diff --git a/Documentation/devicetree/bindings/sound/davinci-mcbsp.txt b/Documentation/devicetree/bindings/sound/davinci-mcbsp.txt index e0b6165c9cfc..3ffc2562fb31 100644 --- a/Documentation/devicetree/bindings/sound/davinci-mcbsp.txt +++ b/Documentation/devicetree/bindings/sound/davinci-mcbsp.txt @@ -47,5 +47,4 @@ mcbsp0: mcbsp@1d10000 { dmas = <&edma0 3 1 &edma0 2 1>; dma-names = "tx", "rx"; - status = "okay"; }; diff --git a/Documentation/devicetree/bindings/sound/dmic.txt b/Documentation/devicetree/bindings/sound/dmic.txt new file mode 100644 index 000000000000..54c8ef6498a8 --- /dev/null +++ b/Documentation/devicetree/bindings/sound/dmic.txt @@ -0,0 +1,16 @@ +Device-Tree bindings for Digital microphone (DMIC) codec + +This device support generic PDM digital microphone. + +Required properties: + - compatible: should be "dmic-codec". + +Optional properties: + - dmicen-gpios: GPIO specifier for dmic to control start and stop + +Example node: + + dmic_codec: dmic@0 { + compatible = "dmic-codec"; + dmicen-gpios = <&gpio4 3 GPIO_ACTIVE_HIGH>; + }; diff --git a/Documentation/devicetree/bindings/sound/fsl,asrc.txt b/Documentation/devicetree/bindings/sound/fsl,asrc.txt index 3e26a9478e57..65979b205893 100644 --- a/Documentation/devicetree/bindings/sound/fsl,asrc.txt +++ b/Documentation/devicetree/bindings/sound/fsl,asrc.txt @@ -61,5 +61,4 @@ asrc: asrc@02034000 { "txa", "txb", "txc"; fsl,asrc-rate = <48000>; fsl,asrc-width = <16>; - status = "okay"; }; diff --git a/Documentation/devicetree/bindings/sound/fsl,esai.txt b/Documentation/devicetree/bindings/sound/fsl,esai.txt index cd3ee5d84f03..21c401e2ccda 100644 --- a/Documentation/devicetree/bindings/sound/fsl,esai.txt +++ b/Documentation/devicetree/bindings/sound/fsl,esai.txt @@ -59,5 +59,4 @@ esai: esai@02024000 { fsl,fifo-depth = <128>; fsl,esai-synchronous; big-endian; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/sound/fsl,spdif.txt b/Documentation/devicetree/bindings/sound/fsl,spdif.txt index 4ca39ddc0417..0f97e54c3d43 100644 --- a/Documentation/devicetree/bindings/sound/fsl,spdif.txt +++ b/Documentation/devicetree/bindings/sound/fsl,spdif.txt @@ -59,5 +59,4 @@ spdif: spdif@02004000 { "rxtx7"; big-endian; - status = "okay"; }; diff --git a/Documentation/devicetree/bindings/sound/hdmi.txt b/Documentation/devicetree/bindings/sound/hdmi.txt index 31af7bca3099..56407c30e954 100644 --- a/Documentation/devicetree/bindings/sound/hdmi.txt +++ b/Documentation/devicetree/bindings/sound/hdmi.txt @@ -13,5 +13,4 @@ Example node: hdmi_audio: hdmi_audio@0 { compatible = "linux,hdmi-audio"; - status = "okay"; }; diff --git a/Documentation/devicetree/bindings/sound/mt2701-afe-pcm.txt b/Documentation/devicetree/bindings/sound/mt2701-afe-pcm.txt index 9800a560e0c2..77a57f84bed4 100644 --- a/Documentation/devicetree/bindings/sound/mt2701-afe-pcm.txt +++ b/Documentation/devicetree/bindings/sound/mt2701-afe-pcm.txt @@ -3,7 +3,8 @@ Mediatek AFE PCM controller for mt2701 Required properties: - compatible = "mediatek,mt2701-audio"; 
- reg: register location and size -- interrupts: Should contain AFE interrupt +- interrupts: should contain AFE and ASYS interrupts +- interrupt-names: should be "afe" and "asys" - power-domains: should define the power domain - clock-names: should have these clock names: "infra_sys_audio_clk", @@ -59,6 +60,7 @@ Example: <0 0x112A0000 0 0x20000>; interrupts = , ; + interrupt-names = "afe", "asys"; power-domains = <&scpsys MT2701_POWER_DOMAIN_IFR_MSC>; clocks = <&infracfg CLK_INFRA_AUDIO>, <&topckgen CLK_TOP_AUD_MUX1_SEL>, diff --git a/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt b/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt index ccb401cfef9d..551ecab67efe 100644 --- a/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt +++ b/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt @@ -31,8 +31,22 @@ Required properties - vdd-cdc-io-supply: phandle to VDD_CDC_IO regulator DT node. - vdd-cdc-tx-rx-cx-supply: phandle to VDD_CDC_TX/RX/CX regulator DT node. - vdd-micbias-supply: phandle of VDD_MICBIAS supply's regulator DT node. - Optional Properties: + - qcom,mbhc-vthreshold-low: Array of 5 threshold voltages in mV for 5 buttons + detection on headset when the mbhc is powered up + by internal current source, this is a low power. + - qcom,mbhc-vthreshold-high: Array of 5 thresold voltages in mV for 5 buttons + detection on headset when mbhc is powered up + from micbias. +- qcom,micbias-lvl: Voltage (mV) for Mic Bias +- qcom,hphl-jack-type-normally-open: boolean, present if hphl pin on jack is a + NO (Normally Open). If not specified, then + its assumed that hphl pin on jack is NC + (Normally Closed). +- qcom,gnd-jack-type-normally-open: boolean, present if gnd pin on jack is + NO (Normally Open). If not specified, then + its assumed that gnd pin on jack is NC + (Normally Closed). - qcom,micbias1-ext-cap: boolean, present if micbias1 has external capacitor connected. - qcom,micbias2-ext-cap: boolean, present if micbias2 has external capacitor @@ -48,6 +62,8 @@ spmi_bus { reg-names = "pmic-codec-core"; clocks = <&gcc GCC_CODEC_DIGCODEC_CLK>; clock-names = "mclk"; + qcom,mbhc-vthreshold-low = <75 150 237 450 500>; + qcom,mbhc-vthreshold-high = <75 150 237 450 500>; interrupt-parent = <&spmi_bus>; interrupts = <0x1 0xf0 0x0 IRQ_TYPE_NONE>, <0x1 0xf0 0x1 IRQ_TYPE_NONE>, diff --git a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt index 7246bb268bf9..085bec364caf 100644 --- a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt +++ b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt @@ -199,10 +199,10 @@ Ex) sound { compatible = "simple-scu-audio-card"; ... 
- simple-audio-card,cpu@0 { + simple-audio-card,cpu-0 { sound-dai = <&rcar_sound 0>; }; - simple-audio-card,cpu@1 { + simple-audio-card,cpu-1 { sound-dai = <&rcar_sound 1>; }; simple-audio-card,codec { @@ -441,79 +441,79 @@ rcar_sound: sound@ec500000 { "clk_a", "clk_b", "clk_c", "clk_i"; rcar_sound,dvc { - dvc0: dvc@0 { + dvc0: dvc-0 { dmas = <&audma0 0xbc>; dma-names = "tx"; }; - dvc1: dvc@1 { + dvc1: dvc-1 { dmas = <&audma0 0xbe>; dma-names = "tx"; }; }; rcar_sound,mix { - mix0: mix@0 { }; - mix1: mix@1 { }; + mix0: mix-0 { }; + mix1: mix-1 { }; }; rcar_sound,ctu { - ctu00: ctu@0 { }; - ctu01: ctu@1 { }; - ctu02: ctu@2 { }; - ctu03: ctu@3 { }; - ctu10: ctu@4 { }; - ctu11: ctu@5 { }; - ctu12: ctu@6 { }; - ctu13: ctu@7 { }; + ctu00: ctu-0 { }; + ctu01: ctu-1 { }; + ctu02: ctu-2 { }; + ctu03: ctu-3 { }; + ctu10: ctu-4 { }; + ctu11: ctu-5 { }; + ctu12: ctu-6 { }; + ctu13: ctu-7 { }; }; rcar_sound,src { - src0: src@0 { + src0: src-0 { interrupts = <0 352 IRQ_TYPE_LEVEL_HIGH>; dmas = <&audma0 0x85>, <&audma1 0x9a>; dma-names = "rx", "tx"; }; - src1: src@1 { + src1: src-1 { interrupts = <0 353 IRQ_TYPE_LEVEL_HIGH>; dmas = <&audma0 0x87>, <&audma1 0x9c>; dma-names = "rx", "tx"; }; - src2: src@2 { + src2: src-2 { interrupts = <0 354 IRQ_TYPE_LEVEL_HIGH>; dmas = <&audma0 0x89>, <&audma1 0x9e>; dma-names = "rx", "tx"; }; - src3: src@3 { + src3: src-3 { interrupts = <0 355 IRQ_TYPE_LEVEL_HIGH>; dmas = <&audma0 0x8b>, <&audma1 0xa0>; dma-names = "rx", "tx"; }; - src4: src@4 { + src4: src-4 { interrupts = <0 356 IRQ_TYPE_LEVEL_HIGH>; dmas = <&audma0 0x8d>, <&audma1 0xb0>; dma-names = "rx", "tx"; }; - src5: src@5 { + src5: src-5 { interrupts = <0 357 IRQ_TYPE_LEVEL_HIGH>; dmas = <&audma0 0x8f>, <&audma1 0xb2>; dma-names = "rx", "tx"; }; - src6: src@6 { + src6: src-6 { interrupts = <0 358 IRQ_TYPE_LEVEL_HIGH>; dmas = <&audma0 0x91>, <&audma1 0xb4>; dma-names = "rx", "tx"; }; - src7: src@7 { + src7: src-7 { interrupts = <0 359 IRQ_TYPE_LEVEL_HIGH>; dmas = <&audma0 0x93>, <&audma1 0xb6>; dma-names = "rx", "tx"; }; - src8: src@8 { + src8: src-8 { interrupts = <0 360 IRQ_TYPE_LEVEL_HIGH>; dmas = <&audma0 0x95>, <&audma1 0xb8>; dma-names = "rx", "tx"; }; - src9: src@9 { + src9: src-9 { interrupts = <0 361 IRQ_TYPE_LEVEL_HIGH>; dmas = <&audma0 0x97>, <&audma1 0xba>; dma-names = "rx", "tx"; @@ -521,52 +521,52 @@ rcar_sound: sound@ec500000 { }; rcar_sound,ssi { - ssi0: ssi@0 { + ssi0: ssi-0 { interrupts = <0 370 IRQ_TYPE_LEVEL_HIGH>; dmas = <&audma0 0x01>, <&audma1 0x02>, <&audma0 0x15>, <&audma1 0x16>; dma-names = "rx", "tx", "rxu", "txu"; }; - ssi1: ssi@1 { + ssi1: ssi-1 { interrupts = <0 371 IRQ_TYPE_LEVEL_HIGH>; dmas = <&audma0 0x03>, <&audma1 0x04>, <&audma0 0x49>, <&audma1 0x4a>; dma-names = "rx", "tx", "rxu", "txu"; }; - ssi2: ssi@2 { + ssi2: ssi-2 { interrupts = <0 372 IRQ_TYPE_LEVEL_HIGH>; dmas = <&audma0 0x05>, <&audma1 0x06>, <&audma0 0x63>, <&audma1 0x64>; dma-names = "rx", "tx", "rxu", "txu"; }; - ssi3: ssi@3 { + ssi3: ssi-3 { interrupts = <0 373 IRQ_TYPE_LEVEL_HIGH>; dmas = <&audma0 0x07>, <&audma1 0x08>, <&audma0 0x6f>, <&audma1 0x70>; dma-names = "rx", "tx", "rxu", "txu"; }; - ssi4: ssi@4 { + ssi4: ssi-4 { interrupts = <0 374 IRQ_TYPE_LEVEL_HIGH>; dmas = <&audma0 0x09>, <&audma1 0x0a>, <&audma0 0x71>, <&audma1 0x72>; dma-names = "rx", "tx", "rxu", "txu"; }; - ssi5: ssi@5 { + ssi5: ssi-5 { interrupts = <0 375 IRQ_TYPE_LEVEL_HIGH>; dmas = <&audma0 0x0b>, <&audma1 0x0c>, <&audma0 0x73>, <&audma1 0x74>; dma-names = "rx", "tx", "rxu", "txu"; }; - ssi6: ssi@6 { + ssi6: ssi-6 { interrupts = <0 376 
IRQ_TYPE_LEVEL_HIGH>; dmas = <&audma0 0x0d>, <&audma1 0x0e>, <&audma0 0x75>, <&audma1 0x76>; dma-names = "rx", "tx", "rxu", "txu"; }; - ssi7: ssi@7 { + ssi7: ssi-7 { interrupts = <0 377 IRQ_TYPE_LEVEL_HIGH>; dmas = <&audma0 0x0f>, <&audma1 0x10>, <&audma0 0x79>, <&audma1 0x7a>; dma-names = "rx", "tx", "rxu", "txu"; }; - ssi8: ssi@8 { + ssi8: ssi-8 { interrupts = <0 378 IRQ_TYPE_LEVEL_HIGH>; dmas = <&audma0 0x11>, <&audma1 0x12>, <&audma0 0x7b>, <&audma1 0x7c>; dma-names = "rx", "tx", "rxu", "txu"; }; - ssi9: ssi@9 { + ssi9: ssi-9 { interrupts = <0 379 IRQ_TYPE_LEVEL_HIGH>; dmas = <&audma0 0x13>, <&audma1 0x14>, <&audma0 0x7d>, <&audma1 0x7e>; dma-names = "rx", "tx", "rxu", "txu"; @@ -621,7 +621,6 @@ Example: simple sound card /* Single DAI */ #sound-dai-cells = <0>; - status = "okay"; rcar_sound,dai { dai0 { @@ -667,7 +666,6 @@ Example: simple sound card for Multi channel /* Single DAI */ #sound-dai-cells = <0>; - status = "okay"; rcar_sound,dai { dai0 { diff --git a/Documentation/devicetree/bindings/sound/rockchip,pdm.txt b/Documentation/devicetree/bindings/sound/rockchip,pdm.txt index 921729de7346..47f164fbd1d7 100644 --- a/Documentation/devicetree/bindings/sound/rockchip,pdm.txt +++ b/Documentation/devicetree/bindings/sound/rockchip,pdm.txt @@ -29,11 +29,13 @@ pdm: pdm@ff040000 { dma-names = "rx"; pinctrl-names = "default", "sleep"; pinctrl-0 = <&pdmm0_clk - &pdmm0_fsync &pdmm0_sdi0 &pdmm0_sdi1 &pdmm0_sdi2 &pdmm0_sdi3>; - pinctrl-1 = <&pdmm0_sleep>; - status = "disabled"; + pinctrl-1 = <&pdmm0_clk_sleep + &pdmm0_sdi0_sleep + &pdmm0_sdi1_sleep + &pdmm0_sdi2_sleep + &pdmm0_sdi3_sleep>; }; diff --git a/Documentation/devicetree/bindings/sound/rockchip,rk3399-gru-sound.txt b/Documentation/devicetree/bindings/sound/rockchip,rk3399-gru-sound.txt index eac91db07178..72d3cf4c2606 100644 --- a/Documentation/devicetree/bindings/sound/rockchip,rk3399-gru-sound.txt +++ b/Documentation/devicetree/bindings/sound/rockchip,rk3399-gru-sound.txt @@ -4,7 +4,7 @@ Required properties: - compatible: "rockchip,rk3399-gru-sound" - rockchip,cpu: The phandle of the Rockchip I2S controller that's connected to the codecs -- rockchip,codec: The phandle of the MAX98357A/RT5514/DA7219 codecs +- rockchip,codec: The phandle of the audio codecs Optional properties: - dmic-wakeup-delay-ms : specify delay time (ms) for DMIC ready. 
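To show how the relaxed rockchip,codec property reads in practice, here is a minimal sketch; the codec labels are assumed phandles for a MAX98357A, RT5514 and DA7219, and the DMIC delay value is likewise only illustrative:

	sound {
		compatible = "rockchip,rk3399-gru-sound";
		rockchip,cpu = <&i2s0>;
		/* list whichever codecs are wired up on the board */
		rockchip,codec = <&max98357a &rt5514 &da7219>;
		/* optional: give the DMIC time to settle after wake-up */
		dmic-wakeup-delay-ms = <20>;
	};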
diff --git a/Documentation/devicetree/bindings/sound/rockchip-i2s.txt b/Documentation/devicetree/bindings/sound/rockchip-i2s.txt index 206aba1b34bb..b208a752576c 100644 --- a/Documentation/devicetree/bindings/sound/rockchip-i2s.txt +++ b/Documentation/devicetree/bindings/sound/rockchip-i2s.txt @@ -7,8 +7,12 @@ Required properties: - compatible: should be one of the following: - "rockchip,rk3066-i2s": for rk3066 + - "rockchip,rk3036-i2s", "rockchip,rk3066-i2s": for rk3036 - "rockchip,rk3188-i2s", "rockchip,rk3066-i2s": for rk3188 + - "rockchip,rk3228-i2s", "rockchip,rk3066-i2s": for rk3228 - "rockchip,rk3288-i2s", "rockchip,rk3066-i2s": for rk3288 + - "rockchip,rk3328-i2s", "rockchip,rk3066-i2s": for rk3328 + - "rockchip,rk3366-i2s", "rockchip,rk3066-i2s": for rk3366 - "rockchip,rk3368-i2s", "rockchip,rk3066-i2s": for rk3368 - "rockchip,rk3399-i2s", "rockchip,rk3066-i2s": for rk3399 - reg: physical base address of the controller and length of memory mapped diff --git a/Documentation/devicetree/bindings/sound/rockchip-spdif.txt b/Documentation/devicetree/bindings/sound/rockchip-spdif.txt index 4706b96d450b..0a1dc4e1815c 100644 --- a/Documentation/devicetree/bindings/sound/rockchip-spdif.txt +++ b/Documentation/devicetree/bindings/sound/rockchip-spdif.txt @@ -41,6 +41,5 @@ spdif: spdif@0x1011e000 { dma-names = "tx"; clock-names = "hclk", "mclk"; clocks = <&cru HCLK_SPDIF>, <&cru SCLK_SPDIF>; - status = "disabled"; #sound-dai-cells = <0>; }; diff --git a/Documentation/devicetree/bindings/sound/rt274.txt b/Documentation/devicetree/bindings/sound/rt274.txt new file mode 100644 index 000000000000..e9a6178c78cf --- /dev/null +++ b/Documentation/devicetree/bindings/sound/rt274.txt @@ -0,0 +1,33 @@ +RT274 audio CODEC + +This device supports I2C only. + +Required properties: + +- compatible : "realtek,rt274". + +- reg : The I2C address of the device. + +Optional properties: + +- interrupts : The CODEC's interrupt output. + + +Pins on the device (for linking into audio routes) for RT274: + + * DMIC1 Pin + * DMIC2 Pin + * MIC + * LINE1 + * LINE2 + * HPO Pin + * SPDIF + * LINE3 + +Example: + +codec: rt274@1c { + compatible = "realtek,rt274"; + reg = <0x1c>; + interrupts = <7 IRQ_TYPE_EDGE_FALLING>; +}; diff --git a/Documentation/devicetree/bindings/sound/rt5663.txt b/Documentation/devicetree/bindings/sound/rt5663.txt index 70eaeaed2b18..ff381718c517 100644 --- a/Documentation/devicetree/bindings/sound/rt5663.txt +++ b/Documentation/devicetree/bindings/sound/rt5663.txt @@ -12,6 +12,14 @@ Required properties: Optional properties: +- "realtek,dc_offset_l_manual" +- "realtek,dc_offset_r_manual" +- "realtek,dc_offset_l_manual_mic" +- "realtek,dc_offset_r_manual_mic" + Based on the different PCB layout, add the manual offset value to + compensate the DC offset for each L and R channel, and they are different + between headphone and headset. 
+ Pins on the device (for linking into audio routes) for RT5663: * IN1P diff --git a/Documentation/devicetree/bindings/sound/samsung,odroid.txt b/Documentation/devicetree/bindings/sound/samsung,odroid.txt index c30934dd975b..625b1b18fd02 100644 --- a/Documentation/devicetree/bindings/sound/samsung,odroid.txt +++ b/Documentation/devicetree/bindings/sound/samsung,odroid.txt @@ -7,9 +7,6 @@ Required properties: - model - the user-visible name of this sound complex - clocks - should contain entries matching clock names in the clock-names property - - clock-names - should contain following entries: - - "epll" - indicating the EPLL output clock - - "i2s_rclk" - indicating the RCLK (root) clock of the I2S0 controller - samsung,audio-widgets - this property specifies off-codec audio elements like headphones or speakers, for details see widgets.txt - samsung,audio-routing - a list of the connections between audio @@ -46,9 +43,6 @@ sound { "IN1", "Mic Jack", "Mic Jack", "MICBIAS"; - clocks = <&clock CLK_FOUT_EPLL>, <&i2s0 CLK_I2S_RCLK_SRC>; - clock-names = "epll", "sclk_i2s"; - cpu { sound-dai = <&i2s0 0>; }; diff --git a/Documentation/devicetree/bindings/sound/simple-card.txt b/Documentation/devicetree/bindings/sound/simple-card.txt index c7a93931fad2..166f2290233b 100644 --- a/Documentation/devicetree/bindings/sound/simple-card.txt +++ b/Documentation/devicetree/bindings/sound/simple-card.txt @@ -86,6 +86,9 @@ Optional CPU/CODEC subnodes properties: in dai startup() and disabled with clk_disable_unprepare() in dai shutdown(). +- system-clock-direction-out : specifies clock direction as 'out' on + initialization. It is useful for some aCPUs with + fixed clocks. Example 1 - single DAI link: diff --git a/Documentation/devicetree/bindings/sound/simple-scu-card.txt b/Documentation/devicetree/bindings/sound/simple-scu-card.txt index 327d229a51b2..32f8dbce5241 100644 --- a/Documentation/devicetree/bindings/sound/simple-scu-card.txt +++ b/Documentation/devicetree/bindings/sound/simple-scu-card.txt @@ -24,6 +24,7 @@ Optional subnode properties: - simple-audio-card,convert-rate : platform specified sampling rate convert - simple-audio-card,convert-channels : platform specified converted channel size (2 - 8 ch) - simple-audio-card,prefix : see routing +- simple-audio-card,widgets : Please refer to widgets.txt. - simple-audio-card,routing : A list of the connections between audio components. Each entry is a pair of strings, the first being the connection's sink, the second being the connection's source. Valid names for sources. 
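As an illustration of how the newly referenced simple-audio-card,widgets property sits next to the routing property on an SCU card, consider the sketch below; the widget and route names are invented for the example and follow the "template", "user name" pair format described in widgets.txt:

	sound {
		compatible = "simple-scu-audio-card";
		/* off-codec widgets: template name, user-visible name */
		simple-audio-card,widgets = "Headphone", "Headphone Jack";
		/* routing entries are sink/source string pairs */
		simple-audio-card,routing = "Headphone Jack", "HP OUT";
	};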
diff --git a/Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt b/Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt index 745dc62f76ea..40068ec0e9a5 100644 --- a/Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt +++ b/Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt @@ -53,7 +53,6 @@ Example: sti_uni_player1: sti-uni-player@0x8D81000 { compatible = "st,stih407-uni-player-hdmi"; - status = "okay"; #sound-dai-cells = <0>; st,syscfg = <&syscfg_core>; clocks = <&clk_s_d0_flexgen CLK_PCM_1>; @@ -66,7 +65,6 @@ Example: sti_uni_player2: sti-uni-player@0x8D82000 { compatible = "st,stih407-uni-player-pcm-out"; - status = "okay"; #sound-dai-cells = <0>; st,syscfg = <&syscfg_core>; clocks = <&clk_s_d0_flexgen CLK_PCM_2>; @@ -78,7 +76,6 @@ Example: sti_uni_player3: sti-uni-player@0x8D85000 { compatible = "st,stih407-uni-player-spdif"; - status = "okay"; #sound-dai-cells = <0>; st,syscfg = <&syscfg_core>; clocks = <&clk_s_d0_flexgen CLK_SPDIFF>; @@ -90,7 +87,6 @@ Example: sti_uni_reader1: sti-uni-reader@0x8D84000 { compatible = "st,stih407-uni-reader-hdmi"; - status = "disabled"; #sound-dai-cells = <0>; st,syscfg = <&syscfg_core>; reg = <0x8D84000 0x158>; @@ -125,7 +121,6 @@ Example of audio card declaration: sound { compatible = "simple-audio-card"; simple-audio-card,name = "sti audio card"; - status = "okay"; simple-audio-card,dai-link@0 { /* DAC */ diff --git a/Documentation/devicetree/bindings/sound/sun4i-i2s.txt b/Documentation/devicetree/bindings/sound/sun4i-i2s.txt index ee21da865771..fc5da6080759 100644 --- a/Documentation/devicetree/bindings/sound/sun4i-i2s.txt +++ b/Documentation/devicetree/bindings/sound/sun4i-i2s.txt @@ -8,6 +8,7 @@ Required properties: - compatible: should be one of the following: - "allwinner,sun4i-a10-i2s" - "allwinner,sun6i-a31-i2s" + - "allwinner,sun8i-h3-i2s" - reg: physical base address of the controller and length of memory mapped region. - interrupts: should contain the I2S interrupt. 
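A sketch of an I2S node using the new H3 compatible, written in the style of the existing A10 example; the register range, interrupt, clock, reset and DMA specifiers are placeholders, not values defined by this patch:

	i2s0: i2s@1c22000 {
		#sound-dai-cells = <0>;
		compatible = "allwinner,sun8i-h3-i2s";
		reg = <0x01c22000 0x400>;
		interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
		clocks = <&ccu CLK_BUS_I2S0>, <&ccu CLK_I2S0>;
		clock-names = "apb", "mod";
		dmas = <&dma 3>, <&dma 3>;
		dma-names = "rx", "tx";
		/* required for the sun6i-a31 and sun8i-h3 compatibles */
		resets = <&ccu RST_BUS_I2S0>;
	};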
@@ -22,6 +23,7 @@ Required properties: Required properties for the following compatibles: - "allwinner,sun6i-a31-i2s" + - "allwinner,sun8i-h3-i2s" - resets: phandle to the reset line for this codec Example: diff --git a/Documentation/devicetree/bindings/sound/sunxi,sun4i-spdif.txt b/Documentation/devicetree/bindings/sound/sunxi,sun4i-spdif.txt index fe0a65e6d629..70ee177901d3 100644 --- a/Documentation/devicetree/bindings/sound/sunxi,sun4i-spdif.txt +++ b/Documentation/devicetree/bindings/sound/sunxi,sun4i-spdif.txt @@ -39,5 +39,4 @@ spdif: spdif@01c21000 { clock-names = "apb", "spdif"; dmas = <&dma 0 2>, <&dma 0 2>; dma-names = "rx", "tx"; - status = "okay"; }; diff --git a/Documentation/devicetree/bindings/sound/tas5720.txt b/Documentation/devicetree/bindings/sound/tas5720.txt index 806ea7381483..40d94f82beb3 100644 --- a/Documentation/devicetree/bindings/sound/tas5720.txt +++ b/Documentation/devicetree/bindings/sound/tas5720.txt @@ -17,7 +17,6 @@ Required properties: Example: tas5720: tas5720@6c { - status = "okay"; compatible = "ti,tas5720"; reg = <0x6c>; dvdd-supply = <&vdd_3v3_reg>; diff --git a/Documentation/devicetree/bindings/sound/tlv320aic32x4.txt b/Documentation/devicetree/bindings/sound/tlv320aic32x4.txt index 5e2741af27be..ca75890f0d07 100644 --- a/Documentation/devicetree/bindings/sound/tlv320aic32x4.txt +++ b/Documentation/devicetree/bindings/sound/tlv320aic32x4.txt @@ -3,7 +3,9 @@ Texas Instruments - tlv320aic32x4 Codec module The tlv320aic32x4 serial control bus communicates through I2C protocols Required properties: - - compatible: Should be "ti,tlv320aic32x4" + - compatible - "string" - One of: + "ti,tlv320aic32x4" TLV320AIC3204 + "ti,tlv320aic32x6" TLV320AIC3206, TLV320AIC3256 - reg: I2C slave address - supply-*: Required supply regulators are: "iov" - digital IO power supply @@ -18,6 +20,8 @@ Optional properties: - reset-gpios: Reset-GPIO phandle with args as described in gpio/gpio.txt - clocks/clock-names: Clock named 'mclk' for the master clock of the codec. See clock/clock-bindings.txt for information about the detailed format. + - aic32x4-gpio-func - + - Types are defined in include/sound/tlv320aic32x4.h Example: @@ -27,4 +31,11 @@ codec: tlv320aic32x4@18 { reg = <0x18>; clocks = <&clks 201>; clock-names = "mclk"; + aic32x4-gpio-func= < + 0xff /* AIC32X4_MFPX_DEFAULT_VALUE */ + 0xff /* AIC32X4_MFPX_DEFAULT_VALUE */ + 0x04 /* MFP3 AIC32X4_MFP3_GPIO_ENABLED */ + 0xff /* AIC32X4_MFPX_DEFAULT_VALUE */ + 0x08 /* MFP5 AIC32X4_MFP5_GPIO_INPUT */ + >; }; diff --git a/Documentation/devicetree/bindings/sound/tlv320aic3x.txt b/Documentation/devicetree/bindings/sound/tlv320aic3x.txt index 47a213c411ce..ba5b45c483f5 100644 --- a/Documentation/devicetree/bindings/sound/tlv320aic3x.txt +++ b/Documentation/devicetree/bindings/sound/tlv320aic3x.txt @@ -26,6 +26,11 @@ Optional properties: 3 - MICBIAS output is connected to AVDD, If this node is not mentioned or if the value is incorrect, then MicBias is powered down. 
+- ai3x-ocmv - Output Common-Mode Voltage selection: + 0 - 1.35V, + 1 - 1.5V, + 2 - 1.65V, + 3 - 1.8V - AVDD-supply, IOVDD-supply, DRVDD-supply, DVDD-supply : power supplies for the device as covered in Documentation/devicetree/bindings/regulator/regulator.txt diff --git a/Documentation/devicetree/bindings/sound/wm8524.txt b/Documentation/devicetree/bindings/sound/wm8524.txt new file mode 100644 index 000000000000..20c62002cbcd --- /dev/null +++ b/Documentation/devicetree/bindings/sound/wm8524.txt @@ -0,0 +1,16 @@ +WM8524 audio CODEC + +This device does not use I2C or SPI but a simple Hardware Control Interface. + +Required properties: + + - compatible : "wlf,wm8524" + + - wlf,mute-gpios: a GPIO spec for the MUTE pin. + +Example: + +codec: wm8524@0 { + compatible = "wlf,wm8524"; + wlf,mute-gpios = <&gpio1 8 GPIO_ACTIVE_LOW>; +}; diff --git a/Documentation/devicetree/bindings/sound/zte,zx-i2s.txt b/Documentation/devicetree/bindings/sound/zte,zx-i2s.txt index 292ad5083704..3927251464f0 100644 --- a/Documentation/devicetree/bindings/sound/zte,zx-i2s.txt +++ b/Documentation/devicetree/bindings/sound/zte,zx-i2s.txt @@ -27,7 +27,6 @@ Example: interrupts = ; dmas = <&dma 5>, <&dma 6>; dma-names = "tx", "rx"; - status = "okay"; }; sound { diff --git a/Documentation/devicetree/bindings/sound/zte,zx-spdif.txt b/Documentation/devicetree/bindings/sound/zte,zx-spdif.txt index 989544ea6eb5..b5a5ca4502f9 100644 --- a/Documentation/devicetree/bindings/sound/zte,zx-spdif.txt +++ b/Documentation/devicetree/bindings/sound/zte,zx-spdif.txt @@ -24,5 +24,4 @@ Example: interrupts = ; dmas = <&dma 4>; dma-names = "tx"; - status = "okay"; }; diff --git a/Documentation/devicetree/bindings/spi/efm32-spi.txt b/Documentation/devicetree/bindings/spi/efm32-spi.txt index 750e29aff9bc..2c1e6a43930b 100644 --- a/Documentation/devicetree/bindings/spi/efm32-spi.txt +++ b/Documentation/devicetree/bindings/spi/efm32-spi.txt @@ -28,7 +28,6 @@ spi1: spi@0x4000c400 { /* USART1 */ clocks = <&cmu 20>; cs-gpios = <&gpio 51 1>; // D3 energymicro,location = <1>; - status = "ok"; ks8851@0 { compatible = "ks8851"; @@ -36,6 +35,5 @@ spi1: spi@0x4000c400 { /* USART1 */ reg = <0>; interrupt-parent = <&boardfpga>; interrupts = <4>; - status = "ok"; }; }; diff --git a/Documentation/devicetree/bindings/spi/nvidia,tegra114-spi.txt b/Documentation/devicetree/bindings/spi/nvidia,tegra114-spi.txt index b785976fe98a..9ba7c5a273b4 100644 --- a/Documentation/devicetree/bindings/spi/nvidia,tegra114-spi.txt +++ b/Documentation/devicetree/bindings/spi/nvidia,tegra114-spi.txt @@ -38,5 +38,4 @@ spi@7000d600 { reset-names = "spi"; dmas = <&apbdma 16>, <&apbdma 16>; dma-names = "rx", "tx"; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/spi/nvidia,tegra20-sflash.txt b/Documentation/devicetree/bindings/spi/nvidia,tegra20-sflash.txt index bdf08e6dec9b..c212491929b5 100644 --- a/Documentation/devicetree/bindings/spi/nvidia,tegra20-sflash.txt +++ b/Documentation/devicetree/bindings/spi/nvidia,tegra20-sflash.txt @@ -34,5 +34,4 @@ spi@7000c380 { reset-names = "spi"; dmas = <&apbdma 11>, <&apbdma 11>; dma-names = "rx", "tx"; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/spi/nvidia,tegra20-slink.txt b/Documentation/devicetree/bindings/spi/nvidia,tegra20-slink.txt index 5db9144a33c8..40d80b93e327 100644 --- a/Documentation/devicetree/bindings/spi/nvidia,tegra20-slink.txt +++ b/Documentation/devicetree/bindings/spi/nvidia,tegra20-slink.txt @@ -34,5 +34,4 @@ spi@7000d600 { reset-names = "spi"; dmas = <&apbdma 16>, 
<&apbdma 16>; dma-names = "rx", "tx"; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/spi/sh-hspi.txt b/Documentation/devicetree/bindings/spi/sh-hspi.txt index 319bad4af875..585fed90376e 100644 --- a/Documentation/devicetree/bindings/spi/sh-hspi.txt +++ b/Documentation/devicetree/bindings/spi/sh-hspi.txt @@ -24,6 +24,5 @@ Example: interrupts = <0 63 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/spi/sh-msiof.txt b/Documentation/devicetree/bindings/spi/sh-msiof.txt index 39e5ef7c5e71..e865855726a2 100644 --- a/Documentation/devicetree/bindings/spi/sh-msiof.txt +++ b/Documentation/devicetree/bindings/spi/sh-msiof.txt @@ -79,5 +79,4 @@ Example: dma-names = "tx", "rx"; #address-cells = <1>; #size-cells = <0>; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/spi/spi-clps711x.txt b/Documentation/devicetree/bindings/spi/spi-clps711x.txt index 4c3ec13f423f..5122dc7860af 100644 --- a/Documentation/devicetree/bindings/spi/spi-clps711x.txt +++ b/Documentation/devicetree/bindings/spi/spi-clps711x.txt @@ -23,7 +23,6 @@ spi@80000500 { reg = <0x80000500 0x4>; interrupts = <15>; clocks = <&clks CLPS711X_CLK_SPI>; - status = "disabled"; }; syscon3: syscon@80002200 { diff --git a/Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt b/Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt index ff5893d275a2..13b1fcc8469e 100644 --- a/Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt +++ b/Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt @@ -39,7 +39,6 @@ dspi0@4002c000 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_dspi0_1>; big-endian; - status = "okay"; sflash: at26df081a@0 { #address-cells = <1>; diff --git a/Documentation/devicetree/bindings/spi/spi-mt65xx.txt b/Documentation/devicetree/bindings/spi/spi-mt65xx.txt index e0318cf92d73..236dcb0faf37 100644 --- a/Documentation/devicetree/bindings/spi/spi-mt65xx.txt +++ b/Documentation/devicetree/bindings/spi/spi-mt65xx.txt @@ -57,5 +57,4 @@ spi: spi@1100a000 { clock-names = "parent-clk", "sel-clk", "spi-clk"; cs-gpios = <&pio 105 GPIO_ACTIVE_LOW>, <&pio 72 GPIO_ACTIVE_LOW>; mediatek,pad-select = <1>, <0>; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/spi/spi-orion.txt b/Documentation/devicetree/bindings/spi/spi-orion.txt index 4f629cc7634a..df8ec31f2f07 100644 --- a/Documentation/devicetree/bindings/spi/spi-orion.txt +++ b/Documentation/devicetree/bindings/spi/spi-orion.txt @@ -29,7 +29,6 @@ Example: cell-index = <0>; reg = <0x10600 0x28>; interrupts = <23>; - status = "disabled"; }; Example with SPI direct mode support (optionally): @@ -48,7 +47,6 @@ Example with SPI direct mode support (optionally): , /* CS6 */ ; /* CS7 */ interrupts = <23>; - status = "disabled"; }; To enable the direct mode, the board specific 'ranges' property in the diff --git a/Documentation/devicetree/bindings/spi/spi-sun4i.txt b/Documentation/devicetree/bindings/spi/spi-sun4i.txt index de827f5a301e..484bbff5337e 100644 --- a/Documentation/devicetree/bindings/spi/spi-sun4i.txt +++ b/Documentation/devicetree/bindings/spi/spi-sun4i.txt @@ -18,7 +18,6 @@ spi1: spi@01c06000 { interrupts = <11>; clocks = <&ahb_gates 21>, <&spi1_clk>; clock-names = "ahb", "mod"; - status = "disabled"; #address-cells = <1>; #size-cells = <0>; }; diff --git a/Documentation/devicetree/bindings/spi/spi-sun6i.txt b/Documentation/devicetree/bindings/spi/spi-sun6i.txt index 2ec99b86b622..ab1811354cce 100644 --- 
a/Documentation/devicetree/bindings/spi/spi-sun6i.txt +++ b/Documentation/devicetree/bindings/spi/spi-sun6i.txt @@ -39,7 +39,6 @@ spi0: spi@01c68000 { pinctrl-names = "default"; pinctrl-0 = <&spi0_pins>; resets = <&ccu RST_BUS_SPI0>; - status = "disabled"; #address-cells = <1>; #size-cells = <0>; }; diff --git a/Documentation/devicetree/bindings/spi/spi_atmel.txt b/Documentation/devicetree/bindings/spi/spi_atmel.txt index fb588b3e6a9a..f99c733d75c1 100644 --- a/Documentation/devicetree/bindings/spi/spi_atmel.txt +++ b/Documentation/devicetree/bindings/spi/spi_atmel.txt @@ -26,7 +26,6 @@ spi1: spi@fffcc000 { clock-names = "spi_clk"; cs-gpios = <&pioB 3 0>; atmel,fifo-size = <32>; - status = "okay"; mmc-slot@0 { compatible = "mmc-spi-slot"; diff --git a/Documentation/devicetree/bindings/sram/renesas,smp-sram.txt b/Documentation/devicetree/bindings/sram/renesas,smp-sram.txt new file mode 100644 index 000000000000..712d05e3e15e --- /dev/null +++ b/Documentation/devicetree/bindings/sram/renesas,smp-sram.txt @@ -0,0 +1,27 @@ +* Renesas SMP SRAM + +Renesas R-Car Gen2 and RZ/G1 SoCs need a small piece of SRAM for the jump stub +for secondary CPU bringup and CPU hotplug. +This memory is reserved by adding a child node to a "mmio-sram" node, cfr. +Documentation/devicetree/bindings/sram/sram.txt. + +Required child node properties: + - compatible: Must be "renesas,smp-sram", + - reg: Address and length of the reserved SRAM. + The full physical (bus) address must be aligned to a 256 KiB boundary. + + +Example: + + icram1: sram@e63c0000 { + compatible = "mmio-sram"; + reg = <0 0xe63c0000 0 0x1000>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 0 0xe63c0000 0x1000>; + + smp-sram@0 { + compatible = "renesas,smp-sram"; + reg = <0 0x10>; + }; + }; diff --git a/Documentation/devicetree/bindings/sram/sunxi-sram.txt b/Documentation/devicetree/bindings/sram/sunxi-sram.txt index 8d5665468fe7..6bb92a1df753 100644 --- a/Documentation/devicetree/bindings/sram/sunxi-sram.txt +++ b/Documentation/devicetree/bindings/sram/sunxi-sram.txt @@ -9,7 +9,9 @@ Controller Node --------------- Required properties: -- compatible : "allwinner,sun4i-a10-sram-controller" +- compatible : should be: + - "allwinner,sun4i-a10-sram-controller" + - "allwinner,sun50i-a64-sram-controller" - reg : sram controller register offset + length SRAM nodes @@ -22,10 +24,13 @@ Each SRAM will have SRAM sections that are going to be handled by the SRAM controller as subnodes. These sections are represented following once again the representation described in the mmio-sram binding. 
-The valid sections compatible are: +The valid sections compatible for A10 are: - allwinner,sun4i-a10-sram-a3-a4 - allwinner,sun4i-a10-sram-d +The valid sections compatible for A64 are: + - allwinner,sun50i-a64-sram-c + Devices using SRAM sections --------------------------- @@ -59,7 +64,6 @@ sram-controller@01c00000 { emac_sram: sram-section@8000 { compatible = "allwinner,sun4i-a10-sram-a3-a4"; reg = <0x8000 0x4000>; - status = "disabled"; }; }; }; diff --git a/Documentation/devicetree/bindings/thermal/armada-thermal.txt b/Documentation/devicetree/bindings/thermal/armada-thermal.txt index 4698e0edc205..24aacf8948c5 100644 --- a/Documentation/devicetree/bindings/thermal/armada-thermal.txt +++ b/Documentation/devicetree/bindings/thermal/armada-thermal.txt @@ -20,5 +20,4 @@ Example: compatible = "marvell,armada370-thermal"; reg = <0xd0018300 0x4 0xd0018304 0x4>; - status = "okay"; }; diff --git a/Documentation/devicetree/bindings/thermal/exynos-thermal.txt b/Documentation/devicetree/bindings/thermal/exynos-thermal.txt index 70b4c16c7ed8..9b4c7b017495 100644 --- a/Documentation/devicetree/bindings/thermal/exynos-thermal.txt +++ b/Documentation/devicetree/bindings/thermal/exynos-thermal.txt @@ -77,7 +77,6 @@ Example 1): interrupts = <2 4>; clocks = <&clock 383>; clock-names = "tmu_apbif"; - status = "disabled"; vtmu-supply = <&tmu_regulator_node>; #include "exynos4412-tmu-sensor-conf.dtsi" }; diff --git a/Documentation/devicetree/bindings/thermal/mediatek-thermal.txt b/Documentation/devicetree/bindings/thermal/mediatek-thermal.txt index e2f494d74d8a..0d73ea5e9c0c 100644 --- a/Documentation/devicetree/bindings/thermal/mediatek-thermal.txt +++ b/Documentation/devicetree/bindings/thermal/mediatek-thermal.txt @@ -11,6 +11,7 @@ Required properties: - compatible: - "mediatek,mt8173-thermal" : For MT8173 family of SoCs - "mediatek,mt2701-thermal" : For MT2701 family of SoCs + - "mediatek,mt2712-thermal" : For MT2712 family of SoCs - reg: Address range of the thermal controller - interrupts: IRQ for the thermal controller - clocks, clock-names: Clocks needed for the thermal controller. 
required diff --git a/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.txt b/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.txt index 07a9713ae6a7..fdf5caa6229b 100644 --- a/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.txt +++ b/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.txt @@ -36,7 +36,6 @@ Example: clocks = <&cpg CPG_MOD 522>; power-domains = <&sysc R8A7795_PD_ALWAYS_ON>; #thermal-sensor-cells = <1>; - status = "okay"; }; thermal-zones { diff --git a/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt b/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt index 43003aec94bd..e3a6234fb1ac 100644 --- a/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt +++ b/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt @@ -4,6 +4,7 @@ Required properties: - compatible : should be "rockchip,-tsadc" "rockchip,rk3228-tsadc": found on RK3228 SoCs "rockchip,rk3288-tsadc": found on RK3288 SoCs + "rockchip,rk3328-tsadc": found on RK3328 SoCs "rockchip,rk3368-tsadc": found on RK3368 SoCs "rockchip,rk3399-tsadc": found on RK3399 SoCs - reg : physical base address of the controller and length of memory mapped diff --git a/Documentation/devicetree/bindings/thermal/uniphier-thermal.txt b/Documentation/devicetree/bindings/thermal/uniphier-thermal.txt new file mode 100644 index 000000000000..686c0b42ed3f --- /dev/null +++ b/Documentation/devicetree/bindings/thermal/uniphier-thermal.txt @@ -0,0 +1,64 @@ +* UniPhier Thermal bindings + +This describes the devicetree bindings for thermal monitor supported by +PVT(Process, Voltage and Temperature) monitoring unit implemented on Socionext +UniPhier SoCs. + +Required properties: +- compatible : + - "socionext,uniphier-pxs2-thermal" : For UniPhier PXs2 SoC + - "socionext,uniphier-ld20-thermal" : For UniPhier LD20 SoC +- interrupts : IRQ for the temperature alarm +- #thermal-sensor-cells : Should be 0. See ./thermal.txt for details. + +Optional properties: +- socionext,tmod-calibration: A pair of calibrated values referred from PVT, + in case that the values aren't set on SoC, + like a reference board. + +Example: + + sysctrl@61840000 { + compatible = "socionext,uniphier-ld20-sysctrl", + "simple-mfd", "syscon"; + reg = <0x61840000 0x10000>; + ... + pvtctl: pvtctl { + compatible = "socionext,uniphier-ld20-thermal"; + interrupts = <0 3 1>; + #thermal-sensor-cells = <0>; + }; + ... 
+ }; + + thermal-zones { + cpu_thermal { + polling-delay-passive = <250>; /* 250ms */ + polling-delay = <1000>; /* 1000ms */ + thermal-sensors = <&pvtctl>; + + trips { + cpu_crit: cpu_crit { + temperature = <110000>; /* 110C */ + hysteresis = <2000>; + type = "critical"; + }; + cpu_alert: cpu_alert { + temperature = <100000>; /* 100C */ + hysteresis = <2000>; + type = "passive"; + }; + }; + + cooling-maps { + map0 { + trip = <&cpu_alert>; + cooling-device = <&cpu0 (-1) (-1)>; + }; + map1 { + trip = <&cpu_alert>; + cooling-device = <&cpu2 (-1) (-1)>; + }; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/trivial-devices.txt b/Documentation/devicetree/bindings/trivial-devices.txt index 35f406dd86b6..af284fbd4d23 100644 --- a/Documentation/devicetree/bindings/trivial-devices.txt +++ b/Documentation/devicetree/bindings/trivial-devices.txt @@ -21,6 +21,16 @@ adi,adt7490 +/-1C TDM Extended Temp Range I.C adi,adxl345 Three-Axis Digital Accelerometer adi,adxl346 Three-Axis Digital Accelerometer (backward-compatibility value "adi,adxl345" must be listed too) ams,iaq-core AMS iAQ-Core VOC Sensor +amstaos,tsl2571 AMS/TAOS ALS and proximity sensor +amstaos,tsl2671 AMS/TAOS ALS and proximity sensor +amstaos,tmd2671 AMS/TAOS ALS and proximity sensor +amstaos,tsl2771 AMS/TAOS ALS and proximity sensor +amstaos,tmd2771 AMS/TAOS ALS and proximity sensor +amstaos,tsl2572 AMS/TAOS ALS and proximity sensor +amstaos,tsl2672 AMS/TAOS ALS and proximity sensor +amstaos,tmd2672 AMS/TAOS ALS and proximity sensor +amstaos,tsl2772 AMS/TAOS ALS and proximity sensor +amstaos,tmd2772 AMS/TAOS ALS and proximity sensor at,24c08 i2c serial eeprom (24cxx) atmel,at97sc3204t i2c trusted platform module (TPM) capella,cm32181 CM32181: Ambient Light Sensor @@ -36,7 +46,9 @@ dallas,ds1775 Tiny Digital Thermometer and Thermostat dallas,ds3232 Extremely Accurate I²C RTC with Integrated Crystal and SRAM dallas,ds4510 CPU Supervisor with Nonvolatile Memory and Programmable I/O dallas,ds75 Digital Thermometer and Thermostat +devantech,srf02 Devantech SRF02 ultrasonic ranger in I2C mode devantech,srf08 Devantech SRF08 ultrasonic ranger +devantech,srf10 Devantech SRF10 ultrasonic ranger dlg,da9053 DA9053: flexible system level PMIC with multicore support dlg,da9063 DA9063: system PMIC for quad-core application processors domintech,dmard09 DMARD09: 3-axis Accelerometer @@ -54,6 +66,7 @@ fsl,sgtl5000 SGTL5000: Ultra Low-Power Audio Codec gmt,g751 G751: Digital Temperature Sensor and Thermal Watchdog with Two-Wire Interface infineon,slb9635tt Infineon SLB9635 (Soft-) I2C TPM (old protocol, max 100khz) infineon,slb9645tt Infineon SLB9645 I2C TPM (new protocol, max 400khz) +isil,isl1208 Intersil ISL1208 Low Power RTC with Battery Backed SRAM isil,isl29028 Intersil ISL29028 Ambient Light and Proximity Sensor isil,isl29030 Intersil ISL29030 Ambient Light and Proximity Sensor maxim,ds1050 5 Bit Programmable, Pulse-Width Modulator @@ -168,6 +181,7 @@ st,m41t80 M41T80 - SERIAL ACCESS RTC WITH ALARMS taos,tsl2550 Ambient Light Sensor with SMBUS/Two Wire Serial Interface ti,ads7828 8-Channels, 12-bit ADC ti,ads7830 8-Channels, 8-bit ADC +ti,amc6821 Temperature Monitoring and Fan Control ti,tsc2003 I2C Touch-Screen Controller ti,tmp102 Low Power Digital Temperature Sensor with SMBUS/Two Wire Serial Interface ti,tmp103 Low Power Digital Temperature Sensor with SMBUS/Two Wire Serial Interface diff --git a/Documentation/devicetree/bindings/unittest.txt b/Documentation/devicetree/bindings/unittest.txt index 3bf58c20fe94..9a5b311f4434 100644 
--- a/Documentation/devicetree/bindings/unittest.txt +++ b/Documentation/devicetree/bindings/unittest.txt @@ -10,7 +10,6 @@ All other properties are optional. Example: unittest { compatible = "unittest"; - status = "okay"; }; 2) OF unittest i2c adapter platform device @@ -25,7 +24,6 @@ Children nodes contain unittest i2c devices. Example: unittest-i2c-bus { compatible = "unittest-i2c-bus"; - status = "okay"; }; 3) OF unittest i2c device @@ -40,7 +38,6 @@ All other properties are optional Example: unittest-i2c-dev { compatible = "unittest-i2c-dev"; - status = "okay"; }; 4) OF unittest i2c mux device @@ -55,7 +52,6 @@ Children nodes contain unittest i2c bus nodes per channel. Example: unittest-i2c-mux { compatible = "unittest-i2c-mux"; - status = "okay"; #address-cells = <1>; #size-cells = <0>; channel-0 { @@ -65,7 +61,6 @@ Example: i2c-dev { reg = <8>; compatible = "unittest-i2c-dev"; - status = "okay"; }; }; }; diff --git a/Documentation/devicetree/bindings/usb/allwinner,sun4i-a10-musb.txt b/Documentation/devicetree/bindings/usb/allwinner,sun4i-a10-musb.txt index d9b42da016f3..cb2bd83fa89a 100644 --- a/Documentation/devicetree/bindings/usb/allwinner,sun4i-a10-musb.txt +++ b/Documentation/devicetree/bindings/usb/allwinner,sun4i-a10-musb.txt @@ -25,5 +25,4 @@ Example: phys = <&usbphy 0>; phy-names = "usb"; extcon = <&usbphy 0>; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/usb/am33xx-usb.txt b/Documentation/devicetree/bindings/usb/am33xx-usb.txt index 20c2ff2ba07e..16920d78e1b8 100644 --- a/Documentation/devicetree/bindings/usb/am33xx-usb.txt +++ b/Documentation/devicetree/bindings/usb/am33xx-usb.txt @@ -4,9 +4,9 @@ - reg: offset and length of the usbss register sets - ti,hwmods : must be "usb_otg_hs" -The glue layer contains multiple child nodes. It is required the have +The glue layer contains multiple child nodes. It is required to have at least a control module node, USB node and a PHY node. The second USB -node and its PHY node is optional. The DMA node is also optional. +node and its PHY node are optional. The DMA node is also optional. Reset module ~~~~~~~~~~~~ diff --git a/Documentation/devicetree/bindings/usb/da8xx-usb.txt b/Documentation/devicetree/bindings/usb/da8xx-usb.txt index 717c5f656237..9ce22551b2b3 100644 --- a/Documentation/devicetree/bindings/usb/da8xx-usb.txt +++ b/Documentation/devicetree/bindings/usb/da8xx-usb.txt @@ -42,7 +42,6 @@ Example: usb_phy: usb-phy { compatible = "ti,da830-usb-phy"; #phy-cells = <0>; - status = "okay"; }; usb0: usb@200000 { compatible = "ti,da830-musb"; @@ -66,7 +65,6 @@ Example: "rx1", "rx2", "rx3", "rx4", "tx1", "tx2", "tx3", "tx4"; - status = "okay"; cppi41dma: dma-controller@201000 { compatible = "ti,da830-cppi41"; diff --git a/Documentation/devicetree/bindings/usb/dwc3-st.txt b/Documentation/devicetree/bindings/usb/dwc3-st.txt index 50dee3b44665..df0e02e1ee43 100644 --- a/Documentation/devicetree/bindings/usb/dwc3-st.txt +++ b/Documentation/devicetree/bindings/usb/dwc3-st.txt @@ -42,7 +42,6 @@ or "device". 
Example: st_dwc3: dwc3@8f94000 { - status = "disabled"; compatible = "st,stih407-dwc3"; reg = <0x08f94000 0x1000>, <0x110 0x4>; reg-names = "reg-glue", "syscfg-reg"; diff --git a/Documentation/devicetree/bindings/usb/dwc3-xilinx.txt b/Documentation/devicetree/bindings/usb/dwc3-xilinx.txt index 30361b32a460..4aae5b2cef56 100644 --- a/Documentation/devicetree/bindings/usb/dwc3-xilinx.txt +++ b/Documentation/devicetree/bindings/usb/dwc3-xilinx.txt @@ -18,7 +18,6 @@ Example device node: usb@0 { #address-cells = <0x2>; #size-cells = <0x1>; - status = "okay"; compatible = "xlnx,zynqmp-dwc3"; clock-names = "bus_clk" "ref_clk"; clocks = <&clk125>, <&clk125>; diff --git a/Documentation/devicetree/bindings/usb/ehci-st.txt b/Documentation/devicetree/bindings/usb/ehci-st.txt index 410d922cfdd7..9feea6c3e4d9 100644 --- a/Documentation/devicetree/bindings/usb/ehci-st.txt +++ b/Documentation/devicetree/bindings/usb/ehci-st.txt @@ -31,7 +31,6 @@ Example: clocks = <&clk_s_a1_ls 0>; phys = <&usb2_phy>; phy-names = "usb"; - status = "okay"; resets = <&powerdown STIH416_USB1_POWERDOWN>, <&softreset STIH416_USB1_SOFTRESET>; diff --git a/Documentation/devicetree/bindings/usb/exynos-usb.txt b/Documentation/devicetree/bindings/usb/exynos-usb.txt index 78ebebb66dad..c97374315049 100644 --- a/Documentation/devicetree/bindings/usb/exynos-usb.txt +++ b/Documentation/devicetree/bindings/usb/exynos-usb.txt @@ -40,7 +40,6 @@ Example: port@0 { reg = <0>; phys = <&usb2phy 1>; - status = "disabled"; }; }; @@ -75,7 +74,6 @@ Example: port@0 { reg = <0>; phys = <&usb2phy 1>; - status = "disabled"; }; }; diff --git a/Documentation/devicetree/bindings/usb/isp1301.txt b/Documentation/devicetree/bindings/usb/isp1301.txt index 5405d99d9aaa..ecd607dacba5 100644 --- a/Documentation/devicetree/bindings/usb/isp1301.txt +++ b/Documentation/devicetree/bindings/usb/isp1301.txt @@ -21,5 +21,4 @@ Example: interrupt-parent = <&mic>; interrupts = <0x3d 0>, <0x3e 0>, <0x3c 0>, <0x3a 0>; transceiver = <&isp1301>; - status = "okay"; }; diff --git a/Documentation/devicetree/bindings/usb/keystone-usb.txt b/Documentation/devicetree/bindings/usb/keystone-usb.txt index 2d1bef16f149..f96e09f784cc 100644 --- a/Documentation/devicetree/bindings/usb/keystone-usb.txt +++ b/Documentation/devicetree/bindings/usb/keystone-usb.txt @@ -44,7 +44,6 @@ Example: clock-names = "usb"; interrupts = ; ranges; - status = "disabled"; dwc3@2690000 { compatible = "synopsys,dwc3"; diff --git a/Documentation/devicetree/bindings/usb/mediatek,mtu3.txt b/Documentation/devicetree/bindings/usb/mediatek,mtu3.txt index 838ae48eafc1..49f54767cd21 100644 --- a/Documentation/devicetree/bindings/usb/mediatek,mtu3.txt +++ b/Documentation/devicetree/bindings/usb/mediatek,mtu3.txt @@ -75,7 +75,6 @@ ssusb: usb@11271000 { #address-cells = <2>; #size-cells = <2>; ranges; - status = "disabled"; usb_host: xhci@11270000 { compatible = "mediatek,mt8173-xhci"; @@ -86,6 +85,5 @@ ssusb: usb@11271000 { clocks = <&topckgen CLK_TOP_USB30_SEL>, <&clk26m>; clock-names = "sys_ck", "ref_ck"; vusb33-supply = <&mt6397_vusb_reg>; - status = "disabled"; }; }; diff --git a/Documentation/devicetree/bindings/usb/ohci-st.txt b/Documentation/devicetree/bindings/usb/ohci-st.txt index 6d8393748da2..d893ec9131c3 100644 --- a/Documentation/devicetree/bindings/usb/ohci-st.txt +++ b/Documentation/devicetree/bindings/usb/ohci-st.txt @@ -29,7 +29,6 @@ Example: clock-names = "ic", "clk48"; phys = <&usb2_phy>; phy-names = "usb"; - status = "okay"; resets = <&powerdown STIH416_USB0_POWERDOWN>, <&softreset 
STIH416_USB0_SOFTRESET>; diff --git a/Documentation/devicetree/bindings/usb/qcom,dwc3.txt b/Documentation/devicetree/bindings/usb/qcom,dwc3.txt index 73cc0963e823..bc8a2fa5d2bf 100644 --- a/Documentation/devicetree/bindings/usb/qcom,dwc3.txt +++ b/Documentation/devicetree/bindings/usb/qcom,dwc3.txt @@ -29,7 +29,6 @@ Example device nodes: clock-names = "ref"; #phy-cells = <0>; - status = "ok"; }; ss_phy: phy@100f8830 { @@ -39,7 +38,6 @@ Example device nodes: clock-names = "ref"; #phy-cells = <0>; - status = "ok"; }; usb3_0: usb30@0 { @@ -51,7 +49,6 @@ Example device nodes: ranges; - status = "ok"; dwc3@10000000 { compatible = "snps,dwc3"; diff --git a/Documentation/devicetree/bindings/usb/rockchip,dwc3.txt b/Documentation/devicetree/bindings/usb/rockchip,dwc3.txt index 0536a938e3ab..50a31536e975 100644 --- a/Documentation/devicetree/bindings/usb/rockchip,dwc3.txt +++ b/Documentation/devicetree/bindings/usb/rockchip,dwc3.txt @@ -29,13 +29,11 @@ Example device nodes: #address-cells = <2>; #size-cells = <2>; ranges; - status = "disabled"; usbdrd_dwc3_0: dwc3@fe800000 { compatible = "snps,dwc3"; reg = <0x0 0xfe800000 0x0 0x100000>; interrupts = ; dr_mode = "otg"; - status = "disabled"; }; }; @@ -48,12 +46,10 @@ Example device nodes: #address-cells = <2>; #size-cells = <2>; ranges; - status = "disabled"; usbdrd_dwc3_1: dwc3@fe900000 { compatible = "snps,dwc3"; reg = <0x0 0xfe900000 0x0 0x100000>; interrupts = ; dr_mode = "otg"; - status = "disabled"; }; }; diff --git a/Documentation/devicetree/bindings/usb/usb-device.txt b/Documentation/devicetree/bindings/usb/usb-device.txt index 1c35e7b665e1..ce02cebac26a 100644 --- a/Documentation/devicetree/bindings/usb/usb-device.txt +++ b/Documentation/devicetree/bindings/usb/usb-device.txt @@ -2,7 +2,7 @@ Generic USB Device Properties Usually, we only use device tree for hard wired USB device. The reference binding doc is from: -http://www.firmware.org/1275/bindings/usb/usb-1_0.ps +http://www.devicetree.org/open-firmware/bindings/usb/usb-1_0.ps Required properties: - compatible: usbVID,PID. The textual representation of VID, PID shall @@ -16,7 +16,6 @@ Required properties: Example: &usb1 { - status = "okay"; #address-cells = <1>; #size-cells = <0>; diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt index 4e72012928b4..1afd298eddd7 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt @@ -3,12 +3,13 @@ Device tree binding vendor prefix registry. Keep list in alphabetical order. This isn't an exhaustive list, but you should add new prefixes to it before using them to avoid name-space collisions. -abcn Abracon Corporation abilis Abilis Systems +abracon Abracon Corporation actions Actions Semiconductor Co., Ltd. active-semi Active-Semi International Inc ad Avionic Design GmbH adapteva Adapteva, Inc. +adaptrum Adaptrum, Inc. adh AD Holdings Plc. adi Analog Devices, Inc. advantech Advantech Corporation @@ -47,6 +48,7 @@ avic Shanghai AVIC Optoelectronics Co., Ltd. axentia Axentia Technologies AB axis Axis Communications AB bananapi BIPAI KEJI LIMITED +bhf Beckhoff Automation GmbH & Co. KG boe BOE Technology Group Co., Ltd. bosch Bosch Sensortec GmbH boundary Boundary Devices Inc. @@ -88,6 +90,7 @@ dlg Dialog Semiconductor dlink D-Link Corporation dmo Data Modul AG domintech Domintech Co., Ltd. 
+dongwoon Dongwoon Anatech dptechnics DPTechnics dragino Dragino Technology Co., Limited ea Embedded Artists AB @@ -196,6 +199,7 @@ mediatek MediaTek Inc. megachips MegaChips melexis Melexis N.V. melfas MELFAS Inc. +mellanox Mellanox Technologies memsic MEMSIC Inc. merrii Merrii Technology Co., Ltd. micrel Micrel Inc. @@ -207,7 +211,7 @@ miramems MiraMEMS Sensing Technology Co., Ltd. mitsubishi Mitsubishi Electric Corporation mosaixtech Mosaix Technologies, Inc. motorola Motorola, Inc. -moxa Moxa +moxa Moxa Inc. mpl MPL AG mqmaker mqmaker Inc. msi Micro-Star International Co. Ltd. @@ -289,6 +293,7 @@ schindler Schindler seagate Seagate Technology PLC semtech Semtech Corporation sensirion Sensirion AG +sff Small Form Factor Committee sgx SGX Sensortech sharp Sharp Corporation si-en Si-En Technology Ltd. @@ -343,6 +348,7 @@ tpo TPO tronfy Tronfy tronsmart Tronsmart truly Truly Semiconductors Limited +tsd Theobroma Systems Design und Consulting GmbH tyan Tyan Computer Corporation ucrobotics uCRobotics udoo Udoo @@ -355,6 +361,7 @@ variscite Variscite Ltd. via VIA Technologies, Inc. virtio Virtual I/O Device Specification, developed by the OASIS consortium vivante Vivante Corporation +vocore VoCore Studio voipac Voipac Technologies s.r.o. wd Western Digital Corp. wetek WeTek Electronics, limited. diff --git a/Documentation/devicetree/bindings/w1/fsl-imx-owire.txt b/Documentation/devicetree/bindings/w1/fsl-imx-owire.txt index ecf42c07684d..cbaa6467ab2c 100644 --- a/Documentation/devicetree/bindings/w1/fsl-imx-owire.txt +++ b/Documentation/devicetree/bindings/w1/fsl-imx-owire.txt @@ -15,5 +15,4 @@ owire: owire@63fa4000 { compatible = "fsl,imx53-owire", "fsl,imx21-owire"; reg = <0x63fa4000 0x4000>; clocks = <&clks 159>; - status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/watchdog/aspeed-wdt.txt b/Documentation/devicetree/bindings/watchdog/aspeed-wdt.txt index c5e74d7b4406..c5077a1f5cb3 100644 --- a/Documentation/devicetree/bindings/watchdog/aspeed-wdt.txt +++ b/Documentation/devicetree/bindings/watchdog/aspeed-wdt.txt @@ -8,9 +8,49 @@ Required properties: - reg: physical base address of the controller and length of memory mapped region +Optional properties: + + - aspeed,reset-type = "cpu|soc|system|none" + + Reset behavior - Whenever a timeout occurs the watchdog can be programmed + to generate one of three different, mutually exclusive, types of resets. + + Type "none" can be specified to indicate that no resets are to be done. + This is useful in situations where another watchdog engine on chip is + to perform the reset. + + If 'aspeed,reset-type=' is not specfied the default is to enable system + reset. + + Reset types: + + - cpu: Reset CPU on watchdog timeout + + - soc: Reset 'System on Chip' on watchdog timeout + + - system: Reset system on watchdog timeout + + - none: No reset is performed on timeout. Assumes another watchdog + engine is responsible for this. + + - aspeed,alt-boot: If property is present then boot from alternate block. + - aspeed,external-signal: If property is present then signal is sent to + external reset counter (only WDT1 and WDT2). If not + specified no external signal is sent. + - aspeed,ext-pulse-duration: External signal pulse duration in microseconds + +Optional properties for AST2500-compatible watchdogs: + - aspeed,ext-push-pull: If aspeed,external-signal is present, set the pin's + drive type to push-pull. The default is open-drain. 
+ - aspeed,ext-active-high: If aspeed,external-signal is present and and the pin + is configured as push-pull, then set the pulse + polarity to active-high. The default is active-low. + Example: wdt1: watchdog@1e785000 { compatible = "aspeed,ast2400-wdt"; reg = <0x1e785000 0x1c>; + aspeed,reset-type = "system"; + aspeed,external-signal; }; diff --git a/Documentation/devicetree/bindings/watchdog/atmel-sama5d4-wdt.txt b/Documentation/devicetree/bindings/watchdog/atmel-sama5d4-wdt.txt index f7cc7c060910..4fec1e3725b4 100644 --- a/Documentation/devicetree/bindings/watchdog/atmel-sama5d4-wdt.txt +++ b/Documentation/devicetree/bindings/watchdog/atmel-sama5d4-wdt.txt @@ -31,5 +31,4 @@ Example: atmel,watchdog-type = "hardware"; atmel,dbg-halt; atmel,idle-halt; - status = "okay"; }; diff --git a/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt b/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt index 86fa6de1019b..711a880b3d3b 100644 --- a/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt +++ b/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt @@ -48,5 +48,4 @@ Example: atmel,idle-halt; atmel,max-heartbeat-sec = <16>; atmel,min-heartbeat-sec = <0>; - status = "okay"; }; diff --git a/Documentation/devicetree/bindings/watchdog/lantiq-wdt.txt b/Documentation/devicetree/bindings/watchdog/lantiq-wdt.txt new file mode 100644 index 000000000000..18d4d8302702 --- /dev/null +++ b/Documentation/devicetree/bindings/watchdog/lantiq-wdt.txt @@ -0,0 +1,24 @@ +Lantiq WTD watchdog binding +============================ + +This describes the binding of the Lantiq watchdog driver. + +------------------------------------------------------------------------------- +Required properties: +- compatible : Should be one of + "lantiq,wdt" + "lantiq,xrx100-wdt" + "lantiq,xrx200-wdt", "lantiq,xrx100-wdt" + "lantiq,falcon-wdt" +- reg : Address of the watchdog block +- lantiq,rcu : A phandle to the RCU syscon (required for + "lantiq,falcon-wdt" and "lantiq,xrx100-wdt") + +------------------------------------------------------------------------------- +Example for the watchdog on the xRX200 SoCs: + watchdog@803f0 { + compatible = "lantiq,xrx200-wdt", "lantiq,xrx100-wdt"; + reg = <0x803f0 0x10>; + + lantiq,rcu = <&rcu0>; + }; diff --git a/Documentation/devicetree/bindings/watchdog/marvel.txt b/Documentation/devicetree/bindings/watchdog/marvel.txt index 858ed9221ac4..c1b67a78f00c 100644 --- a/Documentation/devicetree/bindings/watchdog/marvel.txt +++ b/Documentation/devicetree/bindings/watchdog/marvel.txt @@ -41,6 +41,5 @@ Example: reg = <0x20300 0x28>, <0x20108 0x4>; interrupts = <3>; timeout-sec = <10>; - status = "okay"; clocks = <&gate_clk 7>; }; diff --git a/Documentation/devicetree/bindings/watchdog/meson-wdt.txt b/Documentation/devicetree/bindings/watchdog/meson-wdt.txt index ae70185d96e6..8a6d84cb36c9 100644 --- a/Documentation/devicetree/bindings/watchdog/meson-wdt.txt +++ b/Documentation/devicetree/bindings/watchdog/meson-wdt.txt @@ -2,7 +2,11 @@ Meson SoCs Watchdog timer Required properties: -- compatible : should be "amlogic,meson6-wdt" or "amlogic,meson8b-wdt" +- compatible : depending on the SoC this should be one of: + "amlogic,meson6-wdt" on Meson6 SoCs + "amlogic,meson8-wdt" and "amlogic,meson6-wdt" on Meson8 SoCs + "amlogic,meson8b-wdt" on Meson8b SoCs + "amlogic,meson8m2-wdt" and "amlogic,meson8b-wdt" on Meson8m2 SoCs - reg : Specifies base physical address and size of the registers. 
Example: diff --git a/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt b/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt index 6a00939a059a..235de0683bb6 100644 --- a/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt +++ b/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt @@ -3,9 +3,11 @@ Mediatek SoCs Watchdog timer Required properties: - compatible should contain: - * "mediatek,mt2701-wdt" for MT2701 compatible watchdog timers - * "mediatek,mt6589-wdt" for all compatible watchdog timers (MT2701, - MT6589) + "mediatek,mt2701-wdt", "mediatek,mt6589-wdt": for MT2701 + "mediatek,mt6589-wdt": for MT6589 + "mediatek,mt6797-wdt", "mediatek,mt6589-wdt": for MT6797 + "mediatek,mt7622-wdt", "mediatek,mt6589-wdt": for MT7622 + "mediatek,mt7623-wdt", "mediatek,mt6589-wdt": for MT7623 - reg : Specifies base physical address and size of the registers. diff --git a/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt b/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt index 9e306afbbd49..bf6d1ca58af7 100644 --- a/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt +++ b/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt @@ -6,6 +6,7 @@ Required properties: Examples with soctypes are: - "renesas,r8a7795-wdt" (R-Car H3) - "renesas,r8a7796-wdt" (R-Car M3-W) + - "renesas,r8a77995-wdt" (R-Car D3) - "renesas,r7s72100-wdt" (RZ/A1) When compatible with the generic version, nodes must list the SoC-specific diff --git a/Documentation/devicetree/booting-without-of.txt b/Documentation/devicetree/booting-without-of.txt index fb740445199f..417f91110010 100644 --- a/Documentation/devicetree/booting-without-of.txt +++ b/Documentation/devicetree/booting-without-of.txt @@ -1282,7 +1282,7 @@ hierarchy and routing of interrupts in the hardware. The interrupt tree model is fully described in the document "Open Firmware Recommended Practice: Interrupt Mapping Version 0.9". The document is available at: - + 1) interrupts property ---------------------- diff --git a/Documentation/dmaengine/provider.txt b/Documentation/dmaengine/provider.txt index e33bc1c8ed2c..5dbe054a40ad 100644 --- a/Documentation/dmaengine/provider.txt +++ b/Documentation/dmaengine/provider.txt @@ -181,13 +181,6 @@ Currently, the types available are: - Used by the client drivers to register a callback that will be called on a regular basis through the DMA controller interrupt - * DMA_SG - - The device supports memory to memory scatter-gather - transfers. - - Even though a plain memcpy can look like a particular case of a - scatter-gather transfer, with a single chunk to transfer, it's a - distinct transaction type in the mem2mem transfers case - * DMA_PRIVATE - The devices only supports slave transfers, and as such isn't available for async transfers. @@ -395,6 +388,13 @@ where to put them) when DMA_CTRL_REUSE is already set - Terminating the channel + * DMA_PREP_CMD + - If set, the client driver tells DMA controller that passed data in DMA + API is command data. + - Interpretation of command data is DMA controller specific. It can be + used for issuing commands to other peripherals/register reads/register + writes for which the descriptor should be in different format from + normal data descriptors. 
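[Editor's aside] The DMA_PREP_CMD flag added above is easiest to see in context. Below is a minimal, hypothetical sketch of a client driver issuing a command descriptor on an already-requested slave channel; the function name, buffer handling and error values are illustrative assumptions, not part of the dmaengine documentation:

	#include <linux/dmaengine.h>

	/*
	 * Hypothetical sketch: submit controller-specific command data on a
	 * slave channel.  Assumes "chan" was obtained with dma_request_chan()
	 * and that the command buffer has already been DMA-mapped to
	 * "cmd_dma" with length "cmd_len".
	 */
	static int issue_cmd_descriptor(struct dma_chan *chan,
					dma_addr_t cmd_dma, size_t cmd_len)
	{
		struct dma_async_tx_descriptor *desc;
		dma_cookie_t cookie;

		/* DMA_PREP_CMD marks this as command data, not normal payload */
		desc = dmaengine_prep_slave_single(chan, cmd_dma, cmd_len,
						   DMA_MEM_TO_DEV,
						   DMA_PREP_CMD | DMA_PREP_INTERRUPT);
		if (!desc)
			return -EIO;

		cookie = dmaengine_submit(desc);
		if (dma_submit_error(cookie))
			return -EIO;

		dma_async_issue_pending(chan);
		return 0;
	}

Whether a given controller accepts DMA_PREP_CMD, and what format the command data must take, remains controller specific, as the text above notes.
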
General Design Notes -------------------- diff --git a/Documentation/dontdiff b/Documentation/dontdiff index 358b47c06ad4..2228fcc8e29f 100644 --- a/Documentation/dontdiff +++ b/Documentation/dontdiff @@ -259,5 +259,4 @@ wakeup.bin wakeup.elf wakeup.lds zImage* -zconf.hash.c zoffset.h diff --git a/Documentation/driver-api/firmware/request_firmware.rst b/Documentation/driver-api/firmware/request_firmware.rst index 1c2c4967cd43..cc0aea880824 100644 --- a/Documentation/driver-api/firmware/request_firmware.rst +++ b/Documentation/driver-api/firmware/request_firmware.rst @@ -44,17 +44,6 @@ request_firmware_nowait .. kernel-doc:: drivers/base/firmware_class.c :functions: request_firmware_nowait -Considerations for suspend and resume -===================================== - -During suspend and resume only the built-in firmware and the firmware cache -elements of the firmware API can be used. This is managed by fw_pm_notify(). - -fw_pm_notify ------------- -.. kernel-doc:: drivers/base/firmware_class.c - :functions: fw_pm_notify - request firmware API expected driver use ======================================== diff --git a/Documentation/driver-api/mtdnand.rst b/Documentation/driver-api/mtdnand.rst index e9afa586d15e..2a5191b6d445 100644 --- a/Documentation/driver-api/mtdnand.rst +++ b/Documentation/driver-api/mtdnand.rst @@ -516,7 +516,7 @@ mirrored table is performed. The most important field in the nand_bbt_descr structure is the options field. The options define most of the table properties. Use the -predefined constants from nand.h to define the options. +predefined constants from rawnand.h to define the options. - Number of bits per block @@ -843,7 +843,7 @@ Chip option constants Constants for chip id table ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -These constants are defined in nand.h. They are OR-ed together to +These constants are defined in rawnand.h. They are OR-ed together to describe the chip functionality:: /* Buswitdh is 16 bit */ @@ -865,7 +865,7 @@ describe the chip functionality:: Constants for runtime options ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -These constants are defined in nand.h. They are OR-ed together to +These constants are defined in rawnand.h. They are OR-ed together to describe the functionality:: /* The hw ecc generator provides a syndrome instead a ecc value on read @@ -956,7 +956,7 @@ developer. Each struct member has a short description which is marked with an [XXX] identifier. See the chapter "Documentation hints" for an explanation. -.. kernel-doc:: include/linux/mtd/nand.h +.. kernel-doc:: include/linux/mtd/rawnand.h :internal: Public Functions Provided diff --git a/Documentation/driver-api/pm/devices.rst b/Documentation/driver-api/pm/devices.rst index bedd32388dac..a0dc2879a152 100644 --- a/Documentation/driver-api/pm/devices.rst +++ b/Documentation/driver-api/pm/devices.rst @@ -675,7 +675,7 @@ sub-domain of the parent domain. Support for power domains is provided through the :c:member:`pm_domain` field of |struct device|. This field is a pointer to an object of type -|struct dev_pm_domain|, defined in :file:`include/linux/pm.h``, providing a set +|struct dev_pm_domain|, defined in :file:`include/linux/pm.h`, providing a set of power management callbacks analogous to the subsystem-level and device driver callbacks that are executed for the given device during all power transitions, instead of the respective subsystem-level callbacks. 
Specifically, if a diff --git a/Documentation/driver-model/driver.txt b/Documentation/driver-model/driver.txt index 4421135826a2..d661e6f7e6a0 100644 --- a/Documentation/driver-model/driver.txt +++ b/Documentation/driver-model/driver.txt @@ -196,12 +196,13 @@ struct driver_attribute { }; Device drivers can export attributes via their sysfs directories. -Drivers can declare attributes using a DRIVER_ATTR macro that works -identically to the DEVICE_ATTR macro. +Drivers can declare attributes using a DRIVER_ATTR_RW and DRIVER_ATTR_RO +macro that works identically to the DEVICE_ATTR_RW and DEVICE_ATTR_RO +macros. Example: -DRIVER_ATTR(debug,0644,show_debug,store_debug); +DRIVER_ATTR_RW(debug); This is equivalent to declaring: diff --git a/Documentation/errseq.rst b/Documentation/errseq.rst new file mode 100644 index 000000000000..4c29bd5afbc5 --- /dev/null +++ b/Documentation/errseq.rst @@ -0,0 +1,149 @@ +The errseq_t datatype +===================== +An errseq_t is a way of recording errors in one place, and allowing any +number of "subscribers" to tell whether it has changed since a previous +point where it was sampled. + +The initial use case for this is tracking errors for file +synchronization syscalls (fsync, fdatasync, msync and sync_file_range), +but it may be usable in other situations. + +It's implemented as an unsigned 32-bit value. The low order bits are +designated to hold an error code (between 1 and MAX_ERRNO). The upper bits +are used as a counter. This is done with atomics instead of locking so that +these functions can be called from any context. + +Note that there is a risk of collisions if new errors are being recorded +frequently, since we have so few bits to use as a counter. + +To mitigate this, the bit between the error value and counter is used as +a flag to tell whether the value has been sampled since a new value was +recorded. That allows us to avoid bumping the counter if no one has +sampled it since the last time an error was recorded. + +Thus we end up with a value that looks something like this:: + + bit: 31..13 12 11..0 + +-----------------+----+----------------+ + | counter | SF | errno | + +-----------------+----+----------------+ + +The general idea is for "watchers" to sample an errseq_t value and keep +it as a running cursor. That value can later be used to tell whether +any new errors have occurred since that sampling was done, and atomically +record the state at the time that it was checked. This allows us to +record errors in one place, and then have a number of "watchers" that +can tell whether the value has changed since they last checked it. + +A new errseq_t should always be zeroed out. An errseq_t value of all zeroes +is the special (but common) case where there has never been an error. An all +zero value thus serves as the "epoch" if one wishes to know whether there +has ever been an error set since it was first initialized. + +API usage +========= +Let me tell you a story about a worker drone. Now, he's a good worker +overall, but the company is a little...management heavy. He has to +report to 77 supervisors today, and tomorrow the "big boss" is coming in +from out of town and he's sure to test the poor fellow too. + +They're all handing him work to do -- so much he can't keep track of who +handed him what, but that's not really a big problem. The supervisors +just want to know when he's finished all of the work they've handed him so +far and whether he made any mistakes since they last asked. 
+ +He might have made the mistake on work they didn't actually hand him, +but he can't keep track of things at that level of detail, all he can +remember is the most recent mistake that he made. + +Here's our worker_drone representation:: + + struct worker_drone { + errseq_t wd_err; /* for recording errors */ + }; + +Every day, the worker_drone starts out with a blank slate:: + + struct worker_drone wd; + + wd.wd_err = (errseq_t)0; + +The supervisors come in and get an initial read for the day. They +don't care about anything that happened before their watch begins:: + + struct supervisor { + errseq_t s_wd_err; /* private "cursor" for wd_err */ + spinlock_t s_wd_err_lock; /* protects s_wd_err */ + } + + struct supervisor su; + + su.s_wd_err = errseq_sample(&wd.wd_err); + spin_lock_init(&su.s_wd_err_lock); + +Now they start handing him tasks to do. Every few minutes they ask him to +finish up all of the work they've handed him so far. Then they ask him +whether he made any mistakes on any of it:: + + spin_lock(&su.su_wd_err_lock); + err = errseq_check_and_advance(&wd.wd_err, &su.s_wd_err); + spin_unlock(&su.su_wd_err_lock); + +Up to this point, that just keeps returning 0. + +Now, the owners of this company are quite miserly and have given him +substandard equipment with which to do his job. Occasionally it +glitches and he makes a mistake. He sighs a heavy sigh, and marks it +down:: + + errseq_set(&wd.wd_err, -EIO); + +...and then gets back to work. The supervisors eventually poll again +and they each get the error when they next check. Subsequent calls will +return 0, until another error is recorded, at which point it's reported +to each of them once. + +Note that the supervisors can't tell how many mistakes he made, only +whether one was made since they last checked, and the latest value +recorded. + +Occasionally the big boss comes in for a spot check and asks the worker +to do a one-off job for him. He's not really watching the worker +full-time like the supervisors, but he does need to know whether a +mistake occurred while his job was processing. + +He can just sample the current errseq_t in the worker, and then use that +to tell whether an error has occurred later:: + + errseq_t since = errseq_sample(&wd.wd_err); + /* submit some work and wait for it to complete */ + err = errseq_check(&wd.wd_err, since); + +Since he's just going to discard "since" after that point, he doesn't +need to advance it here. He also doesn't need any locking since it's +not usable by anyone else. + +Serializing errseq_t cursor updates +=================================== +Note that the errseq_t API does not protect the errseq_t cursor during a +check_and_advance_operation. Only the canonical error code is handled +atomically. In a situation where more than one task might be using the +same errseq_t cursor at the same time, it's important to serialize +updates to that cursor. + +If that's not done, then it's possible for the cursor to go backward +in which case the same error could be reported more than once. + +Because of this, it's often advantageous to first do an errseq_check to +see if anything has changed, and only later do an +errseq_check_and_advance after taking the lock. 
e.g.:: + + if (errseq_check(&wd.wd_err, READ_ONCE(su.s_wd_err)) { + /* su.s_wd_err is protected by s_wd_err_lock */ + spin_lock(&su.s_wd_err_lock); + err = errseq_check_and_advance(&wd.wd_err, &su.s_wd_err); + spin_unlock(&su.s_wd_err_lock); + } + +That avoids the spinlock in the common case where nothing has changed +since the last time it was checked. diff --git a/Documentation/fb/fbcon.txt b/Documentation/fb/fbcon.txt index 4a9739abc860..a38d3aa4d189 100644 --- a/Documentation/fb/fbcon.txt +++ b/Documentation/fb/fbcon.txt @@ -148,6 +148,13 @@ C. Boot options Actually, the underlying fb driver is totally ignorant of console rotation. +5. fbcon=margin: + + This option specifies the color of the margins. The margins are the + leftover area at the right and the bottom of the screen that are not + used by text. By default, this area will be black. The 'color' value + is an integer number that depends on the framebuffer driver being used. + C. Attaching, Detaching and Unloading Before going on how to attach, detach and unload the framebuffer console, an diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking index fe25787ff6d4..75d2d57e2c44 100644 --- a/Documentation/filesystems/Locking +++ b/Documentation/filesystems/Locking @@ -22,7 +22,7 @@ prototypes: struct vfsmount *(*d_automount)(struct path *path); int (*d_manage)(const struct path *, bool); struct dentry *(*d_real)(struct dentry *, const struct inode *, - unsigned int); + unsigned int, unsigned int); locking rules: rename_lock ->d_lock may block rcu-walk diff --git a/Documentation/filesystems/caching/netfs-api.txt b/Documentation/filesystems/caching/netfs-api.txt index aed6b94160b1..0eb31de3a2c1 100644 --- a/Documentation/filesystems/caching/netfs-api.txt +++ b/Documentation/filesystems/caching/netfs-api.txt @@ -151,8 +151,6 @@ To define an object, a structure of the following type should be filled out: void (*mark_pages_cached)(void *cookie_netfs_data, struct address_space *mapping, struct pagevec *cached_pvec); - - void (*now_uncached)(void *cookie_netfs_data); }; This has the following fields: diff --git a/Documentation/filesystems/cifs/AUTHORS b/Documentation/filesystems/cifs/AUTHORS index c98800df677f..9f4f87e16240 100644 --- a/Documentation/filesystems/cifs/AUTHORS +++ b/Documentation/filesystems/cifs/AUTHORS @@ -41,6 +41,11 @@ Igor Mammedov (DFS support) Jeff Layton (many, many fixes, as well as great work on the cifs Kerberos code) Scott Lovenberg Pavel Shilovsky (for great work adding SMB2 support, and various SMB3 features) +Aurelien Aptel (for DFS SMB3 work and some key bug fixes) +Ronnie Sahlberg (for SMB3 xattr work and bug fixes) +Shirish Pargaonkar (for many ACL patches over the years) +Sachin Prabhu (many bug fixes, including for reconnect, copy offload and security) + Test case and Bug Report contributors ------------------------------------- diff --git a/Documentation/filesystems/cifs/README b/Documentation/filesystems/cifs/README index a54788405429..a9da51553ba3 100644 --- a/Documentation/filesystems/cifs/README +++ b/Documentation/filesystems/cifs/README @@ -1,10 +1,14 @@ -The CIFS VFS support for Linux supports many advanced network filesystem -features such as hierarchical dfs like namespace, hardlinks, locking and more. +This module supports the SMB3 family of advanced network protocols (as well +as older dialects, originally called "CIFS" or SMB1). 
+ +The CIFS VFS module for Linux supports many advanced network filesystem +features such as hierarchical DFS like namespace, hardlinks, locking and more. It was designed to comply with the SNIA CIFS Technical Reference (which supersedes the 1992 X/Open SMB Standard) as well as to perform best practice practical interoperability with Windows 2000, Windows XP, Samba and equivalent servers. This code was developed in participation with the Protocol Freedom -Information Foundation. +Information Foundation. CIFS and now SMB3 has now become a defacto +standard for interoperating between Macs and Windows and major NAS appliances. Please see http://protocolfreedom.org/ and @@ -15,30 +19,11 @@ for more details. For questions or bug reports please contact: sfrench@samba.org (sfrench@us.ibm.com) +See the project page at: https://wiki.samba.org/index.php/LinuxCIFS_utils + Build instructions: ================== -For Linux 2.4: -1) Get the kernel source (e.g.from http://www.kernel.org) -and download the cifs vfs source (see the project page -at http://us1.samba.org/samba/Linux_CIFS_client.html) -and change directory into the top of the kernel directory -then patch the kernel (e.g. "patch -p1 < cifs_24.patch") -to add the cifs vfs to your kernel configure options if -it has not already been added (e.g. current SuSE and UL -users do not need to apply the cifs_24.patch since the cifs vfs is -already in the kernel configure menu) and then -mkdir linux/fs/cifs and then copy the current cifs vfs files from -the cifs download to your kernel build directory e.g. - - cp /fs/cifs/* to /fs/cifs - -2) make menuconfig (or make xconfig) -3) select cifs from within the network filesystem choices -4) save and exit -5) make dep -6) make modules (or "make" if CIFS VFS not to be built as a module) - -For Linux 2.6: +For Linux: 1) Download the kernel (e.g. from http://www.kernel.org) and change directory into the top of the kernel directory tree (e.g. /usr/src/linux-2.5.73) @@ -61,16 +46,13 @@ would simply type "make install"). If you do not have the utility mount.cifs (in the Samba 3.0 source tree and on the CIFS VFS web site) copy it to the same directory in which mount.smbfs and similar files reside (usually /sbin). Although the helper software is not -required, mount.cifs is recommended. Eventually the Samba 3.0 utility program -"net" may also be helpful since it may someday provide easier mount syntax for -users who are used to Windows e.g. - net use +required, mount.cifs is recommended. Most distros include a "cifs-utils" +package that includes this utility so it is recommended to install this. + Note that running the Winbind pam/nss module (logon service) on all of your Linux clients is useful in mapping Uids and Gids consistently across the domain to the proper network user. The mount.cifs mount helper can be -trivially built from Samba 3.0 or later source e.g. by executing: - - gcc samba/source/client/mount.cifs.c -o mount.cifs +found at cifs-utils.git on git.samba.org If cifs is built as a module, then the size and number of network buffers and maximum number of simultaneous requests to one server can be configured. @@ -79,6 +61,18 @@ Changing these from their defaults is not recommended. By executing modinfo on kernel/fs/cifs/cifs.ko the list of configuration changes that can be made at module initialization time (by running insmod cifs.ko) can be seen. +Recommendations +=============== +To improve security the SMB2.1 dialect or later (usually will get SMB3) is now +the new default. To use old dialects (e.g. 
to mount Windows XP) use "vers=1.0" +on mount (or vers=2.0 for Windows Vista). Note that the CIFS (vers=1.0) is +much older and less secure than the default dialect SMB3 which includes +many advanced security features such as downgrade attack detection +and encrypted shares and stronger signing and authentication algorithms. +There are additional mount options that may be helpful for SMB3 to get +improved POSIX behavior (NB: can use vers=3.0 to force only SMB3, never 2.1): + "mfsymlinks" and "cifsacl" and "idsfromsid" + Allowing User Mounts ==================== To permit users to mount and unmount over directories they own is possible @@ -98,9 +92,7 @@ and execution of suid programs on the remote target would be enabled by default. This can be changed, as with nfs and other filesystems, by simply specifying "nosuid" among the mount options. For user mounts though to be able to pass the suid flag to mount requires rebuilding -mount.cifs with the following flag: - - gcc samba/source/client/mount.cifs.c -DCIFS_ALLOW_USR_SUID -o mount.cifs +mount.cifs with the following flag: CIFS_ALLOW_USR_SUID There is a corresponding manual page for cifs mounting in the Samba 3.0 and later source tree in docs/manpages/mount.cifs.8 @@ -189,18 +181,18 @@ applications running on the same server as Samba. Use instructions: ================ Once the CIFS VFS support is built into the kernel or installed as a module -(cifs.o), you can use mount syntax like the following to access Samba or Windows -servers: +(cifs.ko), you can use mount syntax like the following to access Samba or +Mac or Windows servers: - mount -t cifs //9.53.216.11/e$ /mnt -o user=myname,pass=mypassword + mount -t cifs //9.53.216.11/e$ /mnt -o username=myname,password=mypassword Before -o the option -v may be specified to make the mount.cifs mount helper display the mount steps more verbosely. After -o the following commonly used cifs vfs specific options are supported: - user= - pass= + username= + password= domain= Other cifs mount options are described below. Use of TCP names (in addition to @@ -246,13 +238,16 @@ the Server's registry. Samba starting with version 3.10 will allow such filenames (ie those which contain valid Linux characters, which normally would be forbidden for Windows/CIFS semantics) as long as the server is configured for Unix Extensions (and the client has not disabled -/proc/fs/cifs/LinuxExtensionsEnabled). - +/proc/fs/cifs/LinuxExtensionsEnabled). In addition the mount option +"mapposix" can be used on CIFS (vers=1.0) to force the mapping of +illegal Windows/NTFS/SMB characters to a remap range (this mount parm +is the default for SMB3). This remap ("mapposix") range is also +compatible with Mac (and "Services for Mac" on some older Windows). CIFS VFS Mount Options ====================== A partial list of the supported mount options follows: - user The user name to use when trying to establish + username The user name to use when trying to establish the CIFS session. password The user password. If the mount helper is installed, the user will be prompted for password diff --git a/Documentation/filesystems/cifs/TODO b/Documentation/filesystems/cifs/TODO index 066ffddc3964..396ecfd6ff4a 100644 --- a/Documentation/filesystems/cifs/TODO +++ b/Documentation/filesystems/cifs/TODO @@ -1,4 +1,4 @@ -Version 2.03 August 1, 2014 +Version 2.04 September 13, 2017 A Partial List of Missing Features ================================== @@ -8,73 +8,69 @@ for visible, important contributions to this module. 
Here is a partial list of the known problems and missing features: a) SMB3 (and SMB3.02) missing optional features: - - RDMA + - RDMA (started) - multichannel (started) - directory leases (improved metadata caching) - T10 copy offload (copy chunk is only mechanism supported) - - encrypted shares b) improved sparse file support c) Directory entry caching relies on a 1 second timer, rather than -using FindNotify or equivalent. - (started) +using Directory Leases d) quota support (needs minor kernel change since quota calls to make it to network filesystems or deviceless filesystems) -e) improve support for very old servers (OS/2 and Win9x for example) -Including support for changing the time remotely (utimes command). +e) Better optimize open to reduce redundant opens (using reference +counts more) and to improve use of compounding in SMB3 to reduce +number of roundtrips. -f) hook lower into the sockets api (as NFS/SunRPC does) to avoid the -extra copy in/out of the socket buffers in some cases. - -g) Better optimize open (and pathbased setfilesize) to reduce the -oplock breaks coming from windows srv. Piggyback identical file -opens on top of each other by incrementing reference count rather -than resending (helps reduce server resource utilization and avoid -spurious oplock breaks). - -h) Add support for storing symlink info to Windows servers -in the Extended Attribute format their SFU clients would recognize. - -i) Finish inotify support so kde and gnome file list windows +f) Finish inotify support so kde and gnome file list windows will autorefresh (partially complete by Asser). Needs minor kernel vfs change to support removing D_NOTIFY on a file. -j) Add GUI tool to configure /proc/fs/cifs settings and for display of +g) Add GUI tool to configure /proc/fs/cifs settings and for display of the CIFS statistics (started) -k) implement support for security and trusted categories of xattrs +h) implement support for security and trusted categories of xattrs (requires minor protocol extension) to enable better support for SELINUX -l) Implement O_DIRECT flag on open (already supported on mount) +i) Implement O_DIRECT flag on open (already supported on mount) -m) Create UID mapping facility so server UIDs can be mapped on a per +j) Create UID mapping facility so server UIDs can be mapped on a per mount or a per server basis to client UIDs or nobody if no mapping -exists. This is helpful when Unix extensions are negotiated to -allow better permission checking when UIDs differ on the server -and client. Add new protocol request to the CIFS protocol -standard for asking the server for the corresponding name of a -particular uid. +exists. Also better integration with winbind for resolving SID owners -n) DOS attrs - returned as pseudo-xattr in Samba format (check VFAT and NTFS for this too) +k) Add tools to take advantage of more smb3 specific ioctls and features -o) mount check for unmatched uids +l) encrypted file support -p) Add support for new vfs entry point for fallocate +m) improved stats gathering, tools (perhaps integration with nfsometer?) -q) Add tools to take advantage of cifs/smb3 specific ioctls and features -such as "CopyChunk" (fast server side file copy) +n) allow setting more NTFS/SMB3 file attributes remotely (currently limited to compressed +file attribute via chflags) and improve user space tools for managing and +viewing them. 
-r) encrypted file support +o) mount helper GUI (to simplify the various configuration options on mount) -s) improved stats gathering, tools (perhaps integration with nfsometer?) +p) autonegotiation of dialects (offering more than one dialect ie SMB3.02, +SMB3, SMB2.1 not just SMB3). -t) allow setting more NTFS/SMB3 file attributes remotely (currently limited to compressed -file attribute via chflags) +q) Allow mount.cifs to be more verbose in reporting errors with dialect +or unsupported feature errors. -u) mount helper GUI (to simplify the various configuration options on mount) +r) updating cifs documentation, and user guid. +s) Addressing bugs found by running a broader set of xfstests in standard +file system xfstest suite. + +t) split cifs and smb3 support into separate modules so legacy (and less +secure) CIFS dialect can be disabled in environments that don't need it +and simplify the code. + +u) Finish up SMB3.1.1 dialect support + +v) POSIX Extensions for SMB3.1.1 KNOWN BUGS ==================================== diff --git a/Documentation/filesystems/cifs/cifs.txt b/Documentation/filesystems/cifs/cifs.txt index 2fac91ac96cf..67756607246e 100644 --- a/Documentation/filesystems/cifs/cifs.txt +++ b/Documentation/filesystems/cifs/cifs.txt @@ -1,24 +1,28 @@ - This is the client VFS module for the Common Internet File System - (CIFS) protocol which is the successor to the Server Message Block + This is the client VFS module for the SMB3 NAS protocol as well + older dialects such as the Common Internet File System (CIFS) + protocol which was the successor to the Server Message Block (SMB) protocol, the native file sharing mechanism for most early PC operating systems. New and improved versions of CIFS are now called SMB2 and SMB3. These dialects are also supported by the CIFS VFS module. CIFS is fully supported by network - file servers such as Windows 2000, 2003, 2008 and 2012 + file servers such as Windows 2000, 2003, 2008, 2012 and 2016 as well by Samba (which provides excellent CIFS - server support for Linux and many other operating systems), so + server support for Linux and many other operating systems), Apple + systems, as well as most Network Attached Storage vendors, so this network filesystem client can mount to a wide variety of servers. The intent of this module is to provide the most advanced network - file system function for CIFS compliant servers, including better - POSIX compliance, secure per-user session establishment, high - performance safe distributed caching (oplock), optional packet + file system function for SMB3 compliant servers, including advanced + security features, excellent parallelized high performance i/o, better + POSIX compliance, secure per-user session establishment, encryption, + high performance safe distributed caching (leases/oplocks), optional packet signing, large files, Unicode support and other internationalization improvements. Since both Samba server and this filesystem client support - the CIFS Unix extensions, the combination can provide a reasonable - alternative to NFSv4 for fileserving in some Linux to Linux environments, - not just in Linux to Windows environments. + the CIFS Unix extensions (and in the future SMB3 POSIX extensions), + the combination can provide a reasonable alternative to other network and + cluster file systems for fileserving in some Linux to Linux environments, + not just in Linux to Windows (or Linux to Mac) environments. 
This filesystem has an mount utility (mount.cifs) that can be obtained from diff --git a/Documentation/filesystems/dax.txt b/Documentation/filesystems/dax.txt index a7e6e14aeb08..3be3b266be41 100644 --- a/Documentation/filesystems/dax.txt +++ b/Documentation/filesystems/dax.txt @@ -63,9 +63,8 @@ Filesystem support consists of - implementing an mmap file operation for DAX files which sets the VM_MIXEDMAP and VM_HUGEPAGE flags on the VMA, and setting the vm_ops to include handlers for fault, pmd_fault, page_mkwrite, pfn_mkwrite. These - handlers should probably call dax_iomap_fault() (for fault and page_mkwrite - handlers), dax_iomap_pmd_fault(), dax_pfn_mkwrite() passing the appropriate - iomap operations. + handlers should probably call dax_iomap_fault() passing the appropriate + fault size and iomap operations. - calling iomap_zero_range() passing appropriate iomap operations instead of block_truncate_page() for DAX files - ensuring that there is sufficient locking between reads, writes, diff --git a/Documentation/filesystems/f2fs.txt b/Documentation/filesystems/f2fs.txt index 273ccb26885e..13c2ff034348 100644 --- a/Documentation/filesystems/f2fs.txt +++ b/Documentation/filesystems/f2fs.txt @@ -164,6 +164,16 @@ io_bits=%u Set the bit size of write IO requests. It should be set with "mode=lfs". usrquota Enable plain user disk quota accounting. grpquota Enable plain group disk quota accounting. +prjquota Enable plain project quota accounting. +usrjquota= Appoint specified file and type during mount, so that quota +grpjquota= information can be properly updated during recovery flow, +prjjquota= : must be in root directory; +jqfmt= : [vfsold,vfsv0,vfsv1]. +offusrjquota Turn off user journelled quota. +offgrpjquota Turn off group journelled quota. +offprjjquota Turn off project journelled quota. +quota Enable plain user disk quota accounting. +noquota Disable all plain disk quota option. ================================================================================ DEBUGFS ENTRIES @@ -209,6 +219,15 @@ Files in /sys/fs/f2fs/ gc_idle = 1 will select the Cost Benefit approach & setting gc_idle = 2 will select the greedy approach. + gc_urgent This parameter controls triggering background GCs + urgently or not. Setting gc_urgent = 0 [default] + makes back to default behavior, while if it is set + to 1, background thread starts to do GC by given + gc_urgent_sleep_time interval. + + gc_urgent_sleep_time This parameter controls sleep time for gc_urgent. + 500 ms is set by default. See above gc_urgent. + reclaim_segments This parameter controls the number of prefree segments to be reclaimed. If the number of prefree segments is larger than the number of segments diff --git a/Documentation/filesystems/orangefs.txt b/Documentation/filesystems/orangefs.txt index 1dfdec790946..e2818b60a5c2 100644 --- a/Documentation/filesystems/orangefs.txt +++ b/Documentation/filesystems/orangefs.txt @@ -45,14 +45,11 @@ upstream version of the kernel client. BUILDING THE USERSPACE FILESYSTEM ON A SINGLE SERVER ==================================================== -When Orangefs is upstream, "--with-kernel" shouldn't be needed, but -until then the path to where the kernel with the Orangefs kernel client -patch was built is needed to ensure that pvfs2-client-core (the bridge -between kernel space and user space) will build properly. You can omit ---prefix if you don't care that things are sprinkled around in -/usr/local. +You can omit --prefix if you don't care that things are sprinkled around in +/usr/local. 
As of version 2.9.6, Orangefs uses Berkeley DB by default, we +will probably be changing the default to lmdb soon. -./configure --prefix=/opt/ofs --with-kernel=/path/to/orangefs/kernel +./configure --prefix=/opt/ofs --with-db-backend=lmdb make @@ -82,9 +79,6 @@ prove things are working with: /opt/osf/bin/pvfs2-ls /mymountpoint -You might not want to enforce selinux, it doesn't seem to matter by -linux 3.11... - If stuff seems to be working, turn on the client core: /opt/osf/sbin/pvfs2-client -p /opt/osf/sbin/pvfs2-client-core diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt index 36f528a7fdd6..8caa60734647 100644 --- a/Documentation/filesystems/overlayfs.txt +++ b/Documentation/filesystems/overlayfs.txt @@ -210,8 +210,11 @@ path as another overlay mount and it may use a lower layer path that is beneath or above the path of another overlay lower layer path. Using an upper layer path and/or a workdir path that are already used by -another overlay mount is not allowed and will fail with EBUSY. Using +another overlay mount is not allowed and may fail with EBUSY. Using partially overlapping paths is not allowed but will not fail with EBUSY. +If files are accessed from two overlayfs mounts which share or overlap the +upper layer and/or workdir path the behavior of the overlay is undefined, +though it will not result in a crash or deadlock. Mounting an overlay using an upper layer path, where the upper layer path was previously used by another mounted overlay in combination with a diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting index 5fb17f49f7a2..93e0a2404532 100644 --- a/Documentation/filesystems/porting +++ b/Documentation/filesystems/porting @@ -228,7 +228,7 @@ anything from oops to silent memory corruption. --- [mandatory] - FS_NOMOUNT is gone. If you use it - just set MS_NOUSER in flags + FS_NOMOUNT is gone. If you use it - just set SB_NOUSER in flags (see rootfs for one kind of solution and bdev/socket/pipe for another). --- diff --git a/Documentation/filesystems/sysfs.txt b/Documentation/filesystems/sysfs.txt index 24da7b32c489..9a3658cc399e 100644 --- a/Documentation/filesystems/sysfs.txt +++ b/Documentation/filesystems/sysfs.txt @@ -366,7 +366,8 @@ struct driver_attribute { Declaring: -DRIVER_ATTR(_name, _mode, _show, _store) +DRIVER_ATTR_RO(_name) +DRIVER_ATTR_RW(_name) Creation/Removal: diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt index 405a3df759b3..5fd325df59e2 100644 --- a/Documentation/filesystems/vfs.txt +++ b/Documentation/filesystems/vfs.txt @@ -988,7 +988,7 @@ struct dentry_operations { struct vfsmount *(*d_automount)(struct path *); int (*d_manage)(const struct path *, bool); struct dentry *(*d_real)(struct dentry *, const struct inode *, - unsigned int); + unsigned int, unsigned int); }; d_revalidate: called when the VFS needs to revalidate a dentry. This diff --git a/Documentation/gpio/drivers-on-gpio.txt b/Documentation/gpio/drivers-on-gpio.txt index 306513251713..9a78d385b92e 100644 --- a/Documentation/gpio/drivers-on-gpio.txt +++ b/Documentation/gpio/drivers-on-gpio.txt @@ -84,6 +84,11 @@ hardware descriptions such as device tree or ACPI: NAND flash MTD subsystem and provides chip access and partition parsing like any other NAND driving hardware. +- ps2-gpio: drivers/input/serio/ps2-gpio.c is used to drive a PS/2 (IBM) serio + bus, data and clock line, by bit banging two GPIO lines. 
It will appear as + any other serio bus to the system and makes it possible to connect drivers + for e.g. keyboards and other PS/2 protocol based devices. + Apart from this there are special GPIO drivers in subsystems like MMC/SD to read card detect and write protect GPIO lines, and in the TTY serial subsystem to emulate MCTRL (modem control) signals CTS/RTS by using two GPIO lines. The diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801 index 0500193434cb..d47702456926 100644 --- a/Documentation/i2c/busses/i2c-i801 +++ b/Documentation/i2c/busses/i2c-i801 @@ -36,6 +36,7 @@ Supported adapters: * Intel Gemini Lake (SOC) * Intel Cannon Lake-H (PCH) * Intel Cannon Lake-LP (PCH) + * Intel Cedar Fork (PCH) Datasheets: Publicly available at the Intel website On Intel Patsburg and later chipsets, both the normal host SMBus controller diff --git a/Documentation/i2c/i2c-topology b/Documentation/i2c/i2c-topology index 1a014fede0b7..f74d78b53d4d 100644 --- a/Documentation/i2c/i2c-topology +++ b/Documentation/i2c/i2c-topology @@ -42,6 +42,10 @@ i2c-arb-gpio-challenge Parent-locked i2c-mux-gpio Normally parent-locked, mux-locked iff all involved gpio pins are controlled by the same i2c root adapter that they mux. +i2c-mux-gpmux Normally parent-locked, mux-locked iff + specified in device-tree. +i2c-mux-ltc4306 Mux-locked +i2c-mux-mlxcpld Parent-locked i2c-mux-pca9541 Parent-locked i2c-mux-pca954x Parent-locked i2c-mux-pinctrl Normally parent-locked, mux-locked iff @@ -50,9 +54,11 @@ i2c-mux-pinctrl Normally parent-locked, mux-locked iff i2c-mux-reg Parent-locked In drivers/iio/ +gyro/mpu3050 Mux-locked imu/inv_mpu6050/ Mux-locked In drivers/media/ +dvb-frontends/lgdt3306a Mux-locked dvb-frontends/m88ds3103 Parent-locked dvb-frontends/rtl2830 Parent-locked dvb-frontends/rtl2832 Mux-locked diff --git a/Documentation/laptops/thinkpad-acpi.txt b/Documentation/laptops/thinkpad-acpi.txt index ba2e7d254842..00b6dfed573c 100644 --- a/Documentation/laptops/thinkpad-acpi.txt +++ b/Documentation/laptops/thinkpad-acpi.txt @@ -121,8 +121,9 @@ space, for 2.6.23+ this is /sys/devices/platform/thinkpad_acpi/. Sysfs device attributes for the sensors and fan are on the thinkpad_hwmon device's sysfs attribute space, but you should locate it looking for a hwmon device with the name attribute of "thinkpad", or -better yet, through libsensors. - +better yet, through libsensors. For 4.14+ sysfs attributes were moved to the +hwmon device (/sys/bus/platform/devices/thinkpad_hwmon/hwmon/hwmon? or +/sys/class/hwmon/hwmon?). Driver version -------------- @@ -1478,3 +1479,7 @@ Sysfs interface changelog: 0x020700: Support for mute-only mixers. Volume control in read-only mode by default. Marker for ALSA mixer support. + +0x030000: Thermal and fan sysfs attributes were moved to the hwmon + device instead of being attached to the backing platform + device. 
diff --git a/Documentation/media/ca.h.rst.exceptions b/Documentation/media/ca.h.rst.exceptions index d7c9fed8c004..553559cc6ad7 100644 --- a/Documentation/media/ca.h.rst.exceptions +++ b/Documentation/media/ca.h.rst.exceptions @@ -16,7 +16,6 @@ replace define CA_NDS :c:type:`ca_descr_info` replace define CA_DSS :c:type:`ca_descr_info` # some typedefs should point to struct/enums -replace typedef ca_pid_t :c:type:`ca_pid` replace typedef ca_slot_info_t :c:type:`ca_slot_info` replace typedef ca_descr_info_t :c:type:`ca_descr_info` replace typedef ca_caps_t :c:type:`ca_caps` diff --git a/Documentation/media/cec-drivers/index.rst b/Documentation/media/cec-drivers/index.rst new file mode 100644 index 000000000000..7ef204823422 --- /dev/null +++ b/Documentation/media/cec-drivers/index.rst @@ -0,0 +1,34 @@ +.. -*- coding: utf-8; mode: rst -*- + +.. include:: + +.. _cec-drivers: + +################################# +CEC driver-specific documentation +################################# + +**Copyright** |copy| 2017 : LinuxTV Developers + +This documentation is free software; you can redistribute it and/or modify it +under the terms of the GNU General Public License as published by the Free +Software Foundation version 2 of the License. + +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +more details. + +For more details see the file COPYING in the source distribution of Linux. + +.. only:: html + + .. class:: toc-title + + Table of Contents + +.. toctree:: + :maxdepth: 5 + :numbered: + + pulse8-cec diff --git a/Documentation/media/cec-drivers/pulse8-cec.rst b/Documentation/media/cec-drivers/pulse8-cec.rst new file mode 100644 index 000000000000..99551c6a9bc5 --- /dev/null +++ b/Documentation/media/cec-drivers/pulse8-cec.rst @@ -0,0 +1,11 @@ +Pulse-Eight CEC Adapter driver +============================== + +The pulse8-cec driver implements the following module option: + +``persistent_config`` +--------------------- + +By default this is off, but when set to 1 the driver will store the current +settings to the device's internal eeprom and restore it the next time the +device is connected to the USB port. 
diff --git a/Documentation/media/dmx.h.rst.exceptions b/Documentation/media/dmx.h.rst.exceptions index 2fdb458564ba..629db384104a 100644 --- a/Documentation/media/dmx.h.rst.exceptions +++ b/Documentation/media/dmx.h.rst.exceptions @@ -40,27 +40,17 @@ replace enum dmx_input :c:type:`dmx_input` replace symbol DMX_IN_FRONTEND :c:type:`dmx_input` replace symbol DMX_IN_DVR :c:type:`dmx_input` -# dmx_source_t symbols -replace enum dmx_source :c:type:`dmx_source` -replace symbol DMX_SOURCE_FRONT0 :c:type:`dmx_source` -replace symbol DMX_SOURCE_FRONT1 :c:type:`dmx_source` -replace symbol DMX_SOURCE_FRONT2 :c:type:`dmx_source` -replace symbol DMX_SOURCE_FRONT3 :c:type:`dmx_source` -replace symbol DMX_SOURCE_DVR0 :c:type:`dmx_source` -replace symbol DMX_SOURCE_DVR1 :c:type:`dmx_source` -replace symbol DMX_SOURCE_DVR2 :c:type:`dmx_source` -replace symbol DMX_SOURCE_DVR3 :c:type:`dmx_source` - - # Flags for struct dmx_sct_filter_params replace define DMX_CHECK_CRC :c:type:`dmx_sct_filter_params` replace define DMX_ONESHOT :c:type:`dmx_sct_filter_params` replace define DMX_IMMEDIATE_START :c:type:`dmx_sct_filter_params` -replace define DMX_KERNEL_CLIENT :c:type:`dmx_sct_filter_params` # some typedefs should point to struct/enums -replace typedef dmx_caps_t :c:type:`dmx_caps` replace typedef dmx_filter_t :c:type:`dmx_filter` replace typedef dmx_pes_type_t :c:type:`dmx_pes_type` replace typedef dmx_input_t :c:type:`dmx_input` -replace typedef dmx_source_t :c:type:`dmx_source` + +ignore symbol DMX_OUT_DECODER +ignore symbol DMX_OUT_TAP +ignore symbol DMX_OUT_TS_TAP +ignore symbol DMX_OUT_TSDEMUX_TAP diff --git a/Documentation/media/dvb-drivers/ci.rst b/Documentation/media/dvb-drivers/ci.rst index 69b07e9d1816..87f3748c49b9 100644 --- a/Documentation/media/dvb-drivers/ci.rst +++ b/Documentation/media/dvb-drivers/ci.rst @@ -143,7 +143,6 @@ All these ioctls are also valid for the High level CI interface #define CA_GET_MSG _IOR('o', 132, ca_msg_t) #define CA_SEND_MSG _IOW('o', 133, ca_msg_t) #define CA_SET_DESCR _IOW('o', 134, ca_descr_t) -#define CA_SET_PID _IOW('o', 135, ca_pid_t) On querying the device, the device yields information thus: diff --git a/Documentation/media/dvb-drivers/index.rst b/Documentation/media/dvb-drivers/index.rst index ea0da6d63299..376141143ae9 100644 --- a/Documentation/media/dvb-drivers/index.rst +++ b/Documentation/media/dvb-drivers/index.rst @@ -19,7 +19,9 @@ more details. For more details see the file COPYING in the source distribution of Linux. -.. class:: toc-title +.. only:: html + + .. 
class:: toc-title Table of Contents diff --git a/Documentation/media/frontend.h.rst.exceptions b/Documentation/media/frontend.h.rst.exceptions index 7656770f1936..f7c4df620a52 100644 --- a/Documentation/media/frontend.h.rst.exceptions +++ b/Documentation/media/frontend.h.rst.exceptions @@ -25,19 +25,9 @@ ignore define DTV_MAX_COMMAND ignore define MAX_DTV_STATS ignore define DTV_IOCTL_MAX_MSGS -# Stats enum is documented altogether -replace enum fecap_scale_params :ref:`frontend-stat-properties` -replace symbol FE_SCALE_COUNTER frontend-stat-properties -replace symbol FE_SCALE_DECIBEL frontend-stat-properties -replace symbol FE_SCALE_NOT_AVAILABLE frontend-stat-properties -replace symbol FE_SCALE_RELATIVE frontend-stat-properties - # the same reference is used for both get and set ioctls replace ioctl FE_SET_PROPERTY :c:type:`FE_GET_PROPERTY` -# Ignore struct used only internally at Kernel -ignore struct dtv_cmds_h - # Typedefs that use the enum reference replace typedef fe_sec_voltage_t :c:type:`fe_sec_voltage` @@ -45,3 +35,178 @@ replace typedef fe_sec_voltage_t :c:type:`fe_sec_voltage` replace define FE_TUNE_MODE_ONESHOT :c:func:`FE_SET_FRONTEND_TUNE_MODE` replace define LNA_AUTO dtv-lna replace define NO_STREAM_ID_FILTER dtv-stream-id + +# Those enums are defined at the frontend.h header, and not externally + +ignore symbol FE_IS_STUPID +ignore symbol FE_CAN_INVERSION_AUTO +ignore symbol FE_CAN_FEC_1_2 +ignore symbol FE_CAN_FEC_2_3 +ignore symbol FE_CAN_FEC_3_4 +ignore symbol FE_CAN_FEC_4_5 +ignore symbol FE_CAN_FEC_5_6 +ignore symbol FE_CAN_FEC_6_7 +ignore symbol FE_CAN_FEC_7_8 +ignore symbol FE_CAN_FEC_8_9 +ignore symbol FE_CAN_FEC_AUTO +ignore symbol FE_CAN_QPSK +ignore symbol FE_CAN_QAM_16 +ignore symbol FE_CAN_QAM_32 +ignore symbol FE_CAN_QAM_64 +ignore symbol FE_CAN_QAM_128 +ignore symbol FE_CAN_QAM_256 +ignore symbol FE_CAN_QAM_AUTO +ignore symbol FE_CAN_TRANSMISSION_MODE_AUTO +ignore symbol FE_CAN_BANDWIDTH_AUTO +ignore symbol FE_CAN_GUARD_INTERVAL_AUTO +ignore symbol FE_CAN_HIERARCHY_AUTO +ignore symbol FE_CAN_8VSB +ignore symbol FE_CAN_16VSB +ignore symbol FE_HAS_EXTENDED_CAPS +ignore symbol FE_CAN_MULTISTREAM +ignore symbol FE_CAN_TURBO_FEC +ignore symbol FE_CAN_2G_MODULATION +ignore symbol FE_NEEDS_BENDING +ignore symbol FE_CAN_RECOVER +ignore symbol FE_CAN_MUTE_TS + +ignore symbol QPSK +ignore symbol QAM_16 +ignore symbol QAM_32 +ignore symbol QAM_64 +ignore symbol QAM_128 +ignore symbol QAM_256 +ignore symbol QAM_AUTO +ignore symbol VSB_8 +ignore symbol VSB_16 +ignore symbol PSK_8 +ignore symbol APSK_16 +ignore symbol APSK_32 +ignore symbol DQPSK +ignore symbol QAM_4_NR + +ignore symbol SEC_VOLTAGE_13 +ignore symbol SEC_VOLTAGE_18 +ignore symbol SEC_VOLTAGE_OFF + +ignore symbol SEC_TONE_ON +ignore symbol SEC_TONE_OFF + +ignore symbol SEC_MINI_A +ignore symbol SEC_MINI_B + +ignore symbol FE_NONE +ignore symbol FE_HAS_SIGNAL +ignore symbol FE_HAS_CARRIER +ignore symbol FE_HAS_VITERBI +ignore symbol FE_HAS_SYNC +ignore symbol FE_HAS_LOCK +ignore symbol FE_REINIT +ignore symbol FE_TIMEDOUT + +ignore symbol FEC_NONE +ignore symbol FEC_1_2 +ignore symbol FEC_2_3 +ignore symbol FEC_3_4 +ignore symbol FEC_4_5 +ignore symbol FEC_5_6 +ignore symbol FEC_6_7 +ignore symbol FEC_7_8 +ignore symbol FEC_8_9 +ignore symbol FEC_AUTO +ignore symbol FEC_3_5 +ignore symbol FEC_9_10 +ignore symbol FEC_2_5 + +ignore symbol TRANSMISSION_MODE_AUTO +ignore symbol TRANSMISSION_MODE_1K +ignore symbol TRANSMISSION_MODE_2K +ignore symbol TRANSMISSION_MODE_8K +ignore symbol TRANSMISSION_MODE_4K +ignore 
symbol TRANSMISSION_MODE_16K +ignore symbol TRANSMISSION_MODE_32K +ignore symbol TRANSMISSION_MODE_C1 +ignore symbol TRANSMISSION_MODE_C3780 +ignore symbol TRANSMISSION_MODE_2K +ignore symbol TRANSMISSION_MODE_8K + +ignore symbol GUARD_INTERVAL_AUTO +ignore symbol GUARD_INTERVAL_1_128 +ignore symbol GUARD_INTERVAL_1_32 +ignore symbol GUARD_INTERVAL_1_16 +ignore symbol GUARD_INTERVAL_1_8 +ignore symbol GUARD_INTERVAL_1_4 +ignore symbol GUARD_INTERVAL_19_128 +ignore symbol GUARD_INTERVAL_19_256 +ignore symbol GUARD_INTERVAL_PN420 +ignore symbol GUARD_INTERVAL_PN595 +ignore symbol GUARD_INTERVAL_PN945 + +ignore symbol HIERARCHY_NONE +ignore symbol HIERARCHY_AUTO +ignore symbol HIERARCHY_1 +ignore symbol HIERARCHY_2 +ignore symbol HIERARCHY_4 + +ignore symbol INTERLEAVING_NONE +ignore symbol INTERLEAVING_AUTO +ignore symbol INTERLEAVING_240 +ignore symbol INTERLEAVING_720 + +ignore symbol PILOT_ON +ignore symbol PILOT_OFF +ignore symbol PILOT_AUTO + +ignore symbol ROLLOFF_35 +ignore symbol ROLLOFF_20 +ignore symbol ROLLOFF_25 +ignore symbol ROLLOFF_AUTO + +ignore symbol INVERSION_ON +ignore symbol INVERSION_OFF +ignore symbol INVERSION_AUTO + +ignore symbol SYS_UNDEFINED +ignore symbol SYS_DVBC_ANNEX_A +ignore symbol SYS_DVBC_ANNEX_B +ignore symbol SYS_DVBC_ANNEX_C +ignore symbol SYS_ISDBC +ignore symbol SYS_DVBT +ignore symbol SYS_DVBT2 +ignore symbol SYS_ISDBT +ignore symbol SYS_ATSC +ignore symbol SYS_ATSCMH +ignore symbol SYS_DTMB +ignore symbol SYS_DVBS +ignore symbol SYS_DVBS2 +ignore symbol SYS_TURBO +ignore symbol SYS_ISDBS +ignore symbol SYS_DAB +ignore symbol SYS_DSS +ignore symbol SYS_CMMB +ignore symbol SYS_DVBH + +ignore symbol ATSCMH_SCCC_BLK_SEP +ignore symbol ATSCMH_SCCC_BLK_COMB +ignore symbol ATSCMH_SCCC_BLK_RES + +ignore symbol ATSCMH_SCCC_CODE_HLF +ignore symbol ATSCMH_SCCC_CODE_QTR +ignore symbol ATSCMH_SCCC_CODE_RES + +ignore symbol ATSCMH_RSFRAME_ENS_PRI +ignore symbol ATSCMH_RSFRAME_ENS_SEC + +ignore symbol ATSCMH_RSFRAME_PRI_ONLY +ignore symbol ATSCMH_RSFRAME_PRI_SEC +ignore symbol ATSCMH_RSFRAME_RES + +ignore symbol ATSCMH_RSCODE_211_187 +ignore symbol ATSCMH_RSCODE_223_187 +ignore symbol ATSCMH_RSCODE_235_187 +ignore symbol ATSCMH_RSCODE_RES + +ignore symbol FE_SCALE_NOT_AVAILABLE +ignore symbol FE_SCALE_DECIBEL +ignore symbol FE_SCALE_RELATIVE +ignore symbol FE_SCALE_COUNTER diff --git a/Documentation/media/index.rst b/Documentation/media/index.rst index 7f8f0af620ce..1cf5316c8ff8 100644 --- a/Documentation/media/index.rst +++ b/Documentation/media/index.rst @@ -1,7 +1,11 @@ Linux Media Subsystem Documentation =================================== -Contents: +.. only:: html + + .. class:: toc-title + + Table of Contents .. toctree:: :maxdepth: 2 @@ -10,6 +14,7 @@ Contents: media_kapi dvb-drivers/index v4l-drivers/index + cec-drivers/index .. only:: subproject diff --git a/Documentation/media/kapi/cec-core.rst b/Documentation/media/kapi/cec-core.rst index 8a65c69ed071..28866259998c 100644 --- a/Documentation/media/kapi/cec-core.rst +++ b/Documentation/media/kapi/cec-core.rst @@ -107,6 +107,7 @@ your driver: int (*adap_transmit)(struct cec_adapter *adap, u8 attempts, u32 signal_free_time, struct cec_msg *msg); void (*adap_status)(struct cec_adapter *adap, struct seq_file *file); + void (*adap_free)(struct cec_adapter *adap); /* High-level callbacks */ ... @@ -184,6 +185,14 @@ To log the current CEC hardware status: This optional callback can be used to show the status of the CEC hardware. 
The status is available through debugfs: cat /sys/kernel/debug/cec/cecX/status +To free any resources when the adapter is deleted: + +.. c:function:: + void (*adap_free)(struct cec_adapter *adap); + +This optional callback can be used to free any resources that might have been +allocated by the driver. It's called from cec_delete_adapter. + Your adapter driver will also have to react to events (typically interrupt driven) by calling into the framework in the following situations: @@ -336,3 +345,34 @@ log_addrs->num_log_addrs set to 0. The block argument is ignored when unconfiguring. This function will just return if the physical address is invalid. Once the physical address becomes valid, then the framework will attempt to claim these logical addresses. + +CEC Pin framework +----------------- + +Most CEC hardware operates on full CEC messages where the software provides +the message and the hardware handles the low-level CEC protocol. But some +hardware only drives the CEC pin and software has to handle the low-level +CEC protocol. The CEC pin framework was created to handle such devices. + +Note that due to the close-to-realtime requirements it can never be guaranteed +to work 100%. This framework uses highres timers internally, but if a +timer goes off too late by more than 300 microseconds wrong results can +occur. In reality it appears to be fairly reliable. + +One advantage of this low-level implementation is that it can be used as +a cheap CEC analyser, especially if interrupts can be used to detect +CEC pin transitions from low to high or vice versa. + +.. kernel-doc:: include/media/cec-pin.h + +CEC Notifier framework +---------------------- + +Most drm HDMI implementations have an integrated CEC implementation and no +notifier support is needed. But some have independent CEC implementations +that have their own driver. This could be an IP block for an SoC or a +completely separate chip that deals with the CEC pin. For those cases a +drm driver can install a notifier and use the notifier to inform the +CEC driver about changes in the physical address. + +.. kernel-doc:: include/media/cec-notifier.h diff --git a/Documentation/media/kapi/csi2.rst b/Documentation/media/kapi/csi2.rst index e33fcb967922..0560100efca2 100644 --- a/Documentation/media/kapi/csi2.rst +++ b/Documentation/media/kapi/csi2.rst @@ -51,6 +51,16 @@ not active. Some transmitters do this automatically but some have to be explicitly programmed to do so, and some are unable to do so altogether due to hardware constraints. +Stopping the transmitter +^^^^^^^^^^^^^^^^^^^^^^^^ + +A transmitter stops sending the stream of images as a result of +calling the ``.s_stream()`` callback. Some transmitters may stop the +stream at a frame boundary whereas others stop immediately, +effectively leaving the current frame unfinished. The receiver driver +should not make assumptions either way, but function properly in both +cases. + Receiver drivers ---------------- diff --git a/Documentation/media/kapi/v4l2-event.rst b/Documentation/media/kapi/v4l2-event.rst index 9a5e31546ae3..9938d21ef4d1 100644 --- a/Documentation/media/kapi/v4l2-event.rst +++ b/Documentation/media/kapi/v4l2-event.rst @@ -67,6 +67,8 @@ type). The ops argument allows the driver to specify a number of callbacks: +.. 
tabularcolumns:: |p{1.5cm}|p{16.0cm}| + ======== ============================================================== Callback Description ======== ============================================================== diff --git a/Documentation/media/media_kapi.rst b/Documentation/media/media_kapi.rst index bc0638956a43..83da736fad72 100644 --- a/Documentation/media/media_kapi.rst +++ b/Documentation/media/media_kapi.rst @@ -20,7 +20,9 @@ more details. For more details see the file COPYING in the source distribution of Linux. -.. class:: toc-title +.. only:: html + + .. class:: toc-title Table of Contents diff --git a/Documentation/media/media_uapi.rst b/Documentation/media/media_uapi.rst index fd8ebe002cd2..28eb35a1f965 100644 --- a/Documentation/media/media_uapi.rst +++ b/Documentation/media/media_uapi.rst @@ -14,7 +14,9 @@ any later version published by the Free Software Foundation. A copy of the license is included in the chapter entitled "GNU Free Documentation License". -.. class:: toc-title +.. only:: html + + .. class:: toc-title Table of Contents diff --git a/Documentation/media/uapi/cec/cec-api.rst b/Documentation/media/uapi/cec/cec-api.rst index bb018709970c..b68ca9c1d2e0 100644 --- a/Documentation/media/uapi/cec/cec-api.rst +++ b/Documentation/media/uapi/cec/cec-api.rst @@ -10,7 +10,10 @@ Part V - Consumer Electronics Control API This part describes the CEC: Consumer Electronics Control -.. class:: toc-title + +.. only:: html + + .. class:: toc-title Table of Contents diff --git a/Documentation/media/uapi/cec/cec-func-close.rst b/Documentation/media/uapi/cec/cec-func-close.rst index 895d9c2d1c04..334358dfa72e 100644 --- a/Documentation/media/uapi/cec/cec-func-close.rst +++ b/Documentation/media/uapi/cec/cec-func-close.rst @@ -40,7 +40,7 @@ freed. The device configuration remain unchanged. Return Value ============ -:c:func:`close()` returns 0 on success. On error, -1 is returned, and +:c:func:`close() ` returns 0 on success. On error, -1 is returned, and ``errno`` is set appropriately. Possible error codes are: ``EBADF`` diff --git a/Documentation/media/uapi/cec/cec-func-ioctl.rst b/Documentation/media/uapi/cec/cec-func-ioctl.rst index 22fb6304a2df..e2b6260b0086 100644 --- a/Documentation/media/uapi/cec/cec-func-ioctl.rst +++ b/Documentation/media/uapi/cec/cec-func-ioctl.rst @@ -39,7 +39,7 @@ Arguments Description =========== -The :c:func:`ioctl()` function manipulates cec device parameters. The +The :c:func:`ioctl() ` function manipulates cec device parameters. The argument ``fd`` must be an open file descriptor. The ioctl ``request`` code specifies the cec function to be called. It diff --git a/Documentation/media/uapi/cec/cec-func-open.rst b/Documentation/media/uapi/cec/cec-func-open.rst index 18dfb62f2efe..5d6663a649bd 100644 --- a/Documentation/media/uapi/cec/cec-func-open.rst +++ b/Documentation/media/uapi/cec/cec-func-open.rst @@ -46,7 +46,7 @@ Arguments Description =========== -To open a cec device applications call :c:func:`open()` with the +To open a cec device applications call :c:func:`open() ` with the desired device name. The function has no side effects; the device configuration remain unchanged. @@ -58,7 +58,7 @@ EBADF. Return Value ============ -:c:func:`open()` returns the new file descriptor on success. On error, +:c:func:`open() ` returns the new file descriptor on success. On error, -1 is returned, and ``errno`` is set appropriately. 
Possible error codes include: diff --git a/Documentation/media/uapi/cec/cec-func-poll.rst b/Documentation/media/uapi/cec/cec-func-poll.rst index fa0abd8fb160..d49f1ee0742d 100644 --- a/Documentation/media/uapi/cec/cec-func-poll.rst +++ b/Documentation/media/uapi/cec/cec-func-poll.rst @@ -39,10 +39,10 @@ Arguments Description =========== -With the :c:func:`poll()` function applications can wait for CEC +With the :c:func:`poll() ` function applications can wait for CEC events. -On success :c:func:`poll()` returns the number of file descriptors +On success :c:func:`poll() ` returns the number of file descriptors that have been selected (that is, file descriptors for which the ``revents`` field of the respective struct :c:type:`pollfd` is non-zero). CEC devices set the ``POLLIN`` and ``POLLRDNORM`` flags in @@ -53,13 +53,13 @@ then the ``POLLPRI`` flag is set. When the function times out it returns a value of zero, on failure it returns -1 and the ``errno`` variable is set appropriately. -For more details see the :c:func:`poll()` manual page. +For more details see the :c:func:`poll() ` manual page. Return Value ============ -On success, :c:func:`poll()` returns the number structures which have +On success, :c:func:`poll() ` returns the number structures which have non-zero ``revents`` fields, or zero if the call timed out. On error -1 is returned, and the ``errno`` variable is set appropriately: diff --git a/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst b/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst index 6d7bf7bef3eb..6c1f6efb822e 100644 --- a/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst +++ b/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst @@ -21,7 +21,7 @@ Arguments ========= ``fd`` - File descriptor returned by :ref:`open() `. + File descriptor returned by :c:func:`open() `. ``argp`` @@ -121,6 +121,13 @@ returns the information to the application. The ioctl never fails. high. This makes it impossible to use CEC to wake up displays that set the HPD pin low when in standby mode, but keep the CEC bus alive. + * .. _`CEC-CAP-MONITOR-PIN`: + + - ``CEC_CAP_MONITOR_PIN`` + - 0x00000080 + - The CEC hardware can monitor CEC pin changes from low to high voltage + and vice versa. When in pin monitoring mode the application will + receive ``CEC_EVENT_PIN_CEC_LOW`` and ``CEC_EVENT_PIN_CEC_HIGH`` events. diff --git a/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst b/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst index fcf863ab6f43..84f431a022ad 100644 --- a/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst +++ b/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst @@ -48,7 +48,9 @@ can only be called by a file descriptor in initiator mode (see :ref:`CEC_S_MODE` the ``EBUSY`` error code will be returned. To clear existing logical addresses set ``num_log_addrs`` to 0. All other fields -will be ignored in that case. The adapter will go to the unconfigured state. +will be ignored in that case. The adapter will go to the unconfigured state and the +``cec_version``, ``vendor_id`` and ``osd_name`` fields are all reset to their default +values (CEC version 2.0, no vendor ID and an empty OSD name). If the physical address is valid (see :ref:`ioctl CEC_ADAP_S_PHYS_ADDR `), then this ioctl will block until all requested logical @@ -63,7 +65,7 @@ logical address types are already defined will return with error ``EBUSY``. .. c:type:: cec_log_addrs -.. tabularcolumns:: |p{1.0cm}|p{7.5cm}|p{8.0cm}| +.. tabularcolumns:: |p{1.0cm}|p{8.0cm}|p{7.5cm}| .. 
cssclass:: longtable @@ -146,6 +148,9 @@ logical address types are already defined will return with error ``EBUSY``. give the CEC framework more information about the device type, even though the framework won't use it directly in the CEC message. + +.. tabularcolumns:: |p{7.8cm}|p{1.0cm}|p{8.7cm}| + .. _cec-log-addrs-flags: .. flat-table:: Flags for struct cec_log_addrs @@ -173,7 +178,7 @@ logical address types are already defined will return with error ``EBUSY``. to avoid trivial snooping of the keystrokes. * .. _`CEC-LOG-ADDRS-FL-CDC-ONLY`: - - `CEC_LOG_ADDRS_FL_CDC_ONLY` + - ``CEC_LOG_ADDRS_FL_CDC_ONLY`` - 4 - If this flag is set, then the device is CDC-Only. CDC-Only CEC devices are CEC devices that can only handle CDC messages. @@ -181,7 +186,7 @@ logical address types are already defined will return with error ``EBUSY``. All other messages are ignored. -.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}| +.. tabularcolumns:: |p{7.8cm}|p{1.0cm}|p{8.7cm}| .. _cec-versions: diff --git a/Documentation/media/uapi/cec/cec-ioc-dqevent.rst b/Documentation/media/uapi/cec/cec-ioc-dqevent.rst index 4d3570c2e0b3..a5c821809cc6 100644 --- a/Documentation/media/uapi/cec/cec-ioc-dqevent.rst +++ b/Documentation/media/uapi/cec/cec-ioc-dqevent.rst @@ -22,7 +22,7 @@ Arguments ========= ``fd`` - File descriptor returned by :ref:`open() `. + File descriptor returned by :c:func:`open() `. ``argp`` @@ -87,7 +87,7 @@ it is guaranteed that the state did change in between the two events. this is more than enough. -.. tabularcolumns:: |p{1.0cm}|p{4.2cm}|p{2.5cm}|p{8.8cm}| +.. tabularcolumns:: |p{1.0cm}|p{4.4cm}|p{2.5cm}|p{9.6cm}| .. c:type:: cec_event @@ -98,10 +98,11 @@ it is guaranteed that the state did change in between the two events. * - __u64 - ``ts`` - - :cspan:`1` Timestamp of the event in ns. + - :cspan:`1`\ Timestamp of the event in ns. - The timestamp has been taken from the ``CLOCK_MONOTONIC`` clock. To access - the same clock from userspace use :c:func:`clock_gettime`. + The timestamp has been taken from the ``CLOCK_MONOTONIC`` clock. + + To access the same clock from userspace use :c:func:`clock_gettime`. * - __u32 - ``event`` - :cspan:`1` The CEC event type, see :ref:`cec-events`. @@ -146,6 +147,20 @@ it is guaranteed that the state did change in between the two events. - 2 - Generated if one or more CEC messages were lost because the application didn't dequeue CEC messages fast enough. + * .. _`CEC-EVENT-PIN-CEC-LOW`: + + - ``CEC_EVENT_PIN_CEC_LOW`` + - 3 + - Generated if the CEC pin goes from a high voltage to a low voltage. + Only applies to adapters that have the ``CEC_CAP_MONITOR_PIN`` + capability set. + * .. _`CEC-EVENT-PIN-CEC-HIGH`: + + - ``CEC_EVENT_PIN_CEC_HIGH`` + - 4 + - Generated if the CEC pin goes from a low voltage to a high voltage. + Only applies to adapters that have the ``CEC_CAP_MONITOR_PIN`` + capability set. .. tabularcolumns:: |p{6.0cm}|p{0.6cm}|p{10.9cm}| @@ -165,6 +180,12 @@ it is guaranteed that the state did change in between the two events. opened. See the table above for which events do this. This allows applications to learn the initial state of the CEC adapter at open() time. + * .. _`CEC-EVENT-FL-DROPPED-EVENTS`: + + - ``CEC_EVENT_FL_DROPPED_EVENTS`` + - 2 + - Set if one or more events of the given event type have been dropped. + This is an indication that the application cannot keep up. 
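A minimal usage sketch for these pin-monitoring events and the dropped-events flag is shown below. It is an illustration only and assumes a ``/dev/cec0`` device node, an adapter that advertises ``CEC_CAP_MONITOR_PIN``, and a caller holding ``CAP_NET_ADMIN``:

.. code-block:: c

    /* Sketch: dump CEC pin transitions via CEC_DQEVENT.
     * Assumes /dev/cec0, CEC_CAP_MONITOR_PIN support and CAP_NET_ADMIN.
     */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/cec.h>

    int main(void)
    {
        struct cec_caps caps;
        __u32 mode = CEC_MODE_NO_INITIATOR | CEC_MODE_MONITOR_PIN;
        int fd = open("/dev/cec0", O_RDWR);

        if (fd < 0 || ioctl(fd, CEC_ADAP_G_CAPS, &caps) ||
            !(caps.capabilities & CEC_CAP_MONITOR_PIN))
            return 1;        /* no device, or pin monitoring unsupported */
        if (ioctl(fd, CEC_S_MODE, &mode))
            return 1;        /* requires CAP_NET_ADMIN */

        for (;;) {
            struct cec_event ev;

            /* Blocks until an event arrives (fd opened in blocking mode). */
            if (ioctl(fd, CEC_DQEVENT, &ev))
                break;
            if (ev.flags & CEC_EVENT_FL_DROPPED_EVENTS)
                fprintf(stderr, "pin events dropped: reader too slow\n");
            if (ev.event == CEC_EVENT_PIN_CEC_LOW ||
                ev.event == CEC_EVENT_PIN_CEC_HIGH)
                printf("%llu ns: CEC pin went %s\n",
                       (unsigned long long)ev.ts,
                       ev.event == CEC_EVENT_PIN_CEC_LOW ? "low" : "high");
        }
        return 0;
    }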
diff --git a/Documentation/media/uapi/cec/cec-ioc-g-mode.rst b/Documentation/media/uapi/cec/cec-ioc-g-mode.rst index 664f0d47bbcd..508e2e325683 100644 --- a/Documentation/media/uapi/cec/cec-ioc-g-mode.rst +++ b/Documentation/media/uapi/cec/cec-ioc-g-mode.rst @@ -108,6 +108,8 @@ Available follower modes are: .. _cec-mode-follower_e: +.. cssclass:: longtable + .. flat-table:: Follower Modes :header-rows: 0 :stub-columns: 0 @@ -149,13 +151,28 @@ Available follower modes are: code. You cannot become a follower if :ref:`CEC_CAP_TRANSMIT ` is not set or if :ref:`CEC_MODE_NO_INITIATOR ` was specified, the ``EINVAL`` error code is returned in that case. + * .. _`CEC-MODE-MONITOR-PIN`: + + - ``CEC_MODE_MONITOR_PIN`` + - 0xd0 + - Put the file descriptor into pin monitoring mode. Can only be used in + combination with :ref:`CEC_MODE_NO_INITIATOR `, + otherwise the ``EINVAL`` error code will be returned. + This mode requires that the :ref:`CEC_CAP_MONITOR_PIN ` + capability is set, otherwise the ``EINVAL`` error code is returned. + While in pin monitoring mode this file descriptor can receive the + ``CEC_EVENT_PIN_CEC_LOW`` and ``CEC_EVENT_PIN_CEC_HIGH`` events to see the + low-level CEC pin transitions. This is very useful for debugging. + This mode is only allowed if the process has the ``CAP_NET_ADMIN`` + capability. If that is not set, then the ``EPERM`` error code is returned. * .. _`CEC-MODE-MONITOR`: - ``CEC_MODE_MONITOR`` - 0xe0 - Put the file descriptor into monitor mode. Can only be used in - combination with :ref:`CEC_MODE_NO_INITIATOR `, otherwise EINVAL error - code will be returned. In monitor mode all messages this CEC + combination with :ref:`CEC_MODE_NO_INITIATOR `,i + otherwise the ``EINVAL`` error code will be returned. + In monitor mode all messages this CEC device transmits and all messages it receives (both broadcast messages and directed messages for one its logical addresses) will be reported. This is very useful for debugging. This is only @@ -191,55 +208,68 @@ Core message processing details: * .. _`CEC-MSG-GET-CEC-VERSION`: - ``CEC_MSG_GET_CEC_VERSION`` - - When in passthrough mode this message has to be handled by - userspace, otherwise the core will return the CEC version that was - set with :ref:`ioctl CEC_ADAP_S_LOG_ADDRS `. + - The core will return the CEC version that was set with + :ref:`ioctl CEC_ADAP_S_LOG_ADDRS `, + except when in passthrough mode. In passthrough mode the core + does nothing and this message has to be handled by a follower + instead. * .. _`CEC-MSG-GIVE-DEVICE-VENDOR-ID`: - ``CEC_MSG_GIVE_DEVICE_VENDOR_ID`` - - When in passthrough mode this message has to be handled by - userspace, otherwise the core will return the vendor ID that was - set with :ref:`ioctl CEC_ADAP_S_LOG_ADDRS `. + - The core will return the vendor ID that was set with + :ref:`ioctl CEC_ADAP_S_LOG_ADDRS `, + except when in passthrough mode. In passthrough mode the core + does nothing and this message has to be handled by a follower + instead. * .. _`CEC-MSG-ABORT`: - ``CEC_MSG_ABORT`` - - When in passthrough mode this message has to be handled by - userspace, otherwise the core will return a feature refused - message as per the specification. + - The core will return a Feature Abort message with reason + 'Feature Refused' as per the specification, except when in + passthrough mode. In passthrough mode the core does nothing + and this message has to be handled by a follower instead. * .. 
_`CEC-MSG-GIVE-PHYSICAL-ADDR`: - ``CEC_MSG_GIVE_PHYSICAL_ADDR`` - - When in passthrough mode this message has to be handled by - userspace, otherwise the core will report the current physical - address. + - The core will report the current physical address, except when + in passthrough mode. In passthrough mode the core does nothing + and this message has to be handled by a follower instead. * .. _`CEC-MSG-GIVE-OSD-NAME`: - ``CEC_MSG_GIVE_OSD_NAME`` - - When in passthrough mode this message has to be handled by - userspace, otherwise the core will report the current OSD name as - was set with :ref:`ioctl CEC_ADAP_S_LOG_ADDRS `. + - The core will report the current OSD name that was set with + :ref:`ioctl CEC_ADAP_S_LOG_ADDRS `, + except when in passthrough mode. In passthrough mode the core + does nothing and this message has to be handled by a follower + instead. * .. _`CEC-MSG-GIVE-FEATURES`: - ``CEC_MSG_GIVE_FEATURES`` - - When in passthrough mode this message has to be handled by - userspace, otherwise the core will report the current features as - was set with :ref:`ioctl CEC_ADAP_S_LOG_ADDRS ` - or the message is ignored if the CEC version was older than 2.0. + - The core will do nothing if the CEC version is older than 2.0, + otherwise it will report the current features that were set with + :ref:`ioctl CEC_ADAP_S_LOG_ADDRS `, + except when in passthrough mode. In passthrough mode the core + does nothing (for any CEC version) and this message has to be handled + by a follower instead. * .. _`CEC-MSG-USER-CONTROL-PRESSED`: - ``CEC_MSG_USER_CONTROL_PRESSED`` - - If :ref:`CEC_CAP_RC ` is set, then generate a remote control key - press. This message is always passed on to userspace. + - If :ref:`CEC_CAP_RC ` is set and if + :ref:`CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU ` + is set, then generate a remote control key + press. This message is always passed on to the follower(s). * .. _`CEC-MSG-USER-CONTROL-RELEASED`: - ``CEC_MSG_USER_CONTROL_RELEASED`` - - If :ref:`CEC_CAP_RC ` is set, then generate a remote control key - release. This message is always passed on to userspace. + - If :ref:`CEC_CAP_RC ` is set and if + :ref:`CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU ` + is set, then generate a remote control key + release. This message is always passed on to the follower(s). * .. _`CEC-MSG-REPORT-PHYSICAL-ADDR`: - ``CEC_MSG_REPORT_PHYSICAL_ADDR`` - The CEC framework will make note of the reported physical address - and then just pass the message on to userspace. + and then just pass the message on to the follower(s). diff --git a/Documentation/media/uapi/cec/cec-ioc-receive.rst b/Documentation/media/uapi/cec/cec-ioc-receive.rst index 267044f7ac30..0f397c535a4c 100644 --- a/Documentation/media/uapi/cec/cec-ioc-receive.rst +++ b/Documentation/media/uapi/cec/cec-ioc-receive.rst @@ -195,6 +195,8 @@ View On' messages from initiator 0xf ('Unregistered') to destination 0 ('TV'). valid if the :ref:`CEC_TX_STATUS_ERROR ` status bit is set. +.. tabularcolumns:: |p{6.2cm}|p{1.0cm}|p{10.3cm}| + .. _cec-msg-flags: .. flat-table:: Flags for struct cec_msg diff --git a/Documentation/media/uapi/dvb/audio-channel-select.rst b/Documentation/media/uapi/dvb/audio-channel-select.rst index 2ceb4efebdf0..8cab3d7abff5 100644 --- a/Documentation/media/uapi/dvb/audio-channel-select.rst +++ b/Documentation/media/uapi/dvb/audio-channel-select.rst @@ -44,7 +44,7 @@ Arguments Description ----------- -This ioctl is for DVB devices only. To control a V4L2 decoder use the +This ioctl is for Digital TV devices only. 
To control a V4L2 decoder use the V4L2 ``V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK`` control instead. This ioctl call asks the Audio Device to select the requested channel if diff --git a/Documentation/media/uapi/dvb/audio-fclose.rst b/Documentation/media/uapi/dvb/audio-fclose.rst index 4df24c8d74ed..58d351a3af4b 100644 --- a/Documentation/media/uapi/dvb/audio-fclose.rst +++ b/Documentation/media/uapi/dvb/audio-fclose.rst @@ -2,14 +2,14 @@ .. _audio_fclose: -================= -DVB audio close() -================= +======================== +Digital TV audio close() +======================== Name ---- -DVB audio close() +Digital TV audio close() .. attention:: This ioctl is deprecated diff --git a/Documentation/media/uapi/dvb/audio-fopen.rst b/Documentation/media/uapi/dvb/audio-fopen.rst index a802c2e0dc6a..4a174640bf11 100644 --- a/Documentation/media/uapi/dvb/audio-fopen.rst +++ b/Documentation/media/uapi/dvb/audio-fopen.rst @@ -2,14 +2,14 @@ .. _audio_fopen: -================ -DVB audio open() -================ +======================= +Digital TV audio open() +======================= Name ---- -DVB audio open() +Digital TV audio open() .. attention:: This ioctl is deprecated diff --git a/Documentation/media/uapi/dvb/audio-fwrite.rst b/Documentation/media/uapi/dvb/audio-fwrite.rst index 8882cad7d165..4980ae7953ef 100644 --- a/Documentation/media/uapi/dvb/audio-fwrite.rst +++ b/Documentation/media/uapi/dvb/audio-fwrite.rst @@ -2,14 +2,14 @@ .. _audio_fwrite: -================= -DVB audio write() -================= +========================= +Digital TV audio write() +========================= Name ---- -DVB audio write() +Digital TV audio write() .. attention:: This ioctl is deprecated diff --git a/Documentation/media/uapi/dvb/audio-set-av-sync.rst b/Documentation/media/uapi/dvb/audio-set-av-sync.rst index 0cef4917d2cf..cf621f3a3037 100644 --- a/Documentation/media/uapi/dvb/audio-set-av-sync.rst +++ b/Documentation/media/uapi/dvb/audio-set-av-sync.rst @@ -38,7 +38,7 @@ Arguments - boolean state - - Tells the DVB subsystem if A/V synchronization shall be ON or OFF. + - Tells the Digital TV subsystem if A/V synchronization shall be ON or OFF. TRUE: AV-sync ON diff --git a/Documentation/media/uapi/dvb/audio-set-bypass-mode.rst b/Documentation/media/uapi/dvb/audio-set-bypass-mode.rst index b063c496c2eb..f0db1fbdb066 100644 --- a/Documentation/media/uapi/dvb/audio-set-bypass-mode.rst +++ b/Documentation/media/uapi/dvb/audio-set-bypass-mode.rst @@ -38,7 +38,7 @@ Arguments - boolean mode - Enables or disables the decoding of the current Audio stream in - the DVB subsystem. + the Digital TV subsystem. TRUE: Bypass is disabled @@ -50,8 +50,8 @@ Description This ioctl call asks the Audio Device to bypass the Audio decoder and forward the stream without decoding. This mode shall be used if streams -that can’t be handled by the DVB system shall be decoded. Dolby -DigitalTM streams are automatically forwarded by the DVB subsystem if +that can’t be handled by the Digial TV system shall be decoded. Dolby +DigitalTM streams are automatically forwarded by the Digital TV subsystem if the hardware can handle it. diff --git a/Documentation/media/uapi/dvb/audio-set-mute.rst b/Documentation/media/uapi/dvb/audio-set-mute.rst index 897e7228f4d8..0af105a8ddcc 100644 --- a/Documentation/media/uapi/dvb/audio-set-mute.rst +++ b/Documentation/media/uapi/dvb/audio-set-mute.rst @@ -48,7 +48,7 @@ Arguments Description ----------- -This ioctl is for DVB devices only. 
To control a V4L2 decoder use the +This ioctl is for Digital TV devices only. To control a V4L2 decoder use the V4L2 :ref:`VIDIOC_DECODER_CMD` with the ``V4L2_DEC_CMD_START_MUTE_AUDIO`` flag instead. diff --git a/Documentation/media/uapi/dvb/audio.rst b/Documentation/media/uapi/dvb/audio.rst index 155622185ea4..e9f9e589c486 100644 --- a/Documentation/media/uapi/dvb/audio.rst +++ b/Documentation/media/uapi/dvb/audio.rst @@ -2,15 +2,16 @@ .. _dvb_audio: -################ -DVB Audio Device -################ -The DVB audio device controls the MPEG2 audio decoder of the DVB -hardware. It can be accessed through ``/dev/dvb/adapter?/audio?``. Data +####################### +Digital TV Audio Device +####################### + +The Digital TV audio device controls the MPEG2 audio decoder of the Digital +TV hardware. It can be accessed through ``/dev/dvb/adapter?/audio?``. Data types and and ioctl definitions can be accessed by including ``linux/dvb/audio.h`` in your application. -Please note that some DVB cards don’t have their own MPEG decoder, which +Please note that some Digital TV cards don’t have their own MPEG decoder, which results in the omission of the audio and video device. These ioctls were also used by V4L2 to control MPEG decoders implemented diff --git a/Documentation/media/uapi/dvb/audio_h.rst b/Documentation/media/uapi/dvb/audio_h.rst deleted file mode 100644 index e00c3010fdf9..000000000000 --- a/Documentation/media/uapi/dvb/audio_h.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. -*- coding: utf-8; mode: rst -*- - -.. _audio_h: - -********************* -DVB Audio Header File -********************* - -.. kernel-include:: $BUILDDIR/audio.h.rst diff --git a/Documentation/media/uapi/dvb/ca-fclose.rst b/Documentation/media/uapi/dvb/ca-fclose.rst index 5ecefa4abc3d..e84bbfcfa184 100644 --- a/Documentation/media/uapi/dvb/ca-fclose.rst +++ b/Documentation/media/uapi/dvb/ca-fclose.rst @@ -2,14 +2,14 @@ .. _ca_fclose: -============== -DVB CA close() -============== +===================== +Digital TV CA close() +===================== Name ---- -DVB CA close() +Digital TV CA close() Synopsis @@ -34,13 +34,10 @@ This system call closes a previously opened CA device. Return Value ------------ -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 +On success 0 is returned. +On error -1 is returned, and the ``errno`` variable is set +appropriately. - - .. row 1 - - - ``EBADF`` - - - fd is not a valid open file descriptor. +Generic error codes are described at the +:ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/ca-fopen.rst b/Documentation/media/uapi/dvb/ca-fopen.rst index 3d2819751446..056c71b53a70 100644 --- a/Documentation/media/uapi/dvb/ca-fopen.rst +++ b/Documentation/media/uapi/dvb/ca-fopen.rst @@ -2,14 +2,14 @@ .. _ca_fopen: -============= -DVB CA open() -============= +==================== +Digital TV CA open() +==================== Name ---- -DVB CA open() +Digital TV CA open() Synopsis @@ -23,25 +23,25 @@ Arguments --------- ``name`` - Name of specific DVB CA device. + Name of specific Digital TV CA device. ``flags`` A bit-wise OR of the following flags: +.. tabularcolumns:: |p{2.5cm}|p{15.0cm}| + .. flat-table:: :header-rows: 0 :stub-columns: 0 + :widths: 1 16 - - - - O_RDONLY + - - ``O_RDONLY`` - read-only access - - - - O_RDWR + - - ``O_RDWR`` - read/write access - - - - O_NONBLOCK + - - ``O_NONBLOCK`` - open in non-blocking mode (blocking mode is the default) @@ -49,50 +49,29 @@ Arguments Description ----------- -This system call opens a named ca device (e.g. 
/dev/ost/ca) for -subsequent use. +This system call opens a named ca device (e.g. ``/dev/dvb/adapter?/ca?``) +for subsequent use. -When an open() call has succeeded, the device will be ready for use. The +When an ``open()`` call has succeeded, the device will be ready for use. The significance of blocking or non-blocking mode is described in the documentation for functions where there is a difference. It does not -affect the semantics of the open() call itself. A device opened in +affect the semantics of the ``open()`` call itself. A device opened in blocking mode can later be put into non-blocking mode (and vice versa) -using the F_SETFL command of the fcntl system call. This is a standard -system call, documented in the Linux manual page for fcntl. Only one -user can open the CA Device in O_RDWR mode. All other attempts to open -the device in this mode will fail, and an error code will be returned. +using the ``F_SETFL`` command of the ``fcntl`` system call. This is a +standard system call, documented in the Linux manual page for fcntl. +Only one user can open the CA Device in ``O_RDWR`` mode. All other +attempts to open the device in this mode will fail, and an error code +will be returned. Return Value ------------ -.. tabularcolumns:: |p{2.5cm}|p{15.0cm}| -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 +On success 0 is returned. +On error -1 is returned, and the ``errno`` variable is set +appropriately. - - .. row 1 - - - ``ENODEV`` - - - Device driver not loaded/available. - - - .. row 2 - - - ``EINTERNAL`` - - - Internal error. - - - .. row 3 - - - ``EBUSY`` - - - Device or resource busy. - - - .. row 4 - - - ``EINVAL`` - - - Invalid argument. +Generic error codes are described at the +:ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/ca-get-cap.rst b/Documentation/media/uapi/dvb/ca-get-cap.rst index fbf7e359cb8a..d2d5c1355396 100644 --- a/Documentation/media/uapi/dvb/ca-get-cap.rst +++ b/Documentation/media/uapi/dvb/ca-get-cap.rst @@ -28,43 +28,19 @@ Arguments ``caps`` Pointer to struct :c:type:`ca_caps`. -.. c:type:: struct ca_caps - -.. flat-table:: struct ca_caps - :header-rows: 1 - :stub-columns: 0 - - - - - type - - name - - description - - - - unsigned int - - slot_num - - total number of CA card and module slots - - - - unsigned int - - slot_type - - bitmask with all supported slot types - - - - unsigned int - - descr_num - - total number of descrambler slots (keys) - - - - unsigned int - - descr_type - - bit mask with all supported descr types - - Description ----------- -.. note:: This ioctl is undocumented. Documentation is welcome. - +Queries the Kernel for information about the available CA and descrambler +slots, and their types. Return Value ------------ -On success 0 is returned, on error -1 and the ``errno`` variable is set -appropriately. The generic error codes are described at the +On success 0 is returned and :c:type:`ca_caps` is filled. + +On error, -1 is returned and the ``errno`` variable is set +appropriately. + +The generic error codes are described at the :ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/ca-get-descr-info.rst b/Documentation/media/uapi/dvb/ca-get-descr-info.rst index 7bf327a3d0e3..e564fbb8d524 100644 --- a/Documentation/media/uapi/dvb/ca-get-descr-info.rst +++ b/Documentation/media/uapi/dvb/ca-get-descr-info.rst @@ -27,37 +27,16 @@ Arguments ``desc`` Pointer to struct :c:type:`ca_descr_info`. -.. c:type:: struct ca_descr_info - -.. 
flat-table:: struct ca_descr_info - :header-rows: 1 - :stub-columns: 0 - - - - - type - - name - - description - - - - - unsigned int - - num - - number of available descramblers (keys) - - - - unsigned int - - type - - type of supported scrambling system. Valid values are: - ``CA_ECD``, ``CA_NDS`` and ``CA_DSS``. - - Description ----------- -.. note:: This ioctl is undocumented. Documentation is welcome. - +Returns information about all descrambler slots. Return Value ------------ -On success 0 is returned, on error -1 and the ``errno`` variable is set +On success 0 is returned, and :c:type:`ca_descr_info` is filled. + +On error -1 is returned, and the ``errno`` variable is set appropriately. The generic error codes are described at the :ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/ca-get-msg.rst b/Documentation/media/uapi/dvb/ca-get-msg.rst index 121588da3ef1..ceeda623ce93 100644 --- a/Documentation/media/uapi/dvb/ca-get-msg.rst +++ b/Documentation/media/uapi/dvb/ca-get-msg.rst @@ -28,47 +28,25 @@ Arguments ``msg`` Pointer to struct :c:type:`ca_msg`. - -.. c:type:: struct ca_msg - -.. flat-table:: struct ca_msg - :header-rows: 1 - :stub-columns: 0 - - - - - type - - name - - description - - - - unsigned int - - index - - - - - - - unsigned int - - type - - - - - - - unsigned int - - length - - - - - - - unsigned char - - msg[256] - - - - Description ----------- -.. note:: This ioctl is undocumented. Documentation is welcome. +Receives a message via a CI CA module. + +.. note:: + + Please notice that, on most drivers, this is done by reading from + the /dev/adapter?/ca? device node. Return Value ------------ -On success 0 is returned, on error -1 and the ``errno`` variable is set -appropriately. The generic error codes are described at the + +On success 0 is returned. + +On error -1 is returned, and the ``errno`` variable is set +appropriately. + +Generic error codes are described at the :ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/ca-get-slot-info.rst b/Documentation/media/uapi/dvb/ca-get-slot-info.rst index 54e5dc78a2dc..1a1d6f0c71b9 100644 --- a/Documentation/media/uapi/dvb/ca-get-slot-info.rst +++ b/Documentation/media/uapi/dvb/ca-get-slot-info.rst @@ -26,100 +26,32 @@ Arguments File descriptor returned by a previous call to :c:func:`open() `. ``info`` - Pointer to struct c:type:`ca_slot_info`. - -.. _ca_slot_info_type: - -.. flat-table:: ca_slot_info types - :header-rows: 1 - :stub-columns: 0 - - - - - type - - name - - description - - - - CA_CI - - 1 - - CI high level interface - - - - - CA_CI_LINK - - 2 - - CI link layer level interface - - - - - CA_CI_PHYS - - 4 - - CI physical layer level interface - - - - - CA_DESCR - - 8 - - built-in descrambler - - - - - CA_SC - - 128 - - simple smart card interface - -.. _ca_slot_info_flag: - -.. flat-table:: ca_slot_info flags - :header-rows: 1 - :stub-columns: 0 - - - - - type - - name - - description - - - - - CA_CI_MODULE_PRESENT - - 1 - - module (or card) inserted - - - - - CA_CI_MODULE_READY - - 2 - - - -.. c:type:: ca_slot_info - -.. flat-table:: struct ca_slot_info - :header-rows: 1 - :stub-columns: 0 - - - - - type - - name - - description - - - - - int - - num - - slot number - - - - - int - - type - - CA interface this slot supports, as defined at :ref:`ca_slot_info_type`. - - - - - unsigned int - - flags - - flags as defined at :ref:`ca_slot_info_flag`. - + Pointer to struct :c:type:`ca_slot_info`. Description ----------- -.. note:: This ioctl is undocumented. 
Documentation is welcome. +Returns information about a CA slot identified by +:c:type:`ca_slot_info`.slot_num. Return Value ------------ -On success 0 is returned, on error -1 and the ``errno`` variable is set -appropriately. The generic error codes are described at the +On success 0 is returned, and :c:type:`ca_slot_info` is filled. + +On error -1 is returned, and the ``errno`` variable is set +appropriately. + +.. tabularcolumns:: |p{2.5cm}|p{15.0cm}| + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + :widths: 1 16 + + - - ``ENODEV`` + - the slot is not available. + +The generic error codes are described at the :ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/ca-reset.rst b/Documentation/media/uapi/dvb/ca-reset.rst index 477313121a65..29788325f90e 100644 --- a/Documentation/media/uapi/dvb/ca-reset.rst +++ b/Documentation/media/uapi/dvb/ca-reset.rst @@ -28,12 +28,17 @@ Arguments Description ----------- -.. note:: This ioctl is undocumented. Documentation is welcome. +Puts the Conditional Access hardware on its initial state. It should +be called before start using the CA hardware. Return Value ------------ -On success 0 is returned, on error -1 and the ``errno`` variable is set -appropriately. The generic error codes are described at the +On success 0 is returned. + +On error -1 is returned, and the ``errno`` variable is set +appropriately. + +Generic error codes are described at the :ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/ca-send-msg.rst b/Documentation/media/uapi/dvb/ca-send-msg.rst index 532ef5f9d6ac..9e91287b7bbc 100644 --- a/Documentation/media/uapi/dvb/ca-send-msg.rst +++ b/Documentation/media/uapi/dvb/ca-send-msg.rst @@ -32,12 +32,20 @@ Arguments Description ----------- -.. note:: This ioctl is undocumented. Documentation is welcome. +Sends a message via a CI CA module. +.. note:: + + Please notice that, on most drivers, this is done by writing + to the /dev/adapter?/ca? device node. Return Value ------------ -On success 0 is returned, on error -1 and the ``errno`` variable is set -appropriately. The generic error codes are described at the +On success 0 is returned. + +On error -1 is returned, and the ``errno`` variable is set +appropriately. + +Generic error codes are described at the :ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/ca-set-descr.rst b/Documentation/media/uapi/dvb/ca-set-descr.rst index 70f7b3cf12ad..a6c47205ffd8 100644 --- a/Documentation/media/uapi/dvb/ca-set-descr.rst +++ b/Documentation/media/uapi/dvb/ca-set-descr.rst @@ -28,16 +28,19 @@ Arguments ``msg`` Pointer to struct :c:type:`ca_descr`. - Description ----------- -.. note:: This ioctl is undocumented. Documentation is welcome. - +CA_SET_DESCR is used for feeding descrambler CA slots with descrambling +keys (refered as control words). Return Value ------------ -On success 0 is returned, on error -1 and the ``errno`` variable is set -appropriately. The generic error codes are described at the +On success 0 is returned. + +On error -1 is returned, and the ``errno`` variable is set +appropriately. + +Generic error codes are described at the :ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/ca-set-pid.rst b/Documentation/media/uapi/dvb/ca-set-pid.rst deleted file mode 100644 index 891c1c72ef24..000000000000 --- a/Documentation/media/uapi/dvb/ca-set-pid.rst +++ /dev/null @@ -1,60 +0,0 @@ -.. -*- coding: utf-8; mode: rst -*- - -.. 
_CA_SET_PID: - -========== -CA_SET_PID -========== - -Name ----- - -CA_SET_PID - - -Synopsis --------- - -.. c:function:: int ioctl(fd, CA_SET_PID, struct ca_pid *pid) - :name: CA_SET_PID - - -Arguments ---------- - -``fd`` - File descriptor returned by a previous call to :c:func:`open() `. - -``pid`` - Pointer to struct :c:type:`ca_pid`. - -.. c:type:: ca_pid - -.. flat-table:: struct ca_pid - :header-rows: 1 - :stub-columns: 0 - - - - - unsigned int - - pid - - Program ID - - - - - int - - index - - PID index. Use -1 to disable. - - - -Description ------------ - -.. note:: This ioctl is undocumented. Documentation is welcome. - - -Return Value ------------- - -On success 0 is returned, on error -1 and the ``errno`` variable is set -appropriately. The generic error codes are described at the -:ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/ca.rst b/Documentation/media/uapi/dvb/ca.rst index 14b14abda1ae..deac72d89e93 100644 --- a/Documentation/media/uapi/dvb/ca.rst +++ b/Documentation/media/uapi/dvb/ca.rst @@ -2,14 +2,20 @@ .. _dvb_ca: -############# -DVB CA Device -############# -The DVB CA device controls the conditional access hardware. It can be -accessed through ``/dev/dvb/adapter?/ca?``. Data types and and ioctl +#################### +Digital TV CA Device +#################### + +The Digital TV CA device controls the conditional access hardware. It +can be accessed through ``/dev/dvb/adapter?/ca?``. Data types and and ioctl definitions can be accessed by including ``linux/dvb/ca.h`` in your application. +.. note:: + + There are three ioctls at this API that aren't documented: + :ref:`CA_GET_MSG`, :ref:`CA_SEND_MSG` and :ref:`CA_SET_DESCR`. + Documentation for them are welcome. .. toctree:: :maxdepth: 1 diff --git a/Documentation/media/uapi/dvb/ca_data_types.rst b/Documentation/media/uapi/dvb/ca_data_types.rst index d9e27c77426c..ac7cbd76ddd5 100644 --- a/Documentation/media/uapi/dvb/ca_data_types.rst +++ b/Documentation/media/uapi/dvb/ca_data_types.rst @@ -6,105 +6,4 @@ CA Data Types ************* - -.. c:type:: ca_slot_info - -ca_slot_info_t -============== - - -.. code-block:: c - - typedef struct ca_slot_info { - int num; /* slot number */ - - int type; /* CA interface this slot supports */ - #define CA_CI 1 /* CI high level interface */ - #define CA_CI_LINK 2 /* CI link layer level interface */ - #define CA_CI_PHYS 4 /* CI physical layer level interface */ - #define CA_DESCR 8 /* built-in descrambler */ - #define CA_SC 128 /* simple smart card interface */ - - unsigned int flags; - #define CA_CI_MODULE_PRESENT 1 /* module (or card) inserted */ - #define CA_CI_MODULE_READY 2 - } ca_slot_info_t; - - -.. c:type:: ca_descr_info - -ca_descr_info_t -=============== - - -.. code-block:: c - - typedef struct ca_descr_info { - unsigned int num; /* number of available descramblers (keys) */ - unsigned int type; /* type of supported scrambling system */ - #define CA_ECD 1 - #define CA_NDS 2 - #define CA_DSS 4 - } ca_descr_info_t; - - -.. c:type:: ca_caps - -ca_caps_t -========= - - -.. code-block:: c - - typedef struct ca_caps { - unsigned int slot_num; /* total number of CA card and module slots */ - unsigned int slot_type; /* OR of all supported types */ - unsigned int descr_num; /* total number of descrambler slots (keys) */ - unsigned int descr_type;/* OR of all supported types */ - } ca_cap_t; - - -.. c:type:: ca_msg - -ca_msg_t -======== - - -.. 
code-block:: c - - /* a message to/from a CI-CAM */ - typedef struct ca_msg { - unsigned int index; - unsigned int type; - unsigned int length; - unsigned char msg[256]; - } ca_msg_t; - - -.. c:type:: ca_descr - -ca_descr_t -========== - - -.. code-block:: c - - typedef struct ca_descr { - unsigned int index; - unsigned int parity; - unsigned char cw[8]; - } ca_descr_t; - - -.. c:type:: ca_pid - -ca-pid -====== - - -.. code-block:: c - - typedef struct ca_pid { - unsigned int pid; - int index; /* -1 == disable*/ - } ca_pid_t; +.. kernel-doc:: include/uapi/linux/dvb/ca.h diff --git a/Documentation/media/uapi/dvb/ca_function_calls.rst b/Documentation/media/uapi/dvb/ca_function_calls.rst index c085a0ebbc05..87d697851e82 100644 --- a/Documentation/media/uapi/dvb/ca_function_calls.rst +++ b/Documentation/media/uapi/dvb/ca_function_calls.rst @@ -18,4 +18,3 @@ CA Function Calls ca-get-msg ca-send-msg ca-set-descr - ca-set-pid diff --git a/Documentation/media/uapi/dvb/ca_h.rst b/Documentation/media/uapi/dvb/ca_h.rst deleted file mode 100644 index f513592ef529..000000000000 --- a/Documentation/media/uapi/dvb/ca_h.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. -*- coding: utf-8; mode: rst -*- - -.. _ca_h: - -********************************** -DVB Conditional Access Header File -********************************** - -.. kernel-include:: $BUILDDIR/ca.h.rst diff --git a/Documentation/media/uapi/dvb/demux.rst b/Documentation/media/uapi/dvb/demux.rst index b12b5a2dac94..45c3d6405c46 100644 --- a/Documentation/media/uapi/dvb/demux.rst +++ b/Documentation/media/uapi/dvb/demux.rst @@ -2,10 +2,15 @@ .. _dvb_demux: -################ -DVB Demux Device -################ -The DVB demux device controls the filters of the DVB hardware/software. +####################### +Digital TV Demux Device +####################### + +The Digital TV demux device controls the MPEG-TS filters for the +digital TV. If the driver and hardware supports, those filters are +implemented at the hardware. Otherwise, the Kernel provides a software +emulation. + It can be accessed through ``/dev/adapter?/demux?``. Data types and and ioctl definitions can be accessed by including ``linux/dvb/dmx.h`` in your application. diff --git a/Documentation/media/uapi/dvb/dmx-add-pid.rst b/Documentation/media/uapi/dvb/dmx-add-pid.rst index 689cd1fc9142..4d5632dfb43e 100644 --- a/Documentation/media/uapi/dvb/dmx-add-pid.rst +++ b/Documentation/media/uapi/dvb/dmx-add-pid.rst @@ -33,13 +33,17 @@ Description ----------- This ioctl call allows to add multiple PIDs to a transport stream filter -previously set up with DMX_SET_PES_FILTER and output equal to -DMX_OUT_TSDEMUX_TAP. +previously set up with :ref:`DMX_SET_PES_FILTER` and output equal to +:c:type:`DMX_OUT_TSDEMUX_TAP `. Return Value ------------ -On success 0 is returned, on error -1 and the ``errno`` variable is set -appropriately. The generic error codes are described at the +On success 0 is returned. + +On error -1 is returned, and the ``errno`` variable is set +appropriately. + +Generic error codes are described at the :ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/dmx-fclose.rst b/Documentation/media/uapi/dvb/dmx-fclose.rst index ca93c23cde6d..578e929f4bde 100644 --- a/Documentation/media/uapi/dvb/dmx-fclose.rst +++ b/Documentation/media/uapi/dvb/dmx-fclose.rst @@ -2,14 +2,14 @@ .. 
_dmx_fclose: -================= -DVB demux close() -================= +======================== +Digital TV demux close() +======================== Name ---- -DVB demux close() +Digital TV demux close() Synopsis @@ -23,25 +23,23 @@ Arguments --------- ``fd`` - File descriptor returned by a previous call to :c:func:`open() `. + File descriptor returned by a previous call to + :c:func:`open() `. Description ----------- This system call deactivates and deallocates a filter that was -previously allocated via the open() call. +previously allocated via the :c:func:`open() ` call. Return Value ------------ -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 +On success 0 is returned. +On error, -1 is returned and the ``errno`` variable is set +appropriately. - - .. row 1 - - - ``EBADF`` - - - fd is not a valid open file descriptor. +The generic error codes are described at the +:ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/dmx-fopen.rst b/Documentation/media/uapi/dvb/dmx-fopen.rst index a697e33c32ea..55628a18ba67 100644 --- a/Documentation/media/uapi/dvb/dmx-fopen.rst +++ b/Documentation/media/uapi/dvb/dmx-fopen.rst @@ -2,14 +2,14 @@ .. _dmx_fopen: -================ -DVB demux open() -================ +======================= +Digital TV demux open() +======================= Name ---- -DVB demux open() +Digital TV demux open() Synopsis @@ -22,25 +22,28 @@ Arguments --------- ``name`` - Name of specific DVB demux device. + Name of specific Digital TV demux device. ``flags`` A bit-wise OR of the following flags: +.. tabularcolumns:: |p{2.5cm}|p{15.0cm}| + .. flat-table:: :header-rows: 0 :stub-columns: 0 + :widths: 1 16 - - - O_RDONLY + - ``O_RDONLY`` - read-only access - - - O_RDWR + - ``O_RDWR`` - read/write access - - - O_NONBLOCK + - ``O_NONBLOCK`` - open in non-blocking mode (blocking mode is the default) @@ -48,52 +51,41 @@ Arguments Description ----------- -This system call, used with a device name of /dev/dvb/adapter0/demux0, +This system call, used with a device name of ``/dev/dvb/adapter?/demux?``, allocates a new filter and returns a handle which can be used for subsequent control of that filter. This call has to be made for each filter to be used, i.e. every returned file descriptor is a reference to -a single filter. /dev/dvb/adapter0/dvr0 is a logical device to be used +a single filter. ``/dev/dvb/adapter?/dvr?`` is a logical device to be used for retrieving Transport Streams for digital video recording. When reading from this device a transport stream containing the packets from all PES filters set in the corresponding demux device -(/dev/dvb/adapter0/demux0) having the output set to DMX_OUT_TS_TAP. A -recorded Transport Stream is replayed by writing to this device. +(``/dev/dvb/adapter?/demux?``) having the output set to ``DMX_OUT_TS_TAP``. +A recorded Transport Stream is replayed by writing to this device. The significance of blocking or non-blocking mode is described in the documentation for functions where there is a difference. It does not -affect the semantics of the open() call itself. A device opened in -blocking mode can later be put into non-blocking mode (and vice versa) -using the F_SETFL command of the fcntl system call. +affect the semantics of the ``open()`` call itself. A device opened +in blocking mode can later be put into non-blocking mode (and vice versa) +using the ``F_SETFL`` command of the fcntl system call. Return Value ------------ +On success 0 is returned. 
+ +On error -1 is returned, and the ``errno`` variable is set +appropriately. + +.. tabularcolumns:: |p{2.5cm}|p{15.0cm}| + .. flat-table:: :header-rows: 0 :stub-columns: 0 + :widths: 1 16 - - - .. row 1 - - - ``ENODEV`` - - - Device driver not loaded/available. - - - .. row 2 - - - ``EINVAL`` - - - Invalid argument. - - - .. row 3 - - - ``EMFILE`` - + - - ``EMFILE`` - “Too many open files”, i.e. no more filters available. - - .. row 4 - - - ``ENOMEM`` - - - The driver failed to allocate enough memory. +The generic error codes are described at the +:ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/dmx-fread.rst b/Documentation/media/uapi/dvb/dmx-fread.rst index e8c7f4db353f..488bdc4ba178 100644 --- a/Documentation/media/uapi/dvb/dmx-fread.rst +++ b/Documentation/media/uapi/dvb/dmx-fread.rst @@ -2,14 +2,14 @@ .. _dmx_fread: -================ -DVB demux read() -================ +======================= +Digital TV demux read() +======================= Name ---- -DVB demux read() +Digital TV demux read() Synopsis @@ -33,62 +33,48 @@ Arguments Description ----------- -This system call returns filtered data, which might be section or PES -data. The filtered data is transferred from the driver’s internal -circular buffer to buf. The maximum amount of data to be transferred is -implied by count. +This system call returns filtered data, which might be section or Packetized +Elementary Stream (PES) data. The filtered data is transferred from +the driver’s internal circular buffer to ``buf``. The maximum amount of data +to be transferred is implied by count. + +.. note:: + + if a section filter created with + :c:type:`DMX_CHECK_CRC ` flag set, + data that fails on CRC check will be silently ignored. + Return Value ------------ +On success 0 is returned. + +On error -1 is returned, and the ``errno`` variable is set +appropriately. + .. tabularcolumns:: |p{2.5cm}|p{15.0cm}| .. flat-table:: :header-rows: 0 :stub-columns: 0 + :widths: 1 16 + - - ``EWOULDBLOCK`` + - No data to return and ``O_NONBLOCK`` was specified. - - .. row 1 - - - ``EWOULDBLOCK`` - - - No data to return and O_NONBLOCK was specified. - - - .. row 2 - - - ``EBADF`` - - - fd is not a valid open file descriptor. - - - .. row 3 - - - ``ECRC`` - - - Last section had a CRC error - no data returned. The buffer is - flushed. - - - .. row 4 - - - ``EOVERFLOW`` - - - - - - .. row 5 - - - + - - ``EOVERFLOW`` - The filtered data was not read from the buffer in due time, resulting in non-read data being lost. The buffer is flushed. - - .. row 6 + - - ``ETIMEDOUT`` + - The section was not loaded within the stated timeout period. + See ioctl :ref:`DMX_SET_FILTER` for how to set a timeout. - - ``ETIMEDOUT`` + - - ``EFAULT`` + - The driver failed to write to the callers buffer due to an + invalid \*buf pointer. - - The section was not loaded within the stated timeout period. See - ioctl DMX_SET_FILTER for how to set a timeout. - - .. row 7 - - - ``EFAULT`` - - - The driver failed to write to the callers buffer due to an invalid - \*buf pointer. +The generic error codes are described at the +:ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/dmx-fwrite.rst b/Documentation/media/uapi/dvb/dmx-fwrite.rst index 8a90dfe28307..519e5733e53b 100644 --- a/Documentation/media/uapi/dvb/dmx-fwrite.rst +++ b/Documentation/media/uapi/dvb/dmx-fwrite.rst @@ -2,14 +2,14 @@ .. 
_dmx_fwrite: -================= -DVB demux write() -================= +======================== +Digital TV demux write() +======================== Name ---- -DVB demux write() +Digital TV demux write() Synopsis @@ -34,42 +34,39 @@ Description ----------- This system call is only provided by the logical device -/dev/dvb/adapter0/dvr0, associated with the physical demux device that +``/dev/dvb/adapter?/dvr?``, associated with the physical demux device that provides the actual DVR functionality. It is used for replay of a digitally recorded Transport Stream. Matching filters have to be defined -in the corresponding physical demux device, /dev/dvb/adapter0/demux0. +in the corresponding physical demux device, ``/dev/dvb/adapter?/demux?``. The amount of data to be transferred is implied by count. Return Value ------------ +On success 0 is returned. + +On error -1 is returned, and the ``errno`` variable is set +appropriately. + .. tabularcolumns:: |p{2.5cm}|p{15.0cm}| .. flat-table:: :header-rows: 0 :stub-columns: 0 + :widths: 1 16 - - .. row 1 - - - ``EWOULDBLOCK`` - - - No data was written. This might happen if O_NONBLOCK was + - - ``EWOULDBLOCK`` + - No data was written. This might happen if ``O_NONBLOCK`` was specified and there is no more buffer space available (if - O_NONBLOCK is not specified the function will block until buffer + ``O_NONBLOCK`` is not specified the function will block until buffer space is available). - - .. row 2 - - - ``EBUSY`` - + - - ``EBUSY`` - This error code indicates that there are conflicting requests. The corresponding demux device is setup to receive data from the front- end. Make sure that these filters are stopped and that the - filters with input set to DMX_IN_DVR are started. + filters with input set to ``DMX_IN_DVR`` are started. - - .. row 3 - - - ``EBADF`` - - - fd is not a valid open file descriptor. +The generic error codes are described at the +:ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/dmx-get-caps.rst b/Documentation/media/uapi/dvb/dmx-get-caps.rst deleted file mode 100644 index 145fb520d779..000000000000 --- a/Documentation/media/uapi/dvb/dmx-get-caps.rst +++ /dev/null @@ -1,41 +0,0 @@ -.. -*- coding: utf-8; mode: rst -*- - -.. _DMX_GET_CAPS: - -============ -DMX_GET_CAPS -============ - -Name ----- - -DMX_GET_CAPS - - -Synopsis --------- - -.. c:function:: int ioctl(fd, DMX_GET_CAPS, struct dmx_caps *caps) - :name: DMX_GET_CAPS - -Arguments ---------- - -``fd`` - File descriptor returned by :c:func:`open() `. - -``caps`` - Pointer to struct :c:type:`dmx_caps` - - -Description ------------ - -.. note:: This ioctl is undocumented. Documentation is welcome. - -Return Value ------------- - -On success 0 is returned, on error -1 and the ``errno`` variable is set -appropriately. The generic error codes are described at the -:ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/dmx-get-event.rst b/Documentation/media/uapi/dvb/dmx-get-event.rst deleted file mode 100644 index 8be626c29158..000000000000 --- a/Documentation/media/uapi/dvb/dmx-get-event.rst +++ /dev/null @@ -1,60 +0,0 @@ -.. -*- coding: utf-8; mode: rst -*- - -.. _DMX_GET_EVENT: - -============= -DMX_GET_EVENT -============= - -Name ----- - -DMX_GET_EVENT - - -Synopsis --------- - -.. c:function:: int ioctl( int fd, DMX_GET_EVENT, struct dmx_event *ev) - :name: DMX_GET_EVENT - - -Arguments ---------- - -``fd`` - File descriptor returned by :c:func:`open() `. - -``ev`` - Pointer to the location where the event is to be stored. 
- - -Description ------------ - -This ioctl call returns an event if available. If an event is not -available, the behavior depends on whether the device is in blocking or -non-blocking mode. In the latter case, the call fails immediately with -errno set to ``EWOULDBLOCK``. In the former case, the call blocks until an -event becomes available. - - -Return Value ------------- - -On success 0 is returned, on error -1 and the ``errno`` variable is set -appropriately. The generic error codes are described at the -:ref:`Generic Error Codes ` chapter. - - - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - - - .. row 1 - - - ``EWOULDBLOCK`` - - - There is no event pending, and the device is in non-blocking mode. diff --git a/Documentation/media/uapi/dvb/dmx-get-pes-pids.rst b/Documentation/media/uapi/dvb/dmx-get-pes-pids.rst index b31634a1cca4..fbdbc12869d1 100644 --- a/Documentation/media/uapi/dvb/dmx-get-pes-pids.rst +++ b/Documentation/media/uapi/dvb/dmx-get-pes-pids.rst @@ -25,18 +25,40 @@ Arguments File descriptor returned by :c:func:`open() `. ``pids`` - Undocumented. + Array used to store 5 Program IDs. Description ----------- -.. note:: This ioctl is undocumented. Documentation is welcome. +This ioctl allows to query a DVB device to return the first PID used +by audio, video, textext, subtitle and PCR programs on a given service. +They're stored as: + +======================= ======== ======================================= +PID element position content +======================= ======== ======================================= +pids[DMX_PES_AUDIO] 0 first audio PID +pids[DMX_PES_VIDEO] 1 first video PID +pids[DMX_PES_TELETEXT] 2 first teletext PID +pids[DMX_PES_SUBTITLE] 3 first subtitle PID +pids[DMX_PES_PCR] 4 first Program Clock Reference PID +======================= ======== ======================================= + + +.. note:: + + A value equal to 0xffff means that the PID was not filled by the + Kernel. Return Value ------------ -On success 0 is returned, on error -1 and the ``errno`` variable is set -appropriately. The generic error codes are described at the +On success 0 is returned. + +On error -1 is returned, and the ``errno`` variable is set +appropriately. + +The generic error codes are described at the :ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/dmx-get-stc.rst b/Documentation/media/uapi/dvb/dmx-get-stc.rst index 9fc501e8128a..604031f7904b 100644 --- a/Documentation/media/uapi/dvb/dmx-get-stc.rst +++ b/Documentation/media/uapi/dvb/dmx-get-stc.rst @@ -25,34 +25,42 @@ Arguments File descriptor returned by :c:func:`open() `. ``stc`` - Pointer to the location where the stc is to be stored. + Pointer to :c:type:`dmx_stc` where the stc data is to be stored. Description ----------- This ioctl call returns the current value of the system time counter -(which is driven by a PES filter of type DMX_PES_PCR). Some hardware -supports more than one STC, so you must specify which one by setting the -num field of stc before the ioctl (range 0...n). The result is returned -in form of a ratio with a 64 bit numerator and a 32 bit denominator, so -the real 90kHz STC value is stc->stc / stc->base . +(which is driven by a PES filter of type :c:type:`DMX_PES_PCR `). +Some hardware supports more than one STC, so you must specify which one by +setting the :c:type:`num ` field of stc before the ioctl (range 0...n). 
+The result is returned in form of a ratio with a 64 bit numerator +and a 32 bit denominator, so the real 90kHz STC value is +``stc->stc / stc->base``. Return Value ------------ -On success 0 is returned, on error -1 and the ``errno`` variable is set -appropriately. The generic error codes are described at the -:ref:`Generic Error Codes ` chapter. +On success 0 is returned. + +On error -1 is returned, and the ``errno`` variable is set +appropriately. + +.. tabularcolumns:: |p{2.5cm}|p{15.0cm}| .. flat-table:: :header-rows: 0 :stub-columns: 0 - + :widths: 1 16 - .. row 1 - ``EINVAL`` - Invalid stc number. + + +The generic error codes are described at the +:ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/dmx-remove-pid.rst b/Documentation/media/uapi/dvb/dmx-remove-pid.rst index e411495c619c..456cc2ded2c0 100644 --- a/Documentation/media/uapi/dvb/dmx-remove-pid.rst +++ b/Documentation/media/uapi/dvb/dmx-remove-pid.rst @@ -34,13 +34,17 @@ Description This ioctl call allows to remove a PID when multiple PIDs are set on a transport stream filter, e. g. a filter previously set up with output -equal to DMX_OUT_TSDEMUX_TAP, created via either -DMX_SET_PES_FILTER or DMX_ADD_PID. +equal to :c:type:`DMX_OUT_TSDEMUX_TAP `, created via either +:ref:`DMX_SET_PES_FILTER` or :ref:`DMX_ADD_PID`. Return Value ------------ -On success 0 is returned, on error -1 and the ``errno`` variable is set -appropriately. The generic error codes are described at the +On success 0 is returned. + +On error -1 is returned, and the ``errno`` variable is set +appropriately. + +The generic error codes are described at the :ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/dmx-set-buffer-size.rst b/Documentation/media/uapi/dvb/dmx-set-buffer-size.rst index f2f7379f29ed..74fd076a9b90 100644 --- a/Documentation/media/uapi/dvb/dmx-set-buffer-size.rst +++ b/Documentation/media/uapi/dvb/dmx-set-buffer-size.rst @@ -33,13 +33,18 @@ Description This ioctl call is used to set the size of the circular buffer used for filtered data. The default size is two maximum sized sections, i.e. if -this function is not called a buffer size of 2 \* 4096 bytes will be +this function is not called a buffer size of ``2 * 4096`` bytes will be used. Return Value ------------ -On success 0 is returned, on error -1 and the ``errno`` variable is set -appropriately. The generic error codes are described at the + +On success 0 is returned. + +On error -1 is returned, and the ``errno`` variable is set +appropriately. + +The generic error codes are described at the :ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/dmx-set-filter.rst b/Documentation/media/uapi/dvb/dmx-set-filter.rst index 1d50c803d69a..88594b8d3846 100644 --- a/Documentation/media/uapi/dvb/dmx-set-filter.rst +++ b/Documentation/media/uapi/dvb/dmx-set-filter.rst @@ -40,13 +40,18 @@ state whether a section should be CRC-checked, whether the filter should be a ”one-shot” filter, i.e. if the filtering operation should be stopped after the first section is received, and whether the filtering operation should be started immediately (without waiting for a -DMX_START ioctl call). If a filter was previously set-up, this filter -will be canceled, and the receive buffer will be flushed. +:ref:`DMX_START` ioctl call). If a filter was previously set-up, this +filter will be canceled, and the receive buffer will be flushed. 
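As a usage illustration only (not part of the patch): a minimal sketch of setting up a section filter with this ioctl, assuming a demux device at ``/dev/dvb/adapter0/demux0`` and, purely as an example, the PAT PID ``0x0000`` with table_id ``0x00``:

.. code-block:: c

    /* Sketch only: device path, PID, table_id, timeout and buffer
     * size below are example assumptions, not mandated values. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/dvb/dmx.h>

    int read_one_section(void)
    {
        struct dmx_sct_filter_params f;
        unsigned char section[4096];        /* max section size */
        int fd = open("/dev/dvb/adapter0/demux0", O_RDWR);

        if (fd < 0)
            return -1;

        memset(&f, 0, sizeof(f));
        f.pid = 0x0000;                     /* example: PAT PID */
        f.filter.filter[0] = 0x00;          /* example: match table_id 0x00 */
        f.filter.mask[0] = 0xff;
        f.timeout = 5000;                   /* in ms; 0 means no timeout */
        f.flags = DMX_IMMEDIATE_START | DMX_CHECK_CRC | DMX_ONESHOT;

        if (ioctl(fd, DMX_SET_FILTER, &f) < 0) {
            perror("DMX_SET_FILTER");
            close(fd);
            return -1;
        }

        /* With DMX_IMMEDIATE_START set, no DMX_START call is needed;
         * read() returns one complete, CRC-checked section. */
        ssize_t n = read(fd, section, sizeof(section));
        if (n > 0)
            printf("got a %zd byte section\n", n);

        close(fd);
        return n > 0 ? 0 : -1;
    }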
Return Value ------------ -On success 0 is returned, on error -1 and the ``errno`` variable is set -appropriately. The generic error codes are described at the + +On success 0 is returned. + +On error -1 is returned, and the ``errno`` variable is set +appropriately. + +The generic error codes are described at the :ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/dmx-set-pes-filter.rst b/Documentation/media/uapi/dvb/dmx-set-pes-filter.rst index 145451d04f7d..d70e7bf96a41 100644 --- a/Documentation/media/uapi/dvb/dmx-set-pes-filter.rst +++ b/Documentation/media/uapi/dvb/dmx-set-pes-filter.rst @@ -42,15 +42,17 @@ capability is supported. Return Value ------------ -On success 0 is returned, on error -1 and the ``errno`` variable is set -appropriately. The generic error codes are described at the -:ref:`Generic Error Codes ` chapter. +On success 0 is returned. + +On error -1 is returned, and the ``errno`` variable is set +appropriately. .. tabularcolumns:: |p{2.5cm}|p{15.0cm}| .. flat-table:: :header-rows: 0 :stub-columns: 0 + :widths: 1 16 - .. row 1 @@ -61,3 +63,7 @@ appropriately. The generic error codes are described at the There are active filters filtering data from another input source. Make sure that these filters are stopped before starting this filter. + + +The generic error codes are described at the +:ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/dmx-set-source.rst b/Documentation/media/uapi/dvb/dmx-set-source.rst deleted file mode 100644 index ac7f77b25e06..000000000000 --- a/Documentation/media/uapi/dvb/dmx-set-source.rst +++ /dev/null @@ -1,44 +0,0 @@ -.. -*- coding: utf-8; mode: rst -*- - -.. _DMX_SET_SOURCE: - -============== -DMX_SET_SOURCE -============== - -Name ----- - -DMX_SET_SOURCE - - -Synopsis --------- - -.. c:function:: int ioctl(fd, DMX_SET_SOURCE, struct dmx_source *src) - :name: DMX_SET_SOURCE - - -Arguments ---------- - - -``fd`` - File descriptor returned by :c:func:`open() `. - -``src`` - Undocumented. - - -Description ------------ - -.. note:: This ioctl is undocumented. Documentation is welcome. - - -Return Value ------------- - -On success 0 is returned, on error -1 and the ``errno`` variable is set -appropriately. The generic error codes are described at the -:ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/dmx-start.rst b/Documentation/media/uapi/dvb/dmx-start.rst index 641f3e017fb1..36700e775296 100644 --- a/Documentation/media/uapi/dvb/dmx-start.rst +++ b/Documentation/media/uapi/dvb/dmx-start.rst @@ -29,15 +29,16 @@ Description ----------- This ioctl call is used to start the actual filtering operation defined -via the ioctl calls DMX_SET_FILTER or DMX_SET_PES_FILTER. +via the ioctl calls :ref:`DMX_SET_FILTER` or :ref:`DMX_SET_PES_FILTER`. Return Value ------------ -On success 0 is returned, on error -1 and the ``errno`` variable is set -appropriately. The generic error codes are described at the -:ref:`Generic Error Codes ` chapter. +On success 0 is returned. + +On error -1 is returned, and the ``errno`` variable is set +appropriately. .. tabularcolumns:: |p{2.5cm}|p{15.0cm}| @@ -51,7 +52,7 @@ appropriately. The generic error codes are described at the - ``EINVAL`` - Invalid argument, i.e. no filtering parameters provided via the - DMX_SET_FILTER or DMX_SET_PES_FILTER functions. + :ref:`DMX_SET_FILTER` or :ref:`DMX_SET_PES_FILTER` ioctls. - .. row 2 @@ -61,3 +62,7 @@ appropriately. 
The generic error codes are described at the There are active filters filtering data from another input source. Make sure that these filters are stopped before starting this filter. + + +The generic error codes are described at the +:ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/dmx-stop.rst b/Documentation/media/uapi/dvb/dmx-stop.rst index 569a3df44923..6d9c927bcd5f 100644 --- a/Documentation/media/uapi/dvb/dmx-stop.rst +++ b/Documentation/media/uapi/dvb/dmx-stop.rst @@ -29,13 +29,17 @@ Description ----------- This ioctl call is used to stop the actual filtering operation defined -via the ioctl calls DMX_SET_FILTER or DMX_SET_PES_FILTER and -started via the DMX_START command. +via the ioctl calls :ref:`DMX_SET_FILTER` or :ref:`DMX_SET_PES_FILTER` and +started via the :ref:`DMX_START` command. Return Value ------------ -On success 0 is returned, on error -1 and the ``errno`` variable is set -appropriately. The generic error codes are described at the +On success 0 is returned. + +On error -1 is returned, and the ``errno`` variable is set +appropriately. + +The generic error codes are described at the :ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/dmx_fcalls.rst b/Documentation/media/uapi/dvb/dmx_fcalls.rst index 77a1554d9834..a17289143220 100644 --- a/Documentation/media/uapi/dvb/dmx_fcalls.rst +++ b/Documentation/media/uapi/dvb/dmx_fcalls.rst @@ -18,10 +18,7 @@ Demux Function Calls dmx-set-filter dmx-set-pes-filter dmx-set-buffer-size - dmx-get-event dmx-get-stc dmx-get-pes-pids - dmx-get-caps - dmx-set-source dmx-add-pid dmx-remove-pid diff --git a/Documentation/media/uapi/dvb/dmx_h.rst b/Documentation/media/uapi/dvb/dmx_h.rst deleted file mode 100644 index 4fd1704a0833..000000000000 --- a/Documentation/media/uapi/dvb/dmx_h.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. -*- coding: utf-8; mode: rst -*- - -.. _dmx_h: - -********************* -DVB Demux Header File -********************* - -.. kernel-include:: $BUILDDIR/dmx.h.rst diff --git a/Documentation/media/uapi/dvb/dmx_types.rst b/Documentation/media/uapi/dvb/dmx_types.rst index 80dd659860d7..2a023a4f516c 100644 --- a/Documentation/media/uapi/dvb/dmx_types.rst +++ b/Documentation/media/uapi/dvb/dmx_types.rst @@ -6,227 +6,4 @@ Demux Data Types **************** -Output for the demux -==================== - -.. c:type:: dmx_output - -.. tabularcolumns:: |p{5.0cm}|p{12.5cm}| - -.. flat-table:: enum dmx_output - :header-rows: 1 - :stub-columns: 0 - - - - .. row 1 - - - ID - - - Description - - - .. row 2 - - - .. _DMX-OUT-DECODER: - - DMX_OUT_DECODER - - - Streaming directly to decoder. - - - .. row 3 - - - .. _DMX-OUT-TAP: - - DMX_OUT_TAP - - - Output going to a memory buffer (to be retrieved via the read - command). Delivers the stream output to the demux device on which - the ioctl is called. - - - .. row 4 - - - .. _DMX-OUT-TS-TAP: - - DMX_OUT_TS_TAP - - - Output multiplexed into a new TS (to be retrieved by reading from - the logical DVR device). Routes output to the logical DVR device - ``/dev/dvb/adapter?/dvr?``, which delivers a TS multiplexed from - all filters for which ``DMX_OUT_TS_TAP`` was specified. - - - .. row 5 - - - .. _DMX-OUT-TSDEMUX-TAP: - - DMX_OUT_TSDEMUX_TAP - - - Like :ref:`DMX_OUT_TS_TAP ` but retrieved - from the DMX device. - - -dmx_input_t -=========== - -.. c:type:: dmx_input - -.. code-block:: c - - typedef enum - { - DMX_IN_FRONTEND, /* Input from a front-end device. */ - DMX_IN_DVR /* Input from the logical DVR device. 
*/ - } dmx_input_t; - - -dmx_pes_type_t -============== - -.. c:type:: dmx_pes_type - - -.. code-block:: c - - typedef enum - { - DMX_PES_AUDIO0, - DMX_PES_VIDEO0, - DMX_PES_TELETEXT0, - DMX_PES_SUBTITLE0, - DMX_PES_PCR0, - - DMX_PES_AUDIO1, - DMX_PES_VIDEO1, - DMX_PES_TELETEXT1, - DMX_PES_SUBTITLE1, - DMX_PES_PCR1, - - DMX_PES_AUDIO2, - DMX_PES_VIDEO2, - DMX_PES_TELETEXT2, - DMX_PES_SUBTITLE2, - DMX_PES_PCR2, - - DMX_PES_AUDIO3, - DMX_PES_VIDEO3, - DMX_PES_TELETEXT3, - DMX_PES_SUBTITLE3, - DMX_PES_PCR3, - - DMX_PES_OTHER - } dmx_pes_type_t; - - -struct dmx_filter -================= - -.. c:type:: dmx_filter - -.. code-block:: c - - typedef struct dmx_filter - { - __u8 filter[DMX_FILTER_SIZE]; - __u8 mask[DMX_FILTER_SIZE]; - __u8 mode[DMX_FILTER_SIZE]; - } dmx_filter_t; - - -.. c:type:: dmx_sct_filter_params - -struct dmx_sct_filter_params -============================ - - -.. code-block:: c - - struct dmx_sct_filter_params - { - __u16 pid; - dmx_filter_t filter; - __u32 timeout; - __u32 flags; - #define DMX_CHECK_CRC 1 - #define DMX_ONESHOT 2 - #define DMX_IMMEDIATE_START 4 - #define DMX_KERNEL_CLIENT 0x8000 - }; - - -struct dmx_pes_filter_params -============================ - -.. c:type:: dmx_pes_filter_params - -.. code-block:: c - - struct dmx_pes_filter_params - { - __u16 pid; - dmx_input_t input; - dmx_output_t output; - dmx_pes_type_t pes_type; - __u32 flags; - }; - - -struct dmx_event -================ - -.. c:type:: dmx_event - -.. code-block:: c - - struct dmx_event - { - dmx_event_t event; - time_t timeStamp; - union - { - dmx_scrambling_status_t scrambling; - } u; - }; - - -struct dmx_stc -============== - -.. c:type:: dmx_stc - -.. code-block:: c - - struct dmx_stc { - unsigned int num; /* input : which STC? 0..N */ - unsigned int base; /* output: divisor for stc to get 90 kHz clock */ - __u64 stc; /* output: stc in 'base'*90 kHz units */ - }; - - -struct dmx_caps -=============== - -.. c:type:: dmx_caps - -.. code-block:: c - - typedef struct dmx_caps { - __u32 caps; - int num_decoders; - } dmx_caps_t; - - -enum dmx_source -=============== - -.. c:type:: dmx_source - -.. code-block:: c - - typedef enum dmx_source { - DMX_SOURCE_FRONT0 = 0, - DMX_SOURCE_FRONT1, - DMX_SOURCE_FRONT2, - DMX_SOURCE_FRONT3, - DMX_SOURCE_DVR0 = 16, - DMX_SOURCE_DVR1, - DMX_SOURCE_DVR2, - DMX_SOURCE_DVR3 - } dmx_source_t; +.. kernel-doc:: include/uapi/linux/dvb/dmx.h diff --git a/Documentation/media/uapi/dvb/dtv-fe-stats.rst b/Documentation/media/uapi/dvb/dtv-fe-stats.rst deleted file mode 100644 index e8a02a1f138d..000000000000 --- a/Documentation/media/uapi/dvb/dtv-fe-stats.rst +++ /dev/null @@ -1,17 +0,0 @@ -.. -*- coding: utf-8; mode: rst -*- - -.. c:type:: dtv_fe_stats - -******************* -struct dtv_fe_stats -******************* - - -.. code-block:: c - - #define MAX_DTV_STATS 4 - - struct dtv_fe_stats { - __u8 len; - struct dtv_stats stat[MAX_DTV_STATS]; - } __packed; diff --git a/Documentation/media/uapi/dvb/dtv-properties.rst b/Documentation/media/uapi/dvb/dtv-properties.rst deleted file mode 100644 index 48c4e834ad11..000000000000 --- a/Documentation/media/uapi/dvb/dtv-properties.rst +++ /dev/null @@ -1,15 +0,0 @@ -.. -*- coding: utf-8; mode: rst -*- - -.. c:type:: dtv_properties - -********************* -struct dtv_properties -********************* - - -.. 
code-block:: c - - struct dtv_properties { - __u32 num; - struct dtv_property *props; - }; diff --git a/Documentation/media/uapi/dvb/dtv-property.rst b/Documentation/media/uapi/dvb/dtv-property.rst deleted file mode 100644 index 3ddc3474b00e..000000000000 --- a/Documentation/media/uapi/dvb/dtv-property.rst +++ /dev/null @@ -1,31 +0,0 @@ -.. -*- coding: utf-8; mode: rst -*- - -.. c:type:: dtv_property - -******************* -struct dtv_property -******************* - - -.. code-block:: c - - /* Reserved fields should be set to 0 */ - - struct dtv_property { - __u32 cmd; - __u32 reserved[3]; - union { - __u32 data; - struct dtv_fe_stats st; - struct { - __u8 data[32]; - __u32 len; - __u32 reserved1[3]; - void *reserved2; - } buffer; - } u; - int result; - } __attribute__ ((packed)); - - /* num of properties cannot exceed DTV_IOCTL_MAX_MSGS per ioctl */ - #define DTV_IOCTL_MAX_MSGS 64 diff --git a/Documentation/media/uapi/dvb/dtv-stats.rst b/Documentation/media/uapi/dvb/dtv-stats.rst deleted file mode 100644 index 35239e72bf74..000000000000 --- a/Documentation/media/uapi/dvb/dtv-stats.rst +++ /dev/null @@ -1,18 +0,0 @@ -.. -*- coding: utf-8; mode: rst -*- - -.. c:type:: dtv_stats - -**************** -struct dtv_stats -**************** - - -.. code-block:: c - - struct dtv_stats { - __u8 scale; /* enum fecap_scale_params type */ - union { - __u64 uvalue; /* for counters and relative scales */ - __s64 svalue; /* for 1/1000 dB measures */ - }; - } __packed; diff --git a/Documentation/media/uapi/dvb/dvb-fe-read-status.rst b/Documentation/media/uapi/dvb/dvb-fe-read-status.rst index 76c20612b274..212f032cad8b 100644 --- a/Documentation/media/uapi/dvb/dvb-fe-read-status.rst +++ b/Documentation/media/uapi/dvb/dvb-fe-read-status.rst @@ -20,6 +20,6 @@ Signal statistics are provided via .. note:: Most statistics require the demodulator to be fully locked - (e. g. with FE_HAS_LOCK bit set). See + (e. g. with :c:type:`FE_HAS_LOCK ` bit set). See :ref:`Frontend statistics indicators ` for more details. diff --git a/Documentation/media/uapi/dvb/dvb-frontend-parameters.rst b/Documentation/media/uapi/dvb/dvb-frontend-parameters.rst index 899fd5c3545e..b152166f8fa7 100644 --- a/Documentation/media/uapi/dvb/dvb-frontend-parameters.rst +++ b/Documentation/media/uapi/dvb/dvb-frontend-parameters.rst @@ -24,7 +24,7 @@ instead, in order to be able to support the newer System Delivery like DVB-S2, DVB-T2, DVB-C2, ISDB, etc. All kinds of parameters are combined as a union in the -FrontendParameters structure: +``dvb_frontend_parameters`` structure: .. code-block:: c diff --git a/Documentation/media/uapi/dvb/dvbapi.rst b/Documentation/media/uapi/dvb/dvbapi.rst index 37680137e3f2..18c86b3a3af1 100644 --- a/Documentation/media/uapi/dvb/dvbapi.rst +++ b/Documentation/media/uapi/dvb/dvbapi.rst @@ -10,12 +10,27 @@ Part II - Digital TV API .. note:: - This API is also known as **DVB API**, although it is generic - enough to support all digital TV standards. + This API is also known as Linux **DVB API**. + + It it was originally written to support the European digital TV + standard (DVB), and later extended to support all digital TV standards. + + In order to avoid confusion, within this document, it was opted to refer to + it, and to associated hardware as **Digital TV**. + + The word **DVB** is reserved to be used for: + + - the Digital TV API version + (e. g. 
DVB API version 3 or DVB API version 5); + - digital TV data types (enums, structs, defines, etc); + - digital TV device nodes (``/dev/dvb/...``); + - the European DVB standard. **Version 5.10** -.. class:: toc-title +.. only:: html + + .. class:: toc-title Table of Contents @@ -30,12 +45,7 @@ Part II - Digital TV API net legacy_dvb_apis examples - audio_h - ca_h - dmx_h - frontend_h - net_h - video_h + headers ********************** @@ -46,11 +56,11 @@ Authors: - J. K. Metzler, Ralph - - Original author of the DVB API documentation. + - Original author of the Digital TV API documentation. - O. C. Metzler, Marcus - - Original author of the DVB API documentation. + - Original author of the Digital TV API documentation. - Carvalho Chehab, Mauro @@ -58,21 +68,26 @@ Authors: **Copyright** |copy| 2002-2003 : Convergence GmbH -**Copyright** |copy| 2009-2016 : Mauro Carvalho Chehab +**Copyright** |copy| 2009-2017 : Mauro Carvalho Chehab **************** Revision History **************** +:revision: 2.2.0 / 2017-09-01 (*mcc*) + +Most gaps between the uAPI document and the Kernel implementation +got fixed for the non-legacy API. + :revision: 2.1.0 / 2015-05-29 (*mcc*) DocBook improvements and cleanups, in order to document the system calls on a more standard way and provide more description about the current -DVB API. +Digital TV API. :revision: 2.0.4 / 2011-05-06 (*mcc*) -Add more information about DVB APIv5, better describing the frontend +Add more information about DVBv5 API, better describing the frontend GET/SET props ioctl's. diff --git a/Documentation/media/uapi/dvb/dvbproperty-006.rst b/Documentation/media/uapi/dvb/dvbproperty-006.rst deleted file mode 100644 index 3343a0f306fe..000000000000 --- a/Documentation/media/uapi/dvb/dvbproperty-006.rst +++ /dev/null @@ -1,12 +0,0 @@ -.. -*- coding: utf-8; mode: rst -*- - -************** -Property types -************** - -On :ref:`FE_GET_PROPERTY and FE_SET_PROPERTY `, -the actual action is determined by the dtv_property cmd/data pairs. -With one single ioctl, is possible to get/set up to 64 properties. The -actual meaning of each property is described on the next sections. - -The available frontend property types are shown on the next section. diff --git a/Documentation/media/uapi/dvb/dvbproperty.rst b/Documentation/media/uapi/dvb/dvbproperty.rst index dd2d71ce43fa..1a56c1724e59 100644 --- a/Documentation/media/uapi/dvb/dvbproperty.rst +++ b/Documentation/media/uapi/dvb/dvbproperty.rst @@ -2,63 +2,76 @@ .. _frontend-properties: -DVB Frontend properties -======================= +************** +Property types +************** Tuning into a Digital TV physical channel and starting decoding it requires changing a set of parameters, in order to control the tuner, the demodulator, the Linear Low-noise Amplifier (LNA) and to set the -antenna subsystem via Satellite Equipment Control (SEC), on satellite -systems. The actual parameters are specific to each particular digital +antenna subsystem via Satellite Equipment Control - SEC (on satellite +systems). The actual parameters are specific to each particular digital TV standards, and may change as the digital TV specs evolves. -In the past, the strategy used was to have a union with the parameters -needed to tune for DVB-S, DVB-C, DVB-T and ATSC delivery systems grouped -there. The problem is that, as the second generation standards appeared, -those structs were not big enough to contain the additional parameters. -Also, the union didn't have any space left to be expanded without -breaking userspace. 
So, the decision was to deprecate the legacy -union/struct based approach, in favor of a properties set approach. +In the past (up to DVB API version 3 - DVBv3), the strategy used was to have a +union with the parameters needed to tune for DVB-S, DVB-C, DVB-T and +ATSC delivery systems grouped there. The problem is that, as the second +generation standards appeared, the size of such union was not big +enough to group the structs that would be required for those new +standards. Also, extending it would break userspace. + +So, the legacy union/struct based approach was deprecated, in favor +of a properties set approach. On such approach, +:ref:`FE_GET_PROPERTY and FE_SET_PROPERTY ` are used +to setup the frontend and read its status. + +The actual action is determined by a set of dtv_property cmd/data pairs. +With one single ioctl, is possible to get/set up to 64 properties. + +This section describes the new and recommended way to set the frontend, +with supports all digital TV delivery systems. .. note:: - On Linux DVB API version 3, setting a frontend were done via - struct :c:type:`dvb_frontend_parameters`. - This got replaced on version 5 (also called "S2API", as this API were - added originally_enabled to provide support for DVB-S2), because the - old API has a very limited support to new standards and new hardware. - This section describes the new and recommended way to set the frontend, - with suppports all digital TV delivery systems. + 1. On Linux DVB API version 3, setting a frontend was done via + struct :c:type:`dvb_frontend_parameters`. -Example: with the properties based approach, in order to set the tuner -to a DVB-C channel at 651 kHz, modulated with 256-QAM, FEC 3/4 and -symbol rate of 5.217 Mbauds, those properties should be sent to + 2. Don't use DVB API version 3 calls on hardware with supports + newer standards. Such API provides no suport or a very limited + support to new standards and/or new hardware. + + 3. Nowadays, most frontends support multiple delivery systems. + Only with DVB API version 5 calls it is possible to switch between + the multiple delivery systems supported by a frontend. + + 4. DVB API version 5 is also called *S2API*, as the first + new standard added to it was DVB-S2. + +**Example**: in order to set the hardware to tune into a DVB-C channel +at 651 kHz, modulated with 256-QAM, FEC 3/4 and symbol rate of 5.217 +Mbauds, those properties should be sent to :ref:`FE_SET_PROPERTY ` ioctl: -- :ref:`DTV_DELIVERY_SYSTEM ` = - SYS_DVBC_ANNEX_A + :ref:`DTV_DELIVERY_SYSTEM ` = SYS_DVBC_ANNEX_A -- :ref:`DTV_FREQUENCY ` = 651000000 + :ref:`DTV_FREQUENCY ` = 651000000 -- :ref:`DTV_MODULATION ` = QAM_256 + :ref:`DTV_MODULATION ` = QAM_256 -- :ref:`DTV_INVERSION ` = INVERSION_AUTO + :ref:`DTV_INVERSION ` = INVERSION_AUTO -- :ref:`DTV_SYMBOL_RATE ` = 5217000 + :ref:`DTV_SYMBOL_RATE ` = 5217000 -- :ref:`DTV_INNER_FEC ` = FEC_3_4 + :ref:`DTV_INNER_FEC ` = FEC_3_4 -- :ref:`DTV_TUNE ` + :ref:`DTV_TUNE ` The code that would that would do the above is show in :ref:`dtv-prop-example`. -.. _dtv-prop-example: - -Example: Setting digital TV frontend properties -=============================================== - .. code-block:: c + :caption: Example: Setting digital TV frontend properties + :name: dtv-prop-example #include #include @@ -102,17 +115,12 @@ Example: Setting digital TV frontend properties provides methods for usual operations like program scanning and to read/write channel descriptor files. - .. 
toctree:: :maxdepth: 1 - dtv-stats - dtv-fe-stats - dtv-property - dtv-properties - dvbproperty-006 fe_property_parameters frontend-stat-properties frontend-property-terrestrial-systems frontend-property-cable-systems frontend-property-satellite-systems + frontend-header diff --git a/Documentation/media/uapi/dvb/dvbstb.svg b/Documentation/media/uapi/dvb/dvbstb.svg index 4effe45b448d..f6fe2f837373 100644 --- a/Documentation/media/uapi/dvb/dvbstb.svg +++ b/Documentation/media/uapi/dvb/dvbstb.svg @@ -1,17 +1,16 @@ -image/svg+xmlAntena -Frontend -CA -Demux -SEC -Audio -Video -TV - +image/svg+xmlAntena +Frontend +CA +Demux +SEC +Audio +Video +TV +Decoder +Decoder + diff --git a/Documentation/media/uapi/dvb/examples.rst b/Documentation/media/uapi/dvb/examples.rst index 1a94966312c0..e0f627ca2e4d 100644 --- a/Documentation/media/uapi/dvb/examples.rst +++ b/Documentation/media/uapi/dvb/examples.rst @@ -6,8 +6,8 @@ Examples ******** -In this section we would like to present some examples for using the DVB -API. +In this section we would like to present some examples for using the Digital +TV API. .. note:: diff --git a/Documentation/media/uapi/dvb/fe-diseqc-recv-slave-reply.rst b/Documentation/media/uapi/dvb/fe-diseqc-recv-slave-reply.rst index 302db2857f90..f220ee351e15 100644 --- a/Documentation/media/uapi/dvb/fe-diseqc-recv-slave-reply.rst +++ b/Documentation/media/uapi/dvb/fe-diseqc-recv-slave-reply.rst @@ -26,8 +26,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` - pointer to struct - :c:type:`dvb_diseqc_slave_reply` + pointer to struct :c:type:`dvb_diseqc_slave_reply`. Description @@ -35,46 +34,15 @@ Description Receives reply from a DiSEqC 2.0 command. -.. c:type:: dvb_diseqc_slave_reply - -.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}| - -.. flat-table:: struct dvb_diseqc_slave_reply - :header-rows: 0 - :stub-columns: 0 - :widths: 1 1 2 - - - - .. row 1 - - - uint8_t - - - msg[4] - - - DiSEqC message (framing, data[3]) - - - .. row 2 - - - uint8_t - - - msg_len - - - Length of the DiSEqC message. Valid values are 0 to 4, where 0 - means no msg - - - .. row 3 - - - int - - - timeout - - - Return from ioctl after timeout ms with errorcode when no message - was received - +The received message is stored at the buffer pointed by ``argp``. Return Value ============ -On success 0 is returned, on error -1 and the ``errno`` variable is set -appropriately. The generic error codes are described at the +On success 0 is returned. + +On error -1 is returned, and the ``errno`` variable is set +appropriately. + +Generic error codes are described at the :ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/fe-diseqc-reset-overload.rst b/Documentation/media/uapi/dvb/fe-diseqc-reset-overload.rst index 75116f283faf..78476c1c7bf5 100644 --- a/Documentation/media/uapi/dvb/fe-diseqc-reset-overload.rst +++ b/Documentation/media/uapi/dvb/fe-diseqc-reset-overload.rst @@ -31,12 +31,16 @@ Description If the bus has been automatically powered off due to power overload, this ioctl call restores the power to the bus. The call requires read/write access to the device. This call has no effect if the device -is manually powered off. Not all DVB adapters support this ioctl. +is manually powered off. Not all Digital TV adapters support this ioctl. Return Value ============ -On success 0 is returned, on error -1 and the ``errno`` variable is set -appropriately. The generic error codes are described at the +On success 0 is returned. 
+ +On error -1 is returned, and the ``errno`` variable is set +appropriately. + +Generic error codes are described at the :ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/fe-diseqc-send-burst.rst b/Documentation/media/uapi/dvb/fe-diseqc-send-burst.rst index e962f6ec5aaf..a7e05914efae 100644 --- a/Documentation/media/uapi/dvb/fe-diseqc-send-burst.rst +++ b/Documentation/media/uapi/dvb/fe-diseqc-send-burst.rst @@ -26,7 +26,7 @@ Arguments File descriptor returned by :ref:`open() `. ``tone`` - an integer enumered value described at :c:type:`fe_sec_mini_cmd` + An integer enumered value described at :c:type:`fe_sec_mini_cmd`. Description @@ -39,39 +39,14 @@ read/write permissions. It provides support for what's specified at `Digital Satellite Equipment Control (DiSEqC) - Simple "ToneBurst" Detection Circuit specification. `__ -.. c:type:: fe_sec_mini_cmd - -.. flat-table:: enum fe_sec_mini_cmd - :header-rows: 1 - :stub-columns: 0 - - - - .. row 1 - - - ID - - - Description - - - .. row 2 - - - .. _SEC-MINI-A: - - ``SEC_MINI_A`` - - - Sends a mini-DiSEqC 22kHz '0' Tone Burst to select satellite-A - - - .. row 3 - - - .. _SEC-MINI-B: - - ``SEC_MINI_B`` - - - Sends a mini-DiSEqC 22kHz '1' Data Burst to select satellite-B - Return Value ============ -On success 0 is returned, on error -1 and the ``errno`` variable is set -appropriately. The generic error codes are described at the +On success 0 is returned. + +On error -1 is returned, and the ``errno`` variable is set +appropriately. + +Generic error codes are described at the :ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/fe-diseqc-send-master-cmd.rst b/Documentation/media/uapi/dvb/fe-diseqc-send-master-cmd.rst index bbcab3df39b5..6bd3994edfc2 100644 --- a/Documentation/media/uapi/dvb/fe-diseqc-send-master-cmd.rst +++ b/Documentation/media/uapi/dvb/fe-diseqc-send-master-cmd.rst @@ -33,39 +33,17 @@ Arguments Description =========== -Sends a DiSEqC command to the antenna subsystem. - - -.. c:type:: dvb_diseqc_master_cmd - -.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}| - -.. flat-table:: struct dvb_diseqc_master_cmd - :header-rows: 0 - :stub-columns: 0 - :widths: 1 1 2 - - - - .. row 1 - - - uint8_t - - - msg[6] - - - DiSEqC message (framing, address, command, data[3]) - - - .. row 2 - - - uint8_t - - - msg_len - - - Length of the DiSEqC message. Valid values are 3 to 6 +Sends the DiSEqC command pointed by :c:type:`dvb_diseqc_master_cmd` +to the antenna subsystem. Return Value ============ -On success 0 is returned, on error -1 and the ``errno`` variable is set -appropriately. The generic error codes are described at the +On success 0 is returned. + +On error -1 is returned, and the ``errno`` variable is set +appropriately. + +Generic error codes are described at the :ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/fe-dishnetwork-send-legacy-cmd.rst b/Documentation/media/uapi/dvb/fe-dishnetwork-send-legacy-cmd.rst index f41371f12456..dcf2d20d460f 100644 --- a/Documentation/media/uapi/dvb/fe-dishnetwork-send-legacy-cmd.rst +++ b/Documentation/media/uapi/dvb/fe-dishnetwork-send-legacy-cmd.rst @@ -46,6 +46,10 @@ dishes were already legacy in 2004. Return Value ============ -On success 0 is returned, on error -1 and the ``errno`` variable is set -appropriately. The generic error codes are described at the +On success 0 is returned. + +On error -1 is returned, and the ``errno`` variable is set +appropriately. 
+ +Generic error codes are described at the :ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/fe-enable-high-lnb-voltage.rst b/Documentation/media/uapi/dvb/fe-enable-high-lnb-voltage.rst index bacafbc462d2..b20cb360fe37 100644 --- a/Documentation/media/uapi/dvb/fe-enable-high-lnb-voltage.rst +++ b/Documentation/media/uapi/dvb/fe-enable-high-lnb-voltage.rst @@ -45,6 +45,10 @@ voltages. Return Value ============ -On success 0 is returned, on error -1 and the ``errno`` variable is set -appropriately. The generic error codes are described at the +On success 0 is returned. + +On error -1 is returned, and the ``errno`` variable is set +appropriately. + +Generic error codes are described at the :ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/fe-get-event.rst b/Documentation/media/uapi/dvb/fe-get-event.rst index 8a719c33073d..505db94bf183 100644 --- a/Documentation/media/uapi/dvb/fe-get-event.rst +++ b/Documentation/media/uapi/dvb/fe-get-event.rst @@ -44,10 +44,10 @@ an event becomes available. Return Value ============ -On success 0 is returned, on error -1 and the ``errno`` variable is set -appropriately. The generic error codes are described at the -:ref:`Generic Error Codes ` chapter. +On success 0 is returned. +On error -1 is returned, and the ``errno`` variable is set +appropriately. .. flat-table:: @@ -66,3 +66,6 @@ appropriately. The generic error codes are described at the - ``EOVERFLOW`` - Overflow in event queue - one or more events were lost. + +Generic error codes are described at the +:ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/fe-get-frontend.rst b/Documentation/media/uapi/dvb/fe-get-frontend.rst index d53a3f8237c3..5db552cedd70 100644 --- a/Documentation/media/uapi/dvb/fe-get-frontend.rst +++ b/Documentation/media/uapi/dvb/fe-get-frontend.rst @@ -42,11 +42,10 @@ this command, read-only access to the device is sufficient. Return Value ============ -On success 0 is returned, on error -1 and the ``errno`` variable is set -appropriately. The generic error codes are described at the -:ref:`Generic Error Codes ` chapter. - +On success 0 is returned. +On error -1 is returned, and the ``errno`` variable is set +appropriately. .. flat-table:: :header-rows: 0 @@ -58,3 +57,6 @@ appropriately. The generic error codes are described at the - ``EINVAL`` - Maximum supported symbol rate reached. + +Generic error codes are described at the +:ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/fe-get-info.rst b/Documentation/media/uapi/dvb/fe-get-info.rst index e3d64b251f61..49307c0abfee 100644 --- a/Documentation/media/uapi/dvb/fe-get-info.rst +++ b/Documentation/media/uapi/dvb/fe-get-info.rst @@ -9,7 +9,8 @@ ioctl FE_GET_INFO Name ==== -FE_GET_INFO - Query DVB frontend capabilities and returns information about the - front-end. This call only requires read-only access to the device +FE_GET_INFO - Query Digital TV frontend capabilities and returns information +about the - front-end. This call only requires read-only access to the device. Synopsis @@ -33,119 +34,13 @@ Arguments Description =========== -All DVB frontend devices support the ``FE_GET_INFO`` ioctl. It is used -to identify kernel devices compatible with this specification and to +All Digital TV frontend devices support the :ref:`FE_GET_INFO` ioctl. It is +used to identify kernel devices compatible with this specification and to obtain information about driver and hardware capabilities. 
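For illustration only (the adapter/frontend numbers are assumptions), querying the information could look like the sketch below; the ioctl fills a struct dvb_frontend_info as described next.

.. code-block:: c

    /* Sketch only: the device path is an example assumption. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/dvb/frontend.h>

    int print_frontend_info(void)
    {
        struct dvb_frontend_info info;
        int fd = open("/dev/dvb/adapter0/frontend0", O_RDONLY);

        if (fd < 0)
            return -1;

        if (ioctl(fd, FE_GET_INFO, &info) < 0) {
            perror("FE_GET_INFO");
            close(fd);
            return -1;
        }

        /* Frequencies are reported in Hz for terrestrial/cable
         * frontends and in kHz for satellite ones. */
        printf("%s: %u..%u, caps 0x%x\n", info.name,
               info.frequency_min, info.frequency_max,
               (unsigned)info.caps);

        if (info.caps & FE_CAN_2G_MODULATION)
            printf("supports second generation delivery systems\n");

        close(fd);
        return 0;
    }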
The ioctl takes a pointer to dvb_frontend_info which is filled by the driver. When the driver is not compatible with this specification the ioctl returns an error. -.. c:type:: dvb_frontend_info - -.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}| - -.. flat-table:: struct dvb_frontend_info - :header-rows: 0 - :stub-columns: 0 - :widths: 1 1 2 - - - - .. row 1 - - - char - - - name[128] - - - Name of the frontend - - - .. row 2 - - - fe_type_t - - - type - - - **DEPRECATED**. DVBv3 type. Should not be used on modern programs, - as a frontend may have more than one type. So, the DVBv5 API - should be used instead to enumerate and select the frontend type. - - - .. row 3 - - - uint32_t - - - frequency_min - - - Minimal frequency supported by the frontend - - - .. row 4 - - - uint32_t - - - frequency_max - - - Maximal frequency supported by the frontend - - - .. row 5 - - - uint32_t - - - frequency_stepsize - - - Frequency step - all frequencies are multiple of this value - - - .. row 6 - - - uint32_t - - - frequency_tolerance - - - Tolerance of the frequency - - - .. row 7 - - - uint32_t - - - symbol_rate_min - - - Minimal symbol rate (for Cable/Satellite systems), in bauds - - - .. row 8 - - - uint32_t - - - symbol_rate_max - - - Maximal symbol rate (for Cable/Satellite systems), in bauds - - - .. row 9 - - - uint32_t - - - symbol_rate_tolerance - - - Maximal symbol rate tolerance, in ppm - - - .. row 10 - - - uint32_t - - - notifier_delay - - - **DEPRECATED**. Not used by any driver. - - - .. row 11 - - - enum :c:type:`fe_caps` - - - caps - - - Capabilities supported by the frontend - - -.. note:: - - The frequencies are specified in Hz for Terrestrial and Cable - systems. They're specified in kHz for Satellite systems - frontend capabilities ===================== @@ -153,274 +48,16 @@ frontend capabilities Capabilities describe what a frontend can do. Some capabilities are supported only on some specific frontend types. -.. c:type:: fe_caps - -.. tabularcolumns:: |p{6.5cm}|p{11.0cm}| - -.. flat-table:: enum fe_caps - :header-rows: 1 - :stub-columns: 0 - - - - .. row 1 - - - ID - - - Description - - - .. row 2 - - - .. _FE-IS-STUPID: - - ``FE_IS_STUPID`` - - - There's something wrong at the frontend, and it can't report its - capabilities - - - .. row 3 - - - .. _FE-CAN-INVERSION-AUTO: - - ``FE_CAN_INVERSION_AUTO`` - - - The frontend is capable of auto-detecting inversion - - - .. row 4 - - - .. _FE-CAN-FEC-1-2: - - ``FE_CAN_FEC_1_2`` - - - The frontend supports FEC 1/2 - - - .. row 5 - - - .. _FE-CAN-FEC-2-3: - - ``FE_CAN_FEC_2_3`` - - - The frontend supports FEC 2/3 - - - .. row 6 - - - .. _FE-CAN-FEC-3-4: - - ``FE_CAN_FEC_3_4`` - - - The frontend supports FEC 3/4 - - - .. row 7 - - - .. _FE-CAN-FEC-4-5: - - ``FE_CAN_FEC_4_5`` - - - The frontend supports FEC 4/5 - - - .. row 8 - - - .. _FE-CAN-FEC-5-6: - - ``FE_CAN_FEC_5_6`` - - - The frontend supports FEC 5/6 - - - .. row 9 - - - .. _FE-CAN-FEC-6-7: - - ``FE_CAN_FEC_6_7`` - - - The frontend supports FEC 6/7 - - - .. row 10 - - - .. _FE-CAN-FEC-7-8: - - ``FE_CAN_FEC_7_8`` - - - The frontend supports FEC 7/8 - - - .. row 11 - - - .. _FE-CAN-FEC-8-9: - - ``FE_CAN_FEC_8_9`` - - - The frontend supports FEC 8/9 - - - .. row 12 - - - .. _FE-CAN-FEC-AUTO: - - ``FE_CAN_FEC_AUTO`` - - - The frontend can autodetect FEC. - - - .. row 13 - - - .. _FE-CAN-QPSK: - - ``FE_CAN_QPSK`` - - - The frontend supports QPSK modulation - - - .. row 14 - - - .. _FE-CAN-QAM-16: - - ``FE_CAN_QAM_16`` - - - The frontend supports 16-QAM modulation - - - .. 
row 15 - - - .. _FE-CAN-QAM-32: - - ``FE_CAN_QAM_32`` - - - The frontend supports 32-QAM modulation - - - .. row 16 - - - .. _FE-CAN-QAM-64: - - ``FE_CAN_QAM_64`` - - - The frontend supports 64-QAM modulation - - - .. row 17 - - - .. _FE-CAN-QAM-128: - - ``FE_CAN_QAM_128`` - - - The frontend supports 128-QAM modulation - - - .. row 18 - - - .. _FE-CAN-QAM-256: - - ``FE_CAN_QAM_256`` - - - The frontend supports 256-QAM modulation - - - .. row 19 - - - .. _FE-CAN-QAM-AUTO: - - ``FE_CAN_QAM_AUTO`` - - - The frontend can autodetect modulation - - - .. row 20 - - - .. _FE-CAN-TRANSMISSION-MODE-AUTO: - - ``FE_CAN_TRANSMISSION_MODE_AUTO`` - - - The frontend can autodetect the transmission mode - - - .. row 21 - - - .. _FE-CAN-BANDWIDTH-AUTO: - - ``FE_CAN_BANDWIDTH_AUTO`` - - - The frontend can autodetect the bandwidth - - - .. row 22 - - - .. _FE-CAN-GUARD-INTERVAL-AUTO: - - ``FE_CAN_GUARD_INTERVAL_AUTO`` - - - The frontend can autodetect the guard interval - - - .. row 23 - - - .. _FE-CAN-HIERARCHY-AUTO: - - ``FE_CAN_HIERARCHY_AUTO`` - - - The frontend can autodetect hierarch - - - .. row 24 - - - .. _FE-CAN-8VSB: - - ``FE_CAN_8VSB`` - - - The frontend supports 8-VSB modulation - - - .. row 25 - - - .. _FE-CAN-16VSB: - - ``FE_CAN_16VSB`` - - - The frontend supports 16-VSB modulation - - - .. row 26 - - - .. _FE-HAS-EXTENDED-CAPS: - - ``FE_HAS_EXTENDED_CAPS`` - - - Currently, unused - - - .. row 27 - - - .. _FE-CAN-MULTISTREAM: - - ``FE_CAN_MULTISTREAM`` - - - The frontend supports multistream filtering - - - .. row 28 - - - .. _FE-CAN-TURBO-FEC: - - ``FE_CAN_TURBO_FEC`` - - - The frontend supports turbo FEC modulation - - - .. row 29 - - - .. _FE-CAN-2G-MODULATION: - - ``FE_CAN_2G_MODULATION`` - - - The frontend supports "2nd generation modulation" (DVB-S2/T2)> - - - .. row 30 - - - .. _FE-NEEDS-BENDING: - - ``FE_NEEDS_BENDING`` - - - Not supported anymore, don't use it - - - .. row 31 - - - .. _FE-CAN-RECOVER: - - ``FE_CAN_RECOVER`` - - - The frontend can recover from a cable unplug automatically - - - .. row 32 - - - .. _FE-CAN-MUTE-TS: - - ``FE_CAN_MUTE_TS`` - - - The frontend can stop spurious TS data output +The frontend capabilities are described at :c:type:`fe_caps`. Return Value ============ -On success 0 is returned, on error -1 and the ``errno`` variable is set -appropriately. The generic error codes are described at the +On success 0 is returned. + +On error -1 is returned, and the ``errno`` variable is set +appropriately. + +Generic error codes are described at the :ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/fe-get-property.rst b/Documentation/media/uapi/dvb/fe-get-property.rst index 015d4db597b5..948d2ba84f2c 100644 --- a/Documentation/media/uapi/dvb/fe-get-property.rst +++ b/Documentation/media/uapi/dvb/fe-get-property.rst @@ -29,13 +29,13 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` - pointer to struct :c:type:`dtv_properties` + Pointer to struct :c:type:`dtv_properties`. Description =========== -All DVB frontend devices support the ``FE_SET_PROPERTY`` and +All Digital TV frontend devices support the ``FE_SET_PROPERTY`` and ``FE_GET_PROPERTY`` ioctls. The supported properties and statistics depends on the delivery system and on the device: @@ -64,6 +64,10 @@ depends on the delivery system and on the device: Return Value ============ -On success 0 is returned, on error -1 and the ``errno`` variable is set -appropriately. The generic error codes are described at the +On success 0 is returned. 
+ +On error -1 is returned, and the ``errno`` variable is set +appropriately. + +Generic error codes are described at the :ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/fe-read-ber.rst b/Documentation/media/uapi/dvb/fe-read-ber.rst index e54972ad5250..1e6a79567a4c 100644 --- a/Documentation/media/uapi/dvb/fe-read-ber.rst +++ b/Documentation/media/uapi/dvb/fe-read-ber.rst @@ -41,6 +41,10 @@ access to the device is sufficient. Return Value ============ -On success 0 is returned, on error -1 and the ``errno`` variable is set -appropriately. The generic error codes are described at the +On success 0 is returned. + +On error -1 is returned, and the ``errno`` variable is set +appropriately. + +Generic error codes are described at the :ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/fe-read-signal-strength.rst b/Documentation/media/uapi/dvb/fe-read-signal-strength.rst index 4b13c4757744..198f6dfb53a1 100644 --- a/Documentation/media/uapi/dvb/fe-read-signal-strength.rst +++ b/Documentation/media/uapi/dvb/fe-read-signal-strength.rst @@ -41,6 +41,10 @@ to the device is sufficient. Return Value ============ -On success 0 is returned, on error -1 and the ``errno`` variable is set -appropriately. The generic error codes are described at the +On success 0 is returned. + +On error -1 is returned, and the ``errno`` variable is set +appropriately. + +Generic error codes are described at the :ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/fe-read-snr.rst b/Documentation/media/uapi/dvb/fe-read-snr.rst index 2aed487f5c99..6db22c043512 100644 --- a/Documentation/media/uapi/dvb/fe-read-snr.rst +++ b/Documentation/media/uapi/dvb/fe-read-snr.rst @@ -41,6 +41,10 @@ to the device is sufficient. Return Value ============ -On success 0 is returned, on error -1 and the ``errno`` variable is set -appropriately. The generic error codes are described at the +On success 0 is returned. + +On error -1 is returned, and the ``errno`` variable is set +appropriately. + +Generic error codes are described at the :ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/fe-read-status.rst b/Documentation/media/uapi/dvb/fe-read-status.rst index 812f086c20f5..4adb52f084ff 100644 --- a/Documentation/media/uapi/dvb/fe-read-status.rst +++ b/Documentation/media/uapi/dvb/fe-read-status.rst @@ -33,7 +33,7 @@ Arguments Description =========== -All DVB frontend devices support the ``FE_READ_STATUS`` ioctl. It is +All Digital TV frontend devices support the ``FE_READ_STATUS`` ioctl. It is used to check about the locking status of the frontend after being tuned. The ioctl takes a pointer to an integer where the status will be written. @@ -52,85 +52,14 @@ The fe_status parameter is used to indicate the current state and/or state changes of the frontend hardware. It is produced using the enum :c:type:`fe_status` values on a bitmask -.. c:type:: fe_status - -.. tabularcolumns:: |p{3.5cm}|p{14.0cm}| - -.. _fe-status: - -.. flat-table:: enum fe_status - :header-rows: 1 - :stub-columns: 0 - - - - .. row 1 - - - ID - - - Description - - - .. row 2 - - - .. _FE-HAS-SIGNAL: - - ``FE_HAS_SIGNAL`` - - - The frontend has found something above the noise level - - - .. row 3 - - - .. _FE-HAS-CARRIER: - - ``FE_HAS_CARRIER`` - - - The frontend has found a DVB signal - - - .. row 4 - - - .. _FE-HAS-VITERBI: - - ``FE_HAS_VITERBI`` - - - The frontend FEC inner coding (Viterbi, LDPC or other inner code) - is stable - - - .. row 5 - - - .. 
_FE-HAS-SYNC: - - ``FE_HAS_SYNC`` - - - Synchronization bytes was found - - - .. row 6 - - - .. _FE-HAS-LOCK: - - ``FE_HAS_LOCK`` - - - The DVB were locked and everything is working - - - .. row 7 - - - .. _FE-TIMEDOUT: - - ``FE_TIMEDOUT`` - - - no lock within the last about 2 seconds - - - .. row 8 - - - .. _FE-REINIT: - - ``FE_REINIT`` - - - The frontend was reinitialized, application is recommended to - reset DiSEqC, tone and parameters - Return Value ============ -On success 0 is returned, on error -1 and the ``errno`` variable is set -appropriately. The generic error codes are described at the +On success 0 is returned. + +On error -1 is returned, and the ``errno`` variable is set +appropriately. + +Generic error codes are described at the :ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/fe-read-uncorrected-blocks.rst b/Documentation/media/uapi/dvb/fe-read-uncorrected-blocks.rst index 46687c123402..f2c688bcacb3 100644 --- a/Documentation/media/uapi/dvb/fe-read-uncorrected-blocks.rst +++ b/Documentation/media/uapi/dvb/fe-read-uncorrected-blocks.rst @@ -43,6 +43,10 @@ sufficient. Return Value ============ -On success 0 is returned, on error -1 and the ``errno`` variable is set -appropriately. The generic error codes are described at the +On success 0 is returned. + +On error -1 is returned, and the ``errno`` variable is set +appropriately. + +Generic error codes are described at the :ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/fe-set-frontend-tune-mode.rst b/Documentation/media/uapi/dvb/fe-set-frontend-tune-mode.rst index 1d5878da2f41..3c4bc179b313 100644 --- a/Documentation/media/uapi/dvb/fe-set-frontend-tune-mode.rst +++ b/Documentation/media/uapi/dvb/fe-set-frontend-tune-mode.rst @@ -30,7 +30,7 @@ Arguments - 0 - normal tune mode - - FE_TUNE_MODE_ONESHOT - When set, this flag will disable any + - ``FE_TUNE_MODE_ONESHOT`` - When set, this flag will disable any zigzagging or other "normal" tuning behaviour. Additionally, there will be no automatic monitoring of the lock status, and hence no frontend events will be generated. If a frontend device @@ -42,12 +42,16 @@ Description =========== Allow setting tuner mode flags to the frontend, between 0 (normal) or -FE_TUNE_MODE_ONESHOT mode +``FE_TUNE_MODE_ONESHOT`` mode Return Value ============ -On success 0 is returned, on error -1 and the ``errno`` variable is set -appropriately. The generic error codes are described at the +On success 0 is returned. + +On error -1 is returned, and the ``errno`` variable is set +appropriately. + +Generic error codes are described at the :ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/fe-set-frontend.rst b/Documentation/media/uapi/dvb/fe-set-frontend.rst index 7f97dce9aee6..4f3dcf338254 100644 --- a/Documentation/media/uapi/dvb/fe-set-frontend.rst +++ b/Documentation/media/uapi/dvb/fe-set-frontend.rst @@ -48,17 +48,24 @@ requires read/write access to the device. Return Value ============ -On success 0 is returned, on error -1 and the ``errno`` variable is set -appropriately. The generic error codes are described at the -:ref:`Generic Error Codes ` chapter. +On success 0 is returned. + +On error -1 is returned, and the ``errno`` variable is set +appropriately. + +.. tabularcolumns:: |p{2.5cm}|p{15.0cm}| .. flat-table:: :header-rows: 0 :stub-columns: 0 - + :widths: 1 16 - .. row 1 - ``EINVAL`` - Maximum supported symbol rate reached. 
+ + +Generic error codes are described at the +:ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/fe-set-tone.rst b/Documentation/media/uapi/dvb/fe-set-tone.rst index 84e4da3fd4c9..758efa11014c 100644 --- a/Documentation/media/uapi/dvb/fe-set-tone.rst +++ b/Documentation/media/uapi/dvb/fe-set-tone.rst @@ -45,40 +45,14 @@ this is done using the DiSEqC ioctls. capability of selecting the band. So, it is recommended that applications would change to SEC_TONE_OFF when the device is not used. -.. c:type:: fe_sec_tone_mode - -.. flat-table:: enum fe_sec_tone_mode - :header-rows: 1 - :stub-columns: 0 - - - - .. row 1 - - - ID - - - Description - - - .. row 2 - - - .. _SEC-TONE-ON: - - ``SEC_TONE_ON`` - - - Sends a 22kHz tone burst to the antenna - - - .. row 3 - - - .. _SEC-TONE-OFF: - - ``SEC_TONE_OFF`` - - - Don't send a 22kHz tone to the antenna (except if the - FE_DISEQC_* ioctls are called) - Return Value ============ -On success 0 is returned, on error -1 and the ``errno`` variable is set -appropriately. The generic error codes are described at the +On success 0 is returned. + +On error -1 is returned, and the ``errno`` variable is set +appropriately. + +Generic error codes are described at the :ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/fe-set-voltage.rst b/Documentation/media/uapi/dvb/fe-set-voltage.rst index 052f316bb4a3..38d4485290a0 100644 --- a/Documentation/media/uapi/dvb/fe-set-voltage.rst +++ b/Documentation/media/uapi/dvb/fe-set-voltage.rst @@ -53,6 +53,10 @@ power up the LNBf. Return Value ============ -On success 0 is returned, on error -1 and the ``errno`` variable is set -appropriately. The generic error codes are described at the +On success 0 is returned. + +On error -1 is returned, and the ``errno`` variable is set +appropriately. + +Generic error codes are described at the :ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/fe-type-t.rst b/Documentation/media/uapi/dvb/fe-type-t.rst index 548b965188d0..dee32ae104d7 100644 --- a/Documentation/media/uapi/dvb/fe-type-t.rst +++ b/Documentation/media/uapi/dvb/fe-type-t.rst @@ -78,7 +78,7 @@ parameter. In the old days, struct :c:type:`dvb_frontend_info` used to contain ``fe_type_t`` field to indicate the delivery systems, -filled with either FE_QPSK, FE_QAM, FE_OFDM or FE_ATSC. While this +filled with either ``FE_QPSK, FE_QAM, FE_OFDM`` or ``FE_ATSC``. While this is still filled to keep backward compatibility, the usage of this field is deprecated, as it can report just one delivery system, but some devices support multiple delivery systems. Please use diff --git a/Documentation/media/uapi/dvb/fe_property_parameters.rst b/Documentation/media/uapi/dvb/fe_property_parameters.rst index 7bb7559c4500..6eef507fea50 100644 --- a/Documentation/media/uapi/dvb/fe_property_parameters.rst +++ b/Documentation/media/uapi/dvb/fe_property_parameters.rst @@ -6,6 +6,11 @@ Digital TV property parameters ****************************** +There are several different Digital TV parameters that can be used by +:ref:`FE_SET_PROPERTY and FE_GET_PROPERTY ioctls`. +This section describes each of them. Please notice, however, that only +a subset of them are needed to setup a frontend. + .. _DTV-UNDEFINED: @@ -67,144 +72,36 @@ DTV_MODULATION ============== Specifies the frontend modulation type for delivery systems that -supports more than one modulation type. The modulation can be one of the -types defined by enum :c:type:`fe_modulation`. - - -.. 
c:type:: fe_modulation - -Modulation property -------------------- - -Most of the digital TV standards currently offers more than one possible -modulation (sometimes called as "constellation" on some standards). This -enum contains the values used by the Kernel. Please note that not all -modulations are supported by a given standard. - - -.. flat-table:: enum fe_modulation - :header-rows: 1 - :stub-columns: 0 - - - - .. row 1 - - - ID - - - Description - - - .. row 2 - - - .. _QPSK: - - ``QPSK`` - - - QPSK modulation - - - .. row 3 - - - .. _QAM-16: - - ``QAM_16`` - - - 16-QAM modulation - - - .. row 4 - - - .. _QAM-32: - - ``QAM_32`` - - - 32-QAM modulation - - - .. row 5 - - - .. _QAM-64: - - ``QAM_64`` - - - 64-QAM modulation - - - .. row 6 - - - .. _QAM-128: - - ``QAM_128`` - - - 128-QAM modulation - - - .. row 7 - - - .. _QAM-256: - - ``QAM_256`` - - - 256-QAM modulation - - - .. row 8 - - - .. _QAM-AUTO: - - ``QAM_AUTO`` - - - Autodetect QAM modulation - - - .. row 9 - - - .. _VSB-8: - - ``VSB_8`` - - - 8-VSB modulation - - - .. row 10 - - - .. _VSB-16: - - ``VSB_16`` - - - 16-VSB modulation - - - .. row 11 - - - .. _PSK-8: - - ``PSK_8`` - - - 8-PSK modulation - - - .. row 12 - - - .. _APSK-16: - - ``APSK_16`` - - - 16-APSK modulation - - - .. row 13 - - - .. _APSK-32: - - ``APSK_32`` - - - 32-APSK modulation - - - .. row 14 - - - .. _DQPSK: - - ``DQPSK`` - - - DQPSK modulation - - - .. row 15 - - - .. _QAM-4-NR: - - ``QAM_4_NR`` - - - 4-QAM-NR modulation - +supports more multiple modulations. + +The modulation can be one of the types defined by enum :c:type:`fe_modulation`. + +Most of the digital TV standards offers more than one possible +modulation type. + +The table below presents a summary of the types of modulation types +supported by each delivery system, as currently defined by specs. + +======================= ======================================================= +Standard Modulation types +======================= ======================================================= +ATSC (version 1) 8-VSB and 16-VSB. +DMTB 4-QAM, 16-QAM, 32-QAM, 64-QAM and 4-QAM-NR. +DVB-C Annex A/C 16-QAM, 32-QAM, 64-QAM and 256-QAM. +DVB-C Annex B 64-QAM. +DVB-T QPSK, 16-QAM and 64-QAM. +DVB-T2 QPSK, 16-QAM, 64-QAM and 256-QAM. +DVB-S No need to set. It supports only QPSK. +DVB-S2 QPSK, 8-PSK, 16-APSK and 32-APSK. +ISDB-T QPSK, DQPSK, 16-QAM and 64-QAM. +ISDB-S 8-PSK, QPSK and BPSK. +======================= ======================================================= + +.. note:: + + Please notice that some of the above modulation types may not be + defined currently at the Kernel. The reason is simple: no driver + needed such definition yet. .. _DTV-BANDWIDTH-HZ: @@ -214,33 +111,43 @@ DTV_BANDWIDTH_HZ Bandwidth for the channel, in HZ. +Should be set only for terrestrial delivery systems. + Possible values: ``1712000``, ``5000000``, ``6000000``, ``7000000``, ``8000000``, ``10000000``. +======================= ======================================================= +Terrestrial Standard Possible values for bandwidth +======================= ======================================================= +ATSC (version 1) No need to set. It is always 6MHz. +DMTB No need to set. It is always 8MHz. +DVB-T 6MHz, 7MHz and 8MHz. +DVB-T2 1.172 MHz, 5MHz, 6MHz, 7MHz, 8MHz and 10MHz +ISDB-T 5MHz, 6MHz, 7MHz and 8MHz, although most places + use 6MHz. +======================= ======================================================= + + .. note:: - #. DVB-T supports 6, 7 and 8MHz. - #. 
DVB-T2 supports 1.172, 5, 6, 7, 8 and 10MHz. + #. For ISDB-Tsb, the bandwidth can vary depending on the number of + connected segments. - #. ISDB-T supports 5MHz, 6MHz, 7MHz and 8MHz, although most - places use 6MHz. - - #. On DVB-C and DVB-S/S2, the bandwidth depends on the symbol rate. - So, the Kernel will silently ignore setting :ref:`DTV-BANDWIDTH-HZ`. - - #. For DVB-C and DVB-S/S2, the Kernel will return an estimation of the - bandwidth, calculated from :ref:`DTV-SYMBOL-RATE` and from - the rolloff, with is fixed for DVB-C and DVB-S. - - #. For DVB-S2, the bandwidth estimation will use :ref:`DTV-ROLLOFF`. - - #. For ISDB-Tsb, it can vary depending on the number of connected - segments. - - #. Bandwidth in ISDB-Tsb can be easily derived from other parameters + It can be easily derived from other parameters (DTV_ISDBT_SB_SEGMENT_IDX, DTV_ISDBT_SB_SEGMENT_COUNT). + #. On Satellite and Cable delivery systems, the bandwidth depends on + the symbol rate. So, the Kernel will silently ignore any setting + :ref:`DTV-BANDWIDTH-HZ`. I will however fill it back with a + bandwidth estimation. + + Such bandwidth estimation takes into account the symbol rate set with + :ref:`DTV-SYMBOL-RATE`, and the rolloff factor, with is fixed for + DVB-C and DVB-S. + + For DVB-S2, the rolloff should also be set via :ref:`DTV-ROLLOFF`. + .. _DTV-INVERSION: @@ -249,53 +156,7 @@ DTV_INVERSION Specifies if the frontend should do spectral inversion or not. -.. c:type:: fe_spectral_inversion - -enum fe_modulation: Frontend spectral inversion ------------------------------------------------ - -This parameter indicates if spectral inversion should be presumed or -not. In the automatic setting (``INVERSION_AUTO``) the hardware will try -to figure out the correct setting by itself. If the hardware doesn't -support, the DVB core will try to lock at the carrier first with -inversion off. If it fails, it will try to enable inversion. - - -.. flat-table:: enum fe_modulation - :header-rows: 1 - :stub-columns: 0 - - - - .. row 1 - - - ID - - - Description - - - .. row 2 - - - .. _INVERSION-OFF: - - ``INVERSION_OFF`` - - - Don't do spectral band inversion. - - - .. row 3 - - - .. _INVERSION-ON: - - ``INVERSION_ON`` - - - Do spectral band inversion. - - - .. row 4 - - - .. _INVERSION-AUTO: - - ``INVERSION_AUTO`` - - - Autodetect spectral band inversion. - +The acceptable values are defined by :c:type:`fe_spectral_inversion`. .. _DTV-DISEQC-MASTER: @@ -311,8 +172,9 @@ Currently not implemented. DTV_SYMBOL_RATE =============== -Digital TV symbol rate, in bauds (symbols/second). Used on cable -standards. +Used on cable and satellite delivery systems. + +Digital TV symbol rate, in bauds (symbols/second). .. _DTV-INNER-FEC: @@ -320,128 +182,9 @@ standards. DTV_INNER_FEC ============= -Used cable/satellite transmissions. The acceptable values are: - -.. c:type:: fe_code_rate - -enum fe_code_rate: type of the Forward Error Correction. --------------------------------------------------------- - -.. flat-table:: enum fe_code_rate - :header-rows: 1 - :stub-columns: 0 - - - - .. row 1 - - - ID - - - Description - - - .. row 2 - - - .. _FEC-NONE: - - ``FEC_NONE`` - - - No Forward Error Correction Code - - - .. row 3 - - - .. _FEC-AUTO: - - ``FEC_AUTO`` - - - Autodetect Error Correction Code - - - .. row 4 - - - .. _FEC-1-2: - - ``FEC_1_2`` - - - Forward Error Correction Code 1/2 - - - .. row 5 - - - .. _FEC-2-3: - - ``FEC_2_3`` - - - Forward Error Correction Code 2/3 - - - .. row 6 - - - .. 
_FEC-3-4: - - ``FEC_3_4`` - - - Forward Error Correction Code 3/4 - - - .. row 7 - - - .. _FEC-4-5: - - ``FEC_4_5`` - - - Forward Error Correction Code 4/5 - - - .. row 8 - - - .. _FEC-5-6: - - ``FEC_5_6`` - - - Forward Error Correction Code 5/6 - - - .. row 9 - - - .. _FEC-6-7: - - ``FEC_6_7`` - - - Forward Error Correction Code 6/7 - - - .. row 10 - - - .. _FEC-7-8: - - ``FEC_7_8`` - - - Forward Error Correction Code 7/8 - - - .. row 11 - - - .. _FEC-8-9: - - ``FEC_8_9`` - - - Forward Error Correction Code 8/9 - - - .. row 12 - - - .. _FEC-9-10: - - ``FEC_9_10`` - - - Forward Error Correction Code 9/10 - - - .. row 13 - - - .. _FEC-2-5: - - ``FEC_2_5`` - - - Forward Error Correction Code 2/5 - - - .. row 14 - - - .. _FEC-3-5: - - ``FEC_3_5`` - - - Forward Error Correction Code 3/5 +Used on cable and satellite delivery systems. +The acceptable values are defined by :c:type:`fe_code_rate`. .. _DTV-VOLTAGE: @@ -449,49 +192,14 @@ enum fe_code_rate: type of the Forward Error Correction. DTV_VOLTAGE =========== +Used on satellite delivery systems. + The voltage is usually used with non-DiSEqC capable LNBs to switch the polarzation (horizontal/vertical). When using DiSEqC epuipment this voltage has to be switched consistently to the DiSEqC commands as described in the DiSEqC spec. - -.. c:type:: fe_sec_voltage - -.. flat-table:: enum fe_sec_voltage - :header-rows: 1 - :stub-columns: 0 - - - - .. row 1 - - - ID - - - Description - - - .. row 2 - - - .. _SEC-VOLTAGE-13: - - ``SEC_VOLTAGE_13`` - - - Set DC voltage level to 13V - - - .. row 3 - - - .. _SEC-VOLTAGE-18: - - ``SEC_VOLTAGE_18`` - - - Set DC voltage level to 18V - - - .. row 4 - - - .. _SEC-VOLTAGE-OFF: - - ``SEC_VOLTAGE_OFF`` - - - Don't send any voltage to the antenna - +The acceptable values are defined by :c:type:`fe_sec_voltage`. .. _DTV-TONE: @@ -507,50 +215,11 @@ Currently not used. DTV_PILOT ========= -Sets DVB-S2 pilot +Used on DVB-S2. +Sets DVB-S2 pilot. -.. c:type:: fe_pilot - -fe_pilot type -------------- - - -.. flat-table:: enum fe_pilot - :header-rows: 1 - :stub-columns: 0 - - - - .. row 1 - - - ID - - - Description - - - .. row 2 - - - .. _PILOT-ON: - - ``PILOT_ON`` - - - Pilot tones enabled - - - .. row 3 - - - .. _PILOT-OFF: - - ``PILOT_OFF`` - - - Pilot tones disabled - - - .. row 4 - - - .. _PILOT-AUTO: - - ``PILOT_AUTO`` - - - Autodetect pilot tones - +The acceptable values are defined by :c:type:`fe_pilot`. .. _DTV-ROLLOFF: @@ -558,58 +227,11 @@ fe_pilot type DTV_ROLLOFF =========== -Sets DVB-S2 rolloff +Used on DVB-S2. +Sets DVB-S2 rolloff. -.. c:type:: fe_rolloff - -fe_rolloff type ---------------- - - -.. flat-table:: enum fe_rolloff - :header-rows: 1 - :stub-columns: 0 - - - - .. row 1 - - - ID - - - Description - - - .. row 2 - - - .. _ROLLOFF-35: - - ``ROLLOFF_35`` - - - Roloff factor: α=35% - - - .. row 3 - - - .. _ROLLOFF-20: - - ``ROLLOFF_20`` - - - Roloff factor: α=20% - - - .. row 4 - - - .. _ROLLOFF-25: - - ``ROLLOFF_25`` - - - Roloff factor: α=25% - - - .. row 5 - - - .. _ROLLOFF-AUTO: - - ``ROLLOFF_AUTO`` - - - Auto-detect the roloff factor. - +The acceptable values are defined by :c:type:`fe_rolloff`. .. _DTV-DISEQC-SLAVE-REPLY: @@ -641,180 +263,9 @@ Currently not implemented. DTV_DELIVERY_SYSTEM =================== -Specifies the type of Delivery system - - -.. c:type:: fe_delivery_system - -fe_delivery_system type ------------------------ - -Possible values: - - -.. flat-table:: enum fe_delivery_system - :header-rows: 1 - :stub-columns: 0 - - - - .. row 1 - - - ID - - - Description - - - .. 
row 2 - - - .. _SYS-UNDEFINED: - - ``SYS_UNDEFINED`` - - - Undefined standard. Generally, indicates an error - - - .. row 3 - - - .. _SYS-DVBC-ANNEX-A: - - ``SYS_DVBC_ANNEX_A`` - - - Cable TV: DVB-C following ITU-T J.83 Annex A spec - - - .. row 4 - - - .. _SYS-DVBC-ANNEX-B: - - ``SYS_DVBC_ANNEX_B`` - - - Cable TV: DVB-C following ITU-T J.83 Annex B spec (ClearQAM) - - - .. row 5 - - - .. _SYS-DVBC-ANNEX-C: - - ``SYS_DVBC_ANNEX_C`` - - - Cable TV: DVB-C following ITU-T J.83 Annex C spec - - - .. row 6 - - - .. _SYS-ISDBC: - - ``SYS_ISDBC`` - - - Cable TV: ISDB-C (no drivers yet) - - - .. row 7 - - - .. _SYS-DVBT: - - ``SYS_DVBT`` - - - Terrestral TV: DVB-T - - - .. row 8 - - - .. _SYS-DVBT2: - - ``SYS_DVBT2`` - - - Terrestral TV: DVB-T2 - - - .. row 9 - - - .. _SYS-ISDBT: - - ``SYS_ISDBT`` - - - Terrestral TV: ISDB-T - - - .. row 10 - - - .. _SYS-ATSC: - - ``SYS_ATSC`` - - - Terrestral TV: ATSC - - - .. row 11 - - - .. _SYS-ATSCMH: - - ``SYS_ATSCMH`` - - - Terrestral TV (mobile): ATSC-M/H - - - .. row 12 - - - .. _SYS-DTMB: - - ``SYS_DTMB`` - - - Terrestrial TV: DTMB - - - .. row 13 - - - .. _SYS-DVBS: - - ``SYS_DVBS`` - - - Satellite TV: DVB-S - - - .. row 14 - - - .. _SYS-DVBS2: - - ``SYS_DVBS2`` - - - Satellite TV: DVB-S2 - - - .. row 15 - - - .. _SYS-TURBO: - - ``SYS_TURBO`` - - - Satellite TV: DVB-S Turbo - - - .. row 16 - - - .. _SYS-ISDBS: - - ``SYS_ISDBS`` - - - Satellite TV: ISDB-S - - - .. row 17 - - - .. _SYS-DAB: - - ``SYS_DAB`` - - - Digital audio: DAB (not fully supported) - - - .. row 18 - - - .. _SYS-DSS: - - ``SYS_DSS`` - - - Satellite TV:"DSS (not fully supported) - - - .. row 19 - - - .. _SYS-CMMB: - - ``SYS_CMMB`` - - - Terrestral TV (mobile):CMMB (not fully supported) - - - .. row 20 - - - .. _SYS-DVBH: - - ``SYS_DVBH`` - - - Terrestral TV (mobile): DVB-H (standard deprecated) +Specifies the type of the delivery system. +The acceptable values are defined by :c:type:`fe_delivery_system`. .. _DTV-ISDBT-PARTIAL-RECEPTION: @@ -822,6 +273,8 @@ Possible values: DTV_ISDBT_PARTIAL_RECEPTION =========================== +Used only on ISDB. + If ``DTV_ISDBT_SOUND_BROADCASTING`` is '0' this bit-field represents whether the channel is in partial reception mode or not. @@ -840,6 +293,8 @@ Possible values: 0, 1, -1 (AUTO) DTV_ISDBT_SOUND_BROADCASTING ============================ +Used only on ISDB. + This field represents whether the other DTV_ISDBT_*-parameters are referring to an ISDB-T and an ISDB-Tsb channel. (See also ``DTV_ISDBT_PARTIAL_RECEPTION``). @@ -852,6 +307,8 @@ Possible values: 0, 1, -1 (AUTO) DTV_ISDBT_SB_SUBCHANNEL_ID ========================== +Used only on ISDB. + This field only applies if ``DTV_ISDBT_SOUND_BROADCASTING`` is '1'. (Note of the author: This might not be the correct description of the @@ -887,6 +344,8 @@ Possible values: 0 .. 41, -1 (AUTO) DTV_ISDBT_SB_SEGMENT_IDX ======================== +Used only on ISDB. + This field only applies if ``DTV_ISDBT_SOUND_BROADCASTING`` is '1'. ``DTV_ISDBT_SB_SEGMENT_IDX`` gives the index of the segment to be @@ -903,6 +362,8 @@ Note: This value cannot be determined by an automatic channel search. DTV_ISDBT_SB_SEGMENT_COUNT ========================== +Used only on ISDB. + This field only applies if ``DTV_ISDBT_SOUND_BROADCASTING`` is '1'. ``DTV_ISDBT_SB_SEGMENT_COUNT`` gives the total count of connected @@ -918,6 +379,8 @@ Note: This value cannot be determined by an automatic channel search. DTV-ISDBT-LAYER[A-C] parameters =============================== +Used only on ISDB. 
+ ISDB-T channels can be coded hierarchically. As opposed to DVB-T in ISDB-T hierarchical layers can be decoded simultaneously. For that reason a ISDB-T demodulator has 3 Viterbi and 3 Reed-Solomon decoders. @@ -934,6 +397,8 @@ There are 3 parameter sets, for Layers A, B and C. DTV_ISDBT_LAYER_ENABLED ----------------------- +Used only on ISDB. + Hierarchical reception in ISDB-T is achieved by enabling or disabling layers in the decoding process. Setting all bits of ``DTV_ISDBT_LAYER_ENABLED`` to '1' forces all layers (if applicable) to @@ -964,7 +429,13 @@ Only the values of the first 3 bits are used. Other bits will be silently ignore DTV_ISDBT_LAYER[A-C]_FEC ------------------------ -Possible values: ``FEC_AUTO``, ``FEC_1_2``, ``FEC_2_3``, ``FEC_3_4``, +Used only on ISDB. + +The Forward Error Correction mechanism used by a given ISDB Layer, as +defined by :c:type:`fe_code_rate`. + + +Possible values are: ``FEC_AUTO``, ``FEC_1_2``, ``FEC_2_3``, ``FEC_3_4``, ``FEC_5_6``, ``FEC_7_8`` @@ -973,11 +444,19 @@ Possible values: ``FEC_AUTO``, ``FEC_1_2``, ``FEC_2_3``, ``FEC_3_4``, DTV_ISDBT_LAYER[A-C]_MODULATION ------------------------------- -Possible values: ``QAM_AUTO``, QP\ ``SK, QAM_16``, ``QAM_64``, ``DQPSK`` +Used only on ISDB. -Note: If layer C is ``DQPSK`` layer B has to be ``DQPSK``. If layer B is -``DQPSK`` and ``DTV_ISDBT_PARTIAL_RECEPTION``\ =0 layer has to be -``DQPSK``. +The modulation used by a given ISDB Layer, as defined by +:c:type:`fe_modulation`. + +Possible values are: ``QAM_AUTO``, ``QPSK``, ``QAM_16``, ``QAM_64``, ``DQPSK`` + +.. note:: + + #. If layer C is ``DQPSK``, then layer B has to be ``DQPSK``. + + #. If layer B is ``DQPSK`` and ``DTV_ISDBT_PARTIAL_RECEPTION``\ = 0, + then layer has to be ``DQPSK``. .. _DTV-ISDBT-LAYER-SEGMENT-COUNT: @@ -985,6 +464,8 @@ Note: If layer C is ``DQPSK`` layer B has to be ``DQPSK``. If layer B is DTV_ISDBT_LAYER[A-C]_SEGMENT_COUNT ---------------------------------- +Used only on ISDB. + Possible values: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, -1 (AUTO) Note: Truth table for ``DTV_ISDBT_SOUND_BROADCASTING`` and @@ -993,15 +474,15 @@ Note: Truth table for ``DTV_ISDBT_SOUND_BROADCASTING`` and .. _isdbt-layer_seg-cnt-table: .. flat-table:: Truth table for ISDB-T Sound Broadcasting - :header-rows: 0 + :header-rows: 1 :stub-columns: 0 - .. row 1 - - PR + - Partial Reception - - SB + - Sound Broadcasting - Layer A width @@ -1074,6 +555,8 @@ Note: Truth table for ``DTV_ISDBT_SOUND_BROADCASTING`` and DTV_ISDBT_LAYER[A-C]_TIME_INTERLEAVING -------------------------------------- +Used only on ISDB. + Valid values: 0, 1, 2, 4, -1 (AUTO) when DTV_ISDBT_SOUND_BROADCASTING is active, value 8 is also valid. @@ -1086,7 +569,7 @@ TMCC-structure, as shown in the table below. .. c:type:: isdbt_layer_interleaving_table .. flat-table:: ISDB-T time interleaving modes - :header-rows: 0 + :header-rows: 1 :stub-columns: 0 @@ -1147,6 +630,8 @@ TMCC-structure, as shown in the table below. DTV_ATSCMH_FIC_VER ------------------ +Used only on ATSC-MH. + Version number of the FIC (Fast Information Channel) signaling data. FIC is used for relaying information to allow rapid service acquisition @@ -1160,6 +645,8 @@ Possible values: 0, 1, 2, 3, ..., 30, 31 DTV_ATSCMH_PARADE_ID -------------------- +Used only on ATSC-MH. + Parade identification number A parade is a collection of up to eight MH groups, conveying one or two @@ -1173,6 +660,8 @@ Possible values: 0, 1, 2, 3, ..., 126, 127 DTV_ATSCMH_NOG -------------- +Used only on ATSC-MH. 
+ Number of MH groups per MH subframe for a designated parade. Possible values: 1, 2, 3, 4, 5, 6, 7, 8 @@ -1183,6 +672,8 @@ Possible values: 1, 2, 3, 4, 5, 6, 7, 8 DTV_ATSCMH_TNOG --------------- +Used only on ATSC-MH. + Total number of MH groups including all MH groups belonging to all MH parades in one MH subframe. @@ -1194,6 +685,8 @@ Possible values: 0, 1, 2, 3, ..., 30, 31 DTV_ATSCMH_SGN -------------- +Used only on ATSC-MH. + Start group number. Possible values: 0, 1, 2, 3, ..., 14, 15 @@ -1204,6 +697,8 @@ Possible values: 0, 1, 2, 3, ..., 14, 15 DTV_ATSCMH_PRC -------------- +Used only on ATSC-MH. + Parade repetition cycle. Possible values: 1, 2, 3, 4, 5, 6, 7, 8 @@ -1214,44 +709,11 @@ Possible values: 1, 2, 3, 4, 5, 6, 7, 8 DTV_ATSCMH_RS_FRAME_MODE ------------------------ +Used only on ATSC-MH. + Reed Solomon (RS) frame mode. -Possible values are: - -.. tabularcolumns:: |p{5.0cm}|p{12.5cm}| - -.. c:type:: atscmh_rs_frame_mode - -.. flat-table:: enum atscmh_rs_frame_mode - :header-rows: 1 - :stub-columns: 0 - - - - .. row 1 - - - ID - - - Description - - - .. row 2 - - - .. _ATSCMH-RSFRAME-PRI-ONLY: - - ``ATSCMH_RSFRAME_PRI_ONLY`` - - - Single Frame: There is only a primary RS Frame for all Group - Regions. - - - .. row 3 - - - .. _ATSCMH-RSFRAME-PRI-SEC: - - ``ATSCMH_RSFRAME_PRI_SEC`` - - - Dual Frame: There are two separate RS Frames: Primary RS Frame for - Group Region A and B and Secondary RS Frame for Group Region C and - D. - +The acceptable values are defined by :c:type:`atscmh_rs_frame_mode`. .. _DTV-ATSCMH-RS-FRAME-ENSEMBLE: @@ -1259,48 +721,11 @@ Possible values are: DTV_ATSCMH_RS_FRAME_ENSEMBLE ---------------------------- +Used only on ATSC-MH. + Reed Solomon(RS) frame ensemble. -Possible values are: - - -.. c:type:: atscmh_rs_frame_ensemble - -.. flat-table:: enum atscmh_rs_frame_ensemble - :header-rows: 1 - :stub-columns: 0 - - - - .. row 1 - - - ID - - - Description - - - .. row 2 - - - .. _ATSCMH-RSFRAME-ENS-PRI: - - ``ATSCMH_RSFRAME_ENS_PRI`` - - - Primary Ensemble. - - - .. row 3 - - - .. _ATSCMH-RSFRAME-ENS-SEC: - - ``AATSCMH_RSFRAME_PRI_SEC`` - - - Secondary Ensemble. - - - .. row 4 - - - .. _ATSCMH-RSFRAME-RES: - - ``AATSCMH_RSFRAME_RES`` - - - Reserved. Shouldn't be used. - +The acceptable values are defined by :c:type:`atscmh_rs_frame_ensemble`. .. _DTV-ATSCMH-RS-CODE-MODE-PRI: @@ -1308,56 +733,11 @@ Possible values are: DTV_ATSCMH_RS_CODE_MODE_PRI --------------------------- +Used only on ATSC-MH. + Reed Solomon (RS) code mode (primary). -Possible values are: - - -.. c:type:: atscmh_rs_code_mode - -.. flat-table:: enum atscmh_rs_code_mode - :header-rows: 1 - :stub-columns: 0 - - - - .. row 1 - - - ID - - - Description - - - .. row 2 - - - .. _ATSCMH-RSCODE-211-187: - - ``ATSCMH_RSCODE_211_187`` - - - Reed Solomon code (211,187). - - - .. row 3 - - - .. _ATSCMH-RSCODE-223-187: - - ``ATSCMH_RSCODE_223_187`` - - - Reed Solomon code (223,187). - - - .. row 4 - - - .. _ATSCMH-RSCODE-235-187: - - ``ATSCMH_RSCODE_235_187`` - - - Reed Solomon code (235,187). - - - .. row 5 - - - .. _ATSCMH-RSCODE-RES: - - ``ATSCMH_RSCODE_RES`` - - - Reserved. Shouldn't be used. - +The acceptable values are defined by :c:type:`atscmh_rs_code_mode`. .. _DTV-ATSCMH-RS-CODE-MODE-SEC: @@ -1365,10 +745,11 @@ Possible values are: DTV_ATSCMH_RS_CODE_MODE_SEC --------------------------- +Used only on ATSC-MH. + Reed Solomon (RS) code mode (secondary). 
-Possible values are the same as documented on enum -:c:type:`atscmh_rs_code_mode`: +The acceptable values are defined by :c:type:`atscmh_rs_code_mode`. .. _DTV-ATSCMH-SCCC-BLOCK-MODE: @@ -1376,51 +757,11 @@ Possible values are the same as documented on enum DTV_ATSCMH_SCCC_BLOCK_MODE -------------------------- +Used only on ATSC-MH. + Series Concatenated Convolutional Code Block Mode. -Possible values are: - -.. tabularcolumns:: |p{4.5cm}|p{13.0cm}| - -.. c:type:: atscmh_sccc_block_mode - -.. flat-table:: enum atscmh_scc_block_mode - :header-rows: 1 - :stub-columns: 0 - - - - .. row 1 - - - ID - - - Description - - - .. row 2 - - - .. _ATSCMH-SCCC-BLK-SEP: - - ``ATSCMH_SCCC_BLK_SEP`` - - - Separate SCCC: the SCCC outer code mode shall be set independently - for each Group Region (A, B, C, D) - - - .. row 3 - - - .. _ATSCMH-SCCC-BLK-COMB: - - ``ATSCMH_SCCC_BLK_COMB`` - - - Combined SCCC: all four Regions shall have the same SCCC outer - code mode. - - - .. row 4 - - - .. _ATSCMH-SCCC-BLK-RES: - - ``ATSCMH_SCCC_BLK_RES`` - - - Reserved. Shouldn't be used. - +The acceptable values are defined by :c:type:`atscmh_sccc_block_mode`. .. _DTV-ATSCMH-SCCC-CODE-MODE-A: @@ -1428,55 +769,19 @@ Possible values are: DTV_ATSCMH_SCCC_CODE_MODE_A --------------------------- +Used only on ATSC-MH. + Series Concatenated Convolutional Code Rate. -Possible values are: - - -.. c:type:: atscmh_sccc_code_mode - -.. flat-table:: enum atscmh_sccc_code_mode - :header-rows: 1 - :stub-columns: 0 - - - - .. row 1 - - - ID - - - Description - - - .. row 2 - - - .. _ATSCMH-SCCC-CODE-HLF: - - ``ATSCMH_SCCC_CODE_HLF`` - - - The outer code rate of a SCCC Block is 1/2 rate. - - - .. row 3 - - - .. _ATSCMH-SCCC-CODE-QTR: - - ``ATSCMH_SCCC_CODE_QTR`` - - - The outer code rate of a SCCC Block is 1/4 rate. - - - .. row 4 - - - .. _ATSCMH-SCCC-CODE-RES: - - ``ATSCMH_SCCC_CODE_RES`` - - - to be documented. - - +The acceptable values are defined by :c:type:`atscmh_sccc_code_mode`. .. _DTV-ATSCMH-SCCC-CODE-MODE-B: DTV_ATSCMH_SCCC_CODE_MODE_B --------------------------- +Used only on ATSC-MH. + Series Concatenated Convolutional Code Rate. Possible values are the same as documented on enum @@ -1488,6 +793,8 @@ Possible values are the same as documented on enum DTV_ATSCMH_SCCC_CODE_MODE_C --------------------------- +Used only on ATSC-MH. + Series Concatenated Convolutional Code Rate. Possible values are the same as documented on enum @@ -1499,6 +806,8 @@ Possible values are the same as documented on enum DTV_ATSCMH_SCCC_CODE_MODE_D --------------------------- +Used only on ATSC-MH. + Series Concatenated Convolutional Code Rate. Possible values are the same as documented on enum @@ -1510,7 +819,7 @@ Possible values are the same as documented on enum DTV_API_VERSION =============== -Returns the major/minor version of the DVB API +Returns the major/minor version of the Digital TV API .. _DTV-CODE-RATE-HP: @@ -1518,8 +827,9 @@ Returns the major/minor version of the DVB API DTV_CODE_RATE_HP ================ -Used on terrestrial transmissions. The acceptable values are the ones -described at :c:type:`fe_transmit_mode`. +Used on terrestrial transmissions. + +The acceptable values are defined by :c:type:`fe_transmit_mode`. .. _DTV-CODE-RATE-LP: @@ -1527,8 +837,9 @@ described at :c:type:`fe_transmit_mode`. DTV_CODE_RATE_LP ================ -Used on terrestrial transmissions. The acceptable values are the ones -described at :c:type:`fe_transmit_mode`. +Used on terrestrial transmissions. 
+ +The acceptable values are defined by :c:type:`fe_transmit_mode`. .. _DTV-GUARD-INTERVAL: @@ -1536,242 +847,56 @@ described at :c:type:`fe_transmit_mode`. DTV_GUARD_INTERVAL ================== -Possible values are: +The acceptable values are defined by :c:type:`fe_guard_interval`. +.. note:: -.. c:type:: fe_guard_interval - -Modulation guard interval -------------------------- - - -.. flat-table:: enum fe_guard_interval - :header-rows: 1 - :stub-columns: 0 - - - - .. row 1 - - - ID - - - Description - - - .. row 2 - - - .. _GUARD-INTERVAL-AUTO: - - ``GUARD_INTERVAL_AUTO`` - - - Autodetect the guard interval - - - .. row 3 - - - .. _GUARD-INTERVAL-1-128: - - ``GUARD_INTERVAL_1_128`` - - - Guard interval 1/128 - - - .. row 4 - - - .. _GUARD-INTERVAL-1-32: - - ``GUARD_INTERVAL_1_32`` - - - Guard interval 1/32 - - - .. row 5 - - - .. _GUARD-INTERVAL-1-16: - - ``GUARD_INTERVAL_1_16`` - - - Guard interval 1/16 - - - .. row 6 - - - .. _GUARD-INTERVAL-1-8: - - ``GUARD_INTERVAL_1_8`` - - - Guard interval 1/8 - - - .. row 7 - - - .. _GUARD-INTERVAL-1-4: - - ``GUARD_INTERVAL_1_4`` - - - Guard interval 1/4 - - - .. row 8 - - - .. _GUARD-INTERVAL-19-128: - - ``GUARD_INTERVAL_19_128`` - - - Guard interval 19/128 - - - .. row 9 - - - .. _GUARD-INTERVAL-19-256: - - ``GUARD_INTERVAL_19_256`` - - - Guard interval 19/256 - - - .. row 10 - - - .. _GUARD-INTERVAL-PN420: - - ``GUARD_INTERVAL_PN420`` - - - PN length 420 (1/4) - - - .. row 11 - - - .. _GUARD-INTERVAL-PN595: - - ``GUARD_INTERVAL_PN595`` - - - PN length 595 (1/6) - - - .. row 12 - - - .. _GUARD-INTERVAL-PN945: - - ``GUARD_INTERVAL_PN945`` - - - PN length 945 (1/9) - - -Notes: - -1) If ``DTV_GUARD_INTERVAL`` is set the ``GUARD_INTERVAL_AUTO`` the -hardware will try to find the correct guard interval (if capable) and -will use TMCC to fill in the missing parameters. - -2) Intervals 1/128, 19/128 and 19/256 are used only for DVB-T2 at -present - -3) DTMB specifies PN420, PN595 and PN945. - + #. If ``DTV_GUARD_INTERVAL`` is set the ``GUARD_INTERVAL_AUTO`` the + hardware will try to find the correct guard interval (if capable) and + will use TMCC to fill in the missing parameters. + #. Intervals ``GUARD_INTERVAL_1_128``, ``GUARD_INTERVAL_19_128`` + and ``GUARD_INTERVAL_19_256`` are used only for DVB-T2 at + present. + #. Intervals ``GUARD_INTERVAL_PN420``, ``GUARD_INTERVAL_PN595`` and + ``GUARD_INTERVAL_PN945`` are used only for DMTB at the present. + On such standard, only those intervals and ``GUARD_INTERVAL_AUTO`` + are valid. .. _DTV-TRANSMISSION-MODE: DTV_TRANSMISSION_MODE ===================== -Specifies the number of carriers used by the standard. This is used only -on OFTM-based standards, e. g. DVB-T/T2, ISDB-T, DTMB +Used only on OFTM-based standards, e. g. DVB-T/T2, ISDB-T, DTMB. -.. c:type:: fe_transmit_mode +Specifies the FFT size (with corresponds to the approximate number of +carriers) used by the standard. -enum fe_transmit_mode: Number of carriers per channel ------------------------------------------------------ +The acceptable values are defined by :c:type:`fe_transmit_mode`. -.. tabularcolumns:: |p{5.0cm}|p{12.5cm}| +.. note:: -.. flat-table:: enum fe_transmit_mode - :header-rows: 1 - :stub-columns: 0 + #. ISDB-T supports three carrier/symbol-size: 8K, 4K, 2K. 
It is called + **mode** on such standard, and are numbered from 1 to 3: + ==== ======== ======================== + Mode FFT size Transmission mode + ==== ======== ======================== + 1 2K ``TRANSMISSION_MODE_2K`` + 2 4K ``TRANSMISSION_MODE_4K`` + 3 8K ``TRANSMISSION_MODE_8K`` + ==== ======== ======================== - - .. row 1 + #. If ``DTV_TRANSMISSION_MODE`` is set the ``TRANSMISSION_MODE_AUTO`` + the hardware will try to find the correct FFT-size (if capable) and + will use TMCC to fill in the missing parameters. - - ID + #. DVB-T specifies 2K and 8K as valid sizes. - - Description + #. DVB-T2 specifies 1K, 2K, 4K, 8K, 16K and 32K. - - .. row 2 - - - .. _TRANSMISSION-MODE-AUTO: - - ``TRANSMISSION_MODE_AUTO`` - - - Autodetect transmission mode. The hardware will try to find the - correct FFT-size (if capable) to fill in the missing parameters. - - - .. row 3 - - - .. _TRANSMISSION-MODE-1K: - - ``TRANSMISSION_MODE_1K`` - - - Transmission mode 1K - - - .. row 4 - - - .. _TRANSMISSION-MODE-2K: - - ``TRANSMISSION_MODE_2K`` - - - Transmission mode 2K - - - .. row 5 - - - .. _TRANSMISSION-MODE-8K: - - ``TRANSMISSION_MODE_8K`` - - - Transmission mode 8K - - - .. row 6 - - - .. _TRANSMISSION-MODE-4K: - - ``TRANSMISSION_MODE_4K`` - - - Transmission mode 4K - - - .. row 7 - - - .. _TRANSMISSION-MODE-16K: - - ``TRANSMISSION_MODE_16K`` - - - Transmission mode 16K - - - .. row 8 - - - .. _TRANSMISSION-MODE-32K: - - ``TRANSMISSION_MODE_32K`` - - - Transmission mode 32K - - - .. row 9 - - - .. _TRANSMISSION-MODE-C1: - - ``TRANSMISSION_MODE_C1`` - - - Single Carrier (C=1) transmission mode (DTMB) - - - .. row 10 - - - .. _TRANSMISSION-MODE-C3780: - - ``TRANSMISSION_MODE_C3780`` - - - Multi Carrier (C=3780) transmission mode (DTMB) - - -Notes: - -1) ISDB-T supports three carrier/symbol-size: 8K, 4K, 2K. It is called -'mode' in the standard: Mode 1 is 2K, mode 2 is 4K, mode 3 is 8K - -2) If ``DTV_TRANSMISSION_MODE`` is set the ``TRANSMISSION_MODE_AUTO`` -the hardware will try to find the correct FFT-size (if capable) and will -use TMCC to fill in the missing parameters. - -3) DVB-T specifies 2K and 8K as valid sizes. - -4) DVB-T2 specifies 1K, 2K, 4K, 8K, 16K and 32K. - -5) DTMB specifies C1 and C3780. + #. DTMB specifies C1 and C3780. .. _DTV-HIERARCHY: @@ -1779,66 +904,11 @@ use TMCC to fill in the missing parameters. DTV_HIERARCHY ============= -Frontend hierarchy +Used only on DVB-T and DVB-T2. +Frontend hierarchy. -.. c:type:: fe_hierarchy - -Frontend hierarchy ------------------- - - -.. flat-table:: enum fe_hierarchy - :header-rows: 1 - :stub-columns: 0 - - - - .. row 1 - - - ID - - - Description - - - .. row 2 - - - .. _HIERARCHY-NONE: - - ``HIERARCHY_NONE`` - - - No hierarchy - - - .. row 3 - - - .. _HIERARCHY-AUTO: - - ``HIERARCHY_AUTO`` - - - Autodetect hierarchy (if supported) - - - .. row 4 - - - .. _HIERARCHY-1: - - ``HIERARCHY_1`` - - - Hierarchy 1 - - - .. row 5 - - - .. _HIERARCHY-2: - - ``HIERARCHY_2`` - - - Hierarchy 2 - - - .. row 6 - - - .. _HIERARCHY-4: - - ``HIERARCHY_4`` - - - Hierarchy 4 - +The acceptable values are defined by :c:type:`fe_hierarchy`. .. _DTV-STREAM-ID: @@ -1846,8 +916,10 @@ Frontend hierarchy DTV_STREAM_ID ============= +Used on DVB-S2, DVB-T2 and ISDB-S. + DVB-S2, DVB-T2 and ISDB-S support the transmission of several streams on -a single transport stream. This property enables the DVB driver to +a single transport stream. This property enables the digital TV driver to handle substream filtering, when supported by the hardware. 
By default, substream filtering is disabled. @@ -1884,60 +956,17 @@ with it, rather than trying to use FE_GET_INFO. In the case of a legacy frontend, the result is just the same as with FE_GET_INFO, but in a more structured format +The acceptable values are defined by :c:type:`fe_delivery_system`. + .. _DTV-INTERLEAVING: DTV_INTERLEAVING ================ -Time interleaving to be used. Currently, used only on DTMB. - - -.. c:type:: fe_interleaving - -.. flat-table:: enum fe_interleaving - :header-rows: 1 - :stub-columns: 0 - - - - .. row 1 - - - ID - - - Description - - - .. row 2 - - - .. _INTERLEAVING-NONE: - - ``INTERLEAVING_NONE`` - - - No interleaving. - - - .. row 3 - - - .. _INTERLEAVING-AUTO: - - ``INTERLEAVING_AUTO`` - - - Auto-detect interleaving. - - - .. row 4 - - - .. _INTERLEAVING-240: - - ``INTERLEAVING_240`` - - - Interleaving of 240 symbols. - - - .. row 5 - - - .. _INTERLEAVING-720: - - ``INTERLEAVING_720`` - - - Interleaving of 720 symbols. +Time interleaving to be used. +The acceptable values are defined by :c:type:`fe_interleaving`. .. _DTV-LNA: diff --git a/Documentation/media/uapi/dvb/frontend-header.rst b/Documentation/media/uapi/dvb/frontend-header.rst new file mode 100644 index 000000000000..8d8433cf1e12 --- /dev/null +++ b/Documentation/media/uapi/dvb/frontend-header.rst @@ -0,0 +1,4 @@ +Frontend uAPI data types +======================== + +.. kernel-doc:: include/uapi/linux/dvb/frontend.h diff --git a/Documentation/media/uapi/dvb/frontend-property-terrestrial-systems.rst b/Documentation/media/uapi/dvb/frontend-property-terrestrial-systems.rst index dbc717cad9ee..0beb5cb3d729 100644 --- a/Documentation/media/uapi/dvb/frontend-property-terrestrial-systems.rst +++ b/Documentation/media/uapi/dvb/frontend-property-terrestrial-systems.rst @@ -100,7 +100,7 @@ to tune any ISDB-T/ISDB-Tsb hardware. Of course it is possible that some very sophisticated devices won't need certain parameters to tune. The information given here should help application writers to know how -to handle ISDB-T and ISDB-Tsb hardware using the Linux DVB-API. +to handle ISDB-T and ISDB-Tsb hardware using the Linux Digital TV API. The details given here about ISDB-T and ISDB-Tsb are just enough to basically show the dependencies between the needed parameter values, but diff --git a/Documentation/media/uapi/dvb/frontend.rst b/Documentation/media/uapi/dvb/frontend.rst index e051a9012540..4967c48d46ce 100644 --- a/Documentation/media/uapi/dvb/frontend.rst +++ b/Documentation/media/uapi/dvb/frontend.rst @@ -2,20 +2,22 @@ .. _dvb_frontend: -################ -DVB Frontend API -################ -The DVB frontend API was designed to support three types of delivery -systems: +####################### +Digital TV Frontend API +####################### + +The Digital TV frontend API was designed to support three groups of delivery +systems: Terrestrial, cable and Satellite. Currently, the following +delivery systems are supported: - Terrestrial systems: DVB-T, DVB-T2, ATSC, ATSC M/H, ISDB-T, DVB-H, DTMB, CMMB -- Cable systems: DVB-C Annex A/C, ClearQAM (DVB-C Annex B), ISDB-C +- Cable systems: DVB-C Annex A/C, ClearQAM (DVB-C Annex B) - Satellite systems: DVB-S, DVB-S2, DVB Turbo, ISDB-S, DSS -The DVB frontend controls several sub-devices including: +The Digital TV frontend controls several sub-devices including: - Tuner @@ -23,7 +25,7 @@ The DVB frontend controls several sub-devices including: - Low noise amplifier (LNA) -- Satellite Equipment Control (SEC) hardware (only for Satellite). 
+- Satellite Equipment Control (SEC) [#f1]_. The frontend can be accessed through ``/dev/dvb/adapter?/frontend?``. Data types and ioctl definitions can be accessed by including @@ -31,16 +33,18 @@ Data types and ioctl definitions can be accessed by including .. note:: - Transmission via the internet (DVB-IP) is not yet handled by this - API but a future extension is possible. + Transmission via the internet (DVB-IP) and MMT (MPEG Media Transport) + is not yet handled by this API but a future extension is possible. -On Satellite systems, the API support for the Satellite Equipment -Control (SEC) allows to power control and to send/receive signals to -control the antenna subsystem, selecting the polarization and choosing -the Intermediate Frequency IF) of the Low Noise Block Converter Feed -Horn (LNBf). It supports the DiSEqC and V-SEC protocols. The DiSEqC -(digital SEC) specification is available at -`Eutelsat `__. +.. [#f1] + + On Satellite systems, the API support for the Satellite Equipment + Control (SEC) allows to power control and to send/receive signals to + control the antenna subsystem, selecting the polarization and choosing + the Intermediate Frequency IF) of the Low Noise Block Converter Feed + Horn (LNBf). It supports the DiSEqC and V-SEC protocols. The DiSEqC + (digital SEC) specification is available at + `Eutelsat `__. .. toctree:: @@ -50,4 +54,3 @@ Horn (LNBf). It supports the DiSEqC and V-SEC protocols. The DiSEqC dvb-fe-read-status dvbproperty frontend_fcalls - frontend_legacy_dvbv3_api diff --git a/Documentation/media/uapi/dvb/frontend_f_close.rst b/Documentation/media/uapi/dvb/frontend_f_close.rst index f3b04b60246c..67958d73cf34 100644 --- a/Documentation/media/uapi/dvb/frontend_f_close.rst +++ b/Documentation/media/uapi/dvb/frontend_f_close.rst @@ -2,9 +2,9 @@ .. _frontend_f_close: -******************** -DVB frontend close() -******************** +*************************** +Digital TV frontend close() +*************************** Name ==== @@ -41,8 +41,10 @@ down automatically. Return Value ============ -The function returns 0 on success, -1 on failure and the ``errno`` is -set appropriately. Possible error codes: +On success 0 is returned. -EBADF - ``fd`` is not a valid open file descriptor. +On error -1 is returned, and the ``errno`` variable is set +appropriately. + +Generic error codes are described at the +:ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/frontend_f_open.rst b/Documentation/media/uapi/dvb/frontend_f_open.rst index 690eb375bdc1..8e8cb466c24b 100644 --- a/Documentation/media/uapi/dvb/frontend_f_open.rst +++ b/Documentation/media/uapi/dvb/frontend_f_open.rst @@ -2,9 +2,9 @@ .. _frontend_f_open: -******************* -DVB frontend open() -******************* +*************************** +Digital TV frontend open() +*************************** Name ==== @@ -79,24 +79,32 @@ On error, -1 is returned, and the ``errno`` variable is set appropriately. Possible error codes are: -EACCES - The caller has no permission to access the device. -EBUSY - The the device driver is already in use. +On success 0 is returned, and :c:type:`ca_slot_info` is filled. -ENXIO - No device corresponding to this device special file exists. +On error -1 is returned, and the ``errno`` variable is set +appropriately. -ENOMEM - Not enough kernel memory was available to complete the request. +.. tabularcolumns:: |p{2.5cm}|p{15.0cm}| -EMFILE - The process already has the maximum number of files open. +.. 
flat-table:: + :header-rows: 0 + :stub-columns: 0 + :widths: 1 16 -ENFILE - The limit on the total number of files open on the system has been - reached. + - - ``EPERM`` + - The caller has no permission to access the device. -ENODEV - The device got removed. + - - ``EBUSY`` + - The the device driver is already in use. + + - - ``EMFILE`` + - The process already has the maximum number of files open. + + - - ``ENFILE`` + - The limit on the total number of files open on the system has been + reached. + + +The generic error codes are described at the +:ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/frontend_h.rst b/Documentation/media/uapi/dvb/frontend_h.rst deleted file mode 100644 index 15fca04d1c32..000000000000 --- a/Documentation/media/uapi/dvb/frontend_h.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. -*- coding: utf-8; mode: rst -*- - -.. _frontend_h: - -************************ -DVB Frontend Header File -************************ - -.. kernel-include:: $BUILDDIR/frontend.h.rst diff --git a/Documentation/media/uapi/dvb/frontend_legacy_dvbv3_api.rst b/Documentation/media/uapi/dvb/frontend_legacy_dvbv3_api.rst index 7d4a091b7d7f..a4d5319cb76b 100644 --- a/Documentation/media/uapi/dvb/frontend_legacy_dvbv3_api.rst +++ b/Documentation/media/uapi/dvb/frontend_legacy_dvbv3_api.rst @@ -2,9 +2,9 @@ .. _frontend_legacy_dvbv3_api: -**************************************** -DVB Frontend legacy API (a. k. a. DVBv3) -**************************************** +*********************************************** +Digital TV Frontend legacy API (a. k. a. DVBv3) +*********************************************** The usage of this API is deprecated, as it doesn't support all digital TV standards, doesn't provide good statistics measurements and provides diff --git a/Documentation/media/uapi/dvb/headers.rst b/Documentation/media/uapi/dvb/headers.rst new file mode 100644 index 000000000000..c13fd537fbff --- /dev/null +++ b/Documentation/media/uapi/dvb/headers.rst @@ -0,0 +1,21 @@ +**************************** +Digital TV uAPI header files +**************************** + +Digital TV uAPI headers +*********************** + +.. kernel-include:: $BUILDDIR/frontend.h.rst + +.. kernel-include:: $BUILDDIR/dmx.h.rst + +.. kernel-include:: $BUILDDIR/ca.h.rst + +.. kernel-include:: $BUILDDIR/net.h.rst + +Legacy uAPI +*********** + +.. kernel-include:: $BUILDDIR/audio.h.rst + +.. kernel-include:: $BUILDDIR/video.h.rst diff --git a/Documentation/media/uapi/dvb/intro.rst b/Documentation/media/uapi/dvb/intro.rst index 652c4aacd2c6..79b4d0e4e920 100644 --- a/Documentation/media/uapi/dvb/intro.rst +++ b/Documentation/media/uapi/dvb/intro.rst @@ -13,15 +13,18 @@ What you need to know ===================== The reader of this document is required to have some knowledge in the -area of digital video broadcasting (DVB) and should be familiar with +area of digital video broadcasting (Digital TV) and should be familiar with part I of the MPEG2 specification ISO/IEC 13818 (aka ITU-T H.222), i.e you should know what a program/transport stream (PS/TS) is and what is meant by a packetized elementary stream (PES) or an I-frame. -Various DVB standards documents are available from http://www.dvb.org -and/or http://www.etsi.org. +Various Digital TV standards documents are available for download at: -It is also necessary to know how to access unix/linux devices and how to +- European standards (DVB): http://www.dvb.org and/or http://www.etsi.org. 
+- American standards (ATSC): https://www.atsc.org/standards/ +- Japanese standards (ISDB): http://www.dibeg.org/ + +It is also necessary to know how to access Linux devices and how to use ioctl calls. This also includes the knowledge of C or C++. @@ -30,21 +33,25 @@ use ioctl calls. This also includes the knowledge of C or C++. History ======= -The first API for DVB cards we used at Convergence in late 1999 was an +The first API for Digital TV cards we used at Convergence in late 1999 was an extension of the Video4Linux API which was primarily developed for frame -grabber cards. As such it was not really well suited to be used for DVB -cards and their new features like recording MPEG streams and filtering +grabber cards. As such it was not really well suited to be used for Digital +TV cards and their new features like recording MPEG streams and filtering several section and PES data streams at the same time. -In early 2000, we were approached by Nokia with a proposal for a new -standard Linux DVB API. As a commitment to the development of terminals +In early 2000, Convergence was approached by Nokia with a proposal for a new +standard Linux Digital TV API. As a commitment to the development of terminals based on open standards, Nokia and Convergence made it available to all Linux developers and published it on https://linuxtv.org in September -2000. Convergence is the maintainer of the Linux DVB API. Together with -the LinuxTV community (i.e. you, the reader of this document), the Linux -DVB API will be constantly reviewed and improved. With the Linux driver -for the Siemens/Hauppauge DVB PCI card Convergence provides a first -implementation of the Linux DVB API. +2000. With the Linux driver for the Siemens/Hauppauge DVB PCI card, +Convergence provided a first implementation of the Linux Digital TV API. +Convergence was the maintainer of the Linux Digital TV API in the early +days. + +Now, the API is maintained by the LinuxTV community (i.e. you, the reader +of this document). The Linux Digital TV API is constantly reviewed and +improved together with the improvements at the subsystem's core at the +Kernel. .. _overview: @@ -59,61 +66,65 @@ Overview :alt: dvbstb.svg :align: center - Components of a DVB card/STB + Components of a Digital TV card/STB -A DVB PCI card or DVB set-top-box (STB) usually consists of the +A Digital TV card or set-top-box (STB) usually consists of the following main hardware components: -- Frontend consisting of tuner and DVB demodulator - - Here the raw signal reaches the DVB hardware from a satellite dish or +Frontend consisting of tuner and digital TV demodulator + Here the raw signal reaches the digital TV hardware from a satellite dish or antenna or directly from cable. The frontend down-converts and demodulates this signal into an MPEG transport stream (TS). In case of a satellite frontend, this includes a facility for satellite equipment control (SEC), which allows control of LNB polarization, multi feed switches or dish rotors. -- Conditional Access (CA) hardware like CI adapters and smartcard slots - +Conditional Access (CA) hardware like CI adapters and smartcard slots The complete TS is passed through the CA hardware. Programs to which the user has access (controlled by the smart card) are decoded in real time and re-inserted into the TS. -- Demultiplexer which filters the incoming DVB stream + .. note:: + Not every digital TV hardware provides conditional access hardware. 
+ +Demultiplexer which filters the incoming Digital TV MPEG-TS stream The demultiplexer splits the TS into its components like audio and video streams. Besides usually several of such audio and video streams it also contains data streams with information about the programs offered in this or other streams of the same provider. -- MPEG2 audio and video decoder +Audio and video decoder + The main targets of the demultiplexer are audio and video + decoders. After decoding, they pass on the uncompressed audio and + video to the computer screen or to a TV set. - The main targets of the demultiplexer are the MPEG2 audio and video - decoders. After decoding they pass on the uncompressed audio and - video to the computer screen or (through a PAL/NTSC encoder) to a TV - set. + .. note:: + + Modern hardware usually doesn't have a separate decoder hardware, as + such functionality can be provided by the main CPU, by the graphics + adapter of the system or by a signal processing hardware embedded on + a Systems on a Chip (SoC) integrated circuit. + + It may also not be needed for certain usages (e.g. for data-only + uses like “internet over satellite”). :ref:`stb_components` shows a crude schematic of the control and data flow between those components. -On a DVB PCI card not all of these have to be present since some -functionality can be provided by the main CPU of the PC (e.g. MPEG -picture and sound decoding) or is not needed (e.g. for data-only uses -like “internet over satellite”). Also not every card or STB provides -conditional access hardware. .. _dvb_devices: -Linux DVB Devices -================= +Linux Digital TV Devices +======================== -The Linux DVB API lets you control these hardware components through +The Linux Digital TV API lets you control these hardware components through currently six Unix-style character devices for video, audio, frontend, demux, CA and IP-over-DVB networking. The video and audio devices control the MPEG2 decoder hardware, the frontend device the tuner and -the DVB demodulator. The demux device gives you control over the PES and -section filters of the hardware. If the hardware does not support +the Digital TV demodulator. The demux device gives you control over the PES +and section filters of the hardware. If the hardware does not support filtering these filters can be implemented in software. Finally, the CA device controls all the conditional access capabilities of the hardware. It can depend on the individual security requirements of the platform, @@ -137,9 +148,9 @@ individual devices are called: - ``/dev/dvb/adapterN/caM``, -where N enumerates the DVB PCI cards in a system starting from 0, and M -enumerates the devices of each type within each adapter, starting -from 0, too. We will omit the “ ``/dev/dvb/adapterN/``\ ” in the further +where ``N`` enumerates the Digital TV cards in a system starting from 0, and +``M`` enumerates the devices of each type within each adapter, starting +from 0, too. We will omit the “``/dev/dvb/adapterN/``\ ” in the further discussion of these devices. More details about the data structures and function calls of all the @@ -151,8 +162,8 @@ devices are described in the following chapters. API include files ================= -For each of the DVB devices a corresponding include file exists. The DVB -API include files should be included in application sources with a +For each of the Digital TV devices a corresponding include file exists. 
The +Digital TV API include files should be included in application sources with a partial path like: diff --git a/Documentation/media/uapi/dvb/legacy_dvb_apis.rst b/Documentation/media/uapi/dvb/legacy_dvb_apis.rst index 2957f5a988b0..e1b2c9c7b620 100644 --- a/Documentation/media/uapi/dvb/legacy_dvb_apis.rst +++ b/Documentation/media/uapi/dvb/legacy_dvb_apis.rst @@ -2,19 +2,31 @@ .. _legacy_dvb_apis: -******************* -DVB Deprecated APIs -******************* +*************************** +Digital TV Deprecated APIs +*************************** -The APIs described here are kept only for historical reasons. There's -just one driver for a very legacy hardware that uses this API. No modern -drivers should use it. Instead, audio and video should be using the V4L2 -and ALSA APIs, and the pipelines should be set using the Media -Controller API +The APIs described here **should not** be used on new drivers or applications. + +The DVBv3 frontend API has issues with new delivery systems, including +DVB-S2, DVB-T2, ISDB, etc. + +There's just one driver for a very legacy hardware using the Digital TV +audio and video APIs. No modern drivers should use it. Instead, audio and +video should be using the V4L2 and ALSA APIs, and the pipelines should +be set via the Media Controller API. + +.. attention:: + + The APIs described here doesn't necessarily reflect the current + code implementation, as this section of the document was written + for DVB version 1, while the code reflects DVB version 3 + implementation. .. toctree:: :maxdepth: 1 + frontend_legacy_dvbv3_api video audio diff --git a/Documentation/media/uapi/dvb/net-add-if.rst b/Documentation/media/uapi/dvb/net-add-if.rst index 82ce2438213f..6749b70246c5 100644 --- a/Documentation/media/uapi/dvb/net-add-if.rst +++ b/Documentation/media/uapi/dvb/net-add-if.rst @@ -41,43 +41,13 @@ created. The struct :c:type:`dvb_net_if`::ifnum field will be filled with the number of the created interface. -.. c:type:: dvb_net_if - -.. flat-table:: struct dvb_net_if - :header-rows: 1 - :stub-columns: 0 - - - - .. row 1 - - - ID - - - Description - - - .. row 2 - - - pid - - - Packet ID (PID) of the MPEG-TS that contains data - - - .. row 3 - - - ifnum - - - number of the DVB interface. - - - .. row 4 - - - feedtype - - - Encapsulation type of the feed. It can be: - ``DVB_NET_FEEDTYPE_MPE`` for MPE encoding or - ``DVB_NET_FEEDTYPE_ULE`` for ULE encoding. - - Return Value ============ -On success 0 is returned, on error -1 and the ``errno`` variable is set -appropriately. The generic error codes are described at the +On success 0 is returned, and :c:type:`ca_slot_info` is filled. + +On error -1 is returned, and the ``errno`` variable is set +appropriately. + +The generic error codes are described at the :ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/net-get-if.rst b/Documentation/media/uapi/dvb/net-get-if.rst index 1bb8ee0cbced..3733b34da9db 100644 --- a/Documentation/media/uapi/dvb/net-get-if.rst +++ b/Documentation/media/uapi/dvb/net-get-if.rst @@ -43,6 +43,10 @@ the ``errno`` with ``EINVAL`` error code. Return Value ============ -On success 0 is returned, on error -1 and the ``errno`` variable is set -appropriately. The generic error codes are described at the +On success 0 is returned, and :c:type:`ca_slot_info` is filled. + +On error -1 is returned, and the ``errno`` variable is set +appropriately. + +The generic error codes are described at the :ref:`Generic Error Codes ` chapter. 
diff --git a/Documentation/media/uapi/dvb/net-remove-if.rst b/Documentation/media/uapi/dvb/net-remove-if.rst index 646af23a925a..4ebe07a6b79a 100644 --- a/Documentation/media/uapi/dvb/net-remove-if.rst +++ b/Documentation/media/uapi/dvb/net-remove-if.rst @@ -39,6 +39,10 @@ The NET_REMOVE_IF ioctl deletes an interface previously created via Return Value ============ -On success 0 is returned, on error -1 and the ``errno`` variable is set -appropriately. The generic error codes are described at the +On success 0 is returned, and :c:type:`ca_slot_info` is filled. + +On error -1 is returned, and the ``errno`` variable is set +appropriately. + +The generic error codes are described at the :ref:`Generic Error Codes ` chapter. diff --git a/Documentation/media/uapi/dvb/net-types.rst b/Documentation/media/uapi/dvb/net-types.rst new file mode 100644 index 000000000000..e1177bdcd623 --- /dev/null +++ b/Documentation/media/uapi/dvb/net-types.rst @@ -0,0 +1,9 @@ +.. -*- coding: utf-8; mode: rst -*- + +.. _dmx_types: + +************** +Net Data Types +************** + +.. kernel-doc:: include/uapi/linux/dvb/net.h diff --git a/Documentation/media/uapi/dvb/net.rst b/Documentation/media/uapi/dvb/net.rst index eca42dd53261..e0cd4e402627 100644 --- a/Documentation/media/uapi/dvb/net.rst +++ b/Documentation/media/uapi/dvb/net.rst @@ -2,10 +2,11 @@ .. _net: -############### -DVB Network API -############### -The DVB net device controls the mapping of data packages that are part +###################### +Digital TV Network API +###################### + +The Digital TV net device controls the mapping of data packages that are part of a transport stream to be mapped into a virtual network interface, visible through the standard Linux network protocol stack. @@ -28,13 +29,13 @@ header. .. _net_fcalls: -###################### -DVB net Function Calls -###################### +Digital TV net Function Calls +############################# .. toctree:: :maxdepth: 1 + net-types net-add-if net-remove-if net-get-if diff --git a/Documentation/media/uapi/dvb/net_h.rst b/Documentation/media/uapi/dvb/net_h.rst deleted file mode 100644 index 7bcf5ba9d1eb..000000000000 --- a/Documentation/media/uapi/dvb/net_h.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. -*- coding: utf-8; mode: rst -*- - -.. _net_h: - -*********************** -DVB Network Header File -*********************** - -.. kernel-include:: $BUILDDIR/net.h.rst diff --git a/Documentation/media/uapi/dvb/query-dvb-frontend-info.rst b/Documentation/media/uapi/dvb/query-dvb-frontend-info.rst index 81cd9b92a36c..51ec0b04b496 100644 --- a/Documentation/media/uapi/dvb/query-dvb-frontend-info.rst +++ b/Documentation/media/uapi/dvb/query-dvb-frontend-info.rst @@ -9,5 +9,5 @@ Querying frontend information Usually, the first thing to do when the frontend is opened is to check the frontend capabilities. This is done using :ref:`FE_GET_INFO`. This ioctl will enumerate the -DVB API version and other characteristics about the frontend, and can be -opened either in read only or read/write mode. +Digital TV API version and other characteristics about the frontend, and can +be opened either in read only or read/write mode. diff --git a/Documentation/media/uapi/dvb/video-continue.rst b/Documentation/media/uapi/dvb/video-continue.rst index 030c2ec98869..e65e600be632 100644 --- a/Documentation/media/uapi/dvb/video-continue.rst +++ b/Documentation/media/uapi/dvb/video-continue.rst @@ -44,7 +44,7 @@ Arguments Description ----------- -This ioctl is for DVB devices only. 
To control a V4L2 decoder use the +This ioctl is for Digital TV devices only. To control a V4L2 decoder use the V4L2 :ref:`VIDIOC_DECODER_CMD` instead. This ioctl call restarts decoding and playing processes of the video diff --git a/Documentation/media/uapi/dvb/video-freeze.rst b/Documentation/media/uapi/dvb/video-freeze.rst index 9cef65a02e8d..5a28bdc8badd 100644 --- a/Documentation/media/uapi/dvb/video-freeze.rst +++ b/Documentation/media/uapi/dvb/video-freeze.rst @@ -44,14 +44,14 @@ Arguments Description ----------- -This ioctl is for DVB devices only. To control a V4L2 decoder use the +This ioctl is for Digital TV devices only. To control a V4L2 decoder use the V4L2 :ref:`VIDIOC_DECODER_CMD` instead. This ioctl call suspends the live video stream being played. Decoding and playing are frozen. It is then possible to restart the decoding and playing process of the video stream using the VIDEO_CONTINUE command. If VIDEO_SOURCE_MEMORY is selected in the ioctl call -VIDEO_SELECT_SOURCE, the DVB subsystem will not decode any more data +VIDEO_SELECT_SOURCE, the Digital TV subsystem will not decode any more data until the ioctl call VIDEO_CONTINUE or VIDEO_PLAY is performed. diff --git a/Documentation/media/uapi/dvb/video-get-event.rst b/Documentation/media/uapi/dvb/video-get-event.rst index 6ad14cdb894a..b4f53616db9a 100644 --- a/Documentation/media/uapi/dvb/video-get-event.rst +++ b/Documentation/media/uapi/dvb/video-get-event.rst @@ -50,7 +50,7 @@ Arguments Description ----------- -This ioctl is for DVB devices only. To get events from a V4L2 decoder +This ioctl is for Digital TV devices only. To get events from a V4L2 decoder use the V4L2 :ref:`VIDIOC_DQEVENT` ioctl instead. This ioctl call returns an event of type video_event if available. If diff --git a/Documentation/media/uapi/dvb/video-play.rst b/Documentation/media/uapi/dvb/video-play.rst index 3f66ae3b7e35..2124120aec22 100644 --- a/Documentation/media/uapi/dvb/video-play.rst +++ b/Documentation/media/uapi/dvb/video-play.rst @@ -44,7 +44,7 @@ Arguments Description ----------- -This ioctl is for DVB devices only. To control a V4L2 decoder use the +This ioctl is for Digital TV devices only. To control a V4L2 decoder use the V4L2 :ref:`VIDIOC_DECODER_CMD` instead. This ioctl call asks the Video Device to start playing a video stream diff --git a/Documentation/media/uapi/dvb/video-select-source.rst b/Documentation/media/uapi/dvb/video-select-source.rst index 2f4fbf4b490c..cde6542723ca 100644 --- a/Documentation/media/uapi/dvb/video-select-source.rst +++ b/Documentation/media/uapi/dvb/video-select-source.rst @@ -50,7 +50,7 @@ Arguments Description ----------- -This ioctl is for DVB devices only. This ioctl was also supported by the +This ioctl is for Digital TV devices only. This ioctl was also supported by the V4L2 ivtv driver, but that has been replaced by the ivtv-specific ``IVTV_IOC_PASSTHROUGH_MODE`` ioctl. diff --git a/Documentation/media/uapi/dvb/video-stop.rst b/Documentation/media/uapi/dvb/video-stop.rst index fb827effb276..474309ad31c2 100644 --- a/Documentation/media/uapi/dvb/video-stop.rst +++ b/Documentation/media/uapi/dvb/video-stop.rst @@ -60,7 +60,7 @@ Arguments Description ----------- -This ioctl is for DVB devices only. To control a V4L2 decoder use the +This ioctl is for Digital TV devices only. To control a V4L2 decoder use the V4L2 :ref:`VIDIOC_DECODER_CMD` instead. 
This ioctl call asks the Video Device to stop playing the current diff --git a/Documentation/media/uapi/dvb/video.rst b/Documentation/media/uapi/dvb/video.rst index 60d43fb7ce22..e7d68cd0cf23 100644 --- a/Documentation/media/uapi/dvb/video.rst +++ b/Documentation/media/uapi/dvb/video.rst @@ -2,20 +2,21 @@ .. _dvb_video: -################ -DVB Video Device -################ -The DVB video device controls the MPEG2 video decoder of the DVB -hardware. It can be accessed through **/dev/dvb/adapter0/video0**. Data +####################### +Digital TV Video Device +####################### + +The Digital TV video device controls the MPEG2 video decoder of the Digital +TV hardware. It can be accessed through **/dev/dvb/adapter0/video0**. Data types and and ioctl definitions can be accessed by including **linux/dvb/video.h** in your application. -Note that the DVB video device only controls decoding of the MPEG video +Note that the Digital TV video device only controls decoding of the MPEG video stream, not its presentation on the TV or computer screen. On PCs this is typically handled by an associated video4linux device, e.g. **/dev/video**, which allows scaling and defining output windows. -Some DVB cards don’t have their own MPEG decoder, which results in the +Some Digital TV cards don’t have their own MPEG decoder, which results in the omission of the audio and video device as well as the video4linux device. diff --git a/Documentation/media/uapi/dvb/video_h.rst b/Documentation/media/uapi/dvb/video_h.rst deleted file mode 100644 index 3f39b0c4879c..000000000000 --- a/Documentation/media/uapi/dvb/video_h.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. -*- coding: utf-8; mode: rst -*- - -.. _video_h: - -********************* -DVB Video Header File -********************* - -.. kernel-include:: $BUILDDIR/video.h.rst diff --git a/Documentation/media/uapi/gen-errors.rst b/Documentation/media/uapi/gen-errors.rst index d39e34d1b19d..689d3b101ede 100644 --- a/Documentation/media/uapi/gen-errors.rst +++ b/Documentation/media/uapi/gen-errors.rst @@ -17,9 +17,7 @@ Generic Error Codes :widths: 1 16 - - .. row 1 - - - ``EAGAIN`` (aka ``EWOULDBLOCK``) + - - ``EAGAIN`` (aka ``EWOULDBLOCK``) - The ioctl can't be handled because the device is in state where it can't perform it. This could happen for example in case where @@ -27,15 +25,11 @@ Generic Error Codes is also returned when the ioctl would need to wait for an event, but the device was opened in non-blocking mode. - - .. row 2 - - - ``EBADF`` + - - ``EBADF`` - The file descriptor is not a valid. - - .. row 3 - - - ``EBUSY`` + - - ``EBUSY`` - The ioctl can't be handled because the device is busy. This is typically return while device is streaming, and an ioctl tried to @@ -44,64 +38,53 @@ Generic Error Codes ioctl must not be retried without performing another action to fix the problem first (typically: stop the stream before retrying). - - .. row 4 - - - ``EFAULT`` + - - ``EFAULT`` - There was a failure while copying data from/to userspace, probably caused by an invalid pointer reference. - - .. row 5 - - - ``EINVAL`` + - - ``EINVAL`` - One or more of the ioctl parameters are invalid or out of the allowed range. This is a widely used error code. See the individual ioctl requests for specific causes. - - .. row 6 - - - ``ENODEV`` + - - ``ENODEV`` - Device not found or was removed. - - .. row 7 - - - ``ENOMEM`` + - - ``ENOMEM`` - There's not enough memory to handle the desired operation. - - .. 
row 8 - - - ``ENOTTY`` + - - ``ENOTTY`` - The ioctl is not supported by the driver, actually meaning that the required functionality is not available, or the file descriptor is not for a media device. - - .. row 9 - - - ``ENOSPC`` + - - ``ENOSPC`` - On USB devices, the stream ioctl's can return this error, meaning that this request would overcommit the usb bandwidth reserved for periodic transfers (up to 80% of the USB bandwidth). - - .. row 10 - - - ``EPERM`` + - - ``EPERM`` - Permission denied. Can be returned if the device needs write permission, or some special capabilities is needed (e. g. root) - - .. row 11 - - - ``EIO`` + - - ``EIO`` - I/O error. Typically used when there are problems communicating with a hardware device. This could indicate broken or flaky hardware. It's a 'Something is wrong, I give up!' type of error. + - - ``ENXIO`` + + - No device corresponding to this device special file exists. + + .. note:: #. This list is not exhaustive; ioctls may return other error codes. diff --git a/Documentation/media/uapi/mediactl/media-controller.rst b/Documentation/media/uapi/mediactl/media-controller.rst index 7ae38d48969e..0eea4f9a07d5 100644 --- a/Documentation/media/uapi/mediactl/media-controller.rst +++ b/Documentation/media/uapi/mediactl/media-controller.rst @@ -8,7 +8,9 @@ Part IV - Media Controller API ############################## -.. class:: toc-title +.. only:: html + + .. class:: toc-title Table of Contents diff --git a/Documentation/media/uapi/mediactl/media-ioc-enum-entities.rst b/Documentation/media/uapi/mediactl/media-ioc-enum-entities.rst index 0fd329279bef..b59ce149efb5 100644 --- a/Documentation/media/uapi/mediactl/media-ioc-enum-entities.rst +++ b/Documentation/media/uapi/mediactl/media-ioc-enum-entities.rst @@ -51,7 +51,7 @@ id's until they get an error. .. c:type:: media_entity_desc -.. tabularcolumns:: |p{1.5cm}|p{1.5cm}|p{1.5cm}|p{1.5cm}|p{11.5cm}| +.. tabularcolumns:: |p{1.5cm}|p{1.7cm}|p{1.6cm}|p{1.5cm}|p{11.2cm}| .. flat-table:: struct media_entity_desc :header-rows: 0 diff --git a/Documentation/media/uapi/mediactl/media-ioc-g-topology.rst b/Documentation/media/uapi/mediactl/media-ioc-g-topology.rst index add8281494f8..997e6b17440d 100644 --- a/Documentation/media/uapi/mediactl/media-ioc-g-topology.rst +++ b/Documentation/media/uapi/mediactl/media-ioc-g-topology.rst @@ -46,7 +46,7 @@ other values untouched. If the ``topology_version`` remains the same, the ioctl should fill the desired arrays with the media graph elements. -.. tabularcolumns:: |p{1.6cm}|p{3.2cm}|p{12.7cm}| +.. tabularcolumns:: |p{1.6cm}|p{3.4cm}|p{12.5cm}| .. c:type:: media_v2_topology diff --git a/Documentation/media/uapi/mediactl/media-types.rst b/Documentation/media/uapi/mediactl/media-types.rst index 71078565d644..8d64b0c06ebc 100644 --- a/Documentation/media/uapi/mediactl/media-types.rst +++ b/Documentation/media/uapi/mediactl/media-types.rst @@ -5,7 +5,7 @@ Types and flags used to represent the media graph elements ========================================================== -.. tabularcolumns:: |p{8.0cm}|p{10.5cm}| +.. tabularcolumns:: |p{8.2cm}|p{10.3cm}| .. _media-entity-type: diff --git a/Documentation/media/uapi/rc/rc-sysfs-nodes.rst b/Documentation/media/uapi/rc/rc-sysfs-nodes.rst index 3476ae29708f..2d01358d5504 100644 --- a/Documentation/media/uapi/rc/rc-sysfs-nodes.rst +++ b/Documentation/media/uapi/rc/rc-sysfs-nodes.rst @@ -34,9 +34,9 @@ receiver device where N is the number of the receiver. 
/sys/class/rc/rcN/protocols =========================== -Reading this file returns a list of available protocols, something like: +Reading this file returns a list of available protocols, something like:: -``rc5 [rc6] nec jvc [sony]`` + rc5 [rc6] nec jvc [sony] Enabled protocols are shown in [] brackets. @@ -90,11 +90,11 @@ This value may be reset to 0 if the current protocol is altered. ================================== Reading this file returns a list of available protocols to use for the -wakeup filter, something like: +wakeup filter, something like:: -``rc-5 nec nec-x rc-6-0 rc-6-6a-24 [rc-6-6a-32] rc-6-mce`` + rc-5 nec nec-x rc-6-0 rc-6-6a-24 [rc-6-6a-32] rc-6-mce -Note that protocol variants are listed, so "nec", "sony", "rc-5", "rc-6" +Note that protocol variants are listed, so ``nec``, ``sony``, ``rc-5``, ``rc-6`` have their different bit length encodings listed if available. Note that all protocol variants are listed. diff --git a/Documentation/media/uapi/rc/remote_controllers.rst b/Documentation/media/uapi/rc/remote_controllers.rst index 3e25cc9f65e0..46a8acb82125 100644 --- a/Documentation/media/uapi/rc/remote_controllers.rst +++ b/Documentation/media/uapi/rc/remote_controllers.rst @@ -8,7 +8,9 @@ Part III - Remote Controller API ################################ -.. class:: toc-title +.. only:: html + + .. class:: toc-title Table of Contents diff --git a/Documentation/media/uapi/v4l/pixfmt-006.rst b/Documentation/media/uapi/v4l/colorspaces-defs.rst similarity index 98% rename from Documentation/media/uapi/v4l/pixfmt-006.rst rename to Documentation/media/uapi/v4l/colorspaces-defs.rst index 7ae7dcf73f63..410907fe9415 100644 --- a/Documentation/media/uapi/v4l/pixfmt-006.rst +++ b/Documentation/media/uapi/v4l/colorspaces-defs.rst @@ -76,6 +76,8 @@ whole range, 0-255, dividing the angular value by 1.41. The enum .. c:type:: v4l2_xfer_func +.. tabularcolumns:: |p{5.5cm}|p{12.0cm}| + .. flat-table:: V4L2 Transfer Function :header-rows: 1 :stub-columns: 0 @@ -97,7 +99,7 @@ whole range, 0-255, dividing the angular value by 1.41. The enum * - ``V4L2_XFER_FUNC_DCI_P3`` - Use the DCI-P3 transfer function. * - ``V4L2_XFER_FUNC_SMPTE2084`` - - Use the SMPTE 2084 transfer function. + - Use the SMPTE 2084 transfer function. See :ref:`xf-smpte-2084`. diff --git a/Documentation/media/uapi/v4l/pixfmt-007.rst b/Documentation/media/uapi/v4l/colorspaces-details.rst similarity index 92% rename from Documentation/media/uapi/v4l/pixfmt-007.rst rename to Documentation/media/uapi/v4l/colorspaces-details.rst index 0c30ee2577d3..b5d551b9cc8f 100644 --- a/Documentation/media/uapi/v4l/pixfmt-007.rst +++ b/Documentation/media/uapi/v4l/colorspaces-details.rst @@ -418,6 +418,11 @@ Inverse Transfer function: L = \left( \frac{L' + 0.099}{1.099}\right) ^{\frac{1}{0.45} }\text{, for } L' \ge 0.081 +Please note that while Rec. 709 is defined as the default transfer function +by the :ref:`itu2020` standard, in practice this colorspace is often used +with the :ref:`xf-smpte-2084`. In particular Ultra HD Blu-ray discs use +this combination. + The luminance (Y') and color difference (Cb and Cr) are obtained with the following ``V4L2_YCBCR_ENC_BT2020`` encoding: @@ -758,3 +763,45 @@ scaled to [-128…128] and then clipped to [-128…127]. ``V4L2_COLORSPACE_JPEG`` can be considered to be an abbreviation for ``V4L2_COLORSPACE_SRGB``, ``V4L2_YCBCR_ENC_601`` and ``V4L2_QUANTIZATION_FULL_RANGE``. + +*************************************** +Detailed Transfer Function Descriptions +*************************************** + +.. 
_xf-smpte-2084: + +Transfer Function SMPTE 2084 (V4L2_XFER_FUNC_SMPTE2084) +======================================================= + +The :ref:`smpte2084` standard defines the transfer function used by +High Dynamic Range content. + +Constants: + m1 = (2610 / 4096) / 4 + + m2 = (2523 / 4096) * 128 + + c1 = 3424 / 4096 + + c2 = (2413 / 4096) * 32 + + c3 = (2392 / 4096) * 32 + +Transfer function: + L' = ((c1 + c2 * L\ :sup:`m1`) / (1 + c3 * L\ :sup:`m1`))\ :sup:`m2` + +Inverse Transfer function: + L = (max(L':sup:`1/m2` - c1, 0) / (c2 - c3 * + L'\ :sup:`1/m2`))\ :sup:`1/m1` + +Take care when converting between this transfer function and non-HDR transfer +functions: the linear RGB values [0…1] of HDR content map to a luminance range +of 0 to 10000 cd/m\ :sup:`2` whereas the linear RGB values of non-HDR (aka +Standard Dynamic Range or SDR) map to a luminance range of 0 to 100 cd/m\ :sup:`2`. + +To go from SDR to HDR you will have to divide L by 100 first. To go in the other +direction you will have to multiply L by 100. Of course, this clamps all +luminance values over 100 cd/m\ :sup:`2` to 100 cd/m\ :sup:`2`. + +There are better methods, see e.g. :ref:`colimg` for more in-depth information +about this. diff --git a/Documentation/media/uapi/v4l/dev-meta.rst b/Documentation/media/uapi/v4l/dev-meta.rst index 62518adfe37b..f7ac8d0d3af1 100644 --- a/Documentation/media/uapi/v4l/dev-meta.rst +++ b/Documentation/media/uapi/v4l/dev-meta.rst @@ -42,6 +42,8 @@ the :c:type:`v4l2_format` structure to 0. .. _v4l2-meta-format: +.. tabularcolumns:: |p{1.4cm}|p{2.2cm}|p{13.9cm}| + .. flat-table:: struct v4l2_meta_format :header-rows: 0 :stub-columns: 0 diff --git a/Documentation/media/uapi/v4l/dev-sliced-vbi.rst b/Documentation/media/uapi/v4l/dev-sliced-vbi.rst index 5f6d534ea73b..9d6c860271cb 100644 --- a/Documentation/media/uapi/v4l/dev-sliced-vbi.rst +++ b/Documentation/media/uapi/v4l/dev-sliced-vbi.rst @@ -105,7 +105,13 @@ which may return ``EBUSY`` can be the struct v4l2_sliced_vbi_format ----------------------------- -.. tabularcolumns:: |p{1.0cm}|p{4.5cm}|p{4.0cm}|p{4.0cm}|p{4.0cm}| +.. raw:: latex + + \begingroup + \scriptsize + \setlength{\tabcolsep}{2pt} + +.. tabularcolumns:: |p{.75cm}|p{3.3cm}|p{3.4cm}|p{3.4cm}|p{3.4cm}| .. cssclass:: longtable @@ -199,6 +205,9 @@ struct v4l2_sliced_vbi_format Applications and drivers must set it to zero. +.. raw:: latex + + \endgroup .. _vbi-services2: @@ -207,9 +216,9 @@ Sliced VBI services .. raw:: latex - \begin{adjustbox}{width=\columnwidth} + \footnotesize -.. tabularcolumns:: |p{5.0cm}|p{1.4cm}|p{3.0cm}|p{2.5cm}|p{9.0cm}| +.. tabularcolumns:: |p{4.1cm}|p{1.1cm}|p{2.4cm}|p{2.0cm}|p{7.3cm}| .. flat-table:: :header-rows: 1 @@ -263,7 +272,7 @@ Sliced VBI services .. raw:: latex - \end{adjustbox}\newline\newline + \normalsize Drivers may return an ``EINVAL`` error code when applications attempt to @@ -457,7 +466,7 @@ number). struct v4l2_mpeg_vbi_fmt_ivtv ----------------------------- -.. tabularcolumns:: |p{1.0cm}|p{3.5cm}|p{1.0cm}|p{11.5cm}| +.. tabularcolumns:: |p{1.0cm}|p{3.8cm}|p{1.0cm}|p{11.2cm}| .. flat-table:: :header-rows: 0 @@ -525,7 +534,7 @@ Magic Constants for struct v4l2_mpeg_vbi_fmt_ivtv magic field structs v4l2_mpeg_vbi_itv0 and v4l2_mpeg_vbi_ITV0 ------------------------------------------------- -.. tabularcolumns:: |p{4.4cm}|p{2.4cm}|p{10.7cm}| +.. tabularcolumns:: |p{4.9cm}|p{2.4cm}|p{10.2cm}| .. 
flat-table:: :header-rows: 0 @@ -574,7 +583,7 @@ structs v4l2_mpeg_vbi_itv0 and v4l2_mpeg_vbi_ITV0 struct v4l2_mpeg_vbi_ITV0 ------------------------- -.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}| +.. tabularcolumns:: |p{4.9cm}|p{4.4cm}|p{8.2cm}| .. flat-table:: :header-rows: 0 diff --git a/Documentation/media/uapi/v4l/dev-subdev.rst b/Documentation/media/uapi/v4l/dev-subdev.rst index f0e762167730..d20d945803a7 100644 --- a/Documentation/media/uapi/v4l/dev-subdev.rst +++ b/Documentation/media/uapi/v4l/dev-subdev.rst @@ -204,9 +204,9 @@ list entity names and pad numbers). .. raw:: latex - \begin{adjustbox}{width=\columnwidth} + \tiny -.. tabularcolumns:: |p{4.5cm}|p{4.5cm}|p{4.5cm}|p{4.5cm}|p{4.5cm}|p{4.5cm}|p{4.5cm}| +.. tabularcolumns:: |p{2.0cm}|p{2.3cm}|p{2.3cm}|p{2.3cm}|p{2.3cm}|p{2.3cm}|p{2.3cm}| .. _sample-pipeline-config: @@ -253,7 +253,7 @@ list entity names and pad numbers). .. raw:: latex - \end{adjustbox}\newline\newline + \normalsize 1. Initial state. The sensor source pad format is set to its native 3MP size and V4L2_MBUS_FMT_SGRBG8_1X8 media bus code. Formats on the @@ -370,7 +370,7 @@ circumstances. This may also cause the accessed rectangle to be adjusted by the driver, depending on the properties of the underlying hardware. The coordinates to a step always refer to the actual size of the -previous step. The exception to this rule is the source compose +previous step. The exception to this rule is the sink compose rectangle, which refers to the sink compose bounds rectangle --- if it is supported by the hardware. diff --git a/Documentation/media/uapi/v4l/driver.rst b/Documentation/media/uapi/v4l/driver.rst deleted file mode 100644 index 2319b383f0a4..000000000000 --- a/Documentation/media/uapi/v4l/driver.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. -*- coding: utf-8; mode: rst -*- - -.. 
_driver: - -*********************** -V4L2 Driver Programming -*********************** - -to do diff --git a/Documentation/media/uapi/v4l/extended-controls.rst b/Documentation/media/uapi/v4l/extended-controls.rst index 9acc9cad49e2..a3e81c1d276b 100644 --- a/Documentation/media/uapi/v4l/extended-controls.rst +++ b/Documentation/media/uapi/v4l/extended-controls.rst @@ -942,21 +942,21 @@ enum v4l2_mpeg_video_mpeg4_level - :header-rows: 0 :stub-columns: 0 - * - ``V4L2_MPEG_VIDEO_LEVEL_0`` + * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_0`` - Level 0 - * - ``V4L2_MPEG_VIDEO_LEVEL_0B`` + * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_0B`` - Level 0b - * - ``V4L2_MPEG_VIDEO_LEVEL_1`` + * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_1`` - Level 1 - * - ``V4L2_MPEG_VIDEO_LEVEL_2`` + * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_2`` - Level 2 - * - ``V4L2_MPEG_VIDEO_LEVEL_3`` + * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_3`` - Level 3 - * - ``V4L2_MPEG_VIDEO_LEVEL_3B`` + * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_3B`` - Level 3b - * - ``V4L2_MPEG_VIDEO_LEVEL_4`` + * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_4`` - Level 4 - * - ``V4L2_MPEG_VIDEO_LEVEL_5`` + * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_5`` - Level 5 @@ -1028,15 +1028,15 @@ enum v4l2_mpeg_video_mpeg4_profile - :header-rows: 0 :stub-columns: 0 - * - ``V4L2_MPEG_VIDEO_PROFILE_SIMPLE`` + * - ``V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE`` - Simple profile - * - ``V4L2_MPEG_VIDEO_PROFILE_ADVANCED_SIMPLE`` + * - ``V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE`` - Advanced Simple profile - * - ``V4L2_MPEG_VIDEO_PROFILE_CORE`` + * - ``V4L2_MPEG_VIDEO_MPEG4_PROFILE_CORE`` - Core profile - * - ``V4L2_MPEG_VIDEO_PROFILE_SIMPLE_SCALABLE`` + * - ``V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE_SCALABLE`` - Simple Scalable profile - * - ``V4L2_MPEG_VIDEO_PROFILE_ADVANCED_CODING_EFFICIENCY`` + * - ``V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY`` - @@ -1922,9 +1922,9 @@ enum v4l2_vp8_golden_frame_sel - .. raw:: latex - \begin{adjustbox}{width=\columnwidth} + \footnotesize -.. tabularcolumns:: |p{11.0cm}|p{10.0cm}| +.. tabularcolumns:: |p{9.0cm}|p{8.0cm}| .. flat-table:: :header-rows: 0 @@ -1940,7 +1940,7 @@ enum v4l2_vp8_golden_frame_sel - .. raw:: latex - \end{adjustbox} + \normalsize ``V4L2_CID_MPEG_VIDEO_VPX_MIN_QP (integer)`` diff --git a/Documentation/media/uapi/v4l/format.rst b/Documentation/media/uapi/v4l/format.rst index 452c6d59cad5..3e3efb0e349e 100644 --- a/Documentation/media/uapi/v4l/format.rst +++ b/Documentation/media/uapi/v4l/format.rst @@ -78,7 +78,7 @@ output devices is available. [#f1]_ The :ref:`VIDIOC_ENUM_FMT` ioctl must be supported by all drivers exchanging image data with applications. - **Important** +.. important:: Drivers are not supposed to convert image formats in kernel space. They must enumerate only formats directly supported by the hardware. diff --git a/Documentation/media/uapi/v4l/pixfmt-008.rst b/Documentation/media/uapi/v4l/pixfmt-008.rst deleted file mode 100644 index 4bec79784bdd..000000000000 --- a/Documentation/media/uapi/v4l/pixfmt-008.rst +++ /dev/null @@ -1,32 +0,0 @@ -.. -*- coding: utf-8; mode: rst -*- - -*************************************** -Detailed Transfer Function Descriptions -*************************************** - - -.. _xf-smpte-2084: - -Transfer Function SMPTE 2084 (V4L2_XFER_FUNC_SMPTE2084) -======================================================= - -The :ref:`smpte2084` standard defines the transfer function used by -High Dynamic Range content. 
- -Constants: - m1 = (2610 / 4096) / 4 - - m2 = (2523 / 4096) * 128 - - c1 = 3424 / 4096 - - c2 = (2413 / 4096) * 32 - - c3 = (2392 / 4096) * 32 - -Transfer function: - L' = ((c1 + c2 * L\ :sup:`m1`) / (1 + c3 * L\ :sup:`m1`))\ :sup:`m2` - -Inverse Transfer function: - L = (max(L':sup:`1/m2` - c1, 0) / (c2 - c3 * - L'\ :sup:`1/m2`))\ :sup:`1/m1` diff --git a/Documentation/media/uapi/v4l/pixfmt-013.rst b/Documentation/media/uapi/v4l/pixfmt-compressed.rst similarity index 100% rename from Documentation/media/uapi/v4l/pixfmt-013.rst rename to Documentation/media/uapi/v4l/pixfmt-compressed.rst diff --git a/Documentation/media/uapi/v4l/pixfmt-004.rst b/Documentation/media/uapi/v4l/pixfmt-intro.rst similarity index 100% rename from Documentation/media/uapi/v4l/pixfmt-004.rst rename to Documentation/media/uapi/v4l/pixfmt-intro.rst diff --git a/Documentation/media/uapi/v4l/pixfmt-inzi.rst b/Documentation/media/uapi/v4l/pixfmt-inzi.rst index 9849e799f205..75272f80bc8a 100644 --- a/Documentation/media/uapi/v4l/pixfmt-inzi.rst +++ b/Documentation/media/uapi/v4l/pixfmt-inzi.rst @@ -34,11 +34,12 @@ The second plane provides 16-bit per-pixel Depth data arranged in Each cell is a 16-bit word with more significant data stored at higher memory address (byte order is little-endian). + .. raw:: latex - \newline\newline\begin{adjustbox}{width=\columnwidth} + \small -.. tabularcolumns:: |p{4.0cm}|p{4.0cm}|p{4.0cm}|p{4.0cm}|p{4.0cm}|p{4.0cm}| +.. tabularcolumns:: |p{2.5cm}|p{2.5cm}|p{2.5cm}|p{2.5cm}|p{2.5cm}|p{2.5cm}| .. flat-table:: :header-rows: 0 @@ -78,4 +79,4 @@ memory address (byte order is little-endian). .. raw:: latex - \end{adjustbox}\newline\newline + \normalsize diff --git a/Documentation/media/uapi/v4l/pixfmt-m420.rst b/Documentation/media/uapi/v4l/pixfmt-m420.rst index 7dd47c071e2f..6703f4079c3e 100644 --- a/Documentation/media/uapi/v4l/pixfmt-m420.rst +++ b/Documentation/media/uapi/v4l/pixfmt-m420.rst @@ -66,7 +66,7 @@ Each cell is one byte. - Cr\ :sub:`11` -**Color Sample Location..** +**Color Sample Location:** diff --git a/Documentation/media/uapi/v4l/pixfmt-nv12.rst b/Documentation/media/uapi/v4l/pixfmt-nv12.rst index 5b45a6d2ac95..2776b41377d5 100644 --- a/Documentation/media/uapi/v4l/pixfmt-nv12.rst +++ b/Documentation/media/uapi/v4l/pixfmt-nv12.rst @@ -71,7 +71,7 @@ Each cell is one byte. - Cr\ :sub:`11` -**Color Sample Location..** +**Color Sample Location:** .. flat-table:: :header-rows: 0 diff --git a/Documentation/media/uapi/v4l/pixfmt-nv12m.rst b/Documentation/media/uapi/v4l/pixfmt-nv12m.rst index de3051fd6b50..c1a2779f604c 100644 --- a/Documentation/media/uapi/v4l/pixfmt-nv12m.rst +++ b/Documentation/media/uapi/v4l/pixfmt-nv12m.rst @@ -83,7 +83,7 @@ Each cell is one byte. - Cr\ :sub:`11` -**Color Sample Location..** +**Color Sample Location:** diff --git a/Documentation/media/uapi/v4l/pixfmt-nv16.rst b/Documentation/media/uapi/v4l/pixfmt-nv16.rst index 8ceba79ff636..f0fdad3006cf 100644 --- a/Documentation/media/uapi/v4l/pixfmt-nv16.rst +++ b/Documentation/media/uapi/v4l/pixfmt-nv16.rst @@ -79,7 +79,7 @@ Each cell is one byte. - Cr\ :sub:`31` -**Color Sample Location..** +**Color Sample Location:** diff --git a/Documentation/media/uapi/v4l/pixfmt-nv16m.rst b/Documentation/media/uapi/v4l/pixfmt-nv16m.rst index 4d46ab39f9f1..c45f036763e7 100644 --- a/Documentation/media/uapi/v4l/pixfmt-nv16m.rst +++ b/Documentation/media/uapi/v4l/pixfmt-nv16m.rst @@ -83,7 +83,7 @@ Each cell is one byte. 
- Cr\ :sub:`32` -**Color Sample Location..** +**Color Sample Location:** diff --git a/Documentation/media/uapi/v4l/pixfmt-packed-hsv.rst b/Documentation/media/uapi/v4l/pixfmt-packed-hsv.rst index 3fdb34ce2f09..8edf65c80660 100644 --- a/Documentation/media/uapi/v4l/pixfmt-packed-hsv.rst +++ b/Documentation/media/uapi/v4l/pixfmt-packed-hsv.rst @@ -17,11 +17,14 @@ cylinder: 0 being the smallest value and 255 the maximum. The values are packed in 24 or 32 bit formats. + .. raw:: latex - \newline\begin{adjustbox}{width=\columnwidth} + \begingroup + \tiny + \setlength{\tabcolsep}{2pt} -.. tabularcolumns:: |p{4.2cm}|p{1.0cm}|p{0.7cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.2cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.2cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.2cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{1.7cm}| +.. tabularcolumns:: |p{2.0cm}|p{0.54cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}| .. _packed-hsv-formats: @@ -33,11 +36,8 @@ The values are packed in 24 or 32 bit formats. - Code - - :cspan:`7` Byte 0 in memory - - - :cspan:`7` Byte 1 - - - :cspan:`7` Byte 2 - - - :cspan:`7` Byte 3 * - - @@ -50,7 +50,7 @@ The values are packed in 24 or 32 bit formats. - 2 - 1 - 0 - - + - 7 - 6 - 5 @@ -59,7 +59,7 @@ The values are packed in 24 or 32 bit formats. - 2 - 1 - 0 - - + - 7 - 6 - 5 @@ -68,7 +68,7 @@ The values are packed in 24 or 32 bit formats. - 2 - 1 - 0 - - + - 7 - 6 - 5 @@ -90,7 +90,7 @@ The values are packed in 24 or 32 bit formats. - - - - - + - h\ :sub:`7` - h\ :sub:`6` - h\ :sub:`5` @@ -99,7 +99,7 @@ The values are packed in 24 or 32 bit formats. - h\ :sub:`2` - h\ :sub:`1` - h\ :sub:`0` - - + - s\ :sub:`7` - s\ :sub:`6` - s\ :sub:`5` @@ -108,7 +108,7 @@ The values are packed in 24 or 32 bit formats. - s\ :sub:`2` - s\ :sub:`1` - s\ :sub:`0` - - + - v\ :sub:`7` - v\ :sub:`6` - v\ :sub:`5` @@ -130,7 +130,7 @@ The values are packed in 24 or 32 bit formats. - h\ :sub:`2` - h\ :sub:`1` - h\ :sub:`0` - - + - s\ :sub:`7` - s\ :sub:`6` - s\ :sub:`5` @@ -139,7 +139,7 @@ The values are packed in 24 or 32 bit formats. - s\ :sub:`2` - s\ :sub:`1` - s\ :sub:`0` - - + - v\ :sub:`7` - v\ :sub:`6` - v\ :sub:`5` @@ -149,9 +149,9 @@ The values are packed in 24 or 32 bit formats. - v\ :sub:`1` - v\ :sub:`0` - - - + .. raw:: latex - \end{adjustbox}\newline\newline + \endgroup Bit 7 is the most significant bit. diff --git a/Documentation/media/uapi/v4l/pixfmt-packed-rgb.rst b/Documentation/media/uapi/v4l/pixfmt-packed-rgb.rst index 84fcbcb74171..4938d9655a41 100644 --- a/Documentation/media/uapi/v4l/pixfmt-packed-rgb.rst +++ b/Documentation/media/uapi/v4l/pixfmt-packed-rgb.rst @@ -16,9 +16,12 @@ next to each other in memory. .. raw:: latex - \begin{adjustbox}{width=\columnwidth} + \begingroup + \tiny + \setlength{\tabcolsep}{2pt} + +.. tabularcolumns:: |p{2.3cm}|p{1.6cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}| -.. 
tabularcolumns:: |p{4.5cm}|p{3.3cm}|p{0.7cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.2cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.2cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.2cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{1.7cm}| .. _rgb-formats: @@ -28,17 +31,12 @@ next to each other in memory. * - Identifier - Code - - - :cspan:`7` Byte 0 in memory - - - :cspan:`7` Byte 1 - - - :cspan:`7` Byte 2 - - - :cspan:`7` Byte 3 * - - - - Bit - 7 - 6 - 5 @@ -47,7 +45,7 @@ next to each other in memory. - 2 - 1 - 0 - - + - 7 - 6 - 5 @@ -56,7 +54,7 @@ next to each other in memory. - 2 - 1 - 0 - - + - 7 - 6 - 5 @@ -65,7 +63,7 @@ next to each other in memory. - 2 - 1 - 0 - - + - 7 - 6 - 5 @@ -78,7 +76,7 @@ next to each other in memory. - ``V4L2_PIX_FMT_RGB332`` - 'RGB1' - - + - r\ :sub:`2` - r\ :sub:`1` - r\ :sub:`0` @@ -87,11 +85,12 @@ next to each other in memory. - g\ :sub:`0` - b\ :sub:`1` - b\ :sub:`0` + - * .. _V4L2-PIX-FMT-ARGB444: - ``V4L2_PIX_FMT_ARGB444`` - 'AR12' - - + - g\ :sub:`3` - g\ :sub:`2` - g\ :sub:`1` @@ -100,7 +99,7 @@ next to each other in memory. - b\ :sub:`2` - b\ :sub:`1` - b\ :sub:`0` - - + - a\ :sub:`3` - a\ :sub:`2` - a\ :sub:`1` @@ -109,11 +108,12 @@ next to each other in memory. - r\ :sub:`2` - r\ :sub:`1` - r\ :sub:`0` + - * .. _V4L2-PIX-FMT-XRGB444: - ``V4L2_PIX_FMT_XRGB444`` - 'XR12' - - + - g\ :sub:`3` - g\ :sub:`2` - g\ :sub:`1` @@ -122,7 +122,7 @@ next to each other in memory. - b\ :sub:`2` - b\ :sub:`1` - b\ :sub:`0` - - + - - - @@ -131,11 +131,12 @@ next to each other in memory. - r\ :sub:`2` - r\ :sub:`1` - r\ :sub:`0` + - * .. _V4L2-PIX-FMT-ARGB555: - ``V4L2_PIX_FMT_ARGB555`` - 'AR15' - - + - g\ :sub:`2` - g\ :sub:`1` - g\ :sub:`0` @@ -144,7 +145,7 @@ next to each other in memory. - b\ :sub:`2` - b\ :sub:`1` - b\ :sub:`0` - - + - a - r\ :sub:`4` - r\ :sub:`3` @@ -153,11 +154,12 @@ next to each other in memory. - r\ :sub:`0` - g\ :sub:`4` - g\ :sub:`3` + - * .. _V4L2-PIX-FMT-XRGB555: - ``V4L2_PIX_FMT_XRGB555`` - 'XR15' - - + - g\ :sub:`2` - g\ :sub:`1` - g\ :sub:`0` @@ -166,7 +168,7 @@ next to each other in memory. - b\ :sub:`2` - b\ :sub:`1` - b\ :sub:`0` - - + - - r\ :sub:`4` - r\ :sub:`3` @@ -175,11 +177,12 @@ next to each other in memory. - r\ :sub:`0` - g\ :sub:`4` - g\ :sub:`3` + - * .. _V4L2-PIX-FMT-RGB565: - ``V4L2_PIX_FMT_RGB565`` - 'RGBP' - - + - g\ :sub:`2` - g\ :sub:`1` - g\ :sub:`0` @@ -188,7 +191,7 @@ next to each other in memory. - b\ :sub:`2` - b\ :sub:`1` - b\ :sub:`0` - - + - r\ :sub:`4` - r\ :sub:`3` - r\ :sub:`2` @@ -197,11 +200,12 @@ next to each other in memory. - g\ :sub:`5` - g\ :sub:`4` - g\ :sub:`3` + - * .. _V4L2-PIX-FMT-ARGB555X: - ``V4L2_PIX_FMT_ARGB555X`` - 'AR15' | (1 << 31) - - + - a - r\ :sub:`4` - r\ :sub:`3` @@ -210,7 +214,7 @@ next to each other in memory. - r\ :sub:`0` - g\ :sub:`4` - g\ :sub:`3` - - + - g\ :sub:`2` - g\ :sub:`1` - g\ :sub:`0` @@ -219,11 +223,12 @@ next to each other in memory. - b\ :sub:`2` - b\ :sub:`1` - b\ :sub:`0` + - * .. _V4L2-PIX-FMT-XRGB555X: - ``V4L2_PIX_FMT_XRGB555X`` - 'XR15' | (1 << 31) - - + - - r\ :sub:`4` - r\ :sub:`3` @@ -232,7 +237,7 @@ next to each other in memory. - r\ :sub:`0` - g\ :sub:`4` - g\ :sub:`3` - - + - g\ :sub:`2` - g\ :sub:`1` - g\ :sub:`0` @@ -241,11 +246,12 @@ next to each other in memory. - b\ :sub:`2` - b\ :sub:`1` - b\ :sub:`0` + - * .. 
_V4L2-PIX-FMT-RGB565X: - ``V4L2_PIX_FMT_RGB565X`` - 'RGBR' - - + - r\ :sub:`4` - r\ :sub:`3` - r\ :sub:`2` @@ -254,7 +260,7 @@ next to each other in memory. - g\ :sub:`5` - g\ :sub:`4` - g\ :sub:`3` - - + - g\ :sub:`2` - g\ :sub:`1` - g\ :sub:`0` @@ -263,11 +269,12 @@ next to each other in memory. - b\ :sub:`2` - b\ :sub:`1` - b\ :sub:`0` + - * .. _V4L2-PIX-FMT-BGR24: - ``V4L2_PIX_FMT_BGR24`` - 'BGR3' - - + - b\ :sub:`7` - b\ :sub:`6` - b\ :sub:`5` @@ -276,7 +283,7 @@ next to each other in memory. - b\ :sub:`2` - b\ :sub:`1` - b\ :sub:`0` - - + - g\ :sub:`7` - g\ :sub:`6` - g\ :sub:`5` @@ -285,7 +292,7 @@ next to each other in memory. - g\ :sub:`2` - g\ :sub:`1` - g\ :sub:`0` - - + - r\ :sub:`7` - r\ :sub:`6` - r\ :sub:`5` @@ -294,11 +301,12 @@ next to each other in memory. - r\ :sub:`2` - r\ :sub:`1` - r\ :sub:`0` + - * .. _V4L2-PIX-FMT-RGB24: - ``V4L2_PIX_FMT_RGB24`` - 'RGB3' - - + - r\ :sub:`7` - r\ :sub:`6` - r\ :sub:`5` @@ -307,7 +315,7 @@ next to each other in memory. - r\ :sub:`2` - r\ :sub:`1` - r\ :sub:`0` - - + - g\ :sub:`7` - g\ :sub:`6` - g\ :sub:`5` @@ -316,7 +324,7 @@ next to each other in memory. - g\ :sub:`2` - g\ :sub:`1` - g\ :sub:`0` - - + - b\ :sub:`7` - b\ :sub:`6` - b\ :sub:`5` @@ -325,11 +333,12 @@ next to each other in memory. - b\ :sub:`2` - b\ :sub:`1` - b\ :sub:`0` + - * .. _V4L2-PIX-FMT-BGR666: - ``V4L2_PIX_FMT_BGR666`` - 'BGRH' - - + - b\ :sub:`5` - b\ :sub:`4` - b\ :sub:`3` @@ -338,7 +347,7 @@ next to each other in memory. - b\ :sub:`0` - g\ :sub:`5` - g\ :sub:`4` - - + - g\ :sub:`3` - g\ :sub:`2` - g\ :sub:`1` @@ -347,7 +356,7 @@ next to each other in memory. - r\ :sub:`4` - r\ :sub:`3` - r\ :sub:`2` - - + - r\ :sub:`1` - r\ :sub:`0` - @@ -356,7 +365,7 @@ next to each other in memory. - - - - - + - - - @@ -369,7 +378,7 @@ next to each other in memory. - ``V4L2_PIX_FMT_ABGR32`` - 'AR24' - - + - b\ :sub:`7` - b\ :sub:`6` - b\ :sub:`5` @@ -378,7 +387,7 @@ next to each other in memory. - b\ :sub:`2` - b\ :sub:`1` - b\ :sub:`0` - - + - g\ :sub:`7` - g\ :sub:`6` - g\ :sub:`5` @@ -387,7 +396,7 @@ next to each other in memory. - g\ :sub:`2` - g\ :sub:`1` - g\ :sub:`0` - - + - r\ :sub:`7` - r\ :sub:`6` - r\ :sub:`5` @@ -396,7 +405,7 @@ next to each other in memory. - r\ :sub:`2` - r\ :sub:`1` - r\ :sub:`0` - - + - a\ :sub:`7` - a\ :sub:`6` - a\ :sub:`5` @@ -409,7 +418,7 @@ next to each other in memory. - ``V4L2_PIX_FMT_XBGR32`` - 'XR24' - - + - b\ :sub:`7` - b\ :sub:`6` - b\ :sub:`5` @@ -418,7 +427,7 @@ next to each other in memory. - b\ :sub:`2` - b\ :sub:`1` - b\ :sub:`0` - - + - g\ :sub:`7` - g\ :sub:`6` - g\ :sub:`5` @@ -427,7 +436,7 @@ next to each other in memory. - g\ :sub:`2` - g\ :sub:`1` - g\ :sub:`0` - - + - r\ :sub:`7` - r\ :sub:`6` - r\ :sub:`5` @@ -436,7 +445,7 @@ next to each other in memory. - r\ :sub:`2` - r\ :sub:`1` - r\ :sub:`0` - - + - - - @@ -449,7 +458,7 @@ next to each other in memory. - ``V4L2_PIX_FMT_ARGB32`` - 'BA24' - - + - a\ :sub:`7` - a\ :sub:`6` - a\ :sub:`5` @@ -458,7 +467,7 @@ next to each other in memory. - a\ :sub:`2` - a\ :sub:`1` - a\ :sub:`0` - - + - r\ :sub:`7` - r\ :sub:`6` - r\ :sub:`5` @@ -467,7 +476,7 @@ next to each other in memory. - r\ :sub:`2` - r\ :sub:`1` - r\ :sub:`0` - - + - g\ :sub:`7` - g\ :sub:`6` - g\ :sub:`5` @@ -476,7 +485,7 @@ next to each other in memory. - g\ :sub:`2` - g\ :sub:`1` - g\ :sub:`0` - - + - b\ :sub:`7` - b\ :sub:`6` - b\ :sub:`5` @@ -489,6 +498,7 @@ next to each other in memory. - ``V4L2_PIX_FMT_XRGB32`` - 'BX24' + - - - @@ -497,8 +507,7 @@ next to each other in memory. 
- - - - - - - + - r\ :sub:`7` - r\ :sub:`6` - r\ :sub:`5` @@ -507,7 +516,7 @@ next to each other in memory. - r\ :sub:`2` - r\ :sub:`1` - r\ :sub:`0` - - + - g\ :sub:`7` - g\ :sub:`6` - g\ :sub:`5` @@ -516,7 +525,7 @@ next to each other in memory. - g\ :sub:`2` - g\ :sub:`1` - g\ :sub:`0` - - + - b\ :sub:`7` - b\ :sub:`6` - b\ :sub:`5` @@ -528,7 +537,7 @@ next to each other in memory. .. raw:: latex - \end{adjustbox}\newline\newline + \endgroup .. note:: Bit 7 is the most significant bit. @@ -562,9 +571,9 @@ Each cell is one byte. .. raw:: latex - \newline\newline\begin{adjustbox}{width=\columnwidth} + \small -.. tabularcolumns:: |p{4.1cm}|p{1.1cm}|p{1.1cm}|p{1.1cm}|p{1.1cm}|p{1.1cm}|p{1.1cm}|p{1.1cm}|p{1.1cm}|p{1.1cm}|p{1.1cm}|p{1.1cm}|p{1.3cm}| +.. tabularcolumns:: |p{3.1cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}| .. flat-table:: RGB byte order :header-rows: 0 @@ -626,19 +635,21 @@ Each cell is one byte. .. raw:: latex - \end{adjustbox}\newline\newline + \normalsize Formats defined in :ref:`rgb-formats-deprecated` are deprecated and must not be used by new drivers. They are documented here for reference. -The meaning of their alpha bits (a) is ill-defined and interpreted as in +The meaning of their alpha bits ``(a)`` are ill-defined and interpreted as in either the corresponding ARGB or XRGB format, depending on the driver. .. raw:: latex - \begin{adjustbox}{width=\columnwidth} + \begingroup + \tiny + \setlength{\tabcolsep}{2pt} -.. tabularcolumns:: |p{4.2cm}|p{1.0cm}|p{0.7cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.2cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.2cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.2cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{1.7cm}| +.. tabularcolumns:: |p{2.2cm}|p{0.60cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}| .. _rgb-formats-deprecated: @@ -648,17 +659,15 @@ either the corresponding ARGB or XRGB format, depending on the driver. * - Identifier - Code - - - :cspan:`7` Byte 0 in memory - - + - :cspan:`7` Byte 1 - - + - :cspan:`7` Byte 2 - - + - :cspan:`7` Byte 3 * - - - - Bit - 7 - 6 - 5 @@ -667,7 +676,7 @@ either the corresponding ARGB or XRGB format, depending on the driver. - 2 - 1 - 0 - - + - 7 - 6 - 5 @@ -676,7 +685,7 @@ either the corresponding ARGB or XRGB format, depending on the driver. - 2 - 1 - 0 - - + - 7 - 6 - 5 @@ -685,7 +694,7 @@ either the corresponding ARGB or XRGB format, depending on the driver. - 2 - 1 - 0 - - + - 7 - 6 - 5 @@ -698,7 +707,7 @@ either the corresponding ARGB or XRGB format, depending on the driver. - ``V4L2_PIX_FMT_RGB444`` - 'R444' - - + - g\ :sub:`3` - g\ :sub:`2` - g\ :sub:`1` @@ -707,7 +716,7 @@ either the corresponding ARGB or XRGB format, depending on the driver. - b\ :sub:`2` - b\ :sub:`1` - b\ :sub:`0` - - + - a\ :sub:`3` - a\ :sub:`2` - a\ :sub:`1` @@ -716,11 +725,12 @@ either the corresponding ARGB or XRGB format, depending on the driver. - r\ :sub:`2` - r\ :sub:`1` - r\ :sub:`0` + - * .. 
_V4L2-PIX-FMT-RGB555: - ``V4L2_PIX_FMT_RGB555`` - 'RGBO' - - + - g\ :sub:`2` - g\ :sub:`1` - g\ :sub:`0` @@ -729,7 +739,7 @@ either the corresponding ARGB or XRGB format, depending on the driver. - b\ :sub:`2` - b\ :sub:`1` - b\ :sub:`0` - - + - a - r\ :sub:`4` - r\ :sub:`3` @@ -738,11 +748,12 @@ either the corresponding ARGB or XRGB format, depending on the driver. - r\ :sub:`0` - g\ :sub:`4` - g\ :sub:`3` + - * .. _V4L2-PIX-FMT-RGB555X: - ``V4L2_PIX_FMT_RGB555X`` - 'RGBQ' - - + - a - r\ :sub:`4` - r\ :sub:`3` @@ -751,7 +762,7 @@ either the corresponding ARGB or XRGB format, depending on the driver. - r\ :sub:`0` - g\ :sub:`4` - g\ :sub:`3` - - + - g\ :sub:`2` - g\ :sub:`1` - g\ :sub:`0` @@ -760,11 +771,12 @@ either the corresponding ARGB or XRGB format, depending on the driver. - b\ :sub:`2` - b\ :sub:`1` - b\ :sub:`0` + - * .. _V4L2-PIX-FMT-BGR32: - ``V4L2_PIX_FMT_BGR32`` - 'BGR4' - - + - b\ :sub:`7` - b\ :sub:`6` - b\ :sub:`5` @@ -773,7 +785,7 @@ either the corresponding ARGB or XRGB format, depending on the driver. - b\ :sub:`2` - b\ :sub:`1` - b\ :sub:`0` - - + - g\ :sub:`7` - g\ :sub:`6` - g\ :sub:`5` @@ -782,7 +794,7 @@ either the corresponding ARGB or XRGB format, depending on the driver. - g\ :sub:`2` - g\ :sub:`1` - g\ :sub:`0` - - + - r\ :sub:`7` - r\ :sub:`6` - r\ :sub:`5` @@ -791,7 +803,7 @@ either the corresponding ARGB or XRGB format, depending on the driver. - r\ :sub:`2` - r\ :sub:`1` - r\ :sub:`0` - - + - a\ :sub:`7` - a\ :sub:`6` - a\ :sub:`5` @@ -804,7 +816,7 @@ either the corresponding ARGB or XRGB format, depending on the driver. - ``V4L2_PIX_FMT_RGB32`` - 'RGB4' - - + - a\ :sub:`7` - a\ :sub:`6` - a\ :sub:`5` @@ -813,7 +825,7 @@ either the corresponding ARGB or XRGB format, depending on the driver. - a\ :sub:`2` - a\ :sub:`1` - a\ :sub:`0` - - + - r\ :sub:`7` - r\ :sub:`6` - r\ :sub:`5` @@ -822,7 +834,7 @@ either the corresponding ARGB or XRGB format, depending on the driver. - r\ :sub:`2` - r\ :sub:`1` - r\ :sub:`0` - - + - g\ :sub:`7` - g\ :sub:`6` - g\ :sub:`5` @@ -831,7 +843,7 @@ either the corresponding ARGB or XRGB format, depending on the driver. - g\ :sub:`2` - g\ :sub:`1` - g\ :sub:`0` - - + - b\ :sub:`7` - b\ :sub:`6` - b\ :sub:`5` @@ -843,7 +855,7 @@ either the corresponding ARGB or XRGB format, depending on the driver. .. raw:: latex - \end{adjustbox}\newline\newline + \endgroup A test utility to determine which RGB formats a driver actually supports is available from the LinuxTV v4l-dvb repository. See diff --git a/Documentation/media/uapi/v4l/pixfmt-packed-yuv.rst b/Documentation/media/uapi/v4l/pixfmt-packed-yuv.rst index ebc8fcc937ad..d7644b411ccc 100644 --- a/Documentation/media/uapi/v4l/pixfmt-packed-yuv.rst +++ b/Documentation/media/uapi/v4l/pixfmt-packed-yuv.rst @@ -12,13 +12,16 @@ Description Similar to the packed RGB formats these formats store the Y, Cb and Cr component of each pixel in one 16 or 32 bit word. + .. raw:: latex - \newline\newline\begin{adjustbox}{width=\columnwidth} + \begingroup + \tiny + \setlength{\tabcolsep}{2pt} .. _packed-yuv-formats: -.. tabularcolumns:: |p{4.5cm}|p{3.3cm}|p{0.7cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.2cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.2cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.2cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{1.7cm}| +.. 
tabularcolumns:: |p{2.0cm}|p{0.67cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}| .. flat-table:: Packed YUV Image Formats :header-rows: 2 @@ -26,17 +29,16 @@ component of each pixel in one 16 or 32 bit word. * - Identifier - Code - - + - :cspan:`7` Byte 0 in memory - - + - :cspan:`7` Byte 1 - - + - :cspan:`7` Byte 2 - - + - :cspan:`7` Byte 3 * - - - - Bit - 7 - 6 - 5 @@ -45,7 +47,7 @@ component of each pixel in one 16 or 32 bit word. - 2 - 1 - 0 - - + - 7 - 6 - 5 @@ -54,7 +56,7 @@ component of each pixel in one 16 or 32 bit word. - 2 - 1 - 0 - - + - 7 - 6 - 5 @@ -63,7 +65,7 @@ component of each pixel in one 16 or 32 bit word. - 2 - 1 - 0 - - + - 7 - 6 - 5 @@ -76,7 +78,7 @@ component of each pixel in one 16 or 32 bit word. - ``V4L2_PIX_FMT_YUV444`` - 'Y444' - - + - Cb\ :sub:`3` - Cb\ :sub:`2` - Cb\ :sub:`1` @@ -85,7 +87,7 @@ component of each pixel in one 16 or 32 bit word. - Cr\ :sub:`2` - Cr\ :sub:`1` - Cr\ :sub:`0` - - + - a\ :sub:`3` - a\ :sub:`2` - a\ :sub:`1` @@ -94,11 +96,12 @@ component of each pixel in one 16 or 32 bit word. - Y'\ :sub:`2` - Y'\ :sub:`1` - Y'\ :sub:`0` + - * .. _V4L2-PIX-FMT-YUV555: - ``V4L2_PIX_FMT_YUV555`` - 'YUVO' - - + - Cb\ :sub:`2` - Cb\ :sub:`1` - Cb\ :sub:`0` @@ -107,7 +110,7 @@ component of each pixel in one 16 or 32 bit word. - Cr\ :sub:`2` - Cr\ :sub:`1` - Cr\ :sub:`0` - - + - a - Y'\ :sub:`4` - Y'\ :sub:`3` @@ -116,11 +119,12 @@ component of each pixel in one 16 or 32 bit word. - Y'\ :sub:`0` - Cb\ :sub:`4` - Cb\ :sub:`3` + - * .. _V4L2-PIX-FMT-YUV565: - ``V4L2_PIX_FMT_YUV565`` - 'YUVP' - - + - Cb\ :sub:`2` - Cb\ :sub:`1` - Cb\ :sub:`0` @@ -129,7 +133,7 @@ component of each pixel in one 16 or 32 bit word. - Cr\ :sub:`2` - Cr\ :sub:`1` - Cr\ :sub:`0` - - + - Y'\ :sub:`4` - Y'\ :sub:`3` - Y'\ :sub:`2` @@ -138,11 +142,12 @@ component of each pixel in one 16 or 32 bit word. - Cb\ :sub:`5` - Cb\ :sub:`4` - Cb\ :sub:`3` + - * .. _V4L2-PIX-FMT-YUV32: - ``V4L2_PIX_FMT_YUV32`` - 'YUV4' - - + - a\ :sub:`7` - a\ :sub:`6` - a\ :sub:`5` @@ -151,7 +156,7 @@ component of each pixel in one 16 or 32 bit word. - a\ :sub:`2` - a\ :sub:`1` - a\ :sub:`0` - - + - Y'\ :sub:`7` - Y'\ :sub:`6` - Y'\ :sub:`5` @@ -160,7 +165,7 @@ component of each pixel in one 16 or 32 bit word. - Y'\ :sub:`2` - Y'\ :sub:`1` - Y'\ :sub:`0` - - + - Cb\ :sub:`7` - Cb\ :sub:`6` - Cb\ :sub:`5` @@ -169,7 +174,7 @@ component of each pixel in one 16 or 32 bit word. - Cb\ :sub:`2` - Cb\ :sub:`1` - Cb\ :sub:`0` - - + - Cr\ :sub:`7` - Cr\ :sub:`6` - Cr\ :sub:`5` @@ -181,7 +186,7 @@ component of each pixel in one 16 or 32 bit word. .. raw:: latex - \end{adjustbox}\newline\newline + \endgroup .. 
note:: diff --git a/Documentation/media/uapi/v4l/pixfmt-rgb.rst b/Documentation/media/uapi/v4l/pixfmt-rgb.rst index b0f35136021e..4cc27195dc79 100644 --- a/Documentation/media/uapi/v4l/pixfmt-rgb.rst +++ b/Documentation/media/uapi/v4l/pixfmt-rgb.rst @@ -17,4 +17,5 @@ RGB Formats pixfmt-srggb10alaw8 pixfmt-srggb10dpcm8 pixfmt-srggb12 + pixfmt-srggb12p pixfmt-srggb16 diff --git a/Documentation/media/uapi/v4l/pixfmt-srggb10p.rst b/Documentation/media/uapi/v4l/pixfmt-srggb10p.rst index b6d426c70ccd..d9e07a4b8b31 100644 --- a/Documentation/media/uapi/v4l/pixfmt-srggb10p.rst +++ b/Documentation/media/uapi/v4l/pixfmt-srggb10p.rst @@ -33,11 +33,7 @@ of a small V4L2_PIX_FMT_SBGGR10P image: **Byte Order.** Each cell is one byte. -.. raw:: latex - - \newline\newline\begin{adjustbox}{width=\columnwidth} - -.. tabularcolumns:: |p{2.0cm}|p{1.3cm}|p{1.3cm}|p{1.3cm}|p{1.3cm}|p{10.9cm}| +.. tabularcolumns:: |p{2.0cm}|p{1.0cm}|p{1.0cm}|p{1.0cm}|p{1.0cm}|p{5.4cm}| .. flat-table:: :header-rows: 0 @@ -50,6 +46,7 @@ Each cell is one byte. - B\ :sub:`02high` - G\ :sub:`03high` - G\ :sub:`03low`\ (bits 7--6) B\ :sub:`02low`\ (bits 5--4) + G\ :sub:`01low`\ (bits 3--2) B\ :sub:`00low`\ (bits 1--0) * - start + 5: - G\ :sub:`10high` @@ -57,6 +54,7 @@ Each cell is one byte. - G\ :sub:`12high` - R\ :sub:`13high` - R\ :sub:`13low`\ (bits 7--6) G\ :sub:`12low`\ (bits 5--4) + R\ :sub:`11low`\ (bits 3--2) G\ :sub:`10low`\ (bits 1--0) * - start + 10: - B\ :sub:`20high` @@ -64,6 +62,7 @@ Each cell is one byte. - B\ :sub:`22high` - G\ :sub:`23high` - G\ :sub:`23low`\ (bits 7--6) B\ :sub:`22low`\ (bits 5--4) + G\ :sub:`21low`\ (bits 3--2) B\ :sub:`20low`\ (bits 1--0) * - start + 15: - G\ :sub:`30high` @@ -71,8 +70,5 @@ Each cell is one byte. - G\ :sub:`32high` - R\ :sub:`33high` - R\ :sub:`33low`\ (bits 7--6) G\ :sub:`32low`\ (bits 5--4) + R\ :sub:`31low`\ (bits 3--2) G\ :sub:`30low`\ (bits 1--0) - -.. raw:: latex - - \end{adjustbox}\newline\newline diff --git a/Documentation/media/uapi/v4l/pixfmt-srggb12p.rst b/Documentation/media/uapi/v4l/pixfmt-srggb12p.rst new file mode 100644 index 000000000000..59918a7913fe --- /dev/null +++ b/Documentation/media/uapi/v4l/pixfmt-srggb12p.rst @@ -0,0 +1,86 @@ +.. -*- coding: utf-8; mode: rst -*- + +.. _V4L2-PIX-FMT-SRGGB12P: +.. _v4l2-pix-fmt-sbggr12p: +.. _v4l2-pix-fmt-sgbrg12p: +.. _v4l2-pix-fmt-sgrbg12p: + +******************************************************************************************************************************* +V4L2_PIX_FMT_SRGGB12P ('pRAA'), V4L2_PIX_FMT_SGRBG12P ('pgAA'), V4L2_PIX_FMT_SGBRG12P ('pGAA'), V4L2_PIX_FMT_SBGGR12P ('pBAA') +******************************************************************************************************************************* + + +12-bit packed Bayer formats + + +Description +=========== + +These four pixel formats are packed raw sRGB / Bayer formats with 12 +bits per colour. Every two consecutive samples are packed into three +bytes. Each of the first two bytes contains the 8 high order bits of +the pixels, and the third byte contains the four least significant +bits of each pixel, in the same order. + +Each n-pixel row contains n/2 green samples and n/2 blue or red +samples, with alternating green-red and green-blue rows. They are +conventionally described as GRGR... BGBG..., RGRG... GBGB..., etc. +Below is an example of a small V4L2_PIX_FMT_SBGGR12P image: + +**Byte Order.** +Each cell is one byte. + +.. tabularcolumns:: |p{2.0cm}|p{1.0cm}|p{1.0cm}|p{2.7cm}|p{1.0cm}|p{1.0cm}|p{2.7cm}| + + +.. 
flat-table:: + :header-rows: 0 + :stub-columns: 0 + :widths: 2 1 1 1 1 1 1 + + + - - start + 0: + - B\ :sub:`00high` + - G\ :sub:`01high` + - G\ :sub:`01low`\ (bits 7--4) + + B\ :sub:`00low`\ (bits 3--0) + - B\ :sub:`02high` + - G\ :sub:`03high` + - G\ :sub:`03low`\ (bits 7--4) + + B\ :sub:`02low`\ (bits 3--0) + + - - start + 6: + - G\ :sub:`10high` + - R\ :sub:`11high` + - R\ :sub:`11low`\ (bits 7--4) + + G\ :sub:`10low`\ (bits 3--0) + - G\ :sub:`12high` + - R\ :sub:`13high` + - R\ :sub:`13low`\ (bits 7--4) + + G\ :sub:`12low`\ (bits 3--0) + - - start + 12: + - B\ :sub:`20high` + - G\ :sub:`21high` + - G\ :sub:`21low`\ (bits 7--4) + + B\ :sub:`20low`\ (bits 3--0) + - B\ :sub:`22high` + - G\ :sub:`23high` + - G\ :sub:`23low`\ (bits 7--4) + + B\ :sub:`22low`\ (bits 3--0) + - - start + 18: + - G\ :sub:`30high` + - R\ :sub:`31high` + - R\ :sub:`31low`\ (bits 7--4) + + G\ :sub:`30low`\ (bits 3--0) + - G\ :sub:`32high` + - R\ :sub:`33high` + - R\ :sub:`33low`\ (bits 7--4) + + G\ :sub:`32low`\ (bits 3--0) diff --git a/Documentation/media/uapi/v4l/pixfmt-uyvy.rst b/Documentation/media/uapi/v4l/pixfmt-uyvy.rst index 30660e04dd0e..ecdc2d94c209 100644 --- a/Documentation/media/uapi/v4l/pixfmt-uyvy.rst +++ b/Documentation/media/uapi/v4l/pixfmt-uyvy.rst @@ -65,7 +65,7 @@ Each cell is one byte. - Y'\ :sub:`33` -**Color Sample Location..** +**Color Sample Location:** diff --git a/Documentation/media/uapi/v4l/pixfmt-003.rst b/Documentation/media/uapi/v4l/pixfmt-v4l2-mplane.rst similarity index 100% rename from Documentation/media/uapi/v4l/pixfmt-003.rst rename to Documentation/media/uapi/v4l/pixfmt-v4l2-mplane.rst diff --git a/Documentation/media/uapi/v4l/pixfmt-002.rst b/Documentation/media/uapi/v4l/pixfmt-v4l2.rst similarity index 100% rename from Documentation/media/uapi/v4l/pixfmt-002.rst rename to Documentation/media/uapi/v4l/pixfmt-v4l2.rst diff --git a/Documentation/media/uapi/v4l/pixfmt-vyuy.rst b/Documentation/media/uapi/v4l/pixfmt-vyuy.rst index a3f61f280b94..670c339c1714 100644 --- a/Documentation/media/uapi/v4l/pixfmt-vyuy.rst +++ b/Documentation/media/uapi/v4l/pixfmt-vyuy.rst @@ -65,7 +65,7 @@ Each cell is one byte. - Y'\ :sub:`33` -**Color Sample Location..** +**Color Sample Location:** .. flat-table:: :header-rows: 0 diff --git a/Documentation/media/uapi/v4l/pixfmt-y41p.rst b/Documentation/media/uapi/v4l/pixfmt-y41p.rst index 05d040c46a47..e1fe548807a4 100644 --- a/Documentation/media/uapi/v4l/pixfmt-y41p.rst +++ b/Documentation/media/uapi/v4l/pixfmt-y41p.rst @@ -88,7 +88,7 @@ Each cell is one byte. - Y'\ :sub:`37` -**Color Sample Location..** +**Color Sample Location:** .. flat-table:: :header-rows: 0 diff --git a/Documentation/media/uapi/v4l/pixfmt-yuv410.rst b/Documentation/media/uapi/v4l/pixfmt-yuv410.rst index 0c49915af850..b51a0d1c6108 100644 --- a/Documentation/media/uapi/v4l/pixfmt-yuv410.rst +++ b/Documentation/media/uapi/v4l/pixfmt-yuv410.rst @@ -67,7 +67,7 @@ Each cell is one byte. - Cb\ :sub:`00` -**Color Sample Location..** +**Color Sample Location:** diff --git a/Documentation/media/uapi/v4l/pixfmt-yuv411p.rst b/Documentation/media/uapi/v4l/pixfmt-yuv411p.rst index 2cf33fad7254..2582341972db 100644 --- a/Documentation/media/uapi/v4l/pixfmt-yuv411p.rst +++ b/Documentation/media/uapi/v4l/pixfmt-yuv411p.rst @@ -75,7 +75,7 @@ Each cell is one byte. 
- Cr\ :sub:`30` -**Color Sample Location..** +**Color Sample Location:** diff --git a/Documentation/media/uapi/v4l/pixfmt-yuv420.rst b/Documentation/media/uapi/v4l/pixfmt-yuv420.rst index fd98904058ed..a9b85c4b1dbc 100644 --- a/Documentation/media/uapi/v4l/pixfmt-yuv420.rst +++ b/Documentation/media/uapi/v4l/pixfmt-yuv420.rst @@ -76,7 +76,7 @@ Each cell is one byte. - Cb\ :sub:`11` -**Color Sample Location..** +**Color Sample Location:** diff --git a/Documentation/media/uapi/v4l/pixfmt-yuv420m.rst b/Documentation/media/uapi/v4l/pixfmt-yuv420m.rst index cce8c477fdfc..32c68c33f2b1 100644 --- a/Documentation/media/uapi/v4l/pixfmt-yuv420m.rst +++ b/Documentation/media/uapi/v4l/pixfmt-yuv420m.rst @@ -85,7 +85,7 @@ Each cell is one byte. - Cr\ :sub:`11` -**Color Sample Location..** +**Color Sample Location:** diff --git a/Documentation/media/uapi/v4l/pixfmt-yuv422m.rst b/Documentation/media/uapi/v4l/pixfmt-yuv422m.rst index d986393aa934..9e7028c4967c 100644 --- a/Documentation/media/uapi/v4l/pixfmt-yuv422m.rst +++ b/Documentation/media/uapi/v4l/pixfmt-yuv422m.rst @@ -96,7 +96,7 @@ Each cell is one byte. - Cr\ :sub:`31` -**Color Sample Location..** +**Color Sample Location:** diff --git a/Documentation/media/uapi/v4l/pixfmt-yuv422p.rst b/Documentation/media/uapi/v4l/pixfmt-yuv422p.rst index e6f5de546dba..a96f836c7fa5 100644 --- a/Documentation/media/uapi/v4l/pixfmt-yuv422p.rst +++ b/Documentation/media/uapi/v4l/pixfmt-yuv422p.rst @@ -84,7 +84,7 @@ Each cell is one byte. - Cr\ :sub:`31` -**Color Sample Location..** +**Color Sample Location:** diff --git a/Documentation/media/uapi/v4l/pixfmt-yuv444m.rst b/Documentation/media/uapi/v4l/pixfmt-yuv444m.rst index 830fbf6fcd1d..8605bfaee112 100644 --- a/Documentation/media/uapi/v4l/pixfmt-yuv444m.rst +++ b/Documentation/media/uapi/v4l/pixfmt-yuv444m.rst @@ -106,7 +106,7 @@ Each cell is one byte. - Cr\ :sub:`33` -**Color Sample Location..** +**Color Sample Location:** diff --git a/Documentation/media/uapi/v4l/pixfmt-yuyv.rst b/Documentation/media/uapi/v4l/pixfmt-yuyv.rst index e1bdd6b1aefc..53e876d053fb 100644 --- a/Documentation/media/uapi/v4l/pixfmt-yuyv.rst +++ b/Documentation/media/uapi/v4l/pixfmt-yuyv.rst @@ -68,7 +68,7 @@ Each cell is one byte. - Cr\ :sub:`31` -**Color Sample Location..** +**Color Sample Location:** diff --git a/Documentation/media/uapi/v4l/pixfmt-yvyu.rst b/Documentation/media/uapi/v4l/pixfmt-yvyu.rst index 0244ce6741a6..b9c31746e565 100644 --- a/Documentation/media/uapi/v4l/pixfmt-yvyu.rst +++ b/Documentation/media/uapi/v4l/pixfmt-yvyu.rst @@ -65,7 +65,7 @@ Each cell is one byte. - Cb\ :sub:`31` -**Color Sample Location..** +**Color Sample Location:** .. flat-table:: :header-rows: 0 diff --git a/Documentation/media/uapi/v4l/pixfmt.rst b/Documentation/media/uapi/v4l/pixfmt.rst index 00737152497b..2aa449e2da67 100644 --- a/Documentation/media/uapi/v4l/pixfmt.rst +++ b/Documentation/media/uapi/v4l/pixfmt.rst @@ -19,20 +19,19 @@ see also :ref:`VIDIOC_G_FBUF `.) .. 
toctree:: :maxdepth: 1 - pixfmt-002 - pixfmt-003 - pixfmt-004 - colorspaces - pixfmt-006 - pixfmt-007 - pixfmt-008 + pixfmt-v4l2 + pixfmt-v4l2-mplane + pixfmt-intro pixfmt-indexed pixfmt-rgb yuv-formats hsv-formats depth-formats - pixfmt-013 + pixfmt-compressed sdr-formats tch-formats meta-formats pixfmt-reserved + colorspaces + colorspaces-defs + colorspaces-details diff --git a/Documentation/media/uapi/v4l/subdev-formats.rst b/Documentation/media/uapi/v4l/subdev-formats.rst index 8e73bb00c0d5..b1eea44550e1 100644 --- a/Documentation/media/uapi/v4l/subdev-formats.rst +++ b/Documentation/media/uapi/v4l/subdev-formats.rst @@ -1586,7 +1586,7 @@ JEIDA defined bit mapping will be named .. raw:: latex - \begin{adjustbox}{width=\columnwidth} + \tiny .. _v4l2-mbus-pixelcode-rgb-lvds: @@ -1784,7 +1784,7 @@ JEIDA defined bit mapping will be named .. raw:: latex - \end{adjustbox}\newline\newline + \normalsize Bayer Formats @@ -7321,11 +7321,14 @@ following information. The following table lists existing HSV/HSL formats. + .. raw:: latex - \newline\newline\begin{adjustbox}{width=\columnwidth} + \begingroup + \tiny + \setlength{\tabcolsep}{2pt} -.. tabularcolumns:: |p{6.2cm}|p{1.6cm}|p{0.7cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}| +.. tabularcolumns:: |p{3.0cm}|p{0.60cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}| .. _v4l2-mbus-pixelcode-hsv: @@ -7413,7 +7416,7 @@ The following table lists existing HSV/HSL formats. .. raw:: latex - \end{adjustbox}\newline\newline + \normalsize JPEG Compressed Formats @@ -7435,7 +7438,7 @@ The following table lists existing JPEG compressed formats. .. _v4l2-mbus-pixelcode-jpeg: -.. tabularcolumns:: |p{5.6cm}|p{1.2cm}|p{10.7cm}| +.. tabularcolumns:: |p{5.4cm}|p{1.4cm}|p{10.7cm}| .. flat-table:: JPEG Formats :header-rows: 1 @@ -7468,7 +7471,7 @@ formats. .. _v4l2-mbus-pixelcode-vendor-specific: -.. tabularcolumns:: |p{6.6cm}|p{1.2cm}|p{9.7cm}| +.. tabularcolumns:: |p{6.8cm}|p{1.4cm}|p{9.3cm}| .. flat-table:: Vendor and device specific formats :header-rows: 1 diff --git a/Documentation/media/uapi/v4l/v4l2-selection-targets.rst b/Documentation/media/uapi/v4l/v4l2-selection-targets.rst index cab07de6f4da..87433ec76c6b 100644 --- a/Documentation/media/uapi/v4l/v4l2-selection-targets.rst +++ b/Documentation/media/uapi/v4l/v4l2-selection-targets.rst @@ -12,7 +12,7 @@ of the two interfaces they are used. .. _v4l2-selection-targets-table: -.. tabularcolumns:: |p{5.8cm}|p{1.4cm}|p{6.5cm}|p{1.2cm}|p{1.6cm}| +.. tabularcolumns:: |p{6.0cm}|p{1.4cm}|p{7.4cm}|p{1.2cm}|p{1.4cm}| .. flat-table:: Selection target definitions :header-rows: 1 diff --git a/Documentation/media/uapi/v4l/v4l2.rst b/Documentation/media/uapi/v4l/v4l2.rst index f52a11c949d3..2128717299b3 100644 --- a/Documentation/media/uapi/v4l/v4l2.rst +++ b/Documentation/media/uapi/v4l/v4l2.rst @@ -11,7 +11,9 @@ This part describes the Video for Linux API version 2 (V4L2 API) specification. **Revision 4.5** -.. class:: toc-title +.. only:: html + + .. 
class:: toc-title Table of Contents @@ -23,7 +25,6 @@ This part describes the Video for Linux API version 2 (V4L2 API) specification. pixfmt io devices - driver libv4l compat user-func diff --git a/Documentation/media/uapi/v4l/vidioc-create-bufs.rst b/Documentation/media/uapi/v4l/vidioc-create-bufs.rst index aaca12fca06e..a39e18d69511 100644 --- a/Documentation/media/uapi/v4l/vidioc-create-bufs.rst +++ b/Documentation/media/uapi/v4l/vidioc-create-bufs.rst @@ -26,6 +26,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_create_buffers`. Description diff --git a/Documentation/media/uapi/v4l/vidioc-cropcap.rst b/Documentation/media/uapi/v4l/vidioc-cropcap.rst index 0f80d5ca2643..a65dbec6b20b 100644 --- a/Documentation/media/uapi/v4l/vidioc-cropcap.rst +++ b/Documentation/media/uapi/v4l/vidioc-cropcap.rst @@ -26,6 +26,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_cropcap`. Description diff --git a/Documentation/media/uapi/v4l/vidioc-dbg-g-chip-info.rst b/Documentation/media/uapi/v4l/vidioc-dbg-g-chip-info.rst index e1e5507e79ff..7709852282c2 100644 --- a/Documentation/media/uapi/v4l/vidioc-dbg-g-chip-info.rst +++ b/Documentation/media/uapi/v4l/vidioc-dbg-g-chip-info.rst @@ -26,6 +26,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_dbg_chip_info`. Description diff --git a/Documentation/media/uapi/v4l/vidioc-dbg-g-register.rst b/Documentation/media/uapi/v4l/vidioc-dbg-g-register.rst index 5960a6547f41..f4e8dd5f7889 100644 --- a/Documentation/media/uapi/v4l/vidioc-dbg-g-register.rst +++ b/Documentation/media/uapi/v4l/vidioc-dbg-g-register.rst @@ -29,6 +29,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_dbg_register`. Description diff --git a/Documentation/media/uapi/v4l/vidioc-dqevent.rst b/Documentation/media/uapi/v4l/vidioc-dqevent.rst index 8d663a73818e..cb3565f36793 100644 --- a/Documentation/media/uapi/v4l/vidioc-dqevent.rst +++ b/Documentation/media/uapi/v4l/vidioc-dqevent.rst @@ -26,6 +26,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_event`. Description @@ -38,7 +39,7 @@ exceptions which the application may get by e.g. using the select system call. -.. tabularcolumns:: |p{3.0cm}|p{4.3cm}|p{2.5cm}|p{7.7cm}| +.. tabularcolumns:: |p{3.0cm}|p{4.4cm}|p{2.4cm}|p{7.7cm}| .. c:type:: v4l2_event diff --git a/Documentation/media/uapi/v4l/vidioc-dv-timings-cap.rst b/Documentation/media/uapi/v4l/vidioc-dv-timings-cap.rst index 424f3a1c7f56..63ead6b7a115 100644 --- a/Documentation/media/uapi/v4l/vidioc-dv-timings-cap.rst +++ b/Documentation/media/uapi/v4l/vidioc-dv-timings-cap.rst @@ -29,6 +29,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_dv_timings_cap`. Description @@ -97,7 +98,7 @@ that doesn't support them will return an ``EINVAL`` error code. -.. tabularcolumns:: |p{1.0cm}|p{3.5cm}|p{3.5cm}|p{9.5cm}| +.. tabularcolumns:: |p{1.0cm}|p{4.0cm}|p{3.5cm}|p{9.2cm}| .. c:type:: v4l2_dv_timings_cap diff --git a/Documentation/media/uapi/v4l/vidioc-encoder-cmd.rst b/Documentation/media/uapi/v4l/vidioc-encoder-cmd.rst index ae20ee573757..5ae8c933b1b9 100644 --- a/Documentation/media/uapi/v4l/vidioc-encoder-cmd.rst +++ b/Documentation/media/uapi/v4l/vidioc-encoder-cmd.rst @@ -29,7 +29,7 @@ Arguments File descriptor returned by :ref:`open() `. 
``argp`` - + Pointer to struct :c:type:`v4l2_encoder_cmd`. Description =========== diff --git a/Documentation/media/uapi/v4l/vidioc-enum-dv-timings.rst b/Documentation/media/uapi/v4l/vidioc-enum-dv-timings.rst index 3e9d0f69cc73..63dca65f49e4 100644 --- a/Documentation/media/uapi/v4l/vidioc-enum-dv-timings.rst +++ b/Documentation/media/uapi/v4l/vidioc-enum-dv-timings.rst @@ -29,6 +29,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_enum_dv_timings`. Description diff --git a/Documentation/media/uapi/v4l/vidioc-enum-fmt.rst b/Documentation/media/uapi/v4l/vidioc-enum-fmt.rst index a2adaa4bd4dd..019c513df217 100644 --- a/Documentation/media/uapi/v4l/vidioc-enum-fmt.rst +++ b/Documentation/media/uapi/v4l/vidioc-enum-fmt.rst @@ -26,6 +26,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_fmtdesc`. Description diff --git a/Documentation/media/uapi/v4l/vidioc-enum-frameintervals.rst b/Documentation/media/uapi/v4l/vidioc-enum-frameintervals.rst index 39492453f02d..fea7dc3c879d 100644 --- a/Documentation/media/uapi/v4l/vidioc-enum-frameintervals.rst +++ b/Documentation/media/uapi/v4l/vidioc-enum-frameintervals.rst @@ -26,9 +26,8 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` - Pointer to a struct :c:type:`v4l2_frmivalenum` - structure that contains a pixel format and size and receives a frame - interval. + Pointer to struct :c:type:`v4l2_frmivalenum` + that contains a pixel format and size and receives a frame interval. Description @@ -124,6 +123,8 @@ application should zero out all members except for the *IN* fields. .. c:type:: v4l2_frmivalenum +.. tabularcolumns:: |p{1.8cm}|p{4.4cm}|p{2.4cm}|p{8.9cm}| + .. flat-table:: struct v4l2_frmivalenum :header-rows: 0 :stub-columns: 0 diff --git a/Documentation/media/uapi/v4l/vidioc-enum-framesizes.rst b/Documentation/media/uapi/v4l/vidioc-enum-framesizes.rst index 628f1aa66338..6de117f163e0 100644 --- a/Documentation/media/uapi/v4l/vidioc-enum-framesizes.rst +++ b/Documentation/media/uapi/v4l/vidioc-enum-framesizes.rst @@ -26,7 +26,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` - Pointer to a struct :c:type:`v4l2_frmsizeenum` + Pointer to struct :c:type:`v4l2_frmsizeenum` that contains an index and pixel format and receives a frame width and height. @@ -140,6 +140,8 @@ application should zero out all members except for the *IN* fields. .. c:type:: v4l2_frmsizeenum +.. tabularcolumns:: |p{1.4cm}|p{5.9cm}|p{2.3cm}|p{8.0cm}| + .. flat-table:: struct v4l2_frmsizeenum :header-rows: 0 :stub-columns: 0 diff --git a/Documentation/media/uapi/v4l/vidioc-enum-freq-bands.rst b/Documentation/media/uapi/v4l/vidioc-enum-freq-bands.rst index 4e5f5e5bf632..195cf45f3c32 100644 --- a/Documentation/media/uapi/v4l/vidioc-enum-freq-bands.rst +++ b/Documentation/media/uapi/v4l/vidioc-enum-freq-bands.rst @@ -26,6 +26,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_frequency_band`. Description diff --git a/Documentation/media/uapi/v4l/vidioc-enumaudio.rst b/Documentation/media/uapi/v4l/vidioc-enumaudio.rst index 74bc3ed0bdd8..8e5193e8696f 100644 --- a/Documentation/media/uapi/v4l/vidioc-enumaudio.rst +++ b/Documentation/media/uapi/v4l/vidioc-enumaudio.rst @@ -26,6 +26,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_audio`. 
Description diff --git a/Documentation/media/uapi/v4l/vidioc-enumaudioout.rst b/Documentation/media/uapi/v4l/vidioc-enumaudioout.rst index 4470a1ece5cf..6d2b4f6e78b0 100644 --- a/Documentation/media/uapi/v4l/vidioc-enumaudioout.rst +++ b/Documentation/media/uapi/v4l/vidioc-enumaudioout.rst @@ -26,6 +26,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_audioout`. Description diff --git a/Documentation/media/uapi/v4l/vidioc-enuminput.rst b/Documentation/media/uapi/v4l/vidioc-enuminput.rst index 266e48ab237f..0350069a56c5 100644 --- a/Documentation/media/uapi/v4l/vidioc-enuminput.rst +++ b/Documentation/media/uapi/v4l/vidioc-enuminput.rst @@ -26,6 +26,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_input`. Description diff --git a/Documentation/media/uapi/v4l/vidioc-enumoutput.rst b/Documentation/media/uapi/v4l/vidioc-enumoutput.rst index 93a2cf3b310c..697dcd186ae3 100644 --- a/Documentation/media/uapi/v4l/vidioc-enumoutput.rst +++ b/Documentation/media/uapi/v4l/vidioc-enumoutput.rst @@ -26,6 +26,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_output`. Description diff --git a/Documentation/media/uapi/v4l/vidioc-enumstd.rst b/Documentation/media/uapi/v4l/vidioc-enumstd.rst index f2bdd45cfa0d..b7fda29f46a1 100644 --- a/Documentation/media/uapi/v4l/vidioc-enumstd.rst +++ b/Documentation/media/uapi/v4l/vidioc-enumstd.rst @@ -26,6 +26,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_standard`. Description @@ -224,12 +225,15 @@ support digital TV. See also the Linux DVB API at #define V4L2_STD_ALL (V4L2_STD_525_60 | V4L2_STD_625_50) + .. raw:: latex - \begin{adjustbox}{width=\columnwidth} + \begingroup + \tiny + \setlength{\tabcolsep}{2pt} .. NTSC/M PAL/M /N /B /D /H /I SECAM/B /D /K1 /L -.. tabularcolumns:: |p{2.7cm}|p{2.6cm}|p{3.0cm}|p{3.2cm}|p{3.2cm}|p{2.2cm}|p{1.2cm}|p{3.2cm}|p{3.0cm}|p{2.0cm}|p{2.0cm}|p{2.0cm}| +.. tabularcolumns:: |p{1.43cm}|p{1.38cm}|p{1.59cm}|p{1.7cm}|p{1.7cm}|p{1.17cm}|p{0.64cm}|p{1.71cm}|p{1.6cm}|p{1.07cm}|p{1.07cm}|p{1.07cm}| .. _video-standards: @@ -293,7 +297,7 @@ support digital TV. See also the Linux DVB API at .. raw:: latex - \end{adjustbox}\newline\newline + \endgroup diff --git a/Documentation/media/uapi/v4l/vidioc-expbuf.rst b/Documentation/media/uapi/v4l/vidioc-expbuf.rst index 246e48028d40..226e83eb28a9 100644 --- a/Documentation/media/uapi/v4l/vidioc-expbuf.rst +++ b/Documentation/media/uapi/v4l/vidioc-expbuf.rst @@ -26,6 +26,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_exportbuffer`. Description diff --git a/Documentation/media/uapi/v4l/vidioc-g-audio.rst b/Documentation/media/uapi/v4l/vidioc-g-audio.rst index 5b67e81a0db6..290851f99386 100644 --- a/Documentation/media/uapi/v4l/vidioc-g-audio.rst +++ b/Documentation/media/uapi/v4l/vidioc-g-audio.rst @@ -29,6 +29,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_audio`. Description diff --git a/Documentation/media/uapi/v4l/vidioc-g-audioout.rst b/Documentation/media/uapi/v4l/vidioc-g-audioout.rst index d16ecbaddc59..1c98af33ee70 100644 --- a/Documentation/media/uapi/v4l/vidioc-g-audioout.rst +++ b/Documentation/media/uapi/v4l/vidioc-g-audioout.rst @@ -29,6 +29,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_audioout`. 
Description diff --git a/Documentation/media/uapi/v4l/vidioc-g-crop.rst b/Documentation/media/uapi/v4l/vidioc-g-crop.rst index 13771ee3e94a..a6ed43ba9ca3 100644 --- a/Documentation/media/uapi/v4l/vidioc-g-crop.rst +++ b/Documentation/media/uapi/v4l/vidioc-g-crop.rst @@ -29,6 +29,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_crop`. Description diff --git a/Documentation/media/uapi/v4l/vidioc-g-ctrl.rst b/Documentation/media/uapi/v4l/vidioc-g-ctrl.rst index d8a379182a34..299b9aabbac2 100644 --- a/Documentation/media/uapi/v4l/vidioc-g-ctrl.rst +++ b/Documentation/media/uapi/v4l/vidioc-g-ctrl.rst @@ -29,6 +29,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_control`. Description diff --git a/Documentation/media/uapi/v4l/vidioc-g-dv-timings.rst b/Documentation/media/uapi/v4l/vidioc-g-dv-timings.rst index e573c74138de..2696380626d4 100644 --- a/Documentation/media/uapi/v4l/vidioc-g-dv-timings.rst +++ b/Documentation/media/uapi/v4l/vidioc-g-dv-timings.rst @@ -35,6 +35,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_dv_timings`. Description @@ -208,7 +209,7 @@ EBUSY - 0 - BT.656/1120 timings - +.. tabularcolumns:: |p{4.5cm}|p{12.8cm}| .. _dv-bt-standards: @@ -231,7 +232,7 @@ EBUSY There are no horizontal syncs/porches at all in this format. Total blanking timings must be set in hsync or vsync fields only. -.. tabularcolumns:: |p{6.0cm}|p{11.5cm}| +.. tabularcolumns:: |p{7.0cm}|p{10.5cm}| .. _dv-bt-flags: diff --git a/Documentation/media/uapi/v4l/vidioc-g-edid.rst b/Documentation/media/uapi/v4l/vidioc-g-edid.rst index a16a193a1cbf..acab90f06e5a 100644 --- a/Documentation/media/uapi/v4l/vidioc-g-edid.rst +++ b/Documentation/media/uapi/v4l/vidioc-g-edid.rst @@ -36,6 +36,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_edid`. Description diff --git a/Documentation/media/uapi/v4l/vidioc-g-enc-index.rst b/Documentation/media/uapi/v4l/vidioc-g-enc-index.rst index 418e886fd44b..9dfe64fc21a4 100644 --- a/Documentation/media/uapi/v4l/vidioc-g-enc-index.rst +++ b/Documentation/media/uapi/v4l/vidioc-g-enc-index.rst @@ -26,6 +26,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_enc_idx`. Description @@ -55,7 +56,7 @@ Currently this ioctl is only defined for MPEG-2 program streams and video elementary streams. -.. tabularcolumns:: |p{3.5cm}|p{5.6cm}|p{8.4cm}| +.. tabularcolumns:: |p{3.8cm}|p{5.6cm}|p{8.1cm}| .. c:type:: v4l2_enc_idx diff --git a/Documentation/media/uapi/v4l/vidioc-g-ext-ctrls.rst b/Documentation/media/uapi/v4l/vidioc-g-ext-ctrls.rst index 5ab8d2ac27b9..2011c2b2ee67 100644 --- a/Documentation/media/uapi/v4l/vidioc-g-ext-ctrls.rst +++ b/Documentation/media/uapi/v4l/vidioc-g-ext-ctrls.rst @@ -34,6 +34,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_ext_controls`. Description @@ -180,7 +181,7 @@ still cause this situation. ``V4L2_CTRL_FLAG_HAS_PAYLOAD`` is set for this control. -.. tabularcolumns:: |p{4.0cm}|p{2.0cm}|p{2.0cm}|p{8.5cm}| +.. tabularcolumns:: |p{4.0cm}|p{2.2cm}|p{2.1cm}|p{8.2cm}| .. 
c:type:: v4l2_ext_controls diff --git a/Documentation/media/uapi/v4l/vidioc-g-fbuf.rst b/Documentation/media/uapi/v4l/vidioc-g-fbuf.rst index 4a6a03d158ca..fc73bf0f6052 100644 --- a/Documentation/media/uapi/v4l/vidioc-g-fbuf.rst +++ b/Documentation/media/uapi/v4l/vidioc-g-fbuf.rst @@ -29,6 +29,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_framebuffer`. Description diff --git a/Documentation/media/uapi/v4l/vidioc-g-fmt.rst b/Documentation/media/uapi/v4l/vidioc-g-fmt.rst index d082f9a21548..3ead350e099f 100644 --- a/Documentation/media/uapi/v4l/vidioc-g-fmt.rst +++ b/Documentation/media/uapi/v4l/vidioc-g-fmt.rst @@ -31,6 +31,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_format`. Description @@ -87,7 +88,7 @@ The format as returned by :ref:`VIDIOC_TRY_FMT ` must be identical .. c:type:: v4l2_format -.. tabularcolumns:: |p{1.2cm}|p{4.3cm}|p{3.0cm}|p{9.0cm}| +.. tabularcolumns:: |p{1.2cm}|p{4.6cm}|p{3.0cm}|p{8.6cm}| .. flat-table:: struct v4l2_format :header-rows: 0 diff --git a/Documentation/media/uapi/v4l/vidioc-g-frequency.rst b/Documentation/media/uapi/v4l/vidioc-g-frequency.rst index 46ab276f412b..c1cccb144660 100644 --- a/Documentation/media/uapi/v4l/vidioc-g-frequency.rst +++ b/Documentation/media/uapi/v4l/vidioc-g-frequency.rst @@ -29,6 +29,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_frequency`. Description diff --git a/Documentation/media/uapi/v4l/vidioc-g-input.rst b/Documentation/media/uapi/v4l/vidioc-g-input.rst index 1364a918fbce..1dcef44eef02 100644 --- a/Documentation/media/uapi/v4l/vidioc-g-input.rst +++ b/Documentation/media/uapi/v4l/vidioc-g-input.rst @@ -29,6 +29,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to an integer with input index. Description diff --git a/Documentation/media/uapi/v4l/vidioc-g-jpegcomp.rst b/Documentation/media/uapi/v4l/vidioc-g-jpegcomp.rst index 8ba353067b33..a1773ea9543e 100644 --- a/Documentation/media/uapi/v4l/vidioc-g-jpegcomp.rst +++ b/Documentation/media/uapi/v4l/vidioc-g-jpegcomp.rst @@ -29,6 +29,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_jpegcompression`. Description diff --git a/Documentation/media/uapi/v4l/vidioc-g-modulator.rst b/Documentation/media/uapi/v4l/vidioc-g-modulator.rst index 77d017eb3fcc..a47b6a15cfbe 100644 --- a/Documentation/media/uapi/v4l/vidioc-g-modulator.rst +++ b/Documentation/media/uapi/v4l/vidioc-g-modulator.rst @@ -29,6 +29,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_modulator`. Description diff --git a/Documentation/media/uapi/v4l/vidioc-g-output.rst b/Documentation/media/uapi/v4l/vidioc-g-output.rst index 7750948fc61b..3e0093f66834 100644 --- a/Documentation/media/uapi/v4l/vidioc-g-output.rst +++ b/Documentation/media/uapi/v4l/vidioc-g-output.rst @@ -29,6 +29,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to an integer with output index. Description diff --git a/Documentation/media/uapi/v4l/vidioc-g-parm.rst b/Documentation/media/uapi/v4l/vidioc-g-parm.rst index 3b2e6e59a334..616a5ea3f8fa 100644 --- a/Documentation/media/uapi/v4l/vidioc-g-parm.rst +++ b/Documentation/media/uapi/v4l/vidioc-g-parm.rst @@ -29,6 +29,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_streamparm`. 
Description diff --git a/Documentation/media/uapi/v4l/vidioc-g-priority.rst b/Documentation/media/uapi/v4l/vidioc-g-priority.rst index a763988f64e4..c28996b4a45c 100644 --- a/Documentation/media/uapi/v4l/vidioc-g-priority.rst +++ b/Documentation/media/uapi/v4l/vidioc-g-priority.rst @@ -29,7 +29,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` - Pointer to an enum v4l2_priority type. + Pointer to an enum :c:type:`v4l2_priority` type. Description diff --git a/Documentation/media/uapi/v4l/vidioc-g-selection.rst b/Documentation/media/uapi/v4l/vidioc-g-selection.rst index c1ee86472918..f1d9df029e0d 100644 --- a/Documentation/media/uapi/v4l/vidioc-g-selection.rst +++ b/Documentation/media/uapi/v4l/vidioc-g-selection.rst @@ -29,11 +29,8 @@ Arguments ``fd`` File descriptor returned by :ref:`open() `. -``request`` - VIDIOC_G_SELECTION, VIDIOC_S_SELECTION - ``argp`` - + Pointer to struct :c:type:`v4l2_selection`. Description =========== diff --git a/Documentation/media/uapi/v4l/vidioc-g-sliced-vbi-cap.rst b/Documentation/media/uapi/v4l/vidioc-g-sliced-vbi-cap.rst index d7e2b2fa8b88..a9633cae76c5 100644 --- a/Documentation/media/uapi/v4l/vidioc-g-sliced-vbi-cap.rst +++ b/Documentation/media/uapi/v4l/vidioc-g-sliced-vbi-cap.rst @@ -26,6 +26,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_sliced_vbi_cap`. Description @@ -122,9 +123,9 @@ the sliced VBI API is unsupported or ``type`` is invalid. .. raw:: latex - \begin{adjustbox}{width=\columnwidth} + \scriptsize -.. tabularcolumns:: |p{5.0cm}|p{1.4cm}|p{3.0cm}|p{2.5cm}|p{9.0cm}| +.. tabularcolumns:: |p{3.5cm}|p{1.0cm}|p{2.0cm}|p{2.0cm}|p{8.0cm}| .. _vbi-services: @@ -180,7 +181,7 @@ the sliced VBI API is unsupported or ``type`` is invalid. .. raw:: latex - \end{adjustbox}\newline\newline + \normalsize Return Value diff --git a/Documentation/media/uapi/v4l/vidioc-g-std.rst b/Documentation/media/uapi/v4l/vidioc-g-std.rst index cd856ad21a28..90791ab51a53 100644 --- a/Documentation/media/uapi/v4l/vidioc-g-std.rst +++ b/Documentation/media/uapi/v4l/vidioc-g-std.rst @@ -29,6 +29,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to :c:type:`v4l2_std_id`. Description diff --git a/Documentation/media/uapi/v4l/vidioc-g-tuner.rst b/Documentation/media/uapi/v4l/vidioc-g-tuner.rst index 57c79fa43866..acdd15901a51 100644 --- a/Documentation/media/uapi/v4l/vidioc-g-tuner.rst +++ b/Documentation/media/uapi/v4l/vidioc-g-tuner.rst @@ -29,6 +29,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_tuner`. Description @@ -392,22 +393,25 @@ To change the radio frequency the .. raw:: latex - \begin{adjustbox}{width=\columnwidth} + \scriptsize + +.. tabularcolumns:: |p{1.5cm}|p{1.5cm}|p{2.9cm}|p{2.9cm}|p{2.9cm}|p{2.9cm}| .. _tuner-matrix: .. flat-table:: Tuner Audio Matrix :header-rows: 2 :stub-columns: 0 + :widths: 7 7 14 14 14 14 * - - - :cspan:`5` Selected ``V4L2_TUNER_MODE_`` + - :cspan:`4` Selected ``V4L2_TUNER_MODE_`` * - Received ``V4L2_TUNER_SUB_`` - ``MONO`` - ``STEREO`` - ``LANG1`` - ``LANG2 = SAP`` - - ``LANG1_LANG2``\ [#f1]_ + - ``LANG1_LANG2``\ [#f1]_ * - ``MONO`` - Mono - Mono/Mono @@ -434,14 +438,14 @@ To change the radio frequency the - L+R/SAP (preferred) or L/R or L+R/L+R * - ``LANG1 | LANG2`` - Language 1 - - Lang1/Lang2 (deprecated [#f2]_) or Lang1/Lang1 + - Lang1/Lang2 (deprecated\ [#f2]_) or Lang1/Lang1 - Language 1 - Language 2 - Lang1/Lang2 (preferred) or Lang1/Lang1 .. 
raw:: latex - \end{adjustbox}\newline\newline + \normalsize Return Value ============ diff --git a/Documentation/media/uapi/v4l/vidioc-overlay.rst b/Documentation/media/uapi/v4l/vidioc-overlay.rst index cd7b62ebc53b..1383e3db25fc 100644 --- a/Documentation/media/uapi/v4l/vidioc-overlay.rst +++ b/Documentation/media/uapi/v4l/vidioc-overlay.rst @@ -26,6 +26,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to an integer. Description diff --git a/Documentation/media/uapi/v4l/vidioc-prepare-buf.rst b/Documentation/media/uapi/v4l/vidioc-prepare-buf.rst index bdcfd9fe550d..70687a86ae38 100644 --- a/Documentation/media/uapi/v4l/vidioc-prepare-buf.rst +++ b/Documentation/media/uapi/v4l/vidioc-prepare-buf.rst @@ -26,6 +26,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_buffer`. Description diff --git a/Documentation/media/uapi/v4l/vidioc-qbuf.rst b/Documentation/media/uapi/v4l/vidioc-qbuf.rst index 1f3612637200..9e448a4aa3aa 100644 --- a/Documentation/media/uapi/v4l/vidioc-qbuf.rst +++ b/Documentation/media/uapi/v4l/vidioc-qbuf.rst @@ -29,6 +29,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_buffer`. Description diff --git a/Documentation/media/uapi/v4l/vidioc-query-dv-timings.rst b/Documentation/media/uapi/v4l/vidioc-query-dv-timings.rst index 0d16853b1b51..6c82eafd28bb 100644 --- a/Documentation/media/uapi/v4l/vidioc-query-dv-timings.rst +++ b/Documentation/media/uapi/v4l/vidioc-query-dv-timings.rst @@ -29,6 +29,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_dv_timings`. Description diff --git a/Documentation/media/uapi/v4l/vidioc-querybuf.rst b/Documentation/media/uapi/v4l/vidioc-querybuf.rst index 0bdc8e0abddc..dd54747fabc9 100644 --- a/Documentation/media/uapi/v4l/vidioc-querybuf.rst +++ b/Documentation/media/uapi/v4l/vidioc-querybuf.rst @@ -26,6 +26,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_buffer`. Description diff --git a/Documentation/media/uapi/v4l/vidioc-querycap.rst b/Documentation/media/uapi/v4l/vidioc-querycap.rst index 12e0d9a63cd8..66fb1b3d6e6e 100644 --- a/Documentation/media/uapi/v4l/vidioc-querycap.rst +++ b/Documentation/media/uapi/v4l/vidioc-querycap.rst @@ -26,6 +26,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_capability`. Description @@ -91,12 +92,13 @@ specification the ioctl returns an ``EINVAL`` error code. stack from a newer kernel. The version number is formatted using the ``KERNEL_VERSION()`` - macro: + macro. For example if the media stack corresponds to the V4L2 + version shipped with Kernel 4.14, it would be equivalent to: * - :cspan:`2` ``#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))`` - ``__u32 version = KERNEL_VERSION(0, 8, 1);`` + ``__u32 version = KERNEL_VERSION(4, 14, 0);`` ``printf ("Version: %u.%u.%u\\n",`` @@ -131,7 +133,7 @@ specification the ioctl returns an ``EINVAL`` error code. -.. tabularcolumns:: |p{6cm}|p{2.2cm}|p{8.8cm}| +.. tabularcolumns:: |p{6.1cm}|p{2.2cm}|p{8.7cm}| .. _device-capabilities: diff --git a/Documentation/media/uapi/v4l/vidioc-queryctrl.rst b/Documentation/media/uapi/v4l/vidioc-queryctrl.rst index 41c5744a1239..5bd26e8c9a1a 100644 --- a/Documentation/media/uapi/v4l/vidioc-queryctrl.rst +++ b/Documentation/media/uapi/v4l/vidioc-queryctrl.rst @@ -32,6 +32,8 @@ Arguments File descriptor returned by :ref:`open() `. 
``argp`` + Pointer to struct :c:type:`v4l2_queryctrl`, :c:type:`v4l2_query_ext_ctrl` + or :c:type:`v4l2_querymenu` (depending on the ioctl). Description @@ -274,7 +276,7 @@ See also the examples in :ref:`control`. -.. tabularcolumns:: |p{1.2cm}|p{0.6cm}|p{1.6cm}|p{13.5cm}| +.. tabularcolumns:: |p{1.2cm}|p{1.0cm}|p{1.7cm}|p{13.0cm}| .. _v4l2-querymenu: diff --git a/Documentation/media/uapi/v4l/vidioc-querystd.rst b/Documentation/media/uapi/v4l/vidioc-querystd.rst index 3ef9ab37f582..cf40bca19b9f 100644 --- a/Documentation/media/uapi/v4l/vidioc-querystd.rst +++ b/Documentation/media/uapi/v4l/vidioc-querystd.rst @@ -26,6 +26,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to :c:type:`v4l2_std_id`. Description diff --git a/Documentation/media/uapi/v4l/vidioc-reqbufs.rst b/Documentation/media/uapi/v4l/vidioc-reqbufs.rst index a4180d576ee5..316f52c8a310 100644 --- a/Documentation/media/uapi/v4l/vidioc-reqbufs.rst +++ b/Documentation/media/uapi/v4l/vidioc-reqbufs.rst @@ -26,7 +26,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` - + Pointer to struct :c:type:`v4l2_requestbuffers`. Description =========== diff --git a/Documentation/media/uapi/v4l/vidioc-s-hw-freq-seek.rst b/Documentation/media/uapi/v4l/vidioc-s-hw-freq-seek.rst index 5672ca48d2bd..b318cb8e1df3 100644 --- a/Documentation/media/uapi/v4l/vidioc-s-hw-freq-seek.rst +++ b/Documentation/media/uapi/v4l/vidioc-s-hw-freq-seek.rst @@ -26,6 +26,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_hw_freq_seek`. Description diff --git a/Documentation/media/uapi/v4l/vidioc-streamon.rst b/Documentation/media/uapi/v4l/vidioc-streamon.rst index 972d5b3c74aa..e851a6961b78 100644 --- a/Documentation/media/uapi/v4l/vidioc-streamon.rst +++ b/Documentation/media/uapi/v4l/vidioc-streamon.rst @@ -29,7 +29,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` - + Pointer to an integer. Description =========== diff --git a/Documentation/media/uapi/v4l/vidioc-subdev-enum-frame-interval.rst b/Documentation/media/uapi/v4l/vidioc-subdev-enum-frame-interval.rst index 1a02c935c8b5..1bfe3865dcc2 100644 --- a/Documentation/media/uapi/v4l/vidioc-subdev-enum-frame-interval.rst +++ b/Documentation/media/uapi/v4l/vidioc-subdev-enum-frame-interval.rst @@ -26,6 +26,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_subdev_frame_interval_enum`. Description diff --git a/Documentation/media/uapi/v4l/vidioc-subdev-enum-frame-size.rst b/Documentation/media/uapi/v4l/vidioc-subdev-enum-frame-size.rst index 746c24ed97a0..33fdc3ac9316 100644 --- a/Documentation/media/uapi/v4l/vidioc-subdev-enum-frame-size.rst +++ b/Documentation/media/uapi/v4l/vidioc-subdev-enum-frame-size.rst @@ -26,6 +26,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_subdev_frame_size_enum`. Description diff --git a/Documentation/media/uapi/v4l/vidioc-subdev-enum-mbus-code.rst b/Documentation/media/uapi/v4l/vidioc-subdev-enum-mbus-code.rst index 0dfee3829ee2..4e4291798e4b 100644 --- a/Documentation/media/uapi/v4l/vidioc-subdev-enum-mbus-code.rst +++ b/Documentation/media/uapi/v4l/vidioc-subdev-enum-mbus-code.rst @@ -26,6 +26,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_subdev_mbus_code_enum`. 
Description diff --git a/Documentation/media/uapi/v4l/vidioc-subdev-g-crop.rst b/Documentation/media/uapi/v4l/vidioc-subdev-g-crop.rst index 000e8fcd3f25..69b2ae8e7c15 100644 --- a/Documentation/media/uapi/v4l/vidioc-subdev-g-crop.rst +++ b/Documentation/media/uapi/v4l/vidioc-subdev-g-crop.rst @@ -29,6 +29,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_subdev_crop`. Description diff --git a/Documentation/media/uapi/v4l/vidioc-subdev-g-fmt.rst b/Documentation/media/uapi/v4l/vidioc-subdev-g-fmt.rst index b352456dfe2c..81c5d331af9a 100644 --- a/Documentation/media/uapi/v4l/vidioc-subdev-g-fmt.rst +++ b/Documentation/media/uapi/v4l/vidioc-subdev-g-fmt.rst @@ -29,6 +29,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_subdev_format`. Description diff --git a/Documentation/media/uapi/v4l/vidioc-subdev-g-frame-interval.rst b/Documentation/media/uapi/v4l/vidioc-subdev-g-frame-interval.rst index 46159dcfce30..5af0a7179941 100644 --- a/Documentation/media/uapi/v4l/vidioc-subdev-g-frame-interval.rst +++ b/Documentation/media/uapi/v4l/vidioc-subdev-g-frame-interval.rst @@ -29,6 +29,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_subdev_frame_interval`. Description diff --git a/Documentation/media/uapi/v4l/vidioc-subdev-g-selection.rst b/Documentation/media/uapi/v4l/vidioc-subdev-g-selection.rst index 071d9c033db6..b1d3dbbef42a 100644 --- a/Documentation/media/uapi/v4l/vidioc-subdev-g-selection.rst +++ b/Documentation/media/uapi/v4l/vidioc-subdev-g-selection.rst @@ -29,6 +29,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_subdev_selection`. Description diff --git a/Documentation/media/uapi/v4l/vidioc-subscribe-event.rst b/Documentation/media/uapi/v4l/vidioc-subscribe-event.rst index e4a51431032c..b521efa53ceb 100644 --- a/Documentation/media/uapi/v4l/vidioc-subscribe-event.rst +++ b/Documentation/media/uapi/v4l/vidioc-subscribe-event.rst @@ -30,6 +30,7 @@ Arguments File descriptor returned by :ref:`open() `. ``argp`` + Pointer to struct :c:type:`v4l2_event_subscription`. Description @@ -39,7 +40,7 @@ Subscribe or unsubscribe V4L2 event. Subscribed events are dequeued by using the :ref:`VIDIOC_DQEVENT` ioctl. -.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}| +.. tabularcolumns:: |p{4.6cm}|p{4.4cm}|p{8.7cm}| .. c:type:: v4l2_event_subscription @@ -72,7 +73,7 @@ using the :ref:`VIDIOC_DQEVENT` ioctl. -.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}| +.. tabularcolumns:: |p{6.8cm}|p{2.2cm}|p{8.5cm}| .. 
_event-flags: diff --git a/Documentation/media/v4l-drivers/au0828-cardlist.rst b/Documentation/media/v4l-drivers/au0828-cardlist.rst index 82d2567bc7c1..bb87b7b36a83 100644 --- a/Documentation/media/v4l-drivers/au0828-cardlist.rst +++ b/Documentation/media/v4l-drivers/au0828-cardlist.rst @@ -1,13 +1,37 @@ AU0828 cards list ================= -=========== ========================== ======================================================================================================================= -Card number Card name USB IDs -=========== ========================== ======================================================================================================================= -0 Unknown board -1 Hauppauge HVR950Q 2040:7200, 2040:7210, 2040:7217, 2040:721b, 2040:721e, 2040:721f, 2040:7280, 0fd9:0008, 2040:7260, 2040:7213, 2040:7270 -2 Hauppauge HVR850 2040:7240 -3 DViCO FusionHDTV USB 0fe9:d620 -4 Hauppauge HVR950Q rev xxF8 2040:7201, 2040:7211, 2040:7281 -5 Hauppauge Woodbury 05e1:0480, 2040:8200 -=========== ========================== ======================================================================================================================= +.. tabularcolumns:: |p{1.4cm}|p{6.5cm}|p{10.0cm}| + +.. flat-table:: + :header-rows: 1 + :widths: 2 19 18 + :stub-columns: 0 + + * - Card number + - Card name + - USB IDs + + * - 0 + - Unknown board + - + + * - 1 + - Hauppauge HVR950Q + - 2040:7200, 2040:7210, 2040:7217, 2040:721b, 2040:721e, 2040:721f, 2040:7280, 0fd9:0008, 2040:7260, 2040:7213, 2040:7270 + + * - 2 + - Hauppauge HVR850 + - 2040:7240 + + * - 3 + - DViCO FusionHDTV USB + - 0fe9:d620 + + * - 4 + - Hauppauge HVR950Q rev xxF8 + - 2040:7201, 2040:7211, 2040:7281 + + * - 5 + - Hauppauge Woodbury + - 05e1:0480, 2040:8200 diff --git a/Documentation/media/v4l-drivers/bttv-cardlist.rst b/Documentation/media/v4l-drivers/bttv-cardlist.rst index 28a01cd6cf2e..8da27b924e01 100644 --- a/Documentation/media/v4l-drivers/bttv-cardlist.rst +++ b/Documentation/media/v4l-drivers/bttv-cardlist.rst @@ -1,174 +1,681 @@ BTTV cards list =============== -=========== ================================================================================= ============================================================================================================================================================================== -Card number Card name PCI IDs -=========== ================================================================================= ============================================================================================================================================================================== -0 *** UNKNOWN/GENERIC *** -1 MIRO PCTV -2 Hauppauge (bt848) -3 STB, Gateway P/N 6000699 (bt848) -4 Intel Create and Share PCI/ Smart Video Recorder III -5 Diamond DTV2000 -6 AVerMedia TVPhone -7 MATRIX-Vision MV-Delta -8 Lifeview FlyVideo II (Bt848) LR26 / MAXI TV Video PCI2 LR26 -9 IMS/IXmicro TurboTV -10 Hauppauge (bt878) 0070:13eb, 0070:3900, 2636:10b4 -11 MIRO PCTV pro -12 ADS Technologies Channel Surfer TV (bt848) -13 AVerMedia TVCapture 98 1461:0002, 1461:0004, 1461:0300 -14 Aimslab Video Highway Xtreme (VHX) -15 Zoltrix TV-Max a1a0:a0fc -16 Prolink Pixelview PlayTV (bt878) -17 Leadtek WinView 601 -18 AVEC Intercapture -19 Lifeview FlyVideo II EZ /FlyKit LR38 Bt848 (capture only) -20 CEI Raffles Card -21 Lifeview FlyVideo 98/ Lucky Star Image World ConferenceTV LR50 -22 Askey CPH050/ Phoebe Tv Master + FM 14ff:3002 -23 Modular Technology MM201/MM202/MM205/MM210/MM215 PCTV, 
bt878 14c7:0101 -24 Askey CPH05X/06X (bt878) [many vendors] 144f:3002, 144f:3005, 144f:5000, 14ff:3000 -25 Terratec TerraTV+ Version 1.0 (Bt848)/ Terra TValue Version 1.0/ Vobis TV-Boostar -26 Hauppauge WinCam newer (bt878) -27 Lifeview FlyVideo 98/ MAXI TV Video PCI2 LR50 -28 Terratec TerraTV+ Version 1.1 (bt878) 153b:1127, 1852:1852 -29 Imagenation PXC200 1295:200a -30 Lifeview FlyVideo 98 LR50 1f7f:1850 -31 Formac iProTV, Formac ProTV I (bt848) -32 Intel Create and Share PCI/ Smart Video Recorder III -33 Terratec TerraTValue Version Bt878 153b:1117, 153b:1118, 153b:1119, 153b:111a, 153b:1134, 153b:5018 -34 Leadtek WinFast 2000/ WinFast 2000 XP 107d:6606, 107d:6609, 6606:217d, f6ff:fff6 -35 Lifeview FlyVideo 98 LR50 / Chronos Video Shuttle II 1851:1850, 1851:a050 -36 Lifeview FlyVideo 98FM LR50 / Typhoon TView TV/FM Tuner 1852:1852 -37 Prolink PixelView PlayTV pro -38 Askey CPH06X TView99 144f:3000, 144f:a005, a04f:a0fc -39 Pinnacle PCTV Studio/Rave 11bd:0012, bd11:1200, bd11:ff00, 11bd:ff12 -40 STB TV PCI FM, Gateway P/N 6000704 (bt878), 3Dfx VoodooTV 100 10b4:2636, 10b4:2645, 121a:3060 -41 AVerMedia TVPhone 98 1461:0001, 1461:0003 -42 ProVideo PV951 aa0c:146c -43 Little OnAir TV -44 Sigma TVII-FM -45 MATRIX-Vision MV-Delta 2 -46 Zoltrix Genie TV/FM 15b0:4000, 15b0:400a, 15b0:400d, 15b0:4010, 15b0:4016 -47 Terratec TV/Radio+ 153b:1123 -48 Askey CPH03x/ Dynalink Magic TView -49 IODATA GV-BCTV3/PCI 10fc:4020 -50 Prolink PV-BT878P+4E / PixelView PlayTV PAK / Lenco MXTV-9578 CP -51 Eagle Wireless Capricorn2 (bt878A) -52 Pinnacle PCTV Studio Pro -53 Typhoon TView RDS + FM Stereo / KNC1 TV Station RDS -54 Lifeview FlyVideo 2000 /FlyVideo A2/ Lifetec LT 9415 TV [LR90] -55 Askey CPH031/ BESTBUY Easy TV -56 Lifeview FlyVideo 98FM LR50 a051:41a0 -57 GrandTec 'Grand Video Capture' (Bt848) 4344:4142 -58 Askey CPH060/ Phoebe TV Master Only (No FM) -59 Askey CPH03x TV Capturer -60 Modular Technology MM100PCTV -61 AG Electronics GMV1 15cb:0101 -62 Askey CPH061/ BESTBUY Easy TV (bt878) -63 ATI TV-Wonder 1002:0001 -64 ATI TV-Wonder VE 1002:0003 -65 Lifeview FlyVideo 2000S LR90 -66 Terratec TValueRadio 153b:1135, 153b:ff3b -67 IODATA GV-BCTV4/PCI 10fc:4050 -68 3Dfx VoodooTV FM (Euro) 10b4:2637 -69 Active Imaging AIMMS -70 Prolink Pixelview PV-BT878P+ (Rev.4C,8E) -71 Lifeview FlyVideo 98EZ (capture only) LR51 1851:1851 -72 Prolink Pixelview PV-BT878P+9B (PlayTV Pro rev.9B FM+NICAM) 1554:4011 -73 Sensoray 311/611 6000:0311, 6000:0611 -74 RemoteVision MX (RV605) -75 Powercolor MTV878/ MTV878R/ MTV878F -76 Canopus WinDVR PCI (COMPAQ Presario 3524JP, 5112JP) 0e11:0079 -77 GrandTec Multi Capture Card (Bt878) -78 Jetway TV/Capture JW-TV878-FBK, Kworld KW-TV878RF 0a01:17de -79 DSP Design TCVIDEO -80 Hauppauge WinTV PVR 0070:4500 -81 IODATA GV-BCTV5/PCI 10fc:4070, 10fc:d018 -82 Osprey 100/150 (878) 0070:ff00 -83 Osprey 100/150 (848) -84 Osprey 101 (848) -85 Osprey 101/151 -86 Osprey 101/151 w/ svid -87 Osprey 200/201/250/251 -88 Osprey 200/250 0070:ff01 -89 Osprey 210/220/230 -90 Osprey 500 0070:ff02 -91 Osprey 540 0070:ff04 -92 Osprey 2000 0070:ff03 -93 IDS Eagle -94 Pinnacle PCTV Sat 11bd:001c -95 Formac ProTV II (bt878) -96 MachTV -97 Euresys Picolo -98 ProVideo PV150 aa00:1460, aa01:1461, aa02:1462, aa03:1463, aa04:1464, aa05:1465, aa06:1466, aa07:1467 -99 AD-TVK503 -100 Hercules Smart TV Stereo -101 Pace TV & Radio Card -102 IVC-200 0000:a155, 0001:a155, 0002:a155, 0003:a155, 0100:a155, 0101:a155, 0102:a155, 0103:a155, 0800:a155, 0801:a155, 0802:a155, 0803:a155 -103 Grand X-Guard / Trust 814PCI 0304:0102 
-104 Nebula Electronics DigiTV 0071:0101 -105 ProVideo PV143 aa00:1430, aa00:1431, aa00:1432, aa00:1433, aa03:1433 -106 PHYTEC VD-009-X1 VD-011 MiniDIN (bt878) -107 PHYTEC VD-009-X1 VD-011 Combi (bt878) -108 PHYTEC VD-009 MiniDIN (bt878) -109 PHYTEC VD-009 Combi (bt878) -110 IVC-100 ff00:a132 -111 IVC-120G ff00:a182, ff01:a182, ff02:a182, ff03:a182, ff04:a182, ff05:a182, ff06:a182, ff07:a182, ff08:a182, ff09:a182, ff0a:a182, ff0b:a182, ff0c:a182, ff0d:a182, ff0e:a182, ff0f:a182 -112 pcHDTV HD-2000 TV 7063:2000 -113 Twinhan DST + clones 11bd:0026, 1822:0001, 270f:fc00, 1822:0026 -114 Winfast VC100 107d:6607 -115 Teppro TEV-560/InterVision IV-560 -116 SIMUS GVC1100 aa6a:82b2 -117 NGS NGSTV+ -118 LMLBT4 -119 Tekram M205 PRO -120 Conceptronic CONTVFMi -121 Euresys Picolo Tetra 1805:0105, 1805:0106, 1805:0107, 1805:0108 -122 Spirit TV Tuner -123 AVerMedia AVerTV DVB-T 771 1461:0771 -124 AverMedia AverTV DVB-T 761 1461:0761 -125 MATRIX Vision Sigma-SQ -126 MATRIX Vision Sigma-SLC -127 APAC Viewcomp 878(AMAX) -128 DViCO FusionHDTV DVB-T Lite 18ac:db10, 18ac:db11 -129 V-Gear MyVCD -130 Super TV Tuner -131 Tibet Systems 'Progress DVR' CS16 -132 Kodicom 4400R (master) -133 Kodicom 4400R (slave) -134 Adlink RTV24 -135 DViCO FusionHDTV 5 Lite 18ac:d500 -136 Acorp Y878F 9511:1540 -137 Conceptronic CTVFMi v2 036e:109e -138 Prolink Pixelview PV-BT878P+ (Rev.2E) -139 Prolink PixelView PlayTV MPEG2 PV-M4900 -140 Osprey 440 0070:ff07 -141 Asound Skyeye PCTV -142 Sabrent TV-FM (bttv version) -143 Hauppauge ImpactVCB (bt878) 0070:13eb -144 MagicTV -145 SSAI Security Video Interface 4149:5353 -146 SSAI Ultrasound Video Interface 414a:5353 -147 VoodooTV 200 (USA) 121a:3000 -148 DViCO FusionHDTV 2 dbc0:d200 -149 Typhoon TV-Tuner PCI (50684) -150 Geovision GV-600 008a:763c -151 Kozumi KTV-01C -152 Encore ENL TV-FM-2 1000:1801 -153 PHYTEC VD-012 (bt878) -154 PHYTEC VD-012-X1 (bt878) -155 PHYTEC VD-012-X2 (bt878) -156 IVCE-8784 0000:f050, 0001:f050, 0002:f050, 0003:f050 -157 Geovision GV-800(S) (master) 800a:763d -158 Geovision GV-800(S) (slave) 800b:763d, 800c:763d, 800d:763d -159 ProVideo PV183 1830:1540, 1831:1540, 1832:1540, 1833:1540, 1834:1540, 1835:1540, 1836:1540, 1837:1540 -160 Tongwei Video Technology TD-3116 f200:3116 -161 Aposonic W-DVR 0279:0228 -162 Adlink MPG24 -163 Bt848 Capture 14MHz -164 CyberVision CV06 (SV) -165 Kworld V-Stream Xpert TV PVR878 -166 PCI-8604PW -=========== ================================================================================= ============================================================================================================================================================================== +.. tabularcolumns:: |p{1.4cm}|p{11.1cm}|p{4.2cm}| + +.. 
flat-table:: + :header-rows: 1 + :widths: 2 19 18 + :stub-columns: 0 + + * - Card number + - Card name + - PCI IDs + + * - 0 + - *** UNKNOWN/GENERIC *** + - + + * - 1 + - MIRO PCTV + - + + * - 2 + - Hauppauge (bt848) + - + + * - 3 + - STB, Gateway P/N 6000699 (bt848) + - + + * - 4 + - Intel Create and Share PCI/ Smart Video Recorder III + - + + * - 5 + - Diamond DTV2000 + - + + * - 6 + - AVerMedia TVPhone + - + + * - 7 + - MATRIX-Vision MV-Delta + - + + * - 8 + - Lifeview FlyVideo II (Bt848) LR26 / MAXI TV Video PCI2 LR26 + - + + * - 9 + - IMS/IXmicro TurboTV + - + + * - 10 + - Hauppauge (bt878) + - 0070:13eb, 0070:3900, 2636:10b4 + + * - 11 + - MIRO PCTV pro + - + + * - 12 + - ADS Technologies Channel Surfer TV (bt848) + - + + * - 13 + - AVerMedia TVCapture 98 + - 1461:0002, 1461:0004, 1461:0300 + + * - 14 + - Aimslab Video Highway Xtreme (VHX) + - + + * - 15 + - Zoltrix TV-Max + - a1a0:a0fc + + * - 16 + - Prolink Pixelview PlayTV (bt878) + - + + * - 17 + - Leadtek WinView 601 + - + + * - 18 + - AVEC Intercapture + - + + * - 19 + - Lifeview FlyVideo II EZ /FlyKit LR38 Bt848 (capture only) + - + + * - 20 + - CEI Raffles Card + - + + * - 21 + - Lifeview FlyVideo 98/ Lucky Star Image World ConferenceTV LR50 + - + + * - 22 + - Askey CPH050/ Phoebe Tv Master + FM + - 14ff:3002 + + * - 23 + - Modular Technology MM201/MM202/MM205/MM210/MM215 PCTV, bt878 + - 14c7:0101 + + * - 24 + - Askey CPH05X/06X (bt878) [many vendors] + - 144f:3002, 144f:3005, 144f:5000, 14ff:3000 + + * - 25 + - Terratec TerraTV+ Version 1.0 (Bt848)/ Terra TValue Version 1.0/ Vobis TV-Boostar + - + + * - 26 + - Hauppauge WinCam newer (bt878) + - + + * - 27 + - Lifeview FlyVideo 98/ MAXI TV Video PCI2 LR50 + - + + * - 28 + - Terratec TerraTV+ Version 1.1 (bt878) + - 153b:1127, 1852:1852 + + * - 29 + - Imagenation PXC200 + - 1295:200a + + * - 30 + - Lifeview FlyVideo 98 LR50 + - 1f7f:1850 + + * - 31 + - Formac iProTV, Formac ProTV I (bt848) + - + + * - 32 + - Intel Create and Share PCI/ Smart Video Recorder III + - + + * - 33 + - Terratec TerraTValue Version Bt878 + - 153b:1117, 153b:1118, 153b:1119, 153b:111a, 153b:1134, 153b:5018 + + * - 34 + - Leadtek WinFast 2000/ WinFast 2000 XP + - 107d:6606, 107d:6609, 6606:217d, f6ff:fff6 + + * - 35 + - Lifeview FlyVideo 98 LR50 / Chronos Video Shuttle II + - 1851:1850, 1851:a050 + + * - 36 + - Lifeview FlyVideo 98FM LR50 / Typhoon TView TV/FM Tuner + - 1852:1852 + + * - 37 + - Prolink PixelView PlayTV pro + - + + * - 38 + - Askey CPH06X TView99 + - 144f:3000, 144f:a005, a04f:a0fc + + * - 39 + - Pinnacle PCTV Studio/Rave + - 11bd:0012, bd11:1200, bd11:ff00, 11bd:ff12 + + * - 40 + - STB TV PCI FM, Gateway P/N 6000704 (bt878), 3Dfx VoodooTV 100 + - 10b4:2636, 10b4:2645, 121a:3060 + + * - 41 + - AVerMedia TVPhone 98 + - 1461:0001, 1461:0003 + + * - 42 + - ProVideo PV951 + - aa0c:146c + + * - 43 + - Little OnAir TV + - + + * - 44 + - Sigma TVII-FM + - + + * - 45 + - MATRIX-Vision MV-Delta 2 + - + + * - 46 + - Zoltrix Genie TV/FM + - 15b0:4000, 15b0:400a, 15b0:400d, 15b0:4010, 15b0:4016 + + * - 47 + - Terratec TV/Radio+ + - 153b:1123 + + * - 48 + - Askey CPH03x/ Dynalink Magic TView + - + + * - 49 + - IODATA GV-BCTV3/PCI + - 10fc:4020 + + * - 50 + - Prolink PV-BT878P+4E / PixelView PlayTV PAK / Lenco MXTV-9578 CP + - + + * - 51 + - Eagle Wireless Capricorn2 (bt878A) + - + + * - 52 + - Pinnacle PCTV Studio Pro + - + + * - 53 + - Typhoon TView RDS + FM Stereo / KNC1 TV Station RDS + - + + * - 54 + - Lifeview FlyVideo 2000 /FlyVideo A2/ Lifetec LT 9415 TV [LR90] + - + + * - 55 + - Askey CPH031/ 
BESTBUY Easy TV + - + + * - 56 + - Lifeview FlyVideo 98FM LR50 + - a051:41a0 + + * - 57 + - GrandTec 'Grand Video Capture' (Bt848) + - 4344:4142 + + * - 58 + - Askey CPH060/ Phoebe TV Master Only (No FM) + - + + * - 59 + - Askey CPH03x TV Capturer + - + + * - 60 + - Modular Technology MM100PCTV + - + + * - 61 + - AG Electronics GMV1 + - 15cb:0101 + + * - 62 + - Askey CPH061/ BESTBUY Easy TV (bt878) + - + + * - 63 + - ATI TV-Wonder + - 1002:0001 + + * - 64 + - ATI TV-Wonder VE + - 1002:0003 + + * - 65 + - Lifeview FlyVideo 2000S LR90 + - + + * - 66 + - Terratec TValueRadio + - 153b:1135, 153b:ff3b + + * - 67 + - IODATA GV-BCTV4/PCI + - 10fc:4050 + + * - 68 + - 3Dfx VoodooTV FM (Euro) + - 10b4:2637 + + * - 69 + - Active Imaging AIMMS + - + + * - 70 + - Prolink Pixelview PV-BT878P+ (Rev.4C,8E) + - + + * - 71 + - Lifeview FlyVideo 98EZ (capture only) LR51 + - 1851:1851 + + * - 72 + - Prolink Pixelview PV-BT878P+9B (PlayTV Pro rev.9B FM+NICAM) + - 1554:4011 + + * - 73 + - Sensoray 311/611 + - 6000:0311, 6000:0611 + + * - 74 + - RemoteVision MX (RV605) + - + + * - 75 + - Powercolor MTV878/ MTV878R/ MTV878F + - + + * - 76 + - Canopus WinDVR PCI (COMPAQ Presario 3524JP, 5112JP) + - 0e11:0079 + + * - 77 + - GrandTec Multi Capture Card (Bt878) + - + + * - 78 + - Jetway TV/Capture JW-TV878-FBK, Kworld KW-TV878RF + - 0a01:17de + + * - 79 + - DSP Design TCVIDEO + - + + * - 80 + - Hauppauge WinTV PVR + - 0070:4500 + + * - 81 + - IODATA GV-BCTV5/PCI + - 10fc:4070, 10fc:d018 + + * - 82 + - Osprey 100/150 (878) + - 0070:ff00 + + * - 83 + - Osprey 100/150 (848) + - + + * - 84 + - Osprey 101 (848) + - + + * - 85 + - Osprey 101/151 + - + + * - 86 + - Osprey 101/151 w/ svid + - + + * - 87 + - Osprey 200/201/250/251 + - + + * - 88 + - Osprey 200/250 + - 0070:ff01 + + * - 89 + - Osprey 210/220/230 + - + + * - 90 + - Osprey 500 + - 0070:ff02 + + * - 91 + - Osprey 540 + - 0070:ff04 + + * - 92 + - Osprey 2000 + - 0070:ff03 + + * - 93 + - IDS Eagle + - + + * - 94 + - Pinnacle PCTV Sat + - 11bd:001c + + * - 95 + - Formac ProTV II (bt878) + - + + * - 96 + - MachTV + - + + * - 97 + - Euresys Picolo + - + + * - 98 + - ProVideo PV150 + - aa00:1460, aa01:1461, aa02:1462, aa03:1463, aa04:1464, aa05:1465, aa06:1466, aa07:1467 + + * - 99 + - AD-TVK503 + - + + * - 100 + - Hercules Smart TV Stereo + - + + * - 101 + - Pace TV & Radio Card + - + + * - 102 + - IVC-200 + - 0000:a155, 0001:a155, 0002:a155, 0003:a155, 0100:a155, 0101:a155, 0102:a155, 0103:a155, 0800:a155, 0801:a155, 0802:a155, 0803:a155 + + * - 103 + - Grand X-Guard / Trust 814PCI + - 0304:0102 + + * - 104 + - Nebula Electronics DigiTV + - 0071:0101 + + * - 105 + - ProVideo PV143 + - aa00:1430, aa00:1431, aa00:1432, aa00:1433, aa03:1433 + + * - 106 + - PHYTEC VD-009-X1 VD-011 MiniDIN (bt878) + - + + * - 107 + - PHYTEC VD-009-X1 VD-011 Combi (bt878) + - + + * - 108 + - PHYTEC VD-009 MiniDIN (bt878) + - + + * - 109 + - PHYTEC VD-009 Combi (bt878) + - + + * - 110 + - IVC-100 + - ff00:a132 + + * - 111 + - IVC-120G + - ff00:a182, ff01:a182, ff02:a182, ff03:a182, ff04:a182, ff05:a182, ff06:a182, ff07:a182, ff08:a182, ff09:a182, ff0a:a182, ff0b:a182, ff0c:a182, ff0d:a182, ff0e:a182, ff0f:a182 + + * - 112 + - pcHDTV HD-2000 TV + - 7063:2000 + + * - 113 + - Twinhan DST + clones + - 11bd:0026, 1822:0001, 270f:fc00, 1822:0026 + + * - 114 + - Winfast VC100 + - 107d:6607 + + * - 115 + - Teppro TEV-560/InterVision IV-560 + - + + * - 116 + - SIMUS GVC1100 + - aa6a:82b2 + + * - 117 + - NGS NGSTV+ + - + + * - 118 + - LMLBT4 + - + + * - 119 + - Tekram M205 PRO + - + + * - 120 + - 
Conceptronic CONTVFMi + - + + * - 121 + - Euresys Picolo Tetra + - 1805:0105, 1805:0106, 1805:0107, 1805:0108 + + * - 122 + - Spirit TV Tuner + - + + * - 123 + - AVerMedia AVerTV DVB-T 771 + - 1461:0771 + + * - 124 + - AverMedia AverTV DVB-T 761 + - 1461:0761 + + * - 125 + - MATRIX Vision Sigma-SQ + - + + * - 126 + - MATRIX Vision Sigma-SLC + - + + * - 127 + - APAC Viewcomp 878(AMAX) + - + + * - 128 + - DViCO FusionHDTV DVB-T Lite + - 18ac:db10, 18ac:db11 + + * - 129 + - V-Gear MyVCD + - + + * - 130 + - Super TV Tuner + - + + * - 131 + - Tibet Systems 'Progress DVR' CS16 + - + + * - 132 + - Kodicom 4400R (master) + - + + * - 133 + - Kodicom 4400R (slave) + - + + * - 134 + - Adlink RTV24 + - + + * - 135 + - DViCO FusionHDTV 5 Lite + - 18ac:d500 + + * - 136 + - Acorp Y878F + - 9511:1540 + + * - 137 + - Conceptronic CTVFMi v2 + - 036e:109e + + * - 138 + - Prolink Pixelview PV-BT878P+ (Rev.2E) + - + + * - 139 + - Prolink PixelView PlayTV MPEG2 PV-M4900 + - + + * - 140 + - Osprey 440 + - 0070:ff07 + + * - 141 + - Asound Skyeye PCTV + - + + * - 142 + - Sabrent TV-FM (bttv version) + - + + * - 143 + - Hauppauge ImpactVCB (bt878) + - 0070:13eb + + * - 144 + - MagicTV + - + + * - 145 + - SSAI Security Video Interface + - 4149:5353 + + * - 146 + - SSAI Ultrasound Video Interface + - 414a:5353 + + * - 147 + - VoodooTV 200 (USA) + - 121a:3000 + + * - 148 + - DViCO FusionHDTV 2 + - dbc0:d200 + + * - 149 + - Typhoon TV-Tuner PCI (50684) + - + + * - 150 + - Geovision GV-600 + - 008a:763c + + * - 151 + - Kozumi KTV-01C + - + + * - 152 + - Encore ENL TV-FM-2 + - 1000:1801 + + * - 153 + - PHYTEC VD-012 (bt878) + - + + * - 154 + - PHYTEC VD-012-X1 (bt878) + - + + * - 155 + - PHYTEC VD-012-X2 (bt878) + - + + * - 156 + - IVCE-8784 + - 0000:f050, 0001:f050, 0002:f050, 0003:f050 + + * - 157 + - Geovision GV-800(S) (master) + - 800a:763d + + * - 158 + - Geovision GV-800(S) (slave) + - 800b:763d, 800c:763d, 800d:763d + + * - 159 + - ProVideo PV183 + - 1830:1540, 1831:1540, 1832:1540, 1833:1540, 1834:1540, 1835:1540, 1836:1540, 1837:1540 + + * - 160 + - Tongwei Video Technology TD-3116 + - f200:3116 + + * - 161 + - Aposonic W-DVR + - 0279:0228 + + * - 162 + - Adlink MPG24 + - + + * - 163 + - Bt848 Capture 14MHz + - + + * - 164 + - CyberVision CV06 (SV) + - + + * - 165 + - Kworld V-Stream Xpert TV PVR878 + - + + * - 166 + - PCI-8604PW + - diff --git a/Documentation/media/v4l-drivers/cx23885-cardlist.rst b/Documentation/media/v4l-drivers/cx23885-cardlist.rst index fd20b50d2c1d..3129ef04ddd3 100644 --- a/Documentation/media/v4l-drivers/cx23885-cardlist.rst +++ b/Documentation/media/v4l-drivers/cx23885-cardlist.rst @@ -1,65 +1,245 @@ cx23885 cards list ================== -=========== ==================================== ====================================================================================== -Card number Card name PCI IDs -=========== ==================================== ====================================================================================== -0 UNKNOWN/GENERIC 0070:3400 -1 Hauppauge WinTV-HVR1800lp 0070:7600 -2 Hauppauge WinTV-HVR1800 0070:7800, 0070:7801, 0070:7809 -3 Hauppauge WinTV-HVR1250 0070:7911 -4 DViCO FusionHDTV5 Express 18ac:d500 -5 Hauppauge WinTV-HVR1500Q 0070:7790, 0070:7797 -6 Hauppauge WinTV-HVR1500 0070:7710, 0070:7717 -7 Hauppauge WinTV-HVR1200 0070:71d1, 0070:71d3 -8 Hauppauge WinTV-HVR1700 0070:8101 -9 Hauppauge WinTV-HVR1400 0070:8010 -10 DViCO FusionHDTV7 Dual Express 18ac:d618 -11 DViCO FusionHDTV DVB-T Dual Express 18ac:db78 -12 Leadtek Winfast PxDVR3200 H 107d:6681 
-13 Compro VideoMate E650F 185b:e800 -14 TurboSight TBS 6920 6920:8888 -15 TeVii S470 d470:9022 -16 DVBWorld DVB-S2 2005 0001:2005 -17 NetUP Dual DVB-S2 CI 1b55:2a2c -18 Hauppauge WinTV-HVR1270 0070:2211 -19 Hauppauge WinTV-HVR1275 0070:2215, 0070:221d, 0070:22f2 -20 Hauppauge WinTV-HVR1255 0070:2251, 0070:22f1 -21 Hauppauge WinTV-HVR1210 0070:2291, 0070:2295, 0070:2299, 0070:229d, 0070:22f0, 0070:22f3, 0070:22f4, 0070:22f5 -22 Mygica X8506 DMB-TH 14f1:8651 -23 Magic-Pro ProHDTV Extreme 2 14f1:8657 -24 Hauppauge WinTV-HVR1850 0070:8541 -25 Compro VideoMate E800 1858:e800 -26 Hauppauge WinTV-HVR1290 0070:8551 -27 Mygica X8558 PRO DMB-TH 14f1:8578 -28 LEADTEK WinFast PxTV1200 107d:6f22 -29 GoTView X5 3D Hybrid 5654:2390 -30 NetUP Dual DVB-T/C-CI RF 1b55:e2e4 -31 Leadtek Winfast PxDVR3200 H XC4000 107d:6f39 -32 MPX-885 -33 Mygica X8502/X8507 ISDB-T 14f1:8502 -34 TerraTec Cinergy T PCIe Dual 153b:117e -35 TeVii S471 d471:9022 -36 Hauppauge WinTV-HVR1255 0070:2259 -37 Prof Revolution DVB-S2 8000 8000:3034 -38 Hauppauge WinTV-HVR4400/HVR5500 0070:c108, 0070:c138, 0070:c1f8 -39 AVerTV Hybrid Express Slim HC81R 1461:d939 -40 TurboSight TBS 6981 6981:8888 -41 TurboSight TBS 6980 6980:8888 -42 Leadtek Winfast PxPVR2200 107d:6f21 -43 Hauppauge ImpactVCB-e 0070:7133 -44 DViCO FusionHDTV DVB-T Dual Express2 18ac:db98 -45 DVBSky T9580 4254:9580 -46 DVBSky T980C 4254:980c -47 DVBSky S950C 4254:950c -48 Technotrend TT-budget CT2-4500 CI 13c2:3013 -49 DVBSky S950 4254:0950 -50 DVBSky S952 4254:0952 -51 DVBSky T982 4254:0982 -52 Hauppauge WinTV-HVR5525 0070:f038 -53 Hauppauge WinTV Starburst 0070:c12a -54 ViewCast 260e 1576:0260 -55 ViewCast 460e 1576:0460 -56 Hauppauge WinTV-QuadHD-DVB 0070:6a28, 0070:6b28 -57 Hauppauge WinTV-QuadHD-ATSC 0070:6a18, 0070:6b18 -=========== ==================================== ====================================================================================== +.. tabularcolumns:: |p{1.4cm}|p{11.1cm}|p{4.2cm}| + +.. 
flat-table:: + :header-rows: 1 + :widths: 2 19 18 + :stub-columns: 0 + + * - Card number + - Card name + - PCI IDs + + * - 0 + - UNKNOWN/GENERIC + - 0070:3400 + + * - 1 + - Hauppauge WinTV-HVR1800lp + - 0070:7600 + + * - 2 + - Hauppauge WinTV-HVR1800 + - 0070:7800, 0070:7801, 0070:7809 + + * - 3 + - Hauppauge WinTV-HVR1250 + - 0070:7911 + + * - 4 + - DViCO FusionHDTV5 Express + - 18ac:d500 + + * - 5 + - Hauppauge WinTV-HVR1500Q + - 0070:7790, 0070:7797 + + * - 6 + - Hauppauge WinTV-HVR1500 + - 0070:7710, 0070:7717 + + * - 7 + - Hauppauge WinTV-HVR1200 + - 0070:71d1, 0070:71d3 + + * - 8 + - Hauppauge WinTV-HVR1700 + - 0070:8101 + + * - 9 + - Hauppauge WinTV-HVR1400 + - 0070:8010 + + * - 10 + - DViCO FusionHDTV7 Dual Express + - 18ac:d618 + + * - 11 + - DViCO FusionHDTV DVB-T Dual Express + - 18ac:db78 + + * - 12 + - Leadtek Winfast PxDVR3200 H + - 107d:6681 + + * - 13 + - Compro VideoMate E650F + - 185b:e800 + + * - 14 + - TurboSight TBS 6920 + - 6920:8888 + + * - 15 + - TeVii S470 + - d470:9022 + + * - 16 + - DVBWorld DVB-S2 2005 + - 0001:2005 + + * - 17 + - NetUP Dual DVB-S2 CI + - 1b55:2a2c + + * - 18 + - Hauppauge WinTV-HVR1270 + - 0070:2211 + + * - 19 + - Hauppauge WinTV-HVR1275 + - 0070:2215, 0070:221d, 0070:22f2 + + * - 20 + - Hauppauge WinTV-HVR1255 + - 0070:2251, 0070:22f1 + + * - 21 + - Hauppauge WinTV-HVR1210 + - 0070:2291, 0070:2295, 0070:2299, 0070:229d, 0070:22f0, 0070:22f3, 0070:22f4, 0070:22f5 + + * - 22 + - Mygica X8506 DMB-TH + - 14f1:8651 + + * - 23 + - Magic-Pro ProHDTV Extreme 2 + - 14f1:8657 + + * - 24 + - Hauppauge WinTV-HVR1850 + - 0070:8541 + + * - 25 + - Compro VideoMate E800 + - 1858:e800 + + * - 26 + - Hauppauge WinTV-HVR1290 + - 0070:8551 + + * - 27 + - Mygica X8558 PRO DMB-TH + - 14f1:8578 + + * - 28 + - LEADTEK WinFast PxTV1200 + - 107d:6f22 + + * - 29 + - GoTView X5 3D Hybrid + - 5654:2390 + + * - 30 + - NetUP Dual DVB-T/C-CI RF + - 1b55:e2e4 + + * - 31 + - Leadtek Winfast PxDVR3200 H XC4000 + - 107d:6f39 + + * - 32 + - MPX-885 + - + + * - 33 + - Mygica X8502/X8507 ISDB-T + - 14f1:8502 + + * - 34 + - TerraTec Cinergy T PCIe Dual + - 153b:117e + + * - 35 + - TeVii S471 + - d471:9022 + + * - 36 + - Hauppauge WinTV-HVR1255 + - 0070:2259 + + * - 37 + - Prof Revolution DVB-S2 8000 + - 8000:3034 + + * - 38 + - Hauppauge WinTV-HVR4400/HVR5500 + - 0070:c108, 0070:c138, 0070:c1f8 + + * - 39 + - AVerTV Hybrid Express Slim HC81R + - 1461:d939 + + * - 40 + - TurboSight TBS 6981 + - 6981:8888 + + * - 41 + - TurboSight TBS 6980 + - 6980:8888 + + * - 42 + - Leadtek Winfast PxPVR2200 + - 107d:6f21 + + * - 43 + - Hauppauge ImpactVCB-e + - 0070:7133 + + * - 44 + - DViCO FusionHDTV DVB-T Dual Express2 + - 18ac:db98 + + * - 45 + - DVBSky T9580 + - 4254:9580 + + * - 46 + - DVBSky T980C + - 4254:980c + + * - 47 + - DVBSky S950C + - 4254:950c + + * - 48 + - Technotrend TT-budget CT2-4500 CI + - 13c2:3013 + + * - 49 + - DVBSky S950 + - 4254:0950 + + * - 50 + - DVBSky S952 + - 4254:0952 + + * - 51 + - DVBSky T982 + - 4254:0982 + + * - 52 + - Hauppauge WinTV-HVR5525 + - 0070:f038 + + * - 53 + - Hauppauge WinTV Starburst + - 0070:c12a + + * - 54 + - ViewCast 260e + - 1576:0260 + + * - 55 + - ViewCast 460e + - 1576:0460 + + * - 56 + - Hauppauge WinTV-QuadHD-DVB + - 0070:6a28, 0070:6b28 + + * - 57 + - Hauppauge WinTV-QuadHD-ATSC + - 0070:6a18, 0070:6b18 diff --git a/Documentation/media/v4l-drivers/cx88-cardlist.rst b/Documentation/media/v4l-drivers/cx88-cardlist.rst index 8cc1cea17035..21648b8c2e83 100644 --- a/Documentation/media/v4l-drivers/cx88-cardlist.rst +++ 
b/Documentation/media/v4l-drivers/cx88-cardlist.rst @@ -1,98 +1,377 @@ CX88 cards list =============== -=========== =================================================== ====================================================================================== -Card number Card name PCI IDs -=========== =================================================== ====================================================================================== -0 UNKNOWN/GENERIC -1 Hauppauge WinTV 34xxx models 0070:3400, 0070:3401 -2 GDI Black Gold 14c7:0106, 14c7:0107 -3 PixelView 1554:4811 -4 ATI TV Wonder Pro 1002:00f8, 1002:00f9 -5 Leadtek Winfast 2000XP Expert 107d:6611, 107d:6613 -6 AverTV Studio 303 (M126) 1461:000b -7 MSI TV-@nywhere Master 1462:8606 -8 Leadtek Winfast DV2000 107d:6620, 107d:6621 -9 Leadtek PVR 2000 107d:663b, 107d:663c, 107d:6632, 107d:6630, 107d:6638, 107d:6631, 107d:6637, 107d:663d -10 IODATA GV-VCP3/PCI 10fc:d003 -11 Prolink PlayTV PVR -12 ASUS PVR-416 1043:4823, 1461:c111 -13 MSI TV-@nywhere -14 KWorld/VStream XPert DVB-T 17de:08a6 -15 DViCO FusionHDTV DVB-T1 18ac:db00 -16 KWorld LTV883RF -17 DViCO FusionHDTV 3 Gold-Q 18ac:d810, 18ac:d800 -18 Hauppauge Nova-T DVB-T 0070:9002, 0070:9001, 0070:9000 -19 Conexant DVB-T reference design 14f1:0187 -20 Provideo PV259 1540:2580 -21 DViCO FusionHDTV DVB-T Plus 18ac:db10, 18ac:db11 -22 pcHDTV HD3000 HDTV 7063:3000 -23 digitalnow DNTV Live! DVB-T 17de:a8a6 -24 Hauppauge WinTV 28xxx (Roslyn) models 0070:2801 -25 Digital-Logic MICROSPACE Entertainment Center (MEC) 14f1:0342 -26 IODATA GV/BCTV7E 10fc:d035 -27 PixelView PlayTV Ultra Pro (Stereo) -28 DViCO FusionHDTV 3 Gold-T 18ac:d820 -29 ADS Tech Instant TV DVB-T PCI 1421:0334 -30 TerraTec Cinergy 1400 DVB-T 153b:1166 -31 DViCO FusionHDTV 5 Gold 18ac:d500 -32 AverMedia UltraTV Media Center PCI 550 1461:8011 -33 Kworld V-Stream Xpert DVD -34 ATI HDTV Wonder 1002:a101 -35 WinFast DTV1000-T 107d:665f -36 AVerTV 303 (M126) 1461:000a -37 Hauppauge Nova-S-Plus DVB-S 0070:9201, 0070:9202 -38 Hauppauge Nova-SE2 DVB-S 0070:9200 -39 KWorld DVB-S 100 17de:08b2, 1421:0341 -40 Hauppauge WinTV-HVR1100 DVB-T/Hybrid 0070:9400, 0070:9402 -41 Hauppauge WinTV-HVR1100 DVB-T/Hybrid (Low Profile) 0070:9800, 0070:9802 -42 digitalnow DNTV Live! 
DVB-T Pro 1822:0025, 1822:0019 -43 KWorld/VStream XPert DVB-T with cx22702 17de:08a1, 12ab:2300 -44 DViCO FusionHDTV DVB-T Dual Digital 18ac:db50, 18ac:db54 -45 KWorld HardwareMpegTV XPert 17de:0840, 1421:0305 -46 DViCO FusionHDTV DVB-T Hybrid 18ac:db40, 18ac:db44 -47 pcHDTV HD5500 HDTV 7063:5500 -48 Kworld MCE 200 Deluxe 17de:0841 -49 PixelView PlayTV P7000 1554:4813 -50 NPG Tech Real TV FM Top 10 14f1:0842 -51 WinFast DTV2000 H 107d:665e -52 Geniatech DVB-S 14f1:0084 -53 Hauppauge WinTV-HVR3000 TriMode Analog/DVB-S/DVB-T 0070:1404, 0070:1400, 0070:1401, 0070:1402 -54 Norwood Micro TV Tuner -55 Shenzhen Tungsten Ages Tech TE-DTV-250 / Swann OEM c180:c980 -56 Hauppauge WinTV-HVR1300 DVB-T/Hybrid MPEG Encoder 0070:9600, 0070:9601, 0070:9602 -57 ADS Tech Instant Video PCI 1421:0390 -58 Pinnacle PCTV HD 800i 11bd:0051 -59 DViCO FusionHDTV 5 PCI nano 18ac:d530 -60 Pinnacle Hybrid PCTV 12ab:1788 -61 Leadtek TV2000 XP Global 107d:6f18, 107d:6618, 107d:6619 -62 PowerColor RA330 14f1:ea3d -63 Geniatech X8000-MT DVBT 14f1:8852 -64 DViCO FusionHDTV DVB-T PRO 18ac:db30 -65 DViCO FusionHDTV 7 Gold 18ac:d610 -66 Prolink Pixelview MPEG 8000GT 1554:4935 -67 Kworld PlusTV HD PCI 120 (ATSC 120) 17de:08c1 -68 Hauppauge WinTV-HVR4000 DVB-S/S2/T/Hybrid 0070:6900, 0070:6904, 0070:6902 -69 Hauppauge WinTV-HVR4000(Lite) DVB-S/S2 0070:6905, 0070:6906 -70 TeVii S460 DVB-S/S2 d460:9022 -71 Omicom SS4 DVB-S/S2 PCI A044:2011 -72 TBS 8920 DVB-S/S2 8920:8888 -73 TeVii S420 DVB-S d420:9022 -74 Prolink Pixelview Global Extreme 1554:4976 -75 PROF 7300 DVB-S/S2 B033:3033 -76 SATTRADE ST4200 DVB-S/S2 b200:4200 -77 TBS 8910 DVB-S 8910:8888 -78 Prof 6200 DVB-S b022:3022 -79 Terratec Cinergy HT PCI MKII 153b:1177 -80 Hauppauge WinTV-IR Only 0070:9290 -81 Leadtek WinFast DTV1800 Hybrid 107d:6654 -82 WinFast DTV2000 H rev. J 107d:6f2b -83 Prof 7301 DVB-S/S2 b034:3034 -84 Samsung SMT 7020 DVB-S 18ac:dc00, 18ac:dccd -85 Twinhan VP-1027 DVB-S 1822:0023 -86 TeVii S464 DVB-S/S2 d464:9022 -87 Leadtek WinFast DTV2000 H PLUS 107d:6f42 -88 Leadtek WinFast DTV1800 H (XC4000) 107d:6f38 -89 Leadtek TV2000 XP Global (SC4100) 107d:6f36 -90 Leadtek TV2000 XP Global (XC4100) 107d:6f43 -=========== =================================================== ====================================================================================== +.. tabularcolumns:: |p{1.4cm}|p{11.1cm}|p{4.2cm}| + +.. 
flat-table:: + :header-rows: 1 + :widths: 2 19 18 + :stub-columns: 0 + + * - Card number + - Card name + - PCI IDs + + * - 0 + - UNKNOWN/GENERIC + - + + * - 1 + - Hauppauge WinTV 34xxx models + - 0070:3400, 0070:3401 + + * - 2 + - GDI Black Gold + - 14c7:0106, 14c7:0107 + + * - 3 + - PixelView + - 1554:4811 + + * - 4 + - ATI TV Wonder Pro + - 1002:00f8, 1002:00f9 + + * - 5 + - Leadtek Winfast 2000XP Expert + - 107d:6611, 107d:6613 + + * - 6 + - AverTV Studio 303 (M126) + - 1461:000b + + * - 7 + - MSI TV-@nywhere Master + - 1462:8606 + + * - 8 + - Leadtek Winfast DV2000 + - 107d:6620, 107d:6621 + + * - 9 + - Leadtek PVR 2000 + - 107d:663b, 107d:663c, 107d:6632, 107d:6630, 107d:6638, 107d:6631, 107d:6637, 107d:663d + + * - 10 + - IODATA GV-VCP3/PCI + - 10fc:d003 + + * - 11 + - Prolink PlayTV PVR + - + + * - 12 + - ASUS PVR-416 + - 1043:4823, 1461:c111 + + * - 13 + - MSI TV-@nywhere + - + + * - 14 + - KWorld/VStream XPert DVB-T + - 17de:08a6 + + * - 15 + - DViCO FusionHDTV DVB-T1 + - 18ac:db00 + + * - 16 + - KWorld LTV883RF + - + + * - 17 + - DViCO FusionHDTV 3 Gold-Q + - 18ac:d810, 18ac:d800 + + * - 18 + - Hauppauge Nova-T DVB-T + - 0070:9002, 0070:9001, 0070:9000 + + * - 19 + - Conexant DVB-T reference design + - 14f1:0187 + + * - 20 + - Provideo PV259 + - 1540:2580 + + * - 21 + - DViCO FusionHDTV DVB-T Plus + - 18ac:db10, 18ac:db11 + + * - 22 + - pcHDTV HD3000 HDTV + - 7063:3000 + + * - 23 + - digitalnow DNTV Live! DVB-T + - 17de:a8a6 + + * - 24 + - Hauppauge WinTV 28xxx (Roslyn) models + - 0070:2801 + + * - 25 + - Digital-Logic MICROSPACE Entertainment Center (MEC) + - 14f1:0342 + + * - 26 + - IODATA GV/BCTV7E + - 10fc:d035 + + * - 27 + - PixelView PlayTV Ultra Pro (Stereo) + - + + * - 28 + - DViCO FusionHDTV 3 Gold-T + - 18ac:d820 + + * - 29 + - ADS Tech Instant TV DVB-T PCI + - 1421:0334 + + * - 30 + - TerraTec Cinergy 1400 DVB-T + - 153b:1166 + + * - 31 + - DViCO FusionHDTV 5 Gold + - 18ac:d500 + + * - 32 + - AverMedia UltraTV Media Center PCI 550 + - 1461:8011 + + * - 33 + - Kworld V-Stream Xpert DVD + - + + * - 34 + - ATI HDTV Wonder + - 1002:a101 + + * - 35 + - WinFast DTV1000-T + - 107d:665f + + * - 36 + - AVerTV 303 (M126) + - 1461:000a + + * - 37 + - Hauppauge Nova-S-Plus DVB-S + - 0070:9201, 0070:9202 + + * - 38 + - Hauppauge Nova-SE2 DVB-S + - 0070:9200 + + * - 39 + - KWorld DVB-S 100 + - 17de:08b2, 1421:0341 + + * - 40 + - Hauppauge WinTV-HVR1100 DVB-T/Hybrid + - 0070:9400, 0070:9402 + + * - 41 + - Hauppauge WinTV-HVR1100 DVB-T/Hybrid (Low Profile) + - 0070:9800, 0070:9802 + + * - 42 + - digitalnow DNTV Live! 
DVB-T Pro + - 1822:0025, 1822:0019 + + * - 43 + - KWorld/VStream XPert DVB-T with cx22702 + - 17de:08a1, 12ab:2300 + + * - 44 + - DViCO FusionHDTV DVB-T Dual Digital + - 18ac:db50, 18ac:db54 + + * - 45 + - KWorld HardwareMpegTV XPert + - 17de:0840, 1421:0305 + + * - 46 + - DViCO FusionHDTV DVB-T Hybrid + - 18ac:db40, 18ac:db44 + + * - 47 + - pcHDTV HD5500 HDTV + - 7063:5500 + + * - 48 + - Kworld MCE 200 Deluxe + - 17de:0841 + + * - 49 + - PixelView PlayTV P7000 + - 1554:4813 + + * - 50 + - NPG Tech Real TV FM Top 10 + - 14f1:0842 + + * - 51 + - WinFast DTV2000 H + - 107d:665e + + * - 52 + - Geniatech DVB-S + - 14f1:0084 + + * - 53 + - Hauppauge WinTV-HVR3000 TriMode Analog/DVB-S/DVB-T + - 0070:1404, 0070:1400, 0070:1401, 0070:1402 + + * - 54 + - Norwood Micro TV Tuner + - + + * - 55 + - Shenzhen Tungsten Ages Tech TE-DTV-250 / Swann OEM + - c180:c980 + + * - 56 + - Hauppauge WinTV-HVR1300 DVB-T/Hybrid MPEG Encoder + - 0070:9600, 0070:9601, 0070:9602 + + * - 57 + - ADS Tech Instant Video PCI + - 1421:0390 + + * - 58 + - Pinnacle PCTV HD 800i + - 11bd:0051 + + * - 59 + - DViCO FusionHDTV 5 PCI nano + - 18ac:d530 + + * - 60 + - Pinnacle Hybrid PCTV + - 12ab:1788 + + * - 61 + - Leadtek TV2000 XP Global + - 107d:6f18, 107d:6618, 107d:6619 + + * - 62 + - PowerColor RA330 + - 14f1:ea3d + + * - 63 + - Geniatech X8000-MT DVBT + - 14f1:8852 + + * - 64 + - DViCO FusionHDTV DVB-T PRO + - 18ac:db30 + + * - 65 + - DViCO FusionHDTV 7 Gold + - 18ac:d610 + + * - 66 + - Prolink Pixelview MPEG 8000GT + - 1554:4935 + + * - 67 + - Kworld PlusTV HD PCI 120 (ATSC 120) + - 17de:08c1 + + * - 68 + - Hauppauge WinTV-HVR4000 DVB-S/S2/T/Hybrid + - 0070:6900, 0070:6904, 0070:6902 + + * - 69 + - Hauppauge WinTV-HVR4000(Lite) DVB-S/S2 + - 0070:6905, 0070:6906 + + * - 70 + - TeVii S460 DVB-S/S2 + - d460:9022 + + * - 71 + - Omicom SS4 DVB-S/S2 PCI + - A044:2011 + + * - 72 + - TBS 8920 DVB-S/S2 + - 8920:8888 + + * - 73 + - TeVii S420 DVB-S + - d420:9022 + + * - 74 + - Prolink Pixelview Global Extreme + - 1554:4976 + + * - 75 + - PROF 7300 DVB-S/S2 + - B033:3033 + + * - 76 + - SATTRADE ST4200 DVB-S/S2 + - b200:4200 + + * - 77 + - TBS 8910 DVB-S + - 8910:8888 + + * - 78 + - Prof 6200 DVB-S + - b022:3022 + + * - 79 + - Terratec Cinergy HT PCI MKII + - 153b:1177 + + * - 80 + - Hauppauge WinTV-IR Only + - 0070:9290 + + * - 81 + - Leadtek WinFast DTV1800 Hybrid + - 107d:6654 + + * - 82 + - WinFast DTV2000 H rev. 
J + - 107d:6f2b + + * - 83 + - Prof 7301 DVB-S/S2 + - b034:3034 + + * - 84 + - Samsung SMT 7020 DVB-S + - 18ac:dc00, 18ac:dccd + + * - 85 + - Twinhan VP-1027 DVB-S + - 1822:0023 + + * - 86 + - TeVii S464 DVB-S/S2 + - d464:9022 + + * - 87 + - Leadtek WinFast DTV2000 H PLUS + - 107d:6f42 + + * - 88 + - Leadtek WinFast DTV1800 H (XC4000) + - 107d:6f38 + + * - 89 + - Leadtek TV2000 XP Global (SC4100) + - 107d:6f36 + + * - 90 + - Leadtek TV2000 XP Global (XC4100) + - 107d:6f43 diff --git a/Documentation/media/v4l-drivers/em28xx-cardlist.rst b/Documentation/media/v4l-drivers/em28xx-cardlist.rst index 76b1d301754c..ec938c08f43d 100644 --- a/Documentation/media/v4l-drivers/em28xx-cardlist.rst +++ b/Documentation/media/v4l-drivers/em28xx-cardlist.rst @@ -1,107 +1,422 @@ EM28xx cards list ================= -=========== ==================================================================== ================ ================================================================================================================================== -Card number Card name Empia Chip USB IDs -=========== ==================================================================== ================ ================================================================================================================================== -0 Unknown EM2800 video grabber em2800 eb1a:2800 -1 Unknown EM2750/28xx video grabber em2820 or em2840 eb1a:2710, eb1a:2820, eb1a:2821, eb1a:2860, eb1a:2861, eb1a:2862, eb1a:2863, eb1a:2870, eb1a:2881, eb1a:2883, eb1a:2868, eb1a:2875 -2 Terratec Cinergy 250 USB em2820 or em2840 0ccd:0036 -3 Pinnacle PCTV USB 2 em2820 or em2840 2304:0208 -4 Hauppauge WinTV USB 2 em2820 or em2840 2040:4200, 2040:4201 -5 MSI VOX USB 2.0 em2820 or em2840 -6 Terratec Cinergy 200 USB em2800 -7 Leadtek Winfast USB II em2800 0413:6023 -8 Kworld USB2800 em2800 -9 Pinnacle Dazzle DVC 90/100/101/107 / Kaiser Baas Video to DVD maker em2820 or em2840 1b80:e302, 1b80:e304, 2304:0207, 2304:021a, 093b:a003 -10 Hauppauge WinTV HVR 900 em2880 2040:6500 -11 Terratec Hybrid XS em2880 -12 Kworld PVR TV 2800 RF em2820 or em2840 -13 Terratec Prodigy XS em2880 -14 SIIG AVTuner-PVR / Pixelview Prolink PlayTV USB 2.0 em2820 or em2840 -15 V-Gear PocketTV em2800 -16 Hauppauge WinTV HVR 950 em2883 2040:6513, 2040:6517, 2040:651b -17 Pinnacle PCTV HD Pro Stick em2880 2304:0227 -18 Hauppauge WinTV HVR 900 (R2) em2880 2040:6502 -19 EM2860/SAA711X Reference Design em2860 -20 AMD ATI TV Wonder HD 600 em2880 0438:b002 -21 eMPIA Technology, Inc. 
GrabBeeX+ Video Encoder em2800 eb1a:2801 -22 EM2710/EM2750/EM2751 webcam grabber em2750 eb1a:2750, eb1a:2751 -23 Huaqi DLCW-130 em2750 -24 D-Link DUB-T210 TV Tuner em2820 or em2840 2001:f112 -25 Gadmei UTV310 em2820 or em2840 -26 Hercules Smart TV USB 2.0 em2820 or em2840 -27 Pinnacle PCTV USB 2 (Philips FM1216ME) em2820 or em2840 -28 Leadtek Winfast USB II Deluxe em2820 or em2840 -29 EM2860/TVP5150 Reference Design em2860 -30 Videology 20K14XUSB USB2.0 em2820 or em2840 -31 Usbgear VD204v9 em2821 -32 Supercomp USB 2.0 TV em2821 -33 Elgato Video Capture em2860 0fd9:0033 -34 Terratec Cinergy A Hybrid XS em2860 0ccd:004f -35 Typhoon DVD Maker em2860 -36 NetGMBH Cam em2860 -37 Gadmei UTV330 em2860 eb1a:50a6 -38 Yakumo MovieMixer em2861 -39 KWorld PVRTV 300U em2861 eb1a:e300 -40 Plextor ConvertX PX-TV100U em2861 093b:a005 -41 Kworld 350 U DVB-T em2870 eb1a:e350 -42 Kworld 355 U DVB-T em2870 eb1a:e355, eb1a:e357, eb1a:e359 -43 Terratec Cinergy T XS em2870 -44 Terratec Cinergy T XS (MT2060) em2870 0ccd:0043 -45 Pinnacle PCTV DVB-T em2870 -46 Compro, VideoMate U3 em2870 185b:2870 -47 KWorld DVB-T 305U em2880 eb1a:e305 -48 KWorld DVB-T 310U em2880 -49 MSI DigiVox A/D em2880 eb1a:e310 -50 MSI DigiVox A/D II em2880 eb1a:e320 -51 Terratec Hybrid XS Secam em2880 0ccd:004c -52 DNT DA2 Hybrid em2881 -53 Pinnacle Hybrid Pro em2881 -54 Kworld VS-DVB-T 323UR em2882 eb1a:e323 -55 Terratec Cinnergy Hybrid T USB XS (em2882) em2882 0ccd:005e, 0ccd:0042 -56 Pinnacle Hybrid Pro (330e) em2882 2304:0226 -57 Kworld PlusTV HD Hybrid 330 em2883 eb1a:a316 -58 Compro VideoMate ForYou/Stereo em2820 or em2840 185b:2041 -59 Pinnacle PCTV HD Mini em2874 2304:023f -60 Hauppauge WinTV HVR 850 em2883 2040:651f -61 Pixelview PlayTV Box 4 USB 2.0 em2820 or em2840 -62 Gadmei TVR200 em2820 or em2840 -63 Kaiomy TVnPC U2 em2860 eb1a:e303 -64 Easy Cap Capture DC-60 em2860 1b80:e309 -65 IO-DATA GV-MVP/SZ em2820 or em2840 04bb:0515 -66 Empire dual TV em2880 -67 Terratec Grabby em2860 0ccd:0096, 0ccd:10AF -68 Terratec AV350 em2860 0ccd:0084 -69 KWorld ATSC 315U HDTV TV Box em2882 eb1a:a313 -70 Evga inDtube em2882 -71 Silvercrest Webcam 1.3mpix em2820 or em2840 -72 Gadmei UTV330+ em2861 -73 Reddo DVB-C USB TV Box em2870 -74 Actionmaster/LinXcel/Digitus VC211A em2800 -75 Dikom DK300 em2882 -76 KWorld PlusTV 340U or UB435-Q (ATSC) em2870 1b80:a340 -77 EM2874 Leadership ISDBT em2874 -78 PCTV nanoStick T2 290e em28174 2013:024f -79 Terratec Cinergy H5 em2884 eb1a:2885, 0ccd:10a2, 0ccd:10ad, 0ccd:10b6 -80 PCTV DVB-S2 Stick (460e) em28174 2013:024c -81 Hauppauge WinTV HVR 930C em2884 2040:1605 -82 Terratec Cinergy HTC Stick em2884 0ccd:00b2 -83 Honestech Vidbox NW03 em2860 eb1a:5006 -84 MaxMedia UB425-TC em2874 1b80:e425 -85 PCTV QuatroStick (510e) em2884 2304:0242 -86 PCTV QuatroStick nano (520e) em2884 2013:0251 -87 Terratec Cinergy HTC USB XS em2884 0ccd:008e, 0ccd:00ac -88 C3 Tech Digital Duo HDTV/SDTV USB em2884 1b80:e755 -89 Delock 61959 em2874 1b80:e1cc -90 KWorld USB ATSC TV Stick UB435-Q V2 em2874 1b80:e346 -91 SpeedLink Vicious And Devine Laplace webcam em2765 1ae7:9003, 1ae7:9004 -92 PCTV DVB-S2 Stick (461e) em28178 2013:0258 -93 KWorld USB ATSC TV Stick UB435-Q V3 em2874 1b80:e34c -94 PCTV tripleStick (292e) em28178 2013:025f, 2040:0264 -95 Leadtek VC100 em2861 0413:6f07 -96 Terratec Cinergy T2 Stick HD em28178 eb1a:8179 -97 Elgato EyeTV Hybrid 2008 INT em2884 0fd9:0018 -98 PLEX PX-BCUD em28178 3275:0085 -99 Hauppauge WinTV-dualHD DVB em28174 2040:0265 -=========== 
==================================================================== ================ ================================================================================================================================== +.. tabularcolumns:: |p{1.4cm}|p{10.0cm}|p{1.9cm}|p{4.2cm}| + +.. flat-table:: + :header-rows: 1 + :widths: 2 12 3 16 + :stub-columns: 0 + + * - Card number + - Card name + - Empia Chip + - USB IDs + * - 0 + - Unknown EM2800 video grabber + - em2800 + - eb1a:2800 + * - 1 + - Unknown EM2750/28xx video grabber + - em2820 or em2840 + - eb1a:2710, eb1a:2820, eb1a:2821, eb1a:2860, eb1a:2861, eb1a:2862, eb1a:2863, eb1a:2870, eb1a:2881, eb1a:2883, eb1a:2868, eb1a:2875 + * - 2 + - Terratec Cinergy 250 USB + - em2820 or em2840 + - 0ccd:0036 + * - 3 + - Pinnacle PCTV USB 2 + - em2820 or em2840 + - 2304:0208 + * - 4 + - Hauppauge WinTV USB 2 + - em2820 or em2840 + - 2040:4200, 2040:4201 + * - 5 + - MSI VOX USB 2.0 + - em2820 or em2840 + - + * - 6 + - Terratec Cinergy 200 USB + - em2800 + - + * - 7 + - Leadtek Winfast USB II + - em2800 + - 0413:6023 + * - 8 + - Kworld USB2800 + - em2800 + - + * - 9 + - Pinnacle Dazzle DVC 90/100/101/107 / Kaiser Baas Video to DVD maker / Kworld DVD Maker 2 / Plextor ConvertX PX-AV100U + - em2820 or em2840 + - 1b80:e302, 1b80:e304, 2304:0207, 2304:021a, 093b:a003 + * - 10 + - Hauppauge WinTV HVR 900 + - em2880 + - 2040:6500 + * - 11 + - Terratec Hybrid XS + - em2880 + - + * - 12 + - Kworld PVR TV 2800 RF + - em2820 or em2840 + - + * - 13 + - Terratec Prodigy XS + - em2880 + - + * - 14 + - SIIG AVTuner-PVR / Pixelview Prolink PlayTV USB 2.0 + - em2820 or em2840 + - + * - 15 + - V-Gear PocketTV + - em2800 + - + * - 16 + - Hauppauge WinTV HVR 950 + - em2883 + - 2040:6513, 2040:6517, 2040:651b + * - 17 + - Pinnacle PCTV HD Pro Stick + - em2880 + - 2304:0227 + * - 18 + - Hauppauge WinTV HVR 900 (R2) + - em2880 + - 2040:6502 + * - 19 + - EM2860/SAA711X Reference Design + - em2860 + - + * - 20 + - AMD ATI TV Wonder HD 600 + - em2880 + - 0438:b002 + * - 21 + - eMPIA Technology, Inc. 
GrabBeeX+ Video Encoder + - em2800 + - eb1a:2801 + * - 22 + - EM2710/EM2750/EM2751 webcam grabber + - em2750 + - eb1a:2750, eb1a:2751 + * - 23 + - Huaqi DLCW-130 + - em2750 + - + * - 24 + - D-Link DUB-T210 TV Tuner + - em2820 or em2840 + - 2001:f112 + * - 25 + - Gadmei UTV310 + - em2820 or em2840 + - + * - 26 + - Hercules Smart TV USB 2.0 + - em2820 or em2840 + - + * - 27 + - Pinnacle PCTV USB 2 (Philips FM1216ME) + - em2820 or em2840 + - + * - 28 + - Leadtek Winfast USB II Deluxe + - em2820 or em2840 + - + * - 29 + - EM2860/TVP5150 Reference Design + - em2860 + - eb1a:5051 + * - 30 + - Videology 20K14XUSB USB2.0 + - em2820 or em2840 + - + * - 31 + - Usbgear VD204v9 + - em2821 + - + * - 32 + - Supercomp USB 2.0 TV + - em2821 + - + * - 33 + - Elgato Video Capture + - em2860 + - 0fd9:0033 + * - 34 + - Terratec Cinergy A Hybrid XS + - em2860 + - 0ccd:004f + * - 35 + - Typhoon DVD Maker + - em2860 + - + * - 36 + - NetGMBH Cam + - em2860 + - + * - 37 + - Gadmei UTV330 + - em2860 + - eb1a:50a6 + * - 38 + - Yakumo MovieMixer + - em2861 + - + * - 39 + - KWorld PVRTV 300U + - em2861 + - eb1a:e300 + * - 40 + - Plextor ConvertX PX-TV100U + - em2861 + - 093b:a005 + * - 41 + - Kworld 350 U DVB-T + - em2870 + - eb1a:e350 + * - 42 + - Kworld 355 U DVB-T + - em2870 + - eb1a:e355, eb1a:e357, eb1a:e359 + * - 43 + - Terratec Cinergy T XS + - em2870 + - + * - 44 + - Terratec Cinergy T XS (MT2060) + - em2870 + - 0ccd:0043 + * - 45 + - Pinnacle PCTV DVB-T + - em2870 + - + * - 46 + - Compro, VideoMate U3 + - em2870 + - 185b:2870 + * - 47 + - KWorld DVB-T 305U + - em2880 + - eb1a:e305 + * - 48 + - KWorld DVB-T 310U + - em2880 + - + * - 49 + - MSI DigiVox A/D + - em2880 + - eb1a:e310 + * - 50 + - MSI DigiVox A/D II + - em2880 + - eb1a:e320 + * - 51 + - Terratec Hybrid XS Secam + - em2880 + - 0ccd:004c + * - 52 + - DNT DA2 Hybrid + - em2881 + - + * - 53 + - Pinnacle Hybrid Pro + - em2881 + - + * - 54 + - Kworld VS-DVB-T 323UR + - em2882 + - eb1a:e323 + * - 55 + - Terratec Cinnergy Hybrid T USB XS (em2882) + - em2882 + - 0ccd:005e, 0ccd:0042 + * - 56 + - Pinnacle Hybrid Pro (330e) + - em2882 + - 2304:0226 + * - 57 + - Kworld PlusTV HD Hybrid 330 + - em2883 + - eb1a:a316 + * - 58 + - Compro VideoMate ForYou/Stereo + - em2820 or em2840 + - 185b:2041 + * - 59 + - Pinnacle PCTV HD Mini + - em2874 + - 2304:023f + * - 60 + - Hauppauge WinTV HVR 850 + - em2883 + - 2040:651f + * - 61 + - Pixelview PlayTV Box 4 USB 2.0 + - em2820 or em2840 + - + * - 62 + - Gadmei TVR200 + - em2820 or em2840 + - + * - 63 + - Kaiomy TVnPC U2 + - em2860 + - eb1a:e303 + * - 64 + - Easy Cap Capture DC-60 + - em2860 + - 1b80:e309 + * - 65 + - IO-DATA GV-MVP/SZ + - em2820 or em2840 + - 04bb:0515 + * - 66 + - Empire dual TV + - em2880 + - + * - 67 + - Terratec Grabby + - em2860 + - 0ccd:0096, 0ccd:10AF + * - 68 + - Terratec AV350 + - em2860 + - 0ccd:0084 + * - 69 + - KWorld ATSC 315U HDTV TV Box + - em2882 + - eb1a:a313 + * - 70 + - Evga inDtube + - em2882 + - + * - 71 + - Silvercrest Webcam 1.3mpix + - em2820 or em2840 + - + * - 72 + - Gadmei UTV330+ + - em2861 + - + * - 73 + - Reddo DVB-C USB TV Box + - em2870 + - + * - 74 + - Actionmaster/LinXcel/Digitus VC211A + - em2800 + - + * - 75 + - Dikom DK300 + - em2882 + - + * - 76 + - KWorld PlusTV 340U or UB435-Q (ATSC) + - em2870 + - 1b80:a340 + * - 77 + - EM2874 Leadership ISDBT + - em2874 + - + * - 78 + - PCTV nanoStick T2 290e + - em28174 + - 2013:024f + * - 79 + - Terratec Cinergy H5 + - em2884 + - eb1a:2885, 0ccd:10a2, 0ccd:10ad, 0ccd:10b6 + * - 80 + - PCTV DVB-S2 Stick (460e) + - em28174 + - 
2013:024c + * - 81 + - Hauppauge WinTV HVR 930C + - em2884 + - 2040:1605 + * - 82 + - Terratec Cinergy HTC Stick + - em2884 + - 0ccd:00b2 + * - 83 + - Honestech Vidbox NW03 + - em2860 + - eb1a:5006 + * - 84 + - MaxMedia UB425-TC + - em2874 + - 1b80:e425 + * - 85 + - PCTV QuatroStick (510e) + - em2884 + - 2304:0242 + * - 86 + - PCTV QuatroStick nano (520e) + - em2884 + - 2013:0251 + * - 87 + - Terratec Cinergy HTC USB XS + - em2884 + - 0ccd:008e, 0ccd:00ac + * - 88 + - C3 Tech Digital Duo HDTV/SDTV USB + - em2884 + - 1b80:e755 + * - 89 + - Delock 61959 + - em2874 + - 1b80:e1cc + * - 90 + - KWorld USB ATSC TV Stick UB435-Q V2 + - em2874 + - 1b80:e346 + * - 91 + - SpeedLink Vicious And Devine Laplace webcam + - em2765 + - 1ae7:9003, 1ae7:9004 + * - 92 + - PCTV DVB-S2 Stick (461e) + - em28178 + - 2013:0258 + * - 93 + - KWorld USB ATSC TV Stick UB435-Q V3 + - em2874 + - 1b80:e34c + * - 94 + - PCTV tripleStick (292e) + - em28178 + - 2013:025f, 2040:0264 + * - 95 + - Leadtek VC100 + - em2861 + - 0413:6f07 + * - 96 + - Terratec Cinergy T2 Stick HD + - em28178 + - eb1a:8179 + * - 97 + - Elgato EyeTV Hybrid 2008 INT + - em2884 + - 0fd9:0018 + * - 98 + - PLEX PX-BCUD + - em28178 + - 3275:0085 + * - 99 + - Hauppauge WinTV-dualHD DVB + - em28174 + - 2040:0265 + * - 100 + - Hauppauge WinTV-dualHD 01595 ATSC/QAM + - em28174 + - 2040:026d + * - 101 + - Terratec Cinergy H6 rev. 2 + - em2884 + - 0ccd:10b2 diff --git a/Documentation/media/v4l-drivers/index.rst b/Documentation/media/v4l-drivers/index.rst index 10f2ce42ece2..679238e786a7 100644 --- a/Documentation/media/v4l-drivers/index.rst +++ b/Documentation/media/v4l-drivers/index.rst @@ -21,7 +21,9 @@ more details. For more details see the file COPYING in the source distribution of Linux. -.. class:: toc-title +.. only:: html + + .. class:: toc-title Table of Contents @@ -50,6 +52,7 @@ For more details see the file COPYING in the source distribution of Linux. philips pvrusb2 pxa_camera + qcom_camss radiotrack rcar-fdp1 saa7134 diff --git a/Documentation/media/v4l-drivers/ivtv-cardlist.rst b/Documentation/media/v4l-drivers/ivtv-cardlist.rst index 754ffa820b4c..022dca80c2c8 100644 --- a/Documentation/media/v4l-drivers/ivtv-cardlist.rst +++ b/Documentation/media/v4l-drivers/ivtv-cardlist.rst @@ -1,38 +1,137 @@ IVTV cards list =============== -=========== ============================================================= ==================================================== -Card number Card name PCI IDs -=========== ============================================================= ==================================================== -0 Hauppauge WinTV PVR-250 IVTV16 104d:813d -1 Hauppauge WinTV PVR-350 IVTV16 104d:813d -2 Hauppauge WinTV PVR-150 IVTV16 104d:813d -3 AVerMedia M179 IVTV15 1461:a3cf, IVTV15 1461:a3ce -4 Yuan MPG600, Kuroutoshikou ITVC16-STVLP IVTV16 12ab:fff3, IVTV16 12ab:ffff -5 YUAN MPG160, Kuroutoshikou ITVC15-STVLP, I/O Data GV-M2TV/PCI IVTV15 10fc:40a0 -6 Yuan PG600, Diamond PVR-550 IVTV16 ff92:0070, IVTV16 ffab:0600 -7 Adaptec VideOh! AVC-2410 IVTV16 9005:0093 -8 Adaptec VideOh! 
AVC-2010 IVTV16 9005:0092 -9 Nagase Transgear 5000TV IVTV16 1461:bfff -10 AOpen VA2000MAX-SNT6 IVTV16 0000:ff5f -11 Yuan MPG600GR, Kuroutoshikou CX23416GYC-STVLP IVTV16 12ab:0600, IVTV16 fbab:0600, IVTV16 1154:0523 -12 I/O Data GV-MVP/RX, GV-MVP/RX2W (dual tuner) IVTV16 10fc:d01e, IVTV16 10fc:d038, IVTV16 10fc:d039 -13 I/O Data GV-MVP/RX2E IVTV16 10fc:d025 -14 GotView PCI DVD IVTV16 12ab:0600 -15 GotView PCI DVD2 Deluxe IVTV16 ffac:0600 -16 Yuan MPC622 IVTV16 ff01:d998 -17 Digital Cowboy DCT-MTVP1 IVTV16 1461:bfff -18 Yuan PG600-2, GotView PCI DVD Lite IVTV16 ffab:0600, IVTV16 ffad:0600 -19 Club3D ZAP-TV1x01 IVTV16 ffab:0600 -20 AVerTV MCE 116 Plus IVTV16 1461:c439 -21 ASUS Falcon2 IVTV16 1043:4b66, IVTV16 1043:462e, IVTV16 1043:4b2e -22 AVerMedia PVR-150 Plus / AVerTV M113 Partsnic (Daewoo) Tuner IVTV16 1461:c034, IVTV16 1461:c035 -23 AVerMedia EZMaker PCI Deluxe IVTV16 1461:c03f -24 AVerMedia M104 IVTV16 1461:c136 -25 Buffalo PC-MV5L/PCI IVTV16 1154:052b -26 AVerMedia UltraTV 1500 MCE / AVerTV M113 Philips Tuner IVTV16 1461:c019, IVTV16 1461:c01b -27 Sony VAIO Giga Pocket (ENX Kikyou) IVTV16 104d:813d -28 Hauppauge WinTV PVR-350 (V1) IVTV16 104d:813d -29 Yuan MPG600GR, Kuroutoshikou CX23416GYC-STVLP (no GR) IVTV16 104d:813d -30 Yuan MPG600GR, Kuroutoshikou CX23416GYC-STVLP (no GR/YCS) IVTV16 104d:813d -=========== ============================================================= ==================================================== +.. tabularcolumns:: |p{1.4cm}|p{12.7cm}|p{3.4cm}| + +.. flat-table:: + :header-rows: 1 + :widths: 2 19 18 + :stub-columns: 0 + + * - Card number + - Card name + - PCI IDs + + * - 0 + - Hauppauge WinTV PVR-250 + - IVTV16 104d:813d + + * - 1 + - Hauppauge WinTV PVR-350 + - IVTV16 104d:813d + + * - 2 + - Hauppauge WinTV PVR-150 + - IVTV16 104d:813d + + * - 3 + - AVerMedia M179 + - IVTV15 1461:a3cf, IVTV15 1461:a3ce + + * - 4 + - Yuan MPG600, Kuroutoshikou ITVC16-STVLP + - IVTV16 12ab:fff3, IVTV16 12ab:ffff + + * - 5 + - YUAN MPG160, Kuroutoshikou ITVC15-STVLP, I/O Data GV-M2TV/PCI + - IVTV15 10fc:40a0 + + * - 6 + - Yuan PG600, Diamond PVR-550 + - IVTV16 ff92:0070, IVTV16 ffab:0600 + + * - 7 + - Adaptec VideOh! AVC-2410 + - IVTV16 9005:0093 + + * - 8 + - Adaptec VideOh! 
AVC-2010 + - IVTV16 9005:0092 + + * - 9 + - Nagase Transgear 5000TV + - IVTV16 1461:bfff + + * - 10 + - AOpen VA2000MAX-SNT6 + - IVTV16 0000:ff5f + + * - 11 + - Yuan MPG600GR, Kuroutoshikou CX23416GYC-STVLP + - IVTV16 12ab:0600, IVTV16 fbab:0600, IVTV16 1154:0523 + + * - 12 + - I/O Data GV-MVP/RX, GV-MVP/RX2W (dual tuner) + - IVTV16 10fc:d01e, IVTV16 10fc:d038, IVTV16 10fc:d039 + + * - 13 + - I/O Data GV-MVP/RX2E + - IVTV16 10fc:d025 + + * - 14 + - GotView PCI DVD + - IVTV16 12ab:0600 + + * - 15 + - GotView PCI DVD2 Deluxe + - IVTV16 ffac:0600 + + * - 16 + - Yuan MPC622 + - IVTV16 ff01:d998 + + * - 17 + - Digital Cowboy DCT-MTVP1 + - IVTV16 1461:bfff + + * - 18 + - Yuan PG600-2, GotView PCI DVD Lite + - IVTV16 ffab:0600, IVTV16 ffad:0600 + + * - 19 + - Club3D ZAP-TV1x01 + - IVTV16 ffab:0600 + + * - 20 + - AVerTV MCE 116 Plus + - IVTV16 1461:c439 + + * - 21 + - ASUS Falcon2 + - IVTV16 1043:4b66, IVTV16 1043:462e, IVTV16 1043:4b2e + + * - 22 + - AVerMedia PVR-150 Plus / AVerTV M113 Partsnic (Daewoo) Tuner + - IVTV16 1461:c034, IVTV16 1461:c035 + + * - 23 + - AVerMedia EZMaker PCI Deluxe + - IVTV16 1461:c03f + + * - 24 + - AVerMedia M104 + - IVTV16 1461:c136 + + * - 25 + - Buffalo PC-MV5L/PCI + - IVTV16 1154:052b + + * - 26 + - AVerMedia UltraTV 1500 MCE / AVerTV M113 Philips Tuner + - IVTV16 1461:c019, IVTV16 1461:c01b + + * - 27 + - Sony VAIO Giga Pocket (ENX Kikyou) + - IVTV16 104d:813d + + * - 28 + - Hauppauge WinTV PVR-350 (V1) + - IVTV16 104d:813d + + * - 29 + - Yuan MPG600GR, Kuroutoshikou CX23416GYC-STVLP (no GR) + - IVTV16 104d:813d + + * - 30 + - Yuan MPG600GR, Kuroutoshikou CX23416GYC-STVLP (no GR/YCS) + - IVTV16 104d:813d
diff --git a/Documentation/media/v4l-drivers/qcom_camss.rst b/Documentation/media/v4l-drivers/qcom_camss.rst new file mode 100644 index 000000000000..9e66b7b5770f --- /dev/null +++ b/Documentation/media/v4l-drivers/qcom_camss.rst @@ -0,0 +1,156 @@ +.. include:: + +Qualcomm Camera Subsystem driver +================================ + +Introduction +------------ + +This file documents the Qualcomm Camera Subsystem driver located under +drivers/media/platform/qcom/camss-8x16. + +The current version of the driver supports the Camera Subsystem found on +Qualcomm MSM8916 and APQ8016 processors. + +The driver implements the V4L2, Media controller and V4L2 subdev interfaces. +Camera sensors that use the in-kernel V4L2 subdev interface are supported. + +The driver is implemented using the Qualcomm Camera Subsystem driver for +Android, as found in Code Aurora [#f1]_, as a reference. + + +Qualcomm Camera Subsystem hardware +---------------------------------- + +The Camera Subsystem hardware found on 8x16 processors and supported by the +driver consists of: + +- 2 CSIPHY modules. They handle the Physical layer of the CSI2 receivers. + A separate camera sensor can be connected to each of the CSIPHY modules; +- 2 CSID (CSI Decoder) modules. They handle the Protocol and Application layer + of the CSI2 receivers. A CSID can decode the data stream from any of the + CSIPHYs. Each CSID also contains a TG (Test Generator) block which can + generate artificial input data for test purposes; +- ISPIF (ISP Interface) module. Handles the routing of the data streams from + the CSIDs to the inputs of the VFE; +- VFE (Video Front End) module. Contains a pipeline of image processing hardware + blocks. The VFE has different input interfaces. The PIX (Pixel) input + interface feeds the input data to the image processing pipeline. The image + processing pipeline also contains a scale and crop module at the end. Three + RDI (Raw Dump Interface) input interfaces bypass the image processing + pipeline. The VFE also contains the AXI bus interface which writes the output + data to memory.
+ + +Supported functionality +----------------------- + +The current version of the driver supports: + +- Input from a camera sensor via CSIPHY; +- Generation of test input data by the TG in CSID; +- RDI interface of VFE - raw dump of the input data to memory. + + Supported formats: + + - YUYV/UYVY/YVYU/VYUY (packed YUV 4:2:2 - V4L2_PIX_FMT_YUYV / + V4L2_PIX_FMT_UYVY / V4L2_PIX_FMT_YVYU / V4L2_PIX_FMT_VYUY); + - MIPI RAW8 (8bit Bayer RAW - V4L2_PIX_FMT_SRGGB8 / + V4L2_PIX_FMT_SGRBG8 / V4L2_PIX_FMT_SGBRG8 / V4L2_PIX_FMT_SBGGR8); + - MIPI RAW10 (10bit packed Bayer RAW - V4L2_PIX_FMT_SBGGR10P / + V4L2_PIX_FMT_SGBRG10P / V4L2_PIX_FMT_SGRBG10P / V4L2_PIX_FMT_SRGGB10P); + - MIPI RAW12 (12bit packed Bayer RAW - V4L2_PIX_FMT_SRGGB12P / + V4L2_PIX_FMT_SGBRG12P / V4L2_PIX_FMT_SGRBG12P / V4L2_PIX_FMT_SBGGR12P). + +- PIX interface of VFE + + - Format conversion of the input data. + + Supported input formats: + + - YUYV/UYVY/YVYU/VYUY (packed YUV 4:2:2 - V4L2_PIX_FMT_YUYV / + V4L2_PIX_FMT_UYVY / V4L2_PIX_FMT_YVYU / V4L2_PIX_FMT_VYUY). + + Supported output formats: + + - NV12/NV21 (two plane YUV 4:2:0 - V4L2_PIX_FMT_NV12 / V4L2_PIX_FMT_NV21); + - NV16/NV61 (two plane YUV 4:2:2 - V4L2_PIX_FMT_NV16 / V4L2_PIX_FMT_NV61). + + - Scaling support. Configuration of the VFE Encoder Scale module + for downscaling with a ratio of up to 16x. + + - Cropping support. Configuration of the VFE Encoder Crop module. + +- Concurrent and independent usage of two data inputs - these can be camera + sensors and/or the TG. + + +Driver Architecture and Design +------------------------------ + +The driver implements the V4L2 subdev interface. To model the hardware links +between the modules and to expose a clean, logical and usable interface, the +driver is split into V4L2 sub-devices as follows: + +- 2 CSIPHY sub-devices - each CSIPHY is represented by a single sub-device; +- 2 CSID sub-devices - each CSID is represented by a single sub-device; +- 2 ISPIF sub-devices - ISPIF is represented by a number of sub-devices equal + to the number of CSID sub-devices; +- 4 VFE sub-devices - VFE is represented by a number of sub-devices equal to + the number of the input interfaces (3 RDI and 1 PIX). + +The considerations behind splitting the driver in this particular way are as +follows: + +- representing the CSIPHY and CSID modules by a separate sub-device for each + module allows modeling the hardware links between these modules; +- representing the VFE by a separate sub-device for each input interface allows + using the input interfaces concurrently and independently, as supported by + the hardware; +- representing the ISPIF by a number of sub-devices equal to the number of CSID + sub-devices allows creating linear media controller pipelines when two + cameras are used simultaneously. This avoids branches in the pipelines which + would otherwise require a) userspace and b) the media framework (e.g. power + on/off operations) to make assumptions about the data flow from a sink pad to + a source pad on a single media entity. + +Each VFE sub-device is linked to a separate video device node.
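+
+As a quick illustration (a minimal sketch, not part of the driver and not in
+the original version of this document), the resulting media graph can be
+walked from user space with the standard media controller UAPI. The
+/dev/media0 path is an assumption and may differ between systems::
+
+  /* Enumerate the media entities registered by the driver (msm_csiphy*,
+   * msm_csid*, msm_ispif*, msm_vfe0_*) and the connected sensor sub-devices.
+   */
+  #include <fcntl.h>
+  #include <stdio.h>
+  #include <string.h>
+  #include <sys/ioctl.h>
+  #include <unistd.h>
+  #include <linux/media.h>
+
+  int main(void)
+  {
+          struct media_entity_desc entity;
+          int fd = open("/dev/media0", O_RDWR);
+
+          if (fd < 0) {
+                  perror("open /dev/media0");
+                  return 1;
+          }
+
+          memset(&entity, 0, sizeof(entity));
+          for (;;) {
+                  /* Ask for the entity following the last returned id. */
+                  entity.id |= MEDIA_ENT_ID_FLAG_NEXT;
+                  if (ioctl(fd, MEDIA_IOC_ENUM_ENTITIES, &entity) < 0)
+                          break;  /* no more entities */
+                  printf("entity %u: %s (%hu pads, %hu links)\n",
+                         entity.id, entity.name, entity.pads, entity.links);
+          }
+
+          close(fd);
+          return 0;
+  }
+
+On an 8x16 board with both sensors attached, this should list the sub-device
+and video device entities that appear in the graph below.
+
+The media controller pipeline graph is as follows (with two OV5645 camera
+sensors connected):
+
+.. _qcom_camss_graph:
+
+.. 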
kernel-figure:: qcom_camss_graph.dot + :alt: qcom_camss_graph.dot + :align: center + + Media pipeline graph + + +Implementation +-------------- + +Runtime configuration of the hardware (updating settings while streaming) is +not required to implement the currently supported functionality. The complete +configuration on each hardware module is applied on STREAMON ioctl based on +the current active media links, formats and controls set. + +The output size of the scaler module in the VFE is configured with the actual +compose selection rectangle on the sink pad of the 'msm_vfe0_pix' entity. + +The crop output area of the crop module in the VFE is configured with the actual +crop selection rectangle on the source pad of the 'msm_vfe0_pix' entity. + + +Documentation +------------- + +APQ8016 Specification: +https://developer.qualcomm.com/download/sd410/snapdragon-410-processor-device-specification.pdf +Referenced 2016-11-24. + + +References +---------- + +.. [#f1] https://source.codeaurora.org/quic/la/kernel/msm-3.10/ diff --git a/Documentation/media/v4l-drivers/qcom_camss_graph.dot b/Documentation/media/v4l-drivers/qcom_camss_graph.dot new file mode 100644 index 000000000000..827fc7112c1e --- /dev/null +++ b/Documentation/media/v4l-drivers/qcom_camss_graph.dot @@ -0,0 +1,41 @@ +digraph board { + rankdir=TB + n00000001 [label="{{ 0} | msm_csiphy0\n/dev/v4l-subdev0 | { 1}}", shape=Mrecord, style=filled, fillcolor=green] + n00000001:port1 -> n00000007:port0 [style=dashed] + n00000001:port1 -> n0000000a:port0 [style=dashed] + n00000004 [label="{{ 0} | msm_csiphy1\n/dev/v4l-subdev1 | { 1}}", shape=Mrecord, style=filled, fillcolor=green] + n00000004:port1 -> n00000007:port0 [style=dashed] + n00000004:port1 -> n0000000a:port0 [style=dashed] + n00000007 [label="{{ 0} | msm_csid0\n/dev/v4l-subdev2 | { 1}}", shape=Mrecord, style=filled, fillcolor=green] + n00000007:port1 -> n0000000d:port0 [style=dashed] + n00000007:port1 -> n00000010:port0 [style=dashed] + n0000000a [label="{{ 0} | msm_csid1\n/dev/v4l-subdev3 | { 1}}", shape=Mrecord, style=filled, fillcolor=green] + n0000000a:port1 -> n0000000d:port0 [style=dashed] + n0000000a:port1 -> n00000010:port0 [style=dashed] + n0000000d [label="{{ 0} | msm_ispif0\n/dev/v4l-subdev4 | { 1}}", shape=Mrecord, style=filled, fillcolor=green] + n0000000d:port1 -> n00000013:port0 [style=dashed] + n0000000d:port1 -> n0000001c:port0 [style=dashed] + n0000000d:port1 -> n00000025:port0 [style=dashed] + n0000000d:port1 -> n0000002e:port0 [style=dashed] + n00000010 [label="{{ 0} | msm_ispif1\n/dev/v4l-subdev5 | { 1}}", shape=Mrecord, style=filled, fillcolor=green] + n00000010:port1 -> n00000013:port0 [style=dashed] + n00000010:port1 -> n0000001c:port0 [style=dashed] + n00000010:port1 -> n00000025:port0 [style=dashed] + n00000010:port1 -> n0000002e:port0 [style=dashed] + n00000013 [label="{{ 0} | msm_vfe0_rdi0\n/dev/v4l-subdev6 | { 1}}", shape=Mrecord, style=filled, fillcolor=green] + n00000013:port1 -> n00000016 [style=bold] + n00000016 [label="msm_vfe0_video0\n/dev/video0", shape=box, style=filled, fillcolor=yellow] + n0000001c [label="{{ 0} | msm_vfe0_rdi1\n/dev/v4l-subdev7 | { 1}}", shape=Mrecord, style=filled, fillcolor=green] + n0000001c:port1 -> n0000001f [style=bold] + n0000001f [label="msm_vfe0_video1\n/dev/video1", shape=box, style=filled, fillcolor=yellow] + n00000025 [label="{{ 0} | msm_vfe0_rdi2\n/dev/v4l-subdev8 | { 1}}", shape=Mrecord, style=filled, fillcolor=green] + n00000025:port1 -> n00000028 [style=bold] + n00000028 [label="msm_vfe0_video2\n/dev/video2", 
shape=box, style=filled, fillcolor=yellow] + n0000002e [label="{{ 0} | msm_vfe0_pix\n/dev/v4l-subdev9 | { 1}}", shape=Mrecord, style=filled, fillcolor=green] + n0000002e:port1 -> n00000031 [style=bold] + n00000031 [label="msm_vfe0_video3\n/dev/video3", shape=box, style=filled, fillcolor=yellow] + n00000057 [label="{{} | ov5645 1-0076\n/dev/v4l-subdev10 | { 0}}", shape=Mrecord, style=filled, fillcolor=green] + n00000057:port0 -> n00000001:port0 [style=bold] + n00000059 [label="{{} | ov5645 1-0074\n/dev/v4l-subdev11 | { 0}}", shape=Mrecord, style=filled, fillcolor=green] + n00000059:port0 -> n00000004:port0 [style=bold] +} diff --git a/Documentation/media/v4l-drivers/saa7134-cardlist.rst b/Documentation/media/v4l-drivers/saa7134-cardlist.rst index a5efa8f4b8e4..6e4c35cbaabf 100644 --- a/Documentation/media/v4l-drivers/saa7134-cardlist.rst +++ b/Documentation/media/v4l-drivers/saa7134-cardlist.rst @@ -1,204 +1,801 @@ SAA7134 cards list ================== -=========== ======================================================= ================================================================ -Card number Card name PCI IDs -=========== ======================================================= ================================================================ -0 UNKNOWN/GENERIC -1 Proteus Pro [philips reference design] 1131:2001, 1131:2001 -2 LifeView FlyVIDEO3000 5168:0138, 4e42:0138 -3 LifeView/Typhoon FlyVIDEO2000 5168:0138, 4e42:0138 -4 EMPRESS 1131:6752 -5 SKNet Monster TV 1131:4e85 -6 Tevion MD 9717 -7 KNC One TV-Station RDS / Typhoon TV Tuner RDS 1131:fe01, 1894:fe01 -8 Terratec Cinergy 400 TV 153b:1142 -9 Medion 5044 -10 Kworld/KuroutoShikou SAA7130-TVPCI -11 Terratec Cinergy 600 TV 153b:1143 -12 Medion 7134 16be:0003, 16be:5000 -13 Typhoon TV+Radio 90031 -14 ELSA EX-VISION 300TV 1048:226b -15 ELSA EX-VISION 500TV 1048:226a -16 ASUS TV-FM 7134 1043:4842, 1043:4830, 1043:4840 -17 AOPEN VA1000 POWER 1131:7133 -18 BMK MPEX No Tuner -19 Compro VideoMate TV 185b:c100 -20 Matrox CronosPlus 102B:48d0 -21 10MOONS PCI TV CAPTURE CARD 1131:2001 -22 AverMedia M156 / Medion 2819 1461:a70b -23 BMK MPEX Tuner -24 KNC One TV-Station DVR 1894:a006 -25 ASUS TV-FM 7133 1043:4843 -26 Pinnacle PCTV Stereo (saa7134) 11bd:002b -27 Manli MuchTV M-TV002 -28 Manli MuchTV M-TV001 -29 Nagase Sangyo TransGear 3000TV 1461:050c -30 Elitegroup ECS TVP3XP FM1216 Tuner Card(PAL-BG,FM) 1019:4cb4 -31 Elitegroup ECS TVP3XP FM1236 Tuner Card (NTSC,FM) 1019:4cb5 -32 AVACS SmartTV -33 AVerMedia DVD EZMaker 1461:10ff -34 Noval Prime TV 7133 -35 AverMedia AverTV Studio 305 1461:2115 -36 UPMOST PURPLE TV 12ab:0800 -37 Items MuchTV Plus / IT-005 -38 Terratec Cinergy 200 TV 153b:1152 -39 LifeView FlyTV Platinum Mini 5168:0212, 4e42:0212, 5169:1502 -40 Compro VideoMate TV PVR/FM 185b:c100 -41 Compro VideoMate TV Gold+ 185b:c100 -42 Sabrent SBT-TVFM (saa7130) -43 :Zolid Xpert TV7134 -44 Empire PCI TV-Radio LE -45 Avermedia AVerTV Studio 307 1461:9715 -46 AVerMedia Cardbus TV/Radio (E500) 1461:d6ee -47 Terratec Cinergy 400 mobile 153b:1162 -48 Terratec Cinergy 600 TV MK3 153b:1158 -49 Compro VideoMate Gold+ Pal 185b:c200 -50 Pinnacle PCTV 300i DVB-T + PAL 11bd:002d -51 ProVideo PV952 1540:9524 -52 AverMedia AverTV/305 1461:2108 -53 ASUS TV-FM 7135 1043:4845 -54 LifeView FlyTV Platinum FM / Gold 5168:0214, 5168:5214, 1489:0214, 5168:0304 -55 LifeView FlyDVB-T DUO / MSI TV@nywhere Duo 5168:0306, 4E42:0306 -56 Avermedia AVerTV 307 1461:a70a -57 Avermedia AVerTV GO 007 FM 1461:f31f -58 ADS Tech Instant TV (saa7135) 1421:0350, 1421:0351, 
1421:0370, 1421:1370 -59 Kworld/Tevion V-Stream Xpert TV PVR7134 -60 LifeView/Typhoon/Genius FlyDVB-T Duo Cardbus 5168:0502, 4e42:0502, 1489:0502 -61 Philips TOUGH DVB-T reference design 1131:2004 -62 Compro VideoMate TV Gold+II -63 Kworld Xpert TV PVR7134 -64 FlyTV mini Asus Digimatrix 1043:0210 -65 V-Stream Studio TV Terminator -66 Yuan TUN-900 (saa7135) -67 Beholder BeholdTV 409 FM 0000:4091 -68 GoTView 7135 PCI 5456:7135 -69 Philips EUROPA V3 reference design 1131:2004 -70 Compro Videomate DVB-T300 185b:c900 -71 Compro Videomate DVB-T200 185b:c901 -72 RTD Embedded Technologies VFG7350 1435:7350 -73 RTD Embedded Technologies VFG7330 1435:7330 -74 LifeView FlyTV Platinum Mini2 14c0:1212 -75 AVerMedia AVerTVHD MCE A180 1461:1044 -76 SKNet MonsterTV Mobile 1131:4ee9 -77 Pinnacle PCTV 40i/50i/110i (saa7133) 11bd:002e -78 ASUSTeK P7131 Dual 1043:4862 -79 Sedna/MuchTV PC TV Cardbus TV/Radio (ITO25 Rev:2B) -80 ASUS Digimatrix TV 1043:0210 -81 Philips Tiger reference design 1131:2018 -82 MSI TV@Anywhere plus 1462:6231, 1462:8624 -83 Terratec Cinergy 250 PCI TV 153b:1160 -84 LifeView FlyDVB Trio 5168:0319 -85 AverTV DVB-T 777 1461:2c05, 1461:2c05 -86 LifeView FlyDVB-T / Genius VideoWonder DVB-T 5168:0301, 1489:0301 -87 ADS Instant TV Duo Cardbus PTV331 0331:1421 -88 Tevion/KWorld DVB-T 220RF 17de:7201 -89 ELSA EX-VISION 700TV 1048:226c -90 Kworld ATSC110/115 17de:7350, 17de:7352 -91 AVerMedia A169 B 1461:7360 -92 AVerMedia A169 B1 1461:6360 -93 Medion 7134 Bridge #2 16be:0005 -94 LifeView FlyDVB-T Hybrid Cardbus/MSI TV @nywhere A/D NB 5168:3306, 5168:3502, 5168:3307, 4e42:3502 -95 LifeView FlyVIDEO3000 (NTSC) 5169:0138 -96 Medion Md8800 Quadro 16be:0007, 16be:0008, 16be:000d -97 LifeView FlyDVB-S /Acorp TV134DS 5168:0300, 4e42:0300 -98 Proteus Pro 2309 0919:2003 -99 AVerMedia TV Hybrid A16AR 1461:2c00 -100 Asus Europa2 OEM 1043:4860 -101 Pinnacle PCTV 310i 11bd:002f -102 Avermedia AVerTV Studio 507 1461:9715 -103 Compro Videomate DVB-T200A -104 Hauppauge WinTV-HVR1110 DVB-T/Hybrid 0070:6700, 0070:6701, 0070:6702, 0070:6703, 0070:6704, 0070:6705 -105 Terratec Cinergy HT PCMCIA 153b:1172 -106 Encore ENLTV 1131:2342, 1131:2341, 3016:2344 -107 Encore ENLTV-FM 1131:230f -108 Terratec Cinergy HT PCI 153b:1175 -109 Philips Tiger - S Reference design -110 Avermedia M102 1461:f31e -111 ASUS P7131 4871 1043:4871 -112 ASUSTeK P7131 Hybrid 1043:4876 -113 Elitegroup ECS TVP3XP FM1246 Tuner Card (PAL,FM) 1019:4cb6 -114 KWorld DVB-T 210 17de:7250 -115 Sabrent PCMCIA TV-PCB05 0919:2003 -116 10MOONS TM300 TV Card 1131:2304 -117 Avermedia Super 007 1461:f01d -118 Beholder BeholdTV 401 0000:4016 -119 Beholder BeholdTV 403 0000:4036 -120 Beholder BeholdTV 403 FM 0000:4037 -121 Beholder BeholdTV 405 0000:4050 -122 Beholder BeholdTV 405 FM 0000:4051 -123 Beholder BeholdTV 407 0000:4070 -124 Beholder BeholdTV 407 FM 0000:4071 -125 Beholder BeholdTV 409 0000:4090 -126 Beholder BeholdTV 505 FM 5ace:5050 -127 Beholder BeholdTV 507 FM / BeholdTV 509 FM 5ace:5070, 5ace:5090 -128 Beholder BeholdTV Columbus TV/FM 0000:5201 -129 Beholder BeholdTV 607 FM 5ace:6070 -130 Beholder BeholdTV M6 5ace:6190 -131 Twinhan Hybrid DTV-DVB 3056 PCI 1822:0022 -132 Genius TVGO AM11MCE -133 NXP Snake DVB-S reference design -134 Medion/Creatix CTX953 Hybrid 16be:0010 -135 MSI TV@nywhere A/D v1.1 1462:8625 -136 AVerMedia Cardbus TV/Radio (E506R) 1461:f436 -137 AVerMedia Hybrid TV/Radio (A16D) 1461:f936 -138 Avermedia M115 1461:a836 -139 Compro VideoMate T750 185b:c900 -140 Avermedia DVB-S Pro A700 1461:a7a1 -141 Avermedia DVB-S Hybrid+FM A700 
1461:a7a2 -142 Beholder BeholdTV H6 5ace:6290 -143 Beholder BeholdTV M63 5ace:6191 -144 Beholder BeholdTV M6 Extra 5ace:6193 -145 AVerMedia MiniPCI DVB-T Hybrid M103 1461:f636, 1461:f736 -146 ASUSTeK P7131 Analog -147 Asus Tiger 3in1 1043:4878 -148 Encore ENLTV-FM v5.3 1a7f:2008 -149 Avermedia PCI pure analog (M135A) 1461:f11d -150 Zogis Real Angel 220 -151 ADS Tech Instant HDTV 1421:0380 -152 Asus Tiger Rev:1.00 1043:4857 -153 Kworld Plus TV Analog Lite PCI 17de:7128 -154 Avermedia AVerTV GO 007 FM Plus 1461:f31d -155 Hauppauge WinTV-HVR1150 ATSC/QAM-Hybrid 0070:6706, 0070:6708 -156 Hauppauge WinTV-HVR1120 DVB-T/Hybrid 0070:6707, 0070:6709, 0070:670a -157 Avermedia AVerTV Studio 507UA 1461:a11b -158 AVerMedia Cardbus TV/Radio (E501R) 1461:b7e9 -159 Beholder BeholdTV 505 RDS 0000:505B -160 Beholder BeholdTV 507 RDS 0000:5071 -161 Beholder BeholdTV 507 RDS 0000:507B -162 Beholder BeholdTV 607 FM 5ace:6071 -163 Beholder BeholdTV 609 FM 5ace:6090 -164 Beholder BeholdTV 609 FM 5ace:6091 -165 Beholder BeholdTV 607 RDS 5ace:6072 -166 Beholder BeholdTV 607 RDS 5ace:6073 -167 Beholder BeholdTV 609 RDS 5ace:6092 -168 Beholder BeholdTV 609 RDS 5ace:6093 -169 Compro VideoMate S350/S300 185b:c900 -170 AverMedia AverTV Studio 505 1461:a115 -171 Beholder BeholdTV X7 5ace:7595 -172 RoverMedia TV Link Pro FM 19d1:0138 -173 Zolid Hybrid TV Tuner PCI 1131:2004 -174 Asus Europa Hybrid OEM 1043:4847 -175 Leadtek Winfast DTV1000S 107d:6655 -176 Beholder BeholdTV 505 RDS 0000:5051 -177 Hawell HW-404M7 -178 Beholder BeholdTV H7 5ace:7190 -179 Beholder BeholdTV A7 5ace:7090 -180 Avermedia PCI M733A 1461:4155, 1461:4255 -181 TechoTrend TT-budget T-3000 13c2:2804 -182 Kworld PCI SBTVD/ISDB-T Full-Seg Hybrid 17de:b136 -183 Compro VideoMate Vista M1F 185b:c900 -184 Encore ENLTV-FM 3 1a7f:2108 -185 MagicPro ProHDTV Pro2 DMB-TH/Hybrid 17de:d136 -186 Beholder BeholdTV 501 5ace:5010 -187 Beholder BeholdTV 503 FM 5ace:5030 -188 Sensoray 811/911 6000:0811, 6000:0911 -189 Kworld PC150-U 17de:a134 -190 Asus My Cinema PS3-100 1043:48cd -191 Hawell HW-9004V1 -192 AverMedia AverTV Satellite Hybrid+FM A706 1461:2055 -193 WIS Voyager or compatible 1905:7007 -194 AverMedia AverTV/505 1461:a10a -195 Leadtek Winfast TV2100 FM 107d:6f3a -196 SnaZio* TVPVR PRO 1779:13cf -=========== ======================================================= ================================================================ +.. tabularcolumns:: |p{1.4cm}|p{11.1cm}|p{4.2cm}| + +.. 
flat-table:: + :header-rows: 1 + :widths: 2 19 18 + :stub-columns: 0 + + * - Card number + - Card name + - PCI IDs + + * - 0 + - UNKNOWN/GENERIC + - + + * - 1 + - Proteus Pro [philips reference design] + - 1131:2001, 1131:2001 + + * - 2 + - LifeView FlyVIDEO3000 + - 5168:0138, 4e42:0138 + + * - 3 + - LifeView/Typhoon FlyVIDEO2000 + - 5168:0138, 4e42:0138 + + * - 4 + - EMPRESS + - 1131:6752 + + * - 5 + - SKNet Monster TV + - 1131:4e85 + + * - 6 + - Tevion MD 9717 + - + + * - 7 + - KNC One TV-Station RDS / Typhoon TV Tuner RDS + - 1131:fe01, 1894:fe01 + + * - 8 + - Terratec Cinergy 400 TV + - 153b:1142 + + * - 9 + - Medion 5044 + - + + * - 10 + - Kworld/KuroutoShikou SAA7130-TVPCI + - + + * - 11 + - Terratec Cinergy 600 TV + - 153b:1143 + + * - 12 + - Medion 7134 + - 16be:0003, 16be:5000 + + * - 13 + - Typhoon TV+Radio 90031 + - + + * - 14 + - ELSA EX-VISION 300TV + - 1048:226b + + * - 15 + - ELSA EX-VISION 500TV + - 1048:226a + + * - 16 + - ASUS TV-FM 7134 + - 1043:4842, 1043:4830, 1043:4840 + + * - 17 + - AOPEN VA1000 POWER + - 1131:7133 + + * - 18 + - BMK MPEX No Tuner + - + + * - 19 + - Compro VideoMate TV + - 185b:c100 + + * - 20 + - Matrox CronosPlus + - 102B:48d0 + + * - 21 + - 10MOONS PCI TV CAPTURE CARD + - 1131:2001 + + * - 22 + - AverMedia M156 / Medion 2819 + - 1461:a70b + + * - 23 + - BMK MPEX Tuner + - + + * - 24 + - KNC One TV-Station DVR + - 1894:a006 + + * - 25 + - ASUS TV-FM 7133 + - 1043:4843 + + * - 26 + - Pinnacle PCTV Stereo (saa7134) + - 11bd:002b + + * - 27 + - Manli MuchTV M-TV002 + - + + * - 28 + - Manli MuchTV M-TV001 + - + + * - 29 + - Nagase Sangyo TransGear 3000TV + - 1461:050c + + * - 30 + - Elitegroup ECS TVP3XP FM1216 Tuner Card(PAL-BG,FM) + - 1019:4cb4 + + * - 31 + - Elitegroup ECS TVP3XP FM1236 Tuner Card (NTSC,FM) + - 1019:4cb5 + + * - 32 + - AVACS SmartTV + - + + * - 33 + - AVerMedia DVD EZMaker + - 1461:10ff + + * - 34 + - Noval Prime TV 7133 + - + + * - 35 + - AverMedia AverTV Studio 305 + - 1461:2115 + + * - 36 + - UPMOST PURPLE TV + - 12ab:0800 + + * - 37 + - Items MuchTV Plus / IT-005 + - + + * - 38 + - Terratec Cinergy 200 TV + - 153b:1152 + + * - 39 + - LifeView FlyTV Platinum Mini + - 5168:0212, 4e42:0212, 5169:1502 + + * - 40 + - Compro VideoMate TV PVR/FM + - 185b:c100 + + * - 41 + - Compro VideoMate TV Gold+ + - 185b:c100 + + * - 42 + - Sabrent SBT-TVFM (saa7130) + - + + * - 43 + - :Zolid Xpert TV7134 + - + + * - 44 + - Empire PCI TV-Radio LE + - + + * - 45 + - Avermedia AVerTV Studio 307 + - 1461:9715 + + * - 46 + - AVerMedia Cardbus TV/Radio (E500) + - 1461:d6ee + + * - 47 + - Terratec Cinergy 400 mobile + - 153b:1162 + + * - 48 + - Terratec Cinergy 600 TV MK3 + - 153b:1158 + + * - 49 + - Compro VideoMate Gold+ Pal + - 185b:c200 + + * - 50 + - Pinnacle PCTV 300i DVB-T + PAL + - 11bd:002d + + * - 51 + - ProVideo PV952 + - 1540:9524 + + * - 52 + - AverMedia AverTV/305 + - 1461:2108 + + * - 53 + - ASUS TV-FM 7135 + - 1043:4845 + + * - 54 + - LifeView FlyTV Platinum FM / Gold + - 5168:0214, 5168:5214, 1489:0214, 5168:0304 + + * - 55 + - LifeView FlyDVB-T DUO / MSI TV@nywhere Duo + - 5168:0306, 4E42:0306 + + * - 56 + - Avermedia AVerTV 307 + - 1461:a70a + + * - 57 + - Avermedia AVerTV GO 007 FM + - 1461:f31f + + * - 58 + - ADS Tech Instant TV (saa7135) + - 1421:0350, 1421:0351, 1421:0370, 1421:1370 + + * - 59 + - Kworld/Tevion V-Stream Xpert TV PVR7134 + - + + * - 60 + - LifeView/Typhoon/Genius FlyDVB-T Duo Cardbus + - 5168:0502, 4e42:0502, 1489:0502 + + * - 61 + - Philips TOUGH DVB-T reference design + - 1131:2004 + + * - 62 + - Compro VideoMate 
TV Gold+II + - + + * - 63 + - Kworld Xpert TV PVR7134 + - + + * - 64 + - FlyTV mini Asus Digimatrix + - 1043:0210 + + * - 65 + - V-Stream Studio TV Terminator + - + + * - 66 + - Yuan TUN-900 (saa7135) + - + + * - 67 + - Beholder BeholdTV 409 FM + - 0000:4091 + + * - 68 + - GoTView 7135 PCI + - 5456:7135 + + * - 69 + - Philips EUROPA V3 reference design + - 1131:2004 + + * - 70 + - Compro Videomate DVB-T300 + - 185b:c900 + + * - 71 + - Compro Videomate DVB-T200 + - 185b:c901 + + * - 72 + - RTD Embedded Technologies VFG7350 + - 1435:7350 + + * - 73 + - RTD Embedded Technologies VFG7330 + - 1435:7330 + + * - 74 + - LifeView FlyTV Platinum Mini2 + - 14c0:1212 + + * - 75 + - AVerMedia AVerTVHD MCE A180 + - 1461:1044 + + * - 76 + - SKNet MonsterTV Mobile + - 1131:4ee9 + + * - 77 + - Pinnacle PCTV 40i/50i/110i (saa7133) + - 11bd:002e + + * - 78 + - ASUSTeK P7131 Dual + - 1043:4862 + + * - 79 + - Sedna/MuchTV PC TV Cardbus TV/Radio (ITO25 Rev:2B) + - + + * - 80 + - ASUS Digimatrix TV + - 1043:0210 + + * - 81 + - Philips Tiger reference design + - 1131:2018 + + * - 82 + - MSI TV@Anywhere plus + - 1462:6231, 1462:8624 + + * - 83 + - Terratec Cinergy 250 PCI TV + - 153b:1160 + + * - 84 + - LifeView FlyDVB Trio + - 5168:0319 + + * - 85 + - AverTV DVB-T 777 + - 1461:2c05, 1461:2c05 + + * - 86 + - LifeView FlyDVB-T / Genius VideoWonder DVB-T + - 5168:0301, 1489:0301 + + * - 87 + - ADS Instant TV Duo Cardbus PTV331 + - 0331:1421 + + * - 88 + - Tevion/KWorld DVB-T 220RF + - 17de:7201 + + * - 89 + - ELSA EX-VISION 700TV + - 1048:226c + + * - 90 + - Kworld ATSC110/115 + - 17de:7350, 17de:7352 + + * - 91 + - AVerMedia A169 B + - 1461:7360 + + * - 92 + - AVerMedia A169 B1 + - 1461:6360 + + * - 93 + - Medion 7134 Bridge #2 + - 16be:0005 + + * - 94 + - LifeView FlyDVB-T Hybrid Cardbus/MSI TV @nywhere A/D NB + - 5168:3306, 5168:3502, 5168:3307, 4e42:3502 + + * - 95 + - LifeView FlyVIDEO3000 (NTSC) + - 5169:0138 + + * - 96 + - Medion Md8800 Quadro + - 16be:0007, 16be:0008, 16be:000d + + * - 97 + - LifeView FlyDVB-S /Acorp TV134DS + - 5168:0300, 4e42:0300 + + * - 98 + - Proteus Pro 2309 + - 0919:2003 + + * - 99 + - AVerMedia TV Hybrid A16AR + - 1461:2c00 + + * - 100 + - Asus Europa2 OEM + - 1043:4860 + + * - 101 + - Pinnacle PCTV 310i + - 11bd:002f + + * - 102 + - Avermedia AVerTV Studio 507 + - 1461:9715 + + * - 103 + - Compro Videomate DVB-T200A + - + + * - 104 + - Hauppauge WinTV-HVR1110 DVB-T/Hybrid + - 0070:6700, 0070:6701, 0070:6702, 0070:6703, 0070:6704, 0070:6705 + + * - 105 + - Terratec Cinergy HT PCMCIA + - 153b:1172 + + * - 106 + - Encore ENLTV + - 1131:2342, 1131:2341, 3016:2344 + + * - 107 + - Encore ENLTV-FM + - 1131:230f + + * - 108 + - Terratec Cinergy HT PCI + - 153b:1175 + + * - 109 + - Philips Tiger - S Reference design + - + + * - 110 + - Avermedia M102 + - 1461:f31e + + * - 111 + - ASUS P7131 4871 + - 1043:4871 + + * - 112 + - ASUSTeK P7131 Hybrid + - 1043:4876 + + * - 113 + - Elitegroup ECS TVP3XP FM1246 Tuner Card (PAL,FM) + - 1019:4cb6 + + * - 114 + - KWorld DVB-T 210 + - 17de:7250 + + * - 115 + - Sabrent PCMCIA TV-PCB05 + - 0919:2003 + + * - 116 + - 10MOONS TM300 TV Card + - 1131:2304 + + * - 117 + - Avermedia Super 007 + - 1461:f01d + + * - 118 + - Beholder BeholdTV 401 + - 0000:4016 + + * - 119 + - Beholder BeholdTV 403 + - 0000:4036 + + * - 120 + - Beholder BeholdTV 403 FM + - 0000:4037 + + * - 121 + - Beholder BeholdTV 405 + - 0000:4050 + + * - 122 + - Beholder BeholdTV 405 FM + - 0000:4051 + + * - 123 + - Beholder BeholdTV 407 + - 0000:4070 + + * - 124 + - Beholder BeholdTV 407 FM + - 
0000:4071 + + * - 125 + - Beholder BeholdTV 409 + - 0000:4090 + + * - 126 + - Beholder BeholdTV 505 FM + - 5ace:5050 + + * - 127 + - Beholder BeholdTV 507 FM / BeholdTV 509 FM + - 5ace:5070, 5ace:5090 + + * - 128 + - Beholder BeholdTV Columbus TV/FM + - 0000:5201 + + * - 129 + - Beholder BeholdTV 607 FM + - 5ace:6070 + + * - 130 + - Beholder BeholdTV M6 + - 5ace:6190 + + * - 131 + - Twinhan Hybrid DTV-DVB 3056 PCI + - 1822:0022 + + * - 132 + - Genius TVGO AM11MCE + - + + * - 133 + - NXP Snake DVB-S reference design + - + + * - 134 + - Medion/Creatix CTX953 Hybrid + - 16be:0010 + + * - 135 + - MSI TV@nywhere A/D v1.1 + - 1462:8625 + + * - 136 + - AVerMedia Cardbus TV/Radio (E506R) + - 1461:f436 + + * - 137 + - AVerMedia Hybrid TV/Radio (A16D) + - 1461:f936 + + * - 138 + - Avermedia M115 + - 1461:a836 + + * - 139 + - Compro VideoMate T750 + - 185b:c900 + + * - 140 + - Avermedia DVB-S Pro A700 + - 1461:a7a1 + + * - 141 + - Avermedia DVB-S Hybrid+FM A700 + - 1461:a7a2 + + * - 142 + - Beholder BeholdTV H6 + - 5ace:6290 + + * - 143 + - Beholder BeholdTV M63 + - 5ace:6191 + + * - 144 + - Beholder BeholdTV M6 Extra + - 5ace:6193 + + * - 145 + - AVerMedia MiniPCI DVB-T Hybrid M103 + - 1461:f636, 1461:f736 + + * - 146 + - ASUSTeK P7131 Analog + - + + * - 147 + - Asus Tiger 3in1 + - 1043:4878 + + * - 148 + - Encore ENLTV-FM v5.3 + - 1a7f:2008 + + * - 149 + - Avermedia PCI pure analog (M135A) + - 1461:f11d + + * - 150 + - Zogis Real Angel 220 + - + + * - 151 + - ADS Tech Instant HDTV + - 1421:0380 + + * - 152 + - Asus Tiger Rev:1.00 + - 1043:4857 + + * - 153 + - Kworld Plus TV Analog Lite PCI + - 17de:7128 + + * - 154 + - Avermedia AVerTV GO 007 FM Plus + - 1461:f31d + + * - 155 + - Hauppauge WinTV-HVR1150 ATSC/QAM-Hybrid + - 0070:6706, 0070:6708 + + * - 156 + - Hauppauge WinTV-HVR1120 DVB-T/Hybrid + - 0070:6707, 0070:6709, 0070:670a + + * - 157 + - Avermedia AVerTV Studio 507UA + - 1461:a11b + + * - 158 + - AVerMedia Cardbus TV/Radio (E501R) + - 1461:b7e9 + + * - 159 + - Beholder BeholdTV 505 RDS + - 0000:505B + + * - 160 + - Beholder BeholdTV 507 RDS + - 0000:5071 + + * - 161 + - Beholder BeholdTV 507 RDS + - 0000:507B + + * - 162 + - Beholder BeholdTV 607 FM + - 5ace:6071 + + * - 163 + - Beholder BeholdTV 609 FM + - 5ace:6090 + + * - 164 + - Beholder BeholdTV 609 FM + - 5ace:6091 + + * - 165 + - Beholder BeholdTV 607 RDS + - 5ace:6072 + + * - 166 + - Beholder BeholdTV 607 RDS + - 5ace:6073 + + * - 167 + - Beholder BeholdTV 609 RDS + - 5ace:6092 + + * - 168 + - Beholder BeholdTV 609 RDS + - 5ace:6093 + + * - 169 + - Compro VideoMate S350/S300 + - 185b:c900 + + * - 170 + - AverMedia AverTV Studio 505 + - 1461:a115 + + * - 171 + - Beholder BeholdTV X7 + - 5ace:7595 + + * - 172 + - RoverMedia TV Link Pro FM + - 19d1:0138 + + * - 173 + - Zolid Hybrid TV Tuner PCI + - 1131:2004 + + * - 174 + - Asus Europa Hybrid OEM + - 1043:4847 + + * - 175 + - Leadtek Winfast DTV1000S + - 107d:6655 + + * - 176 + - Beholder BeholdTV 505 RDS + - 0000:5051 + + * - 177 + - Hawell HW-404M7 + - + + * - 178 + - Beholder BeholdTV H7 + - 5ace:7190 + + * - 179 + - Beholder BeholdTV A7 + - 5ace:7090 + + * - 180 + - Avermedia PCI M733A + - 1461:4155, 1461:4255 + + * - 181 + - TechoTrend TT-budget T-3000 + - 13c2:2804 + + * - 182 + - Kworld PCI SBTVD/ISDB-T Full-Seg Hybrid + - 17de:b136 + + * - 183 + - Compro VideoMate Vista M1F + - 185b:c900 + + * - 184 + - Encore ENLTV-FM 3 + - 1a7f:2108 + + * - 185 + - MagicPro ProHDTV Pro2 DMB-TH/Hybrid + - 17de:d136 + + * - 186 + - Beholder BeholdTV 501 + - 5ace:5010 + + * - 187 + - Beholder 
BeholdTV 503 FM + - 5ace:5030 + + * - 188 + - Sensoray 811/911 + - 6000:0811, 6000:0911 + + * - 189 + - Kworld PC150-U + - 17de:a134 + + * - 190 + - Asus My Cinema PS3-100 + - 1043:48cd + + * - 191 + - Hawell HW-9004V1 + - + + * - 192 + - AverMedia AverTV Satellite Hybrid+FM A706 + - 1461:2055 + + * - 193 + - WIS Voyager or compatible + - 1905:7007 + + * - 194 + - AverMedia AverTV/505 + - 1461:a10a + + * - 195 + - Leadtek Winfast TV2100 FM + - 107d:6f3a + + * - 196 + - SnaZio* TVPVR PRO + - 1779:13cf diff --git a/Documentation/media/v4l-drivers/saa7164-cardlist.rst b/Documentation/media/v4l-drivers/saa7164-cardlist.rst index 7d17d38df3bc..e28382ba82e6 100644 --- a/Documentation/media/v4l-drivers/saa7164-cardlist.rst +++ b/Documentation/media/v4l-drivers/saa7164-cardlist.rst @@ -1,21 +1,69 @@ SAA7164 cards list ================== -=========== ==================================== ==================== -Card number Card name PCI IDs -=========== ==================================== ==================== -0 Unknown -1 Generic Rev2 -2 Generic Rev3 -3 Hauppauge WinTV-HVR2250 0070:8880, 0070:8810 -4 Hauppauge WinTV-HVR2200 0070:8980 -5 Hauppauge WinTV-HVR2200 0070:8900 -6 Hauppauge WinTV-HVR2200 0070:8901 -7 Hauppauge WinTV-HVR2250 0070:8891, 0070:8851 -8 Hauppauge WinTV-HVR2250 0070:88A1 -9 Hauppauge WinTV-HVR2200 0070:8940 -10 Hauppauge WinTV-HVR2200 0070:8953 -11 Hauppauge WinTV-HVR2255(proto) 0070:f111 -12 Hauppauge WinTV-HVR2255 0070:f111 -13 Hauppauge WinTV-HVR2205 0070:f123, 0070:f120 -=========== ==================================== ==================== +.. tabularcolumns:: |p{1.4cm}|p{11.1cm}|p{4.2cm}| + +.. flat-table:: + :header-rows: 1 + :widths: 2 19 18 + :stub-columns: 0 + + * - Card number + - Card name + - PCI IDs + + * - 0 + - Unknown + - + + * - 1 + - Generic Rev2 + - + + * - 2 + - Generic Rev3 + - + + * - 3 + - Hauppauge WinTV-HVR2250 + - 0070:8880, 0070:8810 + + * - 4 + - Hauppauge WinTV-HVR2200 + - 0070:8980 + + * - 5 + - Hauppauge WinTV-HVR2200 + - 0070:8900 + + * - 6 + - Hauppauge WinTV-HVR2200 + - 0070:8901 + + * - 7 + - Hauppauge WinTV-HVR2250 + - 0070:8891, 0070:8851 + + * - 8 + - Hauppauge WinTV-HVR2250 + - 0070:88A1 + + * - 9 + - Hauppauge WinTV-HVR2200 + - 0070:8940 + + * - 10 + - Hauppauge WinTV-HVR2200 + - 0070:8953 + + * - 11 + - Hauppauge WinTV-HVR2255(proto) + - 0070:f111 + + * - 12 + - Hauppauge WinTV-HVR2255 + - 0070:f111 + + * - 13 + - Hauppauge WinTV-HVR2205 + - 0070:f123, 0070:f120 diff --git a/Documentation/media/v4l-drivers/tm6000-cardlist.rst b/Documentation/media/v4l-drivers/tm6000-cardlist.rst index ae2952683ccf..6bd083544457 100644 --- a/Documentation/media/v4l-drivers/tm6000-cardlist.rst +++ b/Documentation/media/v4l-drivers/tm6000-cardlist.rst @@ -1,24 +1,81 @@ TM6000 cards list ================= -=========== ================================================= ========================================== -Card number Card name USB IDs -=========== ================================================= ========================================== -0 Unknown tm6000 video grabber -1 Generic tm5600 board 6000:0001 -2 Generic tm6000 board -3 Generic tm6010 board 6000:0002 -4 10Moons UT 821 -5 10Moons UT 330 -6 ADSTECH Dual TV USB 06e1:f332 -7 Freecom Hybrid Stick / Moka DVB-T Receiver Dual 14aa:0620 -8 ADSTECH Mini Dual TV USB 06e1:b339 -9 Hauppauge WinTV HVR-900H / WinTV USB2-Stick 2040:6600, 2040:6601, 2040:6610, 2040:6611 -10 Beholder Wander DVB-T/TV/FM USB2.0 6000:dec0 -11 Beholder Voyager TV/FM USB2.0 6000:dec1 -12 Terratec Cinergy Hybrid XE / Cinergy 
Hybrid-Stick 0ccd:0086, 0ccd:00A5 -13 Twinhan TU501(704D1) 13d3:3240, 13d3:3241, 13d3:3243, 13d3:3264 -14 Beholder Wander Lite DVB-T/TV/FM USB2.0 6000:dec2 -15 Beholder Voyager Lite TV/FM USB2.0 6000:dec3 -16 Terratec Grabster AV 150/250 MX 0ccd:0079 -=========== ================================================= ========================================== +.. tabularcolumns:: |p{1.4cm}|p{11.1cm}|p{4.2cm}| + +.. flat-table:: + :header-rows: 1 + :widths: 2 19 18 + :stub-columns: 0 + + * - Card number + - Card name + - USB IDs + + * - 0 + - Unknown tm6000 video grabber + - + + * - 1 + - Generic tm5600 board + - 6000:0001 + + * - 2 + - Generic tm6000 board + - + + * - 3 + - Generic tm6010 board + - 6000:0002 + + * - 4 + - 10Moons UT 821 + - + + * - 5 + - 10Moons UT 330 + - + + * - 6 + - ADSTECH Dual TV USB + - 06e1:f332 + + * - 7 + - Freecom Hybrid Stick / Moka DVB-T Receiver Dual + - 14aa:0620 + + * - 8 + - ADSTECH Mini Dual TV USB + - 06e1:b339 + + * - 9 + - Hauppauge WinTV HVR-900H / WinTV USB2-Stick + - 2040:6600, 2040:6601, 2040:6610, 2040:6611 + + * - 10 + - Beholder Wander DVB-T/TV/FM USB2.0 + - 6000:dec0 + + * - 11 + - Beholder Voyager TV/FM USB2.0 + - 6000:dec1 + + * - 12 + - Terratec Cinergy Hybrid XE / Cinergy Hybrid-Stick + - 0ccd:0086, 0ccd:00A5 + + * - 13 + - Twinhan TU501(704D1) + - 13d3:3240, 13d3:3241, 13d3:3243, 13d3:3264 + + * - 14 + - Beholder Wander Lite DVB-T/TV/FM USB2.0 + - 6000:dec2 + + * - 15 + - Beholder Voyager Lite TV/FM USB2.0 + - 6000:dec3 + + * - 16 + - Terratec Grabster AV 150/250 MX + - 0ccd:0079 diff --git a/Documentation/media/v4l-drivers/usbvision-cardlist.rst b/Documentation/media/v4l-drivers/usbvision-cardlist.rst index 44d53dff0984..5a8ffbfc204e 100644 --- a/Documentation/media/v4l-drivers/usbvision-cardlist.rst +++ b/Documentation/media/v4l-drivers/usbvision-cardlist.rst @@ -1,74 +1,281 @@ USBvision cards list ==================== -=========== ======================================================== ========= -Card number Card name USB IDs -=========== ======================================================== ========= -0 Xanboo 0a6f:0400 -1 Belkin USB VideoBus II Adapter 050d:0106 -2 Belkin Components USB VideoBus 050d:0207 -3 Belkin USB VideoBus II 050d:0208 -4 echoFX InterView Lite 0571:0002 -5 USBGear USBG-V1 resp. HAMA USB 0573:0003 -6 D-Link V100 0573:0400 -7 X10 USB Camera 0573:2000 -8 Hauppauge WinTV USB Live (PAL B/G) 0573:2d00 -9 Hauppauge WinTV USB Live Pro (NTSC M/N) 0573:2d01 -10 Zoran Co. 
PMD (Nogatech) AV-grabber Manhattan 0573:2101 -11 Nogatech USB-TV (NTSC) FM 0573:4100 -12 PNY USB-TV (NTSC) FM 0573:4110 -13 PixelView PlayTv-USB PRO (PAL) FM 0573:4450 -14 ZTV ZT-721 2.4GHz USB A/V Receiver 0573:4550 -15 Hauppauge WinTV USB (NTSC M/N) 0573:4d00 -16 Hauppauge WinTV USB (PAL B/G) 0573:4d01 -17 Hauppauge WinTV USB (PAL I) 0573:4d02 -18 Hauppauge WinTV USB (PAL/SECAM L) 0573:4d03 -19 Hauppauge WinTV USB (PAL D/K) 0573:4d04 -20 Hauppauge WinTV USB (NTSC FM) 0573:4d10 -21 Hauppauge WinTV USB (PAL B/G FM) 0573:4d11 -22 Hauppauge WinTV USB (PAL I FM) 0573:4d12 -23 Hauppauge WinTV USB (PAL D/K FM) 0573:4d14 -24 Hauppauge WinTV USB Pro (NTSC M/N) 0573:4d2a -25 Hauppauge WinTV USB Pro (NTSC M/N) V2 0573:4d2b -26 Hauppauge WinTV USB Pro (PAL/SECAM B/G/I/D/K/L) 0573:4d2c -27 Hauppauge WinTV USB Pro (NTSC M/N) V3 0573:4d20 -28 Hauppauge WinTV USB Pro (PAL B/G) 0573:4d21 -29 Hauppauge WinTV USB Pro (PAL I) 0573:4d22 -30 Hauppauge WinTV USB Pro (PAL/SECAM L) 0573:4d23 -31 Hauppauge WinTV USB Pro (PAL D/K) 0573:4d24 -32 Hauppauge WinTV USB Pro (PAL/SECAM BGDK/I/L) 0573:4d25 -33 Hauppauge WinTV USB Pro (PAL/SECAM BGDK/I/L) V2 0573:4d26 -34 Hauppauge WinTV USB Pro (PAL B/G) V2 0573:4d27 -35 Hauppauge WinTV USB Pro (PAL B/G,D/K) 0573:4d28 -36 Hauppauge WinTV USB Pro (PAL I,D/K) 0573:4d29 -37 Hauppauge WinTV USB Pro (NTSC M/N FM) 0573:4d30 -38 Hauppauge WinTV USB Pro (PAL B/G FM) 0573:4d31 -39 Hauppauge WinTV USB Pro (PAL I FM) 0573:4d32 -40 Hauppauge WinTV USB Pro (PAL D/K FM) 0573:4d34 -41 Hauppauge WinTV USB Pro (Temic PAL/SECAM B/G/I/D/K/L FM) 0573:4d35 -42 Hauppauge WinTV USB Pro (Temic PAL B/G FM) 0573:4d36 -43 Hauppauge WinTV USB Pro (PAL/SECAM B/G/I/D/K/L FM) 0573:4d37 -44 Hauppauge WinTV USB Pro (NTSC M/N FM) V2 0573:4d38 -45 Camtel Technology USB TV Genie Pro FM Model TVB330 0768:0006 -46 Digital Video Creator I 07d0:0001 -47 Global Village GV-007 (NTSC) 07d0:0002 -48 Dazzle Fusion Model DVC-50 Rev 1 (NTSC) 07d0:0003 -49 Dazzle Fusion Model DVC-80 Rev 1 (PAL) 07d0:0004 -50 Dazzle Fusion Model DVC-90 Rev 1 (SECAM) 07d0:0005 -51 Eskape Labs MyTV2Go 07f8:9104 -52 Pinnacle Studio PCTV USB (PAL) 2304:010d -53 Pinnacle Studio PCTV USB (SECAM) 2304:0109 -54 Pinnacle Studio PCTV USB (PAL) FM 2304:0110 -55 Miro PCTV USB 2304:0111 -56 Pinnacle Studio PCTV USB (NTSC) FM 2304:0112 -57 Pinnacle Studio PCTV USB (PAL) FM V2 2304:0210 -58 Pinnacle Studio PCTV USB (NTSC) FM V2 2304:0212 -59 Pinnacle Studio PCTV USB (PAL) FM V3 2304:0214 -60 Pinnacle Studio Linx Video input cable (NTSC) 2304:0300 -61 Pinnacle Studio Linx Video input cable (PAL) 2304:0301 -62 Pinnacle PCTV Bungee USB (PAL) FM 2304:0419 -63 Hauppauge WinTv-USB 2400:4200 -64 Pinnacle Studio PCTV USB (NTSC) FM V3 2304:0113 -65 Nogatech USB MicroCam NTSC (NV3000N) 0573:3000 -66 Nogatech USB MicroCam PAL (NV3001P) 0573:3001 -=========== ======================================================== ========= +.. tabularcolumns:: |p{1.4cm}|p{11.1cm}|p{4.2cm}| + +.. flat-table:: + :header-rows: 1 + :widths: 2 19 18 + :stub-columns: 0 + + * - Card number + - Card name + - USB IDs + + * - 0 + - Xanboo + - 0a6f:0400 + + * - 1 + - Belkin USB VideoBus II Adapter + - 050d:0106 + + * - 2 + - Belkin Components USB VideoBus + - 050d:0207 + + * - 3 + - Belkin USB VideoBus II + - 050d:0208 + + * - 4 + - echoFX InterView Lite + - 0571:0002 + + * - 5 + - USBGear USBG-V1 resp. 
HAMA USB + - 0573:0003 + + * - 6 + - D-Link V100 + - 0573:0400 + + * - 7 + - X10 USB Camera + - 0573:2000 + + * - 8 + - Hauppauge WinTV USB Live (PAL B/G) + - 0573:2d00 + + * - 9 + - Hauppauge WinTV USB Live Pro (NTSC M/N) + - 0573:2d01 + + * - 10 + - Zoran Co. PMD (Nogatech) AV-grabber Manhattan + - 0573:2101 + + * - 11 + - Nogatech USB-TV (NTSC) FM + - 0573:4100 + + * - 12 + - PNY USB-TV (NTSC) FM + - 0573:4110 + + * - 13 + - PixelView PlayTv-USB PRO (PAL) FM + - 0573:4450 + + * - 14 + - ZTV ZT-721 2.4GHz USB A/V Receiver + - 0573:4550 + + * - 15 + - Hauppauge WinTV USB (NTSC M/N) + - 0573:4d00 + + * - 16 + - Hauppauge WinTV USB (PAL B/G) + - 0573:4d01 + + * - 17 + - Hauppauge WinTV USB (PAL I) + - 0573:4d02 + + * - 18 + - Hauppauge WinTV USB (PAL/SECAM L) + - 0573:4d03 + + * - 19 + - Hauppauge WinTV USB (PAL D/K) + - 0573:4d04 + + * - 20 + - Hauppauge WinTV USB (NTSC FM) + - 0573:4d10 + + * - 21 + - Hauppauge WinTV USB (PAL B/G FM) + - 0573:4d11 + + * - 22 + - Hauppauge WinTV USB (PAL I FM) + - 0573:4d12 + + * - 23 + - Hauppauge WinTV USB (PAL D/K FM) + - 0573:4d14 + + * - 24 + - Hauppauge WinTV USB Pro (NTSC M/N) + - 0573:4d2a + + * - 25 + - Hauppauge WinTV USB Pro (NTSC M/N) V2 + - 0573:4d2b + + * - 26 + - Hauppauge WinTV USB Pro (PAL/SECAM B/G/I/D/K/L) + - 0573:4d2c + + * - 27 + - Hauppauge WinTV USB Pro (NTSC M/N) V3 + - 0573:4d20 + + * - 28 + - Hauppauge WinTV USB Pro (PAL B/G) + - 0573:4d21 + + * - 29 + - Hauppauge WinTV USB Pro (PAL I) + - 0573:4d22 + + * - 30 + - Hauppauge WinTV USB Pro (PAL/SECAM L) + - 0573:4d23 + + * - 31 + - Hauppauge WinTV USB Pro (PAL D/K) + - 0573:4d24 + + * - 32 + - Hauppauge WinTV USB Pro (PAL/SECAM BGDK/I/L) + - 0573:4d25 + + * - 33 + - Hauppauge WinTV USB Pro (PAL/SECAM BGDK/I/L) V2 + - 0573:4d26 + + * - 34 + - Hauppauge WinTV USB Pro (PAL B/G) V2 + - 0573:4d27 + + * - 35 + - Hauppauge WinTV USB Pro (PAL B/G,D/K) + - 0573:4d28 + + * - 36 + - Hauppauge WinTV USB Pro (PAL I,D/K) + - 0573:4d29 + + * - 37 + - Hauppauge WinTV USB Pro (NTSC M/N FM) + - 0573:4d30 + + * - 38 + - Hauppauge WinTV USB Pro (PAL B/G FM) + - 0573:4d31 + + * - 39 + - Hauppauge WinTV USB Pro (PAL I FM) + - 0573:4d32 + + * - 40 + - Hauppauge WinTV USB Pro (PAL D/K FM) + - 0573:4d34 + + * - 41 + - Hauppauge WinTV USB Pro (Temic PAL/SECAM B/G/I/D/K/L FM) + - 0573:4d35 + + * - 42 + - Hauppauge WinTV USB Pro (Temic PAL B/G FM) + - 0573:4d36 + + * - 43 + - Hauppauge WinTV USB Pro (PAL/SECAM B/G/I/D/K/L FM) + - 0573:4d37 + + * - 44 + - Hauppauge WinTV USB Pro (NTSC M/N FM) V2 + - 0573:4d38 + + * - 45 + - Camtel Technology USB TV Genie Pro FM Model TVB330 + - 0768:0006 + + * - 46 + - Digital Video Creator I + - 07d0:0001 + + * - 47 + - Global Village GV-007 (NTSC) + - 07d0:0002 + + * - 48 + - Dazzle Fusion Model DVC-50 Rev 1 (NTSC) + - 07d0:0003 + + * - 49 + - Dazzle Fusion Model DVC-80 Rev 1 (PAL) + - 07d0:0004 + + * - 50 + - Dazzle Fusion Model DVC-90 Rev 1 (SECAM) + - 07d0:0005 + + * - 51 + - Eskape Labs MyTV2Go + - 07f8:9104 + + * - 52 + - Pinnacle Studio PCTV USB (PAL) + - 2304:010d + + * - 53 + - Pinnacle Studio PCTV USB (SECAM) + - 2304:0109 + + * - 54 + - Pinnacle Studio PCTV USB (PAL) FM + - 2304:0110 + + * - 55 + - Miro PCTV USB + - 2304:0111 + + * - 56 + - Pinnacle Studio PCTV USB (NTSC) FM + - 2304:0112 + + * - 57 + - Pinnacle Studio PCTV USB (PAL) FM V2 + - 2304:0210 + + * - 58 + - Pinnacle Studio PCTV USB (NTSC) FM V2 + - 2304:0212 + + * - 59 + - Pinnacle Studio PCTV USB (PAL) FM V3 + - 2304:0214 + + * - 60 + - Pinnacle Studio Linx Video input cable (NTSC) + - 2304:0300 + + * - 
61 + - Pinnacle Studio Linx Video input cable (PAL) + - 2304:0301 + + * - 62 + - Pinnacle PCTV Bungee USB (PAL) FM + - 2304:0419 + + * - 63 + - Hauppauge WinTv-USB + - 2400:4200 + + * - 64 + - Pinnacle Studio PCTV USB (NTSC) FM V3 + - 2304:0113 + + * - 65 + - Nogatech USB MicroCam NTSC (NV3000N) + - 0573:3000 + + * - 66 + - Nogatech USB MicroCam PAL (NV3001P) + - 0573:3001 diff --git a/Documentation/media/v4l-drivers/vivid.rst b/Documentation/media/v4l-drivers/vivid.rst index 3e44b2217f2d..089595ce11c5 100644 --- a/Documentation/media/v4l-drivers/vivid.rst +++ b/Documentation/media/v4l-drivers/vivid.rst @@ -829,6 +829,7 @@ The following two controls are only valid for video and vbi capture. The following two controls are only valid for video capture. - DV Timings Signal Mode: + selects the behavior of VIDIOC_QUERY_DV_TIMINGS: what should it return? diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX index c6beb5f1637f..7a79b3587dd3 100644 --- a/Documentation/networking/00-INDEX +++ b/Documentation/networking/00-INDEX @@ -30,8 +30,6 @@ atm.txt - info on where to get ATM programs and support for Linux. ax25.txt - info on using AX.25 and NET/ROM code for Linux -batman-adv.txt - - B.A.T.M.A.N routing protocol on top of layer 2 Ethernet Frames. baycom.txt - info on the driver for Baycom style amateur radio modems bonding.txt diff --git a/Documentation/networking/batman-adv.rst b/Documentation/networking/batman-adv.rst new file mode 100644 index 000000000000..a342b2cc3dc6 --- /dev/null +++ b/Documentation/networking/batman-adv.rst @@ -0,0 +1,220 @@ +========== +batman-adv +========== + +Batman advanced is a new approach to wireless networking which does no longer +operate on the IP basis. Unlike the batman daemon, which exchanges information +using UDP packets and sets routing tables, batman-advanced operates on ISO/OSI +Layer 2 only and uses and routes (or better: bridges) Ethernet Frames. It +emulates a virtual network switch of all nodes participating. Therefore all +nodes appear to be link local, thus all higher operating protocols won't be +affected by any changes within the network. You can run almost any protocol +above batman advanced, prominent examples are: IPv4, IPv6, DHCP, IPX. + +Batman advanced was implemented as a Linux kernel driver to reduce the overhead +to a minimum. It does not depend on any (other) network driver, and can be used +on wifi as well as ethernet lan, vpn, etc ... (anything with ethernet-style +layer 2). + + +Configuration +============= + +Load the batman-adv module into your kernel:: + + $ insmod batman-adv.ko + +The module is now waiting for activation. You must add some interfaces on which +batman can operate. After loading the module batman advanced will scan your +systems interfaces to search for compatible interfaces. Once found, it will +create subfolders in the ``/sys`` directories of each supported interface, +e.g.:: + + $ ls /sys/class/net/eth0/batman_adv/ + elp_interval iface_status mesh_iface throughput_override + +If an interface does not have the ``batman_adv`` subfolder, it probably is not +supported. Not supported interfaces are: loopback, non-ethernet and batman's +own interfaces. + +Note: After the module was loaded it will continuously watch for new +interfaces to verify the compatibility. There is no need to reload the module +if you plug your USB wifi adapter into your machine after batman advanced was +initially loaded. 
+ +The batman-adv soft-interface can be created using the iproute2 tool ``ip``:: + + $ ip link add name bat0 type batadv + +To activate a given interface simply attach it to the ``bat0`` interface:: + + $ ip link set dev eth0 master bat0 + +Repeat this step for all interfaces you wish to add. Now batman starts +using/broadcasting on this/these interface(s). + +By reading the "iface_status" file you can check its status:: + + $ cat /sys/class/net/eth0/batman_adv/iface_status + active + +To deactivate an interface you have to detach it from the "bat0" interface:: + + $ ip link set dev eth0 nomaster + + +All mesh wide settings can be found in batman's own interface folder:: + + $ ls /sys/class/net/bat0/mesh/ + aggregated_ogms fragmentation isolation_mark routing_algo + ap_isolation gw_bandwidth log_level vlan0 + bonding gw_mode multicast_mode + bridge_loop_avoidance gw_sel_class network_coding + distributed_arp_table hop_penalty orig_interval + +There is a special folder for debugging information:: + + $ ls /sys/kernel/debug/batman_adv/bat0/ + bla_backbone_table log neighbors transtable_local + bla_claim_table mcast_flags originators + dat_cache nc socket + gateways nc_nodes transtable_global + +Some of the files contain all sort of status information regarding the mesh +network. For example, you can view the table of originators (mesh +participants) with:: + + $ cat /sys/kernel/debug/batman_adv/bat0/originators + +Other files allow to change batman's behaviour to better fit your requirements. +For instance, you can check the current originator interval (value in +milliseconds which determines how often batman sends its broadcast packets):: + + $ cat /sys/class/net/bat0/mesh/orig_interval + 1000 + +and also change its value:: + + $ echo 3000 > /sys/class/net/bat0/mesh/orig_interval + +In very mobile scenarios, you might want to adjust the originator interval to a +lower value. This will make the mesh more responsive to topology changes, but +will also increase the overhead. + + +Usage +===== + +To make use of your newly created mesh, batman advanced provides a new +interface "bat0" which you should use from this point on. All interfaces added +to batman advanced are not relevant any longer because batman handles them for +you. Basically, one "hands over" the data by using the batman interface and +batman will make sure it reaches its destination. + +The "bat0" interface can be used like any other regular interface. It needs an +IP address which can be either statically configured or dynamically (by using +DHCP or similar services):: + + NodeA: ip link set up dev bat0 + NodeA: ip addr add 192.168.0.1/24 dev bat0 + + NodeB: ip link set up dev bat0 + NodeB: ip addr add 192.168.0.2/24 dev bat0 + NodeB: ping 192.168.0.1 + +Note: In order to avoid problems remove all IP addresses previously assigned to +interfaces now used by batman advanced, e.g.:: + + $ ip addr flush dev eth0 + + +Logging/Debugging +================= + +All error messages, warnings and information messages are sent to the kernel +log. Depending on your operating system distribution this can be read in one of +a number of ways. Try using the commands: ``dmesg``, ``logread``, or looking in +the files ``/var/log/kern.log`` or ``/var/log/syslog``. All batman-adv messages +are prefixed with "batman-adv:" So to see just these messages try:: + + $ dmesg | grep batman-adv + +When investigating problems with your mesh network, it is sometimes necessary to +see more detail debug messages. 
This must be enabled when compiling the +batman-adv module. When building batman-adv as part of kernel, use "make +menuconfig" and enable the option ``B.A.T.M.A.N. debugging`` +(``CONFIG_BATMAN_ADV_DEBUG=y``). + +Those additional debug messages can be accessed using a special file in +debugfs:: + + $ cat /sys/kernel/debug/batman_adv/bat0/log + +The additional debug output is by default disabled. It can be enabled during +run time. Following log_levels are defined: + +.. flat-table:: + + * - 0 + - All debug output disabled + * - 1 + - Enable messages related to routing / flooding / broadcasting + * - 2 + - Enable messages related to route added / changed / deleted + * - 4 + - Enable messages related to translation table operations + * - 8 + - Enable messages related to bridge loop avoidance + * - 16 + - Enable messages related to DAT, ARP snooping and parsing + * - 32 + - Enable messages related to network coding + * - 64 + - Enable messages related to multicast + * - 128 + - Enable messages related to throughput meter + * - 255 + - Enable all messages + +The debug output can be changed at runtime using the file +``/sys/class/net/bat0/mesh/log_level``. e.g.:: + + $ echo 6 > /sys/class/net/bat0/mesh/log_level + +will enable debug messages for when routes change. + +Counters for different types of packets entering and leaving the batman-adv +module are available through ethtool:: + + $ ethtool --statistics bat0 + + +batctl +====== + +As batman advanced operates on layer 2, all hosts participating in the virtual +switch are completely transparent for all protocols above layer 2. Therefore +the common diagnosis tools do not work as expected. To overcome these problems, +batctl was created. At the moment the batctl contains ping, traceroute, tcpdump +and interfaces to the kernel module settings. + +For more information, please see the manpage (``man batctl``). + +batctl is available on https://www.open-mesh.org/ + + +Contact +======= + +Please send us comments, experiences, questions, anything :) + +IRC: + #batman on irc.freenode.org +Mailing-list: + b.a.t.m.a.n@open-mesh.org (optional subscription at + https://lists.open-mesh.org/mm/listinfo/b.a.t.m.a.n) + +You can also contact the Authors: + +* Marek Lindner +* Simon Wunderlich diff --git a/Documentation/networking/batman-adv.txt b/Documentation/networking/batman-adv.txt deleted file mode 100644 index ccf94677b240..000000000000 --- a/Documentation/networking/batman-adv.txt +++ /dev/null @@ -1,215 +0,0 @@ -BATMAN-ADV ----------- - -Batman advanced is a new approach to wireless networking which -does no longer operate on the IP basis. Unlike the batman daemon, -which exchanges information using UDP packets and sets routing -tables, batman-advanced operates on ISO/OSI Layer 2 only and uses -and routes (or better: bridges) Ethernet Frames. It emulates a -virtual network switch of all nodes participating. Therefore all -nodes appear to be link local, thus all higher operating proto- -cols won't be affected by any changes within the network. You can -run almost any protocol above batman advanced, prominent examples -are: IPv4, IPv6, DHCP, IPX. - -Batman advanced was implemented as a Linux kernel driver to re- -duce the overhead to a minimum. It does not depend on any (other) -network driver, and can be used on wifi as well as ethernet lan, -vpn, etc ... (anything with ethernet-style layer 2). - - -CONFIGURATION -------------- - -Load the batman-adv module into your kernel: - -# insmod batman-adv.ko - -The module is now waiting for activation. 
You must add some in- -terfaces on which batman can operate. After loading the module -batman advanced will scan your systems interfaces to search for -compatible interfaces. Once found, it will create subfolders in -the /sys directories of each supported interface, e.g. - -# ls /sys/class/net/eth0/batman_adv/ -# elp_interval iface_status mesh_iface throughput_override - -If an interface does not have the "batman_adv" subfolder it prob- -ably is not supported. Not supported interfaces are: loopback, -non-ethernet and batman's own interfaces. - -Note: After the module was loaded it will continuously watch for -new interfaces to verify the compatibility. There is no need to -reload the module if you plug your USB wifi adapter into your ma- -chine after batman advanced was initially loaded. - -The batman-adv soft-interface can be created using the iproute2 -tool "ip" - -# ip link add name bat0 type batadv - -To activate a given interface simply attach it to the "bat0" -interface - -# ip link set dev eth0 master bat0 - -Repeat this step for all interfaces you wish to add. Now batman -starts using/broadcasting on this/these interface(s). - -By reading the "iface_status" file you can check its status: - -# cat /sys/class/net/eth0/batman_adv/iface_status -# active - -To deactivate an interface you have to detach it from the -"bat0" interface: - -# ip link set dev eth0 nomaster - - -All mesh wide settings can be found in batman's own interface -folder: - -# ls /sys/class/net/bat0/mesh/ -# aggregated_ogms fragmentation isolation_mark routing_algo -# ap_isolation gw_bandwidth log_level vlan0 -# bonding gw_mode multicast_mode -# bridge_loop_avoidance gw_sel_class network_coding -# distributed_arp_table hop_penalty orig_interval - -There is a special folder for debugging information: - -# ls /sys/kernel/debug/batman_adv/bat0/ -# bla_backbone_table log neighbors transtable_local -# bla_claim_table mcast_flags originators -# dat_cache nc socket -# gateways nc_nodes transtable_global - -Some of the files contain all sort of status information regard- -ing the mesh network. For example, you can view the table of -originators (mesh participants) with: - -# cat /sys/kernel/debug/batman_adv/bat0/originators - -Other files allow to change batman's behaviour to better fit your -requirements. For instance, you can check the current originator -interval (value in milliseconds which determines how often batman -sends its broadcast packets): - -# cat /sys/class/net/bat0/mesh/orig_interval -# 1000 - -and also change its value: - -# echo 3000 > /sys/class/net/bat0/mesh/orig_interval - -In very mobile scenarios, you might want to adjust the originator -interval to a lower value. This will make the mesh more respon- -sive to topology changes, but will also increase the overhead. - - -USAGE ------ - -To make use of your newly created mesh, batman advanced provides -a new interface "bat0" which you should use from this point on. -All interfaces added to batman advanced are not relevant any -longer because batman handles them for you. Basically, one "hands -over" the data by using the batman interface and batman will make -sure it reaches its destination. - -The "bat0" interface can be used like any other regular inter- -face. 
It needs an IP address which can be either statically con- -figured or dynamically (by using DHCP or similar services): - -# NodeA: ip link set up dev bat0 -# NodeA: ip addr add 192.168.0.1/24 dev bat0 - -# NodeB: ip link set up dev bat0 -# NodeB: ip addr add 192.168.0.2/24 dev bat0 -# NodeB: ping 192.168.0.1 - -Note: In order to avoid problems remove all IP addresses previ- -ously assigned to interfaces now used by batman advanced, e.g. - -# ip addr flush dev eth0 - - -LOGGING/DEBUGGING ------------------ - -All error messages, warnings and information messages are sent to -the kernel log. Depending on your operating system distribution -this can be read in one of a number of ways. Try using the com- -mands: dmesg, logread, or looking in the files /var/log/kern.log -or /var/log/syslog. All batman-adv messages are prefixed with -"batman-adv:" So to see just these messages try - -# dmesg | grep batman-adv - -When investigating problems with your mesh network it is some- -times necessary to see more detail debug messages. This must be -enabled when compiling the batman-adv module. When building bat- -man-adv as part of kernel, use "make menuconfig" and enable the -option "B.A.T.M.A.N. debugging". - -Those additional debug messages can be accessed using a special -file in debugfs - -# cat /sys/kernel/debug/batman_adv/bat0/log - -The additional debug output is by default disabled. It can be en- -abled during run time. Following log_levels are defined: - - 0 - All debug output disabled - 1 - Enable messages related to routing / flooding / broadcasting - 2 - Enable messages related to route added / changed / deleted - 4 - Enable messages related to translation table operations - 8 - Enable messages related to bridge loop avoidance - 16 - Enable messages related to DAT, ARP snooping and parsing - 32 - Enable messages related to network coding - 64 - Enable messages related to multicast -128 - Enable messages related to throughput meter -255 - Enable all messages - -The debug output can be changed at runtime using the file -/sys/class/net/bat0/mesh/log_level. e.g. - -# echo 6 > /sys/class/net/bat0/mesh/log_level - -will enable debug messages for when routes change. - -Counters for different types of packets entering and leaving the -batman-adv module are available through ethtool: - -# ethtool --statistics bat0 - - -BATCTL ------- - -As batman advanced operates on layer 2 all hosts participating in -the virtual switch are completely transparent for all protocols -above layer 2. Therefore the common diagnosis tools do not work -as expected. To overcome these problems batctl was created. At -the moment the batctl contains ping, traceroute, tcpdump and -interfaces to the kernel module settings. - -For more information, please see the manpage (man batctl). 
- -batctl is available on https://www.open-mesh.org/ - - -CONTACT -------- - -Please send us comments, experiences, questions, anything :) - -IRC: #batman on irc.freenode.org -Mailing-list: b.a.t.m.a.n@open-mesh.org (optional subscription - at https://lists.open-mesh.org/mm/listinfo/b.a.t.m.a.n) - -You can also contact the Authors: - -Marek Lindner -Simon Wunderlich diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt index 57f52cdce32e..9ba04c0bab8d 100644 --- a/Documentation/networking/bonding.txt +++ b/Documentation/networking/bonding.txt @@ -2387,7 +2387,7 @@ broadcast: Like active-backup, there is not much advantage to this and packet type ID), so in a "gatewayed" configuration, all outgoing traffic will generally use the same device. Incoming traffic may also end up on a single device, but that is - dependent upon the balancing policy of the peer's 8023.ad + dependent upon the balancing policy of the peer's 802.3ad implementation. In a "local" configuration, traffic will be distributed across the devices in the bond. diff --git a/Documentation/networking/dpaa.txt b/Documentation/networking/dpaa.txt index 76e016d4d344..f88194f71c54 100644 --- a/Documentation/networking/dpaa.txt +++ b/Documentation/networking/dpaa.txt @@ -13,6 +13,7 @@ Contents - Configuring DPAA Ethernet in your kernel - DPAA Ethernet Frame Processing - DPAA Ethernet Features + - DPAA IRQ Affinity and Receive Side Scaling - Debugging DPAA Ethernet Overview @@ -147,7 +148,10 @@ gradually. The driver has Rx and Tx checksum offloading for UDP and TCP. Currently the Rx checksum offload feature is enabled by default and cannot be controlled through -ethtool. +ethtool. Also, rx-flow-hash and rx-hashing was added. The addition of RSS +provides a big performance boost for the forwarding scenarios, allowing +different traffic flows received by one interface to be processed by different +CPUs in parallel. The driver has support for multiple prioritized Tx traffic classes. Priorities range from 0 (lowest) to 3 (highest). These are mapped to HW workqueues with @@ -166,6 +170,68 @@ classes as follows: tc qdisc add dev root handle 1: \ mqprio num_tc 4 map 0 0 0 0 1 1 1 1 2 2 2 2 3 3 3 3 hw 1 +DPAA IRQ Affinity and Receive Side Scaling +========================================== + +Traffic coming on the DPAA Rx queues or on the DPAA Tx confirmation +queues is seen by the CPU as ingress traffic on a certain portal. +The DPAA QMan portal interrupts are affined each to a certain CPU. +The same portal interrupt services all the QMan portal consumers. + +By default the DPAA Ethernet driver enables RSS, making use of the +DPAA FMan Parser and Keygen blocks to distribute traffic on 128 +hardware frame queues using a hash on IP v4/v6 source and destination +and L4 source and destination ports, in present in the received frame. +When RSS is disabled, all traffic received by a certain interface is +received on the default Rx frame queue. The default DPAA Rx frame +queues are configured to put the received traffic into a pool channel +that allows any available CPU portal to dequeue the ingress traffic. +The default frame queues have the HOLDACTIVE option set, ensuring that +traffic bursts from a certain queue are serviced by the same CPU. +This ensures a very low rate of frame reordering. A drawback of this +is that only one CPU at a time can service the traffic received by a +certain interface when RSS is not enabled. 
+ +To implement RSS, the DPAA Ethernet driver allocates an extra set of +128 Rx frame queues that are configured to dedicated channels, in a +round-robin manner. The mapping of the frame queues to CPUs is now +hardcoded, there is no indirection table to move traffic for a certain +FQ (hash result) to another CPU. The ingress traffic arriving on one +of these frame queues will arrive at the same portal and will always +be processed by the same CPU. This ensures intra-flow order preservation +and workload distribution for multiple traffic flows. + +RSS can be turned off for a certain interface using ethtool, i.e. + + # ethtool -N fm1-mac9 rx-flow-hash tcp4 "" + +To turn it back on, one needs to set rx-flow-hash for tcp4/6 or udp4/6: + + # ethtool -N fm1-mac9 rx-flow-hash udp4 sfdn + +There is no independent control for individual protocols, any command +run for one of tcp4|udp4|ah4|esp4|sctp4|tcp6|udp6|ah6|esp6|sctp6 is +going to control the rx-flow-hashing for all protocols on that interface. + +Besides using the FMan Keygen computed hash for spreading traffic on the +128 Rx FQs, the DPAA Ethernet driver also sets the skb hash value when +the NETIF_F_RXHASH feature is on (active by default). This can be turned +on or off through ethtool, i.e.: + + # ethtool -K fm1-mac9 rx-hashing off + # ethtool -k fm1-mac9 | grep hash + receive-hashing: off + # ethtool -K fm1-mac9 rx-hashing on + Actual changes: + receive-hashing: on + # ethtool -k fm1-mac9 | grep hash + receive-hashing: on + +Please note that Rx hashing depends upon the rx-flow-hashing being on +for that interface - turning off rx-flow-hashing will also disable the +rx-hashing (without ethtool reporting it as off as that depends on the +NETIF_F_RXHASH feature flag). + Debugging ========= diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt index b69b205501de..87814859cfc2 100644 --- a/Documentation/networking/filter.txt +++ b/Documentation/networking/filter.txt @@ -45,7 +45,7 @@ in many more places. There's xt_bpf for netfilter, cls_bpf in the kernel qdisc layer, SECCOMP-BPF (SECure COMPuting [1]), and lots of other places such as team driver, PTP code, etc where BPF is being used. - [1] Documentation/prctl/seccomp_filter.txt + [1] Documentation/userspace-api/seccomp_filter.rst Original BPF paper: @@ -337,7 +337,7 @@ Examples for low-level BPF: jeq #14, good /* __NR_rt_sigprocmask */ jeq #13, good /* __NR_rt_sigaction */ jeq #35, good /* __NR_nanosleep */ - bad: ret #0 /* SECCOMP_RET_KILL */ + bad: ret #0 /* SECCOMP_RET_KILL_THREAD */ good: ret #0x7fff0000 /* SECCOMP_RET_ALLOW */ The above example code can be placed into a file (here called "foo"), and @@ -596,8 +596,8 @@ skb pointer). All constraints and restrictions from bpf_check_classic() apply before a conversion to the new layout is being done behind the scenes! Currently, the classic BPF format is being used for JITing on most 32-bit -architectures, whereas x86-64, aarch64, s390x, powerpc64, sparc64 perform JIT -compilation from eBPF instruction set. +architectures, whereas x86-64, aarch64, s390x, powerpc64, sparc64, arm32 perform +JIT compilation from eBPF instruction set. Some core changes of the new internal format: @@ -793,7 +793,7 @@ Some core changes of the new internal format: bpf_exit After the call the registers R1-R5 contain junk values and cannot be read. - In the future an eBPF verifier can be used to validate internal BPF programs. + An in-kernel eBPF verifier is used to validate internal BPF programs. 
Also in the new design, eBPF is limited to 4096 insns, which means that any program will terminate quickly and will only call a fixed number of kernel @@ -906,6 +906,10 @@ If BPF_CLASS(code) == BPF_JMP, BPF_OP(code) is one of: BPF_JSGE 0x70 /* eBPF only: signed '>=' */ BPF_CALL 0x80 /* eBPF only: function call */ BPF_EXIT 0x90 /* eBPF only: function return */ + BPF_JLT 0xa0 /* eBPF only: unsigned '<' */ + BPF_JLE 0xb0 /* eBPF only: unsigned '<=' */ + BPF_JSLT 0xc0 /* eBPF only: signed '<' */ + BPF_JSLE 0xd0 /* eBPF only: signed '<=' */ So BPF_ADD | BPF_X | BPF_ALU means 32-bit addition in both classic BPF and eBPF. There are only two registers in classic BPF, so it means A += X. @@ -1017,7 +1021,7 @@ At the start of the program the register R1 contains a pointer to context and has type PTR_TO_CTX. If verifier sees an insn that does R2=R1, then R2 has now type PTR_TO_CTX as well and can be used on the right hand side of expression. -If R1=PTR_TO_CTX and insn is R2=R1+R1, then R2=UNKNOWN_VALUE, +If R1=PTR_TO_CTX and insn is R2=R1+R1, then R2=SCALAR_VALUE, since addition of two valid pointers makes invalid pointer. (In 'secure' mode verifier will reject any type of pointer arithmetic to make sure that kernel addresses don't leak to unprivileged users) @@ -1039,7 +1043,7 @@ is a correct program. If there was R1 instead of R6, it would have been rejected. load/store instructions are allowed only with registers of valid types, which -are PTR_TO_CTX, PTR_TO_MAP, FRAME_PTR. They are bounds and alignment checked. +are PTR_TO_CTX, PTR_TO_MAP, PTR_TO_STACK. They are bounds and alignment checked. For example: bpf_mov R1 = 1 bpf_mov R2 = 2 @@ -1058,7 +1062,7 @@ intends to load a word from address R6 + 8 and store it into R0 If R6=PTR_TO_CTX, via is_valid_access() callback the verifier will know that offset 8 of size 4 bytes can be accessed for reading, otherwise the verifier will reject the program. -If R6=FRAME_PTR, then access should be aligned and be within +If R6=PTR_TO_STACK, then access should be aligned and be within stack bounds, which are [-MAX_BPF_STACK, 0). In this example offset is 8, so it will fail verification, since it's out of bounds. @@ -1069,7 +1073,7 @@ For example: bpf_ld R0 = *(u32 *)(R10 - 4) bpf_exit is invalid program. -Though R10 is correct read-only register and has type FRAME_PTR +Though R10 is correct read-only register and has type PTR_TO_STACK and R10 - 4 is within stack bounds, there were no stores into that location. Pointer register spill/fill is tracked as well, since four (R6-R9) @@ -1094,6 +1098,71 @@ all use cases. See details of eBPF verifier in kernel/bpf/verifier.c +Register value tracking +----------------------- +In order to determine the safety of an eBPF program, the verifier must track +the range of possible values in each register and also in each stack slot. +This is done with 'struct bpf_reg_state', defined in include/linux/ +bpf_verifier.h, which unifies tracking of scalar and pointer values. Each +register state has a type, which is either NOT_INIT (the register has not been +written to), SCALAR_VALUE (some value which is not usable as a pointer), or a +pointer type. The types of pointers describe their base, as follows: + PTR_TO_CTX Pointer to bpf_context. + CONST_PTR_TO_MAP Pointer to struct bpf_map. "Const" because arithmetic + on these pointers is forbidden. + PTR_TO_MAP_VALUE Pointer to the value stored in a map element. 
+ PTR_TO_MAP_VALUE_OR_NULL + Either a pointer to a map value, or NULL; map accesses + (see section 'eBPF maps', below) return this type, + which becomes a PTR_TO_MAP_VALUE when checked != NULL. + Arithmetic on these pointers is forbidden. + PTR_TO_STACK Frame pointer. + PTR_TO_PACKET skb->data. + PTR_TO_PACKET_END skb->data + headlen; arithmetic forbidden. +However, a pointer may be offset from this base (as a result of pointer +arithmetic), and this is tracked in two parts: the 'fixed offset' and 'variable +offset'. The former is used when an exactly-known value (e.g. an immediate +operand) is added to a pointer, while the latter is used for values which are +not exactly known. The variable offset is also used in SCALAR_VALUEs, to track +the range of possible values in the register. +The verifier's knowledge about the variable offset consists of: +* minimum and maximum values as unsigned +* minimum and maximum values as signed +* knowledge of the values of individual bits, in the form of a 'tnum': a u64 +'mask' and a u64 'value'. 1s in the mask represent bits whose value is unknown; +1s in the value represent bits known to be 1. Bits known to be 0 have 0 in both +mask and value; no bit should ever be 1 in both. For example, if a byte is read +into a register from memory, the register's top 56 bits are known zero, while +the low 8 are unknown - which is represented as the tnum (0x0; 0xff). If we +then OR this with 0x40, we get (0x40; 0xbf), then if we add 1 we get (0x0; +0x1ff), because of potential carries; a short sketch of this arithmetic appears +at the end of this section. +Besides arithmetic, the register state can also be updated by conditional +branches. For instance, if a SCALAR_VALUE is compared > 8, in the 'true' branch +it will have a umin_value (unsigned minimum value) of 9, whereas in the 'false' +branch it will have a umax_value of 8. A signed compare (with BPF_JSGT or +BPF_JSGE) would instead update the signed minimum/maximum values. Information +from the signed and unsigned bounds can be combined; for instance if a value is +first tested < 8 and then tested s> 4, the verifier will conclude that the value +is also > 4 and s< 8, since the bounds prevent crossing the sign boundary. +PTR_TO_PACKETs with a variable offset part have an 'id', which is common to all +pointers sharing that same variable offset. This is important for packet range +checks: after adding some variable to a packet pointer, if you then copy it to +another register and (say) add a constant 4, both registers will share the same +'id' but one will have a fixed offset of +4. Then if it is bounds-checked and +found to be less than a PTR_TO_PACKET_END, the other register is now known to +have a safe range of at least 4 bytes. See 'Direct packet access', below, for +more on PTR_TO_PACKET ranges. +The 'id' field is also used on PTR_TO_MAP_VALUE_OR_NULL, common to all copies of +the pointer returned from a map lookup. This means that when one copy is +checked and found to be non-NULL, all copies can become PTR_TO_MAP_VALUEs. +As well as range-checking, the tracked information is also used for enforcing +alignment of pointer accesses. For instance, on most systems the packet pointer +is 2 bytes after a 4-byte alignment. If a program adds 14 bytes to that to jump +over the Ethernet header, then reads IHL and adds (IHL * 4), the resulting +pointer will have a variable offset known to be 4n+2 for some n, so adding the 2 +bytes (NET_IP_ALIGN) gives a 4-byte alignment and so word-sized accesses through +that pointer are safe.
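+
+The following is a small standalone C illustration of the tnum arithmetic
+described above; the type and helper names are made up for the example (the
+kernel keeps its own in-tree implementation of these helpers), so treat it as a
+sketch of the idea rather than the verifier's code:
+
+  #include <stdint.h>
+  #include <stdio.h>
+
+  /* 'mask' bits are unknown, 'value' bits are known to be 1;
+   * no bit may ever be set in both fields.
+   */
+  struct tnum {
+          uint64_t value;
+          uint64_t mask;
+  };
+
+  /* OR: a result bit is known 1 if it is 1 in either input; it stays
+   * unknown only if it is unknown in an input and not already known 1.
+   */
+  static struct tnum tnum_or(struct tnum a, struct tnum b)
+  {
+          uint64_t v = a.value | b.value;
+          uint64_t mu = a.mask | b.mask;
+
+          return (struct tnum){ v, mu & ~v };
+  }
+
+  /* ADD: carries out of unknown bits may disturb higher bits, which is
+   * why (0x40; 0xbf) + 1 widens to (0x0; 0x1ff).
+   */
+  static struct tnum tnum_add(struct tnum a, struct tnum b)
+  {
+          uint64_t sv = a.value + b.value;
+          uint64_t sm = a.mask + b.mask;
+          uint64_t sigma = sm + sv;
+          uint64_t chi = sigma ^ sv;              /* bits carries may change */
+          uint64_t mu = chi | a.mask | b.mask;
+
+          return (struct tnum){ sv & ~mu, mu };
+  }
+
+  int main(void)
+  {
+          /* a byte loaded from memory: low 8 bits unknown -> (0x0; 0xff) */
+          struct tnum r = { 0x0, 0xff };
+
+          r = tnum_or(r, (struct tnum){ 0x40, 0 });     /* (0x40; 0xbf) */
+          printf("(%#llx; %#llx)\n", (unsigned long long)r.value,
+                 (unsigned long long)r.mask);
+
+          r = tnum_add(r, (struct tnum){ 0x1, 0 });     /* (0x0; 0x1ff) */
+          printf("(%#llx; %#llx)\n", (unsigned long long)r.value,
+                 (unsigned long long)r.mask);
+          return 0;
+  }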
+ Direct packet access -------------------- In cls_bpf and act_bpf programs the verifier allows direct access to the packet @@ -1121,7 +1190,7 @@ it now points to 'skb->data + 14' and accessible range is [R5, R5 + 14 - 14) which is zero bytes. More complex packet access may look like: - R0=imm1 R1=ctx R3=pkt(id=0,off=0,r=14) R4=pkt_end R5=pkt(id=0,off=14,r=14) R10=fp + R0=inv1 R1=ctx R3=pkt(id=0,off=0,r=14) R4=pkt_end R5=pkt(id=0,off=14,r=14) R10=fp 6: r0 = *(u8 *)(r3 +7) /* load 7th byte from the packet */ 7: r4 = *(u8 *)(r3 +12) 8: r4 *= 14 @@ -1135,26 +1204,31 @@ More complex packet access may look like: 16: r2 += 8 17: r1 = *(u32 *)(r1 +80) /* load skb->data_end */ 18: if r2 > r1 goto pc+2 - R0=inv56 R1=pkt_end R2=pkt(id=2,off=8,r=8) R3=pkt(id=2,off=0,r=8) R4=inv52 R5=pkt(id=0,off=14,r=14) R10=fp + R0=inv(id=0,umax_value=255,var_off=(0x0; 0xff)) R1=pkt_end R2=pkt(id=2,off=8,r=8) R3=pkt(id=2,off=0,r=8) R4=inv(id=0,umax_value=3570,var_off=(0x0; 0xfffe)) R5=pkt(id=0,off=14,r=14) R10=fp 19: r1 = *(u8 *)(r3 +4) The state of the register R3 is R3=pkt(id=2,off=0,r=8) id=2 means that two 'r3 += rX' instructions were seen, so r3 points to some offset within a packet and since the program author did 'if (r3 + 8 > r1) goto err' at insn #18, the safe range is [R3, R3 + 8). -The verifier only allows 'add' operation on packet registers. Any other -operation will set the register state to 'unknown_value' and it won't be +The verifier only allows 'add'/'sub' operations on packet registers. Any other +operation will set the register state to 'SCALAR_VALUE' and it won't be available for direct packet access. Operation 'r3 += rX' may overflow and become less than original skb->data, -therefore the verifier has to prevent that. So it tracks the number of -upper zero bits in all 'uknown_value' registers, so when it sees -'r3 += rX' instruction and rX is more than 16-bit value, it will error as: -"cannot add integer value with N upper zero bits to ptr_to_packet" +therefore the verifier has to prevent that. So when it sees 'r3 += rX' +instruction and rX is more than 16-bit value, any subsequent bounds-check of r3 +against skb->data_end will not give us 'range' information, so attempts to read +through the pointer will give "invalid access to packet" error. Ex. after insn 'r4 = *(u8 *)(r3 +12)' (insn #7 above) the state of r4 is -R4=inv56 which means that upper 56 bits on the register are guaranteed -to be zero. After insn 'r4 *= 14' the state becomes R4=inv52, since -multiplying 8-bit value by constant 14 will keep upper 52 bits as zero. -Similarly 'r2 >>= 48' will make R2=inv48, since the shift is not sign -extending. This logic is implemented in evaluate_reg_alu() function. +R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff)) which means that upper 56 bits +of the register are guaranteed to be zero, and nothing is known about the lower +8 bits. After insn 'r4 *= 14' the state becomes +R4=inv(id=0,umax_value=3570,var_off=(0x0; 0xfffe)), since multiplying an 8-bit +value by constant 14 will keep upper 52 bits as zero, also the least significant +bit will be zero as 14 is even. Similarly 'r2 >>= 48' will make +R2=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff)), since the shift is not sign +extending. This logic is implemented in adjust_reg_min_max_vals() function, +which calls adjust_ptr_min_max_vals() for adding pointer to scalar (or vice +versa) and adjust_scalar_min_max_vals() for operations on two scalars. 
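+
+The instruction sequence analysed above corresponds roughly to the following
+restricted-C sketch (illustrative only: the function name, section name and
+return values are placeholders, and such programs are normally built with
+clang's BPF target):
+
+  #include <linux/bpf.h>
+
+  __attribute__((section("classifier"), used))
+  int pkt_ptr_example(struct __sk_buff *skb)
+  {
+          void *data = (void *)(long)skb->data;
+          void *data_end = (void *)(long)skb->data_end;
+          unsigned char *p = data;
+          __u64 off;
+
+          if (data + 14 > data_end)        /* [data, data + 14) now accessible */
+                  return 0;
+
+          off = p[12] * 14;                /* unknown value, cf. 'r4 *= 14' */
+          p = (unsigned char *)data + off; /* packet pointer, variable offset */
+
+          if ((void *)(p + 8) > data_end)  /* gives p a safe range of 8 bytes */
+                  return 0;
+
+          return p[4];                     /* lies inside the checked range */
+  }
+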
The end result is that bpf program author can access packet directly using normal C code as: @@ -1214,6 +1288,22 @@ The map is defined by: . key size in bytes . value size in bytes +Pruning +------- +The verifier does not actually walk all possible paths through the program. For +each new branch to analyse, the verifier looks at all the states it's previously +been in when at this instruction. If any of them contain the current state as a +subset, the branch is 'pruned' - that is, the fact that the previous state was +accepted implies the current state would be as well. For instance, if in the +previous state, r1 held a packet-pointer, and in the current state, r1 holds a +packet-pointer with a range as long or longer and at least as strict an +alignment, then r1 is safe. Similarly, if r2 was NOT_INIT before then it can't +have been used by any path from that point, so any value in r2 (including +another NOT_INIT) is safe. The implementation is in the function regsafe(). +Pruning considers not only the registers but also the stack (and any spilled +registers it may hold). They must all be safe for the branch to be pruned. +This is implemented in states_equal(). + Understanding eBPF verifier messages ------------------------------------ diff --git a/Documentation/networking/hinic.txt b/Documentation/networking/hinic.txt new file mode 100644 index 000000000000..989366a4039c --- /dev/null +++ b/Documentation/networking/hinic.txt @@ -0,0 +1,125 @@ +Linux Kernel Driver for Huawei Intelligent NIC(HiNIC) family +============================================================ + +Overview: +========= +HiNIC is a network interface card for the Data Center Area. + +The driver supports a range of link-speed devices (10GbE, 25GbE, 40GbE, etc.). +The driver supports also a negotiated and extendable feature set. + +Some HiNIC devices support SR-IOV. This driver is used for Physical Function +(PF). + +HiNIC devices support MSI-X interrupt vector for each Tx/Rx queue and +adaptive interrupt moderation. + +HiNIC devices support also various offload features such as checksum offload, +TCP Transmit Segmentation Offload(TSO), Receive-Side Scaling(RSS) and +LRO(Large Receive Offload). + + +Supported PCI vendor ID/device IDs: +=================================== + +19e5:1822 - HiNIC PF + + +Driver Architecture and Source Code: +==================================== + +hinic_dev - Implement a Logical Network device that is independent from +specific HW details about HW data structure formats. + +hinic_hwdev - Implement the HW details of the device and include the components +for accessing the PCI NIC. + +hinic_hwdev contains the following components: +=============================================== + +HW Interface: +============= + +The interface for accessing the pci device (DMA memory and PCI BARs). +(hinic_hw_if.c, hinic_hw_if.h) + +Configuration Status Registers Area that describes the HW Registers on the +configuration and status BAR0. (hinic_hw_csr.h) + +MGMT components: +================ + +Asynchronous Event Queues(AEQs) - The event queues for receiving messages from +the MGMT modules on the cards. (hinic_hw_eqs.c, hinic_hw_eqs.h) + +Application Programmable Interface commands(API CMD) - Interface for sending +MGMT commands to the card. (hinic_hw_api_cmd.c, hinic_hw_api_cmd.h) + +Management (MGMT) - the PF to MGMT channel that uses API CMD for sending MGMT +commands to the card and receives notifications from the MGMT modules on the +card by AEQs. Also set the addresses of the IO CMDQs in HW. 
+(hinic_hw_mgmt.c, hinic_hw_mgmt.h) + +IO components: +============== + +Completion Event Queues(CEQs) - The completion Event Queues that describe IO +tasks that are finished. (hinic_hw_eqs.c, hinic_hw_eqs.h) + +Work Queues(WQ) - Contain the memory and operations for use by CMD queues and +the Queue Pairs. The WQ is a Memory Block in a Page. The Block contains +pointers to Memory Areas that are the Memory for the Work Queue Elements(WQEs). +(hinic_hw_wq.c, hinic_hw_wq.h) + +Command Queues(CMDQ) - The queues for sending commands for IO management and is +used to set the QPs addresses in HW. The commands completion events are +accumulated on the CEQ that is configured to receive the CMDQ completion events. +(hinic_hw_cmdq.c, hinic_hw_cmdq.h) + +Queue Pairs(QPs) - The HW Receive and Send queues for Receiving and Transmitting +Data. (hinic_hw_qp.c, hinic_hw_qp.h, hinic_hw_qp_ctxt.h) + +IO - de/constructs all the IO components. (hinic_hw_io.c, hinic_hw_io.h) + +HW device: +========== + +HW device - de/constructs the HW Interface, the MGMT components on the +initialization of the driver and the IO components on the case of Interface +UP/DOWN Events. (hinic_hw_dev.c, hinic_hw_dev.h) + + +hinic_dev contains the following components: +=============================================== + +PCI ID table - Contains the supported PCI Vendor/Device IDs. +(hinic_pci_tbl.h) + +Port Commands - Send commands to the HW device for port management +(MAC, Vlan, MTU, ...). (hinic_port.c, hinic_port.h) + +Tx Queues - Logical Tx Queues that use the HW Send Queues for transmit. +The Logical Tx queue is not dependent on the format of the HW Send Queue. +(hinic_tx.c, hinic_tx.h) + +Rx Queues - Logical Rx Queues that use the HW Receive Queues for receive. +The Logical Rx queue is not dependent on the format of the HW Receive Queue. +(hinic_rx.c, hinic_rx.h) + +hinic_dev - de/constructs the Logical Tx and Rx Queues. +(hinic_main.c, hinic_dev.h) + + +Miscellaneous: +============= + +Common functions that are used by HW and Logical Device. +(hinic_common.c, hinic_common.h) + + +Support +======= + +If an issue is identified with the released source code on the supported kernel +with a supported adapter, email the specific information related to the issue to +aviad.krawczyk@huawei.com. diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst index b5bd87e01f52..66e620866245 100644 --- a/Documentation/networking/index.rst +++ b/Documentation/networking/index.rst @@ -6,6 +6,7 @@ Contents: .. toctree:: :maxdepth: 2 + batman-adv kapi z8530book diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 974ab47ae53a..77f4de59dc9c 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -109,7 +109,10 @@ neigh/default/unres_qlen_bytes - INTEGER queued for each unresolved address by other network layers. (added in linux 3.3) Setting negative value is meaningless and will return error. - Default: 65536 Bytes(64KB) + Default: SK_WMEM_MAX, (same as net.core.wmem_default). + Exact value depends on architecture and kernel options, + but should be enough to allow queuing 256 packets + of medium size. neigh/default/unres_qlen - INTEGER The maximum number of packets which may be queued for each @@ -119,7 +122,7 @@ neigh/default/unres_qlen - INTEGER unexpected packet loss. The current default value is calculated according to default value of unres_qlen_bytes and true size of packet. 
- Default: 31 + Default: 101 mtu_expires - INTEGER Time, in seconds, that cached PMTU information is kept. @@ -353,12 +356,7 @@ tcp_l3mdev_accept - BOOLEAN compiled with CONFIG_NET_L3_MASTER_DEV. tcp_low_latency - BOOLEAN - If set, the TCP stack makes decisions that prefer lower - latency as opposed to higher throughput. By default, this - option is not set meaning that higher throughput is preferred. - An example of an application where this default should be - changed would be a Beowulf compute cluster. - Default: 0 + This is a legacy option, it has no effect anymore. tcp_max_orphans - INTEGER Maximal number of TCP sockets not attached to any user file handle, @@ -1291,8 +1289,7 @@ tag - INTEGER xfrm4_gc_thresh - INTEGER The threshold at which we will start garbage collecting for IPv4 destination cache entries. At twice this value the system will - refuse new allocations. The value must be set below the flowcache - limit (4096 * number of online cpus) to take effect. + refuse new allocations. igmp_link_local_mcast_reports - BOOLEAN Enable IGMP reports for link local multicast groups in the @@ -1356,6 +1353,15 @@ flowlabel_state_ranges - BOOLEAN FALSE: disabled Default: true +flowlabel_reflect - BOOLEAN + Automatically reflect the flow label. Needed for Path MTU + Discovery to work with Equal Cost Multipath Routing in anycast + environments. See RFC 7690 and: + https://tools.ietf.org/html/draft-wang-6man-flow-label-reflection-01 + TRUE: enabled + FALSE: disabled + Default: FALSE + anycast_src_echo_reply - BOOLEAN Controls the use of anycast addresses as source addresses for ICMPv6 echo reply @@ -1674,6 +1680,9 @@ accept_dad - INTEGER 2: Enable DAD, and disable IPv6 operation if MAC-based duplicate link-local address has been found. + DAD operation and mode on a given interface will be selected according + to the maximum value of conf/{all,interface}/accept_dad. + force_tllao - BOOLEAN Enable sending the target link-layer address option even when responding to a unicast neighbor solicitation. @@ -1721,16 +1730,23 @@ suppress_frag_ndisc - INTEGER optimistic_dad - BOOLEAN Whether to perform Optimistic Duplicate Address Detection (RFC 4429). - 0: disabled (default) - 1: enabled + 0: disabled (default) + 1: enabled + + Optimistic Duplicate Address Detection for the interface will be enabled + if at least one of conf/{all,interface}/optimistic_dad is set to 1, + it will be disabled otherwise. use_optimistic - BOOLEAN If enabled, do not classify optimistic addresses as deprecated during source address selection. Preferred addresses will still be chosen before optimistic addresses, subject to other ranking in the source address selection algorithm. - 0: disabled (default) - 1: enabled + 0: disabled (default) + 1: enabled + + This will be enabled if at least one of + conf/{all,interface}/use_optimistic is set to 1, disabled otherwise. stable_secret - IPv6 address This IPv6 address will be used as a secret to generate IPv6 @@ -1778,8 +1794,7 @@ ratelimit - INTEGER xfrm6_gc_thresh - INTEGER The threshold at which we will start garbage collecting for IPv6 destination cache entries. At twice this value the system will - refuse new allocations. The value must be set below the flowcache - limit (4096 * number of online cpus) to take effect. + refuse new allocations. 
IPv6 Update by: diff --git a/Documentation/networking/msg_zerocopy.rst b/Documentation/networking/msg_zerocopy.rst new file mode 100644 index 000000000000..77f6d7e25cfd --- /dev/null +++ b/Documentation/networking/msg_zerocopy.rst @@ -0,0 +1,257 @@ + +============ +MSG_ZEROCOPY +============ + +Intro +===== + +The MSG_ZEROCOPY flag enables copy avoidance for socket send calls. +The feature is currently implemented for TCP sockets. + + +Opportunity and Caveats +----------------------- + +Copying large buffers between user process and kernel can be +expensive. Linux supports various interfaces that eschew copying, +such as sendpage and splice. The MSG_ZEROCOPY flag extends the +underlying copy avoidance mechanism to common socket send calls. + +Copy avoidance is not a free lunch. As implemented, with page pinning, +it replaces per byte copy cost with page accounting and completion +notification overhead. As a result, MSG_ZEROCOPY is generally only +effective at writes over around 10 KB. + +Page pinning also changes system call semantics. It temporarily shares +the buffer between process and network stack. Unlike with copying, the +process cannot immediately overwrite the buffer after system call +return without possibly modifying the data in flight. Kernel integrity +is not affected, but a buggy program can possibly corrupt its own data +stream. + +The kernel returns a notification when it is safe to modify data. +Converting an existing application to MSG_ZEROCOPY is not always as +trivial as just passing the flag, then. + + +More Info +--------- + +Much of this document was derived from a longer paper presented at +netdev 2.1. For more in-depth information see that paper and talk, +the excellent reporting over at LWN.net or read the original code. + + paper, slides, video + https://netdevconf.org/2.1/session.html?debruijn + + LWN article + https://lwn.net/Articles/726917/ + + patchset + [PATCH net-next v4 0/9] socket sendmsg MSG_ZEROCOPY + http://lkml.kernel.org/r/20170803202945.70750-1-willemdebruijn.kernel@gmail.com + + +Interface +========= + +Passing the MSG_ZEROCOPY flag is the most obvious step to enable copy +avoidance, but not the only one. + +Socket Setup +------------ + +The kernel is permissive when applications pass undefined flags to the +send system call. By default it simply ignores these. To avoid enabling +copy avoidance mode for legacy processes that accidentally already pass +this flag, a process must first signal intent by setting a socket option: + +:: + + if (setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one))) + error(1, errno, "setsockopt zerocopy"); + + +Transmission +------------ + +The change to send (or sendto, sendmsg, sendmmsg) itself is trivial. +Pass the new flag. + +:: + + ret = send(fd, buf, sizeof(buf), MSG_ZEROCOPY); + +A zerocopy failure will return -1 with errno ENOBUFS. This happens if +the socket option was not set, the socket exceeds its optmem limit or +the user exceeds its ulimit on locked pages. + + +Mixing copy avoidance and copying +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Many workloads have a mixture of large and small buffers. Because copy +avoidance is more expensive than copying for small packets, the +feature is implemented as a flag. It is safe to mix calls with the flag +with those without. + + +Notifications +------------- + +The kernel has to notify the process when it is safe to reuse a +previously passed buffer. It queues completion notifications on the +socket error queue, akin to the transmit timestamping interface. 
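+
+As a rough sketch of how this fits together (the helper below is illustrative
+only and error handling is trimmed), a sender can keep its own count of
+successful zerocopy sends and later match it against the notifications
+described below::
+
+    #include <errno.h>
+    #include <stdint.h>
+    #include <sys/socket.h>
+    #include <sys/types.h>
+
+    #ifndef MSG_ZEROCOPY
+    #define MSG_ZEROCOPY 0x4000000
+    #endif
+
+    /* Returns the number of completion notifications to expect on the
+     * error queue: one per send call that actually used MSG_ZEROCOPY.
+     */
+    static uint32_t send_zerocopy(int fd, char *bufs[], size_t len, int nbufs)
+    {
+            uint32_t expected = 0;
+            int i;
+
+            for (i = 0; i < nbufs; i++) {
+                    ssize_t ret = send(fd, bufs[i], len, MSG_ZEROCOPY);
+
+                    if (ret == -1 && errno == ENOBUFS)
+                            ret = send(fd, bufs[i], len, 0); /* plain copy */
+                    else if (ret > 0)
+                            expected++; /* bufs[i] stays shared until notified */
+                    /* other errors are left unhandled in this sketch */
+            }
+            return expected;
+    }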
+ +The notification itself is a simple scalar value. Each socket +maintains an internal unsigned 32-bit counter. Each send call with +MSG_ZEROCOPY that successfully sends data increments the counter. The +counter is not incremented on failure or if called with length zero. +The counter counts system call invocations, not bytes. It wraps after +UINT_MAX calls. + + +Notification Reception +~~~~~~~~~~~~~~~~~~~~~~ + +The below snippet demonstrates the API. In the simplest case, each +send syscall is followed by a poll and recvmsg on the error queue. + +Reading from the error queue is always a non-blocking operation. The +poll call is there to block until an error is outstanding. It will set +POLLERR in its output flags. That flag does not have to be set in the +events field. Errors are signaled unconditionally. + +:: + + pfd.fd = fd; + pfd.events = 0; + if (poll(&pfd, 1, -1) != 1 || pfd.revents & POLLERR == 0) + error(1, errno, "poll"); + + ret = recvmsg(fd, &msg, MSG_ERRQUEUE); + if (ret == -1) + error(1, errno, "recvmsg"); + + read_notification(msg); + +The example is for demonstration purpose only. In practice, it is more +efficient to not wait for notifications, but read without blocking +every couple of send calls. + +Notifications can be processed out of order with other operations on +the socket. A socket that has an error queued would normally block +other operations until the error is read. Zerocopy notifications have +a zero error code, however, to not block send and recv calls. + + +Notification Batching +~~~~~~~~~~~~~~~~~~~~~ + +Multiple outstanding packets can be read at once using the recvmmsg +call. This is often not needed. In each message the kernel returns not +a single value, but a range. It coalesces consecutive notifications +while one is outstanding for reception on the error queue. + +When a new notification is about to be queued, it checks whether the +new value extends the range of the notification at the tail of the +queue. If so, it drops the new notification packet and instead increases +the range upper value of the outstanding notification. + +For protocols that acknowledge data in-order, like TCP, each +notification can be squashed into the previous one, so that no more +than one notification is outstanding at any one point. + +Ordered delivery is the common case, but not guaranteed. Notifications +may arrive out of order on retransmission and socket teardown. + + +Notification Parsing +~~~~~~~~~~~~~~~~~~~~ + +The below snippet demonstrates how to parse the control message: the +read_notification() call in the previous snippet. A notification +is encoded in the standard error format, sock_extended_err. + +The level and type fields in the control data are protocol family +specific, IP_RECVERR or IPV6_RECVERR. + +Error origin is the new type SO_EE_ORIGIN_ZEROCOPY. ee_errno is zero, +as explained before, to avoid blocking read and write system calls on +the socket. + +The 32-bit notification range is encoded as [ee_info, ee_data]. This +range is inclusive. Other fields in the struct must be treated as +undefined, bar for ee_code, as discussed below. 
+ +:: + + struct sock_extended_err *serr; + struct cmsghdr *cm; + + cm = CMSG_FIRSTHDR(msg); + if (cm->cmsg_level != SOL_IP && + cm->cmsg_type != IP_RECVERR) + error(1, 0, "cmsg"); + + serr = (void *) CMSG_DATA(cm); + if (serr->ee_errno != 0 || + serr->ee_origin != SO_EE_ORIGIN_ZEROCOPY) + error(1, 0, "serr"); + + printf("completed: %u..%u\n", serr->ee_info, serr->ee_data); + + +Deferred copies +~~~~~~~~~~~~~~~ + +Passing flag MSG_ZEROCOPY is a hint to the kernel to apply copy +avoidance, and a contract that the kernel will queue a completion +notification. It is not a guarantee that the copy is elided. + +Copy avoidance is not always feasible. Devices that do not support +scatter-gather I/O cannot send packets made up of kernel generated +protocol headers plus zerocopy user data. A packet may need to be +converted to a private copy of data deep in the stack, say to compute +a checksum. + +In all these cases, the kernel returns a completion notification when +it releases its hold on the shared pages. That notification may arrive +before the (copied) data is fully transmitted. A zerocopy completion +notification is not a transmit completion notification, therefore. + +Deferred copies can be more expensive than a copy immediately in the +system call, if the data is no longer warm in the cache. The process +also incurs notification processing cost for no benefit. For this +reason, the kernel signals if data was completed with a copy, by +setting flag SO_EE_CODE_ZEROCOPY_COPIED in field ee_code on return. +A process may use this signal to stop passing flag MSG_ZEROCOPY on +subsequent requests on the same socket. + + +Implementation +============== + +Loopback +-------- + +Data sent to local sockets can be queued indefinitely if the receive +process does not read its socket. Unbound notification latency is not +acceptable. For this reason all packets generated with MSG_ZEROCOPY +that are looped to a local socket will incur a deferred copy. This +includes looping onto packet sockets (e.g., tcpdump) and tun devices. + + +Testing +======= + +More realistic example code can be found in the kernel source under +tools/testing/selftests/net/msg_zerocopy.c. + +Be cognizant of the loopback constraint. The test can be run between +a pair of hosts. But if run between a local pair of processes, for +instance when run with msg_zerocopy.sh between a veth pair across +namespaces, the test will not show any improvement. For testing, the +loopback restriction can be temporarily relaxed by making +skb_orphan_frags_rx identical to skb_orphan_frags. diff --git a/Documentation/networking/netdev-FAQ.txt b/Documentation/networking/netdev-FAQ.txt index 247a30ba8e17..cfc66ea72329 100644 --- a/Documentation/networking/netdev-FAQ.txt +++ b/Documentation/networking/netdev-FAQ.txt @@ -111,6 +111,14 @@ A: Generally speaking, the patches get triaged quickly (in less than 48h). patch is a good way to ensure your patch is ignored or pushed to the bottom of the priority list. +Q: I submitted multiple versions of the patch series, should I directly update + patchwork for the previous versions of these patch series? + +A: No, please don't interfere with the patch status on patchwork, leave it to + the maintainer to figure out what is the most recent and current version that + should be applied. If there is any doubt, the maintainer will reply and ask + what should be done. + Q: How can I tell what patches are queued up for backporting to the various stable releases? 
diff --git a/Documentation/networking/netvsc.txt b/Documentation/networking/netvsc.txt new file mode 100644 index 000000000000..93560fb1170a --- /dev/null +++ b/Documentation/networking/netvsc.txt @@ -0,0 +1,75 @@ +Hyper-V network driver +====================== + +Compatibility +============= + +This driver is compatible with Windows Server 2012 R2, 2016 and +Windows 10. + +Features +======== + + Checksum offload + ---------------- + The netvsc driver supports checksum offload as long as the + Hyper-V host version does. Windows Server 2016 and Azure + support checksum offload for TCP and UDP for both IPv4 and + IPv6. Windows Server 2012 only supports checksum offload for TCP. + + Receive Side Scaling + -------------------- + Hyper-V supports receive side scaling. For TCP, packets are + distributed among available queues based on IP address and port + number. + + For UDP, we can switch UDP hash level between L3 and L4 by ethtool + command. UDP over IPv4 and v6 can be set differently. The default + hash level is L4. We currently only allow switching TX hash level + from within the guests. + + On Azure, fragmented UDP packets have high loss rate with L4 + hashing. Using L3 hashing is recommended in this case. + + For example, for UDP over IPv4 on eth0: + To include UDP port numbers in hashing: + ethtool -N eth0 rx-flow-hash udp4 sdfn + To exclude UDP port numbers in hashing: + ethtool -N eth0 rx-flow-hash udp4 sd + To show UDP hash level: + ethtool -n eth0 rx-flow-hash udp4 + + Generic Receive Offload, aka GRO + -------------------------------- + The driver supports GRO and it is enabled by default. GRO coalesces + like packets and significantly reduces CPU usage under heavy Rx + load. + + SR-IOV support + -------------- + Hyper-V supports SR-IOV as a hardware acceleration option. If SR-IOV + is enabled in both the vSwitch and the guest configuration, then the + Virtual Function (VF) device is passed to the guest as a PCI + device. In this case, both a synthetic (netvsc) and VF device are + visible in the guest OS and both NIC's have the same MAC address. + + The VF is enslaved by netvsc device. The netvsc driver will transparently + switch the data path to the VF when it is available and up. + Network state (addresses, firewall, etc) should be applied only to the + netvsc device; the slave device should not be accessed directly in + most cases. The exceptions are if some special queue discipline or + flow direction is desired, these should be applied directly to the + VF slave device. + + Receive Buffer + -------------- + Packets are received into a receive area which is created when device + is probed. The receive area is broken into MTU sized chunks and each may + contain one or more packets. The number of receive sections may be changed + via ethtool Rx ring parameters. + + There is a similar send buffer which is used to aggregate packets for sending. + The send area is broken into chunks of 6144 bytes, each of section may + contain one or more packets. The send buffer is an optimization, the driver + will use slower method to handle very large packets or if the send buffer + area is exhausted. diff --git a/Documentation/networking/nf_conntrack-sysctl.txt b/Documentation/networking/nf_conntrack-sysctl.txt index 497d668288f9..433b6724797a 100644 --- a/Documentation/networking/nf_conntrack-sysctl.txt +++ b/Documentation/networking/nf_conntrack-sysctl.txt @@ -96,17 +96,6 @@ nf_conntrack_max - INTEGER Size of connection tracking table. Default value is nf_conntrack_buckets value * 4. 
-nf_conntrack_default_on - BOOLEAN - 0 - don't register conntrack in new net namespaces - 1 - register conntrack in new net namespaces (default) - - This controls wheter newly created network namespaces have connection - tracking enabled by default. It will be enabled automatically - regardless of this setting if the new net namespace requires - connection tracking, e.g. when NAT rules are created. - This setting is only visible in initial user namespace, it has no - effect on existing namespaces. - nf_conntrack_tcp_be_liberal - BOOLEAN 0 - disabled (default) not 0 - enabled diff --git a/Documentation/networking/rmnet.txt b/Documentation/networking/rmnet.txt new file mode 100644 index 000000000000..6b341eaf2062 --- /dev/null +++ b/Documentation/networking/rmnet.txt @@ -0,0 +1,82 @@ +1. Introduction + +rmnet driver is used for supporting the Multiplexing and aggregation +Protocol (MAP). This protocol is used by all recent chipsets using Qualcomm +Technologies, Inc. modems. + +This driver can be used to register onto any physical network device in +IP mode. Physical transports include USB, HSIC, PCIe and IP accelerator. + +Multiplexing allows for creation of logical netdevices (rmnet devices) to +handle multiple private data networks (PDN) like a default internet, tethering, +multimedia messaging service (MMS) or IP media subsystem (IMS). Hardware sends +packets with MAP headers to rmnet. Based on the multiplexer id, rmnet +routes to the appropriate PDN after removing the MAP header. + +Aggregation is required to achieve high data rates. This involves hardware +sending aggregated bunch of MAP frames. rmnet driver will de-aggregate +these MAP frames and send them to appropriate PDN's. + +2. Packet format + +a. MAP packet (data / control) + +MAP header has the same endianness of the IP packet. + +Packet format - + +Bit 0 1 2-7 8 - 15 16 - 31 +Function Command / Data Reserved Pad Multiplexer ID Payload length +Bit 32 - x +Function Raw Bytes + +Command (1)/ Data (0) bit value is to indicate if the packet is a MAP command +or data packet. Control packet is used for transport level flow control. Data +packets are standard IP packets. + +Reserved bits are usually zeroed out and to be ignored by receiver. + +Padding is number of bytes to be added for 4 byte alignment if required by +hardware. + +Multiplexer ID is to indicate the PDN on which data has to be sent. + +Payload length includes the padding length but does not include MAP header +length. + +b. MAP packet (command specific) + +Bit 0 1 2-7 8 - 15 16 - 31 +Function Command Reserved Pad Multiplexer ID Payload length +Bit 32 - 39 40 - 45 46 - 47 48 - 63 +Function Command name Reserved Command Type Reserved +Bit 64 - 95 +Function Transaction ID +Bit 96 - 127 +Function Command data + +Command 1 indicates disabling flow while 2 is enabling flow + +Command types - +0 for MAP command request +1 is to acknowledge the receipt of a command +2 is for unsupported commands +3 is for error during processing of commands + +c. Aggregation + +Aggregation is multiple MAP packets (can be data or command) delivered to +rmnet in a single linear skb. rmnet will process the individual +packets and either ACK the MAP command or deliver the IP packet to the +network stack as needed + +MAP header|IP Packet|Optional padding|MAP header|IP Packet|Optional padding.... +MAP header|IP Packet|Optional padding|MAP header|Command Packet|Optional pad... + +3. 
Userspace configuration + +rmnet userspace configuration is done through netlink library librmnetctl +and command line utility rmnetcli. Utility is hosted in codeaurora forum git. +The driver uses rtnl_link_ops for communication. + +https://source.codeaurora.org/quic/la/platform/vendor/qcom-opensource/dataservices/tree/rmnetctl diff --git a/Documentation/networking/rxrpc.txt b/Documentation/networking/rxrpc.txt index 8c70ba5dee4d..810620153a44 100644 --- a/Documentation/networking/rxrpc.txt +++ b/Documentation/networking/rxrpc.txt @@ -818,10 +818,15 @@ The kernel interface functions are as follows: (*) Send data through a call. + typedef void (*rxrpc_notify_end_tx_t)(struct sock *sk, + unsigned long user_call_ID, + struct sk_buff *skb); + int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call, struct msghdr *msg, - size_t len); + size_t len, + rxrpc_notify_end_tx_t notify_end_rx); This is used to supply either the request part of a client call or the reply part of a server call. msg.msg_iovlen and msg.msg_iov specify the @@ -832,6 +837,11 @@ The kernel interface functions are as follows: The msg must not specify a destination address, control data or any flags other than MSG_MORE. len is the total amount of data to transmit. + notify_end_rx can be NULL or it can be used to specify a function to be + called when the call changes state to end the Tx phase. This function is + called with the call-state spinlock held to prevent any reply or final ACK + from being delivered first. + (*) Receive data from a call. int rxrpc_kernel_recv_data(struct socket *sock, @@ -965,6 +975,51 @@ The kernel interface functions are as follows: size should be set when the call is begun. tx_total_len may not be less than zero. + (*) Check to see the completion state of a call so that the caller can assess + whether it needs to be retried. + + enum rxrpc_call_completion { + RXRPC_CALL_SUCCEEDED, + RXRPC_CALL_REMOTELY_ABORTED, + RXRPC_CALL_LOCALLY_ABORTED, + RXRPC_CALL_LOCAL_ERROR, + RXRPC_CALL_NETWORK_ERROR, + }; + + int rxrpc_kernel_check_call(struct socket *sock, struct rxrpc_call *call, + enum rxrpc_call_completion *_compl, + u32 *_abort_code); + + On return, -EINPROGRESS will be returned if the call is still ongoing; if + it is finished, *_compl will be set to indicate the manner of completion, + *_abort_code will be set to any abort code that occurred. 0 will be + returned on a successful completion, -ECONNABORTED will be returned if the + client failed due to a remote abort and anything else will return an + appropriate error code. + + The caller should look at this information to decide if it's worth + retrying the call. + + (*) Retry a client call. + + int rxrpc_kernel_retry_call(struct socket *sock, + struct rxrpc_call *call, + struct sockaddr_rxrpc *srx, + struct key *key); + + This attempts to partially reinitialise a call and submit it again whilst + reusing the original call's Tx queue to avoid the need to repackage and + re-encrypt the data to be sent. call indicates the call to retry, srx the + new address to send it to and key the encryption key to use for signing or + encrypting the packets. + + For this to work, the first Tx data packet must still be in the transmit + queue, and currently this is only permitted for local and network errors + and the call must not have been aborted. Any partially constructed Tx + packet is left as is and can continue being filled afterwards. + + It returns 0 if the call was requeued and an error otherwise. 
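  The two calls above compose naturally: check the completion state first,
  then retry only when the failure class permits it. The sketch below is a
  hypothetical kernel-side helper written against the prototypes documented
  in this section; the function name and the retry policy are illustrative
  and not part of the rxrpc API.

	#include <linux/errno.h>
	#include <net/af_rxrpc.h>

	/* Retry a failed client call at a (possibly new) address, but only
	 * for local or network errors; aborted calls are not resubmitted.
	 */
	static int example_check_and_retry(struct socket *sock,
					   struct rxrpc_call *call,
					   struct sockaddr_rxrpc *srx,
					   struct key *key)
	{
		enum rxrpc_call_completion compl;
		u32 abort_code;
		int ret;

		ret = rxrpc_kernel_check_call(sock, call, &compl, &abort_code);
		if (ret == -EINPROGRESS)
			return ret;	/* Call still in progress. */
		if (ret == 0)
			return 0;	/* Completed successfully. */

		if (compl != RXRPC_CALL_LOCAL_ERROR &&
		    compl != RXRPC_CALL_NETWORK_ERROR)
			return ret;	/* Aborted or other failure. */

		return rxrpc_kernel_retry_call(sock, call, srx, key);
	}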
+
 =======================
 CONFIGURABLE PARAMETERS

diff --git a/Documentation/networking/strparser.txt b/Documentation/networking/strparser.txt
index a0bf573dfa61..13081b3decef 100644
--- a/Documentation/networking/strparser.txt
+++ b/Documentation/networking/strparser.txt
@@ -1,45 +1,107 @@
-Stream Parser
--------------
+Stream Parser (strparser)
+
+Introduction
+============

The stream parser (strparser) is a utility that parses messages of an
-application layer protocol running over a TCP connection. The stream
+application layer protocol running over a data stream. The stream
parser works in conjunction with an upper layer in the kernel to provide
kernel support for application layer messages. For instance, Kernel
Connection Multiplexor (KCM) uses the Stream Parser to parse messages
using a BPF program.

+The strparser works in one of two modes: receive callback or general
+mode.
+
+In receive callback mode, the strparser is called from the data_ready
+callback of a TCP socket. Messages are parsed and delivered as they are
+received on the socket.
+
+In general mode, a sequence of skbs is fed to strparser from an
+outside source. Messages are parsed and delivered as the sequence is
+processed. This mode allows strparser to be applied to arbitrary
+streams of data.
+
 Interface
----------
+=========

The API includes a context structure, a set of callbacks, utility
-functions, and a data_ready function. The callbacks include
-a parse_msg function that is called to perform parsing (e.g.
-BPF parsing in case of KCM), and a rcv_msg function that is called
-when a full message has been completed.
+functions, and a data_ready function for receive callback mode. The
+callbacks include a parse_msg function that is called to perform
+parsing (e.g. BPF parsing in case of KCM), and a rcv_msg function
+that is called when a full message has been completed.

-A stream parser can be instantiated for a TCP connection. This is done
-by:
+Functions
+=========

-strp_init(struct strparser *strp, struct sock *csk,
-	  struct strp_callbacks *cb)
+strp_init(struct strparser *strp, struct sock *sk,
+	  const struct strp_callbacks *cb)

-strp is a struct of type strparser that is allocated by the upper layer.
-csk is the TCP socket associated with the stream parser. Callbacks are
-called by the stream parser.
+     Called to initialize a stream parser. strp is a struct of type
+     strparser that is allocated by the upper layer. sk is the TCP
+     socket associated with the stream parser for use with receive
+     callback mode; in general mode this is set to NULL. Callbacks
+     are called by the stream parser (the callbacks are listed below).
+
+void strp_pause(struct strparser *strp)
+
+     Temporarily pause a stream parser. Message parsing is suspended
+     and no new messages are delivered to the upper layer.
+
+void strp_unpause(struct strparser *strp)
+
+     Unpause a paused stream parser.
+
+void strp_stop(struct strparser *strp);
+
+     strp_stop is called to completely stop stream parser operations.
+     This is called internally when the stream parser encounters an
+     error, and it is called from the upper layer to stop parsing
+     operations.
+
+void strp_done(struct strparser *strp);
+
+     strp_done is called to release any resources held by the stream
+     parser instance. This must be called after the stream processor
+     has been stopped.
+ +int strp_process(struct strparser *strp, struct sk_buff *orig_skb, + unsigned int orig_offset, size_t orig_len, + size_t max_msg_size, long timeo) + + strp_process is called in general mode for a stream parser to + parse an sk_buff. The number of bytes processed or a negative + error number is returned. Note that strp_process does not + consume the sk_buff. max_msg_size is maximum size the stream + parser will parse. timeo is timeout for completing a message. + +void strp_data_ready(struct strparser *strp); + + The upper layer calls strp_tcp_data_ready when data is ready on + the lower socket for strparser to process. This should be called + from a data_ready callback that is set on the socket. Note that + maximum messages size is the limit of the receive socket + buffer and message timeout is the receive timeout for the socket. + +void strp_check_rcv(struct strparser *strp); + + strp_check_rcv is called to check for new messages on the socket. + This is normally called at initialization of a stream parser + instance or after strp_unpause. Callbacks ---------- +========= -There are four callbacks: +There are six callbacks: int (*parse_msg)(struct strparser *strp, struct sk_buff *skb); parse_msg is called to determine the length of the next message in the stream. The upper layer must implement this function. It should parse the sk_buff as containing the headers for the - next application layer messages in the stream. + next application layer message in the stream. - The skb->cb in the input skb is a struct strp_rx_msg. Only + The skb->cb in the input skb is a struct strp_msg. Only the offset field is relevant in parse_msg and gives the offset where the message starts in the skb. @@ -50,26 +112,41 @@ int (*parse_msg)(struct strparser *strp, struct sk_buff *skb); -ESTRPIPE : current message should not be processed by the kernel, return control of the socket to userspace which can proceed to read the messages itself - other < 0 : Error is parsing, give control back to userspace + other < 0 : Error in parsing, give control back to userspace assuming that synchronization is lost and the stream is unrecoverable (application expected to close TCP socket) In the case that an error is returned (return value is less than - zero) the stream parser will set the error on TCP socket and wake - it up. If parse_msg returned -ESTRPIPE and the stream parser had - previously read some bytes for the current message, then the error - set on the attached socket is ENODATA since the stream is - unrecoverable in that case. + zero) and the parser is in receive callback mode, then it will set + the error on TCP socket and wake it up. If parse_msg returned + -ESTRPIPE and the stream parser had previously read some bytes for + the current message, then the error set on the attached socket is + ENODATA since the stream is unrecoverable in that case. + +void (*lock)(struct strparser *strp) + + The lock callback is called to lock the strp structure when + the strparser is performing an asynchronous operation (such as + processing a timeout). In receive callback mode the default + function is to lock_sock for the associated socket. In general + mode the callback must be set appropriately. + +void (*unlock)(struct strparser *strp) + + The unlock callback is called to release the lock obtained + by the lock callback. In receive callback mode the default + function is release_sock for the associated socket. In general + mode the callback must be set appropriately. 
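+
+As an illustration of the parse_msg contract described above, the sketch
+below parses a simple length-prefixed framing: a 4-byte big-endian payload
+length followed by the payload. The framing and the function name are
+assumptions made for this example only; they are not part of the strparser
+API.
+
+	#include <linux/skbuff.h>
+	#include <net/strparser.h>
+
+	/* Return the total message length (header plus payload), 0 if more
+	 * data is needed before the length is known, or a negative error.
+	 */
+	static int example_parse_msg(struct strparser *strp,
+				     struct sk_buff *skb)
+	{
+		struct strp_msg *stm = (struct strp_msg *)skb->cb;
+		__be32 hdr;
+		int err;
+
+		/* Wait until the whole length header has arrived. */
+		if (skb->len < stm->offset + sizeof(hdr))
+			return 0;
+
+		err = skb_copy_bits(skb, stm->offset, &hdr, sizeof(hdr));
+		if (err < 0)
+			return err;
+
+		return sizeof(hdr) + be32_to_cpu(hdr);
+	}
+
+In receive callback mode such a function would be installed as the
+parse_msg member of the strp_callbacks structure passed to strp_init.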
void (*rcv_msg)(struct strparser *strp, struct sk_buff *skb); rcv_msg is called when a full message has been received and is queued. The callee must consume the sk_buff; it can call strp_pause to prevent any further messages from being - received in rcv_msg (see strp_pause below). This callback + received in rcv_msg (see strp_pause above). This callback must be set. - The skb->cb in the input skb is a struct strp_rx_msg. This + The skb->cb in the input skb is a struct strp_msg. This struct contains two fields: offset and full_len. Offset is where the message starts in the skb, and full_len is the the length of the message. skb->len - offset may be greater @@ -78,59 +155,53 @@ void (*rcv_msg)(struct strparser *strp, struct sk_buff *skb); int (*read_sock_done)(struct strparser *strp, int err); read_sock_done is called when the stream parser is done reading - the TCP socket. The stream parser may read multiple messages - in a loop and this function allows cleanup to occur when existing - the loop. If the callback is not set (NULL in strp_init) a - default function is used. + the TCP socket in receive callback mode. The stream parser may + read multiple messages in a loop and this function allows cleanup + to occur when exiting the loop. If the callback is not set (NULL + in strp_init) a default function is used. void (*abort_parser)(struct strparser *strp, int err); This function is called when stream parser encounters an error - in parsing. The default function stops the stream parser for the - TCP socket and sets the error in the socket. The default function - can be changed by setting the callback to non-NULL in strp_init. - -Functions ---------- - -The upper layer calls strp_tcp_data_ready when data is ready on the lower -socket for strparser to process. This should be called from a data_ready -callback that is set on the socket. - -strp_stop is called to completely stop stream parser operations. This -is called internally when the stream parser encounters an error, and -it is called from the upper layer when unattaching a TCP socket. - -strp_done is called to unattach the stream parser from the TCP socket. -This must be called after the stream processor has be stopped. - -strp_check_rcv is called to check for new messages on the socket. This -is normally called at initialization of the a stream parser instance -of after strp_unpause. + in parsing. The default function stops the stream parser and + sets the error in the socket if the parser is in receive callback + mode. The default function can be changed by setting the callback + to non-NULL in strp_init. Statistics ----------- +========== -Various counters are kept for each stream parser for a TCP socket. -These are in the strp_stats structure. strp_aggr_stats is a convenience -structure for accumulating statistics for multiple stream parser -instances. save_strp_stats and aggregate_strp_stats are helper functions -to save and aggregate statistics. +Various counters are kept for each stream parser instance. These are in +the strp_stats structure. strp_aggr_stats is a convenience structure for +accumulating statistics for multiple stream parser instances. +save_strp_stats and aggregate_strp_stats are helper functions to save +and aggregate statistics. Message assembly limits ------------------------ +======================= The stream parser provide mechanisms to limit the resources consumed by message assembly. -A timer is set when assembly starts for a new message. 
The message -timeout is taken from rcvtime for the associated TCP socket. If the -timer fires before assembly completes the stream parser is aborted -and the ETIMEDOUT error is set on the TCP socket. +A timer is set when assembly starts for a new message. In receive +callback mode the message timeout is taken from rcvtime for the +associated TCP socket. In general mode, the timeout is passed as an +argument in strp_process. If the timer fires before assembly completes +the stream parser is aborted and the ETIMEDOUT error is set on the TCP +socket if in receive callback mode. + +In receive callback mode, message length is limited to the receive +buffer size of the associated TCP socket. If the length returned by +parse_msg is greater than the socket buffer size then the stream parser +is aborted with EMSGSIZE error set on the TCP socket. Note that this +makes the maximum size of receive skbuffs for a socket with a stream +parser to be 2*sk_rcvbuf of the TCP socket. + +In general mode the message length limit is passed in as an argument +to strp_process. + +Author +====== + +Tom Herbert (tom@quantonium.net) -Message length is limited to the receive buffer size of the associated -TCP socket. If the length returned by parse_msg is greater than -the socket buffer size then the stream parser is aborted with -EMSGSIZE error set on the TCP socket. Note that this makes the -maximum size of receive skbuffs for a socket with a stream parser -to be 2*sk_rcvbuf of the TCP socket. diff --git a/Documentation/networking/switchdev.txt b/Documentation/networking/switchdev.txt index 5e40e1f68873..82236a17b5e6 100644 --- a/Documentation/networking/switchdev.txt +++ b/Documentation/networking/switchdev.txt @@ -13,42 +13,42 @@ an example setup using a data-center-class switch ASIC chip. Other setups with SR-IOV or soft switches, such as OVS, are possible. 
-                             User-space tools + User-space tools -       user space                   | -      +-------------------------------------------------------------------+ -       kernel                       | Netlink -                                    | -                     +--------------+-------------------------------+ -                     |         Network stack                        | -                     |           (Linux)                            | -                     |                                              | -                     +----------------------------------------------+ + user space | + +-------------------------------------------------------------------+ + kernel | Netlink + | + +--------------+-------------------------------+ + | Network stack | + | (Linux) | + | | + +----------------------------------------------+ sw1p2 sw1p4 sw1p6 -                      sw1p1  + sw1p3 +  sw1p5 +         eth1 -                        +    |    +    |    +    |            + -                        |    |    |    |    |    |            | -                     +--+----+----+----+-+--+----+---+  +-----+-----+ -                     |         Switch driver         |  |    mgmt   | -                     |        (this document)        |  |   driver  | -                     |                               |  |           | -                     +--------------+----------------+  +-----------+ -                                    | -       kernel                       | HW bus (eg PCI) -      +-------------------------------------------------------------------+ -       hardware                     | -                     +--------------+---+------------+ -                     |         Switch device (sw1)   | -                     |  +----+                       +--------+ -                     |  |    v offloaded data path   | mgmt port -                     |  |    |                       | -                     +--|----|----+----+----+----+---+ -                        |    |    |    |    |    | -                        +    +    +    +    +    + -                       p1   p2   p3   p4   p5   p6 + sw1p1 + sw1p3 + sw1p5 + eth1 + + | + | + | + + | | | | | | | + +--+----+----+----+----+----+---+ +-----+-----+ + | Switch driver | | mgmt | + | (this document) | | driver | + | | | | + +--------------+----------------+ +-----------+ + | + kernel | HW bus (eg PCI) + +-------------------------------------------------------------------+ + hardware | + +--------------+----------------+ + | Switch device (sw1) | + | +----+ +--------+ + | | v offloaded data path | mgmt port + | | | | + +--|----|----+----+----+----+---+ + | | | | | | + + + + + + + + p1 p2 p3 p4 p5 p6 -                             front-panel ports + front-panel ports Fig 1. diff --git a/Documentation/power/states.txt b/Documentation/power/states.txt deleted file mode 100644 index bc4548245a24..000000000000 --- a/Documentation/power/states.txt +++ /dev/null @@ -1,125 +0,0 @@ -System Power Management Sleep States - -(C) 2014 Intel Corp., Rafael J. Wysocki - -The kernel supports up to four system sleep states generically, although three -of them depend on the platform support code to implement the low-level details -for each state. - -The states are represented by strings that can be read or written to the -/sys/power/state file. 
Those strings may be "mem", "standby", "freeze" and -"disk", where the last three always represent Power-On Suspend (if supported), -Suspend-To-Idle and hibernation (Suspend-To-Disk), respectively. - -The meaning of the "mem" string is controlled by the /sys/power/mem_sleep file. -It contains strings representing the available modes of system suspend that may -be triggered by writing "mem" to /sys/power/state. These modes are "s2idle" -(Suspend-To-Idle), "shallow" (Power-On Suspend) and "deep" (Suspend-To-RAM). -The "s2idle" mode is always available, while the other ones are only available -if supported by the platform (if not supported, the strings representing them -are not present in /sys/power/mem_sleep). The string representing the suspend -mode to be used subsequently is enclosed in square brackets. Writing one of -the other strings present in /sys/power/mem_sleep to it causes the suspend mode -to be used subsequently to change to the one represented by that string. - -Consequently, there are two ways to cause the system to go into the -Suspend-To-Idle sleep state. The first one is to write "freeze" directly to -/sys/power/state. The second one is to write "s2idle" to /sys/power/mem_sleep -and then to write "mem" to /sys/power/state. Similarly, there are two ways -to cause the system to go into the Power-On Suspend sleep state (the strings to -write to the control files in that case are "standby" or "shallow" and "mem", -respectively) if that state is supported by the platform. In turn, there is -only one way to cause the system to go into the Suspend-To-RAM state (write -"deep" into /sys/power/mem_sleep and "mem" into /sys/power/state). - -The default suspend mode (ie. the one to be used without writing anything into -/sys/power/mem_sleep) is either "deep" (if Suspend-To-RAM is supported) or -"s2idle", but it can be overridden by the value of the "mem_sleep_default" -parameter in the kernel command line. - -The properties of all of the sleep states are described below. - - -State: Suspend-To-Idle -ACPI state: S0 -Label: "s2idle" ("freeze") - -This state is a generic, pure software, light-weight, system sleep state. -It allows more energy to be saved relative to runtime idle by freezing user -space and putting all I/O devices into low-power states (possibly -lower-power than available at run time), such that the processors can -spend more time in their idle states. - -This state can be used for platforms without Power-On Suspend/Suspend-to-RAM -support, or it can be used in addition to Suspend-to-RAM to provide reduced -resume latency. It is always supported. - - -State: Standby / Power-On Suspend -ACPI State: S1 -Label: "shallow" ("standby") - -This state, if supported, offers moderate, though real, power savings, while -providing a relatively low-latency transition back to a working system. No -operating state is lost (the CPU retains power), so the system easily starts up -again where it left off. - -In addition to freezing user space and putting all I/O devices into low-power -states, which is done for Suspend-To-Idle too, nonboot CPUs are taken offline -and all low-level system functions are suspended during transitions into this -state. For this reason, it should allow more energy to be saved relative to -Suspend-To-Idle, but the resume latency will generally be greater than for that -state. 
- - -State: Suspend-to-RAM -ACPI State: S3 -Label: "deep" - -This state, if supported, offers significant power savings as everything in the -system is put into a low-power state, except for memory, which should be placed -into the self-refresh mode to retain its contents. All of the steps carried out -when entering Power-On Suspend are also carried out during transitions to STR. -Additional operations may take place depending on the platform capabilities. In -particular, on ACPI systems the kernel passes control to the BIOS (platform -firmware) as the last step during STR transitions and that usually results in -powering down some more low-level components that aren't directly controlled by -the kernel. - -System and device state is saved and kept in memory. All devices are suspended -and put into low-power states. In many cases, all peripheral buses lose power -when entering STR, so devices must be able to handle the transition back to the -"on" state. - -For at least ACPI, STR requires some minimal boot-strapping code to resume the -system from it. This may be the case on other platforms too. - - -State: Suspend-to-disk -ACPI State: S4 -Label: "disk" - -This state offers the greatest power savings, and can be used even in -the absence of low-level platform support for power management. This -state operates similarly to Suspend-to-RAM, but includes a final step -of writing memory contents to disk. On resume, this is read and memory -is restored to its pre-suspend state. - -STD can be handled by the firmware or the kernel. If it is handled by -the firmware, it usually requires a dedicated partition that must be -setup via another operating system for it to use. Despite the -inconvenience, this method requires minimal work by the kernel, since -the firmware will also handle restoring memory contents on resume. - -For suspend-to-disk, a mechanism called 'swsusp' (Swap Suspend) is used -to write memory contents to free swap space. swsusp has some restrictive -requirements, but should work in most cases. Some, albeit outdated, -documentation can be found in Documentation/power/swsusp.txt. -Alternatively, userspace can do most of the actual suspend to disk work, -see userland-swsusp.txt. - -Once memory state is written to disk, the system may either enter a -low-power state (like ACPI S4), or it may simply power down. Powering -down offers greater savings, and allows this mechanism to work on any -system. However, entering a real low-power state allows the user to -trigger wake up events (e.g. pressing a key or opening a laptop lid). diff --git a/Documentation/pps/pps.txt b/Documentation/pps/pps.txt index 1fdbd5447216..99f5d8c4c652 100644 --- a/Documentation/pps/pps.txt +++ b/Documentation/pps/pps.txt @@ -48,12 +48,12 @@ problem: time_pps_create(). This implies that the source has a /dev/... entry. This assumption is -ok for the serial and parallel port, where you can do something +OK for the serial and parallel port, where you can do something useful besides(!) the gathering of timestamps as it is the central -task for a PPS-API. But this assumption does not work for a single +task for a PPS API. But this assumption does not work for a single purpose GPIO line. In this case even basic file-related functionality (like read() and write()) makes no sense at all and should not be a -precondition for the use of a PPS-API. +precondition for the use of a PPS API. The problem can be simply solved if you consider that a PPS source is not always connected with a GPS data source. 
@@ -88,13 +88,13 @@ Coding example -------------- To register a PPS source into the kernel you should define a struct -pps_source_info_s as follows: +pps_source_info as follows: static struct pps_source_info pps_ktimer_info = { .name = "ktimer", .path = "", - .mode = PPS_CAPTUREASSERT | PPS_OFFSETASSERT | \ - PPS_ECHOASSERT | \ + .mode = PPS_CAPTUREASSERT | PPS_OFFSETASSERT | + PPS_ECHOASSERT | PPS_CANWAIT | PPS_TSFMT_TSPEC, .echo = pps_ktimer_echo, .owner = THIS_MODULE, @@ -108,13 +108,13 @@ initialization routine as follows: The pps_register_source() prototype is: - int pps_register_source(struct pps_source_info_s *info, int default_params) + int pps_register_source(struct pps_source_info *info, int default_params) where "info" is a pointer to a structure that describes a particular PPS source, "default_params" tells the system what the initial default parameters for the device should be (it is obvious that these parameters must be a subset of ones defined in the struct -pps_source_info_s which describe the capabilities of the driver). +pps_source_info which describe the capabilities of the driver). Once you have registered a new PPS source into the system you can signal an assert event (for example in the interrupt handler routine) @@ -142,8 +142,10 @@ If the SYSFS filesystem is enabled in the kernel it provides a new class: Every directory is the ID of a PPS sources defined in the system and inside you find several files: - $ ls /sys/class/pps/pps0/ - assert clear echo mode name path subsystem@ uevent + $ ls -F /sys/class/pps/pps0/ + assert dev mode path subsystem@ + clear echo name power/ uevent + Inside each "assert" and "clear" file you can find the timestamp and a sequence number: @@ -154,32 +156,32 @@ sequence number: Where before the "#" is the timestamp in seconds; after it is the sequence number. Other files are: -* echo: reports if the PPS source has an echo function or not; + * echo: reports if the PPS source has an echo function or not; -* mode: reports available PPS functioning modes; + * mode: reports available PPS functioning modes; -* name: reports the PPS source's name; + * name: reports the PPS source's name; -* path: reports the PPS source's device path, that is the device the - PPS source is connected to (if it exists). + * path: reports the PPS source's device path, that is the device the + PPS source is connected to (if it exists). Testing the PPS support ----------------------- In order to test the PPS support even without specific hardware you can use -the ktimer driver (see the client subsection in the PPS configuration menu) +the pps-ktimer driver (see the client subsection in the PPS configuration menu) and the userland tools available in your distribution's pps-tools package, -http://linuxpps.org , or https://github.com/ago/pps-tools . +http://linuxpps.org , or https://github.com/redlab-i/pps-tools. -Once you have enabled the compilation of ktimer just modprobe it (if +Once you have enabled the compilation of pps-ktimer just modprobe it (if not statically compiled): - # modprobe ktimer + # modprobe pps-ktimer and the run ppstest as follow: - $ ./ppstest /dev/pps0 + $ ./ppstest /dev/pps1 trying PPS source "/dev/pps1" found PPS source "/dev/pps1" ok, found 1 source(s), now start fetching data... 
@@ -187,7 +189,7 @@ and the run ppstest as follow: source 0 - assert 1186592700.388931295, sequence: 365 - clear 0.000000000, sequence: 0 source 0 - assert 1186592701.389032765, sequence: 366 - clear 0.000000000, sequence: 0 -Please, note that to compile userland programs you need the file timepps.h . +Please note that to compile userland programs, you need the file timepps.h. This is available in the pps-tools repository mentioned above. diff --git a/Documentation/rbtree.txt b/Documentation/rbtree.txt index b8a8c70b0188..c42a21b99046 100644 --- a/Documentation/rbtree.txt +++ b/Documentation/rbtree.txt @@ -193,6 +193,39 @@ Example:: for (node = rb_first(&mytree); node; node = rb_next(node)) printk("key=%s\n", rb_entry(node, struct mytype, node)->keystring); +Cached rbtrees +-------------- + +Computing the leftmost (smallest) node is quite a common task for binary +search trees, such as for traversals or users relying on a the particular +order for their own logic. To this end, users can use 'struct rb_root_cached' +to optimize O(logN) rb_first() calls to a simple pointer fetch avoiding +potentially expensive tree iterations. This is done at negligible runtime +overhead for maintanence; albeit larger memory footprint. + +Similar to the rb_root structure, cached rbtrees are initialized to be +empty via: + + struct rb_root_cached mytree = RB_ROOT_CACHED; + +Cached rbtree is simply a regular rb_root with an extra pointer to cache the +leftmost node. This allows rb_root_cached to exist wherever rb_root does, +which permits augmented trees to be supported as well as only a few extra +interfaces: + + struct rb_node *rb_first_cached(struct rb_root_cached *tree); + void rb_insert_color_cached(struct rb_node *, struct rb_root_cached *, bool); + void rb_erase_cached(struct rb_node *node, struct rb_root_cached *); + +Both insert and erase calls have their respective counterpart of augmented +trees: + + void rb_insert_augmented_cached(struct rb_node *node, struct rb_root_cached *, + bool, struct rb_augment_callbacks *); + void rb_erase_augmented_cached(struct rb_node *, struct rb_root_cached *, + struct rb_augment_callbacks *); + + Support for Augmented rbtrees ----------------------------- diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt index ce61d1fe08ca..694968c7523c 100644 --- a/Documentation/sysctl/kernel.txt +++ b/Documentation/sysctl/kernel.txt @@ -75,6 +75,7 @@ show up in /proc/sys/kernel: - reboot-cmd [ SPARC only ] - rtsig-max - rtsig-nr +- seccomp/ ==> Documentation/userspace-api/seccomp_filter.rst - sem - sem_next_id [ sysv ipc ] - sg-big-buff [ generic SCSI device (sg) ] diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt index 28596e03220b..b67044a2575f 100644 --- a/Documentation/sysctl/net.txt +++ b/Documentation/sysctl/net.txt @@ -46,13 +46,13 @@ translate these BPF proglets into native CPU instructions. There are two flavors of JITs, the newer eBPF JIT currently supported on: - x86_64 - arm64 + - arm32 - ppc64 - sparc64 - mips64 - s390x And the older cBPF JIT supported on the following archs: - - arm - mips - ppc - sparc diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt index 48244c42ff52..9baf66a9ef4e 100644 --- a/Documentation/sysctl/vm.txt +++ b/Documentation/sysctl/vm.txt @@ -572,7 +572,9 @@ See Documentation/nommu-mmap.txt for more information. numa_zonelist_order -This sysctl is only for NUMA. +This sysctl is only for NUMA and it is deprecated. Anything but +Node order will fail! 
+ 'where the memory is allocated from' is controlled by zonelists. (This documentation ignores ZONE_HIGHMEM/ZONE_DMA32 for simple explanation. you may be able to read ZONE_DMA as ZONE_DMA32...) diff --git a/Documentation/translations/ko_KR/memory-barriers.txt b/Documentation/translations/ko_KR/memory-barriers.txt index bc80fc0e210f..a7a813258013 100644 --- a/Documentation/translations/ko_KR/memory-barriers.txt +++ b/Documentation/translations/ko_KR/memory-barriers.txt @@ -523,11 +523,11 @@ CPU 에게 기대할 수 있는 최소한의 보장사항 몇가지가 있습니 즉, ACQUIRE 는 최소한의 "취득" 동작처럼, 그리고 RELEASE 는 최소한의 "공개" 처럼 동작한다는 의미입니다. -core-api/atomic_ops.rst 에서 설명되는 어토믹 오퍼레이션들 중에는 완전히 -순서잡힌 것들과 (배리어를 사용하지 않는) 완화된 순서의 것들 외에 ACQUIRE 와 -RELEASE 부류의 것들도 존재합니다. 로드와 스토어를 모두 수행하는 조합된 어토믹 -오퍼레이션에서, ACQUIRE 는 해당 오퍼레이션의 로드 부분에만 적용되고 RELEASE 는 -해당 오퍼레이션의 스토어 부분에만 적용됩니다. +atomic_t.txt 에 설명된 어토믹 오퍼레이션들 중 일부는 완전히 순서잡힌 것들과 +(배리어를 사용하지 않는) 완화된 순서의 것들 외에 ACQUIRE 와 RELEASE 부류의 +것들도 존재합니다. 로드와 스토어를 모두 수행하는 조합된 어토믹 오퍼레이션에서, +ACQUIRE 는 해당 오퍼레이션의 로드 부분에만 적용되고 RELEASE 는 해당 +오퍼레이션의 스토어 부분에만 적용됩니다. 메모리 배리어들은 두 CPU 간, 또는 CPU 와 디바이스 간에 상호작용의 가능성이 있을 때에만 필요합니다. 만약 어떤 코드에 그런 상호작용이 없을 것이 보장된다면, 해당 @@ -617,7 +617,22 @@ RELEASE 부류의 것들도 존재합니다. 로드와 스토어를 모두 수 이 변경은 앞의 처음 두가지 결과 중 하나만이 발생할 수 있고, 세번째의 결과는 발생할 수 없도록 합니다. -데이터 의존성 배리어는 의존적 쓰기에 대해서도 순서를 잡아줍니다: + +[!] 이 상당히 반직관적인 상황은 분리된 캐시를 가지는 기계들에서 가장 잘 +발생하는데, 예를 들면 한 캐시 뱅크는 짝수 번호의 캐시 라인들을 처리하고, 다른 +뱅크는 홀수 번호의 캐시 라인들을 처리하는 경우임을 알아두시기 바랍니다. 포인터 +P 는 짝수 번호 캐시 라인에 저장되어 있고, 변수 B 는 홀수 번호 캐시 라인에 +저장되어 있을 수 있습니다. 여기서 값을 읽어오는 CPU 의 캐시의 홀수 번호 처리 +뱅크는 열심히 일감을 처리중인 반면 홀수 번호 처리 뱅크는 할 일 없이 한가한 +중이라면 포인터 P (&B) 의 새로운 값과 변수 B 의 기존 값 (2) 를 볼 수 있습니다. + + +의존적 쓰기들의 순서를 맞추는데에는 데이터 의존성 배리어가 필요치 않은데, 이는 +리눅스 커널이 지원하는 CPU 들은 (1) 쓰기가 정말로 일어날지, (2) 쓰기가 어디에 +이루어질지, 그리고 (3) 쓰여질 값을 확실히 알기 전까지는 쓰기를 수행하지 않기 +때문입니다. 하지만 "컨트롤 의존성" 섹션과 +Documentation/RCU/rcu_dereference.txt 파일을 주의 깊게 읽어 주시기 바랍니다: +컴파일러는 매우 창의적인 많은 방법으로 종속성을 깰 수 있습니다. CPU 1 CPU 2 =============== =============== @@ -626,28 +641,19 @@ RELEASE 부류의 것들도 존재합니다. 로드와 스토어를 모두 수 <쓰기 배리어> WRITE_ONCE(P, &B); Q = READ_ONCE(P); - <데이터 의존성 배리어> - *Q = 5; + WRITE_ONCE(*Q, 5); -이 데이터 의존성 배리어는 Q 로의 읽기가 *Q 로의 스토어와 순서를 맞추게 -해줍니다. 이는 다음과 같은 결과를 막습니다: +따라서, Q 로의 읽기와 *Q 로의 쓰기 사이에는 데이터 종속성 배리어가 필요치 +않습니다. 달리 말하면, 데이터 종속성 배리어가 없더라도 다음 결과는 생기지 +않습니다: (Q == &B) && (B == 4) 이런 패턴은 드물게 사용되어야 함을 알아 두시기 바랍니다. 무엇보다도, 의존성 순서 규칙의 의도는 쓰기 작업을 -예방- 해서 그로 인해 발생하는 비싼 캐시 미스도 없애려는 것입니다. 이 패턴은 드물게 발생하는 에러 조건 같은것들을 기록하는데 -사용될 수 있고, 이렇게 배리어를 사용해 순서를 지키게 함으로써 그런 기록이 -사라지는 것을 막습니다. - - -[!] 상당히 비직관적인 이 상황은 분리된 캐시를 가진 기계, 예를 들어 한 캐시 -뱅크가 짝수번 캐시 라인을 처리하고 다른 뱅크는 홀수번 캐시 라인을 처리하는 기계 -등에서 가장 잘 발생합니다. 포인터 P 는 홀수 번호의 캐시 라인에 있고, 변수 B 는 -짝수 번호 캐시 라인에 있다고 생각해 봅시다. 그런 상태에서 읽기 작업을 하는 CPU -의 짝수번 뱅크는 할 일이 쌓여 매우 바쁘지만 홀수번 뱅크는 할 일이 없어 아무 -일도 하지 않고 있었다면, 포인터 P 는 새 값 (&B) 을, 그리고 변수 B 는 옛날 값 -(2) 을 가지고 있는 상태가 보여질 수도 있습니다. +사용될 수 있으며, CPU의 자연적인 순서 보장이 그런 기록들을 사라지지 않게 +해줍니다. 데이터 의존성 배리어는 매우 중요한데, 예를 들어 RCU 시스템에서 그렇습니다. @@ -1848,8 +1854,7 @@ Mandatory 배리어들은 SMP 시스템에서도 UP 시스템에서도 SMP 효 이 코드는 객체의 업데이트된 death 마크가 레퍼런스 카운터 감소 동작 *전에* 보일 것을 보장합니다. - 더 많은 정보를 위해선 Documentation/core-api/atomic_ops.rst 문서를 참고하세요. - 어디서 이것들을 사용해야 할지 궁금하다면 "어토믹 오퍼레이션" 서브섹션을 + 더 많은 정보를 위해선 Documentation/atomic_{t,bitops}.txt 문서를 참고하세요. @@ -2468,86 +2473,7 @@ _않습니다_. 전체 메모리 배리어를 내포하고 또 일부는 내포하지 않지만, 커널에서 상당히 의존적으로 사용하는 기능 중 하나입니다. -메모리의 어떤 상태를 수정하고 해당 상태에 대한 (예전의 또는 최신의) 정보를 -리턴하는 어토믹 오퍼레이션은 모두 SMP-조건적 범용 메모리 배리어(smp_mb())를 -실제 오퍼레이션의 앞과 뒤에 내포합니다. 
이런 오퍼레이션은 다음의 것들을 -포함합니다: - - xchg(); - atomic_xchg(); atomic_long_xchg(); - atomic_inc_return(); atomic_long_inc_return(); - atomic_dec_return(); atomic_long_dec_return(); - atomic_add_return(); atomic_long_add_return(); - atomic_sub_return(); atomic_long_sub_return(); - atomic_inc_and_test(); atomic_long_inc_and_test(); - atomic_dec_and_test(); atomic_long_dec_and_test(); - atomic_sub_and_test(); atomic_long_sub_and_test(); - atomic_add_negative(); atomic_long_add_negative(); - test_and_set_bit(); - test_and_clear_bit(); - test_and_change_bit(); - - /* exchange 조건이 성공할 때 */ - cmpxchg(); - atomic_cmpxchg(); atomic_long_cmpxchg(); - atomic_add_unless(); atomic_long_add_unless(); - -이것들은 메모리 배리어 효과가 필요한 ACQUIRE 부류와 RELEASE 부류 오퍼레이션들을 -구현할 때, 그리고 객체 해제를 위해 레퍼런스 카운터를 조정할 때, 암묵적 메모리 -배리어 효과가 필요한 곳 등에 사용됩니다. - - -다음의 오퍼레이션들은 메모리 배리어를 내포하지 _않기_ 때문에 문제가 될 수 -있지만, RELEASE 부류의 오퍼레이션들과 같은 것들을 구현할 때 사용될 수도 -있습니다: - - atomic_set(); - set_bit(); - clear_bit(); - change_bit(); - -이것들을 사용할 때에는 필요하다면 적절한 (예를 들면 smp_mb__before_atomic() -같은) 메모리 배리어가 명시적으로 함께 사용되어야 합니다. - - -아래의 것들도 메모리 배리어를 내포하지 _않기_ 때문에, 일부 환경에서는 (예를 -들면 smp_mb__before_atomic() 과 같은) 명시적인 메모리 배리어 사용이 필요합니다. - - atomic_add(); - atomic_sub(); - atomic_inc(); - atomic_dec(); - -이것들이 통계 생성을 위해 사용된다면, 그리고 통계 데이터 사이에 관계가 존재하지 -않는다면 메모리 배리어는 필요치 않을 겁니다. - -객체의 수명을 관리하기 위해 레퍼런스 카운팅 목적으로 사용된다면, 레퍼런스 -카운터는 락으로 보호되는 섹션에서만 조정되거나 호출하는 쪽이 이미 충분한 -레퍼런스를 잡고 있을 것이기 때문에 메모리 배리어는 아마 필요 없을 겁니다. - -만약 어떤 락을 구성하기 위해 사용된다면, 락 관련 동작은 일반적으로 작업을 특정 -순서대로 진행해야 하므로 메모리 배리어가 필요할 수 있습니다. - -기본적으로, 각 사용처에서는 메모리 배리어가 필요한지 아닌지 충분히 고려해야 -합니다. - -아래의 오퍼레이션들은 특별한 락 관련 동작들입니다: - - test_and_set_bit_lock(); - clear_bit_unlock(); - __clear_bit_unlock(); - -이것들은 ACQUIRE 류와 RELEASE 류의 오퍼레이션들을 구현합니다. 락 관련 도구를 -구현할 때에는 이것들을 좀 더 선호하는 편이 나은데, 이것들의 구현은 많은 -아키텍쳐에서 최적화 될 수 있기 때문입니다. - -[!] 이런 상황에 사용할 수 있는 특수한 메모리 배리어 도구들이 있습니다만, 일부 -CPU 에서는 사용되는 어토믹 인스트럭션 자체에 메모리 배리어가 내포되어 있어서 -어토믹 오퍼레이션과 메모리 배리어를 함께 사용하는 게 불필요한 일이 될 수 -있는데, 그런 경우에 이 특수 메모리 배리어 도구들은 no-op 이 되어 실질적으로 -아무일도 하지 않습니다. - -더 많은 내용을 위해선 Documentation/core-api/atomic_ops.rst 를 참고하세요. +더 많은 내용을 위해선 Documentation/atomic_t.txt 를 참고하세요. 디바이스 액세스 diff --git a/Documentation/userspace-api/seccomp_filter.rst b/Documentation/userspace-api/seccomp_filter.rst index f71eb5ef1f2d..099c412951d6 100644 --- a/Documentation/userspace-api/seccomp_filter.rst +++ b/Documentation/userspace-api/seccomp_filter.rst @@ -87,11 +87,16 @@ Return values A seccomp filter may return any of the following values. If multiple filters exist, the return value for the evaluation of a given system call will always use the highest precedent value. (For example, -``SECCOMP_RET_KILL`` will always take precedence.) +``SECCOMP_RET_KILL_PROCESS`` will always take precedence.) In precedence order, they are: -``SECCOMP_RET_KILL``: +``SECCOMP_RET_KILL_PROCESS``: + Results in the entire process exiting immediately without executing + the system call. The exit status of the task (``status & 0x7f``) + will be ``SIGSYS``, not ``SIGKILL``. + +``SECCOMP_RET_KILL_THREAD``: Results in the task exiting immediately without executing the system call. The exit status of the task (``status & 0x7f``) will be ``SIGSYS``, not ``SIGKILL``. @@ -141,6 +146,15 @@ In precedence order, they are: allow use of ptrace, even of other sandboxed processes, without extreme care; ptracers can use this mechanism to escape.) +``SECCOMP_RET_LOG``: + Results in the system call being executed after it is logged. 
This + should be used by application developers to learn which syscalls their + application needs without having to iterate through multiple test and + development cycles to build the list. + + This action will only be logged if "log" is present in the + actions_logged sysctl string. + ``SECCOMP_RET_ALLOW``: Results in the system call being executed. @@ -169,7 +183,41 @@ The ``samples/seccomp/`` directory contains both an x86-specific example and a more generic example of a higher level macro interface for BPF program generation. +Sysctls +======= +Seccomp's sysctl files can be found in the ``/proc/sys/kernel/seccomp/`` +directory. Here's a description of each file in that directory: + +``actions_avail``: + A read-only ordered list of seccomp return values (refer to the + ``SECCOMP_RET_*`` macros above) in string form. The ordering, from + left-to-right, is the least permissive return value to the most + permissive return value. + + The list represents the set of seccomp return values supported + by the kernel. A userspace program may use this list to + determine if the actions found in the ``seccomp.h``, when the + program was built, differs from the set of actions actually + supported in the current running kernel. + +``actions_logged``: + A read-write ordered list of seccomp return values (refer to the + ``SECCOMP_RET_*`` macros above) that are allowed to be logged. Writes + to the file do not need to be in ordered form but reads from the file + will be ordered in the same way as the actions_avail sysctl. + + It is important to note that the value of ``actions_logged`` does not + prevent certain actions from being logged when the audit subsystem is + configured to audit a task. If the action is not found in + ``actions_logged`` list, the final decision on whether to audit the + action for that task is ultimately left up to the audit subsystem to + decide for all seccomp return values other than ``SECCOMP_RET_ALLOW``. + + The ``allow`` string is not accepted in the ``actions_logged`` sysctl + as it is not possible to log ``SECCOMP_RET_ALLOW`` actions. Attempting + to write ``allow`` to the sysctl will result in an EINVAL being + returned. Adding architecture support =========================== diff --git a/Documentation/virtual/kvm/devices/arm-vgic.txt b/Documentation/virtual/kvm/devices/arm-vgic.txt index b2f60ca8b60c..b3ce12643553 100644 --- a/Documentation/virtual/kvm/devices/arm-vgic.txt +++ b/Documentation/virtual/kvm/devices/arm-vgic.txt @@ -83,6 +83,11 @@ Groups: Bits for undefined preemption levels are RAZ/WI. + Note that this differs from a CPU's view of the APRs on hardware in which + a GIC without the security extensions expose group 0 and group 1 active + priorities in separate register groups, whereas we show a combined view + similar to GICv2's GICH_APR. + For historical reasons and to provide ABI compatibility with userspace we export the GICC_PMR register in the format of the GICH_VMCR.VMPriMask field in the lower 5 bits of a word, meaning that userspace must always diff --git a/Documentation/virtual/kvm/devices/vm.txt b/Documentation/virtual/kvm/devices/vm.txt index 903fc926860b..95ca68d663a4 100644 --- a/Documentation/virtual/kvm/devices/vm.txt +++ b/Documentation/virtual/kvm/devices/vm.txt @@ -176,7 +176,8 @@ Architectures: s390 3.1. ATTRIBUTE: KVM_S390_VM_TOD_HIGH -Allows user space to set/get the TOD clock extension (u8). +Allows user space to set/get the TOD clock extension (u8) (superseded by +KVM_S390_VM_TOD_EXT). 
Parameters: address of a buffer in user space to store the data (u8) to Returns: -EFAULT if the given address is not accessible from kernel space @@ -190,6 +191,17 @@ the POP (u64). Parameters: address of a buffer in user space to store the data (u64) to Returns: -EFAULT if the given address is not accessible from kernel space +3.3. ATTRIBUTE: KVM_S390_VM_TOD_EXT +Allows user space to set/get bits 0-63 of the TOD clock register as defined in +the POP (u64). If the guest CPU model supports the TOD clock extension (u8), it +also allows user space to get/set it. If the guest CPU model does not support +it, it is stored as 0 and not allowed to be set to a value != 0. + +Parameters: address of a buffer in user space to store the data + (kvm_s390_vm_tod_clock) to +Returns: -EFAULT if the given address is not accessible from kernel space + -EINVAL if setting the TOD clock extension to != 0 is not supported + 4. GROUP: KVM_S390_VM_CRYPTO Architectures: s390 diff --git a/Documentation/vm/hmm.txt b/Documentation/vm/hmm.txt new file mode 100644 index 000000000000..4d3aac9f4a5d --- /dev/null +++ b/Documentation/vm/hmm.txt @@ -0,0 +1,384 @@ +Heterogeneous Memory Management (HMM) + +Transparently allow any component of a program to use any memory region of said +program with a device without using device specific memory allocator. This is +becoming a requirement to simplify the use of advance heterogeneous computing +where GPU, DSP or FPGA are use to perform various computations. + +This document is divided as follow, in the first section i expose the problems +related to the use of a device specific allocator. The second section i expose +the hardware limitations that are inherent to many platforms. The third section +gives an overview of HMM designs. The fourth section explains how CPU page- +table mirroring works and what is HMM purpose in this context. Fifth section +deals with how device memory is represented inside the kernel. Finaly the last +section present the new migration helper that allow to leverage the device DMA +engine. + + +1) Problems of using device specific memory allocator: +2) System bus, device memory characteristics +3) Share address space and migration +4) Address space mirroring implementation and API +5) Represent and manage device memory from core kernel point of view +6) Migrate to and from device memory +7) Memory cgroup (memcg) and rss accounting + + +------------------------------------------------------------------------------- + +1) Problems of using device specific memory allocator: + +Device with large amount of on board memory (several giga bytes) like GPU have +historically manage their memory through dedicated driver specific API. This +creates a disconnect between memory allocated and managed by device driver and +regular application memory (private anonymous, share memory or regular file +back memory). From here on i will refer to this aspect as split address space. +I use share address space to refer to the opposite situation ie one in which +any memory region can be use by device transparently. + +Split address space because device can only access memory allocated through the +device specific API. This imply that all memory object in a program are not +equal from device point of view which complicate large program that rely on a +wide set of libraries. 
+ +Concretly this means that code that wants to leverage device like GPU need to +copy object between genericly allocated memory (malloc, mmap private/share/) +and memory allocated through the device driver API (this still end up with an +mmap but of the device file). + +For flat dataset (array, grid, image, ...) this isn't too hard to achieve but +complex data-set (list, tree, ...) are hard to get right. Duplicating a complex +data-set need to re-map all the pointer relations between each of its elements. +This is error prone and program gets harder to debug because of the duplicate +data-set. + +Split address space also means that library can not transparently use data they +are getting from core program or other library and thus each library might have +to duplicate its input data-set using specific memory allocator. Large project +suffer from this and waste resources because of the various memory copy. + +Duplicating each library API to accept as input or output memory allocted by +each device specific allocator is not a viable option. It would lead to a +combinatorial explosions in the library entry points. + +Finaly with the advance of high level language constructs (in C++ but in other +language too) it is now possible for compiler to leverage GPU or other devices +without even the programmer knowledge. Some of compiler identified patterns are +only do-able with a share address. It is as well more reasonable to use a share +address space for all the other patterns. + + +------------------------------------------------------------------------------- + +2) System bus, device memory characteristics + +System bus cripple share address due to few limitations. Most system bus only +allow basic memory access from device to main memory, even cache coherency is +often optional. Access to device memory from CPU is even more limited, most +often than not it is not cache coherent. + +If we only consider the PCIE bus than device can access main memory (often +through an IOMMU) and be cache coherent with the CPUs. However it only allows +a limited set of atomic operation from device on main memory. This is worse +in the other direction the CPUs can only access a limited range of the device +memory and can not perform atomic operations on it. Thus device memory can not +be consider like regular memory from kernel point of view. + +Another crippling factor is the limited bandwidth (~32GBytes/s with PCIE 4.0 +and 16 lanes). This is 33 times less that fastest GPU memory (1 TBytes/s). +The final limitation is latency, access to main memory from the device has an +order of magnitude higher latency than when the device access its own memory. + +Some platform are developing new system bus or additions/modifications to PCIE +to address some of those limitations (OpenCAPI, CCIX). They mainly allow two +way cache coherency between CPU and device and allow all atomic operations the +architecture supports. Saddly not all platform are following this trends and +some major architecture are left without hardware solutions to those problems. + +So for share address space to make sense not only we must allow device to +access any memory memory but we must also permit any memory to be migrated to +device memory while device is using it (blocking CPU access while it happens). + + +------------------------------------------------------------------------------- + +3) Share address space and migration + +HMM intends to provide two main features. 
+The first one is to share the address space by duplicating the CPU page table
+in the device page table, so that the same address points to the same memory,
+and this for any valid main memory address in the process address space.
+
+To achieve this, HMM offers a set of helpers to populate the device page table
+while keeping track of CPU page table updates. Device page table updates are
+not as easy as CPU page table updates. To update the device page table, you
+must allocate a buffer (or use a pool of pre-allocated buffers) and write
+GPU-specific commands into it to perform the update (unmap, cache
+invalidations, flush, ...). This cannot be done through common code for all
+devices, which is why HMM provides helpers to factor out everything that can
+be, while leaving the hardware-specific details to the device driver.
+
+The second mechanism HMM provides is a new kind of ZONE_DEVICE memory that
+allows allocating a struct page for each page of device memory. Those pages
+are special because the CPU cannot map them. However, they allow migrating
+main memory to device memory using the existing migration mechanisms, and from
+the CPU's point of view everything looks as if the page had been swapped out
+to disk. Using a struct page gives the easiest and cleanest integration with
+existing mm mechanisms. Here again, HMM only provides helpers: first to
+hotplug new ZONE_DEVICE memory for the device memory, and second to perform
+the migration. Policy decisions about what to migrate and when are left to the
+device driver.
+
+Note that any CPU access to a device page triggers a page fault and a
+migration back to main memory, i.e. when a page backing a given address A is
+migrated from a main memory page to a device page, then any CPU access to
+address A triggers a page fault that initiates a migration back to main
+memory.
+
+
+With these two features, HMM not only allows a device to mirror a process
+address space and keep the CPU and device page tables synchronized, but also
+leverages device memory by migrating the parts of the data set that are
+actively used by the device.
+
+
+-------------------------------------------------------------------------------
+
+4) Address space mirroring implementation and API
+
+The main objective of address space mirroring is to allow duplicating a range
+of the CPU page table into a device page table, with HMM helping to keep both
+synchronized. A device driver that wants to mirror a process address space
+must start with the registration of an hmm_mirror struct:
+
+ int hmm_mirror_register(struct hmm_mirror *mirror,
+                         struct mm_struct *mm);
+ int hmm_mirror_register_locked(struct hmm_mirror *mirror,
+                                struct mm_struct *mm);
+
+The locked variant is to be used when the driver is already holding the
+mmap_sem of the mm in write mode. The mirror struct has a set of callbacks
+that are used to propagate CPU page table updates:
+
+ struct hmm_mirror_ops {
+     /* sync_cpu_device_pagetables() - synchronize page tables
+      *
+      * @mirror: pointer to struct hmm_mirror
+      * @update_type: type of update that occurred to the CPU page table
+      * @start: virtual start address of the range to update
+      * @end: virtual end address of the range to update
+      *
+      * This callback ultimately originates from mmu_notifiers when the CPU
+      * page table is updated. The device driver must update its page table
+      * in response to this callback. The update argument tells what action
+      * to perform.
+      *
+      * The device driver must not return from this callback until the device
+      * page tables are completely updated (TLBs flushed, etc.); this is a
+      * synchronous call.
+      */
+     void (*update)(struct hmm_mirror *mirror,
+                    enum hmm_update action,
+                    unsigned long start,
+                    unsigned long end);
+ };
+
+The device driver must perform the update to the range following the action
+(turn the range read only, fully unmap it, ...). The device must be done with
+the update by the time the driver callback returns.
+
+
+When the device driver wants to populate a range of virtual addresses, it can
+use either:
+ int hmm_vma_get_pfns(struct vm_area_struct *vma,
+                      struct hmm_range *range,
+                      unsigned long start,
+                      unsigned long end,
+                      hmm_pfn_t *pfns);
+ int hmm_vma_fault(struct vm_area_struct *vma,
+                   struct hmm_range *range,
+                   unsigned long start,
+                   unsigned long end,
+                   hmm_pfn_t *pfns,
+                   bool write,
+                   bool block);
+
+The first one (hmm_vma_get_pfns()) will only fetch present CPU page table
+entries and will not trigger a page fault on missing or non-present entries.
+The second one does trigger a page fault on missing or read-only entries if
+the write parameter is true. Page faults use the generic mm page fault code
+path, just like a CPU page fault.
+
+Both functions copy the CPU page table into their pfns array argument. Each
+entry in that array corresponds to an address in the virtual range. HMM
+provides a set of flags to help the driver identify special CPU page table
+entries.
+
+Locking around the update() callback is the most important aspect the driver
+must respect in order to keep things properly synchronized. The usage pattern
+is:
+
+ int driver_populate_range(...)
+ {
+      struct hmm_range range;
+      ...
+ again:
+      ret = hmm_vma_get_pfns(vma, &range, start, end, pfns);
+      if (ret)
+          return ret;
+      take_lock(driver->update);
+      if (!hmm_vma_range_done(vma, &range)) {
+          release_lock(driver->update);
+          goto again;
+      }
+
+      // Use pfns array content to update device page table
+
+      release_lock(driver->update);
+      return 0;
+ }
+
+The driver->update lock is the same lock the driver takes inside its update()
+callback. That lock must be held before calling hmm_vma_range_done() to avoid
+any race with a concurrent CPU page table update.
+
+HMM implements all of this on top of the mmu_notifier API because we wanted a
+simpler API, and also to be able to perform optimizations later on, such as
+doing concurrent device updates in multi-device scenarios.
+
+HMM also serves as an impedance-mismatch layer between how CPU page table
+updates are done (by the CPU writing to the page table and flushing TLBs) and
+how devices update their own page tables. A device update is a multi-step
+process: first, the appropriate commands are written to a buffer, then this
+buffer is scheduled for execution on the device. Only once the device has
+executed the commands in the buffer is the update done. Creating and
+scheduling the update command buffers can happen concurrently for multiple
+devices; waiting for each device to report the commands as executed is
+serialized (there is no point in doing this concurrently).
+
+
+-------------------------------------------------------------------------------
+
+5) Represent and manage device memory from core kernel point of view
+
+Several different designs were tried to support device memory. The first one
+used a device-specific data structure to keep information about migrated
+memory, and HMM hooked itself into various places of the mm code to handle any
+access to addresses that were backed by device memory. It turned out that this
+ended up replicating most of the fields of struct page and also needed many
+kernel code paths to be updated to understand this new kind of memory.
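To tie the mirroring pieces above together, here is a minimal sketch of the
driver side of that locking pattern. It is only an illustration: struct
my_driver, driver_from_mirror() and my_device_update_range() are assumptions
made for the example, while hmm_mirror, enum hmm_update and hmm_mirror_ops
come from the API quoted earlier.

    /* Hypothetical driver-side update() callback; pairs with the
     * driver_populate_range() pattern shown in the previous section. */
    static void my_driver_update(struct hmm_mirror *mirror,
                                 enum hmm_update action,
                                 unsigned long start,
                                 unsigned long end)
    {
        struct my_driver *driver = driver_from_mirror(mirror);

        take_lock(driver->update);
        /* Unmap or write-protect the mirrored range on the device; do not
         * return until the device page tables and TLBs reflect the change,
         * as the callback is synchronous. */
        my_device_update_range(driver, action, start, end);
        release_lock(driver->update);
    }

    static const struct hmm_mirror_ops my_driver_mirror_ops = {
        .update = my_driver_update,
    };

Because this callback serializes with driver_populate_range() through the same
driver->update lock, the pfns array is never used to program the device page
table after the CPU page table has changed underneath it.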
+
+The thing is, most kernel code paths never try to access the memory behind a
+page; they only care about the struct page contents. Because of this, HMM
+switched to directly using struct page for device memory, which left most
+kernel code paths unaware of the difference. We only need to make sure that no
+one ever tries to map those pages from the CPU side.
+
+HMM provides a set of helpers to register and hotplug device memory as a new
+region needing struct page. This is offered through a very simple API:
+
+ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
+                                   struct device *device,
+                                   unsigned long size);
+ void hmm_devmem_remove(struct hmm_devmem *devmem);
+
+The hmm_devmem_ops is where most of the important things are:
+
+ struct hmm_devmem_ops {
+     void (*free)(struct hmm_devmem *devmem, struct page *page);
+     int (*fault)(struct hmm_devmem *devmem,
+                  struct vm_area_struct *vma,
+                  unsigned long addr,
+                  struct page *page,
+                  unsigned flags,
+                  pmd_t *pmdp);
+ };
+
+The first callback (free()) happens when the last reference on a device page
+is dropped. This means the device page is now free and no longer used by
+anyone. The second callback happens whenever the CPU tries to access a device
+page, which it cannot do. This second callback must trigger a migration back
+to system memory.
+
+
+-------------------------------------------------------------------------------
+
+6) Migrate to and from device memory
+
+Because the CPU cannot access device memory, migration must use the device DMA
+engine to perform the copies from and to device memory. For this we need a new
+migration helper:
+
+ int migrate_vma(const struct migrate_vma_ops *ops,
+                 struct vm_area_struct *vma,
+                 unsigned long mentries,
+                 unsigned long start,
+                 unsigned long end,
+                 unsigned long *src,
+                 unsigned long *dst,
+                 void *private);
+
+Unlike other migration functions, it works on a range of virtual addresses.
+There are two reasons for that. First, a device DMA copy has a high setup
+overhead cost, so batching multiple pages is needed, as otherwise the
+migration overhead makes the whole exercise pointless. Second, drivers trigger
+such migrations based on the range of addresses the device is actively
+accessing.
+
+The migrate_vma_ops struct defines two callbacks. The first one
+(alloc_and_copy()) controls destination memory allocation and the copy
+operation. The second one is there to allow the device driver to perform
+cleanup operations after the migration.
+
+ struct migrate_vma_ops {
+     void (*alloc_and_copy)(struct vm_area_struct *vma,
+                            const unsigned long *src,
+                            unsigned long *dst,
+                            unsigned long start,
+                            unsigned long end,
+                            void *private);
+     void (*finalize_and_map)(struct vm_area_struct *vma,
+                              const unsigned long *src,
+                              const unsigned long *dst,
+                              unsigned long start,
+                              unsigned long end,
+                              void *private);
+ };
+
+It is important to stress that these migration helpers allow for holes in the
+virtual address range. Some pages in the range might not be migrated for all
+the usual reasons (page is pinned, page is locked, ...). The helper does not
+fail; it just skips over those pages.
+
+alloc_and_copy() may also decide not to migrate every page in the range (for
+reasons under the callback's control). For those pages, the callback just has
+to leave the corresponding dst entries empty.
+
+Finally, the migration of a struct page might fail (for file-backed pages) for
+various reasons (failure to freeze the reference, or to update the page cache,
+...). If that happens, finalize_and_map() can catch any pages that were not
+migrated.
+Note that those pages were still copied to new pages, so we wasted bandwidth,
+but this is considered a rare event and a price that we are willing to pay to
+keep all the code simpler.
+
+
+-------------------------------------------------------------------------------
+
+7) Memory cgroup (memcg) and rss accounting
+
+For now, device memory is accounted as any regular page in the rss counters
+(either anonymous if the device page is used for anonymous memory, file if the
+device page is used for a file-backed page, or shmem if the device page is
+used for shared memory). This is a deliberate choice so that existing
+applications, which might start using device memory without knowing about it,
+keep running unimpacted.
+
+A drawback is that the OOM killer might kill an application using a lot of
+device memory and not a lot of regular system memory, and thus not free much
+system memory. We want to gather more real world experience on how
+applications and the system react under memory pressure in the presence of
+device memory before deciding to account device memory differently.
+
+
+The same decision was made for the memory cgroup. Device memory pages are
+accounted against the same memory cgroup a regular page would be accounted to.
+This does simplify migration to and from device memory. It also means that
+migration back from device memory to regular memory cannot fail because it
+would go above the memory cgroup limit. We might revisit this choice later on,
+once we get more experience in how device memory is used and its impact on
+memory resource control.
+
+
+Note that device memory can never be pinned, neither by a device driver nor
+through GUP, and thus such memory is always freed upon process exit, or, in
+the case of shared memory or file-backed memory, when the last reference is
+dropped.
diff --git a/Documentation/vm/numa b/Documentation/vm/numa
index a08f71647714..a31b85b9bb88 100644
--- a/Documentation/vm/numa
+++ b/Documentation/vm/numa
@@ -79,11 +79,8 @@ memory, Linux must decide whether to order the zonelists such that allocations
 fall back to the same zone type on a different node, or to a different zone
 type on the same node. This is an important consideration because some zones,
 such as DMA or DMA32, represent relatively scarce resources. Linux chooses
-a default zonelist order based on the sizes of the various zone types relative
-to the total memory of the node and the total memory of the system. The
-default zonelist order may be overridden using the numa_zonelist_order kernel
-boot parameter or sysctl. [see Documentation/admin-guide/kernel-parameters.rst and
-Documentation/sysctl/vm.txt]
+a default Node ordered zonelist. This means it tries to fall back to other zones
+from the same node before using remote nodes, which are ordered by NUMA distance.
 
 By default, Linux will attempt to satisfy memory allocation requests from the
 node to which the CPU that executes the request is assigned. Specifically,
diff --git a/Documentation/vm/swap_numa.txt b/Documentation/vm/swap_numa.txt
new file mode 100644
index 000000000000..d5960c9124f5
--- /dev/null
+++ b/Documentation/vm/swap_numa.txt
@@ -0,0 +1,69 @@
+Automatically bind swap device to numa node
+-------------------------------------------
+
+If the system has more than one swap device, and the swap devices carry node
+information, we can make use of this information to decide which swap device
+to use in get_swap_pages() to get better performance.
+
+
+How to use this feature
+-----------------------
+
+Each swap device has a priority, and that priority decides the order in which
+the device is used.
To make +use of automatically binding, there is no need to manipulate priority settings +for swap devices. e.g. on a 2 node machine, assume 2 swap devices swapA and +swapB, with swapA attached to node 0 and swapB attached to node 1, are going +to be swapped on. Simply swapping them on by doing: +# swapon /dev/swapA +# swapon /dev/swapB + +Then node 0 will use the two swap devices in the order of swapA then swapB and +node 1 will use the two swap devices in the order of swapB then swapA. Note +that the order of them being swapped on doesn't matter. + +A more complex example on a 4 node machine. Assume 6 swap devices are going to +be swapped on: swapA and swapB are attached to node 0, swapC is attached to +node 1, swapD and swapE are attached to node 2 and swapF is attached to node3. +The way to swap them on is the same as above: +# swapon /dev/swapA +# swapon /dev/swapB +# swapon /dev/swapC +# swapon /dev/swapD +# swapon /dev/swapE +# swapon /dev/swapF + +Then node 0 will use them in the order of: +swapA/swapB -> swapC -> swapD -> swapE -> swapF +swapA and swapB will be used in a round robin mode before any other swap device. + +node 1 will use them in the order of: +swapC -> swapA -> swapB -> swapD -> swapE -> swapF + +node 2 will use them in the order of: +swapD/swapE -> swapA -> swapB -> swapC -> swapF +Similaly, swapD and swapE will be used in a round robin mode before any +other swap devices. + +node 3 will use them in the order of: +swapF -> swapA -> swapB -> swapC -> swapD -> swapE + + +Implementation details +---------------------- + +The current code uses a priority based list, swap_avail_list, to decide +which swap device to use and if multiple swap devices share the same +priority, they are used round robin. This change here replaces the single +global swap_avail_list with a per-numa-node list, i.e. for each numa node, +it sees its own priority based list of available swap devices. Swap +device's priority can be promoted on its matching node's swap_avail_list. + +The current swap device's priority is set as: user can set a >=0 value, +or the system will pick one starting from -1 then downwards. The priority +value in the swap_avail_list is the negated value of the swap device's +due to plist being sorted from low to high. The new policy doesn't change +the semantics for priority >=0 cases, the previous starting from -1 then +downwards now becomes starting from -2 then downwards and -1 is reserved +as the promoted value. So if multiple swap devices are attached to the same +node, they will all be promoted to priority -1 on that node's plist and will +be used round robin before any other swap devices. diff --git a/Documentation/watchdog/watchdog-parameters.txt b/Documentation/watchdog/watchdog-parameters.txt index b3526365ea8e..6f9d7b418917 100644 --- a/Documentation/watchdog/watchdog-parameters.txt +++ b/Documentation/watchdog/watchdog-parameters.txt @@ -117,7 +117,7 @@ nowayout: Watchdog cannot be stopped once started ------------------------------------------------- iTCO_wdt: heartbeat: Watchdog heartbeat in seconds. - (5<=heartbeat<=74 (TCO v1) or 1226 (TCO v2), default=30) + (2 +M: Len Brown +L: linux-acpi@vger.kernel.org +R: Tony Luck +R: Borislav Petkov +F: drivers/acpi/apei/ + ACPI COMPONENT ARCHITECTURE (ACPICA) M: Robert Moore M: Lv Zheng @@ -343,6 +352,18 @@ L: linux-acpi@vger.kernel.org S: Maintained F: drivers/acpi/arm64 +ACPI PMIC DRIVERS +M: "Rafael J. 
Wysocki" +M: Len Brown +R: Andy Shevchenko +R: Mika Westerberg +L: linux-acpi@vger.kernel.org +Q: https://patchwork.kernel.org/project/linux-acpi/list/ +T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm +B: https://bugzilla.kernel.org +S: Supported +F: drivers/acpi/pmic/ + ACPI THERMAL DRIVER M: Zhang Rui L: linux-acpi@vger.kernel.org @@ -635,6 +656,11 @@ ALPS PS/2 TOUCHPAD DRIVER R: Pali Rohár F: drivers/input/mouse/alps.* +ALTERA I2C CONTROLLER DRIVER +M: Thor Thayer +S: Maintained +F: drivers/i2c/busses/i2c-altera.c + ALTERA MAILBOX DRIVER M: Ley Foon Tan L: nios2-dev@lists.rocketboards.org (moderated for non-subscribers) @@ -769,6 +795,12 @@ W: http://ez.analog.com/community/linux-device-drivers S: Supported F: drivers/media/i2c/adv7180.c +ANALOG DEVICES INC ADV748X DRIVER +M: Kieran Bingham +L: linux-media@vger.kernel.org +S: Maintained +F: drivers/media/i2c/adv748x/* + ANALOG DEVICES INC ADV7511 DRIVER M: Hans Verkuil L: linux-media@vger.kernel.org @@ -840,6 +872,12 @@ S: Supported F: drivers/android/ F: drivers/staging/android/ +ANDROID GOLDFISH RTC DRIVER +M: Miodrag Dinic +S: Supported +F: Documentation/devicetree/bindings/rtc/google,goldfish-rtc.txt +F: drivers/rtc/rtc-goldfish.c + ANDROID ION DRIVER M: Laura Abbott M: Sumit Semwal @@ -1153,6 +1191,7 @@ L: linux-arm-kernel@axis.com F: arch/arm/mach-artpec F: arch/arm/boot/dts/artpec6* F: drivers/clk/axis +F: drivers/crypto/axis F: drivers/pinctrl/pinctrl-artpec* F: Documentation/devicetree/bindings/pinctrl/axis,artpec6-pinctrl.txt @@ -2085,17 +2124,38 @@ F: arch/arm/mach-pxa/include/mach/z2.h ARM/ZTE ARCHITECTURE M: Jun Nie M: Baoyou Xie +M: Shawn Guo L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained +F: arch/arm/boot/dts/zx2967* F: arch/arm/mach-zx/ +F: arch/arm64/boot/dts/zte/ F: drivers/clk/zte/ +F: drivers/dma/zx_dma.c +F: drivers/gpio/gpio-zx.c +F: drivers/i2c/busses/i2c-zx2967.c +F: drivers/mmc/host/dw_mmc-zx.* +F: drivers/pinctrl/zte/ F: drivers/reset/reset-zx2967.c F: drivers/soc/zte/ +F: drivers/thermal/zx2967_thermal.c +F: drivers/watchdog/zx2967_wdt.c F: Documentation/devicetree/bindings/arm/zte.txt -F: Documentation/devicetree/bindings/clock/zx296702-clk.txt +F: Documentation/devicetree/bindings/clock/zx2967*.txt +F: Documentation/devicetree/bindings/dma/zxdma.txt +F: Documentation/devicetree/bindings/gpio/zx296702-gpio.txt +F: Documentation/devicetree/bindings/i2c/i2c-zx2967.txt +F: Documentation/devicetree/bindings/mmc/zx-dw-mshc.txt +F: Documentation/devicetree/bindings/pinctrl/pinctrl-zx.txt F: Documentation/devicetree/bindings/reset/zte,zx2967-reset.txt F: Documentation/devicetree/bindings/soc/zte/ -F: include/dt-bindings/soc/zx*.h +F: Documentation/devicetree/bindings/sound/zte,*.txt +F: Documentation/devicetree/bindings/thermal/zx2967-thermal.txt +F: Documentation/devicetree/bindings/watchdog/zte,zx2967-wdt.txt +F: include/dt-bindings/clock/zx2967*.h +F: include/dt-bindings/soc/zte,*.h +F: sound/soc/codecs/zx_aud96p22.c +F: sound/soc/zte/ ARM/ZYNQ ARCHITECTURE M: Michal Simek @@ -2123,6 +2183,12 @@ S: Maintained F: arch/arm64/ F: Documentation/arm64/ +AS3645A LED FLASH CONTROLLER DRIVER +M: Sakari Ailus +L: linux-leds@vger.kernel.org +S: Maintained +F: drivers/leds/leds-as3645a.c + AS3645A LED FLASH CONTROLLER DRIVER M: Laurent Pinchart L: linux-media@vger.kernel.org @@ -2391,9 +2457,10 @@ AUDIT SUBSYSTEM M: Paul Moore M: Eric Paris L: linux-audit@redhat.com (moderated for non-subscribers) -W: http://people.redhat.com/sgrubb/audit/ -T: git 
git://git.infradead.org/users/pcmoore/audit -S: Maintained +W: https://github.com/linux-audit +W: https://people.redhat.com/sgrubb/audit +T: git git://git.kernel.org/pub/scm/linux/kernel/git/pcmoore/audit.git +S: Supported F: include/linux/audit.h F: include/uapi/linux/audit.h F: kernel/audit* @@ -2483,7 +2550,7 @@ Q: https://patchwork.open-mesh.org/project/batman/list/ S: Maintained F: Documentation/ABI/testing/sysfs-class-net-batman-adv F: Documentation/ABI/testing/sysfs-class-net-mesh -F: Documentation/networking/batman-adv.txt +F: Documentation/networking/batman-adv.rst F: include/uapi/linux/batman_adv.h F: net/batman-adv/ @@ -2552,13 +2619,6 @@ W: http://blackfin.uclinux.org S: Supported F: drivers/net/ethernet/adi/ -BLACKFIN I2C TWI DRIVER -M: Sonic Zhang -L: adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers) -W: http://blackfin.uclinux.org/ -S: Supported -F: drivers/i2c/busses/i2c-bfin-twi.c - BLACKFIN MEDIA DRIVER M: Scott Jiang L: adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers) @@ -2575,14 +2635,12 @@ S: Supported F: drivers/rtc/rtc-bfin.c BLACKFIN SDH DRIVER -M: Sonic Zhang L: adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers) W: http://blackfin.uclinux.org S: Supported F: drivers/mmc/host/bfin_sdh.c BLACKFIN SERIAL DRIVER -M: Sonic Zhang L: adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers) W: http://blackfin.uclinux.org S: Supported @@ -2807,7 +2865,6 @@ S: Supported F: drivers/scsi/bnx2i/ BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER -M: Yuval Mintz M: Ariel Elior M: everest-linux-l2@cavium.com L: netdev@vger.kernel.org @@ -3161,6 +3218,7 @@ S: Supported F: drivers/crypto/cavium/cpt/ CAVIUM THUNDERX2 ARM64 SOC +M: Robert Richter M: Jayachandran C L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained @@ -4114,7 +4172,9 @@ F: include/linux/dax.h F: include/trace/events/fs_dax.h DIRECTORY NOTIFICATION (DNOTIFY) -M: Eric Paris +M: Jan Kara +R: Amir Goldstein +L: linux-fsdevel@vger.kernel.org S: Maintained F: Documentation/filesystems/dnotify.txt F: fs/notify/dnotify/ @@ -4181,7 +4241,7 @@ DMA MAPPING HELPERS M: Christoph Hellwig M: Marek Szyprowski R: Robin Murphy -L: linux-kernel@vger.kernel.org +L: iommu@lists.linux-foundation.org T: git git://git.infradead.org/users/hch/dma-mapping.git W: http://git.infradead.org/users/hch/dma-mapping.git S: Supported @@ -4347,6 +4407,12 @@ S: Supported F: drivers/gpu/drm/nouveau/ F: include/uapi/drm/nouveau_drm.h +DRM DRIVER FOR PERVASIVE DISPLAYS REPAPER PANELS +M: Noralf Trønnes +S: Maintained +F: drivers/gpu/drm/tinydrm/repaper.c +F: Documentation/devicetree/bindings/display/repaper.txt + DRM DRIVER FOR QEMU'S CIRRUS DEVICE M: Dave Airlie M: Gerd Hoffmann @@ -4365,12 +4431,6 @@ S: Maintained F: drivers/gpu/drm/qxl/ F: include/uapi/drm/qxl_drm.h -DRM DRIVER FOR PERVASIVE DISPLAYS REPAPER PANELS -M: Noralf Trønnes -S: Maintained -F: drivers/gpu/drm/tinydrm/repaper.c -F: Documentation/devicetree/bindings/display/repaper.txt - DRM DRIVER FOR RAGE 128 VIDEO CARDS S: Orphan / Obsolete F: drivers/gpu/drm/r128/ @@ -5127,6 +5187,7 @@ F: include/linux/of_net.h F: include/linux/phy.h F: include/linux/phy_fixed.h F: include/linux/platform_data/mdio-gpio.h +F: include/linux/platform_data/mdio-bcm-unimac.h F: include/trace/events/mdio.h F: include/uapi/linux/mdio.h F: include/uapi/linux/mii.h @@ -5198,7 +5259,8 @@ S: Maintained F: drivers/iommu/exynos-iommu.c EZchip NPS platform support -M: Noam Camus +M: Elad Kanfi +M: Vineet Gupta 
S: Supported F: arch/arc/plat-eznps F: arch/arc/boot/dts/eznps.dts @@ -5224,7 +5286,9 @@ F: Documentation/hwmon/f71805f F: drivers/hwmon/f71805f.c FANOTIFY -M: Eric Paris +M: Jan Kara +R: Amir Goldstein +L: linux-fsdevel@vger.kernel.org S: Maintained F: fs/notify/fanotify/ F: include/linux/fanotify.h @@ -5282,9 +5346,7 @@ M: "J. Bruce Fields" L: linux-fsdevel@vger.kernel.org S: Maintained F: include/linux/fcntl.h -F: include/linux/fs.h F: include/uapi/linux/fcntl.h -F: include/uapi/linux/fs.h F: fs/fcntl.c F: fs/locks.c @@ -5293,6 +5355,8 @@ M: Alexander Viro L: linux-fsdevel@vger.kernel.org S: Maintained F: fs/* +F: include/linux/fs.h +F: include/uapi/linux/fs.h FINTEK F75375S HARDWARE MONITOR AND FAN CONTROLLER DRIVER M: Riku Voipio @@ -5790,6 +5854,12 @@ S: Maintained F: Documentation/acpi/gpio-properties.txt F: drivers/gpio/gpiolib-acpi.c +GPIO IR Transmitter +M: Sean Young +L: linux-media@vger.kernel.org +S: Maintained +F: drivers/media/rc/gpio-ir-tx.c + GPIO MOCKUP DRIVER M: Bamvor Jian Zhang L: linux-gpio@vger.kernel.org @@ -6059,16 +6129,6 @@ F: drivers/scsi/hpsa*.[ch] F: include/linux/cciss*.h F: include/uapi/linux/cciss*.h -HEWLETT-PACKARD SMART CISS RAID DRIVER (cciss) -M: Don Brace -L: esc.storagedev@microsemi.com -L: linux-scsi@vger.kernel.org -S: Supported -F: Documentation/blockdev/cciss.txt -F: drivers/block/cciss* -F: include/linux/cciss_ioctl.h -F: include/uapi/linux/cciss_ioctl.h - HFI1 DRIVER M: Mike Marciniszyn M: Dennis Dalessandro @@ -6165,6 +6225,14 @@ F: include/uapi/linux/if_hippi.h F: net/802/hippi.c F: drivers/net/hippi/ +HISILICON NETWORK SUBSYSTEM 3 DRIVER (HNS3) +M: Yisen Zhuang +M: Salil Mehta +L: netdev@vger.kernel.org +W: http://www.hisilicon.com +S: Maintained +F: drivers/net/ethernet/hisilicon/hns3/ + HISILICON NETWORK SUBSYSTEM DRIVER M: Yisen Zhuang M: Salil Mehta @@ -6189,6 +6257,13 @@ S: Supported F: drivers/scsi/hisi_sas/ F: Documentation/devicetree/bindings/scsi/hisilicon-sas.txt +HMM - Heterogeneous Memory Management +M: Jérôme Glisse +L: linux-mm@kvack.org +S: Maintained +F: mm/hmm* +F: include/linux/hmm* + HOST AP DRIVER M: Jouni Malinen L: linux-wireless@vger.kernel.org @@ -6258,6 +6333,13 @@ L: linux-input@vger.kernel.org S: Maintained F: drivers/input/touchscreen/htcpen.c +HUAWEI ETHERNET DRIVER +M: Aviad Krawczyk +L: netdev@vger.kernel.org +S: Supported +F: Documentation/networking/hinic.txt +F: drivers/net/ethernet/huawei/hinic/ + HUGETLB FILESYSTEM M: Nadia Yvette Chambers S: Maintained @@ -6284,7 +6366,9 @@ M: Haiyang Zhang M: Stephen Hemminger L: devel@linuxdriverproject.org S: Maintained +F: Documentation/networking/netvsc.txt F: arch/x86/include/asm/mshyperv.h +F: arch/x86/include/asm/trace/hyperv.h F: arch/x86/include/uapi/asm/hyperv.h F: arch/x86/kernel/cpu/mshyperv.c F: arch/x86/hyperv @@ -6296,7 +6380,9 @@ F: drivers/net/hyperv/ F: drivers/scsi/storvsc_drv.c F: drivers/uio/uio_hv_generic.c F: drivers/video/fbdev/hyperv_fb.c +F: net/vmw_vsock/hyperv_transport.c F: include/linux/hyperv.h +F: include/uapi/linux/hyperv.h F: tools/hv/ F: Documentation/ABI/stable/sysfs-bus-vmbus @@ -6398,6 +6484,12 @@ F: drivers/i2c/busses/i2c-sis96x.c F: drivers/i2c/busses/i2c-via.c F: drivers/i2c/busses/i2c-viapro.c +I2C/SMBUS INTEL CHT WHISKEY COVE PMIC DRIVER +M: Hans de Goede +L: linux-i2c@vger.kernel.org +S: Maintained +F: drivers/i2c/busses/i2c-cht-wc.c + I2C/SMBUS ISMT DRIVER M: Seth Heasley M: Neil Horman @@ -6464,6 +6556,15 @@ L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/ibm/ibmvnic.* +IBM Power Virtual Accelerator Switchboard 
+M: Sukadev Bhattiprolu +L: linuxppc-dev@lists.ozlabs.org +S: Supported +F: arch/powerpc/platforms/powernv/vas* +F: arch/powerpc/platforms/powernv/copy-paste.h +F: arch/powerpc/include/asm/vas.h +F: arch/powerpc/include/uapi/asm/vas.h + IBM Power Virtual Ethernet Device Driver M: Thomas Falcon L: netdev@vger.kernel.org @@ -6554,8 +6655,8 @@ M: Alexander Aring M: Stefan Schmidt L: linux-wpan@vger.kernel.org W: http://wpan.cakelab.org/ -T: git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth.git -T: git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan-next.git S: Maintained F: net/ieee802154/ F: net/mac802154/ @@ -6638,7 +6739,7 @@ F: Documentation/devicetree/bindings/auxdisplay/img-ascii-lcd.txt F: drivers/auxdisplay/img-ascii-lcd.c IMGTEC IR DECODER DRIVER -M: James Hogan +M: James Hogan S: Maintained F: drivers/media/rc/img-ir/ @@ -6700,9 +6801,9 @@ S: Maintained F: drivers/mtd/nand/jz4780_* INOTIFY -M: John McCutchan -M: Robert Love -M: Eric Paris +M: Jan Kara +R: Amir Goldstein +L: linux-fsdevel@vger.kernel.org S: Maintained F: Documentation/filesystems/inotify.txt F: fs/notify/inotify/ @@ -7111,9 +7212,7 @@ W: http://irda.sourceforge.net/ S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/sameo/irda-2.6.git F: Documentation/networking/irda.txt -F: drivers/net/irda/ -F: include/net/irda/ -F: net/irda/ +F: drivers/staging/irda/ IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY) M: Marc Zyngier @@ -7407,6 +7506,13 @@ S: Maintained F: tools/testing/selftests/ F: Documentation/dev-tools/kselftest* +KERNEL USERMODE HELPER +M: "Luis R. Rodriguez" +L: linux-kernel@vger.kernel.org +S: Maintained +F: kernel/umh.c +F: include/linux/umh.h + KERNEL VIRTUAL MACHINE (KVM) M: Paolo Bonzini M: Radim Krčmář @@ -7414,18 +7520,17 @@ L: kvm@vger.kernel.org W: http://www.linux-kvm.org T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git S: Supported -F: Documentation/*/kvm*.txt F: Documentation/virtual/kvm/ -F: arch/*/kvm/ -F: arch/x86/kernel/kvm.c -F: arch/x86/kernel/kvmclock.c -F: arch/*/include/asm/kvm* -F: include/linux/kvm* +F: include/trace/events/kvm.h +F: include/uapi/asm-generic/kvm* F: include/uapi/linux/kvm* -F: virt/kvm/ +F: include/asm-generic/kvm* +F: include/linux/kvm* +F: include/kvm/iodev.h +F: virt/kvm/* F: tools/kvm/ -KERNEL VIRTUAL MACHINE (KVM) FOR AMD-V +KERNEL VIRTUAL MACHINE FOR AMD-V (KVM/amd) M: Joerg Roedel L: kvm@vger.kernel.org W: http://www.linux-kvm.org/ @@ -7433,7 +7538,7 @@ S: Maintained F: arch/x86/include/asm/svm.h F: arch/x86/kvm/svm.c -KERNEL VIRTUAL MACHINE (KVM) FOR ARM +KERNEL VIRTUAL MACHINE FOR ARM (KVM/arm) M: Christoffer Dall M: Marc Zyngier L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) @@ -7447,15 +7552,6 @@ F: arch/arm/kvm/ F: virt/kvm/arm/ F: include/kvm/arm_* -KERNEL VIRTUAL MACHINE (KVM) FOR POWERPC -M: Alexander Graf -L: kvm-ppc@vger.kernel.org -W: http://www.linux-kvm.org/ -T: git git://github.com/agraf/linux-2.6.git -S: Supported -F: arch/powerpc/include/asm/kvm* -F: arch/powerpc/kvm/ - KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64) M: Christoffer Dall M: Marc Zyngier @@ -7467,13 +7563,24 @@ F: arch/arm64/include/asm/kvm* F: arch/arm64/kvm/ KERNEL VIRTUAL MACHINE FOR MIPS (KVM/mips) -M: James Hogan +M: James Hogan L: linux-mips@linux-mips.org S: Supported F: arch/mips/include/uapi/asm/kvm* F: arch/mips/include/asm/kvm* F: arch/mips/kvm/ +KERNEL 
VIRTUAL MACHINE FOR POWERPC (KVM/powerpc) +M: Alexander Graf +L: kvm-ppc@vger.kernel.org +W: http://www.linux-kvm.org/ +T: git git://github.com/agraf/linux-2.6.git +S: Supported +F: arch/powerpc/include/uapi/asm/kvm* +F: arch/powerpc/include/asm/kvm* +F: arch/powerpc/kvm/ +F: arch/powerpc/kernel/kvm* + KERNEL VIRTUAL MACHINE for s390 (KVM/s390) M: Christian Borntraeger M: Cornelia Huck @@ -7481,11 +7588,25 @@ L: linux-s390@vger.kernel.org W: http://www.ibm.com/developerworks/linux/linux390/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux.git S: Supported -F: Documentation/s390/kvm.txt +F: arch/s390/include/uapi/asm/kvm* +F: arch/s390/include/asm/gmap.h F: arch/s390/include/asm/kvm* F: arch/s390/kvm/ F: arch/s390/mm/gmap.c +KERNEL VIRTUAL MACHINE FOR X86 (KVM/x86) +M: Paolo Bonzini +M: Radim Krčmář +L: kvm@vger.kernel.org +W: http://www.linux-kvm.org +T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git +S: Supported +F: arch/x86/kvm/ +F: arch/x86/include/uapi/asm/kvm* +F: arch/x86/include/asm/kvm* +F: arch/x86/kernel/kvm.c +F: arch/x86/kernel/kvmclock.c + KERNFS M: Greg Kroah-Hartman M: Tejun Heo @@ -7567,7 +7688,7 @@ F: include/linux/kmemleak.h F: mm/kmemleak.c F: mm/kmemleak-test.c -KMOD MODULE USERMODE HELPER +KMOD KERNEL MODULE LOADER - USERMODE HELPER M: "Luis R. Rodriguez" L: linux-kernel@vger.kernel.org S: Maintained @@ -7608,6 +7729,7 @@ M: John Crispin L: linux-mips@linux-mips.org S: Maintained F: arch/mips/lantiq +F: drivers/soc/lantiq LAPB module L: linux-x25@vger.kernel.org @@ -7802,6 +7924,7 @@ F: drivers/pci/hotplug/rpa* F: drivers/rtc/rtc-opal.c F: drivers/scsi/ibmvscsi/ F: drivers/tty/hvc/hvc_opal.c +F: drivers/watchdog/wdrtas.c F: tools/testing/selftests/powerpc N: /pmac N: powermac @@ -8142,6 +8265,12 @@ L: libertas-dev@lists.infradead.org S: Orphan F: drivers/net/wireless/marvell/libertas/ +MARVELL MACCHIATOBIN SUPPORT +M: Russell King +L: linux-arm-kernel@lists.infradead.org +S: Maintained +F: arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts + MARVELL MV643XX ETHERNET DRIVER M: Sebastian Hesselbarth L: netdev@vger.kernel.org @@ -8318,6 +8447,14 @@ T: git git://linuxtv.org/media_tree.git S: Supported F: drivers/media/dvb-frontends/cxd2841er* +MEDIA DRIVERS FOR DIGITAL DEVICES PCIE DEVICES +M: Daniel Scheller +L: linux-media@vger.kernel.org +W: https://linuxtv.org +T: git git://linuxtv.org/media_tree.git +S: Maintained +F: drivers/media/pci/ddbridge/* + MEDIA DRIVERS FOR FREESCALE IMX M: Steve Longerbeam M: Philipp Zabel @@ -8359,6 +8496,14 @@ T: git git://linuxtv.org/media_tree.git S: Supported F: drivers/media/dvb-frontends/lnbh25* +MEDIA DRIVERS FOR MXL5XX TUNER DEMODULATORS +M: Daniel Scheller +L: linux-media@vger.kernel.org +W: https://linuxtv.org +T: git git://linuxtv.org/media_tree.git +S: Maintained +F: drivers/media/dvb-frontends/mxl5xx* + MEDIA DRIVERS FOR NETUP PCI UNIVERSAL DVB devices M: Sergey Kozlov M: Abylay Ospan @@ -8415,6 +8560,22 @@ S: Supported F: Documentation/devicetree/bindings/media/renesas,vsp1.txt F: drivers/media/platform/vsp1/ +MEDIA DRIVERS FOR ST STV0910 DEMODULATOR ICs +M: Daniel Scheller +L: linux-media@vger.kernel.org +W: https://linuxtv.org +T: git git://linuxtv.org/media_tree.git +S: Maintained +F: drivers/media/dvb-frontends/stv0910* + +MEDIA DRIVERS FOR ST STV6111 TUNER ICs +M: Daniel Scheller +L: linux-media@vger.kernel.org +W: https://linuxtv.org +T: git git://linuxtv.org/media_tree.git +S: Maintained +F: drivers/media/dvb-frontends/stv6111* + MEDIA INPUT INFRASTRUCTURE (V4L/DVB) M: Mauro Carvalho Chehab M: Mauro 
Carvalho Chehab @@ -8438,9 +8599,22 @@ F: include/uapi/linux/meye.h F: include/uapi/linux/ivtv* F: include/uapi/linux/uvcvideo.h +MEDIATEK CIR DRIVER +M: Sean Wang +S: Maintained +F: drivers/media/rc/mtk-cir.c + +MEDIATEK PMIC LED DRIVER +M: Sean Wang +S: Maintained +F: drivers/leds/leds-mt6323.c +F: Documentation/devicetree/bindings/leds/leds-mt6323.txt + MEDIATEK ETHERNET DRIVER M: Felix Fietkau -M: John Crispin +M: John Crispin +M: Sean Wang +M: Nelson Chang L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/mediatek/ @@ -8707,8 +8881,18 @@ F: drivers/leds/leds-menf21bmc.c F: drivers/hwmon/menf21bmc_hwmon.c F: Documentation/hwmon/menf21bmc +MESON AO CEC DRIVER FOR AMLOGIC SOCS +M: Neil Armstrong +L: linux-media@lists.freedesktop.org +L: linux-amlogic@lists.infradead.org +W: http://linux-meson.com/ +S: Supported +F: drivers/media/platform/meson/ao-cec.c +F: Documentation/devicetree/bindings/media/meson-ao-cec.txt +T: git git://linuxtv.org/media_tree.git + METAG ARCHITECTURE -M: James Hogan +M: James Hogan L: linux-metag@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/jhogan/metag.git S: Odd Fixes @@ -8743,6 +8927,12 @@ F: drivers/dma/at_hdmac.c F: drivers/dma/at_hdmac_regs.h F: include/linux/platform_data/dma-atmel.h +MICROCHIP / ATMEL ECC DRIVER +M: Tudor Ambarus +L: linux-crypto@vger.kernel.org +S: Maintained +F: drivers/crypto/atmel-ecc.* + MICROCHIP / ATMEL ISC DRIVER M: Songjun Wu L: linux-media@vger.kernel.org @@ -8817,6 +9007,7 @@ M: Paul Burton L: linux-mips@linux-mips.org S: Supported F: arch/mips/generic/ +F: arch/mips/tools/generic-board-config.sh MIPS/LOONGSON1 ARCHITECTURE M: Keguang Zhang @@ -8827,6 +9018,13 @@ F: arch/mips/include/asm/mach-loongson32/ F: drivers/*/*loongson1* F: drivers/*/*/*loongson1* +MIPS RINT INSTRUCTION EMULATION +M: Aleksandar Markovic +L: linux-mips@linux-mips.org +S: Supported +F: arch/mips/math-emu/sp_rint.c +F: arch/mips/math-emu/dp_rint.c + MIROSOUND PCM20 FM RADIO RECEIVER DRIVER M: Hans Verkuil L: linux-media@vger.kernel.org @@ -9052,7 +9250,7 @@ T: git git://git.infradead.org/linux-mtd.git nand/fixes T: git git://git.infradead.org/l2-mtd.git nand/next S: Maintained F: drivers/mtd/nand/ -F: include/linux/mtd/nand*.h +F: include/linux/mtd/*nand*.h NATIVE INSTRUMENTS USB SOUND INTERFACE DRIVER M: Daniel Mack @@ -9144,15 +9342,6 @@ F: net/*/netfilter/ F: net/netfilter/ F: net/bridge/br_netfilter*.c -NETLABEL -M: Paul Moore -W: http://netlabel.sf.net -L: netdev@vger.kernel.org -S: Maintained -F: Documentation/netlabel/ -F: include/net/netlabel.h -F: net/netlabel/ - NETROM NETWORK LAYER M: Ralf Baechle L: linux-hams@vger.kernel.org @@ -9172,7 +9361,7 @@ NETWORK BLOCK DEVICE (NBD) M: Josef Bacik S: Maintained L: linux-block@vger.kernel.org -L: nbd-general@lists.sourceforge.net +L: nbd@other.debian.org F: Documentation/blockdev/nbd.txt F: drivers/block/nbd.c F: include/uapi/linux/nbd.h @@ -9280,10 +9469,23 @@ F: net/ipv6/ F: include/net/ip* F: arch/x86/net/* -NETWORKING [LABELED] (NetLabel, CIPSO, Labeled IPsec, SECMARK) +NETWORKING [LABELED] (NetLabel, Labeled IPsec, SECMARK) M: Paul Moore +W: https://github.com/netlabel L: netdev@vger.kernel.org +L: linux-security-module@vger.kernel.org S: Maintained +F: Documentation/netlabel/ +F: include/net/calipso.h +F: include/net/cipso_ipv4.h +F: include/net/netlabel.h +F: include/uapi/linux/netfilter/xt_SECMARK.h +F: include/uapi/linux/netfilter/xt_CONNSECMARK.h +F: net/netlabel/ +F: net/ipv4/cipso_ipv4.c +F: net/ipv6/calipso.c +F: net/netfilter/xt_CONNSECMARK.c +F: 
net/netfilter/xt_SECMARK.c NETWORKING [TLS] M: Ilya Lesokhin @@ -9700,6 +9902,12 @@ F: drivers/regulator/twl-regulator.c F: drivers/regulator/twl6030-regulator.c F: include/linux/i2c-omap.h +ONION OMEGA2+ BOARD +M: Harvey Hunt +L: linux-mips@linux-mips.org +S: Maintained +F: arch/mips/boot/dts/ralink/omega2p.dts + OMFS FILESYSTEM M: Bob Copeland L: linux-karma-devel@lists.sourceforge.net @@ -9734,7 +9942,7 @@ S: Maintained F: drivers/media/i2c/ov5640.c OMNIVISION OV5647 SENSOR DRIVER -M: Ramiro Oliveira +M: Luis Oliveira L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Maintained @@ -9971,7 +10179,7 @@ F: include/uapi/linux/ppdev.h F: Documentation/parport*.txt PARAVIRT_OPS INTERFACE -M: Jeremy Fitzhardinge +M: Juergen Gross M: Chris Wright M: Alok Kataria M: Rusty Russell @@ -9979,7 +10187,7 @@ L: virtualization@lists.linux-foundation.org S: Supported F: Documentation/virtual/paravirt_ops.txt F: arch/*/kernel/paravirt* -F: arch/*/include/asm/paravirt.h +F: arch/*/include/asm/paravirt*.h F: include/linux/hypervisor.h PARIDE DRIVERS FOR PARALLEL PORT IDE DEVICES @@ -10123,6 +10331,7 @@ F: drivers/pci/dwc/*imx6* PCI DRIVER FOR INTEL VOLUME MANAGEMENT DEVICE (VMD) M: Keith Busch +M: Jonathan Derrick L: linux-pci@vger.kernel.org S: Supported F: drivers/pci/host/vmd.c @@ -10169,7 +10378,7 @@ L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) S: Maintained F: drivers/pci/dwc/pci-exynos.c -PCI DRIVER FOR SYNOPSIS DESIGNWARE +PCI DRIVER FOR SYNOPSYS DESIGNWARE M: Jingoo Han M: Joao Pinto L: linux-pci@vger.kernel.org @@ -10605,8 +10814,11 @@ W: http://wiki.enneenne.com/index.php/LinuxPPS_support L: linuxpps@ml.enneenne.com (subscribers-only) S: Maintained F: Documentation/pps/ +F: Documentation/devicetree/bindings/pps/pps-gpio.txt +F: Documentation/ABI/testing/sysfs-pps F: drivers/pps/ F: include/linux/pps*.h +F: include/uapi/linux/pps.h PPTP DRIVER M: Dmitry Kozlov @@ -10732,6 +10944,7 @@ L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Maintained F: drivers/media/usb/pulse8-cec/* +F: Documentation/media/cec-drivers/pulse8-cec.rst PVRUSB2 VIDEO4LINUX DRIVER M: Mike Isely @@ -10759,6 +10972,12 @@ F: Documentation/devicetree/bindings/hwmon/pwm-fan.txt F: Documentation/hwmon/pwm-fan F: drivers/hwmon/pwm-fan.c +PWM IR Transmitter +M: Sean Young +L: linux-media@vger.kernel.org +S: Maintained +F: drivers/media/rc/pwm-ir-tx.c + PWM SUBSYSTEM M: Thierry Reding L: linux-pwm@vger.kernel.org @@ -10852,7 +11071,6 @@ S: Supported F: drivers/scsi/qedi/ QLOGIC QL4xxx ETHERNET DRIVER -M: Yuval Mintz M: Ariel Elior M: everest-linux-l2@cavium.com L: netdev@vger.kernel.org @@ -10953,6 +11171,14 @@ W: http://wireless.kernel.org/en/users/Drivers/ath9k S: Supported F: drivers/net/wireless/ath/ath9k/ +QUALCOMM CAMERA SUBSYSTEM DRIVER +M: Todor Tomov +L: linux-media@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/media/qcom,camss.txt +F: Documentation/media/v4l-drivers/qcom_camss.rst +F: drivers/media/platform/qcom/camss-8x16/ + QUALCOMM EMAC GIGABIT ETHERNET DRIVER M: Timur Tabi L: netdev@vger.kernel.org @@ -10966,6 +11192,13 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/rkuo/linux-hexagon-kernel.g S: Supported F: arch/hexagon/ +QUALCOMM IOMMU +M: Rob Clark +L: iommu@lists.linux-foundation.org +L: linux-arm-msm@vger.kernel.org +S: Maintained +F: drivers/iommu/qcom_iommu.c + QUALCOMM VENUS VIDEO ACCELERATOR DRIVER M: Stanimir Varbanov L: linux-media@vger.kernel.org @@ -11225,6 +11458,8 @@ RENESAS ETHERNET DRIVERS R: Sergei 
Shtylyov L: netdev@vger.kernel.org L: linux-renesas-soc@vger.kernel.org +F: Documentation/devicetree/bindings/net/renesas,*.txt +F: Documentation/devicetree/bindings/net/sh_eth.txt F: drivers/net/ethernet/renesas/ F: include/linux/sh_eth.h @@ -11306,6 +11541,17 @@ L: linux-serial@vger.kernel.org S: Odd Fixes F: drivers/tty/serial/rp2.* +ROHM MULTIFUNCTION BD9571MWV-M PMIC DEVICE DRIVERS +M: Marek Vasut +L: linux-kernel@vger.kernel.org +L: linux-renesas-soc@vger.kernel.org +S: Supported +F: drivers/mfd/bd9571mwv.c +F: drivers/regulator/bd9571mwv-regulator.c +F: drivers/gpio/gpio-bd9571mwv.c +F: include/linux/mfd/bd9571mwv.h +F: Documentation/devicetree/bindings/mfd/bd9571mwv.txt + ROSE NETWORK LAYER M: Ralf Baechle L: linux-hams@vger.kernel.org @@ -11467,6 +11713,7 @@ F: drivers/s390/crypto/ S390 ZFCP DRIVER M: Steffen Maier +M: Benjamin Block L: linux-s390@vger.kernel.org W: http://www.ibm.com/developerworks/linux/linux390/ S: Supported @@ -11831,8 +12078,9 @@ M: Paul Moore M: Stephen Smalley M: Eric Paris L: selinux@tycho.nsa.gov (moderated for non-subscribers) -W: http://selinuxproject.org -T: git git://git.infradead.org/users/pcmoore/selinux +W: https://selinuxproject.org +W: https://github.com/SELinuxProject +T: git git://git.kernel.org/pub/scm/linux/kernel/git/pcmoore/selinux.git S: Supported F: include/linux/selinux* F: security/selinux/ @@ -12319,6 +12567,7 @@ F: drivers/tty/serial/sunsab.h F: drivers/tty/serial/sunsu.c F: drivers/tty/serial/sunzilog.c F: drivers/tty/serial/sunzilog.h +F: drivers/tty/vcc.c SPARSE CHECKER M: "Christopher Li" @@ -12515,6 +12764,12 @@ M: Ion Badulescu S: Odd Fixes F: drivers/net/ethernet/adaptec/starfire* +STEC S1220 SKD DRIVER +M: Bart Van Assche +L: linux-block@vger.kernel.org +S: Maintained +F: drivers/block/skd*[ch] + STI CEC DRIVER M: Benjamin Gaignard S: Maintained @@ -12632,6 +12887,18 @@ F: drivers/clocksource/arc_timer.c F: drivers/tty/serial/arc_uart.c T: git git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc.git +SYNOPSYS ARC HSDK SDP pll clock driver +M: Eugeniy Paltsev +S: Supported +F: drivers/clk/clk-hsdk-pll.c +F: Documentation/devicetree/bindings/clock/snps,hsdk-pll-clock.txt + +SYNOPSYS ARC SDP clock driver +M: Eugeniy Paltsev +S: Supported +F: drivers/clk/axs10x/* +F: Documentation/devicetree/bindings/clock/snps,pll-clock.txt + SYNOPSYS ARC SDP platform support M: Alexey Brodkin S: Supported @@ -12668,6 +12935,13 @@ L: linux-mmc@vger.kernel.org S: Maintained F: drivers/mmc/host/dw_mmc* +SYNOPSYS HSDK RESET CONTROLLER DRIVER +M: Eugeniy Paltsev +S: Supported +F: drivers/reset/reset-hsdk.c +F: include/dt-bindings/reset/snps,hsdk-reset.h +F: Documentation/devicetree/bindings/reset/snps,hsdk-reset.txt + SYSTEM CONFIGURATION (SYSCON) M: Lee Jones M: Arnd Bergmann @@ -13547,8 +13821,7 @@ F: Documentation/scsi/ufs.txt F: drivers/scsi/ufs/ UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER DWC HOOKS -M: Manjunath M Bettegowda -M: Prabu Thangamuthu +M: Joao Pinto L: linux-scsi@vger.kernel.org S: Supported F: drivers/scsi/ufs/*dwc* @@ -14157,6 +14430,12 @@ L: netdev@vger.kernel.org S: Maintained F: drivers/net/vmxnet3/ +VOCORE VOCORE2 BOARD +M: Harvey Hunt +L: linux-mips@linux-mips.org +S: Maintained +F: arch/mips/boot/dts/ralink/vocore2.dts + VOLTAGE AND CURRENT REGULATOR FRAMEWORK M: Liam Girdwood M: Mark Brown diff --git a/Makefile b/Makefile index ab067d51ddf1..2835863bdd5a 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 4 -PATCHLEVEL = 13 +PATCHLEVEL = 14 SUBLEVEL = 0 -EXTRAVERSION = +EXTRAVERSION = -rc4 NAME = Fearless 
Coyote # *DOCUMENTATION* @@ -130,8 +130,8 @@ endif ifneq ($(KBUILD_OUTPUT),) # check that the output directory actually exists saved-output := $(KBUILD_OUTPUT) -KBUILD_OUTPUT := $(shell mkdir -p $(KBUILD_OUTPUT) && cd $(KBUILD_OUTPUT) \ - && /bin/pwd) +$(shell [ -d $(KBUILD_OUTPUT) ] || mkdir -p $(KBUILD_OUTPUT)) +KBUILD_OUTPUT := $(realpath $(KBUILD_OUTPUT)) $(if $(KBUILD_OUTPUT),, \ $(error failed to create output directory "$(saved-output)")) @@ -978,7 +978,7 @@ ifdef CONFIG_HEADERS_CHECK $(Q)$(MAKE) -f $(srctree)/Makefile headers_check endif ifdef CONFIG_GDB_SCRIPTS - $(Q)ln -fsn `cd $(srctree) && /bin/pwd`/scripts/gdb/vmlinux-gdb.py + $(Q)ln -fsn $(abspath $(srctree)/scripts/gdb/vmlinux-gdb.py) endif ifdef CONFIG_TRIM_UNUSED_KSYMS $(Q)$(CONFIG_SHELL) $(srctree)/scripts/adjust_autoksyms.sh \ @@ -1128,16 +1128,6 @@ headerdep: $(Q)find $(srctree)/include/ -name '*.h' | xargs --max-args 1 \ $(srctree)/scripts/headerdep.pl -I$(srctree)/include -# --------------------------------------------------------------------------- -# Firmware install -INSTALL_FW_PATH=$(INSTALL_MOD_PATH)/lib/firmware -export INSTALL_FW_PATH - -PHONY += firmware_install -firmware_install: - @mkdir -p $(objtree)/firmware - $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.fwinst obj=firmware __fw_install - # --------------------------------------------------------------------------- # Kernel headers @@ -1182,11 +1172,11 @@ headers_check: headers_install PHONY += kselftest kselftest: - $(Q)$(MAKE) -C tools/testing/selftests run_tests + $(Q)$(MAKE) -C $(srctree)/tools/testing/selftests run_tests PHONY += kselftest-clean kselftest-clean: - $(Q)$(MAKE) -C tools/testing/selftests clean + $(Q)$(MAKE) -C $(srctree)/tools/testing/selftests clean PHONY += kselftest-merge kselftest-merge: @@ -1216,7 +1206,6 @@ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order @$(kecho) ' Building modules, stage 2.'; $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost - $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.fwinst obj=firmware __fw_modbuild modules.builtin: $(vmlinux-dirs:%=%/modules.builtin) $(Q)$(AWK) '!x[$$0]++' $^ > $(objtree)/modules.builtin @@ -1238,7 +1227,7 @@ _modinst_: @rm -rf $(MODLIB)/kernel @rm -f $(MODLIB)/source @mkdir -p $(MODLIB)/kernel - @ln -s `cd $(srctree) && /bin/pwd` $(MODLIB)/source + @ln -s $(abspath $(srctree)) $(MODLIB)/source @if [ ! $(objtree) -ef $(MODLIB)/build ]; then \ rm -f $(MODLIB)/build ; \ ln -s $(CURDIR) $(MODLIB)/build ; \ @@ -1252,7 +1241,6 @@ _modinst_: # boot script depmod is the master version. 
PHONY += _modinst_post _modinst_post: _modinst_ - $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.fwinst obj=firmware __fw_modinst $(call cmd,depmod) ifeq ($(CONFIG_MODULE_SIG), y) @@ -1375,8 +1363,6 @@ help: @echo '* vmlinux - Build the bare kernel' @echo '* modules - Build all modules' @echo ' modules_install - Install all modules to INSTALL_MOD_PATH (default: /)' - @echo ' firmware_install- Install all firmware to INSTALL_FW_PATH' - @echo ' (default: $$(INSTALL_MOD_PATH)/lib/firmware)' @echo ' dir/ - Build all files in dir and below' @echo ' dir/file.[ois] - Build specified target only' @echo ' dir/file.ll - Build the LLVM assembly file' @@ -1630,11 +1616,11 @@ image_name: # Clear a bunch of variables before executing the submake tools/: FORCE $(Q)mkdir -p $(objtree)/tools - $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(shell cd $(objtree) && /bin/pwd) subdir=tools -C $(src)/tools/ + $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(abspath $(objtree)) subdir=tools -C $(src)/tools/ tools/%: FORCE $(Q)mkdir -p $(objtree)/tools - $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(shell cd $(objtree) && /bin/pwd) subdir=tools -C $(src)/tools/ $* + $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(abspath $(objtree)) subdir=tools -C $(src)/tools/ $* # Single targets # --------------------------------------------------------------------------- diff --git a/arch/Kconfig b/arch/Kconfig index 2520ca5b42eb..d789a89cb32c 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -458,6 +458,13 @@ config GCC_PLUGIN_STRUCTLEAK * https://grsecurity.net/ * https://pax.grsecurity.net/ +config GCC_PLUGIN_STRUCTLEAK_BYREF_ALL + bool "Force initialize all struct type variables passed by reference" + depends on GCC_PLUGIN_STRUCTLEAK + help + Zero initialize any struct type local variable that may be passed by + reference without having been initialized. + config GCC_PLUGIN_STRUCTLEAK_VERBOSE bool "Report forcefully initialized variables" depends on GCC_PLUGIN_STRUCTLEAK @@ -473,11 +480,13 @@ config GCC_PLUGIN_RANDSTRUCT depends on GCC_PLUGINS select MODVERSIONS if MODULES help - If you say Y here, the layouts of structures explicitly - marked by __randomize_layout will be randomized at - compile-time. This can introduce the requirement of an - additional information exposure vulnerability for exploits - targeting these structure types. + If you say Y here, the layouts of structures that are entirely + function pointers (and have not been manually annotated with + __no_randomize_layout), or structures that have been explicitly + marked with __randomize_layout, will be randomized at compile-time. + This can introduce the requirement of an additional information + exposure vulnerability for exploits targeting these structure + types. Enabling this feature will introduce some performance impact, slightly increase memory usage, and prevent the use of forensic @@ -928,9 +937,6 @@ config STRICT_MODULE_RWX and non-text memory will be made non-executable. This provides protection against certain security exploits (e.g. 
writing to text) -config ARCH_WANT_RELAX_ORDER - bool - config ARCH_HAS_REFCOUNT bool help diff --git a/arch/alpha/include/asm/mmu_context.h b/arch/alpha/include/asm/mmu_context.h index 384bd47b5187..45c020a0fe76 100644 --- a/arch/alpha/include/asm/mmu_context.h +++ b/arch/alpha/include/asm/mmu_context.h @@ -8,6 +8,7 @@ */ #include +#include #include #include diff --git a/arch/alpha/include/asm/string.h b/arch/alpha/include/asm/string.h index c2911f591704..9eb9933d845f 100644 --- a/arch/alpha/include/asm/string.h +++ b/arch/alpha/include/asm/string.h @@ -65,13 +65,14 @@ extern void * memchr(const void *, int, size_t); aligned values. The DEST and COUNT parameters must be even for correct operation. */ -#define __HAVE_ARCH_MEMSETW -extern void * __memsetw(void *dest, unsigned short, size_t count); - -#define memsetw(s, c, n) \ -(__builtin_constant_p(c) \ - ? __constant_c_memset((s),0x0001000100010001UL*(unsigned short)(c),(n)) \ - : __memsetw((s),(c),(n))) +#define __HAVE_ARCH_MEMSET16 +extern void * __memset16(void *dest, unsigned short, size_t count); +static inline void *memset16(uint16_t *p, uint16_t v, size_t n) +{ + if (__builtin_constant_p(v)) + return __constant_c_memset(p, 0x0001000100010001UL * v, n * 2); + return __memset16(p, v, n * 2); +} #endif /* __KERNEL__ */ diff --git a/arch/alpha/include/asm/vga.h b/arch/alpha/include/asm/vga.h index c00106bac521..3c1c2b6128e7 100644 --- a/arch/alpha/include/asm/vga.h +++ b/arch/alpha/include/asm/vga.h @@ -34,7 +34,7 @@ static inline void scr_memsetw(u16 *s, u16 c, unsigned int count) if (__is_ioaddr(s)) memsetw_io((u16 __iomem *) s, c, count); else - memsetw(s, c, count); + memset16(s, c, count / 2); } /* Do not trust that the usage will be correct; analyze the arguments. */ diff --git a/arch/alpha/include/uapi/asm/mman.h b/arch/alpha/include/uapi/asm/mman.h index 02760f6e6ca4..3b26cc62dadb 100644 --- a/arch/alpha/include/uapi/asm/mman.h +++ b/arch/alpha/include/uapi/asm/mman.h @@ -64,20 +64,12 @@ overrides the coredump filter bits */ #define MADV_DODUMP 17 /* Clear the MADV_NODUMP flag */ +#define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */ +#define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */ + /* compatibility flags */ #define MAP_FILE 0 -/* - * When MAP_HUGETLB is set bits [26:31] encode the log2 of the huge page size. - * This gives us 6 bits, which is enough until someone invents 128 bit address - * spaces. - * - * Assume these are all power of twos. - * When 0 use the default page size. 
- */ -#define MAP_HUGE_SHIFT 26 -#define MAP_HUGE_MASK 0x3f - #define PKEY_DISABLE_ACCESS 0x1 #define PKEY_DISABLE_WRITE 0x2 #define PKEY_ACCESS_MASK (PKEY_DISABLE_ACCESS |\ diff --git a/arch/alpha/include/uapi/asm/siginfo.h b/arch/alpha/include/uapi/asm/siginfo.h index 9822362a8424..70494d1d8f29 100644 --- a/arch/alpha/include/uapi/asm/siginfo.h +++ b/arch/alpha/include/uapi/asm/siginfo.h @@ -6,4 +6,18 @@ #include +/* + * SIGFPE si_codes + */ +#ifdef __KERNEL__ +#define FPE_FIXME 0 /* Broken dup of SI_USER */ +#endif /* __KERNEL__ */ + +/* + * SIGTRAP si_codes + */ +#ifdef __KERNEL__ +#define TRAP_FIXME 0 /* Broken dup of SI_USER */ +#endif /* __KERNEL__ */ + #endif diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h index 7b285dd4fe05..c6133a045352 100644 --- a/arch/alpha/include/uapi/asm/socket.h +++ b/arch/alpha/include/uapi/asm/socket.h @@ -109,4 +109,6 @@ #define SO_PEERGROUPS 59 +#define SO_ZEROCOPY 60 + #endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c index 8322df174bbf..564114eb85e1 100644 --- a/arch/alpha/kernel/pci.c +++ b/arch/alpha/kernel/pci.c @@ -312,8 +312,9 @@ common_init_pci(void) { struct pci_controller *hose; struct list_head resources; + struct pci_host_bridge *bridge; struct pci_bus *bus; - int next_busno; + int ret, next_busno; int need_domain_info = 0; u32 pci_mem_end; u32 sg_base; @@ -336,11 +337,25 @@ common_init_pci(void) pci_add_resource_offset(&resources, hose->mem_space, hose->mem_space->start); - bus = pci_scan_root_bus(NULL, next_busno, alpha_mv.pci_ops, - hose, &resources); - if (!bus) + bridge = pci_alloc_host_bridge(0); + if (!bridge) continue; - hose->bus = bus; + + list_splice_init(&resources, &bridge->windows); + bridge->dev.parent = NULL; + bridge->sysdata = hose; + bridge->busnr = next_busno; + bridge->ops = alpha_mv.pci_ops; + bridge->swizzle_irq = alpha_mv.pci_swizzle; + bridge->map_irq = alpha_mv.pci_map_irq; + + ret = pci_scan_root_bus_bridge(bridge); + if (ret) { + pci_free_host_bridge(bridge); + continue; + } + + bus = hose->bus = bridge->bus; hose->need_domain_info = need_domain_info; next_busno = bus->busn_res.end + 1; /* Don't allow 8-bit bus number overflow inside the hose - @@ -354,7 +369,6 @@ common_init_pci(void) pcibios_claim_console_setup(); pci_assign_unassigned_resources(); - pci_fixup_irqs(alpha_mv.pci_swizzle, alpha_mv.pci_map_irq); for (hose = hose_head; hose; hose = hose->next) { bus = hose->bus; if (bus) @@ -362,7 +376,6 @@ common_init_pci(void) } } - struct pci_controller * __init alloc_pci_controller(void) { diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c index 2cfaa0e5c577..8ae04a121186 100644 --- a/arch/alpha/kernel/sys_nautilus.c +++ b/arch/alpha/kernel/sys_nautilus.c @@ -194,22 +194,46 @@ static struct resource irongate_mem = { .name = "Irongate PCI MEM", .flags = IORESOURCE_MEM, }; +static struct resource busn_resource = { + .name = "PCI busn", + .start = 0, + .end = 255, + .flags = IORESOURCE_BUS, +}; void __init nautilus_init_pci(void) { struct pci_controller *hose = hose_head; + struct pci_host_bridge *bridge; struct pci_bus *bus; struct pci_dev *irongate; unsigned long bus_align, bus_size, pci_mem; unsigned long memtop = max_low_pfn << PAGE_SHIFT; + int ret; - /* Scan our single hose. 
*/ - bus = pci_scan_bus(0, alpha_mv.pci_ops, hose); - if (!bus) + bridge = pci_alloc_host_bridge(0); + if (!bridge) return; - hose->bus = bus; + pci_add_resource(&bridge->windows, &ioport_resource); + pci_add_resource(&bridge->windows, &iomem_resource); + pci_add_resource(&bridge->windows, &busn_resource); + bridge->dev.parent = NULL; + bridge->sysdata = hose; + bridge->busnr = 0; + bridge->ops = alpha_mv.pci_ops; + bridge->swizzle_irq = alpha_mv.pci_swizzle; + bridge->map_irq = alpha_mv.pci_map_irq; + + /* Scan our single hose. */ + ret = pci_scan_root_bus_bridge(bridge); + if (ret) { + pci_free_host_bridge(bridge); + return; + } + + bus = hose->bus = bridge->bus; pcibios_claim_one_bus(bus); irongate = pci_get_bus_and_slot(0, 0); @@ -254,7 +278,6 @@ nautilus_init_pci(void) /* pci_common_swizzle() relies on bus->self being NULL for the root bus, so just clear it. */ bus->self = NULL; - pci_fixup_irqs(alpha_mv.pci_swizzle, alpha_mv.pci_map_irq); pci_bus_add_devices(bus); } diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c index ddb89a18cf26..49d3b1e63ce5 100644 --- a/arch/alpha/kernel/traps.c +++ b/arch/alpha/kernel/traps.c @@ -280,7 +280,7 @@ do_entIF(unsigned long type, struct pt_regs *regs) case 1: /* bugcheck */ info.si_signo = SIGTRAP; info.si_errno = 0; - info.si_code = __SI_FAULT; + info.si_code = TRAP_FIXME; info.si_addr = (void __user *) regs->pc; info.si_trapno = 0; send_sig_info(SIGTRAP, &info, current); @@ -320,7 +320,7 @@ do_entIF(unsigned long type, struct pt_regs *regs) break; case GEN_ROPRAND: signo = SIGFPE; - code = __SI_FAULT; + code = FPE_FIXME; break; case GEN_DECOVF: @@ -342,7 +342,7 @@ do_entIF(unsigned long type, struct pt_regs *regs) case GEN_SUBRNG7: default: signo = SIGTRAP; - code = __SI_FAULT; + code = TRAP_FIXME; break; } diff --git a/arch/alpha/lib/memset.S b/arch/alpha/lib/memset.S index 89a26f5e89de..f824969e9e77 100644 --- a/arch/alpha/lib/memset.S +++ b/arch/alpha/lib/memset.S @@ -20,7 +20,7 @@ .globl memset .globl __memset .globl ___memset - .globl __memsetw + .globl __memset16 .globl __constant_c_memset .ent ___memset @@ -110,8 +110,8 @@ EXPORT_SYMBOL(___memset) EXPORT_SYMBOL(__constant_c_memset) .align 5 - .ent __memsetw -__memsetw: + .ent __memset16 +__memset16: .prologue 0 inswl $17,0,$1 /* E0 */ @@ -123,8 +123,8 @@ __memsetw: or $1,$4,$17 /* E0 */ br __constant_c_memset /* .. E1 */ - .end __memsetw -EXPORT_SYMBOL(__memsetw) + .end __memset16 +EXPORT_SYMBOL(__memset16) memset = ___memset __memset = ___memset diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 7db85ab00c52..c84e67fdea09 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -24,7 +24,7 @@ config ARC select GENERIC_SMP_IDLE_THREAD select HAVE_ARCH_KGDB select HAVE_ARCH_TRACEHOOK - select HAVE_FUTEX_CMPXCHG + select HAVE_FUTEX_CMPXCHG if FUTEX select HAVE_IOREMAP_PROT select HAVE_KPROBES select HAVE_KRETPROBES @@ -100,6 +100,7 @@ source "arch/arc/plat-tb10x/Kconfig" source "arch/arc/plat-axs10x/Kconfig" #New platform adds here source "arch/arc/plat-eznps/Kconfig" +source "arch/arc/plat-hsdk/Kconfig" endmenu @@ -418,7 +419,7 @@ endif # ISA_ARCV2 endmenu # "ARC CPU Configuration" config LINUX_LINK_BASE - hex "Linux Link Address" + hex "Kernel link address" default "0x80000000" help ARC700 divides the 32 bit phy address space into two equal halves @@ -431,6 +432,14 @@ config LINUX_LINK_BASE If you don't know what the above means, leave this setting alone. 
This needs to match memory start address specified in Device Tree +config LINUX_RAM_BASE + hex "RAM base address" + default LINUX_LINK_BASE + help + By default Linux is linked at base of RAM. However in some special + cases (such as HSDK), Linux can't be linked at start of DDR, hence + this option. + config HIGHMEM bool "High Memory Support" select ARCH_DISCONTIGMEM_ENABLE diff --git a/arch/arc/Makefile b/arch/arc/Makefile index 3a61cfcc38c0..d37f49d6a27f 100644 --- a/arch/arc/Makefile +++ b/arch/arc/Makefile @@ -6,8 +6,6 @@ # published by the Free Software Foundation. # -UTS_MACHINE := arc - ifeq ($(CROSS_COMPILE),) ifndef CONFIG_CPU_BIG_ENDIAN CROSS_COMPILE := arc-linux- @@ -111,6 +109,7 @@ core-y += arch/arc/plat-sim/ core-$(CONFIG_ARC_PLAT_TB10X) += arch/arc/plat-tb10x/ core-$(CONFIG_ARC_PLAT_AXS10X) += arch/arc/plat-axs10x/ core-$(CONFIG_ARC_PLAT_EZNPS) += arch/arc/plat-eznps/ +core-$(CONFIG_ARC_SOC_HSDK) += arch/arc/plat-hsdk/ ifdef CONFIG_ARC_PLAT_EZNPS KBUILD_CPPFLAGS += -I$(srctree)/arch/arc/plat-eznps/include diff --git a/arch/arc/boot/dts/axc001.dtsi b/arch/arc/boot/dts/axc001.dtsi index a380ffa1a458..fdc266504ada 100644 --- a/arch/arc/boot/dts/axc001.dtsi +++ b/arch/arc/boot/dts/axc001.dtsi @@ -99,7 +99,7 @@ mb_intc: dw-apb-ictl@0xe0012000 { memory { device_type = "memory"; - /* CONFIG_KERNEL_RAM_BASE_ADDRESS needs to match low mem start */ + /* CONFIG_LINUX_RAM_BASE needs to match low mem start */ reg = <0x0 0x80000000 0x0 0x1b000000>; /* (512 - 32) MiB */ }; diff --git a/arch/arc/boot/dts/axc003.dtsi b/arch/arc/boot/dts/axc003.dtsi index cc9239ef8d08..4e6e9f57e790 100644 --- a/arch/arc/boot/dts/axc003.dtsi +++ b/arch/arc/boot/dts/axc003.dtsi @@ -24,10 +24,17 @@ cpu_card { ranges = <0x00000000 0x0 0xf0000000 0x10000000>; - core_clk: core_clk { + input_clk: input-clk { #clock-cells = <0>; compatible = "fixed-clock"; - clock-frequency = <90000000>; + clock-frequency = <33333333>; + }; + + core_clk: core-clk@80 { + compatible = "snps,axs10x-arc-pll-clock"; + reg = <0x80 0x10>, <0x100 0x10>; + #clock-cells = <0>; + clocks = <&input_clk>; }; core_intc: archs-intc@cpu { @@ -102,7 +109,7 @@ mb_intc: dw-apb-ictl@0xe0012000 { memory { device_type = "memory"; - /* CONFIG_KERNEL_RAM_BASE_ADDRESS needs to match low mem start */ + /* CONFIG_LINUX_RAM_BASE needs to match low mem start */ reg = <0x0 0x80000000 0x0 0x20000000 /* 512 MiB low mem */ 0x1 0xc0000000 0x0 0x40000000>; /* 1 GiB highmem */ }; diff --git a/arch/arc/boot/dts/axc003_idu.dtsi b/arch/arc/boot/dts/axc003_idu.dtsi index 4ebb2170abec..63954a8b0100 100644 --- a/arch/arc/boot/dts/axc003_idu.dtsi +++ b/arch/arc/boot/dts/axc003_idu.dtsi @@ -24,10 +24,17 @@ cpu_card { ranges = <0x00000000 0x0 0xf0000000 0x10000000>; - core_clk: core_clk { + input_clk: input-clk { #clock-cells = <0>; compatible = "fixed-clock"; - clock-frequency = <100000000>; + clock-frequency = <33333333>; + }; + + core_clk: core-clk@80 { + compatible = "snps,axs10x-arc-pll-clock"; + reg = <0x80 0x10>, <0x100 0x10>; + #clock-cells = <0>; + clocks = <&input_clk>; }; core_intc: archs-intc@cpu { @@ -108,7 +115,7 @@ mb_intc: dw-apb-ictl@0xe0012000 { memory { device_type = "memory"; - /* CONFIG_KERNEL_RAM_BASE_ADDRESS needs to match low mem start */ + /* CONFIG_LINUX_RAM_BASE needs to match low mem start */ reg = <0x0 0x80000000 0x0 0x20000000 /* 512 MiB low mem */ 0x1 0xc0000000 0x0 0x40000000>; /* 1 GiB highmem */ }; diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi index 0ff7e07edcd4..e114000a84f5 100644 --- 
a/arch/arc/boot/dts/axs10x_mb.dtsi +++ b/arch/arc/boot/dts/axs10x_mb.dtsi @@ -44,7 +44,14 @@ apbclk: apbclk { mmcclk: mmcclk { compatible = "fixed-clock"; - clock-frequency = <50000000>; + /* + * DW sdio controller has external ciu clock divider + * controlled via register in SDIO IP. It divides + * sdio_ref_clk (which comes from CGU) by 16 for + * default. So default mmcclk clock (which comes + * to sdk_in) is 25000000 Hz. + */ + clock-frequency = <25000000>; #clock-cells = <0>; }; @@ -101,7 +108,6 @@ ohci@0x60000 { mmc@0x15000 { compatible = "altr,socfpga-dw-mshc"; reg = < 0x15000 0x400 >; - num-slots = < 1 >; fifo-depth = < 16 >; card-detect-delay = < 200 >; clocks = <&apbclk>, <&mmcclk>; diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts new file mode 100644 index 000000000000..8adde1b492f1 --- /dev/null +++ b/arch/arc/boot/dts/hsdk.dts @@ -0,0 +1,215 @@ +/* + * Copyright (C) 2017 Synopsys, Inc. (www.synopsys.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +/* + * Device Tree for ARC HS Development Kit + */ +/dts-v1/; + +#include +#include + +/ { + model = "snps,hsdk"; + compatible = "snps,hsdk"; + + #address-cells = <1>; + #size-cells = <1>; + + chosen { + bootargs = "earlycon=uart8250,mmio32,0xf0005000,115200n8 console=ttyS0,115200n8 debug print-fatal-signals=1"; + }; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + cpu@0 { + device_type = "cpu"; + compatible = "snps,archs38"; + reg = <0>; + clocks = <&core_clk>; + }; + + cpu@1 { + device_type = "cpu"; + compatible = "snps,archs38"; + reg = <1>; + clocks = <&core_clk>; + }; + + cpu@2 { + device_type = "cpu"; + compatible = "snps,archs38"; + reg = <2>; + clocks = <&core_clk>; + }; + + cpu@3 { + device_type = "cpu"; + compatible = "snps,archs38"; + reg = <3>; + clocks = <&core_clk>; + }; + }; + + input_clk: input-clk { + #clock-cells = <0>; + compatible = "fixed-clock"; + clock-frequency = <33333333>; + }; + + cpu_intc: cpu-interrupt-controller { + compatible = "snps,archs-intc"; + interrupt-controller; + #interrupt-cells = <1>; + }; + + idu_intc: idu-interrupt-controller { + compatible = "snps,archs-idu-intc"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&cpu_intc>; + }; + + arcpct: pct { + compatible = "snps,archs-pct"; + }; + + /* TIMER0 with interrupt for clockevent */ + timer { + compatible = "snps,arc-timer"; + interrupts = <16>; + interrupt-parent = <&cpu_intc>; + clocks = <&core_clk>; + }; + + /* 64-bit Global Free Running Counter */ + gfrc { + compatible = "snps,archs-timer-gfrc"; + clocks = <&core_clk>; + }; + + soc { + compatible = "simple-bus"; + #address-cells = <1>; + #size-cells = <1>; + interrupt-parent = <&idu_intc>; + + ranges = <0x00000000 0xf0000000 0x10000000>; + + cgu_rst: reset-controller@8a0 { + compatible = "snps,hsdk-reset"; + #reset-cells = <1>; + reg = <0x8A0 0x4>, <0xFF0 0x4>; + }; + + core_clk: core-clk@0 { + compatible = "snps,hsdk-core-pll-clock"; + reg = <0x00 0x10>, <0x14B8 0x4>; + #clock-cells = <0>; + clocks = <&input_clk>; + }; + + serial: serial@5000 { + compatible = "snps,dw-apb-uart"; + reg = <0x5000 0x100>; + clock-frequency = <33330000>; + interrupts = <6>; + baud = <115200>; + reg-shift = <2>; + reg-io-width = <4>; + }; + + gmacclk: gmacclk { + compatible = "fixed-clock"; + clock-frequency = <400000000>; + #clock-cells = <0>; + }; + + mmcclk_ciu: mmcclk-ciu { + compatible = 
"fixed-clock"; + /* + * DW sdio controller has external ciu clock divider + * controlled via register in SDIO IP. Due to its + * unexpected default value (it should devide by 1 + * but it devides by 8) SDIO IP uses wrong clock and + * works unstable (see STAR 9001204800) + * So add temporary fix and change clock frequency + * from 100000000 to 12500000 Hz until we fix dw sdio + * driver itself. + */ + clock-frequency = <12500000>; + #clock-cells = <0>; + }; + + mmcclk_biu: mmcclk-biu { + compatible = "fixed-clock"; + clock-frequency = <400000000>; + #clock-cells = <0>; + }; + + ethernet@8000 { + #interrupt-cells = <1>; + compatible = "snps,dwmac"; + reg = <0x8000 0x2000>; + interrupts = <10>; + interrupt-names = "macirq"; + phy-mode = "rgmii"; + snps,pbl = <32>; + clocks = <&gmacclk>; + clock-names = "stmmaceth"; + phy-handle = <&phy0>; + resets = <&cgu_rst HSDK_ETH_RESET>; + reset-names = "stmmaceth"; + + mdio { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,dwmac-mdio"; + phy0: ethernet-phy@0 { + reg = <0>; + ti,rx-internal-delay = ; + ti,tx-internal-delay = ; + ti,fifo-depth = ; + }; + }; + }; + + ohci@60000 { + compatible = "snps,hsdk-v1.0-ohci", "generic-ohci"; + reg = <0x60000 0x100>; + interrupts = <15>; + }; + + ehci@40000 { + compatible = "snps,hsdk-v1.0-ehci", "generic-ehci"; + reg = <0x40000 0x100>; + interrupts = <15>; + }; + + mmc@a000 { + compatible = "altr,socfpga-dw-mshc"; + reg = <0xa000 0x400>; + num-slots = <1>; + fifo-depth = <16>; + card-detect-delay = <200>; + clocks = <&mmcclk_biu>, <&mmcclk_ciu>; + clock-names = "biu", "ciu"; + interrupts = <12>; + bus-width = <4>; + }; + }; + + memory@80000000 { + #address-cells = <1>; + #size-cells = <1>; + device_type = "memory"; + reg = <0x80000000 0x40000000>; /* 1 GiB */ + }; +}; diff --git a/arch/arc/boot/dts/nsim_hs.dts b/arch/arc/boot/dts/nsim_hs.dts index 3772c40c245e..8d787b251f73 100644 --- a/arch/arc/boot/dts/nsim_hs.dts +++ b/arch/arc/boot/dts/nsim_hs.dts @@ -18,7 +18,7 @@ / { memory { device_type = "memory"; - /* CONFIG_LINUX_LINK_BASE needs to match low mem start */ + /* CONFIG_LINUX_RAM_BASE needs to match low mem start */ reg = <0x0 0x80000000 0x0 0x20000000 /* 512 MB low mem */ 0x1 0x00000000 0x0 0x40000000>; /* 1 GB highmem */ }; diff --git a/arch/arc/boot/dts/vdk_axs10x_mb.dtsi b/arch/arc/boot/dts/vdk_axs10x_mb.dtsi index 459fc656b759..48bb4b4cd234 100644 --- a/arch/arc/boot/dts/vdk_axs10x_mb.dtsi +++ b/arch/arc/boot/dts/vdk_axs10x_mb.dtsi @@ -104,7 +104,6 @@ ps2: ps2@e0017400 { mmc@0x15000 { compatible = "snps,dw-mshc"; reg = <0x15000 0x400>; - num-slots = <1>; fifo-depth = <1024>; card-detect-delay = <200>; clocks = <&apbclk>, <&mmcclk>; diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig index 6980b966a364..ec7c849a5c8e 100644 --- a/arch/arc/configs/axs101_defconfig +++ b/arch/arc/configs/axs101_defconfig @@ -105,7 +105,7 @@ CONFIG_NLS_ISO8859_1=y # CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_STRIP_ASM_SYMS=y -CONFIG_LOCKUP_DETECTOR=y +CONFIG_SOFTLOCKUP_DETECTOR=y CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10 # CONFIG_SCHED_DEBUG is not set # CONFIG_DEBUG_PREEMPT is not set diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig index 2233f5777a71..63d3cf69e0b0 100644 --- a/arch/arc/configs/axs103_defconfig +++ b/arch/arc/configs/axs103_defconfig @@ -104,7 +104,7 @@ CONFIG_NLS_ISO8859_1=y # CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_STRIP_ASM_SYMS=y 
-CONFIG_LOCKUP_DETECTOR=y +CONFIG_SOFTLOCKUP_DETECTOR=y CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10 # CONFIG_SCHED_DEBUG is not set # CONFIG_DEBUG_PREEMPT is not set diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig index 30a3d4cf53d2..f613ecac14a7 100644 --- a/arch/arc/configs/axs103_smp_defconfig +++ b/arch/arc/configs/axs103_smp_defconfig @@ -107,7 +107,7 @@ CONFIG_NLS_ISO8859_1=y # CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_STRIP_ASM_SYMS=y -CONFIG_LOCKUP_DETECTOR=y +CONFIG_SOFTLOCKUP_DETECTOR=y CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10 # CONFIG_SCHED_DEBUG is not set # CONFIG_DEBUG_PREEMPT is not set diff --git a/arch/arc/configs/haps_hs_smp_defconfig b/arch/arc/configs/haps_hs_smp_defconfig index 821a2e562f3f..3507be2af6fe 100644 --- a/arch/arc/configs/haps_hs_smp_defconfig +++ b/arch/arc/configs/haps_hs_smp_defconfig @@ -84,5 +84,5 @@ CONFIG_TMPFS=y CONFIG_NFS_FS=y # CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set -CONFIG_LOCKUP_DETECTOR=y +CONFIG_SOFTLOCKUP_DETECTOR=y # CONFIG_DEBUG_PREEMPT is not set diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig new file mode 100644 index 000000000000..15f0f6b5fec1 --- /dev/null +++ b/arch/arc/configs/hsdk_defconfig @@ -0,0 +1,81 @@ +CONFIG_DEFAULT_HOSTNAME="ARCLinux" +CONFIG_SYSVIPC=y +# CONFIG_CROSS_MEMORY_ATTACH is not set +CONFIG_NO_HZ_IDLE=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_NAMESPACES=y +# CONFIG_UTS_NS is not set +# CONFIG_PID_NS is not set +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/" +CONFIG_EMBEDDED=y +CONFIG_PERF_EVENTS=y +# CONFIG_VM_EVENT_COUNTERS is not set +# CONFIG_COMPAT_BRK is not set +CONFIG_SLAB=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_ARC_SOC_HSDK=y +CONFIG_ISA_ARCV2=y +CONFIG_SMP=y +CONFIG_LINUX_LINK_BASE=0x90000000 +CONFIG_LINUX_RAM_BASE=0x80000000 +CONFIG_ARC_BUILTIN_DTB_NAME="hsdk" +CONFIG_PREEMPT=y +# CONFIG_COMPACTION is not set +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_INET=y +CONFIG_DEVTMPFS=y +# CONFIG_STANDALONE is not set +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +# CONFIG_FIRMWARE_IN_KERNEL is not set +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_NETDEVICES=y +CONFIG_STMMAC_ETH=y +CONFIG_MICREL_PHY=y +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_SERIO is not set +# CONFIG_LEGACY_PTYS is not set +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_DW=y +CONFIG_SERIAL_OF_PLATFORM=y +# CONFIG_HW_RANDOM is not set +# CONFIG_HWMON is not set +CONFIG_FB=y +CONFIG_FB_UDL=y +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_USB=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PLATFORM=y +CONFIG_USB_STORAGE=y +CONFIG_MMC=y +CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_PLTFM=y +CONFIG_MMC_DW=y +# CONFIG_IOMMU_SUPPORT is not set +CONFIG_RESET_HSDK=y +CONFIG_EXT3_FS=y +CONFIG_VFAT_FS=y +CONFIG_TMPFS=y +CONFIG_NFS_FS=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ISO8859_1=y +# CONFIG_ENABLE_WARN_DEPRECATED is not set +# CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_STRIP_ASM_SYMS=y +CONFIG_SOFTLOCKUP_DETECTOR=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10 +# CONFIG_SCHED_DEBUG is not set +# CONFIG_DEBUG_PREEMPT is not set +# CONFIG_FTRACE is not set +CONFIG_CRYPTO_ECHAINIV=y diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig index c0d6a010751a..4fcf4f2503f6 100644 --- 
a/arch/arc/configs/vdk_hs38_defconfig +++ b/arch/arc/configs/vdk_hs38_defconfig @@ -94,7 +94,7 @@ CONFIG_NLS_ISO8859_1=y # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_STRIP_ASM_SYMS=y CONFIG_DEBUG_SHIRQ=y -CONFIG_LOCKUP_DETECTOR=y +CONFIG_SOFTLOCKUP_DETECTOR=y CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10 # CONFIG_SCHED_DEBUG is not set # CONFIG_DEBUG_PREEMPT is not set diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig index 5c0971787acf..7b71464f6c2f 100644 --- a/arch/arc/configs/vdk_hs38_smp_defconfig +++ b/arch/arc/configs/vdk_hs38_smp_defconfig @@ -98,7 +98,7 @@ CONFIG_NLS_ISO8859_1=y # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_STRIP_ASM_SYMS=y CONFIG_DEBUG_SHIRQ=y -CONFIG_LOCKUP_DETECTOR=y +CONFIG_SOFTLOCKUP_DETECTOR=y CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10 # CONFIG_SCHED_DEBUG is not set # CONFIG_DEBUG_PREEMPT is not set diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h index ba8e802dba80..b1c56d35f2a9 100644 --- a/arch/arc/include/asm/arcregs.h +++ b/arch/arc/include/asm/arcregs.h @@ -98,6 +98,7 @@ /* Auxiliary registers */ #define AUX_IDENTITY 4 +#define AUX_EXEC_CTRL 8 #define AUX_INTR_VEC_BASE 0x25 #define AUX_VOL 0x5e @@ -135,12 +136,12 @@ struct bcr_identity { #endif }; -struct bcr_isa { +struct bcr_isa_arcv2 { #ifdef CONFIG_CPU_BIG_ENDIAN unsigned int div_rem:4, pad2:4, ldd:1, unalign:1, atomic:1, be:1, - pad1:11, atomic1:1, ver:8; + pad1:12, ver:8; #else - unsigned int ver:8, atomic1:1, pad1:11, be:1, atomic:1, unalign:1, + unsigned int ver:8, pad1:12, be:1, atomic:1, unalign:1, ldd:1, pad2:4, div_rem:4; #endif }; @@ -263,13 +264,13 @@ struct cpuinfo_arc { struct cpuinfo_arc_mmu mmu; struct cpuinfo_arc_bpu bpu; struct bcr_identity core; - struct bcr_isa isa; + struct bcr_isa_arcv2 isa; const char *details, *name; unsigned int vec_base; struct cpuinfo_arc_ccm iccm, dccm; struct { unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2, - fpu_sp:1, fpu_dp:1, pad2:6, + fpu_sp:1, fpu_dp:1, dual_iss_enb:1, dual_iss_exist:1, pad2:4, debug:1, ap:1, smart:1, rtt:1, pad3:4, timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4; } extn; diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h index 02fd1cece6ef..8486f328cc5d 100644 --- a/arch/arc/include/asm/cache.h +++ b/arch/arc/include/asm/cache.h @@ -47,7 +47,8 @@ : "r"(data), "r"(ptr)); \ }) -#define ARCH_DMA_MINALIGN L1_CACHE_BYTES +/* Largest line length for either L1 or L2 is 128 bytes */ +#define ARCH_DMA_MINALIGN 128 extern void arc_cache_init(void); extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len); @@ -95,6 +96,8 @@ extern unsigned long perip_base, perip_end; #define ARC_REG_SLC_CTRL 0x903 #define ARC_REG_SLC_FLUSH 0x904 #define ARC_REG_SLC_INVALIDATE 0x905 +#define ARC_AUX_SLC_IVDL 0x910 +#define ARC_AUX_SLC_FLDL 0x912 #define ARC_REG_SLC_RGN_START 0x914 #define ARC_REG_SLC_RGN_START1 0x915 #define ARC_REG_SLC_RGN_END 0x916 diff --git a/arch/arc/include/asm/entry-compact.h b/arch/arc/include/asm/entry-compact.h index 14c310f2e0b1..ec36d5b6d435 100644 --- a/arch/arc/include/asm/entry-compact.h +++ b/arch/arc/include/asm/entry-compact.h @@ -192,6 +192,12 @@ PUSHAX lp_start PUSHAX erbta +#ifdef CONFIG_ARC_PLAT_EZNPS + .word CTOP_INST_SCHD_RW + PUSHAX CTOP_AUX_GPA1 + PUSHAX CTOP_AUX_EFLAGS +#endif + lr r9, [ecr] st r9, [sp, PT_event] /* EV_Trap expects r9 to have ECR */ .endm @@ -208,6 +214,12 @@ * by hardware and that is not good. 
*-------------------------------------------------------------*/ .macro EXCEPTION_EPILOGUE +#ifdef CONFIG_ARC_PLAT_EZNPS + .word CTOP_INST_SCHD_RW + POPAX CTOP_AUX_EFLAGS + POPAX CTOP_AUX_GPA1 +#endif + POPAX erbta POPAX lp_start POPAX lp_end @@ -265,6 +277,12 @@ PUSHAX lp_end PUSHAX lp_start PUSHAX bta_l\LVL\() + +#ifdef CONFIG_ARC_PLAT_EZNPS + .word CTOP_INST_SCHD_RW + PUSHAX CTOP_AUX_GPA1 + PUSHAX CTOP_AUX_EFLAGS +#endif .endm /*-------------------------------------------------------------- @@ -277,6 +295,12 @@ * by hardware and that is not good. *-------------------------------------------------------------*/ .macro INTERRUPT_EPILOGUE LVL +#ifdef CONFIG_ARC_PLAT_EZNPS + .word CTOP_INST_SCHD_RW + POPAX CTOP_AUX_EFLAGS + POPAX CTOP_AUX_GPA1 +#endif + POPAX bta_l\LVL\() POPAX lp_start POPAX lp_end diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h index a64c447b0337..8a4f77ea3238 100644 --- a/arch/arc/include/asm/irqflags-arcv2.h +++ b/arch/arc/include/asm/irqflags-arcv2.h @@ -47,9 +47,6 @@ #define ISA_INIT_STATUS_BITS (STATUS_IE_MASK | STATUS_AD_MASK | \ (ARCV2_IRQ_DEF_PRIO << 1)) -/* SLEEP needs default irq priority (<=) which can interrupt the doze */ -#define ISA_SLEEP_ARG (0x10 | ARCV2_IRQ_DEF_PRIO) - #ifndef __ASSEMBLY__ /* diff --git a/arch/arc/include/asm/irqflags-compact.h b/arch/arc/include/asm/irqflags-compact.h index 4c6eed80cd8b..fcb80171fc34 100644 --- a/arch/arc/include/asm/irqflags-compact.h +++ b/arch/arc/include/asm/irqflags-compact.h @@ -43,8 +43,6 @@ #define ISA_INIT_STATUS_BITS STATUS_IE_MASK -#define ISA_SLEEP_ARG 0x3 - #ifndef __ASSEMBLY__ /****************************************************************** diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h index 296c3426a6ad..109baa06831c 100644 --- a/arch/arc/include/asm/page.h +++ b/arch/arc/include/asm/page.h @@ -85,7 +85,7 @@ typedef pte_t * pgtable_t; */ #define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) -#define ARCH_PFN_OFFSET virt_to_pfn(CONFIG_LINUX_LINK_BASE) +#define ARCH_PFN_OFFSET virt_to_pfn(CONFIG_LINUX_RAM_BASE) #ifdef CONFIG_FLATMEM #define pfn_valid(pfn) (((pfn) - ARCH_PFN_OFFSET) < max_mapnr) diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h index 4104a0839214..8ee41e988169 100644 --- a/arch/arc/include/asm/processor.h +++ b/arch/arc/include/asm/processor.h @@ -27,6 +27,13 @@ struct arc_fpu { }; #endif +#ifdef CONFIG_ARC_PLAT_EZNPS +struct eznps_dp { + unsigned int eflags; + unsigned int gpa1; +}; +#endif + /* Arch specific stuff which needs to be saved per task. 
* However these items are not so important so as to earn a place in * struct thread_info @@ -38,6 +45,9 @@ struct thread_struct { #ifdef CONFIG_ARC_FPU_SAVE_RESTORE struct arc_fpu fpu; #endif +#ifdef CONFIG_ARC_PLAT_EZNPS + struct eznps_dp dp; +#endif }; #define INIT_THREAD { \ @@ -68,9 +78,6 @@ struct task_struct; #endif -#define copy_segments(tsk, mm) do { } while (0) -#define release_segments(mm) do { } while (0) - #define KSTK_EIP(tsk) (task_pt_regs(tsk)->ret) #define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp) diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h index 5297faa8a378..5a8cb22724a1 100644 --- a/arch/arc/include/asm/ptrace.h +++ b/arch/arc/include/asm/ptrace.h @@ -19,6 +19,11 @@ #ifdef CONFIG_ISA_ARCOMPACT struct pt_regs { +#ifdef CONFIG_ARC_PLAT_EZNPS + unsigned long eflags; /* Extended FLAGS */ + unsigned long gpa1; /* General Purpose Aux */ +#endif + /* Real registers */ unsigned long bta; /* bta_l1, bta_l2, erbta */ diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h index a325e6a36523..47efc8451b70 100644 --- a/arch/arc/include/asm/spinlock.h +++ b/arch/arc/include/asm/spinlock.h @@ -247,9 +247,15 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) __asm__ __volatile__( "1: ex %0, [%1] \n" +#ifdef CONFIG_EZNPS_MTM_EXT + " .word %3 \n" +#endif " breq %0, %2, 1b \n" : "+&r" (val) : "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__) +#ifdef CONFIG_EZNPS_MTM_EXT + , "i"(CTOP_INST_SCHD_RW) +#endif : "memory"); /* @@ -291,6 +297,12 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) */ smp_mb(); + /* + * EX is not really required here, a simple STore of 0 suffices. + * However this causes tasklist livelocks in SystemC based SMP virtual + * platforms where the systemc core scheduler uses EX as a cue for + * moving to next core. 
Do a git log of this file for details + */ __asm__ __volatile__( " ex %0, [%1] \n" : "+r" (val) diff --git a/arch/arc/include/asm/switch_to.h b/arch/arc/include/asm/switch_to.h index 1b171ab5fec0..f7d07feeea61 100644 --- a/arch/arc/include/asm/switch_to.h +++ b/arch/arc/include/asm/switch_to.h @@ -26,10 +26,19 @@ extern void fpu_save_restore(struct task_struct *p, struct task_struct *n); #endif /* !CONFIG_ARC_FPU_SAVE_RESTORE */ +#ifdef CONFIG_ARC_PLAT_EZNPS +extern void dp_save_restore(struct task_struct *p, struct task_struct *n); +#define ARC_EZNPS_DP_PREV(p, n) dp_save_restore(p, n) +#else +#define ARC_EZNPS_DP_PREV(p, n) + +#endif /* !CONFIG_ARC_PLAT_EZNPS */ + struct task_struct *__switch_to(struct task_struct *p, struct task_struct *n); #define switch_to(prev, next, last) \ do { \ + ARC_EZNPS_DP_PREV(prev, next); \ ARC_FPU_PREV(prev, next); \ last = __switch_to(prev, next);\ ARC_FPU_NEXT(next); \ diff --git a/arch/arc/kernel/Makefile b/arch/arc/kernel/Makefile index 8942c5c3b4c5..2dc5f4296d44 100644 --- a/arch/arc/kernel/Makefile +++ b/arch/arc/kernel/Makefile @@ -12,7 +12,6 @@ obj-y := arcksyms.o setup.o irq.o reset.o ptrace.o process.o devtree.o obj-y += signal.o traps.o sys.o troubleshoot.o stacktrace.o disasm.o obj-$(CONFIG_ISA_ARCOMPACT) += entry-compact.o intc-compact.o obj-$(CONFIG_ISA_ARCV2) += entry-arcv2.o intc-arcv2.o -obj-$(CONFIG_PCI) += pcibios.o obj-$(CONFIG_MODULES) += arcksyms.o module.o obj-$(CONFIG_SMP) += smp.o diff --git a/arch/arc/kernel/devtree.c b/arch/arc/kernel/devtree.c index 3b67f538f142..521ef3521a1c 100644 --- a/arch/arc/kernel/devtree.c +++ b/arch/arc/kernel/devtree.c @@ -29,8 +29,9 @@ static void __init arc_set_early_base_baud(unsigned long dt_root) { if (of_flat_dt_is_compatible(dt_root, "abilis,arc-tb10x")) arc_base_baud = 166666666; /* Fixed 166.6MHz clk (TB10x) */ - else if (of_flat_dt_is_compatible(dt_root, "snps,arc-sdp")) - arc_base_baud = 33333333; /* Fixed 33MHz clk (AXS10x) */ + else if (of_flat_dt_is_compatible(dt_root, "snps,arc-sdp") || + of_flat_dt_is_compatible(dt_root, "snps,hsdk")) + arc_base_baud = 33333333; /* Fixed 33MHz clk (AXS10x & HSDK) */ else if (of_flat_dt_is_compatible(dt_root, "ezchip,arc-nps")) arc_base_baud = 800000000; /* Fixed 800MHz clk (NPS) */ else diff --git a/arch/arc/kernel/entry-compact.S b/arch/arc/kernel/entry-compact.S index 9211707634dc..f285dbb28066 100644 --- a/arch/arc/kernel/entry-compact.S +++ b/arch/arc/kernel/entry-compact.S @@ -25,12 +25,12 @@ * * vineetg: Nov 2009 (Everything needed for TIF_RESTORE_SIGMASK) * -do_signal()invoked upon TIF_RESTORE_SIGMASK as well - * -Wrappers for sys_{,rt_}sigsuspend() nolonger needed as they don't + * -Wrappers for sys_{,rt_}sigsuspend() no longer needed as they don't * need ptregs anymore * * Vineetg: Oct 2009 * -In a rare scenario, Process gets a Priv-V exception and gets scheduled - * out. Since we don't do FAKE RTIE for Priv-V, CPU excpetion state remains + * out. Since we don't do FAKE RTIE for Priv-V, CPU exception state remains * active (AE bit enabled). This causes a double fault for a subseq valid * exception. Thus FAKE RTIE needed in low level Priv-Violation handler. * Instr Error could also cause similar scenario, so same there as well. @@ -59,7 +59,7 @@ */ #include -#include /* {EXTRY,EXIT} */ +#include /* {ENTRY,EXIT} */ #include #include @@ -80,8 +80,8 @@ .align 4 /* Each entry in the vector table must occupy 2 words. 
Since it is a jump - * across sections (.vector to .text) we are gauranteed that 'j somewhere' - * will use the 'j limm' form of the intrsuction as long as somewhere is in + * across sections (.vector to .text) we are guaranteed that 'j somewhere' + * will use the 'j limm' form of the instruction as long as somewhere is in * a section other than .vector. */ @@ -105,13 +105,13 @@ VECTOR handle_interrupt_level1 ; Other devices ; ******************** Exceptions ********************** VECTOR EV_MachineCheck ; 0x100, Fatal Machine check (0x20) -VECTOR EV_TLBMissI ; 0x108, Intruction TLB miss (0x21) +VECTOR EV_TLBMissI ; 0x108, Instruction TLB miss (0x21) VECTOR EV_TLBMissD ; 0x110, Data TLB miss (0x22) VECTOR EV_TLBProtV ; 0x118, Protection Violation (0x23) ; or Misaligned Access VECTOR EV_PrivilegeV ; 0x120, Privilege Violation (0x24) VECTOR EV_Trap ; 0x128, Trap exception (0x25) -VECTOR EV_Extension ; 0x130, Extn Intruction Excp (0x26) +VECTOR EV_Extension ; 0x130, Extn Instruction Excp (0x26) .rept 24 VECTOR reserved ; Reserved Exceptions @@ -199,7 +199,7 @@ END(handle_interrupt_level2) ; --------------------------------------------- ; User Mode Memory Bus Error Interrupt Handler -; (Kernel mode memory errors handled via seperate exception vectors) +; (Kernel mode memory errors handled via separate exception vectors) ; --------------------------------------------- ENTRY(mem_service) @@ -273,7 +273,7 @@ ENTRY(EV_TLBProtV) ;------ (5) Type of Protection Violation? ---------- ; ; ProtV Hardware Exception is triggered for Access Faults of 2 types - ; -Access Violaton : 00_23_(00|01|02|03)_00 + ; -Access Violation : 00_23_(00|01|02|03)_00 ; x r w r+w ; -Unaligned Access : 00_23_04_00 ; @@ -327,7 +327,7 @@ END(call_do_page_fault) .Lrestore_regs: - # Interrpts are actually disabled from this point on, but will get + # Interrupts are actually disabled from this point on, but will get # reenabled after we return from interrupt/exception. # But irq tracer needs to be told now... TRACE_ASM_IRQ_ENABLE @@ -335,7 +335,7 @@ END(call_do_page_fault) lr r10, [status32] ; Restore REG File. In case multiple Events outstanding, - ; use the same priorty as rtie: EXCPN, L2 IRQ, L1 IRQ, None + ; use the same priority as rtie: EXCPN, L2 IRQ, L1 IRQ, None ; Note that we use realtime STATUS32 (not pt_regs->status32) to ; decide that. diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S index 1eea99beecc3..85d9ea4a0acc 100644 --- a/arch/arc/kernel/entry.S +++ b/arch/arc/kernel/entry.S @@ -92,6 +92,12 @@ ENTRY(EV_MachineCheck) lr r0, [efa] mov r1, sp + ; hardware auto-disables MMU, re-enable it to allow kernel vaddr + ; access for say stack unwinding of modules for crash dumps + lr r3, [ARC_REG_PID] + or r3, r3, MMU_ENABLE + sr r3, [ARC_REG_PID] + lsr r3, r2, 8 bmsk r3, r3, 7 brne r3, ECR_C_MCHK_DUP_TLB, 1f diff --git a/arch/arc/kernel/pcibios.c b/arch/arc/kernel/pcibios.c deleted file mode 100644 index 72e1d73d0bd6..000000000000 --- a/arch/arc/kernel/pcibios.c +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright (C) 2014-2015 Synopsys, Inc. (www.synopsys.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#include - -/* - * We don't have to worry about legacy ISA devices, so nothing to do here - */ -resource_size_t pcibios_align_resource(void *data, const struct resource *res, - resource_size_t size, resource_size_t align) -{ - return res->start; -} - -void pcibios_fixup_bus(struct pci_bus *bus) -{ -} diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c index 2a018de6d6cd..5ac3b547453f 100644 --- a/arch/arc/kernel/process.c +++ b/arch/arc/kernel/process.c @@ -79,15 +79,40 @@ SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new) return uval; } +#ifdef CONFIG_ISA_ARCV2 + void arch_cpu_idle(void) { - /* sleep, but enable all interrupts before committing */ + /* Re-enable interrupts <= default irq priority before commiting SLEEP */ + const unsigned int arg = 0x10 | ARCV2_IRQ_DEF_PRIO; + __asm__ __volatile__( "sleep %0 \n" : - :"I"(ISA_SLEEP_ARG)); /* can't be "r" has to be embedded const */ + :"I"(arg)); /* can't be "r" has to be embedded const */ } +#elif defined(CONFIG_EZNPS_MTM_EXT) /* ARC700 variant in NPS */ + +void arch_cpu_idle(void) +{ + /* only the calling HW thread needs to sleep */ + __asm__ __volatile__( + ".word %0 \n" + : + :"i"(CTOP_INST_HWSCHD_WFT_IE12)); +} + +#else /* ARC700 */ + +void arch_cpu_idle(void) +{ + /* sleep, but enable both set E1/E2 (levels of interrutps) before committing */ + __asm__ __volatile__("sleep 0x3 \n"); +} + +#endif + asmlinkage void ret_from_fork(void); /* @@ -209,6 +234,10 @@ void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long usp) */ regs->status32 = STATUS_U_MASK | STATUS_L_MASK | ISA_INIT_STATUS_BITS; +#ifdef CONFIG_EZNPS_MTM_EXT + regs->eflags = 0; +#endif + /* bogus seed values for debugging */ regs->lp_start = 0x10; regs->lp_end = 0x80; diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index 666613fde91d..fb83844daeea 100644 --- a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c @@ -51,6 +51,7 @@ static const struct id_to_str arc_cpu_rel[] = { { 0x51, "R2.0" }, { 0x52, "R2.1" }, { 0x53, "R3.0" }, + { 0x54, "R4.0" }, #endif { 0x00, NULL } }; @@ -62,6 +63,7 @@ static const struct id_to_str arc_cpu_nm[] = { #else { 0x40, "ARC EM" }, { 0x50, "ARC HS38" }, + { 0x54, "ARC HS48" }, #endif { 0x00, "Unknown" } }; @@ -119,11 +121,11 @@ static void read_arc_build_cfg_regs(void) struct bcr_generic bcr; struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; const struct id_to_str *tbl; + struct bcr_isa_arcv2 isa; FIX_PTR(cpu); READ_BCR(AUX_IDENTITY, cpu->core); - READ_BCR(ARC_REG_ISA_CFG_BCR, cpu->isa); for (tbl = &arc_cpu_rel[0]; tbl->id != 0; tbl++) { if (cpu->core.family == tbl->id) { @@ -133,7 +135,7 @@ static void read_arc_build_cfg_regs(void) } for (tbl = &arc_cpu_nm[0]; tbl->id != 0; tbl++) { - if ((cpu->core.family & 0xF0) == tbl->id) + if ((cpu->core.family & 0xF4) == tbl->id) break; } cpu->name = tbl->str; @@ -192,6 +194,14 @@ static void read_arc_build_cfg_regs(void) cpu->bpu.full = bpu.ft; cpu->bpu.num_cache = 256 << bpu.bce; cpu->bpu.num_pred = 2048 << bpu.pte; + + if (cpu->core.family >= 0x54) { + unsigned int exec_ctrl; + + READ_BCR(AUX_EXEC_CTRL, exec_ctrl); + cpu->extn.dual_iss_exist = 1; + cpu->extn.dual_iss_enb = exec_ctrl & 1; + } } READ_BCR(ARC_REG_AP_BCR, bcr); @@ -205,18 +215,25 @@ static void read_arc_build_cfg_regs(void) cpu->extn.debug = cpu->extn.ap | cpu->extn.smart | cpu->extn.rtt; + READ_BCR(ARC_REG_ISA_CFG_BCR, isa); + /* some hacks for lack of feature BCR info in old ARC700 cores */ if (is_isa_arcompact()) { - if (!cpu->isa.ver) /* ISA BCR 
absent, use Kconfig info */ + if (!isa.ver) /* ISA BCR absent, use Kconfig info */ cpu->isa.atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC); - else - cpu->isa.atomic = cpu->isa.atomic1; + else { + /* ARC700_BUILD only has 2 bits of isa info */ + struct bcr_generic bcr = *(struct bcr_generic *)&isa; + cpu->isa.atomic = bcr.info & 1; + } cpu->isa.be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN); /* there's no direct way to distinguish 750 vs. 770 */ if (unlikely(cpu->core.family < 0x34 || cpu->mmu.ver < 3)) cpu->name = "ARC750"; + } else { + cpu->isa = isa; } } @@ -232,10 +249,11 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len) "\nIDENTITY\t: ARCVER [%#02x] ARCNUM [%#02x] CHIPID [%#4x]\n", core->family, core->cpu_id, core->chip_id); - n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s\n", + n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s%s%s\n", cpu_id, cpu->name, cpu->details, is_isa_arcompact() ? "ARCompact" : "ARCv2", - IS_AVAIL1(cpu->isa.be, "[Big-Endian]")); + IS_AVAIL1(cpu->isa.be, "[Big-Endian]"), + IS_AVAIL3(cpu->extn.dual_iss_exist, cpu->extn.dual_iss_enb, " Dual-Issue")); n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s%s%s\nISA Extn\t: ", IS_AVAIL1(cpu->extn.timer0, "Timer0 "), @@ -385,13 +403,13 @@ void setup_processor(void) read_arc_build_cfg_regs(); arc_init_IRQ(); - printk(arc_cpu_mumbojumbo(cpu_id, str, sizeof(str))); + pr_info("%s", arc_cpu_mumbojumbo(cpu_id, str, sizeof(str))); arc_mmu_init(); arc_cache_init(); - printk(arc_extn_mumbojumbo(cpu_id, str, sizeof(str))); - printk(arc_platform_smp_cpuinfo()); + pr_info("%s", arc_extn_mumbojumbo(cpu_id, str, sizeof(str))); + pr_info("%s", arc_platform_smp_cpuinfo()); arc_chk_core_config(); } @@ -510,7 +528,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) goto done; } - str = (char *)__get_free_page(GFP_TEMPORARY); + str = (char *)__get_free_page(GFP_KERNEL); if (!str) goto done; diff --git a/arch/arc/kernel/traps.c b/arch/arc/kernel/traps.c index ff83e78d0cfb..bcd7c9fc5d0f 100644 --- a/arch/arc/kernel/traps.c +++ b/arch/arc/kernel/traps.c @@ -80,7 +80,7 @@ int name(unsigned long address, struct pt_regs *regs) \ DO_ERROR_INFO(SIGILL, "Priv Op/Disabled Extn", do_privilege_fault, ILL_PRVOPC) DO_ERROR_INFO(SIGILL, "Invalid Extn Insn", do_extension_fault, ILL_ILLOPC) DO_ERROR_INFO(SIGILL, "Illegal Insn (or Seq)", insterror_is_error, ILL_ILLOPC) -DO_ERROR_INFO(SIGBUS, "Invalid Mem Access", do_memory_error, BUS_ADRERR) +DO_ERROR_INFO(SIGBUS, "Invalid Mem Access", __weak do_memory_error, BUS_ADRERR) DO_ERROR_INFO(SIGTRAP, "Breakpoint Set", trap_is_brkpt, TRAP_BRKPT) DO_ERROR_INFO(SIGBUS, "Misaligned Access", do_misaligned_error, BUS_ADRALN) @@ -103,7 +103,7 @@ int do_misaligned_access(unsigned long address, struct pt_regs *regs, */ void do_machine_check_fault(unsigned long address, struct pt_regs *regs) { - die("Machine Check Exception", regs, address); + die("Unhandled Machine Check Exception", regs, address); } diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c index f9caf79186d4..7d8c1d6c2f60 100644 --- a/arch/arc/kernel/troubleshoot.c +++ b/arch/arc/kernel/troubleshoot.c @@ -140,7 +140,7 @@ static void show_ecr_verbose(struct pt_regs *regs) } else if (vec == ECR_V_ITLB_MISS) { pr_cont("Insn could not be fetched\n"); } else if (vec == ECR_V_MACH_CHK) { - pr_cont("%s\n", (cause_code == 0x0) ? + pr_cont("Machine Check (%s)\n", (cause_code == 0x0) ? 
"Double Fault" : "Other Fatal Err"); } else if (vec == ECR_V_PROTV) { @@ -178,7 +178,7 @@ void show_regs(struct pt_regs *regs) struct callee_regs *cregs; char *buf; - buf = (char *)__get_free_page(GFP_TEMPORARY); + buf = (char *)__get_free_page(GFP_KERNEL); if (!buf) return; @@ -233,6 +233,9 @@ void show_kernel_fault_diag(const char *str, struct pt_regs *regs, { current->thread.fault_address = address; + /* Show fault description */ + pr_info("\n%s\n", str); + /* Caller and Callee regs */ show_regs(regs); diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c index 7db283b46ebd..eee924dfffa6 100644 --- a/arch/arc/mm/cache.c +++ b/arch/arc/mm/cache.c @@ -652,7 +652,7 @@ static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr, #endif /* CONFIG_ARC_HAS_ICACHE */ -noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op) +noinline void slc_op_rgn(phys_addr_t paddr, unsigned long sz, const int op) { #ifdef CONFIG_ISA_ARCV2 /* @@ -715,6 +715,58 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op) #endif } +noinline void slc_op_line(phys_addr_t paddr, unsigned long sz, const int op) +{ +#ifdef CONFIG_ISA_ARCV2 + /* + * SLC is shared between all cores and concurrent aux operations from + * multiple cores need to be serialized using a spinlock + * A concurrent operation can be silently ignored and/or the old/new + * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY loop + * below) + */ + static DEFINE_SPINLOCK(lock); + + const unsigned long SLC_LINE_MASK = ~(l2_line_sz - 1); + unsigned int ctrl, cmd; + unsigned long flags; + int num_lines; + + spin_lock_irqsave(&lock, flags); + + ctrl = read_aux_reg(ARC_REG_SLC_CTRL); + + /* Don't rely on default value of IM bit */ + if (!(op & OP_FLUSH)) /* i.e. OP_INV */ + ctrl &= ~SLC_CTRL_IM; /* clear IM: Disable flush before Inv */ + else + ctrl |= SLC_CTRL_IM; + + write_aux_reg(ARC_REG_SLC_CTRL, ctrl); + + cmd = op & OP_INV ? 
ARC_AUX_SLC_IVDL : ARC_AUX_SLC_FLDL; + + sz += paddr & ~SLC_LINE_MASK; + paddr &= SLC_LINE_MASK; + + num_lines = DIV_ROUND_UP(sz, l2_line_sz); + + while (num_lines-- > 0) { + write_aux_reg(cmd, paddr); + paddr += l2_line_sz; + } + + /* Make sure "busy" bit reports correct stataus, see STAR 9001165532 */ + read_aux_reg(ARC_REG_SLC_CTRL); + + while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY); + + spin_unlock_irqrestore(&lock, flags); +#endif +} + +#define slc_op(paddr, sz, op) slc_op_rgn(paddr, sz, op) + noinline static void slc_entire_op(const int op) { unsigned int ctrl, r = ARC_REG_SLC_CTRL; @@ -1095,7 +1147,7 @@ SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags) */ noinline void __init arc_ioc_setup(void) { - unsigned int ap_sz; + unsigned int ioc_base, mem_sz; /* Flush + invalidate + disable L1 dcache */ __dc_disable(); @@ -1104,18 +1156,29 @@ noinline void __init arc_ioc_setup(void) if (read_aux_reg(ARC_REG_SLC_BCR)) slc_entire_op(OP_FLUSH_N_INV); - /* IOC Aperture start: TDB: handle non default CONFIG_LINUX_LINK_BASE */ - write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000); - /* - * IOC Aperture size: - * decoded as 2 ^ (SIZE + 2) KB: so setting 0x11 implies 512M + * currently IOC Aperture covers entire DDR * TBD: fix for PGU + 1GB of low mem * TBD: fix for PAE */ - ap_sz = order_base_2(arc_get_mem_sz()/1024) - 2; - write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, ap_sz); + mem_sz = arc_get_mem_sz(); + if (!is_power_of_2(mem_sz) || mem_sz < 4096) + panic("IOC Aperture size must be power of 2 larger than 4KB"); + + /* + * IOC Aperture size decoded as 2 ^ (SIZE + 2) KB, + * so setting 0x11 implies 512MB, 0x12 implies 1GB... + */ + write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, order_base_2(mem_sz >> 10) - 2); + + /* for now assume kernel base is start of IOC aperture */ + ioc_base = CONFIG_LINUX_RAM_BASE; + + if (ioc_base % mem_sz != 0) + panic("IOC Aperture start must be aligned to the size of the aperture"); + + write_aux_reg(ARC_REG_IO_COH_AP0_BASE, ioc_base >> 12); write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1); write_aux_reg(ARC_REG_IO_COH_ENABLE, 1); @@ -1207,7 +1270,7 @@ void __ref arc_cache_init(void) unsigned int __maybe_unused cpu = smp_processor_id(); char str[256]; - printk(arc_cache_mumbojumbo(0, str, sizeof(str))); + pr_info("%s", arc_cache_mumbojumbo(0, str, sizeof(str))); if (!cpu) arc_cache_init_master(); diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c index 162c97528872..a0b7bd6d030d 100644 --- a/arch/arc/mm/fault.c +++ b/arch/arc/mm/fault.c @@ -207,7 +207,7 @@ void do_page_fault(unsigned long address, struct pt_regs *regs) /* Are we prepared to handle this kernel fault? * * (The kernel has valid exception-points in the source - * when it acesses user-memory. When it fails in one + * when it accesses user-memory. 
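Worked example for the arc_ioc_setup() rewrite above, using the HSDK values that appear elsewhere in this section (1 GiB of DDR, CONFIG_LINUX_RAM_BASE=0x80000000, kernel linked higher at CONFIG_LINUX_LINK_BASE=0x90000000): the aperture size field encodes 2^(SIZE + 2) KiB, so order_base_2(0x40000000 >> 10) - 2 = 20 - 2 = 0x12, matching the "0x12 implies 1GB" comment; the base is programmed in 4 KiB units, so 0x80000000 >> 12 = 0x80000, which is exactly the value the old code hard-wired. Note the base now comes from CONFIG_LINUX_RAM_BASE rather than the link base, and the new code panics if the memory size is not a power of two or the base is not aligned to it, since the aperture can only describe such regions.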
When it fails in one * of those points, we find it in a table and do a jump * to some fixup code that loads an appropriate error * code) diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c index 8c9415ed6280..ba145065c579 100644 --- a/arch/arc/mm/init.c +++ b/arch/arc/mm/init.c @@ -26,7 +26,7 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __aligned(PAGE_SIZE); char empty_zero_page[PAGE_SIZE] __aligned(PAGE_SIZE); EXPORT_SYMBOL(empty_zero_page); -static const unsigned long low_mem_start = CONFIG_LINUX_LINK_BASE; +static const unsigned long low_mem_start = CONFIG_LINUX_RAM_BASE; static unsigned long low_mem_sz; #ifdef CONFIG_HIGHMEM @@ -63,7 +63,7 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size) if (!low_mem_sz) { if (base != low_mem_start) - panic("CONFIG_LINUX_LINK_BASE != DT memory { }"); + panic("CONFIG_LINUX_RAM_BASE != DT memory { }"); low_mem_sz = size; in_use = 1; @@ -161,7 +161,7 @@ void __init setup_arch_memory(void) * We can't use the helper free_area_init(zones[]) because it uses * PAGE_OFFSET to compute the @min_low_pfn which would be wrong * when our kernel doesn't start at PAGE_OFFSET, i.e. - * PAGE_OFFSET != CONFIG_LINUX_LINK_BASE + * PAGE_OFFSET != CONFIG_LINUX_RAM_BASE */ free_area_init_node(0, /* node-id */ zones_size, /* num pages per zone */ diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c index b181f3ee38aa..8ceefbf72fb0 100644 --- a/arch/arc/mm/tlb.c +++ b/arch/arc/mm/tlb.c @@ -821,7 +821,7 @@ void arc_mmu_init(void) char str[256]; struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu; - printk(arc_mmu_mumbojumbo(0, str, sizeof(str))); + pr_info("%s", arc_mmu_mumbojumbo(0, str, sizeof(str))); /* * Can't be done in processor.h due to header include depenedencies @@ -908,9 +908,6 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address, local_irq_save(flags); - /* re-enable the MMU */ - write_aux_reg(ARC_REG_PID, MMU_ENABLE | read_aux_reg(ARC_REG_PID)); - /* loop thru all sets of TLB */ for (set = 0; set < mmu->sets; set++) { diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S index b30e4e36bb00..0e1e47a67c73 100644 --- a/arch/arc/mm/tlbex.S +++ b/arch/arc/mm/tlbex.S @@ -274,6 +274,13 @@ ex_saved_reg1: .macro COMMIT_ENTRY_TO_MMU #if (CONFIG_ARC_MMU_VER < 4) +#ifdef CONFIG_EZNPS_MTM_EXT + /* verify if entry for this vaddr+ASID already exists */ + sr TLBProbe, [ARC_REG_TLBCOMMAND] + lr r0, [ARC_REG_TLBINDEX] + bbit0 r0, 31, 88f +#endif + /* Get free TLB slot: Set = computed from vaddr, way = random */ sr TLBGetIndex, [ARC_REG_TLBCOMMAND] @@ -287,6 +294,8 @@ ex_saved_reg1: #else sr TLBInsertEntry, [ARC_REG_TLBCOMMAND] #endif + +88: .endm diff --git a/arch/arc/plat-axs10x/axs10x.c b/arch/arc/plat-axs10x/axs10x.c index 38ff349d7f2a..cf14ebc36916 100644 --- a/arch/arc/plat-axs10x/axs10x.c +++ b/arch/arc/plat-axs10x/axs10x.c @@ -80,22 +80,6 @@ static void __init axs10x_enable_gpio_intc_wire(void) iowrite32(1 << MB_TO_GPIO_IRQ, (void __iomem *) GPIO_INTEN); } -static inline void __init -write_cgu_reg(uint32_t value, void __iomem *reg, void __iomem *lock_reg) -{ - unsigned int loops = 128 * 1024, ctr; - - iowrite32(value, reg); - - ctr = loops; - while (((ioread32(lock_reg) & 1) == 1) && ctr--) /* wait for unlock */ - cpu_relax(); - - ctr = loops; - while (((ioread32(lock_reg) & 1) == 0) && ctr--) /* wait for re-lock */ - cpu_relax(); -} - static void __init axs10x_print_board_ver(unsigned int creg, const char *str) { union ver { @@ -127,6 +111,13 @@ static void __init axs10x_early_init(void) axs10x_enable_gpio_intc_wire(); + /* + * 
Reset ethernet IP core. + * TODO: get rid of this quirk after axs10x reset driver (or simple + * reset driver) will be available in upstream. + */ + iowrite32((1 << 5), (void __iomem *) CREG_MB_SW_RESET); + scnprintf(mb, 32, "MainBoard v%d", mb_rev); axs10x_print_board_ver(CREG_MB_VER, mb); } @@ -314,7 +305,6 @@ static void __init axs101_early_init(void) #ifdef CONFIG_AXS103 -#define AXC003_CGU 0xF0000000 #define AXC003_CREG 0xF0001000 #define AXC003_MST_AXI_TUNNEL 0 #define AXC003_MST_HS38 1 @@ -324,131 +314,38 @@ static void __init axs101_early_init(void) #define CREG_CPU_TUN_IO_CTRL (AXC003_CREG + 0x494) -union pll_reg { - struct { -#ifdef CONFIG_CPU_BIG_ENDIAN - unsigned int pad:17, noupd:1, bypass:1, edge:1, high:6, low:6; -#else - unsigned int low:6, high:6, edge:1, bypass:1, noupd:1, pad:17; -#endif - }; - unsigned int val; -}; - -static unsigned int __init axs103_get_freq(void) -{ - union pll_reg idiv, fbdiv, odiv; - unsigned int f = 33333333; - - idiv.val = ioread32((void __iomem *)AXC003_CGU + 0x80 + 0); - fbdiv.val = ioread32((void __iomem *)AXC003_CGU + 0x80 + 4); - odiv.val = ioread32((void __iomem *)AXC003_CGU + 0x80 + 8); - - if (idiv.bypass != 1) - f = f / (idiv.low + idiv.high); - - if (fbdiv.bypass != 1) - f = f * (fbdiv.low + fbdiv.high); - - if (odiv.bypass != 1) - f = f / (odiv.low + odiv.high); - - f = (f + 500000) / 1000000; /* Rounding */ - return f; -} - -static inline unsigned int __init encode_div(unsigned int id, int upd) -{ - union pll_reg div; - - div.val = 0; - - div.noupd = !upd; - div.bypass = id == 1 ? 1 : 0; - div.edge = (id%2 == 0) ? 0 : 1; /* 0 = rising */ - div.low = (id%2 == 0) ? id >> 1 : (id >> 1)+1; - div.high = id >> 1; - - return div.val; -} - -noinline static void __init -axs103_set_freq(unsigned int id, unsigned int fd, unsigned int od) -{ - write_cgu_reg(encode_div(id, 0), - (void __iomem *)AXC003_CGU + 0x80 + 0, - (void __iomem *)AXC003_CGU + 0x110); - - write_cgu_reg(encode_div(fd, 0), - (void __iomem *)AXC003_CGU + 0x80 + 4, - (void __iomem *)AXC003_CGU + 0x110); - - write_cgu_reg(encode_div(od, 1), - (void __iomem *)AXC003_CGU + 0x80 + 8, - (void __iomem *)AXC003_CGU + 0x110); -} - static void __init axs103_early_init(void) { - int offset = fdt_path_offset(initial_boot_params, "/cpu_card/core_clk"); - const struct fdt_property *prop = fdt_get_property(initial_boot_params, - offset, - "clock-frequency", - NULL); - u32 freq = be32_to_cpu(*(u32*)(prop->data)) / 1000000, orig = freq; - +#ifdef CONFIG_ARC_MCIP /* * AXS103 configurations for SMP/QUAD configurations share device tree - * which defaults to 90 MHz. However recent failures of Quad config + * which defaults to 100 MHz. However recent failures of Quad config * revealed P&R timing violations so clamp it down to safe 50 MHz * Instead of duplicating defconfig/DT for SMP/QUAD, add a small hack - * - * This hack is really hacky as of now. 
Fix it properly by getting the - * number of cores as return value of platform's early SMP callback + * of fudging the freq in DT */ -#ifdef CONFIG_ARC_MCIP unsigned int num_cores = (read_aux_reg(ARC_REG_MCIP_BCR) >> 16) & 0x3F; - if (num_cores > 2) - freq = 50; -#endif - - switch (freq) { - case 33: - axs103_set_freq(1, 1, 1); - break; - case 50: - axs103_set_freq(1, 30, 20); - break; - case 75: - axs103_set_freq(2, 45, 10); - break; - case 90: - axs103_set_freq(2, 54, 10); - break; - case 100: - axs103_set_freq(1, 30, 10); - break; - case 125: - axs103_set_freq(2, 45, 6); - break; - default: + if (num_cores > 2) { + u32 freq = 50, orig; /* - * In this case, core_frequency derived from - * DT "clock-frequency" might not match with board value. - * Hence update it to match the board value. + * TODO: use cpu node "cpu-freq" param instead of platform-specific + * "/cpu_card/core_clk" as it works only if we use fixed-clock for cpu. */ - freq = axs103_get_freq(); - break; - } + int off = fdt_path_offset(initial_boot_params, "/cpu_card/core_clk"); + const struct fdt_property *prop; - pr_info("Freq is %dMHz\n", freq); + prop = fdt_get_property(initial_boot_params, off, + "clock-frequency", NULL); + orig = be32_to_cpu(*(u32*)(prop->data)) / 1000000; - /* Patching .dtb in-place with new core clock value */ - if (freq != orig ) { - freq = cpu_to_be32(freq * 1000000); - fdt_setprop_inplace(initial_boot_params, offset, - "clock-frequency", &freq, sizeof(freq)); + /* Patching .dtb in-place with new core clock value */ + if (freq != orig ) { + freq = cpu_to_be32(freq * 1000000); + fdt_setprop_inplace(initial_boot_params, off, + "clock-frequency", &freq, sizeof(freq)); + } } +#endif /* Memory maps already config in pre-bootloader */ diff --git a/arch/arc/plat-eznps/Kconfig b/arch/arc/plat-eznps/Kconfig index 1595a38e50cd..e151e2067886 100644 --- a/arch/arc/plat-eznps/Kconfig +++ b/arch/arc/plat-eznps/Kconfig @@ -12,8 +12,8 @@ menuconfig ARC_PLAT_EZNPS help Support for EZchip development platforms, based on ARC700 cores. - We handle few flavours: - - Hardware Emulator AKA HE which is FPGA based chasis + We handle few flavors: + - Hardware Emulator AKA HE which is FPGA based chassis - Simulator based on MetaWare nSIM - NPS400 chip based on ASIC @@ -32,3 +32,25 @@ config EZNPS_MTM_EXT any of them seem like CPU from Linux point of view. All threads within same core share the execution unit of the core and HW scheduler round robin between them. + +config EZNPS_MEM_ERROR_ALIGN + bool "ARC-EZchip Memory error as an exception" + depends on EZNPS_MTM_EXT + default n + help + On the real chip of the NPS, user memory errors are handled + as a machine check exception, which is fatal, whereas on + simulator platform for NPS, is handled as a Level 2 interrupt + (just a stock ARC700) which is recoverable. This option makes + simulator behave like hardware. + +config EZNPS_SHARED_AUX_REGS + bool "ARC-EZchip Shared Auxiliary Registers Per Core" + depends on ARC_PLAT_EZNPS + default y + help + On the real chip of the NPS, auxiliary registers are shared between + all the cpus of the core, whereas on simulator platform for NPS, + each cpu has a different set of auxiliary registers. Configuration + should be unset if auxiliary registers are not shared between the cpus + of the core, so there will be a need to initialize them per cpu. 
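The EZNPS_MEM_ERROR_ALIGN help text above relies on a weak-symbol hook: the generic ARC handler for "Invalid Mem Access" is declared __weak in the arch/arc/kernel/traps.c hunk earlier in this section, and the plat-eznps code added just below supplies a strong override that turns the error into a fatal die(). A minimal sketch of the pattern, with the default handler body reduced to a comment (the real one is generated by the DO_ERROR_INFO boilerplate):

/* generic handler, arch/arc/kernel/traps.c (declared weak) */
int __weak do_memory_error(unsigned long address, struct pt_regs *regs)
{
	/* default: deliver SIGBUS/BUS_ADRERR to the offending task (simplified) */
	return 0;
}

/* platform override, arch/arc/plat-eznps/mtm.c, built only when
 * CONFIG_EZNPS_MEM_ERROR_ALIGN is set */
int do_memory_error(unsigned long address, struct pt_regs *regs)
{
	die("Invalid Mem Access", regs, address);

	return 1;
}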
diff --git a/arch/arc/plat-eznps/Makefile b/arch/arc/plat-eznps/Makefile index 21091b199df0..8d4371706b2f 100644 --- a/arch/arc/plat-eznps/Makefile +++ b/arch/arc/plat-eznps/Makefile @@ -2,6 +2,6 @@ # Makefile for the linux kernel. # -obj-y := entry.o platform.o +obj-y := entry.o platform.o ctop.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_EZNPS_MTM_EXT) += mtm.o diff --git a/arch/arc/plat-eznps/ctop.c b/arch/arc/plat-eznps/ctop.c new file mode 100644 index 000000000000..030bcd070a1b --- /dev/null +++ b/arch/arc/plat-eznps/ctop.c @@ -0,0 +1,32 @@ +/* + * Copyright(c) 2015 EZchip Technologies. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + */ + +#include +#include +#include + +void dp_save_restore(struct task_struct *prev, struct task_struct *next) +{ + struct eznps_dp *prev_task_dp = &prev->thread.dp; + struct eznps_dp *next_task_dp = &next->thread.dp; + + /* Here we save all Data Plane related auxiliary registers */ + prev_task_dp->eflags = read_aux_reg(CTOP_AUX_EFLAGS); + write_aux_reg(CTOP_AUX_EFLAGS, next_task_dp->eflags); + + prev_task_dp->gpa1 = read_aux_reg(CTOP_AUX_GPA1); + write_aux_reg(CTOP_AUX_GPA1, next_task_dp->gpa1); +} diff --git a/arch/arc/plat-eznps/entry.S b/arch/arc/plat-eznps/entry.S index 328261c27cda..091c92c32ab6 100644 --- a/arch/arc/plat-eznps/entry.S +++ b/arch/arc/plat-eznps/entry.S @@ -27,7 +27,7 @@ .align 1024 ; HW requierment for restart first PC ENTRY(res_service) -#ifdef CONFIG_EZNPS_MTM_EXT +#if defined(CONFIG_EZNPS_MTM_EXT) && defined(CONFIG_EZNPS_SHARED_AUX_REGS) ; There is no work for HW thread id != 0 lr r3, [CTOP_AUX_THREAD_ID] cmp r3, 0 diff --git a/arch/arc/plat-eznps/include/plat/ctop.h b/arch/arc/plat-eznps/include/plat/ctop.h index ee2e32df5e90..0c7d11022d0f 100644 --- a/arch/arc/plat-eznps/include/plat/ctop.h +++ b/arch/arc/plat-eznps/include/plat/ctop.h @@ -39,6 +39,7 @@ #define CTOP_AUX_LOGIC_CORE_ID (CTOP_AUX_BASE + 0x018) #define CTOP_AUX_MT_CTRL (CTOP_AUX_BASE + 0x020) #define CTOP_AUX_HW_COMPLY (CTOP_AUX_BASE + 0x024) +#define CTOP_AUX_DPC (CTOP_AUX_BASE + 0x02C) #define CTOP_AUX_LPC (CTOP_AUX_BASE + 0x030) #define CTOP_AUX_EFLAGS (CTOP_AUX_BASE + 0x080) #define CTOP_AUX_IACK (CTOP_AUX_BASE + 0x088) @@ -46,6 +47,7 @@ #define CTOP_AUX_UDMC (CTOP_AUX_BASE + 0x300) /* EZchip core instructions */ +#define CTOP_INST_HWSCHD_WFT_IE12 0x3E6F7344 #define CTOP_INST_HWSCHD_OFF_R4 0x3C6F00BF #define CTOP_INST_HWSCHD_RESTORE_R4 0x3E6F7103 #define CTOP_INST_SCHD_RW 0x3E6F7004 diff --git a/arch/arc/plat-eznps/mtm.c b/arch/arc/plat-eznps/mtm.c index aaaaffd3d940..2388de3d09ef 100644 --- a/arch/arc/plat-eznps/mtm.c +++ b/arch/arc/plat-eznps/mtm.c @@ -21,10 +21,22 @@ #include #include -#define MT_CTRL_HS_CNT 0xFF +#define MT_HS_CNT_MIN 0x01 +#define MT_HS_CNT_MAX 0xFF #define MT_CTRL_ST_CNT 0xF #define NPS_NUM_HW_THREADS 0x10 +static int mtm_hs_ctr = MT_HS_CNT_MAX; + +#ifdef CONFIG_EZNPS_MEM_ERROR_ALIGN +int do_memory_error(unsigned long address, struct pt_regs *regs) +{ + die("Invalid Mem Access", regs, address); + + return 1; +} +#endif + 
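The mtm.c changes continued below replace the fixed MT_CTRL_HS_CNT value with a tunable mtm_hs_ctr and register an early parameter for it. As a usage sketch (the numeric value is arbitrary), the hardware-scheduler stall counter can then be set from the kernel command line by appending, for example:

	nps_mtm_hs_ctr=64

to the boot arguments; set_mtm_hs_ctr() rejects anything outside the 0x01-0xFF range.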
static void mtm_init_nat(int cpu) { struct nps_host_reg_mtm_cfg mtm_cfg; @@ -98,6 +110,18 @@ void mtm_enable_core(unsigned int cpu) int i; struct nps_host_reg_aux_mt_ctrl mt_ctrl; struct nps_host_reg_mtm_cfg mtm_cfg; + struct nps_host_reg_aux_dpc dpc; + + /* + * Initializing dpc register in each CPU. + * Overwriting the init value of the DPC + * register so that CMEM and FMT virtual address + * spaces are accessible, and Data Plane HW + * facilities are enabled. + */ + dpc.ien = 1; + dpc.men = 1; + write_aux_reg(CTOP_AUX_DPC, dpc.value); if (NPS_CPU_TO_THREAD_NUM(cpu) != 0) return; @@ -118,9 +142,7 @@ void mtm_enable_core(unsigned int cpu) /* Enable HW schedule, stall counter, mtm */ mt_ctrl.value = 0; mt_ctrl.hsen = 1; - mt_ctrl.hs_cnt = MT_CTRL_HS_CNT; - mt_ctrl.sten = 1; - mt_ctrl.st_cnt = MT_CTRL_ST_CNT; + mt_ctrl.hs_cnt = mtm_hs_ctr; mt_ctrl.mten = 1; write_aux_reg(CTOP_AUX_MT_CTRL, mt_ctrl.value); @@ -131,3 +153,23 @@ void mtm_enable_core(unsigned int cpu) */ cpu_relax(); } + +/* Verify and set the value of the mtm hs counter */ +static int __init set_mtm_hs_ctr(char *ctr_str) +{ + long hs_ctr; + int ret; + + ret = kstrtol(ctr_str, 0, &hs_ctr); + + if (ret || hs_ctr > MT_HS_CNT_MAX || hs_ctr < MT_HS_CNT_MIN) { + pr_err("** Invalid @nps_mtm_hs_ctr [%d] needs to be [%d:%d] (incl)\n", + hs_ctr, MT_HS_CNT_MIN, MT_HS_CNT_MAX); + return -EINVAL; + } + + mtm_hs_ctr = hs_ctr; + + return 0; +} +early_param("nps_mtm_hs_ctr", set_mtm_hs_ctr); diff --git a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig new file mode 100644 index 000000000000..bd08de4be75e --- /dev/null +++ b/arch/arc/plat-hsdk/Kconfig @@ -0,0 +1,10 @@ +# Copyright (C) 2017 Synopsys, Inc. (www.synopsys.com) +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# + +menuconfig ARC_SOC_HSDK + bool "ARC HS Development Kit SOC" + select CLK_HSDK diff --git a/arch/arc/plat-hsdk/Makefile b/arch/arc/plat-hsdk/Makefile new file mode 100644 index 000000000000..9a50c511a672 --- /dev/null +++ b/arch/arc/plat-hsdk/Makefile @@ -0,0 +1,9 @@ +# +# Copyright (C) 2017 Synopsys, Inc. (www.synopsys.com) +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# + +obj-y := platform.o diff --git a/arch/arc/plat-hsdk/platform.c b/arch/arc/plat-hsdk/platform.c new file mode 100644 index 000000000000..744e62e58788 --- /dev/null +++ b/arch/arc/plat-hsdk/platform.c @@ -0,0 +1,108 @@ +/* + * ARC HSDK Platform support code + * + * Copyright (C) 2017 Synopsys, Inc. (www.synopsys.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include + +#define ARC_CCM_UNUSED_ADDR 0x60000000 + +static void __init hsdk_init_per_cpu(unsigned int cpu) +{ + /* + * By default ICCM is mapped to 0x7z while this area is used for + * kernel virtual mappings, so move it to currently unused area. + */ + if (cpuinfo_arc700[cpu].iccm.sz) + write_aux_reg(ARC_REG_AUX_ICCM, ARC_CCM_UNUSED_ADDR); + + /* + * By default DCCM is mapped to 0x8z while this area is used by kernel, + * so move it to currently unused area. 
+ */ + if (cpuinfo_arc700[cpu].dccm.sz) + write_aux_reg(ARC_REG_AUX_DCCM, ARC_CCM_UNUSED_ADDR); +} + +#define ARC_PERIPHERAL_BASE 0xf0000000 +#define CREG_BASE (ARC_PERIPHERAL_BASE + 0x1000) +#define CREG_PAE (CREG_BASE + 0x180) +#define CREG_PAE_UPDATE (CREG_BASE + 0x194) + +#define CREG_CORE_IF_CLK_DIV (CREG_BASE + 0x4B8) +#define CREG_CORE_IF_CLK_DIV_2 0x1 +#define CGU_BASE ARC_PERIPHERAL_BASE +#define CGU_PLL_STATUS (ARC_PERIPHERAL_BASE + 0x4) +#define CGU_PLL_CTRL (ARC_PERIPHERAL_BASE + 0x0) +#define CGU_PLL_STATUS_LOCK BIT(0) +#define CGU_PLL_STATUS_ERR BIT(1) +#define CGU_PLL_CTRL_1GHZ 0x3A10 +#define HSDK_PLL_LOCK_TIMEOUT 500 + +#define HSDK_PLL_LOCKED() \ + !!(ioread32((void __iomem *) CGU_PLL_STATUS) & CGU_PLL_STATUS_LOCK) + +#define HSDK_PLL_ERR() \ + !!(ioread32((void __iomem *) CGU_PLL_STATUS) & CGU_PLL_STATUS_ERR) + +static void __init hsdk_set_cpu_freq_1ghz(void) +{ + u32 timeout = HSDK_PLL_LOCK_TIMEOUT; + + /* + * As we set cpu clock which exceeds 500MHz, the divider for the interface + * clock must be programmed to div-by-2. + */ + iowrite32(CREG_CORE_IF_CLK_DIV_2, (void __iomem *) CREG_CORE_IF_CLK_DIV); + + /* Set cpu clock to 1GHz */ + iowrite32(CGU_PLL_CTRL_1GHZ, (void __iomem *) CGU_PLL_CTRL); + + while (!HSDK_PLL_LOCKED() && timeout--) + cpu_relax(); + + if (!HSDK_PLL_LOCKED() || HSDK_PLL_ERR()) + pr_err("Failed to setup CPU frequency to 1GHz!"); +} + +static void __init hsdk_init_early(void) +{ + /* + * PAE remapping for DMA clients does not work due to an RTL bug, so + * CREG_PAE register must be programmed to all zeroes, otherwise it + * will cause problems with DMA to/from peripherals even if PAE40 is + * not used. + */ + + /* Default is 1, which means "PAE offset = 4GByte" */ + writel_relaxed(0, (void __iomem *) CREG_PAE); + + /* Really apply settings made above */ + writel(1, (void __iomem *) CREG_PAE_UPDATE); + + /* + * Setup CPU frequency to 1GHz. + * TODO: remove it after smart hsdk pll driver will be introduced. + */ + hsdk_set_cpu_freq_1ghz(); +} + +static const char *hsdk_compat[] __initconst = { + "snps,hsdk", + NULL, +}; + +MACHINE_START(SIMULATION, "hsdk") + .dt_compat = hsdk_compat, + .init_early = hsdk_init_early, + .init_per_cpu = hsdk_init_per_cpu, +MACHINE_END diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 61a0cb15067e..7888c9803eb0 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -50,7 +50,7 @@ config ARM select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT) select HAVE_ARCH_TRACEHOOK select HAVE_ARM_SMCCC if CPU_V7 - select HAVE_CBPF_JIT + select HAVE_EBPF_JIT if !CPU_ENDIAN_BE32 select HAVE_CC_STACKPROTECTOR select HAVE_CONTEXT_TRACKING select HAVE_C_RECORDMCOUNT @@ -1531,7 +1531,6 @@ config THUMB2_KERNEL bool "Compile the kernel in Thumb-2 mode" if !CPU_THUMBONLY depends on (CPU_V7 || CPU_V7M) && !CPU_V6 && !CPU_V6K default y if CPU_THUMBONLY - select AEABI select ARM_ASM_UNIFIED select ARM_UNWIND help @@ -1594,7 +1593,8 @@ config ARM_PATCH_IDIV code to do integer division. config AEABI - bool "Use the ARM EABI to compile the kernel" + bool "Use the ARM EABI to compile the kernel" if !CPU_V7 && !CPU_V7M && !CPU_V6 && !CPU_V6K + default CPU_V7 || CPU_V7M || CPU_V6 || CPU_V6K help This option allows for the kernel to be compiled using the latest ARM ABI (aka EABI). 
This is only useful if you are using a user diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug index 447629d89884..6dcea8e8e941 100644 --- a/arch/arm/Kconfig.debug +++ b/arch/arm/Kconfig.debug @@ -646,7 +646,7 @@ choice config DEBUG_OMAP2UART1 bool "OMAP2/3/4 UART1 (omap2/3 sdp boards and some omap3 boards)" depends on ARCH_OMAP2PLUS - select DEBUG_OMAP2PLUS_UART + select DEBUG_UART_8250 help This covers at least h4, 2430sdp, 3430sdp, 3630sdp, omap3 torpedo and 3530 lv som. @@ -654,17 +654,17 @@ choice config DEBUG_OMAP2UART2 bool "Kernel low-level debugging messages via OMAP2/3/4 UART2" depends on ARCH_OMAP2PLUS - select DEBUG_OMAP2PLUS_UART + select DEBUG_UART_8250 config DEBUG_OMAP2UART3 bool "Kernel low-level debugging messages via OMAP2 UART3 (n8x0)" depends on ARCH_OMAP2PLUS - select DEBUG_OMAP2PLUS_UART + select DEBUG_UART_8250 config DEBUG_OMAP3UART3 bool "Kernel low-level debugging messages via OMAP3 UART3 (most omap3 boards)" depends on ARCH_OMAP2PLUS - select DEBUG_OMAP2PLUS_UART + select DEBUG_UART_8250 help This covers at least cm_t3x, beagle, crane, devkit8000, igep00x0, ldp, n900, n9(50), pandora, overo, touchbook, @@ -673,17 +673,17 @@ choice config DEBUG_OMAP4UART3 bool "Kernel low-level debugging messages via OMAP4/5 UART3 (omap4 blaze, panda, omap5 sevm)" depends on ARCH_OMAP2PLUS - select DEBUG_OMAP2PLUS_UART + select DEBUG_UART_8250 config DEBUG_OMAP3UART4 bool "Kernel low-level debugging messages via OMAP36XX UART4" depends on ARCH_OMAP2PLUS - select DEBUG_OMAP2PLUS_UART + select DEBUG_UART_8250 config DEBUG_OMAP4UART4 bool "Kernel low-level debugging messages via OMAP4/5 UART4" depends on ARCH_OMAP2PLUS - select DEBUG_OMAP2PLUS_UART + select DEBUG_UART_8250 config DEBUG_OMAP7XXUART1 bool "Kernel low-level debugging via OMAP730 UART1" @@ -712,22 +712,22 @@ choice config DEBUG_TI81XXUART1 bool "Kernel low-level debugging messages via TI81XX UART1 (ti8148evm)" depends on ARCH_OMAP2PLUS - select DEBUG_OMAP2PLUS_UART + select DEBUG_UART_8250 config DEBUG_TI81XXUART2 bool "Kernel low-level debugging messages via TI81XX UART2" depends on ARCH_OMAP2PLUS - select DEBUG_OMAP2PLUS_UART + select DEBUG_UART_8250 config DEBUG_TI81XXUART3 bool "Kernel low-level debugging messages via TI81XX UART3 (ti8168evm)" depends on ARCH_OMAP2PLUS - select DEBUG_OMAP2PLUS_UART + select DEBUG_UART_8250 config DEBUG_AM33XXUART1 bool "Kernel low-level debugging messages via AM33XX UART1" depends on ARCH_OMAP2PLUS - select DEBUG_OMAP2PLUS_UART + select DEBUG_UART_8250 config DEBUG_ZOOM_UART bool "Kernel low-level debugging messages via Zoom2/3 UART" @@ -896,12 +896,13 @@ choice via SCIF2 on Renesas R-Car H1 (R8A7779). config DEBUG_RCAR_GEN2_SCIF0 - bool "Kernel low-level debugging messages via SCIF0 on R8A7790/R8A7791/R8A7792/R8A7793" - depends on ARCH_R8A7790 || ARCH_R8A7791 || ARCH_R8A7792 || ARCH_R8A7793 + bool "Kernel low-level debugging messages via SCIF0 on R-Car Gen2 and RZ/G1" + depends on ARCH_R8A7743 || ARCH_R8A7790 || ARCH_R8A7791 || \ + ARCH_R8A7792 || ARCH_R8A7793 help Say Y here if you want kernel low-level debugging support - via SCIF0 on Renesas R-Car H2 (R8A7790), M2-W (R8A7791), V2H - (R8A7792), or M2-N (R8A7793). + via SCIF0 on Renesas RZ/G1M (R8A7743), R-Car H2 (R8A7790), + M2-W (R8A7791), V2H (R8A7792), or M2-N (R8A7793). 
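The Kconfig.debug changes above route the OMAP and TI81xx low-level debug options to the generic 8250 DEBUG_LL backend; the physical and virtual UART addresses they rely on are then added to the DEBUG_UART_PHYS/DEBUG_UART_VIRT tables in the next hunk. Conceptually that backend just polls a memory-mapped 16550: wait for the transmit-holding-register-empty bit in LSR, then write the byte to THR. The sketch below is a rough illustration under assumed values (fixed virtual mapping, 4-byte register stride), not the actual arch/arm debug macros.

#include <linux/io.h>
#include <linux/types.h>

/* Assumed for the sketch: OMAP-style 8250 mapped at a known virtual address */
#define EXAMPLE_UART_VIRT	0xfa06a000UL
#define EXAMPLE_UART_SHIFT	2	/* 4-byte register stride */
#define UART_REG_THR		0x00
#define UART_REG_LSR		0x05
#define UART_LSR_THRE		0x20	/* transmit holding register empty */

static void example_ll_putc(char c)
{
	void __iomem *base = (void __iomem *)EXAMPLE_UART_VIRT;

	/* Busy-wait until the UART can accept another character */
	while (!(readb(base + (UART_REG_LSR << EXAMPLE_UART_SHIFT)) &
		 UART_LSR_THRE))
		;

	writeb(c, base + (UART_REG_THR << EXAMPLE_UART_SHIFT));
}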
config DEBUG_RCAR_GEN2_SCIF2 bool "Kernel low-level debugging messages via SCIF2 on R8A7794" @@ -1523,6 +1524,17 @@ config DEBUG_UART_PHYS default 0x40090000 if DEBUG_LPC32XX default 0x40100000 if DEBUG_PXA_UART1 default 0x42000000 if DEBUG_GEMINI + default 0x44e09000 if DEBUG_AM33XXUART1 + default 0x48020000 if DEBUG_OMAP4UART3 || DEBUG_TI81XXUART1 + default 0x48022000 if DEBUG_TI81XXUART2 + default 0x48024000 if DEBUG_TI81XXUART3 + default 0x4806a000 if DEBUG_OMAP2UART1 || DEBUG_OMAP3UART1 || \ + DEBUG_OMAP4UART1 || DEBUG_OMAP5UART1 + default 0x4806c000 if DEBUG_OMAP2UART2 || DEBUG_OMAP3UART2 || \ + DEBUG_OMAP4UART2 || DEBUG_OMAP5UART2 + default 0x4806e000 if DEBUG_OMAP2UART3 || DEBUG_OMAP4UART4 + default 0x49020000 if DEBUG_OMAP3UART3 + default 0x49042000 if DEBUG_OMAP3UART4 default 0x50000000 if DEBUG_S3C24XX_UART && (DEBUG_S3C_UART0 || \ DEBUG_S3C2410_UART0) default 0x50004000 if DEBUG_S3C24XX_UART && (DEBUG_S3C_UART1 || \ @@ -1641,10 +1653,21 @@ config DEBUG_UART_VIRT default 0xf8090000 if DEBUG_VEXPRESS_UART0_RS1 default 0xf8ffee00 if DEBUG_AT91_SAM9263_DBGU default 0xf8fff200 if DEBUG_AT91_RM9200_DBGU + default 0xf9e09000 if DEBUG_AM33XXUART1 + default 0xfa020000 if DEBUG_OMAP4UART3 || DEBUG_TI81XXUART1 + default 0xfa022000 if DEBUG_TI81XXUART2 + default 0xfa024000 if DEBUG_TI81XXUART3 + default 0xfa06a000 if DEBUG_OMAP2UART1 || DEBUG_OMAP3UART1 || \ + DEBUG_OMAP4UART1 || DEBUG_OMAP5UART1 + default 0xfa06c000 if DEBUG_OMAP2UART2 || DEBUG_OMAP3UART2 || \ + DEBUG_OMAP4UART2 || DEBUG_OMAP5UART2 + default 0xfa06e000 if DEBUG_OMAP2UART3 || DEBUG_OMAP4UART4 default 0xfa71e000 if DEBUG_QCOM_UARTDM default 0xfb002000 if DEBUG_CNS3XXX default 0xfb009000 if DEBUG_REALVIEW_STD_PORT default 0xfb00c000 if DEBUG_AT91_SAMA5D4_USART3 + default 0xfb020000 if DEBUG_OMAP3UART3 + default 0xfb042000 if DEBUG_OMAP3UART4 default 0xfb10c000 if DEBUG_REALVIEW_PB1176_PORT default 0xfc705000 if DEBUG_ZTE_ZX default 0xfcfe8600 if DEBUG_BCM63XX_UART diff --git a/arch/arm/boot/compressed/efi-header.S b/arch/arm/boot/compressed/efi-header.S index a17ca8d78656..c94a88ae834d 100644 --- a/arch/arm/boot/compressed/efi-header.S +++ b/arch/arm/boot/compressed/efi-header.S @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013-2015 Linaro Ltd + * Copyright (C) 2013-2017 Linaro Ltd * Authors: Roy Franz * Ard Biesheuvel * @@ -8,6 +8,9 @@ * published by the Free Software Foundation. */ +#include +#include + .macro __nop #ifdef CONFIG_EFI_STUB @ This is almost but not quite a NOP, since it does clobber the @@ -15,7 +18,7 @@ @ PE/COFF expects the magic string "MZ" at offset 0, while the @ ARM/Linux boot protocol expects an executable instruction @ there. - .inst 'M' | ('Z' << 8) | (0x1310 << 16) @ tstne r0, #0x4d000 + .inst MZ_MAGIC | (0x1310 << 16) @ tstne r0, #0x4d000 #else AR_CLASS( mov r0, r0 ) M_CLASS( nop.w ) @@ -34,96 +37,97 @@ @ The only 2 fields of the MSDOS header that are used are this @ PE/COFF offset, and the "MZ" bytes at offset 0x0. @ - .long pe_header - start @ Offset to the PE header. + .long pe_header - start @ Offset to the PE header. pe_header: - .ascii "PE\0\0" + .long PE_MAGIC coff_header: - .short 0x01c2 @ ARM or Thumb - .short 2 @ nr_sections - .long 0 @ TimeDateStamp - .long 0 @ PointerToSymbolTable - .long 1 @ NumberOfSymbols - .short section_table - optional_header - @ SizeOfOptionalHeader - .short 0x306 @ Characteristics. 
- @ IMAGE_FILE_32BIT_MACHINE | - @ IMAGE_FILE_DEBUG_STRIPPED | - @ IMAGE_FILE_EXECUTABLE_IMAGE | - @ IMAGE_FILE_LINE_NUMS_STRIPPED + .short IMAGE_FILE_MACHINE_THUMB @ Machine + .short section_count @ NumberOfSections + .long 0 @ TimeDateStamp + .long 0 @ PointerToSymbolTable + .long 0 @ NumberOfSymbols + .short section_table - optional_header @ SizeOfOptionalHeader + .short IMAGE_FILE_32BIT_MACHINE | \ + IMAGE_FILE_DEBUG_STRIPPED | \ + IMAGE_FILE_EXECUTABLE_IMAGE | \ + IMAGE_FILE_LINE_NUMS_STRIPPED @ Characteristics + +#define __pecoff_code_size (__pecoff_data_start - __efi_start) optional_header: - .short 0x10b @ PE32 format - .byte 0x02 @ MajorLinkerVersion - .byte 0x14 @ MinorLinkerVersion - .long _end - __efi_start @ SizeOfCode - .long 0 @ SizeOfInitializedData - .long 0 @ SizeOfUninitializedData - .long efi_stub_entry - start @ AddressOfEntryPoint - .long start_offset @ BaseOfCode - .long 0 @ data + .short PE_OPT_MAGIC_PE32 @ PE32 format + .byte 0x02 @ MajorLinkerVersion + .byte 0x14 @ MinorLinkerVersion + .long __pecoff_code_size @ SizeOfCode + .long __pecoff_data_size @ SizeOfInitializedData + .long 0 @ SizeOfUninitializedData + .long efi_stub_entry - start @ AddressOfEntryPoint + .long start_offset @ BaseOfCode + .long __pecoff_data_start - start @ BaseOfData extra_header_fields: - .long 0 @ ImageBase - .long 0x200 @ SectionAlignment - .long 0x200 @ FileAlignment - .short 0 @ MajorOperatingSystemVersion - .short 0 @ MinorOperatingSystemVersion - .short 0 @ MajorImageVersion - .short 0 @ MinorImageVersion - .short 0 @ MajorSubsystemVersion - .short 0 @ MinorSubsystemVersion - .long 0 @ Win32VersionValue + .long 0 @ ImageBase + .long SZ_4K @ SectionAlignment + .long SZ_512 @ FileAlignment + .short 0 @ MajorOsVersion + .short 0 @ MinorOsVersion + .short 0 @ MajorImageVersion + .short 0 @ MinorImageVersion + .short 0 @ MajorSubsystemVersion + .short 0 @ MinorSubsystemVersion + .long 0 @ Win32VersionValue - .long _end - start @ SizeOfImage - .long start_offset @ SizeOfHeaders - .long 0 @ CheckSum - .short 0xa @ Subsystem (EFI application) - .short 0 @ DllCharacteristics - .long 0 @ SizeOfStackReserve - .long 0 @ SizeOfStackCommit - .long 0 @ SizeOfHeapReserve - .long 0 @ SizeOfHeapCommit - .long 0 @ LoaderFlags - .long 0x6 @ NumberOfRvaAndSizes + .long __pecoff_end - start @ SizeOfImage + .long start_offset @ SizeOfHeaders + .long 0 @ CheckSum + .short IMAGE_SUBSYSTEM_EFI_APPLICATION @ Subsystem + .short 0 @ DllCharacteristics + .long 0 @ SizeOfStackReserve + .long 0 @ SizeOfStackCommit + .long 0 @ SizeOfHeapReserve + .long 0 @ SizeOfHeapCommit + .long 0 @ LoaderFlags + .long (section_table - .) / 8 @ NumberOfRvaAndSizes - .quad 0 @ ExportTable - .quad 0 @ ImportTable - .quad 0 @ ResourceTable - .quad 0 @ ExceptionTable - .quad 0 @ CertificationTable - .quad 0 @ BaseRelocationTable + .quad 0 @ ExportTable + .quad 0 @ ImportTable + .quad 0 @ ResourceTable + .quad 0 @ ExceptionTable + .quad 0 @ CertificationTable + .quad 0 @ BaseRelocationTable section_table: - @ - @ The EFI application loader requires a relocation section - @ because EFI applications must be relocatable. This is a - @ dummy section as far as we are concerned. 
- @ - .ascii ".reloc\0\0" - .long 0 @ VirtualSize - .long 0 @ VirtualAddress - .long 0 @ SizeOfRawData - .long 0 @ PointerToRawData - .long 0 @ PointerToRelocations - .long 0 @ PointerToLineNumbers - .short 0 @ NumberOfRelocations - .short 0 @ NumberOfLineNumbers - .long 0x42100040 @ Characteristics - .ascii ".text\0\0\0" - .long _end - __efi_start @ VirtualSize - .long __efi_start @ VirtualAddress - .long _edata - __efi_start @ SizeOfRawData - .long __efi_start @ PointerToRawData - .long 0 @ PointerToRelocations - .long 0 @ PointerToLineNumbers - .short 0 @ NumberOfRelocations - .short 0 @ NumberOfLineNumbers - .long 0xe0500020 @ Characteristics + .long __pecoff_code_size @ VirtualSize + .long __efi_start @ VirtualAddress + .long __pecoff_code_size @ SizeOfRawData + .long __efi_start @ PointerToRawData + .long 0 @ PointerToRelocations + .long 0 @ PointerToLineNumbers + .short 0 @ NumberOfRelocations + .short 0 @ NumberOfLineNumbers + .long IMAGE_SCN_CNT_CODE | \ + IMAGE_SCN_MEM_READ | \ + IMAGE_SCN_MEM_EXECUTE @ Characteristics - .align 9 + .ascii ".data\0\0\0" + .long __pecoff_data_size @ VirtualSize + .long __pecoff_data_start - start @ VirtualAddress + .long __pecoff_data_rawsize @ SizeOfRawData + .long __pecoff_data_start - start @ PointerToRawData + .long 0 @ PointerToRelocations + .long 0 @ PointerToLineNumbers + .short 0 @ NumberOfRelocations + .short 0 @ NumberOfLineNumbers + .long IMAGE_SCN_CNT_INITIALIZED_DATA | \ + IMAGE_SCN_MEM_READ | \ + IMAGE_SCN_MEM_WRITE @ Characteristics + + .set section_count, (. - section_table) / 40 + + .align 12 __efi_start: #endif .endm diff --git a/arch/arm/boot/compressed/vmlinux.lds.S b/arch/arm/boot/compressed/vmlinux.lds.S index 81c493156ce8..7a4c59154361 100644 --- a/arch/arm/boot/compressed/vmlinux.lds.S +++ b/arch/arm/boot/compressed/vmlinux.lds.S @@ -48,13 +48,6 @@ SECTIONS *(.rodata) *(.rodata.*) } - .data : { - /* - * The EFI stub always executes from RAM, and runs strictly before the - * decompressor, so we can make an exception for its r/w data, and keep it - */ - *(.data.efistub) - } .piggydata : { *(.piggydata) } @@ -70,6 +63,26 @@ SECTIONS /* ensure the zImage file size is always a multiple of 64 bits */ /* (without a dummy byte, ld just ignores the empty section) */ .pad : { BYTE(0); . = ALIGN(8); } + +#ifdef CONFIG_EFI_STUB + .data : ALIGN(4096) { + __pecoff_data_start = .; + /* + * The EFI stub always executes from RAM, and runs strictly before the + * decompressor, so we can make an exception for its r/w data, and keep it + */ + *(.data.efistub) + __pecoff_data_end = .; + + /* + * PE/COFF mandates a file size which is a multiple of 512 bytes if the + * section size equals or exceeds 4 KB + */ + . = ALIGN(512); + } + __pecoff_data_rawsize = . - ADDR(.data); +#endif + _edata = .; _magic_sig = ZIMAGE_MAGIC(0x016f2818); @@ -84,6 +97,9 @@ SECTIONS . 
= ALIGN(8); /* the stack must be 64-bit aligned */ .stack : { *(.stack) } + PROVIDE(__pecoff_data_size = ALIGN(512) - ADDR(.data)); + PROVIDE(__pecoff_end = ALIGN(512)); + .stab 0 : { *(.stab) } .stabstr 0 : { *(.stabstr) } .stab.excl 0 : { *(.stab.excl) } diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile index 4b17f35dc9a7..faf46abaa4a2 100644 --- a/arch/arm/boot/dts/Makefile +++ b/arch/arm/boot/dts/Makefile @@ -46,6 +46,7 @@ dtb-$(CONFIG_SOC_AT91SAM9) += \ at91sam9x35ek.dtb dtb-$(CONFIG_SOC_SAM_V7) += \ at91-kizbox2.dtb \ + at91-sama5d27_som1_ek.dtb \ at91-sama5d2_xplained.dtb \ at91-sama5d3_xplained.dtb \ at91-tse850-3.dtb \ @@ -73,7 +74,8 @@ dtb-$(CONFIG_ARCH_BCM2835) += \ bcm2835-rpi-a-plus.dtb \ bcm2836-rpi-2-b.dtb \ bcm2837-rpi-3-b.dtb \ - bcm2835-rpi-zero.dtb + bcm2835-rpi-zero.dtb \ + bcm2835-rpi-zero-w.dtb dtb-$(CONFIG_ARCH_BCM_5301X) += \ bcm4708-asus-rt-ac56u.dtb \ bcm4708-asus-rt-ac68u.dtb \ @@ -106,7 +108,8 @@ dtb-$(CONFIG_ARCH_BCM_5301X) += \ bcm953012hr.dtb \ bcm953012k.dtb dtb-$(CONFIG_ARCH_BCM_53573) += \ - bcm47189-tenda-ac9.dtb + bcm47189-tenda-ac9.dtb \ + bcm947189acdbmr.dtb dtb-$(CONFIG_ARCH_BCM_63XX) += \ bcm963138dvt.dtb dtb-$(CONFIG_ARCH_BCM_CYGNUS) += \ @@ -180,6 +183,7 @@ dtb-$(CONFIG_ARCH_EXYNOS5) += \ exynos5440-ssdk5440.dtb \ exynos5800-peach-pi.dtb dtb-$(CONFIG_ARCH_GEMINI) += \ + gemini-dlink-dir-685.dtb \ gemini-nas4220b.dtb \ gemini-rut1xx.dtb \ gemini-sq201.dtb \ @@ -340,6 +344,7 @@ dtb-$(CONFIG_SOC_IMX51) += \ imx51-ts4800.dtb dtb-$(CONFIG_SOC_IMX53) += \ imx53-ard.dtb \ + imx53-cx9020.dtb \ imx53-m53evk.dtb \ imx53-mba53.dtb \ imx53-qsb.dtb \ @@ -391,7 +396,9 @@ dtb-$(CONFIG_SOC_IMX6Q) += \ imx6dl-udoo.dtb \ imx6dl-wandboard.dtb \ imx6dl-wandboard-revb1.dtb \ + imx6q-apalis-eval.dtb \ imx6q-apalis-ixora.dtb \ + imx6q-apalis-ixora-v1.1.dtb \ imx6q-apf6dev.dtb \ imx6q-arm2.dtb \ imx6q-b450v3.dtb \ @@ -466,7 +473,7 @@ dtb-$(CONFIG_SOC_IMX6SX) += \ imx6sx-udoo-neo-full.dtb dtb-$(CONFIG_SOC_IMX6UL) += \ imx6ul-14x14-evk.dtb \ - imx6ul-geam-kit.dtb \ + imx6ul-geam.dtb \ imx6ul-isiot-emmc.dtb \ imx6ul-isiot-nand.dtb \ imx6ul-liteboard.dtb \ @@ -617,6 +624,7 @@ dtb-$(CONFIG_SOC_AM33XX) += \ am335x-evmsk.dtb \ am335x-icev2.dtb \ am335x-lxm.dtb \ + am335x-moxa-uc-8100-me-t.dtb \ am335x-nano.dtb \ am335x-pepper.dtb \ am335x-phycore-rdk.dtb \ @@ -650,6 +658,7 @@ dtb-$(CONFIG_SOC_OMAP5) += \ dtb-$(CONFIG_SOC_DRA7XX) += \ am57xx-beagle-x15.dtb \ am57xx-beagle-x15-revb1.dtb \ + am57xx-beagle-x15-revc.dtb \ am57xx-cl-som-am57x.dtb \ am57xx-sbc-am57x.dtb \ am572x-idk.dtb \ @@ -657,7 +666,8 @@ dtb-$(CONFIG_SOC_DRA7XX) += \ dra7-evm.dtb \ dra72-evm.dtb \ dra72-evm-revc.dtb \ - dra71-evm.dtb + dra71-evm.dtb \ + dra76-evm.dtb dtb-$(CONFIG_ARCH_ORION5X) += \ orion5x-kuroboxpro.dtb \ orion5x-lacie-d2-network.dtb \ @@ -903,6 +913,7 @@ dtb-$(CONFIG_MACH_SUN8I) += \ sun8i-a33-q8-tablet.dtb \ sun8i-a33-sinlinx-sina33.dtb \ sun8i-a83t-allwinner-h8homlet-v2.dtb \ + sun8i-a83t-bananapi-m3.dtb \ sun8i-a83t-cubietruck-plus.dtb \ sun8i-h2-plus-orangepi-zero.dtb \ sun8i-h3-bananapi-m2-plus.dtb \ @@ -918,6 +929,7 @@ dtb-$(CONFIG_MACH_SUN8I) += \ sun8i-h3-orangepi-pc-plus.dtb \ sun8i-h3-orangepi-plus.dtb \ sun8i-h3-orangepi-plus2e.dtb \ + sun8i-r16-bananapi-m2m.dtb \ sun8i-r16-parrot.dtb \ sun8i-v3s-licheepi-zero.dtb \ sun8i-v3s-licheepi-zero-dock.dtb @@ -970,7 +982,6 @@ dtb-$(CONFIG_ARCH_UNIPHIER) += \ uniphier-pro4-sanji.dtb \ uniphier-pxs2-gentil.dtb \ uniphier-pxs2-vodka.dtb \ - uniphier-sld3-ref.dtb \ uniphier-sld8-ref.dtb dtb-$(CONFIG_ARCH_VERSATILE) += \ 
versatile-ab.dtb \ @@ -1049,7 +1060,8 @@ dtb-$(CONFIG_ARCH_MEDIATEK) += \ mt6580-evbp1.dtb \ mt6589-aquaris5.dtb \ mt6592-evb.dtb \ - mt7623-evb.dtb \ + mt7623n-rfb-nand.dtb \ + mt7623n-bananapi-bpi-r2.dtb \ mt8127-moose.dtb \ mt8135-evbp1.dtb dtb-$(CONFIG_ARCH_ZX) += zx296702-ad1.dtb diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi index 1d154444dfef..48a15fc641f2 100644 --- a/arch/arm/boot/dts/am335x-bone-common.dtsi +++ b/arch/arm/boot/dts/am335x-bone-common.dtsi @@ -319,13 +319,10 @@ &tps { ti,pmic-shutdown-controller; charger { - interrupts = <0>, <1>; - interrupt-names = "USB", "AC"; status = "okay"; }; pwrbutton { - interrupts = <2>; status = "okay"; }; diff --git a/arch/arm/boot/dts/am335x-chiliboard.dts b/arch/arm/boot/dts/am335x-chiliboard.dts index d8769799772e..59431b235944 100644 --- a/arch/arm/boot/dts/am335x-chiliboard.dts +++ b/arch/arm/boot/dts/am335x-chiliboard.dts @@ -191,13 +191,10 @@ &tps { interrupts = <7>; /* NNMI */ charger { - interrupts = <0>, <1>; - interrupt-names = "USB", "AC"; status = "okay"; }; pwrbutton { - interrupts = <2>; status = "okay"; }; }; diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts index 1c37a7c1ea17..ddd897556e03 100644 --- a/arch/arm/boot/dts/am335x-evm.dts +++ b/arch/arm/boot/dts/am335x-evm.dts @@ -531,6 +531,7 @@ nand@0,0 { interrupts = <0 IRQ_TYPE_NONE>, /* fifoevent */ <1 IRQ_TYPE_NONE>; /* termcount */ rb-gpios = <&gpmc 0 GPIO_ACTIVE_HIGH>; /* gpmc_wait0 */ + ti,nand-xfer-type = "prefetch-dma"; ti,nand-ecc-opt = "bch8"; ti,elm-id = <&elm>; nand-bus-width = <8>; diff --git a/arch/arm/boot/dts/am335x-moxa-uc-8100-me-t.dts b/arch/arm/boot/dts/am335x-moxa-uc-8100-me-t.dts new file mode 100644 index 000000000000..f82233cd18e0 --- /dev/null +++ b/arch/arm/boot/dts/am335x-moxa-uc-8100-me-t.dts @@ -0,0 +1,525 @@ +/* + * Copyright (C) 2017 MOXA Inc. - https://www.moxa.com/ + * + * Author: SZ Lin (林上智) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +/dts-v1/; + +#include "am33xx.dtsi" + +/ { + model = "Moxa UC-8100-ME-T"; + compatible = "moxa,uc-8100-me-t", "ti,am33xx"; + + cpus { + cpu@0 { + cpu0-supply = <&vdd1_reg>; + }; + }; + + memory { + device_type = "memory"; + reg = <0x80000000 0x20000000>; /* 512 MB */ + }; + + vbat: vbat-regulator { + compatible = "regulator-fixed"; + }; + + /* Power supply provides a fixed 3.3V @3A */ + vmmcsd_fixed: vmmcsd-regulator { + compatible = "regulator-fixed"; + regulator-name = "vmmcsd_fixed"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-boot-on; + }; + + leds { + compatible = "gpio-leds"; + led1 { + label = "uc8100me:CEL1"; + gpios = <&gpio_xten 8 0>; + default-state = "off"; + }; + + led2 { + label = "uc8100me:CEL2"; + gpios = <&gpio_xten 9 0>; + default-state = "off"; + }; + + led3 { + label = "uc8100me:CEL3"; + gpios = <&gpio_xten 10 0>; + default-state = "off"; + }; + + led4 { + label = "uc8100me:DIA1"; + gpios = <&gpio_xten 11 0>; + default-state = "off"; + }; + led5 { + label = "uc8100me:DIA2"; + gpios = <&gpio_xten 12 0>; + default-state = "off"; + }; + led6 { + label = "uc8100me:DIA3"; + gpios = <&gpio_xten 13 0>; + default-state = "off"; + }; + led7 { + label = "uc8100me:SD"; + gpios = <&gpio_xten 14 0>; + default-state = "off"; + }; + led8 { + label = "uc8100me:USB"; + gpios = <&gpio_xten 15 0>; + default-state = "off"; + }; + led9 { + label = "uc8100me:USER"; + gpios = <&gpio0 20 GPIO_ACTIVE_HIGH>; + default-state = "off"; + }; + }; + + buttons: push_button { + compatible = "gpio-keys"; + }; + +}; + +&am33xx_pinmux { + pinctrl-names = "default"; + pinctrl-0 = <&minipcie_pins>; + + minipcie_pins: pinmux_minipcie { + pinctrl-single,pins = < + AM33XX_IOPAD(0x8e8, PIN_INPUT_PULLDOWN | MUX_MODE7) /* lcd_pclk.gpio2_24 */ + AM33XX_IOPAD(0x8ec, PIN_INPUT_PULLDOWN | MUX_MODE7) /* lcd_ac_bias_en.gpio2_25 */ + AM33XX_IOPAD(0x8e0, PIN_INPUT_PULLDOWN | MUX_MODE7) /* lcd_vsync.gpio2_22 Power off PIN*/ + >; + }; + + push_button_pins: pinmux_push_button { + pinctrl-single,pins = < + AM33XX_IOPAD(0x9ac, PIN_INPUT_PULLDOWN | MUX_MODE7) /* mcasp0_ahcklx.gpio3_21 */ + >; + }; + + i2c0_pins: pinmux_i2c0_pins { + pinctrl-single,pins = < + AM33XX_IOPAD(0x988, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_sda.i2c0_sda */ + AM33XX_IOPAD(0x98c, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_scl.i2c0_scl */ + >; + }; + + + i2c1_pins: pinmux_i2c1_pins { + pinctrl-single,pins = < + AM33XX_IOPAD(0x968, PIN_INPUT_PULLUP | MUX_MODE3) /* uart0_ctsn.i2c1_sda */ + AM33XX_IOPAD(0x96c, PIN_INPUT_PULLUP | MUX_MODE3) /* uart0_rtsn.i2c1_scl */ + >; + }; + + uart0_pins: pinmux_uart0_pins { + pinctrl-single,pins = < + AM33XX_IOPAD(0x970, PIN_INPUT_PULLUP | MUX_MODE0) /* uart0_rxd.uart0_rxd */ + AM33XX_IOPAD(0x974, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart0_txd.uart0_txd */ + >; + }; + + uart1_pins: pinmux_uart1_pins { + pinctrl-single,pins = < + AM33XX_IOPAD(0x978, PIN_INPUT | MUX_MODE0) /* uart1_ctsn.uart1_ctsn */ + AM33XX_IOPAD(0x97C, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart1_rtsn.uart1_rtsn */ + AM33XX_IOPAD(0x980, PIN_INPUT_PULLUP | MUX_MODE0) /* uart1_rxd.uart1_rxd */ + AM33XX_IOPAD(0x984, PIN_OUTPUT | MUX_MODE0) /* uart1_txd.uart1_txd */ + >; + }; + + uart2_pins: pinmux_uart2_pins { + pinctrl-single,pins = < + AM33XX_IOPAD(0x8d8, PIN_INPUT | MUX_MODE6) /* lcd_data14.uart5_ctsn */ + AM33XX_IOPAD(0x8dc, PIN_OUTPUT_PULLDOWN | MUX_MODE6) /* lcd_data15.uart5_rtsn */ + AM33XX_IOPAD(0x8c4, PIN_INPUT_PULLUP | MUX_MODE4) /* lcd_data9.uart5_rxd */ + AM33XX_IOPAD(0x8c0, PIN_OUTPUT | MUX_MODE4) /* 
lcd_data8.uart5_txd */ + >; + }; + + cpsw_default: cpsw_default { + pinctrl-single,pins = < + /* Slave 1 */ + AM33XX_IOPAD(0x90c, PIN_INPUT_PULLDOWN | MUX_MODE1) /* mii1_crs.rmii1_crs_dv */ + AM33XX_IOPAD(0x910, PIN_INPUT_PULLUP | MUX_MODE1) /* mii1_rxerr.rmii1_rxerr */ + AM33XX_IOPAD(0x914, PIN_OUTPUT_PULLDOWN | MUX_MODE1) /* mii1_txen.rmii1_txen */ + AM33XX_IOPAD(0x924, PIN_OUTPUT_PULLDOWN | MUX_MODE1) /* mii1_txd1.rmii1_txd1 */ + AM33XX_IOPAD(0x928, PIN_OUTPUT_PULLDOWN | MUX_MODE1) /* mii1_txd0.rmii1_txd0 */ + AM33XX_IOPAD(0x93c, PIN_INPUT_PULLUP | MUX_MODE1) /* mii1_rxd1.rmii1_rxd1 */ + AM33XX_IOPAD(0x940, PIN_INPUT_PULLUP | MUX_MODE1) /* mii1_rxd0.rmii1_rxd0 */ + AM33XX_IOPAD(0x944, PIN_INPUT_PULLDOWN | MUX_MODE0) /* mii1_refclk.rmii1_refclk */ + + /* Slave 2 */ + AM33XX_IOPAD(0x870, PIN_INPUT_PULLDOWN | MUX_MODE3) /* rmii2_crs_dv */ + AM33XX_IOPAD(0x874, PIN_INPUT_PULLDOWN | MUX_MODE3) /* rmii2_rxer */ + AM33XX_IOPAD(0x840, PIN_OUTPUT_PULLDOWN | MUX_MODE3) /* rmii2_txen */ + AM33XX_IOPAD(0x850, PIN_OUTPUT_PULLDOWN | MUX_MODE3) /* rmii2_td1 */ + AM33XX_IOPAD(0x854, PIN_OUTPUT_PULLDOWN | MUX_MODE3) /* rmii2_td0 */ + AM33XX_IOPAD(0x868, PIN_INPUT_PULLDOWN | MUX_MODE3) /* rmii2_rd1 */ + AM33XX_IOPAD(0x86c, PIN_INPUT_PULLDOWN | MUX_MODE3) /* rmii2_rd0 */ + AM33XX_IOPAD(0x908, PIN_INPUT_PULLDOWN | MUX_MODE1) /* rmii2_refclk */ + + >; + }; + + davinci_mdio_default: davinci_mdio_default { + pinctrl-single,pins = < + /* MDIO */ + AM33XX_IOPAD(0x948, PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0) /* mdio_data.mdio_data */ + AM33XX_IOPAD(0x94c, PIN_OUTPUT_PULLUP | MUX_MODE0) /* mdio_clk.mdio_clk */ + >; + }; + + mmc0_pins_default: pinmux_mmc0_pins { + pinctrl-single,pins = < + AM33XX_IOPAD(0x8f0, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat3 */ + AM33XX_IOPAD(0x8f4, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat2 */ + AM33XX_IOPAD(0x8f8, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat1 */ + AM33XX_IOPAD(0x8fc, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat0 */ + AM33XX_IOPAD(0x900, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_clk */ + AM33XX_IOPAD(0x904, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_cmd */ + AM33XX_IOPAD(0x990, PIN_INPUT_PULLUP | MUX_MODE7) /* mcasp0_aclkx.gpio3_14 */ + AM33XX_IOPAD(0x9a0, PIN_INPUT_PULLUP | MUX_MODE7) /* mcasp0_aclkx.gpio3_18 */ + >; + }; + + mmc2_pins_default: pinmux_mmc2_pins { + pinctrl-single,pins = < + /* eMMC */ + AM33XX_IOPAD(0x830, PIN_INPUT_PULLUP | MUX_MODE3) /* gpmc_ad12.mmc2_dat0 */ + AM33XX_IOPAD(0x834, PIN_INPUT_PULLUP | MUX_MODE3) /* gpmc_ad13.mmc2_dat1 */ + AM33XX_IOPAD(0x838, PIN_INPUT_PULLUP | MUX_MODE3) /* gpmc_ad14.mmc2_dat2 */ + AM33XX_IOPAD(0x83c, PIN_INPUT_PULLUP | MUX_MODE3) /* gpmc_ad15.mmc2_dat3 */ + AM33XX_IOPAD(0x820, PIN_INPUT_PULLUP | MUX_MODE3) /* gpmc_ad8.mmc2_dat4 */ + AM33XX_IOPAD(0x824, PIN_INPUT_PULLUP | MUX_MODE3) /* gpmc_ad9.mmc2_dat5 */ + AM33XX_IOPAD(0x828, PIN_INPUT_PULLUP | MUX_MODE3) /* gpmc_ad10.mmc2_dat6 */ + AM33XX_IOPAD(0x82c, PIN_INPUT_PULLUP | MUX_MODE3) /* gpmc_ad11.mmc2_dat7 */ + AM33XX_IOPAD(0x888, PIN_INPUT_PULLUP | MUX_MODE3) /* gpmc_csn3.mmc2_cmd */ + AM33XX_IOPAD(0x88c, PIN_INPUT_PULLUP | MUX_MODE3) /* gpmc_clk.mmc2_clk */ + >; + }; + + spi0_pins: pinmux_spi0 { + pinctrl-single,pins = < + AM33XX_IOPAD(0x950, PIN_INPUT_PULLUP | MUX_MODE0) /* spi0_sclk.spi0_sclk */ + AM33XX_IOPAD(0x95C, PIN_INPUT_PULLUP | MUX_MODE0) /* spi0_cs0.spi0_cs0 */ + AM33XX_IOPAD(0x954, PIN_INPUT_PULLUP | MUX_MODE0) /* spi0_d0.spi0_d0 */ + AM33XX_IOPAD(0x958, PIN_INPUT_PULLUP | MUX_MODE0) /* spi0_d1.spi0_d1 */ + >; + }; + +}; + +&uart0 { + /* Console */ + status = 
"okay"; + pinctrl-names = "default"; + pinctrl-0 = <&uart0_pins>; +}; + +&uart1 { + /* UART 1 setting */ + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&uart1_pins>; +}; + +&uart5 { + /* UART 2 setting */ + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&uart2_pins>; +}; + +&i2c0 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c0_pins>; + + status = "okay"; + clock-frequency = <400000>; + + tpm: tpm@20 { + compatible = "infineon,slb9645tt"; + reg = <0x20>; + }; + + tps: tps@2d { + compatible = "ti,tps65910"; + reg = <0x2d>; + }; + + eeprom: eeprom@50 { + compatible = "atmel,24c16"; + pagesize = <16>; + reg = <0x50>; + }; + + rtc_wdt: rtc_wdt@68 { + compatible = "dallas,ds1374"; + reg = <0x68>; + }; +}; + +&i2c1 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c1_pins>; + + status = "okay"; + clock-frequency = <400000>; + gpio_xten: gpio_xten@27 { + compatible = "nxp,pca9535"; + gpio-controller; + #gpio-cells = <2>; + reg = <0x27>; + }; +}; + +&usb { + status = "okay"; +}; + +&usb_ctrl_mod { + status = "okay"; +}; + +&usb0_phy { + status = "okay"; +}; + +&usb1_phy { + status = "okay"; +}; + +&usb0 { + status = "okay"; + dr_mode = "host"; +}; + +&usb1 { + status = "okay"; + dr_mode = "host"; +}; + +&cppi41dma { + status = "okay"; +}; + +#include "tps65910.dtsi" + +&tps { + vcc1-supply = <&vbat>; + vcc2-supply = <&vbat>; + vcc3-supply = <&vbat>; + vcc4-supply = <&vbat>; + vcc5-supply = <&vbat>; + vcc6-supply = <&vbat>; + vcc7-supply = <&vbat>; + vccio-supply = <&vbat>; + + regulators { + vrtc_reg: regulator@0 { + regulator-always-on; + }; + + vio_reg: regulator@1 { + regulator-always-on; + }; + + vdd1_reg: regulator@2 { + /* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */ + regulator-name = "vdd_mpu"; + regulator-min-microvolt = <912500>; + regulator-max-microvolt = <1378000>; + regulator-boot-on; + regulator-always-on; + }; + + vdd2_reg: regulator@3 { + /* VDD_CORE voltage limits 0.95V - 1.1V with +/-4% tolerance */ + regulator-name = "vdd_core"; + regulator-min-microvolt = <912500>; + regulator-max-microvolt = <1150000>; + regulator-boot-on; + regulator-always-on; + }; + + vdd3_reg: regulator@4 { + regulator-always-on; + }; + + vdig1_reg: regulator@5 { + regulator-always-on; + }; + + vdig2_reg: regulator@6 { + regulator-always-on; + }; + + vpll_reg: regulator@7 { + regulator-always-on; + }; + + vdac_reg: regulator@8 { + regulator-always-on; + }; + + vaux1_reg: regulator@9 { + regulator-always-on; + }; + + vaux2_reg: regulator@10 { + regulator-always-on; + }; + + vaux33_reg: regulator@11 { + regulator-always-on; + }; + + vmmc_reg: regulator@12 { + compatible = "regulator-fixed"; + regulator-name = "vmmc_reg"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + }; + }; +}; + +/* Power */ +&vbat { + regulator-name = "vbat"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; +}; + +&mac { + pinctrl-names = "default"; + pinctrl-0 = <&cpsw_default>; + dual_emac = <1>; + status = "okay"; +}; + +&davinci_mdio { + pinctrl-names = "default"; + pinctrl-0 = <&davinci_mdio_default>; + status = "okay"; +}; + +&cpsw_emac0 { + status = "okay"; + phy_id = <&davinci_mdio>, <4>; + phy-mode = "rmii"; + dual_emac_res_vlan = <1>; +}; + +&cpsw_emac1 { + status = "okay"; + phy_id = <&davinci_mdio>, <5>; + phy-mode = "rmii"; + dual_emac_res_vlan = <2>; +}; + +&phy_sel { + reg= <0x44e10650 0xf5>; + rmii-clock-ext; +}; + +&sham { + status = "okay"; +}; + +&aes { + status = "okay"; +}; + +&gpio0 { 
+ ti,no-reset-on-init; +}; + +&mmc1 { + pinctrl-names = "default"; + vmmc-supply = <&vmmcsd_fixed>; + bus-width = <4>; + pinctrl-0 = <&mmc0_pins_default>; + cd-gpios = <&gpio3 14 GPIO_ACTIVE_HIGH>; + wp-gpios = <&gpio3 18 GPIO_ACTIVE_HIGH>; + status = "okay"; +}; + +&mmc3 { + dmas = <&edma_xbar 12 0 1 + &edma_xbar 13 0 2>; + dma-names = "tx", "rx"; + pinctrl-names = "default"; + vmmc-supply = <&vmmcsd_fixed>; + bus-width = <8>; + pinctrl-0 = <&mmc2_pins_default>; + ti,non-removable; + status = "okay"; +}; + +&buttons { + pinctrl-names = "default"; + pinctrl-0 = <&push_button_pins>; + #address-cells = <1>; + #size-cells = <0>; + + button@0 { + label = "push_button"; + linux,code = <0x100>; + gpios = <&gpio3 21 GPIO_ACTIVE_LOW>; + }; +}; + +/* SPI Busses */ +&spi0 { + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&spi0_pins>; + + m25p80@0 { + compatible = "mx25l6405d"; + spi-max-frequency = <40000000>; + + reg = <0>; + spi-cpol; + spi-cpha; + #address-cells = <1>; + #size-cells = <1>; + + /* reg : The partition's offset and size within the mtd bank. */ + partitions@0 { + label = "MLO"; + reg = <0x0 0x80000>; + }; + + partitions@1 { + label = "U-Boot"; + reg = <0x80000 0x100000>; + }; + + partitions@2 { + label = "U-Boot Env"; + reg = <0x180000 0x20000>; + }; + }; +}; diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi index 7d7ca054c557..e58fab8aec5d 100644 --- a/arch/arm/boot/dts/am33xx.dtsi +++ b/arch/arm/boot/dts/am33xx.dtsi @@ -36,6 +36,8 @@ aliases { phy1 = &usb1_phy; ethernet0 = &cpsw_emac0; ethernet1 = &cpsw_emac1; + spi0 = &spi0; + spi1 = &spi1; }; cpus { diff --git a/arch/arm/boot/dts/am437x-gp-evm.dts b/arch/arm/boot/dts/am437x-gp-evm.dts index 29a538ecd405..afb8eb0a0a16 100644 --- a/arch/arm/boot/dts/am437x-gp-evm.dts +++ b/arch/arm/boot/dts/am437x-gp-evm.dts @@ -149,6 +149,13 @@ sound0_master: simple-audio-card,codec { system-clock-frequency = <12000000>; }; }; + + beeper: beeper { + compatible = "gpio-beeper"; + pinctrl-names = "default"; + pinctrl-0 = <&beeper_pins>; + gpios = <&gpio4 12 GPIO_ACTIVE_HIGH>; + }; }; &am43xx_pinmux { @@ -510,6 +517,13 @@ AM4372_IOPAD(0x970, PIN_INPUT_PULLUP | MUX_MODE0) /* uart0_rxd.uart0_rxd */ AM4372_IOPAD(0x974, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart0_txd.uart0_txd */ >; }; + + beeper_pins: beeper_pins { + pinctrl-single,pins = < + AM4372_IOPAD(0x9e0, PIN_OUTPUT_PULLUP | MUX_MODE7) /* cam1_field.gpio4_12 */ + >; + }; + }; &uart0 { @@ -842,6 +856,7 @@ nand@0,0 { interrupts = <0 IRQ_TYPE_NONE>, /* fifoevent */ <1 IRQ_TYPE_NONE>; /* termcount */ rb-gpios = <&gpmc 0 GPIO_ACTIVE_HIGH>; /* gpmc_wait0 */ + ti,nand-xfer-type = "prefetch-dma"; ti,nand-ecc-opt = "bch16"; ti,elm-id = <&elm>; nand-bus-width = <8>; diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts index 54f40f370011..081fa68b6f98 100644 --- a/arch/arm/boot/dts/am43x-epos-evm.dts +++ b/arch/arm/boot/dts/am43x-epos-evm.dts @@ -388,6 +388,7 @@ &mac { pinctrl-0 = <&cpsw_default>; pinctrl-1 = <&cpsw_sleep>; status = "okay"; + slaves = <1>; }; &davinci_mdio { @@ -402,11 +403,6 @@ &cpsw_emac0 { phy-mode = "rmii"; }; -&cpsw_emac1 { - phy_id = <&davinci_mdio>, <1>; - phy-mode = "rmii"; -}; - &phy_sel { rmii-clock-ext; }; @@ -564,6 +560,7 @@ nand@0,0 { interrupts = <0 IRQ_TYPE_NONE>, /* fifoevent */ <1 IRQ_TYPE_NONE>; /* termcount */ rb-gpios = <&gpmc 0 GPIO_ACTIVE_HIGH>; /* gpmc_wait0 */ + ti,nand-xfer-type = "prefetch-dma"; ti,nand-ecc-opt = "bch16"; ti,elm-id = <&elm>; nand-bus-width = <8>; diff --git 
a/arch/arm/boot/dts/am571x-idk.dts b/arch/arm/boot/dts/am571x-idk.dts index 7b207835b2d1..debf9464403e 100644 --- a/arch/arm/boot/dts/am571x-idk.dts +++ b/arch/arm/boot/dts/am571x-idk.dts @@ -11,6 +11,7 @@ #include #include #include "am57xx-idk-common.dtsi" +#include "dra72x-mmc-iodelay.dtsi" / { model = "TI AM5718 IDK"; @@ -64,13 +65,6 @@ mmc0-led { }; }; -&mmc1 { - status = "okay"; - vmmc-supply = <&ldo1_reg>; - bus-width = <4>; - cd-gpios = <&gpio6 27 0>; /* gpio 219 */ -}; - &omap_dwc3_2 { extcon = <&extcon_usb2>; }; @@ -96,3 +90,30 @@ mbox_ipu2_ipc3x: mbox_ipu2_ipc3x { status = "okay"; }; }; + +&pcie1_rc { + status = "okay"; + gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>; +}; + +&pcie1_ep { + gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>; +}; + +&mmc1 { + pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104"; + pinctrl-0 = <&mmc1_pins_default>; + pinctrl-1 = <&mmc1_pins_hs>; + pinctrl-2 = <&mmc1_pins_sdr12>; + pinctrl-3 = <&mmc1_pins_sdr25>; + pinctrl-4 = <&mmc1_pins_sdr50>; + pinctrl-5 = <&mmc1_pins_ddr50_rev20 &mmc1_iodelay_ddr50_conf>; + pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>; +}; + +&mmc2 { + pinctrl-names = "default", "hs", "ddr_1_8v"; + pinctrl-0 = <&mmc2_pins_default>; + pinctrl-1 = <&mmc2_pins_hs>; + pinctrl-2 = <&mmc2_pins_ddr_rev20 &mmc2_iodelay_ddr_conf>; +}; diff --git a/arch/arm/boot/dts/am572x-idk.dts b/arch/arm/boot/dts/am572x-idk.dts index 9da6d83ca185..a578fe97ba3b 100644 --- a/arch/arm/boot/dts/am572x-idk.dts +++ b/arch/arm/boot/dts/am572x-idk.dts @@ -12,6 +12,7 @@ #include #include #include "am57xx-idk-common.dtsi" +#include "dra74x-mmc-iodelay.dtsi" / { model = "TI AM5728 IDK"; @@ -67,6 +68,24 @@ mmc0-led { }; }; +&mmc1 { + pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104"; + pinctrl-0 = <&mmc1_pins_default>; + pinctrl-1 = <&mmc1_pins_hs>; + pinctrl-2 = <&mmc1_pins_sdr12>; + pinctrl-3 = <&mmc1_pins_sdr25>; + pinctrl-4 = <&mmc1_pins_sdr50>; + pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev20_conf>; + pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>; +}; + +&mmc2 { + pinctrl-names = "default", "hs", "ddr_1_8v"; + pinctrl-0 = <&mmc2_pins_default>; + pinctrl-1 = <&mmc2_pins_hs>; + pinctrl-2 = <&mmc2_pins_ddr_rev20>; +}; + &omap_dwc3_2 { extcon = <&extcon_usb2>; }; @@ -76,19 +95,16 @@ &extcon_usb2 { vbus-gpio = <&gpio3 26 GPIO_ACTIVE_HIGH>; }; -&mmc1 { - status = "okay"; - vmmc-supply = <&v3_3d>; - vmmc_aux-supply = <&ldo1_reg>; - bus-width = <4>; - cd-gpios = <&gpio6 27 0>; /* gpio 219 */ -}; - &sn65hvs882 { load-gpios = <&gpio3 19 GPIO_ACTIVE_LOW>; }; -&pcie1 { +&pcie1_rc { + status = "okay"; + gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>; +}; + +&pcie1_ep { gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>; }; diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi index fdfe5b16b806..49aeecd312b4 100644 --- a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi +++ b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi @@ -9,6 +9,7 @@ #include "dra74x.dtsi" #include "am57xx-commercial-grade.dtsi" +#include "dra74x-mmc-iodelay.dtsi" #include #include @@ -166,34 +167,6 @@ sound0_master: simple-audio-card,codec { }; }; -&dra7_pmx_core { - mmc1_pins_default: mmc1_pins_default { - pinctrl-single,pins = < - DRA7XX_CORE_IOPAD(0x376c, PIN_INPUT | MUX_MODE14) /* mmc1sdcd.gpio219 */ - DRA7XX_CORE_IOPAD(0x3754, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_clk.clk */ - DRA7XX_CORE_IOPAD(0x3758, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_cmd.cmd */ - DRA7XX_CORE_IOPAD(0x375c, 
PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat0.dat0 */ - DRA7XX_CORE_IOPAD(0x3760, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat1.dat1 */ - DRA7XX_CORE_IOPAD(0x3764, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat2.dat2 */ - DRA7XX_CORE_IOPAD(0x3768, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat3.dat3 */ - >; - }; - - mmc2_pins_default: mmc2_pins_default { - pinctrl-single,pins = < - DRA7XX_CORE_IOPAD(0x349c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a23.mmc2_clk */ - DRA7XX_CORE_IOPAD(0x34b0, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_cs1.mmc2_cmd */ - DRA7XX_CORE_IOPAD(0x34a0, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a24.mmc2_dat0 */ - DRA7XX_CORE_IOPAD(0x34a4, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a25.mmc2_dat1 */ - DRA7XX_CORE_IOPAD(0x34a8, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a26.mmc2_dat2 */ - DRA7XX_CORE_IOPAD(0x34ac, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a27.mmc2_dat3 */ - DRA7XX_CORE_IOPAD(0x348c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a19.mmc2_dat4 */ - DRA7XX_CORE_IOPAD(0x3490, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a20.mmc2_dat5 */ - DRA7XX_CORE_IOPAD(0x3494, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a21.mmc2_dat6 */ - DRA7XX_CORE_IOPAD(0x3498, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a22.mmc2_dat7 */ - >; - }; -}; &i2c1 { status = "okay"; clock-frequency = <400000>; @@ -570,7 +543,12 @@ hdmi_out: endpoint { }; }; -&pcie1 { +&pcie1_rc { + status = "ok"; + gpios = <&gpio2 8 GPIO_ACTIVE_LOW>; +}; + +&pcie1_ep { gpios = <&gpio2 8 GPIO_ACTIVE_LOW>; }; diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts b/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts index 39a92aff0a0d..5a77b334923d 100644 --- a/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts +++ b/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts @@ -19,8 +19,23 @@ &tpd12s015 { }; &mmc1 { + pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104"; + pinctrl-0 = <&mmc1_pins_default>; + pinctrl-1 = <&mmc1_pins_hs>; + pinctrl-2 = <&mmc1_pins_sdr12>; + pinctrl-3 = <&mmc1_pins_sdr25>; + pinctrl-4 = <&mmc1_pins_sdr50>; + pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev11_conf>; + pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev11_conf>; vmmc-supply = <&vdd_3v3>; - vmmc-aux-supply = <&ldo1_reg>; + vqmmc-supply = <&ldo1_reg>; +}; + +&mmc2 { + pinctrl-names = "default", "hs", "ddr_1_8v"; + pinctrl-0 = <&mmc2_pins_default>; + pinctrl-1 = <&mmc2_pins_hs>; + pinctrl-2 = <&mmc2_pins_ddr_3_3v_rev11 &mmc2_iodelay_ddr_3_3v_rev11_conf>; }; /* errata i880 "Ethernet RGMII2 Limited to 10/100 Mbps" */ diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts b/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts new file mode 100644 index 000000000000..17c41da3b55f --- /dev/null +++ b/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2014-2017 Texas Instruments Incorporated - http://www.ti.com/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include "am57xx-beagle-x15-common.dtsi" + +/ { + model = "TI AM5728 BeagleBoard-X15 rev C"; +}; + +&tpd12s015 { + gpios = <&gpio7 10 GPIO_ACTIVE_HIGH>, /* gpio7_10, CT CP HPD */ + <&gpio2 30 GPIO_ACTIVE_HIGH>, /* gpio2_30, LS OE */ + <&gpio7 12 GPIO_ACTIVE_HIGH>; /* gpio7_12/sp1_cs2, HPD */ +}; + +&mmc1 { + pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104"; + pinctrl-0 = <&mmc1_pins_default>; + pinctrl-1 = <&mmc1_pins_hs>; + pinctrl-2 = <&mmc1_pins_sdr12>; + pinctrl-3 = <&mmc1_pins_sdr25>; + pinctrl-4 = <&mmc1_pins_sdr50>; + pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev20_conf>; + pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>; + vmmc-supply = <&vdd_3v3>; + vqmmc-supply = <&ldo1_reg>; +}; + +&mmc2 { + pinctrl-names = "default", "hs", "ddr_1_8v"; + pinctrl-0 = <&mmc2_pins_default>; + pinctrl-1 = <&mmc2_pins_hs>; + pinctrl-2 = <&mmc2_pins_ddr_rev20>; +}; diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts index 19a60a11c198..d6689106d2a8 100644 --- a/arch/arm/boot/dts/am57xx-beagle-x15.dts +++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts @@ -20,9 +20,20 @@ &tpd12s015 { }; &mmc1 { + pinctrl-names = "default", "hs"; + pinctrl-0 = <&mmc1_pins_default>; + pinctrl-1 = <&mmc1_pins_hs>; + vmmc-supply = <&ldo1_reg>; }; +&mmc2 { + pinctrl-names = "default", "hs", "ddr_1_8v"; + pinctrl-0 = <&mmc2_pins_default>; + pinctrl-1 = <&mmc2_pins_hs>; + pinctrl-2 = <&mmc2_pins_ddr_3_3v_rev11 &mmc2_iodelay_ddr_3_3v_rev11_conf>; +}; + /* errata i880 "Ethernet RGMII2 Limited to 10/100 Mbps" */ &phy1 { max-speed = <100>; diff --git a/arch/arm/boot/dts/am57xx-idk-common.dtsi b/arch/arm/boot/dts/am57xx-idk-common.dtsi index c536b2f5389f..97aa8e6a56da 100644 --- a/arch/arm/boot/dts/am57xx-idk-common.dtsi +++ b/arch/arm/boot/dts/am57xx-idk-common.dtsi @@ -399,6 +399,14 @@ &usb2 { dr_mode = "peripheral"; }; +&mmc1 { + status = "okay"; + vmmc-supply = <&v3_3d>; + vqmmc-supply = <&ldo1_reg>; + bus-width = <4>; + cd-gpios = <&gpio6 27 GPIO_ACTIVE_LOW>; /* gpio 219 */ +}; + &mmc2 { status = "okay"; vmmc-supply = <&v3_3d>; diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi index f9cf1273f35e..b1cf5a26f3c2 100644 --- a/arch/arm/boot/dts/armada-370.dtsi +++ b/arch/arm/boot/dts/armada-370.dtsi @@ -72,7 +72,7 @@ bootrom { reg = ; }; - pciec: pcie-controller@82000000 { + pciec: pcie@82000000 { compatible = "marvell,armada-370-pcie"; status = "disabled"; device_type = "pci"; @@ -100,6 +100,7 @@ pcie0: pcie@1,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x1 0 1 0 0x81000000 0 0 0x81000000 0x1 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &mpic 58>; marvell,pcie-port = <0>; @@ -117,6 +118,7 @@ pcie2: pcie@2,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x2 0 1 0 0x81000000 0 0 0x81000000 0x2 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &mpic 62>; marvell,pcie-port = <1>; diff --git a/arch/arm/boot/dts/armada-375.dtsi b/arch/arm/boot/dts/armada-375.dtsi index 50c5e8417802..7225c7ce9a8d 100644 --- a/arch/arm/boot/dts/armada-375.dtsi +++ b/arch/arm/boot/dts/armada-375.dtsi @@ -582,7 +582,7 @@ coredivclk: corediv-clock@e8250 { }; }; - pciec: pcie-controller@82000000 { + pciec: pcie@82000000 { compatible = "marvell,armada-370-pcie"; status = "disabled"; device_type = "pci"; @@ -610,6 +610,7 @@ pcie0: pcie@1,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x1 0 1 0 
0x81000000 0 0 0x81000000 0x1 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &gic GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>; marvell,pcie-port = <0>; @@ -627,6 +628,7 @@ pcie1: pcie@2,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x2 0 1 0 0x81000000 0 0 0x81000000 0x2 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &gic GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>; marvell,pcie-port = <0>; diff --git a/arch/arm/boot/dts/armada-380.dtsi b/arch/arm/boot/dts/armada-380.dtsi index e392f6036f39..132596fd0860 100644 --- a/arch/arm/boot/dts/armada-380.dtsi +++ b/arch/arm/boot/dts/armada-380.dtsi @@ -71,7 +71,7 @@ pinctrl@18000 { }; }; - pcie-controller { + pcie { compatible = "marvell,armada-370-pcie"; status = "disabled"; device_type = "pci"; @@ -104,6 +104,7 @@ pcie@1,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x1 0 1 0 0x81000000 0 0 0x81000000 0x1 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &gic GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>; marvell,pcie-port = <0>; @@ -122,6 +123,7 @@ pcie@2,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x2 0 1 0 0x81000000 0 0 0x81000000 0x2 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &gic GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>; marvell,pcie-port = <1>; @@ -140,6 +142,7 @@ pcie@3,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x3 0 1 0 0x81000000 0 0 0x81000000 0x3 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &gic GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>; marvell,pcie-port = <2>; diff --git a/arch/arm/boot/dts/armada-385-db-ap.dts b/arch/arm/boot/dts/armada-385-db-ap.dts index db5b9f6b615d..25d2d720dc0e 100644 --- a/arch/arm/boot/dts/armada-385-db-ap.dts +++ b/arch/arm/boot/dts/armada-385-db-ap.dts @@ -209,7 +209,7 @@ bm-bppi { status = "okay"; }; - pcie-controller { + pcie { status = "okay"; /* diff --git a/arch/arm/boot/dts/armada-385-turris-omnia.dts b/arch/arm/boot/dts/armada-385-turris-omnia.dts index be16ce39fb3d..06831e1e3f80 100644 --- a/arch/arm/boot/dts/armada-385-turris-omnia.dts +++ b/arch/arm/boot/dts/armada-385-turris-omnia.dts @@ -96,7 +96,7 @@ usb3@f8000 { }; }; - pcie-controller { + pcie { status = "okay"; pcie@1,0 { diff --git a/arch/arm/boot/dts/armada-385.dtsi b/arch/arm/boot/dts/armada-385.dtsi index 7fcc4c4885cf..74863aff01c6 100644 --- a/arch/arm/boot/dts/armada-385.dtsi +++ b/arch/arm/boot/dts/armada-385.dtsi @@ -70,7 +70,7 @@ cpu@1 { }; soc { - pciec: pcie-controller { + pciec: pcie { compatible = "marvell,armada-370-pcie"; status = "disabled"; device_type = "pci"; @@ -109,6 +109,7 @@ pcie1: pcie@1,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x1 0 1 0 0x81000000 0 0 0x81000000 0x1 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &gic GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>; marvell,pcie-port = <0>; @@ -127,6 +128,7 @@ pcie2: pcie@2,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x2 0 1 0 0x81000000 0 0 0x81000000 0x2 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &gic GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>; marvell,pcie-port = <1>; @@ -145,6 +147,7 @@ pcie3: pcie@3,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x3 0 1 0 0x81000000 0 0 0x81000000 0x3 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &gic GIC_SPI 70 
IRQ_TYPE_LEVEL_HIGH>; marvell,pcie-port = <2>; @@ -166,6 +169,7 @@ pcie4: pcie@4,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x4 0 1 0 0x81000000 0 0 0x81000000 0x4 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &gic GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>; marvell,pcie-port = <3>; diff --git a/arch/arm/boot/dts/armada-388-clearfog.dts b/arch/arm/boot/dts/armada-388-clearfog.dts index 0d5f1f062275..ee7b0089eff0 100644 --- a/arch/arm/boot/dts/armada-388-clearfog.dts +++ b/arch/arm/boot/dts/armada-388-clearfog.dts @@ -62,7 +62,7 @@ usb3@f0000 { }; }; - pcie-controller { + pcie { pcie@3,0 { /* Port 2, Lane 0. CON2, nearest CPU. */ reset-gpios = <&expander0 2 GPIO_ACTIVE_LOW>; diff --git a/arch/arm/boot/dts/armada-388-clearfog.dtsi b/arch/arm/boot/dts/armada-388-clearfog.dtsi index 0f5938bede53..68acfc968706 100644 --- a/arch/arm/boot/dts/armada-388-clearfog.dtsi +++ b/arch/arm/boot/dts/armada-388-clearfog.dtsi @@ -104,7 +104,7 @@ usb3@f8000 { }; }; - pcie-controller { + pcie { status = "okay"; /* * The two PCIe units are accessible through diff --git a/arch/arm/boot/dts/armada-388-db.dts b/arch/arm/boot/dts/armada-388-db.dts index 1ac923826445..a4ec1fa37529 100644 --- a/arch/arm/boot/dts/armada-388-db.dts +++ b/arch/arm/boot/dts/armada-388-db.dts @@ -172,7 +172,7 @@ bm-bppi { status = "okay"; }; - pcie-controller { + pcie { status = "okay"; /* * The two PCIe units are accessible through diff --git a/arch/arm/boot/dts/armada-388-gp.dts b/arch/arm/boot/dts/armada-388-gp.dts index 563901e0ec07..f503955dbd3b 100644 --- a/arch/arm/boot/dts/armada-388-gp.dts +++ b/arch/arm/boot/dts/armada-388-gp.dts @@ -240,7 +240,7 @@ bm-bppi { status = "okay"; }; - pcie-controller { + pcie { status = "okay"; /* * One PCIe units is accessible through diff --git a/arch/arm/boot/dts/armada-388-rd.dts b/arch/arm/boot/dts/armada-388-rd.dts index af82f275eac2..9cc3ca0376b9 100644 --- a/arch/arm/boot/dts/armada-388-rd.dts +++ b/arch/arm/boot/dts/armada-388-rd.dts @@ -117,7 +117,7 @@ usb3@f0000 { }; }; - pcie-controller { + pcie { status = "okay"; /* * One PCIe units is accessible through diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi index af31f5d6c0e5..7ff0811e61db 100644 --- a/arch/arm/boot/dts/armada-38x.dtsi +++ b/arch/arm/boot/dts/armada-38x.dtsi @@ -154,6 +154,13 @@ scu@c000 { reg = <0xc000 0x58>; }; + timer@c200 { + compatible = "arm,cortex-a9-global-timer"; + reg = <0xc200 0x20>; + interrupts = ; + clocks = <&coreclk 2>; + }; + timer@c600 { compatible = "arm,cortex-a9-twd-timer"; reg = <0xc600 0x20>; diff --git a/arch/arm/boot/dts/armada-390-db.dts b/arch/arm/boot/dts/armada-390-db.dts index 2afed2ce4741..c718a5242595 100644 --- a/arch/arm/boot/dts/armada-390-db.dts +++ b/arch/arm/boot/dts/armada-390-db.dts @@ -123,7 +123,7 @@ usb3@f8000 { }; }; - pcie-controller { + pcie { status = "okay"; /* CON30 */ diff --git a/arch/arm/boot/dts/armada-395-gp.dts b/arch/arm/boot/dts/armada-395-gp.dts index 2cdbba804c1e..ef491b524fd6 100644 --- a/arch/arm/boot/dts/armada-395-gp.dts +++ b/arch/arm/boot/dts/armada-395-gp.dts @@ -139,7 +139,7 @@ usb3@f0000 { }; }; - pcie-controller { + pcie { status = "okay"; /* diff --git a/arch/arm/boot/dts/armada-398-db.dts b/arch/arm/boot/dts/armada-398-db.dts index e8604281c3c9..f0e0379f7619 100644 --- a/arch/arm/boot/dts/armada-398-db.dts +++ b/arch/arm/boot/dts/armada-398-db.dts @@ -118,7 +118,7 @@ usb3@f8000 { }; }; - pcie-controller { + pcie { status = "okay"; pcie@1,0 { diff --git 
a/arch/arm/boot/dts/armada-39x.dtsi b/arch/arm/boot/dts/armada-39x.dtsi index 60fbfd5907c7..ea657071e278 100644 --- a/arch/arm/boot/dts/armada-39x.dtsi +++ b/arch/arm/boot/dts/armada-39x.dtsi @@ -442,7 +442,7 @@ thermal@e8078 { }; }; - pcie-controller { + pcie { compatible = "marvell,armada-370-pcie"; status = "disabled"; device_type = "pci"; @@ -481,6 +481,7 @@ pcie@1,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x1 0 1 0 0x81000000 0 0 0x81000000 0x1 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &gic GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>; marvell,pcie-port = <0>; @@ -499,6 +500,7 @@ pcie@2,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x2 0 1 0 0x81000000 0 0 0x81000000 0x2 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &gic GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>; marvell,pcie-port = <1>; @@ -517,6 +519,7 @@ pcie@3,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x3 0 1 0 0x81000000 0 0 0x81000000 0x3 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &gic GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>; marvell,pcie-port = <2>; @@ -538,6 +541,7 @@ pcie@4,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x4 0 1 0 0x81000000 0 0 0x81000000 0x4 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &gic GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>; marvell,pcie-port = <3>; diff --git a/arch/arm/boot/dts/armada-xp-98dx3236.dtsi b/arch/arm/boot/dts/armada-xp-98dx3236.dtsi index be22ec5236ac..bdd4c7a45fbf 100644 --- a/arch/arm/boot/dts/armada-xp-98dx3236.dtsi +++ b/arch/arm/boot/dts/armada-xp-98dx3236.dtsi @@ -91,7 +91,7 @@ bootrom { /* * 98DX3236 has 1 x1 PCIe unit Gen2.0 */ - pciec: pcie-controller@82000000 { + pciec: pcie@82000000 { compatible = "marvell,armada-xp-pcie"; status = "disabled"; device_type = "pci"; @@ -116,6 +116,7 @@ pcie1: pcie@1,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x1 0 1 0 0x81000000 0 0 0x81000000 0x1 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &mpic 58>; marvell,pcie-port = <0>; diff --git a/arch/arm/boot/dts/armada-xp-db.dts b/arch/arm/boot/dts/armada-xp-db.dts index a33974254d8c..065282c21789 100644 --- a/arch/arm/boot/dts/armada-xp-db.dts +++ b/arch/arm/boot/dts/armada-xp-db.dts @@ -242,7 +242,7 @@ pcie@9,0 { /* Port 2, Lane 0 */ status = "okay"; }; - pcie@10,0 { + pcie@a,0 { /* Port 3, Lane 0 */ status = "okay"; }; diff --git a/arch/arm/boot/dts/armada-xp-gp.dts b/arch/arm/boot/dts/armada-xp-gp.dts index d62bf7bea1df..ac9eab8ac186 100644 --- a/arch/arm/boot/dts/armada-xp-gp.dts +++ b/arch/arm/boot/dts/armada-xp-gp.dts @@ -227,7 +227,7 @@ pcie@9,0 { /* Port 2, Lane 0 */ status = "okay"; }; - pcie@10,0 { + pcie@a,0 { /* Port 3, Lane 0 */ status = "okay"; }; diff --git a/arch/arm/boot/dts/armada-xp-mv78230.dtsi b/arch/arm/boot/dts/armada-xp-mv78230.dtsi index 9f25814077f2..129738f7973d 100644 --- a/arch/arm/boot/dts/armada-xp-mv78230.dtsi +++ b/arch/arm/boot/dts/armada-xp-mv78230.dtsi @@ -86,7 +86,7 @@ soc { * configured as x4 or quad x1 lanes. One unit is * x1 only. 
*/ - pciec: pcie-controller@82000000 { + pciec: pcie@82000000 { compatible = "marvell,armada-xp-pcie"; status = "disabled"; device_type = "pci"; @@ -123,6 +123,7 @@ pcie1: pcie@1,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x1 0 1 0 0x81000000 0 0 0x81000000 0x1 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &mpic 58>; marvell,pcie-port = <0>; @@ -140,6 +141,7 @@ pcie2: pcie@2,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x2 0 1 0 0x81000000 0 0 0x81000000 0x2 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &mpic 59>; marvell,pcie-port = <0>; @@ -157,6 +159,7 @@ pcie3: pcie@3,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x3 0 1 0 0x81000000 0 0 0x81000000 0x3 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &mpic 60>; marvell,pcie-port = <0>; @@ -174,6 +177,7 @@ pcie4: pcie@4,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x4 0 1 0 0x81000000 0 0 0x81000000 0x4 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &mpic 61>; marvell,pcie-port = <0>; @@ -191,6 +195,7 @@ pcie5: pcie@5,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x5 0 1 0 0x81000000 0 0 0x81000000 0x5 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &mpic 62>; marvell,pcie-port = <1>; diff --git a/arch/arm/boot/dts/armada-xp-mv78260.dtsi b/arch/arm/boot/dts/armada-xp-mv78260.dtsi index 2bfe07aebf1a..e58d597e37b9 100644 --- a/arch/arm/boot/dts/armada-xp-mv78260.dtsi +++ b/arch/arm/boot/dts/armada-xp-mv78260.dtsi @@ -87,7 +87,7 @@ soc { * configured as x4 or quad x1 lanes. One unit is * x4 only. 
*/ - pciec: pcie-controller@82000000 { + pciec: pcie@82000000 { compatible = "marvell,armada-xp-pcie"; status = "disabled"; device_type = "pci"; @@ -138,6 +138,7 @@ pcie1: pcie@1,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x1 0 1 0 0x81000000 0 0 0x81000000 0x1 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &mpic 58>; marvell,pcie-port = <0>; @@ -155,6 +156,7 @@ pcie2: pcie@2,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x2 0 1 0 0x81000000 0 0 0x81000000 0x2 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &mpic 59>; marvell,pcie-port = <0>; @@ -172,6 +174,7 @@ pcie3: pcie@3,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x3 0 1 0 0x81000000 0 0 0x81000000 0x3 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &mpic 60>; marvell,pcie-port = <0>; @@ -189,6 +192,7 @@ pcie4: pcie@4,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x4 0 1 0 0x81000000 0 0 0x81000000 0x4 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &mpic 61>; marvell,pcie-port = <0>; @@ -206,6 +210,7 @@ pcie5: pcie@5,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x5 0 1 0 0x81000000 0 0 0x81000000 0x5 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &mpic 62>; marvell,pcie-port = <1>; @@ -223,6 +228,7 @@ pcie6: pcie@6,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x6 0 1 0 0x81000000 0 0 0x81000000 0x6 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &mpic 63>; marvell,pcie-port = <1>; @@ -240,6 +246,7 @@ pcie7: pcie@7,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x7 0 1 0 0x81000000 0 0 0x81000000 0x7 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &mpic 64>; marvell,pcie-port = <1>; @@ -257,6 +264,7 @@ pcie8: pcie@8,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x8 0 1 0 0x81000000 0 0 0x81000000 0x8 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &mpic 65>; marvell,pcie-port = <1>; @@ -274,6 +282,7 @@ pcie9: pcie@9,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x9 0 1 0 0x81000000 0 0 0x81000000 0x9 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &mpic 99>; marvell,pcie-port = <2>; diff --git a/arch/arm/boot/dts/armada-xp-mv78460.dtsi b/arch/arm/boot/dts/armada-xp-mv78460.dtsi index 6c33935f7074..a5c961cee7de 100644 --- a/arch/arm/boot/dts/armada-xp-mv78460.dtsi +++ b/arch/arm/boot/dts/armada-xp-mv78460.dtsi @@ -104,7 +104,7 @@ soc { * configured as x4 or quad x1 lanes. Two units are * x4/x1. 
*/ - pciec: pcie-controller@82000000 { + pciec: pcie@82000000 { compatible = "marvell,armada-xp-pcie"; status = "disabled"; device_type = "pci"; @@ -159,6 +159,7 @@ pcie1: pcie@1,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x1 0 1 0 0x81000000 0 0 0x81000000 0x1 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &mpic 58>; marvell,pcie-port = <0>; @@ -176,6 +177,7 @@ pcie2: pcie@2,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x2 0 1 0 0x81000000 0 0 0x81000000 0x2 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &mpic 59>; marvell,pcie-port = <0>; @@ -193,6 +195,7 @@ pcie3: pcie@3,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x3 0 1 0 0x81000000 0 0 0x81000000 0x3 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &mpic 60>; marvell,pcie-port = <0>; @@ -210,6 +213,7 @@ pcie4: pcie@4,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x4 0 1 0 0x81000000 0 0 0x81000000 0x4 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &mpic 61>; marvell,pcie-port = <0>; @@ -227,6 +231,7 @@ pcie5: pcie@5,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x5 0 1 0 0x81000000 0 0 0x81000000 0x5 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &mpic 62>; marvell,pcie-port = <1>; @@ -244,6 +249,7 @@ pcie6: pcie@6,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x6 0 1 0 0x81000000 0 0 0x81000000 0x6 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &mpic 63>; marvell,pcie-port = <1>; @@ -261,6 +267,7 @@ pcie7: pcie@7,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x7 0 1 0 0x81000000 0 0 0x81000000 0x7 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &mpic 64>; marvell,pcie-port = <1>; @@ -278,6 +285,7 @@ pcie8: pcie@8,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x8 0 1 0 0x81000000 0 0 0x81000000 0x8 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &mpic 65>; marvell,pcie-port = <1>; @@ -295,6 +303,7 @@ pcie9: pcie@9,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x9 0 1 0 0x81000000 0 0 0x81000000 0x9 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &mpic 99>; marvell,pcie-port = <2>; @@ -303,7 +312,7 @@ pcie9: pcie@9,0 { status = "disabled"; }; - pcie10: pcie@10,0 { + pcie10: pcie@a,0 { device_type = "pci"; assigned-addresses = <0x82005000 0 0x82000 0 0x2000>; reg = <0x5000 0 0 0 0>; @@ -312,6 +321,7 @@ pcie10: pcie@10,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0xa 0 1 0 0x81000000 0 0 0x81000000 0xa 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &mpic 103>; marvell,pcie-port = <3>; diff --git a/arch/arm/boot/dts/aspeed-g4.dtsi b/arch/arm/boot/dts/aspeed-g4.dtsi index 8a04c7e2d818..22b958537d31 100644 --- a/arch/arm/boot/dts/aspeed-g4.dtsi +++ b/arch/arm/boot/dts/aspeed-g4.dtsi @@ -26,7 +26,7 @@ ahb { fmc: flash-controller@1e620000 { reg = < 0x1e620000 0x94 - 0x20000000 0x02000000 >; + 0x20000000 0x10000000 >; #address-cells = <1>; #size-cells = <0>; compatible = "aspeed,ast2400-fmc"; @@ -41,7 +41,7 @@ flash@0 { spi: flash-controller@1e630000 { reg = < 0x1e630000 0x18 - 0x30000000 0x02000000 >; + 0x30000000 
0x10000000 >; #address-cells = <1>; #size-cells = <0>; compatible = "aspeed,ast2400-spi"; diff --git a/arch/arm/boot/dts/at91-sama5d27_som1.dtsi b/arch/arm/boot/dts/at91-sama5d27_som1.dtsi new file mode 100644 index 000000000000..63a5af898165 --- /dev/null +++ b/arch/arm/boot/dts/at91-sama5d27_som1.dtsi @@ -0,0 +1,102 @@ +/* + * at91-sama5d27_som1.dtsi - Device Tree file for SAMA5D27 SoM1 board + * + * Copyright (c) 2017, Microchip Technology Inc. + * 2017 Cristian Birsan + * 2017 Claudiu Beznea + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include "sama5d2.dtsi" +#include "sama5d2-pinfunc.h" + +/ { + model = "Atmel SAMA5D27 SoM1"; + compatible = "atmel,sama5d27-som1", "atmel,sama5d27", "atmel,sama5d2", "atmel,sama5"; + + clocks { + slow_xtal { + clock-frequency = <32768>; + }; + + main_xtal { + clock-frequency = <24000000>; + }; + }; + + ahb { + apb { + macb0: ethernet@f8008000 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_macb0_default>; + phy-mode = "rmii"; + + ethernet-phy@1 { + reg = <0x1>; + interrupt-parent = <&pioA>; + interrupts = ; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_macb0_phy_irq>; + }; + }; + + pinctrl@fc038000 { + + pinctrl_macb0_default: macb0_default { + pinmux = , + , + , + , + , + , + , + , + , + ; + bias-disable; + }; + + pinctrl_macb0_phy_irq: macb0_phy_irq { + pinmux = ; + bias-disable; + }; + }; + }; + }; +}; diff --git a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts new file mode 100644 index 000000000000..60cb084a8d92 --- /dev/null +++ b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts @@ -0,0 +1,540 @@ +/* + * at91-sama5d27_som1_ek.dts - Device Tree file for SAMA5D27-SOM1-EK board + * + * Copyright (c) 2017, Microchip Technology Inc. + * 2016 Nicolas Ferre + * 2017 Cristian Birsan + * 2017 Claudiu Beznea + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +/dts-v1/; +#include "at91-sama5d27_som1.dtsi" +#include +#include + +/ { + model = "Atmel SAMA5D27 SOM1 EK"; + compatible = "atmel,sama5d27-som1-ek", "atmel,sama5d27-som1", "atmel,sama5d27", "atmel,sama5d2", "atmel,sama5"; + + chosen { + stdout-path = "serial0:115200n8"; + }; + + ahb { + usb0: gadget@00300000 { + atmel,vbus-gpio = <&pioA PIN_PD20 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_usba_vbus>; + status = "okay"; + }; + + usb1: ohci@00400000 { + num-ports = <3>; + atmel,vbus-gpio = <0 /* &pioA PIN_PD20 GPIO_ACTIVE_HIGH */ + &pioA PIN_PA27 GPIO_ACTIVE_HIGH + 0 + >; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_usb_default>; + status = "okay"; + }; + + usb2: ehci@00500000 { + status = "okay"; + }; + + sdmmc0: sdio-host@a0000000 { + bus-width = <8>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_sdmmc0_default>; + status = "okay"; + }; + + sdmmc1: sdio-host@b0000000 { + bus-width = <4>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_sdmmc1_default>; + status = "okay"; + }; + + apb { + isc: isc@f0008000 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_isc_base &pinctrl_isc_data_8bit &pinctrl_isc_data_9_10 &pinctrl_isc_data_11_12>; + status = "okay"; + }; + + spi0: spi@f8000000 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_spi0_default>; + status = "okay"; + }; + + macb0: ethernet@f8008000 { + status = "okay"; + }; + + uart1: serial@f8020000 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart1_default>; + atmel,use-dma-rx; + atmel,use-dma-tx; + status = "okay"; + }; + + uart2: serial@f8024000 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_mikrobus2_uart>; + atmel,use-dma-rx; + atmel,use-dma-tx; + status = "okay"; + }; + + pwm0: pwm@f802c000 { + status = "okay"; + }; + + flx1: flexcom@f8038000 { + atmel,flexcom-mode = ; + status = "disabled"; + + i2c2: i2c@600 { + compatible = "atmel,sama5d2-i2c"; + reg = <0x600 0x200>; + interrupts = <20 IRQ_TYPE_LEVEL_HIGH 7>; + dmas = <0>, <0>; + dma-names = "tx", "rx"; + #address-cells = <1>; + #size-cells = <0>; + clocks = <&flx1_clk>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_mikrobus_i2c>; + atmel,fifo-size = <16>; + status = "disabled"; + }; + }; + + shdwc@f8048010 { + atmel,shdwc-debouncer = <976>; + atmel,wakeup-rtc-timer; + + input@0 { + reg = <0>; + atmel,wakeup-type = "low"; + }; + }; + + watchdog@f8048040 { + status = "okay"; + }; + + can0: can@f8054000 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_can0_default>; + }; + + uart3: serial@fc008000 { + atmel,use-dma-rx; + atmel,use-dma-tx; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart3_default>; + status = "disabled"; + }; + + uart4: serial@fc00c000 { + atmel,use-dma-rx; + atmel,use-dma-tx; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_mikrobus1_uart>; + status = "okay"; + }; + + flx3: flexcom@fc014000 { + atmel,flexcom-mode = ; + status = "disabled"; + + uart7: serial@200 { + compatible = "atmel,at91sam9260-usart"; + reg = <0x200 0x200>; + interrupts = <22 IRQ_TYPE_LEVEL_HIGH 7>; + clocks = <&flx3_clk>; + clock-names = "usart"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_flx3_default>; + atmel,fifo-size = <32>; + status = "disabled"; + }; + + spi2: spi@400 { + compatible = "atmel,at91rm9200-spi"; + reg = <0x400 0x200>; + interrupts = <22 IRQ_TYPE_LEVEL_HIGH 7>; + clocks = <&flx3_clk>; + clock-names = "spi_clk"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_flx3_default>; + atmel,fifo-size = <16>; + status = "disabled"; + }; + }; + + flx4: flexcom@fc018000 { 
+ atmel,flexcom-mode = ; + status = "okay"; + + uart6: serial@200 { + compatible = "atmel,at91sam9260-usart"; + reg = <0x200 0x200>; + interrupts = <23 IRQ_TYPE_LEVEL_HIGH 7>; + clocks = <&flx4_clk>; + clock-names = "usart"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_flx4_default>; + atmel,fifo-size = <32>; + status = "disabled"; + }; + + spi3: spi@400 { + compatible = "atmel,at91rm9200-spi"; + reg = <0x400 0x200>; + interrupts = <23 IRQ_TYPE_LEVEL_HIGH 7>; + clocks = <&flx4_clk>; + clock-names = "spi_clk"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_mikrobus_spi &pinctrl_mikrobus1_spi_cs &pinctrl_mikrobus2_spi_cs>; + atmel,fifo-size = <16>; + status = "okay"; + }; + + i2c3: i2c@600 { + compatible = "atmel,sama5d2-i2c"; + reg = <0x600 0x200>; + interrupts = <23 IRQ_TYPE_LEVEL_HIGH 7>; + dmas = <0>, <0>; + dma-names = "tx", "rx"; + #address-cells = <1>; + #size-cells = <0>; + clocks = <&flx4_clk>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_flx4_default>; + atmel,fifo-size = <16>; + status = "disabled"; + }; + }; + + i2c1: i2c@fc028000 { + dmas = <0>, <0>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_i2c1_default>; + status = "okay"; + }; + + pinctrl@fc038000 { + + pinctrl_can0_default: can0_default { + pinmux = , + ; + bias-disable; + }; + + pinctrl_can1_default: can1_default { + pinmux = , + ; + bias-disable; + }; + + pinctrl_flx3_default: flx3_default { + pinmux = , + , + , + , + ; + bias-disable; + }; + + pinctrl_i2c1_default: i2c1_default { + pinmux = , + ; + bias-disable; + }; + + pinctrl_isc_base: isc_base { + pinmux = , + , + , + ; + bias-disable; + }; + + pinctrl_isc_data_8bit: isc_data_8bit { + pinmux = , + , + , + , + , + , + , + ; + bias-disable; + }; + + pinctrl_isc_data_9_10: isc_data_9_10 { + pinmux = , + ; + bias-disable; + }; + + pinctrl_isc_data_11_12: isc_data_11_12 { + pinmux = , + ; + bias-disable; + }; + + pinctrl_key_gpio_default: key_gpio_default { + pinmux = ; + bias-pull-up; + }; + + pinctrl_led_gpio_default: led_gpio_default { + pinmux = , + , + ; + bias-pull-up; + }; + + pinctrl_sdmmc0_default: sdmmc0_default { + cmd_data { + pinmux = , + , + , + , + , + , + , + , + ; + bias-pull-up; + }; + + ck_cd_vddsel { + pinmux = , + , + ; + bias-disable; + }; + }; + + pinctrl_sdmmc1_default: sdmmc1_default { + cmd_data { + pinmux = , + , + , + , + ; + bias-pull-up; + }; + + conf-ck_cd { + pinmux = , + ; + bias-disable; + }; + }; + + pinctrl_spi0_default: spi0_default { + pinmux = , + , + , + ; + bias-disable; + }; + + pinctrl_uart1_default: uart1_default { + pinmux = , + ; + bias-disable; + }; + + pinctrl_uart3_default: uart3_default { + pinmux = , + ; + bias-disable; + }; + + pinctrl_usb_default: usb_default { + pinmux = , + ; + bias-disable; + }; + + pinctrl_usba_vbus: usba_vbus { + pinmux = ; + bias-disable; + }; + + pinctrl_mikrobus1_an: mikrobus1_an { + pinmux = ; + bias-disable; + }; + + pinctrl_mikrobus2_an: mikrobus2_an { + pinmux = ; + bias-disable; + }; + + pinctrl_mikrobus1_rst: mikrobus1_rst { + pinmux = ; + bias-disable; + }; + + pinctrl_mikrobus2_rst: mikrobus2_rst { + pinmux = ; + bias-disable; + }; + + pinctrl_mikrobus1_spi_cs: mikrobus1_spi_cs { + pinmux = ; + bias-disable; + }; + + pinctrl_mikrobus2_spi_cs: mikrobus2_spi_cs { + pinmux = ; + bias-disable; + }; + + pinctrl_mikrobus_spi: mikrobus_spi { + pinmux = , + , + ; + bias-disable; + }; + + pinctrl_mikrobus1_pwm: mikrobus1_pwm { + pinmux = ; + bias-disable; + }; + + pinctrl_mikrobus2_pwm: mikrobus2_pwm { + pinmux = ; + bias-disable; + }; + + pinctrl_mikrobus1_int: 
mikrobus1_int { + pinmux = ; + bias-disable; + }; + + pinctrl_mikrobus2_int: mikrobus2_int { + pinmux = ; + bias-disable; + }; + + pinctrl_mikrobus1_uart: mikrobus1_uart { + pinmux = , + ; + bias-disable; + }; + + pinctrl_mikrobus2_uart: mikrobus2_uart { + pinmux = , + ; + bias-disable; + }; + + pinctrl_mikrobus_i2c: mikrobus1_i2c { + pinmux = , + ; + bias-disable; + }; + + pinctrl_flx4_default: flx4_uart_default { + pinmux = , + , + , + , + ; + bias-disable; + }; + }; + + can1: can@fc050000 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_can1_default>; + status = "okay"; + }; + }; + }; + + gpio_keys { + compatible = "gpio-keys"; + + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_key_gpio_default>; + + pb4 { + label = "USER"; + gpios = <&pioA PIN_PA29 GPIO_ACTIVE_LOW>; + linux,code = <0x104>; + }; + }; + + leds { + compatible = "gpio-leds"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_led_gpio_default>; + status = "okay"; + + red { + label = "red"; + gpios = <&pioA PIN_PA10 GPIO_ACTIVE_HIGH>; + }; + + green { + label = "green"; + gpios = <&pioA PIN_PB1 GPIO_ACTIVE_HIGH>; + }; + + blue { + label = "blue"; + gpios = <&pioA PIN_PA31 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "heartbeat"; + }; + }; +}; diff --git a/arch/arm/boot/dts/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/at91-sama5d2_xplained.dts index 2e2c3d1a1fa2..c7e9ccf2bc87 100644 --- a/arch/arm/boot/dts/at91-sama5d2_xplained.dts +++ b/arch/arm/boot/dts/at91-sama5d2_xplained.dts @@ -68,7 +68,7 @@ main_xtal { ahb { usb0: gadget@00300000 { - atmel,vbus-gpio = <&pioA 31 GPIO_ACTIVE_HIGH>; + atmel,vbus-gpio = <&pioA PIN_PA31 GPIO_ACTIVE_HIGH>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usba_vbus>; status = "okay"; @@ -76,8 +76,8 @@ usb0: gadget@00300000 { usb1: ohci@00400000 { num-ports = <3>; - atmel,vbus-gpio = <0 /* &pioA 41 GPIO_ACTIVE_HIGH */ - &pioA 42 GPIO_ACTIVE_HIGH + atmel,vbus-gpio = <0 /* &pioA PIN_PB9 GPIO_ACTIVE_HIGH */ + &pioA PIN_PB10 GPIO_ACTIVE_HIGH 0 >; pinctrl-names = "default"; @@ -127,7 +127,7 @@ macb0: ethernet@f8008000 { ethernet-phy@1 { reg = <0x1>; interrupt-parent = <&pioA>; - interrupts = <73 IRQ_TYPE_LEVEL_LOW>; + interrupts = ; }; }; @@ -160,9 +160,9 @@ pmic@5b { compatible = "active-semi,act8945a"; reg = <0x5b>; active-semi,vsel-high; - active-semi,chglev-gpios = <&pioA 12 GPIO_ACTIVE_HIGH>; - active-semi,lbo-gpios = <&pioA 72 GPIO_ACTIVE_LOW>; - active-semi,irq_gpios = <&pioA 45 GPIO_ACTIVE_LOW>; + active-semi,chglev-gpios = <&pioA PIN_PA12 GPIO_ACTIVE_HIGH>; + active-semi,lbo-gpios = <&pioA PIN_PC8 GPIO_ACTIVE_LOW>; + active-semi,irq_gpios = <&pioA PIN_PB13 GPIO_ACTIVE_LOW>; active-semi,input-voltage-threshold-microvolt = <6600>; active-semi,precondition-timeout = <40>; active-semi,total-timeout = <3>; @@ -355,6 +355,14 @@ pinctrl_charger_lbo: charger_lbo { bias-pull-up; }; + pinctrl_classd_default: classd_default { + pinmux = , + , + , + ; + bias-pull-up; + }; + pinctrl_flx0_default: flx0_default { pinmux = , ; @@ -488,6 +496,14 @@ pinctrl_usba_vbus: usba_vbus { }; + classd: classd@fc048000 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_classd_default>; + atmel,pwm-type = "diff"; + atmel,non-overlap-time = <10>; + status = "okay"; + }; + can1: can@fc050000 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_can1_default>; @@ -504,7 +520,7 @@ gpio_keys { bp1 { label = "PB_USER"; - gpios = <&pioA 41 GPIO_ACTIVE_LOW>; + gpios = <&pioA PIN_PB9 GPIO_ACTIVE_LOW>; linux,code = <0x104>; }; }; @@ -517,17 +533,18 @@ leds { red { label = "red"; - gpios = <&pioA 38 
GPIO_ACTIVE_LOW>; + gpios = <&pioA PIN_PB6 GPIO_ACTIVE_LOW>; }; + green { label = "green"; - gpios = <&pioA 37 GPIO_ACTIVE_LOW>; + gpios = <&pioA PIN_PB5 GPIO_ACTIVE_LOW>; }; blue { label = "blue"; - gpios = <&pioA 32 GPIO_ACTIVE_LOW>; + gpios = <&pioA PIN_PB0 GPIO_ACTIVE_LOW>; linux,default-trigger = "heartbeat"; }; }; diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi index a4808c4fbc05..64fa3f9a39d3 100644 --- a/arch/arm/boot/dts/at91sam9g45.dtsi +++ b/arch/arm/boot/dts/at91sam9g45.dtsi @@ -455,6 +455,16 @@ pinctrl@fffff200 { >; /* shared pinctrl settings */ + ac97 { + pinctrl_ac97: ac97-0 { + atmel,pins = + ; /* AC97CK */ + }; + }; + adc0 { pinctrl_adc0_adtrg: adc0_adtrg { atmel,pins = ; @@ -1043,6 +1053,17 @@ ssc1: ssc@fffa0000 { status = "disabled"; }; + ac97: sound@fffac000 { + compatible = "atmel,at91sam9263-ac97c"; + reg = <0xfffac000 0x4000>; + interrupts = <24 IRQ_TYPE_LEVEL_HIGH 4>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_ac97>; + clocks = <&ac97_clk>; + clock-names = "ac97_clk"; + status = "disabled"; + }; + adc0: adc@fffb0000 { #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm/boot/dts/at91sam9m10g45ek.dts b/arch/arm/boot/dts/at91sam9m10g45ek.dts index 2522c3308305..94c52c555f83 100644 --- a/arch/arm/boot/dts/at91sam9m10g45ek.dts +++ b/arch/arm/boot/dts/at91sam9m10g45ek.dts @@ -166,6 +166,10 @@ usb2: gadget@fff78000 { status = "okay"; }; + ac97: sound@fffac000 { + status = "okay"; + }; + adc0: adc@fffb0000 { pinctrl-names = "default"; pinctrl-0 = < diff --git a/arch/arm/boot/dts/bcm-cygnus.dtsi b/arch/arm/boot/dts/bcm-cygnus.dtsi index bf8c83815753..7c957ea06c66 100644 --- a/arch/arm/boot/dts/bcm-cygnus.dtsi +++ b/arch/arm/boot/dts/bcm-cygnus.dtsi @@ -55,6 +55,11 @@ cpu@0 { /include/ "bcm-cygnus-clock.dtsi" + pmu { + compatible = "arm,cortex-a9-pmu"; + interrupts = ; + }; + core { compatible = "simple-bus"; ranges = <0x00000000 0x19000000 0x1000000>; @@ -119,6 +124,21 @@ pinctrl: pinctrl@0301d0c8 { compatible = "brcm,cygnus-pinmux"; reg = <0x0301d0c8 0x30>, <0x0301d24c 0x2c>; + + spi_0: spi_0 { + function = "spi0"; + groups = "spi0_grp"; + }; + + spi_1: spi_1 { + function = "spi1"; + groups = "spi1_grp"; + }; + + spi_2: spi_2 { + function = "spi2"; + groups = "spi2_grp"; + }; }; mailbox: mailbox@03024024 { @@ -300,6 +320,23 @@ msi1: msi-controller { }; }; + dma0: dma@18018000 { + compatible = "arm,pl330", "arm,primecell"; + reg = <0x18018000 0x1000>; + interrupts = , + , + , + , + , + , + , + , + ; + clocks = <&apb_clk>; + clock-names = "apb_pclk"; + #dma-cells = <1>; + }; + uart0: serial@18020000 { compatible = "snps,dw-apb-uart"; reg = <0x18020000 0x100>; @@ -324,7 +361,7 @@ uart1: serial@18021000 { uart2: serial@18022000 { compatible = "snps,dw-apb-uart"; - reg = <0x18020000 0x100>; + reg = <0x18022000 0x100>; reg-shift = <2>; reg-io-width = <4>; interrupts = ; @@ -344,6 +381,52 @@ uart3: serial@18023000 { status = "disabled"; }; + spi0: spi@18028000 { + compatible = "arm,pl022", "arm,primecell"; + reg = <0x18028000 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = ; + pinctrl-0 = <&spi_0>; + clocks = <&axi81_clk>; + clock-names = "apb_pclk"; + status = "disabled"; + }; + + spi1: spi@18029000 { + compatible = "arm,pl022", "arm,primecell"; + reg = <0x18029000 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = ; + pinctrl-0 = <&spi_1>; + clocks = <&axi81_clk>; + clock-names = "apb_pclk"; + status = "disabled"; + }; + + spi2: spi@1802a000 { + compatible = "arm,pl022", 
"arm,primecell"; + reg = <0x1802a000 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = ; + pinctrl-0 = <&spi_2>; + clocks = <&axi81_clk>; + clock-names = "apb_pclk"; + status = "disabled"; + }; + + sdhci0: sdhci@18041000 { + compatible = "brcm,sdhci-iproc-cygnus"; + reg = <0x18041000 0x100>; + interrupts = ; + clocks = <&lcpll0 BCM_CYGNUS_LCPLL0_SDIO_CLK>; + bus-width = <4>; + sdhci,auto-cmd12; + status = "disabled"; + }; + eth0: ethernet@18042000 { compatible = "brcm,amac"; reg = <0x18042000 0x1000>, @@ -353,6 +436,16 @@ eth0: ethernet@18042000 { status = "disabled"; }; + sdhci1: sdhci@18043000 { + compatible = "brcm,sdhci-iproc-cygnus"; + reg = <0x18043000 0x100>; + interrupts = ; + clocks = <&lcpll0 BCM_CYGNUS_LCPLL0_SDIO_CLK>; + bus-width = <4>; + sdhci,auto-cmd12; + status = "disabled"; + }; + nand: nand@18046000 { compatible = "brcm,nand-iproc", "brcm,brcmnand-v6.1"; reg = <0x18046000 0x600>, <0xf8105408 0x600>, @@ -366,6 +459,33 @@ nand: nand@18046000 { brcm,nand-has-wp; }; + ehci0: usb@18048000 { + compatible = "generic-ehci"; + reg = <0x18048000 0x100>; + interrupts = ; + status = "disabled"; + }; + + ohci0: usb@18048800 { + compatible = "generic-ohci"; + reg = <0x18048800 0x100>; + interrupts = ; + status = "disabled"; + }; + + v3d: v3d@180a2000 { + compatible = "brcm,cygnus-v3d"; + reg = <0x180a2000 0x1000>; + clocks = <&mipipll BCM_CYGNUS_MIPIPLL_CH2_V3D>; + clock-names = "v3d_clk"; + interrupts = ; + status = "disabled"; + }; + + vc4: gpu { + compatible = "brcm,cygnus-vc4"; + }; + gpio_asiu: gpio@180a5000 { compatible = "brcm,cygnus-asiu-gpio"; reg = <0x180a5000 0x668>; @@ -444,19 +564,6 @@ touchscreen: touchscreen@180a6000 { status = "disabled"; }; - v3d: v3d@180a2000 { - compatible = "brcm,cygnus-v3d"; - reg = <0x180a2000 0x1000>; - clocks = <&mipipll BCM_CYGNUS_MIPIPLL_CH2_V3D>; - clock-names = "v3d_clk"; - interrupts = ; - status = "disabled"; - }; - - vc4: gpu { - compatible = "brcm,cygnus-vc4"; - }; - adc: adc@180a6000 { compatible = "brcm,iproc-static-adc"; #io-channel-cells = <1>; @@ -467,5 +574,19 @@ adc: adc@180a6000 { interrupts = ; status = "disabled"; }; + + keypad: keypad@180ac000 { + compatible = "brcm,bcm-keypad"; + reg = <0x180ac000 0x14c>; + interrupts = ; + clocks = <&asiu_clks BCM_CYGNUS_ASIU_KEYPAD_CLK>; + clock-names = "peri_clk"; + clock-frequency = <31250>; + pull-up-enabled; + col-debounce-filter-period = <0>; + status-debounce-filter-period = <0>; + row-output-enabled; + status = "disabled"; + }; }; }; diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi index 7204d1def23d..dff66974feed 100644 --- a/arch/arm/boot/dts/bcm-nsp.dtsi +++ b/arch/arm/boot/dts/bcm-nsp.dtsi @@ -215,6 +215,7 @@ sdio: sdhci@21000 { interrupts = ; sdhci,auto-cmd12; clocks = <&lcpll0 BCM_NSP_LCPLL0_SDIO_CLK>; + dma-coherent; status = "disabled"; }; @@ -224,6 +225,7 @@ amac0: ethernet@22000 { <0x110000 0x1000>; reg-names = "amac_base", "idm_base"; interrupts = ; + dma-coherent; status = "disabled"; }; @@ -233,6 +235,7 @@ amac1: ethernet@23000 { <0x111000 0x1000>; reg-names = "amac_base", "idm_base"; interrupts = ; + dma-coherent; status = "disabled"; }; @@ -242,6 +245,7 @@ amac2: ethernet@24000 { <0x112000 0x1000>; reg-names = "amac_base", "idm_base"; interrupts = ; + dma-coherent; status = "disabled"; }; @@ -252,6 +256,7 @@ mailbox: mailbox@25000 { #mbox-cells = <1>; brcm,rx-status-len = <32>; brcm,use-bcm-hdr; + dma-coherent; }; nand: nand@26000 { @@ -297,6 +302,32 @@ qspi: qspi@27200 { #size-cells = <0>; }; + xhci: usb@29000 { + 
compatible = "generic-xhci"; + reg = <0x29000 0x1000>; + interrupts = ; + phys = <&usb3_phy>; + phy-names = "usb3-phy"; + dma-coherent; + status = "disabled"; + }; + + ehci0: usb@2a000 { + compatible = "generic-ehci"; + reg = <0x2a000 0x100>; + interrupts = ; + dma-coherent; + status = "disabled"; + }; + + ohci0: usb@2b000 { + compatible = "generic-ohci"; + reg = <0x2b000 0x100>; + interrupts = ; + dma-coherent; + status = "disabled"; + }; + crypto@2f000 { compatible = "brcm,spum-nsp-crypto"; reg = <0x2f000 0x900>; @@ -321,20 +352,6 @@ pwm: pwm@31000 { status = "disabled"; }; - ehci0: usb@2a000 { - compatible = "generic-ehci"; - reg = <0x2a000 0x100>; - interrupts = ; - status = "disabled"; - }; - - ohci0: usb@2b000 { - compatible = "generic-ohci"; - reg = <0x2b000 0x100>; - interrupts = ; - status = "disabled"; - }; - rng: rng@33000 { compatible = "brcm,bcm-nsp-rng"; reg = <0x33000 0x14>; @@ -376,6 +393,7 @@ i2c0: i2c@38000 { #size-cells = <0>; interrupts = ; clock-frequency = <100000>; + dma-coherent; status = "disabled"; }; @@ -446,6 +464,7 @@ sata: ahci@41000 { interrupts = ; #address-cells = <1>; #size-cells = <0>; + dma-coherent; status = "disabled"; sata0: sata-port@0 { @@ -460,6 +479,15 @@ sata1: sata-port@1 { phy-names = "sata-phy"; }; }; + + usb3_phy: usb3-phy@104000 { + compatible = "brcm,ns-bx-usb3-phy"; + reg = <0x104000 0x1000>, + <0x032000 0x1000>; + reg-names = "dmp", "ccb-mii"; + #phy-cells = <0>; + status = "disabled"; + }; }; pcie0: pcie@18012000 { @@ -483,6 +511,7 @@ pcie0: pcie@18012000 { */ ranges = <0x82000000 0 0x08000000 0x08000000 0 0x8000000>; + dma-coherent; status = "disabled"; msi-parent = <&msi0>; @@ -519,6 +548,7 @@ pcie1: pcie@18013000 { */ ranges = <0x82000000 0 0x40000000 0x40000000 0 0x8000000>; + dma-coherent; status = "disabled"; msi-parent = <&msi1>; @@ -555,6 +585,7 @@ pcie2: pcie@18014000 { */ ranges = <0x82000000 0 0x48000000 0x48000000 0 0x8000000>; + dma-coherent; status = "disabled"; msi-parent = <&msi2>; diff --git a/arch/arm/boot/dts/bcm2835-rpi-a-plus.dts b/arch/arm/boot/dts/bcm2835-rpi-a-plus.dts index d0704540db6b..9f866491efdf 100644 --- a/arch/arm/boot/dts/bcm2835-rpi-a-plus.dts +++ b/arch/arm/boot/dts/bcm2835-rpi-a-plus.dts @@ -99,3 +99,9 @@ i2s_alt0: i2s_alt0 { &hdmi { hpd-gpios = <&gpio 46 GPIO_ACTIVE_LOW>; }; + +&uart0 { + pinctrl-names = "default"; + pinctrl-0 = <&uart0_gpio14>; + status = "okay"; +}; diff --git a/arch/arm/boot/dts/bcm2835-rpi-a.dts b/arch/arm/boot/dts/bcm2835-rpi-a.dts index 46d078e29017..4b1af06c8dc0 100644 --- a/arch/arm/boot/dts/bcm2835-rpi-a.dts +++ b/arch/arm/boot/dts/bcm2835-rpi-a.dts @@ -94,3 +94,9 @@ i2s_alt2: i2s_alt2 { &hdmi { hpd-gpios = <&gpio 46 GPIO_ACTIVE_HIGH>; }; + +&uart0 { + pinctrl-names = "default"; + pinctrl-0 = <&uart0_gpio14>; + status = "okay"; +}; diff --git a/arch/arm/boot/dts/bcm2835-rpi-b-plus.dts b/arch/arm/boot/dts/bcm2835-rpi-b-plus.dts index 432088ebb0a1..a846f1e781d8 100644 --- a/arch/arm/boot/dts/bcm2835-rpi-b-plus.dts +++ b/arch/arm/boot/dts/bcm2835-rpi-b-plus.dts @@ -101,3 +101,9 @@ i2s_alt0: i2s_alt0 { &hdmi { hpd-gpios = <&gpio 46 GPIO_ACTIVE_LOW>; }; + +&uart0 { + pinctrl-names = "default"; + pinctrl-0 = <&uart0_gpio14>; + status = "okay"; +}; diff --git a/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts b/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts index 4133bc2cd9be..e860964e39fa 100644 --- a/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts +++ b/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts @@ -94,3 +94,9 @@ i2s_alt2: i2s_alt2 { &hdmi { hpd-gpios = <&gpio 46 GPIO_ACTIVE_LOW>; }; + +&uart0 { + 
pinctrl-names = "default"; + pinctrl-0 = <&uart0_gpio14>; + status = "okay"; +}; diff --git a/arch/arm/boot/dts/bcm2835-rpi-b.dts b/arch/arm/boot/dts/bcm2835-rpi-b.dts index 4d56fe3006b0..5d77f3f8c4c5 100644 --- a/arch/arm/boot/dts/bcm2835-rpi-b.dts +++ b/arch/arm/boot/dts/bcm2835-rpi-b.dts @@ -89,3 +89,9 @@ &gpio { &hdmi { hpd-gpios = <&gpio 46 GPIO_ACTIVE_HIGH>; }; + +&uart0 { + pinctrl-names = "default"; + pinctrl-0 = <&uart0_gpio14>; + status = "okay"; +}; diff --git a/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts b/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts new file mode 100644 index 000000000000..82651c3eb682 --- /dev/null +++ b/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts @@ -0,0 +1,139 @@ +/* + * Copyright (C) 2017 Stefan Wahren + * + * The code contained herein is licensed under the GNU General Public + * License. You may obtain a copy of the GNU General Public License + * Version 2 or later at the following locations: + * + * http://www.opensource.org/licenses/gpl-license.html + * http://www.gnu.org/copyleft/gpl.html + */ + +/dts-v1/; +#include "bcm2835.dtsi" +#include "bcm2835-rpi.dtsi" +#include "bcm283x-rpi-usb-host.dtsi" + +/ { + compatible = "raspberrypi,model-zero-w", "brcm,bcm2835"; + model = "Raspberry Pi Zero W"; + + /* Needed by firmware to properly init UARTs */ + aliases { + uart0 = "/soc/serial@7e201000"; + uart1 = "/soc/serial@7e215040"; + serial0 = "/soc/serial@7e201000"; + serial1 = "/soc/serial@7e215040"; + }; + + leds { + act { + gpios = <&gpio 47 GPIO_ACTIVE_HIGH>; + }; + }; + + wifi_pwrseq: wifi-pwrseq { + compatible = "mmc-pwrseq-simple"; + pinctrl-names = "default"; + pinctrl-0 = <&wl_on>; + reset-gpios = <&gpio 41 GPIO_ACTIVE_LOW>; + }; +}; + +&gpio { + /* + * This is based on the official GPU firmware DT blob. + * + * Legend: + * "NC" = not connected (no rail from the SoC) + * "FOO" = GPIO line named "FOO" on the schematic + * "FOO_N" = GPIO line named "FOO" on schematic, active low + */ + gpio-line-names = "GPIO0", + "GPIO1", + "SDA1", + "SCL1", + "GPIO_GCLK", + "GPIO5", + "GPIO6", + "SPI_CE1_N", + "SPI_CE0_N", + "SPI_MISO", + "SPI_MOSI", + "SPI_SCLK", + "GPIO12", + "GPIO13", + /* Serial port */ + "TXD0", + "RXD0", + "GPIO16", + "GPIO17", + "GPIO18", + "GPIO19", + "GPIO20", + "GPIO21", + "GPIO22", + "GPIO23", + "GPIO24", + "GPIO25", + "GPIO26", + "GPIO27", + "SDA0", + "SCL0", + "NC", /* GPIO30 */ + "NC", /* GPIO31 */ + "NC", /* GPIO32 */ + "NC", /* GPIO33 */ + "NC", /* GPIO34 */ + "NC", /* GPIO35 */ + "NC", /* GPIO36 */ + "NC", /* GPIO37 */ + "NC", /* GPIO38 */ + "NC", /* GPIO39 */ + "CAM_GPIO1", /* GPIO40 */ + "WL_ON", /* GPIO41 */ + "NC", /* GPIO42 */ + "WIFI_CLK", /* GPIO43 */ + "CAM_GPIO0", /* GPIO44 */ + "BT_ON", /* GPIO45 */ + "HDMI_HPD_N", + "STATUS_LED_N", + /* Used by SD Card */ + "SD_CLK_R", + "SD_CMD_R", + "SD_DATA0_R", + "SD_DATA1_R", + "SD_DATA2_R", + "SD_DATA3_R"; + + pinctrl-0 = <&gpioout &alt0>; + + wl_on: wl-on { + brcm,pins = <41>; + brcm,function = ; + }; +}; + +&hdmi { + hpd-gpios = <&gpio 46 GPIO_ACTIVE_LOW>; +}; + +&sdhci { + #address-cells = <1>; + #size-cells = <0>; + pinctrl-0 = <&emmc_gpio34 &gpclk2_gpio43>; + mmc-pwrseq = <&wifi_pwrseq>; + non-removable; + status = "okay"; + + brcmf: wifi@1 { + reg = <1>; + compatible = "brcm,bcm4329-fmac"; + }; +}; + +&uart0 { + pinctrl-names = "default"; + pinctrl-0 = <&uart0_gpio14>; + status = "okay"; +}; diff --git a/arch/arm/boot/dts/bcm2835-rpi-zero.dts b/arch/arm/boot/dts/bcm2835-rpi-zero.dts index 79a20d520931..70362405c595 100644 --- a/arch/arm/boot/dts/bcm2835-rpi-zero.dts +++ 
b/arch/arm/boot/dts/bcm2835-rpi-zero.dts @@ -103,3 +103,9 @@ i2s_alt0: i2s_alt0 { &hdmi { hpd-gpios = <&gpio 46 GPIO_ACTIVE_LOW>; }; + +&uart0 { + pinctrl-names = "default"; + pinctrl-0 = <&uart0_gpio14>; + status = "okay"; +}; diff --git a/arch/arm/boot/dts/bcm2835-rpi.dtsi b/arch/arm/boot/dts/bcm2835-rpi.dtsi index e55b362b9d6e..e36c392a2b8f 100644 --- a/arch/arm/boot/dts/bcm2835-rpi.dtsi +++ b/arch/arm/boot/dts/bcm2835-rpi.dtsi @@ -39,7 +39,7 @@ gpioout: gpioout { }; alt0: alt0 { - brcm,pins = <4 5 7 8 9 10 11 14 15>; + brcm,pins = <4 5 7 8 9 10 11>; brcm,function = ; }; }; diff --git a/arch/arm/boot/dts/bcm2836-rpi-2-b.dts b/arch/arm/boot/dts/bcm2836-rpi-2-b.dts index bf19e8cfb9e6..e8de41444b68 100644 --- a/arch/arm/boot/dts/bcm2836-rpi-2-b.dts +++ b/arch/arm/boot/dts/bcm2836-rpi-2-b.dts @@ -39,3 +39,9 @@ i2s_alt0: i2s_alt0 { &hdmi { hpd-gpios = <&gpio 46 GPIO_ACTIVE_LOW>; }; + +&uart0 { + pinctrl-names = "default"; + pinctrl-0 = <&uart0_gpio14>; + status = "okay"; +}; diff --git a/arch/arm/boot/dts/bcm2836.dtsi b/arch/arm/boot/dts/bcm2836.dtsi index da3deeb42592..2c26d0be8b03 100644 --- a/arch/arm/boot/dts/bcm2836.dtsi +++ b/arch/arm/boot/dts/bcm2836.dtsi @@ -36,6 +36,7 @@ timer { cpus: cpus { #address-cells = <1>; #size-cells = <0>; + enable-method = "brcm,bcm2836-smp"; v7_cpu0: cpu@0 { device_type = "cpu"; diff --git a/arch/arm/boot/dts/bcm2837-rpi-3-b.dts b/arch/arm/boot/dts/bcm2837-rpi-3-b.dts index c72a27d908b6..20725ca487f3 100644 --- a/arch/arm/boot/dts/bcm2837-rpi-3-b.dts +++ b/arch/arm/boot/dts/bcm2837-rpi-3-b.dts @@ -1 +1,51 @@ -#include "arm64/broadcom/bcm2837-rpi-3-b.dts" +/dts-v1/; +#include "bcm2837.dtsi" +#include "bcm2835-rpi.dtsi" +#include "bcm283x-rpi-smsc9514.dtsi" +#include "bcm283x-rpi-usb-host.dtsi" + +/ { + compatible = "raspberrypi,3-model-b", "brcm,bcm2837"; + model = "Raspberry Pi 3 Model B"; + + memory { + reg = <0 0x40000000>; + }; + + leds { + act { + gpios = <&gpio 47 0>; + }; + }; +}; + +/* uart0 communicates with the BT module */ +&uart0 { + pinctrl-names = "default"; + pinctrl-0 = <&uart0_gpio32 &gpclk2_gpio43>; + status = "okay"; +}; + +/* uart1 is mapped to the pin header */ +&uart1 { + pinctrl-names = "default"; + pinctrl-0 = <&uart1_gpio14>; + status = "okay"; +}; + +/* SDHCI is used to control the SDIO for wireless */ +&sdhci { + pinctrl-names = "default"; + pinctrl-0 = <&emmc_gpio34>; + status = "okay"; + bus-width = <4>; + non-removable; +}; + +/* SDHOST is used to drive the SD card */ +&sdhost { + pinctrl-names = "default"; + pinctrl-0 = <&sdhost_gpio48>; + status = "okay"; + bus-width = <4>; +}; diff --git a/arch/arm64/boot/dts/broadcom/bcm2837.dtsi b/arch/arm/boot/dts/bcm2837.dtsi similarity index 96% rename from arch/arm64/boot/dts/broadcom/bcm2837.dtsi rename to arch/arm/boot/dts/bcm2837.dtsi index 2d5de6f0f78d..bc1cca5cf43c 100644 --- a/arch/arm64/boot/dts/broadcom/bcm2837.dtsi +++ b/arch/arm/boot/dts/bcm2837.dtsi @@ -30,6 +30,7 @@ timer { cpus: cpus { #address-cells = <1>; #size-cells = <0>; + enable-method = "brcm,bcm2836-smp"; // for ARM 32-bit cpu0: cpu@0 { device_type = "cpu"; diff --git a/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts b/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts index 62e1427b3f10..8b64caabaad8 100644 --- a/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts +++ b/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts @@ -52,6 +52,10 @@ leds { usb { label = "bcm53xx:blue:usb"; gpios = <&hc595 0 GPIO_ACTIVE_HIGH>; + trigger-sources = <&ohci_port1>, <&ehci_port1>, + <&xhci_port1>, <&ohci_port2>, + <&ehci_port2>; + 
linux,default-trigger = "usbport"; }; power0 { diff --git a/arch/arm/boot/dts/bcm4708-netgear-r6250.dts b/arch/arm/boot/dts/bcm4708-netgear-r6250.dts index a5647efe4118..d7c34fa72b4b 100644 --- a/arch/arm/boot/dts/bcm4708-netgear-r6250.dts +++ b/arch/arm/boot/dts/bcm4708-netgear-r6250.dts @@ -48,6 +48,9 @@ power1 { usb { label = "bcm53xx:blue:usb"; gpios = <&chipcommon 8 GPIO_ACTIVE_LOW>; + trigger-sources = <&ohci_port1>, <&ehci_port1>, + <&xhci_port1>; + linux,default-trigger = "usbport"; }; wireless { diff --git a/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts b/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts index 19ee924d7d53..83a4c60bb431 100644 --- a/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts +++ b/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts @@ -42,16 +42,22 @@ power-amber { usb2 { label = "bcm53xx:white:usb2"; gpios = <&chipcommon 3 GPIO_ACTIVE_HIGH>; + trigger-sources = <&ohci_port2>, <&ehci_port2>; + linux,default-trigger = "usbport"; }; usb3-white { label = "bcm53xx:white:usb3"; gpios = <&chipcommon 4 GPIO_ACTIVE_HIGH>; + trigger-sources = <&xhci_port1>; + linux,default-trigger = "usbport"; }; usb3-green { label = "bcm53xx:green:usb3"; gpios = <&chipcommon 5 GPIO_ACTIVE_HIGH>; + trigger-sources = <&ohci_port1>, <&ehci_port1>; + linux,default-trigger = "usbport"; }; wps { diff --git a/arch/arm/boot/dts/bcm47081-tplink-archer-c5-v2.dts b/arch/arm/boot/dts/bcm47081-tplink-archer-c5-v2.dts index a854a5174b7f..3ed8de42cb48 100644 --- a/arch/arm/boot/dts/bcm47081-tplink-archer-c5-v2.dts +++ b/arch/arm/boot/dts/bcm47081-tplink-archer-c5-v2.dts @@ -36,6 +36,8 @@ lan { usb2-port1 { label = "bcm53xx:green:usb2-port1"; gpios = <&chipcommon 2 GPIO_ACTIVE_HIGH>; + trigger-sources = <&ohci_port1>, <&ehci_port1>; + linux,default-trigger = "usbport"; }; power { @@ -67,6 +69,8 @@ wan-amber { usb2-port2 { label = "bcm53xx:green:usb2-port2"; gpios = <&chipcommon 13 GPIO_ACTIVE_HIGH>; + trigger-sources = <&ohci_port2>, <&ehci_port2>; + linux,default-trigger = "usbport"; }; }; diff --git a/arch/arm/boot/dts/bcm4709-tplink-archer-c9-v1.dts b/arch/arm/boot/dts/bcm4709-tplink-archer-c9-v1.dts index 97aa5d59a1d8..ec4a50e440f6 100644 --- a/arch/arm/boot/dts/bcm4709-tplink-archer-c9-v1.dts +++ b/arch/arm/boot/dts/bcm4709-tplink-archer-c9-v1.dts @@ -46,11 +46,16 @@ wps { usb3 { label = "bcm53xx:blue:usb3"; gpios = <&chipcommon 6 GPIO_ACTIVE_HIGH>; + trigger-sources = <&ohci_port1>, <&ehci_port1>, + <&xhci_port1>; + linux,default-trigger = "usbport"; }; usb2 { label = "bcm53xx:blue:usb2"; gpios = <&chipcommon 7 GPIO_ACTIVE_HIGH>; + trigger-sources = <&ohci_port2>, <&ehci_port2>; + linux,default-trigger = "usbport"; }; wan-blue { diff --git a/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts b/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts index 51b0641b5f79..7cc7d344fe5b 100644 --- a/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts +++ b/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts @@ -71,6 +71,9 @@ wan-amber { usb3-white { label = "bcm53xx:white:usb3"; gpios = <&chipcommon 8 GPIO_ACTIVE_LOW>; + trigger-sources = <&ohci_port1>, <&ehci_port1>, + <&xhci_port1>; + linux,default-trigger = "usbport"; }; 2ghz { diff --git a/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts b/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts index 5f8621d00c50..bc1d1e10d4ac 100644 --- a/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts +++ b/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts @@ -59,6 +59,9 @@ lan2 { usb3 { label = "bcm53xx:green:usb3"; gpios = <&chipcommon 8 GPIO_ACTIVE_LOW>; + trigger-sources = <&ohci_port1>, <&ehci_port1>, + 
<&xhci_port1>; + linux,default-trigger = "usbport"; }; status { diff --git a/arch/arm/boot/dts/bcm47189-tenda-ac9.dts b/arch/arm/boot/dts/bcm47189-tenda-ac9.dts index 34417dac1cd0..19e61b5b066c 100644 --- a/arch/arm/boot/dts/bcm47189-tenda-ac9.dts +++ b/arch/arm/boot/dts/bcm47189-tenda-ac9.dts @@ -26,6 +26,8 @@ leds { usb { label = "bcm53xx:blue:usb"; gpios = <&chipcommon 1 GPIO_ACTIVE_HIGH>; + trigger-sources = <&ohci_port1>, <&ehci_port1>; + linux,default-trigger = "usbport"; }; wps { diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi index 98647d22b291..045b9bb857f9 100644 --- a/arch/arm/boot/dts/bcm5301x.dtsi +++ b/arch/arm/boot/dts/bcm5301x.dtsi @@ -272,6 +272,19 @@ ehci: ehci@21000 { reg = <0x00021000 0x1000>; interrupts = ; phys = <&usb2_phy>; + + #address-cells = <1>; + #size-cells = <0>; + + ehci_port1: port@1 { + reg = <1>; + #trigger-source-cells = <0>; + }; + + ehci_port2: port@2 { + reg = <2>; + #trigger-source-cells = <0>; + }; }; ohci: ohci@22000 { @@ -280,6 +293,19 @@ ohci: ohci@22000 { compatible = "generic-ohci"; reg = <0x00022000 0x1000>; interrupts = ; + + #address-cells = <1>; + #size-cells = <0>; + + ohci_port1: port@1 { + reg = <1>; + #trigger-source-cells = <0>; + }; + + ohci_port2: port@2 { + reg = <2>; + #trigger-source-cells = <0>; + }; }; }; @@ -300,6 +326,14 @@ xhci: xhci@23000 { interrupts = ; phys = <&usb3_phy>; phy-names = "usb"; + + #address-cells = <1>; + #size-cells = <0>; + + xhci_port1: port@1 { + reg = <1>; + #trigger-source-cells = <0>; + }; }; }; diff --git a/arch/arm/boot/dts/bcm53573.dtsi b/arch/arm/boot/dts/bcm53573.dtsi index eae623f76401..c698a565b8ae 100644 --- a/arch/arm/boot/dts/bcm53573.dtsi +++ b/arch/arm/boot/dts/bcm53573.dtsi @@ -138,10 +138,12 @@ ehci: ehci@4000 { ehci_port1: port@1 { reg = <1>; + #trigger-source-cells = <0>; }; ehci_port2: port@2 { reg = <2>; + #trigger-source-cells = <0>; }; }; @@ -158,10 +160,12 @@ ohci: ohci@d000 { ohci_port1: port@1 { reg = <1>; + #trigger-source-cells = <0>; }; ohci_port2: port@2 { reg = <2>; + #trigger-source-cells = <0>; }; }; }; diff --git a/arch/arm/boot/dts/bcm911360_entphn.dts b/arch/arm/boot/dts/bcm911360_entphn.dts index 000f5f19215e..53f990defd6a 100644 --- a/arch/arm/boot/dts/bcm911360_entphn.dts +++ b/arch/arm/boot/dts/bcm911360_entphn.dts @@ -39,9 +39,12 @@ / { model = "Cygnus Enterprise Phone (BCM911360_ENTPHN)"; compatible = "brcm,bcm11360", "brcm,cygnus"; + aliases { + serial0 = &uart3; + }; + chosen { - stdout-path = &uart3; - bootargs = "console=ttyS0,115200"; + stdout-path = "serial0:115200n8"; }; gpio_keys { diff --git a/arch/arm/boot/dts/bcm947189acdbmr.dts b/arch/arm/boot/dts/bcm947189acdbmr.dts new file mode 100644 index 000000000000..ef263412fea5 --- /dev/null +++ b/arch/arm/boot/dts/bcm947189acdbmr.dts @@ -0,0 +1,97 @@ +/* + * Copyright (C) 2017 Broadcom + * Author: Florian Fainelli + * + * Licensed under the ISC license. 
+ */ + +/dts-v1/; + +#include "bcm53573.dtsi" + +/ { + compatible = "brcm,bcm947189acdbmr", "brcm,bcm47189", "brcm,bcm53573"; + model = "Broadcom BCM947189ACDBMR"; + + chosen { + bootargs = "console=ttyS0,115200 earlycon"; + }; + + memory { + reg = <0x00000000 0x08000000>; + }; + + leds { + compatible = "gpio-leds"; + + wps { + label = "bcm53xx:blue:wps"; + gpios = <&chipcommon 10 GPIO_ACTIVE_HIGH>; + }; + + 5ghz { + label = "bcm53xx:blue:5ghz"; + gpios = <&chipcommon 11 GPIO_ACTIVE_HIGH>; + }; + + 2ghz { + label = "bcm53xx:blue:2ghz"; + gpios = <&chipcommon 12 GPIO_ACTIVE_HIGH>; + }; + }; + + gpio-keys { + compatible = "gpio-keys"; + #address-cells = <1>; + #size-cells = <0>; + + restart { + label = "Reset"; + linux,code = ; + gpios = <&chipcommon 7 GPIO_ACTIVE_HIGH>; + }; + + wps { + label = "WPS"; + linux,code = ; + gpios = <&chipcommon 9 GPIO_ACTIVE_LOW>; + }; + }; + + spi { + compatible = "spi-gpio"; + num-chipselects = <1>; + gpio-sck = <&chipcommon 21 0>; + gpio-miso = <&chipcommon 22 0>; + gpio-mosi = <&chipcommon 23 0>; + cs-gpios = <&chipcommon 24 0>; + #address-cells = <1>; + #size-cells = <0>; + + /* External BCM6802 MoCA chip is connected */ + }; +}; + +&pcie0 { + ranges = <0x00000000 0 0 0 0 0x00100000>; + #address-cells = <3>; + #size-cells = <2>; + + bridge@0,0,0 { + reg = <0x0000 0 0 0 0>; + ranges = <0x00000000 0 0 0 0 0 0 0x00100000>; + #address-cells = <3>; + #size-cells = <2>; + + wifi@0,1,0 { + reg = <0x0000 0 0 0 0>; + ranges = <0x00000000 0 0 0 0x00100000>; + #address-cells = <1>; + #size-cells = <1>; + }; + }; +}; + +&usb2 { + vcc-gpio = <&chipcommon 8 GPIO_ACTIVE_HIGH>; +}; diff --git a/arch/arm/boot/dts/bcm958522er.dts b/arch/arm/boot/dts/bcm958522er.dts index f5c42962c201..f9dd342cc2ae 100644 --- a/arch/arm/boot/dts/bcm958522er.dts +++ b/arch/arm/boot/dts/bcm958522er.dts @@ -170,3 +170,11 @@ partition@700000 { &uart0 { status = "okay"; }; + +&usb3_phy { + status = "okay"; +}; + +&xhci { + status = "okay"; +}; diff --git a/arch/arm/boot/dts/bcm958525er.dts b/arch/arm/boot/dts/bcm958525er.dts index efcb1f67bdad..374508a9cfbf 100644 --- a/arch/arm/boot/dts/bcm958525er.dts +++ b/arch/arm/boot/dts/bcm958525er.dts @@ -182,3 +182,11 @@ &sata { &uart0 { status = "okay"; }; + +&usb3_phy { + status = "okay"; +}; + +&xhci { + status = "okay"; +}; diff --git a/arch/arm/boot/dts/bcm958525xmc.dts b/arch/arm/boot/dts/bcm958525xmc.dts index b335ce02e32f..403250c5ad8e 100644 --- a/arch/arm/boot/dts/bcm958525xmc.dts +++ b/arch/arm/boot/dts/bcm958525xmc.dts @@ -202,3 +202,11 @@ &sdio { &uart0 { status = "okay"; }; + +&usb3_phy { + status = "okay"; +}; + +&xhci { + status = "okay"; +}; diff --git a/arch/arm/boot/dts/bcm958622hr.dts b/arch/arm/boot/dts/bcm958622hr.dts index 16ab2d82a14b..fd8b8c689ffe 100644 --- a/arch/arm/boot/dts/bcm958622hr.dts +++ b/arch/arm/boot/dts/bcm958622hr.dts @@ -219,3 +219,11 @@ fixed-link { &uart0 { status = "okay"; }; + +&usb3_phy { + status = "okay"; +}; + +&xhci { + status = "okay"; +}; diff --git a/arch/arm/boot/dts/bcm958623hr.dts b/arch/arm/boot/dts/bcm958623hr.dts index 9b921c6aa8f8..3bc50849d013 100644 --- a/arch/arm/boot/dts/bcm958623hr.dts +++ b/arch/arm/boot/dts/bcm958623hr.dts @@ -227,3 +227,11 @@ fixed-link { &uart0 { status = "okay"; }; + +&usb3_phy { + status = "okay"; +}; + +&xhci { + status = "okay"; +}; diff --git a/arch/arm/boot/dts/bcm958625hr.dts b/arch/arm/boot/dts/bcm958625hr.dts index 006b08e41a3b..d94d14b3c745 100644 --- a/arch/arm/boot/dts/bcm958625hr.dts +++ b/arch/arm/boot/dts/bcm958625hr.dts @@ -229,3 +229,11 @@ fixed-link { 
&uart0 { status = "okay"; }; + +&usb3_phy { + status = "okay"; +}; + +&xhci { + status = "okay"; +}; diff --git a/arch/arm/boot/dts/bcm958625k.dts b/arch/arm/boot/dts/bcm958625k.dts index 64740f85cf4c..2cf2392483b2 100644 --- a/arch/arm/boot/dts/bcm958625k.dts +++ b/arch/arm/boot/dts/bcm958625k.dts @@ -264,3 +264,11 @@ &uart0 { &uart1 { status = "okay"; }; + +&usb3_phy { + status = "okay"; +}; + +&xhci { + status = "okay"; +}; diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts index 67e72bc72e80..c75507922f7d 100644 --- a/arch/arm/boot/dts/da850-evm.dts +++ b/arch/arm/boot/dts/da850-evm.dts @@ -15,6 +15,13 @@ / { compatible = "ti,da850-evm", "ti,da850"; model = "DA850/AM1808/OMAP-L138 EVM"; + aliases { + serial0 = &serial0; + serial1 = &serial1; + serial2 = &serial2; + ethernet0 = ð0; + }; + soc@1c00000 { pmx_core: pinmux@14120 { status = "okay"; diff --git a/arch/arm/boot/dts/da850-lego-ev3.dts b/arch/arm/boot/dts/da850-lego-ev3.dts index 45983c04a8a7..413dbd5d9f64 100644 --- a/arch/arm/boot/dts/da850-lego-ev3.dts +++ b/arch/arm/boot/dts/da850-lego-ev3.dts @@ -249,6 +249,15 @@ battery_pins: pinmux_battery_pins { 0x4c 0x00000080 0x000000f0 >; }; + + ev3_lcd_pins: pinmux_lcd { + pinctrl-single,bits = < + /* SIMO, GP2[11], GP2[12], CLK */ + 0x14 0x00188100 0x00ffff00 + /* GP5[0] */ + 0x30 0x80000000 0xf0000000 + >; + }; }; &pinconf { @@ -357,6 +366,21 @@ adc: adc@3 { }; }; +&spi1 { + status = "okay"; + pinctrl-0 = <&ev3_lcd_pins>; + pinctrl-names = "default"; + cs-gpios = <&gpio 44 GPIO_ACTIVE_LOW>; + + display@0{ + compatible = "lego,ev3-lcd"; + reg = <0>; + spi-max-frequency = <10000000>; + a0-gpios = <&gpio 43 GPIO_ACTIVE_HIGH>; + reset-gpios = <&gpio 80 GPIO_ACTIVE_HIGH>; + }; +}; + &ehrpwm0 { status = "okay"; }; diff --git a/arch/arm/boot/dts/dove-d3plug.dts b/arch/arm/boot/dts/dove-d3plug.dts index f5f59bb5a534..e88ff83f1dec 100644 --- a/arch/arm/boot/dts/dove-d3plug.dts +++ b/arch/arm/boot/dts/dove-d3plug.dts @@ -88,7 +88,7 @@ spi-flash@0 { &pcie { status = "okay"; /* Fresco Logic USB3.0 xHCI controller */ - pcie-port@0 { + pcie@1 { status = "okay"; reset-gpios = <&gpio0 26 1>; reset-delay-us = <20000>; @@ -96,7 +96,7 @@ pcie-port@0 { pinctrl-names = "default"; }; /* Mini-PCIe slot */ - pcie-port@1 { + pcie@2 { status = "okay"; reset-gpios = <&gpio0 25 1>; }; diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi index 698d58cea20d..1475d3672e56 100644 --- a/arch/arm/boot/dts/dove.dtsi +++ b/arch/arm/boot/dts/dove.dtsi @@ -89,7 +89,7 @@ MBUS_ID(0x01, 0xfd) 0 0xf8000000 0x8000000 /* BootROM 128M */ MBUS_ID(0x03, 0x01) 0 0xc8000000 0x0100000 /* CESA SRAM 1M */ MBUS_ID(0x0d, 0x00) 0 0xf0000000 0x0100000>; /* PMU SRAM 1M */ - pcie: pcie-controller { + pcie: pcie { compatible = "marvell,dove-pcie"; status = "disabled"; device_type = "pci"; @@ -106,7 +106,7 @@ pcie: pcie-controller { 0x82000000 0x2 0x0 MBUS_ID(0x08, 0xe8) 0 1 0 /* Port 1.0 Mem */ 0x81000000 0x2 0x0 MBUS_ID(0x08, 0xe0) 0 1 0>; /* Port 1.0 I/O */ - pcie0: pcie-port@0 { + pcie0: pcie@1 { device_type = "pci"; status = "disabled"; assigned-addresses = <0x82000800 0 0x40000 0 0x2000>; @@ -118,13 +118,14 @@ pcie0: pcie-port@0 { #size-cells = <2>; ranges = <0x82000000 0 0 0x82000000 0x1 0 1 0 0x81000000 0 0 0x81000000 0x1 0 1 0>; + bus-range = <0x00 0xff>; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &intc 16>; }; - pcie1: pcie-port@1 { + pcie1: pcie@2 { device_type = "pci"; status = "disabled"; assigned-addresses = <0x82002800 0 0x80000 0 0x2000>; @@ 
-136,6 +137,7 @@ pcie1: pcie-port@1 { #size-cells = <2>; ranges = <0x82000000 0 0 0x82000000 0x2 0 1 0 0x81000000 0 0 0x81000000 0x2 0 1 0>; + bus-range = <0x00 0xff>; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; diff --git a/arch/arm/boot/dts/dra7-evm-common.dtsi b/arch/arm/boot/dts/dra7-evm-common.dtsi new file mode 100644 index 000000000000..343e95f9a001 --- /dev/null +++ b/arch/arm/boot/dts/dra7-evm-common.dtsi @@ -0,0 +1,258 @@ +/* + * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include + +/ { + chosen { + stdout-path = &uart1; + }; + + extcon_usb1: extcon_usb1 { + compatible = "linux,extcon-usb-gpio"; + id-gpio = <&pcf_gpio_21 1 GPIO_ACTIVE_HIGH>; + }; + + sound0: sound0 { + compatible = "simple-audio-card"; + simple-audio-card,name = "DRA7xx-EVM"; + simple-audio-card,widgets = + "Headphone", "Headphone Jack", + "Line", "Line Out", + "Microphone", "Mic Jack", + "Line", "Line In"; + simple-audio-card,routing = + "Headphone Jack", "HPLOUT", + "Headphone Jack", "HPROUT", + "Line Out", "LLOUT", + "Line Out", "RLOUT", + "MIC3L", "Mic Jack", + "MIC3R", "Mic Jack", + "Mic Jack", "Mic Bias", + "LINE1L", "Line In", + "LINE1R", "Line In"; + simple-audio-card,format = "dsp_b"; + simple-audio-card,bitclock-master = <&sound0_master>; + simple-audio-card,frame-master = <&sound0_master>; + simple-audio-card,bitclock-inversion; + + sound0_master: simple-audio-card,cpu { + sound-dai = <&mcasp3>; + system-clock-frequency = <5644800>; + }; + + simple-audio-card,codec { + sound-dai = <&tlv320aic3106>; + clocks = <&atl_clkin2_ck>; + }; + }; + + leds { + compatible = "gpio-leds"; + led0 { + label = "dra7:usr1"; + gpios = <&pcf_lcd 4 GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + + led1 { + label = "dra7:usr2"; + gpios = <&pcf_lcd 5 GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + + led2 { + label = "dra7:usr3"; + gpios = <&pcf_lcd 6 GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + + led3 { + label = "dra7:usr4"; + gpios = <&pcf_lcd 7 GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + }; + + gpio_keys { + compatible = "gpio-keys"; + #address-cells = <1>; + #size-cells = <0>; + autorepeat; + + USER1 { + label = "btnUser1"; + linux,code = ; + gpios = <&pcf_lcd 2 GPIO_ACTIVE_LOW>; + }; + + USER2 { + label = "btnUser2"; + linux,code = ; + gpios = <&pcf_lcd 3 GPIO_ACTIVE_LOW>; + }; + }; +}; + +&i2c3 { + status = "okay"; + clock-frequency = <400000>; +}; + +&mcspi1 { + status = "okay"; +}; + +&mcspi2 { + status = "okay"; +}; + +&uart1 { + status = "okay"; + interrupts-extended = <&crossbar_mpu GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>, + <&dra7_pmx_core 0x3e0>; +}; + +&uart2 { + status = "okay"; +}; + +&uart3 { + status = "okay"; +}; + +&qspi { + status = "okay"; + + spi-max-frequency = <76800000>; + m25p80@0 { + compatible = "s25fl256s1"; + spi-max-frequency = <76800000>; + reg = <0>; + spi-tx-bus-width = <1>; + spi-rx-bus-width = <4>; + #address-cells = <1>; + #size-cells = <1>; + + /* MTD partition table. + * The ROM checks the first four physical blocks + * for a valid file to boot and the flash here is + * 64KiB block size. 
+ */ + partition@0 { + label = "QSPI.SPL"; + reg = <0x00000000 0x000010000>; + }; + partition@1 { + label = "QSPI.SPL.backup1"; + reg = <0x00010000 0x00010000>; + }; + partition@2 { + label = "QSPI.SPL.backup2"; + reg = <0x00020000 0x00010000>; + }; + partition@3 { + label = "QSPI.SPL.backup3"; + reg = <0x00030000 0x00010000>; + }; + partition@4 { + label = "QSPI.u-boot"; + reg = <0x00040000 0x00100000>; + }; + partition@5 { + label = "QSPI.u-boot-spl-os"; + reg = <0x00140000 0x00080000>; + }; + partition@6 { + label = "QSPI.u-boot-env"; + reg = <0x001c0000 0x00010000>; + }; + partition@7 { + label = "QSPI.u-boot-env.backup1"; + reg = <0x001d0000 0x0010000>; + }; + partition@8 { + label = "QSPI.kernel"; + reg = <0x001e0000 0x0800000>; + }; + partition@9 { + label = "QSPI.file-system"; + reg = <0x009e0000 0x01620000>; + }; + }; +}; + +&omap_dwc3_1 { + extcon = <&extcon_usb1>; +}; + +&usb1 { + dr_mode = "otg"; + extcon = <&extcon_usb1>; +}; + +&usb2 { + dr_mode = "host"; +}; + +&atl { + assigned-clocks = <&abe_dpll_sys_clk_mux>, + <&atl_gfclk_mux>, + <&dpll_abe_ck>, + <&dpll_abe_m2x2_ck>, + <&atl_clkin2_ck>; + assigned-clock-parents = <&sys_clkin2>, <&dpll_abe_m2_ck>; + assigned-clock-rates = <0>, <0>, <180633600>, <361267200>, <5644800>; + + status = "okay"; + + atl2 { + bws = ; + aws = ; + }; +}; + +&mcasp3 { + #sound-dai-cells = <0>; + + assigned-clocks = <&mcasp3_ahclkx_mux>; + assigned-clock-parents = <&atl_clkin2_ck>; + + status = "okay"; + + op-mode = <0>; /* MCASP_IIS_MODE */ + tdm-slots = <2>; + /* 4 serializer */ + serial-dir = < /* 0: INACTIVE, 1: TX, 2: RX */ + 1 2 0 0 + >; + tx-num-evt = <32>; + rx-num-evt = <32>; +}; + +&mailbox5 { + status = "okay"; + mbox_ipu1_ipc3x: mbox_ipu1_ipc3x { + status = "okay"; + }; + mbox_dsp1_ipc3x: mbox_dsp1_ipc3x { + status = "okay"; + }; +}; + +&mailbox6 { + status = "okay"; + mbox_ipu2_ipc3x: mbox_ipu2_ipc3x { + status = "okay"; + }; + mbox_dsp2_ipc3x: mbox_dsp2_ipc3x { + status = "okay"; + }; +}; diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts index f47fc4daf062..aa426dabb6c3 100644 --- a/arch/arm/boot/dts/dra7-evm.dts +++ b/arch/arm/boot/dts/dra7-evm.dts @@ -8,9 +8,8 @@ /dts-v1/; #include "dra74x.dtsi" -#include -#include -#include +#include "dra7-evm-common.dtsi" +#include "dra74x-mmc-iodelay.dtsi" / { model = "TI DRA742"; @@ -21,8 +20,12 @@ memory@0 { reg = <0x0 0x80000000 0x0 0x60000000>; /* 1536 MB */ }; - chosen { - stdout-path = &uart1; + evm_1v8_sw: fixedregulator-evm_1v8 { + compatible = "regulator-fixed"; + regulator-name = "evm_1v8"; + vin-supply = <&smps9_reg>; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; }; evm_3v3_sd: fixedregulator-sd { @@ -51,11 +54,6 @@ aic_dvdd: fixedregulator-aic_dvdd { regulator-max-microvolt = <1800000>; }; - extcon_usb1: extcon_usb1 { - compatible = "linux,extcon-usb-gpio"; - id-gpio = <&pcf_gpio_21 1 GPIO_ACTIVE_HIGH>; - }; - extcon_usb2: extcon_usb2 { compatible = "linux,extcon-usb-gpio"; id-gpio = <&pcf_gpio_21 2 GPIO_ACTIVE_HIGH>; @@ -73,85 +71,6 @@ vtt_fixed: fixedregulator-vtt { gpio = <&gpio7 11 GPIO_ACTIVE_HIGH>; }; - sound0: sound0 { - compatible = "simple-audio-card"; - simple-audio-card,name = "DRA7xx-EVM"; - simple-audio-card,widgets = - "Headphone", "Headphone Jack", - "Line", "Line Out", - "Microphone", "Mic Jack", - "Line", "Line In"; - simple-audio-card,routing = - "Headphone Jack", "HPLOUT", - "Headphone Jack", "HPROUT", - "Line Out", "LLOUT", - "Line Out", "RLOUT", - "MIC3L", "Mic Jack", - "MIC3R", "Mic Jack", - "Mic Jack", 
"Mic Bias", - "LINE1L", "Line In", - "LINE1R", "Line In"; - simple-audio-card,format = "dsp_b"; - simple-audio-card,bitclock-master = <&sound0_master>; - simple-audio-card,frame-master = <&sound0_master>; - simple-audio-card,bitclock-inversion; - - sound0_master: simple-audio-card,cpu { - sound-dai = <&mcasp3>; - system-clock-frequency = <5644800>; - }; - - simple-audio-card,codec { - sound-dai = <&tlv320aic3106>; - clocks = <&atl_clkin2_ck>; - }; - }; - - leds { - compatible = "gpio-leds"; - led0 { - label = "dra7:usr1"; - gpios = <&pcf_lcd 4 GPIO_ACTIVE_LOW>; - default-state = "off"; - }; - - led1 { - label = "dra7:usr2"; - gpios = <&pcf_lcd 5 GPIO_ACTIVE_LOW>; - default-state = "off"; - }; - - led2 { - label = "dra7:usr3"; - gpios = <&pcf_lcd 6 GPIO_ACTIVE_LOW>; - default-state = "off"; - }; - - led3 { - label = "dra7:usr4"; - gpios = <&pcf_lcd 7 GPIO_ACTIVE_LOW>; - default-state = "off"; - }; - }; - - gpio_keys { - compatible = "gpio-keys"; - #address-cells = <1>; - #size-cells = <0>; - autorepeat; - - USER1 { - label = "btnUser1"; - linux,code = ; - gpios = <&pcf_lcd 2 GPIO_ACTIVE_LOW>; - }; - - USER2 { - label = "btnUser2"; - linux,code = ; - gpios = <&pcf_lcd 3 GPIO_ACTIVE_LOW>; - }; - }; }; &dra7_pmx_core { @@ -406,137 +325,49 @@ p1 { }; }; -&i2c3 { - status = "okay"; - clock-frequency = <400000>; -}; - -&mcspi1 { - status = "okay"; -}; - -&mcspi2 { - status = "okay"; -}; - -&uart1 { - status = "okay"; - interrupts-extended = <&crossbar_mpu GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>, - <&dra7_pmx_core 0x3e0>; -}; - -&uart2 { - status = "okay"; -}; - -&uart3 { - status = "okay"; -}; - &mmc1 { status = "okay"; - pinctrl-names = "default"; - pinctrl-0 = <&mmc1_pins_default>; vmmc-supply = <&evm_3v3_sd>; - vmmc_aux-supply = <&ldo1_reg>; + vqmmc-supply = <&ldo1_reg>; bus-width = <4>; /* * SDCD signal is not being used here - using the fact that GPIO mode * is always hardwired. */ cd-gpios = <&gpio6 27 GPIO_ACTIVE_LOW>; + pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50-rev11", "sdr104-rev11", "ddr50", "sdr104"; + pinctrl-0 = <&mmc1_pins_default>; + pinctrl-1 = <&mmc1_pins_hs>; + pinctrl-2 = <&mmc1_pins_sdr12>; + pinctrl-3 = <&mmc1_pins_sdr25>; + pinctrl-4 = <&mmc1_pins_sdr50>; + pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev11_conf>; + pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev11_conf>; + pinctrl-7 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev20_conf>; + pinctrl-8 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>; }; &mmc2 { status = "okay"; - pinctrl-names = "default"; - pinctrl-0 = <&mmc2_pins_default>; - vmmc-supply = <&evm_3v3_sw>; + vmmc-supply = <&evm_1v8_sw>; bus-width = <8>; + pinctrl-names = "default", "hs", "ddr_1_8v-rev11", "ddr_1_8v", "hs200_1_8v-rev11", "hs200_1_8v"; + pinctrl-0 = <&mmc2_pins_default>; + pinctrl-1 = <&mmc2_pins_hs>; + pinctrl-2 = <&mmc2_pins_ddr_1_8v_rev11 &mmc2_iodelay_ddr_1_8v_rev11_conf>; + pinctrl-3 = <&mmc2_pins_ddr_rev20>; + pinctrl-4 = <&mmc2_pins_hs200 &mmc2_iodelay_hs200_rev11_conf>; + pinctrl-5 = <&mmc2_pins_hs200 &mmc2_iodelay_hs200_rev20_conf>; }; &cpu0 { cpu0-supply = <&smps123_reg>; }; -&qspi { - status = "okay"; - - spi-max-frequency = <76800000>; - m25p80@0 { - compatible = "s25fl256s1"; - spi-max-frequency = <76800000>; - reg = <0>; - spi-tx-bus-width = <1>; - spi-rx-bus-width = <4>; - #address-cells = <1>; - #size-cells = <1>; - - /* MTD partition table. - * The ROM checks the first four physical blocks - * for a valid file to boot and the flash here is - * 64KiB block size. 
- */ - partition@0 { - label = "QSPI.SPL"; - reg = <0x00000000 0x000010000>; - }; - partition@1 { - label = "QSPI.SPL.backup1"; - reg = <0x00010000 0x00010000>; - }; - partition@2 { - label = "QSPI.SPL.backup2"; - reg = <0x00020000 0x00010000>; - }; - partition@3 { - label = "QSPI.SPL.backup3"; - reg = <0x00030000 0x00010000>; - }; - partition@4 { - label = "QSPI.u-boot"; - reg = <0x00040000 0x00100000>; - }; - partition@5 { - label = "QSPI.u-boot-spl-os"; - reg = <0x00140000 0x00080000>; - }; - partition@6 { - label = "QSPI.u-boot-env"; - reg = <0x001c0000 0x00010000>; - }; - partition@7 { - label = "QSPI.u-boot-env.backup1"; - reg = <0x001d0000 0x0010000>; - }; - partition@8 { - label = "QSPI.kernel"; - reg = <0x001e0000 0x0800000>; - }; - partition@9 { - label = "QSPI.file-system"; - reg = <0x009e0000 0x01620000>; - }; - }; -}; - -&omap_dwc3_1 { - extcon = <&extcon_usb1>; -}; - &omap_dwc3_2 { extcon = <&extcon_usb2>; }; -&usb1 { - dr_mode = "otg"; - extcon = <&extcon_usb1>; -}; - -&usb2 { - dr_mode = "host"; -}; - &elm { status = "okay"; }; @@ -556,6 +387,7 @@ nand@0,0 { interrupts = <0 IRQ_TYPE_NONE>, /* fifoevent */ <1 IRQ_TYPE_NONE>; /* termcount */ rb-gpios = <&gpmc 0 GPIO_ACTIVE_HIGH>; /* gpmc_wait0 pin */ + ti,nand-xfer-type = "prefetch-dma"; ti,nand-ecc-opt = "bch8"; ti,elm-id = <&elm>; nand-bus-width = <16>; @@ -666,57 +498,6 @@ &dcan1 { pinctrl-2 = <&dcan1_pins_default>; }; -&atl { - assigned-clocks = <&abe_dpll_sys_clk_mux>, - <&atl_gfclk_mux>, - <&dpll_abe_ck>, - <&dpll_abe_m2x2_ck>, - <&atl_clkin2_ck>; - assigned-clock-parents = <&sys_clkin2>, <&dpll_abe_m2_ck>; - assigned-clock-rates = <0>, <0>, <180633600>, <361267200>, <5644800>; - +&pcie1_rc { status = "okay"; - - atl2 { - bws = ; - aws = ; - }; -}; - -&mcasp3 { - #sound-dai-cells = <0>; - - assigned-clocks = <&mcasp3_ahclkx_mux>; - assigned-clock-parents = <&atl_clkin2_ck>; - - status = "okay"; - - op-mode = <0>; /* MCASP_IIS_MODE */ - tdm-slots = <2>; - /* 4 serializer */ - serial-dir = < /* 0: INACTIVE, 1: TX, 2: RX */ - 1 2 0 0 - >; - tx-num-evt = <32>; - rx-num-evt = <32>; -}; - -&mailbox5 { - status = "okay"; - mbox_ipu1_ipc3x: mbox_ipu1_ipc3x { - status = "okay"; - }; - mbox_dsp1_ipc3x: mbox_dsp1_ipc3x { - status = "okay"; - }; -}; - -&mailbox6 { - status = "okay"; - mbox_ipu2_ipc3x: mbox_ipu2_ipc3x { - status = "okay"; - }; - mbox_dsp2_ipc3x: mbox_dsp2_ipc3x { - status = "okay"; - }; }; diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index 0f0f6f58bd18..02a136a4661a 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi @@ -196,6 +196,7 @@ dra7_pmx_core: pinmux@1400 { scm_conf1: scm_conf@1c04 { compatible = "syscon"; reg = <0x1c04 0x0020>; + #syscon-cells = <2>; }; scm_conf_pcie: scm_conf@1c24 { @@ -287,7 +288,11 @@ axi@0 { #address-cells = <1>; ranges = <0x51000000 0x51000000 0x3000 0x0 0x20000000 0x10000000>; - pcie1: pcie@51000000 { + /** + * To enable PCI endpoint mode, disable the pcie1_rc + * node and enable pcie1_ep mode. 
+ */ + pcie1_rc: pcie@51000000 { compatible = "ti,dra7-pcie"; reg = <0x51000000 0x2000>, <0x51002000 0x14c>, <0x1000 0x2000>; reg-names = "rc_dbics", "ti_conf", "config"; @@ -309,12 +314,28 @@ pcie1: pcie@51000000 { <0 0 0 2 &pcie1_intc 2>, <0 0 0 3 &pcie1_intc 3>, <0 0 0 4 &pcie1_intc 4>; + status = "disabled"; pcie1_intc: interrupt-controller { interrupt-controller; #address-cells = <0>; #interrupt-cells = <1>; }; }; + + pcie1_ep: pcie_ep@51000000 { + compatible = "ti,dra7-pcie-ep"; + reg = <0x51000000 0x28>, <0x51002000 0x14c>, <0x51001000 0x28>, <0x1000 0x10000000>; + reg-names = "ep_dbics", "ti_conf", "ep_dbics2", "addr_space"; + interrupts = <0 232 0x4>; + num-lanes = <1>; + num-ib-windows = <4>; + num-ob-windows = <16>; + ti,hwmods = "pcie1"; + phys = <&pcie1_phy>; + phy-names = "pcie-phy0"; + ti,syscon-unaligned-access = <&scm_conf1 0x14 2>; + status = "disabled"; + }; }; axi@1 { @@ -418,6 +439,14 @@ dsp1_system: dsp_system@40d00000 { reg = <0x40d00000 0x100>; }; + dra7_iodelay_core: padconf@4844a000 { + compatible = "ti,dra7-iodelay"; + reg = <0x4844a000 0x0d1c>; + #address-cells = <1>; + #size-cells = <0>; + #pinctrl-cells = <2>; + }; + sdma: dma-controller@4a056000 { compatible = "ti,omap4430-sdma"; reg = <0x4a056000 0x1000>; @@ -1037,6 +1066,7 @@ mmc1: mmc@4809c000 { dma-names = "tx", "rx"; status = "disabled"; pbias-supply = <&pbias_mmc_reg>; + max-frequency = <192000000>; }; mmc2: mmc@480b4000 { @@ -1048,6 +1078,7 @@ mmc2: mmc@480b4000 { dmas = <&sdma_xbar 47>, <&sdma_xbar 48>; dma-names = "tx", "rx"; status = "disabled"; + max-frequency = <192000000>; }; mmc3: mmc@480ad000 { @@ -1059,6 +1090,8 @@ mmc3: mmc@480ad000 { dmas = <&sdma_xbar 77>, <&sdma_xbar 78>; dma-names = "tx", "rx"; status = "disabled"; + /* Errata i887 limits max-frequency of MMC3 to 64 MHz */ + max-frequency = <64000000>; }; mmc4: mmc@480d1000 { @@ -1070,6 +1103,7 @@ mmc4: mmc@480d1000 { dmas = <&sdma_xbar 57>, <&sdma_xbar 58>; dma-names = "tx", "rx"; status = "disabled"; + max-frequency = <192000000>; }; mmu0_dsp1: mmu@40d01000 { diff --git a/arch/arm/boot/dts/dra71-evm.dts b/arch/arm/boot/dts/dra71-evm.dts index a6298eb56978..41c9132eb550 100644 --- a/arch/arm/boot/dts/dra71-evm.dts +++ b/arch/arm/boot/dts/dra71-evm.dts @@ -7,6 +7,7 @@ */ #include "dra72-evm-common.dtsi" +#include "dra72x-mmc-iodelay.dtsi" #include / { @@ -32,6 +33,16 @@ vpo_sd_1v8_3v3: gpio-regulator-TPS74801 { 3000000 0x1>; }; + evm_1v8_sw: fixedregulator-evm_1v8 { + compatible = "regulator-fixed"; + regulator-name = "evm_1v8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + vin-supply = <&lp8732_buck0_reg>; + regulator-always-on; + regulator-boot-on; + }; + poweroff: gpio-poweroff { compatible = "gpio-poweroff"; gpios = <&gpio7 30 GPIO_ACTIVE_HIGH>; @@ -162,7 +173,24 @@ p0 { }; &mmc1 { - vmmc_aux-supply = <&vpo_sd_1v8_3v3>; + pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104"; + pinctrl-0 = <&mmc1_pins_default>; + pinctrl-1 = <&mmc1_pins_hs>; + pinctrl-2 = <&mmc1_pins_sdr12>; + pinctrl-3 = <&mmc1_pins_sdr25>; + pinctrl-4 = <&mmc1_pins_sdr50>; + pinctrl-5 = <&mmc1_pins_ddr50_rev20 &mmc1_iodelay_ddr50_conf>; + pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>; + vqmmc-supply = <&vpo_sd_1v8_3v3>; +}; + +&mmc2 { + pinctrl-names = "default", "hs", "ddr_1_8v", "hs200_1_8v"; + pinctrl-0 = <&mmc2_pins_default>; + pinctrl-1 = <&mmc2_pins_hs>; + pinctrl-2 = <&mmc2_pins_ddr_rev20 &mmc2_iodelay_ddr_conf>; + pinctrl-3 = <&mmc2_pins_hs200 &mmc2_iodelay_hs200_rev20_conf>; + 
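The pcie1_rc/pcie1_ep split added to dra7.dtsi above leaves both nodes disabled so that a board file selects the mode; the board hunks visible in this series only ever enable pcie1_rc. A minimal sketch of the endpoint-mode side, assuming a hypothetical board .dts (no board in this patch actually does this):

/* Hypothetical board fragment, not part of this patch: endpoint mode is
 * selected exactly as the dra7.dtsi comment says, by leaving pcie1_rc
 * disabled and enabling pcie1_ep instead.
 */
&pcie1_rc {
	status = "disabled";	/* the dra7.dtsi default */
};

&pcie1_ep {
	status = "okay";	/* binds the ti,dra7-pcie-ep node */
};

Root-complex boards do the opposite, as the "&pcie1_rc { status = "okay"; };" hunks in dra7-evm.dts and dra72-evm-common.dtsi show.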
vmmc-supply = <&evm_1v8_sw>; }; &mac { @@ -191,6 +219,7 @@ dp83867_0: ethernet-phy@2 { ti,tx-internal-delay = ; ti,fifo-depth = ; ti,min-output-impedance; + ti,dp83867-rxctrl-strap-quirk; }; dp83867_1: ethernet-phy@3 { @@ -199,6 +228,7 @@ dp83867_1: ethernet-phy@3 { ti,tx-internal-delay = ; ti,fifo-depth = ; ti,min-output-impedance; + ti,dp83867-rxctrl-strap-quirk; }; }; diff --git a/arch/arm/boot/dts/dra72-evm-common.dtsi b/arch/arm/boot/dts/dra72-evm-common.dtsi index 85780549bc26..2e485a13dfd7 100644 --- a/arch/arm/boot/dts/dra72-evm-common.dtsi +++ b/arch/arm/boot/dts/dra72-evm-common.dtsi @@ -311,6 +311,7 @@ nand@0,0 { interrupts = <0 IRQ_TYPE_NONE>, /* fifoevent */ <1 IRQ_TYPE_NONE>; /* termcount */ rb-gpios = <&gpmc 0 GPIO_ACTIVE_HIGH>; /* gpmc_wait0 pin */ + ti,nand-xfer-type = "prefetch-dma"; ti,nand-ecc-opt = "bch8"; ti,elm-id = <&elm>; nand-bus-width = <16>; @@ -419,8 +420,6 @@ &mmc2 { status = "okay"; pinctrl-names = "default"; pinctrl-0 = <&mmc2_pins_default>; - - vmmc-supply = <&evm_3v3_sw>; bus-width = <8>; ti,non-removable; max-frequency = <192000000>; @@ -564,3 +563,7 @@ mbox_ipu2_ipc3x: mbox_ipu2_ipc3x { status = "okay"; }; }; + +&pcie1_rc { + status = "okay"; +}; diff --git a/arch/arm/boot/dts/dra72-evm-revc.dts b/arch/arm/boot/dts/dra72-evm-revc.dts index 3ecac56bf504..bf588d00728d 100644 --- a/arch/arm/boot/dts/dra72-evm-revc.dts +++ b/arch/arm/boot/dts/dra72-evm-revc.dts @@ -6,6 +6,7 @@ * published by the Free Software Foundation. */ #include "dra72-evm-common.dtsi" +#include "dra72x-mmc-iodelay.dtsi" #include / { @@ -15,6 +16,16 @@ memory@0 { device_type = "memory"; reg = <0x0 0x80000000 0x0 0x80000000>; /* 2GB */ }; + + evm_1v8_sw: fixedregulator-evm_1v8 { + compatible = "regulator-fixed"; + regulator-name = "evm_1v8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + vin-supply = <&smps4_reg>; + regulator-always-on; + regulator-boot-on; + }; }; &i2c1 { @@ -70,6 +81,7 @@ dp83867_0: ethernet-phy@2 { ti,min-output-impedance; interrupt-parent = <&gpio6>; interrupts = <16 IRQ_TYPE_EDGE_FALLING>; + ti,dp83867-rxctrl-strap-quirk; }; dp83867_1: ethernet-phy@3 { @@ -80,5 +92,27 @@ dp83867_1: ethernet-phy@3 { ti,min-output-impedance; interrupt-parent = <&gpio6>; interrupts = <16 IRQ_TYPE_EDGE_FALLING>; + ti,dp83867-rxctrl-strap-quirk; }; }; + +&mmc1 { + pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104"; + pinctrl-0 = <&mmc1_pins_default>; + pinctrl-1 = <&mmc1_pins_hs>; + pinctrl-2 = <&mmc1_pins_sdr12>; + pinctrl-3 = <&mmc1_pins_sdr25>; + pinctrl-4 = <&mmc1_pins_sdr50>; + pinctrl-5 = <&mmc1_pins_ddr50_rev20 &mmc1_iodelay_ddr50_conf>; + pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>; + vqmmc-supply = <&ldo1_reg>; +}; + +&mmc2 { + pinctrl-names = "default", "hs", "ddr_1_8v", "hs200_1_8v"; + pinctrl-0 = <&mmc2_pins_default>; + pinctrl-1 = <&mmc2_pins_hs>; + pinctrl-2 = <&mmc2_pins_ddr_rev20 &mmc2_iodelay_ddr_conf>; + pinctrl-3 = <&mmc2_pins_hs200 &mmc2_iodelay_hs200_rev20_conf>; + vmmc-supply = <&evm_1v8_sw>; +}; diff --git a/arch/arm/boot/dts/dra72-evm-tps65917.dtsi b/arch/arm/boot/dts/dra72-evm-tps65917.dtsi index e6df676886c0..57bfe5caf5e4 100644 --- a/arch/arm/boot/dts/dra72-evm-tps65917.dtsi +++ b/arch/arm/boot/dts/dra72-evm-tps65917.dtsi @@ -146,5 +146,5 @@ &dss { }; &mmc1 { - vmmc_aux-supply = <&ldo1_reg>; + vqmmc-supply = <&ldo1_reg>; }; diff --git a/arch/arm/boot/dts/dra72-evm.dts b/arch/arm/boot/dts/dra72-evm.dts index cd9c4ff12654..c572693b1665 100644 --- a/arch/arm/boot/dts/dra72-evm.dts +++ 
b/arch/arm/boot/dts/dra72-evm.dts @@ -6,6 +6,7 @@ * published by the Free Software Foundation. */ #include "dra72-evm-common.dtsi" +#include "dra72x-mmc-iodelay.dtsi" / { model = "TI DRA722"; @@ -13,6 +14,16 @@ memory@0 { device_type = "memory"; reg = <0x0 0x80000000 0x0 0x40000000>; /* 1024 MB */ }; + + evm_1v8_sw: fixedregulator-evm_1v8 { + compatible = "regulator-fixed"; + regulator-name = "evm_1v8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + vin-supply = <&smps4_reg>; + regulator-always-on; + regulator-boot-on; + }; }; &i2c1 { @@ -43,3 +54,24 @@ &cpsw_emac0 { phy_id = <&davinci_mdio>, <3>; phy-mode = "rgmii"; }; + +&mmc1 { + pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104"; + pinctrl-0 = <&mmc1_pins_default>; + pinctrl-1 = <&mmc1_pins_hs>; + pinctrl-2 = <&mmc1_pins_sdr12>; + pinctrl-3 = <&mmc1_pins_sdr25>; + pinctrl-4 = <&mmc1_pins_sdr50>; + pinctrl-5 = <&mmc1_pins_ddr50_rev10>; + pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev10_conf>; + vqmmc-supply = <&ldo1_reg>; +}; + +&mmc2 { + pinctrl-names = "default", "hs", "ddr_1_8v", "hs200_1_8v"; + pinctrl-0 = <&mmc2_pins_default>; + pinctrl-1 = <&mmc2_pins_hs>; + pinctrl-2 = <&mmc2_pins_ddr_rev10>; + pinctrl-3 = <&mmc2_pins_hs200 &mmc2_iodelay_hs200_rev10_conf>; + vmmc-supply = <&evm_1v8_sw>; +}; diff --git a/arch/arm/boot/dts/dra72x-mmc-iodelay.dtsi b/arch/arm/boot/dts/dra72x-mmc-iodelay.dtsi new file mode 100644 index 000000000000..088013c6dc6e --- /dev/null +++ b/arch/arm/boot/dts/dra72x-mmc-iodelay.dtsi @@ -0,0 +1,350 @@ +/* + * MMC IOdelay values for TI's DRA72x, DRA71x and AM571x SoCs. + * + * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/ + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * Rules for modifying this file: + * a) Update of this file should typically correspond to a datamanual revision. + * Datamanual revision that was used should be updated in comment below. + * If there is no update to datamanual, do not update the values. If you + * need to use values different from that recommended by the datamanual + * for your design, then you should consider adding values to the device- + * -tree file for your board directly. + * b) We keep the mode names as close to the datamanual as possible. So + * if the manual calls a mode, DDR50, or DDR or DDR 1.8v or DDR 3.3v, + * we follow that in code too. + * c) If the values change between multiple revisions of silicon, we add + * a revision tag to both the new and old entry. Use 'rev10' for PG 1.0, + * 'rev20' for PG 2.0 and so on. + * d) The node name and node label should be the exact same string. This is + * to curb naming creativity and achieve consistency. + * e) If in future, DRA71x and DRA72x values differ, then add 'dra71_' and + * 'dra72_' tag to entries. Both the new and old entries should gain a tag. 
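As an illustration of rule (c) above, the board hunks later in this patch pick the revision-tagged entries that match their silicon. The two fragments below are taken from the dra72-evm.dts (PG 1.0) and dra72-evm-revc.dts (PG 2.0) hunks in this series and live in separate board files:

/* dra72-evm.dts, silicon rev 1.0: rev10 pin-mux and rev10 iodelay data */
&mmc1 {
	pinctrl-5 = <&mmc1_pins_ddr50_rev10>;
	pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev10_conf>;
};

/* dra72-evm-revc.dts, silicon rev 2.0: rev20 pin-mux plus iodelay data */
&mmc1 {
	pinctrl-5 = <&mmc1_pins_ddr50_rev20 &mmc1_iodelay_ddr50_conf>;
	pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>;
};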
+ * + * Datamanual Revisions: + * + * AM571x Silicon Revision 2.0: SPRS957D, Revised January 2017 + * AM571x Silicon Revision 1.0: SPRS919M, Revised November 2017 + * DRA71x : SPRS960B, Revised February 2017 + */ + +&dra7_pmx_core { + mmc1_pins_default: mmc1_pins_default { + pinctrl-single,pins = < + DRA7XX_CORE_IOPAD(0x3754, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_clk.clk */ + DRA7XX_CORE_IOPAD(0x3758, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_cmd.cmd */ + DRA7XX_CORE_IOPAD(0x375c, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat0.dat0 */ + DRA7XX_CORE_IOPAD(0x3760, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat1.dat1 */ + DRA7XX_CORE_IOPAD(0x3764, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat2.dat2 */ + DRA7XX_CORE_IOPAD(0x3768, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat3.dat3 */ + >; + }; + + mmc1_pins_sdr12: mmc1_pins_sdr12 { + pinctrl-single,pins = < + DRA7XX_CORE_IOPAD(0x3754, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_clk.clk */ + DRA7XX_CORE_IOPAD(0x3758, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_cmd.cmd */ + DRA7XX_CORE_IOPAD(0x375c, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat0.dat0 */ + DRA7XX_CORE_IOPAD(0x3760, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat1.dat1 */ + DRA7XX_CORE_IOPAD(0x3764, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat2.dat2 */ + DRA7XX_CORE_IOPAD(0x3768, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat3.dat3 */ + >; + }; + + mmc1_pins_hs: mmc1_pins_hs { + pinctrl-single,pins = < + DRA7XX_CORE_IOPAD(0x3754, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_clk.clk */ + DRA7XX_CORE_IOPAD(0x3758, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_cmd.cmd */ + DRA7XX_CORE_IOPAD(0x375c, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat0.dat0 */ + DRA7XX_CORE_IOPAD(0x3760, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat1.dat1 */ + DRA7XX_CORE_IOPAD(0x3764, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat2.dat2 */ + DRA7XX_CORE_IOPAD(0x3768, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat3.dat3 */ + >; + }; + + mmc1_pins_sdr25: mmc1_pins_sdr25 { + pinctrl-single,pins = < + DRA7XX_CORE_IOPAD(0x3754, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_clk.clk */ + DRA7XX_CORE_IOPAD(0x3758, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_cmd.cmd */ + DRA7XX_CORE_IOPAD(0x375c, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat0.dat0 */ + DRA7XX_CORE_IOPAD(0x3760, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat1.dat1 */ + DRA7XX_CORE_IOPAD(0x3764, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat2.dat2 */ + DRA7XX_CORE_IOPAD(0x3768, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat3.dat3 */ + >; + }; + + mmc1_pins_sdr50: mmc1_pins_sdr50 { + pinctrl-single,pins = < + DRA7XX_CORE_IOPAD(0x3754, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE15 | MUX_MODE0) /* mmc1_clk.clk */ + DRA7XX_CORE_IOPAD(0x3758, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE15 | MUX_MODE0) /* mmc1_cmd.cmd */ + DRA7XX_CORE_IOPAD(0x375c, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE15 | MUX_MODE0) /* mmc1_dat0.dat0 */ + DRA7XX_CORE_IOPAD(0x3760, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE15 | MUX_MODE0) /* mmc1_dat1.dat1 */ + DRA7XX_CORE_IOPAD(0x3764, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE15 | MUX_MODE0) /* mmc1_dat2.dat2 */ + DRA7XX_CORE_IOPAD(0x3768, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE15 | MUX_MODE0) /* mmc1_dat3.dat3 */ + >; + }; + + mmc1_pins_ddr50_rev10: mmc1_pins_ddr50_rev10 { + pinctrl-single,pins = < + DRA7XX_CORE_IOPAD(0x3754, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE14 | MUX_MODE0) /* mmc1_clk.mmc1_clk */ + DRA7XX_CORE_IOPAD(0x3758, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE14 | MUX_MODE0) /* mmc1_cmd.mmc1_cmd */ + DRA7XX_CORE_IOPAD(0x375C, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE14 | MUX_MODE0) /* mmc1_dat0.mmc1_dat0 */ + DRA7XX_CORE_IOPAD(0x3760, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE14 | MUX_MODE0) /* 
mmc1_dat1.mmc1_dat1 */ + DRA7XX_CORE_IOPAD(0x3764, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE14 | MUX_MODE0) /* mmc1_dat2.mmc1_dat2 */ + DRA7XX_CORE_IOPAD(0x3768, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE14 | MUX_MODE0) /* mmc1_dat3.mmc1_dat3 */ + >; + }; + + mmc1_pins_ddr50_rev20: mmc1_pins_ddr50_rev20 { + pinctrl-single,pins = < + DRA7XX_CORE_IOPAD(0x3754, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_clk.clk */ + DRA7XX_CORE_IOPAD(0x3758, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_cmd.cmd */ + DRA7XX_CORE_IOPAD(0x375c, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_dat0.dat0 */ + DRA7XX_CORE_IOPAD(0x3760, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_dat1.dat1 */ + DRA7XX_CORE_IOPAD(0x3764, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_dat2.dat2 */ + DRA7XX_CORE_IOPAD(0x3768, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_dat3.dat3 */ + >; + }; + + mmc1_pins_sdr104: mmc1_pins_sdr104 { + pinctrl-single,pins = < + DRA7XX_CORE_IOPAD(0x3754, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_clk.clk */ + DRA7XX_CORE_IOPAD(0x3758, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_cmd.cmd */ + DRA7XX_CORE_IOPAD(0x375c, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_dat0.dat0 */ + DRA7XX_CORE_IOPAD(0x3760, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_dat1.dat1 */ + DRA7XX_CORE_IOPAD(0x3764, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_dat2.dat2 */ + DRA7XX_CORE_IOPAD(0x3768, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_dat3.dat3 */ + >; + }; + + mmc2_pins_default: mmc2_pins_default { + pinctrl-single,pins = < + DRA7XX_CORE_IOPAD(0x349c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a23.mmc2_clk */ + DRA7XX_CORE_IOPAD(0x34b0, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_cs1.mmc2_cmd */ + DRA7XX_CORE_IOPAD(0x34a0, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a24.mmc2_dat0 */ + DRA7XX_CORE_IOPAD(0x34a4, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a25.mmc2_dat1 */ + DRA7XX_CORE_IOPAD(0x34a8, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a26.mmc2_dat2 */ + DRA7XX_CORE_IOPAD(0x34ac, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a27.mmc2_dat3 */ + DRA7XX_CORE_IOPAD(0x348c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a19.mmc2_dat4 */ + DRA7XX_CORE_IOPAD(0x3490, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a20.mmc2_dat5 */ + DRA7XX_CORE_IOPAD(0x3494, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a21.mmc2_dat6 */ + DRA7XX_CORE_IOPAD(0x3498, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a22.mmc2_dat7 */ + >; + }; + + mmc2_pins_hs: mmc2_pins_hs { + pinctrl-single,pins = < + DRA7XX_CORE_IOPAD(0x349c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a23.mmc2_clk */ + DRA7XX_CORE_IOPAD(0x34b0, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_cs1.mmc2_cmd */ + DRA7XX_CORE_IOPAD(0x34a0, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a24.mmc2_dat0 */ + DRA7XX_CORE_IOPAD(0x34a4, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a25.mmc2_dat1 */ + DRA7XX_CORE_IOPAD(0x34a8, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a26.mmc2_dat2 */ + DRA7XX_CORE_IOPAD(0x34ac, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a27.mmc2_dat3 */ + DRA7XX_CORE_IOPAD(0x348c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a19.mmc2_dat4 */ + DRA7XX_CORE_IOPAD(0x3490, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a20.mmc2_dat5 */ + DRA7XX_CORE_IOPAD(0x3494, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a21.mmc2_dat6 */ + DRA7XX_CORE_IOPAD(0x3498, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a22.mmc2_dat7 */ + >; + }; + + mmc2_pins_ddr_rev10: mmc2_pins_ddr_rev10 { + pinctrl-single,pins = < + DRA7XX_CORE_IOPAD(0x348c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a19.mmc2_dat4 */ + DRA7XX_CORE_IOPAD(0x3490, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a20.mmc2_dat5 
*/ + DRA7XX_CORE_IOPAD(0x3494, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a21.mmc2_dat6 */ + DRA7XX_CORE_IOPAD(0x3498, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a22.mmc2_dat7 */ + DRA7XX_CORE_IOPAD(0x349c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a23.mmc2_clk */ + DRA7XX_CORE_IOPAD(0x34a0, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a24.mmc2_dat0 */ + DRA7XX_CORE_IOPAD(0x34a4, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a25.mmc2_dat1 */ + DRA7XX_CORE_IOPAD(0x34a8, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a26.mmc2_dat2 */ + DRA7XX_CORE_IOPAD(0x34ac, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a27.mmc2_dat3 */ + DRA7XX_CORE_IOPAD(0x34b0, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_cs1.mmc2_cmd */ + >; + }; + + mmc2_pins_ddr_rev20: mmc2_pins_ddr_rev20 { + pinctrl-single,pins = < + DRA7XX_CORE_IOPAD(0x349c, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a23.mmc2_clk */ + DRA7XX_CORE_IOPAD(0x34b0, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_cs1.mmc2_cmd */ + DRA7XX_CORE_IOPAD(0x34a0, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a24.mmc2_dat0 */ + DRA7XX_CORE_IOPAD(0x34a4, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a25.mmc2_dat1 */ + DRA7XX_CORE_IOPAD(0x34a8, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a26.mmc2_dat2 */ + DRA7XX_CORE_IOPAD(0x34ac, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a27.mmc2_dat3 */ + DRA7XX_CORE_IOPAD(0x348c, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a19.mmc2_dat4 */ + DRA7XX_CORE_IOPAD(0x3490, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a20.mmc2_dat5 */ + DRA7XX_CORE_IOPAD(0x3494, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a21.mmc2_dat6 */ + DRA7XX_CORE_IOPAD(0x3498, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a22.mmc2_dat7 */ + >; + }; + + mmc2_pins_hs200: mmc2_pins_hs200 { + pinctrl-single,pins = < + DRA7XX_CORE_IOPAD(0x349c, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a23.mmc2_clk */ + DRA7XX_CORE_IOPAD(0x34b0, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_cs1.mmc2_cmd */ + DRA7XX_CORE_IOPAD(0x34a0, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a24.mmc2_dat0 */ + DRA7XX_CORE_IOPAD(0x34a4, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a25.mmc2_dat1 */ + DRA7XX_CORE_IOPAD(0x34a8, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a26.mmc2_dat2 */ + DRA7XX_CORE_IOPAD(0x34ac, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a27.mmc2_dat3 */ + DRA7XX_CORE_IOPAD(0x348c, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a19.mmc2_dat4 */ + DRA7XX_CORE_IOPAD(0x3490, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a20.mmc2_dat5 */ + DRA7XX_CORE_IOPAD(0x3494, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a21.mmc2_dat6 */ + DRA7XX_CORE_IOPAD(0x3498, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a22.mmc2_dat7 */ + >; + }; +}; + +&dra7_iodelay_core { + + /* Corresponds to MMC1_MANUAL1 in datamanual */ + mmc1_iodelay_ddr50_conf: mmc1_iodelay_ddr50_conf { + pinctrl-pin-array = < + 0x618 A_DELAY_PS(588) G_DELAY_PS(0) /* CFG_MMC1_CLK_IN */ + 0x624 A_DELAY_PS(1000) G_DELAY_PS(0) /* CFG_MMC1_CMD_IN */ + 0x630 A_DELAY_PS(1375) G_DELAY_PS(0) /* CFG_MMC1_DAT0_IN */ + 0x63C A_DELAY_PS(1000) G_DELAY_PS(0) /* CFG_MMC1_DAT1_IN */ + 0x648 A_DELAY_PS(1000) G_DELAY_PS(0) /* CFG_MMC1_DAT2_IN */ + 0x654 A_DELAY_PS(1000) G_DELAY_PS(0) /* CFG_MMC1_DAT3_IN */ + 0x620 A_DELAY_PS(1230) G_DELAY_PS(0) /* CFG_MMC1_CLK_OUT */ + 0x62C A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_CMD_OUT */ + 0x638 A_DELAY_PS(56) G_DELAY_PS(0) /* CFG_MMC1_DAT0_OUT */ + 0x644 A_DELAY_PS(76) G_DELAY_PS(0) /* CFG_MMC1_DAT1_OUT */ + 0x650 
A_DELAY_PS(91) G_DELAY_PS(0) /* CFG_MMC1_DAT2_OUT */ + 0x65C A_DELAY_PS(99) G_DELAY_PS(0) /* CFG_MMC1_DAT3_OUT */ + 0x628 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_CMD_OEN */ + 0x634 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT0_OEN */ + 0x640 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT1_OEN */ + 0x64C A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT2_OEN */ + 0x658 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT3_OEN */ + >; + }; + + /* Corresponds to MMC1_MANUAL2 in datamanual */ + mmc1_iodelay_sdr104_rev10_conf: mmc1_iodelay_sdr104_rev10_conf { + pinctrl-pin-array = < + 0x620 A_DELAY_PS(560) G_DELAY_PS(365) /* CFG_MMC1_CLK_OUT */ + 0x62c A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_CMD_OUT */ + 0x638 A_DELAY_PS(29) G_DELAY_PS(0) /* CFG_MMC1_DAT0_OUT */ + 0x644 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT1_OUT */ + 0x650 A_DELAY_PS(47) G_DELAY_PS(0) /* CFG_MMC1_DAT2_OUT */ + 0x65c A_DELAY_PS(30) G_DELAY_PS(0) /* CFG_MMC1_DAT3_OUT */ + 0x628 A_DELAY_PS(125) G_DELAY_PS(0) /* CFG_MMC1_CMD_OEN */ + 0x634 A_DELAY_PS(43) G_DELAY_PS(0) /* CFG_MMC1_DAT0_OEN */ + 0x640 A_DELAY_PS(433) G_DELAY_PS(0) /* CFG_MMC1_DAT1_OEN */ + 0x64c A_DELAY_PS(287) G_DELAY_PS(0) /* CFG_MMC1_DAT2_OEN */ + 0x658 A_DELAY_PS(351) G_DELAY_PS(0) /* CFG_MMC1_DAT3_OEN */ + >; + }; + + /* Corresponds to MMC1_MANUAL2 in datamanual */ + mmc1_iodelay_sdr104_rev20_conf: mmc1_iodelay_sdr104_rev20_conf { + pinctrl-pin-array = < + 0x620 A_DELAY_PS(520) G_DELAY_PS(320) /* CFG_MMC1_CLK_OUT */ + 0x62c A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_CMD_OUT */ + 0x638 A_DELAY_PS(40) G_DELAY_PS(0) /* CFG_MMC1_DAT0_OUT */ + 0x644 A_DELAY_PS(83) G_DELAY_PS(0) /* CFG_MMC1_DAT1_OUT */ + 0x650 A_DELAY_PS(98) G_DELAY_PS(0) /* CFG_MMC1_DAT2_OUT */ + 0x65c A_DELAY_PS(106) G_DELAY_PS(0) /* CFG_MMC1_DAT3_OUT */ + 0x628 A_DELAY_PS(51) G_DELAY_PS(0) /* CFG_MMC1_CMD_OEN */ + 0x634 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT0_OEN */ + 0x640 A_DELAY_PS(363) G_DELAY_PS(0) /* CFG_MMC1_DAT1_OEN */ + 0x64c A_DELAY_PS(199) G_DELAY_PS(0) /* CFG_MMC1_DAT2_OEN */ + 0x658 A_DELAY_PS(273) G_DELAY_PS(0) /* CFG_MMC1_DAT3_OEN */ + >; + }; + + /* Corresponds to MMC2_MANUAL1 in datamanual */ + mmc2_iodelay_ddr_conf: mmc2_iodelay_ddr_conf { + pinctrl-pin-array = < + 0x18c A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A19_IN */ + 0x1a4 A_DELAY_PS(119) G_DELAY_PS(0) /* CFG_GPMC_A20_IN */ + 0x1b0 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A21_IN */ + 0x1bc A_DELAY_PS(18) G_DELAY_PS(0) /* CFG_GPMC_A22_IN */ + 0x1c8 A_DELAY_PS(894) G_DELAY_PS(0) /* CFG_GPMC_A23_IN */ + 0x1d4 A_DELAY_PS(30) G_DELAY_PS(0) /* CFG_GPMC_A24_IN */ + 0x1e0 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A25_IN */ + 0x1ec A_DELAY_PS(23) G_DELAY_PS(0) /* CFG_GPMC_A26_IN */ + 0x1f8 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A27_IN */ + 0x360 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_CS1_IN */ + 0x194 A_DELAY_PS(152) G_DELAY_PS(0) /* CFG_GPMC_A19_OUT */ + 0x1ac A_DELAY_PS(206) G_DELAY_PS(0) /* CFG_GPMC_A20_OUT */ + 0x1b8 A_DELAY_PS(78) G_DELAY_PS(0) /* CFG_GPMC_A21_OUT */ + 0x1c4 A_DELAY_PS(2) G_DELAY_PS(0) /* CFG_GPMC_A22_OUT */ + 0x1d0 A_DELAY_PS(266) G_DELAY_PS(0) /* CFG_GPMC_A23_OUT */ + 0x1dc A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A24_OUT */ + 0x1e8 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A25_OUT */ + 0x1f4 A_DELAY_PS(43) G_DELAY_PS(0) /* CFG_GPMC_A26_OUT */ + 0x200 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A27_OUT */ + 0x368 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_CS1_OUT */ + 0x190 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A19_OEN */ + 0x1a8 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A20_OEN */ + 0x1b4 A_DELAY_PS(0) G_DELAY_PS(0) /* 
CFG_GPMC_A21_OEN */ + 0x1c0 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A22_OEN */ + 0x1d8 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A24_OEN */ + 0x1e4 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A25_OEN */ + 0x1f0 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A26_OEN */ + 0x1fc A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A27_OEN */ + 0x364 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_CS1_OEN */ + >; + }; + + /* Corresponds to MMC2_MANUAL3 in datamanual */ + mmc2_iodelay_hs200_rev10_conf: mmc2_iodelay_hs200_rev10_conf { + pinctrl-pin-array = < + 0x194 A_DELAY_PS(150) G_DELAY_PS(95) /* CFG_GPMC_A19_OUT */ + 0x1ac A_DELAY_PS(250) G_DELAY_PS(0) /* CFG_GPMC_A20_OUT */ + 0x1b8 A_DELAY_PS(125) G_DELAY_PS(0) /* CFG_GPMC_A21_OUT */ + 0x1c4 A_DELAY_PS(100) G_DELAY_PS(0) /* CFG_GPMC_A22_OUT */ + 0x1d0 A_DELAY_PS(870) G_DELAY_PS(415) /* CFG_GPMC_A23_OUT */ + 0x1dc A_DELAY_PS(30) G_DELAY_PS(0) /* CFG_GPMC_A24_OUT */ + 0x1e8 A_DELAY_PS(200) G_DELAY_PS(0) /* CFG_GPMC_A25_OUT */ + 0x1f4 A_DELAY_PS(200) G_DELAY_PS(0) /* CFG_GPMC_A26_OUT */ + 0x200 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A27_OUT */ + 0x368 A_DELAY_PS(240) G_DELAY_PS(0) /* CFG_GPMC_CS1_OUT */ + 0x190 A_DELAY_PS(695) G_DELAY_PS(0) /* CFG_GPMC_A19_OEN */ + 0x1a8 A_DELAY_PS(924) G_DELAY_PS(0) /* CFG_GPMC_A20_OEN */ + 0x1b4 A_DELAY_PS(719) G_DELAY_PS(0) /* CFG_GPMC_A21_OEN */ + 0x1c0 A_DELAY_PS(824) G_DELAY_PS(0) /* CFG_GPMC_A22_OEN */ + 0x1d8 A_DELAY_PS(877) G_DELAY_PS(0) /* CFG_GPMC_A24_OEN */ + 0x1e4 A_DELAY_PS(446) G_DELAY_PS(0) /* CFG_GPMC_A25_OEN */ + 0x1f0 A_DELAY_PS(847) G_DELAY_PS(0) /* CFG_GPMC_A26_OEN */ + 0x1fc A_DELAY_PS(586) G_DELAY_PS(0) /* CFG_GPMC_A27_OEN */ + 0x364 A_DELAY_PS(1039) G_DELAY_PS(0) /* CFG_GPMC_CS1_OEN */ + >; + }; + + /* Corresponds to MMC2_MANUAL3 in datamanual */ + mmc2_iodelay_hs200_rev20_conf: mmc2_iodelay_hs200_rev20_conf { + pinctrl-pin-array = < + 0x194 A_DELAY_PS(285) G_DELAY_PS(0) /* CFG_GPMC_A19_OUT */ + 0x1ac A_DELAY_PS(189) G_DELAY_PS(0) /* CFG_GPMC_A20_OUT */ + 0x1b8 A_DELAY_PS(0) G_DELAY_PS(120) /* CFG_GPMC_A21_OUT */ + 0x1c4 A_DELAY_PS(0) G_DELAY_PS(70) /* CFG_GPMC_A22_OUT */ + 0x1d0 A_DELAY_PS(730) G_DELAY_PS(360) /* CFG_GPMC_A23_OUT */ + 0x1dc A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A24_OUT */ + 0x1e8 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A25_OUT */ + 0x1f4 A_DELAY_PS(70) G_DELAY_PS(0) /* CFG_GPMC_A26_OUT */ + 0x200 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A27_OUT */ + 0x368 A_DELAY_PS(0) G_DELAY_PS(120) /* CFG_GPMC_CS1_OUT */ + 0x190 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A19_OEN */ + 0x1a8 A_DELAY_PS(231) G_DELAY_PS(0) /* CFG_GPMC_A20_OEN */ + 0x1b4 A_DELAY_PS(39) G_DELAY_PS(0) /* CFG_GPMC_A21_OEN */ + 0x1c0 A_DELAY_PS(91) G_DELAY_PS(0) /* CFG_GPMC_A22_OEN */ + 0x1d8 A_DELAY_PS(176) G_DELAY_PS(0) /* CFG_GPMC_A24_OEN */ + 0x1e4 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A25_OEN */ + 0x1f0 A_DELAY_PS(101) G_DELAY_PS(0) /* CFG_GPMC_A26_OEN */ + 0x1fc A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A27_OEN */ + 0x364 A_DELAY_PS(360) G_DELAY_PS(0) /* CFG_GPMC_CS1_OEN */ + >; + }; +}; diff --git a/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi b/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi new file mode 100644 index 000000000000..28ebb4eb884a --- /dev/null +++ b/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi @@ -0,0 +1,647 @@ +/* + * MMC IOdelay values for TI's DRA74x, DRA75x and AM572x SoCs. 
+ * + * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/ + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * Rules for modifying this file: + * a) Update of this file should typically correspond to a datamanual revision. + * Datamanual revision that was used should be updated in comment below. + * If there is no update to datamanual, do not update the values. If you + * need to use values different from that recommended by the datamanual + * for your design, then you should consider adding values to the device- + * -tree file for your board directly. + * b) We keep the mode names as close to the datamanual as possible. So + * if the manual calls a mode, DDR50, or DDR or DDR 1.8v or DDR 3.3v, + * we follow that in code too. + * c) If the values change between multiple revisions of silicon, we add + * a revision tag to both the new and old entry. Use 'rev11' for PG 1.1, + * 'rev20' for PG 2.0 and so on. + * d) The node name and node label should be the exact same string. This is + * to curb naming creativity and achieve consistency. + * + * Datamanual Revisions: + * + * AM572x Silicon Revision 2.0: SPRS953B, Revised November 2016 + * AM572x Silicon Revision 1.1: SPRS915R, Revised November 2016 + * + */ + +&dra7_pmx_core { + mmc1_pins_default: mmc1_pins_default { + pinctrl-single,pins = < + DRA7XX_CORE_IOPAD(0x3754, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_clk.clk */ + DRA7XX_CORE_IOPAD(0x3758, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_cmd.cmd */ + DRA7XX_CORE_IOPAD(0x375c, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat0.dat0 */ + DRA7XX_CORE_IOPAD(0x3760, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat1.dat1 */ + DRA7XX_CORE_IOPAD(0x3764, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat2.dat2 */ + DRA7XX_CORE_IOPAD(0x3768, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat3.dat3 */ + >; + }; + + mmc1_pins_sdr12: mmc1_pins_sdr12 { + pinctrl-single,pins = < + DRA7XX_CORE_IOPAD(0x3754, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_clk.clk */ + DRA7XX_CORE_IOPAD(0x3758, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_cmd.cmd */ + DRA7XX_CORE_IOPAD(0x375c, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat0.dat0 */ + DRA7XX_CORE_IOPAD(0x3760, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat1.dat1 */ + DRA7XX_CORE_IOPAD(0x3764, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat2.dat2 */ + DRA7XX_CORE_IOPAD(0x3768, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat3.dat3 */ + >; + }; + + mmc1_pins_hs: mmc1_pins_hs { + pinctrl-single,pins = < + DRA7XX_CORE_IOPAD(0x3754, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE11 | MUX_MODE0) /* mmc1_clk.clk */ + DRA7XX_CORE_IOPAD(0x3758, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE11 | MUX_MODE0) /* mmc1_cmd.cmd */ + DRA7XX_CORE_IOPAD(0x375c, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE11 | MUX_MODE0) /* mmc1_dat0.dat0 */ + DRA7XX_CORE_IOPAD(0x3760, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE11 | MUX_MODE0) /* mmc1_dat1.dat1 */ + DRA7XX_CORE_IOPAD(0x3764, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE11 | MUX_MODE0) /* mmc1_dat2.dat2 */ + DRA7XX_CORE_IOPAD(0x3768, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE11 | MUX_MODE0) /* mmc1_dat3.dat3 */ + >; + }; + + mmc1_pins_sdr25: mmc1_pins_sdr25 { + pinctrl-single,pins = < + DRA7XX_CORE_IOPAD(0x3754, PIN_INPUT_PULLUP | 
MUX_VIRTUAL_MODE11 | MUX_MODE0) /* mmc1_clk.clk */ + DRA7XX_CORE_IOPAD(0x3758, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE11 | MUX_MODE0) /* mmc1_cmd.cmd */ + DRA7XX_CORE_IOPAD(0x375c, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE11 | MUX_MODE0) /* mmc1_dat0.dat0 */ + DRA7XX_CORE_IOPAD(0x3760, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE11 | MUX_MODE0) /* mmc1_dat1.dat1 */ + DRA7XX_CORE_IOPAD(0x3764, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE11 | MUX_MODE0) /* mmc1_dat2.dat2 */ + DRA7XX_CORE_IOPAD(0x3768, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE11 | MUX_MODE0) /* mmc1_dat3.dat3 */ + >; + }; + + mmc1_pins_sdr50: mmc1_pins_sdr50 { + pinctrl-single,pins = < + DRA7XX_CORE_IOPAD(0x3754, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE10 | MUX_MODE0) /* mmc1_clk.clk */ + DRA7XX_CORE_IOPAD(0x3758, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE10 | MUX_MODE0) /* mmc1_cmd.cmd */ + DRA7XX_CORE_IOPAD(0x375c, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE10 | MUX_MODE0) /* mmc1_dat0.dat0 */ + DRA7XX_CORE_IOPAD(0x3760, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE10 | MUX_MODE0) /* mmc1_dat1.dat1 */ + DRA7XX_CORE_IOPAD(0x3764, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE10 | MUX_MODE0) /* mmc1_dat2.dat2 */ + DRA7XX_CORE_IOPAD(0x3768, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE10 | MUX_MODE0) /* mmc1_dat3.dat3 */ + >; + }; + + mmc1_pins_ddr50: mmc1_pins_ddr50 { + pinctrl-single,pins = < + DRA7XX_CORE_IOPAD(0x3754, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_clk.clk */ + DRA7XX_CORE_IOPAD(0x3758, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_cmd.cmd */ + DRA7XX_CORE_IOPAD(0x375c, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_dat0.dat0 */ + DRA7XX_CORE_IOPAD(0x3760, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_dat1.dat1 */ + DRA7XX_CORE_IOPAD(0x3764, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_dat2.dat2 */ + DRA7XX_CORE_IOPAD(0x3768, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_dat3.dat3 */ + >; + }; + + mmc1_pins_sdr104: mmc1_pins_sdr104 { + pinctrl-single,pins = < + DRA7XX_CORE_IOPAD(0x3754, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_clk.clk */ + DRA7XX_CORE_IOPAD(0x3758, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_cmd.cmd */ + DRA7XX_CORE_IOPAD(0x375c, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_dat0.dat0 */ + DRA7XX_CORE_IOPAD(0x3760, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_dat1.dat1 */ + DRA7XX_CORE_IOPAD(0x3764, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_dat2.dat2 */ + DRA7XX_CORE_IOPAD(0x3768, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_dat3.dat3 */ + >; + }; + + mmc2_pins_default: mmc2_pins_default { + pinctrl-single,pins = < + DRA7XX_CORE_IOPAD(0x349c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a23.mmc2_clk */ + DRA7XX_CORE_IOPAD(0x34b0, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_cs1.mmc2_cmd */ + DRA7XX_CORE_IOPAD(0x34a0, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a24.mmc2_dat0 */ + DRA7XX_CORE_IOPAD(0x34a4, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a25.mmc2_dat1 */ + DRA7XX_CORE_IOPAD(0x34a8, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a26.mmc2_dat2 */ + DRA7XX_CORE_IOPAD(0x34ac, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a27.mmc2_dat3 */ + DRA7XX_CORE_IOPAD(0x348c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a19.mmc2_dat4 */ + DRA7XX_CORE_IOPAD(0x3490, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a20.mmc2_dat5 */ + DRA7XX_CORE_IOPAD(0x3494, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a21.mmc2_dat6 */ + DRA7XX_CORE_IOPAD(0x3498, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a22.mmc2_dat7 */ + >; + }; + + mmc2_pins_hs: mmc2_pins_hs { + pinctrl-single,pins = < + DRA7XX_CORE_IOPAD(0x349c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a23.mmc2_clk */ + 
DRA7XX_CORE_IOPAD(0x34b0, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_cs1.mmc2_cmd */ + DRA7XX_CORE_IOPAD(0x34a0, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a24.mmc2_dat0 */ + DRA7XX_CORE_IOPAD(0x34a4, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a25.mmc2_dat1 */ + DRA7XX_CORE_IOPAD(0x34a8, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a26.mmc2_dat2 */ + DRA7XX_CORE_IOPAD(0x34ac, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a27.mmc2_dat3 */ + DRA7XX_CORE_IOPAD(0x348c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a19.mmc2_dat4 */ + DRA7XX_CORE_IOPAD(0x3490, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a20.mmc2_dat5 */ + DRA7XX_CORE_IOPAD(0x3494, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a21.mmc2_dat6 */ + DRA7XX_CORE_IOPAD(0x3498, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a22.mmc2_dat7 */ + >; + }; + + mmc2_pins_ddr_3_3v_rev11: mmc2_pins_ddr_3_3v_rev11 { + pinctrl-single,pins = < + DRA7XX_CORE_IOPAD(0x349c, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a23.mmc2_clk */ + DRA7XX_CORE_IOPAD(0x34b0, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_cs1.mmc2_cmd */ + DRA7XX_CORE_IOPAD(0x34a0, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a24.mmc2_dat0 */ + DRA7XX_CORE_IOPAD(0x34a4, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a25.mmc2_dat1 */ + DRA7XX_CORE_IOPAD(0x34a8, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a26.mmc2_dat2 */ + DRA7XX_CORE_IOPAD(0x34ac, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a27.mmc2_dat3 */ + DRA7XX_CORE_IOPAD(0x348c, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a19.mmc2_dat4 */ + DRA7XX_CORE_IOPAD(0x3490, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a20.mmc2_dat5 */ + DRA7XX_CORE_IOPAD(0x3494, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a21.mmc2_dat6 */ + DRA7XX_CORE_IOPAD(0x3498, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a22.mmc2_dat7 */ + >; + }; + + mmc2_pins_ddr_1_8v_rev11: mmc2_pins_ddr_1_8v_rev11 { + pinctrl-single,pins = < + DRA7XX_CORE_IOPAD(0x349c, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a23.mmc2_clk */ + DRA7XX_CORE_IOPAD(0x34b0, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_cs1.mmc2_cmd */ + DRA7XX_CORE_IOPAD(0x34a0, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a24.mmc2_dat0 */ + DRA7XX_CORE_IOPAD(0x34a4, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a25.mmc2_dat1 */ + DRA7XX_CORE_IOPAD(0x34a8, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a26.mmc2_dat2 */ + DRA7XX_CORE_IOPAD(0x34ac, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a27.mmc2_dat3 */ + DRA7XX_CORE_IOPAD(0x348c, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a19.mmc2_dat4 */ + DRA7XX_CORE_IOPAD(0x3490, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a20.mmc2_dat5 */ + DRA7XX_CORE_IOPAD(0x3494, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a21.mmc2_dat6 */ + DRA7XX_CORE_IOPAD(0x3498, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a22.mmc2_dat7 */ + >; + }; + + mmc2_pins_ddr_rev20: mmc2_pins_ddr_rev20 { + pinctrl-single,pins = < + DRA7XX_CORE_IOPAD(0x349c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a23.mmc2_clk */ + DRA7XX_CORE_IOPAD(0x34b0, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_cs1.mmc2_cmd */ + DRA7XX_CORE_IOPAD(0x34a0, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a24.mmc2_dat0 */ + DRA7XX_CORE_IOPAD(0x34a4, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a25.mmc2_dat1 */ + DRA7XX_CORE_IOPAD(0x34a8, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a26.mmc2_dat2 */ + DRA7XX_CORE_IOPAD(0x34ac, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a27.mmc2_dat3 */ + DRA7XX_CORE_IOPAD(0x348c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a19.mmc2_dat4 */ + 
DRA7XX_CORE_IOPAD(0x3490, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a20.mmc2_dat5 */ + DRA7XX_CORE_IOPAD(0x3494, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a21.mmc2_dat6 */ + DRA7XX_CORE_IOPAD(0x3498, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a22.mmc2_dat7 */ + >; + }; + + mmc2_pins_hs200: mmc2_pins_hs200 { + pinctrl-single,pins = < + DRA7XX_CORE_IOPAD(0x349c, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a23.mmc2_clk */ + DRA7XX_CORE_IOPAD(0x34b0, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_cs1.mmc2_cmd */ + DRA7XX_CORE_IOPAD(0x34a0, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a24.mmc2_dat0 */ + DRA7XX_CORE_IOPAD(0x34a4, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a25.mmc2_dat1 */ + DRA7XX_CORE_IOPAD(0x34a8, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a26.mmc2_dat2 */ + DRA7XX_CORE_IOPAD(0x34ac, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a27.mmc2_dat3 */ + DRA7XX_CORE_IOPAD(0x348c, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a19.mmc2_dat4 */ + DRA7XX_CORE_IOPAD(0x3490, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a20.mmc2_dat5 */ + DRA7XX_CORE_IOPAD(0x3494, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a21.mmc2_dat6 */ + DRA7XX_CORE_IOPAD(0x3498, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a22.mmc2_dat7 */ + >; + }; + + mmc4_pins_default: mmc4_pins_default { + pinctrl-single,pins = < + DRA7XX_CORE_IOPAD(0x37e8, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart1_ctsn.mmc4_clk */ + DRA7XX_CORE_IOPAD(0x37ec, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart1_rtsn.mmc4_cmd */ + DRA7XX_CORE_IOPAD(0x37f0, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_rxd.mmc4_dat0 */ + DRA7XX_CORE_IOPAD(0x37f4, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_txd.mmc4_dat1 */ + DRA7XX_CORE_IOPAD(0x37f8, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_ctsn.mmc4_dat2 */ + DRA7XX_CORE_IOPAD(0x37fc, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_rtsn.mmc4_dat3 */ + >; + }; + + mmc4_pins_hs: mmc4_pins_hs { + pinctrl-single,pins = < + DRA7XX_CORE_IOPAD(0x37e8, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart1_ctsn.mmc4_clk */ + DRA7XX_CORE_IOPAD(0x37ec, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart1_rtsn.mmc4_cmd */ + DRA7XX_CORE_IOPAD(0x37f0, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_rxd.mmc4_dat0 */ + DRA7XX_CORE_IOPAD(0x37f4, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_txd.mmc4_dat1 */ + DRA7XX_CORE_IOPAD(0x37f8, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_ctsn.mmc4_dat2 */ + DRA7XX_CORE_IOPAD(0x37fc, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_rtsn.mmc4_dat3 */ + >; + }; + + mmc3_pins_default: mmc3_pins_default { + pinctrl-single,pins = < + DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ + DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ + DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ + DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ + DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ + DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ + >; + }; + + mmc3_pins_hs: mmc3_pins_hs { + pinctrl-single,pins = < + DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ + DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ + DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ + 
DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ + DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ + DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ + >; + }; + + mmc3_pins_sdr12: mmc3_pins_sdr12 { + pinctrl-single,pins = < + DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ + DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ + DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ + DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ + DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ + DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ + >; + }; + + mmc3_pins_sdr25: mmc3_pins_sdr25 { + pinctrl-single,pins = < + DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ + DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ + DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ + DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ + DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ + DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ + >; + }; + + mmc3_pins_sdr50: mmc3_pins_sdr50 { + pinctrl-single,pins = < + DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ + DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ + DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ + DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ + DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ + DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ + >; + }; + + mmc4_pins_sdr12: mmc4_pins_sdr12 { + pinctrl-single,pins = < + DRA7XX_CORE_IOPAD(0x37e8, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart1_ctsn.mmc4_clk */ + DRA7XX_CORE_IOPAD(0x37ec, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart1_rtsn.mmc4_cmd */ + DRA7XX_CORE_IOPAD(0x37f0, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_rxd.mmc4_dat0 */ + DRA7XX_CORE_IOPAD(0x37f4, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_txd.mmc4_dat1 */ + DRA7XX_CORE_IOPAD(0x37f8, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_ctsn.mmc4_dat2 */ + DRA7XX_CORE_IOPAD(0x37fc, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_rtsn.mmc4_dat3 */ + >; + }; + + mmc4_pins_sdr25: mmc4_pins_sdr25 { + pinctrl-single,pins = < + DRA7XX_CORE_IOPAD(0x37e8, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart1_ctsn.mmc4_clk */ + DRA7XX_CORE_IOPAD(0x37ec, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart1_rtsn.mmc4_cmd */ + DRA7XX_CORE_IOPAD(0x37f0, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_rxd.mmc4_dat0 */ + DRA7XX_CORE_IOPAD(0x37f4, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_txd.mmc4_dat1 */ + DRA7XX_CORE_IOPAD(0x37f8, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_ctsn.mmc4_dat2 */ + DRA7XX_CORE_IOPAD(0x37fc, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_rtsn.mmc4_dat3 */ + >; + }; +}; + +&dra7_iodelay_core { + + /* Corresponds to MMC1_DDR_MANUAL1 in datamanual */ + 
mmc1_iodelay_ddr_rev11_conf: mmc1_iodelay_ddr_rev11_conf { + pinctrl-pin-array = < + 0x618 A_DELAY_PS(572) G_DELAY_PS(540) /* CFG_MMC1_CLK_IN */ + 0x620 A_DELAY_PS(1525) G_DELAY_PS(0) /* CFG_MMC1_CLK_OUT */ + 0x624 A_DELAY_PS(0) G_DELAY_PS(600) /* CFG_MMC1_CMD_IN */ + 0x628 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_CMD_OEN */ + 0x62c A_DELAY_PS(55) G_DELAY_PS(0) /* CFG_MMC1_CMD_OUT */ + 0x630 A_DELAY_PS(403) G_DELAY_PS(120) /* CFG_MMC1_DAT0_IN */ + 0x634 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT0_OEN */ + 0x638 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT0_OUT */ + 0x63c A_DELAY_PS(23) G_DELAY_PS(60) /* CFG_MMC1_DAT1_IN */ + 0x640 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT1_OEN */ + 0x644 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT1_OUT */ + 0x648 A_DELAY_PS(25) G_DELAY_PS(60) /* CFG_MMC1_DAT2_IN */ + 0x64c A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT2_OEN */ + 0x650 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT2_OUT */ + 0x654 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT3_IN */ + 0x658 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT3_OEN */ + 0x65c A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT3_OUT */ + >; + }; + + /* Corresponds to MMC1_DDR_MANUAL1 in datamanual */ + mmc1_iodelay_ddr_rev20_conf: mmc1_iodelay_ddr50_rev20_conf { + pinctrl-pin-array = < + 0x618 A_DELAY_PS(1076) G_DELAY_PS(330) /* CFG_MMC1_CLK_IN */ + 0x620 A_DELAY_PS(1271) G_DELAY_PS(0) /* CFG_MMC1_CLK_OUT */ + 0x624 A_DELAY_PS(722) G_DELAY_PS(0) /* CFG_MMC1_CMD_IN */ + 0x628 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_CMD_OEN */ + 0x62C A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_CMD_OUT */ + 0x630 A_DELAY_PS(751) G_DELAY_PS(0) /* CFG_MMC1_DAT0_IN */ + 0x634 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT0_OEN */ + 0x638 A_DELAY_PS(20) G_DELAY_PS(0) /* CFG_MMC1_DAT0_OUT */ + 0x63C A_DELAY_PS(256) G_DELAY_PS(0) /* CFG_MMC1_DAT1_IN */ + 0x640 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT1_OEN */ + 0x644 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT1_OUT */ + 0x648 A_DELAY_PS(263) G_DELAY_PS(0) /* CFG_MMC1_DAT2_IN */ + 0x64C A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT2_OEN */ + 0x650 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT2_OUT */ + 0x654 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT3_IN */ + 0x658 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT3_OEN */ + 0x65C A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT3_OUT */ + >; + }; + + /* Corresponds to MMC1_SDR104_MANUAL1 in datamanual */ + mmc1_iodelay_sdr104_rev11_conf: mmc1_iodelay_sdr104_rev11_conf { + pinctrl-pin-array = < + 0x620 A_DELAY_PS(1063) G_DELAY_PS(17) /* CFG_MMC1_CLK_OUT */ + 0x628 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_CMD_OEN */ + 0x62c A_DELAY_PS(23) G_DELAY_PS(0) /* CFG_MMC1_CMD_OUT */ + 0x634 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT0_OEN */ + 0x638 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT0_OUT */ + 0x640 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT1_OEN */ + 0x644 A_DELAY_PS(2) G_DELAY_PS(0) /* CFG_MMC1_DAT1_OUT */ + 0x64c A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT2_OEN */ + 0x650 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT2_OUT */ + 0x658 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT3_OEN */ + 0x65c A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT3_OUT */ + >; + }; + + /* Corresponds to MMC1_SDR104_MANUAL1 in datamanual */ + mmc1_iodelay_sdr104_rev20_conf: mmc1_iodelay_sdr104_rev20_conf { + pinctrl-pin-array = < + 0x620 A_DELAY_PS(600) G_DELAY_PS(400) /* CFG_MMC1_CLK_OUT */ + 0x628 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_CMD_OEN */ + 0x62c A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_CMD_OUT */ + 0x634 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT0_OEN */ + 0x638 A_DELAY_PS(30) G_DELAY_PS(0) /* 
CFG_MMC1_DAT0_OUT */ + 0x640 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT1_OEN */ + 0x644 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT1_OUT */ + 0x64c A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT2_OEN */ + 0x650 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT2_OUT */ + 0x658 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT3_OEN */ + 0x65c A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC1_DAT3_OUT */ + >; + }; + + /* Corresponds to MMC2_HS200_MANUAL1 in datamanual */ + mmc2_iodelay_hs200_rev11_conf: mmc2_iodelay_hs200_rev11_conf { + pinctrl-pin-array = < + 0x190 A_DELAY_PS(621) G_DELAY_PS(600) /* CFG_GPMC_A19_OEN */ + 0x194 A_DELAY_PS(300) G_DELAY_PS(0) /* CFG_GPMC_A19_OUT */ + 0x1a8 A_DELAY_PS(739) G_DELAY_PS(600) /* CFG_GPMC_A20_OEN */ + 0x1ac A_DELAY_PS(240) G_DELAY_PS(0) /* CFG_GPMC_A20_OUT */ + 0x1b4 A_DELAY_PS(812) G_DELAY_PS(600) /* CFG_GPMC_A21_OEN */ + 0x1b8 A_DELAY_PS(240) G_DELAY_PS(0) /* CFG_GPMC_A21_OUT */ + 0x1c0 A_DELAY_PS(954) G_DELAY_PS(600) /* CFG_GPMC_A22_OEN */ + 0x1c4 A_DELAY_PS(60) G_DELAY_PS(0) /* CFG_GPMC_A22_OUT */ + 0x1d0 A_DELAY_PS(1340) G_DELAY_PS(420) /* CFG_GPMC_A23_OUT */ + 0x1d8 A_DELAY_PS(935) G_DELAY_PS(600) /* CFG_GPMC_A24_OEN */ + 0x1dc A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A24_OUT */ + 0x1e4 A_DELAY_PS(525) G_DELAY_PS(600) /* CFG_GPMC_A25_OEN */ + 0x1e8 A_DELAY_PS(120) G_DELAY_PS(0) /* CFG_GPMC_A25_OUT */ + 0x1f0 A_DELAY_PS(767) G_DELAY_PS(600) /* CFG_GPMC_A26_OEN */ + 0x1f4 A_DELAY_PS(225) G_DELAY_PS(0) /* CFG_GPMC_A26_OUT */ + 0x1fc A_DELAY_PS(565) G_DELAY_PS(600) /* CFG_GPMC_A27_OEN */ + 0x200 A_DELAY_PS(60) G_DELAY_PS(0) /* CFG_GPMC_A27_OUT */ + 0x364 A_DELAY_PS(969) G_DELAY_PS(600) /* CFG_GPMC_CS1_OEN */ + 0x368 A_DELAY_PS(180) G_DELAY_PS(0) /* CFG_GPMC_CS1_OUT */ + >; + }; + + /* Corresponds to MMC2_HS200_MANUAL1 in datamanual */ + mmc2_iodelay_hs200_rev20_conf: mmc2_iodelay_hs200_rev20_conf { + pinctrl-pin-array = < + 0x190 A_DELAY_PS(274) G_DELAY_PS(0) /* CFG_GPMC_A19_OEN */ + 0x194 A_DELAY_PS(162) G_DELAY_PS(0) /* CFG_GPMC_A19_OUT */ + 0x1a8 A_DELAY_PS(401) G_DELAY_PS(0) /* CFG_GPMC_A20_OEN */ + 0x1ac A_DELAY_PS(73) G_DELAY_PS(0) /* CFG_GPMC_A20_OUT */ + 0x1b4 A_DELAY_PS(465) G_DELAY_PS(0) /* CFG_GPMC_A21_OEN */ + 0x1b8 A_DELAY_PS(115) G_DELAY_PS(0) /* CFG_GPMC_A21_OUT */ + 0x1c0 A_DELAY_PS(633) G_DELAY_PS(0) /* CFG_GPMC_A22_OEN */ + 0x1c4 A_DELAY_PS(47) G_DELAY_PS(0) /* CFG_GPMC_A22_OUT */ + 0x1d0 A_DELAY_PS(935) G_DELAY_PS(280) /* CFG_GPMC_A23_OUT */ + 0x1d8 A_DELAY_PS(621) G_DELAY_PS(0) /* CFG_GPMC_A24_OEN */ + 0x1dc A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A24_OUT */ + 0x1e4 A_DELAY_PS(183) G_DELAY_PS(0) /* CFG_GPMC_A25_OEN */ + 0x1e8 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A25_OUT */ + 0x1f0 A_DELAY_PS(467) G_DELAY_PS(0) /* CFG_GPMC_A26_OEN */ + 0x1f4 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A26_OUT */ + 0x1fc A_DELAY_PS(262) G_DELAY_PS(0) /* CFG_GPMC_A27_OEN */ + 0x200 A_DELAY_PS(46) G_DELAY_PS(0) /* CFG_GPMC_A27_OUT */ + 0x364 A_DELAY_PS(684) G_DELAY_PS(0) /* CFG_GPMC_CS1_OEN */ + 0x368 A_DELAY_PS(76) G_DELAY_PS(0) /* CFG_GPMC_CS1_OUT */ + >; + }; + + /* Correspnds to MMC2_DDR_3V3_MANUAL1 in datamanual */ + mmc2_iodelay_ddr_3_3v_rev11_conf: mmc2_iodelay_ddr_3_3v_rev11_conf { + pinctrl-pin-array = < + 0x18c A_DELAY_PS(0) G_DELAY_PS(120) /* CFG_GPMC_A19_IN */ + 0x190 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A19_OEN */ + 0x194 A_DELAY_PS(174) G_DELAY_PS(0) /* CFG_GPMC_A19_OUT */ + 0x1a4 A_DELAY_PS(265) G_DELAY_PS(360) /* CFG_GPMC_A20_IN */ + 0x1a8 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A20_OEN */ + 0x1ac A_DELAY_PS(168) G_DELAY_PS(0) /* CFG_GPMC_A20_OUT */ + 
0x1b0 A_DELAY_PS(0) G_DELAY_PS(120) /* CFG_GPMC_A21_IN */ + 0x1b4 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A21_OEN */ + 0x1b8 A_DELAY_PS(136) G_DELAY_PS(0) /* CFG_GPMC_A21_OUT */ + 0x1bc A_DELAY_PS(0) G_DELAY_PS(120) /* CFG_GPMC_A22_IN */ + 0x1c0 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A22_OEN */ + 0x1c4 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A22_OUT */ + 0x1c8 A_DELAY_PS(287) G_DELAY_PS(420) /* CFG_GPMC_A23_IN */ + 0x1d0 A_DELAY_PS(879) G_DELAY_PS(0) /* CFG_GPMC_A23_OUT */ + 0x1d4 A_DELAY_PS(144) G_DELAY_PS(240) /* CFG_GPMC_A24_IN */ + 0x1d8 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A24_OEN */ + 0x1dc A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A24_OUT */ + 0x1e0 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A25_IN */ + 0x1e4 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A25_OEN */ + 0x1e8 A_DELAY_PS(34) G_DELAY_PS(0) /* CFG_GPMC_A25_OUT */ + 0x1ec A_DELAY_PS(0) G_DELAY_PS(120) /* CFG_GPMC_A26_IN */ + 0x1f0 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A26_OEN */ + 0x1f4 A_DELAY_PS(120) G_DELAY_PS(0) /* CFG_GPMC_A26_OUT */ + 0x1f8 A_DELAY_PS(120) G_DELAY_PS(180) /* CFG_GPMC_A27_IN */ + 0x1fc A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A27_OEN */ + 0x200 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A27_OUT */ + 0x360 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_CS1_IN */ + 0x364 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_CS1_OEN */ + 0x368 A_DELAY_PS(11) G_DELAY_PS(0) /* CFG_GPMC_CS1_OUT */ + >; + }; + + /* Corresponds to MMC2_DDR_1V8_MANUAL1 in datamanual */ + mmc2_iodelay_ddr_1_8v_rev11_conf: mmc2_iodelay_ddr_1_8v_rev11_conf { + pinctrl-pin-array = < + 0x18c A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A19_IN */ + 0x190 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A19_OEN */ + 0x194 A_DELAY_PS(174) G_DELAY_PS(0) /* CFG_GPMC_A19_OUT */ + 0x1a4 A_DELAY_PS(274) G_DELAY_PS(240) /* CFG_GPMC_A20_IN */ + 0x1a8 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A20_OEN */ + 0x1ac A_DELAY_PS(168) G_DELAY_PS(0) /* CFG_GPMC_A20_OUT */ + 0x1b0 A_DELAY_PS(0) G_DELAY_PS(60) /* CFG_GPMC_A21_IN */ + 0x1b4 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A21_OEN */ + 0x1b8 A_DELAY_PS(136) G_DELAY_PS(0) /* CFG_GPMC_A21_OUT */ + 0x1bc A_DELAY_PS(0) G_DELAY_PS(60) /* CFG_GPMC_A22_IN */ + 0x1c0 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A22_OEN */ + 0x1c4 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A22_OUT */ + 0x1c8 A_DELAY_PS(514) G_DELAY_PS(360) /* CFG_GPMC_A23_IN */ + 0x1d0 A_DELAY_PS(879) G_DELAY_PS(0) /* CFG_GPMC_A23_OUT */ + 0x1d4 A_DELAY_PS(187) G_DELAY_PS(120) /* CFG_GPMC_A24_IN */ + 0x1d8 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A24_OEN */ + 0x1dc A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A24_OUT */ + 0x1e0 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A25_IN */ + 0x1e4 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A25_OEN */ + 0x1e8 A_DELAY_PS(34) G_DELAY_PS(0) /* CFG_GPMC_A25_OUT */ + 0x1ec A_DELAY_PS(0) G_DELAY_PS(60) /* CFG_GPMC_A26_IN */ + 0x1f0 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A26_OEN */ + 0x1f4 A_DELAY_PS(120) G_DELAY_PS(0) /* CFG_GPMC_A26_OUT */ + 0x1f8 A_DELAY_PS(121) G_DELAY_PS(60) /* CFG_GPMC_A27_IN */ + 0x1fc A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A27_OEN */ + 0x200 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A27_OUT */ + 0x360 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_CS1_IN */ + 0x364 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_CS1_OEN */ + 0x368 A_DELAY_PS(11) G_DELAY_PS(0) /* CFG_GPMC_CS1_OUT */ + >; + }; + + /* Corresponds to MMC3_MANUAL1 in datamanual */ + mmc3_iodelay_manual1_rev20_conf: mmc3_iodelay_manual1_conf { + pinctrl-pin-array = < + 0x678 A_DELAY_PS(0) G_DELAY_PS(386) /* CFG_MMC3_CLK_IN */ + 0x680 A_DELAY_PS(605) G_DELAY_PS(0) /* CFG_MMC3_CLK_OUT */ + 0x684 
A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_CMD_IN */ + 0x688 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_CMD_OEN */ + 0x68c A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_CMD_OUT */ + 0x690 A_DELAY_PS(171) G_DELAY_PS(0) /* CFG_MMC3_DAT0_IN */ + 0x694 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_DAT0_OEN */ + 0x698 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_DAT0_OUT */ + 0x69c A_DELAY_PS(221) G_DELAY_PS(0) /* CFG_MMC3_DAT1_IN */ + 0x6a0 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_DAT1_OEN */ + 0x6a4 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_DAT1_OUT */ + 0x6a8 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_DAT2_IN */ + 0x6ac A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_DAT2_OEN */ + 0x6b0 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_DAT2_OUT */ + 0x6b4 A_DELAY_PS(474) G_DELAY_PS(0) /* CFG_MMC3_DAT3_IN */ + 0x6b8 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_DAT3_OEN */ + 0x6bc A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_DAT3_OUT */ + >; + }; + + /* Corresponds to MMC3_MANUAL1 in datamanual */ + mmc3_iodelay_manual1_rev11_conf: mmc3_iodelay_manual1_conf { + pinctrl-pin-array = < + 0x678 A_DELAY_PS(406) G_DELAY_PS(0) /* CFG_MMC3_CLK_IN */ + 0x680 A_DELAY_PS(659) G_DELAY_PS(0) /* CFG_MMC3_CLK_OUT */ + 0x684 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_CMD_IN */ + 0x688 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_CMD_OEN */ + 0x68c A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_CMD_OUT */ + 0x690 A_DELAY_PS(130) G_DELAY_PS(0) /* CFG_MMC3_DAT0_IN */ + 0x694 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_DAT0_OEN */ + 0x698 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_DAT0_OUT */ + 0x69c A_DELAY_PS(169) G_DELAY_PS(0) /* CFG_MMC3_DAT1_IN */ + 0x6a0 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_DAT1_OEN */ + 0x6a4 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_DAT1_OUT */ + 0x6a8 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_DAT2_IN */ + 0x6ac A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_DAT2_OEN */ + 0x6b0 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_DAT2_OUT */ + 0x6b4 A_DELAY_PS(457) G_DELAY_PS(0) /* CFG_MMC3_DAT3_IN */ + 0x6b8 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_DAT3_OEN */ + 0x6bc A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_MMC3_DAT3_OUT */ + >; + }; + + /* Corresponds to MMC4_DS_MANUAL1 in datamanual */ + mmc4_iodelay_ds_rev11_conf: mmc4_iodelay_ds_rev11_conf { + pinctrl-pin-array = < + 0x840 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART1_CTSN_IN */ + 0x848 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART1_CTSN_OUT */ + 0x84c A_DELAY_PS(96) G_DELAY_PS(0) /* CFG_UART1_RTSN_IN */ + 0x850 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART1_RTSN_OEN */ + 0x854 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART1_RTSN_OUT */ + 0x870 A_DELAY_PS(582) G_DELAY_PS(0) /* CFG_UART2_CTSN_IN */ + 0x874 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_CTSN_OEN */ + 0x878 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_CTSN_OUT */ + 0x87c A_DELAY_PS(391) G_DELAY_PS(0) /* CFG_UART2_RTSN_IN */ + 0x880 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_RTSN_OEN */ + 0x884 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_RTSN_OUT */ + 0x888 A_DELAY_PS(561) G_DELAY_PS(0) /* CFG_UART2_RXD_IN */ + 0x88c A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_RXD_OEN */ + 0x890 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_RXD_OUT */ + 0x894 A_DELAY_PS(588) G_DELAY_PS(0) /* CFG_UART2_TXD_IN */ + 0x898 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_TXD_OEN */ + 0x89c A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_TXD_OUT */ + >; + }; + + /* Corresponds to MMC4_DS_MANUAL1 in datamanual */ + mmc4_iodelay_ds_rev20_conf: mmc4_iodelay_ds_rev20_conf { + pinctrl-pin-array = < + 0x840 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART1_CTSN_IN */ + 0x848 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART1_CTSN_OUT */ + 0x84c A_DELAY_PS(307) G_DELAY_PS(0) /* 
CFG_UART1_RTSN_IN */ + 0x850 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART1_RTSN_OEN */ + 0x854 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART1_RTSN_OUT */ + 0x870 A_DELAY_PS(785) G_DELAY_PS(0) /* CFG_UART2_CTSN_IN */ + 0x874 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_CTSN_OEN */ + 0x878 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_CTSN_OUT */ + 0x87c A_DELAY_PS(613) G_DELAY_PS(0) /* CFG_UART2_RTSN_IN */ + 0x880 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_RTSN_OEN */ + 0x884 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_RTSN_OUT */ + 0x888 A_DELAY_PS(683) G_DELAY_PS(0) /* CFG_UART2_RXD_IN */ + 0x88c A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_RXD_OEN */ + 0x890 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_RXD_OUT */ + 0x894 A_DELAY_PS(835) G_DELAY_PS(0) /* CFG_UART2_TXD_IN */ + 0x898 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_TXD_OEN */ + 0x89c A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_TXD_OUT */ + >; + }; + + /* Corresponds to MMC4_MANUAL1 in datamanual */ + mmc4_iodelay_sdr12_hs_sdr25_rev11_conf: mmc4_iodelay_sdr12_hs_sdr25_rev11_conf { + pinctrl-pin-array = < + 0x840 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART1_CTSN_IN */ + 0x848 A_DELAY_PS(2651) G_DELAY_PS(0) /* CFG_UART1_CTSN_OUT */ + 0x84c A_DELAY_PS(1572) G_DELAY_PS(0) /* CFG_UART1_RTSN_IN */ + 0x850 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART1_RTSN_OEN */ + 0x854 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART1_RTSN_OUT */ + 0x870 A_DELAY_PS(1913) G_DELAY_PS(0) /* CFG_UART2_CTSN_IN */ + 0x874 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_CTSN_OEN */ + 0x878 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_CTSN_OUT */ + 0x87c A_DELAY_PS(1721) G_DELAY_PS(0) /* CFG_UART2_RTSN_IN */ + 0x880 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_RTSN_OEN */ + 0x884 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_RTSN_OUT */ + 0x888 A_DELAY_PS(1891) G_DELAY_PS(0) /* CFG_UART2_RXD_IN */ + 0x88c A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_RXD_OEN */ + 0x890 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_RXD_OUT */ + 0x894 A_DELAY_PS(1919) G_DELAY_PS(0) /* CFG_UART2_TXD_IN */ + 0x898 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_TXD_OEN */ + 0x89c A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_TXD_OUT */ + >; + }; + + /* Corresponds to MMC4_MANUAL1 in datamanual */ + mmc4_iodelay_sdr12_hs_sdr25_rev20_conf: mmc4_iodelay_sdr12_hs_sdr25_rev20_conf { + pinctrl-pin-array = < + 0x840 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART1_CTSN_IN */ + 0x848 A_DELAY_PS(1147) G_DELAY_PS(0) /* CFG_UART1_CTSN_OUT */ + 0x84c A_DELAY_PS(1834) G_DELAY_PS(0) /* CFG_UART1_RTSN_IN */ + 0x850 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART1_RTSN_OEN */ + 0x854 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART1_RTSN_OUT */ + 0x870 A_DELAY_PS(2165) G_DELAY_PS(0) /* CFG_UART2_CTSN_IN */ + 0x874 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_CTSN_OEN */ + 0x878 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_CTSN_OUT */ + 0x87c A_DELAY_PS(1929) G_DELAY_PS(64) /* CFG_UART2_RTSN_IN */ + 0x880 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_RTSN_OEN */ + 0x884 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_RTSN_OUT */ + 0x888 A_DELAY_PS(1935) G_DELAY_PS(128) /* CFG_UART2_RXD_IN */ + 0x88c A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_RXD_OEN */ + 0x890 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_RXD_OUT */ + 0x894 A_DELAY_PS(2172) G_DELAY_PS(44) /* CFG_UART2_TXD_IN */ + 0x898 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_TXD_OEN */ + 0x89c A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_UART2_TXD_OUT */ + >; + }; +}; diff --git a/arch/arm/boot/dts/dra76-evm.dts b/arch/arm/boot/dts/dra76-evm.dts new file mode 100644 index 000000000000..b024a65c6e27 --- /dev/null +++ b/arch/arm/boot/dts/dra76-evm.dts @@ -0,0 +1,423 @@ +/* + * Copyright (C) 2017 Texas 
Instruments Incorporated - http://www.ti.com/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +/dts-v1/; + +#include "dra76x.dtsi" +#include "dra7-evm-common.dtsi" +#include + +/ { + model = "TI DRA762 EVM"; + compatible = "ti,dra76-evm", "ti,dra762", "ti,dra7"; + + memory@0 { + device_type = "memory"; + reg = <0x0 0x80000000 0x0 0x80000000>; + }; + + vsys_12v0: fixedregulator-vsys12v0 { + /* main supply */ + compatible = "regulator-fixed"; + regulator-name = "vsys_12v0"; + regulator-min-microvolt = <12000000>; + regulator-max-microvolt = <12000000>; + regulator-always-on; + regulator-boot-on; + }; + + vsys_5v0: fixedregulator-vsys5v0 { + /* Output of Cntlr B of TPS43351-Q1 on dra76-evm */ + compatible = "regulator-fixed"; + regulator-name = "vsys_5v0"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + vin-supply = <&vsys_12v0>; + regulator-always-on; + regulator-boot-on; + }; + + vsys_3v3: fixedregulator-vsys3v3 { + /* Output of Cntlr A of TPS43351-Q1 on dra76-evm */ + compatible = "regulator-fixed"; + regulator-name = "vsys_3v3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + vin-supply = <&vsys_12v0>; + regulator-always-on; + regulator-boot-on; + }; + + vio_3v3: fixedregulator-vio_3v3 { + compatible = "regulator-fixed"; + regulator-name = "vio_3v3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + vin-supply = <&vsys_3v3>; + regulator-always-on; + regulator-boot-on; + }; + + vio_3v3_sd: fixedregulator-sd { + compatible = "regulator-fixed"; + regulator-name = "vio_3v3_sd"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + vin-supply = <&vio_3v3>; + enable-active-high; + gpio = <&gpio4 21 GPIO_ACTIVE_HIGH>; + }; + + vio_1v8: fixedregulator-vio_1v8 { + compatible = "regulator-fixed"; + regulator-name = "vio_1v8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + vin-supply = <&smps5_reg>; + }; + + vtt_fixed: fixedregulator-vtt { + compatible = "regulator-fixed"; + regulator-name = "vtt_fixed"; + regulator-min-microvolt = <1350000>; + regulator-max-microvolt = <1350000>; + vin-supply = <&vsys_3v3>; + regulator-always-on; + regulator-boot-on; + }; + + aic_dvdd: fixedregulator-aic_dvdd { + /* TPS77018DBVT */ + compatible = "regulator-fixed"; + regulator-name = "aic_dvdd"; + vin-supply = <&vio_3v3>; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + }; +}; + +&dra7_pmx_core { + mmc1_pins_default: mmc1_pins_default { + pinctrl-single,pins = < + DRA7XX_CORE_IOPAD(0x376c, PIN_INPUT | MUX_MODE14) /* mmc1sdcd.gpio219 */ + DRA7XX_CORE_IOPAD(0x3754, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_clk.clk */ + DRA7XX_CORE_IOPAD(0x3758, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_cmd.cmd */ + DRA7XX_CORE_IOPAD(0x375c, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat0.dat0 */ + DRA7XX_CORE_IOPAD(0x3760, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat1.dat1 */ + DRA7XX_CORE_IOPAD(0x3764, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat2.dat2 */ + DRA7XX_CORE_IOPAD(0x3768, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat3.dat3 */ + >; + }; + + mmc1_pins_sdr12: pinmux_mmc1_sdr12_pins { + pinctrl-single,pins = < + DRA7XX_CORE_IOPAD(0x3754, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_clk.clk */ + DRA7XX_CORE_IOPAD(0x3758, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_cmd.cmd */ + DRA7XX_CORE_IOPAD(0x375c, PIN_INPUT_PULLUP | MUX_MODE0) 
/* mmc1_dat0.dat0 */ + DRA7XX_CORE_IOPAD(0x3760, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat1.dat1 */ + DRA7XX_CORE_IOPAD(0x3764, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat2.dat2 */ + DRA7XX_CORE_IOPAD(0x3768, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat3.dat3 */ + >; + }; + + mmc2_pins_default: mmc2_pins_default { + pinctrl-single,pins = < + DRA7XX_CORE_IOPAD(0x349c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a23.mmc2_clk */ + DRA7XX_CORE_IOPAD(0x34b0, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_cs1.mmc2_cmd */ + DRA7XX_CORE_IOPAD(0x34a0, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a24.mmc2_dat0 */ + DRA7XX_CORE_IOPAD(0x34a4, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a25.mmc2_dat1 */ + DRA7XX_CORE_IOPAD(0x34a8, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a26.mmc2_dat2 */ + DRA7XX_CORE_IOPAD(0x34ac, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a27.mmc2_dat3 */ + DRA7XX_CORE_IOPAD(0x348c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a19.mmc2_dat4 */ + DRA7XX_CORE_IOPAD(0x3490, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a20.mmc2_dat5 */ + DRA7XX_CORE_IOPAD(0x3494, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a21.mmc2_dat6 */ + DRA7XX_CORE_IOPAD(0x3498, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a22.mmc2_dat7 */ + >; + }; +}; + +&i2c1 { + status = "okay"; + clock-frequency = <400000>; + + tps65917: tps65917@58 { + compatible = "ti,tps65917"; + reg = <0x58>; + ti,system-power-controller; + interrupt-controller; + #interrupt-cells = <2>; + + tps65917_pmic { + compatible = "ti,tps65917-pmic"; + + smps12-in-supply = <&vsys_3v3>; + smps3-in-supply = <&vsys_3v3>; + smps4-in-supply = <&vsys_3v3>; + smps5-in-supply = <&vsys_3v3>; + ldo1-in-supply = <&vsys_3v3>; + ldo2-in-supply = <&vsys_3v3>; + ldo3-in-supply = <&vsys_5v0>; + ldo4-in-supply = <&vsys_5v0>; + ldo5-in-supply = <&vsys_3v3>; + + tps65917_regulators: regulators { + smps12_reg: smps12 { + /* VDD_DSPEVE */ + regulator-name = "smps12"; + regulator-min-microvolt = <850000>; + regulator-max-microvolt = <1250000>; + regulator-always-on; + regulator-boot-on; + }; + + smps3_reg: smps3 { + /* VDD_CORE */ + regulator-name = "smps3"; + regulator-min-microvolt = <850000>; + regulator-max-microvolt = <1250000>; + regulator-boot-on; + regulator-always-on; + }; + + smps4_reg: smps4 { + /* VDD_IVA */ + regulator-name = "smps4"; + regulator-min-microvolt = <850000>; + regulator-max-microvolt = <1250000>; + regulator-always-on; + regulator-boot-on; + }; + + smps5_reg: smps5 { + /* VDDS1V8 */ + regulator-name = "smps5"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-boot-on; + regulator-always-on; + }; + + ldo1_reg: ldo1 { + /* LDO1_OUT --> VDA_PHY1_1V8 */ + regulator-name = "ldo1"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; + regulator-boot-on; + regulator-allow-bypass; + }; + + ldo2_reg: ldo2 { + /* LDO2_OUT --> VDA_PHY2_1V8 */ + regulator-name = "ldo2"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-allow-bypass; + regulator-always-on; + }; + + ldo3_reg: ldo3 { + /* VDA_USB_3V3 */ + regulator-name = "ldo3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-boot-on; + regulator-always-on; + }; + + ldo5_reg: ldo5 { + /* VDDA_1V8_PLL */ + regulator-name = "ldo5"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; + regulator-boot-on; + }; + + ldo4_reg: ldo4 { + /* VDD_SDIO_DV */ + regulator-name = "ldo4"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + 
regulator-boot-on; + regulator-always-on; + }; + }; + }; + + tps65917_power_button { + compatible = "ti,palmas-pwrbutton"; + interrupt-parent = <&tps65917>; + interrupts = <1 IRQ_TYPE_NONE>; + wakeup-source; + ti,palmas-long-press-seconds = <6>; + }; + }; + + lp87565: lp87565@60 { + compatible = "ti,lp87565-q1"; + reg = <0x60>; + + buck10-in-supply =<&vsys_3v3>; + buck23-in-supply =<&vsys_3v3>; + + regulators: regulators { + buck10_reg: buck10 { + /*VDD_MPU*/ + regulator-name = "buck10"; + regulator-min-microvolt = <850000>; + regulator-max-microvolt = <1250000>; + regulator-always-on; + regulator-boot-on; + }; + + buck23_reg: buck23 { + /* VDD_GPU*/ + regulator-name = "buck23"; + regulator-min-microvolt = <850000>; + regulator-max-microvolt = <1250000>; + regulator-boot-on; + regulator-always-on; + }; + }; + }; + + pcf_lcd: pcf8757@20 { + compatible = "ti,pcf8575", "nxp,pcf8575"; + reg = <0x20>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + interrupt-parent = <&gpio1>; + interrupts = <3 IRQ_TYPE_EDGE_FALLING>; + }; + + pcf_gpio_21: pcf8757@21 { + compatible = "ti,pcf8575", "nxp,pcf8575"; + reg = <0x21>; + gpio-controller; + #gpio-cells = <2>; + interrupt-parent = <&gpio1>; + interrupts = <3 IRQ_TYPE_EDGE_FALLING>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + pcf_hdmi: pcf8575@26 { + compatible = "ti,pcf8575", "nxp,pcf8575"; + reg = <0x26>; + gpio-controller; + #gpio-cells = <2>; + p1 { + /* vin6_sel_s0: high: VIN6, low: audio */ + gpio-hog; + gpios = <1 GPIO_ACTIVE_HIGH>; + output-low; + line-name = "vin6_sel_s0"; + }; + }; + + tlv320aic3106: tlv320aic3106@19 { + #sound-dai-cells = <0>; + compatible = "ti,tlv320aic3106"; + reg = <0x19>; + adc-settle-ms = <40>; + ai3x-micbias-vg = <1>; /* 2.0V */ + status = "okay"; + + /* Regulators */ + AVDD-supply = <&vio_3v3>; + IOVDD-supply = <&vio_3v3>; + DRVDD-supply = <&vio_3v3>; + DVDD-supply = <&aic_dvdd>; + }; +}; + +&cpu0 { + vdd-supply = <&buck10_reg>; +}; + +&mmc1 { + status = "okay"; + vmmc-supply = <&vio_3v3_sd>; + vmmc_aux-supply = <&ldo4_reg>; + bus-width = <4>; + /* + * SDCD signal is not being used here - using the fact that GPIO mode + * is always hardwired. 
+ */ + cd-gpios = <&gpio6 27 GPIO_ACTIVE_LOW>; + pinctrl-names = "default"; + pinctrl-0 = <&mmc1_pins_default>; +}; + +&mmc2 { + status = "okay"; + vmmc-supply = <&vio_1v8>; + bus-width = <8>; + pinctrl-names = "default"; + pinctrl-0 = <&mmc2_pins_default>; +}; + +/* No RTC on this device */ +&rtc { + status = "disabled"; +}; + +&mac { + status = "okay"; + + dual_emac; +}; + +&cpsw_emac0 { + phy_id = <&davinci_mdio>, <2>; + phy-mode = "rgmii-id"; + dual_emac_res_vlan = <1>; +}; + +&cpsw_emac1 { + phy_id = <&davinci_mdio>, <3>; + phy-mode = "rgmii-id"; + dual_emac_res_vlan = <2>; +}; + +&davinci_mdio { + dp83867_0: ethernet-phy@2 { + reg = <2>; + ti,rx-internal-delay = ; + ti,tx-internal-delay = ; + ti,fifo-depth = ; + ti,min-output-impedance; + ti,dp83867-rxctrl-strap-quirk; + }; + + dp83867_1: ethernet-phy@3 { + reg = <3>; + ti,rx-internal-delay = ; + ti,tx-internal-delay = ; + ti,fifo-depth = ; + ti,min-output-impedance; + ti,dp83867-rxctrl-strap-quirk; + }; +}; + +&usb2_phy1 { + phy-supply = <&ldo3_reg>; +}; + +&usb2_phy2 { + phy-supply = <&ldo3_reg>; +}; + +&qspi { + spi-max-frequency = <96000000>; + m25p80@0 { + spi-max-frequency = <96000000>; + }; +}; diff --git a/arch/arm/boot/dts/dra76x.dtsi b/arch/arm/boot/dts/dra76x.dtsi new file mode 100644 index 000000000000..1c88c581ff18 --- /dev/null +++ b/arch/arm/boot/dts/dra76x.dtsi @@ -0,0 +1,19 @@ +/* + * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include "dra74x.dtsi" + +/ { + compatible = "ti,dra762", "ti,dra7"; + +}; + +/* MCAN interrupts are hard-wired to irqs 67, 68 */ +&crossbar_mpu { + ti,irqs-skip = <10 67 68 133 139 140>; +}; diff --git a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi index cf229dfabf61..e62b62875cba 100644 --- a/arch/arm/boot/dts/dra7xx-clocks.dtsi +++ b/arch/arm/boot/dts/dra7xx-clocks.dtsi @@ -1817,6 +1817,8 @@ mcasp3_ahclkx_mux: mcasp3_ahclkx_mux@1868 { clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atl_clkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>; ti,bit-shift = <24>; reg = <0x1868>; + assigned-clocks = <&mcasp3_ahclkx_mux>; + assigned-clock-parents = <&abe_24m_fclk>; }; mcasp3_aux_gfclk_mux: mcasp3_aux_gfclk_mux@1868 { diff --git a/arch/arm/boot/dts/exynos3250-artik5-eval.dts b/arch/arm/boot/dts/exynos3250-artik5-eval.dts index 4bd2ee87124e..4cbfa09c6c4e 100644 --- a/arch/arm/boot/dts/exynos3250-artik5-eval.dts +++ b/arch/arm/boot/dts/exynos3250-artik5-eval.dts @@ -22,7 +22,6 @@ / { }; &mshc_2 { - num-slots = <1>; cap-sd-highspeed; disable-wp; vqmmc-supply = <&ldo3_reg>; diff --git a/arch/arm/boot/dts/exynos3250-artik5.dtsi b/arch/arm/boot/dts/exynos3250-artik5.dtsi index 59c89d7662a8..639c2e605f3c 100644 --- a/arch/arm/boot/dts/exynos3250-artik5.dtsi +++ b/arch/arm/boot/dts/exynos3250-artik5.dtsi @@ -304,7 +304,6 @@ buck5_reg: BUCK5 { }; &mshc_0 { - num-slots = <1>; non-removable; cap-mmc-highspeed; card-detect-delay = <200>; diff --git a/arch/arm/boot/dts/exynos3250-monk.dts b/arch/arm/boot/dts/exynos3250-monk.dts index accee81da266..bbdfcbc6e7d2 100644 --- a/arch/arm/boot/dts/exynos3250-monk.dts +++ b/arch/arm/boot/dts/exynos3250-monk.dts @@ -426,7 +426,6 @@ &i2s2 { &mshc_0 { #address-cells = <1>; 
#size-cells = <0>; - num-slots = <1>; broken-cd; non-removable; cap-mmc-highspeed; diff --git a/arch/arm/boot/dts/exynos3250-rinato.dts b/arch/arm/boot/dts/exynos3250-rinato.dts index 443e0c98dc73..0b45467d77a8 100644 --- a/arch/arm/boot/dts/exynos3250-rinato.dts +++ b/arch/arm/boot/dts/exynos3250-rinato.dts @@ -220,21 +220,6 @@ &dsi_0 { samsung,pll-clock-frequency = <24000000>; status = "okay"; - ports { - #address-cells = <1>; - #size-cells = <0>; - - port@1 { - reg = <1>; - - dsi_out: endpoint { - remote-endpoint = <&dsi_in>; - samsung,burst-clock-frequency = <250000000>; - samsung,esc-clock-frequency = <20000000>; - }; - }; - }; - panel@0 { compatible = "samsung,s6e63j0x03"; reg = <0>; @@ -264,12 +249,6 @@ timing-0 { vsync-len = <2>; }; }; - - port { - dsi_in: endpoint { - remote-endpoint = <&dsi_out>; - }; - }; }; }; @@ -642,7 +621,6 @@ &jpeg { &mshc_0 { #address-cells = <1>; #size-cells = <0>; - num-slots = <1>; broken-cd; non-removable; cap-mmc-highspeed; diff --git a/arch/arm/boot/dts/exynos4210-trats.dts b/arch/arm/boot/dts/exynos4210-trats.dts index 645feffb9239..7b6ab7265110 100644 --- a/arch/arm/boot/dts/exynos4210-trats.dts +++ b/arch/arm/boot/dts/exynos4210-trats.dts @@ -202,21 +202,6 @@ &dsi_0 { samsung,pll-clock-frequency = <24000000>; status = "okay"; - ports { - #address-cells = <1>; - #size-cells = <0>; - - port@1 { - reg = <1>; - - dsi_out: endpoint { - remote-endpoint = <&dsi_in>; - samsung,burst-clock-frequency = <500000000>; - samsung,esc-clock-frequency = <20000000>; - }; - }; - }; - panel@0 { reg = <0>; compatible = "samsung,s6e8aa0"; @@ -244,12 +229,6 @@ timing-0 { vsync-len = <2>; }; }; - - port { - dsi_in: endpoint { - remote-endpoint = <&dsi_out>; - }; - }; }; }; diff --git a/arch/arm/boot/dts/exynos4412-itop-scp-core.dtsi b/arch/arm/boot/dts/exynos4412-itop-scp-core.dtsi index 4cd62487bb16..14ce2c69bc0b 100644 --- a/arch/arm/boot/dts/exynos4412-itop-scp-core.dtsi +++ b/arch/arm/boot/dts/exynos4412-itop-scp-core.dtsi @@ -466,7 +466,6 @@ &mshc_0 { pinctrl-names = "default"; status = "okay"; vmmc-supply = <&buck9_reg>; - num-slots = <1>; broken-cd; card-detect-delay = <200>; samsung,dw-mshc-ciu-div = <3>; diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi index 219d587c5a85..102acd78be15 100644 --- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi +++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi @@ -516,7 +516,6 @@ &mshc_0 { mmc-pwrseq = <&emmc_pwrseq>; status = "okay"; - num-slots = <1>; broken-cd; card-detect-delay = <200>; samsung,dw-mshc-ciu-div = <3>; diff --git a/arch/arm/boot/dts/exynos4412-origen.dts b/arch/arm/boot/dts/exynos4412-origen.dts index 7a83e2df18a6..8a89eb893d64 100644 --- a/arch/arm/boot/dts/exynos4412-origen.dts +++ b/arch/arm/boot/dts/exynos4412-origen.dts @@ -488,7 +488,6 @@ &mshc_0 { pinctrl-names = "default"; status = "okay"; - num-slots = <1>; broken-cd; card-detect-delay = <200>; samsung,dw-mshc-ciu-div = <3>; diff --git a/arch/arm/boot/dts/exynos4412-trats2.dts b/arch/arm/boot/dts/exynos4412-trats2.dts index 35e9b94b86b8..bceb919ac637 100644 --- a/arch/arm/boot/dts/exynos4412-trats2.dts +++ b/arch/arm/boot/dts/exynos4412-trats2.dts @@ -390,21 +390,6 @@ &dsi_0 { samsung,pll-clock-frequency = <24000000>; status = "okay"; - ports { - #address-cells = <1>; - #size-cells = <0>; - - port@1 { - reg = <1>; - - dsi_out: endpoint { - remote-endpoint = <&dsi_in>; - samsung,burst-clock-frequency = <500000000>; - samsung,esc-clock-frequency = <20000000>; - }; - }; - }; - panel@0 { 
compatible = "samsung,s6e8aa0"; reg = <0>; @@ -432,12 +417,6 @@ timing-0 { vsync-len = <2>; }; }; - - port { - dsi_in: endpoint { - remote-endpoint = <&dsi_out>; - }; - }; }; }; @@ -901,7 +880,6 @@ &i2s0 { }; &mshc_0 { - num-slots = <1>; broken-cd; non-removable; card-detect-delay = <200>; diff --git a/arch/arm/boot/dts/exynos5250-arndale.dts b/arch/arm/boot/dts/exynos5250-arndale.dts index 6a432460eb77..18a7f396ac5f 100644 --- a/arch/arm/boot/dts/exynos5250-arndale.dts +++ b/arch/arm/boot/dts/exynos5250-arndale.dts @@ -518,7 +518,6 @@ &i2s0 { &mmc_0 { status = "okay"; - num-slots = <1>; broken-cd; card-detect-delay = <200>; samsung,dw-mshc-ciu-div = <3>; @@ -533,7 +532,6 @@ &mmc_0 { &mmc_2 { status = "okay"; - num-slots = <1>; card-detect-delay = <200>; samsung,dw-mshc-ciu-div = <3>; samsung,dw-mshc-sdr-timing = <2 3>; diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts index 6632f657394e..062cba4c2c31 100644 --- a/arch/arm/boot/dts/exynos5250-smdk5250.dts +++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts @@ -346,7 +346,6 @@ &i2s0 { &mmc_0 { status = "okay"; - num-slots = <1>; broken-cd; card-detect-delay = <200>; samsung,dw-mshc-ciu-div = <3>; @@ -360,7 +359,6 @@ &mmc_0 { &mmc_2 { status = "okay"; - num-slots = <1>; card-detect-delay = <200>; samsung,dw-mshc-ciu-div = <3>; samsung,dw-mshc-sdr-timing = <2 3>; diff --git a/arch/arm/boot/dts/exynos5250-snow-common.dtsi b/arch/arm/boot/dts/exynos5250-snow-common.dtsi index e1d293dbbe5d..8788880e459d 100644 --- a/arch/arm/boot/dts/exynos5250-snow-common.dtsi +++ b/arch/arm/boot/dts/exynos5250-snow-common.dtsi @@ -530,7 +530,6 @@ &i2s0 { /* eMMC flash */ &mmc_0 { status = "okay"; - num-slots = <1>; non-removable; samsung,dw-mshc-ciu-div = <3>; samsung,dw-mshc-sdr-timing = <2 3>; @@ -544,7 +543,6 @@ &mmc_0 { /* uSD card */ &mmc_2 { status = "okay"; - num-slots = <1>; card-detect-delay = <200>; samsung,dw-mshc-ciu-div = <3>; samsung,dw-mshc-sdr-timing = <2 3>; @@ -564,7 +562,6 @@ &mmc_2 { */ &mmc_3 { status = "okay"; - num-slots = <1>; non-removable; cap-sdio-irq; keep-power-in-suspend; diff --git a/arch/arm/boot/dts/exynos5250-spring.dts b/arch/arm/boot/dts/exynos5250-spring.dts index 95c3bcace9dc..d53bfcbeb39c 100644 --- a/arch/arm/boot/dts/exynos5250-spring.dts +++ b/arch/arm/boot/dts/exynos5250-spring.dts @@ -427,7 +427,6 @@ &i2s0 { &mmc_0 { status = "okay"; - num-slots = <1>; broken-cd; card-detect-delay = <200>; samsung,dw-mshc-ciu-div = <3>; @@ -445,7 +444,6 @@ &mmc_0 { */ &mmc_1 { status = "okay"; - num-slots = <1>; broken-cd; card-detect-delay = <200>; samsung,dw-mshc-ciu-div = <3>; diff --git a/arch/arm/boot/dts/exynos5260-xyref5260.dts b/arch/arm/boot/dts/exynos5260-xyref5260.dts index d0cc300cfb4b..73b7cdd5f522 100644 --- a/arch/arm/boot/dts/exynos5260-xyref5260.dts +++ b/arch/arm/boot/dts/exynos5260-xyref5260.dts @@ -67,7 +67,6 @@ &uart3 { &mmc_0 { status = "okay"; - num-slots = <1>; broken-cd; bypass-smu; cap-mmc-highspeed; @@ -83,7 +82,6 @@ &mmc_0 { &mmc_2 { status = "okay"; - num-slots = <1>; cap-sd-highspeed; card-detect-delay = <200>; samsung,dw-mshc-ciu-div = <3>; diff --git a/arch/arm/boot/dts/exynos5410-smdk5410.dts b/arch/arm/boot/dts/exynos5410-smdk5410.dts index 6cc74d97daae..9cb7726ef8d0 100644 --- a/arch/arm/boot/dts/exynos5410-smdk5410.dts +++ b/arch/arm/boot/dts/exynos5410-smdk5410.dts @@ -41,7 +41,6 @@ firmware@02037000 { &mmc_0 { status = "okay"; - num-slots = <1>; cap-mmc-highspeed; broken-cd; card-detect-delay = <200>; @@ -53,7 +52,6 @@ &mmc_0 { &mmc_2 { status = 
"okay"; - num-slots = <1>; cap-sd-highspeed; card-detect-delay = <200>; samsung,dw-mshc-ciu-div = <3>; diff --git a/arch/arm/boot/dts/exynos5420-peach-pit.dts b/arch/arm/boot/dts/exynos5420-peach-pit.dts index f9a75bfd3f2b..683a4cfb4a23 100644 --- a/arch/arm/boot/dts/exynos5420-peach-pit.dts +++ b/arch/arm/boot/dts/exynos5420-peach-pit.dts @@ -699,7 +699,6 @@ &i2s0 { /* eMMC flash */ &mmc_0 { status = "okay"; - num-slots = <1>; mmc-hs200-1_8v; cap-mmc-highspeed; non-removable; @@ -717,7 +716,6 @@ &mmc_0 { /* WiFi SDIO module */ &mmc_1 { status = "okay"; - num-slots = <1>; non-removable; cap-sdio-irq; keep-power-in-suspend; @@ -737,7 +735,6 @@ &mmc_1 { /* uSD card */ &mmc_2 { status = "okay"; - num-slots = <1>; cap-sd-highspeed; card-detect-delay = <200>; clock-frequency = <400000000>; diff --git a/arch/arm/boot/dts/exynos5440.dtsi b/arch/arm/boot/dts/exynos5440.dtsi index bc4954e69f7b..7a00be7ea6d7 100644 --- a/arch/arm/boot/dts/exynos5440.dtsi +++ b/arch/arm/boot/dts/exynos5440.dtsi @@ -317,6 +317,7 @@ pcie_0: pcie@290000 { phys = <&pcie_phy0>; ranges = <0x81000000 0 0 0x40001000 0 0x00010000 /* downstream I/O */ 0x82000000 0 0x40011000 0x40011000 0 0x1ffef000>; /* non-prefetchable memory */ + bus-range = <0x00 0xff>; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0x0 0 &gic 53>; @@ -339,6 +340,7 @@ pcie_1: pcie@2a0000 { phys = <&pcie_phy1>; ranges = <0x81000000 0 0 0x60001000 0 0x00010000 /* downstream I/O */ 0x82000000 0 0x60011000 0x60011000 0 0x1ffef000>; /* non-prefetchable memory */ + bus-range = <0x00 0xff>; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0x0 0 &gic 56>; diff --git a/arch/arm/boot/dts/exynos5800-peach-pi.dts b/arch/arm/boot/dts/exynos5800-peach-pi.dts index 953dc8677dc8..b2b95ff205e8 100644 --- a/arch/arm/boot/dts/exynos5800-peach-pi.dts +++ b/arch/arm/boot/dts/exynos5800-peach-pi.dts @@ -667,7 +667,6 @@ &i2s0 { /* eMMC flash */ &mmc_0 { status = "okay"; - num-slots = <1>; mmc-hs200-1_8v; mmc-hs400-1_8v; cap-mmc-highspeed; @@ -686,7 +685,6 @@ &mmc_0 { /* WiFi SDIO module */ &mmc_1 { status = "okay"; - num-slots = <1>; non-removable; cap-sdio-irq; keep-power-in-suspend; @@ -706,7 +704,6 @@ &mmc_1 { /* uSD card */ &mmc_2 { status = "okay"; - num-slots = <1>; cap-sd-highspeed; card-detect-delay = <200>; clock-frequency = <400000000>; diff --git a/arch/arm/boot/dts/gemini-dlink-dir-685.dts b/arch/arm/boot/dts/gemini-dlink-dir-685.dts new file mode 100644 index 000000000000..e75e2d44371c --- /dev/null +++ b/arch/arm/boot/dts/gemini-dlink-dir-685.dts @@ -0,0 +1,246 @@ +/* + * Device Tree file for D-Link DIR-685 Xtreme N Storage Router + */ + +/dts-v1/; + +#include "gemini.dtsi" +#include + +/ { + model = "D-Link DIR-685 Xtreme N Storage Router"; + compatible = "dlink,dir-685", "cortina,gemini"; + #address-cells = <1>; + #size-cells = <1>; + + memory { + /* 128 MB SDRAM in 2 x Hynix HY5DU121622DTP-D43 */ + device_type = "memory"; + reg = <0x00000000 0x8000000>; + }; + + chosen { + stdout-path = "uart0:115200n8"; + }; + + gpio_keys { + compatible = "gpio-keys"; + #address-cells = <1>; + #size-cells = <0>; + button-esc { + debounce_interval = <50>; + wakeup-source; + linux,code = ; + label = "reset"; + /* Collides with LPC_LAD[0], UART DCD, SSP 97RST */ + gpios = <&gpio0 8 GPIO_ACTIVE_LOW>; + }; + button-eject { + debounce_interval = <50>; + wakeup-source; + linux,code = ; + label = "unmount"; + /* Collides with LPC LFRAME, UART RTS, SSP TXD */ + gpios = <&gpio0 13 GPIO_ACTIVE_LOW>; + }; + }; + + leds { + compatible = 
"gpio-leds"; + led-wps { + label = "dir685:blue:WPS"; + /* Collides with ICE */ + gpios = <&gpio0 7 GPIO_ACTIVE_LOW>; + default-state = "on"; + linux,default-trigger = "heartbeat"; + }; + /* + * These two LEDs are on the side of the device. + * For electrical reasons, both LEDs cannot be active + * at the same time so only blue or orange can on at + * one time. Enabling both makes the LED go dark. + * The LEDs both sit inside the unmount button and the + * label on the case says "unmount". + */ + led-blue-hd { + label = "dir685:blue:HD"; + /* Collides with LPC_SERIRQ, UART DTR, SSP FSC pins */ + gpios = <&gpio0 11 GPIO_ACTIVE_HIGH>; + default-state = "off"; + }; + led-orange-hd { + label = "dir685:orange:HD"; + /* Collides with LPC_LAD[2], UART DSR, SSP ECLK pins */ + gpios = <&gpio0 12 GPIO_ACTIVE_HIGH>; + default-state = "off"; + }; + }; + + /* + * This is a Sunon Maglev GM0502PFV2-8 cooling fan @10000 RPM. + * Since the platform has no temperature sensor, this is controlled + * from userspace by using the hard disks S.M.A.R.T. temperature + * sensor. It is turned on when the temperature exceeds 46 degrees + * and turned off when the temperatures goes below 41 degrees + * (celsius). + */ + gpio-fan { + compatible = "gpio-fan"; + /* Collides with IDE */ + gpios = <&gpio1 6 GPIO_ACTIVE_HIGH>; + gpio-fan,speed-map = <0 0>, <10000 1>; + #cooling-cells = <2>; + }; + + /* + * The touchpad input is connected to a GPIO bit-banged + * I2C bus. + */ + gpio-i2c { + compatible = "i2c-gpio"; + /* Collides with ICE */ + gpios = <&gpio0 5 0>, /* SDA */ + <&gpio0 6 0>; /* SCL */ + #address-cells = <1>; + #size-cells = <0>; + + touchkeys@26 { + compatible = "dlink,dir685-touchkeys"; + reg = <0x26>; + interrupt-parent = <&gpio0>; + /* Collides with NAND flash */ + interrupts = <17 IRQ_TYPE_EDGE_FALLING>; + }; + }; + + soc { + flash@30000000 { + status = "okay"; + /* 32MB of flash */ + reg = <0x30000000 0x02000000>; + + /* + * This "RedBoot" is the Storlink derivative. + */ + partition@0 { + label = "RedBoot"; + reg = <0x00000000 0x00040000>; + read-only; + }; + /* + * Between the boot loader and the rootfs is the kernel + * in a custom Storlink format flashed from the boot + * menu. The rootfs is in squashfs format. + */ + partition@1800c0 { + label = "rootfs"; + reg = <0x001800c0 0x01dbff40>; + read-only; + }; + partition@1f40000 { + label = "upgrade"; + reg = <0x01f40000 0x00040000>; + read-only; + }; + partition@1f80000 { + label = "rgdb"; + reg = <0x01f80000 0x00040000>; + read-only; + }; + /* + * This partition contains MAC addresses for WAN, + * WLAN and LAN, and the country code (for wireless + * I guess). 
+ */ + partition@1fc0000 { + label = "nvram"; + reg = <0x01fc0000 0x00020000>; + read-only; + }; + partition@1fe0000 { + label = "LangPack"; + reg = <0x01fe0000 0x00020000>; + read-only; + }; + }; + + syscon: syscon@40000000 { + pinctrl { + /* + * gpio0bgrp cover line 5, 6 used by TK I2C + * gpio0bgrp cover line 7 used by WPS LED + * gpio0cgrp cover line 8, 13 used by keys + * and 11, 12 used by the HD LEDs + * gpio0egrp cover line 16 used by VDISP + * gpio0fgrp cover line 17 used by TK IRQ + * gpio0ggrp cover line 20 used by panel CS + * gpio0hgrp cover line 21,22 used by RTL8366RB + */ + gpio0_default_pins: pinctrl-gpio0 { + mux { + function = "gpio0"; + groups = "gpio0bgrp", + "gpio0cgrp", + "gpio0egrp", + "gpio0fgrp", + "gpio0ggrp", + "gpio0hgrp"; + }; + }; + /* + * gpio1bgrp cover line 5,8,7 used by panel SPI + * also line 6 used by the fan + * + */ + gpio1_default_pins: pinctrl-gpio1 { + mux { + function = "gpio1"; + groups = "gpio1bgrp"; + }; + }; + }; + }; + + sata: sata@46000000 { + cortina,gemini-ata-muxmode = <0>; + cortina,gemini-enable-sata-bridge; + status = "okay"; + }; + + gpio0: gpio@4d000000 { + pinctrl-names = "default"; + pinctrl-0 = <&gpio0_default_pins>; + }; + + gpio1: gpio@4e000000 { + pinctrl-names = "default"; + pinctrl-0 = <&gpio1_default_pins>; + }; + + pci@50000000 { + status = "okay"; + interrupt-map-mask = <0xf800 0 0 7>; + interrupt-map = + <0x4800 0 0 1 &pci_intc 0>, /* Slot 9 */ + <0x4800 0 0 2 &pci_intc 1>, + <0x4800 0 0 3 &pci_intc 2>, + <0x4800 0 0 4 &pci_intc 3>, + <0x5000 0 0 1 &pci_intc 1>, /* Slot 10 */ + <0x5000 0 0 2 &pci_intc 2>, + <0x5000 0 0 3 &pci_intc 3>, + <0x5000 0 0 4 &pci_intc 0>, + <0x5800 0 0 1 &pci_intc 2>, /* Slot 11 */ + <0x5800 0 0 2 &pci_intc 3>, + <0x5800 0 0 3 &pci_intc 0>, + <0x5800 0 0 4 &pci_intc 1>, + <0x6000 0 0 1 &pci_intc 3>, /* Slot 12 */ + <0x6000 0 0 2 &pci_intc 0>, + <0x6000 0 0 3 &pci_intc 1>, + <0x6000 0 0 4 &pci_intc 2>; + }; + + ata@63000000 { + status = "okay"; + }; + }; +}; diff --git a/arch/arm/boot/dts/gemini-nas4220b.dts b/arch/arm/boot/dts/gemini-nas4220b.dts index 55f6a4f1f801..b4fc58c8cf8d 100644 --- a/arch/arm/boot/dts/gemini-nas4220b.dts +++ b/arch/arm/boot/dts/gemini-nas4220b.dts @@ -33,6 +33,7 @@ button@29 { wakeup-source; linux,code = ; label = "Backup button"; + /* Conflict with TVC */ gpios = <&gpio1 29 GPIO_ACTIVE_LOW>; }; button@31 { @@ -40,6 +41,7 @@ button@31 { wakeup-source; linux,code = ; label = "Softreset button"; + /* Conflict with TVC */ gpios = <&gpio1 31 GPIO_ACTIVE_LOW>; }; }; @@ -48,11 +50,13 @@ leds { compatible = "gpio-leds"; led@28 { label = "nas4220b:orange:hdd"; + /* Conflict with TVC */ gpios = <&gpio1 28 GPIO_ACTIVE_HIGH>; default-state = "on"; }; led@30 { label = "nas4220b:green:os"; + /* Conflict with TVC */ gpios = <&gpio1 30 GPIO_ACTIVE_HIGH>; default-state = "on"; linux,default-trigger = "heartbeat"; @@ -99,12 +103,32 @@ partition@fe0000 { }; }; + syscon: syscon@40000000 { + pinctrl { + /* + * gpio1dgrp cover line 28-31 otherwise used + * by TVC. 
+ */ + gpio1_default_pins: pinctrl-gpio1 { + mux { + function = "gpio1"; + groups = "gpio1dgrp"; + }; + }; + }; + }; + sata: sata@46000000 { cortina,gemini-ata-muxmode = <0>; cortina,gemini-enable-sata-bridge; status = "okay"; }; + gpio1: gpio@4e000000 { + pinctrl-names = "default"; + pinctrl-0 = <&gpio1_default_pins>; + }; + ata@63000000 { status = "okay"; }; diff --git a/arch/arm/boot/dts/gemini-rut1xx.dts b/arch/arm/boot/dts/gemini-rut1xx.dts index 7b920bfbda32..3613b264f45f 100644 --- a/arch/arm/boot/dts/gemini-rut1xx.dts +++ b/arch/arm/boot/dts/gemini-rut1xx.dts @@ -33,6 +33,7 @@ button@28 { wakeup-source; linux,code = ; label = "Reset to defaults"; + /* Conflict with TVC */ gpios = <&gpio1 28 GPIO_ACTIVE_LOW>; }; }; @@ -42,12 +43,14 @@ leds { led@7 { /* FIXME: add the LED color */ label = "rut1xx::gsm"; + /* Conflict with ICE */ gpios = <&gpio0 7 GPIO_ACTIVE_HIGH>; default-state = "on"; }; led@31 { /* FIXME: add the LED color */ label = "rut1xx::power"; + /* Conflict with NAND CE0 */ gpios = <&gpio0 17 GPIO_ACTIVE_HIGH>; default-state = "off"; linux,default-trigger = "heartbeat"; @@ -61,5 +64,41 @@ flash@30000000 { reg = <0x30000000 0x00800000>; /* TODO: add flash partitions here */ }; + + syscon: syscon@40000000 { + pinctrl { + /* + * gpio0bgrp cover line 7 used by GSM LED + * gpio0fgrp cover line 17 used by power LED + */ + gpio0_default_pins: pinctrl-gpio0 { + mux { + function = "gpio0"; + groups = "gpio0bgrp", + "gpio0fgrp"; + }; + }; + /* + * gpio1dgrp cover line 28-31 otherwise used + * by TVC. + */ + gpio1_default_pins: pinctrl-gpio1 { + mux { + function = "gpio1"; + groups = "gpio1dgrp"; + }; + }; + }; + }; + + gpio0: gpio@4d000000 { + pinctrl-names = "default"; + pinctrl-0 = <&gpio0_default_pins>; + }; + + gpio1: gpio@4e000000 { + pinctrl-names = "default"; + pinctrl-0 = <&gpio1_default_pins>; + }; }; }; diff --git a/arch/arm/boot/dts/gemini-sq201.dts b/arch/arm/boot/dts/gemini-sq201.dts index 4d200f0bcd45..7cfa9caf47d4 100644 --- a/arch/arm/boot/dts/gemini-sq201.dts +++ b/arch/arm/boot/dts/gemini-sq201.dts @@ -33,6 +33,7 @@ button@18 { wakeup-source; linux,code = ; label = "factory reset"; + /* Conflict with NAND flash */ gpios = <&gpio0 18 GPIO_ACTIVE_LOW>; }; }; @@ -41,12 +42,14 @@ leds { compatible = "gpio-leds"; led@20 { label = "sq201:green:info"; + /* Conflict with parallel flash */ gpios = <&gpio0 20 GPIO_ACTIVE_HIGH>; default-state = "on"; linux,default-trigger = "heartbeat"; }; led@31 { label = "sq201:green:usb"; + /* Conflict with parallel and NAND flash */ gpios = <&gpio0 31 GPIO_ACTIVE_HIGH>; default-state = "off"; linux,default-trigger = "usb-host"; @@ -55,7 +58,15 @@ led@31 { soc { flash@30000000 { - status = "okay"; + /* + * Flash access can be enabled, with the side effect + * of disabling access to GPIO LED on GPIO0[20] which + * reuse one of the parallel flash chip select lines. + * Also the default firmware on the machine has the + * problem that since it uses the flash, the two LEDS + * on the right become numb. 
+ */ + /* status = "okay"; */ /* 16MB of flash */ reg = <0x30000000 0x01000000>; @@ -93,12 +104,35 @@ partition@fe0000 { }; }; + syscon: syscon@40000000 { + pinctrl { + /* + * gpio0fgrp cover line 18 used by reset button + * gpio0ggrp cover line 20 used by info LED + * gpio0kgrp cover line 31 used by USB LED + */ + gpio0_default_pins: pinctrl-gpio0 { + mux { + function = "gpio0"; + groups = "gpio0fgrp", + "gpio0ggrp", + "gpio0kgrp"; + }; + }; + }; + }; + sata: sata@46000000 { cortina,gemini-ata-muxmode = <0>; cortina,gemini-enable-sata-bridge; status = "okay"; }; + gpio0: gpio@4d000000 { + pinctrl-names = "default"; + pinctrl-0 = <&gpio0_default_pins>; + }; + pci@50000000 { status = "okay"; interrupt-map-mask = <0xf800 0 0 7>; diff --git a/arch/arm/boot/dts/gemini-wbd111.dts b/arch/arm/boot/dts/gemini-wbd111.dts index 63b756e3bf5a..38a49e750478 100644 --- a/arch/arm/boot/dts/gemini-wbd111.dts +++ b/arch/arm/boot/dts/gemini-wbd111.dts @@ -33,6 +33,7 @@ button@5 { wakeup-source; linux,code = ; label = "reset"; + /* Conflict with ICE */ gpios = <&gpio0 5 GPIO_ACTIVE_LOW>; }; }; @@ -42,21 +43,25 @@ leds { led@1 { label = "wbd111:red:L3"; + /* Conflict with TVC and extended parallel flash */ gpios = <&gpio0 1 GPIO_ACTIVE_HIGH>; default-state = "off"; }; led@2 { label = "wbd111:green:L4"; + /* Conflict with TVC and extended parallel flash */ gpios = <&gpio0 2 GPIO_ACTIVE_HIGH>; default-state = "off"; }; led@3 { label = "wbd111:red:L4"; + /* Conflict with TVC and extended parallel flash */ gpios = <&gpio0 3 GPIO_ACTIVE_HIGH>; default-state = "off"; }; led@5 { label = "wbd111:green:L3"; + /* Conflict with TVC and extended parallel flash */ gpios = <&gpio0 5 GPIO_ACTIVE_HIGH>; default-state = "on"; linux,default-trigger = "heartbeat"; @@ -98,5 +103,26 @@ partition@7e0000 { read-only; }; }; + + syscon: syscon@40000000 { + pinctrl { + /* + * gpio0agrp cover line 0-4 + * gpio0bgrp cover line 5 + */ + gpio0_default_pins: pinctrl-gpio0 { + mux { + function = "gpio0"; + groups = "gpio0agrp", + "gpio0bgrp"; + }; + }; + }; + }; + + gpio0: gpio@4d000000 { + pinctrl-names = "default"; + pinctrl-0 = <&gpio0_default_pins>; + }; }; }; diff --git a/arch/arm/boot/dts/gemini-wbd222.dts b/arch/arm/boot/dts/gemini-wbd222.dts index 9747f5a47807..f77e34e0df0b 100644 --- a/arch/arm/boot/dts/gemini-wbd222.dts +++ b/arch/arm/boot/dts/gemini-wbd222.dts @@ -33,6 +33,7 @@ button@5 { wakeup-source; linux,code = ; label = "reset"; + /* Conflict with ICE */ gpios = <&gpio0 5 GPIO_ACTIVE_LOW>; }; }; @@ -42,21 +43,25 @@ leds { led@1 { label = "wbd111:red:L3"; + /* Conflict with TVC and extended parallel flash */ gpios = <&gpio0 1 GPIO_ACTIVE_HIGH>; default-state = "off"; }; led@2 { label = "wbd111:green:L4"; + /* Conflict with TVC and extended parallel flash */ gpios = <&gpio0 2 GPIO_ACTIVE_HIGH>; default-state = "off"; }; led@3 { label = "wbd111:red:L4"; + /* Conflict with TVC and extended parallel flash */ gpios = <&gpio0 3 GPIO_ACTIVE_HIGH>; default-state = "off"; }; led@5 { label = "wbd111:green:L3"; + /* Conflict with TVC and extended parallel flash */ gpios = <&gpio0 5 GPIO_ACTIVE_HIGH>; default-state = "on"; linux,default-trigger = "heartbeat"; @@ -98,5 +103,26 @@ partition@7e0000 { read-only; }; }; + + syscon: syscon@40000000 { + pinctrl { + /* + * gpio0agrp cover line 0-4 + * gpio0bgrp cover line 5 + */ + gpio0_default_pins: pinctrl-gpio0 { + mux { + function = "gpio0"; + groups = "gpio0agrp", + "gpio0bgrp"; + }; + }; + }; + }; + + gpio0: gpio@4d000000 { + pinctrl-names = "default"; + pinctrl-0 = <&gpio0_default_pins>; 
+ }; }; }; diff --git a/arch/arm/boot/dts/gemini.dtsi b/arch/arm/boot/dts/gemini.dtsi index 141d8d3a1d07..c68e8d430234 100644 --- a/arch/arm/boot/dts/gemini.dtsi +++ b/arch/arm/boot/dts/gemini.dtsi @@ -5,6 +5,8 @@ /include/ "skeleton.dtsi" #include +#include +#include #include / { @@ -18,6 +20,8 @@ soc { flash@30000000 { compatible = "cortina,gemini-flash", "cfi-flash"; syscon = <&syscon>; + pinctrl-names = "default"; + pinctrl-0 = <&pflash_default_pins>; bank-width = <2>; #address-cells = <1>; #size-cells = <1>; @@ -39,22 +43,123 @@ syscon-reboot { /* RESET_GLOBAL | RESET_CPU1 */ mask = <0xC0000000>; }; + + pinctrl { + compatible = "cortina,gemini-pinctrl"; + regmap = <&syscon>; + /* Hog the DRAM pins */ + pinctrl-names = "default"; + pinctrl-0 = <&dram_default_pins>, <&system_default_pins>, + <&vcontrol_default_pins>; + + dram_default_pins: pinctrl-dram { + mux { + function = "dram"; + groups = "dramgrp"; + }; + }; + rtc_default_pins: pinctrl-rtc { + mux { + function = "rtc"; + groups = "rtcgrp"; + }; + }; + power_default_pins: pinctrl-power { + mux { + function = "power"; + groups = "powergrp"; + }; + }; + cir_default_pins: pinctrl-cir { + mux { + function = "cir"; + groups = "cirgrp"; + }; + }; + system_default_pins: pinctrl-system { + mux { + function = "system"; + groups = "systemgrp"; + }; + }; + vcontrol_default_pins: pinctrl-vcontrol { + mux { + function = "vcontrol"; + groups = "vcontrolgrp"; + }; + }; + ice_default_pins: pinctrl-ice { + mux { + function = "ice"; + groups = "icegrp"; + }; + }; + uart_default_pins: pinctrl-uart { + mux { + function = "uart"; + groups = "uartrxtxgrp"; + }; + }; + pflash_default_pins: pinctrl-pflash { + mux { + function = "pflash"; + groups = "pflashgrp"; + }; + }; + usb_default_pins: pinctrl-usb { + mux { + function = "usb"; + groups = "usbgrp"; + }; + }; + gmii_default_pins: pinctrl-gmii { + mux { + function = "gmii"; + groups = "gmiigrp"; + }; + }; + pci_default_pins: pinctrl-pci { + mux { + function = "pci"; + groups = "pcigrp"; + }; + }; + sata_default_pins: pinctrl-sata { + mux { + function = "sata"; + groups = "satagrp"; + }; + }; + /* Activate both groups of pins for this state */ + sata_and_ide_pins: pinctrl-sata-ide { + mux0 { + function = "sata"; + groups = "satagrp"; + }; + mux1 { + function = "ide"; + groups = "idegrp"; + }; + }; + }; }; watchdog@41000000 { compatible = "cortina,gemini-watchdog"; reg = <0x41000000 0x1000>; interrupts = <3 IRQ_TYPE_LEVEL_HIGH>; - resets = <&syscon 23>; - clocks = <&syscon 2>; + resets = <&syscon GEMINI_RESET_WDOG>; + clocks = <&syscon GEMINI_CLK_APB>; }; uart0: serial@42000000 { compatible = "ns16550a"; reg = <0x42000000 0x100>; - resets = <&syscon 18>; - clocks = <&syscon 6>; + resets = <&syscon GEMINI_RESET_UART>; + clocks = <&syscon GEMINI_CLK_UART>; interrupts = <18 IRQ_TYPE_LEVEL_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&uart_default_pins>; reg-shift = <2>; }; @@ -65,9 +170,9 @@ timer@43000000 { interrupts = <14 IRQ_TYPE_EDGE_FALLING>, /* Timer 1 */ <15 IRQ_TYPE_EDGE_FALLING>, /* Timer 2 */ <16 IRQ_TYPE_EDGE_FALLING>; /* Timer 3 */ - resets = <&syscon 17>; + resets = <&syscon GEMINI_RESET_TIMER>; /* APB clock or RTC clock */ - clocks = <&syscon 2>, <&syscon 0>; + clocks = <&syscon GEMINI_CLK_APB>, <&syscon GEMINI_CLK_RTC>; clock-names = "PCLK", "EXTCLK"; syscon = <&syscon>; }; @@ -76,20 +181,30 @@ rtc@45000000 { compatible = "cortina,gemini-rtc"; reg = <0x45000000 0x100>; interrupts = <17 IRQ_TYPE_LEVEL_HIGH>; - resets = <&syscon 16>; - clocks = <&syscon 2>, <&syscon 0>; + resets = 
<&syscon GEMINI_RESET_RTC>; + clocks = <&syscon GEMINI_CLK_APB>, <&syscon GEMINI_CLK_RTC>; clock-names = "PCLK", "EXTCLK"; + pinctrl-names = "default"; + pinctrl-0 = <&rtc_default_pins>; }; sata: sata@46000000 { compatible = "cortina,gemini-sata-bridge"; reg = <0x46000000 0x100>; - resets = <&syscon 26>, - <&syscon 27>; + resets = <&syscon GEMINI_RESET_SATA0>, + <&syscon GEMINI_RESET_SATA1>; reset-names = "sata0", "sata1"; - clocks = <&syscon 10>, - <&syscon 11>; + clocks = <&syscon GEMINI_CLK_GATE_SATA0>, + <&syscon GEMINI_CLK_GATE_SATA1>; clock-names = "SATA0_PCLK", "SATA1_PCLK"; + /* + * This defines the special "ide" state that needs + * to be explicitly enabled to enable the IDE pins, + * as these pins are normally used for other things. + */ + pinctrl-names = "default", "ide"; + pinctrl-0 = <&sata_default_pins>; + pinctrl-1 = <&sata_and_ide_pins>; syscon = <&syscon>; status = "disabled"; }; @@ -97,7 +212,7 @@ sata: sata@46000000 { intcon: interrupt-controller@48000000 { compatible = "faraday,ftintc010"; reg = <0x48000000 0x1000>; - resets = <&syscon 14>; + resets = <&syscon GEMINI_RESET_INTCON0>; interrupt-controller; #interrupt-cells = <2>; }; @@ -106,14 +221,16 @@ power-controller@4b000000 { compatible = "cortina,gemini-power-controller"; reg = <0x4b000000 0x100>; interrupts = <26 IRQ_TYPE_EDGE_RISING>; + pinctrl-names = "default"; + pinctrl-0 = <&power_default_pins>; }; gpio0: gpio@4d000000 { compatible = "cortina,gemini-gpio", "faraday,ftgpio010"; reg = <0x4d000000 0x100>; interrupts = <22 IRQ_TYPE_LEVEL_HIGH>; - resets = <&syscon 20>; - clocks = <&syscon 2>; + resets = <&syscon GEMINI_RESET_GPIO0>; + clocks = <&syscon GEMINI_CLK_APB>; gpio-controller; #gpio-cells = <2>; interrupt-controller; @@ -124,8 +241,8 @@ gpio1: gpio@4e000000 { compatible = "cortina,gemini-gpio", "faraday,ftgpio010"; reg = <0x4e000000 0x100>; interrupts = <23 IRQ_TYPE_LEVEL_HIGH>; - resets = <&syscon 21>; - clocks = <&syscon 2>; + resets = <&syscon GEMINI_RESET_GPIO1>; + clocks = <&syscon GEMINI_CLK_APB>; gpio-controller; #gpio-cells = <2>; interrupt-controller; @@ -136,8 +253,8 @@ gpio2: gpio@4f000000 { compatible = "cortina,gemini-gpio", "faraday,ftgpio010"; reg = <0x4f000000 0x100>; interrupts = <24 IRQ_TYPE_LEVEL_HIGH>; - resets = <&syscon 22>; - clocks = <&syscon 2>; + resets = <&syscon GEMINI_RESET_GPIO2>; + clocks = <&syscon GEMINI_CLK_APB>; gpio-controller; #gpio-cells = <2>; interrupt-controller; @@ -151,9 +268,11 @@ pci@50000000 { * to configure the host bridge. 
*/ reg = <0x50000000 0x100>; - resets = <&syscon 7>; - clocks = <&syscon 15>, <&syscon 4>; + resets = <&syscon GEMINI_RESET_PCI>; + clocks = <&syscon GEMINI_CLK_GATE_PCI>, <&syscon GEMINI_CLK_PCI>; clock-names = "PCLK", "PCICLK"; + pinctrl-names = "default"; + pinctrl-0 = <&pci_default_pins>; #address-cells = <3>; #size-cells = <2>; #interrupt-cells = <1>; @@ -193,8 +312,8 @@ ata@63000000 { compatible = "cortina,gemini-pata", "faraday,ftide010"; reg = <0x63000000 0x1000>; interrupts = <4 IRQ_TYPE_EDGE_RISING>; - resets = <&syscon 2>; - clocks = <&syscon 14>; + resets = <&syscon GEMINI_RESET_IDE>; + clocks = <&syscon GEMINI_CLK_GATE_IDE>; clock-names = "PCLK"; sata = <&sata>; status = "disabled"; @@ -204,8 +323,8 @@ ata@63400000 { compatible = "cortina,gemini-pata", "faraday,ftide010"; reg = <0x63400000 0x1000>; interrupts = <5 IRQ_TYPE_EDGE_RISING>; - resets = <&syscon 2>; - clocks = <&syscon 14>; + resets = <&syscon GEMINI_RESET_IDE>; + clocks = <&syscon GEMINI_CLK_GATE_IDE>; clock-names = "PCLK"; sata = <&sata>; status = "disabled"; @@ -217,8 +336,8 @@ dma-controller@67000000 { arm,primecell-periphid = <0x0003b080>; reg = <0x67000000 0x1000>; interrupts = <9 IRQ_TYPE_EDGE_RISING>; - resets = <&syscon 10>; - clocks = <&syscon 1>; + resets = <&syscon GEMINI_RESET_DMAC>; + clocks = <&syscon GEMINI_CLK_AHB>; clock-names = "apb_pclk"; /* Bus interface AHB1 (AHB0) is totally tilted */ lli-bus-interface-ahb2; diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi index 0ade3619f3c3..09ce8b81fafa 100644 --- a/arch/arm/boot/dts/imx25.dtsi +++ b/arch/arm/boot/dts/imx25.dtsi @@ -452,6 +452,13 @@ scc: crypto@53fac000 { interrupt-names = "scm", "smn"; }; + rngb: rngb@53fb0000 { + compatible = "fsl,imx25-rngb"; + reg = <0x53fb0000 0x4000>; + clocks = <&clks 109>; + interrupts = <22>; + }; + esdhc1: esdhc@53fb4000 { compatible = "fsl,imx25-esdhc"; reg = <0x53fb4000 0x4000>; diff --git a/arch/arm/boot/dts/imx53-cx9020.dts b/arch/arm/boot/dts/imx53-cx9020.dts new file mode 100644 index 000000000000..4f54fd4418a3 --- /dev/null +++ b/arch/arm/boot/dts/imx53-cx9020.dts @@ -0,0 +1,297 @@ +/* + * Copyright 2017 Beckhoff Automation GmbH & Co. KG + * based on imx53-qsb.dts + * + * The code contained herein is licensed under the GNU General Public + * License. 
You may obtain a copy of the GNU General Public License + * Version 2 or later at the following locations: + * + * http://www.opensource.org/licenses/gpl-license.html + * http://www.gnu.org/copyleft/gpl.html + */ + +/dts-v1/; +#include "imx53.dtsi" + +/ { + model = "Beckhoff CX9020 Embedded PC"; + compatible = "bhf,cx9020", "fsl,imx53"; + + chosen { + stdout-path = &uart2; + }; + + memory { + reg = <0x70000000 0x20000000>, + <0xb0000000 0x20000000>; + }; + + display-0 { + #address-cells =<1>; + #size-cells = <0>; + compatible = "fsl,imx-parallel-display"; + interface-pix-fmt = "rgb24"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_ipu_disp0>; + + port@0 { + reg = <0>; + + display0_in: endpoint { + remote-endpoint = <&ipu_di0_disp0>; + }; + }; + + port@1 { + reg = <1>; + + display0_out: endpoint { + remote-endpoint = <&tfp410_in>; + }; + }; + }; + + dvi-connector { + compatible = "dvi-connector"; + ddc-i2c-bus = <&i2c2>; + digital; + + port { + dvi_connector_in: endpoint { + remote-endpoint = <&tfp410_out>; + }; + }; + }; + + dvi-converter { + #address-cells = <1>; + #size-cells = <0>; + compatible = "ti,tfp410"; + + port@0 { + reg = <0>; + + tfp410_in: endpoint { + remote-endpoint = <&display0_out>; + }; + }; + + port@1 { + reg = <1>; + + tfp410_out: endpoint { + remote-endpoint = <&dvi_connector_in>; + }; + }; + }; + + leds { + compatible = "gpio-leds"; + + pwr-r { + gpios = <&gpio3 22 GPIO_ACTIVE_HIGH>; + default-state = "off"; + }; + + pwr-g { + gpios = <&gpio3 24 GPIO_ACTIVE_HIGH>; + default-state = "on"; + }; + + pwr-b { + gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>; + default-state = "off"; + }; + + sd1-b { + linux,default-trigger = "mmc0"; + gpios = <&gpio3 20 GPIO_ACTIVE_HIGH>; + }; + + sd2-b { + linux,default-trigger = "mmc1"; + gpios = <&gpio3 17 GPIO_ACTIVE_HIGH>; + }; + }; + + regulator-3p2v { + compatible = "regulator-fixed"; + regulator-name = "3P2V"; + regulator-min-microvolt = <3200000>; + regulator-max-microvolt = <3200000>; + regulator-always-on; + }; + + reg_usb_vbus: regulator-vbus { + compatible = "regulator-fixed"; + regulator-name = "usb_vbus"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + gpio = <&gpio7 8 GPIO_ACTIVE_HIGH>; + enable-active-high; + }; +}; + +&esdhc1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_esdhc1>; + cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>; + bus-width = <4>; + status = "okay"; +}; + +&esdhc2 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_esdhc2>; + cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>; + bus-width = <4>; + status = "okay"; +}; + +&fec { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_fec>; + phy-mode = "rmii"; + phy-reset-gpios = <&gpio7 6 GPIO_ACTIVE_HIGH>; + status = "okay"; +}; + +&i2c2 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_i2c2>; + status = "okay"; +}; + +&ipu_di0_disp0 { + remote-endpoint = <&display0_in>; +}; + +&uart2 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart2>; + fsl,dte-mode; + status = "okay"; +}; + +&usbh1 { + vbus-supply = <®_usb_vbus>; + phy_type = "utmi"; + status = "okay"; +}; + +&usbotg { + dr_mode = "peripheral"; + status = "okay"; +}; + +&vpu { + status = "okay"; +}; + +&iomuxc { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_hog>; + + pinctrl_hog: hoggrp { + fsl,pins = < + MX53_PAD_GPIO_0__CCM_CLKO 0x1c4 + MX53_PAD_GPIO_16__I2C3_SDA 0x1c4 + MX53_PAD_EIM_D22__GPIO3_22 0x1c4 + MX53_PAD_EIM_D23__GPIO3_23 0x1e4 + MX53_PAD_EIM_D24__GPIO3_24 0x1e4 + >; + }; + + pinctrl_esdhc1: esdhc1grp { + fsl,pins = < + MX53_PAD_SD1_DATA0__ESDHC1_DAT0 
0x1d5 + MX53_PAD_SD1_DATA1__ESDHC1_DAT1 0x1d5 + MX53_PAD_SD1_DATA2__ESDHC1_DAT2 0x1d5 + MX53_PAD_SD1_DATA3__ESDHC1_DAT3 0x1d5 + MX53_PAD_SD1_CMD__ESDHC1_CMD 0x1d5 + MX53_PAD_SD1_CLK__ESDHC1_CLK 0x1d5 + MX53_PAD_GPIO_1__ESDHC1_CD 0x1c4 + MX53_PAD_EIM_D17__GPIO3_17 0x1e4 + MX53_PAD_GPIO_3__GPIO1_3 0x1c4 + >; + }; + + pinctrl_esdhc2: esdhc2grp { + fsl,pins = < + MX53_PAD_SD2_DATA0__ESDHC2_DAT0 0x1d5 + MX53_PAD_SD2_DATA1__ESDHC2_DAT1 0x1d5 + MX53_PAD_SD2_DATA2__ESDHC2_DAT2 0x1d5 + MX53_PAD_SD2_DATA3__ESDHC2_DAT3 0x1d5 + MX53_PAD_SD2_CMD__ESDHC2_CMD 0x1d5 + MX53_PAD_SD2_CLK__ESDHC2_CLK 0x1d5 + MX53_PAD_GPIO_4__ESDHC2_CD 0x1e4 + MX53_PAD_EIM_D20__GPIO3_20 0x1e4 + MX53_PAD_GPIO_8__GPIO1_8 0x1c4 + >; + }; + + pinctrl_fec: fecgrp { + fsl,pins = < + MX53_PAD_FEC_MDC__FEC_MDC 0x4 + MX53_PAD_FEC_MDIO__FEC_MDIO 0x1fc + MX53_PAD_FEC_REF_CLK__FEC_TX_CLK 0x180 + MX53_PAD_FEC_RX_ER__FEC_RX_ER 0x180 + MX53_PAD_FEC_CRS_DV__FEC_RX_DV 0x180 + MX53_PAD_FEC_RXD1__FEC_RDATA_1 0x180 + MX53_PAD_FEC_RXD0__FEC_RDATA_0 0x180 + MX53_PAD_FEC_TX_EN__FEC_TX_EN 0x4 + MX53_PAD_FEC_TXD1__FEC_TDATA_1 0x4 + MX53_PAD_FEC_TXD0__FEC_TDATA_0 0x4 + >; + }; + + pinctrl_i2c2: i2c2grp { + fsl,pins = < + MX53_PAD_KEY_ROW3__I2C2_SDA 0xc0000000 + MX53_PAD_KEY_COL3__I2C2_SCL 0xc0000000 + >; + }; + + pinctrl_ipu_disp0: ipudisp0grp { + fsl,pins = < + MX53_PAD_DI0_DISP_CLK__IPU_DI0_DISP_CLK 0x5 + MX53_PAD_DI0_PIN15__IPU_DI0_PIN15 0x5 + MX53_PAD_DI0_PIN2__IPU_DI0_PIN2 0x5 + MX53_PAD_DI0_PIN3__IPU_DI0_PIN3 0x5 + MX53_PAD_DI0_PIN4__IPU_DI0_PIN4 0x5 + MX53_PAD_DISP0_DAT0__IPU_DISP0_DAT_0 0x5 + MX53_PAD_DISP0_DAT1__IPU_DISP0_DAT_1 0x5 + MX53_PAD_DISP0_DAT2__IPU_DISP0_DAT_2 0x5 + MX53_PAD_DISP0_DAT3__IPU_DISP0_DAT_3 0x5 + MX53_PAD_DISP0_DAT4__IPU_DISP0_DAT_4 0x5 + MX53_PAD_DISP0_DAT5__IPU_DISP0_DAT_5 0x5 + MX53_PAD_DISP0_DAT6__IPU_DISP0_DAT_6 0x5 + MX53_PAD_DISP0_DAT7__IPU_DISP0_DAT_7 0x5 + MX53_PAD_DISP0_DAT8__IPU_DISP0_DAT_8 0x5 + MX53_PAD_DISP0_DAT9__IPU_DISP0_DAT_9 0x5 + MX53_PAD_DISP0_DAT10__IPU_DISP0_DAT_10 0x5 + MX53_PAD_DISP0_DAT11__IPU_DISP0_DAT_11 0x5 + MX53_PAD_DISP0_DAT12__IPU_DISP0_DAT_12 0x5 + MX53_PAD_DISP0_DAT13__IPU_DISP0_DAT_13 0x5 + MX53_PAD_DISP0_DAT14__IPU_DISP0_DAT_14 0x5 + MX53_PAD_DISP0_DAT15__IPU_DISP0_DAT_15 0x5 + MX53_PAD_DISP0_DAT16__IPU_DISP0_DAT_16 0x5 + MX53_PAD_DISP0_DAT17__IPU_DISP0_DAT_17 0x5 + MX53_PAD_DISP0_DAT18__IPU_DISP0_DAT_18 0x5 + MX53_PAD_DISP0_DAT19__IPU_DISP0_DAT_19 0x5 + MX53_PAD_DISP0_DAT20__IPU_DISP0_DAT_20 0x5 + MX53_PAD_DISP0_DAT21__IPU_DISP0_DAT_21 0x5 + MX53_PAD_DISP0_DAT22__IPU_DISP0_DAT_22 0x5 + MX53_PAD_DISP0_DAT23__IPU_DISP0_DAT_23 0x5 + >; + }; + + pinctrl_uart2: uart2grp { + fsl,pins = < + MX53_PAD_EIM_D26__UART2_RXD_MUX 0x1e4 + MX53_PAD_EIM_D27__UART2_TXD_MUX 0x1e4 + MX53_PAD_EIM_D28__UART2_RTS 0x1e4 + MX53_PAD_EIM_D29__UART2_CTS 0x1e4 + >; + }; +}; diff --git a/arch/arm/boot/dts/imx53-pinfunc.h b/arch/arm/boot/dts/imx53-pinfunc.h index aec406bc65eb..59f9c29e3fe2 100644 --- a/arch/arm/boot/dts/imx53-pinfunc.h +++ b/arch/arm/boot/dts/imx53-pinfunc.h @@ -524,6 +524,7 @@ #define MX53_PAD_EIM_D25__UART1_DSR 0x140 0x488 0x000 0x7 0x0 #define MX53_PAD_EIM_D26__EMI_WEIM_D_26 0x144 0x48c 0x000 0x0 0x0 #define MX53_PAD_EIM_D26__GPIO3_26 0x144 0x48c 0x000 0x1 0x0 +#define MX53_PAD_EIM_D26__UART2_RXD_MUX 0x144 0x48c 0x880 0x2 0x0 #define MX53_PAD_EIM_D26__UART2_TXD_MUX 0x144 0x48c 0x000 0x2 0x0 #define MX53_PAD_EIM_D26__FIRI_RXD 0x144 0x48c 0x80c 0x3 0x0 #define MX53_PAD_EIM_D26__IPU_CSI0_D_1 0x144 0x48c 0x000 0x4 0x0 @@ -533,6 +534,7 @@ #define MX53_PAD_EIM_D27__EMI_WEIM_D_27 0x148 0x490 0x000 0x0 
0x0 #define MX53_PAD_EIM_D27__GPIO3_27 0x148 0x490 0x000 0x1 0x0 #define MX53_PAD_EIM_D27__UART2_RXD_MUX 0x148 0x490 0x880 0x2 0x1 +#define MX53_PAD_EIM_D27__UART2_TXD_MUX 0x148 0x490 0x000 0x2 0x0 #define MX53_PAD_EIM_D27__FIRI_TXD 0x148 0x490 0x000 0x3 0x0 #define MX53_PAD_EIM_D27__IPU_CSI0_D_0 0x148 0x490 0x000 0x4 0x0 #define MX53_PAD_EIM_D27__IPU_DI1_PIN13 0x148 0x490 0x000 0x5 0x0 @@ -541,6 +543,7 @@ #define MX53_PAD_EIM_D28__EMI_WEIM_D_28 0x14c 0x494 0x000 0x0 0x0 #define MX53_PAD_EIM_D28__GPIO3_28 0x14c 0x494 0x000 0x1 0x0 #define MX53_PAD_EIM_D28__UART2_CTS 0x14c 0x494 0x000 0x2 0x0 +#define MX53_PAD_EIM_D28__UART2_RTS 0x14c 0x494 0x87c 0x2 0x0 #define MX53_PAD_EIM_D28__IPU_DISPB0_SER_DIO 0x14c 0x494 0x82c 0x3 0x1 #define MX53_PAD_EIM_D28__CSPI_MOSI 0x14c 0x494 0x788 0x4 0x1 #define MX53_PAD_EIM_D28__I2C1_SDA 0x14c 0x494 0x818 0x5 0x1 @@ -548,6 +551,7 @@ #define MX53_PAD_EIM_D28__IPU_DI0_PIN13 0x14c 0x494 0x000 0x7 0x0 #define MX53_PAD_EIM_D29__EMI_WEIM_D_29 0x150 0x498 0x000 0x0 0x0 #define MX53_PAD_EIM_D29__GPIO3_29 0x150 0x498 0x000 0x1 0x0 +#define MX53_PAD_EIM_D29__UART2_CTS 0x150 0x498 0x000 0x2 0x0 #define MX53_PAD_EIM_D29__UART2_RTS 0x150 0x498 0x87c 0x2 0x1 #define MX53_PAD_EIM_D29__IPU_DISPB0_SER_RS 0x150 0x498 0x000 0x3 0x0 #define MX53_PAD_EIM_D29__CSPI_SS0 0x150 0x498 0x78c 0x4 0x2 diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi index 2e516f4985e4..8bf0d89cdd35 100644 --- a/arch/arm/boot/dts/imx53.dtsi +++ b/arch/arm/boot/dts/imx53.dtsi @@ -433,6 +433,15 @@ gpt: timer@53fa0000 { clock-names = "ipg", "per"; }; + srtc: srtc@53fa4000 { + compatible = "fsl,imx53-rtc", "fsl,imx25-rtc"; + reg = <0x53fa4000 0x4000>; + interrupts = <24>; + interrupt-parent = <&tzic>; + clocks = <&clks IMX5_CLK_SRTC_GATE>; + clock-names = "ipg"; + }; + iomuxc: iomuxc@53fa8000 { compatible = "fsl,imx53-iomuxc"; reg = <0x53fa8000 0x4000>; diff --git a/arch/arm/boot/dts/imx6dl-gw52xx.dts b/arch/arm/boot/dts/imx6dl-gw52xx.dts index a2e0b73fdd4a..5f9f8948100d 100644 --- a/arch/arm/boot/dts/imx6dl-gw52xx.dts +++ b/arch/arm/boot/dts/imx6dl-gw52xx.dts @@ -17,3 +17,61 @@ / { model = "Gateworks Ventana i.MX6 DualLite/Solo GW52XX"; compatible = "gw,imx6dl-gw52xx", "gw,ventana", "fsl,imx6dl"; }; + +&i2c3 { + adv7180: camera@20 { + compatible = "adi,adv7180"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_adv7180>; + reg = <0x20>; + powerdown-gpios = <&gpio3 31 GPIO_ACTIVE_LOW>; + interrupt-parent = <&gpio3>; + interrupts = <30 IRQ_TYPE_LEVEL_LOW>; + + port { + adv7180_to_ipu1_csi1_mux: endpoint { + remote-endpoint = <&ipu1_csi1_mux_from_parallel_sensor>; + bus-width = <8>; + }; + }; + }; +}; + +&ipu1_csi1_from_ipu1_csi1_mux { + bus-width = <8>; +}; + +&ipu1_csi1_mux_from_parallel_sensor { + remote-endpoint = <&adv7180_to_ipu1_csi1_mux>; + bus-width = <8>; +}; + +&ipu1_csi1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_ipu1_csi1>; +}; + +&iomuxc { + pinctrl_adv7180: adv7180grp { + fsl,pins = < + MX6QDL_PAD_EIM_D30__GPIO3_IO30 0x0001b0b0 + MX6QDL_PAD_EIM_D31__GPIO3_IO31 0x4001b0b0 + >; + }; + + pinctrl_ipu1_csi1: ipu1_csi1grp { + fsl,pins = < + MX6QDL_PAD_EIM_EB2__IPU1_CSI1_DATA19 0x1b0b0 + MX6QDL_PAD_EIM_D16__IPU1_CSI1_DATA18 0x1b0b0 + MX6QDL_PAD_EIM_D18__IPU1_CSI1_DATA17 0x1b0b0 + MX6QDL_PAD_EIM_D19__IPU1_CSI1_DATA16 0x1b0b0 + MX6QDL_PAD_EIM_D20__IPU1_CSI1_DATA15 0x1b0b0 + MX6QDL_PAD_EIM_D26__IPU1_CSI1_DATA14 0x1b0b0 + MX6QDL_PAD_EIM_D27__IPU1_CSI1_DATA13 0x1b0b0 + MX6QDL_PAD_EIM_A17__IPU1_CSI1_DATA12 0x1b0b0 + MX6QDL_PAD_EIM_D29__IPU1_CSI1_VSYNC 0x1b0b0 + 
MX6QDL_PAD_EIM_EB3__IPU1_CSI1_HSYNC 0x1b0b0 + MX6QDL_PAD_EIM_A16__IPU1_CSI1_PIXCLK 0x1b0b0 + >; + }; +}; diff --git a/arch/arm/boot/dts/imx6dl-gw53xx.dts b/arch/arm/boot/dts/imx6dl-gw53xx.dts index 6844b708d2f8..9bfc620d37bd 100644 --- a/arch/arm/boot/dts/imx6dl-gw53xx.dts +++ b/arch/arm/boot/dts/imx6dl-gw53xx.dts @@ -17,3 +17,61 @@ / { model = "Gateworks Ventana i.MX6 DualLite/Solo GW53XX"; compatible = "gw,imx6dl-gw53xx", "gw,ventana", "fsl,imx6dl"; }; + +&i2c3 { + adv7180: camera@20 { + compatible = "adi,adv7180"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_adv7180>; + reg = <0x20>; + powerdown-gpios = <&gpio3 31 GPIO_ACTIVE_LOW>; + interrupt-parent = <&gpio3>; + interrupts = <30 IRQ_TYPE_LEVEL_LOW>; + + port { + adv7180_to_ipu1_csi1_mux: endpoint { + remote-endpoint = <&ipu1_csi1_mux_from_parallel_sensor>; + bus-width = <8>; + }; + }; + }; +}; + +&ipu1_csi1_from_ipu1_csi1_mux { + bus-width = <8>; +}; + +&ipu1_csi1_mux_from_parallel_sensor { + remote-endpoint = <&adv7180_to_ipu1_csi1_mux>; + bus-width = <8>; +}; + +&ipu1_csi1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_ipu1_csi1>; +}; + +&iomuxc { + pinctrl_adv7180: adv7180grp { + fsl,pins = < + MX6QDL_PAD_EIM_D30__GPIO3_IO30 0x0001b0b0 + MX6QDL_PAD_EIM_D31__GPIO3_IO31 0x4001b0b0 + >; + }; + + pinctrl_ipu1_csi1: ipu1_csi1grp { + fsl,pins = < + MX6QDL_PAD_EIM_EB2__IPU1_CSI1_DATA19 0x1b0b0 + MX6QDL_PAD_EIM_D16__IPU1_CSI1_DATA18 0x1b0b0 + MX6QDL_PAD_EIM_D18__IPU1_CSI1_DATA17 0x1b0b0 + MX6QDL_PAD_EIM_D19__IPU1_CSI1_DATA16 0x1b0b0 + MX6QDL_PAD_EIM_D20__IPU1_CSI1_DATA15 0x1b0b0 + MX6QDL_PAD_EIM_D26__IPU1_CSI1_DATA14 0x1b0b0 + MX6QDL_PAD_EIM_D27__IPU1_CSI1_DATA13 0x1b0b0 + MX6QDL_PAD_EIM_A17__IPU1_CSI1_DATA12 0x1b0b0 + MX6QDL_PAD_EIM_D29__IPU1_CSI1_VSYNC 0x1b0b0 + MX6QDL_PAD_EIM_EB3__IPU1_CSI1_HSYNC 0x1b0b0 + MX6QDL_PAD_EIM_A16__IPU1_CSI1_PIXCLK 0x1b0b0 + >; + }; +}; diff --git a/arch/arm/boot/dts/imx6dl-gw54xx.dts b/arch/arm/boot/dts/imx6dl-gw54xx.dts index be915412f852..b909bdf9a2ef 100644 --- a/arch/arm/boot/dts/imx6dl-gw54xx.dts +++ b/arch/arm/boot/dts/imx6dl-gw54xx.dts @@ -17,3 +17,61 @@ / { model = "Gateworks Ventana i.MX6 DualLite/Solo GW54XX"; compatible = "gw,imx6dl-gw54xx", "gw,ventana", "fsl,imx6dl"; }; + +&i2c3 { + adv7180: camera@20 { + compatible = "adi,adv7180"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_adv7180>; + reg = <0x20>; + powerdown-gpios = <&gpio3 31 GPIO_ACTIVE_LOW>; + interrupt-parent = <&gpio3>; + interrupts = <30 IRQ_TYPE_LEVEL_LOW>; + + port { + adv7180_to_ipu1_csi1_mux: endpoint { + remote-endpoint = <&ipu1_csi1_mux_from_parallel_sensor>; + bus-width = <8>; + }; + }; + }; +}; + +&ipu1_csi1_from_ipu1_csi1_mux { + bus-width = <8>; +}; + +&ipu1_csi1_mux_from_parallel_sensor { + remote-endpoint = <&adv7180_to_ipu1_csi1_mux>; + bus-width = <8>; +}; + +&ipu1_csi1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_ipu1_csi1>; +}; + +&iomuxc { + pinctrl_adv7180: adv7180grp { + fsl,pins = < + MX6QDL_PAD_EIM_D30__GPIO3_IO30 0x0001b0b0 + MX6QDL_PAD_EIM_D31__GPIO3_IO31 0x4001b0b0 + >; + }; + + pinctrl_ipu1_csi1: ipu1_csi1grp { + fsl,pins = < + MX6QDL_PAD_EIM_EB2__IPU1_CSI1_DATA19 0x1b0b0 + MX6QDL_PAD_EIM_D16__IPU1_CSI1_DATA18 0x1b0b0 + MX6QDL_PAD_EIM_D18__IPU1_CSI1_DATA17 0x1b0b0 + MX6QDL_PAD_EIM_D19__IPU1_CSI1_DATA16 0x1b0b0 + MX6QDL_PAD_EIM_D20__IPU1_CSI1_DATA15 0x1b0b0 + MX6QDL_PAD_EIM_D26__IPU1_CSI1_DATA14 0x1b0b0 + MX6QDL_PAD_EIM_D27__IPU1_CSI1_DATA13 0x1b0b0 + MX6QDL_PAD_EIM_A17__IPU1_CSI1_DATA12 0x1b0b0 + MX6QDL_PAD_EIM_D29__IPU1_CSI1_VSYNC 0x1b0b0 + 
MX6QDL_PAD_EIM_EB3__IPU1_CSI1_HSYNC 0x1b0b0 + MX6QDL_PAD_EIM_A16__IPU1_CSI1_PIXCLK 0x1b0b0 + >; + }; +}; diff --git a/arch/arm/boot/dts/imx6dl-riotboard.dts b/arch/arm/boot/dts/imx6dl-riotboard.dts index 29b45f2e64e0..275c6c05219d 100644 --- a/arch/arm/boot/dts/imx6dl-riotboard.dts +++ b/arch/arm/boot/dts/imx6dl-riotboard.dts @@ -101,6 +101,51 @@ &fec { status = "okay"; }; +&gpio1 { + gpio-line-names = + "", "", "SD2_WP", "", "SD2_CD", "I2C3_SCL", + "I2C3_SDA", "I2C4_SCL", + "I2C4_SDA", "", "", "", "", "", "", "", + "", "PWM3", "", "", "", "", "", "", + "", "", "", "", "", "", "", ""; +}; + +&gpio3 { + gpio-line-names = + "", "", "", "", "", "", "", "", + "", "", "", "", "", "", "", "", + "", "", "", "", "", "", "USB_OTG_VBUS", "", + "UART3_TXD", "UART3_RXD", "", "", "EIM_D28", "", "", ""; +}; + +&gpio4 { + gpio-line-names = + "", "", "", "", "", "", "UART4_TXD", "UART4_RXD", + "UART5_TXD", "UART5_RXD", "", "", "", "", "", "", + "GPIO4_16", "GPIO4_17", "GPIO4_18", "GPIO4_19", "", + "CSPI3_CLK", "CSPI3_MOSI", "CSPI3_MISO", + "CSPI3_CS0", "CSPI3_CS1", "GPIO4_26", "GPIO4_27", + "CSPI3_RDY", "PWM1", "PWM2", "GPIO4_31"; +}; + +&gpio5 { + gpio-line-names = + "", "", "EIM_A25", "", "", "GPIO5_05", "GPIO5_06", + "GPIO5_07", + "GPIO5_08", "CSPI2_CS1", "CSPI2_MOSI", "CSPI2_MISO", + "CSPI2_CS0", "CSPI2_CLK", "", "", + "", "", "", "", "", "", "", "", + "", "", "", "", "", "", "", ""; +}; + +&gpio7 { + gpio-line-names = + "SD3_CD", "SD3_WP", "", "", "", "", "", "", + "", "", "", "", "", "", "", "", + "", "", "", "", "", "", "", "", + "", "", "", "", "", "", "", ""; +}; + &hdmi { ddc-i2c-bus = <&i2c2>; status = "okay"; diff --git a/arch/arm/boot/dts/imx6q-apalis-eval.dts b/arch/arm/boot/dts/imx6q-apalis-eval.dts new file mode 100644 index 000000000000..4bbfe3d61027 --- /dev/null +++ b/arch/arm/boot/dts/imx6q-apalis-eval.dts @@ -0,0 +1,278 @@ +/* + * Copyright 2014-2017 Toradex AG + * Copyright 2012 Freescale Semiconductor, Inc. + * Copyright 2011 Linaro Ltd. + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +/dts-v1/; + +#include +#include +#include +#include "imx6q.dtsi" +#include "imx6qdl-apalis.dtsi" + +/ { + model = "Toradex Apalis iMX6Q/D Module on Apalis Evaluation Board"; + compatible = "toradex,apalis_imx6q-eval", "toradex,apalis_imx6q", + "fsl,imx6q"; + + aliases { + i2c0 = &i2c1; + i2c1 = &i2c3; + i2c2 = &i2c2; + rtc0 = &rtc_i2c; + rtc1 = &snvs_rtc; + }; + + gpio-keys { + compatible = "gpio-keys"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_gpio_keys>; + + wakeup { + label = "Wake-Up"; + gpios = <&gpio1 4 GPIO_ACTIVE_LOW>; + linux,code = ; + debounce-interval = <10>; + wakeup-source; + }; + }; + + lcd_display: display@di0 { + compatible = "fsl,imx-parallel-display"; + #address-cells = <1>; + #size-cells = <0>; + interface-pix-fmt = "rgb24"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_ipu1_lcdif>; + status = "okay"; + + port@0 { + reg = <0>; + + lcd_display_in: endpoint { + remote-endpoint = <&ipu1_di1_disp1>; + }; + }; + + port@1 { + reg = <1>; + + lcd_display_out: endpoint { + remote-endpoint = <&lcd_panel_in>; + }; + }; + }; + + panel: panel { + /* + * edt,et057090dhu: EDT 5.7" LCD TFT + * edt,et070080dh6: EDT 7.0" LCD TFT + */ + compatible = "edt,et057090dhu"; + backlight = <&backlight>; + + port { + lcd_panel_in: endpoint { + remote-endpoint = <&lcd_display_out>; + }; + }; + }; + + reg_pcie_switch: regulator-pcie-switch { + compatible = "regulator-fixed"; + regulator-name = "pcie_switch"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + gpio = <&gpio1 2 GPIO_ACTIVE_HIGH>; + startup-delay-us = <100000>; + enable-active-high; + status = "okay"; + }; +}; + +&backlight { + brightness-levels = <0 127 191 223 239 247 251 255>; + default-brightness-level = <1>; + status = "okay"; +}; + +&can1 { + status = "okay"; +}; + +&can2 { + status = "okay"; +}; + +&hdmi { + status = "okay"; +}; + +/* I2C1_SDA/SCL on MXM3 209/211 (e.g. RTC on carrier board) */ +&i2c1 { + status = "okay"; + + pcie-switch@58 { + compatible = "plx,pex8605"; + reg = <0x58>; + }; + + /* M41T0M6 real time clock on carrier board */ + rtc_i2c: rtc@68 { + compatible = "st,m41t00"; + reg = <0x68>; + }; +}; + +/* + * I2C3_SDA/SCL (CAM) on MXM3 pin 201/203 (e.g. 
camera sensor on carrier + * board) + */ +&i2c3 { + status = "okay"; +}; + +&ipu1_di1_disp1 { + remote-endpoint = <&lcd_display_in>; +}; + +&ldb { + status = "okay"; +}; + +&pcie { + /* active-high meaning opposite of regular PERST# active-low polarity */ + reset-gpio = <&gpio1 28 GPIO_ACTIVE_HIGH>; + reset-gpio-active-high; + vpcie-supply = <®_pcie_switch>; + status = "okay"; +}; + +&pwm1 { + status = "okay"; +}; + +&pwm2 { + status = "okay"; +}; + +&pwm3 { + status = "okay"; +}; + +&pwm4 { + status = "okay"; +}; + +®_usb_otg_vbus { + status = "okay"; +}; + +®_usb_host_vbus { + status = "okay"; +}; + +&sata { + status = "okay"; +}; + +&sound_spdif { + status = "okay"; +}; + +&spdif { + status = "okay"; +}; + +&uart1 { + status = "okay"; +}; + +&uart2 { + status = "okay"; +}; + +&uart4 { + status = "okay"; +}; + +&uart5 { + status = "okay"; +}; + +&usbh1 { + vbus-supply = <®_usb_host_vbus>; + status = "okay"; +}; + +&usbotg { + vbus-supply = <®_usb_otg_vbus>; + status = "okay"; +}; + +/* MMC1 */ +&usdhc1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_usdhc1_4bit &pinctrl_usdhc1_8bit &pinctrl_mmc_cd>; + cd-gpios = <&gpio4 20 GPIO_ACTIVE_LOW>; + status = "okay"; +}; + +/* SD1 */ +&usdhc2 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_usdhc2 &pinctrl_sd_cd>; + cd-gpios = <&gpio6 14 GPIO_ACTIVE_LOW>; + status = "okay"; +}; + +&iomuxc { + /* + * Mux the Apalis GPIOs + */ + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_apalis_gpio1 &pinctrl_apalis_gpio2 + &pinctrl_apalis_gpio3 &pinctrl_apalis_gpio4 + &pinctrl_apalis_gpio5 &pinctrl_apalis_gpio6 + &pinctrl_apalis_gpio7 &pinctrl_apalis_gpio8 + >; +}; diff --git a/arch/arm/boot/dts/imx6q-apalis-ixora-v1.1.dts b/arch/arm/boot/dts/imx6q-apalis-ixora-v1.1.dts new file mode 100644 index 000000000000..a35c7a54ad3b --- /dev/null +++ b/arch/arm/boot/dts/imx6q-apalis-ixora-v1.1.dts @@ -0,0 +1,291 @@ +/* + * Copyright 2014-2017 Toradex AG + * Copyright 2012 Freescale Semiconductor, Inc. + * Copyright 2011 Linaro Ltd. + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +/dts-v1/; + +#include +#include +#include +#include "imx6q.dtsi" +#include "imx6qdl-apalis.dtsi" + +/ { + model = "Toradex Apalis iMX6Q/D Module on Ixora Carrier Board V1.1"; + compatible = "toradex,apalis_imx6q-ixora-v1.1", + "toradex,apalis_imx6q-ixora", "toradex,apalis_imx6q", + "fsl,imx6q"; + + aliases { + i2c0 = &i2c1; + i2c1 = &i2c3; + i2c2 = &i2c2; + rtc0 = &rtc_i2c; + rtc1 = &snvs_rtc; + }; + + gpio-keys { + compatible = "gpio-keys"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_gpio_keys>; + + wakeup { + label = "Wake-Up"; + gpios = <&gpio1 4 GPIO_ACTIVE_LOW>; + linux,code = ; + debounce-interval = <10>; + wakeup-source; + }; + }; + + lcd_display: display@di0 { + compatible = "fsl,imx-parallel-display"; + #address-cells = <1>; + #size-cells = <0>; + interface-pix-fmt = "rgb24"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_ipu1_lcdif>; + status = "okay"; + + port@0 { + reg = <0>; + + lcd_display_in: endpoint { + remote-endpoint = <&ipu1_di1_disp1>; + }; + }; + + port@1 { + reg = <1>; + + lcd_display_out: endpoint { + remote-endpoint = <&lcd_panel_in>; + }; + }; + }; + + panel: panel { + /* + * edt,et057090dhu: EDT 5.7" LCD TFT + * edt,et070080dh6: EDT 7.0" LCD TFT + */ + compatible = "edt,et057090dhu"; + backlight = <&backlight>; + + port { + lcd_panel_in: endpoint { + remote-endpoint = <&lcd_display_out>; + }; + }; + }; + + leds { + compatible = "gpio-leds"; + + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_leds_ixora>; + + led4-green { + label = "LED_4_GREEN"; + gpios = <&gpio1 14 GPIO_ACTIVE_HIGH>; + }; + + led4-red { + label = "LED_4_RED"; + gpios = <&gpio1 12 GPIO_ACTIVE_HIGH>; + }; + + led5-green { + label = "LED_5_GREEN"; + gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>; + }; + + led5-red { + label = "LED_5_RED"; + gpios = <&gpio2 2 GPIO_ACTIVE_HIGH>; + }; + }; +}; + +&backlight { + brightness-levels = <0 127 191 223 239 247 251 255>; + default-brightness-level = <1>; + status = "okay"; +}; + +&can1 { + status = "okay"; +}; + +&can2 { + status = "okay"; +}; + +&hdmi { + status = "okay"; +}; + +/* I2C1_SDA/SCL on MXM3 209/211 (e.g. RTC on carrier board) */ +&i2c1 { + status = "okay"; + + /* M41T0M6 real time clock on carrier board */ + rtc_i2c: rtc@68 { + compatible = "st,m41t00"; + reg = <0x68>; + }; +}; + +/* + * I2C3_SDA/SCL (CAM) on MXM3 pin 201/203 (e.g. 
camera sensor on carrier + * board) + */ +&i2c3 { + status = "okay"; +}; + +&ipu1_di1_disp1 { + remote-endpoint = <&lcd_display_in>; +}; + +&ldb { + status = "okay"; +}; + +&pcie { + /* active-high meaning opposite of regular PERST# active-low polarity */ + reset-gpio = <&gpio1 28 GPIO_ACTIVE_HIGH>; + reset-gpio-active-high; + status = "okay"; +}; + +&pwm1 { + status = "okay"; +}; + +&pwm2 { + status = "okay"; +}; + +&pwm3 { + status = "okay"; +}; + +&pwm4 { + status = "okay"; +}; + +®_usb_otg_vbus { + status = "okay"; +}; + +®_usb_host_vbus { + status = "okay"; +}; + +&sata { + status = "okay"; +}; + +&sound_spdif { + status = "okay"; +}; + +&spdif { + status = "okay"; +}; + +&uart1 { + status = "okay"; +}; + +&uart2 { + status = "okay"; +}; + +&uart4 { + status = "okay"; +}; + +&uart5 { + status = "okay"; +}; + +&usbh1 { + vbus-supply = <®_usb_host_vbus>; + status = "okay"; +}; + +&usbotg { + vbus-supply = <®_usb_otg_vbus>; + status = "okay"; +}; + +/* MMC1 */ +&usdhc1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_usdhc1_4bit &pinctrl_mmc_cd>; + cd-gpios = <&gpio4 20 GPIO_ACTIVE_LOW>; + bus-width = <4>; + status = "okay"; +}; + +&iomuxc { + /* + * Mux the Apalis GPIOs + */ + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_apalis_gpio1 &pinctrl_apalis_gpio2 + &pinctrl_apalis_gpio3 &pinctrl_apalis_gpio4 + &pinctrl_apalis_gpio5 &pinctrl_apalis_gpio6 + &pinctrl_apalis_gpio7 &pinctrl_apalis_gpio8 + >; + + pinctrl_leds_ixora: ledsixoragrp { + fsl,pins = < + MX6QDL_PAD_SD2_DAT1__GPIO1_IO14 0x1b0b0 + MX6QDL_PAD_SD2_DAT3__GPIO1_IO12 0x1b0b0 + MX6QDL_PAD_NANDF_D1__GPIO2_IO01 0x1b0b0 + MX6QDL_PAD_NANDF_D2__GPIO2_IO02 0x1b0b0 + >; + }; +}; diff --git a/arch/arm/boot/dts/imx6q-apalis-ixora.dts b/arch/arm/boot/dts/imx6q-apalis-ixora.dts index 88cc7f51a4e9..60d33e99de76 100644 --- a/arch/arm/boot/dts/imx6q-apalis-ixora.dts +++ b/arch/arm/boot/dts/imx6q-apalis-ixora.dts @@ -1,5 +1,5 @@ /* - * Copyright 2014-2016 Toradex AG + * Copyright 2014-2017 Toradex AG * Copyright 2012 Freescale Semiconductor, Inc. * Copyright 2011 Linaro Ltd. * @@ -55,13 +55,9 @@ / { "fsl,imx6q"; aliases { - i2c0 = &i2cddc; - i2c1 = &i2c1; + i2c0 = &i2c1; + i2c1 = &i2c3; i2c2 = &i2c2; - i2c3 = &i2c3; - }; - - aliases { rtc0 = &rtc_i2c; rtc1 = &snvs_rtc; }; @@ -164,15 +160,10 @@ &can2 { }; &hdmi { - ddc-i2c-bus = <&i2cddc>; status = "okay"; }; -&i2cddc { - status = "okay"; -}; - -/* GEN1_I2C: I2C1_SDA/SCL on MXM3 209/211 (e.g. RTC on carrier board) */ +/* I2C1_SDA/SCL on MXM3 209/211 (e.g. RTC on carrier board) */ &i2c1 { status = "okay"; @@ -188,6 +179,14 @@ rtc_i2c: rtc@68 { }; }; +/* + * I2C3_SDA/SCL (CAM) on MXM3 pin 201/203 (e.g. 
camera sensor on carrier + * board) + */ +&i2c3 { + status = "okay"; +}; + &ipu1_di1_disp1 { remote-endpoint = <&lcd_display_in>; }; @@ -268,16 +267,13 @@ &usbotg { /* SD1 */ &usdhc2 { pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_sd_cd>; + pinctrl-0 = <&pinctrl_usdhc2 &pinctrl_sd_cd>; cd-gpios = <&gpio6 14 GPIO_ACTIVE_LOW>; status = "okay"; }; &iomuxc { - /* - * Mux the Apalis GPIOs - * GPIO5, 6 used by optional fusion_F0710A kernel module - */ + /* Mux the Apalis GPIOs */ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_apalis_gpio1 &pinctrl_apalis_gpio2 &pinctrl_apalis_gpio3 &pinctrl_apalis_gpio4 diff --git a/arch/arm/boot/dts/imx6q-b850v3.dts b/arch/arm/boot/dts/imx6q-b850v3.dts index 2c1e98e0cf7b..46bdc6722715 100644 --- a/arch/arm/boot/dts/imx6q-b850v3.dts +++ b/arch/arm/boot/dts/imx6q-b850v3.dts @@ -57,7 +57,7 @@ &clks { assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>, <&clks IMX6QDL_CLK_LDB_DI1_SEL>, <&clks IMX6QDL_CLK_IPU1_DI0_PRE_SEL>, - <&clks IMX6QDL_CLK_IPU1_DI1_PRE_SEL>; + <&clks IMX6QDL_CLK_IPU2_DI0_PRE_SEL>; assigned-clock-parents = <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>, <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>, <&clks IMX6QDL_CLK_PLL2_PFD2_396M>, diff --git a/arch/arm/boot/dts/imx6q-bx50v3.dtsi b/arch/arm/boot/dts/imx6q-bx50v3.dtsi index c90b26f00e24..1015e55ca8f7 100644 --- a/arch/arm/boot/dts/imx6q-bx50v3.dtsi +++ b/arch/arm/boot/dts/imx6q-bx50v3.dtsi @@ -111,6 +111,11 @@ m25_eeprom: m25p80@0 { }; &i2c1 { + pinctrl-names = "default", "gpio"; + pinctrl-1 = <&pinctrl_i2c1_gpio>; + sda-gpios = <&gpio5 26 GPIO_ACTIVE_HIGH>; + scl-gpios = <&gpio5 27 GPIO_ACTIVE_HIGH>; + pca9547: mux@70 { compatible = "nxp,pca9547"; reg = <0x70>; @@ -261,6 +266,43 @@ mux1_i2c8: i2c@7 { }; }; +&i2c2 { + pinctrl-names = "default", "gpio"; + pinctrl-1 = <&pinctrl_i2c2_gpio>; + sda-gpios = <&gpio4 13 GPIO_ACTIVE_HIGH>; + scl-gpios = <&gpio4 12 GPIO_ACTIVE_HIGH>; +}; + +&i2c3 { + pinctrl-names = "default", "gpio"; + pinctrl-1 = <&pinctrl_i2c3_gpio>; + sda-gpios = <&gpio1 6 GPIO_ACTIVE_HIGH>; + scl-gpios = <&gpio1 3 GPIO_ACTIVE_HIGH>; +}; + +&iomuxc { + pinctrl_i2c1_gpio: i2c1gpiogrp { + fsl,pins = < + MX6QDL_PAD_CSI0_DAT8__GPIO5_IO26 0x1b0b0 + MX6QDL_PAD_CSI0_DAT9__GPIO5_IO27 0x1b0b0 + >; + }; + + pinctrl_i2c2_gpio: i2c2gpiogrp { + fsl,pins = < + MX6QDL_PAD_KEY_COL3__GPIO4_IO12 0x1b0b0 + MX6QDL_PAD_KEY_ROW3__GPIO4_IO13 0x1b0b0 + >; + }; + + pinctrl_i2c3_gpio: i2c3gpiogrp { + fsl,pins = < + MX6QDL_PAD_GPIO_3__GPIO1_IO03 0x1b0b0 + MX6QDL_PAD_GPIO_6__GPIO1_IO06 0x1b0b0 + >; + }; +}; + &usdhc4 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc4>; diff --git a/arch/arm/boot/dts/imx6q-gw52xx.dts b/arch/arm/boot/dts/imx6q-gw52xx.dts index a12c47e5ee05..0b8ae007ad73 100644 --- a/arch/arm/boot/dts/imx6q-gw52xx.dts +++ b/arch/arm/boot/dts/imx6q-gw52xx.dts @@ -18,6 +18,64 @@ / { compatible = "gw,imx6q-gw52xx", "gw,ventana", "fsl,imx6q"; }; +&i2c3 { + adv7180: camera@20 { + compatible = "adi,adv7180"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_adv7180>; + reg = <0x20>; + powerdown-gpios = <&gpio3 31 GPIO_ACTIVE_LOW>; + interrupt-parent = <&gpio3>; + interrupts = <30 IRQ_TYPE_LEVEL_LOW>; + + port { + adv7180_to_ipu2_csi1_mux: endpoint { + remote-endpoint = <&ipu2_csi1_mux_from_parallel_sensor>; + bus-width = <8>; + }; + }; + }; +}; + +&ipu2_csi1_from_ipu2_csi1_mux { + bus-width = <8>; +}; + +&ipu2_csi1_mux_from_parallel_sensor { + remote-endpoint = <&adv7180_to_ipu2_csi1_mux>; + bus-width = <8>; +}; + +&ipu2_csi1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_ipu2_csi1>; +}; + +&iomuxc { + 
pinctrl_adv7180: adv7180grp { + fsl,pins = < + MX6QDL_PAD_EIM_D30__GPIO3_IO30 0x0001b0b0 + MX6QDL_PAD_EIM_D31__GPIO3_IO31 0x4001b0b0 + >; + }; + + pinctrl_ipu2_csi1: ipu2_csi1grp { + fsl,pins = < + MX6QDL_PAD_EIM_EB2__IPU2_CSI1_DATA19 0x1b0b0 + MX6QDL_PAD_EIM_D16__IPU2_CSI1_DATA18 0x1b0b0 + MX6QDL_PAD_EIM_D18__IPU2_CSI1_DATA17 0x1b0b0 + MX6QDL_PAD_EIM_D19__IPU2_CSI1_DATA16 0x1b0b0 + MX6QDL_PAD_EIM_D20__IPU2_CSI1_DATA15 0x1b0b0 + MX6QDL_PAD_EIM_D26__IPU2_CSI1_DATA14 0x1b0b0 + MX6QDL_PAD_EIM_D27__IPU2_CSI1_DATA13 0x1b0b0 + MX6QDL_PAD_EIM_A17__IPU2_CSI1_DATA12 0x1b0b0 + MX6QDL_PAD_EIM_D29__IPU2_CSI1_VSYNC 0x1b0b0 + MX6QDL_PAD_EIM_EB3__IPU2_CSI1_HSYNC 0x1b0b0 + MX6QDL_PAD_EIM_A16__IPU2_CSI1_PIXCLK 0x1b0b0 + >; + }; +}; + &sata { status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6q-gw53xx.dts b/arch/arm/boot/dts/imx6q-gw53xx.dts index d76aaa83dad0..a56ef77eff3f 100644 --- a/arch/arm/boot/dts/imx6q-gw53xx.dts +++ b/arch/arm/boot/dts/imx6q-gw53xx.dts @@ -18,6 +18,64 @@ / { compatible = "gw,imx6q-gw53xx", "gw,ventana", "fsl,imx6q"; }; +&i2c3 { + adv7180: camera@20 { + compatible = "adi,adv7180"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_adv7180>; + reg = <0x20>; + powerdown-gpios = <&gpio3 31 GPIO_ACTIVE_LOW>; + interrupt-parent = <&gpio3>; + interrupts = <30 IRQ_TYPE_LEVEL_LOW>; + + port { + adv7180_to_ipu2_csi1_mux: endpoint { + remote-endpoint = <&ipu2_csi1_mux_from_parallel_sensor>; + bus-width = <8>; + }; + }; + }; +}; + +&ipu2_csi1_from_ipu2_csi1_mux { + bus-width = <8>; +}; + +&ipu2_csi1_mux_from_parallel_sensor { + remote-endpoint = <&adv7180_to_ipu2_csi1_mux>; + bus-width = <8>; +}; + +&ipu2_csi1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_ipu2_csi1>; +}; + &sata { status = "okay"; }; + +&iomuxc { + pinctrl_adv7180: adv7180grp { + fsl,pins = < + MX6QDL_PAD_EIM_D30__GPIO3_IO30 0x0001b0b0 + MX6QDL_PAD_EIM_D31__GPIO3_IO31 0x4001b0b0 + >; + }; + + pinctrl_ipu2_csi1: ipu2_csi1grp { + fsl,pins = < + MX6QDL_PAD_EIM_EB2__IPU2_CSI1_DATA19 0x1b0b0 + MX6QDL_PAD_EIM_D16__IPU2_CSI1_DATA18 0x1b0b0 + MX6QDL_PAD_EIM_D18__IPU2_CSI1_DATA17 0x1b0b0 + MX6QDL_PAD_EIM_D19__IPU2_CSI1_DATA16 0x1b0b0 + MX6QDL_PAD_EIM_D20__IPU2_CSI1_DATA15 0x1b0b0 + MX6QDL_PAD_EIM_D26__IPU2_CSI1_DATA14 0x1b0b0 + MX6QDL_PAD_EIM_D27__IPU2_CSI1_DATA13 0x1b0b0 + MX6QDL_PAD_EIM_A17__IPU2_CSI1_DATA12 0x1b0b0 + MX6QDL_PAD_EIM_D29__IPU2_CSI1_VSYNC 0x1b0b0 + MX6QDL_PAD_EIM_EB3__IPU2_CSI1_HSYNC 0x1b0b0 + MX6QDL_PAD_EIM_A16__IPU2_CSI1_PIXCLK 0x1b0b0 + >; + }; +}; diff --git a/arch/arm/boot/dts/imx6q-gw54xx.dts b/arch/arm/boot/dts/imx6q-gw54xx.dts index 6e8f53e92a2d..56e5b5050fcf 100644 --- a/arch/arm/boot/dts/imx6q-gw54xx.dts +++ b/arch/arm/boot/dts/imx6q-gw54xx.dts @@ -18,6 +18,64 @@ / { compatible = "gw,imx6q-gw54xx", "gw,ventana", "fsl,imx6q"; }; +&i2c3 { + adv7180: camera@20 { + compatible = "adi,adv7180"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_adv7180>; + reg = <0x20>; + powerdown-gpios = <&gpio3 31 GPIO_ACTIVE_LOW>; + interrupt-parent = <&gpio3>; + interrupts = <30 IRQ_TYPE_LEVEL_LOW>; + + port { + adv7180_to_ipu2_csi1_mux: endpoint { + remote-endpoint = <&ipu2_csi1_mux_from_parallel_sensor>; + bus-width = <8>; + }; + }; + }; +}; + +&ipu2_csi1_from_ipu2_csi1_mux { + bus-width = <8>; +}; + +&ipu2_csi1_mux_from_parallel_sensor { + remote-endpoint = <&adv7180_to_ipu2_csi1_mux>; + bus-width = <8>; +}; + +&ipu2_csi1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_ipu2_csi1>; +}; + &sata { status = "okay"; }; + +&iomuxc { + pinctrl_adv7180: adv7180grp { + fsl,pins = < + 
MX6QDL_PAD_EIM_D30__GPIO3_IO30 0x0001b0b0 + MX6QDL_PAD_EIM_D31__GPIO3_IO31 0x4001b0b0 + >; + }; + + pinctrl_ipu2_csi1: ipu2_csi1grp { + fsl,pins = < + MX6QDL_PAD_EIM_EB2__IPU2_CSI1_DATA19 0x1b0b0 + MX6QDL_PAD_EIM_D16__IPU2_CSI1_DATA18 0x1b0b0 + MX6QDL_PAD_EIM_D18__IPU2_CSI1_DATA17 0x1b0b0 + MX6QDL_PAD_EIM_D19__IPU2_CSI1_DATA16 0x1b0b0 + MX6QDL_PAD_EIM_D20__IPU2_CSI1_DATA15 0x1b0b0 + MX6QDL_PAD_EIM_D26__IPU2_CSI1_DATA14 0x1b0b0 + MX6QDL_PAD_EIM_D27__IPU2_CSI1_DATA13 0x1b0b0 + MX6QDL_PAD_EIM_A17__IPU2_CSI1_DATA12 0x1b0b0 + MX6QDL_PAD_EIM_D29__IPU2_CSI1_VSYNC 0x1b0b0 + MX6QDL_PAD_EIM_EB3__IPU2_CSI1_HSYNC 0x1b0b0 + MX6QDL_PAD_EIM_A16__IPU2_CSI1_PIXCLK 0x1b0b0 + >; + }; +}; diff --git a/arch/arm/boot/dts/imx6qdl-apalis.dtsi b/arch/arm/boot/dts/imx6qdl-apalis.dtsi index ba01dd76d887..ea339fa58f4a 100644 --- a/arch/arm/boot/dts/imx6qdl-apalis.dtsi +++ b/arch/arm/boot/dts/imx6qdl-apalis.dtsi @@ -1,5 +1,5 @@ /* - * Copyright 2014-2016 Toradex AG + * Copyright 2014-2017 Toradex AG * Copyright 2012 Freescale Semiconductor, Inc. * Copyright 2011 Linaro Ltd. * @@ -56,18 +56,6 @@ backlight: backlight { status = "disabled"; }; - /* DDC_I2C: I2C2_SDA/SCL on MXM3 205/207 */ - i2cddc: i2c@0 { - compatible = "i2c-gpio"; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_i2c_ddc>; - gpios = <&gpio3 16 GPIO_ACTIVE_HIGH /* sda */ - &gpio2 30 GPIO_ACTIVE_HIGH /* scl */ - >; - i2c-gpio,delay-us = <2>; /* ~100 kHz */ - status = "disabled"; - }; - reg_1p8v: regulator-1p8v { compatible = "regulator-fixed"; regulator-name = "1P8V"; @@ -210,10 +198,13 @@ ethphy: ethernet-phy@7 { }; }; -/* - * GEN1_I2C: I2C1_SDA/SCL on MXM3 209/211 (e.g. RTC on carrier - * board) - */ +&hdmi { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_hdmi_ddc>; + status = "disabled"; +}; + +/* I2C1_SDA/SCL on MXM3 209/211 (e.g. RTC on carrier board) */ &i2c1 { clock-frequency = <100000>; pinctrl-names = "default"; @@ -374,7 +365,8 @@ stmpe_touchscreen { }; /* - * GEN2_I2C, CAM: I2C3_SDA/SCL on MXM3 201/203 (unused) + * I2C3_SDA/SCL (CAM) on MXM3 pin 201/203 (e.g. 
camera sensor on carrier + * board) */ &i2c3 { clock-frequency = <100000>; @@ -460,7 +452,7 @@ &usbotg { /* MMC1 */ &usdhc1 { pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_usdhc1>; + pinctrl-0 = <&pinctrl_usdhc1_4bit &pinctrl_usdhc1_8bit>; vqmmc-supply = <®_3p3v>; bus-width = <8>; voltage-ranges = <3300 3300>; @@ -640,11 +632,10 @@ MX6QDL_PAD_KEY_ROW2__HDMI_TX_CEC_LINE 0x1f8b0 >; }; - pinctrl_i2c_ddc: gpioi2cddcgrp { + pinctrl_hdmi_ddc: hdmiddcgrp { fsl,pins = < - /* DDC bitbang */ - MX6QDL_PAD_EIM_EB2__GPIO2_IO30 0x1b0b0 - MX6QDL_PAD_EIM_D16__GPIO3_IO16 0x1b0b0 + MX6QDL_PAD_EIM_EB2__HDMI_TX_DDC_SCL 0x4001b8b1 + MX6QDL_PAD_EIM_D16__HDMI_TX_DDC_SDA 0x4001b8b1 >; }; @@ -912,7 +903,7 @@ MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID 0x17059 >; }; - pinctrl_usdhc1: usdhc1grp { + pinctrl_usdhc1_4bit: usdhc1grp_4bit { fsl,pins = < MX6QDL_PAD_SD1_CMD__SD1_CMD 0x17071 MX6QDL_PAD_SD1_CLK__SD1_CLK 0x10071 @@ -920,6 +911,11 @@ MX6QDL_PAD_SD1_DAT0__SD1_DATA0 0x17071 MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x17071 MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x17071 MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x17071 + >; + }; + + pinctrl_usdhc1_8bit: usdhc1grp_8bit { + fsl,pins = < MX6QDL_PAD_NANDF_D0__SD1_DATA4 0x17071 MX6QDL_PAD_NANDF_D1__SD1_DATA5 0x17071 MX6QDL_PAD_NANDF_D2__SD1_DATA6 0x17071 diff --git a/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi index e8c1edc82e6e..885556260bd0 100644 --- a/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi +++ b/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi @@ -231,6 +231,37 @@ &i2c3 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c3>; status = "okay"; + + adv7180: camera@20 { + compatible = "adi,adv7180"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_adv7180>; + reg = <0x20>; + powerdown-gpios = <&gpio5 20 GPIO_ACTIVE_LOW>; + interrupt-parent = <&gpio5>; + interrupts = <23 IRQ_TYPE_LEVEL_LOW>; + + port { + adv7180_to_ipu1_csi0_mux: endpoint { + remote-endpoint = <&ipu1_csi0_mux_from_parallel_sensor>; + bus-width = <8>; + }; + }; + }; +}; + +&ipu1_csi0_from_ipu1_csi0_mux { + bus-width = <8>; +}; + +&ipu1_csi0_mux_from_parallel_sensor { + remote-endpoint = <&adv7180_to_ipu1_csi0_mux>; + bus-width = <8>; +}; + +&ipu1_csi0 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_ipu1_csi0>; }; &pcie { @@ -302,6 +333,13 @@ &wdog1 { &iomuxc { imx6qdl-gw51xx { + pinctrl_adv7180: adv7180grp { + fsl,pins = < + MX6QDL_PAD_CSI0_DAT5__GPIO5_IO23 0x0001b0b0 + MX6QDL_PAD_CSI0_DATA_EN__GPIO5_IO20 0x4001b0b0 + >; + }; + pinctrl_enet: enetgrp { fsl,pins = < MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b030 @@ -372,6 +410,22 @@ MX6QDL_PAD_GPIO_6__I2C3_SDA 0x4001b8b1 >; }; + pinctrl_ipu1_csi0: ipu1csi0grp { + fsl,pins = < + MX6QDL_PAD_CSI0_DAT12__IPU1_CSI0_DATA12 0x1b0b0 + MX6QDL_PAD_CSI0_DAT13__IPU1_CSI0_DATA13 0x1b0b0 + MX6QDL_PAD_CSI0_DAT14__IPU1_CSI0_DATA14 0x1b0b0 + MX6QDL_PAD_CSI0_DAT15__IPU1_CSI0_DATA15 0x1b0b0 + MX6QDL_PAD_CSI0_DAT16__IPU1_CSI0_DATA16 0x1b0b0 + MX6QDL_PAD_CSI0_DAT17__IPU1_CSI0_DATA17 0x1b0b0 + MX6QDL_PAD_CSI0_DAT18__IPU1_CSI0_DATA18 0x1b0b0 + MX6QDL_PAD_CSI0_DAT19__IPU1_CSI0_DATA19 0x1b0b0 + MX6QDL_PAD_CSI0_MCLK__IPU1_CSI0_HSYNC 0x1b0b0 + MX6QDL_PAD_CSI0_VSYNC__IPU1_CSI0_VSYNC 0x1b0b0 + MX6QDL_PAD_CSI0_PIXCLK__IPU1_CSI0_PIXCLK 0x1b0b0 + >; + }; + pinctrl_pcie: pciegrp { fsl,pins = < MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x1b0b0 diff --git a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi index 91991d63a69c..115d706228ef 100644 --- a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi +++ b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi @@ -377,7 +377,6 @@ &ssi1 { &uart1 { 
pinctrl-names = "default"; pinctrl-0 = <&pinctrl_uart1>; - uart-has-rtscts; rts-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi index 5bc6ed1a5b35..24be7965056c 100644 --- a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi +++ b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi @@ -368,7 +368,6 @@ &ssi1 { &uart1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_uart1>; - uart-has-rtscts; rts-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi index 66fcf838e964..4594b2279169 100644 --- a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi +++ b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi @@ -416,7 +416,6 @@ &ssi2 { &uart1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_uart1>; - uart-has-rtscts; rts-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-gw553x.dtsi b/arch/arm/boot/dts/imx6qdl-gw553x.dtsi index 57374dddf98d..1a0faa1a14c8 100644 --- a/arch/arm/boot/dts/imx6qdl-gw553x.dtsi +++ b/arch/arm/boot/dts/imx6qdl-gw553x.dtsi @@ -261,6 +261,37 @@ &i2c3 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c3>; status = "okay"; + + adv7180: camera@20 { + compatible = "adi,adv7180"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_adv7180>; + reg = <0x20>; + powerdown-gpios = <&gpio5 20 GPIO_ACTIVE_LOW>; + interrupt-parent = <&gpio5>; + interrupts = <23 IRQ_TYPE_LEVEL_LOW>; + + port { + adv7180_to_ipu1_csi0_mux: endpoint { + remote-endpoint = <&ipu1_csi0_mux_from_parallel_sensor>; + bus-width = <8>; + }; + }; + }; +}; + +&ipu1_csi0_from_ipu1_csi0_mux { + bus-width = <8>; +}; + +&ipu1_csi0_mux_from_parallel_sensor { + remote-endpoint = <&adv7180_to_ipu1_csi0_mux>; + bus-width = <8>; +}; + +&ipu1_csi0 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_ipu1_csi0>; }; &pcie { @@ -340,6 +371,13 @@ &wdog1 { }; &iomuxc { + pinctrl_adv7180: adv7180grp { + fsl,pins = < + MX6QDL_PAD_CSI0_DAT5__GPIO5_IO23 0x0001b0b0 + MX6QDL_PAD_CSI0_DATA_EN__GPIO5_IO20 0x4001b0b0 + >; + }; + pinctrl_gpmi_nand: gpminandgrp { fsl,pins = < MX6QDL_PAD_NANDF_CLE__NAND_CLE 0xb0b1 @@ -387,6 +425,22 @@ MX6QDL_PAD_GPIO_6__I2C3_SDA 0x4001b8b1 >; }; + pinctrl_ipu1_csi0: ipu1csi0grp { + fsl,pins = < + MX6QDL_PAD_CSI0_DAT12__IPU1_CSI0_DATA12 0x1b0b0 + MX6QDL_PAD_CSI0_DAT13__IPU1_CSI0_DATA13 0x1b0b0 + MX6QDL_PAD_CSI0_DAT14__IPU1_CSI0_DATA14 0x1b0b0 + MX6QDL_PAD_CSI0_DAT15__IPU1_CSI0_DATA15 0x1b0b0 + MX6QDL_PAD_CSI0_DAT16__IPU1_CSI0_DATA16 0x1b0b0 + MX6QDL_PAD_CSI0_DAT17__IPU1_CSI0_DATA17 0x1b0b0 + MX6QDL_PAD_CSI0_DAT18__IPU1_CSI0_DATA18 0x1b0b0 + MX6QDL_PAD_CSI0_DAT19__IPU1_CSI0_DATA19 0x1b0b0 + MX6QDL_PAD_CSI0_MCLK__IPU1_CSI0_HSYNC 0x1b0b0 + MX6QDL_PAD_CSI0_VSYNC__IPU1_CSI0_VSYNC 0x1b0b0 + MX6QDL_PAD_CSI0_PIXCLK__IPU1_CSI0_PIXCLK 0x1b0b0 + >; + }; + pinctrl_gpio_leds: gpioledsgrp { fsl,pins = < MX6QDL_PAD_KEY_COL2__GPIO4_IO10 0x1b0b0 diff --git a/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi b/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi index 5fab5be414fe..7ca291e9dbdb 100644 --- a/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi +++ b/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi @@ -184,7 +184,6 @@ &pcie { }; &ssi1 { - fsl,mode = "i2s-slave"; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi index f22e5879340b..d309a4d0eb08 100644 --- a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi +++ b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi @@ -108,6 +108,18 @@ reg_wlan_vmmc: regulator@4 { 
startup-delay-us = <70000>; enable-active-high; }; + + reg_usb_h1_vbus: regulator@5 { + compatible = "regulator-fixed"; + reg = <5>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_usbh1>; + regulator-name = "usb_h1_vbus"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&gpio7 12 GPIO_ACTIVE_HIGH>; + enable-active-high; + }; }; gpio-keys { @@ -515,6 +527,12 @@ MX6QDL_PAD_EIM_D27__UART2_RX_DATA 0x1b0b1 >; }; + pinctrl_usbh1: usbh1grp { + fsl,pins = < + MX6QDL_PAD_GPIO_17__GPIO7_IO12 0x030b0 + >; + }; + pinctrl_usbotg: usbotggrp { fsl,pins = < MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059 @@ -629,6 +647,7 @@ &uart2 { }; &usbh1 { + vbus-supply = <®_usb_h1_vbus>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi b/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi index afe7449c47da..756c5054f047 100644 --- a/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi +++ b/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi @@ -123,6 +123,18 @@ reg_2p8v: regulator@6 { regulator-max-microvolt = <2800000>; regulator-always-on; }; + + reg_usb_h1_vbus: regulator@7 { + compatible = "regulator-fixed"; + reg = <7>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_usbh1>; + regulator-name = "usb_h1_vbus"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&gpio7 12 GPIO_ACTIVE_HIGH>; + enable-active-high; + }; }; mipi_xclk: mipi_xclk { @@ -610,6 +622,12 @@ MX6QDL_PAD_EIM_D27__UART2_RX_DATA 0x1b0b1 >; }; + pinctrl_usbh1: usbh1grp { + fsl,pins = < + MX6QDL_PAD_GPIO_17__GPIO7_IO12 0x030b0 + >; + }; + pinctrl_usbotg: usbotggrp { fsl,pins = < MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059 @@ -705,6 +723,7 @@ &uart2 { }; &usbh1 { + vbus-supply = <®_usb_h1_vbus>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi b/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi index 5d94b5ee6aa0..eeb7679fd348 100644 --- a/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi +++ b/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi @@ -59,6 +59,14 @@ mdio1: mdio { pinctrl-0 = <&pinctrl_mdio1>; gpios = <&gpio6 5 GPIO_ACTIVE_HIGH &gpio6 4 GPIO_ACTIVE_HIGH>; + + phy: ethernet-phy@0 { + pinctrl-0 = <&pinctrl_rmii_phy_irq>; + pinctrl-names = "default"; + reg = <0>; + interrupt-parent = <&gpio3>; + interrupts = <30 IRQ_TYPE_LEVEL_LOW>; + }; }; reg_28p0v: regulator-28p0v { @@ -615,14 +623,106 @@ &fec { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_enet>; phy-mode = "rmii"; + phy-handle = <&phy>; phy-reset-gpios = <&gpio1 23 GPIO_ACTIVE_LOW>; phy-reset-duration = <100>; phy-supply = <®_3p3v>; status = "okay"; - fixed-link { - speed = <100>; - full-duplex; + mdio { + #address-cells = <1>; + #size-cells = <0>; + status = "okay"; + + switch: switch@0 { + compatible = "marvell,mv88e6085"; + pinctrl-0 = <&pinctrl_switch_irq>; + pinctrl-names = "default"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + dsa,member = <0 0>; + eeprom-length = <512>; + interrupt-parent = <&gpio6>; + interrupts = <3 IRQ_TYPE_EDGE_FALLING>; + interrupt-controller; + #interrupt-cells = <2>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + label = "gigabit_proc"; + phy-handle = <&switchphy0>; + }; + + port@1 { + reg = <1>; + label = "netaux"; + phy-handle = <&switchphy1>; + }; + + port@2 { + reg = <2>; + label = "cpu"; + ethernet = <&fec>; + + fixed-link { + speed = <100>; + full-duplex; + }; + }; + + port@3 { + reg = <3>; + label = "netright"; + phy-handle = <&switchphy3>; + }; + + port@4 { + reg = <4>; + label = "netleft"; + phy-handle = <&switchphy4>; + }; + }; + + 
mdio { + #address-cells = <1>; + #size-cells = <0>; + + switchphy0: switchphy@0 { + reg = <0>; + interrupt-parent = <&switch>; + interrupts = <0 IRQ_TYPE_LEVEL_HIGH>; + }; + + switchphy1: switchphy@1 { + reg = <1>; + interrupt-parent = <&switch>; + interrupts = <1 IRQ_TYPE_LEVEL_HIGH>; + }; + + switchphy2: switchphy@2 { + reg = <2>; + interrupt-parent = <&switch>; + interrupts = <2 IRQ_TYPE_LEVEL_HIGH>; + }; + + switchphy3: switchphy@3 { + reg = <3>; + interrupt-parent = <&switch>; + interrupts = <3 IRQ_TYPE_LEVEL_HIGH>; + }; + + switchphy4: switchphy@4 { + reg = <4>; + interrupt-parent = <&switch>; + interrupts = <4 IRQ_TYPE_LEVEL_HIGH>; + }; + }; + }; }; }; @@ -840,6 +940,12 @@ MX6QDL_PAD_EIM_D30__GPIO3_IO30 0x40010000 >; }; + pinctrl_switch_irq: switchgrp { + fsl,pins = < + MX6QDL_PAD_CSI0_DAT17__GPIO6_IO03 0x4001b000 + >; + }; + pinctrl_tc358767: tc358767grp { fsl,pins = < MX6QDL_PAD_GPIO_9__GPIO1_IO09 0x10 diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi index a9723b94bafa..8884b4a3cafb 100644 --- a/arch/arm/boot/dts/imx6qdl.dtsi +++ b/arch/arm/boot/dts/imx6qdl.dtsi @@ -769,6 +769,7 @@ snvs_poweroff: snvs-poweroff { compatible = "syscon-poweroff"; regmap = <&snvs>; offset = <0x38>; + value = <0x60>; mask = <0x60>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi index 3243af4a9984..3f76f980947e 100644 --- a/arch/arm/boot/dts/imx6sl.dtsi +++ b/arch/arm/boot/dts/imx6sl.dtsi @@ -655,6 +655,7 @@ snvs_poweroff: snvs-poweroff { compatible = "syscon-poweroff"; regmap = <&snvs>; offset = <0x38>; + value = <0x60>; mask = <0x60>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi index f16b9df9d0c6..6c7eb54be9e2 100644 --- a/arch/arm/boot/dts/imx6sx.dtsi +++ b/arch/arm/boot/dts/imx6sx.dtsi @@ -710,6 +710,7 @@ snvs_poweroff: snvs-poweroff { compatible = "syscon-poweroff"; regmap = <&snvs>; offset = <0x38>; + value = <0x60>; mask = <0x60>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/imx6ul-14x14-evk.dts b/arch/arm/boot/dts/imx6ul-14x14-evk.dts index d2be8aa3370b..9c23e017d86a 100644 --- a/arch/arm/boot/dts/imx6ul-14x14-evk.dts +++ b/arch/arm/boot/dts/imx6ul-14x14-evk.dts @@ -22,7 +22,7 @@ memory { reg = <0x80000000 0x20000000>; }; - backlight { + backlight_display: backlight-display { compatible = "pwm-backlight"; pwms = <&pwm1 0 5000000>; brightness-levels = <0 4 8 16 32 64 128 255>; @@ -78,6 +78,17 @@ dailink_master: simple-audio-card,codec { clocks = <&clks IMX6UL_CLK_SAI2>; }; }; + + panel { + compatible = "innolux,at043tn24"; + backlight = <&backlight_display>; + + port { + panel_in: endpoint { + remote-endpoint = <&display_out>; + }; + }; + }; }; &clks { @@ -139,31 +150,11 @@ &lcdif { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_lcdif_dat &pinctrl_lcdif_ctrl>; - display = <&display0>; status = "okay"; - display0: display { - bits-per-pixel = <16>; - bus-width = <24>; - - display-timings { - native-mode = <&timing0>; - - timing0: timing0 { - clock-frequency = <9200000>; - hactive = <480>; - vactive = <272>; - hfront-porch = <8>; - hback-porch = <4>; - hsync-len = <41>; - vback-porch = <2>; - vfront-porch = <4>; - vsync-len = <10>; - hsync-active = <0>; - vsync-active = <0>; - de-active = <1>; - pixelclk-active = <0>; - }; + port { + display_out: endpoint { + remote-endpoint = <&panel_in>; }; }; }; @@ -316,7 +307,6 @@ MX6UL_PAD_ENET2_TX_EN__ENET2_TX_EN 0x1b0b0 MX6UL_PAD_ENET2_TX_DATA0__ENET2_TDATA00 0x1b0b0 MX6UL_PAD_ENET2_TX_DATA1__ENET2_TDATA01 0x1b0b0 
MX6UL_PAD_ENET2_TX_CLK__ENET2_REF_CLK2 0x4001b031 - MX6UL_PAD_SNVS_TAMPER0__GPIO5_IO00 0x17059 >; }; diff --git a/arch/arm/boot/dts/imx6ul-geam.dtsi b/arch/arm/boot/dts/imx6ul-geam.dts similarity index 87% rename from arch/arm/boot/dts/imx6ul-geam.dtsi rename to arch/arm/boot/dts/imx6ul-geam.dts index eb94d956808b..571eea7f1c6b 100644 --- a/arch/arm/boot/dts/imx6ul-geam.dtsi +++ b/arch/arm/boot/dts/imx6ul-geam.dts @@ -40,11 +40,16 @@ * OTHER DEALINGS IN THE SOFTWARE. */ +/dts-v1/; + #include #include #include "imx6ul.dtsi" / { + model = "Engicam GEAM6UL Starter Kit"; + compatible = "engicam,imx6ul-geam", "fsl,imx6ul"; + memory { reg = <0x80000000 0x08000000>; }; @@ -87,18 +92,46 @@ reg_3p3v: regulator-3p3v { regulator-always-on; regulator-boot-on; }; + + sound { + compatible = "simple-audio-card"; + simple-audio-card,name = "imx6ul-geam-sgtl5000"; + simple-audio-card,format = "i2s"; + simple-audio-card,bitclock-master = <&dailink_master>; + simple-audio-card,frame-master = <&dailink_master>; + simple-audio-card,widgets = + "Microphone", "Mic Jack", + "Line", "Line In", + "Line", "Line Out", + "Headphone", "Headphone Jack"; + simple-audio-card,routing = + "MIC_IN", "Mic Jack", + "Mic Jack", "Mic Bias", + "Headphone Jack", "HP_OUT"; + + simple-audio-card,cpu { + sound-dai = <&sai2>; + }; + + dailink_master: simple-audio-card,codec { + sound-dai = <&sgtl5000>; + clocks = <&clks IMX6UL_CLK_SAI2>; + }; + }; }; &can1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_flexcan1>; xceiver-supply = <®_3p3v>; + status = "okay"; }; &can2 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_flexcan2>; xceiver-supply = <®_3p3v>; + status = "okay"; }; &fec1 { @@ -144,6 +177,16 @@ &i2c1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c1>; status = "okay"; + + sgtl5000: codec@a { + compatible = "fsl,sgtl5000"; + reg = <0x0a>; + clocks = <&clks IMX6UL_CLK_OSC>; + clock-names = "mclk"; + VDDA-supply = <®_3p3v>; + VDDIO-supply = <®_3p3v>; + VDDD-supply = <®_1p8v>; + }; }; &i2c2 { @@ -158,6 +201,31 @@ &lcdif { pinctrl-0 = <&pinctrl_lcdif_dat &pinctrl_lcdif_ctrl>; display = <&display0>; + status = "okay"; + + display0: display { + bits-per-pixel = <16>; + bus-width = <18>; + + display-timings { + native-mode = <&timing0>; + timing0: timing0 { + clock-frequency = <28000000>; + hactive = <800>; + vactive = <480>; + hfront-porch = <30>; + hback-porch = <30>; + hsync-len = <64>; + vback-porch = <5>; + vfront-porch = <5>; + vsync-len = <20>; + hsync-active = <0>; + vsync-active = <0>; + de-active = <1>; + pixelclk-active = <0>; + }; + }; + }; }; &pwm8 { @@ -178,6 +246,12 @@ &sai2 { status = "okay"; }; +&tsc { + measure-delay-time = <0x1ffff>; + pre-charge-time = <0x1fff>; + status = "okay"; +}; + &uart1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_uart1>; diff --git a/arch/arm/boot/dts/imx6ul-isiot-common.dtsi b/arch/arm/boot/dts/imx6ul-isiot-common.dtsi deleted file mode 100644 index 2beaab6e272e..000000000000 --- a/arch/arm/boot/dts/imx6ul-isiot-common.dtsi +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Copyright (C) 2016 Amarula Solutions B.V. - * Copyright (C) 2016 Engicam S.r.l. - * - * This file is dual-licensed: you can use it either under the terms - * of the GPL or the X11 license, at your option. Note that this dual - * licensing only applies to this file, and not this project as a - * whole. - * - * a) This file is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. 
- * - * This file is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * Or, alternatively, - * - * b) Permission is hereby granted, free of charge, to any person - * obtaining a copy of this software and associated documentation - * files (the "Software"), to deal in the Software without - * restriction, including without limitation the rights to use, - * copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following - * conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES - * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT - * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -&i2c1 { - stmpe811: gpio-expander@44 { - compatible = "st,stmpe811"; - reg = <0x44>; - #address-cells = <1>; - #size-cells = <0>; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_stmpe>; - interrupt-parent = <&gpio1>; - interrupts = <18 IRQ_TYPE_EDGE_FALLING>; - interrupt-controller; - #interrupt-cells = <2>; - - stmpe: touchscreen { - compatible = "st,stmpe-ts"; - st,sample-time = <4>; - st,mod-12b = <1>; - st,ref-sel = <0>; - st,adc-freq = <1>; - st,ave-ctrl = <1>; - st,touch-det-delay = <2>; - st,settling = <2>; - st,fraction-z = <7>; - st,i-drive = <1>; - }; - }; -}; - -&lcdif { - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_lcdif_dat - &pinctrl_lcdif_ctrl>; - display = <&display0>; - status = "okay"; - - display0: display { - bits-per-pixel = <16>; - bus-width = <18>; - - display-timings { - native-mode = <&timing0>; - timing0: timing0 { - clock-frequency = <28000000>; - hactive = <800>; - vactive = <480>; - hfront-porch = <30>; - hback-porch = <30>; - hsync-len = <64>; - vback-porch = <5>; - vfront-porch = <5>; - vsync-len = <20>; - hsync-active = <0>; - vsync-active = <0>; - de-active = <1>; - pixelclk-active = <0>; - }; - }; - }; -}; - -&iomuxc { - pinctrl_lcdif_ctrl: lcdifctrlgrp { - fsl,pins = < - MX6UL_PAD_LCD_CLK__LCDIF_CLK 0x79 - MX6UL_PAD_LCD_ENABLE__LCDIF_ENABLE 0x79 - MX6UL_PAD_LCD_HSYNC__LCDIF_HSYNC 0x79 - MX6UL_PAD_LCD_VSYNC__LCDIF_VSYNC 0x79 - >; - }; - - pinctrl_lcdif_dat: lcdifdatgrp { - fsl,pins = < - MX6UL_PAD_LCD_DATA00__LCDIF_DATA00 0x79 - MX6UL_PAD_LCD_DATA01__LCDIF_DATA01 0x79 - MX6UL_PAD_LCD_DATA02__LCDIF_DATA02 0x79 - MX6UL_PAD_LCD_DATA03__LCDIF_DATA03 0x79 - MX6UL_PAD_LCD_DATA04__LCDIF_DATA04 0x79 - MX6UL_PAD_LCD_DATA05__LCDIF_DATA05 0x79 - MX6UL_PAD_LCD_DATA06__LCDIF_DATA06 0x79 - MX6UL_PAD_LCD_DATA07__LCDIF_DATA07 0x79 - MX6UL_PAD_LCD_DATA08__LCDIF_DATA08 0x79 - MX6UL_PAD_LCD_DATA09__LCDIF_DATA09 0x79 - MX6UL_PAD_LCD_DATA10__LCDIF_DATA10 0x79 - MX6UL_PAD_LCD_DATA11__LCDIF_DATA11 0x79 - MX6UL_PAD_LCD_DATA12__LCDIF_DATA12 0x79 - MX6UL_PAD_LCD_DATA13__LCDIF_DATA13 0x79 - MX6UL_PAD_LCD_DATA14__LCDIF_DATA14 0x79 - MX6UL_PAD_LCD_DATA15__LCDIF_DATA15 0x79 - 
MX6UL_PAD_LCD_DATA16__LCDIF_DATA16 0x79 - MX6UL_PAD_LCD_DATA17__LCDIF_DATA17 0x79 - >; - }; - - pinctrl_stmpe: stmpegrp { - fsl,pins = < - MX6UL_PAD_UART1_CTS_B__GPIO1_IO18 0x1b0b0 - >; - }; -}; diff --git a/arch/arm/boot/dts/imx6ul-isiot-emmc.dts b/arch/arm/boot/dts/imx6ul-isiot-emmc.dts index 73a1d0f0b9d5..f5b422898e61 100644 --- a/arch/arm/boot/dts/imx6ul-isiot-emmc.dts +++ b/arch/arm/boot/dts/imx6ul-isiot-emmc.dts @@ -43,7 +43,6 @@ /dts-v1/; #include "imx6ul-isiot.dtsi" -#include "imx6ul-isiot-common.dtsi" / { model = "Engicam Is.IoT MX6UL eMMC Starter kit"; diff --git a/arch/arm/boot/dts/imx6ul-isiot-nand.dts b/arch/arm/boot/dts/imx6ul-isiot-nand.dts index da29a86eb6a8..de15e1c75dd1 100644 --- a/arch/arm/boot/dts/imx6ul-isiot-nand.dts +++ b/arch/arm/boot/dts/imx6ul-isiot-nand.dts @@ -43,7 +43,6 @@ /dts-v1/; #include "imx6ul-isiot.dtsi" -#include "imx6ul-isiot-common.dtsi" / { model = "Engicam Is.IoT MX6UL NAND Starter kit"; diff --git a/arch/arm/boot/dts/imx6ul-isiot.dtsi b/arch/arm/boot/dts/imx6ul-isiot.dtsi index ea30380ad7a4..950fb28b630a 100644 --- a/arch/arm/boot/dts/imx6ul-isiot.dtsi +++ b/arch/arm/boot/dts/imx6ul-isiot.dtsi @@ -69,6 +69,68 @@ backlight { 100>; default-brightness-level = <100>; }; + + reg_1p8v: regulator-1p8v { + compatible = "regulator-fixed"; + regulator-name = "1P8V"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; + regulator-boot-on; + }; + + reg_3p3v: regulator-3p3v { + compatible = "regulator-fixed"; + regulator-name = "3P3V"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + regulator-boot-on; + }; + + sound { + compatible = "simple-audio-card"; + simple-audio-card,name = "imx6ul-isiot-sgtl5000"; + simple-audio-card,format = "i2s"; + simple-audio-card,bitclock-master = <&dailink_master>; + simple-audio-card,frame-master = <&dailink_master>; + simple-audio-card,widgets = + "Microphone", "Mic Jack", + "Line", "Line In", + "Line", "Line Out", + "Headphone", "Headphone Jack"; + simple-audio-card,routing = + "MIC_IN", "Mic Jack", + "Mic Jack", "Mic Bias", + "Headphone Jack", "HP_OUT"; + + simple-audio-card,cpu { + sound-dai = <&sai2>; + }; + + dailink_master: simple-audio-card,codec { + sound-dai = <&sgtl5000>; + clocks = <&clks IMX6UL_CLK_SAI2>; + }; + }; +}; + +&fec1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_enet1>; + phy-mode = "rmii"; + phy-handle = <&ethphy0>; + status = "okay"; + + mdio { + #address-cells = <1>; + #size-cells = <0>; + + ethphy0: ethernet-phy@0 { + compatible = "ethernet-phy-ieee802.3-c22"; + reg = <0>; + }; + }; }; &i2c1 { @@ -76,6 +138,42 @@ &i2c1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c1>; status = "okay"; + + sgtl5000: codec@a { + compatible = "fsl,sgtl5000"; + reg = <0x0a>; + clocks = <&clks IMX6UL_CLK_OSC>; + clock-names = "mclk"; + VDDA-supply = <&reg_3p3v>; + VDDIO-supply = <&reg_3p3v>; + VDDD-supply = <&reg_1p8v>; + }; + + stmpe811: gpio-expander@44 { + compatible = "st,stmpe811"; + reg = <0x44>; + #address-cells = <1>; + #size-cells = <0>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_stmpe>; + interrupt-parent = <&gpio1>; + interrupts = <18 IRQ_TYPE_EDGE_FALLING>; + interrupt-controller; + #interrupt-cells = <2>; + + stmpe: touchscreen { + compatible = "st,stmpe-ts"; + st,sample-time = <4>; + st,mod-12b = <1>; + st,ref-sel = <0>; + st,adc-freq = <1>; + st,ave-ctrl = <1>; + st,touch-det-delay = <2>; + st,settling = <2>; + st,fraction-z = <7>; + st,i-drive = <1>; + }; + }; }; &i2c2 { @@ -85,6 +183,38 @@
&i2c2 { status = "okay"; }; +&lcdif { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_lcdif_dat + &pinctrl_lcdif_ctrl>; + display = <&display0>; + status = "okay"; + + display0: display { + bits-per-pixel = <16>; + bus-width = <18>; + + display-timings { + native-mode = <&timing0>; + timing0: timing0 { + clock-frequency = <28000000>; + hactive = <800>; + vactive = <480>; + hfront-porch = <30>; + hback-porch = <30>; + hsync-len = <64>; + vback-porch = <5>; + vfront-porch = <5>; + vsync-len = <20>; + hsync-active = <0>; + vsync-active = <0>; + de-active = <1>; + pixelclk-active = <0>; + }; + }; + }; +}; + &pwm8 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_pwm8>; @@ -115,6 +245,21 @@ &usdhc1 { }; &iomuxc { + pinctrl_enet1: enet1grp { + fsl,pins = < + MX6UL_PAD_ENET2_RX_DATA0__ENET1_MDIO 0x1b0b0 + MX6UL_PAD_ENET2_RX_DATA1__ENET1_MDC 0x1b0b0 + MX6UL_PAD_ENET1_RX_EN__ENET1_RX_EN 0x1b0b0 + MX6UL_PAD_ENET1_RX_DATA0__ENET1_RDATA00 0x1b0b0 + MX6UL_PAD_ENET1_RX_DATA1__ENET1_RDATA01 0x1b0b0 + MX6UL_PAD_ENET1_TX_EN__ENET1_TX_EN 0x1b0b0 + MX6UL_PAD_ENET1_TX_DATA0__ENET1_TDATA00 0x1b0b0 + MX6UL_PAD_ENET1_TX_DATA1__ENET1_TDATA01 0x1b0b0 + MX6UL_PAD_ENET1_TX_CLK__ENET1_REF_CLK1 0x4001b031 + MX6UL_PAD_ENET2_RX_EN__GPIO2_IO10 0x1b0b0 + >; + }; + pinctrl_i2c1: i2c1grp { fsl,pins = < MX6UL_PAD_UART4_TX_DATA__I2C1_SCL 0x4001b8b0 @@ -129,6 +274,38 @@ MX6UL_PAD_GPIO1_IO01__I2C2_SDA 0x4001b8b0 >; }; + pinctrl_lcdif_ctrl: lcdifctrlgrp { + fsl,pins = < + MX6UL_PAD_LCD_CLK__LCDIF_CLK 0x79 + MX6UL_PAD_LCD_ENABLE__LCDIF_ENABLE 0x79 + MX6UL_PAD_LCD_HSYNC__LCDIF_HSYNC 0x79 + MX6UL_PAD_LCD_VSYNC__LCDIF_VSYNC 0x79 + >; + }; + + pinctrl_lcdif_dat: lcdifdatgrp { + fsl,pins = < + MX6UL_PAD_LCD_DATA00__LCDIF_DATA00 0x79 + MX6UL_PAD_LCD_DATA01__LCDIF_DATA01 0x79 + MX6UL_PAD_LCD_DATA02__LCDIF_DATA02 0x79 + MX6UL_PAD_LCD_DATA03__LCDIF_DATA03 0x79 + MX6UL_PAD_LCD_DATA04__LCDIF_DATA04 0x79 + MX6UL_PAD_LCD_DATA05__LCDIF_DATA05 0x79 + MX6UL_PAD_LCD_DATA06__LCDIF_DATA06 0x79 + MX6UL_PAD_LCD_DATA07__LCDIF_DATA07 0x79 + MX6UL_PAD_LCD_DATA08__LCDIF_DATA08 0x79 + MX6UL_PAD_LCD_DATA09__LCDIF_DATA09 0x79 + MX6UL_PAD_LCD_DATA10__LCDIF_DATA10 0x79 + MX6UL_PAD_LCD_DATA11__LCDIF_DATA11 0x79 + MX6UL_PAD_LCD_DATA12__LCDIF_DATA12 0x79 + MX6UL_PAD_LCD_DATA13__LCDIF_DATA13 0x79 + MX6UL_PAD_LCD_DATA14__LCDIF_DATA14 0x79 + MX6UL_PAD_LCD_DATA15__LCDIF_DATA15 0x79 + MX6UL_PAD_LCD_DATA16__LCDIF_DATA16 0x79 + MX6UL_PAD_LCD_DATA17__LCDIF_DATA17 0x79 + >; + }; + pinctrl_pwm8: pwm8grp { fsl,pins = < MX6UL_PAD_ENET1_RX_ER__PWM8_OUT 0x110b0 @@ -145,6 +322,12 @@ MX6UL_PAD_JTAG_TRST_B__SAI2_TX_DATA 0x120b0 >; }; + pinctrl_stmpe: stmpegrp { + fsl,pins = < + MX6UL_PAD_UART1_CTS_B__GPIO1_IO18 0x1b0b0 + >; + }; + pinctrl_uart1: uart1grp { fsl,pins = < MX6UL_PAD_UART1_TX_DATA__UART1_DCE_TX 0x1b0b1 diff --git a/arch/arm/boot/dts/imx6ul-liteboard.dts b/arch/arm/boot/dts/imx6ul-liteboard.dts index ed1d891d6a89..1d863a16bcf0 100644 --- a/arch/arm/boot/dts/imx6ul-liteboard.dts +++ b/arch/arm/boot/dts/imx6ul-liteboard.dts @@ -124,6 +124,10 @@ ethphy0: ethernet-phy@0 { }; }; +&snvs_poweroff { + status = "okay"; +}; + &uart1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_uart1>; diff --git a/arch/arm/boot/dts/imx6ul.dtsi b/arch/arm/boot/dts/imx6ul.dtsi index 6da2b77edd46..f11a241a340d 100644 --- a/arch/arm/boot/dts/imx6ul.dtsi +++ b/arch/arm/boot/dts/imx6ul.dtsi @@ -614,6 +614,7 @@ snvs_poweroff: snvs-poweroff { compatible = "syscon-poweroff"; regmap = <&snvs>; offset = <0x38>; + value = <0x60>; mask = <0x60>; status = "disabled"; }; diff --git 
a/arch/arm/boot/dts/imx7-colibri.dtsi b/arch/arm/boot/dts/imx7-colibri.dtsi index d7753f79937a..0a3915868aa3 100644 --- a/arch/arm/boot/dts/imx7-colibri.dtsi +++ b/arch/arm/boot/dts/imx7-colibri.dtsi @@ -106,6 +106,15 @@ &fec1 { fsl,magic-packet; }; +&gpmi { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_gpmi_nand>; + fsl,use-minimum-ecc; + nand-on-flash-bbt; + nand-ecc-mode = "hw"; + status = "okay"; +}; + &i2c1 { clock-frequency = <100000>; pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/imx7d-sdb.dts b/arch/arm/boot/dts/imx7d-sdb.dts index 0a24d1bf3c39..44637cabcc56 100644 --- a/arch/arm/boot/dts/imx7d-sdb.dts +++ b/arch/arm/boot/dts/imx7d-sdb.dts @@ -117,6 +117,37 @@ reg_brcm: regulator-brcm { regulator-max-microvolt = <3300000>; startup-delay-us = <200000>; }; + + reg_lcd_3v3: regulator-lcd-3v3 { + compatible = "regulator-fixed"; + regulator-name = "lcd-3v3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&extended_io 7 GPIO_ACTIVE_LOW>; + }; + + reg_can2_3v3: regulator-can2-3v3 { + compatible = "regulator-fixed"; + regulator-name = "can2-3v3"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_flexcan2_reg>; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&gpio2 14 GPIO_ACTIVE_LOW>; + }; + + panel { + compatible = "innolux,at043tn24"; + pinctrl-0 = <&pinctrl_backlight>; + enable-gpios = <&gpio1 1 GPIO_ACTIVE_HIGH>; + power-supply = <&reg_lcd_3v3>; + + port { + panel_in: endpoint { + remote-endpoint = <&display_out>; + }; + }; + }; }; &adc1 { @@ -168,6 +199,7 @@ &fec1 { phy-mode = "rgmii"; phy-handle = <&ethphy0>; fsl,magic-packet; + phy-reset-gpios = <&extended_io 5 GPIO_ACTIVE_LOW>; status = "okay"; mdio { @@ -197,6 +229,13 @@ &fec2 { status = "okay"; }; +&flexcan2 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_flexcan2>; + xceiver-supply = <&reg_can2_3v3>; + status = "okay"; +}; + &i2c1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c1>; @@ -285,8 +324,8 @@ vgen5_reg: vldo3 { }; vgen6_reg: vldo4 { - regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <3300000>; + regulator-min-microvolt = <2800000>; + regulator-max-microvolt = <2800000>; regulator-always-on; }; }; @@ -322,31 +361,11 @@ codec: wm8960@1a { &lcdif { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_lcdif>; - display = <&display0>; status = "okay"; - display0: display { - bits-per-pixel = <16>; - bus-width = <24>; - - display-timings { - native-mode = <&timing0>; - - timing0: timing0 { - clock-frequency = <9200000>; - hactive = <480>; - vactive = <272>; - hfront-porch = <8>; - hback-porch = <4>; - hsync-len = <41>; - vback-porch = <2>; - vfront-porch = <4>; - vsync-len = <10>; - hsync-active = <0>; - vsync-active = <0>; - de-active = <1>; - pixelclk-active = <0>; - }; + port { + display_out: endpoint { + remote-endpoint = <&panel_in>; }; }; }; @@ -356,12 +375,6 @@ &pcie { status = "okay"; }; -&pwm1 { - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_pwm1>; - status = "okay"; -}; - &uart1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_uart1>; @@ -488,6 +501,20 @@ MX7D_PAD_EPDC_SDCE0__ENET2_RGMII_RX_CTL 0x1 >; }; + pinctrl_flexcan2: flexcan2grp { + fsl,pins = < + MX7D_PAD_GPIO1_IO14__FLEXCAN2_RX 0x59 + MX7D_PAD_GPIO1_IO15__FLEXCAN2_TX 0x59 + >; + }; + + pinctrl_flexcan2_reg: flexcan2reggrp { + fsl,pins = < + MX7D_PAD_EPDC_DATA14__GPIO2_IO14 0x59 /* CAN_STBY */ + >; + }; + + pinctrl_hog: hoggrp { fsl,pins = < MX7D_PAD_UART3_CTS_B__GPIO4_IO7 0x14 >; }; @@ -701,9 +728,9 @@
MX7D_PAD_LPSR_GPIO1_IO00__WDOG1_WDOG_B 0x74 >; }; - pinctrl_pwm1: pwm1grp { + pinctrl_backlight: backlightgrp { fsl,pins = < - MX7D_PAD_LPSR_GPIO1_IO01__PWM1_OUT 0x110b0 + MX7D_PAD_LPSR_GPIO1_IO01__GPIO1_IO1 0x110b0 >; }; }; diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi index 4cf6c458b583..82ad26e766eb 100644 --- a/arch/arm/boot/dts/imx7s.dtsi +++ b/arch/arm/boot/dts/imx7s.dtsi @@ -540,6 +540,7 @@ snvs_poweroff: snvs-poweroff { compatible = "syscon-poweroff"; regmap = <&snvs>; offset = <0x38>; + value = <0x60>; mask = <0x60>; }; @@ -1021,5 +1022,36 @@ fec1: ethernet@30be0000 { status = "disabled"; }; }; + + dma_apbh: dma-apbh@33000000 { + compatible = "fsl,imx7d-dma-apbh", "fsl,imx28-dma-apbh"; + reg = <0x33000000 0x2000>; + interrupts = , + , + , + ; + interrupt-names = "gpmi0", "gpmi1", "gpmi2", "gpmi3"; + #dma-cells = <1>; + dma-channels = <4>; + clocks = <&clks IMX7D_NAND_USDHC_BUS_RAWNAND_CLK>; + }; + + gpmi: gpmi-nand@33002000{ + compatible = "fsl,imx7d-gpmi-nand"; + #address-cells = <1>; + #size-cells = <1>; + reg = <0x33002000 0x2000>, <0x33004000 0x4000>; + reg-names = "gpmi-nand", "bch"; + interrupts = ; + interrupt-names = "bch"; + clocks = <&clks IMX7D_NAND_RAWNAND_CLK>, + <&clks IMX7D_NAND_USDHC_BUS_RAWNAND_CLK>; + clock-names = "gpmi_io", "gpmi_bch_apb"; + dmas = <&dma_apbh 0>; + dma-names = "rx-tx"; + status = "disabled"; + assigned-clocks = <&clks IMX7D_NAND_ROOT_SRC>; + assigned-clock-parents = <&clks IMX7D_PLL_ENET_MAIN_500M_CLK>; + }; }; }; diff --git a/arch/arm/boot/dts/keystone-k2e-evm.dts b/arch/arm/boot/dts/keystone-k2e-evm.dts index ae1ebe7ee021..f1f32c54e72f 100644 --- a/arch/arm/boot/dts/keystone-k2e-evm.dts +++ b/arch/arm/boot/dts/keystone-k2e-evm.dts @@ -16,6 +16,19 @@ / { compatible = "ti,k2e-evm", "ti,k2e", "ti,keystone"; model = "Texas Instruments Keystone 2 Edison EVM"; + reserved-memory { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + dsp_common_memory: dsp-common-memory@81f800000 { + compatible = "shared-dma-pool"; + reg = <0x00000008 0x1f800000 0x00000000 0x800000>; + reusable; + status = "okay"; + }; + }; + soc { clocks { @@ -160,3 +173,8 @@ ethphy1: ethernet-phy@1 { reg = <1>; }; }; + +&dsp0 { + memory-region = <&dsp_common_memory>; + status = "okay"; +}; diff --git a/arch/arm/boot/dts/keystone-k2e.dtsi b/arch/arm/boot/dts/keystone-k2e.dtsi index 0dd4cdd6d40c..819ab8345916 100644 --- a/arch/arm/boot/dts/keystone-k2e.dtsi +++ b/arch/arm/boot/dts/keystone-k2e.dtsi @@ -45,6 +45,10 @@ cpu@3 { }; }; + aliases { + rproc0 = &dsp0; + }; + soc { /include/ "keystone-k2e-clocks.dtsi" @@ -114,6 +118,22 @@ dspgpio0: keystone_dsp_gpio@02620240 { gpio,syscon-dev = <&devctrl 0x240>; }; + dsp0: dsp@10800000 { + compatible = "ti,k2e-dsp"; + reg = <0x10800000 0x00080000>, + <0x10e00000 0x00008000>, + <0x10f00000 0x00008000>; + reg-names = "l2sram", "l1pram", "l1dram"; + clocks = <&clkgem0>; + ti,syscon-dev = <&devctrl 0x844>; + resets = <&pscrst 0>; + interrupt-parent = <&kirq0>; + interrupts = <0 8>; + interrupt-names = "vring", "exception"; + kick-gpios = <&dspgpio0 27 0>; + status = "disabled"; + }; + pcie1: pcie@21020000 { compatible = "ti,keystone-pcie","snps,dw-pcie"; clocks = <&clkpcie1>; diff --git a/arch/arm/boot/dts/keystone-k2g-evm.dts b/arch/arm/boot/dts/keystone-k2g-evm.dts index 61883cb969d2..f462f1043531 100644 --- a/arch/arm/boot/dts/keystone-k2g-evm.dts +++ b/arch/arm/boot/dts/keystone-k2g-evm.dts @@ -25,6 +25,26 @@ memory@800000000 { reg = <0x00000008 0x00000000 0x00000000 0x80000000>; }; + reserved-memory { + 
#address-cells = <2>; + #size-cells = <2>; + ranges; + + dsp_common_memory: dsp-common-memory@81f800000 { + compatible = "shared-dma-pool"; + reg = <0x00000008 0x1f800000 0x00000000 0x800000>; + reusable; + status = "okay"; + }; + }; + + vcc3v3_dcin_reg: fixedregulator-vcc3v3-dcin { + compatible = "regulator-fixed"; + regulator-name = "mmc0_fixed"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + }; }; &k2g_pinctrl { @@ -34,6 +54,33 @@ K2G_CORE_IOPAD(0x11cc) (BUFFER_CLASS_B | PULL_DISABLE | MUX_MODE0) /* uart0_rxd. K2G_CORE_IOPAD(0x11d0) (BUFFER_CLASS_B | PIN_PULLDOWN | MUX_MODE0) /* uart0_txd.uart0_txd */ >; }; + + mmc0_pins: pinmux_mmc0_pins { + pinctrl-single,pins = < + K2G_CORE_IOPAD(0x1300) (BUFFER_CLASS_B | PIN_PULLUP | MUX_MODE2) /* mmc0_dat3.mmc0_dat3 */ + K2G_CORE_IOPAD(0x1304) (BUFFER_CLASS_B | PIN_PULLUP | MUX_MODE2) /* mmc0_dat2.mmc0_dat2 */ + K2G_CORE_IOPAD(0x1308) (BUFFER_CLASS_B | PIN_PULLUP | MUX_MODE2) /* mmc0_dat1.mmc0_dat1 */ + K2G_CORE_IOPAD(0x130c) (BUFFER_CLASS_B | PIN_PULLUP | MUX_MODE2) /* mmc0_dat0.mmc0_dat0 */ + K2G_CORE_IOPAD(0x1310) (BUFFER_CLASS_B | PIN_PULLUP | MUX_MODE2) /* mmc0_clk.mmc0_clk */ + K2G_CORE_IOPAD(0x1314) (BUFFER_CLASS_B | PIN_PULLUP | MUX_MODE2) /* mmc0_cmd.mmc0_cmd */ + K2G_CORE_IOPAD(0x12ec) (BUFFER_CLASS_B | PIN_PULLUP | MUX_MODE3) /* mmc0_sdcd.gpio1_12 */ + >; + }; + + mmc1_pins: pinmux_mmc1_pins { + pinctrl-single,pins = < + K2G_CORE_IOPAD(0x10ec) (BUFFER_CLASS_B | PIN_PULLUP | MUX_MODE0) /* mmc1_dat7.mmc1_dat7 */ + K2G_CORE_IOPAD(0x10f0) (BUFFER_CLASS_B | PIN_PULLUP | MUX_MODE0) /* mmc1_dat6.mmc1_dat6 */ + K2G_CORE_IOPAD(0x10f4) (BUFFER_CLASS_B | PIN_PULLUP | MUX_MODE0) /* mmc1_dat5.mmc1_dat5 */ + K2G_CORE_IOPAD(0x10f8) (BUFFER_CLASS_B | PIN_PULLUP | MUX_MODE0) /* mmc1_dat4.mmc1_dat4 */ + K2G_CORE_IOPAD(0x10fc) (BUFFER_CLASS_B | PIN_PULLUP | MUX_MODE0) /* mmc1_dat3.mmc1_dat3 */ + K2G_CORE_IOPAD(0x1100) (BUFFER_CLASS_B | PIN_PULLUP | MUX_MODE0) /* mmc1_dat2.mmc1_dat2 */ + K2G_CORE_IOPAD(0x1104) (BUFFER_CLASS_B | PIN_PULLUP | MUX_MODE0) /* mmc1_dat1.mmc1_dat1 */ + K2G_CORE_IOPAD(0x1108) (BUFFER_CLASS_B | PIN_PULLUP | MUX_MODE0) /* mmc1_dat0.mmc1_dat0 */ + K2G_CORE_IOPAD(0x110c) (BUFFER_CLASS_B | PIN_PULLUP | MUX_MODE0) /* mmc1_clk.mmc1_clk */ + K2G_CORE_IOPAD(0x1110) (BUFFER_CLASS_B | PIN_PULLUP | MUX_MODE0) /* mmc1_cmd.mmc1_cmd */ + >; + }; }; &uart0 { @@ -41,3 +88,27 @@ &uart0 { pinctrl-0 = <&uart0_pins>; status = "okay"; }; + +&gpio1 { + status = "okay"; +}; + +&mmc0 { + pinctrl-names = "default"; + pinctrl-0 = <&mmc0_pins>; + vmmc-supply = <&vcc3v3_dcin_reg>; + cd-gpios = <&gpio1 12 GPIO_ACTIVE_LOW>; + status = "okay"; +}; + +&mmc1 { + pinctrl-names = "default"; + pinctrl-0 = <&mmc1_pins>; + vmmc-supply = <&vcc3v3_dcin_reg>; /* VCC3V3_EMMC is connected to VCC3V3_DCIN */ + status = "okay"; +}; + +&dsp0 { + memory-region = <&dsp_common_memory>; + status = "okay"; +}; diff --git a/arch/arm/boot/dts/keystone-k2g-ice.dts b/arch/arm/boot/dts/keystone-k2g-ice.dts index d820d37b5148..78692745e0af 100644 --- a/arch/arm/boot/dts/keystone-k2g-ice.dts +++ b/arch/arm/boot/dts/keystone-k2g-ice.dts @@ -17,6 +17,19 @@ memory@800000000 { device_type = "memory"; reg = <0x00000008 0x00000000 0x00000000 0x20000000>; }; + + reserved-memory { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + dsp_common_memory: dsp-common-memory@81f800000 { + compatible = "shared-dma-pool"; + reg = <0x00000008 0x1f800000 0x00000000 0x800000>; + reusable; + status = "okay"; + }; + }; }; &k2g_pinctrl { @@ -33,3 
+46,8 @@ &uart0 { pinctrl-0 = <&uart0_pins>; status = "okay"; }; + +&dsp0 { + memory-region = <&dsp_common_memory>; + status = "okay"; +}; diff --git a/arch/arm/boot/dts/keystone-k2g.dtsi b/arch/arm/boot/dts/keystone-k2g.dtsi index a789f75a1ed5..826b286665e6 100644 --- a/arch/arm/boot/dts/keystone-k2g.dtsi +++ b/arch/arm/boot/dts/keystone-k2g.dtsi @@ -15,6 +15,7 @@ #include #include +#include / { compatible = "ti,k2g","ti,keystone"; @@ -27,6 +28,7 @@ / { aliases { serial0 = &uart0; + rproc0 = &dsp0; }; cpus { @@ -113,6 +115,24 @@ uart0: serial@02530c00 { status = "disabled"; }; + dcan0: can@0260B200 { + compatible = "ti,am4372-d_can", "ti,am3352-d_can"; + reg = <0x0260B200 0x200>; + interrupts = ; + status = "disabled"; + power-domains = <&k2g_pds 0x0008>; + clocks = <&k2g_clks 0x0008 1>; + }; + + dcan1: can@0260B400 { + compatible = "ti,am4372-d_can", "ti,am3352-d_can"; + reg = <0x0260B400 0x200>; + interrupts = ; + status = "disabled"; + power-domains = <&k2g_pds 0x0009>; + clocks = <&k2g_clks 0x0009 1>; + }; + kirq0: keystone_irq@026202a0 { compatible = "ti,keystone-irq"; interrupts = ; @@ -128,6 +148,22 @@ dspgpio0: keystone_dsp_gpio@02620240 { gpio,syscon-dev = <&devctrl 0x240>; }; + dsp0: dsp@10800000 { + compatible = "ti,k2g-dsp"; + reg = <0x10800000 0x00100000>, + <0x10e00000 0x00008000>, + <0x10f00000 0x00008000>; + reg-names = "l2sram", "l1pram", "l1dram"; + power-domains = <&k2g_pds 0x0046>; + ti,syscon-dev = <&devctrl 0x844>; + resets = <&k2g_reset 0x0046 0x1>; + interrupt-parent = <&kirq0>; + interrupts = <0 8>; + interrupt-names = "vring", "exception"; + kick-gpios = <&dspgpio0 27 0>; + status = "disabled"; + }; + msgmgr: msgmgr@02a00000 { compatible = "ti,k2g-message-manager"; #mbox-cells = <2>; @@ -139,5 +175,173 @@ msgmgr: msgmgr@02a00000 { interrupts = , ; }; + + pmmc: pmmc@02921c00 { + compatible = "ti,k2g-sci"; + /* + * In case of rare platforms that does not use k2g as + * system master, use /delete-property/ + */ + ti,system-reboot-controller; + mbox-names = "rx", "tx"; + mboxes= <&msgmgr 5 2>, + <&msgmgr 0 0>; + reg-names = "debug_messages"; + reg = <0x02921c00 0x400>; + + k2g_pds: power-controller { + compatible = "ti,sci-pm-domain"; + #power-domain-cells = <1>; + }; + + k2g_clks: clocks { + compatible = "ti,k2g-sci-clk"; + #clock-cells = <2>; + }; + + k2g_reset: reset-controller { + compatible = "ti,sci-reset"; + #reset-cells = <2>; + }; + }; + + gpio0: gpio@2603000 { + compatible = "ti,k2g-gpio", "ti,keystone-gpio"; + reg = <0x02603000 0x100>; + gpio-controller; + #gpio-cells = <2>; + + interrupts = , + , + , + , + , + , + , + , + ; + interrupt-controller; + #interrupt-cells = <2>; + ti,ngpio = <144>; + ti,davinci-gpio-unbanked = <0>; + clocks = <&k2g_clks 0x001b 0x0>; + clock-names = "gpio"; + }; + + gpio1: gpio@260a000 { + compatible = "ti,k2g-gpio", "ti,keystone-gpio"; + reg = <0x0260a000 0x100>; + gpio-controller; + #gpio-cells = <2>; + interrupts = , + , + , + , + ; + interrupt-controller; + #interrupt-cells = <2>; + ti,ngpio = <68>; + ti,davinci-gpio-unbanked = <0>; + clocks = <&k2g_clks 0x001c 0x0>; + clock-names = "gpio"; + }; + + edma0: edma@02700000 { + compatible = "ti,k2g-edma3-tpcc", "ti,edma3-tpcc"; + reg = <0x02700000 0x8000>; + reg-names = "edma3_cc"; + interrupts = , + , + ; + interrupt-names = "edma3_ccint", "emda3_mperr", + "edma3_ccerrint"; + dma-requests = <64>; + #dma-cells = <2>; + + ti,tptcs = <&edma0_tptc0 7>, <&edma0_tptc1 0>; + + ti,edma-memcpy-channels = <32 33 34 35>; + + power-domains = <&k2g_pds 0x3f>; + }; + + edma0_tptc0: 
tptc@02760000 { + compatible = "ti,k2g-edma3-tptc", "ti,edma3-tptc"; + reg = <0x02760000 0x400>; + power-domains = <&k2g_pds 0x3f>; + }; + + edma0_tptc1: tptc@02768000 { + compatible = "ti,k2g-edma3-tptc", "ti,edma3-tptc"; + reg = <0x02768000 0x400>; + power-domains = <&k2g_pds 0x3f>; + }; + + edma1: edma@02728000 { + compatible = "ti,k2g-edma3-tpcc", "ti,edma3-tpcc"; + reg = <0x02728000 0x8000>; + reg-names = "edma3_cc"; + interrupts = , + , + ; + interrupt-names = "edma3_ccint", "emda3_mperr", + "edma3_ccerrint"; + dma-requests = <64>; + #dma-cells = <2>; + + ti,tptcs = <&edma1_tptc0 7>, <&edma1_tptc1 0>; + + /* + * memcpy is disabled, can be enabled with: + * ti,edma-memcpy-channels = <12 13 14 15>; + * for example. + */ + + power-domains = <&k2g_pds 0x4f>; + }; + + edma1_tptc0: tptc@027b0000 { + compatible = "ti,k2g-edma3-tptc", "ti,edma3-tptc"; + reg = <0x027b0000 0x400>; + power-domains = <&k2g_pds 0x4f>; + }; + + edma1_tptc1: tptc@027b8000 { + compatible = "ti,k2g-edma3-tptc", "ti,edma3-tptc"; + reg = <0x027b8000 0x400>; + power-domains = <&k2g_pds 0x4f>; + }; + + mmc0: mmc@23000000 { + compatible = "ti,k2g-hsmmc", "ti,omap4-hsmmc"; + reg = <0x23000000 0x400>; + interrupts = ; + dmas = <&edma1 24 0>, <&edma1 25 0>; + dma-names = "tx", "rx"; + bus-width = <4>; + ti,needs-special-reset; + no-1-8-v; + max-frequency = <96000000>; + power-domains = <&k2g_pds 0xb>; + clocks = <&k2g_clks 0xb 1>, <&k2g_clks 0xb 2>; + clock-names = "fck", "mmchsdb_fck"; + status = "disabled"; + }; + + mmc1: mmc@23100000 { + compatible = "ti,k2g-hsmmc", "ti,omap4-hsmmc"; + reg = <0x23100000 0x400>; + interrupts = ; + dmas = <&edma1 26 0>, <&edma1 27 0>; + dma-names = "tx", "rx"; + bus-width = <8>; + ti,needs-special-reset; + ti,non-removable; + max-frequency = <96000000>; + power-domains = <&k2g_pds 0xc>; + clocks = <&k2g_clks 0xc 1>, <&k2g_clks 0xc 2>; + clock-names = "fck", "mmchsdb_fck"; + status = "disabled"; + }; }; }; diff --git a/arch/arm/boot/dts/keystone-k2hk-evm.dts b/arch/arm/boot/dts/keystone-k2hk-evm.dts index 2156ff92d08f..6dd13b98aaba 100644 --- a/arch/arm/boot/dts/keystone-k2hk-evm.dts +++ b/arch/arm/boot/dts/keystone-k2hk-evm.dts @@ -16,6 +16,19 @@ / { compatible = "ti,k2hk-evm", "ti,k2hk", "ti,keystone"; model = "Texas Instruments Keystone 2 Kepler/Hawking EVM"; + reserved-memory { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + dsp_common_memory: dsp-common-memory@81f800000 { + compatible = "shared-dma-pool"; + reg = <0x00000008 0x1f800000 0x00000000 0x800000>; + reusable; + status = "okay"; + }; + }; + soc { clocks { refclksys: refclksys { @@ -184,3 +197,43 @@ ethphy1: ethernet-phy@1 { reg = <1>; }; }; + +&dsp0 { + memory-region = <&dsp_common_memory>; + status = "okay"; +}; + +&dsp1 { + memory-region = <&dsp_common_memory>; + status = "okay"; +}; + +&dsp2 { + memory-region = <&dsp_common_memory>; + status = "okay"; +}; + +&dsp3 { + memory-region = <&dsp_common_memory>; + status = "okay"; +}; + +&dsp4 { + memory-region = <&dsp_common_memory>; + status = "okay"; +}; + +&dsp5 { + memory-region = <&dsp_common_memory>; + status = "okay"; +}; + +&dsp6 { + memory-region = <&dsp_common_memory>; + status = "okay"; +}; + +&dsp7 { + memory-region = <&dsp_common_memory>; + status = "okay"; +}; diff --git a/arch/arm/boot/dts/keystone-k2hk.dtsi b/arch/arm/boot/dts/keystone-k2hk.dtsi index 69d449430511..31dc00e4e5fd 100644 --- a/arch/arm/boot/dts/keystone-k2hk.dtsi +++ b/arch/arm/boot/dts/keystone-k2hk.dtsi @@ -45,6 +45,17 @@ cpu@3 { }; }; + aliases { + rproc0 = &dsp0; + rproc1 = &dsp1; + 
rproc2 = &dsp2; + rproc3 = &dsp3; + rproc4 = &dsp4; + rproc5 = &dsp5; + rproc6 = &dsp6; + rproc7 = &dsp7; + }; + soc { /include/ "keystone-k2hk-clocks.dtsi" @@ -134,6 +145,134 @@ dspgpio7: keystone_dsp_gpio@262025c { gpio,syscon-dev = <&devctrl 0x25c>; }; + dsp0: dsp@10800000 { + compatible = "ti,k2hk-dsp"; + reg = <0x10800000 0x00100000>, + <0x10e00000 0x00008000>, + <0x10f00000 0x00008000>; + reg-names = "l2sram", "l1pram", "l1dram"; + clocks = <&clkgem0>; + ti,syscon-dev = <&devctrl 0x40>; + resets = <&pscrst 0>; + interrupt-parent = <&kirq0>; + interrupts = <0 8>; + interrupt-names = "vring", "exception"; + kick-gpios = <&dspgpio0 27 0>; + status = "disabled"; + }; + + dsp1: dsp@11800000 { + compatible = "ti,k2hk-dsp"; + reg = <0x11800000 0x00100000>, + <0x11e00000 0x00008000>, + <0x11f00000 0x00008000>; + reg-names = "l2sram", "l1pram", "l1dram"; + clocks = <&clkgem1>; + ti,syscon-dev = <&devctrl 0x44>; + resets = <&pscrst 1>; + interrupt-parent = <&kirq0>; + interrupts = <1 9>; + interrupt-names = "vring", "exception"; + kick-gpios = <&dspgpio1 27 0>; + status = "disabled"; + }; + + dsp2: dsp@12800000 { + compatible = "ti,k2hk-dsp"; + reg = <0x12800000 0x00100000>, + <0x12e00000 0x00008000>, + <0x12f00000 0x00008000>; + reg-names = "l2sram", "l1pram", "l1dram"; + clocks = <&clkgem2>; + ti,syscon-dev = <&devctrl 0x48>; + resets = <&pscrst 2>; + interrupt-parent = <&kirq0>; + interrupts = <2 10>; + interrupt-names = "vring", "exception"; + kick-gpios = <&dspgpio2 27 0>; + status = "disabled"; + }; + + dsp3: dsp@13800000 { + compatible = "ti,k2hk-dsp"; + reg = <0x13800000 0x00100000>, + <0x13e00000 0x00008000>, + <0x13f00000 0x00008000>; + reg-names = "l2sram", "l1pram", "l1dram"; + clocks = <&clkgem3>; + ti,syscon-dev = <&devctrl 0x4c>; + resets = <&pscrst 3>; + interrupt-parent = <&kirq0>; + interrupts = <3 11>; + interrupt-names = "vring", "exception"; + kick-gpios = <&dspgpio3 27 0>; + status = "disabled"; + }; + + dsp4: dsp@14800000 { + compatible = "ti,k2hk-dsp"; + reg = <0x14800000 0x00100000>, + <0x14e00000 0x00008000>, + <0x14f00000 0x00008000>; + reg-names = "l2sram", "l1pram", "l1dram"; + clocks = <&clkgem4>; + ti,syscon-dev = <&devctrl 0x50>; + resets = <&pscrst 4>; + interrupt-parent = <&kirq0>; + interrupts = <4 12>; + interrupt-names = "vring", "exception"; + kick-gpios = <&dspgpio4 27 0>; + status = "disabled"; + }; + + dsp5: dsp@15800000 { + compatible = "ti,k2hk-dsp"; + reg = <0x15800000 0x00100000>, + <0x15e00000 0x00008000>, + <0x15f00000 0x00008000>; + reg-names = "l2sram", "l1pram", "l1dram"; + clocks = <&clkgem5>; + ti,syscon-dev = <&devctrl 0x54>; + resets = <&pscrst 5>; + interrupt-parent = <&kirq0>; + interrupts = <5 13>; + interrupt-names = "vring", "exception"; + kick-gpios = <&dspgpio5 27 0>; + status = "disabled"; + }; + + dsp6: dsp@16800000 { + compatible = "ti,k2hk-dsp"; + reg = <0x16800000 0x00100000>, + <0x16e00000 0x00008000>, + <0x16f00000 0x00008000>; + reg-names = "l2sram", "l1pram", "l1dram"; + clocks = <&clkgem6>; + ti,syscon-dev = <&devctrl 0x58>; + resets = <&pscrst 6>; + interrupt-parent = <&kirq0>; + interrupts = <6 14>; + interrupt-names = "vring", "exception"; + kick-gpios = <&dspgpio6 27 0>; + status = "disabled"; + }; + + dsp7: dsp@17800000 { + compatible = "ti,k2hk-dsp"; + reg = <0x17800000 0x00100000>, + <0x17e00000 0x00008000>, + <0x17f00000 0x00008000>; + reg-names = "l2sram", "l1pram", "l1dram"; + clocks = <&clkgem7>; + ti,syscon-dev = <&devctrl 0x5c>; + resets = <&pscrst 7>; + interrupt-parent = <&kirq0>; + interrupts = <7 15>; + 
interrupt-names = "vring", "exception"; + kick-gpios = <&dspgpio7 27 0>; + status = "disabled"; + }; + mdio: mdio@02090300 { compatible = "ti,keystone_mdio", "ti,davinci_mdio"; #address-cells = <1>; diff --git a/arch/arm/boot/dts/keystone-k2l-evm.dts b/arch/arm/boot/dts/keystone-k2l-evm.dts index 056b42f99d7a..528667618db4 100644 --- a/arch/arm/boot/dts/keystone-k2l-evm.dts +++ b/arch/arm/boot/dts/keystone-k2l-evm.dts @@ -16,6 +16,19 @@ / { compatible = "ti,k2l-evm", "ti,k2l", "ti,keystone"; model = "Texas Instruments Keystone 2 Lamarr EVM"; + reserved-memory { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + dsp_common_memory: dsp-common-memory@81f800000 { + compatible = "shared-dma-pool"; + reg = <0x00000008 0x1f800000 0x00000000 0x800000>; + reusable; + status = "okay"; + }; + }; + soc { clocks { refclksys: refclksys { @@ -133,3 +146,23 @@ ethphy1: ethernet-phy@1 { reg = <1>; }; }; + +&dsp0 { + memory-region = <&dsp_common_memory>; + status = "okay"; +}; + +&dsp1 { + memory-region = <&dsp_common_memory>; + status = "okay"; +}; + +&dsp2 { + memory-region = <&dsp_common_memory>; + status = "okay"; +}; + +&dsp3 { + memory-region = <&dsp_common_memory>; + status = "okay"; +}; diff --git a/arch/arm/boot/dts/keystone-k2l.dtsi b/arch/arm/boot/dts/keystone-k2l.dtsi index 148650406cf7..4431310bc922 100644 --- a/arch/arm/boot/dts/keystone-k2l.dtsi +++ b/arch/arm/boot/dts/keystone-k2l.dtsi @@ -33,6 +33,13 @@ cpu@1 { }; }; + aliases { + rproc0 = &dsp0; + rproc1 = &dsp1; + rproc2 = &dsp2; + rproc3 = &dsp3; + }; + soc { /include/ "keystone-k2l-clocks.dtsi" @@ -268,6 +275,70 @@ dspgpio3: keystone_dsp_gpio@262024c { gpio,syscon-dev = <&devctrl 0x24c>; }; + dsp0: dsp@10800000 { + compatible = "ti,k2l-dsp"; + reg = <0x10800000 0x00100000>, + <0x10e00000 0x00008000>, + <0x10f00000 0x00008000>; + reg-names = "l2sram", "l1pram", "l1dram"; + clocks = <&clkgem0>; + ti,syscon-dev = <&devctrl 0x844>; + resets = <&pscrst 0>; + interrupt-parent = <&kirq0>; + interrupts = <0 8>; + interrupt-names = "vring", "exception"; + kick-gpios = <&dspgpio0 27 0>; + status = "disabled"; + }; + + dsp1: dsp@11800000 { + compatible = "ti,k2l-dsp"; + reg = <0x11800000 0x00100000>, + <0x11e00000 0x00008000>, + <0x11f00000 0x00008000>; + reg-names = "l2sram", "l1pram", "l1dram"; + clocks = <&clkgem1>; + ti,syscon-dev = <&devctrl 0x848>; + resets = <&pscrst 1>; + interrupt-parent = <&kirq0>; + interrupts = <1 9>; + interrupt-names = "vring", "exception"; + kick-gpios = <&dspgpio1 27 0>; + status = "disabled"; + }; + + dsp2: dsp@12800000 { + compatible = "ti,k2l-dsp"; + reg = <0x12800000 0x00100000>, + <0x12e00000 0x00008000>, + <0x12f00000 0x00008000>; + reg-names = "l2sram", "l1pram", "l1dram"; + clocks = <&clkgem2>; + ti,syscon-dev = <&devctrl 0x84c>; + resets = <&pscrst 2>; + interrupt-parent = <&kirq0>; + interrupts = <2 10>; + interrupt-names = "vring", "exception"; + kick-gpios = <&dspgpio2 27 0>; + status = "disabled"; + }; + + dsp3: dsp@13800000 { + compatible = "ti,k2l-dsp"; + reg = <0x13800000 0x00100000>, + <0x13e00000 0x00008000>, + <0x13f00000 0x00008000>; + reg-names = "l2sram", "l1pram", "l1dram"; + clocks = <&clkgem3>; + ti,syscon-dev = <&devctrl 0x850>; + resets = <&pscrst 3>; + interrupt-parent = <&kirq0>; + interrupts = <3 11>; + interrupt-names = "vring", "exception"; + kick-gpios = <&dspgpio3 27 0>; + status = "disabled"; + }; + mdio: mdio@26200f00 { compatible = "ti,keystone_mdio", "ti,davinci_mdio"; #address-cells = <1>; diff --git a/arch/arm/boot/dts/kirkwood-6192.dtsi 
b/arch/arm/boot/dts/kirkwood-6192.dtsi index d573e03f3134..f003f3f1bd65 100644 --- a/arch/arm/boot/dts/kirkwood-6192.dtsi +++ b/arch/arm/boot/dts/kirkwood-6192.dtsi @@ -1,6 +1,6 @@ / { mbus@f1000000 { - pciec: pcie-controller@82000000 { + pciec: pcie@82000000 { compatible = "marvell,kirkwood-pcie"; status = "disabled"; device_type = "pci"; @@ -24,6 +24,7 @@ pcie0: pcie@1,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x1 0 1 0 0x81000000 0 0 0x81000000 0x1 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &intc 9>; marvell,pcie-port = <0>; diff --git a/arch/arm/boot/dts/kirkwood-6281.dtsi b/arch/arm/boot/dts/kirkwood-6281.dtsi index 748d0b62f233..47d4b3d3d9e9 100644 --- a/arch/arm/boot/dts/kirkwood-6281.dtsi +++ b/arch/arm/boot/dts/kirkwood-6281.dtsi @@ -1,6 +1,6 @@ / { mbus@f1000000 { - pciec: pcie-controller@82000000 { + pciec: pcie@82000000 { compatible = "marvell,kirkwood-pcie"; status = "disabled"; device_type = "pci"; @@ -24,6 +24,7 @@ pcie0: pcie@1,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x1 0 1 0 0x81000000 0 0 0x81000000 0x1 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &intc 9>; marvell,pcie-port = <0>; diff --git a/arch/arm/boot/dts/kirkwood-6282.dtsi b/arch/arm/boot/dts/kirkwood-6282.dtsi index bb63d2d50fc5..a13dad0a7c08 100644 --- a/arch/arm/boot/dts/kirkwood-6282.dtsi +++ b/arch/arm/boot/dts/kirkwood-6282.dtsi @@ -1,6 +1,6 @@ / { mbus@f1000000 { - pciec: pcie-controller@82000000 { + pciec: pcie@82000000 { compatible = "marvell,kirkwood-pcie"; status = "disabled"; device_type = "pci"; @@ -28,6 +28,7 @@ pcie0: pcie@1,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x1 0 1 0 0x81000000 0 0 0x81000000 0x1 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &intc 9>; marvell,pcie-port = <0>; @@ -45,6 +46,7 @@ pcie1: pcie@2,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x2 0 1 0 0x81000000 0 0 0x81000000 0x2 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &intc 10>; marvell,pcie-port = <1>; diff --git a/arch/arm/boot/dts/kirkwood-98dx4122.dtsi b/arch/arm/boot/dts/kirkwood-98dx4122.dtsi index 720c210d491d..90d4d71b6683 100644 --- a/arch/arm/boot/dts/kirkwood-98dx4122.dtsi +++ b/arch/arm/boot/dts/kirkwood-98dx4122.dtsi @@ -1,6 +1,6 @@ / { mbus@f1000000 { - pciec: pcie-controller@82000000 { + pciec: pcie@82000000 { compatible = "marvell,kirkwood-pcie"; status = "disabled"; device_type = "pci"; @@ -24,6 +24,7 @@ pcie0: pcie@1,0 { #interrupt-cells = <1>; ranges = <0x82000000 0 0 0x82000000 0x1 0 1 0 0x81000000 0 0 0x81000000 0x1 0 1 0>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &intc 9>; marvell,pcie-port = <0>; diff --git a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts index 43e9364083de..b4575bbaf085 100644 --- a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts +++ b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts @@ -192,7 +192,7 @@ &mmc1 { interrupts-extended = <&intc 83 &omap3_pmx_core 0x11a>; pinctrl-names = "default"; pinctrl-0 = <&mmc1_pins &mmc1_cd>; - cd-gpios = <&gpio4 31 IRQ_TYPE_LEVEL_LOW>; /* gpio127 */ + cd-gpios = <&gpio4 31 GPIO_ACTIVE_LOW>; /* gpio127 */ vmmc-supply = <&vmmc1>; bus-width = <4>; cap-power-off-card; diff --git a/arch/arm/boot/dts/meson.dtsi b/arch/arm/boot/dts/meson.dtsi index 15204e44161d..cd6ad072e72c 100644 
--- a/arch/arm/boot/dts/meson.dtsi +++ b/arch/arm/boot/dts/meson.dtsi @@ -86,14 +86,14 @@ hwrng: rng@8100 { }; uart_A: serial@84c0 { - compatible = "amlogic,meson-uart"; + compatible = "amlogic,meson6-uart", "amlogic,meson-uart"; reg = <0x84c0 0x18>; interrupts = ; status = "disabled"; }; uart_B: serial@84dc { - compatible = "amlogic,meson-uart"; + compatible = "amlogic,meson6-uart", "amlogic,meson-uart"; reg = <0x84dc 0x18>; interrupts = ; status = "disabled"; @@ -108,6 +108,20 @@ i2c_A: i2c@8500 { status = "disabled"; }; + pwm_ab: pwm@8550 { + compatible = "amlogic,meson-pwm"; + reg = <0x8550 0x10>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm_cd: pwm@8650 { + compatible = "amlogic,meson-pwm"; + reg = <0x8650 0x10>; + #pwm-cells = <3>; + status = "disabled"; + }; + saradc: adc@8680 { compatible = "amlogic,meson-saradc"; reg = <0x8680 0x34>; @@ -117,7 +131,7 @@ saradc: adc@8680 { }; uart_C: serial@8700 { - compatible = "amlogic,meson-uart"; + compatible = "amlogic,meson6-uart", "amlogic,meson-uart"; reg = <0x8700 0x18>; interrupts = ; status = "disabled"; @@ -182,7 +196,7 @@ ir_receiver: ir-receiver@480 { }; uart_AO: serial@4c0 { - compatible = "amlogic,meson-uart"; + compatible = "amlogic,meson6-uart", "amlogic,meson-ao-uart", "amlogic,meson-uart"; reg = <0x4c0 0x18>; interrupts = ; status = "disabled"; @@ -230,5 +244,13 @@ ethmac: ethernet@c9410000 { interrupt-names = "macirq"; status = "disabled"; }; + + ahb_sram: sram@d9000000 { + compatible = "mmio-sram"; + reg = <0xd9000000 0x20000>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 0xd9000000 0x20000>; + }; }; }; /* end of / */ diff --git a/arch/arm/boot/dts/meson6.dtsi b/arch/arm/boot/dts/meson6.dtsi index 8557b6117a4b..ef281d290052 100644 --- a/arch/arm/boot/dts/meson6.dtsi +++ b/arch/arm/boot/dts/meson6.dtsi @@ -70,9 +70,37 @@ cpu@201 { }; }; + xtal: xtal-clk { + compatible = "fixed-clock"; + clock-frequency = <24000000>; + clock-output-names = "xtal"; + #clock-cells = <0>; + }; + clk81: clk@0 { #clock-cells = <0>; compatible = "fixed-clock"; clock-frequency = <200000000>; }; }; /* end of / */ + + +&uart_AO { + clocks = <&xtal>, <&clk81>, <&clk81>; + clock-names = "xtal", "pclk", "baud"; +}; + +&uart_A { + clocks = <&xtal>, <&clk81>, <&clk81>; + clock-names = "xtal", "pclk", "baud"; +}; + +&uart_B { + clocks = <&xtal>, <&clk81>, <&clk81>; + clock-names = "xtal", "pclk", "baud"; +}; + +&uart_C { + clocks = <&xtal>, <&clk81>, <&clk81>; + clock-names = "xtal", "pclk", "baud"; +}; diff --git a/arch/arm/boot/dts/meson8.dtsi b/arch/arm/boot/dts/meson8.dtsi index cada35828931..b98d44fde6b6 100644 --- a/arch/arm/boot/dts/meson8.dtsi +++ b/arch/arm/boot/dts/meson8.dtsi @@ -168,10 +168,18 @@ mux { &cbus { clkc: clock-controller@4000 { #clock-cells = <1>; + #reset-cells = <1>; compatible = "amlogic,meson8-clkc"; reg = <0x8000 0x4>, <0x4000 0x460>; }; + pwm_ef: pwm@86c0 { + compatible = "amlogic,meson8-pwm", "amlogic,meson8b-pwm"; + reg = <0x86c0 0x10>; + #pwm-cells = <3>; + status = "disabled"; + }; + pinctrl_cbus: pinctrl@9880 { compatible = "amlogic,meson8-cbus-pinctrl"; reg = <0x9880 0x10>; @@ -270,6 +278,14 @@ &L2 { arm,filter-ranges = <0x100000 0xc0000000>; }; +&pwm_ab { + compatible = "amlogic,meson8-pwm", "amlogic,meson8b-pwm"; +}; + +&pwm_cd { + compatible = "amlogic,meson8-pwm", "amlogic,meson8b-pwm"; +}; + &saradc { compatible = "amlogic,meson8-saradc", "amlogic,meson-saradc"; clocks = <&clkc CLKID_XTAL>, diff --git a/arch/arm/boot/dts/meson8b.dtsi b/arch/arm/boot/dts/meson8b.dtsi index 
72e4f425f190..bc278da7df0d 100644 --- a/arch/arm/boot/dts/meson8b.dtsi +++ b/arch/arm/boot/dts/meson8b.dtsi @@ -119,6 +119,7 @@ mux { &cbus { clkc: clock-controller@4000 { #clock-cells = <1>; + #reset-cells = <1>; compatible = "amlogic,meson8b-clkc"; reg = <0x8000 0x4>, <0x4000 0x460>; }; @@ -129,20 +130,6 @@ reset: reset-controller@4404 { #reset-cells = <1>; }; - pwm_ab: pwm@8550 { - compatible = "amlogic,meson8b-pwm"; - reg = <0x8550 0x10>; - #pwm-cells = <3>; - status = "disabled"; - }; - - pwm_cd: pwm@8650 { - compatible = "amlogic,meson8b-pwm"; - reg = <0x8650 0x10>; - #pwm-cells = <3>; - status = "disabled"; - }; - pwm_ef: pwm@86c0 { compatible = "amlogic,meson8b-pwm"; reg = <0x86c0 0x10>; @@ -150,12 +137,6 @@ pwm_ef: pwm@86c0 { status = "disabled"; }; - wdt: watchdog@9900 { - compatible = "amlogic,meson8b-wdt"; - reg = <0x9900 0x8>; - interrupts = <0 0 1>; - }; - pinctrl_cbus: pinctrl@9880 { compatible = "amlogic,meson8b-cbus-pinctrl"; reg = <0x9880 0x10>; @@ -193,6 +174,14 @@ &L2 { arm,filter-ranges = <0x100000 0xc0000000>; }; +&pwm_ab { + compatible = "amlogic,meson8b-pwm"; +}; + +&pwm_cd { + compatible = "amlogic,meson8b-pwm"; +}; + &saradc { compatible = "amlogic,meson8b-saradc", "amlogic,meson-saradc"; clocks = <&clkc CLKID_XTAL>, @@ -242,3 +231,7 @@ &usb1_phy { clock-names = "usb_general", "usb"; resets = <&reset RESET_USB_OTG>; }; + +&wdt { + compatible = "amlogic,meson8b-wdt"; +}; diff --git a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi index 1eb5da1dc8f0..4d61e5b1334a 100644 --- a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi +++ b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi @@ -255,5 +255,6 @@ vaudio: VAUDIO { regulator-min-microvolt = <2775000>; regulator-max-microvolt = <2775000>; regulator-enable-ramp-delay = <1000>; + regulator-initial-mode = <0x00>; /* NORMAL */ }; }; diff --git a/arch/arm/boot/dts/mt2701.dtsi b/arch/arm/boot/dts/mt2701.dtsi index f1efdc63656a..afe12e5b51f9 100644 --- a/arch/arm/boot/dts/mt2701.dtsi +++ b/arch/arm/boot/dts/mt2701.dtsi @@ -13,6 +13,7 @@ */ #include +#include #include #include #include @@ -533,6 +534,7 @@ larb0: larb@14010000 { compatible = "mediatek,mt2701-smi-larb"; reg = <0 0x14010000 0 0x1000>; mediatek,smi = <&smi_common>; + mediatek,larb-id = <0>; clocks = <&mmsys CLK_MM_SMI_LARB0>, <&mmsys CLK_MM_SMI_LARB0>; clock-names = "apb", "smi"; @@ -549,6 +551,7 @@ larb2: larb@15001000 { compatible = "mediatek,mt2701-smi-larb"; reg = <0 0x15001000 0 0x1000>; mediatek,smi = <&smi_common>; + mediatek,larb-id = <2>; clocks = <&imgsys CLK_IMG_SMI_COMM>, <&imgsys CLK_IMG_SMI_COMM>; clock-names = "apb", "smi"; @@ -579,6 +582,7 @@ larb1: larb@16010000 { compatible = "mediatek,mt2701-smi-larb"; reg = <0 0x16010000 0 0x1000>; mediatek,smi = <&smi_common>; + mediatek,larb-id = <1>; clocks = <&vdecsys CLK_VDEC_CKGEN>, <&vdecsys CLK_VDEC_LARB>; clock-names = "apb", "smi"; @@ -591,12 +595,114 @@ hifsys: syscon@1a000000 { #clock-cells = <1>; }; + usb0: usb@1a1c0000 { + compatible = "mediatek,mt8173-xhci"; + reg = <0 0x1a1c0000 0 0x1000>, + <0 0x1a1c4700 0 0x0100>; + reg-names = "mac", "ippc"; + interrupts = ; + clocks = <&hifsys CLK_HIFSYS_USB0PHY>, + <&topckgen CLK_TOP_ETHIF_SEL>; + clock-names = "sys_ck", "ref_ck"; + power-domains = <&scpsys MT2701_POWER_DOMAIN_HIF>; + phys = <&u2port0 PHY_TYPE_USB2>, <&u3port0 PHY_TYPE_USB3>; + status = "disabled"; + }; + + u3phy0: usb-phy@1a1c4000 { + compatible = "mediatek,mt2701-u3phy"; + reg = <0 0x1a1c4000 0 0x0700>; + #address-cells = <2>; + #size-cells = 
<2>; + ranges; + status = "disabled"; + + u2port0: usb-phy@1a1c4800 { + reg = <0 0x1a1c4800 0 0x0100>; + clocks = <&topckgen CLK_TOP_USB_PHY48M>; + clock-names = "ref"; + #phy-cells = <1>; + status = "okay"; + }; + + u3port0: usb-phy@1a1c4900 { + reg = <0 0x1a1c4900 0 0x0700>; + clocks = <&clk26m>; + clock-names = "ref"; + #phy-cells = <1>; + status = "okay"; + }; + }; + + usb1: usb@1a240000 { + compatible = "mediatek,mt8173-xhci"; + reg = <0 0x1a240000 0 0x1000>, + <0 0x1a244700 0 0x0100>; + reg-names = "mac", "ippc"; + interrupts = ; + clocks = <&hifsys CLK_HIFSYS_USB1PHY>, + <&topckgen CLK_TOP_ETHIF_SEL>; + clock-names = "sys_ck", "ref_ck"; + power-domains = <&scpsys MT2701_POWER_DOMAIN_HIF>; + phys = <&u2port1 PHY_TYPE_USB2>, <&u3port1 PHY_TYPE_USB3>; + status = "disabled"; + }; + + u3phy1: usb-phy@1a244000 { + compatible = "mediatek,mt2701-u3phy"; + reg = <0 0x1a244000 0 0x0700>; + #address-cells = <2>; + #size-cells = <2>; + ranges; + status = "disabled"; + + u2port1: usb-phy@1a244800 { + reg = <0 0x1a244800 0 0x0100>; + clocks = <&topckgen CLK_TOP_USB_PHY48M>; + clock-names = "ref"; + #phy-cells = <1>; + status = "okay"; + }; + + u3port1: usb-phy@1a244900 { + reg = <0 0x1a244900 0 0x0700>; + clocks = <&clk26m>; + clock-names = "ref"; + #phy-cells = <1>; + status = "okay"; + }; + }; + ethsys: syscon@1b000000 { compatible = "mediatek,mt2701-ethsys", "syscon"; reg = <0 0x1b000000 0 0x1000>; #clock-cells = <1>; }; + eth: ethernet@1b100000 { + compatible = "mediatek,mt2701-eth", "syscon"; + reg = <0 0x1b100000 0 0x20000>; + interrupts = , + , + ; + clocks = <&topckgen CLK_TOP_ETHIF_SEL>, + <&ethsys CLK_ETHSYS_ESW>, + <&ethsys CLK_ETHSYS_GP1>, + <&ethsys CLK_ETHSYS_GP2>, + <&apmixedsys CLK_APMIXED_TRGPLL>; + clock-names = "ethif", "esw", "gp1", "gp2", "trgpll"; + resets = <&ethsys MT2701_ETHSYS_FE_RST>, + <&ethsys MT2701_ETHSYS_GMAC_RST>, + <&ethsys MT2701_ETHSYS_PPE_RST>; + reset-names = "fe", "gmac", "ppe"; + power-domains = <&scpsys MT2701_POWER_DOMAIN_ETH>; + mediatek,ethsys = <&ethsys>; + mediatek,pctl = <&syscfg_pctl_a>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + bdpsys: syscon@1c000000 { compatible = "mediatek,mt2701-bdpsys", "syscon"; reg = <0 0x1c000000 0 0x1000>; diff --git a/arch/arm/boot/dts/mt6323.dtsi b/arch/arm/boot/dts/mt6323.dtsi new file mode 100644 index 000000000000..7c783d6c750e --- /dev/null +++ b/arch/arm/boot/dts/mt6323.dtsi @@ -0,0 +1,241 @@ +/* + * Copyright (c) 2017 MediaTek Inc. + * Author: John Crispin + * Sean Wang + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ */ + +&pwrap { + pmic: mt6323 { + compatible = "mediatek,mt6323"; + interrupt-parent = <&pio>; + interrupts = <150 IRQ_TYPE_LEVEL_HIGH>; + interrupt-controller; + #interrupt-cells = <2>; + + mt6323regulator: mt6323regulator{ + compatible = "mediatek,mt6323-regulator"; + + mt6323_vproc_reg: buck_vproc{ + regulator-name = "vproc"; + regulator-min-microvolt = < 700000>; + regulator-max-microvolt = <1350000>; + regulator-ramp-delay = <12500>; + regulator-always-on; + regulator-boot-on; + }; + + mt6323_vsys_reg: buck_vsys{ + regulator-name = "vsys"; + regulator-min-microvolt = <1400000>; + regulator-max-microvolt = <2987500>; + regulator-ramp-delay = <25000>; + regulator-always-on; + regulator-boot-on; + }; + + mt6323_vpa_reg: buck_vpa{ + regulator-name = "vpa"; + regulator-min-microvolt = < 500000>; + regulator-max-microvolt = <3650000>; + }; + + mt6323_vtcxo_reg: ldo_vtcxo{ + regulator-name = "vtcxo"; + regulator-min-microvolt = <2800000>; + regulator-max-microvolt = <2800000>; + regulator-enable-ramp-delay = <90>; + regulator-always-on; + regulator-boot-on; + }; + + mt6323_vcn28_reg: ldo_vcn28{ + regulator-name = "vcn28"; + regulator-min-microvolt = <2800000>; + regulator-max-microvolt = <2800000>; + regulator-enable-ramp-delay = <185>; + }; + + mt6323_vcn33_bt_reg: ldo_vcn33_bt{ + regulator-name = "vcn33_bt"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3600000>; + regulator-enable-ramp-delay = <185>; + }; + + mt6323_vcn33_wifi_reg: ldo_vcn33_wifi{ + regulator-name = "vcn33_wifi"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3600000>; + regulator-enable-ramp-delay = <185>; + }; + + mt6323_va_reg: ldo_va{ + regulator-name = "va"; + regulator-min-microvolt = <2800000>; + regulator-max-microvolt = <2800000>; + regulator-enable-ramp-delay = <216>; + regulator-always-on; + regulator-boot-on; + }; + + mt6323_vcama_reg: ldo_vcama{ + regulator-name = "vcama"; + regulator-min-microvolt = <1500000>; + regulator-max-microvolt = <2800000>; + regulator-enable-ramp-delay = <216>; + }; + + mt6323_vio28_reg: ldo_vio28{ + regulator-name = "vio28"; + regulator-min-microvolt = <2800000>; + regulator-max-microvolt = <2800000>; + regulator-enable-ramp-delay = <216>; + regulator-always-on; + regulator-boot-on; + }; + + mt6323_vusb_reg: ldo_vusb{ + regulator-name = "vusb"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-enable-ramp-delay = <216>; + regulator-boot-on; + }; + + mt6323_vmc_reg: ldo_vmc{ + regulator-name = "vmc"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + regulator-enable-ramp-delay = <36>; + regulator-boot-on; + }; + + mt6323_vmch_reg: ldo_vmch{ + regulator-name = "vmch"; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3300000>; + regulator-enable-ramp-delay = <36>; + regulator-boot-on; + }; + + mt6323_vemc3v3_reg: ldo_vemc3v3{ + regulator-name = "vemc3v3"; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3300000>; + regulator-enable-ramp-delay = <36>; + regulator-boot-on; + }; + + mt6323_vgp1_reg: ldo_vgp1{ + regulator-name = "vgp1"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <3300000>; + regulator-enable-ramp-delay = <216>; + }; + + mt6323_vgp2_reg: ldo_vgp2{ + regulator-name = "vgp2"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <3000000>; + regulator-enable-ramp-delay = <216>; + }; + + mt6323_vgp3_reg: ldo_vgp3{ + regulator-name = "vgp3"; + regulator-min-microvolt = <1200000>; + 
regulator-max-microvolt = <1800000>; + regulator-enable-ramp-delay = <216>; + }; + + mt6323_vcn18_reg: ldo_vcn18{ + regulator-name = "vcn18"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-enable-ramp-delay = <216>; + }; + + mt6323_vsim1_reg: ldo_vsim1{ + regulator-name = "vsim1"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3000000>; + regulator-enable-ramp-delay = <216>; + }; + + mt6323_vsim2_reg: ldo_vsim2{ + regulator-name = "vsim2"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3000000>; + regulator-enable-ramp-delay = <216>; + }; + + mt6323_vrtc_reg: ldo_vrtc{ + regulator-name = "vrtc"; + regulator-min-microvolt = <2800000>; + regulator-max-microvolt = <2800000>; + regulator-always-on; + regulator-boot-on; + }; + + mt6323_vcamaf_reg: ldo_vcamaf{ + regulator-name = "vcamaf"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <3300000>; + regulator-enable-ramp-delay = <216>; + }; + + mt6323_vibr_reg: ldo_vibr{ + regulator-name = "vibr"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <3300000>; + regulator-enable-ramp-delay = <36>; + }; + + mt6323_vrf18_reg: ldo_vrf18{ + regulator-name = "vrf18"; + regulator-min-microvolt = <1825000>; + regulator-max-microvolt = <1825000>; + regulator-enable-ramp-delay = <187>; + }; + + mt6323_vm_reg: ldo_vm{ + regulator-name = "vm"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1800000>; + regulator-enable-ramp-delay = <216>; + regulator-always-on; + regulator-boot-on; + }; + + mt6323_vio18_reg: ldo_vio18{ + regulator-name = "vio18"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-enable-ramp-delay = <216>; + regulator-always-on; + regulator-boot-on; + }; + + mt6323_vcamd_reg: ldo_vcamd{ + regulator-name = "vcamd"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1800000>; + regulator-enable-ramp-delay = <216>; + }; + + mt6323_vcamio_reg: ldo_vcamio{ + regulator-name = "vcamio"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-enable-ramp-delay = <216>; + }; + }; + }; +}; diff --git a/arch/arm/boot/dts/mt7623-evb.dts b/arch/arm/boot/dts/mt7623-evb.dts deleted file mode 100644 index b60b41cad592..000000000000 --- a/arch/arm/boot/dts/mt7623-evb.dts +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright (c) 2016 MediaTek Inc. - * Author: John Crispin - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -/dts-v1/; -#include "mt7623.dtsi" - -/ { - model = "MediaTek MT7623 evaluation board"; - compatible = "mediatek,mt7623-evb", "mediatek,mt7623"; - - chosen { - stdout-path = &uart2; - }; - - memory { - reg = <0 0x80000000 0 0x40000000>; - }; -}; - -&uart2 { - status = "okay"; -}; diff --git a/arch/arm/boot/dts/mt7623.dtsi b/arch/arm/boot/dts/mt7623.dtsi index d81158b2b02f..ec8a07415cb3 100644 --- a/arch/arm/boot/dts/mt7623.dtsi +++ b/arch/arm/boot/dts/mt7623.dtsi @@ -21,36 +21,99 @@ #include #include #include +#include #include "skeleton64.dtsi" / { compatible = "mediatek,mt7623"; interrupt-parent = <&sysirq>; + cpu_opp_table: opp_table { + compatible = "operating-points-v2"; + opp-shared; + + opp-98000000 { + opp-hz = /bits/ 64 <98000000>; + opp-microvolt = <1050000>; + }; + + opp-198000000 { + opp-hz = /bits/ 64 <198000000>; + opp-microvolt = <1050000>; + }; + + opp-398000000 { + opp-hz = /bits/ 64 <398000000>; + opp-microvolt = <1050000>; + }; + + opp-598000000 { + opp-hz = /bits/ 64 <598000000>; + opp-microvolt = <1050000>; + }; + + opp-747500000 { + opp-hz = /bits/ 64 <747500000>; + opp-microvolt = <1050000>; + }; + + opp-1040000000 { + opp-hz = /bits/ 64 <1040000000>; + opp-microvolt = <1150000>; + }; + + opp-1196000000 { + opp-hz = /bits/ 64 <1196000000>; + opp-microvolt = <1200000>; + }; + + opp-1300000000 { + opp-hz = /bits/ 64 <1300000000>; + opp-microvolt = <1300000>; + }; + }; + cpus { #address-cells = <1>; #size-cells = <0>; enable-method = "mediatek,mt6589-smp"; - cpu@0 { + cpu0: cpu@0 { device_type = "cpu"; compatible = "arm,cortex-a7"; reg = <0x0>; + clocks = <&infracfg CLK_INFRA_CPUSEL>, + <&apmixedsys CLK_APMIXED_MAINPLL>; + clock-names = "cpu", "intermediate"; + operating-points-v2 = <&cpu_opp_table>; + #cooling-cells = <2>; + cooling-min-level = <0>; + cooling-max-level = <7>; + clock-frequency = <1300000000>; }; - cpu@1 { + + cpu1: cpu@1 { device_type = "cpu"; compatible = "arm,cortex-a7"; reg = <0x1>; + operating-points-v2 = <&cpu_opp_table>; + clock-frequency = <1300000000>; }; - cpu@2 { + + cpu2: cpu@2 { device_type = "cpu"; compatible = "arm,cortex-a7"; reg = <0x2>; + operating-points-v2 = <&cpu_opp_table>; + clock-frequency = <1300000000>; }; - cpu@3 { + + cpu3: cpu@3 { device_type = "cpu"; compatible = "arm,cortex-a7"; reg = <0x3>; + operating-points-v2 = <&cpu_opp_table>; + clock-frequency = <1300000000>; }; }; @@ -74,6 +137,58 @@ clk26m: oscillator@0 { clock-output-names = "clk26m"; }; + thermal-zones { + cpu_thermal: cpu_thermal { + polling-delay-passive = <1000>; + polling-delay = <1000>; + + thermal-sensors = <&thermal 0>; + + trips { + cpu_passive: cpu_passive { + temperature = <47000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu_active: cpu_active { + temperature = <67000>; + hysteresis = <2000>; + type = "active"; + }; + + cpu_hot: cpu_hot { + temperature = <87000>; + hysteresis = <2000>; + type = "hot"; + }; + + cpu_crit { + temperature = <107000>; + hysteresis = <2000>; + type = "critical"; + }; + }; + + cooling-maps { + map0 { + trip = <&cpu_passive>; + cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + + map1 { + trip = <&cpu_active>; + cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + + map2 { + trip = <&cpu_hot>; + cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + }; + }; + timer { compatible = "arm,armv7-timer"; interrupt-parent = <&gic>; @@ -172,7 +287,7 @@ pwrap: pwrap@1000d000 { clock-names = "spi", "wrap"; }; - cir: cir@0x10013000 { + cir: cir@10013000 { compatible = 
"mediatek,mt7623-cir"; reg = <0 0x10013000 0 0x1000>; interrupts = ; @@ -193,7 +308,7 @@ sysirq: interrupt-controller@10200100 { efuse: efuse@10206000 { compatible = "mediatek,mt7623-efuse", "mediatek,mt8173-efuse"; - reg = <0 0x10206000 0 0x1000>; + reg = <0 0x10206000 0 0x1000>; #address-cells = <1>; #size-cells = <1>; thermal_calibration_data: calib@424 { @@ -371,6 +486,31 @@ thermal: thermal@1100b000 { nvmem-cell-names = "calibration-data"; }; + nandc: nfi@1100d000 { + compatible = "mediatek,mt7623-nfc", + "mediatek,mt2701-nfc"; + reg = <0 0x1100d000 0 0x1000>; + interrupts = ; + power-domains = <&scpsys MT2701_POWER_DOMAIN_IFR_MSC>; + clocks = <&pericfg CLK_PERI_NFI>, + <&pericfg CLK_PERI_NFI_PAD>; + clock-names = "nfi_clk", "pad_clk"; + status = "disabled"; + ecc-engine = <&bch>; + #address-cells = <1>; + #size-cells = <0>; + }; + + bch: ecc@1100e000 { + compatible = "mediatek,mt7623-ecc", + "mediatek,mt2701-ecc"; + reg = <0 0x1100e000 0 0x1000>; + interrupts = ; + clocks = <&pericfg CLK_PERI_NFI_ECC>; + clock-names = "nfiecc_clk"; + status = "disabled"; + }; + spi1: spi@11016000 { compatible = "mediatek,mt7623-spi", "mediatek,mt2701-spi"; @@ -399,31 +539,6 @@ spi2: spi@11017000 { status = "disabled"; }; - nandc: nfi@1100d000 { - compatible = "mediatek,mt7623-nfc", - "mediatek,mt2701-nfc"; - reg = <0 0x1100d000 0 0x1000>; - interrupts = ; - power-domains = <&scpsys MT2701_POWER_DOMAIN_IFR_MSC>; - clocks = <&pericfg CLK_PERI_NFI>, - <&pericfg CLK_PERI_NFI_PAD>; - clock-names = "nfi_clk", "pad_clk"; - status = "disabled"; - ecc-engine = <&bch>; - #address-cells = <1>; - #size-cells = <0>; - }; - - bch: ecc@1100e000 { - compatible = "mediatek,mt7623-ecc", - "mediatek,mt2701-ecc"; - reg = <0 0x1100e000 0 0x1000>; - interrupts = ; - clocks = <&pericfg CLK_PERI_NFI_ECC>; - clock-names = "nfiecc_clk"; - status = "disabled"; - }; - afe: audio-controller@11220000 { compatible = "mediatek,mt7623-audio", "mediatek,mt2701-audio"; @@ -538,13 +653,22 @@ mmc1: mmc@11240000 { compatible = "mediatek,mt7623-mmc", "mediatek,mt8135-mmc"; reg = <0 0x11240000 0 0x1000>; - interrupts = ; + interrupts = ; clocks = <&pericfg CLK_PERI_MSDC30_1>, <&topckgen CLK_TOP_MSDC30_1_SEL>; clock-names = "source", "hclk"; status = "disabled"; }; + hifsys: syscon@1a000000 { + compatible = "mediatek,mt7623-hifsys", + "mediatek,mt2701-hifsys", + "syscon"; + reg = <0 0x1a000000 0 0x1000>; + #clock-cells = <1>; + #reset-cells = <1>; + }; + usb1: usb@1a1c0000 { compatible = "mediatek,mt7623-xhci", "mediatek,mt8173-xhci"; @@ -561,7 +685,8 @@ usb1: usb@1a1c0000 { }; u3phy1: usb-phy@1a1c4000 { - compatible = "mediatek,mt7623-u3phy", "mediatek,mt2701-u3phy"; + compatible = "mediatek,mt7623-u3phy", + "mediatek,mt2701-u3phy"; reg = <0 0x1a1c4000 0 0x0700>; clocks = <&clk26m>; clock-names = "u3phya_ref"; @@ -599,7 +724,8 @@ usb2: usb@1a240000 { }; u3phy2: usb-phy@1a244000 { - compatible = "mediatek,mt7623-u3phy", "mediatek,mt2701-u3phy"; + compatible = "mediatek,mt7623-u3phy", + "mediatek,mt2701-u3phy"; reg = <0 0x1a244000 0 0x0700>; clocks = <&clk26m>; clock-names = "u3phya_ref"; @@ -621,15 +747,6 @@ u3port1: usb-phy@1a244900 { }; }; - hifsys: syscon@1a000000 { - compatible = "mediatek,mt7623-hifsys", - "mediatek,mt2701-hifsys", - "syscon"; - reg = <0 0x1a000000 0 0x1000>; - #clock-cells = <1>; - #reset-cells = <1>; - }; - ethsys: syscon@1b000000 { compatible = "mediatek,mt7623-ethsys", "mediatek,mt2701-ethsys", @@ -639,7 +756,9 @@ ethsys: syscon@1b000000 { }; eth: ethernet@1b100000 { - compatible = "mediatek,mt2701-eth", "syscon"; 
+ compatible = "mediatek,mt7623-eth", + "mediatek,mt2701-eth", + "syscon"; reg = <0 0x1b100000 0 0x20000>; interrupts = , , @@ -650,6 +769,10 @@ eth: ethernet@1b100000 { <ðsys CLK_ETHSYS_GP2>, <&apmixedsys CLK_APMIXED_TRGPLL>; clock-names = "ethif", "esw", "gp1", "gp2", "trgpll"; + resets = <ðsys MT2701_ETHSYS_FE_RST>, + <ðsys MT2701_ETHSYS_GMAC_RST>, + <ðsys MT2701_ETHSYS_PPE_RST>; + reset-names = "fe", "gmac", "ppe"; power-domains = <&scpsys MT2701_POWER_DOMAIN_ETH>; mediatek,ethsys = <ðsys>; mediatek,pctl = <&syscfg_pctl_a>; diff --git a/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts b/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts new file mode 100644 index 000000000000..688a86378cee --- /dev/null +++ b/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts @@ -0,0 +1,487 @@ +/* + * Copyright 2017 Sean Wang + * + * SPDX-License-Identifier: (GPL-2.0+ OR MIT) + */ + +/dts-v1/; +#include +#include "mt7623.dtsi" +#include "mt6323.dtsi" + +/ { + model = "Bananapi BPI-R2"; + compatible = "bananapi,bpi-r2", "mediatek,mt7623"; + + aliases { + serial2 = &uart2; + }; + + chosen { + stdout-path = "serial2:115200n8"; + }; + + cpus { + cpu@0 { + proc-supply = <&mt6323_vproc_reg>; + }; + + cpu@1 { + proc-supply = <&mt6323_vproc_reg>; + }; + + cpu@2 { + proc-supply = <&mt6323_vproc_reg>; + }; + + cpu@3 { + proc-supply = <&mt6323_vproc_reg>; + }; + }; + + gpio_keys { + compatible = "gpio-keys"; + pinctrl-names = "default"; + pinctrl-0 = <&key_pins_a>; + + factory { + label = "factory"; + linux,code = ; + gpios = <&pio 256 GPIO_ACTIVE_LOW>; + }; + + wps { + label = "wps"; + linux,code = ; + gpios = <&pio 257 GPIO_ACTIVE_HIGH>; + }; + }; + + leds { + compatible = "gpio-leds"; + pinctrl-names = "default"; + pinctrl-0 = <&led_pins_a>; + + blue { + label = "bpi-r2:pio:blue"; + gpios = <&pio 241 GPIO_ACTIVE_HIGH>; + default-state = "off"; + }; + + green { + label = "bpi-r2:pio:green"; + gpios = <&pio 240 GPIO_ACTIVE_HIGH>; + default-state = "off"; + }; + + red { + label = "bpi-r2:pio:red"; + gpios = <&pio 239 GPIO_ACTIVE_HIGH>; + default-state = "off"; + }; + }; + + memory@80000000 { + reg = <0 0x80000000 0 0x40000000>; + }; +}; + +&cir { + pinctrl-names = "default"; + pinctrl-0 = <&cir_pins_a>; + status = "okay"; +}; + +&crypto { + status = "okay"; +}; + +ð { + status = "okay"; + + gmac0: mac@0 { + compatible = "mediatek,eth-mac"; + reg = <0>; + phy-mode = "trgmii"; + + fixed-link { + speed = <1000>; + full-duplex; + pause; + }; + }; + + mdio: mdio-bus { + #address-cells = <1>; + #size-cells = <0>; + + switch@0 { + compatible = "mediatek,mt7530"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + pinctrl-names = "default"; + reset-gpios = <&pio 33 0>; + core-supply = <&mt6323_vpa_reg>; + io-supply = <&mt6323_vemc3v3_reg>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + + port@0 { + reg = <0>; + label = "wan"; + }; + + port@1 { + reg = <1>; + label = "lan0"; + }; + + port@2 { + reg = <2>; + label = "lan1"; + }; + + port@3 { + reg = <3>; + label = "lan2"; + }; + + port@4 { + reg = <4>; + label = "lan3"; + }; + + port@6 { + reg = <6>; + label = "cpu"; + ethernet = <&gmac0>; + phy-mode = "trgmii"; + + fixed-link { + speed = <1000>; + full-duplex; + }; + }; + }; + }; + }; +}; + +&i2c0 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c0_pins_a>; + status = "okay"; +}; + +&i2c1 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c1_pins_a>; + status = "okay"; +}; + +&mmc0 { + pinctrl-names = "default", "state_uhs"; + pinctrl-0 = <&mmc0_pins_default>; + pinctrl-1 = <&mmc0_pins_uhs>; + status = 
"okay"; + bus-width = <8>; + max-frequency = <50000000>; + cap-mmc-highspeed; + vmmc-supply = <&mt6323_vemc3v3_reg>; + vqmmc-supply = <&mt6323_vio18_reg>; + non-removable; +}; + +&mmc1 { + pinctrl-names = "default", "state_uhs"; + pinctrl-0 = <&mmc1_pins_default>; + pinctrl-1 = <&mmc1_pins_uhs>; + status = "okay"; + bus-width = <4>; + max-frequency = <50000000>; + cap-sd-highspeed; + cd-gpios = <&pio 261 0>; + vmmc-supply = <&mt6323_vmch_reg>; + vqmmc-supply = <&mt6323_vio18_reg>; +}; + +&pio { + cir_pins_a:cir@0 { + pins_cir { + pinmux = ; + bias-disable; + }; + }; + + i2c0_pins_a: i2c@0 { + pins_i2c0 { + pinmux = , + ; + bias-disable; + }; + }; + + i2c1_pins_a: i2c@1 { + pin_i2c1 { + pinmux = , + ; + bias-disable; + }; + }; + + i2s0_pins_a: i2s@0 { + pin_i2s0 { + pinmux = , + , + , + , + ; + drive-strength = ; + bias-pull-down; + }; + }; + + i2s1_pins_a: i2s@1 { + pin_i2s1 { + pinmux = , + , + , + , + ; + drive-strength = ; + bias-pull-down; + }; + }; + + key_pins_a: keys@0 { + pins_keys { + pinmux = , + ; + input-enable; + }; + }; + + led_pins_a: leds@0 { + pins_leds { + pinmux = , + , + ; + }; + }; + + mmc0_pins_default: mmc0default { + pins_cmd_dat { + pinmux = , + , + , + , + , + , + , + , + ; + input-enable; + bias-pull-up; + }; + + pins_clk { + pinmux = ; + bias-pull-down; + }; + + pins_rst { + pinmux = ; + bias-pull-up; + }; + }; + + mmc0_pins_uhs: mmc0 { + pins_cmd_dat { + pinmux = , + , + , + , + , + , + , + , + ; + input-enable; + drive-strength = ; + bias-pull-up = ; + }; + + pins_clk { + pinmux = ; + drive-strength = ; + bias-pull-down = ; + }; + + pins_rst { + pinmux = ; + bias-pull-up; + }; + }; + + mmc1_pins_default: mmc1default { + pins_cmd_dat { + pinmux = , + , + , + , + ; + input-enable; + drive-strength = ; + bias-pull-up = ; + }; + + pins_clk { + pinmux = ; + bias-pull-down; + drive-strength = ; + }; + + pins_wp { + pinmux = ; + input-enable; + bias-pull-up; + }; + + pins_insert { + pinmux = ; + bias-pull-up; + }; + }; + + mmc1_pins_uhs: mmc1 { + pins_cmd_dat { + pinmux = , + , + , + , + ; + input-enable; + drive-strength = ; + bias-pull-up = ; + }; + + pins_clk { + pinmux = ; + drive-strength = ; + bias-pull-down = ; + }; + }; + + pwm_pins_a: pwm@0 { + pins_pwm { + pinmux = , + , + , + , + ; + }; + }; + + spi0_pins_a: spi@0 { + pins_spi { + pinmux = , + , + , + ; + bias-disable; + }; + }; + + uart0_pins_a: uart@0 { + pins_dat { + pinmux = , + ; + }; + }; + + uart1_pins_a: uart@1 { + pins_dat { + pinmux = , + ; + }; + }; +}; + +&pwm { + pinctrl-names = "default"; + pinctrl-0 = <&pwm_pins_a>; + status = "okay"; +}; + +&pwrap { + mt6323 { + mt6323led: led { + compatible = "mediatek,mt6323-led"; + #address-cells = <1>; + #size-cells = <0>; + + led@0 { + reg = <0>; + label = "bpi-r2:isink:green"; + default-state = "off"; + }; + + led@1 { + reg = <1>; + label = "bpi-r2:isink:red"; + default-state = "off"; + }; + + led@2 { + reg = <2>; + label = "bpi-r2:isink:blue"; + default-state = "off"; + }; + }; + }; +}; + +&spi0 { + pinctrl-names = "default"; + pinctrl-0 = <&spi0_pins_a>; + status = "okay"; +}; + +&uart0 { + pinctrl-names = "default"; + pinctrl-0 = <&uart0_pins_a>; + status = "disabled"; +}; + +&uart1 { + pinctrl-names = "default"; + pinctrl-0 = <&uart1_pins_a>; + status = "disabled"; +}; + +&uart2 { + status = "okay"; +}; + +&usb1 { + vusb33-supply = <&mt6323_vusb_reg>; + status = "okay"; +}; + +&usb2 { + vusb33-supply = <&mt6323_vusb_reg>; + status = "okay"; +}; + +&u3phy1 { + status = "okay"; +}; + +&u3phy2 { + status = "okay"; +}; + diff --git 
a/arch/arm/boot/dts/mt7623n-rfb-nand.dts b/arch/arm/boot/dts/mt7623n-rfb-nand.dts new file mode 100644 index 000000000000..17c578f0d261 --- /dev/null +++ b/arch/arm/boot/dts/mt7623n-rfb-nand.dts @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2017 MediaTek Inc. + * Author: John Crispin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; +#include "mt7623n-rfb.dtsi" + +/ { + model = "MediaTek MT7623N NAND reference board"; + compatible = "mediatek,mt7623n-rfb-nand", "mediatek,mt7623"; +}; + +&bch { + status = "okay"; +}; + +&nandc { + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&nand_pins_default>; + + nand@0 { + reg = <0>; + spare_per_sector = <64>; + nand-ecc-mode = "hw"; + nand-ecc-strength = <12>; + nand-ecc-step-size = <1024>; + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + partition@0 { + label = "preloader"; + reg = <0x0 0x40000>; + }; + + partition@40000 { + label = "uboot"; + reg = <0x40000 0x80000>; + }; + + partition@C0000 { + label = "uboot-env"; + reg = <0xC0000 0x40000>; + }; + + partition@140000 { + label = "bootimg"; + reg = <0x140000 0x2000000>; + }; + + partition@2140000 { + label = "recovery"; + reg = <0x2140000 0x2000000>; + }; + + partition@4140000 { + label = "rootfs"; + reg = <0x4140000 0x1000000>; + }; + + partition@5140000 { + label = "usrdata"; + reg = <0x5140000 0x1000000>; + }; + }; + }; +}; + +&pio { + nand_pins_default: nanddefault { + pins_ale { + pinmux = ; + drive-strength = ; + bias-pull-down = ; + }; + + pins_dat { + pinmux = , + , + , + , + , + , + , + , + ; + input-enable; + drive-strength = ; + bias-pull-up; + }; + + pins_we { + pinmux = ; + drive-strength = ; + bias-pull-up = ; + }; + }; +}; diff --git a/arch/arm/boot/dts/mt7623n-rfb.dtsi b/arch/arm/boot/dts/mt7623n-rfb.dtsi new file mode 100644 index 000000000000..256c5fd947bf --- /dev/null +++ b/arch/arm/boot/dts/mt7623n-rfb.dtsi @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2017 MediaTek Inc. + * Author: John Crispin + * Sean Wang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +/dts-v1/; +#include "mt7623.dtsi" +#include "mt6323.dtsi" + +/ { + aliases { + serial0 = &uart0; + serial1 = &uart1; + serial2 = &uart2; + }; + + chosen { + stdout-path = "serial2:115200n8"; + }; + + cpus { + cpu0 { + proc-supply = <&mt6323_vproc_reg>; + }; + + cpu1 { + proc-supply = <&mt6323_vproc_reg>; + }; + + cpu2 { + proc-supply = <&mt6323_vproc_reg>; + }; + + cpu3 { + proc-supply = <&mt6323_vproc_reg>; + }; + }; + + memory@80000000 { + reg = <0 0x80000000 0 0x40000000>; + }; + + usb_p1_vbus: regulator@0 { + compatible = "regulator-fixed"; + regulator-name = "usb_vbus"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + gpio = <&pio 135 GPIO_ACTIVE_HIGH>; + enable-active-high; + }; +}; + +&mmc0 { + vmmc-supply = <&mt6323_vemc3v3_reg>; + vqmmc-supply = <&mt6323_vio18_reg>; +}; + +&mmc1 { + vmmc-supply = <&mt6323_vmch_reg>; + vqmmc-supply = <&mt6323_vmc_reg>; +}; + +&uart0 { + status = "okay"; +}; + +&uart1 { + status = "okay"; +}; + +&uart2 { + status = "okay"; +}; + +&usb1 { + vbus-supply = <&usb_p1_vbus>; + status = "okay"; +}; + +&u3phy1 { + status = "okay"; +}; diff --git a/arch/arm/boot/dts/omap2420-n8x0-common.dtsi b/arch/arm/boot/dts/omap2420-n8x0-common.dtsi index 7e5ffc583c90..91886231e5a8 100644 --- a/arch/arm/boot/dts/omap2420-n8x0-common.dtsi +++ b/arch/arm/boot/dts/omap2420-n8x0-common.dtsi @@ -15,8 +15,8 @@ &gpio3 0 GPIO_ACTIVE_HIGH /* gpio64 sel */ >; #address-cells = <1>; #size-cells = <0>; - retu_mfd: retu@1 { - compatible = "retu-mfd"; + retu: retu@1 { + compatible = "nokia,retu"; interrupt-parent = <&gpio4>; interrupts = <12 IRQ_TYPE_EDGE_RISING>; reg = <0x1>; diff --git a/arch/arm/boot/dts/omap3-beagle-xm.dts b/arch/arm/boot/dts/omap3-beagle-xm.dts index 673cee2234b2..683b96a8f73e 100644 --- a/arch/arm/boot/dts/omap3-beagle-xm.dts +++ b/arch/arm/boot/dts/omap3-beagle-xm.dts @@ -299,7 +299,7 @@ &i2c3 { &mmc1 { vmmc-supply = <&vmmc1>; - vmmc_aux-supply = <&vsim>; + vqmmc-supply = <&vsim>; bus-width = <8>; }; diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts index 4be85ce59dd1..4d2eaf843fa9 100644 --- a/arch/arm/boot/dts/omap3-beagle.dts +++ b/arch/arm/boot/dts/omap3-beagle.dts @@ -283,7 +283,7 @@ &i2c3 { &mmc1 { vmmc-supply = <&vmmc1>; - vmmc_aux-supply = <&vsim>; + vqmmc-supply = <&vsim>; bus-width = <8>; }; diff --git a/arch/arm/boot/dts/omap3-cm-t3517.dts b/arch/arm/boot/dts/omap3-cm-t3517.dts index 53ae04f9104d..3d293b345e99 100644 --- a/arch/arm/boot/dts/omap3-cm-t3517.dts +++ b/arch/arm/boot/dts/omap3-cm-t3517.dts @@ -129,7 +129,7 @@ &mmc2 { pinctrl-names = "default"; pinctrl-0 = <&mmc2_pins>; vmmc-supply = <&wl12xx_vmmc2>; - vmmc_aux-supply = <&wl12xx_vaux2>; + vqmmc-supply = <&wl12xx_vaux2>; non-removable; bus-width = <4>; cap-power-off-card; diff --git a/arch/arm/boot/dts/omap3-cm-t3730.dts b/arch/arm/boot/dts/omap3-cm-t3730.dts index 2294f5b0aa10..bdf4b7fdda39 100644 --- a/arch/arm/boot/dts/omap3-cm-t3730.dts +++ b/arch/arm/boot/dts/omap3-cm-t3730.dts @@ -69,7 +69,7 @@ &mmc2 { pinctrl-names = "default"; pinctrl-0 = <&mmc2_pins>; vmmc-supply = <&wl12xx_vmmc2>; - vmmc_aux-supply = <&wl12xx_vaux2>; + vqmmc-supply = <&wl12xx_vaux2>; non-removable; bus-width = <4>; cap-power-off-card; diff --git a/arch/arm/boot/dts/omap3-devkit8000-common.dtsi b/arch/arm/boot/dts/omap3-devkit8000-common.dtsi index 82aa9c4a0f1c..0c0bb1b01b0b 100644 --- a/arch/arm/boot/dts/omap3-devkit8000-common.dtsi +++ b/arch/arm/boot/dts/omap3-devkit8000-common.dtsi @@ -149,7 +149,7 @@ &i2c3 { &mmc1 { vmmc-supply = 
<&vmmc1>; - vmmc_aux-supply = <&vsim>; + vqmmc-supply = <&vsim>; bus-width = <8>; }; diff --git a/arch/arm/boot/dts/omap3-evm-common.dtsi b/arch/arm/boot/dts/omap3-evm-common.dtsi index 2b1d6977a535..ff35803088e3 100644 --- a/arch/arm/boot/dts/omap3-evm-common.dtsi +++ b/arch/arm/boot/dts/omap3-evm-common.dtsi @@ -115,7 +115,7 @@ tsc2046@0 { &mmc1 { interrupts-extended = <&intc 83 &omap3_pmx_core 0x11a>; vmmc-supply = <&vmmc1>; - vmmc_aux-supply = <&vsim>; + vqmmc-supply = <&vsim>; bus-width = <8>; }; diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts index 49f37084e435..4acd32a1c4ef 100644 --- a/arch/arm/boot/dts/omap3-n900.dts +++ b/arch/arm/boot/dts/omap3-n900.dts @@ -164,6 +164,29 @@ vctcxo: vctcxo { }; }; +&isp { + vdds_csib-supply = <&vaux2>; + + pinctrl-names = "default"; + pinctrl-0 = <&camera_pins>; + + ports { + port@1 { + reg = <1>; + + csi_isp: endpoint { + remote-endpoint = <&csi_cam1>; + bus-type = <3>; /* CCP2 */ + clock-lanes = <1>; + data-lanes = <0>; + lane-polarity = <0 0>; + /* Select strobe = <1> for back camera, <0> for front camera */ + strobe = <1>; + }; + }; + }; +}; + &omap3_pmx_core { pinctrl-names = "default"; @@ -328,6 +351,22 @@ modem_pins: pinmux_modem { OMAP3_CORE1_IOPAD(0x218e, PIN_OUTPUT | MUX_MODE4) /* gpio 157 => cmt_bsi */ >; }; + + camera_pins: pinmux_camera { + pinctrl-single,pins = < + OMAP3_CORE1_IOPAD(0x210c, PIN_OUTPUT | MUX_MODE7) /* cam_hs */ + OMAP3_CORE1_IOPAD(0x210e, PIN_OUTPUT | MUX_MODE7) /* cam_vs */ + OMAP3_CORE1_IOPAD(0x2110, PIN_OUTPUT | MUX_MODE0) /* cam_xclka */ + OMAP3_CORE1_IOPAD(0x211e, PIN_OUTPUT | MUX_MODE7) /* cam_d4 */ + OMAP3_CORE1_IOPAD(0x2122, PIN_INPUT | MUX_MODE0) /* cam_d6 */ + OMAP3_CORE1_IOPAD(0x2124, PIN_INPUT | MUX_MODE0) /* cam_d7 */ + OMAP3_CORE1_IOPAD(0x2126, PIN_INPUT | MUX_MODE0) /* cam_d8 */ + OMAP3_CORE1_IOPAD(0x2128, PIN_INPUT | MUX_MODE0) /* cam_d9 */ + OMAP3_CORE1_IOPAD(0x212a, PIN_OUTPUT | MUX_MODE7) /* cam_d10 */ + OMAP3_CORE1_IOPAD(0x212e, PIN_OUTPUT | MUX_MODE7) /* cam_xclkb */ + OMAP3_CORE1_IOPAD(0x2132, PIN_OUTPUT | MUX_MODE0) /* cam_strobe */ + >; + }; }; &i2c1 { @@ -726,6 +765,40 @@ lis302dl: lis3lv02d@1d { st,max-limit-y = <32>; st,max-limit-z = <32>; }; + + cam1: camera@3e { + compatible = "toshiba,et8ek8"; + reg = <0x3e>; + + vana-supply = <&vaux4>; + + clocks = <&isp 0>; + clock-names = "extclk"; + clock-frequency = <9600000>; + + reset-gpio = <&gpio4 6 GPIO_ACTIVE_HIGH>; /* 102 */ + + port { + csi_cam1: endpoint { + bus-type = <3>; /* CCP2 */ + strobe = <1>; + clock-inv = <0>; + crc = <1>; + + remote-endpoint = <&csi_isp>; + }; + }; + }; + + /* D/A converter for auto-focus */ + ad5820: dac@0c { + compatible = "adi,ad5820"; + reg = <0x0c>; + + VANA-supply = <&vaux4>; + + #io-channel-cells = <0>; + }; }; &mmc1 { @@ -733,6 +806,9 @@ &mmc1 { pinctrl-0 = <&mmc1_pins>; vmmc-supply = <&vmmc1>; bus-width = <4>; + /* For debugging, it is often good idea to remove this GPIO. + It means you can remove back cover (to reboot by removing + battery) and still use the MMC card. 
*/ cd-gpios = <&gpio6 0 GPIO_ACTIVE_HIGH>; /* 160 */ }; @@ -741,7 +817,7 @@ &mmc2 { pinctrl-names = "default"; pinctrl-0 = <&mmc2_pins>; vmmc-supply = <&vaux3>; - vmmc_aux-supply = <&vsim>; + vqmmc-supply = <&vsim>; bus-width = <8>; non-removable; no-sdio; diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi index df3366fa5409..1b0bd72945f2 100644 --- a/arch/arm/boot/dts/omap3-n950-n9.dtsi +++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi @@ -265,6 +265,24 @@ &vio { &i2c2 { clock-frequency = <400000>; + + as3645a@30 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0x30>; + compatible = "ams,as3645a"; + flash@0 { + reg = <0x0>; + flash-timeout-us = <150000>; + flash-max-microamp = <320000>; + led-max-microamp = <60000>; + ams,input-max-microamp = <1750000>; + }; + indicator@1 { + reg = <0x1>; + led-max-microamp = <10000>; + }; + }; }; &i2c3 { diff --git a/arch/arm/boot/dts/omap3-overo-base.dtsi b/arch/arm/boot/dts/omap3-overo-base.dtsi index cd220342a805..f25e158e7163 100644 --- a/arch/arm/boot/dts/omap3-overo-base.dtsi +++ b/arch/arm/boot/dts/omap3-overo-base.dtsi @@ -181,7 +181,7 @@ &mmc2 { pinctrl-names = "default"; pinctrl-0 = <&mmc2_pins>; vmmc-supply = <&w3cbw003c_npoweron>; - vmmc_aux-supply = <&w3cbw003c_wifi_nreset>; + vqmmc-supply = <&w3cbw003c_wifi_nreset>; bus-width = <4>; cap-sdio-irq; non-removable; diff --git a/arch/arm/boot/dts/omap3-tao3530.dtsi b/arch/arm/boot/dts/omap3-tao3530.dtsi index 06ac0f80bcf0..9a601d15247b 100644 --- a/arch/arm/boot/dts/omap3-tao3530.dtsi +++ b/arch/arm/boot/dts/omap3-tao3530.dtsi @@ -223,7 +223,7 @@ &mmc1 { pinctrl-names = "default"; pinctrl-0 = <&mmc1_pins>; vmmc-supply = <&vmmc1>; - vmmc_aux-supply = <&vsim>; + vqmmc-supply = <&vsim>; cd-gpios = <&twl_gpio 0 GPIO_ACTIVE_HIGH>; bus-width = <8>; }; diff --git a/arch/arm/boot/dts/omap3-zoom3.dts b/arch/arm/boot/dts/omap3-zoom3.dts index 45e2ce0803de..96d0301a336a 100644 --- a/arch/arm/boot/dts/omap3-zoom3.dts +++ b/arch/arm/boot/dts/omap3-zoom3.dts @@ -174,7 +174,7 @@ &twl_gpio { &mmc1 { vmmc-supply = <&vmmc1>; - vmmc_aux-supply = <&vsim>; + vqmmc-supply = <&vsim>; bus-width = <4>; pinctrl-names = "default"; pinctrl-0 = <&mmc1_pins>; diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi index a3ff4933dbc1..bdaf30c8c405 100644 --- a/arch/arm/boot/dts/omap3.dtsi +++ b/arch/arm/boot/dts/omap3.dtsi @@ -713,14 +713,12 @@ usbhshost: usbhshost@48064000 { usbhsohci: ohci@48064400 { compatible = "ti,ohci-omap3"; reg = <0x48064400 0x400>; - interrupt-parent = <&intc>; interrupts = <76>; }; usbhsehci: ehci@48064800 { compatible = "ti,ehci-omap"; reg = <0x48064800 0x400>; - interrupt-parent = <&intc>; interrupts = <77>; }; }; @@ -831,7 +829,6 @@ ssi_port1: ssi-port@4805a000 { reg-names = "tx", "rx"; - interrupt-parent = <&intc>; interrupts = <67>, <68>; }; @@ -844,7 +841,6 @@ ssi_port2: ssi-port@4805b000 { reg-names = "tx", "rx"; - interrupt-parent = <&intc>; interrupts = <69>, <70>; }; diff --git a/arch/arm/boot/dts/omap3430-sdp.dts b/arch/arm/boot/dts/omap3430-sdp.dts index abd6921143be..908951eb5943 100644 --- a/arch/arm/boot/dts/omap3430-sdp.dts +++ b/arch/arm/boot/dts/omap3430-sdp.dts @@ -33,7 +33,7 @@ twl: twl@48 { &mmc1 { vmmc-supply = <&vmmc1>; - vmmc_aux-supply = <&vsim>; + vqmmc-supply = <&vsim>; /* * S6-3 must be in ON position for 8 bit mode to function * Else, use 4 bit mode diff --git a/arch/arm/boot/dts/omap4-droid4-xt894.dts b/arch/arm/boot/dts/omap4-droid4-xt894.dts index 10ca1c174995..8b93d37310f2 100644 --- 
a/arch/arm/boot/dts/omap4-droid4-xt894.dts +++ b/arch/arm/boot/dts/omap4-droid4-xt894.dts @@ -129,6 +129,34 @@ touchscreen_reset { output-high; line-name = "touchscreen-reset"; }; + + pwm8: dmtimer-pwm-8 { + pinctrl-names = "default"; + pinctrl-0 = <&vibrator_direction_pin>; + + compatible = "ti,omap-dmtimer-pwm"; + #pwm-cells = <3>; + ti,timers = <&timer8>; + ti,clock-source = <0x01>; + }; + + pwm9: dmtimer-pwm-9 { + pinctrl-names = "default"; + pinctrl-0 = <&vibrator_enable_pin>; + + compatible = "ti,omap-dmtimer-pwm"; + #pwm-cells = <3>; + ti,timers = <&timer9>; + ti,clock-source = <0x01>; + }; + + vibrator { + compatible = "pwm-vibrator"; + pwms = <&pwm9 0 10000000 0>, <&pwm8 0 10000000 0>; + pwm-names = "enable", "direction"; + direction-duty-cycle-ns = <10000000>; + }; + }; &dsi1 { @@ -373,7 +401,7 @@ OMAP4_IOPAD(0x098, PIN_INPUT | MUX_MODE3) /* hdmi_cec.hdmi_cec, hdmi_scl.hdmi_scl, hdmi_sda.hdmi_sda */ dss_hdmi_pins: pinmux_dss_hdmi_pins { pinctrl-single,pins = < - OMAP4_IOPAD(0x09a, PIN_INPUT_PULLUP | MUX_MODE0) + OMAP4_IOPAD(0x09a, PIN_INPUT | MUX_MODE0) OMAP4_IOPAD(0x09c, PIN_INPUT | MUX_MODE0) OMAP4_IOPAD(0x09e, PIN_INPUT | MUX_MODE0) >; @@ -488,6 +516,18 @@ usb_gpio_mux_sel2: pinmux_usb_gpio_mux_sel2_pins { OMAP4_IOPAD(0x040, PIN_OUTPUT_PULLDOWN | MUX_MODE3) >; }; + + vibrator_direction_pin: pinmux_vibrator_direction_pin { + pinctrl-single,pins = < + OMAP4_IOPAD(0x1ce, PIN_OUTPUT | MUX_MODE1) /* dmtimer8_pwm_evt (gpio_27) */ + >; + }; + + vibrator_enable_pin: pinmux_vibrator_enable_pin { + pinctrl-single,pins = < + OMAP4_IOPAD(0X1d0, PIN_OUTPUT | MUX_MODE1) /* dmtimer9_pwm_evt (gpio_28) */ + >; + }; }; &uart3 { diff --git a/arch/arm/boot/dts/omap4-duovero-parlor.dts b/arch/arm/boot/dts/omap4-duovero-parlor.dts index 1b825128a7b9..a9a584b5b955 100644 --- a/arch/arm/boot/dts/omap4-duovero-parlor.dts +++ b/arch/arm/boot/dts/omap4-duovero-parlor.dts @@ -100,7 +100,7 @@ OMAP4_IOPAD(0x070, PIN_INPUT_PULLUP | MUX_MODE3) /* gpmc_a24.gpio_48: amdix enab dss_hdmi_pins: pinmux_dss_hdmi_pins { pinctrl-single,pins = < OMAP4_IOPAD(0x098, PIN_INPUT | MUX_MODE3) /* hdmi_hpd.gpio_63 */ - OMAP4_IOPAD(0x09a, PIN_INPUT_PULLUP | MUX_MODE0) /* hdmi_cec.hdmi_cec */ + OMAP4_IOPAD(0x09a, PIN_INPUT | MUX_MODE0) /* hdmi_cec.hdmi_cec */ OMAP4_IOPAD(0x09c, PIN_INPUT_PULLUP | MUX_MODE0) /* hdmi_ddc_scl.hdmi_ddc_scl */ OMAP4_IOPAD(0x09e, PIN_INPUT_PULLUP | MUX_MODE0) /* hdmi_ddc_sda.hdmi_ddc_sda */ >; diff --git a/arch/arm/boot/dts/omap4-panda-common.dtsi b/arch/arm/boot/dts/omap4-panda-common.dtsi index edbc4090297d..2b48e51c372a 100644 --- a/arch/arm/boot/dts/omap4-panda-common.dtsi +++ b/arch/arm/boot/dts/omap4-panda-common.dtsi @@ -267,7 +267,7 @@ OMAP4_IOPAD(0x184, PIN_OUTPUT | MUX_MODE3) /* gpio_0 */ dss_hdmi_pins: pinmux_dss_hdmi_pins { pinctrl-single,pins = < - OMAP4_IOPAD(0x09a, PIN_INPUT_PULLUP | MUX_MODE0) /* hdmi_cec.hdmi_cec */ + OMAP4_IOPAD(0x09a, PIN_INPUT | MUX_MODE0) /* hdmi_cec.hdmi_cec */ OMAP4_IOPAD(0x09c, PIN_INPUT_PULLUP | MUX_MODE0) /* hdmi_scl.hdmi_scl */ OMAP4_IOPAD(0x09e, PIN_INPUT_PULLUP | MUX_MODE0) /* hdmi_sda.hdmi_sda */ >; diff --git a/arch/arm/boot/dts/omap4-sdp-es23plus.dts b/arch/arm/boot/dts/omap4-sdp-es23plus.dts index b4d19a7ae393..3d3140fd9659 100644 --- a/arch/arm/boot/dts/omap4-sdp-es23plus.dts +++ b/arch/arm/boot/dts/omap4-sdp-es23plus.dts @@ -10,7 +10,7 @@ /* SDP boards with 4430 ES2.3+ or 4460 have external pullups on SCL & SDA */ &dss_hdmi_pins { pinctrl-single,pins = < - OMAP4_IOPAD(0x09a, PIN_INPUT_PULLUP | MUX_MODE0) /* hdmi_cec.hdmi_cec */ + OMAP4_IOPAD(0x09a, 
PIN_INPUT | MUX_MODE0) /* hdmi_cec.hdmi_cec */ OMAP4_IOPAD(0x09c, PIN_INPUT | MUX_MODE0) /* hdmi_scl.hdmi_scl */ OMAP4_IOPAD(0x09e, PIN_INPUT | MUX_MODE0) /* hdmi_sda.hdmi_sda */ >; diff --git a/arch/arm/boot/dts/omap4-sdp.dts b/arch/arm/boot/dts/omap4-sdp.dts index d728ec963111..280d92d42bf1 100644 --- a/arch/arm/boot/dts/omap4-sdp.dts +++ b/arch/arm/boot/dts/omap4-sdp.dts @@ -290,7 +290,7 @@ OMAP4_IOPAD(0x138, PIN_INPUT | MUX_MODE0) /* mcspi1_cs0.mcspi1_cs0 */ dss_hdmi_pins: pinmux_dss_hdmi_pins { pinctrl-single,pins = < - OMAP4_IOPAD(0x09a, PIN_INPUT_PULLUP | MUX_MODE0) /* hdmi_cec.hdmi_cec */ + OMAP4_IOPAD(0x09a, PIN_INPUT | MUX_MODE0) /* hdmi_cec.hdmi_cec */ OMAP4_IOPAD(0x09c, PIN_INPUT_PULLUP | MUX_MODE0) /* hdmi_scl.hdmi_scl */ OMAP4_IOPAD(0x09e, PIN_INPUT_PULLUP | MUX_MODE0) /* hdmi_sda.hdmi_sda */ >; diff --git a/arch/arm/boot/dts/omap4-var-om44customboard.dtsi b/arch/arm/boot/dts/omap4-var-om44customboard.dtsi index 74940b6d7719..676d8dd0624a 100644 --- a/arch/arm/boot/dts/omap4-var-om44customboard.dtsi +++ b/arch/arm/boot/dts/omap4-var-om44customboard.dtsi @@ -122,7 +122,7 @@ OMAP4_IOPAD(0x1d4, PIN_OUTPUT | MUX_MODE5) /* dispc2_data0 */ dss_hdmi_pins: pinmux_dss_hdmi_pins { pinctrl-single,pins = < - OMAP4_IOPAD(0x09a, PIN_INPUT_PULLUP | MUX_MODE0) /* hdmi_cec.hdmi_cec */ + OMAP4_IOPAD(0x09a, PIN_INPUT | MUX_MODE0) /* hdmi_cec.hdmi_cec */ OMAP4_IOPAD(0x09c, PIN_INPUT_PULLUP | MUX_MODE0) /* hdmi_scl.hdmi_scl */ OMAP4_IOPAD(0x09e, PIN_INPUT_PULLUP | MUX_MODE0) /* hdmi_sda.hdmi_sda */ >; diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi index 4caadb253249..7824b2631cb6 100644 --- a/arch/arm/boot/dts/omap5-board-common.dtsi +++ b/arch/arm/boot/dts/omap5-board-common.dtsi @@ -290,7 +290,7 @@ OMAP5_IOPAD(0x1b6, PIN_OUTPUT | MUX_MODE0) /* uart5_cts.uart5_rts */ dss_hdmi_pins: pinmux_dss_hdmi_pins { pinctrl-single,pins = < - OMAP5_IOPAD(0x13c, PIN_INPUT_PULLUP | MUX_MODE0) /* hdmi_cec.hdmi_cec */ + OMAP5_IOPAD(0x13c, PIN_INPUT | MUX_MODE0) /* hdmi_cec.hdmi_cec */ OMAP5_IOPAD(0x140, PIN_INPUT | MUX_MODE0) /* hdmi_ddc_scl.hdmi_ddc_scl */ OMAP5_IOPAD(0x142, PIN_INPUT | MUX_MODE0) /* hdmi_ddc_sda.hdmi_ddc_sda */ >; diff --git a/arch/arm/boot/dts/omap5-cm-t54.dts b/arch/arm/boot/dts/omap5-cm-t54.dts index 78397f66d0b2..552a5c4c5942 100644 --- a/arch/arm/boot/dts/omap5-cm-t54.dts +++ b/arch/arm/boot/dts/omap5-cm-t54.dts @@ -266,7 +266,7 @@ OMAP5_IOPAD(0x00b6, PIN_OUTPUT | MUX_MODE6) /* hsi2_acdata.gpio3_83 */ dss_hdmi_pins: pinmux_dss_hdmi_pins { pinctrl-single,pins = < - OMAP5_IOPAD(0x013c, PIN_INPUT_PULLUP | MUX_MODE0) /* hdmi_cec */ + OMAP5_IOPAD(0x013c, PIN_INPUT | MUX_MODE0) /* hdmi_cec */ OMAP5_IOPAD(0x0140, PIN_INPUT | MUX_MODE0) /* hdmi_ddc_scl */ OMAP5_IOPAD(0x0142, PIN_INPUT | MUX_MODE0) /* hdmi_ddc_sda */ >; diff --git a/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1.dtsi b/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1.dtsi index b9457dd21a69..e413b21ee331 100644 --- a/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1.dtsi +++ b/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1.dtsi @@ -20,27 +20,12 @@ / { model = "Qualcomm Technologies, Inc. 
IPQ4019/AP-DK01.1"; compatible = "qcom,ipq4019"; - clocks { - xo: xo { - compatible = "fixed-clock"; - clock-frequency = <48000000>; - #clock-cells = <0>; - }; - }; - soc { - - - timer { - compatible = "arm,armv7-timer"; - interrupts = <1 2 0xf08>, - <1 3 0xf08>, - <1 4 0xf08>, - <1 1 0xf08>; - clock-frequency = <48000000>; + rng@22000 { + status = "ok"; }; - pinctrl@0x01000000 { + pinctrl@1000000 { serial_pins: serial_pinmux { mux { pins = "gpio60", "gpio61"; @@ -108,5 +93,13 @@ crypto@8e3a000 { watchdog@b017000 { status = "ok"; }; + + wifi@a000000 { + status = "ok"; + }; + + wifi@a800000 { + status = "ok"; + }; }; }; diff --git a/arch/arm/boot/dts/qcom-ipq4019.dtsi b/arch/arm/boot/dts/qcom-ipq4019.dtsi index 4b7d97275c62..10d112a4078e 100644 --- a/arch/arm/boot/dts/qcom-ipq4019.dtsi +++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi @@ -96,6 +96,21 @@ sleep_clk: sleep_clk { clock-frequency = <32768>; #clock-cells = <0>; }; + + xo: xo { + compatible = "fixed-clock"; + clock-frequency = <48000000>; + #clock-cells = <0>; + }; + }; + + timer { + compatible = "arm,armv7-timer"; + interrupts = <1 2 0xf08>, + <1 3 0xf08>, + <1 4 0xf08>, + <1 1 0xf08>; + clock-frequency = <48000000>; }; soc { @@ -119,7 +134,15 @@ gcc: clock-controller@1800000 { reg = <0x1800000 0x60000>; }; - tlmm: pinctrl@0x01000000 { + rng@22000 { + compatible = "qcom,prng"; + reg = <0x22000 0x140>; + clocks = <&gcc GCC_PRNG_AHB_CLK>; + clock-names = "core"; + status = "disabled"; + }; + + tlmm: pinctrl@1000000 { compatible = "qcom,ipq4019-pinctrl"; reg = <0x01000000 0x300000>; gpio-controller; @@ -269,5 +292,89 @@ restart@4ab000 { compatible = "qcom,pshold"; reg = <0x4ab000 0x4>; }; + + wifi0: wifi@a000000 { + compatible = "qcom,ipq4019-wifi"; + reg = <0xa000000 0x200000>; + resets = <&gcc WIFI0_CPU_INIT_RESET>, + <&gcc WIFI0_RADIO_SRIF_RESET>, + <&gcc WIFI0_RADIO_WARM_RESET>, + <&gcc WIFI0_RADIO_COLD_RESET>, + <&gcc WIFI0_CORE_WARM_RESET>, + <&gcc WIFI0_CORE_COLD_RESET>; + reset-names = "wifi_cpu_init", "wifi_radio_srif", + "wifi_radio_warm", "wifi_radio_cold", + "wifi_core_warm", "wifi_core_cold"; + clocks = <&gcc GCC_WCSS2G_CLK>, + <&gcc GCC_WCSS2G_REF_CLK>, + <&gcc GCC_WCSS2G_RTC_CLK>; + clock-names = "wifi_wcss_cmd", "wifi_wcss_ref", + "wifi_wcss_rtc"; + interrupts = , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + interrupt-names = "msi0", "msi1", "msi2", "msi3", + "msi4", "msi5", "msi6", "msi7", + "msi8", "msi9", "msi10", "msi11", + "msi12", "msi13", "msi14", "msi15", + "legacy"; + status = "disabled"; + }; + + wifi1: wifi@a800000 { + compatible = "qcom,ipq4019-wifi"; + reg = <0xa800000 0x200000>; + resets = <&gcc WIFI1_CPU_INIT_RESET>, + <&gcc WIFI1_RADIO_SRIF_RESET>, + <&gcc WIFI1_RADIO_WARM_RESET>, + <&gcc WIFI1_RADIO_COLD_RESET>, + <&gcc WIFI1_CORE_WARM_RESET>, + <&gcc WIFI1_CORE_COLD_RESET>; + reset-names = "wifi_cpu_init", "wifi_radio_srif", + "wifi_radio_warm", "wifi_radio_cold", + "wifi_core_warm", "wifi_core_cold"; + clocks = <&gcc GCC_WCSS5G_CLK>, + <&gcc GCC_WCSS5G_REF_CLK>, + <&gcc GCC_WCSS5G_RTC_CLK>; + clock-names = "wifi_wcss_cmd", "wifi_wcss_ref", + "wifi_wcss_rtc"; + interrupts = , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + interrupt-names = "msi0", "msi1", "msi2", "msi3", + "msi4", "msi5", "msi6", "msi7", + "msi8", "msi9", "msi10", "msi11", + "msi12", "msi13", "msi14", "msi15", + "legacy"; + status = "disabled"; + }; }; }; diff --git a/arch/arm/boot/dts/qcom-msm8974.dtsi b/arch/arm/boot/dts/qcom-msm8974.dtsi index c5ee68a3f7f5..a39207625354 100644 --- 
a/arch/arm/boot/dts/qcom-msm8974.dtsi +++ b/arch/arm/boot/dts/qcom-msm8974.dtsi @@ -779,7 +779,7 @@ tpiu_in: endpoint { }; replicator@fc31c000 { - compatible = "qcom,coresight-replicator1x", "arm,primecell"; + compatible = "arm,coresight-dynamic-replicator", "arm,primecell"; reg = <0xfc31c000 0x1000>; clocks = <&rpmcc RPM_SMD_QDSS_CLK>, <&rpmcc RPM_SMD_QDSS_A_CLK>; diff --git a/arch/arm/boot/dts/r7s72100-genmai.dts b/arch/arm/boot/dts/r7s72100-genmai.dts index 52a7b586bac7..cd4d5ff7749e 100644 --- a/arch/arm/boot/dts/r7s72100-genmai.dts +++ b/arch/arm/boot/dts/r7s72100-genmai.dts @@ -11,6 +11,8 @@ /dts-v1/; #include "r7s72100.dtsi" +#include +#include / { model = "Genmai"; @@ -34,6 +36,54 @@ lbsc { #address-cells = <1>; #size-cells = <1>; }; + + leds { + status = "okay"; + compatible = "gpio-leds"; + + led1 { + gpios = <&port4 10 GPIO_ACTIVE_LOW>; + }; + + led2 { + gpios = <&port4 11 GPIO_ACTIVE_LOW>; + }; + }; +}; + +&pinctrl { + + scif2_pins: serial2 { + /* P3_0 as TxD2; P3_2 as RxD2 */ + pinmux = , ; + }; + + i2c2_pins: i2c2 { + /* RIIC2: P1_4 as SCL, P1_5 as SDA */ + pinmux = , ; + }; + + ether_pins: ether { + /* Ethernet on Ports 1,2,3,5 */ + pinmux = ,/* P1_14 = ET_COL */ + , /* P5_9 = ET_MDC */ + , /* P3_3 = ET_MDIO */ + , /* P3_4 = ET_RXCLK */ + , /* P3_5 = ET_RXER */ + , /* P3_6 = ET_RXDV */ + , /* P2_0 = ET_TXCLK */ + , /* P2_1 = ET_TXER */ + , /* P2_2 = ET_TXEN */ + , /* P2_3 = ET_CRS */ + , /* P2_4 = ET_TXD0 */ + , /* P2_5 = ET_TXD1 */ + , /* P2_6 = ET_TXD2 */ + , /* P2_7 = ET_TXD3 */ + , /* P2_8 = ET_RXD0 */ + , /* P2_9 = ET_RXD1 */ + ,/* P2_10 = ET_RXD2 */ + ;/* P2_11 = ET_RXD3 */ + }; }; &extal_clk { @@ -52,12 +102,28 @@ &mtu2 { status = "okay"; }; +ðer { + pinctrl-names = "default"; + pinctrl-0 = <ðer_pins>; + + status = "okay"; + + renesas,no-ether-link; + phy-handle = <&phy0>; + phy0: ethernet-phy@0 { + reg = <0>; + }; +}; + &i2c2 { status = "okay"; clock-frequency = <400000>; + pinctrl-names = "default"; + pinctrl-0 = <&i2c2_pins>; + eeprom@50 { - compatible = "renesas,24c128"; + compatible = "renesas,24c128", "atmel,24c128"; reg = <0x50>; pagesize = <64>; }; @@ -68,6 +134,9 @@ &rtc { }; &scif2 { + pinctrl-names = "default"; + pinctrl-0 = <&scif2_pins>; + status = "okay"; }; diff --git a/arch/arm/boot/dts/r7s72100-rskrza1.dts b/arch/arm/boot/dts/r7s72100-rskrza1.dts index 72df20a04320..5dcaaf131d27 100644 --- a/arch/arm/boot/dts/r7s72100-rskrza1.dts +++ b/arch/arm/boot/dts/r7s72100-rskrza1.dts @@ -10,6 +10,8 @@ /dts-v1/; #include "r7s72100.dtsi" +#include +#include / { model = "RSKRZA1"; @@ -33,6 +35,15 @@ lbsc { #address-cells = <1>; #size-cells = <1>; }; + + leds { + status = "okay"; + compatible = "gpio-leds"; + + led0 { + gpios = <&port7 1 GPIO_ACTIVE_LOW>; + }; + }; }; &extal_clk { @@ -47,11 +58,57 @@ &rtc_x1_clk { clock-frequency = <32768>; }; +&pinctrl { + + /* Serial Console */ + scif2_pins: serial2 { + pinmux = , /* TxD2 */ + ; /* RxD2 */ + }; + + /* Ethernet */ + ether_pins: ether { + /* Ethernet on Ports 1,2,3,5 */ + pinmux = , /* ET_COL */ + , /* ET_MDC */ + , /* ET_MDIO */ + , /* ET_RXCLK */ + , /* ET_RXER */ + , /* ET_RXDV */ + , /* ET_TXCLK */ + , /* ET_TXER */ + , /* ET_TXEN */ + , /* ET_CRS */ + , /* ET_TXD0 */ + , /* ET_TXD1 */ + , /* ET_TXD2 */ + , /* ET_TXD3 */ + , /* ET_RXD0 */ + , /* ET_RXD1 */ + , /* ET_RXD2 */ + ; /* ET_RXD3 */ + }; + + /* SDHI ch1 on CN1 */ + sdhi1_pins: sdhi1 { + pinmux = , /* SD_CD_1 */ + , /* SD_WP_1 */ + , /* SD_D1_1 */ + , /* SD_D0_1 */ + , /* SD_CLK_1 */ + , /* SD_CMD_1 */ + , /* SD_D3_1 */ + ; /* SD_D2_1 */ + }; +}; + 
&mtu2 { status = "okay"; }; ðer { + pinctrl-names = "default"; + pinctrl-0 = <ðer_pins>; status = "okay"; renesas,no-ether-link; phy-handle = <&phy0>; @@ -61,6 +118,8 @@ phy0: ethernet-phy@0 { }; &sdhi1 { + pinctrl-names = "default"; + pinctrl-0 = <&sdhi1_pins>; bus-width = <4>; status = "okay"; }; @@ -78,5 +137,7 @@ &rtc { }; &scif2 { + pinctrl-names = "default"; + pinctrl-0 = <&scif2_pins>; status = "okay"; }; diff --git a/arch/arm/boot/dts/r7s72100.dtsi b/arch/arm/boot/dts/r7s72100.dtsi index 5cf53e9943af..4ed12a4d9d51 100644 --- a/arch/arm/boot/dts/r7s72100.dtsi +++ b/arch/arm/boot/dts/r7s72100.dtsi @@ -207,6 +207,84 @@ cpu@0 { }; }; + pinctrl: pin-controller@fcfe3000 { + compatible = "renesas,r7s72100-ports"; + + reg = <0xfcfe3000 0x4230>; + + port0: gpio-0 { + gpio-controller; + #gpio-cells = <2>; + gpio-ranges = <&pinctrl 0 0 6>; + }; + + port1: gpio-1 { + gpio-controller; + #gpio-cells = <2>; + gpio-ranges = <&pinctrl 0 16 16>; + }; + + port2: gpio-2 { + gpio-controller; + #gpio-cells = <2>; + gpio-ranges = <&pinctrl 0 32 16>; + }; + + port3: gpio-3 { + gpio-controller; + #gpio-cells = <2>; + gpio-ranges = <&pinctrl 0 48 16>; + }; + + port4: gpio-4 { + gpio-controller; + #gpio-cells = <2>; + gpio-ranges = <&pinctrl 0 64 16>; + }; + + port5: gpio-5 { + gpio-controller; + #gpio-cells = <2>; + gpio-ranges = <&pinctrl 0 80 11>; + }; + + port6: gpio-6 { + gpio-controller; + #gpio-cells = <2>; + gpio-ranges = <&pinctrl 0 96 16>; + }; + + port7: gpio-7 { + gpio-controller; + #gpio-cells = <2>; + gpio-ranges = <&pinctrl 0 112 16>; + }; + + port8: gpio-8 { + gpio-controller; + #gpio-cells = <2>; + gpio-ranges = <&pinctrl 0 128 16>; + }; + + port9: gpio-9 { + gpio-controller; + #gpio-cells = <2>; + gpio-ranges = <&pinctrl 0 144 8>; + }; + + port10: gpio-10 { + gpio-controller; + #gpio-cells = <2>; + gpio-ranges = <&pinctrl 0 160 16>; + }; + + port11: gpio-11 { + gpio-controller; + #gpio-cells = <2>; + gpio-ranges = <&pinctrl 0 176 16>; + }; + }; + scif0: serial@e8007000 { compatible = "renesas,scif-r7s72100", "renesas,scif"; reg = <0xe8007000 64>; diff --git a/arch/arm/boot/dts/r8a7743-iwg20d-q7.dts b/arch/arm/boot/dts/r8a7743-iwg20d-q7.dts index 9b54783cc2a5..081af0192851 100644 --- a/arch/arm/boot/dts/r8a7743-iwg20d-q7.dts +++ b/arch/arm/boot/dts/r8a7743-iwg20d-q7.dts @@ -17,9 +17,40 @@ / { aliases { serial0 = &scif0; + ethernet0 = &avb; + }; +}; + +&pfc { + scif0_pins: scif0 { + groups = "scif0_data_d"; + function = "scif0"; + }; + + avb_pins: avb { + groups = "avb_mdio", "avb_gmii"; + function = "avb"; }; }; &scif0 { + pinctrl-0 = <&scif0_pins>; + pinctrl-names = "default"; + status = "okay"; }; + +&avb { + pinctrl-0 = <&avb_pins>; + pinctrl-names = "default"; + + phy-handle = <&phy3>; + phy-mode = "gmii"; + renesas,no-ether-link; + status = "okay"; + + phy3: ethernet-phy@3 { + reg = <3>; + micrel,led-mode = <1>; + }; +}; diff --git a/arch/arm/boot/dts/r8a7743-iwg20m.dtsi b/arch/arm/boot/dts/r8a7743-iwg20m.dtsi index 001ca9144f4b..ff7993818637 100644 --- a/arch/arm/boot/dts/r8a7743-iwg20m.dtsi +++ b/arch/arm/boot/dts/r8a7743-iwg20m.dtsi @@ -22,8 +22,34 @@ memory@200000000 { device_type = "memory"; reg = <2 0x00000000 0 0x20000000>; }; + + reg_3p3v: 3p3v { + compatible = "regulator-fixed"; + regulator-name = "3P3V"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + regulator-boot-on; + }; }; &extal_clk { clock-frequency = <20000000>; }; + +&pfc { + mmcif0_pins: mmc { + groups = "mmc_data8_b", "mmc_ctrl"; + function = "mmc"; + }; +}; + 
+&mmcif0 { + pinctrl-0 = <&mmcif0_pins>; + pinctrl-names = "default"; + + vmmc-supply = <®_3p3v>; + bus-width = <8>; + non-removable; + status = "okay"; +}; diff --git a/arch/arm/boot/dts/r8a7743-sk-rzg1m.dts b/arch/arm/boot/dts/r8a7743-sk-rzg1m.dts index 3a22538208f2..3d918d106593 100644 --- a/arch/arm/boot/dts/r8a7743-sk-rzg1m.dts +++ b/arch/arm/boot/dts/r8a7743-sk-rzg1m.dts @@ -1,7 +1,7 @@ /* * Device Tree Source for the SK-RZG1M board * - * Copyright (C) 2016 Cogent Embedded, Inc. + * Copyright (C) 2016-2017 Cogent Embedded, Inc. * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any @@ -39,11 +39,34 @@ &extal_clk { clock-frequency = <20000000>; }; +&pfc { + scif0_pins: scif0 { + groups = "scif0_data_d"; + function = "scif0"; + }; + + ether_pins: ether { + groups = "eth_link", "eth_mdio", "eth_rmii"; + function = "eth"; + }; + + phy1_pins: phy1 { + groups = "intc_irq0"; + function = "intc"; + }; +}; + &scif0 { + pinctrl-0 = <&scif0_pins>; + pinctrl-names = "default"; + status = "okay"; }; ðer { + pinctrl-0 = <ðer_pins &phy1_pins>; + pinctrl-names = "default"; + phy-handle = <&phy1>; renesas,ether-link-active-low; status = "okay"; diff --git a/arch/arm/boot/dts/r8a7743.dtsi b/arch/arm/boot/dts/r8a7743.dtsi index 0ddac81742e4..14222c72f0e0 100644 --- a/arch/arm/boot/dts/r8a7743.dtsi +++ b/arch/arm/boot/dts/r8a7743.dtsi @@ -1,7 +1,7 @@ /* * Device Tree Source for the r8a7743 SoC * - * Copyright (C) 2016 Cogent Embedded Inc. + * Copyright (C) 2016-2017 Cogent Embedded Inc. * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any @@ -18,9 +18,19 @@ / { #address-cells = <2>; #size-cells = <2>; + aliases { + i2c0 = &i2c0; + i2c1 = &i2c1; + i2c2 = &i2c2; + i2c3 = &i2c3; + i2c4 = &i2c4; + i2c5 = &i2c5; + }; + cpus { #address-cells = <1>; #size-cells = <0>; + enable-method = "renesas,apmu"; cpu0: cpu@0 { device_type = "cpu"; @@ -28,8 +38,26 @@ cpu0: cpu@0 { reg = <0>; clock-frequency = <1500000000>; clocks = <&cpg CPG_CORE R8A7743_CLK_Z>; + clock-latency = <300000>; /* 300 us */ power-domains = <&sysc R8A7743_PD_CA15_CPU0>; next-level-cache = <&L2_CA15>; + + /* kHz - uV - OPPs unknown yet */ + operating-points = <1500000 1000000>, + <1312500 1000000>, + <1125000 1000000>, + < 937500 1000000>, + < 750000 1000000>, + < 375000 1000000>; + }; + + cpu1: cpu@1 { + device_type = "cpu"; + compatible = "arm,cortex-a15"; + reg = <1>; + clock-frequency = <1500000000>; + power-domains = <&sysc R8A7743_PD_CA15_CPU1>; + next-level-cache = <&L2_CA15>; }; L2_CA15: cache-controller-0 { @@ -48,6 +76,12 @@ soc { #size-cells = <2>; ranges; + apmu@e6152000 { + compatible = "renesas,r8a7743-apmu", "renesas,apmu"; + reg = <0 0xe6152000 0 0x188>; + cpus = <&cpu0 &cpu1>; + }; + gic: interrupt-controller@f1001000 { compatible = "arm,gic-400"; #interrupt-cells = <3>; @@ -65,6 +99,126 @@ gic: interrupt-controller@f1001000 { resets = <&cpg 408>; }; + gpio0: gpio@e6050000 { + compatible = "renesas,gpio-r8a7743", + "renesas,gpio-rcar"; + reg = <0 0xe6050000 0 0x50>; + interrupts = ; + #gpio-cells = <2>; + gpio-controller; + gpio-ranges = <&pfc 0 0 32>; + #interrupt-cells = <2>; + interrupt-controller; + clocks = <&cpg CPG_MOD 912>; + power-domains = <&sysc R8A7743_PD_ALWAYS_ON>; + resets = <&cpg 912>; + }; + + gpio1: gpio@e6051000 { + compatible = "renesas,gpio-r8a7743", + "renesas,gpio-rcar"; + reg = <0 0xe6051000 0 0x50>; + interrupts = ; 
+ #gpio-cells = <2>; + gpio-controller; + gpio-ranges = <&pfc 0 32 26>; + #interrupt-cells = <2>; + interrupt-controller; + clocks = <&cpg CPG_MOD 911>; + power-domains = <&sysc R8A7743_PD_ALWAYS_ON>; + resets = <&cpg 911>; + }; + + gpio2: gpio@e6052000 { + compatible = "renesas,gpio-r8a7743", + "renesas,gpio-rcar"; + reg = <0 0xe6052000 0 0x50>; + interrupts = ; + #gpio-cells = <2>; + gpio-controller; + gpio-ranges = <&pfc 0 64 32>; + #interrupt-cells = <2>; + interrupt-controller; + clocks = <&cpg CPG_MOD 910>; + power-domains = <&sysc R8A7743_PD_ALWAYS_ON>; + resets = <&cpg 910>; + }; + + gpio3: gpio@e6053000 { + compatible = "renesas,gpio-r8a7743", + "renesas,gpio-rcar"; + reg = <0 0xe6053000 0 0x50>; + interrupts = ; + #gpio-cells = <2>; + gpio-controller; + gpio-ranges = <&pfc 0 96 32>; + #interrupt-cells = <2>; + interrupt-controller; + clocks = <&cpg CPG_MOD 909>; + power-domains = <&sysc R8A7743_PD_ALWAYS_ON>; + resets = <&cpg 909>; + }; + + gpio4: gpio@e6054000 { + compatible = "renesas,gpio-r8a7743", + "renesas,gpio-rcar"; + reg = <0 0xe6054000 0 0x50>; + interrupts = ; + #gpio-cells = <2>; + gpio-controller; + gpio-ranges = <&pfc 0 128 32>; + #interrupt-cells = <2>; + interrupt-controller; + clocks = <&cpg CPG_MOD 908>; + power-domains = <&sysc R8A7743_PD_ALWAYS_ON>; + resets = <&cpg 908>; + }; + + gpio5: gpio@e6055000 { + compatible = "renesas,gpio-r8a7743", + "renesas,gpio-rcar"; + reg = <0 0xe6055000 0 0x50>; + interrupts = ; + #gpio-cells = <2>; + gpio-controller; + gpio-ranges = <&pfc 0 160 32>; + #interrupt-cells = <2>; + interrupt-controller; + clocks = <&cpg CPG_MOD 907>; + power-domains = <&sysc R8A7743_PD_ALWAYS_ON>; + resets = <&cpg 907>; + }; + + gpio6: gpio@e6055400 { + compatible = "renesas,gpio-r8a7743", + "renesas,gpio-rcar"; + reg = <0 0xe6055400 0 0x50>; + interrupts = ; + #gpio-cells = <2>; + gpio-controller; + gpio-ranges = <&pfc 0 192 32>; + #interrupt-cells = <2>; + interrupt-controller; + clocks = <&cpg CPG_MOD 905>; + power-domains = <&sysc R8A7743_PD_ALWAYS_ON>; + resets = <&cpg 905>; + }; + + gpio7: gpio@e6055800 { + compatible = "renesas,gpio-r8a7743", + "renesas,gpio-rcar"; + reg = <0 0xe6055800 0 0x50>; + interrupts = ; + #gpio-cells = <2>; + gpio-controller; + gpio-ranges = <&pfc 0 224 26>; + #interrupt-cells = <2>; + interrupt-controller; + clocks = <&cpg CPG_MOD 904>; + power-domains = <&sysc R8A7743_PD_ALWAYS_ON>; + resets = <&cpg 904>; + }; + irqc: interrupt-controller@e61c0000 { compatible = "renesas,irqc-r8a7743", "renesas,irqc"; #interrupt-cells = <2>; @@ -123,6 +277,11 @@ sysc: system-controller@e6180000 { #power-domain-cells = <1>; }; + pfc: pin-controller@e6060000 { + compatible = "renesas,pfc-r8a7743"; + reg = <0 0xe6060000 0 0x250>; + }; + dmac0: dma-controller@e6700000 { compatible = "renesas,dmac-r8a7743", "renesas,rcar-dmac"; @@ -189,6 +348,94 @@ GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH dma-channels = <15>; }; + /* The memory map in the User's Manual maps the cores to bus + * numbers + */ + i2c0: i2c@e6508000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "renesas,i2c-r8a7743", + "renesas,rcar-gen2-i2c"; + reg = <0 0xe6508000 0 0x40>; + interrupts = ; + clocks = <&cpg CPG_MOD 931>; + power-domains = <&sysc R8A7743_PD_ALWAYS_ON>; + resets = <&cpg 931>; + i2c-scl-internal-delay-ns = <6>; + status = "disabled"; + }; + + i2c1: i2c@e6518000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "renesas,i2c-r8a7743", + "renesas,rcar-gen2-i2c"; + reg = <0 0xe6518000 0 0x40>; + interrupts = ; + clocks = <&cpg CPG_MOD 930>; + 
power-domains = <&sysc R8A7743_PD_ALWAYS_ON>; + resets = <&cpg 930>; + i2c-scl-internal-delay-ns = <6>; + status = "disabled"; + }; + + i2c2: i2c@e6530000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "renesas,i2c-r8a7743", + "renesas,rcar-gen2-i2c"; + reg = <0 0xe6530000 0 0x40>; + interrupts = ; + clocks = <&cpg CPG_MOD 929>; + power-domains = <&sysc R8A7743_PD_ALWAYS_ON>; + resets = <&cpg 929>; + i2c-scl-internal-delay-ns = <6>; + status = "disabled"; + }; + + i2c3: i2c@e6540000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "renesas,i2c-r8a7743", + "renesas,rcar-gen2-i2c"; + reg = <0 0xe6540000 0 0x40>; + interrupts = ; + clocks = <&cpg CPG_MOD 928>; + power-domains = <&sysc R8A7743_PD_ALWAYS_ON>; + resets = <&cpg 928>; + i2c-scl-internal-delay-ns = <6>; + status = "disabled"; + }; + + i2c4: i2c@e6520000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "renesas,i2c-r8a7743", + "renesas,rcar-gen2-i2c"; + reg = <0 0xe6520000 0 0x40>; + interrupts = ; + clocks = <&cpg CPG_MOD 927>; + power-domains = <&sysc R8A7743_PD_ALWAYS_ON>; + resets = <&cpg 927>; + i2c-scl-internal-delay-ns = <6>; + status = "disabled"; + }; + + i2c5: i2c@e6528000 { + /* doesn't need pinmux */ + #address-cells = <1>; + #size-cells = <0>; + compatible = "renesas,i2c-r8a7743", + "renesas,rcar-gen2-i2c"; + reg = <0 0xe6528000 0 0x40>; + interrupts = ; + clocks = <&cpg CPG_MOD 925>; + power-domains = <&sysc R8A7743_PD_ALWAYS_ON>; + resets = <&cpg 925>; + i2c-scl-internal-delay-ns = <110>; + status = "disabled"; + }; + scifa0: serial@e6c40000 { compatible = "renesas,scifa-r8a7743", "renesas,rcar-gen2-scifa", "renesas,scifa"; @@ -468,6 +715,29 @@ hscif2: serial@e62d0000 { status = "disabled"; }; + icram2: sram@e6300000 { + compatible = "mmio-sram"; + reg = <0 0xe6300000 0 0x40000>; + }; + + icram0: sram@e63a0000 { + compatible = "mmio-sram"; + reg = <0 0xe63a0000 0 0x12000>; + }; + + icram1: sram@e63c0000 { + compatible = "mmio-sram"; + reg = <0 0xe63c0000 0 0x1000>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 0 0xe63c0000 0x1000>; + + smp-sram@0 { + compatible = "renesas,smp-sram"; + reg = <0 0x10>; + }; + }; + ether: ethernet@ee700000 { compatible = "renesas,ether-r8a7743"; reg = <0 0xee700000 0 0x400>; @@ -480,6 +750,35 @@ ether: ethernet@ee700000 { #size-cells = <0>; status = "disabled"; }; + + avb: ethernet@e6800000 { + compatible = "renesas,etheravb-r8a7743", + "renesas,etheravb-rcar-gen2"; + reg = <0 0xe6800000 0 0x800>, <0 0xee0e8000 0 0x4000>; + interrupts = ; + clocks = <&cpg CPG_MOD 812>; + power-domains = <&sysc R8A7743_PD_ALWAYS_ON>; + resets = <&cpg 812>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + mmcif0: mmc@ee200000 { + compatible = "renesas,mmcif-r8a7743", + "renesas,sh-mmcif"; + reg = <0 0xee200000 0 0x80>; + interrupts = ; + clocks = <&cpg CPG_MOD 315>; + dmas = <&dmac0 0xd1>, <&dmac0 0xd2>, + <&dmac1 0xd1>, <&dmac1 0xd2>; + dma-names = "tx", "rx", "tx", "rx"; + power-domains = <&sysc R8A7743_PD_ALWAYS_ON>; + resets = <&cpg 315>; + reg-io-width = <4>; + max-frequency = <97500000>; + status = "disabled"; + }; }; /* External root clock */ diff --git a/arch/arm/boot/dts/r8a7745-sk-rzg1e.dts b/arch/arm/boot/dts/r8a7745-sk-rzg1e.dts index 97840b340197..b4d679b04ad6 100644 --- a/arch/arm/boot/dts/r8a7745-sk-rzg1e.dts +++ b/arch/arm/boot/dts/r8a7745-sk-rzg1e.dts @@ -1,7 +1,7 @@ /* * Device Tree Source for the SK-RZG1E board * - * Copyright (C) 2016 Cogent Embedded, Inc. + * Copyright (C) 2016-2017 Cogent Embedded, Inc. 
* * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any @@ -34,11 +34,34 @@ &extal_clk { clock-frequency = <20000000>; }; +&pfc { + scif2_pins: scif2 { + groups = "scif2_data"; + function = "scif2"; + }; + + ether_pins: ether { + groups = "eth_link", "eth_mdio", "eth_rmii"; + function = "eth"; + }; + + phy1_pins: phy1 { + groups = "intc_irq8"; + function = "intc"; + }; +}; + &scif2 { + pinctrl-0 = <&scif2_pins>; + pinctrl-names = "default"; + status = "okay"; }; ðer { + pinctrl-0 = <ðer_pins &phy1_pins>; + pinctrl-names = "default"; + phy-handle = <&phy1>; renesas,ether-link-active-low; status = "okay"; diff --git a/arch/arm/boot/dts/r8a7745.dtsi b/arch/arm/boot/dts/r8a7745.dtsi index 2feb0084bb3b..aff90dfb8b32 100644 --- a/arch/arm/boot/dts/r8a7745.dtsi +++ b/arch/arm/boot/dts/r8a7745.dtsi @@ -1,7 +1,7 @@ /* * Device Tree Source for the r8a7745 SoC * - * Copyright (C) 2016 Cogent Embedded Inc. + * Copyright (C) 2016-2017 Cogent Embedded Inc. * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any @@ -123,6 +123,11 @@ sysc: system-controller@e6180000 { #power-domain-cells = <1>; }; + pfc: pin-controller@e6060000 { + compatible = "renesas,pfc-r8a7745"; + reg = <0 0xe6060000 0 0x11c>; + }; + dmac0: dma-controller@e6700000 { compatible = "renesas,dmac-r8a7745", "renesas,rcar-dmac"; @@ -468,6 +473,29 @@ hscif2: serial@e62d0000 { status = "disabled"; }; + icram2: sram@e6300000 { + compatible = "mmio-sram"; + reg = <0 0xe6300000 0 0x40000>; + }; + + icram0: sram@e63a0000 { + compatible = "mmio-sram"; + reg = <0 0xe63a0000 0 0x12000>; + }; + + icram1: sram@e63c0000 { + compatible = "mmio-sram"; + reg = <0 0xe63c0000 0 0x1000>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 0 0xe63c0000 0x1000>; + + smp-sram@0 { + compatible = "renesas,smp-sram"; + reg = <0 0x10>; + }; + }; + ether: ethernet@ee700000 { compatible = "renesas,ether-r8a7745"; reg = <0 0xee700000 0 0x400>; diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi index 2805a8608d4b..16358bf8d1db 100644 --- a/arch/arm/boot/dts/r8a7790.dtsi +++ b/arch/arm/boot/dts/r8a7790.dtsi @@ -830,6 +830,24 @@ hscif1: serial@e62c8000 { status = "disabled"; }; + icram0: sram@e63a0000 { + compatible = "mmio-sram"; + reg = <0 0xe63a0000 0 0x12000>; + }; + + icram1: sram@e63c0000 { + compatible = "mmio-sram"; + reg = <0 0xe63c0000 0 0x1000>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 0 0xe63c0000 0x1000>; + + smp-sram@0 { + compatible = "renesas,smp-sram"; + reg = <0 0x10>; + }; + }; + ether: ethernet@ee700000 { compatible = "renesas,ether-r8a7790"; reg = <0 0xee700000 0 0x400>; @@ -855,7 +873,7 @@ avb: ethernet@e6800000 { }; sata0: sata@ee300000 { - compatible = "renesas,sata-r8a7790"; + compatible = "renesas,sata-r8a7790", "renesas,rcar-gen2-sata"; reg = <0 0xee300000 0 0x2000>; interrupts = ; clocks = <&mstp8_clks R8A7790_CLK_SATA0>; @@ -864,7 +882,7 @@ sata0: sata@ee300000 { }; sata1: sata@ee500000 { - compatible = "renesas,sata-r8a7790"; + compatible = "renesas,sata-r8a7790", "renesas,rcar-gen2-sata"; reg = <0 0xee500000 0 0x2000>; interrupts = ; clocks = <&mstp8_clks R8A7790_CLK_SATA1>; @@ -909,7 +927,7 @@ usb2: usb-channel@2 { }; vin0: video@e6ef0000 { - compatible = "renesas,vin-r8a7790"; + compatible = "renesas,vin-r8a7790", "renesas,rcar-gen2-vin"; reg = <0 0xe6ef0000 0 0x1000>; interrupts = ; clocks = 
<&mstp8_clks R8A7790_CLK_VIN0>; @@ -918,7 +936,7 @@ vin0: video@e6ef0000 { }; vin1: video@e6ef1000 { - compatible = "renesas,vin-r8a7790"; + compatible = "renesas,vin-r8a7790", "renesas,rcar-gen2-vin"; reg = <0 0xe6ef1000 0 0x1000>; interrupts = ; clocks = <&mstp8_clks R8A7790_CLK_VIN1>; @@ -927,7 +945,7 @@ vin1: video@e6ef1000 { }; vin2: video@e6ef2000 { - compatible = "renesas,vin-r8a7790"; + compatible = "renesas,vin-r8a7790", "renesas,rcar-gen2-vin"; reg = <0 0xe6ef2000 0 0x1000>; interrupts = ; clocks = <&mstp8_clks R8A7790_CLK_VIN2>; @@ -936,7 +954,7 @@ vin2: video@e6ef2000 { }; vin3: video@e6ef3000 { - compatible = "renesas,vin-r8a7790"; + compatible = "renesas,vin-r8a7790", "renesas,rcar-gen2-vin"; reg = <0 0xe6ef3000 0 0x1000>; interrupts = ; clocks = <&mstp8_clks R8A7790_CLK_VIN3>; diff --git a/arch/arm/boot/dts/r8a7791-koelsch.dts b/arch/arm/boot/dts/r8a7791-koelsch.dts index 001e6116c47c..0ce0b278e1cb 100644 --- a/arch/arm/boot/dts/r8a7791-koelsch.dts +++ b/arch/arm/boot/dts/r8a7791-koelsch.dts @@ -642,11 +642,19 @@ adv7180: endpoint { }; }; + cec_clock: cec-clock { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <12000000>; + }; + hdmi@39 { compatible = "adi,adv7511w"; reg = <0x39>; interrupt-parent = <&gpio3>; interrupts = <29 IRQ_TYPE_LEVEL_LOW>; + clocks = <&cec_clock>; + clock-names = "cec"; adi,input-depth = <8>; adi,input-colorspace = "rgb"; @@ -702,7 +710,7 @@ adv7612_out: endpoint { }; eeprom@50 { - compatible = "renesas,24c02"; + compatible = "renesas,24c02", "atmel,24c02"; reg = <0x50>; pagesize = <16>; }; diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi index bd93f699ad84..f1d1a9772153 100644 --- a/arch/arm/boot/dts/r8a7791.dtsi +++ b/arch/arm/boot/dts/r8a7791.dtsi @@ -890,6 +890,24 @@ hscif2: serial@e62d0000 { status = "disabled"; }; + icram0: sram@e63a0000 { + compatible = "mmio-sram"; + reg = <0 0xe63a0000 0 0x12000>; + }; + + icram1: sram@e63c0000 { + compatible = "mmio-sram"; + reg = <0 0xe63c0000 0 0x1000>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 0 0xe63c0000 0x1000>; + + smp-sram@0 { + compatible = "renesas,smp-sram"; + reg = <0 0x10>; + }; + }; + ether: ethernet@ee700000 { compatible = "renesas,ether-r8a7791"; reg = <0 0xee700000 0 0x400>; @@ -915,7 +933,7 @@ avb: ethernet@e6800000 { }; sata0: sata@ee300000 { - compatible = "renesas,sata-r8a7791"; + compatible = "renesas,sata-r8a7791", "renesas,rcar-gen2-sata"; reg = <0 0xee300000 0 0x2000>; interrupts = ; clocks = <&mstp8_clks R8A7791_CLK_SATA0>; @@ -924,7 +942,7 @@ sata0: sata@ee300000 { }; sata1: sata@ee500000 { - compatible = "renesas,sata-r8a7791"; + compatible = "renesas,sata-r8a7791", "renesas,rcar-gen2-sata"; reg = <0 0xee500000 0 0x2000>; interrupts = ; clocks = <&mstp8_clks R8A7791_CLK_SATA1>; @@ -969,7 +987,7 @@ usb2: usb-channel@2 { }; vin0: video@e6ef0000 { - compatible = "renesas,vin-r8a7791"; + compatible = "renesas,vin-r8a7791", "renesas,rcar-gen2-vin"; reg = <0 0xe6ef0000 0 0x1000>; interrupts = ; clocks = <&mstp8_clks R8A7791_CLK_VIN0>; @@ -978,7 +996,7 @@ vin0: video@e6ef0000 { }; vin1: video@e6ef1000 { - compatible = "renesas,vin-r8a7791"; + compatible = "renesas,vin-r8a7791", "renesas,rcar-gen2-vin"; reg = <0 0xe6ef1000 0 0x1000>; interrupts = ; clocks = <&mstp8_clks R8A7791_CLK_VIN1>; @@ -987,7 +1005,7 @@ vin1: video@e6ef1000 { }; vin2: video@e6ef2000 { - compatible = "renesas,vin-r8a7791"; + compatible = "renesas,vin-r8a7791", "renesas,rcar-gen2-vin"; reg = <0 0xe6ef2000 0 0x1000>; interrupts = ; clocks = 
<&mstp8_clks R8A7791_CLK_VIN2>; diff --git a/arch/arm/boot/dts/r8a7792.dtsi b/arch/arm/boot/dts/r8a7792.dtsi index 0efecb232ee5..2623f39bed2b 100644 --- a/arch/arm/boot/dts/r8a7792.dtsi +++ b/arch/arm/boot/dts/r8a7792.dtsi @@ -465,6 +465,24 @@ hscif1: serial@e62c8000 { status = "disabled"; }; + icram0: sram@e63a0000 { + compatible = "mmio-sram"; + reg = <0 0xe63a0000 0 0x12000>; + }; + + icram1: sram@e63c0000 { + compatible = "mmio-sram"; + reg = <0 0xe63c0000 0 0x1000>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 0 0xe63c0000 0x1000>; + + smp-sram@0 { + compatible = "renesas,smp-sram"; + reg = <0 0x10>; + }; + }; + sdhi0: sd@ee100000 { compatible = "renesas,sdhi-r8a7792"; reg = <0 0xee100000 0 0x328>; diff --git a/arch/arm/boot/dts/r8a7793.dtsi b/arch/arm/boot/dts/r8a7793.dtsi index 13b980f27bbc..497716b6fbe2 100644 --- a/arch/arm/boot/dts/r8a7793.dtsi +++ b/arch/arm/boot/dts/r8a7793.dtsi @@ -848,6 +848,24 @@ hscif2: serial@e62d0000 { status = "disabled"; }; + icram0: sram@e63a0000 { + compatible = "mmio-sram"; + reg = <0 0xe63a0000 0 0x12000>; + }; + + icram1: sram@e63c0000 { + compatible = "mmio-sram"; + reg = <0 0xe63c0000 0 0x1000>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 0 0xe63c0000 0x1000>; + + smp-sram@0 { + compatible = "renesas,smp-sram"; + reg = <0 0x10>; + }; + }; + ether: ethernet@ee700000 { compatible = "renesas,ether-r8a7793"; reg = <0 0xee700000 0 0x400>; diff --git a/arch/arm/boot/dts/r8a7794.dtsi b/arch/arm/boot/dts/r8a7794.dtsi index 7d9a81d970d8..26535414203a 100644 --- a/arch/arm/boot/dts/r8a7794.dtsi +++ b/arch/arm/boot/dts/r8a7794.dtsi @@ -588,6 +588,24 @@ hscif2: serial@e62d0000 { status = "disabled"; }; + icram0: sram@e63a0000 { + compatible = "mmio-sram"; + reg = <0 0xe63a0000 0 0x12000>; + }; + + icram1: sram@e63c0000 { + compatible = "mmio-sram"; + reg = <0 0xe63c0000 0 0x1000>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 0 0xe63c0000 0x1000>; + + smp-sram@0 { + compatible = "renesas,smp-sram"; + reg = <0 0x10>; + }; + }; + ether: ethernet@ee700000 { compatible = "renesas,ether-r8a7794"; reg = <0 0xee700000 0 0x400>; @@ -783,7 +801,7 @@ qspi: spi@e6b10000 { }; vin0: video@e6ef0000 { - compatible = "renesas,vin-r8a7794"; + compatible = "renesas,vin-r8a7794", "renesas,rcar-gen2-vin"; reg = <0 0xe6ef0000 0 0x1000>; interrupts = ; clocks = <&mstp8_clks R8A7794_CLK_VIN0>; @@ -792,7 +810,7 @@ vin0: video@e6ef0000 { }; vin1: video@e6ef1000 { - compatible = "renesas,vin-r8a7794"; + compatible = "renesas,vin-r8a7794", "renesas,rcar-gen2-vin"; reg = <0 0xe6ef1000 0 0x1000>; interrupts = ; clocks = <&mstp8_clks R8A7794_CLK_VIN1>; diff --git a/arch/arm/boot/dts/rk3036-kylin.dts b/arch/arm/boot/dts/rk3036-kylin.dts index 5726135b7f8a..fdb1570bc7d3 100644 --- a/arch/arm/boot/dts/rk3036-kylin.dts +++ b/arch/arm/boot/dts/rk3036-kylin.dts @@ -357,7 +357,6 @@ &sdio { keep-power-in-suspend; mmc-pwrseq = <&sdio_pwrseq>; non-removable; - num-slots = <1>; pinctrl-names = "default"; pinctrl-0 = <&sdio_clk &sdio_cmd &sdio_bus4>; sd-uhs-sdr12; @@ -372,7 +371,6 @@ &sdmmc { cap-sd-highspeed; card-detect-delay = <200>; disable-wp; - num-slots = <1>; pinctrl-names = "default"; pinctrl-0 = <&sdmmc_clk>, <&sdmmc_cmd>, <&sdmmc_cd>, <&sdmmc_bus4>; }; diff --git a/arch/arm/boot/dts/rk3036.dtsi b/arch/arm/boot/dts/rk3036.dtsi index ec91325d3b6e..4916c65e0ace 100644 --- a/arch/arm/boot/dts/rk3036.dtsi +++ b/arch/arm/boot/dts/rk3036.dtsi @@ -287,7 +287,6 @@ emmc: dwmmc@1021c000 { fifo-depth = <0x100>; mmc-ddr-1_8v; non-removable; - num-slots = <1>; 
pinctrl-names = "default"; pinctrl-0 = <&emmc_clk &emmc_cmd &emmc_bus8>; resets = <&cru SRST_EMMC>; @@ -599,7 +598,7 @@ sdmmc_cmd: sdmmc-cmd { rockchip,pins = <1 15 RK_FUNC_1 &pcfg_pull_default>; }; - sdmmc_cd: sdmcc-cd { + sdmmc_cd: sdmmc-cd { rockchip,pins = <1 17 RK_FUNC_1 &pcfg_pull_default>; }; diff --git a/arch/arm/boot/dts/rk3066a-bqcurie2.dts b/arch/arm/boot/dts/rk3066a-bqcurie2.dts index e1f5198723b2..ef1eabf2512c 100644 --- a/arch/arm/boot/dts/rk3066a-bqcurie2.dts +++ b/arch/arm/boot/dts/rk3066a-bqcurie2.dts @@ -190,7 +190,6 @@ vcc28_cif: regulator@12 { #include "tps65910.dtsi" &mmc0 { /* sdmmc */ - num-slots = <1>; status = "okay"; pinctrl-names = "default"; pinctrl-0 = <&sd0_clk>, <&sd0_cmd>, <&sd0_cd>, <&sd0_bus4>; @@ -202,7 +201,6 @@ &mmc0 { /* sdmmc */ }; &mmc1 { /* wifi */ - num-slots = <1>; status = "okay"; non-removable; diff --git a/arch/arm/boot/dts/rk3066a-mk808.dts b/arch/arm/boot/dts/rk3066a-mk808.dts index 7ca1cf5241e0..13e285c53def 100644 --- a/arch/arm/boot/dts/rk3066a-mk808.dts +++ b/arch/arm/boot/dts/rk3066a-mk808.dts @@ -132,7 +132,6 @@ &mmc0 { bus-width = <4>; cap-mmc-highspeed; cap-sd-highspeed; - num-slots = <1>; vmmc-supply = <&vcc_sd>; status = "okay"; }; @@ -141,7 +140,6 @@ &mmc1 { bus-width = <4>; disable-wp; non-removable; - num-slots = <1>; pinctrl-0 = <&sd1_clk &sd1_cmd &sd1_bus4>; pinctrl-names = "default"; vmmc-supply = <&vcc_wifi>; diff --git a/arch/arm/boot/dts/rk3066a-rayeager.dts b/arch/arm/boot/dts/rk3066a-rayeager.dts index 8907deaab18e..400cbf9609e3 100644 --- a/arch/arm/boot/dts/rk3066a-rayeager.dts +++ b/arch/arm/boot/dts/rk3066a-rayeager.dts @@ -185,7 +185,6 @@ &emmc { cap-mmc-highspeed; disable-wp; non-removable; - num-slots = <1>; pinctrl-names = "default"; pinctrl-0 = <&emmc_clk>, <&emmc_cmd>, <&emmc_rst>; vmmc-supply = <&vcc_emmc>; @@ -336,7 +335,6 @@ &i2c4 { &mmc0 { bus-width = <4>; disable-wp; - num-slots = <1>; pinctrl-names = "default"; pinctrl-0 = <&sd0_clk>, <&sd0_cmd>, <&sd0_cd>, <&sd0_bus4>; vmmc-supply = <&vcc_sd>; @@ -349,7 +347,6 @@ &mmc1 { bus-width = <4>; disable-wp; non-removable; - num-slots = <1>; pinctrl-names = "default"; pinctrl-0 = <&sd1_clk>, <&sd1_cmd>, <&sd1_bus4>; vmmc-supply = <&vccio_wl>; diff --git a/arch/arm/boot/dts/rk3188-px3-evb.dts b/arch/arm/boot/dts/rk3188-px3-evb.dts index 5b2a0b6885cd..8ba9e06062f3 100644 --- a/arch/arm/boot/dts/rk3188-px3-evb.dts +++ b/arch/arm/boot/dts/rk3188-px3-evb.dts @@ -89,7 +89,6 @@ &emmc { cap-mmc-highspeed; disable-wp; non-removable; - num-slots = <1>; pinctrl-names = "default"; pinctrl-0 = <&emmc_clk>, <&emmc_cmd>, <&emmc_rst>; status = "okay"; @@ -256,7 +255,6 @@ gsl1680: touchscreen@40 { }; &mmc0 { - num-slots = <1>; status = "okay"; pinctrl-names = "default"; pinctrl-0 = <&sd0_clk>, <&sd0_cmd>, <&sd0_cd>, <&sd0_bus4>; diff --git a/arch/arm/boot/dts/rk3188-radxarock.dts b/arch/arm/boot/dts/rk3188-radxarock.dts index ca0a1c4bc15c..53d6fc2fdbce 100644 --- a/arch/arm/boot/dts/rk3188-radxarock.dts +++ b/arch/arm/boot/dts/rk3188-radxarock.dts @@ -296,7 +296,6 @@ vcc28: REG12 { }; &mmc0 { - num-slots = <1>; status = "okay"; pinctrl-names = "default"; pinctrl-0 = <&sd0_clk>, <&sd0_cmd>, <&sd0_cd>, <&sd0_bus4>; diff --git a/arch/arm/boot/dts/rk3228-evb.dts b/arch/arm/boot/dts/rk3228-evb.dts index 58834330a5ba..1be9daacc4f9 100644 --- a/arch/arm/boot/dts/rk3228-evb.dts +++ b/arch/arm/boot/dts/rk3228-evb.dts @@ -50,6 +50,16 @@ memory@60000000 { device_type = "memory"; reg = <0x60000000 0x40000000>; }; + + vcc_phy: vcc-phy-regulator { + compatible = "regulator-fixed"; + 
enable-active-high; + regulator-name = "vcc_phy"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; + regulator-boot-on; + }; }; &emmc { @@ -60,6 +70,30 @@ &emmc { status = "okay"; }; +&gmac { + assigned-clocks = <&cru SCLK_MAC_SRC>; + assigned-clock-rates = <50000000>; + clock_in_out = "output"; + phy-supply = <&vcc_phy>; + phy-mode = "rmii"; + phy-handle = <&phy>; + status = "okay"; + + mdio { + compatible = "snps,dwmac-mdio"; + #address-cells = <1>; + #size-cells = <0>; + + phy: phy@0 { + compatible = "ethernet-phy-id1234.d400", "ethernet-phy-ieee802.3-c22"; + reg = <0>; + clocks = <&cru SCLK_MAC_PHY>; + resets = <&cru SRST_MACPHY>; + phy-is-integrated; + }; + }; +}; + &tsadc { status = "okay"; diff --git a/arch/arm/boot/dts/rk3229-evb.dts b/arch/arm/boot/dts/rk3229-evb.dts index 1b55192b7d04..73e384585755 100644 --- a/arch/arm/boot/dts/rk3229-evb.dts +++ b/arch/arm/boot/dts/rk3229-evb.dts @@ -40,7 +40,8 @@ /dts-v1/; -#include "rk322x.dtsi" +#include +#include "rk3229.dtsi" / { model = "Rockchip RK3229 Evaluation board"; @@ -51,6 +52,15 @@ memory@60000000 { reg = <0x60000000 0x40000000>; }; + dc_12v: dc-12v-regulator { + compatible = "regulator-fixed"; + regulator-name = "dc_12v"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <12000000>; + regulator-max-microvolt = <12000000>; + }; + ext_gmac: ext_gmac { compatible = "fixed-clock"; clock-frequency = <125000000>; @@ -67,6 +77,7 @@ vcc_host: vcc-host-regulator { regulator-name = "vcc_host"; regulator-always-on; regulator-boot-on; + vin-supply = <&vcc_sys>; }; vcc_phy: vcc-phy-regulator { @@ -77,7 +88,96 @@ vcc_phy: vcc-phy-regulator { regulator-max-microvolt = <1800000>; regulator-always-on; regulator-boot-on; + vin-supply = <&vccio_1v8>; }; + + vcc_sys: vcc-sys-regulator { + compatible = "regulator-fixed"; + regulator-name = "vcc_sys"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + vin-supply = <&dc_12v>; + }; + + vccio_1v8: vccio-1v8-regulator { + compatible = "regulator-fixed"; + regulator-name = "vccio_1v8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; + vin-supply = <&vcc_sys>; + }; + + vccio_3v3: vccio-3v3-regulator { + compatible = "regulator-fixed"; + regulator-name = "vccio_3v3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + vin-supply = <&vcc_sys>; + }; + + vdd_arm: vdd-arm-regulator { + compatible = "pwm-regulator"; + pwms = <&pwm1 0 25000 1>; + pwm-supply = <&vcc_sys>; + regulator-name = "vdd_arm"; + regulator-min-microvolt = <950000>; + regulator-max-microvolt = <1400000>; + regulator-always-on; + regulator-boot-on; + }; + + vdd_log: vdd-log-regulator { + compatible = "pwm-regulator"; + pwms = <&pwm2 0 25000 1>; + pwm-supply = <&vcc_sys>; + regulator-name = "vdd_log"; + regulator-min-microvolt = <1000000>; + regulator-max-microvolt = <1300000>; + regulator-always-on; + regulator-boot-on; + }; + + gpio_keys { + compatible = "gpio-keys"; + autorepeat; + pinctrl-names = "default"; + pinctrl-0 = <&pwr_key>; + + power_key: power-key { + label = "GPIO Key Power"; + gpios = <&gpio3 23 GPIO_ACTIVE_LOW>; + linux,code = ; + debounce-interval = <100>; + wakeup-source; + }; + }; +}; + +&cpu0 { + cpu-supply = <&vdd_arm>; +}; + +&cpu1 { + cpu-supply = <&vdd_arm>; +}; + +&cpu2 { + cpu-supply = <&vdd_arm>; +}; + +&cpu3 { + cpu-supply = <&vdd_arm>; +}; + +&emmc { + 
cap-mmc-highspeed; + disable-wp; + non-removable; + status = "okay"; }; &gmac { @@ -96,7 +196,21 @@ &gmac { status = "okay"; }; +&io_domains { + status = "okay"; + + vccio1-supply = <&vccio_3v3>; + vccio2-supply = <&vccio_1v8>; + vccio4-supply = <&vccio_3v3>; +}; + &pinctrl { + keys { + pwr_key: pwr-key { + rockchip,pins = <3 RK_PC7 RK_FUNC_GPIO &pcfg_pull_up>; + }; + }; + usb { host_vbus_drv: host-vbus-drv { rockchip,pins = <3 RK_PC4 RK_FUNC_GPIO &pcfg_pull_none>; @@ -104,6 +218,19 @@ host_vbus_drv: host-vbus-drv { }; }; +&pwm1 { + status = "okay"; +}; + +&pwm2 { + status = "okay"; +}; + +&tsadc { + rockchip,hw-tshut-mode = <0>; /* tshut mode 0:CRU 1:GPIO */ + status = "okay"; +}; + &uart2 { status = "okay"; }; diff --git a/arch/arm/boot/dts/rk3229.dtsi b/arch/arm/boot/dts/rk3229.dtsi new file mode 100644 index 000000000000..6fe6c15fc13a --- /dev/null +++ b/arch/arm/boot/dts/rk3229.dtsi @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2017 Fuzhou Rockchip Electronics Co., Ltd + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#include "rk322x.dtsi" + +/ { + compatible = "rockchip,rk3229"; + + /delete-node/ opp-table0; + + cpu0_opp_table: opp_table0 { + compatible = "operating-points-v2"; + opp-shared; + + opp-408000000 { + opp-hz = /bits/ 64 <408000000>; + opp-microvolt = <950000>; + clock-latency-ns = <40000>; + opp-suspend; + }; + opp-600000000 { + opp-hz = /bits/ 64 <600000000>; + opp-microvolt = <975000>; + }; + opp-816000000 { + opp-hz = /bits/ 64 <816000000>; + opp-microvolt = <1000000>; + }; + opp-1008000000 { + opp-hz = /bits/ 64 <1008000000>; + opp-microvolt = <1175000>; + }; + opp-1200000000 { + opp-hz = /bits/ 64 <1200000000>; + opp-microvolt = <1275000>; + }; + opp-1296000000 { + opp-hz = /bits/ 64 <1296000000>; + opp-microvolt = <1325000>; + }; + opp-1392000000 { + opp-hz = /bits/ 64 <1392000000>; + opp-microvolt = <1375000>; + }; + opp-1464000000 { + opp-hz = /bits/ 64 <1464000000>; + opp-microvolt = <1400000>; + }; + }; +}; diff --git a/arch/arm/boot/dts/rk322x.dtsi b/arch/arm/boot/dts/rk322x.dtsi index f3e4ffd9f818..06814421eed2 100644 --- a/arch/arm/boot/dts/rk322x.dtsi +++ b/arch/arm/boot/dts/rk322x.dtsi @@ -55,6 +55,7 @@ aliases { serial0 = &uart0; serial1 = &uart1; serial2 = &uart2; + spi0 = &spi0; }; cpus { @@ -70,6 +71,7 @@ cpu0: cpu@f00 { #cooling-cells = <2>; /* min followed by max */ clock-latency = <40000>; clocks = <&cru ARMCLK>; + enable-method = "psci"; }; cpu1: cpu@f01 { @@ -78,6 +80,7 @@ cpu1: cpu@f01 { reg = <0xf01>; resets = <&cru SRST_CORE1>; operating-points-v2 = <&cpu0_opp_table>; + enable-method = "psci"; }; cpu2: cpu@f02 { @@ -86,6 +89,7 @@ cpu2: cpu@f02 { reg = <0xf02>; resets = <&cru SRST_CORE2>; operating-points-v2 = <&cpu0_opp_table>; + enable-method = "psci"; }; cpu3: cpu@f03 { @@ -94,6 +98,7 @@ cpu3: cpu@f03 { reg = <0xf03>; resets = <&cru SRST_CORE3>; operating-points-v2 = <&cpu0_opp_table>; + enable-method = "psci"; }; }; @@ -151,6 +156,11 @@ arm-pmu { interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>; }; + psci { + compatible = "arm,psci-1.0", "arm,psci-0.2"; + method = "smc"; + }; + timer { compatible = "arm,armv7-timer"; arm,cpu-registers-not-fw-configured; @@ -196,6 +206,19 @@ i2s0: i2s0@100c0000 { status = "disabled"; }; + spdif: spdif@100d0000 { + compatible = "rockchip,rk3228-spdif"; + reg = <0x100d0000 0x1000>; + interrupts = ; + clocks = <&cru SCLK_SPDIF>, <&cru HCLK_SPDIF_8CH>; + clock-names = "mclk", "hclk"; + dmas = <&pdma 10>; + dma-names = "tx"; + pinctrl-names = "default"; + pinctrl-0 = <&spdif_tx>; + status = "disabled"; + }; + i2s2: i2s2@100e0000 { compatible = "rockchip,rk3228-i2s", "rockchip,rk3066-i2s"; reg = <0x100e0000 0x4000>; @@ -215,6 +238,11 @@ grf: syscon@11000000 { #address-cells = <1>; #size-cells = <1>; + io_domains: io-domains { + compatible = "rockchip,rk3228-io-voltage-domain"; + status = "disabled"; + }; + u2phy0: usb2-phy@760 { compatible = "rockchip,rk3228-usb2phy"; reg = <0x0760 0x0c>; @@ -309,6 +337,23 @@ uart2: serial@11030000 { status = "disabled"; }; + efuse: efuse@11040000 { + compatible = "rockchip,rk3228-efuse"; + reg = <0x11040000 0x20>; + clocks = <&cru PCLK_EFUSE_256>; + clock-names = "pclk_efuse"; + #address-cells = <1>; + #size-cells = <1>; + + /* Data cells */ + efuse_id: id@7 { + reg = <0x7 0x10>; + }; + cpu_leakage: cpu_leakage@17 { + reg = <0x17 0x1>; + }; + }; + i2c0: i2c@11050000 { compatible = "rockchip,rk3228-i2c"; reg = <0x11050000 0x1000>; @@ -361,6 +406,19 @@ i2c3: i2c@11080000 { status = "disabled"; }; + spi0: spi@11090000 { + compatible = "rockchip,rk3228-spi"; + reg = <0x11090000 0x1000>; + 
interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clocks = <&cru SCLK_SPI0>, <&cru PCLK_SPI0>; + clock-names = "spiclk", "apb_pclk"; + pinctrl-names = "default"; + pinctrl-0 = <&spi0_clk &spi0_tx &spi0_rx &spi0_cs0 &spi0_cs1>; + status = "disabled"; + }; + wdt: watchdog@110a0000 { compatible = "snps,dw-wdt"; reg = <0x110a0000 0x100>; @@ -500,8 +558,70 @@ tsadc: tsadc@11150000 { status = "disabled"; }; + vpu_mmu: iommu@20020800 { + compatible = "rockchip,iommu"; + reg = <0x20020800 0x100>; + interrupts = ; + interrupt-names = "vpu_mmu"; + iommu-cells = <0>; + status = "disabled"; + }; + + vdec_mmu: iommu@20030480 { + compatible = "rockchip,iommu"; + reg = <0x20030480 0x40>, <0x200304c0 0x40>; + interrupts = ; + interrupt-names = "vdec_mmu"; + iommu-cells = <0>; + status = "disabled"; + }; + + vop_mmu: iommu@20053f00 { + compatible = "rockchip,iommu"; + reg = <0x20053f00 0x100>; + interrupts = ; + interrupt-names = "vop_mmu"; + iommu-cells = <0>; + status = "disabled"; + }; + + iep_mmu: iommu@20070800 { + compatible = "rockchip,iommu"; + reg = <0x20070800 0x100>; + interrupts = ; + interrupt-names = "iep_mmu"; + iommu-cells = <0>; + status = "disabled"; + }; + + sdmmc: dwmmc@30000000 { + compatible = "rockchip,rk3228-dw-mshc", "rockchip,rk3288-dw-mshc"; + reg = <0x30000000 0x4000>; + interrupts = ; + clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>, + <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>; + clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; + fifo-depth = <0x100>; + pinctrl-names = "default"; + pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_bus4>; + status = "disabled"; + }; + + sdio: dwmmc@30010000 { + compatible = "rockchip,rk3228-dw-mshc", "rockchip,rk3288-dw-mshc"; + reg = <0x30010000 0x4000>; + interrupts = ; + clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>, + <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>; + clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; + fifo-depth = <0x100>; + pinctrl-names = "default"; + pinctrl-0 = <&sdio_clk &sdio_cmd &sdio_bus4>; + status = "disabled"; + }; + emmc: dwmmc@30020000 { - compatible = "rockchip,rk3288-dw-mshc"; + compatible = "rockchip,rk3228-dw-mshc", "rockchip,rk3288-dw-mshc"; reg = <0x30020000 0x4000>; interrupts = ; clock-frequency = <37500000>; @@ -511,7 +631,6 @@ emmc: dwmmc@30020000 { clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; bus-width = <8>; default-sample-phase = <158>; - num-slots = <1>; fifo-depth = <0x100>; pinctrl-names = "default"; pinctrl-0 = <&emmc_clk &emmc_cmd &emmc_bus8>; @@ -710,6 +829,40 @@ pcfg_pull_none_drv_12ma: pcfg-pull-none-drv-12ma { drive-strength = <12>; }; + sdmmc { + sdmmc_clk: sdmmc-clk { + rockchip,pins = <1 RK_PC0 1 &pcfg_pull_none_drv_12ma>; + }; + + sdmmc_cmd: sdmmc-cmd { + rockchip,pins = <1 RK_PB7 1 &pcfg_pull_none_drv_12ma>; + }; + + sdmmc_bus4: sdmmc-bus4 { + rockchip,pins = <1 RK_PC2 1 &pcfg_pull_none_drv_12ma>, + <1 RK_PC3 1 &pcfg_pull_none_drv_12ma>, + <1 RK_PC4 1 &pcfg_pull_none_drv_12ma>, + <1 RK_PC5 1 &pcfg_pull_none_drv_12ma>; + }; + }; + + sdio { + sdio_clk: sdio-clk { + rockchip,pins = <3 RK_PA0 1 &pcfg_pull_none_drv_12ma>; + }; + + sdio_cmd: sdio-cmd { + rockchip,pins = <3 RK_PA1 1 &pcfg_pull_none_drv_12ma>; + }; + + sdio_bus4: sdio-bus4 { + rockchip,pins = <3 RK_PA2 1 &pcfg_pull_none_drv_12ma>, + <3 RK_PA3 1 &pcfg_pull_none_drv_12ma>, + <3 RK_PA4 1 &pcfg_pull_none_drv_12ma>, + <3 RK_PA5 1 &pcfg_pull_none_drv_12ma>; + }; + }; + emmc { emmc_clk: emmc-clk { rockchip,pins = <2 7 RK_FUNC_2 &pcfg_pull_none>; @@ -797,6 +950,42 @@ i2c3_xfer: i2c3-xfer { }; }; + spi-0 { + 
spi0_clk: spi0-clk { + rockchip,pins = <0 9 RK_FUNC_2 &pcfg_pull_up>; + }; + spi0_cs0: spi0-cs0 { + rockchip,pins = <0 14 RK_FUNC_2 &pcfg_pull_up>; + }; + spi0_tx: spi0-tx { + rockchip,pins = <0 11 RK_FUNC_2 &pcfg_pull_up>; + }; + spi0_rx: spi0-rx { + rockchip,pins = <0 13 RK_FUNC_2 &pcfg_pull_up>; + }; + spi0_cs1: spi0-cs1 { + rockchip,pins = <1 12 RK_FUNC_1 &pcfg_pull_up>; + }; + }; + + spi-1 { + spi1_clk: spi1-clk { + rockchip,pins = <0 23 RK_FUNC_2 &pcfg_pull_up>; + }; + spi1_cs0: spi1-cs0 { + rockchip,pins = <2 2 RK_FUNC_2 &pcfg_pull_up>; + }; + spi1_rx: spi1-rx { + rockchip,pins = <2 0 RK_FUNC_2 &pcfg_pull_up>; + }; + spi1_tx: spi1-tx { + rockchip,pins = <2 1 RK_FUNC_2 &pcfg_pull_up>; + }; + spi1_cs1: spi1-cs1 { + rockchip,pins = <2 3 RK_FUNC_2 &pcfg_pull_up>; + }; + }; + i2s1 { i2s1_bus: i2s1-bus { rockchip,pins = <0 8 RK_FUNC_1 &pcfg_pull_none>, @@ -835,6 +1024,12 @@ pwm3_pin: pwm3-pin { }; }; + spdif { + spdif_tx: spdif-tx { + rockchip,pins = <3 31 RK_FUNC_2 &pcfg_pull_none>; + }; + }; + tsadc { otp_gpio: otp-gpio { rockchip,pins = <0 24 RK_FUNC_GPIO &pcfg_pull_none>; diff --git a/arch/arm/boot/dts/rk3288-evb.dtsi b/arch/arm/boot/dts/rk3288-evb.dtsi index 0dec94c3583b..39b61dce97ad 100644 --- a/arch/arm/boot/dts/rk3288-evb.dtsi +++ b/arch/arm/boot/dts/rk3288-evb.dtsi @@ -45,7 +45,44 @@ / { memory@0 { device_type = "memory"; - reg = <0x0 0x80000000>; + reg = <0x0 0x0 0x0 0x80000000>; + }; + + adc-keys { + compatible = "adc-keys"; + io-channels = <&saradc 1>; + io-channel-names = "buttons"; + keyup-threshold-microvolt = <1800000>; + + button-up { + label = "Volume Up"; + linux,code = ; + press-threshold-microvolt = <100000>; + }; + + button-down { + label = "Volume Down"; + linux,code = ; + press-threshold-microvolt = <300000>; + }; + + menu { + label = "Menu"; + linux,code = ; + press-threshold-microvolt = <640000>; + }; + + esc { + label = "Esc"; + linux,code = ; + press-threshold-microvolt = <1000000>; + }; + + home { + label = "Home"; + linux,code = ; + press-threshold-microvolt = <1300000>; + }; }; backlight: backlight { @@ -212,19 +249,22 @@ &emmc { cap-mmc-highspeed; disable-wp; non-removable; - num-slots = <1>; pinctrl-names = "default"; pinctrl-0 = <&emmc_clk &emmc_cmd &emmc_pwr &emmc_bus8>; status = "okay"; }; +&saradc { + vref-supply = <&vcc_18>; + status = "okay"; +}; + &sdmmc { bus-width = <4>; cap-mmc-highspeed; cap-sd-highspeed; card-detect-delay = <200>; disable-wp; /* wp not hooked up */ - num-slots = <1>; pinctrl-names = "default"; pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>; status = "okay"; @@ -248,6 +288,11 @@ &gmac { status = "ok"; }; +&gpu { + mali-supply = <&vdd_gpu>; + status = "okay"; +}; + &hdmi { ddc-i2c-bus = <&i2c5>; status = "okay"; diff --git a/arch/arm/boot/dts/rk3288-fennec.dts b/arch/arm/boot/dts/rk3288-fennec.dts index 61d1c1028317..41405974253a 100644 --- a/arch/arm/boot/dts/rk3288-fennec.dts +++ b/arch/arm/boot/dts/rk3288-fennec.dts @@ -47,7 +47,7 @@ / { compatible = "rockchip,rk3288-fennec", "rockchip,rk3288"; memory@0 { - reg = <0x0 0x80000000>; + reg = <0x0 0x0 0x0 0x80000000>; device_type = "memory"; }; @@ -77,7 +77,6 @@ &emmc { cap-mmc-highspeed; disable-wp; non-removable; - num-slots = <1>; pinctrl-names = "default"; pinctrl-0 = <&emmc_clk &emmc_cmd &emmc_pwr &emmc_bus8>; status = "okay"; @@ -99,6 +98,11 @@ &gmac { status = "okay"; }; +&gpu { + mali-supply = <&vdd_gpu>; + status = "okay"; +}; + &hdmi { status = "okay"; }; diff --git a/arch/arm/boot/dts/rk3288-firefly-reload-core.dtsi 
b/arch/arm/boot/dts/rk3288-firefly-reload-core.dtsi index 813496618d08..5f05815f47e0 100644 --- a/arch/arm/boot/dts/rk3288-firefly-reload-core.dtsi +++ b/arch/arm/boot/dts/rk3288-firefly-reload-core.dtsi @@ -47,7 +47,7 @@ / { memory@0 { device_type = "memory"; - reg = <0 0x80000000>; + reg = <0x0 0x0 0x0 0x80000000>; }; ext_gmac: external-gmac-clock { @@ -78,7 +78,6 @@ &emmc { mmc-ddr-1_8v; mmc-hs200-1_8v; non-removable; - num-slots = <1>; pinctrl-names = "default"; pinctrl-0 = <&emmc_clk>, <&emmc_cmd>, <&emmc_pwr>, <&emmc_bus8>; vmmc-supply = <&vcc_io>; diff --git a/arch/arm/boot/dts/rk3288-firefly-reload.dts b/arch/arm/boot/dts/rk3288-firefly-reload.dts index b11a282c334c..7da0947ababb 100644 --- a/arch/arm/boot/dts/rk3288-firefly-reload.dts +++ b/arch/arm/boot/dts/rk3288-firefly-reload.dts @@ -269,7 +269,6 @@ &sdmmc { cap-sd-highspeed; card-detect-delay = <200>; disable-wp; - num-slots = <1>; pinctrl-names = "default"; pinctrl-0 = <&sdmmc_clk>, <&sdmmc_cmd>, <&sdmmc_cd>, <&sdmmc_bus4>; vmmc-supply = <&vcc_sd>; @@ -284,7 +283,6 @@ &sdio0 { disable-wp; mmc-pwrseq = <&sdio_pwrseq>; non-removable; - num-slots = <1>; pinctrl-names = "default"; pinctrl-0 = <&sdio0_bus4>, <&sdio0_cmd>, <&sdio0_clk>, <&sdio0_int>; sd-uhs-sdr12; diff --git a/arch/arm/boot/dts/rk3288-firefly.dtsi b/arch/arm/boot/dts/rk3288-firefly.dtsi index 32dabae12e67..b9e6f3a97240 100644 --- a/arch/arm/boot/dts/rk3288-firefly.dtsi +++ b/arch/arm/boot/dts/rk3288-firefly.dtsi @@ -46,7 +46,7 @@ / { memory@0 { device_type = "memory"; - reg = <0 0x80000000>; + reg = <0x0 0x0 0x0 0x80000000>; }; adc-keys { @@ -208,7 +208,6 @@ &emmc { cap-mmc-highspeed; disable-wp; non-removable; - num-slots = <1>; pinctrl-names = "default"; pinctrl-0 = <&emmc_clk>, <&emmc_cmd>, <&emmc_pwr>, <&emmc_bus8>; vmmc-supply = <&vcc_io>; @@ -527,7 +526,6 @@ &sdio0 { bus-width = <4>; disable-wp; non-removable; - num-slots = <1>; pinctrl-names = "default"; pinctrl-0 = <&sdio0_bus4>, <&sdio0_cmd>, <&sdio0_clk>; vmmc-supply = <&vbat_wl>; @@ -541,7 +539,6 @@ &sdmmc { cap-sd-highspeed; card-detect-delay = <200>; disable-wp; - num-slots = <1>; pinctrl-names = "default"; pinctrl-0 = <&sdmmc_clk>, <&sdmmc_cmd>, <&sdmmc_cd>, <&sdmmc_bus4>; vmmc-supply = <&vcc_sd>; diff --git a/arch/arm/boot/dts/rk3288-miqi.dts b/arch/arm/boot/dts/rk3288-miqi.dts index 30e93f694ae8..4d923aa6ed11 100644 --- a/arch/arm/boot/dts/rk3288-miqi.dts +++ b/arch/arm/boot/dts/rk3288-miqi.dts @@ -54,7 +54,7 @@ chosen { memory@0 { device_type = "memory"; - reg = <0 0x80000000>; + reg = <0x0 0x0 0x0 0x80000000>; }; ext_gmac: external-gmac-clock { @@ -126,7 +126,6 @@ &emmc { cap-mmc-highspeed; disable-wp; non-removable; - num-slots = <1>; pinctrl-names = "default"; pinctrl-0 = <&emmc_clk>, <&emmc_cmd>, <&emmc_pwr>, <&emmc_bus8>; vmmc-supply = <&vcc_io>; @@ -404,7 +403,6 @@ &sdmmc { cap-sd-highspeed; card-detect-delay = <200>; disable-wp; - num-slots = <1>; pinctrl-names = "default"; pinctrl-0 = <&sdmmc_clk>, <&sdmmc_cmd>, <&sdmmc_cd>, <&sdmmc_bus4>; vmmc-supply = <&vcc_sd>; diff --git a/arch/arm/boot/dts/rk3288-phycore-rdk.dts b/arch/arm/boot/dts/rk3288-phycore-rdk.dts index 3dda79579b51..1241cbcfc16f 100644 --- a/arch/arm/boot/dts/rk3288-phycore-rdk.dts +++ b/arch/arm/boot/dts/rk3288-phycore-rdk.dts @@ -263,7 +263,6 @@ &sdmmc { cap-sd-highspeed; card-detect-delay = <200>; disable-wp; - num-slots = <1>; pinctrl-names = "default"; pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>; vmmc-supply = <&vdd_io_sd>; diff --git a/arch/arm/boot/dts/rk3288-phycore-som.dtsi 
b/arch/arm/boot/dts/rk3288-phycore-som.dtsi index 26cd3ad45160..99cfae875e12 100644 --- a/arch/arm/boot/dts/rk3288-phycore-som.dtsi +++ b/arch/arm/boot/dts/rk3288-phycore-som.dtsi @@ -55,7 +55,7 @@ / { */ memory { device_type = "memory"; - reg = <0 0x8000000>; + reg = <0x0 0x0 0x0 0x8000000>; }; aliases { @@ -136,7 +136,6 @@ &emmc { cap-mmc-highspeed; disable-wp; non-removable; - num-slots = <1>; pinctrl-names = "default"; pinctrl-0 = <&emmc_clk &emmc_cmd &emmc_pwr &emmc_bus8>; vmmc-supply = <&vdd_3v3_io>; diff --git a/arch/arm/boot/dts/rk3288-popmetal.dts b/arch/arm/boot/dts/rk3288-popmetal.dts index aa1f9ecff231..f084e0c8dcb3 100644 --- a/arch/arm/boot/dts/rk3288-popmetal.dts +++ b/arch/arm/boot/dts/rk3288-popmetal.dts @@ -50,7 +50,7 @@ / { memory@0 { device_type = "memory"; - reg = <0 0x80000000>; + reg = <0x0 0x0 0x0 0x80000000>; }; ext_gmac: external-gmac-clock { @@ -150,7 +150,6 @@ &emmc { mmc-ddr-1_8v; mmc-hs200-1_8v; non-removable; - num-slots = <1>; pinctrl-names = "default"; pinctrl-0 = <&emmc_clk &emmc_cmd &emmc_pwr &emmc_bus8>; vmmc-supply = <&vcc_io>; @@ -164,7 +163,6 @@ &sdmmc { cap-sd-highspeed; card-detect-delay = <200>; disable-wp; /* wp not hooked up */ - num-slots = <1>; pinctrl-names = "default"; pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>; sd-uhs-sdr12; diff --git a/arch/arm/boot/dts/rk3288-r89.dts b/arch/arm/boot/dts/rk3288-r89.dts index 1145b62edde7..e95215c9788b 100644 --- a/arch/arm/boot/dts/rk3288-r89.dts +++ b/arch/arm/boot/dts/rk3288-r89.dts @@ -50,7 +50,7 @@ / { memory@0 { device_type = "memory"; - reg = <0x0 0x80000000>; + reg = <0x0 0x0 0x0 0x80000000>; }; ext_gmac: external-gmac-clock { @@ -354,7 +354,6 @@ &sdmmc { cap-sd-highspeed; card-detect-delay = <200>; disable-wp; - num-slots = <1>; pinctrl-names = "default"; pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>; vmmc-supply = <&vcc_sdmmc>; diff --git a/arch/arm/boot/dts/rk3288-rock2-som.dtsi b/arch/arm/boot/dts/rk3288-rock2-som.dtsi index 749a9b86e6e2..b9c471fcbd42 100644 --- a/arch/arm/boot/dts/rk3288-rock2-som.dtsi +++ b/arch/arm/boot/dts/rk3288-rock2-som.dtsi @@ -43,7 +43,7 @@ / { memory@0 { - reg = <0x0 0x80000000>; + reg = <0x0 0x0 0x0 0x80000000>; device_type = "memory"; }; @@ -89,7 +89,6 @@ &emmc { cap-mmc-highspeed; disable-wp; non-removable; - num-slots = <1>; mmc-pwrseq = <&emmc_pwrseq>; pinctrl-names = "default"; pinctrl-0 = <&emmc_clk &emmc_cmd &emmc_bus8>; diff --git a/arch/arm/boot/dts/rk3288-rock2-square.dts b/arch/arm/boot/dts/rk3288-rock2-square.dts index 8ed25e9f60bc..0e084b8a86ac 100644 --- a/arch/arm/boot/dts/rk3288-rock2-square.dts +++ b/arch/arm/boot/dts/rk3288-rock2-square.dts @@ -147,7 +147,6 @@ &sdio0 { disable-wp; mmc-pwrseq = <&sdio_pwrseq>; non-removable; - num-slots = <1>; pinctrl-names = "default"; pinctrl-0 = <&sdio0_bus4 &sdio0_cmd &sdio0_clk &sdio0_int>; vmmc-supply = <&vcc_io>; @@ -161,7 +160,6 @@ &sdmmc { cap-sd-highspeed; card-detect-delay = <200>; disable-wp; /* wp not hooked up */ - num-slots = <1>; pinctrl-names = "default"; pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>; vmmc-supply = <&vcc_sd>; diff --git a/arch/arm/boot/dts/rk3288-tinker.dts b/arch/arm/boot/dts/rk3288-tinker.dts index f601c78386a9..346b0d8b474d 100644 --- a/arch/arm/boot/dts/rk3288-tinker.dts +++ b/arch/arm/boot/dts/rk3288-tinker.dts @@ -50,7 +50,7 @@ / { compatible = "asus,rk3288-tinker", "rockchip,rk3288"; memory { - reg = <0x0 0x80000000>; + reg = <0x0 0x0 0x0 0x80000000>; device_type = "memory"; }; @@ -156,6 +156,11 @@ &gmac { status = "ok"; }; +&gpu { + 
mali-supply = <&vdd_gpu>; + status = "okay"; +}; + &hdmi { ddc-i2c-bus = <&i2c5>; status = "okay"; @@ -465,7 +470,6 @@ &sdmmc { cap-sd-highspeed; card-detect-delay = <200>; disable-wp; /* wp not hooked up */ - num-slots = <1>; pinctrl-names = "default"; pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>; status = "okay"; diff --git a/arch/arm/boot/dts/rk3288-veyron-sdmmc.dtsi b/arch/arm/boot/dts/rk3288-veyron-sdmmc.dtsi index aef07101e9ab..95e9bee8bca2 100644 --- a/arch/arm/boot/dts/rk3288-veyron-sdmmc.dtsi +++ b/arch/arm/boot/dts/rk3288-veyron-sdmmc.dtsi @@ -117,7 +117,6 @@ &sdmmc { card-detect-delay = <200>; cd-gpios = <&gpio7 RK_PA5 GPIO_ACTIVE_LOW>; rockchip,default-sample-phase = <90>; - num-slots = <1>; sd-uhs-sdr12; sd-uhs-sdr25; sd-uhs-sdr50; diff --git a/arch/arm/boot/dts/rk3288-veyron.dtsi b/arch/arm/boot/dts/rk3288-veyron.dtsi index d709fa1847f9..6e5bd8974f22 100644 --- a/arch/arm/boot/dts/rk3288-veyron.dtsi +++ b/arch/arm/boot/dts/rk3288-veyron.dtsi @@ -49,7 +49,7 @@ / { memory@0 { device_type = "memory"; - reg = <0x0 0x80000000>; + reg = <0x0 0x0 0x0 0x80000000>; }; gpio_keys: gpio-keys { @@ -156,7 +156,6 @@ &emmc { mmc-hs200-1_8v; mmc-pwrseq = <&emmc_pwrseq>; non-removable; - num-slots = <1>; pinctrl-names = "default"; pinctrl-0 = <&emmc_clk &emmc_cmd &emmc_bus8>; }; @@ -372,7 +371,6 @@ &sdio0 { keep-power-in-suspend; mmc-pwrseq = <&sdio_pwrseq>; non-removable; - num-slots = <1>; pinctrl-names = "default"; pinctrl-0 = <&sdio0_clk &sdio0_cmd &sdio0_bus4>; sd-uhs-sdr12; diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi index 858e1fed762a..356ed1e62452 100644 --- a/arch/arm/boot/dts/rk3288.dtsi +++ b/arch/arm/boot/dts/rk3288.dtsi @@ -49,8 +49,8 @@ #include / { - #address-cells = <1>; - #size-cells = <1>; + #address-cells = <2>; + #size-cells = <2>; compatible = "rockchip,rk3288"; @@ -139,13 +139,13 @@ cpu3: cpu@503 { amba { compatible = "simple-bus"; - #address-cells = <1>; - #size-cells = <1>; + #address-cells = <2>; + #size-cells = <2>; ranges; dmac_peri: dma-controller@ff250000 { compatible = "arm,pl330", "arm,primecell"; - reg = <0xff250000 0x4000>; + reg = <0x0 0xff250000 0x0 0x4000>; interrupts = , ; #dma-cells = <1>; @@ -156,7 +156,7 @@ dmac_peri: dma-controller@ff250000 { dmac_bus_ns: dma-controller@ff600000 { compatible = "arm,pl330", "arm,primecell"; - reg = <0xff600000 0x4000>; + reg = <0x0 0xff600000 0x0 0x4000>; interrupts = , ; #dma-cells = <1>; @@ -168,7 +168,7 @@ dmac_bus_ns: dma-controller@ff600000 { dmac_bus_s: dma-controller@ffb20000 { compatible = "arm,pl330", "arm,primecell"; - reg = <0xffb20000 0x4000>; + reg = <0x0 0xffb20000 0x0 0x4000>; interrupts = , ; #dma-cells = <1>; @@ -179,8 +179,8 @@ dmac_bus_s: dma-controller@ffb20000 { }; reserved-memory { - #address-cells = <1>; - #size-cells = <1>; + #address-cells = <2>; + #size-cells = <2>; ranges; /* @@ -194,7 +194,7 @@ reserved-memory { * is found. 
*/ dma-unusable@fe000000 { - reg = <0xfe000000 0x1000000>; + reg = <0x0 0xfe000000 0x0 0x1000000>; }; }; @@ -217,7 +217,7 @@ timer { timer: timer@ff810000 { compatible = "rockchip,rk3288-timer"; - reg = <0xff810000 0x20>; + reg = <0x0 0xff810000 0x0 0x20>; interrupts = ; clocks = <&xin24m>, <&cru PCLK_TIMER>; clock-names = "timer", "pclk"; @@ -236,7 +236,7 @@ sdmmc: dwmmc@ff0c0000 { clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; fifo-depth = <0x100>; interrupts = ; - reg = <0xff0c0000 0x4000>; + reg = <0x0 0xff0c0000 0x0 0x4000>; resets = <&cru SRST_MMC0>; reset-names = "reset"; status = "disabled"; @@ -250,7 +250,7 @@ sdio0: dwmmc@ff0d0000 { clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; fifo-depth = <0x100>; interrupts = ; - reg = <0xff0d0000 0x4000>; + reg = <0x0 0xff0d0000 0x0 0x4000>; resets = <&cru SRST_SDIO0>; reset-names = "reset"; status = "disabled"; @@ -264,7 +264,7 @@ sdio1: dwmmc@ff0e0000 { clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; fifo-depth = <0x100>; interrupts = ; - reg = <0xff0e0000 0x4000>; + reg = <0x0 0xff0e0000 0x0 0x4000>; resets = <&cru SRST_SDIO1>; reset-names = "reset"; status = "disabled"; @@ -278,7 +278,7 @@ emmc: dwmmc@ff0f0000 { clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; fifo-depth = <0x100>; interrupts = ; - reg = <0xff0f0000 0x4000>; + reg = <0x0 0xff0f0000 0x0 0x4000>; resets = <&cru SRST_EMMC>; reset-names = "reset"; status = "disabled"; @@ -286,7 +286,7 @@ emmc: dwmmc@ff0f0000 { saradc: saradc@ff100000 { compatible = "rockchip,saradc"; - reg = <0xff100000 0x100>; + reg = <0x0 0xff100000 0x0 0x100>; interrupts = ; #io-channel-cells = <1>; clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>; @@ -305,7 +305,7 @@ spi0: spi@ff110000 { interrupts = ; pinctrl-names = "default"; pinctrl-0 = <&spi0_clk &spi0_tx &spi0_rx &spi0_cs0>; - reg = <0xff110000 0x1000>; + reg = <0x0 0xff110000 0x0 0x1000>; #address-cells = <1>; #size-cells = <0>; status = "disabled"; @@ -320,7 +320,7 @@ spi1: spi@ff120000 { interrupts = ; pinctrl-names = "default"; pinctrl-0 = <&spi1_clk &spi1_tx &spi1_rx &spi1_cs0>; - reg = <0xff120000 0x1000>; + reg = <0x0 0xff120000 0x0 0x1000>; #address-cells = <1>; #size-cells = <0>; status = "disabled"; @@ -335,7 +335,7 @@ spi2: spi@ff130000 { interrupts = ; pinctrl-names = "default"; pinctrl-0 = <&spi2_clk &spi2_tx &spi2_rx &spi2_cs0>; - reg = <0xff130000 0x1000>; + reg = <0x0 0xff130000 0x0 0x1000>; #address-cells = <1>; #size-cells = <0>; status = "disabled"; @@ -343,7 +343,7 @@ spi2: spi@ff130000 { i2c1: i2c@ff140000 { compatible = "rockchip,rk3288-i2c"; - reg = <0xff140000 0x1000>; + reg = <0x0 0xff140000 0x0 0x1000>; interrupts = ; #address-cells = <1>; #size-cells = <0>; @@ -356,7 +356,7 @@ i2c1: i2c@ff140000 { i2c3: i2c@ff150000 { compatible = "rockchip,rk3288-i2c"; - reg = <0xff150000 0x1000>; + reg = <0x0 0xff150000 0x0 0x1000>; interrupts = ; #address-cells = <1>; #size-cells = <0>; @@ -369,7 +369,7 @@ i2c3: i2c@ff150000 { i2c4: i2c@ff160000 { compatible = "rockchip,rk3288-i2c"; - reg = <0xff160000 0x1000>; + reg = <0x0 0xff160000 0x0 0x1000>; interrupts = ; #address-cells = <1>; #size-cells = <0>; @@ -382,7 +382,7 @@ i2c4: i2c@ff160000 { i2c5: i2c@ff170000 { compatible = "rockchip,rk3288-i2c"; - reg = <0xff170000 0x1000>; + reg = <0x0 0xff170000 0x0 0x1000>; interrupts = ; #address-cells = <1>; #size-cells = <0>; @@ -395,7 +395,7 @@ i2c5: i2c@ff170000 { uart0: serial@ff180000 { compatible = "rockchip,rk3288-uart", "snps,dw-apb-uart"; - reg = <0xff180000 0x100>; + reg = <0x0 0xff180000 0x0 0x100>; 
interrupts = ; reg-shift = <2>; reg-io-width = <4>; @@ -408,7 +408,7 @@ uart0: serial@ff180000 { uart1: serial@ff190000 { compatible = "rockchip,rk3288-uart", "snps,dw-apb-uart"; - reg = <0xff190000 0x100>; + reg = <0x0 0xff190000 0x0 0x100>; interrupts = ; reg-shift = <2>; reg-io-width = <4>; @@ -421,7 +421,7 @@ uart1: serial@ff190000 { uart2: serial@ff690000 { compatible = "rockchip,rk3288-uart", "snps,dw-apb-uart"; - reg = <0xff690000 0x100>; + reg = <0x0 0xff690000 0x0 0x100>; interrupts = ; reg-shift = <2>; reg-io-width = <4>; @@ -434,7 +434,7 @@ uart2: serial@ff690000 { uart3: serial@ff1b0000 { compatible = "rockchip,rk3288-uart", "snps,dw-apb-uart"; - reg = <0xff1b0000 0x100>; + reg = <0x0 0xff1b0000 0x0 0x100>; interrupts = ; reg-shift = <2>; reg-io-width = <4>; @@ -447,7 +447,7 @@ uart3: serial@ff1b0000 { uart4: serial@ff1c0000 { compatible = "rockchip,rk3288-uart", "snps,dw-apb-uart"; - reg = <0xff1c0000 0x100>; + reg = <0x0 0xff1c0000 0x0 0x100>; interrupts = ; reg-shift = <2>; reg-io-width = <4>; @@ -535,7 +535,7 @@ map0 { tsadc: tsadc@ff280000 { compatible = "rockchip,rk3288-tsadc"; - reg = <0xff280000 0x100>; + reg = <0x0 0xff280000 0x0 0x100>; interrupts = ; clocks = <&cru SCLK_TSADC>, <&cru PCLK_TSADC>; clock-names = "tsadc", "apb_pclk"; @@ -552,7 +552,7 @@ tsadc: tsadc@ff280000 { gmac: ethernet@ff290000 { compatible = "rockchip,rk3288-gmac"; - reg = <0xff290000 0x10000>; + reg = <0x0 0xff290000 0x0 0x10000>; interrupts = , ; interrupt-names = "macirq", "eth_wake_irq"; @@ -572,7 +572,7 @@ gmac: ethernet@ff290000 { usb_host0_ehci: usb@ff500000 { compatible = "generic-ehci"; - reg = <0xff500000 0x100>; + reg = <0x0 0xff500000 0x0 0x100>; interrupts = ; clocks = <&cru HCLK_USBHOST0>; clock-names = "usbhost"; @@ -586,7 +586,7 @@ usb_host0_ehci: usb@ff500000 { usb_host1: usb@ff540000 { compatible = "rockchip,rk3288-usb", "rockchip,rk3066-usb", "snps,dwc2"; - reg = <0xff540000 0x40000>; + reg = <0x0 0xff540000 0x0 0x40000>; interrupts = ; clocks = <&cru HCLK_USBHOST1>; clock-names = "otg"; @@ -599,7 +599,7 @@ usb_host1: usb@ff540000 { usb_otg: usb@ff580000 { compatible = "rockchip,rk3288-usb", "rockchip,rk3066-usb", "snps,dwc2"; - reg = <0xff580000 0x40000>; + reg = <0x0 0xff580000 0x0 0x40000>; interrupts = ; clocks = <&cru HCLK_OTG0>; clock-names = "otg"; @@ -614,7 +614,7 @@ usb_otg: usb@ff580000 { usb_hsic: usb@ff5c0000 { compatible = "generic-ehci"; - reg = <0xff5c0000 0x100>; + reg = <0x0 0xff5c0000 0x0 0x100>; interrupts = ; clocks = <&cru HCLK_HSIC>; clock-names = "usbhost"; @@ -623,7 +623,7 @@ usb_hsic: usb@ff5c0000 { i2c0: i2c@ff650000 { compatible = "rockchip,rk3288-i2c"; - reg = <0xff650000 0x1000>; + reg = <0x0 0xff650000 0x0 0x1000>; interrupts = ; #address-cells = <1>; #size-cells = <0>; @@ -636,7 +636,7 @@ i2c0: i2c@ff650000 { i2c2: i2c@ff660000 { compatible = "rockchip,rk3288-i2c"; - reg = <0xff660000 0x1000>; + reg = <0x0 0xff660000 0x0 0x1000>; interrupts = ; #address-cells = <1>; #size-cells = <0>; @@ -649,7 +649,7 @@ i2c2: i2c@ff660000 { pwm0: pwm@ff680000 { compatible = "rockchip,rk3288-pwm"; - reg = <0xff680000 0x10>; + reg = <0x0 0xff680000 0x0 0x10>; #pwm-cells = <3>; pinctrl-names = "default"; pinctrl-0 = <&pwm0_pin>; @@ -660,7 +660,7 @@ pwm0: pwm@ff680000 { pwm1: pwm@ff680010 { compatible = "rockchip,rk3288-pwm"; - reg = <0xff680010 0x10>; + reg = <0x0 0xff680010 0x0 0x10>; #pwm-cells = <3>; pinctrl-names = "default"; pinctrl-0 = <&pwm1_pin>; @@ -671,7 +671,7 @@ pwm1: pwm@ff680010 { pwm2: pwm@ff680020 { compatible = "rockchip,rk3288-pwm"; - reg = 
<0xff680020 0x10>; + reg = <0x0 0xff680020 0x0 0x10>; #pwm-cells = <3>; pinctrl-names = "default"; pinctrl-0 = <&pwm2_pin>; @@ -682,7 +682,7 @@ pwm2: pwm@ff680020 { pwm3: pwm@ff680030 { compatible = "rockchip,rk3288-pwm"; - reg = <0xff680030 0x10>; + reg = <0x0 0xff680030 0x0 0x10>; #pwm-cells = <2>; pinctrl-names = "default"; pinctrl-0 = <&pwm3_pin>; @@ -693,10 +693,10 @@ pwm3: pwm@ff680030 { bus_intmem@ff700000 { compatible = "mmio-sram"; - reg = <0xff700000 0x18000>; + reg = <0x0 0xff700000 0x0 0x18000>; #address-cells = <1>; #size-cells = <1>; - ranges = <0 0xff700000 0x18000>; + ranges = <0 0x0 0xff700000 0x18000>; smp-sram@0 { compatible = "rockchip,rk3066-smp-sram"; reg = <0x00 0x10>; @@ -705,12 +705,12 @@ smp-sram@0 { sram@ff720000 { compatible = "rockchip,rk3288-pmu-sram", "mmio-sram"; - reg = <0xff720000 0x1000>; + reg = <0x0 0xff720000 0x0 0x1000>; }; pmu: power-management@ff730000 { compatible = "rockchip,rk3288-pmu", "syscon", "simple-mfd"; - reg = <0xff730000 0x100>; + reg = <0x0 0xff730000 0x0 0x100>; power: power-controller { compatible = "rockchip,rk3288-power-controller"; @@ -831,12 +831,12 @@ reboot-mode { sgrf: syscon@ff740000 { compatible = "rockchip,rk3288-sgrf", "syscon"; - reg = <0xff740000 0x1000>; + reg = <0x0 0xff740000 0x0 0x1000>; }; cru: clock-controller@ff760000 { compatible = "rockchip,rk3288-cru"; - reg = <0xff760000 0x1000>; + reg = <0x0 0xff760000 0x0 0x1000>; rockchip,grf = <&grf>; #clock-cells = <1>; #reset-cells = <1>; @@ -854,7 +854,7 @@ cru: clock-controller@ff760000 { grf: syscon@ff770000 { compatible = "rockchip,rk3288-grf", "syscon", "simple-mfd"; - reg = <0xff770000 0x1000>; + reg = <0x0 0xff770000 0x0 0x1000>; edp_phy: edp-phy { compatible = "rockchip,rk3288-dp-phy"; @@ -903,7 +903,7 @@ usbphy2: usb-phy@348 { wdt: watchdog@ff800000 { compatible = "rockchip,rk3288-wdt", "snps,dw-wdt"; - reg = <0xff800000 0x100>; + reg = <0x0 0xff800000 0x0 0x100>; clocks = <&cru PCLK_WDT>; interrupts = ; status = "disabled"; @@ -911,7 +911,7 @@ wdt: watchdog@ff800000 { spdif: sound@ff88b0000 { compatible = "rockchip,rk3288-spdif", "rockchip,rk3066-spdif"; - reg = <0xff8b0000 0x10000>; + reg = <0x0 0xff8b0000 0x0 0x10000>; #sound-dai-cells = <0>; clock-names = "hclk", "mclk"; clocks = <&cru HCLK_SPDIF8CH>, <&cru SCLK_SPDIF8CH>; @@ -926,7 +926,7 @@ spdif: sound@ff88b0000 { i2s: i2s@ff890000 { compatible = "rockchip,rk3288-i2s", "rockchip,rk3066-i2s"; - reg = <0xff890000 0x10000>; + reg = <0x0 0xff890000 0x0 0x10000>; interrupts = ; #address-cells = <1>; #size-cells = <0>; @@ -943,7 +943,7 @@ i2s: i2s@ff890000 { crypto: cypto-controller@ff8a0000 { compatible = "rockchip,rk3288-crypto"; - reg = <0xff8a0000 0x4000>; + reg = <0x0 0xff8a0000 0x0 0x4000>; interrupts = ; clocks = <&cru ACLK_CRYPTO>, <&cru HCLK_CRYPTO>, <&cru SCLK_CRYPTO>, <&cru ACLK_DMAC1>; @@ -953,9 +953,28 @@ crypto: cypto-controller@ff8a0000 { status = "okay"; }; + iep_mmu: iommu@ff900800 { + compatible = "rockchip,iommu"; + reg = <0x0 0xff900800 0x0 0x40>; + interrupts = ; + interrupt-names = "iep_mmu"; + #iommu-cells = <0>; + status = "disabled"; + }; + + isp_mmu: iommu@ff914000 { + compatible = "rockchip,iommu"; + reg = <0x0 0xff914000 0x0 0x100>, <0x0 0xff915000 0x0 0x100>; + interrupts = ; + interrupt-names = "isp_mmu"; + #iommu-cells = <0>; + rockchip,disable-mmu-reset; + status = "disabled"; + }; + vopb: vop@ff930000 { compatible = "rockchip,rk3288-vop"; - reg = <0xff930000 0x19c>; + reg = <0x0 0xff930000 0x0 0x19c>; interrupts = ; clocks = <&cru ACLK_VOP0>, <&cru DCLK_VOP0>, <&cru HCLK_VOP0>; 
clock-names = "aclk_vop", "dclk_vop", "hclk_vop"; @@ -988,7 +1007,7 @@ vopb_out_mipi: endpoint@2 { vopb_mmu: iommu@ff930300 { compatible = "rockchip,iommu"; - reg = <0xff930300 0x100>; + reg = <0x0 0xff930300 0x0 0x100>; interrupts = ; interrupt-names = "vopb_mmu"; power-domains = <&power RK3288_PD_VIO>; @@ -998,7 +1017,7 @@ vopb_mmu: iommu@ff930300 { vopl: vop@ff940000 { compatible = "rockchip,rk3288-vop"; - reg = <0xff940000 0x19c>; + reg = <0x0 0xff940000 0x0 0x19c>; interrupts = ; clocks = <&cru ACLK_VOP1>, <&cru DCLK_VOP1>, <&cru HCLK_VOP1>; clock-names = "aclk_vop", "dclk_vop", "hclk_vop"; @@ -1031,7 +1050,7 @@ vopl_out_mipi: endpoint@2 { vopl_mmu: iommu@ff940300 { compatible = "rockchip,iommu"; - reg = <0xff940300 0x100>; + reg = <0x0 0xff940300 0x0 0x100>; interrupts = ; interrupt-names = "vopl_mmu"; power-domains = <&power RK3288_PD_VIO>; @@ -1041,7 +1060,7 @@ vopl_mmu: iommu@ff940300 { mipi_dsi: mipi@ff960000 { compatible = "rockchip,rk3288-mipi-dsi", "snps,dw-mipi-dsi"; - reg = <0xff960000 0x4000>; + reg = <0x0 0xff960000 0x0 0x4000>; interrupts = ; clocks = <&cru SCLK_MIPIDSI_24M>, <&cru PCLK_MIPI_DSI0>; clock-names = "ref", "pclk"; @@ -1069,7 +1088,7 @@ mipi_in_vopl: endpoint@1 { edp: dp@ff970000 { compatible = "rockchip,rk3288-dp"; - reg = <0xff970000 0x4000>; + reg = <0x0 0xff970000 0x0 0x4000>; interrupts = ; clocks = <&cru SCLK_EDP>, <&cru PCLK_EDP_CTRL>; clock-names = "dp", "pclk"; @@ -1101,7 +1120,7 @@ edp_in_vopl: endpoint@1 { hdmi: hdmi@ff980000 { compatible = "rockchip,rk3288-dw-hdmi"; - reg = <0xff980000 0x20000>; + reg = <0x0 0xff980000 0x0 0x20000>; reg-io-width = <4>; rockchip,grf = <&grf>; interrupts = ; @@ -1126,9 +1145,27 @@ hdmi_in_vopl: endpoint@1 { }; }; + vpu_mmu: iommu@ff9a0800 { + compatible = "rockchip,iommu"; + reg = <0x0 0xff9a0800 0x0 0x100>; + interrupts = ; + interrupt-names = "vpu_mmu"; + #iommu-cells = <0>; + status = "disabled"; + }; + + hevc_mmu: iommu@ff9c0440 { + compatible = "rockchip,iommu"; + reg = <0x0 0xff9c0440 0x0 0x40>, <0x0 0xff9c0480 0x0 0x40>; + interrupts = ; + interrupt-names = "hevc_mmu"; + #iommu-cells = <0>; + status = "disabled"; + }; + gpu: gpu@ffa30000 { compatible = "rockchip,rk3288-mali", "arm,mali-t760"; - reg = <0xffa30000 0x10000>; + reg = <0x0 0xffa30000 0x0 0x10000>; interrupts = , , ; @@ -1170,72 +1207,72 @@ opp@600000000 { qos_gpu_r: qos@ffaa0000 { compatible = "syscon"; - reg = <0xffaa0000 0x20>; + reg = <0x0 0xffaa0000 0x0 0x20>; }; qos_gpu_w: qos@ffaa0080 { compatible = "syscon"; - reg = <0xffaa0080 0x20>; + reg = <0x0 0xffaa0080 0x0 0x20>; }; qos_vio1_vop: qos@ffad0000 { compatible = "syscon"; - reg = <0xffad0000 0x20>; + reg = <0x0 0xffad0000 0x0 0x20>; }; qos_vio1_isp_w0: qos@ffad0100 { compatible = "syscon"; - reg = <0xffad0100 0x20>; + reg = <0x0 0xffad0100 0x0 0x20>; }; qos_vio1_isp_w1: qos@ffad0180 { compatible = "syscon"; - reg = <0xffad0180 0x20>; + reg = <0x0 0xffad0180 0x0 0x20>; }; qos_vio0_vop: qos@ffad0400 { compatible = "syscon"; - reg = <0xffad0400 0x20>; + reg = <0x0 0xffad0400 0x0 0x20>; }; qos_vio0_vip: qos@ffad0480 { compatible = "syscon"; - reg = <0xffad0480 0x20>; + reg = <0x0 0xffad0480 0x0 0x20>; }; qos_vio0_iep: qos@ffad0500 { compatible = "syscon"; - reg = <0xffad0500 0x20>; + reg = <0x0 0xffad0500 0x0 0x20>; }; qos_vio2_rga_r: qos@ffad0800 { compatible = "syscon"; - reg = <0xffad0800 0x20>; + reg = <0x0 0xffad0800 0x0 0x20>; }; qos_vio2_rga_w: qos@ffad0880 { compatible = "syscon"; - reg = <0xffad0880 0x20>; + reg = <0x0 0xffad0880 0x0 0x20>; }; qos_vio1_isp_r: qos@ffad0900 { compatible 
= "syscon"; - reg = <0xffad0900 0x20>; + reg = <0x0 0xffad0900 0x0 0x20>; }; qos_video: qos@ffae0000 { compatible = "syscon"; - reg = <0xffae0000 0x20>; + reg = <0x0 0xffae0000 0x0 0x20>; }; qos_hevc_r: qos@ffaf0000 { compatible = "syscon"; - reg = <0xffaf0000 0x20>; + reg = <0x0 0xffaf0000 0x0 0x20>; }; qos_hevc_w: qos@ffaf0080 { compatible = "syscon"; - reg = <0xffaf0080 0x20>; + reg = <0x0 0xffaf0080 0x0 0x20>; }; gic: interrupt-controller@ffc01000 { @@ -1244,16 +1281,16 @@ gic: interrupt-controller@ffc01000 { #interrupt-cells = <3>; #address-cells = <0>; - reg = <0xffc01000 0x1000>, - <0xffc02000 0x2000>, - <0xffc04000 0x2000>, - <0xffc06000 0x2000>; + reg = <0x0 0xffc01000 0x0 0x1000>, + <0x0 0xffc02000 0x0 0x2000>, + <0x0 0xffc04000 0x0 0x2000>, + <0x0 0xffc06000 0x0 0x2000>; interrupts = ; }; efuse: efuse@ffb40000 { compatible = "rockchip,rk3288-efuse"; - reg = <0xffb40000 0x20>; + reg = <0x0 0xffb40000 0x0 0x20>; #address-cells = <1>; #size-cells = <1>; clocks = <&cru PCLK_EFUSE256>; @@ -1268,13 +1305,13 @@ pinctrl: pinctrl { compatible = "rockchip,rk3288-pinctrl"; rockchip,grf = <&grf>; rockchip,pmu = <&pmu>; - #address-cells = <1>; - #size-cells = <1>; + #address-cells = <2>; + #size-cells = <2>; ranges; gpio0: gpio0@ff750000 { compatible = "rockchip,gpio-bank"; - reg = <0xff750000 0x100>; + reg = <0x0 0xff750000 0x0 0x100>; interrupts = ; clocks = <&cru PCLK_GPIO0>; @@ -1287,7 +1324,7 @@ gpio0: gpio0@ff750000 { gpio1: gpio1@ff780000 { compatible = "rockchip,gpio-bank"; - reg = <0xff780000 0x100>; + reg = <0x0 0xff780000 0x0 0x100>; interrupts = ; clocks = <&cru PCLK_GPIO1>; @@ -1300,7 +1337,7 @@ gpio1: gpio1@ff780000 { gpio2: gpio2@ff790000 { compatible = "rockchip,gpio-bank"; - reg = <0xff790000 0x100>; + reg = <0x0 0xff790000 0x0 0x100>; interrupts = ; clocks = <&cru PCLK_GPIO2>; @@ -1313,7 +1350,7 @@ gpio2: gpio2@ff790000 { gpio3: gpio3@ff7a0000 { compatible = "rockchip,gpio-bank"; - reg = <0xff7a0000 0x100>; + reg = <0x0 0xff7a0000 0x0 0x100>; interrupts = ; clocks = <&cru PCLK_GPIO3>; @@ -1326,7 +1363,7 @@ gpio3: gpio3@ff7a0000 { gpio4: gpio4@ff7b0000 { compatible = "rockchip,gpio-bank"; - reg = <0xff7b0000 0x100>; + reg = <0x0 0xff7b0000 0x0 0x100>; interrupts = ; clocks = <&cru PCLK_GPIO4>; @@ -1339,7 +1376,7 @@ gpio4: gpio4@ff7b0000 { gpio5: gpio5@ff7c0000 { compatible = "rockchip,gpio-bank"; - reg = <0xff7c0000 0x100>; + reg = <0x0 0xff7c0000 0x0 0x100>; interrupts = ; clocks = <&cru PCLK_GPIO5>; @@ -1352,7 +1389,7 @@ gpio5: gpio5@ff7c0000 { gpio6: gpio6@ff7d0000 { compatible = "rockchip,gpio-bank"; - reg = <0xff7d0000 0x100>; + reg = <0x0 0xff7d0000 0x0 0x100>; interrupts = ; clocks = <&cru PCLK_GPIO6>; @@ -1365,7 +1402,7 @@ gpio6: gpio6@ff7d0000 { gpio7: gpio7@ff7e0000 { compatible = "rockchip,gpio-bank"; - reg = <0xff7e0000 0x100>; + reg = <0x0 0xff7e0000 0x0 0x100>; interrupts = ; clocks = <&cru PCLK_GPIO7>; @@ -1378,7 +1415,7 @@ gpio7: gpio7@ff7e0000 { gpio8: gpio8@ff7f0000 { compatible = "rockchip,gpio-bank"; - reg = <0xff7f0000 0x100>; + reg = <0x0 0xff7f0000 0x0 0x100>; interrupts = ; clocks = <&cru PCLK_GPIO8>; diff --git a/arch/arm/boot/dts/rv1108-evb.dts b/arch/arm/boot/dts/rv1108-evb.dts index 58cf4ac079c3..86a57f823616 100644 --- a/arch/arm/boot/dts/rv1108-evb.dts +++ b/arch/arm/boot/dts/rv1108-evb.dts @@ -54,6 +54,184 @@ memory@60000000 { chosen { stdout-path = "serial2:1500000n8"; }; + + backlight: backlight { + compatible = "pwm-backlight"; + brightness-levels = < + 0 1 2 3 4 5 6 7 + 8 9 10 11 12 13 14 15 + 16 17 18 19 20 21 22 23 + 24 25 26 27 28 29 30 
31 + 32 33 34 35 36 37 38 39 + 40 41 42 43 44 45 46 47 + 48 49 50 51 52 53 54 55 + 56 57 58 59 60 61 62 63 + 64 65 66 67 68 69 70 71 + 72 73 74 75 76 77 78 79 + 80 81 82 83 84 85 86 87 + 88 89 90 91 92 93 94 95 + 96 97 98 99 100 101 102 103 + 104 105 106 107 108 109 110 111 + 112 113 114 115 116 117 118 119 + 120 121 122 123 124 125 126 127 + 128 129 130 131 132 133 134 135 + 136 137 138 139 140 141 142 143 + 144 145 146 147 148 149 150 151 + 152 153 154 155 156 157 158 159 + 160 161 162 163 164 165 166 167 + 168 169 170 171 172 173 174 175 + 176 177 178 179 180 181 182 183 + 184 185 186 187 188 189 190 191 + 192 193 194 195 196 197 198 199 + 200 201 202 203 204 205 206 207 + 208 209 210 211 212 213 214 215 + 216 217 218 219 220 221 222 223 + 224 225 226 227 228 229 230 231 + 232 233 234 235 236 237 238 239 + 240 241 242 243 244 245 246 247 + 248 249 250 251 252 253 254 255>; + default-brightness-level = <200>; + pwms = <&pwm0 0 25000 0>; + }; + + vcc_sys: vsys-regulator { + compatible = "regulator-fixed"; + regulator-name = "vsys"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + regulator-boot-on; + }; +}; + +&cpu0 { + cpu-supply = <&vdd_core>; +}; + +&i2c0 { + status = "okay"; + i2c-scl-rising-time-ns = <275>; + i2c-scl-falling-time-ns = <16>; + clock-frequency = <400000>; + + rk805: pmic@18 { + compatible = "rockchip,rk805"; + reg = <0x18>; + interrupt-parent = <&gpio0>; + interrupts = ; + rockchip,system-power-controller; + + vcc1-supply = <&vcc_sys>; + vcc2-supply = <&vcc_sys>; + vcc3-supply = <&vcc_sys>; + vcc4-supply = <&vcc_sys>; + vcc5-supply = <&vcc_sys>; + vcc6-supply = <&vcc_sys>; + + regulators { + vdd_core: DCDC_REG1 { + regulator-name= "vdd_core"; + regulator-min-microvolt = <700000>; + regulator-max-microvolt = <1500000>; + regulator-always-on; + regulator-boot-on; + regulator-state-mem { + regulator-state-enabled; + regulator-state-uv = <900000>; + }; + }; + + vdd_cam: DCDC_REG2 { + regulator-name= "vdd_cam"; + regulator-min-microvolt = <700000>; + regulator-max-microvolt = <2000000>; + regulator-state-mem { + regulator-state-disabled; + }; + }; + + vcc_ddr: DCDC_REG3 { + regulator-name= "vcc_ddr"; + regulator-always-on; + regulator-boot-on; + regulator-state-mem { + regulator-state-enabled; + }; + }; + + vcc_io: DCDC_REG4 { + regulator-name= "vcc_io"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + regulator-boot-on; + regulator-state-mem { + regulator-state-enabled; + regulator-state-uv = <3300000>; + }; + }; + + vdd_10: LDO_REG1 { + regulator-name= "vdd_10"; + regulator-min-microvolt = <1000000>; + regulator-max-microvolt = <1000000>; + regulator-always-on; + regulator-boot-on; + regulator-state-mem { + regulator-state-disabled; + }; + }; + + vcc_18: LDO_REG2 { + regulator-name= "vcc_18"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; + regulator-boot-on; + regulator-state-mem { + regulator-state-disabled; + }; + }; + + vdd10_pmu: LDO_REG3 { + regulator-name= "vdd10_pmu"; + regulator-min-microvolt = <1000000>; + regulator-max-microvolt = <1000000>; + regulator-always-on; + regulator-boot-on; + regulator-state-mem { + regulator-state-enabled; + regulator-state-uv = <1000000>; + }; + }; + }; + }; + + bma250: accelerometer@19 { + compatible = "bosch,bma250e"; + reg = <0x19>; + interrupt-parent = <&gpio0>; + interrupts = ; + }; +}; + +&pwm0 { + status = "okay"; +}; + +&sdmmc { + status = "okay"; +}; + +&u2phy { + status = "okay"; + + 
u2phy_host: host-port { + status = "okay"; + }; + + u2phy_otg: otg-port { + status = "okay"; + }; }; &uart0 { @@ -67,3 +245,15 @@ &uart1 { &uart2 { status = "okay"; }; + +&usb_host_ehci { + status = "okay"; +}; + +&usb_host_ohci { + status = "okay"; +}; + +&usb_otg { + status = "okay"; +}; diff --git a/arch/arm/boot/dts/rv1108.dtsi b/arch/arm/boot/dts/rv1108.dtsi index 437098b556eb..e7cd1315db1b 100644 --- a/arch/arm/boot/dts/rv1108.dtsi +++ b/arch/arm/boot/dts/rv1108.dtsi @@ -52,6 +52,10 @@ / { interrupt-parent = <&gic>; aliases { + i2c0 = &i2c0; + i2c1 = &i2c1; + i2c2 = &i2c2; + i2c3 = &i2c3; serial0 = &uart0; serial1 = &uart1; serial2 = &uart2; @@ -65,6 +69,33 @@ cpu0: cpu@f00 { device_type = "cpu"; compatible = "arm,cortex-a7"; reg = <0xf00>; + clocks = <&cru ARMCLK>; + operating-points-v2 = <&cpu_opp_table>; + }; + }; + + cpu_opp_table: opp_table { + compatible = "operating-points-v2"; + + opp-408000000 { + opp-hz = /bits/ 64 <408000000>; + opp-microvolt = <975000>; + clock-latency-ns = <40000>; + }; + opp-600000000 { + opp-hz = /bits/ 64 <600000000>; + opp-microvolt = <975000>; + clock-latency-ns = <40000>; + }; + opp-816000000 { + opp-hz = /bits/ 64 <816000000>; + opp-microvolt = <1025000>; + clock-latency-ns = <40000>; + }; + opp-1008000000 { + opp-hz = /bits/ 64 <1008000000>; + opp-microvolt = <1150000>; + clock-latency-ns = <40000>; }; }; @@ -154,9 +185,221 @@ uart0: serial@10230000 { status = "disabled"; }; + i2c1: i2c@10240000 { + compatible = "rockchip,rv1108-i2c"; + reg = <0x10240000 0x1000>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clocks = <&cru SCLK_I2C1>, <&cru PCLK_I2C1>; + clock-names = "i2c", "pclk"; + pinctrl-names = "default"; + pinctrl-0 = <&i2c1_xfer>; + rockchip,grf = <&grf>; + status = "disabled"; + }; + + i2c2: i2c@10250000 { + compatible = "rockchip,rv1108-i2c"; + reg = <0x10250000 0x1000>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clocks = <&cru SCLK_I2C2>, <&cru PCLK_I2C2>; + clock-names = "i2c", "pclk"; + pinctrl-names = "default"; + pinctrl-0 = <&i2c2m1_xfer>; + rockchip,grf = <&grf>; + status = "disabled"; + }; + + i2c3: i2c@10260000 { + compatible = "rockchip,rv1108-i2c"; + reg = <0x10260000 0x1000>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clocks = <&cru SCLK_I2C3>, <&cru PCLK_I2C3>; + clock-names = "i2c", "pclk"; + pinctrl-names = "default"; + pinctrl-0 = <&i2c3_xfer>; + rockchip,grf = <&grf>; + status = "disabled"; + }; + + spi: spi@10270000 { + compatible = "rockchip,rv1108-spi"; + reg = <0x10270000 0x1000>; + interrupts = ; + clocks = <&cru SCLK_SPI>, <&cru PCLK_SPI>; + clock-names = "spiclk", "apb_pclk"; + dmas = <&pdma 8>, <&pdma 9>; + #dma-cells = <2>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + pwm4: pwm@10280000 { + compatible = "rockchip,rv1108-pwm", "rockchip,rk3288-pwm"; + reg = <0x10280000 0x10>; + interrupts = ; + clocks = <&cru SCLK_PWM>, <&cru PCLK_PWM>; + clock-names = "pwm", "pclk"; + pinctrl-names = "default"; + pinctrl-0 = <&pwm4_pin>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm5: pwm@10280010 { + compatible = "rockchip,rv1108-pwm", "rockchip,rk3288-pwm"; + reg = <0x10280010 0x10>; + interrupts = ; + clocks = <&cru SCLK_PWM>, <&cru PCLK_PWM>; + clock-names = "pwm", "pclk"; + pinctrl-names = "default"; + pinctrl-0 = <&pwm5_pin>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm6: pwm@10280020 { + compatible = "rockchip,rv1108-pwm", "rockchip,rk3288-pwm"; + reg = <0x10280020 0x10>; + interrupts = ; + clocks = <&cru 
SCLK_PWM>, <&cru PCLK_PWM>; + clock-names = "pwm", "pclk"; + pinctrl-names = "default"; + pinctrl-0 = <&pwm6_pin>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm7: pwm@10280030 { + compatible = "rockchip,rv1108-pwm", "rockchip,rk3288-pwm"; + reg = <0x10280030 0x10>; + interrupts = ; + clocks = <&cru SCLK_PWM>, <&cru PCLK_PWM>; + clock-names = "pwm", "pclk"; + pinctrl-names = "default"; + pinctrl-0 = <&pwm7_pin>; + #pwm-cells = <3>; + status = "disabled"; + }; + grf: syscon@10300000 { - compatible = "rockchip,rv1108-grf", "syscon"; + compatible = "rockchip,rv1108-grf", "syscon", "simple-mfd"; reg = <0x10300000 0x1000>; + #address-cells = <1>; + #size-cells = <1>; + + u2phy: usb2-phy@100 { + compatible = "rockchip,rv1108-usb2phy"; + reg = <0x100 0x0c>; + clocks = <&cru SCLK_USBPHY>; + clock-names = "phyclk"; + #clock-cells = <0>; + clock-output-names = "usbphy"; + rockchip,usbgrf = <&usbgrf>; + status = "disabled"; + + u2phy_otg: otg-port { + interrupts = ; + interrupt-names = "otg-mux"; + #phy-cells = <0>; + status = "disabled"; + }; + + u2phy_host: host-port { + interrupts = ; + interrupt-names = "linestate"; + #phy-cells = <0>; + status = "disabled"; + }; + }; + }; + + watchdog: wdt@10360000 { + compatible = "snps,dw-wdt"; + reg = <0x10360000 0x100>; + interrupts = ; + clocks = <&cru PCLK_WDT>; + clock-names = "pclk_wdt"; + status = "disabled"; + }; + + adc: adc@1038c000 { + compatible = "rockchip,rv1108-saradc", "rockchip,rk3399-saradc"; + reg = <0x1038c000 0x100>; + interrupts = ; + #io-channel-cells = <1>; + clock-frequency = <1000000>; + clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>; + clock-names = "saradc", "apb_pclk"; + status = "disabled"; + }; + + i2c0: i2c@20000000 { + compatible = "rockchip,rv1108-i2c"; + reg = <0x20000000 0x1000>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clocks = <&cru SCLK_I2C0_PMU>, <&cru PCLK_I2C0_PMU>; + clock-names = "i2c", "pclk"; + pinctrl-names = "default"; + pinctrl-0 = <&i2c0_xfer>; + rockchip,grf = <&grf>; + status = "disabled"; + }; + + pwm0: pwm@20040000 { + compatible = "rockchip,rv1108-pwm", "rockchip,rk3288-pwm"; + reg = <0x20040000 0x10>; + interrupts = ; + clocks = <&cru SCLK_PWM0_PMU>, <&cru PCLK_PWM0_PMU>; + clock-names = "pwm", "pclk"; + pinctrl-names = "default"; + pinctrl-0 = <&pwm0_pin>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm1: pwm@20040010 { + compatible = "rockchip,rv1108-pwm", "rockchip,rk3288-pwm"; + reg = <0x20040010 0x10>; + interrupts = ; + clocks = <&cru SCLK_PWM0_PMU>, <&cru PCLK_PWM0_PMU>; + clock-names = "pwm", "pclk"; + pinctrl-names = "default"; + pinctrl-0 = <&pwm1_pin>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm2: pwm@20040020 { + compatible = "rockchip,rv1108-pwm", "rockchip,rk3288-pwm"; + reg = <0x20040020 0x10>; + interrupts = ; + clocks = <&cru SCLK_PWM0_PMU>, <&cru PCLK_PWM0_PMU>; + clock-names = "pwm", "pclk"; + pinctrl-names = "default"; + pinctrl-0 = <&pwm2_pin>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm3: pwm@20040030 { + compatible = "rockchip,rv1108-pwm", "rockchip,rk3288-pwm"; + reg = <0x20040030 0x10>; + interrupts = ; + clocks = <&cru SCLK_PWM0_PMU>, <&cru PCLK_PWM0_PMU>; + clock-names = "pwm", "pclk"; + pinctrl-names = "default"; + pinctrl-0 = <&pwm3_pin>; + #pwm-cells = <3>; + status = "disabled"; }; pmugrf: syscon@20060000 { @@ -164,6 +407,11 @@ pmugrf: syscon@20060000 { reg = <0x20060000 0x1000>; }; + usbgrf: syscon@202a0000 { + compatible = "rockchip,rv1108-usbgrf", "syscon"; + reg = <0x202a0000 0x1000>; + }; + cru: 
clock-controller@20200000 { compatible = "rockchip,rv1108-cru"; reg = <0x20200000 0x1000>; @@ -174,37 +422,78 @@ cru: clock-controller@20200000 { emmc: dwmmc@30110000 { compatible = "rockchip,rv1108-dw-mshc", "rockchip,rk3288-dw-mshc"; - clock-freq-min-max = <400000 150000000>; + reg = <0x30110000 0x4000>; + interrupts = ; clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>, <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>; clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; fifo-depth = <0x100>; - interrupts = ; - reg = <0x30110000 0x4000>; + max-frequency = <150000000>; status = "disabled"; }; sdio: dwmmc@30120000 { compatible = "rockchip,rv1108-dw-mshc", "rockchip,rk3288-dw-mshc"; - clock-freq-min-max = <400000 150000000>; + reg = <0x30120000 0x4000>; + interrupts = ; clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>, <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>; clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; fifo-depth = <0x100>; - interrupts = ; - reg = <0x30120000 0x4000>; + max-frequency = <150000000>; status = "disabled"; }; sdmmc: dwmmc@30130000 { compatible = "rockchip,rv1108-dw-mshc", "rockchip,rk3288-dw-mshc"; - clock-freq-min-max = <400000 100000000>; + reg = <0x30130000 0x4000>; + interrupts = ; clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>, <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>; clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; fifo-depth = <0x100>; - interrupts = ; - reg = <0x30130000 0x4000>; + max-frequency = <100000000>; + pinctrl-names = "default"; + pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>; + status = "disabled"; + }; + + usb_host_ehci: usb@30140000 { + compatible = "generic-ehci"; + reg = <0x30140000 0x20000>; + interrupts = ; + clocks = <&cru HCLK_HOST0>, <&u2phy>; + clock-names = "usbhost", "utmi"; + phys = <&u2phy_host>; + phy-names = "usb"; + status = "disabled"; + }; + + usb_host_ohci: usb@30160000 { + compatible = "generic-ohci"; + reg = <0x30160000 0x20000>; + interrupts = ; + clocks = <&cru HCLK_HOST0>, <&u2phy>; + clock-names = "usbhost", "utmi"; + phys = <&u2phy_host>; + phy-names = "usb"; + status = "disabled"; + }; + + usb_otg: usb@30180000 { + compatible = "rockchip,rv1108-usb", "rockchip,rk3066-usb", + "snps,dwc2"; + reg = <0x30180000 0x40000>; + interrupts = ; + clocks = <&cru HCLK_OTG>; + clock-names = "otg"; + dr_mode = "otg"; + g-np-tx-fifo-size = <16>; + g-rx-fifo-size = <280>; + g-tx-fifo-size = <256 128 128 64 32 16>; + g-use-dma; + phys = <&u2phy_otg>; + phy-names = "usb2-phy"; status = "disabled"; }; @@ -301,6 +590,11 @@ pcfg_pull_none_drv_12ma: pcfg-pull-none-drv-12ma { drive-strength = <12>; }; + pcfg_pull_none_smt: pcfg-pull-none-smt { + bias-disable; + input-schmitt-enable; + }; + pcfg_pull_up_drv_8ma: pcfg-pull-up-drv-8ma { bias-pull-up; drive-strength = <8>; @@ -328,6 +622,13 @@ pcfg_input_high: pcfg-input-high { input-enable; }; + i2c0 { + i2c0_xfer: i2c0-xfer { + rockchip,pins = <0 RK_PB1 RK_FUNC_1 &pcfg_pull_none_smt>, + <0 RK_PB2 RK_FUNC_1 &pcfg_pull_none_smt>; + }; + }; + i2c1 { i2c1_xfer: i2c1-xfer { rockchip,pins = <2 RK_PD3 RK_FUNC_1 &pcfg_pull_up>, @@ -366,6 +667,54 @@ i2c3_xfer: i2c3-xfer { }; }; + pwm0 { + pwm0_pin: pwm0-pin { + rockchip,pins = <0 RK_PC5 RK_FUNC_1 &pcfg_pull_none>; + }; + }; + + pwm1 { + pwm1_pin: pwm1-pin { + rockchip,pins = <0 RK_PC4 RK_FUNC_1 &pcfg_pull_none>; + }; + }; + + pwm2 { + pwm2_pin: pwm2-pin { + rockchip,pins = <0 RK_PC6 RK_FUNC_1 &pcfg_pull_none>; + }; + }; + + pwm3 { + pwm3_pin: pwm3-pin { + rockchip,pins = <0 RK_PC0 RK_FUNC_1 &pcfg_pull_none>; + }; + }; + + pwm4 { + 
pwm4_pin: pwm4-pin { + rockchip,pins = <1 RK_PC1 RK_FUNC_3 &pcfg_pull_none>; + }; + }; + + pwm5 { + pwm5_pin: pwm5-pin { + rockchip,pins = <1 RK_PA7 RK_FUNC_2 &pcfg_pull_none>; + }; + }; + + pwm6 { + pwm6_pin: pwm6-pin { + rockchip,pins = <1 RK_PB0 RK_FUNC_2 &pcfg_pull_none>; + }; + }; + + pwm7 { + pwm7_pin: pwm7-pin { + rockchip,pins = <1 RK_PB1 RK_FUNC_2 &pcfg_pull_none>; + }; + }; + sdmmc { sdmmc_clk: sdmmc-clk { rockchip,pins = <3 RK_PC4 RK_FUNC_1 &pcfg_pull_none_drv_4ma>; diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi index 60e69aeacbdb..38d2216c7ead 100644 --- a/arch/arm/boot/dts/sama5d2.dtsi +++ b/arch/arm/boot/dts/sama5d2.dtsi @@ -416,6 +416,17 @@ hlcdc_pwm: hlcdc-pwm { }; }; + isc: isc@f0008000 { + compatible = "atmel,sama5d2-isc"; + reg = <0xf0008000 0x4000>; + interrupts = <46 IRQ_TYPE_LEVEL_HIGH 5>; + clocks = <&isc_clk>, <&iscck>, <&isc_gclk>; + clock-names = "hclock", "iscck", "gck"; + #clock-cells = <0>; + clock-output-names = "isc-mck"; + status = "disabled"; + }; + ramc0: ramc@f000c000 { compatible = "atmel,sama5d3-ddramc"; reg = <0xf000c000 0x200>; @@ -494,6 +505,24 @@ plladiv: plladivck { clocks = <&plla>; }; + audio_pll_frac: audiopll_fracck { + compatible = "atmel,sama5d2-clk-audio-pll-frac"; + #clock-cells = <0>; + clocks = <&main>; + }; + + audio_pll_pad: audiopll_padck { + compatible = "atmel,sama5d2-clk-audio-pll-pad"; + #clock-cells = <0>; + clocks = <&audio_pll_frac>; + }; + + audio_pll_pmc: audiopll_pmcck { + compatible = "atmel,sama5d2-clk-audio-pll-pmc"; + #clock-cells = <0>; + clocks = <&audio_pll_frac>; + }; + utmi: utmick { compatible = "atmel,at91sam9x5-clk-utmi"; #clock-cells = <0>; @@ -895,7 +924,7 @@ gck { #address-cells = <1>; #size-cells = <0>; interrupt-parent = <&pmc>; - clocks = <&clk32k>, <&main>, <&plladiv>, <&utmi>, <&mck>; + clocks = <&clk32k>, <&main>, <&plladiv>, <&utmi>, <&mck>, <&audio_pll_pmc>; sdmmc0_gclk: sdmmc0_gclk { #clock-cells = <0>; @@ -925,6 +954,11 @@ pwm_gclk: pwm_gclk { atmel,clk-output-range = <0 83000000>; }; + isc_gclk: isc_gclk { + #clock-cells = <0>; + reg = <46>; + }; + pdmic_gclk: pdmic_gclk { #clock-cells = <0>; reg = <48>; @@ -951,9 +985,37 @@ can1_gclk: can1_gclk { reg = <57>; atmel,clk-output-range = <0 80000000>; }; + + classd_gclk: classd_gclk { + #clock-cells = <0>; + reg = <59>; + atmel,clk-output-range = <0 100000000>; + }; }; }; + qspi0: spi@f0020000 { + compatible = "atmel,sama5d2-qspi"; + reg = <0xf0020000 0x100>, <0xd0000000 0x08000000>; + reg-names = "qspi_base", "qspi_mmap"; + interrupts = <52 IRQ_TYPE_LEVEL_HIGH 7>; + clocks = <&qspi0_clk>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + qspi1: spi@f0024000 { + compatible = "atmel,sama5d2-qspi"; + reg = <0xf0024000 0x100>, <0xd8000000 0x08000000>; + reg-names = "qspi_base", "qspi_mmap"; + interrupts = <53 IRQ_TYPE_LEVEL_HIGH 7>; + clocks = <&qspi1_clk>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + sha@f0028000 { compatible = "atmel,at91sam9g46-sha"; reg = <0xf0028000 0x100>; @@ -1406,6 +1468,19 @@ AT91_XDMAC_DT_PERID(28))>, status = "okay"; }; + classd: classd@fc048000 { + compatible = "atmel,sama5d2-classd"; + reg = <0xfc048000 0x100>; + interrupts = <59 IRQ_TYPE_LEVEL_HIGH 7>; + dmas = <&dma0 + (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) | + AT91_XDMAC_DT_PERID(47))>; + dma-names = "tx"; + clocks = <&classd_clk>, <&classd_gclk>; + clock-names = "pclk", "gclk"; + status = "disabled"; + }; + can1: can@fc050000 { compatible = "bosch,m_can"; reg = <0xfc050000 
0x4000>, <0x210000 0x4000>; diff --git a/arch/arm/boot/dts/spear1310.dtsi b/arch/arm/boot/dts/spear1310.dtsi index 54bc6d3cf290..40f4ad3c34c6 100644 --- a/arch/arm/boot/dts/spear1310.dtsi +++ b/arch/arm/boot/dts/spear1310.dtsi @@ -98,6 +98,7 @@ pcie0: pcie@b1000000 { device_type = "pci"; ranges = <0x81000000 0 0 0x80020000 0 0x00010000 /* downstream I/O */ 0x82000000 0 0x80030000 0xc0030000 0 0x0ffd0000>; /* non-prefetchable memory */ + bus-range = <0x00 0xff>; status = "disabled"; }; @@ -116,6 +117,7 @@ pcie1: pcie@b1800000 { device_type = "pci"; ranges = <0x81000000 0 0 0x90020000 0 0x00010000 /* downstream I/O */ 0x82000000 0 0x90030000 0x90030000 0 0x0ffd0000>; /* non-prefetchable memory */ + bus-range = <0x00 0xff>; status = "disabled"; }; @@ -134,6 +136,7 @@ pcie2: pcie@b4000000 { device_type = "pci"; ranges = <0x81000000 0 0 0xc0020000 0 0x00010000 /* downstream I/O */ 0x82000000 0 0xc0030000 0xc0030000 0 0x0ffd0000>; /* non-prefetchable memory */ + bus-range = <0x00 0xff>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/spear1340.dtsi b/arch/arm/boot/dts/spear1340.dtsi index df2232d767ed..5f347054527d 100644 --- a/arch/arm/boot/dts/spear1340.dtsi +++ b/arch/arm/boot/dts/spear1340.dtsi @@ -63,6 +63,7 @@ pcie0: pcie@b1000000 { device_type = "pci"; ranges = <0x81000000 0 0 0x80020000 0 0x00010000 /* downstream I/O */ 0x82000000 0 0x80030000 0xc0030000 0 0x0ffd0000>; /* non-prefetchable memory */ + bus-range = <0x00 0xff>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi index 6c5affe2d0f5..2310a4e97768 100644 --- a/arch/arm/boot/dts/ste-dbx5x0.dtsi +++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi @@ -37,6 +37,14 @@ CPU0: cpu@300 { device_type = "cpu"; compatible = "arm,cortex-a9"; reg = <0x300>; + /* cpufreq controls */ + operating-points = <998400 0 + 800000 0 + 400000 0 + 200000 0>; + clocks = <&prcmu_clk PRCMU_ARMSS>; + clock-names = "cpu"; + clock-latency = <20000>; }; CPU1: cpu@301 { device_type = "cpu"; @@ -494,13 +502,6 @@ prcmu-timer-4@80157450 { reg = <0x80157450 0xC>; }; - cpufreq { - compatible = "stericsson,cpufreq-ux500"; - clocks = <&prcmu_clk PRCMU_ARMSS>; - clock-names = "armss"; - status = "disabled"; - }; - thermal@801573c0 { compatible = "stericsson,db8500-thermal"; reg = <0x801573c0 0x40>; diff --git a/arch/arm/boot/dts/ste-hrefprev60.dtsi b/arch/arm/boot/dts/ste-hrefprev60.dtsi index 5882a2606ac3..3f14b4df69b4 100644 --- a/arch/arm/boot/dts/ste-hrefprev60.dtsi +++ b/arch/arm/boot/dts/ste-hrefprev60.dtsi @@ -30,7 +30,7 @@ uart@80121000 { i2c@80004000 { tps61052@33 { - compatible = "tps61052"; + compatible = "ti,tps61052"; reg = <0x33>; }; diff --git a/arch/arm/boot/dts/stm32429i-eval.dts b/arch/arm/boot/dts/stm32429i-eval.dts index dcda0bbefe5b..293ecb957227 100644 --- a/arch/arm/boot/dts/stm32429i-eval.dts +++ b/arch/arm/boot/dts/stm32429i-eval.dts @@ -47,6 +47,7 @@ /dts-v1/; #include "stm32f429.dtsi" +#include "stm32f429-pinctrl.dtsi" #include #include @@ -55,7 +56,7 @@ / { compatible = "st,stm32429i-eval", "st,stm32f429"; chosen { - bootargs = "root=/dev/ram rdinit=/linuxrc"; + bootargs = "root=/dev/ram"; stdout-path = "serial0:115200n8"; }; @@ -202,10 +203,8 @@ ov2640_0: endpoint { stmpe1600: stmpe1600@42 { compatible = "st,stmpe1600"; reg = <0x42>; - irq-gpio = <&gpioi 8 0>; - irq-trigger = <3>; interrupts = <8 3>; - interrupt-parent = <&exti>; + interrupt-parent = <&gpioi>; interrupt-controller; wakeup-source; diff --git a/arch/arm/boot/dts/stm32f4-pinctrl.dtsi b/arch/arm/boot/dts/stm32f4-pinctrl.dtsi 
new file mode 100644 index 000000000000..7f3560c0211d --- /dev/null +++ b/arch/arm/boot/dts/stm32f4-pinctrl.dtsi @@ -0,0 +1,343 @@ +/* + * Copyright 2017 - Alexandre Torgue + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#include +#include + +/ { + soc { + pinctrl: pin-controller { + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 0x40020000 0x3000>; + interrupt-parent = <&exti>; + st,syscfg = <&syscfg 0x8>; + pins-are-numbered; + + gpioa: gpio@40020000 { + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + reg = <0x0 0x400>; + clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOA)>; + st,bank-name = "GPIOA"; + }; + + gpiob: gpio@40020400 { + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + reg = <0x400 0x400>; + clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOB)>; + st,bank-name = "GPIOB"; + }; + + gpioc: gpio@40020800 { + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + reg = <0x800 0x400>; + clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOC)>; + st,bank-name = "GPIOC"; + }; + + gpiod: gpio@40020c00 { + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + reg = <0xc00 0x400>; + clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOD)>; + st,bank-name = "GPIOD"; + }; + + gpioe: gpio@40021000 { + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + reg = <0x1000 0x400>; + clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOE)>; + st,bank-name = "GPIOE"; + }; + + gpiof: gpio@40021400 { + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + reg = <0x1400 0x400>; + clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOF)>; + st,bank-name = "GPIOF"; + }; + + gpiog: gpio@40021800 { + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + reg = <0x1800 0x400>; + clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOG)>; + st,bank-name = "GPIOG"; + }; + + gpioh: gpio@40021c00 { + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + reg = <0x1c00 0x400>; + clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOH)>; + st,bank-name = "GPIOH"; + }; + + gpioi: gpio@40022000 { + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + reg = <0x2000 0x400>; + clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOI)>; + st,bank-name = "GPIOI"; + }; + + gpioj: gpio@40022400 { + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + reg = <0x2400 0x400>; + clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOJ)>; + st,bank-name = "GPIOJ"; + }; + + gpiok: gpio@40022800 { + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + reg = <0x2800 0x400>; + clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOK)>; + st,bank-name = "GPIOK"; + }; + + usart1_pins_a: usart1@0 { + pins1 { + pinmux = ; + bias-disable; + drive-push-pull; + slew-rate = <0>; + }; + pins2 { + pinmux = ; + bias-disable; + }; + }; + + usart3_pins_a: usart3@0 { + pins1 { + pinmux = ; + bias-disable; + drive-push-pull; + slew-rate = <0>; + }; + pins2 { + pinmux = ; + bias-disable; + }; + }; + + usbotg_fs_pins_a: usbotg_fs@0 { + pins { + pinmux = , + , + ; + bias-disable; + drive-push-pull; + slew-rate = <2>; + }; + }; + + usbotg_fs_pins_b: usbotg_fs@1 { + pins { + pinmux = , + , + ; + bias-disable; + drive-push-pull; + slew-rate = <2>; + }; + }; + + usbotg_hs_pins_a: usbotg_hs@0 { + pins { + pinmux = , + , + , + , + , + , + , + , + , + , + , + ; + bias-disable; + drive-push-pull; + slew-rate = <2>; + }; + }; + + ethernet_mii: mii@0 { + pins { + pinmux = , + , + , + , + , + , + , + , + , + , + , + , + , + ; + slew-rate = <2>; + }; + }; + + adc3_in8_pin: adc@200 { + pins { + pinmux 
= ; + }; + }; + + pwm1_pins: pwm@1 { + pins { + pinmux = , + , + ; + }; + }; + + pwm3_pins: pwm@3 { + pins { + pinmux = , + ; + }; + }; + + i2c1_pins: i2c1@0 { + pins { + pinmux = , + ; + bias-disable; + drive-open-drain; + slew-rate = <3>; + }; + }; + + ltdc_pins: ltdc@0 { + pins { + pinmux = , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + slew-rate = <2>; + }; + }; + + dcmi_pins: dcmi@0 { + pins { + pinmux = , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + bias-disable; + drive-push-pull; + slew-rate = <3>; + }; + }; + }; + }; +}; diff --git a/arch/arm/boot/dts/stm32f429-disco.dts b/arch/arm/boot/dts/stm32f429-disco.dts index ae47cde7952f..5ceb2cf3777f 100644 --- a/arch/arm/boot/dts/stm32f429-disco.dts +++ b/arch/arm/boot/dts/stm32f429-disco.dts @@ -47,6 +47,7 @@ /dts-v1/; #include "stm32f429.dtsi" +#include "stm32f429-pinctrl.dtsi" #include / { @@ -54,7 +55,7 @@ / { compatible = "st,stm32f429i-disco", "st,stm32f429"; chosen { - bootargs = "root=/dev/ram rdinit=/linuxrc"; + bootargs = "root=/dev/ram"; stdout-path = "serial0:115200n8"; }; diff --git a/arch/arm/boot/dts/imx6ul-geam-kit.dts b/arch/arm/boot/dts/stm32f429-pinctrl.dtsi similarity index 63% rename from arch/arm/boot/dts/imx6ul-geam-kit.dts rename to arch/arm/boot/dts/stm32f429-pinctrl.dtsi index 142e60cab65f..3e7a17d9112e 100644 --- a/arch/arm/boot/dts/imx6ul-geam-kit.dts +++ b/arch/arm/boot/dts/stm32f429-pinctrl.dtsi @@ -1,6 +1,5 @@ /* - * Copyright (C) 2016 Amarula Solutions B.V. - * Copyright (C) 2016 Engicam S.r.l. + * Copyright 2017 - Alexandre Torgue * * This file is dual-licensed: you can use it either under the terms * of the GPL or the X11 license, at your option. Note that this dual @@ -8,8 +7,9 @@ * whole. * * a) This file is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. * * This file is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -40,62 +40,56 @@ * OTHER DEALINGS IN THE SOFTWARE. 
*/ -/dts-v1/; - -#include -#include "imx6ul-geam.dtsi" +#include "stm32f4-pinctrl.dtsi" / { - model = "Engicam GEAM6UL"; - compatible = "engicam,imx6ul-geam", "fsl,imx6ul"; -}; + soc { + pinctrl: pin-controller { + compatible = "st,stm32f429-pinctrl"; -&can1 { - status = "okay"; -}; + gpioa: gpio@40020000 { + gpio-ranges = <&pinctrl 0 0 16>; + }; -&can2 { - status = "okay"; -}; + gpiob: gpio@40020400 { + gpio-ranges = <&pinctrl 0 16 16>; + }; -&lcdif { - display = <&display0>; - status = "okay"; + gpioc: gpio@40020800 { + gpio-ranges = <&pinctrl 0 32 16>; + }; - display0: display { - bits-per-pixel = <16>; - bus-width = <18>; - status = "okay"; + gpiod: gpio@40020c00 { + gpio-ranges = <&pinctrl 0 48 16>; + }; - display-timings { - native-mode = <&timing0>; - timing0: timing0 { - clock-frequency = <28000000>; - hactive = <800>; - vactive = <480>; - hfront-porch = <30>; - hback-porch = <30>; - hsync-len = <64>; - vback-porch = <5>; - vfront-porch = <5>; - vsync-len = <20>; - hsync-active = <0>; - vsync-active = <0>; - de-active = <1>; - pixelclk-active = <0>; + gpioe: gpio@40021000 { + gpio-ranges = <&pinctrl 0 64 16>; + }; + + gpiof: gpio@40021400 { + gpio-ranges = <&pinctrl 0 80 16>; + }; + + gpiog: gpio@40021800 { + gpio-ranges = <&pinctrl 0 96 16>; + }; + + gpioh: gpio@40021c00 { + gpio-ranges = <&pinctrl 0 112 16>; + }; + + gpioi: gpio@40022000 { + gpio-ranges = <&pinctrl 0 128 16>; + }; + + gpioj: gpio@40022400 { + gpio-ranges = <&pinctrl 0 144 16>; + }; + + gpiok: gpio@40022800 { + gpio-ranges = <&pinctrl 0 160 8>; }; }; }; }; - -&usdhc1 { - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_usdhc1>; - status = "okay"; -}; - -&tsc { - measure-delay-time = <0x1ffff>; - pre-charge-time = <0x1fff>; - status = "okay"; -}; diff --git a/arch/arm/boot/dts/stm32f429.dtsi b/arch/arm/boot/dts/stm32f429.dtsi index a8113dc879cf..5b36eb114ddc 100644 --- a/arch/arm/boot/dts/stm32f429.dtsi +++ b/arch/arm/boot/dts/stm32f429.dtsi @@ -47,7 +47,6 @@ #include "skeleton.dtsi" #include "armv7-m.dtsi" -#include #include #include @@ -361,6 +360,31 @@ i2c1: i2c@40005400 { status = "disabled"; }; + dac: dac@40007400 { + compatible = "st,stm32f4-dac-core"; + reg = <0x40007400 0x400>; + resets = <&rcc STM32F4_APB1_RESET(DAC)>; + clocks = <&rcc 0 STM32F4_APB1_CLOCK(DAC)>; + clock-names = "pclk"; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + + dac1: dac@1 { + compatible = "st,stm32-dac"; + #io-channels-cells = <1>; + reg = <1>; + status = "disabled"; + }; + + dac2: dac@2 { + compatible = "st,stm32-dac"; + #io-channels-cells = <1>; + reg = <2>; + status = "disabled"; + }; + }; + usart7: serial@40007800 { compatible = "st,stm32-usart", "st,stm32-uart"; reg = <0x40007800 0x400>; @@ -566,302 +590,6 @@ ltdc: display-controller@40016800 { status = "disabled"; }; - pinctrl: pin-controller { - #address-cells = <1>; - #size-cells = <1>; - compatible = "st,stm32f429-pinctrl"; - ranges = <0 0x40020000 0x3000>; - interrupt-parent = <&exti>; - st,syscfg = <&syscfg 0x8>; - pins-are-numbered; - - gpioa: gpio@40020000 { - gpio-controller; - #gpio-cells = <2>; - interrupt-controller; - #interrupt-cells = <2>; - reg = <0x0 0x400>; - clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOA)>; - st,bank-name = "GPIOA"; - }; - - gpiob: gpio@40020400 { - gpio-controller; - #gpio-cells = <2>; - interrupt-controller; - #interrupt-cells = <2>; - reg = <0x400 0x400>; - clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOB)>; - st,bank-name = "GPIOB"; - }; - - gpioc: gpio@40020800 { - gpio-controller; - #gpio-cells = <2>; - 
interrupt-controller; - #interrupt-cells = <2>; - reg = <0x800 0x400>; - clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOC)>; - st,bank-name = "GPIOC"; - }; - - gpiod: gpio@40020c00 { - gpio-controller; - #gpio-cells = <2>; - interrupt-controller; - #interrupt-cells = <2>; - reg = <0xc00 0x400>; - clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOD)>; - st,bank-name = "GPIOD"; - }; - - gpioe: gpio@40021000 { - gpio-controller; - #gpio-cells = <2>; - interrupt-controller; - #interrupt-cells = <2>; - reg = <0x1000 0x400>; - clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOE)>; - st,bank-name = "GPIOE"; - }; - - gpiof: gpio@40021400 { - gpio-controller; - #gpio-cells = <2>; - interrupt-controller; - #interrupt-cells = <2>; - reg = <0x1400 0x400>; - clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOF)>; - st,bank-name = "GPIOF"; - }; - - gpiog: gpio@40021800 { - gpio-controller; - #gpio-cells = <2>; - interrupt-controller; - #interrupt-cells = <2>; - reg = <0x1800 0x400>; - clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOG)>; - st,bank-name = "GPIOG"; - }; - - gpioh: gpio@40021c00 { - gpio-controller; - #gpio-cells = <2>; - interrupt-controller; - #interrupt-cells = <2>; - reg = <0x1c00 0x400>; - clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOH)>; - st,bank-name = "GPIOH"; - }; - - gpioi: gpio@40022000 { - gpio-controller; - #gpio-cells = <2>; - interrupt-controller; - #interrupt-cells = <2>; - reg = <0x2000 0x400>; - clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOI)>; - st,bank-name = "GPIOI"; - }; - - gpioj: gpio@40022400 { - gpio-controller; - #gpio-cells = <2>; - interrupt-controller; - #interrupt-cells = <2>; - reg = <0x2400 0x400>; - clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOJ)>; - st,bank-name = "GPIOJ"; - }; - - gpiok: gpio@40022800 { - gpio-controller; - #gpio-cells = <2>; - interrupt-controller; - #interrupt-cells = <2>; - reg = <0x2800 0x400>; - clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOK)>; - st,bank-name = "GPIOK"; - }; - - usart1_pins_a: usart1@0 { - pins1 { - pinmux = ; - bias-disable; - drive-push-pull; - slew-rate = <0>; - }; - pins2 { - pinmux = ; - bias-disable; - }; - }; - - usart3_pins_a: usart3@0 { - pins1 { - pinmux = ; - bias-disable; - drive-push-pull; - slew-rate = <0>; - }; - pins2 { - pinmux = ; - bias-disable; - }; - }; - - usbotg_fs_pins_a: usbotg_fs@0 { - pins { - pinmux = , - , - ; - bias-disable; - drive-push-pull; - slew-rate = <2>; - }; - }; - - usbotg_fs_pins_b: usbotg_fs@1 { - pins { - pinmux = , - , - ; - bias-disable; - drive-push-pull; - slew-rate = <2>; - }; - }; - - usbotg_hs_pins_a: usbotg_hs@0 { - pins { - pinmux = , - , - , - , - , - , - , - , - , - , - , - ; - bias-disable; - drive-push-pull; - slew-rate = <2>; - }; - }; - - ethernet_mii: mii@0 { - pins { - pinmux = , - , - , - , - , - , - , - , - , - , - , - , - , - ; - slew-rate = <2>; - }; - }; - - adc3_in8_pin: adc@200 { - pins { - pinmux = ; - }; - }; - - pwm1_pins: pwm@1 { - pins { - pinmux = , - , - ; - }; - }; - - pwm3_pins: pwm@3 { - pins { - pinmux = , - ; - }; - }; - - i2c1_pins: i2c1@0 { - pins { - pinmux = , - ; - bias-disable; - drive-open-drain; - slew-rate = <3>; - }; - }; - - ltdc_pins: ltdc@0 { - pins { - pinmux = , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - ; - slew-rate = <2>; - }; - }; - - dcmi_pins: dcmi@0 { - pins { - pinmux = , - , - , - , - , - , - , - , - , - , - , - , - , - , - ; - bias-disable; - drive-push-pull; - slew-rate = <3>; - }; - }; - }; - crc: crc@40023000 { compatible = "st,stm32f4-crc"; reg = <0x40023000 0x400>; diff --git 
a/arch/arm/boot/dts/stm32f469-disco.dts b/arch/arm/boot/dts/stm32f469-disco.dts index 75470c34b92c..c18acbe4cf4e 100644 --- a/arch/arm/boot/dts/stm32f469-disco.dts +++ b/arch/arm/boot/dts/stm32f469-disco.dts @@ -47,13 +47,14 @@ /dts-v1/; #include "stm32f429.dtsi" +#include "stm32f469-pinctrl.dtsi" / { model = "STMicroelectronics STM32F469i-DISCO board"; compatible = "st,stm32f469i-disco", "st,stm32f469"; chosen { - bootargs = "root=/dev/ram rdinit=/linuxrc"; + bootargs = "root=/dev/ram"; stdout-path = "serial0:115200n8"; }; diff --git a/arch/arm/boot/dts/stm32f469-pinctrl.dtsi b/arch/arm/boot/dts/stm32f469-pinctrl.dtsi new file mode 100644 index 000000000000..fff542662eea --- /dev/null +++ b/arch/arm/boot/dts/stm32f469-pinctrl.dtsi @@ -0,0 +1,96 @@ +/* + * Copyright 2017 - Alexandre Torgue + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#include "stm32f4-pinctrl.dtsi" + +/ { + soc { + pinctrl: pin-controller { + compatible = "st,stm32f469-pinctrl"; + + gpioa: gpio@40020000 { + gpio-ranges = <&pinctrl 0 0 16>; + }; + + gpiob: gpio@40020400 { + gpio-ranges = <&pinctrl 0 16 16>; + }; + + gpioc: gpio@40020800 { + gpio-ranges = <&pinctrl 0 32 16>; + }; + + gpiod: gpio@40020c00 { + gpio-ranges = <&pinctrl 0 48 16>; + }; + + gpioe: gpio@40021000 { + gpio-ranges = <&pinctrl 0 64 16>; + }; + + gpiof: gpio@40021400 { + gpio-ranges = <&pinctrl 0 80 16>; + }; + + gpiog: gpio@40021800 { + gpio-ranges = <&pinctrl 0 96 16>; + }; + + gpioh: gpio@40021c00 { + gpio-ranges = <&pinctrl 0 112 16>; + }; + + gpioi: gpio@40022000 { + gpio-ranges = <&pinctrl 0 128 16>; + }; + + gpioj: gpio@40022400 { + gpio-ranges = <&pinctrl 0 144 6>, + <&pinctrl 12 156 4>; + }; + + gpiok: gpio@40022800 { + gpio-ranges = <&pinctrl 3 163 5>; + }; + }; + }; +}; diff --git a/arch/arm/boot/dts/stm32f746.dtsi b/arch/arm/boot/dts/stm32f746.dtsi index 4506eb97a4ab..5633860037d2 100644 --- a/arch/arm/boot/dts/stm32f746.dtsi +++ b/arch/arm/boot/dts/stm32f746.dtsi @@ -167,6 +167,15 @@ usart5: serial@40005000 { status = "disabled"; }; + cec: cec@40006c00 { + compatible = "st,stm32-cec"; + reg = <0x40006C00 0x400>; + interrupts = <94>; + clocks = <&rcc 0 STM32F7_APB1_CLOCK(CEC)>, <&rcc 1 CLK_HDMI_CEC>; + clock-names = "cec", "hdmi-cec"; + status = "disabled"; + }; + usart7: serial@40007800 { compatible = "st,stm32f7-usart", "st,stm32f7-uart"; reg = <0x40007800 0x400>; @@ -336,6 +345,15 @@ gpiok: gpio@40022800 { st,bank-name = "GPIOK"; }; + cec_pins_a: cec@0 { + pins { + pinmux = ; + slew-rate = <0>; + drive-open-drain; + bias-disable; + }; + }; + usart1_pins_a: usart1@0 { pins1 { pinmux = ; @@ -380,6 +398,39 @@ rcc: rcc@40023800 { assigned-clocks = <&rcc 1 CLK_HSE_RTC>; assigned-clock-rates = <1000000>; }; + + dma1: dma@40026000 { + compatible = "st,stm32-dma"; + reg = <0x40026000 0x400>; + interrupts = <11>, + <12>, + <13>, + <14>, + <15>, + <16>, + <17>, + <47>; + clocks = <&rcc 0 STM32F7_AHB1_CLOCK(DMA1)>; + #dma-cells = <4>; + status = "disabled"; + }; + + dma2: dma@40026400 { + compatible = "st,stm32-dma"; + reg = <0x40026400 0x400>; + interrupts = <56>, + <57>, + <58>, + <59>, + <60>, + <68>, + <69>, + <70>; + clocks = <&rcc 0 STM32F7_AHB1_CLOCK(DMA2)>; + #dma-cells = <4>; + st,mem2mem; + status = "disabled"; + }; }; }; diff --git a/arch/arm/boot/dts/stm32f769-disco.dts b/arch/arm/boot/dts/stm32f769-disco.dts index 166728aeb166..4463ca13a740 100644 --- a/arch/arm/boot/dts/stm32f769-disco.dts +++ b/arch/arm/boot/dts/stm32f769-disco.dts @@ -63,6 +63,12 @@ aliases { }; +&cec { + pinctrl-0 = <&cec_pins_a>; + pinctrl-names = "default"; + status = "okay"; +}; + &clk_hse { clock-frequency = <25000000>; }; diff --git a/arch/arm/boot/dts/stm32h743.dtsi b/arch/arm/boot/dts/stm32h743.dtsi index 36a99db0a3b4..58ec2275181e 100644 --- a/arch/arm/boot/dts/stm32h743.dtsi +++ b/arch/arm/boot/dts/stm32h743.dtsi @@ -59,13 +59,11 @@ timer_clk: timer-clk { }; soc { - usart1: serial@40011000 { - compatible = "st,stm32f7-usart", "st,stm32f7-uart"; - reg = <0x40011000 0x400>; - interrupts = <37>; - status = "disabled"; + timer5: timer@40000c00 { + compatible = "st,stm32-timer"; + reg = <0x40000c00 0x400>; + interrupts = <50>; clocks = <&timer_clk>; - }; usart2: serial@40004400 { @@ -76,11 +74,124 @@ usart2: serial@40004400 { clocks = <&timer_clk>; }; - timer5: timer@40000c00 { - compatible = "st,stm32-timer"; - reg = <0x40000c00 0x400>; - interrupts = <50>; + dac: dac@40007400 { + 
compatible = "st,stm32h7-dac-core"; + reg = <0x40007400 0x400>; clocks = <&timer_clk>; + clock-names = "pclk"; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + + dac1: dac@1 { + compatible = "st,stm32-dac"; + #io-channels-cells = <1>; + reg = <1>; + status = "disabled"; + }; + + dac2: dac@2 { + compatible = "st,stm32-dac"; + #io-channels-cells = <1>; + reg = <2>; + status = "disabled"; + }; + }; + + usart1: serial@40011000 { + compatible = "st,stm32f7-usart", "st,stm32f7-uart"; + reg = <0x40011000 0x400>; + interrupts = <37>; + status = "disabled"; + clocks = <&timer_clk>; + + }; + + dma1: dma@40020000 { + compatible = "st,stm32-dma"; + reg = <0x40020000 0x400>; + interrupts = <11>, + <12>, + <13>, + <14>, + <15>, + <16>, + <17>, + <47>; + clocks = <&timer_clk>; + #dma-cells = <4>; + st,mem2mem; + status = "disabled"; + }; + + dma2: dma@40020400 { + compatible = "st,stm32-dma"; + reg = <0x40020400 0x400>; + interrupts = <56>, + <57>, + <58>, + <59>, + <60>, + <68>, + <69>, + <70>; + clocks = <&timer_clk>; + #dma-cells = <4>; + st,mem2mem; + status = "disabled"; + }; + + adc_12: adc@40022000 { + compatible = "st,stm32h7-adc-core"; + reg = <0x40022000 0x400>; + interrupts = <18>; + clocks = <&timer_clk>; + clock-names = "bus"; + interrupt-controller; + #interrupt-cells = <1>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + + adc1: adc@0 { + compatible = "st,stm32h7-adc"; + #io-channel-cells = <1>; + reg = <0x0>; + interrupt-parent = <&adc_12>; + interrupts = <0>; + status = "disabled"; + }; + + adc2: adc@100 { + compatible = "st,stm32h7-adc"; + #io-channel-cells = <1>; + reg = <0x100>; + interrupt-parent = <&adc_12>; + interrupts = <1>; + status = "disabled"; + }; + }; + + adc_3: adc@58026000 { + compatible = "st,stm32h7-adc-core"; + reg = <0x58026000 0x400>; + interrupts = <127>; + clocks = <&timer_clk>; + clock-names = "bus"; + interrupt-controller; + #interrupt-cells = <1>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + + adc3: adc@0 { + compatible = "st,stm32h7-adc"; + #io-channel-cells = <1>; + reg = <0x0>; + interrupt-parent = <&adc_3>; + interrupts = <0>; + status = "disabled"; + }; }; }; }; diff --git a/arch/arm/boot/dts/stm32h743i-eval.dts b/arch/arm/boot/dts/stm32h743i-eval.dts index c6effbb36e4a..6c07786e7ddb 100644 --- a/arch/arm/boot/dts/stm32h743i-eval.dts +++ b/arch/arm/boot/dts/stm32h743i-eval.dts @@ -60,6 +60,24 @@ memory { aliases { serial0 = &usart1; }; + + vdda: regulator-vdda { + compatible = "regulator-fixed"; + regulator-name = "vdda"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + }; +}; + +&adc_12 { + vref-supply = <&vdda>; + status = "okay"; + adc1: adc@0 { + /* potentiometer */ + st,adc-channels = <0>; + status = "okay"; + }; }; &clk_hse { diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi index aebc3f9dc7b6..b147cb0dc14b 100644 --- a/arch/arm/boot/dts/sun6i-a31.dtsi +++ b/arch/arm/boot/dts/sun6i-a31.dtsi @@ -1155,11 +1155,11 @@ rtc: rtc@01f00000 { ; }; - nmi_intc: interrupt-controller@01f00c0c { - compatible = "allwinner,sun6i-a31-sc-nmi"; + nmi_intc: interrupt-controller@1f00c00 { + compatible = "allwinner,sun6i-a31-r-intc"; interrupt-controller; #interrupt-cells = <2>; - reg = <0x01f00c0c 0x38>; + reg = <0x01f00c00 0x400>; interrupts = ; }; diff --git a/arch/arm/boot/dts/sun7i-a20-cubietruck.dts b/arch/arm/boot/dts/sun7i-a20-cubietruck.dts index bb510187602c..852a0aa24dce 100644 --- 
a/arch/arm/boot/dts/sun7i-a20-cubietruck.dts +++ b/arch/arm/boot/dts/sun7i-a20-cubietruck.dts @@ -271,6 +271,10 @@ &ac_power_supply { status = "okay"; }; +&battery_power_supply { + status = "okay"; +}; + ®_dcdc2 { regulator-always-on; regulator-min-microvolt = <1000000>; diff --git a/arch/arm/boot/dts/sun8i-a23-a33.dtsi b/arch/arm/boot/dts/sun8i-a23-a33.dtsi index a8b978d0f35b..ea50dda75adc 100644 --- a/arch/arm/boot/dts/sun8i-a23-a33.dtsi +++ b/arch/arm/boot/dts/sun8i-a23-a33.dtsi @@ -519,11 +519,11 @@ rtc: rtc@01f00000 { #clock-cells = <1>; }; - nmi_intc: interrupt-controller@01f00c0c { - compatible = "allwinner,sun6i-a31-sc-nmi"; + nmi_intc: interrupt-controller@1f00c00 { + compatible = "allwinner,sun6i-a31-r-intc"; interrupt-controller; #interrupt-cells = <2>; - reg = <0x01f00c0c 0x38>; + reg = <0x01f00c00 0x400>; interrupts = ; }; diff --git a/arch/arm/boot/dts/sun8i-a83t-allwinner-h8homlet-v2.dts b/arch/arm/boot/dts/sun8i-a83t-allwinner-h8homlet-v2.dts index aecdeeb368ed..1f0d60afb25b 100644 --- a/arch/arm/boot/dts/sun8i-a83t-allwinner-h8homlet-v2.dts +++ b/arch/arm/boot/dts/sun8i-a83t-allwinner-h8homlet-v2.dts @@ -43,6 +43,7 @@ /dts-v1/; #include "sun8i-a83t.dtsi" +#include "sunxi-common-regulators.dtsi" / { model = "Allwinner A83T H8Homlet Proto Dev Board v2.0"; @@ -57,8 +58,92 @@ chosen { }; }; +&ehci0 { + status = "okay"; +}; + +&mmc0 { + pinctrl-names = "default"; + pinctrl-0 = <&mmc0_pins>; + vmmc-supply = <®_vcc3v0>; + cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>; /* PF6 */ + bus-width = <4>; + cd-inverted; + status = "okay"; +}; + +&mmc2 { + pinctrl-names = "default"; + pinctrl-0 = <&mmc2_8bit_emmc_pins>; + vmmc-supply = <®_vcc3v0>; + bus-width = <8>; + non-removable; + cap-mmc-hw-reset; + status = "okay"; +}; + +&ohci0 { + status = "okay"; +}; + +®_usb0_vbus { + gpio = <&r_pio 0 5 GPIO_ACTIVE_HIGH>; /* PL5 */ + status = "okay"; +}; + +®_usb1_vbus { + gpio = <&r_pio 0 6 GPIO_ACTIVE_HIGH>; /* PL6 */ + status = "okay"; +}; + +&r_rsb { + status = "okay"; + + axp81x: pmic@3a3 { + compatible = "x-powers,axp818", "x-powers,axp813"; + reg = <0x3a3>; + interrupt-parent = <&r_intc>; + interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + }; + + ac100: codec@e89 { + compatible = "x-powers,ac100"; + reg = <0xe89>; + + ac100_codec: codec { + compatible = "x-powers,ac100-codec"; + interrupt-parent = <&r_pio>; + interrupts = <0 11 IRQ_TYPE_LEVEL_LOW>; /* PL11 */ + #clock-cells = <0>; + clock-output-names = "4M_adda"; + }; + + ac100_rtc: rtc { + compatible = "x-powers,ac100-rtc"; + interrupt-parent = <&r_intc>; + interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + clocks = <&ac100_codec>; + #clock-cells = <1>; + clock-output-names = "cko1_rtc", + "cko2_rtc", + "cko3_rtc"; + }; + }; +}; + &uart0 { pinctrl-names = "default"; pinctrl-0 = <&uart0_pb_pins>; status = "okay"; }; + +&usbphy { + usb0_vbus-supply = <®_usb0_vbus>; + usb1_vbus-supply = <®_usb1_vbus>; + status = "okay"; +}; + +&usb_otg { + dr_mode = "host"; + status = "okay"; +}; diff --git a/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts b/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts new file mode 100644 index 000000000000..2bafd7e99ef7 --- /dev/null +++ b/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts @@ -0,0 +1,148 @@ +/* + * Copyright 2017 Chen-Yu Tsai + * + * Chen-Yu Tsai + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. 
+ * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +/dts-v1/; +#include "sun8i-a83t.dtsi" +#include "sunxi-common-regulators.dtsi" + +#include + +/ { + model = "Banana Pi BPI-M3"; + compatible = "sinovoip,bpi-m3", "allwinner,sun8i-a83t"; + + aliases { + serial0 = &uart0; + }; + + chosen { + stdout-path = "serial0:115200n8"; + }; +}; + +&ehci0 { + /* Terminus Tech FE 1.1s 4-port USB 2.0 hub here */ + status = "okay"; + + /* TODO GL830 USB-to-SATA bridge downstream w/ GPIO power controls */ +}; + +&mmc0 { + pinctrl-names = "default"; + pinctrl-0 = <&mmc0_pins>; + vmmc-supply = <®_vcc3v3>; + bus-width = <4>; + cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>; /* PF6 */ + cd-inverted; + status = "okay"; +}; + +&mmc2 { + pinctrl-names = "default"; + pinctrl-0 = <&mmc2_8bit_emmc_pins>; + vmmc-supply = <®_vcc3v3>; + bus-width = <8>; + non-removable; + cap-mmc-hw-reset; + status = "okay"; +}; + +&r_rsb { + status = "okay"; + + axp81x: pmic@3a3 { + compatible = "x-powers,axp813"; + reg = <0x3a3>; + interrupt-parent = <&r_intc>; + interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + }; + + ac100: codec@e89 { + compatible = "x-powers,ac100"; + reg = <0xe89>; + + ac100_codec: codec { + compatible = "x-powers,ac100-codec"; + interrupt-parent = <&r_pio>; + interrupts = <0 11 IRQ_TYPE_LEVEL_LOW>; /* PL11 */ + #clock-cells = <0>; + clock-output-names = "4M_adda"; + }; + + ac100_rtc: rtc { + compatible = "x-powers,ac100-rtc"; + interrupt-parent = <&r_intc>; + interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + clocks = <&ac100_codec>; + #clock-cells = <1>; + clock-output-names = "cko1_rtc", + "cko2_rtc", + "cko3_rtc"; + }; + }; +}; + +®_usb1_vbus { + gpio = <&pio 3 24 GPIO_ACTIVE_HIGH>; /* PD24 */ + status = "okay"; +}; + +®_vcc3v0 { + status = "disabled"; +}; + +®_vcc5v0 { + status = "disabled"; +}; + +&uart0 { + pinctrl-names = "default"; + pinctrl-0 = <&uart0_pb_pins>; + status = "okay"; +}; + +&usbphy { + usb1_vbus-supply = <®_usb1_vbus>; + status = "okay"; +}; diff 
--git a/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts b/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts index cff33454fc24..716a205c6dbb 100644 --- a/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts +++ b/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts @@ -44,6 +44,7 @@ /dts-v1/; #include "sun8i-a83t.dtsi" +#include "sunxi-common-regulators.dtsi" #include @@ -83,6 +84,17 @@ green { }; }; + usb-hub { + /* I2C is not connected */ + compatible = "smsc,usb3503"; + initial-mode = <1>; /* initialize in HUB mode */ + disabled-ports = <1>; + intn-gpios = <&pio 7 5 GPIO_ACTIVE_HIGH>; /* PH5 */ + reset-gpios = <&pio 4 16 GPIO_ACTIVE_HIGH>; /* PE16 */ + connect-gpios = <&pio 4 17 GPIO_ACTIVE_HIGH>; /* PE17 */ + refclk-frequency = <19200000>; + }; + sound { compatible = "simple-audio-card"; simple-audio-card,name = "On-board SPDIF"; @@ -102,6 +114,89 @@ spdif_out: spdif-out { }; }; +&ehci0 { + /* GL830 USB-to-SATA bridge here */ + status = "okay"; +}; + +&ehci1 { + /* USB3503 HSIC USB 2.0 hub here */ + status = "okay"; +}; + +&mmc0 { + pinctrl-names = "default"; + pinctrl-0 = <&mmc0_pins>; + vmmc-supply = <®_vcc3v3>; + bus-width = <4>; + cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>; /* PF6 */ + cd-inverted; + status = "okay"; +}; + +&mmc2 { + pinctrl-names = "default"; + pinctrl-0 = <&mmc2_8bit_emmc_pins>; + vmmc-supply = <®_vcc3v3>; + bus-width = <8>; + non-removable; + cap-mmc-hw-reset; + status = "okay"; +}; + +&r_rsb { + status = "okay"; + + axp81x: pmic@3a3 { + compatible = "x-powers,axp818", "x-powers,axp813"; + reg = <0x3a3>; + interrupt-parent = <&r_intc>; + interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + }; + + ac100: codec@e89 { + compatible = "x-powers,ac100"; + reg = <0xe89>; + + ac100_codec: codec { + compatible = "x-powers,ac100-codec"; + interrupt-parent = <&r_pio>; + interrupts = <0 11 IRQ_TYPE_LEVEL_LOW>; /* PL11 */ + #clock-cells = <0>; + clock-output-names = "4M_adda"; + }; + + ac100_rtc: rtc { + compatible = "x-powers,ac100-rtc"; + interrupt-parent = <&r_intc>; + interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + clocks = <&ac100_codec>; + #clock-cells = <1>; + clock-output-names = "cko1_rtc", + "cko2_rtc", + "cko3_rtc"; + }; + }; +}; + +®_usb1_vbus { + gpio = <&pio 3 29 GPIO_ACTIVE_HIGH>; /* PD29 */ + status = "okay"; +}; + +®_usb2_vbus { + gpio = <&r_pio 0 6 GPIO_ACTIVE_HIGH>; /* PL6 */ + status = "okay"; +}; + +®_vcc3v0 { + status = "disabled"; +}; + +®_vcc5v0 { + status = "disabled"; +}; + &spdif { status = "okay"; }; @@ -111,3 +206,9 @@ &uart0 { pinctrl-0 = <&uart0_pb_pins>; status = "okay"; }; + +&usbphy { + usb1_vbus-supply = <®_usb1_vbus>; + usb2_vbus-supply = <®_usb2_vbus>; + status = "okay"; +}; diff --git a/arch/arm/boot/dts/sun8i-a83t.dtsi b/arch/arm/boot/dts/sun8i-a83t.dtsi index 19a8f4fcfab5..f996bd343e50 100644 --- a/arch/arm/boot/dts/sun8i-a83t.dtsi +++ b/arch/arm/boot/dts/sun8i-a83t.dtsi @@ -47,6 +47,7 @@ #include #include #include +#include / { interrupt-parent = <&gic>; @@ -182,6 +183,141 @@ dma: dma-controller@1c02000 { #dma-cells = <1>; }; + mmc0: mmc@1c0f000 { + compatible = "allwinner,sun8i-a83t-mmc", + "allwinner,sun7i-a20-mmc"; + reg = <0x01c0f000 0x1000>; + clocks = <&ccu CLK_BUS_MMC0>, + <&ccu CLK_MMC0>, + <&ccu CLK_MMC0_OUTPUT>, + <&ccu CLK_MMC0_SAMPLE>; + clock-names = "ahb", + "mmc", + "output", + "sample"; + resets = <&ccu RST_BUS_MMC0>; + reset-names = "ahb"; + interrupts = ; + status = "disabled"; + #address-cells = <1>; + #size-cells = <0>; + }; + + mmc1: mmc@1c10000 { + compatible = "allwinner,sun8i-a83t-mmc", + "allwinner,sun7i-a20-mmc"; + reg = <0x01c10000 0x1000>; + 
clocks = <&ccu CLK_BUS_MMC1>, + <&ccu CLK_MMC1>, + <&ccu CLK_MMC1_OUTPUT>, + <&ccu CLK_MMC1_SAMPLE>; + clock-names = "ahb", + "mmc", + "output", + "sample"; + resets = <&ccu RST_BUS_MMC1>; + reset-names = "ahb"; + interrupts = ; + status = "disabled"; + #address-cells = <1>; + #size-cells = <0>; + }; + + mmc2: mmc@1c11000 { + compatible = "allwinner,sun8i-a83t-emmc"; + reg = <0x01c11000 0x1000>; + clocks = <&ccu CLK_BUS_MMC2>, + <&ccu CLK_MMC2>, + <&ccu CLK_MMC2_OUTPUT>, + <&ccu CLK_MMC2_SAMPLE>; + clock-names = "ahb", + "mmc", + "output", + "sample"; + resets = <&ccu RST_BUS_MMC2>; + reset-names = "ahb"; + interrupts = ; + status = "disabled"; + #address-cells = <1>; + #size-cells = <0>; + }; + + usb_otg: usb@01c19000 { + compatible = "allwinner,sun8i-a83t-musb", + "allwinner,sun8i-a33-musb"; + reg = <0x01c19000 0x0400>; + clocks = <&ccu CLK_BUS_OTG>; + resets = <&ccu RST_BUS_OTG>; + interrupts = ; + interrupt-names = "mc"; + phys = <&usbphy 0>; + phy-names = "usb"; + extcon = <&usbphy 0>; + status = "disabled"; + }; + + usbphy: phy@1c19400 { + compatible = "allwinner,sun8i-a83t-usb-phy"; + reg = <0x01c19400 0x10>, + <0x01c1a800 0x14>, + <0x01c1b800 0x14>; + reg-names = "phy_ctrl", + "pmu1", + "pmu2"; + clocks = <&ccu CLK_USB_PHY0>, + <&ccu CLK_USB_PHY1>, + <&ccu CLK_USB_HSIC>, + <&ccu CLK_USB_HSIC_12M>; + clock-names = "usb0_phy", + "usb1_phy", + "usb2_phy", + "usb2_hsic_12M"; + resets = <&ccu RST_USB_PHY0>, + <&ccu RST_USB_PHY1>, + <&ccu RST_USB_HSIC>; + reset-names = "usb0_reset", + "usb1_reset", + "usb2_reset"; + status = "disabled"; + #phy-cells = <1>; + }; + + ehci0: usb@1c1a000 { + compatible = "allwinner,sun8i-a83t-ehci", + "generic-ehci"; + reg = <0x01c1a000 0x100>; + interrupts = ; + clocks = <&ccu CLK_BUS_EHCI0>; + resets = <&ccu RST_BUS_EHCI0>; + phys = <&usbphy 1>; + phy-names = "usb"; + status = "disabled"; + }; + + ohci0: usb@1c1a400 { + compatible = "allwinner,sun8i-a83t-ohci", + "generic-ohci"; + reg = <0x01c1a400 0x100>; + interrupts = ; + clocks = <&ccu CLK_BUS_OHCI0>, <&ccu CLK_USB_OHCI0>; + resets = <&ccu RST_BUS_OHCI0>; + phys = <&usbphy 1>; + phy-names = "usb"; + status = "disabled"; + }; + + ehci1: usb@1c1b000 { + compatible = "allwinner,sun8i-a83t-ehci", + "generic-ehci"; + reg = <0x01c1b000 0x100>; + interrupts = ; + clocks = <&ccu CLK_BUS_EHCI1>; + resets = <&ccu RST_BUS_EHCI1>; + phys = <&usbphy 2>; + phy-names = "usb"; + status = "disabled"; + }; + ccu: clock@1c20000 { compatible = "allwinner,sun8i-a83t-ccu"; reg = <0x01c20000 0x400>; @@ -212,6 +348,15 @@ mmc0_pins: mmc0-pins { bias-pull-up; }; + mmc2_8bit_emmc_pins: mmc2-8bit-emmc-pins { + pins = "PC5", "PC6", "PC8", "PC9", + "PC10", "PC11", "PC12", "PC13", + "PC14", "PC15", "PC16"; + function = "mmc2"; + drive-strength = <30>; + bias-pull-up; + }; + spdif_tx_pin: spdif-tx-pin { pins = "PE18"; function = "spdif"; @@ -281,6 +426,15 @@ gic: interrupt-controller@1c81000 { interrupts = ; }; + r_intc: interrupt-controller@1f00c00 { + compatible = "allwinner,sun8i-a83t-r-intc", + "allwinner,sun6i-a31-r-intc"; + interrupt-controller; + #interrupt-cells = <2>; + reg = <0x01f00c00 0x400>; + interrupts = ; + }; + r_ccu: clock@1f01400 { compatible = "allwinner,sun8i-a83t-r-ccu"; reg = <0x01f01400 0x400>; @@ -302,6 +456,28 @@ r_pio: pinctrl@1f02c00 { #gpio-cells = <3>; interrupt-controller; #interrupt-cells = <3>; + + r_rsb_pins: r-rsb-pins { + pins = "PL0", "PL1"; + function = "s_rsb"; + drive-strength = <20>; + bias-pull-up; + }; + }; + + r_rsb: rsb@1f03400 { + compatible = "allwinner,sun8i-a83t-rsb", + 
"allwinner,sun8i-a23-rsb"; + reg = <0x01f03400 0x400>; + interrupts = ; + clocks = <&r_ccu CLK_APB0_RSB>; + clock-frequency = <3000000>; + resets = <&r_ccu RST_APB0_RSB>; + pinctrl-names = "default"; + pinctrl-0 = <&r_rsb_pins>; + status = "disabled"; + #address-cells = <1>; + #size-cells = <0>; }; }; }; diff --git a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts index e7fae65eb5d3..10da56e86ab8 100644 --- a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts +++ b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts @@ -100,6 +100,10 @@ spdif_out: spdif-out { }; }; +&ehci0 { + status = "okay"; +}; + &ehci1 { status = "okay"; }; @@ -147,10 +151,19 @@ &mmc2 { status = "okay"; }; +&ohci0 { + status = "okay"; +}; + &ohci1 { status = "okay"; }; +®_usb0_vbus { + gpio = <&r_pio 0 2 GPIO_ACTIVE_HIGH>; /* PL2 */ + status = "okay"; +}; + &spdif { pinctrl-names = "default"; pinctrl-0 = <&spdif_tx_pins_a>; @@ -163,7 +176,14 @@ &uart0 { status = "okay"; }; -&usbphy { - /* USB VBUS is on as long as VCC-IO is on */ +&usb_otg { + dr_mode = "otg"; status = "okay"; }; + +&usbphy { + /* USB VBUS is always on except for the OTG port */ + status = "okay"; + usb0_id_det-gpios = <&pio 0 7 GPIO_ACTIVE_HIGH>; /* PA07 */ + usb0_vbus-supply = <®_usb0_vbus>; +}; diff --git a/arch/arm/boot/dts/sun8i-r16-bananapi-m2m.dts b/arch/arm/boot/dts/sun8i-r16-bananapi-m2m.dts new file mode 100644 index 000000000000..eaf09666720d --- /dev/null +++ b/arch/arm/boot/dts/sun8i-r16-bananapi-m2m.dts @@ -0,0 +1,321 @@ +/* + * Copyright (c) 2017 Free Electrons + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +/dts-v1/; +#include "sun8i-a33.dtsi" + +#include + +/ { + model = "BananaPi M2 Magic"; + compatible = "sinovoip,bananapi-m2m", "allwinner,sun8i-a33"; + + aliases { + i2c0 = &i2c0; + i2c1 = &i2c1; + i2c2 = &i2c2; + serial0 = &uart0; + serial1 = &uart1; + }; + + chosen { + stdout-path = "serial0:115200n8"; + }; + + leds { + compatible = "gpio-leds"; + + blue { + label = "bpi-m2m:blue:usr"; + gpios = <&pio 2 7 GPIO_ACTIVE_LOW>; + }; + + green { + label = "bpi-m2m:green:usr"; + gpios = <&r_pio 0 2 GPIO_ACTIVE_LOW>; + }; + + red { + label = "bpi-m2m:red:power"; + gpios = <&r_pio 0 3 GPIO_ACTIVE_LOW>; + default-state = "on"; + }; + }; + + reg_vcc5v0: vcc5v0 { + compatible = "regulator-fixed"; + regulator-name = "vcc5v0"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + }; + + wifi_pwrseq: wifi_pwrseq { + compatible = "mmc-pwrseq-simple"; + reset-gpios = <&r_pio 0 6 GPIO_ACTIVE_LOW>; /* PL06 */ + }; +}; + +&codec { + status = "okay"; +}; + +&cpu0 { + cpu-supply = <®_dcdc3>; +}; + +&cpu0_opp_table { + opp@1104000000 { + opp-hz = /bits/ 64 <1104000000>; + opp-microvolt = <1320000>; + clock-latency-ns = <244144>; /* 8 32k periods */ + }; + + opp@1200000000 { + opp-hz = /bits/ 64 <1200000000>; + opp-microvolt = <1320000>; + clock-latency-ns = <244144>; /* 8 32k periods */ + }; +}; + +&dai { + status = "okay"; +}; + +&ehci0 { + status = "okay"; +}; + +/* This is the i2c bus exposed on the DSI connector for the touch panel */ +&i2c0 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c0_pins_a>; + status = "disabled"; +}; + +/* This is the i2c bus exposed on the GPIO header */ +&i2c1 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c1_pins_a>; + status = "disabled"; +}; + +/* This is the i2c bus exposed on the CSI connector to control the sensor */ +&i2c2 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c2_pins_a>; + status = "disabled"; +}; + +&mmc0 { + pinctrl-names = "default"; + pinctrl-0 = <&mmc0_pins_a>; + vmmc-supply = <®_dcdc1>; + bus-width = <4>; + cd-gpios = <&pio 1 4 GPIO_ACTIVE_HIGH>; /* PB4 */ + cd-inverted; + status = "okay"; +}; + +&mmc1 { + pinctrl-names = "default"; + pinctrl-0 = <&mmc1_pins_a>; + vmmc-supply = <®_aldo1>; + mmc-pwrseq = <&wifi_pwrseq>; + bus-width = <4>; + non-removable; + status = "okay"; +}; + +&mmc2 { + pinctrl-names = "default"; + pinctrl-0 = <&mmc2_8bit_pins>; + vmmc-supply = <®_dcdc1>; + bus-width = <8>; + non-removable; + cap-mmc-hw-reset; + status = "okay"; +}; + +&ohci0 { + status = "okay"; +}; + +&r_rsb { + status = "okay"; + + axp22x: pmic@3a3 { + compatible = "x-powers,axp223"; + reg = <0x3a3>; + interrupt-parent = <&nmi_intc>; + interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + eldoin-supply = <®_dcdc1>; + x-powers,drive-vbus-en; + }; +}; + +#include "axp223.dtsi" + +&ac_power_supply { + status = "okay"; +}; + +®_aldo1 { + regulator-always-on; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3000000>; + regulator-name = "vcc-io"; +}; + +®_aldo2 { + regulator-always-on; + regulator-min-microvolt = <2500000>; + regulator-max-microvolt = <2500000>; + regulator-name = "vdd-dll"; +}; + +®_aldo3 { + regulator-always-on; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3000000>; + regulator-name = "avcc"; +}; + +®_dc1sw { + regulator-name = "vcc-lcd"; +}; + +®_dc5ldo { + regulator-always-on; + regulator-min-microvolt = <900000>; + regulator-max-microvolt = <1400000>; + regulator-name = "vdd-cpus"; +}; + +®_dcdc1 { + regulator-always-on; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt 
= <3000000>; + regulator-name = "vcc-3v0"; +}; + +®_dcdc2 { + regulator-always-on; + regulator-min-microvolt = <900000>; + regulator-max-microvolt = <1400000>; + regulator-name = "vdd-sys"; +}; + +®_dcdc3 { + regulator-always-on; + regulator-min-microvolt = <900000>; + regulator-max-microvolt = <1400000>; + regulator-name = "vdd-cpu"; +}; + +®_dcdc5 { + regulator-always-on; + regulator-min-microvolt = <1500000>; + regulator-max-microvolt = <1500000>; + regulator-name = "vcc-dram"; +}; + +/* + * Our WiFi chip needs both DLDO1 and DLDO2 to be powered at the same + * time, with the two being in sync. Since this is not really + * supported right now, just use the two as always on, and we will fix + * it later. + */ +®_dldo1 { + regulator-always-on; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-name = "vcc-wifi0"; +}; + +®_dldo2 { + regulator-always-on; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-name = "vcc-wifi1"; +}; + +®_drivevbus { + regulator-name = "usb0-vbus"; + status = "okay"; +}; + +®_rtc_ldo { + regulator-name = "vcc-rtc"; +}; + +&sound { + status = "okay"; +}; + +&uart0 { + pinctrl-names = "default"; + pinctrl-0 = <&uart0_pins_b>; + status = "okay"; +}; + +&uart1 { + pinctrl-names = "default"; + pinctrl-0 = <&uart1_pins_a>, <&uart1_pins_cts_rts_a>; + status = "okay"; +}; + +&usb_otg { + dr_mode = "otg"; + status = "okay"; +}; + +&usb_power_supply { + status = "okay"; +}; + +&usbphy { + usb0_id_det-gpios = <&pio 7 8 GPIO_ACTIVE_HIGH>; /* PH8 */ + usb0_vbus_power-supply = <&usb_power_supply>; + usb0_vbus-supply = <®_drivevbus>; + usb1_vbus-supply = <®_vcc5v0>; + status = "okay"; +}; diff --git a/arch/arm/boot/dts/tango4-smp8758.dtsi b/arch/arm/boot/dts/tango4-smp8758.dtsi index d2e65c46bcc7..eca33d568690 100644 --- a/arch/arm/boot/dts/tango4-smp8758.dtsi +++ b/arch/arm/boot/dts/tango4-smp8758.dtsi @@ -13,7 +13,6 @@ cpu0: cpu@0 { reg = <0>; clocks = <&clkgen CPU_CLK>; clock-latency = <1>; - operating-points = <1215000 0 607500 0 405000 0 243000 0 135000 0>; }; cpu1: cpu@1 { diff --git a/arch/arm/boot/dts/tegra114-dalmore.dts b/arch/arm/boot/dts/tegra114-dalmore.dts index 1444fbd543e7..5af4dd321952 100644 --- a/arch/arm/boot/dts/tegra114-dalmore.dts +++ b/arch/arm/boot/dts/tegra114-dalmore.dts @@ -1122,6 +1122,16 @@ sdhci@78000600 { non-removable; }; + usb@7d000000 { + compatible = "nvidia,tegra114-udc"; + status = "okay"; + dr_mode = "peripheral"; + }; + + usb-phy@7d000000 { + status = "okay"; + }; + usb@7d008000 { status = "okay"; }; diff --git a/arch/arm/boot/dts/tegra124-jetson-tk1.dts b/arch/arm/boot/dts/tegra124-jetson-tk1.dts index 7bacb2954f58..61873d642a45 100644 --- a/arch/arm/boot/dts/tegra124-jetson-tk1.dts +++ b/arch/arm/boot/dts/tegra124-jetson-tk1.dts @@ -1722,7 +1722,7 @@ usb2 { lanes { usb2-0 { - nvidia,function = "xusb"; + nvidia,function = "snps"; status = "okay"; }; @@ -1829,6 +1829,16 @@ i2s@70301100 { }; }; + usb@7d000000 { + compatible = "nvidia,tegra124-udc"; + status = "okay"; + dr_mode = "peripheral"; + }; + + usb-phy@7d000000 { + status = "okay"; + }; + /* mini-PCIe USB */ usb@7d004000 { status = "okay"; diff --git a/arch/arm/boot/dts/tegra124.dtsi b/arch/arm/boot/dts/tegra124.dtsi index 1b10b14a6abd..8baf00b89efb 100644 --- a/arch/arm/boot/dts/tegra124.dtsi +++ b/arch/arm/boot/dts/tegra124.dtsi @@ -87,6 +87,7 @@ host1x@50000000 { clocks = <&tegra_car TEGRA124_CLK_HOST1X>; resets = <&tegra_car 28>; reset-names = "host1x"; + iommus = <&mc TEGRA_SWGROUP_HC>; #address-cells 
= <2>; #size-cells = <2>; diff --git a/arch/arm/boot/dts/tegra20-paz00.dts b/arch/arm/boot/dts/tegra20-paz00.dts index b4bfa5586c23..bfa9421fcf94 100644 --- a/arch/arm/boot/dts/tegra20-paz00.dts +++ b/arch/arm/boot/dts/tegra20-paz00.dts @@ -452,7 +452,9 @@ pmc@7000e400 { }; usb@c5000000 { + compatible = "nvidia,tegra20-udc"; status = "okay"; + dr_mode = "peripheral"; }; usb-phy@c5000000 { diff --git a/arch/arm/boot/dts/tegra30-beaver.dts b/arch/arm/boot/dts/tegra30-beaver.dts index 4f41b18d9547..3e104ddeb220 100644 --- a/arch/arm/boot/dts/tegra30-beaver.dts +++ b/arch/arm/boot/dts/tegra30-beaver.dts @@ -1927,6 +1927,16 @@ sdhci@78000600 { non-removable; }; + usb@7d000000 { + compatible = "nvidia,tegra30-udc"; + status = "okay"; + dr_mode = "peripheral"; + }; + + usb-phy@7d000000 { + status = "okay"; + }; + usb@7d004000 { status = "okay"; }; diff --git a/arch/arm/boot/dts/tps65217.dtsi b/arch/arm/boot/dts/tps65217.dtsi index 02de56b55823..399baaa0a2ab 100644 --- a/arch/arm/boot/dts/tps65217.dtsi +++ b/arch/arm/boot/dts/tps65217.dtsi @@ -18,11 +18,14 @@ &tps { charger { compatible = "ti,tps65217-charger"; + interrupts = <0>, <1>; + interrupt-names = "USB", "AC"; status = "disabled"; }; pwrbutton { compatible = "ti,tps65217-pwrbutton"; + interrupts = <2>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/uniphier-ld4-ref.dts b/arch/arm/boot/dts/uniphier-ld4-ref.dts index 4817ebb28eb2..b3aaab354f3e 100644 --- a/arch/arm/boot/dts/uniphier-ld4-ref.dts +++ b/arch/arm/boot/dts/uniphier-ld4-ref.dts @@ -8,9 +8,9 @@ */ /dts-v1/; -/include/ "uniphier-ld4.dtsi" -/include/ "uniphier-ref-daughter.dtsi" -/include/ "uniphier-support-card.dtsi" +#include "uniphier-ld4.dtsi" +#include "uniphier-ref-daughter.dtsi" +#include "uniphier-support-card.dtsi" / { model = "UniPhier LD4 Reference Board"; @@ -64,3 +64,7 @@ &usb0 { &usb1 { status = "okay"; }; + +&nand { + status = "okay"; +}; diff --git a/arch/arm/boot/dts/uniphier-ld4.dtsi b/arch/arm/boot/dts/uniphier-ld4.dtsi index fb2fd9605b9d..79183db5b386 100644 --- a/arch/arm/boot/dts/uniphier-ld4.dtsi +++ b/arch/arm/boot/dts/uniphier-ld4.dtsi @@ -270,6 +270,13 @@ intc: interrupt-controller@60001000 { interrupt-controller; }; + aidet: aidet@61830000 { + compatible = "socionext,uniphier-ld4-aidet"; + reg = <0x61830000 0x200>; + interrupt-controller; + #interrupt-cells = <2>; + }; + sysctrl@61840000 { compatible = "socionext,uniphier-ld4-sysctrl", "simple-mfd", "syscon"; @@ -285,7 +292,18 @@ sys_rst: reset { #reset-cells = <1>; }; }; + + nand: nand@68000000 { + compatible = "socionext,uniphier-denali-nand-v5a"; + status = "disabled"; + reg-names = "nand_data", "denali_reg"; + reg = <0x68000000 0x20>, <0x68100000 0x1000>; + interrupts = <0 65 4>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_nand2cs>; + clocks = <&sys_clk 2>; + }; }; }; -/include/ "uniphier-pinctrl.dtsi" +#include "uniphier-pinctrl.dtsi" diff --git a/arch/arm/boot/dts/uniphier-ld6b-ref.dts b/arch/arm/boot/dts/uniphier-ld6b-ref.dts index 96db4abc02c3..2188d114d79b 100644 --- a/arch/arm/boot/dts/uniphier-ld6b-ref.dts +++ b/arch/arm/boot/dts/uniphier-ld6b-ref.dts @@ -8,9 +8,9 @@ */ /dts-v1/; -/include/ "uniphier-ld6b.dtsi" -/include/ "uniphier-ref-daughter.dtsi" -/include/ "uniphier-support-card.dtsi" +#include "uniphier-ld6b.dtsi" +#include "uniphier-ref-daughter.dtsi" +#include "uniphier-support-card.dtsi" / { model = "UniPhier LD6b Reference Board"; @@ -58,3 +58,7 @@ &serial2 { &i2c0 { status = "okay"; }; + +&nand { + status = "okay"; +}; diff --git a/arch/arm/boot/dts/uniphier-ld6b.dtsi 
b/arch/arm/boot/dts/uniphier-ld6b.dtsi index 8b9a79731bd3..9a7b25cc8233 100644 --- a/arch/arm/boot/dts/uniphier-ld6b.dtsi +++ b/arch/arm/boot/dts/uniphier-ld6b.dtsi @@ -12,7 +12,7 @@ * The D-chip (digital chip) is the same as the PXs2 die. * Reuse the PXs2 device tree with some properties overridden. */ -/include/ "uniphier-pxs2.dtsi" +#include "uniphier-pxs2.dtsi" / { compatible = "socionext,uniphier-ld6b"; diff --git a/arch/arm/boot/dts/uniphier-pinctrl.dtsi b/arch/arm/boot/dts/uniphier-pinctrl.dtsi index 246f35ffb638..be82cddc4072 100644 --- a/arch/arm/boot/dts/uniphier-pinctrl.dtsi +++ b/arch/arm/boot/dts/uniphier-pinctrl.dtsi @@ -4,51 +4,35 @@ * Copyright (C) 2015-2017 Socionext Inc. * Author: Masahiro Yamada * - * This file is dual-licensed: you can use it either under the terms - * of the GPL or the X11 license, at your option. Note that this dual - * licensing only applies to this file, and not this project as a - * whole. - * - * a) This file is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 of the - * License, or (at your option) any later version. - * - * This file is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * Or, alternatively, - * - * b) Permission is hereby granted, free of charge, to any person - * obtaining a copy of this software and associated documentation - * files (the "Software"), to deal in the Software without - * restriction, including without limitation the rights to use, - * copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following - * conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES - * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT - * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT) */ &pinctrl { + pinctrl_aout: aout_grp { + groups = "aout"; + function = "aout"; + }; + pinctrl_emmc: emmc_grp { groups = "emmc", "emmc_dat8"; function = "emmc"; }; + pinctrl_ether_mii: ether_mii_grp { + groups = "ether_mii"; + function = "ether_mii"; + }; + + pinctrl_ether_rgmii: ether_rgmii_grp { + groups = "ether_rgmii"; + function = "ether_rgmii"; + }; + + pinctrl_ether_rmii: ether_rmii_grp { + groups = "ether_rmii"; + function = "ether_rmii"; + }; + pinctrl_i2c0: i2c0_grp { groups = "i2c0"; function = "i2c0"; diff --git a/arch/arm/boot/dts/uniphier-pro4-ace.dts b/arch/arm/boot/dts/uniphier-pro4-ace.dts index 11690b57931c..089419cee273 100644 --- a/arch/arm/boot/dts/uniphier-pro4-ace.dts +++ b/arch/arm/boot/dts/uniphier-pro4-ace.dts @@ -8,7 +8,7 @@ */ /dts-v1/; -/include/ "uniphier-pro4.dtsi" +#include "uniphier-pro4.dtsi" / { model = "UniPhier Pro4 Ace Board"; diff --git a/arch/arm/boot/dts/uniphier-pro4-ref.dts b/arch/arm/boot/dts/uniphier-pro4-ref.dts index 4cf539245f2e..903df6348e77 100644 --- a/arch/arm/boot/dts/uniphier-pro4-ref.dts +++ b/arch/arm/boot/dts/uniphier-pro4-ref.dts @@ -8,9 +8,9 @@ */ /dts-v1/; -/include/ "uniphier-pro4.dtsi" -/include/ "uniphier-ref-daughter.dtsi" -/include/ "uniphier-support-card.dtsi" +#include "uniphier-pro4.dtsi" +#include "uniphier-ref-daughter.dtsi" +#include "uniphier-support-card.dtsi" / { model = "UniPhier Pro4 Reference Board"; @@ -66,3 +66,7 @@ &usb2 { &usb3 { status = "okay"; }; + +&nand { + status = "okay"; +}; diff --git a/arch/arm/boot/dts/uniphier-pro4-sanji.dts b/arch/arm/boot/dts/uniphier-pro4-sanji.dts index 2763cebcd76a..adef212b45b2 100644 --- a/arch/arm/boot/dts/uniphier-pro4-sanji.dts +++ b/arch/arm/boot/dts/uniphier-pro4-sanji.dts @@ -8,7 +8,7 @@ */ /dts-v1/; -/include/ "uniphier-pro4.dtsi" +#include "uniphier-pro4.dtsi" / { model = "UniPhier Pro4 Sanji Board"; diff --git a/arch/arm/boot/dts/uniphier-pro4.dtsi b/arch/arm/boot/dts/uniphier-pro4.dtsi index 37400becf4ba..b3dbbd9b6e39 100644 --- a/arch/arm/boot/dts/uniphier-pro4.dtsi +++ b/arch/arm/boot/dts/uniphier-pro4.dtsi @@ -268,6 +268,13 @@ pinctrl: pinctrl { }; }; + aidet: aidet@5fc20000 { + compatible = "socionext,uniphier-pro4-aidet"; + reg = <0x5fc20000 0x200>; + interrupt-controller; + #interrupt-cells = <2>; + }; + timer@60000200 { compatible = "arm,cortex-a9-global-timer"; reg = <0x60000200 0x20>; @@ -305,7 +312,18 @@ sys_rst: reset { #reset-cells = <1>; }; }; + + nand: nand@68000000 { + compatible = "socionext,uniphier-denali-nand-v5a"; + status = "disabled"; + reg-names = "nand_data", "denali_reg"; + reg = <0x68000000 0x20>, <0x68100000 0x1000>; + interrupts = <0 65 4>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_nand>; + clocks = <&sys_clk 2>; + }; }; }; -/include/ "uniphier-pinctrl.dtsi" +#include "uniphier-pinctrl.dtsi" diff --git a/arch/arm/boot/dts/uniphier-pro5.dtsi b/arch/arm/boot/dts/uniphier-pro5.dtsi index 9577769a0add..b026bcd42a06 100644 --- a/arch/arm/boot/dts/uniphier-pro5.dtsi +++ b/arch/arm/boot/dts/uniphier-pro5.dtsi @@ -4,43 +4,7 @@ * Copyright (C) 2015-2016 Socionext Inc. * Author: Masahiro Yamada * - * This file is dual-licensed: you can use it either under the terms - * of the GPL or the X11 license, at your option. Note that this dual - * licensing only applies to this file, and not this project as a - * whole. 
- * - * a) This file is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 of the - * License, or (at your option) any later version. - * - * This file is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * Or, alternatively, - * - * b) Permission is hereby granted, free of charge, to any person - * obtaining a copy of this software and associated documentation - * files (the "Software"), to deal in the Software without - * restriction, including without limitation the rights to use, - * copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following - * conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES - * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT - * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. + * SPDX-License-Identifier: (GPL-2.0+ OR MIT) */ / { @@ -328,7 +292,7 @@ smpctrl@59801000 { sdctrl@59810000 { compatible = "socionext,uniphier-pro5-sdctrl", "simple-mfd", "syscon"; - reg = <0x59810000 0x800>; + reg = <0x59810000 0x400>; sd_clk: clock { compatible = "socionext,uniphier-pro5-sd-clock"; @@ -367,6 +331,13 @@ pinctrl: pinctrl { }; }; + aidet: aidet@5fc20000 { + compatible = "socionext,uniphier-pro5-aidet"; + reg = <0x5fc20000 0x200>; + interrupt-controller; + #interrupt-cells = <2>; + }; + timer@60000200 { compatible = "arm,cortex-a9-global-timer"; reg = <0x60000200 0x20>; @@ -404,7 +375,18 @@ sys_rst: reset { #reset-cells = <1>; }; }; + + nand: nand@68000000 { + compatible = "socionext,uniphier-denali-nand-v5b"; + status = "disabled"; + reg-names = "nand_data", "denali_reg"; + reg = <0x68000000 0x20>, <0x68100000 0x1000>; + interrupts = <0 65 4>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_nand2cs>; + clocks = <&sys_clk 2>; + }; }; }; -/include/ "uniphier-pinctrl.dtsi" +#include "uniphier-pinctrl.dtsi" diff --git a/arch/arm/boot/dts/uniphier-pxs2-gentil.dts b/arch/arm/boot/dts/uniphier-pxs2-gentil.dts index 81560f75bfa7..7dfae2667f50 100644 --- a/arch/arm/boot/dts/uniphier-pxs2-gentil.dts +++ b/arch/arm/boot/dts/uniphier-pxs2-gentil.dts @@ -8,7 +8,7 @@ */ /dts-v1/; -/include/ "uniphier-pxs2.dtsi" +#include "uniphier-pxs2.dtsi" / { model = "UniPhier PXs2 Gentil Board"; diff --git a/arch/arm/boot/dts/uniphier-pxs2-vodka.dts b/arch/arm/boot/dts/uniphier-pxs2-vodka.dts index dc2d0579c666..0cf615463a82 100644 --- a/arch/arm/boot/dts/uniphier-pxs2-vodka.dts +++ b/arch/arm/boot/dts/uniphier-pxs2-vodka.dts @@ -8,7 +8,7 @@ */ /dts-v1/; -/include/ "uniphier-pxs2.dtsi" +#include "uniphier-pxs2.dtsi" / { model = "UniPhier PXs2 Vodka Board"; diff --git a/arch/arm/boot/dts/uniphier-pxs2.dtsi b/arch/arm/boot/dts/uniphier-pxs2.dtsi index bace751d4023..90b020c95083 
100644 --- a/arch/arm/boot/dts/uniphier-pxs2.dtsi +++ b/arch/arm/boot/dts/uniphier-pxs2.dtsi @@ -276,7 +276,7 @@ smpctrl@59801000 { sdctrl@59810000 { compatible = "socionext,uniphier-pxs2-sdctrl", "simple-mfd", "syscon"; - reg = <0x59810000 0x800>; + reg = <0x59810000 0x400>; sd_clk: clock { compatible = "socionext,uniphier-pxs2-sd-clock"; @@ -315,6 +315,13 @@ pinctrl: pinctrl { }; }; + aidet: aidet@5fc20000 { + compatible = "socionext,uniphier-pxs2-aidet"; + reg = <0x5fc20000 0x200>; + interrupt-controller; + #interrupt-cells = <2>; + }; + timer@60000200 { compatible = "arm,cortex-a9-global-timer"; reg = <0x60000200 0x20>; @@ -352,7 +359,18 @@ sys_rst: reset { #reset-cells = <1>; }; }; + + nand: nand@68000000 { + compatible = "socionext,uniphier-denali-nand-v5b"; + status = "disabled"; + reg-names = "nand_data", "denali_reg"; + reg = <0x68000000 0x20>, <0x68100000 0x1000>; + interrupts = <0 65 4>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_nand2cs>; + clocks = <&sys_clk 2>; + }; }; }; -/include/ "uniphier-pinctrl.dtsi" +#include "uniphier-pinctrl.dtsi" diff --git a/arch/arm/boot/dts/uniphier-sld3-ref.dts b/arch/arm/boot/dts/uniphier-sld3-ref.dts deleted file mode 100644 index 70cda39a3dd2..000000000000 --- a/arch/arm/boot/dts/uniphier-sld3-ref.dts +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Device Tree Source for UniPhier sLD3 Reference Board - * - * Copyright (C) 2015-2016 Socionext Inc. - * Author: Masahiro Yamada - * - * SPDX-License-Identifier: (GPL-2.0+ OR MIT) - */ - -/dts-v1/; -/include/ "uniphier-sld3.dtsi" -/include/ "uniphier-ref-daughter.dtsi" -/include/ "uniphier-support-card.dtsi" - -/ { - model = "UniPhier sLD3 Reference Board"; - compatible = "socionext,uniphier-sld3-ref", "socionext,uniphier-sld3"; - - chosen { - stdout-path = "serial0:115200n8"; - }; - - aliases { - serial0 = &serial0; - serial1 = &serial1; - serial2 = &serial2; - i2c0 = &i2c0; - i2c1 = &i2c1; - i2c2 = &i2c2; - i2c3 = &i2c3; - i2c4 = &i2c4; - }; - - memory@8000000 { - device_type = "memory"; - reg = <0x80000000 0x20000000 - 0xc0000000 0x20000000>; - }; -}; - -&ethsc { - interrupts = <0 49 4>; -}; - -&serial0 { - status = "okay"; -}; - -&serial1 { - status = "okay"; -}; - -&serial2 { - status = "okay"; -}; - -&i2c0 { - status = "okay"; -}; - -&usb0 { - status = "okay"; -}; - -&usb1 { - status = "okay"; -}; - -&usb2 { - status = "okay"; -}; - -&usb3 { - status = "okay"; -}; diff --git a/arch/arm/boot/dts/uniphier-sld3.dtsi b/arch/arm/boot/dts/uniphier-sld3.dtsi deleted file mode 100644 index 408287936613..000000000000 --- a/arch/arm/boot/dts/uniphier-sld3.dtsi +++ /dev/null @@ -1,260 +0,0 @@ -/* - * Device Tree Source for UniPhier sLD3 SoC - * - * Copyright (C) 2015-2016 Socionext Inc.
- * Author: Masahiro Yamada - * - * SPDX-License-Identifier: (GPL-2.0+ OR MIT) - */ - -/ { - compatible = "socionext,uniphier-sld3"; - #address-cells = <1>; - #size-cells = <1>; - - cpus { - #address-cells = <1>; - #size-cells = <0>; - - cpu@0 { - device_type = "cpu"; - compatible = "arm,cortex-a9"; - reg = <0>; - enable-method = "psci"; - next-level-cache = <&l2>; - }; - - cpu@1 { - device_type = "cpu"; - compatible = "arm,cortex-a9"; - reg = <1>; - enable-method = "psci"; - next-level-cache = <&l2>; - }; - }; - - psci { - compatible = "arm,psci-0.2"; - method = "smc"; - }; - - clocks { - refclk: ref { - #clock-cells = <0>; - compatible = "fixed-clock"; - clock-frequency = <24576000>; - }; - - arm_timer_clk: arm_timer_clk { - #clock-cells = <0>; - compatible = "fixed-clock"; - clock-frequency = <50000000>; - }; - }; - - soc { - compatible = "simple-bus"; - #address-cells = <1>; - #size-cells = <1>; - ranges; - interrupt-parent = <&intc>; - - timer@20000200 { - compatible = "arm,cortex-a9-global-timer"; - reg = <0x20000200 0x20>; - interrupts = <1 11 0x304>; - clocks = <&arm_timer_clk>; - }; - - timer@20000600 { - compatible = "arm,cortex-a9-twd-timer"; - reg = <0x20000600 0x20>; - interrupts = <1 13 0x304>; - clocks = <&arm_timer_clk>; - }; - - intc: interrupt-controller@20001000 { - compatible = "arm,cortex-a9-gic"; - #interrupt-cells = <3>; - interrupt-controller; - reg = <0x20001000 0x1000>, - <0x20000100 0x100>; - }; - - l2: l2-cache@500c0000 { - compatible = "socionext,uniphier-system-cache"; - reg = <0x500c0000 0x2000>, <0x503c0100 0x4>, - <0x506c0000 0x400>; - interrupts = <0 174 4>, <0 175 4>; - cache-unified; - cache-size = <(512 * 1024)>; - cache-sets = <256>; - cache-line-size = <128>; - cache-level = <2>; - }; - - serial0: serial@54006800 { - compatible = "socionext,uniphier-uart"; - status = "disabled"; - reg = <0x54006800 0x40>; - interrupts = <0 33 4>; - clocks = <&sys_clk 0>; - }; - - serial1: serial@54006900 { - compatible = "socionext,uniphier-uart"; - status = "disabled"; - reg = <0x54006900 0x40>; - interrupts = <0 35 4>; - clocks = <&sys_clk 0>; - }; - - serial2: serial@54006a00 { - compatible = "socionext,uniphier-uart"; - status = "disabled"; - reg = <0x54006a00 0x40>; - interrupts = <0 37 4>; - clocks = <&sys_clk 0>; - }; - - i2c0: i2c@58400000 { - compatible = "socionext,uniphier-i2c"; - status = "disabled"; - reg = <0x58400000 0x40>; - #address-cells = <1>; - #size-cells = <0>; - interrupts = <0 41 1>; - clocks = <&sys_clk 1>; - clock-frequency = <100000>; - }; - - i2c1: i2c@58480000 { - compatible = "socionext,uniphier-i2c"; - status = "disabled"; - reg = <0x58480000 0x40>; - #address-cells = <1>; - #size-cells = <0>; - interrupts = <0 42 1>; - clocks = <&sys_clk 1>; - clock-frequency = <100000>; - }; - - i2c2: i2c@58500000 { - compatible = "socionext,uniphier-i2c"; - status = "disabled"; - reg = <0x58500000 0x40>; - #address-cells = <1>; - #size-cells = <0>; - interrupts = <0 43 1>; - clocks = <&sys_clk 1>; - clock-frequency = <100000>; - }; - - i2c3: i2c@58580000 { - compatible = "socionext,uniphier-i2c"; - status = "disabled"; - reg = <0x58580000 0x40>; - #address-cells = <1>; - #size-cells = <0>; - interrupts = <0 44 1>; - clocks = <&sys_clk 1>; - clock-frequency = <100000>; - }; - - /* chip-internal connection for DMD */ - i2c4: i2c@58600000 { - compatible = "socionext,uniphier-i2c"; - reg = <0x58600000 0x40>; - #address-cells = <1>; - #size-cells = <0>; - interrupts = <0 45 1>; - clocks = <&sys_clk 1>; - clock-frequency = <400000>; - }; - - system_bus: 
system-bus@58c00000 { - compatible = "socionext,uniphier-system-bus"; - status = "disabled"; - reg = <0x58c00000 0x400>; - #address-cells = <2>; - #size-cells = <1>; - }; - - smpctrl@59801000 { - compatible = "socionext,uniphier-smpctrl"; - reg = <0x59801000 0x400>; - }; - - mioctrl@59810000 { - compatible = "socionext,uniphier-sld3-mioctrl", - "simple-mfd", "syscon"; - reg = <0x59810000 0x800>; - - mio_clk: clock { - compatible = "socionext,uniphier-sld3-mio-clock"; - #clock-cells = <1>; - }; - - mio_rst: reset { - compatible = "socionext,uniphier-sld3-mio-reset"; - #reset-cells = <1>; - }; - }; - - usb0: usb@5a800100 { - compatible = "socionext,uniphier-ehci", "generic-ehci"; - status = "disabled"; - reg = <0x5a800100 0x100>; - interrupts = <0 80 4>; - clocks = <&mio_clk 7>, <&mio_clk 8>, <&mio_clk 12>; - resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 8>, - <&mio_rst 12>; - }; - - usb1: usb@5a810100 { - compatible = "socionext,uniphier-ehci", "generic-ehci"; - status = "disabled"; - reg = <0x5a810100 0x100>; - interrupts = <0 81 4>; - clocks = <&mio_clk 7>, <&mio_clk 9>, <&mio_clk 13>; - resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 9>, - <&mio_rst 13>; - }; - - usb2: usb@5a820100 { - compatible = "socionext,uniphier-ehci", "generic-ehci"; - status = "disabled"; - reg = <0x5a820100 0x100>; - interrupts = <0 82 4>; - clocks = <&mio_clk 7>, <&mio_clk 10>, <&mio_clk 14>; - resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 10>, - <&mio_rst 14>; - }; - - usb3: usb@5a830100 { - compatible = "socionext,uniphier-ehci", "generic-ehci"; - status = "disabled"; - reg = <0x5a830100 0x100>; - interrupts = <0 83 4>; - clocks = <&mio_clk 7>, <&mio_clk 11>, <&mio_clk 15>; - resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 11>, - <&mio_rst 15>; - }; - - sysctrl@f1840000 { - compatible = "socionext,uniphier-sld3-sysctrl", - "simple-mfd", "syscon"; - reg = <0xf1840000 0x10000>; - - sys_clk: clock { - compatible = "socionext,uniphier-sld3-clock"; - #clock-cells = <1>; - }; - - sys_rst: reset { - compatible = "socionext,uniphier-sld3-reset"; - #reset-cells = <1>; - }; - }; - }; -}; diff --git a/arch/arm/boot/dts/uniphier-sld8-ref.dts b/arch/arm/boot/dts/uniphier-sld8-ref.dts index 4536d5b71297..5accd3cc76e4 100644 --- a/arch/arm/boot/dts/uniphier-sld8-ref.dts +++ b/arch/arm/boot/dts/uniphier-sld8-ref.dts @@ -8,9 +8,9 @@ */ /dts-v1/; -/include/ "uniphier-sld8.dtsi" -/include/ "uniphier-ref-daughter.dtsi" -/include/ "uniphier-support-card.dtsi" +#include "uniphier-sld8.dtsi" +#include "uniphier-ref-daughter.dtsi" +#include "uniphier-support-card.dtsi" / { model = "UniPhier sLD8 Reference Board"; @@ -68,3 +68,7 @@ &usb1 { &usb2 { status = "okay"; }; + +&nand { + status = "okay"; +}; diff --git a/arch/arm/boot/dts/uniphier-sld8.dtsi b/arch/arm/boot/dts/uniphier-sld8.dtsi index 9fb9167f2db4..b08390332971 100644 --- a/arch/arm/boot/dts/uniphier-sld8.dtsi +++ b/arch/arm/boot/dts/uniphier-sld8.dtsi @@ -270,6 +270,13 @@ intc: interrupt-controller@60001000 { interrupt-controller; }; + aidet: aidet@61830000 { + compatible = "socionext,uniphier-sld8-aidet"; + reg = <0x61830000 0x200>; + interrupt-controller; + #interrupt-cells = <2>; + }; + sysctrl@61840000 { compatible = "socionext,uniphier-sld8-sysctrl", "simple-mfd", "syscon"; @@ -285,7 +292,18 @@ sys_rst: reset { #reset-cells = <1>; }; }; + + nand: nand@68000000 { + compatible = "socionext,uniphier-denali-nand-v5a"; + status = "disabled"; + reg-names = "nand_data", "denali_reg"; + reg = <0x68000000 0x20>, <0x68100000 0x1000>; + interrupts = <0 65 4>; + pinctrl-names = 
"default"; + pinctrl-0 = <&pinctrl_nand2cs>; + clocks = <&sys_clk 2>; + }; }; }; -/include/ "uniphier-pinctrl.dtsi" +#include "uniphier-pinctrl.dtsi" diff --git a/arch/arm/boot/dts/versatile-pb.dts b/arch/arm/boot/dts/versatile-pb.dts index 06e2331f666d..9abe26028c8b 100644 --- a/arch/arm/boot/dts/versatile-pb.dts +++ b/arch/arm/boot/dts/versatile-pb.dts @@ -39,7 +39,7 @@ gpio3: gpio@101e7000 { clock-names = "apb_pclk"; }; - pci-controller@10001000 { + pci@10001000 { compatible = "arm,versatile-pci"; device_type = "pci"; reg = <0x10001000 0x1000 diff --git a/arch/arm/boot/dts/zx296702-ad1.dts b/arch/arm/boot/dts/zx296702-ad1.dts index 081f980cfbe6..b0183c3a1d7c 100644 --- a/arch/arm/boot/dts/zx296702-ad1.dts +++ b/arch/arm/boot/dts/zx296702-ad1.dts @@ -18,7 +18,6 @@ memory { }; &mmc0 { - num-slots = <1>; supports-highspeed; non-removable; disable-wp; @@ -31,7 +30,6 @@ slot@0 { }; &mmc1 { - num-slots = <1>; supports-highspeed; non-removable; disable-wp; diff --git a/arch/arm/boot/dts/zynq-7000.dtsi b/arch/arm/boot/dts/zynq-7000.dtsi index f3ac9bfe580e..0f79fe1ccd9d 100644 --- a/arch/arm/boot/dts/zynq-7000.dtsi +++ b/arch/arm/boot/dts/zynq-7000.dtsi @@ -42,6 +42,14 @@ cpu1: cpu@1 { }; }; + fpga_full: fpga-full { + compatible = "fpga-region"; + fpga-mgr = <&devcfg>; + #address-cells = <1>; + #size-cells = <1>; + ranges; + }; + pmu@f8891000 { compatible = "arm,cortex-a9-pmu"; interrupts = <0 5 4>, <0 6 4>; diff --git a/arch/arm/boot/dts/zynq-parallella.dts b/arch/arm/boot/dts/zynq-parallella.dts index 64a6390fc501..0144acfa9793 100644 --- a/arch/arm/boot/dts/zynq-parallella.dts +++ b/arch/arm/boot/dts/zynq-parallella.dts @@ -34,7 +34,7 @@ memory@0 { }; chosen { - bootargs = "earlycon root=/dev/mmcblk0p2 rootfstype=ext4 rw rootwait"; + bootargs = "root=/dev/mmcblk0p2 rootfstype=ext4 rw rootwait"; stdout-path = "serial0:115200n8"; }; }; @@ -54,6 +54,7 @@ ethernet_phy: ethernet-phy@0 { compatible = "ethernet-phy-id0141.0e90", "ethernet-phy-ieee802.3-c22"; reg = <0>; + device_type = "ethernet-phy"; marvell,reg-init = <0x3 0x10 0xff00 0x1e>, <0x3 0x11 0xfff0 0xa>; }; diff --git a/arch/arm/boot/dts/zynq-zc702.dts b/arch/arm/boot/dts/zynq-zc702.dts index 0cdad2cc8b78..34e8277fce0d 100644 --- a/arch/arm/boot/dts/zynq-zc702.dts +++ b/arch/arm/boot/dts/zynq-zc702.dts @@ -12,7 +12,7 @@ * GNU General Public License for more details. */ /dts-v1/; -/include/ "zynq-7000.dtsi" +#include "zynq-7000.dtsi" / { model = "Zynq ZC702 Development Board"; @@ -30,7 +30,7 @@ memory@0 { }; chosen { - bootargs = "earlycon"; + bootargs = ""; stdout-path = "serial0:115200n8"; }; @@ -97,6 +97,7 @@ &gem0 { ethernet_phy: ethernet-phy@7 { reg = <7>; + device_type = "ethernet-phy"; }; }; @@ -131,6 +132,21 @@ si570: clock-generator@5d { }; }; + i2c@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + adv7511: hdmi-tx@39 { + compatible = "adi,adv7511"; + reg = <0x39>; + adi,input-depth = <8>; + adi,input-colorspace = "yuv422"; + adi,input-clock = "1x"; + adi,input-style = <3>; + adi,input-justification = "right"; + }; + }; + i2c@2 { #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm/boot/dts/zynq-zc706.dts b/arch/arm/boot/dts/zynq-zc706.dts index ad4bb06dba25..7ebc8c5ae39d 100644 --- a/arch/arm/boot/dts/zynq-zc706.dts +++ b/arch/arm/boot/dts/zynq-zc706.dts @@ -12,7 +12,7 @@ * GNU General Public License for more details. 
*/ /dts-v1/; -/include/ "zynq-7000.dtsi" +#include "zynq-7000.dtsi" / { model = "Zynq ZC706 Development Board"; @@ -30,7 +30,7 @@ memory@0 { }; chosen { - bootargs = "earlycon"; + bootargs = ""; stdout-path = "serial0:115200n8"; }; @@ -53,6 +53,7 @@ &gem0 { ethernet_phy: ethernet-phy@7 { reg = <7>; + device_type = "ethernet-phy"; }; }; @@ -87,6 +88,21 @@ si570: clock-generator@5d { }; }; + i2c@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + adv7511: hdmi-tx@39 { + compatible = "adi,adv7511"; + reg = <0x39>; + adi,input-depth = <8>; + adi,input-colorspace = "yuv422"; + adi,input-clock = "1x"; + adi,input-style = <3>; + adi,input-justification = "evenly"; + }; + }; + i2c@2 { #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm/boot/dts/zynq-zed.dts b/arch/arm/boot/dts/zynq-zed.dts index 325379f7983c..5e44dc12fd60 100644 --- a/arch/arm/boot/dts/zynq-zed.dts +++ b/arch/arm/boot/dts/zynq-zed.dts @@ -12,7 +12,7 @@ * GNU General Public License for more details. */ /dts-v1/; -/include/ "zynq-7000.dtsi" +#include "zynq-7000.dtsi" / { model = "Zynq Zed Development Board"; @@ -29,7 +29,7 @@ memory@0 { }; chosen { - bootargs = "earlycon"; + bootargs = ""; stdout-path = "serial0:115200n8"; }; @@ -50,6 +50,7 @@ &gem0 { ethernet_phy: ethernet-phy@0 { reg = <0>; + device_type = "ethernet-phy"; }; }; diff --git a/arch/arm/boot/dts/zynq-zybo.dts b/arch/arm/boot/dts/zynq-zybo.dts index 590ec24b8749..e40cafc5ee5b 100644 --- a/arch/arm/boot/dts/zynq-zybo.dts +++ b/arch/arm/boot/dts/zynq-zybo.dts @@ -12,7 +12,7 @@ * GNU General Public License for more details. */ /dts-v1/; -/include/ "zynq-7000.dtsi" +#include "zynq-7000.dtsi" / { model = "Zynq ZYBO Development Board"; @@ -29,7 +29,7 @@ memory@0 { }; chosen { - bootargs = "earlycon"; + bootargs = ""; stdout-path = "serial0:115200n8"; }; @@ -51,6 +51,7 @@ &gem0 { ethernet_phy: ethernet-phy@0 { reg = <0>; + device_type = "ethernet-phy"; }; }; diff --git a/arch/arm/configs/aspeed_g4_defconfig b/arch/arm/configs/aspeed_g4_defconfig index cfc2465e8b77..d23b9d56a88b 100644 --- a/arch/arm/configs/aspeed_g4_defconfig +++ b/arch/arm/configs/aspeed_g4_defconfig @@ -24,6 +24,7 @@ CONFIG_MODULE_UNLOAD=y # CONFIG_ARCH_MULTI_V7 is not set CONFIG_ARCH_ASPEED=y CONFIG_MACH_ASPEED_G4=y +CONFIG_VMSPLIT_2G=y CONFIG_AEABI=y # CONFIG_CPU_SW_DOMAIN_PAN is not set # CONFIG_COMPACTION is not set @@ -64,6 +65,7 @@ CONFIG_MTD_UBI_FASTMAP=y CONFIG_MTD_UBI_BLOCK=y CONFIG_BLK_DEV_RAM=y CONFIG_ASPEED_LPC_CTRL=y +CONFIG_ASPEED_LPC_SNOOP=y CONFIG_EEPROM_AT24=y CONFIG_NETDEVICES=y CONFIG_NETCONSOLE=y @@ -104,6 +106,7 @@ CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_NR_UARTS=6 CONFIG_SERIAL_8250_RUNTIME_UARTS=6 CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_ASPEED_VUART=y CONFIG_SERIAL_8250_SHARE_IRQ=y CONFIG_SERIAL_OF_PLATFORM=y CONFIG_ASPEED_BT_IPMI_BMC=y @@ -114,6 +117,7 @@ CONFIG_I2C_CHARDEV=y CONFIG_I2C_MUX=y CONFIG_I2C_MUX_PCA9541=y CONFIG_I2C_MUX_PCA954x=y +CONFIG_I2C_ASPEED=y CONFIG_GPIOLIB=y CONFIG_GPIO_SYSFS=y CONFIG_GPIO_ASPEED=y @@ -166,7 +170,6 @@ CONFIG_PRINTK_TIME=y CONFIG_DYNAMIC_DEBUG=y CONFIG_STRIP_ASM_SYMS=y CONFIG_DEBUG_FS=y -CONFIG_LOCKUP_DETECTOR=y CONFIG_WQ_WATCHDOG=y CONFIG_PANIC_TIMEOUT=-1 # CONFIG_SCHED_DEBUG is not set diff --git a/arch/arm/configs/aspeed_g5_defconfig b/arch/arm/configs/aspeed_g5_defconfig index 3c20d93de389..c0ad7b82086b 100644 --- a/arch/arm/configs/aspeed_g5_defconfig +++ b/arch/arm/configs/aspeed_g5_defconfig @@ -67,6 +67,7 @@ CONFIG_MTD_UBI_FASTMAP=y CONFIG_MTD_UBI_BLOCK=y CONFIG_BLK_DEV_RAM=y 
CONFIG_ASPEED_LPC_CTRL=y +CONFIG_ASPEED_LPC_SNOOP=y CONFIG_EEPROM_AT24=y CONFIG_NETDEVICES=y CONFIG_NETCONSOLE=y @@ -107,6 +108,7 @@ CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_NR_UARTS=6 CONFIG_SERIAL_8250_RUNTIME_UARTS=6 CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_ASPEED_VUART=y CONFIG_SERIAL_8250_SHARE_IRQ=y CONFIG_SERIAL_OF_PLATFORM=y CONFIG_ASPEED_BT_IPMI_BMC=y @@ -117,6 +119,7 @@ CONFIG_I2C_CHARDEV=y CONFIG_I2C_MUX=y CONFIG_I2C_MUX_PCA9541=y CONFIG_I2C_MUX_PCA954x=y +CONFIG_I2C_ASPEED=y CONFIG_GPIOLIB=y CONFIG_GPIO_SYSFS=y CONFIG_GPIO_ASPEED=y @@ -169,7 +172,6 @@ CONFIG_PRINTK_TIME=y CONFIG_DYNAMIC_DEBUG=y CONFIG_STRIP_ASM_SYMS=y CONFIG_DEBUG_FS=y -CONFIG_LOCKUP_DETECTOR=y CONFIG_WQ_WATCHDOG=y CONFIG_PANIC_TIMEOUT=-1 # CONFIG_SCHED_DEBUG is not set diff --git a/arch/arm/configs/bcm2835_defconfig b/arch/arm/configs/bcm2835_defconfig index 3ee9d78c412a..43dab4890ad3 100644 --- a/arch/arm/configs/bcm2835_defconfig +++ b/arch/arm/configs/bcm2835_defconfig @@ -55,6 +55,7 @@ CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y # CONFIG_STANDALONE is not set CONFIG_DMA_CMA=y +CONFIG_CMA_SIZE_MBYTES=32 CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_SCSI_CONSTANTS=y @@ -62,9 +63,15 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_NETDEVICES=y CONFIG_USB_USBNET=y CONFIG_USB_NET_SMSC95XX=y +CONFIG_BRCMFMAC=m CONFIG_ZD1211RW=y CONFIG_INPUT_EVDEV=y # CONFIG_LEGACY_PTYS is not set +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_SHARE_IRQ=y +CONFIG_SERIAL_8250_BCM2835AUX=y CONFIG_SERIAL_AMBA_PL011=y CONFIG_SERIAL_AMBA_PL011_CONSOLE=y CONFIG_TTY_PRINTK=y diff --git a/arch/arm/configs/davinci_all_defconfig b/arch/arm/configs/davinci_all_defconfig index 06e2e2a1a9be..27d9720f7207 100644 --- a/arch/arm/configs/davinci_all_defconfig +++ b/arch/arm/configs/davinci_all_defconfig @@ -143,6 +143,8 @@ CONFIG_VIDEO_ADV7343=m CONFIG_DRM=m CONFIG_DRM_TILCDC=m CONFIG_DRM_DUMB_VGA_DAC=m +CONFIG_DRM_TINYDRM=m +CONFIG_TINYDRM_ST7586=m CONFIG_FB=y CONFIG_FIRMWARE_EDID=y CONFIG_FB_DA8XX=y diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig index 25325ed9319e..8c2a2619971b 100644 --- a/arch/arm/configs/exynos_defconfig +++ b/arch/arm/configs/exynos_defconfig @@ -3,7 +3,6 @@ CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_CGROUPS=y CONFIG_BLK_DEV_INITRD=y -CONFIG_KALLSYMS_ALL=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_PARTITION_ADVANCED=y @@ -48,7 +47,43 @@ CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y CONFIG_IP_PNP_RARP=y +CONFIG_BT=m +CONFIG_BT_RFCOMM=m +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=m +CONFIG_BT_BNEP_MC_FILTER=y +CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_HIDP=m +CONFIG_BT_LEDS=y +CONFIG_BT_HCIBTUSB=m +CONFIG_BT_HCIBTSDIO=m +CONFIG_BT_HCIUART=m +CONFIG_BT_HCIUART_BCSP=y +CONFIG_BT_HCIUART_ATH3K=y +CONFIG_BT_HCIUART_3WIRE=y +CONFIG_BT_HCIUART_INTEL=y +CONFIG_BT_HCIUART_BCM=y +CONFIG_BT_HCIUART_QCA=y +CONFIG_BT_HCIUART_AG6XX=y +CONFIG_BT_HCIUART_MRVL=y +CONFIG_BT_HCIBCM203X=m +CONFIG_BT_HCIBPA10X=m +CONFIG_BT_HCIBFUSB=m +CONFIG_BT_HCIVHCI=m +CONFIG_BT_MRVL=m +CONFIG_BT_MRVL_SDIO=m +CONFIG_BT_ATH3K=m CONFIG_CFG80211=y +CONFIG_MAC80211=y +CONFIG_MAC80211_LEDS=y +CONFIG_NFC=y +CONFIG_NFC_DIGITAL=m +CONFIG_NFC_NCI=y +CONFIG_NFC_NCI_SPI=m +CONFIG_NFC_NCI_UART=m +CONFIG_NFC_HCI=m +CONFIG_NFC_SHDLC=y +CONFIG_NFC_S3FWRN5_I2C=y CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y CONFIG_DMA_CMA=y @@ -65,7 +100,9 @@ CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=m CONFIG_NETDEVICES=y CONFIG_SMSC911X=y +CONFIG_USB_RTL8150=m CONFIG_USB_RTL8152=y 
+CONFIG_USB_LAN78XX=m CONFIG_USB_USBNET=y CONFIG_USB_NET_SMSC75XX=y CONFIG_USB_NET_SMSC95XX=y @@ -189,7 +226,25 @@ CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_EXYNOS=y CONFIG_USB_OHCI_HCD=y CONFIG_USB_OHCI_EXYNOS=y +CONFIG_USB_ACM=m +CONFIG_USB_PRINTER=m +CONFIG_USB_WDM=m +CONFIG_USB_TMC=m CONFIG_USB_STORAGE=y +CONFIG_USB_STORAGE_REALTEK=m +CONFIG_USB_STORAGE_DATAFAB=m +CONFIG_USB_STORAGE_FREECOM=m +CONFIG_USB_STORAGE_ISD200=m +CONFIG_USB_STORAGE_USBAT=m +CONFIG_USB_STORAGE_SDDR09=m +CONFIG_USB_STORAGE_SDDR55=m +CONFIG_USB_STORAGE_JUMPSHOT=m +CONFIG_USB_STORAGE_ALAUDA=m +CONFIG_USB_STORAGE_ONETOUCH=m +CONFIG_USB_STORAGE_KARMA=m +CONFIG_USB_STORAGE_CYPRESS_ATACB=m +CONFIG_USB_STORAGE_ENE_UB6250=m +CONFIG_USB_UAS=m CONFIG_USB_DWC3=y CONFIG_USB_DWC2=y CONFIG_USB_HSIC_USB3503=y @@ -209,7 +264,6 @@ CONFIG_LEDS_GPIO=y CONFIG_LEDS_PWM=y CONFIG_LEDS_MAX77693=y CONFIG_LEDS_MAX8997=y -CONFIG_LEDS_TRIGGERS=y CONFIG_LEDS_TRIGGER_HEARTBEAT=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_MAX8997=y @@ -253,18 +307,30 @@ CONFIG_ROOT_NFS=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ASCII=y CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_UTF8=y CONFIG_PRINTK_TIME=y CONFIG_DYNAMIC_DEBUG=y CONFIG_DEBUG_INFO=y CONFIG_DEBUG_FS=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y -CONFIG_LOCKUP_DETECTOR=y -CONFIG_DEBUG_RT_MUTEXES=y -CONFIG_DEBUG_SPINLOCK=y -CONFIG_DEBUG_MUTEXES=y +CONFIG_SOFTLOCKUP_DETECTOR=y +# CONFIG_DETECT_HUNG_TASK is not set +CONFIG_PROVE_LOCKING=y +CONFIG_DEBUG_ATOMIC_SLEEP=y CONFIG_DEBUG_USER=y +CONFIG_CRYPTO_RSA=m +CONFIG_CRYPTO_DH=m CONFIG_CRYPTO_USER=m +CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_XTS=m +CONFIG_CRYPTO_MD5=m +CONFIG_CRYPTO_SHA512=m +CONFIG_CRYPTO_SHA3=m +CONFIG_CRYPTO_SALSA20=m +CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m @@ -276,6 +342,7 @@ CONFIG_CRYPTO_SHA1_ARM_NEON=m CONFIG_CRYPTO_SHA256_ARM=m CONFIG_CRYPTO_SHA512_ARM=m CONFIG_CRYPTO_AES_ARM_BS=m +CONFIG_CRYPTO_CHACHA20_NEON=m CONFIG_CRC_CCITT=y CONFIG_FONTS=y CONFIG_FONT_7x14=y diff --git a/arch/arm/configs/ezx_defconfig b/arch/arm/configs/ezx_defconfig index 23660f3d0f7f..484e51fbd4a6 100644 --- a/arch/arm/configs/ezx_defconfig +++ b/arch/arm/configs/ezx_defconfig @@ -27,7 +27,6 @@ CONFIG_ZBOOT_ROM_BSS=0x0 CONFIG_CMDLINE="console=tty1 root=/dev/mmcblk0p2 rootfstype=ext2 rootdelay=3 ip=192.168.0.202:192.168.0.200:192.168.0.200:255.255.255.0 debug" CONFIG_KEXEC=y CONFIG_CPU_FREQ=y -CONFIG_CPU_FREQ_DEBUG=y CONFIG_CPU_FREQ_GOV_POWERSAVE=m CONFIG_CPU_FREQ_GOV_USERSPACE=m CONFIG_CPU_FREQ_GOV_ONDEMAND=m diff --git a/arch/arm/configs/gemini_defconfig b/arch/arm/configs/gemini_defconfig index d2d75fa664a6..2a63fa10c813 100644 --- a/arch/arm/configs/gemini_defconfig +++ b/arch/arm/configs/gemini_defconfig @@ -32,6 +32,7 @@ CONFIG_BLK_DEV_RAM_SIZE=16384 CONFIG_BLK_DEV_SD=y # CONFIG_SCSI_LOWLEVEL is not set CONFIG_ATA=y +CONFIG_PATA_FTIDE010=y CONFIG_INPUT_EVDEV=y CONFIG_KEYBOARD_GPIO=y # CONFIG_INPUT_MOUSE is not set @@ -55,8 +56,8 @@ CONFIG_LEDS_GPIO=y CONFIG_LEDS_TRIGGERS=y CONFIG_LEDS_TRIGGER_HEARTBEAT=y CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_GEMINI=y CONFIG_DMADEVICES=y +CONFIG_AMBA_PL08X=y # CONFIG_DNOTIFY is not set CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig index e74de69caeab..32acac9ab81a 100644 --- a/arch/arm/configs/imx_v6_v7_defconfig +++ b/arch/arm/configs/imx_v6_v7_defconfig @@ -51,6 +51,7 @@ CONFIG_PREEMPT_VOLUNTARY=y CONFIG_AEABI=y CONFIG_HIGHMEM=y CONFIG_CMA=y 
+CONFIG_FORCE_MAX_ZONEORDER=14 CONFIG_CMDLINE="noinitrd console=ttymxc0,115200" CONFIG_KEXEC=y CONFIG_CPU_FREQ=y @@ -186,6 +187,7 @@ CONFIG_SERIAL_FSL_LPUART=y CONFIG_SERIAL_FSL_LPUART_CONSOLE=y # CONFIG_I2C_COMPAT is not set CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MUX=y CONFIG_I2C_MUX_GPIO=y # CONFIG_I2C_HELPER_AUTO is not set CONFIG_I2C_ALGOPCF=m @@ -193,12 +195,14 @@ CONFIG_I2C_ALGOPCA=m CONFIG_I2C_GPIO=y CONFIG_I2C_IMX=y CONFIG_SPI=y +CONFIG_SPI_GPIO=y CONFIG_SPI_IMX=y CONFIG_SPI_FSL_DSPI=y CONFIG_GPIO_SYSFS=y CONFIG_GPIO_MC9S08DZ60=y CONFIG_GPIO_PCA953X=y CONFIG_GPIO_STMPE=y +CONFIG_GPIO_74X164=y CONFIG_POWER_RESET=y CONFIG_POWER_RESET_IMX=y CONFIG_POWER_RESET_SYSCON=y @@ -226,15 +230,21 @@ CONFIG_REGULATOR_MC13892=y CONFIG_REGULATOR_PFUZE100=y CONFIG_MEDIA_SUPPORT=y CONFIG_MEDIA_CAMERA_SUPPORT=y -CONFIG_MEDIA_RC_SUPPORT=y +CONFIG_RC_CORE=y +CONFIG_MEDIA_CONTROLLER=y +CONFIG_VIDEO_V4L2_SUBDEV_API=y CONFIG_RC_DEVICES=y CONFIG_IR_GPIO_CIR=y CONFIG_MEDIA_USB_SUPPORT=y CONFIG_USB_VIDEO_CLASS=m CONFIG_V4L_PLATFORM_DRIVERS=y +CONFIG_VIDEO_MUX=y CONFIG_SOC_CAMERA=y CONFIG_V4L_MEM2MEM_DRIVERS=y -CONFIG_VIDEO_CODA=y +CONFIG_VIDEO_CODA=m +# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set +CONFIG_VIDEO_ADV7180=m +CONFIG_VIDEO_OV5640=m CONFIG_SOC_CAMERA_OV2640=y CONFIG_IMX_IPUV3_CORE=y CONFIG_DRM=y @@ -344,6 +354,9 @@ CONFIG_FSL_EDMA=y CONFIG_IMX_SDMA=y CONFIG_MXS_DMA=y CONFIG_STAGING=y +CONFIG_STAGING_MEDIA=y +CONFIG_VIDEO_IMX_MEDIA=y +CONFIG_COMMON_CLK_PWM=y CONFIG_IIO=y CONFIG_IMX7D_ADC=y CONFIG_VF610_ADC=y diff --git a/arch/arm/configs/ixp4xx_defconfig b/arch/arm/configs/ixp4xx_defconfig index c8378da71913..8c3c99cd6de9 100644 --- a/arch/arm/configs/ixp4xx_defconfig +++ b/arch/arm/configs/ixp4xx_defconfig @@ -81,12 +81,8 @@ CONFIG_ATALK=m CONFIG_DEV_APPLETALK=m CONFIG_IPDDP=m CONFIG_IPDDP_ENCAP=y -CONFIG_IPDDP_DECAP=y CONFIG_X25=m CONFIG_LAPB=m -CONFIG_ECONET=m -CONFIG_ECONET_AUNUDP=y -CONFIG_ECONET_NATIVE=y CONFIG_WAN_ROUTER=m CONFIG_NET_SCHED=y CONFIG_NET_SCH_CBQ=m diff --git a/arch/arm/configs/keystone_defconfig b/arch/arm/configs/keystone_defconfig index 1331f6dc456a..f907869e0ddc 100644 --- a/arch/arm/configs/keystone_defconfig +++ b/arch/arm/configs/keystone_defconfig @@ -112,6 +112,9 @@ CONFIG_IP_NF_ARP_MANGLE=y CONFIG_IP6_NF_IPTABLES=m CONFIG_IP_SCTP=y CONFIG_VLAN_8021Q=y +CONFIG_CAN=m +CONFIG_CAN_C_CAN=m +CONFIG_CAN_C_CAN_PLATFORM=m CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y @@ -156,6 +159,8 @@ CONFIG_POWER_RESET_KEYSTONE=y # CONFIG_HWMON is not set CONFIG_WATCHDOG=y CONFIG_DAVINCI_WATCHDOG=y +CONFIG_REGULATOR=y +CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_USB=y CONFIG_USB_ANNOUNCE_NEW_DEVICES=y CONFIG_USB_MON=y @@ -164,6 +169,8 @@ CONFIG_USB_STORAGE=y CONFIG_USB_DWC3=y CONFIG_NOP_USB_XCEIV=y CONFIG_KEYSTONE_USB_PHY=y +CONFIG_MMC=y +CONFIG_MMC_OMAP_HS=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y CONFIG_LEDS_GPIO=y @@ -174,12 +181,18 @@ CONFIG_LEDS_TRIGGER_BACKLIGHT=y CONFIG_LEDS_TRIGGER_GPIO=y CONFIG_DMADEVICES=y CONFIG_TI_EDMA=y +CONFIG_MAILBOX=y +CONFIG_TI_MESSAGE_MANAGER=y CONFIG_SOC_TI=y CONFIG_KEYSTONE_NAVIGATOR_QMSS=y CONFIG_KEYSTONE_NAVIGATOR_DMA=y +CONFIG_TI_SCI_PM_DOMAINS=y CONFIG_MEMORY=y CONFIG_TI_AEMIF=y CONFIG_KEYSTONE_IRQ=y +CONFIG_RESET_TI_SCI=m +CONFIG_RESET_TI_SYSCON=m +CONFIG_TI_SCI_PROTOCOL=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_FANOTIFY=y diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index 4d19c1b4b8e7..0cacdbf84a71 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ 
b/arch/arm/configs/multi_v7_defconfig @@ -104,13 +104,11 @@ CONFIG_ARCH_TEGRA_2x_SOC=y CONFIG_ARCH_TEGRA_3x_SOC=y CONFIG_ARCH_TEGRA_114_SOC=y CONFIG_ARCH_TEGRA_124_SOC=y -CONFIG_TEGRA_EMC_SCALING_ENABLE=y CONFIG_ARCH_UNIPHIER=y CONFIG_ARCH_U8500=y CONFIG_MACH_HREFV60=y CONFIG_MACH_SNOWBALL=y CONFIG_ARCH_VEXPRESS=y -CONFIG_ARCH_VEXPRESS_CA9X4=y CONFIG_ARCH_VEXPRESS_TC2_PM=y CONFIG_ARCH_WM8850=y CONFIG_ARCH_ZYNQ=y @@ -270,6 +268,7 @@ CONFIG_ICPLUS_PHY=y CONFIG_REALTEK_PHY=y CONFIG_MICREL_PHY=y CONFIG_FIXED_PHY=y +CONFIG_ROCKCHIP_PHY=y CONFIG_USB_PEGASUS=y CONFIG_USB_RTL8152=m CONFIG_USB_USBNET=y @@ -330,6 +329,7 @@ CONFIG_SERIAL_IMX_CONSOLE=y CONFIG_SERIAL_SH_SCI=y CONFIG_SERIAL_SH_SCI_NR_UARTS=20 CONFIG_SERIAL_SH_SCI_CONSOLE=y +CONFIG_SERIAL_SH_SCI_DMA=y CONFIG_SERIAL_MSM=y CONFIG_SERIAL_MSM_CONSOLE=y CONFIG_SERIAL_VT8500=y @@ -455,6 +455,7 @@ CONFIG_SENSORS_NTC_THERMISTOR=m CONFIG_SENSORS_PWM_FAN=m CONFIG_SENSORS_INA2XX=m CONFIG_CPU_THERMAL=y +CONFIG_BRCMSTB_THERMAL=m CONFIG_ROCKCHIP_THERMAL=y CONFIG_RCAR_THERMAL=y CONFIG_ARMADA_THERMAL=y @@ -584,6 +585,7 @@ CONFIG_VIDEO_ADV7180=m CONFIG_VIDEO_ML86V7667=m CONFIG_DRM=y CONFIG_DRM_I2C_ADV7511=m +CONFIG_DRM_I2C_ADV7511_AUDIO=y # CONFIG_DRM_I2C_CH7006 is not set # CONFIG_DRM_I2C_SIL164 is not set CONFIG_DRM_DUMB_VGA_DAC=m @@ -603,7 +605,6 @@ CONFIG_ROCKCHIP_DW_MIPI_DSI=y CONFIG_ROCKCHIP_INNO_HDMI=y CONFIG_DRM_ATMEL_HLCDC=m CONFIG_DRM_RCAR_DU=m -CONFIG_DRM_RCAR_HDMI=y CONFIG_DRM_RCAR_LVDS=y CONFIG_DRM_SUN4I=m CONFIG_DRM_TEGRA=y @@ -650,9 +651,11 @@ CONFIG_SND_SOC_SMDK_WM8994_PCM=m CONFIG_SND_SOC_SNOW=m CONFIG_SND_SOC_SH4_FSI=m CONFIG_SND_SOC_RCAR=m -CONFIG_SND_SOC_RSRC_CARD=m +CONFIG_SND_SIMPLE_SCU_CARD=m CONFIG_SND_SUN4I_CODEC=m CONFIG_SND_SOC_TEGRA=m +CONFIG_SND_SOC_TEGRA20_I2S=m +CONFIG_SND_SOC_TEGRA30_I2S=m CONFIG_SND_SOC_TEGRA_RT5640=m CONFIG_SND_SOC_TEGRA_WM8753=m CONFIG_SND_SOC_TEGRA_WM8903=m @@ -695,7 +698,6 @@ CONFIG_USB_CHIPIDEA_UDC=y CONFIG_USB_CHIPIDEA_HOST=y CONFIG_AB8500_USB=y CONFIG_KEYSTONE_USB_PHY=y -CONFIG_OMAP_USB3=y CONFIG_USB_GPIO_VBUS=y CONFIG_USB_ISP1301=y CONFIG_USB_MSM_OTG=m @@ -711,7 +713,7 @@ CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_OF_ARASAN=y CONFIG_MMC_SDHCI_OF_AT91=y -CONFIG_MMC_SDHCI_OF_ESDHC=m +CONFIG_MMC_SDHCI_OF_ESDHC=y CONFIG_MMC_SDHCI_ESDHC_IMX=y CONFIG_MMC_SDHCI_DOVE=y CONFIG_MMC_SDHCI_TEGRA=y @@ -728,7 +730,6 @@ CONFIG_MMC_SDHCI_MSM=y CONFIG_MMC_MVSDIO=y CONFIG_MMC_SDHI=y CONFIG_MMC_DW=y -CONFIG_MMC_DW_IDMAC=y CONFIG_MMC_DW_PLTFM=y CONFIG_MMC_DW_EXYNOS=y CONFIG_MMC_DW_ROCKCHIP=y @@ -825,7 +826,6 @@ CONFIG_BCMA_DRIVER_GPIO=y CONFIG_QCOM_GSBI=y CONFIG_QCOM_PM=y CONFIG_QCOM_SMEM=y -CONFIG_QCOM_SMD=y CONFIG_QCOM_SMD_RPM=y CONFIG_QCOM_SMP2P=y CONFIG_QCOM_SMSM=y @@ -837,7 +837,6 @@ CONFIG_CHROME_PLATFORMS=y CONFIG_STAGING_BOARD=y CONFIG_CROS_EC_CHARDEV=m CONFIG_COMMON_CLK_MAX77686=y -CONFIG_COMMON_CLK_MAX77802=m CONFIG_COMMON_CLK_RK808=m CONFIG_COMMON_CLK_S2MPS11=m CONFIG_APQ_MMCC_8084=y @@ -933,7 +932,6 @@ CONFIG_PRINTK_TIME=y CONFIG_DEBUG_FS=y CONFIG_MAGIC_SYSRQ=y CONFIG_LOCKUP_DETECTOR=y -CONFIG_CRYPTO_DEV_TEGRA_AES=y CONFIG_CPUFREQ_DT=y CONFIG_KEYSTONE_IRQ=y CONFIG_HW_RANDOM=y diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index a120ae816260..7b97200c1d64 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -170,6 +170,7 @@ CONFIG_TI_CPTS=y # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_AT803X_PHY=y CONFIG_DP83848_PHY=y +CONFIG_DP83867_PHY=y CONFIG_MICREL_PHY=y CONFIG_SMSC_PHY=y CONFIG_PPP=m @@ -250,6 
+251,7 @@ CONFIG_DEBUG_GPIO=y CONFIG_GPIO_SYSFS=y CONFIG_GPIO_PCA953X=m CONFIG_GPIO_PCF857X=y +CONFIG_GPIO_LP87565=y CONFIG_GPIO_PALMAS=y CONFIG_GPIO_TWL4030=y CONFIG_W1=m @@ -284,6 +286,7 @@ CONFIG_MFD_TI_AM335X_TSCADC=m CONFIG_MFD_PALMAS=y CONFIG_MFD_TPS65217=y CONFIG_MFD_TI_LP873X=y +CONFIG_MFD_TI_LP87565=y CONFIG_MFD_TPS65218=y CONFIG_MFD_TPS65910=y CONFIG_TWL6040_CORE=y @@ -292,6 +295,7 @@ CONFIG_REGULATOR_GPIO=y CONFIG_REGULATOR_LM363X=m CONFIG_REGULATOR_LP872X=y CONFIG_REGULATOR_LP873X=y +CONFIG_REGULATOR_LP87565=y CONFIG_REGULATOR_PALMAS=y CONFIG_REGULATOR_PBIAS=y CONFIG_REGULATOR_TI_ABB=y @@ -304,7 +308,7 @@ CONFIG_REGULATOR_TPS65910=y CONFIG_REGULATOR_TWL4030=y CONFIG_MEDIA_SUPPORT=m CONFIG_MEDIA_CAMERA_SUPPORT=y -CONFIG_MEDIA_RC_SUPPORT=y +CONFIG_RC_CORE=m CONFIG_MEDIA_CONTROLLER=y CONFIG_VIDEO_V4L2_SUBDEV_API=y CONFIG_LIRC=m diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig index 64e3a2a8cede..d5e1370ec303 100644 --- a/arch/arm/configs/pxa_defconfig +++ b/arch/arm/configs/pxa_defconfig @@ -471,7 +471,7 @@ CONFIG_LCD_PLATFORM=m CONFIG_LCD_TOSA=m CONFIG_BACKLIGHT_PWM=m CONFIG_BACKLIGHT_TOSA=m -CONFIG_FRAMEBUFFER_CONSOLE=m +CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y CONFIG_LOGO=y CONFIG_SOUND=m diff --git a/arch/arm/configs/qcom_defconfig b/arch/arm/configs/qcom_defconfig index b02039c712c3..879159e4ab58 100644 --- a/arch/arm/configs/qcom_defconfig +++ b/arch/arm/configs/qcom_defconfig @@ -199,7 +199,6 @@ CONFIG_QCOM_WCNSS_PIL=y CONFIG_QCOM_GSBI=y CONFIG_QCOM_PM=y CONFIG_QCOM_SMEM=y -CONFIG_QCOM_SMD=y CONFIG_QCOM_SMD_RPM=y CONFIG_QCOM_SMP2P=y CONFIG_QCOM_SMSM=y diff --git a/arch/arm/configs/shmobile_defconfig b/arch/arm/configs/shmobile_defconfig index 3c66a422fb4d..7b4fc0143148 100644 --- a/arch/arm/configs/shmobile_defconfig +++ b/arch/arm/configs/shmobile_defconfig @@ -27,6 +27,7 @@ CONFIG_ARCH_SH73A0=y CONFIG_PL310_ERRATA_588369=y CONFIG_ARM_ERRATA_754322=y CONFIG_PCI=y +CONFIG_PCI_MSI=y CONFIG_PCI_RCAR_GEN2=y CONFIG_PCIE_RCAR=y CONFIG_SMP=y @@ -83,14 +84,14 @@ CONFIG_NETDEVICES=y # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_NATSEMI is not set CONFIG_SH_ETH=y +CONFIG_RAVB=y # CONFIG_NET_VENDOR_SEEQ is not set CONFIG_SMSC911X=y # CONFIG_NET_VENDOR_STMICRO is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set -CONFIG_SMSC_PHY=y CONFIG_MICREL_PHY=y -# CONFIG_INPUT_MOUSEDEV_PSAUX is not set +CONFIG_SMSC_PHY=y CONFIG_INPUT_EVDEV=y CONFIG_KEYBOARD_GPIO=y # CONFIG_INPUT_MOUSE is not set @@ -105,6 +106,7 @@ CONFIG_SERIAL_8250_EM=y CONFIG_SERIAL_SH_SCI=y CONFIG_SERIAL_SH_SCI_NR_UARTS=20 CONFIG_SERIAL_SH_SCI_CONSOLE=y +CONFIG_SERIAL_SH_SCI_DMA=y CONFIG_I2C_CHARDEV=y CONFIG_I2C_MUX=y CONFIG_I2C_DEMUX_PINCTRL=y @@ -121,9 +123,9 @@ CONFIG_SPI_SH_HSPI=y CONFIG_GPIO_EM=y CONFIG_GPIO_RCAR=y CONFIG_GPIO_PCF857X=y -CONFIG_POWER_SUPPLY=y CONFIG_POWER_RESET=y CONFIG_POWER_RESET_RMOBILE=y +CONFIG_POWER_SUPPLY=y # CONFIG_HWMON is not set CONFIG_THERMAL=y CONFIG_CPU_THERMAL=y @@ -153,10 +155,11 @@ CONFIG_VIDEO_ADV7180=y CONFIG_VIDEO_ADV7604=y CONFIG_VIDEO_ML86V7667=y CONFIG_DRM=y -CONFIG_DRM_I2C_ADV7511=y CONFIG_DRM_RCAR_DU=y -CONFIG_DRM_RCAR_HDMI=y CONFIG_DRM_RCAR_LVDS=y +CONFIG_DRM_DUMB_VGA_DAC=y +CONFIG_DRM_I2C_ADV7511=y +CONFIG_DRM_I2C_ADV7511_AUDIO=y CONFIG_FB_SH_MOBILE_LCDC=y CONFIG_FB_SH_MOBILE_MERAM=y # CONFIG_LCD_CLASS_DEVICE is not set @@ -169,12 +172,12 @@ CONFIG_SND=y CONFIG_SND_SOC=y CONFIG_SND_SOC_SH4_FSI=y CONFIG_SND_SOC_RCAR=y -CONFIG_SND_SOC_RSRC_CARD=y CONFIG_SND_SOC_AK4642=y 
CONFIG_SND_SOC_WM8978=y +CONFIG_SND_SIMPLE_SCU_CARD=y CONFIG_USB=y CONFIG_USB_XHCI_HCD=y -CONFIG_USB_XHCI_RCAR=y +CONFIG_USB_XHCI_PLATFORM=y CONFIG_USB_EHCI_HCD=y CONFIG_USB_OHCI_HCD=y CONFIG_USB_R8A66597_HCD=y @@ -190,6 +193,7 @@ CONFIG_LEDS_CLASS=y CONFIG_LEDS_GPIO=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_RS5C372=y +CONFIG_RTC_DRV_BQ32K=y CONFIG_RTC_DRV_S35390A=y CONFIG_RTC_DRV_RX8581=y CONFIG_RTC_DRV_DA9063=y diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig index 0ec1d1ec130f..5caaf971fb50 100644 --- a/arch/arm/configs/sunxi_defconfig +++ b/arch/arm/configs/sunxi_defconfig @@ -1,4 +1,3 @@ -CONFIG_FHANDLE=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_CGROUPS=y @@ -56,7 +55,6 @@ CONFIG_STMMAC_ETH=y # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set # CONFIG_WLAN is not set -# CONFIG_INPUT_MOUSEDEV is not set CONFIG_INPUT_EVDEV=y CONFIG_KEYBOARD_SUN4I_LRADC=y # CONFIG_INPUT_MOUSE is not set @@ -71,7 +69,6 @@ CONFIG_SERIAL_8250_RUNTIME_UARTS=8 CONFIG_SERIAL_8250_DW=y CONFIG_SERIAL_OF_PLATFORM=y # CONFIG_HW_RANDOM is not set -CONFIG_I2C=y CONFIG_I2C_CHARDEV=y CONFIG_I2C_MV64XXX=y CONFIG_I2C_SUN6I_P2WI=y @@ -80,14 +77,14 @@ CONFIG_SPI_SUN4I=y CONFIG_SPI_SUN6I=y CONFIG_GPIO_SYSFS=y CONFIG_POWER_SUPPLY=y +CONFIG_CHARGER_AXP20X=y +CONFIG_BATTERY_AXP20X=y CONFIG_AXP20X_POWER=y CONFIG_THERMAL=y -CONFIG_THERMAL_OF=y CONFIG_CPU_THERMAL=y CONFIG_WATCHDOG=y CONFIG_SUNXI_WATCHDOG=y CONFIG_MFD_AC100=y -CONFIG_MFD_AXP20X=y CONFIG_MFD_AXP20X_I2C=y CONFIG_MFD_AXP20X_RSB=y CONFIG_REGULATOR=y @@ -95,16 +92,13 @@ CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_REGULATOR_AXP20X=y CONFIG_REGULATOR_GPIO=y CONFIG_MEDIA_SUPPORT=y -CONFIG_MEDIA_RC_SUPPORT=y +CONFIG_RC_CORE=y CONFIG_RC_DEVICES=y CONFIG_IR_SUNXI=y CONFIG_DRM=y -CONFIG_DRM_DUMB_VGA_DAC=y CONFIG_DRM_SUN4I=y -CONFIG_FB=y +CONFIG_DRM_DUMB_VGA_DAC=y CONFIG_FB_SIMPLE=y -CONFIG_FRAMEBUFFER_CONSOLE=y -CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y CONFIG_SOUND=y CONFIG_SND=y CONFIG_SND_SOC=y @@ -130,12 +124,13 @@ CONFIG_RTC_CLASS=y # CONFIG_RTC_INTF_SYSFS is not set # CONFIG_RTC_INTF_PROC is not set CONFIG_RTC_DRV_AC100=y -CONFIG_RTC_DRV_SUN6I=y CONFIG_RTC_DRV_SUNXI=y CONFIG_DMADEVICES=y CONFIG_DMA_SUN6I=y # CONFIG_IOMMU_SUPPORT is not set CONFIG_EXTCON=y +CONFIG_IIO=y +CONFIG_AXP20X_ADC=y CONFIG_PWM=y CONFIG_PWM_SUN4I=y CONFIG_PHY_SUN4I_USB=y diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig index f0efc854b5a2..6678f2929356 100644 --- a/arch/arm/configs/tegra_defconfig +++ b/arch/arm/configs/tegra_defconfig @@ -121,7 +121,6 @@ CONFIG_TOUCHSCREEN_WM97XX=y CONFIG_TOUCHSCREEN_STMPE=y CONFIG_INPUT_MISC=y # CONFIG_LEGACY_PTYS is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_OF_PLATFORM=y @@ -202,6 +201,8 @@ CONFIG_SND_HDA_CODEC_HDMI=y # CONFIG_SND_USB is not set CONFIG_SND_SOC=y CONFIG_SND_SOC_TEGRA=y +CONFIG_SND_SOC_TEGRA20_I2S=y +CONFIG_SND_SOC_TEGRA30_I2S=y CONFIG_SND_SOC_TEGRA_RT5640=y CONFIG_SND_SOC_TEGRA_WM8753=y CONFIG_SND_SOC_TEGRA_WM8903=y @@ -218,6 +219,9 @@ CONFIG_USB_EHCI_TEGRA=y CONFIG_USB_ACM=y CONFIG_USB_WDM=y CONFIG_USB_STORAGE=y +CONFIG_USB_CHIPIDEA=y +CONFIG_USB_CHIPIDEA_UDC=y +CONFIG_USB_GADGET=y CONFIG_MMC=y CONFIG_MMC_BLOCK_MINORS=16 CONFIG_MMC_SDHCI=y @@ -247,8 +251,6 @@ CONFIG_RTC_DRV_TEGRA=y CONFIG_DMADEVICES=y CONFIG_TEGRA20_APB_DMA=y CONFIG_STAGING=y -CONFIG_SENSORS_ISL29018=y -CONFIG_SENSORS_ISL29028=y CONFIG_MFD_NVEC=y CONFIG_KEYBOARD_NVEC=y CONFIG_SERIO_NVEC_PS2=y @@ -263,6 +265,8 @@ 
CONFIG_ARCH_TEGRA_124_SOC=y CONFIG_MEMORY=y CONFIG_IIO=y CONFIG_MPU3050_I2C=y +CONFIG_SENSORS_ISL29018=y +CONFIG_SENSORS_ISL29028=y CONFIG_AK8975=y CONFIG_PWM=y CONFIG_PWM_TEGRA=y @@ -288,13 +292,11 @@ CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y CONFIG_PRINTK_TIME=y CONFIG_DEBUG_INFO=y -CONFIG_DEBUG_FS=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_SLAB=y CONFIG_DEBUG_VM=y CONFIG_DETECT_HUNG_TASK=y CONFIG_SCHEDSTATS=y -CONFIG_TIMER_STATS=y # CONFIG_DEBUG_PREEMPT is not set CONFIG_DEBUG_MUTEXES=y CONFIG_DEBUG_SG=y diff --git a/arch/arm/configs/vexpress_defconfig b/arch/arm/configs/vexpress_defconfig index 0fa0ed577b15..edae1c58fe80 100644 --- a/arch/arm/configs/vexpress_defconfig +++ b/arch/arm/configs/vexpress_defconfig @@ -19,7 +19,6 @@ CONFIG_MODULE_UNLOAD=y # CONFIG_IOSCHED_DEADLINE is not set # CONFIG_IOSCHED_CFQ is not set CONFIG_ARCH_VEXPRESS=y -CONFIG_ARCH_VEXPRESS_CA9X4=y CONFIG_ARCH_VEXPRESS_DCSCB=y CONFIG_ARCH_VEXPRESS_TC2_PM=y # CONFIG_SWP_EMULATE is not set diff --git a/arch/arm/configs/viper_defconfig b/arch/arm/configs/viper_defconfig index 44d4fa57ba0a..070e5074f1ee 100644 --- a/arch/arm/configs/viper_defconfig +++ b/arch/arm/configs/viper_defconfig @@ -113,7 +113,7 @@ CONFIG_FB_PXA_PARAMETERS=y CONFIG_BACKLIGHT_LCD_SUPPORT=y CONFIG_BACKLIGHT_PWM=m # CONFIG_VGA_CONSOLE is not set -CONFIG_FRAMEBUFFER_CONSOLE=m +CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_LOGO=y CONFIG_SOUND=m CONFIG_SND=m diff --git a/arch/arm/configs/zeus_defconfig b/arch/arm/configs/zeus_defconfig index 8d4c0c926c34..09e7050d5653 100644 --- a/arch/arm/configs/zeus_defconfig +++ b/arch/arm/configs/zeus_defconfig @@ -112,7 +112,7 @@ CONFIG_FB_PXA=m CONFIG_FB_PXA_PARAMETERS=y CONFIG_BACKLIGHT_LCD_SUPPORT=y # CONFIG_VGA_CONSOLE is not set -CONFIG_FRAMEBUFFER_CONSOLE=m +CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_LOGO=y CONFIG_SOUND=m CONFIG_SND=m diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig index b9adedcc5b2e..ec72752d5668 100644 --- a/arch/arm/crypto/Kconfig +++ b/arch/arm/crypto/Kconfig @@ -94,14 +94,15 @@ config CRYPTO_AES_ARM_CE ARMv8 Crypto Extensions config CRYPTO_GHASH_ARM_CE - tristate "PMULL-accelerated GHASH using ARMv8 Crypto Extensions" + tristate "PMULL-accelerated GHASH using NEON/ARMv8 Crypto Extensions" depends on KERNEL_MODE_NEON select CRYPTO_HASH select CRYPTO_CRYPTD help Use an implementation of GHASH (used by the GCM AEAD chaining mode) that uses the 64x64 to 128 bit polynomial multiplication (vmull.p64) - that is part of the ARMv8 Crypto Extensions + that is part of the ARMv8 Crypto Extensions, or a slower variant that + uses the vmull.p8 instruction that is part of the basic NEON ISA. 
config CRYPTO_CRCT10DIF_ARM_CE tristate "CRCT10DIF digest algorithm using PMULL instructions" diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c index 0f966a8ca1ce..d0a9cec73707 100644 --- a/arch/arm/crypto/aes-ce-glue.c +++ b/arch/arm/crypto/aes-ce-glue.c @@ -285,9 +285,7 @@ static int ctr_encrypt(struct skcipher_request *req) ce_aes_ctr_encrypt(tail, NULL, (u8 *)ctx->key_enc, num_rounds(ctx), blocks, walk.iv); - if (tdst != tsrc) - memcpy(tdst, tsrc, nbytes); - crypto_xor(tdst, tail, nbytes); + crypto_xor_cpy(tdst, tsrc, tail, nbytes); err = skcipher_walk_done(&walk, 0); } kernel_neon_end(); diff --git a/arch/arm/crypto/aes-cipher-core.S b/arch/arm/crypto/aes-cipher-core.S index c817a86c4ca8..54b384084637 100644 --- a/arch/arm/crypto/aes-cipher-core.S +++ b/arch/arm/crypto/aes-cipher-core.S @@ -10,6 +10,7 @@ */ #include +#include .text .align 5 @@ -32,19 +33,19 @@ .endif .endm - .macro __load, out, in, idx + .macro __load, out, in, idx, sz, op .if __LINUX_ARM_ARCH__ < 7 && \idx > 0 - ldr \out, [ttab, \in, lsr #(8 * \idx) - 2] + ldr\op \out, [ttab, \in, lsr #(8 * \idx) - \sz] .else - ldr \out, [ttab, \in, lsl #2] + ldr\op \out, [ttab, \in, lsl #\sz] .endif .endm - .macro __hround, out0, out1, in0, in1, in2, in3, t3, t4, enc + .macro __hround, out0, out1, in0, in1, in2, in3, t3, t4, enc, sz, op __select \out0, \in0, 0 __select t0, \in1, 1 - __load \out0, \out0, 0 - __load t0, t0, 1 + __load \out0, \out0, 0, \sz, \op + __load t0, t0, 1, \sz, \op .if \enc __select \out1, \in1, 0 @@ -53,10 +54,10 @@ __select \out1, \in3, 0 __select t1, \in0, 1 .endif - __load \out1, \out1, 0 + __load \out1, \out1, 0, \sz, \op __select t2, \in2, 2 - __load t1, t1, 1 - __load t2, t2, 2 + __load t1, t1, 1, \sz, \op + __load t2, t2, 2, \sz, \op eor \out0, \out0, t0, ror #24 @@ -68,9 +69,9 @@ __select \t3, \in1, 2 __select \t4, \in2, 3 .endif - __load \t3, \t3, 2 - __load t0, t0, 3 - __load \t4, \t4, 3 + __load \t3, \t3, 2, \sz, \op + __load t0, t0, 3, \sz, \op + __load \t4, \t4, 3, \sz, \op eor \out1, \out1, t1, ror #24 eor \out0, \out0, t2, ror #16 @@ -82,14 +83,14 @@ eor \out1, \out1, t2 .endm - .macro fround, out0, out1, out2, out3, in0, in1, in2, in3 - __hround \out0, \out1, \in0, \in1, \in2, \in3, \out2, \out3, 1 - __hround \out2, \out3, \in2, \in3, \in0, \in1, \in1, \in2, 1 + .macro fround, out0, out1, out2, out3, in0, in1, in2, in3, sz=2, op + __hround \out0, \out1, \in0, \in1, \in2, \in3, \out2, \out3, 1, \sz, \op + __hround \out2, \out3, \in2, \in3, \in0, \in1, \in1, \in2, 1, \sz, \op .endm - .macro iround, out0, out1, out2, out3, in0, in1, in2, in3 - __hround \out0, \out1, \in0, \in3, \in2, \in1, \out2, \out3, 0 - __hround \out2, \out3, \in2, \in1, \in0, \in3, \in1, \in0, 0 + .macro iround, out0, out1, out2, out3, in0, in1, in2, in3, sz=2, op + __hround \out0, \out1, \in0, \in3, \in2, \in1, \out2, \out3, 0, \sz, \op + __hround \out2, \out3, \in2, \in1, \in0, \in3, \in1, \in0, 0, \sz, \op .endm .macro __rev, out, in @@ -114,7 +115,7 @@ .endif .endm - .macro do_crypt, round, ttab, ltab + .macro do_crypt, round, ttab, ltab, bsz push {r3-r11, lr} ldr r4, [in] @@ -146,9 +147,12 @@ 1: subs rounds, rounds, #4 \round r8, r9, r10, r11, r4, r5, r6, r7 - __adrl ttab, \ltab, ls + bls 2f \round r4, r5, r6, r7, r8, r9, r10, r11 - bhi 0b + b 0b + +2: __adrl ttab, \ltab + \round r4, r5, r6, r7, r8, r9, r10, r11, \bsz, b #ifdef CONFIG_CPU_BIG_ENDIAN __rev r4, r4 @@ -170,10 +174,48 @@ .ltorg .endm + .align L1_CACHE_SHIFT + .type __aes_arm_inverse_sbox, %object +__aes_arm_inverse_sbox: + .byte 
0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38 + .byte 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb + .byte 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87 + .byte 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb + .byte 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d + .byte 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e + .byte 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2 + .byte 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25 + .byte 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16 + .byte 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92 + .byte 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda + .byte 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84 + .byte 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a + .byte 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06 + .byte 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02 + .byte 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b + .byte 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea + .byte 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73 + .byte 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85 + .byte 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e + .byte 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89 + .byte 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b + .byte 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20 + .byte 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4 + .byte 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31 + .byte 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f + .byte 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d + .byte 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef + .byte 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0 + .byte 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61 + .byte 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26 + .byte 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d + .size __aes_arm_inverse_sbox, . - __aes_arm_inverse_sbox + ENTRY(__aes_arm_encrypt) - do_crypt fround, crypto_ft_tab, crypto_fl_tab + do_crypt fround, crypto_ft_tab, crypto_ft_tab + 1, 2 ENDPROC(__aes_arm_encrypt) + .align 5 ENTRY(__aes_arm_decrypt) - do_crypt iround, crypto_it_tab, crypto_il_tab + do_crypt iround, crypto_it_tab, __aes_arm_inverse_sbox, 0 ENDPROC(__aes_arm_decrypt) diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c index c76377961444..18768f330449 100644 --- a/arch/arm/crypto/aes-neonbs-glue.c +++ b/arch/arm/crypto/aes-neonbs-glue.c @@ -221,9 +221,8 @@ static int ctr_encrypt(struct skcipher_request *req) u8 *dst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE; u8 *src = walk.src.virt.addr + blocks * AES_BLOCK_SIZE; - if (dst != src) - memcpy(dst, src, walk.total % AES_BLOCK_SIZE); - crypto_xor(dst, final, walk.total % AES_BLOCK_SIZE); + crypto_xor_cpy(dst, src, final, + walk.total % AES_BLOCK_SIZE); err = skcipher_walk_done(&walk, 0); break; diff --git a/arch/arm/crypto/ghash-ce-core.S b/arch/arm/crypto/ghash-ce-core.S index f6ab8bcc9efe..2f78c10b1881 100644 --- a/arch/arm/crypto/ghash-ce-core.S +++ b/arch/arm/crypto/ghash-ce-core.S @@ -1,7 +1,7 @@ /* - * Accelerated GHASH implementation with ARMv8 vmull.p64 instructions. + * Accelerated GHASH implementation with NEON/ARMv8 vmull.p8/64 instructions. * - * Copyright (C) 2015 Linaro Ltd. + * Copyright (C) 2015 - 2017 Linaro Ltd. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published @@ -12,40 +12,162 @@ #include SHASH .req q0 - SHASH2 .req q1 - T1 .req q2 - T2 .req q3 - MASK .req q4 - XL .req q5 - XM .req q6 - XH .req q7 - IN1 .req q7 + T1 .req q1 + XL .req q2 + XM .req q3 + XH .req q4 + IN1 .req q4 SHASH_L .req d0 SHASH_H .req d1 - SHASH2_L .req d2 - T1_L .req d4 - MASK_L .req d8 - XL_L .req d10 - XL_H .req d11 - XM_L .req d12 - XM_H .req d13 - XH_L .req d14 + T1_L .req d2 + T1_H .req d3 + XL_L .req d4 + XL_H .req d5 + XM_L .req d6 + XM_H .req d7 + XH_L .req d8 + + t0l .req d10 + t0h .req d11 + t1l .req d12 + t1h .req d13 + t2l .req d14 + t2h .req d15 + t3l .req d16 + t3h .req d17 + t4l .req d18 + t4h .req d19 + + t0q .req q5 + t1q .req q6 + t2q .req q7 + t3q .req q8 + t4q .req q9 + T2 .req q9 + + s1l .req d20 + s1h .req d21 + s2l .req d22 + s2h .req d23 + s3l .req d24 + s3h .req d25 + s4l .req d26 + s4h .req d27 + + MASK .req d28 + SHASH2_p8 .req d28 + + k16 .req d29 + k32 .req d30 + k48 .req d31 + SHASH2_p64 .req d31 .text .fpu crypto-neon-fp-armv8 + .macro __pmull_p64, rd, rn, rm, b1, b2, b3, b4 + vmull.p64 \rd, \rn, \rm + .endm + /* - * void pmull_ghash_update(int blocks, u64 dg[], const char *src, - * struct ghash_key const *k, const char *head) + * This implementation of 64x64 -> 128 bit polynomial multiplication + * using vmull.p8 instructions (8x8 -> 16) is taken from the paper + * "Fast Software Polynomial Multiplication on ARM Processors Using + * the NEON Engine" by Danilo Camara, Conrado Gouvea, Julio Lopez and + * Ricardo Dahab (https://hal.inria.fr/hal-01506572) + * + * It has been slightly tweaked for in-order performance, and to allow + * 'rq' to overlap with 'ad' or 'bd'. */ -ENTRY(pmull_ghash_update) - vld1.64 {SHASH}, [r3] + .macro __pmull_p8, rq, ad, bd, b1=t4l, b2=t3l, b3=t4l, b4=t3l + vext.8 t0l, \ad, \ad, #1 @ A1 + .ifc \b1, t4l + vext.8 t4l, \bd, \bd, #1 @ B1 + .endif + vmull.p8 t0q, t0l, \bd @ F = A1*B + vext.8 t1l, \ad, \ad, #2 @ A2 + vmull.p8 t4q, \ad, \b1 @ E = A*B1 + .ifc \b2, t3l + vext.8 t3l, \bd, \bd, #2 @ B2 + .endif + vmull.p8 t1q, t1l, \bd @ H = A2*B + vext.8 t2l, \ad, \ad, #3 @ A3 + vmull.p8 t3q, \ad, \b2 @ G = A*B2 + veor t0q, t0q, t4q @ L = E + F + .ifc \b3, t4l + vext.8 t4l, \bd, \bd, #3 @ B3 + .endif + vmull.p8 t2q, t2l, \bd @ J = A3*B + veor t0l, t0l, t0h @ t0 = (L) (P0 + P1) << 8 + veor t1q, t1q, t3q @ M = G + H + .ifc \b4, t3l + vext.8 t3l, \bd, \bd, #4 @ B4 + .endif + vmull.p8 t4q, \ad, \b3 @ I = A*B3 + veor t1l, t1l, t1h @ t1 = (M) (P2 + P3) << 16 + vmull.p8 t3q, \ad, \b4 @ K = A*B4 + vand t0h, t0h, k48 + vand t1h, t1h, k32 + veor t2q, t2q, t4q @ N = I + J + veor t0l, t0l, t0h + veor t1l, t1l, t1h + veor t2l, t2l, t2h @ t2 = (N) (P4 + P5) << 24 + vand t2h, t2h, k16 + veor t3l, t3l, t3h @ t3 = (K) (P6 + P7) << 32 + vmov.i64 t3h, #0 + vext.8 t0q, t0q, t0q, #15 + veor t2l, t2l, t2h + vext.8 t1q, t1q, t1q, #14 + vmull.p8 \rq, \ad, \bd @ D = A*B + vext.8 t2q, t2q, t2q, #13 + vext.8 t3q, t3q, t3q, #12 + veor t0q, t0q, t1q + veor t2q, t2q, t3q + veor \rq, \rq, t0q + veor \rq, \rq, t2q + .endm + + // + // PMULL (64x64->128) based reduction for CPUs that can do + // it in a single instruction. 
+ // + .macro __pmull_reduce_p64 + vmull.p64 T1, XL_L, MASK + + veor XH_L, XH_L, XM_H + vext.8 T1, T1, T1, #8 + veor XL_H, XL_H, XM_L + veor T1, T1, XL + + vmull.p64 XL, T1_H, MASK + .endm + + // + // Alternative reduction for CPUs that lack support for the + // 64x64->128 PMULL instruction + // + .macro __pmull_reduce_p8 + veor XL_H, XL_H, XM_L + veor XH_L, XH_L, XM_H + + vshl.i64 T1, XL, #57 + vshl.i64 T2, XL, #62 + veor T1, T1, T2 + vshl.i64 T2, XL, #63 + veor T1, T1, T2 + veor XL_H, XL_H, T1_L + veor XH_L, XH_L, T1_H + + vshr.u64 T1, XL, #1 + veor XH, XH, XL + veor XL, XL, T1 + vshr.u64 T1, T1, #6 + vshr.u64 XL, XL, #1 + .endm + + .macro ghash_update, pn vld1.64 {XL}, [r1] - vmov.i8 MASK, #0xe1 - vext.8 SHASH2, SHASH, SHASH, #8 - vshl.u64 MASK, MASK, #57 - veor SHASH2, SHASH2, SHASH /* do the head block first, if supplied */ ldr ip, [sp] @@ -62,33 +184,59 @@ ENTRY(pmull_ghash_update) #ifndef CONFIG_CPU_BIG_ENDIAN vrev64.8 T1, T1 #endif - vext.8 T2, XL, XL, #8 vext.8 IN1, T1, T1, #8 - veor T1, T1, T2 + veor T1_L, T1_L, XL_H veor XL, XL, IN1 - vmull.p64 XH, SHASH_H, XL_H @ a1 * b1 + __pmull_\pn XH, XL_H, SHASH_H, s1h, s2h, s3h, s4h @ a1 * b1 veor T1, T1, XL - vmull.p64 XL, SHASH_L, XL_L @ a0 * b0 - vmull.p64 XM, SHASH2_L, T1_L @ (a1 + a0)(b1 + b0) + __pmull_\pn XL, XL_L, SHASH_L, s1l, s2l, s3l, s4l @ a0 * b0 + __pmull_\pn XM, T1_L, SHASH2_\pn @ (a1+a0)(b1+b0) - vext.8 T1, XL, XH, #8 - veor T2, XL, XH + veor T1, XL, XH veor XM, XM, T1 - veor XM, XM, T2 - vmull.p64 T2, XL_L, MASK_L - vmov XH_L, XM_H - vmov XM_H, XL_L + __pmull_reduce_\pn - veor XL, XM, T2 - vext.8 T2, XL, XL, #8 - vmull.p64 XL, XL_L, MASK_L - veor T2, T2, XH - veor XL, XL, T2 + veor T1, T1, XH + veor XL, XL, T1 bne 0b vst1.64 {XL}, [r1] bx lr -ENDPROC(pmull_ghash_update) + .endm + + /* + * void pmull_ghash_update(int blocks, u64 dg[], const char *src, + * struct ghash_key const *k, const char *head) + */ +ENTRY(pmull_ghash_update_p64) + vld1.64 {SHASH}, [r3] + veor SHASH2_p64, SHASH_L, SHASH_H + + vmov.i8 MASK, #0xe1 + vshl.u64 MASK, MASK, #57 + + ghash_update p64 +ENDPROC(pmull_ghash_update_p64) + +ENTRY(pmull_ghash_update_p8) + vld1.64 {SHASH}, [r3] + veor SHASH2_p8, SHASH_L, SHASH_H + + vext.8 s1l, SHASH_L, SHASH_L, #1 + vext.8 s2l, SHASH_L, SHASH_L, #2 + vext.8 s3l, SHASH_L, SHASH_L, #3 + vext.8 s4l, SHASH_L, SHASH_L, #4 + vext.8 s1h, SHASH_H, SHASH_H, #1 + vext.8 s2h, SHASH_H, SHASH_H, #2 + vext.8 s3h, SHASH_H, SHASH_H, #3 + vext.8 s4h, SHASH_H, SHASH_H, #4 + + vmov.i64 k16, #0xffff + vmov.i64 k32, #0xffffffff + vmov.i64 k48, #0xffffffffffff + + ghash_update p8 +ENDPROC(pmull_ghash_update_p8) diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c index 6bac8bea9f1e..d9bb52cae2ac 100644 --- a/arch/arm/crypto/ghash-ce-glue.c +++ b/arch/arm/crypto/ghash-ce-glue.c @@ -22,6 +22,7 @@ MODULE_DESCRIPTION("GHASH secure hash using ARMv8 Crypto Extensions"); MODULE_AUTHOR("Ard Biesheuvel "); MODULE_LICENSE("GPL v2"); +MODULE_ALIAS_CRYPTO("ghash"); #define GHASH_BLOCK_SIZE 16 #define GHASH_DIGEST_SIZE 16 @@ -41,8 +42,17 @@ struct ghash_async_ctx { struct cryptd_ahash *cryptd_tfm; }; -asmlinkage void pmull_ghash_update(int blocks, u64 dg[], const char *src, - struct ghash_key const *k, const char *head); +asmlinkage void pmull_ghash_update_p64(int blocks, u64 dg[], const char *src, + struct ghash_key const *k, + const char *head); + +asmlinkage void pmull_ghash_update_p8(int blocks, u64 dg[], const char *src, + struct ghash_key const *k, + const char *head); + +static void (*pmull_ghash_update)(int blocks, 
u64 dg[], const char *src, + struct ghash_key const *k, + const char *head); static int ghash_init(struct shash_desc *desc) { @@ -312,6 +322,14 @@ static int __init ghash_ce_mod_init(void) { int err; + if (!(elf_hwcap & HWCAP_NEON)) + return -ENODEV; + + if (elf_hwcap2 & HWCAP2_PMULL) + pmull_ghash_update = pmull_ghash_update_p64; + else + pmull_ghash_update = pmull_ghash_update_p8; + err = crypto_register_shash(&ghash_alg); if (err) return err; @@ -332,5 +350,5 @@ static void __exit ghash_ce_mod_exit(void) crypto_unregister_shash(&ghash_alg); } -module_cpu_feature_match(PMULL, ghash_ce_mod_init); +module_init(ghash_ce_mod_init); module_exit(ghash_ce_mod_exit); diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h index ebf020b02bc8..c8781450905b 100644 --- a/arch/arm/include/asm/kvm_arm.h +++ b/arch/arm/include/asm/kvm_arm.h @@ -227,7 +227,6 @@ #define HSR_DABT_S1PTW (_AC(1, UL) << 7) #define HSR_DABT_CM (_AC(1, UL) << 8) -#define HSR_DABT_EA (_AC(1, UL) << 9) #define kvm_arm_exception_type \ {0, "RESET" }, \ diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index 9a8a45aaf19a..98089ffd91bb 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -149,11 +149,6 @@ static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu) return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT; } -static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu) -{ - return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_EA; -} - static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu) { return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW; @@ -206,6 +201,25 @@ static inline u8 kvm_vcpu_trap_get_fault_type(struct kvm_vcpu *vcpu) return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE; } +static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu) +{ + switch (kvm_vcpu_trap_get_fault_type(vcpu)) { + case FSC_SEA: + case FSC_SEA_TTW0: + case FSC_SEA_TTW1: + case FSC_SEA_TTW2: + case FSC_SEA_TTW3: + case FSC_SECC: + case FSC_SECC_TTW0: + case FSC_SECC_TTW1: + case FSC_SECC_TTW2: + case FSC_SECC_TTW3: + return true; + default: + return false; + } +} + static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu) { return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK; diff --git a/arch/arm/include/asm/smp_scu.h b/arch/arm/include/asm/smp_scu.h index bfe163c40024..5983f6bc62d5 100644 --- a/arch/arm/include/asm/smp_scu.h +++ b/arch/arm/include/asm/smp_scu.h @@ -7,6 +7,7 @@ #ifndef __ASSEMBLER__ +#include #include static inline bool scu_a9_has_base(void) diff --git a/arch/arm/include/asm/string.h b/arch/arm/include/asm/string.h index cf4f3aad0fc1..fe1c6af3a1b1 100644 --- a/arch/arm/include/asm/string.h +++ b/arch/arm/include/asm/string.h @@ -24,6 +24,20 @@ extern void * memchr(const void *, int, __kernel_size_t); #define __HAVE_ARCH_MEMSET extern void * memset(void *, int, __kernel_size_t); +#define __HAVE_ARCH_MEMSET32 +extern void *__memset32(uint32_t *, uint32_t v, __kernel_size_t); +static inline void *memset32(uint32_t *p, uint32_t v, __kernel_size_t n) +{ + return __memset32(p, v, n * 4); +} + +#define __HAVE_ARCH_MEMSET64 +extern void *__memset64(uint64_t *, uint32_t low, __kernel_size_t, uint32_t hi); +static inline void *memset64(uint64_t *p, uint64_t v, __kernel_size_t n) +{ + return __memset64(p, v, n * 8, v >> 32); +} + extern void __memzero(void *ptr, __kernel_size_t n); #define memset(p,v,n) \ diff --git a/arch/arm/include/asm/suspend.h b/arch/arm/include/asm/suspend.h index 6c7182f32cef..a61905c86732 100644 --- 
a/arch/arm/include/asm/suspend.h +++ b/arch/arm/include/asm/suspend.h @@ -1,6 +1,8 @@ #ifndef __ASM_ARM_SUSPEND_H #define __ASM_ARM_SUSPEND_H +#include + struct sleep_save_sp { u32 *save_ptr_stash; u32 save_ptr_stash_phys; diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index 1d468b527b7b..776757d1604a 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -139,11 +139,10 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, #define TIF_NEED_RESCHED 1 /* rescheduling necessary */ #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ #define TIF_UPROBE 3 /* breakpointed or singlestepping */ -#define TIF_FSCHECK 4 /* Check FS is USER_DS on return */ -#define TIF_SYSCALL_TRACE 5 /* syscall trace active */ -#define TIF_SYSCALL_AUDIT 6 /* syscall auditing active */ -#define TIF_SYSCALL_TRACEPOINT 7 /* syscall tracepoint instrumentation */ -#define TIF_SECCOMP 8 /* seccomp syscall filtering active */ +#define TIF_SYSCALL_TRACE 4 /* syscall trace active */ +#define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ +#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */ +#define TIF_SECCOMP 7 /* seccomp syscall filtering active */ #define TIF_NOHZ 12 /* in adaptive nohz mode */ #define TIF_USING_IWMMXT 17 @@ -154,7 +153,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) #define _TIF_UPROBE (1 << TIF_UPROBE) -#define _TIF_FSCHECK (1 << TIF_FSCHECK) #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) @@ -168,9 +166,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, /* * Change these and you break ASM code in entry-common.S */ -#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ - _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ - _TIF_FSCHECK) +#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ + _TIF_NOTIFY_RESUME | _TIF_UPROBE) #endif /* __KERNEL__ */ #endif /* __ASM_ARM_THREAD_INFO_H */ diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 87936dd5d151..0bf2347495f1 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -70,8 +70,6 @@ static inline void set_fs(mm_segment_t fs) { current_thread_info()->addr_limit = fs; modify_domain(DOMAIN_KERNEL, fs ? 
DOMAIN_CLIENT : DOMAIN_MANAGER); - /* On user-mode return, check fs is correct */ - set_thread_flag(TIF_FSCHECK); } #define segment_eq(a, b) ((a) == (b)) diff --git a/arch/arm/include/debug/omap2plus.S b/arch/arm/include/debug/omap2plus.S index 6d867aef18eb..192a7583999c 100644 --- a/arch/arm/include/debug/omap2plus.S +++ b/arch/arm/include/debug/omap2plus.S @@ -12,43 +12,6 @@ #include -/* OMAP2 serial ports */ -#define OMAP2_UART1_BASE 0x4806a000 -#define OMAP2_UART2_BASE 0x4806c000 -#define OMAP2_UART3_BASE 0x4806e000 - -/* OMAP3 serial ports */ -#define OMAP3_UART1_BASE OMAP2_UART1_BASE -#define OMAP3_UART2_BASE OMAP2_UART2_BASE -#define OMAP3_UART3_BASE 0x49020000 -#define OMAP3_UART4_BASE 0x49042000 /* Only on 36xx */ -#define OMAP3_UART4_AM35XX_BASE 0x4809E000 /* Only on AM35xx */ - -/* OMAP4 serial ports */ -#define OMAP4_UART1_BASE OMAP2_UART1_BASE -#define OMAP4_UART2_BASE OMAP2_UART2_BASE -#define OMAP4_UART3_BASE 0x48020000 -#define OMAP4_UART4_BASE 0x4806e000 - -/* TI81XX serial ports */ -#define TI81XX_UART1_BASE 0x48020000 -#define TI81XX_UART2_BASE 0x48022000 -#define TI81XX_UART3_BASE 0x48024000 - -/* AM3505/3517 UART4 */ -#define AM35XX_UART4_BASE 0x4809E000 /* Only on AM3505/3517 */ - -/* AM33XX serial port */ -#define AM33XX_UART1_BASE 0x44E09000 - -/* OMAP5 serial ports */ -#define OMAP5_UART1_BASE OMAP2_UART1_BASE -#define OMAP5_UART2_BASE OMAP2_UART2_BASE -#define OMAP5_UART3_BASE OMAP4_UART3_BASE -#define OMAP5_UART4_BASE OMAP4_UART4_BASE -#define OMAP5_UART5_BASE 0x48066000 -#define OMAP5_UART6_BASE 0x48068000 - /* External port on Zoom2/3 */ #define ZOOM_UART_BASE 0x10000000 #define ZOOM_UART_VIRT 0xfa400000 @@ -59,6 +22,7 @@ #define UART_OFFSET(addr) ((addr) & 0x00ffffff) .pushsection .data + .align 2 omap_uart_phys: .word 0 omap_uart_virt: .word 0 omap_uart_lsr: .word 0 @@ -79,55 +43,6 @@ omap_uart_lsr: .word 0 bne 100f @ already configured /* Configure the UART offset from the phys/virt base */ -#ifdef CONFIG_DEBUG_OMAP2UART1 - mov \rp, #UART_OFFSET(OMAP2_UART1_BASE) @ omap2/3/4 - b 98f -#endif -#ifdef CONFIG_DEBUG_OMAP2UART2 - mov \rp, #UART_OFFSET(OMAP2_UART2_BASE) @ omap2/3/4 - b 98f -#endif -#ifdef CONFIG_DEBUG_OMAP2UART3 - mov \rp, #UART_OFFSET(OMAP2_UART3_BASE) - b 98f -#endif -#ifdef CONFIG_DEBUG_OMAP3UART3 - mov \rp, #UART_OFFSET(OMAP3_UART1_BASE) - add \rp, \rp, #0x00fb0000 - add \rp, \rp, #0x00006000 @ OMAP3_UART3_BASE - b 98f -#endif -#ifdef CONFIG_DEBUG_OMAP4UART3 - mov \rp, #UART_OFFSET(OMAP4_UART3_BASE) - b 98f -#endif -#ifdef CONFIG_DEBUG_OMAP3UART4 - mov \rp, #UART_OFFSET(OMAP3_UART1_BASE) - add \rp, \rp, #0x00fb0000 - add \rp, \rp, #0x00028000 @ OMAP3_UART4_BASE - b 98f -#endif -#ifdef CONFIG_DEBUG_OMAP4UART4 - mov \rp, #UART_OFFSET(OMAP4_UART4_BASE) - b 98f -#endif -#ifdef CONFIG_DEBUG_TI81XXUART1 - mov \rp, #UART_OFFSET(TI81XX_UART1_BASE) - b 98f -#endif -#ifdef CONFIG_DEBUG_TI81XXUART2 - mov \rp, #UART_OFFSET(TI81XX_UART2_BASE) - b 98f -#endif -#ifdef CONFIG_DEBUG_TI81XXUART3 - mov \rp, #UART_OFFSET(TI81XX_UART3_BASE) - b 98f -#endif -#ifdef CONFIG_DEBUG_AM33XXUART1 - ldr \rp, =AM33XX_UART1_BASE - and \rp, \rp, #0x00ffffff - b 97f -#endif #ifdef CONFIG_DEBUG_ZOOM_UART ldr \rp, =ZOOM_UART_BASE str \rp, [\tmp, #0] @ omap_uart_phys @@ -138,28 +53,6 @@ omap_uart_lsr: .word 0 #endif b 10b - /* AM33XX: Store both phys and virt address for the uart */ -97: add \rp, \rp, #0x44000000 @ phys base - str \rp, [\tmp, #0] @ omap_uart_phys - sub \rp, \rp, #0x44000000 @ phys base - add \rp, \rp, #0xf9000000 @ virt base - str \rp, [\tmp, #4] @ omap_uart_virt 
- mov \rp, #(UART_LSR << OMAP_PORT_SHIFT) - str \rp, [\tmp, #8] @ omap_uart_lsr - - b 10b - - /* Store both phys and virt address for the uart */ -98: add \rp, \rp, #0x48000000 @ phys base - str \rp, [\tmp, #0] @ omap_uart_phys - sub \rp, \rp, #0x48000000 @ phys base - add \rp, \rp, #0xfa000000 @ virt base - str \rp, [\tmp, #4] @ omap_uart_virt - mov \rp, #(UART_LSR << OMAP_PORT_SHIFT) - str \rp, [\tmp, #8] @ omap_uart_lsr - - b 10b - .align 99: .word . .word omap_uart_phys diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c index 8e8d20cdbce7..5266fd9ad6b4 100644 --- a/arch/arm/kernel/armksyms.c +++ b/arch/arm/kernel/armksyms.c @@ -87,6 +87,8 @@ EXPORT_SYMBOL(__raw_writesl); EXPORT_SYMBOL(strchr); EXPORT_SYMBOL(strrchr); EXPORT_SYMBOL(memset); +EXPORT_SYMBOL(__memset32); +EXPORT_SYMBOL(__memset64); EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(memmove); EXPORT_SYMBOL(memchr); diff --git a/arch/arm/kernel/cpuidle.c b/arch/arm/kernel/cpuidle.c index a3308ad1a024..fda5579123a8 100644 --- a/arch/arm/kernel/cpuidle.c +++ b/arch/arm/kernel/cpuidle.c @@ -101,8 +101,8 @@ static int __init arm_cpuidle_read_ops(struct device_node *dn, int cpu) ops = arm_cpuidle_get_ops(enable_method); if (!ops) { - pr_warn("%s: unsupported enable-method property: %s\n", - dn->full_name, enable_method); + pr_warn("%pOF: unsupported enable-method property: %s\n", + dn, enable_method); return -EOPNOTSUPP; } diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c index f676febbb270..ecaa68dd1af5 100644 --- a/arch/arm/kernel/devtree.c +++ b/arch/arm/kernel/devtree.c @@ -95,7 +95,7 @@ void __init arm_dt_init_cpu_maps(void) if (of_node_cmp(cpu->type, "cpu")) continue; - pr_debug(" * %s...\n", cpu->full_name); + pr_debug(" * %pOF...\n", cpu); /* * A device tree containing CPU nodes with missing "reg" * properties is considered invalid to build the @@ -103,8 +103,7 @@ void __init arm_dt_init_cpu_maps(void) */ cell = of_get_property(cpu, "reg", &prop_bytes); if (!cell || prop_bytes < sizeof(*cell)) { - pr_debug(" * %s missing reg property\n", - cpu->full_name); + pr_debug(" * %pOF missing reg property\n", cpu); of_node_put(cpu); return; } diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index c731f0d2b2af..fbc707626b3e 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -721,6 +721,7 @@ do_fpe: */ .pushsection .data + .align 2 ENTRY(fp_enter) .word no_fp .popsection @@ -1224,6 +1225,7 @@ vector_addrexcptn: W(b) vector_fiq .data + .align 2 .globl cr_alignment cr_alignment: diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index e33c32d56193..99c908226065 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -12,6 +12,7 @@ #include #include #include +#include #ifdef CONFIG_AEABI #include #endif @@ -27,6 +28,14 @@ #include "entry-header.S" +saved_psr .req r8 +#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING) +saved_pc .req r9 +#define TRACE(x...) x +#else +saved_pc .req lr +#define TRACE(x...) 
+#endif .align 5 #if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING)) @@ -40,12 +49,14 @@ ret_fast_syscall: UNWIND(.fnstart ) UNWIND(.cantunwind ) disable_irq_notrace @ disable interrupts + ldr r2, [tsk, #TI_ADDR_LIMIT] + cmp r2, #TASK_SIZE + blne addr_limit_check_failed ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing - tst r1, #_TIF_SYSCALL_WORK - bne fast_work_pending - tst r1, #_TIF_WORK_MASK + tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK bne fast_work_pending + /* perform architecture specific actions before user return */ arch_ret_to_user r1, lr @@ -68,16 +79,16 @@ ret_fast_syscall: UNWIND(.cantunwind ) str r0, [sp, #S_R0 + S_OFF]! @ save returned r0 disable_irq_notrace @ disable interrupts + ldr r2, [tsk, #TI_ADDR_LIMIT] + cmp r2, #TASK_SIZE + blne addr_limit_check_failed ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing - tst r1, #_TIF_SYSCALL_WORK - bne fast_work_pending - tst r1, #_TIF_WORK_MASK + tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK beq no_work_pending UNWIND(.fnend ) ENDPROC(ret_fast_syscall) /* Slower path - fall through to work_pending */ -fast_work_pending: #endif tst r1, #_TIF_SYSCALL_WORK @@ -103,6 +114,9 @@ ENTRY(ret_to_user) ret_slow_syscall: disable_irq_notrace @ disable interrupts ENTRY(ret_to_user_from_irq) + ldr r2, [tsk, #TI_ADDR_LIMIT] + cmp r2, #TASK_SIZE + blne addr_limit_check_failed ldr r1, [tsk, #TI_FLAGS] tst r1, #_TIF_WORK_MASK bne slow_work_pending @@ -146,16 +160,17 @@ ENTRY(vector_swi) ARM( stmdb r8, {sp, lr}^ ) @ Calling sp, lr THUMB( mov r8, sp ) THUMB( store_user_sp_lr r8, r10, S_SP ) @ calling sp, lr - mrs r8, spsr @ called from non-FIQ mode, so ok. - str lr, [sp, #S_PC] @ Save calling PC - str r8, [sp, #S_PSR] @ Save CPSR + mrs saved_psr, spsr @ called from non-FIQ mode, so ok. + TRACE( mov saved_pc, lr ) + str saved_pc, [sp, #S_PC] @ Save calling PC + str saved_psr, [sp, #S_PSR] @ Save CPSR str r0, [sp, #S_OLD_R0] @ Save OLD_R0 #endif zero_fp alignment_trap r10, ip, __cr_alignment - enable_irq - ct_user_exit - get_thread_info tsk + asm_trace_hardirqs_on save=0 + enable_irq_notrace + ct_user_exit save=0 /* * Get the system call number. @@ -168,11 +183,11 @@ ENTRY(vector_swi) * value to determine if it is an EABI or an old ABI call. */ #ifdef CONFIG_ARM_THUMB - tst r8, #PSR_T_BIT + tst saved_psr, #PSR_T_BIT movne r10, #0 @ no thumb OABI emulation - USER( ldreq r10, [lr, #-4] ) @ get SWI instruction + USER( ldreq r10, [saved_pc, #-4] ) @ get SWI instruction #else - USER( ldr r10, [lr, #-4] ) @ get SWI instruction + USER( ldr r10, [saved_pc, #-4] ) @ get SWI instruction #endif ARM_BE8(rev r10, r10) @ little endian instruction @@ -183,15 +198,17 @@ ENTRY(vector_swi) */ #elif defined(CONFIG_ARM_THUMB) /* Legacy ABI only, possibly thumb mode. */ - tst r8, #PSR_T_BIT @ this is SPSR from save_user_regs + tst saved_psr, #PSR_T_BIT @ this is SPSR from save_user_regs addne scno, r7, #__NR_SYSCALL_BASE @ put OS number in - USER( ldreq scno, [lr, #-4] ) + USER( ldreq scno, [saved_pc, #-4] ) #else /* Legacy ABI only. 
*/ - USER( ldr scno, [lr, #-4] ) @ get SWI instruction + USER( ldr scno, [saved_pc, #-4] ) @ get SWI instruction #endif + /* saved_psr and saved_pc are now dead */ + uaccess_disable tbl adr tbl, sys_call_table @ load syscall table pointer @@ -210,6 +227,12 @@ ENTRY(vector_swi) bic scno, scno, #0xff000000 @ mask off SWI op-code eor scno, scno, #__NR_SYSCALL_BASE @ check OS number #endif + get_thread_info tsk + /* + * Reload the registers that may have been corrupted on entry to + * the syscall assembly (by tracing or context tracking.) + */ + TRACE( ldmia sp, {r0 - r3} ) local_restart: ldr r10, [tsk, #TI_FLAGS] @ check for syscall tracing @@ -239,8 +262,9 @@ local_restart: * current task. */ 9001: - sub lr, lr, #4 + sub lr, saved_pc, #4 str lr, [sp, #S_PC] + get_thread_info tsk b ret_fast_syscall #endif ENDPROC(vector_swi) diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 04286fd9e09c..6b1148cafffd 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -556,6 +556,7 @@ ENDPROC(__fixup_smp) .word __smpalt_end .pushsection .data + .align 2 .globl smp_on_up smp_on_up: ALT_SMP(.long 1) @@ -716,6 +717,7 @@ ENTRY(fixup_pv_table) ENDPROC(fixup_pv_table) .data + .align 2 .globl __pv_phys_pfn_offset .type __pv_phys_pfn_offset, %object __pv_phys_pfn_offset: diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S index ec7e7377d423..60146e32619a 100644 --- a/arch/arm/kernel/hyp-stub.S +++ b/arch/arm/kernel/hyp-stub.S @@ -31,6 +31,7 @@ * zeroing of .bss would clobber it. */ .data + .align 2 ENTRY(__boot_cpu_mode) .long 0 .text diff --git a/arch/arm/kernel/iwmmxt.S b/arch/arm/kernel/iwmmxt.S index 49fadbda8c63..81cd4d43b3ec 100644 --- a/arch/arm/kernel/iwmmxt.S +++ b/arch/arm/kernel/iwmmxt.S @@ -367,6 +367,7 @@ ENTRY(iwmmxt_task_release) ENDPROC(iwmmxt_task_release) .data + .align 2 concan_owner: .word 0 diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index e2de50bf8742..b67ae12503f3 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -614,10 +614,6 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) * Update the trace code with the current status. */ trace_hardirqs_off(); - - /* Check valid user FS if needed */ - addr_limit_user_check(); - do { if (likely(thread_flags & _TIF_NEED_RESCHED)) { schedule(); @@ -678,3 +674,9 @@ struct page *get_signal_page(void) return page; } + +/* Defer to generic check */ +asmlinkage void addr_limit_check_failed(void) +{ + addr_limit_user_check(); +} diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S index 0f6c1000582c..9f08d214d05a 100644 --- a/arch/arm/kernel/sleep.S +++ b/arch/arm/kernel/sleep.S @@ -171,6 +171,7 @@ mpidr_hash_ptr: .long mpidr_hash - . 
@ mpidr_hash struct offset .data + .align 2 .type sleep_save_sp, #object ENTRY(sleep_save_sp) .space SLEEP_SAVE_SP_SZ @ struct sleep_save_sp diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c index 3a2fa203637a..65228bf4c6df 100644 --- a/arch/arm/kernel/stacktrace.c +++ b/arch/arm/kernel/stacktrace.c @@ -171,6 +171,7 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) { __save_stack_trace(tsk, trace, 1); } +EXPORT_SYMBOL(save_stack_trace_tsk); void save_stack_trace(struct stack_trace *trace) { diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c index bf949a763dbe..24ac3cab411d 100644 --- a/arch/arm/kernel/topology.c +++ b/arch/arm/kernel/topology.c @@ -127,8 +127,7 @@ static void __init parse_dt_topology(void) rate = of_get_property(cn, "clock-frequency", &len); if (!rate || len != 4) { - pr_err("%s missing clock-frequency property\n", - cn->full_name); + pr_err("%pOF missing clock-frequency property\n", cn); continue; } diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c index 54442e375354..cf8bf6bf87c4 100644 --- a/arch/arm/kvm/handle_exit.c +++ b/arch/arm/kvm/handle_exit.c @@ -67,7 +67,7 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run) if (kvm_vcpu_get_hsr(vcpu) & HSR_WFI_IS_WFE) { trace_kvm_wfx(*vcpu_pc(vcpu), true); vcpu->stat.wfe_exit_stat++; - kvm_vcpu_on_spin(vcpu); + kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu)); } else { trace_kvm_wfx(*vcpu_pc(vcpu), false); vcpu->stat.wfi_exit_stat++; diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S index 3c65e3bd790f..ed6d35d9cdb5 100644 --- a/arch/arm/lib/memset.S +++ b/arch/arm/lib/memset.S @@ -28,7 +28,7 @@ UNWIND( .fnstart ) 1: orr r1, r1, r1, lsl #8 orr r1, r1, r1, lsl #16 mov r3, r1 - cmp r2, #16 +7: cmp r2, #16 blt 4f #if ! CALGN(1)+0 @@ -41,7 +41,7 @@ UNWIND( .fnend ) UNWIND( .fnstart ) UNWIND( .save {r8, lr} ) mov r8, r1 - mov lr, r1 + mov lr, r3 2: subs r2, r2, #64 stmgeia ip!, {r1, r3, r8, lr} @ 64 bytes at a time. @@ -73,11 +73,11 @@ UNWIND( .fnend ) UNWIND( .fnstart ) UNWIND( .save {r4-r8, lr} ) mov r4, r1 - mov r5, r1 + mov r5, r3 mov r6, r1 - mov r7, r1 + mov r7, r3 mov r8, r1 - mov lr, r1 + mov lr, r3 cmp r2, #96 tstgt ip, #31 @@ -114,7 +114,7 @@ UNWIND( .fnstart ) tst r2, #4 strne r1, [ip], #4 /* - * When we get here, we've got less than 4 bytes to zero. We + * When we get here, we've got less than 4 bytes to set. We * may have an unaligned pointer as well. */ 5: tst r2, #2 @@ -135,3 +135,15 @@ UNWIND( .fnstart ) UNWIND( .fnend ) ENDPROC(memset) ENDPROC(mmioset) + +ENTRY(__memset32) +UNWIND( .fnstart ) + mov r3, r1 @ copy r1 to r3 and fall into memset64 +UNWIND( .fnend ) +ENDPROC(__memset32) +ENTRY(__memset64) +UNWIND( .fnstart ) + mov ip, r0 @ preserve r0 as return value + b 7b @ jump into the middle of memset +UNWIND( .fnend ) +ENDPROC(__memset64) diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig index 195da38cb9a2..6d870421a7a6 100644 --- a/arch/arm/mach-at91/Kconfig +++ b/arch/arm/mach-at91/Kconfig @@ -26,6 +26,7 @@ config SOC_SAMA5D2 select HAVE_AT91_USB_CLK select HAVE_AT91_H32MX select HAVE_AT91_GENERATED_CLK + select HAVE_AT91_AUDIO_PLL select PINCTRL_AT91PIO4 help Select this if you are using one of Atmel's SAMA5D2 family SoC.
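The memset.S hunk above wires the new __memset32 and __memset64 entry points into memset's existing fill loop: __memset32 duplicates the 32-bit pattern from r1 into r3 and falls through into __memset64, which preserves r0 as the return value and branches to the new label 7 inside memset, with r1/r3 holding the two 32-bit halves of the pattern (hence the mov ..., r1 -> mov ..., r3 changes). Together with the memset32()/memset64() inline wrappers added to asm/string.h earlier in this series, the net effect is a fill of n 32-bit or 64-bit elements that returns the destination pointer. A portable C model of that behaviour is sketched below; it is illustrative only, and the *_ref function names and the test values are invented for this example, not kernel API.

#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Fill n 32-bit words with pattern v; returns the destination pointer. */
static void *memset32_ref(uint32_t *p, uint32_t v, size_t n)
{
        uint32_t *ret = p;

        while (n--)
                *p++ = v;
        return ret;
}

/* Fill n 64-bit words with pattern v; returns the destination pointer. */
static void *memset64_ref(uint64_t *p, uint64_t v, size_t n)
{
        uint64_t *ret = p;

        while (n--)
                *p++ = v;
        return ret;
}

int main(void)
{
        uint32_t buf32[4];
        uint64_t buf64[4];

        memset32_ref(buf32, 0xdeadbeef, 4);
        memset64_ref(buf64, 0x0123456789abcdefULL, 4);
        printf("%08" PRIx32 " %016" PRIx64 "\n", buf32[0], buf64[0]);
        return 0;
}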
@@ -125,6 +126,9 @@ config HAVE_AT91_H32MX config HAVE_AT91_GENERATED_CLK bool +config HAVE_AT91_AUDIO_PLL + bool + config SOC_SAM_V4_V5 bool diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c index 5036f996e694..849014c01cf4 100644 --- a/arch/arm/mach-at91/pm.c +++ b/arch/arm/mach-at91/pm.c @@ -533,8 +533,8 @@ static void __init at91_pm_backup_init(void) } pm_bu->suspended = 0; - pm_bu->canary = virt_to_phys(&canary); - pm_bu->resume = virt_to_phys(cpu_resume); + pm_bu->canary = __pa_symbol(&canary); + pm_bu->resume = __pa_symbol(cpu_resume); return; diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c index e568c8c6f69c..cbde0030c092 100644 --- a/arch/arm/mach-davinci/board-da850-evm.c +++ b/arch/arm/mach-davinci/board-da850-evm.c @@ -26,7 +26,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c index 18296a99c4d2..62e7bc3018f0 100644 --- a/arch/arm/mach-davinci/board-dm355-evm.c +++ b/arch/arm/mach-davinci/board-dm355-evm.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c index 284ff27c1b32..be997243447b 100644 --- a/arch/arm/mach-davinci/board-dm355-leopard.c +++ b/arch/arm/mach-davinci/board-dm355-leopard.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c index 0464999b7137..e75741fb2c1d 100644 --- a/arch/arm/mach-davinci/board-dm365-evm.c +++ b/arch/arm/mach-davinci/board-dm365-evm.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c index 70e00dbeec96..b07c9b18d427 100644 --- a/arch/arm/mach-davinci/board-dm644x-evm.c +++ b/arch/arm/mach-davinci/board-dm644x-evm.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c index 1d76e7480a42..cb0a41e83582 100644 --- a/arch/arm/mach-davinci/board-dm646x-evm.c +++ b/arch/arm/mach-davinci/board-dm646x-evm.c @@ -29,7 +29,7 @@ #include #include -#include +#include #include #include #include diff --git a/arch/arm/mach-davinci/board-sffsdr.c b/arch/arm/mach-davinci/board-sffsdr.c index 41c7c9615791..d85accf7f760 100644 --- a/arch/arm/mach-davinci/board-sffsdr.c +++ b/arch/arm/mach-davinci/board-sffsdr.c @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/arm/mach-dove/dove-db-setup.c b/arch/arm/mach-dove/dove-db-setup.c index bcb678fd2415..8971c3c0f0fe 100644 --- a/arch/arm/mach-dove/dove-db-setup.c +++ b/arch/arm/mach-dove/dove-db-setup.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/arm/mach-ep93xx/clock.c b/arch/arm/mach-ep93xx/clock.c index beec5f16443a..d2eee707d27f 100644 --- a/arch/arm/mach-ep93xx/clock.c +++ b/arch/arm/mach-ep93xx/clock.c @@ -98,6 +98,13 @@ static struct clk clk_keypad = { .enable_mask = EP93XX_SYSCON_KEYTCHCLKDIV_KEN, .set_rate = set_keytchclk_rate, }; +static struct clk clk_adc = { + .parent = &clk_xtali, + .sw_locked = 1, + .enable_reg = EP93XX_SYSCON_KEYTCHCLKDIV, + .enable_mask = 
EP93XX_SYSCON_KEYTCHCLKDIV_TSEN, + .set_rate = set_keytchclk_rate, +}; static struct clk clk_spi = { .parent = &clk_xtali, .rate = EP93XX_EXT_CLK_RATE, @@ -214,6 +221,7 @@ static struct clk_lookup clocks[] = { INIT_CK(NULL, "pll2", &clk_pll2), INIT_CK("ohci-platform", NULL, &clk_usb_host), INIT_CK("ep93xx-keypad", NULL, &clk_keypad), + INIT_CK("ep93xx-adc", NULL, &clk_adc), INIT_CK("ep93xx-fb", NULL, &clk_video), INIT_CK("ep93xx-spi.0", NULL, &clk_spi), INIT_CK("ep93xx-i2s", "mclk", &clk_i2s_mclk), diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c index c393b1b0310d..f53c61813998 100644 --- a/arch/arm/mach-ep93xx/core.c +++ b/arch/arm/mach-ep93xx/core.c @@ -820,6 +820,30 @@ void ep93xx_ide_release_gpio(struct platform_device *pdev) } EXPORT_SYMBOL(ep93xx_ide_release_gpio); +/************************************************************************* + * EP93xx ADC + *************************************************************************/ +static struct resource ep93xx_adc_resources[] = { + DEFINE_RES_MEM(EP93XX_ADC_PHYS_BASE, 0x28), + DEFINE_RES_IRQ(IRQ_EP93XX_TOUCH), +}; + +static struct platform_device ep93xx_adc_device = { + .name = "ep93xx-adc", + .id = -1, + .num_resources = ARRAY_SIZE(ep93xx_adc_resources), + .resource = ep93xx_adc_resources, +}; + +void __init ep93xx_register_adc(void) +{ + /* Power up ADC, deactivate Touch Screen Controller */ + ep93xx_devcfg_set_clear(EP93XX_SYSCON_DEVCFG_TIN, + EP93XX_SYSCON_DEVCFG_ADCPD); + + platform_device_register(&ep93xx_adc_device); +} + /************************************************************************* * EP93xx Security peripheral *************************************************************************/ diff --git a/arch/arm/mach-ep93xx/edb93xx.c b/arch/arm/mach-ep93xx/edb93xx.c index 0ac176386789..7a7f280b07d7 100644 --- a/arch/arm/mach-ep93xx/edb93xx.c +++ b/arch/arm/mach-ep93xx/edb93xx.c @@ -245,6 +245,7 @@ static void __init edb93xx_init_machine(void) edb93xx_register_pwm(); edb93xx_register_fb(); edb93xx_register_ide(); + ep93xx_register_adc(); } diff --git a/arch/arm/mach-ep93xx/include/mach/platform.h b/arch/arm/mach-ep93xx/include/mach/platform.h index 4c0bbd97f741..db0839691ef5 100644 --- a/arch/arm/mach-ep93xx/include/mach/platform.h +++ b/arch/arm/mach-ep93xx/include/mach/platform.h @@ -52,6 +52,7 @@ int ep93xx_i2s_acquire(void); void ep93xx_i2s_release(void); void ep93xx_register_ac97(void); void ep93xx_register_ide(void); +void ep93xx_register_adc(void); int ep93xx_ide_acquire_gpio(struct platform_device *pdev); void ep93xx_ide_release_gpio(struct platform_device *pdev); diff --git a/arch/arm/mach-ep93xx/snappercl15.c b/arch/arm/mach-ep93xx/snappercl15.c index b2db791b3b38..8b29398f4dc7 100644 --- a/arch/arm/mach-ep93xx/snappercl15.c +++ b/arch/arm/mach-ep93xx/snappercl15.c @@ -25,7 +25,7 @@ #include #include -#include +#include #include #include diff --git a/arch/arm/mach-ep93xx/soc.h b/arch/arm/mach-ep93xx/soc.h index 7bf7ff8beae7..d20e631164cf 100644 --- a/arch/arm/mach-ep93xx/soc.h +++ b/arch/arm/mach-ep93xx/soc.h @@ -95,6 +95,7 @@ #define EP93XX_KEY_MATRIX_PHYS_BASE EP93XX_APB_PHYS(0x000f0000) #define EP93XX_KEY_MATRIX_BASE EP93XX_APB_IOMEM(0x000f0000) +#define EP93XX_ADC_PHYS_BASE EP93XX_APB_PHYS(0x00100000) #define EP93XX_ADC_BASE EP93XX_APB_IOMEM(0x00100000) #define EP93XX_TOUCHSCREEN_BASE EP93XX_APB_IOMEM(0x00100000) diff --git a/arch/arm/mach-ep93xx/ts72xx.c b/arch/arm/mach-ep93xx/ts72xx.c index 55b186ef863a..8745162ec05d 100644 --- a/arch/arm/mach-ep93xx/ts72xx.c +++ 
b/arch/arm/mach-ep93xx/ts72xx.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/arm/mach-exynos/sleep.S b/arch/arm/mach-exynos/sleep.S index cf950790fbdc..4292cae43f3c 100644 --- a/arch/arm/mach-exynos/sleep.S +++ b/arch/arm/mach-exynos/sleep.S @@ -124,6 +124,7 @@ _cp15_save_diag: #endif /* CONFIG_CACHE_L2X0 */ .data + .align 2 .globl cp15_save_diag cp15_save_diag: .long 0 @ cp15 diagnostic diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c index 748cfb8d5212..b529ba04ed16 100644 --- a/arch/arm/mach-exynos/suspend.c +++ b/arch/arm/mach-exynos/suspend.c @@ -187,21 +187,20 @@ static int __init exynos_pmu_irq_init(struct device_node *node, struct irq_domain *parent_domain, *domain; if (!parent) { - pr_err("%s: no parent, giving up\n", node->full_name); + pr_err("%pOF: no parent, giving up\n", node); return -ENODEV; } parent_domain = irq_find_host(parent); if (!parent_domain) { - pr_err("%s: unable to obtain parent domain\n", node->full_name); + pr_err("%pOF: unable to obtain parent domain\n", node); return -ENXIO; } pmu_base_addr = of_iomap(node, 0); if (!pmu_base_addr) { - pr_err("%s: failed to find exynos pmu register\n", - node->full_name); + pr_err("%pOF: failed to find exynos pmu register\n", node); return -ENOMEM; } diff --git a/arch/arm/mach-gemini/Kconfig b/arch/arm/mach-gemini/Kconfig index 06c8b095154c..70106b67631c 100644 --- a/arch/arm/mach-gemini/Kconfig +++ b/arch/arm/mach-gemini/Kconfig @@ -1,11 +1,16 @@ menuconfig ARCH_GEMINI bool "Cortina Systems Gemini" depends on ARCH_MULTI_V4 + select ARCH_HAS_RESET_CONTROLLER + select ARM_AMBA select ARM_APPENDED_DTB # Old Redboot bootloaders deployed + select COMMON_CLK_GEMINI select FARADAY_FTINTC010 select FTTMR010_TIMER select GPIO_FTGPIO010 select GPIOLIB + select PINCTRL + select PINCTRL_GEMINI select POWER_RESET select POWER_RESET_GEMINI_POWEROFF select POWER_RESET_SYSCON diff --git a/arch/arm/mach-hisi/platsmp.c b/arch/arm/mach-hisi/platsmp.c index 91bb02dec20f..da5689ababf7 100644 --- a/arch/arm/mach-hisi/platsmp.c +++ b/arch/arm/mach-hisi/platsmp.c @@ -109,7 +109,7 @@ static void hix5hd2_set_scu_boot_addr(phys_addr_t start_addr, phys_addr_t jump_a virt = ioremap(start_addr, PAGE_SIZE); - writel_relaxed(0xe51ff004, virt); /* ldr pc, [rc, #-4] */ + writel_relaxed(0xe51ff004, virt); /* ldr pc, [pc, #-4] */ writel_relaxed(jump_addr, virt + 4); /* pc jump phy address */ iounmap(virt); } diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c index 93f584ba0130..de535cb679b3 100644 --- a/arch/arm/mach-imx/gpc.c +++ b/arch/arm/mach-imx/gpc.c @@ -224,13 +224,13 @@ static int __init imx_gpc_init(struct device_node *node, int i; if (!parent) { - pr_err("%s: no parent, giving up\n", node->full_name); + pr_err("%pOF: no parent, giving up\n", node); return -ENODEV; } parent_domain = irq_find_host(parent); if (!parent_domain) { - pr_err("%s: unable to obtain parent domain\n", node->full_name); + pr_err("%pOF: unable to obtain parent domain\n", node); return -ENXIO; } diff --git a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c index dd75a4756761..5169dfba9718 100644 --- a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c +++ b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c @@ -245,7 +245,6 @@ static phys_addr_t mx2_camera_base __initdata; static void __init visstrim_analog_camera_init(void) { struct platform_device *pdev; - int dma; gpio_set_value(TVP5150_PWDN, 1); ndelay(1); @@ -258,12 +257,9 @@ static void __init 
visstrim_analog_camera_init(void) if (IS_ERR(pdev)) return; - dma = dma_declare_coherent_memory(&pdev->dev, - mx2_camera_base, mx2_camera_base, - MX2_CAMERA_BUF_SIZE, - DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE); - if (!(dma & DMA_MEMORY_MAP)) - return; + dma_declare_coherent_memory(&pdev->dev, mx2_camera_base, + mx2_camera_base, MX2_CAMERA_BUF_SIZE, + DMA_MEMORY_EXCLUSIVE); } static void __init visstrim_reserve(void) @@ -444,16 +440,13 @@ static const struct imx_ssi_platform_data visstrim_m10_ssi_pdata __initconst = { static void __init visstrim_coda_init(void) { struct platform_device *pdev; - int dma; pdev = imx27_add_coda(); - dma = dma_declare_coherent_memory(&pdev->dev, - mx2_camera_base + MX2_CAMERA_BUF_SIZE, - mx2_camera_base + MX2_CAMERA_BUF_SIZE, - MX2_CAMERA_BUF_SIZE, - DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE); - if (!(dma & DMA_MEMORY_MAP)) - return; + dma_declare_coherent_memory(&pdev->dev, + mx2_camera_base + MX2_CAMERA_BUF_SIZE, + mx2_camera_base + MX2_CAMERA_BUF_SIZE, + MX2_CAMERA_BUF_SIZE, + DMA_MEMORY_EXCLUSIVE); } /* DMA deinterlace */ @@ -466,24 +459,21 @@ static void __init visstrim_deinterlace_init(void) { int ret = -ENOMEM; struct platform_device *pdev = &visstrim_deinterlace; - int dma; ret = platform_device_register(pdev); - dma = dma_declare_coherent_memory(&pdev->dev, - mx2_camera_base + 2 * MX2_CAMERA_BUF_SIZE, - mx2_camera_base + 2 * MX2_CAMERA_BUF_SIZE, - MX2_CAMERA_BUF_SIZE, - DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE); - if (!(dma & DMA_MEMORY_MAP)) - return; + dma_declare_coherent_memory(&pdev->dev, + mx2_camera_base + 2 * MX2_CAMERA_BUF_SIZE, + mx2_camera_base + 2 * MX2_CAMERA_BUF_SIZE, + MX2_CAMERA_BUF_SIZE, + DMA_MEMORY_EXCLUSIVE); } /* Emma-PrP for format conversion */ static void __init visstrim_emmaprp_init(void) { struct platform_device *pdev; - int dma; + int ret; pdev = imx27_add_mx2_emmaprp(); if (IS_ERR(pdev)) @@ -493,11 +483,11 @@ static void __init visstrim_emmaprp_init(void) * Use the same memory area as the analog camera since both * devices are, by nature, exclusive. 
*/ - dma = dma_declare_coherent_memory(&pdev->dev, + ret = dma_declare_coherent_memory(&pdev->dev, mx2_camera_base, mx2_camera_base, MX2_CAMERA_BUF_SIZE, - DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE); - if (!(dma & DMA_MEMORY_MAP)) + DMA_MEMORY_EXCLUSIVE); + if (ret) pr_err("Failed to declare memory for emmaprp\n"); } diff --git a/arch/arm/mach-imx/mach-mx31moboard.c b/arch/arm/mach-imx/mach-mx31moboard.c index bde9a9af6714..7716f83aecdd 100644 --- a/arch/arm/mach-imx/mach-mx31moboard.c +++ b/arch/arm/mach-imx/mach-mx31moboard.c @@ -475,7 +475,7 @@ static phys_addr_t mx3_camera_base __initdata; static int __init mx31moboard_init_cam(void) { - int dma, ret = -ENOMEM; + int ret; struct platform_device *pdev; imx31_add_ipu_core(); @@ -484,11 +484,11 @@ static int __init mx31moboard_init_cam(void) if (IS_ERR(pdev)) return PTR_ERR(pdev); - dma = dma_declare_coherent_memory(&pdev->dev, - mx3_camera_base, mx3_camera_base, - MX3_CAMERA_BUF_SIZE, - DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE); - if (!(dma & DMA_MEMORY_MAP)) + ret = dma_declare_coherent_memory(&pdev->dev, + mx3_camera_base, mx3_camera_base, + MX3_CAMERA_BUF_SIZE, + DMA_MEMORY_EXCLUSIVE); + if (ret) goto err; ret = platform_device_add(pdev); diff --git a/arch/arm/mach-imx/mach-qong.c b/arch/arm/mach-imx/mach-qong.c index 8c2cbd693d21..42a700053103 100644 --- a/arch/arm/mach-imx/mach-qong.c +++ b/arch/arm/mach-imx/mach-qong.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/arm/mach-ixp4xx/ixdp425-setup.c b/arch/arm/mach-ixp4xx/ixdp425-setup.c index 508c2d7786e2..93b89291c06b 100644 --- a/arch/arm/mach-ixp4xx/ixdp425-setup.c +++ b/arch/arm/mach-ixp4xx/ixdp425-setup.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/arm/mach-mediatek/mediatek.c b/arch/arm/mach-mediatek/mediatek.c index c3cf215773b2..6910b4e0d913 100644 --- a/arch/arm/mach-mediatek/mediatek.c +++ b/arch/arm/mach-mediatek/mediatek.c @@ -30,6 +30,7 @@ static void __init mediatek_timer_init(void) if (of_machine_is_compatible("mediatek,mt6589") || of_machine_is_compatible("mediatek,mt7623") || + of_machine_is_compatible("mediatek,mt7623a") || of_machine_is_compatible("mediatek,mt8135") || of_machine_is_compatible("mediatek,mt8127")) { /* turn on GPT6 which ungates arch timer clocks */ @@ -49,6 +50,7 @@ static const char * const mediatek_board_dt_compat[] = { "mediatek,mt6589", "mediatek,mt6592", "mediatek,mt7623", + "mediatek,mt7623a", "mediatek,mt8127", "mediatek,mt8135", NULL, diff --git a/arch/arm/mach-mediatek/platsmp.c b/arch/arm/mach-mediatek/platsmp.c index 726eb69bb655..27d78c945caf 100644 --- a/arch/arm/mach-mediatek/platsmp.c +++ b/arch/arm/mach-mediatek/platsmp.c @@ -59,6 +59,7 @@ static const struct of_device_id mtk_tz_smp_boot_infos[] __initconst = { static const struct of_device_id mtk_smp_boot_infos[] __initconst = { { .compatible = "mediatek,mt6589", .data = &mtk_mt6589_boot }, { .compatible = "mediatek,mt7623", .data = &mtk_mt7623_boot }, + { .compatible = "mediatek,mt7623a", .data = &mtk_mt7623_boot }, }; static void __iomem *mtk_smp_base; diff --git a/arch/arm/mach-mmp/aspenite.c b/arch/arm/mach-mmp/aspenite.c index 5db0edf716dd..d2283009a5ff 100644 --- a/arch/arm/mach-mmp/aspenite.c +++ b/arch/arm/mach-mmp/aspenite.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig index 541647f57192..9b49867154bf 100644 --- a/arch/arm/mach-mvebu/Kconfig +++ 
b/arch/arm/mach-mvebu/Kconfig @@ -60,6 +60,8 @@ config MACH_ARMADA_38X select ARM_ERRATA_720789 select ARM_ERRATA_753970 select ARM_GIC + select ARM_GLOBAL_TIMER + select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK select ARMADA_370_XP_IRQ select ARMADA_38X_CLK select HAVE_ARM_SCU diff --git a/arch/arm/mach-mvebu/kirkwood.c b/arch/arm/mach-mvebu/kirkwood.c index 7d9f2fd9e450..0aa88105d46e 100644 --- a/arch/arm/mach-mvebu/kirkwood.c +++ b/arch/arm/mach-mvebu/kirkwood.c @@ -107,8 +107,7 @@ static void __init kirkwood_dt_eth_fixup(void) clk_prepare_enable(clk); /* store MAC address register contents in local-mac-address */ - pr_err(FW_INFO "%s: local-mac-address is not set\n", - np->full_name); + pr_err(FW_INFO "%pOF: local-mac-address is not set\n", np); pmac = kzalloc(sizeof(*pmac) + 6, GFP_KERNEL); if (!pmac) diff --git a/arch/arm/mach-omap1/board-fsample.c b/arch/arm/mach-omap1/board-fsample.c index fad95b74bb65..b93ad58b0a63 100644 --- a/arch/arm/mach-omap1/board-fsample.c +++ b/arch/arm/mach-omap1/board-fsample.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/arm/mach-omap1/board-h2-mmc.c b/arch/arm/mach-omap1/board-h2-mmc.c index 357be2debc9d..91bda9c802ff 100644 --- a/arch/arm/mach-omap1/board-h2-mmc.c +++ b/arch/arm/mach-omap1/board-h2-mmc.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include "board-h2.h" #include "mmc.h" diff --git a/arch/arm/mach-omap1/board-h2.c b/arch/arm/mach-omap1/board-h2.c index 675254ee4b1e..6a38c7603064 100644 --- a/arch/arm/mach-omap1/board-h2.c +++ b/arch/arm/mach-omap1/board-h2.c @@ -24,11 +24,11 @@ #include #include #include -#include +#include #include #include #include -#include +#include #include #include #include diff --git a/arch/arm/mach-omap1/board-h3-mmc.c b/arch/arm/mach-omap1/board-h3-mmc.c index 4f58bfa5e754..692c267a9a90 100644 --- a/arch/arm/mach-omap1/board-h3-mmc.c +++ b/arch/arm/mach-omap1/board-h3-mmc.c @@ -14,7 +14,7 @@ #include #include -#include +#include #include "common.h" #include "board-h3.h" diff --git a/arch/arm/mach-omap1/board-h3.c b/arch/arm/mach-omap1/board-h3.c index e62f9d454f10..302260583e8e 100644 --- a/arch/arm/mach-omap1/board-h3.c +++ b/arch/arm/mach-omap1/board-h3.c @@ -23,12 +23,12 @@ #include #include #include -#include +#include #include #include #include #include -#include +#include #include #include #include diff --git a/arch/arm/mach-omap1/board-nand.c b/arch/arm/mach-omap1/board-nand.c index 7684f9203474..1bffbb4e050f 100644 --- a/arch/arm/mach-omap1/board-nand.c +++ b/arch/arm/mach-omap1/board-nand.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include "common.h" diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c index ee8d9f553db4..06243c0b12d2 100644 --- a/arch/arm/mach-omap1/board-nokia770.c +++ b/arch/arm/mach-omap1/board-nokia770.c @@ -233,10 +233,10 @@ static struct platform_device nokia770_cbus_device = { static struct i2c_board_info nokia770_i2c_board_info_2[] __initdata = { { - I2C_BOARD_INFO("retu-mfd", 0x01), + I2C_BOARD_INFO("retu", 0x01), }, { - I2C_BOARD_INFO("tahvo-mfd", 0x02), + I2C_BOARD_INFO("tahvo", 0x02), }, }; diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c index 95ac1929aede..d579f4e04137 100644 --- a/arch/arm/mach-omap1/board-osk.c +++ b/arch/arm/mach-omap1/board-osk.c @@ -38,7 +38,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/arm/mach-omap1/board-perseus2.c 
b/arch/arm/mach-omap1/board-perseus2.c index 150b57ba42bf..e994a78bdd09 100644 --- a/arch/arm/mach-omap1/board-perseus2.c +++ b/arch/arm/mach-omap1/board-perseus2.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index 0465338183c7..e31a5a22e171 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig @@ -87,6 +87,7 @@ config SOC_DRA7XX select OMAP_INTERCONNECT_BARRIER select PM_OPP if PM select ZONE_DMA if ARM_LPAE + select PINCTRL_TI_IODELAY if OF && PINCTRL config ARCH_OMAP2PLUS bool diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c index 583fc39d84cd..6c61ecc62905 100644 --- a/arch/arm/mach-omap2/board-generic.c +++ b/arch/arm/mach-omap2/board-generic.c @@ -313,6 +313,7 @@ MACHINE_END #ifdef CONFIG_SOC_DRA7XX static const char *const dra74x_boards_compat[] __initconst = { + "ti,dra762", "ti,am5728", "ti,am5726", "ti,dra742", diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h index 8cc6338fcb12..b5ad7fcb80ed 100644 --- a/arch/arm/mach-omap2/common.h +++ b/arch/arm/mach-omap2/common.h @@ -29,7 +29,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/arm/mach-omap2/dma.c b/arch/arm/mach-omap2/dma.c index 0b77a0176018..694ce0939d50 100644 --- a/arch/arm/mach-omap2/dma.c +++ b/arch/arm/mach-omap2/dma.c @@ -204,61 +204,6 @@ static unsigned configure_dma_errata(void) return errata; } -static const struct dma_slave_map omap24xx_sdma_map[] = { - { "omap-gpmc", "rxtx", SDMA_FILTER_PARAM(4) }, - { "omap-aes", "tx", SDMA_FILTER_PARAM(9) }, - { "omap-aes", "rx", SDMA_FILTER_PARAM(10) }, - { "omap-sham", "rx", SDMA_FILTER_PARAM(13) }, - { "omap2_mcspi.2", "tx0", SDMA_FILTER_PARAM(15) }, - { "omap2_mcspi.2", "rx0", SDMA_FILTER_PARAM(16) }, - { "omap-mcbsp.3", "tx", SDMA_FILTER_PARAM(17) }, - { "omap-mcbsp.3", "rx", SDMA_FILTER_PARAM(18) }, - { "omap-mcbsp.4", "tx", SDMA_FILTER_PARAM(19) }, - { "omap-mcbsp.4", "rx", SDMA_FILTER_PARAM(20) }, - { "omap-mcbsp.5", "tx", SDMA_FILTER_PARAM(21) }, - { "omap-mcbsp.5", "rx", SDMA_FILTER_PARAM(22) }, - { "omap2_mcspi.2", "tx1", SDMA_FILTER_PARAM(23) }, - { "omap2_mcspi.2", "rx1", SDMA_FILTER_PARAM(24) }, - { "omap_i2c.1", "tx", SDMA_FILTER_PARAM(27) }, - { "omap_i2c.1", "rx", SDMA_FILTER_PARAM(28) }, - { "omap_i2c.2", "tx", SDMA_FILTER_PARAM(29) }, - { "omap_i2c.2", "rx", SDMA_FILTER_PARAM(30) }, - { "omap-mcbsp.1", "tx", SDMA_FILTER_PARAM(31) }, - { "omap-mcbsp.1", "rx", SDMA_FILTER_PARAM(32) }, - { "omap-mcbsp.2", "tx", SDMA_FILTER_PARAM(33) }, - { "omap-mcbsp.2", "rx", SDMA_FILTER_PARAM(34) }, - { "omap2_mcspi.0", "tx0", SDMA_FILTER_PARAM(35) }, - { "omap2_mcspi.0", "rx0", SDMA_FILTER_PARAM(36) }, - { "omap2_mcspi.0", "tx1", SDMA_FILTER_PARAM(37) }, - { "omap2_mcspi.0", "rx1", SDMA_FILTER_PARAM(38) }, - { "omap2_mcspi.0", "tx2", SDMA_FILTER_PARAM(39) }, - { "omap2_mcspi.0", "rx2", SDMA_FILTER_PARAM(40) }, - { "omap2_mcspi.0", "tx3", SDMA_FILTER_PARAM(41) }, - { "omap2_mcspi.0", "rx3", SDMA_FILTER_PARAM(42) }, - { "omap2_mcspi.1", "tx0", SDMA_FILTER_PARAM(43) }, - { "omap2_mcspi.1", "rx0", SDMA_FILTER_PARAM(44) }, - { "omap2_mcspi.1", "tx1", SDMA_FILTER_PARAM(45) }, - { "omap2_mcspi.1", "rx1", SDMA_FILTER_PARAM(46) }, - { "omap_hsmmc.1", "tx", SDMA_FILTER_PARAM(47) }, - { "omap_hsmmc.1", "rx", SDMA_FILTER_PARAM(48) }, - { "omap_uart.0", "tx", SDMA_FILTER_PARAM(49) }, - { "omap_uart.0", "rx", SDMA_FILTER_PARAM(50) }, - { 
"omap_uart.1", "tx", SDMA_FILTER_PARAM(51) }, - { "omap_uart.1", "rx", SDMA_FILTER_PARAM(52) }, - { "omap_uart.2", "tx", SDMA_FILTER_PARAM(53) }, - { "omap_uart.2", "rx", SDMA_FILTER_PARAM(54) }, - { "omap_hsmmc.0", "tx", SDMA_FILTER_PARAM(61) }, - { "omap_hsmmc.0", "rx", SDMA_FILTER_PARAM(62) }, - - /* external DMA requests when tusb6010 is used */ - { "musb-tusb", "dmareq0", SDMA_FILTER_PARAM(2) }, - { "musb-tusb", "dmareq1", SDMA_FILTER_PARAM(3) }, - { "musb-tusb", "dmareq2", SDMA_FILTER_PARAM(14) }, /* OMAP2420 only */ - { "musb-tusb", "dmareq3", SDMA_FILTER_PARAM(15) }, /* OMAP2420 only */ - { "musb-tusb", "dmareq4", SDMA_FILTER_PARAM(16) }, /* OMAP2420 only */ - { "musb-tusb", "dmareq5", SDMA_FILTER_PARAM(64) }, /* OMAP2420 only */ -}; - static const struct dma_slave_map omap24xx_sdma_dt_map[] = { /* external DMA requests when tusb6010 is used */ { "musb-hdrc.1.auto", "dmareq0", SDMA_FILTER_PARAM(2) }, @@ -269,61 +214,6 @@ static const struct dma_slave_map omap24xx_sdma_dt_map[] = { { "musb-hdrc.1.auto", "dmareq5", SDMA_FILTER_PARAM(64) }, /* OMAP2420 only */ }; -static const struct dma_slave_map omap3xxx_sdma_map[] = { - { "omap-gpmc", "rxtx", SDMA_FILTER_PARAM(4) }, - { "omap2_mcspi.2", "tx0", SDMA_FILTER_PARAM(15) }, - { "omap2_mcspi.2", "rx0", SDMA_FILTER_PARAM(16) }, - { "omap-mcbsp.3", "tx", SDMA_FILTER_PARAM(17) }, - { "omap-mcbsp.3", "rx", SDMA_FILTER_PARAM(18) }, - { "omap-mcbsp.4", "tx", SDMA_FILTER_PARAM(19) }, - { "omap-mcbsp.4", "rx", SDMA_FILTER_PARAM(20) }, - { "omap-mcbsp.5", "tx", SDMA_FILTER_PARAM(21) }, - { "omap-mcbsp.5", "rx", SDMA_FILTER_PARAM(22) }, - { "omap2_mcspi.2", "tx1", SDMA_FILTER_PARAM(23) }, - { "omap2_mcspi.2", "rx1", SDMA_FILTER_PARAM(24) }, - { "omap_i2c.3", "tx", SDMA_FILTER_PARAM(25) }, - { "omap_i2c.3", "rx", SDMA_FILTER_PARAM(26) }, - { "omap_i2c.1", "tx", SDMA_FILTER_PARAM(27) }, - { "omap_i2c.1", "rx", SDMA_FILTER_PARAM(28) }, - { "omap_i2c.2", "tx", SDMA_FILTER_PARAM(29) }, - { "omap_i2c.2", "rx", SDMA_FILTER_PARAM(30) }, - { "omap-mcbsp.1", "tx", SDMA_FILTER_PARAM(31) }, - { "omap-mcbsp.1", "rx", SDMA_FILTER_PARAM(32) }, - { "omap-mcbsp.2", "tx", SDMA_FILTER_PARAM(33) }, - { "omap-mcbsp.2", "rx", SDMA_FILTER_PARAM(34) }, - { "omap2_mcspi.0", "tx0", SDMA_FILTER_PARAM(35) }, - { "omap2_mcspi.0", "rx0", SDMA_FILTER_PARAM(36) }, - { "omap2_mcspi.0", "tx1", SDMA_FILTER_PARAM(37) }, - { "omap2_mcspi.0", "rx1", SDMA_FILTER_PARAM(38) }, - { "omap2_mcspi.0", "tx2", SDMA_FILTER_PARAM(39) }, - { "omap2_mcspi.0", "rx2", SDMA_FILTER_PARAM(40) }, - { "omap2_mcspi.0", "tx3", SDMA_FILTER_PARAM(41) }, - { "omap2_mcspi.0", "rx3", SDMA_FILTER_PARAM(42) }, - { "omap2_mcspi.1", "tx0", SDMA_FILTER_PARAM(43) }, - { "omap2_mcspi.1", "rx0", SDMA_FILTER_PARAM(44) }, - { "omap2_mcspi.1", "tx1", SDMA_FILTER_PARAM(45) }, - { "omap2_mcspi.1", "rx1", SDMA_FILTER_PARAM(46) }, - { "omap_hsmmc.1", "tx", SDMA_FILTER_PARAM(47) }, - { "omap_hsmmc.1", "rx", SDMA_FILTER_PARAM(48) }, - { "omap_uart.0", "tx", SDMA_FILTER_PARAM(49) }, - { "omap_uart.0", "rx", SDMA_FILTER_PARAM(50) }, - { "omap_uart.1", "tx", SDMA_FILTER_PARAM(51) }, - { "omap_uart.1", "rx", SDMA_FILTER_PARAM(52) }, - { "omap_uart.2", "tx", SDMA_FILTER_PARAM(53) }, - { "omap_uart.2", "rx", SDMA_FILTER_PARAM(54) }, - { "omap_hsmmc.0", "tx", SDMA_FILTER_PARAM(61) }, - { "omap_hsmmc.0", "rx", SDMA_FILTER_PARAM(62) }, - { "omap-aes", "tx", SDMA_FILTER_PARAM(65) }, - { "omap-aes", "rx", SDMA_FILTER_PARAM(66) }, - { "omap-sham", "rx", SDMA_FILTER_PARAM(69) }, - { "omap2_mcspi.3", "tx0", SDMA_FILTER_PARAM(70) }, - { 
"omap2_mcspi.3", "rx0", SDMA_FILTER_PARAM(71) }, - { "omap_hsmmc.2", "tx", SDMA_FILTER_PARAM(77) }, - { "omap_hsmmc.2", "rx", SDMA_FILTER_PARAM(78) }, - { "omap_uart.3", "tx", SDMA_FILTER_PARAM(81) }, - { "omap_uart.3", "rx", SDMA_FILTER_PARAM(82) }, -}; - static struct omap_system_dma_plat_info dma_plat_info __initdata = { .reg_map = reg_map, .channel_stride = 0x60, @@ -352,24 +242,10 @@ static int __init omap2_system_dma_init_dev(struct omap_hwmod *oh, void *unused) p.dma_attr = (struct omap_dma_dev_attr *)oh->dev_attr; p.errata = configure_dma_errata(); - if (!of_have_populated_dt()) { - if (soc_is_omap24xx()) { - p.slave_map = omap24xx_sdma_map; - p.slavecnt = ARRAY_SIZE(omap24xx_sdma_map); - } else if (soc_is_omap34xx() || soc_is_omap3630()) { - p.slave_map = omap3xxx_sdma_map; - p.slavecnt = ARRAY_SIZE(omap3xxx_sdma_map); - } else { - pr_err("%s: The legacy DMA map is not provided!\n", - __func__); - return -ENODEV; - } - } else { - if (soc_is_omap24xx()) { - /* DMA slave map for drivers not yet converted to DT */ - p.slave_map = omap24xx_sdma_dt_map; - p.slavecnt = ARRAY_SIZE(omap24xx_sdma_dt_map); - } + if (soc_is_omap24xx()) { + /* DMA slave map for drivers not yet converted to DT */ + p.slave_map = omap24xx_sdma_dt_map; + p.slavecnt = ARRAY_SIZE(omap24xx_sdma_dt_map); } pdev = omap_device_build(name, 0, oh, &p, sizeof(p)); @@ -413,21 +289,7 @@ static int __init omap2_system_dma_init_dev(struct omap_hwmod *oh, void *unused) static int __init omap2_system_dma_init(void) { - struct platform_device *pdev; - int res; - - res = omap_hwmod_for_each_by_class("dma", + return omap_hwmod_for_each_by_class("dma", omap2_system_dma_init_dev, NULL); - if (res) - return res; - - if (of_have_populated_dt()) - return res; - - pdev = platform_device_register_full(&omap_dma_dev_info); - if (IS_ERR(pdev)) - return PTR_ERR(pdev); - - return res; } omap_arch_initcall(omap2_system_dma_init); diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c index 5b614388d72f..6d28aa20a7d3 100644 --- a/arch/arm/mach-omap2/hsmmc.c +++ b/arch/arm/mach-omap2/hsmmc.c @@ -58,10 +58,10 @@ void omap_hsmmc_late_init(struct omap2_hsmmc_info *c) struct platform_device *pdev; int res; - if (omap_hsmmc_done != 1) + if (omap_hsmmc_done) return; - omap_hsmmc_done++; + omap_hsmmc_done = 1; for (; c->mmc; c++) { pdev = c->pdev; diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c index e2274a162b74..16cb1c195fd8 100644 --- a/arch/arm/mach-omap2/id.c +++ b/arch/arm/mach-omap2/id.c @@ -663,6 +663,15 @@ void __init dra7xxx_check_revision(void) hawkeye = (idcode >> 12) & 0xffff; rev = (idcode >> 28) & 0xff; switch (hawkeye) { + case 0xbb50: + switch (rev) { + case 0: + default: + omap_revision = DRA762_REV_ES1_0; + break; + } + break; + case 0xb990: switch (rev) { case 0: diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c index 33e4953c61a8..69df3620eca5 100644 --- a/arch/arm/mach-omap2/omap-smp.c +++ b/arch/arm/mach-omap2/omap-smp.c @@ -342,7 +342,7 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus) c = &omap443x_cfg; else if (soc_is_omap446x()) c = &omap446x_cfg; - else if (soc_is_dra74x() || soc_is_omap54xx()) + else if (soc_is_dra74x() || soc_is_omap54xx() || soc_is_dra76x()) c = &omap5_cfg; if (!c) { @@ -355,7 +355,7 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus) cfg.startup_addr = c->startup_addr; cfg.wakeupgen_base = omap_get_wakeupgen_base(); - if (soc_is_dra74x() || soc_is_omap54xx()) { + if (soc_is_dra74x() || soc_is_omap54xx() || 
soc_is_dra76x()) { if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE) cfg.startup_addr = omap5_secondary_hyp_startup; omap5_erratum_workaround_801819(); diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c index 33ed5d53fa45..4bb6751864a5 100644 --- a/arch/arm/mach-omap2/omap-wakeupgen.c +++ b/arch/arm/mach-omap2/omap-wakeupgen.c @@ -522,13 +522,13 @@ static int __init wakeupgen_init(struct device_node *node, u32 val; if (!parent) { - pr_err("%s: no parent, giving up\n", node->full_name); + pr_err("%pOF: no parent, giving up\n", node); return -ENODEV; } parent_domain = irq_find_host(parent); if (!parent_domain) { - pr_err("%s: unable to obtain parent domain\n", node->full_name); + pr_err("%pOF: unable to obtain parent domain\n", node); return -ENXIO; } /* Not supported on OMAP4 ES1.0 silicon */ diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c index ef9ffb8ac912..acbede082b5b 100644 --- a/arch/arm/mach-omap2/omap_device.c +++ b/arch/arm/mach-omap2/omap_device.c @@ -672,7 +672,6 @@ static int _od_suspend_noirq(struct device *dev) if (!ret && !pm_runtime_status_suspended(dev)) { if (pm_generic_runtime_suspend(dev) == 0) { - pm_runtime_set_suspended(dev); omap_device_idle(pdev); od->flags |= OMAP_DEVICE_SUSPENDED; } @@ -689,15 +688,6 @@ static int _od_resume_noirq(struct device *dev) if (od->flags & OMAP_DEVICE_SUSPENDED) { od->flags &= ~OMAP_DEVICE_SUSPENDED; omap_device_enable(pdev); - /* - * XXX: we run before core runtime pm has resumed itself. At - * this point in time, we just restore the runtime pm state and - * considering symmetric operations in resume, we donot expect - * to fail. If we failed, something changed in core runtime_pm - * framework OR some device driver messed things up, hence, WARN - */ - WARN(pm_runtime_set_active(dev), - "Could not set %s runtime state active\n", dev_name(dev)); pm_generic_runtime_resume(dev); } diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index 3b47ded5fa0c..2dbd63239c54 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -2417,8 +2417,8 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data, if (mem) pr_err("omap_hwmod: %s: Could not ioremap\n", oh->name); else - pr_err("omap_hwmod: %s: Missing dt reg%i for %s\n", - oh->name, index, np->full_name); + pr_err("omap_hwmod: %s: Missing dt reg%i for %pOF\n", + oh->name, index, np); return -ENXIO; } diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c index b3abb8d8b2f6..2f4f7002f38d 100644 --- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c @@ -839,6 +839,7 @@ static struct omap_hwmod dra7xx_gpio1_hwmod = { .name = "gpio1", .class = &dra7xx_gpio_hwmod_class, .clkdm_name = "wkupaon_clkdm", + .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .main_clk = "wkupaon_iclk_mux", .prcm = { .omap4 = { @@ -4070,6 +4071,11 @@ static struct omap_hwmod_ocp_if *dra7xx_gp_hwmod_ocp_ifs[] __initdata = { }; /* SoC variant specific hwmod links */ +static struct omap_hwmod_ocp_if *dra76x_hwmod_ocp_ifs[] __initdata = { + &dra7xx_l4_per3__usb_otg_ss4, + NULL, +}; + static struct omap_hwmod_ocp_if *dra74x_hwmod_ocp_ifs[] __initdata = { &dra7xx_l4_per3__usb_otg_ss4, NULL, @@ -4095,12 +4101,14 @@ int __init dra7xx_hwmod_init(void) ret = omap_hwmod_register_links(dra74x_hwmod_ocp_ifs); else if (!ret && soc_is_dra72x()) ret = omap_hwmod_register_links(dra72x_hwmod_ocp_ifs); + else if (!ret && 
soc_is_dra76x()) + ret = omap_hwmod_register_links(dra76x_hwmod_ocp_ifs); if (!ret && omap_type() == OMAP2_DEVICE_TYPE_GP) ret = omap_hwmod_register_links(dra7xx_gp_hwmod_ocp_ifs); - /* now for the IPs *NOT* in dra71 */ - if (!ret && !of_machine_is_compatible("ti,dra718")) + /* now for the IPs available only in dra74 and dra72 */ + if (!ret && !of_machine_is_compatible("ti,dra718") && !soc_is_dra76x()) ret = omap_hwmod_register_links(dra74x_dra72x_hwmod_ocp_ifs); return ret; diff --git a/arch/arm/mach-omap2/omap_twl.c b/arch/arm/mach-omap2/omap_twl.c index 1346b3ab34a5..295124b248ae 100644 --- a/arch/arm/mach-omap2/omap_twl.c +++ b/arch/arm/mach-omap2/omap_twl.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include "soc.h" #include "voltage.h" diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c index 9700a8ef0f16..6b433fce65a5 100644 --- a/arch/arm/mach-omap2/pdata-quirks.c +++ b/arch/arm/mach-omap2/pdata-quirks.c @@ -434,6 +434,26 @@ static void __init omap5_uevm_legacy_init(void) } #endif +#ifdef CONFIG_SOC_DRA7XX +static struct omap_hsmmc_platform_data dra7_hsmmc_data_mmc1; +static struct omap_hsmmc_platform_data dra7_hsmmc_data_mmc2; +static struct omap_hsmmc_platform_data dra7_hsmmc_data_mmc3; + +static void __init dra7x_evm_mmc_quirk(void) +{ + if (omap_rev() == DRA752_REV_ES1_1 || omap_rev() == DRA752_REV_ES1_0) { + dra7_hsmmc_data_mmc1.version = "rev11"; + dra7_hsmmc_data_mmc1.max_freq = 96000000; + + dra7_hsmmc_data_mmc2.version = "rev11"; + dra7_hsmmc_data_mmc2.max_freq = 48000000; + + dra7_hsmmc_data_mmc3.version = "rev11"; + dra7_hsmmc_data_mmc3.max_freq = 48000000; + } +} +#endif + static struct pcs_pdata pcs_pdata; void omap_pcs_legacy_init(int irq, void (*rearm)(void)) @@ -560,6 +580,14 @@ static struct of_dev_auxdata omap_auxdata_lookup[] __initdata = { &omap4_iommu_pdata), OF_DEV_AUXDATA("ti,omap4-iommu", 0x55082000, "55082000.mmu", &omap4_iommu_pdata), +#endif +#ifdef CONFIG_SOC_DRA7XX + OF_DEV_AUXDATA("ti,dra7-hsmmc", 0x4809c000, "4809c000.mmc", + &dra7_hsmmc_data_mmc1), + OF_DEV_AUXDATA("ti,dra7-hsmmc", 0x480b4000, "480b4000.mmc", + &dra7_hsmmc_data_mmc2), + OF_DEV_AUXDATA("ti,dra7-hsmmc", 0x480ad000, "480ad000.mmc", + &dra7_hsmmc_data_mmc3), #endif /* Common auxdata */ OF_DEV_AUXDATA("pinctrl-single", 0, NULL, &pcs_pdata), @@ -589,6 +617,9 @@ static struct pdata_init pdata_quirks[] __initdata = { #endif #ifdef CONFIG_SOC_OMAP5 { "ti,omap5-uevm", omap5_uevm_legacy_init, }, +#endif +#ifdef CONFIG_SOC_DRA7XX + { "ti,dra7-evm", dra7x_evm_mmc_quirk, }, #endif { /* sentinel */ }, }; diff --git a/arch/arm/mach-omap2/powerdomains7xx_data.c b/arch/arm/mach-omap2/powerdomains7xx_data.c index eb350a673133..f50963916a21 100644 --- a/arch/arm/mach-omap2/powerdomains7xx_data.c +++ b/arch/arm/mach-omap2/powerdomains7xx_data.c @@ -29,6 +29,7 @@ #include "prcm44xx.h" #include "prm7xx.h" #include "prcm_mpu7xx.h" +#include "soc.h" /* iva_7xx_pwrdm: IVA-HD power domain */ static struct powerdomain iva_7xx_pwrdm = { @@ -63,6 +64,14 @@ static struct powerdomain custefuse_7xx_pwrdm = { .flags = PWRDM_HAS_LOWPOWERSTATECHANGE, }; +/* custefuse_aon_7xx_pwrdm: Customer efuse controller power domain */ +static struct powerdomain custefuse_aon_7xx_pwrdm = { + .name = "custefuse_pwrdm", + .prcm_offs = DRA7XX_PRM_CUSTEFUSE_INST, + .prcm_partition = DRA7XX_PRM_PARTITION, + .pwrsts = PWRSTS_ON, +}; + /* ipu_7xx_pwrdm: Audio back end power domain */ static struct powerdomain ipu_7xx_pwrdm = { .name = "ipu_pwrdm", @@ -350,7 +359,6 @@ static struct 
powerdomain eve1_7xx_pwrdm = { static struct powerdomain *powerdomains_dra7xx[] __initdata = { &iva_7xx_pwrdm, &rtc_7xx_pwrdm, - &custefuse_7xx_pwrdm, &ipu_7xx_pwrdm, &dss_7xx_pwrdm, &l4per_7xx_pwrdm, @@ -374,9 +382,32 @@ static struct powerdomain *powerdomains_dra7xx[] __initdata = { NULL }; +static struct powerdomain *powerdomains_dra76x[] __initdata = { + &custefuse_aon_7xx_pwrdm, + NULL +}; + +static struct powerdomain *powerdomains_dra74x[] __initdata = { + &custefuse_7xx_pwrdm, + NULL +}; + +static struct powerdomain *powerdomains_dra72x[] __initdata = { + &custefuse_aon_7xx_pwrdm, + NULL +}; + void __init dra7xx_powerdomains_init(void) { pwrdm_register_platform_funcs(&omap4_pwrdm_operations); pwrdm_register_pwrdms(powerdomains_dra7xx); + + if (soc_is_dra76x()) + pwrdm_register_pwrdms(powerdomains_dra76x); + else if (soc_is_dra74x()) + pwrdm_register_pwrdms(powerdomains_dra74x); + else if (soc_is_dra72x()) + pwrdm_register_pwrdms(powerdomains_dra72x); + pwrdm_complete_init(); } diff --git a/arch/arm/mach-omap2/prm3xxx.c b/arch/arm/mach-omap2/prm3xxx.c index 64f6451499a7..a2dd13217c89 100644 --- a/arch/arm/mach-omap2/prm3xxx.c +++ b/arch/arm/mach-omap2/prm3xxx.c @@ -706,7 +706,7 @@ static int omap3xxx_prm_late_init(void) np = of_find_matching_node(NULL, omap3_prm_dt_match_table); if (np) { irq_num = of_irq_get(np, 0); - if (irq_num >= 0) + if (irq_num > 0) omap3_prcm_irq_setup.irq = irq_num; } diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c index 3ab5df1ce900..1c0c1663f078 100644 --- a/arch/arm/mach-omap2/prm44xx.c +++ b/arch/arm/mach-omap2/prm44xx.c @@ -747,7 +747,7 @@ static int omap44xx_prm_late_init(void) * Already have OMAP4 IRQ num. For all other platforms, we need * IRQ numbers from DT */ - if (irq_num < 0 && !(prm_init_data->flags & PRM_IRQ_DEFAULT)) { + if (irq_num <= 0 && !(prm_init_data->flags & PRM_IRQ_DEFAULT)) { if (irq_num == -EPROBE_DEFER) return irq_num; @@ -756,7 +756,7 @@ static int omap44xx_prm_late_init(void) } /* Once OMAP4 DT is filled as well */ - if (irq_num >= 0) { + if (irq_num > 0) { omap4_prcm_irq_setup.irq = irq_num; omap4_prcm_irq_setup.xlate_irq = NULL; } diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S index 1b9f0520dea9..fa5fd24f524c 100644 --- a/arch/arm/mach-omap2/sleep34xx.S +++ b/arch/arm/mach-omap2/sleep34xx.S @@ -530,10 +530,12 @@ l2dis_3630_offset: .long l2dis_3630 - . 
.data + .align 2 l2dis_3630: .word 0 .data + .align 2 l2_inv_api_params: .word 0x1, 0x00 diff --git a/arch/arm/mach-omap2/sleep44xx.S b/arch/arm/mach-omap2/sleep44xx.S index c7a3b4aab4b5..56dfa2d5d0a8 100644 --- a/arch/arm/mach-omap2/sleep44xx.S +++ b/arch/arm/mach-omap2/sleep44xx.S @@ -385,6 +385,7 @@ ppa_zero_params_offset: ENDPROC(omap_do_wfi) .data + .align 2 ppa_zero_params: .word 0 diff --git a/arch/arm/mach-omap2/soc.h b/arch/arm/mach-omap2/soc.h index 2aa01c270898..754cd0fc0e7b 100644 --- a/arch/arm/mach-omap2/soc.h +++ b/arch/arm/mach-omap2/soc.h @@ -167,6 +167,7 @@ IS_TI_SUBCLASS(816x, 0x816) IS_TI_SUBCLASS(814x, 0x814) IS_AM_SUBCLASS(335x, 0x335) IS_AM_SUBCLASS(437x, 0x437) +IS_DRA_SUBCLASS(76x, 0x76) IS_DRA_SUBCLASS(75x, 0x75) IS_DRA_SUBCLASS(72x, 0x72) @@ -185,6 +186,7 @@ IS_DRA_SUBCLASS(72x, 0x72) #define soc_is_omap54xx() 0 #define soc_is_omap543x() 0 #define soc_is_dra7xx() 0 +#define soc_is_dra76x() 0 #define soc_is_dra74x() 0 #define soc_is_dra72x() 0 @@ -314,9 +316,11 @@ IS_OMAP_TYPE(3430, 0x3430) #if defined(CONFIG_SOC_DRA7XX) #undef soc_is_dra7xx +#undef soc_is_dra76x #undef soc_is_dra74x #undef soc_is_dra72x #define soc_is_dra7xx() is_dra7xx() +#define soc_is_dra76x() is_dra76x() #define soc_is_dra74x() is_dra75x() #define soc_is_dra72x() is_dra72x() #endif @@ -386,6 +390,7 @@ IS_OMAP_TYPE(3430, 0x3430) #define OMAP5432_REV_ES2_0 (OMAP54XX_CLASS | (0x32 << 16) | (0x20 << 8)) #define DRA7XX_CLASS 0x07000000 +#define DRA762_REV_ES1_0 (DRA7XX_CLASS | (0x62 << 16) | (0x10 << 8)) #define DRA752_REV_ES1_0 (DRA7XX_CLASS | (0x52 << 16) | (0x10 << 8)) #define DRA752_REV_ES1_1 (DRA7XX_CLASS | (0x52 << 16) | (0x11 << 8)) #define DRA752_REV_ES2_0 (DRA7XX_CLASS | (0x52 << 16) | (0x20 << 8)) diff --git a/arch/arm/mach-orion5x/db88f5281-setup.c b/arch/arm/mach-orion5x/db88f5281-setup.c index 12f74b46e2ff..3f5863de766a 100644 --- a/arch/arm/mach-orion5x/db88f5281-setup.c +++ b/arch/arm/mach-orion5x/db88f5281-setup.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/arm/mach-orion5x/kurobox_pro-setup.c b/arch/arm/mach-orion5x/kurobox_pro-setup.c index 9dc3f59bed9c..83d43cff4bd7 100644 --- a/arch/arm/mach-orion5x/kurobox_pro-setup.c +++ b/arch/arm/mach-orion5x/kurobox_pro-setup.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/arm/mach-orion5x/ts209-setup.c b/arch/arm/mach-orion5x/ts209-setup.c index 7bd671b2854c..0c315515dd2d 100644 --- a/arch/arm/mach-orion5x/ts209-setup.c +++ b/arch/arm/mach-orion5x/ts209-setup.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/arm/mach-orion5x/ts78xx-setup.c b/arch/arm/mach-orion5x/ts78xx-setup.c index 7ef80a8304c0..94778739e38f 100644 --- a/arch/arm/mach-orion5x/ts78xx-setup.c +++ b/arch/arm/mach-orion5x/ts78xx-setup.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/arm/mach-pxa/balloon3.c b/arch/arm/mach-pxa/balloon3.c index 1467c1d1e541..d6d92f388f14 100644 --- a/arch/arm/mach-pxa/balloon3.c +++ b/arch/arm/mach-pxa/balloon3.c @@ -29,7 +29,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c index 811a7317f3ea..6d28035ebba5 100644 --- a/arch/arm/mach-pxa/em-x270.c +++ b/arch/arm/mach-pxa/em-x270.c @@ -15,7 +15,7 @@ #include #include -#include +#include #include #include #include diff --git a/arch/arm/mach-pxa/eseries.c 
b/arch/arm/mach-pxa/eseries.c index fa9d71d194f0..91f7c3e40065 100644 --- a/arch/arm/mach-pxa/eseries.c +++ b/arch/arm/mach-pxa/eseries.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/arm/mach-pxa/mioa701_bootresume.S b/arch/arm/mach-pxa/mioa701_bootresume.S index 81591491ab94..42d93f40a59f 100644 --- a/arch/arm/mach-pxa/mioa701_bootresume.S +++ b/arch/arm/mach-pxa/mioa701_bootresume.S @@ -16,6 +16,7 @@ * insist on it to be truly read-only. */ .data + .align 2 ENTRY(mioa701_bootstrap) 0: b 1f @@ -34,4 +35,5 @@ ENTRY(mioa701_jumpaddr) ENTRY(mioa701_bootstrap_lg) .data + .align 2 .word 2b-0b diff --git a/arch/arm/mach-pxa/palmtx.c b/arch/arm/mach-pxa/palmtx.c index 36646975b5d2..47e3e38e9bec 100644 --- a/arch/arm/mach-pxa/palmtx.c +++ b/arch/arm/mach-pxa/palmtx.c @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c index e2c97728b3c6..9d662fed03ec 100644 --- a/arch/arm/mach-pxa/raumfeld.c +++ b/arch/arm/mach-pxa/raumfeld.c @@ -377,7 +377,7 @@ static struct gpiod_lookup_table raumfeld_rotary_gpios_table = { }, }; -static struct property_entry raumfeld_rotary_properties[] = { +static const struct property_entry raumfeld_rotary_properties[] __initconst = { PROPERTY_ENTRY_INTEGER("rotary-encoder,steps-per-period", u32, 24), PROPERTY_ENTRY_INTEGER("linux,axis", u32, REL_X), PROPERTY_ENTRY_INTEGER("rotary-encoder,relative_axis", u32, 1), diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c index 13de6602966f..6a386fd6363e 100644 --- a/arch/arm/mach-pxa/tosa.c +++ b/arch/arm/mach-pxa/tosa.c @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/arm/mach-rockchip/Kconfig b/arch/arm/mach-rockchip/Kconfig index 9ad84cd01ba0..a4065966881a 100644 --- a/arch/arm/mach-rockchip/Kconfig +++ b/arch/arm/mach-rockchip/Kconfig @@ -3,6 +3,7 @@ config ARCH_ROCKCHIP depends on ARCH_MULTI_V7 select PINCTRL select PINCTRL_ROCKCHIP + select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE select ARCH_HAS_RESET_CONTROLLER select ARM_AMBA select ARM_GIC @@ -16,6 +17,7 @@ config ARCH_ROCKCHIP select ROCKCHIP_TIMER select ARM_GLOBAL_TIMER select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK + select ZONE_DMA if ARM_LPAE help Support for Rockchip's Cortex-A9 Single-to-Quad-Core-SoCs containing the RK2928, RK30xx and RK31xx series. diff --git a/arch/arm/mach-rockchip/platsmp.c b/arch/arm/mach-rockchip/platsmp.c index 3abafdbdd7f4..ecec340ca345 100644 --- a/arch/arm/mach-rockchip/platsmp.c +++ b/arch/arm/mach-rockchip/platsmp.c @@ -67,7 +67,7 @@ static struct reset_control *rockchip_get_core_reset(int cpu) else np = of_get_cpu_node(cpu, NULL); - return of_reset_control_get(np, NULL); + return of_reset_control_get_exclusive(np, NULL); } static int pmu_set_power_domain(int pd, bool on) @@ -182,8 +182,8 @@ static int __init rockchip_smp_prepare_sram(struct device_node *node) ret = of_address_to_resource(node, 0, &res); if (ret < 0) { - pr_err("%s: could not get address for node %s\n", - __func__, node->full_name); + pr_err("%s: could not get address for node %pOF\n", + __func__, node); return ret; } diff --git a/arch/arm/mach-rockchip/sleep.S b/arch/arm/mach-rockchip/sleep.S index 2eec9a341f05..9927f06f52fe 100644 --- a/arch/arm/mach-rockchip/sleep.S +++ b/arch/arm/mach-rockchip/sleep.S @@ -23,7 +23,7 @@ * ddr to sram for system resumeing. * so it is ".data section". 
*/ -.align + .align 2 ENTRY(rockchip_slp_cpu_resume) setmode PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1 @ set svc, irqs off diff --git a/arch/arm/mach-s3c24xx/Kconfig b/arch/arm/mach-s3c24xx/Kconfig index f07da82ebfea..b198be7d32b6 100644 --- a/arch/arm/mach-s3c24xx/Kconfig +++ b/arch/arm/mach-s3c24xx/Kconfig @@ -229,7 +229,7 @@ config ARCH_H1940 config H1940BT tristate "Control the state of H1940 bluetooth chip" depends on ARCH_H1940 - select RFKILL + depends on RFKILL help This is a simple driver that is able to control the state of built in bluetooth chip on h1940. diff --git a/arch/arm/mach-s3c24xx/common-smdk.c b/arch/arm/mach-s3c24xx/common-smdk.c index 9e0bc46e90ec..0e116c92bf01 100644 --- a/arch/arm/mach-s3c24xx/common-smdk.c +++ b/arch/arm/mach-s3c24xx/common-smdk.c @@ -23,7 +23,7 @@ #include #include -#include +#include #include #include #include diff --git a/arch/arm/mach-s3c24xx/common.c b/arch/arm/mach-s3c24xx/common.c index b59f4f4f256f..5b6b94ef41e2 100644 --- a/arch/arm/mach-s3c24xx/common.c +++ b/arch/arm/mach-s3c24xx/common.c @@ -173,7 +173,7 @@ static unsigned long s3c24xx_read_idcode_v5(void) return gs; #endif -#if defined(CONFIG_CPU_S3C2412) || defined(CONFIG_CPU_S3C2413) +#if defined(CONFIG_CPU_S3C2412) return __raw_readl(S3C2412_GSTATUS1); #else return 1UL; /* don't look like an 2400 */ diff --git a/arch/arm/mach-s3c24xx/include/mach/regs-clock.h b/arch/arm/mach-s3c24xx/include/mach/regs-clock.h index 3db6c10de023..ae4a3e0f3ba2 100644 --- a/arch/arm/mach-s3c24xx/include/mach/regs-clock.h +++ b/arch/arm/mach-s3c24xx/include/mach/regs-clock.h @@ -77,7 +77,7 @@ #endif /* CONFIG_CPU_S3C2440 or CONFIG_CPU_S3C2442 */ -#if defined(CONFIG_CPU_S3C2412) || defined(CONFIG_CPU_S3C2413) +#if defined(CONFIG_CPU_S3C2412) #define S3C2412_OSCSET S3C2410_CLKREG(0x18) #define S3C2412_CLKSRC S3C2410_CLKREG(0x1C) @@ -141,7 +141,7 @@ #define S3C2412_CLKSRC_UREFCLK_EXTCLK (1<<12) #define S3C2412_CLKSRC_EREFCLK_EXTCLK (1<<14) -#endif /* CONFIG_CPU_S3C2412 | CONFIG_CPU_S3C2413 */ +#endif /* CONFIG_CPU_S3C2412 */ #define S3C2416_CLKDIV2 S3C2410_CLKREG(0x28) diff --git a/arch/arm/mach-s3c24xx/mach-anubis.c b/arch/arm/mach-s3c24xx/mach-anubis.c index 029ef1b58925..c14cab361922 100644 --- a/arch/arm/mach-s3c24xx/mach-anubis.c +++ b/arch/arm/mach-s3c24xx/mach-anubis.c @@ -40,7 +40,7 @@ #include #include -#include +#include #include #include diff --git a/arch/arm/mach-s3c24xx/mach-at2440evb.c b/arch/arm/mach-s3c24xx/mach-at2440evb.c index 7b28eb623fc1..ebdbafb9382a 100644 --- a/arch/arm/mach-s3c24xx/mach-at2440evb.c +++ b/arch/arm/mach-s3c24xx/mach-at2440evb.c @@ -41,7 +41,7 @@ #include #include -#include +#include #include #include diff --git a/arch/arm/mach-s3c24xx/mach-bast.c b/arch/arm/mach-s3c24xx/mach-bast.c index 5185036765db..704dc84b3480 100644 --- a/arch/arm/mach-s3c24xx/mach-bast.c +++ b/arch/arm/mach-s3c24xx/mach-bast.c @@ -28,7 +28,7 @@ #include #include -#include +#include #include #include diff --git a/arch/arm/mach-s3c24xx/mach-gta02.c b/arch/arm/mach-s3c24xx/mach-gta02.c index b0ed401da3a3..afe18baf0c84 100644 --- a/arch/arm/mach-s3c24xx/mach-gta02.c +++ b/arch/arm/mach-s3c24xx/mach-gta02.c @@ -50,7 +50,7 @@ #include #include -#include +#include #include #include #include diff --git a/arch/arm/mach-s3c24xx/mach-jive.c b/arch/arm/mach-s3c24xx/mach-jive.c index f5b5c49b56ac..17821976f769 100644 --- a/arch/arm/mach-s3c24xx/mach-jive.c +++ b/arch/arm/mach-s3c24xx/mach-jive.c @@ -43,7 +43,7 @@ #include #include -#include +#include #include #include diff --git 
a/arch/arm/mach-s3c24xx/mach-mini2440.c b/arch/arm/mach-s3c24xx/mach-mini2440.c index 71af8d2fd320..04c9f488c498 100644 --- a/arch/arm/mach-s3c24xx/mach-mini2440.c +++ b/arch/arm/mach-s3c24xx/mach-mini2440.c @@ -49,7 +49,7 @@ #include #include -#include +#include #include #include @@ -287,7 +287,7 @@ static struct s3c2410_platform_nand mini2440_nand_info __initdata = { .nr_sets = ARRAY_SIZE(mini2440_nand_sets), .sets = mini2440_nand_sets, .ignore_unset_ecc = 1, - .ecc_mode = NAND_ECC_SOFT, + .ecc_mode = NAND_ECC_HW, }; /* DM9000AEP 10/100 ethernet controller */ diff --git a/arch/arm/mach-s3c24xx/mach-osiris-dvs.c b/arch/arm/mach-s3c24xx/mach-osiris-dvs.c index 262ab0744748..6cac7da15e2b 100644 --- a/arch/arm/mach-s3c24xx/mach-osiris-dvs.c +++ b/arch/arm/mach-s3c24xx/mach-osiris-dvs.c @@ -17,7 +17,7 @@ #include #include -#include +#include #include #include diff --git a/arch/arm/mach-s3c24xx/mach-osiris.c b/arch/arm/mach-s3c24xx/mach-osiris.c index 70b0eb7d3134..ed3b22ceef06 100644 --- a/arch/arm/mach-s3c24xx/mach-osiris.c +++ b/arch/arm/mach-s3c24xx/mach-osiris.c @@ -24,7 +24,7 @@ #include #include -#include +#include #include #include @@ -36,7 +36,7 @@ #include #include -#include +#include #include #include diff --git a/arch/arm/mach-s3c24xx/mach-qt2410.c b/arch/arm/mach-s3c24xx/mach-qt2410.c index 868c82087403..84e3a9c53184 100644 --- a/arch/arm/mach-s3c24xx/mach-qt2410.c +++ b/arch/arm/mach-s3c24xx/mach-qt2410.c @@ -36,7 +36,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/arm/mach-s3c24xx/mach-rx3715.c b/arch/arm/mach-s3c24xx/mach-rx3715.c index a39fb9780dd3..b5ba615cf9dd 100644 --- a/arch/arm/mach-s3c24xx/mach-rx3715.c +++ b/arch/arm/mach-s3c24xx/mach-rx3715.c @@ -27,7 +27,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/arm/mach-s3c24xx/mach-smdk2443.c b/arch/arm/mach-s3c24xx/mach-smdk2443.c index 87fe5c5b8073..474cd81aa8ad 100644 --- a/arch/arm/mach-s3c24xx/mach-smdk2443.c +++ b/arch/arm/mach-s3c24xx/mach-smdk2443.c @@ -111,9 +111,6 @@ static struct platform_device *smdk2443_devices[] __initdata = { &s3c_device_wdt, &s3c_device_i2c0, &s3c_device_hsmmc1, -#ifdef CONFIG_SND_SOC_SMDK2443_WM9710 - &s3c_device_ac97, -#endif &s3c2443_device_dma, }; @@ -133,11 +130,6 @@ static void __init smdk2443_init_time(void) static void __init smdk2443_machine_init(void) { s3c_i2c0_set_platdata(NULL); - -#ifdef CONFIG_SND_SOC_SMDK2443_WM9710 - s3c24xx_ac97_setup_gpio(S3C24XX_AC97_GPE0); -#endif - platform_add_devices(smdk2443_devices, ARRAY_SIZE(smdk2443_devices)); smdk_machine_init(); } diff --git a/arch/arm/mach-s3c24xx/mach-vstms.c b/arch/arm/mach-s3c24xx/mach-vstms.c index f5e6322145fa..1adc957edf0f 100644 --- a/arch/arm/mach-s3c24xx/mach-vstms.c +++ b/arch/arm/mach-s3c24xx/mach-vstms.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/arm/mach-s3c24xx/sleep.S b/arch/arm/mach-s3c24xx/sleep.S index d833d616bd2e..b859268fa8da 100644 --- a/arch/arm/mach-s3c24xx/sleep.S +++ b/arch/arm/mach-s3c24xx/sleep.S @@ -33,10 +33,11 @@ #include #include -/* CONFIG_DEBUG_RESUME is dangerous if your bootloader does not +/* + * S3C24XX_DEBUG_RESUME is dangerous if your bootloader does not * reset the UART configuration, only enable if you really need this! 
-*/ -//#define CONFIG_DEBUG_RESUME + */ +//#define S3C24XX_DEBUG_RESUME .text @@ -71,13 +72,13 @@ ENTRY(s3c_cpu_resume) str r12, [ r14, #0x54 ] #endif -#ifdef CONFIG_DEBUG_RESUME +#ifdef S3C24XX_DEBUG_RESUME mov r3, #'L' strb r3, [ r2, #S3C2410_UTXH ] 1001: ldrb r14, [ r3, #S3C2410_UTRSTAT ] tst r14, #S3C2410_UTRSTAT_TXE beq 1001b -#endif /* CONFIG_DEBUG_RESUME */ +#endif /* S3C24XX_DEBUG_RESUME */ b cpu_resume diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig index ad7d604ff001..280e7312a9e1 100644 --- a/arch/arm/mach-shmobile/Kconfig +++ b/arch/arm/mach-shmobile/Kconfig @@ -1,9 +1,6 @@ config ARCH_SHMOBILE bool -config ARCH_SHMOBILE_MULTI - bool - config PM_RMOBILE bool select PM @@ -34,7 +31,6 @@ menuconfig ARCH_RENESAS depends on ARCH_MULTI_V7 && MMU select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE select ARCH_SHMOBILE - select ARCH_SHMOBILE_MULTI select ARM_GIC select GPIOLIB select HAVE_ARM_SCU if SMP diff --git a/arch/arm/mach-shmobile/pm-rcar-gen2.c b/arch/arm/mach-shmobile/pm-rcar-gen2.c index 0178da7ace82..e5f215c8b218 100644 --- a/arch/arm/mach-shmobile/pm-rcar-gen2.c +++ b/arch/arm/mach-shmobile/pm-rcar-gen2.c @@ -11,7 +11,9 @@ */ #include +#include #include +#include #include #include #include @@ -69,8 +71,9 @@ void __init rcar_gen2_pm_init(void) struct device_node *np, *cpus; bool has_a7 = false; bool has_a15 = false; - phys_addr_t boot_vector_addr = ICRAM1; + struct resource res; u32 syscier = 0; + int error; if (once++) return; @@ -91,14 +94,38 @@ void __init rcar_gen2_pm_init(void) else if (of_machine_is_compatible("renesas,r8a7791")) syscier = 0x00111003; + np = of_find_compatible_node(NULL, NULL, "renesas,smp-sram"); + if (!np) { + /* No smp-sram in DT, fall back to hardcoded address */ + res = (struct resource)DEFINE_RES_MEM(ICRAM1, + shmobile_boot_size); + goto map; + } + + error = of_address_to_resource(np, 0, &res); + if (error) { + pr_err("Failed to get smp-sram address: %d\n", error); + return; + } + +map: /* RAM for jump stub, because BAR requires 256KB aligned address */ - p = ioremap_nocache(boot_vector_addr, shmobile_boot_size); + if (res.start & (256 * 1024 - 1) || + resource_size(&res) < shmobile_boot_size) { + pr_err("Invalid smp-sram region\n"); + return; + } + + p = ioremap(res.start, resource_size(&res)); + if (!p) + return; + memcpy_toio(p, shmobile_boot_vector, shmobile_boot_size); iounmap(p); /* setup reset vectors */ p = ioremap_nocache(RST, 0x63); - bar = phys_to_sbar(boot_vector_addr); + bar = phys_to_sbar(res.start); if (has_a15) { writel_relaxed(bar, p + CA15BAR); writel_relaxed(bar | SBAR_BAREN, p + CA15BAR); diff --git a/arch/arm/mach-shmobile/pm-rmobile.c b/arch/arm/mach-shmobile/pm-rmobile.c index 699429f28b73..3a4ed4c33a68 100644 --- a/arch/arm/mach-shmobile/pm-rmobile.c +++ b/arch/arm/mach-shmobile/pm-rmobile.c @@ -195,8 +195,7 @@ static void __init add_special_pd(struct device_node *np, enum pd_types type) return; } - pr_debug("Special PM domain %s type %d for %s\n", pd->name, type, - np->full_name); + pr_debug("Special PM domain %s type %d for %pOF\n", pd->name, type, np); special_pds[num_special_pds].pd = pd; special_pds[num_special_pds].type = type; @@ -331,13 +330,13 @@ static int __init rmobile_init_pm_domains(void) for_each_compatible_node(np, NULL, "renesas,sysc-rmobile") { base = of_iomap(np, 0); if (!base) { - pr_warn("%s cannot map reg 0\n", np->full_name); + pr_warn("%pOF cannot map reg 0\n", np); continue; } pmd = of_get_child_by_name(np, "pm-domains"); if (!pmd) { - pr_warn("%s lacks pm-domains node\n", 
np->full_name); + pr_warn("%pOF lacks pm-domains node\n", np); continue; } diff --git a/arch/arm/mach-shmobile/setup-rcar-gen2.c b/arch/arm/mach-shmobile/setup-rcar-gen2.c index a6e74f481dea..7ab1690fab82 100644 --- a/arch/arm/mach-shmobile/setup-rcar-gen2.c +++ b/arch/arm/mach-shmobile/setup-rcar-gen2.c @@ -29,17 +29,29 @@ #include "common.h" #include "rcar-gen2.h" +static const struct of_device_id cpg_matches[] __initconst = { + { .compatible = "renesas,rcar-gen2-cpg-clocks", }, + { .compatible = "renesas,r8a7743-cpg-mssr", .data = "extal" }, + { .compatible = "renesas,r8a7790-cpg-mssr", .data = "extal" }, + { .compatible = "renesas,r8a7791-cpg-mssr", .data = "extal" }, + { .compatible = "renesas,r8a7793-cpg-mssr", .data = "extal" }, + { /* sentinel */ } +}; + static unsigned int __init get_extal_freq(void) { + const struct of_device_id *match; struct device_node *cpg, *extal; u32 freq = 20000000; + int idx = 0; - cpg = of_find_compatible_node(NULL, NULL, - "renesas,rcar-gen2-cpg-clocks"); + cpg = of_find_matching_node_and_match(NULL, cpg_matches, &match); if (!cpg) return freq; - extal = of_parse_phandle(cpg, "clocks", 0); + if (match->data) + idx = of_property_match_string(cpg, "clock-names", match->data); + extal = of_parse_phandle(cpg, "clocks", idx); of_node_put(cpg); if (!extal) return freq; @@ -58,7 +70,8 @@ void __init rcar_gen2_timer_init(void) void __iomem *base; u32 freq; - if (of_machine_is_compatible("renesas,r8a7792") || + if (of_machine_is_compatible("renesas,r8a7745") || + of_machine_is_compatible("renesas,r8a7792") || of_machine_is_compatible("renesas,r8a7794")) { freq = 260000000 / 8; /* ZS / 8 */ /* CNTVOFF has to be initialized either from non-secure diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig index 329f01c5b6f8..c8368d647741 100644 --- a/arch/arm/mach-tegra/Kconfig +++ b/arch/arm/mach-tegra/Kconfig @@ -13,5 +13,7 @@ menuconfig ARCH_TEGRA select ARCH_HAS_RESET_CONTROLLER select RESET_CONTROLLER select SOC_BUS + select ZONE_DMA if ARM_LPAE + select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE help This enables support for NVIDIA Tegra based systems. 
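For reference, the pm-rmobile.c, omap-wakeupgen.c, omap_hwmod.c and rockchip platsmp.c hunks above all switch device-tree node logging from dereferencing np->full_name to the %pOF printk format specifier. A minimal sketch of the resulting pattern, assuming a driver that already holds a struct device_node *np; the function name and message below are illustrative only, not taken from the patch:

#include <linux/of.h>
#include <linux/printk.h>

/* Minimal sketch: log a device-tree node by path using %pOF.
 * %pOF expands to the node's full OF path, so the old
 * np->full_name dereference is no longer needed.
 */
static void report_dt_node(struct device_node *np)
{
	if (!of_device_is_available(np))
		pr_warn("%pOF is disabled\n", np);
}
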
diff --git a/arch/arm/mach-tegra/cpuidle-tegra114.c b/arch/arm/mach-tegra/cpuidle-tegra114.c index d3aa9be16621..e3fbcfedf845 100644 --- a/arch/arm/mach-tegra/cpuidle-tegra114.c +++ b/arch/arm/mach-tegra/cpuidle-tegra114.c @@ -60,7 +60,7 @@ static int tegra114_idle_power_down(struct cpuidle_device *dev, return index; } -static void tegra114_idle_enter_freeze(struct cpuidle_device *dev, +static void tegra114_idle_enter_s2idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { @@ -77,7 +77,7 @@ static struct cpuidle_driver tegra_idle_driver = { #ifdef CONFIG_PM_SLEEP [1] = { .enter = tegra114_idle_power_down, - .enter_freeze = tegra114_idle_enter_freeze, + .enter_s2idle = tegra114_idle_enter_s2idle, .exit_latency = 500, .target_residency = 1000, .flags = CPUIDLE_FLAG_TIMER_STOP, diff --git a/arch/arm/mach-tegra/tegra.c b/arch/arm/mach-tegra/tegra.c index 649e9e8c7bcc..02e712d2ea30 100644 --- a/arch/arm/mach-tegra/tegra.c +++ b/arch/arm/mach-tegra/tegra.c @@ -84,35 +84,8 @@ static void __init tegra_dt_init_irq(void) static void __init tegra_dt_init(void) { - struct soc_device_attribute *soc_dev_attr; - struct soc_device *soc_dev; - struct device *parent = NULL; + struct device *parent = tegra_soc_device_register(); - soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); - if (!soc_dev_attr) - goto out; - - soc_dev_attr->family = kasprintf(GFP_KERNEL, "Tegra"); - soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%d", - tegra_sku_info.revision); - soc_dev_attr->soc_id = kasprintf(GFP_KERNEL, "%u", tegra_get_chip_id()); - - soc_dev = soc_device_register(soc_dev_attr); - if (IS_ERR(soc_dev)) { - kfree(soc_dev_attr->family); - kfree(soc_dev_attr->revision); - kfree(soc_dev_attr->soc_id); - kfree(soc_dev_attr); - goto out; - } - - parent = soc_device_to_device(soc_dev); - - /* - * Finished with the static registrations now; fill in the missing - * devices - */ -out: of_platform_default_populate(NULL, NULL, parent); } diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S index 2522f8c8fbb1..a5084ec70c6e 100644 --- a/arch/arm/mm/cache-v4wb.S +++ b/arch/arm/mm/cache-v4wb.S @@ -47,6 +47,7 @@ #define CACHE_DLIMIT (CACHE_DSIZE * 4) .data + .align 2 flush_base: .long FLUSH_BASE .text diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index ff8b0aa2dfde..42f585379e19 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -315,8 +315,11 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) * signal first. We do not need to release the mmap_sem because * it would already be released in __lock_page_or_retry in * mm/filemap.c. */ - if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) + if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) { + if (!user_mode(regs)) + goto no_context; return 0; + } /* * Major/minor page fault accounting is only done on the diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S index 5e5720e8bc5f..7d16bbc4102b 100644 --- a/arch/arm/mm/proc-v7-3level.S +++ b/arch/arm/mm/proc-v7-3level.S @@ -129,8 +129,7 @@ ENDPROC(cpu_v7_set_pte_ext) .macro v7_ttb_setup, zero, ttbr0l, ttbr0h, ttbr1, tmp ldr \tmp, =swapper_pg_dir @ swapper_pg_dir virtual address cmp \ttbr1, \tmp, lsr #12 @ PHYS_OFFSET > PAGE_OFFSET? 
- mrc p15, 0, \tmp, c2, c0, 2 @ TTB control egister - orr \tmp, \tmp, #TTB_EAE + mov \tmp, #TTB_EAE @ for TTB control egister ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP) ALT_UP(orr \tmp, \tmp, #TTB_FLAGS_UP) ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP << 16) diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S index b6bbfdb6dfdc..3d75b7972fd1 100644 --- a/arch/arm/mm/proc-xscale.S +++ b/arch/arm/mm/proc-xscale.S @@ -104,6 +104,7 @@ .endm .data + .align 2 clean_addr: .word CLEAN_ADDR .text diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index d5b9fa19b684..c199990e12b6 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -1,6 +1,7 @@ /* - * Just-In-Time compiler for BPF filters on 32bit ARM + * Just-In-Time compiler for eBPF filters on 32bit ARM * + * Copyright (c) 2017 Shubham Bansal * Copyright (c) 2011 Mircea Gherzan * * This program is free software; you can redistribute it and/or modify it @@ -8,6 +9,7 @@ * Free Software Foundation; version 2 of the License. */ +#include #include #include #include @@ -18,54 +20,101 @@ #include #include -#include #include #include #include "bpf_jit_32.h" +int bpf_jit_enable __read_mostly; + +#define STACK_OFFSET(k) (k) +#define TMP_REG_1 (MAX_BPF_JIT_REG + 0) /* TEMP Register 1 */ +#define TMP_REG_2 (MAX_BPF_JIT_REG + 1) /* TEMP Register 2 */ +#define TCALL_CNT (MAX_BPF_JIT_REG + 2) /* Tail Call Count */ + +/* Flags used for JIT optimization */ +#define SEEN_CALL (1 << 0) + +#define FLAG_IMM_OVERFLOW (1 << 0) + /* - * ABI: + * Map eBPF registers to ARM 32bit registers or stack scratch space. * - * r0 scratch register - * r4 BPF register A - * r5 BPF register X - * r6 pointer to the skb - * r7 skb->data - * r8 skb_headlen(skb) + * 1. First argument is passed using the arm 32bit registers and rest of the + * arguments are passed on stack scratch space. + * 2. First callee-saved arugument is mapped to arm 32 bit registers and rest + * arguments are mapped to scratch space on stack. + * 3. We need two 64 bit temp registers to do complex operations on eBPF + * registers. + * + * As the eBPF registers are all 64 bit registers and arm has only 32 bit + * registers, we have to map each eBPF registers with two arm 32 bit regs or + * scratch memory space and we have to build eBPF 64 bit register from those. + * + */ +static const u8 bpf2a32[][2] = { + /* return value from in-kernel function, and exit value from eBPF */ + [BPF_REG_0] = {ARM_R1, ARM_R0}, + /* arguments from eBPF program to in-kernel function */ + [BPF_REG_1] = {ARM_R3, ARM_R2}, + /* Stored on stack scratch space */ + [BPF_REG_2] = {STACK_OFFSET(0), STACK_OFFSET(4)}, + [BPF_REG_3] = {STACK_OFFSET(8), STACK_OFFSET(12)}, + [BPF_REG_4] = {STACK_OFFSET(16), STACK_OFFSET(20)}, + [BPF_REG_5] = {STACK_OFFSET(24), STACK_OFFSET(28)}, + /* callee saved registers that in-kernel function will preserve */ + [BPF_REG_6] = {ARM_R5, ARM_R4}, + /* Stored on stack scratch space */ + [BPF_REG_7] = {STACK_OFFSET(32), STACK_OFFSET(36)}, + [BPF_REG_8] = {STACK_OFFSET(40), STACK_OFFSET(44)}, + [BPF_REG_9] = {STACK_OFFSET(48), STACK_OFFSET(52)}, + /* Read only Frame Pointer to access Stack */ + [BPF_REG_FP] = {STACK_OFFSET(56), STACK_OFFSET(60)}, + /* Temporary Register for internal BPF JIT, can be used + * for constant blindings and others. + */ + [TMP_REG_1] = {ARM_R7, ARM_R6}, + [TMP_REG_2] = {ARM_R10, ARM_R8}, + /* Tail call count. Stored on stack scratch space. */ + [TCALL_CNT] = {STACK_OFFSET(64), STACK_OFFSET(68)}, + /* temporary register for blinding constants. 
+ * Stored on stack scratch space. + */ + [BPF_REG_AX] = {STACK_OFFSET(72), STACK_OFFSET(76)}, +}; + +#define dst_lo dst[1] +#define dst_hi dst[0] +#define src_lo src[1] +#define src_hi src[0] + +/* + * JIT Context: + * + * prog : bpf_prog + * idx : index of current last JITed instruction. + * prologue_bytes : bytes used in prologue. + * epilogue_offset : offset of epilogue starting. + * seen : bit mask used for JIT optimization. + * offsets : array of eBPF instruction offsets in + * JITed code. + * target : final JITed code. + * epilogue_bytes : no of bytes used in epilogue. + * imm_count : no of immediate counts used for global + * variables. + * imms : array of global variable addresses. */ -#define r_scratch ARM_R0 -/* r1-r3 are (also) used for the unaligned loads on the non-ARMv7 slowpath */ -#define r_off ARM_R1 -#define r_A ARM_R4 -#define r_X ARM_R5 -#define r_skb ARM_R6 -#define r_skb_data ARM_R7 -#define r_skb_hl ARM_R8 - -#define SCRATCH_SP_OFFSET 0 -#define SCRATCH_OFF(k) (SCRATCH_SP_OFFSET + 4 * (k)) - -#define SEEN_MEM ((1 << BPF_MEMWORDS) - 1) -#define SEEN_MEM_WORD(k) (1 << (k)) -#define SEEN_X (1 << BPF_MEMWORDS) -#define SEEN_CALL (1 << (BPF_MEMWORDS + 1)) -#define SEEN_SKB (1 << (BPF_MEMWORDS + 2)) -#define SEEN_DATA (1 << (BPF_MEMWORDS + 3)) - -#define FLAG_NEED_X_RESET (1 << 0) -#define FLAG_IMM_OVERFLOW (1 << 1) - struct jit_ctx { - const struct bpf_prog *skf; - unsigned idx; - unsigned prologue_bytes; - int ret0_fp_idx; + const struct bpf_prog *prog; + unsigned int idx; + unsigned int prologue_bytes; + unsigned int epilogue_offset; u32 seen; u32 flags; u32 *offsets; u32 *target; + u32 stack_size; #if __LINUX_ARM_ARCH__ < 7 u16 epilogue_bytes; u16 imm_count; @@ -73,68 +122,16 @@ struct jit_ctx { #endif }; -int bpf_jit_enable __read_mostly; - -static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret, - unsigned int size) -{ - void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size); - - if (!ptr) - return -EFAULT; - memcpy(ret, ptr, size); - return 0; -} - -static u64 jit_get_skb_b(struct sk_buff *skb, int offset) -{ - u8 ret; - int err; - - if (offset < 0) - err = call_neg_helper(skb, offset, &ret, 1); - else - err = skb_copy_bits(skb, offset, &ret, 1); - - return (u64)err << 32 | ret; -} - -static u64 jit_get_skb_h(struct sk_buff *skb, int offset) -{ - u16 ret; - int err; - - if (offset < 0) - err = call_neg_helper(skb, offset, &ret, 2); - else - err = skb_copy_bits(skb, offset, &ret, 2); - - return (u64)err << 32 | ntohs(ret); -} - -static u64 jit_get_skb_w(struct sk_buff *skb, int offset) -{ - u32 ret; - int err; - - if (offset < 0) - err = call_neg_helper(skb, offset, &ret, 4); - else - err = skb_copy_bits(skb, offset, &ret, 4); - - return (u64)err << 32 | ntohl(ret); -} - /* * Wrappers which handle both OABI and EABI and assures Thumb2 interworking * (where the assembly routines like __aeabi_uidiv could cause problems). */ -static u32 jit_udiv(u32 dividend, u32 divisor) +static u32 jit_udiv32(u32 dividend, u32 divisor) { return dividend / divisor; } -static u32 jit_mod(u32 dividend, u32 divisor) +static u32 jit_mod32(u32 dividend, u32 divisor) { return dividend % divisor; } @@ -158,36 +155,22 @@ static inline void emit(u32 inst, struct jit_ctx *ctx) _emit(ARM_COND_AL, inst, ctx); } -static u16 saved_regs(struct jit_ctx *ctx) +/* + * Checks if immediate value can be converted to imm12(12 bits) value. 
+ */ +static int16_t imm8m(u32 x) { - u16 ret = 0; + u32 rot; - if ((ctx->skf->len > 1) || - (ctx->skf->insns[0].code == (BPF_RET | BPF_A))) - ret |= 1 << r_A; - -#ifdef CONFIG_FRAME_POINTER - ret |= (1 << ARM_FP) | (1 << ARM_IP) | (1 << ARM_LR) | (1 << ARM_PC); -#else - if (ctx->seen & SEEN_CALL) - ret |= 1 << ARM_LR; -#endif - if (ctx->seen & (SEEN_DATA | SEEN_SKB)) - ret |= 1 << r_skb; - if (ctx->seen & SEEN_DATA) - ret |= (1 << r_skb_data) | (1 << r_skb_hl); - if (ctx->seen & SEEN_X) - ret |= 1 << r_X; - - return ret; -} - -static inline int mem_words_used(struct jit_ctx *ctx) -{ - /* yes, we do waste some stack space IF there are "holes" in the set" */ - return fls(ctx->seen & SEEN_MEM); + for (rot = 0; rot < 16; rot++) + if ((x & ~ror32(0xff, 2 * rot)) == 0) + return rol32(x, 2 * rot) | (rot << 8); + return -1; } +/* + * Initializes the JIT space with undefined instructions. + */ static void jit_fill_hole(void *area, unsigned int size) { u32 *ptr; @@ -196,88 +179,34 @@ static void jit_fill_hole(void *area, unsigned int size) *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF); } -static void build_prologue(struct jit_ctx *ctx) -{ - u16 reg_set = saved_regs(ctx); - u16 off; +/* Stack must be multiples of 16 Bytes */ +#define STACK_ALIGN(sz) (((sz) + 3) & ~3) -#ifdef CONFIG_FRAME_POINTER - emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx); - emit(ARM_PUSH(reg_set), ctx); - emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx); -#else - if (reg_set) - emit(ARM_PUSH(reg_set), ctx); -#endif +/* Stack space for BPF_REG_2, BPF_REG_3, BPF_REG_4, + * BPF_REG_5, BPF_REG_7, BPF_REG_8, BPF_REG_9, + * BPF_REG_FP and Tail call counts. + */ +#define SCRATCH_SIZE 80 - if (ctx->seen & (SEEN_DATA | SEEN_SKB)) - emit(ARM_MOV_R(r_skb, ARM_R0), ctx); +/* total stack size used in JITed code */ +#define _STACK_SIZE \ + (ctx->prog->aux->stack_depth + \ + + SCRATCH_SIZE + \ + + 4 /* extra for skb_copy_bits buffer */) - if (ctx->seen & SEEN_DATA) { - off = offsetof(struct sk_buff, data); - emit(ARM_LDR_I(r_skb_data, r_skb, off), ctx); - /* headlen = len - data_len */ - off = offsetof(struct sk_buff, len); - emit(ARM_LDR_I(r_skb_hl, r_skb, off), ctx); - off = offsetof(struct sk_buff, data_len); - emit(ARM_LDR_I(r_scratch, r_skb, off), ctx); - emit(ARM_SUB_R(r_skb_hl, r_skb_hl, r_scratch), ctx); - } +#define STACK_SIZE STACK_ALIGN(_STACK_SIZE) - if (ctx->flags & FLAG_NEED_X_RESET) - emit(ARM_MOV_I(r_X, 0), ctx); +/* Get the offset of eBPF REGISTERs stored on scratch space. 
*/ +#define STACK_VAR(off) (STACK_SIZE-off-4) - /* do not leak kernel data to userspace */ - if (bpf_needs_clear_a(&ctx->skf->insns[0])) - emit(ARM_MOV_I(r_A, 0), ctx); - - /* stack space for the BPF_MEM words */ - if (ctx->seen & SEEN_MEM) - emit(ARM_SUB_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx); -} - -static void build_epilogue(struct jit_ctx *ctx) -{ - u16 reg_set = saved_regs(ctx); - - if (ctx->seen & SEEN_MEM) - emit(ARM_ADD_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx); - - reg_set &= ~(1 << ARM_LR); - -#ifdef CONFIG_FRAME_POINTER - /* the first instruction of the prologue was: mov ip, sp */ - reg_set &= ~(1 << ARM_IP); - reg_set |= (1 << ARM_SP); - emit(ARM_LDM(ARM_SP, reg_set), ctx); -#else - if (reg_set) { - if (ctx->seen & SEEN_CALL) - reg_set |= 1 << ARM_PC; - emit(ARM_POP(reg_set), ctx); - } - - if (!(ctx->seen & SEEN_CALL)) - emit(ARM_BX(ARM_LR), ctx); -#endif -} - -static int16_t imm8m(u32 x) -{ - u32 rot; - - for (rot = 0; rot < 16; rot++) - if ((x & ~ror32(0xff, 2 * rot)) == 0) - return rol32(x, 2 * rot) | (rot << 8); - - return -1; -} +/* Offset of skb_copy_bits buffer */ +#define SKB_BUFFER STACK_VAR(SCRATCH_SIZE) #if __LINUX_ARM_ARCH__ < 7 static u16 imm_offset(u32 k, struct jit_ctx *ctx) { - unsigned i = 0, offset; + unsigned int i = 0, offset; u16 imm; /* on the "fake" run we just count them (duplicates included) */ @@ -296,7 +225,7 @@ static u16 imm_offset(u32 k, struct jit_ctx *ctx) ctx->imms[i] = k; /* constants go just after the epilogue */ - offset = ctx->offsets[ctx->skf->len]; + offset = ctx->offsets[ctx->prog->len - 1] * 4; offset += ctx->prologue_bytes; offset += ctx->epilogue_bytes; offset += i * 4; @@ -320,10 +249,22 @@ static u16 imm_offset(u32 k, struct jit_ctx *ctx) #endif /* __LINUX_ARM_ARCH__ */ +static inline int bpf2a32_offset(int bpf_to, int bpf_from, + const struct jit_ctx *ctx) { + int to, from; + + if (ctx->target == NULL) + return 0; + to = ctx->offsets[bpf_to]; + from = ctx->offsets[bpf_from]; + + return to - from - 1; +} + /* * Move an immediate that's not an imm8m to a core register. 
*/ -static inline void emit_mov_i_no8m(int rd, u32 val, struct jit_ctx *ctx) +static inline void emit_mov_i_no8m(const u8 rd, u32 val, struct jit_ctx *ctx) { #if __LINUX_ARM_ARCH__ < 7 emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx); @@ -334,7 +275,7 @@ static inline void emit_mov_i_no8m(int rd, u32 val, struct jit_ctx *ctx) #endif } -static inline void emit_mov_i(int rd, u32 val, struct jit_ctx *ctx) +static inline void emit_mov_i(const u8 rd, u32 val, struct jit_ctx *ctx) { int imm12 = imm8m(val); @@ -344,113 +285,9 @@ static inline void emit_mov_i(int rd, u32 val, struct jit_ctx *ctx) emit_mov_i_no8m(rd, val, ctx); } -#if __LINUX_ARM_ARCH__ < 6 - -static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx) -{ - _emit(cond, ARM_LDRB_I(ARM_R3, r_addr, 1), ctx); - _emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx); - _emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 3), ctx); - _emit(cond, ARM_LSL_I(ARM_R3, ARM_R3, 16), ctx); - _emit(cond, ARM_LDRB_I(ARM_R0, r_addr, 2), ctx); - _emit(cond, ARM_ORR_S(ARM_R3, ARM_R3, ARM_R1, SRTYPE_LSL, 24), ctx); - _emit(cond, ARM_ORR_R(ARM_R3, ARM_R3, ARM_R2), ctx); - _emit(cond, ARM_ORR_S(r_res, ARM_R3, ARM_R0, SRTYPE_LSL, 8), ctx); -} - -static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx) -{ - _emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx); - _emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 1), ctx); - _emit(cond, ARM_ORR_S(r_res, ARM_R2, ARM_R1, SRTYPE_LSL, 8), ctx); -} - -static inline void emit_swap16(u8 r_dst, u8 r_src, struct jit_ctx *ctx) -{ - /* r_dst = (r_src << 8) | (r_src >> 8) */ - emit(ARM_LSL_I(ARM_R1, r_src, 8), ctx); - emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSR, 8), ctx); - - /* - * we need to mask out the bits set in r_dst[23:16] due to - * the first shift instruction. - * - * note that 0x8ff is the encoded immediate 0x00ff0000. - */ - emit(ARM_BIC_I(r_dst, r_dst, 0x8ff), ctx); -} - -#else /* ARMv6+ */ - -static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx) -{ - _emit(cond, ARM_LDR_I(r_res, r_addr, 0), ctx); -#ifdef __LITTLE_ENDIAN - _emit(cond, ARM_REV(r_res, r_res), ctx); -#endif -} - -static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx) -{ - _emit(cond, ARM_LDRH_I(r_res, r_addr, 0), ctx); -#ifdef __LITTLE_ENDIAN - _emit(cond, ARM_REV16(r_res, r_res), ctx); -#endif -} - -static inline void emit_swap16(u8 r_dst __maybe_unused, - u8 r_src __maybe_unused, - struct jit_ctx *ctx __maybe_unused) -{ -#ifdef __LITTLE_ENDIAN - emit(ARM_REV16(r_dst, r_src), ctx); -#endif -} - -#endif /* __LINUX_ARM_ARCH__ < 6 */ - - -/* Compute the immediate value for a PC-relative branch. */ -static inline u32 b_imm(unsigned tgt, struct jit_ctx *ctx) -{ - u32 imm; - - if (ctx->target == NULL) - return 0; - /* - * BPF allows only forward jumps and the offset of the target is - * still the one computed during the first pass. 
- */ - imm = ctx->offsets[tgt] + ctx->prologue_bytes - (ctx->idx * 4 + 8); - - return imm >> 2; -} - -#define OP_IMM3(op, r1, r2, imm_val, ctx) \ - do { \ - imm12 = imm8m(imm_val); \ - if (imm12 < 0) { \ - emit_mov_i_no8m(r_scratch, imm_val, ctx); \ - emit(op ## _R((r1), (r2), r_scratch), ctx); \ - } else { \ - emit(op ## _I((r1), (r2), imm12), ctx); \ - } \ - } while (0) - -static inline void emit_err_ret(u8 cond, struct jit_ctx *ctx) -{ - if (ctx->ret0_fp_idx >= 0) { - _emit(cond, ARM_B(b_imm(ctx->ret0_fp_idx, ctx)), ctx); - /* NOP to keep the size constant between passes */ - emit(ARM_MOV_R(ARM_R0, ARM_R0), ctx); - } else { - _emit(cond, ARM_MOV_I(ARM_R0, 0), ctx); - _emit(cond, ARM_B(b_imm(ctx->skf->len, ctx)), ctx); - } -} - static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx) { + ctx->seen |= SEEN_CALL; #if __LINUX_ARM_ARCH__ < 5 emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx); @@ -463,557 +300,1579 @@ static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx) #endif } -static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, - int bpf_op) +static inline int epilogue_offset(const struct jit_ctx *ctx) { + int to, from; + /* No need for 1st dummy run */ + if (ctx->target == NULL) + return 0; + to = ctx->epilogue_offset; + from = ctx->idx; + + return to - from - 2; +} + +static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op) +{ + const u8 *tmp = bpf2a32[TMP_REG_1]; + s32 jmp_offset; + + /* checks if divisor is zero or not. If it is, then + * exit directly. + */ + emit(ARM_CMP_I(rn, 0), ctx); + _emit(ARM_COND_EQ, ARM_MOV_I(ARM_R0, 0), ctx); + jmp_offset = epilogue_offset(ctx); + _emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx); #if __LINUX_ARM_ARCH__ == 7 if (elf_hwcap & HWCAP_IDIVA) { - if (bpf_op == BPF_DIV) + if (op == BPF_DIV) emit(ARM_UDIV(rd, rm, rn), ctx); else { - emit(ARM_UDIV(ARM_R3, rm, rn), ctx); - emit(ARM_MLS(rd, rn, ARM_R3, rm), ctx); + emit(ARM_UDIV(ARM_IP, rm, rn), ctx); + emit(ARM_MLS(rd, rn, ARM_IP, rm), ctx); } return; } #endif /* - * For BPF_ALU | BPF_DIV | BPF_K instructions, rm is ARM_R4 - * (r_A) and rn is ARM_R0 (r_scratch) so load rn first into - * ARM_R1 to avoid accidentally overwriting ARM_R0 with rm - * before using it as a source for ARM_R1. - * - * For BPF_ALU | BPF_DIV | BPF_X rm is ARM_R4 (r_A) and rn is - * ARM_R5 (r_X) so there is no particular register overlap - * issues. + * For BPF_ALU | BPF_DIV | BPF_K instructions + * As ARM_R1 and ARM_R0 contains 1st argument of bpf + * function, we need to save it on caller side to save + * it from getting destroyed within callee. + * After the return from the callee, we restore ARM_R0 + * ARM_R1. */ - if (rn != ARM_R1) + if (rn != ARM_R1) { + emit(ARM_MOV_R(tmp[0], ARM_R1), ctx); emit(ARM_MOV_R(ARM_R1, rn), ctx); - if (rm != ARM_R0) + } + if (rm != ARM_R0) { + emit(ARM_MOV_R(tmp[1], ARM_R0), ctx); emit(ARM_MOV_R(ARM_R0, rm), ctx); + } + /* Call appropriate function */ ctx->seen |= SEEN_CALL; - emit_mov_i(ARM_R3, bpf_op == BPF_DIV ? (u32)jit_udiv : (u32)jit_mod, - ctx); - emit_blx_r(ARM_R3, ctx); + emit_mov_i(ARM_IP, op == BPF_DIV ? + (u32)jit_udiv32 : (u32)jit_mod32, ctx); + emit_blx_r(ARM_IP, ctx); + /* Save return value */ if (rd != ARM_R0) emit(ARM_MOV_R(rd, ARM_R0), ctx); + + /* Restore ARM_R0 and ARM_R1 */ + if (rn != ARM_R1) + emit(ARM_MOV_R(ARM_R1, tmp[0]), ctx); + if (rm != ARM_R0) + emit(ARM_MOV_R(ARM_R0, tmp[1]), ctx); } -static inline void update_on_xread(struct jit_ctx *ctx) +/* Checks whether BPF register is on scratch stack space or not. 
*/ +static inline bool is_on_stack(u8 bpf_reg) { - if (!(ctx->seen & SEEN_X)) - ctx->flags |= FLAG_NEED_X_RESET; + static u8 stack_regs[] = {BPF_REG_AX, BPF_REG_3, BPF_REG_4, BPF_REG_5, + BPF_REG_7, BPF_REG_8, BPF_REG_9, TCALL_CNT, + BPF_REG_2, BPF_REG_FP}; + int i, reg_len = sizeof(stack_regs); - ctx->seen |= SEEN_X; + for (i = 0 ; i < reg_len ; i++) { + if (bpf_reg == stack_regs[i]) + return true; + } + return false; +} + +static inline void emit_a32_mov_i(const u8 dst, const u32 val, + bool dstk, struct jit_ctx *ctx) +{ + const u8 *tmp = bpf2a32[TMP_REG_1]; + + if (dstk) { + emit_mov_i(tmp[1], val, ctx); + emit(ARM_STR_I(tmp[1], ARM_SP, STACK_VAR(dst)), ctx); + } else { + emit_mov_i(dst, val, ctx); + } +} + +/* Sign extended move */ +static inline void emit_a32_mov_i64(const bool is64, const u8 dst[], + const u32 val, bool dstk, + struct jit_ctx *ctx) { + u32 hi = 0; + + if (is64 && (val & (1<<31))) + hi = (u32)~0; + emit_a32_mov_i(dst_lo, val, dstk, ctx); + emit_a32_mov_i(dst_hi, hi, dstk, ctx); +} + +static inline void emit_a32_add_r(const u8 dst, const u8 src, + const bool is64, const bool hi, + struct jit_ctx *ctx) { + /* 64 bit : + * adds dst_lo, dst_lo, src_lo + * adc dst_hi, dst_hi, src_hi + * 32 bit : + * add dst_lo, dst_lo, src_lo + */ + if (!hi && is64) + emit(ARM_ADDS_R(dst, dst, src), ctx); + else if (hi && is64) + emit(ARM_ADC_R(dst, dst, src), ctx); + else + emit(ARM_ADD_R(dst, dst, src), ctx); +} + +static inline void emit_a32_sub_r(const u8 dst, const u8 src, + const bool is64, const bool hi, + struct jit_ctx *ctx) { + /* 64 bit : + * subs dst_lo, dst_lo, src_lo + * sbc dst_hi, dst_hi, src_hi + * 32 bit : + * sub dst_lo, dst_lo, src_lo + */ + if (!hi && is64) + emit(ARM_SUBS_R(dst, dst, src), ctx); + else if (hi && is64) + emit(ARM_SBC_R(dst, dst, src), ctx); + else + emit(ARM_SUB_R(dst, dst, src), ctx); +} + +static inline void emit_alu_r(const u8 dst, const u8 src, const bool is64, + const bool hi, const u8 op, struct jit_ctx *ctx){ + switch (BPF_OP(op)) { + /* dst = dst + src */ + case BPF_ADD: + emit_a32_add_r(dst, src, is64, hi, ctx); + break; + /* dst = dst - src */ + case BPF_SUB: + emit_a32_sub_r(dst, src, is64, hi, ctx); + break; + /* dst = dst | src */ + case BPF_OR: + emit(ARM_ORR_R(dst, dst, src), ctx); + break; + /* dst = dst & src */ + case BPF_AND: + emit(ARM_AND_R(dst, dst, src), ctx); + break; + /* dst = dst ^ src */ + case BPF_XOR: + emit(ARM_EOR_R(dst, dst, src), ctx); + break; + /* dst = dst * src */ + case BPF_MUL: + emit(ARM_MUL(dst, dst, src), ctx); + break; + /* dst = dst << src */ + case BPF_LSH: + emit(ARM_LSL_R(dst, dst, src), ctx); + break; + /* dst = dst >> src */ + case BPF_RSH: + emit(ARM_LSR_R(dst, dst, src), ctx); + break; + /* dst = dst >> src (signed)*/ + case BPF_ARSH: + emit(ARM_MOV_SR(dst, dst, SRTYPE_ASR, src), ctx); + break; + } +} + +/* ALU operation (32 bit) + * dst = dst (op) src + */ +static inline void emit_a32_alu_r(const u8 dst, const u8 src, + bool dstk, bool sstk, + struct jit_ctx *ctx, const bool is64, + const bool hi, const u8 op) { + const u8 *tmp = bpf2a32[TMP_REG_1]; + u8 rn = sstk ? 
tmp[1] : src; + + if (sstk) + emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src)), ctx); + + /* ALU operation */ + if (dstk) { + emit(ARM_LDR_I(tmp[0], ARM_SP, STACK_VAR(dst)), ctx); + emit_alu_r(tmp[0], rn, is64, hi, op, ctx); + emit(ARM_STR_I(tmp[0], ARM_SP, STACK_VAR(dst)), ctx); + } else { + emit_alu_r(dst, rn, is64, hi, op, ctx); + } +} + +/* ALU operation (64 bit) */ +static inline void emit_a32_alu_r64(const bool is64, const u8 dst[], + const u8 src[], bool dstk, + bool sstk, struct jit_ctx *ctx, + const u8 op) { + emit_a32_alu_r(dst_lo, src_lo, dstk, sstk, ctx, is64, false, op); + if (is64) + emit_a32_alu_r(dst_hi, src_hi, dstk, sstk, ctx, is64, true, op); + else + emit_a32_mov_i(dst_hi, 0, dstk, ctx); +} + +/* dst = imm (4 bytes)*/ +static inline void emit_a32_mov_r(const u8 dst, const u8 src, + bool dstk, bool sstk, + struct jit_ctx *ctx) { + const u8 *tmp = bpf2a32[TMP_REG_1]; + u8 rt = sstk ? tmp[0] : src; + + if (sstk) + emit(ARM_LDR_I(tmp[0], ARM_SP, STACK_VAR(src)), ctx); + if (dstk) + emit(ARM_STR_I(rt, ARM_SP, STACK_VAR(dst)), ctx); + else + emit(ARM_MOV_R(dst, rt), ctx); +} + +/* dst = src */ +static inline void emit_a32_mov_r64(const bool is64, const u8 dst[], + const u8 src[], bool dstk, + bool sstk, struct jit_ctx *ctx) { + emit_a32_mov_r(dst_lo, src_lo, dstk, sstk, ctx); + if (is64) { + /* complete 8 byte move */ + emit_a32_mov_r(dst_hi, src_hi, dstk, sstk, ctx); + } else { + /* Zero out high 4 bytes */ + emit_a32_mov_i(dst_hi, 0, dstk, ctx); + } +} + +/* Shift operations */ +static inline void emit_a32_alu_i(const u8 dst, const u32 val, bool dstk, + struct jit_ctx *ctx, const u8 op) { + const u8 *tmp = bpf2a32[TMP_REG_1]; + u8 rd = dstk ? tmp[0] : dst; + + if (dstk) + emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst)), ctx); + + /* Do shift operation */ + switch (op) { + case BPF_LSH: + emit(ARM_LSL_I(rd, rd, val), ctx); + break; + case BPF_RSH: + emit(ARM_LSR_I(rd, rd, val), ctx); + break; + case BPF_NEG: + emit(ARM_RSB_I(rd, rd, val), ctx); + break; + } + + if (dstk) + emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst)), ctx); +} + +/* dst = ~dst (64 bit) */ +static inline void emit_a32_neg64(const u8 dst[], bool dstk, + struct jit_ctx *ctx){ + const u8 *tmp = bpf2a32[TMP_REG_1]; + u8 rd = dstk ? tmp[1] : dst[1]; + u8 rm = dstk ? tmp[0] : dst[0]; + + /* Setup Operand */ + if (dstk) { + emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); + emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); + } + + /* Do Negate Operation */ + emit(ARM_RSBS_I(rd, rd, 0), ctx); + emit(ARM_RSC_I(rm, rm, 0), ctx); + + if (dstk) { + emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); + emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); + } +} + +/* dst = dst << src */ +static inline void emit_a32_lsh_r64(const u8 dst[], const u8 src[], bool dstk, + bool sstk, struct jit_ctx *ctx) { + const u8 *tmp = bpf2a32[TMP_REG_1]; + const u8 *tmp2 = bpf2a32[TMP_REG_2]; + + /* Setup Operands */ + u8 rt = sstk ? tmp2[1] : src_lo; + u8 rd = dstk ? tmp[1] : dst_lo; + u8 rm = dstk ? 
tmp[0] : dst_hi; + + if (sstk) + emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx); + if (dstk) { + emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); + emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); + } + + /* Do LSH operation */ + emit(ARM_SUB_I(ARM_IP, rt, 32), ctx); + emit(ARM_RSB_I(tmp2[0], rt, 32), ctx); + /* As we are using ARM_LR */ + ctx->seen |= SEEN_CALL; + emit(ARM_MOV_SR(ARM_LR, rm, SRTYPE_ASL, rt), ctx); + emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd, SRTYPE_ASL, ARM_IP), ctx); + emit(ARM_ORR_SR(ARM_IP, ARM_LR, rd, SRTYPE_LSR, tmp2[0]), ctx); + emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_ASL, rt), ctx); + + if (dstk) { + emit(ARM_STR_I(ARM_LR, ARM_SP, STACK_VAR(dst_lo)), ctx); + emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_hi)), ctx); + } else { + emit(ARM_MOV_R(rd, ARM_LR), ctx); + emit(ARM_MOV_R(rm, ARM_IP), ctx); + } +} + +/* dst = dst >> src (signed)*/ +static inline void emit_a32_arsh_r64(const u8 dst[], const u8 src[], bool dstk, + bool sstk, struct jit_ctx *ctx) { + const u8 *tmp = bpf2a32[TMP_REG_1]; + const u8 *tmp2 = bpf2a32[TMP_REG_2]; + /* Setup Operands */ + u8 rt = sstk ? tmp2[1] : src_lo; + u8 rd = dstk ? tmp[1] : dst_lo; + u8 rm = dstk ? tmp[0] : dst_hi; + + if (sstk) + emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx); + if (dstk) { + emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); + emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); + } + + /* Do the ARSH operation */ + emit(ARM_RSB_I(ARM_IP, rt, 32), ctx); + emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx); + /* As we are using ARM_LR */ + ctx->seen |= SEEN_CALL; + emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx); + emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx); + _emit(ARM_COND_MI, ARM_B(0), ctx); + emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASR, tmp2[0]), ctx); + emit(ARM_MOV_SR(ARM_IP, rm, SRTYPE_ASR, rt), ctx); + if (dstk) { + emit(ARM_STR_I(ARM_LR, ARM_SP, STACK_VAR(dst_lo)), ctx); + emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_hi)), ctx); + } else { + emit(ARM_MOV_R(rd, ARM_LR), ctx); + emit(ARM_MOV_R(rm, ARM_IP), ctx); + } +} + +/* dst = dst >> src */ +static inline void emit_a32_lsr_r64(const u8 dst[], const u8 src[], bool dstk, + bool sstk, struct jit_ctx *ctx) { + const u8 *tmp = bpf2a32[TMP_REG_1]; + const u8 *tmp2 = bpf2a32[TMP_REG_2]; + /* Setup Operands */ + u8 rt = sstk ? tmp2[1] : src_lo; + u8 rd = dstk ? tmp[1] : dst_lo; + u8 rm = dstk ? tmp[0] : dst_hi; + + if (sstk) + emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx); + if (dstk) { + emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); + emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); + } + + /* Do LSH operation */ + emit(ARM_RSB_I(ARM_IP, rt, 32), ctx); + emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx); + /* As we are using ARM_LR */ + ctx->seen |= SEEN_CALL; + emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx); + emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx); + emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_LSR, tmp2[0]), ctx); + emit(ARM_MOV_SR(ARM_IP, rm, SRTYPE_LSR, rt), ctx); + if (dstk) { + emit(ARM_STR_I(ARM_LR, ARM_SP, STACK_VAR(dst_lo)), ctx); + emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_hi)), ctx); + } else { + emit(ARM_MOV_R(rd, ARM_LR), ctx); + emit(ARM_MOV_R(rm, ARM_IP), ctx); + } +} + +/* dst = dst << val */ +static inline void emit_a32_lsh_i64(const u8 dst[], bool dstk, + const u32 val, struct jit_ctx *ctx){ + const u8 *tmp = bpf2a32[TMP_REG_1]; + const u8 *tmp2 = bpf2a32[TMP_REG_2]; + /* Setup operands */ + u8 rd = dstk ? tmp[1] : dst_lo; + u8 rm = dstk ? 
tmp[0] : dst_hi; + + if (dstk) { + emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); + emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); + } + + /* Do LSH operation */ + if (val < 32) { + emit(ARM_MOV_SI(tmp2[0], rm, SRTYPE_ASL, val), ctx); + emit(ARM_ORR_SI(rm, tmp2[0], rd, SRTYPE_LSR, 32 - val), ctx); + emit(ARM_MOV_SI(rd, rd, SRTYPE_ASL, val), ctx); + } else { + if (val == 32) + emit(ARM_MOV_R(rm, rd), ctx); + else + emit(ARM_MOV_SI(rm, rd, SRTYPE_ASL, val - 32), ctx); + emit(ARM_EOR_R(rd, rd, rd), ctx); + } + + if (dstk) { + emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); + emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); + } +} + +/* dst = dst >> val */ +static inline void emit_a32_lsr_i64(const u8 dst[], bool dstk, + const u32 val, struct jit_ctx *ctx) { + const u8 *tmp = bpf2a32[TMP_REG_1]; + const u8 *tmp2 = bpf2a32[TMP_REG_2]; + /* Setup operands */ + u8 rd = dstk ? tmp[1] : dst_lo; + u8 rm = dstk ? tmp[0] : dst_hi; + + if (dstk) { + emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); + emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); + } + + /* Do LSR operation */ + if (val < 32) { + emit(ARM_MOV_SI(tmp2[1], rd, SRTYPE_LSR, val), ctx); + emit(ARM_ORR_SI(rd, tmp2[1], rm, SRTYPE_ASL, 32 - val), ctx); + emit(ARM_MOV_SI(rm, rm, SRTYPE_LSR, val), ctx); + } else if (val == 32) { + emit(ARM_MOV_R(rd, rm), ctx); + emit(ARM_MOV_I(rm, 0), ctx); + } else { + emit(ARM_MOV_SI(rd, rm, SRTYPE_LSR, val - 32), ctx); + emit(ARM_MOV_I(rm, 0), ctx); + } + + if (dstk) { + emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); + emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); + } +} + +/* dst = dst >> val (signed) */ +static inline void emit_a32_arsh_i64(const u8 dst[], bool dstk, + const u32 val, struct jit_ctx *ctx){ + const u8 *tmp = bpf2a32[TMP_REG_1]; + const u8 *tmp2 = bpf2a32[TMP_REG_2]; + /* Setup operands */ + u8 rd = dstk ? tmp[1] : dst_lo; + u8 rm = dstk ? tmp[0] : dst_hi; + + if (dstk) { + emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); + emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); + } + + /* Do ARSH operation */ + if (val < 32) { + emit(ARM_MOV_SI(tmp2[1], rd, SRTYPE_LSR, val), ctx); + emit(ARM_ORR_SI(rd, tmp2[1], rm, SRTYPE_ASL, 32 - val), ctx); + emit(ARM_MOV_SI(rm, rm, SRTYPE_ASR, val), ctx); + } else if (val == 32) { + emit(ARM_MOV_R(rd, rm), ctx); + emit(ARM_MOV_SI(rm, rm, SRTYPE_ASR, 31), ctx); + } else { + emit(ARM_MOV_SI(rd, rm, SRTYPE_ASR, val - 32), ctx); + emit(ARM_MOV_SI(rm, rm, SRTYPE_ASR, 31), ctx); + } + + if (dstk) { + emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); + emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); + } +} + +static inline void emit_a32_mul_r64(const u8 dst[], const u8 src[], bool dstk, + bool sstk, struct jit_ctx *ctx) { + const u8 *tmp = bpf2a32[TMP_REG_1]; + const u8 *tmp2 = bpf2a32[TMP_REG_2]; + /* Setup operands for multiplication */ + u8 rd = dstk ? tmp[1] : dst_lo; + u8 rm = dstk ? tmp[0] : dst_hi; + u8 rt = sstk ? tmp2[1] : src_lo; + u8 rn = sstk ? 
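/*
 * Sketch (illustrative, not emitted code): the lo/hi decomposition that
 * emit_a32_lsh_i64() and emit_a32_arsh_i64() implement for constant shift
 * counts, assuming 0 <= val < 64 (larger immediates are rejected by the
 * caller).  Helper names are hypothetical.
 */
#include <stdint.h>

static void model_lsh_i64(uint32_t *lo, uint32_t *hi, unsigned int val)
{
	if (val == 0)
		return;
	if (val < 32) {
		*hi = (*hi << val) | (*lo >> (32 - val));
		*lo <<= val;
	} else {
		*hi = *lo << (val - 32);	/* val == 32 is a plain move */
		*lo = 0;
	}
}

static void model_arsh_i64(uint32_t *lo, uint32_t *hi, unsigned int val)
{
	int32_t shi = (int32_t)*hi;

	if (val == 0)
		return;
	if (val < 32) {
		*lo = (*lo >> val) | (*hi << (32 - val));
		*hi = (uint32_t)(shi >> val);
	} else {
		*lo = (uint32_t)(shi >> (val - 32));	/* val == 32 copies hi */
		*hi = (uint32_t)(shi >> 31);		/* replicate the sign */
	}
}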
tmp2[0] : src_hi; + + if (dstk) { + emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); + emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); + } + if (sstk) { + emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx); + emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_hi)), ctx); + } + + /* Do Multiplication */ + emit(ARM_MUL(ARM_IP, rd, rn), ctx); + emit(ARM_MUL(ARM_LR, rm, rt), ctx); + /* As we are using ARM_LR */ + ctx->seen |= SEEN_CALL; + emit(ARM_ADD_R(ARM_LR, ARM_IP, ARM_LR), ctx); + + emit(ARM_UMULL(ARM_IP, rm, rd, rt), ctx); + emit(ARM_ADD_R(rm, ARM_LR, rm), ctx); + if (dstk) { + emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_lo)), ctx); + emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); + } else { + emit(ARM_MOV_R(rd, ARM_IP), ctx); + } +} + +/* *(size *)(dst + off) = src */ +static inline void emit_str_r(const u8 dst, const u8 src, bool dstk, + const s32 off, struct jit_ctx *ctx, const u8 sz){ + const u8 *tmp = bpf2a32[TMP_REG_1]; + u8 rd = dstk ? tmp[1] : dst; + + if (dstk) + emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst)), ctx); + if (off) { + emit_a32_mov_i(tmp[0], off, false, ctx); + emit(ARM_ADD_R(tmp[0], rd, tmp[0]), ctx); + rd = tmp[0]; + } + switch (sz) { + case BPF_W: + /* Store a Word */ + emit(ARM_STR_I(src, rd, 0), ctx); + break; + case BPF_H: + /* Store a HalfWord */ + emit(ARM_STRH_I(src, rd, 0), ctx); + break; + case BPF_B: + /* Store a Byte */ + emit(ARM_STRB_I(src, rd, 0), ctx); + break; + } +} + +/* dst = *(size*)(src + off) */ +static inline void emit_ldx_r(const u8 dst, const u8 src, bool dstk, + const s32 off, struct jit_ctx *ctx, const u8 sz){ + const u8 *tmp = bpf2a32[TMP_REG_1]; + u8 rd = dstk ? tmp[1] : dst; + u8 rm = src; + + if (off) { + emit_a32_mov_i(tmp[0], off, false, ctx); + emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx); + rm = tmp[0]; + } + switch (sz) { + case BPF_W: + /* Load a Word */ + emit(ARM_LDR_I(rd, rm, 0), ctx); + break; + case BPF_H: + /* Load a HalfWord */ + emit(ARM_LDRH_I(rd, rm, 0), ctx); + break; + case BPF_B: + /* Load a Byte */ + emit(ARM_LDRB_I(rd, rm, 0), ctx); + break; + } + if (dstk) + emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst)), ctx); +} + +/* Arithmatic Operation */ +static inline void emit_ar_r(const u8 rd, const u8 rt, const u8 rm, + const u8 rn, struct jit_ctx *ctx, u8 op) { + switch (op) { + case BPF_JSET: + ctx->seen |= SEEN_CALL; + emit(ARM_AND_R(ARM_IP, rt, rn), ctx); + emit(ARM_AND_R(ARM_LR, rd, rm), ctx); + emit(ARM_ORRS_R(ARM_IP, ARM_LR, ARM_IP), ctx); + break; + case BPF_JEQ: + case BPF_JNE: + case BPF_JGT: + case BPF_JGE: + case BPF_JLE: + case BPF_JLT: + emit(ARM_CMP_R(rd, rm), ctx); + _emit(ARM_COND_EQ, ARM_CMP_R(rt, rn), ctx); + break; + case BPF_JSLE: + case BPF_JSGT: + emit(ARM_CMP_R(rn, rt), ctx); + emit(ARM_SBCS_R(ARM_IP, rm, rd), ctx); + break; + case BPF_JSLT: + case BPF_JSGE: + emit(ARM_CMP_R(rt, rn), ctx); + emit(ARM_SBCS_R(ARM_IP, rd, rm), ctx); + break; + } +} + +static int out_offset = -1; /* initialized on the first pass of build_body() */ +static int emit_bpf_tail_call(struct jit_ctx *ctx) +{ + + /* bpf_tail_call(void *prog_ctx, struct bpf_array *array, u64 index) */ + const u8 *r2 = bpf2a32[BPF_REG_2]; + const u8 *r3 = bpf2a32[BPF_REG_3]; + const u8 *tmp = bpf2a32[TMP_REG_1]; + const u8 *tmp2 = bpf2a32[TMP_REG_2]; + const u8 *tcc = bpf2a32[TCALL_CNT]; + const int idx0 = ctx->idx; +#define cur_offset (ctx->idx - idx0) +#define jmp_offset (out_offset - (cur_offset)) + u32 off, lo, hi; + + /* if (index >= array->map.max_entries) + * goto out; + */ + off = offsetof(struct bpf_array, map.max_entries); + /* 
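/*
 * Sketch (illustrative): the 64x64->64 product that the MUL/UMULL sequence
 * in emit_a32_mul_r64() builds from 32-bit halves.  Only the low 64 bits of
 * the mathematical product are kept, so the d_hi*s_hi term can be dropped.
 * The function name is hypothetical.
 */
#include <stdint.h>

static uint64_t model_mul64(uint32_t d_lo, uint32_t d_hi,
			    uint32_t s_lo, uint32_t s_hi)
{
	uint32_t cross = d_lo * s_hi + d_hi * s_lo;	/* mul ip; mul lr; add lr */
	uint64_t low   = (uint64_t)d_lo * s_lo;		/* umull ip, rm, rd, rt  */

	return low + ((uint64_t)cross << 32);		/* add rm, lr, rm        */
}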
array->map.max_entries */ + emit_a32_mov_i(tmp[1], off, false, ctx); + emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r2[1])), ctx); + emit(ARM_LDR_R(tmp[1], tmp2[1], tmp[1]), ctx); + /* index (64 bit) */ + emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r3[1])), ctx); + /* index >= array->map.max_entries */ + emit(ARM_CMP_R(tmp2[1], tmp[1]), ctx); + _emit(ARM_COND_CS, ARM_B(jmp_offset), ctx); + + /* if (tail_call_cnt > MAX_TAIL_CALL_CNT) + * goto out; + * tail_call_cnt++; + */ + lo = (u32)MAX_TAIL_CALL_CNT; + hi = (u32)((u64)MAX_TAIL_CALL_CNT >> 32); + emit(ARM_LDR_I(tmp[1], ARM_SP, STACK_VAR(tcc[1])), ctx); + emit(ARM_LDR_I(tmp[0], ARM_SP, STACK_VAR(tcc[0])), ctx); + emit(ARM_CMP_I(tmp[0], hi), ctx); + _emit(ARM_COND_EQ, ARM_CMP_I(tmp[1], lo), ctx); + _emit(ARM_COND_HI, ARM_B(jmp_offset), ctx); + emit(ARM_ADDS_I(tmp[1], tmp[1], 1), ctx); + emit(ARM_ADC_I(tmp[0], tmp[0], 0), ctx); + emit(ARM_STR_I(tmp[1], ARM_SP, STACK_VAR(tcc[1])), ctx); + emit(ARM_STR_I(tmp[0], ARM_SP, STACK_VAR(tcc[0])), ctx); + + /* prog = array->ptrs[index] + * if (prog == NULL) + * goto out; + */ + off = offsetof(struct bpf_array, ptrs); + emit_a32_mov_i(tmp[1], off, false, ctx); + emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r2[1])), ctx); + emit(ARM_ADD_R(tmp[1], tmp2[1], tmp[1]), ctx); + emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r3[1])), ctx); + emit(ARM_MOV_SI(tmp[0], tmp2[1], SRTYPE_ASL, 2), ctx); + emit(ARM_LDR_R(tmp[1], tmp[1], tmp[0]), ctx); + emit(ARM_CMP_I(tmp[1], 0), ctx); + _emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx); + + /* goto *(prog->bpf_func + prologue_size); */ + off = offsetof(struct bpf_prog, bpf_func); + emit_a32_mov_i(tmp2[1], off, false, ctx); + emit(ARM_LDR_R(tmp[1], tmp[1], tmp2[1]), ctx); + emit(ARM_ADD_I(tmp[1], tmp[1], ctx->prologue_bytes), ctx); + emit(ARM_BX(tmp[1]), ctx); + + /* out: */ + if (out_offset == -1) + out_offset = cur_offset; + if (cur_offset != out_offset) { + pr_err_once("tail_call out_offset = %d, expected %d!\n", + cur_offset, out_offset); + return -1; + } + return 0; +#undef cur_offset +#undef jmp_offset +} + +/* 0xabcd => 0xcdab */ +static inline void emit_rev16(const u8 rd, const u8 rn, struct jit_ctx *ctx) +{ +#if __LINUX_ARM_ARCH__ < 6 + const u8 *tmp2 = bpf2a32[TMP_REG_2]; + + emit(ARM_AND_I(tmp2[1], rn, 0xff), ctx); + emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 8), ctx); + emit(ARM_AND_I(tmp2[0], tmp2[0], 0xff), ctx); + emit(ARM_ORR_SI(rd, tmp2[0], tmp2[1], SRTYPE_LSL, 8), ctx); +#else /* ARMv6+ */ + emit(ARM_REV16(rd, rn), ctx); +#endif +} + +/* 0xabcdefgh => 0xghefcdab */ +static inline void emit_rev32(const u8 rd, const u8 rn, struct jit_ctx *ctx) +{ +#if __LINUX_ARM_ARCH__ < 6 + const u8 *tmp2 = bpf2a32[TMP_REG_2]; + + emit(ARM_AND_I(tmp2[1], rn, 0xff), ctx); + emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 24), ctx); + emit(ARM_ORR_SI(ARM_IP, tmp2[0], tmp2[1], SRTYPE_LSL, 24), ctx); + + emit(ARM_MOV_SI(tmp2[1], rn, SRTYPE_LSR, 8), ctx); + emit(ARM_AND_I(tmp2[1], tmp2[1], 0xff), ctx); + emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 16), ctx); + emit(ARM_AND_I(tmp2[0], tmp2[0], 0xff), ctx); + emit(ARM_MOV_SI(tmp2[0], tmp2[0], SRTYPE_LSL, 8), ctx); + emit(ARM_ORR_SI(tmp2[0], tmp2[0], tmp2[1], SRTYPE_LSL, 16), ctx); + emit(ARM_ORR_R(rd, ARM_IP, tmp2[0]), ctx); + +#else /* ARMv6+ */ + emit(ARM_REV(rd, rn), ctx); +#endif +} + +// push the scratch stack register on top of the stack +static inline void emit_push_r64(const u8 src[], const u8 shift, + struct jit_ctx *ctx) +{ + const u8 *tmp2 = bpf2a32[TMP_REG_2]; + u16 reg_set = 0; + + emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(src[1]+shift)), 
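/*
 * Sketch (illustrative): the byte swaps that emit_rev16()/emit_rev32() open
 * code with AND/LSR/ORR on pre-ARMv6 cores, where REV/REV16 are not
 * available.  Host-side models with hypothetical names; the 16-bit variant
 * only swaps the low halfword, matching how the JIT uses it for the 16-bit
 * endian conversion.
 */
#include <stdint.h>

static uint16_t model_rev16(uint16_t x)
{
	return (uint16_t)((x & 0xff) << 8 | x >> 8);
}

static uint32_t model_rev32(uint32_t x)
{
	return (x & 0x000000ff) << 24 |
	       (x & 0x0000ff00) <<  8 |
	       (x & 0x00ff0000) >>  8 |
	        x >> 24;
}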
ctx); + emit(ARM_LDR_I(tmp2[0], ARM_SP, STACK_VAR(src[0]+shift)), ctx); + + reg_set = (1 << tmp2[1]) | (1 << tmp2[0]); + emit(ARM_PUSH(reg_set), ctx); +} + +static void build_prologue(struct jit_ctx *ctx) +{ + const u8 r0 = bpf2a32[BPF_REG_0][1]; + const u8 r2 = bpf2a32[BPF_REG_1][1]; + const u8 r3 = bpf2a32[BPF_REG_1][0]; + const u8 r4 = bpf2a32[BPF_REG_6][1]; + const u8 r5 = bpf2a32[BPF_REG_6][0]; + const u8 r6 = bpf2a32[TMP_REG_1][1]; + const u8 r7 = bpf2a32[TMP_REG_1][0]; + const u8 r8 = bpf2a32[TMP_REG_2][1]; + const u8 r10 = bpf2a32[TMP_REG_2][0]; + const u8 fplo = bpf2a32[BPF_REG_FP][1]; + const u8 fphi = bpf2a32[BPF_REG_FP][0]; + const u8 sp = ARM_SP; + const u8 *tcc = bpf2a32[TCALL_CNT]; + + u16 reg_set = 0; + + /* + * eBPF prog stack layout + * + * high + * original ARM_SP => +-----+ eBPF prologue + * |FP/LR| + * current ARM_FP => +-----+ + * | ... | callee saved registers + * eBPF fp register => +-----+ <= (BPF_FP) + * | ... | eBPF JIT scratch space + * | | eBPF prog stack + * +-----+ + * |RSVD | JIT scratchpad + * current A64_SP => +-----+ <= (BPF_FP - STACK_SIZE) + * | | + * | ... | Function call stack + * | | + * +-----+ + * low + */ + + /* Save callee saved registers. */ + reg_set |= (1<seen & SEEN_CALL) + reg_set |= (1<stack_size = imm8m(STACK_SIZE); + + /* Set up function call stack */ + emit(ARM_SUB_I(ARM_SP, ARM_SP, ctx->stack_size), ctx); + + /* Set up BPF prog stack base register */ + emit_a32_mov_r(fplo, ARM_IP, true, false, ctx); + emit_a32_mov_i(fphi, 0, true, ctx); + + /* mov r4, 0 */ + emit(ARM_MOV_I(r4, 0), ctx); + + /* Move BPF_CTX to BPF_R1 */ + emit(ARM_MOV_R(r3, r4), ctx); + emit(ARM_MOV_R(r2, r0), ctx); + /* Initialize Tail Count */ + emit(ARM_STR_I(r4, ARM_SP, STACK_VAR(tcc[0])), ctx); + emit(ARM_STR_I(r4, ARM_SP, STACK_VAR(tcc[1])), ctx); + /* end of prologue */ +} + +static void build_epilogue(struct jit_ctx *ctx) +{ + const u8 r4 = bpf2a32[BPF_REG_6][1]; + const u8 r5 = bpf2a32[BPF_REG_6][0]; + const u8 r6 = bpf2a32[TMP_REG_1][1]; + const u8 r7 = bpf2a32[TMP_REG_1][0]; + const u8 r8 = bpf2a32[TMP_REG_2][1]; + const u8 r10 = bpf2a32[TMP_REG_2][0]; + u16 reg_set = 0; + + /* unwind function call stack */ + emit(ARM_ADD_I(ARM_SP, ARM_SP, ctx->stack_size), ctx); + + /* restore callee saved registers. */ + reg_set |= (1<seen & SEEN_CALL) + reg_set |= (1<seen & SEEN_CALL)) + emit(ARM_BX(ARM_LR), ctx); +#endif +} + +/* + * Convert an eBPF instruction to native instruction, i.e + * JITs an eBPF instruction. + * Returns : + * 0 - Successfully JITed an 8-byte eBPF instruction + * >0 - Successfully JITed a 16-byte eBPF instruction + * <0 - Failed to JIT. 
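/*
 * Sketch of the imm8m() helper used above for ctx->stack_size and the
 * call-site stack adjustments (its real definition presumably lives earlier
 * in this file): an ARM data-processing immediate is an 8-bit value rotated
 * right by an even amount, so the helper returns the 12-bit rotate/value
 * encoding, or a negative number when the constant does not fit and has to
 * be built another way (e.g. MOVW/MOVT or a literal-pool load).
 */
#include <stdint.h>

static int model_imm8m(uint32_t x)
{
	unsigned int rot;

	for (rot = 0; rot < 16; rot++) {
		/* rotate left by 2*rot, i.e. undo a ROR #(2*rot) encoding */
		uint32_t v = rot ? (x << 2 * rot) | (x >> (32 - 2 * rot)) : x;

		if (v <= 0xff)
			return (int)(rot << 8 | v);
	}
	return -1;
}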
+ */ +static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) +{ + const u8 code = insn->code; + const u8 *dst = bpf2a32[insn->dst_reg]; + const u8 *src = bpf2a32[insn->src_reg]; + const u8 *tmp = bpf2a32[TMP_REG_1]; + const u8 *tmp2 = bpf2a32[TMP_REG_2]; + const s16 off = insn->off; + const s32 imm = insn->imm; + const int i = insn - ctx->prog->insnsi; + const bool is64 = BPF_CLASS(code) == BPF_ALU64; + const bool dstk = is_on_stack(insn->dst_reg); + const bool sstk = is_on_stack(insn->src_reg); + u8 rd, rt, rm, rn; + s32 jmp_offset; + +#define check_imm(bits, imm) do { \ + if ((((imm) > 0) && ((imm) >> (bits))) || \ + (((imm) < 0) && (~(imm) >> (bits)))) { \ + pr_info("[%2d] imm=%d(0x%x) out of range\n", \ + i, imm, imm); \ + return -EINVAL; \ + } \ +} while (0) +#define check_imm24(imm) check_imm(24, imm) + + switch (code) { + /* ALU operations */ + + /* dst = src */ + case BPF_ALU | BPF_MOV | BPF_K: + case BPF_ALU | BPF_MOV | BPF_X: + case BPF_ALU64 | BPF_MOV | BPF_K: + case BPF_ALU64 | BPF_MOV | BPF_X: + switch (BPF_SRC(code)) { + case BPF_X: + emit_a32_mov_r64(is64, dst, src, dstk, sstk, ctx); + break; + case BPF_K: + /* Sign-extend immediate value to destination reg */ + emit_a32_mov_i64(is64, dst, imm, dstk, ctx); + break; + } + break; + /* dst = dst + src/imm */ + /* dst = dst - src/imm */ + /* dst = dst | src/imm */ + /* dst = dst & src/imm */ + /* dst = dst ^ src/imm */ + /* dst = dst * src/imm */ + /* dst = dst << src */ + /* dst = dst >> src */ + case BPF_ALU | BPF_ADD | BPF_K: + case BPF_ALU | BPF_ADD | BPF_X: + case BPF_ALU | BPF_SUB | BPF_K: + case BPF_ALU | BPF_SUB | BPF_X: + case BPF_ALU | BPF_OR | BPF_K: + case BPF_ALU | BPF_OR | BPF_X: + case BPF_ALU | BPF_AND | BPF_K: + case BPF_ALU | BPF_AND | BPF_X: + case BPF_ALU | BPF_XOR | BPF_K: + case BPF_ALU | BPF_XOR | BPF_X: + case BPF_ALU | BPF_MUL | BPF_K: + case BPF_ALU | BPF_MUL | BPF_X: + case BPF_ALU | BPF_LSH | BPF_X: + case BPF_ALU | BPF_RSH | BPF_X: + case BPF_ALU | BPF_ARSH | BPF_K: + case BPF_ALU | BPF_ARSH | BPF_X: + case BPF_ALU64 | BPF_ADD | BPF_K: + case BPF_ALU64 | BPF_ADD | BPF_X: + case BPF_ALU64 | BPF_SUB | BPF_K: + case BPF_ALU64 | BPF_SUB | BPF_X: + case BPF_ALU64 | BPF_OR | BPF_K: + case BPF_ALU64 | BPF_OR | BPF_X: + case BPF_ALU64 | BPF_AND | BPF_K: + case BPF_ALU64 | BPF_AND | BPF_X: + case BPF_ALU64 | BPF_XOR | BPF_K: + case BPF_ALU64 | BPF_XOR | BPF_X: + switch (BPF_SRC(code)) { + case BPF_X: + emit_a32_alu_r64(is64, dst, src, dstk, sstk, + ctx, BPF_OP(code)); + break; + case BPF_K: + /* Move immediate value to the temporary register + * and then do the ALU operation on the temporary + * register as this will sign-extend the immediate + * value into temporary reg and then it would be + * safe to do the operation on it. + */ + emit_a32_mov_i64(is64, tmp2, imm, false, ctx); + emit_a32_alu_r64(is64, dst, tmp2, dstk, false, + ctx, BPF_OP(code)); + break; + } + break; + /* dst = dst / src(imm) */ + /* dst = dst % src(imm) */ + case BPF_ALU | BPF_DIV | BPF_K: + case BPF_ALU | BPF_DIV | BPF_X: + case BPF_ALU | BPF_MOD | BPF_K: + case BPF_ALU | BPF_MOD | BPF_X: + rt = src_lo; + rd = dstk ? tmp2[1] : dst_lo; + if (dstk) + emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); + switch (BPF_SRC(code)) { + case BPF_X: + rt = sstk ? 
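/*
 * Sketch (illustrative): the sign extension that emit_a32_mov_i64() applies
 * when a 32-bit immediate is materialised into a lo/hi pair so that the
 * 64-bit register-register helpers above can also serve the BPF_K forms.
 * Hypothetical host-side helper, not emitted code.
 */
#include <stdint.h>

static void model_mov_i64(uint32_t *lo, uint32_t *hi, int32_t imm, int is64)
{
	*lo = (uint32_t)imm;
	*hi = (is64 && imm < 0) ? 0xffffffffu : 0;	/* hi = (u32)~0 iff bit 31 set */
}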
tmp2[0] : rt; + if (sstk) + emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), + ctx); + break; + case BPF_K: + rt = tmp2[0]; + emit_a32_mov_i(rt, imm, false, ctx); + break; + } + emit_udivmod(rd, rd, rt, ctx, BPF_OP(code)); + if (dstk) + emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); + emit_a32_mov_i(dst_hi, 0, dstk, ctx); + break; + case BPF_ALU64 | BPF_DIV | BPF_K: + case BPF_ALU64 | BPF_DIV | BPF_X: + case BPF_ALU64 | BPF_MOD | BPF_K: + case BPF_ALU64 | BPF_MOD | BPF_X: + goto notyet; + /* dst = dst >> imm */ + /* dst = dst << imm */ + case BPF_ALU | BPF_RSH | BPF_K: + case BPF_ALU | BPF_LSH | BPF_K: + if (unlikely(imm > 31)) + return -EINVAL; + if (imm) + emit_a32_alu_i(dst_lo, imm, dstk, ctx, BPF_OP(code)); + emit_a32_mov_i(dst_hi, 0, dstk, ctx); + break; + /* dst = dst << imm */ + case BPF_ALU64 | BPF_LSH | BPF_K: + if (unlikely(imm > 63)) + return -EINVAL; + emit_a32_lsh_i64(dst, dstk, imm, ctx); + break; + /* dst = dst >> imm */ + case BPF_ALU64 | BPF_RSH | BPF_K: + if (unlikely(imm > 63)) + return -EINVAL; + emit_a32_lsr_i64(dst, dstk, imm, ctx); + break; + /* dst = dst << src */ + case BPF_ALU64 | BPF_LSH | BPF_X: + emit_a32_lsh_r64(dst, src, dstk, sstk, ctx); + break; + /* dst = dst >> src */ + case BPF_ALU64 | BPF_RSH | BPF_X: + emit_a32_lsr_r64(dst, src, dstk, sstk, ctx); + break; + /* dst = dst >> src (signed) */ + case BPF_ALU64 | BPF_ARSH | BPF_X: + emit_a32_arsh_r64(dst, src, dstk, sstk, ctx); + break; + /* dst = dst >> imm (signed) */ + case BPF_ALU64 | BPF_ARSH | BPF_K: + if (unlikely(imm > 63)) + return -EINVAL; + emit_a32_arsh_i64(dst, dstk, imm, ctx); + break; + /* dst = ~dst */ + case BPF_ALU | BPF_NEG: + emit_a32_alu_i(dst_lo, 0, dstk, ctx, BPF_OP(code)); + emit_a32_mov_i(dst_hi, 0, dstk, ctx); + break; + /* dst = ~dst (64 bit) */ + case BPF_ALU64 | BPF_NEG: + emit_a32_neg64(dst, dstk, ctx); + break; + /* dst = dst * src/imm */ + case BPF_ALU64 | BPF_MUL | BPF_X: + case BPF_ALU64 | BPF_MUL | BPF_K: + switch (BPF_SRC(code)) { + case BPF_X: + emit_a32_mul_r64(dst, src, dstk, sstk, ctx); + break; + case BPF_K: + /* Move immediate value to the temporary register + * and then do the multiplication on it as this + * will sign-extend the immediate value into temp + * reg then it would be safe to do the operation + * on it. + */ + emit_a32_mov_i64(is64, tmp2, imm, false, ctx); + emit_a32_mul_r64(dst, tmp2, dstk, false, ctx); + break; + } + break; + /* dst = htole(dst) */ + /* dst = htobe(dst) */ + case BPF_ALU | BPF_END | BPF_FROM_LE: + case BPF_ALU | BPF_END | BPF_FROM_BE: + rd = dstk ? tmp[0] : dst_hi; + rt = dstk ? 
tmp[1] : dst_lo; + if (dstk) { + emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(dst_lo)), ctx); + emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_hi)), ctx); + } + if (BPF_SRC(code) == BPF_FROM_LE) + goto emit_bswap_uxt; + switch (imm) { + case 16: + emit_rev16(rt, rt, ctx); + goto emit_bswap_uxt; + case 32: + emit_rev32(rt, rt, ctx); + goto emit_bswap_uxt; + case 64: + /* Because of the usage of ARM_LR */ + ctx->seen |= SEEN_CALL; + emit_rev32(ARM_LR, rt, ctx); + emit_rev32(rt, rd, ctx); + emit(ARM_MOV_R(rd, ARM_LR), ctx); + break; + } + goto exit; +emit_bswap_uxt: + switch (imm) { + case 16: + /* zero-extend 16 bits into 64 bits */ +#if __LINUX_ARM_ARCH__ < 6 + emit_a32_mov_i(tmp2[1], 0xffff, false, ctx); + emit(ARM_AND_R(rt, rt, tmp2[1]), ctx); +#else /* ARMv6+ */ + emit(ARM_UXTH(rt, rt), ctx); +#endif + emit(ARM_EOR_R(rd, rd, rd), ctx); + break; + case 32: + /* zero-extend 32 bits into 64 bits */ + emit(ARM_EOR_R(rd, rd, rd), ctx); + break; + case 64: + /* nop */ + break; + } +exit: + if (dstk) { + emit(ARM_STR_I(rt, ARM_SP, STACK_VAR(dst_lo)), ctx); + emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_hi)), ctx); + } + break; + /* dst = imm64 */ + case BPF_LD | BPF_IMM | BPF_DW: + { + const struct bpf_insn insn1 = insn[1]; + u32 hi, lo = imm; + + hi = insn1.imm; + emit_a32_mov_i(dst_lo, lo, dstk, ctx); + emit_a32_mov_i(dst_hi, hi, dstk, ctx); + + return 1; + } + /* LDX: dst = *(size *)(src + off) */ + case BPF_LDX | BPF_MEM | BPF_W: + case BPF_LDX | BPF_MEM | BPF_H: + case BPF_LDX | BPF_MEM | BPF_B: + case BPF_LDX | BPF_MEM | BPF_DW: + rn = sstk ? tmp2[1] : src_lo; + if (sstk) + emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx); + switch (BPF_SIZE(code)) { + case BPF_W: + /* Load a Word */ + case BPF_H: + /* Load a Half-Word */ + case BPF_B: + /* Load a Byte */ + emit_ldx_r(dst_lo, rn, dstk, off, ctx, BPF_SIZE(code)); + emit_a32_mov_i(dst_hi, 0, dstk, ctx); + break; + case BPF_DW: + /* Load a double word */ + emit_ldx_r(dst_lo, rn, dstk, off, ctx, BPF_W); + emit_ldx_r(dst_hi, rn, dstk, off+4, ctx, BPF_W); + break; + } + break; + /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */ + case BPF_LD | BPF_ABS | BPF_W: + case BPF_LD | BPF_ABS | BPF_H: + case BPF_LD | BPF_ABS | BPF_B: + /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + src + imm)) */ + case BPF_LD | BPF_IND | BPF_W: + case BPF_LD | BPF_IND | BPF_H: + case BPF_LD | BPF_IND | BPF_B: + { + const u8 r4 = bpf2a32[BPF_REG_6][1]; /* r4 = ptr to sk_buff */ + const u8 r0 = bpf2a32[BPF_REG_0][1]; /*r0: struct sk_buff *skb*/ + /* rtn value */ + const u8 r1 = bpf2a32[BPF_REG_0][0]; /* r1: int k */ + const u8 r2 = bpf2a32[BPF_REG_1][1]; /* r2: unsigned int size */ + const u8 r3 = bpf2a32[BPF_REG_1][0]; /* r3: void *buffer */ + const u8 r6 = bpf2a32[TMP_REG_1][1]; /* r6: void *(*func)(..) 
*/ + int size; + + /* Setting up first argument */ + emit(ARM_MOV_R(r0, r4), ctx); + + /* Setting up second argument */ + emit_a32_mov_i(r1, imm, false, ctx); + if (BPF_MODE(code) == BPF_IND) + emit_a32_alu_r(r1, src_lo, false, sstk, ctx, + false, false, BPF_ADD); + + /* Setting up third argument */ + switch (BPF_SIZE(code)) { + case BPF_W: + size = 4; + break; + case BPF_H: + size = 2; + break; + case BPF_B: + size = 1; + break; + default: + return -EINVAL; + } + emit_a32_mov_i(r2, size, false, ctx); + + /* Setting up fourth argument */ + emit(ARM_ADD_I(r3, ARM_SP, imm8m(SKB_BUFFER)), ctx); + + /* Setting up function pointer to call */ + emit_a32_mov_i(r6, (unsigned int)bpf_load_pointer, false, ctx); + emit_blx_r(r6, ctx); + + emit(ARM_EOR_R(r1, r1, r1), ctx); + /* Check if return address is NULL or not. + * if NULL then jump to epilogue + * else continue to load the value from retn address + */ + emit(ARM_CMP_I(r0, 0), ctx); + jmp_offset = epilogue_offset(ctx); + check_imm24(jmp_offset); + _emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx); + + /* Load value from the address */ + switch (BPF_SIZE(code)) { + case BPF_W: + emit(ARM_LDR_I(r0, r0, 0), ctx); + emit_rev32(r0, r0, ctx); + break; + case BPF_H: + emit(ARM_LDRH_I(r0, r0, 0), ctx); + emit_rev16(r0, r0, ctx); + break; + case BPF_B: + emit(ARM_LDRB_I(r0, r0, 0), ctx); + /* No need to reverse */ + break; + } + break; + } + /* ST: *(size *)(dst + off) = imm */ + case BPF_ST | BPF_MEM | BPF_W: + case BPF_ST | BPF_MEM | BPF_H: + case BPF_ST | BPF_MEM | BPF_B: + case BPF_ST | BPF_MEM | BPF_DW: + switch (BPF_SIZE(code)) { + case BPF_DW: + /* Sign-extend immediate value into temp reg */ + emit_a32_mov_i64(true, tmp2, imm, false, ctx); + emit_str_r(dst_lo, tmp2[1], dstk, off, ctx, BPF_W); + emit_str_r(dst_lo, tmp2[0], dstk, off+4, ctx, BPF_W); + break; + case BPF_W: + case BPF_H: + case BPF_B: + emit_a32_mov_i(tmp2[1], imm, false, ctx); + emit_str_r(dst_lo, tmp2[1], dstk, off, ctx, + BPF_SIZE(code)); + break; + } + break; + /* STX XADD: lock *(u32 *)(dst + off) += src */ + case BPF_STX | BPF_XADD | BPF_W: + /* STX XADD: lock *(u64 *)(dst + off) += src */ + case BPF_STX | BPF_XADD | BPF_DW: + goto notyet; + /* STX: *(size *)(dst + off) = src */ + case BPF_STX | BPF_MEM | BPF_W: + case BPF_STX | BPF_MEM | BPF_H: + case BPF_STX | BPF_MEM | BPF_B: + case BPF_STX | BPF_MEM | BPF_DW: + { + u8 sz = BPF_SIZE(code); + + rn = sstk ? tmp2[1] : src_lo; + rm = sstk ? 
tmp2[0] : src_hi; + if (sstk) { + emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx); + emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(src_hi)), ctx); + } + + /* Store the value */ + if (BPF_SIZE(code) == BPF_DW) { + emit_str_r(dst_lo, rn, dstk, off, ctx, BPF_W); + emit_str_r(dst_lo, rm, dstk, off+4, ctx, BPF_W); + } else { + emit_str_r(dst_lo, rn, dstk, off, ctx, sz); + } + break; + } + /* PC += off if dst == src */ + /* PC += off if dst > src */ + /* PC += off if dst >= src */ + /* PC += off if dst < src */ + /* PC += off if dst <= src */ + /* PC += off if dst != src */ + /* PC += off if dst > src (signed) */ + /* PC += off if dst >= src (signed) */ + /* PC += off if dst < src (signed) */ + /* PC += off if dst <= src (signed) */ + /* PC += off if dst & src */ + case BPF_JMP | BPF_JEQ | BPF_X: + case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JNE | BPF_X: + case BPF_JMP | BPF_JSGT | BPF_X: + case BPF_JMP | BPF_JSGE | BPF_X: + case BPF_JMP | BPF_JSET | BPF_X: + case BPF_JMP | BPF_JLE | BPF_X: + case BPF_JMP | BPF_JLT | BPF_X: + case BPF_JMP | BPF_JSLT | BPF_X: + case BPF_JMP | BPF_JSLE | BPF_X: + /* Setup source registers */ + rm = sstk ? tmp2[0] : src_hi; + rn = sstk ? tmp2[1] : src_lo; + if (sstk) { + emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx); + emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(src_hi)), ctx); + } + goto go_jmp; + /* PC += off if dst == imm */ + /* PC += off if dst > imm */ + /* PC += off if dst >= imm */ + /* PC += off if dst < imm */ + /* PC += off if dst <= imm */ + /* PC += off if dst != imm */ + /* PC += off if dst > imm (signed) */ + /* PC += off if dst >= imm (signed) */ + /* PC += off if dst < imm (signed) */ + /* PC += off if dst <= imm (signed) */ + /* PC += off if dst & imm */ + case BPF_JMP | BPF_JEQ | BPF_K: + case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JNE | BPF_K: + case BPF_JMP | BPF_JSGT | BPF_K: + case BPF_JMP | BPF_JSGE | BPF_K: + case BPF_JMP | BPF_JSET | BPF_K: + case BPF_JMP | BPF_JLT | BPF_K: + case BPF_JMP | BPF_JLE | BPF_K: + case BPF_JMP | BPF_JSLT | BPF_K: + case BPF_JMP | BPF_JSLE | BPF_K: + if (off == 0) + break; + rm = tmp2[0]; + rn = tmp2[1]; + /* Sign-extend immediate value */ + emit_a32_mov_i64(true, tmp2, imm, false, ctx); +go_jmp: + /* Setup destination register */ + rd = dstk ? tmp[0] : dst_hi; + rt = dstk ? 
tmp[1] : dst_lo; + if (dstk) { + emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(dst_lo)), ctx); + emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_hi)), ctx); + } + + /* Check for the condition */ + emit_ar_r(rd, rt, rm, rn, ctx, BPF_OP(code)); + + /* Setup JUMP instruction */ + jmp_offset = bpf2a32_offset(i+off, i, ctx); + switch (BPF_OP(code)) { + case BPF_JNE: + case BPF_JSET: + _emit(ARM_COND_NE, ARM_B(jmp_offset), ctx); + break; + case BPF_JEQ: + _emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx); + break; + case BPF_JGT: + _emit(ARM_COND_HI, ARM_B(jmp_offset), ctx); + break; + case BPF_JGE: + _emit(ARM_COND_CS, ARM_B(jmp_offset), ctx); + break; + case BPF_JSGT: + _emit(ARM_COND_LT, ARM_B(jmp_offset), ctx); + break; + case BPF_JSGE: + _emit(ARM_COND_GE, ARM_B(jmp_offset), ctx); + break; + case BPF_JLE: + _emit(ARM_COND_LS, ARM_B(jmp_offset), ctx); + break; + case BPF_JLT: + _emit(ARM_COND_CC, ARM_B(jmp_offset), ctx); + break; + case BPF_JSLT: + _emit(ARM_COND_LT, ARM_B(jmp_offset), ctx); + break; + case BPF_JSLE: + _emit(ARM_COND_GE, ARM_B(jmp_offset), ctx); + break; + } + break; + /* JMP OFF */ + case BPF_JMP | BPF_JA: + { + if (off == 0) + break; + jmp_offset = bpf2a32_offset(i+off, i, ctx); + check_imm24(jmp_offset); + emit(ARM_B(jmp_offset), ctx); + break; + } + /* tail call */ + case BPF_JMP | BPF_TAIL_CALL: + if (emit_bpf_tail_call(ctx)) + return -EFAULT; + break; + /* function call */ + case BPF_JMP | BPF_CALL: + { + const u8 *r0 = bpf2a32[BPF_REG_0]; + const u8 *r1 = bpf2a32[BPF_REG_1]; + const u8 *r2 = bpf2a32[BPF_REG_2]; + const u8 *r3 = bpf2a32[BPF_REG_3]; + const u8 *r4 = bpf2a32[BPF_REG_4]; + const u8 *r5 = bpf2a32[BPF_REG_5]; + const u32 func = (u32)__bpf_call_base + (u32)imm; + + emit_a32_mov_r64(true, r0, r1, false, false, ctx); + emit_a32_mov_r64(true, r1, r2, false, true, ctx); + emit_push_r64(r5, 0, ctx); + emit_push_r64(r4, 8, ctx); + emit_push_r64(r3, 16, ctx); + + emit_a32_mov_i(tmp[1], func, false, ctx); + emit_blx_r(tmp[1], ctx); + + emit(ARM_ADD_I(ARM_SP, ARM_SP, imm8m(24)), ctx); // callee clean + break; + } + /* function return */ + case BPF_JMP | BPF_EXIT: + /* Optimization: when last instruction is EXIT + * simply fallthrough to epilogue. + */ + if (i == ctx->prog->len - 1) + break; + jmp_offset = epilogue_offset(ctx); + check_imm24(jmp_offset); + emit(ARM_B(jmp_offset), ctx); + break; +notyet: + pr_info_once("*** NOT YET: opcode %02x ***\n", code); + return -EFAULT; + default: + pr_err_once("unknown opcode %02x\n", code); + return -EINVAL; + } + + if (ctx->flags & FLAG_IMM_OVERFLOW) + /* + * this instruction generated an overflow when + * trying to access the literal pool, so + * delegate this filter to the kernel interpreter. 
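/*
 * Sketch (illustrative): the 64-bit unsigned comparison that emit_ar_r()
 * sets up for the JEQ/JNE/JGT/JGE/JLT/JLE cases handled above — the high
 * words are compared first and the low words only break the tie, so a
 * single unsigned condition code (EQ, NE, HI, CS, CC, LS) can drive the
 * branch.  The signed forms instead compute src - dst with CMP/SBCS and
 * branch on the N and V flags.  Hypothetical host-side model.
 */
#include <stdint.h>
#include <stdbool.h>

static bool model_jgt_u64(uint32_t d_lo, uint32_t d_hi,
			  uint32_t s_lo, uint32_t s_hi)
{
	if (d_hi != s_hi)		/* cmp   rd, rm           */
		return d_hi > s_hi;	/* ...bhi <target>        */
	return d_lo > s_lo;		/* cmpeq rt, rn ; bhi ... */
}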
+ */ + return -1; + return 0; } static int build_body(struct jit_ctx *ctx) { - void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w}; - const struct bpf_prog *prog = ctx->skf; - const struct sock_filter *inst; - unsigned i, load_order, off, condt; - int imm12; - u32 k; + const struct bpf_prog *prog = ctx->prog; + unsigned int i; for (i = 0; i < prog->len; i++) { - u16 code; + const struct bpf_insn *insn = &(prog->insnsi[i]); + int ret; - inst = &(prog->insns[i]); - /* K as an immediate value operand */ - k = inst->k; - code = bpf_anc_helper(inst); + ret = build_insn(insn, ctx); - /* compute offsets only in the fake pass */ - if (ctx->target == NULL) - ctx->offsets[i] = ctx->idx * 4; - - switch (code) { - case BPF_LD | BPF_IMM: - emit_mov_i(r_A, k, ctx); - break; - case BPF_LD | BPF_W | BPF_LEN: - ctx->seen |= SEEN_SKB; - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); - emit(ARM_LDR_I(r_A, r_skb, - offsetof(struct sk_buff, len)), ctx); - break; - case BPF_LD | BPF_MEM: - /* A = scratch[k] */ - ctx->seen |= SEEN_MEM_WORD(k); - emit(ARM_LDR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx); - break; - case BPF_LD | BPF_W | BPF_ABS: - load_order = 2; - goto load; - case BPF_LD | BPF_H | BPF_ABS: - load_order = 1; - goto load; - case BPF_LD | BPF_B | BPF_ABS: - load_order = 0; -load: - emit_mov_i(r_off, k, ctx); -load_common: - ctx->seen |= SEEN_DATA | SEEN_CALL; - - if (load_order > 0) { - emit(ARM_SUB_I(r_scratch, r_skb_hl, - 1 << load_order), ctx); - emit(ARM_CMP_R(r_scratch, r_off), ctx); - condt = ARM_COND_GE; - } else { - emit(ARM_CMP_R(r_skb_hl, r_off), ctx); - condt = ARM_COND_HI; - } - - /* - * test for negative offset, only if we are - * currently scheduled to take the fast - * path. this will update the flags so that - * the slowpath instruction are ignored if the - * offset is negative. - * - * for loard_order == 0 the HI condition will - * make loads at offset 0 take the slow path too. 
- */ - _emit(condt, ARM_CMP_I(r_off, 0), ctx); - - _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data), - ctx); - - if (load_order == 0) - _emit(condt, ARM_LDRB_I(r_A, r_scratch, 0), - ctx); - else if (load_order == 1) - emit_load_be16(condt, r_A, r_scratch, ctx); - else if (load_order == 2) - emit_load_be32(condt, r_A, r_scratch, ctx); - - _emit(condt, ARM_B(b_imm(i + 1, ctx)), ctx); - - /* the slowpath */ - emit_mov_i(ARM_R3, (u32)load_func[load_order], ctx); - emit(ARM_MOV_R(ARM_R0, r_skb), ctx); - /* the offset is already in R1 */ - emit_blx_r(ARM_R3, ctx); - /* check the result of skb_copy_bits */ - emit(ARM_CMP_I(ARM_R1, 0), ctx); - emit_err_ret(ARM_COND_NE, ctx); - emit(ARM_MOV_R(r_A, ARM_R0), ctx); - break; - case BPF_LD | BPF_W | BPF_IND: - load_order = 2; - goto load_ind; - case BPF_LD | BPF_H | BPF_IND: - load_order = 1; - goto load_ind; - case BPF_LD | BPF_B | BPF_IND: - load_order = 0; -load_ind: - update_on_xread(ctx); - OP_IMM3(ARM_ADD, r_off, r_X, k, ctx); - goto load_common; - case BPF_LDX | BPF_IMM: - ctx->seen |= SEEN_X; - emit_mov_i(r_X, k, ctx); - break; - case BPF_LDX | BPF_W | BPF_LEN: - ctx->seen |= SEEN_X | SEEN_SKB; - emit(ARM_LDR_I(r_X, r_skb, - offsetof(struct sk_buff, len)), ctx); - break; - case BPF_LDX | BPF_MEM: - ctx->seen |= SEEN_X | SEEN_MEM_WORD(k); - emit(ARM_LDR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx); - break; - case BPF_LDX | BPF_B | BPF_MSH: - /* x = ((*(frame + k)) & 0xf) << 2; */ - ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL; - /* the interpreter should deal with the negative K */ - if ((int)k < 0) - return -1; - /* offset in r1: we might have to take the slow path */ - emit_mov_i(r_off, k, ctx); - emit(ARM_CMP_R(r_skb_hl, r_off), ctx); - - /* load in r0: common with the slowpath */ - _emit(ARM_COND_HI, ARM_LDRB_R(ARM_R0, r_skb_data, - ARM_R1), ctx); - /* - * emit_mov_i() might generate one or two instructions, - * the same holds for emit_blx_r() - */ - _emit(ARM_COND_HI, ARM_B(b_imm(i + 1, ctx) - 2), ctx); - - emit(ARM_MOV_R(ARM_R0, r_skb), ctx); - /* r_off is r1 */ - emit_mov_i(ARM_R3, (u32)jit_get_skb_b, ctx); - emit_blx_r(ARM_R3, ctx); - /* check the return value of skb_copy_bits */ - emit(ARM_CMP_I(ARM_R1, 0), ctx); - emit_err_ret(ARM_COND_NE, ctx); - - emit(ARM_AND_I(r_X, ARM_R0, 0x00f), ctx); - emit(ARM_LSL_I(r_X, r_X, 2), ctx); - break; - case BPF_ST: - ctx->seen |= SEEN_MEM_WORD(k); - emit(ARM_STR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx); - break; - case BPF_STX: - update_on_xread(ctx); - ctx->seen |= SEEN_MEM_WORD(k); - emit(ARM_STR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx); - break; - case BPF_ALU | BPF_ADD | BPF_K: - /* A += K */ - OP_IMM3(ARM_ADD, r_A, r_A, k, ctx); - break; - case BPF_ALU | BPF_ADD | BPF_X: - update_on_xread(ctx); - emit(ARM_ADD_R(r_A, r_A, r_X), ctx); - break; - case BPF_ALU | BPF_SUB | BPF_K: - /* A -= K */ - OP_IMM3(ARM_SUB, r_A, r_A, k, ctx); - break; - case BPF_ALU | BPF_SUB | BPF_X: - update_on_xread(ctx); - emit(ARM_SUB_R(r_A, r_A, r_X), ctx); - break; - case BPF_ALU | BPF_MUL | BPF_K: - /* A *= K */ - emit_mov_i(r_scratch, k, ctx); - emit(ARM_MUL(r_A, r_A, r_scratch), ctx); - break; - case BPF_ALU | BPF_MUL | BPF_X: - update_on_xread(ctx); - emit(ARM_MUL(r_A, r_A, r_X), ctx); - break; - case BPF_ALU | BPF_DIV | BPF_K: - if (k == 1) - break; - emit_mov_i(r_scratch, k, ctx); - emit_udivmod(r_A, r_A, r_scratch, ctx, BPF_DIV); - break; - case BPF_ALU | BPF_DIV | BPF_X: - update_on_xread(ctx); - emit(ARM_CMP_I(r_X, 0), ctx); - emit_err_ret(ARM_COND_EQ, ctx); - emit_udivmod(r_A, r_A, r_X, ctx, BPF_DIV); - break; - case 
BPF_ALU | BPF_MOD | BPF_K: - if (k == 1) { - emit_mov_i(r_A, 0, ctx); - break; - } - emit_mov_i(r_scratch, k, ctx); - emit_udivmod(r_A, r_A, r_scratch, ctx, BPF_MOD); - break; - case BPF_ALU | BPF_MOD | BPF_X: - update_on_xread(ctx); - emit(ARM_CMP_I(r_X, 0), ctx); - emit_err_ret(ARM_COND_EQ, ctx); - emit_udivmod(r_A, r_A, r_X, ctx, BPF_MOD); - break; - case BPF_ALU | BPF_OR | BPF_K: - /* A |= K */ - OP_IMM3(ARM_ORR, r_A, r_A, k, ctx); - break; - case BPF_ALU | BPF_OR | BPF_X: - update_on_xread(ctx); - emit(ARM_ORR_R(r_A, r_A, r_X), ctx); - break; - case BPF_ALU | BPF_XOR | BPF_K: - /* A ^= K; */ - OP_IMM3(ARM_EOR, r_A, r_A, k, ctx); - break; - case BPF_ANC | SKF_AD_ALU_XOR_X: - case BPF_ALU | BPF_XOR | BPF_X: - /* A ^= X */ - update_on_xread(ctx); - emit(ARM_EOR_R(r_A, r_A, r_X), ctx); - break; - case BPF_ALU | BPF_AND | BPF_K: - /* A &= K */ - OP_IMM3(ARM_AND, r_A, r_A, k, ctx); - break; - case BPF_ALU | BPF_AND | BPF_X: - update_on_xread(ctx); - emit(ARM_AND_R(r_A, r_A, r_X), ctx); - break; - case BPF_ALU | BPF_LSH | BPF_K: - if (unlikely(k > 31)) - return -1; - emit(ARM_LSL_I(r_A, r_A, k), ctx); - break; - case BPF_ALU | BPF_LSH | BPF_X: - update_on_xread(ctx); - emit(ARM_LSL_R(r_A, r_A, r_X), ctx); - break; - case BPF_ALU | BPF_RSH | BPF_K: - if (unlikely(k > 31)) - return -1; - if (k) - emit(ARM_LSR_I(r_A, r_A, k), ctx); - break; - case BPF_ALU | BPF_RSH | BPF_X: - update_on_xread(ctx); - emit(ARM_LSR_R(r_A, r_A, r_X), ctx); - break; - case BPF_ALU | BPF_NEG: - /* A = -A */ - emit(ARM_RSB_I(r_A, r_A, 0), ctx); - break; - case BPF_JMP | BPF_JA: - /* pc += K */ - emit(ARM_B(b_imm(i + k + 1, ctx)), ctx); - break; - case BPF_JMP | BPF_JEQ | BPF_K: - /* pc += (A == K) ? pc->jt : pc->jf */ - condt = ARM_COND_EQ; - goto cmp_imm; - case BPF_JMP | BPF_JGT | BPF_K: - /* pc += (A > K) ? pc->jt : pc->jf */ - condt = ARM_COND_HI; - goto cmp_imm; - case BPF_JMP | BPF_JGE | BPF_K: - /* pc += (A >= K) ? pc->jt : pc->jf */ - condt = ARM_COND_HS; -cmp_imm: - imm12 = imm8m(k); - if (imm12 < 0) { - emit_mov_i_no8m(r_scratch, k, ctx); - emit(ARM_CMP_R(r_A, r_scratch), ctx); - } else { - emit(ARM_CMP_I(r_A, imm12), ctx); - } -cond_jump: - if (inst->jt) - _emit(condt, ARM_B(b_imm(i + inst->jt + 1, - ctx)), ctx); - if (inst->jf) - _emit(condt ^ 1, ARM_B(b_imm(i + inst->jf + 1, - ctx)), ctx); - break; - case BPF_JMP | BPF_JEQ | BPF_X: - /* pc += (A == X) ? pc->jt : pc->jf */ - condt = ARM_COND_EQ; - goto cmp_x; - case BPF_JMP | BPF_JGT | BPF_X: - /* pc += (A > X) ? pc->jt : pc->jf */ - condt = ARM_COND_HI; - goto cmp_x; - case BPF_JMP | BPF_JGE | BPF_X: - /* pc += (A >= X) ? pc->jt : pc->jf */ - condt = ARM_COND_CS; -cmp_x: - update_on_xread(ctx); - emit(ARM_CMP_R(r_A, r_X), ctx); - goto cond_jump; - case BPF_JMP | BPF_JSET | BPF_K: - /* pc += (A & K) ? pc->jt : pc->jf */ - condt = ARM_COND_NE; - /* not set iff all zeroes iff Z==1 iff EQ */ - - imm12 = imm8m(k); - if (imm12 < 0) { - emit_mov_i_no8m(r_scratch, k, ctx); - emit(ARM_TST_R(r_A, r_scratch), ctx); - } else { - emit(ARM_TST_I(r_A, imm12), ctx); - } - goto cond_jump; - case BPF_JMP | BPF_JSET | BPF_X: - /* pc += (A & X) ? 
pc->jt : pc->jf */ - update_on_xread(ctx); - condt = ARM_COND_NE; - emit(ARM_TST_R(r_A, r_X), ctx); - goto cond_jump; - case BPF_RET | BPF_A: - emit(ARM_MOV_R(ARM_R0, r_A), ctx); - goto b_epilogue; - case BPF_RET | BPF_K: - if ((k == 0) && (ctx->ret0_fp_idx < 0)) - ctx->ret0_fp_idx = i; - emit_mov_i(ARM_R0, k, ctx); -b_epilogue: - if (i != ctx->skf->len - 1) - emit(ARM_B(b_imm(prog->len, ctx)), ctx); - break; - case BPF_MISC | BPF_TAX: - /* X = A */ - ctx->seen |= SEEN_X; - emit(ARM_MOV_R(r_X, r_A), ctx); - break; - case BPF_MISC | BPF_TXA: - /* A = X */ - update_on_xread(ctx); - emit(ARM_MOV_R(r_A, r_X), ctx); - break; - case BPF_ANC | SKF_AD_PROTOCOL: - /* A = ntohs(skb->protocol) */ - ctx->seen |= SEEN_SKB; - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, - protocol) != 2); - off = offsetof(struct sk_buff, protocol); - emit(ARM_LDRH_I(r_scratch, r_skb, off), ctx); - emit_swap16(r_A, r_scratch, ctx); - break; - case BPF_ANC | SKF_AD_CPU: - /* r_scratch = current_thread_info() */ - OP_IMM3(ARM_BIC, r_scratch, ARM_SP, THREAD_SIZE - 1, ctx); - /* A = current_thread_info()->cpu */ - BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4); - off = offsetof(struct thread_info, cpu); - emit(ARM_LDR_I(r_A, r_scratch, off), ctx); - break; - case BPF_ANC | SKF_AD_IFINDEX: - case BPF_ANC | SKF_AD_HATYPE: - /* A = skb->dev->ifindex */ - /* A = skb->dev->type */ - ctx->seen |= SEEN_SKB; - off = offsetof(struct sk_buff, dev); - emit(ARM_LDR_I(r_scratch, r_skb, off), ctx); - - emit(ARM_CMP_I(r_scratch, 0), ctx); - emit_err_ret(ARM_COND_EQ, ctx); - - BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, - ifindex) != 4); - BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, - type) != 2); - - if (code == (BPF_ANC | SKF_AD_IFINDEX)) { - off = offsetof(struct net_device, ifindex); - emit(ARM_LDR_I(r_A, r_scratch, off), ctx); - } else { - /* - * offset of field "type" in "struct - * net_device" is above what can be - * used in the ldrh rd, [rn, #imm] - * instruction, so load the offset in - * a register and use ldrh rd, [rn, rm] - */ - off = offsetof(struct net_device, type); - emit_mov_i(ARM_R3, off, ctx); - emit(ARM_LDRH_R(r_A, r_scratch, ARM_R3), ctx); - } - break; - case BPF_ANC | SKF_AD_MARK: - ctx->seen |= SEEN_SKB; - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); - off = offsetof(struct sk_buff, mark); - emit(ARM_LDR_I(r_A, r_skb, off), ctx); - break; - case BPF_ANC | SKF_AD_RXHASH: - ctx->seen |= SEEN_SKB; - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); - off = offsetof(struct sk_buff, hash); - emit(ARM_LDR_I(r_A, r_skb, off), ctx); - break; - case BPF_ANC | SKF_AD_VLAN_TAG: - case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT: - ctx->seen |= SEEN_SKB; - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); - off = offsetof(struct sk_buff, vlan_tci); - emit(ARM_LDRH_I(r_A, r_skb, off), ctx); - if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) - OP_IMM3(ARM_AND, r_A, r_A, ~VLAN_TAG_PRESENT, ctx); - else { - OP_IMM3(ARM_LSR, r_A, r_A, 12, ctx); - OP_IMM3(ARM_AND, r_A, r_A, 0x1, ctx); - } - break; - case BPF_ANC | SKF_AD_PKTTYPE: - ctx->seen |= SEEN_SKB; - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, - __pkt_type_offset[0]) != 1); - off = PKT_TYPE_OFFSET(); - emit(ARM_LDRB_I(r_A, r_skb, off), ctx); - emit(ARM_AND_I(r_A, r_A, PKT_TYPE_MAX), ctx); -#ifdef __BIG_ENDIAN_BITFIELD - emit(ARM_LSR_I(r_A, r_A, 5), ctx); -#endif - break; - case BPF_ANC | SKF_AD_QUEUE: - ctx->seen |= SEEN_SKB; - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, - queue_mapping) != 2); - BUILD_BUG_ON(offsetof(struct sk_buff, - queue_mapping) > 0xff); - off = 
offsetof(struct sk_buff, queue_mapping); - emit(ARM_LDRH_I(r_A, r_skb, off), ctx); - break; - case BPF_ANC | SKF_AD_PAY_OFFSET: - ctx->seen |= SEEN_SKB | SEEN_CALL; - - emit(ARM_MOV_R(ARM_R0, r_skb), ctx); - emit_mov_i(ARM_R3, (unsigned int)skb_get_poff, ctx); - emit_blx_r(ARM_R3, ctx); - emit(ARM_MOV_R(r_A, ARM_R0), ctx); - break; - case BPF_LDX | BPF_W | BPF_ABS: - /* - * load a 32bit word from struct seccomp_data. - * seccomp_check_filter() will already have checked - * that k is 32bit aligned and lies within the - * struct seccomp_data. - */ - ctx->seen |= SEEN_SKB; - emit(ARM_LDR_I(r_A, r_skb, k), ctx); - break; - default: - return -1; + /* It's used with loading the 64 bit immediate value. */ + if (ret > 0) { + i++; + if (ctx->target == NULL) + ctx->offsets[i] = ctx->idx; + continue; } - if (ctx->flags & FLAG_IMM_OVERFLOW) - /* - * this instruction generated an overflow when - * trying to access the literal pool, so - * delegate this filter to the kernel interpreter. - */ + if (ctx->target == NULL) + ctx->offsets[i] = ctx->idx; + + /* If unsuccesfull, return with error code */ + if (ret) + return ret; + } + return 0; +} + +static int validate_code(struct jit_ctx *ctx) +{ + int i; + + for (i = 0; i < ctx->idx; i++) { + if (ctx->target[i] == __opcode_to_mem_arm(ARM_INST_UDF)) return -1; } - /* compute offsets only during the first pass */ - if (ctx->target == NULL) - ctx->offsets[i] = ctx->idx * 4; - return 0; } - -void bpf_jit_compile(struct bpf_prog *fp) +void bpf_jit_compile(struct bpf_prog *prog) { - struct bpf_binary_header *header; - struct jit_ctx ctx; - unsigned tmp_idx; - unsigned alloc_size; - u8 *target_ptr; + /* Nothing to do here. We support Internal BPF. */ +} +struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) +{ + struct bpf_prog *tmp, *orig_prog = prog; + struct bpf_binary_header *header; + bool tmp_blinded = false; + struct jit_ctx ctx; + unsigned int tmp_idx; + unsigned int image_size; + u8 *image_ptr; + + /* If BPF JIT was not enabled then we must fall back to + * the interpreter. + */ if (!bpf_jit_enable) - return; + return orig_prog; + + /* If constant blinding was enabled and we failed during blinding + * then we must fall back to the interpreter. Otherwise, we save + * the new JITed code. + */ + tmp = bpf_jit_blind_constants(prog); + + if (IS_ERR(tmp)) + return orig_prog; + if (tmp != prog) { + tmp_blinded = true; + prog = tmp; + } memset(&ctx, 0, sizeof(ctx)); - ctx.skf = fp; - ctx.ret0_fp_idx = -1; + ctx.prog = prog; - ctx.offsets = kzalloc(4 * (ctx.skf->len + 1), GFP_KERNEL); - if (ctx.offsets == NULL) - return; - - /* fake pass to fill in the ctx->seen */ - if (unlikely(build_body(&ctx))) + /* Not able to allocate memory for offsets[] , then + * we must fall back to the interpreter + */ + ctx.offsets = kcalloc(prog->len, sizeof(int), GFP_KERNEL); + if (ctx.offsets == NULL) { + prog = orig_prog; goto out; + } + + /* 1) fake pass to find in the length of the JITed code, + * to compute ctx->offsets and other context variables + * needed to compute final JITed code. + * Also, calculate random starting pointer/start of JITed code + * which is prefixed by random number of fault instructions. + * + * If the first pass fails then there is no chance of it + * being successful in the second pass, so just fall back + * to the interpreter. 
+ */ + if (build_body(&ctx)) { + prog = orig_prog; + goto out_off; + } tmp_idx = ctx.idx; build_prologue(&ctx); ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4; + ctx.epilogue_offset = ctx.idx; + #if __LINUX_ARM_ARCH__ < 7 tmp_idx = ctx.idx; build_epilogue(&ctx); @@ -1021,64 +1880,83 @@ void bpf_jit_compile(struct bpf_prog *fp) ctx.idx += ctx.imm_count; if (ctx.imm_count) { - ctx.imms = kzalloc(4 * ctx.imm_count, GFP_KERNEL); - if (ctx.imms == NULL) - goto out; + ctx.imms = kcalloc(ctx.imm_count, sizeof(u32), GFP_KERNEL); + if (ctx.imms == NULL) { + prog = orig_prog; + goto out_off; + } } #else - /* there's nothing after the epilogue on ARMv7 */ + /* there's nothing about the epilogue on ARMv7 */ build_epilogue(&ctx); #endif - alloc_size = 4 * ctx.idx; - header = bpf_jit_binary_alloc(alloc_size, &target_ptr, - 4, jit_fill_hole); - if (header == NULL) - goto out; + /* Now we can get the actual image size of the JITed arm code. + * Currently, we are not considering the THUMB-2 instructions + * for jit, although it can decrease the size of the image. + * + * As each arm instruction is of length 32bit, we are translating + * number of JITed intructions into the size required to store these + * JITed code. + */ + image_size = sizeof(u32) * ctx.idx; - ctx.target = (u32 *) target_ptr; + /* Now we know the size of the structure to make */ + header = bpf_jit_binary_alloc(image_size, &image_ptr, + sizeof(u32), jit_fill_hole); + /* Not able to allocate memory for the structure then + * we must fall back to the interpretation + */ + if (header == NULL) { + prog = orig_prog; + goto out_imms; + } + + /* 2.) Actual pass to generate final JIT code */ + ctx.target = (u32 *) image_ptr; ctx.idx = 0; build_prologue(&ctx); + + /* If building the body of the JITed code fails somehow, + * we fall back to the interpretation. + */ if (build_body(&ctx) < 0) { -#if __LINUX_ARM_ARCH__ < 7 - if (ctx.imm_count) - kfree(ctx.imms); -#endif + image_ptr = NULL; bpf_jit_binary_free(header); - goto out; + prog = orig_prog; + goto out_imms; } build_epilogue(&ctx); + /* 3.) Extra pass to validate JITed Code */ + if (validate_code(&ctx)) { + image_ptr = NULL; + bpf_jit_binary_free(header); + prog = orig_prog; + goto out_imms; + } flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx)); + if (bpf_jit_enable > 1) + /* there are 2 passes here */ + bpf_jit_dump(prog->len, image_size, 2, ctx.target); + + set_memory_ro((unsigned long)header, header->pages); + prog->bpf_func = (void *)ctx.target; + prog->jited = 1; + prog->jited_len = image_size; + +out_imms: #if __LINUX_ARM_ARCH__ < 7 if (ctx.imm_count) kfree(ctx.imms); #endif - - if (bpf_jit_enable > 1) - /* there are 2 passes here */ - bpf_jit_dump(fp->len, alloc_size, 2, ctx.target); - - set_memory_ro((unsigned long)header, header->pages); - fp->bpf_func = (void *)ctx.target; - fp->jited = 1; -out: +out_off: kfree(ctx.offsets); - return; +out: + if (tmp_blinded) + bpf_jit_prog_release_other(prog, prog == orig_prog ? 
+ tmp : orig_prog); + return prog; } -void bpf_jit_free(struct bpf_prog *fp) -{ - unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK; - struct bpf_binary_header *header = (void *)addr; - - if (!fp->jited) - goto free_filter; - - set_memory_rw(addr, header->pages); - bpf_jit_binary_free(header); - -free_filter: - bpf_prog_unlock_free(fp); -} diff --git a/arch/arm/net/bpf_jit_32.h b/arch/arm/net/bpf_jit_32.h index c46fca2972f7..d5cf5f6208aa 100644 --- a/arch/arm/net/bpf_jit_32.h +++ b/arch/arm/net/bpf_jit_32.h @@ -11,6 +11,7 @@ #ifndef PFILTER_OPCODES_ARM_H #define PFILTER_OPCODES_ARM_H +/* ARM 32bit Registers */ #define ARM_R0 0 #define ARM_R1 1 #define ARM_R2 2 @@ -22,38 +23,43 @@ #define ARM_R8 8 #define ARM_R9 9 #define ARM_R10 10 -#define ARM_FP 11 -#define ARM_IP 12 -#define ARM_SP 13 -#define ARM_LR 14 -#define ARM_PC 15 +#define ARM_FP 11 /* Frame Pointer */ +#define ARM_IP 12 /* Intra-procedure scratch register */ +#define ARM_SP 13 /* Stack pointer: as load/store base reg */ +#define ARM_LR 14 /* Link Register */ +#define ARM_PC 15 /* Program counter */ -#define ARM_COND_EQ 0x0 -#define ARM_COND_NE 0x1 -#define ARM_COND_CS 0x2 +#define ARM_COND_EQ 0x0 /* == */ +#define ARM_COND_NE 0x1 /* != */ +#define ARM_COND_CS 0x2 /* unsigned >= */ #define ARM_COND_HS ARM_COND_CS -#define ARM_COND_CC 0x3 +#define ARM_COND_CC 0x3 /* unsigned < */ #define ARM_COND_LO ARM_COND_CC -#define ARM_COND_MI 0x4 -#define ARM_COND_PL 0x5 -#define ARM_COND_VS 0x6 -#define ARM_COND_VC 0x7 -#define ARM_COND_HI 0x8 -#define ARM_COND_LS 0x9 -#define ARM_COND_GE 0xa -#define ARM_COND_LT 0xb -#define ARM_COND_GT 0xc -#define ARM_COND_LE 0xd -#define ARM_COND_AL 0xe +#define ARM_COND_MI 0x4 /* < 0 */ +#define ARM_COND_PL 0x5 /* >= 0 */ +#define ARM_COND_VS 0x6 /* Signed Overflow */ +#define ARM_COND_VC 0x7 /* No Signed Overflow */ +#define ARM_COND_HI 0x8 /* unsigned > */ +#define ARM_COND_LS 0x9 /* unsigned <= */ +#define ARM_COND_GE 0xa /* Signed >= */ +#define ARM_COND_LT 0xb /* Signed < */ +#define ARM_COND_GT 0xc /* Signed > */ +#define ARM_COND_LE 0xd /* Signed <= */ +#define ARM_COND_AL 0xe /* None */ /* register shift types */ #define SRTYPE_LSL 0 #define SRTYPE_LSR 1 #define SRTYPE_ASR 2 #define SRTYPE_ROR 3 +#define SRTYPE_ASL (SRTYPE_LSL) #define ARM_INST_ADD_R 0x00800000 +#define ARM_INST_ADDS_R 0x00900000 +#define ARM_INST_ADC_R 0x00a00000 +#define ARM_INST_ADC_I 0x02a00000 #define ARM_INST_ADD_I 0x02800000 +#define ARM_INST_ADDS_I 0x02900000 #define ARM_INST_AND_R 0x00000000 #define ARM_INST_AND_I 0x02000000 @@ -76,8 +82,10 @@ #define ARM_INST_LDRH_I 0x01d000b0 #define ARM_INST_LDRH_R 0x019000b0 #define ARM_INST_LDR_I 0x05900000 +#define ARM_INST_LDR_R 0x07900000 #define ARM_INST_LDM 0x08900000 +#define ARM_INST_LDM_IA 0x08b00000 #define ARM_INST_LSL_I 0x01a00000 #define ARM_INST_LSL_R 0x01a00010 @@ -86,6 +94,7 @@ #define ARM_INST_LSR_R 0x01a00030 #define ARM_INST_MOV_R 0x01a00000 +#define ARM_INST_MOVS_R 0x01b00000 #define ARM_INST_MOV_I 0x03a00000 #define ARM_INST_MOVW 0x03000000 #define ARM_INST_MOVT 0x03400000 @@ -96,17 +105,28 @@ #define ARM_INST_PUSH 0x092d0000 #define ARM_INST_ORR_R 0x01800000 +#define ARM_INST_ORRS_R 0x01900000 #define ARM_INST_ORR_I 0x03800000 #define ARM_INST_REV 0x06bf0f30 #define ARM_INST_REV16 0x06bf0fb0 #define ARM_INST_RSB_I 0x02600000 +#define ARM_INST_RSBS_I 0x02700000 +#define ARM_INST_RSC_I 0x02e00000 #define ARM_INST_SUB_R 0x00400000 +#define ARM_INST_SUBS_R 0x00500000 +#define ARM_INST_RSB_R 0x00600000 #define ARM_INST_SUB_I 0x02400000 +#define 
ARM_INST_SUBS_I 0x02500000 +#define ARM_INST_SBC_I 0x02c00000 +#define ARM_INST_SBC_R 0x00c00000 +#define ARM_INST_SBCS_R 0x00d00000 #define ARM_INST_STR_I 0x05800000 +#define ARM_INST_STRB_I 0x05c00000 +#define ARM_INST_STRH_I 0x01c000b0 #define ARM_INST_TST_R 0x01100000 #define ARM_INST_TST_I 0x03100000 @@ -117,6 +137,8 @@ #define ARM_INST_MLS 0x00600090 +#define ARM_INST_UXTH 0x06ff0070 + /* * Use a suitable undefined instruction to use for ARM/Thumb2 faulting. * We need to be careful not to conflict with those used by other modules @@ -135,9 +157,15 @@ #define _AL3_R(op, rd, rn, rm) ((op ## _R) | (rd) << 12 | (rn) << 16 | (rm)) /* immediate */ #define _AL3_I(op, rd, rn, imm) ((op ## _I) | (rd) << 12 | (rn) << 16 | (imm)) +/* register with register-shift */ +#define _AL3_SR(inst) (inst | (1 << 4)) #define ARM_ADD_R(rd, rn, rm) _AL3_R(ARM_INST_ADD, rd, rn, rm) +#define ARM_ADDS_R(rd, rn, rm) _AL3_R(ARM_INST_ADDS, rd, rn, rm) #define ARM_ADD_I(rd, rn, imm) _AL3_I(ARM_INST_ADD, rd, rn, imm) +#define ARM_ADDS_I(rd, rn, imm) _AL3_I(ARM_INST_ADDS, rd, rn, imm) +#define ARM_ADC_R(rd, rn, rm) _AL3_R(ARM_INST_ADC, rd, rn, rm) +#define ARM_ADC_I(rd, rn, imm) _AL3_I(ARM_INST_ADC, rd, rn, imm) #define ARM_AND_R(rd, rn, rm) _AL3_R(ARM_INST_AND, rd, rn, rm) #define ARM_AND_I(rd, rn, imm) _AL3_I(ARM_INST_AND, rd, rn, imm) @@ -156,7 +184,9 @@ #define ARM_EOR_I(rd, rn, imm) _AL3_I(ARM_INST_EOR, rd, rn, imm) #define ARM_LDR_I(rt, rn, off) (ARM_INST_LDR_I | (rt) << 12 | (rn) << 16 \ - | (off)) + | ((off) & 0xfff)) +#define ARM_LDR_R(rt, rn, rm) (ARM_INST_LDR_R | (rt) << 12 | (rn) << 16 \ + | (rm)) #define ARM_LDRB_I(rt, rn, off) (ARM_INST_LDRB_I | (rt) << 12 | (rn) << 16 \ | (off)) #define ARM_LDRB_R(rt, rn, rm) (ARM_INST_LDRB_R | (rt) << 12 | (rn) << 16 \ @@ -167,15 +197,23 @@ | (rm)) #define ARM_LDM(rn, regs) (ARM_INST_LDM | (rn) << 16 | (regs)) +#define ARM_LDM_IA(rn, regs) (ARM_INST_LDM_IA | (rn) << 16 | (regs)) #define ARM_LSL_R(rd, rn, rm) (_AL3_R(ARM_INST_LSL, rd, 0, rn) | (rm) << 8) #define ARM_LSL_I(rd, rn, imm) (_AL3_I(ARM_INST_LSL, rd, 0, rn) | (imm) << 7) #define ARM_LSR_R(rd, rn, rm) (_AL3_R(ARM_INST_LSR, rd, 0, rn) | (rm) << 8) #define ARM_LSR_I(rd, rn, imm) (_AL3_I(ARM_INST_LSR, rd, 0, rn) | (imm) << 7) +#define ARM_ASR_R(rd, rn, rm) (_AL3_R(ARM_INST_ASR, rd, 0, rn) | (rm) << 8) +#define ARM_ASR_I(rd, rn, imm) (_AL3_I(ARM_INST_ASR, rd, 0, rn) | (imm) << 7) #define ARM_MOV_R(rd, rm) _AL3_R(ARM_INST_MOV, rd, 0, rm) +#define ARM_MOVS_R(rd, rm) _AL3_R(ARM_INST_MOVS, rd, 0, rm) #define ARM_MOV_I(rd, imm) _AL3_I(ARM_INST_MOV, rd, 0, imm) +#define ARM_MOV_SR(rd, rm, type, rs) \ + (_AL3_SR(ARM_MOV_R(rd, rm)) | (type) << 5 | (rs) << 8) +#define ARM_MOV_SI(rd, rm, type, imm6) \ + (ARM_MOV_R(rd, rm) | (type) << 5 | (imm6) << 7) #define ARM_MOVW(rd, imm) \ (ARM_INST_MOVW | ((imm) >> 12) << 16 | (rd) << 12 | ((imm) & 0x0fff)) @@ -190,19 +228,38 @@ #define ARM_ORR_R(rd, rn, rm) _AL3_R(ARM_INST_ORR, rd, rn, rm) #define ARM_ORR_I(rd, rn, imm) _AL3_I(ARM_INST_ORR, rd, rn, imm) -#define ARM_ORR_S(rd, rn, rm, type, rs) \ - (ARM_ORR_R(rd, rn, rm) | (type) << 5 | (rs) << 7) +#define ARM_ORR_SR(rd, rn, rm, type, rs) \ + (_AL3_SR(ARM_ORR_R(rd, rn, rm)) | (type) << 5 | (rs) << 8) +#define ARM_ORRS_R(rd, rn, rm) _AL3_R(ARM_INST_ORRS, rd, rn, rm) +#define ARM_ORRS_SR(rd, rn, rm, type, rs) \ + (_AL3_SR(ARM_ORRS_R(rd, rn, rm)) | (type) << 5 | (rs) << 8) +#define ARM_ORR_SI(rd, rn, rm, type, imm6) \ + (ARM_ORR_R(rd, rn, rm) | (type) << 5 | (imm6) << 7) +#define ARM_ORRS_SI(rd, rn, rm, type, imm6) \ + (ARM_ORRS_R(rd, 
rn, rm) | (type) << 5 | (imm6) << 7) #define ARM_REV(rd, rm) (ARM_INST_REV | (rd) << 12 | (rm)) #define ARM_REV16(rd, rm) (ARM_INST_REV16 | (rd) << 12 | (rm)) #define ARM_RSB_I(rd, rn, imm) _AL3_I(ARM_INST_RSB, rd, rn, imm) +#define ARM_RSBS_I(rd, rn, imm) _AL3_I(ARM_INST_RSBS, rd, rn, imm) +#define ARM_RSC_I(rd, rn, imm) _AL3_I(ARM_INST_RSC, rd, rn, imm) #define ARM_SUB_R(rd, rn, rm) _AL3_R(ARM_INST_SUB, rd, rn, rm) +#define ARM_SUBS_R(rd, rn, rm) _AL3_R(ARM_INST_SUBS, rd, rn, rm) +#define ARM_RSB_R(rd, rn, rm) _AL3_R(ARM_INST_RSB, rd, rn, rm) +#define ARM_SBC_R(rd, rn, rm) _AL3_R(ARM_INST_SBC, rd, rn, rm) +#define ARM_SBCS_R(rd, rn, rm) _AL3_R(ARM_INST_SBCS, rd, rn, rm) #define ARM_SUB_I(rd, rn, imm) _AL3_I(ARM_INST_SUB, rd, rn, imm) +#define ARM_SUBS_I(rd, rn, imm) _AL3_I(ARM_INST_SUBS, rd, rn, imm) +#define ARM_SBC_I(rd, rn, imm) _AL3_I(ARM_INST_SBC, rd, rn, imm) #define ARM_STR_I(rt, rn, off) (ARM_INST_STR_I | (rt) << 12 | (rn) << 16 \ - | (off)) + | ((off) & 0xfff)) +#define ARM_STRH_I(rt, rn, off) (ARM_INST_STRH_I | (rt) << 12 | (rn) << 16 \ + | (((off) & 0xf0) << 4) | ((off) & 0xf)) +#define ARM_STRB_I(rt, rn, off) (ARM_INST_STRB_I | (rt) << 12 | (rn) << 16 \ + | (((off) & 0xf0) << 4) | ((off) & 0xf)) #define ARM_TST_R(rn, rm) _AL3_R(ARM_INST_TST, 0, rn, rm) #define ARM_TST_I(rn, imm) _AL3_I(ARM_INST_TST, 0, rn, imm) @@ -214,5 +271,6 @@ #define ARM_MLS(rd, rn, rm, ra) (ARM_INST_MLS | (rd) << 16 | (rn) | (rm) << 8 \ | (ra) << 12) +#define ARM_UXTH(rd, rm) (ARM_INST_UXTH | (rd) << 12 | (rm)) #endif /* PFILTER_OPCODES_ARM_H */ diff --git a/arch/arm/plat-samsung/include/plat/map-s3c.h b/arch/arm/plat-samsung/include/plat/map-s3c.h index 6feedd47d875..33104911862e 100644 --- a/arch/arm/plat-samsung/include/plat/map-s3c.h +++ b/arch/arm/plat-samsung/include/plat/map-s3c.h @@ -61,7 +61,7 @@ /* deal with the registers that move under the 2412/2413 */ -#if defined(CONFIG_CPU_S3C2412) || defined(CONFIG_CPU_S3C2413) +#if defined(CONFIG_CPU_S3C2412) #ifndef __ASSEMBLY__ extern void __iomem *s3c24xx_va_gpio2; #endif diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms index f5f0c813dfec..6b54ee8c1262 100644 --- a/arch/arm64/Kconfig.platforms +++ b/arch/arm64/Kconfig.platforms @@ -184,6 +184,12 @@ config ARCH_R8A7796 help This enables support for the Renesas R-Car M3-W SoC. +config ARCH_R8A77995 + bool "Renesas R-Car D3 SoC Platform" + depends on ARCH_RENESAS + help + This enables support for the Renesas R-Car D3 SoC. 
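Editor's aside on the bpf_jit_32.h additions above: the ARM_INST_* values are opcode templates for ARM data-processing instructions, and the _AL3_R()/_AL3_I() helpers simply OR the destination register into bits 15:12, the first source register into bits 19:16, and a register or 8-bit immediate into the low bits. The sketch below is not part of the patch; it re-declares a couple of the opcode constants and a simplified _AL3_R() (folding in the ARM_COND_AL condition that the kernel's emit() helper normally ORs in separately), and wraps them in a hypothetical main()/printf harness purely so the composed words can be checked against a disassembler. It shows the ADDS/ADC pairing a 32-bit JIT can use for a 64-bit add.

/* Illustration only -- mirrors the bit composition of the _AL3_R() helper
 * in the header above; opcode values copied from the patch, condition
 * folded in here for a standalone example.
 */
#include <stdint.h>
#include <stdio.h>

#define ARM_INST_ADDS_R 0x00900000  /* ADDS rd, rn, rm (sets carry) */
#define ARM_INST_ADC_R  0x00a00000  /* ADC  rd, rn, rm (adds carry in) */
#define ARM_COND_AL     0xe         /* "always" condition, bits 31:28 */

/* register form: cond | opcode | rd<<12 | rn<<16 | rm */
#define _AL3_R(op, rd, rn, rm) \
	(((uint32_t)ARM_COND_AL << 28) | (op) | (rd) << 12 | (rn) << 16 | (rm))

#define ARM_ADDS_R(rd, rn, rm) _AL3_R(ARM_INST_ADDS_R, rd, rn, rm)
#define ARM_ADC_R(rd, rn, rm)  _AL3_R(ARM_INST_ADC_R, rd, rn, rm)

int main(void)
{
	/* 64-bit add of the pair {r5:r4} += {r7:r6}: low words with ADDS
	 * (sets the carry flag), high words with ADC (consumes it) -- the
	 * pairing an eBPF JIT needs for BPF_ALU64 adds on a 32-bit ISA.
	 */
	uint32_t lo = ARM_ADDS_R(4, 4, 6);	/* adds r4, r4, r6 */
	uint32_t hi = ARM_ADC_R(5, 5, 7);	/* adc  r5, r5, r7 */

	printf("adds r4, r4, r6 -> 0x%08x\n", lo);	/* expect 0xe0944006 */
	printf("adc  r5, r5, r7 -> 0x%08x\n", hi);	/* expect 0xe0a55007 */
	return 0;
}

Built and run, this prints 0xe0944006 and 0xe0a55007, which is what a disassembler reports for "adds r4, r4, r6" and "adc r5, r5, r7"; in the kernel the same words are composed by these macros and written into the image through the JIT's emit() path rather than printed.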
+ config ARCH_STRATIX10 bool "Altera's Stratix 10 SoCFPGA Family" help @@ -250,6 +256,7 @@ config ARCH_XGENE config ARCH_ZX bool "ZTE ZX SoC Family" + select PINCTRL help This enables support for ZTE ZX SoC Family diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 9b41f1e3b1a0..939b310913cf 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -50,17 +50,22 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables KBUILD_CFLAGS += $(call cc-option, -mpc-relative-literal-loads) KBUILD_AFLAGS += $(lseinstr) $(brokengasinst) +KBUILD_CFLAGS += $(call cc-option,-mabi=lp64) +KBUILD_AFLAGS += $(call cc-option,-mabi=lp64) + ifeq ($(CONFIG_CPU_BIG_ENDIAN), y) KBUILD_CPPFLAGS += -mbig-endian CHECKFLAGS += -D__AARCH64EB__ AS += -EB LD += -EB +LDFLAGS += -maarch64linuxb UTS_MACHINE := aarch64_be else KBUILD_CPPFLAGS += -mlittle-endian CHECKFLAGS += -D__AARCH64EL__ AS += -EL LD += -EL +LDFLAGS += -maarch64linux UTS_MACHINE := aarch64 endif diff --git a/arch/arm64/boot/dts/allwinner/Makefile b/arch/arm64/boot/dts/allwinner/Makefile index 108f12ce6d1d..19c3fbd75eda 100644 --- a/arch/arm64/boot/dts/allwinner/Makefile +++ b/arch/arm64/boot/dts/allwinner/Makefile @@ -1,4 +1,6 @@ dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-bananapi-m64.dtb +dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-nanopi-a64.dtb +dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-olinuxino.dtb dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-orangepi-win.dtb dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-pine64-plus.dtb sun50i-a64-pine64.dtb dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-sopine-baseboard.dtb diff --git a/arch/arm64/boot/dts/allwinner/axp803.dtsi b/arch/arm64/boot/dts/allwinner/axp803.dtsi new file mode 100644 index 000000000000..ff8af52743ff --- /dev/null +++ b/arch/arm64/boot/dts/allwinner/axp803.dtsi @@ -0,0 +1,150 @@ +/* + * Copyright 2017 Icenowy Zheng + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +/* + * AXP803 Integrated Power Management Chip + * http://files.pine64.org/doc/datasheet/pine64/AXP803_Datasheet_V1.0.pdf + */ + +&axp803 { + interrupt-controller; + #interrupt-cells = <1>; + + regulators { + /* Default work frequency for buck regulators */ + x-powers,dcdc-freq = <3000>; + + reg_aldo1: aldo1 { + regulator-name = "aldo1"; + }; + + reg_aldo2: aldo2 { + regulator-name = "aldo2"; + }; + + reg_aldo3: aldo3 { + regulator-name = "aldo3"; + }; + + reg_dc1sw: dc1sw { + regulator-name = "dc1sw"; + }; + + reg_dcdc1: dcdc1 { + regulator-name = "dcdc1"; + }; + + reg_dcdc2: dcdc2 { + regulator-name = "dcdc2"; + }; + + reg_dcdc3: dcdc3 { + regulator-name = "dcdc3"; + }; + + reg_dcdc4: dcdc4 { + regulator-name = "dcdc4"; + }; + + reg_dcdc5: dcdc5 { + regulator-name = "dcdc5"; + }; + + reg_dcdc6: dcdc6 { + regulator-name = "dcdc6"; + }; + + reg_dldo1: dldo1 { + regulator-name = "dldo1"; + }; + + reg_dldo2: dldo2 { + regulator-name = "dldo2"; + }; + + reg_dldo3: dldo3 { + regulator-name = "dldo3"; + }; + + reg_dldo4: dldo4 { + regulator-name = "dldo4"; + }; + + reg_eldo1: eldo1 { + regulator-name = "eldo1"; + }; + + reg_eldo2: eldo2 { + regulator-name = "eldo2"; + }; + + reg_eldo3: eldo3 { + regulator-name = "eldo3"; + }; + + reg_fldo1: fldo1 { + regulator-name = "fldo1"; + }; + + reg_fldo2: fldo2 { + regulator-name = "fldo2"; + }; + + reg_ldo_io0: ldo-io0 { + regulator-name = "ldo-io0"; + status = "disabled"; + }; + + reg_ldo_io1: ldo-io1 { + regulator-name = "ldo-io1"; + status = "disabled"; + }; + + reg_rtc_ldo: rtc-ldo { + /* RTC_LDO is a fixed, always-on regulator */ + regulator-always-on; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3000000>; + regulator-name = "rtc-ldo"; + }; + }; +}; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts index 6872135d7f84..d347f52e27f6 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts @@ -59,14 +59,16 @@ chosen { stdout-path = "serial0:115200n8"; }; - reg_vcc3v3: vcc3v3 { - compatible = "regulator-fixed"; - regulator-name = "vcc3v3"; - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; + wifi_pwrseq: wifi_pwrseq { + compatible = "mmc-pwrseq-simple"; + reset-gpios = <&r_pio 0 2 GPIO_ACTIVE_LOW>; /* PL2 */ }; }; +&ehci1 { + status = "okay"; +}; + &i2c1 { pinctrl-names = "default"; pinctrl-0 = <&i2c1_pins>; @@ -80,7 +82,7 @@ &i2c1_pins { &mmc0 { pinctrl-names = "default"; pinctrl-0 = <&mmc0_pins>; - vmmc-supply = <®_vcc3v3>; + vmmc-supply = <®_dcdc1>; cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>; cd-inverted; disable-wp; @@ -91,22 +93,143 @@ &mmc0 { &mmc1 { pinctrl-names = "default"; pinctrl-0 = <&mmc1_pins>; - vmmc-supply = <®_vcc3v3>; + vmmc-supply = <®_dldo2>; + vqmmc-supply = <®_dldo4>; + mmc-pwrseq = <&wifi_pwrseq>; bus-width = <4>; non-removable; status = "okay"; + + brcmf: wifi@1 { + reg = <1>; + compatible = "brcm,bcm4329-fmac"; + interrupt-parent = <&r_pio>; + interrupts = <0 3 IRQ_TYPE_LEVEL_LOW>; /* PL3 */ + interrupt-names = "host-wake"; + }; }; &mmc2 { pinctrl-names = "default"; pinctrl-0 = <&mmc2_pins>; - vmmc-supply = <®_vcc3v3>; + vmmc-supply = <®_dcdc1>; 
bus-width = <8>; non-removable; cap-mmc-hw-reset; status = "okay"; }; +&ohci1 { + status = "okay"; +}; + +&r_rsb { + status = "okay"; + + axp803: pmic@3a3 { + compatible = "x-powers,axp803"; + reg = <0x3a3>; + interrupt-parent = <&r_intc>; + interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + }; +}; + +#include "axp803.dtsi" + +®_aldo2 { + regulator-always-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + regulator-name = "vcc-pl"; +}; + +®_aldo3 { + regulator-always-on; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3000000>; + regulator-name = "vcc-pll-avcc"; +}; + +®_dc1sw { + regulator-name = "vcc-phy"; +}; + +®_dcdc1 { + regulator-always-on; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-name = "vcc-3v3"; +}; + +®_dcdc2 { + regulator-always-on; + regulator-min-microvolt = <1040000>; + regulator-max-microvolt = <1300000>; + regulator-name = "vdd-cpux"; +}; + +/* DCDC3 is polyphased with DCDC2 */ + +®_dcdc5 { + regulator-always-on; + regulator-min-microvolt = <1500000>; + regulator-max-microvolt = <1500000>; + regulator-name = "vcc-dram"; +}; + +®_dcdc6 { + regulator-always-on; + regulator-min-microvolt = <1100000>; + regulator-max-microvolt = <1100000>; + regulator-name = "vdd-sys"; +}; + +®_dldo1 { + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-name = "vcc-hdmi-dsi"; +}; + +®_dldo2 { + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-name = "vcc-wifi"; +}; + +®_dldo4 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + regulator-name = "vcc-wifi-io"; +}; + +®_eldo1 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-name = "cpvdd"; +}; + +®_fldo1 { + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + regulator-name = "vcc-1v2-hsic"; +}; + +/* + * The A64 chip cannot work without this regulator off, although + * it seems to be only driving the AR100 core. + * Maybe we don't still know well about CPUs domain. + */ +®_fldo2 { + regulator-always-on; + regulator-min-microvolt = <1100000>; + regulator-max-microvolt = <1100000>; + regulator-name = "vdd-cpus"; +}; + +®_rtc_ldo { + regulator-name = "vcc-rtc"; +}; + &uart0 { pinctrl-names = "default"; pinctrl-0 = <&uart0_pins_a>; @@ -118,3 +241,7 @@ &uart1 { pinctrl-0 = <&uart1_pins>, <&uart1_rts_cts_pins>; status = "okay"; }; + +&usbphy { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-nanopi-a64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-nanopi-a64.dts new file mode 100644 index 000000000000..2beef9e6cb88 --- /dev/null +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-nanopi-a64.dts @@ -0,0 +1,207 @@ +/* + * Copyright (C) 2017 Jagan Teki + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +/dts-v1/; + +#include "sun50i-a64.dtsi" + +#include + +/ { + model = "FriendlyARM NanoPi A64"; + compatible = "friendlyarm,nanopi-a64", "allwinner,sun50i-a64"; + + aliases { + serial0 = &uart0; + }; + + chosen { + stdout-path = "serial0:115200n8"; + }; +}; + +&ehci0 { + status = "okay"; +}; + +&ehci1 { + status = "okay"; +}; + +/* i2c1 connected with gpio headers like pine64, bananapi */ +&i2c1 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c1_pins>; + status = "disabled"; +}; + +&i2c1_pins { + bias-pull-up; +}; + +&mmc0 { + pinctrl-names = "default"; + pinctrl-0 = <&mmc0_pins>; + vmmc-supply = <®_dcdc1>; + cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>; + cd-inverted; + disable-wp; + bus-width = <4>; + status = "okay"; +}; + +&ohci0 { + status = "okay"; +}; + +&ohci1 { + status = "okay"; +}; + +&r_rsb { + status = "okay"; + + axp803: pmic@3a3 { + compatible = "x-powers,axp803"; + reg = <0x3a3>; + interrupt-parent = <&r_intc>; + interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + }; +}; + +#include "axp803.dtsi" + +®_aldo2 { + regulator-always-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + regulator-name = "vcc-pl"; +}; + +®_aldo3 { + regulator-always-on; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3000000>; + regulator-name = "vcc-pll-avcc"; +}; + +®_dcdc1 { + regulator-always-on; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3000000>; + regulator-name = "vcc-3v"; +}; + +®_dcdc2 { + regulator-always-on; + regulator-min-microvolt = <1040000>; + regulator-max-microvolt = <1300000>; + regulator-name = "vdd-cpux"; +}; + +/* DCDC3 is polyphased with DCDC2 */ + +®_dcdc5 { + regulator-always-on; + regulator-min-microvolt = <1500000>; + regulator-max-microvolt = <1500000>; + regulator-name = "vcc-dram"; +}; + +®_dcdc6 { + regulator-always-on; + regulator-min-microvolt = <1100000>; + regulator-max-microvolt = <1100000>; + regulator-name = "vdd-sys"; +}; + +®_dldo1 { + regulator-always-on; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-name = "vcc-hdmi-dsi"; +}; + +®_dldo4 { + regulator-always-on; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3000000>; + regulator-name = "vcc-pg-wifi-io"; +}; + +®_eldo1 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + 
regulator-name = "cpvdd"; +}; + +®_fldo1 { + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + regulator-name = "vcc-1v2-hsic"; +}; + +/* + * The A64 chip cannot work without this regulator off, although + * it seems to be only driving the AR100 core. + * Maybe we don't still know well about CPUs domain. + */ +®_fldo2 { + regulator-always-on; + regulator-min-microvolt = <1100000>; + regulator-max-microvolt = <1100000>; + regulator-name = "vdd-cpus"; +}; + +®_rtc_ldo { + regulator-name = "vcc-rtc"; +}; + +&uart0 { + pinctrl-names = "default"; + pinctrl-0 = <&uart0_pins_a>; + status = "okay"; +}; + +&usbphy { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts new file mode 100644 index 000000000000..338e786155b1 --- /dev/null +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts @@ -0,0 +1,199 @@ +/* + * Copyright (C) 2017 Jagan Teki + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +/dts-v1/; + +#include "sun50i-a64.dtsi" + +#include + +/ { + model = "Olimex A64-Olinuxino"; + compatible = "olimex,a64-olinuxino", "allwinner,sun50i-a64"; + + aliases { + serial0 = &uart0; + }; + + chosen { + stdout-path = "serial0:115200n8"; + }; +}; + +&mmc0 { + pinctrl-names = "default"; + pinctrl-0 = <&mmc0_pins>; + vmmc-supply = <®_dcdc1>; + cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>; + cd-inverted; + disable-wp; + bus-width = <4>; + status = "okay"; +}; + +&r_rsb { + status = "okay"; + + axp803: pmic@3a3 { + compatible = "x-powers,axp803"; + reg = <0x3a3>; + interrupt-parent = <&r_intc>; + interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + }; +}; + +#include "axp803.dtsi" + +®_aldo1 { + regulator-always-on; + regulator-min-microvolt = <2800000>; + regulator-max-microvolt = <2800000>; + regulator-name = "vcc-pe"; +}; + +®_aldo2 { + regulator-always-on; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-name = "vcc-pl"; +}; + +®_aldo3 { + regulator-always-on; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3000000>; + regulator-name = "vcc-pll-avcc"; +}; + +®_dcdc1 { + regulator-always-on; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-name = "vcc-3v3"; +}; + +®_dcdc2 { + regulator-always-on; + regulator-min-microvolt = <1040000>; + regulator-max-microvolt = <1300000>; + regulator-name = "vdd-cpux"; +}; + +/* DCDC3 is polyphased with DCDC2 */ + +®_dcdc5 { + regulator-always-on; + regulator-min-microvolt = <1500000>; + regulator-max-microvolt = <1500000>; + regulator-name = "vcc-ddr3"; +}; + +®_dcdc6 { + regulator-always-on; + regulator-min-microvolt = <1100000>; + regulator-max-microvolt = <1100000>; + regulator-name = "vdd-sys"; +}; + +®_dldo1 { + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-name = "vcc-hdmi"; +}; + +®_dldo2 { + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-name = "vcc-mipi"; +}; + +®_dldo3 { + regulator-min-microvolt = <2800000>; + regulator-max-microvolt = <2800000>; + regulator-name = "vcc-avdd-csi"; +}; + +®_dldo4 { + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-name = "vcc-wifi-io"; +}; + +®_eldo1 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-name = "cpvdd"; +}; + +®_eldo2 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-name = "vcc-dvdd-csi"; +}; + +®_fldo1 { + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + regulator-name = "vcc-1v2-hsic"; +}; + +/* + * The A64 chip cannot work without this regulator off, although + * it seems to be only driving the AR100 core. + * Maybe we don't still know well about CPUs domain. 
+ */ +®_fldo2 { + regulator-always-on; + regulator-min-microvolt = <1100000>; + regulator-max-microvolt = <1100000>; + regulator-name = "vdd-cpus"; +}; + +®_rtc_ldo { + regulator-name = "vcc-rtc"; +}; + +&uart0 { + pinctrl-names = "default"; + pinctrl-0 = <&uart0_pins_a>; + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts index 7c533b6d4ba9..caf8b6fbe5e3 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts @@ -107,6 +107,118 @@ &ohci1 { status = "okay"; }; +&r_rsb { + status = "okay"; + + axp803: pmic@3a3 { + compatible = "x-powers,axp803"; + reg = <0x3a3>; + interrupt-parent = <&r_intc>; + interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + }; +}; + +#include "axp803.dtsi" + +®_aldo2 { + regulator-always-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + regulator-name = "vcc-pl"; +}; + +®_aldo3 { + regulator-always-on; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3000000>; + regulator-name = "vcc-pll-avcc"; +}; + +®_dc1sw { + regulator-name = "vcc-phy"; +}; + +®_dcdc1 { + regulator-always-on; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-name = "vcc-3v3"; +}; + +®_dcdc2 { + regulator-always-on; + regulator-min-microvolt = <1040000>; + regulator-max-microvolt = <1300000>; + regulator-name = "vdd-cpux"; +}; + +/* DCDC3 is polyphased with DCDC2 */ + +/* + * The DRAM chips used by Pine64 boards are DDR3L-compatible, so they can + * work at 1.35V with less power consumption. + * As AXP803 DCDC5 cannot reach 1.35V accurately, use 1.36V instead. + */ +®_dcdc5 { + regulator-always-on; + regulator-min-microvolt = <1360000>; + regulator-max-microvolt = <1360000>; + regulator-name = "vcc-dram"; +}; + +®_dcdc6 { + regulator-always-on; + regulator-min-microvolt = <1100000>; + regulator-max-microvolt = <1100000>; + regulator-name = "vdd-sys"; +}; + +®_dldo1 { + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-name = "vcc-hdmi"; +}; + +®_dldo2 { + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-name = "vcc-mipi"; +}; + +®_dldo4 { + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-name = "vcc-wifi"; +}; + +®_eldo1 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-name = "cpvdd"; +}; + +®_fldo1 { + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + regulator-name = "vcc-1v2-hsic"; +}; + +/* + * The A64 chip cannot work without this regulator off, although + * it seems to be only driving the AR100 core. + * Maybe we don't still know well about CPUs domain. 
+ */ +®_fldo2 { + regulator-always-on; + regulator-min-microvolt = <1100000>; + regulator-max-microvolt = <1100000>; + regulator-name = "vdd-cpus"; +}; + +®_rtc_ldo { + regulator-name = "vcc-rtc"; +}; + /* On Exp and Euler connectors */ &uart0 { pinctrl-names = "default"; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts index d891a1a27f6c..17ccc12b58df 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts @@ -95,6 +95,28 @@ &ohci1 { status = "okay"; }; +®_dc1sw { + regulator-name = "vcc-phy"; +}; + +®_dldo1 { + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-name = "vcc-hdmi"; +}; + +®_dldo2 { + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-name = "vcc-mipi"; +}; + +®_dldo4 { + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-name = "vcc-wifi"; +}; + &uart0 { pinctrl-names = "default"; pinctrl-0 = <&uart0_pins_a>; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi index 475518b031dd..a5da18a6f286 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi @@ -63,3 +63,89 @@ &mmc0 { bus-width = <4>; status = "okay"; }; + +&r_rsb { + status = "okay"; + + axp803: pmic@3a3 { + compatible = "x-powers,axp803"; + reg = <0x3a3>; + interrupt-parent = <&r_intc>; + interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + }; +}; + +#include "axp803.dtsi" + +®_aldo2 { + regulator-always-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + regulator-name = "vcc-pl"; +}; + +®_aldo3 { + regulator-always-on; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3000000>; + regulator-name = "vcc-pll-avcc"; +}; + +®_dcdc1 { + regulator-always-on; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-name = "vcc-3v3"; +}; + +®_dcdc2 { + regulator-always-on; + regulator-min-microvolt = <1040000>; + regulator-max-microvolt = <1300000>; + regulator-name = "vdd-cpux"; +}; + +/* DCDC3 is polyphased with DCDC2 */ + +®_dcdc5 { + regulator-always-on; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + regulator-name = "vcc-dram"; +}; + +®_dcdc6 { + regulator-always-on; + regulator-min-microvolt = <1100000>; + regulator-max-microvolt = <1100000>; + regulator-name = "vdd-sys"; +}; + +®_eldo1 { + regulator-always-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-name = "vdd-1v8-lpddr"; +}; + +®_fldo1 { + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + regulator-name = "vcc-1v2-hsic"; +}; + +/* + * The A64 chip cannot work without this regulator off, although + * it seems to be only driving the AR100 core. + * Maybe we don't still know well about CPUs domain. 
+ */ +®_fldo2 { + regulator-always-on; + regulator-min-microvolt = <1100000>; + regulator-max-microvolt = <1100000>; + regulator-name = "vdd-cpus"; +}; + +®_rtc_ldo { + regulator-name = "vcc-rtc"; +}; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi index 68aadc9b96dc..8c8db1b057df 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi @@ -467,6 +467,15 @@ rtc: rtc@1f00000 { ; }; + r_intc: interrupt-controller@1f00c00 { + compatible = "allwinner,sun50i-a64-r-intc", + "allwinner,sun6i-a31-r-intc"; + interrupt-controller; + #interrupt-cells = <2>; + reg = <0x01f00c00 0x400>; + interrupts = ; + }; + r_ccu: clock@1f01400 { compatible = "allwinner,sun50i-a64-r-ccu"; reg = <0x01f01400 0x100>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi index dc478d094c11..4157987f4a3d 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi @@ -121,6 +121,13 @@ hdmi_connector_in: endpoint { }; }; +&cec_AO { + status = "okay"; + pinctrl-0 = <&ao_cec_pins>; + pinctrl-names = "default"; + hdmi-phandle = <&hdmi_tx>; +}; + &cvbs_vdac_port { cvbs_vdac_out: endpoint { remote-endpoint = <&cvbs_connector_in>; @@ -161,7 +168,8 @@ &pwm_ef { &sd_emmc_a { status = "okay"; pinctrl-0 = <&sdio_pins>; - pinctrl-names = "default"; + pinctrl-1 = <&sdio_clk_gate_pins>; + pinctrl-names = "default", "clk-gate"; #address-cells = <1>; #size-cells = <0>; @@ -187,7 +195,8 @@ brcmf: wifi@1 { &sd_emmc_b { status = "okay"; pinctrl-0 = <&sdcard_pins>; - pinctrl-names = "default"; + pinctrl-1 = <&sdcard_clk_gate_pins>; + pinctrl-names = "default", "clk-gate"; bus-width = <4>; cap-sd-highspeed; @@ -205,10 +214,10 @@ &sd_emmc_b { &sd_emmc_c { status = "okay"; pinctrl-0 = <&emmc_pins>; - pinctrl-names = "default"; + pinctrl-1 = <&emmc_clk_gate_pins>; + pinctrl-names = "default", "clk-gate"; bus-width = <8>; - cap-sd-highspeed; cap-mmc-highspeed; max-frequency = <200000000>; non-removable; diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi index 738ed689ff69..f175db846286 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi @@ -225,7 +225,7 @@ reset: reset-controller@4404 { }; uart_A: serial@84c0 { - compatible = "amlogic,meson-uart"; + compatible = "amlogic,meson-gx-uart", "amlogic,meson-uart"; reg = <0x0 0x84c0 0x0 0x14>; interrupts = ; clocks = <&xtal>; @@ -233,7 +233,7 @@ uart_A: serial@84c0 { }; uart_B: serial@84dc { - compatible = "amlogic,meson-uart"; + compatible = "amlogic,meson-gx-uart", "amlogic,meson-uart"; reg = <0x0 0x84dc 0x0 0x14>; interrupts = ; clocks = <&xtal>; @@ -279,7 +279,7 @@ pwm_ef: pwm@86c0 { }; uart_C: serial@8700 { - compatible = "amlogic,meson-uart"; + compatible = "amlogic,meson-gx-uart", "amlogic,meson-uart"; reg = <0x0 0x8700 0x0 0x14>; interrupts = ; clocks = <&xtal>; @@ -367,26 +367,40 @@ aobus: aobus@c8100000 { #size-cells = <2>; ranges = <0x0 0x0 0x0 0xc8100000 0x0 0x100000>; - clkc_AO: clock-controller@040 { - compatible = "amlogic,gx-aoclkc", "amlogic,gxbb-aoclkc"; - reg = <0x0 0x00040 0x0 0x4>; - #clock-cells = <1>; - #reset-cells = <1>; + sysctrl_AO: sys-ctrl@0 { + compatible = "amlogic,meson-gx-ao-sysctrl", "syscon", "simple-mfd"; + reg = <0x0 0x0 0x0 0x100>; + + clkc_AO: clock-controller { + compatible = "amlogic,meson-gx-aoclkc"; + #clock-cells = <1>; + 
#reset-cells = <1>; + }; + }; + + cec_AO: cec@100 { + compatible = "amlogic,meson-gx-ao-cec"; + reg = <0x0 0x00100 0x0 0x14>; + interrupts = ; + }; + + sec_AO: ao-secure@140 { + compatible = "amlogic,meson-gx-ao-secure", "syscon"; + reg = <0x0 0x140 0x0 0x140>; + amlogic,has-chip-id; }; uart_AO: serial@4c0 { - compatible = "amlogic,meson-uart"; + compatible = "amlogic,meson-gx-uart", "amlogic,meson-ao-uart", "amlogic,meson-uart"; reg = <0x0 0x004c0 0x0 0x14>; interrupts = ; - clocks = <&xtal>; status = "disabled"; }; uart_AO_B: serial@4e0 { - compatible = "amlogic,meson-uart"; + compatible = "amlogic,meson-gx-uart", "amlogic,meson-ao-uart", "amlogic,meson-uart"; reg = <0x0 0x004e0 0x0 0x14>; interrupts = ; - clocks = <&xtal>; status = "disabled"; }; @@ -437,9 +451,9 @@ hiubus: hiubus@c883c000 { mailbox: mailbox@404 { compatible = "amlogic,meson-gx-mhu", "amlogic,meson-gxbb-mhu"; reg = <0 0x404 0 0x4c>; - interrupts = <0 208 IRQ_TYPE_EDGE_RISING>, - <0 209 IRQ_TYPE_EDGE_RISING>, - <0 210 IRQ_TYPE_EDGE_RISING>; + interrupts = , + , + ; #mbox-cells = <1>; }; }; @@ -448,7 +462,7 @@ ethmac: ethernet@c9410000 { compatible = "amlogic,meson-gx-dwmac", "amlogic,meson-gxbb-dwmac", "snps,dwmac"; reg = <0x0 0xc9410000 0x0 0x10000 0x0 0xc8834540 0x0 0x4>; - interrupts = <0 8 1>; + interrupts = ; interrupt-names = "macirq"; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts index fa462831ccaf..4b17a76959b2 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts @@ -107,6 +107,9 @@ vddio_tf: regulator-vddio-tf { states = <3300000 0>, <1800000 1>; + + regulator-settling-time-up-us = <100>; + regulator-settling-time-down-us = <5000>; }; wifi_32k: wifi-32k { @@ -175,6 +178,64 @@ &ir { pinctrl-names = "default"; }; +&pinctrl_aobus { + gpio-line-names = "UART TX", "UART RX", "Power Control", "Power Key In", + "VCCK En", "CON1 Header Pin31", + "I2S Header Pin6", "IR In", "I2S Header Pin7", + "I2S Header Pin3", "I2S Header Pin4", + "I2S Header Pin5", "HDMI CEC", "SYS LED"; +}; + +&pinctrl_periphs { + gpio-line-names = /* Bank GPIOZ */ + "Eth MDIO", "Eth MDC", "Eth RGMII RX Clk", + "Eth RX DV", "Eth RX D0", "Eth RX D1", "Eth RX D2", + "Eth RX D3", "Eth RGMII TX Clk", "Eth TX En", + "Eth TX D0", "Eth TX D1", "Eth TX D2", "Eth TX D3", + "Eth PHY nRESET", "Eth PHY Intc", + /* Bank GPIOH */ + "HDMI HPD", "HDMI DDC SDA", "HDMI DDC SCL", + "CON1 Header Pin33", + /* Bank BOOT */ + "eMMC D0", "eMMC D1", "eMMC D2", "eMMC D3", "eMMC D4", + "eMMC D5", "eMMC D6", "eMMC D7", "eMMC Clk", + "eMMC Reset", "eMMC CMD", + "", "", "", "", "eMMC DS", + "", "", + /* Bank CARD */ + "SDCard D1", "SDCard D0", "SDCard CLK", "SDCard CMD", + "SDCard D3", "SDCard D2", "SDCard Det", + /* Bank GPIODV */ + "", "", "", "", "", "", "", "", "", "", "", "", "", + "", "", "", "", "", "", "", "", "", "", "", + "I2C A SDA", "I2C A SCK", "I2C B SDA", "I2C B SCK", + "VDDEE Regulator", "VCCK Regulator", + /* Bank GPIOY */ + "CON1 Header Pin7", "CON1 Header Pin11", + "CON1 Header Pin13", "CON1 Header Pin15", + "CON1 Header Pin18", "CON1 Header Pin19", + "CON1 Header Pin22", "CON1 Header Pin21", + "CON1 Header Pin24", "CON1 Header Pin23", + "CON1 Header Pin26", "CON1 Header Pin29", + "CON1 Header Pin32", "CON1 Header Pin8", + "CON1 Header Pin10", "CON1 Header Pin16", + "CON1 Header Pin12", + /* Bank GPIOX */ + "WIFI SDIO D0", "WIFI SDIO D1", "WIFI SDIO D2", + "WIFI SDIO D3", "WIFI SDIO CLK", 
"WIFI SDIO CMD", + "WIFI Power Enable", "WIFI WAKE HOST", + "Bluetooth PCM DOUT", "Bluetooth PCM DIN", + "Bluetooth PCM SYNC", "Bluetooth PCM CLK", + "Bluetooth UART TX", "Bluetooth UART RX", + "Bluetooth UART CTS", "Bluetooth UART RTS", + "", "", "", "WIFI 32K", "Bluetooth Enable", + "Bluetooth WAKE HOST", + /* Bank GPIOCLK */ + "", "CON1 Header Pin35", "", "", + /* GPIO_TEST_N */ + ""; +}; + &pwm_ef { status = "okay"; pinctrl-0 = <&pwm_e_pins>; @@ -192,7 +253,8 @@ &saradc { &sd_emmc_a { status = "okay"; pinctrl-0 = <&sdio_pins>, <&sdio_irq_pins>; - pinctrl-names = "default"; + pinctrl-1 = <&sdio_clk_gate_pins>; + pinctrl-names = "default", "clk-gate"; #address-cells = <1>; #size-cells = <0>; @@ -218,11 +280,16 @@ brcmf: wifi@1 { &sd_emmc_b { status = "okay"; pinctrl-0 = <&sdcard_pins>; - pinctrl-names = "default"; + pinctrl-1 = <&sdcard_clk_gate_pins>; + pinctrl-names = "default", "clk-gate"; bus-width = <4>; cap-sd-highspeed; - max-frequency = <100000000>; + sd-uhs-sdr12; + sd-uhs-sdr25; + sd-uhs-sdr50; + sd-uhs-sdr104; + max-frequency = <200000000>; disable-wp; cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; @@ -236,10 +303,10 @@ &sd_emmc_b { &sd_emmc_c { status = "disabled"; pinctrl-0 = <&emmc_pins>; - pinctrl-names = "default"; + pinctrl-1 = <&emmc_clk_gate_pins>; + pinctrl-names = "default", "clk-gate"; bus-width = <8>; - cap-sd-highspeed; max-frequency = <200000000>; non-removable; disable-wp; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts index a1078b3e1c76..38dfdde5c147 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts @@ -51,7 +51,7 @@ / { compatible = "nexbox,a95x", "amlogic,meson-gxbb"; model = "NEXBOX A95X"; - + aliases { serial0 = &uart_AO; }; @@ -171,6 +171,13 @@ cvbs_vdac_out: endpoint { }; }; +&cec_AO { + status = "okay"; + pinctrl-0 = <&ao_cec_pins>; + pinctrl-names = "default"; + hdmi-phandle = <&hdmi_tx>; +}; + ðmac { status = "okay"; pinctrl-0 = <ð_rmii_pins>; @@ -225,7 +232,8 @@ &pwm_ef { &sd_emmc_a { status = "okay"; pinctrl-0 = <&sdio_pins>; - pinctrl-names = "default"; + pinctrl-1 = <&sdio_clk_gate_pins>; + pinctrl-names = "default", "clk-gate"; #address-cells = <1>; #size-cells = <0>; @@ -246,7 +254,8 @@ &sd_emmc_a { &sd_emmc_b { status = "okay"; pinctrl-0 = <&sdcard_pins>; - pinctrl-names = "default"; + pinctrl-1 = <&sdcard_clk_gate_pins>; + pinctrl-names = "default", "clk-gate"; bus-width = <4>; cap-sd-highspeed; @@ -264,10 +273,10 @@ &sd_emmc_b { &sd_emmc_c { status = "okay"; pinctrl-0 = <&emmc_pins>; - pinctrl-names = "default"; + pinctrl-1 = <&emmc_clk_gate_pins>; + pinctrl-names = "default", "clk-gate"; bus-width = <8>; - cap-sd-highspeed; cap-mmc-highspeed; max-frequency = <200000000>; non-removable; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts index d147c853ab05..1ffa1c238a72 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts @@ -50,7 +50,7 @@ / { compatible = "hardkernel,odroid-c2", "amlogic,meson-gxbb"; model = "Hardkernel ODROID-C2"; - + aliases { serial0 = &uart_AO; }; @@ -253,7 +253,8 @@ &scpi_clocks { &sd_emmc_b { status = "okay"; pinctrl-0 = <&sdcard_pins>; - pinctrl-names = "default"; + pinctrl-1 = <&sdcard_clk_gate_pins>; + pinctrl-names = "default", "clk-gate"; bus-width = <4>; cap-sd-highspeed; @@ -271,10 +272,10 @@ &sd_emmc_b { &sd_emmc_c { status = 
"okay"; pinctrl-0 = <&emmc_pins>; - pinctrl-names = "default"; + pinctrl-1 = <&emmc_clk_gate_pins>; + pinctrl-names = "default", "clk-gate"; bus-width = <8>; - cap-sd-highspeed; max-frequency = <200000000>; non-removable; disable-wp; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi index d904deb1018c..23c08c3afd0a 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi @@ -84,6 +84,9 @@ vddio_card: gpio-regulator { /* Based on P200 schematics, signal CARD_1.8V/3.3V_CTR */ states = <1800000 0 3300000 1>; + + regulator-settling-time-up-us = <10000>; + regulator-settling-time-down-us = <150000>; }; vddio_boot: regulator-vddio_boot { @@ -148,6 +151,13 @@ hdmi_connector_in: endpoint { }; }; +&cec_AO { + status = "okay"; + pinctrl-0 = <&ao_cec_pins>; + pinctrl-names = "default"; + hdmi-phandle = <&hdmi_tx>; +}; + &cvbs_vdac_port { cvbs_vdac_out: endpoint { remote-endpoint = <&cvbs_connector_in>; @@ -184,7 +194,8 @@ &pwm_ef { &sd_emmc_a { status = "okay"; pinctrl-0 = <&sdio_pins>; - pinctrl-names = "default"; + pinctrl-1 = <&sdio_clk_gate_pins>; + pinctrl-names = "default", "clk-gate"; #address-cells = <1>; #size-cells = <0>; @@ -210,10 +221,14 @@ brcmf: wifi@1 { &sd_emmc_b { status = "okay"; pinctrl-0 = <&sdcard_pins>; - pinctrl-names = "default"; + pinctrl-1 = <&sdcard_clk_gate_pins>; + pinctrl-names = "default", "clk-gate"; bus-width = <4>; cap-sd-highspeed; + sd-uhs-sdr12; + sd-uhs-sdr25; + sd-uhs-sdr50; max-frequency = <100000000>; disable-wp; @@ -228,10 +243,10 @@ &sd_emmc_b { &sd_emmc_c { status = "okay"; pinctrl-0 = <&emmc_pins>; - pinctrl-names = "default"; + pinctrl-1 = <&emmc_clk_gate_pins>; + pinctrl-names = "default", "clk-gate"; bus-width = <8>; - cap-sd-highspeed; cap-mmc-highspeed; max-frequency = <200000000>; non-removable; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi index 346753fb6324..f2bc6dea1fc6 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi @@ -155,7 +155,8 @@ &pwm_ef { &sd_emmc_a { status = "okay"; pinctrl-0 = <&sdio_pins &sdio_irq_pins>; - pinctrl-names = "default"; + pinctrl-1 = <&sdio_clk_gate_pins>; + pinctrl-names = "default", "clk-gate"; #address-cells = <1>; #size-cells = <0>; @@ -181,7 +182,8 @@ brcmf: wifi@1 { &sd_emmc_b { status = "okay"; pinctrl-0 = <&sdcard_pins>; - pinctrl-names = "default"; + pinctrl-1 = <&sdcard_clk_gate_pins>; + pinctrl-names = "default", "clk-gate"; bus-width = <4>; cap-sd-highspeed; @@ -198,10 +200,10 @@ &sd_emmc_b { &sd_emmc_c { status = "okay"; pinctrl-0 = <&emmc_pins>; - pinctrl-names = "default"; + pinctrl-1 = <&emmc_clk_gate_pins>; + pinctrl-names = "default", "clk-gate"; bus-width = <8>; - cap-sd-highspeed; cap-mmc-highspeed; max-frequency = <200000000>; non-removable; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-play2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-play2.dts index e76ac313fef9..f7144fd5e03f 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-play2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-play2.dts @@ -108,6 +108,12 @@ hdmi_connector_in: endpoint { }; }; +&cec_AO { + status = "okay"; + pinctrl-0 = <&ao_cec_pins>; + pinctrl-names = "default"; + hdmi-phandle = <&hdmi_tx>; +}; &cvbs_vdac_port { cvbs_vdac_out: endpoint { diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi 
b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi index 17d3efdf1469..af834cdbba79 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi @@ -307,6 +307,15 @@ spifc: spi@8c80 { }; }; +&cec_AO { + clocks = <&clkc_AO CLKID_AO_CEC_32K>; + clock-names = "core"; +}; + +&clkc_AO { + compatible = "amlogic,meson-gxbb-aoclkc", "amlogic,meson-gx-aoclkc"; +}; + ðmac { clocks = <&clkc CLKID_ETH>, <&clkc CLKID_FCLK_DIV2>, @@ -383,6 +392,17 @@ mux { }; }; + emmc_clk_gate_pins: emmc_clk_gate { + mux { + groups = "BOOT_8"; + function = "gpio_periphs"; + }; + cfg-pull-down { + pins = "BOOT_8"; + bias-pull-down; + }; + }; + nor_pins: nor { mux { groups = "nor_d", @@ -421,6 +441,17 @@ mux { }; }; + sdcard_clk_gate_pins: sdcard_clk_gate { + mux { + groups = "CARD_2"; + function = "gpio_periphs"; + }; + cfg-pull-down { + pins = "CARD_2"; + bias-pull-down; + }; + }; + sdio_pins: sdio { mux { groups = "sdio_d0", @@ -433,6 +464,17 @@ mux { }; }; + sdio_clk_gate_pins: sdio_clk_gate { + mux { + groups = "GPIOX_4"; + function = "gpio_periphs"; + }; + cfg-pull-down { + pins = "GPIOX_4"; + bias-pull-down; + }; + }; + sdio_irq_pins: sdio_irq { mux { groups = "sdio_irq"; @@ -652,21 +694,21 @@ &saradc { &sd_emmc_a { clocks = <&clkc CLKID_SD_EMMC_A>, - <&xtal>, + <&clkc CLKID_SD_EMMC_A_CLK0>, <&clkc CLKID_FCLK_DIV2>; clock-names = "core", "clkin0", "clkin1"; }; &sd_emmc_b { clocks = <&clkc CLKID_SD_EMMC_B>, - <&xtal>, + <&clkc CLKID_SD_EMMC_B_CLK0>, <&clkc CLKID_FCLK_DIV2>; clock-names = "core", "clkin0", "clkin1"; }; &sd_emmc_c { clocks = <&clkc CLKID_SD_EMMC_C>, - <&xtal>, + <&clkc CLKID_SD_EMMC_C_CLK0>, <&clkc CLKID_FCLK_DIV2>; clock-names = "core", "clkin0", "clkin1"; }; @@ -682,6 +724,31 @@ &spifc { clocks = <&clkc CLKID_SPI>; }; +&uart_A { + clocks = <&xtal>, <&clkc CLKID_UART0>, <&xtal>; + clock-names = "xtal", "pclk", "baud"; +}; + +&uart_AO { + clocks = <&xtal>, <&clkc CLKID_CLK81>, <&xtal>; + clock-names = "xtal", "pclk", "baud"; +}; + +&uart_AO_B { + clocks = <&xtal>, <&clkc CLKID_CLK81>, <&xtal>; + clock-names = "xtal", "pclk", "baud"; +}; + +&uart_B { + clocks = <&xtal>, <&clkc CLKID_UART1>, <&xtal>; + clock-names = "xtal", "core", "baud"; +}; + +&uart_C { + clocks = <&xtal>, <&clkc CLKID_UART2>, <&xtal>; + clock-names = "xtal", "core", "baud"; +}; + &vpu { compatible = "amlogic,meson-gxbb-vpu", "amlogic,meson-gx-vpu"; }; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-p230.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-p230.dts index 3e0c023d6abd..6827f235d7cf 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-p230.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-p230.dts @@ -97,6 +97,13 @@ hdmi_connector_in: endpoint { }; }; +&cec_AO { + status = "okay"; + pinctrl-0 = <&ao_cec_pins>; + pinctrl-names = "default"; + hdmi-phandle = <&hdmi_tx>; +}; + /* P230 has exclusive choice between internal or external PHY */ ðmac { pinctrl-0 = <ð_pins>; @@ -124,7 +131,6 @@ external_phy: ethernet-phy@0 { }; }; - &hdmi_tx { status = "okay"; pinctrl-0 = <&hdmi_hpd_pins>, <&hdmi_i2c_pins>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts index 2a5804ce7f4b..977b4240f3c1 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts @@ -123,7 +123,8 @@ &pwm_ef { &sd_emmc_b { status = "okay"; pinctrl-0 = <&sdcard_pins>; - pinctrl-names = "default"; + pinctrl-1 = <&sdcard_clk_gate_pins>; 
+ pinctrl-names = "default", "clk-gate"; bus-width = <4>; cap-sd-highspeed; @@ -141,10 +142,10 @@ &sd_emmc_b { &sd_emmc_c { status = "okay"; pinctrl-0 = <&emmc_pins>; - pinctrl-names = "default"; + pinctrl-1 = <&emmc_clk_gate_pins>; + pinctrl-names = "default", "clk-gate"; bus-width = <8>; - cap-sd-highspeed; cap-mmc-highspeed; max-frequency = <100000000>; non-removable; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts index 94567eb17875..edc512ad0bac 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts @@ -67,6 +67,13 @@ hdmi_connector_in: endpoint { }; }; +&cec_AO { + status = "okay"; + pinctrl-0 = <&ao_cec_pins>; + pinctrl-names = "default"; + hdmi-phandle = <&hdmi_tx>; +}; + &hdmi_tx { status = "okay"; pinctrl-0 = <&hdmi_hpd_pins>, <&hdmi_i2c_pins>; @@ -105,6 +112,62 @@ &ir { linux,rc-map-name = "rc-geekbox"; }; +&pinctrl_aobus { + gpio-line-names = "UART TX", + "UART RX", + "Power Key In", + "J9 Header Pin35", + "J9 Header Pin16", + "J9 Header Pin15", + "J9 Header Pin33", + "IR In", + "HDMI CEC", + "SYS LED"; +}; + +&pinctrl_periphs { + gpio-line-names = /* Bank GPIOZ */ + "", "", "", "", "", "", "", + "", "", "", "", "", "", "", + "Power OFF", + "VCCK Enable", + /* Bank GPIOH */ + "HDMI HPD", "HDMI SDA", "HDMI SCL", + "HDMI_5V_EN", "SPDIF", + "J9 Header Pin37", + "J9 Header Pin30", + "J9 Header Pin29", + "J9 Header Pin32", + "J9 Header Pin31", + /* Bank BOOT */ + "eMMC D0", "eMMC D1", "eMMC D2", "eMMC D3", + "eMMC D4", "eMMC D5", "eMMC D6", "eMMC D7", + "eMMC Clk", "eMMC Reset", "eMMC CMD", + "", "BOOT_MODE", "", "", "eMMC Data Strobe", + /* Bank CARD */ + "SDCard D1", "SDCard D0", "SDCard CLK", "SDCard CMD", + "SDCard D3", "SDCard D2", "SDCard Det", + /* Bank GPIODV */ + "", "", "", "", "", "", "", "", "", "", "", "", + "", "", "", "", "", "", "", "", "", "", "", "", + "I2C A SDA", "I2C A SCK", "I2C B SDA", "I2C B SCK", + "VCCK Regulator", "VDDEE Regulator", + /* Bank GPIOX */ + "WIFI SDIO D0", "WIFI SDIO D1", "WIFI SDIO D2", + "WIFI SDIO D3", "WIFI SDIO CLK", "WIFI SDIO CMD", + "WIFI Power Enable", "WIFI WAKE HOST", + "Bluetooth PCM DOUT", "Bluetooth PCM DIN", + "Bluetooth PCM SYNC", "Bluetooth PCM CLK", + "Bluetooth UART TX", "Bluetooth UART RX", + "Bluetooth UART CTS", "Bluetooth UART RTS", + "WIFI 32K", "Bluetooth Enable", + "Bluetooth WAKE HOST", + /* Bank GPIOCLK */ + "", "J9 Header Pin39", + /* GPIO_TEST_N */ + ""; +}; + &pwm_AO_ab { status = "okay"; pinctrl-0 = <&pwm_ao_a_3_pins>, <&pwm_ao_b_pins>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts index 266fbcf3e47f..64c54c92e214 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts @@ -91,6 +91,9 @@ vcc_card: regulator-vcc-card { states = <3300000 0>, <1800000 1>; + + regulator-settling-time-up-us = <200>; + regulator-settling-time-down-us = <50000>; }; vddio_boot: regulator-vddio_boot { @@ -101,6 +104,13 @@ vddio_boot: regulator-vddio_boot { }; }; +&cec_AO { + status = "okay"; + pinctrl-0 = <&ao_cec_pins>; + pinctrl-names = "default"; + hdmi-phandle = <&hdmi_tx>; +}; + &cvbs_vdac_port { cvbs_vdac_out: endpoint { remote-endpoint = <&cvbs_connector_in>; @@ -129,14 +139,75 @@ hdmi_tx_tmds_out: endpoint { }; }; +&pinctrl_aobus { + gpio-line-names = "UART TX", + "UART RX", + "Blue LED", + 
"SDCard Voltage Switch", + "7J1 Header Pin5", + "7J1 Header Pin3", + "7J1 Header Pin12", + "IR In", + "9J3 Switch HDMI CEC/7J1 Header Pin11", + "7J1 Header Pin13"; +}; + +&pinctrl_periphs { + gpio-line-names = /* Bank GPIOZ */ + "", "", "", "", "", "", "", + "", "", "", "", "", "", "", + "Eth Link LED", "Eth Activity LED", + /* Bank GPIOH */ + "HDMI HPD", "HDMI SDA", "HDMI SCL", + "HDMI_5V_EN", "9J1 Header Pin2", + "Analog Audio Mute", + "2J3 Header Pin6", + "2J3 Header Pin5", + "2J3 Header Pin4", + "2J3 Header Pin3", + /* Bank BOOT */ + "eMMC D0", "eMMC D1", "eMMC D2", "eMMC D3", + "eMMC D4", "eMMC D5", "eMMC D6", "eMMC D7", + "eMMC Clk", "eMMC Reset", "eMMC CMD", + "ALT BOOT MODE", "", "", "", "eMMC Data Strobe", + /* Bank CARD */ + "SDCard D1", "SDCard D0", "SDCard CLK", "SDCard CMD", + "SDCard D3", "SDCard D2", "SDCard Det", + /* Bank GPIODV */ + "", "", "", "", "", "", "", "", "", "", "", "", + "", "", "", "", "", "", "", "", "", "", "", "", + "Green LED", "VCCK Enable", + "7J1 Header Pin27", "7J1 Header Pin28", + "VCCK Regulator", "VDDEE Regulator", + /* Bank GPIOX */ + "7J1 Header Pin22", "7J1 Header Pin26", + "7J1 Header Pin36", "7J1 Header Pin38", + "7J1 Header Pin40", "7J1 Header Pin37", + "7J1 Header Pin33", "7J1 Header Pin35", + "7J1 Header Pin19", "7J1 Header Pin21", + "7J1 Header Pin24", "7J1 Header Pin23", + "7J1 Header Pin8", "7J1 Header Pin10", + "7J1 Header Pin16", "7J1 Header Pin18", + "7J1 Header Pin32", "7J1 Header Pin29", + "7J1 Header Pin31", + /* Bank GPIOCLK */ + "7J1 Header Pin7", "", + /* GPIO_TEST_N */ + "7J1 Header Pin15"; +}; + /* SD card */ &sd_emmc_b { status = "okay"; pinctrl-0 = <&sdcard_pins>; - pinctrl-names = "default"; + pinctrl-1 = <&sdcard_clk_gate_pins>; + pinctrl-names = "default", "clk-gate"; bus-width = <4>; cap-sd-highspeed; + sd-uhs-sdr12; + sd-uhs-sdr25; + sd-uhs-sdr50; max-frequency = <100000000>; disable-wp; @@ -151,10 +222,12 @@ &sd_emmc_b { &sd_emmc_c { status = "okay"; pinctrl-0 = <&emmc_pins>; - pinctrl-names = "default"; + pinctrl-1 = <&emmc_clk_gate_pins>; + pinctrl-names = "default", "clk-gate"; bus-width = <8>; cap-mmc-highspeed; + mmc-ddr-3_3v; max-frequency = <50000000>; non-removable; disable-wp; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts index 6633a5d8fdd3..1b8f32867aa1 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts @@ -140,6 +140,13 @@ hdmi_connector_in: endpoint { }; }; +&cec_AO { + status = "okay"; + pinctrl-0 = <&ao_cec_pins>; + pinctrl-names = "default"; + hdmi-phandle = <&hdmi_tx>; +}; + &cvbs_vdac_port { cvbs_vdac_out: endpoint { remote-endpoint = <&cvbs_connector_in>; @@ -182,7 +189,8 @@ &pwm_ef { &sd_emmc_a { status = "okay"; pinctrl-0 = <&sdio_pins>; - pinctrl-names = "default"; + pinctrl-1 = <&sdio_clk_gate_pins>; + pinctrl-names = "default", "clk-gate"; #address-cells = <1>; #size-cells = <0>; @@ -203,7 +211,8 @@ &sd_emmc_a { &sd_emmc_b { status = "okay"; pinctrl-0 = <&sdcard_pins>; - pinctrl-names = "default"; + pinctrl-1 = <&sdcard_clk_gate_pins>; + pinctrl-names = "default", "clk-gate"; bus-width = <4>; cap-sd-highspeed; @@ -221,10 +230,10 @@ &sd_emmc_b { &sd_emmc_c { status = "okay"; pinctrl-0 = <&emmc_pins>; - pinctrl-names = "default"; + pinctrl-1 = <&emmc_clk_gate_pins>; + pinctrl-names = "default", "clk-gate"; bus-width = <8>; - cap-sd-highspeed; cap-mmc-highspeed; max-frequency = <200000000>; non-removable; diff --git 
a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dts index 6ab17c1eeefd..6e2bf858291c 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dts @@ -71,6 +71,13 @@ hdmi_connector_in: endpoint { }; }; +&cec_AO { + status = "okay"; + pinctrl-0 = <&ao_cec_pins>; + pinctrl-names = "default"; + hdmi-phandle = <&hdmi_tx>; +}; + &cvbs_vdac_port { cvbs_vdac_out: endpoint { remote-endpoint = <&cvbs_connector_in>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi index f3eea8e89d12..129af9068814 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi @@ -95,7 +95,8 @@ &saradc { &sd_emmc_a { status = "okay"; pinctrl-0 = <&sdio_pins>; - pinctrl-names = "default"; + pinctrl-1 = <&sdio_clk_gate_pins>; + pinctrl-names = "default", "clk-gate"; #address-cells = <1>; #size-cells = <0>; @@ -116,7 +117,8 @@ &sd_emmc_a { &sd_emmc_b { status = "okay"; pinctrl-0 = <&sdcard_pins>; - pinctrl-names = "default"; + pinctrl-1 = <&sdcard_clk_gate_pins>; + pinctrl-names = "default", "clk-gate"; bus-width = <4>; cap-sd-highspeed; @@ -134,10 +136,10 @@ &sd_emmc_b { &sd_emmc_c { status = "okay"; pinctrl-0 = <&emmc_pins>; - pinctrl-names = "default"; + pinctrl-1 = <&emmc_clk_gate_pins>; + pinctrl-names = "default", "clk-gate"; bus-width = <8>; - cap-sd-highspeed; cap-mmc-highspeed; max-frequency = <200000000>; non-removable; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi index 8d4f3160a0ee..d8dd3298b15c 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi @@ -43,6 +43,7 @@ #include "meson-gx.dtsi" #include +#include #include #include @@ -207,6 +208,15 @@ mux { }; }; +&cec_AO { + clocks = <&clkc_AO CLKID_AO_CEC_32K>; + clock-names = "core"; +}; + +&clkc_AO { + compatible = "amlogic,meson-gxl-aoclkc", "amlogic,meson-gx-aoclkc"; +}; + &hdmi_tx { compatible = "amlogic,meson-gxl-dw-hdmi", "amlogic,meson-gx-dw-hdmi"; resets = <&reset RESET_HDMITX_CAPB3>, @@ -271,6 +281,17 @@ mux { }; }; + emmc_clk_gate_pins: emmc_clk_gate { + mux { + groups = "BOOT_8"; + function = "gpio_periphs"; + }; + cfg-pull-down { + pins = "BOOT_8"; + bias-pull-down; + }; + }; + nor_pins: nor { mux { groups = "nor_d", @@ -309,6 +330,17 @@ mux { }; }; + sdcard_clk_gate_pins: sdcard_clk_gate { + mux { + groups = "CARD_2"; + function = "gpio_periphs"; + }; + cfg-pull-down { + pins = "CARD_2"; + bias-pull-down; + }; + }; + sdio_pins: sdio { mux { groups = "sdio_d0", @@ -321,6 +353,17 @@ mux { }; }; + sdio_clk_gate_pins: sdio_clk_gate { + mux { + groups = "GPIOX_4"; + function = "gpio_periphs"; + }; + cfg-pull-down { + pins = "GPIOX_4"; + bias-pull-down; + }; + }; + sdio_irq_pins: sdio_irq { mux { groups = "sdio_irq"; @@ -593,21 +636,21 @@ &saradc { &sd_emmc_a { clocks = <&clkc CLKID_SD_EMMC_A>, - <&xtal>, + <&clkc CLKID_SD_EMMC_A_CLK0>, <&clkc CLKID_FCLK_DIV2>; clock-names = "core", "clkin0", "clkin1"; }; &sd_emmc_b { clocks = <&clkc CLKID_SD_EMMC_B>, - <&xtal>, + <&clkc CLKID_SD_EMMC_B_CLK0>, <&clkc CLKID_FCLK_DIV2>; clock-names = "core", "clkin0", "clkin1"; }; &sd_emmc_c { clocks = <&clkc CLKID_SD_EMMC_C>, - <&xtal>, + <&clkc CLKID_SD_EMMC_C_CLK0>, <&clkc CLKID_FCLK_DIV2>; clock-names = "core", "clkin0", "clkin1"; }; @@ -623,6 +666,31 @@ &spifc { clocks = <&clkc CLKID_SPI>; }; +&uart_A { + 
clocks = <&xtal>, <&clkc CLKID_UART0>, <&xtal>; + clock-names = "xtal", "core", "baud"; +}; + +&uart_AO { + clocks = <&xtal>, <&clkc CLKID_CLK81>, <&xtal>; + clock-names = "xtal", "pclk", "baud"; +}; + +&uart_AO_B { + clocks = <&xtal>, <&clkc CLKID_CLK81>, <&xtal>; + clock-names = "xtal", "pclk", "baud"; +}; + +&uart_B { + clocks = <&xtal>, <&clkc CLKID_UART1>, <&xtal>; + clock-names = "xtal", "core", "baud"; +}; + +&uart_C { + clocks = <&xtal>, <&clkc CLKID_UART2>, <&xtal>; + clock-names = "xtal", "core", "baud"; +}; + &vpu { compatible = "amlogic,meson-gxl-vpu", "amlogic,meson-gx-vpu"; }; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts index 5f626d683088..22c697732f66 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts @@ -113,6 +113,13 @@ hdmi_connector_in: endpoint { }; }; +&cec_AO { + status = "okay"; + pinctrl-0 = <&ao_cec_pins>; + pinctrl-names = "default"; + hdmi-phandle = <&hdmi_tx>; +}; + &cvbs_vdac_port { cvbs_vdac_out: endpoint { remote-endpoint = <&cvbs_connector_in>; @@ -168,7 +175,8 @@ &ir { &sd_emmc_b { status = "okay"; pinctrl-0 = <&sdcard_pins>; - pinctrl-names = "default"; + pinctrl-1 = <&sdcard_clk_gate_pins>; + pinctrl-names = "default", "clk-gate"; bus-width = <4>; cap-sd-highspeed; @@ -186,10 +194,10 @@ &sd_emmc_b { &sd_emmc_c { status = "okay"; pinctrl-0 = <&emmc_pins>; - pinctrl-names = "default"; + pinctrl-1 = <&emmc_clk_gate_pins>; + pinctrl-names = "default", "clk-gate"; bus-width = <8>; - cap-sd-highspeed; cap-mmc-highspeed; max-frequency = <200000000>; non-removable; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts index 08f1dd69b679..470f72bb863c 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts @@ -220,7 +220,6 @@ &sd_emmc_c { pinctrl-names = "default"; bus-width = <8>; - cap-sd-highspeed; cap-mmc-highspeed; max-frequency = <200000000>; non-removable; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi index fe451cce93e7..19a798d2ae2f 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi @@ -117,6 +117,10 @@ cpu7: cpu@103 { }; }; +&clkc_AO { + compatible = "amlogic,meson-gxm-aoclkc", "amlogic,meson-gx-aoclkc"; +}; + &saradc { compatible = "amlogic,meson-gxm-saradc", "amlogic,meson-saradc"; }; diff --git a/arch/arm64/boot/dts/apm/apm-shadowcat.dtsi b/arch/arm64/boot/dts/apm/apm-shadowcat.dtsi index 72720e9132a1..c9ffffb96e43 100644 --- a/arch/arm64/boot/dts/apm/apm-shadowcat.dtsi +++ b/arch/arm64/boot/dts/apm/apm-shadowcat.dtsi @@ -626,6 +626,7 @@ pcie0: pcie@1f2b0000 { 0x43000000 0xe0 0x00000000 0xe0 0x00000000 0x20 0x00000000>; /* mem */ dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000 0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0x0 0x0 0x0 0x7>; interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0x0 0x0 0x10 0x4 0x0 0x0 0x0 0x2 &gic 0x0 0x0 0x0 0x11 0x4 @@ -651,6 +652,7 @@ pcie1: pcie@1f2c0000 { 0x43000000 0xb0 0x00000000 0xb0 0x00000000 0x10 0x00000000>; /* mem */ dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000 0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0x0 0x0 0x0 0x7>; interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 
0x0 0x0 0x16 0x4 0x0 0x0 0x0 0x2 &gic 0x0 0x0 0x0 0x17 0x4 diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi index 63be8e51eaa8..c09a36fed917 100644 --- a/arch/arm64/boot/dts/apm/apm-storm.dtsi +++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi @@ -626,6 +626,7 @@ pcie0: pcie@1f2b0000 { 0x43000000 0xf0 0x00000000 0xf0 0x00000000 0x10 0x00000000>; /* mem */ dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000 0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0x0 0x0 0x0 0x7>; interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xc2 0x4 0x0 0x0 0x0 0x2 &gic 0x0 0xc3 0x4 @@ -651,6 +652,7 @@ pcie1: pcie@1f2c0000 { 0x43000000 0xd8 0x00000000 0xd8 0x00000000 0x08 0x00000000>; /* mem */ dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000 0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0x0 0x0 0x0 0x7>; interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xc8 0x4 0x0 0x0 0x0 0x2 &gic 0x0 0xc9 0x4 @@ -676,6 +678,7 @@ pcie2: pcie@1f2d0000 { 0x43000000 0x94 0x00000000 0x94 0x00000000 0x04 0x00000000>; /* mem */ dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000 0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0x0 0x0 0x0 0x7>; interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xce 0x4 0x0 0x0 0x0 0x2 &gic 0x0 0xcf 0x4 @@ -701,6 +704,7 @@ pcie3: pcie@1f500000 { 0x43000000 0xb0 0x00000000 0xb0 0x00000000 0x10 0x00000000>; /* mem */ dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000 0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0x0 0x0 0x0 0x7>; interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xd4 0x4 0x0 0x0 0x0 0x2 &gic 0x0 0xd5 0x4 @@ -726,6 +730,7 @@ pcie4: pcie@1f510000 { 0x43000000 0xc8 0x00000000 0xc8 0x00000000 0x08 0x00000000>; /* mem */ dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000 0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>; + bus-range = <0x00 0xff>; interrupt-map-mask = <0x0 0x0 0x0 0x7>; interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xda 0x4 0x0 0x0 0x0 0x2 &gic 0x0 0xdb 0x4 diff --git a/arch/arm64/boot/dts/arm/foundation-v8.dtsi b/arch/arm64/boot/dts/arm/foundation-v8.dtsi index 7cfa8e414e7f..8ecdd4331980 100644 --- a/arch/arm64/boot/dts/arm/foundation-v8.dtsi +++ b/arch/arm64/boot/dts/arm/foundation-v8.dtsi @@ -226,7 +226,7 @@ v2m_serial3: uart@0c0000 { clock-names = "uartclk", "apb_pclk"; }; - virtio_block@0130000 { + virtio-block@0130000 { compatible = "virtio,mmio"; reg = <0x130000 0x200>; interrupts = <42>; diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi index e8b7413ec890..fbafe62d6b22 100644 --- a/arch/arm64/boot/dts/arm/juno-base.dtsi +++ b/arch/arm64/boot/dts/arm/juno-base.dtsi @@ -201,7 +201,7 @@ stm_out_port: endpoint { }; }; - cpu_debug0: cpu_debug@22010000 { + cpu_debug0: cpu-debug@22010000 { compatible = "arm,coresight-cpu-debug", "arm,primecell"; reg = <0x0 0x22010000 0x0 0x1000>; @@ -260,7 +260,7 @@ cluster0_funnel_in_port1: endpoint { }; }; - cpu_debug1: cpu_debug@22110000 { + cpu_debug1: cpu-debug@22110000 { compatible = "arm,coresight-cpu-debug", "arm,primecell"; reg = <0x0 0x22110000 0x0 0x1000>; @@ -283,7 +283,7 @@ cluster0_etm1_out_port: endpoint { }; }; - cpu_debug2: cpu_debug@23010000 { + cpu_debug2: cpu-debug@23010000 { compatible = 
"arm,coresight-cpu-debug", "arm,primecell"; reg = <0x0 0x23010000 0x0 0x1000>; @@ -356,7 +356,7 @@ cluster1_funnel_in_port3: endpoint { }; }; - cpu_debug3: cpu_debug@23110000 { + cpu_debug3: cpu-debug@23110000 { compatible = "arm,coresight-cpu-debug", "arm,primecell"; reg = <0x0 0x23110000 0x0 0x1000>; @@ -379,7 +379,7 @@ cluster1_etm1_out_port: endpoint { }; }; - cpu_debug4: cpu_debug@23210000 { + cpu_debug4: cpu-debug@23210000 { compatible = "arm,coresight-cpu-debug", "arm,primecell"; reg = <0x0 0x23210000 0x0 0x1000>; @@ -402,7 +402,7 @@ cluster1_etm2_out_port: endpoint { }; }; - cpu_debug5: cpu_debug@23310000 { + cpu_debug5: cpu-debug@23310000 { compatible = "arm,coresight-cpu-debug", "arm,primecell"; reg = <0x0 0x23310000 0x0 0x1000>; @@ -426,7 +426,7 @@ cluster1_etm3_out_port: endpoint { }; replicator@20120000 { - compatible = "qcom,coresight-replicator1x", "arm,primecell"; + compatible = "arm,coresight-dynamic-replicator", "arm,primecell"; reg = <0 0x20120000 0 0x1000>; clocks = <&soc_smc50mhz>; diff --git a/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi b/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi index 161ac98418a3..528875c75598 100644 --- a/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi +++ b/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi @@ -219,7 +219,7 @@ panel-timing { }; }; - virtio_block@0130000 { + virtio-block@0130000 { compatible = "virtio,mmio"; reg = <0x130000 0x200>; interrupts = <42>; diff --git a/arch/arm64/boot/dts/broadcom/Makefile b/arch/arm64/boot/dts/broadcom/Makefile index f11bdd6689ea..3eaef3895d66 100644 --- a/arch/arm64/boot/dts/broadcom/Makefile +++ b/arch/arm64/boot/dts/broadcom/Makefile @@ -1,7 +1,7 @@ dtb-$(CONFIG_ARCH_BCM2835) += bcm2837-rpi-3-b.dtb -dtb-$(CONFIG_ARCH_BCM_IPROC) += ns2-svk.dtb ns2-xmc.dtb -dts-dirs := stingray +dts-dirs += northstar2 +dts-dirs += stingray always := $(dtb-y) subdir-y := $(dts-dirs) clean-files := *.dtb diff --git a/arch/arm64/boot/dts/broadcom/bcm2835-rpi.dtsi b/arch/arm64/boot/dts/broadcom/bcm2835-rpi.dtsi deleted file mode 120000 index 3937b77cb310..000000000000 --- a/arch/arm64/boot/dts/broadcom/bcm2835-rpi.dtsi +++ /dev/null @@ -1 +0,0 @@ -../../../../arm/boot/dts/bcm2835-rpi.dtsi \ No newline at end of file diff --git a/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b.dts b/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b.dts index 972f14db28ac..699d340a3437 100644 --- a/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b.dts +++ b/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b.dts @@ -1,41 +1 @@ -/dts-v1/; -#include "bcm2837.dtsi" -#include "bcm2835-rpi.dtsi" -#include "bcm283x-rpi-smsc9514.dtsi" -#include "bcm283x-rpi-usb-host.dtsi" - -/ { - compatible = "raspberrypi,3-model-b", "brcm,bcm2837"; - model = "Raspberry Pi 3 Model B"; - - memory { - reg = <0 0x40000000>; - }; - - leds { - act { - gpios = <&gpio 47 0>; - }; - }; -}; - -&uart1 { - status = "okay"; -}; - -/* SDHCI is used to control the SDIO for wireless */ -&sdhci { - pinctrl-names = "default"; - pinctrl-0 = <&emmc_gpio34>; - status = "okay"; - bus-width = <4>; - non-removable; -}; - -/* SDHOST is used to drive the SD card */ -&sdhost { - pinctrl-names = "default"; - pinctrl-0 = <&sdhost_gpio48>; - status = "okay"; - bus-width = <4>; -}; +#include "arm/bcm2837-rpi-3-b.dts" diff --git a/arch/arm64/boot/dts/broadcom/bcm283x-rpi-smsc9514.dtsi b/arch/arm64/boot/dts/broadcom/bcm283x-rpi-smsc9514.dtsi deleted file mode 120000 index dca7c057d5a5..000000000000 --- a/arch/arm64/boot/dts/broadcom/bcm283x-rpi-smsc9514.dtsi +++ /dev/null @@ -1 +0,0 @@ 
-../../../../arm/boot/dts/bcm283x-rpi-smsc9514.dtsi \ No newline at end of file diff --git a/arch/arm64/boot/dts/broadcom/bcm283x-rpi-usb-host.dtsi b/arch/arm64/boot/dts/broadcom/bcm283x-rpi-usb-host.dtsi deleted file mode 120000 index cbeebe312ff8..000000000000 --- a/arch/arm64/boot/dts/broadcom/bcm283x-rpi-usb-host.dtsi +++ /dev/null @@ -1 +0,0 @@ -../../../../arm/boot/dts/bcm283x-rpi-usb-host.dtsi \ No newline at end of file diff --git a/arch/arm64/boot/dts/broadcom/bcm283x.dtsi b/arch/arm64/boot/dts/broadcom/bcm283x.dtsi deleted file mode 120000 index 5f54e4cab99b..000000000000 --- a/arch/arm64/boot/dts/broadcom/bcm283x.dtsi +++ /dev/null @@ -1 +0,0 @@ -../../../../arm/boot/dts/bcm283x.dtsi \ No newline at end of file diff --git a/arch/arm64/boot/dts/broadcom/northstar2/Makefile b/arch/arm64/boot/dts/broadcom/northstar2/Makefile new file mode 100644 index 000000000000..e01a1485b813 --- /dev/null +++ b/arch/arm64/boot/dts/broadcom/northstar2/Makefile @@ -0,0 +1,6 @@ +dtb-$(CONFIG_ARCH_BCM_IPROC) += ns2-svk.dtb +dtb-$(CONFIG_ARCH_BCM_IPROC) += ns2-xmc.dtb + +always := $(dtb-y) +subdir-y := $(dts-dirs) +clean-files := *.dtb diff --git a/arch/arm64/boot/dts/broadcom/ns2-clock.dtsi b/arch/arm64/boot/dts/broadcom/northstar2/ns2-clock.dtsi similarity index 100% rename from arch/arm64/boot/dts/broadcom/ns2-clock.dtsi rename to arch/arm64/boot/dts/broadcom/northstar2/ns2-clock.dtsi diff --git a/arch/arm64/boot/dts/broadcom/ns2-svk.dts b/arch/arm64/boot/dts/broadcom/northstar2/ns2-svk.dts similarity index 100% rename from arch/arm64/boot/dts/broadcom/ns2-svk.dts rename to arch/arm64/boot/dts/broadcom/northstar2/ns2-svk.dts diff --git a/arch/arm64/boot/dts/broadcom/ns2-xmc.dts b/arch/arm64/boot/dts/broadcom/northstar2/ns2-xmc.dts similarity index 100% rename from arch/arm64/boot/dts/broadcom/ns2-xmc.dts rename to arch/arm64/boot/dts/broadcom/northstar2/ns2-xmc.dts diff --git a/arch/arm64/boot/dts/broadcom/ns2.dtsi b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi similarity index 100% rename from arch/arm64/boot/dts/broadcom/ns2.dtsi rename to arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi diff --git a/arch/arm64/boot/dts/broadcom/stingray/bcm958742-base.dtsi b/arch/arm64/boot/dts/broadcom/stingray/bcm958742-base.dtsi index 5dca7d10253b..8862ec907fd8 100644 --- a/arch/arm64/boot/dts/broadcom/stingray/bcm958742-base.dtsi +++ b/arch/arm64/boot/dts/broadcom/stingray/bcm958742-base.dtsi @@ -72,6 +72,78 @@ &memory { /* Default DRAM banks */ <0x00000008 0x80000000 0x1 0x80000000>; /* 6G @ 34G */ }; +&sata0 { + status = "okay"; +}; + +&sata_phy0{ + status = "okay"; +}; + +&sata1 { + status = "okay"; +}; + +&sata_phy1{ + status = "okay"; +}; + +&sata2 { + status = "okay"; +}; + +&sata_phy2{ + status = "okay"; +}; + +&sata3 { + status = "okay"; +}; + +&sata_phy3{ + status = "okay"; +}; + +&sata4 { + status = "okay"; +}; + +&sata_phy4{ + status = "okay"; +}; + +&sata5 { + status = "okay"; +}; + +&sata_phy5{ + status = "okay"; +}; + +&sata6 { + status = "okay"; +}; + +&sata_phy6{ + status = "okay"; +}; + +&sata7 { + status = "okay"; +}; + +&sata_phy7{ + status = "okay"; +}; + +&mdio_mux_iproc { + mdio@10 { + gphy0: eth-phy@10 { + reg = <0x10>; + }; + }; +}; + &uart1 { status = "okay"; }; @@ -102,6 +174,12 @@ pcf8574: pcf8574@20 { }; }; +&enet { + phy-mode = "rgmii-id"; + phy-handle = <&gphy0>; + status = "okay"; +}; + &nand { status = "ok"; nandcs@0 { diff --git a/arch/arm64/boot/dts/broadcom/stingray/bcm958742k.dts b/arch/arm64/boot/dts/broadcom/stingray/bcm958742k.dts index 5671669ba348..eb6f08cdbd79 
100644 --- a/arch/arm64/boot/dts/broadcom/stingray/bcm958742k.dts +++ b/arch/arm64/boot/dts/broadcom/stingray/bcm958742k.dts @@ -39,6 +39,10 @@ / { model = "Stingray Combo SVK (BCM958742K)"; }; +&gphy0 { + enet-phy-lane-swap; +}; + &uart2 { status = "okay"; }; diff --git a/arch/arm64/boot/dts/broadcom/stingray/bcm958742t.dts b/arch/arm64/boot/dts/broadcom/stingray/bcm958742t.dts index 6ebe399fda6a..5084b037320f 100644 --- a/arch/arm64/boot/dts/broadcom/stingray/bcm958742t.dts +++ b/arch/arm64/boot/dts/broadcom/stingray/bcm958742t.dts @@ -38,3 +38,7 @@ / { compatible = "brcm,bcm958742t", "brcm,stingray"; model = "Stingray SST100 (BCM958742T)"; }; + +&gphy0 { + enet-phy-lane-swap; +}; diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray-fs4.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray-fs4.dtsi new file mode 100644 index 000000000000..8bf1dc6b46ca --- /dev/null +++ b/arch/arm64/boot/dts/broadcom/stingray/stingray-fs4.dtsi @@ -0,0 +1,118 @@ +/* + * BSD LICENSE + * + * Copyright(c) 2016-2017 Broadcom. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Broadcom nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + + fs4: fs4 { + compatible = "simple-bus"; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x0 0x67000000 0x00800000>; + + crypto_mbox: crypto_mbox@00000000 { + compatible = "brcm,iproc-flexrm-mbox"; + reg = <0x00000000 0x200000>; + msi-parent = <&gic_its 0x4100>; + #mbox-cells = <3>; + dma-coherent; + }; + + raid_mbox: raid_mbox@00400000 { + compatible = "brcm,iproc-flexrm-mbox"; + reg = <0x00400000 0x200000>; + dma-coherent; + msi-parent = <&gic_its 0x4300>; + #mbox-cells = <3>; + }; + + raid0: raid@0 { + compatible = "brcm,iproc-sba-v2"; + mboxes = <&raid_mbox 0 0x1 0xff00>, + <&raid_mbox 1 0x1 0xff00>, + <&raid_mbox 2 0x1 0xff00>, + <&raid_mbox 3 0x1 0xff00>; + }; + + raid1: raid@1 { + compatible = "brcm,iproc-sba-v2"; + mboxes = <&raid_mbox 4 0x1 0xff00>, + <&raid_mbox 5 0x1 0xff00>, + <&raid_mbox 6 0x1 0xff00>, + <&raid_mbox 7 0x1 0xff00>; + }; + + raid2: raid@2 { + compatible = "brcm,iproc-sba-v2"; + mboxes = <&raid_mbox 8 0x1 0xff00>, + <&raid_mbox 9 0x1 0xff00>, + <&raid_mbox 10 0x1 0xff00>, + <&raid_mbox 11 0x1 0xff00>; + }; + + raid3: raid@3 { + compatible = "brcm,iproc-sba-v2"; + mboxes = <&raid_mbox 12 0x1 0xff00>, + <&raid_mbox 13 0x1 0xff00>, + <&raid_mbox 14 0x1 0xff00>, + <&raid_mbox 15 0x1 0xff00>; + }; + + raid4: raid@4 { + compatible = "brcm,iproc-sba-v2"; + mboxes = <&raid_mbox 16 0x1 0xff00>, + <&raid_mbox 17 0x1 0xff00>, + <&raid_mbox 18 0x1 0xff00>, + <&raid_mbox 19 0x1 0xff00>; + }; + + raid5: raid@5 { + compatible = "brcm,iproc-sba-v2"; + mboxes = <&raid_mbox 20 0x1 0xff00>, + <&raid_mbox 21 0x1 0xff00>, + <&raid_mbox 22 0x1 0xff00>, + <&raid_mbox 23 0x1 0xff00>; + }; + + raid6: raid@6 { + compatible = "brcm,iproc-sba-v2"; + mboxes = <&raid_mbox 24 0x1 0xff00>, + <&raid_mbox 25 0x1 0xff00>, + <&raid_mbox 26 0x1 0xff00>, + <&raid_mbox 27 0x1 0xff00>; + }; + + raid7: raid@7 { + compatible = "brcm,iproc-sba-v2"; + mboxes = <&raid_mbox 28 0x1 0xff00>, + <&raid_mbox 29 0x1 0xff00>, + <&raid_mbox 30 0x1 0xff00>, + <&raid_mbox 31 0x1 0xff00>; + }; + }; diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray-sata.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray-sata.dtsi new file mode 100644 index 000000000000..a774709388df --- /dev/null +++ b/arch/arm64/boot/dts/broadcom/stingray/stingray-sata.dtsi @@ -0,0 +1,278 @@ +/* + * BSD LICENSE + * + * Copyright(c) 2016-2017 Broadcom. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Broadcom nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + sata { + compatible = "simple-bus"; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x0 0x67d00000 0x00800000>; + + sata0: ahci@00210000 { + compatible = "brcm,iproc-ahci", "generic-ahci"; + reg = <0x00210000 0x1000>; + reg-names = "ahci"; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + + sata0_port0: sata-port@0 { + reg = <0>; + phys = <&sata0_phy0>; + phy-names = "sata-phy"; + }; + }; + + sata_phy0: sata_phy@00212100 { + compatible = "brcm,iproc-sr-sata-phy"; + reg = <0x00212100 0x1000>; + reg-names = "phy"; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + + sata0_phy0: sata-phy@0 { + reg = <0>; + #phy-cells = <0>; + }; + }; + + sata1: ahci@00310000 { + compatible = "brcm,iproc-ahci", "generic-ahci"; + reg = <0x00310000 0x1000>; + reg-names = "ahci"; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + + sata1_port0: sata-port@0 { + reg = <0>; + phys = <&sata1_phy0>; + phy-names = "sata-phy"; + }; + }; + + sata_phy1: sata_phy@00312100 { + compatible = "brcm,iproc-sr-sata-phy"; + reg = <0x00312100 0x1000>; + reg-names = "phy"; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + + sata1_phy0: sata-phy@0 { + reg = <0>; + #phy-cells = <0>; + }; + }; + + sata2: ahci@00120000 { + compatible = "brcm,iproc-ahci", "generic-ahci"; + reg = <0x00120000 0x1000>; + reg-names = "ahci"; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + + sata2_port0: sata-port@0 { + reg = <0>; + phys = <&sata2_phy0>; + phy-names = "sata-phy"; + }; + }; + + sata_phy2: sata_phy@00122100 { + compatible = "brcm,iproc-sr-sata-phy"; + reg = <0x00122100 0x1000>; + reg-names = "phy"; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + + sata2_phy0: sata-phy@0 { + reg = <0>; + #phy-cells = <0>; + }; + }; + + sata3: ahci@00130000 { + compatible = "brcm,iproc-ahci", "generic-ahci"; + reg = <0x00130000 0x1000>; + reg-names = "ahci"; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + + sata3_port0: sata-port@0 { + reg = <0>; + phys = <&sata3_phy0>; + phy-names = "sata-phy"; + }; + }; + + sata_phy3: sata_phy@00132100 { + compatible = "brcm,iproc-sr-sata-phy"; + reg = <0x00132100 0x1000>; + reg-names = "phy"; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + + sata3_phy0: sata-phy@0 { + reg = <0>; + #phy-cells = <0>; + }; + }; + + sata4: ahci@00330000 { + compatible = "brcm,iproc-ahci", "generic-ahci"; + reg = <0x00330000 0x1000>; + reg-names = "ahci"; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + + sata4_port0: sata-port@0 { + reg = <0>; + phys = <&sata4_phy0>; + phy-names = "sata-phy"; + }; + }; + + sata_phy4: sata_phy@00332100 { + compatible = "brcm,iproc-sr-sata-phy"; + reg = <0x00332100 0x1000>; + reg-names = "phy"; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + + sata4_phy0: sata-phy@0 { + reg 
= <0>; + #phy-cells = <0>; + }; + }; + + sata5: ahci@00400000 { + compatible = "brcm,iproc-ahci", "generic-ahci"; + reg = <0x00400000 0x1000>; + reg-names = "ahci"; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + + sata5_port0: sata-port@0 { + reg = <0>; + phys = <&sata5_phy0>; + phy-names = "sata-phy"; + }; + }; + + sata_phy5: sata_phy@00402100 { + compatible = "brcm,iproc-sr-sata-phy"; + reg = <0x00402100 0x1000>; + reg-names = "phy"; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + + sata5_phy0: sata-phy@0 { + reg = <0>; + #phy-cells = <0>; + }; + }; + + sata6: ahci@00410000 { + compatible = "brcm,iproc-ahci", "generic-ahci"; + reg = <0x00410000 0x1000>; + reg-names = "ahci"; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + + sata6_port0: sata-port@0 { + reg = <0>; + phys = <&sata6_phy0>; + phy-names = "sata-phy"; + }; + }; + + sata_phy6: sata_phy@00412100 { + compatible = "brcm,iproc-sr-sata-phy"; + reg = <0x00412100 0x1000>; + reg-names = "phy"; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + + sata6_phy0: sata-phy@0 { + reg = <0>; + #phy-cells = <0>; + }; + }; + + sata7: ahci@00420000 { + compatible = "brcm,iproc-ahci", "generic-ahci"; + reg = <0x00420000 0x1000>; + reg-names = "ahci"; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + + sata7_port0: sata-port@0 { + reg = <0>; + phys = <&sata7_phy0>; + phy-names = "sata-phy"; + }; + }; + + sata_phy7: sata_phy@00422100 { + compatible = "brcm,iproc-sr-sata-phy"; + reg = <0x00422100 0x1000>; + reg-names = "phy"; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + + sata7_phy0: sata-phy@0 { + reg = <0>; + #phy-cells = <0>; + }; + }; + }; diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi index 49933cf16c92..e6f75c633623 100644 --- a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi +++ b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi @@ -152,6 +152,12 @@ scr { #size-cells = <1>; ranges = <0x0 0x0 0x61000000 0x05000000>; + ccn: ccn@00000000 { + compatible = "arm,ccn-502"; + reg = <0x00000000 0x900000>; + interrupts = ; + }; + gic: interrupt-controller@02c00000 { compatible = "arm,gic-v3"; #interrupt-cells = <3>; @@ -261,6 +267,9 @@ gpio_crmu: gpio@00024800 { }; }; + #include "stingray-fs4.dtsi" + #include "stingray-sata.dtsi" + hsls { compatible = "simple-bus"; #address-cells = <1>; @@ -269,6 +278,37 @@ hsls { #include "stingray-pinctrl.dtsi" + mdio_mux_iproc: mdio-mux@0002023c { + compatible = "brcm,mdio-mux-iproc"; + reg = <0x0002023c 0x14>; + #address-cells = <1>; + #size-cells = <0>; + + mdio@0 { /* PCIe serdes */ + reg = <0x0>; + #address-cells = <1>; + #size-cells = <0>; + }; + + mdio@2 { /* SATA */ + reg = <0x2>; + #address-cells = <1>; + #size-cells = <0>; + }; + + mdio@3 { /* USB */ + reg = <0x3>; + #address-cells = <1>; + #size-cells = <0>; + }; + + mdio@10 { /* RGMII */ + reg = <0x10>; + #address-cells = <1>; + #size-cells = <0>; + }; + }; + pwm: pwm@00010000 { compatible = "brcm,iproc-pwm"; reg = <0x00010000 0x1000>; @@ -277,6 +317,93 @@ pwm: pwm@00010000 { status = "disabled"; }; + timer0: timer@00030000 { + compatible = "arm,sp804", "arm,primecell"; + reg = <0x00030000 0x1000>; + interrupts = ; + clocks = <&hsls_25m_div2_clk>, + <&hsls_25m_div2_clk>, + <&hsls_div4_clk>; + clock-names = "timer1", "timer2", "apb_pclk"; + status = "disabled"; + }; + + timer1: timer@00040000 { + 
compatible = "arm,sp804", "arm,primecell"; + reg = <0x00040000 0x1000>; + interrupts = ; + clocks = <&hsls_25m_div2_clk>, + <&hsls_25m_div2_clk>, + <&hsls_div4_clk>; + clock-names = "timer1", "timer2", "apb_pclk"; + }; + + timer2: timer@00050000 { + compatible = "arm,sp804", "arm,primecell"; + reg = <0x00050000 0x1000>; + interrupts = ; + clocks = <&hsls_25m_div2_clk>, + <&hsls_25m_div2_clk>, + <&hsls_div4_clk>; + clock-names = "timer1", "timer2", "apb_pclk"; + status = "disabled"; + }; + + timer3: timer@00060000 { + compatible = "arm,sp804", "arm,primecell"; + reg = <0x00060000 0x1000>; + interrupts = ; + clocks = <&hsls_25m_div2_clk>, + <&hsls_25m_div2_clk>, + <&hsls_div4_clk>; + clock-names = "timer1", "timer2", "apb_pclk"; + status = "disabled"; + }; + + timer4: timer@00070000 { + compatible = "arm,sp804", "arm,primecell"; + reg = <0x00070000 0x1000>; + interrupts = ; + clocks = <&hsls_25m_div2_clk>, + <&hsls_25m_div2_clk>, + <&hsls_div4_clk>; + clock-names = "timer1", "timer2", "apb_pclk"; + status = "disabled"; + }; + + timer5: timer@00080000 { + compatible = "arm,sp804", "arm,primecell"; + reg = <0x00080000 0x1000>; + interrupts = ; + clocks = <&hsls_25m_div2_clk>, + <&hsls_25m_div2_clk>, + <&hsls_div4_clk>; + clock-names = "timer1", "timer2", "apb_pclk"; + status = "disabled"; + }; + + timer6: timer@00090000 { + compatible = "arm,sp804", "arm,primecell"; + reg = <0x00090000 0x1000>; + interrupts = ; + clocks = <&hsls_25m_div2_clk>, + <&hsls_25m_div2_clk>, + <&hsls_div4_clk>; + clock-names = "timer1", "timer2", "apb_pclk"; + status = "disabled"; + }; + + timer7: timer@000a0000 { + compatible = "arm,sp804", "arm,primecell"; + reg = <0x000a0000 0x1000>; + interrupts = ; + clocks = <&hsls_25m_div2_clk>, + <&hsls_25m_div2_clk>, + <&hsls_div4_clk>; + clock-names = "timer1", "timer2", "apb_pclk"; + status = "disabled"; + }; + i2c0: i2c@000b0000 { compatible = "brcm,iproc-i2c"; reg = <0x000b0000 0x100>; @@ -424,6 +551,15 @@ dma0: dma@00310000 { iommus = <&smmu 0x6000 0x0000>; }; + enet: ethernet@00340000{ + compatible = "brcm,amac"; + reg = <0x00340000 0x1000>; + reg-names = "amac_base"; + dma-coherent; + interrupts = ; + status= "disabled"; + }; + nand: nand@00360000 { compatible = "brcm,nand-iproc", "brcm,brcmnand-v6.1"; reg = <0x00360000 0x600>, diff --git a/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi b/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi index 105b2938082f..297597442c44 100644 --- a/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi +++ b/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi @@ -307,20 +307,6 @@ &dsi { samsung,pll-clock-frequency = <24000000>; pinctrl-names = "default"; pinctrl-0 = <&te_irq>; - - ports { - #address-cells = <1>; - #size-cells = <0>; - - port@1 { - reg = <1>; - - dsi_out: endpoint { - samsung,burst-clock-frequency = <512000000>; - samsung,esc-clock-frequency = <16000000>; - }; - }; - }; }; &hdmi { @@ -843,7 +829,6 @@ &i2s0 { &mshc_0 { status = "okay"; - num-slots = <1>; mmc-hs200-1_8v; mmc-hs400-1_8v; cap-mmc-highspeed; @@ -865,7 +850,6 @@ &mshc_0 { &mshc_2 { status = "okay"; - num-slots = <1>; cap-sd-highspeed; disable-wp; cd-gpios = <&gpa2 4 GPIO_ACTIVE_HIGH>; @@ -1210,8 +1194,9 @@ &usbdrd30 { status = "okay"; }; -&usbdrd_dwc3_0 { +&usbdrd_dwc3 { dr_mode = "otg"; + extcon = <&muic>; }; &usbdrd30_phy { diff --git a/arch/arm64/boot/dts/exynos/exynos5433.dtsi b/arch/arm64/boot/dts/exynos/exynos5433.dtsi index 727f36abf3d4..7fe994b750da 100644 --- a/arch/arm64/boot/dts/exynos/exynos5433.dtsi +++ 
b/arch/arm64/boot/dts/exynos/exynos5433.dtsi @@ -1367,7 +1367,7 @@ usbdrd30: usbdrd { ranges; status = "disabled"; - dwc3@15400000 { + usbdrd_dwc3: dwc3@15400000 { compatible = "snps,dwc3"; reg = <0x15400000 0x10000>; interrupts = ; @@ -1414,7 +1414,7 @@ usbhost30: usbhost { ranges; status = "disabled"; - usbdrd_dwc3_0: dwc3@15a00000 { + usbhost_dwc3: dwc3@15a00000 { compatible = "snps,dwc3"; reg = <0x15a00000 0x10000>; interrupts = ; diff --git a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts index e5892bb0ae6e..4a8b1fb51243 100644 --- a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts +++ b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts @@ -359,7 +359,6 @@ pmic_irq: pmic-irq { &mmc_0 { status = "okay"; - num-slots = <1>; cap-mmc-highspeed; mmc-hs200-1_8v; non-removable; @@ -375,7 +374,6 @@ &mmc_0 { &mmc_2 { status = "okay"; - num-slots = <1>; cap-sd-highspeed; card-detect-delay = <200>; clock-frequency = <400000000>; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi index b1554cbd2c54..df83915d6ea6 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi @@ -444,6 +444,15 @@ edma0: edma@2c00000 { <&clockgen 4 3>; }; + usb0: usb3@2f00000 { + compatible = "snps,dwc3"; + reg = <0x0 0x2f00000 0x0 0x10000>; + interrupts = <0 60 0x4>; + dr_mode = "host"; + snps,quirk-frame-length-adjustment = <0x20>; + snps,dis_rxdet_inp3_quirk; + }; + sata: sata@3200000 { compatible = "fsl,ls1012a-ahci", "fsl,ls1043a-ahci"; reg = <0x0 0x3200000 0x0 0x10000>, @@ -454,5 +463,13 @@ sata: sata@3200000 { dma-coherent; status = "disabled"; }; + + usb1: usb2@8600000 { + compatible = "fsl-usb2-dr-v2.5", "fsl-usb2-dr"; + reg = <0x0 0x8600000 0x0 0x1000>; + interrupts = <0 139 0x4>; + dr_mode = "host"; + phy_type = "ulpi"; + }; }; }; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts index 213abb72de93..0f6fcda36b9e 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts +++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts @@ -49,7 +49,7 @@ #include "fsl-ls1088a.dtsi" / { - model = "L1088A RDB Board"; + model = "LS1088A RDB Board"; compatible = "fsl,ls1088a-rdb", "fsl,ls1088a"; }; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi index c144d06a6e33..33797b373674 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi @@ -52,6 +52,10 @@ / { #address-cells = <2>; #size-cells = <2>; + aliases { + crypto = &crypto; + }; + cpus { #address-cells = <1>; #size-cells = <0>; @@ -62,6 +66,7 @@ cpu0: cpu@0 { compatible = "arm,cortex-a53"; reg = <0x0>; clocks = <&clockgen 1 0>; + cpu-idle-states = <&CPU_PH20>; #cooling-cells = <2>; }; @@ -70,6 +75,7 @@ cpu1: cpu@1 { compatible = "arm,cortex-a53"; reg = <0x1>; clocks = <&clockgen 1 0>; + cpu-idle-states = <&CPU_PH20>; }; cpu2: cpu@2 { @@ -77,6 +83,7 @@ cpu2: cpu@2 { compatible = "arm,cortex-a53"; reg = <0x2>; clocks = <&clockgen 1 0>; + cpu-idle-states = <&CPU_PH20>; }; cpu3: cpu@3 { @@ -84,6 +91,7 @@ cpu3: cpu@3 { compatible = "arm,cortex-a53"; reg = <0x3>; clocks = <&clockgen 1 0>; + cpu-idle-states = <&CPU_PH20>; }; cpu4: cpu@100 { @@ -91,6 +99,7 @@ cpu4: cpu@100 { compatible = "arm,cortex-a53"; reg = <0x100>; clocks = <&clockgen 1 1>; + cpu-idle-states = <&CPU_PH20>; #cooling-cells = <2>; }; @@ -99,6 +108,7 @@ cpu5: cpu@101 { 
compatible = "arm,cortex-a53"; reg = <0x101>; clocks = <&clockgen 1 1>; + cpu-idle-states = <&CPU_PH20>; }; cpu6: cpu@102 { @@ -106,6 +116,7 @@ cpu6: cpu@102 { compatible = "arm,cortex-a53"; reg = <0x102>; clocks = <&clockgen 1 1>; + cpu-idle-states = <&CPU_PH20>; }; cpu7: cpu@103 { @@ -113,6 +124,16 @@ cpu7: cpu@103 { compatible = "arm,cortex-a53"; reg = <0x103>; clocks = <&clockgen 1 1>; + cpu-idle-states = <&CPU_PH20>; + }; + + CPU_PH20: cpu-ph20 { + compatible = "arm,idle-state"; + idle-state-name = "PH20"; + arm,psci-suspend-param = <0x00010000>; + entry-latency-us = <1000>; + exit-latency-us = <1000>; + min-residency-us = <3000>; }; }; @@ -136,6 +157,11 @@ timer { <1 10 IRQ_TYPE_LEVEL_LOW>;/* Hypervisor PPI */ }; + psci { + compatible = "arm,psci-0.2"; + method = "smc"; + }; + sysclk: sysclk { compatible = "fixed-clock"; #clock-cells = <0>; @@ -369,6 +395,45 @@ sata: sata@3200000 { dma-coherent; status = "disabled"; }; + + crypto: crypto@8000000 { + compatible = "fsl,sec-v5.0", "fsl,sec-v4.0"; + fsl,sec-era = <8>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x00 0x8000000 0x100000>; + reg = <0x00 0x8000000 0x0 0x100000>; + interrupts = ; + dma-coherent; + + sec_jr0: jr@10000 { + compatible = "fsl,sec-v5.0-job-ring", + "fsl,sec-v4.0-job-ring"; + reg = <0x10000 0x10000>; + interrupts = ; + }; + + sec_jr1: jr@20000 { + compatible = "fsl,sec-v5.0-job-ring", + "fsl,sec-v4.0-job-ring"; + reg = <0x20000 0x10000>; + interrupts = ; + }; + + sec_jr2: jr@30000 { + compatible = "fsl,sec-v5.0-job-ring", + "fsl,sec-v4.0-job-ring"; + reg = <0x30000 0x10000>; + interrupts = ; + }; + + sec_jr3: jr@40000 { + compatible = "fsl,sec-v5.0-job-ring", + "fsl,sec-v4.0-job-ring"; + reg = <0x40000 0x10000>; + interrupts = ; + }; + }; }; }; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts index ed209cd57283..3c99608b9b45 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts +++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts @@ -55,11 +55,6 @@ / { model = "Freescale Layerscape 2080a QDS Board"; compatible = "fsl,ls2080a-qds", "fsl,ls2080a"; - aliases { - serial0 = &serial0; - serial1 = &serial1; - }; - chosen { stdout-path = "serial0:115200n8"; }; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls2080a-rdb.dts index 67ec3f9c81a1..a4e7de9f70d8 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls2080a-rdb.dts +++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a-rdb.dts @@ -55,11 +55,6 @@ / { model = "Freescale Layerscape 2080a RDB Board"; compatible = "fsl,ls2080a-rdb", "fsl,ls2080a"; - aliases { - serial0 = &serial0; - serial1 = &serial1; - }; - chosen { stdout-path = "serial1:115200n8"; }; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a-simu.dts b/arch/arm64/boot/dts/freescale/fsl-ls2080a-simu.dts index 3ee718f0aaf8..fbbb73e571c0 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls2080a-simu.dts +++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a-simu.dts @@ -52,11 +52,6 @@ / { model = "Freescale Layerscape 2080a software Simulator model"; compatible = "fsl,ls2080a-simu", "fsl,ls2080a"; - aliases { - serial0 = &serial0; - serial1 = &serial1; - }; - ethernet@2210000 { compatible = "smsc,lan91c111"; reg = <0x0 0x2210000 0x0 0x100>; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi index d789c6814e6a..8d739301e7b8 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi +++ 
b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi @@ -53,6 +53,7 @@ cpu0: cpu@0 { compatible = "arm,cortex-a57"; reg = <0x0>; clocks = <&clockgen 1 0>; + cpu-idle-states = <&CPU_PW20>; next-level-cache = <&cluster0_l2>; #cooling-cells = <2>; }; @@ -62,6 +63,7 @@ cpu1: cpu@1 { compatible = "arm,cortex-a57"; reg = <0x1>; clocks = <&clockgen 1 0>; + cpu-idle-states = <&CPU_PW20>; next-level-cache = <&cluster0_l2>; }; @@ -70,6 +72,7 @@ cpu2: cpu@100 { compatible = "arm,cortex-a57"; reg = <0x100>; clocks = <&clockgen 1 1>; + cpu-idle-states = <&CPU_PW20>; next-level-cache = <&cluster1_l2>; #cooling-cells = <2>; }; @@ -79,6 +82,7 @@ cpu3: cpu@101 { compatible = "arm,cortex-a57"; reg = <0x101>; clocks = <&clockgen 1 1>; + cpu-idle-states = <&CPU_PW20>; next-level-cache = <&cluster1_l2>; }; @@ -87,6 +91,7 @@ cpu4: cpu@200 { compatible = "arm,cortex-a57"; reg = <0x200>; clocks = <&clockgen 1 2>; + cpu-idle-states = <&CPU_PW20>; next-level-cache = <&cluster2_l2>; #cooling-cells = <2>; }; @@ -96,6 +101,7 @@ cpu5: cpu@201 { compatible = "arm,cortex-a57"; reg = <0x201>; clocks = <&clockgen 1 2>; + cpu-idle-states = <&CPU_PW20>; next-level-cache = <&cluster2_l2>; }; @@ -105,6 +111,7 @@ cpu6: cpu@300 { reg = <0x300>; clocks = <&clockgen 1 3>; next-level-cache = <&cluster3_l2>; + cpu-idle-states = <&CPU_PW20>; #cooling-cells = <2>; }; @@ -113,6 +120,7 @@ cpu7: cpu@301 { compatible = "arm,cortex-a57"; reg = <0x301>; clocks = <&clockgen 1 3>; + cpu-idle-states = <&CPU_PW20>; next-level-cache = <&cluster3_l2>; }; @@ -131,6 +139,15 @@ cluster2_l2: l2-cache2 { cluster3_l2: l2-cache3 { compatible = "cache"; }; + + CPU_PW20: cpu-pw20 { + compatible = "arm,idle-state"; + idle-state-name = "PW20"; + arm,psci-suspend-param = <0x00010000>; + entry-latency-us = <2000>; + exit-latency-us = <2000>; + min-residency-us = <6000>; + }; }; &pcie1 { diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2088a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls2088a-qds.dts index 4a1df5ce3229..eaee5b1c3a44 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls2088a-qds.dts +++ b/arch/arm64/boot/dts/freescale/fsl-ls2088a-qds.dts @@ -54,11 +54,6 @@ / { model = "Freescale Layerscape 2088A QDS Board"; compatible = "fsl,ls2088a-qds", "fsl,ls2088a"; - aliases { - serial0 = &serial0; - serial1 = &serial1; - }; - chosen { stdout-path = "serial0:115200n8"; }; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2088a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls2088a-rdb.dts index a76d4b4debd1..c411442cac62 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls2088a-rdb.dts +++ b/arch/arm64/boot/dts/freescale/fsl-ls2088a-rdb.dts @@ -54,11 +54,6 @@ / { model = "Freescale Layerscape 2088A RDB Board"; compatible = "fsl,ls2088a-rdb", "fsl,ls2088a"; - aliases { - serial0 = &serial0; - serial1 = &serial1; - }; - chosen { stdout-path = "serial1:115200n8"; }; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi index 5c695c658056..6aa319dae396 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi @@ -53,6 +53,7 @@ cpu0: cpu@0 { compatible = "arm,cortex-a72"; reg = <0x0>; clocks = <&clockgen 1 0>; + cpu-idle-states = <&CPU_PW20>; next-level-cache = <&cluster0_l2>; #cooling-cells = <2>; }; @@ -62,6 +63,7 @@ cpu1: cpu@1 { compatible = "arm,cortex-a72"; reg = <0x1>; clocks = <&clockgen 1 0>; + cpu-idle-states = <&CPU_PW20>; next-level-cache = <&cluster0_l2>; }; @@ -70,6 +72,7 @@ cpu2: cpu@100 { compatible = "arm,cortex-a72"; reg = <0x100>; clocks = 
<&clockgen 1 1>; + cpu-idle-states = <&CPU_PW20>; next-level-cache = <&cluster1_l2>; #cooling-cells = <2>; }; @@ -79,6 +82,7 @@ cpu3: cpu@101 { compatible = "arm,cortex-a72"; reg = <0x101>; clocks = <&clockgen 1 1>; + cpu-idle-states = <&CPU_PW20>; next-level-cache = <&cluster1_l2>; }; @@ -88,6 +92,7 @@ cpu4: cpu@200 { reg = <0x200>; clocks = <&clockgen 1 2>; next-level-cache = <&cluster2_l2>; + cpu-idle-states = <&CPU_PW20>; #cooling-cells = <2>; }; @@ -96,6 +101,7 @@ cpu5: cpu@201 { compatible = "arm,cortex-a72"; reg = <0x201>; clocks = <&clockgen 1 2>; + cpu-idle-states = <&CPU_PW20>; next-level-cache = <&cluster2_l2>; }; @@ -104,6 +110,7 @@ cpu6: cpu@300 { compatible = "arm,cortex-a72"; reg = <0x300>; clocks = <&clockgen 1 3>; + cpu-idle-states = <&CPU_PW20>; next-level-cache = <&cluster3_l2>; #cooling-cells = <2>; }; @@ -113,6 +120,7 @@ cpu7: cpu@301 { compatible = "arm,cortex-a72"; reg = <0x301>; clocks = <&clockgen 1 3>; + cpu-idle-states = <&CPU_PW20>; next-level-cache = <&cluster3_l2>; }; @@ -131,6 +139,15 @@ cluster2_l2: l2-cache2 { cluster3_l2: l2-cache3 { compatible = "cache"; }; + + CPU_PW20: cpu-pw20 { + compatible = "arm,idle-state"; + idle-state-name = "PW20"; + arm,psci-suspend-param = <0x00010000>; + entry-latency-us = <2000>; + exit-latency-us = <2000>; + min-residency-us = <6000>; + }; }; &pcie1 { diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi index 94cdd3045037..4fb9a0966a84 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi @@ -46,6 +46,7 @@ */ #include +#include / { compatible = "fsl,ls2080a"; @@ -53,6 +54,12 @@ / { #address-cells = <2>; #size-cells = <2>; + aliases { + crypto = &crypto; + serial0 = &serial0; + serial1 = &serial1; + }; + cpu: cpus { #address-cells = <1>; #size-cells = <0>; @@ -118,6 +125,11 @@ pmu { interrupts = <1 7 0x8>; /* PMU PPI, Level low type */ }; + psci { + compatible = "arm,psci-0.2"; + method = "smc"; + }; + soc { compatible = "simple-bus"; #address-cells = <2>; @@ -301,6 +313,45 @@ cluster4_core1_watchdog: wdt@c310000 { clock-names = "apb_pclk", "wdog_clk"; }; + crypto: crypto@8000000 { + compatible = "fsl,sec-v5.0", "fsl,sec-v4.0"; + fsl,sec-era = <8>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x00 0x8000000 0x100000>; + reg = <0x00 0x8000000 0x0 0x100000>; + interrupts = ; + dma-coherent; + + sec_jr0: jr@10000 { + compatible = "fsl,sec-v5.0-job-ring", + "fsl,sec-v4.0-job-ring"; + reg = <0x10000 0x10000>; + interrupts = ; + }; + + sec_jr1: jr@20000 { + compatible = "fsl,sec-v5.0-job-ring", + "fsl,sec-v4.0-job-ring"; + reg = <0x20000 0x10000>; + interrupts = ; + }; + + sec_jr2: jr@30000 { + compatible = "fsl,sec-v5.0-job-ring", + "fsl,sec-v4.0-job-ring"; + reg = <0x30000 0x10000>; + interrupts = ; + }; + + sec_jr3: jr@40000 { + compatible = "fsl,sec-v5.0-job-ring", + "fsl,sec-v4.0-job-ring"; + reg = <0x40000 0x10000>; + interrupts = ; + }; + }; + fsl_mc: fsl-mc@80c000000 { compatible = "fsl,qoriq-mc"; reg = <0x00000008 0x0c000000 0 0x40>, /* MC portal base */ diff --git a/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts b/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts index 6609b0fe7a8b..fd4705c451e2 100644 --- a/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts +++ b/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts @@ -39,6 +39,34 @@ memory@0 { reg = <0x0 0x0 0x0 0x0>; }; + reserved-memory { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + ramoops@32000000 { + compatible = 
"ramoops"; + reg = <0x0 0x32000000 0x0 0x00100000>; + record-size = <0x00020000>; + console-size = <0x00020000>; + ftrace-size = <0x00020000>; + }; + }; + + reboot-mode-syscon@32100000 { + compatible = "syscon", "simple-mfd"; + reg = <0x0 0x32100000 0x0 0x00001000>; + + reboot-mode { + compatible = "syscon-reboot-mode"; + offset = <0x0>; + + mode-normal = <0x77665501>; + mode-bootloader = <0x77665500>; + mode-recovery = <0x77665502>; + }; + }; + keys { compatible = "gpio-keys"; pinctrl-names = "default"; @@ -159,6 +187,13 @@ wlan_en: wlan-en-1-8v { startup-delay-us = <70000>; enable-active-high; }; + + firmware { + optee { + compatible = "linaro,optee-tz"; + method = "smc"; + }; + }; }; &i2c0 { @@ -195,7 +230,7 @@ &uart4 { bluetooth { compatible = "ti,wl1837-st"; enable-gpios = <&gpio15 6 GPIO_ACTIVE_HIGH>; - max-speed = <921600>; + max-speed = <3000000>; }; }; diff --git a/arch/arm64/boot/dts/hisilicon/hi3660.dtsi b/arch/arm64/boot/dts/hisilicon/hi3660.dtsi index c6a1961e8d55..b7a90d632959 100644 --- a/arch/arm64/boot/dts/hisilicon/hi3660.dtsi +++ b/arch/arm64/boot/dts/hisilicon/hi3660.dtsi @@ -58,6 +58,8 @@ cpu0: cpu@0 { device_type = "cpu"; reg = <0x0 0x0>; enable-method = "psci"; + next-level-cache = <&A53_L2>; + cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP_0>; }; cpu1: cpu@1 { @@ -65,6 +67,8 @@ cpu1: cpu@1 { device_type = "cpu"; reg = <0x0 0x1>; enable-method = "psci"; + next-level-cache = <&A53_L2>; + cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP_0>; }; cpu2: cpu@2 { @@ -72,6 +76,8 @@ cpu2: cpu@2 { device_type = "cpu"; reg = <0x0 0x2>; enable-method = "psci"; + next-level-cache = <&A53_L2>; + cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP_0>; }; cpu3: cpu@3 { @@ -79,6 +85,8 @@ cpu3: cpu@3 { device_type = "cpu"; reg = <0x0 0x3>; enable-method = "psci"; + next-level-cache = <&A53_L2>; + cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP_0>; }; cpu4: cpu@100 { @@ -86,6 +94,12 @@ cpu4: cpu@100 { device_type = "cpu"; reg = <0x0 0x100>; enable-method = "psci"; + next-level-cache = <&A73_L2>; + cpu-idle-states = < + &CPU_NAP + &CPU_SLEEP + &CLUSTER_SLEEP_1 + >; }; cpu5: cpu@101 { @@ -93,6 +107,12 @@ cpu5: cpu@101 { device_type = "cpu"; reg = <0x0 0x101>; enable-method = "psci"; + next-level-cache = <&A73_L2>; + cpu-idle-states = < + &CPU_NAP + &CPU_SLEEP + &CLUSTER_SLEEP_1 + >; }; cpu6: cpu@102 { @@ -100,6 +120,12 @@ cpu6: cpu@102 { device_type = "cpu"; reg = <0x0 0x102>; enable-method = "psci"; + next-level-cache = <&A73_L2>; + cpu-idle-states = < + &CPU_NAP + &CPU_SLEEP + &CLUSTER_SLEEP_1 + >; }; cpu7: cpu@103 { @@ -107,6 +133,59 @@ cpu7: cpu@103 { device_type = "cpu"; reg = <0x0 0x103>; enable-method = "psci"; + next-level-cache = <&A73_L2>; + cpu-idle-states = < + &CPU_NAP + &CPU_SLEEP + &CLUSTER_SLEEP_1 + >; + }; + + idle-states { + entry-method = "psci"; + + CPU_NAP: cpu-nap { + compatible = "arm,idle-state"; + arm,psci-suspend-param = <0x0000001>; + entry-latency-us = <7>; + exit-latency-us = <2>; + min-residency-us = <15>; + }; + + CPU_SLEEP: cpu-sleep { + compatible = "arm,idle-state"; + local-timer-stop; + arm,psci-suspend-param = <0x0010000>; + entry-latency-us = <40>; + exit-latency-us = <70>; + min-residency-us = <3000>; + }; + + CLUSTER_SLEEP_0: cluster-sleep-0 { + compatible = "arm,idle-state"; + local-timer-stop; + arm,psci-suspend-param = <0x1010000>; + entry-latency-us = <500>; + exit-latency-us = <5000>; + min-residency-us = <20000>; + }; + + CLUSTER_SLEEP_1: cluster-sleep-1 { + compatible = "arm,idle-state"; + local-timer-stop; + arm,psci-suspend-param = <0x1010000>; + 
entry-latency-us = <1000>; + exit-latency-us = <5000>; + min-residency-us = <20000>; + }; + }; + + A53_L2: l2-cache0 { + compatible = "cache"; + }; + + A73_L2: l2-cache1 { + compatible = "cache"; }; }; @@ -123,6 +202,26 @@ gic: interrupt-controller@e82b0000 { IRQ_TYPE_LEVEL_HIGH)>; }; + pmu { + compatible = "arm,armv8-pmuv3"; + interrupts = , + , + , + , + , + , + , + ; + interrupt-affinity = <&cpu0>, + <&cpu1>, + <&cpu2>, + <&cpu3>, + <&cpu4>, + <&cpu5>, + <&cpu6>, + <&cpu7>; + }; + timer { compatible = "arm,armv8-timer"; interrupt-parent = <&gic>; @@ -337,6 +436,19 @@ uart6: serial@fff32000 { status = "disabled"; }; + dma0: dma@fdf30000 { + compatible = "hisilicon,k3-dma-1.0"; + reg = <0x0 0xfdf30000 0x0 0x1000>; + #dma-cells = <1>; + dma-channels = <16>; + dma-requests = <32>; + dma-min-chan = <1>; + interrupts = ; + clocks = <&crg_ctrl HI3660_CLK_GATE_DMAC>; + dma-no-cci; + dma-type = "hi3660_dma"; + }; + rtc0: rtc@fff04000 { compatible = "arm,pl031", "arm,primecell"; reg = <0x0 0Xfff04000 0x0 0x1000>; @@ -810,6 +922,7 @@ dwmmc1: dwmmc1@ff37f000 { clock-names = "ciu", "biu"; clock-frequency = <3200000>; resets = <&crg_rst 0x94 18>; + reset-names = "reset"; cd-gpios = <&gpio25 3 0>; hisilicon,peripheral-syscon = <&sctrl>; pinctrl-names = "default"; @@ -839,6 +952,7 @@ dwmmc2: dwmmc2@ff3ff000 { <&crg_ctrl HI3660_HCLK_GATE_SDIO0>; clock-names = "ciu", "biu"; resets = <&crg_rst 0x94 20>; + reset-names = "reset"; card-detect-delay = <200>; supports-highspeed; keep-power-in-suspend; @@ -848,5 +962,21 @@ &sdio_clk_cfg_func &sdio_cfg_func>; status = "disabled"; }; + + watchdog0: watchdog@e8a06000 { + compatible = "arm,sp805-wdt", "arm,primecell"; + reg = <0x0 0xe8a06000 0x0 0x1000>; + interrupts = ; + clocks = <&crg_ctrl HI3660_OSC32K>; + clock-names = "apb_pclk"; + }; + + watchdog1: watchdog@e8a07000 { + compatible = "arm,sp805-wdt", "arm,primecell"; + reg = <0x0 0xe8a07000 0x0 0x1000>; + interrupts = ; + clocks = <&crg_ctrl HI3660_OSC32K>; + clock-names = "apb_pclk"; + }; }; }; diff --git a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi index eacbe0db5bc2..02a3aa4b2165 100644 --- a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi +++ b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi @@ -262,6 +262,12 @@ pm_ctrl: pm_ctrl@f7032000 { #clock-cells = <1>; }; + acpu_sctrl: acpu_sctrl@f6504000 { + compatible = "hisilicon,hi6220-acpu-sctrl", "syscon"; + reg = <0x0 0xf6504000 0x0 0x1000>; + #clock-cells = <1>; + }; + medianoc_ade: medianoc_ade@f4520000 { compatible = "syscon"; reg = <0x0 0xf4520000 0x0 0x4000>; @@ -755,7 +761,8 @@ usb: usb@f72c0000 { dr_mode = "otg"; g-rx-fifo-size = <512>; g-np-tx-fifo-size = <128>; - g-tx-fifo-size = <128 128 128 128 128 128>; + g-tx-fifo-size = <128 128 128 128 128 128 128 128 + 16 16 16 16 16 16 16>; interrupts = <0 77 0x4>; }; diff --git a/arch/arm64/boot/dts/hisilicon/hip07-d05.dts b/arch/arm64/boot/dts/hisilicon/hip07-d05.dts index f5d7f0889b41..fe7c16c36025 100644 --- a/arch/arm64/boot/dts/hisilicon/hip07-d05.dts +++ b/arch/arm64/boot/dts/hisilicon/hip07-d05.dts @@ -84,3 +84,7 @@ ð3 { &sas1 { status = "ok"; }; + +&p0_pcie2_a { + status = "ok"; +}; diff --git a/arch/arm64/boot/dts/hisilicon/hip07.dtsi b/arch/arm64/boot/dts/hisilicon/hip07.dtsi index 283d7b532e16..2c01a21c3665 100644 --- a/arch/arm64/boot/dts/hisilicon/hip07.dtsi +++ b/arch/arm64/boot/dts/hisilicon/hip07.dtsi @@ -1534,5 +1534,27 @@ sas2: sas@a3000000 { <637 1>,<638 1>,<639 1>; status = "disabled"; }; + + p0_pcie2_a: pcie@a00a0000 { + compatible = 
"hisilicon,hip07-pcie-ecam"; + reg = <0 0xaf800000 0 0x800000>, + <0 0xa00a0000 0 0x10000>; + bus-range = <0xf8 0xff>; + msi-map = <0xf800 &p0_its_dsa_a 0xf800 0x800>; + msi-map-mask = <0xffff>; + #address-cells = <3>; + #size-cells = <2>; + device_type = "pci"; + dma-coherent; + ranges = <0x02000000 0 0xa8000000 0 0xa8000000 0 0x77f0000 + 0x01000000 0 0 0 0xaf7f0000 0 0x10000>; + #interrupt-cells = <1>; + interrupt-map-mask = <0xf800 0 0 7>; + interrupt-map = <0x0 0 0 1 &mbigen_pcie2_a 671 4 + 0x0 0 0 2 &mbigen_pcie2_a 671 4 + 0x0 0 0 3 &mbigen_pcie2_a 671 4 + 0x0 0 0 4 &mbigen_pcie2_a 671 4>; + status = "disabled"; + }; }; }; diff --git a/arch/arm64/boot/dts/marvell/Makefile b/arch/arm64/boot/dts/marvell/Makefile index 3e6ce6c15a74..6cff81eeaae2 100644 --- a/arch/arm64/boot/dts/marvell/Makefile +++ b/arch/arm64/boot/dts/marvell/Makefile @@ -8,6 +8,7 @@ dtb-$(CONFIG_ARCH_MVEBU) += armada-3720-espressobin.dtb dtb-$(CONFIG_ARCH_MVEBU) += armada-7040-db.dtb dtb-$(CONFIG_ARCH_MVEBU) += armada-8040-db.dtb dtb-$(CONFIG_ARCH_MVEBU) += armada-8040-mcbin.dtb +dtb-$(CONFIG_ARCH_MVEBU) += armada-8080-db.dtb always := $(dtb-y) subdir-y := $(dts-dirs) diff --git a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts index e3a136ed77b0..2ce52ba74f73 100644 --- a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts +++ b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts @@ -45,6 +45,7 @@ /dts-v1/; +#include #include "armada-372x.dtsi" / { @@ -59,6 +60,20 @@ memory@0 { device_type = "memory"; reg = <0x00000000 0x00000000 0x00000000 0x20000000>; }; + + vcc_sd_reg1: regulator { + compatible = "regulator-gpio"; + regulator-name = "vcc_sd1"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + regulator-boot-on; + + gpios = <&gpionb 4 GPIO_ACTIVE_HIGH>; + gpios-states = <0>; + states = <1800000 0x1 + 3300000 0x0>; + enable-active-high; + }; }; /* J9 */ @@ -71,6 +86,16 @@ &sata { status = "okay"; }; +/* J1 */ +&sdhci1 { + wp-inverted; + bus-width = <4>; + cd-gpios = <&gpionb 3 GPIO_ACTIVE_LOW>; + marvell,pad-type = "sd"; + vqmmc-supply = <&vcc_sd_reg1>; + status = "okay"; +}; + /* Exported on the micro USB connector J5 through an FTDI */ &uart0 { status = "okay"; @@ -81,6 +106,11 @@ &usb3 { status = "okay"; }; +/* J8 */ +&usb2 { + status = "okay"; +}; + &mdio { switch0: switch0@1 { compatible = "marvell,mv88e6085"; diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi index 51763d674050..8c0cf7efac65 100644 --- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi @@ -81,6 +81,11 @@ timer { ; }; + pmu { + compatible = "arm,armv8-pmuv3"; + interrupts = ; + }; + soc { compatible = "simple-bus"; #address-cells = <2>; @@ -322,7 +327,11 @@ gic: interrupt-controller@1d00000 { #interrupt-cells = <3>; interrupt-controller; reg = <0x1d00000 0x10000>, /* GICD */ - <0x1d40000 0x40000>; /* GICR */ + <0x1d40000 0x40000>, /* GICR */ + <0x1d80000 0x2000>, /* GICC */ + <0x1d90000 0x2000>, /* GICH */ + <0x1da0000 0x20000>; /* GICV */ + interrupts = ; }; }; diff --git a/arch/arm64/boot/dts/marvell/armada-7040-db.dts b/arch/arm64/boot/dts/marvell/armada-7040-db.dts index 92c761c380d3..9c3bdf87e543 100644 --- a/arch/arm64/boot/dts/marvell/armada-7040-db.dts +++ b/arch/arm64/boot/dts/marvell/armada-7040-db.dts @@ -44,6 +44,7 @@ * Device Tree file for Marvell Armada 7040 Development board platform */ +#include #include "armada-7040.dtsi" / { 
@@ -59,6 +60,34 @@ memory@00000000 { device_type = "memory"; reg = <0x0 0x0 0x0 0x80000000>; }; + + cpm_reg_usb3_0_vbus: cpm-usb3-0-vbus { + compatible = "regulator-fixed"; + regulator-name = "usb3h0-vbus"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + enable-active-high; + gpio = <&expander0 0 GPIO_ACTIVE_HIGH>; + }; + + cpm_reg_usb3_1_vbus: cpm-usb3-1-vbus { + compatible = "regulator-fixed"; + regulator-name = "usb3h1-vbus"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + enable-active-high; + gpio = <&expander0 1 GPIO_ACTIVE_HIGH>; + }; + + cpm_usb3_0_phy: cpm-usb3-0-phy { + compatible = "usb-nop-xceiv"; + vcc-supply = <&cpm_reg_usb3_0_vbus>; + }; + + cpm_usb3_1_phy: cpm-usb3-1-phy { + compatible = "usb-nop-xceiv"; + vcc-supply = <&cpm_reg_usb3_1_vbus>; + }; }; &i2c0 { @@ -105,6 +134,14 @@ &cpm_pcie2 { &cpm_i2c0 { status = "okay"; clock-frequency = <100000>; + + expander0: pca9555@21 { + compatible = "nxp,pca9555"; + pinctrl-names = "default"; + gpio-controller; + #gpio-cells = <2>; + reg = <0x21>; + }; }; &cpm_spi1 { @@ -140,10 +177,12 @@ &cpm_sata0 { }; &cpm_usb3_0 { + usb-phy = <&cpm_usb3_0_phy>; status = "okay"; }; &cpm_usb3_1 { + usb-phy = <&cpm_usb3_1_phy>; status = "okay"; }; diff --git a/arch/arm64/boot/dts/marvell/armada-8040-db.dts b/arch/arm64/boot/dts/marvell/armada-8040-db.dts index 1e8f7242ed6f..0d7b2ae46610 100644 --- a/arch/arm64/boot/dts/marvell/armada-8040-db.dts +++ b/arch/arm64/boot/dts/marvell/armada-8040-db.dts @@ -44,6 +44,7 @@ * Device Tree file for Marvell Armada 8040 Development board platform */ +#include #include "armada-8040.dtsi" / { @@ -59,6 +60,48 @@ memory@00000000 { device_type = "memory"; reg = <0x0 0x0 0x0 0x80000000>; }; + + cpm_reg_usb3_0_vbus: cpm-usb3-0-vbus { + compatible = "regulator-fixed"; + regulator-name = "cpm-usb3h0-vbus"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + enable-active-high; + gpio = <&expander0 0 GPIO_ACTIVE_HIGH>; + }; + + cpm_reg_usb3_1_vbus: cpm-usb3-1-vbus { + compatible = "regulator-fixed"; + regulator-name = "cpm-usb3h1-vbus"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + enable-active-high; + gpio = <&expander0 1 GPIO_ACTIVE_HIGH>; + }; + + cpm_usb3_0_phy: cpm-usb3-0-phy { + compatible = "usb-nop-xceiv"; + vcc-supply = <&cpm_reg_usb3_0_vbus>; + }; + + cpm_usb3_1_phy: cpm-usb3-1-phy { + compatible = "usb-nop-xceiv"; + vcc-supply = <&cpm_reg_usb3_1_vbus>; + }; + + cps_reg_usb3_0_vbus: cps-usb3-0-vbus { + compatible = "regulator-fixed"; + regulator-name = "cps-usb3h0-vbus"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + enable-active-high; + gpio = <&expander1 0 GPIO_ACTIVE_HIGH>; + }; + + cps_usb3_0_phy: cps-usb3-0-phy { + compatible = "usb-nop-xceiv"; + vcc-supply = <&cps_reg_usb3_0_vbus>; + }; }; &i2c0 { @@ -107,6 +150,25 @@ &cpm_pcie2 { &cpm_i2c0 { status = "okay"; clock-frequency = <100000>; + + /* U31 */ + expander0: pca9555@21 { + compatible = "nxp,pca9555"; + pinctrl-names = "default"; + gpio-controller; + #gpio-cells = <2>; + reg = <0x21>; + }; + + /* U25 */ + expander1: pca9555@25 { + compatible = "nxp,pca9555"; + pinctrl-names = "default"; + gpio-controller; + #gpio-cells = <2>; + reg = <0x25>; + }; + }; /* CON4 on CP0 expansion */ @@ -116,11 +178,13 @@ &cpm_sata0 { /* CON9 on CP0 expansion */ &cpm_usb3_0 { + usb-phy = <&cpm_usb3_0_phy>; status = "okay"; }; /* CON10 on CP0 expansion */ &cpm_usb3_1 { + usb-phy = <&cpm_usb3_1_phy>; status = "okay"; 
}; @@ -159,6 +223,7 @@ &cps_sata0 { /* CON9 on CP1 expansion */ &cps_usb3_0 { + usb-phy = <&cps_usb3_0_phy>; status = "okay"; }; diff --git a/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts b/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts index 4968e731de61..acf5c7d16d79 100644 --- a/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts +++ b/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts @@ -46,11 +46,17 @@ #include "armada-8040.dtsi" +#include + / { model = "Marvell 8040 MACHIATOBin"; compatible = "marvell,armada8040-mcbin", "marvell,armada8040", "marvell,armada-ap806-quad", "marvell,armada-ap806"; + chosen { + stdout-path = "serial0:115200n8"; + }; + memory@00000000 { device_type = "memory"; reg = <0x0 0x0 0x0 0x80000000>; @@ -77,11 +83,13 @@ v_vddo_h: regulator-1-8v { v_5v0_usb3_hst_vbus: regulator-usb3-vbus0 { compatible = "regulator-fixed"; + enable-active-high; + gpio = <&cpm_gpio2 15 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&cpm_xhci_vbus_pins>; regulator-name = "v_5v0_usb3_hst_vbus"; regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; - /* actually GPIO controlled, but 8k has no GPIO support yet */ - regulator-always-on; status = "okay"; }; @@ -112,10 +120,44 @@ &ap_sdhci0 { &cpm_i2c0 { clock-frequency = <100000>; + pinctrl-names = "default"; + pinctrl-0 = <&cpm_i2c0_pins>; status = "okay"; }; +&cpm_i2c1 { + clock-frequency = <100000>; + pinctrl-names = "default"; + pinctrl-0 = <&cpm_i2c1_pins>; + status = "okay"; + + i2c-switch@70 { + compatible = "nxp,pca9548"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x70>; + + sfpp0_i2c: i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + }; + sfpp1_i2c: i2c@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + }; + sfp_1g_i2c: i2c@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <2>; + }; + }; +}; + &cpm_mdio { + pinctrl-names = "default"; + pinctrl-0 = <&cpm_ge_mdio_pins>; status = "okay"; ge_phy: ethernet-phy@0 { @@ -123,6 +165,67 @@ ge_phy: ethernet-phy@0 { }; }; +&cpm_pcie0 { + pinctrl-names = "default"; + pinctrl-0 = <&cpm_pcie_pins>; + num-lanes = <4>; + num-viewport = <8>; + reset-gpio = <&cpm_gpio1 20 GPIO_ACTIVE_LOW>; + status = "okay"; +}; + +&cpm_pinctrl { + cpm_ge_mdio_pins: ge-mdio-pins { + marvell,pins = "mpp32", "mpp34"; + marvell,function = "ge"; + }; + cpm_i2c1_pins: i2c1-pins { + marvell,pins = "mpp35", "mpp36"; + marvell,function = "i2c1"; + }; + cpm_i2c0_pins: i2c0-pins { + marvell,pins = "mpp37", "mpp38"; + marvell,function = "i2c0"; + }; + cpm_xhci_vbus_pins: xhci0-vbus-pins { + marvell,pins = "mpp47"; + marvell,function = "gpio"; + }; + cpm_pcie_pins: pcie-pins { + marvell,pins = "mpp52"; + marvell,function = "gpio"; + }; + cpm_sdhci_pins: sdhci-pins { + marvell,pins = "mpp55", "mpp56", "mpp57", "mpp58", "mpp59", + "mpp60", "mpp61"; + marvell,function = "sdio"; + }; +}; + +&cpm_xmdio { + status = "okay"; + + phy0: ethernet-phy@0 { + compatible = "ethernet-phy-ieee802.3-c45"; + reg = <0>; + }; + + phy8: ethernet-phy@8 { + compatible = "ethernet-phy-ieee802.3-c45"; + reg = <8>; + }; +}; + +&cpm_ethernet { + status = "okay"; +}; + +&cpm_eth0 { + status = "okay"; + phy = <&phy0>; + phy-mode = "10gbase-kr"; +}; + &cpm_sata0 { /* CPM Lane 0 - U29 */ status = "okay"; @@ -132,6 +235,8 @@ &cpm_sdhci0 { /* U6 */ broken-cd; bus-width = <4>; + pinctrl-names = "default"; + pinctrl-0 = <&cpm_sdhci_pins>; status = "okay"; vqmmc-supply = <&v_3_3>; }; @@ -150,6 +255,12 @@ &cps_ethernet { status = "okay"; }; +&cps_eth0 { + status = "okay"; + 
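	/*
	 * Both 10G ports on this board follow the same pattern: the MAC is
	 * bound to a clause-45 PHY registered on &cpm_xmdio (phy0 at
	 * address 0 for cpm_eth0 above, phy8 at address 8 here) and runs
	 * in "10gbase-kr" mode. The per-CPU "tx-cpu0".."tx-cpu3" plus
	 * "rx-shared" interrupts these ports use are declared in the
	 * armada-cp110-master/-slave dtsi updates later in this diff, so
	 * the board file only selects the PHY and the PHY mode.
	 */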
phy = <&phy8>; + phy-mode = "10gbase-kr"; +}; + &cps_eth1 { /* CPS Lane 0 - J5 (Gigabit RJ45) */ status = "okay"; @@ -157,6 +268,13 @@ &cps_eth1 { phy-mode = "sgmii"; }; +&cps_pinctrl { + cps_spi1_pins: spi1-pins { + marvell,pins = "mpp12", "mpp13", "mpp14", "mpp15", "mpp16"; + marvell,function = "spi1"; + }; +}; + &cps_sata0 { /* CPS Lane 1 - U32 */ /* CPS Lane 3 - U31 */ @@ -164,6 +282,8 @@ &cps_sata0 { }; &cps_spi1 { + pinctrl-names = "default"; + pinctrl-0 = <&cps_spi1_pins>; status = "okay"; spi-flash@0 { diff --git a/arch/arm64/boot/dts/marvell/armada-8080-db.dts b/arch/arm64/boot/dts/marvell/armada-8080-db.dts new file mode 100644 index 000000000000..707af833832b --- /dev/null +++ b/arch/arm64/boot/dts/marvell/armada-8080-db.dts @@ -0,0 +1,67 @@ +/* + * Copyright (C) 2017 Marvell Technology Group Ltd. + * + * This file is dual-licensed: you can use it either under the terms + * of the GPLv2 or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +/* + * Device Tree file for Marvell Armada-8080 Development board platform + */ + +#include "armada-8080.dtsi" + +/ { + model = "Marvell 8080 board"; + compatible = "marvell,armada-8080-db", "marvell,armada-8080", + "marvell,armada-ap810-octa", "marvell,armada-ap810"; + + chosen { + stdout-path = "serial0:115200n8"; + }; + + memory@00000000 { + device_type = "memory"; + reg = <0x0 0x0 0x0 0x80000000>; + }; +}; + +&uart0_ap0 { + clock-frequency = <384000>; + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/marvell/armada-8080.dtsi b/arch/arm64/boot/dts/marvell/armada-8080.dtsi new file mode 100644 index 000000000000..d5535b716735 --- /dev/null +++ b/arch/arm64/boot/dts/marvell/armada-8080.dtsi @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2017 Marvell Technology Group Ltd. 
+ * + * This file is dual-licensed: you can use it either under the terms + * of the GPLv2 or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +/* + * Device Tree file for Marvell Armada-8080 SoC, made of an AP810 OCTA. + */ + +#include "armada-ap810-ap0-octa-core.dtsi" + +/ { + model = "Marvell 8080 board"; + compatible = "marvell,armada-8080", "marvell,armada-ap810-octa", + "marvell,armada-ap810"; +}; diff --git a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi index 4d360713ed12..30d48ecf46e0 100644 --- a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi @@ -254,7 +254,7 @@ ap_sdhci0: sdhci@6e0000 { ap_syscon: system-controller@6f4000 { compatible = "syscon", "simple-mfd"; - reg = <0x6f4000 0x1000>; + reg = <0x6f4000 0x2000>; ap_clk: clock { compatible = "marvell,ap806-clock"; @@ -265,7 +265,7 @@ ap_pinctrl: pinctrl { compatible = "marvell,ap806-pinctrl"; }; - ap_gpio: gpio { + ap_gpio: gpio@1040 { compatible = "marvell,armada-8k-gpio"; offset = <0x1040>; ngpios = <20>; diff --git a/arch/arm64/boot/dts/marvell/armada-ap810-ap0-octa-core.dtsi b/arch/arm64/boot/dts/marvell/armada-ap810-ap0-octa-core.dtsi new file mode 100644 index 000000000000..bf1b22b70384 --- /dev/null +++ b/arch/arm64/boot/dts/marvell/armada-ap810-ap0-octa-core.dtsi @@ -0,0 +1,104 @@ +/* + * Copyright (C) 2017 Marvell Technology Group Ltd. + * + * This file is dual-licensed: you can use it either under the terms + * of the GPLv2 or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. 
+ * + * a) This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +/* + * Device Tree file for Marvell Armada AP810 OCTA cores. + */ + +#include "armada-ap810-ap0.dtsi" + +/ { + cpus { + #address-cells = <1>; + #size-cells = <0>; + compatible = "marvell,armada-ap810-octa"; + + cpu@000 { + device_type = "cpu"; + compatible = "arm,cortex-a72", "arm,armv8"; + reg = <0x000>; + enable-method = "psci"; + }; + cpu@001 { + device_type = "cpu"; + compatible = "arm,cortex-a72", "arm,armv8"; + reg = <0x001>; + enable-method = "psci"; + }; + cpu@100 { + device_type = "cpu"; + compatible = "arm,cortex-a72", "arm,armv8"; + reg = <0x100>; + enable-method = "psci"; + }; + cpu@101 { + device_type = "cpu"; + compatible = "arm,cortex-a72", "arm,armv8"; + reg = <0x101>; + enable-method = "psci"; + }; + cpu@200 { + device_type = "cpu"; + compatible = "arm,cortex-a72", "arm,armv8"; + reg = <0x200>; + enable-method = "psci"; + }; + cpu@201 { + device_type = "cpu"; + compatible = "arm,cortex-a72", "arm,armv8"; + reg = <0x201>; + enable-method = "psci"; + }; + cpu@300 { + device_type = "cpu"; + compatible = "arm,cortex-a72", "arm,armv8"; + reg = <0x300>; + enable-method = "psci"; + }; + cpu@301 { + device_type = "cpu"; + compatible = "arm,cortex-a72", "arm,armv8"; + reg = <0x301>; + enable-method = "psci"; + }; + }; +}; diff --git a/arch/arm64/boot/dts/marvell/armada-ap810-ap0.dtsi b/arch/arm64/boot/dts/marvell/armada-ap810-ap0.dtsi new file mode 100644 index 000000000000..7e6f039f0f80 --- /dev/null +++ b/arch/arm64/boot/dts/marvell/armada-ap810-ap0.dtsi @@ -0,0 +1,163 @@ +/* + * Copyright (C) 2017 Marvell Technology Group Ltd. + * + * This file is dual-licensed: you can use it either under the terms + * of the GPLv2 or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. 
+ * + * a) This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +/* + * Device Tree file for Marvell Armada AP810. + */ + +#include + +/dts-v1/; + +/ { + model = "Marvell Armada AP810"; + compatible = "marvell,armada-ap810"; + #address-cells = <2>; + #size-cells = <2>; + + aliases { + serial0 = &uart0_ap0; + serial1 = &uart1_ap0; + }; + + psci { + compatible = "arm,psci-0.2"; + method = "smc"; + }; + + ap810-ap0 { + #address-cells = <2>; + #size-cells = <2>; + compatible = "simple-bus"; + interrupt-parent = <&gic>; + ranges; + + config-space@e8000000 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "simple-bus"; + ranges = <0x0 0x0 0xe8000000 0x4000000>; + interrupt-parent = <&gic>; + + gic: interrupt-controller@3000000 { + compatible = "arm,gic-v3"; + #interrupt-cells = <3>; + #address-cells = <1>; + #size-cells = <1>; + interrupt-controller; + interrupts = ; + ranges; + + reg = <0x3000000 0x10000>, /* GICD */ + <0x3060000 0x100000>, /* GICR */ + <0x00c0000 0x2000>, /* GICC */ + <0x00d0000 0x1000>, /* GICH */ + <0x00e0000 0x2000>; /* GICV */ + + gic_its_ap0: interrupt-controller@3040000 { + compatible = "arm,gic-v3-its"; + msi-controller; + #msi-cells = <1>; + reg = <0x3040000 0x20000>; + }; + }; + + timer { + compatible = "arm,armv8-timer"; + interrupts = , + , + , + ; + }; + + xor@400000 { + compatible = "marvell,armada-7k-xor", "marvell,xor-v2"; + reg = <0x400000 0x1000>, + <0x410000 0x1000>; + msi-parent = <&gic_its_ap0 0xa0>; + dma-coherent; + }; + + xor@420000 { + compatible = "marvell,armada-7k-xor", "marvell,xor-v2"; + reg = <0x420000 0x1000>, + <0x430000 0x1000>; + msi-parent = <&gic_its_ap0 0xa1>; + dma-coherent; + }; + + xor@440000 { + compatible = "marvell,armada-7k-xor", "marvell,xor-v2"; + reg = <0x440000 0x1000>, + <0x450000 0x1000>; + msi-parent = <&gic_its_ap0 0xa2>; + dma-coherent; + }; + + xor@460000 { + compatible = "marvell,armada-7k-xor", "marvell,xor-v2"; + reg = <0x460000 0x1000>, + 
<0x470000 0x1000>; + msi-parent = <&gic_its_ap0 0xa3>; + dma-coherent; + }; + + uart0_ap0: serial@512000 { + compatible = "snps,dw-apb-uart"; + reg = <0x512000 0x100>; + reg-shift = <2>; + interrupts = ; + reg-io-width = <1>; + status = "disabled"; + }; + + uart1_ap0: serial@512100 { + compatible = "snps,dw-apb-uart"; + reg = <0x512100 0x100>; + reg-shift = <2>; + interrupts = ; + reg-io-width = <1>; + status = "disabled"; + }; + }; + }; +}; diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi index 4c68605675a8..8263a8a504a8 100644 --- a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi @@ -65,25 +65,44 @@ cpm_ethernet: ethernet@0 { reg = <0x0 0x100000>, <0x129000 0xb000>; clocks = <&cpm_clk 1 3>, <&cpm_clk 1 9>, <&cpm_clk 1 5>; clock-names = "pp_clk", "gop_clk", "mg_clk"; + marvell,system-controller = <&cpm_syscon0>; status = "disabled"; dma-coherent; cpm_eth0: eth0 { - interrupts = ; + interrupts = , + , + , + , + ; + interrupt-names = "tx-cpu0", "tx-cpu1", "tx-cpu2", + "tx-cpu3", "rx-shared"; port-id = <0>; gop-port-id = <0>; status = "disabled"; }; cpm_eth1: eth1 { - interrupts = ; + interrupts = , + , + , + , + ; + interrupt-names = "tx-cpu0", "tx-cpu1", "tx-cpu2", + "tx-cpu3", "rx-shared"; port-id = <1>; gop-port-id = <2>; status = "disabled"; }; cpm_eth2: eth2 { - interrupts = ; + interrupts = , + , + , + , + ; + interrupt-names = "tx-cpu0", "tx-cpu1", "tx-cpu2", + "tx-cpu3", "rx-shared"; port-id = <2>; gop-port-id = <3>; status = "disabled"; @@ -115,6 +134,13 @@ cpm_icu: interrupt-controller@1e0000 { msi-parent = <&gicp>; }; + cpm_rtc: rtc@284000 { + compatible = "marvell,armada-8k-rtc"; + reg = <0x284000 0x20>, <0x284080 0x24>; + reg-names = "rtc", "rtc-soc"; + interrupts = ; + }; + cpm_syscon0: system-controller@440000 { compatible = "syscon", "simple-mfd"; reg = <0x440000 0x1000>; @@ -131,8 +157,12 @@ cpm_gpio1: gpio@100 { gpio-controller; #gpio-cells = <2>; gpio-ranges = <&cpm_pinctrl 0 0 32>; + interrupt-controller; + interrupts = , + , + , + ; status = "disabled"; - }; cpm_gpio2: gpio@140 { @@ -142,26 +172,15 @@ cpm_gpio2: gpio@140 { gpio-controller; #gpio-cells = <2>; gpio-ranges = <&cpm_pinctrl 0 32 31>; + interrupt-controller; + interrupts = , + , + , + ; status = "disabled"; }; }; - cpm_rtc: rtc@284000 { - compatible = "marvell,armada-8k-rtc"; - reg = <0x284000 0x20>, <0x284080 0x24>; - reg-names = "rtc", "rtc-soc"; - interrupts = ; - }; - - cpm_sata0: sata@540000 { - compatible = "marvell,armada-8k-ahci", - "generic-ahci"; - reg = <0x540000 0x30000>; - interrupts = ; - clocks = <&cpm_clk 1 15>; - status = "disabled"; - }; - cpm_usb3_0: usb3@500000 { compatible = "marvell,armada-8k-xhci", "generic-xhci"; @@ -182,6 +201,15 @@ cpm_usb3_1: usb3@510000 { status = "disabled"; }; + cpm_sata0: sata@540000 { + compatible = "marvell,armada-8k-ahci", + "generic-ahci"; + reg = <0x540000 0x30000>; + interrupts = ; + clocks = <&cpm_clk 1 15>; + status = "disabled"; + }; + cpm_xor0: xor@6a0000 { compatible = "marvell,armada-7k-xor", "marvell,xor-v2"; reg = <0x6a0000 0x1000>, @@ -240,6 +268,21 @@ cpm_i2c1: i2c@701100 { status = "disabled"; }; + cpm_nand: nand@720000 { + /* + * Due to the limiation of the pin available + * this controller is only usable on the CPM + * for A7K and on the CPS for A8K. 
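	 * In practice that means an Armada 7K board enables this cpm_nand
	 * node while an Armada 8K board enables the equivalent cps_nand
	 * node in the slave dtsi; the controller stays "disabled" here in
	 * the SoC dtsi. A minimal, hypothetical board-level enable (pinctrl
	 * and partition layout are board specific and omitted) would be:
	 *
	 *   &cpm_nand {
	 *           status = "okay";
	 *   };
	 *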
+ */ + compatible = "marvell,armada370-nand"; + reg = <0x720000 0x54>; + #address-cells = <1>; + #size-cells = <1>; + interrupts = ; + clocks = <&cpm_clk 1 2>; + status = "disabled"; + }; + cpm_trng: trng@760000 { compatible = "marvell,armada-8k-rng", "inside-secure,safexcel-eip76"; reg = <0x760000 0x7d>; diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi index 923f354b02f0..b71ee6c83668 100644 --- a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi @@ -60,37 +60,49 @@ config-space@f4000000 { compatible = "simple-bus"; ranges = <0x0 0x0 0xf4000000 0x2000000>; - cps_rtc: rtc@284000 { - compatible = "marvell,armada-8k-rtc"; - reg = <0x284000 0x20>, <0x284080 0x24>; - reg-names = "rtc", "rtc-soc"; - interrupts = ; - }; - cps_ethernet: ethernet@0 { compatible = "marvell,armada-7k-pp22"; reg = <0x0 0x100000>, <0x129000 0xb000>; clocks = <&cps_clk 1 3>, <&cps_clk 1 9>, <&cps_clk 1 5>; clock-names = "pp_clk", "gop_clk", "mg_clk"; + marvell,system-controller = <&cps_syscon0>; status = "disabled"; dma-coherent; cps_eth0: eth0 { - interrupts = ; + interrupts = , + , + , + , + ; + interrupt-names = "tx-cpu0", "tx-cpu1", "tx-cpu2", + "tx-cpu3", "rx-shared"; port-id = <0>; gop-port-id = <0>; status = "disabled"; }; cps_eth1: eth1 { - interrupts = ; + interrupts = , + , + , + , + ; + interrupt-names = "tx-cpu0", "tx-cpu1", "tx-cpu2", + "tx-cpu3", "rx-shared"; port-id = <1>; gop-port-id = <2>; status = "disabled"; }; cps_eth2: eth2 { - interrupts = ; + interrupts = , + , + , + , + ; + interrupt-names = "tx-cpu0", "tx-cpu1", "tx-cpu2", + "tx-cpu3", "rx-shared"; port-id = <2>; gop-port-id = <3>; status = "disabled"; @@ -122,6 +134,13 @@ cps_icu: interrupt-controller@1e0000 { msi-parent = <&gicp>; }; + cps_rtc: rtc@284000 { + compatible = "marvell,armada-8k-rtc"; + reg = <0x284000 0x20>, <0x284080 0x24>; + reg-names = "rtc", "rtc-soc"; + interrupts = ; + }; + cps_syscon0: system-controller@440000 { compatible = "syscon", "simple-mfd"; reg = <0x440000 0x1000>; @@ -138,8 +157,12 @@ cps_gpio1: gpio@100 { gpio-controller; #gpio-cells = <2>; gpio-ranges = <&cps_pinctrl 0 0 32>; + interrupt-controller; + interrupts = , + , + , + ; status = "disabled"; - }; cps_gpio2: gpio@140 { @@ -149,20 +172,16 @@ cps_gpio2: gpio@140 { gpio-controller; #gpio-cells = <2>; gpio-ranges = <&cps_pinctrl 0 32 31>; + interrupt-controller; + interrupts = , + , + , + ; status = "disabled"; }; }; - cps_sata0: sata@540000 { - compatible = "marvell,armada-8k-ahci", - "generic-ahci"; - reg = <0x540000 0x30000>; - interrupts = ; - clocks = <&cps_clk 1 15>; - status = "disabled"; - }; - cps_usb3_0: usb3@500000 { compatible = "marvell,armada-8k-xhci", "generic-xhci"; @@ -183,6 +202,15 @@ cps_usb3_1: usb3@510000 { status = "disabled"; }; + cps_sata0: sata@540000 { + compatible = "marvell,armada-8k-ahci", + "generic-ahci"; + reg = <0x540000 0x30000>; + interrupts = ; + clocks = <&cps_clk 1 15>; + status = "disabled"; + }; + cps_xor0: xor@6a0000 { compatible = "marvell,armada-7k-xor", "marvell,xor-v2"; reg = <0x6a0000 0x1000>, @@ -241,6 +269,21 @@ cps_i2c1: i2c@701100 { status = "disabled"; }; + cps_nand: nand@720000 { + /* + * Due to the limiation of the pin available + * this controller is only usable on the CPM + * for A7K and on the CPS for A8K. 
+ */ + compatible = "marvell,armada370-nand"; + reg = <0x720000 0x54>; + #address-cells = <1>; + #size-cells = <1>; + interrupts = ; + clocks = <&cps_clk 1 2>; + status = "disabled"; + }; + cps_trng: trng@760000 { compatible = "marvell,armada-8k-rng", "inside-secure,safexcel-eip76"; reg = <0x760000 0x7d>; diff --git a/arch/arm64/boot/dts/mediatek/Makefile b/arch/arm64/boot/dts/mediatek/Makefile index 015eb072ddef..151723b5c733 100644 --- a/arch/arm64/boot/dts/mediatek/Makefile +++ b/arch/arm64/boot/dts/mediatek/Makefile @@ -1,6 +1,8 @@ +dtb-$(CONFIG_ARCH_MEDIATEK) += mt2712-evb.dtb dtb-$(CONFIG_ARCH_MEDIATEK) += mt6755-evb.dtb dtb-$(CONFIG_ARCH_MEDIATEK) += mt6795-evb.dtb dtb-$(CONFIG_ARCH_MEDIATEK) += mt6797-evb.dtb +dtb-$(CONFIG_ARCH_MEDIATEK) += mt7622-rfb1.dtb dtb-$(CONFIG_ARCH_MEDIATEK) += mt8173-evb.dtb always := $(dtb-y) diff --git a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts new file mode 100644 index 000000000000..8c804df3da4e --- /dev/null +++ b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2017 MediaTek Inc. + * Author: YT Shen + * + * SPDX-License-Identifier: (GPL-2.0 OR MIT) + */ + +/dts-v1/; +#include "mt2712e.dtsi" + +/ { + model = "MediaTek MT2712 evaluation board"; + compatible = "mediatek,mt2712-evb", "mediatek,mt2712"; + + aliases { + serial0 = &uart0; + }; + + memory@40000000 { + device_type = "memory"; + reg = <0 0x40000000 0 0x80000000>; + }; + + chosen { + stdout-path = "serial0:921600n8"; + }; +}; + +&uart0 { + status = "okay"; +}; + diff --git a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi new file mode 100644 index 000000000000..57d0396b7faa --- /dev/null +++ b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi @@ -0,0 +1,171 @@ +/* + * Copyright (c) 2017 MediaTek Inc. 
+ * Author: YT Shen + * + * SPDX-License-Identifier: (GPL-2.0 OR MIT) + */ + +#include +#include + +/ { + compatible = "mediatek,mt2712"; + interrupt-parent = <&sysirq>; + #address-cells = <2>; + #size-cells = <2>; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + cpu-map { + cluster0 { + core0 { + cpu = <&cpu0>; + }; + core1 { + cpu = <&cpu1>; + }; + }; + + cluster1 { + core0 { + cpu = <&cpu2>; + }; + }; + }; + + cpu0: cpu@0 { + device_type = "cpu"; + compatible = "arm,cortex-a35"; + reg = <0x000>; + }; + + cpu1: cpu@1 { + device_type = "cpu"; + compatible = "arm,cortex-a35"; + reg = <0x001>; + enable-method = "psci"; + }; + + cpu2: cpu@200 { + device_type = "cpu"; + compatible = "arm,cortex-a72"; + reg = <0x200>; + enable-method = "psci"; + }; + }; + + psci { + compatible = "arm,psci-0.2"; + method = "smc"; + }; + + baud_clk: dummy26m { + compatible = "fixed-clock"; + clock-frequency = <26000000>; + #clock-cells = <0>; + }; + + sys_clk: dummyclk { + compatible = "fixed-clock"; + clock-frequency = <26000000>; + #clock-cells = <0>; + }; + + timer { + compatible = "arm,armv8-timer"; + interrupt-parent = <&gic>; + interrupts = , + , + , + ; + }; + + uart5: serial@1000f000 { + compatible = "mediatek,mt2712-uart", + "mediatek,mt6577-uart"; + reg = <0 0x1000f000 0 0x400>; + interrupts = ; + clocks = <&baud_clk>, <&sys_clk>; + clock-names = "baud", "bus"; + status = "disabled"; + }; + + sysirq: interrupt-controller@10220a80 { + compatible = "mediatek,mt2712-sysirq", + "mediatek,mt6577-sysirq"; + interrupt-controller; + #interrupt-cells = <3>; + interrupt-parent = <&gic>; + reg = <0 0x10220a80 0 0x40>; + }; + + gic: interrupt-controller@10510000 { + compatible = "arm,gic-400"; + #interrupt-cells = <3>; + interrupt-parent = <&gic>; + interrupt-controller; + reg = <0 0x10510000 0 0x10000>, + <0 0x10520000 0 0x20000>, + <0 0x10540000 0 0x20000>, + <0 0x10560000 0 0x20000>; + interrupts = ; + }; + + uart0: serial@11002000 { + compatible = "mediatek,mt2712-uart", + "mediatek,mt6577-uart"; + reg = <0 0x11002000 0 0x400>; + interrupts = ; + clocks = <&baud_clk>, <&sys_clk>; + clock-names = "baud", "bus"; + status = "disabled"; + }; + + uart1: serial@11003000 { + compatible = "mediatek,mt2712-uart", + "mediatek,mt6577-uart"; + reg = <0 0x11003000 0 0x400>; + interrupts = ; + clocks = <&baud_clk>, <&sys_clk>; + clock-names = "baud", "bus"; + status = "disabled"; + }; + + uart2: serial@11004000 { + compatible = "mediatek,mt2712-uart", + "mediatek,mt6577-uart"; + reg = <0 0x11004000 0 0x400>; + interrupts = ; + clocks = <&baud_clk>, <&sys_clk>; + clock-names = "baud", "bus"; + status = "disabled"; + }; + + uart3: serial@11005000 { + compatible = "mediatek,mt2712-uart", + "mediatek,mt6577-uart"; + reg = <0 0x11005000 0 0x400>; + interrupts = ; + clocks = <&baud_clk>, <&sys_clk>; + clock-names = "baud", "bus"; + status = "disabled"; + }; + + uart4: serial@11019000 { + compatible = "mediatek,mt2712-uart", + "mediatek,mt6577-uart"; + reg = <0 0x11019000 0 0x400>; + interrupts = ; + clocks = <&baud_clk>, <&sys_clk>; + clock-names = "baud", "bus"; + status = "disabled"; + }; +}; + diff --git a/arch/arm64/boot/dts/mediatek/mt6797.dtsi b/arch/arm64/boot/dts/mediatek/mt6797.dtsi index 31088a9f71de..4beaa71107d7 100644 --- a/arch/arm64/boot/dts/mediatek/mt6797.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt6797.dtsi @@ -108,13 +108,6 @@ clk26m: oscillator@0 { clock-output-names = "clk26m"; }; - clk32k: oscillator@1 { - compatible = "fixed-clock"; - #clock-cells = <0>; - clock-frequency = <32000>; - 
clock-output-names = "clk32k"; - }; - timer { compatible = "arm,armv8-timer"; interrupt-parent = <&gic>; @@ -147,6 +140,11 @@ scpsys: scpsys@10006000 { infracfg = <&infrasys>; }; + watchdog: watchdog@10007000 { + compatible = "mediatek,mt6797-wdt", "mediatek,mt6589-wdt"; + reg = <0 0x10007000 0 0x100>; + }; + apmixedsys: apmixed@1000c000 { compatible = "mediatek,mt6797-apmixedsys"; reg = <0 0x1000c000 0 0x1000>; diff --git a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts new file mode 100644 index 000000000000..c08309df2cc7 --- /dev/null +++ b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2017 MediaTek Inc. + * Author: Ming Huang + * Sean Wang + * + * SPDX-License-Identifier: (GPL-2.0 OR MIT) + */ + +/dts-v1/; +#include "mt7622.dtsi" + +/ { + model = "MediaTek MT7622 RFB1 board"; + compatible = "mediatek,mt7622-rfb1", "mediatek,mt7622"; + + chosen { + bootargs = "console=ttyS0,115200n1"; + }; + + memory { + reg = <0 0x40000000 0 0x3F000000>; + }; +}; + +&uart0 { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi new file mode 100644 index 000000000000..b111fec2ed9d --- /dev/null +++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2017 MediaTek Inc. + * Author: Ming Huang + * Sean Wang + * + * SPDX-License-Identifier: (GPL-2.0 OR MIT) + */ + +#include +#include + +/ { + compatible = "mediatek,mt7622"; + interrupt-parent = <&sysirq>; + #address-cells = <2>; + #size-cells = <2>; + + cpus { + #address-cells = <2>; + #size-cells = <0>; + + cpu0: cpu@0 { + device_type = "cpu"; + compatible = "arm,cortex-a53", "arm,armv8"; + reg = <0x0 0x0>; + enable-method = "psci"; + clock-frequency = <1300000000>; + }; + + cpu1: cpu@1 { + device_type = "cpu"; + compatible = "arm,cortex-a53", "arm,armv8"; + reg = <0x0 0x1>; + enable-method = "psci"; + clock-frequency = <1300000000>; + }; + }; + + uart_clk: dummy25m { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <25000000>; + }; + + bus_clk: dummy280m { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <280000000>; + }; + + psci { + compatible = "arm,psci-0.2"; + method = "smc"; + }; + + reserved-memory { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + /* 192 KiB reserved for ARM Trusted Firmware (BL31) */ + secmon_reserved: secmon@43000000 { + reg = <0 0x43000000 0 0x30000>; + no-map; + }; + }; + + timer { + compatible = "arm,armv8-timer"; + interrupt-parent = <&gic>; + interrupts = , + , + , + ; + }; + + sysirq: interrupt-controller@10200620 { + compatible = "mediatek,mt7622-sysirq", + "mediatek,mt6577-sysirq"; + interrupt-controller; + #interrupt-cells = <3>; + interrupt-parent = <&gic>; + reg = <0 0x10200620 0 0x20>; + }; + + gic: interrupt-controller@10300000 { + compatible = "arm,gic-400"; + interrupt-controller; + #interrupt-cells = <3>; + interrupt-parent = <&gic>; + reg = <0 0x10310000 0 0x1000>, + <0 0x10320000 0 0x1000>, + <0 0x10340000 0 0x2000>, + <0 0x10360000 0 0x2000>; + }; + + uart0: serial@11002000 { + compatible = "mediatek,mt7622-uart", + "mediatek,mt6577-uart"; + reg = <0 0x11002000 0 0x400>; + interrupts = ; + clocks = <&uart_clk>, <&bus_clk>; + clock-names = "baud", "bus"; + status = "disabled"; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile index cc0f02d9dd02..ff81d7e5805e 100644 --- a/arch/arm64/boot/dts/qcom/Makefile +++ 
b/arch/arm64/boot/dts/qcom/Makefile @@ -1,5 +1,6 @@ dtb-$(CONFIG_ARCH_QCOM) += apq8016-sbc.dtb dtb-$(CONFIG_ARCH_QCOM) += apq8096-db820c.dtb +dtb-$(CONFIG_ARCH_QCOM) += ipq8074-hk01.dtb dtb-$(CONFIG_ARCH_QCOM) += msm8916-mtp.dtb dtb-$(CONFIG_ARCH_QCOM) += msm8992-bullhead-rev-101.dtb dtb-$(CONFIG_ARCH_QCOM) += msm8994-angler-rev-101.dtb diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi index d94640812194..790b7775b901 100644 --- a/arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi +++ b/arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi @@ -17,6 +17,7 @@ pinconf { function = PMIC_GPIO_FUNC_NORMAL; power-source = ; input-disable; + output-high; }; }; diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi index bd310ac1967a..1d63e6b879de 100644 --- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi +++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi @@ -88,6 +88,8 @@ adv_bridge: bridge@39 { interrupts = <31 2>; adi,dsi-lanes = <4>; + clocks = <&rpmcc RPM_SMD_BB_CLK2>; + clock-names = "cec"; pd-gpios = <&msmgpio 32 0>; @@ -213,11 +215,14 @@ sdhci@07864000 { }; usb@78d9000 { - extcon = <&usb_id>, <&usb_id>; + extcon = <&usb_id>; status = "okay"; adp-disable; hnp-disable; srp-disable; + dr_mode = "host"; + pinctrl-names = "default"; + pinctrl-0 = <&usb_sw_sel_pm>; ulpi { phy { v1p8-supply = <&pm8916_l7>; @@ -337,19 +342,11 @@ usb2513 { usb_id: usb-id { compatible = "linux,extcon-usb-gpio"; - id-gpio = <&msmgpio 121 GPIO_ACTIVE_HIGH>; + vbus-gpio = <&msmgpio 121 GPIO_ACTIVE_HIGH>; pinctrl-names = "default"; pinctrl-0 = <&usb_id_default>; }; - usb-switch { - compatible = "toshiba,tc7usb40mu"; - switch-gpios = <&pm8916_gpios 4 GPIO_ACTIVE_HIGH>; - extcon = <&usb_id>; - pinctrl-names = "default"; - pinctrl-0 = <&usb_sw_sel_pm>; - }; - hdmi-out { compatible = "hdmi-connector"; type = "a"; diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c-pmic-pins.dtsi b/arch/arm64/boot/dts/qcom/apq8096-db820c-pmic-pins.dtsi index b1142c45fdc9..8e379782597a 100644 --- a/arch/arm64/boot/dts/qcom/apq8096-db820c-pmic-pins.dtsi +++ b/arch/arm64/boot/dts/qcom/apq8096-db820c-pmic-pins.dtsi @@ -24,4 +24,28 @@ pinconf { power-source = ; // 1.8V }; }; + + usb3_vbus_det_gpio: pm8996_gpio22 { + pinconf { + pins = "gpio22"; + function = PMIC_GPIO_FUNC_NORMAL; + input-enable; + bias-pull-down; + qcom,drive-strength = ; + power-source = ; // 1.8V + }; + }; +}; + +&pmi8994_gpios { + usb2_vbus_det_gpio: pmi8996_gpio6 { + pinconf { + pins = "gpio6"; + function = PMIC_GPIO_FUNC_NORMAL; + input-enable; + bias-pull-down; + qcom,drive-strength = ; + power-source = ; // 1.8V + }; + }; }; diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi index d2196fc6d739..789f3e87321e 100644 --- a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi +++ b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi @@ -13,6 +13,7 @@ #include "msm8996.dtsi" #include "pm8994.dtsi" +#include "pmi8994.dtsi" #include "apq8096-db820c-pins.dtsi" #include "apq8096-db820c-pmic-pins.dtsi" #include @@ -88,6 +89,55 @@ sdhci@74a4900 { cd-gpios = <&msmgpio 38 0x1>; status = "okay"; }; + + phy@34000 { + status = "okay"; + }; + + phy@7410000 { + status = "okay"; + }; + + phy@7411000 { + status = "okay"; + }; + + phy@7412000 { + status = "okay"; + }; + + usb@6a00000 { + status = "okay"; + + dwc3@6a00000 { + extcon = <&usb3_id>; + dr_mode = "otg"; + }; + }; + + usb3_id: usb3-id { + compatible = "linux,extcon-usb-gpio"; + id-gpio = 
<&pm8994_gpios 22 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&usb3_vbus_det_gpio>; + }; + + usb@7600000 { + status = "okay"; + + dwc3@7600000 { + extcon = <&usb2_id>; + dr_mode = "otg"; + maximum-speed = "high-speed"; + }; + }; + + usb2_id: usb2-id { + compatible = "linux,extcon-usb-gpio"; + id-gpio = <&pmi8994_gpios 6 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&usb2_vbus_det_gpio>; + }; }; @@ -106,4 +156,152 @@ button@0 { gpios = <&pm8994_gpios 2 GPIO_ACTIVE_LOW>; }; }; + + rpm-glink { + rpm_requests { + pm8994-regulators { + vdd_l1-supply = <&pm8994_s3>; + vdd_l2_l26_l28-supply = <&pm8994_s3>; + vdd_l3_l11-supply = <&pm8994_s3>; + vdd_l4_l27_l31-supply = <&pm8994_s3>; + vdd_l5_l7-supply = <&pm8994_s5>; + vdd_l14_l15-supply = <&pm8994_s5>; + vdd_l20_l21-supply = <&pm8994_s5>; + vdd_l25-supply = <&pm8994_s3>; + + s3 { + regulator-min-microvolt = <1300000>; + regulator-max-microvolt = <1300000>; + }; + s4 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + }; + s5 { + regulator-min-microvolt = <2150000>; + regulator-max-microvolt = <2150000>; + }; + s7 { + regulator-min-microvolt = <800000>; + regulator-max-microvolt = <800000>; + }; + + l1 { + regulator-min-microvolt = <1000000>; + regulator-max-microvolt = <1000000>; + }; + l2 { + regulator-min-microvolt = <1250000>; + regulator-max-microvolt = <1250000>; + }; + l3 { + regulator-min-microvolt = <850000>; + regulator-max-microvolt = <850000>; + }; + l4 { + regulator-min-microvolt = <1225000>; + regulator-max-microvolt = <1225000>; + }; + l6 { + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + }; + l8 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + }; + l9 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + }; + l10 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + }; + l11 { + regulator-min-microvolt = <1150000>; + regulator-max-microvolt = <1150000>; + }; + l12 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + }; + l13 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <2950000>; + }; + l14 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + }; + l15 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + }; + l16 { + regulator-min-microvolt = <2700000>; + regulator-max-microvolt = <2700000>; + }; + l17 { + regulator-min-microvolt = <2500000>; + regulator-max-microvolt = <2500000>; + }; + l18 { + regulator-min-microvolt = <2700000>; + regulator-max-microvolt = <2900000>; + }; + l19 { + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3000000>; + }; + l20 { + regulator-min-microvolt = <2950000>; + regulator-max-microvolt = <2950000>; + regulator-allow-set-load; + }; + l21 { + regulator-min-microvolt = <2950000>; + regulator-max-microvolt = <2950000>; + }; + l22 { + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + }; + l23 { + regulator-min-microvolt = <2800000>; + regulator-max-microvolt = <2800000>; + }; + l24 { + regulator-min-microvolt = <3075000>; + regulator-max-microvolt = <3075000>; + }; + l25 { + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + regulator-allow-set-load; + }; + l27 { + regulator-min-microvolt = <1000000>; + regulator-max-microvolt = <1000000>; + }; + l28 { + regulator-min-microvolt = <925000>; + regulator-max-microvolt = <925000>; + 
regulator-allow-set-load; + }; + l29 { + regulator-min-microvolt = <2800000>; + regulator-max-microvolt = <2800000>; + }; + l30 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + }; + l32 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + }; + }; + }; + }; }; diff --git a/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts b/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts new file mode 100644 index 000000000000..6a838b5d321e --- /dev/null +++ b/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts @@ -0,0 +1,52 @@ +/dts-v1/; +/* Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include "ipq8074.dtsi" + +/ { + #address-cells = <0x2>; + #size-cells = <0x2>; + model = "Qualcomm Technologies, Inc. IPQ8074-HK01"; + compatible = "qcom,ipq8074-hk01", "qcom,ipq8074"; + interrupt-parent = <&intc>; + + aliases { + serial0 = &blsp1_uart5; + }; + + chosen { + stdout-path = "serial0"; + }; + + memory { + device_type = "memory"; + reg = <0x0 0x40000000 0x0 0x20000000>; + }; + + soc { + pinctrl@1000000 { + serial_4_pins: serial4_pinmux { + mux { + pins = "gpio23", "gpio24"; + function = "blsp4_uart1"; + bias-disable; + }; + }; + }; + + serial@78b3000 { + pinctrl-0 = <&serial_4_pins>; + pinctrl-names = "default"; + status = "ok"; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi new file mode 100644 index 000000000000..2bc5dec5614d --- /dev/null +++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi @@ -0,0 +1,194 @@ +/* + * Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include + +/ { + model = "Qualcomm Technologies, Inc. 
IPQ8074"; + compatible = "qcom,ipq8074"; + + soc: soc { + #address-cells = <0x1>; + #size-cells = <0x1>; + ranges = <0 0 0 0xffffffff>; + compatible = "simple-bus"; + + pinctrl@1000000 { + compatible = "qcom,ipq8074-pinctrl"; + reg = <0x1000000 0x300000>; + interrupts = ; + gpio-controller; + #gpio-cells = <0x2>; + interrupt-controller; + #interrupt-cells = <0x2>; + }; + + intc: interrupt-controller@b000000 { + compatible = "qcom,msm-qgic2"; + interrupt-controller; + #interrupt-cells = <0x3>; + reg = <0xb000000 0x1000>, <0xb002000 0x1000>; + }; + + timer { + compatible = "arm,armv8-timer"; + interrupts = , + , + , + ; + }; + + timer@b120000 { + #address-cells = <1>; + #size-cells = <1>; + ranges; + compatible = "arm,armv7-timer-mem"; + reg = <0xb120000 0x1000>; + clock-frequency = <19200000>; + + frame@b120000 { + frame-number = <0>; + interrupts = , + ; + reg = <0xb121000 0x1000>, + <0xb122000 0x1000>; + }; + + frame@b123000 { + frame-number = <1>; + interrupts = ; + reg = <0xb123000 0x1000>; + status = "disabled"; + }; + + frame@b124000 { + frame-number = <2>; + interrupts = ; + reg = <0xb124000 0x1000>; + status = "disabled"; + }; + + frame@b125000 { + frame-number = <3>; + interrupts = ; + reg = <0xb125000 0x1000>; + status = "disabled"; + }; + + frame@b126000 { + frame-number = <4>; + interrupts = ; + reg = <0xb126000 0x1000>; + status = "disabled"; + }; + + frame@b127000 { + frame-number = <5>; + interrupts = ; + reg = <0xb127000 0x1000>; + status = "disabled"; + }; + + frame@b128000 { + frame-number = <6>; + interrupts = ; + reg = <0xb128000 0x1000>; + status = "disabled"; + }; + }; + + gcc: gcc@1800000 { + compatible = "qcom,gcc-ipq8074"; + reg = <0x1800000 0x80000>; + #clock-cells = <0x1>; + #reset-cells = <0x1>; + }; + + blsp1_uart5: serial@78b3000 { + compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm"; + reg = <0x78b3000 0x200>; + interrupts = ; + clocks = <&gcc GCC_BLSP1_UART5_APPS_CLK>, + <&gcc GCC_BLSP1_AHB_CLK>; + clock-names = "core", "iface"; + status = "disabled"; + }; + }; + + cpus { + #address-cells = <0x1>; + #size-cells = <0x0>; + + CPU0: cpu@0 { + device_type = "cpu"; + compatible = "arm,cortex-a53", "arm,armv8"; + reg = <0x0>; + next-level-cache = <&L2_0>; + enable-method = "psci"; + }; + + CPU1: cpu@1 { + device_type = "cpu"; + compatible = "arm,cortex-a53", "arm,armv8"; + enable-method = "psci"; + reg = <0x1>; + next-level-cache = <&L2_0>; + }; + + CPU2: cpu@2 { + device_type = "cpu"; + compatible = "arm,cortex-a53", "arm,armv8"; + enable-method = "psci"; + reg = <0x2>; + next-level-cache = <&L2_0>; + }; + + CPU3: cpu@3 { + device_type = "cpu"; + compatible = "arm,cortex-a53", "arm,armv8"; + enable-method = "psci"; + reg = <0x3>; + next-level-cache = <&L2_0>; + }; + + L2_0: l2-cache { + compatible = "cache"; + cache-level = <0x2>; + }; + }; + + psci { + compatible = "arm,psci-1.0"; + method = "smc"; + }; + + pmu { + compatible = "arm,armv8-pmuv3"; + interrupts = ; + }; + + clocks { + sleep_clk: sleep_clk { + compatible = "fixed-clock"; + clock-frequency = <32000>; + #clock-cells = <0>; + }; + + xo: xo { + compatible = "fixed-clock"; + clock-frequency = <19200000>; + #clock-cells = <0>; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi index 039991f80831..dc3817593e14 100644 --- a/arch/arm64/boot/dts/qcom/msm8916.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi @@ -88,6 +88,11 @@ wcnss_mem: wcnss@89300000 { no-map; }; + venus_mem: venus@89900000 { + reg = <0x0 0x89900000 0x0 0x600000>; + no-map; + }; + 
mba_mem: mba@8ea00000 { no-map; reg = <0 0x8ea00000 0 0x100000>; @@ -204,6 +209,17 @@ cpu_crit1: trip1 { }; + gpu_opp_table: opp_table { + compatible = "operating-points-v2"; + + opp-400000000 { + opp-hz = /bits/ 64 <400000000>; + }; + opp-19200000 { + opp-hz = /bits/ 64 <19200000>; + }; + }; + timer { compatible = "arm,armv8-timer"; interrupts = , @@ -694,6 +710,84 @@ tsens: thermal-sensor@4a8000 { #thermal-sensor-cells = <1>; }; + apps_iommu: iommu@1ef0000 { + #address-cells = <1>; + #size-cells = <1>; + #iommu-cells = <1>; + compatible = "qcom,msm8916-iommu", "qcom,msm-iommu-v1"; + ranges = <0 0x1e20000 0x40000>; + reg = <0x1ef0000 0x3000>; + clocks = <&gcc GCC_SMMU_CFG_CLK>, + <&gcc GCC_APSS_TCU_CLK>; + clock-names = "iface", "bus"; + qcom,iommu-secure-id = <17>; + + // mdp_0: + iommu-ctx@4000 { + compatible = "qcom,msm-iommu-v1-ns"; + reg = <0x4000 0x1000>; + interrupts = ; + }; + + // venus_ns: + iommu-ctx@5000 { + compatible = "qcom,msm-iommu-v1-sec"; + reg = <0x5000 0x1000>; + interrupts = ; + }; + }; + + gpu_iommu: iommu@1f08000 { + #address-cells = <1>; + #size-cells = <1>; + #iommu-cells = <1>; + compatible = "qcom,msm8916-iommu", "qcom,msm-iommu-v1"; + ranges = <0 0x1f08000 0x10000>; + clocks = <&gcc GCC_SMMU_CFG_CLK>, + <&gcc GCC_GFX_TCU_CLK>; + clock-names = "iface", "bus"; + qcom,iommu-secure-id = <18>; + + // gfx3d_user: + iommu-ctx@1000 { + compatible = "qcom,msm-iommu-v1-ns"; + reg = <0x1000 0x1000>; + interrupts = ; + }; + + // gfx3d_priv: + iommu-ctx@2000 { + compatible = "qcom,msm-iommu-v1-ns"; + reg = <0x2000 0x1000>; + interrupts = ; + }; + }; + + gpu@1c00000 { + compatible = "qcom,adreno-306.0", "qcom,adreno"; + reg = <0x01c00000 0x20000>; + reg-names = "kgsl_3d0_reg_memory"; + interrupts = ; + interrupt-names = "kgsl_3d0_irq"; + clock-names = + "core", + "iface", + "mem", + "mem_iface", + "alt_mem_iface", + "gfx3d"; + clocks = + <&gcc GCC_OXILI_GFX3D_CLK>, + <&gcc GCC_OXILI_AHB_CLK>, + <&gcc GCC_OXILI_GMEM_CLK>, + <&gcc GCC_BIMC_GFX_CLK>, + <&gcc GCC_BIMC_GPU_CLK>, + <&gcc GFX3D_CLK_SRC>; + power-domains = <&gcc OXILI_GDSC>; + operating-points-v2 = <&gpu_opp_table>; + iommus = <&gpu_iommu 1>, <&gpu_iommu 2>; + }; + mdss: mdss@1a00000 { compatible = "qcom,mdss"; reg = <0x1a00000 0x1000>, @@ -735,6 +829,8 @@ mdp: mdp@1a01000 { "core_clk", "vsync_clk"; + iommus = <&apps_iommu 4>; + ports { #address-cells = <1>; #size-cells = <0>; @@ -990,7 +1086,7 @@ funnel0_out: endpoint { }; replicator@824000 { - compatible = "qcom,coresight-replicator1x", "arm,primecell"; + compatible = "arm,coresight-dynamic-replicator", "arm,primecell"; reg = <0x824000 0x1000>; clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>; @@ -1207,6 +1303,28 @@ etm3_out: endpoint { }; }; }; + + venus: video-codec@1d00000 { + compatible = "qcom,msm8916-venus"; + reg = <0x01d00000 0xff000>; + interrupts = ; + power-domains = <&gcc VENUS_GDSC>; + clocks = <&gcc GCC_VENUS0_VCODEC0_CLK>, + <&gcc GCC_VENUS0_AHB_CLK>, + <&gcc GCC_VENUS0_AXI_CLK>; + clock-names = "core", "iface", "bus"; + iommus = <&apps_iommu 5>; + memory-region = <&venus_mem>; + status = "okay"; + + video-decoder { + compatible = "venus-decoder"; + }; + + video-encoder { + compatible = "venus-encoder"; + }; + }; }; smd { diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi index 8f085716e258..887b61c872dd 100644 --- a/arch/arm64/boot/dts/qcom/msm8996.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi @@ -276,12 +276,83 @@ smem { hwlocks = <&tcsr_mutex 3>; }; + rpm-glink { + compatible = "qcom,glink-rpm"; 
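		/*
		 * RPM requests go out over G-Link: messages are staged in the
		 * rpm_msg_ram region declared below in the soc node and kicked
		 * through channel 0 of the apcs_glb mailbox. The pm8994_sN /
		 * pm8994_lN labels exported by the "pm8994-regulators" child
		 * are what the QMP/QUSB2 PHYs later in this file (for example
		 * vdda-phy-supply = <&pm8994_l28>) and the db820c board file
		 * consume.
		 */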
+ + interrupts = ; + + qcom,rpm-msg-ram = <&rpm_msg_ram>; + + mboxes = <&apcs_glb 0>; + + rpm_requests { + compatible = "qcom,rpm-msm8996"; + qcom,glink-channels = "rpm_requests"; + + pm8994-regulators { + compatible = "qcom,rpm-pm8994-regulators"; + + pm8994_s1: s1 {}; + pm8994_s2: s2 {}; + pm8994_s3: s3 {}; + pm8994_s4: s4 {}; + pm8994_s5: s5 {}; + pm8994_s6: s6 {}; + pm8994_s7: s7 {}; + pm8994_s8: s8 {}; + pm8994_s9: s9 {}; + pm8994_s10: s10 {}; + pm8994_s11: s11 {}; + pm8994_s12: s12 {}; + + pm8994_l1: l1 {}; + pm8994_l2: l2 {}; + pm8994_l3: l3 {}; + pm8994_l4: l4 {}; + pm8994_l5: l5 {}; + pm8994_l6: l6 {}; + pm8994_l7: l7 {}; + pm8994_l8: l8 {}; + pm8994_l9: l9 {}; + pm8994_l10: l10 {}; + pm8994_l11: l11 {}; + pm8994_l12: l12 {}; + pm8994_l13: l13 {}; + pm8994_l14: l14 {}; + pm8994_l15: l15 {}; + pm8994_l16: l16 {}; + pm8994_l17: l17 {}; + pm8994_l18: l18 {}; + pm8994_l19: l19 {}; + pm8994_l20: l20 {}; + pm8994_l21: l21 {}; + pm8994_l22: l22 {}; + pm8994_l23: l23 {}; + pm8994_l24: l24 {}; + pm8994_l25: l25 {}; + pm8994_l26: l26 {}; + pm8994_l27: l27 {}; + pm8994_l28: l28 {}; + pm8994_l29: l29 {}; + pm8994_l30: l30 {}; + pm8994_l31: l31 {}; + pm8994_l32: l32 {}; + }; + + }; + }; + soc: soc { #address-cells = <1>; #size-cells = <1>; ranges = <0 0 0 0xffffffff>; compatible = "simple-bus"; + rpm_msg_ram: memory@68000 { + compatible = "qcom,rpm-msg-ram"; + reg = <0x68000 0x6000>; + }; + tcsr_mutex_regs: syscon@740000 { compatible = "syscon"; reg = <0x740000 0x20000>; @@ -303,6 +374,13 @@ apcs: syscon@9820000 { reg = <0x9820000 0x1000>; }; + apcs_glb: mailbox@9820000 { + compatible = "qcom,msm8996-apcs-hmss-global"; + reg = <0x9820000 0x1000>; + + #mbox-cells = <1>; + }; + gcc: clock-controller@300000 { compatible = "qcom,gcc-msm8996"; #clock-cells = <1>; @@ -538,6 +616,209 @@ mmcc: clock-controller@8c0000 { <960000000>, <825000000>; }; + + qfprom@74000 { + compatible = "qcom,qfprom"; + reg = <0x74000 0x8ff>; + #address-cells = <1>; + #size-cells = <1>; + + qusb2p_hstx_trim: hstx_trim@24e { + reg = <0x24e 0x2>; + bits = <5 4>; + }; + + qusb2s_hstx_trim: hstx_trim@24f { + reg = <0x24f 0x1>; + bits = <1 4>; + }; + }; + + phy@34000 { + compatible = "qcom,msm8996-qmp-pcie-phy"; + reg = <0x34000 0x488>; + #clock-cells = <1>; + #address-cells = <1>; + #size-cells = <1>; + ranges; + + clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>, + <&gcc GCC_PCIE_PHY_CFG_AHB_CLK>, + <&gcc GCC_PCIE_CLKREF_CLK>; + clock-names = "aux", "cfg_ahb", "ref"; + + vdda-phy-supply = <&pm8994_l28>; + vdda-pll-supply = <&pm8994_l12>; + + resets = <&gcc GCC_PCIE_PHY_BCR>, + <&gcc GCC_PCIE_PHY_COM_BCR>, + <&gcc GCC_PCIE_PHY_COM_NOCSR_BCR>; + reset-names = "phy", "common", "cfg"; + status = "disabled"; + + pciephy_0: lane@35000 { + reg = <0x035000 0x130>, + <0x035200 0x200>, + <0x035400 0x1dc>; + #phy-cells = <0>; + + clock-output-names = "pcie_0_pipe_clk_src"; + clocks = <&gcc GCC_PCIE_0_PIPE_CLK>; + clock-names = "pipe0"; + resets = <&gcc GCC_PCIE_0_PHY_BCR>; + reset-names = "lane0"; + }; + + pciephy_1: lane@36000 { + reg = <0x036000 0x130>, + <0x036200 0x200>, + <0x036400 0x1dc>; + #phy-cells = <0>; + + clock-output-names = "pcie_1_pipe_clk_src"; + clocks = <&gcc GCC_PCIE_1_PIPE_CLK>; + clock-names = "pipe1"; + resets = <&gcc GCC_PCIE_1_PHY_BCR>; + reset-names = "lane1"; + }; + + pciephy_2: lane@37000 { + reg = <0x037000 0x130>, + <0x037200 0x200>, + <0x037400 0x1dc>; + #phy-cells = <0>; + + clock-output-names = "pcie_2_pipe_clk_src"; + clocks = <&gcc GCC_PCIE_2_PIPE_CLK>; + clock-names = "pipe2"; + resets = <&gcc GCC_PCIE_2_PHY_BCR>; 
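			/*
			 * Each lane@ child is an independent PHY provider
			 * (#phy-cells = <0>) with its own pipe clock output and
			 * per-lane reset, so a PCIe root port selects exactly one
			 * lane by label. A hypothetical consumer fragment (the
			 * PCIe controller nodes themselves are not part of this
			 * patch):
			 *
			 *   pcie0: pcie@600000 {
			 *           ...
			 *           phys = <&pciephy_0>;
			 *           phy-names = "pciephy";
			 *   };
			 */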
+ reset-names = "lane2"; + }; + }; + + phy@7410000 { + compatible = "qcom,msm8996-qmp-usb3-phy"; + reg = <0x7410000 0x1c4>; + #clock-cells = <1>; + #address-cells = <1>; + #size-cells = <1>; + ranges; + + clocks = <&gcc GCC_USB3_PHY_AUX_CLK>, + <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>, + <&gcc GCC_USB3_CLKREF_CLK>; + clock-names = "aux", "cfg_ahb", "ref"; + + vdda-phy-supply = <&pm8994_l28>; + vdda-pll-supply = <&pm8994_l12>; + + resets = <&gcc GCC_USB3_PHY_BCR>, + <&gcc GCC_USB3PHY_PHY_BCR>; + reset-names = "phy", "common"; + status = "disabled"; + + ssusb_phy_0: lane@7410200 { + reg = <0x7410200 0x200>, + <0x7410400 0x130>, + <0x7410600 0x1a8>; + #phy-cells = <0>; + + clock-output-names = "usb3_phy_pipe_clk_src"; + clocks = <&gcc GCC_USB3_PHY_PIPE_CLK>; + clock-names = "pipe0"; + }; + }; + + hsusb_phy1: phy@7411000 { + compatible = "qcom,msm8996-qusb2-phy"; + reg = <0x7411000 0x180>; + #phy-cells = <0>; + + clocks = <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>, + <&gcc GCC_RX1_USB2_CLKREF_CLK>; + clock-names = "cfg_ahb", "ref"; + + vdda-pll-supply = <&pm8994_l12>; + vdda-phy-dpdm-supply = <&pm8994_l24>; + + resets = <&gcc GCC_QUSB2PHY_PRIM_BCR>; + nvmem-cells = <&qusb2p_hstx_trim>; + status = "disabled"; + }; + + hsusb_phy2: phy@7412000 { + compatible = "qcom,msm8996-qusb2-phy"; + reg = <0x7412000 0x180>; + #phy-cells = <0>; + + clocks = <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>, + <&gcc GCC_RX2_USB2_CLKREF_CLK>; + clock-names = "cfg_ahb", "ref"; + + vdda-pll-supply = <&pm8994_l12>; + vdda-phy-dpdm-supply = <&pm8994_l24>; + + resets = <&gcc GCC_QUSB2PHY_SEC_BCR>; + nvmem-cells = <&qusb2s_hstx_trim>; + status = "disabled"; + }; + + usb2: usb@7600000 { + compatible = "qcom,dwc3"; + #address-cells = <1>; + #size-cells = <1>; + ranges; + + clocks = <&gcc GCC_PERIPH_NOC_USB20_AHB_CLK>, + <&gcc GCC_USB20_MASTER_CLK>, + <&gcc GCC_USB20_MOCK_UTMI_CLK>, + <&gcc GCC_USB20_SLEEP_CLK>, + <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>; + + assigned-clocks = <&gcc GCC_USB20_MOCK_UTMI_CLK>, + <&gcc GCC_USB20_MASTER_CLK>; + assigned-clock-rates = <19200000>, <60000000>; + + power-domains = <&gcc USB30_GDSC>; + status = "disabled"; + + dwc3@7600000 { + compatible = "snps,dwc3"; + reg = <0x7600000 0xcc00>; + interrupts = <0 138 0>; + phys = <&hsusb_phy2>; + phy-names = "usb2-phy"; + }; + }; + + usb3: usb@6a00000 { + compatible = "qcom,dwc3"; + #address-cells = <1>; + #size-cells = <1>; + ranges; + + clocks = <&gcc GCC_SYS_NOC_USB3_AXI_CLK>, + <&gcc GCC_USB30_MASTER_CLK>, + <&gcc GCC_AGGRE2_USB3_AXI_CLK>, + <&gcc GCC_USB30_MOCK_UTMI_CLK>, + <&gcc GCC_USB30_SLEEP_CLK>, + <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>; + + assigned-clocks = <&gcc GCC_USB30_MOCK_UTMI_CLK>, + <&gcc GCC_USB30_MASTER_CLK>; + assigned-clock-rates = <19200000>, <120000000>; + + power-domains = <&gcc USB30_GDSC>; + status = "disabled"; + + dwc3@6a00000 { + compatible = "snps,dwc3"; + reg = <0x6a00000 0xcc00>; + interrupts = <0 131 0>; + phys = <&hsusb_phy1>, <&ssusb_phy_0>; + phy-names = "usb2-phy", "usb3-phy"; + }; + }; }; adsp-pil { @@ -558,6 +839,15 @@ adsp-pil { qcom,smem-states = <&adsp_smp2p_out 0>; qcom,smem-state-names = "stop"; + + smd-edge { + interrupts = ; + + label = "lpass"; + qcom,ipc = <&apcs 16 8>; + qcom,smd-edge = <1>; + qcom,remote-pid = <2>; + }; }; adsp-smp2p { @@ -584,6 +874,30 @@ adsp_smp2p_in: slave-kernel { }; }; + modem-smp2p { + compatible = "qcom,smp2p"; + qcom,smem = <435>, <428>; + + interrupts = ; + + qcom,ipc = <&apcs 16 14>; + + qcom,local-pid = <0>; + qcom,remote-pid = <1>; + + modem_smp2p_out: master-kernel { + qcom,entry-name = 
"master-kernel"; + #qcom,smem-state-cells = <1>; + }; + + modem_smp2p_in: slave-kernel { + qcom,entry-name = "slave-kernel"; + + interrupt-controller; + #interrupt-cells = <2>; + }; + }; + smp2p-slpi { compatible = "qcom,smp2p"; qcom,smem = <481>, <430>; diff --git a/arch/arm64/boot/dts/qcom/pmi8994.dtsi b/arch/arm64/boot/dts/qcom/pmi8994.dtsi index d3879a4e8076..57673f92805d 100644 --- a/arch/arm64/boot/dts/qcom/pmi8994.dtsi +++ b/arch/arm64/boot/dts/qcom/pmi8994.dtsi @@ -8,6 +8,23 @@ pmic@2 { reg = <0x2 SPMI_USID>; #address-cells = <1>; #size-cells = <0>; + + pmi8994_gpios: gpios@c000 { + compatible = "qcom,pmi8994-gpio", "qcom,spmi-gpio"; + reg = <0xc000>; + gpio-controller; + #gpio-cells = <2>; + interrupts = <2 0xc0 0 IRQ_TYPE_NONE>, + <2 0xc1 0 IRQ_TYPE_NONE>, + <2 0xc2 0 IRQ_TYPE_NONE>, + <2 0xc3 0 IRQ_TYPE_NONE>, + <2 0xc4 0 IRQ_TYPE_NONE>, + <2 0xc5 0 IRQ_TYPE_NONE>, + <2 0xc6 0 IRQ_TYPE_NONE>, + <2 0xc7 0 IRQ_TYPE_NONE>, + <2 0xc8 0 IRQ_TYPE_NONE>, + <2 0xc9 0 IRQ_TYPE_NONE>; + }; }; pmic@3 { diff --git a/arch/arm64/boot/dts/renesas/Makefile b/arch/arm64/boot/dts/renesas/Makefile index acc4bb30d485..381928bc1358 100644 --- a/arch/arm64/boot/dts/renesas/Makefile +++ b/arch/arm64/boot/dts/renesas/Makefile @@ -2,6 +2,7 @@ dtb-$(CONFIG_ARCH_R8A7795) += r8a7795-salvator-x.dtb r8a7795-h3ulcb.dtb dtb-$(CONFIG_ARCH_R8A7795) += r8a7795-salvator-xs.dtb dtb-$(CONFIG_ARCH_R8A7795) += r8a7795-es1-salvator-x.dtb r8a7795-es1-h3ulcb.dtb dtb-$(CONFIG_ARCH_R8A7796) += r8a7796-salvator-x.dtb r8a7796-m3ulcb.dtb +dtb-$(CONFIG_ARCH_R8A77995) += r8a77995-draak.dtb always := $(dtb-y) clean-files := *.dtb diff --git a/arch/arm64/boot/dts/renesas/r8a7795-es1-h3ulcb.dts b/arch/arm64/boot/dts/renesas/r8a7795-es1-h3ulcb.dts index 95fe207cb6a3..dd4f9b6a4254 100644 --- a/arch/arm64/boot/dts/renesas/r8a7795-es1-h3ulcb.dts +++ b/arch/arm64/boot/dts/renesas/r8a7795-es1-h3ulcb.dts @@ -9,8 +9,6 @@ * kind, whether express or implied. */ -#define CPG_AUDIO_CLK_I R8A7795_CLK_S0D4 - /dts-v1/; #include "r8a7795-es1.dtsi" #include "ulcb.dtsi" diff --git a/arch/arm64/boot/dts/renesas/r8a7795-es1-salvator-x.dts b/arch/arm64/boot/dts/renesas/r8a7795-es1-salvator-x.dts index b84c156ed696..3f7d5f51e428 100644 --- a/arch/arm64/boot/dts/renesas/r8a7795-es1-salvator-x.dts +++ b/arch/arm64/boot/dts/renesas/r8a7795-es1-salvator-x.dts @@ -8,8 +8,6 @@ * kind, whether express or implied. */ -#define CPG_AUDIO_CLK_I R8A7795_CLK_S0D4 - /dts-v1/; #include "r8a7795-es1.dtsi" #include "salvator-x.dtsi" diff --git a/arch/arm64/boot/dts/renesas/r8a7795-es1.dtsi b/arch/arm64/boot/dts/renesas/r8a7795-es1.dtsi index a0ba7bd21ea3..aaa5e67a963e 100644 --- a/arch/arm64/boot/dts/renesas/r8a7795-es1.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a7795-es1.dtsi @@ -21,6 +21,14 @@ xhci1: usb@ee0400000 { status = "disabled"; }; + /delete-node/ usb-phy@ee0e0200; + /delete-node/ usb@ee0e0100; + /delete-node/ usb@ee0e0000; + /delete-node/ usb@e659c000; + + /delete-node/ dma-controller@e6460000; + /delete-node/ dma-controller@e6470000; + fcpf2: fcp@fe952000 { compatible = "renesas,fcpf"; reg = <0 0xfe952000 0 0x200>; @@ -79,6 +87,5 @@ fdp1@fe948000 { }; &du { - compatible = "renesas,du-r8a7795"; vsps = <&vspd0 &vspd1 &vspd2 &vspd3>; }; diff --git a/arch/arm64/boot/dts/renesas/r8a7795-h3ulcb.dts b/arch/arm64/boot/dts/renesas/r8a7795-h3ulcb.dts index 0426f41765f0..0afe777973de 100644 --- a/arch/arm64/boot/dts/renesas/r8a7795-h3ulcb.dts +++ b/arch/arm64/boot/dts/renesas/r8a7795-h3ulcb.dts @@ -9,8 +9,6 @@ * kind, whether express or implied. 
*/ -#define CPG_AUDIO_CLK_I R8A7795_CLK_S0D4 - /dts-v1/; #include "r8a7795.dtsi" #include "ulcb.dtsi" @@ -40,3 +38,17 @@ memory@700000000 { reg = <0x7 0x00000000 0x0 0x40000000>; }; }; + +&du { + clocks = <&cpg CPG_MOD 724>, + <&cpg CPG_MOD 723>, + <&cpg CPG_MOD 722>, + <&cpg CPG_MOD 721>, + <&cpg CPG_MOD 727>, + <&versaclock5 1>, + <&versaclock5 3>, + <&versaclock5 4>, + <&versaclock5 2>; + clock-names = "du.0", "du.1", "du.2", "du.3", "lvds.0", + "dclkin.0", "dclkin.1", "dclkin.2", "dclkin.3"; +}; diff --git a/arch/arm64/boot/dts/renesas/r8a7795-salvator-x.dts b/arch/arm64/boot/dts/renesas/r8a7795-salvator-x.dts index 684fb3b9d154..17953070f38d 100644 --- a/arch/arm64/boot/dts/renesas/r8a7795-salvator-x.dts +++ b/arch/arm64/boot/dts/renesas/r8a7795-salvator-x.dts @@ -8,8 +8,6 @@ * kind, whether express or implied. */ -#define CPG_AUDIO_CLK_I R8A7795_CLK_S0D4 - /dts-v1/; #include "r8a7795.dtsi" #include "salvator-x.dtsi" diff --git a/arch/arm64/boot/dts/renesas/r8a7795-salvator-xs.dts b/arch/arm64/boot/dts/renesas/r8a7795-salvator-xs.dts index de354957144b..7675de5d4f2c 100644 --- a/arch/arm64/boot/dts/renesas/r8a7795-salvator-xs.dts +++ b/arch/arm64/boot/dts/renesas/r8a7795-salvator-xs.dts @@ -8,8 +8,6 @@ * kind, whether express or implied. */ -#define CPG_AUDIO_CLK_I R8A7795_CLK_S0D4 - /dts-v1/; #include "r8a7795.dtsi" #include "salvator-xs.dtsi" @@ -46,10 +44,12 @@ &du { <&cpg CPG_MOD 722>, <&cpg CPG_MOD 721>, <&cpg CPG_MOD 727>, + <&versaclock6 1>, <&x21_clk>, - <&x22_clk>; + <&x22_clk>, + <&versaclock6 2>; clock-names = "du.0", "du.1", "du.2", "du.3", "lvds.0", - "dclkin.1", "dclkin.2"; + "dclkin.0", "dclkin.1", "dclkin.2", "dclkin.3"; }; &ehci2 { diff --git a/arch/arm64/boot/dts/renesas/r8a7795.dtsi b/arch/arm64/boot/dts/renesas/r8a7795.dtsi index e31c1b660b3f..2938195b9571 100644 --- a/arch/arm64/boot/dts/renesas/r8a7795.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a7795.dtsi @@ -12,6 +12,8 @@ #include #include +#define CPG_AUDIO_CLK_I R8A7795_CLK_S0D4 + / { compatible = "renesas,r8a7795"; #address-cells = <2>; @@ -691,6 +693,126 @@ channel1 { }; }; + drif00: rif@e6f40000 { + compatible = "renesas,r8a7795-drif", + "renesas,rcar-gen3-drif"; + reg = <0 0xe6f40000 0 0x64>; + interrupts = ; + clocks = <&cpg CPG_MOD 515>; + clock-names = "fck"; + dmas = <&dmac1 0x20>, <&dmac2 0x20>; + dma-names = "rx", "rx"; + power-domains = <&sysc R8A7795_PD_ALWAYS_ON>; + resets = <&cpg 515>; + renesas,bonding = <&drif01>; + status = "disabled"; + }; + + drif01: rif@e6f50000 { + compatible = "renesas,r8a7795-drif", + "renesas,rcar-gen3-drif"; + reg = <0 0xe6f50000 0 0x64>; + interrupts = ; + clocks = <&cpg CPG_MOD 514>; + clock-names = "fck"; + dmas = <&dmac1 0x22>, <&dmac2 0x22>; + dma-names = "rx", "rx"; + power-domains = <&sysc R8A7795_PD_ALWAYS_ON>; + resets = <&cpg 514>; + renesas,bonding = <&drif00>; + status = "disabled"; + }; + + drif10: rif@e6f60000 { + compatible = "renesas,r8a7795-drif", + "renesas,rcar-gen3-drif"; + reg = <0 0xe6f60000 0 0x64>; + interrupts = ; + clocks = <&cpg CPG_MOD 513>; + clock-names = "fck"; + dmas = <&dmac1 0x24>, <&dmac2 0x24>; + dma-names = "rx", "rx"; + power-domains = <&sysc R8A7795_PD_ALWAYS_ON>; + resets = <&cpg 513>; + renesas,bonding = <&drif11>; + status = "disabled"; + }; + + drif11: rif@e6f70000 { + compatible = "renesas,r8a7795-drif", + "renesas,rcar-gen3-drif"; + reg = <0 0xe6f70000 0 0x64>; + interrupts = ; + clocks = <&cpg CPG_MOD 512>; + clock-names = "fck"; + dmas = <&dmac1 0x26>, <&dmac2 0x26>; + dma-names = "rx", "rx"; + power-domains = <&sysc 
R8A7795_PD_ALWAYS_ON>; + resets = <&cpg 512>; + renesas,bonding = <&drif10>; + status = "disabled"; + }; + + drif20: rif@e6f80000 { + compatible = "renesas,r8a7795-drif", + "renesas,rcar-gen3-drif"; + reg = <0 0xe6f80000 0 0x64>; + interrupts = ; + clocks = <&cpg CPG_MOD 511>; + clock-names = "fck"; + dmas = <&dmac1 0x28>, <&dmac2 0x28>; + dma-names = "rx", "rx"; + power-domains = <&sysc R8A7795_PD_ALWAYS_ON>; + resets = <&cpg 511>; + renesas,bonding = <&drif21>; + status = "disabled"; + }; + + drif21: rif@e6f90000 { + compatible = "renesas,r8a7795-drif", + "renesas,rcar-gen3-drif"; + reg = <0 0xe6f90000 0 0x64>; + interrupts = ; + clocks = <&cpg CPG_MOD 510>; + clock-names = "fck"; + dmas = <&dmac1 0x2a>, <&dmac2 0x2a>; + dma-names = "rx", "rx"; + power-domains = <&sysc R8A7795_PD_ALWAYS_ON>; + resets = <&cpg 510>; + renesas,bonding = <&drif20>; + status = "disabled"; + }; + + drif30: rif@e6fa0000 { + compatible = "renesas,r8a7795-drif", + "renesas,rcar-gen3-drif"; + reg = <0 0xe6fa0000 0 0x64>; + interrupts = ; + clocks = <&cpg CPG_MOD 509>; + clock-names = "fck"; + dmas = <&dmac1 0x2c>, <&dmac2 0x2c>; + dma-names = "rx", "rx"; + power-domains = <&sysc R8A7795_PD_ALWAYS_ON>; + resets = <&cpg 509>; + renesas,bonding = <&drif31>; + status = "disabled"; + }; + + drif31: rif@e6fb0000 { + compatible = "renesas,r8a7795-drif", + "renesas,rcar-gen3-drif"; + reg = <0 0xe6fb0000 0 0x64>; + interrupts = ; + clocks = <&cpg CPG_MOD 508>; + clock-names = "fck"; + dmas = <&dmac1 0x2e>, <&dmac2 0x2e>; + dma-names = "rx", "rx"; + power-domains = <&sysc R8A7795_PD_ALWAYS_ON>; + resets = <&cpg 508>; + renesas,bonding = <&drif30>; + status = "disabled"; + }; + hscif0: serial@e6540000 { compatible = "renesas,hscif-r8a7795", "renesas,rcar-gen3-hscif", @@ -776,6 +898,68 @@ hscif4: serial@e66b0000 { status = "disabled"; }; + msiof0: spi@e6e90000 { + compatible = "renesas,msiof-r8a7795", + "renesas,rcar-gen3-msiof"; + reg = <0 0xe6e90000 0 0x0064>; + interrupts = ; + clocks = <&cpg CPG_MOD 211>; + dmas = <&dmac1 0x41>, <&dmac1 0x40>, + <&dmac2 0x41>, <&dmac2 0x40>; + dma-names = "tx", "rx", "tx", "rx"; + power-domains = <&sysc R8A7795_PD_ALWAYS_ON>; + resets = <&cpg 211>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + msiof1: spi@e6ea0000 { + compatible = "renesas,msiof-r8a7795", + "renesas,rcar-gen3-msiof"; + reg = <0 0xe6ea0000 0 0x0064>; + interrupts = ; + clocks = <&cpg CPG_MOD 210>; + dmas = <&dmac1 0x43>, <&dmac1 0x42>, + <&dmac2 0x43>, <&dmac2 0x42>; + dma-names = "tx", "rx", "tx", "rx"; + power-domains = <&sysc R8A7795_PD_ALWAYS_ON>; + resets = <&cpg 210>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + msiof2: spi@e6c00000 { + compatible = "renesas,msiof-r8a7795", + "renesas,rcar-gen3-msiof"; + reg = <0 0xe6c00000 0 0x0064>; + interrupts = ; + clocks = <&cpg CPG_MOD 209>; + dmas = <&dmac0 0x45>, <&dmac0 0x44>; + dma-names = "tx", "rx"; + power-domains = <&sysc R8A7795_PD_ALWAYS_ON>; + resets = <&cpg 209>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + msiof3: spi@e6c10000 { + compatible = "renesas,msiof-r8a7795", + "renesas,rcar-gen3-msiof"; + reg = <0 0xe6c10000 0 0x0064>; + interrupts = ; + clocks = <&cpg CPG_MOD 208>; + dmas = <&dmac0 0x47>, <&dmac0 0x46>; + dma-names = "tx", "rx"; + power-domains = <&sysc R8A7795_PD_ALWAYS_ON>; + resets = <&cpg 208>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + scif0: serial@e6e60000 { compatible = "renesas,scif-r8a7795", "renesas,rcar-gen3-scif", 
"renesas,scif"; @@ -1267,7 +1451,8 @@ ssi9: ssi-9 { }; sata: sata@ee300000 { - compatible = "renesas,sata-r8a7795"; + compatible = "renesas,sata-r8a7795", + "renesas,rcar-gen3-sata"; reg = <0 0xee300000 0 0x200000>; interrupts = ; clocks = <&cpg CPG_MOD 815>; @@ -1314,6 +1499,34 @@ usb_dmac1: dma-controller@e65b0000 { dma-channels = <2>; }; + usb_dmac2: dma-controller@e6460000 { + compatible = "renesas,r8a7795-usb-dmac", + "renesas,usb-dmac"; + reg = <0 0xe6460000 0 0x100>; + interrupts = ; + interrupt-names = "ch0", "ch1"; + clocks = <&cpg CPG_MOD 326>; + power-domains = <&sysc R8A7795_PD_ALWAYS_ON>; + resets = <&cpg 326>; + #dma-cells = <1>; + dma-channels = <2>; + }; + + usb_dmac3: dma-controller@e6470000 { + compatible = "renesas,r8a7795-usb-dmac", + "renesas,usb-dmac"; + reg = <0 0xe6470000 0 0x100>; + interrupts = ; + interrupt-names = "ch0", "ch1"; + clocks = <&cpg CPG_MOD 329>; + power-domains = <&sysc R8A7795_PD_ALWAYS_ON>; + resets = <&cpg 329>; + #dma-cells = <1>; + dma-channels = <2>; + }; + sdhi0: sd@ee100000 { compatible = "renesas,sdhi-r8a7795"; reg = <0 0xee100000 0 0x2000>; @@ -1392,6 +1605,18 @@ usb2_phy2: usb-phy@ee0c0200 { status = "disabled"; }; + usb2_phy3: usb-phy@ee0e0200 { + compatible = "renesas,usb2-phy-r8a7795", + "renesas,rcar-gen3-usb2-phy"; + reg = <0 0xee0e0200 0 0x700>; + interrupts = ; + clocks = <&cpg CPG_MOD 700>; + power-domains = <&sysc R8A7795_PD_ALWAYS_ON>; + resets = <&cpg 700>; + #phy-cells = <0>; + status = "disabled"; + }; + ehci0: usb@ee080100 { compatible = "generic-ehci"; reg = <0 0xee080100 0 0x100>; @@ -1399,6 +1624,7 @@ ehci0: usb@ee080100 { clocks = <&cpg CPG_MOD 703>; phys = <&usb2_phy0>; phy-names = "usb"; + companion = <&ohci0>; power-domains = <&sysc R8A7795_PD_ALWAYS_ON>; resets = <&cpg 703>; status = "disabled"; @@ -1411,6 +1637,7 @@ ehci1: usb@ee0a0100 { clocks = <&cpg CPG_MOD 702>; phys = <&usb2_phy1>; phy-names = "usb"; + companion = <&ohci1>; power-domains = <&sysc R8A7795_PD_ALWAYS_ON>; resets = <&cpg 702>; status = "disabled"; @@ -1423,11 +1650,25 @@ ehci2: usb@ee0c0100 { clocks = <&cpg CPG_MOD 701>; phys = <&usb2_phy2>; phy-names = "usb"; + companion = <&ohci2>; power-domains = <&sysc R8A7795_PD_ALWAYS_ON>; resets = <&cpg 701>; status = "disabled"; }; + ehci3: usb@ee0e0100 { + compatible = "generic-ehci"; + reg = <0 0xee0e0100 0 0x100>; + interrupts = ; + clocks = <&cpg CPG_MOD 700>; + phys = <&usb2_phy3>; + phy-names = "usb"; + companion = <&ohci3>; + power-domains = <&sysc R8A7795_PD_ALWAYS_ON>; + resets = <&cpg 700>; + status = "disabled"; + }; + ohci0: usb@ee080000 { compatible = "generic-ohci"; reg = <0 0xee080000 0 0x100>; @@ -1464,6 +1705,18 @@ ohci2: usb@ee0c0000 { status = "disabled"; }; + ohci3: usb@ee0e0000 { + compatible = "generic-ohci"; + reg = <0 0xee0e0000 0 0x100>; + interrupts = ; + clocks = <&cpg CPG_MOD 700>; + phys = <&usb2_phy3>; + phy-names = "usb"; + power-domains = <&sysc R8A7795_PD_ALWAYS_ON>; + resets = <&cpg 700>; + status = "disabled"; + }; + hsusb: usb@e6590000 { compatible = "renesas,usbhs-r8a7795", "renesas,rcar-gen3-usbhs"; @@ -1481,6 +1734,23 @@ hsusb: usb@e6590000 { status = "disabled"; }; + hsusb3: usb@e659c000 { + compatible = "renesas,usbhs-r8a7795", + "renesas,rcar-gen3-usbhs"; + reg = <0 0xe659c000 0 0x100>; + interrupts = ; + clocks = <&cpg CPG_MOD 705>; + dmas = <&usb_dmac2 0>, <&usb_dmac2 1>, + <&usb_dmac3 0>, <&usb_dmac3 1>; + dma-names = "ch0", "ch1", "ch2", "ch3"; + renesas,buswait = <11>; + phys = <&usb2_phy3>; + phy-names = "usb"; + power-domains = <&sysc R8A7795_PD_ALWAYS_ON>; + 
resets = <&cpg 705>; + status = "disabled"; + }; + pciec0: pcie@fe000000 { compatible = "renesas,pcie-r8a7795", "renesas,pcie-rcar-gen3"; @@ -1535,6 +1805,46 @@ pciec1: pcie@ee800000 { status = "disabled"; }; + imr-lx4@fe860000 { + compatible = "renesas,r8a7795-imr-lx4", + "renesas,imr-lx4"; + reg = <0 0xfe860000 0 0x2000>; + interrupts = ; + clocks = <&cpg CPG_MOD 823>; + power-domains = <&sysc R8A7795_PD_A3VC>; + resets = <&cpg 823>; + }; + + imr-lx4@fe870000 { + compatible = "renesas,r8a7795-imr-lx4", + "renesas,imr-lx4"; + reg = <0 0xfe870000 0 0x2000>; + interrupts = ; + clocks = <&cpg CPG_MOD 822>; + power-domains = <&sysc R8A7795_PD_A3VC>; + resets = <&cpg 822>; + }; + + imr-lx4@fe880000 { + compatible = "renesas,r8a7795-imr-lx4", + "renesas,imr-lx4"; + reg = <0 0xfe880000 0 0x2000>; + interrupts = ; + clocks = <&cpg CPG_MOD 821>; + power-domains = <&sysc R8A7795_PD_A3VC>; + resets = <&cpg 821>; + }; + + imr-lx4@fe890000 { + compatible = "renesas,r8a7795-imr-lx4", + "renesas,imr-lx4"; + reg = <0 0xfe890000 0 0x2000>; + interrupts = ; + clocks = <&cpg CPG_MOD 820>; + power-domains = <&sysc R8A7795_PD_A3VC>; + resets = <&cpg 820>; + }; + vspbc: vsp@fe920000 { compatible = "renesas,vsp2"; reg = <0 0xfe920000 0 0x8000>; @@ -1755,6 +2065,7 @@ port@1 { }; du: display@feb00000 { + compatible = "renesas,du-r8a7795"; reg = <0 0xfeb00000 0 0x80000>, <0 0xfeb90000 0 0x14>; reg-names = "du", "lvds.0"; @@ -1768,6 +2079,7 @@ du: display@feb00000 { <&cpg CPG_MOD 721>, <&cpg CPG_MOD 727>; clock-names = "du.0", "du.1", "du.2", "du.3", "lvds.0"; + vsps = <&vspd0 0 &vspd1 0 &vspd2 0 &vspd0 1>; status = "disabled"; ports { diff --git a/arch/arm64/boot/dts/renesas/r8a7796-m3ulcb.dts b/arch/arm64/boot/dts/renesas/r8a7796-m3ulcb.dts index 38b58b7fca4b..daee1f1a3f68 100644 --- a/arch/arm64/boot/dts/renesas/r8a7796-m3ulcb.dts +++ b/arch/arm64/boot/dts/renesas/r8a7796-m3ulcb.dts @@ -9,8 +9,6 @@ * kind, whether express or implied. */ -#define CPG_AUDIO_CLK_I R8A7796_CLK_S0D4 - /dts-v1/; #include "r8a7796.dtsi" #include "ulcb.dtsi" @@ -30,3 +28,15 @@ memory@600000000 { reg = <0x6 0x00000000 0x0 0x40000000>; }; }; + +&du { + clocks = <&cpg CPG_MOD 724>, + <&cpg CPG_MOD 723>, + <&cpg CPG_MOD 722>, + <&cpg CPG_MOD 727>, + <&versaclock5 1>, + <&versaclock5 3>, + <&versaclock5 2>; + clock-names = "du.0", "du.1", "du.2", "lvds.0", + "dclkin.0", "dclkin.1", "dclkin.2"; +}; diff --git a/arch/arm64/boot/dts/renesas/r8a7796-salvator-x.dts b/arch/arm64/boot/dts/renesas/r8a7796-salvator-x.dts index db4f162d6bdd..b317be03306e 100644 --- a/arch/arm64/boot/dts/renesas/r8a7796-salvator-x.dts +++ b/arch/arm64/boot/dts/renesas/r8a7796-salvator-x.dts @@ -8,8 +8,6 @@ * kind, whether express or implied. 
*/ -#define CPG_AUDIO_CLK_I R8A7796_CLK_S0D4 - /dts-v1/; #include "r8a7796.dtsi" #include "salvator-x.dtsi" @@ -29,3 +27,32 @@ memory@600000000 { reg = <0x6 0x00000000 0x0 0x80000000>; }; }; + +&du { + clocks = <&cpg CPG_MOD 724>, + <&cpg CPG_MOD 723>, + <&cpg CPG_MOD 722>, + <&cpg CPG_MOD 727>, + <&versaclock5 1>, + <&x21_clk>, + <&versaclock5 2>; + clock-names = "du.0", "du.1", "du.2", "lvds.0", + "dclkin.0", "dclkin.1", "dclkin.2"; +}; + +&hdmi0 { + status = "okay"; + + ports { + port@1 { + reg = <1>; + rcar_dw_hdmi0_out: endpoint { + remote-endpoint = <&hdmi0_con>; + }; + }; + }; +}; + +&hdmi0_con { + remote-endpoint = <&rcar_dw_hdmi0_out>; +}; diff --git a/arch/arm64/boot/dts/renesas/r8a7796.dtsi b/arch/arm64/boot/dts/renesas/r8a7796.dtsi index 1f6710912045..369092e17e34 100644 --- a/arch/arm64/boot/dts/renesas/r8a7796.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a7796.dtsi @@ -12,6 +12,8 @@ #include #include +#define CPG_AUDIO_CLK_I R8A7796_CLK_S0D4 + / { compatible = "renesas,r8a7796"; #address-cells = <2>; @@ -639,6 +641,126 @@ channel1 { }; }; + drif00: rif@e6f40000 { + compatible = "renesas,r8a7796-drif", + "renesas,rcar-gen3-drif"; + reg = <0 0xe6f40000 0 0x64>; + interrupts = ; + clocks = <&cpg CPG_MOD 515>; + clock-names = "fck"; + dmas = <&dmac1 0x20>, <&dmac2 0x20>; + dma-names = "rx", "rx"; + power-domains = <&sysc R8A7796_PD_ALWAYS_ON>; + resets = <&cpg 515>; + renesas,bonding = <&drif01>; + status = "disabled"; + }; + + drif01: rif@e6f50000 { + compatible = "renesas,r8a7796-drif", + "renesas,rcar-gen3-drif"; + reg = <0 0xe6f50000 0 0x64>; + interrupts = ; + clocks = <&cpg CPG_MOD 514>; + clock-names = "fck"; + dmas = <&dmac1 0x22>, <&dmac2 0x22>; + dma-names = "rx", "rx"; + power-domains = <&sysc R8A7796_PD_ALWAYS_ON>; + resets = <&cpg 514>; + renesas,bonding = <&drif00>; + status = "disabled"; + }; + + drif10: rif@e6f60000 { + compatible = "renesas,r8a7796-drif", + "renesas,rcar-gen3-drif"; + reg = <0 0xe6f60000 0 0x64>; + interrupts = ; + clocks = <&cpg CPG_MOD 513>; + clock-names = "fck"; + dmas = <&dmac1 0x24>, <&dmac2 0x24>; + dma-names = "rx", "rx"; + power-domains = <&sysc R8A7796_PD_ALWAYS_ON>; + resets = <&cpg 513>; + renesas,bonding = <&drif11>; + status = "disabled"; + }; + + drif11: rif@e6f70000 { + compatible = "renesas,r8a7796-drif", + "renesas,rcar-gen3-drif"; + reg = <0 0xe6f70000 0 0x64>; + interrupts = ; + clocks = <&cpg CPG_MOD 512>; + clock-names = "fck"; + dmas = <&dmac1 0x26>, <&dmac2 0x26>; + dma-names = "rx", "rx"; + power-domains = <&sysc R8A7796_PD_ALWAYS_ON>; + resets = <&cpg 512>; + renesas,bonding = <&drif10>; + status = "disabled"; + }; + + drif20: rif@e6f80000 { + compatible = "renesas,r8a7796-drif", + "renesas,rcar-gen3-drif"; + reg = <0 0xe6f80000 0 0x64>; + interrupts = ; + clocks = <&cpg CPG_MOD 511>; + clock-names = "fck"; + dmas = <&dmac1 0x28>, <&dmac2 0x28>; + dma-names = "rx", "rx"; + power-domains = <&sysc R8A7796_PD_ALWAYS_ON>; + resets = <&cpg 511>; + renesas,bonding = <&drif21>; + status = "disabled"; + }; + + drif21: rif@e6f90000 { + compatible = "renesas,r8a7796-drif", + "renesas,rcar-gen3-drif"; + reg = <0 0xe6f90000 0 0x64>; + interrupts = ; + clocks = <&cpg CPG_MOD 510>; + clock-names = "fck"; + dmas = <&dmac1 0x2a>, <&dmac2 0x2a>; + dma-names = "rx", "rx"; + power-domains = <&sysc R8A7796_PD_ALWAYS_ON>; + resets = <&cpg 510>; + renesas,bonding = <&drif20>; + status = "disabled"; + }; + + drif30: rif@e6fa0000 { + compatible = "renesas,r8a7796-drif", + "renesas,rcar-gen3-drif"; + reg = <0 0xe6fa0000 0 0x64>; + interrupts = ; + 
clocks = <&cpg CPG_MOD 509>; + clock-names = "fck"; + dmas = <&dmac1 0x2c>, <&dmac2 0x2c>; + dma-names = "rx", "rx"; + power-domains = <&sysc R8A7796_PD_ALWAYS_ON>; + resets = <&cpg 509>; + renesas,bonding = <&drif31>; + status = "disabled"; + }; + + drif31: rif@e6fb0000 { + compatible = "renesas,r8a7796-drif", + "renesas,rcar-gen3-drif"; + reg = <0 0xe6fb0000 0 0x64>; + interrupts = ; + clocks = <&cpg CPG_MOD 508>; + clock-names = "fck"; + dmas = <&dmac1 0x2e>, <&dmac2 0x2e>; + dma-names = "rx", "rx"; + power-domains = <&sysc R8A7796_PD_ALWAYS_ON>; + resets = <&cpg 508>; + renesas,bonding = <&drif30>; + status = "disabled"; + }; + avb: ethernet@e6800000 { compatible = "renesas,etheravb-r8a7796", "renesas,etheravb-rcar-gen3"; @@ -877,7 +999,7 @@ msiof0: spi@e6e90000 { clocks = <&cpg CPG_MOD 211>; dmas = <&dmac1 0x41>, <&dmac1 0x40>, <&dmac2 0x41>, <&dmac2 0x40>; - dma-names = "tx", "rx"; + dma-names = "tx", "rx", "tx", "rx"; power-domains = <&sysc R8A7796_PD_ALWAYS_ON>; resets = <&cpg 211>; #address-cells = <1>; @@ -893,7 +1015,7 @@ msiof1: spi@e6ea0000 { clocks = <&cpg CPG_MOD 210>; dmas = <&dmac1 0x43>, <&dmac1 0x42>, <&dmac2 0x43>, <&dmac2 0x42>; - dma-names = "tx", "rx"; + dma-names = "tx", "rx", "tx", "rx"; power-domains = <&sysc R8A7796_PD_ALWAYS_ON>; resets = <&cpg 210>; #address-cells = <1>; @@ -1101,36 +1223,133 @@ GIC_SPI 382 IRQ_TYPE_LEVEL_HIGH dma-channels = <16>; }; + usb_dmac0: dma-controller@e65a0000 { + compatible = "renesas,r8a7796-usb-dmac", + "renesas,usb-dmac"; + reg = <0 0xe65a0000 0 0x100>; + interrupts = ; + interrupt-names = "ch0", "ch1"; + clocks = <&cpg CPG_MOD 330>; + power-domains = <&sysc R8A7796_PD_ALWAYS_ON>; + resets = <&cpg 330>; + #dma-cells = <1>; + dma-channels = <2>; + }; + + usb_dmac1: dma-controller@e65b0000 { + compatible = "renesas,r8a7796-usb-dmac", + "renesas,usb-dmac"; + reg = <0 0xe65b0000 0 0x100>; + interrupts = ; + interrupt-names = "ch0", "ch1"; + clocks = <&cpg CPG_MOD 331>; + power-domains = <&sysc R8A7796_PD_ALWAYS_ON>; + resets = <&cpg 331>; + #dma-cells = <1>; + dma-channels = <2>; + }; + hsusb: usb@e6590000 { - /* placeholder */ + compatible = "renesas,usbhs-r8a7796", + "renesas,rcar-gen3-usbhs"; + reg = <0 0xe6590000 0 0x100>; + interrupts = ; + clocks = <&cpg CPG_MOD 704>; + dmas = <&usb_dmac0 0>, <&usb_dmac0 1>, + <&usb_dmac1 0>, <&usb_dmac1 1>; + dma-names = "ch0", "ch1", "ch2", "ch3"; + renesas,buswait = <11>; + phys = <&usb2_phy0>; + phy-names = "usb"; + power-domains = <&sysc R8A7796_PD_ALWAYS_ON>; + resets = <&cpg 704>; + status = "disabled"; }; xhci0: usb@ee000000 { - /* placeholder */ + compatible = "renesas,xhci-r8a7796", + "renesas,rcar-gen3-xhci"; + reg = <0 0xee000000 0 0xc00>; + interrupts = ; + clocks = <&cpg CPG_MOD 328>; + power-domains = <&sysc R8A7796_PD_ALWAYS_ON>; + resets = <&cpg 328>; + status = "disabled"; }; ohci0: usb@ee080000 { - /* placeholder */ + compatible = "generic-ohci"; + reg = <0 0xee080000 0 0x100>; + interrupts = ; + clocks = <&cpg CPG_MOD 703>; + phys = <&usb2_phy0>; + phy-names = "usb"; + power-domains = <&sysc R8A7796_PD_ALWAYS_ON>; + resets = <&cpg 703>; + status = "disabled"; }; ehci0: usb@ee080100 { - /* placeholder */ + compatible = "generic-ehci"; + reg = <0 0xee080100 0 0x100>; + interrupts = ; + clocks = <&cpg CPG_MOD 703>; + phys = <&usb2_phy0>; + phy-names = "usb"; + companion= <&ohci0>; + power-domains = <&sysc R8A7796_PD_ALWAYS_ON>; + resets = <&cpg 703>; + status = "disabled"; }; usb2_phy0: usb-phy@ee080200 { - /* placeholder */ + compatible = "renesas,usb2-phy-r8a7796", + 
"renesas,rcar-gen3-usb2-phy"; + reg = <0 0xee080200 0 0x700>; + interrupts = ; + clocks = <&cpg CPG_MOD 703>; + power-domains = <&sysc R8A7796_PD_ALWAYS_ON>; + resets = <&cpg 703>; + #phy-cells = <0>; + status = "disabled"; }; ohci1: usb@ee0a0000 { - /* placeholder */ + compatible = "generic-ohci"; + reg = <0 0xee0a0000 0 0x100>; + interrupts = ; + clocks = <&cpg CPG_MOD 702>; + phys = <&usb2_phy1>; + phy-names = "usb"; + power-domains = <&sysc R8A7796_PD_ALWAYS_ON>; + resets = <&cpg 702>; + status = "disabled"; }; ehci1: usb@ee0a0100 { - /* placeholder */ + compatible = "generic-ehci"; + reg = <0 0xee0a0100 0 0x100>; + interrupts = ; + clocks = <&cpg CPG_MOD 702>; + phys = <&usb2_phy1>; + phy-names = "usb"; + companion= <&ohci1>; + power-domains = <&sysc R8A7796_PD_ALWAYS_ON>; + resets = <&cpg 702>; + status = "disabled"; }; usb2_phy1: usb-phy@ee0a0200 { - /* placeholder */ + compatible = "renesas,usb2-phy-r8a7796", + "renesas,rcar-gen3-usb2-phy"; + reg = <0 0xee0a0200 0 0x700>; + clocks = <&cpg CPG_MOD 702>; + power-domains = <&sysc R8A7796_PD_ALWAYS_ON>; + resets = <&cpg 702>; + #phy-cells = <0>; + status = "disabled"; }; sdhi0: sd@ee100000 { @@ -1440,8 +1659,150 @@ pciec1: pcie@ee800000 { /* placeholder */ }; + fcpf0: fcp@fe950000 { + compatible = "renesas,fcpf"; + reg = <0 0xfe950000 0 0x200>; + clocks = <&cpg CPG_MOD 615>; + power-domains = <&sysc R8A7796_PD_A3VC>; + resets = <&cpg 615>; + }; + + vspb: vsp@fe960000 { + compatible = "renesas,vsp2"; + reg = <0 0xfe960000 0 0x8000>; + interrupts = ; + clocks = <&cpg CPG_MOD 626>; + power-domains = <&sysc R8A7796_PD_A3VC>; + resets = <&cpg 626>; + + renesas,fcp = <&fcpvb0>; + }; + + fcpvb0: fcp@fe96f000 { + compatible = "renesas,fcpv"; + reg = <0 0xfe96f000 0 0x200>; + clocks = <&cpg CPG_MOD 607>; + power-domains = <&sysc R8A7796_PD_A3VC>; + resets = <&cpg 607>; + }; + + vspi0: vsp@fe9a0000 { + compatible = "renesas,vsp2"; + reg = <0 0xfe9a0000 0 0x8000>; + interrupts = ; + clocks = <&cpg CPG_MOD 631>; + power-domains = <&sysc R8A7796_PD_A3VC>; + resets = <&cpg 631>; + + renesas,fcp = <&fcpvi0>; + }; + + fcpvi0: fcp@fe9af000 { + compatible = "renesas,fcpv"; + reg = <0 0xfe9af000 0 0x200>; + clocks = <&cpg CPG_MOD 611>; + power-domains = <&sysc R8A7796_PD_A3VC>; + resets = <&cpg 611>; + }; + + vspd0: vsp@fea20000 { + compatible = "renesas,vsp2"; + reg = <0 0xfea20000 0 0x4000>; + interrupts = ; + clocks = <&cpg CPG_MOD 623>; + power-domains = <&sysc R8A7796_PD_ALWAYS_ON>; + resets = <&cpg 623>; + + renesas,fcp = <&fcpvd0>; + }; + + fcpvd0: fcp@fea27000 { + compatible = "renesas,fcpv"; + reg = <0 0xfea27000 0 0x200>; + clocks = <&cpg CPG_MOD 603>; + power-domains = <&sysc R8A7796_PD_ALWAYS_ON>; + resets = <&cpg 603>; + }; + + vspd1: vsp@fea28000 { + compatible = "renesas,vsp2"; + reg = <0 0xfea28000 0 0x4000>; + interrupts = ; + clocks = <&cpg CPG_MOD 622>; + power-domains = <&sysc R8A7796_PD_ALWAYS_ON>; + resets = <&cpg 622>; + + renesas,fcp = <&fcpvd1>; + }; + + fcpvd1: fcp@fea2f000 { + compatible = "renesas,fcpv"; + reg = <0 0xfea2f000 0 0x200>; + clocks = <&cpg CPG_MOD 602>; + power-domains = <&sysc R8A7796_PD_ALWAYS_ON>; + resets = <&cpg 602>; + }; + + vspd2: vsp@fea30000 { + compatible = "renesas,vsp2"; + reg = <0 0xfea30000 0 0x4000>; + interrupts = ; + clocks = <&cpg CPG_MOD 621>; + power-domains = <&sysc R8A7796_PD_ALWAYS_ON>; + resets = <&cpg 621>; + + renesas,fcp = <&fcpvd2>; + }; + + fcpvd2: fcp@fea37000 { + compatible = "renesas,fcpv"; + reg = <0 0xfea37000 0 0x200>; + clocks = <&cpg CPG_MOD 601>; + power-domains = <&sysc 
R8A7796_PD_ALWAYS_ON>; + resets = <&cpg 601>; + }; + + hdmi0: hdmi@fead0000 { + compatible = "renesas,r8a7796-hdmi", "renesas,rcar-gen3-hdmi"; + reg = <0 0xfead0000 0 0x10000>; + interrupts = ; + clocks = <&cpg CPG_MOD 729>, <&cpg CPG_CORE R8A7796_CLK_HDMI>; + clock-names = "iahb", "isfr"; + power-domains = <&sysc R8A7796_PD_ALWAYS_ON>; + resets = <&cpg 729>; + status = "disabled"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + port@0 { + reg = <0>; + dw_hdmi0_in: endpoint { + remote-endpoint = <&du_out_hdmi0>; + }; + }; + port@1 { + reg = <1>; + }; + }; + }; + du: display@feb00000 { - /* placeholder */ + compatible = "renesas,du-r8a7796"; + reg = <0 0xfeb00000 0 0x70000>, + <0 0xfeb90000 0 0x14>; + reg-names = "du", "lvds.0"; + interrupts = , + , + ; + clocks = <&cpg CPG_MOD 724>, + <&cpg CPG_MOD 723>, + <&cpg CPG_MOD 722>, + <&cpg CPG_MOD 727>; + clock-names = "du.0", "du.1", "du.2", "lvds.0"; + status = "disabled"; + + vsps = <&vspd0 &vspd1 &vspd2>; ports { #address-cells = <1>; @@ -1452,7 +1813,38 @@ port@0 { du_out_rgb: endpoint { }; }; + port@1 { + reg = <1>; + du_out_hdmi0: endpoint { + remote-endpoint = <&dw_hdmi0_in>; + }; + }; + port@2 { + reg = <2>; + du_out_lvds0: endpoint { + }; + }; }; }; + + imr-lx4@fe860000 { + compatible = "renesas,r8a7796-imr-lx4", + "renesas,imr-lx4"; + reg = <0 0xfe860000 0 0x2000>; + interrupts = ; + clocks = <&cpg CPG_MOD 823>; + power-domains = <&sysc R8A7796_PD_A3VC>; + resets = <&cpg 823>; + }; + + imr-lx4@fe870000 { + compatible = "renesas,r8a7796-imr-lx4", + "renesas,imr-lx4"; + reg = <0 0xfe870000 0 0x2000>; + interrupts = ; + clocks = <&cpg CPG_MOD 822>; + power-domains = <&sysc R8A7796_PD_A3VC>; + resets = <&cpg 822>; + }; }; }; diff --git a/arch/arm64/boot/dts/renesas/r8a77995-draak.dts b/arch/arm64/boot/dts/renesas/r8a77995-draak.dts new file mode 100644 index 000000000000..d144370051d5 --- /dev/null +++ b/arch/arm64/boot/dts/renesas/r8a77995-draak.dts @@ -0,0 +1,46 @@ +/* + * Device Tree Source for the Draak board + * + * Copyright (C) 2016 Renesas Electronics Corp. + * Copyright (C) 2017 Glider bvba + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +/dts-v1/; +#include "r8a77995.dtsi" + +/ { + model = "Renesas Draak board based on r8a77995"; + compatible = "renesas,draak", "renesas,r8a77995"; + + aliases { + serial0 = &scif2; + }; + + chosen { + bootargs = "ignore_loglevel"; + stdout-path = "serial0:115200n8"; + }; + + memory@48000000 { + device_type = "memory"; + /* first 128MB is reserved for secure area. */ + reg = <0x0 0x48000000 0x0 0x18000000>; + }; +}; + +&extal_clk { + clock-frequency = <48000000>; +}; + +&scif2 { + status = "okay"; +}; + +&rwdt { + timeout-sec = <60>; + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/renesas/r8a77995.dtsi b/arch/arm64/boot/dts/renesas/r8a77995.dtsi new file mode 100644 index 000000000000..d0f95b78c022 --- /dev/null +++ b/arch/arm64/boot/dts/renesas/r8a77995.dtsi @@ -0,0 +1,155 @@ +/* + * Device Tree Source for the r8a77995 SoC + * + * Copyright (C) 2016 Renesas Electronics Corp. + * Copyright (C) 2017 Glider bvba + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. 
+ */ + +#include +#include + +/ { + compatible = "renesas,r8a77995"; + #address-cells = <2>; + #size-cells = <2>; + + psci { + compatible = "arm,psci-1.0", "arm,psci-0.2"; + method = "smc"; + }; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + a53_0: cpu@0 { + compatible = "arm,cortex-a53", "arm,armv8"; + reg = <0x0>; + device_type = "cpu"; + power-domains = <&sysc 5>; + next-level-cache = <&L2_CA53>; + enable-method = "psci"; + }; + + L2_CA53: cache-controller-1 { + compatible = "cache"; + power-domains = <&sysc 21>; + cache-unified; + cache-level = <2>; + }; + }; + + extal_clk: extal { + compatible = "fixed-clock"; + #clock-cells = <0>; + /* This value must be overridden by the board */ + clock-frequency = <0>; + }; + + scif_clk: scif { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <0>; + }; + + soc { + compatible = "simple-bus"; + interrupt-parent = <&gic>; + #address-cells = <2>; + #size-cells = <2>; + ranges; + + gic: interrupt-controller@f1010000 { + compatible = "arm,gic-400"; + #interrupt-cells = <3>; + #address-cells = <0>; + interrupt-controller; + reg = <0x0 0xf1010000 0 0x1000>, + <0x0 0xf1020000 0 0x20000>, + <0x0 0xf1040000 0 0x20000>, + <0x0 0xf1060000 0 0x20000>; + interrupts = ; + clocks = <&cpg CPG_MOD 408>; + clock-names = "clk"; + power-domains = <&sysc 32>; + resets = <&cpg 408>; + }; + + timer { + compatible = "arm,armv8-timer"; + interrupts = , + , + , + ; + }; + + rwdt: watchdog@e6020000 { + compatible = "renesas,r8a77995-wdt", + "renesas,rcar-gen3-wdt"; + reg = <0 0xe6020000 0 0x0c>; + clocks = <&cpg CPG_MOD 402>; + power-domains = <&sysc 32>; + resets = <&cpg 402>; + status = "disabled"; + }; + + pmu_a53 { + compatible = "arm,cortex-a53-pmu"; + interrupts = ; + }; + + cpg: clock-controller@e6150000 { + compatible = "renesas,r8a77995-cpg-mssr"; + reg = <0 0xe6150000 0 0x1000>; + clocks = <&extal_clk>; + clock-names = "extal"; + #clock-cells = <2>; + #power-domain-cells = <0>; + #reset-cells = <1>; + }; + + rst: reset-controller@e6160000 { + compatible = "renesas,r8a77995-rst"; + reg = <0 0xe6160000 0 0x0200>; + }; + + pfc: pfc@e6060000 { + compatible = "renesas,pfc-r8a77995"; + reg = <0 0xe6060000 0 0x508>; + }; + + prr: chipid@fff00044 { + compatible = "renesas,prr"; + reg = <0 0xfff00044 0 4>; + }; + + sysc: system-controller@e6180000 { + compatible = "renesas,r8a77995-sysc"; + reg = <0 0xe6180000 0 0x0400>; + #power-domain-cells = <1>; + }; + + scif2: serial@e6e88000 { + compatible = "renesas,scif-r8a77995", + "renesas,rcar-gen3-scif", "renesas,scif"; + reg = <0 0xe6e88000 0 64>; + interrupts = ; + clocks = <&cpg CPG_MOD 310>, + <&cpg CPG_CORE 16>, + <&scif_clk>; + clock-names = "fck", "brg_int", "scif_clk"; + power-domains = <&sysc 32>; + resets = <&cpg 310>; + status = "disabled"; + }; + }; +}; diff --git a/arch/arm64/boot/dts/renesas/salvator-common.dtsi b/arch/arm64/boot/dts/renesas/salvator-common.dtsi index f903957da504..4786c67b5e65 100644 --- a/arch/arm64/boot/dts/renesas/salvator-common.dtsi +++ b/arch/arm64/boot/dts/renesas/salvator-common.dtsi @@ -268,10 +268,6 @@ endpoint { remote-endpoint = <&adv7123_in>; }; }; - port@3 { - lvds_connector: endpoint { - }; - }; }; }; diff --git a/arch/arm64/boot/dts/renesas/salvator-xs.dtsi b/arch/arm64/boot/dts/renesas/salvator-xs.dtsi index 81227e3c2c6f..bf4d200fb546 100644 --- a/arch/arm64/boot/dts/renesas/salvator-xs.dtsi +++ b/arch/arm64/boot/dts/renesas/salvator-xs.dtsi @@ -18,3 +18,13 @@ / { &extal_clk { clock-frequency = <16640000>; }; + +&i2c4 { + versaclock6: 
clock-generator@6a { + compatible = "idt,5p49v6901"; + reg = <0x6a>; + #clock-cells = <1>; + clocks = <&x23_clk>; + clock-names = "xin"; + }; +}; diff --git a/arch/arm64/boot/dts/renesas/ulcb.dtsi b/arch/arm64/boot/dts/renesas/ulcb.dtsi index d1a3f3b7a0ab..1b868df2393f 100644 --- a/arch/arm64/boot/dts/renesas/ulcb.dtsi +++ b/arch/arm64/boot/dts/renesas/ulcb.dtsi @@ -34,6 +34,16 @@ audio_clkout: audio-clkout { clock-frequency = <11289600>; }; + hdmi0-out { + compatible = "hdmi-connector"; + type = "a"; + + port { + hdmi0_con: endpoint { + }; + }; + }; + keyboard { compatible = "gpio-keys"; @@ -120,6 +130,12 @@ x12_clk: x12 { #clock-cells = <0>; clock-frequency = <24576000>; }; + + x23_clk: x23-clock { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <25000000>; + }; }; &audio_clk_a { @@ -153,6 +169,23 @@ &extalr_clk { clock-frequency = <32768>; }; +&hdmi0 { + status = "okay"; + + ports { + port@1 { + reg = <1>; + rcar_dw_hdmi0_out: endpoint { + remote-endpoint = <&hdmi0_con>; + }; + }; + }; +}; + +&hdmi0_con { + remote-endpoint = <&rcar_dw_hdmi0_out>; +}; + &i2c2 { pinctrl-0 = <&i2c2_pins>; pinctrl-names = "default"; @@ -189,6 +222,24 @@ cs2000: clk-multiplier@4f { }; }; +&i2c4 { + status = "okay"; + + clock-frequency = <400000>; + + versaclock5: clock-generator@6a { + compatible = "idt,5p49v5925"; + reg = <0x6a>; + #clock-cells = <1>; + clocks = <&x23_clk>; + clock-names = "xin"; + }; +}; + +&i2c_dvfs { + status = "okay"; +}; + &ohci1 { status = "okay"; }; diff --git a/arch/arm64/boot/dts/rockchip/Makefile b/arch/arm64/boot/dts/rockchip/Makefile index bcfa53b1e6b7..f1c9b13cea5c 100644 --- a/arch/arm64/boot/dts/rockchip/Makefile +++ b/arch/arm64/boot/dts/rockchip/Makefile @@ -1,4 +1,5 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3328-evb.dtb +dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3328-rock64.dtb dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3368-evb-act8846.dtb dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3368-geekbox.dtb dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3368-orion-r68-meta.dtb @@ -7,6 +8,8 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3368-r88.dtb dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-evb.dtb dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-firefly.dtb dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-gru-kevin.dtb +dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-puma-haikou.dtb +dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-sapphire-excavator.dtb always := $(dtb-y) subdir-y := $(dts-dirs) diff --git a/arch/arm64/boot/dts/rockchip/rk3328-evb.dts b/arch/arm64/boot/dts/rockchip/rk3328-evb.dts index cf272392cebf..8e6a65431756 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328-evb.dts +++ b/arch/arm64/boot/dts/rockchip/rk3328-evb.dts @@ -50,8 +50,189 @@ / { chosen { stdout-path = "serial2:1500000n8"; }; + + dc_12v: dc-12v { + compatible = "regulator-fixed"; + regulator-name = "dc_12v"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <12000000>; + regulator-max-microvolt = <12000000>; + }; + + vcc_sys: vcc-sys { + compatible = "regulator-fixed"; + regulator-name = "vcc_sys"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + vin-supply = <&dc_12v>; + }; + + vcc_phy: vcc-phy-regulator { + compatible = "regulator-fixed"; + regulator-name = "vcc_phy"; + regulator-always-on; + regulator-boot-on; + }; +}; + +&gmac2phy { + phy-supply = <&vcc_phy>; + clock_in_out = "output"; + assigned-clocks = <&cru SCLK_MAC2PHY_SRC>; + assigned-clock-rate = <50000000>; + assigned-clocks = <&cru SCLK_MAC2PHY>; + assigned-clock-parents = <&cru SCLK_MAC2PHY_SRC>; + status = "okay"; +}; + 
+&i2c1 { + status = "okay"; + + rk805: rk805@18 { + compatible = "rockchip,rk805"; + reg = <0x18>; + interrupt-parent = <&gpio2>; + interrupts = <6 IRQ_TYPE_LEVEL_LOW>; + #clock-cells = <1>; + clock-output-names = "xin32k", "rk805-clkout2"; + gpio-controller; + #gpio-cells = <2>; + pinctrl-names = "default"; + pinctrl-0 = <&pmic_int_l>; + rockchip,system-power-controller; + wakeup-source; + + vcc1-supply = <&vcc_sys>; + vcc2-supply = <&vcc_sys>; + vcc3-supply = <&vcc_sys>; + vcc4-supply = <&vcc_sys>; + vcc5-supply = <&vcc_io>; + vcc6-supply = <&vcc_io>; + + regulators { + vdd_logic: DCDC_REG1 { + regulator-name = "vdd_logic"; + regulator-min-microvolt = <712500>; + regulator-max-microvolt = <1450000>; + regulator-always-on; + regulator-boot-on; + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <1000000>; + }; + }; + + vdd_arm: DCDC_REG2 { + regulator-name = "vdd_arm"; + regulator-min-microvolt = <712500>; + regulator-max-microvolt = <1450000>; + regulator-always-on; + regulator-boot-on; + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <950000>; + }; + }; + + vcc_ddr: DCDC_REG3 { + regulator-name = "vcc_ddr"; + regulator-always-on; + regulator-boot-on; + regulator-state-mem { + regulator-on-in-suspend; + }; + }; + + vcc_io: DCDC_REG4 { + regulator-name = "vcc_io"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + regulator-boot-on; + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <3300000>; + }; + }; + + vcc_18: LDO_REG1 { + regulator-name = "vcc_18"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; + regulator-boot-on; + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <1800000>; + }; + }; + + vcc18_emmc: LDO_REG2 { + regulator-name = "vcc18_emmc"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; + regulator-boot-on; + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <1800000>; + }; + }; + + vdd_10: LDO_REG3 { + regulator-name = "vdd_10"; + regulator-min-microvolt = <1000000>; + regulator-max-microvolt = <1000000>; + regulator-always-on; + regulator-boot-on; + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <1000000>; + }; + }; + }; + }; +}; + +&pinctrl { + pmic { + pmic_int_l: pmic-int-l { + rockchip,pins = <2 RK_PA6 RK_FUNC_GPIO &pcfg_pull_up>; + }; + }; +}; + +&tsadc { + status = "okay"; }; &uart2 { status = "okay"; }; + +&u2phy { + status = "okay"; +}; + +&u2phy_host { + status = "okay"; +}; + +&u2phy_otg { + status = "okay"; +}; + +&usb20_otg { + status = "okay"; +}; + +&usb_host0_ehci { + status = "okay"; +}; + +&usb_host0_ohci { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts new file mode 100644 index 000000000000..d4f80786e7c2 --- /dev/null +++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts @@ -0,0 +1,333 @@ +/* + * Copyright (c) 2017 PINE64 + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. 
+ * + * a) This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +/dts-v1/; +#include "rk3328.dtsi" + +/ { + model = "Pine64 Rock64"; + compatible = "pine64,rock64", "rockchip,rk3328"; + + chosen { + stdout-path = "serial2:1500000n8"; + }; + + gmac_clkin: external-gmac-clock { + compatible = "fixed-clock"; + clock-frequency = <125000000>; + clock-output-names = "gmac_clkin"; + #clock-cells = <0>; + }; + + vcc_sd: sdmmc-regulator { + compatible = "regulator-fixed"; + gpio = <&gpio0 RK_PD6 GPIO_ACTIVE_LOW>; + pinctrl-names = "default"; + pinctrl-0 = <&sdmmc0m1_gpio>; + regulator-name = "vcc_sd"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + vin-supply = <&vcc_io>; + }; + + vcc_host_5v: vcc-host-5v-regulator { + compatible = "regulator-fixed"; + enable-active-high; + gpio = <&gpio0 RK_PA0 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&usb30_host_drv>; + regulator-name = "vcc_host_5v"; + regulator-always-on; + vin-supply = <&vcc_sys>; + }; + + vcc_host1_5v: vcc_otg_5v: vcc-host1-5v-regulator { + compatible = "regulator-fixed"; + enable-active-high; + gpio = <&gpio0 RK_PD3 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&usb20_host_drv>; + regulator-name = "vcc_host1_5v"; + regulator-always-on; + vin-supply = <&vcc_sys>; + }; + + vcc_sys: vcc-sys { + compatible = "regulator-fixed"; + regulator-name = "vcc_sys"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + }; +}; + +&cpu0 { + cpu-supply = <&vdd_arm>; +}; + +&cpu1 { + cpu-supply = <&vdd_arm>; +}; + +&cpu2 { + cpu-supply = <&vdd_arm>; +}; + +&cpu3 { + cpu-supply = <&vdd_arm>; +}; + +&emmc { + bus-width = <8>; + cap-mmc-highspeed; + non-removable; + pinctrl-names = "default"; + pinctrl-0 = <&emmc_clk &emmc_cmd &emmc_bus8>; + vmmc-supply = <&vcc_io>; + vqmmc-supply = <&vcc18_emmc>; + status = "okay"; +}; + +&gmac2io { + assigned-clocks = <&cru 
SCLK_MAC2IO>, <&cru SCLK_MAC2IO_EXT>; + assigned-clock-parents = <&gmac_clkin>, <&gmac_clkin>; + clock_in_out = "input"; + phy-supply = <&vcc_io>; + phy-mode = "rgmii"; + pinctrl-names = "default"; + pinctrl-0 = <&rgmiim1_pins>; + snps,reset-gpio = <&gpio1 RK_PC2 GPIO_ACTIVE_LOW>; + snps,reset-active-low; + snps,reset-delays-us = <0 10000 50000>; + tx_delay = <0x26>; + rx_delay = <0x11>; + status = "okay"; +}; + +&i2c1 { + status = "okay"; + + rk805: rk805@18 { + compatible = "rockchip,rk805"; + reg = <0x18>; + interrupt-parent = <&gpio2>; + interrupts = <6 IRQ_TYPE_LEVEL_LOW>; + #clock-cells = <1>; + clock-output-names = "xin32k", "rk805-clkout2"; + pinctrl-names = "default"; + pinctrl-0 = <&pmic_int_l>; + rockchip,system-power-controller; + wakeup-source; + + vcc1-supply = <&vcc_sys>; + vcc2-supply = <&vcc_sys>; + vcc3-supply = <&vcc_sys>; + vcc4-supply = <&vcc_sys>; + vcc5-supply = <&vcc_io>; + vcc6-supply = <&vcc_sys>; + + regulators { + vdd_logic: DCDC_REG1 { + regulator-name = "vdd_logic"; + regulator-min-microvolt = <712500>; + regulator-max-microvolt = <1450000>; + regulator-ramp-delay = <12500>; + regulator-always-on; + regulator-boot-on; + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <1000000>; + }; + }; + + vdd_arm: DCDC_REG2 { + regulator-name = "vdd_arm"; + regulator-min-microvolt = <712500>; + regulator-max-microvolt = <1450000>; + regulator-ramp-delay = <12500>; + regulator-always-on; + regulator-boot-on; + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <950000>; + }; + }; + + vcc_ddr: DCDC_REG3 { + regulator-name = "vcc_ddr"; + regulator-always-on; + regulator-boot-on; + regulator-state-mem { + regulator-on-in-suspend; + }; + }; + + vcc_io: DCDC_REG4 { + regulator-name = "vcc_io"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + regulator-boot-on; + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <3300000>; + }; + }; + + vcc_18: LDO_REG1 { + regulator-name = "vdd_18"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; + regulator-boot-on; + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <1800000>; + }; + }; + + vcc18_emmc: LDO_REG2 { + regulator-name = "vcc_18emmc"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; + regulator-boot-on; + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <1800000>; + }; + }; + + vdd_10: LDO_REG3 { + regulator-name = "vdd_10"; + regulator-min-microvolt = <1000000>; + regulator-max-microvolt = <1000000>; + regulator-always-on; + regulator-boot-on; + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <1000000>; + }; + }; + }; + }; +}; + +&io_domains { + status = "okay"; + + vccio1-supply = <&vcc_io>; + vccio2-supply = <&vcc18_emmc>; + vccio3-supply = <&vcc_io>; + vccio4-supply = <&vcc_18>; + vccio5-supply = <&vcc_io>; + vccio6-supply = <&vcc_io>; + pmuio-supply = <&vcc_io>; +}; + +&pinctrl { + pmic { + pmic_int_l: pmic-int-l { + rockchip,pins = <2 RK_PA6 RK_FUNC_GPIO &pcfg_pull_up>; + }; + }; + + usb2 { + usb20_host_drv: usb20-host-drv { + rockchip,pins = <0 RK_PD3 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; + + usb3 { + usb30_host_drv: usb30-host-drv { + rockchip,pins = <0 RK_PA0 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; +}; + +&sdmmc { + bus-width = <4>; + cap-mmc-highspeed; + cap-sd-highspeed; + 
disable-wp; + max-frequency = <150000000>; + pinctrl-names = "default"; + pinctrl-0 = <&sdmmc0_clk &sdmmc0_cmd &sdmmc0_dectn &sdmmc0_bus4>; + vmmc-supply = <&vcc_sd>; + status = "okay"; +}; + +&tsadc { + rockchip,hw-tshut-mode = <0>; + rockchip,hw-tshut-polarity = <0>; + status = "okay"; +}; + +&uart2 { + status = "okay"; +}; + +&u2phy { + status = "okay"; + + u2phy_host: host-port { + status = "okay"; + }; + + u2phy_otg: otg-port { + status = "okay"; + }; +}; + +&usb20_otg { + dr_mode = "host"; + status = "okay"; +}; + +&usb_host0_ehci { + status = "okay"; +}; + +&usb_host0_ohci { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi index 0be96cee27bd..6d615cb6e64d 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi @@ -47,6 +47,7 @@ #include #include #include +#include / { compatible = "rockchip,rk3328"; @@ -63,6 +64,8 @@ aliases { i2c1 = &i2c1; i2c2 = &i2c2; i2c3 = &i2c3; + ethernet0 = &gmac2io; + ethernet1 = &gmac2phy; }; cpus { @@ -74,8 +77,11 @@ cpu0: cpu@0 { compatible = "arm,cortex-a53", "arm,armv8"; reg = <0x0 0x0>; clocks = <&cru ARMCLK>; + #cooling-cells = <2>; + dynamic-power-coefficient = <120>; enable-method = "psci"; next-level-cache = <&l2>; + operating-points-v2 = <&cpu0_opp_table>; }; cpu1: cpu@1 { @@ -83,8 +89,10 @@ cpu1: cpu@1 { compatible = "arm,cortex-a53", "arm,armv8"; reg = <0x0 0x1>; clocks = <&cru ARMCLK>; + dynamic-power-coefficient = <120>; enable-method = "psci"; next-level-cache = <&l2>; + operating-points-v2 = <&cpu0_opp_table>; }; cpu2: cpu@2 { @@ -92,8 +100,10 @@ cpu2: cpu@2 { compatible = "arm,cortex-a53", "arm,armv8"; reg = <0x0 0x2>; clocks = <&cru ARMCLK>; + dynamic-power-coefficient = <120>; enable-method = "psci"; next-level-cache = <&l2>; + operating-points-v2 = <&cpu0_opp_table>; }; cpu3: cpu@3 { @@ -101,8 +111,10 @@ cpu3: cpu@3 { compatible = "arm,cortex-a53", "arm,armv8"; reg = <0x0 0x3>; clocks = <&cru ARMCLK>; + dynamic-power-coefficient = <120>; enable-method = "psci"; next-level-cache = <&l2>; + operating-points-v2 = <&cpu0_opp_table>; }; l2: l2-cache0 { @@ -110,6 +122,43 @@ l2: l2-cache0 { }; }; + cpu0_opp_table: opp_table0 { + compatible = "operating-points-v2"; + opp-shared; + + opp-408000000 { + opp-hz = /bits/ 64 <408000000>; + opp-microvolt = <950000>; + clock-latency-ns = <40000>; + opp-suspend; + }; + opp-600000000 { + opp-hz = /bits/ 64 <600000000>; + opp-microvolt = <950000>; + clock-latency-ns = <40000>; + }; + opp-816000000 { + opp-hz = /bits/ 64 <816000000>; + opp-microvolt = <1000000>; + clock-latency-ns = <40000>; + }; + opp-1008000000 { + opp-hz = /bits/ 64 <1008000000>; + opp-microvolt = <1100000>; + clock-latency-ns = <40000>; + }; + opp-1200000000 { + opp-hz = /bits/ 64 <1200000000>; + opp-microvolt = <1225000>; + clock-latency-ns = <40000>; + }; + opp-1296000000 { + opp-hz = /bits/ 64 <1296000000>; + opp-microvolt = <1300000>; + clock-latency-ns = <40000>; + }; + }; + amba { compatible = "simple-bus"; #address-cells = <2>; @@ -156,12 +205,84 @@ xin24m: xin24m { clock-output-names = "xin24m"; }; + i2s0: i2s@ff000000 { + compatible = "rockchip,rk3328-i2s", "rockchip,rk3066-i2s"; + reg = <0x0 0xff000000 0x0 0x1000>; + interrupts = ; + clocks = <&cru SCLK_I2S0>, <&cru HCLK_I2S0_8CH>; + clock-names = "i2s_clk", "i2s_hclk"; + dmas = <&dmac 11>, <&dmac 12>; + dma-names = "tx", "rx"; + status = "disabled"; + }; + + i2s1: i2s@ff010000 { + compatible = "rockchip,rk3328-i2s", "rockchip,rk3066-i2s"; + reg = <0x0 
0xff010000 0x0 0x1000>; + interrupts = ; + clocks = <&cru SCLK_I2S1>, <&cru HCLK_I2S1_8CH>; + clock-names = "i2s_clk", "i2s_hclk"; + dmas = <&dmac 14>, <&dmac 15>; + dma-names = "tx", "rx"; + status = "disabled"; + }; + + i2s2: i2s@ff020000 { + compatible = "rockchip,rk3328-i2s", "rockchip,rk3066-i2s"; + reg = <0x0 0xff020000 0x0 0x1000>; + interrupts = ; + clocks = <&cru SCLK_I2S2>, <&cru HCLK_I2S2_2CH>; + clock-names = "i2s_clk", "i2s_hclk"; + dmas = <&dmac 0>, <&dmac 1>; + dma-names = "tx", "rx"; + status = "disabled"; + }; + + spdif: spdif@ff030000 { + compatible = "rockchip,rk3328-spdif"; + reg = <0x0 0xff030000 0x0 0x1000>; + interrupts = ; + clocks = <&cru SCLK_SPDIF>, <&cru HCLK_SPDIF_8CH>; + clock-names = "mclk", "hclk"; + dmas = <&dmac 10>; + dma-names = "tx"; + pinctrl-names = "default"; + pinctrl-0 = <&spdifm2_tx>; + status = "disabled"; + }; + + pdm: pdm@ff040000 { + compatible = "rockchip,pdm"; + reg = <0x0 0xff040000 0x0 0x1000>; + clocks = <&cru SCLK_PDM>, <&cru HCLK_PDM>; + clock-names = "pdm_clk", "pdm_hclk"; + dmas = <&dmac 16>; + dma-names = "rx"; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&pdmm0_clk + &pdmm0_sdi0 + &pdmm0_sdi1 + &pdmm0_sdi2 + &pdmm0_sdi3>; + pinctrl-1 = <&pdmm0_clk_sleep + &pdmm0_sdi0_sleep + &pdmm0_sdi1_sleep + &pdmm0_sdi2_sleep + &pdmm0_sdi3_sleep>; + status = "disabled"; + }; + grf: syscon@ff100000 { compatible = "rockchip,rk3328-grf", "syscon", "simple-mfd"; reg = <0x0 0xff100000 0x0 0x1000>; #address-cells = <1>; #size-cells = <1>; + io_domains: io-domains { + compatible = "rockchip,rk3328-io-voltage-domain"; + status = "disabled"; + }; + power: power-controller { compatible = "rockchip,rk3328-power-controller"; #power-domain-cells = <1>; @@ -308,6 +429,108 @@ wdt: watchdog@ff1a0000 { interrupts = ; }; + pwm0: pwm@ff1b0000 { + compatible = "rockchip,rk3328-pwm"; + reg = <0x0 0xff1b0000 0x0 0x10>; + clocks = <&cru SCLK_PWM>, <&cru PCLK_PWM>; + clock-names = "pwm", "pclk"; + pinctrl-names = "default"; + pinctrl-0 = <&pwm0_pin>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm1: pwm@ff1b0010 { + compatible = "rockchip,rk3328-pwm"; + reg = <0x0 0xff1b0010 0x0 0x10>; + clocks = <&cru SCLK_PWM>, <&cru PCLK_PWM>; + clock-names = "pwm", "pclk"; + pinctrl-names = "default"; + pinctrl-0 = <&pwm1_pin>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm2: pwm@ff1b0020 { + compatible = "rockchip,rk3328-pwm"; + reg = <0x0 0xff1b0020 0x0 0x10>; + clocks = <&cru SCLK_PWM>, <&cru PCLK_PWM>; + clock-names = "pwm", "pclk"; + pinctrl-names = "default"; + pinctrl-0 = <&pwm2_pin>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm3: pwm@ff1b0030 { + compatible = "rockchip,rk3328-pwm"; + reg = <0x0 0xff1b0030 0x0 0x10>; + interrupts = ; + clocks = <&cru SCLK_PWM>, <&cru PCLK_PWM>; + clock-names = "pwm", "pclk"; + pinctrl-names = "default"; + pinctrl-0 = <&pwmir_pin>; + #pwm-cells = <3>; + status = "disabled"; + }; + + thermal-zones { + soc_thermal: soc-thermal { + polling-delay-passive = <20>; + polling-delay = <1000>; + sustainable-power = <1000>; + + thermal-sensors = <&tsadc 0>; + + trips { + threshold: trip-point0 { + temperature = <70000>; + hysteresis = <2000>; + type = "passive"; + }; + target: trip-point1 { + temperature = <85000>; + hysteresis = <2000>; + type = "passive"; + }; + soc_crit: soc-crit { + temperature = <95000>; + hysteresis = <2000>; + type = "critical"; + }; + }; + + cooling-maps { + map0 { + trip = <&target>; + cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + contribution = <4096>; + }; + }; + }; + + }; + 
+ tsadc: tsadc@ff250000 { + compatible = "rockchip,rk3328-tsadc"; + reg = <0x0 0xff250000 0x0 0x100>; + interrupts = ; + assigned-clocks = <&cru SCLK_TSADC>; + assigned-clock-rates = <50000>; + clocks = <&cru SCLK_TSADC>, <&cru PCLK_TSADC>; + clock-names = "tsadc", "apb_pclk"; + pinctrl-names = "init", "default", "sleep"; + pinctrl-0 = <&otp_gpio>; + pinctrl-1 = <&otp_out>; + pinctrl-2 = <&otp_gpio>; + resets = <&cru SRST_TSADC>; + reset-names = "tsadc-apb"; + rockchip,grf = <&grf>; + rockchip,hw-tshut-temp = <100000>; + #thermal-sensor-cells = <1>; + status = "disabled"; + }; + saradc: adc@ff280000 { compatible = "rockchip,rk3328-saradc", "rockchip,rk3399-saradc"; reg = <0x0 0xff280000 0x0 0x100>; @@ -320,6 +543,51 @@ saradc: adc@ff280000 { status = "disabled"; }; + h265e_mmu: iommu@ff330200 { + compatible = "rockchip,iommu"; + reg = <0x0 0xff330200 0 0x100>; + interrupts = ; + interrupt-names = "h265e_mmu"; + #iommu-cells = <0>; + status = "disabled"; + }; + + vepu_mmu: iommu@ff340800 { + compatible = "rockchip,iommu"; + reg = <0x0 0xff340800 0x0 0x40>; + interrupts = ; + interrupt-names = "vepu_mmu"; + #iommu-cells = <0>; + status = "disabled"; + }; + + vpu_mmu: iommu@ff350800 { + compatible = "rockchip,iommu"; + reg = <0x0 0xff350800 0x0 0x40>; + interrupts = ; + interrupt-names = "vpu_mmu"; + #iommu-cells = <0>; + status = "disabled"; + }; + + rkvdec_mmu: iommu@ff360480 { + compatible = "rockchip,iommu"; + reg = <0x0 0xff360480 0x0 0x40>, <0x0 0xff3604c0 0x0 0x40>; + interrupts = ; + interrupt-names = "rkvdec_mmu"; + #iommu-cells = <0>; + status = "disabled"; + }; + + vop_mmu: iommu@ff373f00 { + compatible = "rockchip,iommu"; + reg = <0x0 0xff373f00 0x0 0x100>; + interrupts = ; + interrupt-names = "vop_mmu"; + #iommu-cells = <0>; + status = "disabled"; + }; + cru: clock-controller@ff440000 { compatible = "rockchip,rk3328-cru", "rockchip,cru", "syscon"; reg = <0x0 0xff440000 0x0 0x1000>; @@ -372,6 +640,43 @@ cru: clock-controller@ff440000 { <32768>; }; + usb2phy_grf: syscon@ff450000 { + compatible = "rockchip,rk3328-usb2phy-grf", "syscon", + "simple-mfd"; + reg = <0x0 0xff450000 0x0 0x10000>; + #address-cells = <1>; + #size-cells = <1>; + + u2phy: usb2-phy@100 { + compatible = "rockchip,rk3328-usb2phy"; + reg = <0x100 0x10>; + clocks = <&xin24m>; + clock-names = "phyclk"; + clock-output-names = "usb480m_phy"; + #clock-cells = <0>; + assigned-clocks = <&cru USB480M>; + assigned-clock-parents = <&u2phy>; + status = "disabled"; + + u2phy_otg: otg-port { + #phy-cells = <0>; + interrupts = , + , + ; + interrupt-names = "otg-bvalid", "otg-id", + "linestate"; + status = "disabled"; + }; + + u2phy_host: host-port { + #phy-cells = <0>; + interrupts = ; + interrupt-names = "linestate"; + status = "disabled"; + }; + }; + }; + sdmmc: dwmmc@ff500000 { compatible = "rockchip,rk3328-dw-mshc", "rockchip,rk3288-dw-mshc"; reg = <0x0 0xff500000 0x0 0x4000>; @@ -424,6 +729,82 @@ gmac2io: ethernet@ff540000 { status = "disabled"; }; + gmac2phy: ethernet@ff550000 { + compatible = "rockchip,rk3328-gmac"; + reg = <0x0 0xff550000 0x0 0x10000>; + rockchip,grf = <&grf>; + interrupts = ; + interrupt-names = "macirq"; + clocks = <&cru SCLK_MAC2PHY_SRC>, <&cru SCLK_MAC2PHY_RXTX>, + <&cru SCLK_MAC2PHY_RXTX>, <&cru SCLK_MAC2PHY_REF>, + <&cru ACLK_MAC2PHY>, <&cru PCLK_MAC2PHY>, + <&cru SCLK_MAC2PHY_OUT>; + clock-names = "stmmaceth", "mac_clk_rx", + "mac_clk_tx", "clk_mac_ref", + "aclk_mac", "pclk_mac", + "clk_macphy"; + resets = <&cru SRST_GMAC2PHY_A>, <&cru SRST_MACPHY>; + reset-names = "stmmaceth", "mac-phy"; + 
phy-mode = "rmii"; + phy-handle = <&phy>; + status = "disabled"; + + mdio { + compatible = "snps,dwmac-mdio"; + #address-cells = <1>; + #size-cells = <0>; + + phy: phy@0 { + compatible = "ethernet-phy-id1234.d400", "ethernet-phy-ieee802.3-c22"; + reg = <0>; + clocks = <&cru SCLK_MAC2PHY_OUT>; + resets = <&cru SRST_MACPHY>; + pinctrl-names = "default"; + pinctrl-0 = <&fephyled_rxm1 &fephyled_linkm1>; + phy-is-integrated; + }; + }; + }; + + usb20_otg: usb@ff580000 { + compatible = "rockchip,rk3328-usb", "rockchip,rk3066-usb", + "snps,dwc2"; + reg = <0x0 0xff580000 0x0 0x40000>; + interrupts = ; + clocks = <&cru HCLK_OTG>; + clock-names = "otg"; + dr_mode = "otg"; + g-np-tx-fifo-size = <16>; + g-rx-fifo-size = <280>; + g-tx-fifo-size = <256 128 128 64 32 16>; + g-use-dma; + phys = <&u2phy_otg>; + phy-names = "usb2-phy"; + status = "disabled"; + }; + + usb_host0_ehci: usb@ff5c0000 { + compatible = "generic-ehci"; + reg = <0x0 0xff5c0000 0x0 0x10000>; + interrupts = ; + clocks = <&cru HCLK_HOST0>, <&u2phy>; + clock-names = "usbhost", "utmi"; + phys = <&u2phy_host>; + phy-names = "usb"; + status = "disabled"; + }; + + usb_host0_ohci: usb@ff5d0000 { + compatible = "generic-ohci"; + reg = <0x0 0xff5d0000 0x0 0x10000>; + interrupts = ; + clocks = <&cru HCLK_HOST0>, <&u2phy>; + clock-names = "usbhost", "utmi"; + phys = <&u2phy_host>; + phy-names = "usb"; + status = "disabled"; + }; + gic: interrupt-controller@ff811000 { compatible = "arm,gic-400"; #interrupt-cells = <3>; @@ -610,6 +991,62 @@ hdmii2c_xfer: hdmii2c-xfer { }; }; + pdm-0 { + pdmm0_clk: pdmm0-clk { + rockchip,pins = <2 RK_PC2 2 &pcfg_pull_none>; + }; + + pdmm0_fsync: pdmm0-fsync { + rockchip,pins = <2 RK_PC7 2 &pcfg_pull_none>; + }; + + pdmm0_sdi0: pdmm0-sdi0 { + rockchip,pins = <2 RK_PC3 2 &pcfg_pull_none>; + }; + + pdmm0_sdi1: pdmm0-sdi1 { + rockchip,pins = <2 RK_PC4 2 &pcfg_pull_none>; + }; + + pdmm0_sdi2: pdmm0-sdi2 { + rockchip,pins = <2 RK_PC5 2 &pcfg_pull_none>; + }; + + pdmm0_sdi3: pdmm0-sdi3 { + rockchip,pins = <2 RK_PC6 2 &pcfg_pull_none>; + }; + + pdmm0_clk_sleep: pdmm0-clk-sleep { + rockchip,pins = + <2 RK_PC2 RK_FUNC_GPIO &pcfg_input_high>; + }; + + pdmm0_sdi0_sleep: pdmm0-sdi0-sleep { + rockchip,pins = + <2 RK_PC3 RK_FUNC_GPIO &pcfg_input_high>; + }; + + pdmm0_sdi1_sleep: pdmm0-sdi1-sleep { + rockchip,pins = + <2 RK_PC4 RK_FUNC_GPIO &pcfg_input_high>; + }; + + pdmm0_sdi2_sleep: pdmm0-sdi2-sleep { + rockchip,pins = + <2 RK_PC5 RK_FUNC_GPIO &pcfg_input_high>; + }; + + pdmm0_sdi3_sleep: pdmm0-sdi3-sleep { + rockchip,pins = + <2 RK_PC6 RK_FUNC_GPIO &pcfg_input_high>; + }; + + pdmm0_fsync_sleep: pdmm0-fsync-sleep { + rockchip,pins = + <2 RK_PC7 RK_FUNC_GPIO &pcfg_input_high>; + }; + }; + tsadc { otp_gpio: otp-gpio { rockchip,pins = <2 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>; diff --git a/arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi b/arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi index 4772917c5f7e..a37220a9387c 100644 --- a/arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi @@ -156,7 +156,6 @@ &emmc { disable-wp; mmc-pwrseq = <&emmc_pwrseq>; non-removable; - num-slots = <1>; pinctrl-names = "default"; pinctrl-0 = <&emmc_clk &emmc_cmd &emmc_bus8>; status = "okay"; diff --git a/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts b/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts index e631d424f08e..5e4d3a7015f5 100644 --- a/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts +++ b/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts @@ -117,7 +117,6 @@ &emmc { clock-frequency = <150000000>; disable-wp; 
non-removable; - num-slots = <1>; vmmc-supply = <&vcc_io>; vqmmc-supply = <&vcc18_flash>; pinctrl-names = "default"; diff --git a/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts b/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts index fac116acc12f..d3f6c8e0d206 100644 --- a/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts +++ b/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts @@ -203,7 +203,6 @@ &emmc { mmc-hs200-1_2v; mmc-hs200-1_8v; non-removable; - num-slots = <1>; pinctrl-names = "default"; pinctrl-0 = <&emmc_clk &emmc_cmd &emmc_bus8>; status = "okay"; @@ -347,7 +346,6 @@ &sdmmc { max-frequency = <50000000>; cap-sd-highspeed; card-detect-delay = <200>; - num-slots = <1>; pinctrl-names = "default"; pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>; vmmc-supply = <&vcc_sd>; diff --git a/arch/arm64/boot/dts/rockchip/rk3368-px5-evb.dts b/arch/arm64/boot/dts/rockchip/rk3368-px5-evb.dts index ff48edd8e348..13a9e22f5d2d 100644 --- a/arch/arm64/boot/dts/rockchip/rk3368-px5-evb.dts +++ b/arch/arm64/boot/dts/rockchip/rk3368-px5-evb.dts @@ -86,12 +86,10 @@ &emmc { cap-mmc-highspeed; clock-frequency = <150000000>; disable-wp; - keep-power-in-suspend; mmc-hs200-1_8v; no-sdio; no-sd; non-removable; - num-slots = <1>; pinctrl-names = "default"; pinctrl-0 = <&emmc_clk>, <&emmc_cmd>, <&emmc_bus8>; vmmc-supply = <&vcc_io>; @@ -281,7 +279,6 @@ &sdmmc { card-detect-delay = <200>; no-emmc; no-sdio; - num-slots = <1>; sd-uhs-sdr12; sd-uhs-sdr25; pinctrl-names = "default"; diff --git a/arch/arm64/boot/dts/rockchip/rk3368-r88.dts b/arch/arm64/boot/dts/rockchip/rk3368-r88.dts index 7134181f1dc2..b3510d56517a 100644 --- a/arch/arm64/boot/dts/rockchip/rk3368-r88.dts +++ b/arch/arm64/boot/dts/rockchip/rk3368-r88.dts @@ -189,7 +189,6 @@ &emmc { disable-wp; mmc-pwrseq = <&emmc_pwrseq>; non-removable; - num-slots = <1>; pinctrl-names = "default"; pinctrl-0 = <&emmc_clk &emmc_cmd &emmc_bus8>; status = "okay"; @@ -254,7 +253,6 @@ &sdio0 { keep-power-in-suspend; mmc-pwrseq = <&sdio_pwrseq>; non-removable; - num-slots = <1>; pinctrl-names = "default"; pinctrl-0 = <&sdio0_clk &sdio0_cmd &sdio0_bus4>; vmmc-supply = <&vcc_io>; diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi index 6d5dc0587e59..19fbaa5e7bdd 100644 --- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi @@ -700,6 +700,19 @@ timer@ff810000 { interrupts = ; }; + spdif: spdif@ff880000 { + compatible = "rockchip,rk3368-spdif"; + reg = <0x0 0xff880000 0x0 0x1000>; + interrupts = ; + clocks = <&cru SCLK_SPDIF_8CH>, <&cru HCLK_SPDIF>; + clock-names = "mclk", "hclk"; + dmas = <&dmac_bus 3>; + dma-names = "tx"; + pinctrl-names = "default"; + pinctrl-0 = <&spdif_tx>; + status = "disabled"; + }; + i2s_2ch: i2s-2ch@ff890000 { compatible = "rockchip,rk3368-i2s", "rockchip,rk3066-i2s"; reg = <0x0 0xff890000 0x0 0x1000>; @@ -724,6 +737,55 @@ i2s_8ch: i2s-8ch@ff898000 { status = "disabled"; }; + iep_mmu: iommu@ff900800 { + compatible = "rockchip,iommu"; + reg = <0x0 0xff900800 0x0 0x100>; + interrupts = ; + interrupt-names = "iep_mmu"; + #iommu-cells = <0>; + status = "disabled"; + }; + + isp_mmu: iommu@ff914000 { + compatible = "rockchip,iommu"; + reg = <0x0 0xff914000 0x0 0x100>, + <0x0 0xff915000 0x0 0x100>; + interrupts = ; + interrupt-names = "isp_mmu"; + #iommu-cells = <0>; + rockchip,disable-mmu-reset; + status = "disabled"; + }; + + vop_mmu: iommu@ff930300 { + compatible = "rockchip,iommu"; + reg = <0x0 0xff930300 0x0 0x100>; + interrupts = ; + 
interrupt-names = "vop_mmu"; + #iommu-cells = <0>; + status = "disabled"; + }; + + hevc_mmu: iommu@ff9a0440 { + compatible = "rockchip,iommu"; + reg = <0x0 0xff9a0440 0x0 0x40>, + <0x0 0xff9a0480 0x0 0x40>; + interrupts = ; + interrupt-names = "hevc_mmu"; + #iommu-cells = <0>; + status = "disabled"; + }; + + vpu_mmu: iommu@ff9a0800 { + compatible = "rockchip,iommu"; + reg = <0x0 0xff9a0800 0x0 0x100>; + interrupts = , + ; + interrupt-names = "vepu_mmu", "vdpu_mmu"; + #iommu-cells = <0>; + status = "disabled"; + }; + gic: interrupt-controller@ffb71000 { compatible = "arm,gic-400"; interrupt-controller; @@ -1024,6 +1086,12 @@ sdmmc_bus4: sdmmc-bus4 { }; }; + spdif { + spdif_tx: spdif-tx { + rockchip,pins = <2 RK_PC7 RK_FUNC_1 &pcfg_pull_none>; + }; + }; + spi0 { spi0_clk: spi0-clk { rockchip,pins = <1 29 RK_FUNC_2 &pcfg_pull_up>; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-evb.dts b/arch/arm64/boot/dts/rockchip/rk3399-evb.dts index 42033bcc614c..56533c344ef2 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-evb.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-evb.dts @@ -199,7 +199,7 @@ &pcie0 { ep-gpios = <&gpio3 RK_PB5 GPIO_ACTIVE_HIGH>; num-lanes = <4>; pinctrl-names = "default"; - pinctrl-0 = <&pcie_clkreqn>; + pinctrl-0 = <&pcie_clkreqn_cpm>; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts index ba1d9810ad1e..7fd4bfcaa38e 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts @@ -43,6 +43,7 @@ /dts-v1/; #include #include "rk3399.dtsi" +#include "rk3399-opp.dtsi" / { model = "Firefly-RK3399 Board"; @@ -550,7 +551,7 @@ &pcie0 { ep-gpios = <&gpio4 RK_PD1 GPIO_ACTIVE_HIGH>; num-lanes = <4>; pinctrl-names = "default"; - pinctrl-0 = <&pcie_clkreqn>; + pinctrl-0 = <&pcie_clkreqn_cpm>; status = "okay"; }; @@ -630,9 +631,20 @@ &saradc { status = "okay"; }; +&sdmmc { + bus-width = <4>; + cap-mmc-highspeed; + cap-sd-highspeed; + cd-gpios = <&gpio0 7 GPIO_ACTIVE_LOW>; + disable-wp; + max-frequency = <150000000>; + pinctrl-names = "default"; + pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_bus4>; + status = "okay"; +}; + &sdhci { bus-width = <8>; - keep-power-in-suspend; mmc-hs400-1_8v; mmc-hs400-enhanced-strobe; non-removable; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts index 7bd31066399b..a3d3cea7dc4f 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts @@ -264,6 +264,50 @@ touchscreen@4b { }; }; +&ppvar_bigcpu_pwm { + regulator-min-microvolt = <798674>; + regulator-max-microvolt = <1302172>; +}; + +&ppvar_bigcpu { + regulator-min-microvolt = <798674>; + regulator-max-microvolt = <1302172>; + ctrl-voltage-range = <798674 1302172>; +}; + +&ppvar_litcpu_pwm { + regulator-min-microvolt = <799065>; + regulator-max-microvolt = <1303738>; +}; + +&ppvar_litcpu { + regulator-min-microvolt = <799065>; + regulator-max-microvolt = <1303738>; + ctrl-voltage-range = <799065 1303738>; +}; + +&ppvar_gpu_pwm { + regulator-min-microvolt = <785782>; + regulator-max-microvolt = <1217729>; +}; + +&ppvar_gpu { + regulator-min-microvolt = <785782>; + regulator-max-microvolt = <1217729>; + ctrl-voltage-range = <785782 1217729>; +}; + +&ppvar_centerlogic_pwm { + regulator-min-microvolt = <800069>; + regulator-max-microvolt = <1049692>; +}; + +&ppvar_centerlogic { + regulator-min-microvolt = <800069>; + regulator-max-microvolt = <1049692>; + 
ctrl-voltage-range = <800069 1049692>; +}; + &saradc { status = "okay"; vref-supply = <&pp1800_ap_io>; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi index eb5059344023..199a5118b20d 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi @@ -164,14 +164,9 @@ pp5000: pp5000 { vin-supply = <&ppvar_sys>; }; - ppvar_bigcpu: ppvar-bigcpu { + ppvar_bigcpu_pwm: ppvar-bigcpu-pwm { compatible = "pwm-regulator"; - regulator-name = "ppvar_bigcpu"; - /* - * OVP circuit requires special handling which is not yet - * represented. Keep disabled for now. - */ - status = "disabled"; + regulator-name = "ppvar_bigcpu_pwm"; pwms = <&pwm1 0 3337 0>; pwm-supply = <&ppvar_sys>; @@ -181,18 +176,28 @@ ppvar_bigcpu: ppvar-bigcpu { /* EC turns on w/ ap_core_en; always on for AP */ regulator-always-on; regulator-boot-on; - regulator-min-microvolt = <798674>; - regulator-max-microvolt = <1302172>; + regulator-min-microvolt = <800107>; + regulator-max-microvolt = <1302232>; }; - ppvar_litcpu: ppvar-litcpu { + ppvar_bigcpu: ppvar-bigcpu { + compatible = "vctrl-regulator"; + regulator-name = "ppvar_bigcpu"; + + regulator-min-microvolt = <800107>; + regulator-max-microvolt = <1302232>; + + ctrl-supply = <&ppvar_bigcpu_pwm>; + ctrl-voltage-range = <800107 1302232>; + + regulator-settling-time-up-us = <322>; + min-slew-down-rate = <225>; + ovp-threshold-percent = <16>; + }; + + ppvar_litcpu_pwm: ppvar-litcpu-pwm { compatible = "pwm-regulator"; - regulator-name = "ppvar_litcpu"; - /* - * OVP circuit requires special handling which is not yet - * represented. Keep disabled for now. - */ - status = "disabled"; + regulator-name = "ppvar_litcpu_pwm"; pwms = <&pwm2 0 3337 0>; pwm-supply = <&ppvar_sys>; @@ -202,18 +207,28 @@ ppvar_litcpu: ppvar-litcpu { /* EC turns on w/ ap_core_en; always on for AP */ regulator-always-on; regulator-boot-on; - regulator-min-microvolt = <799065>; - regulator-max-microvolt = <1303738>; + regulator-min-microvolt = <797743>; + regulator-max-microvolt = <1307837>; }; - ppvar_gpu: ppvar-gpu { + ppvar_litcpu: ppvar-litcpu { + compatible = "vctrl-regulator"; + regulator-name = "ppvar_litcpu"; + + regulator-min-microvolt = <797743>; + regulator-max-microvolt = <1307837>; + + ctrl-supply = <&ppvar_litcpu_pwm>; + ctrl-voltage-range = <797743 1307837>; + + regulator-settling-time-up-us = <384>; + min-slew-down-rate = <225>; + ovp-threshold-percent = <16>; + }; + + ppvar_gpu_pwm: ppvar-gpu-pwm { compatible = "pwm-regulator"; - regulator-name = "ppvar_gpu"; - /* - * OVP circuit requires special handling which is not yet - * represented. Keep disabled for now. 
- */ - status = "disabled"; + regulator-name = "ppvar_gpu_pwm"; pwms = <&pwm0 0 3337 0>; pwm-supply = <&ppvar_sys>; @@ -223,18 +238,28 @@ ppvar_gpu: ppvar-gpu { /* EC turns on w/ ap_core_en; always on for AP */ regulator-always-on; regulator-boot-on; - regulator-min-microvolt = <785782>; - regulator-max-microvolt = <1217729>; + regulator-min-microvolt = <786384>; + regulator-max-microvolt = <1217747>; }; - ppvar_centerlogic: ppvar-centerlogic { + ppvar_gpu: ppvar-gpu { + compatible = "vctrl-regulator"; + regulator-name = "ppvar_gpu"; + + regulator-min-microvolt = <786384>; + regulator-max-microvolt = <1217747>; + + ctrl-supply = <&ppvar_gpu_pwm>; + ctrl-voltage-range = <786384 1217747>; + + regulator-settling-time-up-us = <390>; + min-slew-down-rate = <225>; + ovp-threshold-percent = <16>; + }; + + ppvar_centerlogic_pwm: ppvar-centerlogic-pwm { compatible = "pwm-regulator"; - regulator-name = "ppvar_centerlogic"; - /* - * OVP circuit requires special handling which is not yet - * represented. Keep disabled for now. - */ - status = "disabled"; + regulator-name = "ppvar_centerlogic_pwm"; pwms = <&pwm3 0 3337 0>; pwm-supply = <&ppvar_sys>; @@ -244,8 +269,23 @@ ppvar_centerlogic: ppvar-centerlogic { /* EC turns on w/ ppvar_centerlogic_en; always on for AP */ regulator-always-on; regulator-boot-on; - regulator-min-microvolt = <800069>; - regulator-max-microvolt = <1049692>; + regulator-min-microvolt = <799434>; + regulator-max-microvolt = <1049925>; + }; + + ppvar_centerlogic: ppvar-centerlogic { + compatible = "vctrl-regulator"; + regulator-name = "ppvar_centerlogic"; + + regulator-min-microvolt = <799434>; + regulator-max-microvolt = <1049925>; + + ctrl-supply = <&ppvar_centerlogic_pwm>; + ctrl-voltage-range = <799434 1049925>; + + regulator-settling-time-up-us = <378>; + min-slew-down-rate = <225>; + ovp-threshold-percent = <16>; }; /* Schematics call this PPVAR even though it's fixed */ @@ -555,6 +595,11 @@ &emmc_phy { status = "okay"; }; +&gpu { + mali-supply = <&ppvar_gpu>; + status = "okay"; +}; + ap_i2c_mic: &i2c1 { status = "okay"; @@ -567,12 +612,7 @@ ap_i2c_mic: &i2c1 { headsetcodec: rt5514@57 { compatible = "realtek,rt5514"; reg = <0x57>; - interrupt-parent = <&gpio1>; - interrupts = <13 IRQ_TYPE_LEVEL_HIGH>; - pinctrl-names = "default"; - pinctrl-0 = <&mic_int>; - realtek,dmic-init-delay = <20>; - wakeup-source; + realtek,dmic-init-delay-ms = <20>; }; }; @@ -781,9 +821,13 @@ &spi2 { wacky_spi_audio: spi2@0 { compatible = "realtek,rt5514"; reg = <0>; - + interrupt-parent = <&gpio1>; + interrupts = <13 IRQ_TYPE_LEVEL_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&mic_int>; /* May run faster once verified. */ spi-max-frequency = <10000000>; + wakeup-source; }; }; @@ -1031,7 +1075,7 @@ sdmmc_cmd: sdmmc-cmd { * hurt and dw_mmc will ignore it. We make sure to disable * the pull though so we don't burn needless power. 
*/ - sdmmc_cd: sdmcc-cd { + sdmmc_cd: sdmmc-cd { rockchip,pins = <0 7 RK_FUNC_1 &pcfg_pull_none>; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-op1-opp.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-op1-opp.dtsi index be7fe635f7c1..d8a120f945c8 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-op1-opp.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-op1-opp.dtsi @@ -118,6 +118,35 @@ opp08 { opp-microvolt = <1250000>; }; }; + + gpu_opp_table: opp-table2 { + compatible = "operating-points-v2"; + + opp00 { + opp-hz = /bits/ 64 <200000000>; + opp-microvolt = <800000>; + }; + opp01 { + opp-hz = /bits/ 64 <297000000>; + opp-microvolt = <800000>; + }; + opp02 { + opp-hz = /bits/ 64 <400000000>; + opp-microvolt = <825000>; + }; + opp03 { + opp-hz = /bits/ 64 <500000000>; + opp-microvolt = <850000>; + }; + opp04 { + opp-hz = /bits/ 64 <600000000>; + opp-microvolt = <925000>; + }; + opp05 { + opp-hz = /bits/ 64 <800000000>; + opp-microvolt = <1075000>; + }; + }; }; &cpu_l0 { @@ -143,3 +172,7 @@ &cpu_b0 { &cpu_b1 { operating-points-v2 = <&cluster1_opp>; }; + +&gpu { + operating-points-v2 = <&gpu_opp_table>; +}; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-opp.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-opp.dtsi index c83460db130a..81617bcf2522 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-opp.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-opp.dtsi @@ -110,6 +110,35 @@ opp07 { opp-microvolt = <1200000>; }; }; + + gpu_opp_table: opp-table2 { + compatible = "operating-points-v2"; + + opp00 { + opp-hz = /bits/ 64 <200000000>; + opp-microvolt = <800000>; + }; + opp01 { + opp-hz = /bits/ 64 <297000000>; + opp-microvolt = <800000>; + }; + opp02 { + opp-hz = /bits/ 64 <400000000>; + opp-microvolt = <825000>; + }; + opp03 { + opp-hz = /bits/ 64 <500000000>; + opp-microvolt = <875000>; + }; + opp04 { + opp-hz = /bits/ 64 <600000000>; + opp-microvolt = <925000>; + }; + opp05 { + opp-hz = /bits/ 64 <800000000>; + opp-microvolt = <1100000>; + }; + }; }; &cpu_l0 { @@ -135,3 +164,7 @@ &cpu_b0 { &cpu_b1 { operating-points-v2 = <&cluster1_opp>; }; + +&gpu { + operating-points-v2 = <&gpu_opp_table>; +}; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts new file mode 100644 index 000000000000..9a7486058455 --- /dev/null +++ b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts @@ -0,0 +1,228 @@ +/* + * Copyright (c) 2017 Theobroma Systems Design und Consulting GmbH + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +/dts-v1/; +#include "rk3399-puma.dtsi" + +/ { + model = "Theobroma Systems RK3399-Q7 SoM"; + compatible = "tsd,rk3399-puma-haikou", "rockchip,rk3399"; + + chosen { + stdout-path = "serial0:115200n8"; + }; + + leds { + pinctrl-0 = <&led_pin_module>, <&led_sd_haikou>; + + sd-card-led { + label = "sd_card_led"; + gpios = <&gpio1 RK_PA2 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "mmc0"; + }; + }; + + dc_12v: dc-12v { + compatible = "regulator-fixed"; + regulator-name = "dc_12v"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <12000000>; + regulator-max-microvolt = <12000000>; + }; + + vcc3v3_baseboard: vcc3v3-baseboard { + compatible = "regulator-fixed"; + regulator-name = "vcc3v3_baseboard"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + vin-supply = <&dc_12v>; + }; + + vcc5v0_otg: vcc5v0-otg-regulator { + compatible = "regulator-fixed"; + enable-active-high; + gpio = <&gpio0 RK_PA2 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&otg_vbus_drv>; + regulator-name = "vcc5v0_otg"; + regulator-always-on; + }; +}; + +&i2c1 { + status = "okay"; + clock-frequency = <400000>; +}; + +&i2c2 { + status = "okay"; + clock-frequency = <400000>; +}; + +&i2c3 { + i2c-scl-rising-time-ns = <450>; + i2c-scl-falling-time-ns = <15>; + status = "okay"; +}; + +&i2c4 { + status = "okay"; + clock-frequency = <400000>; +}; + +&i2c6 { + status = "okay"; + clock-frequency = <400000>; +}; + +&i2s0 { + status = "okay"; + rockchip,playback-channels = <8>; + rockchip,capture-channels = <8>; + #sound-dai-cells = <0>; + status = "okay"; +}; + +&pcie_phy { + status = "okay"; +}; + +&pcie0 { + ep-gpios = <&gpio4 RK_PC6 GPIO_ACTIVE_LOW>; + num-lanes = <4>; + pinctrl-names = "default"; + pinctrl-0 = <&pcie_clkreqn_cpm>; + status = "okay"; +}; + +&pinctrl { + pinctrl-names = "default"; + pinctrl-0 = <&haikou_pin_hog>; + + hog { + haikou_pin_hog: haikou-pin-hog { + rockchip,pins = + /* LID_BTN */ + , + /* BATLOW# */ + , + /* SLP_BTN# */ + , + /* BIOS_DISABLE# */ + ; + }; + }; + + leds { + led_sd_haikou: led-sd-gpio { + rockchip,pins = + ; + }; + }; + + usb2 { + otg_vbus_drv: otg-vbus-drv { + rockchip,pins = + ; + }; + }; +}; + +&pwm0 { + status = "okay"; +}; + +&sdmmc { + bus-width = <4>; + cap-mmc-highspeed; + cap-sd-highspeed; + cd-gpios = <&gpio0 RK_PA7 GPIO_ACTIVE_LOW>; + disable-wp; + 
max-frequency = <150000000>; + pinctrl-names = "default"; + pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>; + vmmc-supply = <&vcc3v3_baseboard>; + status = "okay"; +}; + +&spi5 { + status = "okay"; +}; + +&u2phy0 { + status = "okay"; +}; + +&usbdrd3_0 { + status = "okay"; +}; + +&usbdrd_dwc3_0 { + dr_mode = "otg"; + status = "okay"; +}; + +&u2phy0_host { + phy-supply = <&vcc5v0_otg>; + status = "okay"; +}; + +&uart0 { + pinctrl-names = "default"; + pinctrl-0 = <&uart0_xfer &uart0_cts &uart0_rts>; + status = "okay"; +}; + +&uart2 { + status = "okay"; +}; + +&usb_host0_ehci { + status = "okay"; +}; + +&usb_host0_ohci { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi new file mode 100644 index 000000000000..53ff3d191a1d --- /dev/null +++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi @@ -0,0 +1,547 @@ +/* + * Copyright (c) 2017 Theobroma Systems Design und Consulting GmbH + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#include +#include "rk3399.dtsi" +#include "rk3399-opp.dtsi" + +/ { + leds { + compatible = "gpio-leds"; + pinctrl-names = "default"; + pinctrl-0 = <&led_pin_module>; + + module-led { + label = "module_led"; + gpios = <&gpio2 RK_PD1 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "heartbeat"; + panic-indicator; + }; + }; + + /* + * Overwrite the opp-table for CPUB as this board uses a different + * regulator (FAN53555) that only allows 10mV steps and therefore + * can't reach the operation point target voltages from rk3399-opp.dtsi + */ + /delete-node/ opp-table1; + cluster1_opp: opp-table1 { + compatible = "operating-points-v2"; + opp-shared; + + opp00 { + opp-hz = /bits/ 64 <408000000>; + opp-microvolt = <800000>; + clock-latency-ns = <40000>; + }; + opp01 { + opp-hz = /bits/ 64 <600000000>; + opp-microvolt = <800000>; + }; + opp02 { + opp-hz = /bits/ 64 <816000000>; + opp-microvolt = <830000>; + opp-suspend; + }; + opp03 { + opp-hz = /bits/ 64 <1008000000>; + opp-microvolt = <880000>; + }; + opp04 { + opp-hz = /bits/ 64 <1200000000>; + opp-microvolt = <950000>; + }; + opp05 { + opp-hz = /bits/ 64 <1416000000>; + opp-microvolt = <1030000>; + }; + opp06 { + opp-hz = /bits/ 64 <1608000000>; + opp-microvolt = <1100000>; + }; + opp07 { + opp-hz = /bits/ 64 <1800000000>; + opp-microvolt = <1200000>; + }; + opp08 { + opp-hz = /bits/ 64 <1992000000>; + opp-microvolt = <1230000>; + turbo-mode; + }; + }; + + clkin_gmac: external-gmac-clock { + compatible = "fixed-clock"; + clock-frequency = <125000000>; + clock-output-names = "clkin_gmac"; + #clock-cells = <0>; + }; + + vcc1v2_phy: vcc1v2-phy { + compatible = "regulator-fixed"; + regulator-name = "vcc1v2_phy"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + vin-supply = <&vcc5v0_sys>; + }; + + vcc3v3_sys: vcc3v3-sys { + compatible = "regulator-fixed"; + regulator-name = "vcc3v3_sys"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + vin-supply = <&vcc5v0_sys>; + }; + + vcc5v0_host: vcc5v0-host-regulator { + compatible = "regulator-fixed"; + gpio = <&gpio4 RK_PA3 GPIO_ACTIVE_HIGH>; + enable-active-low; + pinctrl-names = "default"; + pinctrl-0 = <&vcc5v0_host_en>; + regulator-name = "vcc5v0_host"; + regulator-always-on; + vin-supply = <&vcc5v0_sys>; + }; + + vcc5v0_sys: vcc5v0-sys { + compatible = "regulator-fixed"; + regulator-name = "vcc5v0_sys"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + }; + + vdd_log: vdd-log { + compatible = "pwm-regulator"; + pwms = <&pwm2 0 25000 0>; + regulator-name = "vdd_log"; + regulator-min-microvolt = <800000>; + regulator-max-microvolt = <1400000>; + regulator-always-on; + regulator-boot-on; + status = "okay"; + }; +}; + +&cpu_b0 { + cpu-supply = <&vdd_cpu_b>; +}; + +&cpu_b1 { + cpu-supply = <&vdd_cpu_b>; +}; + +&cpu_l0 { + cpu-supply = <&vdd_cpu_l>; +}; + +&cpu_l1 { + cpu-supply = <&vdd_cpu_l>; +}; + +&cpu_l2 { + cpu-supply = <&vdd_cpu_l>; +}; + +&cpu_l3 { + cpu-supply = <&vdd_cpu_l>; +}; + +&emmc_phy { + status = "okay"; +}; + +&gmac { + assigned-clocks = <&cru SCLK_RMII_SRC>; + assigned-clock-parents = <&clkin_gmac>; + clock_in_out = "input"; + phy-supply = <&vcc1v2_phy>; + phy-mode = "rgmii"; + pinctrl-names = "default"; + pinctrl-0 = <&rgmii_pins>; + snps,reset-gpio = <&gpio3 RK_PC0 GPIO_ACTIVE_HIGH>; + snps,reset-active-low; + snps,reset-delays-us = <0 10000 50000>; + tx_delay = 
<0x10>; + rx_delay = <0x10>; + status = "okay"; +}; + +&i2c0 { + status = "okay"; + i2c-scl-rising-time-ns = <168>; + i2c-scl-falling-time-ns = <4>; + clock-frequency = <400000>; + + rk808: pmic@1b { + compatible = "rockchip,rk808"; + reg = <0x1b>; + interrupt-parent = <&gpio1>; + interrupts = <22 IRQ_TYPE_LEVEL_LOW>; + #clock-cells = <1>; + clock-output-names = "xin32k", "rk808-clkout2"; + pinctrl-names = "default"; + pinctrl-0 = <&pmic_int_l>; + rockchip,system-power-controller; + wakeup-source; + + vcc1-supply = <&vcc5v0_sys>; + vcc2-supply = <&vcc5v0_sys>; + vcc3-supply = <&vcc5v0_sys>; + vcc4-supply = <&vcc5v0_sys>; + vcc6-supply = <&vcc5v0_sys>; + vcc7-supply = <&vcc5v0_sys>; + vcc8-supply = <&vcc3v3_sys>; + vcc9-supply = <&vcc5v0_sys>; + vcc10-supply = <&vcc5v0_sys>; + vcc11-supply = <&vcc5v0_sys>; + vcc12-supply = <&vcc3v3_sys>; + vddio-supply = <&vcc1v8_pmu>; + + regulators { + vdd_center: DCDC_REG1 { + regulator-name = "vdd_center"; + regulator-min-microvolt = <750000>; + regulator-max-microvolt = <1350000>; + regulator-ramp-delay = <6001>; + regulator-always-on; + regulator-boot-on; + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vdd_cpu_l: DCDC_REG2 { + regulator-name = "vdd_cpu_l"; + regulator-min-microvolt = <750000>; + regulator-max-microvolt = <1350000>; + regulator-ramp-delay = <6001>; + regulator-always-on; + regulator-boot-on; + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc_ddr: DCDC_REG3 { + regulator-name = "vcc_ddr"; + regulator-always-on; + regulator-boot-on; + regulator-state-mem { + regulator-on-in-suspend; + }; + }; + + vcc_1v8: DCDC_REG4 { + regulator-name = "vcc_1v8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; + regulator-boot-on; + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <1800000>; + }; + }; + + vcc_ldo1: LDO_REG1 { + regulator-name = "vcc_ldo1"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-boot-on; + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc1v8_hdmi: LDO_REG2 { + regulator-name = "vcc1v8_hdmi"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; + regulator-boot-on; + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc1v8_pmu: LDO_REG3 { + regulator-name = "vcc1v8_pmu"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; + regulator-boot-on; + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <1800000>; + }; + }; + + vcc_sd: LDO_REG4 { + regulator-name = "vcc_sd"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + regulator-boot-on; + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <3300000>; + }; + }; + + vcc_ldo5: LDO_REG5 { + regulator-name = "vcc_ldo5"; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3000000>; + regulator-boot-on; + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc_ldo6: LDO_REG6 { + regulator-name = "vcc_ldo6"; + regulator-min-microvolt = <1500000>; + regulator-max-microvolt = <1500000>; + regulator-boot-on; + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc0v9_hdmi: LDO_REG7 { + regulator-name = "vcc0v9_hdmi"; + regulator-min-microvolt = <900000>; + regulator-max-microvolt = <900000>; + regulator-always-on; + regulator-boot-on; + regulator-state-mem 
{ + regulator-off-in-suspend; + }; + }; + + vcc_efuse: LDO_REG8 { + regulator-name = "vcc_efuse"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; + regulator-boot-on; + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc3v3_s3: SWITCH_REG1 { + regulator-name = "vcc3v3_s3"; + regulator-always-on; + regulator-boot-on; + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc3v3_s0: SWITCH_REG2 { + regulator-name = "vcc3v3_s0"; + regulator-always-on; + regulator-boot-on; + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + }; + }; + + vdd_gpu: regulator@60 { + compatible = "fcs,fan53555"; + reg = <0x60>; + fcs,suspend-voltage-selector = <1>; + regulator-name = "vdd_gpu"; + regulator-min-microvolt = <600000>; + regulator-max-microvolt = <1230000>; + regulator-ramp-delay = <1000>; + regulator-always-on; + regulator-boot-on; + vin-supply = <&vcc5v0_sys>; + }; +}; + +&i2c7 { + status = "okay"; + clock-frequency = <400000>; + + fan: fan@18 { + compatible = "ti,amc6821"; + reg = <0x18>; + cooling-min-state = <0>; + cooling-max-state = <9>; + #cooling-cells = <2>; + }; + + rtc_twi: rtc@6f { + compatible = "isil,isl1208"; + reg = <0x6f>; + }; +}; + +&i2c8 { + status = "okay"; + clock-frequency = <400000>; + + vdd_cpu_b: regulator@60 { + compatible = "fcs,fan53555"; + reg = <0x60>; + vin-supply = <&vcc5v0_sys>; + regulator-name = "vdd_cpu_b"; + regulator-min-microvolt = <600000>; + regulator-max-microvolt = <1230000>; + regulator-ramp-delay = <1000>; + fcs,suspend-voltage-selector = <1>; + regulator-always-on; + regulator-boot-on; + }; +}; + +&io_domains { + status = "okay"; + bt656-supply = <&vcc_1v8>; + audio-supply = <&vcc_1v8>; + sdmmc-supply = <&vcc_sd>; + gpio1830-supply = <&vcc_1v8>; +}; + +&pmu_io_domains { + status = "okay"; + pmu1830-supply = <&vcc_1v8>; +}; + +&pwm2 { + status = "okay"; +}; + +&pinctrl { + i2c8 { + i2c8_xfer_a: i2c8-xfer { + rockchip,pins = + , + ; + }; + }; + + leds { + led_pin_module: led-module-gpio { + rockchip,pins = + ; + }; + }; + + pmic { + pmic_int_l: pmic-int-l { + rockchip,pins = + ; + }; + }; + + usb2 { + vcc5v0_host_en: vcc5v0-host-en { + rockchip,pins = + ; + }; + }; +}; + +&sdhci { + bus-width = <8>; + mmc-hs400-1_8v; + mmc-hs400-enhanced-strobe; + non-removable; + status = "okay"; +}; + +&sdmmc { + vqmmc = <&vcc_sd>; +}; + +&spi1 { + status = "okay"; + + norflash: flash@0 { + compatible = "jedec,spi-nor"; + reg = <0>; + spi-max-frequency = <50000000>; + }; +}; + +&u2phy1 { + status = "okay"; + + u2phy1_otg: otg-port { + status = "okay"; + }; + + u2phy1_host: host-port { + phy-supply = <&vcc5v0_host>; + status = "okay"; + }; +}; + +&usbdrd3_1 { + status = "okay"; +}; + +&usbdrd_dwc3_1 { + status = "okay"; + dr_mode = "host"; +}; + +&usb_host1_ehci { + status = "okay"; +}; + +&usb_host1_ohci { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts new file mode 100644 index 000000000000..b7bd88fb3ae3 --- /dev/null +++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts @@ -0,0 +1,240 @@ +/* + * Copyright (c) 2017 Fuzhou Rockchip Electronics Co., Ltd. + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. 
+ * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +/dts-v1/; +#include +#include "rk3399-sapphire.dtsi" + +/ { + model = "Excavator-RK3399 Board"; + compatible = "rockchip,rk3399-sapphire-excavator", "rockchip,rk3399"; + + adc-keys { + compatible = "adc-keys"; + io-channels = <&saradc 1>; + io-channel-names = "buttons"; + keyup-threshold-microvolt = <1800000>; + poll-interval = <100>; + + button-up { + label = "Volume Up"; + linux,code = ; + press-threshold-microvolt = <100000>; + }; + + button-down { + label = "Volume Down"; + linux,code = ; + press-threshold-microvolt = <300000>; + }; + + back { + label = "Back"; + linux,code = ; + press-threshold-microvolt = <985000>; + }; + + menu { + label = "Menu"; + linux,code = ; + press-threshold-microvolt = <1314000>; + }; + }; + + edp_panel: edp-panel { + compatible ="lg,lp079qx1-sp0v", "simple-panel"; + backlight = <&backlight>; + enable-gpios = <&gpio4 RK_PC6 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&lcd_panel_reset>; + power-supply = <&vcc3v3_s0>; + + ports { + panel_in_edp: endpoint { + remote-endpoint = <&edp_out_panel>; + }; + }; + }; + + keys: gpio-keys { + compatible = "gpio-keys"; + autorepeat; + + power { + debounce-interval = <100>; + gpios = <&gpio0 RK_PA5 GPIO_ACTIVE_LOW>; + label = "GPIO Power"; + linux,code = ; + linux,input-type = <1>; + pinctrl-names = "default"; + pinctrl-0 = <&pwr_btn>; + wakeup-source; + }; + }; + + rt5651-sound { + compatible = "simple-audio-card"; + simple-audio-card,name = "realtek,rt5651-codec"; + simple-audio-card,format = "i2s"; + simple-audio-card,mclk-fs = <256>; + simple-audio-card,widgets = + "Microphone", "Mic Jack", + "Headphone", "Headphone Jack"; + simple-audio-card,routing = + "Mic Jack", "MICBIAS1", + "IN1P", "Mic Jack", + "Headphone Jack", "HPOL", + "Headphone Jack", "HPOR"; + simple-audio-card,cpu { + sound-dai = <&i2s0>; + }; + simple-audio-card,codec { + sound-dai = <&rt5651>; + }; + }; + + 
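+	/*
+	 * Editor's note (illustrative sketch, not part of this patch): keys
+	 * in the adc-keys node above are distinguished only by the voltage
+	 * the resistor ladder presents on SARADC channel 1, so an extra
+	 * button is just another child node carrying its nominal divider
+	 * voltage.  The node name, key code and threshold below are
+	 * hypothetical:
+	 *
+	 *	button-home {
+	 *		label = "Home";
+	 *		linux,code = <KEY_HOME>;
+	 *		press-threshold-microvolt = <600000>;
+	 *	};
+	 *
+	 * Per the adc-keys binding, a key is reported as pressed while the
+	 * measured voltage is at or above its press-threshold-microvolt and
+	 * below keyup-threshold-microvolt.
+	 */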
sdio_pwrseq: sdio-pwrseq { + compatible = "mmc-pwrseq-simple"; + clocks = <&rk808 1>; + clock-names = "ext_clock"; + pinctrl-names = "default"; + pinctrl-0 = <&wifi_enable_h>; + + /* + * On the module itself this is one of these (depending + * on the actual card populated): + * - SDIO_RESET_L_WL_REG_ON + * - PDN (power down when low) + */ + reset-gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_LOW>; + }; +}; + +&backlight { + enable-gpios = <&gpio1 RK_PB5 GPIO_ACTIVE_HIGH>; + status = "okay"; +}; + +&edp { + status = "okay"; + + ports { + edp_out: port@1 { + reg = <1>; + #address-cells = <1>; + #size-cells = <0>; + + edp_out_panel: endpoint@0 { + reg = <0>; + remote-endpoint = <&panel_in_edp>; + }; + }; + }; +}; + +&i2c1 { + i2c-scl-rising-time-ns = <300>; + i2c-scl-falling-time-ns = <15>; + status = "okay"; + + rt5651: rt5651@1a { + compatible = "rockchip,rt5651"; + reg = <0x1a>; + clocks = <&cru SCLK_I2S_8CH_OUT>; + clock-names = "mclk"; + hp-det-gpio = <&gpio4 RK_PC4 GPIO_ACTIVE_LOW>; + spk-con-gpio = <&gpio0 RK_PB3 GPIO_ACTIVE_HIGH>; + #sound-dai-cells = <0>; + }; +}; + +&i2c4 { + i2c-scl-rising-time-ns = <600>; + i2c-scl-falling-time-ns = <20>; + status = "okay"; + + accelerometer@68 { + compatible = "invensense,mpu6500"; + reg = <0x68>; + interrupt-parent = <&gpio1>; + interrupts = ; + }; +}; + +&i2s0 { + rockchip,playback-channels = <8>; + rockchip,capture-channels = <8>; + #sound-dai-cells = <0>; + status = "okay"; +}; + +&i2s2 { + #sound-dai-cells = <0>; + status = "okay"; +}; + +&pinctrl { + buttons { + pwr_btn: pwr-btn { + rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_up>; + }; + }; + + sdio-pwrseq { + wifi_enable_h: wifi-enable-h { + rockchip,pins = <0 RK_PB2 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; + + lcd-panel { + lcd_panel_reset: lcd-panel-reset { + rockchip,pins = <4 RK_PD6 RK_FUNC_GPIO &pcfg_pull_up>; + }; + }; +}; + +&spdif { + i2c-scl-rising-time-ns = <450>; + i2c-scl-falling-time-ns = <15>; + #sound-dai-cells = <0>; + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi new file mode 100644 index 000000000000..6c30bb02210d --- /dev/null +++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi @@ -0,0 +1,644 @@ +/* + * Copyright (c) 2017 Fuzhou Rockchip Electronics Co., Ltd. + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include "dt-bindings/pwm/pwm.h" +#include "rk3399.dtsi" +#include "rk3399-opp.dtsi" + +/ { + compatible = "rockchip,rk3399-sapphire", "rockchip,rk3399"; + + backlight: backlight { + compatible = "pwm-backlight"; + brightness-levels = < + 0 1 2 3 4 5 6 7 + 8 9 10 11 12 13 14 15 + 16 17 18 19 20 21 22 23 + 24 25 26 27 28 29 30 31 + 32 33 34 35 36 37 38 39 + 40 41 42 43 44 45 46 47 + 48 49 50 51 52 53 54 55 + 56 57 58 59 60 61 62 63 + 64 65 66 67 68 69 70 71 + 72 73 74 75 76 77 78 79 + 80 81 82 83 84 85 86 87 + 88 89 90 91 92 93 94 95 + 96 97 98 99 100 101 102 103 + 104 105 106 107 108 109 110 111 + 112 113 114 115 116 117 118 119 + 120 121 122 123 124 125 126 127 + 128 129 130 131 132 133 134 135 + 136 137 138 139 140 141 142 143 + 144 145 146 147 148 149 150 151 + 152 153 154 155 156 157 158 159 + 160 161 162 163 164 165 166 167 + 168 169 170 171 172 173 174 175 + 176 177 178 179 180 181 182 183 + 184 185 186 187 188 189 190 191 + 192 193 194 195 196 197 198 199 + 200 201 202 203 204 205 206 207 + 208 209 210 211 212 213 214 215 + 216 217 218 219 220 221 222 223 + 224 225 226 227 228 229 230 231 + 232 233 234 235 236 237 238 239 + 240 241 242 243 244 245 246 247 + 248 249 250 251 252 253 254 255>; + default-brightness-level = <200>; + pwms = <&pwm0 0 25000 0>; + }; + + clkin_gmac: external-gmac-clock { + compatible = "fixed-clock"; + clock-frequency = <125000000>; + clock-output-names = "clkin_gmac"; + #clock-cells = <0>; + }; + + dc_12v: dc-12v { + compatible = "regulator-fixed"; + regulator-name = "dc_12v"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <12000000>; + regulator-max-microvolt = <12000000>; + }; + + /* switched by pmic_sleep */ + vcc1v8_s3: vcca1v8_s3: vcc1v8-s3 { + compatible = "regulator-fixed"; + regulator-name = "vcc1v8_s3"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + vin-supply = <&vcc_1v8>; + }; + + vcc3v3_sys: vcc3v3-sys { + compatible = "regulator-fixed"; + regulator-name = "vcc3v3_sys"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + vin-supply = <&vcc_sys>; + }; + + vcc_sys: vcc-sys { + compatible = "regulator-fixed"; + regulator-name = "vcc_sys"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = 
<5000000>; + vin-supply = <&dc_12v>; + }; + + vcc5v0_host: vcc5v0-host-regulator { + compatible = "regulator-fixed"; + enable-active-high; + gpio = <&gpio1 RK_PD1 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&vcc5v0_host_en>; + regulator-name = "vcc5v0_host"; + regulator-always-on; + vin-supply = <&vcc_sys>; + }; +}; + +&cpu_l0 { + cpu-supply = <&vdd_cpu_l>; +}; + +&cpu_l1 { + cpu-supply = <&vdd_cpu_l>; +}; + +&cpu_l2 { + cpu-supply = <&vdd_cpu_l>; +}; + +&cpu_l3 { + cpu-supply = <&vdd_cpu_l>; +}; + +&cpu_b0 { + cpu-supply = <&vdd_cpu_b>; +}; + +&cpu_b1 { + cpu-supply = <&vdd_cpu_b>; +}; + +&emmc_phy { + status = "okay"; +}; + +&gmac { + assigned-clocks = <&cru SCLK_RMII_SRC>; + assigned-clock-parents = <&clkin_gmac>; + clock_in_out = "input"; + phy-supply = <&vcc_lan>; + phy-mode = "rgmii"; + pinctrl-names = "default"; + pinctrl-0 = <&rgmii_pins>; + snps,reset-gpio = <&gpio3 RK_PB7 GPIO_ACTIVE_LOW>; + snps,reset-active-low; + snps,reset-delays-us = <0 10000 50000>; + tx_delay = <0x28>; + rx_delay = <0x11>; + status = "okay"; +}; + +&gpu { + mali-supply = <&vdd_gpu>; + status = "okay"; +}; + +&hdmi { + ddc-i2c-bus = <&i2c3>; + status = "okay"; +}; + +&i2c0 { + clock-frequency = <400000>; + i2c-scl-rising-time-ns = <168>; + i2c-scl-falling-time-ns = <4>; + status = "okay"; + + rk808: pmic@1b { + compatible = "rockchip,rk808"; + reg = <0x1b>; + interrupt-parent = <&gpio1>; + interrupts = <21 IRQ_TYPE_LEVEL_LOW>; + #clock-cells = <1>; + clock-output-names = "xin32k", "rk808-clkout2"; + pinctrl-names = "default"; + pinctrl-0 = <&pmic_int_l &pmic_dvs2>; + rockchip,system-power-controller; + wakeup-source; + + vcc1-supply = <&vcc_sys>; + vcc2-supply = <&vcc_sys>; + vcc3-supply = <&vcc_sys>; + vcc4-supply = <&vcc_sys>; + vcc6-supply = <&vcc_sys>; + vcc7-supply = <&vcc_sys>; + vcc8-supply = <&vcc3v3_sys>; + vcc9-supply = <&vcc_sys>; + vcc10-supply = <&vcc_sys>; + vcc11-supply = <&vcc_sys>; + vcc12-supply = <&vcc3v3_sys>; + vddio-supply = <&vcc1v8_pmu>; + + regulators { + vdd_center: DCDC_REG1 { + regulator-name = "vdd_center"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <750000>; + regulator-max-microvolt = <1350000>; + regulator-ramp-delay = <6001>; + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vdd_cpu_l: DCDC_REG2 { + regulator-name = "vdd_cpu_l"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <750000>; + regulator-max-microvolt = <1350000>; + regulator-ramp-delay = <6001>; + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc_ddr: DCDC_REG3 { + regulator-name = "vcc_ddr"; + regulator-always-on; + regulator-boot-on; + regulator-state-mem { + regulator-on-in-suspend; + }; + }; + + vcc_1v8: DCDC_REG4 { + regulator-name = "vcc_1v8"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <1800000>; + }; + }; + + vcc1v8_dvp: LDO_REG1 { + regulator-name = "vcc1v8_dvp"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc3v0_tp: LDO_REG2 { + regulator-name = "vcc3v0_tp"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3000000>; + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc1v8_pmu: LDO_REG3 { + regulator-name = "vcc1v8_pmu"; + 
regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <1800000>; + }; + }; + + vcc_sdio: LDO_REG4 { + regulator-name = "vcc_sdio"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <3300000>; + }; + }; + + vcca3v0_codec: LDO_REG5 { + regulator-name = "vcca3v0_codec"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3000000>; + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc_1v5: LDO_REG6 { + regulator-name = "vcc_1v5"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1500000>; + regulator-max-microvolt = <1500000>; + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <1500000>; + }; + }; + + vcca1v8_codec: LDO_REG7 { + regulator-name = "vcca1v8_codec"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc_3v0: LDO_REG8 { + regulator-name = "vcc_3v0"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3000000>; + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <3000000>; + }; + }; + + vcc3v3_s3: vcc_lan: SWITCH_REG1 { + regulator-name = "vcc3v3_s3"; + regulator-always-on; + regulator-boot-on; + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc3v3_s0: SWITCH_REG2 { + regulator-name = "vcc3v3_s0"; + regulator-always-on; + regulator-boot-on; + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + }; + }; + + vdd_cpu_b: regulator@40 { + compatible = "silergy,syr827"; + reg = <0x40>; + fcs,suspend-voltage-selector = <1>; + regulator-name = "vdd_cpu_b"; + regulator-min-microvolt = <712500>; + regulator-max-microvolt = <1500000>; + regulator-ramp-delay = <1000>; + regulator-always-on; + regulator-boot-on; + vin-supply = <&vcc_sys>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vdd_gpu: regulator@41 { + compatible = "silergy,syr828"; + reg = <0x41>; + fcs,suspend-voltage-selector = <1>; + regulator-name = "vdd_gpu"; + regulator-min-microvolt = <712500>; + regulator-max-microvolt = <1500000>; + regulator-ramp-delay = <1000>; + regulator-always-on; + regulator-boot-on; + vin-supply = <&vcc_sys>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vdd_log: vdd-log { + compatible = "pwm-regulator"; + pwms = <&pwm2 0 25000 1>; + regulator-name = "vdd_log"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <800000>; + regulator-max-microvolt = <1400000>; + vin-supply = <&vcc_sys>; + }; +}; + +&i2c3 { + i2c-scl-rising-time-ns = <450>; + i2c-scl-falling-time-ns = <15>; + status = "okay"; +}; + +&io_domains { + status = "okay"; + + bt656-supply = <&vcc_3v0>; + audio-supply = <&vcca1v8_codec>; + sdmmc-supply = <&vcc_sdio>; + gpio1830-supply = <&vcc_3v0>; +}; + +&pcie_phy { + status = "okay"; +}; + +&pcie0 { + assigned-clocks = <&cru SCLK_PCIEPHY_REF>; + assigned-clock-parents = <&cru SCLK_PCIEPHY_REF100M>; + assigned-clock-rates = <100000000>; + ep-gpios = <&gpio3 RK_PB5 GPIO_ACTIVE_HIGH>; + num-lanes = <4>; + pinctrl-names = "default"; + pinctrl-0 = 
<&pcie_clkreqn_cpm>; + status = "okay"; +}; + +&pmu_io_domains { + pmu1830-supply = <&vcc_3v0>; + status = "okay"; +}; + +&pinctrl { + pmic { + pmic_int_l: pmic-int-l { + rockchip,pins = + <1 RK_PC5 RK_FUNC_GPIO &pcfg_pull_up>; + }; + + pmic_dvs2: pmic-dvs2 { + rockchip,pins = + <1 RK_PC2 RK_FUNC_GPIO &pcfg_pull_down>; + }; + + vsel1_gpio: vsel1-gpio { + rockchip,pins = <1 RK_PC1 RK_FUNC_GPIO &pcfg_pull_down>; + }; + + vsel2_gpio: vsel2-gpio { + rockchip,pins = <1 RK_PB6 RK_FUNC_GPIO &pcfg_pull_down>; + }; + }; + + usb2 { + vcc5v0_host_en: vcc5v0-host-en { + rockchip,pins = + <4 RK_PD1 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; +}; + +&pwm0 { + status = "okay"; +}; + +&pwm2 { + status = "okay"; +}; + +&saradc { + vref-supply = <&vcca1v8_s3>; + status = "okay"; +}; + +&sdhci { + bus-width = <8>; + keep-power-in-suspend; + mmc-hs400-1_8v; + mmc-hs400-enhanced-strobe; + non-removable; + status = "okay"; +}; + +&sdio0 { + bus-width = <4>; + cap-sd-highspeed; + cap-sdio-irq; + clock-frequency = <50000000>; + disable-wp; + keep-power-in-suspend; + max-frequency = <50000000>; + mmc-pwrseq = <&sdio_pwrseq>; + non-removable; + pinctrl-names = "default"; + pinctrl-0 = <&sdio0_bus4 &sdio0_cmd &sdio0_clk>; + sd-uhs-sdr104; + status = "okay"; +}; + +&sdmmc { + bus-width = <4>; + cap-mmc-highspeed; + cap-sd-highspeed; + clock-frequency = <150000000>; + disable-wp; + max-frequency = <150000000>; + pinctrl-names = "default"; + pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>; + vqmmc-supply = <&vcc_sdio>; + status = "okay"; +}; + +&tsadc { + /* tshut mode 0:CRU 1:GPIO */ + rockchip,hw-tshut-mode = <1>; + /* tshut polarity 0:LOW 1:HIGH */ + rockchip,hw-tshut-polarity = <1>; + status = "okay"; +}; + +&u2phy0 { + status = "okay"; + + u2phy0_otg: otg-port { + status = "okay"; + }; + + u2phy0_host: host-port { + phy-supply = <&vcc5v0_host>; + status = "okay"; + }; +}; + +&u2phy1 { + status = "okay"; + + u2phy1_otg: otg-port { + status = "okay"; + }; + + u2phy1_host: host-port { + phy-supply = <&vcc5v0_host>; + status = "okay"; + }; +}; + +&uart0 { + pinctrl-names = "default"; + pinctrl-0 = <&uart0_xfer &uart0_cts>; + status = "okay"; +}; + +&uart2 { + status = "okay"; +}; + +&usb_host0_ehci { + status = "okay"; +}; + +&usb_host0_ohci { + status = "okay"; +}; + +&usb_host1_ehci { + status = "okay"; +}; + +&usb_host1_ohci { + status = "okay"; +}; + +&usbdrd3_0 { + status = "okay"; +}; + +&usbdrd_dwc3_0 { + status = "okay"; + dr_mode = "otg"; +}; + +&usbdrd3_1 { + status = "okay"; +}; + +&usbdrd_dwc3_1 { + status = "okay"; + dr_mode = "host"; +}; + +&vopb { + status = "okay"; +}; + +&vopb_mmu { + status = "okay"; +}; + +&vopl { + status = "okay"; +}; + +&vopl_mmu { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi index 69c56f7316c4..ab7629c5b856 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi @@ -110,6 +110,7 @@ cpu_l0: cpu@0 { enable-method = "psci"; #cooling-cells = <2>; /* min followed by max */ clocks = <&cru ARMCLKL>; + dynamic-power-coefficient = <100>; }; cpu_l1: cpu@1 { @@ -118,6 +119,7 @@ cpu_l1: cpu@1 { reg = <0x0 0x1>; enable-method = "psci"; clocks = <&cru ARMCLKL>; + dynamic-power-coefficient = <100>; }; cpu_l2: cpu@2 { @@ -126,6 +128,7 @@ cpu_l2: cpu@2 { reg = <0x0 0x2>; enable-method = "psci"; clocks = <&cru ARMCLKL>; + dynamic-power-coefficient = <100>; }; cpu_l3: cpu@3 { @@ -134,6 +137,7 @@ cpu_l3: cpu@3 { reg = <0x0 0x3>; enable-method = "psci"; clocks = <&cru 
ARMCLKL>; + dynamic-power-coefficient = <100>; }; cpu_b0: cpu@100 { @@ -143,6 +147,7 @@ cpu_b0: cpu@100 { enable-method = "psci"; #cooling-cells = <2>; /* min followed by max */ clocks = <&cru ARMCLKB>; + dynamic-power-coefficient = <436>; }; cpu_b1: cpu@101 { @@ -151,9 +156,15 @@ cpu_b1: cpu@101 { reg = <0x0 0x101>; enable-method = "psci"; clocks = <&cru ARMCLKB>; + dynamic-power-coefficient = <436>; }; }; + display-subsystem { + compatible = "rockchip,display-subsystem"; + ports = <&vopl_out>, <&vopb_out>; + }; + pmu_a53 { compatible = "arm,cortex-a53-pmu"; interrupts = ; @@ -238,8 +249,10 @@ pcie0: pcie@f8000000 { linux,pci-domain = <0>; max-link-speed = <1>; msi-map = <0x0 &its 0x0 0x1000>; - phys = <&pcie_phy>; - phy-names = "pcie-phy"; + phys = <&pcie_phy 0>, <&pcie_phy 1>, + <&pcie_phy 2>, <&pcie_phy 3>; + phy-names = "pcie-phy-0", "pcie-phy-1", + "pcie-phy-2", "pcie-phy-3"; ranges = <0x83000000 0x0 0xfa000000 0x0 0xfa000000 0x0 0x1e00000 0x81000000 0x0 0xfbe00000 0x0 0xfbe00000 0x0 0x100000>; resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>, @@ -287,6 +300,7 @@ sdio0: dwmmc@fe310000 { <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>; clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; fifo-depth = <0x100>; + power-domains = <&power RK3399_PD_SDIOAUDIO>; resets = <&cru SRST_SDIO0>; reset-names = "reset"; status = "disabled"; @@ -400,6 +414,7 @@ usbdrd_dwc3_0: dwc3 { snps,dis-u2-freeclk-exists-quirk; snps,dis_u2_susphy_quirk; snps,dis-del-phy-power-chg-quirk; + snps,dis-tx-ipgap-linecheck-quirk; status = "disabled"; }; }; @@ -427,6 +442,7 @@ usbdrd_dwc3_1: dwc3 { snps,dis-u2-freeclk-exists-quirk; snps,dis_u2_susphy_quirk; snps,dis-del-phy-power-chg-quirk; + snps,dis-tx-ipgap-linecheck-quirk; status = "disabled"; }; }; @@ -676,6 +692,7 @@ spi5: spi@ff200000 { interrupts = ; pinctrl-names = "default"; pinctrl-0 = <&spi5_clk &spi5_tx &spi5_rx &spi5_cs0>; + power-domains = <&power RK3399_PD_SDIOAUDIO>; #address-cells = <1>; #size-cells = <0>; status = "disabled"; @@ -948,6 +965,10 @@ pd_gpu@RK3399_PD_GPU { }; /* These power domains are grouped by VD_LOGIC */ + pd_edp@RK3399_PD_EDP { + reg = ; + clocks = <&cru PCLK_EDP_CTRL>; + }; pd_emmc@RK3399_PD_EMMC { reg = ; clocks = <&cru ACLK_EMMC>; @@ -965,6 +986,11 @@ pd_sd@RK3399_PD_SD { <&cru SCLK_SDMMC>; pm_qos = <&qos_sd>; }; + pd_sdioaudio@RK3399_PD_SDIOAUDIO { + reg = ; + clocks = <&cru HCLK_SDIO>; + pm_qos = <&qos_sdioaudio>; + }; pd_vio@RK3399_PD_VIO { reg = ; #address-cells = <1>; @@ -1151,6 +1177,33 @@ pwm3: pwm@ff420030 { status = "disabled"; }; + vpu_mmu: iommu@ff650800 { + compatible = "rockchip,iommu"; + reg = <0x0 0xff650800 0x0 0x40>; + interrupts = ; + interrupt-names = "vpu_mmu"; + #iommu-cells = <0>; + status = "disabled"; + }; + + vdec_mmu: iommu@ff660480 { + compatible = "rockchip,iommu"; + reg = <0x0 0xff660480 0x0 0x40>, <0x0 0xff6604c0 0x0 0x40>; + interrupts = ; + interrupt-names = "vdec_mmu"; + #iommu-cells = <0>; + status = "disabled"; + }; + + iep_mmu: iommu@ff670800 { + compatible = "rockchip,iommu"; + reg = <0x0 0xff670800 0x0 0x40>; + interrupts = ; + interrupt-names = "iep_mmu"; + #iommu-cells = <0>; + status = "disabled"; + }; + efuse0: efuse@ff690000 { compatible = "rockchip,rk3399-efuse"; reg = <0x0 0xff690000 0x0 0x80>; @@ -1295,7 +1348,7 @@ pcie_phy: pcie-phy { compatible = "rockchip,rk3399-pcie-phy"; clocks = <&cru SCLK_PCIEPHY_REF>; clock-names = "refclk"; - #phy-cells = <0>; + #phy-cells = <1>; resets = <&cru SRST_PCIEPHY>; reset-names = "phy"; status = "disabled"; @@ -1385,6 +1438,7 @@ spdif: 
spdif@ff870000 { clocks = <&cru SCLK_SPDIF_8CH>, <&cru HCLK_SPDIF>; pinctrl-names = "default"; pinctrl-0 = <&spdif_bus>; + power-domains = <&power RK3399_PD_SDIOAUDIO>; status = "disabled"; }; @@ -1399,6 +1453,7 @@ i2s0: i2s@ff880000 { clocks = <&cru SCLK_I2S0_8CH>, <&cru HCLK_I2S0_8CH>; pinctrl-names = "default"; pinctrl-0 = <&i2s0_8ch_bus>; + power-domains = <&power RK3399_PD_SDIOAUDIO>; status = "disabled"; }; @@ -1412,6 +1467,7 @@ i2s1: i2s@ff890000 { clocks = <&cru SCLK_I2S1_8CH>, <&cru HCLK_I2S1_8CH>; pinctrl-names = "default"; pinctrl-0 = <&i2s1_2ch_bus>; + power-domains = <&power RK3399_PD_SDIOAUDIO>; status = "disabled"; }; @@ -1423,6 +1479,224 @@ i2s2: i2s@ff8a0000 { dma-names = "tx", "rx"; clock-names = "i2s_clk", "i2s_hclk"; clocks = <&cru SCLK_I2S2_8CH>, <&cru HCLK_I2S2_8CH>; + power-domains = <&power RK3399_PD_SDIOAUDIO>; + status = "disabled"; + }; + + vopl: vop@ff8f0000 { + compatible = "rockchip,rk3399-vop-lit"; + reg = <0x0 0xff8f0000 0x0 0x3efc>; + interrupts = ; + assigned-clocks = <&cru ACLK_VOP1>, <&cru HCLK_VOP1>; + assigned-clock-rates = <400000000>, <100000000>; + clocks = <&cru ACLK_VOP1>, <&cru DCLK_VOP1>, <&cru HCLK_VOP1>; + clock-names = "aclk_vop", "dclk_vop", "hclk_vop"; + iommus = <&vopl_mmu>; + power-domains = <&power RK3399_PD_VOPL>; + resets = <&cru SRST_A_VOP1>, <&cru SRST_H_VOP1>, <&cru SRST_D_VOP1>; + reset-names = "axi", "ahb", "dclk"; + status = "disabled"; + + vopl_out: port { + #address-cells = <1>; + #size-cells = <0>; + + vopl_out_mipi: endpoint@0 { + reg = <0>; + remote-endpoint = <&mipi_in_vopl>; + }; + + vopl_out_edp: endpoint@1 { + reg = <1>; + remote-endpoint = <&edp_in_vopl>; + }; + + vopl_out_hdmi: endpoint@2 { + reg = <2>; + remote-endpoint = <&hdmi_in_vopl>; + }; + }; + }; + + vopl_mmu: iommu@ff8f3f00 { + compatible = "rockchip,iommu"; + reg = <0x0 0xff8f3f00 0x0 0x100>; + interrupts = ; + interrupt-names = "vopl_mmu"; + clocks = <&cru ACLK_VOP1>, <&cru HCLK_VOP1>; + clock-names = "aclk", "hclk"; + power-domains = <&power RK3399_PD_VOPL>; + #iommu-cells = <0>; + status = "disabled"; + }; + + vopb: vop@ff900000 { + compatible = "rockchip,rk3399-vop-big"; + reg = <0x0 0xff900000 0x0 0x3efc>; + interrupts = ; + assigned-clocks = <&cru ACLK_VOP0>, <&cru HCLK_VOP0>; + assigned-clock-rates = <400000000>, <100000000>; + clocks = <&cru ACLK_VOP0>, <&cru DCLK_VOP0>, <&cru HCLK_VOP0>; + clock-names = "aclk_vop", "dclk_vop", "hclk_vop"; + iommus = <&vopb_mmu>; + power-domains = <&power RK3399_PD_VOPB>; + resets = <&cru SRST_A_VOP0>, <&cru SRST_H_VOP0>, <&cru SRST_D_VOP0>; + reset-names = "axi", "ahb", "dclk"; + status = "disabled"; + + vopb_out: port { + #address-cells = <1>; + #size-cells = <0>; + + vopb_out_edp: endpoint@0 { + reg = <0>; + remote-endpoint = <&edp_in_vopb>; + }; + + vopb_out_mipi: endpoint@1 { + reg = <1>; + remote-endpoint = <&mipi_in_vopb>; + }; + + vopb_out_hdmi: endpoint@2 { + reg = <2>; + remote-endpoint = <&hdmi_in_vopb>; + }; + }; + }; + + vopb_mmu: iommu@ff903f00 { + compatible = "rockchip,iommu"; + reg = <0x0 0xff903f00 0x0 0x100>; + interrupts = ; + interrupt-names = "vopb_mmu"; + clocks = <&cru ACLK_VOP0>, <&cru HCLK_VOP0>; + clock-names = "aclk", "hclk"; + power-domains = <&power RK3399_PD_VOPB>; + #iommu-cells = <0>; + status = "disabled"; + }; + + isp0_mmu: iommu@ff914000 { + compatible = "rockchip,iommu"; + reg = <0x0 0xff914000 0x0 0x100>, <0x0 0xff915000 0x0 0x100>; + interrupts = ; + interrupt-names = "isp0_mmu"; + #iommu-cells = <0>; + rockchip,disable-mmu-reset; + status = "disabled"; + }; + + isp1_mmu: 
iommu@ff924000 { + compatible = "rockchip,iommu"; + reg = <0x0 0xff924000 0x0 0x100>, <0x0 0xff925000 0x0 0x100>; + interrupts = ; + interrupt-names = "isp1_mmu"; + #iommu-cells = <0>; + rockchip,disable-mmu-reset; + status = "disabled"; + }; + + hdmi: hdmi@ff940000 { + compatible = "rockchip,rk3399-dw-hdmi"; + reg = <0x0 0xff940000 0x0 0x20000>; + interrupts = ; + clocks = <&cru PCLK_HDMI_CTRL>, <&cru SCLK_HDMI_SFR>, <&cru PLL_VPLL>, <&cru PCLK_VIO_GRF>; + clock-names = "iahb", "isfr", "vpll", "grf"; + power-domains = <&power RK3399_PD_HDCP>; + reg-io-width = <4>; + rockchip,grf = <&grf>; + status = "disabled"; + + ports { + hdmi_in: port { + #address-cells = <1>; + #size-cells = <0>; + + hdmi_in_vopb: endpoint@0 { + reg = <0>; + remote-endpoint = <&vopb_out_hdmi>; + }; + hdmi_in_vopl: endpoint@1 { + reg = <1>; + remote-endpoint = <&vopl_out_hdmi>; + }; + }; + }; + }; + + mipi_dsi: mipi@ff960000 { + compatible = "rockchip,rk3399-mipi-dsi", "snps,dw-mipi-dsi"; + reg = <0x0 0xff960000 0x0 0x8000>; + interrupts = ; + clocks = <&cru SCLK_DPHY_PLL>, <&cru PCLK_MIPI_DSI0>, + <&cru SCLK_DPHY_TX0_CFG>, <&cru PCLK_VIO_GRF>; + clock-names = "ref", "pclk", "phy_cfg", "grf"; + power-domains = <&power RK3399_PD_VIO>; + rockchip,grf = <&grf>; + status = "disabled"; + + ports { + mipi_in: port { + #address-cells = <1>; + #size-cells = <0>; + + mipi_in_vopb: endpoint@0 { + reg = <0>; + remote-endpoint = <&vopb_out_mipi>; + }; + mipi_in_vopl: endpoint@1 { + reg = <1>; + remote-endpoint = <&vopl_out_mipi>; + }; + }; + }; + }; + + edp: edp@ff970000 { + compatible = "rockchip,rk3399-edp"; + reg = <0x0 0xff970000 0x0 0x8000>; + interrupts = ; + clocks = <&cru PCLK_EDP>, <&cru PCLK_EDP_CTRL>; + clock-names = "dp", "pclk"; + pinctrl-names = "default"; + pinctrl-0 = <&edp_hpd>; + power-domains = <&power RK3399_PD_EDP>; + resets = <&cru SRST_P_EDP_CTRL>; + reset-names = "dp"; + rockchip,grf = <&grf>; + status = "disabled"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + edp_in: port@0 { + reg = <0>; + #address-cells = <1>; + #size-cells = <0>; + + edp_in_vopb: endpoint@0 { + reg = <0>; + remote-endpoint = <&vopb_out_edp>; + }; + + edp_in_vopl: endpoint@1 { + reg = <1>; + remote-endpoint = <&vopl_out_edp>; + }; + }; + }; + }; + + gpu: gpu@ff9a0000 { + compatible = "rockchip,rk3399-mali", "arm,mali-t860"; + reg = <0x0 0xff9a0000 0x0 0x10000>; + interrupts = , + , + ; + interrupt-names = "gpu", "job", "mmu"; + clocks = <&cru ACLK_GPU>; + power-domains = <&power RK3399_PD_GPU>; status = "disabled"; }; @@ -1786,7 +2060,7 @@ sdmmc_cmd: sdmmc-cmd { <4 RK_PB5 RK_FUNC_1 &pcfg_pull_up>; }; - sdmmc_cd: sdmcc-cd { + sdmmc_cd: sdmmc-cd { rockchip,pins = <0 RK_PA7 RK_FUNC_1 &pcfg_pull_up>; }; @@ -2090,16 +2364,6 @@ hdmi_cec: hdmi-cec { }; pcie { - pcie_clkreqn: pci-clkreqn { - rockchip,pins = - <2 26 RK_FUNC_2 &pcfg_pull_none>; - }; - - pcie_clkreqnb: pci-clkreqnb { - rockchip,pins = - <4 24 RK_FUNC_1 &pcfg_pull_none>; - }; - pcie_clkreqn_cpm: pci-clkreqn-cpm { rockchip,pins = <2 RK_PD2 RK_FUNC_GPIO &pcfg_pull_none>; diff --git a/arch/arm64/boot/dts/socionext/Makefile b/arch/arm64/boot/dts/socionext/Makefile index 4a13a3a97101..4bc091b365fd 100644 --- a/arch/arm64/boot/dts/socionext/Makefile +++ b/arch/arm64/boot/dts/socionext/Makefile @@ -2,7 +2,8 @@ dtb-$(CONFIG_ARCH_UNIPHIER) += \ uniphier-ld11-global.dtb \ uniphier-ld11-ref.dtb \ uniphier-ld20-global.dtb \ - uniphier-ld20-ref.dtb + uniphier-ld20-ref.dtb \ + uniphier-pxs3-ref.dtb always := $(dtb-y) clean-files := *.dtb diff --git 
a/arch/arm64/boot/dts/socionext/uniphier-ld11-global.dts b/arch/arm64/boot/dts/socionext/uniphier-ld11-global.dts index 115357018ef7..2452b2243f42 100644 --- a/arch/arm64/boot/dts/socionext/uniphier-ld11-global.dts +++ b/arch/arm64/boot/dts/socionext/uniphier-ld11-global.dts @@ -9,7 +9,7 @@ */ /dts-v1/; -/include/ "uniphier-ld11.dtsi" +#include "uniphier-ld11.dtsi" / { model = "UniPhier LD11 Global Board (REF_LD11_GP)"; @@ -68,3 +68,7 @@ &usb1 { &usb2 { status = "okay"; }; + +&nand { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld11-ref.dts b/arch/arm64/boot/dts/socionext/uniphier-ld11-ref.dts index cc8ebe34c27c..ffb473ad2e0f 100644 --- a/arch/arm64/boot/dts/socionext/uniphier-ld11-ref.dts +++ b/arch/arm64/boot/dts/socionext/uniphier-ld11-ref.dts @@ -8,9 +8,9 @@ */ /dts-v1/; -/include/ "uniphier-ld11.dtsi" -/include/ "uniphier-ref-daughter.dtsi" -/include/ "uniphier-support-card.dtsi" +#include "uniphier-ld11.dtsi" +#include "uniphier-ref-daughter.dtsi" +#include "uniphier-support-card.dtsi" / { model = "UniPhier LD11 Reference Board"; diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi index bdce5b89baec..ee4aff53a5f5 100644 --- a/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi +++ b/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi @@ -150,6 +150,17 @@ serial3: serial@54006b00 { clocks = <&peri_clk 3>; }; + adamv@57920000 { + compatible = "socionext,uniphier-ld11-adamv", + "simple-mfd", "syscon"; + reg = <0x57920000 0x1000>; + + adamv_rst: reset { + compatible = "socionext,uniphier-ld11-adamv-reset"; + #reset-cells = <1>; + }; + }; + i2c0: i2c@58780000 { compatible = "socionext,uniphier-fi2c"; status = "disabled"; @@ -344,6 +355,13 @@ pinctrl: pinctrl { }; }; + aidet: aidet@5fc20000 { + compatible = "socionext,uniphier-ld11-aidet"; + reg = <0x5fc20000 0x200>; + interrupt-controller; + #interrupt-cells = <2>; + }; + gic: interrupt-controller@5fe00000 { compatible = "arm,gic-v3"; reg = <0x5fe00000 0x10000>, /* GICD */ @@ -367,8 +385,23 @@ sys_rst: reset { compatible = "socionext,uniphier-ld11-reset"; #reset-cells = <1>; }; + + watchdog { + compatible = "socionext,uniphier-wdt"; + }; + }; + + nand: nand@68000000 { + compatible = "socionext,uniphier-denali-nand-v5b"; + status = "disabled"; + reg-names = "nand_data", "denali_reg"; + reg = <0x68000000 0x20>, <0x68100000 0x1000>; + interrupts = <0 65 4>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_nand>; + clocks = <&sys_clk 2>; }; }; }; -/include/ "uniphier-pinctrl.dtsi" +#include "uniphier-pinctrl.dtsi" diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts b/arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts index 9f620d4101b5..fc2bc9d75d35 100644 --- a/arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts +++ b/arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts @@ -9,7 +9,7 @@ */ /dts-v1/; -/include/ "uniphier-ld20.dtsi" +#include "uniphier-ld20.dtsi" / { model = "UniPhier LD20 Global Board (REF_LD20_GP)"; @@ -50,3 +50,7 @@ &serial1 { &i2c0 { status = "okay"; }; + +&nand { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20-ref.dts b/arch/arm64/boot/dts/socionext/uniphier-ld20-ref.dts index 494166aee24c..1ca0c8620dc5 100644 --- a/arch/arm64/boot/dts/socionext/uniphier-ld20-ref.dts +++ b/arch/arm64/boot/dts/socionext/uniphier-ld20-ref.dts @@ -8,9 +8,9 @@ */ /dts-v1/; -/include/ "uniphier-ld20.dtsi" -/include/ "uniphier-ref-daughter.dtsi" -/include/ "uniphier-support-card.dtsi" +#include 
"uniphier-ld20.dtsi" +#include "uniphier-ref-daughter.dtsi" +#include "uniphier-support-card.dtsi" / { model = "UniPhier LD20 Reference Board"; diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi index de1e75362817..a29c279b6e8e 100644 --- a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi +++ b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi @@ -219,6 +219,17 @@ serial3: serial@54006b00 { clocks = <&peri_clk 3>; }; + adamv@57920000 { + compatible = "socionext,uniphier-ld20-adamv", + "simple-mfd", "syscon"; + reg = <0x57920000 0x1000>; + + adamv_rst: reset { + compatible = "socionext,uniphier-ld20-adamv-reset"; + #reset-cells = <1>; + }; + }; + i2c0: i2c@58780000 { compatible = "socionext,uniphier-fi2c"; status = "disabled"; @@ -309,7 +320,7 @@ smpctrl@59801000 { sdctrl@59810000 { compatible = "socionext,uniphier-ld20-sdctrl", "simple-mfd", "syscon"; - reg = <0x59810000 0x800>; + reg = <0x59810000 0x400>; sd_clk: clock { compatible = "socionext,uniphier-ld20-sd-clock"; @@ -365,6 +376,13 @@ pinctrl: pinctrl { }; }; + aidet: aidet@5fc20000 { + compatible = "socionext,uniphier-ld20-aidet"; + reg = <0x5fc20000 0x200>; + interrupt-controller; + #interrupt-cells = <2>; + }; + gic: interrupt-controller@5fe00000 { compatible = "arm,gic-v3"; reg = <0x5fe00000 0x10000>, /* GICD */ @@ -388,8 +406,23 @@ sys_rst: reset { compatible = "socionext,uniphier-ld20-reset"; #reset-cells = <1>; }; + + watchdog { + compatible = "socionext,uniphier-wdt"; + }; + }; + + nand: nand@68000000 { + compatible = "socionext,uniphier-denali-nand-v5b"; + status = "disabled"; + reg-names = "nand_data", "denali_reg"; + reg = <0x68000000 0x20>, <0x68100000 0x1000>; + interrupts = <0 65 4>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_nand>; + clocks = <&sys_clk 2>; }; }; }; -/include/ "uniphier-pinctrl.dtsi" +#include "uniphier-pinctrl.dtsi" diff --git a/arch/arm64/boot/dts/socionext/uniphier-pinctrl.dtsi b/arch/arm64/boot/dts/socionext/uniphier-pinctrl.dtsi deleted file mode 120000 index f42fb6f38bd3..000000000000 --- a/arch/arm64/boot/dts/socionext/uniphier-pinctrl.dtsi +++ /dev/null @@ -1 +0,0 @@ -../../../../arm/boot/dts/uniphier-pinctrl.dtsi \ No newline at end of file diff --git a/arch/arm64/boot/dts/socionext/uniphier-pinctrl.dtsi b/arch/arm64/boot/dts/socionext/uniphier-pinctrl.dtsi new file mode 100644 index 000000000000..9caabbb8bae3 --- /dev/null +++ b/arch/arm64/boot/dts/socionext/uniphier-pinctrl.dtsi @@ -0,0 +1 @@ +#include diff --git a/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref.dts b/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref.dts new file mode 100644 index 000000000000..d65f746a3f9d --- /dev/null +++ b/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref.dts @@ -0,0 +1,62 @@ +/* + * Device Tree Source for UniPhier PXs3 Reference Board + * + * Copyright (C) 2017 Socionext Inc. 
+ * Author: Masahiro Yamada + * + * SPDX-License-Identifier: (GPL-2.0+ OR MIT) + */ + +/dts-v1/; +#include "uniphier-pxs3.dtsi" +#include "uniphier-support-card.dtsi" + +/ { + model = "UniPhier PXs3 Reference Board"; + compatible = "socionext,uniphier-pxs3-ref", "socionext,uniphier-pxs3"; + + chosen { + stdout-path = "serial0:115200n8"; + }; + + aliases { + serial0 = &serial0; + serial1 = &serial1; + serial2 = &serial2; + serial3 = &serial3; + i2c0 = &i2c0; + i2c1 = &i2c1; + i2c2 = &i2c2; + i2c3 = &i2c3; + i2c6 = &i2c6; + }; + + memory@80000000 { + device_type = "memory"; + reg = <0 0x80000000 0 0xa0000000>; + }; +}; + +ðsc { + interrupts = <0 52 4>; +}; + +&serial0 { + status = "okay"; +}; + +&i2c0 { + status = "okay"; +}; + +&i2c1 { + status = "okay"; +}; + +&i2c2 { + status = "okay"; +}; + +&i2c3 { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi new file mode 100644 index 000000000000..384729fa740f --- /dev/null +++ b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi @@ -0,0 +1,367 @@ +/* + * Device Tree Source for UniPhier PXs3 SoC + * + * Copyright (C) 2017 Socionext Inc. + * Author: Masahiro Yamada + * + * SPDX-License-Identifier: (GPL-2.0+ OR MIT) + */ + +/memreserve/ 0x80000000 0x02000000; + +/ { + compatible = "socionext,uniphier-pxs3"; + #address-cells = <2>; + #size-cells = <2>; + interrupt-parent = <&gic>; + + cpus { + #address-cells = <2>; + #size-cells = <0>; + + cpu-map { + cluster0 { + core0 { + cpu = <&cpu0>; + }; + core1 { + cpu = <&cpu1>; + }; + core2 { + cpu = <&cpu2>; + }; + core3 { + cpu = <&cpu3>; + }; + }; + }; + + cpu0: cpu@0 { + device_type = "cpu"; + compatible = "arm,cortex-a53", "arm,armv8"; + reg = <0 0x000>; + clocks = <&sys_clk 33>; + enable-method = "psci"; + operating-points-v2 = <&cluster0_opp>; + }; + + cpu1: cpu@1 { + device_type = "cpu"; + compatible = "arm,cortex-a53", "arm,armv8"; + reg = <0 0x001>; + clocks = <&sys_clk 33>; + enable-method = "psci"; + operating-points-v2 = <&cluster0_opp>; + }; + + cpu2: cpu@2 { + device_type = "cpu"; + compatible = "arm,cortex-a53", "arm,armv8"; + reg = <0 0x002>; + clocks = <&sys_clk 33>; + enable-method = "psci"; + operating-points-v2 = <&cluster0_opp>; + }; + + cpu3: cpu@3 { + device_type = "cpu"; + compatible = "arm,cortex-a53", "arm,armv8"; + reg = <0 0x003>; + clocks = <&sys_clk 33>; + enable-method = "psci"; + operating-points-v2 = <&cluster0_opp>; + }; + }; + + cluster0_opp: opp_table { + compatible = "operating-points-v2"; + opp-shared; + + opp-250000000 { + opp-hz = /bits/ 64 <250000000>; + clock-latency-ns = <300>; + }; + opp-325000000 { + opp-hz = /bits/ 64 <325000000>; + clock-latency-ns = <300>; + }; + opp-500000000 { + opp-hz = /bits/ 64 <500000000>; + clock-latency-ns = <300>; + }; + opp-650000000 { + opp-hz = /bits/ 64 <650000000>; + clock-latency-ns = <300>; + }; + opp-666667000 { + opp-hz = /bits/ 64 <666667000>; + clock-latency-ns = <300>; + }; + opp-866667000 { + opp-hz = /bits/ 64 <866667000>; + clock-latency-ns = <300>; + }; + opp-1000000000 { + opp-hz = /bits/ 64 <1000000000>; + clock-latency-ns = <300>; + }; + opp-1300000000 { + opp-hz = /bits/ 64 <1300000000>; + clock-latency-ns = <300>; + }; + }; + + psci { + compatible = "arm,psci-1.0"; + method = "smc"; + }; + + clocks { + refclk: ref { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <25000000>; + }; + }; + + timer { + compatible = "arm,armv8-timer"; + interrupts = <1 13 4>, + <1 14 4>, + <1 11 4>, + <1 10 4>; + }; + + soc@0 { 
+ compatible = "simple-bus"; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 0 0 0xffffffff>; + + serial0: serial@54006800 { + compatible = "socionext,uniphier-uart"; + status = "disabled"; + reg = <0x54006800 0x40>; + interrupts = <0 33 4>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart0>; + clocks = <&peri_clk 0>; + }; + + serial1: serial@54006900 { + compatible = "socionext,uniphier-uart"; + status = "disabled"; + reg = <0x54006900 0x40>; + interrupts = <0 35 4>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart1>; + clocks = <&peri_clk 1>; + }; + + serial2: serial@54006a00 { + compatible = "socionext,uniphier-uart"; + status = "disabled"; + reg = <0x54006a00 0x40>; + interrupts = <0 37 4>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart2>; + clocks = <&peri_clk 2>; + }; + + serial3: serial@54006b00 { + compatible = "socionext,uniphier-uart"; + status = "disabled"; + reg = <0x54006b00 0x40>; + interrupts = <0 177 4>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart3>; + clocks = <&peri_clk 3>; + }; + + i2c0: i2c@58780000 { + compatible = "socionext,uniphier-fi2c"; + status = "disabled"; + reg = <0x58780000 0x80>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = <0 41 4>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_i2c0>; + clocks = <&peri_clk 4>; + clock-frequency = <100000>; + }; + + i2c1: i2c@58781000 { + compatible = "socionext,uniphier-fi2c"; + status = "disabled"; + reg = <0x58781000 0x80>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = <0 42 4>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_i2c1>; + clocks = <&peri_clk 5>; + clock-frequency = <100000>; + }; + + i2c2: i2c@58782000 { + compatible = "socionext,uniphier-fi2c"; + status = "disabled"; + reg = <0x58782000 0x80>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = <0 43 4>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_i2c2>; + clocks = <&peri_clk 6>; + clock-frequency = <100000>; + }; + + i2c3: i2c@58783000 { + compatible = "socionext,uniphier-fi2c"; + status = "disabled"; + reg = <0x58783000 0x80>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = <0 44 4>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_i2c3>; + clocks = <&peri_clk 7>; + clock-frequency = <100000>; + }; + + /* chip-internal connection for HDMI */ + i2c6: i2c@58786000 { + compatible = "socionext,uniphier-fi2c"; + reg = <0x58786000 0x80>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = <0 26 4>; + clocks = <&peri_clk 10>; + clock-frequency = <400000>; + }; + + system_bus: system-bus@58c00000 { + compatible = "socionext,uniphier-system-bus"; + status = "disabled"; + reg = <0x58c00000 0x400>; + #address-cells = <2>; + #size-cells = <1>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_system_bus>; + }; + + smpctrl@59801000 { + compatible = "socionext,uniphier-smpctrl"; + reg = <0x59801000 0x400>; + }; + + sdctrl@59810000 { + compatible = "socionext,uniphier-pxs3-sdctrl", + "simple-mfd", "syscon"; + reg = <0x59810000 0x400>; + + sd_clk: clock { + compatible = "socionext,uniphier-pxs3-sd-clock"; + #clock-cells = <1>; + }; + + sd_rst: reset { + compatible = "socionext,uniphier-pxs3-sd-reset"; + #reset-cells = <1>; + }; + }; + + perictrl@59820000 { + compatible = "socionext,uniphier-pxs3-perictrl", + "simple-mfd", "syscon"; + reg = <0x59820000 0x200>; + + peri_clk: clock { + compatible = "socionext,uniphier-pxs3-peri-clock"; + #clock-cells = <1>; + }; + + peri_rst: reset { + compatible = "socionext,uniphier-pxs3-peri-reset"; + 
#reset-cells = <1>; + }; + }; + + emmc: sdhc@5a000000 { + compatible = "socionext,uniphier-sd4hc", "cdns,sd4hc"; + reg = <0x5a000000 0x400>; + interrupts = <0 78 4>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_emmc>; + clocks = <&sys_clk 4>; + bus-width = <8>; + mmc-ddr-1_8v; + mmc-hs200-1_8v; + cdns,phy-input-delay-legacy = <4>; + cdns,phy-input-delay-mmc-highspeed = <2>; + cdns,phy-input-delay-mmc-ddr = <3>; + cdns,phy-dll-delay-sdclk = <21>; + cdns,phy-dll-delay-sdclk-hsmmc = <21>; + }; + + soc-glue@5f800000 { + compatible = "socionext,uniphier-pxs3-soc-glue", + "simple-mfd", "syscon"; + reg = <0x5f800000 0x2000>; + + pinctrl: pinctrl { + compatible = "socionext,uniphier-pxs3-pinctrl"; + }; + }; + + aidet: aidet@5fc20000 { + compatible = "socionext,uniphier-pxs3-aidet"; + reg = <0x5fc20000 0x200>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + gic: interrupt-controller@5fe00000 { + compatible = "arm,gic-v3"; + reg = <0x5fe00000 0x10000>, /* GICD */ + <0x5fe80000 0x80000>; /* GICR */ + interrupt-controller; + #interrupt-cells = <3>; + interrupts = <1 9 4>; + }; + + sysctrl@61840000 { + compatible = "socionext,uniphier-pxs3-sysctrl", + "simple-mfd", "syscon"; + reg = <0x61840000 0x10000>; + + sys_clk: clock { + compatible = "socionext,uniphier-pxs3-clock"; + #clock-cells = <1>; + }; + + sys_rst: reset { + compatible = "socionext,uniphier-pxs3-reset"; + #reset-cells = <1>; + }; + + watchdog { + compatible = "socionext,uniphier-wdt"; + }; + }; + + nand: nand@68000000 { + compatible = "socionext,uniphier-denali-nand-v5b"; + status = "disabled"; + reg-names = "nand_data", "denali_reg"; + reg = <0x68000000 0x20>, <0x68100000 0x1000>; + interrupts = <0 65 4>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_nand>; + clocks = <&sys_clk 2>; + }; + }; +}; + +#include "uniphier-pinctrl.dtsi" diff --git a/arch/arm64/boot/dts/socionext/uniphier-ref-daughter.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ref-daughter.dtsi deleted file mode 120000 index 4685a8d89cba..000000000000 --- a/arch/arm64/boot/dts/socionext/uniphier-ref-daughter.dtsi +++ /dev/null @@ -1 +0,0 @@ -../../../../arm/boot/dts/uniphier-ref-daughter.dtsi \ No newline at end of file diff --git a/arch/arm64/boot/dts/socionext/uniphier-ref-daughter.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ref-daughter.dtsi new file mode 100644 index 000000000000..e66d999d9f5d --- /dev/null +++ b/arch/arm64/boot/dts/socionext/uniphier-ref-daughter.dtsi @@ -0,0 +1 @@ +#include diff --git a/arch/arm64/boot/dts/socionext/uniphier-support-card.dtsi b/arch/arm64/boot/dts/socionext/uniphier-support-card.dtsi deleted file mode 120000 index 1246db9be2a1..000000000000 --- a/arch/arm64/boot/dts/socionext/uniphier-support-card.dtsi +++ /dev/null @@ -1 +0,0 @@ -../../../../arm/boot/dts/uniphier-support-card.dtsi \ No newline at end of file diff --git a/arch/arm64/boot/dts/socionext/uniphier-support-card.dtsi b/arch/arm64/boot/dts/socionext/uniphier-support-card.dtsi new file mode 100644 index 000000000000..28c5b4ed1d95 --- /dev/null +++ b/arch/arm64/boot/dts/socionext/uniphier-support-card.dtsi @@ -0,0 +1 @@ +#include diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-ep108-clk.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp-ep108-clk.dtsi index cdc6a437dcc7..b87b8316f4ac 100644 --- a/arch/arm64/boot/dts/xilinx/zynqmp-ep108-clk.dtsi +++ b/arch/arm64/boot/dts/xilinx/zynqmp-ep108-clk.dtsi @@ -11,7 +11,7 @@ * the License, or (at your option) any later version. 
*/ -&amba { +/ { misc_clk: misc_clk { compatible = "fixed-clock"; #clock-cells = <0>; @@ -29,12 +29,60 @@ sata_clk: sata_clk { #clock-cells = <0>; clock-frequency = <75000000>; }; + + clk100: clk100 { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <100000000>; + }; + + clk600: clk600 { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <600000000>; + }; }; &can0 { clocks = <&misc_clk &misc_clk>; }; +&can1 { + clocks = <&misc_clk &misc_clk>; +}; + +&fpd_dma_chan1 { + clocks = <&clk600>, <&clk100>; +}; + +&fpd_dma_chan2 { + clocks = <&clk600>, <&clk100>; +}; + +&fpd_dma_chan3 { + clocks = <&clk600>, <&clk100>; +}; + +&fpd_dma_chan4 { + clocks = <&clk600>, <&clk100>; +}; + +&fpd_dma_chan5 { + clocks = <&clk600>, <&clk100>; +}; + +&fpd_dma_chan6 { + clocks = <&clk600>, <&clk100>; +}; + +&fpd_dma_chan7 { + clocks = <&clk600>, <&clk100>; +}; + +&fpd_dma_chan8 { + clocks = <&clk600>, <&clk100>; +}; + &gem0 { clocks = <&misc_clk>, <&misc_clk>, <&misc_clk>; }; diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts b/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts index ef1b9e573af0..bf552674a834 100644 --- a/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts +++ b/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts @@ -13,13 +13,15 @@ /dts-v1/; -/include/ "zynqmp.dtsi" -/include/ "zynqmp-ep108-clk.dtsi" +#include "zynqmp.dtsi" +#include "zynqmp-ep108-clk.dtsi" / { model = "ZynqMP EP108"; aliases { + mmc0 = &sdhci0; + mmc1 = &sdhci1; serial0 = &uart0; }; @@ -37,6 +39,10 @@ &can0 { status = "okay"; }; +&can1 { + status = "okay"; +}; + &gem0 { status = "okay"; phy-handle = <&phy0>; @@ -55,7 +61,7 @@ &i2c0 { status = "okay"; clock-frequency = <400000>; eeprom@54 { - compatible = "at,24c64"; + compatible = "atmel,24c64"; reg = <0x54>; }; }; @@ -64,7 +70,7 @@ &i2c1 { status = "okay"; clock-frequency = <400000>; eeprom@55 { - compatible = "at,24c64"; + compatible = "atmel,24c64"; reg = <0x55>; }; }; @@ -92,7 +98,7 @@ spi0_flash0: spi0_flash0@0 { spi-max-frequency = <50000000>; reg = <0>; - spi0_flash0@00000000 { + spi0_flash0@0 { label = "spi0_flash0"; reg = <0x0 0x100000>; }; @@ -109,7 +115,7 @@ spi1_flash0: spi1_flash0@0 { spi-max-frequency = <50000000>; reg = <0>; - spi1_flash0@00000000 { + spi1_flash0@0 { label = "spi1_flash0"; reg = <0x0 0x100000>; }; diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi index 54dc28351c8c..7665fbddff28 100644 --- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi +++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi @@ -20,33 +20,84 @@ cpus { #address-cells = <1>; #size-cells = <0>; - cpu@0 { + cpu0: cpu@0 { compatible = "arm,cortex-a53", "arm,armv8"; device_type = "cpu"; enable-method = "psci"; + operating-points-v2 = <&cpu_opp_table>; reg = <0x0>; + cpu-idle-states = <&CPU_SLEEP_0>; }; - cpu@1 { + cpu1: cpu@1 { compatible = "arm,cortex-a53", "arm,armv8"; device_type = "cpu"; enable-method = "psci"; reg = <0x1>; + operating-points-v2 = <&cpu_opp_table>; + cpu-idle-states = <&CPU_SLEEP_0>; }; - cpu@2 { + cpu2: cpu@2 { compatible = "arm,cortex-a53", "arm,armv8"; device_type = "cpu"; enable-method = "psci"; reg = <0x2>; + operating-points-v2 = <&cpu_opp_table>; + cpu-idle-states = <&CPU_SLEEP_0>; }; - cpu@3 { + cpu3: cpu@3 { compatible = "arm,cortex-a53", "arm,armv8"; device_type = "cpu"; enable-method = "psci"; reg = <0x3>; + operating-points-v2 = <&cpu_opp_table>; + cpu-idle-states = <&CPU_SLEEP_0>; }; + + idle-states { + entry-method = "arm,psci"; + + CPU_SLEEP_0: cpu-sleep-0 { + compatible = "arm,idle-state"; + 
arm,psci-suspend-param = <0x40000000>; + local-timer-stop; + entry-latency-us = <300>; + exit-latency-us = <600>; + min-residency-us = <10000>; + }; + }; + }; + + cpu_opp_table: cpu_opp_table { + compatible = "operating-points-v2"; + opp-shared; + opp00 { + opp-hz = /bits/ 64 <1199999988>; + opp-microvolt = <1000000>; + clock-latency-ns = <500000>; + }; + opp01 { + opp-hz = /bits/ 64 <599999994>; + opp-microvolt = <1000000>; + clock-latency-ns = <500000>; + }; + opp02 { + opp-hz = /bits/ 64 <399999996>; + opp-microvolt = <1000000>; + clock-latency-ns = <500000>; + }; + opp03 { + opp-hz = /bits/ 64 <299999997>; + opp-microvolt = <1000000>; + clock-latency-ns = <500000>; + }; + }; + + dcc: dcc { + compatible = "arm,dcc"; + status = "disabled"; }; pmu { @@ -119,6 +170,190 @@ can1: can@ff070000 { rx-fifo-depth = <0x40>; }; + cci: cci@fd6e0000 { + compatible = "arm,cci-400"; + reg = <0x0 0xfd6e0000 0x0 0x9000>; + ranges = <0x0 0x0 0xfd6e0000 0x10000>; + #address-cells = <1>; + #size-cells = <1>; + + pmu@9000 { + compatible = "arm,cci-400-pmu,r1"; + reg = <0x9000 0x5000>; + interrupt-parent = <&gic>; + interrupts = <0 123 4>, + <0 123 4>, + <0 123 4>, + <0 123 4>, + <0 123 4>; + }; + }; + + /* GDMA */ + fpd_dma_chan1: dma@fd500000 { + status = "disabled"; + compatible = "xlnx,zynqmp-dma-1.0"; + reg = <0x0 0xfd500000 0x0 0x1000>; + interrupt-parent = <&gic>; + interrupts = <0 124 4>; + clock-names = "clk_main", "clk_apb"; + xlnx,bus-width = <128>; + }; + + fpd_dma_chan2: dma@fd510000 { + status = "disabled"; + compatible = "xlnx,zynqmp-dma-1.0"; + reg = <0x0 0xfd510000 0x0 0x1000>; + interrupt-parent = <&gic>; + interrupts = <0 125 4>; + clock-names = "clk_main", "clk_apb"; + xlnx,bus-width = <128>; + }; + + fpd_dma_chan3: dma@fd520000 { + status = "disabled"; + compatible = "xlnx,zynqmp-dma-1.0"; + reg = <0x0 0xfd520000 0x0 0x1000>; + interrupt-parent = <&gic>; + interrupts = <0 126 4>; + clock-names = "clk_main", "clk_apb"; + xlnx,bus-width = <128>; + }; + + fpd_dma_chan4: dma@fd530000 { + status = "disabled"; + compatible = "xlnx,zynqmp-dma-1.0"; + reg = <0x0 0xfd530000 0x0 0x1000>; + interrupt-parent = <&gic>; + interrupts = <0 127 4>; + clock-names = "clk_main", "clk_apb"; + xlnx,bus-width = <128>; + }; + + fpd_dma_chan5: dma@fd540000 { + status = "disabled"; + compatible = "xlnx,zynqmp-dma-1.0"; + reg = <0x0 0xfd540000 0x0 0x1000>; + interrupt-parent = <&gic>; + interrupts = <0 128 4>; + clock-names = "clk_main", "clk_apb"; + xlnx,bus-width = <128>; + }; + + fpd_dma_chan6: dma@fd550000 { + status = "disabled"; + compatible = "xlnx,zynqmp-dma-1.0"; + reg = <0x0 0xfd550000 0x0 0x1000>; + interrupt-parent = <&gic>; + interrupts = <0 129 4>; + clock-names = "clk_main", "clk_apb"; + xlnx,bus-width = <128>; + }; + + fpd_dma_chan7: dma@fd560000 { + status = "disabled"; + compatible = "xlnx,zynqmp-dma-1.0"; + reg = <0x0 0xfd560000 0x0 0x1000>; + interrupt-parent = <&gic>; + interrupts = <0 130 4>; + clock-names = "clk_main", "clk_apb"; + xlnx,bus-width = <128>; + }; + + fpd_dma_chan8: dma@fd570000 { + status = "disabled"; + compatible = "xlnx,zynqmp-dma-1.0"; + reg = <0x0 0xfd570000 0x0 0x1000>; + interrupt-parent = <&gic>; + interrupts = <0 131 4>; + clock-names = "clk_main", "clk_apb"; + xlnx,bus-width = <128>; + }; + + /* LPDDMA default allows only secured access. inorder to enable + * These dma channels, Users should ensure that these dma + * Channels are allowed for non secure access. 
+ */ + lpd_dma_chan1: dma@ffa80000 { + status = "disabled"; + compatible = "xlnx,zynqmp-dma-1.0"; + reg = <0x0 0xffa80000 0x0 0x1000>; + interrupt-parent = <&gic>; + interrupts = <0 77 4>; + clock-names = "clk_main", "clk_apb"; + xlnx,bus-width = <64>; + }; + + lpd_dma_chan2: dma@ffa90000 { + status = "disabled"; + compatible = "xlnx,zynqmp-dma-1.0"; + reg = <0x0 0xffa90000 0x0 0x1000>; + interrupt-parent = <&gic>; + interrupts = <0 78 4>; + clock-names = "clk_main", "clk_apb"; + xlnx,bus-width = <64>; + }; + + lpd_dma_chan3: dma@ffaa0000 { + status = "disabled"; + compatible = "xlnx,zynqmp-dma-1.0"; + reg = <0x0 0xffaa0000 0x0 0x1000>; + interrupt-parent = <&gic>; + interrupts = <0 79 4>; + clock-names = "clk_main", "clk_apb"; + xlnx,bus-width = <64>; + }; + + lpd_dma_chan4: dma@ffab0000 { + status = "disabled"; + compatible = "xlnx,zynqmp-dma-1.0"; + reg = <0x0 0xffab0000 0x0 0x1000>; + interrupt-parent = <&gic>; + interrupts = <0 80 4>; + clock-names = "clk_main", "clk_apb"; + xlnx,bus-width = <64>; + }; + + lpd_dma_chan5: dma@ffac0000 { + status = "disabled"; + compatible = "xlnx,zynqmp-dma-1.0"; + reg = <0x0 0xffac0000 0x0 0x1000>; + interrupt-parent = <&gic>; + interrupts = <0 81 4>; + clock-names = "clk_main", "clk_apb"; + xlnx,bus-width = <64>; + }; + + lpd_dma_chan6: dma@ffad0000 { + status = "disabled"; + compatible = "xlnx,zynqmp-dma-1.0"; + reg = <0x0 0xffad0000 0x0 0x1000>; + interrupt-parent = <&gic>; + interrupts = <0 82 4>; + clock-names = "clk_main", "clk_apb"; + xlnx,bus-width = <64>; + }; + + lpd_dma_chan7: dma@ffae0000 { + status = "disabled"; + compatible = "xlnx,zynqmp-dma-1.0"; + reg = <0x0 0xffae0000 0x0 0x1000>; + interrupt-parent = <&gic>; + interrupts = <0 83 4>; + clock-names = "clk_main", "clk_apb"; + xlnx,bus-width = <64>; + }; + + lpd_dma_chan8: dma@ffaf0000 { + status = "disabled"; + compatible = "xlnx,zynqmp-dma-1.0"; + reg = <0x0 0xffaf0000 0x0 0x1000>; + interrupt-parent = <&gic>; + interrupts = <0 84 4>; + clock-names = "clk_main", "clk_apb"; + xlnx,bus-width = <64>; + }; + gem0: ethernet@ff0b0000 { compatible = "cdns,gem"; status = "disabled"; @@ -215,12 +450,9 @@ pcie: pcie@fd0e0000 { <0x0 0xfd480000 0x0 0x1000>, <0x80 0x00000000 0x0 0x1000000>; reg-names = "breg", "pcireg", "cfg"; - ranges = <0x02000000 0x00000000 0xe0000000 0x00000000 - 0xe0000000 0x00000000 0x10000000 - /* non-prefetchable memory */ - 0x43000000 0x00000006 0x00000000 0x00000006 - 0x00000000 0x00000002 0x00000000>; - /* prefetchable memory */ + ranges = <0x02000000 0x00000000 0xe0000000 0x00000000 0xe0000000 0x00000000 0x10000000 /* non-prefetchable memory */ + 0x43000000 0x00000006 0x00000000 0x00000006 0x00000000 0x00000002 0x00000000>;/* prefetchable memory */ + bus-range = <0x00 0xff>; interrupt-map-mask = <0x0 0x0 0x0 0x7>; interrupt-map = <0x0 0x0 0x0 0x1 &pcie_intc 0x1>, <0x0 0x0 0x0 0x2 &pcie_intc 0x2>, @@ -233,6 +465,16 @@ pcie_intc: legacy-interrupt-controller { }; }; + rtc: rtc@ffa60000 { + compatible = "xlnx,zynqmp-rtc"; + status = "disabled"; + reg = <0x0 0xffa60000 0x0 0x100>; + interrupt-parent = <&gic>; + interrupts = <0 26 4>, <0 27 4>; + interrupt-names = "alarm", "sec"; + calibration = <0x8000>; + }; + sata: ahci@fd0c0000 { compatible = "ceva,ahci-1v84"; status = "disabled"; @@ -262,13 +504,14 @@ sdhci1: sdhci@ff170000 { smmu: smmu@fd800000 { compatible = "arm,mmu-500"; reg = <0x0 0xfd800000 0x0 0x20000>; + status = "disabled"; #global-interrupts = <1>; interrupt-parent = <&gic>; - interrupts = <0 157 4>, - <0 157 4>, <0 157 4>, <0 157 4>, <0 157 4>, - <0 157 4>, 
<0 157 4>, <0 157 4>, <0 157 4>, - <0 157 4>, <0 157 4>, <0 157 4>, <0 157 4>, - <0 157 4>, <0 157 4>, <0 157 4>, <0 157 4>; + interrupts = <0 155 4>, + <0 155 4>, <0 155 4>, <0 155 4>, <0 155 4>, + <0 155 4>, <0 155 4>, <0 155 4>, <0 155 4>, + <0 155 4>, <0 155 4>, <0 155 4>, <0 155 4>, + <0 155 4>, <0 155 4>, <0 155 4>, <0 155 4>; }; spi0: spi@ff040000 { @@ -330,7 +573,7 @@ ttc3: timer@ff140000 { }; uart0: serial@ff000000 { - compatible = "cdns,uart-r1p8"; + compatible = "cdns,uart-r1p12", "xlnx,xuartps"; status = "disabled"; interrupt-parent = <&gic>; interrupts = <0 21 4>; @@ -339,7 +582,7 @@ uart0: serial@ff000000 { }; uart1: serial@ff010000 { - compatible = "cdns,uart-r1p8"; + compatible = "cdns,uart-r1p12", "xlnx,xuartps"; status = "disabled"; interrupt-parent = <&gic>; interrupts = <0 22 4>; diff --git a/arch/arm64/boot/dts/zte/Makefile b/arch/arm64/boot/dts/zte/Makefile index 667806620f59..d86c4def6bc9 100644 --- a/arch/arm64/boot/dts/zte/Makefile +++ b/arch/arm64/boot/dts/zte/Makefile @@ -1,4 +1,5 @@ dtb-$(CONFIG_ARCH_ZX) += zx296718-evb.dtb +dtb-$(CONFIG_ARCH_ZX) += zx296718-pcbox.dtb always := $(dtb-y) subdir-y := $(dts-dirs) diff --git a/arch/arm64/boot/dts/zte/zx296718-evb.dts b/arch/arm64/boot/dts/zte/zx296718-evb.dts index bb900d2bbcfb..cb2519ecd724 100644 --- a/arch/arm64/boot/dts/zte/zx296718-evb.dts +++ b/arch/arm64/boot/dts/zte/zx296718-evb.dts @@ -57,16 +57,28 @@ memory@40000000 { reg = <0x40000000 0x40000000>; }; - sound0 { - compatible = "simple-audio-card"; - simple-audio-card,name = "zx_snd_spdif0"; + sound-spdif0 { + compatible = "audio-graph-card"; + dais = <&spdif0_port>; + }; - simple-audio-card,cpu { - sound-dai = <&spdif0>; - }; + sound-i2s0 { + compatible = "audio-graph-card"; + dais = <&i2s0_port>; + pinctrl-names = "default"; + pinctrl-0 = <&lifier_pins>; + pa-gpios = <&bgpio4 0 GPIO_ACTIVE_HIGH>; + widgets = "Line", "Line Out Jack"; + routing = "Amplifier", "LINEOUTL", + "Amplifier", "LINEOUTR", + "Line Out Jack", "Amplifier"; + }; +}; - simple-audio-card,codec { - sound-dai = <&hdmi>; +&aud96p22 { + port { + aud96p22_endpoint: endpoint { + remote-endpoint = <&i2s0_endpoint>; }; }; }; @@ -77,6 +89,36 @@ &emmc { &hdmi { status = "okay"; + + port { + hdmi_endpoint: endpoint { + remote-endpoint = <&spdif0_endpoint>; + }; + }; +}; + +&i2c0 { + status = "okay"; +}; + +&i2s0 { + status = "okay"; + + i2s0_port: port { + i2s0_endpoint: endpoint { + remote-endpoint = <&aud96p22_endpoint>; + dai-format = "i2s"; + frame-master; + bitclock-master; + }; + }; +}; + +&pmm { + amplifier_pins: amplifier { + pins = "TSI3_DATA"; + function = "BGPIO"; + }; }; &sd1 { @@ -85,6 +127,16 @@ &sd1 { &spdif0 { status = "okay"; + + spdif0_port: port { + spdif0_endpoint: endpoint { + remote-endpoint = <&hdmi_endpoint>; + }; + }; +}; + +&tvenc { + status = "okay"; }; &uart0 { diff --git a/arch/arm64/boot/dts/zte/zx296718-pcbox.dts b/arch/arm64/boot/dts/zte/zx296718-pcbox.dts new file mode 100644 index 000000000000..e02509f7082b --- /dev/null +++ b/arch/arm64/boot/dts/zte/zx296718-pcbox.dts @@ -0,0 +1,143 @@ +/* + * Copyright (C) 2017 Sanechips Technology Co., Ltd. + * Copyright 2017 Linaro Ltd. 
+ * + * SPDX-License-Identifier: (GPL-2.0+ OR MIT) + */ + +/dts-v1/; +#include "zx296718.dtsi" +#include + +/ { + model = "ZTE ZX296718 PCBOX Board"; + compatible = "zte,zx296718-pcbox", "zte,zx296718"; + + chosen { + stdout-path = "serial0:115200n8"; + }; + + memory@80000000 { + device_type = "memory"; + reg = <0x80000000 0x80000000>; + }; + + a53_vdd0v9: regulator-a53 { + compatible = "pwm-regulator"; + pwms = <&pwm 3 1250 PWM_POLARITY_INVERTED>; + regulator-name = "A53_VDD0V9"; + regulator-min-microvolt = <855000>; + regulator-max-microvolt = <1183000>; + pwm-dutycycle-unit = <100>; + pwm-dutycycle-range = <0 100>; + regulator-always-on; + regulator-boot-on; + }; + + sound-spdif0 { + compatible = "audio-graph-card"; + dais = <&spdif0_port>; + }; + + sound-i2s0 { + compatible = "audio-graph-card"; + dais = <&i2s0_port>; + }; +}; + +&aud96p22 { + port { + aud96p22_endpoint: endpoint { + remote-endpoint = <&i2s0_endpoint>; + }; + }; +}; + +&cpu0 { + cpu-supply = <&a53_vdd0v9>; +}; + +&emmc { + status = "okay"; +}; + +&hdmi { + status = "disabled"; + + port { + hdmi_endpoint: endpoint { + remote-endpoint = <&spdif0_endpoint>; + }; + }; +}; + +&i2c0 { + status = "okay"; +}; + +&i2s0 { + status = "okay"; + + i2s0_port: port { + i2s0_endpoint: endpoint { + remote-endpoint = <&aud96p22_endpoint>; + dai-format = "i2s"; + frame-master; + bitclock-master; + }; + }; +}; + +&irdec { + status = "okay"; +}; + +&pmm { + pwm3_pins: pwm3 { + pins = "KEY_ROW2"; + function = "PWM"; + }; + + vga_pins: vga { + pins = "KEY_COL1", "KEY_COL2", "VGA_HS", "VGA_VS"; + function = "VGA"; + }; +}; + +&pwm { + pinctrl-names = "default"; + pinctrl-0 = <&pwm3_pins>; + status = "okay"; +}; + +&sd0 { + status = "okay"; +}; + +&sd1 { + status = "okay"; +}; + +&spdif0 { + status = "okay"; + + spdif0_port: port { + spdif0_endpoint: endpoint { + remote-endpoint = <&hdmi_endpoint>; + }; + }; +}; + +&tvenc { + status = "disabled"; +}; + +&uart0 { + status = "okay"; +}; + +&vga { + pinctrl-names = "default"; + pinctrl-0 = <&vga_pins>; + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/zte/zx296718.dtsi b/arch/arm64/boot/dts/zte/zx296718.dtsi index d83bf789c864..6eef64761009 100644 --- a/arch/arm64/boot/dts/zte/zx296718.dtsi +++ b/arch/arm64/boot/dts/zte/zx296718.dtsi @@ -53,6 +53,13 @@ / { interrupt-parent = <&gic>; aliases { + gpio0 = &bgpio0; + gpio1 = &bgpio1; + gpio2 = &bgpio2; + gpio3 = &bgpio3; + gpio4 = &bgpio4; + gpio5 = &bgpio5; + gpio6 = &bgpio6; serial0 = &uart0; }; @@ -120,26 +127,31 @@ cluster0_opp: opp-table0 { opp-500000000 { opp-hz = /bits/ 64 <500000000>; + opp-microvolt = <866000>; clock-latency-ns = <500000>; }; opp-648000000 { opp-hz = /bits/ 64 <648000000>; + opp-microvolt = <866000>; clock-latency-ns = <500000>; }; opp-800000000 { opp-hz = /bits/ 64 <800000000>; + opp-microvolt = <888000>; clock-latency-ns = <500000>; }; opp-1000000000 { opp-hz = /bits/ 64 <1000000000>; + opp-microvolt = <898000>; clock-latency-ns = <500000>; }; opp-1188000000 { opp-hz = /bits/ 64 <1188000000>; + opp-microvolt = <1015000>; clock-latency-ns = <500000>; }; }; @@ -283,11 +295,23 @@ soc { compatible = "simple-bus"; ranges; + irdec: ir-decoder@111000 { + compatible = "zte,zx296718-irdec"; + reg = <0x111000 0x1000>; + interrupts = ; + status = "disabled"; + }; + aon_sysctrl: aon-sysctrl@116000 { compatible = "zte,zx296718-aon-sysctrl", "syscon"; reg = <0x116000 0x1000>; }; + iocfg: pin-controller@119000 { + compatible = "zte,zx296718-iocfg"; + reg = <0x119000 0x1000>; + }; + uart0: uart@11f000 { compatible = "arm,pl011", 
"arm,primecell"; arm,primecell-periphid = <0x001feffe>; @@ -311,7 +335,6 @@ sd0: mmc@1110000 { clock-frequency = <50000000>; clocks = <&topcrm SD0_AHB>, <&topcrm SD0_WCLK>; clock-names = "biu", "ciu"; - num-slots = <1>; max-frequency = <50000000>; cap-sdio-irq; cap-sd-highspeed; @@ -336,7 +359,6 @@ sd1: mmc@1111000 { clock-frequency = <167000000>; clocks = <&topcrm SD1_AHB>, <&topcrm SD1_WCLK>; clock-names = "biu", "ciu"; - num-slots = <1>; max-frequency = <167000000>; cap-sdio-irq; cap-sd-highspeed; @@ -360,12 +382,109 @@ lsp0crm: clock-controller@1420000 { #clock-cells = <1>; }; + bgpio0: gpio@142d000 { + compatible = "zte,zx296718-gpio", "zte,zx296702-gpio"; + reg = <0x142d000 0x40>; + gpio-controller; + #gpio-cells = <2>; + gpio-ranges = <&pmm 0 48 16>; + interrupts = ; + interrupt-parent = <&gic>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + bgpio1: gpio@142d040 { + compatible = "zte,zx296718-gpio", "zte,zx296702-gpio"; + reg = <0x142d040 0x40>; + gpio-controller; + #gpio-cells = <2>; + gpio-ranges = <&pmm 0 80 16>; + interrupts = ; + interrupt-parent = <&gic>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + bgpio2: gpio@142d080 { + compatible = "zte,zx296718-gpio", "zte,zx296702-gpio"; + reg = <0x142d080 0x40>; + gpio-controller; + #gpio-cells = <2>; + gpio-ranges = <&pmm 0 80 3 + &pmm 3 32 4 + &pmm 7 83 9>; + interrupts = ; + interrupt-parent = <&gic>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + bgpio3: gpio@142d0c0 { + compatible = "zte,zx296718-gpio", "zte,zx296702-gpio"; + reg = <0x142d0c0 0x40>; + gpio-controller; + #gpio-cells = <2>; + gpio-ranges = <&pmm 0 92 16>; + interrupts = ; + interrupt-parent = <&gic>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + bgpio4: gpio@142d100 { + compatible = "zte,zx296718-gpio", "zte,zx296702-gpio"; + reg = <0x142d100 0x40>; + gpio-controller; + #gpio-cells = <2>; + gpio-ranges = <&pmm 0 108 12 + &pmm 12 121 4>; + interrupts = ; + interrupt-parent = <&gic>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + bgpio5: gpio@142d140 { + compatible = "zte,zx296718-gpio", "zte,zx296702-gpio"; + reg = <0x142d140 0x40>; + gpio-controller; + #gpio-cells = <2>; + gpio-ranges = <&pmm 0 125 16>; + interrupts = ; + interrupt-parent = <&gic>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + bgpio6: gpio@142d180 { + compatible = "zte,zx296718-gpio", "zte,zx296702-gpio"; + reg = <0x142d180 0x40>; + gpio-controller; + #gpio-cells = <2>; + gpio-ranges = <&pmm 0 141 2>; + interrupts = ; + interrupt-parent = <&gic>; + interrupt-controller; + #interrupt-cells = <2>; + }; + lsp1crm: clock-controller@1430000 { compatible = "zte,zx296718-lsp1crm"; reg = <0x01430000 0x1000>; #clock-cells = <1>; }; + pwm: pwm@1439000 { + compatible = "zte,zx296718-pwm"; + reg = <0x1439000 0x1000>; + clocks = <&lsp1crm LSP1_PWM_PCLK>, + <&lsp1crm LSP1_PWM_WCLK>; + clock-names = "pclk", "wclk"; + #pwm-cells = <3>; + status = "disabled"; + }; + vou: vou@1440000 { compatible = "zte,zx296718-vou"; #address-cells = <1>; @@ -387,6 +506,16 @@ dpc: dpc@0 { "main_wclk", "aux_wclk"; }; + vga: vga@8000 { + compatible = "zte,zx296718-vga"; + reg = <0x8000 0x1000>; + interrupts = ; + clocks = <&topcrm VGA_I2C_WCLK>; + clock-names = "i2c_wclk"; + zte,vga-power-control = <&sysctrl 0x170 0xe0>; + status = "disabled"; + }; + hdmi: hdmi@c000 { compatible = "zte,zx296718-hdmi"; reg = <0xc000 0x4000>; @@ -413,6 +542,12 @@ topcrm: clock-controller@1461000 { #clock-cells = <1>; }; + pmm: pin-controller@1462000 { + compatible = 
"zte,zx296718-pmm"; + reg = <0x1462000 0x1000>; + zte,auxiliary-controller = <&iocfg>; + }; + sysctrl: sysctrl@1463000 { compatible = "zte,zx296718-sysctrl", "syscon"; reg = <0x1463000 0x1000>; @@ -445,6 +580,38 @@ audiocrm: clock-controller@1480000 { #clock-cells = <1>; }; + i2s0: i2s@1482000 { + compatible = "zte,zx296718-i2s", "zte,zx296702-i2s"; + reg = <0x01482000 0x1000>; + clocks = <&audiocrm AUDIO_I2S0_WCLK>, + <&audiocrm AUDIO_I2S0_PCLK>; + clock-names = "wclk", "pclk"; + assigned-clocks = <&audiocrm I2S0_WCLK_MUX>; + assigned-clock-parents = <&topcrm AUDIO_99M>; + interrupts = ; + dmas = <&dma 22>, <&dma 23>; + dma-names = "tx", "rx"; + #sound-dai-cells = <0>; + status = "disabled"; + }; + + i2c0: i2c@1486000 { + compatible = "zte,zx296718-i2c"; + reg = <0x01486000 0x1000>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clocks = <&audiocrm AUDIO_I2C0_WCLK>; + clock-frequency = <1600000>; + status = "disabled"; + + aud96p22: codec@22 { + compatible = "zte,zx-aud96p22"; + #sound-dai-cells = <0>; + reg = <0x22>; + }; + }; + spdif0: spdif@1488000 { compatible = "zte,zx296702-spdif"; reg = <0x1488000 0x1000>; diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index b4ca115b3be1..34480e9af2e7 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -3,6 +3,7 @@ CONFIG_POSIX_MQUEUE=y CONFIG_AUDIT=y CONFIG_NO_HZ_IDLE=y CONFIG_HIGH_RES_TIMERS=y +CONFIG_IRQ_TIME_ACCOUNTING=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_TASKSTATS=y @@ -68,6 +69,7 @@ CONFIG_HOTPLUG_PCI_ACPI=y CONFIG_PCI_LAYERSCAPE=y CONFIG_PCI_HISI=y CONFIG_PCIE_QCOM=y +CONFIG_PCIE_KIRIN=y CONFIG_PCIE_ARMADA_8K=y CONFIG_PCI_AARDVARK=y CONFIG_PCIE_RCAR=y @@ -88,6 +90,7 @@ CONFIG_XEN=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_COMPAT=y CONFIG_HIBERNATION=y +CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y CONFIG_ARM_CPUIDLE=y CONFIG_CPU_FREQ=y CONFIG_CPUFREQ_DT=y @@ -164,6 +167,7 @@ CONFIG_EEPROM_AT25=m CONFIG_BLK_DEV_SD=y CONFIG_SCSI_SAS_ATA=y CONFIG_SCSI_HISI_SAS=y +CONFIG_SCSI_HISI_SAS_PCI=y CONFIG_ATA=y CONFIG_SATA_AHCI=y CONFIG_SATA_AHCI_PLATFORM=y @@ -203,6 +207,7 @@ CONFIG_MARVELL_PHY=m CONFIG_MESON_GXL_PHY=m CONFIG_MICREL_PHY=y CONFIG_REALTEK_PHY=m +CONFIG_ROCKCHIP_PHY=y CONFIG_USB_PEGASUS=m CONFIG_USB_RTL8150=m CONFIG_USB_RTL8152=m @@ -250,6 +255,8 @@ CONFIG_SERIAL_MSM_CONSOLE=y CONFIG_SERIAL_XILINX_PS_UART=y CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y CONFIG_SERIAL_MVEBU_UART=y +CONFIG_SERIAL_DEV_BUS=y +CONFIG_SERIAL_DEV_CTRL_TTYPORT=y CONFIG_VIRTIO_CONSOLE=y CONFIG_I2C_CHARDEV=y CONFIG_I2C_MUX=y @@ -279,6 +286,7 @@ CONFIG_SPI_ROCKCHIP=y CONFIG_SPI_S3C64XX=y CONFIG_SPI_SPIDEV=m CONFIG_SPMI=y +CONFIG_PINCTRL_IPQ8074=y CONFIG_PINCTRL_SINGLE=y CONFIG_PINCTRL_MAX77620=y CONFIG_PINCTRL_MSM8916=y @@ -297,6 +305,7 @@ CONFIG_GPIO_MAX77620=y CONFIG_POWER_RESET_MSM=y CONFIG_POWER_RESET_XGENE=y CONFIG_POWER_RESET_SYSCON=y +CONFIG_SYSCON_REBOOT_MODE=y CONFIG_BATTERY_BQ27XXX=y CONFIG_SENSORS_ARM_SCPI=y CONFIG_SENSORS_LM90=m @@ -304,6 +313,7 @@ CONFIG_SENSORS_INA2XX=m CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y CONFIG_CPU_THERMAL=y CONFIG_THERMAL_EMULATION=y +CONFIG_BRCMSTB_THERMAL=m CONFIG_EXYNOS_THERMAL=y CONFIG_ROCKCHIP_THERMAL=m CONFIG_WATCHDOG=y @@ -311,19 +321,24 @@ CONFIG_S3C2410_WATCHDOG=y CONFIG_MESON_GXBB_WATCHDOG=m CONFIG_MESON_WATCHDOG=m CONFIG_RENESAS_WDT=y +CONFIG_UNIPHIER_WATCHDOG=y CONFIG_BCM2835_WDT=y +CONFIG_MFD_AXP20X_RSB=y CONFIG_MFD_CROS_EC=y CONFIG_MFD_CROS_EC_I2C=y CONFIG_MFD_CROS_EC_SPI=y CONFIG_MFD_EXYNOS_LPASS=m +CONFIG_MFD_HI6421_PMIC=y 
CONFIG_MFD_HI655X_PMIC=y CONFIG_MFD_MAX77620=y CONFIG_MFD_SPMI_PMIC=y CONFIG_MFD_RK808=y CONFIG_MFD_SEC_CORE=y +CONFIG_REGULATOR_AXP20X=y CONFIG_REGULATOR_FAN53555=y CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_REGULATOR_GPIO=y +CONFIG_REGULATOR_HI6421V530=y CONFIG_REGULATOR_HI655X=y CONFIG_REGULATOR_MAX77620=y CONFIG_REGULATOR_PWM=y @@ -358,6 +373,12 @@ CONFIG_DRM_EXYNOS_DSI=y # CONFIG_DRM_EXYNOS_DP is not set CONFIG_DRM_EXYNOS_HDMI=y CONFIG_DRM_EXYNOS_MIC=y +CONFIG_DRM_ROCKCHIP=m +CONFIG_ROCKCHIP_ANALOGIX_DP=y +CONFIG_ROCKCHIP_CDN_DP=y +CONFIG_ROCKCHIP_DW_HDMI=y +CONFIG_ROCKCHIP_DW_MIPI_DSI=y +CONFIG_ROCKCHIP_INNO_HDMI=y CONFIG_DRM_RCAR_DU=m CONFIG_DRM_RCAR_LVDS=y CONFIG_DRM_RCAR_VSP=y @@ -370,6 +391,7 @@ CONFIG_DRM_MESON=m CONFIG_FB=y CONFIG_FB_ARMCLCD=y CONFIG_BACKLIGHT_GENERIC=m +CONFIG_BACKLIGHT_PWM=m CONFIG_BACKLIGHT_LP855X=m CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_LOGO=y @@ -380,8 +402,8 @@ CONFIG_SND=y CONFIG_SND_SOC=y CONFIG_SND_BCM2835_SOC_I2S=m CONFIG_SND_SOC_SAMSUNG=y -CONFIG_SND_SOC_RCAR=y -CONFIG_SND_SOC_AK4613=y +CONFIG_SND_SOC_RCAR=m +CONFIG_SND_SOC_AK4613=m CONFIG_SND_SIMPLE_CARD=y CONFIG_USB=y CONFIG_USB_OTG=y @@ -403,6 +425,7 @@ CONFIG_USB_CHIPIDEA_UDC=y CONFIG_USB_CHIPIDEA_HOST=y CONFIG_USB_ISP1760=y CONFIG_USB_HSIC_USB3503=y +CONFIG_NOP_USB_XCEIV=y CONFIG_USB_MSM_OTG=y CONFIG_USB_QCOM_8X16_PHY=y CONFIG_USB_ULPI=y @@ -451,6 +474,7 @@ CONFIG_RTC_DRV_TEGRA=y CONFIG_RTC_DRV_XGENE=y CONFIG_DMADEVICES=y CONFIG_DMA_BCM2835=m +CONFIG_K3_DMA=y CONFIG_MV_XOR_V2=y CONFIG_PL330_DMA=y CONFIG_TEGRA20_APB_DMA=y @@ -473,6 +497,7 @@ CONFIG_CLK_QORIQ=y CONFIG_COMMON_CLK_PWM=y CONFIG_COMMON_CLK_QCOM=y CONFIG_QCOM_CLK_SMD_RPM=y +CONFIG_IPQ_GCC_8074=y CONFIG_MSM_GCC_8916=y CONFIG_MSM_GCC_8994=y CONFIG_MSM_MMCC_8996=y @@ -482,6 +507,7 @@ CONFIG_ARM_MHU=y CONFIG_PLATFORM_MHU=y CONFIG_BCM2835_MBOX=y CONFIG_HI6220_MBOX=y +CONFIG_ROCKCHIP_IOMMU=y CONFIG_ARM_SMMU=y CONFIG_ARM_SMMU_V3=y CONFIG_RPMSG_QCOM_SMD=y @@ -515,6 +541,8 @@ CONFIG_PHY_XGENE=y CONFIG_PHY_TEGRA_XUSB=y CONFIG_QCOM_L2_PMU=y CONFIG_QCOM_L3_PMU=y +CONFIG_TEE=y +CONFIG_OPTEE=y CONFIG_ARM_SCPI_PROTOCOL=y CONFIG_RASPBERRYPI_FIRMWARE=y CONFIG_EFI_CAPSULE_LOADER=y @@ -563,8 +591,17 @@ CONFIG_SECURITY=y CONFIG_CRYPTO_ECHAINIV=y CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_ARM64_CRYPTO=y +CONFIG_CRYPTO_SHA256_ARM64=m +CONFIG_CRYPTO_SHA512_ARM64=m CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y CONFIG_CRYPTO_GHASH_ARM64_CE=y +CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m +CONFIG_CRYPTO_CRC32_ARM64_CE=m +CONFIG_CRYPTO_AES_ARM64=m +CONFIG_CRYPTO_AES_ARM64_CE=m CONFIG_CRYPTO_AES_ARM64_CE_CCM=y CONFIG_CRYPTO_AES_ARM64_CE_BLK=y +CONFIG_CRYPTO_AES_ARM64_NEON_BLK=m +CONFIG_CRYPTO_CHACHA20_NEON=m +CONFIG_CRYPTO_AES_ARM64_BS=m diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig index d92293747d63..7ca54a76f6b9 100644 --- a/arch/arm64/crypto/Kconfig +++ b/arch/arm64/crypto/Kconfig @@ -18,18 +18,23 @@ config CRYPTO_SHA512_ARM64 config CRYPTO_SHA1_ARM64_CE tristate "SHA-1 digest algorithm (ARMv8 Crypto Extensions)" - depends on ARM64 && KERNEL_MODE_NEON + depends on KERNEL_MODE_NEON select CRYPTO_HASH + select CRYPTO_SHA1 config CRYPTO_SHA2_ARM64_CE tristate "SHA-224/SHA-256 digest algorithm (ARMv8 Crypto Extensions)" - depends on ARM64 && KERNEL_MODE_NEON + depends on KERNEL_MODE_NEON select CRYPTO_HASH + select CRYPTO_SHA256_ARM64 config CRYPTO_GHASH_ARM64_CE - tristate "GHASH (for GCM chaining mode) using ARMv8 Crypto Extensions" - depends on ARM64 && KERNEL_MODE_NEON + tristate "GHASH/AES-GCM using ARMv8 Crypto Extensions" + depends on 
KERNEL_MODE_NEON select CRYPTO_HASH + select CRYPTO_GF128MUL + select CRYPTO_AES + select CRYPTO_AES_ARM64 config CRYPTO_CRCT10DIF_ARM64_CE tristate "CRCT10DIF digest algorithm using PMULL instructions" @@ -49,25 +54,29 @@ config CRYPTO_AES_ARM64_CE tristate "AES core cipher using ARMv8 Crypto Extensions" depends on ARM64 && KERNEL_MODE_NEON select CRYPTO_ALGAPI + select CRYPTO_AES_ARM64 config CRYPTO_AES_ARM64_CE_CCM tristate "AES in CCM mode using ARMv8 Crypto Extensions" depends on ARM64 && KERNEL_MODE_NEON select CRYPTO_ALGAPI select CRYPTO_AES_ARM64_CE + select CRYPTO_AES_ARM64 select CRYPTO_AEAD config CRYPTO_AES_ARM64_CE_BLK tristate "AES in ECB/CBC/CTR/XTS modes using ARMv8 Crypto Extensions" - depends on ARM64 && KERNEL_MODE_NEON + depends on KERNEL_MODE_NEON select CRYPTO_BLKCIPHER select CRYPTO_AES_ARM64_CE + select CRYPTO_AES_ARM64 select CRYPTO_SIMD config CRYPTO_AES_ARM64_NEON_BLK tristate "AES in ECB/CBC/CTR/XTS modes using NEON instructions" - depends on ARM64 && KERNEL_MODE_NEON + depends on KERNEL_MODE_NEON select CRYPTO_BLKCIPHER + select CRYPTO_AES_ARM64 select CRYPTO_AES select CRYPTO_SIMD @@ -82,6 +91,7 @@ config CRYPTO_AES_ARM64_BS depends on KERNEL_MODE_NEON select CRYPTO_BLKCIPHER select CRYPTO_AES_ARM64_NEON_BLK + select CRYPTO_AES_ARM64 select CRYPTO_SIMD endif diff --git a/arch/arm64/crypto/aes-ce-ccm-core.S b/arch/arm64/crypto/aes-ce-ccm-core.S index 3363560c79b7..e3a375c4cb83 100644 --- a/arch/arm64/crypto/aes-ce-ccm-core.S +++ b/arch/arm64/crypto/aes-ce-ccm-core.S @@ -1,7 +1,7 @@ /* * aesce-ccm-core.S - AES-CCM transform for ARMv8 with Crypto Extensions * - * Copyright (C) 2013 - 2014 Linaro Ltd + * Copyright (C) 2013 - 2017 Linaro Ltd * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -32,7 +32,7 @@ ENTRY(ce_aes_ccm_auth_data) beq 8f /* out of input? */ cbnz w8, 0b eor v0.16b, v0.16b, v1.16b -1: ld1 {v3.16b}, [x4] /* load first round key */ +1: ld1 {v3.4s}, [x4] /* load first round key */ prfm pldl1strm, [x1] cmp w5, #12 /* which key size? */ add x6, x4, #16 @@ -42,17 +42,17 @@ ENTRY(ce_aes_ccm_auth_data) mov v5.16b, v3.16b b 4f 2: mov v4.16b, v3.16b - ld1 {v5.16b}, [x6], #16 /* load 2nd round key */ + ld1 {v5.4s}, [x6], #16 /* load 2nd round key */ 3: aese v0.16b, v4.16b aesmc v0.16b, v0.16b -4: ld1 {v3.16b}, [x6], #16 /* load next round key */ +4: ld1 {v3.4s}, [x6], #16 /* load next round key */ aese v0.16b, v5.16b aesmc v0.16b, v0.16b -5: ld1 {v4.16b}, [x6], #16 /* load next round key */ +5: ld1 {v4.4s}, [x6], #16 /* load next round key */ subs w7, w7, #3 aese v0.16b, v3.16b aesmc v0.16b, v0.16b - ld1 {v5.16b}, [x6], #16 /* load next round key */ + ld1 {v5.4s}, [x6], #16 /* load next round key */ bpl 3b aese v0.16b, v4.16b subs w2, w2, #16 /* last data? */ @@ -90,7 +90,7 @@ ENDPROC(ce_aes_ccm_auth_data) * u32 rounds); */ ENTRY(ce_aes_ccm_final) - ld1 {v3.16b}, [x2], #16 /* load first round key */ + ld1 {v3.4s}, [x2], #16 /* load first round key */ ld1 {v0.16b}, [x0] /* load mac */ cmp w3, #12 /* which key size? 
*/ sub w3, w3, #2 /* modified # of rounds */ @@ -100,17 +100,17 @@ ENTRY(ce_aes_ccm_final) mov v5.16b, v3.16b b 2f 0: mov v4.16b, v3.16b -1: ld1 {v5.16b}, [x2], #16 /* load next round key */ +1: ld1 {v5.4s}, [x2], #16 /* load next round key */ aese v0.16b, v4.16b aesmc v0.16b, v0.16b aese v1.16b, v4.16b aesmc v1.16b, v1.16b -2: ld1 {v3.16b}, [x2], #16 /* load next round key */ +2: ld1 {v3.4s}, [x2], #16 /* load next round key */ aese v0.16b, v5.16b aesmc v0.16b, v0.16b aese v1.16b, v5.16b aesmc v1.16b, v1.16b -3: ld1 {v4.16b}, [x2], #16 /* load next round key */ +3: ld1 {v4.4s}, [x2], #16 /* load next round key */ subs w3, w3, #3 aese v0.16b, v3.16b aesmc v0.16b, v0.16b @@ -137,31 +137,31 @@ CPU_LE( rev x8, x8 ) /* keep swabbed ctr in reg */ cmp w4, #12 /* which key size? */ sub w7, w4, #2 /* get modified # of rounds */ ins v1.d[1], x9 /* no carry in lower ctr */ - ld1 {v3.16b}, [x3] /* load first round key */ + ld1 {v3.4s}, [x3] /* load first round key */ add x10, x3, #16 bmi 1f bne 4f mov v5.16b, v3.16b b 3f 1: mov v4.16b, v3.16b - ld1 {v5.16b}, [x10], #16 /* load 2nd round key */ + ld1 {v5.4s}, [x10], #16 /* load 2nd round key */ 2: /* inner loop: 3 rounds, 2x interleaved */ aese v0.16b, v4.16b aesmc v0.16b, v0.16b aese v1.16b, v4.16b aesmc v1.16b, v1.16b -3: ld1 {v3.16b}, [x10], #16 /* load next round key */ +3: ld1 {v3.4s}, [x10], #16 /* load next round key */ aese v0.16b, v5.16b aesmc v0.16b, v0.16b aese v1.16b, v5.16b aesmc v1.16b, v1.16b -4: ld1 {v4.16b}, [x10], #16 /* load next round key */ +4: ld1 {v4.4s}, [x10], #16 /* load next round key */ subs w7, w7, #3 aese v0.16b, v3.16b aesmc v0.16b, v0.16b aese v1.16b, v3.16b aesmc v1.16b, v1.16b - ld1 {v5.16b}, [x10], #16 /* load next round key */ + ld1 {v5.4s}, [x10], #16 /* load next round key */ bpl 2b aese v0.16b, v4.16b aese v1.16b, v4.16b diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c index 6a7dbc7c83a6..a1254036f2b1 100644 --- a/arch/arm64/crypto/aes-ce-ccm-glue.c +++ b/arch/arm64/crypto/aes-ce-ccm-glue.c @@ -1,7 +1,7 @@ /* * aes-ccm-glue.c - AES-CCM transform for ARMv8 with Crypto Extensions * - * Copyright (C) 2013 - 2014 Linaro Ltd + * Copyright (C) 2013 - 2017 Linaro Ltd * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -9,6 +9,7 @@ */ #include +#include #include #include #include @@ -44,6 +45,8 @@ asmlinkage void ce_aes_ccm_decrypt(u8 out[], u8 const in[], u32 cbytes, asmlinkage void ce_aes_ccm_final(u8 mac[], u8 const ctr[], u32 const rk[], u32 rounds); +asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds); + static int ccm_setkey(struct crypto_aead *tfm, const u8 *in_key, unsigned int key_len) { @@ -103,7 +106,45 @@ static int ccm_init_mac(struct aead_request *req, u8 maciv[], u32 msglen) return 0; } -static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[]) +static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[], + u32 abytes, u32 *macp, bool use_neon) +{ + if (likely(use_neon)) { + ce_aes_ccm_auth_data(mac, in, abytes, macp, key->key_enc, + num_rounds(key)); + } else { + if (*macp > 0 && *macp < AES_BLOCK_SIZE) { + int added = min(abytes, AES_BLOCK_SIZE - *macp); + + crypto_xor(&mac[*macp], in, added); + + *macp += added; + in += added; + abytes -= added; + } + + while (abytes > AES_BLOCK_SIZE) { + __aes_arm64_encrypt(key->key_enc, mac, mac, + num_rounds(key)); + crypto_xor(mac, in, AES_BLOCK_SIZE); + + in += AES_BLOCK_SIZE; + 
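/*
 * [Annotation, not part of the patch] The !use_neon branch of
 * ccm_update_mac() here computes the CCM authentication value as a plain
 * CBC-MAC in C: seal the running MAC with one block encryption, then xor
 * the next block of data into it, keeping any partial tail buffered. A
 * minimal stand-alone sketch of that chaining pattern follows;
 * toy_block_encrypt() is only a placeholder for __aes_arm64_encrypt() and
 * the exact *macp bookkeeping is simplified.
 */
#include <stddef.h>
#include <stdint.h>

#define TOY_BLOCK_SIZE 16

static void toy_block_encrypt(uint8_t blk[TOY_BLOCK_SIZE])
{
	/* stand-in for one AES block encryption under the session key */
	for (int i = 0; i < TOY_BLOCK_SIZE; i++)
		blk[i] = (uint8_t)(blk[i] * 167u + 13u);
}

static void toy_cbcmac_update(uint8_t mac[TOY_BLOCK_SIZE],
			      const uint8_t *in, size_t len)
{
	while (len >= TOY_BLOCK_SIZE) {
		toy_block_encrypt(mac);			/* seal previous block */
		for (int i = 0; i < TOY_BLOCK_SIZE; i++)
			mac[i] ^= in[i];		/* fold next block in  */
		in += TOY_BLOCK_SIZE;
		len -= TOY_BLOCK_SIZE;
	}
	for (size_t i = 0; i < len; i++)		/* partial tail stays  */
		mac[i] ^= in[i];			/* xor-ed until sealed */
}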
abytes -= AES_BLOCK_SIZE; + } + + if (abytes > 0) { + __aes_arm64_encrypt(key->key_enc, mac, mac, + num_rounds(key)); + crypto_xor(mac, in, abytes); + *macp = abytes; + } else { + *macp = 0; + } + } +} + +static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[], + bool use_neon) { struct crypto_aead *aead = crypto_aead_reqtfm(req); struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead); @@ -122,8 +163,7 @@ static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[]) ltag.len = 6; } - ce_aes_ccm_auth_data(mac, (u8 *)<ag, ltag.len, &macp, ctx->key_enc, - num_rounds(ctx)); + ccm_update_mac(ctx, mac, (u8 *)<ag, ltag.len, &macp, use_neon); scatterwalk_start(&walk, req->src); do { @@ -135,8 +175,7 @@ static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[]) n = scatterwalk_clamp(&walk, len); } p = scatterwalk_map(&walk); - ce_aes_ccm_auth_data(mac, p, n, &macp, ctx->key_enc, - num_rounds(ctx)); + ccm_update_mac(ctx, mac, p, n, &macp, use_neon); len -= n; scatterwalk_unmap(p); @@ -145,6 +184,56 @@ static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[]) } while (len); } +static int ccm_crypt_fallback(struct skcipher_walk *walk, u8 mac[], u8 iv0[], + struct crypto_aes_ctx *ctx, bool enc) +{ + u8 buf[AES_BLOCK_SIZE]; + int err = 0; + + while (walk->nbytes) { + int blocks = walk->nbytes / AES_BLOCK_SIZE; + u32 tail = walk->nbytes % AES_BLOCK_SIZE; + u8 *dst = walk->dst.virt.addr; + u8 *src = walk->src.virt.addr; + u32 nbytes = walk->nbytes; + + if (nbytes == walk->total && tail > 0) { + blocks++; + tail = 0; + } + + do { + u32 bsize = AES_BLOCK_SIZE; + + if (nbytes < AES_BLOCK_SIZE) + bsize = nbytes; + + crypto_inc(walk->iv, AES_BLOCK_SIZE); + __aes_arm64_encrypt(ctx->key_enc, buf, walk->iv, + num_rounds(ctx)); + __aes_arm64_encrypt(ctx->key_enc, mac, mac, + num_rounds(ctx)); + if (enc) + crypto_xor(mac, src, bsize); + crypto_xor_cpy(dst, src, buf, bsize); + if (!enc) + crypto_xor(mac, dst, bsize); + dst += bsize; + src += bsize; + nbytes -= bsize; + } while (--blocks); + + err = skcipher_walk_done(walk, tail); + } + + if (!err) { + __aes_arm64_encrypt(ctx->key_enc, buf, iv0, num_rounds(ctx)); + __aes_arm64_encrypt(ctx->key_enc, mac, mac, num_rounds(ctx)); + crypto_xor(mac, buf, AES_BLOCK_SIZE); + } + return err; +} + static int ccm_encrypt(struct aead_request *req) { struct crypto_aead *aead = crypto_aead_reqtfm(req); @@ -153,39 +242,46 @@ static int ccm_encrypt(struct aead_request *req) u8 __aligned(8) mac[AES_BLOCK_SIZE]; u8 buf[AES_BLOCK_SIZE]; u32 len = req->cryptlen; + bool use_neon = may_use_simd(); int err; err = ccm_init_mac(req, mac, len); if (err) return err; - kernel_neon_begin_partial(6); + if (likely(use_neon)) + kernel_neon_begin(); if (req->assoclen) - ccm_calculate_auth_mac(req, mac); + ccm_calculate_auth_mac(req, mac, use_neon); /* preserve the original iv for the final round */ memcpy(buf, req->iv, AES_BLOCK_SIZE); err = skcipher_walk_aead_encrypt(&walk, req, true); - while (walk.nbytes) { - u32 tail = walk.nbytes % AES_BLOCK_SIZE; + if (likely(use_neon)) { + while (walk.nbytes) { + u32 tail = walk.nbytes % AES_BLOCK_SIZE; - if (walk.nbytes == walk.total) - tail = 0; + if (walk.nbytes == walk.total) + tail = 0; - ce_aes_ccm_encrypt(walk.dst.virt.addr, walk.src.virt.addr, - walk.nbytes - tail, ctx->key_enc, - num_rounds(ctx), mac, walk.iv); + ce_aes_ccm_encrypt(walk.dst.virt.addr, + walk.src.virt.addr, + walk.nbytes - tail, ctx->key_enc, + num_rounds(ctx), mac, walk.iv); - err = skcipher_walk_done(&walk, tail); + err = 
skcipher_walk_done(&walk, tail); + } + if (!err) + ce_aes_ccm_final(mac, buf, ctx->key_enc, + num_rounds(ctx)); + + kernel_neon_end(); + } else { + err = ccm_crypt_fallback(&walk, mac, buf, ctx, true); } - if (!err) - ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx)); - - kernel_neon_end(); - if (err) return err; @@ -205,38 +301,46 @@ static int ccm_decrypt(struct aead_request *req) u8 __aligned(8) mac[AES_BLOCK_SIZE]; u8 buf[AES_BLOCK_SIZE]; u32 len = req->cryptlen - authsize; + bool use_neon = may_use_simd(); int err; err = ccm_init_mac(req, mac, len); if (err) return err; - kernel_neon_begin_partial(6); + if (likely(use_neon)) + kernel_neon_begin(); if (req->assoclen) - ccm_calculate_auth_mac(req, mac); + ccm_calculate_auth_mac(req, mac, use_neon); /* preserve the original iv for the final round */ memcpy(buf, req->iv, AES_BLOCK_SIZE); err = skcipher_walk_aead_decrypt(&walk, req, true); - while (walk.nbytes) { - u32 tail = walk.nbytes % AES_BLOCK_SIZE; + if (likely(use_neon)) { + while (walk.nbytes) { + u32 tail = walk.nbytes % AES_BLOCK_SIZE; - if (walk.nbytes == walk.total) - tail = 0; + if (walk.nbytes == walk.total) + tail = 0; - ce_aes_ccm_decrypt(walk.dst.virt.addr, walk.src.virt.addr, - walk.nbytes - tail, ctx->key_enc, - num_rounds(ctx), mac, walk.iv); + ce_aes_ccm_decrypt(walk.dst.virt.addr, + walk.src.virt.addr, + walk.nbytes - tail, ctx->key_enc, + num_rounds(ctx), mac, walk.iv); - err = skcipher_walk_done(&walk, tail); + err = skcipher_walk_done(&walk, tail); + } + if (!err) + ce_aes_ccm_final(mac, buf, ctx->key_enc, + num_rounds(ctx)); + + kernel_neon_end(); + } else { + err = ccm_crypt_fallback(&walk, mac, buf, ctx, false); } - if (!err) - ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx)); - - kernel_neon_end(); if (err) return err; diff --git a/arch/arm64/crypto/aes-ce-cipher.c b/arch/arm64/crypto/aes-ce-cipher.c index 50d9fe11d0c8..6a75cd75ed11 100644 --- a/arch/arm64/crypto/aes-ce-cipher.c +++ b/arch/arm64/crypto/aes-ce-cipher.c @@ -1,7 +1,7 @@ /* * aes-ce-cipher.c - core AES cipher using ARMv8 Crypto Extensions * - * Copyright (C) 2013 - 2014 Linaro Ltd + * Copyright (C) 2013 - 2017 Linaro Ltd * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -9,6 +9,8 @@ */ #include +#include +#include #include #include #include @@ -20,6 +22,9 @@ MODULE_DESCRIPTION("Synchronous AES cipher using ARMv8 Crypto Extensions"); MODULE_AUTHOR("Ard Biesheuvel "); MODULE_LICENSE("GPL v2"); +asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds); +asmlinkage void __aes_arm64_decrypt(u32 *rk, u8 *out, const u8 *in, int rounds); + struct aes_block { u8 b[AES_BLOCK_SIZE]; }; @@ -44,27 +49,32 @@ static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[]) void *dummy0; int dummy1; - kernel_neon_begin_partial(4); + if (!may_use_simd()) { + __aes_arm64_encrypt(ctx->key_enc, dst, src, num_rounds(ctx)); + return; + } + + kernel_neon_begin(); __asm__(" ld1 {v0.16b}, %[in] ;" - " ld1 {v1.16b}, [%[key]], #16 ;" + " ld1 {v1.4s}, [%[key]], #16 ;" " cmp %w[rounds], #10 ;" " bmi 0f ;" " bne 3f ;" " mov v3.16b, v1.16b ;" " b 2f ;" "0: mov v2.16b, v1.16b ;" - " ld1 {v3.16b}, [%[key]], #16 ;" + " ld1 {v3.4s}, [%[key]], #16 ;" "1: aese v0.16b, v2.16b ;" " aesmc v0.16b, v0.16b ;" - "2: ld1 {v1.16b}, [%[key]], #16 ;" + "2: ld1 {v1.4s}, [%[key]], #16 ;" " aese v0.16b, v3.16b ;" " aesmc v0.16b, v0.16b ;" - "3: ld1 {v2.16b}, [%[key]], #16 ;" + "3: ld1 {v2.4s}, 
[%[key]], #16 ;" " subs %w[rounds], %w[rounds], #3 ;" " aese v0.16b, v1.16b ;" " aesmc v0.16b, v0.16b ;" - " ld1 {v3.16b}, [%[key]], #16 ;" + " ld1 {v3.4s}, [%[key]], #16 ;" " bpl 1b ;" " aese v0.16b, v2.16b ;" " eor v0.16b, v0.16b, v3.16b ;" @@ -89,27 +99,32 @@ static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[]) void *dummy0; int dummy1; - kernel_neon_begin_partial(4); + if (!may_use_simd()) { + __aes_arm64_decrypt(ctx->key_dec, dst, src, num_rounds(ctx)); + return; + } + + kernel_neon_begin(); __asm__(" ld1 {v0.16b}, %[in] ;" - " ld1 {v1.16b}, [%[key]], #16 ;" + " ld1 {v1.4s}, [%[key]], #16 ;" " cmp %w[rounds], #10 ;" " bmi 0f ;" " bne 3f ;" " mov v3.16b, v1.16b ;" " b 2f ;" "0: mov v2.16b, v1.16b ;" - " ld1 {v3.16b}, [%[key]], #16 ;" + " ld1 {v3.4s}, [%[key]], #16 ;" "1: aesd v0.16b, v2.16b ;" " aesimc v0.16b, v0.16b ;" - "2: ld1 {v1.16b}, [%[key]], #16 ;" + "2: ld1 {v1.4s}, [%[key]], #16 ;" " aesd v0.16b, v3.16b ;" " aesimc v0.16b, v0.16b ;" - "3: ld1 {v2.16b}, [%[key]], #16 ;" + "3: ld1 {v2.4s}, [%[key]], #16 ;" " subs %w[rounds], %w[rounds], #3 ;" " aesd v0.16b, v1.16b ;" " aesimc v0.16b, v0.16b ;" - " ld1 {v3.16b}, [%[key]], #16 ;" + " ld1 {v3.4s}, [%[key]], #16 ;" " bpl 1b ;" " aesd v0.16b, v2.16b ;" " eor v0.16b, v0.16b, v3.16b ;" @@ -165,20 +180,16 @@ int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key, key_len != AES_KEYSIZE_256) return -EINVAL; - memcpy(ctx->key_enc, in_key, key_len); ctx->key_length = key_len; + for (i = 0; i < kwords; i++) + ctx->key_enc[i] = get_unaligned_le32(in_key + i * sizeof(u32)); - kernel_neon_begin_partial(2); + kernel_neon_begin(); for (i = 0; i < sizeof(rcon); i++) { u32 *rki = ctx->key_enc + (i * kwords); u32 *rko = rki + kwords; -#ifndef CONFIG_CPU_BIG_ENDIAN rko[0] = ror32(aes_sub(rki[kwords - 1]), 8) ^ rcon[i] ^ rki[0]; -#else - rko[0] = rol32(aes_sub(rki[kwords - 1]), 8) ^ (rcon[i] << 24) ^ - rki[0]; -#endif rko[1] = rko[0] ^ rki[1]; rko[2] = rko[1] ^ rki[2]; rko[3] = rko[2] ^ rki[3]; @@ -210,9 +221,9 @@ int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key, key_dec[0] = key_enc[j]; for (i = 1, j--; j > 0; i++, j--) - __asm__("ld1 {v0.16b}, %[in] ;" + __asm__("ld1 {v0.4s}, %[in] ;" "aesimc v1.16b, v0.16b ;" - "st1 {v1.16b}, %[out] ;" + "st1 {v1.4s}, %[out] ;" : [out] "=Q"(key_dec[i]) : [in] "Q"(key_enc[j]) diff --git a/arch/arm64/crypto/aes-ce.S b/arch/arm64/crypto/aes-ce.S index b46093d567e5..50330f5c3adc 100644 --- a/arch/arm64/crypto/aes-ce.S +++ b/arch/arm64/crypto/aes-ce.S @@ -2,7 +2,7 @@ * linux/arch/arm64/crypto/aes-ce.S - AES cipher for ARMv8 with * Crypto Extensions * - * Copyright (C) 2013 Linaro Ltd + * Copyright (C) 2013 - 2017 Linaro Ltd * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -22,11 +22,11 @@ cmp \rounds, #12 blo 2222f /* 128 bits */ beq 1111f /* 192 bits */ - ld1 {v17.16b-v18.16b}, [\rk], #32 -1111: ld1 {v19.16b-v20.16b}, [\rk], #32 -2222: ld1 {v21.16b-v24.16b}, [\rk], #64 - ld1 {v25.16b-v28.16b}, [\rk], #64 - ld1 {v29.16b-v31.16b}, [\rk] + ld1 {v17.4s-v18.4s}, [\rk], #32 +1111: ld1 {v19.4s-v20.4s}, [\rk], #32 +2222: ld1 {v21.4s-v24.4s}, [\rk], #64 + ld1 {v25.4s-v28.4s}, [\rk], #64 + ld1 {v29.4s-v31.4s}, [\rk] .endm /* prepare for encryption with key in rk[] */ diff --git a/arch/arm64/crypto/aes-cipher-core.S b/arch/arm64/crypto/aes-cipher-core.S index f2f9cc519309..6d2445d603cc 100644 --- a/arch/arm64/crypto/aes-cipher-core.S +++ b/arch/arm64/crypto/aes-cipher-core.S @@ 
-10,6 +10,7 @@ #include #include +#include .text @@ -17,94 +18,155 @@ out .req x1 in .req x2 rounds .req x3 - tt .req x4 - lt .req x2 + tt .req x2 - .macro __pair, enc, reg0, reg1, in0, in1e, in1d, shift - ubfx \reg0, \in0, #\shift, #8 - .if \enc - ubfx \reg1, \in1e, #\shift, #8 + .macro __pair1, sz, op, reg0, reg1, in0, in1e, in1d, shift + .ifc \op\shift, b0 + ubfiz \reg0, \in0, #2, #8 + ubfiz \reg1, \in1e, #2, #8 .else - ubfx \reg1, \in1d, #\shift, #8 + ubfx \reg0, \in0, #\shift, #8 + ubfx \reg1, \in1e, #\shift, #8 .endif + + /* + * AArch64 cannot do byte size indexed loads from a table containing + * 32-bit quantities, i.e., 'ldrb w12, [tt, w12, uxtw #2]' is not a + * valid instruction. So perform the shift explicitly first for the + * high bytes (the low byte is shifted implicitly by using ubfiz rather + * than ubfx above) + */ + .ifnc \op, b ldr \reg0, [tt, \reg0, uxtw #2] ldr \reg1, [tt, \reg1, uxtw #2] + .else + .if \shift > 0 + lsl \reg0, \reg0, #2 + lsl \reg1, \reg1, #2 + .endif + ldrb \reg0, [tt, \reg0, uxtw] + ldrb \reg1, [tt, \reg1, uxtw] + .endif .endm - .macro __hround, out0, out1, in0, in1, in2, in3, t0, t1, enc + .macro __pair0, sz, op, reg0, reg1, in0, in1e, in1d, shift + ubfx \reg0, \in0, #\shift, #8 + ubfx \reg1, \in1d, #\shift, #8 + ldr\op \reg0, [tt, \reg0, uxtw #\sz] + ldr\op \reg1, [tt, \reg1, uxtw #\sz] + .endm + + .macro __hround, out0, out1, in0, in1, in2, in3, t0, t1, enc, sz, op ldp \out0, \out1, [rk], #8 - __pair \enc, w13, w14, \in0, \in1, \in3, 0 - __pair \enc, w15, w16, \in1, \in2, \in0, 8 - __pair \enc, w17, w18, \in2, \in3, \in1, 16 - __pair \enc, \t0, \t1, \in3, \in0, \in2, 24 + __pair\enc \sz, \op, w12, w13, \in0, \in1, \in3, 0 + __pair\enc \sz, \op, w14, w15, \in1, \in2, \in0, 8 + __pair\enc \sz, \op, w16, w17, \in2, \in3, \in1, 16 + __pair\enc \sz, \op, \t0, \t1, \in3, \in0, \in2, 24 - eor \out0, \out0, w13 - eor \out1, \out1, w14 - eor \out0, \out0, w15, ror #24 - eor \out1, \out1, w16, ror #24 - eor \out0, \out0, w17, ror #16 - eor \out1, \out1, w18, ror #16 + eor \out0, \out0, w12 + eor \out1, \out1, w13 + eor \out0, \out0, w14, ror #24 + eor \out1, \out1, w15, ror #24 + eor \out0, \out0, w16, ror #16 + eor \out1, \out1, w17, ror #16 eor \out0, \out0, \t0, ror #8 eor \out1, \out1, \t1, ror #8 .endm - .macro fround, out0, out1, out2, out3, in0, in1, in2, in3 - __hround \out0, \out1, \in0, \in1, \in2, \in3, \out2, \out3, 1 - __hround \out2, \out3, \in2, \in3, \in0, \in1, \in1, \in2, 1 + .macro fround, out0, out1, out2, out3, in0, in1, in2, in3, sz=2, op + __hround \out0, \out1, \in0, \in1, \in2, \in3, \out2, \out3, 1, \sz, \op + __hround \out2, \out3, \in2, \in3, \in0, \in1, \in1, \in2, 1, \sz, \op .endm - .macro iround, out0, out1, out2, out3, in0, in1, in2, in3 - __hround \out0, \out1, \in0, \in3, \in2, \in1, \out2, \out3, 0 - __hround \out2, \out3, \in2, \in1, \in0, \in3, \in1, \in0, 0 + .macro iround, out0, out1, out2, out3, in0, in1, in2, in3, sz=2, op + __hround \out0, \out1, \in0, \in3, \in2, \in1, \out2, \out3, 0, \sz, \op + __hround \out2, \out3, \in2, \in1, \in0, \in3, \in1, \in0, 0, \sz, \op .endm - .macro do_crypt, round, ttab, ltab - ldp w5, w6, [in] - ldp w7, w8, [in, #8] - ldp w9, w10, [rk], #16 - ldp w11, w12, [rk, #-8] + .macro do_crypt, round, ttab, ltab, bsz + ldp w4, w5, [in] + ldp w6, w7, [in, #8] + ldp w8, w9, [rk], #16 + ldp w10, w11, [rk, #-8] +CPU_BE( rev w4, w4 ) CPU_BE( rev w5, w5 ) CPU_BE( rev w6, w6 ) CPU_BE( rev w7, w7 ) -CPU_BE( rev w8, w8 ) + eor w4, w4, w8 eor w5, w5, w9 eor w6, w6, w10 eor w7, w7, w11 - eor w8, w8, 
w12 adr_l tt, \ttab - adr_l lt, \ltab tbnz rounds, #1, 1f -0: \round w9, w10, w11, w12, w5, w6, w7, w8 - \round w5, w6, w7, w8, w9, w10, w11, w12 +0: \round w8, w9, w10, w11, w4, w5, w6, w7 + \round w4, w5, w6, w7, w8, w9, w10, w11 1: subs rounds, rounds, #4 - \round w9, w10, w11, w12, w5, w6, w7, w8 - csel tt, tt, lt, hi - \round w5, w6, w7, w8, w9, w10, w11, w12 - b.hi 0b + \round w8, w9, w10, w11, w4, w5, w6, w7 + b.ls 3f +2: \round w4, w5, w6, w7, w8, w9, w10, w11 + b 0b +3: adr_l tt, \ltab + \round w4, w5, w6, w7, w8, w9, w10, w11, \bsz, b +CPU_BE( rev w4, w4 ) CPU_BE( rev w5, w5 ) CPU_BE( rev w6, w6 ) CPU_BE( rev w7, w7 ) -CPU_BE( rev w8, w8 ) - stp w5, w6, [out] - stp w7, w8, [out, #8] + stp w4, w5, [out] + stp w6, w7, [out, #8] ret .endm - .align 5 + .align L1_CACHE_SHIFT + .type __aes_arm64_inverse_sbox, %object +__aes_arm64_inverse_sbox: + .byte 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38 + .byte 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb + .byte 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87 + .byte 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb + .byte 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d + .byte 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e + .byte 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2 + .byte 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25 + .byte 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16 + .byte 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92 + .byte 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda + .byte 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84 + .byte 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a + .byte 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06 + .byte 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02 + .byte 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b + .byte 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea + .byte 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73 + .byte 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85 + .byte 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e + .byte 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89 + .byte 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b + .byte 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20 + .byte 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4 + .byte 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31 + .byte 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f + .byte 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d + .byte 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef + .byte 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0 + .byte 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61 + .byte 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26 + .byte 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d + .size __aes_arm64_inverse_sbox, . 
- __aes_arm64_inverse_sbox + ENTRY(__aes_arm64_encrypt) - do_crypt fround, crypto_ft_tab, crypto_fl_tab + do_crypt fround, crypto_ft_tab, crypto_ft_tab + 1, 2 ENDPROC(__aes_arm64_encrypt) .align 5 ENTRY(__aes_arm64_decrypt) - do_crypt iround, crypto_it_tab, crypto_il_tab + do_crypt iround, crypto_it_tab, __aes_arm64_inverse_sbox, 0 ENDPROC(__aes_arm64_decrypt) diff --git a/arch/arm64/crypto/aes-ctr-fallback.h b/arch/arm64/crypto/aes-ctr-fallback.h new file mode 100644 index 000000000000..c9285717b6b5 --- /dev/null +++ b/arch/arm64/crypto/aes-ctr-fallback.h @@ -0,0 +1,53 @@ +/* + * Fallback for sync aes(ctr) in contexts where kernel mode NEON + * is not allowed + * + * Copyright (C) 2017 Linaro Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include + +asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds); + +static inline int aes_ctr_encrypt_fallback(struct crypto_aes_ctx *ctx, + struct skcipher_request *req) +{ + struct skcipher_walk walk; + u8 buf[AES_BLOCK_SIZE]; + int err; + + err = skcipher_walk_virt(&walk, req, true); + + while (walk.nbytes > 0) { + u8 *dst = walk.dst.virt.addr; + u8 *src = walk.src.virt.addr; + int nbytes = walk.nbytes; + int tail = 0; + + if (nbytes < walk.total) { + nbytes = round_down(nbytes, AES_BLOCK_SIZE); + tail = walk.nbytes % AES_BLOCK_SIZE; + } + + do { + int bsize = min(nbytes, AES_BLOCK_SIZE); + + __aes_arm64_encrypt(ctx->key_enc, buf, walk.iv, + 6 + ctx->key_length / 4); + crypto_xor_cpy(dst, src, buf, bsize); + crypto_inc(walk.iv, AES_BLOCK_SIZE); + + dst += AES_BLOCK_SIZE; + src += AES_BLOCK_SIZE; + nbytes -= AES_BLOCK_SIZE; + } while (nbytes > 0); + + err = skcipher_walk_done(&walk, tail); + } + return err; +} diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c index bcf596b0197e..998ba519a026 100644 --- a/arch/arm64/crypto/aes-glue.c +++ b/arch/arm64/crypto/aes-glue.c @@ -10,6 +10,7 @@ #include #include +#include #include #include #include @@ -19,6 +20,7 @@ #include #include "aes-ce-setkey.h" +#include "aes-ctr-fallback.h" #ifdef USE_V8_CRYPTO_EXTENSIONS #define MODE "ce" @@ -241,9 +243,7 @@ static int ctr_encrypt(struct skcipher_request *req) aes_ctr_encrypt(tail, NULL, (u8 *)ctx->key_enc, rounds, blocks, walk.iv, first); - if (tdst != tsrc) - memcpy(tdst, tsrc, nbytes); - crypto_xor(tdst, tail, nbytes); + crypto_xor_cpy(tdst, tsrc, tail, nbytes); err = skcipher_walk_done(&walk, 0); } kernel_neon_end(); @@ -251,6 +251,17 @@ static int ctr_encrypt(struct skcipher_request *req) return err; } +static int ctr_encrypt_sync(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); + + if (!may_use_simd()) + return aes_ctr_encrypt_fallback(ctx, req); + + return ctr_encrypt(req); +} + static int xts_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); @@ -357,8 +368,8 @@ static struct skcipher_alg aes_algs[] = { { .ivsize = AES_BLOCK_SIZE, .chunksize = AES_BLOCK_SIZE, .setkey = skcipher_aes_setkey, - .encrypt = ctr_encrypt, - .decrypt = ctr_encrypt, + .encrypt = ctr_encrypt_sync, + .decrypt = ctr_encrypt_sync, }, { .base = { .cra_name = "__xts(aes)", @@ -460,11 +471,35 @@ static int mac_init(struct shash_desc *desc) return 0; } +static void mac_do_update(struct crypto_aes_ctx *ctx, u8 const in[], int blocks, 
+ u8 dg[], int enc_before, int enc_after) +{ + int rounds = 6 + ctx->key_length / 4; + + if (may_use_simd()) { + kernel_neon_begin(); + aes_mac_update(in, ctx->key_enc, rounds, blocks, dg, enc_before, + enc_after); + kernel_neon_end(); + } else { + if (enc_before) + __aes_arm64_encrypt(ctx->key_enc, dg, dg, rounds); + + while (blocks--) { + crypto_xor(dg, in, AES_BLOCK_SIZE); + in += AES_BLOCK_SIZE; + + if (blocks || enc_after) + __aes_arm64_encrypt(ctx->key_enc, dg, dg, + rounds); + } + } +} + static int mac_update(struct shash_desc *desc, const u8 *p, unsigned int len) { struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); struct mac_desc_ctx *ctx = shash_desc_ctx(desc); - int rounds = 6 + tctx->key.key_length / 4; while (len > 0) { unsigned int l; @@ -476,10 +511,8 @@ static int mac_update(struct shash_desc *desc, const u8 *p, unsigned int len) len %= AES_BLOCK_SIZE; - kernel_neon_begin(); - aes_mac_update(p, tctx->key.key_enc, rounds, blocks, - ctx->dg, (ctx->len != 0), (len != 0)); - kernel_neon_end(); + mac_do_update(&tctx->key, p, blocks, ctx->dg, + (ctx->len != 0), (len != 0)); p += blocks * AES_BLOCK_SIZE; @@ -507,11 +540,8 @@ static int cbcmac_final(struct shash_desc *desc, u8 *out) { struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); struct mac_desc_ctx *ctx = shash_desc_ctx(desc); - int rounds = 6 + tctx->key.key_length / 4; - kernel_neon_begin(); - aes_mac_update(NULL, tctx->key.key_enc, rounds, 0, ctx->dg, 1, 0); - kernel_neon_end(); + mac_do_update(&tctx->key, NULL, 0, ctx->dg, 1, 0); memcpy(out, ctx->dg, AES_BLOCK_SIZE); @@ -522,7 +552,6 @@ static int cmac_final(struct shash_desc *desc, u8 *out) { struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); struct mac_desc_ctx *ctx = shash_desc_ctx(desc); - int rounds = 6 + tctx->key.key_length / 4; u8 *consts = tctx->consts; if (ctx->len != AES_BLOCK_SIZE) { @@ -530,9 +559,7 @@ static int cmac_final(struct shash_desc *desc, u8 *out) consts += AES_BLOCK_SIZE; } - kernel_neon_begin(); - aes_mac_update(consts, tctx->key.key_enc, rounds, 1, ctx->dg, 0, 1); - kernel_neon_end(); + mac_do_update(&tctx->key, consts, 1, ctx->dg, 0, 1); memcpy(out, ctx->dg, AES_BLOCK_SIZE); diff --git a/arch/arm64/crypto/aes-neonbs-glue.c b/arch/arm64/crypto/aes-neonbs-glue.c index db2501d93550..c55d68ccb89f 100644 --- a/arch/arm64/crypto/aes-neonbs-glue.c +++ b/arch/arm64/crypto/aes-neonbs-glue.c @@ -1,7 +1,7 @@ /* * Bit sliced AES using NEON instructions * - * Copyright (C) 2016 Linaro Ltd + * Copyright (C) 2016 - 2017 Linaro Ltd * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -9,12 +9,15 @@ */ #include +#include #include #include #include #include #include +#include "aes-ctr-fallback.h" + MODULE_AUTHOR("Ard Biesheuvel "); MODULE_LICENSE("GPL v2"); @@ -58,6 +61,11 @@ struct aesbs_cbc_ctx { u32 enc[AES_MAX_KEYLENGTH_U32]; }; +struct aesbs_ctr_ctx { + struct aesbs_ctx key; /* must be first member */ + struct crypto_aes_ctx fallback; +}; + struct aesbs_xts_ctx { struct aesbs_ctx key; u32 twkey[AES_MAX_KEYLENGTH_U32]; @@ -196,6 +204,25 @@ static int cbc_decrypt(struct skcipher_request *req) return err; } +static int aesbs_ctr_setkey_sync(struct crypto_skcipher *tfm, const u8 *in_key, + unsigned int key_len) +{ + struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm); + int err; + + err = crypto_aes_expand_key(&ctx->fallback, in_key, key_len); + if (err) + return err; + + ctx->key.rounds = 6 + key_len / 4; + + kernel_neon_begin(); + 
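/*
 * [Annotation, not part of the patch] The aes-ctr-fallback.h helper added
 * above, and the ctr_encrypt_sync() wrappers in both AES glue drivers,
 * fall back to plain C CTR mode when kernel-mode NEON is unusable:
 * encrypt the counter, xor the result over the data (crypto_xor_cpy),
 * then increment the counter big-endian-wise. A stand-alone sketch of
 * that loop; toy_keystream_block() stands in for the real AES core and
 * no kernel API is used.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define TOY_CTR_BLOCK 16

static void toy_keystream_block(uint8_t blk[TOY_CTR_BLOCK])
{
	for (int i = 0; i < TOY_CTR_BLOCK; i++)
		blk[i] = (uint8_t)(blk[i] * 181u + 7u);	/* placeholder cipher */
}

static void toy_ctr_inc(uint8_t ctr[TOY_CTR_BLOCK])
{
	for (int i = TOY_CTR_BLOCK - 1; i >= 0; i--)	/* big-endian +1 */
		if (++ctr[i])
			break;
}

static void toy_ctr_crypt(uint8_t *dst, const uint8_t *src, size_t len,
			  uint8_t ctr[TOY_CTR_BLOCK])
{
	uint8_t ks[TOY_CTR_BLOCK];

	while (len) {
		size_t n = len < TOY_CTR_BLOCK ? len : TOY_CTR_BLOCK;

		memcpy(ks, ctr, TOY_CTR_BLOCK);
		toy_keystream_block(ks);		/* keystream block      */
		for (size_t i = 0; i < n; i++)
			dst[i] = src[i] ^ ks[i];	/* like crypto_xor_cpy  */
		toy_ctr_inc(ctr);
		dst += n;
		src += n;
		len -= n;
	}
}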
aesbs_convert_key(ctx->key.rk, ctx->fallback.key_enc, ctx->key.rounds); + kernel_neon_end(); + + return 0; +} + static int ctr_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); @@ -224,9 +251,8 @@ static int ctr_encrypt(struct skcipher_request *req) u8 *dst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE; u8 *src = walk.src.virt.addr + blocks * AES_BLOCK_SIZE; - if (dst != src) - memcpy(dst, src, walk.total % AES_BLOCK_SIZE); - crypto_xor(dst, final, walk.total % AES_BLOCK_SIZE); + crypto_xor_cpy(dst, src, final, + walk.total % AES_BLOCK_SIZE); err = skcipher_walk_done(&walk, 0); break; @@ -260,6 +286,17 @@ static int aesbs_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key, return aesbs_setkey(tfm, in_key, key_len); } +static int ctr_encrypt_sync(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm); + + if (!may_use_simd()) + return aes_ctr_encrypt_fallback(&ctx->fallback, req); + + return ctr_encrypt(req); +} + static int __xts_crypt(struct skcipher_request *req, void (*fn)(u8 out[], u8 const in[], u8 const rk[], int rounds, int blocks, u8 iv[])) @@ -356,7 +393,7 @@ static struct skcipher_alg aes_algs[] = { { .base.cra_driver_name = "ctr-aes-neonbs", .base.cra_priority = 250 - 1, .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct aesbs_ctx), + .base.cra_ctxsize = sizeof(struct aesbs_ctr_ctx), .base.cra_module = THIS_MODULE, .min_keysize = AES_MIN_KEY_SIZE, @@ -364,9 +401,9 @@ static struct skcipher_alg aes_algs[] = { { .chunksize = AES_BLOCK_SIZE, .walksize = 8 * AES_BLOCK_SIZE, .ivsize = AES_BLOCK_SIZE, - .setkey = aesbs_setkey, - .encrypt = ctr_encrypt, - .decrypt = ctr_encrypt, + .setkey = aesbs_ctr_setkey_sync, + .encrypt = ctr_encrypt_sync, + .decrypt = ctr_encrypt_sync, }, { .base.cra_name = "__xts(aes)", .base.cra_driver_name = "__xts-aes-neonbs", diff --git a/arch/arm64/crypto/chacha20-neon-glue.c b/arch/arm64/crypto/chacha20-neon-glue.c index a7cd575ea223..cbdb75d15cd0 100644 --- a/arch/arm64/crypto/chacha20-neon-glue.c +++ b/arch/arm64/crypto/chacha20-neon-glue.c @@ -1,7 +1,7 @@ /* * ChaCha20 256-bit cipher algorithm, RFC7539, arm64 NEON functions * - * Copyright (C) 2016 Linaro, Ltd. + * Copyright (C) 2016 - 2017 Linaro, Ltd. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -26,6 +26,7 @@ #include #include +#include asmlinkage void chacha20_block_xor_neon(u32 *state, u8 *dst, const u8 *src); asmlinkage void chacha20_4block_xor_neon(u32 *state, u8 *dst, const u8 *src); @@ -64,7 +65,7 @@ static int chacha20_neon(struct skcipher_request *req) u32 state[16]; int err; - if (req->cryptlen <= CHACHA20_BLOCK_SIZE) + if (!may_use_simd() || req->cryptlen <= CHACHA20_BLOCK_SIZE) return crypto_chacha20_crypt(req); err = skcipher_walk_virt(&walk, req, true); diff --git a/arch/arm64/crypto/crc32-ce-glue.c b/arch/arm64/crypto/crc32-ce-glue.c index eccb1ae90064..624f4137918c 100644 --- a/arch/arm64/crypto/crc32-ce-glue.c +++ b/arch/arm64/crypto/crc32-ce-glue.c @@ -1,7 +1,7 @@ /* * Accelerated CRC32(C) using arm64 NEON and Crypto Extensions instructions * - * Copyright (C) 2016 Linaro Ltd + * Copyright (C) 2016 - 2017 Linaro Ltd * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -19,6 +19,7 @@ #include #include +#include #include #define PMULL_MIN_LEN 64L /* minimum size of buffer @@ -105,10 +106,10 @@ static int crc32_pmull_update(struct shash_desc *desc, const u8 *data, length -= l; } - if (length >= PMULL_MIN_LEN) { + if (length >= PMULL_MIN_LEN && may_use_simd()) { l = round_down(length, SCALE_F); - kernel_neon_begin_partial(10); + kernel_neon_begin(); *crc = crc32_pmull_le(data, l, *crc); kernel_neon_end(); @@ -137,10 +138,10 @@ static int crc32c_pmull_update(struct shash_desc *desc, const u8 *data, length -= l; } - if (length >= PMULL_MIN_LEN) { + if (length >= PMULL_MIN_LEN && may_use_simd()) { l = round_down(length, SCALE_F); - kernel_neon_begin_partial(10); + kernel_neon_begin(); *crc = crc32c_pmull_le(data, l, *crc); kernel_neon_end(); diff --git a/arch/arm64/crypto/crct10dif-ce-glue.c b/arch/arm64/crypto/crct10dif-ce-glue.c index 60cb590c2590..96f0cae4a022 100644 --- a/arch/arm64/crypto/crct10dif-ce-glue.c +++ b/arch/arm64/crypto/crct10dif-ce-glue.c @@ -1,7 +1,7 @@ /* * Accelerated CRC-T10DIF using arm64 NEON and Crypto Extensions instructions * - * Copyright (C) 2016 Linaro Ltd + * Copyright (C) 2016 - 2017 Linaro Ltd * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -18,6 +18,7 @@ #include #include +#include #define CRC_T10DIF_PMULL_CHUNK_SIZE 16U @@ -48,9 +49,13 @@ static int crct10dif_update(struct shash_desc *desc, const u8 *data, } if (length > 0) { - kernel_neon_begin_partial(14); - *crc = crc_t10dif_pmull(*crc, data, length); - kernel_neon_end(); + if (may_use_simd()) { + kernel_neon_begin(); + *crc = crc_t10dif_pmull(*crc, data, length); + kernel_neon_end(); + } else { + *crc = crc_t10dif_generic(*crc, data, length); + } } return 0; diff --git a/arch/arm64/crypto/ghash-ce-core.S b/arch/arm64/crypto/ghash-ce-core.S index f0bb9f0b524f..11ebf1ae248a 100644 --- a/arch/arm64/crypto/ghash-ce-core.S +++ b/arch/arm64/crypto/ghash-ce-core.S @@ -1,7 +1,7 @@ /* * Accelerated GHASH implementation with ARMv8 PMULL instructions. * - * Copyright (C) 2014 Linaro Ltd. + * Copyright (C) 2014 - 2017 Linaro Ltd. 
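/*
 * [Annotation, not part of the patch] The chacha20, crc32 and crct10dif
 * hunks just above all apply the same rule as the AES changes: take the
 * NEON path only when may_use_simd() reports that SIMD is usable in the
 * current context, otherwise call a generic scalar implementation.
 * A stand-alone sketch of that dispatch shape; simd_usable(), neon_path()
 * and scalar_path() are placeholders, not kernel interfaces.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool simd_usable(void)			/* models may_use_simd()     */
{
	return false;				/* e.g. running in hard IRQ  */
}

static uint32_t scalar_path(const uint8_t *d, size_t n, uint32_t s)
{
	while (n--)
		s = (s << 5) + s + *d++;	/* trivial stand-in checksum */
	return s;
}

static uint32_t neon_path(const uint8_t *d, size_t n, uint32_t s)
{
	/* in the real drivers this is the PMULL/NEON routine, bracketed by
	 * kernel_neon_begin()/kernel_neon_end() */
	return scalar_path(d, n, s);
}

static uint32_t process(const uint8_t *data, size_t len, uint32_t state)
{
	if (!simd_usable() || len < 64)		/* too short to be worth it */
		return scalar_path(data, len, state);

	return neon_path(data, len, state);
}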
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published @@ -11,31 +11,215 @@ #include #include - SHASH .req v0 - SHASH2 .req v1 - T1 .req v2 - T2 .req v3 - MASK .req v4 - XL .req v5 - XM .req v6 - XH .req v7 - IN1 .req v7 + SHASH .req v0 + SHASH2 .req v1 + T1 .req v2 + T2 .req v3 + MASK .req v4 + XL .req v5 + XM .req v6 + XH .req v7 + IN1 .req v7 + + k00_16 .req v8 + k32_48 .req v9 + + t3 .req v10 + t4 .req v11 + t5 .req v12 + t6 .req v13 + t7 .req v14 + t8 .req v15 + t9 .req v16 + + perm1 .req v17 + perm2 .req v18 + perm3 .req v19 + + sh1 .req v20 + sh2 .req v21 + sh3 .req v22 + sh4 .req v23 + + ss1 .req v24 + ss2 .req v25 + ss3 .req v26 + ss4 .req v27 .text .arch armv8-a+crypto - /* - * void pmull_ghash_update(int blocks, u64 dg[], const char *src, - * struct ghash_key const *k, const char *head) - */ -ENTRY(pmull_ghash_update) + .macro __pmull_p64, rd, rn, rm + pmull \rd\().1q, \rn\().1d, \rm\().1d + .endm + + .macro __pmull2_p64, rd, rn, rm + pmull2 \rd\().1q, \rn\().2d, \rm\().2d + .endm + + .macro __pmull_p8, rq, ad, bd + ext t3.8b, \ad\().8b, \ad\().8b, #1 // A1 + ext t5.8b, \ad\().8b, \ad\().8b, #2 // A2 + ext t7.8b, \ad\().8b, \ad\().8b, #3 // A3 + + __pmull_p8_\bd \rq, \ad + .endm + + .macro __pmull2_p8, rq, ad, bd + tbl t3.16b, {\ad\().16b}, perm1.16b // A1 + tbl t5.16b, {\ad\().16b}, perm2.16b // A2 + tbl t7.16b, {\ad\().16b}, perm3.16b // A3 + + __pmull2_p8_\bd \rq, \ad + .endm + + .macro __pmull_p8_SHASH, rq, ad + __pmull_p8_tail \rq, \ad\().8b, SHASH.8b, 8b,, sh1, sh2, sh3, sh4 + .endm + + .macro __pmull_p8_SHASH2, rq, ad + __pmull_p8_tail \rq, \ad\().8b, SHASH2.8b, 8b,, ss1, ss2, ss3, ss4 + .endm + + .macro __pmull2_p8_SHASH, rq, ad + __pmull_p8_tail \rq, \ad\().16b, SHASH.16b, 16b, 2, sh1, sh2, sh3, sh4 + .endm + + .macro __pmull_p8_tail, rq, ad, bd, nb, t, b1, b2, b3, b4 + pmull\t t3.8h, t3.\nb, \bd // F = A1*B + pmull\t t4.8h, \ad, \b1\().\nb // E = A*B1 + pmull\t t5.8h, t5.\nb, \bd // H = A2*B + pmull\t t6.8h, \ad, \b2\().\nb // G = A*B2 + pmull\t t7.8h, t7.\nb, \bd // J = A3*B + pmull\t t8.8h, \ad, \b3\().\nb // I = A*B3 + pmull\t t9.8h, \ad, \b4\().\nb // K = A*B4 + pmull\t \rq\().8h, \ad, \bd // D = A*B + + eor t3.16b, t3.16b, t4.16b // L = E + F + eor t5.16b, t5.16b, t6.16b // M = G + H + eor t7.16b, t7.16b, t8.16b // N = I + J + + uzp1 t4.2d, t3.2d, t5.2d + uzp2 t3.2d, t3.2d, t5.2d + uzp1 t6.2d, t7.2d, t9.2d + uzp2 t7.2d, t7.2d, t9.2d + + // t3 = (L) (P0 + P1) << 8 + // t5 = (M) (P2 + P3) << 16 + eor t4.16b, t4.16b, t3.16b + and t3.16b, t3.16b, k32_48.16b + + // t7 = (N) (P4 + P5) << 24 + // t9 = (K) (P6 + P7) << 32 + eor t6.16b, t6.16b, t7.16b + and t7.16b, t7.16b, k00_16.16b + + eor t4.16b, t4.16b, t3.16b + eor t6.16b, t6.16b, t7.16b + + zip2 t5.2d, t4.2d, t3.2d + zip1 t3.2d, t4.2d, t3.2d + zip2 t9.2d, t6.2d, t7.2d + zip1 t7.2d, t6.2d, t7.2d + + ext t3.16b, t3.16b, t3.16b, #15 + ext t5.16b, t5.16b, t5.16b, #14 + ext t7.16b, t7.16b, t7.16b, #13 + ext t9.16b, t9.16b, t9.16b, #12 + + eor t3.16b, t3.16b, t5.16b + eor t7.16b, t7.16b, t9.16b + eor \rq\().16b, \rq\().16b, t3.16b + eor \rq\().16b, \rq\().16b, t7.16b + .endm + + .macro __pmull_pre_p64 + movi MASK.16b, #0xe1 + shl MASK.2d, MASK.2d, #57 + .endm + + .macro __pmull_pre_p8 + // k00_16 := 0x0000000000000000_000000000000ffff + // k32_48 := 0x00000000ffffffff_0000ffffffffffff + movi k32_48.2d, #0xffffffff + mov k32_48.h[2], k32_48.h[0] + ushr k00_16.2d, k32_48.2d, #32 + + // prepare the permutation vectors + mov_q x5, 
0x080f0e0d0c0b0a09 + movi T1.8b, #8 + dup perm1.2d, x5 + eor perm1.16b, perm1.16b, T1.16b + ushr perm2.2d, perm1.2d, #8 + ushr perm3.2d, perm1.2d, #16 + ushr T1.2d, perm1.2d, #24 + sli perm2.2d, perm1.2d, #56 + sli perm3.2d, perm1.2d, #48 + sli T1.2d, perm1.2d, #40 + + // precompute loop invariants + tbl sh1.16b, {SHASH.16b}, perm1.16b + tbl sh2.16b, {SHASH.16b}, perm2.16b + tbl sh3.16b, {SHASH.16b}, perm3.16b + tbl sh4.16b, {SHASH.16b}, T1.16b + ext ss1.8b, SHASH2.8b, SHASH2.8b, #1 + ext ss2.8b, SHASH2.8b, SHASH2.8b, #2 + ext ss3.8b, SHASH2.8b, SHASH2.8b, #3 + ext ss4.8b, SHASH2.8b, SHASH2.8b, #4 + .endm + + // + // PMULL (64x64->128) based reduction for CPUs that can do + // it in a single instruction. + // + .macro __pmull_reduce_p64 + pmull T2.1q, XL.1d, MASK.1d + eor XM.16b, XM.16b, T1.16b + + mov XH.d[0], XM.d[1] + mov XM.d[1], XL.d[0] + + eor XL.16b, XM.16b, T2.16b + ext T2.16b, XL.16b, XL.16b, #8 + pmull XL.1q, XL.1d, MASK.1d + .endm + + // + // Alternative reduction for CPUs that lack support for the + // 64x64->128 PMULL instruction + // + .macro __pmull_reduce_p8 + eor XM.16b, XM.16b, T1.16b + + mov XL.d[1], XM.d[0] + mov XH.d[0], XM.d[1] + + shl T1.2d, XL.2d, #57 + shl T2.2d, XL.2d, #62 + eor T2.16b, T2.16b, T1.16b + shl T1.2d, XL.2d, #63 + eor T2.16b, T2.16b, T1.16b + ext T1.16b, XL.16b, XH.16b, #8 + eor T2.16b, T2.16b, T1.16b + + mov XL.d[1], T2.d[0] + mov XH.d[0], T2.d[1] + + ushr T2.2d, XL.2d, #1 + eor XH.16b, XH.16b, XL.16b + eor XL.16b, XL.16b, T2.16b + ushr T2.2d, T2.2d, #6 + ushr XL.2d, XL.2d, #1 + .endm + + .macro __pmull_ghash, pn ld1 {SHASH.2d}, [x3] ld1 {XL.2d}, [x1] - movi MASK.16b, #0xe1 ext SHASH2.16b, SHASH.16b, SHASH.16b, #8 - shl MASK.2d, MASK.2d, #57 eor SHASH2.16b, SHASH2.16b, SHASH.16b + __pmull_pre_\pn + /* do the head block first, if supplied */ cbz x4, 0f ld1 {T1.2d}, [x4] @@ -52,23 +236,17 @@ CPU_LE( rev64 T1.16b, T1.16b ) eor T1.16b, T1.16b, T2.16b eor XL.16b, XL.16b, IN1.16b - pmull2 XH.1q, SHASH.2d, XL.2d // a1 * b1 + __pmull2_\pn XH, XL, SHASH // a1 * b1 eor T1.16b, T1.16b, XL.16b - pmull XL.1q, SHASH.1d, XL.1d // a0 * b0 - pmull XM.1q, SHASH2.1d, T1.1d // (a1 + a0)(b1 + b0) + __pmull_\pn XL, XL, SHASH // a0 * b0 + __pmull_\pn XM, T1, SHASH2 // (a1 + a0)(b1 + b0) - ext T1.16b, XL.16b, XH.16b, #8 eor T2.16b, XL.16b, XH.16b - eor XM.16b, XM.16b, T1.16b + ext T1.16b, XL.16b, XH.16b, #8 eor XM.16b, XM.16b, T2.16b - pmull T2.1q, XL.1d, MASK.1d - mov XH.d[0], XM.d[1] - mov XM.d[1], XL.d[0] + __pmull_reduce_\pn - eor XL.16b, XM.16b, T2.16b - ext T2.16b, XL.16b, XL.16b, #8 - pmull XL.1q, XL.1d, MASK.1d eor T2.16b, T2.16b, XH.16b eor XL.16b, XL.16b, T2.16b @@ -76,4 +254,191 @@ CPU_LE( rev64 T1.16b, T1.16b ) st1 {XL.2d}, [x1] ret -ENDPROC(pmull_ghash_update) + .endm + + /* + * void pmull_ghash_update(int blocks, u64 dg[], const char *src, + * struct ghash_key const *k, const char *head) + */ +ENTRY(pmull_ghash_update_p64) + __pmull_ghash p64 +ENDPROC(pmull_ghash_update_p64) + +ENTRY(pmull_ghash_update_p8) + __pmull_ghash p8 +ENDPROC(pmull_ghash_update_p8) + + KS .req v8 + CTR .req v9 + INP .req v10 + + .macro load_round_keys, rounds, rk + cmp \rounds, #12 + blo 2222f /* 128 bits */ + beq 1111f /* 192 bits */ + ld1 {v17.4s-v18.4s}, [\rk], #32 +1111: ld1 {v19.4s-v20.4s}, [\rk], #32 +2222: ld1 {v21.4s-v24.4s}, [\rk], #64 + ld1 {v25.4s-v28.4s}, [\rk], #64 + ld1 {v29.4s-v31.4s}, [\rk] + .endm + + .macro enc_round, state, key + aese \state\().16b, \key\().16b + aesmc \state\().16b, \state\().16b + .endm + + .macro enc_block, state, rounds + cmp \rounds, #12 + b.lo 
2222f /* 128 bits */ + b.eq 1111f /* 192 bits */ + enc_round \state, v17 + enc_round \state, v18 +1111: enc_round \state, v19 + enc_round \state, v20 +2222: .irp key, v21, v22, v23, v24, v25, v26, v27, v28, v29 + enc_round \state, \key + .endr + aese \state\().16b, v30.16b + eor \state\().16b, \state\().16b, v31.16b + .endm + + .macro pmull_gcm_do_crypt, enc + ld1 {SHASH.2d}, [x4] + ld1 {XL.2d}, [x1] + ldr x8, [x5, #8] // load lower counter + + movi MASK.16b, #0xe1 + ext SHASH2.16b, SHASH.16b, SHASH.16b, #8 +CPU_LE( rev x8, x8 ) + shl MASK.2d, MASK.2d, #57 + eor SHASH2.16b, SHASH2.16b, SHASH.16b + + .if \enc == 1 + ld1 {KS.16b}, [x7] + .endif + +0: ld1 {CTR.8b}, [x5] // load upper counter + ld1 {INP.16b}, [x3], #16 + rev x9, x8 + add x8, x8, #1 + sub w0, w0, #1 + ins CTR.d[1], x9 // set lower counter + + .if \enc == 1 + eor INP.16b, INP.16b, KS.16b // encrypt input + st1 {INP.16b}, [x2], #16 + .endif + + rev64 T1.16b, INP.16b + + cmp w6, #12 + b.ge 2f // AES-192/256? + +1: enc_round CTR, v21 + + ext T2.16b, XL.16b, XL.16b, #8 + ext IN1.16b, T1.16b, T1.16b, #8 + + enc_round CTR, v22 + + eor T1.16b, T1.16b, T2.16b + eor XL.16b, XL.16b, IN1.16b + + enc_round CTR, v23 + + pmull2 XH.1q, SHASH.2d, XL.2d // a1 * b1 + eor T1.16b, T1.16b, XL.16b + + enc_round CTR, v24 + + pmull XL.1q, SHASH.1d, XL.1d // a0 * b0 + pmull XM.1q, SHASH2.1d, T1.1d // (a1 + a0)(b1 + b0) + + enc_round CTR, v25 + + ext T1.16b, XL.16b, XH.16b, #8 + eor T2.16b, XL.16b, XH.16b + eor XM.16b, XM.16b, T1.16b + + enc_round CTR, v26 + + eor XM.16b, XM.16b, T2.16b + pmull T2.1q, XL.1d, MASK.1d + + enc_round CTR, v27 + + mov XH.d[0], XM.d[1] + mov XM.d[1], XL.d[0] + + enc_round CTR, v28 + + eor XL.16b, XM.16b, T2.16b + + enc_round CTR, v29 + + ext T2.16b, XL.16b, XL.16b, #8 + + aese CTR.16b, v30.16b + + pmull XL.1q, XL.1d, MASK.1d + eor T2.16b, T2.16b, XH.16b + + eor KS.16b, CTR.16b, v31.16b + + eor XL.16b, XL.16b, T2.16b + + .if \enc == 0 + eor INP.16b, INP.16b, KS.16b + st1 {INP.16b}, [x2], #16 + .endif + + cbnz w0, 0b + +CPU_LE( rev x8, x8 ) + st1 {XL.2d}, [x1] + str x8, [x5, #8] // store lower counter + + .if \enc == 1 + st1 {KS.16b}, [x7] + .endif + + ret + +2: b.eq 3f // AES-192? + enc_round CTR, v17 + enc_round CTR, v18 +3: enc_round CTR, v19 + enc_round CTR, v20 + b 1b + .endm + + /* + * void pmull_gcm_encrypt(int blocks, u64 dg[], u8 dst[], const u8 src[], + * struct ghash_key const *k, u8 ctr[], + * int rounds, u8 ks[]) + */ +ENTRY(pmull_gcm_encrypt) + pmull_gcm_do_crypt 1 +ENDPROC(pmull_gcm_encrypt) + + /* + * void pmull_gcm_decrypt(int blocks, u64 dg[], u8 dst[], const u8 src[], + * struct ghash_key const *k, u8 ctr[], + * int rounds) + */ +ENTRY(pmull_gcm_decrypt) + pmull_gcm_do_crypt 0 +ENDPROC(pmull_gcm_decrypt) + + /* + * void pmull_gcm_encrypt_block(u8 dst[], u8 src[], u8 rk[], int rounds) + */ +ENTRY(pmull_gcm_encrypt_block) + cbz x2, 0f + load_round_keys w3, x2 +0: ld1 {v0.16b}, [x1] + enc_block v0, w3 + st1 {v0.16b}, [x0] + ret +ENDPROC(pmull_gcm_encrypt_block) diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c index 833ec1e3f3e9..cfc9c92814fd 100644 --- a/arch/arm64/crypto/ghash-ce-glue.c +++ b/arch/arm64/crypto/ghash-ce-glue.c @@ -1,7 +1,7 @@ /* * Accelerated GHASH implementation with ARMv8 PMULL instructions. * - * Copyright (C) 2014 Linaro Ltd. + * Copyright (C) 2014 - 2017 Linaro Ltd. 
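/*
 * [Annotation, not part of the patch] The __ghash_setkey() change further
 * below pre-multiplies the hash key by 'x' in GF(2^128) so the PMULL code
 * can work on a shifted representation. The sketch shows that doubling in
 * conventional big-endian polynomial form, reducing by
 * x^128 + x^7 + x^2 + x + 1; the driver's constant (0xc2 << 56) differs
 * because it uses a bit-reflected/shifted layout tuned for PMULL.
 */
#include <stdint.h>

struct toy_gf128 {
	uint64_t hi;	/* most significant 64 coefficients  */
	uint64_t lo;	/* least significant 64 coefficients */
};

static void toy_gf128_mul_x(struct toy_gf128 *v)
{
	uint64_t carry = v->hi >> 63;		/* coefficient of x^127      */

	v->hi = (v->hi << 1) | (v->lo >> 63);
	v->lo <<= 1;
	if (carry)
		v->lo ^= 0x87;			/* x^128 == x^7 + x^2 + x + 1 */
}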
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published @@ -9,22 +9,33 @@ */ #include +#include #include +#include +#include +#include +#include +#include #include +#include +#include #include #include #include -MODULE_DESCRIPTION("GHASH secure hash using ARMv8 Crypto Extensions"); +MODULE_DESCRIPTION("GHASH and AES-GCM using ARMv8 Crypto Extensions"); MODULE_AUTHOR("Ard Biesheuvel "); MODULE_LICENSE("GPL v2"); +MODULE_ALIAS_CRYPTO("ghash"); #define GHASH_BLOCK_SIZE 16 #define GHASH_DIGEST_SIZE 16 +#define GCM_IV_SIZE 12 struct ghash_key { u64 a; u64 b; + be128 k; }; struct ghash_desc_ctx { @@ -33,8 +44,35 @@ struct ghash_desc_ctx { u32 count; }; -asmlinkage void pmull_ghash_update(int blocks, u64 dg[], const char *src, - struct ghash_key const *k, const char *head); +struct gcm_aes_ctx { + struct crypto_aes_ctx aes_key; + struct ghash_key ghash_key; +}; + +asmlinkage void pmull_ghash_update_p64(int blocks, u64 dg[], const char *src, + struct ghash_key const *k, + const char *head); + +asmlinkage void pmull_ghash_update_p8(int blocks, u64 dg[], const char *src, + struct ghash_key const *k, + const char *head); + +static void (*pmull_ghash_update)(int blocks, u64 dg[], const char *src, + struct ghash_key const *k, + const char *head); + +asmlinkage void pmull_gcm_encrypt(int blocks, u64 dg[], u8 dst[], + const u8 src[], struct ghash_key const *k, + u8 ctr[], int rounds, u8 ks[]); + +asmlinkage void pmull_gcm_decrypt(int blocks, u64 dg[], u8 dst[], + const u8 src[], struct ghash_key const *k, + u8 ctr[], int rounds); + +asmlinkage void pmull_gcm_encrypt_block(u8 dst[], u8 const src[], + u32 const rk[], int rounds); + +asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds); static int ghash_init(struct shash_desc *desc) { @@ -44,6 +82,36 @@ static int ghash_init(struct shash_desc *desc) return 0; } +static void ghash_do_update(int blocks, u64 dg[], const char *src, + struct ghash_key *key, const char *head) +{ + if (likely(may_use_simd())) { + kernel_neon_begin(); + pmull_ghash_update(blocks, dg, src, key, head); + kernel_neon_end(); + } else { + be128 dst = { cpu_to_be64(dg[1]), cpu_to_be64(dg[0]) }; + + do { + const u8 *in = src; + + if (head) { + in = head; + blocks++; + head = NULL; + } else { + src += GHASH_BLOCK_SIZE; + } + + crypto_xor((u8 *)&dst, in, GHASH_BLOCK_SIZE); + gf128mul_lle(&dst, &key->k); + } while (--blocks); + + dg[0] = be64_to_cpu(dst.b); + dg[1] = be64_to_cpu(dst.a); + } +} + static int ghash_update(struct shash_desc *desc, const u8 *src, unsigned int len) { @@ -67,10 +135,9 @@ static int ghash_update(struct shash_desc *desc, const u8 *src, blocks = len / GHASH_BLOCK_SIZE; len %= GHASH_BLOCK_SIZE; - kernel_neon_begin_partial(8); - pmull_ghash_update(blocks, ctx->digest, src, key, - partial ? ctx->buf : NULL); - kernel_neon_end(); + ghash_do_update(blocks, ctx->digest, src, key, + partial ? 
ctx->buf : NULL); + src += blocks * GHASH_BLOCK_SIZE; partial = 0; } @@ -89,9 +156,7 @@ static int ghash_final(struct shash_desc *desc, u8 *dst) memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial); - kernel_neon_begin_partial(8); - pmull_ghash_update(1, ctx->digest, ctx->buf, key, NULL); - kernel_neon_end(); + ghash_do_update(1, ctx->digest, ctx->buf, key, NULL); } put_unaligned_be64(ctx->digest[1], dst); put_unaligned_be64(ctx->digest[0], dst + 8); @@ -100,16 +165,13 @@ static int ghash_final(struct shash_desc *desc, u8 *dst) return 0; } -static int ghash_setkey(struct crypto_shash *tfm, - const u8 *inkey, unsigned int keylen) +static int __ghash_setkey(struct ghash_key *key, + const u8 *inkey, unsigned int keylen) { - struct ghash_key *key = crypto_shash_ctx(tfm); u64 a, b; - if (keylen != GHASH_BLOCK_SIZE) { - crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } + /* needed for the fallback */ + memcpy(&key->k, inkey, GHASH_BLOCK_SIZE); /* perform multiplication by 'x' in GF(2^128) */ b = get_unaligned_be64(inkey); @@ -124,33 +186,418 @@ static int ghash_setkey(struct crypto_shash *tfm, return 0; } +static int ghash_setkey(struct crypto_shash *tfm, + const u8 *inkey, unsigned int keylen) +{ + struct ghash_key *key = crypto_shash_ctx(tfm); + + if (keylen != GHASH_BLOCK_SIZE) { + crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + return __ghash_setkey(key, inkey, keylen); +} + static struct shash_alg ghash_alg = { - .digestsize = GHASH_DIGEST_SIZE, - .init = ghash_init, - .update = ghash_update, - .final = ghash_final, - .setkey = ghash_setkey, - .descsize = sizeof(struct ghash_desc_ctx), - .base = { - .cra_name = "ghash", - .cra_driver_name = "ghash-ce", - .cra_priority = 200, - .cra_flags = CRYPTO_ALG_TYPE_SHASH, - .cra_blocksize = GHASH_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct ghash_key), - .cra_module = THIS_MODULE, - }, + .base.cra_name = "ghash", + .base.cra_driver_name = "ghash-ce", + .base.cra_priority = 200, + .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, + .base.cra_blocksize = GHASH_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct ghash_key), + .base.cra_module = THIS_MODULE, + + .digestsize = GHASH_DIGEST_SIZE, + .init = ghash_init, + .update = ghash_update, + .final = ghash_final, + .setkey = ghash_setkey, + .descsize = sizeof(struct ghash_desc_ctx), +}; + +static int num_rounds(struct crypto_aes_ctx *ctx) +{ + /* + * # of rounds specified by AES: + * 128 bit key 10 rounds + * 192 bit key 12 rounds + * 256 bit key 14 rounds + * => n byte key => 6 + (n/4) rounds + */ + return 6 + ctx->key_length / 4; +} + +static int gcm_setkey(struct crypto_aead *tfm, const u8 *inkey, + unsigned int keylen) +{ + struct gcm_aes_ctx *ctx = crypto_aead_ctx(tfm); + u8 key[GHASH_BLOCK_SIZE]; + int ret; + + ret = crypto_aes_expand_key(&ctx->aes_key, inkey, keylen); + if (ret) { + tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + return -EINVAL; + } + + __aes_arm64_encrypt(ctx->aes_key.key_enc, key, (u8[AES_BLOCK_SIZE]){}, + num_rounds(&ctx->aes_key)); + + return __ghash_setkey(&ctx->ghash_key, key, sizeof(key)); +} + +static int gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) +{ + switch (authsize) { + case 4: + case 8: + case 12 ... 
16: + break; + default: + return -EINVAL; + } + return 0; +} + +static void gcm_update_mac(u64 dg[], const u8 *src, int count, u8 buf[], + int *buf_count, struct gcm_aes_ctx *ctx) +{ + if (*buf_count > 0) { + int buf_added = min(count, GHASH_BLOCK_SIZE - *buf_count); + + memcpy(&buf[*buf_count], src, buf_added); + + *buf_count += buf_added; + src += buf_added; + count -= buf_added; + } + + if (count >= GHASH_BLOCK_SIZE || *buf_count == GHASH_BLOCK_SIZE) { + int blocks = count / GHASH_BLOCK_SIZE; + + ghash_do_update(blocks, dg, src, &ctx->ghash_key, + *buf_count ? buf : NULL); + + src += blocks * GHASH_BLOCK_SIZE; + count %= GHASH_BLOCK_SIZE; + *buf_count = 0; + } + + if (count > 0) { + memcpy(buf, src, count); + *buf_count = count; + } +} + +static void gcm_calculate_auth_mac(struct aead_request *req, u64 dg[]) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead); + u8 buf[GHASH_BLOCK_SIZE]; + struct scatter_walk walk; + u32 len = req->assoclen; + int buf_count = 0; + + scatterwalk_start(&walk, req->src); + + do { + u32 n = scatterwalk_clamp(&walk, len); + u8 *p; + + if (!n) { + scatterwalk_start(&walk, sg_next(walk.sg)); + n = scatterwalk_clamp(&walk, len); + } + p = scatterwalk_map(&walk); + + gcm_update_mac(dg, p, n, buf, &buf_count, ctx); + len -= n; + + scatterwalk_unmap(p); + scatterwalk_advance(&walk, n); + scatterwalk_done(&walk, 0, len); + } while (len); + + if (buf_count) { + memset(&buf[buf_count], 0, GHASH_BLOCK_SIZE - buf_count); + ghash_do_update(1, dg, buf, &ctx->ghash_key, NULL); + } +} + +static void gcm_final(struct aead_request *req, struct gcm_aes_ctx *ctx, + u64 dg[], u8 tag[], int cryptlen) +{ + u8 mac[AES_BLOCK_SIZE]; + u128 lengths; + + lengths.a = cpu_to_be64(req->assoclen * 8); + lengths.b = cpu_to_be64(cryptlen * 8); + + ghash_do_update(1, dg, (void *)&lengths, &ctx->ghash_key, NULL); + + put_unaligned_be64(dg[1], mac); + put_unaligned_be64(dg[0], mac + 8); + + crypto_xor(tag, mac, AES_BLOCK_SIZE); +} + +static int gcm_encrypt(struct aead_request *req) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead); + struct skcipher_walk walk; + u8 iv[AES_BLOCK_SIZE]; + u8 ks[AES_BLOCK_SIZE]; + u8 tag[AES_BLOCK_SIZE]; + u64 dg[2] = {}; + int err; + + if (req->assoclen) + gcm_calculate_auth_mac(req, dg); + + memcpy(iv, req->iv, GCM_IV_SIZE); + put_unaligned_be32(1, iv + GCM_IV_SIZE); + + if (likely(may_use_simd())) { + kernel_neon_begin(); + + pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc, + num_rounds(&ctx->aes_key)); + put_unaligned_be32(2, iv + GCM_IV_SIZE); + pmull_gcm_encrypt_block(ks, iv, NULL, + num_rounds(&ctx->aes_key)); + put_unaligned_be32(3, iv + GCM_IV_SIZE); + + err = skcipher_walk_aead_encrypt(&walk, req, true); + + while (walk.nbytes >= AES_BLOCK_SIZE) { + int blocks = walk.nbytes / AES_BLOCK_SIZE; + + pmull_gcm_encrypt(blocks, dg, walk.dst.virt.addr, + walk.src.virt.addr, &ctx->ghash_key, + iv, num_rounds(&ctx->aes_key), ks); + + err = skcipher_walk_done(&walk, + walk.nbytes % AES_BLOCK_SIZE); + } + kernel_neon_end(); + } else { + __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, + num_rounds(&ctx->aes_key)); + put_unaligned_be32(2, iv + GCM_IV_SIZE); + + err = skcipher_walk_aead_encrypt(&walk, req, true); + + while (walk.nbytes >= AES_BLOCK_SIZE) { + int blocks = walk.nbytes / AES_BLOCK_SIZE; + u8 *dst = walk.dst.virt.addr; + u8 *src = walk.src.virt.addr; + + do { + __aes_arm64_encrypt(ctx->aes_key.key_enc, + ks, iv, + 
num_rounds(&ctx->aes_key)); + crypto_xor_cpy(dst, src, ks, AES_BLOCK_SIZE); + crypto_inc(iv, AES_BLOCK_SIZE); + + dst += AES_BLOCK_SIZE; + src += AES_BLOCK_SIZE; + } while (--blocks > 0); + + ghash_do_update(walk.nbytes / AES_BLOCK_SIZE, dg, + walk.dst.virt.addr, &ctx->ghash_key, + NULL); + + err = skcipher_walk_done(&walk, + walk.nbytes % AES_BLOCK_SIZE); + } + if (walk.nbytes) + __aes_arm64_encrypt(ctx->aes_key.key_enc, ks, iv, + num_rounds(&ctx->aes_key)); + } + + /* handle the tail */ + if (walk.nbytes) { + u8 buf[GHASH_BLOCK_SIZE]; + + crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr, ks, + walk.nbytes); + + memcpy(buf, walk.dst.virt.addr, walk.nbytes); + memset(buf + walk.nbytes, 0, GHASH_BLOCK_SIZE - walk.nbytes); + ghash_do_update(1, dg, buf, &ctx->ghash_key, NULL); + + err = skcipher_walk_done(&walk, 0); + } + + if (err) + return err; + + gcm_final(req, ctx, dg, tag, req->cryptlen); + + /* copy authtag to end of dst */ + scatterwalk_map_and_copy(tag, req->dst, req->assoclen + req->cryptlen, + crypto_aead_authsize(aead), 1); + + return 0; +} + +static int gcm_decrypt(struct aead_request *req) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead); + unsigned int authsize = crypto_aead_authsize(aead); + struct skcipher_walk walk; + u8 iv[AES_BLOCK_SIZE]; + u8 tag[AES_BLOCK_SIZE]; + u8 buf[GHASH_BLOCK_SIZE]; + u64 dg[2] = {}; + int err; + + if (req->assoclen) + gcm_calculate_auth_mac(req, dg); + + memcpy(iv, req->iv, GCM_IV_SIZE); + put_unaligned_be32(1, iv + GCM_IV_SIZE); + + if (likely(may_use_simd())) { + kernel_neon_begin(); + + pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc, + num_rounds(&ctx->aes_key)); + put_unaligned_be32(2, iv + GCM_IV_SIZE); + + err = skcipher_walk_aead_decrypt(&walk, req, true); + + while (walk.nbytes >= AES_BLOCK_SIZE) { + int blocks = walk.nbytes / AES_BLOCK_SIZE; + + pmull_gcm_decrypt(blocks, dg, walk.dst.virt.addr, + walk.src.virt.addr, &ctx->ghash_key, + iv, num_rounds(&ctx->aes_key)); + + err = skcipher_walk_done(&walk, + walk.nbytes % AES_BLOCK_SIZE); + } + if (walk.nbytes) + pmull_gcm_encrypt_block(iv, iv, NULL, + num_rounds(&ctx->aes_key)); + + kernel_neon_end(); + } else { + __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, + num_rounds(&ctx->aes_key)); + put_unaligned_be32(2, iv + GCM_IV_SIZE); + + err = skcipher_walk_aead_decrypt(&walk, req, true); + + while (walk.nbytes >= AES_BLOCK_SIZE) { + int blocks = walk.nbytes / AES_BLOCK_SIZE; + u8 *dst = walk.dst.virt.addr; + u8 *src = walk.src.virt.addr; + + ghash_do_update(blocks, dg, walk.src.virt.addr, + &ctx->ghash_key, NULL); + + do { + __aes_arm64_encrypt(ctx->aes_key.key_enc, + buf, iv, + num_rounds(&ctx->aes_key)); + crypto_xor_cpy(dst, src, buf, AES_BLOCK_SIZE); + crypto_inc(iv, AES_BLOCK_SIZE); + + dst += AES_BLOCK_SIZE; + src += AES_BLOCK_SIZE; + } while (--blocks > 0); + + err = skcipher_walk_done(&walk, + walk.nbytes % AES_BLOCK_SIZE); + } + if (walk.nbytes) + __aes_arm64_encrypt(ctx->aes_key.key_enc, iv, iv, + num_rounds(&ctx->aes_key)); + } + + /* handle the tail */ + if (walk.nbytes) { + memcpy(buf, walk.src.virt.addr, walk.nbytes); + memset(buf + walk.nbytes, 0, GHASH_BLOCK_SIZE - walk.nbytes); + ghash_do_update(1, dg, buf, &ctx->ghash_key, NULL); + + crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr, iv, + walk.nbytes); + + err = skcipher_walk_done(&walk, 0); + } + + if (err) + return err; + + gcm_final(req, ctx, dg, tag, req->cryptlen - authsize); + + /* compare calculated auth tag with the stored one */ + 
scatterwalk_map_and_copy(buf, req->src, + req->assoclen + req->cryptlen - authsize, + authsize, 0); + + if (crypto_memneq(tag, buf, authsize)) + return -EBADMSG; + return 0; +} + +static struct aead_alg gcm_aes_alg = { + .ivsize = GCM_IV_SIZE, + .chunksize = AES_BLOCK_SIZE, + .maxauthsize = AES_BLOCK_SIZE, + .setkey = gcm_setkey, + .setauthsize = gcm_setauthsize, + .encrypt = gcm_encrypt, + .decrypt = gcm_decrypt, + + .base.cra_name = "gcm(aes)", + .base.cra_driver_name = "gcm-aes-ce", + .base.cra_priority = 300, + .base.cra_blocksize = 1, + .base.cra_ctxsize = sizeof(struct gcm_aes_ctx), + .base.cra_module = THIS_MODULE, }; static int __init ghash_ce_mod_init(void) { - return crypto_register_shash(&ghash_alg); + int ret; + + if (!(elf_hwcap & HWCAP_ASIMD)) + return -ENODEV; + + if (elf_hwcap & HWCAP_PMULL) + pmull_ghash_update = pmull_ghash_update_p64; + + else + pmull_ghash_update = pmull_ghash_update_p8; + + ret = crypto_register_shash(&ghash_alg); + if (ret) + return ret; + + if (elf_hwcap & HWCAP_PMULL) { + ret = crypto_register_aead(&gcm_aes_alg); + if (ret) + crypto_unregister_shash(&ghash_alg); + } + return ret; } static void __exit ghash_ce_mod_exit(void) { crypto_unregister_shash(&ghash_alg); + crypto_unregister_aead(&gcm_aes_alg); } -module_cpu_feature_match(PMULL, ghash_ce_mod_init); +static const struct cpu_feature ghash_cpu_feature[] = { + { cpu_feature(PMULL) }, { } +}; +MODULE_DEVICE_TABLE(cpu, ghash_cpu_feature); + +module_init(ghash_ce_mod_init); module_exit(ghash_ce_mod_exit); diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c index ea319c055f5d..efbeb3e0dcfb 100644 --- a/arch/arm64/crypto/sha1-ce-glue.c +++ b/arch/arm64/crypto/sha1-ce-glue.c @@ -1,7 +1,7 @@ /* * sha1-ce-glue.c - SHA-1 secure hash using ARMv8 Crypto Extensions * - * Copyright (C) 2014 Linaro Ltd + * Copyright (C) 2014 - 2017 Linaro Ltd * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -9,6 +9,7 @@ */ #include +#include #include #include #include @@ -37,8 +38,11 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data, { struct sha1_ce_state *sctx = shash_desc_ctx(desc); + if (!may_use_simd()) + return crypto_sha1_update(desc, data, len); + sctx->finalize = 0; - kernel_neon_begin_partial(16); + kernel_neon_begin(); sha1_base_do_update(desc, data, len, (sha1_block_fn *)sha1_ce_transform); kernel_neon_end(); @@ -52,13 +56,16 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data, struct sha1_ce_state *sctx = shash_desc_ctx(desc); bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE); + if (!may_use_simd()) + return crypto_sha1_finup(desc, data, len, out); + /* * Allow the asm code to perform the finalization if there is no * partial data and the input is a round multiple of the block size. 
*/ sctx->finalize = finalize; - kernel_neon_begin_partial(16); + kernel_neon_begin(); sha1_base_do_update(desc, data, len, (sha1_block_fn *)sha1_ce_transform); if (!finalize) @@ -71,8 +78,11 @@ static int sha1_ce_final(struct shash_desc *desc, u8 *out) { struct sha1_ce_state *sctx = shash_desc_ctx(desc); + if (!may_use_simd()) + return crypto_sha1_finup(desc, NULL, 0, out); + sctx->finalize = 0; - kernel_neon_begin_partial(16); + kernel_neon_begin(); sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform); kernel_neon_end(); return sha1_base_finish(desc, out); diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c index 0ed9486f75dd..fd1ff2b13dfa 100644 --- a/arch/arm64/crypto/sha2-ce-glue.c +++ b/arch/arm64/crypto/sha2-ce-glue.c @@ -1,7 +1,7 @@ /* * sha2-ce-glue.c - SHA-224/SHA-256 using ARMv8 Crypto Extensions * - * Copyright (C) 2014 Linaro Ltd + * Copyright (C) 2014 - 2017 Linaro Ltd * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -9,6 +9,7 @@ */ #include +#include #include #include #include @@ -34,13 +35,19 @@ const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state, const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state, finalize); +asmlinkage void sha256_block_data_order(u32 *digest, u8 const *src, int blocks); + static int sha256_ce_update(struct shash_desc *desc, const u8 *data, unsigned int len) { struct sha256_ce_state *sctx = shash_desc_ctx(desc); + if (!may_use_simd()) + return sha256_base_do_update(desc, data, len, + (sha256_block_fn *)sha256_block_data_order); + sctx->finalize = 0; - kernel_neon_begin_partial(28); + kernel_neon_begin(); sha256_base_do_update(desc, data, len, (sha256_block_fn *)sha2_ce_transform); kernel_neon_end(); @@ -54,13 +61,22 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data, struct sha256_ce_state *sctx = shash_desc_ctx(desc); bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE); + if (!may_use_simd()) { + if (len) + sha256_base_do_update(desc, data, len, + (sha256_block_fn *)sha256_block_data_order); + sha256_base_do_finalize(desc, + (sha256_block_fn *)sha256_block_data_order); + return sha256_base_finish(desc, out); + } + /* * Allow the asm code to perform the finalization if there is no * partial data and the input is a round multiple of the block size. 
*/ sctx->finalize = finalize; - kernel_neon_begin_partial(28); + kernel_neon_begin(); sha256_base_do_update(desc, data, len, (sha256_block_fn *)sha2_ce_transform); if (!finalize) @@ -74,8 +90,14 @@ static int sha256_ce_final(struct shash_desc *desc, u8 *out) { struct sha256_ce_state *sctx = shash_desc_ctx(desc); + if (!may_use_simd()) { + sha256_base_do_finalize(desc, + (sha256_block_fn *)sha256_block_data_order); + return sha256_base_finish(desc, out); + } + sctx->finalize = 0; - kernel_neon_begin_partial(28); + kernel_neon_begin(); sha256_base_do_finalize(desc, (sha256_block_fn *)sha2_ce_transform); kernel_neon_end(); return sha256_base_finish(desc, out); diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c index a2226f841960..b064d925fe2a 100644 --- a/arch/arm64/crypto/sha256-glue.c +++ b/arch/arm64/crypto/sha256-glue.c @@ -29,6 +29,7 @@ MODULE_ALIAS_CRYPTO("sha256"); asmlinkage void sha256_block_data_order(u32 *digest, const void *data, unsigned int num_blks); +EXPORT_SYMBOL(sha256_block_data_order); asmlinkage void sha256_block_neon(u32 *digest, const void *data, unsigned int num_blks); diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h index 0cad5a5894b9..b93904b16fc2 100644 --- a/arch/arm64/include/asm/efi.h +++ b/arch/arm64/include/asm/efi.h @@ -90,6 +90,9 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base, #define alloc_screen_info(x...) &screen_info #define free_screen_info(x...) +/* redeclare as 'hidden' so the compiler will generate relative references */ +extern struct screen_info screen_info __attribute__((__visibility__("hidden"))); + static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt) { } diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index fe39e6841326..e5df3fce0008 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -188,11 +188,6 @@ static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu) return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT; } -static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu) -{ - return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_EA); -} - static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu) { return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW); @@ -240,6 +235,25 @@ static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu) return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE; } +static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu) +{ + switch (kvm_vcpu_trap_get_fault_type(vcpu)) { + case FSC_SEA: + case FSC_SEA_TTW0: + case FSC_SEA_TTW1: + case FSC_SEA_TTW2: + case FSC_SEA_TTW3: + case FSC_SECC: + case FSC_SECC_TTW0: + case FSC_SECC_TTW1: + case FSC_SECC_TTW2: + case FSC_SECC_TTW3: + return true; + default: + return false; + } +} + static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu) { u32 esr = kvm_vcpu_get_hsr(vcpu); diff --git a/arch/arm64/include/asm/linkage.h b/arch/arm64/include/asm/linkage.h index 636c1bced7d4..1b266292f0be 100644 --- a/arch/arm64/include/asm/linkage.h +++ b/arch/arm64/include/asm/linkage.h @@ -1,7 +1,7 @@ #ifndef __ASM_LINKAGE_H #define __ASM_LINKAGE_H -#define __ALIGN .align 4 -#define __ALIGN_STR ".align 4" +#define __ALIGN .align 2 +#define __ALIGN_STR ".align 2" #endif diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 3585a5e26151..f7c4d2146aed 100644 --- a/arch/arm64/include/asm/memory.h +++ 
b/arch/arm64/include/asm/memory.h @@ -95,16 +95,19 @@ #define KERNEL_END _end /* - * The size of the KASAN shadow region. This should be 1/8th of the - * size of the entire kernel virtual address space. + * KASAN requires 1/8th of the kernel virtual address space for the shadow + * region. KASAN can bloat the stack significantly, so double the (minimum) + * stack size when KASAN is in use. */ #ifdef CONFIG_KASAN #define KASAN_SHADOW_SIZE (UL(1) << (VA_BITS - 3)) +#define KASAN_THREAD_SHIFT 1 #else #define KASAN_SHADOW_SIZE (0) +#define KASAN_THREAD_SHIFT 0 #endif -#define MIN_THREAD_SHIFT 14 +#define MIN_THREAD_SHIFT (14 + KASAN_THREAD_SHIFT) /* * VMAP'd stacks are allocated at page granularity, so we must ensure that such diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index bc4e92337d16..b46e54c2399b 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -401,7 +401,7 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd) /* Find an entry in the third-level page table. */ #define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) -#define pte_offset_phys(dir,addr) (pmd_page_paddr(*(dir)) + pte_index(addr) * sizeof(pte_t)) +#define pte_offset_phys(dir,addr) (pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t)) #define pte_offset_kernel(dir,addr) ((pte_t *)__va(pte_offset_phys((dir), (addr)))) #define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr)) diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c index e25c11e727fe..b3162715ed78 100644 --- a/arch/arm64/kernel/acpi.c +++ b/arch/arm64/kernel/acpi.c @@ -95,7 +95,7 @@ static int __init dt_scan_depth1_nodes(unsigned long node, * __acpi_map_table() will be called before page_init(), so early_ioremap() * or early_memremap() should be called here to for ACPI table mapping. 
*/ -char *__init __acpi_map_table(unsigned long phys, unsigned long size) +void __init __iomem *__acpi_map_table(unsigned long phys, unsigned long size) { if (!size) return NULL; @@ -103,7 +103,7 @@ char *__init __acpi_map_table(unsigned long phys, unsigned long size) return early_memremap(phys, size); } -void __init __acpi_unmap_table(char *map, unsigned long size) +void __init __acpi_unmap_table(void __iomem *map, unsigned long size) { if (!map || !size) return; diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c index f0e6d717885b..d06fbe4cd38d 100644 --- a/arch/arm64/kernel/armv8_deprecated.c +++ b/arch/arm64/kernel/armv8_deprecated.c @@ -649,4 +649,4 @@ static int __init armv8_deprecated_init(void) return 0; } -late_initcall(armv8_deprecated_init); +core_initcall(armv8_deprecated_init); diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index cd52d365d1f0..21e2c95d24e7 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1307,4 +1307,4 @@ static int __init enable_mrs_emulation(void) return 0; } -late_initcall(enable_mrs_emulation); +core_initcall(enable_mrs_emulation); diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 3a68cf38a6b3..5d547deb6996 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -321,6 +321,8 @@ void kernel_neon_end(void) } EXPORT_SYMBOL(kernel_neon_end); +#ifdef CONFIG_EFI + static DEFINE_PER_CPU(struct fpsimd_state, efi_fpsimd_state); static DEFINE_PER_CPU(bool, efi_fpsimd_state_used); @@ -370,6 +372,8 @@ void __efi_fpsimd_end(void) kernel_neon_end(); } +#endif /* CONFIG_EFI */ + #endif /* CONFIG_KERNEL_MODE_NEON */ #ifdef CONFIG_CPU_PM @@ -440,4 +444,4 @@ static int __init fpsimd_init(void) return 0; } -late_initcall(fpsimd_init); +core_initcall(fpsimd_init); diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 7434ec0c7a27..0b243ecaf7ac 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -384,6 +384,7 @@ ENTRY(kimage_vaddr) * booted in EL1 or EL2 respectively. 
*/ ENTRY(el2_setup) + msr SPsel, #1 // We want to use SP_EL{1,2} mrs x0, CurrentEL cmp x0, #CurrentEL_EL2 b.eq 1f diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c index e2b7e4f9cc31..0e2ea1c78542 100644 --- a/arch/arm64/kernel/pci.c +++ b/arch/arm64/kernel/pci.c @@ -22,23 +22,6 @@ #include #include -/* - * Called after each bus is probed, but before its children are examined - */ -void pcibios_fixup_bus(struct pci_bus *bus) -{ - /* nothing to do, expected to be removed in the future */ -} - -/* - * We don't have to worry about legacy ISA devices, so nothing to do here - */ -resource_size_t pcibios_align_resource(void *data, const struct resource *res, - resource_size_t size, resource_size_t align) -{ - return res->start; -} - #ifdef CONFIG_ACPI /* * Try to assign the IRQ number when probing a new device diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index c45214f8fb54..0bdc96c61bc0 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -751,10 +751,10 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, */ trace_hardirqs_off(); - /* Check valid user FS if needed */ - addr_limit_user_check(); - do { + /* Check valid user FS if needed */ + addr_limit_user_check(); + if (thread_flags & _TIF_NEED_RESCHED) { schedule(); } else { diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c index 4e5a664be04b..e09bf5d15606 100644 --- a/arch/arm64/kernel/signal32.c +++ b/arch/arm64/kernel/signal32.c @@ -142,25 +142,25 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) */ err = __put_user(from->si_signo, &to->si_signo); err |= __put_user(from->si_errno, &to->si_errno); - err |= __put_user((short)from->si_code, &to->si_code); + err |= __put_user(from->si_code, &to->si_code); if (from->si_code < 0) err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE); - else switch (from->si_code & __SI_MASK) { - case __SI_KILL: + else switch (siginfo_layout(from->si_signo, from->si_code)) { + case SIL_KILL: err |= __put_user(from->si_pid, &to->si_pid); err |= __put_user(from->si_uid, &to->si_uid); break; - case __SI_TIMER: + case SIL_TIMER: err |= __put_user(from->si_tid, &to->si_tid); err |= __put_user(from->si_overrun, &to->si_overrun); err |= __put_user(from->si_int, &to->si_int); break; - case __SI_POLL: + case SIL_POLL: err |= __put_user(from->si_band, &to->si_band); err |= __put_user(from->si_fd, &to->si_fd); break; - case __SI_FAULT: + case SIL_FAULT: err |= __put_user((compat_uptr_t)(unsigned long)from->si_addr, &to->si_addr); #ifdef BUS_MCEERR_AO @@ -173,29 +173,24 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb); #endif break; - case __SI_CHLD: + case SIL_CHLD: err |= __put_user(from->si_pid, &to->si_pid); err |= __put_user(from->si_uid, &to->si_uid); err |= __put_user(from->si_status, &to->si_status); err |= __put_user(from->si_utime, &to->si_utime); err |= __put_user(from->si_stime, &to->si_stime); break; - case __SI_RT: /* This is not generated by the kernel as of now. 
*/ - case __SI_MESGQ: /* But this is */ + case SIL_RT: err |= __put_user(from->si_pid, &to->si_pid); err |= __put_user(from->si_uid, &to->si_uid); err |= __put_user(from->si_int, &to->si_int); break; - case __SI_SYS: + case SIL_SYS: err |= __put_user((compat_uptr_t)(unsigned long) from->si_call_addr, &to->si_call_addr); err |= __put_user(from->si_syscall, &to->si_syscall); err |= __put_user(from->si_arch, &to->si_arch); break; - default: /* this is just in case for now ... */ - err |= __put_user(from->si_pid, &to->si_pid); - err |= __put_user(from->si_uid, &to->si_uid); - break; } return err; } diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index ffe089942ac4..9f7195a5773e 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -690,7 +690,7 @@ void __init smp_init_cpus(void) acpi_parse_gic_cpu_interface, 0); if (cpu_count > nr_cpu_ids) - pr_warn("Number of cores (%d) exceeds configured maximum of %d - clipping\n", + pr_warn("Number of cores (%d) exceeds configured maximum of %u - clipping\n", cpu_count, nr_cpu_ids); if (!bootcpu_valid) { diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index 3144584617e7..76809ccd309c 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -140,7 +140,8 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace) trace->entries[trace->nr_entries++] = ULONG_MAX; } -void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) +static noinline void __save_stack_trace(struct task_struct *tsk, + struct stack_trace *trace, unsigned int nosched) { struct stack_trace_data data; struct stackframe frame; @@ -150,15 +151,16 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) data.trace = trace; data.skip = trace->skip; + data.no_sched_functions = nosched; if (tsk != current) { - data.no_sched_functions = 1; frame.fp = thread_saved_fp(tsk); frame.pc = thread_saved_pc(tsk); } else { - data.no_sched_functions = 0; + /* We don't want this function nor the caller */ + data.skip += 2; frame.fp = (unsigned long)__builtin_frame_address(0); - frame.pc = (unsigned long)save_stack_trace_tsk; + frame.pc = (unsigned long)__save_stack_trace; } #ifdef CONFIG_FUNCTION_GRAPH_TRACER frame.graph = tsk->curr_ret_stack; @@ -172,9 +174,15 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) } EXPORT_SYMBOL_GPL(save_stack_trace_tsk); +void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) +{ + __save_stack_trace(tsk, trace, 1); +} + void save_stack_trace(struct stack_trace *trace) { - save_stack_trace_tsk(current, trace); + __save_stack_trace(current, trace, 0); } + EXPORT_SYMBOL_GPL(save_stack_trace); #endif diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index 17d8a1677a0b..7debb74843a0 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -84,7 +84,7 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run) if (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WFx_ISS_WFE) { trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true); vcpu->stat.wfe_exit_stat++; - kvm_vcpu_on_spin(vcpu); + kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu)); } else { trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false); vcpu->stat.wfi_exit_stat++; diff --git a/arch/arm64/kvm/vgic-sys-reg-v3.c b/arch/arm64/kvm/vgic-sys-reg-v3.c index 116786d2e8e8..c77d508b7462 100644 --- a/arch/arm64/kvm/vgic-sys-reg-v3.c +++ b/arch/arm64/kvm/vgic-sys-reg-v3.c @@ -208,29 +208,12 @@ static void 
vgic_v3_access_apr_reg(struct kvm_vcpu *vcpu, static bool access_gic_aprn(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r, u8 apr) { - struct vgic_cpu *vgic_v3_cpu = &vcpu->arch.vgic_cpu; u8 idx = r->Op2 & 3; - /* - * num_pri_bits are initialized with HW supported values. - * We can rely safely on num_pri_bits even if VM has not - * restored ICC_CTLR_EL1 before restoring APnR registers. - */ - switch (vgic_v3_cpu->num_pri_bits) { - case 7: - vgic_v3_access_apr_reg(vcpu, p, apr, idx); - break; - case 6: - if (idx > 1) - goto err; - vgic_v3_access_apr_reg(vcpu, p, apr, idx); - break; - default: - if (idx > 0) - goto err; - vgic_v3_access_apr_reg(vcpu, p, apr, idx); - } + if (idx > vgic_v3_max_apr_idx(vcpu)) + goto err; + vgic_v3_access_apr_reg(vcpu, p, apr, idx); return true; err: if (!p->is_write) diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 89993c4be1be..b64958b23a7f 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -97,7 +97,7 @@ static void data_abort_decode(unsigned int esr) (esr & ESR_ELx_SF) >> ESR_ELx_SF_SHIFT, (esr & ESR_ELx_AR) >> ESR_ELx_AR_SHIFT); } else { - pr_alert(" ISV = 0, ISS = 0x%08lu\n", esr & ESR_ELx_ISS_MASK); + pr_alert(" ISV = 0, ISS = 0x%08lx\n", esr & ESR_ELx_ISS_MASK); } pr_alert(" CM = %lu, WnR = %lu\n", @@ -651,7 +651,7 @@ static const struct fault_info fault_info[] = { { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 0 translation fault" }, { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" }, { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" }, - { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" }, + { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" }, { do_bad, SIGBUS, 0, "unknown 8" }, { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" }, { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" }, diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h index b02a9268dfbf..783de51a6c4e 100644 --- a/arch/arm64/net/bpf_jit.h +++ b/arch/arm64/net/bpf_jit.h @@ -44,8 +44,12 @@ #define A64_COND_NE AARCH64_INSN_COND_NE /* != */ #define A64_COND_CS AARCH64_INSN_COND_CS /* unsigned >= */ #define A64_COND_HI AARCH64_INSN_COND_HI /* unsigned > */ +#define A64_COND_LS AARCH64_INSN_COND_LS /* unsigned <= */ +#define A64_COND_CC AARCH64_INSN_COND_CC /* unsigned < */ #define A64_COND_GE AARCH64_INSN_COND_GE /* signed >= */ #define A64_COND_GT AARCH64_INSN_COND_GT /* signed > */ +#define A64_COND_LE AARCH64_INSN_COND_LE /* signed <= */ +#define A64_COND_LT AARCH64_INSN_COND_LT /* signed < */ #define A64_B_(cond, imm19) A64_COND_BRANCH(cond, (imm19) << 2) /* Unconditional branch (immediate) */ diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index f32144b2e07f..ba38d403abb2 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -527,10 +527,14 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) /* IF (dst COND src) JUMP off */ case BPF_JMP | BPF_JEQ | BPF_X: case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JLT | BPF_X: case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JLE | BPF_X: case BPF_JMP | BPF_JNE | BPF_X: case BPF_JMP | BPF_JSGT | BPF_X: + case BPF_JMP | BPF_JSLT | BPF_X: case BPF_JMP | BPF_JSGE | BPF_X: + case BPF_JMP | BPF_JSLE | BPF_X: emit(A64_CMP(1, dst, src), ctx); emit_cond_jmp: jmp_offset = bpf2a64_offset(i + off, i, ctx); @@ -542,9 +546,15 @@ static int build_insn(const struct bpf_insn 
*insn, struct jit_ctx *ctx) case BPF_JGT: jmp_cond = A64_COND_HI; break; + case BPF_JLT: + jmp_cond = A64_COND_CC; + break; case BPF_JGE: jmp_cond = A64_COND_CS; break; + case BPF_JLE: + jmp_cond = A64_COND_LS; + break; case BPF_JSET: case BPF_JNE: jmp_cond = A64_COND_NE; @@ -552,9 +562,15 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) case BPF_JSGT: jmp_cond = A64_COND_GT; break; + case BPF_JSLT: + jmp_cond = A64_COND_LT; + break; case BPF_JSGE: jmp_cond = A64_COND_GE; break; + case BPF_JSLE: + jmp_cond = A64_COND_LE; + break; default: return -EFAULT; } @@ -566,10 +582,14 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) /* IF (dst COND imm) JUMP off */ case BPF_JMP | BPF_JEQ | BPF_K: case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JLT | BPF_K: case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JLE | BPF_K: case BPF_JMP | BPF_JNE | BPF_K: case BPF_JMP | BPF_JSGT | BPF_K: + case BPF_JMP | BPF_JSLT | BPF_K: case BPF_JMP | BPF_JSGE | BPF_K: + case BPF_JMP | BPF_JSLE | BPF_K: emit_a64_mov_i(1, tmp, imm, ctx); emit(A64_CMP(1, dst, tmp), ctx); goto emit_cond_jmp; diff --git a/arch/blackfin/include/asm/bfin_twi.h b/arch/blackfin/include/asm/bfin_twi.h index aaa0834d34aa..211e9c78f6fb 100644 --- a/arch/blackfin/include/asm/bfin_twi.h +++ b/arch/blackfin/include/asm/bfin_twi.h @@ -1,7 +1,7 @@ /* * bfin_twi.h - interface to Blackfin TWIs * - * Copyright 2005-2010 Analog Devices Inc. + * Copyright 2005-2014 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ @@ -10,6 +10,138 @@ #define __ASM_BFIN_TWI_H__ #include +#include +#include + +/* + * ADI twi registers layout + */ +struct bfin_twi_regs { + u16 clkdiv; + u16 dummy1; + u16 control; + u16 dummy2; + u16 slave_ctl; + u16 dummy3; + u16 slave_stat; + u16 dummy4; + u16 slave_addr; + u16 dummy5; + u16 master_ctl; + u16 dummy6; + u16 master_stat; + u16 dummy7; + u16 master_addr; + u16 dummy8; + u16 int_stat; + u16 dummy9; + u16 int_mask; + u16 dummy10; + u16 fifo_ctl; + u16 dummy11; + u16 fifo_stat; + u16 dummy12; + u32 __pad[20]; + u16 xmt_data8; + u16 dummy13; + u16 xmt_data16; + u16 dummy14; + u16 rcv_data8; + u16 dummy15; + u16 rcv_data16; + u16 dummy16; +}; + +struct bfin_twi_iface { + int irq; + spinlock_t lock; + char read_write; + u8 command; + u8 *transPtr; + int readNum; + int writeNum; + int cur_mode; + int manual_stop; + int result; + struct i2c_adapter adap; + struct completion complete; + struct i2c_msg *pmsg; + int msg_num; + int cur_msg; + u16 saved_clkdiv; + u16 saved_control; + struct bfin_twi_regs __iomem *regs_base; +}; + +/* ******************** TWO-WIRE INTERFACE (TWI) MASKS ********************/ +/* TWI_CLKDIV Macros (Use: *pTWI_CLKDIV = CLKLOW(x)|CLKHI(y); ) */ +#define CLKLOW(x) ((x) & 0xFF) /* Periods Clock Is Held Low */ +#define CLKHI(y) (((y)&0xFF)<<0x8) /* Periods Before New Clock Low */ + +/* TWI_PRESCALE Masks */ +#define PRESCALE 0x007F /* SCLKs Per Internal Time Reference (10MHz) */ +#define TWI_ENA 0x0080 /* TWI Enable */ +#define SCCB 0x0200 /* SCCB Compatibility Enable */ + +/* TWI_SLAVE_CTL Masks */ +#define SEN 0x0001 /* Slave Enable */ +#define SADD_LEN 0x0002 /* Slave Address Length */ +#define STDVAL 0x0004 /* Slave Transmit Data Valid */ +#define NAK 0x0008 /* NAK Generated At Conclusion Of Transfer */ +#define GEN 0x0010 /* General Call Address Matching Enabled */ + +/* TWI_SLAVE_STAT Masks */ +#define SDIR 0x0001 /* Slave Transfer Direction (RX/TX*) */ +#define GCALL 0x0002 /* General Call Indicator */ + +/* TWI_MASTER_CTL Masks */ 
+#define MEN 0x0001 /* Master Mode Enable */ +#define MADD_LEN 0x0002 /* Master Address Length */ +#define MDIR 0x0004 /* Master Transmit Direction (RX/TX*) */ +#define FAST 0x0008 /* Use Fast Mode Timing Specs */ +#define STOP 0x0010 /* Issue Stop Condition */ +#define RSTART 0x0020 /* Repeat Start or Stop* At End Of Transfer */ +#define DCNT 0x3FC0 /* Data Bytes To Transfer */ +#define SDAOVR 0x4000 /* Serial Data Override */ +#define SCLOVR 0x8000 /* Serial Clock Override */ + +/* TWI_MASTER_STAT Masks */ +#define MPROG 0x0001 /* Master Transfer In Progress */ +#define LOSTARB 0x0002 /* Lost Arbitration Indicator (Xfer Aborted) */ +#define ANAK 0x0004 /* Address Not Acknowledged */ +#define DNAK 0x0008 /* Data Not Acknowledged */ +#define BUFRDERR 0x0010 /* Buffer Read Error */ +#define BUFWRERR 0x0020 /* Buffer Write Error */ +#define SDASEN 0x0040 /* Serial Data Sense */ +#define SCLSEN 0x0080 /* Serial Clock Sense */ +#define BUSBUSY 0x0100 /* Bus Busy Indicator */ + +/* TWI_INT_SRC and TWI_INT_ENABLE Masks */ +#define SINIT 0x0001 /* Slave Transfer Initiated */ +#define SCOMP 0x0002 /* Slave Transfer Complete */ +#define SERR 0x0004 /* Slave Transfer Error */ +#define SOVF 0x0008 /* Slave Overflow */ +#define MCOMP 0x0010 /* Master Transfer Complete */ +#define MERR 0x0020 /* Master Transfer Error */ +#define XMTSERV 0x0040 /* Transmit FIFO Service */ +#define RCVSERV 0x0080 /* Receive FIFO Service */ + +/* TWI_FIFO_CTRL Masks */ +#define XMTFLUSH 0x0001 /* Transmit Buffer Flush */ +#define RCVFLUSH 0x0002 /* Receive Buffer Flush */ +#define XMTINTLEN 0x0004 /* Transmit Buffer Interrupt Length */ +#define RCVINTLEN 0x0008 /* Receive Buffer Interrupt Length */ + +/* TWI_FIFO_STAT Masks */ +#define XMTSTAT 0x0003 /* Transmit FIFO Status */ +#define XMT_EMPTY 0x0000 /* Transmit FIFO Empty */ +#define XMT_HALF 0x0001 /* Transmit FIFO Has 1 Byte To Write */ +#define XMT_FULL 0x0003 /* Transmit FIFO Full (2 Bytes To Write) */ + +#define RCVSTAT 0x000C /* Receive FIFO Status */ +#define RCV_EMPTY 0x0000 /* Receive FIFO Empty */ +#define RCV_HALF 0x0004 /* Receive FIFO Has 1 Byte To Read */ +#define RCV_FULL 0x000C /* Receive FIFO Full (2 Bytes To Read) */ #define DEFINE_TWI_REG(reg_name, reg) \ static inline u16 read_##reg_name(struct bfin_twi_iface *iface) \ diff --git a/arch/blackfin/include/uapi/asm/siginfo.h b/arch/blackfin/include/uapi/asm/siginfo.h index c72f4e6e386f..79dfe3979123 100644 --- a/arch/blackfin/include/uapi/asm/siginfo.h +++ b/arch/blackfin/include/uapi/asm/siginfo.h @@ -14,28 +14,36 @@ #define si_uid16 _sifields._kill._uid -#define ILL_ILLPARAOP (__SI_FAULT|2) /* illegal opcode combine ********** */ -#define ILL_ILLEXCPT (__SI_FAULT|4) /* unrecoverable exception ********** */ -#define ILL_CPLB_VI (__SI_FAULT|9) /* D/I CPLB protect violation ******** */ -#define ILL_CPLB_MISS (__SI_FAULT|10) /* D/I CPLB miss ******** */ -#define ILL_CPLB_MULHIT (__SI_FAULT|11) /* D/I CPLB multiple hit ******** */ +#define ILL_ILLPARAOP 2 /* illegal opcode combine ********** */ +#define ILL_ILLEXCPT 4 /* unrecoverable exception ********** */ +#define ILL_CPLB_VI 9 /* D/I CPLB protect violation ******** */ +#define ILL_CPLB_MISS 10 /* D/I CPLB miss ******** */ +#define ILL_CPLB_MULHIT 11 /* D/I CPLB multiple hit ******** */ +#undef NSIGILL +#define NSIGILL 11 /* * SIGBUS si_codes */ -#define BUS_OPFETCH (__SI_FAULT|4) /* error from instruction fetch ******** */ +#define BUS_OPFETCH 4 /* error from instruction fetch ******** */ +#undef NSIGBUS +#define NSIGBUS 4 /* * SIGTRAP si_codes */ 
-#define TRAP_STEP (__SI_FAULT|1) /* single-step breakpoint************* */ -#define TRAP_TRACEFLOW (__SI_FAULT|2) /* trace buffer overflow ************* */ -#define TRAP_WATCHPT (__SI_FAULT|3) /* watchpoint match ************* */ -#define TRAP_ILLTRAP (__SI_FAULT|4) /* illegal trap ************* */ +#define TRAP_STEP 1 /* single-step breakpoint************* */ +#define TRAP_TRACEFLOW 2 /* trace buffer overflow ************* */ +#define TRAP_WATCHPT 3 /* watchpoint match ************* */ +#define TRAP_ILLTRAP 4 /* illegal trap ************* */ +#undef NSIGTRAP +#define NSIGTRAP 4 /* * SIGSEGV si_codes */ -#define SEGV_STACKFLOW (__SI_FAULT|3) /* stack overflow */ +#define SEGV_STACKFLOW 3 /* stack overflow */ +#undef NSIGSEGV +#define NSIGSEGV 3 #endif /* _UAPI_BFIN_SIGINFO_H */ diff --git a/arch/blackfin/kernel/debug-mmrs.c b/arch/blackfin/kernel/debug-mmrs.c index e272bca93c64..f31ace221392 100644 --- a/arch/blackfin/kernel/debug-mmrs.c +++ b/arch/blackfin/kernel/debug-mmrs.c @@ -10,7 +10,6 @@ #include #include #include -#include #include #include diff --git a/arch/blackfin/mach-bf537/boards/dnp5370.c b/arch/blackfin/mach-bf537/boards/dnp5370.c index e79b3b810c39..c4a8ffb15417 100644 --- a/arch/blackfin/mach-bf537/boards/dnp5370.c +++ b/arch/blackfin/mach-bf537/boards/dnp5370.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c index 7528148dc492..400e6693643e 100644 --- a/arch/blackfin/mach-bf537/boards/stamp.c +++ b/arch/blackfin/mach-bf537/boards/stamp.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/blackfin/mach-bf561/boards/acvilon.c b/arch/blackfin/mach-bf561/boards/acvilon.c index 37f8f25a1347..696cc9d7820a 100644 --- a/arch/blackfin/mach-bf561/boards/acvilon.c +++ b/arch/blackfin/mach-bf561/boards/acvilon.c @@ -38,7 +38,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/c6x/include/asm/processor.h b/arch/c6x/include/asm/processor.h index 7c87b5be53b5..8f7cce829f8e 100644 --- a/arch/c6x/include/asm/processor.h +++ b/arch/c6x/include/asm/processor.h @@ -92,9 +92,6 @@ static inline void release_thread(struct task_struct *dead_task) { } -#define copy_segments(tsk, mm) do { } while (0) -#define release_segments(mm) do { } while (0) - /* * saved kernel SP and DP of a blocked thread. 
*/ diff --git a/arch/cris/arch-v32/drivers/mach-a3/nandflash.c b/arch/cris/arch-v32/drivers/mach-a3/nandflash.c index 3f646c787e58..925a98eb6d68 100644 --- a/arch/cris/arch-v32/drivers/mach-a3/nandflash.c +++ b/arch/cris/arch-v32/drivers/mach-a3/nandflash.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/cris/arch-v32/drivers/mach-fs/nandflash.c b/arch/cris/arch-v32/drivers/mach-fs/nandflash.c index a74540514bdb..53b56a429dde 100644 --- a/arch/cris/arch-v32/drivers/mach-fs/nandflash.c +++ b/arch/cris/arch-v32/drivers/mach-fs/nandflash.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/cris/arch-v32/drivers/pci/bios.c b/arch/cris/arch-v32/drivers/pci/bios.c index 394c2a73d5e2..5cc622c0225e 100644 --- a/arch/cris/arch-v32/drivers/pci/bios.c +++ b/arch/cris/arch-v32/drivers/pci/bios.c @@ -2,10 +2,6 @@ #include #include -void pcibios_fixup_bus(struct pci_bus *b) -{ -} - void pcibios_set_master(struct pci_dev *dev) { u8 lat; diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig index eefd9a4ed156..1cce8243449e 100644 --- a/arch/frv/Kconfig +++ b/arch/frv/Kconfig @@ -17,6 +17,9 @@ config FRV select HAVE_DEBUG_STACKOVERFLOW select ARCH_NO_COHERENT_DMA_MMAP +config CPU_BIG_ENDIAN + def_bool y + config ZONE_DMA bool default y diff --git a/arch/frv/include/asm/processor.h b/arch/frv/include/asm/processor.h index e4d08d74ed9f..021cce78b401 100644 --- a/arch/frv/include/asm/processor.h +++ b/arch/frv/include/asm/processor.h @@ -92,10 +92,6 @@ static inline void release_thread(struct task_struct *dead_task) extern asmlinkage void save_user_regs(struct user_context *target); extern asmlinkage void *restore_user_regs(const struct user_context *target, ...); -#define copy_segments(tsk, mm) do { } while (0) -#define release_segments(mm) do { } while (0) -#define forget_segments() do { } while (0) - unsigned long get_wchan(struct task_struct *p); #define KSTK_EIP(tsk) ((tsk)->thread.frame0->pc) diff --git a/arch/frv/include/uapi/asm/siginfo.h b/arch/frv/include/uapi/asm/siginfo.h index d3fd1ca45653..f55d9e0e9068 100644 --- a/arch/frv/include/uapi/asm/siginfo.h +++ b/arch/frv/include/uapi/asm/siginfo.h @@ -4,7 +4,7 @@ #include #include -#define FPE_MDAOVF (__SI_FAULT|9) /* media overflow */ +#define FPE_MDAOVF 9 /* media overflow */ #undef NSIGFPE #define NSIGFPE 9 diff --git a/arch/frv/include/uapi/asm/socket.h b/arch/frv/include/uapi/asm/socket.h index f1e3b20dce9f..9abf02d6855a 100644 --- a/arch/frv/include/uapi/asm/socket.h +++ b/arch/frv/include/uapi/asm/socket.h @@ -102,5 +102,7 @@ #define SO_PEERGROUPS 59 +#define SO_ZEROCOPY 60 + #endif /* _ASM_SOCKET_H */ diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig index 6e3d36f37a02..3089f7fe2abd 100644 --- a/arch/h8300/Kconfig +++ b/arch/h8300/Kconfig @@ -23,6 +23,9 @@ config H8300 select HAVE_ARCH_HASH select CPU_NO_EFFICIENT_FFS +config CPU_BIG_ENDIAN + def_bool y + config RWSEM_GENERIC_SPINLOCK def_bool y diff --git a/arch/ia64/Kconfig.debug b/arch/ia64/Kconfig.debug index de9d507ba0fd..4763887ba368 100644 --- a/arch/ia64/Kconfig.debug +++ b/arch/ia64/Kconfig.debug @@ -56,9 +56,4 @@ config IA64_DEBUG_IRQ and restore instructions. It's useful for tracking down spinlock problems, but slow! If you're unsure, select N. 
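The SO_ZEROCOPY option number added to the frv socket header above (and to the ia64 and m32r headers further down) only reserves the value; the behaviour lives in the generic socket layer. A minimal user-space sketch of how the option is typically consumed follows -- the MSG_ZEROCOPY flag value and the fallback handling are assumptions based on the generic networking headers, not something defined by this series:

/* Hypothetical sketch: enable copy avoidance on an already-connected socket. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>

#ifndef SO_ZEROCOPY
#define SO_ZEROCOPY 60          /* matches the per-arch value added above */
#endif
#ifndef MSG_ZEROCOPY
#define MSG_ZEROCOPY 0x4000000  /* assumed from the generic linux/socket.h */
#endif

int enable_zerocopy(int fd)
{
	int one = 1;

	/* Kernels without zerocopy support reject the option. */
	if (setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one)) < 0) {
		fprintf(stderr, "SO_ZEROCOPY unsupported: %s\n", strerror(errno));
		return -1;
	}
	return 0;
}

ssize_t send_zerocopy(int fd, const void *buf, size_t len)
{
	/* Completion notifications arrive later on the socket error queue. */
	return send(fd, buf, len, MSG_ZEROCOPY);
}

Callers that see setsockopt() fail are expected to fall back to ordinary send(); the flag only changes whether the payload pages are pinned instead of copied.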
-config SYSVIPC_COMPAT - bool - depends on COMPAT && SYSVIPC - default y - endmenu diff --git a/arch/ia64/include/uapi/asm/siginfo.h b/arch/ia64/include/uapi/asm/siginfo.h index 4694c64252d6..33389fc36f23 100644 --- a/arch/ia64/include/uapi/asm/siginfo.h +++ b/arch/ia64/include/uapi/asm/siginfo.h @@ -98,27 +98,30 @@ typedef struct siginfo { /* * SIGILL si_codes */ -#define ILL_BADIADDR (__SI_FAULT|9) /* unimplemented instruction address */ -#define __ILL_BREAK (__SI_FAULT|10) /* illegal break */ -#define __ILL_BNDMOD (__SI_FAULT|11) /* bundle-update (modification) in progress */ +#define ILL_BADIADDR 9 /* unimplemented instruction address */ +#define __ILL_BREAK 10 /* illegal break */ +#define __ILL_BNDMOD 11 /* bundle-update (modification) in progress */ #undef NSIGILL #define NSIGILL 11 /* * SIGFPE si_codes */ -#define __FPE_DECOVF (__SI_FAULT|9) /* decimal overflow */ -#define __FPE_DECDIV (__SI_FAULT|10) /* decimal division by zero */ -#define __FPE_DECERR (__SI_FAULT|11) /* packed decimal error */ -#define __FPE_INVASC (__SI_FAULT|12) /* invalid ASCII digit */ -#define __FPE_INVDEC (__SI_FAULT|13) /* invalid decimal digit */ +#ifdef __KERNEL__ +#define FPE_FIXME 0 /* Broken dup of SI_USER */ +#endif /* __KERNEL__ */ +#define __FPE_DECOVF 9 /* decimal overflow */ +#define __FPE_DECDIV 10 /* decimal division by zero */ +#define __FPE_DECERR 11 /* packed decimal error */ +#define __FPE_INVASC 12 /* invalid ASCII digit */ +#define __FPE_INVDEC 13 /* invalid decimal digit */ #undef NSIGFPE #define NSIGFPE 13 /* * SIGSEGV si_codes */ -#define __SEGV_PSTKOVF (__SI_FAULT|4) /* paragraph stack overflow */ +#define __SEGV_PSTKOVF 4 /* paragraph stack overflow */ #undef NSIGSEGV #define NSIGSEGV 4 diff --git a/arch/ia64/include/uapi/asm/socket.h b/arch/ia64/include/uapi/asm/socket.h index 5dd5c5d0d642..002eb85a6941 100644 --- a/arch/ia64/include/uapi/asm/socket.h +++ b/arch/ia64/include/uapi/asm/socket.h @@ -111,4 +111,6 @@ #define SO_PEERGROUPS 59 +#define SO_ZEROCOPY 60 + #endif /* _ASM_IA64_SOCKET_H */ diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index 7508c306aa9e..1d29b2f8726b 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c @@ -159,12 +159,12 @@ int acpi_request_vector(u32 int_type) return vector; } -char *__init __acpi_map_table(unsigned long phys_addr, unsigned long size) +void __init __iomem *__acpi_map_table(unsigned long phys, unsigned long size) { - return __va(phys_addr); + return __va(phys); } -void __init __acpi_unmap_table(char *map, unsigned long size) +void __init __acpi_unmap_table(void __iomem *map, unsigned long size) { } diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c index 5db52c6813c4..6146d53b6ad7 100644 --- a/arch/ia64/kernel/signal.c +++ b/arch/ia64/kernel/signal.c @@ -124,31 +124,30 @@ copy_siginfo_to_user (siginfo_t __user *to, const siginfo_t *from) */ err = __put_user(from->si_signo, &to->si_signo); err |= __put_user(from->si_errno, &to->si_errno); - err |= __put_user((short)from->si_code, &to->si_code); - switch (from->si_code >> 16) { - case __SI_FAULT >> 16: + err |= __put_user(from->si_code, &to->si_code); + switch (siginfo_layout(from->si_signo, from->si_code)) { + case SIL_FAULT: err |= __put_user(from->si_flags, &to->si_flags); err |= __put_user(from->si_isr, &to->si_isr); - case __SI_POLL >> 16: + case SIL_POLL: err |= __put_user(from->si_addr, &to->si_addr); err |= __put_user(from->si_imm, &to->si_imm); break; - case __SI_TIMER >> 16: + case SIL_TIMER: err |= __put_user(from->si_tid, &to->si_tid); err 
|= __put_user(from->si_overrun, &to->si_overrun); err |= __put_user(from->si_ptr, &to->si_ptr); break; - case __SI_RT >> 16: /* Not generated by the kernel as of now. */ - case __SI_MESGQ >> 16: + case SIL_RT: err |= __put_user(from->si_uid, &to->si_uid); err |= __put_user(from->si_pid, &to->si_pid); err |= __put_user(from->si_ptr, &to->si_ptr); break; - case __SI_CHLD >> 16: + case SIL_CHLD: err |= __put_user(from->si_utime, &to->si_utime); err |= __put_user(from->si_stime, &to->si_stime); err |= __put_user(from->si_status, &to->si_status); - default: + case SIL_KILL: err |= __put_user(from->si_uid, &to->si_uid); err |= __put_user(from->si_pid, &to->si_pid); break; diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c index 7b1fe9462158..3cb17cf9b362 100644 --- a/arch/ia64/kernel/traps.c +++ b/arch/ia64/kernel/traps.c @@ -349,7 +349,7 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr) } siginfo.si_signo = SIGFPE; siginfo.si_errno = 0; - siginfo.si_code = __SI_FAULT; /* default code */ + siginfo.si_code = FPE_FIXME; /* default code */ siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri); if (isr & 0x11) { siginfo.si_code = FPE_FLTINV; @@ -373,7 +373,7 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr) /* raise exception */ siginfo.si_signo = SIGFPE; siginfo.si_errno = 0; - siginfo.si_code = __SI_FAULT; /* default code */ + siginfo.si_code = FPE_FIXME; /* default code */ siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri); if (isr & 0x880) { siginfo.si_code = FPE_FLTOVF; diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c index 4068bde623dc..f5ec736100ee 100644 --- a/arch/ia64/pci/pci.c +++ b/arch/ia64/pci/pci.c @@ -411,13 +411,6 @@ pcibios_disable_device (struct pci_dev *dev) acpi_pci_irq_disable(dev); } -resource_size_t -pcibios_align_resource (void *data, const struct resource *res, - resource_size_t size, resource_size_t align) -{ - return res->start; -} - /** * ia64_pci_get_legacy_mem - generic legacy mem routine * @bus: bus to get legacy memory base address for diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig index 87cde1e4b38c..0777f3a8a1f3 100644 --- a/arch/m32r/Kconfig +++ b/arch/m32r/Kconfig @@ -194,6 +194,10 @@ config TIMER_DIVIDE int "Timer divider (integer)" default "128" +config CPU_BIG_ENDIAN + bool "Generate big endian code" + default n + config CPU_LITTLE_ENDIAN bool "Generate little endian code" default n diff --git a/arch/m32r/configs/m32104ut_defconfig b/arch/m32r/configs/m32104ut_defconfig index be30e094db71..4aa42acbd512 100644 --- a/arch/m32r/configs/m32104ut_defconfig +++ b/arch/m32r/configs/m32104ut_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y @@ -40,7 +39,6 @@ CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_SCTP=m CONFIG_NETFILTER_XT_MATCH_STRING=m CONFIG_NETFILTER_XT_MATCH_TCPMSS=m -CONFIG_IP_NF_QUEUE=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_ADDRTYPE=m CONFIG_IP_NF_MATCH_ECN=m @@ -48,7 +46,6 @@ CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m CONFIG_IP_NF_TARGET_LOG=m -CONFIG_IP_NF_TARGET_ULOG=m CONFIG_IP_NF_MANGLE=m CONFIG_IP_NF_TARGET_ECN=m CONFIG_IP_NF_TARGET_TTL=m @@ -106,7 +103,6 @@ CONFIG_SENSORS_SMSC47M1=m CONFIG_SENSORS_W83781D=m CONFIG_SENSORS_W83L785TS=m CONFIG_SENSORS_W83627HF=m -CONFIG_VIDEO_OUTPUT_CONTROL=m CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y diff --git a/arch/m32r/configs/m32700ut.smp_defconfig 
b/arch/m32r/configs/m32700ut.smp_defconfig index a3d727ed6a16..41a0495b65df 100644 --- a/arch/m32r/configs/m32700ut.smp_defconfig +++ b/arch/m32r/configs/m32700ut.smp_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_IKCONFIG=y @@ -30,7 +29,6 @@ CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y # CONFIG_IPV6 is not set CONFIG_MTD=y -CONFIG_MTD_PARTITIONS=y CONFIG_MTD_REDBOOT_PARTS=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=m @@ -63,7 +61,6 @@ CONFIG_SERIAL_M32R_SIO_CONSOLE=y CONFIG_SERIAL_M32R_PLDSIO=y CONFIG_HW_RANDOM=y CONFIG_DS1302=y -CONFIG_VIDEO_OUTPUT_CONTROL=m CONFIG_FB=y CONFIG_FIRMWARE_EDID=y CONFIG_FB_S1D13XXX=y diff --git a/arch/m32r/configs/m32700ut.up_defconfig b/arch/m32r/configs/m32700ut.up_defconfig index b8334163099d..20078a866f45 100644 --- a/arch/m32r/configs/m32700ut.up_defconfig +++ b/arch/m32r/configs/m32700ut.up_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_IKCONFIG=y @@ -29,7 +28,6 @@ CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y # CONFIG_IPV6 is not set CONFIG_MTD=y -CONFIG_MTD_PARTITIONS=y CONFIG_MTD_REDBOOT_PARTS=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=m @@ -62,7 +60,6 @@ CONFIG_SERIAL_M32R_SIO_CONSOLE=y CONFIG_SERIAL_M32R_PLDSIO=y CONFIG_HW_RANDOM=y CONFIG_DS1302=y -CONFIG_VIDEO_OUTPUT_CONTROL=m CONFIG_FB=y CONFIG_FIRMWARE_EDID=y CONFIG_FB_S1D13XXX=y diff --git a/arch/m32r/configs/mappi.nommu_defconfig b/arch/m32r/configs/mappi.nommu_defconfig index 7c90ce2fc42b..4bf3820e054a 100644 --- a/arch/m32r/configs/mappi.nommu_defconfig +++ b/arch/m32r/configs/mappi.nommu_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_IKCONFIG=y CONFIG_LOG_BUF_SHIFT=14 @@ -39,7 +38,6 @@ CONFIG_NETDEVICES=y # CONFIG_VT is not set CONFIG_SERIAL_M32R_SIO_CONSOLE=y CONFIG_HW_RANDOM=y -CONFIG_VIDEO_OUTPUT_CONTROL=m CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y CONFIG_NFS_FS=y diff --git a/arch/m32r/configs/mappi.smp_defconfig b/arch/m32r/configs/mappi.smp_defconfig index 367d07cebcd3..f9ed7bdbf4de 100644 --- a/arch/m32r/configs/mappi.smp_defconfig +++ b/arch/m32r/configs/mappi.smp_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y @@ -31,9 +30,7 @@ CONFIG_IP_PNP_DHCP=y # CONFIG_IPV6 is not set # CONFIG_STANDALONE is not set CONFIG_MTD=y -CONFIG_MTD_PARTITIONS=y CONFIG_MTD_REDBOOT_PARTS=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_NBD=m @@ -50,7 +47,6 @@ CONFIG_NETDEVICES=y # CONFIG_VT is not set CONFIG_SERIAL_M32R_SIO_CONSOLE=y CONFIG_HW_RANDOM=y -CONFIG_VIDEO_OUTPUT_CONTROL=m CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y CONFIG_ISO9660_FS=y diff --git a/arch/m32r/configs/mappi.up_defconfig b/arch/m32r/configs/mappi.up_defconfig index cb11384386ce..289ae7421e12 100644 --- a/arch/m32r/configs/mappi.up_defconfig +++ b/arch/m32r/configs/mappi.up_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y @@ -29,9 +28,7 @@ CONFIG_IP_PNP_DHCP=y # CONFIG_IPV6 is not set # CONFIG_STANDALONE is not set CONFIG_MTD=y -CONFIG_MTD_PARTITIONS=y CONFIG_MTD_REDBOOT_PARTS=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_NBD=m @@ -48,7 +45,6 @@ CONFIG_NETDEVICES=y # CONFIG_VT is not set CONFIG_SERIAL_M32R_SIO_CONSOLE=y CONFIG_HW_RANDOM=y -CONFIG_VIDEO_OUTPUT_CONTROL=m CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y CONFIG_ISO9660_FS=y diff --git a/arch/m32r/configs/mappi2.opsp_defconfig b/arch/m32r/configs/mappi2.opsp_defconfig index 3bff779259b4..2852f6e7e246 100644 --- 
a/arch/m32r/configs/mappi2.opsp_defconfig +++ b/arch/m32r/configs/mappi2.opsp_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_IKCONFIG=y @@ -50,7 +49,6 @@ CONFIG_SMC91X=y # CONFIG_SERIO_I8042 is not set CONFIG_SERIAL_M32R_SIO_CONSOLE=y CONFIG_HW_RANDOM=y -CONFIG_VIDEO_OUTPUT_CONTROL=m # CONFIG_VGA_CONSOLE is not set CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y diff --git a/arch/m32r/configs/mappi2.vdec2_defconfig b/arch/m32r/configs/mappi2.vdec2_defconfig index 75246c9c1af8..8da4dbad8510 100644 --- a/arch/m32r/configs/mappi2.vdec2_defconfig +++ b/arch/m32r/configs/mappi2.vdec2_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_IKCONFIG=y @@ -49,7 +48,6 @@ CONFIG_SMC91X=y # CONFIG_SERIO_I8042 is not set CONFIG_SERIAL_M32R_SIO_CONSOLE=y CONFIG_HW_RANDOM=y -CONFIG_VIDEO_OUTPUT_CONTROL=m # CONFIG_VGA_CONSOLE is not set CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y diff --git a/arch/m32r/configs/mappi3.smp_defconfig b/arch/m32r/configs/mappi3.smp_defconfig index 27cefd41ac1f..5605b23e2faf 100644 --- a/arch/m32r/configs/mappi3.smp_defconfig +++ b/arch/m32r/configs/mappi3.smp_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y @@ -29,9 +28,7 @@ CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y # CONFIG_IPV6 is not set CONFIG_MTD=y -CONFIG_MTD_PARTITIONS=y CONFIG_MTD_REDBOOT_PARTS=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_NBD=m @@ -50,7 +47,6 @@ CONFIG_SMC91X=y # CONFIG_VT is not set CONFIG_SERIAL_M32R_SIO_CONSOLE=y CONFIG_HW_RANDOM=y -CONFIG_VIDEO_OUTPUT_CONTROL=m CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y CONFIG_ISO9660_FS=y diff --git a/arch/m32r/configs/oaks32r_defconfig b/arch/m32r/configs/oaks32r_defconfig index 5087a510ca4f..5ccab127f6ad 100644 --- a/arch/m32r/configs/oaks32r_defconfig +++ b/arch/m32r/configs/oaks32r_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_LOG_BUF_SHIFT=14 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set @@ -37,7 +36,6 @@ CONFIG_NETDEVICES=y # CONFIG_VT is not set CONFIG_SERIAL_M32R_SIO_CONSOLE=y CONFIG_HW_RANDOM=y -CONFIG_VIDEO_OUTPUT_CONTROL=m CONFIG_EXT2_FS=y CONFIG_NFS_FS=y CONFIG_NFS_V3=y diff --git a/arch/m32r/configs/opsput_defconfig b/arch/m32r/configs/opsput_defconfig index 50c6f525db20..3ce1d08355e5 100644 --- a/arch/m32r/configs/opsput_defconfig +++ b/arch/m32r/configs/opsput_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_IKCONFIG=y @@ -46,7 +45,6 @@ CONFIG_SERIAL_M32R_SIO_CONSOLE=y CONFIG_SERIAL_M32R_PLDSIO=y CONFIG_HW_RANDOM=y CONFIG_DS1302=y -CONFIG_VIDEO_OUTPUT_CONTROL=m CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y CONFIG_ISO9660_FS=m diff --git a/arch/m32r/configs/usrv_defconfig b/arch/m32r/configs/usrv_defconfig index a3cfaaedab60..cb8c051c3d46 100644 --- a/arch/m32r/configs/usrv_defconfig +++ b/arch/m32r/configs/usrv_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_BSD_PROCESS_ACCT=y @@ -34,9 +33,6 @@ CONFIG_INET_ESP=y CONFIG_INET_IPCOMP=y # CONFIG_IPV6 is not set CONFIG_MTD=y -CONFIG_MTD_CONCAT=y -CONFIG_MTD_PARTITIONS=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_ADV_OPTIONS=y @@ -62,7 +58,6 @@ CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y # CONFIG_SERIAL_M32R_SIO is not set # CONFIG_HWMON is not set -CONFIG_VIDEO_OUTPUT_CONTROL=m CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_FS_XATTR is not set diff --git a/arch/m32r/include/asm/processor.h 
b/arch/m32r/include/asm/processor.h index 657874eeeccc..c70fa9ac7169 100644 --- a/arch/m32r/include/asm/processor.h +++ b/arch/m32r/include/asm/processor.h @@ -118,14 +118,6 @@ struct mm_struct; /* Free all resources held by a thread. */ extern void release_thread(struct task_struct *); -/* Copy and release all segment info associated with a VM */ -extern void copy_segments(struct task_struct *p, struct mm_struct * mm); -extern void release_segments(struct mm_struct * mm); - -/* Copy and release all segment info associated with a VM */ -#define copy_segments(p, mm) do { } while (0) -#define release_segments(mm) do { } while (0) - unsigned long get_wchan(struct task_struct *p); #define KSTK_EIP(tsk) ((tsk)->thread.lr) #define KSTK_ESP(tsk) ((tsk)->thread.sp) diff --git a/arch/m32r/include/uapi/asm/socket.h b/arch/m32r/include/uapi/asm/socket.h index f8f7b47e247f..e268e51a38d1 100644 --- a/arch/m32r/include/uapi/asm/socket.h +++ b/arch/m32r/include/uapi/asm/socket.h @@ -102,4 +102,6 @@ #define SO_PEERGROUPS 59 +#define SO_ZEROCOPY 60 + #endif /* _ASM_M32R_SOCKET_H */ diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c index 647dd94a0c39..72b96f282689 100644 --- a/arch/m32r/kernel/traps.c +++ b/arch/m32r/kernel/traps.c @@ -114,6 +114,15 @@ static void set_eit_vector_entries(void) _flush_cache_copyback_all(); } +void abort(void) +{ + BUG(); + + /* if that doesn't kill us, halt */ + panic("Oops failed to kill thread"); +} +EXPORT_SYMBOL(abort); + void __init trap_init(void) { set_eit_vector_entries(); diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index 5abb548f0e70..353d90487c2b 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -24,6 +24,9 @@ config M68K select OLD_SIGSUSPEND3 select OLD_SIGACTION +config CPU_BIG_ENDIAN + def_bool y + config RWSEM_GENERIC_SPINLOCK bool default y diff --git a/arch/m68k/coldfire/clk.c b/arch/m68k/coldfire/clk.c index 1e3c7e9193d1..856069a3196d 100644 --- a/arch/m68k/coldfire/clk.c +++ b/arch/m68k/coldfire/clk.c @@ -121,6 +121,9 @@ EXPORT_SYMBOL(clk_put); unsigned long clk_get_rate(struct clk *clk) { + if (!clk) + return 0; + return clk->rate; } EXPORT_SYMBOL(clk_get_rate); diff --git a/arch/m68k/coldfire/m5441x.c b/arch/m68k/coldfire/m5441x.c index dc589b039b62..04fd7fde9fb3 100644 --- a/arch/m68k/coldfire/m5441x.c +++ b/arch/m68k/coldfire/m5441x.c @@ -222,40 +222,3 @@ void __init config_BSP(char *commandp, int size) m5441x_uarts_init(); m5441x_fec_init(); } - - -#if IS_ENABLED(CONFIG_RTC_DRV_M5441x) -static struct resource m5441x_rtc_resources[] = { - { - .start = MCFRTC_BASE, - .end = MCFRTC_BASE + MCFRTC_SIZE - 1, - .flags = IORESOURCE_MEM, - }, - { - .start = MCF_IRQ_RTC, - .end = MCF_IRQ_RTC, - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device m5441x_rtc = { - .name = "mcfrtc", - .id = 0, - .resource = m5441x_rtc_resources, - .num_resources = ARRAY_SIZE(m5441x_rtc_resources), -}; -#endif - -static struct platform_device *m5441x_devices[] __initdata = { -#if IS_ENABLED(CONFIG_RTC_DRV_M5441x) - &m5441x_rtc, -#endif -}; - -static int __init init_BSP(void) -{ - platform_add_devices(m5441x_devices, ARRAY_SIZE(m5441x_devices)); - return 0; -} - -arch_initcall(init_BSP); diff --git a/arch/m68k/coldfire/pci.c b/arch/m68k/coldfire/pci.c index 6a640be48568..3097fa2ca746 100644 --- a/arch/m68k/coldfire/pci.c +++ b/arch/m68k/coldfire/pci.c @@ -243,6 +243,13 @@ static struct resource mcf_pci_io = { .flags = IORESOURCE_IO, }; +static struct resource busn_resource = { + .name = "PCI busn", + .start = 0, + .end = 255, + .flags = 
IORESOURCE_BUS, +}; + /* * Interrupt mapping and setting. */ @@ -258,6 +265,13 @@ static int mcf_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) static int __init mcf_pci_init(void) { + struct pci_host_bridge *bridge; + int ret; + + bridge = pci_alloc_host_bridge(0); + if (!bridge) + return -ENOMEM; + pr_info("ColdFire: PCI bus initialization...\n"); /* Reset the external PCI bus */ @@ -312,14 +326,28 @@ static int __init mcf_pci_init(void) set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(msecs_to_jiffies(200)); - rootbus = pci_scan_bus(0, &mcf_pci_ops, NULL); - if (!rootbus) - return -ENODEV; + + pci_add_resource(&bridge->windows, &ioport_resource); + pci_add_resource(&bridge->windows, &iomem_resource); + pci_add_resource(&bridge->windows, &busn_resource); + bridge->dev.parent = NULL; + bridge->sysdata = NULL; + bridge->busnr = 0; + bridge->ops = &mcf_pci_ops; + bridge->swizzle_irq = pci_common_swizzle; + bridge->map_irq = mcf_pci_map_irq; + + ret = pci_scan_root_bus_bridge(bridge); + if (ret) { + pci_free_host_bridge(bridge); + return ret; + } + + rootbus = bridge->bus; rootbus->resource[0] = &mcf_pci_io; rootbus->resource[1] = &mcf_pci_mem; - pci_fixup_irqs(pci_common_swizzle, mcf_pci_map_irq); pci_bus_size_bridges(rootbus); pci_bus_assign_resources(rootbus); pci_bus_add_devices(rootbus); diff --git a/arch/m68k/include/asm/page.h b/arch/m68k/include/asm/page.h index 430d4d54c883..d8a02c7e72d3 100644 --- a/arch/m68k/include/asm/page.h +++ b/arch/m68k/include/asm/page.h @@ -32,7 +32,7 @@ typedef struct page *pgtable_t; #define pgprot_val(x) ((x).pgprot) #define __pte(x) ((pte_t) { (x) } ) -#define __pmd(x) ((pmd_t) { (x) } ) +#define __pmd(x) ((pmd_t) { { (x) }, }) #define __pgd(x) ((pgd_t) { (x) } ) #define __pgprot(x) ((pgprot_t) { (x) } ) diff --git a/arch/metag/include/asm/dma-mapping.h b/arch/metag/include/asm/dma-mapping.h index fad3dc3cb210..ea573be2b6d0 100644 --- a/arch/metag/include/asm/dma-mapping.h +++ b/arch/metag/include/asm/dma-mapping.h @@ -9,7 +9,7 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) } /* - * dma_alloc_noncoherent() returns non-cacheable memory, so there's no need to + * dma_alloc_attrs() always returns non-cacheable memory, so there's no need to * do any flushing here. */ static inline void diff --git a/arch/metag/include/asm/processor.h b/arch/metag/include/asm/processor.h index ec6a49076980..8ae92d6abfd2 100644 --- a/arch/metag/include/asm/processor.h +++ b/arch/metag/include/asm/processor.h @@ -131,9 +131,6 @@ static inline void release_thread(struct task_struct *dead_task) { } -#define copy_segments(tsk, mm) do { } while (0) -#define release_segments(mm) do { } while (0) - /* * Return saved PC of a blocked thread. 
*/ diff --git a/arch/metag/include/asm/topology.h b/arch/metag/include/asm/topology.h index e95f874ded1b..707c7f7b6bea 100644 --- a/arch/metag/include/asm/topology.h +++ b/arch/metag/include/asm/topology.h @@ -4,7 +4,6 @@ #ifdef CONFIG_NUMA #define cpu_to_node(cpu) ((void)(cpu), 0) -#define parent_node(node) ((void)(node), 0) #define cpumask_of_node(node) ((void)node, cpu_online_mask) diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 4ed8ebf33509..4f798aa671dd 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -36,6 +36,22 @@ config MICROBLAZE select VIRT_TO_BUS select CPU_NO_EFFICIENT_FFS +# Endianness selection +choice + prompt "Endianness selection" + default CPU_LITTLE_ENDIAN + help + microblaze architectures can be configured for either little or + big endian formats. Be sure to select the appropriate mode. + +config CPU_BIG_ENDIAN + bool "Big endian" + +config CPU_LITTLE_ENDIAN + bool "Little endian" + +endchoice + config SWAP def_bool n diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile index 740f2b82a182..1f6c486826a0 100644 --- a/arch/microblaze/Makefile +++ b/arch/microblaze/Makefile @@ -35,6 +35,8 @@ endif CPUFLAGS-$(CONFIG_XILINX_MICROBLAZE0_USE_DIV) += -mno-xl-soft-div CPUFLAGS-$(CONFIG_XILINX_MICROBLAZE0_USE_BARREL) += -mxl-barrel-shift CPUFLAGS-$(CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR) += -mxl-pattern-compare +CPUFLAGS-$(CONFIG_BIG_ENDIAN) += -mbig-endian +CPUFLAGS-$(CONFIG_LITTLE_ENDIAN) += -mlittle-endian CPUFLAGS-1 += $(call cc-option,-mcpu=v$(CPU_VER)) diff --git a/arch/microblaze/include/asm/pci.h b/arch/microblaze/include/asm/pci.h index efd4983cb697..114b93488193 100644 --- a/arch/microblaze/include/asm/pci.h +++ b/arch/microblaze/include/asm/pci.h @@ -81,9 +81,6 @@ extern pgprot_t pci_phys_mem_access_prot(struct file *file, #define HAVE_ARCH_PCI_RESOURCE_TO_USER -extern void pcibios_setup_bus_devices(struct pci_bus *bus); -extern void pcibios_setup_bus_self(struct pci_bus *bus); - /* This part of code was originally in xilinx-pci.h */ #ifdef CONFIG_PCI_XILINX extern void __init xilinx_pci_init(void); diff --git a/arch/microblaze/include/uapi/asm/Kbuild b/arch/microblaze/include/uapi/asm/Kbuild index e77a596f3f1e..06609ca36115 100644 --- a/arch/microblaze/include/uapi/asm/Kbuild +++ b/arch/microblaze/include/uapi/asm/Kbuild @@ -7,6 +7,7 @@ generic-y += fcntl.h generic-y += ioctl.h generic-y += ioctls.h generic-y += ipcbuf.h +generic-y += kvm_para.h generic-y += mman.h generic-y += msgbuf.h generic-y += param.h diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c index e45ada8fb006..94700c5270a9 100644 --- a/arch/microblaze/kernel/dma.c +++ b/arch/microblaze/kernel/dma.c @@ -165,7 +165,7 @@ int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma, unsigned long attrs) { #ifdef CONFIG_MMU - unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; + unsigned long user_count = vma_pages(vma); unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; unsigned long off = vma->vm_pgoff; unsigned long pfn; diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c index ea2d83f1f4bb..7de941cbbd94 100644 --- a/arch/microblaze/kernel/timer.c +++ b/arch/microblaze/kernel/timer.c @@ -293,7 +293,7 @@ static int __init xilinx_timer_init(struct device_node *timer) return -EINVAL; } - pr_info("%s: irq=%d\n", timer->full_name, irq); + pr_info("%pOF: irq=%d\n", timer, irq); clk = of_clk_get(timer, 0); if (IS_ERR(clk)) { diff --git a/arch/microblaze/pci/pci-common.c 
b/arch/microblaze/pci/pci-common.c index 404fb38d06b7..ae79e8638d50 100644 --- a/arch/microblaze/pci/pci-common.c +++ b/arch/microblaze/pci/pci-common.c @@ -508,8 +508,8 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose, struct of_pci_range range; struct of_pci_range_parser parser; - pr_info("PCI host bridge %s %s ranges:\n", - dev->full_name, primary ? "(primary)" : ""); + pr_info("PCI host bridge %pOF %s ranges:\n", + dev, primary ? "(primary)" : ""); /* Check for ranges property */ if (of_pci_range_parser_init(&parser, dev)) @@ -678,144 +678,6 @@ static void pcibios_fixup_resources(struct pci_dev *dev) } DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources); -/* This function tries to figure out if a bridge resource has been initialized - * by the firmware or not. It doesn't have to be absolutely bullet proof, but - * things go more smoothly when it gets it right. It should covers cases such - * as Apple "closed" bridge resources and bare-metal pSeries unassigned bridges - */ -static int pcibios_uninitialized_bridge_resource(struct pci_bus *bus, - struct resource *res) -{ - struct pci_controller *hose = pci_bus_to_host(bus); - struct pci_dev *dev = bus->self; - resource_size_t offset; - u16 command; - int i; - - /* Job is a bit different between memory and IO */ - if (res->flags & IORESOURCE_MEM) { - /* If the BAR is non-0 (res != pci_mem_offset) then it's - * probably been initialized by somebody - */ - if (res->start != hose->pci_mem_offset) - return 0; - - /* The BAR is 0, let's check if memory decoding is enabled on - * the bridge. If not, we consider it unassigned - */ - pci_read_config_word(dev, PCI_COMMAND, &command); - if ((command & PCI_COMMAND_MEMORY) == 0) - return 1; - - /* Memory decoding is enabled and the BAR is 0. If any of - * the bridge resources covers that starting address (0 then - * it's good enough for us for memory - */ - for (i = 0; i < 3; i++) { - if ((hose->mem_resources[i].flags & IORESOURCE_MEM) && - hose->mem_resources[i].start == hose->pci_mem_offset) - return 0; - } - - /* Well, it starts at 0 and we know it will collide so we may as - * well consider it as unassigned. That covers the Apple case. - */ - return 1; - } else { - /* If the BAR is non-0, then we consider it assigned */ - offset = (unsigned long)hose->io_base_virt - _IO_BASE; - if (((res->start - offset) & 0xfffffffful) != 0) - return 0; - - /* Here, we are a bit different than memory as typically IO - * space starting at low addresses -is- valid. What we do - * instead if that we consider as unassigned anything that - * doesn't have IO enabled in the PCI command register, - * and that's it. 
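The pci-common.c conversion above, like the timer.c and ath79 hunks elsewhere in this series, moves from printing dev->full_name (or of_node_full_name()) to the %pOF printk specifier, which takes the struct device_node pointer itself and prints the node's full device-tree path. A kernel-context sketch of the pattern follows; it is not a standalone program and the wrapper function is illustrative.

#include <linux/kernel.h>
#include <linux/of.h>

/* Kernel-context sketch: %pOF prints the full device-tree path of a
 * struct device_node, so the ->full_name string is no longer needed. */
static void report_irq(struct device_node *np, int irq)
{
	/* before: pr_info("%s: irq=%d\n", of_node_full_name(np), irq); */
	pr_info("%pOF: irq=%d\n", np, irq);
}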
- */ - pci_read_config_word(dev, PCI_COMMAND, &command); - if (command & PCI_COMMAND_IO) - return 0; - - /* It's starting at 0 and IO is disabled in the bridge, consider - * it unassigned - */ - return 1; - } -} - -/* Fixup resources of a PCI<->PCI bridge */ -static void pcibios_fixup_bridge(struct pci_bus *bus) -{ - struct resource *res; - int i; - - struct pci_dev *dev = bus->self; - - pci_bus_for_each_resource(bus, res, i) { - if (!res) - continue; - if (!res->flags) - continue; - if (i >= 3 && bus->self->transparent) - continue; - - pr_debug("PCI:%s Bus rsrc %d %016llx-%016llx [%x] fixup...\n", - pci_name(dev), i, - (unsigned long long)res->start, - (unsigned long long)res->end, - (unsigned int)res->flags); - - /* Try to detect uninitialized P2P bridge resources, - * and clear them out so they get re-assigned later - */ - if (pcibios_uninitialized_bridge_resource(bus, res)) { - res->flags = 0; - pr_debug("PCI:%s (unassigned)\n", - pci_name(dev)); - } else { - pr_debug("PCI:%s %016llx-%016llx\n", - pci_name(dev), - (unsigned long long)res->start, - (unsigned long long)res->end); - } - } -} - -void pcibios_setup_bus_self(struct pci_bus *bus) -{ - /* Fix up the bus resources for P2P bridges */ - if (bus->self != NULL) - pcibios_fixup_bridge(bus); -} - -void pcibios_setup_bus_devices(struct pci_bus *bus) -{ - struct pci_dev *dev; - - pr_debug("PCI: Fixup bus devices %d (%s)\n", - bus->number, bus->self ? pci_name(bus->self) : "PHB"); - - list_for_each_entry(dev, &bus->devices, bus_list) { - /* Setup OF node pointer in archdata */ - dev->dev.of_node = pci_device_to_OF_node(dev); - - /* Fixup NUMA node as it may not be setup yet by the generic - * code and is needed by the DMA init - */ - set_dev_node(&dev->dev, pcibus_to_node(dev->bus)); - - /* Read default IRQs and fixup if necessary */ - dev->irq = of_irq_parse_and_map_pci(dev, 0, 0); - } -} - -void pcibios_fixup_bus(struct pci_bus *bus) -{ - /* nothing to do */ -} -EXPORT_SYMBOL(pcibios_fixup_bus); - /* * We need to avoid collisions with `mirrored' VGA ports * and other strange ISA hardware, so we always want the @@ -829,13 +691,6 @@ EXPORT_SYMBOL(pcibios_fixup_bus); * but we want to try to avoid allocating at 0x2900-0x2bff * which might have be mirrored at 0x0100-0x03ff.. 
*/ -resource_size_t pcibios_align_resource(void *data, const struct resource *res, - resource_size_t size, resource_size_t align) -{ - return res->start; -} -EXPORT_SYMBOL(pcibios_align_resource); - int pcibios_add_device(struct pci_dev *dev) { dev->irq = of_irq_parse_and_map_pci(dev, 0, 0); @@ -1219,8 +1074,8 @@ static void pcibios_setup_phb_resources(struct pci_controller *hose, if (!res->flags) { pr_warn("PCI: I/O resource not set for host "); - pr_cont("bridge %s (domain %d)\n", - hose->dn->full_name, hose->global_number); + pr_cont("bridge %pOF (domain %d)\n", + hose->dn, hose->global_number); /* Workaround for lack of IO resource only on 32-bit */ res->start = (unsigned long)hose->io_base_virt - isa_io_base; res->end = res->start + IO_SPACE_LIMIT; @@ -1241,8 +1096,8 @@ static void pcibios_setup_phb_resources(struct pci_controller *hose, if (i > 0) continue; pr_err("PCI: Memory resource 0 not set for "); - pr_cont("host bridge %s (domain %d)\n", - hose->dn->full_name, hose->global_number); + pr_cont("host bridge %pOF (domain %d)\n", + hose->dn, hose->global_number); /* Workaround for lack of MEM resource only on 32-bit */ res->start = hose->pci_mem_offset; @@ -1270,7 +1125,7 @@ static void pcibios_scan_phb(struct pci_controller *hose) struct pci_bus *bus; struct device_node *node = hose->dn; - pr_debug("PCI: Scanning PHB %s\n", of_node_full_name(node)); + pr_debug("PCI: Scanning PHB %pOF\n", node); pcibios_setup_phb_resources(hose, &resources); diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 48d91d5be4e9..cb7fcc4216fd 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -1627,14 +1627,6 @@ config CPU_R5500 NEC VR5500 and VR5500A series processors implement 64-bit MIPS IV instruction set. -config CPU_R6000 - bool "R6000" - depends on SYS_HAS_CPU_R6000 - select CPU_SUPPORTS_32BIT_KERNEL - help - MIPS Technologies R6000 and R6000A series processors. Note these - processors are extremely rare and the support for them is incomplete. - config CPU_NEVADA bool "RM52xx" depends on SYS_HAS_CPU_NEVADA @@ -1950,9 +1942,6 @@ config SYS_HAS_CPU_R5432 config SYS_HAS_CPU_R5500 bool -config SYS_HAS_CPU_R6000 - bool - config SYS_HAS_CPU_NEVADA bool @@ -2180,7 +2169,7 @@ config PAGE_SIZE_32KB config PAGE_SIZE_64KB bool "64kB" - depends on !CPU_R3000 && !CPU_TX39XX && !CPU_R6000 + depends on !CPU_R3000 && !CPU_TX39XX help Using 64kB page size will result in higher performance kernel at the price of higher memory consumption. 
This option is available on @@ -2248,11 +2237,11 @@ config CPU_HAS_PREFETCH config CPU_GENERIC_DUMP_TLB bool - default y if !(CPU_R3000 || CPU_R6000 || CPU_R8000 || CPU_TX39XX) + default y if !(CPU_R3000 || CPU_R8000 || CPU_TX39XX) config CPU_R4K_FPU bool - default y if !(CPU_R3000 || CPU_R6000 || CPU_TX39XX || CPU_CAVIUM_OCTEON) + default y if !(CPU_R3000 || CPU_TX39XX) config CPU_R4K_CACHE_TLB bool @@ -2260,6 +2249,7 @@ config CPU_R4K_CACHE_TLB config MIPS_MT_SMP bool "MIPS MT SMP support (1 TC on each available VPE)" + default y depends on SYS_SUPPORTS_MULTITHREADING && !CPU_MIPSR6 && !CPU_MICROMIPS select CPU_MIPSR2_IRQ_VI select CPU_MIPSR2_IRQ_EI @@ -2376,7 +2366,6 @@ config MIPS_CPS bool "MIPS Coherent Processing System support" depends on SYS_SUPPORTS_MIPS_CPS select MIPS_CM - select MIPS_CPC select MIPS_CPS_PM if HOTPLUG_CPU select SMP select SYNC_R4K if (CEVT_R4K || CSRC_R4K) @@ -2393,11 +2382,11 @@ config MIPS_CPS config MIPS_CPS_PM depends on MIPS_CPS - select MIPS_CPC bool config MIPS_CM bool + select MIPS_CPC config MIPS_CPC bool diff --git a/arch/mips/Makefile b/arch/mips/Makefile index bc2708c9ada4..a96d97a806c9 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -151,7 +151,6 @@ cflags-y += -fno-stack-check # cflags-$(CONFIG_CPU_R3000) += -march=r3000 cflags-$(CONFIG_CPU_TX39XX) += -march=r3900 -cflags-$(CONFIG_CPU_R6000) += -march=r6000 -Wa,--trap cflags-$(CONFIG_CPU_R4300) += -march=r4300 -Wa,--trap cflags-$(CONFIG_CPU_VR41XX) += -march=r4100 -Wa,--trap cflags-$(CONFIG_CPU_R4X00) += -march=r4600 -Wa,--trap @@ -291,7 +290,8 @@ KBUILD_CPPFLAGS += -DDATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0) bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y) \ VMLINUX_ENTRY_ADDRESS=$(entry-y) \ - PLATFORM="$(platform-y)" + PLATFORM="$(platform-y)" \ + ITS_INPUTS="$(its-y)" ifdef CONFIG_32BIT bootvars-y += ADDR_BITS=32 endif @@ -299,6 +299,10 @@ ifdef CONFIG_64BIT bootvars-y += ADDR_BITS=64 endif +# This is required to get dwarf unwinding tables into .debug_frame +# instead of .eh_frame so we don't discard them. +KBUILD_CFLAGS += -fno-asynchronous-unwind-tables + LDFLAGS += -m $(ld-emul) ifdef CONFIG_MIPS @@ -500,8 +504,14 @@ $(eval $(call gen_generic_defconfigs,micro32,r2,eb el)) .PHONY: $(generic_defconfigs) $(generic_defconfigs): $(Q)$(CONFIG_SHELL) $(srctree)/scripts/kconfig/merge_config.sh \ - -m -O $(objtree) $(srctree)/arch/$(ARCH)/configs/generic_defconfig $^ \ - $(foreach board,$(BOARDS),$(generic_config_dir)/board-$(board).config) + -m -O $(objtree) $(srctree)/arch/$(ARCH)/configs/generic_defconfig $^ | \ + grep -Ev '^#' + $(Q)cp $(KCONFIG_CONFIG) $(objtree)/.config.$@ + $(Q)$(MAKE) -f $(srctree)/Makefile olddefconfig \ + KCONFIG_CONFIG=$(objtree)/.config.$@ >/dev/null + $(Q)$(CONFIG_SHELL) $(srctree)/arch/$(ARCH)/tools/generic-board-config.sh \ + $(srctree) $(objtree) $(objtree)/.config.$@ $(KCONFIG_CONFIG) \ + "$(origin BOARDS)" $(BOARDS) $(Q)$(MAKE) -f $(srctree)/Makefile olddefconfig # @@ -509,6 +519,19 @@ $(generic_defconfigs): # $(generic_config_dir)/%.config: ; +# +# Prevent direct use of generic_defconfig, which is intended to be used as the +# basis of the various ISA-specific targets generated above. 
+# +.PHONY: generic_defconfig +generic_defconfig: + $(Q)echo "generic_defconfig is not intended for direct use, but should instead be" + $(Q)echo "used via an ISA-specific target from the following list:" + $(Q)echo + $(Q)for cfg in $(generic_defconfigs); do echo " $${cfg}"; done + $(Q)echo + $(Q)false + # # Legacy defconfig compatibility - these targets used to be real defconfigs but # now that the boards have been converted to use the generic kernel they are diff --git a/arch/mips/alchemy/devboards/db1200.c b/arch/mips/alchemy/devboards/db1200.c index 992442a03d8b..da7663770425 100644 --- a/arch/mips/alchemy/devboards/db1200.c +++ b/arch/mips/alchemy/devboards/db1200.c @@ -29,7 +29,7 @@ #include #include #include -#include +#include #include #include #include @@ -344,28 +344,32 @@ static struct platform_device db1200_ide_dev = { /* SD carddetects: they're supposed to be edge-triggered, but ack * doesn't seem to work (CPLD Rev 2). Instead, the screaming one - * is disabled and its counterpart enabled. The 500ms timeout is - * because the carddetect isn't debounced in hardware. + * is disabled and its counterpart enabled. The 200ms timeout is + * because the carddetect usually triggers twice, after debounce. */ static irqreturn_t db1200_mmc_cd(int irq, void *ptr) { - void(*mmc_cd)(struct mmc_host *, unsigned long); + disable_irq_nosync(irq); + return IRQ_WAKE_THREAD; +} - if (irq == DB1200_SD0_INSERT_INT) { - disable_irq_nosync(DB1200_SD0_INSERT_INT); - enable_irq(DB1200_SD0_EJECT_INT); - } else { - disable_irq_nosync(DB1200_SD0_EJECT_INT); - enable_irq(DB1200_SD0_INSERT_INT); - } +static irqreturn_t db1200_mmc_cdfn(int irq, void *ptr) +{ + void (*mmc_cd)(struct mmc_host *, unsigned long); /* link against CONFIG_MMC=m */ mmc_cd = symbol_get(mmc_detect_change); if (mmc_cd) { - mmc_cd(ptr, msecs_to_jiffies(500)); + mmc_cd(ptr, msecs_to_jiffies(200)); symbol_put(mmc_detect_change); } + msleep(100); /* debounce */ + if (irq == DB1200_SD0_INSERT_INT) + enable_irq(DB1200_SD0_EJECT_INT); + else + enable_irq(DB1200_SD0_INSERT_INT); + return IRQ_HANDLED; } @@ -374,13 +378,13 @@ static int db1200_mmc_cd_setup(void *mmc_host, int en) int ret; if (en) { - ret = request_irq(DB1200_SD0_INSERT_INT, db1200_mmc_cd, - 0, "sd_insert", mmc_host); + ret = request_threaded_irq(DB1200_SD0_INSERT_INT, db1200_mmc_cd, + db1200_mmc_cdfn, 0, "sd_insert", mmc_host); if (ret) goto out; - ret = request_irq(DB1200_SD0_EJECT_INT, db1200_mmc_cd, - 0, "sd_eject", mmc_host); + ret = request_threaded_irq(DB1200_SD0_EJECT_INT, db1200_mmc_cd, + db1200_mmc_cdfn, 0, "sd_eject", mmc_host); if (ret) { free_irq(DB1200_SD0_INSERT_INT, mmc_host); goto out; @@ -436,23 +440,27 @@ static struct led_classdev db1200_mmc_led = { static irqreturn_t pb1200_mmc1_cd(int irq, void *ptr) { - void(*mmc_cd)(struct mmc_host *, unsigned long); + disable_irq_nosync(irq); + return IRQ_WAKE_THREAD; +} - if (irq == PB1200_SD1_INSERT_INT) { - disable_irq_nosync(PB1200_SD1_INSERT_INT); - enable_irq(PB1200_SD1_EJECT_INT); - } else { - disable_irq_nosync(PB1200_SD1_EJECT_INT); - enable_irq(PB1200_SD1_INSERT_INT); - } +static irqreturn_t pb1200_mmc1_cdfn(int irq, void *ptr) +{ + void (*mmc_cd)(struct mmc_host *, unsigned long); /* link against CONFIG_MMC=m */ mmc_cd = symbol_get(mmc_detect_change); if (mmc_cd) { - mmc_cd(ptr, msecs_to_jiffies(500)); + mmc_cd(ptr, msecs_to_jiffies(200)); symbol_put(mmc_detect_change); } + msleep(100); /* debounce */ + if (irq == PB1200_SD1_INSERT_INT) + enable_irq(PB1200_SD1_EJECT_INT); + else + 
enable_irq(PB1200_SD1_INSERT_INT); + return IRQ_HANDLED; } @@ -461,13 +469,13 @@ static int pb1200_mmc1_cd_setup(void *mmc_host, int en) int ret; if (en) { - ret = request_irq(PB1200_SD1_INSERT_INT, pb1200_mmc1_cd, 0, - "sd1_insert", mmc_host); + ret = request_threaded_irq(PB1200_SD1_INSERT_INT, pb1200_mmc1_cd, + pb1200_mmc1_cdfn, 0, "sd1_insert", mmc_host); if (ret) goto out; - ret = request_irq(PB1200_SD1_EJECT_INT, pb1200_mmc1_cd, 0, - "sd1_eject", mmc_host); + ret = request_threaded_irq(PB1200_SD1_EJECT_INT, pb1200_mmc1_cd, + pb1200_mmc1_cdfn, 0, "sd1_eject", mmc_host); if (ret) { free_irq(PB1200_SD1_INSERT_INT, mmc_host); goto out; diff --git a/arch/mips/alchemy/devboards/db1300.c b/arch/mips/alchemy/devboards/db1300.c index a5504f57cb00..cd1ae29f95a3 100644 --- a/arch/mips/alchemy/devboards/db1300.c +++ b/arch/mips/alchemy/devboards/db1300.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #include #include @@ -450,24 +450,27 @@ static struct platform_device db1300_ide_dev = { static irqreturn_t db1300_mmc_cd(int irq, void *ptr) { - void(*mmc_cd)(struct mmc_host *, unsigned long); + disable_irq_nosync(irq); + return IRQ_WAKE_THREAD; +} - /* disable the one currently screaming. No other way to shut it up */ - if (irq == DB1300_SD1_INSERT_INT) { - disable_irq_nosync(DB1300_SD1_INSERT_INT); - enable_irq(DB1300_SD1_EJECT_INT); - } else { - disable_irq_nosync(DB1300_SD1_EJECT_INT); - enable_irq(DB1300_SD1_INSERT_INT); - } +static irqreturn_t db1300_mmc_cdfn(int irq, void *ptr) +{ + void (*mmc_cd)(struct mmc_host *, unsigned long); /* link against CONFIG_MMC=m. We can only be called once MMC core has * initialized the controller, so symbol_get() should always succeed. */ mmc_cd = symbol_get(mmc_detect_change); - mmc_cd(ptr, msecs_to_jiffies(500)); + mmc_cd(ptr, msecs_to_jiffies(200)); symbol_put(mmc_detect_change); + msleep(100); /* debounce */ + if (irq == DB1300_SD1_INSERT_INT) + enable_irq(DB1300_SD1_EJECT_INT); + else + enable_irq(DB1300_SD1_INSERT_INT); + return IRQ_HANDLED; } @@ -487,13 +490,13 @@ static int db1300_mmc_cd_setup(void *mmc_host, int en) int ret; if (en) { - ret = request_irq(DB1300_SD1_INSERT_INT, db1300_mmc_cd, 0, - "sd_insert", mmc_host); + ret = request_threaded_irq(DB1300_SD1_INSERT_INT, db1300_mmc_cd, + db1300_mmc_cdfn, 0, "sd_insert", mmc_host); if (ret) goto out; - ret = request_irq(DB1300_SD1_EJECT_INT, db1300_mmc_cd, 0, - "sd_eject", mmc_host); + ret = request_threaded_irq(DB1300_SD1_EJECT_INT, db1300_mmc_cd, + db1300_mmc_cdfn, 0, "sd_eject", mmc_host); if (ret) { free_irq(DB1300_SD1_INSERT_INT, mmc_host); goto out; diff --git a/arch/mips/alchemy/devboards/db1550.c b/arch/mips/alchemy/devboards/db1550.c index 1c01d6eadb08..421bd5793f7e 100644 --- a/arch/mips/alchemy/devboards/db1550.c +++ b/arch/mips/alchemy/devboards/db1550.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/mips/alchemy/devboards/db1xxx.c b/arch/mips/alchemy/devboards/db1xxx.c index 2d47f951121a..c9ad28995cd2 100644 --- a/arch/mips/alchemy/devboards/db1xxx.c +++ b/arch/mips/alchemy/devboards/db1xxx.c @@ -2,6 +2,7 @@ * Alchemy DB/PB1xxx board support. 
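The Alchemy card-detect rework in the db1200/db1300 hunks above splits each handler in two: a hard handler that only silences the screaming line and returns IRQ_WAKE_THREAD, and a threaded handler that notifies the MMC layer, sleeps for the debounce period, and re-arms an interrupt line. Below is a generic, hedged sketch of that request_threaded_irq() pattern; the IRQ number, name and data pointer are placeholders, and unlike the board code it re-enables the same line rather than the opposite insert/eject pair.

#include <linux/interrupt.h>
#include <linux/delay.h>

/* Hard handler: interrupt context, must not sleep. */
static irqreturn_t cd_hard(int irq, void *data)
{
	disable_irq_nosync(irq);	/* stop the level-triggered storm */
	return IRQ_WAKE_THREAD;		/* hand the rest to the thread */
}

/* Threaded handler: process context, so msleep() is allowed. */
static irqreturn_t cd_thread(int irq, void *data)
{
	/* ... notify whoever cares about the card-detect event ... */
	msleep(100);			/* crude debounce, as in the board code */
	enable_irq(irq);		/* re-arm once the line has settled */
	return IRQ_HANDLED;
}

static int cd_setup(unsigned int irq, void *data)
{
	return request_threaded_irq(irq, cd_hard, cd_thread, 0,
				    "card-detect", data);
}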
*/ +#include #include #include @@ -97,6 +98,7 @@ arch_initcall(db1xxx_arch_init); static int __init db1xxx_dev_init(void) { + mips_set_machine_name(board_type_str()); switch (BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI))) { case BCSR_WHOAMI_DB1000: case BCSR_WHOAMI_DB1500: diff --git a/arch/mips/ar7/clock.c b/arch/mips/ar7/clock.c index dda422a0f36c..0137656107a9 100644 --- a/arch/mips/ar7/clock.c +++ b/arch/mips/ar7/clock.c @@ -430,6 +430,9 @@ EXPORT_SYMBOL(clk_disable); unsigned long clk_get_rate(struct clk *clk) { + if (!clk) + return 0; + return clk->rate; } EXPORT_SYMBOL(clk_get_rate); diff --git a/arch/mips/ath79/clock.c b/arch/mips/ath79/clock.c index fa845953f736..6b1000b6a6a6 100644 --- a/arch/mips/ath79/clock.c +++ b/arch/mips/ath79/clock.c @@ -487,17 +487,16 @@ static void __init ath79_clocks_init_dt_ng(struct device_node *np) { struct clk *ref_clk; void __iomem *pll_base; - const char *dnfn = of_node_full_name(np); ref_clk = of_clk_get(np, 0); if (IS_ERR(ref_clk)) { - pr_err("%s: of_clk_get failed\n", dnfn); + pr_err("%pOF: of_clk_get failed\n", np); goto err; } pll_base = of_iomap(np, 0); if (!pll_base) { - pr_err("%s: can't map pll registers\n", dnfn); + pr_err("%pOF: can't map pll registers\n", np); goto err_clk; } @@ -506,12 +505,12 @@ static void __init ath79_clocks_init_dt_ng(struct device_node *np) else if (of_device_is_compatible(np, "qca,ar9330-pll")) ar9330_clk_init(ref_clk, pll_base); else { - pr_err("%s: could not find any appropriate clk_init()\n", dnfn); + pr_err("%pOF: could not find any appropriate clk_init()\n", np); goto err_iounmap; } if (of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data)) { - pr_err("%s: could not register clk provider\n", dnfn); + pr_err("%pOF: could not register clk provider\n", np); goto err_iounmap; } diff --git a/arch/mips/ath79/pci.c b/arch/mips/ath79/pci.c index 730c0b03060d..b816cb4a25ff 100644 --- a/arch/mips/ath79/pci.c +++ b/arch/mips/ath79/pci.c @@ -22,10 +22,10 @@ #include "pci.h" static int (*ath79_pci_plat_dev_init)(struct pci_dev *dev); -static const struct ath79_pci_irq *ath79_pci_irq_map __initdata; -static unsigned ath79_pci_nr_irqs __initdata; +static const struct ath79_pci_irq *ath79_pci_irq_map; +static unsigned ath79_pci_nr_irqs; -static const struct ath79_pci_irq ar71xx_pci_irq_map[] __initconst = { +static const struct ath79_pci_irq ar71xx_pci_irq_map[] = { { .slot = 17, .pin = 1, @@ -41,7 +41,7 @@ static const struct ath79_pci_irq ar71xx_pci_irq_map[] __initconst = { } }; -static const struct ath79_pci_irq ar724x_pci_irq_map[] __initconst = { +static const struct ath79_pci_irq ar724x_pci_irq_map[] = { { .slot = 0, .pin = 1, @@ -49,7 +49,7 @@ static const struct ath79_pci_irq ar724x_pci_irq_map[] __initconst = { } }; -static const struct ath79_pci_irq qca955x_pci_irq_map[] __initconst = { +static const struct ath79_pci_irq qca955x_pci_irq_map[] = { { .bus = 0, .slot = 0, @@ -64,7 +64,7 @@ static const struct ath79_pci_irq qca955x_pci_irq_map[] __initconst = { }, }; -int __init pcibios_map_irq(const struct pci_dev *dev, uint8_t slot, uint8_t pin) +int pcibios_map_irq(const struct pci_dev *dev, uint8_t slot, uint8_t pin) { int irq = -1; int i; diff --git a/arch/mips/bcm63xx/clk.c b/arch/mips/bcm63xx/clk.c index 73626040e4d6..19577f771c1f 100644 --- a/arch/mips/bcm63xx/clk.c +++ b/arch/mips/bcm63xx/clk.c @@ -339,6 +339,9 @@ EXPORT_SYMBOL(clk_disable); unsigned long clk_get_rate(struct clk *clk) { + if (!clk) + return 0; + return clk->rate; } diff --git a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile index 
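The ar7 and bcm63xx clk_get_rate() hunks above add a NULL check so that a NULL struct clk, which callers may legitimately pass around as a "no clock" handle, reports a rate of 0 instead of being dereferenced. A kernel-context sketch of the defensive pattern follows; the minimal platform-private struct clk is shown only for completeness and is not the common clock framework type.

/* Kernel-context sketch of the defensive clk_get_rate() pattern. */
struct clk {
	unsigned long rate;
};

unsigned long clk_get_rate(struct clk *clk)
{
	if (!clk)		/* tolerate a NULL "no clock" handle */
		return 0;

	return clk->rate;
}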
145b5ce8eb7e..1bd5c4f00d19 100644 --- a/arch/mips/boot/Makefile +++ b/arch/mips/boot/Makefile @@ -118,6 +118,12 @@ ifeq ($(ADDR_BITS),64) itb_addr_cells = 2 endif +quiet_cmd_its_cat = CAT $@ + cmd_its_cat = cat $^ >$@ + +$(obj)/vmlinux.its.S: $(addprefix $(srctree)/arch/mips/$(PLATFORM)/,$(ITS_INPUTS)) + $(call if_changed,its_cat) + quiet_cmd_cpp_its_S = ITS $@ cmd_cpp_its_S = $(CPP) $(cpp_flags) -P -C -o $@ $< \ -DKERNEL_NAME="\"Linux $(KERNELRELEASE)\"" \ @@ -128,19 +134,19 @@ quiet_cmd_cpp_its_S = ITS $@ -DADDR_BITS=$(ADDR_BITS) \ -DADDR_CELLS=$(itb_addr_cells) -$(obj)/vmlinux.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE +$(obj)/vmlinux.its: $(obj)/vmlinux.its.S $(VMLINUX) FORCE $(call if_changed_dep,cpp_its_S,none,vmlinux.bin) -$(obj)/vmlinux.gz.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE +$(obj)/vmlinux.gz.its: $(obj)/vmlinux.its.S $(VMLINUX) FORCE $(call if_changed_dep,cpp_its_S,gzip,vmlinux.bin.gz) -$(obj)/vmlinux.bz2.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE +$(obj)/vmlinux.bz2.its: $(obj)/vmlinux.its.S $(VMLINUX) FORCE $(call if_changed_dep,cpp_its_S,bzip2,vmlinux.bin.bz2) -$(obj)/vmlinux.lzma.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE +$(obj)/vmlinux.lzma.its: $(obj)/vmlinux.its.S $(VMLINUX) FORCE $(call if_changed_dep,cpp_its_S,lzma,vmlinux.bin.lzma) -$(obj)/vmlinux.lzo.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE +$(obj)/vmlinux.lzo.its: $(obj)/vmlinux.its.S $(VMLINUX) FORCE $(call if_changed_dep,cpp_its_S,lzo,vmlinux.bin.lzo) quiet_cmd_itb-image = ITB $@ diff --git a/arch/mips/boot/dts/Makefile b/arch/mips/boot/dts/Makefile index b9db49203e0c..cbac26ce063e 100644 --- a/arch/mips/boot/dts/Makefile +++ b/arch/mips/boot/dts/Makefile @@ -5,6 +5,7 @@ dts-dirs += ingenic dts-dirs += lantiq dts-dirs += mti dts-dirs += netlogic +dts-dirs += ni dts-dirs += pic32 dts-dirs += qca dts-dirs += ralink diff --git a/arch/mips/boot/dts/ingenic/ci20.dts b/arch/mips/boot/dts/ingenic/ci20.dts index fd138d9978c1..6c381844929c 100644 --- a/arch/mips/boot/dts/ingenic/ci20.dts +++ b/arch/mips/boot/dts/ingenic/ci20.dts @@ -1,6 +1,7 @@ /dts-v1/; #include "jz4780.dtsi" +#include / { compatible = "img,ci20", "ingenic,jz4780"; @@ -21,6 +22,13 @@ memory { reg = <0x0 0x10000000 0x30000000 0x30000000>; }; + + eth0_power: fixedregulator@0 { + compatible = "regulator-fixed"; + regulator-name = "eth0_power"; + gpio = <&gpb 25 GPIO_ACTIVE_LOW>; + enable-active-high; + }; }; &ext { @@ -123,6 +131,29 @@ partition@0x8c00000 { }; }; }; + + dm9000@6 { + compatible = "davicom,dm9000"; + davicom,no-eeprom; + + pinctrl-names = "default"; + pinctrl-0 = <&pins_nemc_cs6>; + + reg = <6 0 1 /* addr */ + 6 2 1>; /* data */ + + ingenic,nemc-tAS = <15>; + ingenic,nemc-tAH = <10>; + ingenic,nemc-tBP = <20>; + ingenic,nemc-tAW = <50>; + ingenic,nemc-tSTRV = <100>; + + reset-gpios = <&gpf 12 GPIO_ACTIVE_HIGH>; + vcc-supply = <ð0_power>; + + interrupt-parent = <&gpe>; + interrupts = <19 4>; + }; }; &bch { @@ -165,4 +196,10 @@ pins_nemc_cs1: nemc-cs1 { groups = "nemc-cs1"; bias-disable; }; + + pins_nemc_cs6: nemc-cs6 { + function = "nemc-cs6"; + groups = "nemc-cs6"; + bias-disable; + }; }; diff --git a/arch/mips/boot/dts/ingenic/jz4780.dtsi b/arch/mips/boot/dts/ingenic/jz4780.dtsi index 4853ef67b3ab..e906134ecaef 100644 --- a/arch/mips/boot/dts/ingenic/jz4780.dtsi +++ b/arch/mips/boot/dts/ingenic/jz4780.dtsi @@ -44,6 +44,17 @@ cgu: jz4780-cgu@10000000 { #clock-cells = <1>; }; + rtc_dev: rtc@10003000 { + 
compatible = "ingenic,jz4780-rtc"; + reg = <0x10003000 0x4c>; + + interrupt-parent = <&intc>; + interrupts = <32>; + + clocks = <&cgu JZ4780_CLK_RTCLK>; + clock-names = "rtc"; + }; + pinctrl: pin-controller@10010000 { compatible = "ingenic,jz4780-pinctrl"; reg = <0x10010000 0x600>; diff --git a/arch/mips/boot/dts/ni/169445.dts b/arch/mips/boot/dts/ni/169445.dts new file mode 100644 index 000000000000..5389ef46c480 --- /dev/null +++ b/arch/mips/boot/dts/ni/169445.dts @@ -0,0 +1,100 @@ +/dts-v1/; + +/ { + #address-cells = <1>; + #size-cells = <1>; + compatible = "ni,169445"; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + cpu@0 { + device_type = "cpu"; + compatible = "mti,mips14KEc"; + clocks = <&baseclk>; + reg = <0>; + }; + }; + + memory@0 { + device_type = "memory"; + reg = <0x0 0x10000000>; + }; + + baseclk: baseclock { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <50000000>; + }; + + cpu_intc: interrupt-controller { + #address-cells = <0>; + compatible = "mti,cpu-interrupt-controller"; + interrupt-controller; + #interrupt-cells = <1>; + }; + + ahb@1f300000 { + compatible = "simple-bus"; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x1f300000 0x80FFF>; + + gpio1: gpio@10 { + compatible = "ni,169445-nand-gpio"; + reg = <0x10 0x4>; + reg-names = "dat"; + gpio-controller; + #gpio-cells = <2>; + }; + + gpio2: gpio@14 { + compatible = "ni,169445-nand-gpio"; + reg = <0x14 0x4>; + reg-names = "dat"; + gpio-controller; + #gpio-cells = <2>; + no-output; + }; + + nand@0 { + compatible = "gpio-control-nand"; + nand-on-flash-bbt; + nand-ecc-mode = "soft_bch"; + nand-ecc-step-size = <512>; + nand-ecc-strength = <4>; + reg = <0x0 4>; + gpios = <&gpio2 0 0>, /* rdy */ + <&gpio1 1 0>, /* nce */ + <&gpio1 2 0>, /* ale */ + <&gpio1 3 0>, /* cle */ + <&gpio1 4 0>; /* nwp */ + }; + + serial@80000 { + compatible = "ns16550a"; + reg = <0x80000 0x1000>; + interrupt-parent = <&cpu_intc>; + interrupts = <6>; + clocks = <&baseclk>; + reg-shift = <0>; + }; + + ethernet@40000 { + compatible = "snps,dwmac-4.10a"; + interrupt-parent = <&cpu_intc>; + interrupts = <5>; + interrupt-names = "macirq"; + reg = <0x40000 0x2000>; + clock-names = "stmmaceth", "pclk"; + clocks = <&baseclk>, <&baseclk>; + + phy-mode = "rgmii"; + + fixed-link { + speed = <1000>; + full-duplex; + }; + }; + }; +}; diff --git a/arch/mips/boot/dts/ni/Makefile b/arch/mips/boot/dts/ni/Makefile new file mode 100644 index 000000000000..66cfdffc51c2 --- /dev/null +++ b/arch/mips/boot/dts/ni/Makefile @@ -0,0 +1,7 @@ +dtb-$(CONFIG_FIT_IMAGE_FDT_NI169445) += 169445.dtb + +# Force kbuild to make empty built-in.o if necessary +obj- += dummy.o + +always := $(dtb-y) +clean-files := *.dtb *.dtb.S diff --git a/arch/mips/boot/dts/ralink/Makefile b/arch/mips/boot/dts/ralink/Makefile index 2a7225954bf6..55e2937b61f3 100644 --- a/arch/mips/boot/dts/ralink/Makefile +++ b/arch/mips/boot/dts/ralink/Makefile @@ -2,6 +2,8 @@ dtb-$(CONFIG_DTB_RT2880_EVAL) += rt2880_eval.dtb dtb-$(CONFIG_DTB_RT305X_EVAL) += rt3052_eval.dtb dtb-$(CONFIG_DTB_RT3883_EVAL) += rt3883_eval.dtb dtb-$(CONFIG_DTB_MT7620A_EVAL) += mt7620a_eval.dtb +dtb-$(CONFIG_DTB_OMEGA2P) += omega2p.dtb +dtb-$(CONFIG_DTB_VOCORE2) += vocore2.dtb obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y)) diff --git a/arch/mips/boot/dts/ralink/mt7628a.dtsi b/arch/mips/boot/dts/ralink/mt7628a.dtsi new file mode 100644 index 000000000000..9ff7e8faaecc --- /dev/null +++ b/arch/mips/boot/dts/ralink/mt7628a.dtsi @@ -0,0 +1,126 @@ +/ { + #address-cells = <1>; + #size-cells = <1>; + 
compatible = "ralink,mt7628a-soc"; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + cpu@0 { + compatible = "mti,mips24KEc"; + device_type = "cpu"; + reg = <0>; + }; + }; + + resetc: reset-controller { + compatible = "ralink,rt2880-reset"; + #reset-cells = <1>; + }; + + cpuintc: interrupt-controller { + #address-cells = <0>; + #interrupt-cells = <1>; + interrupt-controller; + compatible = "mti,cpu-interrupt-controller"; + }; + + palmbus@10000000 { + compatible = "palmbus"; + reg = <0x10000000 0x200000>; + ranges = <0x0 0x10000000 0x1FFFFF>; + + #address-cells = <1>; + #size-cells = <1>; + + sysc: system-controller@0 { + compatible = "ralink,mt7620a-sysc", "syscon"; + reg = <0x0 0x100>; + }; + + intc: interrupt-controller@200 { + compatible = "ralink,rt2880-intc"; + reg = <0x200 0x100>; + + interrupt-controller; + #interrupt-cells = <1>; + + resets = <&resetc 9>; + reset-names = "intc"; + + interrupt-parent = <&cpuintc>; + interrupts = <2>; + + ralink,intc-registers = <0x9c 0xa0 + 0x6c 0xa4 + 0x80 0x78>; + }; + + memory-controller@300 { + compatible = "ralink,mt7620a-memc"; + reg = <0x300 0x100>; + }; + + uart0: uartlite@c00 { + compatible = "ns16550a"; + reg = <0xc00 0x100>; + + resets = <&resetc 12>; + reset-names = "uart0"; + + interrupt-parent = <&intc>; + interrupts = <20>; + + reg-shift = <2>; + }; + + uart1: uart1@d00 { + compatible = "ns16550a"; + reg = <0xd00 0x100>; + + resets = <&resetc 19>; + reset-names = "uart1"; + + interrupt-parent = <&intc>; + interrupts = <21>; + + reg-shift = <2>; + }; + + uart2: uart2@e00 { + compatible = "ns16550a"; + reg = <0xe00 0x100>; + + resets = <&resetc 20>; + reset-names = "uart2"; + + interrupt-parent = <&intc>; + interrupts = <22>; + + reg-shift = <2>; + }; + }; + + usb_phy: usb-phy@10120000 { + compatible = "mediatek,mt7628-usbphy"; + reg = <0x10120000 0x1000>; + + #phy-cells = <0>; + + ralink,sysctl = <&sysc>; + resets = <&resetc 22 &resetc 25>; + reset-names = "host", "device"; + }; + + ehci@101c0000 { + compatible = "generic-ehci"; + reg = <0x101c0000 0x1000>; + + phys = <&usb_phy>; + phy-names = "usb"; + + interrupt-parent = <&intc>; + interrupts = <18>; + }; +}; diff --git a/arch/mips/boot/dts/ralink/omega2p.dts b/arch/mips/boot/dts/ralink/omega2p.dts new file mode 100644 index 000000000000..5884fd48f59a --- /dev/null +++ b/arch/mips/boot/dts/ralink/omega2p.dts @@ -0,0 +1,18 @@ +/dts-v1/; + +/include/ "mt7628a.dtsi" + +/ { + compatible = "onion,omega2+", "ralink,mt7688a-soc", "ralink,mt7628a-soc"; + model = "Onion Omega2+"; + + memory@0 { + device_type = "memory"; + reg = <0x0 0x8000000>; + }; + + chosen { + bootargs = "console=ttyS0,115200"; + stdout-path = &uart0; + }; +}; diff --git a/arch/mips/boot/dts/ralink/vocore2.dts b/arch/mips/boot/dts/ralink/vocore2.dts new file mode 100644 index 000000000000..fa8a5f8f236a --- /dev/null +++ b/arch/mips/boot/dts/ralink/vocore2.dts @@ -0,0 +1,18 @@ +/dts-v1/; + +#include "mt7628a.dtsi" + +/ { + compatible = "vocore,vocore2", "ralink,mt7628a-soc"; + model = "VoCore2"; + + memory@0 { + device_type = "memory"; + reg = <0x0 0x8000000>; + }; + + chosen { + bootargs = "console=ttyS2,115200"; + stdout-path = &uart2; + }; +}; diff --git a/arch/mips/cavium-octeon/executive/Makefile b/arch/mips/cavium-octeon/executive/Makefile index b6d6e841a984..50b427879465 100644 --- a/arch/mips/cavium-octeon/executive/Makefile +++ b/arch/mips/cavium-octeon/executive/Makefile @@ -16,4 +16,4 @@ obj-y += cvmx-pko.o cvmx-spi.o cvmx-cmd-queue.o \ cvmx-helper-loop.o cvmx-helper-spi.o cvmx-helper-util.o \ 
cvmx-interrupt-decodes.o cvmx-interrupt-rsl.o -obj-y += cvmx-helper-errata.o cvmx-helper-jtag.o +obj-y += cvmx-helper-errata.o cvmx-helper-jtag.o cvmx-boot-vector.o diff --git a/arch/mips/cavium-octeon/executive/cvmx-boot-vector.c b/arch/mips/cavium-octeon/executive/cvmx-boot-vector.c new file mode 100644 index 000000000000..b7019d21808e --- /dev/null +++ b/arch/mips/cavium-octeon/executive/cvmx-boot-vector.c @@ -0,0 +1,167 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2004-2017 Cavium, Inc. + */ + + +/* + We install this program at the bootvector: +------------------------------------ + .set noreorder + .set nomacro + .set noat +reset_vector: + dmtc0 $k0, $31, 0 # Save $k0 to DESAVE + dmtc0 $k1, $31, 3 # Save $k1 to KScratch2 + + mfc0 $k0, $12, 0 # Status + mfc0 $k1, $15, 1 # Ebase + + ori $k0, 0x84 # Enable 64-bit addressing, set + # ERL (should already be set) + andi $k1, 0x3ff # mask out core ID + + mtc0 $k0, $12, 0 # Status + sll $k1, 5 + + lui $k0, 0xbfc0 + cache 17, 0($0) # Core-14345, clear L1 Dcache virtual + # tags if the core hit an NMI + + ld $k0, 0x78($k0) # k0 <- (bfc00078) pointer to the reset vector + synci 0($0) # Invalidate ICache to get coherent + # view of target code. + + daddu $k0, $k0, $k1 + nop + + ld $k0, 0($k0) # k0 <- core specific target address + dmfc0 $k1, $31, 3 # Restore $k1 from KScratch2 + + beqz $k0, wait_loop # Spin in wait loop + nop + + jr $k0 + nop + + nop # NOPs needed here to fill delay slots + nop # on endian reversal of previous instructions + +wait_loop: + wait + nop + + b wait_loop + nop + + nop + nop +------------------------------------ + +0000000000000000 : + 0: 40baf800 dmtc0 k0,c0_desave + 4: 40bbf803 dmtc0 k1,c0_kscratch2 + + 8: 401a6000 mfc0 k0,c0_status + c: 401b7801 mfc0 k1,c0_ebase + + 10: 375a0084 ori k0,k0,0x84 + 14: 337b03ff andi k1,k1,0x3ff + + 18: 409a6000 mtc0 k0,c0_status + 1c: 001bd940 sll k1,k1,0x5 + + 20: 3c1abfc0 lui k0,0xbfc0 + 24: bc110000 cache 0x11,0(zero) + + 28: df5a0078 ld k0,120(k0) + 2c: 041f0000 synci 0(zero) + + 30: 035bd02d daddu k0,k0,k1 + 34: 00000000 nop + + 38: df5a0000 ld k0,0(k0) + 3c: 403bf803 dmfc0 k1,c0_kscratch2 + + 40: 13400005 beqz k0,58 + 44: 00000000 nop + + 48: 03400008 jr k0 + 4c: 00000000 nop + + 50: 00000000 nop + 54: 00000000 nop + +0000000000000058 : + 58: 42000020 wait + 5c: 00000000 nop + + 60: 1000fffd b 58 + 64: 00000000 nop + + 68: 00000000 nop + 6c: 00000000 nop + + */ + +#include + +static unsigned long long _cvmx_bootvector_data[16] = { + 0x40baf80040bbf803ull, /* patch low order 8-bits if no KScratch*/ + 0x401a6000401b7801ull, + 0x375a0084337b03ffull, + 0x409a6000001bd940ull, + 0x3c1abfc0bc110000ull, + 0xdf5a0078041f0000ull, + 0x035bd02d00000000ull, + 0xdf5a0000403bf803ull, /* patch low order 8-bits if no KScratch*/ + 0x1340000500000000ull, + 0x0340000800000000ull, + 0x0000000000000000ull, + 0x4200002000000000ull, + 0x1000fffd00000000ull, + 0x0000000000000000ull, + OCTEON_BOOT_MOVEABLE_MAGIC1, + 0 /* To be filled in with address of vector block*/ +}; + +/* 2^10 CPUs */ +#define VECTOR_TABLE_SIZE (1024 * sizeof(struct cvmx_boot_vector_element)) + +static void cvmx_boot_vector_init(void *mem) +{ + uint64_t kseg0_mem; + int i; + + memset(mem, 0, VECTOR_TABLE_SIZE); + kseg0_mem = cvmx_ptr_to_phys(mem) | 0x8000000000000000ull; + + for (i = 0; i < 15; i++) { + uint64_t v = _cvmx_bootvector_data[i]; + + if (OCTEON_IS_OCTEON1PLUS() && (i 
== 0 || i == 7)) + v &= 0xffffffff00000000ull; /* KScratch not availble. */ + cvmx_write_csr(CVMX_MIO_BOOT_LOC_ADR, i * 8); + cvmx_write_csr(CVMX_MIO_BOOT_LOC_DAT, v); + } + cvmx_write_csr(CVMX_MIO_BOOT_LOC_ADR, 15 * 8); + cvmx_write_csr(CVMX_MIO_BOOT_LOC_DAT, kseg0_mem); + cvmx_write_csr(CVMX_MIO_BOOT_LOC_CFGX(0), 0x81fc0000); +} + +/** + * Get a pointer to the per-core table of reset vector pointers + * + */ +struct cvmx_boot_vector_element *cvmx_boot_vector_get(void) +{ + struct cvmx_boot_vector_element *ret; + + ret = cvmx_bootmem_alloc_named_range_once(VECTOR_TABLE_SIZE, 0, + (1ull << 32) - 1, 8, "__boot_vector1__", cvmx_boot_vector_init); + return ret; +} +EXPORT_SYMBOL(cvmx_boot_vector_get); diff --git a/arch/mips/cavium-octeon/executive/cvmx-bootmem.c b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c index 8d54d774933c..94d97ebfa036 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-bootmem.c +++ b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c @@ -44,6 +44,55 @@ static struct cvmx_bootmem_desc *cvmx_bootmem_desc; /* See header file for descriptions of functions */ +/** + * This macro returns the size of a member of a structure. + * Logically it is the same as "sizeof(s::field)" in C++, but + * C lacks the "::" operator. + */ +#define SIZEOF_FIELD(s, field) sizeof(((s *)NULL)->field) + +/** + * This macro returns a member of the + * cvmx_bootmem_named_block_desc_t structure. These members can't + * be directly addressed as they might be in memory not directly + * reachable. In the case where bootmem is compiled with + * LINUX_HOST, the structure itself might be located on a remote + * Octeon. The argument "field" is the member name of the + * cvmx_bootmem_named_block_desc_t to read. Regardless of the type + * of the field, the return type is always a uint64_t. The "addr" + * parameter is the physical address of the structure. + */ +#define CVMX_BOOTMEM_NAMED_GET_FIELD(addr, field) \ + __cvmx_bootmem_desc_get(addr, \ + offsetof(struct cvmx_bootmem_named_block_desc, field), \ + SIZEOF_FIELD(struct cvmx_bootmem_named_block_desc, field)) + +/** + * This function is the implementation of the get macros defined + * for individual structure members. The argument are generated + * by the macros inorder to read only the needed memory. + * + * @param base 64bit physical address of the complete structure + * @param offset Offset from the beginning of the structure to the member being + * accessed. + * @param size Size of the structure member. + * + * @return Value of the structure member promoted into a uint64_t. 
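The reset stub listed at the top of cvmx-boot-vector.c masks the core number out of EBase (andi $k1, 0x3ff), multiplies it by 32 (sll $k1, 5) and loads the per-core target pointer from the table that cvmx_boot_vector_get() hands out. Below is a small sketch of that slot calculation; the macro names are illustrative, and the 32-byte stride is inferred from the stub rather than quoted from a header.

#include <stdint.h>

/* Per-core slot lookup performed by the reset stub:
 *   andi $k1, 0x3ff  -> core number from EBase
 *   sll  $k1, 5      -> 32-byte stride per core
 *   daddu + ld       -> fetch the core's target pointer
 * Macro names are illustrative, not taken from the kernel headers. */
#define DEMO_BOOT_VECTOR_CORE_MASK	0x3ffu
#define DEMO_BOOT_VECTOR_STRIDE		32u

static inline uint64_t demo_boot_vector_slot(uint64_t table_kseg0, uint32_t ebase)
{
	uint32_t core = ebase & DEMO_BOOT_VECTOR_CORE_MASK;

	return table_kseg0 + (uint64_t)core * DEMO_BOOT_VECTOR_STRIDE;
}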
+ */ +static inline uint64_t __cvmx_bootmem_desc_get(uint64_t base, int offset, + int size) +{ + base = (1ull << 63) | (base + offset); + switch (size) { + case 4: + return cvmx_read64_uint32(base); + case 8: + return cvmx_read64_uint64(base); + default: + return 0; + } +} + /* * Wrapper functions are provided for reading/writing the size and * next block values as these may not be directly addressible (in 32 @@ -98,6 +147,42 @@ void *cvmx_bootmem_alloc(uint64_t size, uint64_t alignment) return cvmx_bootmem_alloc_range(size, alignment, 0, 0); } +void *cvmx_bootmem_alloc_named_range_once(uint64_t size, uint64_t min_addr, + uint64_t max_addr, uint64_t align, + char *name, + void (*init) (void *)) +{ + int64_t addr; + void *ptr; + uint64_t named_block_desc_addr; + + named_block_desc_addr = (uint64_t) + cvmx_bootmem_phy_named_block_find(name, + (uint32_t)CVMX_BOOTMEM_FLAG_NO_LOCKING); + + if (named_block_desc_addr) { + addr = CVMX_BOOTMEM_NAMED_GET_FIELD(named_block_desc_addr, + base_addr); + return cvmx_phys_to_ptr(addr); + } + + addr = cvmx_bootmem_phy_named_block_alloc(size, min_addr, max_addr, + align, name, + (uint32_t)CVMX_BOOTMEM_FLAG_NO_LOCKING); + + if (addr < 0) + return NULL; + ptr = cvmx_phys_to_ptr(addr); + + if (init) + init(ptr); + else + memset(ptr, 0, size); + + return ptr; +} +EXPORT_SYMBOL(cvmx_bootmem_alloc_named_range_once); + void *cvmx_bootmem_alloc_named_range(uint64_t size, uint64_t min_addr, uint64_t max_addr, uint64_t align, char *name) diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c index c1eb1ff7c800..5b3a3f6a9ad3 100644 --- a/arch/mips/cavium-octeon/octeon-irq.c +++ b/arch/mips/cavium-octeon/octeon-irq.c @@ -2963,3 +2963,12 @@ void octeon_fixup_irqs(void) } #endif /* CONFIG_HOTPLUG_CPU */ + +struct irq_domain *octeon_irq_get_block_domain(int node, uint8_t block) +{ + struct octeon_ciu3_info *ciu3_info; + + ciu3_info = octeon_ciu3_info_per_node[node & CVMX_NODE_MASK]; + return ciu3_info->domain[block]; +} +EXPORT_SYMBOL(octeon_irq_get_block_domain); diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c index 3de786545ded..75e7c8625659 100644 --- a/arch/mips/cavium-octeon/smp.c +++ b/arch/mips/cavium-octeon/smp.c @@ -205,7 +205,7 @@ int plat_post_relocation(long offset) * Firmware CPU startup hook * */ -static void octeon_boot_secondary(int cpu, struct task_struct *idle) +static int octeon_boot_secondary(int cpu, struct task_struct *idle) { int count; @@ -223,8 +223,12 @@ static void octeon_boot_secondary(int cpu, struct task_struct *idle) udelay(1); count--; } - if (count == 0) + if (count == 0) { pr_err("Secondary boot timeout\n"); + return -ETIMEDOUT; + } + + return 0; } /** @@ -408,7 +412,7 @@ late_initcall(register_cavium_notifier); #endif /* CONFIG_HOTPLUG_CPU */ -struct plat_smp_ops octeon_smp_ops = { +const struct plat_smp_ops octeon_smp_ops = { .send_ipi_single = octeon_send_ipi_single, .send_ipi_mask = octeon_send_ipi_mask, .init_secondary = octeon_init_secondary, @@ -485,7 +489,7 @@ static void octeon_78xx_send_ipi_mask(const struct cpumask *mask, octeon_78xx_send_ipi_single(cpu, action); } -static struct plat_smp_ops octeon_78xx_smp_ops = { +static const struct plat_smp_ops octeon_78xx_smp_ops = { .send_ipi_single = octeon_78xx_send_ipi_single, .send_ipi_mask = octeon_78xx_send_ipi_mask, .init_secondary = octeon_init_secondary, @@ -501,7 +505,7 @@ static struct plat_smp_ops octeon_78xx_smp_ops = { void __init octeon_setup_smp(void) { - struct plat_smp_ops *ops; + const struct plat_smp_ops 
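SIZEOF_FIELD() above relies on the classic null-pointer trick: sizeof() only inspects the type of its operand here, so the NULL pointer is never dereferenced. Paired with offsetof() in CVMX_BOOTMEM_NAMED_GET_FIELD(), it lets __cvmx_bootmem_desc_get() read just the bytes of one member from a descriptor that may live in memory that is not directly reachable. A standalone user-space illustration of the idiom follows, using a made-up descriptor type rather than the real cvmx_bootmem_named_block_desc.

#include <stdio.h>
#include <stddef.h>

/* Same null-pointer idiom as above; the type is inspected, nothing is read. */
#define SIZEOF_FIELD(s, field) sizeof(((s *)NULL)->field)

/* Made-up stand-in for cvmx_bootmem_named_block_desc. */
struct demo_block_desc {
	unsigned long long base_addr;
	unsigned long long size;
	char name[128];
};

int main(void)
{
	printf("base_addr: offset %zu, size %zu\n",
	       offsetof(struct demo_block_desc, base_addr),
	       SIZEOF_FIELD(struct demo_block_desc, base_addr));
	printf("name:      offset %zu, size %zu\n",
	       offsetof(struct demo_block_desc, name),
	       SIZEOF_FIELD(struct demo_block_desc, name));
	return 0;
}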
*ops; if (octeon_has_feature(OCTEON_FEATURE_CIU3)) ops = &octeon_78xx_smp_ops; diff --git a/arch/mips/configs/cavium_octeon_defconfig b/arch/mips/configs/cavium_octeon_defconfig index e5b18f1a31a0..490b12af103c 100644 --- a/arch/mips/configs/cavium_octeon_defconfig +++ b/arch/mips/configs/cavium_octeon_defconfig @@ -60,11 +60,8 @@ CONFIG_BLK_DEV_SD=y CONFIG_ATA=y CONFIG_SATA_AHCI=y CONFIG_SATA_AHCI_PLATFORM=y -CONFIG_AHCI_OCTEON=y CONFIG_PATA_OCTEON_CF=y -CONFIG_SATA_SIL=y CONFIG_NETDEVICES=y -CONFIG_MII=y # CONFIG_NET_VENDOR_3COM is not set # CONFIG_NET_VENDOR_ADAPTEC is not set # CONFIG_NET_VENDOR_ALTEON is not set @@ -121,22 +118,30 @@ CONFIG_SPI=y CONFIG_SPI_OCTEON=y # CONFIG_HWMON is not set CONFIG_WATCHDOG=y -CONFIG_USB=m -CONFIG_USB_EHCI_HCD=m -CONFIG_USB_EHCI_HCD_PLATFORM=m -CONFIG_USB_OHCI_HCD=m -CONFIG_USB_OHCI_HCD_PLATFORM=m +CONFIG_USB=y +# CONFIG_USB_PCI is not set +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PLATFORM=y +CONFIG_USB_STORAGE=y +CONFIG_USB_DWC3=y CONFIG_MMC=y # CONFIG_PWRSEQ_EMMC is not set # CONFIG_PWRSEQ_SIMPLE is not set -# CONFIG_MMC_BLOCK_BOUNCE is not set CONFIG_MMC_CAVIUM_OCTEON=y +CONFIG_EDAC=y +CONFIG_EDAC_OCTEON_PC=y +CONFIG_EDAC_OCTEON_L2C=y +CONFIG_EDAC_OCTEON_LMC=y +CONFIG_EDAC_OCTEON_PCI=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_DS1307=y CONFIG_STAGING=y CONFIG_OCTEON_ETHERNET=y -CONFIG_OCTEON_USB=m # CONFIG_IOMMU_SUPPORT is not set +CONFIG_RAS=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y diff --git a/arch/mips/configs/ci20_defconfig b/arch/mips/configs/ci20_defconfig index b42cfa7865f9..5ea3104a3aca 100644 --- a/arch/mips/configs/ci20_defconfig +++ b/arch/mips/configs/ci20_defconfig @@ -91,6 +91,7 @@ CONFIG_SERIAL_OF_PLATFORM=y CONFIG_I2C=y CONFIG_I2C_JZ4780=y CONFIG_GPIO_SYSFS=y +CONFIG_GPIO_INGENIC=y # CONFIG_HWMON is not set CONFIG_REGULATOR=y CONFIG_REGULATOR_DEBUG=y @@ -99,6 +100,8 @@ CONFIG_REGULATOR_FIXED_VOLTAGE=y # CONFIG_HID is not set # CONFIG_USB_SUPPORT is not set CONFIG_MMC=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_JZ4740=y # CONFIG_IOMMU_SUPPORT is not set CONFIG_MEMORY=y # CONFIG_DNOTIFY is not set diff --git a/arch/mips/configs/generic/board-ni169445.config b/arch/mips/configs/generic/board-ni169445.config new file mode 100644 index 000000000000..f72223b366ca --- /dev/null +++ b/arch/mips/configs/generic/board-ni169445.config @@ -0,0 +1,30 @@ +# require CONFIG_CPU_MIPS32_R2=y +# require CONFIG_CPU_LITTLE_ENDIAN=y + +CONFIG_FIT_IMAGE_FDT_NI169445=y + +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_OF_PLATFORM=y + +CONFIG_GPIOLIB=y +CONFIG_GPIO_SYSFS=y +CONFIG_GPIO_GENERIC_PLATFORM=y + +CONFIG_MTD=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_CMDLINE_PARTS=y + +CONFIG_MTD_NAND_ECC=y +CONFIG_MTD_NAND_ECC_BCH=y +CONFIG_MTD_NAND=y +CONFIG_MTD_NAND_GPIO=y +CONFIG_MTD_NAND_IDS=y + +CONFIG_MTD_UBI=y +CONFIG_MTD_UBI_BLOCK=y + +CONFIG_NETDEVICES=y +CONFIG_STMMAC_ETH=y +CONFIG_STMMAC_PLATFORM=y +CONFIG_DWMAC_GENERIC=y diff --git a/arch/mips/configs/generic/board-sead-3.config b/arch/mips/configs/generic/board-sead-3.config index 3b5e1ac579eb..df49a592dbb5 100644 --- a/arch/mips/configs/generic/board-sead-3.config +++ b/arch/mips/configs/generic/board-sead-3.config @@ -1,3 +1,5 @@ +# require CONFIG_32BIT=y + CONFIG_LEGACY_BOARD_SEAD3=y CONFIG_AUXDISPLAY=y diff --git a/arch/mips/configs/generic_defconfig b/arch/mips/configs/generic_defconfig index 91aacf2ef26d..26b1cd5ffbf5 100644 --- a/arch/mips/configs/generic_defconfig +++ 
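Two related API changes meet in the octeon smp.c hunk above: octeon_boot_secondary() now returns an error instead of only logging "Secondary boot timeout", and the plat_smp_ops tables become const so the ops can live in read-only data. Below is a hedged sketch of how a caller can use the new int return value; the demo ops structure and error path are illustrative, not a copy of the core MIPS SMP code.

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/sched.h>

/* Illustrative ops shape; the real table is struct plat_smp_ops. */
struct demo_smp_ops {
	int (*boot_secondary)(int cpu, struct task_struct *idle);
};

static int demo_bring_up_cpu(const struct demo_smp_ops *ops, int cpu,
			     struct task_struct *idle)
{
	int err;

	/* With the void -> int conversion the caller can give up cleanly
	 * instead of waiting forever for a CPU that never came online. */
	err = ops->boot_secondary(cpu, idle);
	if (err)
		pr_err("CPU%d failed to start: %d\n", cpu, err);

	return err;
}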
b/arch/mips/configs/generic_defconfig @@ -3,7 +3,7 @@ CONFIG_CPU_LITTLE_ENDIAN=y CONFIG_MIPS_CPS=y CONFIG_CPU_HAS_MSA=y CONFIG_HIGHMEM=y -CONFIG_NR_CPUS=2 +CONFIG_NR_CPUS=16 CONFIG_MIPS_O32_FP64_SUPPORT=y CONFIG_SYSVIPC=y CONFIG_NO_HZ_IDLE=y @@ -61,7 +61,6 @@ CONFIG_HID_KENSINGTON=y CONFIG_HID_LOGITECH=y CONFIG_HID_MICROSOFT=y CONFIG_HID_MONTEREY=y -# CONFIG_USB_SUPPORT is not set # CONFIG_MIPS_PLATFORM_DEVICES is not set # CONFIG_IOMMU_SUPPORT is not set CONFIG_EXT4_FS=y diff --git a/arch/mips/configs/gpr_defconfig b/arch/mips/configs/gpr_defconfig index b1911816337c..55438fc9991e 100644 --- a/arch/mips/configs/gpr_defconfig +++ b/arch/mips/configs/gpr_defconfig @@ -111,12 +111,8 @@ CONFIG_ATALK=m CONFIG_DEV_APPLETALK=m CONFIG_IPDDP=m CONFIG_IPDDP_ENCAP=y -CONFIG_IPDDP_DECAP=y CONFIG_X25=m CONFIG_LAPB=m -CONFIG_ECONET=m -CONFIG_ECONET_AUNUDP=y -CONFIG_ECONET_NATIVE=y CONFIG_WAN_ROUTER=m CONFIG_NET_SCHED=y CONFIG_NET_SCH_CBQ=m diff --git a/arch/mips/configs/lemote2f_defconfig b/arch/mips/configs/lemote2f_defconfig index 1ec8ed8d05d1..02be95c1b712 100644 --- a/arch/mips/configs/lemote2f_defconfig +++ b/arch/mips/configs/lemote2f_defconfig @@ -37,7 +37,6 @@ CONFIG_PM=y CONFIG_HIBERNATION=y CONFIG_PM_STD_PARTITION="/dev/hda3" CONFIG_CPU_FREQ=y -CONFIG_CPU_FREQ_DEBUG=y CONFIG_CPU_FREQ_STAT=y CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_POWERSAVE=m diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig index 078ecac071ab..396408404487 100644 --- a/arch/mips/configs/malta_defconfig +++ b/arch/mips/configs/malta_defconfig @@ -2,7 +2,6 @@ CONFIG_MIPS_MALTA=y CONFIG_CPU_LITTLE_ENDIAN=y CONFIG_CPU_MIPS32_R2=y CONFIG_PAGE_SIZE_16KB=y -CONFIG_MIPS_MT_SMP=y CONFIG_NR_CPUS=8 CONFIG_HZ_100=y CONFIG_SYSVIPC=y diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig index 80ecd94ed126..5691673a3327 100644 --- a/arch/mips/configs/malta_kvm_defconfig +++ b/arch/mips/configs/malta_kvm_defconfig @@ -2,7 +2,6 @@ CONFIG_MIPS_MALTA=y CONFIG_CPU_LITTLE_ENDIAN=y CONFIG_CPU_MIPS32_R2=y CONFIG_PAGE_SIZE_16KB=y -CONFIG_MIPS_MT_SMP=y CONFIG_NR_CPUS=8 CONFIG_HZ_100=y CONFIG_SYSVIPC=y diff --git a/arch/mips/configs/malta_kvm_guest_defconfig b/arch/mips/configs/malta_kvm_guest_defconfig index 35ad1f8d1a79..e9cadb37d684 100644 --- a/arch/mips/configs/malta_kvm_guest_defconfig +++ b/arch/mips/configs/malta_kvm_guest_defconfig @@ -3,6 +3,7 @@ CONFIG_CPU_LITTLE_ENDIAN=y CONFIG_CPU_MIPS32_R2=y CONFIG_KVM_GUEST=y CONFIG_PAGE_SIZE_16KB=y +# CONFIG_MIPS_MT_SMP is not set CONFIG_HZ_100=y CONFIG_SYSVIPC=y CONFIG_NO_HZ=y diff --git a/arch/mips/configs/maltasmvp_defconfig b/arch/mips/configs/maltasmvp_defconfig index 55b68b981b05..d8c8f5fb8918 100644 --- a/arch/mips/configs/maltasmvp_defconfig +++ b/arch/mips/configs/maltasmvp_defconfig @@ -2,7 +2,6 @@ CONFIG_MIPS_MALTA=y CONFIG_CPU_LITTLE_ENDIAN=y CONFIG_CPU_MIPS32_R2=y CONFIG_PAGE_SIZE_16KB=y -CONFIG_MIPS_MT_SMP=y CONFIG_SCHED_SMT=y CONFIG_MIPS_CPS=y CONFIG_NR_CPUS=8 diff --git a/arch/mips/configs/maltasmvp_eva_defconfig b/arch/mips/configs/maltasmvp_eva_defconfig index 5ca590cf1635..04827bc9f87f 100644 --- a/arch/mips/configs/maltasmvp_eva_defconfig +++ b/arch/mips/configs/maltasmvp_eva_defconfig @@ -3,7 +3,6 @@ CONFIG_CPU_LITTLE_ENDIAN=y CONFIG_CPU_MIPS32_R2=y CONFIG_CPU_MIPS32_3_5_FEATURES=y CONFIG_PAGE_SIZE_16KB=y -CONFIG_MIPS_MT_SMP=y CONFIG_SCHED_SMT=y CONFIG_MIPS_CPS=y CONFIG_NR_CPUS=8 diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig index 4011f1869e72..c3d0d0a6e044 
100644 --- a/arch/mips/configs/mtx1_defconfig +++ b/arch/mips/configs/mtx1_defconfig @@ -146,12 +146,8 @@ CONFIG_ATALK=m CONFIG_DEV_APPLETALK=m CONFIG_IPDDP=m CONFIG_IPDDP_ENCAP=y -CONFIG_IPDDP_DECAP=y CONFIG_X25=m CONFIG_LAPB=m -CONFIG_ECONET=m -CONFIG_ECONET_AUNUDP=y -CONFIG_ECONET_NATIVE=y CONFIG_WAN_ROUTER=m CONFIG_NET_SCHED=y CONFIG_NET_SCH_CBQ=m diff --git a/arch/mips/configs/nlm_xlp_defconfig b/arch/mips/configs/nlm_xlp_defconfig index 5720ce23e9aa..7357248b3d7a 100644 --- a/arch/mips/configs/nlm_xlp_defconfig +++ b/arch/mips/configs/nlm_xlp_defconfig @@ -259,7 +259,6 @@ CONFIG_ATALK=m CONFIG_DEV_APPLETALK=m CONFIG_IPDDP=m CONFIG_IPDDP_ENCAP=y -CONFIG_IPDDP_DECAP=y CONFIG_X25=m CONFIG_LAPB=m CONFIG_WAN_ROUTER=m diff --git a/arch/mips/configs/nlm_xlr_defconfig b/arch/mips/configs/nlm_xlr_defconfig index fea56c535d92..1e18fd7de209 100644 --- a/arch/mips/configs/nlm_xlr_defconfig +++ b/arch/mips/configs/nlm_xlr_defconfig @@ -240,12 +240,8 @@ CONFIG_ATALK=m CONFIG_DEV_APPLETALK=m CONFIG_IPDDP=m CONFIG_IPDDP_ENCAP=y -CONFIG_IPDDP_DECAP=y CONFIG_X25=m CONFIG_LAPB=m -CONFIG_ECONET=m -CONFIG_ECONET_AUNUDP=y -CONFIG_ECONET_NATIVE=y CONFIG_WAN_ROUTER=m CONFIG_PHONET=m CONFIG_IEEE802154=m diff --git a/arch/mips/configs/omega2p_defconfig b/arch/mips/configs/omega2p_defconfig new file mode 100644 index 000000000000..e2731c3cc7e7 --- /dev/null +++ b/arch/mips/configs/omega2p_defconfig @@ -0,0 +1,129 @@ +CONFIG_RALINK=y +CONFIG_SOC_MT7620=y +CONFIG_DTB_OMEGA2P=y +CONFIG_CPU_MIPS32_R2=y +# CONFIG_COMPACTION is not set +CONFIG_HZ_100=y +CONFIG_PREEMPT=y +# CONFIG_SECCOMP is not set +CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER=y +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_NO_HZ_IDLE=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_CGROUPS=y +CONFIG_MEMCG=y +CONFIG_CGROUP_SCHED=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_NAMESPACES=y +CONFIG_USER_NS=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL_SYSCALL=y +CONFIG_KALLSYMS_ALL=y +CONFIG_EMBEDDED=y +# CONFIG_VM_EVENT_COUNTERS is not set +# CONFIG_SLUB_DEBUG is not set +# CONFIG_COMPAT_BRK is not set +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +# CONFIG_SUSPEND is not set +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_INET=y +# CONFIG_INET_XFRM_MODE_TRANSPORT is not set +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_DIAG is not set +# CONFIG_IPV6 is not set +# CONFIG_WIRELESS is not set +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y +# CONFIG_FW_LOADER is not set +# CONFIG_ALLOW_DEV_COREDUMP is not set +CONFIG_NETDEVICES=y +# CONFIG_ETHERNET is not set +# CONFIG_WLAN is not set +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_SERIO is not set +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_LEGACY_PTY_COUNT=2 +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_NR_UARTS=3 +CONFIG_SERIAL_8250_RUNTIME_UARTS=3 +CONFIG_SERIAL_OF_PLATFORM=y +# CONFIG_HW_RANDOM is not set +# CONFIG_HWMON is not set +# CONFIG_VGA_CONSOLE is not set +CONFIG_USB=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +CONFIG_MMC=y +# CONFIG_IOMMU_SUPPORT is not set +CONFIG_MEMORY=y +CONFIG_PHY_RALINK_USB=y +# CONFIG_DNOTIFY is not set +CONFIG_PROC_KCORE=y +# CONFIG_PROC_PAGE_MONITOR is not set +CONFIG_TMPFS=y +CONFIG_CONFIGFS_FS=y +# CONFIG_NETWORK_FILESYSTEMS is not set +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=y 
+CONFIG_NLS_CODEPAGE_775=y +CONFIG_NLS_CODEPAGE_850=y +CONFIG_NLS_CODEPAGE_852=y +CONFIG_NLS_CODEPAGE_855=y +CONFIG_NLS_CODEPAGE_857=y +CONFIG_NLS_CODEPAGE_860=y +CONFIG_NLS_CODEPAGE_861=y +CONFIG_NLS_CODEPAGE_862=y +CONFIG_NLS_CODEPAGE_863=y +CONFIG_NLS_CODEPAGE_864=y +CONFIG_NLS_CODEPAGE_865=y +CONFIG_NLS_CODEPAGE_866=y +CONFIG_NLS_CODEPAGE_869=y +CONFIG_NLS_CODEPAGE_936=y +CONFIG_NLS_CODEPAGE_950=y +CONFIG_NLS_CODEPAGE_932=y +CONFIG_NLS_CODEPAGE_949=y +CONFIG_NLS_CODEPAGE_874=y +CONFIG_NLS_ISO8859_8=y +CONFIG_NLS_CODEPAGE_1250=y +CONFIG_NLS_CODEPAGE_1251=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_ISO8859_2=y +CONFIG_NLS_ISO8859_3=y +CONFIG_NLS_ISO8859_4=y +CONFIG_NLS_ISO8859_5=y +CONFIG_NLS_ISO8859_6=y +CONFIG_NLS_ISO8859_7=y +CONFIG_NLS_ISO8859_9=y +CONFIG_NLS_ISO8859_13=y +CONFIG_NLS_ISO8859_14=y +CONFIG_NLS_ISO8859_15=y +CONFIG_NLS_KOI8_R=y +CONFIG_NLS_KOI8_U=y +CONFIG_NLS_UTF8=y +CONFIG_PRINTK_TIME=y +CONFIG_DEBUG_INFO=y +CONFIG_STRIP_ASM_SYMS=y +CONFIG_DEBUG_FS=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_PANIC_TIMEOUT=10 +# CONFIG_SCHED_DEBUG is not set +# CONFIG_DEBUG_PREEMPT is not set +CONFIG_STACKTRACE=y +# CONFIG_FTRACE is not set +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +CONFIG_CRC16=y +CONFIG_XZ_DEC=y diff --git a/arch/mips/configs/pistachio_defconfig b/arch/mips/configs/pistachio_defconfig index 7d32fbbca962..b22a3cf149b6 100644 --- a/arch/mips/configs/pistachio_defconfig +++ b/arch/mips/configs/pistachio_defconfig @@ -47,6 +47,8 @@ CONFIG_IP_ADVANCED_ROUTER=y CONFIG_IP_MULTIPLE_TABLES=y CONFIG_IP_ROUTE_MULTIPATH=y CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y CONFIG_IP_MROUTE=y CONFIG_IP_PIMSM_V1=y CONFIG_IP_PIMSM_V2=y @@ -207,7 +209,7 @@ CONFIG_IMGPDC_WDT=y CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_REGULATOR_GPIO=y CONFIG_MEDIA_SUPPORT=y -CONFIG_MEDIA_RC_SUPPORT=y +CONFIG_RC_CORE=y # CONFIG_RC_DECODERS is not set CONFIG_RC_DEVICES=y CONFIG_IR_IMG=y @@ -292,7 +294,8 @@ CONFIG_SQUASHFS_LZO=y CONFIG_PSTORE=y CONFIG_PSTORE_CONSOLE=y CONFIG_PSTORE_RAM=y -# CONFIG_NETWORK_FILESYSTEMS is not set +CONFIG_NFS_FS=y +CONFIG_ROOT_NFS=y CONFIG_NLS_DEFAULT="utf8" CONFIG_NLS_CODEPAGE_437=m CONFIG_NLS_ASCII=m diff --git a/arch/mips/configs/vocore2_defconfig b/arch/mips/configs/vocore2_defconfig new file mode 100644 index 000000000000..9121e4194a63 --- /dev/null +++ b/arch/mips/configs/vocore2_defconfig @@ -0,0 +1,129 @@ +CONFIG_RALINK=y +CONFIG_SOC_MT7620=y +CONFIG_DTB_VOCORE2=y +CONFIG_CPU_MIPS32_R2=y +# CONFIG_COMPACTION is not set +CONFIG_HZ_100=y +CONFIG_PREEMPT=y +# CONFIG_SECCOMP is not set +CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER=y +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_NO_HZ_IDLE=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_CGROUPS=y +CONFIG_MEMCG=y +CONFIG_CGROUP_SCHED=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_NAMESPACES=y +CONFIG_USER_NS=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL_SYSCALL=y +CONFIG_KALLSYMS_ALL=y +CONFIG_EMBEDDED=y +# CONFIG_VM_EVENT_COUNTERS is not set +# CONFIG_SLUB_DEBUG is not set +# CONFIG_COMPAT_BRK is not set +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +# CONFIG_SUSPEND is not set +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_INET=y +# CONFIG_INET_XFRM_MODE_TRANSPORT is not set +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_DIAG is not set +# CONFIG_IPV6 is not set +# CONFIG_WIRELESS is not set 
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y +# CONFIG_FW_LOADER is not set +# CONFIG_ALLOW_DEV_COREDUMP is not set +CONFIG_NETDEVICES=y +# CONFIG_ETHERNET is not set +# CONFIG_WLAN is not set +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_SERIO is not set +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_LEGACY_PTY_COUNT=2 +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_NR_UARTS=3 +CONFIG_SERIAL_8250_RUNTIME_UARTS=3 +CONFIG_SERIAL_OF_PLATFORM=y +# CONFIG_HW_RANDOM is not set +# CONFIG_HWMON is not set +# CONFIG_VGA_CONSOLE is not set +CONFIG_USB=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +CONFIG_MMC=y +# CONFIG_IOMMU_SUPPORT is not set +CONFIG_MEMORY=y +CONFIG_PHY_RALINK_USB=y +# CONFIG_DNOTIFY is not set +CONFIG_PROC_KCORE=y +# CONFIG_PROC_PAGE_MONITOR is not set +CONFIG_TMPFS=y +CONFIG_CONFIGFS_FS=y +# CONFIG_NETWORK_FILESYSTEMS is not set +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=y +CONFIG_NLS_CODEPAGE_775=y +CONFIG_NLS_CODEPAGE_850=y +CONFIG_NLS_CODEPAGE_852=y +CONFIG_NLS_CODEPAGE_855=y +CONFIG_NLS_CODEPAGE_857=y +CONFIG_NLS_CODEPAGE_860=y +CONFIG_NLS_CODEPAGE_861=y +CONFIG_NLS_CODEPAGE_862=y +CONFIG_NLS_CODEPAGE_863=y +CONFIG_NLS_CODEPAGE_864=y +CONFIG_NLS_CODEPAGE_865=y +CONFIG_NLS_CODEPAGE_866=y +CONFIG_NLS_CODEPAGE_869=y +CONFIG_NLS_CODEPAGE_936=y +CONFIG_NLS_CODEPAGE_950=y +CONFIG_NLS_CODEPAGE_932=y +CONFIG_NLS_CODEPAGE_949=y +CONFIG_NLS_CODEPAGE_874=y +CONFIG_NLS_ISO8859_8=y +CONFIG_NLS_CODEPAGE_1250=y +CONFIG_NLS_CODEPAGE_1251=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_ISO8859_2=y +CONFIG_NLS_ISO8859_3=y +CONFIG_NLS_ISO8859_4=y +CONFIG_NLS_ISO8859_5=y +CONFIG_NLS_ISO8859_6=y +CONFIG_NLS_ISO8859_7=y +CONFIG_NLS_ISO8859_9=y +CONFIG_NLS_ISO8859_13=y +CONFIG_NLS_ISO8859_14=y +CONFIG_NLS_ISO8859_15=y +CONFIG_NLS_KOI8_R=y +CONFIG_NLS_KOI8_U=y +CONFIG_NLS_UTF8=y +CONFIG_PRINTK_TIME=y +CONFIG_DEBUG_INFO=y +CONFIG_STRIP_ASM_SYMS=y +CONFIG_DEBUG_FS=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_PANIC_TIMEOUT=10 +# CONFIG_SCHED_DEBUG is not set +# CONFIG_DEBUG_PREEMPT is not set +CONFIG_STACKTRACE=y +# CONFIG_FTRACE is not set +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +CONFIG_CRC16=y +CONFIG_XZ_DEC=y diff --git a/arch/mips/fw/arc/init.c b/arch/mips/fw/arc/init.c index 629b24db0d3a..008555969534 100644 --- a/arch/mips/fw/arc/init.c +++ b/arch/mips/fw/arc/init.c @@ -51,7 +51,7 @@ void __init prom_init(void) #endif #ifdef CONFIG_SGI_IP27 { - extern struct plat_smp_ops ip27_smp_ops; + extern const struct plat_smp_ops ip27_smp_ops; register_smp_ops(&ip27_smp_ops); } diff --git a/arch/mips/generic/Kconfig b/arch/mips/generic/Kconfig index 51ffbbaddee2..e0436aaf7f38 100644 --- a/arch/mips/generic/Kconfig +++ b/arch/mips/generic/Kconfig @@ -36,4 +36,10 @@ config FIT_IMAGE_FDT_BOSTON enable this if you wish to boot on a MIPS Boston board, as it is expected by the bootloader. +config FIT_IMAGE_FDT_NI169445 + bool "Include FDT for NI 169445" + help + Enable this to include the FDT for the 169445 platform from + National Instruments in the FIT kernel image. 
+ endif diff --git a/arch/mips/generic/Platform b/arch/mips/generic/Platform index 9a30d69e2281..f5312dfa8184 100644 --- a/arch/mips/generic/Platform +++ b/arch/mips/generic/Platform @@ -12,3 +12,7 @@ platform-$(CONFIG_MIPS_GENERIC) += generic/ cflags-$(CONFIG_MIPS_GENERIC) += -I$(srctree)/arch/mips/include/asm/mach-generic load-$(CONFIG_MIPS_GENERIC) += 0xffffffff80100000 all-$(CONFIG_MIPS_GENERIC) := vmlinux.gz.itb + +its-y := vmlinux.its.S +its-$(CONFIG_FIT_IMAGE_FDT_BOSTON) += board-boston.its.S +its-$(CONFIG_FIT_IMAGE_FDT_NI169445) += board-ni169445.its.S diff --git a/arch/mips/generic/board-boston.its.S b/arch/mips/generic/board-boston.its.S new file mode 100644 index 000000000000..a7f51f97b910 --- /dev/null +++ b/arch/mips/generic/board-boston.its.S @@ -0,0 +1,22 @@ +/ { + images { + fdt@boston { + description = "img,boston Device Tree"; + data = /incbin/("boot/dts/img/boston.dtb"); + type = "flat_dt"; + arch = "mips"; + compression = "none"; + hash@0 { + algo = "sha1"; + }; + }; + }; + + configurations { + conf@boston { + description = "Boston Linux kernel"; + kernel = "kernel@0"; + fdt = "fdt@boston"; + }; + }; +}; diff --git a/arch/mips/generic/board-ni169445.its.S b/arch/mips/generic/board-ni169445.its.S new file mode 100644 index 000000000000..d12e12fe90be --- /dev/null +++ b/arch/mips/generic/board-ni169445.its.S @@ -0,0 +1,22 @@ +{ + images { + fdt@ni169445 { + description = "NI 169445 device tree"; + data = /incbin/("boot/dts/ni/169445.dtb"); + type = "flat_dt"; + arch = "mips"; + compression = "none"; + hash@0 { + algo = "sha1"; + }; + }; + }; + + configurations { + conf@ni169445 { + description = "NI 169445 Linux Kernel"; + kernel = "kernel@0"; + fdt = "fdt@ni169445"; + }; + }; +}; diff --git a/arch/mips/generic/init.c b/arch/mips/generic/init.c index 3f32b376d30e..15a7fb8e2a2e 100644 --- a/arch/mips/generic/init.c +++ b/arch/mips/generic/init.c @@ -16,6 +16,7 @@ #include #include +#include #include #include #include @@ -88,6 +89,8 @@ void __init *plat_get_fdt(void) return (void *)fdt; } +#ifdef CONFIG_RELOCATABLE + void __init plat_fdt_relocated(void *new_location) { /* @@ -101,6 +104,8 @@ void __init plat_fdt_relocated(void *new_location) fw_arg1 = (unsigned long)new_location; } +#endif /* CONFIG_RELOCATABLE */ + void __init plat_mem_setup(void) { if (mach && mach->fixup_fdt) diff --git a/arch/mips/generic/irq.c b/arch/mips/generic/irq.c index 14064bdd91dd..5322d09dd51b 100644 --- a/arch/mips/generic/irq.c +++ b/arch/mips/generic/irq.c @@ -12,10 +12,11 @@ #include #include #include -#include #include #include +#include +#include int get_c0_fdc_int(void) { @@ -23,7 +24,7 @@ int get_c0_fdc_int(void) if (cpu_has_veic) panic("Unimplemented!"); - else if (gic_present) + else if (mips_gic_present()) mips_cpu_fdc_irq = gic_get_c0_fdc_int(); else if (cp0_fdc_irq >= 0) mips_cpu_fdc_irq = MIPS_CPU_IRQ_BASE + cp0_fdc_irq; @@ -39,7 +40,7 @@ int get_c0_perfcount_int(void) if (cpu_has_veic) panic("Unimplemented!"); - else if (gic_present) + else if (mips_gic_present()) mips_cpu_perf_irq = gic_get_c0_perfcount_int(); else if (cp0_perfcount_irq >= 0) mips_cpu_perf_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; @@ -55,7 +56,7 @@ unsigned int get_c0_compare_int(void) if (cpu_has_veic) panic("Unimplemented!"); - else if (gic_present) + else if (mips_gic_present()) mips_cpu_timer_irq = gic_get_c0_compare_int(); else mips_cpu_timer_irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq; diff --git a/arch/mips/generic/vmlinux.its.S b/arch/mips/generic/vmlinux.its.S index 3390e2f80b80..f67fbf1c8541 100644 
--- a/arch/mips/generic/vmlinux.its.S +++ b/arch/mips/generic/vmlinux.its.S @@ -29,28 +29,3 @@ }; }; }; - -#ifdef CONFIG_FIT_IMAGE_FDT_BOSTON -/ { - images { - fdt@boston { - description = "img,boston Device Tree"; - data = /incbin/("boot/dts/img/boston.dtb"); - type = "flat_dt"; - arch = "mips"; - compression = "none"; - hash@0 { - algo = "sha1"; - }; - }; - }; - - configurations { - conf@boston { - description = "Boston Linux kernel"; - kernel = "kernel@0"; - fdt = "fdt@boston"; - }; - }; -}; -#endif /* CONFIG_FIT_IMAGE_FDT_BOSTON */ diff --git a/arch/mips/include/asm/asm.h b/arch/mips/include/asm/asm.h index 859cf7048347..81fae23ce7cd 100644 --- a/arch/mips/include/asm/asm.h +++ b/arch/mips/include/asm/asm.h @@ -55,6 +55,7 @@ .type symbol, @function; \ .ent symbol, 0; \ symbol: .frame sp, 0, ra; \ + .cfi_startproc; \ .insn /* @@ -66,12 +67,14 @@ symbol: .frame sp, 0, ra; \ .type symbol, @function; \ .ent symbol, 0; \ symbol: .frame sp, framesize, rpc; \ + .cfi_startproc; \ .insn /* * END - mark end of function */ #define END(function) \ + .cfi_endproc; \ .end function; \ .size function, .-function diff --git a/arch/mips/include/asm/bmips.h b/arch/mips/include/asm/bmips.h index a92aee7b977a..b3e2975f83d3 100644 --- a/arch/mips/include/asm/bmips.h +++ b/arch/mips/include/asm/bmips.h @@ -48,8 +48,8 @@ #include #include -extern struct plat_smp_ops bmips43xx_smp_ops; -extern struct plat_smp_ops bmips5000_smp_ops; +extern const struct plat_smp_ops bmips43xx_smp_ops; +extern const struct plat_smp_ops bmips5000_smp_ops; static inline int register_bmips_smp_ops(void) { diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h index cd6efb07c980..a41059d47d31 100644 --- a/arch/mips/include/asm/cpu-info.h +++ b/arch/mips/include/asm/cpu-info.h @@ -15,6 +15,8 @@ #include #include +#include + /* * Descriptor for a cache */ @@ -77,16 +79,9 @@ struct cpuinfo_mips { struct cache_desc tcache; /* Tertiary/split secondary cache */ int srsets; /* Shadow register sets */ int package;/* physical package number */ - int core; /* physical core number */ + unsigned int globalnumber; #ifdef CONFIG_64BIT int vmbits; /* Virtual memory size in bits */ -#endif -#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6) - /* - * There is not necessarily a 1:1 mapping of VPE num to CPU number - * in particular on multi-core systems. 
- */ - int vpe_id; /* Virtual Processor number */ #endif void *data; /* Additional data */ unsigned int watch_reg_count; /* Number that exist */ @@ -144,11 +139,52 @@ struct proc_cpuinfo_notifier_args { unsigned long n; }; -#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6) -# define cpu_vpe_id(cpuinfo) ((cpuinfo)->vpe_id) -#else -# define cpu_vpe_id(cpuinfo) ({ (void)cpuinfo; 0; }) -#endif +static inline unsigned int cpu_cluster(struct cpuinfo_mips *cpuinfo) +{ + /* Optimisation for systems where multiple clusters aren't used */ + if (!IS_ENABLED(CONFIG_CPU_MIPSR6)) + return 0; + + return (cpuinfo->globalnumber & MIPS_GLOBALNUMBER_CLUSTER) >> + MIPS_GLOBALNUMBER_CLUSTER_SHF; +} + +static inline unsigned int cpu_core(struct cpuinfo_mips *cpuinfo) +{ + return (cpuinfo->globalnumber & MIPS_GLOBALNUMBER_CORE) >> + MIPS_GLOBALNUMBER_CORE_SHF; +} + +static inline unsigned int cpu_vpe_id(struct cpuinfo_mips *cpuinfo) +{ + /* Optimisation for systems where VP(E)s aren't used */ + if (!IS_ENABLED(CONFIG_MIPS_MT_SMP) && !IS_ENABLED(CONFIG_CPU_MIPSR6)) + return 0; + + return (cpuinfo->globalnumber & MIPS_GLOBALNUMBER_VP) >> + MIPS_GLOBALNUMBER_VP_SHF; +} + +extern void cpu_set_cluster(struct cpuinfo_mips *cpuinfo, unsigned int cluster); +extern void cpu_set_core(struct cpuinfo_mips *cpuinfo, unsigned int core); +extern void cpu_set_vpe_id(struct cpuinfo_mips *cpuinfo, unsigned int vpe); + +static inline bool cpus_are_siblings(int cpua, int cpub) +{ + struct cpuinfo_mips *infoa = &cpu_data[cpua]; + struct cpuinfo_mips *infob = &cpu_data[cpub]; + unsigned int gnuma, gnumb; + + if (infoa->package != infob->package) + return false; + + gnuma = infoa->globalnumber & ~MIPS_GLOBALNUMBER_VP; + gnumb = infob->globalnumber & ~MIPS_GLOBALNUMBER_VP; + if (gnuma != gnumb) + return false; + + return true; +} static inline unsigned long cpu_asid_inc(void) { diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h index 175fe565f4e1..a45af3de075d 100644 --- a/arch/mips/include/asm/cpu-type.h +++ b/arch/mips/include/asm/cpu-type.h @@ -151,11 +151,6 @@ static inline int __pure __get_cpu_type(const int cpu_type) case CPU_R5500: #endif -#ifdef CONFIG_SYS_HAS_CPU_R6000 - case CPU_R6000: - case CPU_R6000A: -#endif - #ifdef CONFIG_SYS_HAS_CPU_NEVADA case CPU_NEVADA: #endif diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h index d0c152b989f8..ece9b84f3bcb 100644 --- a/arch/mips/include/asm/cpu.h +++ b/arch/mips/include/asm/cpu.h @@ -285,11 +285,6 @@ enum cpu_type_enum { CPU_R2000, CPU_R3000, CPU_R3000A, CPU_R3041, CPU_R3051, CPU_R3052, CPU_R3081, CPU_R3081E, - /* - * R6000 class processors - */ - CPU_R6000, CPU_R6000A, - /* * R4000 class processors */ diff --git a/arch/mips/include/asm/floppy.h b/arch/mips/include/asm/floppy.h index d75aed36480a..021d09ae5670 100644 --- a/arch/mips/include/asm/floppy.h +++ b/arch/mips/include/asm/floppy.h @@ -10,11 +10,11 @@ #ifndef _ASM_FLOPPY_H #define _ASM_FLOPPY_H -#include +#include static inline void fd_cacheflush(char * addr, long size) { - dma_cache_sync(NULL, addr, size, DMA_BIDIRECTIONAL); + dma_cache_wback_inv((unsigned long)addr, size); } #define MAX_BUFFER_SECTORS 24 diff --git a/arch/mips/include/asm/fpu_emulator.h b/arch/mips/include/asm/fpu_emulator.h index c05369e0b8d6..b36097d3cbf4 100644 --- a/arch/mips/include/asm/fpu_emulator.h +++ b/arch/mips/include/asm/fpu_emulator.h @@ -36,6 +36,7 @@ struct mips_fpu_emulator_stats { unsigned long emulated; unsigned long loads; unsigned long stores; + unsigned long branches; 
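The cpu-info.h hunk above folds the old core/vpe_id fields into a single packed globalnumber and only declares the corresponding setters (cpu_set_cluster(), cpu_set_core(), cpu_set_vpe_id()) as extern. A minimal sketch of what such a setter has to do, reusing the MIPS_GLOBALNUMBER_CORE mask and shift that cpu_core() reads with; illustrative only, and not necessarily the out-of-line implementation elsewhere in the series:

/* Sketch: pack a core number into cpuinfo->globalnumber, assuming the
 * MIPS_GLOBALNUMBER_* definitions referenced by the readers above.
 */
static void example_cpu_set_core(struct cpuinfo_mips *cpuinfo,
				 unsigned int core)
{
	/* Clear the old core field, then insert the new value. */
	cpuinfo->globalnumber &= ~MIPS_GLOBALNUMBER_CORE;
	cpuinfo->globalnumber |= (core << MIPS_GLOBALNUMBER_CORE_SHF) &
				 MIPS_GLOBALNUMBER_CORE;
}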
unsigned long cp1ops; unsigned long cp1xops; unsigned long errors; @@ -45,6 +46,121 @@ struct mips_fpu_emulator_stats { unsigned long ieee754_zerodiv; unsigned long ieee754_invalidop; unsigned long ds_emul; + + unsigned long abs_s; + unsigned long abs_d; + unsigned long add_s; + unsigned long add_d; + unsigned long bc1eqz; + unsigned long bc1nez; + unsigned long ceil_w_s; + unsigned long ceil_w_d; + unsigned long ceil_l_s; + unsigned long ceil_l_d; + unsigned long class_s; + unsigned long class_d; + unsigned long cmp_af_s; + unsigned long cmp_af_d; + unsigned long cmp_eq_s; + unsigned long cmp_eq_d; + unsigned long cmp_le_s; + unsigned long cmp_le_d; + unsigned long cmp_lt_s; + unsigned long cmp_lt_d; + unsigned long cmp_ne_s; + unsigned long cmp_ne_d; + unsigned long cmp_or_s; + unsigned long cmp_or_d; + unsigned long cmp_ueq_s; + unsigned long cmp_ueq_d; + unsigned long cmp_ule_s; + unsigned long cmp_ule_d; + unsigned long cmp_ult_s; + unsigned long cmp_ult_d; + unsigned long cmp_un_s; + unsigned long cmp_un_d; + unsigned long cmp_une_s; + unsigned long cmp_une_d; + unsigned long cmp_saf_s; + unsigned long cmp_saf_d; + unsigned long cmp_seq_s; + unsigned long cmp_seq_d; + unsigned long cmp_sle_s; + unsigned long cmp_sle_d; + unsigned long cmp_slt_s; + unsigned long cmp_slt_d; + unsigned long cmp_sne_s; + unsigned long cmp_sne_d; + unsigned long cmp_sor_s; + unsigned long cmp_sor_d; + unsigned long cmp_sueq_s; + unsigned long cmp_sueq_d; + unsigned long cmp_sule_s; + unsigned long cmp_sule_d; + unsigned long cmp_sult_s; + unsigned long cmp_sult_d; + unsigned long cmp_sun_s; + unsigned long cmp_sun_d; + unsigned long cmp_sune_s; + unsigned long cmp_sune_d; + unsigned long cvt_d_l; + unsigned long cvt_d_s; + unsigned long cvt_d_w; + unsigned long cvt_l_s; + unsigned long cvt_l_d; + unsigned long cvt_s_d; + unsigned long cvt_s_l; + unsigned long cvt_s_w; + unsigned long cvt_w_s; + unsigned long cvt_w_d; + unsigned long div_s; + unsigned long div_d; + unsigned long floor_w_s; + unsigned long floor_w_d; + unsigned long floor_l_s; + unsigned long floor_l_d; + unsigned long maddf_s; + unsigned long maddf_d; + unsigned long max_s; + unsigned long max_d; + unsigned long maxa_s; + unsigned long maxa_d; + unsigned long min_s; + unsigned long min_d; + unsigned long mina_s; + unsigned long mina_d; + unsigned long mov_s; + unsigned long mov_d; + unsigned long msubf_s; + unsigned long msubf_d; + unsigned long mul_s; + unsigned long mul_d; + unsigned long neg_s; + unsigned long neg_d; + unsigned long recip_s; + unsigned long recip_d; + unsigned long rint_s; + unsigned long rint_d; + unsigned long round_w_s; + unsigned long round_w_d; + unsigned long round_l_s; + unsigned long round_l_d; + unsigned long rsqrt_s; + unsigned long rsqrt_d; + unsigned long sel_s; + unsigned long sel_d; + unsigned long seleqz_s; + unsigned long seleqz_d; + unsigned long selnez_s; + unsigned long selnez_d; + unsigned long sqrt_s; + unsigned long sqrt_d; + unsigned long sub_s; + unsigned long sub_d; + unsigned long trunc_w_s; + unsigned long trunc_w_d; + unsigned long trunc_l_s; + unsigned long trunc_l_d; }; DECLARE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats); @@ -62,7 +178,7 @@ do { \ extern int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx, int has_fpu, - void *__user *fault_addr); + void __user **fault_addr); void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr, struct task_struct *tsk); int process_fpemu_return(int sig, void __user *fault_addr, diff --git 
a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h index ecabc00c1e66..0cbf3af37eca 100644 --- a/arch/mips/include/asm/io.h +++ b/arch/mips/include/asm/io.h @@ -632,4 +632,6 @@ extern void (*_dma_cache_inv)(unsigned long start, unsigned long size); */ #define xlate_dev_kmem_ptr(p) p +void __ioread64_copy(void *to, const void __iomem *from, size_t count); + #endif /* _ASM_IO_H */ diff --git a/arch/mips/include/asm/mach-au1x00/cpu-feature-overrides.h b/arch/mips/include/asm/mach-au1x00/cpu-feature-overrides.h index bace5b9ae4df..f439cf9cf9d1 100644 --- a/arch/mips/include/asm/mach-au1x00/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-au1x00/cpu-feature-overrides.h @@ -8,12 +8,16 @@ #define __ASM_MACH_AU1X00_CPU_FEATURE_OVERRIDES_H #define cpu_has_tlb 1 +#define cpu_has_ftlb 0 #define cpu_has_tlbinv 0 #define cpu_has_segments 0 #define cpu_has_eva 0 #define cpu_has_htw 0 +#define cpu_has_ldpte 0 #define cpu_has_rixiex 0 #define cpu_has_maar 0 +#define cpu_has_rw_llb 0 +#define cpu_has_3kex 0 #define cpu_has_4kex 1 #define cpu_has_3k_cache 0 #define cpu_has_4k_cache 1 @@ -30,6 +34,12 @@ #define cpu_has_mcheck 1 #define cpu_has_ejtag 1 #define cpu_has_llsc 1 +#define cpu_has_guestctl0ext 0 +#define cpu_has_guestctl1 0 +#define cpu_has_guestctl2 0 +#define cpu_has_guestid 0 +#define cpu_has_drg 0 +#define cpu_has_bp_ghist 0 #define cpu_has_mips16 0 #define cpu_has_mips16e2 0 #define cpu_has_mdmx 0 @@ -37,17 +47,23 @@ #define cpu_has_smartmips 0 #define cpu_has_rixi 0 #define cpu_has_mmips 0 +#define cpu_has_lpa 0 +#define cpu_has_mhv 0 #define cpu_has_vtag_icache 0 #define cpu_has_dc_aliases 0 #define cpu_has_ic_fills_f_dc 1 #define cpu_has_pindexed_dcache 0 #define cpu_has_mips32r1 1 #define cpu_has_mips32r2 0 +#define cpu_has_mips32r6 0 #define cpu_has_mips64r1 0 #define cpu_has_mips64r2 0 +#define cpu_has_mips64r6 0 #define cpu_has_dsp 0 #define cpu_has_dsp2 0 +#define cpu_has_dsp3 0 #define cpu_has_mipsmt 0 +#define cpu_has_vp 0 #define cpu_has_userlocal 0 #define cpu_has_nofpuex 0 #define cpu_has_64bits 0 @@ -58,9 +74,19 @@ #define cpu_dcache_line_size() 32 #define cpu_icache_line_size() 32 +#define cpu_scache_line_size() 0 #define cpu_has_perf_cntr_intr_bit 0 #define cpu_has_vz 0 #define cpu_has_msa 0 +#define cpu_has_fre 0 +#define cpu_has_cdmm 0 +#define cpu_has_small_pages 0 +#define cpu_has_nan_legacy 1 +#define cpu_has_nan_2008 1 +#define cpu_has_ebase_wg 0 +#define cpu_has_badinstr 0 +#define cpu_has_badinstrp 0 +#define cpu_has_contextconfig 0 #endif /* __ASM_MACH_AU1X00_CPU_FEATURE_OVERRIDES_H */ diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h index 5035f09c5427..24080af570f9 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h @@ -710,7 +710,7 @@ /* Broadcom 6345 ENET DMA definitions */ #define ENETDMA_6345_CHANCFG_REG (0x00) -#define ENETDMA_6345_MAXBURST_REG (0x40) +#define ENETDMA_6345_MAXBURST_REG (0x04) #define ENETDMA_6345_RSTART_REG (0x08) diff --git a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h index bd8b9bbe1771..a4f798629c3d 100644 --- a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h @@ -46,9 +46,9 @@ #define cpu_has_64bits 1 #define cpu_has_octeon_cache 1 #define cpu_has_saa octeon_has_saa() -#define cpu_has_mips32r1 0 -#define cpu_has_mips32r2 0 
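The io.h hunk above declares __ioread64_copy(); by analogy with the existing __ioread32_copy() helper, count is presumably the number of 64-bit words to copy out of MMIO space. A hedged usage sketch with a hypothetical caller:

/* Hypothetical helper: snapshot 'words' 64-bit registers from an MMIO
 * window into a kernel buffer, assuming __ioread32_copy()-style
 * semantics (count in 64-bit units).
 */
static void example_capture_window(void __iomem *win, u64 *buf, size_t words)
{
	__ioread64_copy(buf, win, words);
}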
-#define cpu_has_mips64r1 0 +#define cpu_has_mips32r1 1 +#define cpu_has_mips32r2 1 +#define cpu_has_mips64r1 1 #define cpu_has_mips64r2 1 #define cpu_has_dsp 0 #define cpu_has_dsp2 0 diff --git a/arch/mips/include/asm/mach-ip27/topology.h b/arch/mips/include/asm/mach-ip27/topology.h index defd135e7ac8..3fb7a0e09494 100644 --- a/arch/mips/include/asm/mach-ip27/topology.h +++ b/arch/mips/include/asm/mach-ip27/topology.h @@ -23,7 +23,6 @@ struct cpuinfo_ip27 { extern struct cpuinfo_ip27 sn_cpu_info[NR_CPUS]; #define cpu_to_node(cpu) (sn_cpu_info[(cpu)].p_nodeid) -#define parent_node(node) (node) #define cpumask_of_node(node) ((node) == -1 ? \ cpu_all_mask : \ &hub_data(node)->h_cpus) diff --git a/arch/mips/include/asm/mach-jz4740/jz4740_nand.h b/arch/mips/include/asm/mach-jz4740/jz4740_nand.h index 7f7b0fc554da..f381d465e768 100644 --- a/arch/mips/include/asm/mach-jz4740/jz4740_nand.h +++ b/arch/mips/include/asm/mach-jz4740/jz4740_nand.h @@ -16,7 +16,7 @@ #ifndef __ASM_MACH_JZ4740_JZ4740_NAND_H__ #define __ASM_MACH_JZ4740_JZ4740_NAND_H__ -#include +#include #include #define JZ_NAND_NUM_BANKS 4 diff --git a/arch/mips/include/asm/mach-lantiq/lantiq.h b/arch/mips/include/asm/mach-lantiq/lantiq.h index 8064d7a4b33d..d750f93232e4 100644 --- a/arch/mips/include/asm/mach-lantiq/lantiq.h +++ b/arch/mips/include/asm/mach-lantiq/lantiq.h @@ -46,8 +46,6 @@ extern struct clk *clk_get_ppe(void); /* find out what bootsource we have */ extern unsigned char ltq_boot_select(void); -/* find out what caused the last cpu reset */ -extern int ltq_reset_cause(void); /* find out the soc type */ extern int ltq_soc_type(void); diff --git a/arch/mips/include/asm/mach-loongson64/loongson.h b/arch/mips/include/asm/mach-loongson64/loongson.h index c68c0cc879c6..d0ae5d55413b 100644 --- a/arch/mips/include/asm/mach-loongson64/loongson.h +++ b/arch/mips/include/asm/mach-loongson64/loongson.h @@ -26,7 +26,7 @@ extern void mach_prepare_shutdown(void); /* environment arguments from bootloader */ extern u32 cpu_clock_freq; extern u32 memsize, highmemsize; -extern struct plat_smp_ops loongson3_smp_ops; +extern const struct plat_smp_ops loongson3_smp_ops; /* loongson-specific command line, env and memory initialization */ extern void __init prom_init_memory(void); diff --git a/arch/mips/include/asm/mach-loongson64/topology.h b/arch/mips/include/asm/mach-loongson64/topology.h index 0d8f3b55bdbc..bcb885615fca 100644 --- a/arch/mips/include/asm/mach-loongson64/topology.h +++ b/arch/mips/include/asm/mach-loongson64/topology.h @@ -4,7 +4,6 @@ #ifdef CONFIG_NUMA #define cpu_to_node(cpu) (cpu_logical_map(cpu) >> 2) -#define parent_node(node) (node) #define cpumask_of_node(node) (&__node_data[(node)]->cpumask) struct pci_bus; diff --git a/arch/mips/include/asm/mips-boards/maltaint.h b/arch/mips/include/asm/mips-boards/maltaint.h index 987ff580466b..817698abf2eb 100644 --- a/arch/mips/include/asm/mips-boards/maltaint.h +++ b/arch/mips/include/asm/mips-boards/maltaint.h @@ -10,8 +10,6 @@ #ifndef _MIPS_MALTAINT_H #define _MIPS_MALTAINT_H -#include - /* * Interrupts 0..15 are used for Malta ISA compatible interrupts */ @@ -62,7 +60,4 @@ #define MSC01E_INT_PERFCTR 10 #define MSC01E_INT_CPUCTR 11 -/* GIC external interrupts */ -#define GIC_INT_I8259A GIC_SHARED_TO_HWIRQ(3) - #endif /* !(_MIPS_MALTAINT_H) */ diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h index cfdbab015769..f6231b91b724 100644 --- a/arch/mips/include/asm/mips-cm.h +++ b/arch/mips/include/asm/mips-cm.h @@ -8,16 +8,18 @@ * option) any later 
version. */ +#ifndef __MIPS_ASM_MIPS_CPS_H__ +# error Please include asm/mips-cps.h rather than asm/mips-cm.h +#endif + #ifndef __MIPS_ASM_MIPS_CM_H__ #define __MIPS_ASM_MIPS_CM_H__ #include #include -#include -#include /* The base address of the CM GCR block */ -extern void __iomem *mips_cm_base; +extern void __iomem *mips_gcr_base; /* The base address of the CM L2-only sync region */ extern void __iomem *mips_cm_l2sync_base; @@ -80,7 +82,7 @@ static inline int mips_cm_probe(void) static inline bool mips_cm_present(void) { #ifdef CONFIG_MIPS_CM - return mips_cm_base != NULL; + return mips_gcr_base != NULL; #else return false; #endif @@ -112,321 +114,219 @@ static inline bool mips_cm_has_l2sync(void) /* Size of the L2-only sync region */ #define MIPS_CM_L2SYNC_SIZE 0x1000 -/* Macros to ease the creation of register access functions */ -#define BUILD_CM_R_(name, off) \ -static inline unsigned long __iomem *addr_gcr_##name(void) \ -{ \ - return (unsigned long __iomem *)(mips_cm_base + (off)); \ -} \ - \ -static inline u32 read32_gcr_##name(void) \ -{ \ - return __raw_readl(addr_gcr_##name()); \ -} \ - \ -static inline u64 read64_gcr_##name(void) \ -{ \ - void __iomem *addr = addr_gcr_##name(); \ - u64 ret; \ - \ - if (mips_cm_is64) { \ - ret = __raw_readq(addr); \ - } else { \ - ret = __raw_readl(addr); \ - ret |= (u64)__raw_readl(addr + 0x4) << 32; \ - } \ - \ - return ret; \ -} \ - \ -static inline unsigned long read_gcr_##name(void) \ -{ \ - if (mips_cm_is64) \ - return read64_gcr_##name(); \ - else \ - return read32_gcr_##name(); \ -} +#define GCR_ACCESSOR_RO(sz, off, name) \ + CPS_ACCESSOR_RO(gcr, sz, MIPS_CM_GCB_OFS + off, name) \ + CPS_ACCESSOR_RO(gcr, sz, MIPS_CM_COCB_OFS + off, redir_##name) -#define BUILD_CM__W(name, off) \ -static inline void write32_gcr_##name(u32 value) \ -{ \ - __raw_writel(value, addr_gcr_##name()); \ -} \ - \ -static inline void write64_gcr_##name(u64 value) \ -{ \ - __raw_writeq(value, addr_gcr_##name()); \ -} \ - \ -static inline void write_gcr_##name(unsigned long value) \ -{ \ - if (mips_cm_is64) \ - write64_gcr_##name(value); \ - else \ - write32_gcr_##name(value); \ -} +#define GCR_ACCESSOR_RW(sz, off, name) \ + CPS_ACCESSOR_RW(gcr, sz, MIPS_CM_GCB_OFS + off, name) \ + CPS_ACCESSOR_RW(gcr, sz, MIPS_CM_COCB_OFS + off, redir_##name) -#define BUILD_CM_RW(name, off) \ - BUILD_CM_R_(name, off) \ - BUILD_CM__W(name, off) +#define GCR_CX_ACCESSOR_RO(sz, off, name) \ + CPS_ACCESSOR_RO(gcr, sz, MIPS_CM_CLCB_OFS + off, cl_##name) \ + CPS_ACCESSOR_RO(gcr, sz, MIPS_CM_COCB_OFS + off, co_##name) -#define BUILD_CM_Cx_R_(name, off) \ - BUILD_CM_R_(cl_##name, MIPS_CM_CLCB_OFS + (off)) \ - BUILD_CM_R_(co_##name, MIPS_CM_COCB_OFS + (off)) +#define GCR_CX_ACCESSOR_RW(sz, off, name) \ + CPS_ACCESSOR_RW(gcr, sz, MIPS_CM_CLCB_OFS + off, cl_##name) \ + CPS_ACCESSOR_RW(gcr, sz, MIPS_CM_COCB_OFS + off, co_##name) -#define BUILD_CM_Cx__W(name, off) \ - BUILD_CM__W(cl_##name, MIPS_CM_CLCB_OFS + (off)) \ - BUILD_CM__W(co_##name, MIPS_CM_COCB_OFS + (off)) +/* GCR_CONFIG - Information about the system */ +GCR_ACCESSOR_RO(64, 0x000, config) +#define CM_GCR_CONFIG_CLUSTER_COH_CAPABLE BIT_ULL(43) +#define CM_GCR_CONFIG_CLUSTER_ID GENMASK_ULL(39, 32) +#define CM_GCR_CONFIG_NUM_CLUSTERS GENMASK(29, 23) +#define CM_GCR_CONFIG_NUMIOCU GENMASK(15, 8) +#define CM_GCR_CONFIG_PCORES GENMASK(7, 0) -#define BUILD_CM_Cx_RW(name, off) \ - BUILD_CM_Cx_R_(name, off) \ - BUILD_CM_Cx__W(name, off) - -/* GCB register accessor functions */ -BUILD_CM_R_(config, MIPS_CM_GCB_OFS + 0x00) -BUILD_CM_RW(base, 
MIPS_CM_GCB_OFS + 0x08) -BUILD_CM_RW(access, MIPS_CM_GCB_OFS + 0x20) -BUILD_CM_R_(rev, MIPS_CM_GCB_OFS + 0x30) -BUILD_CM_RW(err_control, MIPS_CM_GCB_OFS + 0x38) -BUILD_CM_RW(error_mask, MIPS_CM_GCB_OFS + 0x40) -BUILD_CM_RW(error_cause, MIPS_CM_GCB_OFS + 0x48) -BUILD_CM_RW(error_addr, MIPS_CM_GCB_OFS + 0x50) -BUILD_CM_RW(error_mult, MIPS_CM_GCB_OFS + 0x58) -BUILD_CM_RW(l2_only_sync_base, MIPS_CM_GCB_OFS + 0x70) -BUILD_CM_RW(gic_base, MIPS_CM_GCB_OFS + 0x80) -BUILD_CM_RW(cpc_base, MIPS_CM_GCB_OFS + 0x88) -BUILD_CM_RW(reg0_base, MIPS_CM_GCB_OFS + 0x90) -BUILD_CM_RW(reg0_mask, MIPS_CM_GCB_OFS + 0x98) -BUILD_CM_RW(reg1_base, MIPS_CM_GCB_OFS + 0xa0) -BUILD_CM_RW(reg1_mask, MIPS_CM_GCB_OFS + 0xa8) -BUILD_CM_RW(reg2_base, MIPS_CM_GCB_OFS + 0xb0) -BUILD_CM_RW(reg2_mask, MIPS_CM_GCB_OFS + 0xb8) -BUILD_CM_RW(reg3_base, MIPS_CM_GCB_OFS + 0xc0) -BUILD_CM_RW(reg3_mask, MIPS_CM_GCB_OFS + 0xc8) -BUILD_CM_R_(gic_status, MIPS_CM_GCB_OFS + 0xd0) -BUILD_CM_R_(cpc_status, MIPS_CM_GCB_OFS + 0xf0) -BUILD_CM_RW(l2_config, MIPS_CM_GCB_OFS + 0x130) -BUILD_CM_RW(sys_config2, MIPS_CM_GCB_OFS + 0x150) -BUILD_CM_RW(l2_pft_control, MIPS_CM_GCB_OFS + 0x300) -BUILD_CM_RW(l2_pft_control_b, MIPS_CM_GCB_OFS + 0x308) -BUILD_CM_RW(bev_base, MIPS_CM_GCB_OFS + 0x680) - -/* Core Local & Core Other register accessor functions */ -BUILD_CM_Cx_RW(reset_release, 0x00) -BUILD_CM_Cx_RW(coherence, 0x08) -BUILD_CM_Cx_R_(config, 0x10) -BUILD_CM_Cx_RW(other, 0x18) -BUILD_CM_Cx_RW(reset_base, 0x20) -BUILD_CM_Cx_R_(id, 0x28) -BUILD_CM_Cx_RW(reset_ext_base, 0x30) -BUILD_CM_Cx_R_(tcid_0_priority, 0x40) -BUILD_CM_Cx_R_(tcid_1_priority, 0x48) -BUILD_CM_Cx_R_(tcid_2_priority, 0x50) -BUILD_CM_Cx_R_(tcid_3_priority, 0x58) -BUILD_CM_Cx_R_(tcid_4_priority, 0x60) -BUILD_CM_Cx_R_(tcid_5_priority, 0x68) -BUILD_CM_Cx_R_(tcid_6_priority, 0x70) -BUILD_CM_Cx_R_(tcid_7_priority, 0x78) -BUILD_CM_Cx_R_(tcid_8_priority, 0x80) - -/* GCR_CONFIG register fields */ -#define CM_GCR_CONFIG_NUMIOCU_SHF 8 -#define CM_GCR_CONFIG_NUMIOCU_MSK (_ULCAST_(0xf) << 8) -#define CM_GCR_CONFIG_PCORES_SHF 0 -#define CM_GCR_CONFIG_PCORES_MSK (_ULCAST_(0xff) << 0) - -/* GCR_BASE register fields */ -#define CM_GCR_BASE_GCRBASE_SHF 15 -#define CM_GCR_BASE_GCRBASE_MSK (_ULCAST_(0x1ffff) << 15) -#define CM_GCR_BASE_CMDEFTGT_SHF 0 -#define CM_GCR_BASE_CMDEFTGT_MSK (_ULCAST_(0x3) << 0) +/* GCR_BASE - Base address of the Global Configuration Registers (GCRs) */ +GCR_ACCESSOR_RW(64, 0x008, base) +#define CM_GCR_BASE_GCRBASE GENMASK_ULL(47, 15) +#define CM_GCR_BASE_CMDEFTGT GENMASK(1, 0) #define CM_GCR_BASE_CMDEFTGT_DISABLED 0 #define CM_GCR_BASE_CMDEFTGT_MEM 1 #define CM_GCR_BASE_CMDEFTGT_IOCU0 2 #define CM_GCR_BASE_CMDEFTGT_IOCU1 3 -/* GCR_RESET_EXT_BASE register fields */ -#define CM_GCR_RESET_EXT_BASE_EVARESET BIT(31) -#define CM_GCR_RESET_EXT_BASE_UEB BIT(30) +/* GCR_ACCESS - Controls core/IOCU access to GCRs */ +GCR_ACCESSOR_RW(32, 0x020, access) +#define CM_GCR_ACCESS_ACCESSEN GENMASK(7, 0) -/* GCR_ACCESS register fields */ -#define CM_GCR_ACCESS_ACCESSEN_SHF 0 -#define CM_GCR_ACCESS_ACCESSEN_MSK (_ULCAST_(0xff) << 0) - -/* GCR_REV register fields */ -#define CM_GCR_REV_MAJOR_SHF 8 -#define CM_GCR_REV_MAJOR_MSK (_ULCAST_(0xff) << 8) -#define CM_GCR_REV_MINOR_SHF 0 -#define CM_GCR_REV_MINOR_MSK (_ULCAST_(0xff) << 0) +/* GCR_REV - Indicates the Coherence Manager revision */ +GCR_ACCESSOR_RO(32, 0x030, rev) +#define CM_GCR_REV_MAJOR GENMASK(15, 8) +#define CM_GCR_REV_MINOR GENMASK(7, 0) #define CM_ENCODE_REV(major, minor) \ - (((major) << CM_GCR_REV_MAJOR_SHF) | \ - ((minor) << 
CM_GCR_REV_MINOR_SHF)) + (((major) << __ffs(CM_GCR_REV_MAJOR)) | \ + ((minor) << __ffs(CM_GCR_REV_MINOR))) #define CM_REV_CM2 CM_ENCODE_REV(6, 0) #define CM_REV_CM2_5 CM_ENCODE_REV(7, 0) #define CM_REV_CM3 CM_ENCODE_REV(8, 0) +#define CM_REV_CM3_5 CM_ENCODE_REV(9, 0) -/* GCR_ERR_CONTROL register fields */ -#define CM_GCR_ERR_CONTROL_L2_ECC_EN_SHF 1 -#define CM_GCR_ERR_CONTROL_L2_ECC_EN_MSK (_ULCAST_(0x1) << 1) -#define CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT_SHF 0 -#define CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT_MSK (_ULCAST_(0x1) << 0) +/* GCR_ERR_CONTROL - Control error checking logic */ +GCR_ACCESSOR_RW(32, 0x038, err_control) +#define CM_GCR_ERR_CONTROL_L2_ECC_EN BIT(1) +#define CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT BIT(0) -/* GCR_ERROR_CAUSE register fields */ -#define CM_GCR_ERROR_CAUSE_ERRTYPE_SHF 27 -#define CM_GCR_ERROR_CAUSE_ERRTYPE_MSK (_ULCAST_(0x1f) << 27) -#define CM3_GCR_ERROR_CAUSE_ERRTYPE_SHF 58 -#define CM3_GCR_ERROR_CAUSE_ERRTYPE_MSK GENMASK_ULL(63, 58) -#define CM_GCR_ERROR_CAUSE_ERRINFO_SHF 0 -#define CM_GCR_ERROR_CAUSE_ERRINGO_MSK (_ULCAST_(0x7ffffff) << 0) +/* GCR_ERR_MASK - Control which errors are reported as interrupts */ +GCR_ACCESSOR_RW(64, 0x040, error_mask) -/* GCR_ERROR_MULT register fields */ -#define CM_GCR_ERROR_MULT_ERR2ND_SHF 0 -#define CM_GCR_ERROR_MULT_ERR2ND_MSK (_ULCAST_(0x1f) << 0) +/* GCR_ERR_CAUSE - Indicates the type of error that occurred */ +GCR_ACCESSOR_RW(64, 0x048, error_cause) +#define CM_GCR_ERROR_CAUSE_ERRTYPE GENMASK(31, 27) +#define CM3_GCR_ERROR_CAUSE_ERRTYPE GENMASK_ULL(63, 58) +#define CM_GCR_ERROR_CAUSE_ERRINFO GENMASK(26, 0) -/* GCR_L2_ONLY_SYNC_BASE register fields */ -#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE_SHF 12 -#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE_MSK (_ULCAST_(0xfffff) << 12) -#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN_SHF 0 -#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN_MSK (_ULCAST_(0x1) << 0) +/* GCR_ERR_ADDR - Indicates the address associated with an error */ +GCR_ACCESSOR_RW(64, 0x050, error_addr) -/* GCR_GIC_BASE register fields */ -#define CM_GCR_GIC_BASE_GICBASE_SHF 17 -#define CM_GCR_GIC_BASE_GICBASE_MSK (_ULCAST_(0x7fff) << 17) -#define CM_GCR_GIC_BASE_GICEN_SHF 0 -#define CM_GCR_GIC_BASE_GICEN_MSK (_ULCAST_(0x1) << 0) +/* GCR_ERR_MULT - Indicates when multiple errors have occurred */ +GCR_ACCESSOR_RW(64, 0x058, error_mult) +#define CM_GCR_ERROR_MULT_ERR2ND GENMASK(4, 0) -/* GCR_CPC_BASE register fields */ -#define CM_GCR_CPC_BASE_CPCBASE_SHF 15 -#define CM_GCR_CPC_BASE_CPCBASE_MSK (_ULCAST_(0x1ffff) << 15) -#define CM_GCR_CPC_BASE_CPCEN_SHF 0 -#define CM_GCR_CPC_BASE_CPCEN_MSK (_ULCAST_(0x1) << 0) +/* GCR_L2_ONLY_SYNC_BASE - Base address of the L2 cache-only sync region */ +GCR_ACCESSOR_RW(64, 0x070, l2_only_sync_base) +#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE GENMASK(31, 12) +#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN BIT(0) -/* GCR_GIC_STATUS register fields */ -#define CM_GCR_GIC_STATUS_GICEX_SHF 0 -#define CM_GCR_GIC_STATUS_GICEX_MSK (_ULCAST_(0x1) << 0) +/* GCR_GIC_BASE - Base address of the Global Interrupt Controller (GIC) */ +GCR_ACCESSOR_RW(64, 0x080, gic_base) +#define CM_GCR_GIC_BASE_GICBASE GENMASK(31, 17) +#define CM_GCR_GIC_BASE_GICEN BIT(0) -/* GCR_REGn_BASE register fields */ -#define CM_GCR_REGn_BASE_BASEADDR_SHF 16 -#define CM_GCR_REGn_BASE_BASEADDR_MSK (_ULCAST_(0xffff) << 16) +/* GCR_CPC_BASE - Base address of the Cluster Power Controller (CPC) */ +GCR_ACCESSOR_RW(64, 0x088, cpc_base) +#define CM_GCR_CPC_BASE_CPCBASE GENMASK(31, 15) +#define CM_GCR_CPC_BASE_CPCEN BIT(0) -/* GCR_REGn_MASK register fields */ 
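With the register fields above expressed as single GENMASK() definitions rather than paired _SHF/_MSK constants, callers extract a value by shifting with __ffs() of the mask (the same pattern mips_cm_max_vp_width() uses further down) and OR in single-bit flags directly. A small sketch using accessors generated by the GCR_ACCESSOR_* macros above; the GIC enable line is illustrative, not a substitute for the real probe code:

/* Sketch: new-style field extraction and a flag write. */
static unsigned int example_cm_major_rev(void)
{
	return (read_gcr_rev() & CM_GCR_REV_MAJOR) >> __ffs(CM_GCR_REV_MAJOR);
}

static void example_enable_gic(unsigned long gic_base_pa)
{
	/* GICBASE occupies the upper address bits, GICEN is bit 0. */
	write_gcr_gic_base(gic_base_pa | CM_GCR_GIC_BASE_GICEN);
}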
-#define CM_GCR_REGn_MASK_ADDRMASK_SHF 16 -#define CM_GCR_REGn_MASK_ADDRMASK_MSK (_ULCAST_(0xffff) << 16) -#define CM_GCR_REGn_MASK_CCAOVR_SHF 5 -#define CM_GCR_REGn_MASK_CCAOVR_MSK (_ULCAST_(0x3) << 5) -#define CM_GCR_REGn_MASK_CCAOVREN_SHF 4 -#define CM_GCR_REGn_MASK_CCAOVREN_MSK (_ULCAST_(0x1) << 4) -#define CM_GCR_REGn_MASK_DROPL2_SHF 2 -#define CM_GCR_REGn_MASK_DROPL2_MSK (_ULCAST_(0x1) << 2) -#define CM_GCR_REGn_MASK_CMTGT_SHF 0 -#define CM_GCR_REGn_MASK_CMTGT_MSK (_ULCAST_(0x3) << 0) -#define CM_GCR_REGn_MASK_CMTGT_DISABLED (_ULCAST_(0x0) << 0) -#define CM_GCR_REGn_MASK_CMTGT_MEM (_ULCAST_(0x1) << 0) -#define CM_GCR_REGn_MASK_CMTGT_IOCU0 (_ULCAST_(0x2) << 0) -#define CM_GCR_REGn_MASK_CMTGT_IOCU1 (_ULCAST_(0x3) << 0) +/* GCR_REGn_BASE - Base addresses of CM address regions */ +GCR_ACCESSOR_RW(64, 0x090, reg0_base) +GCR_ACCESSOR_RW(64, 0x0a0, reg1_base) +GCR_ACCESSOR_RW(64, 0x0b0, reg2_base) +GCR_ACCESSOR_RW(64, 0x0c0, reg3_base) +#define CM_GCR_REGn_BASE_BASEADDR GENMASK(31, 16) -/* GCR_GIC_STATUS register fields */ -#define CM_GCR_GIC_STATUS_EX_SHF 0 -#define CM_GCR_GIC_STATUS_EX_MSK (_ULCAST_(0x1) << 0) +/* GCR_REGn_MASK - Size & destination of CM address regions */ +GCR_ACCESSOR_RW(64, 0x098, reg0_mask) +GCR_ACCESSOR_RW(64, 0x0a8, reg1_mask) +GCR_ACCESSOR_RW(64, 0x0b8, reg2_mask) +GCR_ACCESSOR_RW(64, 0x0c8, reg3_mask) +#define CM_GCR_REGn_MASK_ADDRMASK GENMASK(31, 16) +#define CM_GCR_REGn_MASK_CCAOVR GENMASK(7, 5) +#define CM_GCR_REGn_MASK_CCAOVREN BIT(4) +#define CM_GCR_REGn_MASK_DROPL2 BIT(2) +#define CM_GCR_REGn_MASK_CMTGT GENMASK(1, 0) +#define CM_GCR_REGn_MASK_CMTGT_DISABLED 0x0 +#define CM_GCR_REGn_MASK_CMTGT_MEM 0x1 +#define CM_GCR_REGn_MASK_CMTGT_IOCU0 0x2 +#define CM_GCR_REGn_MASK_CMTGT_IOCU1 0x3 -/* GCR_CPC_STATUS register fields */ -#define CM_GCR_CPC_STATUS_EX_SHF 0 -#define CM_GCR_CPC_STATUS_EX_MSK (_ULCAST_(0x1) << 0) +/* GCR_GIC_STATUS - Indicates presence of a Global Interrupt Controller (GIC) */ +GCR_ACCESSOR_RO(32, 0x0d0, gic_status) +#define CM_GCR_GIC_STATUS_EX BIT(0) -/* GCR_L2_CONFIG register fields */ -#define CM_GCR_L2_CONFIG_BYPASS_SHF 20 -#define CM_GCR_L2_CONFIG_BYPASS_MSK (_ULCAST_(0x1) << 20) -#define CM_GCR_L2_CONFIG_SET_SIZE_SHF 12 -#define CM_GCR_L2_CONFIG_SET_SIZE_MSK (_ULCAST_(0xf) << 12) -#define CM_GCR_L2_CONFIG_LINE_SIZE_SHF 8 -#define CM_GCR_L2_CONFIG_LINE_SIZE_MSK (_ULCAST_(0xf) << 8) -#define CM_GCR_L2_CONFIG_ASSOC_SHF 0 -#define CM_GCR_L2_CONFIG_ASSOC_MSK (_ULCAST_(0xff) << 0) +/* GCR_CPC_STATUS - Indicates presence of a Cluster Power Controller (CPC) */ +GCR_ACCESSOR_RO(32, 0x0f0, cpc_status) +#define CM_GCR_CPC_STATUS_EX BIT(0) -/* GCR_SYS_CONFIG2 register fields */ -#define CM_GCR_SYS_CONFIG2_MAXVPW_SHF 0 -#define CM_GCR_SYS_CONFIG2_MAXVPW_MSK (_ULCAST_(0xf) << 0) +/* GCR_L2_CONFIG - Indicates L2 cache configuration when Config5.L2C=1 */ +GCR_ACCESSOR_RW(32, 0x130, l2_config) +#define CM_GCR_L2_CONFIG_BYPASS BIT(20) +#define CM_GCR_L2_CONFIG_SET_SIZE GENMASK(15, 12) +#define CM_GCR_L2_CONFIG_LINE_SIZE GENMASK(11, 8) +#define CM_GCR_L2_CONFIG_ASSOC GENMASK(7, 0) -/* GCR_L2_PFT_CONTROL register fields */ -#define CM_GCR_L2_PFT_CONTROL_PAGEMASK_SHF 12 -#define CM_GCR_L2_PFT_CONTROL_PAGEMASK_MSK (_ULCAST_(0xfffff) << 12) -#define CM_GCR_L2_PFT_CONTROL_PFTEN_SHF 8 -#define CM_GCR_L2_PFT_CONTROL_PFTEN_MSK (_ULCAST_(0x1) << 8) -#define CM_GCR_L2_PFT_CONTROL_NPFT_SHF 0 -#define CM_GCR_L2_PFT_CONTROL_NPFT_MSK (_ULCAST_(0xff) << 0) +/* GCR_SYS_CONFIG2 - Further information about the system */ +GCR_ACCESSOR_RO(32, 0x150, sys_config2) +#define 
CM_GCR_SYS_CONFIG2_MAXVPW GENMASK(3, 0) -/* GCR_L2_PFT_CONTROL_B register fields */ -#define CM_GCR_L2_PFT_CONTROL_B_CEN_SHF 8 -#define CM_GCR_L2_PFT_CONTROL_B_CEN_MSK (_ULCAST_(0x1) << 8) -#define CM_GCR_L2_PFT_CONTROL_B_PORTID_SHF 0 -#define CM_GCR_L2_PFT_CONTROL_B_PORTID_MSK (_ULCAST_(0xff) << 0) +/* GCR_L2_PFT_CONTROL - Controls hardware L2 prefetching */ +GCR_ACCESSOR_RW(32, 0x300, l2_pft_control) +#define CM_GCR_L2_PFT_CONTROL_PAGEMASK GENMASK(31, 12) +#define CM_GCR_L2_PFT_CONTROL_PFTEN BIT(8) +#define CM_GCR_L2_PFT_CONTROL_NPFT GENMASK(7, 0) -/* GCR_Cx_COHERENCE register fields */ -#define CM_GCR_Cx_COHERENCE_COHDOMAINEN_SHF 0 -#define CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK (_ULCAST_(0xff) << 0) -#define CM3_GCR_Cx_COHERENCE_COHEN_MSK (_ULCAST_(0x1) << 0) +/* GCR_L2_PFT_CONTROL_B - Controls hardware L2 prefetching */ +GCR_ACCESSOR_RW(32, 0x308, l2_pft_control_b) +#define CM_GCR_L2_PFT_CONTROL_B_CEN BIT(8) +#define CM_GCR_L2_PFT_CONTROL_B_PORTID GENMASK(7, 0) -/* GCR_Cx_CONFIG register fields */ -#define CM_GCR_Cx_CONFIG_IOCUTYPE_SHF 10 -#define CM_GCR_Cx_CONFIG_IOCUTYPE_MSK (_ULCAST_(0x3) << 10) -#define CM_GCR_Cx_CONFIG_PVPE_SHF 0 -#define CM_GCR_Cx_CONFIG_PVPE_MSK (_ULCAST_(0x3ff) << 0) +/* GCR_L2SM_COP - L2 cache op state machine control */ +GCR_ACCESSOR_RW(32, 0x620, l2sm_cop) +#define CM_GCR_L2SM_COP_PRESENT BIT(31) +#define CM_GCR_L2SM_COP_RESULT GENMASK(8, 6) +#define CM_GCR_L2SM_COP_RESULT_DONTCARE 0 +#define CM_GCR_L2SM_COP_RESULT_DONE_OK 1 +#define CM_GCR_L2SM_COP_RESULT_DONE_ERROR 2 +#define CM_GCR_L2SM_COP_RESULT_ABORT_OK 3 +#define CM_GCR_L2SM_COP_RESULT_ABORT_ERROR 4 +#define CM_GCR_L2SM_COP_RUNNING BIT(5) +#define CM_GCR_L2SM_COP_TYPE GENMASK(4, 2) +#define CM_GCR_L2SM_COP_TYPE_IDX_WBINV 0 +#define CM_GCR_L2SM_COP_TYPE_IDX_STORETAG 1 +#define CM_GCR_L2SM_COP_TYPE_IDX_STORETAGDATA 2 +#define CM_GCR_L2SM_COP_TYPE_HIT_INV 4 +#define CM_GCR_L2SM_COP_TYPE_HIT_WBINV 5 +#define CM_GCR_L2SM_COP_TYPE_HIT_WB 6 +#define CM_GCR_L2SM_COP_TYPE_FETCHLOCK 7 +#define CM_GCR_L2SM_COP_CMD GENMASK(1, 0) +#define CM_GCR_L2SM_COP_CMD_START 1 /* only when idle */ +#define CM_GCR_L2SM_COP_CMD_ABORT 3 /* only when running */ -/* GCR_Cx_OTHER register fields */ -#define CM_GCR_Cx_OTHER_CORENUM_SHF 16 -#define CM_GCR_Cx_OTHER_CORENUM_MSK (_ULCAST_(0xffff) << 16) -#define CM3_GCR_Cx_OTHER_CORE_SHF 8 -#define CM3_GCR_Cx_OTHER_CORE_MSK (_ULCAST_(0x3f) << 8) -#define CM3_GCR_Cx_OTHER_VP_SHF 0 -#define CM3_GCR_Cx_OTHER_VP_MSK (_ULCAST_(0x7) << 0) +/* GCR_L2SM_TAG_ADDR_COP - L2 cache op state machine address control */ +GCR_ACCESSOR_RW(64, 0x628, l2sm_tag_addr_cop) +#define CM_GCR_L2SM_TAG_ADDR_COP_NUM_LINES GENMASK_ULL(63, 48) +#define CM_GCR_L2SM_TAG_ADDR_COP_START_TAG GENMASK_ULL(47, 6) -/* GCR_Cx_RESET_BASE register fields */ -#define CM_GCR_Cx_RESET_BASE_BEVEXCBASE_SHF 12 -#define CM_GCR_Cx_RESET_BASE_BEVEXCBASE_MSK (_ULCAST_(0xfffff) << 12) +/* GCR_BEV_BASE - Controls the location of the BEV for powered up cores */ +GCR_ACCESSOR_RW(64, 0x680, bev_base) -/* GCR_Cx_RESET_EXT_BASE register fields */ -#define CM_GCR_Cx_RESET_EXT_BASE_EVARESET_SHF 31 -#define CM_GCR_Cx_RESET_EXT_BASE_EVARESET_MSK (_ULCAST_(0x1) << 31) -#define CM_GCR_Cx_RESET_EXT_BASE_UEB_SHF 30 -#define CM_GCR_Cx_RESET_EXT_BASE_UEB_MSK (_ULCAST_(0x1) << 30) -#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCMASK_SHF 20 -#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCMASK_MSK (_ULCAST_(0xff) << 20) -#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCPA_SHF 1 -#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCPA_MSK (_ULCAST_(0x7f) << 1) -#define 
CM_GCR_Cx_RESET_EXT_BASE_PRESENT_SHF 0 -#define CM_GCR_Cx_RESET_EXT_BASE_PRESENT_MSK (_ULCAST_(0x1) << 0) +/* GCR_Cx_RESET_RELEASE - Controls core reset for CM 1.x */ +GCR_CX_ACCESSOR_RW(32, 0x000, reset_release) -/** - * mips_cm_numcores - return the number of cores present in the system - * - * Returns the value of the PCORES field of the GCR_CONFIG register plus 1, or - * zero if no Coherence Manager is present. - */ -static inline unsigned mips_cm_numcores(void) -{ - if (!mips_cm_present()) - return 0; +/* GCR_Cx_COHERENCE - Controls core coherence */ +GCR_CX_ACCESSOR_RW(32, 0x008, coherence) +#define CM_GCR_Cx_COHERENCE_COHDOMAINEN GENMASK(7, 0) +#define CM3_GCR_Cx_COHERENCE_COHEN BIT(0) - return ((read_gcr_config() & CM_GCR_CONFIG_PCORES_MSK) - >> CM_GCR_CONFIG_PCORES_SHF) + 1; -} +/* GCR_Cx_CONFIG - Information about a core's configuration */ +GCR_CX_ACCESSOR_RO(32, 0x010, config) +#define CM_GCR_Cx_CONFIG_IOCUTYPE GENMASK(11, 10) +#define CM_GCR_Cx_CONFIG_PVPE GENMASK(9, 0) -/** - * mips_cm_numiocu - return the number of IOCUs present in the system - * - * Returns the value of the NUMIOCU field of the GCR_CONFIG register, or zero - * if no Coherence Manager is present. - */ -static inline unsigned mips_cm_numiocu(void) -{ - if (!mips_cm_present()) - return 0; +/* GCR_Cx_OTHER - Configure the core-other/redirect GCR block */ +GCR_CX_ACCESSOR_RW(32, 0x018, other) +#define CM_GCR_Cx_OTHER_CORENUM GENMASK(31, 16) /* CM < 3 */ +#define CM_GCR_Cx_OTHER_CLUSTER_EN BIT(31) /* CM >= 3.5 */ +#define CM_GCR_Cx_OTHER_GIC_EN BIT(30) /* CM >= 3.5 */ +#define CM_GCR_Cx_OTHER_BLOCK GENMASK(25, 24) /* CM >= 3.5 */ +#define CM_GCR_Cx_OTHER_BLOCK_LOCAL 0 +#define CM_GCR_Cx_OTHER_BLOCK_GLOBAL 1 +#define CM_GCR_Cx_OTHER_BLOCK_USER 2 +#define CM_GCR_Cx_OTHER_BLOCK_GLOBAL_HIGH 3 +#define CM_GCR_Cx_OTHER_CLUSTER GENMASK(21, 16) /* CM >= 3.5 */ +#define CM3_GCR_Cx_OTHER_CORE GENMASK(13, 8) /* CM >= 3 */ +#define CM_GCR_Cx_OTHER_CORE_CM 32 +#define CM3_GCR_Cx_OTHER_VP GENMASK(2, 0) /* CM >= 3 */ - return (read_gcr_config() & CM_GCR_CONFIG_NUMIOCU_MSK) - >> CM_GCR_CONFIG_NUMIOCU_SHF; -} +/* GCR_Cx_RESET_BASE - Configure where powered up cores will fetch from */ +GCR_CX_ACCESSOR_RW(32, 0x020, reset_base) +#define CM_GCR_Cx_RESET_BASE_BEVEXCBASE GENMASK(31, 12) + +/* GCR_Cx_ID - Identify the current core */ +GCR_CX_ACCESSOR_RO(32, 0x028, id) +#define CM_GCR_Cx_ID_CLUSTER GENMASK(15, 8) +#define CM_GCR_Cx_ID_CORE GENMASK(7, 0) + +/* GCR_Cx_RESET_EXT_BASE - Configure behaviour when cores reset or power up */ +GCR_CX_ACCESSOR_RW(32, 0x030, reset_ext_base) +#define CM_GCR_Cx_RESET_EXT_BASE_EVARESET BIT(31) +#define CM_GCR_Cx_RESET_EXT_BASE_UEB BIT(30) +#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCMASK GENMASK(27, 20) +#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCPA GENMASK(7, 1) +#define CM_GCR_Cx_RESET_EXT_BASE_PRESENT BIT(0) /** * mips_cm_l2sync - perform an L2-only sync operation @@ -469,7 +369,7 @@ static inline unsigned int mips_cm_max_vp_width(void) uint32_t cfg; if (mips_cm_revision() >= CM_REV_CM3) - return read_gcr_sys_config2() & CM_GCR_SYS_CONFIG2_MAXVPW_MSK; + return read_gcr_sys_config2() & CM_GCR_SYS_CONFIG2_MAXVPW; if (mips_cm_present()) { /* @@ -477,8 +377,8 @@ static inline unsigned int mips_cm_max_vp_width(void) * number of VP(E)s, and if that ever changes then this will * need revisiting. 
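The GCR_Cx_ID definition above gains named cluster and core fields. A minimal sketch of decoding it through the cl_ ("core-local") accessor that GCR_CX_ACCESSOR_RO generates; purely illustrative:

/* Sketch: decode the executing core's ID register via the core-local
 * (cl_) accessor generated above.
 */
static void example_whoami(unsigned int *cluster, unsigned int *core)
{
	u32 id = read_gcr_cl_id();

	*cluster = (id & CM_GCR_Cx_ID_CLUSTER) >> __ffs(CM_GCR_Cx_ID_CLUSTER);
	*core = (id & CM_GCR_Cx_ID_CORE) >> __ffs(CM_GCR_Cx_ID_CORE);
}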
*/ - cfg = read_gcr_cl_config() & CM_GCR_Cx_CONFIG_PVPE_MSK; - return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1; + cfg = read_gcr_cl_config() & CM_GCR_Cx_CONFIG_PVPE; + return (cfg >> __ffs(CM_GCR_Cx_CONFIG_PVPE)) + 1; } if (IS_ENABLED(CONFIG_SMP)) @@ -499,7 +399,7 @@ static inline unsigned int mips_cm_max_vp_width(void) */ static inline unsigned int mips_cm_vp_id(unsigned int cpu) { - unsigned int core = cpu_data[cpu].core; + unsigned int core = cpu_core(&cpu_data[cpu]); unsigned int vp = cpu_vpe_id(&cpu_data[cpu]); return (core * mips_cm_max_vp_width()) + vp; @@ -508,29 +408,56 @@ static inline unsigned int mips_cm_vp_id(unsigned int cpu) #ifdef CONFIG_MIPS_CM /** - * mips_cm_lock_other - lock access to another core + * mips_cm_lock_other - lock access to redirect/other region + * @cluster: the other cluster to be accessed * @core: the other core to be accessed * @vp: the VP within the other core to be accessed + * @block: the register block to be accessed * - * Call before operating upon a core via the 'other' register region in - * order to prevent the region being moved during access. Must be followed - * by a call to mips_cm_unlock_other. + * Configure the redirect/other region for the local core/VP (depending upon + * the CM revision) to target the specified @cluster, @core, @vp & register + * @block. Must be called before using the redirect/other region, and followed + * by a call to mips_cm_unlock_other() when access to the redirect/other region + * is complete. + * + * This function acquires a spinlock such that code between it & + * mips_cm_unlock_other() calls cannot be pre-empted by anything which may + * reconfigure the redirect/other region, and cannot be interfered with by + * another VP in the core. As such calls to this function should not be nested. */ -extern void mips_cm_lock_other(unsigned int core, unsigned int vp); +extern void mips_cm_lock_other(unsigned int cluster, unsigned int core, + unsigned int vp, unsigned int block); /** - * mips_cm_unlock_other - unlock access to another core + * mips_cm_unlock_other - unlock access to redirect/other region * - * Call after operating upon another core via the 'other' register region. - * Must be called after mips_cm_lock_other. + * Must be called after mips_cm_lock_other() once all required access to the + * redirect/other region has been completed. */ extern void mips_cm_unlock_other(void); #else /* !CONFIG_MIPS_CM */ -static inline void mips_cm_lock_other(unsigned int core, unsigned int vp) { } +static inline void mips_cm_lock_other(unsigned int cluster, unsigned int core, + unsigned int vp, unsigned int block) { } static inline void mips_cm_unlock_other(void) { } #endif /* !CONFIG_MIPS_CM */ +/** + * mips_cm_lock_other_cpu - lock access to redirect/other region + * @cpu: the other CPU whose register we want to access + * + * Configure the redirect/other region for the local core/VP (depending upon + * the CM revision) to target the specified @cpu & register @block. This is + * equivalent to calling mips_cm_lock_other() but accepts a Linux CPU number + * for convenience. 
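The kernel-doc above, together with the mips_cm_lock_other_cpu() helper defined just below it, describes a strict lock/access/unlock discipline for the redirect/other region. A minimal sketch of a caller following that pattern, reading another CPU's coherence register; illustrative only:

/* Sketch: access a remote core's GCR block through the redirect region.
 * The lock must be held for the whole access and calls must not nest.
 */
static u32 example_peek_coherence(unsigned int cpu)
{
	u32 coh;

	mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
	coh = read_gcr_co_coherence();
	mips_cm_unlock_other();

	return coh;
}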
+ */ +static inline void mips_cm_lock_other_cpu(unsigned int cpu, unsigned int block) +{ + struct cpuinfo_mips *d = &cpu_data[cpu]; + + mips_cm_lock_other(cpu_cluster(d), cpu_core(d), cpu_vpe_id(d), block); +} + #endif /* __MIPS_ASM_MIPS_CM_H__ */ diff --git a/arch/mips/include/asm/mips-cpc.h b/arch/mips/include/asm/mips-cpc.h index 8c519f9827a3..f885051a8378 100644 --- a/arch/mips/include/asm/mips-cpc.h +++ b/arch/mips/include/asm/mips-cpc.h @@ -8,11 +8,15 @@ * option) any later version. */ +#ifndef __MIPS_ASM_MIPS_CPS_H__ +# error Please include asm/mips-cps.h rather than asm/mips-cpc.h +#endif + #ifndef __MIPS_ASM_MIPS_CPC_H__ #define __MIPS_ASM_MIPS_CPC_H__ -#include -#include +#include +#include /* The base address of the CPC registers */ extern void __iomem *mips_cpc_base; @@ -61,89 +65,92 @@ static inline bool mips_cpc_present(void) #define MIPS_CPC_CLCB_OFS 0x2000 #define MIPS_CPC_COCB_OFS 0x4000 -/* Macros to ease the creation of register access functions */ -#define BUILD_CPC_R_(name, off) \ -static inline u32 *addr_cpc_##name(void) \ -{ \ - return (u32 *)(mips_cpc_base + (off)); \ -} \ - \ -static inline u32 read_cpc_##name(void) \ -{ \ - return __raw_readl(mips_cpc_base + (off)); \ -} +#define CPC_ACCESSOR_RO(sz, off, name) \ + CPS_ACCESSOR_RO(cpc, sz, MIPS_CPC_GCB_OFS + off, name) \ + CPS_ACCESSOR_RO(cpc, sz, MIPS_CPC_COCB_OFS + off, redir_##name) -#define BUILD_CPC__W(name, off) \ -static inline void write_cpc_##name(u32 value) \ -{ \ - __raw_writel(value, mips_cpc_base + (off)); \ -} +#define CPC_ACCESSOR_RW(sz, off, name) \ + CPS_ACCESSOR_RW(cpc, sz, MIPS_CPC_GCB_OFS + off, name) \ + CPS_ACCESSOR_RW(cpc, sz, MIPS_CPC_COCB_OFS + off, redir_##name) -#define BUILD_CPC_RW(name, off) \ - BUILD_CPC_R_(name, off) \ - BUILD_CPC__W(name, off) +#define CPC_CX_ACCESSOR_RO(sz, off, name) \ + CPS_ACCESSOR_RO(cpc, sz, MIPS_CPC_CLCB_OFS + off, cl_##name) \ + CPS_ACCESSOR_RO(cpc, sz, MIPS_CPC_COCB_OFS + off, co_##name) -#define BUILD_CPC_Cx_R_(name, off) \ - BUILD_CPC_R_(cl_##name, MIPS_CPC_CLCB_OFS + (off)) \ - BUILD_CPC_R_(co_##name, MIPS_CPC_COCB_OFS + (off)) +#define CPC_CX_ACCESSOR_RW(sz, off, name) \ + CPS_ACCESSOR_RW(cpc, sz, MIPS_CPC_CLCB_OFS + off, cl_##name) \ + CPS_ACCESSOR_RW(cpc, sz, MIPS_CPC_COCB_OFS + off, co_##name) -#define BUILD_CPC_Cx__W(name, off) \ - BUILD_CPC__W(cl_##name, MIPS_CPC_CLCB_OFS + (off)) \ - BUILD_CPC__W(co_##name, MIPS_CPC_COCB_OFS + (off)) +/* CPC_ACCESS - Control core/IOCU access to CPC registers prior to CM 3 */ +CPC_ACCESSOR_RW(32, 0x000, access) -#define BUILD_CPC_Cx_RW(name, off) \ - BUILD_CPC_Cx_R_(name, off) \ - BUILD_CPC_Cx__W(name, off) +/* CPC_SEQDEL - Configure delays between command sequencer steps */ +CPC_ACCESSOR_RW(32, 0x008, seqdel) -/* GCB register accessor functions */ -BUILD_CPC_RW(access, MIPS_CPC_GCB_OFS + 0x00) -BUILD_CPC_RW(seqdel, MIPS_CPC_GCB_OFS + 0x08) -BUILD_CPC_RW(rail, MIPS_CPC_GCB_OFS + 0x10) -BUILD_CPC_RW(resetlen, MIPS_CPC_GCB_OFS + 0x18) -BUILD_CPC_R_(revision, MIPS_CPC_GCB_OFS + 0x20) +/* CPC_RAIL - Configure the delay from rail power-up to stability */ +CPC_ACCESSOR_RW(32, 0x010, rail) -/* Core Local & Core Other accessor functions */ -BUILD_CPC_Cx_RW(cmd, 0x00) -BUILD_CPC_Cx_RW(stat_conf, 0x08) -BUILD_CPC_Cx_RW(other, 0x10) -BUILD_CPC_Cx_RW(vp_stop, 0x20) -BUILD_CPC_Cx_RW(vp_run, 0x28) -BUILD_CPC_Cx_RW(vp_running, 0x30) +/* CPC_RESETLEN - Configure the length of reset sequences */ +CPC_ACCESSOR_RW(32, 0x018, resetlen) -/* CPC_Cx_CMD register fields */ -#define CPC_Cx_CMD_SHF 0 -#define CPC_Cx_CMD_MSK (_ULCAST_(0xf) << 
0) -#define CPC_Cx_CMD_CLOCKOFF (_ULCAST_(0x1) << 0) -#define CPC_Cx_CMD_PWRDOWN (_ULCAST_(0x2) << 0) -#define CPC_Cx_CMD_PWRUP (_ULCAST_(0x3) << 0) -#define CPC_Cx_CMD_RESET (_ULCAST_(0x4) << 0) +/* CPC_REVISION - Indicates the revisison of the CPC */ +CPC_ACCESSOR_RO(32, 0x020, revision) -/* CPC_Cx_STAT_CONF register fields */ -#define CPC_Cx_STAT_CONF_PWRUPE_SHF 23 -#define CPC_Cx_STAT_CONF_PWRUPE_MSK (_ULCAST_(0x1) << 23) -#define CPC_Cx_STAT_CONF_SEQSTATE_SHF 19 -#define CPC_Cx_STAT_CONF_SEQSTATE_MSK (_ULCAST_(0xf) << 19) -#define CPC_Cx_STAT_CONF_SEQSTATE_D0 (_ULCAST_(0x0) << 19) -#define CPC_Cx_STAT_CONF_SEQSTATE_U0 (_ULCAST_(0x1) << 19) -#define CPC_Cx_STAT_CONF_SEQSTATE_U1 (_ULCAST_(0x2) << 19) -#define CPC_Cx_STAT_CONF_SEQSTATE_U2 (_ULCAST_(0x3) << 19) -#define CPC_Cx_STAT_CONF_SEQSTATE_U3 (_ULCAST_(0x4) << 19) -#define CPC_Cx_STAT_CONF_SEQSTATE_U4 (_ULCAST_(0x5) << 19) -#define CPC_Cx_STAT_CONF_SEQSTATE_U5 (_ULCAST_(0x6) << 19) -#define CPC_Cx_STAT_CONF_SEQSTATE_U6 (_ULCAST_(0x7) << 19) -#define CPC_Cx_STAT_CONF_SEQSTATE_D1 (_ULCAST_(0x8) << 19) -#define CPC_Cx_STAT_CONF_SEQSTATE_D3 (_ULCAST_(0x9) << 19) -#define CPC_Cx_STAT_CONF_SEQSTATE_D2 (_ULCAST_(0xa) << 19) -#define CPC_Cx_STAT_CONF_CLKGAT_IMPL_SHF 17 -#define CPC_Cx_STAT_CONF_CLKGAT_IMPL_MSK (_ULCAST_(0x1) << 17) -#define CPC_Cx_STAT_CONF_PWRDN_IMPL_SHF 16 -#define CPC_Cx_STAT_CONF_PWRDN_IMPL_MSK (_ULCAST_(0x1) << 16) -#define CPC_Cx_STAT_CONF_EJTAG_PROBE_SHF 15 -#define CPC_Cx_STAT_CONF_EJTAG_PROBE_MSK (_ULCAST_(0x1) << 15) +/* CPC_PWRUP_CTL - Control power to the Coherence Manager (CM) */ +CPC_ACCESSOR_RW(32, 0x030, pwrup_ctl) +#define CPC_PWRUP_CTL_CM_PWRUP BIT(0) -/* CPC_Cx_OTHER register fields */ -#define CPC_Cx_OTHER_CORENUM_SHF 16 -#define CPC_Cx_OTHER_CORENUM_MSK (_ULCAST_(0xff) << 16) +/* CPC_CONFIG - Mirrors GCR_CONFIG */ +CPC_ACCESSOR_RW(64, 0x138, config) + +/* CPC_SYS_CONFIG - Control cluster endianness */ +CPC_ACCESSOR_RW(32, 0x140, sys_config) +#define CPC_SYS_CONFIG_BE_IMMEDIATE BIT(2) +#define CPC_SYS_CONFIG_BE_STATUS BIT(1) +#define CPC_SYS_CONFIG_BE BIT(0) + +/* CPC_Cx_CMD - Instruct the CPC to take action on a core */ +CPC_CX_ACCESSOR_RW(32, 0x000, cmd) +#define CPC_Cx_CMD GENMASK(3, 0) +#define CPC_Cx_CMD_CLOCKOFF 0x1 +#define CPC_Cx_CMD_PWRDOWN 0x2 +#define CPC_Cx_CMD_PWRUP 0x3 +#define CPC_Cx_CMD_RESET 0x4 + +/* CPC_Cx_STAT_CONF - Indicates core configuration & state */ +CPC_CX_ACCESSOR_RW(32, 0x008, stat_conf) +#define CPC_Cx_STAT_CONF_PWRUPE BIT(23) +#define CPC_Cx_STAT_CONF_SEQSTATE GENMASK(22, 19) +#define CPC_Cx_STAT_CONF_SEQSTATE_D0 0x0 +#define CPC_Cx_STAT_CONF_SEQSTATE_U0 0x1 +#define CPC_Cx_STAT_CONF_SEQSTATE_U1 0x2 +#define CPC_Cx_STAT_CONF_SEQSTATE_U2 0x3 +#define CPC_Cx_STAT_CONF_SEQSTATE_U3 0x4 +#define CPC_Cx_STAT_CONF_SEQSTATE_U4 0x5 +#define CPC_Cx_STAT_CONF_SEQSTATE_U5 0x6 +#define CPC_Cx_STAT_CONF_SEQSTATE_U6 0x7 +#define CPC_Cx_STAT_CONF_SEQSTATE_D1 0x8 +#define CPC_Cx_STAT_CONF_SEQSTATE_D3 0x9 +#define CPC_Cx_STAT_CONF_SEQSTATE_D2 0xa +#define CPC_Cx_STAT_CONF_CLKGAT_IMPL BIT(17) +#define CPC_Cx_STAT_CONF_PWRDN_IMPL BIT(16) +#define CPC_Cx_STAT_CONF_EJTAG_PROBE BIT(15) + +/* CPC_Cx_OTHER - Configure the core-other register block prior to CM 3 */ +CPC_CX_ACCESSOR_RW(32, 0x010, other) +#define CPC_Cx_OTHER_CORENUM GENMASK(23, 16) + +/* CPC_Cx_VP_STOP - Stop Virtual Processors (VPs) within a core from running */ +CPC_CX_ACCESSOR_RW(32, 0x020, vp_stop) + +/* CPC_Cx_VP_START - Start Virtual Processors (VPs) within a core running */ +CPC_CX_ACCESSOR_RW(32, 0x028, vp_run) + +/* 
CPC_Cx_VP_RUNNING - Indicate which Virtual Processors (VPs) are running */ +CPC_CX_ACCESSOR_RW(32, 0x030, vp_running) + +/* CPC_Cx_CONFIG - Mirrors GCR_Cx_CONFIG */ +CPC_CX_ACCESSOR_RW(32, 0x090, config) #ifdef CONFIG_MIPS_CPC diff --git a/arch/mips/include/asm/mips-cps.h b/arch/mips/include/asm/mips-cps.h new file mode 100644 index 000000000000..bf02b5070a98 --- /dev/null +++ b/arch/mips/include/asm/mips-cps.h @@ -0,0 +1,240 @@ +/* + * Copyright (C) 2017 Imagination Technologies + * Author: Paul Burton + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef __MIPS_ASM_MIPS_CPS_H__ +#define __MIPS_ASM_MIPS_CPS_H__ + +#include +#include + +extern unsigned long __cps_access_bad_size(void) + __compiletime_error("Bad size for CPS accessor"); + +#define CPS_ACCESSOR_A(unit, off, name) \ +static inline void *addr_##unit##_##name(void) \ +{ \ + return mips_##unit##_base + (off); \ +} + +#define CPS_ACCESSOR_R(unit, sz, name) \ +static inline uint##sz##_t read_##unit##_##name(void) \ +{ \ + uint64_t val64; \ + \ + switch (sz) { \ + case 32: \ + return __raw_readl(addr_##unit##_##name()); \ + \ + case 64: \ + if (mips_cm_is64) \ + return __raw_readq(addr_##unit##_##name()); \ + \ + val64 = __raw_readl(addr_##unit##_##name() + 4); \ + val64 <<= 32; \ + val64 |= __raw_readl(addr_##unit##_##name()); \ + return val64; \ + \ + default: \ + return __cps_access_bad_size(); \ + } \ +} + +#define CPS_ACCESSOR_W(unit, sz, name) \ +static inline void write_##unit##_##name(uint##sz##_t val) \ +{ \ + switch (sz) { \ + case 32: \ + __raw_writel(val, addr_##unit##_##name()); \ + break; \ + \ + case 64: \ + if (mips_cm_is64) { \ + __raw_writeq(val, addr_##unit##_##name()); \ + break; \ + } \ + \ + __raw_writel((uint64_t)val >> 32, \ + addr_##unit##_##name() + 4); \ + __raw_writel(val, addr_##unit##_##name()); \ + break; \ + \ + default: \ + __cps_access_bad_size(); \ + break; \ + } \ +} + +#define CPS_ACCESSOR_M(unit, sz, name) \ +static inline void change_##unit##_##name(uint##sz##_t mask, \ + uint##sz##_t val) \ +{ \ + uint##sz##_t reg_val = read_##unit##_##name(); \ + reg_val &= ~mask; \ + reg_val |= val; \ + write_##unit##_##name(reg_val); \ +} \ + \ +static inline void set_##unit##_##name(uint##sz##_t val) \ +{ \ + change_##unit##_##name(val, val); \ +} \ + \ +static inline void clear_##unit##_##name(uint##sz##_t val) \ +{ \ + change_##unit##_##name(val, 0); \ +} + +#define CPS_ACCESSOR_RO(unit, sz, off, name) \ + CPS_ACCESSOR_A(unit, off, name) \ + CPS_ACCESSOR_R(unit, sz, name) + +#define CPS_ACCESSOR_WO(unit, sz, off, name) \ + CPS_ACCESSOR_A(unit, off, name) \ + CPS_ACCESSOR_W(unit, sz, name) + +#define CPS_ACCESSOR_RW(unit, sz, off, name) \ + CPS_ACCESSOR_A(unit, off, name) \ + CPS_ACCESSOR_R(unit, sz, name) \ + CPS_ACCESSOR_W(unit, sz, name) \ + CPS_ACCESSOR_M(unit, sz, name) + +#include +#include +#include + +/** + * mips_cps_numclusters - return the number of clusters present in the system + * + * Returns the number of clusters in the system. 
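To make the accessor layering above concrete: a single line such as GCR_ACCESSOR_RW(32, 0x020, access) boils down, through the CPS_ACCESSOR_A/R/W/M building blocks, to roughly the following helpers. This is a hand-expanded, simplified sketch; the real macros also emit the redir_ variant and handle split 64-bit accesses when mips_cm_is64 is false:

/* Simplified hand expansion of CPS_ACCESSOR_RW(gcr, 32, off, access). */
static inline void *addr_gcr_access(void)
{
	return mips_gcr_base + (MIPS_CM_GCB_OFS + 0x020);
}

static inline uint32_t read_gcr_access(void)
{
	return __raw_readl(addr_gcr_access());
}

static inline void write_gcr_access(uint32_t val)
{
	__raw_writel(val, addr_gcr_access());
}

static inline void change_gcr_access(uint32_t mask, uint32_t val)
{
	uint32_t reg = read_gcr_access();

	reg &= ~mask;
	reg |= val;
	write_gcr_access(reg);
}

/* set_gcr_access(v) and clear_gcr_access(v) follow as
 * change_gcr_access(v, v) and change_gcr_access(v, 0) respectively.
 */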
+ */ +static inline unsigned int mips_cps_numclusters(void) +{ + unsigned int num_clusters; + + if (mips_cm_revision() < CM_REV_CM3_5) + return 1; + + num_clusters = read_gcr_config() & CM_GCR_CONFIG_NUM_CLUSTERS; + num_clusters >>= __ffs(CM_GCR_CONFIG_NUM_CLUSTERS); + return num_clusters; +} + +/** + * mips_cps_cluster_config - return (GCR|CPC)_CONFIG from a cluster + * @cluster: the ID of the cluster whose config we want + * + * Read the value of GCR_CONFIG (or its CPC_CONFIG mirror) from a @cluster. + * + * Returns the value of GCR_CONFIG. + */ +static inline uint64_t mips_cps_cluster_config(unsigned int cluster) +{ + uint64_t config; + + if (mips_cm_revision() < CM_REV_CM3_5) { + /* + * Prior to CM 3.5 we don't have the notion of multiple + * clusters so we can trivially read the GCR_CONFIG register + * within this cluster. + */ + WARN_ON(cluster != 0); + config = read_gcr_config(); + } else { + /* + * From CM 3.5 onwards we read the CPC_CONFIG mirror of + * GCR_CONFIG via the redirect region, since the CPC is always + * powered up allowing us not to need to power up the CM. + */ + mips_cm_lock_other(cluster, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL); + config = read_cpc_redir_config(); + mips_cm_unlock_other(); + } + + return config; +} + +/** + * mips_cps_numcores - return the number of cores present in a cluster + * @cluster: the ID of the cluster whose core count we want + * + * Returns the value of the PCORES field of the GCR_CONFIG register plus 1, or + * zero if no Coherence Manager is present. + */ +static inline unsigned int mips_cps_numcores(unsigned int cluster) +{ + if (!mips_cm_present()) + return 0; + + /* Add one before masking to handle 0xff indicating no cores */ + return (mips_cps_cluster_config(cluster) + 1) & CM_GCR_CONFIG_PCORES; +} + +/** + * mips_cps_numiocu - return the number of IOCUs present in a cluster + * @cluster: the ID of the cluster whose IOCU count we want + * + * Returns the value of the NUMIOCU field of the GCR_CONFIG register, or zero + * if no Coherence Manager is present. + */ +static inline unsigned int mips_cps_numiocu(unsigned int cluster) +{ + unsigned int num_iocu; + + if (!mips_cm_present()) + return 0; + + num_iocu = mips_cps_cluster_config(cluster) & CM_GCR_CONFIG_NUMIOCU; + num_iocu >>= __ffs(CM_GCR_CONFIG_NUMIOCU); + return num_iocu; +} + +/** + * mips_cps_numvps - return the number of VPs (threads) supported by a core + * @cluster: the ID of the cluster containing the core we want to examine + * @core: the ID of the core whose VP count we want + * + * Returns the number of Virtual Processors (VPs, ie. hardware threads) that + * are supported by the given @core in the given @cluster. If the core or the + * kernel do not support hardware mutlti-threading this returns 1. + */ +static inline unsigned int mips_cps_numvps(unsigned int cluster, unsigned int core) +{ + unsigned int cfg; + + if (!mips_cm_present()) + return 1; + + if ((!IS_ENABLED(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt) + && (!IS_ENABLED(CONFIG_CPU_MIPSR6) || !cpu_has_vp)) + return 1; + + mips_cm_lock_other(cluster, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL); + + if (mips_cm_revision() < CM_REV_CM3_5) { + /* + * Prior to CM 3.5 we can only have one cluster & don't have + * CPC_Cx_CONFIG, so we read GCR_Cx_CONFIG. + */ + cfg = read_gcr_co_config(); + } else { + /* + * From CM 3.5 onwards we read CPC_Cx_CONFIG because the CPC is + * always powered, which allows us to not worry about powering + * up the cluster's CM here. 
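One subtlety in mips_cps_numcores() above is worth spelling out: PCORES encodes the core count minus one (as the removed mips_cm_numcores() kernel-doc also put it), and a raw value of 0xff signals that no cores are present, so adding one before masking makes both cases fall out of a single expression. Worked with hypothetical register values:

/* CM_GCR_CONFIG_PCORES is GENMASK(7, 0) == 0xff, so:
 *   PCORES == 0x01  ->  (0x01 + 1) & 0xff == 2   (two cores)
 *   PCORES == 0xff  ->  (0xff + 1) & 0xff == 0   (no cores present)
 */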
+ */ + cfg = read_cpc_co_config(); + } + + mips_cm_unlock_other(); + + return (cfg + 1) & CM_GCR_Cx_CONFIG_PVPE; +} + +#endif /* __MIPS_ASM_MIPS_CPS_H__ */ diff --git a/arch/mips/include/asm/mips-gic.h b/arch/mips/include/asm/mips-gic.h new file mode 100644 index 000000000000..a2badf572632 --- /dev/null +++ b/arch/mips/include/asm/mips-gic.h @@ -0,0 +1,347 @@ +/* + * Copyright (C) 2017 Imagination Technologies + * Author: Paul Burton + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef __MIPS_ASM_MIPS_CPS_H__ +# error Please include asm/mips-cps.h rather than asm/mips-gic.h +#endif + +#ifndef __MIPS_ASM_MIPS_GIC_H__ +#define __MIPS_ASM_MIPS_GIC_H__ + +#include + +/* The base address of the GIC registers */ +extern void __iomem *mips_gic_base; + +/* Offsets from the GIC base address to various control blocks */ +#define MIPS_GIC_SHARED_OFS 0x00000 +#define MIPS_GIC_SHARED_SZ 0x08000 +#define MIPS_GIC_LOCAL_OFS 0x08000 +#define MIPS_GIC_LOCAL_SZ 0x04000 +#define MIPS_GIC_REDIR_OFS 0x0c000 +#define MIPS_GIC_REDIR_SZ 0x04000 +#define MIPS_GIC_USER_OFS 0x10000 +#define MIPS_GIC_USER_SZ 0x10000 + +/* For read-only shared registers */ +#define GIC_ACCESSOR_RO(sz, off, name) \ + CPS_ACCESSOR_RO(gic, sz, MIPS_GIC_SHARED_OFS + off, name) + +/* For read-write shared registers */ +#define GIC_ACCESSOR_RW(sz, off, name) \ + CPS_ACCESSOR_RW(gic, sz, MIPS_GIC_SHARED_OFS + off, name) + +/* For read-only local registers */ +#define GIC_VX_ACCESSOR_RO(sz, off, name) \ + CPS_ACCESSOR_RO(gic, sz, MIPS_GIC_LOCAL_OFS + off, vl_##name) \ + CPS_ACCESSOR_RO(gic, sz, MIPS_GIC_REDIR_OFS + off, vo_##name) + +/* For read-write local registers */ +#define GIC_VX_ACCESSOR_RW(sz, off, name) \ + CPS_ACCESSOR_RW(gic, sz, MIPS_GIC_LOCAL_OFS + off, vl_##name) \ + CPS_ACCESSOR_RW(gic, sz, MIPS_GIC_REDIR_OFS + off, vo_##name) + +/* For read-only shared per-interrupt registers */ +#define GIC_ACCESSOR_RO_INTR_REG(sz, off, stride, name) \ +static inline void __iomem *addr_gic_##name(unsigned int intr) \ +{ \ + return mips_gic_base + (off) + (intr * (stride)); \ +} \ + \ +static inline unsigned int read_gic_##name(unsigned int intr) \ +{ \ + BUILD_BUG_ON(sz != 32); \ + return __raw_readl(addr_gic_##name(intr)); \ +} + +/* For read-write shared per-interrupt registers */ +#define GIC_ACCESSOR_RW_INTR_REG(sz, off, stride, name) \ + GIC_ACCESSOR_RO_INTR_REG(sz, off, stride, name) \ + \ +static inline void write_gic_##name(unsigned int intr, \ + unsigned int val) \ +{ \ + BUILD_BUG_ON(sz != 32); \ + __raw_writel(val, addr_gic_##name(intr)); \ +} + +/* For read-only local per-interrupt registers */ +#define GIC_VX_ACCESSOR_RO_INTR_REG(sz, off, stride, name) \ + GIC_ACCESSOR_RO_INTR_REG(sz, MIPS_GIC_LOCAL_OFS + off, \ + stride, vl_##name) \ + GIC_ACCESSOR_RO_INTR_REG(sz, MIPS_GIC_REDIR_OFS + off, \ + stride, vo_##name) + +/* For read-write local per-interrupt registers */ +#define GIC_VX_ACCESSOR_RW_INTR_REG(sz, off, stride, name) \ + GIC_ACCESSOR_RW_INTR_REG(sz, MIPS_GIC_LOCAL_OFS + off, \ + stride, vl_##name) \ + GIC_ACCESSOR_RW_INTR_REG(sz, MIPS_GIC_REDIR_OFS + off, \ + stride, vo_##name) + +/* For read-only shared bit-per-interrupt registers */ +#define GIC_ACCESSOR_RO_INTR_BIT(off, name) \ +static inline void __iomem *addr_gic_##name(void) \ +{ \ + return mips_gic_base + (off); \ +} \ + \ +static inline unsigned 
int read_gic_##name(unsigned int intr) \ +{ \ + void __iomem *addr = addr_gic_##name(); \ + unsigned int val; \ + \ + if (mips_cm_is64) { \ + addr += (intr / 64) * sizeof(uint64_t); \ + val = __raw_readq(addr) >> intr % 64; \ + } else { \ + addr += (intr / 32) * sizeof(uint32_t); \ + val = __raw_readl(addr) >> intr % 32; \ + } \ + \ + return val & 0x1; \ +} + +/* For read-write shared bit-per-interrupt registers */ +#define GIC_ACCESSOR_RW_INTR_BIT(off, name) \ + GIC_ACCESSOR_RO_INTR_BIT(off, name) \ + \ +static inline void write_gic_##name(unsigned int intr) \ +{ \ + void __iomem *addr = addr_gic_##name(); \ + \ + if (mips_cm_is64) { \ + addr += (intr / 64) * sizeof(uint64_t); \ + __raw_writeq(BIT(intr % 64), addr); \ + } else { \ + addr += (intr / 32) * sizeof(uint32_t); \ + __raw_writel(BIT(intr % 32), addr); \ + } \ +} \ + \ +static inline void change_gic_##name(unsigned int intr, \ + unsigned int val) \ +{ \ + void __iomem *addr = addr_gic_##name(); \ + \ + if (mips_cm_is64) { \ + uint64_t _val; \ + \ + addr += (intr / 64) * sizeof(uint64_t); \ + _val = __raw_readq(addr); \ + _val &= ~BIT_ULL(intr % 64); \ + _val |= (uint64_t)val << (intr % 64); \ + __raw_writeq(_val, addr); \ + } else { \ + uint32_t _val; \ + \ + addr += (intr / 32) * sizeof(uint32_t); \ + _val = __raw_readl(addr); \ + _val &= ~BIT(intr % 32); \ + _val |= val << (intr % 32); \ + __raw_writel(_val, addr); \ + } \ +} + +/* For read-only local bit-per-interrupt registers */ +#define GIC_VX_ACCESSOR_RO_INTR_BIT(sz, off, name) \ + GIC_ACCESSOR_RO_INTR_BIT(sz, MIPS_GIC_LOCAL_OFS + off, \ + vl_##name) \ + GIC_ACCESSOR_RO_INTR_BIT(sz, MIPS_GIC_REDIR_OFS + off, \ + vo_##name) + +/* For read-write local bit-per-interrupt registers */ +#define GIC_VX_ACCESSOR_RW_INTR_BIT(sz, off, name) \ + GIC_ACCESSOR_RW_INTR_BIT(sz, MIPS_GIC_LOCAL_OFS + off, \ + vl_##name) \ + GIC_ACCESSOR_RW_INTR_BIT(sz, MIPS_GIC_REDIR_OFS + off, \ + vo_##name) + +/* GIC_SH_CONFIG - Information about the GIC configuration */ +GIC_ACCESSOR_RW(32, 0x000, config) +#define GIC_CONFIG_COUNTSTOP BIT(28) +#define GIC_CONFIG_COUNTBITS GENMASK(27, 24) +#define GIC_CONFIG_NUMINTERRUPTS GENMASK(23, 16) +#define GIC_CONFIG_PVPS GENMASK(6, 0) + +/* GIC_SH_COUNTER - Shared global counter value */ +GIC_ACCESSOR_RW(64, 0x010, counter) +GIC_ACCESSOR_RW(32, 0x010, counter_32l) +GIC_ACCESSOR_RW(32, 0x014, counter_32h) + +/* GIC_SH_POL_* - Configures interrupt polarity */ +GIC_ACCESSOR_RW_INTR_BIT(0x100, pol) +#define GIC_POL_ACTIVE_LOW 0 /* when level triggered */ +#define GIC_POL_ACTIVE_HIGH 1 /* when level triggered */ +#define GIC_POL_FALLING_EDGE 0 /* when single-edge triggered */ +#define GIC_POL_RISING_EDGE 1 /* when single-edge triggered */ + +/* GIC_SH_TRIG_* - Configures interrupts to be edge or level triggered */ +GIC_ACCESSOR_RW_INTR_BIT(0x180, trig) +#define GIC_TRIG_LEVEL 0 +#define GIC_TRIG_EDGE 1 + +/* GIC_SH_DUAL_* - Configures whether interrupts trigger on both edges */ +GIC_ACCESSOR_RW_INTR_BIT(0x200, dual) +#define GIC_DUAL_SINGLE 0 /* when edge-triggered */ +#define GIC_DUAL_DUAL 1 /* when edge-triggered */ + +/* GIC_SH_WEDGE - Write an 'edge', ie. 
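The bit-per-interrupt accessors above locate an interrupt's bit by splitting the interrupt number into a word index and a bit offset, using 64-bit words when mips_cm_is64 is set and 32-bit words otherwise. The following user-space model reproduces just that arithmetic over an ordinary array; the register bank size and the write-one-to-set behaviour are illustrative stand-ins for the real __raw_readl/__raw_writel MMIO accesses.

#include <stdint.h>
#include <stdio.h>

#define GIC_MAX_INTRS 256

/* Stand-in for a bank of 32-bit mask/pend registers, one bit per interrupt */
static uint32_t fake_regs[GIC_MAX_INTRS / 32];

static unsigned int read_intr_bit(unsigned int intr)
{
        /* Same arithmetic as the accessor above: pick the word, then the bit */
        return (fake_regs[intr / 32] >> (intr % 32)) & 0x1;
}

static void write_intr_bit(unsigned int intr)
{
        /* Set-style register: writing a 1 touches only that interrupt's bit */
        fake_regs[intr / 32] |= 1u << (intr % 32);
}

int main(void)
{
        write_intr_bit(42);     /* lands in word 1, bit 10 */
        printf("intr 42: %u, intr 43: %u\n",
               read_intr_bit(42), read_intr_bit(43));
        return 0;
}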
trigger an interrupt */ +GIC_ACCESSOR_RW(32, 0x280, wedge) +#define GIC_WEDGE_RW BIT(31) +#define GIC_WEDGE_INTR GENMASK(7, 0) + +/* GIC_SH_RMASK_* - Reset/clear shared interrupt mask bits */ +GIC_ACCESSOR_RW_INTR_BIT(0x300, rmask) + +/* GIC_SH_SMASK_* - Set shared interrupt mask bits */ +GIC_ACCESSOR_RW_INTR_BIT(0x380, smask) + +/* GIC_SH_MASK_* - Read the current shared interrupt mask */ +GIC_ACCESSOR_RO_INTR_BIT(0x400, mask) + +/* GIC_SH_PEND_* - Read currently pending shared interrupts */ +GIC_ACCESSOR_RO_INTR_BIT(0x480, pend) + +/* GIC_SH_MAPx_PIN - Map shared interrupts to a particular CPU pin */ +GIC_ACCESSOR_RW_INTR_REG(32, 0x500, 0x4, map_pin) +#define GIC_MAP_PIN_MAP_TO_PIN BIT(31) +#define GIC_MAP_PIN_MAP_TO_NMI BIT(30) +#define GIC_MAP_PIN_MAP GENMASK(5, 0) + +/* GIC_SH_MAPx_VP - Map shared interrupts to a particular Virtual Processor */ +GIC_ACCESSOR_RW_INTR_REG(32, 0x2000, 0x20, map_vp) + +/* GIC_Vx_CTL - VP-level interrupt control */ +GIC_VX_ACCESSOR_RW(32, 0x000, ctl) +#define GIC_VX_CTL_FDC_ROUTABLE BIT(4) +#define GIC_VX_CTL_SWINT_ROUTABLE BIT(3) +#define GIC_VX_CTL_PERFCNT_ROUTABLE BIT(2) +#define GIC_VX_CTL_TIMER_ROUTABLE BIT(1) +#define GIC_VX_CTL_EIC BIT(0) + +/* GIC_Vx_PEND - Read currently pending local interrupts */ +GIC_VX_ACCESSOR_RO(32, 0x004, pend) + +/* GIC_Vx_MASK - Read the current local interrupt mask */ +GIC_VX_ACCESSOR_RO(32, 0x008, mask) + +/* GIC_Vx_RMASK - Reset/clear local interrupt mask bits */ +GIC_VX_ACCESSOR_RW(32, 0x00c, rmask) + +/* GIC_Vx_SMASK - Set local interrupt mask bits */ +GIC_VX_ACCESSOR_RW(32, 0x010, smask) + +/* GIC_Vx_*_MAP - Route local interrupts to the desired pins */ +GIC_VX_ACCESSOR_RW_INTR_REG(32, 0x040, 0x4, map) + +/* GIC_Vx_WD_MAP - Route the local watchdog timer interrupt */ +GIC_VX_ACCESSOR_RW(32, 0x040, wd_map) + +/* GIC_Vx_COMPARE_MAP - Route the local count/compare interrupt */ +GIC_VX_ACCESSOR_RW(32, 0x044, compare_map) + +/* GIC_Vx_TIMER_MAP - Route the local CPU timer (cp0 count/compare) interrupt */ +GIC_VX_ACCESSOR_RW(32, 0x048, timer_map) + +/* GIC_Vx_FDC_MAP - Route the local fast debug channel interrupt */ +GIC_VX_ACCESSOR_RW(32, 0x04c, fdc_map) + +/* GIC_Vx_PERFCTR_MAP - Route the local performance counter interrupt */ +GIC_VX_ACCESSOR_RW(32, 0x050, perfctr_map) + +/* GIC_Vx_SWINT0_MAP - Route the local software interrupt 0 */ +GIC_VX_ACCESSOR_RW(32, 0x054, swint0_map) + +/* GIC_Vx_SWINT1_MAP - Route the local software interrupt 1 */ +GIC_VX_ACCESSOR_RW(32, 0x058, swint1_map) + +/* GIC_Vx_OTHER - Configure access to other Virtual Processor registers */ +GIC_VX_ACCESSOR_RW(32, 0x080, other) +#define GIC_VX_OTHER_VPNUM GENMASK(5, 0) + +/* GIC_Vx_IDENT - Retrieve the local Virtual Processor's ID */ +GIC_VX_ACCESSOR_RO(32, 0x088, ident) +#define GIC_VX_IDENT_VPNUM GENMASK(5, 0) + +/* GIC_Vx_COMPARE - Value to compare with GIC_SH_COUNTER */ +GIC_VX_ACCESSOR_RW(64, 0x0a0, compare) + +/* GIC_Vx_EIC_SHADOW_SET_BASE - Set shadow register set for each interrupt */ +GIC_VX_ACCESSOR_RW_INTR_REG(32, 0x100, 0x4, eic_shadow_set) + +/** + * enum mips_gic_local_interrupt - GIC local interrupts + * @GIC_LOCAL_INT_WD: GIC watchdog timer interrupt + * @GIC_LOCAL_INT_COMPARE: GIC count/compare interrupt + * @GIC_LOCAL_INT_TIMER: CP0 count/compare interrupt + * @GIC_LOCAL_INT_PERFCTR: Performance counter interrupt + * @GIC_LOCAL_INT_SWINT0: Software interrupt 0 + * @GIC_LOCAL_INT_SWINT1: Software interrupt 1 + * @GIC_LOCAL_INT_FDC: Fast debug channel interrupt + * @GIC_NUM_LOCAL_INTRS: The number of local interrupts + * + * 
Enumerates interrupts provided by the GIC that are local to a VP. + */ +enum mips_gic_local_interrupt { + GIC_LOCAL_INT_WD, + GIC_LOCAL_INT_COMPARE, + GIC_LOCAL_INT_TIMER, + GIC_LOCAL_INT_PERFCTR, + GIC_LOCAL_INT_SWINT0, + GIC_LOCAL_INT_SWINT1, + GIC_LOCAL_INT_FDC, + GIC_NUM_LOCAL_INTRS +}; + +/** + * mips_gic_present() - Determine whether a GIC is present + * + * Determines whether a MIPS Global Interrupt Controller (GIC) is present in + * the system that the kernel is running on. + * + * Return true if a GIC is present, else false. + */ +static inline bool mips_gic_present(void) +{ + return IS_ENABLED(CONFIG_MIPS_GIC) && mips_gic_base; +} + +/** + * gic_get_c0_compare_int() - Return cp0 count/compare interrupt virq + * + * Determine the virq number to use for the coprocessor 0 count/compare + * interrupt, which may be routed via the GIC. + * + * Returns the virq number or a negative error number. + */ +extern int gic_get_c0_compare_int(void); + +/** + * gic_get_c0_perfcount_int() - Return performance counter interrupt virq + * + * Determine the virq number to use for CPU performance counter interrupts, + * which may be routed via the GIC. + * + * Returns the virq number or a negative error number. + */ +extern int gic_get_c0_perfcount_int(void); + +/** + * gic_get_c0_fdc_int() - Return fast debug channel interrupt virq + * + * Determine the virq number to use for fast debug channel (FDC) interrupts, + * which may be routed via the GIC. + * + * Returns the virq number or a negative error number. + */ +extern int gic_get_c0_fdc_int(void); + +#endif /* __MIPS_ASM_MIPS_CPS_H__ */ diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index dbb0eceda2c6..a6810923b3f0 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -48,6 +48,7 @@ #define CP0_ENTRYLO0 $2 #define CP0_ENTRYLO1 $3 #define CP0_CONF $3 +#define CP0_GLOBALNUMBER $3, 1 #define CP0_CONTEXT $4 #define CP0_PAGEMASK $5 #define CP0_SEGCTL0 $5, 2 @@ -147,6 +148,16 @@ #define MIPS_ENTRYLO_XI (_ULCAST_(1) << (BITS_PER_LONG - 2)) #define MIPS_ENTRYLO_RI (_ULCAST_(1) << (BITS_PER_LONG - 1)) +/* + * MIPSr6+ GlobalNumber register definitions + */ +#define MIPS_GLOBALNUMBER_VP_SHF 0 +#define MIPS_GLOBALNUMBER_VP (_ULCAST_(0xff) << MIPS_GLOBALNUMBER_VP_SHF) +#define MIPS_GLOBALNUMBER_CORE_SHF 8 +#define MIPS_GLOBALNUMBER_CORE (_ULCAST_(0xff) << MIPS_GLOBALNUMBER_CORE_SHF) +#define MIPS_GLOBALNUMBER_CLUSTER_SHF 16 +#define MIPS_GLOBALNUMBER_CLUSTER (_ULCAST_(0xf) << MIPS_GLOBALNUMBER_CLUSTER_SHF) + /* * Values for PageMask register */ @@ -1366,29 +1377,32 @@ do { \ #define __write_64bit_c0_split(source, sel, val) \ do { \ + unsigned long long __tmp; \ unsigned long __flags; \ \ local_irq_save(__flags); \ if (sel == 0) \ __asm__ __volatile__( \ ".set\tmips64\n\t" \ - "dsll\t%L0, %L0, 32\n\t" \ + "dsll\t%L0, %L1, 32\n\t" \ "dsrl\t%L0, %L0, 32\n\t" \ - "dsll\t%M0, %M0, 32\n\t" \ + "dsll\t%M0, %M1, 32\n\t" \ "or\t%L0, %L0, %M0\n\t" \ "dmtc0\t%L0, " #source "\n\t" \ ".set\tmips0" \ - : : "r" (val)); \ + : "=&r,r" (__tmp) \ + : "r,0" (val)); \ else \ __asm__ __volatile__( \ ".set\tmips64\n\t" \ - "dsll\t%L0, %L0, 32\n\t" \ + "dsll\t%L0, %L1, 32\n\t" \ "dsrl\t%L0, %L0, 32\n\t" \ - "dsll\t%M0, %M0, 32\n\t" \ + "dsll\t%M0, %M1, 32\n\t" \ "or\t%L0, %L0, %M0\n\t" \ "dmtc0\t%L0, " #source ", " #sel "\n\t" \ ".set\tmips0" \ - : : "r" (val)); \ + : "=&r,r" (__tmp) \ + : "r,0" (val)); \ local_irq_restore(__flags); \ } while (0) @@ -1446,6 +1460,8 @@ do { \ #define read_c0_conf() 
__read_32bit_c0_register($3, 0) #define write_c0_conf(val) __write_32bit_c0_register($3, 0, val) +#define read_c0_globalnumber() __read_32bit_c0_register($3, 1) + #define read_c0_context() __read_ulong_c0_register($4, 0) #define write_c0_context(val) __write_ulong_c0_register($4, 0, val) diff --git a/arch/mips/include/asm/module.h b/arch/mips/include/asm/module.h index e51add184717..06552a965cf4 100644 --- a/arch/mips/include/asm/module.h +++ b/arch/mips/include/asm/module.h @@ -114,8 +114,6 @@ search_module_dbetables(unsigned long addr) #define MODULE_PROC_FAMILY "R5432 " #elif defined CONFIG_CPU_R5500 #define MODULE_PROC_FAMILY "R5500 " -#elif defined CONFIG_CPU_R6000 -#define MODULE_PROC_FAMILY "R6000 " #elif defined CONFIG_CPU_NEVADA #define MODULE_PROC_FAMILY "NEVADA " #elif defined CONFIG_CPU_R8000 diff --git a/arch/mips/include/asm/netlogic/common.h b/arch/mips/include/asm/netlogic/common.h index e0717d10e650..a6e6cbebe046 100644 --- a/arch/mips/include/asm/netlogic/common.h +++ b/arch/mips/include/asm/netlogic/common.h @@ -84,7 +84,7 @@ nlm_set_nmi_handler(void *handler) */ void nlm_init_boot_cpu(void); unsigned int nlm_get_cpu_frequency(void); -extern struct plat_smp_ops nlm_smp_ops; +extern const struct plat_smp_ops nlm_smp_ops; extern char nlm_reset_entry[], nlm_reset_entry_end[]; /* SWIOTLB */ diff --git a/arch/mips/include/asm/octeon/cvmx-boot-vector.h b/arch/mips/include/asm/octeon/cvmx-boot-vector.h new file mode 100644 index 000000000000..8db08241d53c --- /dev/null +++ b/arch/mips/include/asm/octeon/cvmx-boot-vector.h @@ -0,0 +1,53 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2003-2017 Cavium, Inc. + */ + +#ifndef __CVMX_BOOT_VECTOR_H__ +#define __CVMX_BOOT_VECTOR_H__ + +#include + +/* + * The boot vector table is made up of an array of 1024 elements of + * struct cvmx_boot_vector_element. There is one entry for each + * possible MIPS CPUNum, indexed by the CPUNum. + * + * Once cvmx_boot_vector_get() returns a non-NULL value (indicating + * success), NMI to a core will cause execution to transfer to the + * target_ptr location for that core's entry in the vector table. + * + * The struct cvmx_boot_vector_element fields app0, app1, and app2 can + * be used by the application that has set the target_ptr in any + * application specific manner, they are not touched by the vectoring + * code. + * + * The boot vector code clobbers the CP0_DESAVE register, and on + * OCTEON II and later CPUs also clobbers CP0_KScratch2. All GP + * registers are preserved, except on pre-OCTEON II CPUs, where k1 is + * clobbered. + * + */ + + +/* + * Applications install the boot bus code in cvmx-boot-vector.c, which + * uses this magic: + */ +#define OCTEON_BOOT_MOVEABLE_MAGIC1 0xdb00110ad358eacdull + +struct cvmx_boot_vector_element { + /* kseg0 or xkphys address of target code. */ + uint64_t target_ptr; + /* Three application specific arguments. 
*/ + uint64_t app0; + uint64_t app1; + uint64_t app2; +}; + +struct cvmx_boot_vector_element *cvmx_boot_vector_get(void); + +#endif /* __CVMX_BOOT_VECTOR_H__ */ diff --git a/arch/mips/include/asm/octeon/cvmx-bootmem.h b/arch/mips/include/asm/octeon/cvmx-bootmem.h index 374562507d0b..72d2e403a6e4 100644 --- a/arch/mips/include/asm/octeon/cvmx-bootmem.h +++ b/arch/mips/include/asm/octeon/cvmx-bootmem.h @@ -255,6 +255,34 @@ extern void *cvmx_bootmem_alloc_named_range(uint64_t size, uint64_t min_addr, uint64_t max_addr, uint64_t align, char *name); +/** + * Allocate if needed a block of memory from a specific range of the + * free list that was passed to the application by the bootloader, and + * assign it a name in the global named block table. (part of the + * cvmx_bootmem_descriptor_t structure) Named blocks can later be + * freed. If the requested name block is already allocated, return + * the pointer to block of memory. If request cannot be satisfied + * within the address range specified, NULL is returned + * + * @param size Size in bytes of block to allocate + * @param min_addr minimum address of range + * @param max_addr maximum address of range + * @param align Alignment of memory to be allocated. (must be a power of 2) + * @param name name of block - must be less than CVMX_BOOTMEM_NAME_LEN bytes + * @param init Initialization function + * + * The initialization function is optional, if omitted the named block + * is initialized to all zeros when it is created, i.e. once. + * + * @return pointer to block of memory, NULL on error + */ +void *cvmx_bootmem_alloc_named_range_once(uint64_t size, + uint64_t min_addr, + uint64_t max_addr, + uint64_t align, + char *name, + void (*init) (void *)); + extern int cvmx_bootmem_free_named(char *name); /** diff --git a/arch/mips/include/asm/octeon/cvmx-ciu-defs.h b/arch/mips/include/asm/octeon/cvmx-ciu-defs.h index 0dd0e40c96d4..6e61792d9248 100644 --- a/arch/mips/include/asm/octeon/cvmx-ciu-defs.h +++ b/arch/mips/include/asm/octeon/cvmx-ciu-defs.h @@ -128,6 +128,7 @@ static inline uint64_t CVMX_CIU_PP_POKEX(unsigned long offset) case OCTEON_CN52XX & OCTEON_FAMILY_MASK: case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: case OCTEON_CN61XX & OCTEON_FAMILY_MASK: + case OCTEON_CN70XX & OCTEON_FAMILY_MASK: return CVMX_ADD_IO_SEG(0x0001070000000580ull) + (offset) * 8; case OCTEON_CN31XX & OCTEON_FAMILY_MASK: case OCTEON_CN50XX & OCTEON_FAMILY_MASK: @@ -143,6 +144,10 @@ static inline uint64_t CVMX_CIU_PP_POKEX(unsigned long offset) return CVMX_ADD_IO_SEG(0x0001070000000580ull) + (offset) * 8; case OCTEON_CN68XX & OCTEON_FAMILY_MASK: return CVMX_ADD_IO_SEG(0x0001070100100200ull) + (offset) * 8; + case OCTEON_CNF75XX & OCTEON_FAMILY_MASK: + case OCTEON_CN73XX & OCTEON_FAMILY_MASK: + case OCTEON_CN78XX & OCTEON_FAMILY_MASK: + return CVMX_ADD_IO_SEG(0x0001010000030000ull) + (offset) * 8; } return CVMX_ADD_IO_SEG(0x0001070000000580ull) + (offset) * 8; } @@ -180,6 +185,7 @@ static inline uint64_t CVMX_CIU_WDOGX(unsigned long offset) case OCTEON_CN52XX & OCTEON_FAMILY_MASK: case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: case OCTEON_CN61XX & OCTEON_FAMILY_MASK: + case OCTEON_CN70XX & OCTEON_FAMILY_MASK: return CVMX_ADD_IO_SEG(0x0001070000000500ull) + (offset) * 8; case OCTEON_CN31XX & OCTEON_FAMILY_MASK: case OCTEON_CN50XX & OCTEON_FAMILY_MASK: @@ -195,6 +201,10 @@ static inline uint64_t CVMX_CIU_WDOGX(unsigned long offset) return CVMX_ADD_IO_SEG(0x0001070000000500ull) + (offset) * 8; case OCTEON_CN68XX & OCTEON_FAMILY_MASK: return CVMX_ADD_IO_SEG(0x0001070100100000ull) + 
(offset) * 8; + case OCTEON_CNF75XX & OCTEON_FAMILY_MASK: + case OCTEON_CN73XX & OCTEON_FAMILY_MASK: + case OCTEON_CN78XX & OCTEON_FAMILY_MASK: + return CVMX_ADD_IO_SEG(0x0001010000020000ull) + (offset) * 8; } return CVMX_ADD_IO_SEG(0x0001070000000500ull) + (offset) * 8; } diff --git a/arch/mips/include/asm/octeon/cvmx.h b/arch/mips/include/asm/octeon/cvmx.h index e638735cc3ac..205ab2ce10f8 100644 --- a/arch/mips/include/asm/octeon/cvmx.h +++ b/arch/mips/include/asm/octeon/cvmx.h @@ -357,6 +357,34 @@ static inline unsigned int cvmx_get_local_core_num(void) return cvmx_get_core_num() & ((1 << CVMX_NODE_NO_SHIFT) - 1); } +#define CVMX_NODE_BITS (2) /* Number of bits to define a node */ +#define CVMX_MAX_NODES (1 << CVMX_NODE_BITS) +#define CVMX_NODE_IO_SHIFT (36) +#define CVMX_NODE_MEM_SHIFT (40) +#define CVMX_NODE_IO_MASK ((uint64_t)CVMX_NODE_MASK << CVMX_NODE_IO_SHIFT) + +static inline void cvmx_write_csr_node(uint64_t node, uint64_t csr_addr, + uint64_t val) +{ + uint64_t composite_csr_addr, node_addr; + + node_addr = (node & CVMX_NODE_MASK) << CVMX_NODE_IO_SHIFT; + composite_csr_addr = (csr_addr & ~CVMX_NODE_IO_MASK) | node_addr; + + cvmx_write64_uint64(composite_csr_addr, val); + if (((csr_addr >> 40) & 0x7ffff) == (0x118)) + cvmx_read64_uint64(CVMX_MIO_BOOT_BIST_STAT | node_addr); +} + +static inline uint64_t cvmx_read_csr_node(uint64_t node, uint64_t csr_addr) +{ + uint64_t node_addr; + + node_addr = (csr_addr & ~CVMX_NODE_IO_MASK) | + (node & CVMX_NODE_MASK) << CVMX_NODE_IO_SHIFT; + return cvmx_read_csr(node_addr); +} + /** * Returns the number of bits set in the provided value. * Simple wrapper for POP instruction. diff --git a/arch/mips/include/asm/octeon/octeon.h b/arch/mips/include/asm/octeon/octeon.h index 07c0516ef4d5..c99c4b6a79f4 100644 --- a/arch/mips/include/asm/octeon/octeon.h +++ b/arch/mips/include/asm/octeon/octeon.h @@ -362,4 +362,6 @@ extern void octeon_fixup_irqs(void); extern struct semaphore octeon_bootbus_sem; +struct irq_domain *octeon_irq_get_block_domain(int node, uint8_t block); + #endif /* __ASM_OCTEON_OCTEON_H */ diff --git a/arch/mips/include/asm/smp-ops.h b/arch/mips/include/asm/smp-ops.h index db7c322f057f..53b2cb8e5966 100644 --- a/arch/mips/include/asm/smp-ops.h +++ b/arch/mips/include/asm/smp-ops.h @@ -13,7 +13,7 @@ #include -#include +#include #ifdef CONFIG_SMP @@ -26,7 +26,7 @@ struct plat_smp_ops { void (*send_ipi_mask)(const struct cpumask *mask, unsigned int action); void (*init_secondary)(void); void (*smp_finish)(void); - void (*boot_secondary)(int cpu, struct task_struct *idle); + int (*boot_secondary)(int cpu, struct task_struct *idle); void (*smp_setup)(void); void (*prepare_cpus)(unsigned int max_cpus); #ifdef CONFIG_HOTPLUG_CPU @@ -35,11 +35,11 @@ struct plat_smp_ops { #endif }; -extern void register_smp_ops(struct plat_smp_ops *ops); +extern void register_smp_ops(const struct plat_smp_ops *ops); static inline void plat_smp_setup(void) { - extern struct plat_smp_ops *mp_ops; /* private */ + extern const struct plat_smp_ops *mp_ops; /* private */ mp_ops->smp_setup(); } @@ -57,7 +57,7 @@ static inline void plat_smp_setup(void) /* UP, nothing to do ... 
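cvmx_write_csr_node()/cvmx_read_csr_node() above steer a CSR access to another node by overwriting the node bits of the I/O address (2 bits at shift 36). A hedged stand-alone sketch of that address composition follows; CVMX_NODE_MASK is not shown in this hunk, so it is assumed here to be CVMX_MAX_NODES - 1, and the sample register is the raw CN78XX CVMX_CIU_WDOGX(0) base added in this patch, without the CVMX_ADD_IO_SEG() segment bits.

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

#define NODE_BITS       2
#define NODE_MASK       ((1ull << NODE_BITS) - 1)   /* assumed: CVMX_NODE_MASK */
#define NODE_IO_SHIFT   36
#define NODE_IO_MASK    (NODE_MASK << NODE_IO_SHIFT)

/* Fold a node number into bits [37:36] of an I/O CSR address */
static uint64_t csr_addr_on_node(uint64_t node, uint64_t csr_addr)
{
        return (csr_addr & ~NODE_IO_MASK) |
               ((node & NODE_MASK) << NODE_IO_SHIFT);
}

int main(void)
{
        uint64_t ciu_wdog0 = 0x0001010000020000ull; /* CVMX_CIU_WDOGX(0), CN78XX */

        printf("node 0: 0x%016" PRIx64 "\n", csr_addr_on_node(0, ciu_wdog0));
        printf("node 1: 0x%016" PRIx64 "\n", csr_addr_on_node(1, ciu_wdog0));
        return 0;
}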
*/ } -static inline void register_smp_ops(struct plat_smp_ops *ops) +static inline void register_smp_ops(const struct plat_smp_ops *ops) { } @@ -66,7 +66,7 @@ static inline void register_smp_ops(struct plat_smp_ops *ops) static inline int register_up_smp_ops(void) { #ifdef CONFIG_SMP_UP - extern struct plat_smp_ops up_smp_ops; + extern const struct plat_smp_ops up_smp_ops; register_smp_ops(&up_smp_ops); @@ -79,7 +79,7 @@ static inline int register_up_smp_ops(void) static inline int register_cmp_smp_ops(void) { #ifdef CONFIG_MIPS_CMP - extern struct plat_smp_ops cmp_smp_ops; + extern const struct plat_smp_ops cmp_smp_ops; if (!mips_cm_present()) return -ENODEV; @@ -95,7 +95,7 @@ static inline int register_cmp_smp_ops(void) static inline int register_vsmp_smp_ops(void) { #ifdef CONFIG_MIPS_MT_SMP - extern struct plat_smp_ops vsmp_smp_ops; + extern const struct plat_smp_ops vsmp_smp_ops; register_smp_ops(&vsmp_smp_ops); diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h index bab3d41e5987..9e494f8d9c03 100644 --- a/arch/mips/include/asm/smp.h +++ b/arch/mips/include/asm/smp.h @@ -58,7 +58,7 @@ extern void calculate_cpu_foreign_map(void); */ static inline void smp_send_reschedule(int cpu) { - extern struct plat_smp_ops *mp_ops; /* private */ + extern const struct plat_smp_ops *mp_ops; /* private */ mp_ops->send_ipi_single(cpu, SMP_RESCHEDULE_YOURSELF); } @@ -66,14 +66,14 @@ static inline void smp_send_reschedule(int cpu) #ifdef CONFIG_HOTPLUG_CPU static inline int __cpu_disable(void) { - extern struct plat_smp_ops *mp_ops; /* private */ + extern const struct plat_smp_ops *mp_ops; /* private */ return mp_ops->cpu_disable(); } static inline void __cpu_die(unsigned int cpu) { - extern struct plat_smp_ops *mp_ops; /* private */ + extern const struct plat_smp_ops *mp_ops; /* private */ mp_ops->cpu_die(cpu); } @@ -97,14 +97,14 @@ int mips_smp_ipi_free(const struct cpumask *mask); static inline void arch_send_call_function_single_ipi(int cpu) { - extern struct plat_smp_ops *mp_ops; /* private */ + extern const struct plat_smp_ops *mp_ops; /* private */ mp_ops->send_ipi_mask(cpumask_of(cpu), SMP_CALL_FUNCTION); } static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask) { - extern struct plat_smp_ops *mp_ops; /* private */ + extern const struct plat_smp_ops *mp_ops; /* private */ mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION); } diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h index eaa5a4d7d5e5..5d3563c55e0c 100644 --- a/arch/mips/include/asm/stackframe.h +++ b/arch/mips/include/asm/stackframe.h @@ -19,20 +19,43 @@ #include #include +/* Make the addition of cfi info a little easier. 
*/ + .macro cfi_rel_offset reg offset=0 docfi=0 + .if \docfi + .cfi_rel_offset \reg, \offset + .endif + .endm + + .macro cfi_st reg offset=0 docfi=0 + LONG_S \reg, \offset(sp) + cfi_rel_offset \reg, \offset, \docfi + .endm + + .macro cfi_restore reg offset=0 docfi=0 + .if \docfi + .cfi_restore \reg + .endif + .endm + + .macro cfi_ld reg offset=0 docfi=0 + LONG_L \reg, \offset(sp) + cfi_restore \reg \offset \docfi + .endm + #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) #define STATMASK 0x3f #else #define STATMASK 0x1f #endif - .macro SAVE_AT + .macro SAVE_AT docfi=0 .set push .set noat - LONG_S $1, PT_R1(sp) + cfi_st $1, PT_R1, \docfi .set pop .endm - .macro SAVE_TEMP + .macro SAVE_TEMP docfi=0 #ifdef CONFIG_CPU_HAS_SMARTMIPS mflhxu v1 LONG_S v1, PT_LO(sp) @@ -44,20 +67,20 @@ mfhi v1 #endif #ifdef CONFIG_32BIT - LONG_S $8, PT_R8(sp) - LONG_S $9, PT_R9(sp) + cfi_st $8, PT_R8, \docfi + cfi_st $9, PT_R9, \docfi #endif - LONG_S $10, PT_R10(sp) - LONG_S $11, PT_R11(sp) - LONG_S $12, PT_R12(sp) + cfi_st $10, PT_R10, \docfi + cfi_st $11, PT_R11, \docfi + cfi_st $12, PT_R12, \docfi #if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6) LONG_S v1, PT_HI(sp) mflo v1 #endif - LONG_S $13, PT_R13(sp) - LONG_S $14, PT_R14(sp) - LONG_S $15, PT_R15(sp) - LONG_S $24, PT_R24(sp) + cfi_st $13, PT_R13, \docfi + cfi_st $14, PT_R14, \docfi + cfi_st $15, PT_R15, \docfi + cfi_st $24, PT_R24, \docfi #if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6) LONG_S v1, PT_LO(sp) #endif @@ -71,20 +94,28 @@ #endif .endm - .macro SAVE_STATIC - LONG_S $16, PT_R16(sp) - LONG_S $17, PT_R17(sp) - LONG_S $18, PT_R18(sp) - LONG_S $19, PT_R19(sp) - LONG_S $20, PT_R20(sp) - LONG_S $21, PT_R21(sp) - LONG_S $22, PT_R22(sp) - LONG_S $23, PT_R23(sp) - LONG_S $30, PT_R30(sp) + .macro SAVE_STATIC docfi=0 + cfi_st $16, PT_R16, \docfi + cfi_st $17, PT_R17, \docfi + cfi_st $18, PT_R18, \docfi + cfi_st $19, PT_R19, \docfi + cfi_st $20, PT_R20, \docfi + cfi_st $21, PT_R21, \docfi + cfi_st $22, PT_R22, \docfi + cfi_st $23, PT_R23, \docfi + cfi_st $30, PT_R30, \docfi .endm +/* + * get_saved_sp returns the SP for the current CPU by looking in the + * kernelsp array for it. If tosp is set, it stores the current sp in + * k0 and loads the new value in sp. If not, it clobbers k0 and + * stores the new value in k1, leaving sp unaffected. 
+ */ #ifdef CONFIG_SMP - .macro get_saved_sp /* SMP variation */ + + /* SMP variation */ + .macro get_saved_sp docfi=0 tosp=0 ASM_CPUID_MFC0 k0, ASM_SMP_CPUID_REG #if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32) lui k1, %hi(kernelsp) @@ -97,7 +128,15 @@ #endif LONG_SRL k0, SMP_CPUID_PTRSHIFT LONG_ADDU k1, k0 + .if \tosp + move k0, sp + .if \docfi + .cfi_register sp, k0 + .endif + LONG_L sp, %lo(kernelsp)(k1) + .else LONG_L k1, %lo(kernelsp)(k1) + .endif .endm .macro set_saved_sp stackp temp temp2 @@ -106,7 +145,8 @@ LONG_S \stackp, kernelsp(\temp) .endm #else /* !CONFIG_SMP */ - .macro get_saved_sp /* Uniprocessor variation */ + /* Uniprocessor variation */ + .macro get_saved_sp docfi=0 tosp=0 #ifdef CONFIG_CPU_JUMP_WORKAROUNDS /* * Clear BTB (branch target buffer), forbid RAS (return address @@ -135,7 +175,15 @@ daddiu k1, %hi(kernelsp) dsll k1, k1, 16 #endif + .if \tosp + move k0, sp + .if \docfi + .cfi_register sp, k0 + .endif + LONG_L sp, %lo(kernelsp)(k1) + .else LONG_L k1, %lo(kernelsp)(k1) + .endif .endm .macro set_saved_sp stackp temp temp2 @@ -143,7 +191,7 @@ .endm #endif - .macro SAVE_SOME + .macro SAVE_SOME docfi=0 .set push .set noat .set reorder @@ -151,7 +199,6 @@ sll k0, 3 /* extract cu0 bit */ .set noreorder bltz k0, 8f - move k1, sp #ifdef CONFIG_EVA /* * Flush interAptiv's Return Prediction Stack (RPS) by writing @@ -178,20 +225,26 @@ MTC0 k0, CP0_ENTRYHI #endif .set reorder + move k0, sp + .if \docfi + .cfi_register sp, k0 + .endif /* Called from user mode, new stack. */ - get_saved_sp -#ifndef CONFIG_CPU_DADDI_WORKAROUNDS -8: move k0, sp - PTR_SUBU sp, k1, PT_SIZE -#else - .set at=k0 -8: PTR_SUBU k1, PT_SIZE - .set noat - move k0, sp - move sp, k1 + get_saved_sp docfi=\docfi tosp=1 +8: +#ifdef CONFIG_CPU_DADDI_WORKAROUNDS + .set at=k1 #endif - LONG_S k0, PT_R29(sp) - LONG_S $3, PT_R3(sp) + PTR_SUBU sp, PT_SIZE +#ifdef CONFIG_CPU_DADDI_WORKAROUNDS + .set noat +#endif + .if \docfi + .cfi_def_cfa sp,0 + .endif + cfi_st k0, PT_R29, \docfi + cfi_rel_offset sp, PT_R29, \docfi + cfi_st v1, PT_R3, \docfi /* * You might think that you don't need to save $0, * but the FPU emulator and gdb remote debug stub @@ -199,23 +252,26 @@ */ LONG_S $0, PT_R0(sp) mfc0 v1, CP0_STATUS - LONG_S $2, PT_R2(sp) + cfi_st v0, PT_R2, \docfi LONG_S v1, PT_STATUS(sp) - LONG_S $4, PT_R4(sp) + cfi_st $4, PT_R4, \docfi mfc0 v1, CP0_CAUSE - LONG_S $5, PT_R5(sp) + cfi_st $5, PT_R5, \docfi LONG_S v1, PT_CAUSE(sp) - LONG_S $6, PT_R6(sp) - MFC0 v1, CP0_EPC - LONG_S $7, PT_R7(sp) + cfi_st $6, PT_R6, \docfi + cfi_st ra, PT_R31, \docfi + MFC0 ra, CP0_EPC + cfi_st $7, PT_R7, \docfi #ifdef CONFIG_64BIT - LONG_S $8, PT_R8(sp) - LONG_S $9, PT_R9(sp) + cfi_st $8, PT_R8, \docfi + cfi_st $9, PT_R9, \docfi #endif - LONG_S v1, PT_EPC(sp) - LONG_S $25, PT_R25(sp) - LONG_S $28, PT_R28(sp) - LONG_S $31, PT_R31(sp) + LONG_S ra, PT_EPC(sp) + .if \docfi + .cfi_rel_offset ra, PT_EPC + .endif + cfi_st $25, PT_R25, \docfi + cfi_st $28, PT_R28, \docfi /* Set thread_info if we're coming from user mode */ mfc0 k0, CP0_STATUS @@ -232,21 +288,21 @@ .set pop .endm - .macro SAVE_ALL - SAVE_SOME - SAVE_AT - SAVE_TEMP - SAVE_STATIC + .macro SAVE_ALL docfi=0 + SAVE_SOME \docfi + SAVE_AT \docfi + SAVE_TEMP \docfi + SAVE_STATIC \docfi .endm - .macro RESTORE_AT + .macro RESTORE_AT docfi=0 .set push .set noat - LONG_L $1, PT_R1(sp) + cfi_ld $1, PT_R1, \docfi .set pop .endm - .macro RESTORE_TEMP + .macro RESTORE_TEMP docfi=0 #ifdef CONFIG_CPU_CAVIUM_OCTEON /* Restore the Octeon multiplier state */ jal octeon_mult_restore @@ -265,33 
+321,37 @@ mthi $24 #endif #ifdef CONFIG_32BIT - LONG_L $8, PT_R8(sp) - LONG_L $9, PT_R9(sp) + cfi_ld $8, PT_R8, \docfi + cfi_ld $9, PT_R9, \docfi #endif - LONG_L $10, PT_R10(sp) - LONG_L $11, PT_R11(sp) - LONG_L $12, PT_R12(sp) - LONG_L $13, PT_R13(sp) - LONG_L $14, PT_R14(sp) - LONG_L $15, PT_R15(sp) - LONG_L $24, PT_R24(sp) + cfi_ld $10, PT_R10, \docfi + cfi_ld $11, PT_R11, \docfi + cfi_ld $12, PT_R12, \docfi + cfi_ld $13, PT_R13, \docfi + cfi_ld $14, PT_R14, \docfi + cfi_ld $15, PT_R15, \docfi + cfi_ld $24, PT_R24, \docfi .endm - .macro RESTORE_STATIC - LONG_L $16, PT_R16(sp) - LONG_L $17, PT_R17(sp) - LONG_L $18, PT_R18(sp) - LONG_L $19, PT_R19(sp) - LONG_L $20, PT_R20(sp) - LONG_L $21, PT_R21(sp) - LONG_L $22, PT_R22(sp) - LONG_L $23, PT_R23(sp) - LONG_L $30, PT_R30(sp) + .macro RESTORE_STATIC docfi=0 + cfi_ld $16, PT_R16, \docfi + cfi_ld $17, PT_R17, \docfi + cfi_ld $18, PT_R18, \docfi + cfi_ld $19, PT_R19, \docfi + cfi_ld $20, PT_R20, \docfi + cfi_ld $21, PT_R21, \docfi + cfi_ld $22, PT_R22, \docfi + cfi_ld $23, PT_R23, \docfi + cfi_ld $30, PT_R30, \docfi + .endm + + .macro RESTORE_SP docfi=0 + cfi_ld sp, PT_R29, \docfi .endm #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) - .macro RESTORE_SOME + .macro RESTORE_SOME docfi=0 .set push .set reorder .set noat @@ -306,30 +366,30 @@ and v0, v1 or v0, a0 mtc0 v0, CP0_STATUS - LONG_L $31, PT_R31(sp) - LONG_L $28, PT_R28(sp) - LONG_L $25, PT_R25(sp) - LONG_L $7, PT_R7(sp) - LONG_L $6, PT_R6(sp) - LONG_L $5, PT_R5(sp) - LONG_L $4, PT_R4(sp) - LONG_L $3, PT_R3(sp) - LONG_L $2, PT_R2(sp) + cfi_ld $31, PT_R31, \docfi + cfi_ld $28, PT_R28, \docfi + cfi_ld $25, PT_R25, \docfi + cfi_ld $7, PT_R7, \docfi + cfi_ld $6, PT_R6, \docfi + cfi_ld $5, PT_R5, \docfi + cfi_ld $4, PT_R4, \docfi + cfi_ld $3, PT_R3, \docfi + cfi_ld $2, PT_R2, \docfi .set pop .endm - .macro RESTORE_SP_AND_RET + .macro RESTORE_SP_AND_RET docfi=0 .set push .set noreorder LONG_L k0, PT_EPC(sp) - LONG_L sp, PT_R29(sp) + RESTORE_SP \docfi jr k0 rfe .set pop .endm #else - .macro RESTORE_SOME + .macro RESTORE_SOME docfi=0 .set push .set reorder .set noat @@ -346,24 +406,24 @@ mtc0 v0, CP0_STATUS LONG_L v1, PT_EPC(sp) MTC0 v1, CP0_EPC - LONG_L $31, PT_R31(sp) - LONG_L $28, PT_R28(sp) - LONG_L $25, PT_R25(sp) + cfi_ld $31, PT_R31, \docfi + cfi_ld $28, PT_R28, \docfi + cfi_ld $25, PT_R25, \docfi #ifdef CONFIG_64BIT - LONG_L $8, PT_R8(sp) - LONG_L $9, PT_R9(sp) + cfi_ld $8, PT_R8, \docfi + cfi_ld $9, PT_R9, \docfi #endif - LONG_L $7, PT_R7(sp) - LONG_L $6, PT_R6(sp) - LONG_L $5, PT_R5(sp) - LONG_L $4, PT_R4(sp) - LONG_L $3, PT_R3(sp) - LONG_L $2, PT_R2(sp) + cfi_ld $7, PT_R7, \docfi + cfi_ld $6, PT_R6, \docfi + cfi_ld $5, PT_R5, \docfi + cfi_ld $4, PT_R4, \docfi + cfi_ld $3, PT_R3, \docfi + cfi_ld $2, PT_R2, \docfi .set pop .endm - .macro RESTORE_SP_AND_RET - LONG_L sp, PT_R29(sp) + .macro RESTORE_SP_AND_RET docfi=0 + RESTORE_SP \docfi #ifdef CONFIG_CPU_MIPSR6 eretnc #else @@ -375,16 +435,12 @@ #endif - .macro RESTORE_SP - LONG_L sp, PT_R29(sp) - .endm - - .macro RESTORE_ALL - RESTORE_TEMP - RESTORE_STATIC - RESTORE_AT - RESTORE_SOME - RESTORE_SP + .macro RESTORE_ALL docfi=0 + RESTORE_TEMP \docfi + RESTORE_STATIC \docfi + RESTORE_AT \docfi + RESTORE_SOME \docfi + RESTORE_SP \docfi .endm /* diff --git a/arch/mips/include/asm/stacktrace.h b/arch/mips/include/asm/stacktrace.h index 780ee2c2a2ac..10c4e9c84448 100644 --- a/arch/mips/include/asm/stacktrace.h +++ b/arch/mips/include/asm/stacktrace.h @@ -2,6 +2,8 @@ #define _ASM_STACKTRACE_H #include +#include +#include #ifdef 
CONFIG_KALLSYMS extern int raw_show_trace; @@ -20,6 +22,14 @@ static inline unsigned long unwind_stack(struct task_struct *task, } #endif +#define STR_PTR_LA __stringify(PTR_LA) +#define STR_LONG_S __stringify(LONG_S) +#define STR_LONG_L __stringify(LONG_L) +#define STR_LONGSIZE __stringify(LONGSIZE) + +#define STORE_ONE_REG(r) \ + STR_LONG_S " $" __stringify(r)",("STR_LONGSIZE"*"__stringify(r)")(%1)\n\t" + static __always_inline void prepare_frametrace(struct pt_regs *regs) { #ifndef CONFIG_KALLSYMS @@ -32,21 +42,47 @@ static __always_inline void prepare_frametrace(struct pt_regs *regs) __asm__ __volatile__( ".set push\n\t" ".set noat\n\t" -#ifdef CONFIG_64BIT - "1: dla $1, 1b\n\t" - "sd $1, %0\n\t" - "sd $29, %1\n\t" - "sd $31, %2\n\t" -#else - "1: la $1, 1b\n\t" - "sw $1, %0\n\t" - "sw $29, %1\n\t" - "sw $31, %2\n\t" -#endif + /* Store $1 so we can use it */ + STR_LONG_S " $1,"STR_LONGSIZE"(%1)\n\t" + /* Store the PC */ + "1: " STR_PTR_LA " $1, 1b\n\t" + STR_LONG_S " $1,%0\n\t" + STORE_ONE_REG(2) + STORE_ONE_REG(3) + STORE_ONE_REG(4) + STORE_ONE_REG(5) + STORE_ONE_REG(6) + STORE_ONE_REG(7) + STORE_ONE_REG(8) + STORE_ONE_REG(9) + STORE_ONE_REG(10) + STORE_ONE_REG(11) + STORE_ONE_REG(12) + STORE_ONE_REG(13) + STORE_ONE_REG(14) + STORE_ONE_REG(15) + STORE_ONE_REG(16) + STORE_ONE_REG(17) + STORE_ONE_REG(18) + STORE_ONE_REG(19) + STORE_ONE_REG(20) + STORE_ONE_REG(21) + STORE_ONE_REG(22) + STORE_ONE_REG(23) + STORE_ONE_REG(24) + STORE_ONE_REG(25) + STORE_ONE_REG(26) + STORE_ONE_REG(27) + STORE_ONE_REG(28) + STORE_ONE_REG(29) + STORE_ONE_REG(30) + STORE_ONE_REG(31) + /* Restore $1 */ + STR_LONG_L " $1,"STR_LONGSIZE"(%1)\n\t" ".set pop\n\t" - : "=m" (regs->cp0_epc), - "=m" (regs->regs[29]), "=m" (regs->regs[31]) - : : "memory"); + : "=m" (regs->cp0_epc) + : "r" (regs->regs) + : "memory"); } #endif /* _ASM_STACKTRACE_H */ diff --git a/arch/mips/include/asm/topology.h b/arch/mips/include/asm/topology.h index 7afda4150a59..0673d2d0f2e6 100644 --- a/arch/mips/include/asm/topology.h +++ b/arch/mips/include/asm/topology.h @@ -13,7 +13,7 @@ #ifdef CONFIG_SMP #define topology_physical_package_id(cpu) (cpu_data[cpu].package) -#define topology_core_id(cpu) (cpu_data[cpu].core) +#define topology_core_id(cpu) (cpu_core(&cpu_data[cpu])) #define topology_core_cpumask(cpu) (&cpu_core_map[cpu]) #define topology_sibling_cpumask(cpu) (&cpu_sibling_map[cpu]) #endif diff --git a/arch/mips/include/asm/vga.h b/arch/mips/include/asm/vga.h index f82c83749a08..975ff51f80c4 100644 --- a/arch/mips/include/asm/vga.h +++ b/arch/mips/include/asm/vga.h @@ -6,6 +6,7 @@ #ifndef _ASM_VGA_H #define _ASM_VGA_H +#include #include #include @@ -40,9 +41,15 @@ static inline u16 scr_readw(volatile const u16 *addr) return le16_to_cpu(*addr); } +static inline void scr_memsetw(u16 *s, u16 v, unsigned int count) +{ + memset16(s, cpu_to_le16(v), count / 2); +} + #define scr_memcpyw(d, s, c) memcpy(d, s, c) #define scr_memmovew(d, s, c) memmove(d, s, c) #define VT_BUF_HAVE_MEMCPYW #define VT_BUF_HAVE_MEMMOVEW +#define VT_BUF_HAVE_MEMSETW #endif /* _ASM_VGA_H */ diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h index d61897535926..6abea5183d7c 100644 --- a/arch/mips/include/uapi/asm/inst.h +++ b/arch/mips/include/uapi/asm/inst.h @@ -981,7 +981,7 @@ struct mm16_r3_format { /* Load from global pointer format */ struct mm16_r5_format { /* Load/store from stack pointer format */ __BITFIELD_FIELD(unsigned int opcode : 6, __BITFIELD_FIELD(unsigned int rt : 5, - __BITFIELD_FIELD(signed int simmediate : 5, + 
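prepare_frametrace() above builds its register-dump asm out of STR_LONG_S/STR_LONGSIZE so that the same source emits sw with a 4-byte stride on 32-bit kernels and sd with an 8-byte stride on 64-bit kernels. The sketch below reproduces only the two-level stringification trick in plain user-space C, with a simplified __stringify and the 64-bit LONG_S/LONGSIZE values assumed; it prints the generated template instead of feeding it to asm().

#include <stdio.h>

/* Simplified version of the kernel's __stringify: expand, then stringify */
#define __stringify_1(x)        #x
#define __stringify(x)          __stringify_1(x)

/* Pretend we are building a 64-bit kernel */
#define LONG_S          sd
#define LONGSIZE        8

#define STR_LONG_S      __stringify(LONG_S)
#define STR_LONGSIZE    __stringify(LONGSIZE)

#define STORE_ONE_REG(r) \
        STR_LONG_S " $" __stringify(r) ", (" STR_LONGSIZE "*" __stringify(r) ")(%1)\n\t"

int main(void)
{
        /* Adjacent string literals concatenate into one asm template line */
        fputs(STORE_ONE_REG(29), stdout);       /* -> sd $29, (8*29)(%1) */
        return 0;
}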
__BITFIELD_FIELD(unsigned int imm : 5, __BITFIELD_FIELD(unsigned int : 16, /* Ignored */ ;)))) }; diff --git a/arch/mips/include/uapi/asm/mman.h b/arch/mips/include/uapi/asm/mman.h index 655e2fb5395b..da3216007fe0 100644 --- a/arch/mips/include/uapi/asm/mman.h +++ b/arch/mips/include/uapi/asm/mman.h @@ -91,20 +91,12 @@ overrides the coredump filter bits */ #define MADV_DODUMP 17 /* Clear the MADV_NODUMP flag */ +#define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */ +#define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */ + /* compatibility flags */ #define MAP_FILE 0 -/* - * When MAP_HUGETLB is set bits [26:31] encode the log2 of the huge page size. - * This gives us 6 bits, which is enough until someone invents 128 bit address - * spaces. - * - * Assume these are all power of twos. - * When 0 use the default page size. - */ -#define MAP_HUGE_SHIFT 26 -#define MAP_HUGE_MASK 0x3f - #define PKEY_DISABLE_ACCESS 0x1 #define PKEY_DISABLE_WRITE 0x2 #define PKEY_ACCESS_MASK (PKEY_DISABLE_ACCESS |\ diff --git a/arch/mips/include/uapi/asm/siginfo.h b/arch/mips/include/uapi/asm/siginfo.h index 8069cf766603..cf6113bbcb98 100644 --- a/arch/mips/include/uapi/asm/siginfo.h +++ b/arch/mips/include/uapi/asm/siginfo.h @@ -120,7 +120,7 @@ typedef struct siginfo { #undef SI_TIMER #undef SI_MESGQ #define SI_ASYNCIO -2 /* sent by AIO completion */ -#define SI_TIMER __SI_CODE(__SI_TIMER, -3) /* sent by timer expiration */ -#define SI_MESGQ __SI_CODE(__SI_MESGQ, -4) /* sent by real time mesq state change */ +#define SI_TIMER -3 /* sent by timer expiration */ +#define SI_MESGQ -4 /* sent by real time mesq state change */ #endif /* _UAPI_ASM_SIGINFO_H */ diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h index 882823bec153..6c755bc07975 100644 --- a/arch/mips/include/uapi/asm/socket.h +++ b/arch/mips/include/uapi/asm/socket.h @@ -120,4 +120,6 @@ #define SO_PEERGROUPS 59 +#define SO_ZEROCOPY 60 + #endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index 46c0581256f1..07f0f4a4b562 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -35,11 +35,15 @@ obj-$(CONFIG_MODULES) += module.o obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o -obj-$(CONFIG_CPU_R4K_FPU) += r4k_fpu.o r4k_switch.o -obj-$(CONFIG_CPU_R3000) += r2300_fpu.o r2300_switch.o -obj-$(CONFIG_CPU_R6000) += r6000_fpu.o r4k_switch.o -obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o r2300_switch.o -obj-$(CONFIG_CPU_CAVIUM_OCTEON) += r4k_fpu.o octeon_switch.o +sw-y := r4k_switch.o +sw-$(CONFIG_CPU_R3000) := r2300_switch.o +sw-$(CONFIG_CPU_TX39XX) := r2300_switch.o +sw-$(CONFIG_CPU_CAVIUM_OCTEON) := octeon_switch.o +obj-y += $(sw-y) + +obj-$(CONFIG_CPU_R4K_FPU) += r4k_fpu.o +obj-$(CONFIG_CPU_R3000) += r2300_fpu.o +obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_SMP_UP) += smp-up.o diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S index b849fe6aad94..d173b49f212d 100644 --- a/arch/mips/kernel/cps-vec.S +++ b/arch/mips/kernel/cps-vec.S @@ -327,8 +327,8 @@ LEAF(mips_cps_get_bootcfg) * to handle contiguous VP numbering, but no such systems yet * exist. 
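MADV_WIPEONFORK and MADV_KEEPONFORK above are new madvise() advice values: after MADV_WIPEONFORK on a private anonymous mapping, children created with fork() see the range as zero-filled while the parent keeps its contents, and MADV_KEEPONFORK clears that behaviour again. A hedged usage sketch, not part of the patch; it assumes a 4.14+ kernel and falls back to the numeric value from the header above if libc does not yet define the flag.

#include <sys/mman.h>
#include <sys/wait.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#ifndef MADV_WIPEONFORK
#define MADV_WIPEONFORK 18      /* value from asm/mman.h above */
#endif

int main(void)
{
        char *secret = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (secret == MAP_FAILED)
                return 1;

        strcpy(secret, "session key material");

        /* Children forked from now on see this page as zeroes */
        if (madvise(secret, 4096, MADV_WIPEONFORK))
                perror("madvise");

        if (fork() == 0) {
                printf("child sees:  \"%s\"\n", secret); /* empty string */
                _exit(0);
        }
        wait(NULL);
        printf("parent sees: \"%s\"\n", secret);         /* still the secret */
        return 0;
}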
*/ - mfc0 t9, $3, 1 - andi t9, t9, 0xff + mfc0 t9, CP0_GLOBALNUMBER + andi t9, t9, MIPS_GLOBALNUMBER_VP #elif defined(CONFIG_MIPS_MT_SMP) has_mt ta2, 1f diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index d08afc7dc507..cf3fd549e16d 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -326,7 +326,7 @@ static int __init fpu_disable(char *s) __setup("nofpu", fpu_disable); -int mips_dsp_disabled; +static int mips_dsp_disabled; static int __init dsp_disable(char *s) { @@ -919,9 +919,12 @@ static void decode_configs(struct cpuinfo_mips *c) #ifndef CONFIG_MIPS_CPS if (cpu_has_mips_r2_r6) { - c->core = get_ebase_cpunum(); + unsigned int core; + + core = get_ebase_cpunum(); if (cpu_has_mipsmt) - c->core >>= fls(core_nvpes()) - 1; + core >>= fls(core_nvpes()) - 1; + cpu_set_core(c, core); } #endif } @@ -1394,24 +1397,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) MIPS_CPU_DIVEC | MIPS_CPU_LLSC; c->tlbsize = 48; break; - case PRID_IMP_R6000: - c->cputype = CPU_R6000; - __cpu_name[cpu] = "R6000"; - set_isa(c, MIPS_CPU_ISA_II); - c->fpu_msk31 |= FPU_CSR_CONDX | FPU_CSR_FS; - c->options = MIPS_CPU_TLB | MIPS_CPU_FPU | - MIPS_CPU_LLSC; - c->tlbsize = 32; - break; - case PRID_IMP_R6000A: - c->cputype = CPU_R6000A; - __cpu_name[cpu] = "R6000A"; - set_isa(c, MIPS_CPU_ISA_II); - c->fpu_msk31 |= FPU_CSR_CONDX | FPU_CSR_FS; - c->options = MIPS_CPU_TLB | MIPS_CPU_FPU | - MIPS_CPU_LLSC; - c->tlbsize = 32; - break; case PRID_IMP_RM7000: c->cputype = CPU_RM7000; __cpu_name[cpu] = "RM7000"; @@ -2113,3 +2098,35 @@ void cpu_report(void) if (cpu_has_msa) pr_info("MSA revision is: %08x\n", c->msa_id); } + +void cpu_set_cluster(struct cpuinfo_mips *cpuinfo, unsigned int cluster) +{ + /* Ensure the core number fits in the field */ + WARN_ON(cluster > (MIPS_GLOBALNUMBER_CLUSTER >> + MIPS_GLOBALNUMBER_CLUSTER_SHF)); + + cpuinfo->globalnumber &= ~MIPS_GLOBALNUMBER_CLUSTER; + cpuinfo->globalnumber |= cluster << MIPS_GLOBALNUMBER_CLUSTER_SHF; +} + +void cpu_set_core(struct cpuinfo_mips *cpuinfo, unsigned int core) +{ + /* Ensure the core number fits in the field */ + WARN_ON(core > (MIPS_GLOBALNUMBER_CORE >> MIPS_GLOBALNUMBER_CORE_SHF)); + + cpuinfo->globalnumber &= ~MIPS_GLOBALNUMBER_CORE; + cpuinfo->globalnumber |= core << MIPS_GLOBALNUMBER_CORE_SHF; +} + +void cpu_set_vpe_id(struct cpuinfo_mips *cpuinfo, unsigned int vpe) +{ + /* Ensure the VP(E) ID fits in the field */ + WARN_ON(vpe > (MIPS_GLOBALNUMBER_VP >> MIPS_GLOBALNUMBER_VP_SHF)); + + /* Ensure we're not using VP(E)s without support */ + WARN_ON(vpe && !IS_ENABLED(CONFIG_MIPS_MT_SMP) && + !IS_ENABLED(CONFIG_CPU_MIPSR6)); + + cpuinfo->globalnumber &= ~MIPS_GLOBALNUMBER_VP; + cpuinfo->globalnumber |= vpe << MIPS_GLOBALNUMBER_VP_SHF; +} diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c index 5c429d70e17f..0828d6d963b7 100644 --- a/arch/mips/kernel/elf.c +++ b/arch/mips/kernel/elf.c @@ -87,6 +87,7 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf, bool elf32; u32 flags; int ret; + loff_t pos; elf32 = ehdr->e32.e_ident[EI_CLASS] == ELFCLASS32; flags = elf32 ? 
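cpu_set_cluster()/cpu_set_core()/cpu_set_vpe_id() above pack their values into cpuinfo->globalnumber using the MIPS_GLOBALNUMBER_* layout defined earlier in this series (VP in bits [7:0], core in [15:8], cluster in [19:16]). The stand-alone sketch below mirrors that clear-then-insert pattern; the GN_* names and the generic set_field() helper are illustrative, not kernel API.

#include <stdio.h>

#define GN_VP_SHF       0
#define GN_VP           (0xffu << GN_VP_SHF)
#define GN_CORE_SHF     8
#define GN_CORE         (0xffu << GN_CORE_SHF)
#define GN_CLUSTER_SHF  16
#define GN_CLUSTER      (0xfu << GN_CLUSTER_SHF)

static void set_field(unsigned int *gn, unsigned int mask, unsigned int shf,
                      unsigned int val)
{
        *gn &= ~mask;           /* clear the old field */
        *gn |= val << shf;      /* insert the new value */
}

int main(void)
{
        unsigned int globalnumber = 0;

        set_field(&globalnumber, GN_CLUSTER, GN_CLUSTER_SHF, 1);
        set_field(&globalnumber, GN_CORE, GN_CORE_SHF, 3);
        set_field(&globalnumber, GN_VP, GN_VP_SHF, 2);

        printf("globalnumber = 0x%05x (cluster %u, core %u, vp %u)\n",
               globalnumber,
               (globalnumber & GN_CLUSTER) >> GN_CLUSTER_SHF,
               (globalnumber & GN_CORE) >> GN_CORE_SHF,
               (globalnumber & GN_VP) >> GN_VP_SHF);
        return 0;
}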
ehdr->e32.e_flags : ehdr->e64.e_flags; @@ -108,21 +109,16 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf, if (phdr32->p_filesz < sizeof(abiflags)) return -EINVAL; - - ret = kernel_read(elf, phdr32->p_offset, - (char *)&abiflags, - sizeof(abiflags)); + pos = phdr32->p_offset; } else { if (phdr64->p_type != PT_MIPS_ABIFLAGS) return 0; if (phdr64->p_filesz < sizeof(abiflags)) return -EINVAL; - - ret = kernel_read(elf, phdr64->p_offset, - (char *)&abiflags, - sizeof(abiflags)); + pos = phdr64->p_offset; } + ret = kernel_read(elf, &abiflags, sizeof(abiflags), &pos); if (ret < 0) return ret; if (ret != sizeof(abiflags)) diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index ae810da4d499..37b9383eacd3 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S @@ -150,6 +150,7 @@ LEAF(__r4k_wait) .align 5 BUILD_ROLLBACK_PROLOGUE handle_int NESTED(handle_int, PT_SIZE, sp) + .cfi_signal_frame #ifdef CONFIG_TRACE_IRQFLAGS /* * Check to see if the interrupted code has just disabled @@ -181,7 +182,7 @@ NESTED(handle_int, PT_SIZE, sp) 1: .set pop #endif - SAVE_ALL + SAVE_ALL docfi=1 CLI TRACE_IRQS_OFF @@ -269,8 +270,8 @@ NESTED(except_vec_ejtag_debug, 0, sp) */ BUILD_ROLLBACK_PROLOGUE except_vec_vi NESTED(except_vec_vi, 0, sp) - SAVE_SOME - SAVE_AT + SAVE_SOME docfi=1 + SAVE_AT docfi=1 .set push .set noreorder PTR_LA v1, except_vec_vi_handler @@ -396,6 +397,7 @@ NESTED(except_vec_nmi, 0, sp) __FINIT NESTED(nmi_handler, PT_SIZE, sp) + .cfi_signal_frame .set push .set noat /* @@ -478,6 +480,7 @@ NESTED(nmi_handler, PT_SIZE, sp) .macro __BUILD_HANDLER exception handler clear verbose ext .align 5 NESTED(handle_\exception, PT_SIZE, sp) + .cfi_signal_frame .set noat SAVE_ALL FEXPORT(handle_\exception\ext) @@ -485,8 +488,8 @@ NESTED(nmi_handler, PT_SIZE, sp) .set at __BUILD_\verbose \exception move a0, sp - PTR_LA ra, ret_from_exception - j do_\handler + jal do_\handler + j ret_from_exception END(handle_\exception) .endm diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c index 60ab4c44d305..7c246b69c545 100644 --- a/arch/mips/kernel/idle.c +++ b/arch/mips/kernel/idle.c @@ -11,6 +11,7 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ +#include #include #include #include diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c index cb0c57f860d4..e91c8c4e2eb5 100644 --- a/arch/mips/kernel/mips-cm.c +++ b/arch/mips/kernel/mips-cm.c @@ -12,10 +12,10 @@ #include #include -#include +#include #include -void __iomem *mips_cm_base; +void __iomem *mips_gcr_base; void __iomem *mips_cm_l2sync_base; int mips_cm_is64; @@ -167,8 +167,8 @@ phys_addr_t __mips_cm_l2sync_phys_base(void) * current location. 
*/ base_reg = read_gcr_l2_only_sync_base(); - if (base_reg & CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN_MSK) - return base_reg & CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE_MSK; + if (base_reg & CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN) + return base_reg & CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE; /* Default to following the CM */ return mips_cm_phys_base() + MIPS_CM_GCR_SIZE; @@ -183,19 +183,19 @@ static void mips_cm_probe_l2sync(void) phys_addr_t addr; /* L2-only sync was introduced with CM major revision 6 */ - major_rev = (read_gcr_rev() & CM_GCR_REV_MAJOR_MSK) >> - CM_GCR_REV_MAJOR_SHF; + major_rev = (read_gcr_rev() & CM_GCR_REV_MAJOR) >> + __ffs(CM_GCR_REV_MAJOR); if (major_rev < 6) return; /* Find a location for the L2 sync region */ addr = mips_cm_l2sync_phys_base(); - BUG_ON((addr & CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE_MSK) != addr); + BUG_ON((addr & CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE) != addr); if (!addr) return; /* Set the region base address & enable it */ - write_gcr_l2_only_sync_base(addr | CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN_MSK); + write_gcr_l2_only_sync_base(addr | CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN); /* Map the region */ mips_cm_l2sync_base = ioremap_nocache(addr, MIPS_CM_L2SYNC_SIZE); @@ -211,41 +211,39 @@ int mips_cm_probe(void) * No need to probe again if we have already been * here before. */ - if (mips_cm_base) + if (mips_gcr_base) return 0; addr = mips_cm_phys_base(); - BUG_ON((addr & CM_GCR_BASE_GCRBASE_MSK) != addr); + BUG_ON((addr & CM_GCR_BASE_GCRBASE) != addr); if (!addr) return -ENODEV; - mips_cm_base = ioremap_nocache(addr, MIPS_CM_GCR_SIZE); - if (!mips_cm_base) + mips_gcr_base = ioremap_nocache(addr, MIPS_CM_GCR_SIZE); + if (!mips_gcr_base) return -ENXIO; /* sanity check that we're looking at a CM */ base_reg = read_gcr_base(); - if ((base_reg & CM_GCR_BASE_GCRBASE_MSK) != addr) { + if ((base_reg & CM_GCR_BASE_GCRBASE) != addr) { pr_err("GCRs appear to have been moved (expected them at 0x%08lx)!\n", (unsigned long)addr); - mips_cm_base = NULL; + mips_gcr_base = NULL; return -ENODEV; } /* set default target to memory */ - base_reg &= ~CM_GCR_BASE_CMDEFTGT_MSK; - base_reg |= CM_GCR_BASE_CMDEFTGT_MEM; - write_gcr_base(base_reg); + change_gcr_base(CM_GCR_BASE_CMDEFTGT, CM_GCR_BASE_CMDEFTGT_MEM); /* disable CM regions */ - write_gcr_reg0_base(CM_GCR_REGn_BASE_BASEADDR_MSK); - write_gcr_reg0_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK); - write_gcr_reg1_base(CM_GCR_REGn_BASE_BASEADDR_MSK); - write_gcr_reg1_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK); - write_gcr_reg2_base(CM_GCR_REGn_BASE_BASEADDR_MSK); - write_gcr_reg2_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK); - write_gcr_reg3_base(CM_GCR_REGn_BASE_BASEADDR_MSK); - write_gcr_reg3_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK); + write_gcr_reg0_base(CM_GCR_REGn_BASE_BASEADDR); + write_gcr_reg0_mask(CM_GCR_REGn_MASK_ADDRMASK); + write_gcr_reg1_base(CM_GCR_REGn_BASE_BASEADDR); + write_gcr_reg1_mask(CM_GCR_REGn_MASK_ADDRMASK); + write_gcr_reg2_base(CM_GCR_REGn_BASE_BASEADDR); + write_gcr_reg2_mask(CM_GCR_REGn_MASK_ADDRMASK); + write_gcr_reg3_base(CM_GCR_REGn_BASE_BASEADDR); + write_gcr_reg3_mask(CM_GCR_REGn_MASK_ADDRMASK); /* probe for an L2-only sync region */ mips_cm_probe_l2sync(); @@ -259,16 +257,27 @@ int mips_cm_probe(void) return 0; } -void mips_cm_lock_other(unsigned int core, unsigned int vp) +void mips_cm_lock_other(unsigned int cluster, unsigned int core, + unsigned int vp, unsigned int block) { - unsigned curr_core; + unsigned int curr_core, cm_rev; u32 val; + cm_rev = mips_cm_revision(); preempt_disable(); - if (mips_cm_revision() >= CM_REV_CM3) { - val = core << 
CM3_GCR_Cx_OTHER_CORE_SHF; - val |= vp << CM3_GCR_Cx_OTHER_VP_SHF; + if (cm_rev >= CM_REV_CM3) { + val = core << __ffs(CM3_GCR_Cx_OTHER_CORE); + val |= vp << __ffs(CM3_GCR_Cx_OTHER_VP); + + if (cm_rev >= CM_REV_CM3_5) { + val |= CM_GCR_Cx_OTHER_CLUSTER_EN; + val |= cluster << __ffs(CM_GCR_Cx_OTHER_CLUSTER); + val |= block << __ffs(CM_GCR_Cx_OTHER_BLOCK); + } else { + WARN_ON(cluster != 0); + WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL); + } /* * We need to disable interrupts in SMP systems in order to @@ -282,18 +291,20 @@ void mips_cm_lock_other(unsigned int core, unsigned int vp) spin_lock_irqsave(this_cpu_ptr(&cm_core_lock), *this_cpu_ptr(&cm_core_lock_flags)); } else { + WARN_ON(cluster != 0); WARN_ON(vp != 0); + WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL); /* * We only have a GCR_CL_OTHER per core in systems with * CM 2.5 & older, so have to ensure other VP(E)s don't * race with us. */ - curr_core = current_cpu_data.core; + curr_core = cpu_core(¤t_cpu_data); spin_lock_irqsave(&per_cpu(cm_core_lock, curr_core), per_cpu(cm_core_lock_flags, curr_core)); - val = core << CM_GCR_Cx_OTHER_CORENUM_SHF; + val = core << __ffs(CM_GCR_Cx_OTHER_CORENUM); } write_gcr_cl_other(val); @@ -310,7 +321,7 @@ void mips_cm_unlock_other(void) unsigned int curr_core; if (mips_cm_revision() < CM_REV_CM3) { - curr_core = current_cpu_data.core; + curr_core = cpu_core(¤t_cpu_data); spin_unlock_irqrestore(&per_cpu(cm_core_lock, curr_core), per_cpu(cm_core_lock_flags, curr_core)); } else { @@ -332,13 +343,13 @@ void mips_cm_error_report(void) return; revision = mips_cm_revision(); + cm_error = read_gcr_error_cause(); + cm_addr = read_gcr_error_addr(); + cm_other = read_gcr_error_mult(); if (revision < CM_REV_CM3) { /* CM2 */ - cm_error = read_gcr_error_cause(); - cm_addr = read_gcr_error_addr(); - cm_other = read_gcr_error_mult(); - cause = cm_error >> CM_GCR_ERROR_CAUSE_ERRTYPE_SHF; - ocause = cm_other >> CM_GCR_ERROR_MULT_ERR2ND_SHF; + cause = cm_error >> __ffs(CM_GCR_ERROR_CAUSE_ERRTYPE); + ocause = cm_other >> __ffs(CM_GCR_ERROR_MULT_ERR2ND); if (!cause) return; @@ -380,11 +391,8 @@ void mips_cm_error_report(void) ulong core_id_bits, vp_id_bits, cmd_bits, cmd_group_bits; ulong cm3_cca_bits, mcp_bits, cm3_tr_bits, sched_bit; - cm_error = read64_gcr_error_cause(); - cm_addr = read64_gcr_error_addr(); - cm_other = read64_gcr_error_mult(); - cause = cm_error >> CM3_GCR_ERROR_CAUSE_ERRTYPE_SHF; - ocause = cm_other >> CM_GCR_ERROR_MULT_ERR2ND_SHF; + cause = cm_error >> __ffs64(CM3_GCR_ERROR_CAUSE_ERRTYPE); + ocause = cm_other >> __ffs(CM_GCR_ERROR_MULT_ERR2ND); if (!cause) return; diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c index a4964c334cab..f66b05ebf637 100644 --- a/arch/mips/kernel/mips-cpc.c +++ b/arch/mips/kernel/mips-cpc.c @@ -12,8 +12,7 @@ #include #include -#include -#include +#include void __iomem *mips_cpc_base; @@ -40,13 +39,13 @@ static phys_addr_t mips_cpc_phys_base(void) if (!mips_cm_present()) return 0; - if (!(read_gcr_cpc_status() & CM_GCR_CPC_STATUS_EX_MSK)) + if (!(read_gcr_cpc_status() & CM_GCR_CPC_STATUS_EX)) return 0; /* If the CPC is already enabled, leave it so */ cpc_base = read_gcr_cpc_base(); - if (cpc_base & CM_GCR_CPC_BASE_CPCEN_MSK) - return cpc_base & CM_GCR_CPC_BASE_CPCBASE_MSK; + if (cpc_base & CM_GCR_CPC_BASE_CPCEN) + return cpc_base & CM_GCR_CPC_BASE_CPCBASE; /* Otherwise, use the default address */ cpc_base = mips_cpc_default_phys_base(); @@ -54,7 +53,7 @@ static phys_addr_t mips_cpc_phys_base(void) return cpc_base; /* Enable the CPC, mapped at the 
default address */ - write_gcr_cpc_base(cpc_base | CM_GCR_CPC_BASE_CPCEN_MSK); + write_gcr_cpc_base(cpc_base | CM_GCR_CPC_BASE_CPCEN); return cpc_base; } @@ -86,10 +85,10 @@ void mips_cpc_lock_other(unsigned int core) return; preempt_disable(); - curr_core = current_cpu_data.core; + curr_core = cpu_core(¤t_cpu_data); spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core), per_cpu(cpc_core_lock_flags, curr_core)); - write_cpc_cl_other(core << CPC_Cx_OTHER_CORENUM_SHF); + write_cpc_cl_other(core << __ffs(CPC_Cx_OTHER_CORENUM)); /* * Ensure the core-other region reflects the appropriate core & @@ -106,7 +105,7 @@ void mips_cpc_unlock_other(void) /* Systems with CM >= 3 lock the CPC via mips_cm_lock_other */ return; - curr_core = current_cpu_data.core; + curr_core = cpu_core(¤t_cpu_data); spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core), per_cpu(cpc_core_lock_flags, curr_core)); preempt_enable(); diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c index ae64c8f56a8c..eb18b186e858 100644 --- a/arch/mips/kernel/mips-r2-to-r6-emul.c +++ b/arch/mips/kernel/mips-r2-to-r6-emul.c @@ -46,9 +46,11 @@ #define LL "ll " #define SC "sc " -DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2emustats); -DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2bdemustats); -DEFINE_PER_CPU(struct mips_r2br_emulator_stats, mipsr2bremustats); +#ifdef CONFIG_DEBUG_FS +static DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2emustats); +static DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2bdemustats); +static DEFINE_PER_CPU(struct mips_r2br_emulator_stats, mipsr2bremustats); +#endif extern const unsigned int fpucondbit[8]; @@ -600,7 +602,7 @@ static int ddivu_func(struct pt_regs *regs, u32 ir) } /* R6 removed instructions for the SPECIAL opcode */ -static struct r2_decoder_table spec_op_table[] = { +static const struct r2_decoder_table spec_op_table[] = { { 0xfc1ff83f, 0x00000008, jr_func }, { 0xfc00ffff, 0x00000018, mult_func }, { 0xfc00ffff, 0x00000019, multu_func }, @@ -867,7 +869,7 @@ static int dclo_func(struct pt_regs *regs, u32 ir) } /* R6 removed instructions for the SPECIAL2 opcode */ -static struct r2_decoder_table spec2_op_table[] = { +static const struct r2_decoder_table spec2_op_table[] = { { 0xfc00ffff, 0x70000000, madd_func }, { 0xfc00ffff, 0x70000001, maddu_func }, { 0xfc0007ff, 0x70000002, mul_func }, @@ -881,9 +883,9 @@ static struct r2_decoder_table spec2_op_table[] = { }; static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst, - struct r2_decoder_table *table) + const struct r2_decoder_table *table) { - struct r2_decoder_table *p; + const struct r2_decoder_table *p; int err; for (p = table; p->func; p++) { diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S index 3375745b9198..e42113fe2762 100644 --- a/arch/mips/kernel/octeon_switch.S +++ b/arch/mips/kernel/octeon_switch.S @@ -10,12 +10,13 @@ * Copyright (C) 2000 MIPS Technologies, Inc. 
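mipsr2_find_op_func() above walks a table of { mask, encoding, handler } entries and dispatches the first match, with a NULL handler terminating the table. The stand-alone sketch below models that dispatch: the struct and field names are made up, the jr mask/match pair is copied from spec_op_table, and the real emulator falls back to signalling an illegal instruction when nothing matches.

#include <stdint.h>
#include <stdio.h>

struct decoder_entry {
        uint32_t mask;
        uint32_t match;
        int (*func)(uint32_t inst);
};

static int jr_func(uint32_t inst)
{
        /* rs lives in bits [25:21] of the encoding */
        printf("emulate jr $%u\n", (unsigned int)((inst >> 21) & 0x1f));
        return 0;
}

static const struct decoder_entry table[] = {
        { 0xfc1ff83f, 0x00000008, jr_func },    /* pair taken from spec_op_table */
        { 0 }                                   /* func == NULL terminates the walk */
};

static int find_op_func(uint32_t inst)
{
        const struct decoder_entry *p;

        for (p = table; p->func; p++)
                if ((inst & p->mask) == p->match)
                        return p->func(inst);

        return -1;      /* no handler: the real code raises SIGILL instead */
}

int main(void)
{
        uint32_t jr_ra = 0x03e00008;    /* "jr $ra" */

        return find_op_func(jr_ra) ? 1 : 0;
}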
* written by Carsten Langgaard, carstenl@mips.com */ +#include +#include +#include +#include +#include +#include -#define USE_ALTERNATE_RESUME_IMPL 1 - .set push - .set arch=mips64r2 -#include "r4k_switch.S" - .set pop /* * task_struct *resume(task_struct *prev, task_struct *next, * struct thread_info *next_ti) diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index 9e6c74bf66c4..6668f67a61c3 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c @@ -618,8 +618,7 @@ static int mipspmu_event_init(struct perf_event *event) return -ENOENT; } - if ((unsigned int)event->cpu >= nr_cpumask_bits || - (event->cpu >= 0 && !cpu_online(event->cpu))) + if (event->cpu >= 0 && !cpu_online(event->cpu)) return -ENODEV; if (!atomic_inc_not_zero(&active_events)) { diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c index d99416094ba9..4655017f2377 100644 --- a/arch/mips/kernel/pm-cps.c +++ b/arch/mips/kernel/pm-cps.c @@ -17,8 +17,7 @@ #include #include #include -#include -#include +#include #include #include #include @@ -49,7 +48,7 @@ static DEFINE_PER_CPU_READ_MOSTLY(cps_nc_entry_fn[CPS_PM_STATE_COUNT], nc_asm_enter); /* Bitmap indicating which states are supported by the system */ -DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT); +static DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT); /* * Indicates the number of coupled VPEs ready to operate in a non-coherent @@ -114,7 +113,7 @@ static void coupled_barrier(atomic_t *a, unsigned online) int cps_pm_enter_state(enum cps_pm_state state) { unsigned cpu = smp_processor_id(); - unsigned core = current_cpu_data.core; + unsigned core = cpu_core(¤t_cpu_data); unsigned online, left; cpumask_t *coupled_mask = this_cpu_ptr(&online_coupled); u32 *core_ready_count, *nc_core_ready_count; @@ -486,7 +485,7 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) * defined by the interAptiv & proAptiv SUMs as ensuring that the * operation resulting from the preceding store is complete. */ - uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core); + uasm_i_addiu(&p, t0, zero, 1 << cpu_core(&cpu_data[cpu])); uasm_i_sw(&p, t0, 0, r_pcohctl); uasm_i_lw(&p, t0, 0, r_pcohctl); @@ -569,8 +568,8 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) * rest will just be performing a rather unusual nop. */ uasm_i_addiu(&p, t0, zero, mips_cm_revision() < CM_REV_CM3 - ? CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK - : CM3_GCR_Cx_COHERENCE_COHEN_MSK); + ? 
CM_GCR_Cx_COHERENCE_COHDOMAINEN + : CM3_GCR_Cx_COHERENCE_COHEN); uasm_i_sw(&p, t0, 0, r_pcohctl); uasm_i_lw(&p, t0, 0, r_pcohctl); @@ -640,7 +639,7 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) static int cps_pm_online_cpu(unsigned int cpu) { enum cps_pm_state state; - unsigned core = cpu_data[cpu].core; + unsigned core = cpu_core(&cpu_data[cpu]); void *entry_fn, *core_rc; for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) { @@ -692,7 +691,7 @@ static int __init cps_pm_init(void) /* Detect whether a CPC is present */ if (mips_cpc_present()) { /* Detect whether clock gating is implemented */ - if (read_cpc_cl_stat_conf() & CPC_Cx_STAT_CONF_CLKGAT_IMPL_MSK) + if (read_cpc_cl_stat_conf() & CPC_Cx_STAT_CONF_CLKGAT_IMPL) set_bit(CPS_PM_CLOCK_GATED, state_support); else pr_warn("pm-cps: CPC does not support clock gating\n"); diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index 70604c753aa4..bd9bf528f19b 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c @@ -134,13 +134,13 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, "kscratch registers\t: %d\n", hweight8(cpu_data[n].kscratch_mask)); seq_printf(m, "package\t\t\t: %d\n", cpu_data[n].package); - seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core); + seq_printf(m, "core\t\t\t: %d\n", cpu_core(&cpu_data[n])); #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6) if (cpu_has_mipsmt) - seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id); + seq_printf(m, "VPE\t\t\t: %d\n", cpu_vpe_id(&cpu_data[n])); else if (cpu_has_vp) - seq_printf(m, "VP\t\t\t: %d\n", cpu_data[n].vpe_id); + seq_printf(m, "VP\t\t\t: %d\n", cpu_vpe_id(&cpu_data[n])); #endif sprintf(fmt, "VCE%%c exceptions\t\t: %s\n", diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 5351e1f3950d..c5ff6bfe2825 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -208,13 +208,13 @@ static inline int is_ra_save_ins(union mips_instruction *ip, int *poff) * * microMIPS is way more fun... */ - if (mm_insn_16bit(ip->halfword[1])) { + if (mm_insn_16bit(ip->word >> 16)) { switch (ip->mm16_r5_format.opcode) { case mm_swsp16_op: if (ip->mm16_r5_format.rt != 31) return 0; - *poff = ip->mm16_r5_format.simmediate; + *poff = ip->mm16_r5_format.imm; *poff = (*poff << 2) / sizeof(ulong); return 1; @@ -287,7 +287,7 @@ static inline int is_jump_ins(union mips_instruction *ip) * * microMIPS is kind of more fun... */ - if (mm_insn_16bit(ip->halfword[1])) { + if (mm_insn_16bit(ip->word >> 16)) { if ((ip->mm16_r5_format.opcode == mm_pool16c_op && (ip->mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op)) return 1; @@ -313,9 +313,11 @@ static inline int is_jump_ins(union mips_instruction *ip) #endif } -static inline int is_sp_move_ins(union mips_instruction *ip) +static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size) { #ifdef CONFIG_CPU_MICROMIPS + unsigned short tmp; + /* * addiusp -imm * addius5 sp,-imm @@ -324,21 +326,40 @@ static inline int is_sp_move_ins(union mips_instruction *ip) * * microMIPS is not more fun... 
*/ - if (mm_insn_16bit(ip->halfword[1])) { - return (ip->mm16_r3_format.opcode == mm_pool16d_op && - ip->mm16_r3_format.simmediate && mm_addiusp_func) || - (ip->mm16_r5_format.opcode == mm_pool16d_op && - ip->mm16_r5_format.rt == 29); + if (mm_insn_16bit(ip->word >> 16)) { + if (ip->mm16_r3_format.opcode == mm_pool16d_op && + ip->mm16_r3_format.simmediate & mm_addiusp_func) { + tmp = ip->mm_b0_format.simmediate >> 1; + tmp = ((tmp & 0x1ff) ^ 0x100) - 0x100; + if ((tmp + 2) < 4) /* 0x0,0x1,0x1fe,0x1ff are special */ + tmp ^= 0x100; + *frame_size = -(signed short)(tmp << 2); + return 1; + } + if (ip->mm16_r5_format.opcode == mm_pool16d_op && + ip->mm16_r5_format.rt == 29) { + tmp = ip->mm16_r5_format.imm >> 1; + *frame_size = -(signed short)(tmp & 0xf); + return 1; + } + return 0; } - return ip->mm_i_format.opcode == mm_addiu32_op && - ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29; + if (ip->mm_i_format.opcode == mm_addiu32_op && + ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29) { + *frame_size = -ip->i_format.simmediate; + return 1; + } #else /* addiu/daddiu sp,sp,-imm */ if (ip->i_format.rs != 29 || ip->i_format.rt != 29) return 0; - if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op) + + if (ip->i_format.opcode == addiu_op || + ip->i_format.opcode == daddiu_op) { + *frame_size = -ip->i_format.simmediate; return 1; + } #endif return 0; } @@ -348,7 +369,9 @@ static int get_frame_info(struct mips_frame_info *info) bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS); union mips_instruction insn, *ip, *ip_end; const unsigned int max_insns = 128; + unsigned int last_insn_size = 0; unsigned int i; + bool saw_jump = false; info->pc_offset = -1; info->frame_size = 0; @@ -359,47 +382,44 @@ static int get_frame_info(struct mips_frame_info *info) ip_end = (void *)ip + info->func_size; - for (i = 0; i < max_insns && ip < ip_end; i++, ip++) { + for (i = 0; i < max_insns && ip < ip_end; i++) { + ip = (void *)ip + last_insn_size; if (is_mmips && mm_insn_16bit(ip->halfword[0])) { - insn.halfword[0] = 0; - insn.halfword[1] = ip->halfword[0]; + insn.word = ip->halfword[0] << 16; + last_insn_size = 2; } else if (is_mmips) { - insn.halfword[0] = ip->halfword[1]; - insn.halfword[1] = ip->halfword[0]; + insn.word = ip->halfword[0] << 16 | ip->halfword[1]; + last_insn_size = 4; } else { insn.word = ip->word; + last_insn_size = 4; } - if (is_jump_ins(&insn)) - break; - if (!info->frame_size) { - if (is_sp_move_ins(&insn)) - { -#ifdef CONFIG_CPU_MICROMIPS - if (mm_insn_16bit(ip->halfword[0])) - { - unsigned short tmp; - - if (ip->halfword[0] & mm_addiusp_func) - { - tmp = (((ip->halfword[0] >> 1) & 0x1ff) << 2); - info->frame_size = -(signed short)(tmp | ((tmp & 0x100) ? 0xfe00 : 0)); - } else { - tmp = (ip->halfword[0] >> 1); - info->frame_size = -(signed short)(tmp & 0xf); - } - ip = (void *) &ip->halfword[1]; - ip--; - } else -#endif - info->frame_size = - ip->i_format.simmediate; - } + is_sp_move_ins(&insn, &info->frame_size); + continue; + } else if (!saw_jump && is_jump_ins(ip)) { + /* + * If we see a jump instruction, we are finished + * with the frame save. + * + * Some functions can have a shortcut return at + * the beginning of the function, so don't start + * looking for jump instruction until we see the + * frame setup. + * + * The RA save instruction can get put into the + * delay slot of the jump instruction, so look + * at the next instruction, too. 
+ */ + saw_jump = true; continue; } if (info->pc_offset == -1 && is_ra_save_ins(&insn, &info->pc_offset)) break; + if (saw_jump) + break; } if (info->frame_size && info->pc_offset >= 0) /* nested */ return 0; diff --git a/arch/mips/kernel/r2300_fpu.S b/arch/mips/kernel/r2300_fpu.S index 918f2f6d3861..3062ba66c563 100644 --- a/arch/mips/kernel/r2300_fpu.S +++ b/arch/mips/kernel/r2300_fpu.S @@ -12,7 +12,9 @@ * Copyright (c) 1998 Harald Koerfgen */ #include +#include #include +#include #include #include #include @@ -31,9 +33,85 @@ PTR 9b+4,bad_stack; \ .previous - .set noreorder .set mips1 +/* + * Save a thread's fp context. + */ +LEAF(_save_fp) +EXPORT_SYMBOL(_save_fp) + fpu_save_single a0, t1 # clobbers t1 + jr ra + END(_save_fp) + +/* + * Restore a thread's fp context. + */ +LEAF(_restore_fp) + fpu_restore_single a0, t1 # clobbers t1 + jr ra + END(_restore_fp) + +/* + * Load the FPU with signalling NANS. This bit pattern we're using has + * the property that no matter whether considered as single or as double + * precision represents signaling NANS. + * + * The value to initialize fcr31 to comes in $a0. + */ + + .set push + SET_HARDFLOAT + +LEAF(_init_fpu) + mfc0 t0, CP0_STATUS + li t1, ST0_CU1 + or t0, t1 + mtc0 t0, CP0_STATUS + + ctc1 a0, fcr31 + + li t0, -1 + + mtc1 t0, $f0 + mtc1 t0, $f1 + mtc1 t0, $f2 + mtc1 t0, $f3 + mtc1 t0, $f4 + mtc1 t0, $f5 + mtc1 t0, $f6 + mtc1 t0, $f7 + mtc1 t0, $f8 + mtc1 t0, $f9 + mtc1 t0, $f10 + mtc1 t0, $f11 + mtc1 t0, $f12 + mtc1 t0, $f13 + mtc1 t0, $f14 + mtc1 t0, $f15 + mtc1 t0, $f16 + mtc1 t0, $f17 + mtc1 t0, $f18 + mtc1 t0, $f19 + mtc1 t0, $f20 + mtc1 t0, $f21 + mtc1 t0, $f22 + mtc1 t0, $f23 + mtc1 t0, $f24 + mtc1 t0, $f25 + mtc1 t0, $f26 + mtc1 t0, $f27 + mtc1 t0, $f28 + mtc1 t0, $f29 + mtc1 t0, $f30 + mtc1 t0, $f31 + jr ra + END(_init_fpu) + + .set pop + + .set noreorder + /** * _save_fp_context() - save FP context from the FPU * @a0 - pointer to fpregs field of sigcontext diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S index 1049eeafd97d..e57703b1de50 100644 --- a/arch/mips/kernel/r2300_switch.S +++ b/arch/mips/kernel/r2300_switch.S @@ -25,12 +25,6 @@ .set mips1 .align 5 -/* - * Offset to the current process status flags, the first 32 bytes of the - * stack are not used. - */ -#define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS) - /* * task_struct *resume(task_struct *prev, task_struct *next, * struct thread_info *next_ti) @@ -68,78 +62,3 @@ LEAF(resume) move v0, a0 jr ra END(resume) - -/* - * Save a thread's fp context. - */ -LEAF(_save_fp) -EXPORT_SYMBOL(_save_fp) - fpu_save_single a0, t1 # clobbers t1 - jr ra - END(_save_fp) - -/* - * Restore a thread's fp context. - */ -LEAF(_restore_fp) - fpu_restore_single a0, t1 # clobbers t1 - jr ra - END(_restore_fp) - -/* - * Load the FPU with signalling NANS. This bit pattern we're using has - * the property that no matter whether considered as single or as double - * precision represents signaling NANS. - * - * The value to initialize fcr31 to comes in $a0. 
- */ - - .set push - SET_HARDFLOAT - -LEAF(_init_fpu) - mfc0 t0, CP0_STATUS - li t1, ST0_CU1 - or t0, t1 - mtc0 t0, CP0_STATUS - - ctc1 a0, fcr31 - - li t0, -1 - - mtc1 t0, $f0 - mtc1 t0, $f1 - mtc1 t0, $f2 - mtc1 t0, $f3 - mtc1 t0, $f4 - mtc1 t0, $f5 - mtc1 t0, $f6 - mtc1 t0, $f7 - mtc1 t0, $f8 - mtc1 t0, $f9 - mtc1 t0, $f10 - mtc1 t0, $f11 - mtc1 t0, $f12 - mtc1 t0, $f13 - mtc1 t0, $f14 - mtc1 t0, $f15 - mtc1 t0, $f16 - mtc1 t0, $f17 - mtc1 t0, $f18 - mtc1 t0, $f19 - mtc1 t0, $f20 - mtc1 t0, $f21 - mtc1 t0, $f22 - mtc1 t0, $f23 - mtc1 t0, $f24 - mtc1 t0, $f25 - mtc1 t0, $f26 - mtc1 t0, $f27 - mtc1 t0, $f28 - mtc1 t0, $f29 - mtc1 t0, $f30 - mtc1 t0, $f31 - jr ra - END(_init_fpu) - - .set pop diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S index 56d86b09c917..0a83b1708b3c 100644 --- a/arch/mips/kernel/r4k_fpu.S +++ b/arch/mips/kernel/r4k_fpu.S @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -34,6 +35,201 @@ .previous .endm +/* + * Save a thread's fp context. + */ +LEAF(_save_fp) +EXPORT_SYMBOL(_save_fp) +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ + defined(CONFIG_CPU_MIPS32_R6) + mfc0 t0, CP0_STATUS +#endif + fpu_save_double a0 t0 t1 # clobbers t1 + jr ra + END(_save_fp) + +/* + * Restore a thread's fp context. + */ +LEAF(_restore_fp) +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ + defined(CONFIG_CPU_MIPS32_R6) + mfc0 t0, CP0_STATUS +#endif + fpu_restore_double a0 t0 t1 # clobbers t1 + jr ra + END(_restore_fp) + +#ifdef CONFIG_CPU_HAS_MSA + +/* + * Save a thread's MSA vector context. + */ +LEAF(_save_msa) +EXPORT_SYMBOL(_save_msa) + msa_save_all a0 + jr ra + END(_save_msa) + +/* + * Restore a thread's MSA vector context. + */ +LEAF(_restore_msa) + msa_restore_all a0 + jr ra + END(_restore_msa) + +LEAF(_init_msa_upper) + msa_init_all_upper + jr ra + END(_init_msa_upper) + +#endif + +/* + * Load the FPU with signalling NANS. This bit pattern we're using has + * the property that no matter whether considered as single or as double + * precision represents signaling NANS. + * + * The value to initialize fcr31 to comes in $a0. + */ + + .set push + SET_HARDFLOAT + +LEAF(_init_fpu) + mfc0 t0, CP0_STATUS + li t1, ST0_CU1 + or t0, t1 + mtc0 t0, CP0_STATUS + enable_fpu_hazard + + ctc1 a0, fcr31 + + li t1, -1 # SNaN + +#ifdef CONFIG_64BIT + sll t0, t0, 5 + bgez t0, 1f # 16 / 32 register mode? + + dmtc1 t1, $f1 + dmtc1 t1, $f3 + dmtc1 t1, $f5 + dmtc1 t1, $f7 + dmtc1 t1, $f9 + dmtc1 t1, $f11 + dmtc1 t1, $f13 + dmtc1 t1, $f15 + dmtc1 t1, $f17 + dmtc1 t1, $f19 + dmtc1 t1, $f21 + dmtc1 t1, $f23 + dmtc1 t1, $f25 + dmtc1 t1, $f27 + dmtc1 t1, $f29 + dmtc1 t1, $f31 +1: +#endif + +#ifdef CONFIG_CPU_MIPS32 + mtc1 t1, $f0 + mtc1 t1, $f1 + mtc1 t1, $f2 + mtc1 t1, $f3 + mtc1 t1, $f4 + mtc1 t1, $f5 + mtc1 t1, $f6 + mtc1 t1, $f7 + mtc1 t1, $f8 + mtc1 t1, $f9 + mtc1 t1, $f10 + mtc1 t1, $f11 + mtc1 t1, $f12 + mtc1 t1, $f13 + mtc1 t1, $f14 + mtc1 t1, $f15 + mtc1 t1, $f16 + mtc1 t1, $f17 + mtc1 t1, $f18 + mtc1 t1, $f19 + mtc1 t1, $f20 + mtc1 t1, $f21 + mtc1 t1, $f22 + mtc1 t1, $f23 + mtc1 t1, $f24 + mtc1 t1, $f25 + mtc1 t1, $f26 + mtc1 t1, $f27 + mtc1 t1, $f28 + mtc1 t1, $f29 + mtc1 t1, $f30 + mtc1 t1, $f31 + +#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) + .set push + .set MIPS_ISA_LEVEL_RAW + .set fp=64 + sll t0, t0, 5 # is Status.FR set? 
+ bgez t0, 1f # no: skip setting upper 32b + + mthc1 t1, $f0 + mthc1 t1, $f1 + mthc1 t1, $f2 + mthc1 t1, $f3 + mthc1 t1, $f4 + mthc1 t1, $f5 + mthc1 t1, $f6 + mthc1 t1, $f7 + mthc1 t1, $f8 + mthc1 t1, $f9 + mthc1 t1, $f10 + mthc1 t1, $f11 + mthc1 t1, $f12 + mthc1 t1, $f13 + mthc1 t1, $f14 + mthc1 t1, $f15 + mthc1 t1, $f16 + mthc1 t1, $f17 + mthc1 t1, $f18 + mthc1 t1, $f19 + mthc1 t1, $f20 + mthc1 t1, $f21 + mthc1 t1, $f22 + mthc1 t1, $f23 + mthc1 t1, $f24 + mthc1 t1, $f25 + mthc1 t1, $f26 + mthc1 t1, $f27 + mthc1 t1, $f28 + mthc1 t1, $f29 + mthc1 t1, $f30 + mthc1 t1, $f31 +1: .set pop +#endif /* CONFIG_CPU_MIPS32_R2 || CONFIG_CPU_MIPS32_R6 */ +#else + .set MIPS_ISA_ARCH_LEVEL_RAW + dmtc1 t1, $f0 + dmtc1 t1, $f2 + dmtc1 t1, $f4 + dmtc1 t1, $f6 + dmtc1 t1, $f8 + dmtc1 t1, $f10 + dmtc1 t1, $f12 + dmtc1 t1, $f14 + dmtc1 t1, $f16 + dmtc1 t1, $f18 + dmtc1 t1, $f20 + dmtc1 t1, $f22 + dmtc1 t1, $f24 + dmtc1 t1, $f26 + dmtc1 t1, $f28 + dmtc1 t1, $f30 +#endif + jr ra + END(_init_fpu) + + .set pop /* SET_HARDFLOAT */ + .set noreorder /** diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S index 7b386d54fd65..17cf9341c1cf 100644 --- a/arch/mips/kernel/r4k_switch.S +++ b/arch/mips/kernel/r4k_switch.S @@ -12,8 +12,6 @@ */ #include #include -#include -#include #include #include #include @@ -22,10 +20,6 @@ #include -/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */ -#undef fp - -#ifndef USE_ALTERNATE_RESUME_IMPL /* * task_struct *resume(task_struct *prev, task_struct *next, * struct thread_info *next_ti) @@ -63,200 +57,3 @@ move v0, a0 jr ra END(resume) - -#endif /* USE_ALTERNATE_RESUME_IMPL */ - -/* - * Save a thread's fp context. - */ -LEAF(_save_fp) -EXPORT_SYMBOL(_save_fp) -#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ - defined(CONFIG_CPU_MIPS32_R6) - mfc0 t0, CP0_STATUS -#endif - fpu_save_double a0 t0 t1 # clobbers t1 - jr ra - END(_save_fp) - -/* - * Restore a thread's fp context. - */ -LEAF(_restore_fp) -#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ - defined(CONFIG_CPU_MIPS32_R6) - mfc0 t0, CP0_STATUS -#endif - fpu_restore_double a0 t0 t1 # clobbers t1 - jr ra - END(_restore_fp) - -#ifdef CONFIG_CPU_HAS_MSA - -/* - * Save a thread's MSA vector context. - */ -LEAF(_save_msa) -EXPORT_SYMBOL(_save_msa) - msa_save_all a0 - jr ra - END(_save_msa) - -/* - * Restore a thread's MSA vector context. - */ -LEAF(_restore_msa) - msa_restore_all a0 - jr ra - END(_restore_msa) - -LEAF(_init_msa_upper) - msa_init_all_upper - jr ra - END(_init_msa_upper) - -#endif - -/* - * Load the FPU with signalling NANS. This bit pattern we're using has - * the property that no matter whether considered as single or as double - * precision represents signaling NANS. - * - * The value to initialize fcr31 to comes in $a0. - */ - - .set push - SET_HARDFLOAT - -LEAF(_init_fpu) - mfc0 t0, CP0_STATUS - li t1, ST0_CU1 - or t0, t1 - mtc0 t0, CP0_STATUS - enable_fpu_hazard - - ctc1 a0, fcr31 - - li t1, -1 # SNaN - -#ifdef CONFIG_64BIT - sll t0, t0, 5 - bgez t0, 1f # 16 / 32 register mode? 
- - dmtc1 t1, $f1 - dmtc1 t1, $f3 - dmtc1 t1, $f5 - dmtc1 t1, $f7 - dmtc1 t1, $f9 - dmtc1 t1, $f11 - dmtc1 t1, $f13 - dmtc1 t1, $f15 - dmtc1 t1, $f17 - dmtc1 t1, $f19 - dmtc1 t1, $f21 - dmtc1 t1, $f23 - dmtc1 t1, $f25 - dmtc1 t1, $f27 - dmtc1 t1, $f29 - dmtc1 t1, $f31 -1: -#endif - -#ifdef CONFIG_CPU_MIPS32 - mtc1 t1, $f0 - mtc1 t1, $f1 - mtc1 t1, $f2 - mtc1 t1, $f3 - mtc1 t1, $f4 - mtc1 t1, $f5 - mtc1 t1, $f6 - mtc1 t1, $f7 - mtc1 t1, $f8 - mtc1 t1, $f9 - mtc1 t1, $f10 - mtc1 t1, $f11 - mtc1 t1, $f12 - mtc1 t1, $f13 - mtc1 t1, $f14 - mtc1 t1, $f15 - mtc1 t1, $f16 - mtc1 t1, $f17 - mtc1 t1, $f18 - mtc1 t1, $f19 - mtc1 t1, $f20 - mtc1 t1, $f21 - mtc1 t1, $f22 - mtc1 t1, $f23 - mtc1 t1, $f24 - mtc1 t1, $f25 - mtc1 t1, $f26 - mtc1 t1, $f27 - mtc1 t1, $f28 - mtc1 t1, $f29 - mtc1 t1, $f30 - mtc1 t1, $f31 - -#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) - .set push - .set MIPS_ISA_LEVEL_RAW - .set fp=64 - sll t0, t0, 5 # is Status.FR set? - bgez t0, 1f # no: skip setting upper 32b - - mthc1 t1, $f0 - mthc1 t1, $f1 - mthc1 t1, $f2 - mthc1 t1, $f3 - mthc1 t1, $f4 - mthc1 t1, $f5 - mthc1 t1, $f6 - mthc1 t1, $f7 - mthc1 t1, $f8 - mthc1 t1, $f9 - mthc1 t1, $f10 - mthc1 t1, $f11 - mthc1 t1, $f12 - mthc1 t1, $f13 - mthc1 t1, $f14 - mthc1 t1, $f15 - mthc1 t1, $f16 - mthc1 t1, $f17 - mthc1 t1, $f18 - mthc1 t1, $f19 - mthc1 t1, $f20 - mthc1 t1, $f21 - mthc1 t1, $f22 - mthc1 t1, $f23 - mthc1 t1, $f24 - mthc1 t1, $f25 - mthc1 t1, $f26 - mthc1 t1, $f27 - mthc1 t1, $f28 - mthc1 t1, $f29 - mthc1 t1, $f30 - mthc1 t1, $f31 -1: .set pop -#endif /* CONFIG_CPU_MIPS32_R2 || CONFIG_CPU_MIPS32_R6 */ -#else - .set MIPS_ISA_ARCH_LEVEL_RAW - dmtc1 t1, $f0 - dmtc1 t1, $f2 - dmtc1 t1, $f4 - dmtc1 t1, $f6 - dmtc1 t1, $f8 - dmtc1 t1, $f10 - dmtc1 t1, $f12 - dmtc1 t1, $f14 - dmtc1 t1, $f16 - dmtc1 t1, $f18 - dmtc1 t1, $f20 - dmtc1 t1, $f22 - dmtc1 t1, $f24 - dmtc1 t1, $f26 - dmtc1 t1, $f28 - dmtc1 t1, $f30 -#endif - jr ra - END(_init_fpu) - - .set pop /* SET_HARDFLOAT */ diff --git a/arch/mips/kernel/r6000_fpu.S b/arch/mips/kernel/r6000_fpu.S deleted file mode 100644 index 9cc7bfab3419..000000000000 --- a/arch/mips/kernel/r6000_fpu.S +++ /dev/null @@ -1,99 +0,0 @@ -/* - * r6000_fpu.S: Save/restore floating point context for signal handlers. - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1996 by Ralf Baechle - * - * Multi-arch abstraction and asm macros for easier reading: - * Copyright (C) 1996 David S. Miller (davem@davemloft.net) - */ -#include -#include -#include -#include -#include - - .set noreorder - .set mips2 - .set push - SET_HARDFLOAT - -/** - * _save_fp_context() - save FP context from the FPU - * @a0 - pointer to fpregs field of sigcontext - * @a1 - pointer to fpc_csr field of sigcontext - * - * Save FP context, including the 32 FP data registers and the FP - * control & status register, from the FPU to signal context. 
- */ - LEAF(_save_fp_context) - mfc0 t0,CP0_STATUS - sll t0,t0,2 - bgez t0,1f - nop - - cfc1 t1,fcr31 - /* Store the 16 double precision registers */ - sdc1 $f0,0(a0) - sdc1 $f2,16(a0) - sdc1 $f4,32(a0) - sdc1 $f6,48(a0) - sdc1 $f8,64(a0) - sdc1 $f10,80(a0) - sdc1 $f12,96(a0) - sdc1 $f14,112(a0) - sdc1 $f16,128(a0) - sdc1 $f18,144(a0) - sdc1 $f20,160(a0) - sdc1 $f22,176(a0) - sdc1 $f24,192(a0) - sdc1 $f26,208(a0) - sdc1 $f28,224(a0) - sdc1 $f30,240(a0) - jr ra - sw t0,(a1) -1: jr ra - nop - END(_save_fp_context) - -/** - * _restore_fp_context() - restore FP context to the FPU - * @a0 - pointer to fpregs field of sigcontext - * @a1 - pointer to fpc_csr field of sigcontext - * - * Restore FP context, including the 32 FP data registers and the FP - * control & status register, from signal context to the FPU. - */ - LEAF(_restore_fp_context) - mfc0 t0,CP0_STATUS - sll t0,t0,2 - - bgez t0,1f - lw t0,(a1) - /* Restore the 16 double precision registers */ - ldc1 $f0,0(a0) - ldc1 $f2,16(a0) - ldc1 $f4,32(a0) - ldc1 $f6,48(a0) - ldc1 $f8,64(a0) - ldc1 $f10,80(a0) - ldc1 $f12,96(a0) - ldc1 $f14,112(a0) - ldc1 $f16,128(a0) - ldc1 $f18,144(a0) - ldc1 $f20,160(a0) - ldc1 $f22,176(a0) - ldc1 $f24,192(a0) - ldc1 $f26,208(a0) - ldc1 $f28,224(a0) - ldc1 $f30,240(a0) - jr ra - ctc1 t0,fcr31 -1: jr ra - nop - END(_restore_fp_context) - - .set pop /* SET_HARDFLOAT */ diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index 84165f2b31ff..cf5c7c05e5a3 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c @@ -93,38 +93,37 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) at the same time. */ err = __put_user(from->si_signo, &to->si_signo); err |= __put_user(from->si_errno, &to->si_errno); - err |= __put_user((short)from->si_code, &to->si_code); + err |= __put_user(from->si_code, &to->si_code); if (from->si_code < 0) err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE); else { - switch (from->si_code >> 16) { - case __SI_TIMER >> 16: + switch (siginfo_layout(from->si_signo, from->si_code)) { + case SIL_TIMER: err |= __put_user(from->si_tid, &to->si_tid); err |= __put_user(from->si_overrun, &to->si_overrun); err |= __put_user(from->si_int, &to->si_int); break; - case __SI_CHLD >> 16: + case SIL_CHLD: err |= __put_user(from->si_utime, &to->si_utime); err |= __put_user(from->si_stime, &to->si_stime); err |= __put_user(from->si_status, &to->si_status); - default: + case SIL_KILL: err |= __put_user(from->si_pid, &to->si_pid); err |= __put_user(from->si_uid, &to->si_uid); break; - case __SI_FAULT >> 16: + case SIL_FAULT: err |= __put_user((unsigned long)from->si_addr, &to->si_addr); break; - case __SI_POLL >> 16: + case SIL_POLL: err |= __put_user(from->si_band, &to->si_band); err |= __put_user(from->si_fd, &to->si_fd); break; - case __SI_RT >> 16: /* This is not generated by the kernel as of now. 
*/ - case __SI_MESGQ >> 16: + case SIL_RT: err |= __put_user(from->si_pid, &to->si_pid); err |= __put_user(from->si_uid, &to->si_uid); err |= __put_user(from->si_int, &to->si_int); break; - case __SI_SYS >> 16: + case SIL_SYS: err |= __copy_to_user(&to->si_call_addr, &from->si_call_addr, sizeof(compat_uptr_t)); err |= __put_user(from->si_syscall, &to->si_syscall); diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index 1b070a76fcdd..406072e26752 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -179,7 +179,7 @@ static void bmips_prepare_cpus(unsigned int max_cpus) /* * Tell the hardware to boot CPUx - runs on CPU0 */ -static void bmips_boot_secondary(int cpu, struct task_struct *idle) +static int bmips_boot_secondary(int cpu, struct task_struct *idle) { bmips_smp_boot_sp = __KSTK_TOS(idle); bmips_smp_boot_gp = (unsigned long)task_thread_info(idle); @@ -231,6 +231,8 @@ static void bmips_boot_secondary(int cpu, struct task_struct *idle) } cpumask_set_cpu(cpu, &bmips_booted_mask); } + + return 0; } /* @@ -245,7 +247,7 @@ static void bmips_init_secondary(void) break; case CPU_BMIPS5000: write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), 0)); - current_cpu_data.core = (read_c0_brcm_config() >> 25) & 3; + cpu_set_core(&current_cpu_data, (read_c0_brcm_config() >> 25) & 3); break; } } @@ -409,7 +411,7 @@ void __ref play_dead(void) #endif /* CONFIG_HOTPLUG_CPU */ -struct plat_smp_ops bmips43xx_smp_ops = { +const struct plat_smp_ops bmips43xx_smp_ops = { .smp_setup = bmips_smp_setup, .prepare_cpus = bmips_prepare_cpus, .boot_secondary = bmips_boot_secondary, @@ -423,7 +425,7 @@ struct plat_smp_ops bmips43xx_smp_ops = { #endif }; -struct plat_smp_ops bmips5000_smp_ops = { +const struct plat_smp_ops bmips5000_smp_ops = { .smp_setup = bmips_smp_setup, .prepare_cpus = bmips_prepare_cpus, .boot_secondary = bmips_boot_secondary, diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c index 76923349b4fe..05295a4909f1 100644 --- a/arch/mips/kernel/smp-cmp.c +++ b/arch/mips/kernel/smp-cmp.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include @@ -78,7 +77,7 @@ static void cmp_smp_finish(void) * __KSTK_TOS(idle) is apparently the stack pointer * (unsigned long)idle->thread_info the gp */ -static void cmp_boot_secondary(int cpu, struct task_struct *idle) +static int cmp_boot_secondary(int cpu, struct task_struct *idle) { struct thread_info *gp = task_thread_info(idle); unsigned long sp = __KSTK_TOS(idle); @@ -95,6 +94,7 @@ static void cmp_boot_secondary(int cpu, struct task_struct *idle) #endif amon_cpu_start(cpu, pc, sp, (unsigned long)gp, a0); + return 0; } /* @@ -148,7 +148,7 @@ void __init cmp_prepare_cpus(unsigned int max_cpus) } -struct plat_smp_ops cmp_smp_ops = { +const struct plat_smp_ops cmp_smp_ops = { .send_ipi_single = mips_smp_send_ipi_single, .send_ipi_mask = mips_smp_send_ipi_mask, .init_secondary = cmp_init_secondary, diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c index f832e99ad4c3..0063122c85da 100644 --- a/arch/mips/kernel/smp-cps.c +++ b/arch/mips/kernel/smp-cps.c @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include @@ -19,8 +18,7 @@ #include #include -#include -#include +#include #include #include #include @@ -41,55 +39,58 @@ static int __init setup_nothreads(char *s) } early_param("nothreads", setup_nothreads); -static unsigned core_vpe_count(unsigned core) +static unsigned core_vpe_count(unsigned int cluster, unsigned core) { - unsigned cfg; - if 
(threads_disabled) return 1; - if ((!IS_ENABLED(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt) - && (!IS_ENABLED(CONFIG_CPU_MIPSR6) || !cpu_has_vp)) - return 1; - - mips_cm_lock_other(core, 0); - cfg = read_gcr_co_config() & CM_GCR_Cx_CONFIG_PVPE_MSK; - mips_cm_unlock_other(); - return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1; + return mips_cps_numvps(cluster, core); } static void __init cps_smp_setup(void) { - unsigned int ncores, nvpes, core_vpes; + unsigned int nclusters, ncores, nvpes, core_vpes; unsigned long core_entry; - int c, v; + int cl, c, v; /* Detect & record VPE topology */ - ncores = mips_cm_numcores(); + nvpes = 0; + nclusters = mips_cps_numclusters(); pr_info("%s topology ", cpu_has_mips_r6 ? "VP" : "VPE"); - for (c = nvpes = 0; c < ncores; c++) { - core_vpes = core_vpe_count(c); - pr_cont("%c%u", c ? ',' : '{', core_vpes); + for (cl = 0; cl < nclusters; cl++) { + if (cl > 0) + pr_cont(","); + pr_cont("{"); - /* Use the number of VPEs in core 0 for smp_num_siblings */ - if (!c) - smp_num_siblings = core_vpes; + ncores = mips_cps_numcores(cl); + for (c = 0; c < ncores; c++) { + core_vpes = core_vpe_count(cl, c); - for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) { - cpu_data[nvpes + v].core = c; -#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6) - cpu_data[nvpes + v].vpe_id = v; -#endif + if (c > 0) + pr_cont(","); + pr_cont("%u", core_vpes); + + /* Use the number of VPEs in cluster 0 core 0 for smp_num_siblings */ + if (!cl && !c) + smp_num_siblings = core_vpes; + + for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) { + cpu_set_cluster(&cpu_data[nvpes + v], cl); + cpu_set_core(&cpu_data[nvpes + v], c); + cpu_set_vpe_id(&cpu_data[nvpes + v], v); + } + + nvpes += core_vpes; } - nvpes += core_vpes; + pr_cont("}"); } - pr_cont("} total %u\n", nvpes); + pr_cont(" total %u\n", nvpes); /* Indicate present CPUs (CPU being synonymous with VPE) */ for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) { - set_cpu_possible(v, true); - set_cpu_present(v, true); + set_cpu_possible(v, cpu_cluster(&cpu_data[v]) == 0); + set_cpu_present(v, cpu_cluster(&cpu_data[v]) == 0); __cpu_number_map[v] = v; __cpu_logical_map[v] = v; } @@ -121,7 +122,7 @@ static void __init cps_smp_setup(void) static void __init cps_prepare_cpus(unsigned int max_cpus) { unsigned ncores, core_vpes, c, cca; - bool cca_unsuitable; + bool cca_unsuitable, cores_limited; u32 *entry_code; mips_mt_set_cpuoptions(); @@ -141,19 +142,22 @@ static void __init cps_prepare_cpus(unsigned int max_cpus) } /* Warn the user if the CCA prevents multi-core */ - ncores = mips_cm_numcores(); - if ((cca_unsuitable || cpu_has_dc_aliases) && ncores > 1) { + cores_limited = false; + if (cca_unsuitable || cpu_has_dc_aliases) { + for_each_present_cpu(c) { + if (cpus_are_siblings(smp_processor_id(), c)) + continue; + + set_cpu_present(c, false); + cores_limited = true; + } + } + if (cores_limited) pr_warn("Using only one core due to %s%s%s\n", cca_unsuitable ? "unsuitable CCA" : "", (cca_unsuitable && cpu_has_dc_aliases) ? " & " : "", cpu_has_dc_aliases ? 
"dcache aliasing" : ""); - for_each_present_cpu(c) { - if (cpu_data[c].core) - set_cpu_present(c, false); - } - } - /* * Patch the start of mips_cps_core_entry to provide: * @@ -168,6 +172,7 @@ static void __init cps_prepare_cpus(unsigned int max_cpus) __sync(); /* Allocate core boot configuration structs */ + ncores = mips_cps_numcores(0); mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg), GFP_KERNEL); if (!mips_cps_core_bootcfg) { @@ -177,7 +182,7 @@ static void __init cps_prepare_cpus(unsigned int max_cpus) /* Allocate VPE boot configuration structs */ for (c = 0; c < ncores; c++) { - core_vpes = core_vpe_count(c); + core_vpes = core_vpe_count(0, c); mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes, sizeof(*mips_cps_core_bootcfg[c].vpe_config), GFP_KERNEL); @@ -189,7 +194,7 @@ static void __init cps_prepare_cpus(unsigned int max_cpus) } /* Mark this CPU as booted */ - atomic_set(&mips_cps_core_bootcfg[current_cpu_data.core].vpe_mask, + atomic_set(&mips_cps_core_bootcfg[cpu_core(&current_cpu_data)].vpe_mask, 1 << cpu_vpe_id(&current_cpu_data)); return; @@ -212,11 +217,11 @@ static void __init cps_prepare_cpus(unsigned int max_cpus) static void boot_core(unsigned int core, unsigned int vpe_id) { - u32 access, stat, seq_state; + u32 stat, seq_state; unsigned timeout; /* Select the appropriate core */ - mips_cm_lock_other(core, 0); + mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL); /* Set its reset vector */ write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry)); @@ -225,12 +230,10 @@ static void boot_core(unsigned int core, unsigned int vpe_id) write_gcr_co_coherence(0); /* Start it with the legacy memory map and exception base */ - write_gcr_co_reset_ext_base(CM_GCR_RESET_EXT_BASE_UEB); + write_gcr_co_reset_ext_base(CM_GCR_Cx_RESET_EXT_BASE_UEB); /* Ensure the core can access the GCRs */ - access = read_gcr_access(); - access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + core); - write_gcr_access(access); + set_gcr_access(1 << core); if (mips_cpc_present()) { /* Reset the core */ @@ -253,7 +256,8 @@ static void boot_core(unsigned int core, unsigned int vpe_id) timeout = 100; while (true) { stat = read_cpc_co_stat_conf(); - seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE_MSK; + seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE; + seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE); /* U6 == coherent execution, ie. 
the core is up */ if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U6) @@ -285,15 +289,15 @@ static void boot_core(unsigned int core, unsigned int vpe_id) static void remote_vpe_boot(void *dummy) { - unsigned core = current_cpu_data.core; + unsigned core = cpu_core(&current_cpu_data); struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core]; mips_cps_boot_vpes(core_cfg, cpu_vpe_id(&current_cpu_data)); } -static void cps_boot_secondary(int cpu, struct task_struct *idle) +static int cps_boot_secondary(int cpu, struct task_struct *idle) { - unsigned core = cpu_data[cpu].core; + unsigned core = cpu_core(&cpu_data[cpu]); unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]); struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core]; struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id]; @@ -301,6 +305,10 @@ static void cps_boot_secondary(int cpu, struct task_struct *idle) unsigned int remote; int err; + /* We don't yet support booting CPUs in other clusters */ + if (cpu_cluster(&cpu_data[cpu]) != cpu_cluster(&current_cpu_data)) + return -ENOSYS; + vpe_cfg->pc = (unsigned long)&smp_bootstrap; vpe_cfg->sp = __KSTK_TOS(idle); vpe_cfg->gp = (unsigned long)task_thread_info(idle); @@ -316,16 +324,16 @@ static void cps_boot_secondary(int cpu, struct task_struct *idle) } if (cpu_has_vp) { - mips_cm_lock_other(core, vpe_id); + mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL); core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry); write_gcr_co_reset_base(core_entry); mips_cm_unlock_other(); } - if (core != current_cpu_data.core) { + if (!cpus_are_siblings(cpu, smp_processor_id())) { /* Boot a VPE on another powered up core */ for (remote = 0; remote < NR_CPUS; remote++) { - if (cpu_data[remote].core != core) + if (!cpus_are_siblings(cpu, remote)) continue; if (cpu_online(remote)) break; @@ -349,6 +357,7 @@ static void cps_boot_secondary(int cpu, struct task_struct *idle) mips_cps_boot_vpes(core_cfg, vpe_id); out: preempt_enable(); + return 0; } static void cps_init_secondary(void) @@ -358,7 +367,7 @@ static void cps_init_secondary(void) dmt(); if (mips_cm_revision() >= CM_REV_CM3) { - unsigned ident = gic_read_local_vp_id(); + unsigned int ident = read_gic_vl_ident(); /* * Ensure that our calculation of the VP ID matches up with @@ -402,7 +411,7 @@ static int cps_cpu_disable(void) if (!cps_pm_support_state(CPS_PM_POWER_GATED)) return -EINVAL; - core_cfg = &mips_cps_core_bootcfg[current_cpu_data.core]; + core_cfg = &mips_cps_core_bootcfg[cpu_core(&current_cpu_data)]; atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask); smp_mb__after_atomic(); set_cpu_online(cpu, false); @@ -424,15 +433,17 @@ void play_dead(void) local_irq_disable(); idle_task_exit(); cpu = smp_processor_id(); - core = cpu_data[cpu].core; + core = cpu_core(&cpu_data[cpu]); cpu_death = CPU_DEATH_POWER; pr_debug("CPU%d going offline\n", cpu); if (cpu_has_mipsmt || cpu_has_vp) { + core = cpu_core(&cpu_data[cpu]); + /* Look for another online VPE within the core */ for_each_online_cpu(cpu_death_sibling) { - if (cpu_data[cpu_death_sibling].core != core) + if (!cpus_are_siblings(cpu, cpu_death_sibling)) continue; /* @@ -488,7 +499,7 @@ static void wait_for_sibling_halt(void *ptr_cpu) static void cps_cpu_die(unsigned int cpu) { - unsigned core = cpu_data[cpu].core; + unsigned core = cpu_core(&cpu_data[cpu]); unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]); ktime_t fail_time; unsigned stat; @@ -519,10 +530,11 @@ static void cps_cpu_die(unsigned int cpu) */ fail_time = ktime_add_ms(ktime_get(), 2000); do { - 
mips_cm_lock_other(core, 0); + mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL); mips_cpc_lock_other(core); stat = read_cpc_co_stat_conf(); - stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK; + stat &= CPC_Cx_STAT_CONF_SEQSTATE; + stat >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE); mips_cpc_unlock_other(); mips_cm_unlock_other(); @@ -544,7 +556,7 @@ static void cps_cpu_die(unsigned int cpu) */ if (WARN(ktime_after(ktime_get(), fail_time), "CPU%u hasn't powered down, seq. state %u\n", - cpu, stat >> CPC_Cx_STAT_CONF_SEQSTATE_SHF)) + cpu, stat)) break; } while (1); @@ -562,7 +574,7 @@ static void cps_cpu_die(unsigned int cpu) panic("Failed to call remote sibling CPU\n"); } else if (cpu_has_vp) { do { - mips_cm_lock_other(core, vpe_id); + mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL); stat = read_cpc_co_vp_running(); mips_cm_unlock_other(); } while (stat & (1 << vpe_id)); @@ -571,7 +583,7 @@ static void cps_cpu_die(unsigned int cpu) #endif /* CONFIG_HOTPLUG_CPU */ -static struct plat_smp_ops cps_smp_ops = { +static const struct plat_smp_ops cps_smp_ops = { .smp_setup = cps_smp_setup, .prepare_cpus = cps_prepare_cpus, .boot_secondary = cps_boot_secondary, @@ -587,7 +599,7 @@ static struct plat_smp_ops cps_smp_ops = { bool mips_cps_smp_in_use(void) { - extern struct plat_smp_ops *mp_ops; + extern const struct plat_smp_ops *mp_ops; return mp_ops == &cps_smp_ops; } @@ -599,7 +611,7 @@ int register_cps_smp_ops(void) } /* check we have a GIC - we need one for IPIs */ - if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX_MSK)) { + if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX)) { pr_warn("MIPS CPS SMP unable to proceed without a GIC\n"); return -ENODEV; } diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c index ed6b4df583ea..94ab3276b48c 100644 --- a/arch/mips/kernel/smp-mt.c +++ b/arch/mips/kernel/smp-mt.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include #include @@ -36,6 +35,7 @@ #include #include #include +#include static void __init smvp_copy_vpe_config(void) { @@ -83,7 +83,7 @@ static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0, if (tc != 0) smvp_copy_vpe_config(); - cpu_data[ncpu].vpe_id = tc; + cpu_set_vpe_id(&cpu_data[ncpu], tc); return ncpu; } @@ -118,14 +118,12 @@ static void __init smvp_tc_init(unsigned int tc, unsigned int mvpconf0) static void vsmp_init_secondary(void) { -#ifdef CONFIG_MIPS_GIC /* This is Malta specific: IPI,performance and timer interrupts */ - if (gic_present) + if (mips_gic_present()) change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 | STATUSF_IP4 | STATUSF_IP5 | STATUSF_IP6 | STATUSF_IP7); else -#endif change_c0_status(ST0_IM, STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP6 | STATUSF_IP7); } @@ -152,7 +150,7 @@ static void vsmp_smp_finish(void) * (unsigned long)idle->thread_info the gp * assumes a 1:1 mapping of TC => VPE */ -static void vsmp_boot_secondary(int cpu, struct task_struct *idle) +static int vsmp_boot_secondary(int cpu, struct task_struct *idle) { struct thread_info *gp = task_thread_info(idle); dvpe(); @@ -184,6 +182,8 @@ static void vsmp_boot_secondary(int cpu, struct task_struct *idle) clear_c0_mvpcontrol(MVPCONTROL_VPC); evpe(EVPE_ENABLE); + + return 0; } /* @@ -239,7 +239,7 @@ static void __init vsmp_prepare_cpus(unsigned int max_cpus) mips_mt_set_cpuoptions(); } -struct plat_smp_ops vsmp_smp_ops = { +const struct plat_smp_ops vsmp_smp_ops = { .send_ipi_single = mips_smp_send_ipi_single, .send_ipi_mask = mips_smp_send_ipi_mask, .init_secondary = vsmp_init_secondary, diff 
--git a/arch/mips/kernel/smp-up.c b/arch/mips/kernel/smp-up.c index 17878d71ef2b..525d3196f793 100644 --- a/arch/mips/kernel/smp-up.c +++ b/arch/mips/kernel/smp-up.c @@ -39,8 +39,9 @@ static void up_smp_finish(void) /* * Firmware CPU startup hook */ -static void up_boot_secondary(int cpu, struct task_struct *idle) +static int up_boot_secondary(int cpu, struct task_struct *idle) { + return 0; } static void __init up_smp_setup(void) @@ -63,7 +64,7 @@ static void up_cpu_die(unsigned int cpu) } #endif -struct plat_smp_ops up_smp_ops = { +const struct plat_smp_ops up_smp_ops = { .send_ipi_single = up_send_ipi_single, .send_ipi_mask = up_send_ipi_mask, .init_secondary = up_init_secondary, diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index c7cbddfcdc3b..bbe19b64def5 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -96,8 +96,7 @@ static inline void set_cpu_sibling_map(int cpu) if (smp_num_siblings > 1) { for_each_cpu(i, &cpu_sibling_setup_map) { - if (cpu_data[cpu].package == cpu_data[i].package && - cpu_data[cpu].core == cpu_data[i].core) { + if (cpus_are_siblings(cpu, i)) { cpumask_set_cpu(i, &cpu_sibling_map[cpu]); cpumask_set_cpu(cpu, &cpu_sibling_map[i]); } @@ -134,8 +133,7 @@ void calculate_cpu_foreign_map(void) for_each_online_cpu(i) { core_present = 0; for_each_cpu(k, &temp_foreign_map) - if (cpu_data[i].package == cpu_data[k].package && - cpu_data[i].core == cpu_data[k].core) + if (cpus_are_siblings(i, k)) core_present = 1; if (!core_present) cpumask_set_cpu(i, &temp_foreign_map); @@ -146,10 +144,10 @@ void calculate_cpu_foreign_map(void) &temp_foreign_map, &cpu_sibling_map[i]); } -struct plat_smp_ops *mp_ops; +const struct plat_smp_ops *mp_ops; EXPORT_SYMBOL(mp_ops); -void register_smp_ops(struct plat_smp_ops *ops) +void register_smp_ops(const struct plat_smp_ops *ops) { if (mp_ops) printk(KERN_WARNING "Overriding previously set SMP ops\n"); @@ -186,13 +184,13 @@ void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action) if (mips_cpc_present()) { for_each_cpu(cpu, mask) { - core = cpu_data[cpu].core; - - if (core == current_cpu_data.core) + if (cpus_are_siblings(cpu, smp_processor_id())) continue; + core = cpu_core(&cpu_data[cpu]); + while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) { - mips_cm_lock_other(core, 0); + mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL); mips_cpc_lock_other(core); write_cpc_co_cmd(CPC_Cx_CMD_PWRUP); mips_cpc_unlock_other(); @@ -441,7 +439,11 @@ void smp_prepare_boot_cpu(void) int __cpu_up(unsigned int cpu, struct task_struct *tidle) { - mp_ops->boot_secondary(cpu, tidle); + int err; + + err = mp_ops->boot_secondary(cpu, tidle); + if (err) + return err; /* * We must check for timeout here, as the CPU will not be marked diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c index c036157fb891..a6ebc8135112 100644 --- a/arch/mips/kernel/time.c +++ b/arch/mips/kernel/time.c @@ -72,20 +72,6 @@ EXPORT_SYMBOL(perf_irq); unsigned int mips_hpt_frequency; EXPORT_SYMBOL_GPL(mips_hpt_frequency); -/* - * This function exists in order to cause an error due to a duplicate - * definition if platform code should have its own implementation. The hook - * to use instead is plat_time_init. plat_time_init does not receive the - * irqaction pointer argument anymore. This is because any function which - * initializes an interrupt timer now takes care of its own request_irq rsp. - * setup_irq calls and each clock_event_device should use its own - * struct irqrequest. 
- */ -void __init plat_timer_setup(void) -{ - BUG(); -} - static __init int cpu_has_mfc0_count_bug(void) { switch (current_cpu_type()) { diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index b68b4d0726d3..5669d3b8bd38 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -50,9 +50,8 @@ #include #include #include -#include +#include #include -#include #include #include #include @@ -734,8 +733,7 @@ void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr, si.si_code = FPE_FLTUND; else if (fcr31 & FPU_CSR_INE_X) si.si_code = FPE_FLTRES; - else - si.si_code = __SI_FAULT; + force_sig_info(SIGFPE, &si, tsk); } @@ -1673,7 +1671,7 @@ static inline void parity_protection_init(void) /* Probe L2 ECC support */ gcr_ectl = read_gcr_err_control(); - if (!(gcr_ectl & CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT_MSK) || + if (!(gcr_ectl & CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT) || !(cp0_ectl & ERRCTL_PE)) { /* * One of L1 or L2 ECC checking isn't supported, @@ -1693,12 +1691,12 @@ static inline void parity_protection_init(void) /* Configure L2 ECC checking */ if (l2parity) - gcr_ectl |= CM_GCR_ERR_CONTROL_L2_ECC_EN_MSK; + gcr_ectl |= CM_GCR_ERR_CONTROL_L2_ECC_EN; else - gcr_ectl &= ~CM_GCR_ERR_CONTROL_L2_ECC_EN_MSK; + gcr_ectl &= ~CM_GCR_ERR_CONTROL_L2_ECC_EN; write_gcr_err_control(gcr_ectl); gcr_ectl = read_gcr_err_control(); - gcr_ectl &= CM_GCR_ERR_CONTROL_L2_ECC_EN_MSK; + gcr_ectl &= CM_GCR_ERR_CONTROL_L2_ECC_EN; WARN_ON(!!gcr_ectl != l2parity); pr_info("Cache parity protection %sabled\n", @@ -2428,21 +2426,6 @@ void __init trap_init(void) set_except_vector(EXCCODE_TR, handle_tr); set_except_vector(EXCCODE_MSAFPE, handle_msa_fpe); - if (current_cpu_type() == CPU_R6000 || - current_cpu_type() == CPU_R6000A) { - /* - * The R6000 is the only R-series CPU that features a machine - * check exception (similar to the R4000 cache error) and - * unaligned ldc1/sdc1 exception. The handlers have not been - * written yet. Well, anyway there is no R6000 machine on the - * current list of targets for Linux/MIPS. - * (Duh, crap, there is someone with a triple R6k machine) - */ - //set_except_vector(14, handle_mc); - //set_except_vector(15, handle_ndc); - } - - if (board_nmi_handler_setup) board_nmi_handler_setup(); diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index 5eaf2578ac04..2d0b912f9e3e 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -1378,7 +1378,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 }; /* Recode table from 16-bit STORE register notation to 32-bit GPR. */ -const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 }; +static const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 }; static void emulate_load_store_microMIPS(struct pt_regs *regs, void __user *addr) diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c index 093517e85a6c..019035d7225c 100644 --- a/arch/mips/kernel/vdso.c +++ b/arch/mips/kernel/vdso.c @@ -13,13 +13,13 @@ #include #include #include -#include #include #include #include #include #include +#include #include /* Kernel-provided data used by the VDSO. 
*/ @@ -99,9 +99,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { struct mips_vdso_image *image = current->thread.abi->vdso; struct mm_struct *mm = current->mm; - unsigned long gic_size, vvar_size, size, base, data_addr, vdso_addr; + unsigned long gic_size, vvar_size, size, base, data_addr, vdso_addr, gic_pfn; struct vm_area_struct *vma; - struct resource gic_res; int ret; if (down_write_killable(&mm->mmap_sem)) @@ -125,7 +124,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) * only map a page even though the total area is 64K, as we only need * the counter registers at the start. */ - gic_size = gic_present ? PAGE_SIZE : 0; + gic_size = mips_gic_present() ? PAGE_SIZE : 0; vvar_size = gic_size + PAGE_SIZE; size = vvar_size + image->size; @@ -148,13 +147,9 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) /* Map GIC user page. */ if (gic_size) { - ret = gic_get_usm_range(&gic_res); - if (ret) - goto out; + gic_pfn = virt_to_phys(mips_gic_base + MIPS_GIC_USER_OFS) >> PAGE_SHIFT; - ret = io_remap_pfn_range(vma, base, - gic_res.start >> PAGE_SHIFT, - gic_size, + ret = io_remap_pfn_range(vma, base, gic_pfn, gic_size, pgprot_noncached(PAGE_READONLY)); if (ret) goto out; diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index d4b2ad18eef2..d535edc01434 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -98,6 +98,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) return !!(vcpu->arch.pending_exceptions); } +bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) +{ + return false; +} + int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) { return 1; @@ -509,7 +514,7 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, dvcpu->arch.wait = 0; - if (swait_active(&dvcpu->wq)) + if (swq_has_sleeper(&dvcpu->wq)) swake_up(&dvcpu->wq); return 0; @@ -1174,7 +1179,7 @@ static void kvm_mips_comparecount_func(unsigned long data) kvm_mips_callbacks->queue_timer_int(vcpu); vcpu->arch.wait = 0; - if (swait_active(&vcpu->wq)) + if (swq_has_sleeper(&vcpu->wq)) swake_up(&vcpu->wq); } diff --git a/arch/mips/lantiq/Kconfig b/arch/mips/lantiq/Kconfig index 177769dbb0e8..35bc69b78268 100644 --- a/arch/mips/lantiq/Kconfig +++ b/arch/mips/lantiq/Kconfig @@ -17,6 +17,8 @@ config SOC_XWAY bool "XWAY" select SOC_TYPE_XWAY select HW_HAS_PCI + select MFD_SYSCON + select MFD_CORE config SOC_FALCON bool "FALCON" diff --git a/arch/mips/lantiq/falcon/reset.c b/arch/mips/lantiq/falcon/reset.c index 7a535d72f541..058b85578cf7 100644 --- a/arch/mips/lantiq/falcon/reset.c +++ b/arch/mips/lantiq/falcon/reset.c @@ -15,28 +15,15 @@ #include -/* CPU0 Reset Source Register */ -#define SYS1_CPU0RS 0x0040 -/* reset cause mask */ -#define CPU0RS_MASK 0x0003 -/* CPU0 Boot Mode Register */ -#define SYS1_BM 0x00a0 -/* boot mode mask */ -#define BM_MASK 0x0005 - -/* allow platform code to find out what surce we booted from */ +/* + * Dummy implementation. 
Used to allow platform code to find out what + * source was booted from + */ unsigned char ltq_boot_select(void) { - return ltq_sys1_r32(SYS1_BM) & BM_MASK; + return BS_SPI; } -/* allow the watchdog driver to find out what the boot reason was */ -int ltq_reset_cause(void) -{ - return ltq_sys1_r32(SYS1_CPU0RS) & CPU0RS_MASK; -} -EXPORT_SYMBOL_GPL(ltq_reset_cause); - #define BOOT_REG_BASE (KSEG1 | 0x1F200000) #define BOOT_PW1_REG (BOOT_REG_BASE | 0x20) #define BOOT_PW2_REG (BOOT_REG_BASE | 0x24) diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c index 33728b7af426..f0bc3312ed11 100644 --- a/arch/mips/lantiq/irq.c +++ b/arch/mips/lantiq/irq.c @@ -61,10 +61,6 @@ /* we have a cascade of 8 irqs */ #define MIPS_CPU_IRQ_CASCADE 8 -#ifdef CONFIG_MIPS_MT_SMP -int gic_present; -#endif - static int exin_avail; static u32 ltq_eiu_irq[MAX_EIU]; static void __iomem *ltq_icu_membase[MAX_IM]; diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c index 96773bed8a8a..9ff7ccde9de0 100644 --- a/arch/mips/lantiq/prom.c +++ b/arch/mips/lantiq/prom.c @@ -117,7 +117,7 @@ void __init prom_init(void) int __init plat_of_setup(void) { - return __dt_register_buses(soc_info.compatible, "simple-bus"); + return of_platform_default_populate(NULL, NULL, NULL); } arch_initcall(plat_of_setup); diff --git a/arch/mips/lantiq/xway/Makefile b/arch/mips/lantiq/xway/Makefile index a2edc538f477..fbb0747c70b7 100644 --- a/arch/mips/lantiq/xway/Makefile +++ b/arch/mips/lantiq/xway/Makefile @@ -1,5 +1,3 @@ -obj-y := prom.o sysctrl.o clk.o reset.o dma.o gptu.o dcdc.o +obj-y := prom.o sysctrl.o clk.o dma.o gptu.o dcdc.o obj-y += vmmc.o - -obj-$(CONFIG_XRX200_PHY_FW) += xrx200_phy_fw.o diff --git a/arch/mips/lantiq/xway/reset.c b/arch/mips/lantiq/xway/reset.c deleted file mode 100644 index 83fd65d76e81..000000000000 --- a/arch/mips/lantiq/xway/reset.c +++ /dev/null @@ -1,387 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
- * - * Copyright (C) 2010 John Crispin - * Copyright (C) 2013-2015 Lantiq Beteiligungs-GmbH & Co.KG - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include - -#include "../prom.h" - -/* reset request register */ -#define RCU_RST_REQ 0x0010 -/* reset status register */ -#define RCU_RST_STAT 0x0014 -/* vr9 gphy registers */ -#define RCU_GFS_ADD0_XRX200 0x0020 -#define RCU_GFS_ADD1_XRX200 0x0068 -/* xRX300 gphy registers */ -#define RCU_GFS_ADD0_XRX300 0x0020 -#define RCU_GFS_ADD1_XRX300 0x0058 -#define RCU_GFS_ADD2_XRX300 0x00AC -/* xRX330 gphy registers */ -#define RCU_GFS_ADD0_XRX330 0x0020 -#define RCU_GFS_ADD1_XRX330 0x0058 -#define RCU_GFS_ADD2_XRX330 0x00AC -#define RCU_GFS_ADD3_XRX330 0x0264 - -/* xbar BE flag */ -#define RCU_AHB_ENDIAN 0x004C -#define RCU_VR9_BE_AHB1S 0x00000008 - -/* reboot bit */ -#define RCU_RD_GPHY0_XRX200 BIT(31) -#define RCU_RD_SRST BIT(30) -#define RCU_RD_GPHY1_XRX200 BIT(29) -/* xRX300 bits */ -#define RCU_RD_GPHY0_XRX300 BIT(31) -#define RCU_RD_GPHY1_XRX300 BIT(29) -#define RCU_RD_GPHY2_XRX300 BIT(28) -/* xRX330 bits */ -#define RCU_RD_GPHY0_XRX330 BIT(31) -#define RCU_RD_GPHY1_XRX330 BIT(29) -#define RCU_RD_GPHY2_XRX330 BIT(28) -#define RCU_RD_GPHY3_XRX330 BIT(10) - -/* reset cause */ -#define RCU_STAT_SHIFT 26 -/* boot selection */ -#define RCU_BOOT_SEL(x) ((x >> 18) & 0x7) -#define RCU_BOOT_SEL_XRX200(x) (((x >> 17) & 0xf) | ((x >> 8) & 0x10)) - -/* dwc2 USB configuration registers */ -#define RCU_USB1CFG 0x0018 -#define RCU_USB2CFG 0x0034 - -/* USB DMA endianness bits */ -#define RCU_USBCFG_HDSEL_BIT BIT(11) -#define RCU_USBCFG_HOST_END_BIT BIT(10) -#define RCU_USBCFG_SLV_END_BIT BIT(9) - -/* USB reset bits */ -#define RCU_USBRESET 0x0010 - -#define USBRESET_BIT BIT(4) - -#define RCU_USBRESET2 0x0048 - -#define USB1RESET_BIT BIT(4) -#define USB2RESET_BIT BIT(5) - -#define RCU_CFG1A 0x0038 -#define RCU_CFG1B 0x003C - -/* USB PMU devices */ -#define PMU_AHBM BIT(15) -#define PMU_USB0 BIT(6) -#define PMU_USB1 BIT(27) - -/* USB PHY PMU devices */ -#define PMU_USB0_P BIT(0) -#define PMU_USB1_P BIT(26) - -/* remapped base addr of the reset control unit */ -static void __iomem *ltq_rcu_membase; -static struct device_node *ltq_rcu_np; -static DEFINE_SPINLOCK(ltq_rcu_lock); - -static void ltq_rcu_w32(uint32_t val, uint32_t reg_off) -{ - ltq_w32(val, ltq_rcu_membase + reg_off); -} - -static uint32_t ltq_rcu_r32(uint32_t reg_off) -{ - return ltq_r32(ltq_rcu_membase + reg_off); -} - -static void ltq_rcu_w32_mask(uint32_t clr, uint32_t set, uint32_t reg_off) -{ - unsigned long flags; - - spin_lock_irqsave(&ltq_rcu_lock, flags); - ltq_rcu_w32((ltq_rcu_r32(reg_off) & ~(clr)) | (set), reg_off); - spin_unlock_irqrestore(&ltq_rcu_lock, flags); -} - -/* This function is used by the watchdog driver */ -int ltq_reset_cause(void) -{ - u32 val = ltq_rcu_r32(RCU_RST_STAT); - return val >> RCU_STAT_SHIFT; -} -EXPORT_SYMBOL_GPL(ltq_reset_cause); - -/* allow platform code to find out what source we booted from */ -unsigned char ltq_boot_select(void) -{ - u32 val = ltq_rcu_r32(RCU_RST_STAT); - - if (of_device_is_compatible(ltq_rcu_np, "lantiq,rcu-xrx200")) - return RCU_BOOT_SEL_XRX200(val); - - return RCU_BOOT_SEL(val); -} - -struct ltq_gphy_reset { - u32 rd; - u32 addr; -}; - -/* reset / boot a gphy */ -static struct ltq_gphy_reset xrx200_gphy[] = { - {RCU_RD_GPHY0_XRX200, RCU_GFS_ADD0_XRX200}, - {RCU_RD_GPHY1_XRX200, RCU_GFS_ADD1_XRX200}, -}; - -/* reset / boot a gphy */ -static struct ltq_gphy_reset xrx300_gphy[] = { 
- {RCU_RD_GPHY0_XRX300, RCU_GFS_ADD0_XRX300}, - {RCU_RD_GPHY1_XRX300, RCU_GFS_ADD1_XRX300}, - {RCU_RD_GPHY2_XRX300, RCU_GFS_ADD2_XRX300}, -}; - -/* reset / boot a gphy */ -static struct ltq_gphy_reset xrx330_gphy[] = { - {RCU_RD_GPHY0_XRX330, RCU_GFS_ADD0_XRX330}, - {RCU_RD_GPHY1_XRX330, RCU_GFS_ADD1_XRX330}, - {RCU_RD_GPHY2_XRX330, RCU_GFS_ADD2_XRX330}, - {RCU_RD_GPHY3_XRX330, RCU_GFS_ADD3_XRX330}, -}; - -static void xrx200_gphy_boot_addr(struct ltq_gphy_reset *phy_regs, - dma_addr_t dev_addr) -{ - ltq_rcu_w32_mask(0, phy_regs->rd, RCU_RST_REQ); - ltq_rcu_w32(dev_addr, phy_regs->addr); - ltq_rcu_w32_mask(phy_regs->rd, 0, RCU_RST_REQ); -} - -/* reset and boot a gphy. these phys only exist on xrx200 SoC */ -int xrx200_gphy_boot(struct device *dev, unsigned int id, dma_addr_t dev_addr) -{ - struct clk *clk; - - if (!of_device_is_compatible(ltq_rcu_np, "lantiq,rcu-xrx200")) { - dev_err(dev, "this SoC has no GPHY\n"); - return -EINVAL; - } - - if (of_machine_is_compatible("lantiq,vr9")) { - clk = clk_get_sys("1f203000.rcu", "gphy"); - if (IS_ERR(clk)) - return PTR_ERR(clk); - clk_enable(clk); - } - - dev_info(dev, "booting GPHY%u firmware at %X\n", id, dev_addr); - - if (of_machine_is_compatible("lantiq,vr9")) { - if (id >= ARRAY_SIZE(xrx200_gphy)) { - dev_err(dev, "%u is an invalid gphy id\n", id); - return -EINVAL; - } - xrx200_gphy_boot_addr(&xrx200_gphy[id], dev_addr); - } else if (of_machine_is_compatible("lantiq,ar10")) { - if (id >= ARRAY_SIZE(xrx300_gphy)) { - dev_err(dev, "%u is an invalid gphy id\n", id); - return -EINVAL; - } - xrx200_gphy_boot_addr(&xrx300_gphy[id], dev_addr); - } else if (of_machine_is_compatible("lantiq,grx390")) { - if (id >= ARRAY_SIZE(xrx330_gphy)) { - dev_err(dev, "%u is an invalid gphy id\n", id); - return -EINVAL; - } - xrx200_gphy_boot_addr(&xrx330_gphy[id], dev_addr); - } - return 0; -} - -/* reset a io domain for u micro seconds */ -void ltq_reset_once(unsigned int module, ulong u) -{ - ltq_rcu_w32(ltq_rcu_r32(RCU_RST_REQ) | module, RCU_RST_REQ); - udelay(u); - ltq_rcu_w32(ltq_rcu_r32(RCU_RST_REQ) & ~module, RCU_RST_REQ); -} - -static int ltq_assert_device(struct reset_controller_dev *rcdev, - unsigned long id) -{ - u32 val; - - if (id < 8) - return -1; - - val = ltq_rcu_r32(RCU_RST_REQ); - val |= BIT(id); - ltq_rcu_w32(val, RCU_RST_REQ); - - return 0; -} - -static int ltq_deassert_device(struct reset_controller_dev *rcdev, - unsigned long id) -{ - u32 val; - - if (id < 8) - return -1; - - val = ltq_rcu_r32(RCU_RST_REQ); - val &= ~BIT(id); - ltq_rcu_w32(val, RCU_RST_REQ); - - return 0; -} - -static int ltq_reset_device(struct reset_controller_dev *rcdev, - unsigned long id) -{ - ltq_assert_device(rcdev, id); - return ltq_deassert_device(rcdev, id); -} - -static const struct reset_control_ops reset_ops = { - .reset = ltq_reset_device, - .assert = ltq_assert_device, - .deassert = ltq_deassert_device, -}; - -static struct reset_controller_dev reset_dev = { - .ops = &reset_ops, - .owner = THIS_MODULE, - .nr_resets = 32, - .of_reset_n_cells = 1, -}; - -void ltq_rst_init(void) -{ - reset_dev.of_node = of_find_compatible_node(NULL, NULL, - "lantiq,xway-reset"); - if (!reset_dev.of_node) - pr_err("Failed to find reset controller node"); - else - reset_controller_register(&reset_dev); -} - -static void ltq_machine_restart(char *command) -{ - u32 val = ltq_rcu_r32(RCU_RST_REQ); - - if (of_device_is_compatible(ltq_rcu_np, "lantiq,rcu-xrx200")) - val |= RCU_RD_GPHY1_XRX200 | RCU_RD_GPHY0_XRX200; - - val |= RCU_RD_SRST; - - local_irq_disable(); - ltq_rcu_w32(val, 
RCU_RST_REQ); - unreachable(); -} - -static void ltq_machine_halt(void) -{ - local_irq_disable(); - unreachable(); -} - -static void ltq_machine_power_off(void) -{ - local_irq_disable(); - unreachable(); -} - -static void ltq_usb_init(void) -{ - /* Power for USB cores 1 & 2 */ - ltq_pmu_enable(PMU_AHBM); - ltq_pmu_enable(PMU_USB0); - ltq_pmu_enable(PMU_USB1); - - ltq_rcu_w32(ltq_rcu_r32(RCU_CFG1A) | BIT(0), RCU_CFG1A); - ltq_rcu_w32(ltq_rcu_r32(RCU_CFG1B) | BIT(0), RCU_CFG1B); - - /* Enable USB PHY power for cores 1 & 2 */ - ltq_pmu_enable(PMU_USB0_P); - ltq_pmu_enable(PMU_USB1_P); - - /* Configure cores to host mode */ - ltq_rcu_w32(ltq_rcu_r32(RCU_USB1CFG) & ~RCU_USBCFG_HDSEL_BIT, - RCU_USB1CFG); - ltq_rcu_w32(ltq_rcu_r32(RCU_USB2CFG) & ~RCU_USBCFG_HDSEL_BIT, - RCU_USB2CFG); - - /* Select DMA endianness (Host-endian: big-endian) */ - ltq_rcu_w32((ltq_rcu_r32(RCU_USB1CFG) & ~RCU_USBCFG_SLV_END_BIT) - | RCU_USBCFG_HOST_END_BIT, RCU_USB1CFG); - ltq_rcu_w32(ltq_rcu_r32((RCU_USB2CFG) & ~RCU_USBCFG_SLV_END_BIT) - | RCU_USBCFG_HOST_END_BIT, RCU_USB2CFG); - - /* Hard reset USB state machines */ - ltq_rcu_w32(ltq_rcu_r32(RCU_USBRESET) | USBRESET_BIT, RCU_USBRESET); - udelay(50 * 1000); - ltq_rcu_w32(ltq_rcu_r32(RCU_USBRESET) & ~USBRESET_BIT, RCU_USBRESET); - - /* Soft reset USB state machines */ - ltq_rcu_w32(ltq_rcu_r32(RCU_USBRESET2) - | USB1RESET_BIT | USB2RESET_BIT, RCU_USBRESET2); - udelay(50 * 1000); - ltq_rcu_w32(ltq_rcu_r32(RCU_USBRESET2) - & ~(USB1RESET_BIT | USB2RESET_BIT), RCU_USBRESET2); -} - -static int __init mips_reboot_setup(void) -{ - struct resource res; - - ltq_rcu_np = of_find_compatible_node(NULL, NULL, "lantiq,rcu-xway"); - if (!ltq_rcu_np) - ltq_rcu_np = of_find_compatible_node(NULL, NULL, - "lantiq,rcu-xrx200"); - - /* check if all the reset register range is available */ - if (!ltq_rcu_np) - panic("Failed to load reset resources from devicetree"); - - if (of_address_to_resource(ltq_rcu_np, 0, &res)) - panic("Failed to get rcu memory range"); - - if (!request_mem_region(res.start, resource_size(&res), res.name)) - pr_err("Failed to request rcu memory"); - - ltq_rcu_membase = ioremap_nocache(res.start, resource_size(&res)); - if (!ltq_rcu_membase) - panic("Failed to remap core memory"); - - if (of_machine_is_compatible("lantiq,ar9") || - of_machine_is_compatible("lantiq,vr9")) - ltq_usb_init(); - - if (of_machine_is_compatible("lantiq,vr9")) - ltq_rcu_w32(ltq_rcu_r32(RCU_AHB_ENDIAN) | RCU_VR9_BE_AHB1S, - RCU_AHB_ENDIAN); - - _machine_restart = ltq_machine_restart; - _machine_halt = ltq_machine_halt; - pm_power_off = ltq_machine_power_off; - - return 0; -} - -arch_initcall(mips_reboot_setup); diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c index 95bec460b651..7611c3013793 100644 --- a/arch/mips/lantiq/xway/sysctrl.c +++ b/arch/mips/lantiq/xway/sysctrl.c @@ -145,15 +145,7 @@ static u32 pmu_clk_cr_b[] = { #define pmu_w32(x, y) ltq_w32((x), pmu_membase + (y)) #define pmu_r32(x) ltq_r32(pmu_membase + (x)) -#define XBAR_ALWAYS_LAST 0x430 -#define XBAR_FPI_BURST_EN BIT(1) -#define XBAR_AHB_BURST_EN BIT(2) - -#define xbar_w32(x, y) ltq_w32((x), ltq_xbar_membase + (y)) -#define xbar_r32(x) ltq_r32(ltq_xbar_membase + (x)) - static void __iomem *pmu_membase; -static void __iomem *ltq_xbar_membase; void __iomem *ltq_cgu_membase; void __iomem *ltq_ebu_membase; @@ -293,16 +285,6 @@ static void pci_ext_disable(struct clk *clk) ltq_cgu_w32((1 << 31) | (1 << 30), pcicr); } -static void xbar_fpi_burst_disable(void) -{ - u32 reg; - - /* bit 1 as 1 --burst; bit 1 
as 0 -- single */ - reg = xbar_r32(XBAR_ALWAYS_LAST); - reg &= ~XBAR_FPI_BURST_EN; - xbar_w32(reg, XBAR_ALWAYS_LAST); -} - /* enable a clockout source */ static int clkout_enable(struct clk *clk) { @@ -459,26 +441,6 @@ void __init ltq_soc_init(void) if (!pmu_membase || !ltq_cgu_membase || !ltq_ebu_membase) panic("Failed to remap core resources"); - if (of_machine_is_compatible("lantiq,vr9")) { - struct resource res_xbar; - struct device_node *np_xbar = - of_find_compatible_node(NULL, NULL, - "lantiq,xbar-xway"); - - if (!np_xbar) - panic("Failed to load xbar nodes from devicetree"); - if (of_address_to_resource(np_xbar, 0, &res_xbar)) - panic("Failed to get xbar resources"); - if (!request_mem_region(res_xbar.start, resource_size(&res_xbar), - res_xbar.name)) - panic("Failed to get xbar resources"); - - ltq_xbar_membase = ioremap_nocache(res_xbar.start, - resource_size(&res_xbar)); - if (!ltq_xbar_membase) - panic("Failed to remap xbar resources"); - } - /* make sure to unprotect the memory region where flash is located */ ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_BUSCON0) & ~EBU_WRDIS, LTQ_EBU_BUSCON0); @@ -507,8 +469,8 @@ void __init ltq_soc_init(void) if (of_machine_is_compatible("lantiq,grx390") || of_machine_is_compatible("lantiq,ar10")) { - clkdev_add_pmu("1e101000.usb", "phy", 1, 2, PMU_ANALOG_USB0_P); - clkdev_add_pmu("1e106000.usb", "phy", 1, 2, PMU_ANALOG_USB1_P); + clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 2, PMU_ANALOG_USB0_P); + clkdev_add_pmu("1f203034.usb2-phy", "phy", 1, 2, PMU_ANALOG_USB1_P); /* rc 0 */ clkdev_add_pmu("1d900000.pcie", "phy", 1, 2, PMU_ANALOG_PCIE0_P); clkdev_add_pmu("1d900000.pcie", "msi", 1, 1, PMU1_PCIE_MSI); @@ -528,8 +490,8 @@ void __init ltq_soc_init(void) else clkdev_add_static(CLOCK_133M, CLOCK_133M, CLOCK_133M, CLOCK_133M); - clkdev_add_pmu("1e101000.usb", "ctl", 1, 0, PMU_USB0); - clkdev_add_pmu("1e101000.usb", "phy", 1, 0, PMU_USB0_P); + clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0); + clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P); clkdev_add_pmu("1e180000.etop", "ppe", 1, 0, PMU_PPE); clkdev_add_cgu("1e180000.etop", "ephycgu", CGU_EPHY); clkdev_add_pmu("1e180000.etop", "ephy", 1, 0, PMU_EPHY); @@ -538,8 +500,8 @@ void __init ltq_soc_init(void) } else if (of_machine_is_compatible("lantiq,grx390")) { clkdev_add_static(ltq_grx390_cpu_hz(), ltq_grx390_fpi_hz(), ltq_grx390_fpi_hz(), ltq_grx390_pp32_hz()); - clkdev_add_pmu("1e101000.usb", "ctl", 1, 0, PMU_USB0); - clkdev_add_pmu("1e106000.usb", "ctl", 1, 0, PMU_USB1); + clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0); + clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1); /* rc 2 */ clkdev_add_pmu("1a800000.pcie", "phy", 1, 2, PMU_ANALOG_PCIE2_P); clkdev_add_pmu("1a800000.pcie", "msi", 1, 1, PMU1_PCIE2_MSI); @@ -551,22 +513,23 @@ void __init ltq_soc_init(void) } else if (of_machine_is_compatible("lantiq,ar10")) { clkdev_add_static(ltq_ar10_cpu_hz(), ltq_ar10_fpi_hz(), ltq_ar10_fpi_hz(), ltq_ar10_pp32_hz()); - clkdev_add_pmu("1e101000.usb", "ctl", 1, 0, PMU_USB0); - clkdev_add_pmu("1e106000.usb", "ctl", 1, 0, PMU_USB1); + clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0); + clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1); clkdev_add_pmu("1e108000.eth", NULL, 0, 0, PMU_SWITCH | PMU_PPE_DP | PMU_PPE_TC); clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF); - clkdev_add_pmu("1f203000.rcu", "gphy", 1, 0, PMU_GPHY); + clkdev_add_pmu("1f203020.gphy", NULL, 1, 0, PMU_GPHY); + clkdev_add_pmu("1f203068.gphy", NULL, 1, 0, PMU_GPHY); clkdev_add_pmu("1e103100.deu", NULL, 1, 
0, PMU_DEU); clkdev_add_pmu("1e116000.mei", "afe", 1, 2, PMU_ANALOG_DSL_AFE); clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE); } else if (of_machine_is_compatible("lantiq,vr9")) { clkdev_add_static(ltq_vr9_cpu_hz(), ltq_vr9_fpi_hz(), ltq_vr9_fpi_hz(), ltq_vr9_pp32_hz()); - clkdev_add_pmu("1e101000.usb", "phy", 1, 0, PMU_USB0_P); - clkdev_add_pmu("1e101000.usb", "ctl", 1, 0, PMU_USB0 | PMU_AHBM); - clkdev_add_pmu("1e106000.usb", "phy", 1, 0, PMU_USB1_P); - clkdev_add_pmu("1e106000.usb", "ctl", 1, 0, PMU_USB1 | PMU_AHBM); + clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P); + clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0 | PMU_AHBM); + clkdev_add_pmu("1f203034.usb2-phy", "phy", 1, 0, PMU_USB1_P); + clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1 | PMU_AHBM); clkdev_add_pmu("1d900000.pcie", "phy", 1, 1, PMU1_PCIE_PHY); clkdev_add_pmu("1d900000.pcie", "bus", 1, 0, PMU_PCIE_CLK); clkdev_add_pmu("1d900000.pcie", "msi", 1, 1, PMU1_PCIE_MSI); @@ -579,17 +542,18 @@ void __init ltq_soc_init(void) PMU_SWITCH | PMU_PPE_DPLUS | PMU_PPE_DPLUM | PMU_PPE_EMA | PMU_PPE_TC | PMU_PPE_SLL01 | PMU_PPE_QSB | PMU_PPE_TOP); - clkdev_add_pmu("1f203000.rcu", "gphy", 0, 0, PMU_GPHY); + clkdev_add_pmu("1f203020.gphy", NULL, 0, 0, PMU_GPHY); + clkdev_add_pmu("1f203068.gphy", NULL, 0, 0, PMU_GPHY); clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO); clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU); clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE); } else if (of_machine_is_compatible("lantiq,ar9")) { clkdev_add_static(ltq_ar9_cpu_hz(), ltq_ar9_fpi_hz(), ltq_ar9_fpi_hz(), CLOCK_250M); - clkdev_add_pmu("1e101000.usb", "ctl", 1, 0, PMU_USB0); - clkdev_add_pmu("1e101000.usb", "phy", 1, 0, PMU_USB0_P); - clkdev_add_pmu("1e106000.usb", "ctl", 1, 0, PMU_USB1); - clkdev_add_pmu("1e106000.usb", "phy", 1, 0, PMU_USB1_P); + clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P); + clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0); + clkdev_add_pmu("1f203034.usb2-phy", "phy", 1, 0, PMU_USB1_P); + clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1); clkdev_add_pmu("1e180000.etop", "switch", 1, 0, PMU_SWITCH); clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO); clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU); @@ -598,14 +562,11 @@ void __init ltq_soc_init(void) } else { clkdev_add_static(ltq_danube_cpu_hz(), ltq_danube_fpi_hz(), ltq_danube_fpi_hz(), ltq_danube_pp32_hz()); - clkdev_add_pmu("1e101000.usb", "ctl", 1, 0, PMU_USB0); - clkdev_add_pmu("1e101000.usb", "phy", 1, 0, PMU_USB0_P); + clkdev_add_pmu("1f203018.usb2-phy", "ctrl", 1, 0, PMU_USB0); + clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P); clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO); clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU); clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE); clkdev_add_pmu("1e100400.serial", NULL, 1, 0, PMU_ASC0); } - - if (of_machine_is_compatible("lantiq,vr9")) - xbar_fpi_burst_disable(); } diff --git a/arch/mips/lantiq/xway/xrx200_phy_fw.c b/arch/mips/lantiq/xway/xrx200_phy_fw.c deleted file mode 100644 index f0a0f2d431b2..000000000000 --- a/arch/mips/lantiq/xway/xrx200_phy_fw.c +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Lantiq XRX200 PHY Firmware Loader - * Author: John Crispin - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
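/*
 * For reference, a hypothetical consumer of the "phy" clocks that the
 * sysctrl.c hunks above now register against the USB2 PHY device nodes
 * (dev_id "1f203018.usb2-phy" / "1f203034.usb2-phy").  The function and
 * its error handling are illustrative only, not part of the patch.
 */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int usb2_phy_clk_on(struct device *dev)
{
	/* clkdev matches on (dev_id, con_id), e.g. ("1f203018.usb2-phy", "phy") */
	struct clk *clk = devm_clk_get(dev, "phy");

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return clk_prepare_enable(clk);
}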
- * - * Copyright (C) 2012 John Crispin - */ - -#include -#include -#include -#include - -#include - -#define XRX200_GPHY_FW_ALIGN (16 * 1024) - -static dma_addr_t xway_gphy_load(struct platform_device *pdev) -{ - const struct firmware *fw; - dma_addr_t dev_addr = 0; - const char *fw_name; - void *fw_addr; - size_t size; - - if (of_get_property(pdev->dev.of_node, "firmware1", NULL) || - of_get_property(pdev->dev.of_node, "firmware2", NULL)) { - switch (ltq_soc_type()) { - case SOC_TYPE_VR9: - if (of_property_read_string(pdev->dev.of_node, - "firmware1", &fw_name)) { - dev_err(&pdev->dev, - "failed to load firmware filename\n"); - return 0; - } - break; - case SOC_TYPE_VR9_2: - if (of_property_read_string(pdev->dev.of_node, - "firmware2", &fw_name)) { - dev_err(&pdev->dev, - "failed to load firmware filename\n"); - return 0; - } - break; - } - } else if (of_property_read_string(pdev->dev.of_node, - "firmware", &fw_name)) { - dev_err(&pdev->dev, "failed to load firmware filename\n"); - return 0; - } - - dev_info(&pdev->dev, "requesting %s\n", fw_name); - if (request_firmware(&fw, fw_name, &pdev->dev)) { - dev_err(&pdev->dev, "failed to load firmware: %s\n", fw_name); - return 0; - } - - /* - * GPHY cores need the firmware code in a persistent and contiguous - * memory area with a 16 kB boundary aligned start address - */ - size = fw->size + XRX200_GPHY_FW_ALIGN; - - fw_addr = dma_alloc_coherent(&pdev->dev, size, &dev_addr, GFP_KERNEL); - if (fw_addr) { - fw_addr = PTR_ALIGN(fw_addr, XRX200_GPHY_FW_ALIGN); - dev_addr = ALIGN(dev_addr, XRX200_GPHY_FW_ALIGN); - memcpy(fw_addr, fw->data, fw->size); - } else { - dev_err(&pdev->dev, "failed to alloc firmware memory\n"); - } - - release_firmware(fw); - return dev_addr; -} - -static int xway_phy_fw_probe(struct platform_device *pdev) -{ - dma_addr_t fw_addr; - struct property *pp; - unsigned char *phyids; - int i, ret = 0; - - fw_addr = xway_gphy_load(pdev); - if (!fw_addr) - return -EINVAL; - pp = of_find_property(pdev->dev.of_node, "phys", NULL); - if (!pp) - return -ENOENT; - phyids = pp->value; - for (i = 0; i < pp->length && !ret; i++) - ret = xrx200_gphy_boot(&pdev->dev, phyids[i], fw_addr); - if (!ret) - mdelay(100); - return ret; -} - -static const struct of_device_id xway_phy_match[] = { - { .compatible = "lantiq,phy-xrx200" }, - {}, -}; - -static struct platform_driver xway_phy_driver = { - .probe = xway_phy_fw_probe, - .driver = { - .name = "phy-xrx200", - .of_match_table = xway_phy_match, - }, -}; -builtin_platform_driver(xway_phy_driver); diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile index a37fe3d1ee2f..6ab430d24575 100644 --- a/arch/mips/lib/Makefile +++ b/arch/mips/lib/Makefile @@ -6,7 +6,7 @@ lib-y += bitops.o csum_partial.o delay.o memcpy.o memset.o \ mips-atomic.o strncpy_user.o \ strnlen_user.o uncached.o -obj-y += iomap.o +obj-y += iomap.o iomap_copy.o obj-$(CONFIG_PCI) += iomap-pci.o lib-$(CONFIG_GENERIC_CSUM) := $(filter-out csum_partial.o, $(lib-y)) diff --git a/arch/mips/lib/delay.c b/arch/mips/lib/delay.c index 2307a3cb2714..68c495ed71e3 100644 --- a/arch/mips/lib/delay.c +++ b/arch/mips/lib/delay.c @@ -8,6 +8,7 @@ * Copyright (C) 1999, 2000 Silicon Graphics, Inc. * Copyright (C) 2007, 2014 Maciej W. 
Rozycki */ +#include #include #include #include diff --git a/arch/mips/lib/iomap_copy.c b/arch/mips/lib/iomap_copy.c new file mode 100644 index 000000000000..368bb38267c5 --- /dev/null +++ b/arch/mips/lib/iomap_copy.c @@ -0,0 +1,42 @@ +/* + * This file is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include +#include + +/** + * __ioread64_copy - copy data from MMIO space, in 64-bit units + * @to: destination (must be 64-bit aligned) + * @from: source, in MMIO space (must be 64-bit aligned) + * @count: number of 64-bit quantities to copy + * + * Copy data from MMIO space to kernel space, in units of 32 or 64 bits at a + * time. Order of access is not guaranteed, nor is a memory barrier + * performed afterwards. + */ +void __ioread64_copy(void *to, const void __iomem *from, size_t count) +{ +#ifdef CONFIG_64BIT + u64 *dst = to; + const u64 __iomem *src = from; + const u64 __iomem *end = src + count; + + while (src < end) + *dst++ = __raw_readq(src++); +#else + __ioread32_copy(to, from, count * 2); +#endif +} +EXPORT_SYMBOL_GPL(__ioread64_copy); diff --git a/arch/mips/loongson64/lemote-2f/clock.c b/arch/mips/loongson64/lemote-2f/clock.c index a78fb657068c..8281334df9c8 100644 --- a/arch/mips/loongson64/lemote-2f/clock.c +++ b/arch/mips/loongson64/lemote-2f/clock.c @@ -80,6 +80,9 @@ EXPORT_SYMBOL(clk_disable); unsigned long clk_get_rate(struct clk *clk) { + if (!clk) + return 0; + return (unsigned long)clk->rate; } EXPORT_SYMBOL(clk_get_rate); diff --git a/arch/mips/loongson64/loongson-3/smp.c b/arch/mips/loongson64/loongson-3/smp.c index b7a355c3c408..8501109bb0f0 100644 --- a/arch/mips/loongson64/loongson-3/smp.c +++ b/arch/mips/loongson64/loongson-3/smp.c @@ -319,8 +319,8 @@ static void loongson3_init_secondary(void) loongson3_ipi_write32(0xffffffff, ipi_en0_regs[cpu_logical_map(i)]); per_cpu(cpu_state, cpu) = CPU_ONLINE; - cpu_data[cpu].core = - cpu_logical_map(cpu) % loongson_sysconf.cores_per_package; + cpu_set_core(&cpu_data[cpu], + cpu_logical_map(cpu) % loongson_sysconf.cores_per_package); cpu_data[cpu].package = cpu_logical_map(cpu) / loongson_sysconf.cores_per_package; @@ -386,7 +386,8 @@ static void __init loongson3_smp_setup(void) ipi_status0_regs_init(); ipi_en0_regs_init(); ipi_mailbox_buf_init(); - cpu_data[0].core = cpu_logical_map(0) % loongson_sysconf.cores_per_package; + cpu_set_core(&cpu_data[0], + cpu_logical_map(0) % loongson_sysconf.cores_per_package); cpu_data[0].package = cpu_logical_map(0) / loongson_sysconf.cores_per_package; } @@ -399,7 +400,7 @@ static void __init loongson3_prepare_cpus(unsigned int max_cpus) /* * Setup the PC, SP, and GP of a secondary processor and start it runing! 
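/*
 * Illustration only: a hypothetical caller of the __ioread64_copy()
 * helper added above in arch/mips/lib/iomap_copy.c.  The mapping and
 * the 256-byte size are invented for the example; only the prototype
 * (destination, MMIO source, count of 64-bit words) comes from the
 * patch.  A real user would pick the declaration up from the io headers
 * rather than repeating it here.
 */
#include <linux/types.h>

void __ioread64_copy(void *to, const void __iomem *from, size_t count);

/* Snapshot 256 bytes (32 x 64-bit words) of device registers. */
static void snapshot_regs(void __iomem *regs, u64 *buf)
{
	/* As the kerneldoc above notes, no ordering or barrier is implied. */
	__ioread64_copy(buf, regs, 32);
}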
*/ -static void loongson3_boot_secondary(int cpu, struct task_struct *idle) +static int loongson3_boot_secondary(int cpu, struct task_struct *idle) { unsigned long startargs[4]; @@ -422,6 +423,7 @@ static void loongson3_boot_secondary(int cpu, struct task_struct *idle) (void *)(ipi_mailbox_buf[cpu_logical_map(cpu)]+0x8)); loongson3_ipi_write64(startargs[0], (void *)(ipi_mailbox_buf[cpu_logical_map(cpu)]+0x0)); + return 0; } #ifdef CONFIG_HOTPLUG_CPU @@ -697,7 +699,7 @@ void play_dead(void) static int loongson3_disable_clock(unsigned int cpu) { - uint64_t core_id = cpu_data[cpu].core; + uint64_t core_id = cpu_core(&cpu_data[cpu]); uint64_t package_id = cpu_data[cpu].package; if ((read_c0_prid() & PRID_REV_MASK) == PRID_REV_LOONGSON3A_R1) { @@ -711,7 +713,7 @@ static int loongson3_disable_clock(unsigned int cpu) static int loongson3_enable_clock(unsigned int cpu) { - uint64_t core_id = cpu_data[cpu].core; + uint64_t core_id = cpu_core(&cpu_data[cpu]); uint64_t package_id = cpu_data[cpu].package; if ((read_c0_prid() & PRID_REV_MASK) == PRID_REV_LOONGSON3A_R1) { @@ -734,7 +736,7 @@ early_initcall(register_loongson3_notifier); #endif -struct plat_smp_ops loongson3_smp_ops = { +const struct plat_smp_ops loongson3_smp_ops = { .send_ipi_single = loongson3_send_ipi_single, .send_ipi_mask = loongson3_send_ipi_mask, .init_secondary = loongson3_init_secondary, diff --git a/arch/mips/math-emu/Makefile b/arch/mips/math-emu/Makefile index e9bbc2a6526f..e9f10b88b695 100644 --- a/arch/mips/math-emu/Makefile +++ b/arch/mips/math-emu/Makefile @@ -4,9 +4,11 @@ obj-y += cp1emu.o ieee754dp.o ieee754sp.o ieee754.o \ dp_div.o dp_mul.o dp_sub.o dp_add.o dp_fsp.o dp_cmp.o dp_simple.o \ - dp_tint.o dp_fint.o dp_maddf.o dp_2008class.o dp_fmin.o dp_fmax.o \ + dp_tint.o dp_fint.o dp_rint.o dp_maddf.o dp_2008class.o dp_fmin.o \ + dp_fmax.o \ sp_div.o sp_mul.o sp_sub.o sp_add.o sp_fdp.o sp_cmp.o sp_simple.o \ - sp_tint.o sp_fint.o sp_maddf.o sp_2008class.o sp_fmin.o sp_fmax.o \ + sp_tint.o sp_fint.o sp_rint.o sp_maddf.o sp_2008class.o sp_fmin.o \ + sp_fmax.o \ dsemul.o lib-y += ieee754d.o \ diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index f08a7b4facb9..192542dbd972 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c @@ -58,7 +58,7 @@ static int fpu_emu(struct pt_regs *, struct mips_fpu_struct *, mips_instruction); static int fpux_emu(struct pt_regs *, - struct mips_fpu_struct *, mips_instruction, void *__user *); + struct mips_fpu_struct *, mips_instruction, void __user **); /* Control registers */ @@ -830,12 +830,12 @@ do { \ } while (0) #define DIFROMREG(di, x) \ - ((di) = get_fpr64(&ctx->fpr[(x) & ~(cop1_64bit(xcp) == 0)], 0)) + ((di) = get_fpr64(&ctx->fpr[(x) & ~(cop1_64bit(xcp) ^ 1)], 0)) #define DITOREG(di, x) \ do { \ unsigned fpr, i; \ - fpr = (x) & ~(cop1_64bit(xcp) == 0); \ + fpr = (x) & ~(cop1_64bit(xcp) ^ 1); \ set_fpr64(&ctx->fpr[fpr], 0, di); \ for (i = 1; i < ARRAY_SIZE(ctx->fpr[x].val64); i++) \ set_fpr64(&ctx->fpr[fpr], i, 0); \ @@ -973,7 +973,7 @@ static inline void cop1_ctc(struct pt_regs *xcp, struct mips_fpu_struct *ctx, */ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx, - struct mm_decoded_insn dec_insn, void *__user *fault_addr) + struct mm_decoded_insn dec_insn, void __user **fault_addr) { unsigned long contpc = xcp->cp0_epc + dec_insn.pc_inc; unsigned int cond, cbit, bit0; @@ -1195,9 +1195,11 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx, bit0 = get_fpr32(fpr, 0) & 0x1; switch 
(MIPSInst_RS(ir)) { case bc1eqz_op: + MIPS_FPU_EMU_INC_STATS(bc1eqz); cond = bit0 == 0; break; case bc1nez_op: + MIPS_FPU_EMU_INC_STATS(bc1nez); cond = bit0 != 0; break; } @@ -1230,6 +1232,7 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx, break; } branch_common: + MIPS_FPU_EMU_INC_STATS(branches); set_delay_slot(xcp); if (cond) { /* @@ -1460,7 +1463,7 @@ DEF3OP(nmadd, dp, ieee754dp_mul, ieee754dp_add, ieee754dp_neg); DEF3OP(nmsub, dp, ieee754dp_mul, ieee754dp_sub, ieee754dp_neg); static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, - mips_instruction ir, void *__user *fault_addr) + mips_instruction ir, void __user **fault_addr) { unsigned rcsr = 0; /* resulting csr */ @@ -1682,15 +1685,19 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, switch (MIPSInst_FUNC(ir)) { /* binary ops */ case fadd_op: + MIPS_FPU_EMU_INC_STATS(add_s); handler.b = ieee754sp_add; goto scopbop; case fsub_op: + MIPS_FPU_EMU_INC_STATS(sub_s); handler.b = ieee754sp_sub; goto scopbop; case fmul_op: + MIPS_FPU_EMU_INC_STATS(mul_s); handler.b = ieee754sp_mul; goto scopbop; case fdiv_op: + MIPS_FPU_EMU_INC_STATS(div_s); handler.b = ieee754sp_div; goto scopbop; @@ -1699,6 +1706,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_2_3_4_5_r) return SIGILL; + MIPS_FPU_EMU_INC_STATS(sqrt_s); handler.u = ieee754sp_sqrt; goto scopuop; @@ -1711,6 +1719,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_4_5_64_r2_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(rsqrt_s); handler.u = fpemu_sp_rsqrt; goto scopuop; @@ -1718,6 +1727,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_4_5_64_r2_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(recip_s); handler.u = fpemu_sp_recip; goto scopuop; @@ -1754,6 +1764,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(seleqz_s); SPFROMREG(rv.s, MIPSInst_FT(ir)); if (rv.w & 0x1) rv.w = 0; @@ -1765,6 +1776,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(selnez_s); SPFROMREG(rv.s, MIPSInst_FT(ir)); if (rv.w & 0x1) SPFROMREG(rv.s, MIPSInst_FS(ir)); @@ -1778,6 +1790,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(maddf_s); SPFROMREG(ft, MIPSInst_FT(ir)); SPFROMREG(fs, MIPSInst_FS(ir)); SPFROMREG(fd, MIPSInst_FD(ir)); @@ -1791,6 +1804,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(msubf_s); SPFROMREG(ft, MIPSInst_FT(ir)); SPFROMREG(fs, MIPSInst_FS(ir)); SPFROMREG(fd, MIPSInst_FD(ir)); @@ -1804,9 +1818,9 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(rint_s); SPFROMREG(fs, MIPSInst_FS(ir)); - rv.l = ieee754sp_tlong(fs); - rv.s = ieee754sp_flong(rv.l); + rv.s = ieee754sp_rint(fs); goto copcsr; } @@ -1816,6 +1830,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(class_s); SPFROMREG(fs, MIPSInst_FS(ir)); rv.w = ieee754sp_2008class(fs); rfmt = w_fmt; @@ -1828,6 +1843,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(min_s); 
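/*
 * The MIPS_FPU_EMU_INC_STATS() calls added throughout this hunk bump a
 * per-CPU, per-instruction counter that the new debugfs code further
 * down then exposes and clears.  A simplified, self-contained sketch of
 * that pattern follows; the names are examples only, the kernel's real
 * structure and macro live in arch/mips/include/asm/fpu_emulator.h.
 */
#include <linux/percpu.h>
#include <linux/preempt.h>

struct fpu_emu_stats_example {
	unsigned long add_s;
	unsigned long min_s;
	/* ...one unsigned long per emulated instruction... */
};

static DEFINE_PER_CPU(struct fpu_emu_stats_example, fpu_emu_stats_example);

#define FPU_EMU_INC_EXAMPLE(field)				\
do {								\
	preempt_disable();					\
	__this_cpu_inc(fpu_emu_stats_example.field);		\
	preempt_enable();					\
} while (0)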
SPFROMREG(ft, MIPSInst_FT(ir)); SPFROMREG(fs, MIPSInst_FS(ir)); rv.s = ieee754sp_fmin(fs, ft); @@ -1840,6 +1856,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(mina_s); SPFROMREG(ft, MIPSInst_FT(ir)); SPFROMREG(fs, MIPSInst_FS(ir)); rv.s = ieee754sp_fmina(fs, ft); @@ -1852,6 +1869,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(max_s); SPFROMREG(ft, MIPSInst_FT(ir)); SPFROMREG(fs, MIPSInst_FS(ir)); rv.s = ieee754sp_fmax(fs, ft); @@ -1864,6 +1882,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(maxa_s); SPFROMREG(ft, MIPSInst_FT(ir)); SPFROMREG(fs, MIPSInst_FS(ir)); rv.s = ieee754sp_fmaxa(fs, ft); @@ -1871,15 +1890,18 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, } case fabs_op: + MIPS_FPU_EMU_INC_STATS(abs_s); handler.u = ieee754sp_abs; goto scopuop; case fneg_op: + MIPS_FPU_EMU_INC_STATS(neg_s); handler.u = ieee754sp_neg; goto scopuop; case fmov_op: /* an easy one */ + MIPS_FPU_EMU_INC_STATS(mov_s); SPFROMREG(rv.s, MIPSInst_FS(ir)); goto copcsr; @@ -1922,12 +1944,14 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, return SIGILL; /* not defined */ case fcvtd_op: + MIPS_FPU_EMU_INC_STATS(cvt_d_s); SPFROMREG(fs, MIPSInst_FS(ir)); rv.d = ieee754dp_fsp(fs); rfmt = d_fmt; goto copcsr; case fcvtw_op: + MIPS_FPU_EMU_INC_STATS(cvt_w_s); SPFROMREG(fs, MIPSInst_FS(ir)); rv.w = ieee754sp_tint(fs); rfmt = w_fmt; @@ -1940,6 +1964,15 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_2_3_4_5_r) return SIGILL; + if (MIPSInst_FUNC(ir) == fceil_op) + MIPS_FPU_EMU_INC_STATS(ceil_w_s); + if (MIPSInst_FUNC(ir) == ffloor_op) + MIPS_FPU_EMU_INC_STATS(floor_w_s); + if (MIPSInst_FUNC(ir) == fround_op) + MIPS_FPU_EMU_INC_STATS(round_w_s); + if (MIPSInst_FUNC(ir) == ftrunc_op) + MIPS_FPU_EMU_INC_STATS(trunc_w_s); + oldrm = ieee754_csr.rm; SPFROMREG(fs, MIPSInst_FS(ir)); ieee754_csr.rm = MIPSInst_FUNC(ir); @@ -1952,6 +1985,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(sel_s); SPFROMREG(fd, MIPSInst_FD(ir)); if (fd.bits & 0x1) SPFROMREG(rv.s, MIPSInst_FT(ir)); @@ -1963,6 +1997,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_3_4_5_64_r2_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(cvt_l_s); SPFROMREG(fs, MIPSInst_FS(ir)); rv.l = ieee754sp_tlong(fs); rfmt = l_fmt; @@ -1975,6 +2010,15 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_3_4_5_64_r2_r6) return SIGILL; + if (MIPSInst_FUNC(ir) == fceill_op) + MIPS_FPU_EMU_INC_STATS(ceil_l_s); + if (MIPSInst_FUNC(ir) == ffloorl_op) + MIPS_FPU_EMU_INC_STATS(floor_l_s); + if (MIPSInst_FUNC(ir) == froundl_op) + MIPS_FPU_EMU_INC_STATS(round_l_s); + if (MIPSInst_FUNC(ir) == ftruncl_op) + MIPS_FPU_EMU_INC_STATS(trunc_l_s); + oldrm = ieee754_csr.rm; SPFROMREG(fs, MIPSInst_FS(ir)); ieee754_csr.rm = MIPSInst_FUNC(ir); @@ -2016,15 +2060,19 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, switch (MIPSInst_FUNC(ir)) { /* binary ops */ case fadd_op: + MIPS_FPU_EMU_INC_STATS(add_d); handler.b = ieee754dp_add; goto dcopbop; case fsub_op: + MIPS_FPU_EMU_INC_STATS(sub_d); handler.b = ieee754dp_sub; goto dcopbop; case fmul_op: + 
MIPS_FPU_EMU_INC_STATS(mul_d); handler.b = ieee754dp_mul; goto dcopbop; case fdiv_op: + MIPS_FPU_EMU_INC_STATS(div_d); handler.b = ieee754dp_div; goto dcopbop; @@ -2033,6 +2081,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_2_3_4_5_r) return SIGILL; + MIPS_FPU_EMU_INC_STATS(sqrt_d); handler.u = ieee754dp_sqrt; goto dcopuop; /* @@ -2044,12 +2093,14 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_4_5_64_r2_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(rsqrt_d); handler.u = fpemu_dp_rsqrt; goto dcopuop; case frecip_op: if (!cpu_has_mips_4_5_64_r2_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(recip_d); handler.u = fpemu_dp_recip; goto dcopuop; case fmovc_op: @@ -2083,6 +2134,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(seleqz_d); DPFROMREG(rv.d, MIPSInst_FT(ir)); if (rv.l & 0x1) rv.l = 0; @@ -2094,6 +2146,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(selnez_d); DPFROMREG(rv.d, MIPSInst_FT(ir)); if (rv.l & 0x1) DPFROMREG(rv.d, MIPSInst_FS(ir)); @@ -2107,6 +2160,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(maddf_d); DPFROMREG(ft, MIPSInst_FT(ir)); DPFROMREG(fs, MIPSInst_FS(ir)); DPFROMREG(fd, MIPSInst_FD(ir)); @@ -2120,6 +2174,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(msubf_d); DPFROMREG(ft, MIPSInst_FT(ir)); DPFROMREG(fs, MIPSInst_FS(ir)); DPFROMREG(fd, MIPSInst_FD(ir)); @@ -2133,9 +2188,9 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(rint_d); DPFROMREG(fs, MIPSInst_FS(ir)); - rv.l = ieee754dp_tlong(fs); - rv.d = ieee754dp_flong(rv.l); + rv.d = ieee754dp_rint(fs); goto copcsr; } @@ -2145,9 +2200,10 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(class_d); DPFROMREG(fs, MIPSInst_FS(ir)); - rv.w = ieee754dp_2008class(fs); - rfmt = w_fmt; + rv.l = ieee754dp_2008class(fs); + rfmt = l_fmt; break; } @@ -2157,6 +2213,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(min_d); DPFROMREG(ft, MIPSInst_FT(ir)); DPFROMREG(fs, MIPSInst_FS(ir)); rv.d = ieee754dp_fmin(fs, ft); @@ -2169,6 +2226,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(mina_d); DPFROMREG(ft, MIPSInst_FT(ir)); DPFROMREG(fs, MIPSInst_FS(ir)); rv.d = ieee754dp_fmina(fs, ft); @@ -2181,6 +2239,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(max_d); DPFROMREG(ft, MIPSInst_FT(ir)); DPFROMREG(fs, MIPSInst_FS(ir)); rv.d = ieee754dp_fmax(fs, ft); @@ -2193,6 +2252,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(maxa_d); DPFROMREG(ft, MIPSInst_FT(ir)); DPFROMREG(fs, MIPSInst_FS(ir)); rv.d = ieee754dp_fmaxa(fs, ft); @@ -2200,15 +2260,18 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, } case fabs_op: + MIPS_FPU_EMU_INC_STATS(abs_d); handler.u = ieee754dp_abs; goto dcopuop; 
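/*
 * The rint_s/rint_d hunks above replace the old tlong()+flong() round
 * trip with dedicated ieee754{sp,dp}_rint() helpers: converting through
 * a 64-bit integer cannot cope with operands that are already integral
 * but lie outside the s64 range (nor with infinities), which RINT must
 * return unchanged.  A hypothetical userspace illustration of that
 * corner case, not part of the patch:
 */
#include <math.h>
#include <stdio.h>

int main(void)
{
	double big = 0x1p80;	/* 2^80: integral, but far above INT64_MAX */

	/* rint() returns it unchanged and raises nothing... */
	printf("%a\n", rint(big));	/* prints 0x1p+80 */

	/* ...whereas (double)(int64_t)big would be undefined behaviour. */
	return 0;
}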
case fneg_op: + MIPS_FPU_EMU_INC_STATS(neg_d); handler.u = ieee754dp_neg; goto dcopuop; case fmov_op: /* an easy one */ + MIPS_FPU_EMU_INC_STATS(mov_d); DPFROMREG(rv.d, MIPSInst_FS(ir)); goto copcsr; @@ -2228,6 +2291,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, * unary conv ops */ case fcvts_op: + MIPS_FPU_EMU_INC_STATS(cvt_s_d); DPFROMREG(fs, MIPSInst_FS(ir)); rv.s = ieee754sp_fdp(fs); rfmt = s_fmt; @@ -2237,6 +2301,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, return SIGILL; /* not defined */ case fcvtw_op: + MIPS_FPU_EMU_INC_STATS(cvt_w_d); DPFROMREG(fs, MIPSInst_FS(ir)); rv.w = ieee754dp_tint(fs); /* wrong */ rfmt = w_fmt; @@ -2249,6 +2314,15 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_2_3_4_5_r) return SIGILL; + if (MIPSInst_FUNC(ir) == fceil_op) + MIPS_FPU_EMU_INC_STATS(ceil_w_d); + if (MIPSInst_FUNC(ir) == ffloor_op) + MIPS_FPU_EMU_INC_STATS(floor_w_d); + if (MIPSInst_FUNC(ir) == fround_op) + MIPS_FPU_EMU_INC_STATS(round_w_d); + if (MIPSInst_FUNC(ir) == ftrunc_op) + MIPS_FPU_EMU_INC_STATS(trunc_w_d); + oldrm = ieee754_csr.rm; DPFROMREG(fs, MIPSInst_FS(ir)); ieee754_csr.rm = MIPSInst_FUNC(ir); @@ -2261,6 +2335,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(sel_d); DPFROMREG(fd, MIPSInst_FD(ir)); if (fd.bits & 0x1) DPFROMREG(rv.d, MIPSInst_FT(ir)); @@ -2272,6 +2347,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_3_4_5_64_r2_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(cvt_l_d); DPFROMREG(fs, MIPSInst_FS(ir)); rv.l = ieee754dp_tlong(fs); rfmt = l_fmt; @@ -2284,6 +2360,15 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_3_4_5_64_r2_r6) return SIGILL; + if (MIPSInst_FUNC(ir) == fceill_op) + MIPS_FPU_EMU_INC_STATS(ceil_l_d); + if (MIPSInst_FUNC(ir) == ffloorl_op) + MIPS_FPU_EMU_INC_STATS(floor_l_d); + if (MIPSInst_FUNC(ir) == froundl_op) + MIPS_FPU_EMU_INC_STATS(round_l_d); + if (MIPSInst_FUNC(ir) == ftruncl_op) + MIPS_FPU_EMU_INC_STATS(trunc_l_d); + oldrm = ieee754_csr.rm; DPFROMREG(fs, MIPSInst_FS(ir)); ieee754_csr.rm = MIPSInst_FUNC(ir); @@ -2325,12 +2410,14 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, switch (MIPSInst_FUNC(ir)) { case fcvts_op: /* convert word to single precision real */ + MIPS_FPU_EMU_INC_STATS(cvt_s_w); SPFROMREG(fs, MIPSInst_FS(ir)); rv.s = ieee754sp_fint(fs.bits); rfmt = s_fmt; goto copcsr; case fcvtd_op: /* convert word to double precision real */ + MIPS_FPU_EMU_INC_STATS(cvt_d_w); SPFROMREG(fs, MIPSInst_FS(ir)); rv.d = ieee754dp_fint(fs.bits); rfmt = d_fmt; @@ -2350,6 +2437,90 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, (MIPSInst_FUNC(ir) & 0x20)) return SIGILL; + if (!sig) { + if (!(MIPSInst_FUNC(ir) & PREDICATE_BIT)) { + switch (cmpop) { + case 0: + MIPS_FPU_EMU_INC_STATS(cmp_af_s); + break; + case 1: + MIPS_FPU_EMU_INC_STATS(cmp_un_s); + break; + case 2: + MIPS_FPU_EMU_INC_STATS(cmp_eq_s); + break; + case 3: + MIPS_FPU_EMU_INC_STATS(cmp_ueq_s); + break; + case 4: + MIPS_FPU_EMU_INC_STATS(cmp_lt_s); + break; + case 5: + MIPS_FPU_EMU_INC_STATS(cmp_ult_s); + break; + case 6: + MIPS_FPU_EMU_INC_STATS(cmp_le_s); + break; + case 7: + MIPS_FPU_EMU_INC_STATS(cmp_ule_s); + break; + } + } else { + switch (cmpop) { + case 1: + MIPS_FPU_EMU_INC_STATS(cmp_or_s); + break; + case 2: + MIPS_FPU_EMU_INC_STATS(cmp_une_s); + break; + case 3: + 
MIPS_FPU_EMU_INC_STATS(cmp_ne_s); + break; + } + } + } else { + if (!(MIPSInst_FUNC(ir) & PREDICATE_BIT)) { + switch (cmpop) { + case 0: + MIPS_FPU_EMU_INC_STATS(cmp_saf_s); + break; + case 1: + MIPS_FPU_EMU_INC_STATS(cmp_sun_s); + break; + case 2: + MIPS_FPU_EMU_INC_STATS(cmp_seq_s); + break; + case 3: + MIPS_FPU_EMU_INC_STATS(cmp_sueq_s); + break; + case 4: + MIPS_FPU_EMU_INC_STATS(cmp_slt_s); + break; + case 5: + MIPS_FPU_EMU_INC_STATS(cmp_sult_s); + break; + case 6: + MIPS_FPU_EMU_INC_STATS(cmp_sle_s); + break; + case 7: + MIPS_FPU_EMU_INC_STATS(cmp_sule_s); + break; + } + } else { + switch (cmpop) { + case 1: + MIPS_FPU_EMU_INC_STATS(cmp_sor_s); + break; + case 2: + MIPS_FPU_EMU_INC_STATS(cmp_sune_s); + break; + case 3: + MIPS_FPU_EMU_INC_STATS(cmp_sne_s); + break; + } + } + } + /* fmt is w_fmt for single precision so fix it */ rfmt = s_fmt; /* default to false */ @@ -2394,6 +2565,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, break; } } + break; } case l_fmt: @@ -2406,11 +2578,13 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, switch (MIPSInst_FUNC(ir)) { case fcvts_op: /* convert long to single precision real */ + MIPS_FPU_EMU_INC_STATS(cvt_s_l); rv.s = ieee754sp_flong(bits); rfmt = s_fmt; goto copcsr; case fcvtd_op: /* convert long to double precision real */ + MIPS_FPU_EMU_INC_STATS(cvt_d_l); rv.d = ieee754dp_flong(bits); rfmt = d_fmt; goto copcsr; @@ -2424,6 +2598,90 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, (MIPSInst_FUNC(ir) & 0x20)) return SIGILL; + if (!sig) { + if (!(MIPSInst_FUNC(ir) & PREDICATE_BIT)) { + switch (cmpop) { + case 0: + MIPS_FPU_EMU_INC_STATS(cmp_af_d); + break; + case 1: + MIPS_FPU_EMU_INC_STATS(cmp_un_d); + break; + case 2: + MIPS_FPU_EMU_INC_STATS(cmp_eq_d); + break; + case 3: + MIPS_FPU_EMU_INC_STATS(cmp_ueq_d); + break; + case 4: + MIPS_FPU_EMU_INC_STATS(cmp_lt_d); + break; + case 5: + MIPS_FPU_EMU_INC_STATS(cmp_ult_d); + break; + case 6: + MIPS_FPU_EMU_INC_STATS(cmp_le_d); + break; + case 7: + MIPS_FPU_EMU_INC_STATS(cmp_ule_d); + break; + } + } else { + switch (cmpop) { + case 1: + MIPS_FPU_EMU_INC_STATS(cmp_or_d); + break; + case 2: + MIPS_FPU_EMU_INC_STATS(cmp_une_d); + break; + case 3: + MIPS_FPU_EMU_INC_STATS(cmp_ne_d); + break; + } + } + } else { + if (!(MIPSInst_FUNC(ir) & PREDICATE_BIT)) { + switch (cmpop) { + case 0: + MIPS_FPU_EMU_INC_STATS(cmp_saf_d); + break; + case 1: + MIPS_FPU_EMU_INC_STATS(cmp_sun_d); + break; + case 2: + MIPS_FPU_EMU_INC_STATS(cmp_seq_d); + break; + case 3: + MIPS_FPU_EMU_INC_STATS(cmp_sueq_d); + break; + case 4: + MIPS_FPU_EMU_INC_STATS(cmp_slt_d); + break; + case 5: + MIPS_FPU_EMU_INC_STATS(cmp_sult_d); + break; + case 6: + MIPS_FPU_EMU_INC_STATS(cmp_sle_d); + break; + case 7: + MIPS_FPU_EMU_INC_STATS(cmp_sule_d); + break; + } + } else { + switch (cmpop) { + case 1: + MIPS_FPU_EMU_INC_STATS(cmp_sor_d); + break; + case 2: + MIPS_FPU_EMU_INC_STATS(cmp_sune_d); + break; + case 3: + MIPS_FPU_EMU_INC_STATS(cmp_sne_d); + break; + } + } + } + /* fmt is l_fmt for double precision so fix it */ rfmt = d_fmt; /* default to false */ @@ -2468,6 +2726,8 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, break; } } + break; + default: return SIGILL; } @@ -2553,7 +2813,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, * For simplicity we always terminate upon an ISA mode switch. 
*/ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx, - int has_fpu, void *__user *fault_addr) + int has_fpu, void __user **fault_addr) { unsigned long oldepc, prevepc; struct mm_decoded_insn dec_insn; diff --git a/arch/mips/math-emu/dp_fmax.c b/arch/mips/math-emu/dp_fmax.c index fd71b8daaaf2..5bec64f2884e 100644 --- a/arch/mips/math-emu/dp_fmax.c +++ b/arch/mips/math-emu/dp_fmax.c @@ -47,14 +47,26 @@ union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y) case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): return ieee754dp_nanxcpt(x); - /* numbers are preferred to NaNs */ + /* + * Quiet NaN handling + */ + + /* + * The case of both inputs quiet NaNs + */ + case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): + return x; + + /* + * The cases of exactly one input quiet NaN (numbers + * are here preferred as returned values to NaNs) + */ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN): return x; - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): @@ -80,9 +92,7 @@ union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y) return ys ? x : y; case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): - if (xs == ys) - return x; - return ieee754dp_zero(1); + return ieee754dp_zero(xs & ys); case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): DPDNORMX; @@ -106,16 +116,32 @@ union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y) else if (xs < ys) return x; - /* Compare exponent */ - if (xe > ye) - return x; - else if (xe < ye) - return y; + /* Signs of inputs are equal, let's compare exponents */ + if (xs == 0) { + /* Inputs are both positive */ + if (xe > ye) + return x; + else if (xe < ye) + return y; + } else { + /* Inputs are both negative */ + if (xe > ye) + return y; + else if (xe < ye) + return x; + } - /* Compare mantissa */ + /* Signs and exponents of inputs are equal, let's compare mantissas */ + if (xs == 0) { + /* Inputs are both positive, with equal signs and exponents */ + if (xm <= ym) + return y; + return x; + } + /* Inputs are both negative, with equal signs and exponents */ if (xm <= ym) - return y; - return x; + return x; + return y; } union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y) @@ -147,14 +173,26 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y) case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): return ieee754dp_nanxcpt(x); - /* numbers are preferred to NaNs */ + /* + * Quiet NaN handling + */ + + /* + * The case of both inputs quiet NaNs + */ + case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): + return x; + + /* + * The cases of exactly one input quiet NaN (numbers + * are here preferred as returned values to NaNs) + */ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN): return x; - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): @@ -164,6 +202,9 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y) /* * 
Infinity and zero handling */ + case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): + return ieee754dp_inf(xs & ys); + case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): @@ -171,7 +212,6 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y) case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): return x; - case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF): case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): @@ -180,9 +220,7 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y) return y; case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): - if (xs == ys) - return x; - return ieee754dp_zero(1); + return ieee754dp_zero(xs & ys); case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): DPDNORMX; @@ -207,7 +245,11 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y) return y; /* Compare mantissa */ - if (xm <= ym) + if (xm < ym) return y; - return x; + else if (xm > ym) + return x; + else if (xs == 0) + return x; + return y; } diff --git a/arch/mips/math-emu/dp_fmin.c b/arch/mips/math-emu/dp_fmin.c index c1072b0dfb95..a287b23818d8 100644 --- a/arch/mips/math-emu/dp_fmin.c +++ b/arch/mips/math-emu/dp_fmin.c @@ -47,14 +47,26 @@ union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y) case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): return ieee754dp_nanxcpt(x); - /* numbers are preferred to NaNs */ + /* + * Quiet NaN handling + */ + + /* + * The case of both inputs quiet NaNs + */ + case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): + return x; + + /* + * The cases of exactly one input quiet NaN (numbers + * are here preferred as returned values to NaNs) + */ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN): return x; - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): @@ -80,9 +92,7 @@ union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y) return ys ? 
y : x; case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): - if (xs == ys) - return x; - return ieee754dp_zero(1); + return ieee754dp_zero(xs | ys); case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): DPDNORMX; @@ -106,16 +116,32 @@ union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y) else if (xs < ys) return y; - /* Compare exponent */ - if (xe > ye) - return y; - else if (xe < ye) - return x; + /* Signs of inputs are the same, let's compare exponents */ + if (xs == 0) { + /* Inputs are both positive */ + if (xe > ye) + return y; + else if (xe < ye) + return x; + } else { + /* Inputs are both negative */ + if (xe > ye) + return x; + else if (xe < ye) + return y; + } - /* Compare mantissa */ + /* Signs and exponents of inputs are equal, let's compare mantissas */ + if (xs == 0) { + /* Inputs are both positive, with equal signs and exponents */ + if (xm <= ym) + return x; + return y; + } + /* Inputs are both negative, with equal signs and exponents */ if (xm <= ym) - return x; - return y; + return y; + return x; } union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y) @@ -147,14 +173,26 @@ union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y) case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): return ieee754dp_nanxcpt(x); - /* numbers are preferred to NaNs */ + /* + * Quiet NaN handling + */ + + /* + * The case of both inputs quiet NaNs + */ + case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): + return x; + + /* + * The cases of exactly one input quiet NaN (numbers + * are here preferred as returned values to NaNs) + */ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN): return x; - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): @@ -164,25 +202,25 @@ union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y) /* * Infinity and zero handling */ + case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): + return ieee754dp_inf(xs | ys); + case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): - return x; + return y; - case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF): case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM): - return y; + return x; case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): - if (xs == ys) - return x; - return ieee754dp_zero(1); + return ieee754dp_zero(xs | ys); case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): DPDNORMX; @@ -207,7 +245,11 @@ union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y) return x; /* Compare mantissa */ - if (xm <= ym) + if (xm < ym) + return x; + else if (xm > ym) + return y; + else if (xs == 1) return x; return y; } diff --git a/arch/mips/math-emu/dp_maddf.c b/arch/mips/math-emu/dp_maddf.c index caa62f20a888..e0d9be5fbf4c 100644 --- a/arch/mips/math-emu/dp_maddf.c +++ b/arch/mips/math-emu/dp_maddf.c @@ -14,22 +14,45 @@ #include 
"ieee754dp.h" -enum maddf_flags { - maddf_negate_product = 1 << 0, -}; + +/* 128 bits shift right logical with rounding. */ +void srl128(u64 *hptr, u64 *lptr, int count) +{ + u64 low; + + if (count >= 128) { + *lptr = *hptr != 0 || *lptr != 0; + *hptr = 0; + } else if (count >= 64) { + if (count == 64) { + *lptr = *hptr | (*lptr != 0); + } else { + low = *lptr; + *lptr = *hptr >> (count - 64); + *lptr |= (*hptr << (128 - count)) != 0 || low != 0; + } + *hptr = 0; + } else { + low = *lptr; + *lptr = low >> count | *hptr << (64 - count); + *lptr |= (low << (64 - count)) != 0; + *hptr = *hptr >> count; + } +} static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x, union ieee754dp y, enum maddf_flags flags) { int re; int rs; - u64 rm; unsigned lxm; unsigned hxm; unsigned lym; unsigned hym; u64 lrm; u64 hrm; + u64 lzm; + u64 hzm; u64 t; u64 at; int s; @@ -48,52 +71,34 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x, ieee754_clearcx(); - switch (zc) { - case IEEE754_CLASS_SNAN: - ieee754_setcx(IEEE754_INVALID_OPERATION); + /* + * Handle the cases when at least one of x, y or z is a NaN. + * Order of precedence is sNaN, qNaN and z, x, y. + */ + if (zc == IEEE754_CLASS_SNAN) return ieee754dp_nanxcpt(z); - case IEEE754_CLASS_DNORM: - DPDNORMZ; - /* QNAN and ZERO cases are handled separately below */ - } - - switch (CLPAIR(xc, yc)) { - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN): - case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN): - case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN): - case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN): - case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN): - return ieee754dp_nanxcpt(y); - - case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN): - case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN): - case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO): - case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM): - case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM): - case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): + if (xc == IEEE754_CLASS_SNAN) return ieee754dp_nanxcpt(x); - - case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): - case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): - case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): - case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN): + if (yc == IEEE754_CLASS_SNAN) + return ieee754dp_nanxcpt(y); + if (zc == IEEE754_CLASS_QNAN) + return z; + if (xc == IEEE754_CLASS_QNAN) + return x; + if (yc == IEEE754_CLASS_QNAN) return y; - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF): - return x; + if (zc == IEEE754_CLASS_DNORM) + DPDNORMZ; + /* ZERO z cases are handled separately below */ + switch (CLPAIR(xc, yc)) { /* * Infinity handling */ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): - if (zc == IEEE754_CLASS_QNAN) - return z; ieee754_setcx(IEEE754_INVALID_OPERATION); return ieee754dp_indef(); @@ -102,9 +107,27 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x, case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): - if (zc == IEEE754_CLASS_QNAN) - return z; - return ieee754dp_inf(xs ^ ys); + if ((zc == IEEE754_CLASS_INF) && + ((!(flags & MADDF_NEGATE_PRODUCT) && (zs 
!= (xs ^ ys))) || + ((flags & MADDF_NEGATE_PRODUCT) && (zs == (xs ^ ys))))) { + /* + * Cases of addition of infinities with opposite signs + * or subtraction of infinities with same signs. + */ + ieee754_setcx(IEEE754_INVALID_OPERATION); + return ieee754dp_indef(); + } + /* + * z is here either not an infinity, or an infinity having the + * same sign as product (x*y) (in case of MADDF.D instruction) + * or product -(x*y) (in MSUBF.D case). The result must be an + * infinity, and its sign is determined only by the value of + * (flags & MADDF_NEGATE_PRODUCT) and the signs of x and y. + */ + if (flags & MADDF_NEGATE_PRODUCT) + return ieee754dp_inf(1 ^ (xs ^ ys)); + else + return ieee754dp_inf(xs ^ ys); case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM): @@ -113,32 +136,42 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x, case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): if (zc == IEEE754_CLASS_INF) return ieee754dp_inf(zs); - /* Multiplication is 0 so just return z */ + if (zc == IEEE754_CLASS_ZERO) { + /* Handle cases +0 + (-0) and similar ones. */ + if ((!(flags & MADDF_NEGATE_PRODUCT) + && (zs == (xs ^ ys))) || + ((flags & MADDF_NEGATE_PRODUCT) + && (zs != (xs ^ ys)))) + /* + * Cases of addition of zeros of equal signs + * or subtraction of zeroes of opposite signs. + * The sign of the resulting zero is in any + * such case determined only by the sign of z. + */ + return z; + + return ieee754dp_zero(ieee754_csr.rm == FPU_CSR_RD); + } + /* x*y is here 0, and z is not 0, so just return z */ return z; case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): DPDNORMX; case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM): - if (zc == IEEE754_CLASS_QNAN) - return z; - else if (zc == IEEE754_CLASS_INF) + if (zc == IEEE754_CLASS_INF) return ieee754dp_inf(zs); DPDNORMY; break; case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM): - if (zc == IEEE754_CLASS_QNAN) - return z; - else if (zc == IEEE754_CLASS_INF) + if (zc == IEEE754_CLASS_INF) return ieee754dp_inf(zs); DPDNORMX; break; case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM): - if (zc == IEEE754_CLASS_QNAN) - return z; - else if (zc == IEEE754_CLASS_INF) + if (zc == IEEE754_CLASS_INF) return ieee754dp_inf(zs); /* fall through to real computations */ } @@ -157,7 +190,7 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x, re = xe + ye; rs = xs ^ ys; - if (flags & maddf_negate_product) + if (flags & MADDF_NEGATE_PRODUCT) rs ^= 1; /* shunt to top of word */ @@ -165,7 +198,7 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x, ym <<= 64 - (DP_FBITS + 1); /* - * Multiply 64 bits xm, ym to give high 64 bits rm with stickness. + * Multiply 64 bits xm and ym to give 128 bits result in hrm:lrm. */ /* 32 * 32 => 64 */ @@ -195,81 +228,110 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x, hrm = hrm + (t >> 32); - rm = hrm | (lrm != 0); - - /* - * Sticky shift down to normal rounding precision. 
- */ - if ((s64) rm < 0) { - rm = (rm >> (64 - (DP_FBITS + 1 + 3))) | - ((rm << (DP_FBITS + 1 + 3)) != 0); + /* Put explicit bit at bit 126 if necessary */ + if ((int64_t)hrm < 0) { + lrm = (hrm << 63) | (lrm >> 1); + hrm = hrm >> 1; re++; - } else { - rm = (rm >> (64 - (DP_FBITS + 1 + 3 + 1))) | - ((rm << (DP_FBITS + 1 + 3 + 1)) != 0); } - assert(rm & (DP_HIDDEN_BIT << 3)); - if (zc == IEEE754_CLASS_ZERO) - return ieee754dp_format(rs, re, rm); + assert(hrm & (1 << 62)); - /* And now the addition */ - assert(zm & DP_HIDDEN_BIT); + if (zc == IEEE754_CLASS_ZERO) { + /* + * Move explicit bit from bit 126 to bit 55 since the + * ieee754dp_format code expects the mantissa to be + * 56 bits wide (53 + 3 rounding bits). + */ + srl128(&hrm, &lrm, (126 - 55)); + return ieee754dp_format(rs, re, lrm); + } - /* - * Provide guard,round and stick bit space. - */ - zm <<= 3; + /* Move explicit bit from bit 52 to bit 126 */ + lzm = 0; + hzm = zm << 10; + assert(hzm & (1 << 62)); + /* Make the exponents the same */ if (ze > re) { /* * Have to shift y fraction right to align. */ s = ze - re; - rm = XDPSRS(rm, s); + srl128(&hrm, &lrm, s); re += s; } else if (re > ze) { /* * Have to shift x fraction right to align. */ s = re - ze; - zm = XDPSRS(zm, s); + srl128(&hzm, &lzm, s); ze += s; } assert(ze == re); assert(ze <= DP_EMAX); + /* Do the addition */ if (zs == rs) { /* - * Generate 28 bit result of adding two 27 bit numbers - * leaving result in xm, xs and xe. + * Generate 128 bit result by adding two 127 bit numbers + * leaving result in hzm:lzm, zs and ze. */ - zm = zm + rm; - - if (zm >> (DP_FBITS + 1 + 3)) { /* carry out */ - zm = XDPSRS1(zm); + hzm = hzm + hrm + (lzm > (lzm + lrm)); + lzm = lzm + lrm; + if ((int64_t)hzm < 0) { /* carry out */ + srl128(&hzm, &lzm, 1); ze++; } } else { - if (zm >= rm) { - zm = zm - rm; + if (hzm > hrm || (hzm == hrm && lzm >= lrm)) { + hzm = hzm - hrm - (lzm < lrm); + lzm = lzm - lrm; } else { - zm = rm - zm; + hzm = hrm - hzm - (lrm < lzm); + lzm = lrm - lzm; zs = rs; } - if (zm == 0) + if (lzm == 0 && hzm == 0) return ieee754dp_zero(ieee754_csr.rm == FPU_CSR_RD); /* - * Normalize to rounding precision. + * Put explicit bit at bit 126 if necessary. */ - while ((zm >> (DP_FBITS + 3)) == 0) { - zm <<= 1; - ze--; + if (hzm == 0) { + /* left shift by 63 or 64 bits */ + if ((int64_t)lzm < 0) { + /* MSB of lzm is the explicit bit */ + hzm = lzm >> 1; + lzm = lzm << 63; + ze -= 63; + } else { + hzm = lzm; + lzm = 0; + ze -= 64; + } + } + + t = 0; + while ((hzm >> (62 - t)) == 0) + t++; + + assert(t <= 62); + if (t) { + hzm = hzm << t | lzm >> (64 - t); + lzm = lzm << t; + ze -= t; } } - return ieee754dp_format(zs, ze, zm); + /* + * Move explicit bit from bit 126 to bit 55 since the + * ieee754dp_format code expects the mantissa to be + * 56 bits wide (53 + 3 rounding bits). 
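/*
 * srl128() above and the new XSPSRS64() macro in ieee754sp.h further
 * down both implement "sticky" right shifts: every bit shifted out is
 * ORed back into the least significant surviving bit, so the final
 * rounding step can still tell that a non-zero tail was discarded.
 * A minimal single-word sketch of the same idea (hypothetical helper,
 * not part of the patch):
 */
#include <linux/types.h>

static inline u64 sticky_srl64(u64 v, unsigned int count)
{
	if (count == 0)
		return v;
	if (count >= 64)
		return v != 0;	/* everything shifted out: keep only "non-zero?" */
	return (v >> count) | ((v << (64 - count)) != 0);
}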
+ */ + srl128(&hzm, &lzm, (126 - 55)); + + return ieee754dp_format(zs, ze, lzm); } union ieee754dp ieee754dp_maddf(union ieee754dp z, union ieee754dp x, @@ -281,5 +343,5 @@ union ieee754dp ieee754dp_maddf(union ieee754dp z, union ieee754dp x, union ieee754dp ieee754dp_msubf(union ieee754dp z, union ieee754dp x, union ieee754dp y) { - return _dp_maddf(z, x, y, maddf_negate_product); + return _dp_maddf(z, x, y, MADDF_NEGATE_PRODUCT); } diff --git a/arch/mips/math-emu/dp_rint.c b/arch/mips/math-emu/dp_rint.c new file mode 100644 index 000000000000..c3b9077ff357 --- /dev/null +++ b/arch/mips/math-emu/dp_rint.c @@ -0,0 +1,89 @@ +/* IEEE754 floating point arithmetic + * double precision: common utilities + */ +/* + * MIPS floating point support + * Copyright (C) 1994-2000 Algorithmics Ltd. + * Copyright (C) 2017 Imagination Technologies, Ltd. + * Author: Aleksandar Markovic + * + * This program is free software; you can distribute it and/or modify it + * under the terms of the GNU General Public License (Version 2) as + * published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. + */ + +#include "ieee754dp.h" + +union ieee754dp ieee754dp_rint(union ieee754dp x) +{ + union ieee754dp ret; + u64 residue; + int sticky; + int round; + int odd; + + COMPXDP; + + ieee754_clearcx(); + + EXPLODEXDP; + FLUSHXDP; + + if (xc == IEEE754_CLASS_SNAN) + return ieee754dp_nanxcpt(x); + + if ((xc == IEEE754_CLASS_QNAN) || + (xc == IEEE754_CLASS_INF) || + (xc == IEEE754_CLASS_ZERO)) + return x; + + if (xe >= DP_FBITS) + return x; + + if (xe < -1) { + residue = xm; + round = 0; + sticky = residue != 0; + xm = 0; + } else { + residue = xm << (64 - DP_FBITS + xe); + round = (residue >> 63) != 0; + sticky = (residue << 1) != 0; + xm >>= DP_FBITS - xe; + } + + odd = (xm & 0x1) != 0x0; + + switch (ieee754_csr.rm) { + case FPU_CSR_RN: /* toward nearest */ + if (round && (sticky || odd)) + xm++; + break; + case FPU_CSR_RZ: /* toward zero */ + break; + case FPU_CSR_RU: /* toward +infinity */ + if ((round || sticky) && !xs) + xm++; + break; + case FPU_CSR_RD: /* toward -infinity */ + if ((round || sticky) && xs) + xm++; + break; + } + + if (round || sticky) + ieee754_setcx(IEEE754_INEXACT); + + ret = ieee754dp_flong(xm); + DPSIGN(ret) = xs; + + return ret; +} diff --git a/arch/mips/math-emu/ieee754.h b/arch/mips/math-emu/ieee754.h index d3be351aed15..92dc8fa565cb 100644 --- a/arch/mips/math-emu/ieee754.h +++ b/arch/mips/math-emu/ieee754.h @@ -67,6 +67,7 @@ union ieee754sp ieee754sp_div(union ieee754sp x, union ieee754sp y); union ieee754sp ieee754sp_fint(int x); union ieee754sp ieee754sp_flong(s64 x); union ieee754sp ieee754sp_fdp(union ieee754dp x); +union ieee754sp ieee754sp_rint(union ieee754sp x); int ieee754sp_tint(union ieee754sp x); s64 ieee754sp_tlong(union ieee754sp x); @@ -101,6 +102,7 @@ union ieee754dp ieee754dp_neg(union ieee754dp x); union ieee754dp ieee754dp_fint(int x); union ieee754dp ieee754dp_flong(s64 x); union ieee754dp ieee754dp_fsp(union ieee754sp x); +union ieee754dp ieee754dp_rint(union ieee754dp x); int ieee754dp_tint(union ieee754dp x); s64 ieee754dp_tlong(union ieee754dp x); diff --git a/arch/mips/math-emu/ieee754int.h b/arch/mips/math-emu/ieee754int.h index 
8bc2f6963324..dd2071f430e0 100644 --- a/arch/mips/math-emu/ieee754int.h +++ b/arch/mips/math-emu/ieee754int.h @@ -26,6 +26,10 @@ #define CLPAIR(x, y) ((x)*6+(y)) +enum maddf_flags { + MADDF_NEGATE_PRODUCT = 1 << 0, +}; + static inline void ieee754_clearcx(void) { ieee754_csr.cx = 0; diff --git a/arch/mips/math-emu/ieee754sp.h b/arch/mips/math-emu/ieee754sp.h index 8476067075fe..0f63e4202cff 100644 --- a/arch/mips/math-emu/ieee754sp.h +++ b/arch/mips/math-emu/ieee754sp.h @@ -45,6 +45,10 @@ static inline int ieee754sp_finite(union ieee754sp x) return SPBEXP(x) != SP_EMAX + 1 + SP_EBIAS; } +/* 64 bit right shift with rounding */ +#define XSPSRS64(v, rs) \ + (((rs) >= 64) ? ((v) != 0) : ((v) >> (rs)) | ((v) << (64-(rs)) != 0)) + /* 3bit extended single precision sticky right shift */ #define XSPSRS(v, rs) \ ((rs > (SP_FBITS+3))?1:((v) >> (rs)) | ((v) << (32-(rs)) != 0)) diff --git a/arch/mips/math-emu/me-debugfs.c b/arch/mips/math-emu/me-debugfs.c index be650ed7db59..8c0ec154aecc 100644 --- a/arch/mips/math-emu/me-debugfs.c +++ b/arch/mips/math-emu/me-debugfs.c @@ -28,14 +28,190 @@ static int fpuemu_stat_get(void *data, u64 *val) } DEFINE_SIMPLE_ATTRIBUTE(fops_fpuemu_stat, fpuemu_stat_get, NULL, "%llu\n"); +/* + * Used to obtain names for a debugfs instruction counter, given field name + * in fpuemustats structure. For example, for input "cmp_sueq_d", the output + * would be "cmp.sueq.d". This is needed since dots are not allowed to be + * used in structure field names, and are, on the other hand, desired to be + * used in debugfs item names to be clearly associated to corresponding + * MIPS FPU instructions. + */ +static void adjust_instruction_counter_name(char *out_name, char *in_name) +{ + int i = 0; + + strcpy(out_name, in_name); + while (in_name[i] != '\0') { + if (out_name[i] == '_') + out_name[i] = '.'; + i++; + } +} + +static int fpuemustats_clear_show(struct seq_file *s, void *unused) +{ + __this_cpu_write((fpuemustats).emulated, 0); + __this_cpu_write((fpuemustats).loads, 0); + __this_cpu_write((fpuemustats).stores, 0); + __this_cpu_write((fpuemustats).branches, 0); + __this_cpu_write((fpuemustats).cp1ops, 0); + __this_cpu_write((fpuemustats).cp1xops, 0); + __this_cpu_write((fpuemustats).errors, 0); + __this_cpu_write((fpuemustats).ieee754_inexact, 0); + __this_cpu_write((fpuemustats).ieee754_underflow, 0); + __this_cpu_write((fpuemustats).ieee754_overflow, 0); + __this_cpu_write((fpuemustats).ieee754_zerodiv, 0); + __this_cpu_write((fpuemustats).ieee754_invalidop, 0); + __this_cpu_write((fpuemustats).ds_emul, 0); + + __this_cpu_write((fpuemustats).abs_s, 0); + __this_cpu_write((fpuemustats).abs_d, 0); + __this_cpu_write((fpuemustats).add_s, 0); + __this_cpu_write((fpuemustats).add_d, 0); + __this_cpu_write((fpuemustats).bc1eqz, 0); + __this_cpu_write((fpuemustats).bc1nez, 0); + __this_cpu_write((fpuemustats).ceil_w_s, 0); + __this_cpu_write((fpuemustats).ceil_w_d, 0); + __this_cpu_write((fpuemustats).ceil_l_s, 0); + __this_cpu_write((fpuemustats).ceil_l_d, 0); + __this_cpu_write((fpuemustats).class_s, 0); + __this_cpu_write((fpuemustats).class_d, 0); + __this_cpu_write((fpuemustats).cmp_af_s, 0); + __this_cpu_write((fpuemustats).cmp_af_d, 0); + __this_cpu_write((fpuemustats).cmp_eq_s, 0); + __this_cpu_write((fpuemustats).cmp_eq_d, 0); + __this_cpu_write((fpuemustats).cmp_le_s, 0); + __this_cpu_write((fpuemustats).cmp_le_d, 0); + __this_cpu_write((fpuemustats).cmp_lt_s, 0); + __this_cpu_write((fpuemustats).cmp_lt_d, 0); + __this_cpu_write((fpuemustats).cmp_ne_s, 0); + 
__this_cpu_write((fpuemustats).cmp_ne_d, 0); + __this_cpu_write((fpuemustats).cmp_or_s, 0); + __this_cpu_write((fpuemustats).cmp_or_d, 0); + __this_cpu_write((fpuemustats).cmp_ueq_s, 0); + __this_cpu_write((fpuemustats).cmp_ueq_d, 0); + __this_cpu_write((fpuemustats).cmp_ule_s, 0); + __this_cpu_write((fpuemustats).cmp_ule_d, 0); + __this_cpu_write((fpuemustats).cmp_ult_s, 0); + __this_cpu_write((fpuemustats).cmp_ult_d, 0); + __this_cpu_write((fpuemustats).cmp_un_s, 0); + __this_cpu_write((fpuemustats).cmp_un_d, 0); + __this_cpu_write((fpuemustats).cmp_une_s, 0); + __this_cpu_write((fpuemustats).cmp_une_d, 0); + __this_cpu_write((fpuemustats).cmp_saf_s, 0); + __this_cpu_write((fpuemustats).cmp_saf_d, 0); + __this_cpu_write((fpuemustats).cmp_seq_s, 0); + __this_cpu_write((fpuemustats).cmp_seq_d, 0); + __this_cpu_write((fpuemustats).cmp_sle_s, 0); + __this_cpu_write((fpuemustats).cmp_sle_d, 0); + __this_cpu_write((fpuemustats).cmp_slt_s, 0); + __this_cpu_write((fpuemustats).cmp_slt_d, 0); + __this_cpu_write((fpuemustats).cmp_sne_s, 0); + __this_cpu_write((fpuemustats).cmp_sne_d, 0); + __this_cpu_write((fpuemustats).cmp_sor_s, 0); + __this_cpu_write((fpuemustats).cmp_sor_d, 0); + __this_cpu_write((fpuemustats).cmp_sueq_s, 0); + __this_cpu_write((fpuemustats).cmp_sueq_d, 0); + __this_cpu_write((fpuemustats).cmp_sule_s, 0); + __this_cpu_write((fpuemustats).cmp_sule_d, 0); + __this_cpu_write((fpuemustats).cmp_sult_s, 0); + __this_cpu_write((fpuemustats).cmp_sult_d, 0); + __this_cpu_write((fpuemustats).cmp_sun_s, 0); + __this_cpu_write((fpuemustats).cmp_sun_d, 0); + __this_cpu_write((fpuemustats).cmp_sune_s, 0); + __this_cpu_write((fpuemustats).cmp_sune_d, 0); + __this_cpu_write((fpuemustats).cvt_d_l, 0); + __this_cpu_write((fpuemustats).cvt_d_s, 0); + __this_cpu_write((fpuemustats).cvt_d_w, 0); + __this_cpu_write((fpuemustats).cvt_l_s, 0); + __this_cpu_write((fpuemustats).cvt_l_d, 0); + __this_cpu_write((fpuemustats).cvt_s_d, 0); + __this_cpu_write((fpuemustats).cvt_s_l, 0); + __this_cpu_write((fpuemustats).cvt_s_w, 0); + __this_cpu_write((fpuemustats).cvt_w_s, 0); + __this_cpu_write((fpuemustats).cvt_w_d, 0); + __this_cpu_write((fpuemustats).div_s, 0); + __this_cpu_write((fpuemustats).div_d, 0); + __this_cpu_write((fpuemustats).floor_w_s, 0); + __this_cpu_write((fpuemustats).floor_w_d, 0); + __this_cpu_write((fpuemustats).floor_l_s, 0); + __this_cpu_write((fpuemustats).floor_l_d, 0); + __this_cpu_write((fpuemustats).maddf_s, 0); + __this_cpu_write((fpuemustats).maddf_d, 0); + __this_cpu_write((fpuemustats).max_s, 0); + __this_cpu_write((fpuemustats).max_d, 0); + __this_cpu_write((fpuemustats).maxa_s, 0); + __this_cpu_write((fpuemustats).maxa_d, 0); + __this_cpu_write((fpuemustats).min_s, 0); + __this_cpu_write((fpuemustats).min_d, 0); + __this_cpu_write((fpuemustats).mina_s, 0); + __this_cpu_write((fpuemustats).mina_d, 0); + __this_cpu_write((fpuemustats).mov_s, 0); + __this_cpu_write((fpuemustats).mov_d, 0); + __this_cpu_write((fpuemustats).msubf_s, 0); + __this_cpu_write((fpuemustats).msubf_d, 0); + __this_cpu_write((fpuemustats).mul_s, 0); + __this_cpu_write((fpuemustats).mul_d, 0); + __this_cpu_write((fpuemustats).neg_s, 0); + __this_cpu_write((fpuemustats).neg_d, 0); + __this_cpu_write((fpuemustats).recip_s, 0); + __this_cpu_write((fpuemustats).recip_d, 0); + __this_cpu_write((fpuemustats).rint_s, 0); + __this_cpu_write((fpuemustats).rint_d, 0); + __this_cpu_write((fpuemustats).round_w_s, 0); + __this_cpu_write((fpuemustats).round_w_d, 0); + __this_cpu_write((fpuemustats).round_l_s, 0); + 
__this_cpu_write((fpuemustats).round_l_d, 0); + __this_cpu_write((fpuemustats).rsqrt_s, 0); + __this_cpu_write((fpuemustats).rsqrt_d, 0); + __this_cpu_write((fpuemustats).sel_s, 0); + __this_cpu_write((fpuemustats).sel_d, 0); + __this_cpu_write((fpuemustats).seleqz_s, 0); + __this_cpu_write((fpuemustats).seleqz_d, 0); + __this_cpu_write((fpuemustats).selnez_s, 0); + __this_cpu_write((fpuemustats).selnez_d, 0); + __this_cpu_write((fpuemustats).sqrt_s, 0); + __this_cpu_write((fpuemustats).sqrt_d, 0); + __this_cpu_write((fpuemustats).sub_s, 0); + __this_cpu_write((fpuemustats).sub_d, 0); + __this_cpu_write((fpuemustats).trunc_w_s, 0); + __this_cpu_write((fpuemustats).trunc_w_d, 0); + __this_cpu_write((fpuemustats).trunc_l_s, 0); + __this_cpu_write((fpuemustats).trunc_l_d, 0); + + return 0; +} + +static int fpuemustats_clear_open(struct inode *inode, struct file *file) +{ + return single_open(file, fpuemustats_clear_show, inode->i_private); +} + +static const struct file_operations fpuemustats_clear_fops = { + .open = fpuemustats_clear_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + static int __init debugfs_fpuemu(void) { - struct dentry *d, *dir; + struct dentry *fpuemu_debugfs_base_dir; + struct dentry *fpuemu_debugfs_inst_dir; + struct dentry *d, *reset_file; if (!mips_debugfs_dir) return -ENODEV; - dir = debugfs_create_dir("fpuemustats", mips_debugfs_dir); - if (!dir) + + fpuemu_debugfs_base_dir = debugfs_create_dir("fpuemustats", + mips_debugfs_dir); + if (!fpuemu_debugfs_base_dir) + return -ENOMEM; + + reset_file = debugfs_create_file("fpuemustats_clear", 0444, + mips_debugfs_dir, NULL, + &fpuemustats_clear_fops); + if (!reset_file) return -ENOMEM; #define FPU_EMU_STAT_OFFSET(m) \ @@ -43,7 +219,7 @@ static int __init debugfs_fpuemu(void) #define FPU_STAT_CREATE(m) \ do { \ - d = debugfs_create_file(#m , S_IRUGO, dir, \ + d = debugfs_create_file(#m, 0444, fpuemu_debugfs_base_dir, \ (void *)FPU_EMU_STAT_OFFSET(m), \ &fops_fpuemu_stat); \ if (!d) \ @@ -53,6 +229,7 @@ do { \ FPU_STAT_CREATE(emulated); FPU_STAT_CREATE(loads); FPU_STAT_CREATE(stores); + FPU_STAT_CREATE(branches); FPU_STAT_CREATE(cp1ops); FPU_STAT_CREATE(cp1xops); FPU_STAT_CREATE(errors); @@ -63,6 +240,139 @@ do { \ FPU_STAT_CREATE(ieee754_invalidop); FPU_STAT_CREATE(ds_emul); + fpuemu_debugfs_inst_dir = debugfs_create_dir("instructions", + fpuemu_debugfs_base_dir); + if (!fpuemu_debugfs_inst_dir) + return -ENOMEM; + +#define FPU_STAT_CREATE_EX(m) \ +do { \ + char name[32]; \ + \ + adjust_instruction_counter_name(name, #m); \ + \ + d = debugfs_create_file(name, 0444, fpuemu_debugfs_inst_dir, \ + (void *)FPU_EMU_STAT_OFFSET(m), \ + &fops_fpuemu_stat); \ + if (!d) \ + return -ENOMEM; \ +} while (0) + + FPU_STAT_CREATE_EX(abs_s); + FPU_STAT_CREATE_EX(abs_d); + FPU_STAT_CREATE_EX(add_s); + FPU_STAT_CREATE_EX(add_d); + FPU_STAT_CREATE_EX(bc1eqz); + FPU_STAT_CREATE_EX(bc1nez); + FPU_STAT_CREATE_EX(ceil_w_s); + FPU_STAT_CREATE_EX(ceil_w_d); + FPU_STAT_CREATE_EX(ceil_l_s); + FPU_STAT_CREATE_EX(ceil_l_d); + FPU_STAT_CREATE_EX(class_s); + FPU_STAT_CREATE_EX(class_d); + FPU_STAT_CREATE_EX(cmp_af_s); + FPU_STAT_CREATE_EX(cmp_af_d); + FPU_STAT_CREATE_EX(cmp_eq_s); + FPU_STAT_CREATE_EX(cmp_eq_d); + FPU_STAT_CREATE_EX(cmp_le_s); + FPU_STAT_CREATE_EX(cmp_le_d); + FPU_STAT_CREATE_EX(cmp_lt_s); + FPU_STAT_CREATE_EX(cmp_lt_d); + FPU_STAT_CREATE_EX(cmp_ne_s); + FPU_STAT_CREATE_EX(cmp_ne_d); + FPU_STAT_CREATE_EX(cmp_or_s); + FPU_STAT_CREATE_EX(cmp_or_d); + FPU_STAT_CREATE_EX(cmp_ueq_s); + 
FPU_STAT_CREATE_EX(cmp_ueq_d); + FPU_STAT_CREATE_EX(cmp_ule_s); + FPU_STAT_CREATE_EX(cmp_ule_d); + FPU_STAT_CREATE_EX(cmp_ult_s); + FPU_STAT_CREATE_EX(cmp_ult_d); + FPU_STAT_CREATE_EX(cmp_un_s); + FPU_STAT_CREATE_EX(cmp_un_d); + FPU_STAT_CREATE_EX(cmp_une_s); + FPU_STAT_CREATE_EX(cmp_une_d); + FPU_STAT_CREATE_EX(cmp_saf_s); + FPU_STAT_CREATE_EX(cmp_saf_d); + FPU_STAT_CREATE_EX(cmp_seq_s); + FPU_STAT_CREATE_EX(cmp_seq_d); + FPU_STAT_CREATE_EX(cmp_sle_s); + FPU_STAT_CREATE_EX(cmp_sle_d); + FPU_STAT_CREATE_EX(cmp_slt_s); + FPU_STAT_CREATE_EX(cmp_slt_d); + FPU_STAT_CREATE_EX(cmp_sne_s); + FPU_STAT_CREATE_EX(cmp_sne_d); + FPU_STAT_CREATE_EX(cmp_sor_s); + FPU_STAT_CREATE_EX(cmp_sor_d); + FPU_STAT_CREATE_EX(cmp_sueq_s); + FPU_STAT_CREATE_EX(cmp_sueq_d); + FPU_STAT_CREATE_EX(cmp_sule_s); + FPU_STAT_CREATE_EX(cmp_sule_d); + FPU_STAT_CREATE_EX(cmp_sult_s); + FPU_STAT_CREATE_EX(cmp_sult_d); + FPU_STAT_CREATE_EX(cmp_sun_s); + FPU_STAT_CREATE_EX(cmp_sun_d); + FPU_STAT_CREATE_EX(cmp_sune_s); + FPU_STAT_CREATE_EX(cmp_sune_d); + FPU_STAT_CREATE_EX(cvt_d_l); + FPU_STAT_CREATE_EX(cvt_d_s); + FPU_STAT_CREATE_EX(cvt_d_w); + FPU_STAT_CREATE_EX(cvt_l_s); + FPU_STAT_CREATE_EX(cvt_l_d); + FPU_STAT_CREATE_EX(cvt_s_d); + FPU_STAT_CREATE_EX(cvt_s_l); + FPU_STAT_CREATE_EX(cvt_s_w); + FPU_STAT_CREATE_EX(cvt_w_s); + FPU_STAT_CREATE_EX(cvt_w_d); + FPU_STAT_CREATE_EX(div_s); + FPU_STAT_CREATE_EX(div_d); + FPU_STAT_CREATE_EX(floor_w_s); + FPU_STAT_CREATE_EX(floor_w_d); + FPU_STAT_CREATE_EX(floor_l_s); + FPU_STAT_CREATE_EX(floor_l_d); + FPU_STAT_CREATE_EX(maddf_s); + FPU_STAT_CREATE_EX(maddf_d); + FPU_STAT_CREATE_EX(max_s); + FPU_STAT_CREATE_EX(max_d); + FPU_STAT_CREATE_EX(maxa_s); + FPU_STAT_CREATE_EX(maxa_d); + FPU_STAT_CREATE_EX(min_s); + FPU_STAT_CREATE_EX(min_d); + FPU_STAT_CREATE_EX(mina_s); + FPU_STAT_CREATE_EX(mina_d); + FPU_STAT_CREATE_EX(mov_s); + FPU_STAT_CREATE_EX(mov_d); + FPU_STAT_CREATE_EX(msubf_s); + FPU_STAT_CREATE_EX(msubf_d); + FPU_STAT_CREATE_EX(mul_s); + FPU_STAT_CREATE_EX(mul_d); + FPU_STAT_CREATE_EX(neg_s); + FPU_STAT_CREATE_EX(neg_d); + FPU_STAT_CREATE_EX(recip_s); + FPU_STAT_CREATE_EX(recip_d); + FPU_STAT_CREATE_EX(rint_s); + FPU_STAT_CREATE_EX(rint_d); + FPU_STAT_CREATE_EX(round_w_s); + FPU_STAT_CREATE_EX(round_w_d); + FPU_STAT_CREATE_EX(round_l_s); + FPU_STAT_CREATE_EX(round_l_d); + FPU_STAT_CREATE_EX(rsqrt_s); + FPU_STAT_CREATE_EX(rsqrt_d); + FPU_STAT_CREATE_EX(sel_s); + FPU_STAT_CREATE_EX(sel_d); + FPU_STAT_CREATE_EX(seleqz_s); + FPU_STAT_CREATE_EX(seleqz_d); + FPU_STAT_CREATE_EX(selnez_s); + FPU_STAT_CREATE_EX(selnez_d); + FPU_STAT_CREATE_EX(sqrt_s); + FPU_STAT_CREATE_EX(sqrt_d); + FPU_STAT_CREATE_EX(sub_s); + FPU_STAT_CREATE_EX(sub_d); + FPU_STAT_CREATE_EX(trunc_w_s); + FPU_STAT_CREATE_EX(trunc_w_d); + FPU_STAT_CREATE_EX(trunc_l_s); + FPU_STAT_CREATE_EX(trunc_l_d); + return 0; } arch_initcall(debugfs_fpuemu); diff --git a/arch/mips/math-emu/sp_fmax.c b/arch/mips/math-emu/sp_fmax.c index 4d000844e48e..74a5a00d2f22 100644 --- a/arch/mips/math-emu/sp_fmax.c +++ b/arch/mips/math-emu/sp_fmax.c @@ -47,14 +47,26 @@ union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y) case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): return ieee754sp_nanxcpt(x); - /* numbers are preferred to NaNs */ + /* + * Quiet NaN handling + */ + + /* + * The case of both inputs quiet NaNs + */ + case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): + return x; + + /* + * The cases of exactly one input quiet NaN (numbers + * are here preferred as returned values to NaNs) + */ case CLPAIR(IEEE754_CLASS_ZERO, 
IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN): return x; - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): @@ -80,9 +92,7 @@ union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y) return ys ? x : y; case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): - if (xs == ys) - return x; - return ieee754sp_zero(1); + return ieee754sp_zero(xs & ys); case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): SPDNORMX; @@ -106,16 +116,32 @@ union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y) else if (xs < ys) return x; - /* Compare exponent */ - if (xe > ye) - return x; - else if (xe < ye) - return y; + /* Signs of inputs are equal, let's compare exponents */ + if (xs == 0) { + /* Inputs are both positive */ + if (xe > ye) + return x; + else if (xe < ye) + return y; + } else { + /* Inputs are both negative */ + if (xe > ye) + return y; + else if (xe < ye) + return x; + } - /* Compare mantissa */ + /* Signs and exponents of inputs are equal, let's compare mantissas */ + if (xs == 0) { + /* Inputs are both positive, with equal signs and exponents */ + if (xm <= ym) + return y; + return x; + } + /* Inputs are both negative, with equal signs and exponents */ if (xm <= ym) - return y; - return x; + return x; + return y; } union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y) @@ -147,14 +173,26 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y) case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): return ieee754sp_nanxcpt(x); - /* numbers are preferred to NaNs */ + /* + * Quiet NaN handling + */ + + /* + * The case of both inputs quiet NaNs + */ + case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): + return x; + + /* + * The cases of exactly one input quiet NaN (numbers + * are here preferred as returned values to NaNs) + */ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN): return x; - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): @@ -164,6 +202,9 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y) /* * Infinity and zero handling */ + case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): + return ieee754sp_inf(xs & ys); + case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): @@ -171,7 +212,6 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y) case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): return x; - case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF): case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): @@ -180,9 +220,7 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y) return y; case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): - if (xs == ys) - return x; - return ieee754sp_zero(1); + return ieee754sp_zero(xs & ys); case CLPAIR(IEEE754_CLASS_DNORM, 
IEEE754_CLASS_DNORM): SPDNORMX; @@ -207,7 +245,11 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y) return y; /* Compare mantissa */ - if (xm <= ym) + if (xm < ym) return y; - return x; + else if (xm > ym) + return x; + else if (xs == 0) + return x; + return y; } diff --git a/arch/mips/math-emu/sp_fmin.c b/arch/mips/math-emu/sp_fmin.c index 4eb1bb9e9dec..c51385f46b09 100644 --- a/arch/mips/math-emu/sp_fmin.c +++ b/arch/mips/math-emu/sp_fmin.c @@ -47,14 +47,26 @@ union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y) case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): return ieee754sp_nanxcpt(x); - /* numbers are preferred to NaNs */ + /* + * Quiet NaN handling + */ + + /* + * The case of both inputs quiet NaNs + */ + case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): + return x; + + /* + * The cases of exactly one input quiet NaN (numbers + * are here preferred as returned values to NaNs) + */ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN): return x; - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): @@ -80,9 +92,7 @@ union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y) return ys ? y : x; case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): - if (xs == ys) - return x; - return ieee754sp_zero(1); + return ieee754sp_zero(xs | ys); case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): SPDNORMX; @@ -106,16 +116,32 @@ union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y) else if (xs < ys) return y; - /* Compare exponent */ - if (xe > ye) - return y; - else if (xe < ye) - return x; + /* Signs of inputs are the same, let's compare exponents */ + if (xs == 0) { + /* Inputs are both positive */ + if (xe > ye) + return y; + else if (xe < ye) + return x; + } else { + /* Inputs are both negative */ + if (xe > ye) + return x; + else if (xe < ye) + return y; + } - /* Compare mantissa */ + /* Signs and exponents of inputs are equal, let's compare mantissas */ + if (xs == 0) { + /* Inputs are both positive, with equal signs and exponents */ + if (xm <= ym) + return x; + return y; + } + /* Inputs are both negative, with equal signs and exponents */ if (xm <= ym) - return x; - return y; + return y; + return x; } union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y) @@ -147,14 +173,26 @@ union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y) case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): return ieee754sp_nanxcpt(x); - /* numbers are preferred to NaNs */ + /* + * Quiet NaN handling + */ + + /* + * The case of both inputs quiet NaNs + */ + case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): + return x; + + /* + * The cases of exactly one input quiet NaN (numbers + * are here preferred as returned values to NaNs) + */ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN): return x; - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): @@ -164,25 +202,25 @@ union ieee754sp 
ieee754sp_fmina(union ieee754sp x, union ieee754sp y) /* * Infinity and zero handling */ + case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): + return ieee754sp_inf(xs | ys); + case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): - return x; + return y; - case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF): case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM): - return y; + return x; case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): - if (xs == ys) - return x; - return ieee754sp_zero(1); + return ieee754sp_zero(xs | ys); case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): SPDNORMX; @@ -207,7 +245,11 @@ union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y) return x; /* Compare mantissa */ - if (xm <= ym) + if (xm < ym) + return x; + else if (xm > ym) + return y; + else if (xs == 1) return x; return y; } diff --git a/arch/mips/math-emu/sp_maddf.c b/arch/mips/math-emu/sp_maddf.c index c91d5e5d9b5f..7195fe785d81 100644 --- a/arch/mips/math-emu/sp_maddf.c +++ b/arch/mips/math-emu/sp_maddf.c @@ -14,9 +14,6 @@ #include "ieee754sp.h" -enum maddf_flags { - maddf_negate_product = 1 << 0, -}; static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x, union ieee754sp y, enum maddf_flags flags) @@ -24,14 +21,8 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x, int re; int rs; unsigned rm; - unsigned short lxm; - unsigned short hxm; - unsigned short lym; - unsigned short hym; - unsigned lrm; - unsigned hrm; - unsigned t; - unsigned at; + uint64_t rm64; + uint64_t zm64; int s; COMPXSP; @@ -48,51 +39,35 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x, ieee754_clearcx(); - switch (zc) { - case IEEE754_CLASS_SNAN: - ieee754_setcx(IEEE754_INVALID_OPERATION); + /* + * Handle the cases when at least one of x, y or z is a NaN. + * Order of precedence is sNaN, qNaN and z, x, y. 
+ */ + if (zc == IEEE754_CLASS_SNAN) return ieee754sp_nanxcpt(z); - case IEEE754_CLASS_DNORM: - SPDNORMZ; - /* QNAN and ZERO cases are handled separately below */ - } - - switch (CLPAIR(xc, yc)) { - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN): - case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN): - case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN): - case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN): - case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN): - return ieee754sp_nanxcpt(y); - - case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN): - case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN): - case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO): - case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM): - case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM): - case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): + if (xc == IEEE754_CLASS_SNAN) return ieee754sp_nanxcpt(x); - - case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): - case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): - case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): - case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN): + if (yc == IEEE754_CLASS_SNAN) + return ieee754sp_nanxcpt(y); + if (zc == IEEE754_CLASS_QNAN) + return z; + if (xc == IEEE754_CLASS_QNAN) + return x; + if (yc == IEEE754_CLASS_QNAN) return y; - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF): - return x; + if (zc == IEEE754_CLASS_DNORM) + SPDNORMZ; + /* ZERO z cases are handled separately below */ + + switch (CLPAIR(xc, yc)) { + /* * Infinity handling */ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): - if (zc == IEEE754_CLASS_QNAN) - return z; ieee754_setcx(IEEE754_INVALID_OPERATION); return ieee754sp_indef(); @@ -101,9 +76,27 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x, case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): - if (zc == IEEE754_CLASS_QNAN) - return z; - return ieee754sp_inf(xs ^ ys); + if ((zc == IEEE754_CLASS_INF) && + ((!(flags & MADDF_NEGATE_PRODUCT) && (zs != (xs ^ ys))) || + ((flags & MADDF_NEGATE_PRODUCT) && (zs == (xs ^ ys))))) { + /* + * Cases of addition of infinities with opposite signs + * or subtraction of infinities with same signs. + */ + ieee754_setcx(IEEE754_INVALID_OPERATION); + return ieee754sp_indef(); + } + /* + * z is here either not an infinity, or an infinity having the + * same sign as product (x*y) (in case of MADDF.D instruction) + * or product -(x*y) (in MSUBF.D case). The result must be an + * infinity, and its sign is determined only by the value of + * (flags & MADDF_NEGATE_PRODUCT) and the signs of x and y. + */ + if (flags & MADDF_NEGATE_PRODUCT) + return ieee754sp_inf(1 ^ (xs ^ ys)); + else + return ieee754sp_inf(xs ^ ys); case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM): @@ -112,32 +105,42 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x, case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): if (zc == IEEE754_CLASS_INF) return ieee754sp_inf(zs); - /* Multiplication is 0 so just return z */ + if (zc == IEEE754_CLASS_ZERO) { + /* Handle cases +0 + (-0) and similar ones. 
*/ + if ((!(flags & MADDF_NEGATE_PRODUCT) + && (zs == (xs ^ ys))) || + ((flags & MADDF_NEGATE_PRODUCT) + && (zs != (xs ^ ys)))) + /* + * Cases of addition of zeros of equal signs + * or subtraction of zeroes of opposite signs. + * The sign of the resulting zero is in any + * such case determined only by the sign of z. + */ + return z; + + return ieee754sp_zero(ieee754_csr.rm == FPU_CSR_RD); + } + /* x*y is here 0, and z is not 0, so just return z */ return z; case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): SPDNORMX; case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM): - if (zc == IEEE754_CLASS_QNAN) - return z; - else if (zc == IEEE754_CLASS_INF) + if (zc == IEEE754_CLASS_INF) return ieee754sp_inf(zs); SPDNORMY; break; case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM): - if (zc == IEEE754_CLASS_QNAN) - return z; - else if (zc == IEEE754_CLASS_INF) + if (zc == IEEE754_CLASS_INF) return ieee754sp_inf(zs); SPDNORMX; break; case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM): - if (zc == IEEE754_CLASS_QNAN) - return z; - else if (zc == IEEE754_CLASS_INF) + if (zc == IEEE754_CLASS_INF) return ieee754sp_inf(zs); /* fall through to real computations */ } @@ -158,111 +161,93 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x, re = xe + ye; rs = xs ^ ys; - if (flags & maddf_negate_product) + if (flags & MADDF_NEGATE_PRODUCT) rs ^= 1; - /* shunt to top of word */ - xm <<= 32 - (SP_FBITS + 1); - ym <<= 32 - (SP_FBITS + 1); + /* Multiple 24 bit xm and ym to give 48 bit results */ + rm64 = (uint64_t)xm * ym; - /* - * Multiply 32 bits xm, ym to give high 32 bits rm with stickness. - */ - lxm = xm & 0xffff; - hxm = xm >> 16; - lym = ym & 0xffff; - hym = ym >> 16; + /* Shunt to top of word */ + rm64 = rm64 << 16; - lrm = lxm * lym; /* 16 * 16 => 32 */ - hrm = hxm * hym; /* 16 * 16 => 32 */ - - t = lxm * hym; /* 16 * 16 => 32 */ - at = lrm + (t << 16); - hrm += at < lrm; - lrm = at; - hrm = hrm + (t >> 16); - - t = hxm * lym; /* 16 * 16 => 32 */ - at = lrm + (t << 16); - hrm += at < lrm; - lrm = at; - hrm = hrm + (t >> 16); - - rm = hrm | (lrm != 0); - - /* - * Sticky shift down to normal rounding precision. - */ - if ((int) rm < 0) { - rm = (rm >> (32 - (SP_FBITS + 1 + 3))) | - ((rm << (SP_FBITS + 1 + 3)) != 0); + /* Put explicit bit at bit 62 if necessary */ + if ((int64_t) rm64 < 0) { + rm64 = rm64 >> 1; re++; - } else { - rm = (rm >> (32 - (SP_FBITS + 1 + 3 + 1))) | - ((rm << (SP_FBITS + 1 + 3 + 1)) != 0); } - assert(rm & (SP_HIDDEN_BIT << 3)); - if (zc == IEEE754_CLASS_ZERO) + assert(rm64 & (1 << 62)); + + if (zc == IEEE754_CLASS_ZERO) { + /* + * Move explicit bit from bit 62 to bit 26 since the + * ieee754sp_format code expects the mantissa to be + * 27 bits wide (24 + 3 rounding bits). + */ + rm = XSPSRS64(rm64, (62 - 26)); return ieee754sp_format(rs, re, rm); + } - /* And now the addition */ - - assert(zm & SP_HIDDEN_BIT); - - /* - * Provide guard,round and stick bit space. - */ - zm <<= 3; + /* Move explicit bit from bit 23 to bit 62 */ + zm64 = (uint64_t)zm << (62 - 23); + assert(zm64 & (1 << 62)); + /* Make the exponents the same */ if (ze > re) { /* * Have to shift r fraction right to align. */ s = ze - re; - rm = XSPSRS(rm, s); + rm64 = XSPSRS64(rm64, s); re += s; } else if (re > ze) { /* * Have to shift z fraction right to align. 
*/ s = re - ze; - zm = XSPSRS(zm, s); + zm64 = XSPSRS64(zm64, s); ze += s; } assert(ze == re); assert(ze <= SP_EMAX); + /* Do the addition */ if (zs == rs) { /* - * Generate 28 bit result of adding two 27 bit numbers - * leaving result in zm, zs and ze. + * Generate 64 bit result by adding two 63 bit numbers + * leaving result in zm64, zs and ze. */ - zm = zm + rm; - - if (zm >> (SP_FBITS + 1 + 3)) { /* carry out */ - zm = XSPSRS1(zm); + zm64 = zm64 + rm64; + if ((int64_t)zm64 < 0) { /* carry out */ + zm64 = XSPSRS1(zm64); ze++; } } else { - if (zm >= rm) { - zm = zm - rm; + if (zm64 >= rm64) { + zm64 = zm64 - rm64; } else { - zm = rm - zm; + zm64 = rm64 - zm64; zs = rs; } - if (zm == 0) + if (zm64 == 0) return ieee754sp_zero(ieee754_csr.rm == FPU_CSR_RD); /* - * Normalize in extended single precision + * Put explicit bit at bit 62 if necessary. */ - while ((zm >> (SP_MBITS + 3)) == 0) { - zm <<= 1; + while ((zm64 >> 62) == 0) { + zm64 <<= 1; ze--; } - } + + /* + * Move explicit bit from bit 62 to bit 26 since the + * ieee754sp_format code expects the mantissa to be + * 27 bits wide (24 + 3 rounding bits). + */ + zm = XSPSRS64(zm64, (62 - 26)); + return ieee754sp_format(zs, ze, zm); } @@ -275,5 +260,5 @@ union ieee754sp ieee754sp_maddf(union ieee754sp z, union ieee754sp x, union ieee754sp ieee754sp_msubf(union ieee754sp z, union ieee754sp x, union ieee754sp y) { - return _sp_maddf(z, x, y, maddf_negate_product); + return _sp_maddf(z, x, y, MADDF_NEGATE_PRODUCT); } diff --git a/arch/mips/math-emu/sp_rint.c b/arch/mips/math-emu/sp_rint.c new file mode 100644 index 000000000000..70765b17e196 --- /dev/null +++ b/arch/mips/math-emu/sp_rint.c @@ -0,0 +1,90 @@ +/* IEEE754 floating point arithmetic + * single precision + */ +/* + * MIPS floating point support + * Copyright (C) 1994-2000 Algorithmics Ltd. + * Copyright (C) 2017 Imagination Technologies, Ltd. + * Author: Aleksandar Markovic + * + * This program is free software; you can distribute it and/or modify it + * under the terms of the GNU General Public License (Version 2) as + * published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. 
+ */ + +#include "ieee754sp.h" + +union ieee754sp ieee754sp_rint(union ieee754sp x) +{ + union ieee754sp ret; + u32 residue; + int sticky; + int round; + int odd; + + COMPXDP; /* <-- DP needed for 64-bit mantissa tmp */ + + ieee754_clearcx(); + + EXPLODEXSP; + FLUSHXSP; + + if (xc == IEEE754_CLASS_SNAN) + return ieee754sp_nanxcpt(x); + + if ((xc == IEEE754_CLASS_QNAN) || + (xc == IEEE754_CLASS_INF) || + (xc == IEEE754_CLASS_ZERO)) + return x; + + if (xe >= SP_FBITS) + return x; + + if (xe < -1) { + residue = xm; + round = 0; + sticky = residue != 0; + xm = 0; + } else { + residue = xm << (xe + 1); + residue <<= 31 - SP_FBITS; + round = (residue >> 31) != 0; + sticky = (residue << 1) != 0; + xm >>= SP_FBITS - xe; + } + + odd = (xm & 0x1) != 0x0; + + switch (ieee754_csr.rm) { + case FPU_CSR_RN: /* toward nearest */ + if (round && (sticky || odd)) + xm++; + break; + case FPU_CSR_RZ: /* toward zero */ + break; + case FPU_CSR_RU: /* toward +infinity */ + if ((round || sticky) && !xs) + xm++; + break; + case FPU_CSR_RD: /* toward -infinity */ + if ((round || sticky) && xs) + xm++; + break; + } + + if (round || sticky) + ieee754_setcx(IEEE754_INEXACT); + + ret = ieee754sp_flong(xm); + SPSIGN(ret) = xs; + + return ret; +} diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 81d6a15c93d0..6f534b209971 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -37,7 +37,7 @@ #include /* for run_uncached() */ #include #include -#include +#include /* * Bits describing what cache ops an SMP callback function may perform. diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c index 899e46279902..44ac64d51827 100644 --- a/arch/mips/mm/cache.c +++ b/arch/mips/mm/cache.c @@ -20,6 +20,7 @@ #include #include #include +#include /* Cache operations. */ void (*flush_cache_all)(void); @@ -44,7 +45,6 @@ void (*__flush_cache_vunmap)(void); void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size); EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range); -void (*__invalidate_kernel_vmap_range)(unsigned long vaddr, int size); /* MIPS specific cache operations */ void (*flush_cache_sigtramp)(unsigned long addr); diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c index 8e78251eccc2..c01bd20d0208 100644 --- a/arch/mips/mm/dma-default.c +++ b/arch/mips/mm/dma-default.c @@ -127,23 +127,6 @@ static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp) return gfp | dma_flag; } -static void *mips_dma_alloc_noncoherent(struct device *dev, size_t size, - dma_addr_t * dma_handle, gfp_t gfp) -{ - void *ret; - - gfp = massage_gfp_flags(dev, gfp); - - ret = (void *) __get_free_pages(gfp, get_order(size)); - - if (ret != NULL) { - memset(ret, 0, size); - *dma_handle = plat_map_dma_mem(dev, ret, size); - } - - return ret; -} - static void *mips_dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) { @@ -151,13 +134,6 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size, struct page *page = NULL; unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; - /* - * XXX: seems like the coherent and non-coherent implementations could - * be consolidated. 
- */ - if (attrs & DMA_ATTR_NON_CONSISTENT) - return mips_dma_alloc_noncoherent(dev, size, dma_handle, gfp); - gfp = massage_gfp_flags(dev, gfp); if (IS_ENABLED(CONFIG_DMA_CMA) && gfpflags_allow_blocking(gfp)) @@ -172,7 +148,8 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size, ret = page_address(page); memset(ret, 0, size); *dma_handle = plat_map_dma_mem(dev, ret, size); - if (!plat_device_is_coherent(dev)) { + if (!(attrs & DMA_ATTR_NON_CONSISTENT) && + !plat_device_is_coherent(dev)) { dma_cache_wback_inv((unsigned long) ret, size); ret = UNCAC_ADDR(ret); } @@ -180,14 +157,6 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size, return ret; } - -static void mips_dma_free_noncoherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle) -{ - plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL); - free_pages((unsigned long) vaddr, get_order(size)); -} - static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, unsigned long attrs) { @@ -195,14 +164,9 @@ static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr, unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; struct page *page = NULL; - if (attrs & DMA_ATTR_NON_CONSISTENT) { - mips_dma_free_noncoherent(dev, size, vaddr, dma_handle); - return; - } - plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL); - if (!plat_device_is_coherent(dev)) + if (!(attrs & DMA_ATTR_NON_CONSISTENT) && !plat_device_is_coherent(dev)) addr = CAC_ADDR(addr); page = virt_to_page((void *) addr); @@ -409,12 +373,12 @@ static void mips_dma_sync_sg_for_device(struct device *dev, } } -int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) +static int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { return 0; } -int mips_dma_supported(struct device *dev, u64 mask) +static int mips_dma_supported(struct device *dev, u64 mask) { return plat_dma_supported(dev, mask); } diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 8ce2983a7015..5f6ea7d746de 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c index 28adeabe851f..33d3251ecd37 100644 --- a/arch/mips/mm/mmap.c +++ b/arch/mips/mm/mmap.c @@ -7,6 +7,7 @@ * written by Ralf Baechle */ #include +#include #include #include #include diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c index c909c3342729..acfb89273dad 100644 --- a/arch/mips/mm/sc-mips.c +++ b/arch/mips/mm/sc-mips.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include /* * MIPS32/MIPS64 L2 cache handling @@ -63,34 +63,25 @@ static void mips_sc_prefetch_enable(void) * prefetching for both code & data, for all ports. 
*/ pftctl = read_gcr_l2_pft_control(); - if (pftctl & CM_GCR_L2_PFT_CONTROL_NPFT_MSK) { - pftctl &= ~CM_GCR_L2_PFT_CONTROL_PAGEMASK_MSK; - pftctl |= PAGE_MASK & CM_GCR_L2_PFT_CONTROL_PAGEMASK_MSK; - pftctl |= CM_GCR_L2_PFT_CONTROL_PFTEN_MSK; + if (pftctl & CM_GCR_L2_PFT_CONTROL_NPFT) { + pftctl &= ~CM_GCR_L2_PFT_CONTROL_PAGEMASK; + pftctl |= PAGE_MASK & CM_GCR_L2_PFT_CONTROL_PAGEMASK; + pftctl |= CM_GCR_L2_PFT_CONTROL_PFTEN; write_gcr_l2_pft_control(pftctl); - pftctl = read_gcr_l2_pft_control_b(); - pftctl |= CM_GCR_L2_PFT_CONTROL_B_PORTID_MSK; - pftctl |= CM_GCR_L2_PFT_CONTROL_B_CEN_MSK; - write_gcr_l2_pft_control_b(pftctl); + set_gcr_l2_pft_control_b(CM_GCR_L2_PFT_CONTROL_B_PORTID | + CM_GCR_L2_PFT_CONTROL_B_CEN); } } static void mips_sc_prefetch_disable(void) { - unsigned long pftctl; - if (mips_cm_revision() < CM_REV_CM2_5) return; - pftctl = read_gcr_l2_pft_control(); - pftctl &= ~CM_GCR_L2_PFT_CONTROL_PFTEN_MSK; - write_gcr_l2_pft_control(pftctl); - - pftctl = read_gcr_l2_pft_control_b(); - pftctl &= ~CM_GCR_L2_PFT_CONTROL_B_PORTID_MSK; - pftctl &= ~CM_GCR_L2_PFT_CONTROL_B_CEN_MSK; - write_gcr_l2_pft_control_b(pftctl); + clear_gcr_l2_pft_control(CM_GCR_L2_PFT_CONTROL_PFTEN); + clear_gcr_l2_pft_control_b(CM_GCR_L2_PFT_CONTROL_B_PORTID | + CM_GCR_L2_PFT_CONTROL_B_CEN); } static bool mips_sc_prefetch_is_enabled(void) @@ -101,9 +92,9 @@ static bool mips_sc_prefetch_is_enabled(void) return false; pftctl = read_gcr_l2_pft_control(); - if (!(pftctl & CM_GCR_L2_PFT_CONTROL_NPFT_MSK)) + if (!(pftctl & CM_GCR_L2_PFT_CONTROL_NPFT)) return false; - return !!(pftctl & CM_GCR_L2_PFT_CONTROL_PFTEN_MSK); + return !!(pftctl & CM_GCR_L2_PFT_CONTROL_PFTEN); } static struct bcache_ops mips_sc_ops = { @@ -160,21 +151,21 @@ static int __init mips_sc_probe_cm3(void) unsigned long cfg = read_gcr_l2_config(); unsigned long sets, line_sz, assoc; - if (cfg & CM_GCR_L2_CONFIG_BYPASS_MSK) + if (cfg & CM_GCR_L2_CONFIG_BYPASS) return 0; - sets = cfg & CM_GCR_L2_CONFIG_SET_SIZE_MSK; - sets >>= CM_GCR_L2_CONFIG_SET_SIZE_SHF; + sets = cfg & CM_GCR_L2_CONFIG_SET_SIZE; + sets >>= __ffs(CM_GCR_L2_CONFIG_SET_SIZE); if (sets) c->scache.sets = 64 << sets; - line_sz = cfg & CM_GCR_L2_CONFIG_LINE_SIZE_MSK; - line_sz >>= CM_GCR_L2_CONFIG_LINE_SIZE_SHF; + line_sz = cfg & CM_GCR_L2_CONFIG_LINE_SIZE; + line_sz >>= __ffs(CM_GCR_L2_CONFIG_LINE_SIZE); if (line_sz) c->scache.linesz = 2 << line_sz; - assoc = cfg & CM_GCR_L2_CONFIG_ASSOC_MSK; - assoc >>= CM_GCR_L2_CONFIG_ASSOC_SHF; + assoc = cfg & CM_GCR_L2_CONFIG_ASSOC; + assoc >>= __ffs(CM_GCR_L2_CONFIG_ASSOC); c->scache.ways = assoc + 1; c->scache.waysize = c->scache.sets * c->scache.linesz; c->scache.waybit = __ffs(c->scache.waysize); diff --git a/arch/mips/mm/tlbex-fault.S b/arch/mips/mm/tlbex-fault.S index 318855eb5f80..77db401fc620 100644 --- a/arch/mips/mm/tlbex-fault.S +++ b/arch/mips/mm/tlbex-fault.S @@ -12,14 +12,15 @@ .macro tlb_do_page_fault, write NESTED(tlb_do_page_fault_\write, PT_SIZE, sp) - SAVE_ALL + .cfi_signal_frame + SAVE_ALL docfi=1 MFC0 a2, CP0_BADVADDR KMODE move a0, sp REG_S a2, PT_BVADDR(sp) li a1, \write - PTR_LA ra, ret_from_exception - j do_page_fault + jal do_page_fault + j ret_from_exception END(tlb_do_page_fault_\write) .endm diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 5aadc69c8ce3..79b9f2ad3ff5 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -2634,11 +2634,6 @@ void build_tlb_refill_handler(void) #endif break; - case CPU_R6000: - case CPU_R6000A: - panic("No R6000 TLB refill handler yet"); - break; - case CPU_R8000: 
panic("No R8000 TLB refill handler yet"); break; diff --git a/arch/mips/mti-malta/malta-dtshim.c b/arch/mips/mti-malta/malta-dtshim.c index c398582c316f..a6699c15277d 100644 --- a/arch/mips/mti-malta/malta-dtshim.c +++ b/arch/mips/mti-malta/malta-dtshim.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #define ROCIT_REG_BASE 0x1f403000 @@ -236,7 +236,7 @@ static void __init remove_gic(void *fdt) /* if we have a CM which reports a GIC is present, leave the DT alone */ err = mips_cm_probe(); - if (!err && (read_gcr_gic_status() & CM_GCR_GIC_STATUS_GICEX_MSK)) + if (!err && (read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX)) return; if (malta_scon() == MIPS_REVISION_SCON_ROCIT) { diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c index 0f3b881a3190..009f2918b320 100644 --- a/arch/mips/mti-malta/malta-init.c +++ b/arch/mips/mti-malta/malta-init.c @@ -21,8 +21,7 @@ #include #include #include -#include -#include +#include #include #include diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c index b0f9b188e833..a840e0c1642c 100644 --- a/arch/mips/mti-malta/malta-int.c +++ b/arch/mips/mti-malta/malta-int.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #include #include @@ -29,9 +28,9 @@ #include #include #include -#include #include #include +#include #include #include #include @@ -215,7 +214,7 @@ void __init arch_init_irq(void) msc_nr_irqs); } - if (gic_present) { + if (mips_gic_present()) { corehi_irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_COREHI; } else if (cpu_has_veic) { set_vi_handler(MSC01E_INT_COREHI, corehi_irqdispatch); diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c index a01d5debfcaf..de34adb76157 100644 --- a/arch/mips/mti-malta/malta-setup.c +++ b/arch/mips/mti-malta/malta-setup.c @@ -28,7 +28,7 @@ #include #include -#include +#include #include #include #include @@ -128,7 +128,7 @@ static int __init plat_enable_iocoherency(void) BONITO_PCIMEMBASECFG_MEMBASE1_CACHED); pr_info("Enabled Bonito IOBC coherency\n"); } - } else if (mips_cm_numiocu() != 0) { + } else if (mips_cps_numiocu(0) != 0) { /* Nothing special needs to be done to enable coherency */ pr_info("CMP IOCU detected\n"); cfg = __raw_readl((u32 *)CKSEG1ADDR(ROCIT_CONFIG_GEN0)); diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c index cea4ec909806..66c866740ff2 100644 --- a/arch/mips/mti-malta/malta-time.c +++ b/arch/mips/mti-malta/malta-time.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include @@ -40,6 +39,7 @@ #include #include #include +#include #include #include @@ -85,8 +85,8 @@ static void __init estimate_frequencies(void) local_irq_save(flags); - if (gic_present) - gic_start_count(); + if (mips_gic_present()) + clear_gic_config(GIC_CONFIG_COUNTSTOP); /* * Read counters exactly on rising edge of update flag. @@ -95,8 +95,8 @@ static void __init estimate_frequencies(void) while (CMOS_READ(RTC_REG_A) & RTC_UIP); while (!(CMOS_READ(RTC_REG_A) & RTC_UIP)); start = read_c0_count(); - if (gic_present) - gicstart = gic_read_count(); + if (mips_gic_present()) + gicstart = read_gic_counter(); /* Wait for falling edge before reading RTC. */ while (CMOS_READ(RTC_REG_A) & RTC_UIP); @@ -105,8 +105,8 @@ static void __init estimate_frequencies(void) /* Read counters again exactly on rising edge of update flag. 
*/ while (!(CMOS_READ(RTC_REG_A) & RTC_UIP)); count = read_c0_count(); - if (gic_present) - giccount = gic_read_count(); + if (mips_gic_present()) + giccount = read_gic_counter(); /* Wait for falling edge before reading RTC again. */ while (CMOS_READ(RTC_REG_A) & RTC_UIP); @@ -128,7 +128,7 @@ static void __init estimate_frequencies(void) count /= secs; mips_hpt_frequency = count; - if (gic_present) { + if (mips_gic_present()) { giccount = div_u64(giccount - gicstart, secs); gic_frequency = giccount; } @@ -154,7 +154,7 @@ int get_c0_fdc_int(void) if (cpu_has_veic) return -1; - else if (gic_present) + else if (mips_gic_present()) return gic_get_c0_fdc_int(); else if (cp0_fdc_irq >= 0) return MIPS_CPU_IRQ_BASE + cp0_fdc_irq; @@ -167,7 +167,7 @@ int get_c0_perfcount_int(void) if (cpu_has_veic) { set_vi_handler(MSC01E_INT_PERFCTR, mips_perf_dispatch); mips_cpu_perf_irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR; - } else if (gic_present) { + } else if (mips_gic_present()) { mips_cpu_perf_irq = gic_get_c0_perfcount_int(); } else if (cp0_perfcount_irq >= 0) { mips_cpu_perf_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; @@ -184,7 +184,7 @@ unsigned int get_c0_compare_int(void) if (cpu_has_veic) { set_vi_handler(MSC01E_INT_CPUCTR, mips_timer_dispatch); mips_cpu_timer_irq = MSC01E_INT_BASE + MSC01E_INT_CPUCTR; - } else if (gic_present) { + } else if (mips_gic_present()) { mips_cpu_timer_irq = gic_get_c0_compare_int(); } else { mips_cpu_timer_irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq; @@ -258,8 +258,7 @@ void __init plat_time_init(void) setup_pit_timer(); #endif -#ifdef CONFIG_MIPS_GIC - if (gic_present) { + if (mips_gic_present()) { freq = freqround(gic_frequency, 5000); printk("GIC frequency %d.%02d MHz\n", freq/1000000, (freq%1000000)*100/1000000); @@ -268,5 +267,4 @@ void __init plat_time_init(void) timer_probe(); #endif } -#endif } diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c index 3f87b96da5c4..7646891c4e9b 100644 --- a/arch/mips/net/ebpf_jit.c +++ b/arch/mips/net/ebpf_jit.c @@ -113,6 +113,7 @@ struct jit_ctx { u64 *reg_val_types; unsigned int long_b_conversion:1; unsigned int gen_b_offsets:1; + unsigned int use_bbit_insns:1; }; static void set_reg_val_type(u64 *rvt, int reg, enum reg_val_type type) @@ -655,19 +656,6 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx) return build_int_epilogue(ctx, MIPS_R_T9); } -static bool use_bbit_insns(void) -{ - switch (current_cpu_type()) { - case CPU_CAVIUM_OCTEON: - case CPU_CAVIUM_OCTEON_PLUS: - case CPU_CAVIUM_OCTEON2: - case CPU_CAVIUM_OCTEON3: - return true; - default: - return false; - } -} - static bool is_bad_offset(int b_off) { return b_off > 0x1ffff || b_off < -0x20000; @@ -682,6 +670,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, unsigned int target; u64 t64; s64 t64s; + int bpf_op = BPF_OP(insn->code); switch (insn->code) { case BPF_ALU64 | BPF_ADD | BPF_K: /* ALU64_IMM */ @@ -770,13 +759,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, emit_instr(ctx, sll, dst, dst, 0); if (insn->imm == 1) { /* div by 1 is a nop, mod by 1 is zero */ - if (BPF_OP(insn->code) == BPF_MOD) + if (bpf_op == BPF_MOD) emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO); break; } gen_imm_to_reg(insn, MIPS_R_AT, ctx); emit_instr(ctx, divu, dst, MIPS_R_AT); - if (BPF_OP(insn->code) == BPF_DIV) + if (bpf_op == BPF_DIV) emit_instr(ctx, mflo, dst); else emit_instr(ctx, mfhi, dst); @@ -798,13 +787,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx 
*ctx, if (insn->imm == 1) { /* div by 1 is a nop, mod by 1 is zero */ - if (BPF_OP(insn->code) == BPF_MOD) + if (bpf_op == BPF_MOD) emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO); break; } gen_imm_to_reg(insn, MIPS_R_AT, ctx); emit_instr(ctx, ddivu, dst, MIPS_R_AT); - if (BPF_OP(insn->code) == BPF_DIV) + if (bpf_op == BPF_DIV) emit_instr(ctx, mflo, dst); else emit_instr(ctx, mfhi, dst); @@ -829,7 +818,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); did_move = false; if (insn->src_reg == BPF_REG_10) { - if (BPF_OP(insn->code) == BPF_MOV) { + if (bpf_op == BPF_MOV) { emit_instr(ctx, daddiu, dst, MIPS_R_SP, MAX_BPF_STACK); did_move = true; } else { @@ -839,7 +828,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, } else if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) { int tmp_reg = MIPS_R_AT; - if (BPF_OP(insn->code) == BPF_MOV) { + if (bpf_op == BPF_MOV) { tmp_reg = dst; did_move = true; } @@ -847,7 +836,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, emit_instr(ctx, dinsu, tmp_reg, MIPS_R_ZERO, 32, 32); src = MIPS_R_AT; } - switch (BPF_OP(insn->code)) { + switch (bpf_op) { case BPF_MOV: if (!did_move) emit_instr(ctx, daddu, dst, src, MIPS_R_ZERO); @@ -879,7 +868,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, emit_instr(ctx, beq, src, MIPS_R_ZERO, b_off); emit_instr(ctx, movz, MIPS_R_V0, MIPS_R_ZERO, src); emit_instr(ctx, ddivu, dst, src); - if (BPF_OP(insn->code) == BPF_DIV) + if (bpf_op == BPF_DIV) emit_instr(ctx, mflo, dst); else emit_instr(ctx, mfhi, dst); @@ -923,7 +912,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, if (ts == REG_64BIT || ts == REG_32BIT_ZERO_EX) { int tmp_reg = MIPS_R_AT; - if (BPF_OP(insn->code) == BPF_MOV) { + if (bpf_op == BPF_MOV) { tmp_reg = dst; did_move = true; } @@ -931,7 +920,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, emit_instr(ctx, sll, tmp_reg, src, 0); src = MIPS_R_AT; } - switch (BPF_OP(insn->code)) { + switch (bpf_op) { case BPF_MOV: if (!did_move) emit_instr(ctx, addu, dst, src, MIPS_R_ZERO); @@ -962,7 +951,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, emit_instr(ctx, beq, src, MIPS_R_ZERO, b_off); emit_instr(ctx, movz, MIPS_R_V0, MIPS_R_ZERO, src); emit_instr(ctx, divu, dst, src); - if (BPF_OP(insn->code) == BPF_DIV) + if (bpf_op == BPF_DIV) emit_instr(ctx, mflo, dst); else emit_instr(ctx, mfhi, dst); @@ -989,7 +978,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, break; case BPF_JMP | BPF_JEQ | BPF_K: /* JMP_IMM */ case BPF_JMP | BPF_JNE | BPF_K: /* JMP_IMM */ - cmp_eq = (BPF_OP(insn->code) == BPF_JEQ); + cmp_eq = (bpf_op == BPF_JEQ); dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok); if (dst < 0) return dst; @@ -1002,8 +991,12 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, goto jeq_common; case BPF_JMP | BPF_JEQ | BPF_X: /* JMP_REG */ case BPF_JMP | BPF_JNE | BPF_X: + case BPF_JMP | BPF_JSLT | BPF_X: + case BPF_JMP | BPF_JSLE | BPF_X: case BPF_JMP | BPF_JSGT | BPF_X: case BPF_JMP | BPF_JSGE | BPF_X: + case BPF_JMP | BPF_JLT | BPF_X: + case BPF_JMP | BPF_JLE | BPF_X: case BPF_JMP | BPF_JGT | BPF_X: case BPF_JMP | BPF_JGE | BPF_X: case BPF_JMP | BPF_JSET | BPF_X: @@ -1020,33 +1013,39 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, emit_instr(ctx, sll, 
MIPS_R_AT, dst, 0); dst = MIPS_R_AT; } - if (BPF_OP(insn->code) == BPF_JSET) { + if (bpf_op == BPF_JSET) { emit_instr(ctx, and, MIPS_R_AT, dst, src); cmp_eq = false; dst = MIPS_R_AT; src = MIPS_R_ZERO; - } else if (BPF_OP(insn->code) == BPF_JSGT) { + } else if (bpf_op == BPF_JSGT || bpf_op == BPF_JSLE) { emit_instr(ctx, dsubu, MIPS_R_AT, dst, src); if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) { b_off = b_imm(exit_idx, ctx); if (is_bad_offset(b_off)) return -E2BIG; - emit_instr(ctx, blez, MIPS_R_AT, b_off); + if (bpf_op == BPF_JSGT) + emit_instr(ctx, blez, MIPS_R_AT, b_off); + else + emit_instr(ctx, bgtz, MIPS_R_AT, b_off); emit_instr(ctx, nop); return 2; /* We consumed the exit. */ } b_off = b_imm(this_idx + insn->off + 1, ctx); if (is_bad_offset(b_off)) return -E2BIG; - emit_instr(ctx, bgtz, MIPS_R_AT, b_off); + if (bpf_op == BPF_JSGT) + emit_instr(ctx, bgtz, MIPS_R_AT, b_off); + else + emit_instr(ctx, blez, MIPS_R_AT, b_off); emit_instr(ctx, nop); break; - } else if (BPF_OP(insn->code) == BPF_JSGE) { + } else if (bpf_op == BPF_JSGE || bpf_op == BPF_JSLT) { emit_instr(ctx, slt, MIPS_R_AT, dst, src); - cmp_eq = true; + cmp_eq = bpf_op == BPF_JSGE; dst = MIPS_R_AT; src = MIPS_R_ZERO; - } else if (BPF_OP(insn->code) == BPF_JGT) { + } else if (bpf_op == BPF_JGT || bpf_op == BPF_JLE) { /* dst or src could be AT */ emit_instr(ctx, dsubu, MIPS_R_T8, dst, src); emit_instr(ctx, sltu, MIPS_R_AT, dst, src); @@ -1054,16 +1053,16 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, emit_instr(ctx, movz, MIPS_R_T9, MIPS_R_SP, MIPS_R_T8); emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_ZERO, MIPS_R_T8); emit_instr(ctx, or, MIPS_R_AT, MIPS_R_T9, MIPS_R_AT); - cmp_eq = true; + cmp_eq = bpf_op == BPF_JGT; dst = MIPS_R_AT; src = MIPS_R_ZERO; - } else if (BPF_OP(insn->code) == BPF_JGE) { + } else if (bpf_op == BPF_JGE || bpf_op == BPF_JLT) { emit_instr(ctx, sltu, MIPS_R_AT, dst, src); - cmp_eq = true; + cmp_eq = bpf_op == BPF_JGE; dst = MIPS_R_AT; src = MIPS_R_ZERO; } else { /* JNE/JEQ case */ - cmp_eq = (BPF_OP(insn->code) == BPF_JEQ); + cmp_eq = (bpf_op == BPF_JEQ); } jeq_common: /* @@ -1122,7 +1121,9 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, break; case BPF_JMP | BPF_JSGT | BPF_K: /* JMP_IMM */ case BPF_JMP | BPF_JSGE | BPF_K: /* JMP_IMM */ - cmp_eq = (BPF_OP(insn->code) == BPF_JSGE); + case BPF_JMP | BPF_JSLT | BPF_K: /* JMP_IMM */ + case BPF_JMP | BPF_JSLE | BPF_K: /* JMP_IMM */ + cmp_eq = (bpf_op == BPF_JSGE); dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok); if (dst < 0) return dst; @@ -1132,65 +1133,92 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, b_off = b_imm(exit_idx, ctx); if (is_bad_offset(b_off)) return -E2BIG; - if (cmp_eq) - emit_instr(ctx, bltz, dst, b_off); - else + switch (bpf_op) { + case BPF_JSGT: emit_instr(ctx, blez, dst, b_off); + break; + case BPF_JSGE: + emit_instr(ctx, bltz, dst, b_off); + break; + case BPF_JSLT: + emit_instr(ctx, bgez, dst, b_off); + break; + case BPF_JSLE: + emit_instr(ctx, bgtz, dst, b_off); + break; + } emit_instr(ctx, nop); return 2; /* We consumed the exit. 
*/ } b_off = b_imm(this_idx + insn->off + 1, ctx); if (is_bad_offset(b_off)) return -E2BIG; - if (cmp_eq) - emit_instr(ctx, bgez, dst, b_off); - else + switch (bpf_op) { + case BPF_JSGT: emit_instr(ctx, bgtz, dst, b_off); + break; + case BPF_JSGE: + emit_instr(ctx, bgez, dst, b_off); + break; + case BPF_JSLT: + emit_instr(ctx, bltz, dst, b_off); + break; + case BPF_JSLE: + emit_instr(ctx, blez, dst, b_off); + break; + } emit_instr(ctx, nop); break; } /* * only "LT" compare available, so we must use imm + 1 - * to generate "GT" + * to generate "GT" and imm -1 to generate LE */ - t64s = insn->imm + (cmp_eq ? 0 : 1); + if (bpf_op == BPF_JSGT) + t64s = insn->imm + 1; + else if (bpf_op == BPF_JSLE) + t64s = insn->imm + 1; + else + t64s = insn->imm; + + cmp_eq = bpf_op == BPF_JSGT || bpf_op == BPF_JSGE; if (t64s >= S16_MIN && t64s <= S16_MAX) { emit_instr(ctx, slti, MIPS_R_AT, dst, (int)t64s); src = MIPS_R_AT; dst = MIPS_R_ZERO; - cmp_eq = true; goto jeq_common; } emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s); emit_instr(ctx, slt, MIPS_R_AT, dst, MIPS_R_AT); src = MIPS_R_AT; dst = MIPS_R_ZERO; - cmp_eq = true; goto jeq_common; case BPF_JMP | BPF_JGT | BPF_K: case BPF_JMP | BPF_JGE | BPF_K: - cmp_eq = (BPF_OP(insn->code) == BPF_JGE); + case BPF_JMP | BPF_JLT | BPF_K: + case BPF_JMP | BPF_JLE | BPF_K: + cmp_eq = (bpf_op == BPF_JGE); dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok); if (dst < 0) return dst; /* * only "LT" compare available, so we must use imm + 1 - * to generate "GT" + * to generate "GT" and imm -1 to generate LE */ - t64s = (u64)(u32)(insn->imm) + (cmp_eq ? 0 : 1); - if (t64s >= 0 && t64s <= S16_MAX) { - emit_instr(ctx, sltiu, MIPS_R_AT, dst, (int)t64s); - src = MIPS_R_AT; - dst = MIPS_R_ZERO; - cmp_eq = true; - goto jeq_common; - } + if (bpf_op == BPF_JGT) + t64s = (u64)(u32)(insn->imm) + 1; + else if (bpf_op == BPF_JLE) + t64s = (u64)(u32)(insn->imm) + 1; + else + t64s = (u64)(u32)(insn->imm); + + cmp_eq = bpf_op == BPF_JGT || bpf_op == BPF_JGE; + emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s); emit_instr(ctx, sltu, MIPS_R_AT, dst, MIPS_R_AT); src = MIPS_R_AT; dst = MIPS_R_ZERO; - cmp_eq = true; goto jeq_common; case BPF_JMP | BPF_JSET | BPF_K: /* JMP_IMM */ @@ -1198,7 +1226,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, if (dst < 0) return dst; - if (use_bbit_insns() && hweight32((u32)insn->imm) == 1) { + if (ctx->use_bbit_insns && hweight32((u32)insn->imm) == 1) { if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) { b_off = b_imm(exit_idx, ctx); if (is_bad_offset(b_off)) @@ -1724,10 +1752,14 @@ static int reg_val_propagate_range(struct jit_ctx *ctx, u64 initial_rvt, case BPF_JEQ: case BPF_JGT: case BPF_JGE: + case BPF_JLT: + case BPF_JLE: case BPF_JSET: case BPF_JNE: case BPF_JSGT: case BPF_JSGE: + case BPF_JSLT: + case BPF_JSLE: if (follow_taken) { rvt[idx] |= RVT_BRANCH_TAKEN; idx += insn->off; @@ -1853,6 +1885,19 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) memset(&ctx, 0, sizeof(ctx)); + preempt_disable(); + switch (current_cpu_type()) { + case CPU_CAVIUM_OCTEON: + case CPU_CAVIUM_OCTEON_PLUS: + case CPU_CAVIUM_OCTEON2: + case CPU_CAVIUM_OCTEON3: + ctx.use_bbit_insns = 1; + break; + default: + ctx.use_bbit_insns = 0; + } + preempt_enable(); + ctx.offsets = kcalloc(prog->len + 1, sizeof(*ctx.offsets), GFP_KERNEL); if (ctx.offsets == NULL) goto out_err; diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c index bddf1ef553a4..39a300bd6cc2 100644 --- a/arch/mips/netlogic/common/smp.c +++ 
b/arch/mips/netlogic/common/smp.c @@ -122,7 +122,7 @@ static void nlm_init_secondary(void) int hwtid; hwtid = hard_smp_processor_id(); - current_cpu_data.core = hwtid / NLM_THREADS_PER_CORE; + cpu_set_core(¤t_cpu_data, hwtid / NLM_THREADS_PER_CORE); current_cpu_data.package = nlm_nodeid(); nlm_percpu_init(hwtid); nlm_smp_irq_init(hwtid); @@ -147,7 +147,7 @@ unsigned long nlm_next_gp; unsigned long nlm_next_sp; static cpumask_t phys_cpu_present_mask; -void nlm_boot_secondary(int logical_cpu, struct task_struct *idle) +int nlm_boot_secondary(int logical_cpu, struct task_struct *idle) { uint64_t picbase; int hwtid; @@ -161,6 +161,8 @@ void nlm_boot_secondary(int logical_cpu, struct task_struct *idle) /* barrier for sp/gp store above */ __sync(); nlm_pic_send_ipi(picbase, hwtid, 1, 1); /* NMI */ + + return 0; } void __init nlm_smp_setup(void) @@ -272,7 +274,7 @@ int nlm_wakeup_secondary_cpus(void) return 0; } -struct plat_smp_ops nlm_smp_ops = { +const struct plat_smp_ops nlm_smp_ops = { .send_ipi_single = nlm_send_ipi_single, .send_ipi_mask = nlm_send_ipi_mask, .init_secondary = nlm_init_secondary, diff --git a/arch/mips/netlogic/xlr/platform-flash.c b/arch/mips/netlogic/xlr/platform-flash.c index f03131fec41d..4d1b4c003376 100644 --- a/arch/mips/netlogic/xlr/platform-flash.c +++ b/arch/mips/netlogic/xlr/platform-flash.c @@ -19,7 +19,7 @@ #include #include -#include +#include #include #include diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c index c57da6f13929..c3e4c18ef8d4 100644 --- a/arch/mips/oprofile/op_model_mipsxx.c +++ b/arch/mips/oprofile/op_model_mipsxx.c @@ -38,9 +38,9 @@ static int perfcount_irq; #ifdef CONFIG_MIPS_MT_SMP static int cpu_has_mipsmt_pertccounters; #define WHAT (MIPS_PERFCTRL_MT_EN_VPE | \ - M_PERFCTL_VPEID(cpu_data[smp_processor_id()].vpe_id)) + M_PERFCTL_VPEID(cpu_vpe_id(¤t_cpu_data))) #define vpe_id() (cpu_has_mipsmt_pertccounters ? 
\ - 0 : cpu_data[smp_processor_id()].vpe_id) + 0 : cpu_vpe_id(¤t_cpu_data)) /* * The number of bits to shift to convert between counters per core and diff --git a/arch/mips/paravirt/paravirt-smp.c b/arch/mips/paravirt/paravirt-smp.c index 72eb1a56c645..107d9f90d668 100644 --- a/arch/mips/paravirt/paravirt-smp.c +++ b/arch/mips/paravirt/paravirt-smp.c @@ -100,11 +100,12 @@ static void paravirt_smp_finish(void) local_irq_enable(); } -static void paravirt_boot_secondary(int cpu, struct task_struct *idle) +static int paravirt_boot_secondary(int cpu, struct task_struct *idle) { paravirt_smp_gp[cpu] = (unsigned long)task_thread_info(idle); smp_wmb(); paravirt_smp_sp[cpu] = __KSTK_TOS(idle); + return 0; } static irqreturn_t paravirt_reched_interrupt(int irq, void *dev_id) @@ -133,7 +134,7 @@ static void paravirt_prepare_cpus(unsigned int max_cpus) } } -struct plat_smp_ops paravirt_smp_ops = { +const struct plat_smp_ops paravirt_smp_ops = { .send_ipi_single = paravirt_send_ipi_single, .send_ipi_mask = paravirt_send_ipi_mask, .init_secondary = paravirt_init_secondary, diff --git a/arch/mips/paravirt/setup.c b/arch/mips/paravirt/setup.c index cb8448b373a7..d2ffec1409a7 100644 --- a/arch/mips/paravirt/setup.c +++ b/arch/mips/paravirt/setup.c @@ -14,7 +14,7 @@ #include #include -extern struct plat_smp_ops paravirt_smp_ops; +extern const struct plat_smp_ops paravirt_smp_ops; const char *get_system_type(void) { diff --git a/arch/mips/pci/fixup-capcella.c b/arch/mips/pci/fixup-capcella.c index 1c02f5737367..b4c263f16b15 100644 --- a/arch/mips/pci/fixup-capcella.c +++ b/arch/mips/pci/fixup-capcella.c @@ -32,13 +32,13 @@ #define INTC PC104PLUS_INTC_IRQ #define INTD PC104PLUS_INTD_IRQ -static char irq_tab_capcella[][5] __initdata = { +static char irq_tab_capcella[][5] = { [11] = { -1, INT1, INT1, INT1, INT1 }, [12] = { -1, INT2, INT2, INT2, INT2 }, [14] = { -1, INTA, INTB, INTC, INTD } }; -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { return irq_tab_capcella[slot][pin]; } diff --git a/arch/mips/pci/fixup-cobalt.c b/arch/mips/pci/fixup-cobalt.c index b3ab59318d91..44be65c3e6bb 100644 --- a/arch/mips/pci/fixup-cobalt.c +++ b/arch/mips/pci/fixup-cobalt.c @@ -147,7 +147,7 @@ static void qube_raq_via_board_id_fixup(struct pci_dev *dev) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, qube_raq_via_board_id_fixup); -static char irq_tab_qube1[] __initdata = { +static char irq_tab_qube1[] = { [COBALT_PCICONF_CPU] = 0, [COBALT_PCICONF_ETH0] = QUBE1_ETH0_IRQ, [COBALT_PCICONF_RAQSCSI] = SCSI_IRQ, @@ -156,7 +156,7 @@ static char irq_tab_qube1[] __initdata = { [COBALT_PCICONF_ETH1] = 0 }; -static char irq_tab_cobalt[] __initdata = { +static char irq_tab_cobalt[] = { [COBALT_PCICONF_CPU] = 0, [COBALT_PCICONF_ETH0] = ETH0_IRQ, [COBALT_PCICONF_RAQSCSI] = SCSI_IRQ, @@ -165,7 +165,7 @@ static char irq_tab_cobalt[] __initdata = { [COBALT_PCICONF_ETH1] = ETH1_IRQ }; -static char irq_tab_raq2[] __initdata = { +static char irq_tab_raq2[] = { [COBALT_PCICONF_CPU] = 0, [COBALT_PCICONF_ETH0] = ETH0_IRQ, [COBALT_PCICONF_RAQSCSI] = RAQ2_SCSI_IRQ, @@ -174,7 +174,7 @@ static char irq_tab_raq2[] __initdata = { [COBALT_PCICONF_ETH1] = ETH1_IRQ }; -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { if (cobalt_board_id <= COBALT_BRD_ID_QUBE1) return irq_tab_qube1[slot]; diff --git a/arch/mips/pci/fixup-emma2rh.c 
b/arch/mips/pci/fixup-emma2rh.c index 19caf775c206..c31cb6af1cd0 100644 --- a/arch/mips/pci/fixup-emma2rh.c +++ b/arch/mips/pci/fixup-emma2rh.c @@ -43,7 +43,7 @@ */ #define MAX_SLOT_NUM 10 -static unsigned char irq_map[][5] __initdata = { +static unsigned char irq_map[][5] = { [3] = {0, MARKEINS_PCI_IRQ_INTB, MARKEINS_PCI_IRQ_INTC, MARKEINS_PCI_IRQ_INTD, 0,}, [4] = {0, MARKEINS_PCI_IRQ_INTA, 0, 0, 0,}, @@ -85,7 +85,7 @@ static void emma2rh_pci_host_fixup(struct pci_dev *dev) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_EMMA2RH, emma2rh_pci_host_fixup); -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { return irq_map[slot][pin]; } diff --git a/arch/mips/pci/fixup-fuloong2e.c b/arch/mips/pci/fixup-fuloong2e.c index 50da773faede..b47c2771dc99 100644 --- a/arch/mips/pci/fixup-fuloong2e.c +++ b/arch/mips/pci/fixup-fuloong2e.c @@ -19,7 +19,7 @@ /* South bridge slot number is set by the pci probe process */ static u8 sb_slot = 5; -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { int irq = 0; diff --git a/arch/mips/pci/fixup-ip32.c b/arch/mips/pci/fixup-ip32.c index 133685e215ee..c6ec18a07e63 100644 --- a/arch/mips/pci/fixup-ip32.c +++ b/arch/mips/pci/fixup-ip32.c @@ -21,7 +21,7 @@ #define INTB MACEPCI_SHARED0_IRQ #define INTC MACEPCI_SHARED1_IRQ #define INTD MACEPCI_SHARED2_IRQ -static char irq_tab_mace[][5] __initdata = { +static char irq_tab_mace[][5] = { /* Dummy INT#A INT#B INT#C INT#D */ {0, 0, 0, 0, 0}, /* This is placeholder row - never used */ {0, SCSI0, SCSI0, SCSI0, SCSI0}, @@ -39,7 +39,7 @@ static char irq_tab_mace[][5] __initdata = { * irqs. I suppose a device without a pin A will thank us for doing it * right if there exists such a broken piece of crap. */ -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { return irq_tab_mace[slot][pin]; } diff --git a/arch/mips/pci/fixup-jmr3927.c b/arch/mips/pci/fixup-jmr3927.c index 0f1069527cba..d3102eeea898 100644 --- a/arch/mips/pci/fixup-jmr3927.c +++ b/arch/mips/pci/fixup-jmr3927.c @@ -31,7 +31,7 @@ #include #include -int __init jmr3927_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int jmr3927_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { unsigned char irq = pin; diff --git a/arch/mips/pci/fixup-lantiq.c b/arch/mips/pci/fixup-lantiq.c index 2b5427d3f35c..81530a13b349 100644 --- a/arch/mips/pci/fixup-lantiq.c +++ b/arch/mips/pci/fixup-lantiq.c @@ -23,7 +23,7 @@ int pcibios_plat_dev_init(struct pci_dev *dev) return 0; } -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { return of_irq_parse_and_map_pci(dev, slot, pin); } diff --git a/arch/mips/pci/fixup-lemote2f.c b/arch/mips/pci/fixup-lemote2f.c index 95ab9a1bd010..20cdfdc08938 100644 --- a/arch/mips/pci/fixup-lemote2f.c +++ b/arch/mips/pci/fixup-lemote2f.c @@ -30,7 +30,7 @@ #define PCID 7 /* all the pci device has the PCIA pin, check the datasheet. 
*/ -static char irq_tab[][5] __initdata = { +static char irq_tab[][5] = { /* INTA INTB INTC INTD */ {0, 0, 0, 0, 0}, /* 11: Unused */ {0, 0, 0, 0, 0}, /* 12: Unused */ @@ -51,7 +51,7 @@ static char irq_tab[][5] __initdata = { {0, 0, 0, 0, 0}, /* 27: Unused */ }; -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { int virq; diff --git a/arch/mips/pci/fixup-loongson3.c b/arch/mips/pci/fixup-loongson3.c index 2b6d5e196f99..8a741c2c6685 100644 --- a/arch/mips/pci/fixup-loongson3.c +++ b/arch/mips/pci/fixup-loongson3.c @@ -32,7 +32,7 @@ static void print_fixup_info(const struct pci_dev *pdev) pdev->vendor, pdev->device, pdev->irq); } -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { print_fixup_info(dev); return dev->irq; diff --git a/arch/mips/pci/fixup-malta.c b/arch/mips/pci/fixup-malta.c index 40e920c653cc..3ec85331795e 100644 --- a/arch/mips/pci/fixup-malta.c +++ b/arch/mips/pci/fixup-malta.c @@ -12,7 +12,7 @@ static char pci_irq[5] = { }; -static char irq_tab[][5] __initdata = { +static char irq_tab[][5] = { /* INTA INTB INTC INTD */ {0, 0, 0, 0, 0 }, /* 0: GT64120 PCI bridge */ {0, 0, 0, 0, 0 }, /* 1: Unused */ @@ -38,7 +38,7 @@ static char irq_tab[][5] __initdata = { {0, PCID, PCIA, PCIB, PCIC } /* 21: PCI Slot 4 */ }; -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { int virq; virq = irq_tab[slot][pin]; diff --git a/arch/mips/pci/fixup-mpc30x.c b/arch/mips/pci/fixup-mpc30x.c index 8e4f8288eca2..66eaf456bc89 100644 --- a/arch/mips/pci/fixup-mpc30x.c +++ b/arch/mips/pci/fixup-mpc30x.c @@ -22,19 +22,19 @@ #include -static const int internal_func_irqs[] __initconst = { +static const int internal_func_irqs[] = { VRC4173_CASCADE_IRQ, VRC4173_AC97_IRQ, VRC4173_USB_IRQ, }; -static const int irq_tab_mpc30x[] __initconst = { +static const int irq_tab_mpc30x[] = { [12] = VRC4173_PCMCIA1_IRQ, [13] = VRC4173_PCMCIA2_IRQ, [29] = MQ200_IRQ, }; -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { if (slot == 30) return internal_func_irqs[PCI_FUNC(dev->devfn)]; diff --git a/arch/mips/pci/fixup-pmcmsp.c b/arch/mips/pci/fixup-pmcmsp.c index fab405c21c2f..4ad2ef02087b 100644 --- a/arch/mips/pci/fixup-pmcmsp.c +++ b/arch/mips/pci/fixup-pmcmsp.c @@ -47,7 +47,7 @@ #if defined(CONFIG_PMC_MSP7120_GW) /* Garibaldi Board IRQ wiring to PCI slots */ -static char irq_tab[][5] __initdata = { +static char irq_tab[][5] = { /* INTA INTB INTC INTD */ {0, 0, 0, 0, 0 }, /* (AD[0]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[1]): Unused */ @@ -86,7 +86,7 @@ static char irq_tab[][5] __initdata = { #elif defined(CONFIG_PMC_MSP7120_EVAL) /* MSP7120 Eval Board IRQ wiring to PCI slots */ -static char irq_tab[][5] __initdata = { +static char irq_tab[][5] = { /* INTA INTB INTC INTD */ {0, 0, 0, 0, 0 }, /* (AD[0]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[1]): Unused */ @@ -125,7 +125,7 @@ static char irq_tab[][5] __initdata = { #else /* Unknown board -- don't assign any IRQs */ -static char irq_tab[][5] __initdata = { +static char irq_tab[][5] = { /* INTA INTB INTC INTD */ {0, 0, 0, 0, 0 }, /* (AD[0]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[1]): Unused */ @@ -202,7 +202,7 @@ int pcibios_plat_dev_init(struct pci_dev *dev) * RETURNS: IRQ number * 
****************************************************************************/ -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { #if !defined(CONFIG_PMC_MSP7120_GW) && !defined(CONFIG_PMC_MSP7120_EVAL) printk(KERN_WARNING "PCI: unknown board, no PCI IRQs assigned.\n"); diff --git a/arch/mips/pci/fixup-rbtx4927.c b/arch/mips/pci/fixup-rbtx4927.c index 321db265829c..d6aaed1d6be9 100644 --- a/arch/mips/pci/fixup-rbtx4927.c +++ b/arch/mips/pci/fixup-rbtx4927.c @@ -36,7 +36,7 @@ #include #include -int __init rbtx4927_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int rbtx4927_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { unsigned char irq = pin; diff --git a/arch/mips/pci/fixup-rbtx4938.c b/arch/mips/pci/fixup-rbtx4938.c index a80579af609b..ff22a22db73e 100644 --- a/arch/mips/pci/fixup-rbtx4938.c +++ b/arch/mips/pci/fixup-rbtx4938.c @@ -13,7 +13,7 @@ #include #include -int __init rbtx4938_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int rbtx4938_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { int irq = tx4938_pcic1_map_irq(dev, slot); diff --git a/arch/mips/pci/fixup-sni.c b/arch/mips/pci/fixup-sni.c index f67ebeeb4200..adb9a58641e8 100644 --- a/arch/mips/pci/fixup-sni.c +++ b/arch/mips/pci/fixup-sni.c @@ -40,7 +40,7 @@ * seem to be a documentation error. At least on my RM200C the Cirrus * Logic CL-GD5434 VGA is device 3. */ -static char irq_tab_rm200[8][5] __initdata = { +static char irq_tab_rm200[8][5] = { /* INTA INTB INTC INTD */ { 0, 0, 0, 0, 0 }, /* EISA bridge */ { SCSI, SCSI, SCSI, SCSI, SCSI }, /* SCSI */ @@ -57,7 +57,7 @@ static char irq_tab_rm200[8][5] __initdata = { * * The VGA card is optional for RM300 systems. 
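A pattern repeated across these PCI fixup files is dropping __init from pcibios_map_irq() and __initdata from the IRQ routing tables. The reason, visible in the pci-legacy.c hunk further down where pci_fixup_irqs() goes away in favour of a per-bridge map_irq hook, is that IRQ mapping may now run whenever a device is added, long after init memory has been freed. The sketch below shows the pattern the tree is moving to; board_irq_tab and board_map_irq are invented names, not code from any of these files:

#include <linux/pci.h>

/* No __initdata: the table has to outlive boot because the mapping can
 * be invoked whenever a PCI device shows up, not only from the one-shot
 * pci_fixup_irqs() pass that used to run during init. */
static char board_irq_tab[4][5] = {
	/*      dummy INTA INTB INTC INTD */
	[2] = { -1,   9,   10,  11,  12 },
};

/* No __init: with map_irq wired into the host bridge, this can be called
 * after free_initmem(); keeping it resident avoids a use-after-free and
 * the section-mismatch warning that would come with it. */
int board_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	return board_irq_tab[slot & 3][pin];
}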
*/ -static char irq_tab_rm300d[8][5] __initdata = { +static char irq_tab_rm300d[8][5] = { /* INTA INTB INTC INTD */ { 0, 0, 0, 0, 0 }, /* EISA bridge */ { SCSI, SCSI, SCSI, SCSI, SCSI }, /* SCSI */ @@ -69,7 +69,7 @@ static char irq_tab_rm300d[8][5] __initdata = { { 0, INTD, INTA, INTB, INTC }, /* Slot 4 */ }; -static char irq_tab_rm300e[5][5] __initdata = { +static char irq_tab_rm300e[5][5] = { /* INTA INTB INTC INTD */ { 0, 0, 0, 0, 0 }, /* HOST bridge */ { SCSI, SCSI, SCSI, SCSI, SCSI }, /* SCSI */ @@ -96,7 +96,7 @@ static char irq_tab_rm300e[5][5] __initdata = { #define INTC PCIT_IRQ_INTC #define INTD PCIT_IRQ_INTD -static char irq_tab_pcit[13][5] __initdata = { +static char irq_tab_pcit[13][5] = { /* INTA INTB INTC INTD */ { 0, 0, 0, 0, 0 }, /* HOST bridge */ { SCSI0, SCSI0, SCSI0, SCSI0, SCSI0 }, /* SCSI */ @@ -113,7 +113,7 @@ static char irq_tab_pcit[13][5] __initdata = { { 0, INTA, INTB, INTC, INTD }, /* Slot 5 */ }; -static char irq_tab_pcit_cplus[13][5] __initdata = { +static char irq_tab_pcit_cplus[13][5] = { /* INTA INTB INTC INTD */ { 0, 0, 0, 0, 0 }, /* HOST bridge */ { 0, INTB, INTC, INTD, INTA }, /* PCI Slot 9 */ @@ -130,7 +130,7 @@ static inline int is_rm300_revd(void) return (csmsr & 0xa0) == 0x20; } -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { switch (sni_brd_type) { case SNI_BRD_PCI_TOWER_CPLUS: diff --git a/arch/mips/pci/fixup-tb0219.c b/arch/mips/pci/fixup-tb0219.c index d0b0083fbd27..cc581535f257 100644 --- a/arch/mips/pci/fixup-tb0219.c +++ b/arch/mips/pci/fixup-tb0219.c @@ -23,7 +23,7 @@ #include -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { int irq = -1; diff --git a/arch/mips/pci/fixup-tb0226.c b/arch/mips/pci/fixup-tb0226.c index 4196ccf3ea3d..b827b5cad5fd 100644 --- a/arch/mips/pci/fixup-tb0226.c +++ b/arch/mips/pci/fixup-tb0226.c @@ -23,7 +23,7 @@ #include #include -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { int irq = -1; diff --git a/arch/mips/pci/fixup-tb0287.c b/arch/mips/pci/fixup-tb0287.c index 8c5039ed75d7..98f26285f2e3 100644 --- a/arch/mips/pci/fixup-tb0287.c +++ b/arch/mips/pci/fixup-tb0287.c @@ -22,7 +22,7 @@ #include -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { unsigned char bus; int irq = -1; diff --git a/arch/mips/pci/pci-alchemy.c b/arch/mips/pci/pci-alchemy.c index e99ca7702d8a..f15ec98de2de 100644 --- a/arch/mips/pci/pci-alchemy.c +++ b/arch/mips/pci/pci-alchemy.c @@ -522,7 +522,7 @@ static int __init alchemy_pci_init(void) arch_initcall(alchemy_pci_init); -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { struct alchemy_pci_context *ctx = dev->sysdata; if (ctx && ctx->board_map_irq) diff --git a/arch/mips/pci/pci-bcm47xx.c b/arch/mips/pci/pci-bcm47xx.c index 76f16eaed0ad..230d7dd273e2 100644 --- a/arch/mips/pci/pci-bcm47xx.c +++ b/arch/mips/pci/pci-bcm47xx.c @@ -28,7 +28,7 @@ #include #include -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { return 0; } diff --git a/arch/mips/pci/pci-lasat.c b/arch/mips/pci/pci-lasat.c index 40d2797d2bc4..47f4ee6bbb3b 100644 --- a/arch/mips/pci/pci-lasat.c +++ 
b/arch/mips/pci/pci-lasat.c @@ -61,7 +61,7 @@ arch_initcall(lasat_pci_setup); #define LASAT_IRQ_PCIC (LASAT_IRQ_BASE + 7) #define LASAT_IRQ_PCID (LASAT_IRQ_BASE + 8) -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { switch (slot) { case 1: diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c index 174575a9a112..0c65c38e05d6 100644 --- a/arch/mips/pci/pci-legacy.c +++ b/arch/mips/pci/pci-legacy.c @@ -78,6 +78,12 @@ static void pcibios_scanbus(struct pci_controller *hose) static int need_domain_info; LIST_HEAD(resources); struct pci_bus *bus; + struct pci_host_bridge *bridge; + int ret; + + bridge = pci_alloc_host_bridge(0); + if (!bridge) + return; if (hose->get_busno && pci_has_flag(PCI_PROBE_ONLY)) next_busno = (*hose->get_busno)(); @@ -87,18 +93,24 @@ static void pcibios_scanbus(struct pci_controller *hose) pci_add_resource_offset(&resources, hose->io_resource, hose->io_offset); pci_add_resource(&resources, hose->busn_resource); - bus = pci_scan_root_bus(NULL, next_busno, hose->pci_ops, hose, - &resources); - hose->bus = bus; + list_splice_init(&resources, &bridge->windows); + bridge->dev.parent = NULL; + bridge->sysdata = hose; + bridge->busnr = next_busno; + bridge->ops = hose->pci_ops; + bridge->swizzle_irq = pci_common_swizzle; + bridge->map_irq = pcibios_map_irq; + ret = pci_scan_root_bus_bridge(bridge); + if (ret) { + pci_free_host_bridge(bridge); + return; + } + + hose->bus = bus = bridge->bus; need_domain_info = need_domain_info || pci_domain_nr(bus); set_pci_need_domain_info(hose, need_domain_info); - if (!bus) { - pci_free_resource_list(&resources); - return; - } - next_busno = bus->busn_res.end + 1; /* Don't allow 8-bit bus number overflow inside the hose - reserve some space for bridges. 
*/ @@ -127,7 +139,7 @@ void pci_load_of_ranges(struct pci_controller *hose, struct device_node *node) struct of_pci_range range; struct of_pci_range_parser parser; - pr_info("PCI host bridge %s ranges:\n", node->full_name); + pr_info("PCI host bridge %pOF ranges:\n", node); hose->of_node = node; if (of_pci_range_parser_init(&parser, node)) @@ -224,8 +236,6 @@ static int __init pcibios_init(void) list_for_each_entry(hose, &controllers, list) pcibios_scanbus(hose); - pci_fixup_irqs(pci_common_swizzle, pcibios_map_irq); - pci_initialized = 1; return 0; diff --git a/arch/mips/pci/pci-malta.c b/arch/mips/pci/pci-malta.c index cfbbc3e3e914..88e625fb3a47 100644 --- a/arch/mips/pci/pci-malta.c +++ b/arch/mips/pci/pci-malta.c @@ -27,7 +27,7 @@ #include #include -#include +#include #include #include #include @@ -201,7 +201,7 @@ void __init mips_pcibios_init(void) msc_mem_resource.start = start & mask; msc_mem_resource.end = (start & mask) | ~mask; msc_controller.mem_offset = (start & mask) - (map & mask); - if (mips_cm_numiocu()) { + if (mips_cps_numiocu(0)) { write_gcr_reg0_base(start); write_gcr_reg0_mask(mask | CM_GCR_REGn_MASK_CMTGT_IOCU0); @@ -213,7 +213,7 @@ void __init mips_pcibios_init(void) msc_io_resource.end = (map & mask) | ~mask; msc_controller.io_offset = 0; ioport_resource.end = ~mask; - if (mips_cm_numiocu()) { + if (mips_cps_numiocu(0)) { write_gcr_reg1_base(start); write_gcr_reg1_mask(mask | CM_GCR_REGn_MASK_CMTGT_IOCU0); diff --git a/arch/mips/pci/pci-mt7620.c b/arch/mips/pci/pci-mt7620.c index 628c5132b3d8..90fba9bf98da 100644 --- a/arch/mips/pci/pci-mt7620.c +++ b/arch/mips/pci/pci-mt7620.c @@ -291,7 +291,7 @@ static int mt7620_pci_probe(struct platform_device *pdev) IORESOURCE_MEM, 1); u32 val = 0; - rstpcie0 = devm_reset_control_get(&pdev->dev, "pcie0"); + rstpcie0 = devm_reset_control_get_exclusive(&pdev->dev, "pcie0"); if (IS_ERR(rstpcie0)) return PTR_ERR(rstpcie0); @@ -361,7 +361,7 @@ static int mt7620_pci_probe(struct platform_device *pdev) return 0; } -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { u16 cmd; u32 val; diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c index 9ee01936862e..3e92a06fa772 100644 --- a/arch/mips/pci/pci-octeon.c +++ b/arch/mips/pci/pci-octeon.c @@ -59,8 +59,7 @@ union octeon_pci_address { } s; }; -int __initconst (*octeon_pcibios_map_irq)(const struct pci_dev *dev, - u8 slot, u8 pin); +int (*octeon_pcibios_map_irq)(const struct pci_dev *dev, u8 slot, u8 pin); enum octeon_dma_bar_type octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_INVALID; /** @@ -74,7 +73,7 @@ enum octeon_dma_bar_type octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_INVALID; * as it goes through each bridge. 
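The pci-legacy.c hunks above replace pci_scan_root_bus() plus the global pci_fixup_irqs() pass with an explicitly allocated pci_host_bridge that carries its own swizzle_irq/map_irq hooks. Condensed into one place, the registration pattern looks roughly like the sketch below; it is not a drop-in replacement for pcibios_scanbus() (error handling and bus-number bookkeeping are trimmed, and the resource list is taken as a parameter):

#include <linux/errno.h>
#include <linux/pci.h>

static int register_hose(struct pci_controller *hose,
			 struct list_head *resources, int busnr)
{
	struct pci_host_bridge *bridge = pci_alloc_host_bridge(0);

	if (!bridge)
		return -ENOMEM;

	list_splice_init(resources, &bridge->windows);
	bridge->sysdata = hose;
	bridge->busnr = busnr;
	bridge->ops = hose->pci_ops;
	/* The IRQ swizzle/map hooks now travel with the bridge, so the PCI
	 * core assigns IRQs as devices are enumerated and the old
	 * pci_fixup_irqs() call in pcibios_init() can be dropped. */
	bridge->swizzle_irq = pci_common_swizzle;
	bridge->map_irq = pcibios_map_irq;

	if (pci_scan_root_bus_bridge(bridge)) {
		pci_free_host_bridge(bridge);
		return -ENODEV;
	}
	hose->bus = bridge->bus;
	return 0;
}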
* Returns Interrupt number for the device */ -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { if (octeon_pcibios_map_irq) return octeon_pcibios_map_irq(dev, slot, pin); diff --git a/arch/mips/pci/pci-rt2880.c b/arch/mips/pci/pci-rt2880.c index d6360fe73d05..711cdccdf65b 100644 --- a/arch/mips/pci/pci-rt2880.c +++ b/arch/mips/pci/pci-rt2880.c @@ -181,7 +181,7 @@ static inline void rt2880_pci_write_u32(unsigned long reg, u32 val) spin_unlock_irqrestore(&rt2880_pci_lock, flags); } -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { u16 cmd; int irq = -1; diff --git a/arch/mips/pci/pci-rt3883.c b/arch/mips/pci/pci-rt3883.c index 3520e9b414e7..958899ffe99c 100644 --- a/arch/mips/pci/pci-rt3883.c +++ b/arch/mips/pci/pci-rt3883.c @@ -207,8 +207,7 @@ static int rt3883_pci_irq_init(struct device *dev, irq = irq_of_parse_and_map(rpc->intc_of_node, 0); if (irq == 0) { - dev_err(dev, "%s has no IRQ", - of_node_full_name(rpc->intc_of_node)); + dev_err(dev, "%pOF has no IRQ", rpc->intc_of_node); return -EINVAL; } @@ -438,8 +437,8 @@ static int rt3883_pci_probe(struct platform_device *pdev) } if (!rpc->intc_of_node) { - dev_err(dev, "%s has no %s child node", - of_node_full_name(rpc->intc_of_node), + dev_err(dev, "%pOF has no %s child node", + rpc->intc_of_node, "interrupt controller"); return -EINVAL; } @@ -454,8 +453,8 @@ static int rt3883_pci_probe(struct platform_device *pdev) } if (!rpc->pci_controller.of_node) { - dev_err(dev, "%s has no %s child node", - of_node_full_name(rpc->intc_of_node), + dev_err(dev, "%pOF has no %s child node", + rpc->intc_of_node, "PCI host bridge"); err = -EINVAL; goto err_put_intc_node; @@ -565,7 +564,7 @@ static int rt3883_pci_probe(struct platform_device *pdev) return err; } -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { return of_irq_parse_and_map_pci(dev, slot, pin); } diff --git a/arch/mips/pci/pci-tx4938.c b/arch/mips/pci/pci-tx4938.c index 000c0e1f9ef8..a6418460e3c4 100644 --- a/arch/mips/pci/pci-tx4938.c +++ b/arch/mips/pci/pci-tx4938.c @@ -112,7 +112,7 @@ int __init tx4938_pciclk66_setup(void) return pciclk; } -int __init tx4938_pcic1_map_irq(const struct pci_dev *dev, u8 slot) +int tx4938_pcic1_map_irq(const struct pci_dev *dev, u8 slot) { if (get_tx4927_pcicptr(dev->bus->sysdata) == tx4938_pcic1ptr) { switch (slot) { diff --git a/arch/mips/pci/pci-tx4939.c b/arch/mips/pci/pci-tx4939.c index 9d6acc00f348..09a65f7dbe7c 100644 --- a/arch/mips/pci/pci-tx4939.c +++ b/arch/mips/pci/pci-tx4939.c @@ -48,7 +48,7 @@ void __init tx4939_report_pci1clk(void) ((pciclk + 50000) / 100000) % 10); } -int __init tx4939_pcic1_map_irq(const struct pci_dev *dev, u8 slot) +int tx4939_pcic1_map_irq(const struct pci_dev *dev, u8 slot) { if (get_tx4927_pcicptr(dev->bus->sysdata) == tx4939_pcic1ptr) { switch (slot) { @@ -68,7 +68,7 @@ int __init tx4939_pcic1_map_irq(const struct pci_dev *dev, u8 slot) return -1; } -int __init tx4939_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int tx4939_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { int irq = tx4939_pcic1_map_irq(dev, slot); diff --git a/arch/mips/pci/pci-xlp.c b/arch/mips/pci/pci-xlp.c index 7babf01600cb..9eff9137f78e 100644 --- a/arch/mips/pci/pci-xlp.c +++ b/arch/mips/pci/pci-xlp.c @@ -205,7 +205,7 @@ int xlp_socdev_to_node(const struct pci_dev 
*lnkdev) return PCI_SLOT(lnkdev->devfn) / 8; } -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { struct pci_dev *lnkdev; int lnkfunc, node; diff --git a/arch/mips/pci/pci-xlr.c b/arch/mips/pci/pci-xlr.c index 26d2dabef281..2a1c81a129ba 100644 --- a/arch/mips/pci/pci-xlr.c +++ b/arch/mips/pci/pci-xlr.c @@ -315,7 +315,7 @@ static void xls_pcie_ack_b(struct irq_data *d) } } -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { return get_irq_vector(dev); } diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c index ad3584dbc9d7..fd2887415bc8 100644 --- a/arch/mips/pci/pcie-octeon.c +++ b/arch/mips/pci/pcie-octeon.c @@ -1464,8 +1464,7 @@ static int cvmx_pcie_rc_initialize(int pcie_port) * as it goes through each bridge. * Returns Interrupt number for the device */ -int __init octeon_pcie_pcibios_map_irq(const struct pci_dev *dev, - u8 slot, u8 pin) +int octeon_pcie_pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { /* * The EBH5600 board with the PCI to PCIe bridge mistakenly diff --git a/arch/mips/pistachio/init.c b/arch/mips/pistachio/init.c index 1c91cad7988f..0b06c953d293 100644 --- a/arch/mips/pistachio/init.c +++ b/arch/mips/pistachio/init.c @@ -19,8 +19,7 @@ #include #include #include -#include -#include +#include #include #include #include diff --git a/arch/mips/pistachio/irq.c b/arch/mips/pistachio/irq.c index 0a6b24c24652..709a8219073a 100644 --- a/arch/mips/pistachio/irq.c +++ b/arch/mips/pistachio/irq.c @@ -10,7 +10,6 @@ #include #include -#include #include #include diff --git a/arch/mips/pistachio/time.c b/arch/mips/pistachio/time.c index 17a0f1dec05b..8a6af9b76202 100644 --- a/arch/mips/pistachio/time.c +++ b/arch/mips/pistachio/time.c @@ -12,9 +12,9 @@ #include #include #include -#include #include +#include #include unsigned int get_c0_compare_int(void) diff --git a/arch/mips/pmcs-msp71xx/msp_smp.c b/arch/mips/pmcs-msp71xx/msp_smp.c index ffa0f7101a97..2b08242ade62 100644 --- a/arch/mips/pmcs-msp71xx/msp_smp.c +++ b/arch/mips/pmcs-msp71xx/msp_smp.c @@ -22,6 +22,8 @@ #include #include +#include + #ifdef CONFIG_MIPS_MT_SMP #define MIPS_CPU_IPI_RESCHED_IRQ 0 /* SW int 0 for resched */ #define MIPS_CPU_IPI_CALL_IRQ 1 /* SW int 1 for call */ diff --git a/arch/mips/pnx833x/common/platform.c b/arch/mips/pnx833x/common/platform.c index 7cf4eb50fc72..a7a4e9f5146d 100644 --- a/arch/mips/pnx833x/common/platform.c +++ b/arch/mips/pnx833x/common/platform.c @@ -30,7 +30,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/mips/ralink/Kconfig b/arch/mips/ralink/Kconfig index 710b04cf4851..b4627080b828 100644 --- a/arch/mips/ralink/Kconfig +++ b/arch/mips/ralink/Kconfig @@ -82,6 +82,16 @@ choice depends on SOC_MT7620 select BUILTIN_DTB + config DTB_OMEGA2P + bool "Onion Omega2+" + depends on SOC_MT7620 + select BUILTIN_DTB + + config DTB_VOCORE2 + bool "VoCore2" + depends on SOC_MT7620 + select BUILTIN_DTB + endchoice endif diff --git a/arch/mips/ralink/clk.c b/arch/mips/ralink/clk.c index eb1c61917eb7..1b7df115eb60 100644 --- a/arch/mips/ralink/clk.c +++ b/arch/mips/ralink/clk.c @@ -53,6 +53,9 @@ EXPORT_SYMBOL_GPL(clk_disable); unsigned long clk_get_rate(struct clk *clk) { + if (!clk) + return 0; + return clk->rate; } EXPORT_SYMBOL_GPL(clk_get_rate); diff --git a/arch/mips/ralink/irq-gic.c b/arch/mips/ralink/irq-gic.c index 2058280450b5..bda576f2cad8 100644 --- 
a/arch/mips/ralink/irq-gic.c +++ b/arch/mips/ralink/irq-gic.c @@ -11,7 +11,7 @@ #include #include -#include +#include int get_c0_perfcount_int(void) { diff --git a/arch/mips/ralink/mt7621.c b/arch/mips/ralink/mt7621.c index 0695c2d64e49..1b274742077d 100644 --- a/arch/mips/ralink/mt7621.c +++ b/arch/mips/ralink/mt7621.c @@ -12,8 +12,7 @@ #include #include -#include -#include +#include #include #include @@ -199,7 +198,7 @@ void prom_soc_init(struct ralink_soc_info *soc_info) mips_cm_probe(); mips_cpc_probe(); - if (mips_cm_numiocu()) { + if (mips_cps_numiocu(0)) { /* * mips_cm_probe() wipes out bootloader * config for CM regions and we have to configure them diff --git a/arch/mips/rb532/devices.c b/arch/mips/rb532/devices.c index 0966adccf520..32ea3e6731d6 100644 --- a/arch/mips/rb532/devices.c +++ b/arch/mips/rb532/devices.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c index 4cd47d23d81a..545446dfe7fa 100644 --- a/arch/mips/sgi-ip27/ip27-smp.c +++ b/arch/mips/sgi-ip27/ip27-smp.c @@ -195,7 +195,7 @@ static void ip27_smp_finish(void) * set sp to the kernel stack of the newly created idle process, gp to the proc * struct so that current_thread_info() will work. */ -static void ip27_boot_secondary(int cpu, struct task_struct *idle) +static int ip27_boot_secondary(int cpu, struct task_struct *idle) { unsigned long gp = (unsigned long)task_thread_info(idle); unsigned long sp = __KSTK_TOS(idle); @@ -203,6 +203,7 @@ static void ip27_boot_secondary(int cpu, struct task_struct *idle) LAUNCH_SLAVE(cputonasid(cpu), cputoslice(cpu), (launch_proc_t)MAPPED_KERN_RW_TO_K0(smp_bootstrap), 0, (void *) sp, (void *) gp); + return 0; } static void __init ip27_smp_setup(void) @@ -231,7 +232,7 @@ static void __init ip27_prepare_cpus(unsigned int max_cpus) /* We already did everything necessary earlier */ } -struct plat_smp_ops ip27_smp_ops = { +const struct plat_smp_ops ip27_smp_ops = { .send_ipi_single = ip27_send_ipi_single, .send_ipi_mask = ip27_send_ipi_mask, .init_secondary = ip27_init_secondary, diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c index d0e94ffcc1b8..90c9d1255ad7 100644 --- a/arch/mips/sibyte/bcm1480/smp.c +++ b/arch/mips/sibyte/bcm1480/smp.c @@ -117,7 +117,7 @@ static void bcm1480_smp_finish(void) * Setup the PC, SP, and GP of a secondary processor and start it * running! 
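Across the SMP backends touched in this series (netlogic, paravirt and ip27 above, and the SiByte bcm1480/sb1250 conversions that follow), boot_secondary now returns int and the plat_smp_ops tables become const. The point of the signature change is that a firmware failure to start a CPU can be propagated instead of merely printed. A sketch of how a caller can use that; the helper name is invented here, and the real consumer is the generic MIPS __cpu_up() path, which is not part of these hunks:

#include <linux/printk.h>
#include <linux/sched.h>
#include <asm/smp-ops.h>	/* struct plat_smp_ops */

/* Illustrative helper only; error handling policy is the caller's. */
static int bringup_secondary(const struct plat_smp_ops *ops, int cpu,
			     struct task_struct *idle)
{
	int err = ops->boot_secondary(cpu, idle);

	/* With the old void hook, a cfe_start_cpu() failure could only be
	 * printk()ed while the CPU-hotplug core waited for a CPU that was
	 * never going to come online; now the bring-up can fail cleanly. */
	if (err)
		pr_err("CPU%d failed to start: %d\n", cpu, err);
	return err;
}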
*/ -static void bcm1480_boot_secondary(int cpu, struct task_struct *idle) +static int bcm1480_boot_secondary(int cpu, struct task_struct *idle) { int retval; @@ -126,6 +126,7 @@ static void bcm1480_boot_secondary(int cpu, struct task_struct *idle) (unsigned long)task_thread_info(idle), 0); if (retval != 0) printk("cfe_start_cpu(%i) returned %i\n" , cpu, retval); + return retval; } /* @@ -157,7 +158,7 @@ static void __init bcm1480_prepare_cpus(unsigned int max_cpus) { } -struct plat_smp_ops bcm1480_smp_ops = { +const struct plat_smp_ops bcm1480_smp_ops = { .send_ipi_single = bcm1480_send_ipi_single, .send_ipi_mask = bcm1480_send_ipi_mask, .init_secondary = bcm1480_init_secondary, diff --git a/arch/mips/sibyte/common/cfe.c b/arch/mips/sibyte/common/cfe.c index c1a11a11db7f..115399202eab 100644 --- a/arch/mips/sibyte/common/cfe.c +++ b/arch/mips/sibyte/common/cfe.c @@ -229,8 +229,8 @@ static int __init initrd_setup(char *str) #endif -extern struct plat_smp_ops sb_smp_ops; -extern struct plat_smp_ops bcm1480_smp_ops; +extern const struct plat_smp_ops sb_smp_ops; +extern const struct plat_smp_ops bcm1480_smp_ops; /* * prom_init is called just after the cpu type is determined, from setup_arch() diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c index 0a4a2c3982d8..5baabca52f25 100644 --- a/arch/mips/sibyte/sb1250/smp.c +++ b/arch/mips/sibyte/sb1250/smp.c @@ -106,7 +106,7 @@ static void sb1250_smp_finish(void) * Setup the PC, SP, and GP of a secondary processor and start it * running! */ -static void sb1250_boot_secondary(int cpu, struct task_struct *idle) +static int sb1250_boot_secondary(int cpu, struct task_struct *idle) { int retval; @@ -115,6 +115,7 @@ static void sb1250_boot_secondary(int cpu, struct task_struct *idle) (unsigned long)task_thread_info(idle), 0); if (retval != 0) printk("cfe_start_cpu(%i) returned %i\n" , cpu, retval); + return retval; } /* @@ -146,7 +147,7 @@ static void __init sb1250_prepare_cpus(unsigned int max_cpus) { } -struct plat_smp_ops sb_smp_ops = { +const struct plat_smp_ops sb_smp_ops = { .send_ipi_single = sb1250_send_ipi_single, .send_ipi_mask = sb1250_send_ipi_mask, .init_secondary = sb1250_init_secondary, diff --git a/arch/mips/tools/generic-board-config.sh b/arch/mips/tools/generic-board-config.sh new file mode 100755 index 000000000000..5c4f93687039 --- /dev/null +++ b/arch/mips/tools/generic-board-config.sh @@ -0,0 +1,90 @@ +#!/bin/sh +# +# Copyright (C) 2017 Imagination Technologies +# Author: Paul Burton +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the +# Free Software Foundation; either version 2 of the License, or (at your +# option) any later version. +# +# This script merges configuration fragments for boards supported by the +# generic MIPS kernel. It checks each for requirements specified using +# formatted comments, and then calls merge_config.sh to merge those +# fragments which have no unmet requirements. +# +# An example of requirements in your board config fragment might be: +# +# # require CONFIG_CPU_MIPS32_R2=y +# # require CONFIG_CPU_LITTLE_ENDIAN=y +# +# This would mean that your board is only included in kernels which are +# configured for little endian MIPS32r2 CPUs, and not for example in kernels +# configured for 64 bit or big endian systems. +# + +srctree="$1" +objtree="$2" +ref_cfg="$3" +cfg="$4" +boards_origin="$5" +shift 5 + +cd "${srctree}" + +# Only print Skipping... 
lines if the user explicitly specified BOARDS=. In the +# general case it only serves to obscure the useful output about what actually +# was included. +case ${boards_origin} in +"command line") + print_skipped=1 + ;; +environment*) + print_skipped=1 + ;; +*) + print_skipped=0 + ;; +esac + +for board in $@; do + board_cfg="arch/mips/configs/generic/board-${board}.config" + if [ ! -f "${board_cfg}" ]; then + echo "WARNING: Board config '${board_cfg}' not found" + continue + fi + + # For each line beginning with # require, cut out the field following + # it & search for that in the reference config file. If the requirement + # is not found then the subshell will exit with code 1, and we'll + # continue on to the next board. + grep -E '^# require ' "${board_cfg}" | \ + cut -d' ' -f 3- | \ + while read req; do + case ${req} in + *=y) + # If we require something =y then we check that a line + # containing it is present in the reference config. + grep -Eq "^${req}\$" "${ref_cfg}" && continue + ;; + *=n) + # If we require something =n then we just invert that + # check, considering the requirement met if there isn't + # a line containing the value =y in the reference + # config. + grep -Eq "^${req/%=n/=y}\$" "${ref_cfg}" || continue + ;; + *) + echo "WARNING: Unhandled requirement '${req}'" + ;; + esac + + [ ${print_skipped} -eq 1 ] && echo "Skipping ${board_cfg}" + exit 1 + done || continue + + # Merge this board config fragment into our final config file + ./scripts/kconfig/merge_config.sh \ + -m -O ${objtree} ${cfg} ${board_cfg} \ + | grep -Ev '^(#|Using)' +done diff --git a/arch/mips/txx9/generic/pci.c b/arch/mips/txx9/generic/pci.c index 0bd2a1e1ff9a..fb998726bd5d 100644 --- a/arch/mips/txx9/generic/pci.c +++ b/arch/mips/txx9/generic/pci.c @@ -386,9 +386,10 @@ int pcibios_plat_dev_init(struct pci_dev *dev) return 0; } -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +static int (*txx9_pci_map_irq)(const struct pci_dev *dev, u8 slot, u8 pin); +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { - return txx9_board_vec->pci_map_irq(dev, slot, pin); + return txx9_pci_map_irq(dev, slot, pin); } char * (*txx9_board_pcibios_setup)(char *str) __initdata; @@ -424,5 +425,8 @@ char *__init txx9_pcibios_setup(char *str) txx9_pci_err_action = TXX9_PCI_ERR_IGNORE; return NULL; } + + txx9_pci_map_irq = txx9_board_vec->pci_map_irq; + return str; } diff --git a/arch/mips/vdso/gettimeofday.c b/arch/mips/vdso/gettimeofday.c index e2690d7ca4dd..e22b422f282c 100644 --- a/arch/mips/vdso/gettimeofday.c +++ b/arch/mips/vdso/gettimeofday.c @@ -11,12 +11,10 @@ #include "vdso.h" #include -#include #include #include #include -#include #include #include @@ -126,9 +124,9 @@ static __always_inline u64 read_gic_count(const union mips_vdso_data *data) u32 hi, hi2, lo; do { - hi = __raw_readl(gic + GIC_UMV_SH_COUNTER_63_32_OFS); - lo = __raw_readl(gic + GIC_UMV_SH_COUNTER_31_00_OFS); - hi2 = __raw_readl(gic + GIC_UMV_SH_COUNTER_63_32_OFS); + hi = __raw_readl(gic + sizeof(lo)); + lo = __raw_readl(gic); + hi2 = __raw_readl(gic + sizeof(lo)); } while (hi2 != hi); return (((u64)hi) << 32) + lo; diff --git a/arch/mips/vdso/sigreturn.S b/arch/mips/vdso/sigreturn.S index 715bf5993529..30c6219912ac 100644 --- a/arch/mips/vdso/sigreturn.S +++ b/arch/mips/vdso/sigreturn.S @@ -19,31 +19,21 @@ .cfi_sections .debug_frame LEAF(__vdso_rt_sigreturn) - .cfi_startproc - .frame sp, 0, ra - .mask 0x00000000, 0 - .fmask 0x00000000, 0 .cfi_signal_frame li v0, __NR_rt_sigreturn syscall - .cfi_endproc 
END(__vdso_rt_sigreturn) #if _MIPS_SIM == _MIPS_SIM_ABI32 LEAF(__vdso_sigreturn) - .cfi_startproc - .frame sp, 0, ra - .mask 0x00000000, 0 - .fmask 0x00000000, 0 .cfi_signal_frame li v0, __NR_sigreturn syscall - .cfi_endproc END(__vdso_sigreturn) #endif diff --git a/arch/mn10300/configs/asb2303_defconfig b/arch/mn10300/configs/asb2303_defconfig index 1fd41ec1dfb5..d06dae131139 100644 --- a/arch/mn10300/configs/asb2303_defconfig +++ b/arch/mn10300/configs/asb2303_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_TINY_RCU=y @@ -28,16 +27,13 @@ CONFIG_IP_PNP_BOOTP=y # CONFIG_INET_XFRM_MODE_TRANSPORT is not set # CONFIG_INET_XFRM_MODE_TUNNEL is not set # CONFIG_INET_XFRM_MODE_BEET is not set -# CONFIG_INET_LRO is not set # CONFIG_INET_DIAG is not set # CONFIG_IPV6 is not set # CONFIG_WIRELESS is not set CONFIG_MTD=y CONFIG_MTD_DEBUG=y -CONFIG_MTD_PARTITIONS=y CONFIG_MTD_REDBOOT_PARTS=y CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED=y -CONFIG_MTD_CHAR=y CONFIG_MTD_CFI=y CONFIG_MTD_JEDECPROBE=y CONFIG_MTD_CFI_ADV_OPTIONS=y @@ -48,8 +44,6 @@ CONFIG_MTD_PHYSMAP=y CONFIG_NETDEVICES=y CONFIG_NET_ETHERNET=y CONFIG_SMC91X=y -# CONFIG_NETDEV_1000 is not set -# CONFIG_NETDEV_10000 is not set # CONFIG_WLAN is not set # CONFIG_INPUT is not set # CONFIG_SERIO is not set diff --git a/arch/mn10300/configs/asb2364_defconfig b/arch/mn10300/configs/asb2364_defconfig index cd0a6cb17dee..b1d80cee97ee 100644 --- a/arch/mn10300/configs/asb2364_defconfig +++ b/arch/mn10300/configs/asb2364_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_BSD_PROCESS_ACCT=y @@ -40,7 +39,6 @@ CONFIG_IP_PNP_BOOTP=y # CONFIG_INET_XFRM_MODE_TRANSPORT is not set # CONFIG_INET_XFRM_MODE_TUNNEL is not set # CONFIG_INET_XFRM_MODE_BEET is not set -# CONFIG_INET_LRO is not set # CONFIG_INET_DIAG is not set CONFIG_IPV6=y # CONFIG_INET6_XFRM_MODE_TRANSPORT is not set @@ -50,10 +48,8 @@ CONFIG_IPV6=y CONFIG_CONNECTOR=y CONFIG_MTD=y CONFIG_MTD_DEBUG=y -CONFIG_MTD_PARTITIONS=y CONFIG_MTD_REDBOOT_PARTS=y CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED=y -CONFIG_MTD_CHAR=y CONFIG_MTD_CFI=y CONFIG_MTD_JEDECPROBE=y CONFIG_MTD_CFI_ADV_OPTIONS=y @@ -64,8 +60,6 @@ CONFIG_MTD_PHYSMAP=y CONFIG_NETDEVICES=y CONFIG_NET_ETHERNET=y CONFIG_SMSC911X=y -# CONFIG_NETDEV_1000 is not set -# CONFIG_NETDEV_10000 is not set # CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set @@ -77,7 +71,6 @@ CONFIG_SERIAL_8250_EXTENDED=y CONFIG_SERIAL_8250_SHARE_IRQ=y # CONFIG_HW_RANDOM is not set # CONFIG_HWMON is not set -# CONFIG_HID_SUPPORT is not set # CONFIG_USB_SUPPORT is not set CONFIG_PROC_KCORE=y # CONFIG_PROC_PAGE_MONITOR is not set @@ -93,4 +86,3 @@ CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y # CONFIG_DEBUG_BUGVERBOSE is not set CONFIG_DEBUG_INFO=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set diff --git a/arch/mn10300/include/uapi/asm/socket.h b/arch/mn10300/include/uapi/asm/socket.h index c710db354ff2..ac82a3f26dbf 100644 --- a/arch/mn10300/include/uapi/asm/socket.h +++ b/arch/mn10300/include/uapi/asm/socket.h @@ -102,4 +102,6 @@ #define SO_PEERGROUPS 59 +#define SO_ZEROCOPY 60 + #endif /* _ASM_SOCKET_H */ diff --git a/arch/mn10300/kernel/process.c b/arch/mn10300/kernel/process.c index 89e8027e07fb..7c475fd99c46 100644 --- a/arch/mn10300/kernel/process.c +++ b/arch/mn10300/kernel/process.c @@ -59,10 +59,6 @@ void arch_cpu_idle(void) } #endif -void release_segments(struct mm_struct *mm) -{ -} - void machine_restart(char *cmd) { #ifdef 
CONFIG_KERNEL_DEBUGGER @@ -112,14 +108,6 @@ void release_thread(struct task_struct *dead_task) { } -/* - * we do not have to muck with descriptors here, that is - * done in switch_mm() as needed. - */ -void copy_segments(struct task_struct *p, struct mm_struct *new_mm) -{ -} - /* * this gets called so that we can store lazy state into memory and copy the * current task into the new thread. diff --git a/arch/nios2/boot/dts/3c120_devboard.dts b/arch/nios2/boot/dts/3c120_devboard.dts index 31c51f9a2f09..36ccdf05837d 100644 --- a/arch/nios2/boot/dts/3c120_devboard.dts +++ b/arch/nios2/boot/dts/3c120_devboard.dts @@ -159,6 +159,7 @@ partition@800000 { }; chosen { - bootargs = "debug console=ttyJ0,115200"; + bootargs = "debug earlycon console=ttyJ0,115200"; + stdout-path = &jtag_uart; }; }; diff --git a/arch/nios2/include/asm/dma-mapping.h b/arch/nios2/include/asm/dma-mapping.h index 7b3c6f280293..f8dc62222741 100644 --- a/arch/nios2/include/asm/dma-mapping.h +++ b/arch/nios2/include/asm/dma-mapping.h @@ -18,7 +18,7 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) } /* - * dma_alloc_noncoherent() returns non-cacheable memory, so there's no need to + * dma_alloc_attrs() always returns non-cacheable memory, so there's no need to * do any flushing here. */ static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, diff --git a/arch/nios2/kernel/time.c b/arch/nios2/kernel/time.c index 645129aaa9a0..20e86209ef2e 100644 --- a/arch/nios2/kernel/time.c +++ b/arch/nios2/kernel/time.c @@ -107,7 +107,10 @@ static struct nios2_clocksource nios2_cs = { cycles_t get_cycles(void) { - return nios2_timer_read(&nios2_cs.cs); + /* Only read timer if it has been initialized */ + if (nios2_cs.timer.base) + return nios2_timer_read(&nios2_cs.cs); + return 0; } EXPORT_SYMBOL(get_cycles); diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig index 1e95920b0737..a0f2e4a323c1 100644 --- a/arch/openrisc/Kconfig +++ b/arch/openrisc/Kconfig @@ -29,6 +29,9 @@ config OPENRISC select CPU_NO_EFFICIENT_FFS if !OPENRISC_HAVE_INST_FF1 select NO_BOOTMEM +config CPU_BIG_ENDIAN + def_bool y + config MMU def_bool y diff --git a/arch/openrisc/include/asm/pgtable.h b/arch/openrisc/include/asm/pgtable.h index ff97374ca069..71a6f08de8f2 100644 --- a/arch/openrisc/include/asm/pgtable.h +++ b/arch/openrisc/include/asm/pgtable.h @@ -414,6 +414,8 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep) extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* defined in head.S */ +struct vm_area_struct; + /* * or32 doesn't have any external MMU info: the kernel page * tables contain all the necessary information. diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 13648519bd41..a57dedbfc7b7 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -59,6 +59,9 @@ config PARISC config CPU_BIG_ENDIAN def_bool y +config CPU_BIG_ENDIAN + def_bool y + config MMU def_bool y @@ -254,6 +257,18 @@ config PARISC_PAGE_SIZE_64KB endchoice +config PARISC_SELF_EXTRACT + bool "Build kernel as self-extracting executable" + default y + help + Say Y if you want to build the parisc kernel as a kind of + self-extracting executable. + + If you say N here, the kernel will be compressed with gzip + which can be loaded by the palo bootloader directly too. + + If you don't know what to do here, say Y. 
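Further up in this hunk group, the nios2 get_cycles() change guards the clocksource read with a check that the timer has actually been registered. get_cycles() can be called very early in boot (for example while seeding the random pool), before the timer is probed, and reading through an unset base address would fault. A stand-alone sketch of the same defensive pattern; the structure and names here are invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* Invented structure; stands in for the clocksource state. */
struct cycle_timer {
	volatile uint32_t *base;	/* NULL until the timer is probed */
};

static struct cycle_timer boot_timer;	/* zero-initialized at startup */

static uint64_t read_cycles(void)
{
	/* Returning 0 for early callers is harmless; dereferencing an
	 * unset base pointer is not. */
	if (!boot_timer.base)
		return 0;
	return *boot_timer.base;
}

int main(void)
{
	printf("cycles before probe: %llu\n",
	       (unsigned long long)read_cycles());
	return 0;
}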
+ config SMP bool "Symmetric multi-processing support" ---help--- diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index 58fae5d2449d..01946ebaff72 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile @@ -129,8 +129,13 @@ Image: vmlinux bzImage: vmlinux $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ +ifdef CONFIG_PARISC_SELF_EXTRACT vmlinuz: bzImage $(OBJCOPY) $(boot)/bzImage $@ +else +vmlinuz: vmlinux + @gzip -cf -9 $< > $@ +endif install: $(CONFIG_SHELL) $(src)/arch/parisc/install.sh \ diff --git a/arch/parisc/boot/compressed/Makefile b/arch/parisc/boot/compressed/Makefile index 5450a11c9d10..7d7e594bda36 100644 --- a/arch/parisc/boot/compressed/Makefile +++ b/arch/parisc/boot/compressed/Makefile @@ -15,7 +15,7 @@ targets += misc.o piggy.o sizes.h head.o real2.o firmware.o KBUILD_CFLAGS := -D__KERNEL__ -O2 -DBOOTLOADER KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks -KBUILD_CFLAGS += -fno-PIE -mno-space-regs -mdisable-fpregs +KBUILD_CFLAGS += -fno-PIE -mno-space-regs -mdisable-fpregs -Os ifndef CONFIG_64BIT KBUILD_CFLAGS += -mfast-indirect-calls endif diff --git a/arch/parisc/boot/compressed/misc.c b/arch/parisc/boot/compressed/misc.c index 13a4bf9ac4da..9345b44b86f0 100644 --- a/arch/parisc/boot/compressed/misc.c +++ b/arch/parisc/boot/compressed/misc.c @@ -24,7 +24,8 @@ /* Symbols defined by linker scripts */ extern char input_data[]; extern int input_len; -extern __le32 output_len; /* at unaligned address, little-endian */ +/* output_len is inserted by the linker possibly at an unaligned address */ +extern __le32 output_len __aligned(1); extern char _text, _end; extern char _bss, _ebss; extern char _startcode_end; diff --git a/arch/parisc/configs/c3000_defconfig b/arch/parisc/configs/c3000_defconfig index 0764d3971cf6..8d41a73bd71b 100644 --- a/arch/parisc/configs/c3000_defconfig +++ b/arch/parisc/configs/c3000_defconfig @@ -31,7 +31,6 @@ CONFIG_IP_PNP_BOOTP=y CONFIG_INET6_IPCOMP=m CONFIG_IPV6_TUNNEL=m CONFIG_NETFILTER=y -CONFIG_NETFILTER_DEBUG=y CONFIG_NET_PKTGEN=m CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y diff --git a/arch/parisc/include/asm/pdc.h b/arch/parisc/include/asm/pdc.h index 26b4455baa83..510341f62d97 100644 --- a/arch/parisc/include/asm/pdc.h +++ b/arch/parisc/include/asm/pdc.h @@ -280,6 +280,7 @@ void setup_pdc(void); /* in inventory.c */ /* wrapper-functions from pdc.c */ int pdc_add_valid(unsigned long address); +int pdc_instr(unsigned int *instr); int pdc_chassis_info(struct pdc_chassis_info *chassis_info, void *led_info, unsigned long len); int pdc_chassis_disp(unsigned long disp); int pdc_chassis_warn(unsigned long *warn); diff --git a/arch/parisc/include/asm/smp.h b/arch/parisc/include/asm/smp.h index a5dc9066c6d8..ad9c9c3b4136 100644 --- a/arch/parisc/include/asm/smp.h +++ b/arch/parisc/include/asm/smp.h @@ -1,6 +1,7 @@ #ifndef __ASM_SMP_H #define __ASM_SMP_H +extern int init_per_cpu(int cpuid); #if defined(CONFIG_SMP) diff --git a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h index 9a9c2fe4be50..775b5d5e41a1 100644 --- a/arch/parisc/include/uapi/asm/mman.h +++ b/arch/parisc/include/uapi/asm/mman.h @@ -57,6 +57,9 @@ overrides the coredump filter bits */ #define MADV_DODUMP 70 /* Clear the MADV_NODUMP flag */ +#define MADV_WIPEONFORK 71 /* Zero memory on fork, child only */ +#define MADV_KEEPONFORK 72 /* Undo MADV_WIPEONFORK */ + #define MADV_HWPOISON 100 /* poison a page for testing */ #define MADV_SOFT_OFFLINE 101 /* soft offline page for testing */ @@ 
-64,17 +67,6 @@ #define MAP_FILE 0 #define MAP_VARIABLE 0 -/* - * When MAP_HUGETLB is set bits [26:31] encode the log2 of the huge page size. - * This gives us 6 bits, which is enough until someone invents 128 bit address - * spaces. - * - * Assume these are all power of twos. - * When 0 use the default page size. - */ -#define MAP_HUGE_SHIFT 26 -#define MAP_HUGE_MASK 0x3f - #define PKEY_DISABLE_ACCESS 0x1 #define PKEY_DISABLE_WRITE 0x2 #define PKEY_ACCESS_MASK (PKEY_DISABLE_ACCESS |\ diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h index a0d4dc9f4eb2..3b2bf7ae703b 100644 --- a/arch/parisc/include/uapi/asm/socket.h +++ b/arch/parisc/include/uapi/asm/socket.h @@ -101,4 +101,6 @@ #define SO_PEERGROUPS 0x4034 +#define SO_ZEROCOPY 0x4035 + #endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c index ab80e5c6f651..6d471c00c71a 100644 --- a/arch/parisc/kernel/firmware.c +++ b/arch/parisc/kernel/firmware.c @@ -232,6 +232,26 @@ int pdc_add_valid(unsigned long address) } EXPORT_SYMBOL(pdc_add_valid); +/** + * pdc_instr - Get instruction that invokes PDCE_CHECK in HPMC handler. + * @instr: Pointer to variable which will get instruction opcode. + * + * The return value is PDC_OK (0) in case call succeeded. + */ +int __init pdc_instr(unsigned int *instr) +{ + int retval; + unsigned long flags; + + spin_lock_irqsave(&pdc_lock, flags); + retval = mem_pdc_call(PDC_INSTR, 0UL, __pa(pdc_result)); + convert_to_wide(pdc_result); + *instr = pdc_result[0]; + spin_unlock_irqrestore(&pdc_lock, flags); + + return retval; +} + /** * pdc_chassis_info - Return chassis information. * @result: The return buffer. diff --git a/arch/parisc/kernel/pdt.c b/arch/parisc/kernel/pdt.c index 05730a83895c..00aed082969b 100644 --- a/arch/parisc/kernel/pdt.c +++ b/arch/parisc/kernel/pdt.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -216,8 +217,16 @@ void __init pdc_pdt_init(void) } for (i = 0; i < pdt_status.pdt_entries; i++) { + unsigned long addr; + report_mem_err(pdt_entry[i]); + addr = pdt_entry[i] & PDT_ADDR_PHYS_MASK; + if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && + addr >= initrd_start && addr < initrd_end) + pr_crit("CRITICAL: initrd possibly broken " + "due to bad memory!\n"); + /* mark memory page bad */ memblock_reserve(pdt_entry[i] & PAGE_MASK, PAGE_SIZE); } diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index a45a67d526f8..30f92391a93e 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c @@ -146,7 +146,7 @@ void machine_power_off(void) /* prevent soft lockup/stalled CPU messages for endless loop. */ rcu_sysrq_start(); - lockup_detector_suspend(); + lockup_detector_soft_poweroff(); for (;;); } diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c index a778bd3c107c..e120d63c1b28 100644 --- a/arch/parisc/kernel/processor.c +++ b/arch/parisc/kernel/processor.c @@ -317,7 +317,7 @@ void __init collect_boot_cpu_data(void) * * o Enable CPU profiling hooks. 
*/ -int init_per_cpu(int cpunum) +int __init init_per_cpu(int cpunum) { int ret; struct pdc_coproc_cfg coproc_cfg; diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c index dee6f9d6a153..f7d0c3b33d70 100644 --- a/arch/parisc/kernel/setup.c +++ b/arch/parisc/kernel/setup.c @@ -38,6 +38,7 @@ #include #include #include +#include #include #include @@ -48,6 +49,7 @@ #include #include #include +#include static char __initdata command_line[COMMAND_LINE_SIZE]; @@ -115,7 +117,6 @@ void __init dma_ops_init(void) } #endif -extern int init_per_cpu(int cpuid); extern void collect_boot_cpu_data(void); void __init setup_arch(char **cmdline_p) @@ -398,9 +399,8 @@ static int __init parisc_init(void) } arch_initcall(parisc_init); -void start_parisc(void) +void __init start_parisc(void) { - extern void start_kernel(void); extern void early_trap_init(void); int ret, cpunum; diff --git a/arch/parisc/kernel/signal32.c b/arch/parisc/kernel/signal32.c index 70aaabb8b3cb..9e0cb6a577d6 100644 --- a/arch/parisc/kernel/signal32.c +++ b/arch/parisc/kernel/signal32.c @@ -290,25 +290,25 @@ copy_siginfo_from_user32 (siginfo_t *to, compat_siginfo_t __user *from) if (to->si_code < 0) err |= __copy_from_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE); else { - switch (to->si_code >> 16) { - case __SI_CHLD >> 16: + switch (siginfo_layout(to->si_signo, to->si_code)) { + case SIL_CHLD: err |= __get_user(to->si_utime, &from->si_utime); err |= __get_user(to->si_stime, &from->si_stime); err |= __get_user(to->si_status, &from->si_status); default: + case SIL_KILL: err |= __get_user(to->si_pid, &from->si_pid); err |= __get_user(to->si_uid, &from->si_uid); break; - case __SI_FAULT >> 16: + case SIL_FAULT: err |= __get_user(addr, &from->si_addr); to->si_addr = compat_ptr(addr); break; - case __SI_POLL >> 16: + case SIL_POLL: err |= __get_user(to->si_band, &from->si_band); err |= __get_user(to->si_fd, &from->si_fd); break; - case __SI_RT >> 16: /* This is not generated by the kernel as of now. */ - case __SI_MESGQ >> 16: + case SIL_RT: err |= __get_user(to->si_pid, &from->si_pid); err |= __get_user(to->si_uid, &from->si_uid); err |= __get_user(to->si_int, &from->si_int); @@ -337,41 +337,40 @@ copy_siginfo_to_user32 (compat_siginfo_t __user *to, const siginfo_t *from) at the same time. 
*/ err = __put_user(from->si_signo, &to->si_signo); err |= __put_user(from->si_errno, &to->si_errno); - err |= __put_user((short)from->si_code, &to->si_code); + err |= __put_user(from->si_code, &to->si_code); if (from->si_code < 0) err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE); else { - switch (from->si_code >> 16) { - case __SI_CHLD >> 16: + switch (siginfo_layout(from->si_signo, from->si_code)) { + case SIL_CHLD: err |= __put_user(from->si_utime, &to->si_utime); err |= __put_user(from->si_stime, &to->si_stime); err |= __put_user(from->si_status, &to->si_status); - default: + case SIL_KILL: err |= __put_user(from->si_pid, &to->si_pid); err |= __put_user(from->si_uid, &to->si_uid); break; - case __SI_FAULT >> 16: + case SIL_FAULT: addr = ptr_to_compat(from->si_addr); err |= __put_user(addr, &to->si_addr); break; - case __SI_POLL >> 16: + case SIL_POLL: err |= __put_user(from->si_band, &to->si_band); err |= __put_user(from->si_fd, &to->si_fd); break; - case __SI_TIMER >> 16: + case SIL_TIMER: err |= __put_user(from->si_tid, &to->si_tid); err |= __put_user(from->si_overrun, &to->si_overrun); val = (compat_int_t)from->si_int; err |= __put_user(val, &to->si_int); break; - case __SI_RT >> 16: /* Not generated by the kernel as of now. */ - case __SI_MESGQ >> 16: + case SIL_RT: err |= __put_user(from->si_uid, &to->si_uid); err |= __put_user(from->si_pid, &to->si_pid); val = (compat_int_t)from->si_int; err |= __put_user(val, &to->si_int); break; - case __SI_SYS >> 16: + case SIL_SYS: err |= __put_user(ptr_to_compat(from->si_call_addr), &to->si_call_addr); err |= __put_user(from->si_syscall, &to->si_syscall); err |= __put_user(from->si_arch, &to->si_arch); diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c index 63365106ea19..30c28ab14540 100644 --- a/arch/parisc/kernel/smp.c +++ b/arch/parisc/kernel/smp.c @@ -255,12 +255,11 @@ void arch_send_call_function_single_ipi(int cpu) static void __init smp_cpu_init(int cpunum) { - extern int init_per_cpu(int); /* arch/parisc/kernel/processor.c */ extern void init_IRQ(void); /* arch/parisc/kernel/irq.c */ extern void start_cpu_itimer(void); /* arch/parisc/kernel/time.c */ /* Set modes and Enable floating point coprocessor */ - (void) init_per_cpu(cpunum); + init_per_cpu(cpunum); disable_sr_hashing(); diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index 991654c88eec..230333157fe3 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c @@ -817,7 +817,7 @@ void __init initialize_ivt(const void *iva) u32 check = 0; u32 *ivap; u32 *hpmcp; - u32 length; + u32 length, instr; if (strcmp((const char *)iva, "cows can fly")) panic("IVT invalid"); @@ -827,6 +827,14 @@ void __init initialize_ivt(const void *iva) for (i = 0; i < 8; i++) *ivap++ = 0; + /* + * Use PDC_INSTR firmware function to get instruction that invokes + * PDCE_CHECK in HPMC handler. See programming note at page 1-31 of + * the PA 1.1 Firmware Architecture document. 
+ */ + if (pdc_instr(&instr) == PDC_OK) + ivap[0] = instr; + /* Compute Checksum for HPMC handler */ length = os_hpmc_size; ivap[7] = length; diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c index 48dc7d4d20bb..caab39dfa95d 100644 --- a/arch/parisc/kernel/unwind.c +++ b/arch/parisc/kernel/unwind.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -279,6 +280,17 @@ static void unwind_frame_regs(struct unwind_frame_info *info) info->prev_sp = sp - 64; info->prev_ip = 0; + + /* The stack is at the end inside the thread_union + * struct. If we reach data, we have reached the + * beginning of the stack and should stop unwinding. */ + if (info->prev_sp >= (unsigned long) task_thread_info(info->t) && + info->prev_sp < ((unsigned long) task_thread_info(info->t) + + THREAD_SZ_ALGN)) { + info->prev_sp = 0; + break; + } + if (get_user(tmp, (unsigned long *)(info->prev_sp - RP_OFFSET))) break; info->prev_ip = tmp; diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index 5b101f6a5607..e247edbca68e 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -17,6 +17,7 @@ #include #include #include +#include #include @@ -261,7 +262,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code, struct task_struct *tsk; struct mm_struct *mm; unsigned long acc_type; - int fault; + int fault = 0; unsigned int flags; if (faulthandler_disabled()) @@ -315,7 +316,8 @@ void do_page_fault(struct pt_regs *regs, unsigned long code, goto out_of_memory; else if (fault & VM_FAULT_SIGSEGV) goto bad_area; - else if (fault & VM_FAULT_SIGBUS) + else if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| + VM_FAULT_HWPOISON_LARGE)) goto bad_area; BUG(); } @@ -352,8 +354,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code, if (user_mode(regs)) { struct siginfo si; - - show_signal_msg(regs, code, address, tsk, vma); + unsigned int lsb = 0; switch (code) { case 15: /* Data TLB miss fault/Data page fault */ @@ -386,6 +387,30 @@ void do_page_fault(struct pt_regs *regs, unsigned long code, si.si_code = (code == 26) ? SEGV_ACCERR : SEGV_MAPERR; break; } + +#ifdef CONFIG_MEMORY_FAILURE + if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) { + printk(KERN_ERR + "MCE: Killing %s:%d due to hardware memory corruption fault at %08lx\n", + tsk->comm, tsk->pid, address); + si.si_signo = SIGBUS; + si.si_code = BUS_MCEERR_AR; + } +#endif + + /* + * Either small page or large page may be poisoned. + * In other words, VM_FAULT_HWPOISON_LARGE and + * VM_FAULT_HWPOISON are mutually exclusive. + */ + if (fault & VM_FAULT_HWPOISON_LARGE) + lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault)); + else if (fault & VM_FAULT_HWPOISON) + lsb = PAGE_SHIFT; + else + show_signal_msg(regs, code, address, tsk, vma); + si.si_addr_lsb = lsb; + si.si_errno = 0; si.si_addr = (void __user *) address; force_sig_info(si.si_signo, &si, current); diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 81b0031f909f..809c468edab1 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -85,6 +85,17 @@ config NMI_IPI depends on SMP && (DEBUGGER || KEXEC_CORE || HARDLOCKUP_DETECTOR) default y +config PPC_WATCHDOG + bool + depends on HARDLOCKUP_DETECTOR + depends on HAVE_HARDLOCKUP_DETECTOR_ARCH + default y + help + This is a placeholder when the powerpc hardlockup detector + watchdog is selected (arch/powerpc/kernel/watchdog.c). It is + selected via the generic lockup detector menu which is why we + have no standalone config option for it here.
+ config STACKTRACE_SUPPORT bool default y @@ -165,7 +176,7 @@ config PPC select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_TRACEHOOK - select ARCH_HAS_STRICT_KERNEL_RWX if (PPC_BOOK3S_64 && !RELOCATABLE && !HIBERNATION) + select ARCH_HAS_STRICT_KERNEL_RWX if ((PPC_BOOK3S_64 || PPC32) && !RELOCATABLE && !HIBERNATION) select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX select HAVE_CBPF_JIT if !PPC64 select HAVE_CONTEXT_TRACKING if PPC64 @@ -356,10 +367,6 @@ config PPC_ADV_DEBUG_DAC_RANGE depends on PPC_ADV_DEBUG_REGS && 44x default y -config PPC_EMULATE_SSTEP - bool - default y if KPROBES || UPROBES || XMON || HAVE_HW_BREAKPOINT - config ZONE_DMA32 bool default y if PPC64 @@ -394,7 +401,7 @@ config HUGETLB_PAGE_SIZE_VARIABLE config MATH_EMULATION bool "Math emulation" - depends on 4xx || 8xx || PPC_MPC832x || BOOKE + depends on 4xx || PPC_8xx || PPC_MPC832x || BOOKE ---help--- Some PowerPC chips designed for embedded applications do not have a floating-point unit and therefore do not implement the @@ -956,9 +963,9 @@ config PPC_PCI_CHOICE config PCI bool "PCI support" if PPC_PCI_CHOICE - default y if !40x && !CPM2 && !8xx && !PPC_83xx \ + default y if !40x && !CPM2 && !PPC_8xx && !PPC_83xx \ && !PPC_85xx && !PPC_86xx && !GAMECUBE_COMMON - default PCI_QSPAN if !4xx && !CPM2 && 8xx + default PCI_QSPAN if PPC_8xx select GENERIC_PCI_IOMAP help Find out whether your system includes a PCI bus. PCI is the name of @@ -974,7 +981,7 @@ config PCI_SYSCALL config PCI_QSPAN bool "QSpan PCI" - depends on !4xx && !CPM2 && 8xx + depends on PPC_8xx select PPC_I8259 help Say Y here if you have a system based on a Motorola 8xx-series @@ -1165,12 +1172,23 @@ config CONSISTENT_SIZE config PIN_TLB bool "Pinned Kernel TLBs (860 ONLY)" - depends on ADVANCED_OPTIONS && 8xx + depends on ADVANCED_OPTIONS && PPC_8xx && \ + !DEBUG_PAGEALLOC && !STRICT_KERNEL_RWX + +config PIN_TLB_DATA + bool "Pinned TLB for DATA" + depends on PIN_TLB + default y config PIN_TLB_IMMR bool "Pinned TLB for IMMR" depends on PIN_TLB default y + +config PIN_TLB_TEXT + bool "Pinned TLB for TEXT" + depends on PIN_TLB + default y endmenu if PPC64 diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index e2b3e7a00c9e..1381693a4a51 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -250,7 +250,7 @@ KBUILD_AFLAGS += $(aflags-y) KBUILD_CFLAGS += $(cflags-y) head-y := arch/powerpc/kernel/head_$(BITS).o -head-$(CONFIG_8xx) := arch/powerpc/kernel/head_8xx.o +head-$(CONFIG_PPC_8xx) := arch/powerpc/kernel/head_8xx.o head-$(CONFIG_40x) := arch/powerpc/kernel/head_40x.o head-$(CONFIG_44x) := arch/powerpc/kernel/head_44x.o head-$(CONFIG_FSL_BOOKE) := arch/powerpc/kernel/head_fsl_booke.o @@ -317,6 +317,10 @@ PHONY += ppc64le_defconfig ppc64le_defconfig: $(call merge_into_defconfig,ppc64_defconfig,le) +PHONY += powernv_be_defconfig +powernv_be_defconfig: + $(call merge_into_defconfig,powernv_defconfig,be) + PHONY += mpc85xx_defconfig mpc85xx_defconfig: $(call merge_into_defconfig,mpc85xx_basic_defconfig,\ diff --git a/arch/powerpc/boot/4xx.c b/arch/powerpc/boot/4xx.c index 9d3bd4c45a24..f7da65169124 100644 --- a/arch/powerpc/boot/4xx.c +++ b/arch/powerpc/boot/4xx.c @@ -564,7 +564,7 @@ void ibm405gp_fixup_clocks(unsigned int sys_clk, unsigned int ser_clk) fbdv = 16; cbdv = ((pllmr & 0x00060000) >> 17) + 1; /* CPU:PLB */ opdv = ((pllmr & 0x00018000) >> 15) + 1; /* PLB:OPB */ - ppdv = ((pllmr & 0x00001800) >> 13) + 1; /* PLB:PCI */ + ppdv = ((pllmr & 0x00006000) >> 13) + 1; /* PLB:PCI 
*/ epdv = ((pllmr & 0x00001800) >> 11) + 2; /* PLB:EBC */ udiv = ((cpc0_cr0 & 0x3e) >> 1) + 1; diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index 6f952fe1f084..c4e6fe35c075 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile @@ -107,17 +107,18 @@ src-wlib-y := string.S crt0.S stdio.c decompress.c main.c \ $(libfdt) libfdt-wrapper.c \ ns16550.c serial.c simple_alloc.c div64.S util.S \ elf_util.c $(zlib-y) devtree.c stdlib.c \ - oflib.c ofconsole.c cuboot.c mpsc.c cpm-serial.c \ - uartlite.c mpc52xx-psc.c opal.c + oflib.c ofconsole.c cuboot.c cpm-serial.c \ + uartlite.c opal.c +src-wlib-$(CONFIG_PPC_MPC52XX) += mpc52xx-psc.c src-wlib-$(CONFIG_PPC64_BOOT_WRAPPER) += opal-calls.S ifndef CONFIG_PPC64_BOOT_WRAPPER src-wlib-y += crtsavres.S endif src-wlib-$(CONFIG_40x) += 4xx.c planetcore.c src-wlib-$(CONFIG_44x) += 4xx.c ebony.c bamboo.c -src-wlib-$(CONFIG_8xx) += mpc8xx.c planetcore.c fsl-soc.c +src-wlib-$(CONFIG_PPC_8xx) += mpc8xx.c planetcore.c fsl-soc.c src-wlib-$(CONFIG_PPC_82xx) += pq2.c fsl-soc.c planetcore.c -src-wlib-$(CONFIG_EMBEDDED6xx) += mv64x60.c mv64x60_i2c.c ugecon.c fsl-soc.c +src-wlib-$(CONFIG_EMBEDDED6xx) += mpsc.c mv64x60.c mv64x60_i2c.c ugecon.c fsl-soc.c src-plat-y := of.c epapr.c src-plat-$(CONFIG_40x) += fixed-head.S ep405.c cuboot-hotfoot.c \ @@ -132,7 +133,7 @@ src-plat-$(CONFIG_44x) += treeboot-ebony.c cuboot-ebony.c treeboot-bamboo.c \ treeboot-iss4xx.c treeboot-currituck.c \ treeboot-akebono.c \ simpleboot.c fixed-head.S virtex.c -src-plat-$(CONFIG_8xx) += cuboot-8xx.c fixed-head.S ep88xc.c redboot-8xx.c +src-plat-$(CONFIG_PPC_8xx) += cuboot-8xx.c fixed-head.S ep88xc.c redboot-8xx.c src-plat-$(CONFIG_PPC_MPC52xx) += cuboot-52xx.c src-plat-$(CONFIG_PPC_82xx) += cuboot-pq2.c fixed-head.S ep8248e.c cuboot-824x.c src-plat-$(CONFIG_PPC_83xx) += cuboot-83xx.c fixed-head.S redboot-83xx.c diff --git a/arch/powerpc/boot/crt0.S b/arch/powerpc/boot/crt0.S index 12866ccb5694..dcf2f15e6797 100644 --- a/arch/powerpc/boot/crt0.S +++ b/arch/powerpc/boot/crt0.S @@ -26,17 +26,17 @@ _zimage_start_opd: #ifdef __powerpc64__ .balign 8 -p_start: .llong _start -p_etext: .llong _etext -p_bss_start: .llong __bss_start -p_end: .llong _end +p_start: .8byte _start +p_etext: .8byte _etext +p_bss_start: .8byte __bss_start +p_end: .8byte _end -p_toc: .llong __toc_start + 0x8000 - p_base -p_dyn: .llong __dynamic_start - p_base -p_rela: .llong __rela_dyn_start - p_base -p_prom: .llong 0 +p_toc: .8byte __toc_start + 0x8000 - p_base +p_dyn: .8byte __dynamic_start - p_base +p_rela: .8byte __rela_dyn_start - p_base +p_prom: .8byte 0 .weak _platform_stack_top -p_pstack: .llong _platform_stack_top +p_pstack: .8byte _platform_stack_top #else p_start: .long _start p_etext: .long _etext diff --git a/arch/powerpc/boot/dts/fsp2.dts b/arch/powerpc/boot/dts/fsp2.dts index 475953ada707..f10a64aeb83b 100644 --- a/arch/powerpc/boot/dts/fsp2.dts +++ b/arch/powerpc/boot/dts/fsp2.dts @@ -52,6 +52,7 @@ cuboot */ clocks { mmc_clk: mmc_clk { compatible = "fixed-clock"; + #clock-cells = <0>; clock-frequency = <50000000>; clock-output-names = "mmc_clk"; }; @@ -359,20 +360,6 @@ UIC2_15: uic2_15 { interrupts = <31 0x4 15 0x84>; }; - mmc0: sdhci@020c0000 { - compatible = "st,sdhci-stih407", "st,sdhci"; - status = "disabled"; - reg = <0x020c0000 0x20000>; - reg-names = "mmc"; - interrupt-parent = <&UIC1_3>; - interrupts = <21 0x4 22 0x4>; - interrupt-names = "mmcirq"; - pinctrl-names = "default"; - pinctrl-0 = <>; - clock-names = "mmc"; - clocks = <&mmc_clk>; - }; - plb6 { compatible 
= "ibm,plb6"; #address-cells = <2>; @@ -501,6 +488,24 @@ MAL1: mcmal1 { /*RXDE*/ 4 &UIC1_2 13 0x4>; }; + mmc0: mmc@20c0000 { + compatible = "st,sdhci-stih407", "st,sdhci"; + reg = <0x020c0000 0x20000>; + reg-names = "mmc"; + interrupts = <21 0x4>; + interrupt-parent = <&UIC1_3>; + interrupt-names = "mmcirq"; + pinctrl-names = "default"; + pinctrl-0 = <>; + clock-names = "mmc"; + clocks = <&mmc_clk>; + bus-width = <4>; + non-removable; + sd-uhs-sdr50; + sd-uhs-sdr104; + sd-uhs-ddr50; + }; + opb { compatible = "ibm,opb"; #address-cells = <1>; diff --git a/arch/powerpc/boot/ppc_asm.h b/arch/powerpc/boot/ppc_asm.h index 68e388ee94fe..c63299f9fdd9 100644 --- a/arch/powerpc/boot/ppc_asm.h +++ b/arch/powerpc/boot/ppc_asm.h @@ -80,4 +80,12 @@ .long 0xa6037b7d; /* mtsrr1 r11 */ \ .long 0x2400004c /* rfid */ +#ifdef CONFIG_PPC_8xx +#define MFTBL(dest) mftb dest +#define MFTBU(dest) mftbu dest +#else +#define MFTBL(dest) mfspr dest, SPRN_TBRL +#define MFTBU(dest) mfspr dest, SPRN_TBRU +#endif + #endif /* _PPC64_PPC_ASM_H */ diff --git a/arch/powerpc/boot/serial.c b/arch/powerpc/boot/serial.c index e04c1e4063ae..7b5c02b1afd0 100644 --- a/arch/powerpc/boot/serial.c +++ b/arch/powerpc/boot/serial.c @@ -120,15 +120,19 @@ int serial_console_init(void) if (dt_is_compatible(devp, "ns16550") || dt_is_compatible(devp, "pnpPNP,501")) rc = ns16550_console_init(devp, &serial_cd); +#ifdef CONFIG_EMBEDDED6xx else if (dt_is_compatible(devp, "marvell,mv64360-mpsc")) rc = mpsc_console_init(devp, &serial_cd); +#endif else if (dt_is_compatible(devp, "fsl,cpm1-scc-uart") || dt_is_compatible(devp, "fsl,cpm1-smc-uart") || dt_is_compatible(devp, "fsl,cpm2-scc-uart") || dt_is_compatible(devp, "fsl,cpm2-smc-uart")) rc = cpm_console_init(devp, &serial_cd); +#ifdef CONFIG_PPC_MPC52XX else if (dt_is_compatible(devp, "fsl,mpc5200-psc-uart")) rc = mpc5200_psc_console_init(devp, &serial_cd); +#endif else if (dt_is_compatible(devp, "xlnx,opb-uartlite-1.00.b") || dt_is_compatible(devp, "xlnx,xps-uartlite-1.00.a")) rc = uartlite_console_init(devp, &serial_cd); diff --git a/arch/powerpc/boot/util.S b/arch/powerpc/boot/util.S index 243b8497d58b..ec069177d942 100644 --- a/arch/powerpc/boot/util.S +++ b/arch/powerpc/boot/util.S @@ -71,32 +71,18 @@ udelay: add r4,r4,r5 addi r4,r4,-1 divw r4,r4,r5 /* BUS ticks */ -#ifdef CONFIG_8xx -1: mftbu r5 - mftb r6 - mftbu r7 -#else -1: mfspr r5, SPRN_TBRU - mfspr r6, SPRN_TBRL - mfspr r7, SPRN_TBRU -#endif +1: MFTBU(r5) + MFTBL(r6) + MFTBU(r7) cmpw 0,r5,r7 bne 1b /* Get [synced] base time */ addc r9,r6,r4 /* Compute end time */ addze r8,r5 -#ifdef CONFIG_8xx -2: mftbu r5 -#else -2: mfspr r5, SPRN_TBRU -#endif +2: MFTBU(r5) cmpw 0,r5,r8 blt 2b bgt 3f -#ifdef CONFIG_8xx - mftb r6 -#else - mfspr r6, SPRN_TBRL -#endif + MFTBL(r6) cmpw 0,r6,r9 blt 2b 3: blr diff --git a/arch/powerpc/configs/40x/acadia_defconfig b/arch/powerpc/configs/40x/acadia_defconfig index 3438ed99c088..e57344c3b0d7 100644 --- a/arch/powerpc/configs/40x/acadia_defconfig +++ b/arch/powerpc/configs/40x/acadia_defconfig @@ -64,4 +64,3 @@ CONFIG_CRYPTO_ECB=y CONFIG_CRYPTO_PCBC=y CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_DES=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/40x/ep405_defconfig b/arch/powerpc/configs/40x/ep405_defconfig index 36c44c0b560c..0f66f8a87be8 100644 --- a/arch/powerpc/configs/40x/ep405_defconfig +++ b/arch/powerpc/configs/40x/ep405_defconfig @@ -64,4 +64,3 @@ CONFIG_CRYPTO_ECB=y CONFIG_CRYPTO_PCBC=y CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_DES=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git 
a/arch/powerpc/configs/40x/kilauea_defconfig b/arch/powerpc/configs/40x/kilauea_defconfig index ad2156c6e2fc..b5cc7426c21f 100644 --- a/arch/powerpc/configs/40x/kilauea_defconfig +++ b/arch/powerpc/configs/40x/kilauea_defconfig @@ -72,4 +72,3 @@ CONFIG_CRYPTO_ECB=y CONFIG_CRYPTO_PCBC=y CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_DES=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/40x/klondike_defconfig b/arch/powerpc/configs/40x/klondike_defconfig index 28adb782ec51..caab658d1da1 100644 --- a/arch/powerpc/configs/40x/klondike_defconfig +++ b/arch/powerpc/configs/40x/klondike_defconfig @@ -26,7 +26,6 @@ CONFIG_SCSI_SAS_ATTRS=y # CONFIG_VT is not set # CONFIG_UNIX98_PTYS is not set # CONFIG_LEGACY_PTYS is not set -# CONFIG_DEVKMEM is not set # CONFIG_HW_RANDOM is not set # CONFIG_HWMON is not set # CONFIG_USB_SUPPORT is not set diff --git a/arch/powerpc/configs/40x/makalu_defconfig b/arch/powerpc/configs/40x/makalu_defconfig index a00f434c4d47..e0b1489b7c7b 100644 --- a/arch/powerpc/configs/40x/makalu_defconfig +++ b/arch/powerpc/configs/40x/makalu_defconfig @@ -62,4 +62,3 @@ CONFIG_CRYPTO_ECB=y CONFIG_CRYPTO_PCBC=y CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_DES=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/40x/obs600_defconfig b/arch/powerpc/configs/40x/obs600_defconfig index e500e6a12b3e..aac06d2ad01a 100644 --- a/arch/powerpc/configs/40x/obs600_defconfig +++ b/arch/powerpc/configs/40x/obs600_defconfig @@ -72,4 +72,3 @@ CONFIG_CRYPTO_ECB=y CONFIG_CRYPTO_PCBC=y CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_DES=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/40x/virtex_defconfig b/arch/powerpc/configs/40x/virtex_defconfig index 65dc084a154c..a2b2770eee8f 100644 --- a/arch/powerpc/configs/40x/virtex_defconfig +++ b/arch/powerpc/configs/40x/virtex_defconfig @@ -41,9 +41,9 @@ CONFIG_NETDEVICES=y CONFIG_SERIO_XILINX_XPS_PS2=y CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_OF_PLATFORM=y CONFIG_SERIAL_UARTLITE=y CONFIG_SERIAL_UARTLITE_CONSOLE=y -CONFIG_SERIAL_OF_PLATFORM=y CONFIG_XILINX_HWICAP=y CONFIG_GPIOLIB=y CONFIG_GPIO_SYSFS=y @@ -74,4 +74,3 @@ CONFIG_FONT_8x16=y CONFIG_PRINTK_TIME=y CONFIG_DEBUG_INFO=y CONFIG_DEBUG_KERNEL=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/40x/walnut_defconfig b/arch/powerpc/configs/40x/walnut_defconfig index 567f99bd64a3..6faa03cd661c 100644 --- a/arch/powerpc/configs/40x/walnut_defconfig +++ b/arch/powerpc/configs/40x/walnut_defconfig @@ -57,4 +57,3 @@ CONFIG_CRYPTO_ECB=y CONFIG_CRYPTO_PCBC=y CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_DES=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/44x/akebono_defconfig b/arch/powerpc/configs/44x/akebono_defconfig index 143b2fbddb46..9fcd361607e2 100644 --- a/arch/powerpc/configs/44x/akebono_defconfig +++ b/arch/powerpc/configs/44x/akebono_defconfig @@ -123,7 +123,6 @@ CONFIG_NLS_DEFAULT="n" CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y CONFIG_DEBUG_INFO=y -CONFIG_DEBUG_FS=y CONFIG_MAGIC_SYSRQ=y CONFIG_DETECT_HUNG_TASK=y CONFIG_XMON=y @@ -135,5 +134,4 @@ CONFIG_CRYPTO_PCBC=y CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_SHA1_PPC=y CONFIG_CRYPTO_DES=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set # CONFIG_CRYPTO_HW is not set diff --git a/arch/powerpc/configs/44x/bamboo_defconfig b/arch/powerpc/configs/44x/bamboo_defconfig index 477d99fefd9a..6f3a6ecc81e7 100644 --- a/arch/powerpc/configs/44x/bamboo_defconfig +++ b/arch/powerpc/configs/44x/bamboo_defconfig @@ -55,4 +55,3 @@ CONFIG_CRYPTO_ECB=y CONFIG_CRYPTO_PCBC=y 
CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_DES=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/44x/currituck_defconfig b/arch/powerpc/configs/44x/currituck_defconfig index 3799a26de6f4..5f1df5fe4453 100644 --- a/arch/powerpc/configs/44x/currituck_defconfig +++ b/arch/powerpc/configs/44x/currituck_defconfig @@ -81,7 +81,6 @@ CONFIG_NFS_V3_ACL=y CONFIG_NFS_V4=y CONFIG_NLS_DEFAULT="n" CONFIG_DEBUG_INFO=y -CONFIG_DEBUG_FS=y CONFIG_MAGIC_SYSRQ=y CONFIG_DETECT_HUNG_TASK=y CONFIG_XMON=y @@ -94,5 +93,4 @@ CONFIG_CRYPTO_ECB=y CONFIG_CRYPTO_PCBC=y CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_DES=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set # CONFIG_CRYPTO_HW is not set diff --git a/arch/powerpc/configs/44x/ebony_defconfig b/arch/powerpc/configs/44x/ebony_defconfig index c265f54ab9e5..e2b6578993d5 100644 --- a/arch/powerpc/configs/44x/ebony_defconfig +++ b/arch/powerpc/configs/44x/ebony_defconfig @@ -59,5 +59,4 @@ CONFIG_CRYPTO_ECB=y CONFIG_CRYPTO_PCBC=y CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_DES=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set # CONFIG_CRYPTO_HW is not set diff --git a/arch/powerpc/configs/44x/eiger_defconfig b/arch/powerpc/configs/44x/eiger_defconfig index bb6bd6d90821..f6dc23fef683 100644 --- a/arch/powerpc/configs/44x/eiger_defconfig +++ b/arch/powerpc/configs/44x/eiger_defconfig @@ -84,18 +84,14 @@ CONFIG_CRYPTO_CCM=y CONFIG_CRYPTO_GCM=y CONFIG_CRYPTO_CBC=y CONFIG_CRYPTO_CTS=y -CONFIG_CRYPTO_ECB=y CONFIG_CRYPTO_LRW=y CONFIG_CRYPTO_PCBC=y CONFIG_CRYPTO_XTS=y -CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_XCBC=y CONFIG_CRYPTO_MD4=y CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_SHA1=y -CONFIG_CRYPTO_SHA256=y CONFIG_CRYPTO_SHA512=y CONFIG_CRYPTO_ARC4=y CONFIG_CRYPTO_BLOWFISH=y CONFIG_CRYPTO_DES=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/44x/fsp2_defconfig b/arch/powerpc/configs/44x/fsp2_defconfig index e8e6a6999852..bae6b26bcfba 100644 --- a/arch/powerpc/configs/44x/fsp2_defconfig +++ b/arch/powerpc/configs/44x/fsp2_defconfig @@ -92,8 +92,10 @@ CONFIG_MMC_DEBUG=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_OF_ARASAN=y +CONFIG_MMC_SDHCI_ST=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_M41T80=y +CONFIG_RESET_CONTROLLER=y CONFIG_EXT2_FS=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y @@ -115,7 +117,6 @@ CONFIG_PRINTK_TIME=y CONFIG_MESSAGE_LOGLEVEL_DEFAULT=3 CONFIG_DYNAMIC_DEBUG=y CONFIG_DEBUG_INFO=y -CONFIG_DEBUG_FS=y CONFIG_MAGIC_SYSRQ=y CONFIG_DETECT_HUNG_TASK=y CONFIG_CRYPTO_CBC=y diff --git a/arch/powerpc/configs/44x/icon_defconfig b/arch/powerpc/configs/44x/icon_defconfig index 060f2edddb71..4453a4590b1a 100644 --- a/arch/powerpc/configs/44x/icon_defconfig +++ b/arch/powerpc/configs/44x/icon_defconfig @@ -47,8 +47,6 @@ CONFIG_FUSION_LOGGING=y CONFIG_NETDEVICES=y CONFIG_IBM_EMAC=y # CONFIG_WLAN is not set -CONFIG_INPUT_MOUSEDEV_SCREEN_X=640 -CONFIG_INPUT_MOUSEDEV_SCREEN_Y=480 # CONFIG_MOUSE_PS2_ALPS is not set # CONFIG_MOUSE_PS2_LOGIPS2PP is not set # CONFIG_MOUSE_PS2_SYNAPTICS is not set @@ -94,4 +92,3 @@ CONFIG_CRYPTO_ECB=y CONFIG_CRYPTO_PCBC=y CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_DES=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/44x/iss476-smp_defconfig b/arch/powerpc/configs/44x/iss476-smp_defconfig index 115a6b2be18b..d24bfa6ecd62 100644 --- a/arch/powerpc/configs/44x/iss476-smp_defconfig +++ b/arch/powerpc/configs/44x/iss476-smp_defconfig @@ -63,7 +63,6 @@ CONFIG_TMPFS=y CONFIG_CRAMFS=y # CONFIG_NETWORK_FILESYSTEMS is not set CONFIG_DEBUG_INFO=y -CONFIG_DEBUG_FS=y CONFIG_MAGIC_SYSRQ=y CONFIG_DETECT_HUNG_TASK=y CONFIG_PPC_EARLY_DEBUG=y @@ -72,5 
+71,4 @@ CONFIG_CRYPTO_ECB=y CONFIG_CRYPTO_PCBC=y CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_DES=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set # CONFIG_CRYPTO_HW is not set diff --git a/arch/powerpc/configs/44x/katmai_defconfig b/arch/powerpc/configs/44x/katmai_defconfig index b999048c4ae6..5d3f685a7af8 100644 --- a/arch/powerpc/configs/44x/katmai_defconfig +++ b/arch/powerpc/configs/44x/katmai_defconfig @@ -60,4 +60,3 @@ CONFIG_CRYPTO_ECB=y CONFIG_CRYPTO_PCBC=y CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_DES=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/44x/rainier_defconfig b/arch/powerpc/configs/44x/rainier_defconfig index b8c9ee45d0a2..7b8355a5698d 100644 --- a/arch/powerpc/configs/44x/rainier_defconfig +++ b/arch/powerpc/configs/44x/rainier_defconfig @@ -66,4 +66,3 @@ CONFIG_CRYPTO_ECB=y CONFIG_CRYPTO_PCBC=y CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_DES=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/44x/redwood_defconfig b/arch/powerpc/configs/44x/redwood_defconfig index a4bb048448da..918cfb63f0c8 100644 --- a/arch/powerpc/configs/44x/redwood_defconfig +++ b/arch/powerpc/configs/44x/redwood_defconfig @@ -83,18 +83,14 @@ CONFIG_CRYPTO_CCM=y CONFIG_CRYPTO_GCM=y CONFIG_CRYPTO_CBC=y CONFIG_CRYPTO_CTS=y -CONFIG_CRYPTO_ECB=y CONFIG_CRYPTO_LRW=y CONFIG_CRYPTO_PCBC=y CONFIG_CRYPTO_XTS=y -CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_XCBC=y CONFIG_CRYPTO_MD4=y CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_SHA1=y -CONFIG_CRYPTO_SHA256=y CONFIG_CRYPTO_SHA512=y CONFIG_CRYPTO_ARC4=y CONFIG_CRYPTO_BLOWFISH=y CONFIG_CRYPTO_DES=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/44x/sequoia_defconfig b/arch/powerpc/configs/44x/sequoia_defconfig index b3792fd8111d..1e04122912f3 100644 --- a/arch/powerpc/configs/44x/sequoia_defconfig +++ b/arch/powerpc/configs/44x/sequoia_defconfig @@ -67,4 +67,3 @@ CONFIG_CRYPTO_ECB=y CONFIG_CRYPTO_PCBC=y CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_DES=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/44x/taishan_defconfig b/arch/powerpc/configs/44x/taishan_defconfig index ff6f86241418..42cc7b4ed95f 100644 --- a/arch/powerpc/configs/44x/taishan_defconfig +++ b/arch/powerpc/configs/44x/taishan_defconfig @@ -61,4 +61,3 @@ CONFIG_CRYPTO_ECB=y CONFIG_CRYPTO_PCBC=y CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_DES=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/44x/virtex5_defconfig b/arch/powerpc/configs/44x/virtex5_defconfig index ce052064bcbb..99cc3dc02df1 100644 --- a/arch/powerpc/configs/44x/virtex5_defconfig +++ b/arch/powerpc/configs/44x/virtex5_defconfig @@ -40,9 +40,9 @@ CONFIG_NETDEVICES=y CONFIG_SERIO_XILINX_XPS_PS2=y CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_OF_PLATFORM=y CONFIG_SERIAL_UARTLITE=y CONFIG_SERIAL_UARTLITE_CONSOLE=y -CONFIG_SERIAL_OF_PLATFORM=y CONFIG_XILINX_HWICAP=y CONFIG_GPIOLIB=y CONFIG_GPIO_SYSFS=y @@ -73,4 +73,3 @@ CONFIG_FONT_8x16=y CONFIG_PRINTK_TIME=y CONFIG_DEBUG_INFO=y CONFIG_DEBUG_KERNEL=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/44x/warp_defconfig b/arch/powerpc/configs/44x/warp_defconfig index ab932488e68b..b5c866073efd 100644 --- a/arch/powerpc/configs/44x/warp_defconfig +++ b/arch/powerpc/configs/44x/warp_defconfig @@ -97,4 +97,3 @@ CONFIG_MAGIC_SYSRQ=y CONFIG_DETECT_HUNG_TASK=y # CONFIG_SCHED_DEBUG is not set # CONFIG_DEBUG_BUGVERBOSE is not set -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/52xx/cm5200_defconfig b/arch/powerpc/configs/52xx/cm5200_defconfig index c1faac800806..73948e88ac82 100644 
--- a/arch/powerpc/configs/52xx/cm5200_defconfig +++ b/arch/powerpc/configs/52xx/cm5200_defconfig @@ -77,4 +77,3 @@ CONFIG_DETECT_HUNG_TASK=y # CONFIG_DEBUG_BUGVERBOSE is not set CONFIG_CRYPTO_ECB=y CONFIG_CRYPTO_PCBC=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/52xx/lite5200b_defconfig b/arch/powerpc/configs/52xx/lite5200b_defconfig index 9493b02ac660..6fc7f786c83c 100644 --- a/arch/powerpc/configs/52xx/lite5200b_defconfig +++ b/arch/powerpc/configs/52xx/lite5200b_defconfig @@ -14,6 +14,7 @@ CONFIG_PPC_MPC52xx=y CONFIG_PPC_MPC5200_SIMPLE=y CONFIG_PPC_LITE5200=y # CONFIG_PPC_PMAC is not set +CONFIG_GEN_RTC=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -44,7 +45,6 @@ CONFIG_SERIAL_MPC52xx=y CONFIG_SERIAL_MPC52xx_CONSOLE=y CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=115200 # CONFIG_HW_RANDOM is not set -CONFIG_GEN_RTC=y CONFIG_I2C=y CONFIG_I2C_CHARDEV=y CONFIG_I2C_MPC=y @@ -62,4 +62,3 @@ CONFIG_PRINTK_TIME=y CONFIG_DEBUG_INFO=y CONFIG_DETECT_HUNG_TASK=y # CONFIG_DEBUG_BUGVERBOSE is not set -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/52xx/motionpro_defconfig b/arch/powerpc/configs/52xx/motionpro_defconfig index fe8126bc1655..ae2a1f74103b 100644 --- a/arch/powerpc/configs/52xx/motionpro_defconfig +++ b/arch/powerpc/configs/52xx/motionpro_defconfig @@ -41,16 +41,16 @@ CONFIG_ATA=y CONFIG_PATA_MPC52xx=y CONFIG_NETDEVICES=y CONFIG_FEC_MPC52xx=y -CONFIG_MARVELL_PHY=y -CONFIG_DAVICOM_PHY=y -CONFIG_QSEMI_PHY=y -CONFIG_LXT_PHY=y -CONFIG_CICADA_PHY=y -CONFIG_VITESSE_PHY=y -CONFIG_SMSC_PHY=y -CONFIG_BROADCOM_PHY=y -CONFIG_ICPLUS_PHY=y CONFIG_MDIO_BITBANG=y +CONFIG_BROADCOM_PHY=y +CONFIG_CICADA_PHY=y +CONFIG_DAVICOM_PHY=y +CONFIG_ICPLUS_PHY=y +CONFIG_LXT_PHY=y +CONFIG_MARVELL_PHY=y +CONFIG_QSEMI_PHY=y +CONFIG_SMSC_PHY=y +CONFIG_VITESSE_PHY=y # CONFIG_INPUT is not set # CONFIG_SERIO is not set # CONFIG_VT is not set @@ -90,4 +90,3 @@ CONFIG_DETECT_HUNG_TASK=y # CONFIG_DEBUG_BUGVERBOSE is not set CONFIG_CRYPTO_ECB=y CONFIG_CRYPTO_PCBC=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/52xx/tqm5200_defconfig b/arch/powerpc/configs/52xx/tqm5200_defconfig index b8b316b884aa..0777e6efd22d 100644 --- a/arch/powerpc/configs/52xx/tqm5200_defconfig +++ b/arch/powerpc/configs/52xx/tqm5200_defconfig @@ -48,7 +48,6 @@ CONFIG_PATA_PLATFORM=y CONFIG_NETDEVICES=y CONFIG_FEC_MPC52xx=y CONFIG_LXT_PHY=y -CONFIG_FIXED_PHY=y CONFIG_SERIAL_MPC52xx=y CONFIG_SERIAL_MPC52xx_CONSOLE=y CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=115200 @@ -92,4 +91,3 @@ CONFIG_DETECT_HUNG_TASK=y # CONFIG_DEBUG_BUGVERBOSE is not set CONFIG_CRYPTO_ECB=y CONFIG_CRYPTO_PCBC=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/83xx/asp8347_defconfig b/arch/powerpc/configs/83xx/asp8347_defconfig index b60cac088a7b..dd884df32dfd 100644 --- a/arch/powerpc/configs/83xx/asp8347_defconfig +++ b/arch/powerpc/configs/83xx/asp8347_defconfig @@ -42,7 +42,6 @@ CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=32768 CONFIG_NETDEVICES=y CONFIG_GIANFAR=y -# CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set @@ -71,4 +70,3 @@ CONFIG_NFS_V4=y CONFIG_ROOT_NFS=y CONFIG_CRYPTO_ECB=m CONFIG_CRYPTO_PCBC=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/83xx/kmeter1_defconfig b/arch/powerpc/configs/83xx/kmeter1_defconfig index 9547dcdd6489..d21b5cb365f2 100644 --- a/arch/powerpc/configs/83xx/kmeter1_defconfig +++ b/arch/powerpc/configs/83xx/kmeter1_defconfig @@ -55,7 +55,6 @@ CONFIG_HDLC=y # 
CONFIG_INPUT is not set # CONFIG_SERIO is not set # CONFIG_VT is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_HW_RANDOM=y diff --git a/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig b/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig index 80aa844c1428..1f69f4edf074 100644 --- a/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig +++ b/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig @@ -48,8 +48,6 @@ CONFIG_NETDEVICES=y CONFIG_GIANFAR=y CONFIG_E100=y CONFIG_CICADA_PHY=y -CONFIG_FIXED_PHY=y -# CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set @@ -87,4 +85,3 @@ CONFIG_NFS_V4=y CONFIG_ROOT_NFS=y CONFIG_DETECT_HUNG_TASK=y CONFIG_CRYPTO_PCBC=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig b/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig index d89d13bc6901..797fc3ffddee 100644 --- a/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig +++ b/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig @@ -47,7 +47,6 @@ CONFIG_MD_RAID1=y CONFIG_NETDEVICES=y CONFIG_GIANFAR=y CONFIG_E100=y -# CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set @@ -85,4 +84,3 @@ CONFIG_NFS_V4=y CONFIG_ROOT_NFS=y CONFIG_DETECT_HUNG_TASK=y CONFIG_CRYPTO_PCBC=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/83xx/mpc832x_mds_defconfig b/arch/powerpc/configs/83xx/mpc832x_mds_defconfig index e789518a2881..4f914906ee4b 100644 --- a/arch/powerpc/configs/83xx/mpc832x_mds_defconfig +++ b/arch/powerpc/configs/83xx/mpc832x_mds_defconfig @@ -14,7 +14,6 @@ CONFIG_PARTITION_ADVANCED=y # CONFIG_PPC_PMAC is not set CONFIG_PPC_83xx=y CONFIG_MPC832x_MDS=y -CONFIG_QUICC_ENGINE=y CONFIG_MATH_EMULATION=y CONFIG_PCI=y CONFIG_NET=y @@ -36,7 +35,6 @@ CONFIG_SCSI=y CONFIG_NETDEVICES=y CONFIG_UCC_GETH=y CONFIG_DAVICOM_PHY=y -# CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set @@ -50,6 +48,7 @@ CONFIG_I2C_MPC=y CONFIG_WATCHDOG=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_DS1374=y +CONFIG_QUICC_ENGINE=y CONFIG_EXT2_FS=y CONFIG_EXT4_FS=y CONFIG_PROC_KCORE=y @@ -59,4 +58,3 @@ CONFIG_NFS_V4=y CONFIG_ROOT_NFS=y CONFIG_CRYPTO_ECB=m CONFIG_CRYPTO_PCBC=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig b/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig index 917a49ca2bd1..a484eb8401e8 100644 --- a/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig +++ b/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig @@ -14,7 +14,7 @@ CONFIG_LDM_PARTITION=y # CONFIG_PPC_PMAC is not set CONFIG_PPC_83xx=y CONFIG_MPC832x_RDB=y -CONFIG_QUICC_ENGINE=y +CONFIG_GEN_RTC=y CONFIG_MATH_EMULATION=y CONFIG_PCI=y CONFIG_NET=y @@ -38,7 +38,6 @@ CONFIG_NETDEVICES=y CONFIG_UCC_GETH=y CONFIG_E1000=y CONFIG_ICPLUS_PHY=y -# CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set @@ -46,7 +45,6 @@ CONFIG_ICPLUS_PHY=y CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_HW_RANDOM=y -CONFIG_GEN_RTC=y CONFIG_I2C=y CONFIG_I2C_CHARDEV=y CONFIG_I2C_MPC=y @@ -62,6 +60,7 @@ CONFIG_USB_OHCI_HCD_PPC_OF_BE=y CONFIG_USB_STORAGE=y CONFIG_MMC=y CONFIG_MMC_SPI=y +CONFIG_QUICC_ENGINE=y CONFIG_EXT2_FS=y CONFIG_EXT4_FS=y CONFIG_MSDOS_FS=y @@ -78,4 +77,3 @@ CONFIG_NLS_ISO8859_1=y CONFIG_CRC_T10DIF=y CONFIG_CRYPTO_ECB=m CONFIG_CRYPTO_PCBC=m -# CONFIG_CRYPTO_ANSI_CPRNG is 
not set diff --git a/arch/powerpc/configs/83xx/mpc834x_itx_defconfig b/arch/powerpc/configs/83xx/mpc834x_itx_defconfig index 00f636e95cc8..37f4d93b3f81 100644 --- a/arch/powerpc/configs/83xx/mpc834x_itx_defconfig +++ b/arch/powerpc/configs/83xx/mpc834x_itx_defconfig @@ -49,7 +49,6 @@ CONFIG_MD_RAID1=y CONFIG_NETDEVICES=y CONFIG_GIANFAR=y CONFIG_CICADA_PHY=y -CONFIG_FIXED_PHY=y # CONFIG_INPUT is not set # CONFIG_SERIO is not set # CONFIG_VT is not set @@ -84,4 +83,3 @@ CONFIG_NFS_V4=y CONFIG_ROOT_NFS=y CONFIG_CRC_T10DIF=y CONFIG_CRYPTO_PCBC=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig b/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig index a539d44d1dba..7adb6708a761 100644 --- a/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig +++ b/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig @@ -75,4 +75,3 @@ CONFIG_NFS_V4=y CONFIG_ROOT_NFS=y CONFIG_CRC_T10DIF=y CONFIG_CRYPTO_PCBC=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/83xx/mpc834x_mds_defconfig b/arch/powerpc/configs/83xx/mpc834x_mds_defconfig index 9f0ddc830c82..d7ce3551529d 100644 --- a/arch/powerpc/configs/83xx/mpc834x_mds_defconfig +++ b/arch/powerpc/configs/83xx/mpc834x_mds_defconfig @@ -35,7 +35,6 @@ CONFIG_NETDEVICES=y CONFIG_GIANFAR=y CONFIG_E100=y CONFIG_MARVELL_PHY=y -# CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set @@ -58,4 +57,3 @@ CONFIG_NFS_V4=y CONFIG_ROOT_NFS=y CONFIG_CRYPTO_ECB=m CONFIG_CRYPTO_PCBC=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/83xx/mpc836x_mds_defconfig b/arch/powerpc/configs/83xx/mpc836x_mds_defconfig index ceed4c1f0ab5..92134cee3f37 100644 --- a/arch/powerpc/configs/83xx/mpc836x_mds_defconfig +++ b/arch/powerpc/configs/83xx/mpc836x_mds_defconfig @@ -14,7 +14,6 @@ CONFIG_PARTITION_ADVANCED=y # CONFIG_PPC_PMAC is not set CONFIG_PPC_83xx=y CONFIG_MPC836x_MDS=y -CONFIG_QUICC_ENGINE=y CONFIG_PCI=y CONFIG_NET=y CONFIG_PACKET=y @@ -41,7 +40,6 @@ CONFIG_SCSI=y CONFIG_NETDEVICES=y CONFIG_UCC_GETH=y CONFIG_MARVELL_PHY=y -# CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set @@ -55,6 +53,7 @@ CONFIG_I2C_MPC=y CONFIG_WATCHDOG=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_DS1374=y +CONFIG_QUICC_ENGINE=y CONFIG_EXT2_FS=y CONFIG_EXT4_FS=y CONFIG_PROC_KCORE=y @@ -64,4 +63,3 @@ CONFIG_NFS_V4=y CONFIG_ROOT_NFS=y CONFIG_CRYPTO_ECB=m CONFIG_CRYPTO_PCBC=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig b/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig index a6819bf3ef5e..97f7ea5f205f 100644 --- a/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig +++ b/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig @@ -12,7 +12,6 @@ CONFIG_PARTITION_ADVANCED=y # CONFIG_PPC_PMAC is not set CONFIG_PPC_83xx=y CONFIG_MPC836x_RDK=y -CONFIG_QUICC_ENGINE=y CONFIG_QE_GPIO=y CONFIG_PCI=y CONFIG_NET=y @@ -39,11 +38,9 @@ CONFIG_BLK_DEV_RAM_SIZE=32768 CONFIG_NETDEVICES=y CONFIG_UCC_GETH=y CONFIG_BROADCOM_PHY=y -# CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_QE=y @@ -63,6 +60,7 @@ CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_LOGO=y # CONFIG_LOGO_LINUX_MONO is not set # CONFIG_USB_SUPPORT is not set +CONFIG_QUICC_ENGINE=y CONFIG_EXT2_FS=y CONFIG_EXT4_FS=y CONFIG_PROC_KCORE=y @@ -72,4 +70,3 
@@ CONFIG_NFS_FS=y CONFIG_NFS_V4=y CONFIG_ROOT_NFS=y CONFIG_PPC_EARLY_DEBUG=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/83xx/mpc837x_mds_defconfig b/arch/powerpc/configs/83xx/mpc837x_mds_defconfig index 4bd1992e4d98..ee7510a33d06 100644 --- a/arch/powerpc/configs/83xx/mpc837x_mds_defconfig +++ b/arch/powerpc/configs/83xx/mpc837x_mds_defconfig @@ -11,6 +11,7 @@ CONFIG_PARTITION_ADVANCED=y # CONFIG_PPC_PMAC is not set CONFIG_PPC_83xx=y CONFIG_MPC837x_MDS=y +CONFIG_GEN_RTC=y CONFIG_PCI=y CONFIG_NET=y CONFIG_PACKET=y @@ -35,7 +36,6 @@ CONFIG_SATA_FSL=y CONFIG_NETDEVICES=y CONFIG_GIANFAR=y CONFIG_MARVELL_PHY=y -# CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set @@ -43,7 +43,6 @@ CONFIG_MARVELL_PHY=y CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y # CONFIG_HW_RANDOM is not set -CONFIG_GEN_RTC=y CONFIG_I2C=y CONFIG_I2C_CHARDEV=y CONFIG_I2C_MPC=y @@ -58,4 +57,3 @@ CONFIG_ROOT_NFS=y CONFIG_CRC_T10DIF=y CONFIG_CRYPTO_ECB=m CONFIG_CRYPTO_PCBC=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig b/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig index 2d4bb63882b8..8966a9af4230 100644 --- a/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig +++ b/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig @@ -11,6 +11,7 @@ CONFIG_PARTITION_ADVANCED=y # CONFIG_PPC_PMAC is not set CONFIG_PPC_83xx=y CONFIG_MPC837x_RDB=y +CONFIG_GEN_RTC=y CONFIG_PCI=y CONFIG_NET=y CONFIG_PACKET=y @@ -41,9 +42,7 @@ CONFIG_MD_RAID456=y CONFIG_NETDEVICES=y CONFIG_GIANFAR=y CONFIG_MARVELL_PHY=y -CONFIG_FIXED_PHY=y CONFIG_INPUT_FF_MEMLESS=m -# CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set @@ -51,7 +50,6 @@ CONFIG_INPUT_FF_MEMLESS=m CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y # CONFIG_HW_RANDOM is not set -CONFIG_GEN_RTC=y CONFIG_I2C=y CONFIG_I2C_CHARDEV=y CONFIG_I2C_MPC=y @@ -86,4 +84,3 @@ CONFIG_CRC_T10DIF=y # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_CRYPTO_ECB=m CONFIG_CRYPTO_PCBC=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/83xx/sbc834x_defconfig b/arch/powerpc/configs/83xx/sbc834x_defconfig index b3380dbd1925..7d74699334da 100644 --- a/arch/powerpc/configs/83xx/sbc834x_defconfig +++ b/arch/powerpc/configs/83xx/sbc834x_defconfig @@ -11,6 +11,7 @@ CONFIG_MODULE_UNLOAD=y # CONFIG_PPC_PMAC is not set CONFIG_PPC_83xx=y CONFIG_SBC834x=y +CONFIG_GEN_RTC=y CONFIG_PCI=y CONFIG_NET=y CONFIG_PACKET=y @@ -41,7 +42,6 @@ CONFIG_BLK_DEV_SD=y CONFIG_NETDEVICES=y CONFIG_GIANFAR=y CONFIG_BROADCOM_PHY=y -# CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set @@ -52,7 +52,6 @@ CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_NR_UARTS=2 CONFIG_SERIAL_8250_RUNTIME_UARTS=2 # CONFIG_HW_RANDOM is not set -CONFIG_GEN_RTC=y CONFIG_I2C=y CONFIG_I2C_CHARDEV=y CONFIG_I2C_MPC=y @@ -72,5 +71,4 @@ CONFIG_NFS_V4=y CONFIG_ROOT_NFS=y CONFIG_CRYPTO_ECB=m CONFIG_CRYPTO_PCBC=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set # CONFIG_CRYPTO_HW is not set diff --git a/arch/powerpc/configs/85xx/ge_imp3a_defconfig b/arch/powerpc/configs/85xx/ge_imp3a_defconfig index a917f7afb4f9..dd98f43b2fb8 100644 --- a/arch/powerpc/configs/85xx/ge_imp3a_defconfig +++ b/arch/powerpc/configs/85xx/ge_imp3a_defconfig @@ -22,7 +22,6 @@ CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set CONFIG_GE_IMP3A=y -CONFIG_QUICC_ENGINE=y CONFIG_QE_GPIO=y 
CONFIG_CPM2=y CONFIG_HIGHMEM=y @@ -161,6 +160,7 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_RX8581=y CONFIG_DMADEVICES=y CONFIG_FSL_DMA=y +CONFIG_QUICC_ENGINE=y CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y @@ -233,5 +233,4 @@ CONFIG_CRYPTO_CBC=y CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_DES=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_DEV_TALITOS=y diff --git a/arch/powerpc/configs/85xx/ksi8560_defconfig b/arch/powerpc/configs/85xx/ksi8560_defconfig index bd814dfb0bbd..9ce6f48cfb61 100644 --- a/arch/powerpc/configs/85xx/ksi8560_defconfig +++ b/arch/powerpc/configs/85xx/ksi8560_defconfig @@ -8,6 +8,7 @@ CONFIG_PARTITION_ADVANCED=y # CONFIG_MSDOS_PARTITION is not set CONFIG_KSI8560=y CONFIG_CPM2=y +CONFIG_GEN_RTC=y CONFIG_HIGHMEM=y CONFIG_BINFMT_MISC=y CONFIG_MATH_EMULATION=y @@ -39,14 +40,12 @@ CONFIG_FS_ENET=y CONFIG_FS_ENET_MDIO_FCC=y CONFIG_GIANFAR=y CONFIG_MARVELL_PHY=y -# CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set # CONFIG_VT is not set CONFIG_SERIAL_CPM=y CONFIG_SERIAL_CPM_CONSOLE=y -CONFIG_GEN_RTC=y CONFIG_EXT2_FS=y CONFIG_EXT4_FS=y CONFIG_PROC_KCORE=y @@ -57,4 +56,3 @@ CONFIG_DEBUG_FS=y CONFIG_DETECT_HUNG_TASK=y CONFIG_DEBUG_MUTEXES=y # CONFIG_DEBUG_BUGVERBOSE is not set -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/85xx/mpc8540_ads_defconfig b/arch/powerpc/configs/85xx/mpc8540_ads_defconfig index 32af10def641..5fbc3f904046 100644 --- a/arch/powerpc/configs/85xx/mpc8540_ads_defconfig +++ b/arch/powerpc/configs/85xx/mpc8540_ads_defconfig @@ -9,6 +9,7 @@ CONFIG_EXPERT=y CONFIG_PARTITION_ADVANCED=y # CONFIG_MSDOS_PARTITION is not set CONFIG_MPC8540_ADS=y +CONFIG_GEN_RTC=y CONFIG_BINFMT_MISC=y CONFIG_MATH_EMULATION=y # CONFIG_SECCOMP is not set @@ -30,7 +31,6 @@ CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=32768 CONFIG_NETDEVICES=y CONFIG_GIANFAR=y -# CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set @@ -38,7 +38,6 @@ CONFIG_GIANFAR=y CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y # CONFIG_HW_RANDOM is not set -CONFIG_GEN_RTC=y CONFIG_EXT2_FS=y CONFIG_EXT4_FS=y CONFIG_PROC_KCORE=y @@ -47,4 +46,3 @@ CONFIG_NFS_FS=y CONFIG_ROOT_NFS=y CONFIG_DETECT_HUNG_TASK=y CONFIG_DEBUG_MUTEXES=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/85xx/mpc8560_ads_defconfig b/arch/powerpc/configs/85xx/mpc8560_ads_defconfig index a52b2170ee33..ff981d7905c7 100644 --- a/arch/powerpc/configs/85xx/mpc8560_ads_defconfig +++ b/arch/powerpc/configs/85xx/mpc8560_ads_defconfig @@ -7,6 +7,7 @@ CONFIG_EXPERT=y CONFIG_PARTITION_ADVANCED=y # CONFIG_MSDOS_PARTITION is not set CONFIG_MPC8560_ADS=y +CONFIG_GEN_RTC=y CONFIG_BINFMT_MISC=y CONFIG_MATH_EMULATION=y # CONFIG_SECCOMP is not set @@ -32,16 +33,14 @@ CONFIG_FS_ENET=y # CONFIG_FS_ENET_HAS_SCC is not set CONFIG_GIANFAR=y CONFIG_E1000=y -CONFIG_MARVELL_PHY=y CONFIG_DAVICOM_PHY=y -# CONFIG_INPUT_MOUSEDEV is not set +CONFIG_MARVELL_PHY=y # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set # CONFIG_VT is not set CONFIG_SERIAL_CPM=y CONFIG_SERIAL_CPM_CONSOLE=y -CONFIG_GEN_RTC=y CONFIG_EXT2_FS=y CONFIG_EXT4_FS=y CONFIG_PROC_KCORE=y @@ -50,4 +49,3 @@ CONFIG_NFS_FS=y CONFIG_ROOT_NFS=y CONFIG_DETECT_HUNG_TASK=y CONFIG_DEBUG_MUTEXES=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig b/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig index 
002bb48abaa3..974f0706d777 100644 --- a/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig +++ b/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig @@ -9,6 +9,7 @@ CONFIG_EXPERT=y CONFIG_PARTITION_ADVANCED=y # CONFIG_MSDOS_PARTITION is not set CONFIG_MPC85xx_CDS=y +CONFIG_GEN_RTC=y CONFIG_BINFMT_MISC=y CONFIG_MATH_EMULATION=y # CONFIG_SECCOMP is not set @@ -35,7 +36,6 @@ CONFIG_BLK_DEV_VIA82CXXX=y CONFIG_NETDEVICES=y CONFIG_GIANFAR=y CONFIG_E1000=y -# CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set @@ -43,7 +43,6 @@ CONFIG_E1000=y CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y # CONFIG_HW_RANDOM is not set -CONFIG_GEN_RTC=y CONFIG_EXT2_FS=y CONFIG_EXT4_FS=y CONFIG_PROC_KCORE=y @@ -52,4 +51,3 @@ CONFIG_NFS_FS=y CONFIG_ROOT_NFS=y CONFIG_DETECT_HUNG_TASK=y CONFIG_DEBUG_MUTEXES=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/85xx/sbc8548_defconfig b/arch/powerpc/configs/85xx/sbc8548_defconfig index 97ae02377cf3..7e3e84a842e4 100644 --- a/arch/powerpc/configs/85xx/sbc8548_defconfig +++ b/arch/powerpc/configs/85xx/sbc8548_defconfig @@ -6,6 +6,7 @@ CONFIG_EXPERT=y CONFIG_SLAB=y # CONFIG_BLK_DEV_BSG is not set CONFIG_SBC8548=y +CONFIG_GEN_RTC=y CONFIG_BINFMT_MISC=y CONFIG_MATH_EMULATION=y # CONFIG_SECCOMP is not set @@ -36,7 +37,6 @@ CONFIG_BLK_DEV_RAM=y CONFIG_NETDEVICES=y CONFIG_GIANFAR=y CONFIG_BROADCOM_PHY=y -# CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set @@ -44,10 +44,8 @@ CONFIG_BROADCOM_PHY=y CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y # CONFIG_HW_RANDOM is not set -CONFIG_GEN_RTC=y # CONFIG_USB_SUPPORT is not set CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_NFS_FS=y CONFIG_ROOT_NFS=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/85xx/socrates_defconfig b/arch/powerpc/configs/85xx/socrates_defconfig index 13579cb30539..6106fadbbd8b 100644 --- a/arch/powerpc/configs/85xx/socrates_defconfig +++ b/arch/powerpc/configs/85xx/socrates_defconfig @@ -42,8 +42,6 @@ CONFIG_BLK_DEV_SD=y CONFIG_NETDEVICES=y CONFIG_GIANFAR=y CONFIG_MARVELL_PHY=y -CONFIG_INPUT_MOUSEDEV_SCREEN_X=800 -CONFIG_INPUT_MOUSEDEV_SCREEN_Y=480 CONFIG_INPUT_EVDEV=y # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set @@ -86,4 +84,3 @@ CONFIG_CRAMFS=y CONFIG_NFS_FS=y CONFIG_ROOT_NFS=y CONFIG_FONTS=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/85xx/stx_gp3_defconfig b/arch/powerpc/configs/85xx/stx_gp3_defconfig index 384926f3ce1d..5b9cc01b9098 100644 --- a/arch/powerpc/configs/85xx/stx_gp3_defconfig +++ b/arch/powerpc/configs/85xx/stx_gp3_defconfig @@ -39,8 +39,6 @@ CONFIG_SCSI_CONSTANTS=y CONFIG_NETDEVICES=y CONFIG_GIANFAR=y CONFIG_MARVELL_PHY=y -CONFIG_INPUT_MOUSEDEV_SCREEN_X=1280 -CONFIG_INPUT_MOUSEDEV_SCREEN_Y=1024 CONFIG_INPUT_JOYDEV=m CONFIG_INPUT_EVDEV=m # CONFIG_VT is not set @@ -68,4 +66,3 @@ CONFIG_CRC_T10DIF=m CONFIG_DETECT_HUNG_TASK=y # CONFIG_DEBUG_BUGVERBOSE is not set CONFIG_BDI_SWITCH=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/85xx/tqm8540_defconfig b/arch/powerpc/configs/85xx/tqm8540_defconfig index 908f3885f4a5..98982a0e82d8 100644 --- a/arch/powerpc/configs/85xx/tqm8540_defconfig +++ b/arch/powerpc/configs/85xx/tqm8540_defconfig @@ -9,6 +9,7 @@ CONFIG_EXPERT=y CONFIG_PARTITION_ADVANCED=y # CONFIG_MSDOS_PARTITION is not set CONFIG_TQM8540=y +CONFIG_GEN_RTC=y CONFIG_MATH_EMULATION=y CONFIG_PCI=y CONFIG_NET=y @@ -35,14 +36,12 @@ 
CONFIG_BLK_DEV_VIA82CXXX=y CONFIG_NETDEVICES=y CONFIG_GIANFAR=y CONFIG_E100=y -# CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set # CONFIG_VT is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_GEN_RTC=y CONFIG_I2C=y CONFIG_I2C_CHARDEV=y CONFIG_I2C_MPC=y @@ -56,4 +55,3 @@ CONFIG_JFFS2_FS=y CONFIG_CRAMFS=y CONFIG_NFS_FS=y CONFIG_ROOT_NFS=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/85xx/tqm8541_defconfig b/arch/powerpc/configs/85xx/tqm8541_defconfig index f47e57610b7c..a6e21db1dafe 100644 --- a/arch/powerpc/configs/85xx/tqm8541_defconfig +++ b/arch/powerpc/configs/85xx/tqm8541_defconfig @@ -9,6 +9,7 @@ CONFIG_EXPERT=y CONFIG_PARTITION_ADVANCED=y # CONFIG_MSDOS_PARTITION is not set CONFIG_TQM8541=y +CONFIG_GEN_RTC=y CONFIG_MATH_EMULATION=y CONFIG_PCI=y CONFIG_NET=y @@ -35,7 +36,6 @@ CONFIG_BLK_DEV_VIA82CXXX=y CONFIG_NETDEVICES=y CONFIG_GIANFAR=y CONFIG_E100=y -# CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set @@ -44,7 +44,6 @@ CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_CPM=y CONFIG_SERIAL_CPM_CONSOLE=y -CONFIG_GEN_RTC=y CONFIG_I2C=y CONFIG_I2C_CHARDEV=y CONFIG_I2C_MPC=y @@ -58,4 +57,3 @@ CONFIG_JFFS2_FS=y CONFIG_CRAMFS=y CONFIG_NFS_FS=y CONFIG_ROOT_NFS=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/85xx/tqm8548_defconfig b/arch/powerpc/configs/85xx/tqm8548_defconfig index 42f5d0a7698e..2697e4e8a761 100644 --- a/arch/powerpc/configs/85xx/tqm8548_defconfig +++ b/arch/powerpc/configs/85xx/tqm8548_defconfig @@ -43,7 +43,6 @@ CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=32768 CONFIG_NETDEVICES=y CONFIG_GIANFAR=y -# CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set @@ -66,4 +65,3 @@ CONFIG_ROOT_NFS=y CONFIG_DETECT_HUNG_TASK=y CONFIG_DEBUG_MUTEXES=y # CONFIG_DEBUG_BUGVERBOSE is not set -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/85xx/tqm8555_defconfig b/arch/powerpc/configs/85xx/tqm8555_defconfig index 71552b7929cd..ca1de3979474 100644 --- a/arch/powerpc/configs/85xx/tqm8555_defconfig +++ b/arch/powerpc/configs/85xx/tqm8555_defconfig @@ -9,6 +9,7 @@ CONFIG_EXPERT=y CONFIG_PARTITION_ADVANCED=y # CONFIG_MSDOS_PARTITION is not set CONFIG_TQM8555=y +CONFIG_GEN_RTC=y CONFIG_MATH_EMULATION=y CONFIG_PCI=y CONFIG_NET=y @@ -35,7 +36,6 @@ CONFIG_BLK_DEV_VIA82CXXX=y CONFIG_NETDEVICES=y CONFIG_GIANFAR=y CONFIG_E100=y -# CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set @@ -44,7 +44,6 @@ CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_CPM=y CONFIG_SERIAL_CPM_CONSOLE=y -CONFIG_GEN_RTC=y CONFIG_I2C=y CONFIG_I2C_CHARDEV=y CONFIG_I2C_MPC=y @@ -58,4 +57,3 @@ CONFIG_JFFS2_FS=y CONFIG_CRAMFS=y CONFIG_NFS_FS=y CONFIG_ROOT_NFS=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/85xx/tqm8560_defconfig b/arch/powerpc/configs/85xx/tqm8560_defconfig index 25aac973d6d7..ca3b8c8ef30f 100644 --- a/arch/powerpc/configs/85xx/tqm8560_defconfig +++ b/arch/powerpc/configs/85xx/tqm8560_defconfig @@ -9,6 +9,7 @@ CONFIG_EXPERT=y CONFIG_PARTITION_ADVANCED=y # CONFIG_MSDOS_PARTITION is not set CONFIG_TQM8560=y +CONFIG_GEN_RTC=y CONFIG_MATH_EMULATION=y CONFIG_PCI=y CONFIG_NET=y @@ -35,7 +36,6 @@ CONFIG_BLK_DEV_VIA82CXXX=y CONFIG_NETDEVICES=y CONFIG_GIANFAR=y CONFIG_E100=y -# 
CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set @@ -44,7 +44,6 @@ CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_CPM=y CONFIG_SERIAL_CPM_CONSOLE=y -CONFIG_GEN_RTC=y CONFIG_I2C=y CONFIG_I2C_CHARDEV=y CONFIG_I2C_MPC=y @@ -58,4 +57,3 @@ CONFIG_JFFS2_FS=y CONFIG_CRAMFS=y CONFIG_NFS_FS=y CONFIG_ROOT_NFS=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig b/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig index 72900b84d3e0..6531139a8a8d 100644 --- a/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig +++ b/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig @@ -54,7 +54,6 @@ CONFIG_IP_PIMSM_V2=y # CONFIG_INET_XFRM_MODE_TRANSPORT is not set # CONFIG_INET_XFRM_MODE_TUNNEL is not set # CONFIG_INET_XFRM_MODE_BEET is not set -CONFIG_IPV6=y CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_MTD=y CONFIG_MTD_REDBOOT_PARTS=y @@ -86,7 +85,6 @@ CONFIG_DUMMY=y CONFIG_GIANFAR=y CONFIG_E1000=y CONFIG_BROADCOM_PHY=y -# CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set CONFIG_SERIO_LIBPS2=y @@ -107,8 +105,8 @@ CONFIG_SENSORS_LM90=y CONFIG_WATCHDOG=y CONFIG_USB=y CONFIG_USB_MON=y -CONFIG_USB_ISP1760=y CONFIG_USB_STORAGE=y +CONFIG_USB_ISP1760=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y CONFIG_LEDS_PCA955X=y @@ -143,4 +141,3 @@ CONFIG_DETECT_HUNG_TASK=y # CONFIG_DEBUG_BUGVERBOSE is not set CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_MD5=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/adder875_defconfig b/arch/powerpc/configs/adder875_defconfig index 6a3f825452e9..935ea3ade7de 100644 --- a/arch/powerpc/configs/adder875_defconfig +++ b/arch/powerpc/configs/adder875_defconfig @@ -12,6 +12,7 @@ CONFIG_PARTITION_ADVANCED=y # CONFIG_IOSCHED_CFQ is not set CONFIG_PPC_ADDER875=y CONFIG_8xx_COPYBACK=y +CONFIG_GEN_RTC=y CONFIG_HZ_1000=y # CONFIG_SECCOMP is not set CONFIG_NET=y @@ -41,7 +42,6 @@ CONFIG_DAVICOM_PHY=y # CONFIG_LEGACY_PTYS is not set CONFIG_SERIAL_CPM=y CONFIG_SERIAL_CPM_CONSOLE=y -CONFIG_GEN_RTC=y # CONFIG_HWMON is not set CONFIG_THERMAL=y # CONFIG_USB_SUPPORT is not set diff --git a/arch/powerpc/configs/amigaone_defconfig b/arch/powerpc/configs/amigaone_defconfig index 8d3e3c41258d..12f397d403c6 100644 --- a/arch/powerpc/configs/amigaone_defconfig +++ b/arch/powerpc/configs/amigaone_defconfig @@ -45,7 +45,6 @@ CONFIG_PARPORT_PC_FIFO=y CONFIG_BLK_DEV_FD=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y -CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y CONFIG_BLK_DEV_SR=y @@ -120,5 +119,4 @@ CONFIG_XMON=y CONFIG_XMON_DEFAULT=y CONFIG_CRYPTO_CBC=m CONFIG_CRYPTO_PCBC=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set # CONFIG_CRYPTO_HW is not set diff --git a/arch/powerpc/configs/be.config b/arch/powerpc/configs/be.config new file mode 100644 index 000000000000..c5cdc99a6530 --- /dev/null +++ b/arch/powerpc/configs/be.config @@ -0,0 +1 @@ +CONFIG_CPU_BIG_ENDIAN=y diff --git a/arch/powerpc/configs/c2k_defconfig b/arch/powerpc/configs/c2k_defconfig index 7c9d95370150..f1552af9eecc 100644 --- a/arch/powerpc/configs/c2k_defconfig +++ b/arch/powerpc/configs/c2k_defconfig @@ -27,6 +27,7 @@ CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y CONFIG_CPU_FREQ_GOV_PERFORMANCE=y CONFIG_CPU_FREQ_GOV_POWERSAVE=m CONFIG_CPU_FREQ_GOV_ONDEMAND=m +CONFIG_GEN_RTC=y CONFIG_HIGHMEM=y CONFIG_PREEMPT_VOLUNTARY=y CONFIG_BINFMT_MISC=y @@ -197,7 +198,6 @@ CONFIG_TUN=m # CONFIG_ATM_DRIVERS is not set CONFIG_MV643XX_ETH=y CONFIG_VITESSE_PHY=y -# 
CONFIG_INPUT_MOUSEDEV_PSAUX is not set CONFIG_INPUT_EVDEV=y # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set @@ -209,7 +209,6 @@ CONFIG_SERIAL_NONSTANDARD=y CONFIG_SERIAL_MPSC=y CONFIG_SERIAL_MPSC_CONSOLE=y CONFIG_NVRAM=m -CONFIG_GEN_RTC=m CONFIG_RAW_DRIVER=y CONFIG_MAX_RAW_DEVS=8192 CONFIG_I2C=m @@ -390,7 +389,6 @@ CONFIG_SECURITY_NETWORK=y CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SELINUX_BOOTPARAM=y CONFIG_SECURITY_SELINUX_DISABLE=y -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_SHA1=y @@ -402,4 +400,3 @@ CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/cell_defconfig b/arch/powerpc/configs/cell_defconfig index aa564599e368..560a93a84efe 100644 --- a/arch/powerpc/configs/cell_defconfig +++ b/arch/powerpc/configs/cell_defconfig @@ -4,7 +4,6 @@ CONFIG_ALTIVEC=y CONFIG_SMP=y CONFIG_NR_CPUS=4 CONFIG_SYSVIPC=y -CONFIG_FHANDLE=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_IKCONFIG=y @@ -34,10 +33,10 @@ CONFIG_CPU_FREQ_GOV_POWERSAVE=y CONFIG_CPU_FREQ_GOV_USERSPACE=y CONFIG_CPU_FREQ_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_GEN_RTC=y CONFIG_BINFMT_MISC=m CONFIG_IRQ_ALL_CPUS=y CONFIG_NUMA=y -CONFIG_MEMORY_HOTREMOVE=y CONFIG_PPC_64K_PAGES=y CONFIG_SCHED_SMT=y CONFIG_PCIEPORTBUS=y @@ -53,7 +52,6 @@ CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=y CONFIG_SYN_COOKIES=y # CONFIG_INET_XFRM_MODE_BEET is not set -CONFIG_IPV6=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m @@ -141,7 +139,6 @@ CONFIG_SKY2=m CONFIG_GELIC_NET=m CONFIG_GELIC_WIRELESS=y CONFIG_SPIDER_NET=y -# CONFIG_INPUT_MOUSEDEV_PSAUX is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO_I8042 is not set @@ -149,8 +146,6 @@ CONFIG_SPIDER_NET=y CONFIG_SERIAL_NONSTANDARD=y CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_TXX9_NR_UARTS=2 -CONFIG_SERIAL_TXX9_CONSOLE=y CONFIG_SERIAL_OF_PLATFORM=y CONFIG_HVC_RTAS=y CONFIG_IPMI_HANDLER=m @@ -159,7 +154,6 @@ CONFIG_IPMI_SI=m CONFIG_IPMI_WATCHDOG=m CONFIG_IPMI_POWEROFF=m # CONFIG_HW_RANDOM is not set -CONFIG_GEN_RTC=y CONFIG_I2C=y CONFIG_WATCHDOG=y # CONFIG_VGA_CONSOLE is not set @@ -207,7 +201,6 @@ CONFIG_NLS_ISO8859_13=m CONFIG_NLS_ISO8859_14=m CONFIG_NLS_ISO8859_15=m # CONFIG_ENABLE_MUST_CHECK is not set -CONFIG_DEBUG_FS=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_MUTEXES=y diff --git a/arch/powerpc/configs/chrp32_defconfig b/arch/powerpc/configs/chrp32_defconfig index 1f6f90cd8aff..a203b1cf67d3 100644 --- a/arch/powerpc/configs/chrp32_defconfig +++ b/arch/powerpc/configs/chrp32_defconfig @@ -16,6 +16,7 @@ CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_PARTITION_ADVANCED=y CONFIG_MAC_PARTITION=y # CONFIG_PPC_PMAC is not set +CONFIG_GEN_RTC=y CONFIG_HIGHMEM=y CONFIG_BINFMT_MISC=y CONFIG_IRQ_ALL_CPUS=y @@ -79,7 +80,6 @@ CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y # CONFIG_HW_RANDOM is not set CONFIG_NVRAM=y -CONFIG_GEN_RTC=y # CONFIG_HWMON is not set CONFIG_FB=y CONFIG_FIRMWARE_EDID=y @@ -124,5 +124,4 @@ CONFIG_XMON=y CONFIG_XMON_DEFAULT=y CONFIG_CRYPTO_CBC=m CONFIG_CRYPTO_PCBC=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set # CONFIG_CRYPTO_HW is not set diff --git a/arch/powerpc/configs/ep8248e_defconfig b/arch/powerpc/configs/ep8248e_defconfig index 3403b85f9d81..2e6c8a45ae88 100644 --- a/arch/powerpc/configs/ep8248e_defconfig +++ b/arch/powerpc/configs/ep8248e_defconfig @@ -70,5 +70,4 @@ CONFIG_CRYPTO_ECB=y CONFIG_CRYPTO_PCBC=y CONFIG_CRYPTO_MD5=y 
CONFIG_CRYPTO_DES=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set # CONFIG_CRYPTO_HW is not set diff --git a/arch/powerpc/configs/ep88xc_defconfig b/arch/powerpc/configs/ep88xc_defconfig index 95411aeeeb8d..7cb590e8f8fd 100644 --- a/arch/powerpc/configs/ep88xc_defconfig +++ b/arch/powerpc/configs/ep88xc_defconfig @@ -14,6 +14,7 @@ CONFIG_PARTITION_ADVANCED=y # CONFIG_IOSCHED_CFQ is not set CONFIG_PPC_EP88XC=y CONFIG_8xx_COPYBACK=y +CONFIG_GEN_RTC=y CONFIG_HZ_100=y # CONFIG_SECCOMP is not set CONFIG_NET=y @@ -45,7 +46,6 @@ CONFIG_LXT_PHY=y # CONFIG_LEGACY_PTYS is not set CONFIG_SERIAL_CPM=y CONFIG_SERIAL_CPM_CONSOLE=y -CONFIG_GEN_RTC=y # CONFIG_HWMON is not set # CONFIG_USB_SUPPORT is not set # CONFIG_DNOTIFY is not set diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig index e18f2e06553f..063817fee61c 100644 --- a/arch/powerpc/configs/g5_defconfig +++ b/arch/powerpc/configs/g5_defconfig @@ -4,7 +4,6 @@ CONFIG_SMP=y CONFIG_NR_CPUS=4 CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_FHANDLE=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_IKCONFIG=y @@ -25,6 +24,7 @@ CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_GOV_POWERSAVE=y CONFIG_CPU_FREQ_GOV_USERSPACE=y CONFIG_CPU_FREQ_PMAC64=y +CONFIG_GEN_RTC=y CONFIG_KEXEC=y CONFIG_IRQ_ALL_CPUS=y CONFIG_PCI_MSI=y @@ -115,7 +115,6 @@ CONFIG_USB_USBNET=m # CONFIG_USB_NET_NET1080 is not set # CONFIG_USB_NET_CDC_SUBSET is not set # CONFIG_USB_NET_ZAURUS is not set -# CONFIG_INPUT_MOUSEDEV_PSAUX is not set CONFIG_INPUT_JOYDEV=m CONFIG_INPUT_EVDEV=y # CONFIG_KEYBOARD_ATKBD is not set @@ -123,7 +122,6 @@ CONFIG_INPUT_EVDEV=y # CONFIG_SERIO_I8042 is not set # CONFIG_SERIO_SERPORT is not set # CONFIG_HW_RANDOM is not set -CONFIG_GEN_RTC=y CONFIG_RAW_DRIVER=y CONFIG_I2C_CHARDEV=y CONFIG_AGP=m @@ -140,10 +138,11 @@ CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_LOGO=y CONFIG_SOUND=m CONFIG_SND=m -CONFIG_SND_SEQUENCER=m +CONFIG_SND_OSSEMUL=y CONFIG_SND_MIXER_OSS=m CONFIG_SND_PCM_OSS=m -CONFIG_SND_SEQUENCER_OSS=y +CONFIG_SND_SEQUENCER=m +CONFIG_SND_SEQUENCER_OSS=m CONFIG_SND_POWERMAC=m CONFIG_SND_AOA=m CONFIG_SND_AOA_FABRIC_LAYOUT=m @@ -213,20 +212,20 @@ CONFIG_USB_SERIAL_CYBERJACK=m CONFIG_USB_SERIAL_XIRCOM=m CONFIG_USB_SERIAL_OMNINET=m CONFIG_USB_APPLEDISPLAY=m -CONFIG_FS_DAX=y CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y CONFIG_EXT2_FS_SECURITY=y +CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y -CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=y CONFIG_REISERFS_FS_XATTR=y CONFIG_REISERFS_FS_POSIX_ACL=y CONFIG_REISERFS_FS_SECURITY=y CONFIG_XFS_FS=m CONFIG_XFS_POSIX_ACL=y +CONFIG_FS_DAX=y CONFIG_ISO9660_FS=y CONFIG_JOLIET=y CONFIG_ZISOFS=y @@ -254,14 +253,12 @@ CONFIG_NLS_ISO8859_1=y CONFIG_NLS_ISO8859_15=y CONFIG_NLS_UTF8=y CONFIG_CRC_T10DIF=y -CONFIG_DEBUG_FS=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_MUTEXES=y CONFIG_LATENCYTOP=y CONFIG_BOOTX_TEXT=y CONFIG_PPC_EARLY_DEBUG=y -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_HMAC=y @@ -276,5 +273,4 @@ CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set # CONFIG_CRYPTO_HW is not set diff --git a/arch/powerpc/configs/gamecube_defconfig b/arch/powerpc/configs/gamecube_defconfig index c0eec4a5df4e..805b0f87653c 100644 --- a/arch/powerpc/configs/gamecube_defconfig +++ b/arch/powerpc/configs/gamecube_defconfig @@ -45,7 +45,6 @@ CONFIG_BLK_DEV_RAM_COUNT=2 CONFIG_NETDEVICES=y # CONFIG_WLAN is not set CONFIG_INPUT_FF_MEMLESS=m -# CONFIG_INPUT_MOUSEDEV is not set 
CONFIG_INPUT_JOYDEV=y CONFIG_INPUT_EVDEV=y # CONFIG_KEYBOARD_ATKBD is not set @@ -54,7 +53,6 @@ CONFIG_INPUT_JOYSTICK=y # CONFIG_SERIO_I8042 is not set # CONFIG_SERIO_SERPORT is not set CONFIG_LEGACY_PTY_COUNT=64 -# CONFIG_DEVKMEM is not set # CONFIG_HW_RANDOM is not set # CONFIG_HWMON is not set CONFIG_FB=y @@ -66,11 +64,12 @@ CONFIG_LOGO=y # CONFIG_LOGO_LINUX_CLUT224 is not set CONFIG_SOUND=y CONFIG_SND=y -CONFIG_SND_SEQUENCER=y +CONFIG_SND_OSSEMUL=y CONFIG_SND_MIXER_OSS=y CONFIG_SND_PCM_OSS=y -CONFIG_SND_SEQUENCER_OSS=y # CONFIG_SND_VERBOSE_PROCFS is not set +CONFIG_SND_SEQUENCER=y +CONFIG_SND_SEQUENCER_OSS=y # CONFIG_USB_SUPPORT is not set CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_GENERIC=y diff --git a/arch/powerpc/configs/holly_defconfig b/arch/powerpc/configs/holly_defconfig index e56e80090529..71d8d2430b6c 100644 --- a/arch/powerpc/configs/holly_defconfig +++ b/arch/powerpc/configs/holly_defconfig @@ -11,6 +11,7 @@ CONFIG_PARTITION_ADVANCED=y # CONFIG_PPC_PMAC is not set CONFIG_EMBEDDED6xx=y CONFIG_PPC_HOLLY=y +CONFIG_GEN_RTC=y CONFIG_BINFMT_MISC=y CONFIG_CMDLINE_BOOL=y CONFIG_CMDLINE="console=ttyS0,115200" @@ -37,7 +38,6 @@ CONFIG_NETDEVICES=y CONFIG_VORTEX=y CONFIG_TSI108_ETH=y CONFIG_PHYLIB=y -# CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set @@ -49,7 +49,6 @@ CONFIG_SERIAL_8250_EXTENDED=y CONFIG_SERIAL_8250_SHARE_IRQ=y CONFIG_SERIAL_OF_PLATFORM=y # CONFIG_HW_RANDOM is not set -CONFIG_GEN_RTC=y CONFIG_EXT2_FS=y CONFIG_EXT4_FS=y CONFIG_PROC_KCORE=y diff --git a/arch/powerpc/configs/linkstation_defconfig b/arch/powerpc/configs/linkstation_defconfig index b413c19d7031..477794c41d50 100644 --- a/arch/powerpc/configs/linkstation_defconfig +++ b/arch/powerpc/configs/linkstation_defconfig @@ -26,7 +26,6 @@ CONFIG_IP_PNP_BOOTP=y # CONFIG_IPV6 is not set CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m -CONFIG_NF_CT_PROTO_SCTP=m CONFIG_NF_CONNTRACK_AMANDA=m CONFIG_NF_CONNTRACK_FTP=m CONFIG_NF_CONNTRACK_H323=m @@ -79,7 +78,6 @@ CONFIG_NET_TULIP=y CONFIG_TULIP=y CONFIG_TULIP_MMIO=y CONFIG_R8169=y -# CONFIG_INPUT_MOUSEDEV_PSAUX is not set CONFIG_INPUT_EVDEV=m # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set @@ -142,4 +140,3 @@ CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_DEFLATE=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/maple_defconfig b/arch/powerpc/configs/maple_defconfig index c4018179e219..078cdb427fc9 100644 --- a/arch/powerpc/configs/maple_defconfig +++ b/arch/powerpc/configs/maple_defconfig @@ -3,7 +3,6 @@ CONFIG_SMP=y CONFIG_NR_CPUS=4 CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_FHANDLE=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_IKCONFIG=y @@ -24,6 +23,7 @@ CONFIG_MAC_PARTITION=y # CONFIG_PPC_PMAC is not set CONFIG_PPC_MAPLE=y CONFIG_UDBG_RTAS_CONSOLE=y +CONFIG_GEN_RTC=y CONFIG_KEXEC=y CONFIG_IRQ_ALL_CPUS=y CONFIG_PCI_MSI=y @@ -53,9 +53,6 @@ CONFIG_AMD8111_ETH=y CONFIG_TIGON3=y CONFIG_E1000=y CONFIG_USB_PEGASUS=y -# CONFIG_INPUT_MOUSEDEV_PSAUX is not set -CONFIG_INPUT_MOUSEDEV_SCREEN_X=1600 -CONFIG_INPUT_MOUSEDEV_SCREEN_Y=1200 # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set @@ -63,7 +60,6 @@ CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_HVC_RTAS=y # CONFIG_HW_RANDOM is not set -CONFIG_GEN_RTC=y CONFIG_I2C=y CONFIG_I2C_CHARDEV=y CONFIG_I2C_AMD8111=y @@ -100,8 +96,8 @@ CONFIG_USB_SERIAL_KEYSPAN_USA49W=y CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y CONFIG_USB_SERIAL_TI=m 
CONFIG_EXT2_FS=y -CONFIG_FS_DAX=y CONFIG_EXT4_FS=y +CONFIG_FS_DAX=y CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y CONFIG_PROC_KCORE=y @@ -127,5 +123,4 @@ CONFIG_BOOTX_TEXT=y CONFIG_PPC_EARLY_DEBUG=y CONFIG_CRYPTO_ECB=m CONFIG_CRYPTO_PCBC=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set # CONFIG_CRYPTO_HW is not set diff --git a/arch/powerpc/configs/mgcoge_defconfig b/arch/powerpc/configs/mgcoge_defconfig index 197acaa026eb..5d5f08e5b8d9 100644 --- a/arch/powerpc/configs/mgcoge_defconfig +++ b/arch/powerpc/configs/mgcoge_defconfig @@ -46,7 +46,6 @@ CONFIG_BLK_DEV_RAM=y CONFIG_NETDEVICES=y CONFIG_FS_ENET=y CONFIG_FS_ENET_MDIO_FCC=y -CONFIG_FIXED_PHY=y # CONFIG_WLAN is not set # CONFIG_INPUT is not set # CONFIG_SERIO is not set @@ -83,5 +82,4 @@ CONFIG_MAGIC_SYSRQ=y CONFIG_BDI_SWITCH=y CONFIG_CRYPTO_ECB=y CONFIG_CRYPTO_PCBC=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set # CONFIG_CRYPTO_HW is not set diff --git a/arch/powerpc/configs/mpc512x_defconfig b/arch/powerpc/configs/mpc512x_defconfig index 0b4854cf26cb..10be5773ad5d 100644 --- a/arch/powerpc/configs/mpc512x_defconfig +++ b/arch/powerpc/configs/mpc512x_defconfig @@ -12,6 +12,7 @@ CONFIG_PARTITION_ADVANCED=y # CONFIG_IOSCHED_CFQ is not set # CONFIG_PPC_CHRP is not set CONFIG_PPC_MPC512x=y +CONFIG_MPC512x_LPBFIFO=y CONFIG_MPC5121_ADS=y CONFIG_MPC512x_GENERIC=y CONFIG_PDM360NG=y @@ -61,25 +62,22 @@ CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_SG=y CONFIG_NETDEVICES=y CONFIG_FS_ENET=y -CONFIG_MARVELL_PHY=y -CONFIG_DAVICOM_PHY=y -CONFIG_QSEMI_PHY=y -CONFIG_LXT_PHY=y -CONFIG_CICADA_PHY=y -CONFIG_VITESSE_PHY=y -CONFIG_SMSC_PHY=y -CONFIG_BROADCOM_PHY=y -CONFIG_ICPLUS_PHY=y -CONFIG_REALTEK_PHY=y -CONFIG_NATIONAL_PHY=y -CONFIG_STE10XP=y -CONFIG_LSI_ET1011C_PHY=y -CONFIG_FIXED_PHY=y CONFIG_MDIO_BITBANG=y +CONFIG_BROADCOM_PHY=y +CONFIG_CICADA_PHY=y +CONFIG_DAVICOM_PHY=y +CONFIG_ICPLUS_PHY=y +CONFIG_LSI_ET1011C_PHY=y +CONFIG_LXT_PHY=y +CONFIG_MARVELL_PHY=y +CONFIG_NATIONAL_PHY=y +CONFIG_QSEMI_PHY=y +CONFIG_REALTEK_PHY=y +CONFIG_SMSC_PHY=y +CONFIG_STE10XP=y +CONFIG_VITESSE_PHY=y # CONFIG_WLAN is not set -# CONFIG_INPUT_MOUSEDEV_PSAUX is not set CONFIG_INPUT_EVDEV=y -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_MPC52xx=y CONFIG_SERIAL_MPC52xx_CONSOLE=y CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=115200 @@ -111,10 +109,9 @@ CONFIG_RTC_DRV_M41T80=y CONFIG_RTC_DRV_MPC5121=y CONFIG_DMADEVICES=y CONFIG_MPC512X_DMA=y -CONFIG_MPC512x_LPBFIFO=y -CONFIG_FS_DAX=y CONFIG_EXT2_FS=y CONFIG_EXT4_FS=y +CONFIG_FS_DAX=y # CONFIG_DNOTIFY is not set CONFIG_VFAT_FS=y CONFIG_TMPFS=y @@ -126,5 +123,4 @@ CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y # CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set -# CONFIG_CRYPTO_ANSI_CPRNG is not set # CONFIG_CRYPTO_HW is not set diff --git a/arch/powerpc/configs/mpc5200_defconfig b/arch/powerpc/configs/mpc5200_defconfig index 88336d0df0d6..7a2b2aa37def 100644 --- a/arch/powerpc/configs/mpc5200_defconfig +++ b/arch/powerpc/configs/mpc5200_defconfig @@ -50,7 +50,6 @@ CONFIG_NETDEVICES=y CONFIG_FEC_MPC52xx=y CONFIG_AMD_PHY=y CONFIG_LXT_PHY=y -CONFIG_FIXED_PHY=y # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set @@ -71,12 +70,10 @@ CONFIG_SENSORS_LM87=m CONFIG_WATCHDOG=y CONFIG_MFD_SM501=m CONFIG_DRM=y -CONFIG_FB=y CONFIG_FB_FOREIGN_ENDIAN=y CONFIG_FB_RADEON=y CONFIG_FB_SM501=m # CONFIG_VGA_CONSOLE is not set -CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_LOGO=y CONFIG_SOUND=y CONFIG_SND=y @@ -130,4 +127,3 @@ CONFIG_PRINTK_TIME=y CONFIG_DEBUG_INFO=y CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y -# 
CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/mpc7448_hpc2_defconfig b/arch/powerpc/configs/mpc7448_hpc2_defconfig index d933326b4cf9..4b14c02b437c 100644 --- a/arch/powerpc/configs/mpc7448_hpc2_defconfig +++ b/arch/powerpc/configs/mpc7448_hpc2_defconfig @@ -11,6 +11,7 @@ CONFIG_PARTITION_ADVANCED=y # CONFIG_PPC_PMAC is not set CONFIG_EMBEDDED6xx=y CONFIG_MPC7448HPC2=y +CONFIG_GEN_RTC=y CONFIG_BINFMT_MISC=y # CONFIG_SECCOMP is not set CONFIG_NET=y @@ -38,7 +39,6 @@ CONFIG_8139TOO=y # CONFIG_8139TOO_PIO is not set CONFIG_TSI108_ETH=y CONFIG_PHYLIB=y -# CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set @@ -46,7 +46,6 @@ CONFIG_PHYLIB=y CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y # CONFIG_HW_RANDOM is not set -CONFIG_GEN_RTC=y CONFIG_EXT2_FS=y CONFIG_EXT4_FS=y CONFIG_PROC_KCORE=y @@ -54,4 +53,3 @@ CONFIG_TMPFS=y CONFIG_NFS_FS=y CONFIG_ROOT_NFS=y CONFIG_CRC_T10DIF=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/mpc8272_ads_defconfig b/arch/powerpc/configs/mpc8272_ads_defconfig index 4cb0f617c0d6..b1e88b64536b 100644 --- a/arch/powerpc/configs/mpc8272_ads_defconfig +++ b/arch/powerpc/configs/mpc8272_ads_defconfig @@ -77,5 +77,4 @@ CONFIG_CRYPTO_ECB=y CONFIG_CRYPTO_PCBC=y CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_DES=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set # CONFIG_CRYPTO_HW is not set diff --git a/arch/powerpc/configs/mpc83xx_defconfig b/arch/powerpc/configs/mpc83xx_defconfig index 6574477fd726..d1b82035d35f 100644 --- a/arch/powerpc/configs/mpc83xx_defconfig +++ b/arch/powerpc/configs/mpc83xx_defconfig @@ -21,7 +21,6 @@ CONFIG_MPC837x_MDS=y CONFIG_MPC837x_RDB=y CONFIG_SBC834x=y CONFIG_ASP834x=y -CONFIG_QUICC_ENGINE=y CONFIG_QE_GPIO=y CONFIG_MATH_EMULATION=y CONFIG_PCI=y @@ -60,13 +59,11 @@ CONFIG_SATA_SIL=y CONFIG_NETDEVICES=y CONFIG_UCC_GETH=y CONFIG_GIANFAR=y -CONFIG_MARVELL_PHY=y CONFIG_DAVICOM_PHY=y -CONFIG_VITESSE_PHY=y CONFIG_ICPLUS_PHY=y -CONFIG_FIXED_PHY=y +CONFIG_MARVELL_PHY=y +CONFIG_VITESSE_PHY=y CONFIG_INPUT_FF_MEMLESS=m -# CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set @@ -99,6 +96,7 @@ CONFIG_USB_EHCI_FSL=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_DS1307=y CONFIG_RTC_DRV_DS1374=y +CONFIG_QUICC_ENGINE=y CONFIG_EXT2_FS=y CONFIG_EXT4_FS=y CONFIG_PROC_KCORE=y @@ -109,7 +107,5 @@ CONFIG_ROOT_NFS=y CONFIG_CRC_T10DIF=y CONFIG_CRYPTO_ECB=m CONFIG_CRYPTO_PCBC=m -CONFIG_CRYPTO_SHA256=y CONFIG_CRYPTO_SHA512=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_DEV_TALITOS=y diff --git a/arch/powerpc/configs/mpc866_ads_defconfig b/arch/powerpc/configs/mpc866_ads_defconfig index 998454471a48..f1f176c29fa3 100644 --- a/arch/powerpc/configs/mpc866_ads_defconfig +++ b/arch/powerpc/configs/mpc866_ads_defconfig @@ -14,6 +14,7 @@ CONFIG_PARTITION_ADVANCED=y CONFIG_MPC86XADS=y CONFIG_8xx_COPYBACK=y CONFIG_8xx_CPU6=y +CONFIG_GEN_RTC=y CONFIG_HZ_1000=y CONFIG_MATH_EMULATION=y # CONFIG_SECCOMP is not set @@ -28,12 +29,10 @@ CONFIG_SYN_COOKIES=y CONFIG_BLK_DEV_LOOP=y CONFIG_NETDEVICES=y CONFIG_FS_ENET=y -CONFIG_FIXED_PHY=y # CONFIG_VT is not set # CONFIG_LEGACY_PTYS is not set CONFIG_SERIAL_CPM=y CONFIG_SERIAL_CPM_CONSOLE=y -CONFIG_GEN_RTC=y CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT4_FS=y @@ -43,4 +42,3 @@ CONFIG_NFS_FS=y CONFIG_ROOT_NFS=y CONFIG_CRC_CCITT=y CONFIG_CRC32_SLICEBY4=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/mpc86xx_basic_defconfig 
b/arch/powerpc/configs/mpc86xx_basic_defconfig index 3283f0586e11..67bd1fa036ee 100644 --- a/arch/powerpc/configs/mpc86xx_basic_defconfig +++ b/arch/powerpc/configs/mpc86xx_basic_defconfig @@ -1,11 +1,11 @@ -CONFIG_HIGHMEM=y -CONFIG_KEXEC=y CONFIG_PPC_86xx=y -CONFIG_PROC_KCORE=y +CONFIG_MPC8641_HPCN=y +CONFIG_SBC8641D=y +CONFIG_MPC8610_HPCD=y CONFIG_GEF_PPC9A=y CONFIG_GEF_SBC310=y CONFIG_GEF_SBC610=y -CONFIG_MPC8610_HPCD=y -CONFIG_MPC8641_HPCN=y -CONFIG_SBC8641D=y CONFIG_MVME7100=y +CONFIG_HIGHMEM=y +CONFIG_KEXEC=y +CONFIG_PROC_KCORE=y diff --git a/arch/powerpc/configs/mpc885_ads_defconfig b/arch/powerpc/configs/mpc885_ads_defconfig index 91f53f1bec5d..ec3fcc2bf737 100644 --- a/arch/powerpc/configs/mpc885_ads_defconfig +++ b/arch/powerpc/configs/mpc885_ads_defconfig @@ -13,6 +13,7 @@ CONFIG_EXPERT=y CONFIG_PARTITION_ADVANCED=y # CONFIG_IOSCHED_CFQ is not set CONFIG_8xx_COPYBACK=y +CONFIG_GEN_RTC=y CONFIG_HZ_100=y # CONFIG_SECCOMP is not set CONFIG_NET=y @@ -51,7 +52,6 @@ CONFIG_DAVICOM_PHY=y # CONFIG_LEGACY_PTYS is not set CONFIG_SERIAL_CPM=y CONFIG_SERIAL_CPM_CONSOLE=y -CONFIG_GEN_RTC=y # CONFIG_HWMON is not set # CONFIG_USB_SUPPORT is not set # CONFIG_DNOTIFY is not set diff --git a/arch/powerpc/configs/mvme5100_defconfig b/arch/powerpc/configs/mvme5100_defconfig index 139add95a16a..63e38c7220f1 100644 --- a/arch/powerpc/configs/mvme5100_defconfig +++ b/arch/powerpc/configs/mvme5100_defconfig @@ -35,7 +35,6 @@ CONFIG_IP_PNP_BOOTP=y # CONFIG_IPV6 is not set CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m -CONFIG_NF_CT_PROTO_SCTP=m CONFIG_NF_CONNTRACK_AMANDA=m CONFIG_NF_CONNTRACK_FTP=m CONFIG_NF_CONNTRACK_H323=m @@ -70,7 +69,6 @@ CONFIG_TUN=m # CONFIG_NET_VENDOR_3COM is not set CONFIG_E100=y # CONFIG_WLAN is not set -# CONFIG_INPUT_MOUSEDEV_PSAUX is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set @@ -130,4 +128,3 @@ CONFIG_CRYPTO_DES=y CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_DEFLATE=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig index fe43ff47bd2f..6daa56f8895c 100644 --- a/arch/powerpc/configs/pasemi_defconfig +++ b/arch/powerpc/configs/pasemi_defconfig @@ -3,7 +3,6 @@ CONFIG_ALTIVEC=y CONFIG_SMP=y CONFIG_NR_CPUS=2 CONFIG_SYSVIPC=y -CONFIG_FHANDLE=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_BLK_DEV_INITRD=y @@ -116,9 +115,10 @@ CONFIG_VGACON_SOFT_SCROLLBACK=y CONFIG_LOGO=y CONFIG_SOUND=y CONFIG_SND=y -CONFIG_SND_SEQUENCER=y +CONFIG_SND_OSSEMUL=y CONFIG_SND_MIXER_OSS=y CONFIG_SND_PCM_OSS=y +CONFIG_SND_SEQUENCER=y CONFIG_SND_SEQUENCER_OSS=y CONFIG_SND_USB_AUDIO=y CONFIG_SND_USB_USX2Y=y @@ -145,6 +145,7 @@ CONFIG_EDAC=y CONFIG_EDAC_PASEMI=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_DS1307=y +CONFIG_RAS=y CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y @@ -167,7 +168,6 @@ CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y CONFIG_CRC_CCITT=y CONFIG_PRINTK_TIME=y -CONFIG_DEBUG_FS=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y @@ -175,7 +175,5 @@ CONFIG_DETECT_HUNG_TASK=y CONFIG_XMON=y CONFIG_XMON_DEFAULT=y CONFIG_CRYPTO_MD4=y -CONFIG_CRYPTO_SHA256=y CONFIG_CRYPTO_SHA512=y CONFIG_CRYPTO_BLOWFISH=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig index fc1e7a7388b8..1aab9a62a681 100644 --- a/arch/powerpc/configs/pmac32_defconfig +++ b/arch/powerpc/configs/pmac32_defconfig @@ -21,6 +21,7 @@ CONFIG_CPU_FREQ_GOV_POWERSAVE=y 
CONFIG_CPU_FREQ_GOV_USERSPACE=y CONFIG_CPU_FREQ_PMAC=y CONFIG_PPC601_SYNC_FIX=y +CONFIG_GEN_RTC=y CONFIG_HIGHMEM=y CONFIG_BINFMT_MISC=m CONFIG_HIBERNATION=y @@ -179,10 +180,10 @@ CONFIG_PPP_ASYNC=y CONFIG_PPP_SYNC_TTY=m CONFIG_USB_USBNET=m # CONFIG_USB_NET_CDC_SUBSET is not set -CONFIG_PRISM54=m CONFIG_B43=m CONFIG_B43LEGACY=m CONFIG_P54_COMMON=m +CONFIG_PRISM54=m CONFIG_INPUT_EVDEV=y # CONFIG_KEYBOARD_ATKBD is not set # CONFIG_MOUSE_PS2 is not set @@ -193,7 +194,6 @@ CONFIG_SERIAL_8250=m CONFIG_SERIAL_PMACZILOG=m CONFIG_SERIAL_PMACZILOG_TTYS=y CONFIG_NVRAM=y -CONFIG_GEN_RTC=y CONFIG_I2C_CHARDEV=m CONFIG_APM_POWER=y CONFIG_BATTERY_PMU=y @@ -201,8 +201,9 @@ CONFIG_HWMON=m CONFIG_AGP=m CONFIG_AGP_UNINORTH=m CONFIG_DRM=m -CONFIG_DRM_R128=m CONFIG_DRM_RADEON=m +CONFIG_DRM_LEGACY=y +CONFIG_DRM_R128=m CONFIG_FB=y CONFIG_FB_OF=y CONFIG_FB_CONTROL=y @@ -226,11 +227,12 @@ CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_LOGO=y CONFIG_SOUND=m CONFIG_SND=m -CONFIG_SND_SEQUENCER=m -CONFIG_SND_SEQ_DUMMY=m +CONFIG_SND_OSSEMUL=y CONFIG_SND_MIXER_OSS=m CONFIG_SND_PCM_OSS=m -CONFIG_SND_SEQUENCER_OSS=y +CONFIG_SND_SEQUENCER=m +CONFIG_SND_SEQ_DUMMY=m +CONFIG_SND_SEQUENCER_OSS=m CONFIG_SND_DUMMY=m CONFIG_SND_POWERMAC=m CONFIG_SND_AOA=m @@ -300,8 +302,6 @@ CONFIG_NFSD_V4=y CONFIG_NLS_CODEPAGE_437=m CONFIG_NLS_ISO8859_1=m CONFIG_CRC_T10DIF=y -CONFIG_LIBCRC32C=m -CONFIG_DEBUG_FS=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y @@ -310,7 +310,6 @@ CONFIG_XMON=y CONFIG_XMON_DEFAULT=y CONFIG_BOOTX_TEXT=y CONFIG_PPC_EARLY_DEBUG=y -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_MD4=m CONFIG_CRYPTO_SHA512=m @@ -325,4 +324,3 @@ CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_DEFLATE=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig index 34fc9bbfca9e..caee834760d2 100644 --- a/arch/powerpc/configs/powernv_defconfig +++ b/arch/powerpc/configs/powernv_defconfig @@ -1,10 +1,8 @@ CONFIG_PPC64=y -CONFIG_SMP=y CONFIG_NR_CPUS=2048 CONFIG_CPU_LITTLE_ENDIAN=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_FHANDLE=y CONFIG_AUDIT=y CONFIG_IRQ_DOMAIN_DEBUG=y CONFIG_NO_HZ=y @@ -26,8 +24,8 @@ CONFIG_CGROUP_FREEZER=y CONFIG_CPUSETS=y CONFIG_CGROUP_DEVICE=y CONFIG_CGROUP_CPUACCT=y -CONFIG_CGROUP_BPF=y CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y CONFIG_USER_NS=y CONFIG_BLK_DEV_INITRD=y CONFIG_BPF_SYSCALL=y @@ -62,7 +60,6 @@ CONFIG_PPC_64K_PAGES=y CONFIG_PPC_SUBPAGE_PROT=y CONFIG_SCHED_SMT=y CONFIG_PM=y -CONFIG_PCI_MSI=y CONFIG_HOTPLUG_PCI=y CONFIG_NET=y CONFIG_PACKET=y @@ -158,7 +155,6 @@ CONFIG_NETCONSOLE=y CONFIG_TUN=m CONFIG_VETH=m CONFIG_VIRTIO_NET=m -CONFIG_VHOST_NET=m CONFIG_VORTEX=m CONFIG_ACENIC=m CONFIG_ACENIC_OMIT_TIGON_I=y @@ -184,16 +180,13 @@ CONFIG_PPP_DEFLATE=m CONFIG_PPPOE=m CONFIG_PPP_ASYNC=m CONFIG_PPP_SYNC_TTY=m -# CONFIG_INPUT_MOUSEDEV_PSAUX is not set CONFIG_INPUT_EVDEV=m CONFIG_INPUT_MISC=y # CONFIG_SERIO_SERPORT is not set -CONFIG_DEVPTS_MULTIPLE_INSTANCES=y CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_JSM=m CONFIG_VIRTIO_CONSOLE=m -CONFIG_POWERNV_OP_PANEL=m CONFIG_IPMI_HANDLER=y CONFIG_IPMI_DEVICE_INTERFACE=y CONFIG_IPMI_POWERNV=y @@ -296,9 +289,12 @@ CONFIG_DEBUG_STACKOVERFLOW=y CONFIG_SOFTLOCKUP_DETECTOR=y CONFIG_HARDLOCKUP_DETECTOR=y CONFIG_LATENCYTOP=y +CONFIG_FTRACE=y +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y CONFIG_SCHED_TRACER=y +CONFIG_FTRACE_SYSCALLS=y CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_UPROBE_EVENT=y CONFIG_CODE_PATCHING_SELFTEST=y 
CONFIG_FTR_FIXUP_SELFTEST=y CONFIG_MSI_BITMAP_SELFTEST=y @@ -310,6 +306,7 @@ CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_CRC32C_VPMSUM=m CONFIG_CRYPTO_MD5_PPC=m CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_SHA1_PPC=m CONFIG_CRYPTO_SHA256=y CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m @@ -318,14 +315,13 @@ CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAST6=m CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SALSA20=m -CONFIG_CRYPTO_SHA1_PPC=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m CONFIG_CRYPTO_DEV_NX=y CONFIG_CRYPTO_DEV_VMX=y -CONFIG_CRYPTO_DEV_VMX_ENCRYPT=m CONFIG_VIRTUALIZATION=y CONFIG_KVM_BOOK3S_64=m CONFIG_KVM_BOOK3S_64_HV=m +CONFIG_VHOST_NET=m diff --git a/arch/powerpc/configs/ppc40x_defconfig b/arch/powerpc/configs/ppc40x_defconfig index 370c0bbcff71..10fb1df63b46 100644 --- a/arch/powerpc/configs/ppc40x_defconfig +++ b/arch/powerpc/configs/ppc40x_defconfig @@ -51,9 +51,9 @@ CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_EXTENDED=y CONFIG_SERIAL_8250_SHARE_IRQ=y +CONFIG_SERIAL_OF_PLATFORM=y CONFIG_SERIAL_UARTLITE=y CONFIG_SERIAL_UARTLITE_CONSOLE=y -CONFIG_SERIAL_OF_PLATFORM=y # CONFIG_HW_RANDOM is not set CONFIG_XILINX_HWICAP=m CONFIG_I2C=m @@ -85,4 +85,3 @@ CONFIG_CRYPTO_ECB=y CONFIG_CRYPTO_PCBC=y CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_DES=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/ppc44x_defconfig b/arch/powerpc/configs/ppc44x_defconfig index 2766e8f590bc..66dd6bf45cde 100644 --- a/arch/powerpc/configs/ppc44x_defconfig +++ b/arch/powerpc/configs/ppc44x_defconfig @@ -68,9 +68,9 @@ CONFIG_SERIAL_8250_CONSOLE=y # CONFIG_SERIAL_8250_PCI is not set CONFIG_SERIAL_8250_EXTENDED=y CONFIG_SERIAL_8250_SHARE_IRQ=y +CONFIG_SERIAL_OF_PLATFORM=y CONFIG_SERIAL_UARTLITE=y CONFIG_SERIAL_UARTLITE_CONSOLE=y -CONFIG_SERIAL_OF_PLATFORM=y # CONFIG_HW_RANDOM is not set CONFIG_XILINX_HWICAP=m CONFIG_I2C=m @@ -94,7 +94,6 @@ CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_JFFS2_FS=y CONFIG_UBIFS_FS=m -CONFIG_LOGFS=m CONFIG_CRAMFS=y CONFIG_SQUASHFS=m CONFIG_SQUASHFS_XATTR=y @@ -108,6 +107,5 @@ CONFIG_MAGIC_SYSRQ=y CONFIG_DETECT_HUNG_TASK=y CONFIG_CRYPTO_ECB=y CONFIG_CRYPTO_PCBC=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set # CONFIG_CRYPTO_HW is not set CONFIG_VIRTUALIZATION=y diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index c5246d29f385..6ddca80c52c3 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig @@ -1,8 +1,6 @@ CONFIG_PPC64=y -CONFIG_SMP=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_FHANDLE=y CONFIG_IRQ_DOMAIN_DEBUG=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y @@ -28,9 +26,10 @@ CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y CONFIG_PARTITION_ADVANCED=y CONFIG_PPC_SPLPAR=y +CONFIG_DTL=y CONFIG_SCANLOG=m CONFIG_PPC_SMLPAR=y -CONFIG_DTL=y +CONFIG_IBMEBUS=y CONFIG_PPC_MAPLE=y CONFIG_PPC_PASEMI=y CONFIG_PPC_PASEMI_IOMMU=y @@ -41,9 +40,8 @@ CONFIG_PS3_FLASH=m CONFIG_PS3_LPM=m CONFIG_PPC_IBM_CELL_BLADE=y CONFIG_RTAS_FLASH=m -CONFIG_IBMEBUS=y -CONFIG_CPU_FREQ_PMAC64=y CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_PMAC64=y CONFIG_HZ_100=y CONFIG_BINFMT_MISC=m CONFIG_PPC_TRANSACTIONAL_MEM=y @@ -51,14 +49,15 @@ CONFIG_KEXEC=y CONFIG_KEXEC_FILE=y CONFIG_CRASH_DUMP=y CONFIG_IRQ_ALL_CPUS=y -CONFIG_MEMORY_HOTREMOVE=y CONFIG_KSM=y +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_PPC_64K_PAGES=y CONFIG_SCHED_SMT=y -CONFIG_PCCARD=y -CONFIG_ELECTRA_CF=y CONFIG_HOTPLUG_PCI=y CONFIG_HOTPLUG_PCI_RPA=m CONFIG_HOTPLUG_PCI_RPA_DLPAR=m +CONFIG_PCCARD=y +CONFIG_ELECTRA_CF=y 
CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -154,7 +153,6 @@ CONFIG_DUMMY=m CONFIG_NETCONSOLE=y CONFIG_TUN=m CONFIG_VIRTIO_NET=m -CONFIG_VHOST_NET=m CONFIG_VORTEX=m CONFIG_ACENIC=m CONFIG_ACENIC_OMIT_TIGON_I=y @@ -181,15 +179,14 @@ CONFIG_SUNGEM=y CONFIG_GELIC_NET=m CONFIG_GELIC_WIRELESS=y CONFIG_SPIDER_NET=m -CONFIG_MARVELL_PHY=y CONFIG_BROADCOM_PHY=m +CONFIG_MARVELL_PHY=y CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m CONFIG_PPP_DEFLATE=m CONFIG_PPPOE=m CONFIG_PPP_ASYNC=m CONFIG_PPP_SYNC_TTY=m -# CONFIG_INPUT_MOUSEDEV_PSAUX is not set CONFIG_INPUT_EVDEV=m CONFIG_INPUT_MISC=y CONFIG_INPUT_PCSPKR=m @@ -197,7 +194,6 @@ CONFIG_INPUT_PCSPKR=m CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_ICOM=m -CONFIG_SERIAL_TXX9_CONSOLE=y CONFIG_SERIAL_JSM=m CONFIG_HVC_CONSOLE=y CONFIG_HVC_RTAS=y @@ -226,11 +222,12 @@ CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_LOGO=y CONFIG_SOUND=m CONFIG_SND=m -CONFIG_SND_SEQUENCER=m -CONFIG_SND_SEQ_DUMMY=m +CONFIG_SND_OSSEMUL=y CONFIG_SND_MIXER_OSS=m CONFIG_SND_PCM_OSS=m -CONFIG_SND_SEQUENCER_OSS=y +CONFIG_SND_SEQUENCER=m +CONFIG_SND_SEQ_DUMMY=m +CONFIG_SND_SEQUENCER_OSS=m CONFIG_SND_POWERMAC=m CONFIG_SND_AOA=m CONFIG_SND_AOA_FABRIC_LAYOUT=m @@ -250,6 +247,9 @@ CONFIG_USB_EHCI_HCD=y CONFIG_USB_OHCI_HCD=y CONFIG_USB_STORAGE=m CONFIG_USB_APPLEDISPLAY=m +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=m +CONFIG_LEDS_POWERNV=m CONFIG_INFINIBAND=m CONFIG_INFINIBAND_USER_MAD=m CONFIG_INFINIBAND_USER_ACCESS=m @@ -267,7 +267,7 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_DS1307=y CONFIG_VIRTIO_PCI=m CONFIG_VIRTIO_BALLOON=m -CONFIG_FS_DAX=y +CONFIG_RAS=y CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y @@ -287,6 +287,7 @@ CONFIG_XFS_POSIX_ACL=y CONFIG_BTRFS_FS=m CONFIG_BTRFS_FS_POSIX_ACL=y CONFIG_NILFS2_FS=m +CONFIG_FS_DAX=y CONFIG_AUTOFS4_FS=m CONFIG_FUSE_FS=m CONFIG_OVERLAY_FS=m @@ -328,9 +329,11 @@ CONFIG_SOFTLOCKUP_DETECTOR=y CONFIG_HARDLOCKUP_DETECTOR=y CONFIG_DEBUG_MUTEXES=y CONFIG_LATENCYTOP=y +CONFIG_FTRACE=y +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y CONFIG_SCHED_TRACER=y CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_UPROBE_EVENT=y CONFIG_CODE_PATCHING_SELFTEST=y CONFIG_FTR_FIXUP_SELFTEST=y CONFIG_MSI_BITMAP_SELFTEST=y @@ -343,6 +346,7 @@ CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_CRC32C_VPMSUM=m CONFIG_CRYPTO_MD5_PPC=m CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_SHA1_PPC=m CONFIG_CRYPTO_SHA256=y CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m @@ -351,19 +355,14 @@ CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAST6=m CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SALSA20=m -CONFIG_CRYPTO_SHA1_PPC=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_DEV_NX=y CONFIG_CRYPTO_DEV_NX_ENCRYPT=m CONFIG_CRYPTO_DEV_VMX=y -CONFIG_CRYPTO_DEV_VMX_ENCRYPT=m CONFIG_VIRTUALIZATION=y CONFIG_KVM_BOOK3S_64=m CONFIG_KVM_BOOK3S_64_HV=m -CONFIG_NEW_LEDS=y -CONFIG_LEDS_CLASS=m -CONFIG_LEDS_POWERNV=m +CONFIG_VHOST_NET=m diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig index 6340e6c53c54..41d85cb3c9a2 100644 --- a/arch/powerpc/configs/ppc64e_defconfig +++ b/arch/powerpc/configs/ppc64e_defconfig @@ -3,7 +3,6 @@ CONFIG_PPC_BOOK3E_64=y CONFIG_SMP=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_FHANDLE=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_TASKSTATS=y @@ -30,8 +29,8 @@ CONFIG_BINFMT_MISC=m CONFIG_IRQ_ALL_CPUS=y CONFIG_SPARSEMEM_MANUAL=y CONFIG_PCI_MSI=y -CONFIG_PCCARD=y CONFIG_HOTPLUG_PCI=y +CONFIG_PCCARD=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -108,15 +107,14 @@ CONFIG_E100=y 
CONFIG_E1000=y CONFIG_IXGB=m CONFIG_SUNGEM=y -CONFIG_MARVELL_PHY=y CONFIG_BROADCOM_PHY=m +CONFIG_MARVELL_PHY=y CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m CONFIG_PPP_DEFLATE=m CONFIG_PPPOE=m CONFIG_PPP_ASYNC=m CONFIG_PPP_SYNC_TTY=m -# CONFIG_INPUT_MOUSEDEV_PSAUX is not set CONFIG_INPUT_EVDEV=m CONFIG_INPUT_MISC=y # CONFIG_SERIO_SERPORT is not set @@ -143,11 +141,12 @@ CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_LOGO=y CONFIG_SOUND=m CONFIG_SND=m -CONFIG_SND_SEQUENCER=m -CONFIG_SND_SEQ_DUMMY=m +CONFIG_SND_OSSEMUL=y CONFIG_SND_MIXER_OSS=m CONFIG_SND_PCM_OSS=m -CONFIG_SND_SEQUENCER_OSS=y +CONFIG_SND_SEQUENCER=m +CONFIG_SND_SEQ_DUMMY=m +CONFIG_SND_SEQUENCER_OSS=m CONFIG_HID_DRAGONRISE=y CONFIG_HID_GYRATION=y CONFIG_HID_TWINHAN=y @@ -172,10 +171,8 @@ CONFIG_INFINIBAND=m CONFIG_INFINIBAND_MTHCA=m CONFIG_INFINIBAND_IPOIB=m CONFIG_INFINIBAND_ISER=m -CONFIG_EDAC=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_DS1307=y -CONFIG_FS_DAX=y CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y @@ -192,6 +189,7 @@ CONFIG_JFS_POSIX_ACL=y CONFIG_JFS_SECURITY=y CONFIG_XFS_FS=m CONFIG_XFS_POSIX_ACL=y +CONFIG_FS_DAX=y CONFIG_AUTOFS4_FS=m CONFIG_ISO9660_FS=y CONFIG_UDF_FS=m @@ -251,5 +249,4 @@ CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set # CONFIG_CRYPTO_HW is not set diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig index 18d0d60dadbf..da0e8d535eb8 100644 --- a/arch/powerpc/configs/ppc6xx_defconfig +++ b/arch/powerpc/configs/ppc6xx_defconfig @@ -11,10 +11,10 @@ CONFIG_TASK_DELAY_ACCT=y CONFIG_TASK_XACCT=y CONFIG_TASK_IO_ACCOUNTING=y CONFIG_CGROUPS=y -CONFIG_CGROUP_DEVICE=y -CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_SCHED=y CONFIG_RT_GROUP_SCHED=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y CONFIG_USER_NS=y CONFIG_BLK_DEV_INITRD=y # CONFIG_COMPAT_BRK is not set @@ -61,7 +61,7 @@ CONFIG_SBC8641D=y CONFIG_MPC8610_HPCD=y CONFIG_GEF_SBC610=y CONFIG_CPU_FREQ=y -CONFIG_CPU_FREQ_STAT=m +CONFIG_CPU_FREQ_STAT=y CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y CONFIG_CPU_FREQ_GOV_PERFORMANCE=y CONFIG_CPU_FREQ_GOV_POWERSAVE=m @@ -70,7 +70,6 @@ CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m CONFIG_CPU_FREQ_PMAC=y CONFIG_TAU=y CONFIG_TAU_AVERAGE=y -CONFIG_QUICC_ENGINE=y CONFIG_QE_GPIO=y CONFIG_MCU_MPC8349EMITX=y CONFIG_HIGHMEM=y @@ -141,7 +140,6 @@ CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m CONFIG_NF_CONNTRACK_SECMARK=y CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CT_PROTO_UDPLITE=m CONFIG_NF_CONNTRACK_AMANDA=m CONFIG_NF_CONNTRACK_FTP=m CONFIG_NF_CONNTRACK_H323=m @@ -187,7 +185,6 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m -CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m @@ -195,7 +192,6 @@ CONFIG_NETFILTER_XT_MATCH_TCPMSS=m CONFIG_NETFILTER_XT_MATCH_TIME=m CONFIG_NETFILTER_XT_MATCH_U32=m CONFIG_NF_CONNTRACK_IPV4=m -# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -334,9 +330,7 @@ CONFIG_BT_BNEP_MC_FILTER=y CONFIG_BT_BNEP_PROTO_FILTER=y CONFIG_BT_HIDP=m CONFIG_BT_HCIUART=m -CONFIG_BT_HCIUART_H4=y CONFIG_BT_HCIUART_BCSP=y -CONFIG_BT_HCIUART_LL=y CONFIG_BT_HCIBCM203X=m CONFIG_BT_HCIBPA10X=m CONFIG_BT_HCIBFUSB=m @@ -370,7 +364,6 @@ CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=16384 CONFIG_CDROM_PKTCDVD=m CONFIG_VIRTIO_BLK=m -CONFIG_BLK_DEV_HD=y CONFIG_ENCLOSURE_SERVICES=m 
CONFIG_SENSORS_TSL2550=m CONFIG_EEPROM_AT24=m @@ -548,16 +541,16 @@ CONFIG_PCMCIA_XIRC2PS=m CONFIG_FDDI=y CONFIG_SKFP=m CONFIG_NET_SB1000=m -CONFIG_MARVELL_PHY=m -CONFIG_DAVICOM_PHY=m -CONFIG_QSEMI_PHY=m -CONFIG_LXT_PHY=m -CONFIG_CICADA_PHY=m -CONFIG_VITESSE_PHY=m -CONFIG_SMSC_PHY=m CONFIG_BROADCOM_PHY=m +CONFIG_CICADA_PHY=m +CONFIG_DAVICOM_PHY=m CONFIG_ICPLUS_PHY=m +CONFIG_LXT_PHY=m +CONFIG_MARVELL_PHY=m +CONFIG_QSEMI_PHY=m CONFIG_REALTEK_PHY=m +CONFIG_SMSC_PHY=m +CONFIG_VITESSE_PHY=m CONFIG_PLIP=m CONFIG_PPP_DEFLATE=m CONFIG_PPP_FILTER=y @@ -585,7 +578,6 @@ CONFIG_USB_ALI_M5632=y CONFIG_USB_AN2720=y CONFIG_USB_EPSON2888=y CONFIG_USB_KC2190=y -# CONFIG_INPUT_MOUSEDEV_PSAUX is not set CONFIG_INPUT_JOYDEV=m CONFIG_INPUT_EVDEV=y CONFIG_MOUSE_SERIAL=m @@ -647,7 +639,6 @@ CONFIG_SYNCLINKMP=m CONFIG_SYNCLINK_GT=m CONFIG_NOZOMI=m CONFIG_N_HDLC=m -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_CS=m @@ -657,13 +648,13 @@ CONFIG_SERIAL_8250_MANY_PORTS=y CONFIG_SERIAL_8250_SHARE_IRQ=y CONFIG_SERIAL_8250_DETECT_IRQ=y CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_OF_PLATFORM=y CONFIG_SERIAL_UARTLITE=m CONFIG_SERIAL_PMACZILOG=m CONFIG_SERIAL_MPC52xx=y CONFIG_SERIAL_MPC52xx_CONSOLE=y CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=115200 CONFIG_SERIAL_JSM=m -CONFIG_SERIAL_OF_PLATFORM=y CONFIG_PRINTER=m CONFIG_LP_CONSOLE=y CONFIG_PPDEV=m @@ -748,9 +739,10 @@ CONFIG_MFD_SM501_GPIO=y CONFIG_AGP=y CONFIG_AGP_UNINORTH=y CONFIG_DRM=m +CONFIG_DRM_RADEON=m +CONFIG_DRM_LEGACY=y CONFIG_DRM_TDFX=m CONFIG_DRM_R128=m -CONFIG_DRM_RADEON=m CONFIG_DRM_MGA=m CONFIG_DRM_SIS=m CONFIG_DRM_VIA=m @@ -797,17 +789,18 @@ CONFIG_LOGO=y # CONFIG_LOGO_LINUX_VGA16 is not set CONFIG_SOUND=m CONFIG_SND=m -CONFIG_SND_SEQUENCER=m -CONFIG_SND_SEQ_DUMMY=m +CONFIG_SND_OSSEMUL=y CONFIG_SND_MIXER_OSS=m CONFIG_SND_PCM_OSS=m -CONFIG_SND_SEQUENCER_OSS=y CONFIG_SND_DYNAMIC_MINORS=y # CONFIG_SND_SUPPORT_OLD_API is not set CONFIG_SND_VERBOSE_PRINTK=y CONFIG_SND_DEBUG=y CONFIG_SND_DEBUG_VERBOSE=y CONFIG_SND_PCM_XRUN_DEBUG=y +CONFIG_SND_SEQUENCER=m +CONFIG_SND_SEQ_DUMMY=m +CONFIG_SND_SEQUENCER_OSS=m CONFIG_SND_DUMMY=m CONFIG_SND_VIRMIDI=m CONFIG_SND_MTPAV=m @@ -899,7 +892,7 @@ CONFIG_USB=y CONFIG_USB_ANNOUNCE_NEW_DEVICES=y CONFIG_USB_MON=y CONFIG_USB_EHCI_HCD=m -CONFIG_USB_EHCI_FSL=y +CONFIG_USB_EHCI_FSL=m CONFIG_USB_OHCI_HCD=m CONFIG_USB_OHCI_HCD_PPC_OF_BE=y CONFIG_USB_OHCI_HCD_PPC_OF_LE=y @@ -967,7 +960,6 @@ CONFIG_USB_ADUTUX=m CONFIG_USB_SEVSEG=m CONFIG_USB_LEGOTOWER=m CONFIG_USB_LCD=m -CONFIG_USB_LED=m CONFIG_USB_IDMOUSE=m CONFIG_USB_FTDI_ELAN=m CONFIG_USB_APPLEDISPLAY=m @@ -1020,15 +1012,14 @@ CONFIG_UIO_CIF=m CONFIG_UIO_PDRV_GENIRQ=m CONFIG_VIRTIO_PCI=m CONFIG_VIRTIO_BALLOON=m -CONFIG_FS_DAX=y +CONFIG_QUICC_ENGINE=y CONFIG_EXT2_FS=m CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT4_FS=m +CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y -CONFIG_EXT4_FS=y CONFIG_JBD2_DEBUG=y CONFIG_REISERFS_FS=m CONFIG_REISERFS_PROC_INFO=y @@ -1042,6 +1033,7 @@ CONFIG_XFS_FS=m CONFIG_XFS_QUOTA=y CONFIG_XFS_POSIX_ACL=y CONFIG_GFS2_FS=m +CONFIG_FS_DAX=y CONFIG_QUOTA_NETLINK_INTERFACE=y CONFIG_AUTOFS4_FS=m CONFIG_FUSE_FS=m @@ -1146,7 +1138,6 @@ CONFIG_DEBUG_VM=y CONFIG_DEBUG_HIGHMEM=y CONFIG_DEBUG_STACKOVERFLOW=y CONFIG_DEBUG_SHIRQ=y -CONFIG_TIMER_STATS=y CONFIG_DEBUG_RT_MUTEXES=y CONFIG_DEBUG_SPINLOCK=y CONFIG_DEBUG_MUTEXES=y @@ -1173,7 +1164,6 @@ CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SELINUX_BOOTPARAM=y CONFIG_SECURITY_SELINUX_DISABLE=y CONFIG_CRYPTO_TEST=m 
-CONFIG_CRYPTO_GCM=m CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m @@ -1201,7 +1191,6 @@ CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_DEV_HIFN_795X=m CONFIG_CRYPTO_DEV_HIFN_795X_RNG=y CONFIG_CRYPTO_DEV_TALITOS=m diff --git a/arch/powerpc/configs/pq2fads_defconfig b/arch/powerpc/configs/pq2fads_defconfig index 50b2bad51d0a..0ededa8c837d 100644 --- a/arch/powerpc/configs/pq2fads_defconfig +++ b/arch/powerpc/configs/pq2fads_defconfig @@ -79,4 +79,3 @@ CONFIG_CRYPTO_ECB=y CONFIG_CRYPTO_PCBC=y CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_DES=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig index ee0ec5a682fc..2efa025bf483 100644 --- a/arch/powerpc/configs/ps3_defconfig +++ b/arch/powerpc/configs/ps3_defconfig @@ -5,7 +5,6 @@ CONFIG_SMP=y CONFIG_NR_CPUS=2 CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_FHANDLE=y CONFIG_HIGH_RES_TIMERS=y CONFIG_BLK_DEV_INITRD=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y @@ -94,7 +93,6 @@ CONFIG_USB_USBNET=m # CONFIG_USB_NET_CDC_SUBSET is not set # CONFIG_USB_NET_ZAURUS is not set CONFIG_INPUT_FF_MEMLESS=m -# CONFIG_INPUT_MOUSEDEV_PSAUX is not set CONFIG_INPUT_JOYDEV=m CONFIG_INPUT_EVDEV=m # CONFIG_INPUT_KEYBOARD is not set @@ -161,7 +159,6 @@ CONFIG_NLS_ISO8859_1=y CONFIG_CRC_CCITT=m CONFIG_CRC_T10DIF=y CONFIG_DEBUG_INFO=y -CONFIG_DEBUG_FS=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_MEMORY_INIT=y CONFIG_DEBUG_STACKOVERFLOW=y diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig index fd5d98a0b95c..3d935969e5a2 100644 --- a/arch/powerpc/configs/pseries_defconfig +++ b/arch/powerpc/configs/pseries_defconfig @@ -1,11 +1,8 @@ CONFIG_PPC64=y -CONFIG_SMP=y CONFIG_NR_CPUS=2048 CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_FHANDLE=y CONFIG_AUDIT=y -CONFIG_AUDITSYSCALL=y CONFIG_IRQ_DOMAIN_DEBUG=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y @@ -18,17 +15,16 @@ CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=18 CONFIG_LOG_CPU_MAX_BUF_SHIFT=13 CONFIG_NUMA_BALANCING=y -CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y CONFIG_CGROUPS=y -CONFIG_CGROUP_FREEZER=y -CONFIG_CGROUP_DEVICE=y -CONFIG_CPUSETS=y -CONFIG_CGROUP_CPUACCT=y -CONFIG_CGROUP_BPF=y CONFIG_MEMCG=y CONFIG_MEMCG_SWAP=y -CONFIG_CGROUP_PERF=y CONFIG_CGROUP_SCHED=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y CONFIG_USER_NS=y CONFIG_BLK_DEV_INITRD=y CONFIG_BPF_SYSCALL=y @@ -43,12 +39,12 @@ CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y CONFIG_PARTITION_ADVANCED=y CONFIG_PPC_SPLPAR=y +CONFIG_DTL=y CONFIG_SCANLOG=m CONFIG_PPC_SMLPAR=y -CONFIG_DTL=y +CONFIG_IBMEBUS=y # CONFIG_PPC_PMAC is not set CONFIG_RTAS_FLASH=m -CONFIG_IBMEBUS=y CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_HZ_100=y CONFIG_BINFMT_MISC=m @@ -155,7 +151,6 @@ CONFIG_NETCONSOLE=y CONFIG_TUN=m CONFIG_VETH=m CONFIG_VIRTIO_NET=m -CONFIG_VHOST_NET=m CONFIG_VORTEX=m CONFIG_ACENIC=m CONFIG_ACENIC_OMIT_TIGON_I=y @@ -183,12 +178,10 @@ CONFIG_PPP_DEFLATE=m CONFIG_PPPOE=m CONFIG_PPP_ASYNC=m CONFIG_PPP_SYNC_TTY=m -# CONFIG_INPUT_MOUSEDEV_PSAUX is not set CONFIG_INPUT_EVDEV=m CONFIG_INPUT_MISC=y CONFIG_INPUT_PCSPKR=m # CONFIG_SERIO_SERPORT is not set -CONFIG_DEVPTS_MULTIPLE_INSTANCES=y CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_ICOM=m @@ -198,8 +191,6 @@ CONFIG_HVC_RTAS=y CONFIG_HVCS=m CONFIG_VIRTIO_CONSOLE=m CONFIG_IBM_BSR=m -CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_GENERIC=y 
CONFIG_RAW_DRIVER=y CONFIG_MAX_RAW_DEVS=1024 CONFIG_FB=y @@ -227,6 +218,9 @@ CONFIG_USB_EHCI_HCD=y # CONFIG_USB_EHCI_HCD_PPC_OF is not set CONFIG_USB_OHCI_HCD=y CONFIG_USB_STORAGE=m +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=m +CONFIG_LEDS_POWERNV=m CONFIG_INFINIBAND=m CONFIG_INFINIBAND_USER_MAD=m CONFIG_INFINIBAND_USER_ACCESS=m @@ -238,9 +232,10 @@ CONFIG_INFINIBAND_IPOIB=m CONFIG_INFINIBAND_IPOIB_CM=y CONFIG_INFINIBAND_SRP=m CONFIG_INFINIBAND_ISER=m +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_GENERIC=y CONFIG_VIRTIO_PCI=m CONFIG_VIRTIO_BALLOON=m -CONFIG_FS_DAX=y CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y @@ -256,6 +251,7 @@ CONFIG_XFS_POSIX_ACL=y CONFIG_BTRFS_FS=m CONFIG_BTRFS_FS_POSIX_ACL=y CONFIG_NILFS2_FS=m +CONFIG_FS_DAX=y CONFIG_AUTOFS4_FS=m CONFIG_FUSE_FS=m CONFIG_OVERLAY_FS=m @@ -294,9 +290,11 @@ CONFIG_DEBUG_STACKOVERFLOW=y CONFIG_SOFTLOCKUP_DETECTOR=y CONFIG_HARDLOCKUP_DETECTOR=y CONFIG_LATENCYTOP=y +CONFIG_FTRACE=y +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y CONFIG_SCHED_TRACER=y CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_UPROBE_EVENT=y CONFIG_CODE_PATCHING_SELFTEST=y CONFIG_FTR_FIXUP_SELFTEST=y CONFIG_MSI_BITMAP_SELFTEST=y @@ -307,6 +305,7 @@ CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_CRC32C_VPMSUM=m CONFIG_CRYPTO_MD5_PPC=m CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_SHA1_PPC=m CONFIG_CRYPTO_SHA256=y CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m @@ -315,19 +314,14 @@ CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAST6=m CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SALSA20=m -CONFIG_CRYPTO_SHA1_PPC=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_DEV_NX=y CONFIG_CRYPTO_DEV_NX_ENCRYPT=m CONFIG_CRYPTO_DEV_VMX=y -CONFIG_CRYPTO_DEV_VMX_ENCRYPT=m CONFIG_VIRTUALIZATION=y CONFIG_KVM_BOOK3S_64=m CONFIG_KVM_BOOK3S_64_HV=m -CONFIG_NEW_LEDS=y -CONFIG_LEDS_CLASS=m -CONFIG_LEDS_POWERNV=m +CONFIG_VHOST_NET=m diff --git a/arch/powerpc/configs/tqm8xx_defconfig b/arch/powerpc/configs/tqm8xx_defconfig index 78fddf24b5d3..cd72193fac0a 100644 --- a/arch/powerpc/configs/tqm8xx_defconfig +++ b/arch/powerpc/configs/tqm8xx_defconfig @@ -18,6 +18,7 @@ CONFIG_PARTITION_ADVANCED=y CONFIG_TQM8XX=y CONFIG_8xx_COPYBACK=y # CONFIG_8xx_CPU15 is not set +CONFIG_GEN_RTC=y CONFIG_HZ_100=y # CONFIG_SECCOMP is not set CONFIG_NET=y @@ -44,7 +45,6 @@ CONFIG_MTD_PHYSMAP_OF=y CONFIG_NETDEVICES=y CONFIG_FS_ENET=y CONFIG_DAVICOM_PHY=y -CONFIG_FIXED_PHY=y # CONFIG_WLAN is not set # CONFIG_INPUT is not set # CONFIG_SERIO is not set @@ -53,7 +53,6 @@ CONFIG_FIXED_PHY=y CONFIG_SERIAL_CPM=y CONFIG_SERIAL_CPM_CONSOLE=y CONFIG_HW_RANDOM=y -CONFIG_GEN_RTC=y # CONFIG_HWMON is not set # CONFIG_USB_SUPPORT is not set # CONFIG_DNOTIFY is not set diff --git a/arch/powerpc/configs/wii_defconfig b/arch/powerpc/configs/wii_defconfig index dcdd51b57783..9c7400a19e9d 100644 --- a/arch/powerpc/configs/wii_defconfig +++ b/arch/powerpc/configs/wii_defconfig @@ -55,9 +55,6 @@ CONFIG_B43_SDIO=y # CONFIG_B43_PHY_LP is not set CONFIG_B43_DEBUG=y CONFIG_INPUT_FF_MEMLESS=m -# CONFIG_INPUT_MOUSEDEV_PSAUX is not set -CONFIG_INPUT_MOUSEDEV_SCREEN_X=640 -CONFIG_INPUT_MOUSEDEV_SCREEN_Y=480 CONFIG_INPUT_JOYDEV=y CONFIG_INPUT_EVDEV=y # CONFIG_KEYBOARD_ATKBD is not set @@ -68,7 +65,6 @@ CONFIG_INPUT_UINPUT=y # CONFIG_SERIO_I8042 is not set # CONFIG_SERIO_SERPORT is not set CONFIG_LEGACY_PTY_COUNT=64 -# CONFIG_DEVKMEM is not set # CONFIG_HW_RANDOM is not set CONFIG_NVRAM=y CONFIG_I2C=y @@ -83,11 +79,12 @@ CONFIG_FB=y CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_SOUND=y 
CONFIG_SND=y -CONFIG_SND_SEQUENCER=y +CONFIG_SND_OSSEMUL=y CONFIG_SND_MIXER_OSS=y CONFIG_SND_PCM_OSS=y -CONFIG_SND_SEQUENCER_OSS=y # CONFIG_SND_VERBOSE_PROCFS is not set +CONFIG_SND_SEQUENCER=y +CONFIG_SND_SEQUENCER_OSS=y CONFIG_HID_APPLE=m CONFIG_HID_WACOM=m CONFIG_MMC=y @@ -119,5 +116,4 @@ CONFIG_SCHED_TRACER=y CONFIG_BLK_DEV_IO_TRACE=y CONFIG_DMA_API_DEBUG=y CONFIG_PPC_EARLY_DEBUG=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set # CONFIG_CRYPTO_HW is not set diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild index 5c4fbc80dc6c..2542ea15d338 100644 --- a/arch/powerpc/include/asm/Kbuild +++ b/arch/powerpc/include/asm/Kbuild @@ -8,3 +8,4 @@ generic-y += mcs_spinlock.h generic-y += preempt.h generic-y += rwsem.h generic-y += vtime.h +generic-y += msi.h diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h index cee3aa087653..7f2a7702596c 100644 --- a/arch/powerpc/include/asm/asm-compat.h +++ b/arch/powerpc/include/asm/asm-compat.h @@ -25,7 +25,7 @@ #define PPC_LCMPI stringify_in_c(cmpdi) #define PPC_LCMPLI stringify_in_c(cmpldi) #define PPC_LCMP stringify_in_c(cmpd) -#define PPC_LONG stringify_in_c(.llong) +#define PPC_LONG stringify_in_c(.8byte) #define PPC_LONG_ALIGN stringify_in_c(.balign 8) #define PPC_TLNEI stringify_in_c(tdnei) #define PPC_LLARX(t, a, b, eh) PPC_LDARX(t, a, b, eh) diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h index 7fb755880409..4d453f979553 100644 --- a/arch/powerpc/include/asm/book3s/32/pgtable.h +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h @@ -294,13 +294,11 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm, #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 }) -extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, - pmd_t **pmdp); - int map_kernel_page(unsigned long va, phys_addr_t pa, int flags); /* Generic accessors to PTE bits */ static inline int pte_write(pte_t pte) { return !!(pte_val(pte) & _PAGE_RW);} +static inline int pte_read(pte_t pte) { return 1; } static inline int pte_dirty(pte_t pte) { return !!(pte_val(pte) & _PAGE_DIRTY); } static inline int pte_young(pte_t pte) { return !!(pte_val(pte) & _PAGE_ACCESSED); } static inline int pte_special(pte_t pte) { return !!(pte_val(pte) & _PAGE_SPECIAL); } diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h index 36fc7bfe9e11..f88452019114 100644 --- a/arch/powerpc/include/asm/book3s/64/hash.h +++ b/arch/powerpc/include/asm/book3s/64/hash.h @@ -40,7 +40,7 @@ * Define the address range of the kernel non-linear virtual area */ #define H_KERN_VIRT_START ASM_CONST(0xD000000000000000) -#define H_KERN_VIRT_SIZE ASM_CONST(0x0000100000000000) +#define H_KERN_VIRT_SIZE ASM_CONST(0x0000400000000000) /* 64T */ /* * The vmalloc space starts at the beginning of that region, and @@ -48,9 +48,11 @@ * (we keep a quarter for the virtual memmap) */ #define H_VMALLOC_START H_KERN_VIRT_START -#define H_VMALLOC_SIZE (H_KERN_VIRT_SIZE >> 1) +#define H_VMALLOC_SIZE ASM_CONST(0x380000000000) /* 56T */ #define H_VMALLOC_END (H_VMALLOC_START + H_VMALLOC_SIZE) +#define H_KERN_IO_START H_VMALLOC_END + /* * Region IDs */ diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h index 5c28bd6f2ae1..2d1ca488ca44 100644 --- a/arch/powerpc/include/asm/book3s/64/hugetlb.h +++ 
b/arch/powerpc/include/asm/book3s/64/hugetlb.h @@ -54,9 +54,7 @@ static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma, #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE static inline bool gigantic_page_supported(void) { - if (radix_enabled()) - return true; - return false; + return true; } #endif diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h index 6981a52b3887..508275bb05d5 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h +++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h @@ -104,6 +104,7 @@ #define HPTE_R_C ASM_CONST(0x0000000000000080) #define HPTE_R_R ASM_CONST(0x0000000000000100) #define HPTE_R_KEY_LO ASM_CONST(0x0000000000000e00) +#define HPTE_R_KEY (HPTE_R_KEY_LO | HPTE_R_KEY_HI) #define HPTE_V_1TB_SEG ASM_CONST(0x4000000000000000) #define HPTE_V_VRMA_MASK ASM_CONST(0x4001ffffff000000) @@ -468,7 +469,7 @@ extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend, int psize, int ssize); int htab_remove_mapping(unsigned long vstart, unsigned long vend, int psize, int ssize); -extern void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages); +extern void pseries_add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages); extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr); #ifdef CONFIG_PPC_PSERIES diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h index 5b4023c616f7..c3b00e8ff791 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu.h +++ b/arch/powerpc/include/asm/book3s/64/mmu.h @@ -83,6 +83,9 @@ typedef struct { mm_context_id_t id; u16 user_psize; /* page size index */ + /* Number of bits in the mm_cpumask */ + atomic_t active_cpus; + /* NPU NMMU context */ struct npu_context *npu_context; @@ -97,11 +100,6 @@ typedef struct { #ifdef CONFIG_PPC_SUBPAGE_PROT struct subpage_prot_table spt; #endif /* CONFIG_PPC_SUBPAGE_PROT */ -#ifdef CONFIG_PPC_ICSWX - struct spinlock *cop_lockp; /* guard acop and cop_pid */ - unsigned long acop; /* mask of enabled coprocessor types */ - unsigned int cop_pid; /* pid value used with coprocessors */ -#endif /* CONFIG_PPC_ICSWX */ #ifdef CONFIG_PPC_64K_PAGES /* for 4K PTE fragment support */ void *pte_frag; diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h index e2329db9d6f4..1fcfa425cefa 100644 --- a/arch/powerpc/include/asm/book3s/64/pgalloc.h +++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h @@ -41,8 +41,6 @@ extern struct kmem_cache *pgtable_cache[]; pgtable_cache[(shift) - 1]; \ }) -#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO - extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int); extern void pte_fragment_free(unsigned long *, int); extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift); diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 818a58fc3f4f..b9aff515b4de 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -272,8 +272,10 @@ extern unsigned long __vmalloc_end; extern unsigned long __kernel_virt_start; extern unsigned long __kernel_virt_size; +extern unsigned long __kernel_io_start; #define KERN_VIRT_START __kernel_virt_start #define KERN_VIRT_SIZE __kernel_virt_size +#define KERN_IO_START __kernel_io_start extern struct page *vmemmap; extern unsigned long ioremap_bot; extern unsigned long pci_io_base; @@ -298,7 +300,6 @@ extern 
unsigned long pci_io_base; * PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces * IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE */ -#define KERN_IO_START (KERN_VIRT_START + (KERN_VIRT_SIZE >> 1)) #define FULL_IO_SIZE 0x80000000ul #define ISA_IO_BASE (KERN_IO_START) #define ISA_IO_END (KERN_IO_START + 0x10000ul) @@ -409,6 +410,11 @@ static inline int pte_write(pte_t pte) return __pte_write(pte) || pte_savedwrite(pte); } +static inline int pte_read(pte_t pte) +{ + return !!(pte_raw(pte) & cpu_to_be64(_PAGE_READ)); +} + #define __HAVE_ARCH_PTEP_SET_WRPROTECT static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) @@ -1167,6 +1173,7 @@ static inline bool arch_needs_pgtable_deposit(void) return false; return true; } +extern void serialize_against_pte_lookup(struct mm_struct *mm); static inline pmd_t pmd_mkdevmap(pmd_t pmd) diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h index 544440b5aff3..1e5ba94e62ef 100644 --- a/arch/powerpc/include/asm/book3s/64/radix.h +++ b/arch/powerpc/include/asm/book3s/64/radix.h @@ -110,6 +110,8 @@ */ #define RADIX_VMEMMAP_BASE (RADIX_VMALLOC_END) +#define RADIX_KERN_IO_START (RADIX_KERN_VIRT_START + (RADIX_KERN_VIRT_SIZE >> 1)) + #ifndef __ASSEMBLY__ #define RADIX_PTE_TABLE_SIZE (sizeof(pte_t) << RADIX_PTE_INDEX_SIZE) #define RADIX_PMD_TABLE_SIZE (sizeof(pmd_t) << RADIX_PMD_INDEX_SIZE) diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h index cc7fbde4f53c..9b433a624bf3 100644 --- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h +++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h @@ -22,22 +22,21 @@ extern void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end extern void radix__local_flush_tlb_mm(struct mm_struct *mm); extern void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); -extern void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr); extern void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr, int psize); extern void radix__tlb_flush(struct mmu_gather *tlb); #ifdef CONFIG_SMP extern void radix__flush_tlb_mm(struct mm_struct *mm); extern void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); -extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr); extern void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr, int psize); #else #define radix__flush_tlb_mm(mm) radix__local_flush_tlb_mm(mm) #define radix__flush_tlb_page(vma,addr) radix__local_flush_tlb_page(vma,addr) #define radix__flush_tlb_page_psize(mm,addr,p) radix__local_flush_tlb_page_psize(mm,addr,p) -#define radix__flush_tlb_pwc(tlb, addr) radix__local_flush_tlb_pwc(tlb, addr) #endif +extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr); +extern void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr); extern void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa, unsigned long page_size); extern void radix__flush_tlb_lpid(unsigned long lpid); diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h index 87fcc1948817..7ee763d3bea9 100644 --- a/arch/powerpc/include/asm/bug.h +++ b/arch/powerpc/include/asm/bug.h @@ -133,6 +133,7 @@ extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long); extern void bad_page_fault(struct pt_regs *, unsigned long, int); 
extern void _exception(int, struct pt_regs *, int, unsigned long); extern void die(const char *, struct pt_regs *, long); +extern bool die_will_crash(void); #endif /* !__ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h index 5a90292afbad..d122f7f957ce 100644 --- a/arch/powerpc/include/asm/cache.h +++ b/arch/powerpc/include/asm/cache.h @@ -5,7 +5,7 @@ /* bytes per L1 cache line */ -#if defined(CONFIG_8xx) || defined(CONFIG_403GCX) +#if defined(CONFIG_PPC_8xx) || defined(CONFIG_403GCX) #define L1_CACHE_SHIFT 4 #define MAX_COPY_PREFETCH 1 #elif defined(CONFIG_PPC_E500MC) diff --git a/arch/powerpc/include/asm/cpuidle.h b/arch/powerpc/include/asm/cpuidle.h index 52586f9956bb..eb43b5c3a7b5 100644 --- a/arch/powerpc/include/asm/cpuidle.h +++ b/arch/powerpc/include/asm/cpuidle.h @@ -67,6 +67,17 @@ #define ERR_DEEP_STATE_ESL_MISMATCH -2 #ifndef __ASSEMBLY__ +/* Additional SPRs that need to be saved/restored during stop */ +struct stop_sprs { + u64 pid; + u64 ldbar; + u64 fscr; + u64 hfscr; + u64 mmcr1; + u64 mmcr2; + u64 mmcra; +}; + extern u32 pnv_fastsleep_workaround_at_entry[]; extern u32 pnv_fastsleep_workaround_at_exit[]; @@ -90,20 +101,4 @@ static inline void report_invalid_psscr_val(u64 psscr_val, int err) #endif -/* Idle state entry routines */ -#ifdef CONFIG_PPC_P7_NAP -#define IDLE_STATE_ENTER_SEQ(IDLE_INST) \ - /* Magic NAP/SLEEP/WINKLE mode enter sequence */ \ - std r0,0(r1); \ - ptesync; \ - ld r0,0(r1); \ -236: cmpd cr0,r0,r0; \ - bne 236b; \ - IDLE_INST; \ - -#define IDLE_STATE_ENTER_SEQ_NORET(IDLE_INST) \ - IDLE_STATE_ENTER_SEQ(IDLE_INST) \ - b . -#endif /* CONFIG_PPC_P7_NAP */ - #endif diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index d02ad93bf708..a9bf921f4efc 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -513,7 +513,7 @@ enum { #else CPU_FTRS_GENERIC_32 | #endif -#ifdef CONFIG_8xx +#ifdef CONFIG_PPC_8xx CPU_FTRS_8XX | #endif #ifdef CONFIG_40x @@ -565,7 +565,7 @@ enum { #else CPU_FTRS_GENERIC_32 & #endif -#ifdef CONFIG_8xx +#ifdef CONFIG_PPC_8xx CPU_FTRS_8XX & #endif #ifdef CONFIG_40x diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h index 8e37b71674f4..9847ae3a12d1 100644 --- a/arch/powerpc/include/asm/eeh.h +++ b/arch/powerpc/include/asm/eeh.h @@ -131,7 +131,6 @@ static inline bool eeh_pe_passed(struct eeh_pe *pe) struct eeh_dev { int mode; /* EEH mode */ int class_code; /* Class code of the device */ - int config_addr; /* Config address */ int pe_config_addr; /* PE config address */ u32 config_space[16]; /* Saved PCI config space */ int pcix_cap; /* Saved PCIx capability */ @@ -141,7 +140,6 @@ struct eeh_dev { struct eeh_pe *pe; /* Associated PE */ struct list_head list; /* Form link list in the PE */ struct list_head rmv_list; /* Record the removed edevs */ - struct pci_controller *phb; /* Associated PHB */ struct pci_dn *pdn; /* Associated PCI device node */ struct pci_dev *pdev; /* Associated PCI device */ bool in_error; /* Error flag for edev */ @@ -262,7 +260,8 @@ typedef void *(*eeh_traverse_func)(void *data, void *flag); void eeh_set_pe_aux_size(int size); int eeh_phb_pe_create(struct pci_controller *phb); struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb); -struct eeh_pe *eeh_pe_get(struct eeh_dev *edev); +struct eeh_pe *eeh_pe_get(struct pci_controller *phb, + int pe_no, int config_addr); int eeh_add_to_parent_pe(struct eeh_dev *edev); int eeh_rmv_from_parent_pe(struct eeh_dev *edev); void 
eeh_pe_update_time_stamp(struct eeh_pe *pe); diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h index ce88bbe1d809..5a23010af600 100644 --- a/arch/powerpc/include/asm/fadump.h +++ b/arch/powerpc/include/asm/fadump.h @@ -209,11 +209,13 @@ extern int early_init_dt_scan_fw_dump(unsigned long node, extern int fadump_reserve_mem(void); extern int setup_fadump(void); extern int is_fadump_active(void); +extern int should_fadump_crash(void); extern void crash_fadump(struct pt_regs *, const char *); extern void fadump_cleanup(void); #else /* CONFIG_FA_DUMP */ static inline int is_fadump_active(void) { return 0; } +static inline int should_fadump_crash(void) { return 0; } static inline void crash_fadump(struct pt_regs *regs, const char *str) { } #endif #endif diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h index 2de2319b99e2..8f88f771cc55 100644 --- a/arch/powerpc/include/asm/feature-fixups.h +++ b/arch/powerpc/include/asm/feature-fixups.h @@ -19,11 +19,11 @@ */ #if defined(CONFIG_PPC64) && !defined(__powerpc64__) /* 64 bits kernel, 32 bits code (ie. vdso32) */ -#define FTR_ENTRY_LONG .llong +#define FTR_ENTRY_LONG .8byte #define FTR_ENTRY_OFFSET .long 0xffffffff; .long #elif defined(CONFIG_PPC64) -#define FTR_ENTRY_LONG .llong -#define FTR_ENTRY_OFFSET .llong +#define FTR_ENTRY_LONG .8byte +#define FTR_ENTRY_OFFSET .8byte #else #define FTR_ENTRY_LONG .long #define FTR_ENTRY_OFFSET .long diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h index 4508b322f2cd..6c40dfda5912 100644 --- a/arch/powerpc/include/asm/fixmap.h +++ b/arch/powerpc/include/asm/fixmap.h @@ -17,6 +17,7 @@ #ifndef __ASSEMBLY__ #include #include +#include #ifdef CONFIG_HIGHMEM #include #include @@ -62,9 +63,6 @@ enum fixed_addresses { __end_of_fixed_addresses }; -extern void __set_fixmap (enum fixed_addresses idx, - phys_addr_t phys, pgprot_t flags); - #define __FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) #define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE) @@ -72,5 +70,11 @@ extern void __set_fixmap (enum fixed_addresses idx, #include +static inline void __set_fixmap(enum fixed_addresses idx, + phys_addr_t phys, pgprot_t flags) +{ + map_kernel_page(fix_to_virt(idx), phys, pgprot_val(flags)); +} + #endif /* !__ASSEMBLY__ */ #endif diff --git a/arch/powerpc/include/asm/fs_pd.h b/arch/powerpc/include/asm/fs_pd.h index f79d6c74eb2a..8def56ec05c6 100644 --- a/arch/powerpc/include/asm/fs_pd.h +++ b/arch/powerpc/include/asm/fs_pd.h @@ -26,7 +26,7 @@ #define cpm2_unmap(addr) do {} while(0) #endif -#ifdef CONFIG_8xx +#ifdef CONFIG_PPC_8xx #include extern immap_t __iomem *mpc8xx_immr; diff --git a/arch/powerpc/include/asm/hardirq.h b/arch/powerpc/include/asm/hardirq.h index 8add8b861e8d..c97603d617e3 100644 --- a/arch/powerpc/include/asm/hardirq.h +++ b/arch/powerpc/include/asm/hardirq.h @@ -12,6 +12,10 @@ typedef struct { unsigned int mce_exceptions; unsigned int spurious_irqs; unsigned int hmi_exceptions; + unsigned int sreset_irqs; +#ifdef CONFIG_PPC_WATCHDOG + unsigned int soft_nmi_irqs; +#endif #ifdef CONFIG_PPC_DOORBELL unsigned int doorbell_irqs; #endif diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h index 7f4025a6c69e..b8a0fb442c64 100644 --- a/arch/powerpc/include/asm/hugetlb.h +++ b/arch/powerpc/include/asm/hugetlb.h @@ -218,18 +218,4 @@ static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr, } #endif /* CONFIG_HUGETLB_PAGE */ -/* - * FSL Book3E platforms 
require special gpage handling - the gpages - * are reserved early in the boot process by memblock instead of via - * the .dts as on IBM platforms. - */ -#if defined(CONFIG_HUGETLB_PAGE) && (defined(CONFIG_PPC_FSL_BOOK3E) || \ - defined(CONFIG_PPC_8xx)) -extern void __init reserve_hugetlb_gpages(void); -#else -static inline void reserve_hugetlb_gpages(void) -{ -} -#endif - #endif /* _ASM_POWERPC_HUGETLB_H */ diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index 57d38b504ff7..3d34dc0869f6 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -280,7 +280,18 @@ #define H_RESIZE_HPT_COMMIT 0x370 #define H_REGISTER_PROC_TBL 0x37C #define H_SIGNAL_SYS_RESET 0x380 -#define MAX_HCALL_OPCODE H_SIGNAL_SYS_RESET +#define H_INT_GET_SOURCE_INFO 0x3A8 +#define H_INT_SET_SOURCE_CONFIG 0x3AC +#define H_INT_GET_SOURCE_CONFIG 0x3B0 +#define H_INT_GET_QUEUE_INFO 0x3B4 +#define H_INT_SET_QUEUE_CONFIG 0x3B8 +#define H_INT_GET_QUEUE_CONFIG 0x3BC +#define H_INT_SET_OS_REPORTING_LINE 0x3C0 +#define H_INT_GET_OS_REPORTING_LINE 0x3C4 +#define H_INT_ESB 0x3C8 +#define H_INT_SYNC 0x3CC +#define H_INT_RESET 0x3D0 +#define MAX_HCALL_OPCODE H_INT_RESET /* H_VIOCTL functions */ #define H_GET_VIOA_DUMP_SIZE 0x01 diff --git a/arch/powerpc/include/asm/icswx.h b/arch/powerpc/include/asm/icswx.h index 27e588f6c72e..6a2c87577541 100644 --- a/arch/powerpc/include/asm/icswx.h +++ b/arch/powerpc/include/asm/icswx.h @@ -69,7 +69,10 @@ struct coprocessor_completion_block { #define CSB_CC_WR_PROTECTION (16) #define CSB_CC_UNKNOWN_CODE (17) #define CSB_CC_ABORT (18) +#define CSB_CC_EXCEED_BYTE_COUNT (19) /* P9 or later */ #define CSB_CC_TRANSPORT (20) +#define CSB_CC_INVALID_CRB (21) /* P9 or later */ +#define CSB_CC_INVALID_DDE (30) /* P9 or later */ #define CSB_CC_SEGMENTED_DDL (31) #define CSB_CC_PROGRESS_POINT (32) #define CSB_CC_DDE_OVERFLOW (33) diff --git a/arch/powerpc/include/asm/imc-pmu.h b/arch/powerpc/include/asm/imc-pmu.h new file mode 100644 index 000000000000..7f74c282710f --- /dev/null +++ b/arch/powerpc/include/asm/imc-pmu.h @@ -0,0 +1,128 @@ +#ifndef __ASM_POWERPC_IMC_PMU_H +#define __ASM_POWERPC_IMC_PMU_H + +/* + * IMC Nest Performance Monitor counter support. + * + * Copyright (C) 2017 Madhavan Srinivasan, IBM Corporation. + * (C) 2017 Anju T Sudhakar, IBM Corporation. + * (C) 2017 Hemant K Shaw, IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or later version. + */ + +#include +#include +#include +#include +#include + +/* + * For static allocation of some of the structures. + */ +#define IMC_MAX_PMUS 32 + +/* + * Compatibility macros for IMC devices + */ +#define IMC_DTB_COMPAT "ibm,opal-in-memory-counters" +#define IMC_DTB_UNIT_COMPAT "ibm,imc-counters" + + +/* + * LDBAR: Counter address and Enable/Disable macro. + * perf/imc-pmu.c has the LDBAR layout information. + */ +#define THREAD_IMC_LDBAR_MASK 0x0003ffffffffe000ULL +#define THREAD_IMC_ENABLE 0x8000000000000000ULL + +/* + * Structure to hold memory address information for imc units. + */ +struct imc_mem_info { + u64 *vbase; + u32 id; +}; + +/* + * Place holder for nest pmu events and values. 
+ */ +struct imc_events { + u32 value; + char *name; + char *unit; + char *scale; +}; + +/* Event attribute array index */ +#define IMC_FORMAT_ATTR 0 +#define IMC_EVENT_ATTR 1 +#define IMC_CPUMASK_ATTR 2 +#define IMC_NULL_ATTR 3 + +/* PMU Format attribute macros */ +#define IMC_EVENT_OFFSET_MASK 0xffffffffULL + +/* + * Device tree parser code detects IMC pmu support and + * registers new IMC pmus. This structure will hold the + * pmu functions, events, counter memory information + * and attrs for each imc pmu and will be referenced at + * the time of pmu registration. + */ +struct imc_pmu { + struct pmu pmu; + struct imc_mem_info *mem_info; + struct imc_events **events; + /* + * Attribute groups for the PMU. Slot 0 used for + * format attribute, slot 1 used for cpusmask attribute, + * slot 2 used for event attribute. Slot 3 keep as + * NULL. + */ + const struct attribute_group *attr_groups[4]; + u32 counter_mem_size; + int domain; + /* + * flag to notify whether the memory is mmaped + * or allocated by kernel. + */ + bool imc_counter_mmaped; +}; + +/* + * Structure to hold id, lock and reference count for the imc events which + * are inited. + */ +struct imc_pmu_ref { + struct mutex lock; + unsigned int id; + int refc; +}; + +/* + * In-Memory Collection Counters type. + * Data comes from Device tree. + * Three device type are supported. + */ + +enum { + IMC_TYPE_THREAD = 0x1, + IMC_TYPE_CORE = 0x4, + IMC_TYPE_CHIP = 0x10, +}; + +/* + * Domains for IMC PMUs + */ +#define IMC_DOMAIN_NEST 1 +#define IMC_DOMAIN_CORE 2 +#define IMC_DOMAIN_THREAD 3 + +extern int init_imc_pmu(struct device_node *parent, + struct imc_pmu *pmu_ptr, int pmu_id); +extern void thread_imc_disable(void); +#endif /* __ASM_POWERPC_IMC_PMU_H */ diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h index 7cea76f11c26..83596f32f50b 100644 --- a/arch/powerpc/include/asm/kvm_book3s_asm.h +++ b/arch/powerpc/include/asm/kvm_book3s_asm.h @@ -104,6 +104,10 @@ struct kvmppc_host_state { u8 napping; #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE + /* + * hwthread_req/hwthread_state pair is used to pull sibling threads + * out of guest on pre-ISAv3.0B CPUs where threads share MMU. 
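[Editor's sketch] The THREAD_IMC_LDBAR_MASK / THREAD_IMC_ENABLE macros introduced earlier in imc-pmu.h are meant to be OR-ed together to form an LDBAR value (the actual SPR write happens in the arch/perf code). A stand-alone illustration; the buffer address below is hypothetical:

#include <assert.h>
#include <stdint.h>

#define THREAD_IMC_LDBAR_MASK 0x0003ffffffffe000ULL
#define THREAD_IMC_ENABLE     0x8000000000000000ULL

int main(void)
{
        /* hypothetical, suitably aligned per-thread counter buffer address */
        uint64_t base  = 0x2000ULL;
        uint64_t ldbar = (base & THREAD_IMC_LDBAR_MASK) | THREAD_IMC_ENABLE;

        assert(ldbar & THREAD_IMC_ENABLE);               /* enable bit set */
        assert((ldbar & THREAD_IMC_LDBAR_MASK) == base); /* address preserved */
        return 0;
}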
+ */ u8 hwthread_req; u8 hwthread_state; u8 host_ipi; diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index cd2fc1cc1cc7..73b92017b6d7 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h @@ -76,7 +76,6 @@ struct machdep_calls { void __noreturn (*restart)(char *cmd); void __noreturn (*halt)(void); - void (*panic)(char *str); void (*cpu_die)(void); long (*time_init)(void); /* Optional, may be NULL */ diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index 35bec1c5bd5a..309592589e30 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h @@ -77,76 +77,8 @@ extern void switch_cop(struct mm_struct *next); extern int use_cop(unsigned long acop, struct mm_struct *mm); extern void drop_cop(unsigned long acop, struct mm_struct *mm); -/* - * switch_mm is the entry point called from the architecture independent - * code in kernel/sched/core.c - */ -static inline void switch_mm_irqs_off(struct mm_struct *prev, - struct mm_struct *next, - struct task_struct *tsk) -{ - bool new_on_cpu = false; - - /* Mark this context has been used on the new CPU */ - if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) { - cpumask_set_cpu(smp_processor_id(), mm_cpumask(next)); - - /* - * This full barrier orders the store to the cpumask above vs - * a subsequent operation which allows this CPU to begin loading - * translations for next. - * - * When using the radix MMU that operation is the load of the - * MMU context id, which is then moved to SPRN_PID. - * - * For the hash MMU it is either the first load from slb_cache - * in switch_slb(), and/or the store of paca->mm_ctx_id in - * copy_mm_to_paca(). - * - * On the read side the barrier is in pte_xchg(), which orders - * the store to the PTE vs the load of mm_cpumask. - */ - smp_mb(); - - new_on_cpu = true; - } - - /* 32-bit keeps track of the current PGDIR in the thread struct */ -#ifdef CONFIG_PPC32 - tsk->thread.pgdir = next->pgd; -#endif /* CONFIG_PPC32 */ - - /* 64-bit Book3E keeps track of current PGD in the PACA */ -#ifdef CONFIG_PPC_BOOK3E_64 - get_paca()->pgd = next->pgd; -#endif - /* Nothing else to do if we aren't actually switching */ - if (prev == next) - return; - -#ifdef CONFIG_PPC_ICSWX - /* Switch coprocessor context only if prev or next uses a coprocessor */ - if (prev->context.acop || next->context.acop) - switch_cop(next); -#endif /* CONFIG_PPC_ICSWX */ - - /* We must stop all altivec streams before changing the HW - * context - */ -#ifdef CONFIG_ALTIVEC - if (cpu_has_feature(CPU_FTR_ALTIVEC)) - asm volatile ("dssall"); -#endif /* CONFIG_ALTIVEC */ - - if (new_on_cpu) - radix_kvm_prefetch_workaround(next); - - /* - * The actual HW switching method differs between the various - * sub architectures. 
Out of line for now - */ - switch_mmu_context(prev, next, tsk); -} +extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, + struct task_struct *tsk); static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) @@ -168,11 +100,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, */ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) { - unsigned long flags; - - local_irq_save(flags); switch_mm(prev, next, current); - local_irq_restore(flags); } /* We don't currently use enter_lazy_tlb() for anything */ diff --git a/arch/powerpc/include/asm/nmi.h b/arch/powerpc/include/asm/nmi.h index 6f8e79cd35d8..3760150a0ff0 100644 --- a/arch/powerpc/include/asm/nmi.h +++ b/arch/powerpc/include/asm/nmi.h @@ -1,9 +1,8 @@ #ifndef _ASM_NMI_H #define _ASM_NMI_H -#ifdef CONFIG_HARDLOCKUP_DETECTOR +#ifdef CONFIG_PPC_WATCHDOG extern void arch_touch_nmi_watchdog(void); - extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self); #define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index 91314268f04f..185c6a47f9ba 100644 --- a/arch/powerpc/include/asm/nohash/32/pgtable.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -121,7 +121,7 @@ extern int icache_44x_need_flush; #include #elif defined(CONFIG_FSL_BOOKE) #include -#elif defined(CONFIG_8xx) +#elif defined(CONFIG_PPC_8xx) #include #endif @@ -337,9 +337,6 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm, #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 }) -extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, - pmd_t **pmdp); - int map_kernel_page(unsigned long va, phys_addr_t pa, int flags); #endif /* !__ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h index e5805ad78e12..17989c3d9a24 100644 --- a/arch/powerpc/include/asm/nohash/pgtable.h +++ b/arch/powerpc/include/asm/nohash/pgtable.h @@ -14,6 +14,7 @@ static inline int pte_write(pte_t pte) { return (pte_val(pte) & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO; } +static inline int pte_read(pte_t pte) { return 1; } static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; } diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h index 3130a73652c7..450a60b81d2a 100644 --- a/arch/powerpc/include/asm/opal-api.h +++ b/arch/powerpc/include/asm/opal-api.h @@ -42,6 +42,7 @@ #define OPAL_I2C_STOP_ERR -24 #define OPAL_XIVE_PROVISIONING -31 #define OPAL_XIVE_FREE_ACTIVE -32 +#define OPAL_TIMEOUT -33 /* API Tokens (in r0) */ #define OPAL_INVALID_CALL -1 @@ -190,7 +191,16 @@ #define OPAL_NPU_INIT_CONTEXT 146 #define OPAL_NPU_DESTROY_CONTEXT 147 #define OPAL_NPU_MAP_LPAR 148 -#define OPAL_LAST 148 +#define OPAL_IMC_COUNTERS_INIT 149 +#define OPAL_IMC_COUNTERS_START 150 +#define OPAL_IMC_COUNTERS_STOP 151 +#define OPAL_GET_POWERCAP 152 +#define OPAL_SET_POWERCAP 153 +#define OPAL_GET_POWER_SHIFT_RATIO 154 +#define OPAL_SET_POWER_SHIFT_RATIO 155 +#define OPAL_SENSOR_GROUP_CLEAR 156 +#define OPAL_PCI_SET_P2P 157 +#define OPAL_LAST 157 /* Device tree flags */ @@ -1084,6 +1094,18 
@@ enum { XIVE_DUMP_EMU_STATE = 5, }; +/* "type" argument options for OPAL_IMC_COUNTERS_* calls */ +enum { + OPAL_IMC_COUNTERS_NEST = 1, + OPAL_IMC_COUNTERS_CORE = 2, +}; + + +/* PCI p2p descriptor */ +#define OPAL_PCI_P2P_ENABLE 0x1 +#define OPAL_PCI_P2P_LOAD 0x2 +#define OPAL_PCI_P2P_STORE 0x4 + #endif /* __ASSEMBLY__ */ #endif /* __OPAL_API_H */ diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h index 588fb1c23af9..726c23304a57 100644 --- a/arch/powerpc/include/asm/opal.h +++ b/arch/powerpc/include/asm/opal.h @@ -50,7 +50,7 @@ int64_t opal_tpo_write(uint64_t token, uint32_t year_mon_day, uint32_t hour_min); int64_t opal_cec_power_down(uint64_t request); int64_t opal_cec_reboot(void); -int64_t opal_cec_reboot2(uint32_t reboot_type, char *diag); +int64_t opal_cec_reboot2(uint32_t reboot_type, const char *diag); int64_t opal_read_nvram(uint64_t buffer, uint64_t size, uint64_t offset); int64_t opal_write_nvram(uint64_t buffer, uint64_t size, uint64_t offset); int64_t opal_handle_interrupt(uint64_t isn, __be64 *outstanding_event_mask); @@ -267,6 +267,19 @@ int64_t opal_xive_allocate_irq(uint32_t chip_id); int64_t opal_xive_free_irq(uint32_t girq); int64_t opal_xive_sync(uint32_t type, uint32_t id); int64_t opal_xive_dump(uint32_t type, uint32_t id); +int64_t opal_pci_set_p2p(uint64_t phb_init, uint64_t phb_target, + uint64_t desc, uint16_t pe_number); + +int64_t opal_imc_counters_init(uint32_t type, uint64_t address, + uint64_t cpu_pir); +int64_t opal_imc_counters_start(uint32_t type, uint64_t cpu_pir); +int64_t opal_imc_counters_stop(uint32_t type, uint64_t cpu_pir); + +int opal_get_powercap(u32 handle, int token, u32 *pcap); +int opal_set_powercap(u32 handle, int token, u32 pcap); +int opal_get_power_shift_ratio(u32 handle, int token, u32 *psr); +int opal_set_power_shift_ratio(u32 handle, int token, u32 psr); +int opal_sensor_group_clear(u32 group_hndl, int token); /* Internal functions */ extern int early_init_dt_scan_opal(unsigned long node, const char *uname, @@ -345,6 +358,10 @@ static inline int opal_get_async_rc(struct opal_msg msg) void opal_wake_poller(void); +void opal_powercap_init(void); +void opal_psr_init(void); +void opal_sensor_groups_init(void); + #endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_OPAL_H */ diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index dc88a31cc79a..04b60af027ae 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -31,6 +31,7 @@ #endif #include #include +#include register struct paca_struct *local_paca asm("r13"); @@ -183,6 +184,12 @@ struct paca_struct { struct paca_struct **thread_sibling_pacas; /* The PSSCR value that the kernel requested before going to stop */ u64 requested_psscr; + + /* + * Save area for additional SPRs that need to be + * saved/restored during cpuidle stop. 
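[Editor's sketch] A hedged kernel-context sketch of driving the new OPAL IMC entry points declared in opal.h above; it only uses the signatures from that hunk plus get_hard_smp_processor_id()/OPAL_SUCCESS from existing headers, and the helper name is made up:

/* Hypothetical helper: start nest-domain IMC counters for the current CPU. */
static int example_start_nest_imc(void)
{
        int64_t rc;

        rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_NEST,
                                     get_hard_smp_processor_id(smp_processor_id()));
        return rc == OPAL_SUCCESS ? 0 : -EIO;
}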
+ */ + struct stop_sprs stop_sprs; #endif #ifdef CONFIG_PPC_STD_MMU_64 diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h index 56c67d3f0108..0b8aa1fe2d5f 100644 --- a/arch/powerpc/include/asm/pci-bridge.h +++ b/arch/powerpc/include/asm/pci-bridge.h @@ -195,7 +195,6 @@ struct pci_dn { struct pci_dn *parent; struct pci_controller *phb; /* for pci devices */ struct iommu_table_group *table_group; /* for phb's or bridges */ - struct device_node *node; /* back-pointer to the device_node */ int pci_ext_config_space; /* for pci devices */ diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h index d795c5d5789c..45ae1212ab8a 100644 --- a/arch/powerpc/include/asm/pgalloc.h +++ b/arch/powerpc/include/asm/pgalloc.h @@ -17,6 +17,8 @@ static inline gfp_t pgtable_gfp_flags(struct mm_struct *mm, gfp_t gfp) } #endif /* MODULE */ +#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO) + #ifdef CONFIG_PPC_BOOK3S #include #else diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index afae9a336136..7d0d38f58243 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -66,22 +66,14 @@ extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr, #ifndef CONFIG_TRANSPARENT_HUGEPAGE #define pmd_large(pmd) 0 #endif -pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, - bool *is_thp, unsigned *shift); -static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, - bool *is_thp, unsigned *shift) -{ - VM_WARN(!arch_irqs_disabled(), - "%s called with irq enabled\n", __func__); - return __find_linux_pte_or_hugepte(pgdir, ea, is_thp, shift); -} +/* can we use this in kvm */ unsigned long vmalloc_to_phys(void *vmalloc_addr); void pgtable_cache_add(unsigned shift, void (*ctor)(void *)); void pgtable_cache_init(void); -#ifdef CONFIG_STRICT_KERNEL_RWX +#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_PPC32) void mark_initmem_nx(void); #else static inline void mark_initmem_nx(void) { } diff --git a/arch/powerpc/include/asm/pnv-pci.h b/arch/powerpc/include/asm/pnv-pci.h index de9681034353..3e5cf251ad9a 100644 --- a/arch/powerpc/include/asm/pnv-pci.h +++ b/arch/powerpc/include/asm/pnv-pci.h @@ -26,6 +26,8 @@ extern int pnv_pci_get_presence_state(uint64_t id, uint8_t *state); extern int pnv_pci_get_power_state(uint64_t id, uint8_t *state); extern int pnv_pci_set_power_state(uint64_t id, uint8_t state, struct opal_msg *msg); +extern int pnv_pci_set_p2p(struct pci_dev *initiator, struct pci_dev *target, + u64 desc); int pnv_phb_to_cxl_mode(struct pci_dev *dev, uint64_t mode); int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq, diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index fa9ebaead91e..ce0930d68857 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -193,6 +193,7 @@ #define PPC_INST_CLRBHRB 0x7c00035c #define PPC_INST_COPY 0x7c20060c #define PPC_INST_CP_ABORT 0x7c00068c +#define PPC_INST_DARN 0x7c0005e6 #define PPC_INST_DCBA 0x7c0005ec #define PPC_INST_DCBA_MASK 0xfc0007fe #define PPC_INST_DCBAL 0x7c2005ec @@ -204,6 +205,8 @@ #define PPC_INST_ISEL_MASK 0xfc00003e #define PPC_INST_LDARX 0x7c0000a8 #define PPC_INST_STDCX 0x7c0001ad +#define PPC_INST_LQARX 0x7c000228 +#define PPC_INST_STQCX 0x7c00016d #define PPC_INST_LSWI 0x7c0004aa #define PPC_INST_LSWX 0x7c00042a #define PPC_INST_LWARX 0x7c000028 @@ -261,7 +264,7 @@ 
#define PPC_INST_TLBSRX_DOT 0x7c0006a5 #define PPC_INST_VPMSUMW 0x10000488 #define PPC_INST_VPMSUMD 0x100004c8 -#define PPC_INST_XXLOR 0xf0000510 +#define PPC_INST_XXLOR 0xf0000490 #define PPC_INST_XXSWAPD 0xf0000250 #define PPC_INST_XVCPSGNDP 0xf0000780 #define PPC_INST_TRECHKPT 0x7c0007dd @@ -395,16 +398,25 @@ #define PPC_CP_ABORT stringify_in_c(.long PPC_INST_CP_ABORT) #define PPC_COPY(a, b) stringify_in_c(.long PPC_INST_COPY | \ ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_DARN(t, l) stringify_in_c(.long PPC_INST_DARN | \ + ___PPC_RT(t) | \ + (((l) & 0x3) << 16)) #define PPC_DCBAL(a, b) stringify_in_c(.long PPC_INST_DCBAL | \ __PPC_RA(a) | __PPC_RB(b)) #define PPC_DCBZL(a, b) stringify_in_c(.long PPC_INST_DCBZL | \ __PPC_RA(a) | __PPC_RB(b)) +#define PPC_LQARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LQARX | \ + ___PPC_RT(t) | ___PPC_RA(a) | \ + ___PPC_RB(b) | __PPC_EH(eh)) #define PPC_LDARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LDARX | \ ___PPC_RT(t) | ___PPC_RA(a) | \ ___PPC_RB(b) | __PPC_EH(eh)) #define PPC_LWARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LWARX | \ ___PPC_RT(t) | ___PPC_RA(a) | \ ___PPC_RB(b) | __PPC_EH(eh)) +#define PPC_STQCX(t, a, b) stringify_in_c(.long PPC_INST_STQCX | \ + ___PPC_RT(t) | ___PPC_RA(a) | \ + ___PPC_RB(b)) #define PPC_MSGSND(b) stringify_in_c(.long PPC_INST_MSGSND | \ ___PPC_RB(b)) #define PPC_MSGSYNC stringify_in_c(.long PPC_INST_MSGSYNC) @@ -414,6 +426,8 @@ ___PPC_RB(b)) #define PPC_MSGCLRP(b) stringify_in_c(.long PPC_INST_MSGCLRP | \ ___PPC_RB(b)) +#define PPC_PASTE(a, b) stringify_in_c(.long PPC_INST_PASTE | \ + ___PPC_RA(a) | ___PPC_RB(b)) #define PPC_POPCNTB(a, s) stringify_in_c(.long PPC_INST_POPCNTB | \ __PPC_RA(a) | __PPC_RS(s)) #define PPC_POPCNTD(a, s) stringify_in_c(.long PPC_INST_POPCNTD | \ diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 6baeeb9acd0d..36f3e41c9fbe 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -378,10 +378,16 @@ BEGIN_FTR_SECTION_NESTED(96); \ cmpwi dest,0; \ beq- 90b; \ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96) -#elif defined(CONFIG_8xx) -#define MFTB(dest) mftb dest #else -#define MFTB(dest) mfspr dest, SPRN_TBRL +#define MFTB(dest) MFTBL(dest) +#endif + +#ifdef CONFIG_PPC_8xx +#define MFTBL(dest) mftb dest +#define MFTBU(dest) mftbu dest +#else +#define MFTBL(dest) mfspr dest, SPRN_TBRL +#define MFTBU(dest) mfspr dest, SPRN_TBRU #endif #ifndef CONFIG_SMP @@ -411,7 +417,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601) * and they must be used. 
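[Editor's sketch] The PPC_DARN() macro above simply ORs the target register and the 2-bit L field into the base opcode. A small host-side check of the resulting encoding; ___PPC_RT() is assumed to place the register field at bit 21, as in the existing ppc-opcode.h definitions:

#include <assert.h>
#include <stdint.h>

#define PPC_INST_DARN   0x7c0005e6
#define ___PPC_RT(t)    (((uint32_t)(t) & 0x1f) << 21)   /* assumed layout */
#define PPC_DARN(t, l)  (PPC_INST_DARN | ___PPC_RT(t) | (((l) & 0x3) << 16))

int main(void)
{
        uint32_t insn = PPC_DARN(3, 1);  /* darn r3,1 */

        assert(insn == 0x7c6105e6);
        return 0;
}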
*/ -#if !defined(CONFIG_4xx) && !defined(CONFIG_8xx) +#if !defined(CONFIG_4xx) && !defined(CONFIG_PPC_8xx) #define tlbia \ li r4,1024; \ mtctr r4; \ @@ -439,7 +445,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601) .machine push ; \ .machine "power4" ; \ lis scratch,0x60000000@h; \ - dcbt r0,scratch,0b01010; \ + dcbt 0,scratch,0b01010; \ .machine pop /* diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h index 35c00d7a0cf8..825bd5998701 100644 --- a/arch/powerpc/include/asm/prom.h +++ b/arch/powerpc/include/asm/prom.h @@ -159,7 +159,10 @@ struct of_drconf_cell { #define OV5_PFO_HW_842 0x1140 /* PFO Compression Accelerator */ #define OV5_PFO_HW_ENCR 0x1120 /* PFO Encryption Accelerator */ #define OV5_SUB_PROCESSORS 0x1501 /* 1,2,or 4 Sub-Processors supported */ -#define OV5_XIVE_EXPLOIT 0x1701 /* XIVE exploitation supported */ +#define OV5_XIVE_SUPPORT 0x17C0 /* XIVE Exploitation Support Mask */ +#define OV5_XIVE_LEGACY 0x1700 /* XIVE legacy mode Only */ +#define OV5_XIVE_EXPLOIT 0x1740 /* XIVE exploitation mode Only */ +#define OV5_XIVE_EITHER 0x1780 /* XIVE legacy or exploitation mode */ /* MMU Base Architecture */ #define OV5_MMU_SUPPORT 0x18C0 /* MMU Mode Support Mask */ #define OV5_MMU_HASH 0x1800 /* Hash MMU Only */ diff --git a/arch/powerpc/include/asm/pte-walk.h b/arch/powerpc/include/asm/pte-walk.h new file mode 100644 index 000000000000..2d633e9d686c --- /dev/null +++ b/arch/powerpc/include/asm/pte-walk.h @@ -0,0 +1,35 @@ +#ifndef _ASM_POWERPC_PTE_WALK_H +#define _ASM_POWERPC_PTE_WALK_H + +#include + +/* Don't use this directly */ +extern pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea, + bool *is_thp, unsigned *hshift); + +static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea, + bool *is_thp, unsigned *hshift) +{ + VM_WARN(!arch_irqs_disabled(), "%s called with irq enabled\n", __func__); + return __find_linux_pte(pgdir, ea, is_thp, hshift); +} + +static inline pte_t *find_init_mm_pte(unsigned long ea, unsigned *hshift) +{ + pgd_t *pgdir = init_mm.pgd; + return __find_linux_pte(pgdir, ea, NULL, hshift); +} +/* + * This is what we should always use. Any other lockless page table lookup needs + * careful audit against THP split. + */ +static inline pte_t *find_current_mm_pte(pgd_t *pgdir, unsigned long ea, + bool *is_thp, unsigned *hshift) +{ + VM_WARN(!arch_irqs_disabled(), "%s called with irq enabled\n", __func__); + VM_WARN(pgdir != current->mm->pgd, + "%s lock less page table lookup called on wrong mm\n", __func__); + return __find_linux_pte(pgdir, ea, is_thp, hshift); +} + +#endif /* _ASM_POWERPC_PTE_WALK_H */ diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index a3b6575c7842..f92eaf7a4c0d 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -22,9 +22,9 @@ #include #endif -#ifdef CONFIG_8xx +#ifdef CONFIG_PPC_8xx #include -#endif /* CONFIG_8xx */ +#endif /* CONFIG_PPC_8xx */ #define MSR_SF_LG 63 /* Enable 64 bit mode */ #define MSR_ISF_LG 61 /* Interrupt 64b mode valid on 630 */ @@ -135,7 +135,7 @@ #define MSR_KERNEL (MSR_ | MSR_64BIT) #define MSR_USER32 (MSR_ | MSR_PR | MSR_EE) #define MSR_USER64 (MSR_USER32 | MSR_64BIT) -#elif defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_8xx) +#elif defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_8xx) /* Default MSR for kernel mode. 
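[Editor's sketch] The find_current_mm_pte()/find_linux_pte() helpers added in pte-walk.h above must be called with interrupts disabled, which is what the VM_WARN checks enforce. A hypothetical kernel-context sketch of the intended calling pattern; the helper name and use case are made up:

static bool example_ea_is_mapped(unsigned long ea)
{
        unsigned long flags;
        pte_t *ptep;
        bool mapped = false;

        /* IRQs off so the page tables cannot be freed or split under us */
        local_irq_save(flags);
        ptep = find_current_mm_pte(current->mm->pgd, ea, NULL, NULL);
        if (ptep && pte_present(*ptep))
                mapped = true;
        local_irq_restore(flags);

        return mapped;
}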
*/ #define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR) #define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE) @@ -272,16 +272,65 @@ #define SPRN_DAR 0x013 /* Data Address Register */ #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */ #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */ -#define DSISR_NOHPTE 0x40000000 /* no translation found */ -#define DSISR_PROTFAULT 0x08000000 /* protection fault */ -#define DSISR_BADACCESS 0x04000000 /* bad access to CI or G */ -#define DSISR_ISSTORE 0x02000000 /* access was a store */ -#define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */ -#define DSISR_NOSEGMENT 0x00200000 /* SLB miss */ -#define DSISR_KEYFAULT 0x00200000 /* Key fault */ -#define DSISR_UNSUPP_MMU 0x00080000 /* Unsupported MMU config */ -#define DSISR_SET_RC 0x00040000 /* Failed setting of R/C bits */ -#define DSISR_PGDIRFAULT 0x00020000 /* Fault on page directory */ +#define DSISR_BAD_DIRECT_ST 0x80000000 /* Obsolete: Direct store error */ +#define DSISR_NOHPTE 0x40000000 /* no translation found */ +#define DSISR_ATTR_CONFLICT 0x20000000 /* P9: Process vs. Partition attr */ +#define DSISR_NOEXEC_OR_G 0x10000000 /* Alias of SRR1 bit, see below */ +#define DSISR_PROTFAULT 0x08000000 /* protection fault */ +#define DSISR_BADACCESS 0x04000000 /* bad access to CI or G */ +#define DSISR_ISSTORE 0x02000000 /* access was a store */ +#define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */ +#define DSISR_NOSEGMENT 0x00200000 /* STAB miss (unsupported) */ +#define DSISR_KEYFAULT 0x00200000 /* Storage Key fault */ +#define DSISR_BAD_EXT_CTRL 0x00100000 /* Obsolete: External ctrl error */ +#define DSISR_UNSUPP_MMU 0x00080000 /* P9: Unsupported MMU config */ +#define DSISR_SET_RC 0x00040000 /* P9: Failed setting of R/C bits */ +#define DSISR_PRTABLE_FAULT 0x00020000 /* P9: Fault on process table */ +#define DSISR_ICSWX_NO_CT 0x00004000 /* P7: icswx unavailable cp type */ +#define DSISR_BAD_COPYPASTE 0x00000008 /* P9: Copy/Paste on wrong memtype */ +#define DSISR_BAD_AMO 0x00000004 /* P9: Incorrect AMO opcode */ +#define DSISR_BAD_CI_LDST 0x00000002 /* P8: Bad HV CI load/store */ + +/* + * DSISR_NOEXEC_OR_G doesn't actually exist. This bit is always + * 0 on DSIs. However, on ISIs, the corresponding bit in SRR1 + * indicates an attempt at executing from a no-execute PTE + * or segment or from a guarded page. + * + * We add a definition here for completeness as we alias + * DSISR and SRR1 in do_page_fault. + */ + +/* + * DSISR bits that are treated as a fault. 
Any bit set + * here will skip hash_page, and cause do_page_fault to + * trigger a SIGBUS or SIGSEGV: + */ +#define DSISR_BAD_FAULT_32S (DSISR_BAD_DIRECT_ST | \ + DSISR_BADACCESS | \ + DSISR_BAD_EXT_CTRL) +#define DSISR_BAD_FAULT_64S (DSISR_BAD_FAULT_32S | \ + DSISR_ATTR_CONFLICT | \ + DSISR_KEYFAULT | \ + DSISR_UNSUPP_MMU | \ + DSISR_PRTABLE_FAULT | \ + DSISR_ICSWX_NO_CT | \ + DSISR_BAD_COPYPASTE | \ + DSISR_BAD_AMO | \ + DSISR_BAD_CI_LDST) +/* + * These bits are equivalent in SRR1 and DSISR for 0x400 + * instruction access interrupts on Book3S + */ +#define DSISR_SRR1_MATCH_32S (DSISR_NOHPTE | \ + DSISR_NOEXEC_OR_G | \ + DSISR_PROTFAULT) +#define DSISR_SRR1_MATCH_64S (DSISR_SRR1_MATCH_32S | \ + DSISR_KEYFAULT | \ + DSISR_UNSUPP_MMU | \ + DSISR_SET_RC | \ + DSISR_PRTABLE_FAULT) + #define SPRN_TBRL 0x10C /* Time Base Read Lower Register (user, R/O) */ #define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */ #define SPRN_CIR 0x11B /* Chip Information Register (hyper, R/0) */ @@ -307,6 +356,7 @@ #define SPRN_PMSR 0x355 /* Power Management Status Reg */ #define SPRN_PMMAR 0x356 /* Power Management Memory Activity Register */ #define SPRN_PSSCR 0x357 /* Processor Stop Status and Control Register (ISA 3.0) */ +#define SPRN_PSSCR_PR 0x337 /* PSSCR ISA 3.0, privileged mode access */ #define SPRN_PMCR 0x374 /* Power Management Control Register */ /* HFSCR and FSCR bit numbers are the same */ @@ -675,6 +725,7 @@ * may not be recoverable */ #define SRR1_WS_DEEPER 0x00020000 /* Some resources not maintained */ #define SRR1_WS_DEEP 0x00010000 /* All resources maintained */ +#define SRR1_PROGTM 0x00200000 /* TM Bad Thing */ #define SRR1_PROGFPE 0x00100000 /* Floating Point Enabled */ #define SRR1_PROGILL 0x00080000 /* Illegal instruction */ #define SRR1_PROGPRIV 0x00040000 /* Privileged instruction */ @@ -1114,7 +1165,7 @@ #endif #endif -#ifdef CONFIG_8xx +#ifdef CONFIG_PPC_8xx #define SPRN_SPRG_SCRATCH0 SPRN_SPRG0 #define SPRN_SPRG_SCRATCH1 SPRN_SPRG1 #define SPRN_SPRG_SCRATCH2 SPRN_SPRG2 @@ -1197,10 +1248,8 @@ * differentiated by the version number in the Communication Processor * Module (CPM). 
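[Editor's sketch] To see what the DSISR_BAD_FAULT_64S grouping above buys: an ordinary hash miss (DSISR_NOHPTE, with or without DSISR_ISSTORE) is not in the mask and still goes through the normal fault path, while a storage-key fault, for example, short-circuits straight to a signal. A stand-alone sketch with the bit values copied from this hunk:

#include <assert.h>

#define DSISR_NOHPTE        0x40000000
#define DSISR_ISSTORE       0x02000000
#define DSISR_KEYFAULT      0x00200000
#define DSISR_BAD_DIRECT_ST 0x80000000
#define DSISR_ATTR_CONFLICT 0x20000000
#define DSISR_BADACCESS     0x04000000
#define DSISR_BAD_EXT_CTRL  0x00100000
#define DSISR_UNSUPP_MMU    0x00080000
#define DSISR_PRTABLE_FAULT 0x00020000
#define DSISR_ICSWX_NO_CT   0x00004000
#define DSISR_BAD_COPYPASTE 0x00000008
#define DSISR_BAD_AMO       0x00000004
#define DSISR_BAD_CI_LDST   0x00000002

#define DSISR_BAD_FAULT_64S (DSISR_BAD_DIRECT_ST | DSISR_BADACCESS | \
                             DSISR_BAD_EXT_CTRL | DSISR_ATTR_CONFLICT | \
                             DSISR_KEYFAULT | DSISR_UNSUPP_MMU | \
                             DSISR_PRTABLE_FAULT | DSISR_ICSWX_NO_CT | \
                             DSISR_BAD_COPYPASTE | DSISR_BAD_AMO | \
                             DSISR_BAD_CI_LDST)

int main(void)
{
        unsigned int store_miss = DSISR_NOHPTE | DSISR_ISSTORE;
        unsigned int key_fault  = DSISR_NOHPTE | DSISR_KEYFAULT;

        assert(!(store_miss & DSISR_BAD_FAULT_64S)); /* normal fault path */
        assert(key_fault & DSISR_BAD_FAULT_64S);     /* SIGBUS/SIGSEGV path */
        return 0;
}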
*/ -#define PVR_821 0x00500000 -#define PVR_823 PVR_821 -#define PVR_850 PVR_821 -#define PVR_860 PVR_821 +#define PVR_8xx 0x00500000 + #define PVR_8240 0x00810100 #define PVR_8245 0x80811014 #define PVR_8260 PVR_8240 @@ -1295,12 +1344,12 @@ static inline void msr_check_and_clear(unsigned long bits) ".section __ftr_fixup,\"a\"\n" \ ".align 3\n" \ "98:\n" \ - " .llong %1\n" \ - " .llong %1\n" \ - " .llong 97b-98b\n" \ - " .llong 99b-98b\n" \ - " .llong 0\n" \ - " .llong 0\n" \ + " .8byte %1\n" \ + " .8byte %1\n" \ + " .8byte 97b-98b\n" \ + " .8byte 99b-98b\n" \ + " .8byte 0\n" \ + " .8byte 0\n" \ ".previous" \ : "=r" (rval) \ : "i" (CPU_FTR_CELL_TB_BUG), "i" (SPRN_TBRL) : "cr0"); \ @@ -1313,7 +1362,7 @@ static inline void msr_check_and_clear(unsigned long bits) #else /* __powerpc64__ */ -#if defined(CONFIG_8xx) +#if defined(CONFIG_PPC_8xx) #define mftbl() ({unsigned long rval; \ asm volatile("mftbl %0" : "=r" (rval)); rval;}) #define mftbu() ({unsigned long rval; \ diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h index 737e012ef56e..eb2a33d5df26 100644 --- a/arch/powerpc/include/asm/reg_booke.h +++ b/arch/powerpc/include/asm/reg_booke.h @@ -221,10 +221,7 @@ #define SPRN_CSRR0 SPRN_SRR2 /* Critical Save and Restore Register 0 */ #define SPRN_CSRR1 SPRN_SRR3 /* Critical Save and Restore Register 1 */ #endif - -#ifdef CONFIG_PPC_ICSWX #define SPRN_HACOP 0x15F /* Hypervisor Available Coprocessor Register */ -#endif /* Bit definitions for CCR1. */ #define CCR1_DPC 0x00000100 /* Disable L1 I-Cache/D-Cache parity checking */ diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index 654d64c9f3ac..3a3fb0ca68f5 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h @@ -23,7 +23,6 @@ extern void reloc_got2(unsigned long); void check_for_initrd(void); void initmem_init(void); -void setup_panic(void); #define ARCH_PANIC_TIMEOUT 180 #ifdef CONFIG_PPC_PSERIES diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h index 8ea98504f900..fac963e10d39 100644 --- a/arch/powerpc/include/asm/smp.h +++ b/arch/powerpc/include/asm/smp.h @@ -97,6 +97,7 @@ static inline void set_hard_smp_processor_id(int cpu, int phys) #endif DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map); +DECLARE_PER_CPU(cpumask_var_t, cpu_l2_cache_map); DECLARE_PER_CPU(cpumask_var_t, cpu_core_map); static inline struct cpumask *cpu_sibling_mask(int cpu) @@ -109,6 +110,11 @@ static inline struct cpumask *cpu_core_mask(int cpu) return per_cpu(cpu_core_map, cpu); } +static inline struct cpumask *cpu_l2_cache_mask(int cpu) +{ + return per_cpu(cpu_l2_cache_map, cpu); +} + extern int cpu_to_core_id(int cpu); /* Since OpenPIC has only 4 IPIs, we use slightly different message numbers. diff --git a/arch/powerpc/include/asm/sstep.h b/arch/powerpc/include/asm/sstep.h index d3a42cc45a82..ab9d849644d0 100644 --- a/arch/powerpc/include/asm/sstep.h +++ b/arch/powerpc/include/asm/sstep.h @@ -23,12 +23,9 @@ struct pt_regs; #define IS_RFID(instr) (((instr) & 0xfc0007fe) == 0x4c000024) #define IS_RFI(instr) (((instr) & 0xfc0007fe) == 0x4c000064) -/* Emulate instructions that cause a transfer of control. */ -extern int emulate_step(struct pt_regs *regs, unsigned int instr); - enum instruction_type { COMPUTE, /* arith/logical/CR op, etc. 
*/ - LOAD, + LOAD, /* load and store types need to be contiguous */ LOAD_MULTI, LOAD_FP, LOAD_VMX, @@ -55,10 +52,31 @@ enum instruction_type { #define INSTR_TYPE_MASK 0x1f +#define OP_IS_LOAD_STORE(type) (LOAD <= (type) && (type) <= STCX) + +/* Compute flags, ORed in with type */ +#define SETREG 0x20 +#define SETCC 0x40 +#define SETXER 0x80 + +/* Branch flags, ORed in with type */ +#define SETLK 0x20 +#define BRTAKEN 0x40 +#define DECCTR 0x80 + /* Load/store flags, ORed in with type */ #define SIGNEXT 0x20 #define UPDATE 0x40 /* matches bit in opcode 31 instructions */ #define BYTEREV 0x80 +#define FPCONV 0x100 + +/* Barrier type field, ORed in with type */ +#define BARRIER_MASK 0xe0 +#define BARRIER_SYNC 0x00 +#define BARRIER_ISYNC 0x20 +#define BARRIER_EIEIO 0x40 +#define BARRIER_LWSYNC 0x60 +#define BARRIER_PTESYNC 0x80 /* Cacheop values, ORed in with type */ #define CACHEOP_MASK 0x700 @@ -67,10 +85,17 @@ enum instruction_type { #define DCBTST 0x200 #define DCBT 0x300 #define ICBI 0x400 +#define DCBZ 0x500 + +/* VSX flags values */ +#define VSX_FPCONV 1 /* do floating point SP/DP conversion */ +#define VSX_SPLAT 2 /* store loaded value into all elements */ +#define VSX_LDLEFT 4 /* load VSX register from left */ +#define VSX_CHECK_VEC 8 /* check MSR_VEC not MSR_VSX for reg >= 32 */ /* Size field in type word */ -#define SIZE(n) ((n) << 8) -#define GETSIZE(w) ((w) >> 8) +#define SIZE(n) ((n) << 12) +#define GETSIZE(w) ((w) >> 12) #define MKOP(t, f, s) ((t) | (f) | SIZE(s)) @@ -83,7 +108,63 @@ struct instruction_op { int update_reg; /* For MFSPR */ int spr; + u32 ccval; + u32 xerval; + u8 element_size; /* for VSX/VMX loads/stores */ + u8 vsx_flags; }; -extern int analyse_instr(struct instruction_op *op, struct pt_regs *regs, +union vsx_reg { + u8 b[16]; + u16 h[8]; + u32 w[4]; + unsigned long d[2]; + float fp[4]; + double dp[2]; + __vector128 v; +}; + +/* + * Decode an instruction, and return information about it in *op + * without changing *regs. + * + * Return value is 1 if the instruction can be emulated just by + * updating *regs with the information in *op, -1 if we need the + * GPRs but *regs doesn't contain the full register set, or 0 + * otherwise. + */ +extern int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, unsigned int instr); + +/* + * Emulate an instruction that can be executed just by updating + * fields in *regs. + */ +void emulate_update_regs(struct pt_regs *reg, struct instruction_op *op); + +/* + * Emulate instructions that cause a transfer of control, + * arithmetic/logical instructions, loads and stores, + * cache operations and barriers. + * + * Returns 1 if the instruction was emulated successfully, + * 0 if it could not be emulated, or -1 for an instruction that + * should not be emulated (rfid, mtmsrd clearing MSR_RI, etc.). + */ +extern int emulate_step(struct pt_regs *regs, unsigned int instr); + +/* + * Emulate a load or store instruction by reading/writing the + * memory of the current process. FP/VMX/VSX registers are assumed + * to hold live values if the appropriate enable bit in regs->msr is + * set; otherwise this will use the saved values in the thread struct + * for user-mode accesses. 
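[Editor's sketch] The type word built by MKOP() above packs the base instruction_type in the low 5 bits, the flag bits in between, and the operand size from bit 12 upward (the SIZE/GETSIZE shift moves from 8 to 12 to make room for the new flags). A minimal stand-alone reproduction, using a stand-in value for LOAD since the full enum lives in sstep.h:

#include <assert.h>

#define INSTR_TYPE_MASK 0x1f
#define SIGNEXT         0x20
#define UPDATE          0x40
#define SIZE(n)         ((n) << 12)
#define GETSIZE(w)      ((w) >> 12)
#define MKOP(t, f, s)   ((t) | (f) | SIZE(s))

int main(void)
{
        int LOAD = 1;   /* stand-in; really enum instruction_type in sstep.h */
        unsigned int type = MKOP(LOAD, SIGNEXT | UPDATE, 8);

        assert((type & INSTR_TYPE_MASK) == LOAD);   /* base type survives */
        assert(type & UPDATE);                      /* flag bits survive */
        assert(GETSIZE(type) == 8);                 /* 8-byte operand size */
        return 0;
}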
+ */ +extern int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op); + +extern void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg, + const void *mem, bool cross_endian); +extern void emulate_vsx_store(struct instruction_op *op, + const union vsx_reg *reg, void *mem, + bool cross_endian); +extern int emulate_dcbz(unsigned long ea, struct pt_regs *regs); diff --git a/arch/powerpc/include/asm/string.h b/arch/powerpc/include/asm/string.h index da3cdffca440..cc9addefb51c 100644 --- a/arch/powerpc/include/asm/string.h +++ b/arch/powerpc/include/asm/string.h @@ -10,6 +10,7 @@ #define __HAVE_ARCH_MEMMOVE #define __HAVE_ARCH_MEMCMP #define __HAVE_ARCH_MEMCHR +#define __HAVE_ARCH_MEMSET16 extern char * strcpy(char *,const char *); extern char * strncpy(char *,const char *, __kernel_size_t); @@ -23,6 +24,31 @@ extern void * memmove(void *,const void *,__kernel_size_t); extern int memcmp(const void *,const void *,__kernel_size_t); extern void * memchr(const void *,int,__kernel_size_t); +#ifdef CONFIG_PPC64 +#define __HAVE_ARCH_MEMSET32 +#define __HAVE_ARCH_MEMSET64 + +extern void *__memset16(uint16_t *, uint16_t v, __kernel_size_t); +extern void *__memset32(uint32_t *, uint32_t v, __kernel_size_t); +extern void *__memset64(uint64_t *, uint64_t v, __kernel_size_t); + +static inline void *memset16(uint16_t *p, uint16_t v, __kernel_size_t n) +{ + return __memset16(p, v, n * 2); +} + +static inline void *memset32(uint32_t *p, uint32_t v, __kernel_size_t n) +{ + return __memset32(p, v, n * 4); +} + +static inline void *memset64(uint64_t *p, uint64_t v, __kernel_size_t n) +{ + return __memset64(p, v, n * 8); +} +#else +extern void *memset16(uint16_t *, uint16_t, __kernel_size_t); +#endif #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_STRING_H */ diff --git a/arch/powerpc/include/asm/timex.h b/arch/powerpc/include/asm/timex.h index 2cf846edb3fc..cb61eae5b7ed 100644 --- a/arch/powerpc/include/asm/timex.h +++ b/arch/powerpc/include/asm/timex.h @@ -29,7 +29,7 @@ static inline cycles_t get_cycles(void) ret = 0; __asm__ __volatile__( -#ifdef CONFIG_8xx +#ifdef CONFIG_PPC_8xx "97: mftb %0\n" #else "97: mfspr %0, %2\n" @@ -45,11 +45,7 @@ static inline cycles_t get_cycles(void) " .long 0\n" " .long 0\n" ".previous" -#ifdef CONFIG_8xx - : "=r" (ret) : "i" (CPU_FTR_601)); -#else : "=r" (ret) : "i" (CPU_FTR_601), "i" (SPRN_TBRL)); -#endif return ret; #endif } diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h index 609557569f65..a7eabff27a0f 100644 --- a/arch/powerpc/include/asm/tlb.h +++ b/arch/powerpc/include/asm/tlb.h @@ -69,13 +69,22 @@ static inline int mm_is_core_local(struct mm_struct *mm) topology_sibling_cpumask(smp_processor_id())); } +#ifdef CONFIG_PPC_BOOK3S_64 +static inline int mm_is_thread_local(struct mm_struct *mm) +{ + if (atomic_read(&mm->context.active_cpus) > 1) + return false; + return cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)); +} +#else /* CONFIG_PPC_BOOK3S_64 */ static inline int mm_is_thread_local(struct mm_struct *mm) { return cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())); } +#endif /* !CONFIG_PPC_BOOK3S_64 */ -#else +#else /* CONFIG_SMP */ static inline int mm_is_core_local(struct mm_struct *mm) { return 1; diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h index dc4e15937ccf..2d84bca8d053 100644 --- a/arch/powerpc/include/asm/topology.h +++ b/arch/powerpc/include/asm/topology.h @@ -16,8 +16,6 @@ struct device_node; #include -#define parent_node(node) (node) - #define 
cpumask_of_node(node) ((node) == -1 ? \ cpu_all_mask : \ node_to_cpumask_map[node]) diff --git a/arch/powerpc/include/asm/vas.h b/arch/powerpc/include/asm/vas.h new file mode 100644 index 000000000000..fd5963acd658 --- /dev/null +++ b/arch/powerpc/include/asm/vas.h @@ -0,0 +1,159 @@ +/* + * Copyright 2016-17 IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#ifndef _ASM_POWERPC_VAS_H +#define _ASM_POWERPC_VAS_H + +/* + * Min and max FIFO sizes are based on Version 1.05 Section 3.1.4.25 + * (Local FIFO Size Register) of the VAS workbook. + */ +#define VAS_RX_FIFO_SIZE_MIN (1 << 10) /* 1KB */ +#define VAS_RX_FIFO_SIZE_MAX (8 << 20) /* 8MB */ + +/* + * Threshold Control Mode: Have paste operation fail if the number of + * requests in receive FIFO exceeds a threshold. + * + * NOTE: No special error code yet if paste is rejected because of these + * limits. So users can't distinguish between this and other errors. + */ +#define VAS_THRESH_DISABLED 0 +#define VAS_THRESH_FIFO_GT_HALF_FULL 1 +#define VAS_THRESH_FIFO_GT_QTR_FULL 2 +#define VAS_THRESH_FIFO_GT_EIGHTH_FULL 3 + +/* + * Get/Set bit fields + */ +#define GET_FIELD(m, v) (((v) & (m)) >> MASK_LSH(m)) +#define MASK_LSH(m) (__builtin_ffsl(m) - 1) +#define SET_FIELD(m, v, val) \ + (((v) & ~(m)) | ((((typeof(v))(val)) << MASK_LSH(m)) & (m))) + +/* + * Co-processor Engine type. + */ +enum vas_cop_type { + VAS_COP_TYPE_FAULT, + VAS_COP_TYPE_842, + VAS_COP_TYPE_842_HIPRI, + VAS_COP_TYPE_GZIP, + VAS_COP_TYPE_GZIP_HIPRI, + VAS_COP_TYPE_FTW, + VAS_COP_TYPE_MAX, +}; + +/* + * Receive window attributes specified by the (in-kernel) owner of window. + */ +struct vas_rx_win_attr { + void *rx_fifo; + int rx_fifo_size; + int wcreds_max; + + bool pin_win; + bool rej_no_credit; + bool tx_wcred_mode; + bool rx_wcred_mode; + bool tx_win_ord_mode; + bool rx_win_ord_mode; + bool data_stamp; + bool nx_win; + bool fault_win; + bool user_win; + bool notify_disable; + bool intr_disable; + bool notify_early; + + int lnotify_lpid; + int lnotify_pid; + int lnotify_tid; + u32 pswid; + + int tc_mode; +}; + +/* + * Window attributes specified by the in-kernel owner of a send window. + */ +struct vas_tx_win_attr { + enum vas_cop_type cop; + int wcreds_max; + int lpid; + int pidr; /* hardware PID (from SPRN_PID) */ + int pid; /* linux process id */ + int pswid; + int rsvd_txbuf_count; + int tc_mode; + + bool user_win; + bool pin_win; + bool rej_no_credit; + bool rsvd_txbuf_enable; + bool tx_wcred_mode; + bool rx_wcred_mode; + bool tx_win_ord_mode; + bool rx_win_ord_mode; +}; + +/* + * Helper to initialize receive window attributes to defaults for an + * NX window. + */ +void vas_init_rx_win_attr(struct vas_rx_win_attr *rxattr, enum vas_cop_type cop); + +/* + * Open a VAS receive window for the instance of VAS identified by @vasid + * Use @attr to initialize the attributes of the window. + * + * Return a handle to the window or ERR_PTR() on error. + */ +struct vas_window *vas_rx_win_open(int vasid, enum vas_cop_type cop, + struct vas_rx_win_attr *attr); + +/* + * Helper to initialize send window attributes to defaults for an NX window. + */ +extern void vas_init_tx_win_attr(struct vas_tx_win_attr *txattr, + enum vas_cop_type cop); + +/* + * Open a VAS send window for the instance of VAS identified by @vasid + * and the co-processor type @cop. 
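[Editor's sketch] The GET_FIELD/SET_FIELD helpers introduced in vas.h above derive the shift from the mask itself via __builtin_ffsl(), so callers only ever name the mask. A stand-alone illustration with a made-up field mask (GCC/Clang required for typeof):

#include <assert.h>
#include <stdint.h>

#define MASK_LSH(m)          (__builtin_ffsl(m) - 1)
#define GET_FIELD(m, v)      (((v) & (m)) >> MASK_LSH(m))
#define SET_FIELD(m, v, val) \
        (((v) & ~(m)) | ((((typeof(v))(val)) << MASK_LSH(m)) & (m)))

int main(void)
{
        uint64_t mask = 0x0000ff00;     /* hypothetical field: bits 8-15 */
        uint64_t word = 0x12345678;

        assert(GET_FIELD(mask, word) == 0x56);
        assert(SET_FIELD(mask, word, 0xab) == 0x1234ab78);
        return 0;
}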
Use @attr to initialize attributes + * of the window. + * + * Note: The instance of VAS must already have an open receive window for + * the coprocessor type @cop. + * + * Return a handle to the send window or ERR_PTR() on error. + */ +struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop, + struct vas_tx_win_attr *attr); + +/* + * Close the send or receive window identified by @win. For receive windows + * return -EAGAIN if there are active send windows attached to this receive + * window. + */ +int vas_win_close(struct vas_window *win); + +/* + * Copy the co-processor request block (CRB) @crb into the local L2 cache. + */ +int vas_copy_crb(void *crb, int offset); + +/* + * Paste a previously copied CRB (see vas_copy_crb()) from the L2 cache to + * the hardware address associated with the window @win. @re is expected/ + * assumed to be true for NX windows. + */ +int vas_paste_crb(struct vas_window *win, int offset, bool re); + +#endif /* __ASM_POWERPC_VAS_H */ diff --git a/arch/powerpc/include/asm/vga.h b/arch/powerpc/include/asm/vga.h index ab3acd2f2786..7a7b541b7493 100644 --- a/arch/powerpc/include/asm/vga.h +++ b/arch/powerpc/include/asm/vga.h @@ -33,8 +33,16 @@ static inline u16 scr_readw(volatile const u16 *addr) return le16_to_cpu(*addr); } +#define VT_BUF_HAVE_MEMSETW +static inline void scr_memsetw(u16 *s, u16 v, unsigned int n) +{ + memset16(s, cpu_to_le16(v), n / 2); +} + #define VT_BUF_HAVE_MEMCPYW +#define VT_BUF_HAVE_MEMMOVEW #define scr_memcpyw memcpy +#define scr_memmovew memmove #endif /* !CONFIG_VGA_CONSOLE && !CONFIG_MDA_CONSOLE */ diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h index c23ff4389ca2..371fbebf1ec9 100644 --- a/arch/powerpc/include/asm/xive.h +++ b/arch/powerpc/include/asm/xive.h @@ -45,6 +45,7 @@ struct xive_irq_data { void __iomem *trig_mmio; u32 esb_shift; int src_chip; + u32 hw_irq; /* Setup/used by frontend */ int target; @@ -55,6 +56,7 @@ struct xive_irq_data { #define XIVE_IRQ_FLAG_SHIFT_BUG 0x04 #define XIVE_IRQ_FLAG_MASK_FW 0x08 #define XIVE_IRQ_FLAG_EOI_FW 0x10 +#define XIVE_IRQ_FLAG_H_INT_ESB 0x20 #define XIVE_INVALID_CHIP_ID -1 @@ -110,11 +112,13 @@ extern bool __xive_enabled; static inline bool xive_enabled(void) { return __xive_enabled; } +extern bool xive_spapr_init(void); extern bool xive_native_init(void); extern void xive_smp_probe(void); extern int xive_smp_prepare_cpu(unsigned int cpu); extern void xive_smp_setup_cpu(void); extern void xive_smp_disable_cpu(void); +extern void xive_teardown_cpu(void); extern void xive_kexec_teardown_cpu(int secondary); extern void xive_shutdown(void); extern void xive_flush_interrupt(void); @@ -147,6 +151,7 @@ extern int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id) static inline bool xive_enabled(void) { return false; } +static inline bool xive_spapr_init(void) { return false; } static inline bool xive_native_init(void) { return false; } static inline void xive_smp_probe(void) { } extern inline int xive_smp_prepare_cpu(unsigned int cpu) { return -EINVAL; } diff --git a/arch/powerpc/include/uapi/asm/mman.h b/arch/powerpc/include/uapi/asm/mman.h index ab45cc2f3101..03c06ba7464f 100644 --- a/arch/powerpc/include/uapi/asm/mman.h +++ b/arch/powerpc/include/uapi/asm/mman.h @@ -29,20 +29,4 @@ #define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */ #define MAP_HUGETLB 0x40000 /* create a huge page mapping */ -/* - * When MAP_HUGETLB is set, bits [26:31] of the flags argument to mmap(2), - * encode the 
log2 of the huge page size. A value of zero indicates that the - * default huge page size should be used. To use a non-default huge page size, - * one of these defines can be used, or the size can be encoded by hand. Note - * that on most systems only a subset, or possibly none, of these sizes will be - * available. - */ -#define MAP_HUGE_512KB (19 << MAP_HUGE_SHIFT) /* 512KB HugeTLB Page */ -#define MAP_HUGE_1MB (20 << MAP_HUGE_SHIFT) /* 1MB HugeTLB Page */ -#define MAP_HUGE_2MB (21 << MAP_HUGE_SHIFT) /* 2MB HugeTLB Page */ -#define MAP_HUGE_8MB (23 << MAP_HUGE_SHIFT) /* 8MB HugeTLB Page */ -#define MAP_HUGE_16MB (24 << MAP_HUGE_SHIFT) /* 16MB HugeTLB Page */ -#define MAP_HUGE_1GB (30 << MAP_HUGE_SHIFT) /* 1GB HugeTLB Page */ -#define MAP_HUGE_16GB (34 << MAP_HUGE_SHIFT) /* 16GB HugeTLB Page */ - #endif /* _UAPI_ASM_POWERPC_MMAN_H */ diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 4aa7c147e447..91960f83039c 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -38,7 +38,7 @@ obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \ signal_64.o ptrace32.o \ paca.o nvram_64.o firmware.o obj-$(CONFIG_VDSO32) += vdso32/ -obj-$(CONFIG_HARDLOCKUP_DETECTOR) += watchdog.o +obj-$(CONFIG_PPC_WATCHDOG) += watchdog.o obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o @@ -83,7 +83,7 @@ extra-y := head_$(BITS).o extra-$(CONFIG_40x) := head_40x.o extra-$(CONFIG_44x) := head_44x.o extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o -extra-$(CONFIG_8xx) := head_8xx.o +extra-$(CONFIG_PPC_8xx) := head_8xx.o extra-y += vmlinux.lds obj-$(CONFIG_RELOCATABLE) += reloc_$(BITS).o diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c index ec7a8b099dd9..43ef25156480 100644 --- a/arch/powerpc/kernel/align.c +++ b/arch/powerpc/kernel/align.c @@ -27,6 +27,7 @@ #include #include #include +#include struct aligninfo { unsigned char len; @@ -40,364 +41,9 @@ struct aligninfo { #define LD 0 /* load */ #define ST 1 /* store */ #define SE 2 /* sign-extend value, or FP ld/st as word */ -#define F 4 /* to/from fp regs */ -#define U 8 /* update index register */ -#define M 0x10 /* multiple load/store */ #define SW 0x20 /* byte swap */ -#define S 0x40 /* single-precision fp or... */ -#define SX 0x40 /* ... byte count in XER */ -#define HARD 0x80 /* string, stwcx. */ #define E4 0x40 /* SPE endianness is word */ #define E8 0x80 /* SPE endianness is double word */ -#define SPLT 0x80 /* VSX SPLAT load */ - -/* DSISR bits reported for a DCBZ instruction: */ -#define DCBZ 0x5f /* 8xx/82xx dcbz faults when cache not enabled */ - -/* - * The PowerPC stores certain bits of the instruction that caused the - * alignment exception in the DSISR register. This array maps those - * bits to information about the operand length and what the - * instruction would do. 
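[Editor's sketch] For reference, the encoding that the removed MAP_HUGE_* block described is easy to check by hand: bits [26:31] of the mmap() flags hold log2 of the requested huge page size, MAP_HUGE_SHIFT being 26 in the generic uapi headers. A stand-alone sketch:

#include <assert.h>

#define MAP_HUGE_SHIFT  26
#define MAP_HUGE_16MB   (24 << MAP_HUGE_SHIFT)   /* log2(16 MB) == 24 */

int main(void)
{
        unsigned long sz = 1UL << (MAP_HUGE_16MB >> MAP_HUGE_SHIFT);

        assert(sz == 16 * 1024 * 1024);
        return 0;
}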
- */ -static struct aligninfo aligninfo[128] = { - { 4, LD }, /* 00 0 0000: lwz / lwarx */ - INVALID, /* 00 0 0001 */ - { 4, ST }, /* 00 0 0010: stw */ - INVALID, /* 00 0 0011 */ - { 2, LD }, /* 00 0 0100: lhz */ - { 2, LD+SE }, /* 00 0 0101: lha */ - { 2, ST }, /* 00 0 0110: sth */ - { 4, LD+M }, /* 00 0 0111: lmw */ - { 4, LD+F+S }, /* 00 0 1000: lfs */ - { 8, LD+F }, /* 00 0 1001: lfd */ - { 4, ST+F+S }, /* 00 0 1010: stfs */ - { 8, ST+F }, /* 00 0 1011: stfd */ - { 16, LD }, /* 00 0 1100: lq */ - { 8, LD }, /* 00 0 1101: ld/ldu/lwa */ - INVALID, /* 00 0 1110 */ - { 8, ST }, /* 00 0 1111: std/stdu */ - { 4, LD+U }, /* 00 1 0000: lwzu */ - INVALID, /* 00 1 0001 */ - { 4, ST+U }, /* 00 1 0010: stwu */ - INVALID, /* 00 1 0011 */ - { 2, LD+U }, /* 00 1 0100: lhzu */ - { 2, LD+SE+U }, /* 00 1 0101: lhau */ - { 2, ST+U }, /* 00 1 0110: sthu */ - { 4, ST+M }, /* 00 1 0111: stmw */ - { 4, LD+F+S+U }, /* 00 1 1000: lfsu */ - { 8, LD+F+U }, /* 00 1 1001: lfdu */ - { 4, ST+F+S+U }, /* 00 1 1010: stfsu */ - { 8, ST+F+U }, /* 00 1 1011: stfdu */ - { 16, LD+F }, /* 00 1 1100: lfdp */ - INVALID, /* 00 1 1101 */ - { 16, ST+F }, /* 00 1 1110: stfdp */ - INVALID, /* 00 1 1111 */ - { 8, LD }, /* 01 0 0000: ldx */ - INVALID, /* 01 0 0001 */ - { 8, ST }, /* 01 0 0010: stdx */ - INVALID, /* 01 0 0011 */ - INVALID, /* 01 0 0100 */ - { 4, LD+SE }, /* 01 0 0101: lwax */ - INVALID, /* 01 0 0110 */ - INVALID, /* 01 0 0111 */ - { 4, LD+M+HARD+SX }, /* 01 0 1000: lswx */ - { 4, LD+M+HARD }, /* 01 0 1001: lswi */ - { 4, ST+M+HARD+SX }, /* 01 0 1010: stswx */ - { 4, ST+M+HARD }, /* 01 0 1011: stswi */ - INVALID, /* 01 0 1100 */ - { 8, LD+U }, /* 01 0 1101: ldu */ - INVALID, /* 01 0 1110 */ - { 8, ST+U }, /* 01 0 1111: stdu */ - { 8, LD+U }, /* 01 1 0000: ldux */ - INVALID, /* 01 1 0001 */ - { 8, ST+U }, /* 01 1 0010: stdux */ - INVALID, /* 01 1 0011 */ - INVALID, /* 01 1 0100 */ - { 4, LD+SE+U }, /* 01 1 0101: lwaux */ - INVALID, /* 01 1 0110 */ - INVALID, /* 01 1 0111 */ - INVALID, /* 01 1 1000 */ - INVALID, /* 01 1 1001 */ - INVALID, /* 01 1 1010 */ - INVALID, /* 01 1 1011 */ - INVALID, /* 01 1 1100 */ - INVALID, /* 01 1 1101 */ - INVALID, /* 01 1 1110 */ - INVALID, /* 01 1 1111 */ - INVALID, /* 10 0 0000 */ - INVALID, /* 10 0 0001 */ - INVALID, /* 10 0 0010: stwcx. 
*/ - INVALID, /* 10 0 0011 */ - INVALID, /* 10 0 0100 */ - INVALID, /* 10 0 0101 */ - INVALID, /* 10 0 0110 */ - INVALID, /* 10 0 0111 */ - { 4, LD+SW }, /* 10 0 1000: lwbrx */ - INVALID, /* 10 0 1001 */ - { 4, ST+SW }, /* 10 0 1010: stwbrx */ - INVALID, /* 10 0 1011 */ - { 2, LD+SW }, /* 10 0 1100: lhbrx */ - { 4, LD+SE }, /* 10 0 1101 lwa */ - { 2, ST+SW }, /* 10 0 1110: sthbrx */ - { 16, ST }, /* 10 0 1111: stq */ - INVALID, /* 10 1 0000 */ - INVALID, /* 10 1 0001 */ - INVALID, /* 10 1 0010 */ - INVALID, /* 10 1 0011 */ - INVALID, /* 10 1 0100 */ - INVALID, /* 10 1 0101 */ - INVALID, /* 10 1 0110 */ - INVALID, /* 10 1 0111 */ - INVALID, /* 10 1 1000 */ - INVALID, /* 10 1 1001 */ - INVALID, /* 10 1 1010 */ - INVALID, /* 10 1 1011 */ - INVALID, /* 10 1 1100 */ - INVALID, /* 10 1 1101 */ - INVALID, /* 10 1 1110 */ - { 0, ST+HARD }, /* 10 1 1111: dcbz */ - { 4, LD }, /* 11 0 0000: lwzx */ - INVALID, /* 11 0 0001 */ - { 4, ST }, /* 11 0 0010: stwx */ - INVALID, /* 11 0 0011 */ - { 2, LD }, /* 11 0 0100: lhzx */ - { 2, LD+SE }, /* 11 0 0101: lhax */ - { 2, ST }, /* 11 0 0110: sthx */ - INVALID, /* 11 0 0111 */ - { 4, LD+F+S }, /* 11 0 1000: lfsx */ - { 8, LD+F }, /* 11 0 1001: lfdx */ - { 4, ST+F+S }, /* 11 0 1010: stfsx */ - { 8, ST+F }, /* 11 0 1011: stfdx */ - { 16, LD+F }, /* 11 0 1100: lfdpx */ - { 4, LD+F+SE }, /* 11 0 1101: lfiwax */ - { 16, ST+F }, /* 11 0 1110: stfdpx */ - { 4, ST+F }, /* 11 0 1111: stfiwx */ - { 4, LD+U }, /* 11 1 0000: lwzux */ - INVALID, /* 11 1 0001 */ - { 4, ST+U }, /* 11 1 0010: stwux */ - INVALID, /* 11 1 0011 */ - { 2, LD+U }, /* 11 1 0100: lhzux */ - { 2, LD+SE+U }, /* 11 1 0101: lhaux */ - { 2, ST+U }, /* 11 1 0110: sthux */ - INVALID, /* 11 1 0111 */ - { 4, LD+F+S+U }, /* 11 1 1000: lfsux */ - { 8, LD+F+U }, /* 11 1 1001: lfdux */ - { 4, ST+F+S+U }, /* 11 1 1010: stfsux */ - { 8, ST+F+U }, /* 11 1 1011: stfdux */ - INVALID, /* 11 1 1100 */ - { 4, LD+F }, /* 11 1 1101: lfiwzx */ - INVALID, /* 11 1 1110 */ - INVALID, /* 11 1 1111 */ -}; - -/* - * The dcbz (data cache block zero) instruction - * gives an alignment fault if used on non-cacheable - * memory. We handle the fault mainly for the - * case when we are running with the cache disabled - * for debugging. - */ -static int emulate_dcbz(struct pt_regs *regs, unsigned char __user *addr) -{ - long __user *p; - int i, size; - -#ifdef __powerpc64__ - size = ppc64_caches.l1d.block_size; -#else - size = L1_CACHE_BYTES; -#endif - p = (long __user *) (regs->dar & -size); - if (user_mode(regs) && !access_ok(VERIFY_WRITE, p, size)) - return -EFAULT; - for (i = 0; i < size / sizeof(long); ++i) - if (__put_user_inatomic(0, p+i)) - return -EFAULT; - return 1; -} - -/* - * Emulate load & store multiple instructions - * On 64-bit machines, these instructions only affect/use the - * bottom 4 bytes of each register, and the loads clear the - * top 4 bytes of the affected register. 
- */ -#ifdef __BIG_ENDIAN__ -#ifdef CONFIG_PPC64 -#define REG_BYTE(rp, i) *((u8 *)((rp) + ((i) >> 2)) + ((i) & 3) + 4) -#else -#define REG_BYTE(rp, i) *((u8 *)(rp) + (i)) -#endif -#else -#define REG_BYTE(rp, i) (*(((u8 *)((rp) + ((i)>>2)) + ((i)&3)))) -#endif - -#define SWIZ_PTR(p) ((unsigned char __user *)((p) ^ swiz)) - -static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr, - unsigned int reg, unsigned int nb, - unsigned int flags, unsigned int instr, - unsigned long swiz) -{ - unsigned long *rptr; - unsigned int nb0, i, bswiz; - unsigned long p; - - /* - * We do not try to emulate 8 bytes multiple as they aren't really - * available in our operating environments and we don't try to - * emulate multiples operations in kernel land as they should never - * be used/generated there at least not on unaligned boundaries - */ - if (unlikely((nb > 4) || !user_mode(regs))) - return 0; - - /* lmw, stmw, lswi/x, stswi/x */ - nb0 = 0; - if (flags & HARD) { - if (flags & SX) { - nb = regs->xer & 127; - if (nb == 0) - return 1; - } else { - unsigned long pc = regs->nip ^ (swiz & 4); - - if (__get_user_inatomic(instr, - (unsigned int __user *)pc)) - return -EFAULT; - if (swiz == 0 && (flags & SW)) - instr = cpu_to_le32(instr); - nb = (instr >> 11) & 0x1f; - if (nb == 0) - nb = 32; - } - if (nb + reg * 4 > 128) { - nb0 = nb + reg * 4 - 128; - nb = 128 - reg * 4; - } -#ifdef __LITTLE_ENDIAN__ - /* - * String instructions are endian neutral but the code - * below is not. Force byte swapping on so that the - * effects of swizzling are undone in the load/store - * loops below. - */ - flags ^= SW; -#endif - } else { - /* lwm, stmw */ - nb = (32 - reg) * 4; - } - - if (!access_ok((flags & ST ? VERIFY_WRITE: VERIFY_READ), addr, nb+nb0)) - return -EFAULT; /* bad address */ - - rptr = ®s->gpr[reg]; - p = (unsigned long) addr; - bswiz = (flags & SW)? 3: 0; - - if (!(flags & ST)) { - /* - * This zeroes the top 4 bytes of the affected registers - * in 64-bit mode, and also zeroes out any remaining - * bytes of the last register for lsw*. - */ - memset(rptr, 0, ((nb + 3) / 4) * sizeof(unsigned long)); - if (nb0 > 0) - memset(®s->gpr[0], 0, - ((nb0 + 3) / 4) * sizeof(unsigned long)); - - for (i = 0; i < nb; ++i, ++p) - if (__get_user_inatomic(REG_BYTE(rptr, i ^ bswiz), - SWIZ_PTR(p))) - return -EFAULT; - if (nb0 > 0) { - rptr = ®s->gpr[0]; - addr += nb; - for (i = 0; i < nb0; ++i, ++p) - if (__get_user_inatomic(REG_BYTE(rptr, - i ^ bswiz), - SWIZ_PTR(p))) - return -EFAULT; - } - - } else { - for (i = 0; i < nb; ++i, ++p) - if (__put_user_inatomic(REG_BYTE(rptr, i ^ bswiz), - SWIZ_PTR(p))) - return -EFAULT; - if (nb0 > 0) { - rptr = ®s->gpr[0]; - addr += nb; - for (i = 0; i < nb0; ++i, ++p) - if (__put_user_inatomic(REG_BYTE(rptr, - i ^ bswiz), - SWIZ_PTR(p))) - return -EFAULT; - } - } - return 1; -} - -/* - * Emulate floating-point pair loads and stores. - * Only POWER6 has these instructions, and it does true little-endian, - * so we don't need the address swizzling. 
- */ -static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg, - unsigned int flags) -{ - char *ptr0 = (char *) &current->thread.TS_FPR(reg); - char *ptr1 = (char *) &current->thread.TS_FPR(reg+1); - int i, ret, sw = 0; - - if (reg & 1) - return 0; /* invalid form: FRS/FRT must be even */ - if (flags & SW) - sw = 7; - ret = 0; - for (i = 0; i < 8; ++i) { - if (!(flags & ST)) { - ret |= __get_user(ptr0[i^sw], addr + i); - ret |= __get_user(ptr1[i^sw], addr + i + 8); - } else { - ret |= __put_user(ptr0[i^sw], addr + i); - ret |= __put_user(ptr1[i^sw], addr + i + 8); - } - } - if (ret) - return -EFAULT; - return 1; /* exception handled and fixed up */ -} - -#ifdef CONFIG_PPC64 -static int emulate_lq_stq(struct pt_regs *regs, unsigned char __user *addr, - unsigned int reg, unsigned int flags) -{ - char *ptr0 = (char *)&regs->gpr[reg]; - char *ptr1 = (char *)&regs->gpr[reg+1]; - int i, ret, sw = 0; - - if (reg & 1) - return 0; /* invalid form: GPR must be even */ - if (flags & SW) - sw = 7; - ret = 0; - for (i = 0; i < 8; ++i) { - if (!(flags & ST)) { - ret |= __get_user(ptr0[i^sw], addr + i); - ret |= __get_user(ptr1[i^sw], addr + i + 8); - } else { - ret |= __put_user(ptr0[i^sw], addr + i); - ret |= __put_user(ptr1[i^sw], addr + i + 8); - } - } - if (ret) - return -EFAULT; - return 1; /* exception handled and fixed up */ -} -#endif /* CONFIG_PPC64 */ #ifdef CONFIG_SPE @@ -636,133 +282,21 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg, } #endif /* CONFIG_SPE */ -#ifdef CONFIG_VSX -/* - * Emulate VSX instructions... - */ -static int emulate_vsx(unsigned char __user *addr, unsigned int reg, - unsigned int areg, struct pt_regs *regs, - unsigned int flags, unsigned int length, - unsigned int elsize) -{ - char *ptr; - unsigned long *lptr; - int ret = 0; - int sw = 0; - int i, j; - - /* userland only */ - if (unlikely(!user_mode(regs))) - return 0; - - flush_vsx_to_thread(current); - - if (reg < 32) - ptr = (char *) &current->thread.fp_state.fpr[reg][0]; - else - ptr = (char *) &current->thread.vr_state.vr[reg - 32]; - - lptr = (unsigned long *) ptr; - -#ifdef __LITTLE_ENDIAN__ - if (flags & SW) { - elsize = length; - sw = length-1; - } else { - /* - * The elements are BE ordered, even in LE mode, so process - * them in reverse order. - */ - addr += length - elsize; - - /* 8 byte memory accesses go in the top 8 bytes of the VR */ - if (length == 8) - ptr += 8; - } -#else - if (flags & SW) - sw = elsize-1; -#endif - - for (j = 0; j < length; j += elsize) { - for (i = 0; i < elsize; ++i) { - if (flags & ST) - ret |= __put_user(ptr[i^sw], addr + i); - else - ret |= __get_user(ptr[i^sw], addr + i); - } - ptr += elsize; -#ifdef __LITTLE_ENDIAN__ - addr -= elsize; -#else - addr += elsize; -#endif - } - -#ifdef __BIG_ENDIAN__ -#define VSX_HI 0 -#define VSX_LO 1 -#else -#define VSX_HI 1 -#define VSX_LO 0 -#endif - - if (!ret) { - if (flags & U) - regs->gpr[areg] = regs->dar; - - /* Splat load copies the same data to top and bottom 8 bytes */ - if (flags & SPLT) - lptr[VSX_LO] = lptr[VSX_HI]; - /* For 8 byte loads, zero the low 8 bytes */ - else if (!(flags & ST) && (8 == length)) - lptr[VSX_LO] = 0; - } else - return -EFAULT; - - return 1; -} -#endif - /* * Called on alignment exception. Attempts to fixup * * Return 1 on success * Return 0 if unable to handle the interrupt * Return -EFAULT if data address is bad + * Other negative return values indicate that the instruction can't + * be emulated, and the process should be given a SIGBUS.
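As a rough sketch of how a caller is expected to consume these return values (the actual handling lives in alignment_exception() in traps.c and is not part of this file; the signal codes shown are illustrative assumptions):

	r = fix_alignment(regs);
	if (r == 1)
		regs->nip += 4;
	else if (r == -EFAULT)
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->dar);
	else
		_exception(SIGBUS, regs, BUS_ADRALN, regs->dar);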
*/ int fix_alignment(struct pt_regs *regs) { - unsigned int instr, nb, flags, instruction = 0; - unsigned int reg, areg; - unsigned int dsisr; - unsigned char __user *addr; - unsigned long p, swiz; - int ret, i; - union data { - u64 ll; - double dd; - unsigned char v[8]; - struct { -#ifdef __LITTLE_ENDIAN__ - int low32; - unsigned hi32; -#else - unsigned hi32; - int low32; -#endif - } x32; - struct { -#ifdef __LITTLE_ENDIAN__ - short low16; - unsigned char hi48[6]; -#else - unsigned char hi48[6]; - short low16; -#endif - } x16; - } data; + unsigned int instr; + struct instruction_op op; + int r, type; /* * We require a complete register set, if not, then our assembly @@ -770,121 +304,23 @@ int fix_alignment(struct pt_regs *regs) */ CHECK_FULL_REGS(regs); - dsisr = regs->dsisr; - - /* Some processors don't provide us with a DSISR we can use here, - * let's make one up from the instruction - */ - if (cpu_has_feature(CPU_FTR_NODSISRALIGN)) { - unsigned long pc = regs->nip; - - if (cpu_has_feature(CPU_FTR_PPC_LE) && (regs->msr & MSR_LE)) - pc ^= 4; - if (unlikely(__get_user_inatomic(instr, - (unsigned int __user *)pc))) - return -EFAULT; - if (cpu_has_feature(CPU_FTR_REAL_LE) && (regs->msr & MSR_LE)) - instr = cpu_to_le32(instr); - dsisr = make_dsisr(instr); - instruction = instr; + if (unlikely(__get_user(instr, (unsigned int __user *)regs->nip))) + return -EFAULT; + if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE)) { + /* We don't handle PPC little-endian any more... */ + if (cpu_has_feature(CPU_FTR_PPC_LE)) + return -EIO; + instr = swab32(instr); } - /* extract the operation and registers from the dsisr */ - reg = (dsisr >> 5) & 0x1f; /* source/dest register */ - areg = dsisr & 0x1f; /* register to update */ - #ifdef CONFIG_SPE if ((instr >> 26) == 0x4) { + int reg = (instr >> 21) & 0x1f; PPC_WARN_ALIGNMENT(spe, regs); return emulate_spe(regs, reg, instr); } #endif - instr = (dsisr >> 10) & 0x7f; - instr |= (dsisr >> 13) & 0x60; - - /* Lookup the operation in our table */ - nb = aligninfo[instr].len; - flags = aligninfo[instr].flags; - - /* - * Handle some cases which give overlaps in the DSISR values. - */ - if (IS_XFORM(instruction)) { - switch (get_xop(instruction)) { - case 532: /* ldbrx */ - nb = 8; - flags = LD+SW; - break; - case 660: /* stdbrx */ - nb = 8; - flags = ST+SW; - break; - case 20: /* lwarx */ - case 84: /* ldarx */ - case 116: /* lharx */ - case 276: /* lqarx */ - return 0; /* not emulated ever */ - } - } - - /* Byteswap little endian loads and stores */ - swiz = 0; - if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE)) { - flags ^= SW; -#ifdef __BIG_ENDIAN__ - /* - * So-called "PowerPC little endian" mode works by - * swizzling addresses rather than by actually doing - * any byte-swapping. To emulate this, we XOR each - * byte address with 7. We also byte-swap, because - * the processor's address swizzling depends on the - * operand size (it xors the address with 7 for bytes, - * 6 for halfwords, 4 for words, 0 for doublewords) but - * we will xor with 7 and load/store each byte separately. 
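As a concrete illustration of the swizzling described above, a hypothetical helper computing the address the hardware would actually access for an operand of a given size would be:

	static inline unsigned long le_swizzle(unsigned long ea, int size)
	{
		return ea ^ (8 - size);
	}

giving ea ^ 7 for bytes, ea ^ 6 for halfwords, ea ^ 4 for words and ea ^ 0 for doublewords. The emulation here instead always xors the address with 7 (via SWIZ_PTR()) and accesses one byte at a time with an explicit byte swap, which has the same net effect.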
- */ - if (cpu_has_feature(CPU_FTR_PPC_LE)) - swiz = 7; -#endif - } - - /* DAR has the operand effective address */ - addr = (unsigned char __user *)regs->dar; - -#ifdef CONFIG_VSX - if ((instruction & 0xfc00003e) == 0x7c000018) { - unsigned int elsize; - - /* Additional register addressing bit (64 VSX vs 32 FPR/GPR) */ - reg |= (instruction & 0x1) << 5; - /* Simple inline decoder instead of a table */ - /* VSX has only 8 and 16 byte memory accesses */ - nb = 8; - if (instruction & 0x200) - nb = 16; - - /* Vector stores in little-endian mode swap individual - elements, so process them separately */ - elsize = 4; - if (instruction & 0x80) - elsize = 8; - - flags = 0; - if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE)) - flags |= SW; - if (instruction & 0x100) - flags |= ST; - if (instruction & 0x040) - flags |= U; - /* splat load needs a special decoder */ - if ((instruction & 0x400) == 0){ - flags |= SPLT; - nb = 8; - } - PPC_WARN_ALIGNMENT(vsx, regs); - return emulate_vsx(addr, reg, areg, regs, flags, nb, elsize); - } -#endif /* * ISA 3.0 (such as P9) copy, copy_first, paste and paste_last alignment @@ -896,173 +332,27 @@ int fix_alignment(struct pt_regs *regs) * when pasting to a co-processor. Furthermore, paste_last is the * synchronisation point for preceding copy/paste sequences. */ - if ((instruction & 0xfc0006fe) == PPC_INST_COPY) + if ((instr & 0xfc0006fe) == PPC_INST_COPY) return -EIO; - /* A size of 0 indicates an instruction we don't support, with - * the exception of DCBZ which is handled as a special case here - */ - if (instr == DCBZ) { + r = analyse_instr(&op, regs, instr); + if (r < 0) + return -EINVAL; + + type = op.type & INSTR_TYPE_MASK; + if (!OP_IS_LOAD_STORE(type)) { + if (op.type != CACHEOP + DCBZ) + return -EINVAL; PPC_WARN_ALIGNMENT(dcbz, regs); - return emulate_dcbz(regs, addr); - } - if (unlikely(nb == 0)) - return 0; - - /* Load/Store Multiple instructions are handled in their own - * function - */ - if (flags & M) { - PPC_WARN_ALIGNMENT(multiple, regs); - return emulate_multiple(regs, addr, reg, nb, - flags, instr, swiz); + r = emulate_dcbz(op.ea, regs); + } else { + if (type == LARX || type == STCX) + return -EIO; + PPC_WARN_ALIGNMENT(unaligned, regs); + r = emulate_loadstore(regs, &op); } - /* Verify the address of the operand */ - if (unlikely(user_mode(regs) && - !access_ok((flags & ST ? 
VERIFY_WRITE : VERIFY_READ), - addr, nb))) - return -EFAULT; - - /* Force the fprs into the save area so we can reference them */ - if (flags & F) { - /* userland only */ - if (unlikely(!user_mode(regs))) - return 0; - flush_fp_to_thread(current); - } - - if (nb == 16) { - if (flags & F) { - /* Special case for 16-byte FP loads and stores */ - PPC_WARN_ALIGNMENT(fp_pair, regs); - return emulate_fp_pair(addr, reg, flags); - } else { -#ifdef CONFIG_PPC64 - /* Special case for 16-byte loads and stores */ - PPC_WARN_ALIGNMENT(lq_stq, regs); - return emulate_lq_stq(regs, addr, reg, flags); -#else - return 0; -#endif - } - } - - PPC_WARN_ALIGNMENT(unaligned, regs); - - /* If we are loading, get the data from user space, else - * get it from register values - */ - if (!(flags & ST)) { - unsigned int start = 0; - - switch (nb) { - case 4: - start = offsetof(union data, x32.low32); - break; - case 2: - start = offsetof(union data, x16.low16); - break; - } - - data.ll = 0; - ret = 0; - p = (unsigned long)addr; - - for (i = 0; i < nb; i++) - ret |= __get_user_inatomic(data.v[start + i], - SWIZ_PTR(p++)); - - if (unlikely(ret)) - return -EFAULT; - - } else if (flags & F) { - data.ll = current->thread.TS_FPR(reg); - if (flags & S) { - /* Single-precision FP store requires conversion... */ -#ifdef CONFIG_PPC_FPU - preempt_disable(); - enable_kernel_fp(); - cvt_df(&data.dd, (float *)&data.x32.low32); - disable_kernel_fp(); - preempt_enable(); -#else - return 0; -#endif - } - } else - data.ll = regs->gpr[reg]; - - if (flags & SW) { - switch (nb) { - case 8: - data.ll = swab64(data.ll); - break; - case 4: - data.x32.low32 = swab32(data.x32.low32); - break; - case 2: - data.x16.low16 = swab16(data.x16.low16); - break; - } - } - - /* Perform other misc operations like sign extension - * or floating point single precision conversion - */ - switch (flags & ~(U|SW)) { - case LD+SE: /* sign extending integer loads */ - case LD+F+SE: /* sign extend for lfiwax */ - if ( nb == 2 ) - data.ll = data.x16.low16; - else /* nb must be 4 */ - data.ll = data.x32.low32; - break; - - /* Single-precision FP load requires conversion... 
*/ - case LD+F+S: -#ifdef CONFIG_PPC_FPU - preempt_disable(); - enable_kernel_fp(); - cvt_fd((float *)&data.x32.low32, &data.dd); - disable_kernel_fp(); - preempt_enable(); -#else - return 0; -#endif - break; - } - - /* Store result to memory or update registers */ - if (flags & ST) { - unsigned int start = 0; - - switch (nb) { - case 4: - start = offsetof(union data, x32.low32); - break; - case 2: - start = offsetof(union data, x16.low16); - break; - } - - ret = 0; - p = (unsigned long)addr; - - for (i = 0; i < nb; i++) - ret |= __put_user_inatomic(data.v[start + i], - SWIZ_PTR(p++)); - - if (unlikely(ret)) - return -EFAULT; - } else if (flags & F) - current->thread.TS_FPR(reg) = data.ll; - else - regs->gpr[reg] = data.ll; - - /* Update RA as needed */ - if (flags & U) - regs->gpr[areg] = regs->dar; - - return 1; + if (!r) + return 1; + return r; } diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 6e95c2c19a7e..8cfb20e38cfe 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -746,6 +746,14 @@ int main(void) OFFSET(PACA_SUBCORE_SIBLING_MASK, paca_struct, subcore_sibling_mask); OFFSET(PACA_SIBLING_PACA_PTRS, paca_struct, thread_sibling_pacas); OFFSET(PACA_REQ_PSSCR, paca_struct, requested_psscr); +#define STOP_SPR(x, f) OFFSET(x, paca_struct, stop_sprs.f) + STOP_SPR(STOP_PID, pid); + STOP_SPR(STOP_LDBAR, ldbar); + STOP_SPR(STOP_FSCR, fscr); + STOP_SPR(STOP_HFSCR, hfscr); + STOP_SPR(STOP_MMCR1, mmcr1); + STOP_SPR(STOP_MMCR2, mmcr2); + STOP_SPR(STOP_MMCRA, mmcra); #endif DEFINE(PPC_DBELL_SERVER, PPC_DBELL_SERVER); diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c index 8275858a434d..3f46ca1c59f9 100644 --- a/arch/powerpc/kernel/btext.c +++ b/arch/powerpc/kernel/btext.c @@ -253,7 +253,7 @@ int __init btext_find_display(int allow_nonstdout) for_each_node_by_type(np, "display") { if (of_get_property(np, "linux,opened", NULL)) { - printk("trying %s ...\n", np->full_name); + printk("trying %pOF ...\n", np); rc = btext_initialize(np); printk("result: %d\n", rc); } diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c index c641983bbdd6..a8f20e5928e1 100644 --- a/arch/powerpc/kernel/cacheinfo.c +++ b/arch/powerpc/kernel/cacheinfo.c @@ -167,10 +167,10 @@ static void release_cache_debugcheck(struct cache *cache) list_for_each_entry(iter, &cache_list, list) WARN_ONCE(iter->next_local == cache, - "cache for %s(%s) refers to cache for %s(%s)\n", - iter->ofnode->full_name, + "cache for %pOF(%s) refers to cache for %pOF(%s)\n", + iter->ofnode, cache_type_string(iter), - cache->ofnode->full_name, + cache->ofnode, cache_type_string(cache)); } @@ -179,8 +179,8 @@ static void release_cache(struct cache *cache) if (!cache) return; - pr_debug("freeing L%d %s cache for %s\n", cache->level, - cache_type_string(cache), cache->ofnode->full_name); + pr_debug("freeing L%d %s cache for %pOF\n", cache->level, + cache_type_string(cache), cache->ofnode); release_cache_debugcheck(cache); list_del(&cache->list); @@ -194,8 +194,8 @@ static void cache_cpu_set(struct cache *cache, int cpu) while (next) { WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map), - "CPU %i already accounted in %s(%s)\n", - cpu, next->ofnode->full_name, + "CPU %i already accounted in %pOF(%s)\n", + cpu, next->ofnode, cache_type_string(next)); cpumask_set_cpu(cpu, &next->shared_cpu_map); next = next->next_local; @@ -355,7 +355,7 @@ static int cache_is_unified_d(const struct device_node *np) */ static struct cache 
*cache_do_one_devnode_unified(struct device_node *node, int level) { - pr_debug("creating L%d ucache for %s\n", level, node->full_name); + pr_debug("creating L%d ucache for %pOF\n", level, node); return new_cache(cache_is_unified_d(node), level, node); } @@ -365,8 +365,8 @@ static struct cache *cache_do_one_devnode_split(struct device_node *node, { struct cache *dcache, *icache; - pr_debug("creating L%d dcache and icache for %s\n", level, - node->full_name); + pr_debug("creating L%d dcache and icache for %pOF\n", level, + node); dcache = new_cache(CACHE_TYPE_DATA, level, node); icache = new_cache(CACHE_TYPE_INSTRUCTION, level, node); @@ -679,7 +679,6 @@ static struct kobj_type cache_index_type = { static void cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir) { - const char *cache_name; const char *cache_type; struct cache *cache; char *buf; @@ -690,7 +689,6 @@ static void cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir) return; cache = dir->cache; - cache_name = cache->ofnode->full_name; cache_type = cache_type_string(cache); /* We don't want to create an attribute that can't provide a @@ -707,14 +705,14 @@ static void cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir) rc = attr->show(&dir->kobj, attr, buf); if (rc <= 0) { pr_debug("not creating %s attribute for " - "%s(%s) (rc = %zd)\n", - attr->attr.name, cache_name, + "%pOF(%s) (rc = %zd)\n", + attr->attr.name, cache->ofnode, cache_type, rc); continue; } if (sysfs_create_file(&dir->kobj, &attr->attr)) - pr_debug("could not create %s attribute for %s(%s)\n", - attr->attr.name, cache_name, cache_type); + pr_debug("could not create %s attribute for %pOF(%s)\n", + attr->attr.name, cache->ofnode, cache_type); } kfree(buf); @@ -831,8 +829,8 @@ static void cache_cpu_clear(struct cache *cache, int cpu) struct cache *next = cache->next_local; WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map), - "CPU %i not accounted in %s(%s)\n", - cpu, cache->ofnode->full_name, + "CPU %i not accounted in %pOF(%s)\n", + cpu, cache->ofnode, cache_type_string(cache)); cpumask_clear_cpu(cpu, &cache->shared_cpu_map); diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 6f849832a669..760872916013 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -1259,10 +1259,10 @@ static struct cpu_spec __initdata cpu_specs[] = { .platform = "ppc603", }, #endif /* CONFIG_PPC_BOOK3S_32 */ -#ifdef CONFIG_8xx +#ifdef CONFIG_PPC_8xx { /* 8xx */ .pvr_mask = 0xffff0000, - .pvr_value = 0x00500000, + .pvr_value = PVR_8xx, .cpu_name = "8xx", /* CPU_FTR_MAYBE_CAN_DOZE is possible, * if the 8xx code is there.... 
*/ @@ -1274,7 +1274,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .machine_check = machine_check_8xx, .platform = "ppc823", }, -#endif /* CONFIG_8xx */ +#endif /* CONFIG_PPC_8xx */ #ifdef CONFIG_40x { /* 403GC */ .pvr_mask = 0xffffff00, @@ -1936,6 +1936,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .machine_check = machine_check_440A, .platform = "ppc440", }, +#ifdef CONFIG_PPC_47x { /* 476 DD2 core */ .pvr_mask = 0xffffffff, .pvr_value = 0x11a52080, @@ -1992,6 +1993,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .machine_check = machine_check_47x, .platform = "ppc470", }, +#endif /* CONFIG_PPC_47x */ { /* default match */ .pvr_mask = 0x00000000, .pvr_value = 0x00000000, diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index 1df770e8cbe0..7275fed271af 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -102,10 +102,10 @@ static void cpufeatures_flush_tlb(void) case PVR_POWER8: case PVR_POWER8E: case PVR_POWER8NVL: - __flush_tlb_power8(POWER8_TLB_SETS); + __flush_tlb_power8(TLB_INVAL_SCOPE_GLOBAL); break; case PVR_POWER9: - __flush_tlb_power9(POWER9_TLB_SETS_HASH); + __flush_tlb_power9(TLB_INVAL_SCOPE_GLOBAL); break; default: pr_err("unknown CPU version for boot TLB flush\n"); diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index 63992b2d8e15..116000b45531 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -44,6 +44,7 @@ #include #include #include +#include /** Overview: @@ -169,10 +170,10 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len) char buffer[128]; n += scnprintf(buf+n, len-n, "%04x:%02x:%02x.%01x\n", - edev->phb->global_number, pdn->busno, + pdn->phb->global_number, pdn->busno, PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn)); pr_warn("EEH: of node=%04x:%02x:%02x.%01x\n", - edev->phb->global_number, pdn->busno, + pdn->phb->global_number, pdn->busno, PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn)); eeh_ops->read_config(pdn, PCI_VENDOR_ID, 4, &cfg); @@ -352,8 +353,7 @@ static inline unsigned long eeh_token_to_phys(unsigned long token) * worried about _PAGE_SPLITTING/collapse. Also we will not hit * page table free, because of init_mm. 
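find_init_mm_pte() used just below is assumed to be a thin wrapper, presumably provided by the header newly added to the includes above, behaving like the open-coded walk it replaces, roughly:

	static inline pte_t *find_init_mm_pte(unsigned long ea, unsigned *hshift)
	{
		return __find_linux_pte_or_hugepte(init_mm.pgd, ea, NULL, hshift);
	}

(The wrapper name is taken from the hunk; its exact definition and signature are not shown in this patch.)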
*/ - ptep = __find_linux_pte_or_hugepte(init_mm.pgd, token, - NULL, &hugepage_shift); + ptep = find_init_mm_pte(token, &hugepage_shift); if (!ptep) return token; WARN_ON(hugepage_shift); @@ -435,7 +435,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev) int ret; int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE); unsigned long flags; - struct pci_dn *pdn; + struct device_node *dn; struct pci_dev *dev; struct eeh_pe *pe, *parent_pe, *phb_pe; int rc = 0; @@ -493,9 +493,10 @@ int eeh_dev_check_failure(struct eeh_dev *edev) if (pe->state & EEH_PE_ISOLATED) { pe->check_count++; if (pe->check_count % EEH_MAX_FAILS == 0) { - pdn = eeh_dev_to_pdn(edev); - if (pdn->node) - location = of_get_property(pdn->node, "ibm,loc-code", NULL); + dn = pci_device_to_OF_node(dev); + if (dn) + location = of_get_property(dn, "ibm,loc-code", + NULL); printk(KERN_ERR "EEH: %d reads ignored for recovering device at " "location=%s driver=%s pci addr=%s\n", pe->check_count, @@ -1018,6 +1019,10 @@ int eeh_init(void) } else if ((ret = eeh_ops->init())) return ret; + /* Initialize PHB PEs */ + list_for_each_entry_safe(hose, tmp, &hose_list, list_node) + eeh_dev_phb_init_dynamic(hose); + /* Initialize EEH event */ ret = eeh_event_init(); if (ret) @@ -1064,7 +1069,7 @@ core_initcall_sync(eeh_init); */ void eeh_add_device_early(struct pci_dn *pdn) { - struct pci_controller *phb; + struct pci_controller *phb = pdn ? pdn->phb : NULL; struct eeh_dev *edev = pdn_to_eeh_dev(pdn); if (!edev) @@ -1074,7 +1079,6 @@ void eeh_add_device_early(struct pci_dn *pdn) return; /* USB Bus children of PCI devices will not have BUID's */ - phb = edev->phb; if (NULL == phb || (eeh_has_flag(EEH_PROBE_MODE_DEVTREE) && 0 == phb->buid)) return; diff --git a/arch/powerpc/kernel/eeh_dev.c b/arch/powerpc/kernel/eeh_dev.c index d6b2ca70d14d..a34e6912c15e 100644 --- a/arch/powerpc/kernel/eeh_dev.c +++ b/arch/powerpc/kernel/eeh_dev.c @@ -50,21 +50,16 @@ */ struct eeh_dev *eeh_dev_init(struct pci_dn *pdn) { - struct pci_controller *phb = pdn->phb; struct eeh_dev *edev; /* Allocate EEH device */ edev = kzalloc(sizeof(*edev), GFP_KERNEL); - if (!edev) { - pr_warn("%s: out of memory\n", - __func__); + if (!edev) return NULL; - } /* Associate EEH device with OF node */ pdn->edev = edev; edev->pdn = pdn; - edev->phb = phb; INIT_LIST_HEAD(&edev->list); INIT_LIST_HEAD(&edev->rmv_list); @@ -83,21 +78,3 @@ void eeh_dev_phb_init_dynamic(struct pci_controller *phb) /* EEH PE for PHB */ eeh_phb_pe_create(phb); } - -/** - * eeh_dev_phb_init - Create EEH devices for devices included in existing PHBs - * - * Scan all the existing PHBs and create EEH devices for their OF - * nodes and their children OF nodes - */ -static int __init eeh_dev_phb_init(void) -{ - struct pci_controller *phb, *tmp; - - list_for_each_entry_safe(phb, tmp, &hose_list, list_node) - eeh_dev_phb_init_dynamic(phb); - - return 0; -} - -core_initcall(eeh_dev_phb_init); diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index c405c79e50cd..8b840191df59 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -428,7 +428,7 @@ static void *eeh_add_virt_device(void *data, void *userdata) if (!(edev->physfn)) { pr_warn("%s: EEH dev %04x:%02x:%02x.%01x not for VF\n", - __func__, edev->phb->global_number, pdn->busno, + __func__, pdn->phb->global_number, pdn->busno, PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn)); return NULL; } diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c index cc4b206f77e4..2e8d1b2b5af4 100644 
--- a/arch/powerpc/kernel/eeh_pe.c +++ b/arch/powerpc/kernel/eeh_pe.c @@ -230,10 +230,15 @@ void *eeh_pe_dev_traverse(struct eeh_pe *root, * Bus/Device/Function number. The extra data referred by flag * indicates which type of address should be used. */ +struct eeh_pe_get_flag { + int pe_no; + int config_addr; +}; + static void *__eeh_pe_get(void *data, void *flag) { struct eeh_pe *pe = (struct eeh_pe *)data; - struct eeh_dev *edev = (struct eeh_dev *)flag; + struct eeh_pe_get_flag *tmp = (struct eeh_pe_get_flag *) flag; /* Unexpected PHB PE */ if (pe->type & EEH_PE_PHB) @@ -244,17 +249,17 @@ static void *__eeh_pe_get(void *data, void *flag) * have non-zero PE address */ if (eeh_has_flag(EEH_VALID_PE_ZERO)) { - if (edev->pe_config_addr == pe->addr) + if (tmp->pe_no == pe->addr) return pe; } else { - if (edev->pe_config_addr && - (edev->pe_config_addr == pe->addr)) + if (tmp->pe_no && + (tmp->pe_no == pe->addr)) return pe; } /* Try BDF address */ - if (edev->config_addr && - (edev->config_addr == pe->config_addr)) + if (tmp->config_addr && + (tmp->config_addr == pe->config_addr)) return pe; return NULL; @@ -262,7 +267,9 @@ static void *__eeh_pe_get(void *data, void *flag) /** * eeh_pe_get - Search PE based on the given address - * @edev: EEH device + * @phb: PCI controller + * @pe_no: PE number + * @config_addr: Config address * * Search the corresponding PE based on the specified address which * is included in the eeh device. The function is used to check if @@ -271,12 +278,14 @@ static void *__eeh_pe_get(void *data, void *flag) * which is composed of PCI bus/device/function number, or unified * PE address. */ -struct eeh_pe *eeh_pe_get(struct eeh_dev *edev) +struct eeh_pe *eeh_pe_get(struct pci_controller *phb, + int pe_no, int config_addr) { - struct eeh_pe *root = eeh_phb_pe_get(edev->phb); + struct eeh_pe *root = eeh_phb_pe_get(phb); + struct eeh_pe_get_flag tmp = { pe_no, config_addr }; struct eeh_pe *pe; - pe = eeh_pe_traverse(root, __eeh_pe_get, edev); + pe = eeh_pe_traverse(root, __eeh_pe_get, &tmp); return pe; } @@ -330,11 +339,13 @@ static struct eeh_pe *eeh_pe_get_parent(struct eeh_dev *edev) int eeh_add_to_parent_pe(struct eeh_dev *edev) { struct eeh_pe *pe, *parent; + struct pci_dn *pdn = eeh_dev_to_pdn(edev); + int config_addr = (pdn->busno << 8) | (pdn->devfn); /* Check if the PE number is valid */ if (!eeh_has_flag(EEH_VALID_PE_ZERO) && !edev->pe_config_addr) { pr_err("%s: Invalid PE#0 for edev 0x%x on PHB#%x\n", - __func__, edev->config_addr, edev->phb->global_number); + __func__, config_addr, pdn->phb->global_number); return -EINVAL; } @@ -344,7 +355,7 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev) * PE should be composed of PCI bus and its subordinate * components. 
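For reference, the config_addr computed above and passed to eeh_pe_get() just below is the traditional BDF-style address: bus number in bits 8-15 and devfn (device/function) in bits 0-7, i.e. (pdn->busno << 8) | pdn->devfn.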
*/ - pe = eeh_pe_get(edev); + pe = eeh_pe_get(pdn->phb, edev->pe_config_addr, config_addr); if (pe && !(pe->type & EEH_PE_INVALID)) { /* Mark the PE as type of PCI bus */ pe->type = EEH_PE_BUS; @@ -353,11 +364,11 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev) /* Put the edev to PE */ list_add_tail(&edev->list, &pe->edevs); pr_debug("EEH: Add %04x:%02x:%02x.%01x to Bus PE#%x\n", - edev->phb->global_number, - edev->config_addr >> 8, - PCI_SLOT(edev->config_addr & 0xFF), - PCI_FUNC(edev->config_addr & 0xFF), - pe->addr); + pdn->phb->global_number, + pdn->busno, + PCI_SLOT(pdn->devfn), + PCI_FUNC(pdn->devfn), + pe->addr); return 0; } else if (pe && (pe->type & EEH_PE_INVALID)) { list_add_tail(&edev->list, &pe->edevs); @@ -376,25 +387,25 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev) pr_debug("EEH: Add %04x:%02x:%02x.%01x to Device " "PE#%x, Parent PE#%x\n", - edev->phb->global_number, - edev->config_addr >> 8, - PCI_SLOT(edev->config_addr & 0xFF), - PCI_FUNC(edev->config_addr & 0xFF), - pe->addr, pe->parent->addr); + pdn->phb->global_number, + pdn->busno, + PCI_SLOT(pdn->devfn), + PCI_FUNC(pdn->devfn), + pe->addr, pe->parent->addr); return 0; } /* Create a new EEH PE */ if (edev->physfn) - pe = eeh_pe_alloc(edev->phb, EEH_PE_VF); + pe = eeh_pe_alloc(pdn->phb, EEH_PE_VF); else - pe = eeh_pe_alloc(edev->phb, EEH_PE_DEVICE); + pe = eeh_pe_alloc(pdn->phb, EEH_PE_DEVICE); if (!pe) { pr_err("%s: out of memory!\n", __func__); return -ENOMEM; } pe->addr = edev->pe_config_addr; - pe->config_addr = edev->config_addr; + pe->config_addr = config_addr; /* * Put the new EEH PE into hierarchy tree. If the parent @@ -404,10 +415,10 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev) */ parent = eeh_pe_get_parent(edev); if (!parent) { - parent = eeh_phb_pe_get(edev->phb); + parent = eeh_phb_pe_get(pdn->phb); if (!parent) { pr_err("%s: No PHB PE is found (PHB Domain=%d)\n", - __func__, edev->phb->global_number); + __func__, pdn->phb->global_number); edev->pe = NULL; kfree(pe); return -EEXIST; @@ -424,10 +435,10 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev) edev->pe = pe; pr_debug("EEH: Add %04x:%02x:%02x.%01x to " "Device PE#%x, Parent PE#%x\n", - edev->phb->global_number, - edev->config_addr >> 8, - PCI_SLOT(edev->config_addr & 0xFF), - PCI_FUNC(edev->config_addr & 0xFF), + pdn->phb->global_number, + pdn->busno, + PCI_SLOT(pdn->devfn), + PCI_FUNC(pdn->devfn), pe->addr, pe->parent->addr); return 0; @@ -446,13 +457,14 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev) { struct eeh_pe *pe, *parent, *child; int cnt; + struct pci_dn *pdn = eeh_dev_to_pdn(edev); if (!edev->pe) { pr_debug("%s: No PE found for device %04x:%02x:%02x.%01x\n", - __func__, edev->phb->global_number, - edev->config_addr >> 8, - PCI_SLOT(edev->config_addr & 0xFF), - PCI_FUNC(edev->config_addr & 0xFF)); + __func__, pdn->phb->global_number, + pdn->busno, + PCI_SLOT(pdn->devfn), + PCI_FUNC(pdn->devfn)); return -EEXIST; } @@ -712,10 +724,10 @@ static void eeh_bridge_check_link(struct eeh_dev *edev) return; pr_debug("%s: Check PCIe link for %04x:%02x:%02x.%01x ...\n", - __func__, edev->phb->global_number, - edev->config_addr >> 8, - PCI_SLOT(edev->config_addr & 0xFF), - PCI_FUNC(edev->config_addr & 0xFF)); + __func__, pdn->phb->global_number, + pdn->busno, + PCI_SLOT(pdn->devfn), + PCI_FUNC(pdn->devfn)); /* Check slot status */ cap = edev->pcie_cap; diff --git a/arch/powerpc/kernel/eeh_sysfs.c b/arch/powerpc/kernel/eeh_sysfs.c index 1ceecdda810b..797549289798 100644 --- a/arch/powerpc/kernel/eeh_sysfs.c +++ 
b/arch/powerpc/kernel/eeh_sysfs.c @@ -51,7 +51,6 @@ static ssize_t eeh_show_##_name(struct device *dev, \ static DEVICE_ATTR(_name, S_IRUGO, eeh_show_##_name, NULL); EEH_SHOW_ATTR(eeh_mode, mode, "0x%x"); -EEH_SHOW_ATTR(eeh_config_addr, config_addr, "0x%x"); EEH_SHOW_ATTR(eeh_pe_config_addr, pe_config_addr, "0x%x"); static ssize_t eeh_pe_state_show(struct device *dev, @@ -103,7 +102,6 @@ void eeh_sysfs_add_device(struct pci_dev *pdev) return; rc += device_create_file(&pdev->dev, &dev_attr_eeh_mode); - rc += device_create_file(&pdev->dev, &dev_attr_eeh_config_addr); rc += device_create_file(&pdev->dev, &dev_attr_eeh_pe_config_addr); rc += device_create_file(&pdev->dev, &dev_attr_eeh_pe_state); @@ -128,7 +126,6 @@ void eeh_sysfs_remove_device(struct pci_dev *pdev) } device_remove_file(&pdev->dev, &dev_attr_eeh_mode); - device_remove_file(&pdev->dev, &dev_attr_eeh_config_addr); device_remove_file(&pdev->dev, &dev_attr_eeh_pe_config_addr); device_remove_file(&pdev->dev, &dev_attr_eeh_pe_state); diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 8587059ad848..e780e1fbf6c2 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -43,6 +43,13 @@ #define LOAD_MSR_KERNEL(r, x) li r,(x) #endif +/* + * Align to 4k in order to ensure that all functions modyfing srr0/srr1 + * fit into one page in order to not encounter a TLB miss between the + * modification of srr0/srr1 and the associated rfi. + */ + .align 12 + #ifdef CONFIG_BOOKE .globl mcheck_transfer_to_handler mcheck_transfer_to_handler: @@ -586,6 +593,10 @@ ppc_swapcontext: handle_page_fault: stw r4,_DAR(r1) addi r3,r1,STACK_FRAME_OVERHEAD +#ifdef CONFIG_6xx + andis. r0,r5,DSISR_DABRMATCH@h + bne- handle_dabr_fault +#endif bl do_page_fault cmpwi r3,0 beq+ ret_from_except @@ -599,6 +610,17 @@ handle_page_fault: bl bad_page_fault b ret_from_except_full +#ifdef CONFIG_6xx + /* We have a data breakpoint exception - handle it */ +handle_dabr_fault: + SAVE_NVGPRS(r1) + lwz r0,_TRAP(r1) + clrrwi r0,r0,1 + stw r0,_TRAP(r1) + bl do_break + b ret_from_except_full +#endif + /* * This routine switches between two different tasks. The process * state of one is saved on its kernel stack. Then the state diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index e925c1c99c71..4a0fd4f40245 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -966,16 +966,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) #ifdef CONFIG_PPC_BOOK3E cmpwi cr0,r3,0x280 #else - BEGIN_FTR_SECTION - cmpwi cr0,r3,0xe80 - FTR_SECTION_ELSE - cmpwi cr0,r3,0xa00 - ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE) + cmpwi cr0,r3,0xa00 #endif /* CONFIG_PPC_BOOK3E */ bne 1f addi r3,r1,STACK_FRAME_OVERHEAD; bl doorbell_exception - b ret_from_except #endif /* CONFIG_PPC_DOORBELL */ 1: b ret_from_except /* What else to do here ? */ @@ -1109,7 +1104,7 @@ _ASM_NOKPROBE_SYMBOL(__enter_rtas) _ASM_NOKPROBE_SYMBOL(rtas_return_loc) .align 3 -1: .llong rtas_restore_regs +1: .8byte rtas_restore_regs rtas_restore_regs: /* relocation is on at this point */ diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index f14f3c04ec7e..b82586c53560 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -541,7 +541,7 @@ EXC_COMMON_BEGIN(instruction_access_common) RECONCILE_IRQ_STATE(r10, r11) ld r12,_MSR(r1) ld r3,_NIP(r1) - andis. r4,r12,0x5820 + andis. 
r4,r12,DSISR_BAD_FAULT_64S@h li r5,0x400 std r3,_DAR(r1) std r4,_DSISR(r1) @@ -734,7 +734,29 @@ EXC_REAL(program_check, 0x700, 0x100) EXC_VIRT(program_check, 0x4700, 0x100, 0x700) TRAMP_KVM(PACA_EXGEN, 0x700) EXC_COMMON_BEGIN(program_check_common) - EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN) + /* + * It's possible to receive a TM Bad Thing type program check with + * userspace register values (in particular r1), but with SRR1 reporting + * that we came from the kernel. Normally that would confuse the bad + * stack logic, and we would report a bad kernel stack pointer. Instead + * we switch to the emergency stack if we're taking a TM Bad Thing from + * the kernel. + */ + li r10,MSR_PR /* Build a mask of MSR_PR .. */ + oris r10,r10,0x200000@h /* .. and SRR1_PROGTM */ + and r10,r10,r12 /* Mask SRR1 with that. */ + srdi r10,r10,8 /* Shift it so we can compare */ + cmpldi r10,(0x200000 >> 8) /* .. with an immediate. */ + bne 1f /* If != go to normal path. */ + + /* SRR1 had PR=0 and SRR1_PROGTM=1, so use the emergency stack */ + andi. r10,r12,MSR_PR; /* Set CR0 correctly for label */ + /* 3 in EXCEPTION_PROLOG_COMMON */ + mr r10,r1 /* Save r1 */ + ld r1,PACAEMERGSP(r13) /* Use emergency stack */ + subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */ + b 3f /* Jump into the macro !! */ +1: EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN) bl save_nvgprs RECONCILE_IRQ_STATE(r10, r11) addi r3,r1,STACK_FRAME_OVERHEAD @@ -1314,7 +1336,7 @@ EXC_REAL_NONE(0x1800, 0x100) EXC_VIRT_NONE(0x5800, 0x100) #endif -#if defined(CONFIG_HARDLOCKUP_DETECTOR) && defined(CONFIG_HAVE_HARDLOCKUP_DETECTOR_ARCH) +#ifdef CONFIG_PPC_WATCHDOG #define MASKED_DEC_HANDLER_LABEL 3f @@ -1343,10 +1365,10 @@ EXC_COMMON_BEGIN(soft_nmi_common) ADD_NVGPRS;ADD_RECONCILE) b ret_from_except -#else +#else /* CONFIG_PPC_WATCHDOG */ #define MASKED_DEC_HANDLER_LABEL 2f /* normal return */ #define MASKED_DEC_HANDLER(_H) -#endif +#endif /* CONFIG_PPC_WATCHDOG */ /* * An interrupt came in while soft-disabled. We set paca->irq_happened, then: @@ -1370,19 +1392,16 @@ masked_##_H##interrupt: \ ori r10,r10,0xffff; \ mtspr SPRN_DEC,r10; \ b MASKED_DEC_HANDLER_LABEL; \ -1: cmpwi r10,PACA_IRQ_DBELL; \ - beq 2f; \ - cmpwi r10,PACA_IRQ_HMI; \ - beq 2f; \ +1: andi. r10,r10,(PACA_IRQ_DBELL|PACA_IRQ_HMI); \ + bne 2f; \ mfspr r10,SPRN_##_H##SRR1; \ - rldicl r10,r10,48,1; /* clear MSR_EE */ \ - rotldi r10,r10,16; \ + xori r10,r10,MSR_EE; /* clear MSR_EE */ \ mtspr SPRN_##_H##SRR1,r10; \ 2: mtcrf 0x80,r9; \ ld r9,PACA_EXGEN+EX_R9(r13); \ ld r10,PACA_EXGEN+EX_R10(r13); \ ld r11,PACA_EXGEN+EX_R11(r13); \ - GET_SCRATCH0(r13); \ + /* returns to kernel where r13 must be set up, so don't restore it */ \ ##_H##rfid; \ b .; \ MASKED_DEC_HANDLER(_H) @@ -1485,8 +1504,10 @@ USE_TEXT_SECTION() */ .balign IFETCH_ALIGN_BYTES do_hash_page: -#ifdef CONFIG_PPC_STD_MMU_64 - andis. r0,r4,0xa450 /* weird error? */ + #ifdef CONFIG_PPC_STD_MMU_64 + lis r0,DSISR_BAD_FAULT_64S@h + ori r0,r0,DSISR_BAD_FAULT_64S@l + and. r0,r4,r0 /* weird error? */ bne- handle_page_fault /* if not, try to insert a HPTE */ CURRENT_THREAD_INFO(r11, r1) lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */ @@ -1669,25 +1690,27 @@ _GLOBAL(__replay_interrupt) * we don't give a damn about, so we don't bother storing them. 
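On entry r3 holds the vector being replayed, and the comparisons below dispatch on it: 0x900 decrementer, 0x500 external interrupt (replayed as the hypervisor virtualization interrupt on a POWER9 host), 0xa00 doorbell (hypervisor doorbell when running in HV mode, server doorbell otherwise) and 0xe60 hypervisor maintenance interrupt.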
*/ mfmsr r12 - LOAD_REG_ADDR(r11, 1f) + LOAD_REG_ADDR(r11, replay_interrupt_return) mfcr r9 ori r12,r12,MSR_EE cmpwi r3,0x900 beq decrementer_common cmpwi r3,0x500 - beq hardware_interrupt_common BEGIN_FTR_SECTION - cmpwi r3,0xe80 - beq h_doorbell_common_msgclr - cmpwi r3,0xea0 beq h_virt_irq_common +FTR_SECTION_ELSE + beq hardware_interrupt_common +ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_300) +BEGIN_FTR_SECTION + cmpwi r3,0xa00 + beq h_doorbell_common_msgclr cmpwi r3,0xe60 beq hmi_exception_common FTR_SECTION_ELSE cmpwi r3,0xa00 beq doorbell_super_common_msgclr ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE) -1: +replay_interrupt_return: blr _ASM_NOKPROBE_SYMBOL(__replay_interrupt) diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index dc0c49cfd90a..e1431800bfb9 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -125,6 +125,13 @@ int is_fadump_boot_memory_area(u64 addr, ulong size) return (addr + size) > RMA_START && addr <= fw_dump.boot_memory_size; } +int should_fadump_crash(void) +{ + if (!fw_dump.dump_registered || !fw_dump.fadumphdr_addr) + return 0; + return 1; +} + int is_fadump_active(void) { return fw_dump.dump_active; @@ -518,7 +525,7 @@ void crash_fadump(struct pt_regs *regs, const char *str) struct fadump_crash_info_header *fdh = NULL; int old_cpu, this_cpu; - if (!fw_dump.dump_registered || !fw_dump.fadumphdr_addr) + if (!should_fadump_crash()) return; /* @@ -1446,6 +1453,25 @@ static void fadump_init_files(void) return; } +static int fadump_panic_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + /* + * If firmware-assisted dump has been registered then trigger + * firmware-assisted dump and let firmware handle everything + * else. If this returns, then fadump was not registered, so + * go through the rest of the panic path. + */ + crash_fadump(NULL, ptr); + + return NOTIFY_DONE; +} + +static struct notifier_block fadump_panic_block = { + .notifier_call = fadump_panic_event, + .priority = INT_MIN /* may not return; must be done last */ +}; + /* * Prepare for firmware-assisted dump. */ @@ -1478,6 +1504,9 @@ int __init setup_fadump(void) init_fadump_mem_struct(&fdm, fw_dump.reserve_dump_area_start); fadump_init_files(); + atomic_notifier_chain_register(&panic_notifier_list, + &fadump_panic_block); + return 1; } subsys_initcall(setup_fadump); diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index e22734278458..8c54166491e7 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S @@ -388,7 +388,7 @@ DataAccess: EXCEPTION_PROLOG mfspr r10,SPRN_DSISR stw r10,_DSISR(r11) - andis. r0,r10,0xa470 /* weird error? */ + andis. r0,r10,DSISR_BAD_FAULT_32S@h bne 1f /* if not, try to put a PTE */ mfspr r4,SPRN_DAR /* into the hash table */ rlwinm r3,r10,32-15,21,21 /* DSISR_STORE -> _PAGE_RW */ @@ -403,13 +403,13 @@ DataAccess: DO_KVM 0x400 InstructionAccess: EXCEPTION_PROLOG - andis. r0,r9,0x4000 /* no pte found? */ + andis. r0,r9,SRR1_ISI_NOPT@h /* no pte found? */ beq 1f /* if so, try to put a PTE */ li r3,0 /* into the hash table */ mr r4,r12 /* SRR0 is fault address */ bl hash_page 1: mr r4,r12 - mr r5,r9 + andis. 
r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */ EXC_XFER_LITE(0x400, handle_page_fault) /* External interrupt */ diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 0ddc602b33a4..ff8511d6d8ea 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -92,13 +92,13 @@ END_FTR_SECTION(0, 1) .balign 8 .globl __secondary_hold_spinloop __secondary_hold_spinloop: - .llong 0x0 + .8byte 0x0 /* Secondary processors write this value with their cpu # */ /* after they enter the spin loop immediately below. */ .globl __secondary_hold_acknowledge __secondary_hold_acknowledge: - .llong 0x0 + .8byte 0x0 #ifdef CONFIG_RELOCATABLE /* This flag is set to 1 by a loader if the kernel should run @@ -650,7 +650,7 @@ __after_prom_start: bctr .balign 8 -p_end: .llong _end - copy_to_here +p_end: .8byte _end - copy_to_here 4: /* @@ -892,7 +892,7 @@ _GLOBAL(relative_toc) blr .balign 8 -p_toc: .llong __toc_start + 0x8000 - 0b +p_toc: .8byte __toc_start + 0x8000 - 0b /* * This is where the main kernel code starts. diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index c032fe8c2d26..4fee00d414e8 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -50,18 +50,20 @@ mtspr spr, reg #endif -/* Macro to test if an address is a kernel address */ #if CONFIG_TASK_SIZE <= 0x80000000 && CONFIG_PAGE_OFFSET >= 0x80000000 -#define IS_KERNEL(tmp, addr) \ - andis. tmp, addr, 0x8000 /* Address >= 0x80000000 */ -#define BRANCH_UNLESS_KERNEL(label) beq label -#else -#define IS_KERNEL(tmp, addr) \ - rlwinm tmp, addr, 16, 16, 31; \ - cmpli cr0, tmp, PAGE_OFFSET >> 16 -#define BRANCH_UNLESS_KERNEL(label) blt label +/* By simply checking Address >= 0x80000000, we know if its a kernel address */ +#define SIMPLE_KERNEL_ADDRESS 1 #endif +/* + * We need an ITLB miss handler for kernel addresses if: + * - Either we have modules + * - Or we have not pinned the first 8M + */ +#if defined(CONFIG_MODULES) || !defined(CONFIG_PIN_TLB_TEXT) || \ + defined(CONFIG_DEBUG_PAGEALLOC) +#define ITLB_MISS_KERNEL 1 +#endif /* * Value for the bits that have fixed value in RPN entries. @@ -123,7 +125,6 @@ turn_on_mmu: lis r0,start_here@h ori r0,r0,start_here@l mtspr SPRN_SRR0,r0 - SYNC rfi /* enables MMU */ /* @@ -170,7 +171,7 @@ turn_on_mmu: stw r1,0(r11); \ tovirt(r1,r11); /* set new kernel sp */ \ li r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \ - MTMSRD(r10); /* (except for mach check in rtas) */ \ + mtmsr r10; \ stw r0,GPR0(r11); \ SAVE_4GPRS(3, r11); \ SAVE_2GPRS(7, r11) @@ -300,7 +301,7 @@ SystemCall: /* On the MPC8xx, this is a software emulation interrupt. It occurs * for all unimplemented and illegal instructions. */ - EXCEPTION(0x1000, SoftEmu, SoftwareEmulation, EXC_XFER_STD) + EXCEPTION(0x1000, SoftEmu, program_check_exception, EXC_XFER_STD) . 
= 0x1100 /* @@ -325,7 +326,7 @@ SystemCall: #endif InstructionTLBMiss: -#if defined(CONFIG_8xx_CPU6) || defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) || defined (CONFIG_HUGETLB_PAGE) +#if defined(CONFIG_8xx_CPU6) || defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE) mtspr SPRN_SPRG_SCRATCH2, r3 #endif EXCEPTION_PROLOG_0 @@ -343,15 +344,32 @@ InstructionTLBMiss: INVALIDATE_ADJACENT_PAGES_CPU15(r11, r10) /* Only modules will cause ITLB Misses as we always * pin the first 8MB of kernel memory */ -#if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) || defined (CONFIG_HUGETLB_PAGE) +#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE) mfcr r3 #endif -#if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) - IS_KERNEL(r11, r10) +#ifdef ITLB_MISS_KERNEL +#if defined(SIMPLE_KERNEL_ADDRESS) && defined(CONFIG_PIN_TLB_TEXT) + andis. r11, r10, 0x8000 /* Address >= 0x80000000 */ +#else + rlwinm r11, r10, 16, 0xfff8 + cmpli cr0, r11, PAGE_OFFSET@h +#ifndef CONFIG_PIN_TLB_TEXT + /* It is assumed that kernel code fits into the first 8M page */ +_ENTRY(ITLBMiss_cmp) + cmpli cr7, r11, (PAGE_OFFSET + 0x0800000)@h +#endif +#endif #endif mfspr r11, SPRN_M_TW /* Get level 1 table */ -#if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) - BRANCH_UNLESS_KERNEL(3f) +#ifdef ITLB_MISS_KERNEL +#if defined(SIMPLE_KERNEL_ADDRESS) && defined(CONFIG_PIN_TLB_TEXT) + beq+ 3f +#else + blt+ 3f +#endif +#ifndef CONFIG_PIN_TLB_TEXT + blt cr7, ITLBMissLinear +#endif lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha 3: #endif @@ -369,7 +387,7 @@ InstructionTLBMiss: rlwimi r10, r11, 0, 0, 32 - PAGE_SHIFT - 1 /* Add level 2 base */ lwz r10, 0(r10) /* Get the pte */ 4: -#if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) || defined (CONFIG_HUGETLB_PAGE) +#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE) mtcr r3 #endif /* Insert the APG into the TWC from the Linux PTE. */ @@ -400,7 +418,7 @@ InstructionTLBMiss: MTSPR_CPU6(SPRN_MI_RPN, r10, r3) /* Update TLB entry */ /* Restore registers */ -#if defined(CONFIG_8xx_CPU6) || defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) || defined (CONFIG_HUGETLB_PAGE) +#if defined(CONFIG_8xx_CPU6) || defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE) mfspr r3, SPRN_SPRG_SCRATCH2 #endif EXCEPTION_EPILOG_0 @@ -447,23 +465,23 @@ DataStoreTLBMiss: * kernel page tables. */ mfspr r10, SPRN_MD_EPN - rlwinm r10, r10, 16, 0xfff8 - cmpli cr0, r10, PAGE_OFFSET@h + rlwinm r11, r10, 16, 0xfff8 + cmpli cr0, r11, PAGE_OFFSET@h mfspr r11, SPRN_M_TW /* Get level 1 table */ blt+ 3f + rlwinm r11, r10, 16, 0xfff8 #ifndef CONFIG_PIN_TLB_IMMR - cmpli cr0, r10, VIRT_IMMR_BASE@h + cmpli cr0, r11, VIRT_IMMR_BASE@h #endif _ENTRY(DTLBMiss_cmp) - cmpli cr7, r10, (PAGE_OFFSET + 0x1800000)@h - lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha + cmpli cr7, r11, (PAGE_OFFSET + 0x1800000)@h #ifndef CONFIG_PIN_TLB_IMMR _ENTRY(DTLBMiss_jmp) beq- DTLBMissIMMR #endif blt cr7, DTLBMissLinear + lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha 3: - mfspr r10, SPRN_MD_EPN /* Insert level 1 index */ rlwimi r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29 @@ -569,8 +587,8 @@ _ENTRY(DTLBMiss_jmp) InstructionTLBError: EXCEPTION_PROLOG mr r4,r12 - mr r5,r9 - andis. r10,r5,0x4000 + andis. r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */ + andis. r10,r9,SRR1_ISI_NOPT@h beq+ 1f tlbie r4 itlbie: @@ -595,7 +613,7 @@ DARFixed:/* Return from dcbx instruction bug workaround */ mfspr r5,SPRN_DSISR stw r5,_DSISR(r11) mfspr r4,SPRN_DAR - andis. 
r10,r5,0x4000 + andis. r10,r5,DSISR_NOHPTE@h beq+ 1f tlbie r4 dtlbie: @@ -684,7 +702,7 @@ DTLBMissLinear: /* Set 8M byte page and mark it valid */ li r11, MD_PS8MEG | MD_SVALID MTSPR_CPU6(SPRN_MD_TWC, r11, r3) - rlwinm r10, r10, 16, 0x0f800000 /* 8xx supports max 256Mb RAM */ + rlwinm r10, r10, 0, 0x0f800000 /* 8xx supports max 256Mb RAM */ ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY | \ _PAGE_PRESENT MTSPR_CPU6(SPRN_MD_RPN, r10, r11) /* Update TLB entry */ @@ -695,6 +713,22 @@ DTLBMissLinear: EXCEPTION_EPILOG_0 rfi +#ifndef CONFIG_PIN_TLB_TEXT +ITLBMissLinear: + mtcr r3 + /* Set 8M byte page and mark it valid */ + li r11, MI_PS8MEG | MI_SVALID | _PAGE_EXEC + MTSPR_CPU6(SPRN_MI_TWC, r11, r3) + rlwinm r10, r10, 0, 0x0f800000 /* 8xx supports max 256Mb RAM */ + ori r10, r10, 0xf0 | MI_SPS16K | _PAGE_SHARED | _PAGE_DIRTY | \ + _PAGE_PRESENT + MTSPR_CPU6(SPRN_MI_RPN, r10, r11) /* Update TLB entry */ + + mfspr r3, SPRN_SPRG_SCRATCH2 + EXCEPTION_EPILOG_0 + rfi +#endif + /* This is the procedure to calculate the data EA for buggy dcbx,dcbi instructions * by decoding the registers used by the dcbx instruction and adding them. * DAR is set to the calculated address. @@ -705,9 +739,10 @@ FixupDAR:/* Entry point for dcbx workaround. */ mtspr SPRN_SPRG_SCRATCH2, r10 /* fetch instruction from memory. */ mfspr r10, SPRN_SRR0 - IS_KERNEL(r11, r10) + rlwinm r11, r10, 16, 0xfff8 + cmpli cr0, r11, PAGE_OFFSET@h mfspr r11, SPRN_M_TW /* Get level 1 table */ - BRANCH_UNLESS_KERNEL(3f) + blt+ 3f rlwinm r11, r10, 16, 0xfff8 _ENTRY(FixupDAR_cmp) cmpli cr7, r11, (PAGE_OFFSET + 0x1800000)@h @@ -915,10 +950,8 @@ start_here: rfi /* Load up the kernel context */ 2: - SYNC /* Force all PTE updates to finish */ tlbia /* Clear all TLB entries */ sync /* wait for tlbia/tlbie to finish */ - TLBSYNC /* ... on all CPUs */ /* set up the PTE pointers for the Abatron bdiGDB. */ @@ -955,15 +988,14 @@ initial_mmu: mtspr SPRN_MD_CTR, r10 /* remove PINNED DTLB entries */ tlbia /* Invalidate all TLB entries */ -/* Always pin the first 8 MB ITLB to prevent ITLB - misses while mucking around with SRR0/SRR1 in asm -*/ +#ifdef CONFIG_PIN_TLB_TEXT lis r8, MI_RSV4I@h ori r8, r8, 0x1c00 mtspr SPRN_MI_CTR, r8 /* Set instruction MMU control */ +#endif -#ifdef CONFIG_PIN_TLB +#ifdef CONFIG_PIN_TLB_DATA oris r10, r10, MD_RSV4I@h mtspr SPRN_MD_CTR, r10 /* Set data TLB control */ #endif @@ -989,6 +1021,7 @@ initial_mmu: * internal registers (among other things). */ #ifdef CONFIG_PIN_TLB_IMMR + oris r10, r10, MD_RSV4I@h ori r10, r10, 0x1c00 mtspr SPRN_MD_CTR, r10 diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S index e6252c5a57a4..1125c9be9e06 100644 --- a/arch/powerpc/kernel/idle_book3s.S +++ b/arch/powerpc/kernel/idle_book3s.S @@ -85,7 +85,61 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300) std r3,_WORT(r1) mfspr r3,SPRN_WORC std r3,_WORC(r1) +/* + * On POWER9, there are idle states such as stop4, invoked via cpuidle, + * that lose hypervisor resources. In such cases, we need to save + * additional SPRs before entering those idle states so that they can + * be restored to their older values on wakeup from the idle state. + * + * On POWER8, the only such deep idle state is winkle which is used + * only in the context of CPU-Hotplug, where these additional SPRs are + * reinitiazed to a sane value. Hence there is no need to save/restore + * these SPRs. 
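The PACA-side storage for these registers is assumed to look roughly as follows; the field list matches the STOP_SPR() offsets generated in the asm-offsets.c hunk earlier in this patch, while the type name and u64 widths are assumptions:

	struct stop_sprs {
		u64 pid;
		u64 ldbar;
		u64 fscr;
		u64 hfscr;
		u64 mmcr1;
		u64 mmcr2;
		u64 mmcra;
	};

One instance is embedded in struct paca_struct, which is why the save/restore sequences below can address the values as STOP_*(r13).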
+ */ +BEGIN_FTR_SECTION + blr +END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) +power9_save_additional_sprs: + mfspr r3, SPRN_PID + mfspr r4, SPRN_LDBAR + std r3, STOP_PID(r13) + std r4, STOP_LDBAR(r13) + + mfspr r3, SPRN_FSCR + mfspr r4, SPRN_HFSCR + std r3, STOP_FSCR(r13) + std r4, STOP_HFSCR(r13) + + mfspr r3, SPRN_MMCRA + mfspr r4, SPRN_MMCR1 + std r3, STOP_MMCRA(r13) + std r4, STOP_MMCR1(r13) + + mfspr r3, SPRN_MMCR2 + std r3, STOP_MMCR2(r13) + blr + +power9_restore_additional_sprs: + ld r3,_LPCR(r1) + ld r4, STOP_PID(r13) + mtspr SPRN_LPCR,r3 + mtspr SPRN_PID, r4 + + ld r3, STOP_LDBAR(r13) + ld r4, STOP_FSCR(r13) + mtspr SPRN_LDBAR, r3 + mtspr SPRN_FSCR, r4 + + ld r3, STOP_HFSCR(r13) + ld r4, STOP_MMCRA(r13) + mtspr SPRN_HFSCR, r3 + mtspr SPRN_MMCRA, r4 + /* We have already restored PACA_MMCR0 */ + ld r3, STOP_MMCR1(r13) + ld r4, STOP_MMCR2(r13) + mtspr SPRN_MMCR1, r3 + mtspr SPRN_MMCR2, r4 blr /* @@ -141,7 +195,16 @@ pnv_powersave_common: std r5,_CCR(r1) std r1,PACAR1(r13) +BEGIN_FTR_SECTION /* + * POWER9 does not require real mode to stop, and presently does not + * set hwthread_state for KVM (threads don't share MMU context), so + * we can remain in virtual mode for this. + */ + bctr +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) + /* + * POWER8 * Go to real mode to do the nap, as required by the architecture. * Also, we need to be in real mode before setting hwthread_state, * because as soon as we do that, another thread can switch @@ -151,6 +214,20 @@ pnv_powersave_common: mtmsrd r7,0 bctr +/* + * This is the sequence required to execute idle instructions, as + * specified in ISA v2.07 (and earlier). MSR[IR] and MSR[DR] must be 0. + */ +#define IDLE_STATE_ENTER_SEQ_NORET(IDLE_INST) \ + /* Magic NAP/SLEEP/WINKLE mode enter sequence */ \ + std r0,0(r1); \ + ptesync; \ + ld r0,0(r1); \ +236: cmpd cr0,r0,r0; \ + bne 236b; \ + IDLE_INST; + + .globl pnv_enter_arch207_idle_mode pnv_enter_arch207_idle_mode: #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE @@ -242,20 +319,27 @@ enter_winkle: /* * r3 - PSSCR value corresponding to the requested stop state. */ -power_enter_stop: #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE - /* Tell KVM we're entering idle */ +power_enter_stop_kvm_rm: + /* + * This is currently unused because POWER9 KVM does not have to + * gather secondary threads into sibling mode, but the code is + * here in case that function is required. + * + * Tell KVM we're entering idle. + */ li r4,KVM_HWTHREAD_IN_IDLE /* DO THIS IN REAL MODE! See comment above. */ stb r4,HSTATE_HWTHREAD_STATE(r13) #endif +power_enter_stop: /* * Check if we are executing the lite variant with ESL=EC=0 */ andis. r4,r3,PSSCR_EC_ESL_MASK_SHIFTED clrldi r3,r3,60 /* r3 = Bits[60:63] = Requested Level (RL) */ bne .Lhandle_esl_ec_set - IDLE_STATE_ENTER_SEQ(PPC_STOP) + PPC_STOP li r3,0 /* Since we didn't lose state, return 0 */ /* @@ -288,7 +372,8 @@ power_enter_stop: ld r4,ADDROFF(pnv_first_deep_stop_state)(r5) cmpd r3,r4 bge .Lhandle_deep_stop - IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP) + PPC_STOP /* Does not return (system reset interrupt) */ + .Lhandle_deep_stop: /* * Entering deep idle state. @@ -310,7 +395,7 @@ lwarx_loop_stop: bl save_sprs_to_stack - IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP) + PPC_STOP /* Does not return (system reset interrupt) */ /* * Entered with MSR[EE]=0 and no soft-masked interrupts pending. 
@@ -411,6 +496,18 @@ pnv_powersave_wakeup_mce: b pnv_powersave_wakeup +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE +kvm_start_guest_check: + li r0,KVM_HWTHREAD_IN_KERNEL + stb r0,HSTATE_HWTHREAD_STATE(r13) + /* Order setting hwthread_state vs. testing hwthread_req */ + sync + lbz r0,HSTATE_HWTHREAD_REQ(r13) + cmpwi r0,0 + beqlr + b kvm_start_guest +#endif + /* * Called from reset vector for powersave wakeups. * cr3 - set to gt if waking up with partial/complete hypervisor state loss @@ -435,15 +532,9 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300) mr r3,r12 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE - li r0,KVM_HWTHREAD_IN_KERNEL - stb r0,HSTATE_HWTHREAD_STATE(r13) - /* Order setting hwthread_state vs. testing hwthread_req */ - sync - lbz r0,HSTATE_HWTHREAD_REQ(r13) - cmpwi r0,0 - beq 1f - b kvm_start_guest -1: +BEGIN_FTR_SECTION + bl kvm_start_guest_check +END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) #endif /* Return SRR1 from power7_nap() */ @@ -809,9 +900,16 @@ no_segments: mtctr r12 bctrl +/* + * On POWER9, we can come here on wakeup from a cpuidle stop state. + * Hence restore the additional SPRs to the saved value. + * + * On POWER8, we come here only on winkle. Since winkle is used + * only in the case of CPU-Hotplug, we don't need to restore + * the additional SPRs. + */ BEGIN_FTR_SECTION - ld r4,_LPCR(r1) - mtspr SPRN_LPCR,r4 + bl power9_restore_additional_sprs END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) hypervisor_state_restored: diff --git a/arch/powerpc/kernel/io-workarounds.c b/arch/powerpc/kernel/io-workarounds.c index a582e0d42525..aa9f1b8261db 100644 --- a/arch/powerpc/kernel/io-workarounds.c +++ b/arch/powerpc/kernel/io-workarounds.c @@ -19,6 +19,8 @@ #include #include #include +#include + #define IOWA_MAX_BUS 8 @@ -75,8 +77,7 @@ struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr) * We won't find huge pages here (iomem). Also can't hit * a page table free due to init_mm */ - ptep = __find_linux_pte_or_hugepte(init_mm.pgd, vaddr, - NULL, &hugepage_shift); + ptep = find_init_mm_pte(vaddr, &hugepage_shift); if (ptep == NULL) paddr = 0; else { @@ -192,7 +193,7 @@ void iowa_register_bus(struct pci_controller *phb, struct ppc_pci_io *ops, if (iowa_bus_count >= IOWA_MAX_BUS) { pr_err("IOWA:Too many pci bridges, " - "workarounds disabled for %s\n", np->full_name); + "workarounds disabled for %pOF\n", np); return; } @@ -207,6 +208,6 @@ void iowa_register_bus(struct pci_controller *phb, struct ppc_pci_io *ops, iowa_bus_count++; - pr_debug("IOWA:[%d]Add bus, %s.\n", iowa_bus_count-1, np->full_name); + pr_debug("IOWA:[%d]Add bus, %pOF.\n", iowa_bus_count-1, np); } diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index 233ca3fe4754..af7a20dc6e09 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -127,8 +127,7 @@ static ssize_t fail_iommu_store(struct device *dev, return count; } -static DEVICE_ATTR(fail_iommu, S_IRUGO|S_IWUSR, fail_iommu_show, - fail_iommu_store); +static DEVICE_ATTR_RW(fail_iommu); static int fail_iommu_bus_notify(struct notifier_block *nb, unsigned long action, void *data) @@ -190,7 +189,7 @@ static unsigned long iommu_range_alloc(struct device *dev, unsigned int pool_nr; struct iommu_pool *pool; - align_mask = 0xffffffffffffffffl >> (64 - align_order); + align_mask = (1ull << align_order) - 1; /* This allocator was derived from x86_64's bit string search */ @@ -208,7 +207,7 @@ static unsigned long iommu_range_alloc(struct device *dev, * We don't need to disable preemption here because any CPU can * safely use any IOMMU pool. 
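This is also why the pool lookup just below uses raw_cpu_read() rather than __this_cpu_read(): the raw accessor skips the preemption-disabled check, which would otherwise trigger a warning (with CONFIG_DEBUG_PREEMPT) when this path runs with preemption enabled.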
*/ - pool_nr = __this_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1); + pool_nr = raw_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1); if (largealloc) pool = &(tbl->large_pool); diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index f291f7826abc..4e65bf82f5e0 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -24,7 +24,7 @@ * mask register (of which only 16 are defined), hence the weird shifting * and complement of the cached_irq_mask. I want to be able to stuff * this right into the SIU SMASK register. - * Many of the prep/chrp functions are conditional compiled on CONFIG_8xx + * Many of the prep/chrp functions are conditional compiled on CONFIG_PPC_8xx * to reduce code space and undefined function references. */ @@ -143,9 +143,10 @@ notrace unsigned int __check_irq_replay(void) */ unsigned char happened = local_paca->irq_happened; - /* Clear bit 0 which we wouldn't clear otherwise */ - local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS; if (happened & PACA_IRQ_HARD_DIS) { + /* Clear bit 0 which we wouldn't clear otherwise */ + local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS; + /* * We may have missed a decrementer interrupt if hard disabled. * Check the decrementer register in case we had a rollover @@ -173,41 +174,39 @@ notrace unsigned int __check_irq_replay(void) * This is a higher priority interrupt than the others, so * replay it first. */ - local_paca->irq_happened &= ~PACA_IRQ_HMI; - if (happened & PACA_IRQ_HMI) + if (happened & PACA_IRQ_HMI) { + local_paca->irq_happened &= ~PACA_IRQ_HMI; return 0xe60; + } - /* - * We may have missed a decrementer interrupt. We check the - * decrementer itself rather than the paca irq_happened field - * in case we also had a rollover while hard disabled - */ - local_paca->irq_happened &= ~PACA_IRQ_DEC; - if (happened & PACA_IRQ_DEC) + if (happened & PACA_IRQ_DEC) { + local_paca->irq_happened &= ~PACA_IRQ_DEC; return 0x900; + } - /* Finally check if an external interrupt happened */ - local_paca->irq_happened &= ~PACA_IRQ_EE; - if (happened & PACA_IRQ_EE) + if (happened & PACA_IRQ_EE) { + local_paca->irq_happened &= ~PACA_IRQ_EE; return 0x500; + } #ifdef CONFIG_PPC_BOOK3E - /* Finally check if an EPR external interrupt happened - * this bit is typically set if we need to handle another - * "edge" interrupt from within the MPIC "EPR" handler + /* + * Check if an EPR external interrupt happened this bit is typically + * set if we need to handle another "edge" interrupt from within the + * MPIC "EPR" handler. 
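As with the other interrupt sources rearranged above, the PACA_IRQ_EE_EDGE bit is now cleared only when that interrupt is actually the one being replayed, instead of being cleared unconditionally before the test.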
*/ - local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE; - if (happened & PACA_IRQ_EE_EDGE) + if (happened & PACA_IRQ_EE_EDGE) { + local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE; return 0x500; + } - local_paca->irq_happened &= ~PACA_IRQ_DBELL; - if (happened & PACA_IRQ_DBELL) - return 0x280; -#else - local_paca->irq_happened &= ~PACA_IRQ_DBELL; if (happened & PACA_IRQ_DBELL) { - if (cpu_has_feature(CPU_FTR_HVMODE)) - return 0xe80; + local_paca->irq_happened &= ~PACA_IRQ_DBELL; + return 0x280; + } +#else + if (happened & PACA_IRQ_DBELL) { + local_paca->irq_happened &= ~PACA_IRQ_DBELL; return 0xa00; } #endif /* CONFIG_PPC_BOOK3E */ @@ -483,6 +482,18 @@ int arch_show_interrupts(struct seq_file *p, int prec) seq_printf(p, " Hypervisor Maintenance Interrupts\n"); } + seq_printf(p, "%*s: ", prec, "NMI"); + for_each_online_cpu(j) + seq_printf(p, "%10u ", per_cpu(irq_stat, j).sreset_irqs); + seq_printf(p, " System Reset interrupts\n"); + +#ifdef CONFIG_PPC_WATCHDOG + seq_printf(p, "%*s: ", prec, "WDG"); + for_each_online_cpu(j) + seq_printf(p, "%10u ", per_cpu(irq_stat, j).soft_nmi_irqs); + seq_printf(p, " Watchdog soft-NMI interrupts\n"); +#endif + #ifdef CONFIG_PPC_DOORBELL if (cpu_has_feature(CPU_FTR_DBELL)) { seq_printf(p, "%*s: ", prec, "DBL"); @@ -507,6 +518,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu) sum += per_cpu(irq_stat, cpu).spurious_irqs; sum += per_cpu(irq_stat, cpu).timer_irqs_others; sum += per_cpu(irq_stat, cpu).hmi_exceptions; + sum += per_cpu(irq_stat, cpu).sreset_irqs; +#ifdef CONFIG_PPC_WATCHDOG + sum += per_cpu(irq_stat, cpu).soft_nmi_irqs; +#endif #ifdef CONFIG_PPC_DOORBELL sum += per_cpu(irq_stat, cpu).doorbell_irqs; #endif diff --git a/arch/powerpc/kernel/isa-bridge.c b/arch/powerpc/kernel/isa-bridge.c index bb6f8993412e..1df6c74aa731 100644 --- a/arch/powerpc/kernel/isa-bridge.c +++ b/arch/powerpc/kernel/isa-bridge.c @@ -164,7 +164,7 @@ void __init isa_bridge_find_early(struct pci_controller *hose) /* Set the global ISA io base to indicate we have an ISA bridge */ isa_io_base = ISA_IO_BASE; - pr_debug("ISA bridge (early) is %s\n", np->full_name); + pr_debug("ISA bridge (early) is %pOF\n", np); } /** @@ -187,15 +187,15 @@ void __init isa_bridge_init_non_pci(struct device_node *np) pna = of_n_addr_cells(np); if (of_property_read_u32(np, "#address-cells", &na) || of_property_read_u32(np, "#size-cells", &ns)) { - pr_warn("ISA: Non-PCI bridge %s is missing address format\n", - np->full_name); + pr_warn("ISA: Non-PCI bridge %pOF is missing address format\n", + np); return; } /* Check it's a supported address format */ if (na != 2 || ns != 1) { - pr_warn("ISA: Non-PCI bridge %s has unsupported address format\n", - np->full_name); + pr_warn("ISA: Non-PCI bridge %pOF has unsupported address format\n", + np); return; } rs = na + ns + pna; @@ -203,8 +203,8 @@ void __init isa_bridge_init_non_pci(struct device_node *np) /* Grab the ranges property */ ranges = of_get_property(np, "ranges", &rlen); if (ranges == NULL || rlen < rs) { - pr_warn("ISA: Non-PCI bridge %s has absent or invalid ranges\n", - np->full_name); + pr_warn("ISA: Non-PCI bridge %pOF has absent or invalid ranges\n", + np); return; } @@ -220,8 +220,8 @@ void __init isa_bridge_init_non_pci(struct device_node *np) /* Got something ? 
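Most of the printk changes in this file and its neighbours replace np->full_name with the %pOF vsprintf specifier, which prints the full device-tree path of a struct device_node and removes the need to keep a cached full_name string around. A hedged kernel-style sketch of the idiom (the wrapper function is illustrative only):

#include <linux/of.h>
#include <linux/printk.h>

static void report_node(struct device_node *np)
{
        /* %pOF expands to the node's full device-tree path */
        pr_info("configuring %pOF\n", np);
}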
*/ if (!size || !pbasep) { - pr_warn("ISA: Non-PCI bridge %s has no usable IO range\n", - np->full_name); + pr_warn("ISA: Non-PCI bridge %pOF has no usable IO range\n", + np); return; } @@ -233,15 +233,15 @@ void __init isa_bridge_init_non_pci(struct device_node *np) /* Map pbase */ pbase = of_translate_address(np, pbasep); if (pbase == OF_BAD_ADDR) { - pr_warn("ISA: Non-PCI bridge %s failed to translate IO base\n", - np->full_name); + pr_warn("ISA: Non-PCI bridge %pOF failed to translate IO base\n", + np); return; } /* We need page alignment */ if ((cbase & ~PAGE_MASK) || (pbase & ~PAGE_MASK)) { - pr_warn("ISA: Non-PCI bridge %s has non aligned IO range\n", - np->full_name); + pr_warn("ISA: Non-PCI bridge %pOF has non aligned IO range\n", + np); return; } @@ -255,7 +255,7 @@ void __init isa_bridge_init_non_pci(struct device_node *np) __ioremap_at(pbase, (void *)ISA_IO_BASE, size, pgprot_val(pgprot_noncached(__pgprot(0)))); - pr_debug("ISA: Non-PCI bridge is %s\n", np->full_name); + pr_debug("ISA: Non-PCI bridge is %pOF\n", np); } /** @@ -277,8 +277,8 @@ static void isa_bridge_find_late(struct pci_dev *pdev, /* Set the global ISA io base to indicate we have an ISA bridge */ isa_io_base = ISA_IO_BASE; - pr_debug("ISA bridge (late) is %s on %s\n", - devnode->full_name, pci_name(pdev)); + pr_debug("ISA bridge (late) is %pOF on %s\n", + devnode, pci_name(pdev)); } /** diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c index dbf098121ce6..35e240a0a408 100644 --- a/arch/powerpc/kernel/kgdb.c +++ b/arch/powerpc/kernel/kgdb.c @@ -67,9 +67,9 @@ static struct hard_trap_info #endif #else /* ! (defined(CONFIG_40x) || defined(CONFIG_BOOKE)) */ { 0x0d00, 0x05 /* SIGTRAP */ }, /* single-step */ -#if defined(CONFIG_8xx) +#if defined(CONFIG_PPC_8xx) { 0x1000, 0x04 /* SIGILL */ }, /* software emulation */ -#else /* ! CONFIG_8xx */ +#else /* ! CONFIG_PPC_8xx */ { 0x0f00, 0x04 /* SIGILL */ }, /* performance monitor */ { 0x0f20, 0x08 /* SIGFPE */ }, /* altivec unavailable */ { 0x1300, 0x05 /* SIGTRAP */ }, /* instruction address break */ diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c index 1086ea37c832..9ad37f827a97 100644 --- a/arch/powerpc/kernel/kvm.c +++ b/arch/powerpc/kernel/kvm.c @@ -25,7 +25,6 @@ #include #include #include -#include /* hardlockup_detector_disable() */ #include #include @@ -719,12 +718,6 @@ static __init void kvm_free_tmp(void) static int __init kvm_guest_init(void) { - /* - * The hardlockup detector is likely to get false positives in - * KVM guests, so disable it by default. 
- */ - hardlockup_detector_disable(); - if (!kvm_para_available()) goto free_tmp; diff --git a/arch/powerpc/kernel/l2cr_6xx.S b/arch/powerpc/kernel/l2cr_6xx.S index 97ec8557f974..6408f09dbbd9 100644 --- a/arch/powerpc/kernel/l2cr_6xx.S +++ b/arch/powerpc/kernel/l2cr_6xx.S @@ -181,7 +181,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450) mtctr r4 li r4,0 1: - lwzx r0,r0,r4 + lwzx r0,0,r4 addi r4,r4,32 /* Go to start of next cache line */ bdnz 1b isync @@ -328,7 +328,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_L3CR) mtctr r4 li r4,0 1: - lwzx r0,r0,r4 + lwzx r0,0,r4 dcbf 0,r4 addi r4,r4,32 /* Go to start of next cache line */ bdnz 1b diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c index 0694d20f85b6..5e5a64a8b4e4 100644 --- a/arch/powerpc/kernel/legacy_serial.c +++ b/arch/powerpc/kernel/legacy_serial.c @@ -147,8 +147,8 @@ static int __init add_legacy_port(struct device_node *np, int want_index, legacy_serial_ports[index].serial_out = tsi_serial_out; } - printk(KERN_DEBUG "Found legacy serial port %d for %s\n", - index, np->full_name); + printk(KERN_DEBUG "Found legacy serial port %d for %pOF\n", + index, np); printk(KERN_DEBUG " %s=%llx, taddr=%llx, irq=%lx, clk=%d, speed=%d\n", (iotype == UPIO_PORT) ? "port" : "mem", (unsigned long long)base, (unsigned long long)taddr, irq, @@ -207,7 +207,7 @@ static int __init add_legacy_isa_port(struct device_node *np, int index = -1; u64 taddr; - DBG(" -> add_legacy_isa_port(%s)\n", np->full_name); + DBG(" -> add_legacy_isa_port(%pOF)\n", np); /* Get the ISA port number */ reg = of_get_property(np, "reg", NULL); @@ -256,7 +256,7 @@ static int __init add_legacy_pci_port(struct device_node *np, unsigned int flags; int iotype, index = -1, lindex = 0; - DBG(" -> add_legacy_pci_port(%s)\n", np->full_name); + DBG(" -> add_legacy_pci_port(%pOF)\n", np); /* We only support ports that have a clock frequency properly * encoded in the device-tree (that is have an fcode). Anything @@ -374,7 +374,7 @@ void __init find_legacy_serial_ports(void) if (path != NULL) { stdout = of_find_node_by_path(path); if (stdout) - DBG("stdout is %s\n", stdout->full_name); + DBG("stdout is %pOF\n", stdout); } else { DBG(" no linux,stdout-path !\n"); } @@ -603,7 +603,7 @@ static int __init check_legacy_serial_console(void) DBG(" can't find stdout package %s !\n", name); return -ENODEV; } - DBG("stdout is %s\n", prom_stdout->full_name); + DBG("stdout is %pOF\n", prom_stdout); name = of_get_property(prom_stdout, "name", NULL); if (!name) { diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c index e0e131e662ed..9b2ea7e71c06 100644 --- a/arch/powerpc/kernel/mce.c +++ b/arch/powerpc/kernel/mce.c @@ -22,11 +22,14 @@ #undef DEBUG #define pr_fmt(fmt) "mce: " fmt +#include #include #include #include #include #include + +#include #include static DEFINE_PER_CPU(int, mce_nest_count); @@ -446,3 +449,33 @@ uint64_t get_mce_fault_addr(struct machine_check_event *evt) return 0; } EXPORT_SYMBOL(get_mce_fault_addr); + +/* + * This function is called in real mode. Strictly no printk's please. + * + * regs->nip and regs->msr contains srr0 and ssr1. 
+ */ +long machine_check_early(struct pt_regs *regs) +{ + long handled = 0; + + __this_cpu_inc(irq_stat.mce_exceptions); + + if (cur_cpu_spec && cur_cpu_spec->machine_check_early) + handled = cur_cpu_spec->machine_check_early(regs); + return handled; +} + +long hmi_exception_realmode(struct pt_regs *regs) +{ + __this_cpu_inc(irq_stat.hmi_exceptions); + + wait_for_subcore_guest_exit(); + + if (ppc_md.hmi_exception_early) + ppc_md.hmi_exception_early(regs); + + wait_for_tb_resync(); + + return 0; +} diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c index b76ca198e09c..72f153c6f3fa 100644 --- a/arch/powerpc/kernel/mce_power.c +++ b/arch/powerpc/kernel/mce_power.c @@ -624,5 +624,18 @@ long __machine_check_early_realmode_p8(struct pt_regs *regs) long __machine_check_early_realmode_p9(struct pt_regs *regs) { + /* + * On POWER9 DD2.1 and below, it's possible to get a machine check + * caused by a paste instruction where only DSISR bit 25 is set. This + * will result in the MCE handler seeing an unknown event and the kernel + * crashing. An MCE that occurs like this is spurious, so we don't need + * to do anything in terms of servicing it. If there is something that + * needs to be serviced, the CPU will raise the MCE again with the + * correct DSISR so that it can be serviced properly. So detect this + * case and mark it as handled. + */ + if (SRR1_MC_LOADSTORE(regs->msr) && regs->dsisr == 0x02000000) + return 1; + return mce_handle_error(regs, mce_p9_derror_table, mce_p9_ierror_table); } diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c index 34aeac54f120..becaec990140 100644 --- a/arch/powerpc/kernel/of_platform.c +++ b/arch/powerpc/kernel/of_platform.c @@ -45,7 +45,7 @@ static int of_pci_phb_probe(struct platform_device *dev) if (ppc_md.pci_setup_phb == NULL) return -ENODEV; - pr_info("Setting up PCI bus %s\n", dev->dev.of_node->full_name); + pr_info("Setting up PCI bus %pOF\n", dev->dev.of_node); /* Alloc and setup PHB data structure */ phb = pcibios_alloc_controller(dev->dev.of_node); diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c index 6f8273f5e988..91e037ab20a1 100644 --- a/arch/powerpc/kernel/optprobes.c +++ b/arch/powerpc/kernel/optprobes.c @@ -104,8 +104,10 @@ static unsigned long can_optimize(struct kprobe *p) * and that can be emulated. */ if (!is_conditional_branch(*p->ainsn.insn) && - analyse_instr(&op, ®s, *p->ainsn.insn)) + analyse_instr(&op, ®s, *p->ainsn.insn) == 1) { + emulate_update_regs(®s, &op); nip = regs.nip; + } return nip; } diff --git a/arch/powerpc/kernel/optprobes_head.S b/arch/powerpc/kernel/optprobes_head.S index 4937bef7652f..52fc864cdec4 100644 --- a/arch/powerpc/kernel/optprobes_head.S +++ b/arch/powerpc/kernel/optprobes_head.S @@ -60,10 +60,6 @@ optprobe_template_entry: std r5,_CCR(r1) lbz r5,PACASOFTIRQEN(r13) std r5,SOFTE(r1) - mfdar r5 - std r5,_DAR(r1) - mfdsisr r5 - std r5,_DSISR(r1) /* * We may get here from a module, so load the kernel TOC in r2. 
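The optprobes.c hunk above reflects a change in the single-step helpers' contract: analyse_instr() now only decodes the instruction into a struct instruction_op and returns 1 when it can be emulated by updating the register image, and the caller must then invoke emulate_update_regs() to apply the result. A hedged sketch of that calling sequence, mirroring the calls shown in the diff (the wrapper function is made up):

#include <asm/ptrace.h>
#include <asm/sstep.h>

/* Returns the post-emulation NIP, or 0 if the instruction could not be
 * emulated. 'regs' is a scratch copy, as in can_optimize() above.
 */
static unsigned long emulated_nip(struct pt_regs *regs, unsigned int insn)
{
        struct instruction_op op;

        if (analyse_instr(&op, regs, insn) != 1)
                return 0;

        emulate_update_regs(regs, &op); /* commit the decoded operation */
        return regs->nip;
}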
@@ -122,10 +118,6 @@ optprobe_template_call_emulate: mtxer r5 ld r5,_CCR(r1) mtcr r5 - ld r5,_DAR(r1) - mtdar r5 - ld r5,_DSISR(r1) - mtdsisr r5 REST_GPR(0,r1) REST_10GPRS(2,r1) REST_10GPRS(12,r1) diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index 8d63627e067f..2ff2b8a19f71 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -99,18 +99,27 @@ static inline void free_lppacas(void) { } * If you make the number of persistent SLB entries dynamic, please also * update PR KVM to flush and restore them accordingly. */ -static struct slb_shadow *slb_shadow; +static struct slb_shadow * __initdata slb_shadow; static void __init allocate_slb_shadows(int nr_cpus, int limit) { int size = PAGE_ALIGN(sizeof(struct slb_shadow) * nr_cpus); + + if (early_radix_enabled()) + return; + slb_shadow = __va(memblock_alloc_base(size, PAGE_SIZE, limit)); memset(slb_shadow, 0, size); } static struct slb_shadow * __init init_slb_shadow(int cpu) { - struct slb_shadow *s = &slb_shadow[cpu]; + struct slb_shadow *s; + + if (early_radix_enabled()) + return NULL; + + s = &slb_shadow[cpu]; /* * When we come through here to initialise boot_paca, the slb_shadow @@ -215,7 +224,7 @@ void __init allocate_pacas(void) paca = __va(memblock_alloc_base(paca_size, PAGE_SIZE, limit)); memset(paca, 0, paca_size); - printk(KERN_DEBUG "Allocated %u bytes for %d pacas at %p\n", + printk(KERN_DEBUG "Allocated %u bytes for %u pacas at %p\n", paca_size, nr_cpu_ids, paca); allocate_lppacas(nr_cpu_ids, limit); diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index 341a7469cab8..02831a396419 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -373,9 +373,8 @@ static int pci_read_irq_line(struct pci_dev *pci_dev) if (virq) irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW); } else { - pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n", - oirq.args_count, oirq.args[0], oirq.args[1], - of_node_full_name(oirq.np)); + pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %pOF\n", + oirq.args_count, oirq.args[0], oirq.args[1], oirq.np); virq = irq_create_of_mapping(&oirq); } @@ -741,8 +740,8 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose, struct of_pci_range range; struct of_pci_range_parser parser; - printk(KERN_INFO "PCI host bridge %s %s ranges:\n", - dev->full_name, primary ? "(primary)" : ""); + printk(KERN_INFO "PCI host bridge %pOF %s ranges:\n", + dev, primary ? 
"(primary)" : ""); /* Check for ranges property */ if (of_pci_range_parser_init(&parser, dev)) @@ -1556,8 +1555,8 @@ static void pcibios_setup_phb_resources(struct pci_controller *hose, if (!res->flags) { pr_debug("PCI: I/O resource not set for host" - " bridge %s (domain %d)\n", - hose->dn->full_name, hose->global_number); + " bridge %pOF (domain %d)\n", + hose->dn, hose->global_number); } else { offset = pcibios_io_space_offset(hose); @@ -1668,7 +1667,7 @@ void pcibios_scan_phb(struct pci_controller *hose) struct device_node *node = hose->dn; int mode; - pr_debug("PCI: Scanning PHB %s\n", of_node_full_name(node)); + pr_debug("PCI: Scanning PHB %pOF\n", node); /* Get some IO space for the new PHB */ pcibios_setup_phb_io_space(hose); diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c index 41c86c6b6e4d..1d817f4d97d9 100644 --- a/arch/powerpc/kernel/pci_32.c +++ b/arch/powerpc/kernel/pci_32.c @@ -79,8 +79,8 @@ make_one_node_map(struct device_node* node, u8 pci_bus) return; bus_range = of_get_property(node, "bus-range", &len); if (bus_range == NULL || len < 2 * sizeof(int)) { - printk(KERN_WARNING "Can't get bus-range for %s, " - "assuming it starts at 0\n", node->full_name); + printk(KERN_WARNING "Can't get bus-range for %pOF, " + "assuming it starts at 0\n", node); pci_to_OF_bus_map[pci_bus] = 0; } else pci_to_OF_bus_map[pci_bus] = bus_range[0]; diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c index ed5e9ff61a68..932b9741aa8f 100644 --- a/arch/powerpc/kernel/pci_64.c +++ b/arch/powerpc/kernel/pci_64.c @@ -111,7 +111,7 @@ int pcibios_unmap_io_space(struct pci_bus *bus) if (hose->io_base_alloc == NULL) return 0; - pr_debug("IO unmapping for PHB %s\n", hose->dn->full_name); + pr_debug("IO unmapping for PHB %pOF\n", hose->dn); pr_debug(" alloc=0x%p\n", hose->io_base_alloc); /* This is a PHB, we fully unmap the IO area */ @@ -151,7 +151,7 @@ static int pcibios_map_phb_io_space(struct pci_controller *hose) hose->io_base_virt = (void __iomem *)(area->addr + hose->io_base_phys - phys_page); - pr_debug("IO mapping for PHB %s\n", hose->dn->full_name); + pr_debug("IO mapping for PHB %pOF\n", hose->dn); pr_debug(" phys=0x%016llx, virt=0x%p (alloc=0x%p)\n", hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc); pr_debug(" size=0x%016llx (alloc=0x%016lx)\n", diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c index 592693437070..0e395afbf0f4 100644 --- a/arch/powerpc/kernel/pci_dn.c +++ b/arch/powerpc/kernel/pci_dn.c @@ -139,7 +139,6 @@ struct pci_dn *pci_get_pdn(struct pci_dev *pdev) #ifdef CONFIG_PCI_IOV static struct pci_dn *add_one_dev_pci_data(struct pci_dn *parent, - struct pci_dev *pdev, int vf_index, int busno, int devfn) { @@ -150,10 +149,8 @@ static struct pci_dn *add_one_dev_pci_data(struct pci_dn *parent, return NULL; pdn = kzalloc(sizeof(*pdn), GFP_KERNEL); - if (!pdn) { - dev_warn(&pdev->dev, "%s: Out of memory!\n", __func__); + if (!pdn) return NULL; - } pdn->phb = parent->phb; pdn->parent = parent; @@ -167,13 +164,6 @@ static struct pci_dn *add_one_dev_pci_data(struct pci_dn *parent, INIT_LIST_HEAD(&pdn->list); list_add_tail(&pdn->list, &parent->child_list); - /* - * If we already have PCI device instance, lets - * bind them. 
- */ - if (pdev) - pdev->dev.archdata.pci_data = pdn; - return pdn; } #endif @@ -201,7 +191,7 @@ struct pci_dn *add_dev_pci_data(struct pci_dev *pdev) for (i = 0; i < pci_sriov_get_totalvfs(pdev); i++) { struct eeh_dev *edev __maybe_unused; - pdn = add_one_dev_pci_data(parent, NULL, i, + pdn = add_one_dev_pci_data(parent, i, pci_iov_virtfn_bus(pdev, i), pci_iov_virtfn_devfn(pdev, i)); if (!pdn) { @@ -303,7 +293,6 @@ struct pci_dn *pci_add_device_node_info(struct pci_controller *hose, if (pdn == NULL) return NULL; dn->data = pdn; - pdn->node = dn; pdn->phb = hose; #ifdef CONFIG_PPC_POWERNV pdn->pe_number = IODA_INVALID_PE; @@ -352,6 +341,7 @@ EXPORT_SYMBOL_GPL(pci_add_device_node_info); void pci_remove_device_node_info(struct device_node *dn) { struct pci_dn *pdn = dn ? PCI_DN(dn) : NULL; + struct device_node *parent; #ifdef CONFIG_EEH struct eeh_dev *edev = pdn_to_eeh_dev(pdn); @@ -364,8 +354,10 @@ void pci_remove_device_node_info(struct device_node *dn) WARN_ON(!list_empty(&pdn->child_list)); list_del(&pdn->list); - if (pdn->parent) - of_node_put(pdn->parent->node); + + parent = of_get_parent(dn); + if (parent) + of_node_put(parent); dn->data = NULL; kfree(pdn); diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c index ea3d98115b88..0d790f8432d2 100644 --- a/arch/powerpc/kernel/pci_of_scan.c +++ b/arch/powerpc/kernel/pci_of_scan.c @@ -211,19 +211,19 @@ void of_scan_pci_bridge(struct pci_dev *dev) unsigned int flags; u64 size; - pr_debug("of_scan_pci_bridge(%s)\n", node->full_name); + pr_debug("of_scan_pci_bridge(%pOF)\n", node); /* parse bus-range property */ busrange = of_get_property(node, "bus-range", &len); if (busrange == NULL || len != 8) { - printk(KERN_DEBUG "Can't get bus-range for PCI-PCI bridge %s\n", - node->full_name); + printk(KERN_DEBUG "Can't get bus-range for PCI-PCI bridge %pOF\n", + node); return; } ranges = of_get_property(node, "ranges", &len); if (ranges == NULL) { - printk(KERN_DEBUG "Can't get ranges for PCI-PCI bridge %s\n", - node->full_name); + printk(KERN_DEBUG "Can't get ranges for PCI-PCI bridge %pOF\n", + node); return; } @@ -233,8 +233,8 @@ void of_scan_pci_bridge(struct pci_dev *dev) bus = pci_add_new_bus(dev->bus, dev, of_read_number(busrange, 1)); if (!bus) { - printk(KERN_ERR "Failed to create pci bus for %s\n", - node->full_name); + printk(KERN_ERR "Failed to create pci bus for %pOF\n", + node); return; } } @@ -262,13 +262,13 @@ void of_scan_pci_bridge(struct pci_dev *dev) res = bus->resource[0]; if (res->flags) { printk(KERN_ERR "PCI: ignoring extra I/O range" - " for bridge %s\n", node->full_name); + " for bridge %pOF\n", node); continue; } } else { if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) { printk(KERN_ERR "PCI: too many memory ranges" - " for bridge %s\n", node->full_name); + " for bridge %pOF\n", node); continue; } res = bus->resource[i]; @@ -307,7 +307,7 @@ static struct pci_dev *of_scan_pci_dev(struct pci_bus *bus, struct eeh_dev *edev = pdn_to_eeh_dev(PCI_DN(dn)); #endif - pr_debug(" * %s\n", dn->full_name); + pr_debug(" * %pOF\n", dn); if (!of_device_is_available(dn)) return NULL; @@ -350,8 +350,8 @@ static void __of_scan_bus(struct device_node *node, struct pci_bus *bus, struct device_node *child; struct pci_dev *dev; - pr_debug("of_scan_bus(%s) bus no %d...\n", - node->full_name, bus->number); + pr_debug("of_scan_bus(%pOF) bus no %d...\n", + node, bus->number); /* Scan direct children */ for_each_child_of_node(node, child) { diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c 
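The pci_remove_device_node_info() hunk above stops caching the parent node in struct pci_dn and looks it up on demand instead. The usual pairing is that of_get_parent() returns the parent with its refcount raised and the caller drops that reference with of_node_put() when done. A small hedged sketch of the pairing (the function name is invented):

#include <linux/errno.h>
#include <linux/of.h>

static int inspect_parent(struct device_node *dn)
{
        struct device_node *parent;

        parent = of_get_parent(dn);     /* takes a reference, may be NULL */
        if (!parent)
                return -ENODEV;

        /* ... examine the parent's properties here ... */

        of_node_put(parent);            /* drop the reference taken above */
        return 0;
}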
index 1f0fd361e09b..a0c74bbf3454 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -230,7 +230,8 @@ void enable_kernel_fp(void) } EXPORT_SYMBOL(enable_kernel_fp); -static int restore_fp(struct task_struct *tsk) { +static int restore_fp(struct task_struct *tsk) +{ if (tsk->thread.load_fp || msr_tm_active(tsk->thread.regs->msr)) { load_fp_state(¤t->thread.fp_state); current->thread.load_fp++; @@ -330,11 +331,19 @@ static inline int restore_altivec(struct task_struct *tsk) { return 0; } #ifdef CONFIG_VSX static void __giveup_vsx(struct task_struct *tsk) { - if (tsk->thread.regs->msr & MSR_FP) + unsigned long msr = tsk->thread.regs->msr; + + /* + * We should never be ssetting MSR_VSX without also setting + * MSR_FP and MSR_VEC + */ + WARN_ON((msr & MSR_VSX) && !((msr & MSR_FP) && (msr & MSR_VEC))); + + /* __giveup_fpu will clear MSR_VSX */ + if (msr & MSR_FP) __giveup_fpu(tsk); - if (tsk->thread.regs->msr & MSR_VEC) + if (msr & MSR_VEC) __giveup_altivec(tsk); - tsk->thread.regs->msr &= ~MSR_VSX; } static void giveup_vsx(struct task_struct *tsk) @@ -346,14 +355,6 @@ static void giveup_vsx(struct task_struct *tsk) msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX); } -static void save_vsx(struct task_struct *tsk) -{ - if (tsk->thread.regs->msr & MSR_FP) - save_fpu(tsk); - if (tsk->thread.regs->msr & MSR_VEC) - save_altivec(tsk); -} - void enable_kernel_vsx(void) { unsigned long cpumsr; @@ -374,10 +375,6 @@ void enable_kernel_vsx(void) */ if(!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr)) return; - if (current->thread.regs->msr & MSR_FP) - __giveup_fpu(current); - if (current->thread.regs->msr & MSR_VEC) - __giveup_altivec(current); __giveup_vsx(current); } } @@ -407,7 +404,6 @@ static int restore_vsx(struct task_struct *tsk) } #else static inline int restore_vsx(struct task_struct *tsk) { return 0; } -static inline void save_vsx(struct task_struct *tsk) { } #endif /* CONFIG_VSX */ #ifdef CONFIG_SPE @@ -487,6 +483,8 @@ void giveup_all(struct task_struct *tsk) msr_check_and_set(msr_all_available); check_if_tm_restore_required(tsk); + WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC))); + #ifdef CONFIG_PPC_FPU if (usermsr & MSR_FP) __giveup_fpu(tsk); @@ -495,10 +493,6 @@ void giveup_all(struct task_struct *tsk) if (usermsr & MSR_VEC) __giveup_altivec(tsk); #endif -#ifdef CONFIG_VSX - if (usermsr & MSR_VSX) - __giveup_vsx(tsk); -#endif #ifdef CONFIG_SPE if (usermsr & MSR_SPE) __giveup_spe(tsk); @@ -553,19 +547,13 @@ void save_all(struct task_struct *tsk) msr_check_and_set(msr_all_available); - /* - * Saving the way the register space is in hardware, save_vsx boils - * down to a save_fpu() and save_altivec() - */ - if (usermsr & MSR_VSX) { - save_vsx(tsk); - } else { - if (usermsr & MSR_FP) - save_fpu(tsk); + WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC))); - if (usermsr & MSR_VEC) - save_altivec(tsk); - } + if (usermsr & MSR_FP) + save_fpu(tsk); + + if (usermsr & MSR_VEC) + save_altivec(tsk); if (usermsr & MSR_SPE) __giveup_spe(tsk); @@ -1392,13 +1380,13 @@ void show_regs(struct pt_regs * regs) show_regs_print_info(KERN_DEFAULT); - printk("NIP: "REG" LR: "REG" CTR: "REG"\n", + printk("NIP: "REG" LR: "REG" CTR: "REG"\n", regs->nip, regs->link, regs->ctr); printk("REGS: %p TRAP: %04lx %s (%s)\n", regs, regs->trap, print_tainted(), init_utsname()->release); - printk("MSR: "REG" ", regs->msr); + printk("MSR: "REG" ", regs->msr); print_msr_bits(regs->msr); - printk(" CR: %08lx XER: %08lx\n", regs->ccr, 
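The WARN_ONs added to __giveup_vsx(), giveup_all() and save_all() above all encode the same invariant: a thread's user MSR should never have MSR_VSX set unless MSR_FP and MSR_VEC are also set, because the VSX register state overlaps the FP and VMX register files. Expressed as a stand-alone predicate, purely as a sketch of the rule the warnings check:

#include <linux/types.h>
#include <asm/reg.h>

/* True when the VSX/FP/VEC bits in a user MSR image are mutually consistent. */
static inline bool msr_vsx_bits_consistent(unsigned long msr)
{
        if (!(msr & MSR_VSX))
                return true;
        return (msr & MSR_FP) && (msr & MSR_VEC);
}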
regs->xer); + pr_cont(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer); trap = TRAP(regs); if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR)) pr_cont("CFAR: "REG" ", regs->orig_gpr3); @@ -1991,11 +1979,25 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) void notrace __ppc64_runlatch_on(void) { struct thread_info *ti = current_thread_info(); - unsigned long ctrl; - ctrl = mfspr(SPRN_CTRLF); - ctrl |= CTRL_RUNLATCH; - mtspr(SPRN_CTRLT, ctrl); + if (cpu_has_feature(CPU_FTR_ARCH_206)) { + /* + * Least significant bit (RUN) is the only writable bit of + * the CTRL register, so we can avoid mfspr. 2.06 is not the + * earliest ISA where this is the case, but it's convenient. + */ + mtspr(SPRN_CTRLT, CTRL_RUNLATCH); + } else { + unsigned long ctrl; + + /* + * Some architectures (e.g., Cell) have writable fields other + * than RUN, so do the read-modify-write. + */ + ctrl = mfspr(SPRN_CTRLF); + ctrl |= CTRL_RUNLATCH; + mtspr(SPRN_CTRLT, ctrl); + } ti->local_flags |= _TLF_RUNLATCH; } @@ -2004,13 +2006,18 @@ void notrace __ppc64_runlatch_on(void) void notrace __ppc64_runlatch_off(void) { struct thread_info *ti = current_thread_info(); - unsigned long ctrl; ti->local_flags &= ~_TLF_RUNLATCH; - ctrl = mfspr(SPRN_CTRLF); - ctrl &= ~CTRL_RUNLATCH; - mtspr(SPRN_CTRLT, ctrl); + if (cpu_has_feature(CPU_FTR_ARCH_206)) { + mtspr(SPRN_CTRLT, 0); + } else { + unsigned long ctrl; + + ctrl = mfspr(SPRN_CTRLF); + ctrl &= ~CTRL_RUNLATCH; + mtspr(SPRN_CTRLT, ctrl); + } } #endif /* CONFIG_PPC64 */ diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 613f79f03877..02190e90c7ae 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -177,6 +177,7 @@ struct platform_support { bool hash_mmu; bool radix_mmu; bool radix_gtse; + bool xive; }; /* Platforms codes are now obsolete in the kernel. 
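The __ppc64_runlatch_on/off rework above keeps the external contract unchanged: callers bracket idle periods with the runlatch helpers so the hardware knows the thread is not doing useful work. A hedged usage sketch (the idle function here is illustrative, not the kernel's actual idle loop):

#include <asm/runlatch.h>

static void example_idle_once(void)
{
        ppc64_runlatch_off();   /* entering idle: clear the run latch */

        /* ... wait for an interrupt or enter a nap/stop state ... */

        ppc64_runlatch_on();    /* back to useful work: set it again */
}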
Now only used within this @@ -1041,6 +1042,27 @@ static void __init prom_parse_mmu_model(u8 val, } } +static void __init prom_parse_xive_model(u8 val, + struct platform_support *support) +{ + switch (val) { + case OV5_FEAT(OV5_XIVE_EITHER): /* Either Available */ + prom_debug("XIVE - either mode supported\n"); + support->xive = true; + break; + case OV5_FEAT(OV5_XIVE_EXPLOIT): /* Only Exploitation mode */ + prom_debug("XIVE - exploitation mode supported\n"); + support->xive = true; + break; + case OV5_FEAT(OV5_XIVE_LEGACY): /* Only Legacy mode */ + prom_debug("XIVE - legacy mode supported\n"); + break; + default: + prom_debug("Unknown xive support option: 0x%x\n", val); + break; + } +} + static void __init prom_parse_platform_support(u8 index, u8 val, struct platform_support *support) { @@ -1054,6 +1076,10 @@ static void __init prom_parse_platform_support(u8 index, u8 val, support->radix_gtse = true; } break; + case OV5_INDX(OV5_XIVE_SUPPORT): /* Interrupt mode */ + prom_parse_xive_model(val & OV5_FEAT(OV5_XIVE_SUPPORT), + support); + break; } } @@ -1062,7 +1088,8 @@ static void __init prom_check_platform_support(void) struct platform_support supported = { .hash_mmu = false, .radix_mmu = false, - .radix_gtse = false + .radix_gtse = false, + .xive = false }; int prop_len = prom_getproplen(prom.chosen, "ibm,arch-vec-5-platform-support"); @@ -1095,6 +1122,11 @@ static void __init prom_check_platform_support(void) /* We're probably on a legacy hypervisor */ prom_debug("Assuming legacy hash support\n"); } + + if (supported.xive) { + prom_debug("Asking for XIVE\n"); + ibm_architecture_vec.vec5.intarch = OV5_FEAT(OV5_XIVE_EXPLOIT); + } } static void __init prom_send_capabilities(void) diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 660ed39e9c9a..f52ad5bb7109 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -131,7 +131,7 @@ static void flush_tmregs_to_thread(struct task_struct *tsk) * in the appropriate thread structures from live. 
*/ - if (tsk != current) + if ((!cpu_has_feature(CPU_FTR_TM)) || (tsk != current)) return; if (MSR_TM_SUSPENDED(mfmsr())) { @@ -1594,11 +1594,8 @@ static int ppr_get(struct task_struct *target, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { - int ret; - - ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, - &target->thread.ppr, 0, sizeof(u64)); - return ret; + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &target->thread.ppr, 0, sizeof(u64)); } static int ppr_set(struct task_struct *target, @@ -1606,11 +1603,8 @@ static int ppr_set(struct task_struct *target, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { - int ret; - - ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, - &target->thread.ppr, 0, sizeof(u64)); - return ret; + return user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.ppr, 0, sizeof(u64)); } static int dscr_get(struct task_struct *target, @@ -1618,22 +1612,16 @@ static int dscr_get(struct task_struct *target, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { - int ret; - - ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, - &target->thread.dscr, 0, sizeof(u64)); - return ret; + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &target->thread.dscr, 0, sizeof(u64)); } static int dscr_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { - int ret; - - ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, - &target->thread.dscr, 0, sizeof(u64)); - return ret; + return user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.dscr, 0, sizeof(u64)); } #endif #ifdef CONFIG_PPC_BOOK3S_64 @@ -1642,22 +1630,16 @@ static int tar_get(struct task_struct *target, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { - int ret; - - ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, - &target->thread.tar, 0, sizeof(u64)); - return ret; + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &target->thread.tar, 0, sizeof(u64)); } static int tar_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { - int ret; - - ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, - &target->thread.tar, 0, sizeof(u64)); - return ret; + return user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.tar, 0, sizeof(u64)); } static int ebb_active(struct task_struct *target, diff --git a/arch/powerpc/kernel/reloc_64.S b/arch/powerpc/kernel/reloc_64.S index d88736fbece6..e8cfc69f59ae 100644 --- a/arch/powerpc/kernel/reloc_64.S +++ b/arch/powerpc/kernel/reloc_64.S @@ -82,7 +82,7 @@ _GLOBAL(relocate) 6: blr .balign 8 -p_dyn: .llong __dynamic_start - 0b -p_rela: .llong __rela_dyn_start - 0b -p_st: .llong _stext - 0b +p_dyn: .8byte __dynamic_start - 0b +p_rela: .8byte __rela_dyn_start - 0b +p_st: .8byte _stext - 0b diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index b8a4987f58cf..1643e9e53655 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -914,7 +914,7 @@ int rtas_online_cpus_mask(cpumask_var_t cpus) if (ret) { cpumask_var_t tmp_mask; - if (!alloc_cpumask_var(&tmp_mask, GFP_TEMPORARY)) + if (!alloc_cpumask_var(&tmp_mask, GFP_KERNEL)) return ret; /* Use tmp_mask to preserve cpus mask from first failure */ @@ -962,7 +962,7 @@ int rtas_ibm_suspend_me(u64 handle) return -EIO; } - if (!alloc_cpumask_var(&offline_mask, 
GFP_TEMPORARY)) + if (!alloc_cpumask_var(&offline_mask, GFP_KERNEL)) return -ENOMEM; atomic_set(&data.working, 0); diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c index 73f1934582c2..c2b148b1634a 100644 --- a/arch/powerpc/kernel/rtas_pci.c +++ b/arch/powerpc/kernel/rtas_pci.c @@ -91,26 +91,14 @@ static int rtas_pci_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { - struct device_node *busdn, *dn; struct pci_dn *pdn; - bool found = false; int ret; - /* Search only direct children of the bus */ *val = 0xFFFFFFFF; - busdn = pci_bus_to_OF_node(bus); - for (dn = busdn->child; dn; dn = dn->sibling) { - pdn = PCI_DN(dn); - if (pdn && pdn->devfn == devfn - && of_device_is_available(dn)) { - found = true; - break; - } - } - if (!found) - return PCIBIOS_DEVICE_NOT_FOUND; + pdn = pci_get_pdn_by_devfn(bus, devfn); + /* Validity of pdn is checked in here */ ret = rtas_read_config(pdn, where, size, val); if (*val == EEH_IO_ERROR_VALUE(size) && eeh_dev_check_failure(pdn_to_eeh_dev(pdn))) @@ -153,24 +141,11 @@ static int rtas_pci_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { - struct device_node *busdn, *dn; struct pci_dn *pdn; - bool found = false; - /* Search only direct children of the bus */ - busdn = pci_bus_to_OF_node(bus); - for (dn = busdn->child; dn; dn = dn->sibling) { - pdn = PCI_DN(dn); - if (pdn && pdn->devfn == devfn - && of_device_is_available(dn)) { - found = true; - break; - } - } - - if (!found) - return PCIBIOS_DEVICE_NOT_FOUND; + pdn = pci_get_pdn_by_devfn(bus, devfn); + /* Validity of pdn is checked in here. */ return rtas_write_config(pdn, where, size, val); } diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 94a948207cd2..2e3bc16d02b2 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -481,7 +481,7 @@ void __init smp_setup_cpu_maps(void) __be32 cpu_be; int j, len; - DBG(" * %s...\n", dn->full_name); + DBG(" * %pOF...\n", dn); intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len); @@ -551,7 +551,7 @@ void __init smp_setup_cpu_maps(void) if (maxcpus > nr_cpu_ids) { printk(KERN_WARNING "Partition configured for %d cpus, " - "operating system maximum is %d.\n", + "operating system maximum is %u.\n", maxcpus, nr_cpu_ids); maxcpus = nr_cpu_ids; } else @@ -704,30 +704,6 @@ int check_legacy_ioport(unsigned long base_port) } EXPORT_SYMBOL(check_legacy_ioport); -static int ppc_panic_event(struct notifier_block *this, - unsigned long event, void *ptr) -{ - /* - * If firmware-assisted dump has been registered then trigger - * firmware-assisted dump and let firmware handle everything else. - */ - crash_fadump(NULL, ptr); - ppc_md.panic(ptr); /* May not return */ - return NOTIFY_DONE; -} - -static struct notifier_block ppc_panic_block = { - .notifier_call = ppc_panic_event, - .priority = INT_MIN /* may not return; must be done last */ -}; - -void __init setup_panic(void) -{ - if (!ppc_md.panic) - return; - atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block); -} - #ifdef CONFIG_CHECK_CACHE_COHERENCY /* * For platforms that have configurable cache-coherency. This function @@ -872,9 +848,6 @@ void __init setup_arch(char **cmdline_p) /* Probe the machine type, establish ppc_md. */ probe_machine(); - /* Setup panic notifier if requested by the platform. */ - setup_panic(); - /* * Configure ppc_md.power_save (ppc32 only, 64-bit machines do * it from their respective probe() function. 
@@ -916,13 +889,6 @@ void __init setup_arch(char **cmdline_p) /* Reserve large chunks of memory for use by CMA for KVM. */ kvm_cma_reserve(); - /* - * Reserve any gigantic pages requested on the command line. - * memblock needs to have been initialized by the time this is - * called since this will reserve memory. - */ - reserve_hugetlb_gpages(); - klp_init_thread_info(&init_thread_info); init_mm.start_code = (unsigned long)_stext; @@ -938,9 +904,6 @@ void __init setup_arch(char **cmdline_p) #endif #endif -#ifdef CONFIG_PPC_64K_PAGES - init_mm.context.pte_frag = NULL; -#endif #ifdef CONFIG_SPAPR_TCE_IOMMU mm_iommu_init(&init_mm); #endif diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 2f88f6cf1a42..51ebc01fff52 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -98,6 +98,9 @@ extern unsigned int memset_nocache_branch; /* Insn to be replaced by NOP */ notrace void __init machine_init(u64 dt_ptr) { + unsigned int *addr = &memset_nocache_branch; + unsigned long insn; + /* Configure static keys first, now that we're relocated. */ setup_feature_keys(); @@ -105,7 +108,9 @@ notrace void __init machine_init(u64 dt_ptr) udbg_early_init(); patch_instruction((unsigned int *)&memcpy, PPC_INST_NOP); - patch_instruction(&memset_nocache_branch, PPC_INST_NOP); + + insn = create_cond_branch(addr, branch_target(addr), 0x820000); + patch_instruction(addr, insn); /* replace b by bne cr0 */ /* Do some early initialization based on the flat device tree */ early_init_devtree(__va(dt_ptr)); diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index af23d4b576ec..b89c6aac48c9 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -564,6 +564,9 @@ static __init u64 safe_stack_limit(void) /* Other BookE, we assume the first GB is bolted */ return 1ul << 30; #else + if (early_radix_enabled()) + return ULONG_MAX; + /* BookS, the first segment is bolted */ if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) return 1UL << SID_SHIFT_1T; @@ -578,7 +581,8 @@ void __init irqstack_early_init(void) /* * Interrupt stacks must be in the first segment since we - * cannot afford to take SLB misses on them. + * cannot afford to take SLB misses on them. They are not + * accessed in realmode. */ for_each_possible_cpu(i) { softirq_ctx[i] = (struct thread_info *) @@ -649,8 +653,9 @@ void __init emergency_stack_init(void) * aligned. * * Since we use these as temporary stacks during secondary CPU - * bringup, we need to get at them in real mode. This means they - * must also be within the RMO region. + * bringup, machine check, system reset, and HMI, we need to get + * at them in real mode. This means they must also be within the RMO + * region. * * The IRQ stacks allocated elsewhere in this file are zeroed and * initialized in kernel/irq.c. These are initialized here in order @@ -751,3 +756,31 @@ unsigned long memory_block_size_bytes(void) struct ppc_pci_io ppc_pci_io; EXPORT_SYMBOL(ppc_pci_io); #endif + +#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF +u64 hw_nmi_get_sample_period(int watchdog_thresh) +{ + return ppc_proc_freq * watchdog_thresh; +} +#endif + +/* + * The perf based hardlockup detector breaks PMU event based branches, so + * disable it by default. Book3S has a soft-nmi hardlockup detector based + * on the decrementer interrupt, so it does not suffer from this problem. + * + * It is likely to get false positives in VM guests, so disable it there + * by default too. 
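For the perf-based hardlockup detector handled above, hw_nmi_get_sample_period() is expected to return the sample period of the cycles event, so multiplying ppc_proc_freq (Hz) by watchdog_thresh (seconds) gives a cycle count directly. A trivial stand-alone illustration of the unit arithmetic, with example numbers only:

#include <stdio.h>

int main(void)
{
        /* Example values: a 3.8 GHz core and the default 10 s threshold. */
        unsigned long long proc_freq_hz = 3800000000ull;
        unsigned int watchdog_thresh_s = 10;

        unsigned long long period_cycles = proc_freq_hz * watchdog_thresh_s;

        printf("perf sample period = %llu cycles\n", period_cycles);
        return 0;
}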
+ */ +static int __init disable_hardlockup_detector(void) +{ +#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF + hardlockup_detector_disable(); +#else + if (firmware_has_feature(FW_FEATURE_LPAR)) + hardlockup_detector_disable(); +#endif + + return 0; +} +early_initcall(disable_hardlockup_detector); diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 97bb1385e771..92fb1c8dbbd8 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -913,42 +913,40 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *d, const siginfo_t *s) */ err = __put_user(s->si_signo, &d->si_signo); err |= __put_user(s->si_errno, &d->si_errno); - err |= __put_user((short)s->si_code, &d->si_code); + err |= __put_user(s->si_code, &d->si_code); if (s->si_code < 0) err |= __copy_to_user(&d->_sifields._pad, &s->_sifields._pad, SI_PAD_SIZE32); - else switch(s->si_code >> 16) { - case __SI_CHLD >> 16: + else switch(siginfo_layout(s->si_signo, s->si_code)) { + case SIL_CHLD: err |= __put_user(s->si_pid, &d->si_pid); err |= __put_user(s->si_uid, &d->si_uid); err |= __put_user(s->si_utime, &d->si_utime); err |= __put_user(s->si_stime, &d->si_stime); err |= __put_user(s->si_status, &d->si_status); break; - case __SI_FAULT >> 16: + case SIL_FAULT: err |= __put_user((unsigned int)(unsigned long)s->si_addr, &d->si_addr); break; - case __SI_POLL >> 16: + case SIL_POLL: err |= __put_user(s->si_band, &d->si_band); err |= __put_user(s->si_fd, &d->si_fd); break; - case __SI_TIMER >> 16: + case SIL_TIMER: err |= __put_user(s->si_tid, &d->si_tid); err |= __put_user(s->si_overrun, &d->si_overrun); err |= __put_user(s->si_int, &d->si_int); break; - case __SI_SYS >> 16: + case SIL_SYS: err |= __put_user(ptr_to_compat(s->si_call_addr), &d->si_call_addr); err |= __put_user(s->si_syscall, &d->si_syscall); err |= __put_user(s->si_arch, &d->si_arch); break; - case __SI_RT >> 16: /* This is not generated by the kernel as of now. */ - case __SI_MESGQ >> 16: + case SIL_RT: err |= __put_user(s->si_int, &d->si_int); /* fallthrough */ - case __SI_KILL >> 16: - default: + case SIL_KILL: err |= __put_user(s->si_pid, &d->si_pid); err |= __put_user(s->si_uid, &d->si_uid); break; diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index c83c115858c1..b2c002993d78 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -452,9 +452,20 @@ static long restore_tm_sigcontexts(struct task_struct *tsk, if (MSR_TM_RESV(msr)) return -EINVAL; - /* pull in MSR TM from user context */ + /* pull in MSR TS bits from user context */ regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK); + /* + * Ensure that TM is enabled in regs->msr before we leave the signal + * handler. It could be the case that (a) user disabled the TM bit + * through the manipulation of the MSR bits in uc_mcontext or (b) the + * TM bit was disabled because a sufficient number of context switches + * happened whilst in the signal handler and load_tm overflowed, + * disabling the TM bit. In either case we can end up with an illegal + * TM state leading to a TM Bad Thing when we return to userspace. 
+ */ + regs->msr |= MSR_TM; + /* pull in MSR LE from user context */ regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE); diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 8d3320562c70..e0a4c1f82e25 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -75,9 +75,11 @@ static DEFINE_PER_CPU(int, cpu_state) = { 0 }; struct thread_info *secondary_ti; DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map); +DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map); DEFINE_PER_CPU(cpumask_var_t, cpu_core_map); EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); +EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map); EXPORT_PER_CPU_SYMBOL(cpu_core_map); /* SMP operations for this machine */ @@ -571,6 +573,26 @@ static void smp_store_cpu_info(int id) #endif } +/* + * Relationships between CPUs are maintained in a set of per-cpu cpumasks so + * rather than just passing around the cpumask we pass around a function that + * returns the that cpumask for the given CPU. + */ +static void set_cpus_related(int i, int j, struct cpumask *(*get_cpumask)(int)) +{ + cpumask_set_cpu(i, get_cpumask(j)); + cpumask_set_cpu(j, get_cpumask(i)); +} + +#ifdef CONFIG_HOTPLUG_CPU +static void set_cpus_unrelated(int i, int j, + struct cpumask *(*get_cpumask)(int)) +{ + cpumask_clear_cpu(i, get_cpumask(j)); + cpumask_clear_cpu(j, get_cpumask(i)); +} +#endif + void __init smp_prepare_cpus(unsigned int max_cpus) { unsigned int cpu; @@ -590,6 +612,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus) for_each_possible_cpu(cpu) { zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu), GFP_KERNEL, cpu_to_node(cpu)); + zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu), + GFP_KERNEL, cpu_to_node(cpu)); zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu), GFP_KERNEL, cpu_to_node(cpu)); /* @@ -602,7 +626,9 @@ void __init smp_prepare_cpus(unsigned int max_cpus) } } + /* Init the cpumasks so the boot CPU is related to itself */ cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid)); + cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid)); cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid)); if (smp_ops && smp_ops->probe) @@ -828,33 +854,6 @@ int cpu_first_thread_of_core(int core) } EXPORT_SYMBOL_GPL(cpu_first_thread_of_core); -static void traverse_siblings_chip_id(int cpu, bool add, int chipid) -{ - const struct cpumask *mask; - struct device_node *np; - int i, plen; - const __be32 *prop; - - mask = add ? cpu_online_mask : cpu_present_mask; - for_each_cpu(i, mask) { - np = of_get_cpu_node(i, NULL); - if (!np) - continue; - prop = of_get_property(np, "ibm,chip-id", &plen); - if (prop && plen == sizeof(int) && - of_read_number(prop, 1) == chipid) { - if (add) { - cpumask_set_cpu(cpu, cpu_core_mask(i)); - cpumask_set_cpu(i, cpu_core_mask(cpu)); - } else { - cpumask_clear_cpu(cpu, cpu_core_mask(i)); - cpumask_clear_cpu(i, cpu_core_mask(cpu)); - } - } - of_node_put(np); - } -} - /* Must be called when no change can occur to cpu_present_mask, * i.e. during cpu online or offline. 
*/ @@ -877,52 +876,93 @@ static struct device_node *cpu_to_l2cache(int cpu) return cache; } -static void traverse_core_siblings(int cpu, bool add) +static bool update_mask_by_l2(int cpu, struct cpumask *(*mask_fn)(int)) { struct device_node *l2_cache, *np; - const struct cpumask *mask; - int i, chip, plen; - const __be32 *prop; - - /* First see if we have ibm,chip-id properties in cpu nodes */ - np = of_get_cpu_node(cpu, NULL); - if (np) { - chip = -1; - prop = of_get_property(np, "ibm,chip-id", &plen); - if (prop && plen == sizeof(int)) - chip = of_read_number(prop, 1); - of_node_put(np); - if (chip >= 0) { - traverse_siblings_chip_id(cpu, add, chip); - return; - } - } + int i; l2_cache = cpu_to_l2cache(cpu); - mask = add ? cpu_online_mask : cpu_present_mask; - for_each_cpu(i, mask) { + if (!l2_cache) + return false; + + for_each_cpu(i, cpu_online_mask) { + /* + * when updating the marks the current CPU has not been marked + * online, but we need to update the cache masks + */ np = cpu_to_l2cache(i); if (!np) continue; - if (np == l2_cache) { - if (add) { - cpumask_set_cpu(cpu, cpu_core_mask(i)); - cpumask_set_cpu(i, cpu_core_mask(cpu)); - } else { - cpumask_clear_cpu(cpu, cpu_core_mask(i)); - cpumask_clear_cpu(i, cpu_core_mask(cpu)); - } - } + + if (np == l2_cache) + set_cpus_related(cpu, i, mask_fn); + of_node_put(np); } of_node_put(l2_cache); + + return true; } +#ifdef CONFIG_HOTPLUG_CPU +static void remove_cpu_from_masks(int cpu) +{ + int i; + + /* NB: cpu_core_mask is a superset of the others */ + for_each_cpu(i, cpu_core_mask(cpu)) { + set_cpus_unrelated(cpu, i, cpu_core_mask); + set_cpus_unrelated(cpu, i, cpu_l2_cache_mask); + set_cpus_unrelated(cpu, i, cpu_sibling_mask); + } +} +#endif + +static void add_cpu_to_masks(int cpu) +{ + int first_thread = cpu_first_thread_sibling(cpu); + int chipid = cpu_to_chip_id(cpu); + int i; + + /* + * This CPU will not be in the online mask yet so we need to manually + * add it to it's own thread sibling mask. + */ + cpumask_set_cpu(cpu, cpu_sibling_mask(cpu)); + + for (i = first_thread; i < first_thread + threads_per_core; i++) + if (cpu_online(i)) + set_cpus_related(i, cpu, cpu_sibling_mask); + + /* + * Copy the thread sibling mask into the cache sibling mask + * and mark any CPUs that share an L2 with this CPU. + */ + for_each_cpu(i, cpu_sibling_mask(cpu)) + set_cpus_related(cpu, i, cpu_l2_cache_mask); + update_mask_by_l2(cpu, cpu_l2_cache_mask); + + /* + * Copy the cache sibling mask into core sibling mask and mark + * any CPUs on the same chip as this CPU. + */ + for_each_cpu(i, cpu_l2_cache_mask(cpu)) + set_cpus_related(cpu, i, cpu_core_mask); + + if (chipid == -1) + return; + + for_each_cpu(i, cpu_online_mask) + if (cpu_to_chip_id(i) == chipid) + set_cpus_related(cpu, i, cpu_core_mask); +} + +static bool shared_caches; + /* Activate a secondary processor. 
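The smp.c rework above replaces open-coded cpumask_set_cpu/cpumask_clear_cpu pairs with set_cpus_related()/set_cpus_unrelated() helpers that take the per-CPU mask accessor as a function pointer, so relating and unrelating two CPUs stays symmetric whichever mask (sibling, L2, core) is being updated. The shape of that pattern, reduced to a stand-alone toy with invented types:

#include <stdbool.h>
#include <stdio.h>

#define NR 4

/* Toy stand-in for per-CPU cpumasks: row[i][j] == "i and j are related". */
static bool sibling[NR][NR];
static bool l2[NR][NR];

static bool *sibling_row(int cpu) { return sibling[cpu]; }
static bool *l2_row(int cpu)      { return l2[cpu]; }

/* Pass the accessor rather than a mask so set/clear stay symmetric. */
static void set_related(int i, int j, bool *(*row)(int))
{
        row(i)[j] = true;
        row(j)[i] = true;
}

static void set_unrelated(int i, int j, bool *(*row)(int))
{
        row(i)[j] = false;
        row(j)[i] = false;
}

int main(void)
{
        set_related(0, 1, sibling_row);
        set_related(0, 2, l2_row);
        set_unrelated(0, 1, sibling_row);
        printf("0-1 siblings: %d, 0-2 share L2: %d\n", sibling[0][1], l2[0][2]);
        return 0;
}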
*/ void start_secondary(void *unused) { unsigned int cpu = smp_processor_id(); - int i, base; mmgrab(&init_mm); current->active_mm = &init_mm; @@ -945,22 +985,15 @@ void start_secondary(void *unused) vdso_getcpu_init(); #endif - /* Update sibling maps */ - base = cpu_first_thread_sibling(cpu); - for (i = 0; i < threads_per_core; i++) { - if (cpu_is_offline(base + i) && (cpu != base + i)) - continue; - cpumask_set_cpu(cpu, cpu_sibling_mask(base + i)); - cpumask_set_cpu(base + i, cpu_sibling_mask(cpu)); + /* Update topology CPU masks */ + add_cpu_to_masks(cpu); - /* cpu_core_map should be a superset of - * cpu_sibling_map even if we don't have cache - * information, so update the former here, too. - */ - cpumask_set_cpu(cpu, cpu_core_mask(base + i)); - cpumask_set_cpu(base + i, cpu_core_mask(cpu)); - } - traverse_core_siblings(cpu, true); + /* + * Check for any shared caches. Note that this must be done on a + * per-core basis because one core in the pair might be disabled. + */ + if (!cpumask_equal(cpu_l2_cache_mask(cpu), cpu_sibling_mask(cpu))) + shared_caches = true; set_numa_node(numa_cpu_lookup_table[cpu]); set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu])); @@ -1003,6 +1036,35 @@ static struct sched_domain_topology_level powerpc_topology[] = { { NULL, }, }; +/* + * P9 has a slightly odd architecture where pairs of cores share an L2 cache. + * This topology makes it *much* cheaper to migrate tasks between adjacent cores + * since the migrated task remains cache hot. We want to take advantage of this + * at the scheduler level so an extra topology level is required. + */ +static int powerpc_shared_cache_flags(void) +{ + return SD_SHARE_PKG_RESOURCES; +} + +/* + * We can't just pass cpu_l2_cache_mask() directly because + * returns a non-const pointer and the compiler barfs on that. + */ +static const struct cpumask *shared_cache_mask(int cpu) +{ + return cpu_l2_cache_mask(cpu); +} + +static struct sched_domain_topology_level power9_topology[] = { +#ifdef CONFIG_SCHED_SMT + { cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) }, +#endif + { shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) }, + { cpu_cpu_mask, SD_INIT_NAME(DIE) }, + { NULL, }, +}; + void __init smp_cpus_done(unsigned int max_cpus) { /* @@ -1016,14 +1078,23 @@ void __init smp_cpus_done(unsigned int max_cpus) dump_numa_cpu_topology(); - set_sched_topology(powerpc_topology); + /* + * If any CPU detects that it's sharing a cache with another CPU then + * use the deeper topology that is aware of this sharing. 
+ */ + if (shared_caches) { + pr_info("Using shared cache scheduler topology\n"); + set_sched_topology(power9_topology); + } else { + pr_info("Using standard scheduler topology\n"); + set_sched_topology(powerpc_topology); + } } #ifdef CONFIG_HOTPLUG_CPU int __cpu_disable(void) { int cpu = smp_processor_id(); - int base, i; int err; if (!smp_ops->cpu_disable) @@ -1034,14 +1105,7 @@ int __cpu_disable(void) return err; /* Update sibling maps */ - base = cpu_first_thread_sibling(cpu); - for (i = 0; i < threads_per_core && base + i < nr_cpu_ids; i++) { - cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i)); - cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu)); - cpumask_clear_cpu(cpu, cpu_core_mask(base + i)); - cpumask_clear_cpu(base + i, cpu_core_mask(cpu)); - } - traverse_core_siblings(cpu, false); + remove_cpu_from_masks(cpu); return 0; } diff --git a/arch/powerpc/kernel/swsusp_asm64.S b/arch/powerpc/kernel/swsusp_asm64.S index 988f38dced0f..82d8aae81c6a 100644 --- a/arch/powerpc/kernel/swsusp_asm64.S +++ b/arch/powerpc/kernel/swsusp_asm64.S @@ -179,7 +179,7 @@ nothing_to_copy: sld r3, r3, r0 li r0, 0 1: - dcbf r0,r3 + dcbf 0,r3 addi r3,r3,0x20 bdnz 1b diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S index 4d6b1d3a747f..7ccb7f81f8db 100644 --- a/arch/powerpc/kernel/systbl.S +++ b/arch/powerpc/kernel/systbl.S @@ -17,13 +17,13 @@ #include #ifdef CONFIG_PPC64 -#define SYSCALL(func) .llong DOTSYM(sys_##func),DOTSYM(sys_##func) -#define COMPAT_SYS(func) .llong DOTSYM(sys_##func),DOTSYM(compat_sys_##func) -#define PPC_SYS(func) .llong DOTSYM(ppc_##func),DOTSYM(ppc_##func) -#define OLDSYS(func) .llong DOTSYM(sys_ni_syscall),DOTSYM(sys_ni_syscall) -#define SYS32ONLY(func) .llong DOTSYM(sys_ni_syscall),DOTSYM(compat_sys_##func) -#define PPC64ONLY(func) .llong DOTSYM(ppc_##func),DOTSYM(sys_ni_syscall) -#define SYSX(f, f3264, f32) .llong DOTSYM(f),DOTSYM(f3264) +#define SYSCALL(func) .8byte DOTSYM(sys_##func),DOTSYM(sys_##func) +#define COMPAT_SYS(func) .8byte DOTSYM(sys_##func),DOTSYM(compat_sys_##func) +#define PPC_SYS(func) .8byte DOTSYM(ppc_##func),DOTSYM(ppc_##func) +#define OLDSYS(func) .8byte DOTSYM(sys_ni_syscall),DOTSYM(sys_ni_syscall) +#define SYS32ONLY(func) .8byte DOTSYM(sys_ni_syscall),DOTSYM(compat_sys_##func) +#define PPC64ONLY(func) .8byte DOTSYM(ppc_##func),DOTSYM(sys_ni_syscall) +#define SYSX(f, f3264, f32) .8byte DOTSYM(f),DOTSYM(f3264) #else #define SYSCALL(func) .long sys_##func #define COMPAT_SYS(func) .long sys_##func diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index bfcfd9ef09f2..13c9dcdcba69 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -114,6 +114,28 @@ static void pmac_backlight_unblank(void) static inline void pmac_backlight_unblank(void) { } #endif +/* + * If oops/die is expected to crash the machine, return true here. + * + * This should not be expected to be 100% accurate, there may be + * notifiers registered or other unexpected conditions that may bring + * down the kernel. Or if the current process in the kernel is holding + * locks or has other critical state, the kernel may become effectively + * unusable anyway. 
+ */ +bool die_will_crash(void) +{ + if (should_fadump_crash()) + return true; + if (kexec_should_crash(current)) + return true; + if (in_interrupt() || panic_on_oops || + !current->pid || is_global_init(current)) + return true; + + return false; +} + static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED; static int die_owner = -1; static unsigned int die_nest_count; @@ -162,21 +184,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, crash_fadump(regs, "die oops"); - /* - * A system reset (0x100) is a request to dump, so we always send - * it through the crashdump code. - */ - if (kexec_should_crash(current) || (TRAP(regs) == 0x100)) { + if (kexec_should_crash(current)) crash_kexec(regs); - /* - * We aren't the primary crash CPU. We need to send it - * to a holding pattern to avoid it ending up in the panic - * code. - */ - crash_kexec_secondary(regs); - } - if (!signr) return; @@ -202,18 +212,25 @@ NOKPROBE_SYMBOL(oops_end); static int __die(const char *str, struct pt_regs *regs, long err) { printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter); -#ifdef CONFIG_PREEMPT - printk("PREEMPT "); -#endif -#ifdef CONFIG_SMP - printk("SMP NR_CPUS=%d ", NR_CPUS); -#endif + + if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN)) + printk("LE "); + else + printk("BE "); + + if (IS_ENABLED(CONFIG_PREEMPT)) + pr_cont("PREEMPT "); + + if (IS_ENABLED(CONFIG_SMP)) + pr_cont("SMP NR_CPUS=%d ", NR_CPUS); + if (debug_pagealloc_enabled()) - printk("DEBUG_PAGEALLOC "); -#ifdef CONFIG_NUMA - printk("NUMA "); -#endif - printk("%s\n", ppc_md.name ? ppc_md.name : ""); + pr_cont("DEBUG_PAGEALLOC "); + + if (IS_ENABLED(CONFIG_NUMA)) + pr_cont("NUMA "); + + pr_cont("%s\n", ppc_md.name ? ppc_md.name : ""); if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP) return 1; @@ -288,23 +305,52 @@ void system_reset_exception(struct pt_regs *regs) if (!nested) nmi_enter(); + __this_cpu_inc(irq_stat.sreset_irqs); + /* See if any machine dependent calls */ if (ppc_md.system_reset_exception) { if (ppc_md.system_reset_exception(regs)) goto out; } - die("System Reset", regs, SIGABRT); + if (debugger(regs)) + goto out; + + /* + * A system reset is a request to dump, so we always send + * it through the crashdump code (if fadump or kdump are + * registered). + */ + crash_fadump(regs, "System Reset"); + + crash_kexec(regs); + + /* + * We aren't the primary crash CPU. We need to send it + * to a holding pattern to avoid it ending up in the panic + * code. + */ + crash_kexec_secondary(regs); + + /* + * No debugger or crash dump registered, print logs then + * panic. + */ + __die("System Reset", regs, SIGABRT); + + mdelay(2*MSEC_PER_SEC); /* Wait a little while for others to print */ + add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); + nmi_panic(regs, "System Reset"); out: #ifdef CONFIG_PPC_BOOK3S_64 BUG_ON(get_paca()->in_nmi == 0); if (get_paca()->in_nmi > 1) - panic("Unrecoverable nested System Reset"); + nmi_panic(regs, "Unrecoverable nested System Reset"); #endif /* Must die if the interrupt is not recoverable */ if (!(regs->msr & MSR_RI)) - panic("Unrecoverable System Reset"); + nmi_panic(regs, "Unrecoverable System Reset"); if (!nested) nmi_exit(); @@ -312,39 +358,6 @@ void system_reset_exception(struct pt_regs *regs) /* What should we do here? We could issue a shutdown or hard reset. */ } -#ifdef CONFIG_PPC64 -/* - * This function is called in real mode. Strictly no printk's please. - * - * regs->nip and regs->msr contains srr0 and ssr1. 
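The __die() hunk above is a good example of the IS_ENABLED() style it moves to: config options are tested as ordinary C expressions, so both branches stay type-checked even when the symbol is off and the #ifdef blocks disappear. A minimal kernel-style sketch of the same idiom (the function is illustrative only):

#include <linux/kconfig.h>
#include <linux/printk.h>
#include <linux/threads.h>

static void print_build_options(void)
{
        /* IS_ENABLED() is a plain expression, so it works in ?: as well */
        pr_cont(IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) ? "LE " : "BE ");

        if (IS_ENABLED(CONFIG_SMP))
                pr_cont("SMP NR_CPUS=%d ", NR_CPUS);

        if (IS_ENABLED(CONFIG_NUMA))
                pr_cont("NUMA ");

        pr_cont("\n");
}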
- */ -long machine_check_early(struct pt_regs *regs) -{ - long handled = 0; - - __this_cpu_inc(irq_stat.mce_exceptions); - - if (cur_cpu_spec && cur_cpu_spec->machine_check_early) - handled = cur_cpu_spec->machine_check_early(regs); - return handled; -} - -long hmi_exception_realmode(struct pt_regs *regs) -{ - __this_cpu_inc(irq_stat.hmi_exceptions); - - wait_for_subcore_guest_exit(); - - if (ppc_md.hmi_exception_early) - ppc_md.hmi_exception_early(regs); - - wait_for_tb_resync(); - - return 0; -} - -#endif - /* * I/O accesses can cause machine checks on powermacs. * Check if the NIP corresponds to the address of a sync @@ -397,11 +410,6 @@ static inline int check_io_access(struct pt_regs *regs) /* On 4xx, the reason for the machine check or program exception is in the ESR. */ #define get_reason(regs) ((regs)->dsisr) -#ifndef CONFIG_FSL_BOOKE -#define get_mc_reason(regs) ((regs)->dsisr) -#else -#define get_mc_reason(regs) (mfspr(SPRN_MCSR)) -#endif #define REASON_FP ESR_FP #define REASON_ILLEGAL (ESR_PIL | ESR_PUO) #define REASON_PRIVILEGED ESR_PPR @@ -415,111 +423,21 @@ static inline int check_io_access(struct pt_regs *regs) /* On non-4xx, the reason for the machine check or program exception is in the MSR. */ #define get_reason(regs) ((regs)->msr) -#define get_mc_reason(regs) ((regs)->msr) -#define REASON_TM 0x200000 -#define REASON_FP 0x100000 -#define REASON_ILLEGAL 0x80000 -#define REASON_PRIVILEGED 0x40000 -#define REASON_TRAP 0x20000 +#define REASON_TM SRR1_PROGTM +#define REASON_FP SRR1_PROGFPE +#define REASON_ILLEGAL SRR1_PROGILL +#define REASON_PRIVILEGED SRR1_PROGPRIV +#define REASON_TRAP SRR1_PROGTRAP #define single_stepping(regs) ((regs)->msr & MSR_SE) #define clear_single_step(regs) ((regs)->msr &= ~MSR_SE) #endif -#if defined(CONFIG_4xx) -int machine_check_4xx(struct pt_regs *regs) -{ - unsigned long reason = get_mc_reason(regs); - - if (reason & ESR_IMCP) { - printk("Instruction"); - mtspr(SPRN_ESR, reason & ~ESR_IMCP); - } else - printk("Data"); - printk(" machine check in kernel mode.\n"); - - return 0; -} - -int machine_check_440A(struct pt_regs *regs) -{ - unsigned long reason = get_mc_reason(regs); - - printk("Machine check in kernel mode.\n"); - if (reason & ESR_IMCP){ - printk("Instruction Synchronous Machine Check exception\n"); - mtspr(SPRN_ESR, reason & ~ESR_IMCP); - } - else { - u32 mcsr = mfspr(SPRN_MCSR); - if (mcsr & MCSR_IB) - printk("Instruction Read PLB Error\n"); - if (mcsr & MCSR_DRB) - printk("Data Read PLB Error\n"); - if (mcsr & MCSR_DWB) - printk("Data Write PLB Error\n"); - if (mcsr & MCSR_TLBP) - printk("TLB Parity Error\n"); - if (mcsr & MCSR_ICP){ - flush_instruction_cache(); - printk("I-Cache Parity Error\n"); - } - if (mcsr & MCSR_DCSP) - printk("D-Cache Search Parity Error\n"); - if (mcsr & MCSR_DCFP) - printk("D-Cache Flush Parity Error\n"); - if (mcsr & MCSR_IMPE) - printk("Machine Check exception is imprecise\n"); - - /* Clear MCSR */ - mtspr(SPRN_MCSR, mcsr); - } - return 0; -} - -int machine_check_47x(struct pt_regs *regs) -{ - unsigned long reason = get_mc_reason(regs); - u32 mcsr; - - printk(KERN_ERR "Machine check in kernel mode.\n"); - if (reason & ESR_IMCP) { - printk(KERN_ERR - "Instruction Synchronous Machine Check exception\n"); - mtspr(SPRN_ESR, reason & ~ESR_IMCP); - return 0; - } - mcsr = mfspr(SPRN_MCSR); - if (mcsr & MCSR_IB) - printk(KERN_ERR "Instruction Read PLB Error\n"); - if (mcsr & MCSR_DRB) - printk(KERN_ERR "Data Read PLB Error\n"); - if (mcsr & MCSR_DWB) - printk(KERN_ERR "Data Write PLB Error\n"); - if (mcsr & 
MCSR_TLBP) - printk(KERN_ERR "TLB Parity Error\n"); - if (mcsr & MCSR_ICP) { - flush_instruction_cache(); - printk(KERN_ERR "I-Cache Parity Error\n"); - } - if (mcsr & MCSR_DCSP) - printk(KERN_ERR "D-Cache Search Parity Error\n"); - if (mcsr & PPC47x_MCSR_GPR) - printk(KERN_ERR "GPR Parity Error\n"); - if (mcsr & PPC47x_MCSR_FPR) - printk(KERN_ERR "FPR Parity Error\n"); - if (mcsr & PPC47x_MCSR_IPR) - printk(KERN_ERR "Machine Check exception is imprecise\n"); - - /* Clear MCSR */ - mtspr(SPRN_MCSR, mcsr); - - return 0; -} -#elif defined(CONFIG_E500) +#if defined(CONFIG_E500) int machine_check_e500mc(struct pt_regs *regs) { unsigned long mcsr = mfspr(SPRN_MCSR); + unsigned long pvr = mfspr(SPRN_PVR); unsigned long reason = mcsr; int recoverable = 1; @@ -561,8 +479,15 @@ int machine_check_e500mc(struct pt_regs *regs) * may still get logged and cause a machine check. We should * only treat the non-write shadow case as non-recoverable. */ - if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS)) - recoverable = 0; + /* On e6500 core, L1 DCWS (Data cache write shadow mode) bit + * is not implemented but L1 data cache always runs in write + * shadow mode. Hence on data cache parity errors HW will + * automatically invalidate the L1 Data Cache. + */ + if (PVR_VER(pvr) != PVR_VER_E6500) { + if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS)) + recoverable = 0; + } } if (reason & MCSR_L2MMU_MHIT) { @@ -618,7 +543,7 @@ int machine_check_e500mc(struct pt_regs *regs) int machine_check_e500(struct pt_regs *regs) { - unsigned long reason = get_mc_reason(regs); + unsigned long reason = mfspr(SPRN_MCSR); if (reason & MCSR_BUS_RBERR) { if (fsl_rio_mcheck_exception(regs)) @@ -665,7 +590,7 @@ int machine_check_generic(struct pt_regs *regs) #elif defined(CONFIG_E200) int machine_check_e200(struct pt_regs *regs) { - unsigned long reason = get_mc_reason(regs); + unsigned long reason = mfspr(SPRN_MCSR); printk("Machine check in kernel mode.\n"); printk("Caused by (from MCSR=%lx): ", reason); @@ -687,35 +612,10 @@ int machine_check_e200(struct pt_regs *regs) return 0; } -#elif defined(CONFIG_PPC_8xx) -int machine_check_8xx(struct pt_regs *regs) -{ - unsigned long reason = get_mc_reason(regs); - - pr_err("Machine check in kernel mode.\n"); - pr_err("Caused by (from SRR1=%lx): ", reason); - if (reason & 0x40000000) - pr_err("Fetch error at address %lx\n", regs->nip); - else - pr_err("Data access error at address %lx\n", regs->dar); - -#ifdef CONFIG_PCI - /* the qspan pci read routines can cause machine checks -- Cort - * - * yuck !!! that totally needs to go away ! There are better ways - * to deal with that than having a wart in the mcheck handler. 
- * -- BenH - */ - bad_page_fault(regs, regs->dar, SIGBUS); - return 1; -#else - return 0; -#endif -} -#else +#elif defined(CONFIG_PPC32) int machine_check_generic(struct pt_regs *regs) { - unsigned long reason = get_mc_reason(regs); + unsigned long reason = regs->msr; printk("Machine check in kernel mode.\n"); printk("Caused by (from SRR1=%lx): ", reason); @@ -752,10 +652,14 @@ int machine_check_generic(struct pt_regs *regs) void machine_check_exception(struct pt_regs *regs) { - enum ctx_state prev_state = exception_enter(); int recover = 0; + bool nested = in_nmi(); + if (!nested) + nmi_enter(); - __this_cpu_inc(irq_stat.mce_exceptions); + /* 64s accounts the mce in machine_check_early when in HVMODE */ + if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !cpu_has_feature(CPU_FTR_HVMODE)) + __this_cpu_inc(irq_stat.mce_exceptions); add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); @@ -783,10 +687,11 @@ void machine_check_exception(struct pt_regs *regs) /* Must die if the interrupt is not recoverable */ if (!(regs->msr & MSR_RI)) - panic("Unrecoverable Machine check"); + nmi_panic(regs, "Unrecoverable Machine check"); bail: - exception_exit(prev_state); + if (!nested) + nmi_exit(); } void SMIException(struct pt_regs *regs) @@ -1672,24 +1577,6 @@ void performance_monitor_exception(struct pt_regs *regs) perf_irq(regs); } -#ifdef CONFIG_8xx -void SoftwareEmulation(struct pt_regs *regs) -{ - CHECK_FULL_REGS(regs); - - if (!user_mode(regs)) { - debugger(regs); - die("Kernel Mode Unimplemented Instruction or SW FPU Emulation", - regs, SIGFPE); - } - - if (!emulate_math(regs)) - return; - - _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); -} -#endif /* CONFIG_8xx */ - #ifdef CONFIG_PPC_ADV_DEBUG_REGS static void handle_debug(struct pt_regs *regs, unsigned long debug_status) { diff --git a/arch/powerpc/kernel/uprobes.c b/arch/powerpc/kernel/uprobes.c index 003b20964ea0..5d105b8eeece 100644 --- a/arch/powerpc/kernel/uprobes.c +++ b/arch/powerpc/kernel/uprobes.c @@ -205,3 +205,12 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs return orig_ret_vaddr; } + +bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx, + struct pt_regs *regs) +{ + if (ctx == RP_CHECK_CHAIN_CALL) + return regs->gpr[1] <= ret->stack; + else + return regs->gpr[1] < ret->stack; +} diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S index 6b2b69616e77..769c2624e0a6 100644 --- a/arch/powerpc/kernel/vdso32/gettimeofday.S +++ b/arch/powerpc/kernel/vdso32/gettimeofday.S @@ -232,15 +232,9 @@ __do_get_tspec: lwz r6,(CFG_TB_ORIG_STAMP+4)(r9) /* Get a stable TB value */ -#ifdef CONFIG_8xx -2: mftbu r3 - mftbl r4 - mftbu r0 -#else -2: mfspr r3, SPRN_TBRU - mfspr r4, SPRN_TBRL - mfspr r0, SPRN_TBRU -#endif +2: MFTBU(r3) + MFTBL(r4) + MFTBU(r0) cmplw cr0,r3,r0 bne- 2b diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index b1a250560198..882628fa6987 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -8,7 +8,7 @@ #include #include -#ifdef CONFIG_STRICT_KERNEL_RWX +#if defined(CONFIG_STRICT_KERNEL_RWX) && !defined(CONFIG_PPC32) #define STRICT_ALIGN_SIZE (1 << 24) #else #define STRICT_ALIGN_SIZE PAGE_SIZE diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c index 34721a257a77..c702a8981452 100644 --- a/arch/powerpc/kernel/watchdog.c +++ b/arch/powerpc/kernel/watchdog.c @@ -216,6 +216,9 @@ void soft_nmi_interrupt(struct pt_regs *regs) return; 
nmi_enter(); + + __this_cpu_inc(irq_stat.soft_nmi_irqs); + tb = get_tb(); if (tb - per_cpu(wd_timer_tb, cpu) >= wd_panic_timeout_tb) { per_cpu(wd_timer_tb, cpu) = tb; @@ -307,9 +310,6 @@ static int start_wd_on_cpu(unsigned int cpu) if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED)) return 0; - if (watchdog_suspended) - return 0; - if (!cpumask_test_cpu(cpu, &watchdog_cpumask)) return 0; @@ -355,36 +355,39 @@ static void watchdog_calc_timeouts(void) wd_timer_period_ms = watchdog_thresh * 1000 * 2 / 5; } -void watchdog_nmi_reconfigure(void) +void watchdog_nmi_stop(void) +{ + int cpu; + + for_each_cpu(cpu, &wd_cpus_enabled) + stop_wd_on_cpu(cpu); +} + +void watchdog_nmi_start(void) { int cpu; watchdog_calc_timeouts(); - - for_each_cpu(cpu, &wd_cpus_enabled) - stop_wd_on_cpu(cpu); - for_each_cpu_and(cpu, cpu_online_mask, &watchdog_cpumask) start_wd_on_cpu(cpu); } /* - * This runs after lockup_detector_init() which sets up watchdog_cpumask. + * Invoked from core watchdog init. */ -static int __init powerpc_watchdog_init(void) +int __init watchdog_nmi_probe(void) { int err; - watchdog_calc_timeouts(); - - err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/watchdog:online", - start_wd_on_cpu, stop_wd_on_cpu); - if (err < 0) + err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, + "powerpc/watchdog:online", + start_wd_on_cpu, stop_wd_on_cpu); + if (err < 0) { pr_warn("Watchdog could not be initialized"); - + return err; + } return 0; } -arch_initcall(powerpc_watchdog_init); static void handle_backtrace_ipi(struct pt_regs *regs) { diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index b42812e014c0..7c62967d672c 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -37,6 +37,7 @@ #include #include #include +#include #include "trace_hv.h" @@ -599,8 +600,8 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, * hugepage split and collapse. */ local_irq_save(flags); - ptep = find_linux_pte_or_hugepte(current->mm->pgd, - hva, NULL, NULL); + ptep = find_current_mm_pte(current->mm->pgd, + hva, NULL, NULL); if (ptep) { pte = kvmppc_read_update_linux_pte(ptep, 1); if (__pte_write(pte)) @@ -1940,6 +1941,7 @@ int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *ghf) rwflag = (ghf->flags & KVM_GET_HTAB_WRITE) ? O_WRONLY : O_RDONLY; ret = anon_inode_getfd("kvm-htab", &kvm_htab_fops, ctx, rwflag | O_CLOEXEC); if (ret < 0) { + kfree(ctx); kvm_put_kvm(kvm); return ret; } diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index f6b3e67c5762..c5d7435455f1 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -17,6 +17,7 @@ #include #include #include +#include /* * Supported radix tree geometry. 
@@ -322,13 +323,13 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, gpa = vcpu->arch.fault_gpa & ~0xfffUL; gpa &= ~0xF000000000000000ul; gfn = gpa >> PAGE_SHIFT; - if (!(dsisr & DSISR_PGDIRFAULT)) + if (!(dsisr & DSISR_PRTABLE_FAULT)) gpa |= ea & 0xfff; memslot = gfn_to_memslot(kvm, gfn); /* No memslot means it's an emulated MMIO region */ if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) { - if (dsisr & (DSISR_PGDIRFAULT | DSISR_BADACCESS | + if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS | DSISR_SET_RC)) { /* * Bad address in guest page table tree, or other @@ -359,8 +360,7 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, if (writing) pgflags |= _PAGE_DIRTY; local_irq_save(flags); - ptep = __find_linux_pte_or_hugepte(current->mm->pgd, hva, - NULL, NULL); + ptep = find_current_mm_pte(current->mm->pgd, hva, NULL, NULL); if (ptep) { pte = READ_ONCE(*ptep); if (pte_present(pte) && @@ -374,8 +374,12 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, spin_unlock(&kvm->mmu_lock); return RESUME_GUEST; } - ptep = __find_linux_pte_or_hugepte(kvm->arch.pgtable, - gpa, NULL, &shift); + /* + * We are walking the secondary page table here. We can do this + * without disabling irq. + */ + ptep = __find_linux_pte(kvm->arch.pgtable, + gpa, NULL, &shift); if (ptep && pte_present(*ptep)) { kvmppc_radix_update_pte(kvm, ptep, 0, pgflags, gpa, shift); @@ -427,8 +431,8 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, pgflags |= _PAGE_WRITE; } else { local_irq_save(flags); - ptep = __find_linux_pte_or_hugepte(current->mm->pgd, - hva, NULL, NULL); + ptep = find_current_mm_pte(current->mm->pgd, + hva, NULL, NULL); if (ptep && pte_write(*ptep) && pte_dirty(*ptep)) pgflags |= _PAGE_WRITE; local_irq_restore(flags); @@ -499,8 +503,7 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned int shift; unsigned long old; - ptep = __find_linux_pte_or_hugepte(kvm->arch.pgtable, gpa, - NULL, &shift); + ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift); if (ptep && pte_present(*ptep)) { old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_PRESENT, 0, gpa, shift); @@ -525,8 +528,7 @@ int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned int shift; int ref = 0; - ptep = __find_linux_pte_or_hugepte(kvm->arch.pgtable, gpa, - NULL, &shift); + ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift); if (ptep && pte_present(*ptep) && pte_young(*ptep)) { kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0, gpa, shift); @@ -545,8 +547,7 @@ int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned int shift; int ref = 0; - ptep = __find_linux_pte_or_hugepte(kvm->arch.pgtable, gpa, - NULL, &shift); + ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift); if (ptep && pte_present(*ptep) && pte_young(*ptep)) ref = 1; return ref; @@ -562,8 +563,7 @@ static int kvm_radix_test_clear_dirty(struct kvm *kvm, unsigned int shift; int ret = 0; - ptep = __find_linux_pte_or_hugepte(kvm->arch.pgtable, gpa, - NULL, &shift); + ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift); if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) { ret = 1; if (shift) diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index 53766e2bc029..8f2da8bba737 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c @@ -265,8 +265,11 @@ static int kvm_spapr_tce_release(struct inode 
*inode, struct file *filp) { struct kvmppc_spapr_tce_table *stt = filp->private_data; struct kvmppc_spapr_tce_iommu_table *stit, *tmp; + struct kvm *kvm = stt->kvm; + mutex_lock(&kvm->lock); list_del_rcu(&stt->list); + mutex_unlock(&kvm->lock); list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) { WARN_ON(!kref_read(&stit->kref)); @@ -298,7 +301,6 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, unsigned long npages, size; int ret = -ENOMEM; int i; - int fd = -1; if (!args->size) return -EINVAL; @@ -328,11 +330,6 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, goto fail; } - ret = fd = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops, - stt, O_RDWR | O_CLOEXEC); - if (ret < 0) - goto fail; - mutex_lock(&kvm->lock); /* Check this LIOBN hasn't been previously allocated */ @@ -344,17 +341,19 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, } } - if (!ret) { + if (!ret) + ret = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops, + stt, O_RDWR | O_CLOEXEC); + + if (ret >= 0) { list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables); kvm_get_kvm(kvm); } mutex_unlock(&kvm->lock); - if (!ret) - return fd; - - put_unused_fd(fd); + if (ret >= 0) + return ret; fail: for (i = 0; i < npages; i++) diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c index 3adfd2f5301c..c32e9bfe75b1 100644 --- a/arch/powerpc/kvm/book3s_64_vio_hv.c +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c @@ -39,6 +39,7 @@ #include #include #include +#include #ifdef CONFIG_BUG @@ -353,7 +354,16 @@ static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu, pte_t *ptep, pte; unsigned shift = 0; - ptep = __find_linux_pte_or_hugepte(vcpu->arch.pgdir, ua, NULL, &shift); + /* + * Called in real mode with MSR_EE = 0. We are safe here. + * It is ok to do the lookup with arch.pgdir here, because + * we are doing this on secondary cpus and current task there + * is not the hypervisor. Also this is safe against THP in the + * host, because an IPI to primary thread will wait for the secondary + * to exit which will agains result in the below page table walk + * to finish. + */ + ptep = __find_linux_pte(vcpu->arch.pgdir, ua, NULL, &shift); if (!ptep || !pte_present(*ptep)) return -ENXIO; pte = *ptep; diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 359c79cdf0cc..73bf1ebfa78f 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -181,7 +181,7 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu) struct swait_queue_head *wqp; wqp = kvm_arch_vcpu_wq(vcpu); - if (swait_active(wqp)) { + if (swq_has_sleeper(wqp)) { swake_up(wqp); ++vcpu->stat.halt_wakeup; } @@ -485,7 +485,13 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu, switch (subfunc) { case H_VPA_REG_VPA: /* register VPA */ - if (len < sizeof(struct lppaca)) + /* + * The size of our lppaca is 1kB because of the way we align + * it for the guest to avoid crossing a 4kB boundary. We only + * use 640 bytes of the structure though, so we should accept + * clients that set a size of 640. + */ + if (len < 640) break; vpap = &tvcpu->arch.vpa; err = 0; @@ -2111,6 +2117,15 @@ static int kvmppc_grab_hwthread(int cpu) struct paca_struct *tpaca; long timeout = 10000; + /* + * ISA v3.0 idle routines do not set hwthread_state or test + * hwthread_req, so they can not grab idle threads. 
+ */ + if (cpu_has_feature(CPU_FTR_ARCH_300)) { + WARN(1, "KVM: can not control sibling threads\n"); + return -EBUSY; + } + tpaca = &paca[cpu]; /* Ensure the thread won't go into the kernel if it wakes */ @@ -2145,10 +2160,12 @@ static void kvmppc_release_hwthread(int cpu) struct paca_struct *tpaca; tpaca = &paca[cpu]; - tpaca->kvm_hstate.hwthread_req = 0; tpaca->kvm_hstate.kvm_vcpu = NULL; tpaca->kvm_hstate.kvm_vcore = NULL; tpaca->kvm_hstate.kvm_split_mode = NULL; + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + tpaca->kvm_hstate.hwthread_req = 0; + } static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu) @@ -3325,6 +3342,14 @@ static int kvm_vm_ioctl_get_smmu_info_hv(struct kvm *kvm, if (radix_enabled()) return -EINVAL; + /* + * POWER7, POWER8 and POWER9 all support 32 storage keys for data. + * POWER7 doesn't support keys for instruction accesses, + * POWER8 and POWER9 do. + */ + info->data_keys = 32; + info->instr_keys = cpu_has_feature(CPU_FTR_ARCH_207S) ? 32 : 0; + info->flags = KVM_PPC_PAGE_SIZES_REAL; if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) info->flags |= KVM_PPC_1T_SEGMENTS; @@ -4187,11 +4212,13 @@ static int kvmhv_configure_mmu(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg) if ((cfg->process_table & PRTS_MASK) > 24) return -EINVAL; + mutex_lock(&kvm->lock); kvm->arch.process_table = cfg->process_table; kvmppc_setup_partition_table(kvm); lpcr = (cfg->flags & KVM_PPC_MMUV3_GTSE) ? LPCR_GTSE : 0; kvmppc_update_lpcr(kvm, lpcr, LPCR_GTSE); + mutex_unlock(&kvm->lock); return 0; } diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index 584c74c8119f..4efe364f1188 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c @@ -22,6 +22,7 @@ #include #include #include +#include /* Translate address of a vmalloc'd thing to a linear map address */ static void *real_vmalloc_addr(void *x) @@ -31,9 +32,9 @@ static void *real_vmalloc_addr(void *x) /* * assume we don't have huge pages in vmalloc space... * So don't worry about THP collapse/split. Called - * Only in realmode, hence won't need irq_save/restore. + * Only in realmode with MSR_EE = 0, hence won't need irq_save/restore. */ - p = __find_linux_pte_or_hugepte(swapper_pg_dir, addr, NULL, NULL); + p = find_init_mm_pte(addr, NULL); if (!p || !pte_present(*p)) return NULL; addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK); @@ -230,14 +231,13 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, * If we had a page table table change after lookup, we would * retry via mmu_notifier_retry. */ - if (realmode) - ptep = __find_linux_pte_or_hugepte(pgdir, hva, NULL, - &hpage_shift); - else { + if (!realmode) local_irq_save(irq_flags); - ptep = find_linux_pte_or_hugepte(pgdir, hva, NULL, - &hpage_shift); - } + /* + * If called in real mode we have MSR_EE = 0. Otherwise + * we disable irq above. 
+ */ + ptep = __find_linux_pte(pgdir, hva, NULL, &hpage_shift); if (ptep) { pte_t pte; unsigned int host_pte_size; @@ -269,7 +269,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, if (!realmode) local_irq_restore(irq_flags); - ptel &= ~(HPTE_R_PP0 - psize); + ptel &= HPTE_R_KEY | HPTE_R_PP0 | (psize-1); ptel |= pa; if (pa) diff --git a/arch/powerpc/kvm/book3s_hv_rm_xive.c b/arch/powerpc/kvm/book3s_hv_rm_xive.c index abf5f01b6eb1..5b81a807d742 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_xive.c +++ b/arch/powerpc/kvm/book3s_hv_rm_xive.c @@ -38,7 +38,6 @@ static inline void __iomem *get_tima_phys(void) #define __x_tima get_tima_phys() #define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_page)) #define __x_trig_page(xd) ((void __iomem *)((xd)->trig_page)) -#define __x_readb __raw_rm_readb #define __x_writeb __raw_rm_writeb #define __x_readw __raw_rm_readw #define __x_readq __raw_rm_readq diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 9c9c983b864f..ec69fa45d5a2 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -149,9 +149,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) subf r4, r4, r3 mtspr SPRN_DEC, r4 +BEGIN_FTR_SECTION /* hwthread_req may have got set by cede or no vcpu, so clear it */ li r0, 0 stb r0, HSTATE_HWTHREAD_REQ(r13) +END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) /* * For external interrupts we need to call the Linux @@ -314,6 +316,7 @@ kvm_novcpu_exit: * Relocation is off and most register values are lost. * r13 points to the PACA. * r3 contains the SRR1 wakeup value, SRR1 is trashed. + * This is not used by ISAv3.0B processors. */ .globl kvm_start_guest kvm_start_guest: @@ -432,6 +435,9 @@ kvm_secondary_got_guest: * While waiting we also need to check if we get given a vcpu to run. */ kvm_no_guest: +BEGIN_FTR_SECTION + twi 31,0,0 +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) lbz r3, HSTATE_HWTHREAD_REQ(r13) cmpwi r3, 0 bne 53f @@ -765,6 +771,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) #ifdef CONFIG_PPC_TRANSACTIONAL_MEM BEGIN_FTR_SECTION + /* + * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR + */ bl kvmppc_restore_tm END_FTR_SECTION_IFSET(CPU_FTR_TM) #endif @@ -976,7 +985,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) #ifdef CONFIG_KVM_XICS /* We are entering the guest on that thread, push VCPU to XIVE */ ld r10, HSTATE_XIVE_TIMA_PHYS(r13) - cmpldi cr0, r10, r0 + cmpldi cr0, r10, 0 beq no_xive ld r11, VCPU_XIVE_SAVED_STATE(r4) li r9, TM_QW1_OS @@ -1112,6 +1121,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) BEGIN_FTR_SECTION mtspr SPRN_PPR, r0 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) + +/* Move canary into DSISR to check for later */ +BEGIN_FTR_SECTION + li r0, 0x7fff + mtspr SPRN_HDSISR, r0 +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) + ld r0, VCPU_GPR(R0)(r4) ld r4, VCPU_GPR(R4)(r4) @@ -1280,7 +1296,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER bne 2f mfspr r3,SPRN_HDEC - cmpwi r3,0 + EXTEND_HDEC(r3) + cmpdi r3,0 mr r4,r9 bge fast_guest_return 2: @@ -1623,6 +1640,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) #ifdef CONFIG_PPC_TRANSACTIONAL_MEM BEGIN_FTR_SECTION + /* + * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR + */ bl kvmppc_save_tm END_FTR_SECTION_IFSET(CPU_FTR_TM) #endif @@ -1742,7 +1762,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) /* * Are we running hash or radix ? 
*/ - beq cr2,3f + ld r5, VCPU_KVM(r9) + lbz r0, KVM_RADIX(r5) + cmpwi cr2, r0, 0 + beq cr2, 3f /* Radix: Handle the case where the guest used an illegal PID */ LOAD_REG_ADDR(r4, mmu_base_pid) @@ -1940,9 +1963,14 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX) kvmppc_hdsi: ld r3, VCPU_KVM(r9) lbz r0, KVM_RADIX(r3) - cmpwi r0, 0 mfspr r4, SPRN_HDAR mfspr r6, SPRN_HDSISR +BEGIN_FTR_SECTION + /* Look for DSISR canary. If we find it, retry instruction */ + cmpdi r6, 0x7fff + beq 6f +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) + cmpwi r0, 0 bne .Lradix_hdsi /* on radix, just save DAR/DSISR/ASDR */ /* HPTE not found fault or protection fault? */ andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h @@ -2459,6 +2487,9 @@ _GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM BEGIN_FTR_SECTION + /* + * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR + */ ld r9, HSTATE_KVM_VCPU(r13) bl kvmppc_save_tm END_FTR_SECTION_IFSET(CPU_FTR_TM) @@ -2512,8 +2543,10 @@ kvm_do_nap: clrrdi r0, r0, 1 mtspr SPRN_CTRLT, r0 +BEGIN_FTR_SECTION li r0,1 stb r0,HSTATE_HWTHREAD_REQ(r13) +END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) mfspr r5,SPRN_LPCR ori r5,r5,LPCR_PECE0 | LPCR_PECE1 BEGIN_FTR_SECTION @@ -2569,6 +2602,9 @@ kvm_end_cede: #ifdef CONFIG_PPC_TRANSACTIONAL_MEM BEGIN_FTR_SECTION + /* + * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR + */ bl kvmppc_restore_tm END_FTR_SECTION_IFSET(CPU_FTR_TM) #endif diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index 08b200a0bbce..bf457843e032 100644 --- a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c @@ -48,7 +48,6 @@ #define __x_tima xive_tima #define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_mmio)) #define __x_trig_page(xd) ((void __iomem *)((xd)->trig_mmio)) -#define __x_readb __raw_readb #define __x_writeb __raw_writeb #define __x_readw __raw_readw #define __x_readq __raw_readq @@ -623,7 +622,7 @@ int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server, return -EINVAL; state = &sb->irq_state[idx]; arch_spin_lock(&sb->lock); - *server = state->guest_server; + *server = state->act_server; *priority = state->guest_priority; arch_spin_unlock(&sb->lock); @@ -1332,7 +1331,7 @@ static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr) xive->saved_src_count++; /* Convert saved state into something compatible with xics */ - val = state->guest_server; + val = state->act_server; prio = state->saved_scan_prio; if (prio == MASKED) { @@ -1508,7 +1507,6 @@ static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr) /* First convert prio and mark interrupt as untargetted */ act_prio = xive_prio_from_guest(guest_prio); state->act_priority = MASKED; - state->guest_server = server; /* * We need to drop the lock due to the mutex below. 
Hopefully diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h index 5938f7644dc1..6ba63f8e8a61 100644 --- a/arch/powerpc/kvm/book3s_xive.h +++ b/arch/powerpc/kvm/book3s_xive.h @@ -35,7 +35,6 @@ struct kvmppc_xive_irq_state { struct xive_irq_data *pt_data; /* XIVE Pass-through associated data */ /* Targetting as set by guest */ - u32 guest_server; /* Current guest selected target */ u8 guest_priority; /* Guest set priority */ u8 saved_priority; /* Saved priority when masking */ diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c index d1ed2c41b5d2..c7a5deadd1cc 100644 --- a/arch/powerpc/kvm/book3s_xive_template.c +++ b/arch/powerpc/kvm/book3s_xive_template.c @@ -28,7 +28,8 @@ static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc) * bit. */ if (cpu_has_feature(CPU_FTR_POWER9_DD1)) { - u8 pipr = __x_readb(__x_tima + TM_QW1_OS + TM_PIPR); + __be64 qw1 = __x_readq(__x_tima + TM_QW1_OS); + u8 pipr = be64_to_cpu(qw1) & 0xff; if (pipr >= xc->hw_cppr) return; } @@ -336,7 +337,6 @@ X_STATIC unsigned long GLUE(X_PFX,h_ipoll)(struct kvm_vcpu *vcpu, unsigned long struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; u8 pending = xc->pending; u32 hirq; - u8 pipr; pr_devel("H_IPOLL(server=%ld)\n", server); @@ -353,7 +353,8 @@ X_STATIC unsigned long GLUE(X_PFX,h_ipoll)(struct kvm_vcpu *vcpu, unsigned long pending = 0xff; } else { /* Grab pending interrupt if any */ - pipr = __x_readb(__x_tima + TM_QW1_OS + TM_PIPR); + __be64 qw1 = __x_readq(__x_tima + TM_QW1_OS); + u8 pipr = be64_to_cpu(qw1) & 0xff; if (pipr < 8) pending |= 1 << pipr; } diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c index 32fdab57d604..f9f6468f4171 100644 --- a/arch/powerpc/kvm/e500.c +++ b/arch/powerpc/kvm/e500.c @@ -455,16 +455,20 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_e500(struct kvm *kvm, if (err) goto free_vcpu; - if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL) + if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL) { + err = -ENOMEM; goto uninit_vcpu; + } err = kvmppc_e500_tlb_init(vcpu_e500); if (err) goto uninit_id; vcpu->arch.shared = (void*)__get_free_page(GFP_KERNEL|__GFP_ZERO); - if (!vcpu->arch.shared) + if (!vcpu->arch.shared) { + err = -ENOMEM; goto uninit_tlb; + } return vcpu; diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index 77fd043b3ecc..c6c734424c70 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c @@ -30,6 +30,7 @@ #include #include #include +#include #include "e500.h" #include "timing.h" @@ -476,7 +477,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, * can't run hence pfn won't change. 
*/ local_irq_save(flags); - ptep = find_linux_pte_or_hugepte(pgdir, hva, NULL, NULL); + ptep = find_linux_pte(pgdir, hva, NULL, NULL); if (ptep) { pte_t pte = READ_ONCE(*ptep); diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c index f48a0c22e8f9..d0b6b5788afc 100644 --- a/arch/powerpc/kvm/e500mc.c +++ b/arch/powerpc/kvm/e500mc.c @@ -331,8 +331,10 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_e500mc(struct kvm *kvm, goto uninit_vcpu; vcpu->arch.shared = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO); - if (!vcpu->arch.shared) + if (!vcpu->arch.shared) { + err = -ENOMEM; goto uninit_tlb; + } return vcpu; diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 1a75c0b5f4ca..3480faaf1ef8 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -58,6 +58,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) return !!(v->arch.pending_exceptions) || kvm_request_pending(v); } +bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) +{ + return false; +} + int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) { return 1; diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index 3c3146ba62da..50d5bf954cff 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -31,7 +31,8 @@ obj64-$(CONFIG_KPROBES_SANITY_TEST) += test_emulate_step.o obj-y += checksum_$(BITS).o checksum_wrappers.o -obj-$(CONFIG_PPC_EMULATE_SSTEP) += sstep.o ldstfp.o +obj-y += sstep.o ldstfp.o quad.o +obj64-y += quad.o obj-$(CONFIG_PPC_LIB_RHEAP) += rheap.o diff --git a/arch/powerpc/lib/copy_32.S b/arch/powerpc/lib/copy_32.S index 8aedbb5f4b86..da425bb6b369 100644 --- a/arch/powerpc/lib/copy_32.S +++ b/arch/powerpc/lib/copy_32.S @@ -67,6 +67,20 @@ CACHELINE_BYTES = L1_CACHE_BYTES LG_CACHELINE_BYTES = L1_CACHE_SHIFT CACHELINE_MASK = (L1_CACHE_BYTES-1) +_GLOBAL(memset16) + rlwinm. r0 ,r5, 31, 1, 31 + addi r6, r3, -4 + beq- 2f + rlwimi r4 ,r4 ,16 ,0 ,15 + mtctr r0 +1: stwu r4, 4(r6) + bdnz 1b +2: andi. r0, r5, 1 + beqlr + sth r4, 4(r6) + blr +EXPORT_SYMBOL(memset16) + /* * Use dcbz on the complete cache lines in the destination * to set them to zero. This requires that the destination @@ -77,22 +91,24 @@ CACHELINE_MASK = (L1_CACHE_BYTES-1) * replaced by a nop once cache is active. This is done in machine_init() */ _GLOBAL(memset) + cmplwi 0,r5,4 + blt 7f + rlwimi r4,r4,8,16,23 rlwimi r4,r4,16,0,15 - addi r6,r3,-4 - cmplwi 0,r5,4 - blt 7f - stwu r4,4(r6) + stw r4,0(r3) beqlr - andi. r0,r6,3 + andi. r0,r3,3 add r5,r0,r5 - subf r6,r0,r6 + subf r6,r0,r3 cmplwi 0,r4,0 - bne 2f /* Use normal procedure if r4 is not zero */ -EXPORT_SYMBOL(memset) + /* + * Skip optimised bloc until cache is enabled. Will be replaced + * by 'bne' during boot to use normal procedure if r4 is not zero + */ _GLOBAL(memset_nocache_branch) - b 2f /* Skip optimised bloc until cache is enabled */ + b 2f clrlwi r7,r6,32-LG_CACHELINE_BYTES add r8,r7,r5 @@ -119,7 +135,6 @@ _GLOBAL(memset_nocache_branch) 1: stwu r4,4(r6) bdnz 1b 6: andi. r5,r5,3 -7: cmpwi 0,r5,0 beqlr mtctr r5 addi r6,r6,3 @@ -127,6 +142,15 @@ _GLOBAL(memset_nocache_branch) bdnz 8b blr +7: cmpwi 0,r5,0 + beqlr + mtctr r5 + addi r6,r3,-1 +9: stbu r4,1(r6) + bdnz 9b + blr +EXPORT_SYMBOL(memset) + /* * This version uses dcbz on the complete cache lines in the * destination area to reduce memory traffic. 
This requires that diff --git a/arch/powerpc/lib/copypage_power7.S b/arch/powerpc/lib/copypage_power7.S index a84d333ecb09..ca5fc8fa7efc 100644 --- a/arch/powerpc/lib/copypage_power7.S +++ b/arch/powerpc/lib/copypage_power7.S @@ -45,13 +45,13 @@ _GLOBAL(copypage_power7) .machine push .machine "power4" /* setup read stream 0 */ - dcbt r0,r4,0b01000 /* addr from */ - dcbt r0,r7,0b01010 /* length and depth from */ + dcbt 0,r4,0b01000 /* addr from */ + dcbt 0,r7,0b01010 /* length and depth from */ /* setup write stream 1 */ - dcbtst r0,r9,0b01000 /* addr to */ - dcbtst r0,r10,0b01010 /* length and depth to */ + dcbtst 0,r9,0b01000 /* addr to */ + dcbtst 0,r10,0b01010 /* length and depth to */ eieio - dcbt r0,r8,0b01010 /* all streams GO */ + dcbt 0,r8,0b01010 /* all streams GO */ .machine pop #ifdef CONFIG_ALTIVEC @@ -83,7 +83,7 @@ _GLOBAL(copypage_power7) li r12,112 .align 5 -1: lvx v7,r0,r4 +1: lvx v7,0,r4 lvx v6,r4,r6 lvx v5,r4,r7 lvx v4,r4,r8 @@ -92,7 +92,7 @@ _GLOBAL(copypage_power7) lvx v1,r4,r11 lvx v0,r4,r12 addi r4,r4,128 - stvx v7,r0,r3 + stvx v7,0,r3 stvx v6,r3,r6 stvx v5,r3,r7 stvx v4,r3,r8 diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S index 706b7cc19846..d416a4a66578 100644 --- a/arch/powerpc/lib/copyuser_power7.S +++ b/arch/powerpc/lib/copyuser_power7.S @@ -315,13 +315,13 @@ err1; stb r0,0(r3) .machine push .machine "power4" /* setup read stream 0 */ - dcbt r0,r6,0b01000 /* addr from */ - dcbt r0,r7,0b01010 /* length and depth from */ + dcbt 0,r6,0b01000 /* addr from */ + dcbt 0,r7,0b01010 /* length and depth from */ /* setup write stream 1 */ - dcbtst r0,r9,0b01000 /* addr to */ - dcbtst r0,r10,0b01010 /* length and depth to */ + dcbtst 0,r9,0b01000 /* addr to */ + dcbtst 0,r10,0b01010 /* length and depth to */ eieio - dcbt r0,r8,0b01010 /* all streams GO */ + dcbt 0,r8,0b01010 /* all streams GO */ .machine pop beq cr1,.Lunwind_stack_nonvmx_copy @@ -376,26 +376,26 @@ err3; std r0,0(r3) li r11,48 bf cr7*4+3,5f -err3; lvx v1,r0,r4 +err3; lvx v1,0,r4 addi r4,r4,16 -err3; stvx v1,r0,r3 +err3; stvx v1,0,r3 addi r3,r3,16 5: bf cr7*4+2,6f -err3; lvx v1,r0,r4 +err3; lvx v1,0,r4 err3; lvx v0,r4,r9 addi r4,r4,32 -err3; stvx v1,r0,r3 +err3; stvx v1,0,r3 err3; stvx v0,r3,r9 addi r3,r3,32 6: bf cr7*4+1,7f -err3; lvx v3,r0,r4 +err3; lvx v3,0,r4 err3; lvx v2,r4,r9 err3; lvx v1,r4,r10 err3; lvx v0,r4,r11 addi r4,r4,64 -err3; stvx v3,r0,r3 +err3; stvx v3,0,r3 err3; stvx v2,r3,r9 err3; stvx v1,r3,r10 err3; stvx v0,r3,r11 @@ -421,7 +421,7 @@ err3; stvx v0,r3,r11 */ .align 5 8: -err4; lvx v7,r0,r4 +err4; lvx v7,0,r4 err4; lvx v6,r4,r9 err4; lvx v5,r4,r10 err4; lvx v4,r4,r11 @@ -430,7 +430,7 @@ err4; lvx v2,r4,r14 err4; lvx v1,r4,r15 err4; lvx v0,r4,r16 addi r4,r4,128 -err4; stvx v7,r0,r3 +err4; stvx v7,0,r3 err4; stvx v6,r3,r9 err4; stvx v5,r3,r10 err4; stvx v4,r3,r11 @@ -451,29 +451,29 @@ err4; stvx v0,r3,r16 mtocrf 0x01,r6 bf cr7*4+1,9f -err3; lvx v3,r0,r4 +err3; lvx v3,0,r4 err3; lvx v2,r4,r9 err3; lvx v1,r4,r10 err3; lvx v0,r4,r11 addi r4,r4,64 -err3; stvx v3,r0,r3 +err3; stvx v3,0,r3 err3; stvx v2,r3,r9 err3; stvx v1,r3,r10 err3; stvx v0,r3,r11 addi r3,r3,64 9: bf cr7*4+2,10f -err3; lvx v1,r0,r4 +err3; lvx v1,0,r4 err3; lvx v0,r4,r9 addi r4,r4,32 -err3; stvx v1,r0,r3 +err3; stvx v1,0,r3 err3; stvx v0,r3,r9 addi r3,r3,32 10: bf cr7*4+3,11f -err3; lvx v1,r0,r4 +err3; lvx v1,0,r4 addi r4,r4,16 -err3; stvx v1,r0,r3 +err3; stvx v1,0,r3 addi r3,r3,16 /* Up to 15B to go */ @@ -553,25 +553,25 @@ err3; lvx v0,0,r4 addi r4,r4,16 bf cr7*4+3,5f -err3; lvx v1,r0,r4 
+err3; lvx v1,0,r4 VPERM(v8,v0,v1,v16) addi r4,r4,16 -err3; stvx v8,r0,r3 +err3; stvx v8,0,r3 addi r3,r3,16 vor v0,v1,v1 5: bf cr7*4+2,6f -err3; lvx v1,r0,r4 +err3; lvx v1,0,r4 VPERM(v8,v0,v1,v16) err3; lvx v0,r4,r9 VPERM(v9,v1,v0,v16) addi r4,r4,32 -err3; stvx v8,r0,r3 +err3; stvx v8,0,r3 err3; stvx v9,r3,r9 addi r3,r3,32 6: bf cr7*4+1,7f -err3; lvx v3,r0,r4 +err3; lvx v3,0,r4 VPERM(v8,v0,v3,v16) err3; lvx v2,r4,r9 VPERM(v9,v3,v2,v16) @@ -580,7 +580,7 @@ err3; lvx v1,r4,r10 err3; lvx v0,r4,r11 VPERM(v11,v1,v0,v16) addi r4,r4,64 -err3; stvx v8,r0,r3 +err3; stvx v8,0,r3 err3; stvx v9,r3,r9 err3; stvx v10,r3,r10 err3; stvx v11,r3,r11 @@ -606,7 +606,7 @@ err3; stvx v11,r3,r11 */ .align 5 8: -err4; lvx v7,r0,r4 +err4; lvx v7,0,r4 VPERM(v8,v0,v7,v16) err4; lvx v6,r4,r9 VPERM(v9,v7,v6,v16) @@ -623,7 +623,7 @@ err4; lvx v1,r4,r15 err4; lvx v0,r4,r16 VPERM(v15,v1,v0,v16) addi r4,r4,128 -err4; stvx v8,r0,r3 +err4; stvx v8,0,r3 err4; stvx v9,r3,r9 err4; stvx v10,r3,r10 err4; stvx v11,r3,r11 @@ -644,7 +644,7 @@ err4; stvx v15,r3,r16 mtocrf 0x01,r6 bf cr7*4+1,9f -err3; lvx v3,r0,r4 +err3; lvx v3,0,r4 VPERM(v8,v0,v3,v16) err3; lvx v2,r4,r9 VPERM(v9,v3,v2,v16) @@ -653,27 +653,27 @@ err3; lvx v1,r4,r10 err3; lvx v0,r4,r11 VPERM(v11,v1,v0,v16) addi r4,r4,64 -err3; stvx v8,r0,r3 +err3; stvx v8,0,r3 err3; stvx v9,r3,r9 err3; stvx v10,r3,r10 err3; stvx v11,r3,r11 addi r3,r3,64 9: bf cr7*4+2,10f -err3; lvx v1,r0,r4 +err3; lvx v1,0,r4 VPERM(v8,v0,v1,v16) err3; lvx v0,r4,r9 VPERM(v9,v1,v0,v16) addi r4,r4,32 -err3; stvx v8,r0,r3 +err3; stvx v8,0,r3 err3; stvx v9,r3,r9 addi r3,r3,32 10: bf cr7*4+3,11f -err3; lvx v1,r0,r4 +err3; lvx v1,0,r4 VPERM(v8,v0,v1,v16) addi r4,r4,16 -err3; stvx v8,r0,r3 +err3; stvx v8,0,r3 addi r3,r3,16 /* Up to 15B to go */ diff --git a/arch/powerpc/lib/ldstfp.S b/arch/powerpc/lib/ldstfp.S index a58777c1b2cb..ae15eba49c1f 100644 --- a/arch/powerpc/lib/ldstfp.S +++ b/arch/powerpc/lib/ldstfp.S @@ -21,27 +21,19 @@ #define STKFRM (PPC_MIN_STKFRM + 16) - .macro inst32 op -reg = 0 - .rept 32 -20: \op reg,0,r4 - b 3f - EX_TABLE(20b,99f) -reg = reg + 1 - .endr - .endm - -/* Get the contents of frN into fr0; N is in r3. */ +/* Get the contents of frN into *p; N is in r3 and p is in r4. */ _GLOBAL(get_fpr) mflr r0 + mfmsr r6 + ori r7, r6, MSR_FP + MTMSRD(r7) + isync rlwinm r3,r3,3,0xf8 bcl 20,31,1f - blr /* fr0 is already in fr0 */ - nop -reg = 1 - .rept 31 - fmr fr0,reg - blr +reg = 0 + .rept 32 + stfd reg, 0(r4) + b 2f reg = reg + 1 .endr 1: mflr r5 @@ -49,18 +41,23 @@ reg = reg + 1 mtctr r5 mtlr r0 bctr +2: MTMSRD(r6) + isync + blr -/* Put the contents of fr0 into frN; N is in r3. */ +/* Put the contents of *p into frN; N is in r3 and p is in r4. */ _GLOBAL(put_fpr) mflr r0 + mfmsr r6 + ori r7, r6, MSR_FP + MTMSRD(r7) + isync rlwinm r3,r3,3,0xf8 bcl 20,31,1f - blr /* fr0 is already in fr0 */ - nop -reg = 1 - .rept 31 - fmr reg,fr0 - blr +reg = 0 + .rept 32 + lfd reg, 0(r4) + b 2f reg = reg + 1 .endr 1: mflr r5 @@ -68,127 +65,24 @@ reg = reg + 1 mtctr r5 mtlr r0 bctr - -/* Load FP reg N from float at *p. N is in r3, p in r4. */ -_GLOBAL(do_lfs) - PPC_STLU r1,-STKFRM(r1) - mflr r0 - PPC_STL r0,STKFRM+PPC_LR_STKOFF(r1) - mfmsr r6 - ori r7,r6,MSR_FP - cmpwi cr7,r3,0 - MTMSRD(r7) +2: MTMSRD(r6) isync - beq cr7,1f - stfd fr0,STKFRM-16(r1) -1: li r9,-EFAULT -2: lfs fr0,0(r4) - li r9,0 -3: bl put_fpr - beq cr7,4f - lfd fr0,STKFRM-16(r1) -4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) - mtlr r0 - MTMSRD(r6) - isync - mr r3,r9 - addi r1,r1,STKFRM blr - EX_TABLE(2b,3b) - -/* Load FP reg N from double at *p. 
N is in r3, p in r4. */ -_GLOBAL(do_lfd) - PPC_STLU r1,-STKFRM(r1) - mflr r0 - PPC_STL r0,STKFRM+PPC_LR_STKOFF(r1) - mfmsr r6 - ori r7,r6,MSR_FP - cmpwi cr7,r3,0 - MTMSRD(r7) - isync - beq cr7,1f - stfd fr0,STKFRM-16(r1) -1: li r9,-EFAULT -2: lfd fr0,0(r4) - li r9,0 -3: beq cr7,4f - bl put_fpr - lfd fr0,STKFRM-16(r1) -4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) - mtlr r0 - MTMSRD(r6) - isync - mr r3,r9 - addi r1,r1,STKFRM - blr - EX_TABLE(2b,3b) - -/* Store FP reg N to float at *p. N is in r3, p in r4. */ -_GLOBAL(do_stfs) - PPC_STLU r1,-STKFRM(r1) - mflr r0 - PPC_STL r0,STKFRM+PPC_LR_STKOFF(r1) - mfmsr r6 - ori r7,r6,MSR_FP - cmpwi cr7,r3,0 - MTMSRD(r7) - isync - beq cr7,1f - stfd fr0,STKFRM-16(r1) - bl get_fpr -1: li r9,-EFAULT -2: stfs fr0,0(r4) - li r9,0 -3: beq cr7,4f - lfd fr0,STKFRM-16(r1) -4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) - mtlr r0 - MTMSRD(r6) - isync - mr r3,r9 - addi r1,r1,STKFRM - blr - EX_TABLE(2b,3b) - -/* Store FP reg N to double at *p. N is in r3, p in r4. */ -_GLOBAL(do_stfd) - PPC_STLU r1,-STKFRM(r1) - mflr r0 - PPC_STL r0,STKFRM+PPC_LR_STKOFF(r1) - mfmsr r6 - ori r7,r6,MSR_FP - cmpwi cr7,r3,0 - MTMSRD(r7) - isync - beq cr7,1f - stfd fr0,STKFRM-16(r1) - bl get_fpr -1: li r9,-EFAULT -2: stfd fr0,0(r4) - li r9,0 -3: beq cr7,4f - lfd fr0,STKFRM-16(r1) -4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) - mtlr r0 - MTMSRD(r6) - isync - mr r3,r9 - addi r1,r1,STKFRM - blr - EX_TABLE(2b,3b) #ifdef CONFIG_ALTIVEC -/* Get the contents of vrN into v0; N is in r3. */ +/* Get the contents of vrN into *p; N is in r3 and p is in r4. */ _GLOBAL(get_vr) mflr r0 + mfmsr r6 + oris r7, r6, MSR_VEC@h + MTMSRD(r7) + isync rlwinm r3,r3,3,0xf8 bcl 20,31,1f - blr /* v0 is already in v0 */ - nop -reg = 1 - .rept 31 - vor v0,reg,reg /* assembler doesn't know vmr? */ - blr +reg = 0 + .rept 32 + stvx reg, 0, r4 + b 2f reg = reg + 1 .endr 1: mflr r5 @@ -196,18 +90,23 @@ reg = reg + 1 mtctr r5 mtlr r0 bctr +2: MTMSRD(r6) + isync + blr -/* Put the contents of v0 into vrN; N is in r3. */ +/* Put the contents of *p into vrN; N is in r3 and p is in r4. */ _GLOBAL(put_vr) mflr r0 + mfmsr r6 + oris r7, r6, MSR_VEC@h + MTMSRD(r7) + isync rlwinm r3,r3,3,0xf8 bcl 20,31,1f - blr /* v0 is already in v0 */ - nop -reg = 1 - .rept 31 - vor reg,v0,v0 - blr +reg = 0 + .rept 32 + lvx reg, 0, r4 + b 2f reg = reg + 1 .endr 1: mflr r5 @@ -215,62 +114,9 @@ reg = reg + 1 mtctr r5 mtlr r0 bctr - -/* Load vector reg N from *p. N is in r3, p in r4. */ -_GLOBAL(do_lvx) - PPC_STLU r1,-STKFRM(r1) - mflr r0 - PPC_STL r0,STKFRM+PPC_LR_STKOFF(r1) - mfmsr r6 - oris r7,r6,MSR_VEC@h - cmpwi cr7,r3,0 - li r8,STKFRM-16 - MTMSRD(r7) +2: MTMSRD(r6) isync - beq cr7,1f - stvx v0,r1,r8 -1: li r9,-EFAULT -2: lvx v0,0,r4 - li r9,0 -3: beq cr7,4f - bl put_vr - lvx v0,r1,r8 -4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) - mtlr r0 - MTMSRD(r6) - isync - mr r3,r9 - addi r1,r1,STKFRM blr - EX_TABLE(2b,3b) - -/* Store vector reg N to *p. N is in r3, p in r4. */ -_GLOBAL(do_stvx) - PPC_STLU r1,-STKFRM(r1) - mflr r0 - PPC_STL r0,STKFRM+PPC_LR_STKOFF(r1) - mfmsr r6 - oris r7,r6,MSR_VEC@h - cmpwi cr7,r3,0 - li r8,STKFRM-16 - MTMSRD(r7) - isync - beq cr7,1f - stvx v0,r1,r8 - bl get_vr -1: li r9,-EFAULT -2: stvx v0,0,r4 - li r9,0 -3: beq cr7,4f - lvx v0,r1,r8 -4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) - mtlr r0 - MTMSRD(r6) - isync - mr r3,r9 - addi r1,r1,STKFRM - blr - EX_TABLE(2b,3b) #endif /* CONFIG_ALTIVEC */ #ifdef CONFIG_VSX @@ -313,7 +159,7 @@ reg = reg + 1 bctr /* Load VSX reg N from vector doubleword *p. N is in r3, p in r4. 
*/ -_GLOBAL(do_lxvd2x) +_GLOBAL(load_vsrn) PPC_STLU r1,-STKFRM(r1) mflr r0 PPC_STL r0,STKFRM+PPC_LR_STKOFF(r1) @@ -325,49 +171,74 @@ _GLOBAL(do_lxvd2x) isync beq cr7,1f STXVD2X(0,R1,R8) -1: li r9,-EFAULT -2: LXVD2X(0,R0,R4) - li r9,0 -3: beq cr7,4f +1: LXVD2X(0,R0,R4) +#ifdef __LITTLE_ENDIAN__ + XXSWAPD(0,0) +#endif + beq cr7,4f bl put_vsr LXVD2X(0,R1,R8) 4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) mtlr r0 MTMSRD(r6) isync - mr r3,r9 addi r1,r1,STKFRM blr - EX_TABLE(2b,3b) /* Store VSX reg N to vector doubleword *p. N is in r3, p in r4. */ -_GLOBAL(do_stxvd2x) +_GLOBAL(store_vsrn) PPC_STLU r1,-STKFRM(r1) mflr r0 PPC_STL r0,STKFRM+PPC_LR_STKOFF(r1) mfmsr r6 oris r7,r6,MSR_VSX@h - cmpwi cr7,r3,0 li r8,STKFRM-16 MTMSRD(r7) isync - beq cr7,1f STXVD2X(0,R1,R8) bl get_vsr -1: li r9,-EFAULT -2: STXVD2X(0,R0,R4) - li r9,0 -3: beq cr7,4f +#ifdef __LITTLE_ENDIAN__ + XXSWAPD(0,0) +#endif + STXVD2X(0,R0,R4) LXVD2X(0,R1,R8) -4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) + PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) mtlr r0 MTMSRD(r6) isync mr r3,r9 addi r1,r1,STKFRM blr - EX_TABLE(2b,3b) - #endif /* CONFIG_VSX */ +/* Convert single-precision to double, without disturbing FPRs. */ +/* conv_sp_to_dp(float *sp, double *dp) */ +_GLOBAL(conv_sp_to_dp) + mfmsr r6 + ori r7, r6, MSR_FP + MTMSRD(r7) + isync + stfd fr0, -16(r1) + lfs fr0, 0(r3) + stfd fr0, 0(r4) + lfd fr0, -16(r1) + MTMSRD(r6) + isync + blr + +/* Convert double-precision to single, without disturbing FPRs. */ +/* conv_dp_to_sp(double *dp, float *sp) */ +_GLOBAL(conv_dp_to_sp) + mfmsr r6 + ori r7, r6, MSR_FP + MTMSRD(r7) + isync + stfd fr0, -16(r1) + lfd fr0, 0(r3) + stfs fr0, 0(r4) + lfd fr0, -16(r1) + MTMSRD(r6) + isync + blr + #endif /* CONFIG_PPC_FPU */ diff --git a/arch/powerpc/lib/mem_64.S b/arch/powerpc/lib/mem_64.S index 85fa9869aec5..ec531de99996 100644 --- a/arch/powerpc/lib/mem_64.S +++ b/arch/powerpc/lib/mem_64.S @@ -13,6 +13,23 @@ #include #include +_GLOBAL(__memset16) + rlwimi r4,r4,16,0,15 + /* fall through */ + +_GLOBAL(__memset32) + rldimi r4,r4,32,0 + /* fall through */ + +_GLOBAL(__memset64) + neg r0,r3 + andi. r0,r0,7 + cmplw cr1,r5,r0 + b .Lms +EXPORT_SYMBOL(__memset16) +EXPORT_SYMBOL(__memset32) +EXPORT_SYMBOL(__memset64) + _GLOBAL(memset) neg r0,r3 rlwimi r4,r4,8,16,23 @@ -20,7 +37,7 @@ _GLOBAL(memset) rlwimi r4,r4,16,0,15 cmplw cr1,r5,r0 /* do we get that far?
*/ rldimi r4,r4,32,0 - PPC_MTOCRF(1,r0) +.Lms: PPC_MTOCRF(1,r0) mr r6,r3 blt cr1,8f beq+ 3f /* if already 8-byte aligned */ diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S index 786234fd4e91..193909abd18b 100644 --- a/arch/powerpc/lib/memcpy_power7.S +++ b/arch/powerpc/lib/memcpy_power7.S @@ -261,12 +261,12 @@ _GLOBAL(memcpy_power7) .machine push .machine "power4" - dcbt r0,r6,0b01000 - dcbt r0,r7,0b01010 - dcbtst r0,r9,0b01000 - dcbtst r0,r10,0b01010 + dcbt 0,r6,0b01000 + dcbt 0,r7,0b01010 + dcbtst 0,r9,0b01000 + dcbtst 0,r10,0b01010 eieio - dcbt r0,r8,0b01010 /* GO */ + dcbt 0,r8,0b01010 /* GO */ .machine pop beq cr1,.Lunwind_stack_nonvmx_copy @@ -321,26 +321,26 @@ _GLOBAL(memcpy_power7) li r11,48 bf cr7*4+3,5f - lvx v1,r0,r4 + lvx v1,0,r4 addi r4,r4,16 - stvx v1,r0,r3 + stvx v1,0,r3 addi r3,r3,16 5: bf cr7*4+2,6f - lvx v1,r0,r4 + lvx v1,0,r4 lvx v0,r4,r9 addi r4,r4,32 - stvx v1,r0,r3 + stvx v1,0,r3 stvx v0,r3,r9 addi r3,r3,32 6: bf cr7*4+1,7f - lvx v3,r0,r4 + lvx v3,0,r4 lvx v2,r4,r9 lvx v1,r4,r10 lvx v0,r4,r11 addi r4,r4,64 - stvx v3,r0,r3 + stvx v3,0,r3 stvx v2,r3,r9 stvx v1,r3,r10 stvx v0,r3,r11 @@ -366,7 +366,7 @@ _GLOBAL(memcpy_power7) */ .align 5 8: - lvx v7,r0,r4 + lvx v7,0,r4 lvx v6,r4,r9 lvx v5,r4,r10 lvx v4,r4,r11 @@ -375,7 +375,7 @@ _GLOBAL(memcpy_power7) lvx v1,r4,r15 lvx v0,r4,r16 addi r4,r4,128 - stvx v7,r0,r3 + stvx v7,0,r3 stvx v6,r3,r9 stvx v5,r3,r10 stvx v4,r3,r11 @@ -396,29 +396,29 @@ _GLOBAL(memcpy_power7) mtocrf 0x01,r6 bf cr7*4+1,9f - lvx v3,r0,r4 + lvx v3,0,r4 lvx v2,r4,r9 lvx v1,r4,r10 lvx v0,r4,r11 addi r4,r4,64 - stvx v3,r0,r3 + stvx v3,0,r3 stvx v2,r3,r9 stvx v1,r3,r10 stvx v0,r3,r11 addi r3,r3,64 9: bf cr7*4+2,10f - lvx v1,r0,r4 + lvx v1,0,r4 lvx v0,r4,r9 addi r4,r4,32 - stvx v1,r0,r3 + stvx v1,0,r3 stvx v0,r3,r9 addi r3,r3,32 10: bf cr7*4+3,11f - lvx v1,r0,r4 + lvx v1,0,r4 addi r4,r4,16 - stvx v1,r0,r3 + stvx v1,0,r3 addi r3,r3,16 /* Up to 15B to go */ @@ -499,25 +499,25 @@ _GLOBAL(memcpy_power7) addi r4,r4,16 bf cr7*4+3,5f - lvx v1,r0,r4 + lvx v1,0,r4 VPERM(v8,v0,v1,v16) addi r4,r4,16 - stvx v8,r0,r3 + stvx v8,0,r3 addi r3,r3,16 vor v0,v1,v1 5: bf cr7*4+2,6f - lvx v1,r0,r4 + lvx v1,0,r4 VPERM(v8,v0,v1,v16) lvx v0,r4,r9 VPERM(v9,v1,v0,v16) addi r4,r4,32 - stvx v8,r0,r3 + stvx v8,0,r3 stvx v9,r3,r9 addi r3,r3,32 6: bf cr7*4+1,7f - lvx v3,r0,r4 + lvx v3,0,r4 VPERM(v8,v0,v3,v16) lvx v2,r4,r9 VPERM(v9,v3,v2,v16) @@ -526,7 +526,7 @@ _GLOBAL(memcpy_power7) lvx v0,r4,r11 VPERM(v11,v1,v0,v16) addi r4,r4,64 - stvx v8,r0,r3 + stvx v8,0,r3 stvx v9,r3,r9 stvx v10,r3,r10 stvx v11,r3,r11 @@ -552,7 +552,7 @@ _GLOBAL(memcpy_power7) */ .align 5 8: - lvx v7,r0,r4 + lvx v7,0,r4 VPERM(v8,v0,v7,v16) lvx v6,r4,r9 VPERM(v9,v7,v6,v16) @@ -569,7 +569,7 @@ _GLOBAL(memcpy_power7) lvx v0,r4,r16 VPERM(v15,v1,v0,v16) addi r4,r4,128 - stvx v8,r0,r3 + stvx v8,0,r3 stvx v9,r3,r9 stvx v10,r3,r10 stvx v11,r3,r11 @@ -590,7 +590,7 @@ _GLOBAL(memcpy_power7) mtocrf 0x01,r6 bf cr7*4+1,9f - lvx v3,r0,r4 + lvx v3,0,r4 VPERM(v8,v0,v3,v16) lvx v2,r4,r9 VPERM(v9,v3,v2,v16) @@ -599,27 +599,27 @@ _GLOBAL(memcpy_power7) lvx v0,r4,r11 VPERM(v11,v1,v0,v16) addi r4,r4,64 - stvx v8,r0,r3 + stvx v8,0,r3 stvx v9,r3,r9 stvx v10,r3,r10 stvx v11,r3,r11 addi r3,r3,64 9: bf cr7*4+2,10f - lvx v1,r0,r4 + lvx v1,0,r4 VPERM(v8,v0,v1,v16) lvx v0,r4,r9 VPERM(v9,v1,v0,v16) addi r4,r4,32 - stvx v8,r0,r3 + stvx v8,0,r3 stvx v9,r3,r9 addi r3,r3,32 10: bf cr7*4+3,11f - lvx v1,r0,r4 + lvx v1,0,r4 VPERM(v8,v0,v1,v16) addi r4,r4,16 - stvx v8,r0,r3 + stvx v8,0,r3 addi r3,r3,16 /* Up to 15B to go */ 
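For readers skimming the assembly, the __memset16/__memset32/__memset64 entry points added to mem_64.S earlier in this patch replicate a 16- or 32-bit fill pattern across a 64-bit register and then branch into memset's aligned store loop at .Lms. As a rough, portable C sketch of the pattern-fill semantics the generic memset16()/memset32()/memset64() helpers are expected to provide (illustrative only; the *_ref names are hypothetical, and the element-count/byte-count bookkeeping done by the real wrappers is not shown):

#include <stddef.h>
#include <stdint.h>

/* Fill count 16-bit slots with the value v. */
static void *memset16_ref(uint16_t *p, uint16_t v, size_t count)
{
	uint16_t *start = p;

	while (count--)
		*p++ = v;
	return start;
}

/* Fill count 64-bit slots with the value v; the assembly above reaches
 * the same store loop by widening the 16/32-bit pattern to 64 bits first. */
static void *memset64_ref(uint64_t *p, uint64_t v, size_t count)
{
	uint64_t *start = p;

	while (count--)
		*p++ = v;
	return start;
}

A caller of the generic helpers passes a count of elements rather than bytes, e.g. memset16(buf, 0xabcd, n) stores the 16-bit value n times.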
diff --git a/arch/powerpc/lib/quad.S b/arch/powerpc/lib/quad.S new file mode 100644 index 000000000000..c4d12fae8724 --- /dev/null +++ b/arch/powerpc/lib/quad.S @@ -0,0 +1,62 @@ +/* + * Quadword loads and stores + * for use in instruction emulation. + * + * Copyright 2017 Paul Mackerras, IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include + +/* do_lq(unsigned long ea, unsigned long *regs) */ +_GLOBAL(do_lq) +1: lq r6, 0(r3) + std r6, 0(r4) + std r7, 8(r4) + li r3, 0 + blr +2: li r3, -EFAULT + blr + EX_TABLE(1b, 2b) + +/* do_stq(unsigned long ea, unsigned long val0, unsigned long val1) */ +_GLOBAL(do_stq) +1: stq r4, 0(r3) + li r3, 0 + blr +2: li r3, -EFAULT + blr + EX_TABLE(1b, 2b) + +/* do_lqarx(unsigned long ea, unsigned long *regs) */ +_GLOBAL(do_lqarx) +1: PPC_LQARX(6, 0, 3, 0) + std r6, 0(r4) + std r7, 8(r4) + li r3, 0 + blr +2: li r3, -EFAULT + blr + EX_TABLE(1b, 2b) + +/* do_stqcx(unsigned long ea, unsigned long val0, unsigned long val1, + unsigned int *crp) */ + +_GLOBAL(do_stqcx) +1: PPC_STQCX(4, 0, 3) + mfcr r5 + stw r5, 0(r6) + li r3, 0 + blr +2: li r3, -EFAULT + blr + EX_TABLE(1b, 2b) diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index ee33327686ae..5e8418c28bd8 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c @@ -36,14 +36,33 @@ extern char system_call_common[]; /* * Functions in ldstfp.S */ -extern int do_lfs(int rn, unsigned long ea); -extern int do_lfd(int rn, unsigned long ea); -extern int do_stfs(int rn, unsigned long ea); -extern int do_stfd(int rn, unsigned long ea); -extern int do_lvx(int rn, unsigned long ea); -extern int do_stvx(int rn, unsigned long ea); -extern int do_lxvd2x(int rn, unsigned long ea); -extern int do_stxvd2x(int rn, unsigned long ea); +extern void get_fpr(int rn, double *p); +extern void put_fpr(int rn, const double *p); +extern void get_vr(int rn, __vector128 *p); +extern void put_vr(int rn, __vector128 *p); +extern void load_vsrn(int vsr, const void *p); +extern void store_vsrn(int vsr, void *p); +extern void conv_sp_to_dp(const float *sp, double *dp); +extern void conv_dp_to_sp(const double *dp, float *sp); +#endif + +#ifdef __powerpc64__ +/* + * Functions in quad.S + */ +extern int do_lq(unsigned long ea, unsigned long *regs); +extern int do_stq(unsigned long ea, unsigned long val0, unsigned long val1); +extern int do_lqarx(unsigned long ea, unsigned long *regs); +extern int do_stqcx(unsigned long ea, unsigned long val0, unsigned long val1, + unsigned int *crp); +#endif + +#ifdef __LITTLE_ENDIAN__ +#define IS_LE 1 +#define IS_BE 0 +#else +#define IS_LE 0 +#define IS_BE 1 #endif /* @@ -62,15 +81,17 @@ static nokprobe_inline unsigned long truncate_if_32bit(unsigned long msr, /* * Determine whether a conditional branch instruction would branch. 
*/ -static nokprobe_inline int branch_taken(unsigned int instr, struct pt_regs *regs) +static nokprobe_inline int branch_taken(unsigned int instr, + const struct pt_regs *regs, + struct instruction_op *op) { unsigned int bo = (instr >> 21) & 0x1f; unsigned int bi; if ((bo & 4) == 0) { /* decrement counter */ - --regs->ctr; - if (((bo >> 1) & 1) ^ (regs->ctr == 0)) + op->type |= DECCTR; + if (((bo >> 1) & 1) ^ (regs->ctr == 1)) return 0; } if ((bo & 0x10) == 0) { @@ -82,17 +103,26 @@ static nokprobe_inline int branch_taken(unsigned int instr, struct pt_regs *regs return 1; } -static nokprobe_inline long address_ok(struct pt_regs *regs, unsigned long ea, int nb) +static nokprobe_inline long address_ok(struct pt_regs *regs, + unsigned long ea, int nb) { if (!user_mode(regs)) return 1; - return __access_ok(ea, nb, USER_DS); + if (__access_ok(ea, nb, USER_DS)) + return 1; + if (__access_ok(ea, 1, USER_DS)) + /* Access overlaps the end of the user region */ + regs->dar = USER_DS.seg; + else + regs->dar = ea; + return 0; } /* * Calculate effective address for a D-form instruction */ -static nokprobe_inline unsigned long dform_ea(unsigned int instr, struct pt_regs *regs) +static nokprobe_inline unsigned long dform_ea(unsigned int instr, + const struct pt_regs *regs) { int ra; unsigned long ea; @@ -102,14 +132,15 @@ static nokprobe_inline unsigned long dform_ea(unsigned int instr, struct pt_regs if (ra) ea += regs->gpr[ra]; - return truncate_if_32bit(regs->msr, ea); + return ea; } #ifdef __powerpc64__ /* * Calculate effective address for a DS-form instruction */ -static nokprobe_inline unsigned long dsform_ea(unsigned int instr, struct pt_regs *regs) +static nokprobe_inline unsigned long dsform_ea(unsigned int instr, + const struct pt_regs *regs) { int ra; unsigned long ea; @@ -119,7 +150,24 @@ static nokprobe_inline unsigned long dsform_ea(unsigned int instr, struct pt_reg if (ra) ea += regs->gpr[ra]; - return truncate_if_32bit(regs->msr, ea); + return ea; +} + +/* + * Calculate effective address for a DQ-form instruction + */ +static nokprobe_inline unsigned long dqform_ea(unsigned int instr, + const struct pt_regs *regs) +{ + int ra; + unsigned long ea; + + ra = (instr >> 16) & 0x1f; + ea = (signed short) (instr & ~0xf); /* sign-extend */ + if (ra) + ea += regs->gpr[ra]; + + return ea; } #endif /* __powerpc64 */ @@ -127,7 +175,7 @@ static nokprobe_inline unsigned long dsform_ea(unsigned int instr, struct pt_reg * Calculate effective address for an X-form instruction */ static nokprobe_inline unsigned long xform_ea(unsigned int instr, - struct pt_regs *regs) + const struct pt_regs *regs) { int ra, rb; unsigned long ea; @@ -138,7 +186,7 @@ static nokprobe_inline unsigned long xform_ea(unsigned int instr, if (ra) ea += regs->gpr[ra]; - return truncate_if_32bit(regs->msr, ea); + return ea; } /* @@ -151,7 +199,6 @@ static nokprobe_inline unsigned long max_align(unsigned long x) return x & -x; /* isolates rightmost bit */ } - static nokprobe_inline unsigned long byterev_2(unsigned long x) { return ((x >> 8) & 0xff) | ((x & 0xff) << 8); @@ -170,8 +217,36 @@ static nokprobe_inline unsigned long byterev_8(unsigned long x) } #endif +static nokprobe_inline void do_byte_reverse(void *ptr, int nb) +{ + switch (nb) { + case 2: + *(u16 *)ptr = byterev_2(*(u16 *)ptr); + break; + case 4: + *(u32 *)ptr = byterev_4(*(u32 *)ptr); + break; +#ifdef __powerpc64__ + case 8: + *(unsigned long *)ptr = byterev_8(*(unsigned long *)ptr); + break; + case 16: { + unsigned long *up = (unsigned long *)ptr; + unsigned long tmp; 
+ tmp = byterev_8(up[0]); + up[0] = byterev_8(up[1]); + up[1] = tmp; + break; + } +#endif + default: + WARN_ON_ONCE(1); + } +} + static nokprobe_inline int read_mem_aligned(unsigned long *dest, - unsigned long ea, int nb) + unsigned long ea, int nb, + struct pt_regs *regs) { int err = 0; unsigned long x = 0; @@ -194,59 +269,77 @@ static nokprobe_inline int read_mem_aligned(unsigned long *dest, } if (!err) *dest = x; + else + regs->dar = ea; return err; } -static nokprobe_inline int read_mem_unaligned(unsigned long *dest, - unsigned long ea, int nb, struct pt_regs *regs) +/* + * Copy from userspace to a buffer, using the largest possible + * aligned accesses, up to sizeof(long). + */ +static int nokprobe_inline copy_mem_in(u8 *dest, unsigned long ea, int nb, + struct pt_regs *regs) { - int err; - unsigned long x, b, c; -#ifdef __LITTLE_ENDIAN__ - int len = nb; /* save a copy of the length for byte reversal */ -#endif + int err = 0; + int c; - /* unaligned, do this in pieces */ - x = 0; for (; nb > 0; nb -= c) { -#ifdef __LITTLE_ENDIAN__ - c = 1; -#endif -#ifdef __BIG_ENDIAN__ c = max_align(ea); -#endif if (c > nb) c = max_align(nb); - err = read_mem_aligned(&b, ea, c); - if (err) + switch (c) { + case 1: + err = __get_user(*dest, (unsigned char __user *) ea); + break; + case 2: + err = __get_user(*(u16 *)dest, + (unsigned short __user *) ea); + break; + case 4: + err = __get_user(*(u32 *)dest, + (unsigned int __user *) ea); + break; +#ifdef __powerpc64__ + case 8: + err = __get_user(*(unsigned long *)dest, + (unsigned long __user *) ea); + break; +#endif + } + if (err) { + regs->dar = ea; return err; - x = (x << (8 * c)) + b; + } + dest += c; ea += c; } -#ifdef __LITTLE_ENDIAN__ - switch (len) { - case 2: - *dest = byterev_2(x); - break; - case 4: - *dest = byterev_4(x); - break; -#ifdef __powerpc64__ - case 8: - *dest = byterev_8(x); - break; -#endif - } -#endif -#ifdef __BIG_ENDIAN__ - *dest = x; -#endif return 0; } +static nokprobe_inline int read_mem_unaligned(unsigned long *dest, + unsigned long ea, int nb, + struct pt_regs *regs) +{ + union { + unsigned long ul; + u8 b[sizeof(unsigned long)]; + } u; + int i; + int err; + + u.ul = 0; + i = IS_BE ? sizeof(unsigned long) - nb : 0; + err = copy_mem_in(&u.b[i], ea, nb, regs); + if (!err) + *dest = u.ul; + return err; +} + /* * Read memory at address ea for nb bytes, return 0 for success - * or -EFAULT if an error occurred. + * or -EFAULT if an error occurred. N.B. nb must be 1, 2, 4 or 8. + * If nb < sizeof(long), the result is right-justified on BE systems. */ static int read_mem(unsigned long *dest, unsigned long ea, int nb, struct pt_regs *regs) @@ -254,13 +347,14 @@ static int read_mem(unsigned long *dest, unsigned long ea, int nb, if (!address_ok(regs, ea, nb)) return -EFAULT; if ((ea & (nb - 1)) == 0) - return read_mem_aligned(dest, ea, nb); + return read_mem_aligned(dest, ea, nb, regs); return read_mem_unaligned(dest, ea, nb, regs); } NOKPROBE_SYMBOL(read_mem); static nokprobe_inline int write_mem_aligned(unsigned long val, - unsigned long ea, int nb) + unsigned long ea, int nb, + struct pt_regs *regs) { int err = 0; @@ -280,51 +374,72 @@ static nokprobe_inline int write_mem_aligned(unsigned long val, break; #endif } + if (err) + regs->dar = ea; return err; } -static nokprobe_inline int write_mem_unaligned(unsigned long val, - unsigned long ea, int nb, struct pt_regs *regs) +/* + * Copy from a buffer to userspace, using the largest possible + * aligned accesses, up to sizeof(long). 
+ */ +static int nokprobe_inline copy_mem_out(u8 *dest, unsigned long ea, int nb, + struct pt_regs *regs) { - int err; - unsigned long c; + int err = 0; + int c; -#ifdef __LITTLE_ENDIAN__ - switch (nb) { - case 2: - val = byterev_2(val); - break; - case 4: - val = byterev_4(val); - break; -#ifdef __powerpc64__ - case 8: - val = byterev_8(val); - break; -#endif - } -#endif - /* unaligned or little-endian, do this in pieces */ for (; nb > 0; nb -= c) { -#ifdef __LITTLE_ENDIAN__ - c = 1; -#endif -#ifdef __BIG_ENDIAN__ c = max_align(ea); -#endif if (c > nb) c = max_align(nb); - err = write_mem_aligned(val >> (nb - c) * 8, ea, c); - if (err) + switch (c) { + case 1: + err = __put_user(*dest, (unsigned char __user *) ea); + break; + case 2: + err = __put_user(*(u16 *)dest, + (unsigned short __user *) ea); + break; + case 4: + err = __put_user(*(u32 *)dest, + (unsigned int __user *) ea); + break; +#ifdef __powerpc64__ + case 8: + err = __put_user(*(unsigned long *)dest, + (unsigned long __user *) ea); + break; +#endif + } + if (err) { + regs->dar = ea; return err; + } + dest += c; ea += c; } return 0; } +static nokprobe_inline int write_mem_unaligned(unsigned long val, + unsigned long ea, int nb, + struct pt_regs *regs) +{ + union { + unsigned long ul; + u8 b[sizeof(unsigned long)]; + } u; + int i; + + u.ul = val; + i = IS_BE ? sizeof(unsigned long) - nb : 0; + return copy_mem_out(&u.b[i], ea, nb, regs); +} + /* * Write memory at address ea for nb bytes, return 0 for success - * or -EFAULT if an error occurred. + * or -EFAULT if an error occurred. N.B. nb must be 1, 2, 4 or 8. */ static int write_mem(unsigned long val, unsigned long ea, int nb, struct pt_regs *regs) @@ -332,163 +447,465 @@ static int write_mem(unsigned long val, unsigned long ea, int nb, if (!address_ok(regs, ea, nb)) return -EFAULT; if ((ea & (nb - 1)) == 0) - return write_mem_aligned(val, ea, nb); + return write_mem_aligned(val, ea, nb, regs); return write_mem_unaligned(val, ea, nb, regs); } NOKPROBE_SYMBOL(write_mem); #ifdef CONFIG_PPC_FPU /* - * Check the address and alignment, and call func to do the actual - * load or store. + * These access either the real FP register or the image in the + * thread_struct, depending on regs->msr & MSR_FP. 
*/ -static int do_fp_load(int rn, int (*func)(int, unsigned long), - unsigned long ea, int nb, - struct pt_regs *regs) +static int do_fp_load(struct instruction_op *op, unsigned long ea, + struct pt_regs *regs, bool cross_endian) { - int err; + int err, rn, nb; union { - double dbl; - unsigned long ul[2]; - struct { -#ifdef __BIG_ENDIAN__ - unsigned _pad_; - unsigned word; -#endif -#ifdef __LITTLE_ENDIAN__ - unsigned word; - unsigned _pad_; -#endif - } single; - } data; - unsigned long ptr; + int i; + unsigned int u; + float f; + double d[2]; + unsigned long l[2]; + u8 b[2 * sizeof(double)]; + } u; + nb = GETSIZE(op->type); if (!address_ok(regs, ea, nb)) return -EFAULT; - if ((ea & 3) == 0) - return (*func)(rn, ea); - ptr = (unsigned long) &data.ul; - if (sizeof(unsigned long) == 8 || nb == 4) { - err = read_mem_unaligned(&data.ul[0], ea, nb, regs); - if (nb == 4) - ptr = (unsigned long)&(data.single.word); - } else { - /* reading a double on 32-bit */ - err = read_mem_unaligned(&data.ul[0], ea, 4, regs); - if (!err) - err = read_mem_unaligned(&data.ul[1], ea + 4, 4, regs); - } + rn = op->reg; + err = copy_mem_in(u.b, ea, nb, regs); if (err) return err; - return (*func)(rn, ptr); + if (unlikely(cross_endian)) { + do_byte_reverse(u.b, min(nb, 8)); + if (nb == 16) + do_byte_reverse(&u.b[8], 8); + } + preempt_disable(); + if (nb == 4) { + if (op->type & FPCONV) + conv_sp_to_dp(&u.f, &u.d[0]); + else if (op->type & SIGNEXT) + u.l[0] = u.i; + else + u.l[0] = u.u; + } + if (regs->msr & MSR_FP) + put_fpr(rn, &u.d[0]); + else + current->thread.TS_FPR(rn) = u.l[0]; + if (nb == 16) { + /* lfdp */ + rn |= 1; + if (regs->msr & MSR_FP) + put_fpr(rn, &u.d[1]); + else + current->thread.TS_FPR(rn) = u.l[1]; + } + preempt_enable(); + return 0; } NOKPROBE_SYMBOL(do_fp_load); -static int do_fp_store(int rn, int (*func)(int, unsigned long), - unsigned long ea, int nb, - struct pt_regs *regs) +static int do_fp_store(struct instruction_op *op, unsigned long ea, + struct pt_regs *regs, bool cross_endian) { - int err; + int rn, nb; union { - double dbl; - unsigned long ul[2]; - struct { -#ifdef __BIG_ENDIAN__ - unsigned _pad_; - unsigned word; -#endif -#ifdef __LITTLE_ENDIAN__ - unsigned word; - unsigned _pad_; -#endif - } single; - } data; - unsigned long ptr; + unsigned int u; + float f; + double d[2]; + unsigned long l[2]; + u8 b[2 * sizeof(double)]; + } u; + nb = GETSIZE(op->type); if (!address_ok(regs, ea, nb)) return -EFAULT; - if ((ea & 3) == 0) - return (*func)(rn, ea); - ptr = (unsigned long) &data.ul[0]; - if (sizeof(unsigned long) == 8 || nb == 4) { - if (nb == 4) - ptr = (unsigned long)&(data.single.word); - err = (*func)(rn, ptr); - if (err) - return err; - err = write_mem_unaligned(data.ul[0], ea, nb, regs); - } else { - /* writing a double on 32-bit */ - err = (*func)(rn, ptr); - if (err) - return err; - err = write_mem_unaligned(data.ul[0], ea, 4, regs); - if (!err) - err = write_mem_unaligned(data.ul[1], ea + 4, 4, regs); + rn = op->reg; + preempt_disable(); + if (regs->msr & MSR_FP) + get_fpr(rn, &u.d[0]); + else + u.l[0] = current->thread.TS_FPR(rn); + if (nb == 4) { + if (op->type & FPCONV) + conv_dp_to_sp(&u.d[0], &u.f); + else + u.u = u.l[0]; } - return err; + if (nb == 16) { + rn |= 1; + if (regs->msr & MSR_FP) + get_fpr(rn, &u.d[1]); + else + u.l[1] = current->thread.TS_FPR(rn); + } + preempt_enable(); + if (unlikely(cross_endian)) { + do_byte_reverse(u.b, min(nb, 8)); + if (nb == 16) + do_byte_reverse(&u.b[8], 8); + } + return copy_mem_out(u.b, ea, nb, regs); } 
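/*
 * Editor's note -- illustrative sketch only, not part of the patch.
 * The reworked do_fp_load()/do_fp_store() above funnel every access
 * through a byte buffer and call do_byte_reverse() when the emulated
 * context runs with the opposite endianness to the kernel (the
 * "cross_endian" case), instead of calling per-size asm helpers.
 * The minimal user-space model below shows that idea for a 4-byte
 * single-precision load; swap32() and load_float_cross() are
 * hypothetical names used only for this example.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static uint32_t swap32(uint32_t x)
{
	return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
	       ((x & 0xff00) << 8) | ((x & 0xff) << 24);
}

/* Read 4 bytes from 'mem' as a float, optionally byte-reversing first. */
static float load_float_cross(const void *mem, int cross_endian)
{
	union {
		uint32_t u;
		float f;
		uint8_t b[4];
	} u;

	memcpy(u.b, mem, 4);		/* analogous to copy_mem_in() into u.b */
	if (cross_endian)
		u.u = swap32(u.u);	/* analogous to do_byte_reverse(u.b, 4) */
	return u.f;			/* the kernel code would then widen sp->dp */
}

int main(void)
{
	float x = 1.5f;

	printf("same-endian: %g, cross-endian: %g\n",
	       load_float_cross(&x, 0), load_float_cross(&x, 1));
	return 0;
}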
NOKPROBE_SYMBOL(do_fp_store); #endif #ifdef CONFIG_ALTIVEC /* For Altivec/VMX, no need to worry about alignment */ -static nokprobe_inline int do_vec_load(int rn, int (*func)(int, unsigned long), - unsigned long ea, struct pt_regs *regs) +static nokprobe_inline int do_vec_load(int rn, unsigned long ea, + int size, struct pt_regs *regs, + bool cross_endian) { + int err; + union { + __vector128 v; + u8 b[sizeof(__vector128)]; + } u = {}; + if (!address_ok(regs, ea & ~0xfUL, 16)) return -EFAULT; - return (*func)(rn, ea); + /* align to multiple of size */ + ea &= ~(size - 1); + err = copy_mem_in(&u.b[ea & 0xf], ea, size, regs); + if (err) + return err; + if (unlikely(cross_endian)) + do_byte_reverse(&u.b[ea & 0xf], size); + preempt_disable(); + if (regs->msr & MSR_VEC) + put_vr(rn, &u.v); + else + current->thread.vr_state.vr[rn] = u.v; + preempt_enable(); + return 0; } -static nokprobe_inline int do_vec_store(int rn, int (*func)(int, unsigned long), - unsigned long ea, struct pt_regs *regs) +static nokprobe_inline int do_vec_store(int rn, unsigned long ea, + int size, struct pt_regs *regs, + bool cross_endian) { + union { + __vector128 v; + u8 b[sizeof(__vector128)]; + } u; + if (!address_ok(regs, ea & ~0xfUL, 16)) return -EFAULT; - return (*func)(rn, ea); + /* align to multiple of size */ + ea &= ~(size - 1); + + preempt_disable(); + if (regs->msr & MSR_VEC) + get_vr(rn, &u.v); + else + u.v = current->thread.vr_state.vr[rn]; + preempt_enable(); + if (unlikely(cross_endian)) + do_byte_reverse(&u.b[ea & 0xf], size); + return copy_mem_out(&u.b[ea & 0xf], ea, size, regs); } #endif /* CONFIG_ALTIVEC */ -#ifdef CONFIG_VSX -static nokprobe_inline int do_vsx_load(int rn, int (*func)(int, unsigned long), - unsigned long ea, struct pt_regs *regs) +#ifdef __powerpc64__ +static nokprobe_inline int emulate_lq(struct pt_regs *regs, unsigned long ea, + int reg, bool cross_endian) { int err; - unsigned long val[2]; if (!address_ok(regs, ea, 16)) return -EFAULT; - if ((ea & 3) == 0) - return (*func)(rn, ea); - err = read_mem_unaligned(&val[0], ea, 8, regs); - if (!err) - err = read_mem_unaligned(&val[1], ea + 8, 8, regs); - if (!err) - err = (*func)(rn, (unsigned long) &val[0]); + /* if aligned, should be atomic */ + if ((ea & 0xf) == 0) { + err = do_lq(ea, ®s->gpr[reg]); + } else { + err = read_mem(®s->gpr[reg + IS_LE], ea, 8, regs); + if (!err) + err = read_mem(®s->gpr[reg + IS_BE], ea + 8, 8, regs); + } + if (!err && unlikely(cross_endian)) + do_byte_reverse(®s->gpr[reg], 16); return err; } -static nokprobe_inline int do_vsx_store(int rn, int (*func)(int, unsigned long), - unsigned long ea, struct pt_regs *regs) +static nokprobe_inline int emulate_stq(struct pt_regs *regs, unsigned long ea, + int reg, bool cross_endian) { int err; - unsigned long val[2]; + unsigned long vals[2]; if (!address_ok(regs, ea, 16)) return -EFAULT; - if ((ea & 3) == 0) - return (*func)(rn, ea); - err = (*func)(rn, (unsigned long) &val[0]); - if (err) - return err; - err = write_mem_unaligned(val[0], ea, 8, regs); + vals[0] = regs->gpr[reg]; + vals[1] = regs->gpr[reg + 1]; + if (unlikely(cross_endian)) + do_byte_reverse(vals, 16); + + /* if aligned, should be atomic */ + if ((ea & 0xf) == 0) + return do_stq(ea, vals[0], vals[1]); + + err = write_mem(vals[IS_LE], ea, 8, regs); if (!err) - err = write_mem_unaligned(val[1], ea + 8, 8, regs); + err = write_mem(vals[IS_BE], ea + 8, 8, regs); return err; } +#endif /* __powerpc64 */ + +#ifdef CONFIG_VSX +void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg, + const void 
*mem, bool rev) +{ + int size, read_size; + int i, j; + const unsigned int *wp; + const unsigned short *hp; + const unsigned char *bp; + + size = GETSIZE(op->type); + reg->d[0] = reg->d[1] = 0; + + switch (op->element_size) { + case 16: + /* whole vector; lxv[x] or lxvl[l] */ + if (size == 0) + break; + memcpy(reg, mem, size); + if (IS_LE && (op->vsx_flags & VSX_LDLEFT)) + rev = !rev; + if (rev) + do_byte_reverse(reg, 16); + break; + case 8: + /* scalar loads, lxvd2x, lxvdsx */ + read_size = (size >= 8) ? 8 : size; + i = IS_LE ? 8 : 8 - read_size; + memcpy(®->b[i], mem, read_size); + if (rev) + do_byte_reverse(®->b[i], 8); + if (size < 8) { + if (op->type & SIGNEXT) { + /* size == 4 is the only case here */ + reg->d[IS_LE] = (signed int) reg->d[IS_LE]; + } else if (op->vsx_flags & VSX_FPCONV) { + preempt_disable(); + conv_sp_to_dp(®->fp[1 + IS_LE], + ®->dp[IS_LE]); + preempt_enable(); + } + } else { + if (size == 16) { + unsigned long v = *(unsigned long *)(mem + 8); + reg->d[IS_BE] = !rev ? v : byterev_8(v); + } else if (op->vsx_flags & VSX_SPLAT) + reg->d[IS_BE] = reg->d[IS_LE]; + } + break; + case 4: + /* lxvw4x, lxvwsx */ + wp = mem; + for (j = 0; j < size / 4; ++j) { + i = IS_LE ? 3 - j : j; + reg->w[i] = !rev ? *wp++ : byterev_4(*wp++); + } + if (op->vsx_flags & VSX_SPLAT) { + u32 val = reg->w[IS_LE ? 3 : 0]; + for (; j < 4; ++j) { + i = IS_LE ? 3 - j : j; + reg->w[i] = val; + } + } + break; + case 2: + /* lxvh8x */ + hp = mem; + for (j = 0; j < size / 2; ++j) { + i = IS_LE ? 7 - j : j; + reg->h[i] = !rev ? *hp++ : byterev_2(*hp++); + } + break; + case 1: + /* lxvb16x */ + bp = mem; + for (j = 0; j < size; ++j) { + i = IS_LE ? 15 - j : j; + reg->b[i] = *bp++; + } + break; + } +} +EXPORT_SYMBOL_GPL(emulate_vsx_load); +NOKPROBE_SYMBOL(emulate_vsx_load); + +void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg, + void *mem, bool rev) +{ + int size, write_size; + int i, j; + union vsx_reg buf; + unsigned int *wp; + unsigned short *hp; + unsigned char *bp; + + size = GETSIZE(op->type); + + switch (op->element_size) { + case 16: + /* stxv, stxvx, stxvl, stxvll */ + if (size == 0) + break; + if (IS_LE && (op->vsx_flags & VSX_LDLEFT)) + rev = !rev; + if (rev) { + /* reverse 16 bytes */ + buf.d[0] = byterev_8(reg->d[1]); + buf.d[1] = byterev_8(reg->d[0]); + reg = &buf; + } + memcpy(mem, reg, size); + break; + case 8: + /* scalar stores, stxvd2x */ + write_size = (size >= 8) ? 8 : size; + i = IS_LE ? 8 : 8 - write_size; + if (size < 8 && op->vsx_flags & VSX_FPCONV) { + buf.d[0] = buf.d[1] = 0; + preempt_disable(); + conv_dp_to_sp(®->dp[IS_LE], &buf.fp[1 + IS_LE]); + preempt_enable(); + reg = &buf; + } + memcpy(mem, ®->b[i], write_size); + if (size == 16) + memcpy(mem + 8, ®->d[IS_BE], 8); + if (unlikely(rev)) { + do_byte_reverse(mem, write_size); + if (size == 16) + do_byte_reverse(mem + 8, 8); + } + break; + case 4: + /* stxvw4x */ + wp = mem; + for (j = 0; j < size / 4; ++j) { + i = IS_LE ? 3 - j : j; + *wp++ = !rev ? reg->w[i] : byterev_4(reg->w[i]); + } + break; + case 2: + /* stxvh8x */ + hp = mem; + for (j = 0; j < size / 2; ++j) { + i = IS_LE ? 7 - j : j; + *hp++ = !rev ? reg->h[i] : byterev_2(reg->h[i]); + } + break; + case 1: + /* stvxb16x */ + bp = mem; + for (j = 0; j < size; ++j) { + i = IS_LE ? 
15 - j : j; + *bp++ = reg->b[i]; + } + break; + } +} +EXPORT_SYMBOL_GPL(emulate_vsx_store); +NOKPROBE_SYMBOL(emulate_vsx_store); + +static nokprobe_inline int do_vsx_load(struct instruction_op *op, + unsigned long ea, struct pt_regs *regs, + bool cross_endian) +{ + int reg = op->reg; + u8 mem[16]; + union vsx_reg buf; + int size = GETSIZE(op->type); + + if (!address_ok(regs, ea, size) || copy_mem_in(mem, ea, size, regs)) + return -EFAULT; + + emulate_vsx_load(op, &buf, mem, cross_endian); + preempt_disable(); + if (reg < 32) { + /* FP regs + extensions */ + if (regs->msr & MSR_FP) { + load_vsrn(reg, &buf); + } else { + current->thread.fp_state.fpr[reg][0] = buf.d[0]; + current->thread.fp_state.fpr[reg][1] = buf.d[1]; + } + } else { + if (regs->msr & MSR_VEC) + load_vsrn(reg, &buf); + else + current->thread.vr_state.vr[reg - 32] = buf.v; + } + preempt_enable(); + return 0; +} + +static nokprobe_inline int do_vsx_store(struct instruction_op *op, + unsigned long ea, struct pt_regs *regs, + bool cross_endian) +{ + int reg = op->reg; + u8 mem[16]; + union vsx_reg buf; + int size = GETSIZE(op->type); + + if (!address_ok(regs, ea, size)) + return -EFAULT; + + preempt_disable(); + if (reg < 32) { + /* FP regs + extensions */ + if (regs->msr & MSR_FP) { + store_vsrn(reg, &buf); + } else { + buf.d[0] = current->thread.fp_state.fpr[reg][0]; + buf.d[1] = current->thread.fp_state.fpr[reg][1]; + } + } else { + if (regs->msr & MSR_VEC) + store_vsrn(reg, &buf); + else + buf.v = current->thread.vr_state.vr[reg - 32]; + } + preempt_enable(); + emulate_vsx_store(op, &buf, mem, cross_endian); + return copy_mem_out(mem, ea, size, regs); +} #endif /* CONFIG_VSX */ +int emulate_dcbz(unsigned long ea, struct pt_regs *regs) +{ + int err; + unsigned long i, size; + +#ifdef __powerpc64__ + size = ppc64_caches.l1d.block_size; + if (!(regs->msr & MSR_64BIT)) + ea &= 0xffffffffUL; +#else + size = L1_CACHE_BYTES; +#endif + ea &= ~(size - 1); + if (!address_ok(regs, ea, size)) + return -EFAULT; + for (i = 0; i < size; i += sizeof(long)) { + err = __put_user(0, (unsigned long __user *) (ea + i)); + if (err) { + regs->dar = ea; + return err; + } + } + return 0; +} +NOKPROBE_SYMBOL(emulate_dcbz); + #define __put_user_asmx(x, addr, err, op, cr) \ __asm__ __volatile__( \ "1: " op " %2,0,%3\n" \ @@ -526,24 +943,27 @@ static nokprobe_inline int do_vsx_store(int rn, int (*func)(int, unsigned long), : "=r" (err) \ : "r" (addr), "i" (-EFAULT), "0" (err)) -static nokprobe_inline void set_cr0(struct pt_regs *regs, int rd) +static nokprobe_inline void set_cr0(const struct pt_regs *regs, + struct instruction_op *op) { - long val = regs->gpr[rd]; + long val = op->val; - regs->ccr = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000); + op->type |= SETCC; + op->ccval = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000); #ifdef __powerpc64__ if (!(regs->msr & MSR_64BIT)) val = (int) val; #endif if (val < 0) - regs->ccr |= 0x80000000; + op->ccval |= 0x80000000; else if (val > 0) - regs->ccr |= 0x40000000; + op->ccval |= 0x40000000; else - regs->ccr |= 0x20000000; + op->ccval |= 0x20000000; } -static nokprobe_inline void add_with_carry(struct pt_regs *regs, int rd, +static nokprobe_inline void add_with_carry(const struct pt_regs *regs, + struct instruction_op *op, int rd, unsigned long val1, unsigned long val2, unsigned long carry_in) { @@ -551,24 +971,29 @@ static nokprobe_inline void add_with_carry(struct pt_regs *regs, int rd, if (carry_in) ++val; - regs->gpr[rd] = val; + op->type = COMPUTE + SETREG + SETXER; + op->reg = 
rd; + op->val = val; #ifdef __powerpc64__ if (!(regs->msr & MSR_64BIT)) { val = (unsigned int) val; val1 = (unsigned int) val1; } #endif + op->xerval = regs->xer; if (val < val1 || (carry_in && val == val1)) - regs->xer |= XER_CA; + op->xerval |= XER_CA; else - regs->xer &= ~XER_CA; + op->xerval &= ~XER_CA; } -static nokprobe_inline void do_cmp_signed(struct pt_regs *regs, long v1, long v2, - int crfld) +static nokprobe_inline void do_cmp_signed(const struct pt_regs *regs, + struct instruction_op *op, + long v1, long v2, int crfld) { unsigned int crval, shift; + op->type = COMPUTE + SETCC; crval = (regs->xer >> 31) & 1; /* get SO bit */ if (v1 < v2) crval |= 8; @@ -577,14 +1002,17 @@ static nokprobe_inline void do_cmp_signed(struct pt_regs *regs, long v1, long v2 else crval |= 2; shift = (7 - crfld) * 4; - regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift); + op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift); } -static nokprobe_inline void do_cmp_unsigned(struct pt_regs *regs, unsigned long v1, - unsigned long v2, int crfld) +static nokprobe_inline void do_cmp_unsigned(const struct pt_regs *regs, + struct instruction_op *op, + unsigned long v1, + unsigned long v2, int crfld) { unsigned int crval, shift; + op->type = COMPUTE + SETCC; crval = (regs->xer >> 31) & 1; /* get SO bit */ if (v1 < v2) crval |= 8; @@ -593,7 +1021,90 @@ static nokprobe_inline void do_cmp_unsigned(struct pt_regs *regs, unsigned long else crval |= 2; shift = (7 - crfld) * 4; - regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift); + op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift); +} + +static nokprobe_inline void do_cmpb(const struct pt_regs *regs, + struct instruction_op *op, + unsigned long v1, unsigned long v2) +{ + unsigned long long out_val, mask; + int i; + + out_val = 0; + for (i = 0; i < 8; i++) { + mask = 0xffUL << (i * 8); + if ((v1 & mask) == (v2 & mask)) + out_val |= mask; + } + op->val = out_val; +} + +/* + * The size parameter is used to adjust the equivalent popcnt instruction. + * popcntb = 8, popcntw = 32, popcntd = 64 + */ +static nokprobe_inline void do_popcnt(const struct pt_regs *regs, + struct instruction_op *op, + unsigned long v1, int size) +{ + unsigned long long out = v1; + + out -= (out >> 1) & 0x5555555555555555; + out = (0x3333333333333333 & out) + (0x3333333333333333 & (out >> 2)); + out = (out + (out >> 4)) & 0x0f0f0f0f0f0f0f0f; + + if (size == 8) { /* popcntb */ + op->val = out; + return; + } + out += out >> 8; + out += out >> 16; + if (size == 32) { /* popcntw */ + op->val = out & 0x0000003f0000003f; + return; + } + + out = (out + (out >> 32)) & 0x7f; + op->val = out; /* popcntd */ +} + +#ifdef CONFIG_PPC64 +static nokprobe_inline void do_bpermd(const struct pt_regs *regs, + struct instruction_op *op, + unsigned long v1, unsigned long v2) +{ + unsigned char perm, idx; + unsigned int i; + + perm = 0; + for (i = 0; i < 8; i++) { + idx = (v1 >> (i * 8)) & 0xff; + if (idx < 64) + if (v2 & PPC_BIT(idx)) + perm |= 1 << i; + } + op->val = perm; +} +#endif /* CONFIG_PPC64 */ +/* + * The size parameter adjusts the equivalent prty instruction. 
+ * prtyw = 32, prtyd = 64 + */ +static nokprobe_inline void do_prty(const struct pt_regs *regs, + struct instruction_op *op, + unsigned long v, int size) +{ + unsigned long long res = v ^ (v >> 8); + + res ^= res >> 16; + if (size == 32) { /* prtyw */ + op->val = res & 0x0000000100000001; + return; + } + + res ^= res >> 32; + op->val = res & 1; /*prtyd */ } static nokprobe_inline int trap_compare(long v1, long v2) @@ -629,14 +1140,18 @@ static nokprobe_inline int trap_compare(long v1, long v2) #define ROTATE(x, n) ((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x)) /* - * Decode an instruction, and execute it if that can be done just by - * modifying *regs (i.e. integer arithmetic and logical instructions, - * branches, and barrier instructions). - * Returns 1 if the instruction has been executed, or 0 if not. - * Sets *op to indicate what the instruction does. + * Decode an instruction, and return information about it in *op + * without changing *regs. + * Integer arithmetic and logical instructions, branches, and barrier + * instructions can be emulated just using the information in *op. + * + * Return value is 1 if the instruction can be emulated just by + * updating *regs with the information in *op, -1 if we need the + * GPRs but *regs doesn't contain the full register set, or 0 + * otherwise. */ -int analyse_instr(struct instruction_op *op, struct pt_regs *regs, - unsigned int instr) +int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, + unsigned int instr) { unsigned int opcode, ra, rb, rd, spr, u; unsigned long int imm; @@ -653,12 +1168,11 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs, imm = (signed short)(instr & 0xfffc); if ((instr & 2) == 0) imm += regs->nip; - regs->nip += 4; - regs->nip = truncate_if_32bit(regs->msr, regs->nip); + op->val = truncate_if_32bit(regs->msr, imm); if (instr & 1) - regs->link = regs->nip; - if (branch_taken(instr, regs)) - regs->nip = truncate_if_32bit(regs->msr, imm); + op->type |= SETLK; + if (branch_taken(instr, regs, op)) + op->type |= BRTAKEN; return 1; #ifdef CONFIG_PPC64 case 17: /* sc */ @@ -669,38 +1183,37 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs, return 0; #endif case 18: /* b */ - op->type = BRANCH; + op->type = BRANCH | BRTAKEN; imm = instr & 0x03fffffc; if (imm & 0x02000000) imm -= 0x04000000; if ((instr & 2) == 0) imm += regs->nip; + op->val = truncate_if_32bit(regs->msr, imm); if (instr & 1) - regs->link = truncate_if_32bit(regs->msr, regs->nip + 4); - imm = truncate_if_32bit(regs->msr, imm); - regs->nip = imm; + op->type |= SETLK; return 1; case 19: switch ((instr >> 1) & 0x3ff) { case 0: /* mcrf */ + op->type = COMPUTE + SETCC; rd = 7 - ((instr >> 23) & 0x7); ra = 7 - ((instr >> 18) & 0x7); rd *= 4; ra *= 4; val = (regs->ccr >> ra) & 0xf; - regs->ccr = (regs->ccr & ~(0xfUL << rd)) | (val << rd); - goto instr_done; + op->ccval = (regs->ccr & ~(0xfUL << rd)) | (val << rd); + return 1; case 16: /* bclr */ case 528: /* bcctr */ op->type = BRANCH; imm = (instr & 0x400)? 
regs->ctr: regs->link; - regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4); - imm = truncate_if_32bit(regs->msr, imm); + op->val = truncate_if_32bit(regs->msr, imm); if (instr & 1) - regs->link = regs->nip; - if (branch_taken(instr, regs)) - regs->nip = imm; + op->type |= SETLK; + if (branch_taken(instr, regs, op)) + op->type |= BRTAKEN; return 1; case 18: /* rfid, scary */ @@ -710,9 +1223,8 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs, return 0; case 150: /* isync */ - op->type = BARRIER; - isync(); - goto instr_done; + op->type = BARRIER | BARRIER_ISYNC; + return 1; case 33: /* crnor */ case 129: /* crandc */ @@ -722,45 +1234,44 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs, case 289: /* creqv */ case 417: /* crorc */ case 449: /* cror */ + op->type = COMPUTE + SETCC; ra = (instr >> 16) & 0x1f; rb = (instr >> 11) & 0x1f; rd = (instr >> 21) & 0x1f; ra = (regs->ccr >> (31 - ra)) & 1; rb = (regs->ccr >> (31 - rb)) & 1; val = (instr >> (6 + ra * 2 + rb)) & 1; - regs->ccr = (regs->ccr & ~(1UL << (31 - rd))) | + op->ccval = (regs->ccr & ~(1UL << (31 - rd))) | (val << (31 - rd)); - goto instr_done; + return 1; } break; case 31: switch ((instr >> 1) & 0x3ff) { case 598: /* sync */ - op->type = BARRIER; + op->type = BARRIER + BARRIER_SYNC; #ifdef __powerpc64__ switch ((instr >> 21) & 3) { case 1: /* lwsync */ - asm volatile("lwsync" : : : "memory"); - goto instr_done; + op->type = BARRIER + BARRIER_LWSYNC; + break; case 2: /* ptesync */ - asm volatile("ptesync" : : : "memory"); - goto instr_done; + op->type = BARRIER + BARRIER_PTESYNC; + break; } #endif - mb(); - goto instr_done; + return 1; case 854: /* eieio */ - op->type = BARRIER; - eieio(); - goto instr_done; + op->type = BARRIER + BARRIER_EIEIO; + return 1; } break; } /* Following cases refer to regs->gpr[], so we need all regs */ if (!FULL_REGS(regs)) - return 0; + return -1; rd = (instr >> 21) & 0x1f; ra = (instr >> 16) & 0x1f; @@ -771,21 +1282,21 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs, case 2: /* tdi */ if (rd & trap_compare(regs->gpr[ra], (short) instr)) goto trap; - goto instr_done; + return 1; #endif case 3: /* twi */ if (rd & trap_compare((int)regs->gpr[ra], (short) instr)) goto trap; - goto instr_done; + return 1; case 7: /* mulli */ - regs->gpr[rd] = regs->gpr[ra] * (short) instr; - goto instr_done; + op->val = regs->gpr[ra] * (short) instr; + goto compute_done; case 8: /* subfic */ imm = (short) instr; - add_with_carry(regs, rd, ~regs->gpr[ra], imm, 1); - goto instr_done; + add_with_carry(regs, op, rd, ~regs->gpr[ra], imm, 1); + return 1; case 10: /* cmpli */ imm = (unsigned short) instr; @@ -794,8 +1305,8 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs, if ((rd & 1) == 0) val = (unsigned int) val; #endif - do_cmp_unsigned(regs, val, imm, rd >> 2); - goto instr_done; + do_cmp_unsigned(regs, op, val, imm, rd >> 2); + return 1; case 11: /* cmpi */ imm = (short) instr; @@ -804,47 +1315,58 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs, if ((rd & 1) == 0) val = (int) val; #endif - do_cmp_signed(regs, val, imm, rd >> 2); - goto instr_done; + do_cmp_signed(regs, op, val, imm, rd >> 2); + return 1; case 12: /* addic */ imm = (short) instr; - add_with_carry(regs, rd, regs->gpr[ra], imm, 0); - goto instr_done; + add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0); + return 1; case 13: /* addic. 
*/ imm = (short) instr; - add_with_carry(regs, rd, regs->gpr[ra], imm, 0); - set_cr0(regs, rd); - goto instr_done; + add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0); + set_cr0(regs, op); + return 1; case 14: /* addi */ imm = (short) instr; if (ra) imm += regs->gpr[ra]; - regs->gpr[rd] = imm; - goto instr_done; + op->val = imm; + goto compute_done; case 15: /* addis */ imm = ((short) instr) << 16; if (ra) imm += regs->gpr[ra]; - regs->gpr[rd] = imm; - goto instr_done; + op->val = imm; + goto compute_done; + + case 19: + if (((instr >> 1) & 0x1f) == 2) { + /* addpcis */ + imm = (short) (instr & 0xffc1); /* d0 + d2 fields */ + imm |= (instr >> 15) & 0x3e; /* d1 field */ + op->val = regs->nip + (imm << 16) + 4; + goto compute_done; + } + op->type = UNKNOWN; + return 0; case 20: /* rlwimi */ mb = (instr >> 6) & 0x1f; me = (instr >> 1) & 0x1f; val = DATA32(regs->gpr[rd]); imm = MASK32(mb, me); - regs->gpr[ra] = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm); + op->val = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm); goto logical_done; case 21: /* rlwinm */ mb = (instr >> 6) & 0x1f; me = (instr >> 1) & 0x1f; val = DATA32(regs->gpr[rd]); - regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me); + op->val = ROTATE(val, rb) & MASK32(mb, me); goto logical_done; case 23: /* rlwnm */ @@ -852,40 +1374,37 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs, me = (instr >> 1) & 0x1f; rb = regs->gpr[rb] & 0x1f; val = DATA32(regs->gpr[rd]); - regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me); + op->val = ROTATE(val, rb) & MASK32(mb, me); goto logical_done; case 24: /* ori */ - imm = (unsigned short) instr; - regs->gpr[ra] = regs->gpr[rd] | imm; - goto instr_done; + op->val = regs->gpr[rd] | (unsigned short) instr; + goto logical_done_nocc; case 25: /* oris */ imm = (unsigned short) instr; - regs->gpr[ra] = regs->gpr[rd] | (imm << 16); - goto instr_done; + op->val = regs->gpr[rd] | (imm << 16); + goto logical_done_nocc; case 26: /* xori */ - imm = (unsigned short) instr; - regs->gpr[ra] = regs->gpr[rd] ^ imm; - goto instr_done; + op->val = regs->gpr[rd] ^ (unsigned short) instr; + goto logical_done_nocc; case 27: /* xoris */ imm = (unsigned short) instr; - regs->gpr[ra] = regs->gpr[rd] ^ (imm << 16); - goto instr_done; + op->val = regs->gpr[rd] ^ (imm << 16); + goto logical_done_nocc; case 28: /* andi. */ - imm = (unsigned short) instr; - regs->gpr[ra] = regs->gpr[rd] & imm; - set_cr0(regs, ra); - goto instr_done; + op->val = regs->gpr[rd] & (unsigned short) instr; + set_cr0(regs, op); + goto logical_done_nocc; case 29: /* andis. 
*/ imm = (unsigned short) instr; - regs->gpr[ra] = regs->gpr[rd] & (imm << 16); - set_cr0(regs, ra); - goto instr_done; + op->val = regs->gpr[rd] & (imm << 16); + set_cr0(regs, op); + goto logical_done_nocc; #ifdef __powerpc64__ case 30: /* rld* */ @@ -896,48 +1415,60 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs, val = ROTATE(val, sh); switch ((instr >> 2) & 3) { case 0: /* rldicl */ - regs->gpr[ra] = val & MASK64_L(mb); - goto logical_done; + val &= MASK64_L(mb); + break; case 1: /* rldicr */ - regs->gpr[ra] = val & MASK64_R(mb); - goto logical_done; + val &= MASK64_R(mb); + break; case 2: /* rldic */ - regs->gpr[ra] = val & MASK64(mb, 63 - sh); - goto logical_done; + val &= MASK64(mb, 63 - sh); + break; case 3: /* rldimi */ imm = MASK64(mb, 63 - sh); - regs->gpr[ra] = (regs->gpr[ra] & ~imm) | + val = (regs->gpr[ra] & ~imm) | (val & imm); - goto logical_done; } + op->val = val; + goto logical_done; } else { sh = regs->gpr[rb] & 0x3f; val = ROTATE(val, sh); switch ((instr >> 1) & 7) { case 0: /* rldcl */ - regs->gpr[ra] = val & MASK64_L(mb); + op->val = val & MASK64_L(mb); goto logical_done; case 1: /* rldcr */ - regs->gpr[ra] = val & MASK64_R(mb); + op->val = val & MASK64_R(mb); goto logical_done; } } #endif - break; /* illegal instruction */ + op->type = UNKNOWN; /* illegal instruction */ + return 0; case 31: + /* isel occupies 32 minor opcodes */ + if (((instr >> 1) & 0x1f) == 15) { + mb = (instr >> 6) & 0x1f; /* bc field */ + val = (regs->ccr >> (31 - mb)) & 1; + val2 = (ra) ? regs->gpr[ra] : 0; + + op->val = (val) ? val2 : regs->gpr[rb]; + goto compute_done; + } + switch ((instr >> 1) & 0x3ff) { case 4: /* tw */ if (rd == 0x1f || (rd & trap_compare((int)regs->gpr[ra], (int)regs->gpr[rb]))) goto trap; - goto instr_done; + return 1; #ifdef __powerpc64__ case 68: /* td */ if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb])) goto trap; - goto instr_done; + return 1; #endif case 83: /* mfmsr */ if (regs->msr & MSR_PR) @@ -966,74 +1497,50 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs, #endif case 19: /* mfcr */ + imm = 0xffffffffUL; if ((instr >> 20) & 1) { imm = 0xf0000000UL; for (sh = 0; sh < 8; ++sh) { - if (instr & (0x80000 >> sh)) { - regs->gpr[rd] = regs->ccr & imm; + if (instr & (0x80000 >> sh)) break; - } imm >>= 4; } - - goto instr_done; } - - regs->gpr[rd] = regs->ccr; - regs->gpr[rd] &= 0xffffffffUL; - goto instr_done; + op->val = regs->ccr & imm; + goto compute_done; case 144: /* mtcrf */ + op->type = COMPUTE + SETCC; imm = 0xf0000000UL; val = regs->gpr[rd]; + op->ccval = regs->ccr; for (sh = 0; sh < 8; ++sh) { if (instr & (0x80000 >> sh)) - regs->ccr = (regs->ccr & ~imm) | + op->ccval = (op->ccval & ~imm) | (val & imm); imm >>= 4; } - goto instr_done; + return 1; case 339: /* mfspr */ spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0); - switch (spr) { - case SPRN_XER: /* mfxer */ - regs->gpr[rd] = regs->xer; - regs->gpr[rd] &= 0xffffffffUL; - goto instr_done; - case SPRN_LR: /* mflr */ - regs->gpr[rd] = regs->link; - goto instr_done; - case SPRN_CTR: /* mfctr */ - regs->gpr[rd] = regs->ctr; - goto instr_done; - default: - op->type = MFSPR; - op->reg = rd; - op->spr = spr; - return 0; - } - break; + op->type = MFSPR; + op->reg = rd; + op->spr = spr; + if (spr == SPRN_XER || spr == SPRN_LR || + spr == SPRN_CTR) + return 1; + return 0; case 467: /* mtspr */ spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0); - switch (spr) { - case SPRN_XER: /* mtxer */ - regs->xer = (regs->gpr[rd] & 0xffffffffUL); - goto instr_done; - case 
SPRN_LR: /* mtlr */ - regs->link = regs->gpr[rd]; - goto instr_done; - case SPRN_CTR: /* mtctr */ - regs->ctr = regs->gpr[rd]; - goto instr_done; - default: - op->type = MTSPR; - op->val = regs->gpr[rd]; - op->spr = spr; - return 0; - } - break; + op->type = MTSPR; + op->val = regs->gpr[rd]; + op->spr = spr; + if (spr == SPRN_XER || spr == SPRN_LR || + spr == SPRN_CTR) + return 1; + return 0; /* * Compare instructions @@ -1048,8 +1555,8 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs, val2 = (int) val2; } #endif - do_cmp_signed(regs, val, val2, rd >> 2); - goto instr_done; + do_cmp_signed(regs, op, val, val2, rd >> 2); + return 1; case 32: /* cmpl */ val = regs->gpr[ra]; @@ -1061,109 +1568,114 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs, val2 = (unsigned int) val2; } #endif - do_cmp_unsigned(regs, val, val2, rd >> 2); - goto instr_done; + do_cmp_unsigned(regs, op, val, val2, rd >> 2); + return 1; + + case 508: /* cmpb */ + do_cmpb(regs, op, regs->gpr[rd], regs->gpr[rb]); + goto logical_done_nocc; /* * Arithmetic instructions */ case 8: /* subfc */ - add_with_carry(regs, rd, ~regs->gpr[ra], + add_with_carry(regs, op, rd, ~regs->gpr[ra], regs->gpr[rb], 1); goto arith_done; #ifdef __powerpc64__ case 9: /* mulhdu */ - asm("mulhdu %0,%1,%2" : "=r" (regs->gpr[rd]) : + asm("mulhdu %0,%1,%2" : "=r" (op->val) : "r" (regs->gpr[ra]), "r" (regs->gpr[rb])); goto arith_done; #endif case 10: /* addc */ - add_with_carry(regs, rd, regs->gpr[ra], + add_with_carry(regs, op, rd, regs->gpr[ra], regs->gpr[rb], 0); goto arith_done; case 11: /* mulhwu */ - asm("mulhwu %0,%1,%2" : "=r" (regs->gpr[rd]) : + asm("mulhwu %0,%1,%2" : "=r" (op->val) : "r" (regs->gpr[ra]), "r" (regs->gpr[rb])); goto arith_done; case 40: /* subf */ - regs->gpr[rd] = regs->gpr[rb] - regs->gpr[ra]; + op->val = regs->gpr[rb] - regs->gpr[ra]; goto arith_done; #ifdef __powerpc64__ case 73: /* mulhd */ - asm("mulhd %0,%1,%2" : "=r" (regs->gpr[rd]) : + asm("mulhd %0,%1,%2" : "=r" (op->val) : "r" (regs->gpr[ra]), "r" (regs->gpr[rb])); goto arith_done; #endif case 75: /* mulhw */ - asm("mulhw %0,%1,%2" : "=r" (regs->gpr[rd]) : + asm("mulhw %0,%1,%2" : "=r" (op->val) : "r" (regs->gpr[ra]), "r" (regs->gpr[rb])); goto arith_done; case 104: /* neg */ - regs->gpr[rd] = -regs->gpr[ra]; + op->val = -regs->gpr[ra]; goto arith_done; case 136: /* subfe */ - add_with_carry(regs, rd, ~regs->gpr[ra], regs->gpr[rb], - regs->xer & XER_CA); + add_with_carry(regs, op, rd, ~regs->gpr[ra], + regs->gpr[rb], regs->xer & XER_CA); goto arith_done; case 138: /* adde */ - add_with_carry(regs, rd, regs->gpr[ra], regs->gpr[rb], - regs->xer & XER_CA); + add_with_carry(regs, op, rd, regs->gpr[ra], + regs->gpr[rb], regs->xer & XER_CA); goto arith_done; case 200: /* subfze */ - add_with_carry(regs, rd, ~regs->gpr[ra], 0L, + add_with_carry(regs, op, rd, ~regs->gpr[ra], 0L, regs->xer & XER_CA); goto arith_done; case 202: /* addze */ - add_with_carry(regs, rd, regs->gpr[ra], 0L, + add_with_carry(regs, op, rd, regs->gpr[ra], 0L, regs->xer & XER_CA); goto arith_done; case 232: /* subfme */ - add_with_carry(regs, rd, ~regs->gpr[ra], -1L, + add_with_carry(regs, op, rd, ~regs->gpr[ra], -1L, regs->xer & XER_CA); goto arith_done; #ifdef __powerpc64__ case 233: /* mulld */ - regs->gpr[rd] = regs->gpr[ra] * regs->gpr[rb]; + op->val = regs->gpr[ra] * regs->gpr[rb]; goto arith_done; #endif case 234: /* addme */ - add_with_carry(regs, rd, regs->gpr[ra], -1L, + add_with_carry(regs, op, rd, regs->gpr[ra], -1L, regs->xer & XER_CA); goto arith_done; 
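/*
 * Editor's note -- illustrative sketch, not part of the patch.
 * The carry-handling cases above (subfc, addc, subfe, adde, subfze,
 * addze, subfme, addme) all go through add_with_carry(), which now
 * records XER[CA] in op->xerval by checking whether the unsigned sum
 * wrapped: a carry-out occurred iff the result is smaller than one
 * operand, or equal to it when a carry came in.  The hypothetical
 * helper below demonstrates that test in isolation.
 */
#include <stdio.h>

static unsigned long add_carry_out(unsigned long a, unsigned long b,
				   unsigned long carry_in, int *carry_out)
{
	unsigned long sum = a + b + carry_in;

	/* same test as add_with_carry(): wrap-around means carry out */
	*carry_out = (sum < a) || (carry_in && sum == a);
	return sum;
}

int main(void)
{
	int ca;
	unsigned long s = add_carry_out(~0UL, 0UL, 1, &ca);

	printf("sum=%lx carry=%d\n", s, ca);	/* expect sum=0 carry=1 */
	return 0;
}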
case 235: /* mullw */ - regs->gpr[rd] = (unsigned int) regs->gpr[ra] * - (unsigned int) regs->gpr[rb]; + op->val = (long)(int) regs->gpr[ra] * + (int) regs->gpr[rb]; + goto arith_done; case 266: /* add */ - regs->gpr[rd] = regs->gpr[ra] + regs->gpr[rb]; + op->val = regs->gpr[ra] + regs->gpr[rb]; goto arith_done; #ifdef __powerpc64__ case 457: /* divdu */ - regs->gpr[rd] = regs->gpr[ra] / regs->gpr[rb]; + op->val = regs->gpr[ra] / regs->gpr[rb]; goto arith_done; #endif case 459: /* divwu */ - regs->gpr[rd] = (unsigned int) regs->gpr[ra] / + op->val = (unsigned int) regs->gpr[ra] / (unsigned int) regs->gpr[rb]; goto arith_done; #ifdef __powerpc64__ case 489: /* divd */ - regs->gpr[rd] = (long int) regs->gpr[ra] / + op->val = (long int) regs->gpr[ra] / (long int) regs->gpr[rb]; goto arith_done; #endif case 491: /* divw */ - regs->gpr[rd] = (int) regs->gpr[ra] / + op->val = (int) regs->gpr[ra] / (int) regs->gpr[rb]; goto arith_done; @@ -1172,57 +1684,79 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs, * Logical instructions */ case 26: /* cntlzw */ - asm("cntlzw %0,%1" : "=r" (regs->gpr[ra]) : - "r" (regs->gpr[rd])); + op->val = __builtin_clz((unsigned int) regs->gpr[rd]); goto logical_done; #ifdef __powerpc64__ case 58: /* cntlzd */ - asm("cntlzd %0,%1" : "=r" (regs->gpr[ra]) : - "r" (regs->gpr[rd])); + op->val = __builtin_clzl(regs->gpr[rd]); goto logical_done; #endif case 28: /* and */ - regs->gpr[ra] = regs->gpr[rd] & regs->gpr[rb]; + op->val = regs->gpr[rd] & regs->gpr[rb]; goto logical_done; case 60: /* andc */ - regs->gpr[ra] = regs->gpr[rd] & ~regs->gpr[rb]; + op->val = regs->gpr[rd] & ~regs->gpr[rb]; goto logical_done; + case 122: /* popcntb */ + do_popcnt(regs, op, regs->gpr[rd], 8); + goto logical_done_nocc; + case 124: /* nor */ - regs->gpr[ra] = ~(regs->gpr[rd] | regs->gpr[rb]); + op->val = ~(regs->gpr[rd] | regs->gpr[rb]); goto logical_done; + case 154: /* prtyw */ + do_prty(regs, op, regs->gpr[rd], 32); + goto logical_done_nocc; + + case 186: /* prtyd */ + do_prty(regs, op, regs->gpr[rd], 64); + goto logical_done_nocc; +#ifdef CONFIG_PPC64 + case 252: /* bpermd */ + do_bpermd(regs, op, regs->gpr[rd], regs->gpr[rb]); + goto logical_done_nocc; +#endif case 284: /* xor */ - regs->gpr[ra] = ~(regs->gpr[rd] ^ regs->gpr[rb]); + op->val = ~(regs->gpr[rd] ^ regs->gpr[rb]); goto logical_done; case 316: /* xor */ - regs->gpr[ra] = regs->gpr[rd] ^ regs->gpr[rb]; + op->val = regs->gpr[rd] ^ regs->gpr[rb]; goto logical_done; + case 378: /* popcntw */ + do_popcnt(regs, op, regs->gpr[rd], 32); + goto logical_done_nocc; + case 412: /* orc */ - regs->gpr[ra] = regs->gpr[rd] | ~regs->gpr[rb]; + op->val = regs->gpr[rd] | ~regs->gpr[rb]; goto logical_done; case 444: /* or */ - regs->gpr[ra] = regs->gpr[rd] | regs->gpr[rb]; + op->val = regs->gpr[rd] | regs->gpr[rb]; goto logical_done; case 476: /* nand */ - regs->gpr[ra] = ~(regs->gpr[rd] & regs->gpr[rb]); + op->val = ~(regs->gpr[rd] & regs->gpr[rb]); goto logical_done; - +#ifdef CONFIG_PPC64 + case 506: /* popcntd */ + do_popcnt(regs, op, regs->gpr[rd], 64); + goto logical_done_nocc; +#endif case 922: /* extsh */ - regs->gpr[ra] = (signed short) regs->gpr[rd]; + op->val = (signed short) regs->gpr[rd]; goto logical_done; case 954: /* extsb */ - regs->gpr[ra] = (signed char) regs->gpr[rd]; + op->val = (signed char) regs->gpr[rd]; goto logical_done; #ifdef __powerpc64__ case 986: /* extsw */ - regs->gpr[ra] = (signed int) regs->gpr[rd]; + op->val = (signed int) regs->gpr[rd]; goto logical_done; #endif @@ -1232,75 +1766,83 @@ int 
analyse_instr(struct instruction_op *op, struct pt_regs *regs, case 24: /* slw */ sh = regs->gpr[rb] & 0x3f; if (sh < 32) - regs->gpr[ra] = (regs->gpr[rd] << sh) & 0xffffffffUL; + op->val = (regs->gpr[rd] << sh) & 0xffffffffUL; else - regs->gpr[ra] = 0; + op->val = 0; goto logical_done; case 536: /* srw */ sh = regs->gpr[rb] & 0x3f; if (sh < 32) - regs->gpr[ra] = (regs->gpr[rd] & 0xffffffffUL) >> sh; + op->val = (regs->gpr[rd] & 0xffffffffUL) >> sh; else - regs->gpr[ra] = 0; + op->val = 0; goto logical_done; case 792: /* sraw */ + op->type = COMPUTE + SETREG + SETXER; sh = regs->gpr[rb] & 0x3f; ival = (signed int) regs->gpr[rd]; - regs->gpr[ra] = ival >> (sh < 32 ? sh : 31); + op->val = ival >> (sh < 32 ? sh : 31); + op->xerval = regs->xer; if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0)) - regs->xer |= XER_CA; + op->xerval |= XER_CA; else - regs->xer &= ~XER_CA; + op->xerval &= ~XER_CA; goto logical_done; case 824: /* srawi */ + op->type = COMPUTE + SETREG + SETXER; sh = rb; ival = (signed int) regs->gpr[rd]; - regs->gpr[ra] = ival >> sh; + op->val = ival >> sh; + op->xerval = regs->xer; if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0) - regs->xer |= XER_CA; + op->xerval |= XER_CA; else - regs->xer &= ~XER_CA; + op->xerval &= ~XER_CA; goto logical_done; #ifdef __powerpc64__ case 27: /* sld */ sh = regs->gpr[rb] & 0x7f; if (sh < 64) - regs->gpr[ra] = regs->gpr[rd] << sh; + op->val = regs->gpr[rd] << sh; else - regs->gpr[ra] = 0; + op->val = 0; goto logical_done; case 539: /* srd */ sh = regs->gpr[rb] & 0x7f; if (sh < 64) - regs->gpr[ra] = regs->gpr[rd] >> sh; + op->val = regs->gpr[rd] >> sh; else - regs->gpr[ra] = 0; + op->val = 0; goto logical_done; case 794: /* srad */ + op->type = COMPUTE + SETREG + SETXER; sh = regs->gpr[rb] & 0x7f; ival = (signed long int) regs->gpr[rd]; - regs->gpr[ra] = ival >> (sh < 64 ? sh : 63); + op->val = ival >> (sh < 64 ? sh : 63); + op->xerval = regs->xer; if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0)) - regs->xer |= XER_CA; + op->xerval |= XER_CA; else - regs->xer &= ~XER_CA; + op->xerval &= ~XER_CA; goto logical_done; case 826: /* sradi with sh_5 = 0 */ case 827: /* sradi with sh_5 = 1 */ + op->type = COMPUTE + SETREG + SETXER; sh = rb | ((instr & 2) << 4); ival = (signed long int) regs->gpr[rd]; - regs->gpr[ra] = ival >> sh; + op->val = ival >> sh; + op->xerval = regs->xer; if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0) - regs->xer |= XER_CA; + op->xerval |= XER_CA; else - regs->xer &= ~XER_CA; + op->xerval &= ~XER_CA; goto logical_done; #endif /* __powerpc64__ */ @@ -1333,18 +1875,24 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs, op->type = MKOP(CACHEOP, ICBI, 0); op->ea = xform_ea(instr, regs); return 0; + + case 1014: /* dcbz */ + op->type = MKOP(CACHEOP, DCBZ, 0); + op->ea = xform_ea(instr, regs); + return 0; } break; } - /* - * Loads and stores. - */ +/* + * Loads and stores. + */ op->type = UNKNOWN; op->update_reg = ra; op->reg = rd; op->val = regs->gpr[rd]; u = (instr >> 20) & UPDATE; + op->vsx_flags = 0; switch (opcode) { case 31: @@ -1368,9 +1916,30 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs, op->type = MKOP(STCX, 0, 8); break; - case 21: /* ldx */ - case 53: /* ldux */ - op->type = MKOP(LOAD, u, 8); + case 52: /* lbarx */ + op->type = MKOP(LARX, 0, 1); + break; + + case 694: /* stbcx. */ + op->type = MKOP(STCX, 0, 1); + break; + + case 116: /* lharx */ + op->type = MKOP(LARX, 0, 2); + break; + + case 726: /* sthcx. 
*/ + op->type = MKOP(STCX, 0, 2); + break; + + case 276: /* lqarx */ + if (!((rd & 1) || rd == ra || rd == rb)) + op->type = MKOP(LARX, 0, 16); + break; + + case 182: /* stqcx. */ + if (!(rd & 1)) + op->type = MKOP(STCX, 0, 16); break; #endif @@ -1385,22 +1954,58 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs, break; #ifdef CONFIG_ALTIVEC + /* + * Note: for the load/store vector element instructions, + * bits of the EA say which field of the VMX register to use. + */ + case 7: /* lvebx */ + op->type = MKOP(LOAD_VMX, 0, 1); + op->element_size = 1; + break; + + case 39: /* lvehx */ + op->type = MKOP(LOAD_VMX, 0, 2); + op->element_size = 2; + break; + + case 71: /* lvewx */ + op->type = MKOP(LOAD_VMX, 0, 4); + op->element_size = 4; + break; + case 103: /* lvx */ case 359: /* lvxl */ - if (!(regs->msr & MSR_VEC)) - goto vecunavail; op->type = MKOP(LOAD_VMX, 0, 16); + op->element_size = 16; + break; + + case 135: /* stvebx */ + op->type = MKOP(STORE_VMX, 0, 1); + op->element_size = 1; + break; + + case 167: /* stvehx */ + op->type = MKOP(STORE_VMX, 0, 2); + op->element_size = 2; + break; + + case 199: /* stvewx */ + op->type = MKOP(STORE_VMX, 0, 4); + op->element_size = 4; break; case 231: /* stvx */ case 487: /* stvxl */ - if (!(regs->msr & MSR_VEC)) - goto vecunavail; op->type = MKOP(STORE_VMX, 0, 16); break; #endif /* CONFIG_ALTIVEC */ #ifdef __powerpc64__ + case 21: /* ldx */ + case 53: /* ldux */ + op->type = MKOP(LOAD, u, 8); + break; + case 149: /* stdx */ case 181: /* stdux */ op->type = MKOP(STORE, u, 8); @@ -1457,41 +2062,52 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs, if (rb == 0) rb = 32; /* # bytes to load */ op->type = MKOP(LOAD_MULTI, 0, rb); - op->ea = 0; - if (ra) - op->ea = truncate_if_32bit(regs->msr, - regs->gpr[ra]); + op->ea = ra ? regs->gpr[ra] : 0; break; #ifdef CONFIG_PPC_FPU case 535: /* lfsx */ case 567: /* lfsux */ - if (!(regs->msr & MSR_FP)) - goto fpunavail; - op->type = MKOP(LOAD_FP, u, 4); + op->type = MKOP(LOAD_FP, u | FPCONV, 4); break; case 599: /* lfdx */ case 631: /* lfdux */ - if (!(regs->msr & MSR_FP)) - goto fpunavail; op->type = MKOP(LOAD_FP, u, 8); break; case 663: /* stfsx */ case 695: /* stfsux */ - if (!(regs->msr & MSR_FP)) - goto fpunavail; - op->type = MKOP(STORE_FP, u, 4); + op->type = MKOP(STORE_FP, u | FPCONV, 4); break; case 727: /* stfdx */ case 759: /* stfdux */ - if (!(regs->msr & MSR_FP)) - goto fpunavail; op->type = MKOP(STORE_FP, u, 8); break; -#endif + +#ifdef __powerpc64__ + case 791: /* lfdpx */ + op->type = MKOP(LOAD_FP, 0, 16); + break; + + case 855: /* lfiwax */ + op->type = MKOP(LOAD_FP, SIGNEXT, 4); + break; + + case 887: /* lfiwzx */ + op->type = MKOP(LOAD_FP, 0, 4); + break; + + case 919: /* stfdpx */ + op->type = MKOP(STORE_FP, 0, 16); + break; + + case 983: /* stfiwx */ + op->type = MKOP(STORE_FP, 0, 4); + break; +#endif /* __powerpc64 */ +#endif /* CONFIG_PPC_FPU */ #ifdef __powerpc64__ case 660: /* stdbrx */ @@ -1509,14 +2125,11 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs, op->val = byterev_4(regs->gpr[rd]); break; - case 725: + case 725: /* stswi */ if (rb == 0) rb = 32; /* # bytes to store */ op->type = MKOP(STORE_MULTI, 0, rb); - op->ea = 0; - if (ra) - op->ea = truncate_if_32bit(regs->msr, - regs->gpr[ra]); + op->ea = ra ? 
regs->gpr[ra] : 0; break; case 790: /* lhbrx */ @@ -1529,20 +2142,184 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs, break; #ifdef CONFIG_VSX - case 844: /* lxvd2x */ - case 876: /* lxvd2ux */ - if (!(regs->msr & MSR_VSX)) - goto vsxunavail; + case 12: /* lxsiwzx */ op->reg = rd | ((instr & 1) << 5); - op->type = MKOP(LOAD_VSX, u, 16); + op->type = MKOP(LOAD_VSX, 0, 4); + op->element_size = 8; + break; + + case 76: /* lxsiwax */ + op->reg = rd | ((instr & 1) << 5); + op->type = MKOP(LOAD_VSX, SIGNEXT, 4); + op->element_size = 8; + break; + + case 140: /* stxsiwx */ + op->reg = rd | ((instr & 1) << 5); + op->type = MKOP(STORE_VSX, 0, 4); + op->element_size = 8; + break; + + case 268: /* lxvx */ + op->reg = rd | ((instr & 1) << 5); + op->type = MKOP(LOAD_VSX, 0, 16); + op->element_size = 16; + op->vsx_flags = VSX_CHECK_VEC; + break; + + case 269: /* lxvl */ + case 301: { /* lxvll */ + int nb; + op->reg = rd | ((instr & 1) << 5); + op->ea = ra ? regs->gpr[ra] : 0; + nb = regs->gpr[rb] & 0xff; + if (nb > 16) + nb = 16; + op->type = MKOP(LOAD_VSX, 0, nb); + op->element_size = 16; + op->vsx_flags = ((instr & 0x20) ? VSX_LDLEFT : 0) | + VSX_CHECK_VEC; + break; + } + case 332: /* lxvdsx */ + op->reg = rd | ((instr & 1) << 5); + op->type = MKOP(LOAD_VSX, 0, 8); + op->element_size = 8; + op->vsx_flags = VSX_SPLAT; + break; + + case 364: /* lxvwsx */ + op->reg = rd | ((instr & 1) << 5); + op->type = MKOP(LOAD_VSX, 0, 4); + op->element_size = 4; + op->vsx_flags = VSX_SPLAT | VSX_CHECK_VEC; + break; + + case 396: /* stxvx */ + op->reg = rd | ((instr & 1) << 5); + op->type = MKOP(STORE_VSX, 0, 16); + op->element_size = 16; + op->vsx_flags = VSX_CHECK_VEC; + break; + + case 397: /* stxvl */ + case 429: { /* stxvll */ + int nb; + op->reg = rd | ((instr & 1) << 5); + op->ea = ra ? regs->gpr[ra] : 0; + nb = regs->gpr[rb] & 0xff; + if (nb > 16) + nb = 16; + op->type = MKOP(STORE_VSX, 0, nb); + op->element_size = 16; + op->vsx_flags = ((instr & 0x20) ? 
VSX_LDLEFT : 0) | + VSX_CHECK_VEC; + break; + } + case 524: /* lxsspx */ + op->reg = rd | ((instr & 1) << 5); + op->type = MKOP(LOAD_VSX, 0, 4); + op->element_size = 8; + op->vsx_flags = VSX_FPCONV; + break; + + case 588: /* lxsdx */ + op->reg = rd | ((instr & 1) << 5); + op->type = MKOP(LOAD_VSX, 0, 8); + op->element_size = 8; + break; + + case 652: /* stxsspx */ + op->reg = rd | ((instr & 1) << 5); + op->type = MKOP(STORE_VSX, 0, 4); + op->element_size = 8; + op->vsx_flags = VSX_FPCONV; + break; + + case 716: /* stxsdx */ + op->reg = rd | ((instr & 1) << 5); + op->type = MKOP(STORE_VSX, 0, 8); + op->element_size = 8; + break; + + case 780: /* lxvw4x */ + op->reg = rd | ((instr & 1) << 5); + op->type = MKOP(LOAD_VSX, 0, 16); + op->element_size = 4; + break; + + case 781: /* lxsibzx */ + op->reg = rd | ((instr & 1) << 5); + op->type = MKOP(LOAD_VSX, 0, 1); + op->element_size = 8; + op->vsx_flags = VSX_CHECK_VEC; + break; + + case 812: /* lxvh8x */ + op->reg = rd | ((instr & 1) << 5); + op->type = MKOP(LOAD_VSX, 0, 16); + op->element_size = 2; + op->vsx_flags = VSX_CHECK_VEC; + break; + + case 813: /* lxsihzx */ + op->reg = rd | ((instr & 1) << 5); + op->type = MKOP(LOAD_VSX, 0, 2); + op->element_size = 8; + op->vsx_flags = VSX_CHECK_VEC; + break; + + case 844: /* lxvd2x */ + op->reg = rd | ((instr & 1) << 5); + op->type = MKOP(LOAD_VSX, 0, 16); + op->element_size = 8; + break; + + case 876: /* lxvb16x */ + op->reg = rd | ((instr & 1) << 5); + op->type = MKOP(LOAD_VSX, 0, 16); + op->element_size = 1; + op->vsx_flags = VSX_CHECK_VEC; + break; + + case 908: /* stxvw4x */ + op->reg = rd | ((instr & 1) << 5); + op->type = MKOP(STORE_VSX, 0, 16); + op->element_size = 4; + break; + + case 909: /* stxsibx */ + op->reg = rd | ((instr & 1) << 5); + op->type = MKOP(STORE_VSX, 0, 1); + op->element_size = 8; + op->vsx_flags = VSX_CHECK_VEC; + break; + + case 940: /* stxvh8x */ + op->reg = rd | ((instr & 1) << 5); + op->type = MKOP(STORE_VSX, 0, 16); + op->element_size = 2; + op->vsx_flags = VSX_CHECK_VEC; + break; + + case 941: /* stxsihx */ + op->reg = rd | ((instr & 1) << 5); + op->type = MKOP(STORE_VSX, 0, 2); + op->element_size = 8; + op->vsx_flags = VSX_CHECK_VEC; break; case 972: /* stxvd2x */ - case 1004: /* stxvd2ux */ - if (!(regs->msr & MSR_VSX)) - goto vsxunavail; op->reg = rd | ((instr & 1) << 5); - op->type = MKOP(STORE_VSX, u, 16); + op->type = MKOP(STORE_VSX, 0, 16); + op->element_size = 8; + break; + + case 1004: /* stxvb16x */ + op->reg = rd | ((instr & 1) << 5); + op->type = MKOP(STORE_VSX, 0, 16); + op->element_size = 1; + op->vsx_flags = VSX_CHECK_VEC; break; #endif /* CONFIG_VSX */ @@ -1606,37 +2383,62 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs, #ifdef CONFIG_PPC_FPU case 48: /* lfs */ case 49: /* lfsu */ - if (!(regs->msr & MSR_FP)) - goto fpunavail; - op->type = MKOP(LOAD_FP, u, 4); + op->type = MKOP(LOAD_FP, u | FPCONV, 4); op->ea = dform_ea(instr, regs); break; case 50: /* lfd */ case 51: /* lfdu */ - if (!(regs->msr & MSR_FP)) - goto fpunavail; op->type = MKOP(LOAD_FP, u, 8); op->ea = dform_ea(instr, regs); break; case 52: /* stfs */ case 53: /* stfsu */ - if (!(regs->msr & MSR_FP)) - goto fpunavail; - op->type = MKOP(STORE_FP, u, 4); + op->type = MKOP(STORE_FP, u | FPCONV, 4); op->ea = dform_ea(instr, regs); break; case 54: /* stfd */ case 55: /* stfdu */ - if (!(regs->msr & MSR_FP)) - goto fpunavail; op->type = MKOP(STORE_FP, u, 8); op->ea = dform_ea(instr, regs); break; #endif +#ifdef __powerpc64__ + case 56: /* lq */ + if (!((rd & 1) || (rd == 
ra))) + op->type = MKOP(LOAD, 0, 16); + op->ea = dqform_ea(instr, regs); + break; +#endif + +#ifdef CONFIG_VSX + case 57: /* lfdp, lxsd, lxssp */ + op->ea = dsform_ea(instr, regs); + switch (instr & 3) { + case 0: /* lfdp */ + if (rd & 1) + break; /* reg must be even */ + op->type = MKOP(LOAD_FP, 0, 16); + break; + case 2: /* lxsd */ + op->reg = rd + 32; + op->type = MKOP(LOAD_VSX, 0, 8); + op->element_size = 8; + op->vsx_flags = VSX_CHECK_VEC; + break; + case 3: /* lxssp */ + op->reg = rd + 32; + op->type = MKOP(LOAD_VSX, 0, 4); + op->element_size = 8; + op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC; + break; + } + break; +#endif /* CONFIG_VSX */ + #ifdef __powerpc64__ case 58: /* ld[u], lwa */ op->ea = dsform_ea(instr, regs); @@ -1652,7 +2454,57 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs, break; } break; +#endif +#ifdef CONFIG_VSX + case 61: /* stfdp, lxv, stxsd, stxssp, stxv */ + switch (instr & 7) { + case 0: /* stfdp with LSB of DS field = 0 */ + case 4: /* stfdp with LSB of DS field = 1 */ + op->ea = dsform_ea(instr, regs); + op->type = MKOP(STORE_FP, 0, 16); + break; + + case 1: /* lxv */ + op->ea = dqform_ea(instr, regs); + if (instr & 8) + op->reg = rd + 32; + op->type = MKOP(LOAD_VSX, 0, 16); + op->element_size = 16; + op->vsx_flags = VSX_CHECK_VEC; + break; + + case 2: /* stxsd with LSB of DS field = 0 */ + case 6: /* stxsd with LSB of DS field = 1 */ + op->ea = dsform_ea(instr, regs); + op->reg = rd + 32; + op->type = MKOP(STORE_VSX, 0, 8); + op->element_size = 8; + op->vsx_flags = VSX_CHECK_VEC; + break; + + case 3: /* stxssp with LSB of DS field = 0 */ + case 7: /* stxssp with LSB of DS field = 1 */ + op->ea = dsform_ea(instr, regs); + op->reg = rd + 32; + op->type = MKOP(STORE_VSX, 0, 4); + op->element_size = 8; + op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC; + break; + + case 5: /* stxv */ + op->ea = dqform_ea(instr, regs); + if (instr & 8) + op->reg = rd + 32; + op->type = MKOP(STORE_VSX, 0, 16); + op->element_size = 16; + op->vsx_flags = VSX_CHECK_VEC; + break; + } + break; +#endif /* CONFIG_VSX */ + +#ifdef __powerpc64__ case 62: /* std[u] */ op->ea = dsform_ea(instr, regs); switch (instr & 3) { @@ -1662,6 +2514,10 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs, case 1: /* stdu */ op->type = MKOP(STORE, UPDATE, 8); break; + case 2: /* stq */ + if (!(rd & 1)) + op->type = MKOP(STORE, 0, 16); + break; } break; #endif /* __powerpc64__ */ @@ -1671,15 +2527,18 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs, logical_done: if (instr & 1) - set_cr0(regs, ra); - goto instr_done; + set_cr0(regs, op); + logical_done_nocc: + op->reg = ra; + op->type |= SETREG; + return 1; arith_done: if (instr & 1) - set_cr0(regs, rd); - - instr_done: - regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4); + set_cr0(regs, op); + compute_done: + op->reg = rd; + op->type |= SETREG; return 1; priv: @@ -1691,24 +2550,6 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs, op->type = INTERRUPT | 0x700; op->val = SRR1_PROGTRAP; return 0; - -#ifdef CONFIG_PPC_FPU - fpunavail: - op->type = INTERRUPT | 0x800; - return 0; -#endif - -#ifdef CONFIG_ALTIVEC - vecunavail: - op->type = INTERRUPT | 0xf20; - return 0; -#endif - -#ifdef CONFIG_VSX - vsxunavail: - op->type = INTERRUPT | 0xf40; - return 0; -#endif } EXPORT_SYMBOL_GPL(analyse_instr); NOKPROBE_SYMBOL(analyse_instr); @@ -1770,6 +2611,351 @@ static nokprobe_inline void do_byterev(unsigned long *valp, int size) } } +/* + * Emulate an instruction that can be executed just by 
updating + * fields in *regs. + */ +void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op) +{ + unsigned long next_pc; + + next_pc = truncate_if_32bit(regs->msr, regs->nip + 4); + switch (op->type & INSTR_TYPE_MASK) { + case COMPUTE: + if (op->type & SETREG) + regs->gpr[op->reg] = op->val; + if (op->type & SETCC) + regs->ccr = op->ccval; + if (op->type & SETXER) + regs->xer = op->xerval; + break; + + case BRANCH: + if (op->type & SETLK) + regs->link = next_pc; + if (op->type & BRTAKEN) + next_pc = op->val; + if (op->type & DECCTR) + --regs->ctr; + break; + + case BARRIER: + switch (op->type & BARRIER_MASK) { + case BARRIER_SYNC: + mb(); + break; + case BARRIER_ISYNC: + isync(); + break; + case BARRIER_EIEIO: + eieio(); + break; + case BARRIER_LWSYNC: + asm volatile("lwsync" : : : "memory"); + break; + case BARRIER_PTESYNC: + asm volatile("ptesync" : : : "memory"); + break; + } + break; + + case MFSPR: + switch (op->spr) { + case SPRN_XER: + regs->gpr[op->reg] = regs->xer & 0xffffffffUL; + break; + case SPRN_LR: + regs->gpr[op->reg] = regs->link; + break; + case SPRN_CTR: + regs->gpr[op->reg] = regs->ctr; + break; + default: + WARN_ON_ONCE(1); + } + break; + + case MTSPR: + switch (op->spr) { + case SPRN_XER: + regs->xer = op->val & 0xffffffffUL; + break; + case SPRN_LR: + regs->link = op->val; + break; + case SPRN_CTR: + regs->ctr = op->val; + break; + default: + WARN_ON_ONCE(1); + } + break; + + default: + WARN_ON_ONCE(1); + } + regs->nip = next_pc; +} + +/* + * Emulate a previously-analysed load or store instruction. + * Return values are: + * 0 = instruction emulated successfully + * -EFAULT = address out of range or access faulted (regs->dar + * contains the faulting address) + * -EACCES = misaligned access, instruction requires alignment + * -EINVAL = unknown operation in *op + */ +int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op) +{ + int err, size, type; + int i, rd, nb; + unsigned int cr; + unsigned long val; + unsigned long ea; + bool cross_endian; + + err = 0; + size = GETSIZE(op->type); + type = op->type & INSTR_TYPE_MASK; + cross_endian = (regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE); + ea = truncate_if_32bit(regs->msr, op->ea); + + switch (type) { + case LARX: + if (ea & (size - 1)) + return -EACCES; /* can't handle misaligned */ + if (!address_ok(regs, ea, size)) + return -EFAULT; + err = 0; + val = 0; + switch (size) { +#ifdef __powerpc64__ + case 1: + __get_user_asmx(val, ea, err, "lbarx"); + break; + case 2: + __get_user_asmx(val, ea, err, "lharx"); + break; +#endif + case 4: + __get_user_asmx(val, ea, err, "lwarx"); + break; +#ifdef __powerpc64__ + case 8: + __get_user_asmx(val, ea, err, "ldarx"); + break; + case 16: + err = do_lqarx(ea, ®s->gpr[op->reg]); + break; +#endif + default: + return -EINVAL; + } + if (err) { + regs->dar = ea; + break; + } + if (size < 16) + regs->gpr[op->reg] = val; + break; + + case STCX: + if (ea & (size - 1)) + return -EACCES; /* can't handle misaligned */ + if (!address_ok(regs, ea, size)) + return -EFAULT; + err = 0; + switch (size) { +#ifdef __powerpc64__ + case 1: + __put_user_asmx(op->val, ea, err, "stbcx.", cr); + break; + case 2: + __put_user_asmx(op->val, ea, err, "stbcx.", cr); + break; +#endif + case 4: + __put_user_asmx(op->val, ea, err, "stwcx.", cr); + break; +#ifdef __powerpc64__ + case 8: + __put_user_asmx(op->val, ea, err, "stdcx.", cr); + break; + case 16: + err = do_stqcx(ea, regs->gpr[op->reg], + regs->gpr[op->reg + 1], &cr); + break; +#endif + default: + return -EINVAL; + } + if 
(!err) + regs->ccr = (regs->ccr & 0x0fffffff) | + (cr & 0xe0000000) | + ((regs->xer >> 3) & 0x10000000); + else + regs->dar = ea; + break; + + case LOAD: +#ifdef __powerpc64__ + if (size == 16) { + err = emulate_lq(regs, ea, op->reg, cross_endian); + break; + } +#endif + err = read_mem(®s->gpr[op->reg], ea, size, regs); + if (!err) { + if (op->type & SIGNEXT) + do_signext(®s->gpr[op->reg], size); + if ((op->type & BYTEREV) == (cross_endian ? 0 : BYTEREV)) + do_byterev(®s->gpr[op->reg], size); + } + break; + +#ifdef CONFIG_PPC_FPU + case LOAD_FP: + /* + * If the instruction is in userspace, we can emulate it even + * if the VMX state is not live, because we have the state + * stored in the thread_struct. If the instruction is in + * the kernel, we must not touch the state in the thread_struct. + */ + if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP)) + return 0; + err = do_fp_load(op, ea, regs, cross_endian); + break; +#endif +#ifdef CONFIG_ALTIVEC + case LOAD_VMX: + if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC)) + return 0; + err = do_vec_load(op->reg, ea, size, regs, cross_endian); + break; +#endif +#ifdef CONFIG_VSX + case LOAD_VSX: { + unsigned long msrbit = MSR_VSX; + + /* + * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX + * when the target of the instruction is a vector register. + */ + if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC)) + msrbit = MSR_VEC; + if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit)) + return 0; + err = do_vsx_load(op, ea, regs, cross_endian); + break; + } +#endif + case LOAD_MULTI: + if (!address_ok(regs, ea, size)) + return -EFAULT; + rd = op->reg; + for (i = 0; i < size; i += 4) { + unsigned int v32 = 0; + + nb = size - i; + if (nb > 4) + nb = 4; + err = copy_mem_in((u8 *) &v32, ea, nb, regs); + if (err) + break; + if (unlikely(cross_endian)) + v32 = byterev_4(v32); + regs->gpr[rd] = v32; + ea += 4; + /* reg number wraps from 31 to 0 for lsw[ix] */ + rd = (rd + 1) & 0x1f; + } + break; + + case STORE: +#ifdef __powerpc64__ + if (size == 16) { + err = emulate_stq(regs, ea, op->reg, cross_endian); + break; + } +#endif + if ((op->type & UPDATE) && size == sizeof(long) && + op->reg == 1 && op->update_reg == 1 && + !(regs->msr & MSR_PR) && + ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) { + err = handle_stack_update(ea, regs); + break; + } + if (unlikely(cross_endian)) + do_byterev(&op->val, size); + err = write_mem(op->val, ea, size, regs); + break; + +#ifdef CONFIG_PPC_FPU + case STORE_FP: + if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP)) + return 0; + err = do_fp_store(op, ea, regs, cross_endian); + break; +#endif +#ifdef CONFIG_ALTIVEC + case STORE_VMX: + if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC)) + return 0; + err = do_vec_store(op->reg, ea, size, regs, cross_endian); + break; +#endif +#ifdef CONFIG_VSX + case STORE_VSX: { + unsigned long msrbit = MSR_VSX; + + /* + * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX + * when the target of the instruction is a vector register. 
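+ * (analyse_instr() sets op->reg to rd + 32 for these forms, e.g. stxsd, + * stxssp, and stxv with the instruction's 0x8 bit set, so a register + * number of 32 or more here names a VMX register and MSR_VEC is the + * bit that gates its state.)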
+ */ + if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC)) + msrbit = MSR_VEC; + if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit)) + return 0; + err = do_vsx_store(op, ea, regs, cross_endian); + break; + } +#endif + case STORE_MULTI: + if (!address_ok(regs, ea, size)) + return -EFAULT; + rd = op->reg; + for (i = 0; i < size; i += 4) { + unsigned int v32 = regs->gpr[rd]; + + nb = size - i; + if (nb > 4) + nb = 4; + if (unlikely(cross_endian)) + v32 = byterev_4(v32); + err = copy_mem_out((u8 *) &v32, ea, nb, regs); + if (err) + break; + ea += 4; + /* reg number wraps from 31 to 0 for stsw[ix] */ + rd = (rd + 1) & 0x1f; + } + break; + + default: + return -EINVAL; + } + + if (err) + return err; + + if (op->type & UPDATE) + regs->gpr[op->update_reg] = op->ea; + + return 0; +} +NOKPROBE_SYMBOL(emulate_loadstore); + /* * Emulate instructions that cause a transfer of control, * loads and stores, and a few other instructions. @@ -1780,182 +2966,59 @@ static nokprobe_inline void do_byterev(unsigned long *valp, int size) int emulate_step(struct pt_regs *regs, unsigned int instr) { struct instruction_op op; - int r, err, size; + int r, err, type; unsigned long val; - unsigned int cr; - int i, rd, nb; + unsigned long ea; r = analyse_instr(&op, regs, instr); - if (r != 0) + if (r < 0) return r; + if (r > 0) { + emulate_update_regs(regs, &op); + return 1; + } err = 0; - size = GETSIZE(op.type); - switch (op.type & INSTR_TYPE_MASK) { - case CACHEOP: - if (!address_ok(regs, op.ea, 8)) - return 0; - switch (op.type & CACHEOP_MASK) { - case DCBST: - __cacheop_user_asmx(op.ea, err, "dcbst"); - break; - case DCBF: - __cacheop_user_asmx(op.ea, err, "dcbf"); - break; - case DCBTST: - if (op.reg == 0) - prefetchw((void *) op.ea); - break; - case DCBT: - if (op.reg == 0) - prefetch((void *) op.ea); - break; - case ICBI: - __cacheop_user_asmx(op.ea, err, "icbi"); - break; - } + type = op.type & INSTR_TYPE_MASK; + + if (OP_IS_LOAD_STORE(type)) { + err = emulate_loadstore(regs, &op); if (err) return 0; goto instr_done; + } - case LARX: - if (op.ea & (size - 1)) - break; /* can't handle misaligned */ - if (!address_ok(regs, op.ea, size)) + switch (type) { + case CACHEOP: + ea = truncate_if_32bit(regs->msr, op.ea); + if (!address_ok(regs, ea, 8)) return 0; - err = 0; - switch (size) { - case 4: - __get_user_asmx(val, op.ea, err, "lwarx"); + switch (op.type & CACHEOP_MASK) { + case DCBST: + __cacheop_user_asmx(ea, err, "dcbst"); break; -#ifdef __powerpc64__ - case 8: - __get_user_asmx(val, op.ea, err, "ldarx"); + case DCBF: + __cacheop_user_asmx(ea, err, "dcbf"); break; -#endif - default: - return 0; - } - if (!err) - regs->gpr[op.reg] = val; - goto ldst_done; - - case STCX: - if (op.ea & (size - 1)) - break; /* can't handle misaligned */ - if (!address_ok(regs, op.ea, size)) - return 0; - err = 0; - switch (size) { - case 4: - __put_user_asmx(op.val, op.ea, err, "stwcx.", cr); + case DCBTST: + if (op.reg == 0) + prefetchw((void *) ea); break; -#ifdef __powerpc64__ - case 8: - __put_user_asmx(op.val, op.ea, err, "stdcx.", cr); + case DCBT: + if (op.reg == 0) + prefetch((void *) ea); break; -#endif - default: + case ICBI: + __cacheop_user_asmx(ea, err, "icbi"); + break; + case DCBZ: + err = emulate_dcbz(ea, regs); + break; + } + if (err) { + regs->dar = ea; return 0; } - if (!err) - regs->ccr = (regs->ccr & 0x0fffffff) | - (cr & 0xe0000000) | - ((regs->xer >> 3) & 0x10000000); - goto ldst_done; - - case LOAD: - err = read_mem(®s->gpr[op.reg], op.ea, size, regs); - if (!err) { - if (op.type & SIGNEXT) - 
do_signext(®s->gpr[op.reg], size); - if (op.type & BYTEREV) - do_byterev(®s->gpr[op.reg], size); - } - goto ldst_done; - -#ifdef CONFIG_PPC_FPU - case LOAD_FP: - if (size == 4) - err = do_fp_load(op.reg, do_lfs, op.ea, size, regs); - else - err = do_fp_load(op.reg, do_lfd, op.ea, size, regs); - goto ldst_done; -#endif -#ifdef CONFIG_ALTIVEC - case LOAD_VMX: - err = do_vec_load(op.reg, do_lvx, op.ea & ~0xfUL, regs); - goto ldst_done; -#endif -#ifdef CONFIG_VSX - case LOAD_VSX: - err = do_vsx_load(op.reg, do_lxvd2x, op.ea, regs); - goto ldst_done; -#endif - case LOAD_MULTI: - if (regs->msr & MSR_LE) - return 0; - rd = op.reg; - for (i = 0; i < size; i += 4) { - nb = size - i; - if (nb > 4) - nb = 4; - err = read_mem(®s->gpr[rd], op.ea, nb, regs); - if (err) - return 0; - if (nb < 4) /* left-justify last bytes */ - regs->gpr[rd] <<= 32 - 8 * nb; - op.ea += 4; - ++rd; - } - goto instr_done; - - case STORE: - if ((op.type & UPDATE) && size == sizeof(long) && - op.reg == 1 && op.update_reg == 1 && - !(regs->msr & MSR_PR) && - op.ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) { - err = handle_stack_update(op.ea, regs); - goto ldst_done; - } - err = write_mem(op.val, op.ea, size, regs); - goto ldst_done; - -#ifdef CONFIG_PPC_FPU - case STORE_FP: - if (size == 4) - err = do_fp_store(op.reg, do_stfs, op.ea, size, regs); - else - err = do_fp_store(op.reg, do_stfd, op.ea, size, regs); - goto ldst_done; -#endif -#ifdef CONFIG_ALTIVEC - case STORE_VMX: - err = do_vec_store(op.reg, do_stvx, op.ea & ~0xfUL, regs); - goto ldst_done; -#endif -#ifdef CONFIG_VSX - case STORE_VSX: - err = do_vsx_store(op.reg, do_stxvd2x, op.ea, regs); - goto ldst_done; -#endif - case STORE_MULTI: - if (regs->msr & MSR_LE) - return 0; - rd = op.reg; - for (i = 0; i < size; i += 4) { - val = regs->gpr[rd]; - nb = size - i; - if (nb > 4) - nb = 4; - else - val >>= 32 - 8 * nb; - err = write_mem(val, op.ea, nb, regs); - if (err) - return 0; - op.ea += 4; - ++rd; - } goto instr_done; case MFMSR: @@ -1998,12 +3061,6 @@ int emulate_step(struct pt_regs *regs, unsigned int instr) } return 0; - ldst_done: - if (err) - return 0; - if (op.type & UPDATE) - regs->gpr[op.update_reg] = op.ea; - instr_done: regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4); return 1; diff --git a/arch/powerpc/lib/string_64.S b/arch/powerpc/lib/string_64.S index d5b4d9498c54..56aac4c22025 100644 --- a/arch/powerpc/lib/string_64.S +++ b/arch/powerpc/lib/string_64.S @@ -184,7 +184,7 @@ err1; std r0,8(r3) mtctr r6 mr r8,r3 14: -err1; dcbz r0,r3 +err1; dcbz 0,r3 add r3,r3,r9 bdnz 14b diff --git a/arch/powerpc/mm/8xx_mmu.c b/arch/powerpc/mm/8xx_mmu.c index f4c6472f2fc4..f29212e40f40 100644 --- a/arch/powerpc/mm/8xx_mmu.c +++ b/arch/powerpc/mm/8xx_mmu.c @@ -22,8 +22,11 @@ extern int __map_without_ltlbs; +static unsigned long block_mapped_ram; + /* - * Return PA for this VA if it is in IMMR area, or 0 + * Return PA for this VA if it is in an area mapped with LTLBs. 
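+ * That is, the fixed IMMR mapping or the linear RAM mapping tracked + * by block_mapped_ram.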
+ * Otherwise, returns 0 */ phys_addr_t v_block_mapped(unsigned long va) { @@ -33,11 +36,13 @@ phys_addr_t v_block_mapped(unsigned long va) return 0; if (va >= VIRT_IMMR_BASE && va < VIRT_IMMR_BASE + IMMR_SIZE) return p + va - VIRT_IMMR_BASE; + if (va >= PAGE_OFFSET && va < PAGE_OFFSET + block_mapped_ram) + return __pa(va); return 0; } /* - * Return VA for a given PA or 0 if not mapped + * Return VA for a given PA mapped with LTLBs or 0 if not mapped */ unsigned long p_block_mapped(phys_addr_t pa) { @@ -47,6 +52,8 @@ unsigned long p_block_mapped(phys_addr_t pa) return 0; if (pa >= p && pa < p + IMMR_SIZE) return VIRT_IMMR_BASE + pa - p; + if (pa < block_mapped_ram) + return (unsigned long)__va(pa); return 0; } @@ -58,7 +65,7 @@ unsigned long p_block_mapped(phys_addr_t pa) void __init MMU_init_hw(void) { /* PIN up to the 3 first 8Mb after IMMR in DTLB table */ -#ifdef CONFIG_PIN_TLB +#ifdef CONFIG_PIN_TLB_DATA unsigned long ctr = mfspr(SPRN_MD_CTR) & 0xfe000000; unsigned long flags = 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY; #ifdef CONFIG_PIN_TLB_IMMR @@ -80,7 +87,7 @@ void __init MMU_init_hw(void) #endif } -static void mmu_mapin_immr(void) +static void __init mmu_mapin_immr(void) { unsigned long p = PHYS_IMMR_BASE; unsigned long v = VIRT_IMMR_BASE; @@ -96,8 +103,11 @@ static void mmu_mapin_immr(void) extern unsigned int DTLBMiss_jmp; #endif extern unsigned int DTLBMiss_cmp, FixupDAR_cmp; +#ifndef CONFIG_PIN_TLB_TEXT +extern unsigned int ITLBMiss_cmp; +#endif -void mmu_patch_cmp_limit(unsigned int *addr, unsigned long mapped) +static void __init mmu_patch_cmp_limit(unsigned int *addr, unsigned long mapped) { unsigned int instr = *addr; @@ -115,6 +125,9 @@ unsigned long __init mmu_mapin_ram(unsigned long top) mmu_mapin_immr(); #ifndef CONFIG_PIN_TLB_IMMR patch_instruction(&DTLBMiss_jmp, PPC_INST_NOP); +#endif +#ifndef CONFIG_PIN_TLB_TEXT + mmu_patch_cmp_limit(&ITLBMiss_cmp, 0); #endif } else { mapped = top & ~(LARGE_PAGE_SIZE_8M - 1); @@ -133,11 +146,13 @@ unsigned long __init mmu_mapin_ram(unsigned long top) if (mapped) memblock_set_current_limit(mapped); + block_mapped_ram = mapped; + return mapped; } -void setup_initial_memory_limit(phys_addr_t first_memblock_base, - phys_addr_t first_memblock_size) +void __init setup_initial_memory_limit(phys_addr_t first_memblock_base, + phys_addr_t first_memblock_size) { /* We don't currently support the first MEMBLOCK not mapping 0 * physical on those processors diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile index 7414034df1c3..fb844d2f266e 100644 --- a/arch/powerpc/mm/Makefile +++ b/arch/powerpc/mm/Makefile @@ -8,7 +8,7 @@ ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) obj-y := fault.o mem.o pgtable.o mmap.o \ init_$(BITS).o pgtable_$(BITS).o \ - init-common.o + init-common.o mmu_context.o obj-$(CONFIG_PPC_MMU_NOHASH) += mmu_context_nohash.o tlb_nohash.o \ tlb_nohash_low.o obj-$(CONFIG_PPC_BOOK3E) += tlb_low_$(BITS)e.o @@ -22,8 +22,6 @@ ifeq ($(CONFIG_PPC_STD_MMU_64),y) obj-$(CONFIG_PPC_4K_PAGES) += hash64_4k.o obj-$(CONFIG_PPC_64K_PAGES) += hash64_64k.o endif -obj-$(CONFIG_PPC_ICSWX) += icswx.o -obj-$(CONFIG_PPC_ICSWX_PID) += icswx_pid.o obj-$(CONFIG_40x) += 40x_mmu.o obj-$(CONFIG_44x) += 44x_mmu.o obj-$(CONFIG_PPC_8xx) += 8xx_mmu.o diff --git a/arch/powerpc/mm/dump_hashpagetable.c b/arch/powerpc/mm/dump_hashpagetable.c index b1c144b03fcf..5c4c93dcff19 100644 --- a/arch/powerpc/mm/dump_hashpagetable.c +++ b/arch/powerpc/mm/dump_hashpagetable.c @@ -205,7 +205,7 @@ static void dump_hpte_info(struct pg_state *st, unsigned long ea, 
u64 v, u64 r, aps_index = calculate_pagesize(st, aps, "actual"); if (aps_index != 2) seq_printf(st->seq, "LP enc: %lx", lp); - seq_puts(st->seq, "\n"); + seq_putc(st->seq, '\n'); } diff --git a/arch/powerpc/mm/dump_linuxpagetables.c b/arch/powerpc/mm/dump_linuxpagetables.c index 44fe4833910f..c9282d27b203 100644 --- a/arch/powerpc/mm/dump_linuxpagetables.c +++ b/arch/powerpc/mm/dump_linuxpagetables.c @@ -350,7 +350,7 @@ static void note_page(struct pg_state *st, unsigned long addr, st->current_flags, pg_level[st->level].num); - seq_puts(st->seq, "\n"); + seq_putc(st->seq, '\n'); } /* diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 4c422632047b..4797d08581ce 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -45,43 +45,39 @@ #include #include -#include "icswx.h" +static inline bool notify_page_fault(struct pt_regs *regs) +{ + bool ret = false; #ifdef CONFIG_KPROBES -static inline int notify_page_fault(struct pt_regs *regs) -{ - int ret = 0; - /* kprobe_running() needs smp_processor_id() */ if (!user_mode(regs)) { preempt_disable(); if (kprobe_running() && kprobe_fault_handler(regs, 11)) - ret = 1; + ret = true; preempt_enable(); } +#endif /* CONFIG_KPROBES */ + + if (unlikely(debugger_fault_handler(regs))) + ret = true; return ret; } -#else -static inline int notify_page_fault(struct pt_regs *regs) -{ - return 0; -} -#endif /* * Check whether the instruction at regs->nip is a store using * an update addressing form which will update r1. */ -static int store_updates_sp(struct pt_regs *regs) +static bool store_updates_sp(struct pt_regs *regs) { unsigned int inst; if (get_user(inst, (unsigned int __user *)regs->nip)) - return 0; + return false; /* check for 1 in the rA field */ if (((inst >> 16) & 0x1f) != 1) - return 0; + return false; /* check major opcode */ switch (inst >> 26) { case 37: /* stwu */ @@ -89,7 +85,7 @@ static int store_updates_sp(struct pt_regs *regs) case 45: /* sthu */ case 53: /* stfsu */ case 55: /* stfdu */ - return 1; + return true; case 62: /* std or stdu */ return (inst & 3) == 1; case 31: @@ -101,18 +97,53 @@ static int store_updates_sp(struct pt_regs *regs) case 439: /* sthux */ case 695: /* stfsux */ case 759: /* stfdux */ - return 1; + return true; } } - return 0; + return false; } /* * do_page_fault error handling helpers */ -#define MM_FAULT_RETURN 0 -#define MM_FAULT_CONTINUE -1 -#define MM_FAULT_ERR(sig) (sig) +static int +__bad_area_nosemaphore(struct pt_regs *regs, unsigned long address, int si_code) +{ + /* + * If we are in kernel mode, bail out with a SEGV, this will + * be caught by the assembly which will restore the non-volatile + * registers before calling bad_page_fault() + */ + if (!user_mode(regs)) + return SIGSEGV; + + _exception(SIGSEGV, regs, si_code, address); + + return 0; +} + +static noinline int bad_area_nosemaphore(struct pt_regs *regs, unsigned long address) +{ + return __bad_area_nosemaphore(regs, address, SEGV_MAPERR); +} + +static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code) +{ + struct mm_struct *mm = current->mm; + + /* + * Something tried to access memory that isn't in our memory map.. + * Fix it, but check if it's kernel or user first.. 
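+ * The caller took mmap_sem for reading, so drop it here before + * raising the signal (or returning SIGSEGV for a kernel-mode fault) + * in __bad_area_nosemaphore().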
+ */ + up_read(&mm->mmap_sem); + + return __bad_area_nosemaphore(regs, address, si_code); +} + +static noinline int bad_area(struct pt_regs *regs, unsigned long address) +{ + return __bad_area(regs, address, SEGV_MAPERR); +} static int do_sigbus(struct pt_regs *regs, unsigned long address, unsigned int fault) @@ -121,7 +152,7 @@ static int do_sigbus(struct pt_regs *regs, unsigned long address, unsigned int lsb = 0; if (!user_mode(regs)) - return MM_FAULT_ERR(SIGBUS); + return SIGBUS; current->thread.trap_nr = BUS_ADRERR; info.si_signo = SIGBUS; @@ -142,25 +173,17 @@ static int do_sigbus(struct pt_regs *regs, unsigned long address, #endif info.si_addr_lsb = lsb; force_sig_info(SIGBUS, &info, current); - return MM_FAULT_RETURN; + return 0; } static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault) { /* - * Pagefault was interrupted by SIGKILL. We have no reason to - * continue the pagefault. + * Kernel page fault interrupted by SIGKILL. We have no reason to + * continue processing. */ - if (fatal_signal_pending(current)) { - /* Coming from kernel, we need to deal with uaccess fixups */ - if (user_mode(regs)) - return MM_FAULT_RETURN; - return MM_FAULT_ERR(SIGKILL); - } - - /* No fault: be happy */ - if (!(fault & VM_FAULT_ERROR)) - return MM_FAULT_CONTINUE; + if (fatal_signal_pending(current) && !user_mode(regs)) + return SIGKILL; /* Out of memory */ if (fault & VM_FAULT_OOM) { @@ -169,169 +192,38 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault) * made us unable to handle the page fault gracefully. */ if (!user_mode(regs)) - return MM_FAULT_ERR(SIGKILL); + return SIGSEGV; pagefault_out_of_memory(); - return MM_FAULT_RETURN; + } else { + if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| + VM_FAULT_HWPOISON_LARGE)) + return do_sigbus(regs, addr, fault); + else if (fault & VM_FAULT_SIGSEGV) + return bad_area_nosemaphore(regs, addr); + else + BUG(); } - - if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) - return do_sigbus(regs, addr, fault); - - /* We don't understand the fault code, this is fatal */ - BUG(); - return MM_FAULT_CONTINUE; + return 0; } -/* - * For 600- and 800-family processors, the error_code parameter is DSISR - * for a data fault, SRR1 for an instruction fault. For 400-family processors - * the error_code parameter is ESR for a data fault, 0 for an instruction - * fault. - * For 64-bit processors, the error_code parameter is - * - DSISR for a non-SLB data access fault, - * - SRR1 & 0x08000000 for a non-SLB instruction access fault - * - 0 any SLB fault. - * - * The return value is 0 if the fault was handled, or the signal - * number if this is a kernel fault that can't be handled here. - */ -int do_page_fault(struct pt_regs *regs, unsigned long address, - unsigned long error_code) +/* Is this a bad kernel fault ? */ +static bool bad_kernel_fault(bool is_exec, unsigned long error_code, + unsigned long address) { - enum ctx_state prev_state = exception_enter(); - struct vm_area_struct * vma; - struct mm_struct *mm = current->mm; - unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; - int code = SEGV_MAPERR; - int is_write = 0; - int trap = TRAP(regs); - int is_exec = trap == 0x400; - int is_user = user_mode(regs); - int fault; - int rc = 0, store_update_sp = 0; - -#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE)) - /* - * Fortunately the bit assignments in SRR1 for an instruction - * fault and DSISR for a data fault are mostly the same for the - * bits we are interested in. 
But there are some bits which - * indicate errors in DSISR but can validly be set in SRR1. - */ - if (is_exec) - error_code &= 0x48200000; - else - is_write = error_code & DSISR_ISSTORE; -#else - is_write = error_code & ESR_DST; -#endif /* CONFIG_4xx || CONFIG_BOOKE */ - -#ifdef CONFIG_PPC_ICSWX - /* - * we need to do this early because this "data storage - * interrupt" does not update the DAR/DEAR so we don't want to - * look at it - */ - if (error_code & ICSWX_DSI_UCT) { - rc = acop_handle_fault(regs, address, error_code); - if (rc) - goto bail; + if (is_exec && (error_code & (DSISR_NOEXEC_OR_G | DSISR_KEYFAULT))) { + printk_ratelimited(KERN_CRIT "kernel tried to execute" + " exec-protected page (%lx) -" + "exploit attempt? (uid: %d)\n", + address, from_kuid(&init_user_ns, + current_uid())); } -#endif /* CONFIG_PPC_ICSWX */ - - if (notify_page_fault(regs)) - goto bail; - - if (unlikely(debugger_fault_handler(regs))) - goto bail; - - /* - * The kernel should never take an execute fault nor should it - * take a page fault to a kernel address. - */ - if (!is_user && (is_exec || (address >= TASK_SIZE))) { - rc = SIGSEGV; - goto bail; - } - -#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE) || \ - defined(CONFIG_PPC_BOOK3S_64) || defined(CONFIG_PPC_8xx)) - if (error_code & DSISR_DABRMATCH) { - /* breakpoint match */ - do_break(regs, address, error_code); - goto bail; - } -#endif - - /* We restore the interrupt state now */ - if (!arch_irq_disabled_regs(regs)) - local_irq_enable(); - - if (faulthandler_disabled() || mm == NULL) { - if (!is_user) { - rc = SIGSEGV; - goto bail; - } - /* faulthandler_disabled() in user mode is really bad, - as is current->mm == NULL. */ - printk(KERN_EMERG "Page fault in user mode with " - "faulthandler_disabled() = %d mm = %p\n", - faulthandler_disabled(), mm); - printk(KERN_EMERG "NIP = %lx MSR = %lx\n", - regs->nip, regs->msr); - die("Weird page fault", regs, SIGSEGV); - } - - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); - - /* - * We want to do this outside mmap_sem, because reading code around nip - * can result in fault, which will cause a deadlock when called with - * mmap_sem held - */ - if (is_write && is_user) - store_update_sp = store_updates_sp(regs); - - if (is_user) - flags |= FAULT_FLAG_USER; - - /* When running in the kernel we expect faults to occur only to - * addresses in user space. All other faults represent errors in the - * kernel and should generate an OOPS. Unfortunately, in the case of an - * erroneous fault occurring in a code path which already holds mmap_sem - * we will deadlock attempting to validate the fault against the - * address space. Luckily the kernel only validly references user - * space from well defined areas of code, which are listed in the - * exceptions table. - * - * As the vast majority of faults will be valid we will only perform - * the source reference check when there is a possibility of a deadlock. - * Attempt to lock the address space, if we cannot we then validate the - * source. If this is invalid we can skip the address space check, - * thus avoiding the deadlock. 
- */ - if (!down_read_trylock(&mm->mmap_sem)) { - if (!is_user && !search_exception_tables(regs->nip)) - goto bad_area_nosemaphore; - -retry: - down_read(&mm->mmap_sem); - } else { - /* - * The above down_read_trylock() might have succeeded in - * which case we'll have missed the might_sleep() from - * down_read(): - */ - might_sleep(); - } - - vma = find_vma(mm, address); - if (!vma) - goto bad_area; - if (vma->vm_start <= address) - goto good_area; - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; + return is_exec || (address >= TASK_SIZE); +} +static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address, + struct vm_area_struct *vma, + bool store_update_sp) +{ /* * N.B. The POWER/Open ABI allows programs to access up to * 288 bytes below the stack pointer. @@ -345,7 +237,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address, /* get user regs even if this fault is in kernel mode */ struct pt_regs *uregs = current->thread.regs; if (uregs == NULL) - goto bad_area; + return true; /* * A user-mode access to an address a long way below @@ -360,55 +252,62 @@ int do_page_fault(struct pt_regs *regs, unsigned long address, * expand the stack rather than segfaulting. */ if (address + 2048 < uregs->gpr[1] && !store_update_sp) - goto bad_area; + return true; } - if (expand_stack(vma, address)) - goto bad_area; - -good_area: - code = SEGV_ACCERR; -#if defined(CONFIG_6xx) - if (error_code & 0x95700000) - /* an error such as lwarx to I/O controller space, - address matching DABR, eciwx, etc. */ - goto bad_area; -#endif /* CONFIG_6xx */ -#if defined(CONFIG_8xx) - /* The MPC8xx seems to always set 0x80000000, which is - * "undefined". Of those that can be set, this is the only - * one which seems bad. - */ - if (error_code & 0x10000000) - /* Guarded storage error. */ - goto bad_area; -#endif /* CONFIG_8xx */ + return false; +} +static bool access_error(bool is_write, bool is_exec, + struct vm_area_struct *vma) +{ + /* + * Allow execution from readable areas if the MMU does not + * provide separate controls over reading and executing. + * + * Note: That code used to not be enabled for 4xx/BookE. + * It is now as I/D cache coherency for these is done at + * set_pte_at() time and I see no reason why the test + * below wouldn't be valid on those processors. This -may- + * break programs compiled with a really old ABI though. + */ if (is_exec) { - /* - * Allow execution from readable areas if the MMU does not - * provide separate controls over reading and executing. - * - * Note: That code used to not be enabled for 4xx/BookE. - * It is now as I/D cache coherency for these is done at - * set_pte_at() time and I see no reason why the test - * below wouldn't be valid on those processors. This -may- - * break programs compiled with a really old ABI though. 
- */ - if (!(vma->vm_flags & VM_EXEC) && - (cpu_has_feature(CPU_FTR_NOEXECUTE) || - !(vma->vm_flags & (VM_READ | VM_WRITE)))) - goto bad_area; - /* a write */ - } else if (is_write) { - if (!(vma->vm_flags & VM_WRITE)) - goto bad_area; - flags |= FAULT_FLAG_WRITE; - /* a read */ - } else { - if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) - goto bad_area; + return !(vma->vm_flags & VM_EXEC) && + (cpu_has_feature(CPU_FTR_NOEXECUTE) || + !(vma->vm_flags & (VM_READ | VM_WRITE))); } + + if (is_write) { + if (unlikely(!(vma->vm_flags & VM_WRITE))) + return true; + return false; + } + + if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))) + return true; + + return false; +} + +#ifdef CONFIG_PPC_SMLPAR +static inline void cmo_account_page_fault(void) +{ + if (firmware_has_feature(FW_FEATURE_CMO)) { + u32 page_ins; + + preempt_disable(); + page_ins = be32_to_cpu(get_lppaca()->page_ins); + page_ins += 1 << PAGE_FACTOR; + get_lppaca()->page_ins = cpu_to_be32(page_ins); + preempt_enable(); + } +} +#else +static inline void cmo_account_page_fault(void) { } +#endif /* CONFIG_PPC_SMLPAR */ + #ifdef CONFIG_PPC_STD_MMU +static void sanity_check_fault(bool is_write, unsigned long error_code) +{ /* * For hash translation mode, we should never get a * PROTFAULT. Any update to pte to reduce access will result in us @@ -440,14 +339,166 @@ int do_page_fault(struct pt_regs *regs, unsigned long address, */ if (!radix_enabled() && !is_write) WARN_ON_ONCE(error_code & DSISR_PROTFAULT); +} +#else +static void sanity_check_fault(bool is_write, unsigned long error_code) { } #endif /* CONFIG_PPC_STD_MMU */ +/* + * Define the correct "is_write" bit in error_code based + * on the processor family + */ +#if (defined(CONFIG_4xx) || defined(CONFIG_BOOKE)) +#define page_fault_is_write(__err) ((__err) & ESR_DST) +#define page_fault_is_bad(__err) (0) +#else +#define page_fault_is_write(__err) ((__err) & DSISR_ISSTORE) +#if defined(CONFIG_PPC_8xx) +#define page_fault_is_bad(__err) ((__err) & DSISR_NOEXEC_OR_G) +#elif defined(CONFIG_PPC64) +#define page_fault_is_bad(__err) ((__err) & DSISR_BAD_FAULT_64S) +#else +#define page_fault_is_bad(__err) ((__err) & DSISR_BAD_FAULT_32S) +#endif +#endif + +/* + * For 600- and 800-family processors, the error_code parameter is DSISR + * for a data fault, SRR1 for an instruction fault. For 400-family processors + * the error_code parameter is ESR for a data fault, 0 for an instruction + * fault. + * For 64-bit processors, the error_code parameter is + * - DSISR for a non-SLB data access fault, + * - SRR1 & 0x08000000 for a non-SLB instruction access fault + * - 0 any SLB fault. + * + * The return value is 0 if the fault was handled, or the signal + * number if this is a kernel fault that can't be handled here. 
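+ * + * This helper is wrapped by do_page_fault(), which only adds + * exception_enter()/exception_exit() context tracking around it.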
+ */ +static int __do_page_fault(struct pt_regs *regs, unsigned long address, + unsigned long error_code) +{ + struct vm_area_struct * vma; + struct mm_struct *mm = current->mm; + unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; + int is_exec = TRAP(regs) == 0x400; + int is_user = user_mode(regs); + int is_write = page_fault_is_write(error_code); + int fault, major = 0; + bool store_update_sp = false; + + if (notify_page_fault(regs)) + return 0; + + if (unlikely(page_fault_is_bad(error_code))) { + if (is_user) { + _exception(SIGBUS, regs, BUS_OBJERR, address); + return 0; + } + return SIGBUS; + } + + /* Additional sanity check(s) */ + sanity_check_fault(is_write, error_code); + + /* + * The kernel should never take an execute fault nor should it + * take a page fault to a kernel address. + */ + if (unlikely(!is_user && bad_kernel_fault(is_exec, error_code, address))) + return SIGSEGV; + + /* + * If we're in an interrupt, have no user context or are running + * in a region with pagefaults disabled then we must not take the fault + */ + if (unlikely(faulthandler_disabled() || !mm)) { + if (is_user) + printk_ratelimited(KERN_ERR "Page fault in user mode" + " with faulthandler_disabled()=%d" + " mm=%p\n", + faulthandler_disabled(), mm); + return bad_area_nosemaphore(regs, address); + } + + /* We restore the interrupt state now */ + if (!arch_irq_disabled_regs(regs)) + local_irq_enable(); + + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); + + /* + * We want to do this outside mmap_sem, because reading code around nip + * can result in fault, which will cause a deadlock when called with + * mmap_sem held + */ + if (is_write && is_user) + store_update_sp = store_updates_sp(regs); + + if (is_user) + flags |= FAULT_FLAG_USER; + if (is_write) + flags |= FAULT_FLAG_WRITE; + if (is_exec) + flags |= FAULT_FLAG_INSTRUCTION; + + /* When running in the kernel we expect faults to occur only to + * addresses in user space. All other faults represent errors in the + * kernel and should generate an OOPS. Unfortunately, in the case of an + * erroneous fault occurring in a code path which already holds mmap_sem + * we will deadlock attempting to validate the fault against the + * address space. Luckily the kernel only validly references user + * space from well defined areas of code, which are listed in the + * exceptions table. + * + * As the vast majority of faults will be valid we will only perform + * the source reference check when there is a possibility of a deadlock. + * Attempt to lock the address space, if we cannot we then validate the + * source. If this is invalid we can skip the address space check, + * thus avoiding the deadlock. 
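+ * + * The retry label below is also the target of the goto taken when + * handle_mm_fault() returns VM_FAULT_RETRY.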
+ */ + if (unlikely(!down_read_trylock(&mm->mmap_sem))) { + if (!is_user && !search_exception_tables(regs->nip)) + return bad_area_nosemaphore(regs, address); + +retry: + down_read(&mm->mmap_sem); + } else { + /* + * The above down_read_trylock() might have succeeded in + * which case we'll have missed the might_sleep() from + * down_read(): + */ + might_sleep(); + } + + vma = find_vma(mm, address); + if (unlikely(!vma)) + return bad_area(regs, address); + if (likely(vma->vm_start <= address)) + goto good_area; + if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) + return bad_area(regs, address); + + /* The stack is being expanded, check if it's valid */ + if (unlikely(bad_stack_expansion(regs, address, vma, store_update_sp))) + return bad_area(regs, address); + + /* Try to expand it */ + if (unlikely(expand_stack(vma, address))) + return bad_area(regs, address); + +good_area: + if (unlikely(access_error(is_write, is_exec, vma))) + return bad_area(regs, address); + /* * If for any reason at all we couldn't handle the fault, * make sure we exit gracefully rather than endlessly redo * the fault. */ fault = handle_mm_fault(vma, address, flags); + major |= fault & VM_FAULT_MAJOR; /* * Handle the retry right now, the mmap_sem has been released in that @@ -465,64 +516,39 @@ int do_page_fault(struct pt_regs *regs, unsigned long address, if (!fatal_signal_pending(current)) goto retry; } - /* We will enter mm_fault_error() below */ - } else - up_read(¤t->mm->mmap_sem); - if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) { - if (fault & VM_FAULT_SIGSEGV) - goto bad_area_nosemaphore; - rc = mm_fault_error(regs, address, fault); - if (rc >= MM_FAULT_RETURN) - goto bail; - else - rc = 0; + /* + * User mode? Just return to handle the fatal exception otherwise + * return to bad_page_fault + */ + return is_user ? 0 : SIGBUS; } + up_read(¤t->mm->mmap_sem); + + if (unlikely(fault & VM_FAULT_ERROR)) + return mm_fault_error(regs, address, fault); + /* * Major/minor page fault accounting. */ - if (fault & VM_FAULT_MAJOR) { + if (major) { current->maj_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, - regs, address); -#ifdef CONFIG_PPC_SMLPAR - if (firmware_has_feature(FW_FEATURE_CMO)) { - u32 page_ins; - - preempt_disable(); - page_ins = be32_to_cpu(get_lppaca()->page_ins); - page_ins += 1 << PAGE_FACTOR; - get_lppaca()->page_ins = cpu_to_be32(page_ins); - preempt_enable(); - } -#endif /* CONFIG_PPC_SMLPAR */ + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); + cmo_account_page_fault(); } else { current->min_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, - regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); } + return 0; +} +NOKPROBE_SYMBOL(__do_page_fault); - goto bail; - -bad_area: - up_read(&mm->mmap_sem); - -bad_area_nosemaphore: - /* User mode accesses cause a SIGSEGV */ - if (is_user) { - _exception(SIGSEGV, regs, code, address); - goto bail; - } - - if (is_exec && (error_code & DSISR_PROTFAULT)) - printk_ratelimited(KERN_CRIT "kernel tried to execute NX-protected" - " page (%lx) - exploit attempt? 
(uid: %d)\n", - address, from_kuid(&init_user_ns, current_uid())); - - rc = SIGSEGV; - -bail: +int do_page_fault(struct pt_regs *regs, unsigned long address, + unsigned long error_code) +{ + enum ctx_state prev_state = exception_enter(); + int rc = __do_page_fault(regs, address, error_code); exception_exit(prev_state); return rc; } diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S index 6f962e5cb5e1..ffbd7c0bda96 100644 --- a/arch/powerpc/mm/hash_low_32.S +++ b/arch/powerpc/mm/hash_low_32.S @@ -575,7 +575,6 @@ _GLOBAL(flush_hash_pages) rlwinm r8,r8,0,31,29 /* clear HASHPTE bit */ stwcx. r8,0,r5 /* update the pte */ bne- 33b -EXPORT_SYMBOL(flush_hash_pages) /* Get the address of the primary PTE group in the hash table (r3) */ _GLOBAL(flush_hash_patch_A) @@ -634,6 +633,7 @@ _GLOBAL(flush_hash_patch_B) SYNC_601 isync blr +EXPORT_SYMBOL(flush_hash_pages) /* * Flush an entry from the TLB diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 7a20669c19e7..67ec2e927253 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -61,6 +61,7 @@ #include #include #include +#include #ifdef DEBUG #define DBG(fmt...) udbg_printf(fmt) @@ -507,9 +508,9 @@ static int __init htab_dt_scan_hugepage_blocks(unsigned long node, printk(KERN_INFO "Huge page(16GB) memory: " "addr = 0x%lX size = 0x%lX pages = %d\n", phys_addr, block_size, expected_pages); - if (phys_addr + (16 * GB) <= memblock_end_of_DRAM()) { + if (phys_addr + block_size * expected_pages <= memblock_end_of_DRAM()) { memblock_reserve(phys_addr, block_size * expected_pages); - add_gpage(phys_addr, block_size, expected_pages); + pseries_add_gpage(phys_addr, block_size, expected_pages); } return 0; } @@ -1019,6 +1020,7 @@ void __init hash__early_init_mmu(void) __kernel_virt_size = H_KERN_VIRT_SIZE; __vmalloc_start = H_VMALLOC_START; __vmalloc_end = H_VMALLOC_END; + __kernel_io_start = H_KERN_IO_START; vmemmap = (struct page *)H_VMEMMAP_BASE; ioremap_bot = IOREMAP_BASE; @@ -1228,7 +1230,6 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea, unsigned long vsid; pte_t *ptep; unsigned hugeshift; - const struct cpumask *tmp; int rc, user_region = 0; int psize, ssize; @@ -1280,8 +1281,7 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea, } /* Check CPU locality */ - tmp = cpumask_of(smp_processor_id()); - if (user_region && cpumask_equal(mm_cpumask(mm), tmp)) + if (user_region && mm_is_thread_local(mm)) flags |= HPTE_LOCAL_UPDATE; #ifndef CONFIG_PPC_64K_PAGES @@ -1297,7 +1297,7 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea, #endif /* CONFIG_PPC_64K_PAGES */ /* Get PTE and page size from page tables */ - ptep = __find_linux_pte_or_hugepte(pgdir, ea, &is_thp, &hugeshift); + ptep = find_linux_pte(pgdir, ea, &is_thp, &hugeshift); if (ptep == NULL || !pte_present(*ptep)) { DBG_LOW(" no PTE !\n"); rc = 1; @@ -1526,7 +1526,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea, * THP pages use update_mmu_cache_pmd. We don't do * hash preload there. Hence can ignore THP here */ - ptep = find_linux_pte_or_hugepte(pgdir, ea, NULL, &hugepage_shift); + ptep = find_current_mm_pte(pgdir, ea, NULL, &hugepage_shift); if (!ptep) goto out_exit; @@ -1543,7 +1543,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea, #endif /* CONFIG_PPC_64K_PAGES */ /* Is that local to this CPU ? 
*/ - if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) + if (mm_is_thread_local(mm)) update_flags |= HPTE_LOCAL_UPDATE; /* Hash it in */ diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index e1bf5ca397fe..1571a498a33f 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -24,6 +24,8 @@ #include #include #include +#include + #ifdef CONFIG_HUGETLB_PAGE @@ -36,32 +38,15 @@ unsigned int HPAGE_SHIFT; EXPORT_SYMBOL(HPAGE_SHIFT); -/* - * Tracks gpages after the device tree is scanned and before the - * huge_boot_pages list is ready. On non-Freescale implementations, this is - * just used to track 16G pages and so is a single array. FSL-based - * implementations may have more than one gpage size, so we need multiple - * arrays - */ -#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx) -#define MAX_NUMBER_GPAGES 128 -struct psize_gpages { - u64 gpage_list[MAX_NUMBER_GPAGES]; - unsigned int nr_gpages; -}; -static struct psize_gpages gpage_freearray[MMU_PAGE_COUNT]; -#else -#define MAX_NUMBER_GPAGES 1024 -static u64 gpage_freearray[MAX_NUMBER_GPAGES]; -static unsigned nr_gpages; -#endif - #define hugepd_none(hpd) (hpd_val(hpd) == 0) pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz) { - /* Only called for hugetlbfs pages, hence can ignore THP */ - return __find_linux_pte_or_hugepte(mm->pgd, addr, NULL, NULL); + /* + * Only called for hugetlbfs pages, hence can ignore THP and the + * irq disabled walk. + */ + return __find_linux_pte(mm->pgd, addr, NULL, NULL); } static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, @@ -210,145 +195,20 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz return hugepte_offset(*hpdp, addr, pdshift); } -#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx) -/* Build list of addresses of gigantic pages. This function is used in early +#ifdef CONFIG_PPC_BOOK3S_64 +/* + * Tracks gpages after the device tree is scanned and before the + * huge_boot_pages list is ready on pseries. + */ +#define MAX_NUMBER_GPAGES 1024 +__initdata static u64 gpage_freearray[MAX_NUMBER_GPAGES]; +__initdata static unsigned nr_gpages; + +/* + * Build list of addresses of gigantic pages. This function is used in early * boot before the buddy allocator is setup. */ -void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages) -{ - unsigned int idx = shift_to_mmu_psize(__ffs(page_size)); - int i; - - if (addr == 0) - return; - - gpage_freearray[idx].nr_gpages = number_of_pages; - - for (i = 0; i < number_of_pages; i++) { - gpage_freearray[idx].gpage_list[i] = addr; - addr += page_size; - } -} - -/* - * Moves the gigantic page addresses from the temporary list to the - * huge_boot_pages list. 
- */ -int alloc_bootmem_huge_page(struct hstate *hstate) -{ - struct huge_bootmem_page *m; - int idx = shift_to_mmu_psize(huge_page_shift(hstate)); - int nr_gpages = gpage_freearray[idx].nr_gpages; - - if (nr_gpages == 0) - return 0; - -#ifdef CONFIG_HIGHMEM - /* - * If gpages can be in highmem we can't use the trick of storing the - * data structure in the page; allocate space for this - */ - m = memblock_virt_alloc(sizeof(struct huge_bootmem_page), 0); - m->phys = gpage_freearray[idx].gpage_list[--nr_gpages]; -#else - m = phys_to_virt(gpage_freearray[idx].gpage_list[--nr_gpages]); -#endif - - list_add(&m->list, &huge_boot_pages); - gpage_freearray[idx].nr_gpages = nr_gpages; - gpage_freearray[idx].gpage_list[nr_gpages] = 0; - m->hstate = hstate; - - return 1; -} -/* - * Scan the command line hugepagesz= options for gigantic pages; store those in - * a list that we use to allocate the memory once all options are parsed. - */ - -unsigned long gpage_npages[MMU_PAGE_COUNT]; - -static int __init do_gpage_early_setup(char *param, char *val, - const char *unused, void *arg) -{ - static phys_addr_t size; - unsigned long npages; - - /* - * The hugepagesz and hugepages cmdline options are interleaved. We - * use the size variable to keep track of whether or not this was done - * properly and skip over instances where it is incorrect. Other - * command-line parsing code will issue warnings, so we don't need to. - * - */ - if ((strcmp(param, "default_hugepagesz") == 0) || - (strcmp(param, "hugepagesz") == 0)) { - size = memparse(val, NULL); - } else if (strcmp(param, "hugepages") == 0) { - if (size != 0) { - if (sscanf(val, "%lu", &npages) <= 0) - npages = 0; - if (npages > MAX_NUMBER_GPAGES) { - pr_warn("MMU: %lu pages requested for page " -#ifdef CONFIG_PHYS_ADDR_T_64BIT - "size %llu KB, limiting to " -#else - "size %u KB, limiting to " -#endif - __stringify(MAX_NUMBER_GPAGES) "\n", - npages, size / 1024); - npages = MAX_NUMBER_GPAGES; - } - gpage_npages[shift_to_mmu_psize(__ffs(size))] = npages; - size = 0; - } - } - return 0; -} - - -/* - * This function allocates physical space for pages that are larger than the - * buddy allocator can handle. We want to allocate these in highmem because - * the amount of lowmem is limited. This means that this function MUST be - * called before lowmem_end_addr is set up in MMU_init() in order for the lmb - * allocate to grab highmem. - */ -void __init reserve_hugetlb_gpages(void) -{ - static __initdata char cmdline[COMMAND_LINE_SIZE]; - phys_addr_t size, base; - int i; - - strlcpy(cmdline, boot_command_line, COMMAND_LINE_SIZE); - parse_args("hugetlb gpages", cmdline, NULL, 0, 0, 0, - NULL, &do_gpage_early_setup); - - /* - * Walk gpage list in reverse, allocating larger page sizes first. - * Skip over unsupported sizes, or sizes that have 0 gpages allocated. - * When we reach the point in the list where pages are no longer - * considered gpages, we're done. - */ - for (i = MMU_PAGE_COUNT-1; i >= 0; i--) { - if (mmu_psize_defs[i].shift == 0 || gpage_npages[i] == 0) - continue; - else if (mmu_psize_to_shift(i) < (MAX_ORDER + PAGE_SHIFT)) - break; - - size = (phys_addr_t)(1ULL << mmu_psize_to_shift(i)); - base = memblock_alloc_base(size * gpage_npages[i], size, - MEMBLOCK_ALLOC_ANYWHERE); - add_gpage(base, size, gpage_npages[i]); - } -} - -#else /* !PPC_FSL_BOOK3E */ - -/* Build list of addresses of gigantic pages. This function is used in early - * boot before the buddy allocator is setup. 
- */ -void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages) +void __init pseries_add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages) { if (!addr) return; @@ -360,10 +220,7 @@ void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages) } } -/* Moves the gigantic page addresses from the temporary list to the - * huge_boot_pages list. - */ -int alloc_bootmem_huge_page(struct hstate *hstate) +int __init pseries_alloc_bootmem_huge_page(struct hstate *hstate) { struct huge_bootmem_page *m; if (nr_gpages == 0) @@ -376,6 +233,17 @@ int alloc_bootmem_huge_page(struct hstate *hstate) } #endif + +int __init alloc_bootmem_huge_page(struct hstate *h) +{ + +#ifdef CONFIG_PPC_BOOK3S_64 + if (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled()) + return pseries_alloc_bootmem_huge_page(h); +#endif + return __alloc_bootmem_huge_page(h); +} + #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx) #define HUGEPD_FREELIST_SIZE \ ((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t)) @@ -407,8 +275,7 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte) batchp = &get_cpu_var(hugepd_freelist_cur); if (atomic_read(&tlb->mm->mm_users) < 2 || - cpumask_equal(mm_cpumask(tlb->mm), - cpumask_of(smp_processor_id()))) { + mm_is_thread_local(tlb->mm)) { kmem_cache_free(hugepte_cache, hugepte); put_cpu_var(hugepd_freelist_cur); return; @@ -886,9 +753,8 @@ void flush_dcache_icache_hugepage(struct page *page) * This function need to be called with interrupts disabled. We use this variant * when we have MSR[EE] = 0 but the paca->soft_enabled = 1 */ - -pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, - bool *is_thp, unsigned *shift) +pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea, + bool *is_thp, unsigned *hpage_shift) { pgd_t pgd, *pgdp; pud_t pud, *pudp; @@ -897,8 +763,8 @@ pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, hugepd_t *hpdp = NULL; unsigned pdshift = PGDIR_SHIFT; - if (shift) - *shift = 0; + if (hpage_shift) + *hpage_shift = 0; if (is_thp) *is_thp = false; @@ -968,16 +834,15 @@ pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, ret_pte = hugepte_offset(*hpdp, ea, pdshift); pdshift = hugepd_shift(*hpdp); out: - if (shift) - *shift = pdshift; + if (hpage_shift) + *hpage_shift = pdshift; return ret_pte; } -EXPORT_SYMBOL_GPL(__find_linux_pte_or_hugepte); +EXPORT_SYMBOL_GPL(__find_linux_pte); int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { - unsigned long mask; unsigned long pte_end; struct page *head, *page; pte_t pte; @@ -988,18 +853,10 @@ int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr, end = pte_end; pte = READ_ONCE(*ptep); - mask = _PAGE_PRESENT | _PAGE_READ; - /* - * On some CPUs like the 8xx, _PAGE_RW hence _PAGE_WRITE is defined - * as 0 and _PAGE_RO has to be set when a page is not writable - */ - if (write) - mask |= _PAGE_WRITE; - else - mask |= _PAGE_RO; - - if ((pte_val(pte) & mask) != mask) + if (!pte_present(pte) || !pte_read(pte)) + return 0; + if (write && !pte_write(pte)) return 0; /* hugepages are never "special" */ diff --git a/arch/powerpc/mm/icswx.c b/arch/powerpc/mm/icswx.c deleted file mode 100644 index 1fa794d7d59f..000000000000 --- a/arch/powerpc/mm/icswx.c +++ /dev/null @@ -1,292 +0,0 @@ -/* - * ICSWX and ACOP Management - * - * Copyright (C) 2011 Anton Blanchard, IBM Corp. 
- * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "icswx.h" - -/* - * The processor and its L2 cache cause the icswx instruction to - * generate a COP_REQ transaction on PowerBus. The transaction has no - * address, and the processor does not perform an MMU access to - * authenticate the transaction. The command portion of the PowerBus - * COP_REQ transaction includes the LPAR_ID (LPID) and the coprocessor - * Process ID (PID), which the coprocessor compares to the authorized - * LPID and PID held in the coprocessor, to determine if the process - * is authorized to generate the transaction. The data of the COP_REQ - * transaction is 128-byte or less in size and is placed in cacheable - * memory on a 128-byte cache line boundary. - * - * The task to use a coprocessor should use use_cop() to mark the use - * of the Coprocessor Type (CT) and context switching. On a server - * class processor, the PID register is used only for coprocessor - * management + * and so a coprocessor PID is allocated before - * executing icswx + * instruction. Drop_cop() is used to free the - * coprocessor PID. - * - * Example: - * Host Fabric Interface (HFI) is a PowerPC network coprocessor. - * Each HFI have multiple windows. Each HFI window serves as a - * network device sending to and receiving from HFI network. - * HFI immediate send function uses icswx instruction. The immediate - * send function allows small (single cache-line) packets be sent - * without using the regular HFI send FIFO and doorbell, which are - * much slower than immediate send. - * - * For each task intending to use HFI immediate send, the HFI driver - * calls use_cop() to obtain a coprocessor PID for the task. - * The HFI driver then allocate a free HFI window and save the - * coprocessor PID to the HFI window to allow the task to use the - * HFI window. - * - * The HFI driver repeatedly creates immediate send packets and - * issues icswx instruction to send data through the HFI window. - * The HFI compares the coprocessor PID in the CPU PID register - * to the PID held in the HFI window to determine if the transaction - * is allowed. - * - * When the task to release the HFI window, the HFI driver calls - * drop_cop() to release the coprocessor PID. - */ - -void switch_cop(struct mm_struct *next) -{ -#ifdef CONFIG_PPC_ICSWX_PID - mtspr(SPRN_PID, next->context.cop_pid); -#endif - mtspr(SPRN_ACOP, next->context.acop); -} - -/** - * Start using a coprocessor. - * @acop: mask of coprocessor to be used. - * @mm: The mm the coprocessor to associate with. Most likely current mm. - * - * Return a positive PID if successful. Negative errno otherwise. - * The returned PID will be fed to the coprocessor to determine if an - * icswx transaction is authenticated. 
- */ -int use_cop(unsigned long acop, struct mm_struct *mm) -{ - int ret; - - if (!cpu_has_feature(CPU_FTR_ICSWX)) - return -ENODEV; - - if (!mm || !acop) - return -EINVAL; - - /* The page_table_lock ensures mm_users won't change under us */ - spin_lock(&mm->page_table_lock); - spin_lock(mm->context.cop_lockp); - - ret = get_cop_pid(mm); - if (ret < 0) - goto out; - - /* update acop */ - mm->context.acop |= acop; - - sync_cop(mm); - - /* - * If this is a threaded process then there might be other threads - * running. We need to send an IPI to force them to pick up any - * change in PID and ACOP. - */ - if (atomic_read(&mm->mm_users) > 1) - smp_call_function(sync_cop, mm, 1); - -out: - spin_unlock(mm->context.cop_lockp); - spin_unlock(&mm->page_table_lock); - - return ret; -} -EXPORT_SYMBOL_GPL(use_cop); - -/** - * Stop using a coprocessor. - * @acop: mask of coprocessor to be stopped. - * @mm: The mm the coprocessor associated with. - */ -void drop_cop(unsigned long acop, struct mm_struct *mm) -{ - int free_pid; - - if (!cpu_has_feature(CPU_FTR_ICSWX)) - return; - - if (WARN_ON_ONCE(!mm)) - return; - - /* The page_table_lock ensures mm_users won't change under us */ - spin_lock(&mm->page_table_lock); - spin_lock(mm->context.cop_lockp); - - mm->context.acop &= ~acop; - - free_pid = disable_cop_pid(mm); - sync_cop(mm); - - /* - * If this is a threaded process then there might be other threads - * running. We need to send an IPI to force them to pick up any - * change in PID and ACOP. - */ - if (atomic_read(&mm->mm_users) > 1) - smp_call_function(sync_cop, mm, 1); - - if (free_pid != COP_PID_NONE) - free_cop_pid(free_pid); - - spin_unlock(mm->context.cop_lockp); - spin_unlock(&mm->page_table_lock); -} -EXPORT_SYMBOL_GPL(drop_cop); - -static int acop_use_cop(int ct) -{ - /* There is no alternate policy, yet */ - return -1; -} - -/* - * Get the instruction word at the NIP - */ -static u32 acop_get_inst(struct pt_regs *regs) -{ - u32 inst; - u32 __user *p; - - p = (u32 __user *)regs->nip; - if (!access_ok(VERIFY_READ, p, sizeof(*p))) - return 0; - - if (__get_user(inst, p)) - return 0; - - return inst; -} - -/** - * @regs: registers at time of interrupt - * @address: storage address - * @error_code: Fault code, usually the DSISR or ESR depending on - * processor type - * - * Return 0 if we are able to resolve the data storage fault that - * results from a CT miss in the ACOP register. - */ -int acop_handle_fault(struct pt_regs *regs, unsigned long address, - unsigned long error_code) -{ - int ct; - u32 inst = 0; - - if (!cpu_has_feature(CPU_FTR_ICSWX)) { - pr_info("No coprocessors available"); - _exception(SIGILL, regs, ILL_ILLOPN, address); - } - - if (!user_mode(regs)) { - /* this could happen if the HV denies the - * kernel access, for now we just die */ - die("ICSWX from kernel failed", regs, SIGSEGV); - } - - /* Some implementations leave us a hint for the CT */ - ct = ICSWX_GET_CT_HINT(error_code); - if (ct < 0) { - /* we have to peek at the instruction word to figure out CT */ - u32 ccw; - u32 rs; - - inst = acop_get_inst(regs); - if (inst == 0) - return -1; - - rs = (inst >> (31 - 10)) & 0x1f; - ccw = regs->gpr[rs]; - ct = (ccw >> 16) & 0x3f; - } - - /* - * We could be here because another thread has enabled acop - * but the ACOP register has yet to be updated. - * - * This should have been taken care of by the IPI to sync all - * the threads (see smp_call_function(sync_cop, mm, 1)), but - * that could take forever if there are a significant amount - * of threads. 
- * - * Given the number of threads on some of these systems, - * perhaps this is the best way to sync ACOP rather than whack - * every thread with an IPI. - */ - if ((acop_copro_type_bit(ct) & current->active_mm->context.acop) != 0) { - sync_cop(current->active_mm); - return 0; - } - - /* check for alternate policy */ - if (!acop_use_cop(ct)) - return 0; - - /* at this point the CT is unknown to the system */ - pr_warn("%s[%d]: Coprocessor %d is unavailable\n", - current->comm, current->pid, ct); - - /* get inst if we don't already have it */ - if (inst == 0) { - inst = acop_get_inst(regs); - if (inst == 0) - return -1; - } - - /* Check if the instruction is the "record form" */ - if (inst & 1) { - /* - * the instruction is "record" form so we can reject - * using CR0 - */ - regs->ccr &= ~(0xful << 28); - regs->ccr |= ICSWX_RC_NOT_FOUND << 28; - - /* Move on to the next instruction */ - regs->nip += 4; - } else { - /* - * There is no architected mechanism to report a bad - * CT so we could either SIGILL or report nothing. - * Since the non-record version should only bu used - * for "hints" or "don't care" we should probably do - * nothing. However, I could see how some people - * might want an SIGILL so it here if you want it. - */ -#ifdef CONFIG_PPC_ICSWX_USE_SIGILL - _exception(SIGILL, regs, ILL_ILLOPN, address); -#else - regs->nip += 4; -#endif - } - - return 0; -} -EXPORT_SYMBOL_GPL(acop_handle_fault); diff --git a/arch/powerpc/mm/icswx.h b/arch/powerpc/mm/icswx.h deleted file mode 100644 index 6dedc08e62c8..000000000000 --- a/arch/powerpc/mm/icswx.h +++ /dev/null @@ -1,68 +0,0 @@ -#ifndef _ARCH_POWERPC_MM_ICSWX_H_ -#define _ARCH_POWERPC_MM_ICSWX_H_ - -/* - * ICSWX and ACOP Management - * - * Copyright (C) 2011 Anton Blanchard, IBM Corp. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * - */ - -#include - -/* also used to denote that PIDs are not used */ -#define COP_PID_NONE 0 - -static inline void sync_cop(void *arg) -{ - struct mm_struct *mm = arg; - - if (mm == current->active_mm) - switch_cop(current->active_mm); -} - -#ifdef CONFIG_PPC_ICSWX_PID -extern int get_cop_pid(struct mm_struct *mm); -extern int disable_cop_pid(struct mm_struct *mm); -extern void free_cop_pid(int free_pid); -#else -#define get_cop_pid(m) (COP_PID_NONE) -#define disable_cop_pid(m) (COP_PID_NONE) -#define free_cop_pid(p) -#endif - -/* - * These are implementation bits for architected registers. If this - * ever becomes architecture the should be moved to reg.h et. al. 
- */ -/* UCT is the same bit for Server and Embedded */ -#define ICSWX_DSI_UCT 0x00004000 /* Unavailable Coprocessor Type */ - -#ifdef CONFIG_PPC_BOOK3E -/* Embedded implementation gives us no hints as to what the CT is */ -#define ICSWX_GET_CT_HINT(x) (-1) -#else -/* Server implementation contains the CT value in the DSISR */ -#define ICSWX_DSISR_CTMASK 0x00003f00 -#define ICSWX_GET_CT_HINT(x) (((x) & ICSWX_DSISR_CTMASK) >> 8) -#endif - -#define ICSWX_RC_STARTED 0x8 /* The request has been started */ -#define ICSWX_RC_NOT_IDLE 0x4 /* No coprocessor found idle */ -#define ICSWX_RC_NOT_FOUND 0x2 /* No coprocessor found */ -#define ICSWX_RC_UNDEFINED 0x1 /* Reserved */ - -extern int acop_handle_fault(struct pt_regs *regs, unsigned long address, - unsigned long error_code); - -static inline u64 acop_copro_type_bit(unsigned int type) -{ - return 1ULL << (63 - type); -} - -#endif /* !_ARCH_POWERPC_MM_ICSWX_H_ */ diff --git a/arch/powerpc/mm/icswx_pid.c b/arch/powerpc/mm/icswx_pid.c deleted file mode 100644 index 91e30eb7d054..000000000000 --- a/arch/powerpc/mm/icswx_pid.c +++ /dev/null @@ -1,87 +0,0 @@ -/* - * ICSWX and ACOP/PID Management - * - * Copyright (C) 2011 Anton Blanchard, IBM Corp. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include "icswx.h" - -#define COP_PID_MIN (COP_PID_NONE + 1) -#define COP_PID_MAX (0xFFFF) - -static DEFINE_SPINLOCK(mmu_context_acop_lock); -static DEFINE_IDA(cop_ida); - -static int new_cop_pid(struct ida *ida, int min_id, int max_id, - spinlock_t *lock) -{ - int index; - int err; - -again: - if (!ida_pre_get(ida, GFP_KERNEL)) - return -ENOMEM; - - spin_lock(lock); - err = ida_get_new_above(ida, min_id, &index); - spin_unlock(lock); - - if (err == -EAGAIN) - goto again; - else if (err) - return err; - - if (index > max_id) { - spin_lock(lock); - ida_remove(ida, index); - spin_unlock(lock); - return -ENOMEM; - } - - return index; -} - -int get_cop_pid(struct mm_struct *mm) -{ - int pid; - - if (mm->context.cop_pid == COP_PID_NONE) { - pid = new_cop_pid(&cop_ida, COP_PID_MIN, COP_PID_MAX, - &mmu_context_acop_lock); - if (pid >= 0) - mm->context.cop_pid = pid; - } - return mm->context.cop_pid; -} - -int disable_cop_pid(struct mm_struct *mm) -{ - int free_pid = COP_PID_NONE; - - if ((!mm->context.acop) && (mm->context.cop_pid != COP_PID_NONE)) { - free_pid = mm->context.cop_pid; - mm->context.cop_pid = COP_PID_NONE; - } - return free_pid; -} - -void free_cop_pid(int free_pid) -{ - spin_lock(&mmu_context_acop_lock); - ida_remove(&cop_ida, free_pid); - spin_unlock(&mmu_context_acop_lock); -} diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c index 8a7c38b8d335..6419b33ca309 100644 --- a/arch/powerpc/mm/init_32.c +++ b/arch/powerpc/mm/init_32.c @@ -113,6 +113,12 @@ void __init MMU_setup(void) __map_without_bats = 1; __map_without_ltlbs = 1; } +#ifdef CONFIG_STRICT_KERNEL_RWX + if (rodata_enabled) { + __map_without_bats = 1; + __map_without_ltlbs = 1; + } +#endif } /* @@ -132,8 +138,6 @@ void __init MMU_init(void) * Reserve gigantic pages for hugetlb. This MUST occur before * lowmem_end_addr is initialized below. 
*/ - reserve_hugetlb_gpages(); - if (memblock.memory.cnt > 1) { #ifndef CONFIG_WII memblock_enforce_memory_limit(memblock.memory.regions[0].size); diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 5b4c25d12ff3..588a521966ec 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -356,7 +356,7 @@ struct page *realmode_pfn_to_page(unsigned long pfn) } EXPORT_SYMBOL_GPL(realmode_pfn_to_page); -#elif defined(CONFIG_FLATMEM) +#else struct page *realmode_pfn_to_page(unsigned long pfn) { @@ -365,7 +365,7 @@ struct page *realmode_pfn_to_page(unsigned long pfn) } EXPORT_SYMBOL_GPL(realmode_pfn_to_page); -#endif /* CONFIG_SPARSEMEM_VMEMMAP/CONFIG_FLATMEM */ +#endif /* CONFIG_SPARSEMEM_VMEMMAP */ #ifdef CONFIG_PPC_STD_MMU_64 static bool disable_radix; @@ -381,7 +381,7 @@ early_param("disable_radix", parse_disable_radix); * /chosen/ibm,architecture-vec-5 to see if the hypervisor is willing to do * radix. If not, we clear the radix feature bit so we fall back to hash. */ -static void early_check_vec5(void) +static void __init early_check_vec5(void) { unsigned long root, chosen; int size; diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 46b4e67d2372..4362b86ef84c 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -436,7 +436,7 @@ void flush_dcache_icache_page(struct page *page) return; } #endif -#if defined(CONFIG_8xx) || defined(CONFIG_PPC64) +#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC64) /* On 8xx there is no need to kmap since highmem is not supported */ __flush_dcache_icache(page_address(page)); #else diff --git a/arch/powerpc/mm/mmu_context.c b/arch/powerpc/mm/mmu_context.c new file mode 100644 index 000000000000..0f613bc63c50 --- /dev/null +++ b/arch/powerpc/mm/mmu_context.c @@ -0,0 +1,99 @@ +/* + * Common implementation of switch_mm_irqs_off + * + * Copyright IBM Corp. 2017 + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + */ + +#include +#include + +#include + +#if defined(CONFIG_PPC32) +static inline void switch_mm_pgdir(struct task_struct *tsk, + struct mm_struct *mm) +{ + /* 32-bit keeps track of the current PGDIR in the thread struct */ + tsk->thread.pgdir = mm->pgd; +} +#elif defined(CONFIG_PPC_BOOK3E_64) +static inline void switch_mm_pgdir(struct task_struct *tsk, + struct mm_struct *mm) +{ + /* 64-bit Book3E keeps track of current PGD in the PACA */ + get_paca()->pgd = mm->pgd; +} +#else +static inline void switch_mm_pgdir(struct task_struct *tsk, + struct mm_struct *mm) { } +#endif + +#ifdef CONFIG_PPC_BOOK3S_64 +static inline void inc_mm_active_cpus(struct mm_struct *mm) +{ + atomic_inc(&mm->context.active_cpus); +} +#else +static inline void inc_mm_active_cpus(struct mm_struct *mm) { } +#endif + +void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, + struct task_struct *tsk) +{ + bool new_on_cpu = false; + + /* Mark this context has been used on the new CPU */ + if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) { + cpumask_set_cpu(smp_processor_id(), mm_cpumask(next)); + inc_mm_active_cpus(next); + + /* + * This full barrier orders the store to the cpumask above vs + * a subsequent operation which allows this CPU to begin loading + * translations for next. 
+ * + * When using the radix MMU that operation is the load of the + * MMU context id, which is then moved to SPRN_PID. + * + * For the hash MMU it is either the first load from slb_cache + * in switch_slb(), and/or the store of paca->mm_ctx_id in + * copy_mm_to_paca(). + * + * On the read side the barrier is in pte_xchg(), which orders + * the store to the PTE vs the load of mm_cpumask. + */ + smp_mb(); + + new_on_cpu = true; + } + + /* Some subarchs need to track the PGD elsewhere */ + switch_mm_pgdir(tsk, next); + + /* Nothing else to do if we aren't actually switching */ + if (prev == next) + return; + + /* + * We must stop all altivec streams before changing the HW + * context + */ + if (cpu_has_feature(CPU_FTR_ALTIVEC)) + asm volatile ("dssall"); + + if (new_on_cpu) + radix_kvm_prefetch_workaround(next); + + /* + * The actual HW switching method differs between the various + * sub architectures. Out of line for now + */ + switch_mmu_context(prev, next, tsk); +} + diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c index a75f63833284..05e15386d4cb 100644 --- a/arch/powerpc/mm/mmu_context_book3s64.c +++ b/arch/powerpc/mm/mmu_context_book3s64.c @@ -25,8 +25,6 @@ #include #include -#include "icswx.h" - static DEFINE_SPINLOCK(mmu_context_lock); static DEFINE_IDA(mmu_context_ida); @@ -165,16 +163,6 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) return index; mm->context.id = index; -#ifdef CONFIG_PPC_ICSWX - mm->context.cop_lockp = kmalloc(sizeof(spinlock_t), GFP_KERNEL); - if (!mm->context.cop_lockp) { - __destroy_context(index); - subpage_prot_free(mm); - mm->context.id = MMU_NO_CONTEXT; - return -ENOMEM; - } - spin_lock_init(mm->context.cop_lockp); -#endif /* CONFIG_PPC_ICSWX */ #ifdef CONFIG_PPC_64K_PAGES mm->context.pte_frag = NULL; @@ -182,6 +170,8 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) #ifdef CONFIG_SPAPR_TCE_IOMMU mm_iommu_init(mm); #endif + atomic_set(&mm->context.active_cpus, 0); + return 0; } @@ -226,12 +216,6 @@ void destroy_context(struct mm_struct *mm) #ifdef CONFIG_SPAPR_TCE_IOMMU WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list)); #endif -#ifdef CONFIG_PPC_ICSWX - drop_cop(mm->context.acop, mm); - kfree(mm->context.cop_lockp); - mm->context.cop_lockp = NULL; -#endif /* CONFIG_PPC_ICSWX */ - if (radix_enabled()) { /* * Radix doesn't have a valid bit in the process table diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h index d46128b22150..57fbc554c785 100644 --- a/arch/powerpc/mm/mmu_decl.h +++ b/arch/powerpc/mm/mmu_decl.h @@ -27,7 +27,7 @@ /* * On 40x and 8xx, we directly inline tlbia and tlbivax */ -#if defined(CONFIG_40x) || defined(CONFIG_8xx) +#if defined(CONFIG_40x) || defined(CONFIG_PPC_8xx) static inline void _tlbil_all(void) { asm volatile ("sync; tlbia; isync" : : : "memory"); @@ -38,7 +38,7 @@ static inline void _tlbil_pid(unsigned int pid) } #define _tlbil_pid_noind(pid) _tlbil_pid(pid) -#else /* CONFIG_40x || CONFIG_8xx */ +#else /* CONFIG_40x || CONFIG_PPC_8xx */ extern void _tlbil_all(void); extern void _tlbil_pid(unsigned int pid); #ifdef CONFIG_PPC_BOOK3E @@ -46,12 +46,12 @@ extern void _tlbil_pid_noind(unsigned int pid); #else #define _tlbil_pid_noind(pid) _tlbil_pid(pid) #endif -#endif /* !(CONFIG_40x || CONFIG_8xx) */ +#endif /* !(CONFIG_40x || CONFIG_PPC_8xx) */ /* * On 8xx, we directly inline tlbie, on others, it's extern */ -#ifdef CONFIG_8xx +#ifdef CONFIG_PPC_8xx static inline void _tlbil_va(unsigned long address, unsigned int 
pid, unsigned int tsize, unsigned int ind) { @@ -67,7 +67,7 @@ static inline void _tlbil_va(unsigned long address, unsigned int pid, { __tlbil_va(address, pid); } -#endif /* CONFIG_8xx */ +#endif /* CONFIG_PPC_8xx */ #if defined(CONFIG_PPC_BOOK3E) || defined(CONFIG_PPC_47x) extern void _tlbivax_bcast(unsigned long address, unsigned int pid, diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c index 31eed8fa8e99..3b65917785a5 100644 --- a/arch/powerpc/mm/pgtable-book3s64.c +++ b/arch/powerpc/mm/pgtable-book3s64.c @@ -9,6 +9,7 @@ #include #include +#include #include #include @@ -64,6 +65,27 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr, trace_hugepage_set_pmd(addr, pmd_val(pmd)); return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd)); } + +static void do_nothing(void *unused) +{ + +} +/* + * Serialize against find_current_mm_pte which does lock-less + * lookup in page tables with local interrupts disabled. For huge pages + * it casts pmd_t to pte_t. Since format of pte_t is different from + * pmd_t we want to prevent transit from pmd pointing to page table + * to pmd pointing to huge page (and back) while interrupts are disabled. + * We clear pmd to possibly replace it with page table pointer in + * different code paths. So make sure we wait for the parallel + * find_current_mm_pte to finish. + */ +void serialize_against_pte_lookup(struct mm_struct *mm) +{ + smp_mb(); + smp_call_function_many(mm_cpumask(mm), do_nothing, NULL, 1); +} + /* * We use this to invalidate a pmdp entry before switching from a * hugepte to regular pmd entry. @@ -77,7 +99,7 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, * This ensures that generic code that rely on IRQ disabling * to prevent a parallel THP split work as expected. */ - kick_all_cpus_sync(); + serialize_against_pte_lookup(vma->vm_mm); } static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot) diff --git a/arch/powerpc/mm/pgtable-hash64.c b/arch/powerpc/mm/pgtable-hash64.c index 443a2c66a304..ec277913e01b 100644 --- a/arch/powerpc/mm/pgtable-hash64.c +++ b/arch/powerpc/mm/pgtable-hash64.c @@ -239,7 +239,7 @@ pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addres * by sending an IPI to all the cpus and executing a dummy * function there. */ - kick_all_cpus_sync(); + serialize_against_pte_lookup(vma->vm_mm); /* * Now invalidate the hpte entries in the range * covered by pmd. This make sure we take a @@ -329,7 +329,6 @@ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr, unsigned int psize; unsigned long vsid; unsigned long flags = 0; - const struct cpumask *tmp; /* get the base page size,vsid and segment size */ #ifdef CONFIG_DEBUG_VM @@ -350,8 +349,7 @@ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr, ssize = mmu_kernel_ssize; } - tmp = cpumask_of(smp_processor_id()); - if (cpumask_equal(mm_cpumask(mm), tmp)) + if (mm_is_thread_local(mm)) flags |= HPTE_LOCAL_UPDATE; return flush_hash_hugepage(vsid, addr, pmdp, psize, ssize, flags); @@ -380,16 +378,16 @@ pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm, */ memset(pgtable, 0, PTE_FRAG_SIZE); /* - * Serialize against find_linux_pte_or_hugepte which does lock-less + * Serialize against find_current_mm_pte variants which does lock-less * lookup in page tables with local interrupts disabled. For huge pages * it casts pmd_t to pte_t. 
Since format of pte_t is different from * pmd_t we want to prevent transit from pmd pointing to page table * to pmd pointing to huge page (and back) while interrupts are disabled. * We clear pmd to possibly replace it with page table pointer in * different code paths. So make sure we wait for the parallel - * find_linux_pte_or_hugepage to finish. + * find_curren_mm_pte to finish. */ - kick_all_cpus_sync(); + serialize_against_pte_lookup(mm); return old_pmd; } diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c index 671a45d86c18..39c252b54d16 100644 --- a/arch/powerpc/mm/pgtable-radix.c +++ b/arch/powerpc/mm/pgtable-radix.c @@ -8,10 +8,15 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ + +#define pr_fmt(fmt) "radix-mmu: " fmt + +#include #include #include #include #include +#include #include #include @@ -31,9 +36,13 @@ unsigned int mmu_base_pid; static int native_register_process_table(unsigned long base, unsigned long pg_sz, unsigned long table_size) { - unsigned long patb1 = base | table_size | PATB_GR; + unsigned long patb0, patb1; + + patb0 = be64_to_cpu(partition_tb[0].patb0); + patb1 = base | table_size | PATB_GR; + + mmu_partition_table_set_entry(0, patb0, patb1); - partition_tb->patb1 = cpu_to_be64(patb1); return 0; } @@ -179,10 +188,14 @@ static inline void __meminit print_mapping(unsigned long start, unsigned long end, unsigned long size) { + char buf[10]; + if (end <= start) return; - pr_info("Mapped range 0x%lx - 0x%lx with 0x%lx\n", start, end, size); + string_get_size(size, 1, STRING_UNITS_2, buf, sizeof(buf)); + + pr_info("Mapped 0x%016lx-0x%016lx with %s pages\n", start, end, buf); } static int __meminit create_physical_mapping(unsigned long start, @@ -526,6 +539,7 @@ void __init radix__early_init_mmu(void) __kernel_virt_size = RADIX_KERN_VIRT_SIZE; __vmalloc_start = RADIX_VMALLOC_START; __vmalloc_end = RADIX_VMALLOC_END; + __kernel_io_start = RADIX_KERN_IO_START; vmemmap = (struct page *)RADIX_VMEMMAP_BASE; ioremap_bot = IOREMAP_BASE; @@ -836,9 +850,12 @@ pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addre */ pmd = *pmdp; pmd_clear(pmdp); + /*FIXME!! Verify whether we need this kick below */ - kick_all_cpus_sync(); - flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); + serialize_against_pte_lookup(vma->vm_mm); + + radix__flush_tlb_collapsed_pmd(vma->vm_mm, address); + return pmd; } @@ -897,16 +914,16 @@ pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm, old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0); old_pmd = __pmd(old); /* - * Serialize against find_linux_pte_or_hugepte which does lock-less + * Serialize against find_current_mm_pte which does lock-less * lookup in page tables with local interrupts disabled. For huge pages * it casts pmd_t to pte_t. Since format of pte_t is different from * pmd_t we want to prevent transit from pmd pointing to page table * to pmd pointing to huge page (and back) while interrupts are disabled. * We clear pmd to possibly replace it with page table pointer in * different code paths. So make sure we wait for the parallel - * find_linux_pte_or_hugepage to finish. + * find_current_mm_pte to finish. 
*/ - kick_all_cpus_sync(); + serialize_against_pte_lookup(mm); return old_pmd; } diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index a9e4bfc025bc..f6c7f54c0515 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c @@ -34,6 +34,7 @@ #include #include #include +#include #include "mmu_decl.h" @@ -242,7 +243,7 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, int flags) /* * Map in a chunk of physical memory starting at start. */ -void __init __mapin_ram_chunk(unsigned long offset, unsigned long top) +static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top) { unsigned long v, s, f; phys_addr_t p; @@ -294,7 +295,7 @@ void __init mapin_ram(void) * Returns true (1) if PTE was found, zero otherwise. The pointer to * the PTE pointer is unmodified if PTE is not found. */ -int +static int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp) { pgd_t *pgd; @@ -323,9 +324,7 @@ get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp) return(retval); } -#ifdef CONFIG_DEBUG_PAGEALLOC - -static int __change_page_attr(struct page *page, pgprot_t prot) +static int __change_page_attr_noflush(struct page *page, pgprot_t prot) { pte_t *kpte; pmd_t *kpmd; @@ -339,8 +338,6 @@ static int __change_page_attr(struct page *page, pgprot_t prot) if (!get_pteptr(&init_mm, address, &kpte, &kpmd)) return -EINVAL; __set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0); - wmb(); - flush_tlb_page(NULL, address); pte_unmap(kpte); return 0; @@ -349,24 +346,60 @@ static int __change_page_attr(struct page *page, pgprot_t prot) /* * Change the page attributes of an page in the linear mapping. * - * THIS CONFLICTS WITH BAT MAPPINGS, DEBUG USE ONLY + * THIS DOES NOTHING WITH BAT MAPPINGS, DEBUG USE ONLY */ static int change_page_attr(struct page *page, int numpages, pgprot_t prot) { int i, err = 0; unsigned long flags; + struct page *start = page; local_irq_save(flags); for (i = 0; i < numpages; i++, page++) { - err = __change_page_attr(page, prot); + err = __change_page_attr_noflush(page, prot); if (err) break; } + wmb(); local_irq_restore(flags); + flush_tlb_kernel_range((unsigned long)page_address(start), + (unsigned long)page_address(page)); return err; } +void mark_initmem_nx(void) +{ + struct page *page = virt_to_page(_sinittext); + unsigned long numpages = PFN_UP((unsigned long)_einittext) - + PFN_DOWN((unsigned long)_sinittext); + change_page_attr(page, numpages, PAGE_KERNEL); +} + +#ifdef CONFIG_STRICT_KERNEL_RWX +void mark_rodata_ro(void) +{ + struct page *page; + unsigned long numpages; + + page = virt_to_page(_stext); + numpages = PFN_UP((unsigned long)_etext) - + PFN_DOWN((unsigned long)_stext); + + change_page_attr(page, numpages, PAGE_KERNEL_ROX); + /* + * mark .rodata as read only. Use __init_begin rather than __end_rodata + * to cover NOTES and EXCEPTION_TABLE. + */ + page = virt_to_page(__start_rodata); + numpages = PFN_UP((unsigned long)__init_begin) - + PFN_DOWN((unsigned long)__start_rodata); + + change_page_attr(page, numpages, PAGE_KERNEL_RO); +} +#endif + +#ifdef CONFIG_DEBUG_PAGEALLOC void __kernel_map_pages(struct page *page, int numpages, int enable) { if (PageHighMem(page)) @@ -375,18 +408,3 @@ void __kernel_map_pages(struct page *page, int numpages, int enable) change_page_attr(page, numpages, enable ? 
PAGE_KERNEL : __pgprot(0)); } #endif /* CONFIG_DEBUG_PAGEALLOC */ - -static int fixmaps; - -void __set_fixmap (enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags) -{ - unsigned long address = __fix_to_virt(idx); - - if (idx >= __end_of_fixed_addresses) { - BUG(); - return; - } - - map_kernel_page(address, phys, pgprot_val(flags)); - fixmaps++; -} diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c index 0736e94c7615..ac0717a90ca6 100644 --- a/arch/powerpc/mm/pgtable_64.c +++ b/arch/powerpc/mm/pgtable_64.c @@ -104,6 +104,8 @@ unsigned long __vmalloc_start; EXPORT_SYMBOL(__vmalloc_start); unsigned long __vmalloc_end; EXPORT_SYMBOL(__vmalloc_end); +unsigned long __kernel_io_start; +EXPORT_SYMBOL(__kernel_io_start); struct page *vmemmap; EXPORT_SYMBOL(vmemmap); unsigned long __pte_frag_nr; diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S index bde378559d01..906a86fe457b 100644 --- a/arch/powerpc/mm/slb_low.S +++ b/arch/powerpc/mm/slb_low.S @@ -121,12 +121,25 @@ slb_miss_kernel_load_vmemmap: 1: #endif /* CONFIG_SPARSEMEM_VMEMMAP */ - /* vmalloc mapping gets the encoding from the PACA as the mapping - * can be demoted from 64K -> 4K dynamically on some machines + /* + * r10 contains the ESID, which is the original faulting EA shifted + * right by 28 bits. We need to compare that with (H_VMALLOC_END >> 28) + * which is 0xd00038000. That can't be used as an immediate, even if we + * ignored the 0xd, so we have to load it into a register, and we only + * have one register free. So we must load all of (H_VMALLOC_END >> 28) + * into a register and compare ESID against that. + */ + lis r11,(H_VMALLOC_END >> 32)@h // r11 = 0xffffffffd0000000 + ori r11,r11,(H_VMALLOC_END >> 32)@l // r11 = 0xffffffffd0003800 + // Rotate left 4, then mask with 0xffffffff0 + rldic r11,r11,4,28 // r11 = 0xd00038000 + cmpld r10,r11 // if r10 >= r11 + bge 5f // goto io_mapping + + /* + * vmalloc mapping gets the encoding from the PACA as the mapping + * can be demoted from 64K -> 4K dynamically on some machines. 
*/ - clrldi r11,r10,48 - cmpldi r11,(H_VMALLOC_SIZE >> 28) - 1 - bgt 5f lhz r11,PACAVMALLOCSLLP(r13) b 6f 5: diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c index 16ae1bbe13f0..b3e849c4886e 100644 --- a/arch/powerpc/mm/tlb-radix.c +++ b/arch/powerpc/mm/tlb-radix.c @@ -54,23 +54,15 @@ static inline void _tlbiel_pid(unsigned long pid, unsigned long ric) */ __tlbiel_pid(pid, 0, ric); - if (ric == RIC_FLUSH_ALL) - /* For the remaining sets, just flush the TLB */ - ric = RIC_FLUSH_TLB; + /* For PWC, only one flush is needed */ + if (ric == RIC_FLUSH_PWC) { + asm volatile("ptesync": : :"memory"); + return; + } + /* For the remaining sets, just flush the TLB */ for (set = 1; set < POWER9_TLB_SETS_RADIX ; set++) - __tlbiel_pid(pid, set, ric); - - asm volatile("ptesync": : :"memory"); - asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory"); -} - -static inline void tlbiel_pwc(unsigned long pid) -{ - asm volatile("ptesync": : :"memory"); - - /* For PWC flush, we don't look at set number */ - __tlbiel_pid(pid, 0, RIC_FLUSH_PWC); + __tlbiel_pid(pid, set, RIC_FLUSH_TLB); asm volatile("ptesync": : :"memory"); asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory"); @@ -146,31 +138,23 @@ void radix__local_flush_tlb_mm(struct mm_struct *mm) preempt_disable(); pid = mm->context.id; if (pid != MMU_NO_CONTEXT) - _tlbiel_pid(pid, RIC_FLUSH_ALL); + _tlbiel_pid(pid, RIC_FLUSH_TLB); preempt_enable(); } EXPORT_SYMBOL(radix__local_flush_tlb_mm); -void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr) +#ifndef CONFIG_SMP +static void radix__local_flush_all_mm(struct mm_struct *mm) { unsigned long pid; - struct mm_struct *mm = tlb->mm; - /* - * If we are doing a full mm flush, we will do a tlb flush - * with RIC_FLUSH_ALL later. - */ - if (tlb->fullmm) - return; preempt_disable(); - pid = mm->context.id; if (pid != MMU_NO_CONTEXT) - tlbiel_pwc(pid); - + _tlbiel_pid(pid, RIC_FLUSH_ALL); preempt_enable(); } -EXPORT_SYMBOL(radix__local_flush_tlb_pwc); +#endif /* CONFIG_SMP */ void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr, int psize) @@ -202,6 +186,24 @@ void radix__flush_tlb_mm(struct mm_struct *mm) { unsigned long pid; + preempt_disable(); + pid = mm->context.id; + if (unlikely(pid == MMU_NO_CONTEXT)) + goto no_context; + + if (!mm_is_thread_local(mm)) + _tlbie_pid(pid, RIC_FLUSH_TLB); + else + _tlbiel_pid(pid, RIC_FLUSH_TLB); +no_context: + preempt_enable(); +} +EXPORT_SYMBOL(radix__flush_tlb_mm); + +static void radix__flush_all_mm(struct mm_struct *mm) +{ + unsigned long pid; + preempt_disable(); pid = mm->context.id; if (unlikely(pid == MMU_NO_CONTEXT)) @@ -214,31 +216,10 @@ void radix__flush_tlb_mm(struct mm_struct *mm) no_context: preempt_enable(); } -EXPORT_SYMBOL(radix__flush_tlb_mm); void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr) { - unsigned long pid; - struct mm_struct *mm = tlb->mm; - - /* - * If we are doing a full mm flush, we will do a tlb flush - * with RIC_FLUSH_ALL later. 
- */ - if (tlb->fullmm) - return; - preempt_disable(); - - pid = mm->context.id; - if (unlikely(pid == MMU_NO_CONTEXT)) - goto no_context; - - if (!mm_is_thread_local(mm)) - _tlbie_pid(pid, RIC_FLUSH_PWC); - else - tlbiel_pwc(pid); -no_context: - preempt_enable(); + tlb->need_flush_all = 1; } EXPORT_SYMBOL(radix__flush_tlb_pwc); @@ -271,6 +252,8 @@ void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) } EXPORT_SYMBOL(radix__flush_tlb_page); +#else /* CONFIG_SMP */ +#define radix__flush_all_mm radix__local_flush_all_mm #endif /* CONFIG_SMP */ void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end) @@ -288,6 +271,7 @@ void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start, { struct mm_struct *mm = vma->vm_mm; + radix__flush_tlb_mm(mm); } EXPORT_SYMBOL(radix__flush_tlb_range); @@ -319,7 +303,10 @@ void radix__tlb_flush(struct mmu_gather *tlb) */ if (psize != -1 && !tlb->fullmm && !tlb->need_flush_all) radix__flush_tlb_range_psize(mm, tlb->start, tlb->end, psize); - else + else if (tlb->need_flush_all) { + tlb->need_flush_all = 0; + radix__flush_all_mm(mm); + } else radix__flush_tlb_mm(mm); } @@ -364,6 +351,43 @@ void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start, preempt_enable(); } +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr) +{ + int local = mm_is_thread_local(mm); + unsigned long ap = mmu_get_ap(mmu_virtual_psize); + unsigned long pid, end; + + + pid = mm ? mm->context.id : 0; + if (unlikely(pid == MMU_NO_CONTEXT)) + goto no_context; + + /* 4k page size, just blow the world */ + if (PAGE_SIZE == 0x1000) { + radix__flush_all_mm(mm); + return; + } + + /* Otherwise first do the PWC */ + if (local) + _tlbiel_pid(pid, RIC_FLUSH_PWC); + else + _tlbie_pid(pid, RIC_FLUSH_PWC); + + /* Then iterate the pages */ + end = addr + HPAGE_PMD_SIZE; + for (; addr < end; addr += PAGE_SIZE) { + if (local) + _tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB); + else + _tlbie_va(addr, pid, ap, RIC_FLUSH_TLB); + } +no_context: + preempt_enable(); +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa, unsigned long page_size) { diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c index b5b0fb97b9c0..881ebd53ffc2 100644 --- a/arch/powerpc/mm/tlb_hash64.c +++ b/arch/powerpc/mm/tlb_hash64.c @@ -29,6 +29,8 @@ #include #include #include +#include + #include @@ -138,13 +140,10 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr, */ void __flush_tlb_pending(struct ppc64_tlb_batch *batch) { - const struct cpumask *tmp; - int i, local = 0; + int i, local; i = batch->index; - tmp = cpumask_of(smp_processor_id()); - if (cpumask_equal(mm_cpumask(batch->mm), tmp)) - local = 1; + local = mm_is_thread_local(batch->mm); if (i == 1) flush_hash_page(batch->vpn[0], batch->pte[0], batch->psize, batch->ssize, local); @@ -207,8 +206,8 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start, local_irq_save(flags); arch_enter_lazy_mmu_mode(); for (; start < end; start += PAGE_SIZE) { - pte_t *ptep = find_linux_pte_or_hugepte(mm->pgd, start, &is_thp, - &hugepage_shift); + pte_t *ptep = find_current_mm_pte(mm->pgd, start, &is_thp, + &hugepage_shift); unsigned long pte; if (ptep == NULL) diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S index eabecfcaef7c..048b8e9f4492 100644 --- a/arch/powerpc/mm/tlb_nohash_low.S +++ b/arch/powerpc/mm/tlb_nohash_low.S 
@@ -60,7 +60,7 @@ _GLOBAL(__tlbil_va) isync 1: blr -#elif defined(CONFIG_8xx) +#elif defined(CONFIG_PPC_8xx) /* * Nothing to do for 8xx, everything is inline diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h index 30cf03f53428..47fc6660845d 100644 --- a/arch/powerpc/net/bpf_jit.h +++ b/arch/powerpc/net/bpf_jit.h @@ -263,6 +263,7 @@ static inline bool is_nearbranch(int offset) #define COND_EQ (CR0_EQ | COND_CMP_TRUE) #define COND_NE (CR0_EQ | COND_CMP_FALSE) #define COND_LT (CR0_LT | COND_CMP_TRUE) +#define COND_LE (CR0_GT | COND_CMP_FALSE) #endif diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index 861c5af1c9c4..a66e64b0b251 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c @@ -25,11 +25,7 @@ int bpf_jit_enable __read_mostly; static void bpf_jit_fill_ill_insns(void *area, unsigned int size) { - int *p = area; - - /* Fill whole space with trap instructions */ - while (p < (int *)((char *)area + size)) - *p++ = BREAKPOINT_INSTRUCTION; + memset32(area, BREAKPOINT_INSTRUCTION, size/4); } static inline void bpf_flush_icache(void *start, void *end) @@ -795,12 +791,24 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, case BPF_JMP | BPF_JSGT | BPF_X: true_cond = COND_GT; goto cond_branch; + case BPF_JMP | BPF_JLT | BPF_K: + case BPF_JMP | BPF_JLT | BPF_X: + case BPF_JMP | BPF_JSLT | BPF_K: + case BPF_JMP | BPF_JSLT | BPF_X: + true_cond = COND_LT; + goto cond_branch; case BPF_JMP | BPF_JGE | BPF_K: case BPF_JMP | BPF_JGE | BPF_X: case BPF_JMP | BPF_JSGE | BPF_K: case BPF_JMP | BPF_JSGE | BPF_X: true_cond = COND_GE; goto cond_branch; + case BPF_JMP | BPF_JLE | BPF_K: + case BPF_JMP | BPF_JLE | BPF_X: + case BPF_JMP | BPF_JSLE | BPF_K: + case BPF_JMP | BPF_JSLE | BPF_X: + true_cond = COND_LE; + goto cond_branch; case BPF_JMP | BPF_JEQ | BPF_K: case BPF_JMP | BPF_JEQ | BPF_X: true_cond = COND_EQ; @@ -817,14 +825,18 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, cond_branch: switch (code) { case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JLT | BPF_X: case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JLE | BPF_X: case BPF_JMP | BPF_JEQ | BPF_X: case BPF_JMP | BPF_JNE | BPF_X: /* unsigned comparison */ PPC_CMPLD(dst_reg, src_reg); break; case BPF_JMP | BPF_JSGT | BPF_X: + case BPF_JMP | BPF_JSLT | BPF_X: case BPF_JMP | BPF_JSGE | BPF_X: + case BPF_JMP | BPF_JSLE | BPF_X: /* signed comparison */ PPC_CMPD(dst_reg, src_reg); break; @@ -834,7 +846,9 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, case BPF_JMP | BPF_JNE | BPF_K: case BPF_JMP | BPF_JEQ | BPF_K: case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JLT | BPF_K: case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JLE | BPF_K: /* * Need sign-extended load, so only positive * values can be used as imm in cmpldi @@ -849,7 +863,9 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, } break; case BPF_JMP | BPF_JSGT | BPF_K: + case BPF_JMP | BPF_JSLT | BPF_K: case BPF_JMP | BPF_JSGE | BPF_K: + case BPF_JMP | BPF_JSLE | BPF_K: /* * signed comparison, so any 16-bit value * can be used in cmpdi diff --git a/arch/powerpc/perf/Makefile b/arch/powerpc/perf/Makefile index 4d606b99a5cb..3f3a5ce66495 100644 --- a/arch/powerpc/perf/Makefile +++ b/arch/powerpc/perf/Makefile @@ -8,6 +8,7 @@ obj64-$(CONFIG_PPC_PERF_CTRS) += power4-pmu.o ppc970-pmu.o power5-pmu.o \ isa207-common.o power8-pmu.o power9-pmu.o obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o +obj-$(CONFIG_PPC_POWERNV) += imc-pmu.o 
obj-$(CONFIG_FSL_EMB_PERF_EVENT) += core-fsl-emb.o obj-$(CONFIG_FSL_EMB_PERF_EVENT_E500) += e500-pmu.o e6500-pmu.o diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c index 0fc26714780a..0af051a1974e 100644 --- a/arch/powerpc/perf/callchain.c +++ b/arch/powerpc/perf/callchain.c @@ -22,6 +22,7 @@ #ifdef CONFIG_PPC64 #include "../kernel/ppc32.h" #endif +#include /* @@ -127,7 +128,7 @@ static int read_user_stack_slow(void __user *ptr, void *buf, int nb) return -EFAULT; local_irq_save(flags); - ptep = find_linux_pte_or_hugepte(pgdir, addr, NULL, &shift); + ptep = find_current_mm_pte(pgdir, addr, NULL, &shift); if (!ptep) goto err_out; if (!shift) diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 2e3eb7431571..9e3da168d54c 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -793,6 +793,11 @@ void perf_event_print_debug(void) u32 pmcs[MAX_HWEVENTS]; int i; + if (!ppmu) { + pr_info("Performance monitor hardware not registered.\n"); + return; + } + if (!ppmu->n_counter) return; diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c new file mode 100644 index 000000000000..9ccac86f3463 --- /dev/null +++ b/arch/powerpc/perf/imc-pmu.c @@ -0,0 +1,1306 @@ +/* + * In-Memory Collection (IMC) Performance Monitor counter support. + * + * Copyright (C) 2017 Madhavan Srinivasan, IBM Corporation. + * (C) 2017 Anju T Sudhakar, IBM Corporation. + * (C) 2017 Hemant K Shaw, IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or later version. + */ +#include +#include +#include +#include +#include +#include +#include + +/* Nest IMC data structures and variables */ + +/* + * Used to avoid races in counting the nest-pmu units during hotplug + * register and unregister + */ +static DEFINE_MUTEX(nest_init_lock); +static DEFINE_PER_CPU(struct imc_pmu_ref *, local_nest_imc_refc); +static struct imc_pmu *per_nest_pmu_arr[IMC_MAX_PMUS]; +static cpumask_t nest_imc_cpumask; +struct imc_pmu_ref *nest_imc_refc; +static int nest_pmus; + +/* Core IMC data structures and variables */ + +static cpumask_t core_imc_cpumask; +struct imc_pmu_ref *core_imc_refc; +static struct imc_pmu *core_imc_pmu; + +/* Thread IMC data structures and variables */ + +static DEFINE_PER_CPU(u64 *, thread_imc_mem); +static struct imc_pmu *thread_imc_pmu; +static int thread_imc_mem_size; + +struct imc_pmu *imc_event_to_pmu(struct perf_event *event) +{ + return container_of(event->pmu, struct imc_pmu, pmu); +} + +PMU_FORMAT_ATTR(event, "config:0-40"); +PMU_FORMAT_ATTR(offset, "config:0-31"); +PMU_FORMAT_ATTR(rvalue, "config:32"); +PMU_FORMAT_ATTR(mode, "config:33-40"); +static struct attribute *imc_format_attrs[] = { + &format_attr_event.attr, + &format_attr_offset.attr, + &format_attr_rvalue.attr, + &format_attr_mode.attr, + NULL, +}; + +static struct attribute_group imc_format_group = { + .name = "format", + .attrs = imc_format_attrs, +}; + +/* Get the cpumask printed to a buffer "buf" */ +static ssize_t imc_pmu_cpumask_get_attr(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct pmu *pmu = dev_get_drvdata(dev); + struct imc_pmu *imc_pmu = container_of(pmu, struct imc_pmu, pmu); + cpumask_t *active_mask; + + switch(imc_pmu->domain){ + case IMC_DOMAIN_NEST: + active_mask = &nest_imc_cpumask; + break; + case IMC_DOMAIN_CORE: + active_mask = 
&core_imc_cpumask; + break; + default: + return 0; + } + + return cpumap_print_to_pagebuf(true, buf, active_mask); +} + +static DEVICE_ATTR(cpumask, S_IRUGO, imc_pmu_cpumask_get_attr, NULL); + +static struct attribute *imc_pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static struct attribute_group imc_pmu_cpumask_attr_group = { + .attrs = imc_pmu_cpumask_attrs, +}; + +/* device_str_attr_create : Populate event "name" and string "str" in attribute */ +static struct attribute *device_str_attr_create(const char *name, const char *str) +{ + struct perf_pmu_events_attr *attr; + + attr = kzalloc(sizeof(*attr), GFP_KERNEL); + if (!attr) + return NULL; + sysfs_attr_init(&attr->attr.attr); + + attr->event_str = str; + attr->attr.attr.name = name; + attr->attr.attr.mode = 0444; + attr->attr.show = perf_event_sysfs_show; + + return &attr->attr.attr; +} + +struct imc_events *imc_parse_event(struct device_node *np, const char *scale, + const char *unit, const char *prefix, u32 base) +{ + struct imc_events *event; + const char *s; + u32 reg; + + event = kzalloc(sizeof(struct imc_events), GFP_KERNEL); + if (!event) + return NULL; + + if (of_property_read_u32(np, "reg", ®)) + goto error; + /* Add the base_reg value to the "reg" */ + event->value = base + reg; + + if (of_property_read_string(np, "event-name", &s)) + goto error; + + event->name = kasprintf(GFP_KERNEL, "%s%s", prefix, s); + if (!event->name) + goto error; + + if (of_property_read_string(np, "scale", &s)) + s = scale; + + if (s) { + event->scale = kstrdup(s, GFP_KERNEL); + if (!event->scale) + goto error; + } + + if (of_property_read_string(np, "unit", &s)) + s = unit; + + if (s) { + event->unit = kstrdup(s, GFP_KERNEL); + if (!event->unit) + goto error; + } + + return event; +error: + kfree(event->unit); + kfree(event->scale); + kfree(event->name); + kfree(event); + + return NULL; +} + +/* + * update_events_in_group: Update the "events" information in an attr_group + * and assign the attr_group to the pmu "pmu". 
+ */ +static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu) +{ + struct attribute_group *attr_group; + struct attribute **attrs, *dev_str; + struct device_node *np, *pmu_events; + struct imc_events *ev; + u32 handle, base_reg; + int i=0, j=0, ct; + const char *prefix, *g_scale, *g_unit; + const char *ev_val_str, *ev_scale_str, *ev_unit_str; + + if (!of_property_read_u32(node, "events", &handle)) + pmu_events = of_find_node_by_phandle(handle); + else + return 0; + + /* Did not find any node with a given phandle */ + if (!pmu_events) + return 0; + + /* Get a count of number of child nodes */ + ct = of_get_child_count(pmu_events); + + /* Get the event prefix */ + if (of_property_read_string(node, "events-prefix", &prefix)) + return 0; + + /* Get a global unit and scale data if available */ + if (of_property_read_string(node, "scale", &g_scale)) + g_scale = NULL; + + if (of_property_read_string(node, "unit", &g_unit)) + g_unit = NULL; + + /* "reg" property gives out the base offset of the counters data */ + of_property_read_u32(node, "reg", &base_reg); + + /* Allocate memory for the events */ + pmu->events = kcalloc(ct, sizeof(struct imc_events), GFP_KERNEL); + if (!pmu->events) + return -ENOMEM; + + ct = 0; + /* Parse the events and update the struct */ + for_each_child_of_node(pmu_events, np) { + ev = imc_parse_event(np, g_scale, g_unit, prefix, base_reg); + if (ev) + pmu->events[ct++] = ev; + } + + /* Allocate memory for attribute group */ + attr_group = kzalloc(sizeof(*attr_group), GFP_KERNEL); + if (!attr_group) + return -ENOMEM; + + /* + * Allocate memory for attributes. + * Since we have count of events for this pmu, we also allocate + * memory for the scale and unit attribute for now. + * "ct" has the total event structs added from the events-parent node. + * So allocate three times the "ct" (this includes event, event_scale and + * event_unit). 
+ */ + attrs = kcalloc(((ct * 3) + 1), sizeof(struct attribute *), GFP_KERNEL); + if (!attrs) { + kfree(attr_group); + kfree(pmu->events); + return -ENOMEM; + } + + attr_group->name = "events"; + attr_group->attrs = attrs; + do { + ev_val_str = kasprintf(GFP_KERNEL, "event=0x%x", pmu->events[i]->value); + dev_str = device_str_attr_create(pmu->events[i]->name, ev_val_str); + if (!dev_str) + continue; + + attrs[j++] = dev_str; + if (pmu->events[i]->scale) { + ev_scale_str = kasprintf(GFP_KERNEL, "%s.scale",pmu->events[i]->name); + dev_str = device_str_attr_create(ev_scale_str, pmu->events[i]->scale); + if (!dev_str) + continue; + + attrs[j++] = dev_str; + } + + if (pmu->events[i]->unit) { + ev_unit_str = kasprintf(GFP_KERNEL, "%s.unit",pmu->events[i]->name); + dev_str = device_str_attr_create(ev_unit_str, pmu->events[i]->unit); + if (!dev_str) + continue; + + attrs[j++] = dev_str; + } + } while (++i < ct); + + /* Save the event attribute */ + pmu->attr_groups[IMC_EVENT_ATTR] = attr_group; + + kfree(pmu->events); + return 0; +} + +/* get_nest_pmu_ref: Return the imc_pmu_ref struct for the given node */ +static struct imc_pmu_ref *get_nest_pmu_ref(int cpu) +{ + return per_cpu(local_nest_imc_refc, cpu); +} + +static void nest_change_cpu_context(int old_cpu, int new_cpu) +{ + struct imc_pmu **pn = per_nest_pmu_arr; + int i; + + if (old_cpu < 0 || new_cpu < 0) + return; + + for (i = 0; *pn && i < IMC_MAX_PMUS; i++, pn++) + perf_pmu_migrate_context(&(*pn)->pmu, old_cpu, new_cpu); +} + +static int ppc_nest_imc_cpu_offline(unsigned int cpu) +{ + int nid, target = -1; + const struct cpumask *l_cpumask; + struct imc_pmu_ref *ref; + + /* + * Check in the designated list for this cpu. Dont bother + * if not one of them. + */ + if (!cpumask_test_and_clear_cpu(cpu, &nest_imc_cpumask)) + return 0; + + /* + * Now that this cpu is one of the designated, + * find a next cpu a) which is online and b) in same chip. + */ + nid = cpu_to_node(cpu); + l_cpumask = cpumask_of_node(nid); + target = cpumask_any_but(l_cpumask, cpu); + + /* + * Update the cpumask with the target cpu and + * migrate the context if needed + */ + if (target >= 0 && target < nr_cpu_ids) { + cpumask_set_cpu(target, &nest_imc_cpumask); + nest_change_cpu_context(cpu, target); + } else { + opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST, + get_hard_smp_processor_id(cpu)); + /* + * If this is the last cpu in this chip then, skip the reference + * count mutex lock and make the reference count on this chip zero. + */ + ref = get_nest_pmu_ref(cpu); + if (!ref) + return -EINVAL; + + ref->refc = 0; + } + return 0; +} + +static int ppc_nest_imc_cpu_online(unsigned int cpu) +{ + const struct cpumask *l_cpumask; + static struct cpumask tmp_mask; + int res; + + /* Get the cpumask of this node */ + l_cpumask = cpumask_of_node(cpu_to_node(cpu)); + + /* + * If this is not the first online CPU on this node, then + * just return. + */ + if (cpumask_and(&tmp_mask, l_cpumask, &nest_imc_cpumask)) + return 0; + + /* + * If this is the first online cpu on this node + * disable the nest counters by making an OPAL call. 
+ */ + res = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST, + get_hard_smp_processor_id(cpu)); + if (res) + return res; + + /* Make this CPU the designated target for counter collection */ + cpumask_set_cpu(cpu, &nest_imc_cpumask); + return 0; +} + +static int nest_pmu_cpumask_init(void) +{ + return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE, + "perf/powerpc/imc:online", + ppc_nest_imc_cpu_online, + ppc_nest_imc_cpu_offline); +} + +static void nest_imc_counters_release(struct perf_event *event) +{ + int rc, node_id; + struct imc_pmu_ref *ref; + + if (event->cpu < 0) + return; + + node_id = cpu_to_node(event->cpu); + + /* + * See if we need to disable the nest PMU. + * If no events are currently in use, then we have to take a + * mutex to ensure that we don't race with another task doing + * enable or disable the nest counters. + */ + ref = get_nest_pmu_ref(event->cpu); + if (!ref) + return; + + /* Take the mutex lock for this node and then decrement the reference count */ + mutex_lock(&ref->lock); + ref->refc--; + if (ref->refc == 0) { + rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST, + get_hard_smp_processor_id(event->cpu)); + if (rc) { + mutex_unlock(&ref->lock); + pr_err("nest-imc: Unable to stop the counters for core %d\n", node_id); + return; + } + } else if (ref->refc < 0) { + WARN(1, "nest-imc: Invalid event reference count\n"); + ref->refc = 0; + } + mutex_unlock(&ref->lock); +} + +static int nest_imc_event_init(struct perf_event *event) +{ + int chip_id, rc, node_id; + u32 l_config, config = event->attr.config; + struct imc_mem_info *pcni; + struct imc_pmu *pmu; + struct imc_pmu_ref *ref; + bool flag = false; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + /* Sampling not supported */ + if (event->hw.sample_period) + return -EINVAL; + + /* unsupported modes and filters */ + if (event->attr.exclude_user || + event->attr.exclude_kernel || + event->attr.exclude_hv || + event->attr.exclude_idle || + event->attr.exclude_host || + event->attr.exclude_guest) + return -EINVAL; + + if (event->cpu < 0) + return -EINVAL; + + pmu = imc_event_to_pmu(event); + + /* Sanity check for config (event offset) */ + if ((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size) + return -EINVAL; + + /* + * Nest HW counter memory resides in a per-chip reserve-memory (HOMER). + * Get the base memory addresss for this cpu. + */ + chip_id = topology_physical_package_id(event->cpu); + pcni = pmu->mem_info; + do { + if (pcni->id == chip_id) { + flag = true; + break; + } + pcni++; + } while (pcni); + + if (!flag) + return -ENODEV; + + /* + * Add the event offset to the base address. + */ + l_config = config & IMC_EVENT_OFFSET_MASK; + event->hw.event_base = (u64)pcni->vbase + l_config; + node_id = cpu_to_node(event->cpu); + + /* + * Get the imc_pmu_ref struct for this node. + * Take the mutex lock and then increment the count of nest pmu events + * inited. + */ + ref = get_nest_pmu_ref(event->cpu); + if (!ref) + return -EINVAL; + + mutex_lock(&ref->lock); + if (ref->refc == 0) { + rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_NEST, + get_hard_smp_processor_id(event->cpu)); + if (rc) { + mutex_unlock(&ref->lock); + pr_err("nest-imc: Unable to start the counters for node %d\n", + node_id); + return rc; + } + } + ++ref->refc; + mutex_unlock(&ref->lock); + + event->destroy = nest_imc_counters_release; + return 0; +} + +/* + * core_imc_mem_init : Initializes memory for the current core. 
+ * + * Uses alloc_pages_node() and uses the returned address as an argument to + * an opal call to configure the pdbar. The address sent as an argument is + * converted to physical address before the opal call is made. This is the + * base address at which the core imc counters are populated. + */ +static int core_imc_mem_init(int cpu, int size) +{ + int phys_id, rc = 0, core_id = (cpu / threads_per_core); + struct imc_mem_info *mem_info; + + /* + * alloc_pages_node() will allocate memory for core in the + * local node only. + */ + phys_id = topology_physical_package_id(cpu); + mem_info = &core_imc_pmu->mem_info[core_id]; + mem_info->id = core_id; + + /* We need only vbase for core counters */ + mem_info->vbase = page_address(alloc_pages_node(phys_id, + GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE, + get_order(size))); + if (!mem_info->vbase) + return -ENOMEM; + + /* Init the mutex */ + core_imc_refc[core_id].id = core_id; + mutex_init(&core_imc_refc[core_id].lock); + + rc = opal_imc_counters_init(OPAL_IMC_COUNTERS_CORE, + __pa((void *)mem_info->vbase), + get_hard_smp_processor_id(cpu)); + if (rc) { + free_pages((u64)mem_info->vbase, get_order(size)); + mem_info->vbase = NULL; + } + + return rc; +} + +static bool is_core_imc_mem_inited(int cpu) +{ + struct imc_mem_info *mem_info; + int core_id = (cpu / threads_per_core); + + mem_info = &core_imc_pmu->mem_info[core_id]; + if (!mem_info->vbase) + return false; + + return true; +} + +static int ppc_core_imc_cpu_online(unsigned int cpu) +{ + const struct cpumask *l_cpumask; + static struct cpumask tmp_mask; + int ret = 0; + + /* Get the cpumask for this core */ + l_cpumask = cpu_sibling_mask(cpu); + + /* If a cpu for this core is already set, then, don't do anything */ + if (cpumask_and(&tmp_mask, l_cpumask, &core_imc_cpumask)) + return 0; + + if (!is_core_imc_mem_inited(cpu)) { + ret = core_imc_mem_init(cpu, core_imc_pmu->counter_mem_size); + if (ret) { + pr_info("core_imc memory allocation for cpu %d failed\n", cpu); + return ret; + } + } + + /* set the cpu in the mask */ + cpumask_set_cpu(cpu, &core_imc_cpumask); + return 0; +} + +static int ppc_core_imc_cpu_offline(unsigned int cpu) +{ + unsigned int ncpu, core_id; + struct imc_pmu_ref *ref; + + /* + * clear this cpu out of the mask, if not present in the mask, + * don't bother doing anything. + */ + if (!cpumask_test_and_clear_cpu(cpu, &core_imc_cpumask)) + return 0; + + /* Find any online cpu in that core except the current "cpu" */ + ncpu = cpumask_any_but(cpu_sibling_mask(cpu), cpu); + + if (ncpu >= 0 && ncpu < nr_cpu_ids) { + cpumask_set_cpu(ncpu, &core_imc_cpumask); + perf_pmu_migrate_context(&core_imc_pmu->pmu, cpu, ncpu); + } else { + /* + * If this is the last cpu in this core then, skip taking refernce + * count mutex lock for this core and directly zero "refc" for + * this core. + */ + opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE, + get_hard_smp_processor_id(cpu)); + core_id = cpu / threads_per_core; + ref = &core_imc_refc[core_id]; + if (!ref) + return -EINVAL; + + ref->refc = 0; + } + return 0; +} + +static int core_imc_pmu_cpumask_init(void) +{ + return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE, + "perf/powerpc/imc_core:online", + ppc_core_imc_cpu_online, + ppc_core_imc_cpu_offline); +} + +static void core_imc_counters_release(struct perf_event *event) +{ + int rc, core_id; + struct imc_pmu_ref *ref; + + if (event->cpu < 0) + return; + /* + * See if we need to disable the IMC PMU. 
+ * If no events are currently in use, then we have to take a + * mutex to ensure that we don't race with another task doing + * enable or disable the core counters. + */ + core_id = event->cpu / threads_per_core; + + /* Take the mutex lock and decrement the refernce count for this core */ + ref = &core_imc_refc[core_id]; + if (!ref) + return; + + mutex_lock(&ref->lock); + ref->refc--; + if (ref->refc == 0) { + rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE, + get_hard_smp_processor_id(event->cpu)); + if (rc) { + mutex_unlock(&ref->lock); + pr_err("IMC: Unable to stop the counters for core %d\n", core_id); + return; + } + } else if (ref->refc < 0) { + WARN(1, "core-imc: Invalid event reference count\n"); + ref->refc = 0; + } + mutex_unlock(&ref->lock); +} + +static int core_imc_event_init(struct perf_event *event) +{ + int core_id, rc; + u64 config = event->attr.config; + struct imc_mem_info *pcmi; + struct imc_pmu *pmu; + struct imc_pmu_ref *ref; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + /* Sampling not supported */ + if (event->hw.sample_period) + return -EINVAL; + + /* unsupported modes and filters */ + if (event->attr.exclude_user || + event->attr.exclude_kernel || + event->attr.exclude_hv || + event->attr.exclude_idle || + event->attr.exclude_host || + event->attr.exclude_guest) + return -EINVAL; + + if (event->cpu < 0) + return -EINVAL; + + event->hw.idx = -1; + pmu = imc_event_to_pmu(event); + + /* Sanity check for config (event offset) */ + if (((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size)) + return -EINVAL; + + if (!is_core_imc_mem_inited(event->cpu)) + return -ENODEV; + + core_id = event->cpu / threads_per_core; + pcmi = &core_imc_pmu->mem_info[core_id]; + if ((!pcmi->vbase)) + return -ENODEV; + + /* Get the core_imc mutex for this core */ + ref = &core_imc_refc[core_id]; + if (!ref) + return -EINVAL; + + /* + * Core pmu units are enabled only when it is used. + * See if this is triggered for the first time. + * If yes, take the mutex lock and enable the core counters. + * If not, just increment the count in core_imc_refc struct. + */ + mutex_lock(&ref->lock); + if (ref->refc == 0) { + rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE, + get_hard_smp_processor_id(event->cpu)); + if (rc) { + mutex_unlock(&ref->lock); + pr_err("core-imc: Unable to start the counters for core %d\n", + core_id); + return rc; + } + } + ++ref->refc; + mutex_unlock(&ref->lock); + + event->hw.event_base = (u64)pcmi->vbase + (config & IMC_EVENT_OFFSET_MASK); + event->destroy = core_imc_counters_release; + return 0; +} + +/* + * Allocates a page of memory for each of the online cpus, and write the + * physical base address of that page to the LDBAR for that cpu. + * + * LDBAR Register Layout: + * + * 0 4 8 12 16 20 24 28 + * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | + * | | [ ] [ Counter Address [8:50] + * | * Mode | + * | * PB Scope + * * Enable/Disable + * + * 32 36 40 44 48 52 56 60 + * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | + * Counter Address [8:50] ] + * + */ +static int thread_imc_mem_alloc(int cpu_id, int size) +{ + u64 ldbar_value, *local_mem = per_cpu(thread_imc_mem, cpu_id); + int phys_id = topology_physical_package_id(cpu_id); + + if (!local_mem) { + /* + * This case could happen only once at start, since we dont + * free the memory in cpu offline path. 
+ */ + local_mem = page_address(alloc_pages_node(phys_id, + GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE, + get_order(size))); + if (!local_mem) + return -ENOMEM; + + per_cpu(thread_imc_mem, cpu_id) = local_mem; + } + + ldbar_value = ((u64)local_mem & THREAD_IMC_LDBAR_MASK) | THREAD_IMC_ENABLE; + + mtspr(SPRN_LDBAR, ldbar_value); + return 0; +} + +static int ppc_thread_imc_cpu_online(unsigned int cpu) +{ + return thread_imc_mem_alloc(cpu, thread_imc_mem_size); +} + +static int ppc_thread_imc_cpu_offline(unsigned int cpu) +{ + mtspr(SPRN_LDBAR, 0); + return 0; +} + +static int thread_imc_cpu_init(void) +{ + return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE, + "perf/powerpc/imc_thread:online", + ppc_thread_imc_cpu_online, + ppc_thread_imc_cpu_offline); +} + +void thread_imc_pmu_sched_task(struct perf_event_context *ctx, + bool sched_in) +{ + int core_id; + struct imc_pmu_ref *ref; + + if (!is_core_imc_mem_inited(smp_processor_id())) + return; + + core_id = smp_processor_id() / threads_per_core; + /* + * imc pmus are enabled only when it is used. + * See if this is triggered for the first time. + * If yes, take the mutex lock and enable the counters. + * If not, just increment the count in ref count struct. + */ + ref = &core_imc_refc[core_id]; + if (!ref) + return; + + if (sched_in) { + mutex_lock(&ref->lock); + if (ref->refc == 0) { + if (opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE, + get_hard_smp_processor_id(smp_processor_id()))) { + mutex_unlock(&ref->lock); + pr_err("thread-imc: Unable to start the counter\ + for core %d\n", core_id); + return; + } + } + ++ref->refc; + mutex_unlock(&ref->lock); + } else { + mutex_lock(&ref->lock); + ref->refc--; + if (ref->refc == 0) { + if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE, + get_hard_smp_processor_id(smp_processor_id()))) { + mutex_unlock(&ref->lock); + pr_err("thread-imc: Unable to stop the counters\ + for core %d\n", core_id); + return; + } + } else if (ref->refc < 0) { + ref->refc = 0; + } + mutex_unlock(&ref->lock); + } + + return; +} + +static int thread_imc_event_init(struct perf_event *event) +{ + u32 config = event->attr.config; + struct task_struct *target; + struct imc_pmu *pmu; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + /* Sampling not supported */ + if (event->hw.sample_period) + return -EINVAL; + + event->hw.idx = -1; + pmu = imc_event_to_pmu(event); + + /* Sanity check for config offset */ + if (((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size)) + return -EINVAL; + + target = event->hw.target; + if (!target) + return -EINVAL; + + event->pmu->task_ctx_nr = perf_sw_context; + return 0; +} + +static bool is_thread_imc_pmu(struct perf_event *event) +{ + if (!strncmp(event->pmu->name, "thread_imc", strlen("thread_imc"))) + return true; + + return false; +} + +static u64 * get_event_base_addr(struct perf_event *event) +{ + u64 addr; + + if (is_thread_imc_pmu(event)) { + addr = (u64)per_cpu(thread_imc_mem, smp_processor_id()); + return (u64 *)(addr + (event->attr.config & IMC_EVENT_OFFSET_MASK)); + } + + return (u64 *)event->hw.event_base; +} + +static void thread_imc_pmu_start_txn(struct pmu *pmu, + unsigned int txn_flags) +{ + if (txn_flags & ~PERF_PMU_TXN_ADD) + return; + perf_pmu_disable(pmu); +} + +static void thread_imc_pmu_cancel_txn(struct pmu *pmu) +{ + perf_pmu_enable(pmu); +} + +static int thread_imc_pmu_commit_txn(struct pmu *pmu) +{ + perf_pmu_enable(pmu); + return 0; +} + +static u64 imc_read_counter(struct perf_event *event) +{ + u64 *addr, data; + + /* + * In-Memory 
Collection (IMC) counters are free flowing counters. + * So we take a snapshot of the counter value on enable and save it + * to calculate the delta at later stage to present the event counter + * value. + */ + addr = get_event_base_addr(event); + data = be64_to_cpu(READ_ONCE(*addr)); + local64_set(&event->hw.prev_count, data); + + return data; +} + +static void imc_event_update(struct perf_event *event) +{ + u64 counter_prev, counter_new, final_count; + + counter_prev = local64_read(&event->hw.prev_count); + counter_new = imc_read_counter(event); + final_count = counter_new - counter_prev; + + /* Update the delta to the event count */ + local64_add(final_count, &event->count); +} + +static void imc_event_start(struct perf_event *event, int flags) +{ + /* + * In Memory Counters are free flowing counters. HW or the microcode + * keeps adding to the counter offset in memory. To get event + * counter value, we snapshot the value here and we calculate + * delta at later point. + */ + imc_read_counter(event); +} + +static void imc_event_stop(struct perf_event *event, int flags) +{ + /* + * Take a snapshot and calculate the delta and update + * the event counter values. + */ + imc_event_update(event); +} + +static int imc_event_add(struct perf_event *event, int flags) +{ + if (flags & PERF_EF_START) + imc_event_start(event, flags); + + return 0; +} + +static int thread_imc_event_add(struct perf_event *event, int flags) +{ + if (flags & PERF_EF_START) + imc_event_start(event, flags); + + /* Enable the sched_task to start the engine */ + perf_sched_cb_inc(event->ctx->pmu); + return 0; +} + +static void thread_imc_event_del(struct perf_event *event, int flags) +{ + /* + * Take a snapshot and calculate the delta and update + * the event counter values. + */ + imc_event_update(event); + perf_sched_cb_dec(event->ctx->pmu); +} + +/* update_pmu_ops : Populate the appropriate operations for "pmu" */ +static int update_pmu_ops(struct imc_pmu *pmu) +{ + pmu->pmu.task_ctx_nr = perf_invalid_context; + pmu->pmu.add = imc_event_add; + pmu->pmu.del = imc_event_stop; + pmu->pmu.start = imc_event_start; + pmu->pmu.stop = imc_event_stop; + pmu->pmu.read = imc_event_update; + pmu->pmu.attr_groups = pmu->attr_groups; + pmu->attr_groups[IMC_FORMAT_ATTR] = &imc_format_group; + + switch (pmu->domain) { + case IMC_DOMAIN_NEST: + pmu->pmu.event_init = nest_imc_event_init; + pmu->attr_groups[IMC_CPUMASK_ATTR] = &imc_pmu_cpumask_attr_group; + break; + case IMC_DOMAIN_CORE: + pmu->pmu.event_init = core_imc_event_init; + pmu->attr_groups[IMC_CPUMASK_ATTR] = &imc_pmu_cpumask_attr_group; + break; + case IMC_DOMAIN_THREAD: + pmu->pmu.event_init = thread_imc_event_init; + pmu->pmu.sched_task = thread_imc_pmu_sched_task; + pmu->pmu.add = thread_imc_event_add; + pmu->pmu.del = thread_imc_event_del; + pmu->pmu.start_txn = thread_imc_pmu_start_txn; + pmu->pmu.cancel_txn = thread_imc_pmu_cancel_txn; + pmu->pmu.commit_txn = thread_imc_pmu_commit_txn; + break; + default: + break; + } + + return 0; +} + +/* init_nest_pmu_ref: Initialize the imc_pmu_ref struct for all the nodes */ +static int init_nest_pmu_ref(void) +{ + int nid, i, cpu; + + nest_imc_refc = kcalloc(num_possible_nodes(), sizeof(*nest_imc_refc), + GFP_KERNEL); + + if (!nest_imc_refc) + return -ENOMEM; + + i = 0; + for_each_node(nid) { + /* + * Mutex lock to avoid races while tracking the number of + * sessions using the chip's nest pmu units. + */ + mutex_init(&nest_imc_refc[i].lock); + + /* + * Loop to init the "id" with the node_id. 
Variable "i" is initialized
+		 * to 0 and used as an index into the array. "i" will not go off
+		 * the end of the array, since "for_each_node" loops over
+		 * "N_POSSIBLE" nodes only.
+		 */
+		nest_imc_refc[i++].id = nid;
+	}
+
+	/*
+	 * Loop to init the per_cpu "local_nest_imc_refc" with the proper
+	 * "nest_imc_refc" index. This makes get_nest_pmu_ref() a lot simpler.
+	 */
+	for_each_possible_cpu(cpu) {
+		nid = cpu_to_node(cpu);
+		for (i = 0; i < num_possible_nodes(); i++) {
+			if (nest_imc_refc[i].id == nid) {
+				per_cpu(local_nest_imc_refc, cpu) = &nest_imc_refc[i];
+				break;
+			}
+		}
+	}
+	return 0;
+}
+
+static void cleanup_all_core_imc_memory(void)
+{
+	int i, nr_cores = num_present_cpus() / threads_per_core;
+	struct imc_mem_info *ptr = core_imc_pmu->mem_info;
+	int size = core_imc_pmu->counter_mem_size;
+
+	/* mem_info will never be NULL */
+	for (i = 0; i < nr_cores; i++) {
+		if (ptr[i].vbase)
+			free_pages((u64)ptr[i].vbase, get_order(size));
+	}
+
+	kfree(ptr);
+	kfree(core_imc_refc);
+}
+
+static void thread_imc_ldbar_disable(void *dummy)
+{
+	/*
+	 * By zeroing LDBAR, we disable thread-imc
+	 * updates.
+	 */
+	mtspr(SPRN_LDBAR, 0);
+}
+
+void thread_imc_disable(void)
+{
+	on_each_cpu(thread_imc_ldbar_disable, NULL, 1);
+}
+
+static void cleanup_all_thread_imc_memory(void)
+{
+	int i, order = get_order(thread_imc_mem_size);
+
+	for_each_online_cpu(i) {
+		if (per_cpu(thread_imc_mem, i))
+			free_pages((u64)per_cpu(thread_imc_mem, i), order);
+	}
+}
+
+/*
+ * Common function to unregister cpu hotplug callback and
+ * free the memory.
+ * TODO: Need to handle pmu unregistering, which will be
+ * done in a followup series.
+ */
+static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
+{
+	if (pmu_ptr->domain == IMC_DOMAIN_NEST) {
+		mutex_lock(&nest_init_lock);
+		if (nest_pmus == 1) {
+			cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE);
+			kfree(nest_imc_refc);
+		}
+
+		if (nest_pmus > 0)
+			nest_pmus--;
+		mutex_unlock(&nest_init_lock);
+	}
+
+	/* Free core_imc memory */
+	if (pmu_ptr->domain == IMC_DOMAIN_CORE) {
+		cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE);
+		cleanup_all_core_imc_memory();
+	}
+
+	/* Free thread_imc memory */
+	if (pmu_ptr->domain == IMC_DOMAIN_THREAD) {
+		cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE);
+		cleanup_all_thread_imc_memory();
+	}
+
+	/* Only free the attr_groups which are dynamically allocated */
+	kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
+	kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]);
+	kfree(pmu_ptr);
+	return;
+}
+
+
+/*
+ * imc_mem_init : Function to support memory allocation for core imc.
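+ * The pmu name is derived from the device tree node for every domain; in
+ * addition, the per-core mem_info and refcount arrays are allocated for
+ * core imc, and the per-cpu counter pages for thread imc.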
+ */
+static int imc_mem_init(struct imc_pmu *pmu_ptr, struct device_node *parent,
+								int pmu_index)
+{
+	const char *s;
+	int nr_cores, cpu, res;
+
+	if (of_property_read_string(parent, "name", &s))
+		return -ENODEV;
+
+	switch (pmu_ptr->domain) {
+	case IMC_DOMAIN_NEST:
+		/* Update the pmu name */
+		pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s_imc", "nest_", s);
+		if (!pmu_ptr->pmu.name)
+			return -ENOMEM;
+
+		/* Needed for hotplug/migration */
+		per_nest_pmu_arr[pmu_index] = pmu_ptr;
+		break;
+	case IMC_DOMAIN_CORE:
+		/* Update the pmu name */
+		pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc");
+		if (!pmu_ptr->pmu.name)
+			return -ENOMEM;
+
+		nr_cores = num_present_cpus() / threads_per_core;
+		pmu_ptr->mem_info = kcalloc(nr_cores, sizeof(struct imc_mem_info),
+								GFP_KERNEL);
+
+		if (!pmu_ptr->mem_info)
+			return -ENOMEM;
+
+		core_imc_refc = kcalloc(nr_cores, sizeof(struct imc_pmu_ref),
+								GFP_KERNEL);
+
+		if (!core_imc_refc)
+			return -ENOMEM;
+
+		core_imc_pmu = pmu_ptr;
+		break;
+	case IMC_DOMAIN_THREAD:
+		/* Update the pmu name */
+		pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc");
+		if (!pmu_ptr->pmu.name)
+			return -ENOMEM;
+
+		thread_imc_mem_size = pmu_ptr->counter_mem_size;
+		for_each_online_cpu(cpu) {
+			res = thread_imc_mem_alloc(cpu, pmu_ptr->counter_mem_size);
+			if (res)
+				return res;
+		}
+
+		thread_imc_pmu = pmu_ptr;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * init_imc_pmu : Setup and register the IMC pmu device.
+ *
+ * @parent:	Device tree unit node
+ * @pmu_ptr:	memory allocated for this pmu
+ * @pmu_idx:	Count of nest pmus registered
+ *
+ * init_imc_pmu() sets up the pmu cpumask and registers a cpu hotplug callback.
+ * It handles failure cases and frees memory accordingly.
+ */
+int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_idx)
+{
+	int ret;
+
+	ret = imc_mem_init(pmu_ptr, parent, pmu_idx);
+	if (ret)
+		goto err_free;
+
+	switch (pmu_ptr->domain) {
+	case IMC_DOMAIN_NEST:
+		/*
+		 * Nest imc pmus need only one cpu per chip, so we initialize
+		 * the cpumask for the first nest imc pmu and use the same for
+		 * the rest. To handle unregistering the cpuhotplug callback,
+		 * we track the number of nest pmus in "nest_pmus".
+		 */
+		mutex_lock(&nest_init_lock);
+		if (nest_pmus == 0) {
+			ret = init_nest_pmu_ref();
+			if (ret) {
+				mutex_unlock(&nest_init_lock);
+				goto err_free;
+			}
+			/* Register for cpu hotplug notification.
*/ + ret = nest_pmu_cpumask_init(); + if (ret) { + mutex_unlock(&nest_init_lock); + goto err_free; + } + } + nest_pmus++; + mutex_unlock(&nest_init_lock); + break; + case IMC_DOMAIN_CORE: + ret = core_imc_pmu_cpumask_init(); + if (ret) { + cleanup_all_core_imc_memory(); + return ret; + } + + break; + case IMC_DOMAIN_THREAD: + ret = thread_imc_cpu_init(); + if (ret) { + cleanup_all_thread_imc_memory(); + return ret; + } + + break; + default: + return -1; /* Unknown domain */ + } + + ret = update_events_in_group(parent, pmu_ptr); + if (ret) + goto err_free; + + ret = update_pmu_ops(pmu_ptr); + if (ret) + goto err_free; + + ret = perf_pmu_register(&pmu_ptr->pmu, pmu_ptr->pmu.name, -1); + if (ret) + goto err_free; + + pr_info("%s performance monitor hardware support registered\n", + pmu_ptr->pmu.name); + + return 0; + +err_free: + imc_common_cpuhp_mem_free(pmu_ptr); + return ret; +} diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c index 3f3aa9a7063a..2efee3f196f5 100644 --- a/arch/powerpc/perf/isa207-common.c +++ b/arch/powerpc/perf/isa207-common.c @@ -99,7 +99,7 @@ static void mmcra_sdar_mode(u64 event, unsigned long *mmcra) else if (!cpu_has_feature(CPU_FTR_POWER9_DD1) && p9_SDAR_MODE(event)) *mmcra |= p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT; else - *mmcra |= MMCRA_SDAR_MODE_TLB; + *mmcra |= MMCRA_SDAR_MODE_DCACHE; } else *mmcra |= MMCRA_SDAR_MODE_TLB; } @@ -488,8 +488,8 @@ static int find_alternative(u64 event, const unsigned int ev_alt[][MAX_ALT], int return -1; } -int isa207_get_alternatives(u64 event, u64 alt[], - const unsigned int ev_alt[][MAX_ALT], int size) +int isa207_get_alternatives(u64 event, u64 alt[], int size, unsigned int flags, + const unsigned int ev_alt[][MAX_ALT]) { int i, j, num_alt = 0; u64 alt_event; @@ -505,5 +505,30 @@ int isa207_get_alternatives(u64 event, u64 alt[], } } + if (flags & PPMU_ONLY_COUNT_RUN) { + /* + * We're only counting in RUN state, so PM_CYC is equivalent to + * PM_RUN_CYC and PM_INST_CMPL === PM_RUN_INST_CMPL. 
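+		 * Walk the alternatives collected so far and append the
+		 * run-state twin of each of those event codes to alt[].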
+ */ + j = num_alt; + for (i = 0; i < num_alt; ++i) { + switch (alt[i]) { + case 0x1e: /* PMC_CYC */ + alt[j++] = 0x600f4; /* PM_RUN_CYC */ + break; + case 0x600f4: + alt[j++] = 0x1e; + break; + case 0x2: /* PM_INST_CMPL */ + alt[j++] = 0x500fa; /* PM_RUN_INST_CMPL */ + break; + case 0x500fa: + alt[j++] = 0x2; + break; + } + } + num_alt = j; + } + return num_alt; } diff --git a/arch/powerpc/perf/isa207-common.h b/arch/powerpc/perf/isa207-common.h index 8acbe6e802c7..6c737d675792 100644 --- a/arch/powerpc/perf/isa207-common.h +++ b/arch/powerpc/perf/isa207-common.h @@ -247,6 +247,7 @@ #define MMCRA_SDAR_MODE_SHIFT 42 #define MMCRA_SDAR_MODE_TLB (1ull << MMCRA_SDAR_MODE_SHIFT) #define MMCRA_SDAR_MODE_NO_UPDATES ~(0x3ull << MMCRA_SDAR_MODE_SHIFT) +#define MMCRA_SDAR_MODE_DCACHE (2ull << MMCRA_SDAR_MODE_SHIFT) #define MMCRA_IFM_SHIFT 30 #define MMCRA_THR_CTR_MANT_SHIFT 19 #define MMCRA_THR_CTR_MANT_MASK 0x7Ful @@ -287,8 +288,8 @@ int isa207_compute_mmcr(u64 event[], int n_ev, unsigned int hwc[], unsigned long mmcr[], struct perf_event *pevents[]); void isa207_disable_pmc(unsigned int pmc, unsigned long mmcr[]); -int isa207_get_alternatives(u64 event, u64 alt[], - const unsigned int ev_alt[][MAX_ALT], int size); +int isa207_get_alternatives(u64 event, u64 alt[], int size, unsigned int flags, + const unsigned int ev_alt[][MAX_ALT]); void isa207_get_mem_data_src(union perf_mem_data_src *dsrc, u32 flags, struct pt_regs *regs); void isa207_get_mem_weight(u64 *weight); diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c index 5463516e369b..c9356955cab4 100644 --- a/arch/powerpc/perf/power8-pmu.c +++ b/arch/powerpc/perf/power8-pmu.c @@ -50,34 +50,11 @@ static const unsigned int event_alternatives[][MAX_ALT] = { static int power8_get_alternatives(u64 event, unsigned int flags, u64 alt[]) { - int i, j, num_alt = 0; + int num_alt = 0; - num_alt = isa207_get_alternatives(event, alt, event_alternatives, - (int)ARRAY_SIZE(event_alternatives)); - if (flags & PPMU_ONLY_COUNT_RUN) { - /* - * We're only counting in RUN state, so PM_CYC is equivalent to - * PM_RUN_CYC and PM_INST_CMPL === PM_RUN_INST_CMPL. 
- */
-		j = num_alt;
-		for (i = 0; i < num_alt; ++i) {
-			switch (alt[i]) {
-			case PM_CYC:
-				alt[j++] = PM_RUN_CYC;
-				break;
-			case PM_RUN_CYC:
-				alt[j++] = PM_CYC;
-				break;
-			case PM_INST_CMPL:
-				alt[j++] = PM_RUN_INST_CMPL;
-				break;
-			case PM_RUN_INST_CMPL:
-				alt[j++] = PM_INST_CMPL;
-				break;
-			}
-		}
-		num_alt = j;
-	}
+	num_alt = isa207_get_alternatives(event, alt,
+				ARRAY_SIZE(event_alternatives), flags,
+				event_alternatives);
 
 	return num_alt;
 }
diff --git a/arch/powerpc/perf/power9-events-list.h b/arch/powerpc/perf/power9-events-list.h
index 50689180a6c1..e99c6bf4d391 100644
--- a/arch/powerpc/perf/power9-events-list.h
+++ b/arch/powerpc/perf/power9-events-list.h
@@ -16,13 +16,16 @@ EVENT(PM_CYC,					0x0001e)
 EVENT(PM_ICT_NOSLOT_CYC,			0x100f8)
 EVENT(PM_CMPLU_STALL,				0x1e054)
 EVENT(PM_INST_CMPL,				0x00002)
-EVENT(PM_BRU_CMPL,				0x4d05e)
+EVENT(PM_BR_CMPL,				0x4d05e)
 EVENT(PM_BR_MPRED_CMPL,				0x400f6)
 
 /* All L1 D cache load references counted at finish, gated by reject */
 EVENT(PM_LD_REF_L1,				0x100fc)
 /* Load Missed L1 */
 EVENT(PM_LD_MISS_L1_FIN,			0x2c04e)
+EVENT(PM_LD_MISS_L1,				0x3e054)
+/* Alternate event code for PM_LD_MISS_L1 */
+EVENT(PM_LD_MISS_L1_ALT,			0x400f0)
 /* Store Missed L1 */
 EVENT(PM_ST_MISS_L1,				0x300f0)
 /* L1 cache data prefetches */
@@ -62,3 +65,7 @@ EVENT(PM_INST_DISP,				0x200f2)
 EVENT(PM_INST_DISP_ALT,				0x300f2)
 /* Alternate Branch event code */
 EVENT(PM_BR_CMPL_ALT,				0x10012)
+/* Branch events that are not strongly biased */
+EVENT(PM_BR_2PATH,				0x20036)
+/* Alternate event code for branch events that are not strongly biased */
+EVENT(PM_BR_2PATH_ALT,				0x40036)
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
index 2280cf87ff9c..24b5b5b7a206 100644
--- a/arch/powerpc/perf/power9-pmu.c
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -109,14 +109,17 @@ static const unsigned int power9_event_alternatives[][MAX_ALT] = {
 	{ PM_INST_DISP,			PM_INST_DISP_ALT },
 	{ PM_RUN_CYC_ALT,		PM_RUN_CYC },
 	{ PM_RUN_INST_CMPL_ALT,		PM_RUN_INST_CMPL },
+	{ PM_LD_MISS_L1,		PM_LD_MISS_L1_ALT },
+	{ PM_BR_2PATH,			PM_BR_2PATH_ALT },
 };
 
 static int power9_get_alternatives(u64 event, unsigned int flags, u64 alt[])
 {
 	int num_alt = 0;
 
-	num_alt = isa207_get_alternatives(event, alt, power9_event_alternatives,
-				    (int)ARRAY_SIZE(power9_event_alternatives));
+	num_alt = isa207_get_alternatives(event, alt,
+				ARRAY_SIZE(power9_event_alternatives), flags,
+				power9_event_alternatives);
 
 	return num_alt;
 }
@@ -125,7 +128,7 @@ GENERIC_EVENT_ATTR(cpu-cycles,			PM_CYC);
 GENERIC_EVENT_ATTR(stalled-cycles-frontend,	PM_ICT_NOSLOT_CYC);
 GENERIC_EVENT_ATTR(stalled-cycles-backend,	PM_CMPLU_STALL);
 GENERIC_EVENT_ATTR(instructions,		PM_INST_CMPL);
-GENERIC_EVENT_ATTR(branch-instructions,		PM_BRU_CMPL);
+GENERIC_EVENT_ATTR(branch-instructions,		PM_BR_CMPL);
 GENERIC_EVENT_ATTR(branch-misses,		PM_BR_MPRED_CMPL);
 GENERIC_EVENT_ATTR(cache-references,		PM_LD_REF_L1);
 GENERIC_EVENT_ATTR(cache-misses,		PM_LD_MISS_L1_FIN);
@@ -143,7 +146,7 @@ CACHE_EVENT_ATTR(LLC-prefetches,		PM_L3_PREF_ALL);
 CACHE_EVENT_ATTR(LLC-store-misses,		PM_L2_ST_MISS);
 CACHE_EVENT_ATTR(LLC-stores,			PM_L2_ST);
 CACHE_EVENT_ATTR(branch-load-misses,		PM_BR_MPRED_CMPL);
-CACHE_EVENT_ATTR(branch-loads,			PM_BRU_CMPL);
+CACHE_EVENT_ATTR(branch-loads,			PM_BR_CMPL);
 CACHE_EVENT_ATTR(dTLB-load-misses,		PM_DTLB_MISS);
 CACHE_EVENT_ATTR(iTLB-load-misses,		PM_ITLB_MISS);
 
@@ -152,7 +155,7 @@ static struct attribute *power9_events_attr[] = {
 	GENERIC_EVENT_PTR(PM_ICT_NOSLOT_CYC),
 	GENERIC_EVENT_PTR(PM_CMPLU_STALL),
 	GENERIC_EVENT_PTR(PM_INST_CMPL),
-	GENERIC_EVENT_PTR(PM_BRU_CMPL),
+	GENERIC_EVENT_PTR(PM_BR_CMPL),
GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL), GENERIC_EVENT_PTR(PM_LD_REF_L1), GENERIC_EVENT_PTR(PM_LD_MISS_L1_FIN), @@ -169,7 +172,7 @@ static struct attribute *power9_events_attr[] = { CACHE_EVENT_PTR(PM_L2_ST_MISS), CACHE_EVENT_PTR(PM_L2_ST), CACHE_EVENT_PTR(PM_BR_MPRED_CMPL), - CACHE_EVENT_PTR(PM_BRU_CMPL), + CACHE_EVENT_PTR(PM_BR_CMPL), CACHE_EVENT_PTR(PM_DTLB_MISS), CACHE_EVENT_PTR(PM_ITLB_MISS), NULL @@ -244,7 +247,7 @@ static int power9_generic_events[] = { [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PM_ICT_NOSLOT_CYC, [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = PM_CMPLU_STALL, [PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL, - [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BRU_CMPL, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BR_CMPL, [PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL, [PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1, [PERF_COUNT_HW_CACHE_MISSES] = PM_LD_MISS_L1_FIN, @@ -370,7 +373,7 @@ static int power9_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { }, [ C(BPU) ] = { [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = PM_BRU_CMPL, + [ C(RESULT_ACCESS) ] = PM_BR_CMPL, [ C(RESULT_MISS) ] = PM_BR_MPRED_CMPL, }, [ C(OP_WRITE) ] = { @@ -459,8 +462,8 @@ static int __init init_power9_pmu(void) * Power9 DD1 should use PM_BR_CMPL_ALT event code for * "branches" to provide correct counter value. */ - EVENT_VAR(PM_BRU_CMPL, _g).id = PM_BR_CMPL_ALT; - EVENT_VAR(PM_BRU_CMPL, _c).id = PM_BR_CMPL_ALT; + EVENT_VAR(PM_BR_CMPL, _g).id = PM_BR_CMPL_ALT; + EVENT_VAR(PM_BR_CMPL, _c).id = PM_BR_CMPL_ALT; rc = register_power_pmu(&power9_isa207_pmu); } else { rc = register_power_pmu(&power9_pmu); diff --git a/arch/powerpc/platforms/44x/Makefile b/arch/powerpc/platforms/44x/Makefile index 72b824160660..2c5651992369 100644 --- a/arch/powerpc/platforms/44x/Makefile +++ b/arch/powerpc/platforms/44x/Makefile @@ -1,6 +1,6 @@ -obj-$(CONFIG_44x) += misc_44x.o +obj-y += misc_44x.o machine_check.o ifneq ($(CONFIG_PPC4xx_CPM),y) -obj-$(CONFIG_44x) += idle.o +obj-y += idle.o endif obj-$(CONFIG_PPC44x_SIMPLE) += ppc44x_simple.o obj-$(CONFIG_EBONY) += ebony.o diff --git a/arch/powerpc/platforms/44x/machine_check.c b/arch/powerpc/platforms/44x/machine_check.c new file mode 100644 index 000000000000..034d70d6d335 --- /dev/null +++ b/arch/powerpc/platforms/44x/machine_check.c @@ -0,0 +1,89 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include +#include +#include + +#include + +int machine_check_440A(struct pt_regs *regs) +{ + unsigned long reason = regs->dsisr; + + printk("Machine check in kernel mode.\n"); + if (reason & ESR_IMCP){ + printk("Instruction Synchronous Machine Check exception\n"); + mtspr(SPRN_ESR, reason & ~ESR_IMCP); + } + else { + u32 mcsr = mfspr(SPRN_MCSR); + if (mcsr & MCSR_IB) + printk("Instruction Read PLB Error\n"); + if (mcsr & MCSR_DRB) + printk("Data Read PLB Error\n"); + if (mcsr & MCSR_DWB) + printk("Data Write PLB Error\n"); + if (mcsr & MCSR_TLBP) + printk("TLB Parity Error\n"); + if (mcsr & MCSR_ICP){ + flush_instruction_cache(); + printk("I-Cache Parity Error\n"); + } + if (mcsr & MCSR_DCSP) + printk("D-Cache Search Parity Error\n"); + if (mcsr & MCSR_DCFP) + printk("D-Cache Flush Parity Error\n"); + if (mcsr & MCSR_IMPE) + printk("Machine Check exception is imprecise\n"); + + /* Clear MCSR */ + mtspr(SPRN_MCSR, mcsr); + } + return 0; +} + +#ifdef CONFIG_PPC_47x +int machine_check_47x(struct pt_regs *regs) +{ + unsigned long reason = regs->dsisr; + u32 mcsr; + + printk(KERN_ERR "Machine check in kernel mode.\n"); + if (reason & ESR_IMCP) { + printk(KERN_ERR "Instruction Synchronous Machine Check exception\n"); + mtspr(SPRN_ESR, reason & ~ESR_IMCP); + return 0; + } + mcsr = mfspr(SPRN_MCSR); + if (mcsr & MCSR_IB) + printk(KERN_ERR "Instruction Read PLB Error\n"); + if (mcsr & MCSR_DRB) + printk(KERN_ERR "Data Read PLB Error\n"); + if (mcsr & MCSR_DWB) + printk(KERN_ERR "Data Write PLB Error\n"); + if (mcsr & MCSR_TLBP) + printk(KERN_ERR "TLB Parity Error\n"); + if (mcsr & MCSR_ICP) { + flush_instruction_cache(); + printk(KERN_ERR "I-Cache Parity Error\n"); + } + if (mcsr & MCSR_DCSP) + printk(KERN_ERR "D-Cache Search Parity Error\n"); + if (mcsr & PPC47x_MCSR_GPR) + printk(KERN_ERR "GPR Parity Error\n"); + if (mcsr & PPC47x_MCSR_FPR) + printk(KERN_ERR "FPR Parity Error\n"); + if (mcsr & PPC47x_MCSR_IPR) + printk(KERN_ERR "Machine Check exception is imprecise\n"); + + /* Clear MCSR */ + mtspr(SPRN_MCSR, mcsr); + + return 0; +} +#endif /* CONFIG_PPC_47x */ diff --git a/arch/powerpc/platforms/4xx/Makefile b/arch/powerpc/platforms/4xx/Makefile new file mode 100644 index 000000000000..9779c32db34e --- /dev/null +++ b/arch/powerpc/platforms/4xx/Makefile @@ -0,0 +1,8 @@ +obj-y += uic.o machine_check.o +obj-$(CONFIG_PPC4xx_OCM) += ocm.o +obj-$(CONFIG_4xx_SOC) += soc.o +obj-$(CONFIG_PCI) += pci.o +obj-$(CONFIG_PPC4xx_HSTA_MSI) += hsta_msi.o +obj-$(CONFIG_PPC4xx_MSI) += msi.o +obj-$(CONFIG_PPC4xx_CPM) += cpm.o +obj-$(CONFIG_PPC4xx_GPIO) += gpio.o diff --git a/arch/powerpc/sysdev/ppc4xx_cpm.c b/arch/powerpc/platforms/4xx/cpm.c similarity index 97% rename from arch/powerpc/sysdev/ppc4xx_cpm.c rename to arch/powerpc/platforms/4xx/cpm.c index ba95adf81d8d..53ff81ca8a3c 100644 --- a/arch/powerpc/sysdev/ppc4xx_cpm.c +++ b/arch/powerpc/platforms/4xx/cpm.c @@ -240,7 +240,7 @@ static int cpm_suspend_enter(suspend_state_t state) return 0; } -static struct platform_suspend_ops cpm_suspend_ops = { +static const struct platform_suspend_ops cpm_suspend_ops = { .valid = cpm_suspend_valid, .enter = cpm_suspend_enter, }; @@ -278,8 +278,8 @@ static int __init cpm_init(void) dcr_len = dcr_resource_len(np, 0); if (dcr_base == 0 || dcr_len == 0) { - printk(KERN_ERR "cpm: could not parse dcr property for %s\n", - np->full_name); + printk(KERN_ERR "cpm: could not parse dcr property for %pOF\n", + np); ret = -EINVAL; goto node_put; } @@ -287,8 +287,8 @@ static int __init cpm_init(void) cpm.dcr_host = dcr_map(np, 
dcr_base, dcr_len); if (!DCR_MAP_OK(cpm.dcr_host)) { - printk(KERN_ERR "cpm: failed to map dcr property for %s\n", - np->full_name); + printk(KERN_ERR "cpm: failed to map dcr property for %pOF\n", + np); ret = -EINVAL; goto node_put; } diff --git a/arch/powerpc/sysdev/ppc4xx_gpio.c b/arch/powerpc/platforms/4xx/gpio.c similarity index 98% rename from arch/powerpc/sysdev/ppc4xx_gpio.c rename to arch/powerpc/platforms/4xx/gpio.c index 5382d04dd872..2238e369cde4 100644 --- a/arch/powerpc/sysdev/ppc4xx_gpio.c +++ b/arch/powerpc/platforms/4xx/gpio.c @@ -198,8 +198,7 @@ static int __init ppc4xx_add_gpiochips(void) goto err; continue; err: - pr_err("%s: registration failed with status %d\n", - np->full_name, ret); + pr_err("%pOF: registration failed with status %d\n", np, ret); kfree(ppc4xx_gc); /* try others anyway */ } diff --git a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c b/arch/powerpc/platforms/4xx/hsta_msi.c similarity index 100% rename from arch/powerpc/sysdev/ppc4xx_hsta_msi.c rename to arch/powerpc/platforms/4xx/hsta_msi.c diff --git a/arch/powerpc/platforms/4xx/machine_check.c b/arch/powerpc/platforms/4xx/machine_check.c new file mode 100644 index 000000000000..aa039dfaf82f --- /dev/null +++ b/arch/powerpc/platforms/4xx/machine_check.c @@ -0,0 +1,26 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include + +#include + +int machine_check_4xx(struct pt_regs *regs) +{ + unsigned long reason = regs->dsisr; + + if (reason & ESR_IMCP) { + printk("Instruction"); + mtspr(SPRN_ESR, reason & ~ESR_IMCP); + } else + printk("Data"); + printk(" machine check in kernel mode.\n"); + + return 0; +} diff --git a/arch/powerpc/sysdev/ppc4xx_msi.c b/arch/powerpc/platforms/4xx/msi.c similarity index 98% rename from arch/powerpc/sysdev/ppc4xx_msi.c rename to arch/powerpc/platforms/4xx/msi.c index 590dab4f47d6..d50417e23add 100644 --- a/arch/powerpc/sysdev/ppc4xx_msi.c +++ b/arch/powerpc/platforms/4xx/msi.c @@ -233,8 +233,7 @@ static int ppc4xx_msi_probe(struct platform_device *dev) /* Get MSI ranges */ err = of_address_to_resource(dev->dev.of_node, 0, &res); if (err) { - dev_err(&dev->dev, "%s resource error!\n", - dev->dev.of_node->full_name); + dev_err(&dev->dev, "%pOF resource error!\n", dev->dev.of_node); goto error_out; } diff --git a/arch/powerpc/sysdev/ppc4xx_ocm.c b/arch/powerpc/platforms/4xx/ocm.c similarity index 100% rename from arch/powerpc/sysdev/ppc4xx_ocm.c rename to arch/powerpc/platforms/4xx/ocm.c diff --git a/arch/powerpc/sysdev/ppc4xx_pci.c b/arch/powerpc/platforms/4xx/pci.c similarity index 95% rename from arch/powerpc/sysdev/ppc4xx_pci.c rename to arch/powerpc/platforms/4xx/pci.c index 086aca69ecae..73e6b36bcd51 100644 --- a/arch/powerpc/sysdev/ppc4xx_pci.c +++ b/arch/powerpc/platforms/4xx/pci.c @@ -32,7 +32,7 @@ #include #include -#include "ppc4xx_pci.h" +#include "pci.h" static int dma_offset_set; @@ -127,9 +127,9 @@ static int __init ppc4xx_parse_dma_ranges(struct pci_controller *hose, * within 32 bits space */ if (cpu_addr != 0 || pci_addr > 0xffffffff) { - printk(KERN_WARNING "%s: Ignored unsupported dma range" + printk(KERN_WARNING "%pOF: Ignored unsupported dma range" " 0x%016llx...0x%016llx -> 0x%016llx\n", - hose->dn->full_name, + hose->dn, pci_addr, pci_addr + size - 1, cpu_addr); continue; } @@ -152,8 +152,7 @@ static int __init 
ppc4xx_parse_dma_ranges(struct pci_controller *hose, /* We only support one global DMA offset */ if (dma_offset_set && pci_dram_offset != res->start) { - printk(KERN_ERR "%s: dma-ranges(s) mismatch\n", - hose->dn->full_name); + printk(KERN_ERR "%pOF: dma-ranges(s) mismatch\n", hose->dn); return -ENXIO; } @@ -161,17 +160,16 @@ static int __init ppc4xx_parse_dma_ranges(struct pci_controller *hose, * DMA bounce buffers */ if (size < total_memory) { - printk(KERN_ERR "%s: dma-ranges too small " + printk(KERN_ERR "%pOF: dma-ranges too small " "(size=%llx total_memory=%llx)\n", - hose->dn->full_name, size, (u64)total_memory); + hose->dn, size, (u64)total_memory); return -ENXIO; } /* Check we are a power of 2 size and that base is a multiple of size*/ if ((size & (size - 1)) != 0 || (res->start & (size - 1)) != 0) { - printk(KERN_ERR "%s: dma-ranges unaligned\n", - hose->dn->full_name); + printk(KERN_ERR "%pOF: dma-ranges unaligned\n", hose->dn); return -ENXIO; } @@ -181,8 +179,8 @@ static int __init ppc4xx_parse_dma_ranges(struct pci_controller *hose, if (res->end > 0xffffffff && !(of_device_is_compatible(hose->dn, "ibm,plb-pciex-460sx") || of_device_is_compatible(hose->dn, "ibm,plb-pciex-476fpe"))) { - printk(KERN_ERR "%s: dma-ranges outside of 32 bits space\n", - hose->dn->full_name); + printk(KERN_ERR "%pOF: dma-ranges outside of 32 bits space\n", + hose->dn); return -ENXIO; } out: @@ -233,8 +231,7 @@ static int __init ppc4xx_setup_one_pci_PMM(struct pci_controller *hose, */ if ((plb_addr + size) > 0xffffffffull || !is_power_of_2(size) || size < 0x1000 || (plb_addr & (size - 1)) != 0) { - printk(KERN_WARNING "%s: Resource out of range\n", - hose->dn->full_name); + printk(KERN_WARNING "%pOF: Resource out of range\n", hose->dn); return -1; } ma = (0xffffffffu << ilog2(size)) | 1; @@ -266,8 +263,7 @@ static void __init ppc4xx_configure_pci_PMMs(struct pci_controller *hose, if (!(res->flags & IORESOURCE_MEM)) continue; if (j > 2) { - printk(KERN_WARNING "%s: Too many ranges\n", - hose->dn->full_name); + printk(KERN_WARNING "%pOF: Too many ranges\n", hose->dn); break; } @@ -292,8 +288,8 @@ static void __init ppc4xx_configure_pci_PMMs(struct pci_controller *hose, if (j <= 2 && !found_isa_hole && hose->isa_mem_size) if (ppc4xx_setup_one_pci_PMM(hose, reg, hose->isa_mem_phys, 0, hose->isa_mem_size, 0, j) == 0) - printk(KERN_INFO "%s: Legacy ISA memory support enabled\n", - hose->dn->full_name); + printk(KERN_INFO "%pOF: Legacy ISA memory support enabled\n", + hose->dn); } static void __init ppc4xx_configure_pci_PTMs(struct pci_controller *hose, @@ -333,21 +329,20 @@ static void __init ppc4xx_probe_pci_bridge(struct device_node *np) /* Check if device is enabled */ if (!of_device_is_available(np)) { - printk(KERN_INFO "%s: Port disabled via device-tree\n", - np->full_name); + printk(KERN_INFO "%pOF: Port disabled via device-tree\n", np); return; } /* Fetch config space registers address */ if (of_address_to_resource(np, 0, &rsrc_cfg)) { - printk(KERN_ERR "%s: Can't get PCI config register base !", - np->full_name); + printk(KERN_ERR "%pOF: Can't get PCI config register base !", + np); return; } /* Fetch host bridge internal registers address */ if (of_address_to_resource(np, 3, &rsrc_reg)) { - printk(KERN_ERR "%s: Can't get PCI internal register base !", - np->full_name); + printk(KERN_ERR "%pOF: Can't get PCI internal register base !", + np); return; } @@ -361,7 +356,7 @@ static void __init ppc4xx_probe_pci_bridge(struct device_node *np) /* Map registers */ reg = ioremap(rsrc_reg.start, 
resource_size(&rsrc_reg)); if (reg == NULL) { - printk(KERN_ERR "%s: Can't map registers !", np->full_name); + printk(KERN_ERR "%pOF: Can't map registers !", np); goto fail; } @@ -423,8 +418,8 @@ static int __init ppc4xx_setup_one_pcix_POM(struct pci_controller *hose, if (!is_power_of_2(size) || size < 0x1000 || (plb_addr & (size - 1)) != 0) { - printk(KERN_WARNING "%s: Resource out of range\n", - hose->dn->full_name); + printk(KERN_WARNING "%pOF: Resource out of range\n", + hose->dn); return -1; } @@ -467,8 +462,7 @@ static void __init ppc4xx_configure_pcix_POMs(struct pci_controller *hose, if (!(res->flags & IORESOURCE_MEM)) continue; if (j > 1) { - printk(KERN_WARNING "%s: Too many ranges\n", - hose->dn->full_name); + printk(KERN_WARNING "%pOF: Too many ranges\n", hose->dn); break; } @@ -493,8 +487,8 @@ static void __init ppc4xx_configure_pcix_POMs(struct pci_controller *hose, if (j <= 1 && !found_isa_hole && hose->isa_mem_size) if (ppc4xx_setup_one_pcix_POM(hose, reg, hose->isa_mem_phys, 0, hose->isa_mem_size, 0, j) == 0) - printk(KERN_INFO "%s: Legacy ISA memory support enabled\n", - hose->dn->full_name); + printk(KERN_INFO "%pOF: Legacy ISA memory support enabled\n", + hose->dn); } static void __init ppc4xx_configure_pcix_PIMs(struct pci_controller *hose, @@ -539,14 +533,14 @@ static void __init ppc4xx_probe_pcix_bridge(struct device_node *np) /* Fetch config space registers address */ if (of_address_to_resource(np, 0, &rsrc_cfg)) { - printk(KERN_ERR "%s:Can't get PCI-X config register base !", - np->full_name); + printk(KERN_ERR "%pOF: Can't get PCI-X config register base !", + np); return; } /* Fetch host bridge internal registers address */ if (of_address_to_resource(np, 3, &rsrc_reg)) { - printk(KERN_ERR "%s: Can't get PCI-X internal register base !", - np->full_name); + printk(KERN_ERR "%pOF: Can't get PCI-X internal register base !", + np); return; } @@ -568,7 +562,7 @@ static void __init ppc4xx_probe_pcix_bridge(struct device_node *np) /* Map registers */ reg = ioremap(rsrc_reg.start, resource_size(&rsrc_reg)); if (reg == NULL) { - printk(KERN_ERR "%s: Can't map registers !", np->full_name); + printk(KERN_ERR "%pOF: Can't map registers !", np); goto fail; } @@ -1246,8 +1240,8 @@ static void __init ppc460sx_pciex_check_link(struct ppc4xx_pciex_port *port) mbase = ioremap(port->cfg_space.start + 0x10000000, 0x1000); if (mbase == NULL) { - printk(KERN_ERR "%s: Can't map internal config space !", - port->node->full_name); + printk(KERN_ERR "%pOF: Can't map internal config space !", + port->node); goto done; } @@ -1389,7 +1383,7 @@ static void __init ppc_476fpe_pciex_check_link(struct ppc4xx_pciex_port *port) port->index); return; } - + while (timeout_ms--) { val = in_le32(mbase + PECFG_TLDLP); @@ -1448,8 +1442,7 @@ static int __init ppc4xx_pciex_check_core_init(struct device_node *np) ppc4xx_pciex_hwops = &ppc_476fpe_pcie_hwops; #endif if (ppc4xx_pciex_hwops == NULL) { - printk(KERN_WARNING "PCIE: unknown host type %s\n", - np->full_name); + printk(KERN_WARNING "PCIE: unknown host type %pOF\n", np); return -ENODEV; } @@ -1730,8 +1723,7 @@ static int __init ppc4xx_setup_one_pciex_POM(struct ppc4xx_pciex_port *port, (index < 2 && size < 0x100000) || (index == 2 && size < 0x100) || (plb_addr & (size - 1)) != 0) { - printk(KERN_WARNING "%s: Resource out of range\n", - hose->dn->full_name); + printk(KERN_WARNING "%pOF: Resource out of range\n", hose->dn); return -1; } @@ -1807,8 +1799,8 @@ static void __init ppc4xx_configure_pciex_POMs(struct ppc4xx_pciex_port *port, if (!(res->flags & 
IORESOURCE_MEM)) continue; if (j > 1) { - printk(KERN_WARNING "%s: Too many ranges\n", - port->node->full_name); + printk(KERN_WARNING "%pOF: Too many ranges\n", + port->node); break; } @@ -1834,8 +1826,8 @@ static void __init ppc4xx_configure_pciex_POMs(struct ppc4xx_pciex_port *port, if (ppc4xx_setup_one_pciex_POM(port, hose, mbase, hose->isa_mem_phys, 0, hose->isa_mem_size, 0, j) == 0) - printk(KERN_INFO "%s: Legacy ISA memory support enabled\n", - hose->dn->full_name); + printk(KERN_INFO "%pOF: Legacy ISA memory support enabled\n", + hose->dn); /* Configure IO, always 64K starting at 0. We hard wire it to 64K ! * Note also that it -has- to be region index 2 on this HW @@ -1970,8 +1962,8 @@ static void __init ppc4xx_pciex_port_setup_hose(struct ppc4xx_pciex_port *port) (hose->first_busno + 1) * 0x100000, busses * 0x100000); if (cfg_data == NULL) { - printk(KERN_ERR "%s: Can't map external config space !", - port->node->full_name); + printk(KERN_ERR "%pOF: Can't map external config space !", + port->node); goto fail; } hose->cfg_data = cfg_data; @@ -1982,13 +1974,13 @@ static void __init ppc4xx_pciex_port_setup_hose(struct ppc4xx_pciex_port *port) */ mbase = ioremap(port->cfg_space.start + 0x10000000, 0x1000); if (mbase == NULL) { - printk(KERN_ERR "%s: Can't map internal config space !", - port->node->full_name); + printk(KERN_ERR "%pOF: Can't map internal config space !", + port->node); goto fail; } hose->cfg_addr = mbase; - pr_debug("PCIE %s, bus %d..%d\n", port->node->full_name, + pr_debug("PCIE %pOF, bus %d..%d\n", port->node, hose->first_busno, hose->last_busno); pr_debug(" config space mapped at: root @0x%p, other @0x%p\n", hose->cfg_addr, hose->cfg_data); @@ -2100,14 +2092,13 @@ static void __init ppc4xx_probe_pciex_bridge(struct device_node *np) /* Get the port number from the device-tree */ pval = of_get_property(np, "port", NULL); if (pval == NULL) { - printk(KERN_ERR "PCIE: Can't find port number for %s\n", - np->full_name); + printk(KERN_ERR "PCIE: Can't find port number for %pOF\n", np); return; } portno = *pval; if (portno >= ppc4xx_pciex_port_count) { - printk(KERN_ERR "PCIE: port number out of range for %s\n", - np->full_name); + printk(KERN_ERR "PCIE: port number out of range for %pOF\n", + np); return; } port = &ppc4xx_pciex_ports[portno]; @@ -2125,8 +2116,8 @@ static void __init ppc4xx_probe_pciex_bridge(struct device_node *np) if (ppc4xx_pciex_hwops->want_sdr) { pval = of_get_property(np, "sdr-base", NULL); if (pval == NULL) { - printk(KERN_ERR "PCIE: missing sdr-base for %s\n", - np->full_name); + printk(KERN_ERR "PCIE: missing sdr-base for %pOF\n", + np); return; } port->sdr_base = *pval; @@ -2142,29 +2133,26 @@ static void __init ppc4xx_probe_pciex_bridge(struct device_node *np) } else if (!strcmp(val, "pci")) { port->endpoint = 0; } else { - printk(KERN_ERR "PCIE: missing or incorrect device_type for %s\n", - np->full_name); + printk(KERN_ERR "PCIE: missing or incorrect device_type for %pOF\n", + np); return; } /* Fetch config space registers address */ if (of_address_to_resource(np, 0, &port->cfg_space)) { - printk(KERN_ERR "%s: Can't get PCI-E config space !", - np->full_name); + printk(KERN_ERR "%pOF: Can't get PCI-E config space !", np); return; } /* Fetch host bridge internal registers address */ if (of_address_to_resource(np, 1, &port->utl_regs)) { - printk(KERN_ERR "%s: Can't get UTL register base !", - np->full_name); + printk(KERN_ERR "%pOF: Can't get UTL register base !", np); return; } /* Map DCRs */ dcrs = dcr_resource_start(np, 0); if (dcrs == 0) { - 
printk(KERN_ERR "%s: Can't get DCR register base !", - np->full_name); + printk(KERN_ERR "%pOF: Can't get DCR register base !", np); return; } port->dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0)); diff --git a/arch/powerpc/sysdev/ppc4xx_pci.h b/arch/powerpc/platforms/4xx/pci.h similarity index 100% rename from arch/powerpc/sysdev/ppc4xx_pci.h rename to arch/powerpc/platforms/4xx/pci.h diff --git a/arch/powerpc/sysdev/ppc4xx_soc.c b/arch/powerpc/platforms/4xx/soc.c similarity index 97% rename from arch/powerpc/sysdev/ppc4xx_soc.c rename to arch/powerpc/platforms/4xx/soc.c index d41134d2f786..5e36508b2a70 100644 --- a/arch/powerpc/sysdev/ppc4xx_soc.c +++ b/arch/powerpc/platforms/4xx/soc.c @@ -90,7 +90,7 @@ static int __init ppc4xx_l2c_probe(void) /* Get l2 cache size */ prop = of_get_property(np, "cache-size", NULL); if (prop == NULL) { - printk(KERN_ERR "%s: Can't get cache-size!\n", np->full_name); + printk(KERN_ERR "%pOF: Can't get cache-size!\n", np); of_node_put(np); return -ENODEV; } @@ -99,8 +99,7 @@ static int __init ppc4xx_l2c_probe(void) /* Map DCRs */ dcrreg = of_get_property(np, "dcr-reg", &len); if (!dcrreg || (len != 4 * sizeof(u32))) { - printk(KERN_ERR "%s: Can't get DCR register base !", - np->full_name); + printk(KERN_ERR "%pOF: Can't get DCR register base !", np); of_node_put(np); return -ENODEV; } diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/platforms/4xx/uic.c similarity index 95% rename from arch/powerpc/sysdev/uic.c rename to arch/powerpc/platforms/4xx/uic.c index a00949f3e378..8b4dd0da0839 100644 --- a/arch/powerpc/sysdev/uic.c +++ b/arch/powerpc/platforms/4xx/uic.c @@ -243,16 +243,16 @@ static struct uic * __init uic_init_one(struct device_node *node) raw_spin_lock_init(&uic->lock); indexp = of_get_property(node, "cell-index", &len); if (!indexp || (len != sizeof(u32))) { - printk(KERN_ERR "uic: Device node %s has missing or invalid " - "cell-index property\n", node->full_name); + printk(KERN_ERR "uic: Device node %pOF has missing or invalid " + "cell-index property\n", node); return NULL; } uic->index = *indexp; dcrreg = of_get_property(node, "dcr-reg", &len); if (!dcrreg || (len != 2*sizeof(u32))) { - printk(KERN_ERR "uic: Device node %s has missing or invalid " - "dcr-reg property\n", node->full_name); + printk(KERN_ERR "uic: Device node %pOF has missing or invalid " + "dcr-reg property\n", node); return NULL; } uic->dcrbase = *dcrreg; @@ -292,7 +292,7 @@ void __init uic_init_tree(void) * top-level interrupt controller */ primary_uic = uic_init_one(np); if (!primary_uic) - panic("Unable to initialize primary UIC %s\n", np->full_name); + panic("Unable to initialize primary UIC %pOF\n", np); irq_set_default_host(primary_uic->irqhost); of_node_put(np); @@ -306,8 +306,8 @@ void __init uic_init_tree(void) uic = uic_init_one(np); if (! 
uic) - panic("Unable to initialize a secondary UIC %s\n", - np->full_name); + panic("Unable to initialize a secondary UIC %pOF\n", + np); cascade_virq = irq_of_parse_and_map(np, 0); diff --git a/arch/powerpc/platforms/512x/clock-commonclk.c b/arch/powerpc/platforms/512x/clock-commonclk.c index add5a5374fa0..b3097fe6441b 100644 --- a/arch/powerpc/platforms/512x/clock-commonclk.c +++ b/arch/powerpc/platforms/512x/clock-commonclk.c @@ -363,7 +363,7 @@ static int get_cpmf_mult_x2(void) */ /* applies to the IPS_DIV, and PCI_DIV values */ -static struct clk_div_table divtab_2346[] = { +static const struct clk_div_table divtab_2346[] = { { .val = 2, .div = 2, }, { .val = 3, .div = 3, }, { .val = 4, .div = 4, }, @@ -372,7 +372,7 @@ static struct clk_div_table divtab_2346[] = { }; /* applies to the MBX_DIV, LPC_DIV, and NFC_DIV values */ -static struct clk_div_table divtab_1234[] = { +static const struct clk_div_table divtab_1234[] = { { .val = 1, .div = 1, }, { .val = 2, .div = 2, }, { .val = 3, .div = 3, }, diff --git a/arch/powerpc/platforms/512x/mpc512x_shared.c b/arch/powerpc/platforms/512x/mpc512x_shared.c index 6b4f4cb7009a..f99e79ee060e 100644 --- a/arch/powerpc/platforms/512x/mpc512x_shared.c +++ b/arch/powerpc/platforms/512x/mpc512x_shared.c @@ -387,8 +387,8 @@ static unsigned int __init get_fifo_size(struct device_node *np, if (fp) return *fp; - pr_warning("no %s property in %s node, defaulting to %d\n", - prop_name, np->full_name, DEFAULT_FIFO_SIZE); + pr_warning("no %s property in %pOF node, defaulting to %d\n", + prop_name, np, DEFAULT_FIFO_SIZE); return DEFAULT_FIFO_SIZE; } @@ -426,15 +426,15 @@ static void __init mpc512x_psc_fifo_init(void) psc = of_iomap(np, 0); if (!psc) { - pr_err("%s: Can't map %s device\n", - __func__, np->full_name); + pr_err("%s: Can't map %pOF device\n", + __func__, np); continue; } /* FIFO space is 4KiB, check if requested size is available */ if ((fifobase + tx_fifo_size + rx_fifo_size) > 0x1000) { - pr_err("%s: no fifo space available for %s\n", - __func__, np->full_name); + pr_err("%s: no fifo space available for %pOF\n", + __func__, np); iounmap(psc); /* * chances are that another device requests less diff --git a/arch/powerpc/platforms/52xx/efika.c b/arch/powerpc/platforms/52xx/efika.c index 39b49822ace1..1ecbf176d35a 100644 --- a/arch/powerpc/platforms/52xx/efika.c +++ b/arch/powerpc/platforms/52xx/efika.c @@ -99,7 +99,7 @@ static void __init efika_pcisetup(void) bus_range = of_get_property(pcictrl, "bus-range", &len); if (bus_range == NULL || len < 2 * sizeof(int)) { printk(KERN_WARNING EFIKA_PLATFORM_NAME - ": Can't get bus-range for %s\n", pcictrl->full_name); + ": Can't get bus-range for %pOF\n", pcictrl); goto out_put; } @@ -109,14 +109,14 @@ static void __init efika_pcisetup(void) else printk(KERN_INFO EFIKA_PLATFORM_NAME ": PCI buses %d..%d", bus_range[0], bus_range[1]); - printk(" controlled by %s\n", pcictrl->full_name); + printk(" controlled by %pOF\n", pcictrl); printk("\n"); hose = pcibios_alloc_controller(pcictrl); if (!hose) { printk(KERN_WARNING EFIKA_PLATFORM_NAME - ": Can't allocate PCI controller structure for %s\n", - pcictrl->full_name); + ": Can't allocate PCI controller structure for %pOF\n", + pcictrl); goto out_put; } diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c index a3227040cc86..1fcab233d2f2 100644 --- a/arch/powerpc/platforms/52xx/media5200.c +++ b/arch/powerpc/platforms/52xx/media5200.c @@ -156,7 +156,7 @@ static void __init media5200_init_irq(void) fpga_np = 
of_find_compatible_node(NULL, NULL, "fsl,media5200-fpga"); if (!fpga_np) goto out; - pr_debug("%s: found fpga node: %s\n", __func__, fpga_np->full_name); + pr_debug("%s: found fpga node: %pOF\n", __func__, fpga_np); media5200_irq.regs = of_iomap(fpga_np, 0); if (!media5200_irq.regs) diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c index 22645a7c6b8a..9e974b1e1697 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c @@ -226,7 +226,7 @@ static int mpc52xx_gpt_irq_xlate(struct irq_domain *h, struct device_node *ct, dev_dbg(gpt->dev, "%s: flags=%i\n", __func__, intspec[0]); if ((intsize < 1) || (intspec[0] > 3)) { - dev_err(gpt->dev, "bad irq specifier in %s\n", ct->full_name); + dev_err(gpt->dev, "bad irq specifier in %pOF\n", ct); return -EINVAL; } @@ -331,7 +331,7 @@ mpc52xx_gpt_gpio_setup(struct mpc52xx_gpt_priv *gpt, struct device_node *node) if (!of_find_property(node, "gpio-controller", NULL)) return; - gpt->gc.label = kstrdup(node->full_name, GFP_KERNEL); + gpt->gc.label = kasprintf(GFP_KERNEL, "%pOF", node); if (!gpt->gc.label) { dev_err(gpt->dev, "out of memory\n"); return; diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pci.c b/arch/powerpc/platforms/52xx/mpc52xx_pci.c index 00282c2b0cae..af0f79995214 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_pci.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_pci.c @@ -369,19 +369,19 @@ mpc52xx_add_bridge(struct device_node *node) const int *bus_range; struct resource rsrc; - pr_debug("Adding MPC52xx PCI host bridge %s\n", node->full_name); + pr_debug("Adding MPC52xx PCI host bridge %pOF\n", node); pci_add_flags(PCI_REASSIGN_ALL_BUS); if (of_address_to_resource(node, 0, &rsrc) != 0) { - printk(KERN_ERR "Can't get %s resources\n", node->full_name); + printk(KERN_ERR "Can't get %pOF resources\n", node); return -EINVAL; } bus_range = of_get_property(node, "bus-range", &len); if (bus_range == NULL || len < 2 * sizeof(int)) { - printk(KERN_WARNING "Can't get %s bus-range, assume bus 0\n", - node->full_name); + printk(KERN_WARNING "Can't get %pOF bus-range, assume bus 0\n", + node); bus_range = NULL; } diff --git a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c index 63c5ab6489c9..96bb55ca61d3 100644 --- a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c +++ b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c @@ -128,7 +128,7 @@ static int mcu_gpiochip_add(struct mcu *mcu) return -ENODEV; gc->owner = THIS_MODULE; - gc->label = np->full_name; + gc->label = kasprintf(GFP_KERNEL, "%pOF", np); gc->can_sleep = 1; gc->ngpio = MCU_NUM_GPIO; gc->base = -1; @@ -141,6 +141,7 @@ static int mcu_gpiochip_add(struct mcu *mcu) static int mcu_gpiochip_remove(struct mcu *mcu) { + kfree(mcu->gc.label); gpiochip_remove(&mcu->gc); return 0; } diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c index 763ffca9628d..a4539c5accb0 100644 --- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c +++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c @@ -113,7 +113,7 @@ static int __init of_fsl_spi_probe(char *type, char *compatible, u32 sysclk, unreg: platform_device_del(pdev); err: - pr_err("%s: registration failed\n", np->full_name); + pr_err("%pOF: registration failed\n", np); next: i++; } diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c index 978b85bb3233..7fa3e197871a 100644 --- a/arch/powerpc/platforms/83xx/suspend.c +++ 
b/arch/powerpc/platforms/83xx/suspend.c @@ -361,7 +361,7 @@ static int pmc_probe(struct platform_device *ofdev) return -EBUSY; } - pmc_regs = ioremap(res.start, sizeof(struct mpc83xx_pmc)); + pmc_regs = ioremap(res.start, sizeof(*pmc_regs)); if (!pmc_regs) { ret = -ENOMEM; @@ -374,7 +374,7 @@ static int pmc_probe(struct platform_device *ofdev) goto out_pmc; } - clock_regs = ioremap(res.start, sizeof(struct mpc83xx_pmc)); + clock_regs = ioremap(res.start, sizeof(*clock_regs)); if (!clock_regs) { ret = -ENOMEM; diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c index 0908abd7e36f..9fb57f78cdbe 100644 --- a/arch/powerpc/platforms/85xx/p1022_ds.c +++ b/arch/powerpc/platforms/85xx/p1022_ds.c @@ -508,8 +508,8 @@ static void __init p1022_ds_setup_arch(void) * allocate one static local variable for each * call to this function. */ - pr_info("p1022ds: disabling %s node", - np2->full_name); + pr_info("p1022ds: disabling %pOF node", + np2); of_update_property(np2, &nor_status); of_node_put(np2); } @@ -524,8 +524,8 @@ static void __init p1022_ds_setup_arch(void) .length = sizeof("disabled"), }; - pr_info("p1022ds: disabling %s node", - np2->full_name); + pr_info("p1022ds: disabling %pOF node", + np2); of_update_property(np2, &nand_status); of_node_put(np2); } diff --git a/arch/powerpc/platforms/85xx/xes_mpc85xx.c b/arch/powerpc/platforms/85xx/xes_mpc85xx.c index cd6ce845f398..77e618dce4a8 100644 --- a/arch/powerpc/platforms/85xx/xes_mpc85xx.c +++ b/arch/powerpc/platforms/85xx/xes_mpc85xx.c @@ -100,8 +100,8 @@ static void xes_mpc85xx_fixups(void) err = of_address_to_resource(np, 0, &r[0]); if (err) { printk(KERN_WARNING "xes_mpc85xx: Could not get " - "resource for device tree node '%s'", - np->full_name); + "resource for device tree node '%pOF'", + np); continue; } diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig index 80cbcb0ad9b1..536b0c5d5ce3 100644 --- a/arch/powerpc/platforms/8xx/Kconfig +++ b/arch/powerpc/platforms/8xx/Kconfig @@ -5,7 +5,6 @@ config CPM1 choice prompt "8xx Machine Type" depends on PPC_8xx - depends on 8xx default MPC885ADS config MPC8XXFADS @@ -92,7 +91,7 @@ endmenu # menu "MPC8xx CPM Options" - depends on 8xx + depends on PPC_8xx # This doesn't really belong here, but it is convenient to ask # 8xx specific questions. diff --git a/arch/powerpc/platforms/8xx/Makefile b/arch/powerpc/platforms/8xx/Makefile index 76a81c3350a8..f9af3218bd9c 100644 --- a/arch/powerpc/platforms/8xx/Makefile +++ b/arch/powerpc/platforms/8xx/Makefile @@ -1,7 +1,7 @@ # # Makefile for the PowerPC 8xx linux kernel. 
# -obj-$(CONFIG_PPC_8xx) += m8xx_setup.o +obj-y += m8xx_setup.o machine_check.o pic.o obj-$(CONFIG_MPC885ADS) += mpc885ads_setup.o obj-$(CONFIG_MPC86XADS) += mpc86xads_setup.o obj-$(CONFIG_PPC_EP88XC) += ep88xc.o diff --git a/arch/powerpc/platforms/8xx/m8xx_setup.c b/arch/powerpc/platforms/8xx/m8xx_setup.c index f81069f79a94..1917d69f84df 100644 --- a/arch/powerpc/platforms/8xx/m8xx_setup.c +++ b/arch/powerpc/platforms/8xx/m8xx_setup.c @@ -23,7 +23,7 @@ #include #include -#include +#include "pic.h" #include "mpc8xx.h" diff --git a/arch/powerpc/platforms/8xx/machine_check.c b/arch/powerpc/platforms/8xx/machine_check.c new file mode 100644 index 000000000000..402016705a39 --- /dev/null +++ b/arch/powerpc/platforms/8xx/machine_check.c @@ -0,0 +1,37 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include + +#include + +int machine_check_8xx(struct pt_regs *regs) +{ + unsigned long reason = regs->msr; + + pr_err("Machine check in kernel mode.\n"); + pr_err("Caused by (from SRR1=%lx): ", reason); + if (reason & 0x40000000) + pr_err("Fetch error at address %lx\n", regs->nip); + else + pr_err("Data access error at address %lx\n", regs->dar); + +#ifdef CONFIG_PCI + /* the qspan pci read routines can cause machine checks -- Cort + * + * yuck !!! that totally needs to go away ! There are better ways + * to deal with that than having a wart in the mcheck handler. + * -- BenH + */ + bad_page_fault(regs, regs->dar, SIGBUS); + return 1; +#else + return 0; +#endif +} diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/platforms/8xx/pic.c similarity index 99% rename from arch/powerpc/sysdev/mpc8xx_pic.c rename to arch/powerpc/platforms/8xx/pic.c index 2842f9d63d21..8d5a25d43ef3 100644 --- a/arch/powerpc/sysdev/mpc8xx_pic.c +++ b/arch/powerpc/platforms/8xx/pic.c @@ -9,7 +9,7 @@ #include #include -#include "mpc8xx_pic.h" +#include "pic.h" #define PIC_VEC_SPURRIOUS 15 diff --git a/arch/powerpc/sysdev/mpc8xx_pic.h b/arch/powerpc/platforms/8xx/pic.h similarity index 100% rename from arch/powerpc/sysdev/mpc8xx_pic.h rename to arch/powerpc/platforms/8xx/pic.h diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 2f629e0551e9..13663efc1d31 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -32,7 +32,6 @@ config PPC_85xx config PPC_8xx bool "Freescale 8xx" select FSL_SOC - select 8xx select PPC_LIB_RHEAP select SYS_SUPPORTS_HUGETLBFS @@ -149,10 +148,6 @@ config 6xx depends on PPC32 && PPC_BOOK3S select PPC_HAVE_PMU_SUPPORT -# this is temp to handle compat with arch=ppc -config 8xx - bool - config E500 select FSL_EMB_PERFMON select PPC_FSL_BOOK3E @@ -271,44 +266,6 @@ config VSX If in doubt, say Y here. -config PPC_ICSWX - bool "Support for PowerPC icswx coprocessor instruction" - depends on PPC_BOOK3S_64 - default n - ---help--- - - This option enables kernel support for the PowerPC Initiate - Coprocessor Store Word (icswx) coprocessor instruction on POWER7 - and POWER8 processors. POWER9 uses new copy/paste instructions - to invoke the coprocessor. - - This option is only useful if you have a processor that supports - the icswx coprocessor instruction. It does not have any effect - on processors without the icswx coprocessor instruction. 
- - This option slightly increases kernel memory usage. - - If in doubt, say N here. - -config PPC_ICSWX_PID - bool "icswx requires direct PID management" - depends on PPC_ICSWX - default y - ---help--- - The PID register in server is used explicitly for ICSWX. In - embedded systems PID management is done by the system. - -config PPC_ICSWX_USE_SIGILL - bool "Should a bad CT cause a SIGILL?" - depends on PPC_ICSWX - default n - ---help--- - Should a bad CT used for "non-record form ICSWX" cause an - illegal instruction signal or should it be silent as - architected. - - If in doubt, say N here. - config SPE_POSSIBLE def_bool y depends on E200 || (E500 && !PPC_E500MC) @@ -413,7 +370,7 @@ config NR_CPUS config NOT_COHERENT_CACHE bool - depends on 4xx || 8xx || E200 || PPC_MPC512x || GAMECUBE_COMMON + depends on 4xx || PPC_8xx || E200 || PPC_MPC512x || GAMECUBE_COMMON default n if PPC_47x default y diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile index 469ef170d218..d7a55ecfaee5 100644 --- a/arch/powerpc/platforms/Makefile +++ b/arch/powerpc/platforms/Makefile @@ -5,6 +5,7 @@ obj-$(CONFIG_FSL_ULI1575) += fsl_uli1575.o obj-$(CONFIG_PPC_PMAC) += powermac/ obj-$(CONFIG_PPC_CHRP) += chrp/ +obj-$(CONFIG_4xx) += 4xx/ obj-$(CONFIG_40x) += 40x/ obj-$(CONFIG_44x) += 44x/ obj-$(CONFIG_PPC_MPC512x) += 512x/ diff --git a/arch/powerpc/platforms/amigaone/setup.c b/arch/powerpc/platforms/amigaone/setup.c index 45cb9821173c..b9d466cc2b8a 100644 --- a/arch/powerpc/platforms/amigaone/setup.c +++ b/arch/powerpc/platforms/amigaone/setup.c @@ -40,7 +40,7 @@ static int __init amigaone_add_bridge(struct device_node *dev) const int *bus_range; struct pci_controller *hose; - printk(KERN_INFO "Adding PCI host bridge %s\n", dev->full_name); + printk(KERN_INFO "Adding PCI host bridge %pOF\n", dev); cfg_addr = of_get_address(dev, 0, NULL, NULL); cfg_data = of_get_address(dev, 1, NULL, NULL); @@ -49,8 +49,8 @@ static int __init amigaone_add_bridge(struct device_node *dev) bus_range = of_get_property(dev, "bus-range", &len); if ((bus_range == NULL) || (len < 2 * sizeof(int))) - printk(KERN_WARNING "Can't get bus-range for %s, assume" - " bus 0\n", dev->full_name); + printk(KERN_WARNING "Can't get bus-range for %pOF, assume" + " bus 0\n", dev); hose = pcibios_alloc_controller(dev); if (hose == NULL) diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c index 8d3ae2cc52bf..6ea3f248b155 100644 --- a/arch/powerpc/platforms/cell/axon_msi.c +++ b/arch/powerpc/platforms/cell/axon_msi.c @@ -187,8 +187,8 @@ static struct axon_msic *find_msi_translator(struct pci_dev *dev) irq_domain = irq_find_host(dn); if (!irq_domain) { - dev_dbg(&dev->dev, "axon_msi: no irq_domain found for node %s\n", - dn->full_name); + dev_dbg(&dev->dev, "axon_msi: no irq_domain found for node %pOF\n", + dn); goto out_error; } @@ -326,8 +326,8 @@ static void axon_msi_shutdown(struct platform_device *device) struct axon_msic *msic = dev_get_drvdata(&device->dev); u32 tmp; - pr_devel("axon_msi: disabling %s\n", - irq_domain_get_of_node(msic->irq_domain)->full_name); + pr_devel("axon_msi: disabling %pOF\n", + irq_domain_get_of_node(msic->irq_domain)); tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG); tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE; msic_dcr_write(msic, MSIC_CTRL_REG, tmp); @@ -340,12 +340,12 @@ static int axon_msi_probe(struct platform_device *device) unsigned int virq; int dcr_base, dcr_len; - pr_devel("axon_msi: setting up dn %s\n", dn->full_name); + pr_devel("axon_msi: setting 
up dn %pOF\n", dn); msic = kzalloc(sizeof(struct axon_msic), GFP_KERNEL); if (!msic) { - printk(KERN_ERR "axon_msi: couldn't allocate msic for %s\n", - dn->full_name); + printk(KERN_ERR "axon_msi: couldn't allocate msic for %pOF\n", + dn); goto out; } @@ -354,30 +354,30 @@ static int axon_msi_probe(struct platform_device *device) if (dcr_base == 0 || dcr_len == 0) { printk(KERN_ERR - "axon_msi: couldn't parse dcr properties on %s\n", - dn->full_name); + "axon_msi: couldn't parse dcr properties on %pOF\n", + dn); goto out_free_msic; } msic->dcr_host = dcr_map(dn, dcr_base, dcr_len); if (!DCR_MAP_OK(msic->dcr_host)) { - printk(KERN_ERR "axon_msi: dcr_map failed for %s\n", - dn->full_name); + printk(KERN_ERR "axon_msi: dcr_map failed for %pOF\n", + dn); goto out_free_msic; } msic->fifo_virt = dma_alloc_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES, &msic->fifo_phys, GFP_KERNEL); if (!msic->fifo_virt) { - printk(KERN_ERR "axon_msi: couldn't allocate fifo for %s\n", - dn->full_name); + printk(KERN_ERR "axon_msi: couldn't allocate fifo for %pOF\n", + dn); goto out_free_msic; } virq = irq_of_parse_and_map(dn, 0); if (!virq) { - printk(KERN_ERR "axon_msi: irq parse and map failed for %s\n", - dn->full_name); + printk(KERN_ERR "axon_msi: irq parse and map failed for %pOF\n", + dn); goto out_free_fifo; } memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES); @@ -385,8 +385,8 @@ static int axon_msi_probe(struct platform_device *device) /* We rely on being able to stash a virq in a u16, so limit irqs to < 65536 */ msic->irq_domain = irq_domain_add_nomap(dn, 65536, &msic_host_ops, msic); if (!msic->irq_domain) { - printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %s\n", - dn->full_name); + printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %pOF\n", + dn); goto out_free_fifo; } @@ -412,7 +412,7 @@ static int axon_msi_probe(struct platform_device *device) axon_msi_debug_setup(dn, msic); - printk(KERN_DEBUG "axon_msi: setup MSIC on %s\n", dn->full_name); + printk(KERN_DEBUG "axon_msi: setup MSIC on %pOF\n", dn); return 0; diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c index 871d38479a25..6fc85e29dc08 100644 --- a/arch/powerpc/platforms/cell/interrupt.c +++ b/arch/powerpc/platforms/cell/interrupt.c @@ -303,8 +303,8 @@ static void __init init_one_iic(unsigned int hw_cpu, unsigned long addr, iic->node = of_node_get(node); out_be64(&iic->regs->prio, 0); - printk(KERN_INFO "IIC for CPU %d target id 0x%x : %s\n", - hw_cpu, iic->target_id, node->full_name); + printk(KERN_INFO "IIC for CPU %d target id 0x%x : %pOF\n", + hw_cpu, iic->target_id, node); } static int __init setup_iic(void) diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c index 29d4f96ed33e..4b91ad08eefd 100644 --- a/arch/powerpc/platforms/cell/iommu.c +++ b/arch/powerpc/platforms/cell/iommu.c @@ -278,8 +278,8 @@ static int cell_iommu_find_ioc(int nid, unsigned long *base) if (of_node_to_nid(np) != nid) continue; if (of_address_to_resource(np, 0, &r)) { - printk(KERN_ERR "iommu: can't get address for %s\n", - np->full_name); + printk(KERN_ERR "iommu: can't get address for %pOF\n", + np); continue; } *base = r.start; @@ -458,8 +458,8 @@ static inline u32 cell_iommu_get_ioid(struct device_node *np) ioid = of_get_property(np, "ioid", NULL); if (ioid == NULL) { - printk(KERN_WARNING "iommu: missing ioid for %s using 0\n", - np->full_name); + printk(KERN_WARNING "iommu: missing ioid for %pOF using 0\n", + np); return 0; } @@ -559,8 +559,8 @@ static struct 
iommu_table *cell_get_iommu_table(struct device *dev) */ iommu = cell_iommu_for_node(dev_to_node(dev)); if (iommu == NULL || list_empty(&iommu->windows)) { - dev_err(dev, "iommu: missing iommu for %s (node %d)\n", - of_node_full_name(dev->of_node), dev_to_node(dev)); + dev_err(dev, "iommu: missing iommu for %pOF (node %d)\n", + dev->of_node, dev_to_node(dev)); return NULL; } window = list_entry(iommu->windows.next, struct iommu_window, list); @@ -720,12 +720,12 @@ static struct cbe_iommu * __init cell_iommu_alloc(struct device_node *np) /* Get node ID */ nid = of_node_to_nid(np); if (nid < 0) { - printk(KERN_ERR "iommu: failed to get node for %s\n", - np->full_name); + printk(KERN_ERR "iommu: failed to get node for %pOF\n", + np); return NULL; } - pr_debug("iommu: setting up iommu for node %d (%s)\n", - nid, np->full_name); + pr_debug("iommu: setting up iommu for node %d (%pOF)\n", + nid, np); /* XXX todo: If we can have multiple windows on the same IOMMU, which * isn't the case today, we probably want here to check whether the @@ -736,8 +736,8 @@ static struct cbe_iommu * __init cell_iommu_alloc(struct device_node *np) */ if (cbe_nr_iommus >= NR_IOMMUS) { - printk(KERN_ERR "iommu: too many IOMMUs detected ! (%s)\n", - np->full_name); + printk(KERN_ERR "iommu: too many IOMMUs detected ! (%pOF)\n", + np); return NULL; } diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c index 460ab392f0e7..2f704afe9af3 100644 --- a/arch/powerpc/platforms/cell/ras.c +++ b/arch/powerpc/platforms/cell/ras.c @@ -196,8 +196,8 @@ static int __init cbe_ptcal_enable(void) for_each_node_by_type(np, "cpu") { const u32 *nid = of_get_property(np, "node-id", NULL); if (!nid) { - printk(KERN_ERR "%s: node %s is missing node-id?\n", - __func__, np->full_name); + printk(KERN_ERR "%s: node %pOF is missing node-id?\n", + __func__, np); continue; } cbe_ptcal_enable_on_node(*nid, order); diff --git a/arch/powerpc/platforms/cell/spider-pci.c b/arch/powerpc/platforms/cell/spider-pci.c index f1f7878893f3..d1e61e273e64 100644 --- a/arch/powerpc/platforms/cell/spider-pci.c +++ b/arch/powerpc/platforms/cell/spider-pci.c @@ -130,8 +130,8 @@ int __init spiderpci_iowa_init(struct iowa_bus *bus, void *data) struct resource r; unsigned long offset = (unsigned long)data; - pr_debug("SPIDERPCI-IOWA:Bus initialize for spider(%s)\n", - np->full_name); + pr_debug("SPIDERPCI-IOWA:Bus initialize for spider(%pOF)\n", + np); priv = kzalloc(sizeof(struct spiderpci_iowa_private), GFP_KERNEL); if (!priv) { diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c index ff924af00e78..aa44bfc46467 100644 --- a/arch/powerpc/platforms/cell/spider-pic.c +++ b/arch/powerpc/platforms/cell/spider-pic.c @@ -323,8 +323,8 @@ static void __init spider_init_one(struct device_node *of_node, int chip, irq_set_handler_data(virq, pic); irq_set_chained_handler(virq, spider_irq_cascade); - printk(KERN_INFO "spider_pic: node %d, addr: 0x%lx %s\n", - pic->node_id, addr, of_node->full_name); + printk(KERN_INFO "spider_pic: node %d, addr: 0x%lx %pOF\n", + pic->node_id, addr, of_node); /* Enable the interrupt detection enable bit. Do this last! 
*/ out_be32(pic->regs + TIR_DEN, in_be32(pic->regs + TIR_DEN) | 0x1); diff --git a/arch/powerpc/platforms/cell/spu_manage.c b/arch/powerpc/platforms/cell/spu_manage.c index 672d310dcf14..f636ee22b203 100644 --- a/arch/powerpc/platforms/cell/spu_manage.c +++ b/arch/powerpc/platforms/cell/spu_manage.c @@ -191,8 +191,8 @@ static int __init spu_map_interrupts(struct spu *spu, struct device_node *np) goto err; } ret = -EINVAL; - pr_debug(" irq %d no 0x%x on %s\n", i, oirq.args[0], - oirq.np->full_name); + pr_debug(" irq %d no 0x%x on %pOF\n", i, oirq.args[0], + oirq.np); spu->irqs[i] = irq_create_of_mapping(&oirq); if (!spu->irqs[i]) { pr_debug("spu_new: failed to map it !\n"); @@ -243,32 +243,32 @@ static int __init spu_map_device(struct spu *spu) ret = spu_map_resource(spu, 0, (void __iomem**)&spu->local_store, &spu->local_store_phys); if (ret) { - pr_debug("spu_new: failed to map %s resource 0\n", - np->full_name); + pr_debug("spu_new: failed to map %pOF resource 0\n", + np); goto out; } ret = spu_map_resource(spu, 1, (void __iomem**)&spu->problem, &spu->problem_phys); if (ret) { - pr_debug("spu_new: failed to map %s resource 1\n", - np->full_name); + pr_debug("spu_new: failed to map %pOF resource 1\n", + np); goto out_unmap; } ret = spu_map_resource(spu, 2, (void __iomem**)&spu->priv2, NULL); if (ret) { - pr_debug("spu_new: failed to map %s resource 2\n", - np->full_name); + pr_debug("spu_new: failed to map %pOF resource 2\n", + np); goto out_unmap; } if (!firmware_has_feature(FW_FEATURE_LPAR)) ret = spu_map_resource(spu, 3, (void __iomem**)&spu->priv1, NULL); if (ret) { - pr_debug("spu_new: failed to map %s resource 3\n", - np->full_name); + pr_debug("spu_new: failed to map %pOF resource 3\n", + np); goto out_unmap; } - pr_debug("spu_new: %s maps:\n", np->full_name); + pr_debug("spu_new: %pOF maps:\n", np); pr_debug(" local store : 0x%016lx -> 0x%p\n", spu->local_store_phys, spu->local_store); pr_debug(" problem state : 0x%016lx -> 0x%p\n", @@ -316,8 +316,8 @@ static int __init of_create_spu(struct spu *spu, void *data) spu->node = of_node_to_nid(spe); if (spu->node >= MAX_NUMNODES) { - printk(KERN_WARNING "SPE %s on node %d ignored," - " node number too big\n", spe->full_name, spu->node); + printk(KERN_WARNING "SPE %pOF on node %d ignored," + " node number too big\n", spe, spu->node); printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n"); ret = -ENODEV; goto out; diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c index ae2f740a82f1..5ffcdeb1eb17 100644 --- a/arch/powerpc/platforms/cell/spufs/file.c +++ b/arch/powerpc/platforms/cell/spufs/file.c @@ -1749,7 +1749,7 @@ static int spufs_mfc_flush(struct file *file, fl_owner_t id) static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct inode *inode = file_inode(file); - int err = filemap_write_and_wait_range(inode->i_mapping, start, end); + int err = file_write_and_wait_range(file, start, end); if (!err) { inode_lock(inode); err = spufs_mfc_flush(file, NULL); diff --git a/arch/powerpc/platforms/chrp/pci.c b/arch/powerpc/platforms/chrp/pci.c index 1b87e198faa7..27264794f5c0 100644 --- a/arch/powerpc/platforms/chrp/pci.c +++ b/arch/powerpc/platforms/chrp/pci.c @@ -235,14 +235,14 @@ chrp_find_bridges(void) ++index; /* The GG2 bridge on the LongTrail doesn't have an address */ if (of_address_to_resource(dev, 0, &r) && !is_longtrail) { - printk(KERN_WARNING "Can't use %s: no address\n", - dev->full_name); + printk(KERN_WARNING "Can't use %pOF: no address\n", + 
dev); continue; } bus_range = of_get_property(dev, "bus-range", &len); if (bus_range == NULL || len < 2 * sizeof(int)) { - printk(KERN_WARNING "Can't get bus-range for %s\n", - dev->full_name); + printk(KERN_WARNING "Can't get bus-range for %pOF\n", + dev); continue; } if (bus_range[1] == bus_range[0]) @@ -250,15 +250,15 @@ chrp_find_bridges(void) else printk(KERN_INFO "PCI buses %d..%d", bus_range[0], bus_range[1]); - printk(" controlled by %s", dev->full_name); + printk(" controlled by %pOF", dev); if (!is_longtrail) printk(" at %llx", (unsigned long long)r.start); printk("\n"); hose = pcibios_alloc_controller(dev); if (!hose) { - printk("Can't allocate PCI controller structure for %s\n", - dev->full_name); + printk("Can't allocate PCI controller structure for %pOF\n", + dev); continue; } hose->first_busno = hose->self_busno = bus_range[0]; @@ -297,8 +297,8 @@ chrp_find_bridges(void) } } } else { - printk("No methods for %s (model %s), using RTAS\n", - dev->full_name, model); + printk("No methods for %pOF (model %s), using RTAS\n", + dev, model); hose->ops = &rtas_pci_ops; } diff --git a/arch/powerpc/platforms/chrp/pegasos_eth.c b/arch/powerpc/platforms/chrp/pegasos_eth.c index 2b4dc6abde6c..19760712b39d 100644 --- a/arch/powerpc/platforms/chrp/pegasos_eth.c +++ b/arch/powerpc/platforms/chrp/pegasos_eth.c @@ -63,7 +63,7 @@ static struct platform_device mv643xx_eth_mvmdio_device = { .name = "orion-mdio", .id = -1, .num_resources = ARRAY_SIZE(mv643xx_eth_mvmdio_resources), - .resource = mv643xx_eth_shared_resources, + .resource = mv643xx_eth_mvmdio_resources, }; static struct resource mv643xx_eth_port1_resources[] = { diff --git a/arch/powerpc/platforms/embedded6xx/linkstation.c b/arch/powerpc/platforms/embedded6xx/linkstation.c index f29cf29b11f8..f514d5d28cd4 100644 --- a/arch/powerpc/platforms/embedded6xx/linkstation.c +++ b/arch/powerpc/platforms/embedded6xx/linkstation.c @@ -41,12 +41,12 @@ static int __init linkstation_add_bridge(struct device_node *dev) struct pci_controller *hose; const int *bus_range; - printk("Adding PCI host bridge %s\n", dev->full_name); + printk("Adding PCI host bridge %pOF\n", dev); bus_range = of_get_property(dev, "bus-range", &len); if (bus_range == NULL || len < 2 * sizeof(int)) - printk(KERN_WARNING "Can't get bus-range for %s, assume" - " bus 0\n", dev->full_name); + printk(KERN_WARNING "Can't get bus-range for %pOF, assume" + " bus 0\n", dev); hose = pcibios_alloc_controller(dev); if (hose == NULL) diff --git a/arch/powerpc/platforms/embedded6xx/mvme5100.c b/arch/powerpc/platforms/embedded6xx/mvme5100.c index 8e3590941960..273dfa3f0252 100644 --- a/arch/powerpc/platforms/embedded6xx/mvme5100.c +++ b/arch/powerpc/platforms/embedded6xx/mvme5100.c @@ -115,7 +115,7 @@ static int __init mvme5100_add_bridge(struct device_node *dev) struct pci_controller *hose; unsigned short devid; - pr_info("Adding PCI host bridge %s\n", dev->full_name); + pr_info("Adding PCI host bridge %pOF\n", dev); bus_range = of_get_property(dev, "bus-range", &len); diff --git a/arch/powerpc/platforms/embedded6xx/storcenter.c b/arch/powerpc/platforms/embedded6xx/storcenter.c index 471a50bcd074..ed1914dd34bb 100644 --- a/arch/powerpc/platforms/embedded6xx/storcenter.c +++ b/arch/powerpc/platforms/embedded6xx/storcenter.c @@ -44,7 +44,7 @@ static int __init storcenter_add_bridge(struct device_node *dev) struct pci_controller *hose; const int *bus_range; - printk("Adding PCI host bridge %s\n", dev->full_name); + printk("Adding PCI host bridge %pOF\n", dev); hose = 
pcibios_alloc_controller(dev); if (hose == NULL) diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c index 69794d9389c2..e3821379e86f 100644 --- a/arch/powerpc/platforms/maple/pci.c +++ b/arch/powerpc/platforms/maple/pci.c @@ -73,8 +73,8 @@ static void __init fixup_bus_range(struct device_node *bridge) /* Lookup the "bus-range" property for the hose */ prop = of_find_property(bridge, "bus-range", &len); if (prop == NULL || prop->value == NULL || len < 2 * sizeof(int)) { - printk(KERN_WARNING "Can't get bus-range for %s\n", - bridge->full_name); + printk(KERN_WARNING "Can't get bus-range for %pOF\n", + bridge); return; } bus_range = prop->value; @@ -498,12 +498,12 @@ static int __init maple_add_bridge(struct device_node *dev) const int *bus_range; int primary = 1; - DBG("Adding PCI host bridge %s\n", dev->full_name); + DBG("Adding PCI host bridge %pOF\n", dev); bus_range = of_get_property(dev, "bus-range", &len); if (bus_range == NULL || len < 2 * sizeof(int)) { - printk(KERN_WARNING "Can't get bus-range for %s, assume bus 0\n", - dev->full_name); + printk(KERN_WARNING "Can't get bus-range for %pOF, assume bus 0\n", + dev); } hose = pcibios_alloc_controller(dev); diff --git a/arch/powerpc/platforms/pasemi/pci.c b/arch/powerpc/platforms/pasemi/pci.c index 10c4e8fc6ea9..5ff6108f19e9 100644 --- a/arch/powerpc/platforms/pasemi/pci.c +++ b/arch/powerpc/platforms/pasemi/pci.c @@ -193,7 +193,7 @@ static int __init pas_add_bridge(struct device_node *dev) { struct pci_controller *hose; - pr_debug("Adding PCI host bridge %s\n", dev->full_name); + pr_debug("Adding PCI host bridge %pOF\n", dev); hose = pcibios_alloc_controller(dev); if (!hose) diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c index 1e02328c3f2d..9e3f39d36e88 100644 --- a/arch/powerpc/platforms/powermac/feature.c +++ b/arch/powerpc/platforms/powermac/feature.c @@ -2658,25 +2658,25 @@ static void __init probe_one_macio(const char *name, const char *compat, int typ if (i >= MAX_MACIO_CHIPS) { printk(KERN_ERR "pmac_feature: Please increase MAX_MACIO_CHIPS !\n"); - printk(KERN_ERR "pmac_feature: %s skipped\n", node->full_name); + printk(KERN_ERR "pmac_feature: %pOF skipped\n", node); return; } addrp = of_get_pci_address(node, 0, &size, NULL); if (addrp == NULL) { - printk(KERN_ERR "pmac_feature: %s: can't find base !\n", - node->full_name); + printk(KERN_ERR "pmac_feature: %pOF: can't find base !\n", + node); return; } addr = of_translate_address(node, addrp); if (addr == 0) { - printk(KERN_ERR "pmac_feature: %s, can't translate base !\n", - node->full_name); + printk(KERN_ERR "pmac_feature: %pOF, can't translate base !\n", + node); return; } base = ioremap(addr, (unsigned long)size); if (!base) { - printk(KERN_ERR "pmac_feature: %s, can't map mac-io chip !\n", - node->full_name); + printk(KERN_ERR "pmac_feature: %pOF, can't map mac-io chip !\n", + node); return; } if (type == macio_keylargo || type == macio_keylargo2) { diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c index f627c9fd7b48..70183eb3d5c8 100644 --- a/arch/powerpc/platforms/powermac/low_i2c.c +++ b/arch/powerpc/platforms/powermac/low_i2c.c @@ -494,8 +494,8 @@ static struct pmac_i2c_host_kw *__init kw_i2c_host_init(struct device_node *np) host = kzalloc(sizeof(struct pmac_i2c_host_kw), GFP_KERNEL); if (host == NULL) { - printk(KERN_ERR "low_i2c: Can't allocate host for %s\n", - np->full_name); + printk(KERN_ERR "low_i2c: Can't allocate host for 
%pOF\n", + np); return NULL; } @@ -505,8 +505,8 @@ static struct pmac_i2c_host_kw *__init kw_i2c_host_init(struct device_node *np) */ addrp = of_get_property(np, "AAPL,address", NULL); if (addrp == NULL) { - printk(KERN_ERR "low_i2c: Can't find address for %s\n", - np->full_name); + printk(KERN_ERR "low_i2c: Can't find address for %pOF\n", + np); kfree(host); return NULL; } @@ -538,13 +538,13 @@ static struct pmac_i2c_host_kw *__init kw_i2c_host_init(struct device_node *np) host->irq = irq_of_parse_and_map(np, 0); if (!host->irq) printk(KERN_WARNING - "low_i2c: Failed to map interrupt for %s\n", - np->full_name); + "low_i2c: Failed to map interrupt for %pOF\n", + np); host->base = ioremap((*addrp), 0x1000); if (host->base == NULL) { - printk(KERN_ERR "low_i2c: Can't map registers for %s\n", - np->full_name); + printk(KERN_ERR "low_i2c: Can't map registers for %pOF\n", + np); kfree(host); return NULL; } @@ -560,8 +560,8 @@ static struct pmac_i2c_host_kw *__init kw_i2c_host_init(struct device_node *np) "keywest i2c", host)) host->irq = 0; - printk(KERN_INFO "KeyWest i2c @0x%08x irq %d %s\n", - *addrp, host->irq, np->full_name); + printk(KERN_INFO "KeyWest i2c @0x%08x irq %d %pOF\n", + *addrp, host->irq, np); return host; } @@ -798,7 +798,7 @@ static void __init pmu_i2c_probe(void) if (busnode == NULL) return; - printk(KERN_INFO "PMU i2c %s\n", busnode->full_name); + printk(KERN_INFO "PMU i2c %pOF\n", busnode); /* * We add bus 1 and 2 only for now, bus 0 is "special" @@ -913,7 +913,7 @@ static void __init smu_i2c_probe(void) if (controller == NULL) return; - printk(KERN_INFO "SMU i2c %s\n", controller->full_name); + printk(KERN_INFO "SMU i2c %pOF\n", controller); /* Look for childs, note that they might not be of the right * type as older device trees mix i2c busses and other things @@ -945,8 +945,8 @@ static void __init smu_i2c_probe(void) bus->flags = 0; list_add(&bus->link, &pmac_i2c_busses); - printk(KERN_INFO " channel %x bus %s\n", - bus->channel, busnode->full_name); + printk(KERN_INFO " channel %x bus %pOF\n", + bus->channel, busnode); } } @@ -1129,7 +1129,7 @@ int pmac_i2c_setmode(struct pmac_i2c_bus *bus, int mode) */ if (mode < pmac_i2c_mode_dumb || mode > pmac_i2c_mode_combined) { printk(KERN_ERR "low_i2c: Invalid mode %d requested on" - " bus %s !\n", mode, bus->busnode->full_name); + " bus %pOF !\n", mode, bus->busnode); return -EINVAL; } bus->mode = mode; @@ -1146,8 +1146,8 @@ int pmac_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize, WARN_ON(!bus->opened); DBG("xfer() chan=%d, addrdir=0x%x, mode=%d, subsize=%d, subaddr=0x%x," - " %d bytes, bus %s\n", bus->channel, addrdir, bus->mode, subsize, - subaddr, len, bus->busnode->full_name); + " %d bytes, bus %pOF\n", bus->channel, addrdir, bus->mode, subsize, + subaddr, len, bus->busnode); rc = bus->xfer(bus, addrdir, subsize, subaddr, data, len); @@ -1241,13 +1241,13 @@ static void* pmac_i2c_do_begin(struct pmf_function *func, struct pmf_args *args) bus = pmac_i2c_find_bus(func->node); if (bus == NULL) { - printk(KERN_ERR "low_i2c: Can't find bus for %s (pfunc)\n", - func->node->full_name); + printk(KERN_ERR "low_i2c: Can't find bus for %pOF (pfunc)\n", + func->node); return NULL; } if (pmac_i2c_open(bus, 0)) { - printk(KERN_ERR "low_i2c: Can't open i2c bus for %s (pfunc)\n", - func->node->full_name); + printk(KERN_ERR "low_i2c: Can't open i2c bus for %pOF (pfunc)\n", + func->node); return NULL; } @@ -1417,7 +1417,7 @@ static struct pmf_handlers pmac_i2c_pfunc_handlers = { static void __init pmac_i2c_dev_create(struct 
device_node *np, int quirks) { - DBG("dev_create(%s)\n", np->full_name); + DBG("dev_create(%pOF)\n", np); pmf_register_driver(np, &pmac_i2c_pfunc_handlers, (void *)(long)quirks); @@ -1425,20 +1425,20 @@ static void __init pmac_i2c_dev_create(struct device_node *np, int quirks) static void __init pmac_i2c_dev_init(struct device_node *np, int quirks) { - DBG("dev_create(%s)\n", np->full_name); + DBG("dev_create(%pOF)\n", np); pmf_do_functions(np, NULL, 0, PMF_FLAGS_ON_INIT, NULL); } static void pmac_i2c_dev_suspend(struct device_node *np, int quirks) { - DBG("dev_suspend(%s)\n", np->full_name); + DBG("dev_suspend(%pOF)\n", np); pmf_do_functions(np, NULL, 0, PMF_FLAGS_ON_SLEEP, NULL); } static void pmac_i2c_dev_resume(struct device_node *np, int quirks) { - DBG("dev_resume(%s)\n", np->full_name); + DBG("dev_resume(%pOF)\n", np); pmf_do_functions(np, NULL, 0, PMF_FLAGS_ON_WAKE, NULL); } diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c index 6e06c3be2e9a..0b8174a79993 100644 --- a/arch/powerpc/platforms/powermac/pci.c +++ b/arch/powerpc/platforms/powermac/pci.c @@ -783,7 +783,7 @@ static int __init pmac_add_bridge(struct device_node *dev) const int *bus_range; int primary = 1, has_address = 0; - DBG("Adding PCI host bridge %s\n", dev->full_name); + DBG("Adding PCI host bridge %pOF\n", dev); /* Fetch host bridge registers address */ has_address = (of_address_to_resource(dev, 0, &rsrc) == 0); @@ -791,8 +791,8 @@ static int __init pmac_add_bridge(struct device_node *dev) /* Get bus range if any */ bus_range = of_get_property(dev, "bus-range", &len); if (bus_range == NULL || len < 2 * sizeof(int)) { - printk(KERN_WARNING "Can't get bus-range for %s, assume" - " bus 0\n", dev->full_name); + printk(KERN_WARNING "Can't get bus-range for %pOF, assume" + " bus 0\n", dev); } hose = pcibios_alloc_controller(dev); diff --git a/arch/powerpc/platforms/powermac/pfunc_base.c b/arch/powerpc/platforms/powermac/pfunc_base.c index 459138ed4571..860159d46ab8 100644 --- a/arch/powerpc/platforms/powermac/pfunc_base.c +++ b/arch/powerpc/platforms/powermac/pfunc_base.c @@ -54,8 +54,8 @@ static int macio_do_gpio_write(PMF_STD_ARGS, u8 value, u8 mask) raw_spin_lock_irqsave(&feature_lock, flags); tmp = readb(addr); tmp = (tmp & ~mask) | (value & mask); - DBG("Do write 0x%02x to GPIO %s (%p)\n", - tmp, func->node->full_name, addr); + DBG("Do write 0x%02x to GPIO %pOF (%p)\n", + tmp, func->node, addr); writeb(tmp, addr); raw_spin_unlock_irqrestore(&feature_lock, flags); @@ -107,8 +107,8 @@ static void macio_gpio_init_one(struct macio_chip *macio) if (gparent == NULL) return; - DBG("Installing GPIO functions for macio %s\n", - macio->of_node->full_name); + DBG("Installing GPIO functions for macio %pOF\n", + macio->of_node); /* * Ok, got one, we dont need anything special to track them down, so @@ -129,8 +129,8 @@ static void macio_gpio_init_one(struct macio_chip *macio) pmf_register_driver(gp, &macio_gpio_handlers, (void *)offset); } - DBG("Calling initial GPIO functions for macio %s\n", - macio->of_node->full_name); + DBG("Calling initial GPIO functions for macio %pOF\n", + macio->of_node); /* And now we run all the init ones */ for (gp = NULL; (gp = of_get_next_child(gparent, gp)) != NULL;) @@ -267,8 +267,8 @@ static struct pmf_handlers macio_mmio_handlers = { static void macio_mmio_init_one(struct macio_chip *macio) { - DBG("Installing MMIO functions for macio %s\n", - macio->of_node->full_name); + DBG("Installing MMIO functions for macio %pOF\n", + macio->of_node); 
pmf_register_driver(macio->of_node, &macio_mmio_handlers, macio); } @@ -298,8 +298,8 @@ static void uninorth_install_pfunc(void) { struct device_node *np; - DBG("Installing functions for UniN %s\n", - uninorth_node->full_name); + DBG("Installing functions for UniN %pOF\n", + uninorth_node); /* * Install handlers for the bridge itself @@ -317,8 +317,8 @@ static void uninorth_install_pfunc(void) break; } if (unin_hwclock) { - DBG("Installing functions for UniN clock %s\n", - unin_hwclock->full_name); + DBG("Installing functions for UniN clock %pOF\n", + unin_hwclock); pmf_register_driver(unin_hwclock, &unin_mmio_handlers, NULL); pmf_do_functions(unin_hwclock, NULL, 0, PMF_FLAGS_ON_INIT, NULL); diff --git a/arch/powerpc/platforms/powermac/pfunc_core.c b/arch/powerpc/platforms/powermac/pfunc_core.c index 695e8c4d4224..df3c93bef228 100644 --- a/arch/powerpc/platforms/powermac/pfunc_core.c +++ b/arch/powerpc/platforms/powermac/pfunc_core.c @@ -708,7 +708,7 @@ int pmf_register_driver(struct device_node *np, if (handlers == NULL) return -EINVAL; - DBG("pmf: registering driver for node %s\n", np->full_name); + DBG("pmf: registering driver for node %pOF\n", np); spin_lock_irqsave(&pmf_lock, flags); dev = pmf_find_device(np); @@ -781,7 +781,7 @@ void pmf_unregister_driver(struct device_node *np) struct pmf_device *dev; unsigned long flags; - DBG("pmf: unregistering driver for node %s\n", np->full_name); + DBG("pmf: unregistering driver for node %pOF\n", np); spin_lock_irqsave(&pmf_lock, flags); dev = pmf_find_device(np); @@ -940,7 +940,7 @@ int pmf_call_one(struct pmf_function *func, struct pmf_args *args) void *instdata = NULL; int rc = 0; - DBG(" ** pmf_call_one(%s/%s) **\n", dev->node->full_name, func->name); + DBG(" ** pmf_call_one(%pOF/%s) **\n", dev->node, func->name); if (dev->handlers->begin) instdata = dev->handlers->begin(func, args); diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c index f5f9ad7c3398..5e0719b27294 100644 --- a/arch/powerpc/platforms/powermac/pic.c +++ b/arch/powerpc/platforms/powermac/pic.c @@ -364,8 +364,8 @@ static void __init pmac_pic_probe_oldstyle(void) (addr + 0x10); of_node_put(master); - printk(KERN_INFO "irq: Found primary Apple PIC %s for %d irqs\n", - master->full_name, max_real_irqs); + printk(KERN_INFO "irq: Found primary Apple PIC %pOF for %d irqs\n", + master, max_real_irqs); /* Map interrupts of cascaded controller */ if (slave && !of_address_to_resource(slave, 0, &r)) { @@ -378,8 +378,8 @@ static void __init pmac_pic_probe_oldstyle(void) (addr + 0x10); pmac_irq_cascade = irq_of_parse_and_map(slave, 0); - printk(KERN_INFO "irq: Found slave Apple PIC %s for %d irqs" - " cascade: %d\n", slave->full_name, + printk(KERN_INFO "irq: Found slave Apple PIC %pOF for %d irqs" + " cascade: %d\n", slave, max_irqs - max_real_irqs, pmac_irq_cascade); } of_node_put(slave); diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c index 6b4e9d181126..ab668cb72263 100644 --- a/arch/powerpc/platforms/powermac/setup.c +++ b/arch/powerpc/platforms/powermac/setup.c @@ -556,7 +556,7 @@ static int __init check_pmac_serial_console(void) pr_debug(" can't find stdout package %s !\n", name); return -ENODEV; } - pr_debug("stdout is %s\n", prom_stdout->full_name); + pr_debug("stdout is %pOF\n", prom_stdout); name = of_get_property(prom_stdout, "name", NULL); if (!name) { diff --git a/arch/powerpc/platforms/powernv/Kconfig b/arch/powerpc/platforms/powernv/Kconfig index 6a6f4ef46b9e..340cbe263b33 100644 --- 
a/arch/powerpc/platforms/powernv/Kconfig +++ b/arch/powerpc/platforms/powernv/Kconfig @@ -30,3 +30,25 @@ config OPAL_PRD help This enables the opal-prd driver, a facility to run processor recovery diagnostics on OpenPower machines + +config PPC_MEMTRACE + bool "Enable removal of RAM from kernel mappings for tracing" + depends on PPC_POWERNV && MEMORY_HOTREMOVE + default n + help + Enabling this option allows for the removal of memory (RAM) + from the kernel mappings to be used for hardware tracing. + +config PPC_VAS + bool "IBM Virtual Accelerator Switchboard (VAS)" + depends on PPC_POWERNV && PPC_64K_PAGES + default y + help + This enables support for IBM Virtual Accelerator Switchboard (VAS). + + VAS allows accelerators in co-processors like NX-GZIP and NX-842 + to be accessible to kernel subsystems and user processes. + + VAS adapters are found in POWER9 based systems. + + If unsure, say N. diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile index b5d98cb3f482..37d60f7dd86d 100644 --- a/arch/powerpc/platforms/powernv/Makefile +++ b/arch/powerpc/platforms/powernv/Makefile @@ -2,7 +2,7 @@ obj-y += setup.o opal-wrappers.o opal.o opal-async.o idle.o obj-y += opal-rtc.o opal-nvram.o opal-lpc.o opal-flash.o obj-y += rng.o opal-elog.o opal-dump.o opal-sysparam.o opal-sensor.o obj-y += opal-msglog.o opal-hmi.o opal-power.o opal-irqchip.o -obj-y += opal-kmsg.o +obj-y += opal-kmsg.o opal-powercap.o opal-psr.o opal-sensor-groups.o obj-$(CONFIG_SMP) += smp.o subcore.o subcore-asm.o obj-$(CONFIG_PCI) += pci.o pci-ioda.o npu-dma.o @@ -12,3 +12,6 @@ obj-$(CONFIG_PPC_SCOM) += opal-xscom.o obj-$(CONFIG_MEMORY_FAILURE) += opal-memory-errors.o obj-$(CONFIG_TRACEPOINTS) += opal-tracepoints.o obj-$(CONFIG_OPAL_PRD) += opal-prd.o +obj-$(CONFIG_PERF_EVENTS) += opal-imc.o +obj-$(CONFIG_PPC_MEMTRACE) += memtrace.o +obj-$(CONFIG_PPC_VAS) += vas.o vas-window.o diff --git a/arch/powerpc/platforms/powernv/copy-paste.h b/arch/powerpc/platforms/powernv/copy-paste.h new file mode 100644 index 000000000000..c9a503623431 --- /dev/null +++ b/arch/powerpc/platforms/powernv/copy-paste.h @@ -0,0 +1,46 @@ +/* + * Copyright 2016-17 IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#include + +#define CR0_SHIFT 28 +#define CR0_MASK 0xF +/* + * Copy/paste instructions: + * + * copy RA,RB + * Copy contents of address (RA) + effective_address(RB) + * to internal copy-buffer. 
+ * + * paste RA,RB + * Paste contents of internal copy-buffer to the address + * (RA) + effective_address(RB) + */ +static inline int vas_copy(void *crb, int offset) +{ + asm volatile(PPC_COPY(%0, %1)";" + : + : "b" (offset), "b" (crb) + : "memory"); + + return 0; +} + +static inline int vas_paste(void *paste_address, int offset) +{ + u32 cr; + + cr = 0; + asm volatile(PPC_PASTE(%1, %2)";" + "mfocrf %0, 0x80;" + : "=r" (cr) + : "b" (offset), "b" (paste_address) + : "memory", "cr0"); + + return (cr >> CR0_SHIFT) & CR0_MASK; +} diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c index 3f48f6df1cf3..8864065eba22 100644 --- a/arch/powerpc/platforms/powernv/eeh-powernv.c +++ b/arch/powerpc/platforms/powernv/eeh-powernv.c @@ -113,7 +113,6 @@ static ssize_t pnv_eeh_ei_write(struct file *filp, size_t count, loff_t *ppos) { struct pci_controller *hose = filp->private_data; - struct eeh_dev *edev; struct eeh_pe *pe; int pe_no, type, func; unsigned long addr, mask; @@ -135,13 +134,7 @@ static ssize_t pnv_eeh_ei_write(struct file *filp, return -EINVAL; /* Retrieve PE */ - edev = kzalloc(sizeof(*edev), GFP_KERNEL); - if (!edev) - return -ENOMEM; - edev->phb = hose; - edev->pe_config_addr = pe_no; - pe = eeh_pe_get(edev); - kfree(edev); + pe = eeh_pe_get(hose, pe_no, 0); if (!pe) return -ENODEV; @@ -359,6 +352,7 @@ static void *pnv_eeh_probe(struct pci_dn *pdn, void *data) struct eeh_dev *edev = pdn_to_eeh_dev(pdn); uint32_t pcie_flags; int ret; + int config_addr = (pdn->busno << 8) | (pdn->devfn); /* * When probing the root bridge, which doesn't have any @@ -393,8 +387,7 @@ static void *pnv_eeh_probe(struct pci_dn *pdn, void *data) } } - edev->config_addr = (pdn->busno << 8) | (pdn->devfn); - edev->pe_config_addr = phb->ioda.pe_rmap[edev->config_addr]; + edev->pe_config_addr = phb->ioda.pe_rmap[config_addr]; /* Create PE */ ret = eeh_add_to_parent_pe(edev); @@ -933,7 +926,6 @@ void pnv_pci_reset_secondary_bus(struct pci_dev *dev) static void pnv_eeh_wait_for_pending(struct pci_dn *pdn, const char *type, int pos, u16 mask) { - struct eeh_dev *edev = pdn_to_eeh_dev(pdn); int i, status = 0; /* Wait for Transaction Pending bit to be cleared */ @@ -947,7 +939,7 @@ static void pnv_eeh_wait_for_pending(struct pci_dn *pdn, const char *type, pr_warn("%s: Pending transaction while issuing %sFLR to %04x:%02x:%02x.%01x\n", __func__, type, - edev->phb->global_number, pdn->busno, + pdn->phb->global_number, pdn->busno, PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn)); } @@ -1381,7 +1373,6 @@ static int pnv_eeh_get_pe(struct pci_controller *hose, struct pnv_phb *phb = hose->private_data; struct pnv_ioda_pe *pnv_pe; struct eeh_pe *dev_pe; - struct eeh_dev edev; /* * If PHB supports compound PE, to fetch @@ -1397,10 +1388,7 @@ static int pnv_eeh_get_pe(struct pci_controller *hose, } /* Find the PE according to PE# */ - memset(&edev, 0, sizeof(struct eeh_dev)); - edev.phb = hose; - edev.pe_config_addr = pe_no; - dev_pe = eeh_pe_get(&edev); + dev_pe = eeh_pe_get(hose, pe_no, 0); if (!dev_pe) return -EEXIST; @@ -1711,6 +1699,7 @@ static int pnv_eeh_restore_config(struct pci_dn *pdn) struct eeh_dev *edev = pdn_to_eeh_dev(pdn); struct pnv_phb *phb; s64 ret; + int config_addr = (pdn->busno << 8) | (pdn->devfn); if (!edev) return -EEXIST; @@ -1725,14 +1714,14 @@ static int pnv_eeh_restore_config(struct pci_dn *pdn) if (edev->physfn) { ret = pnv_eeh_restore_vf_config(pdn); } else { - phb = edev->phb->private_data; + phb = pdn->phb->private_data; ret = 
opal_pci_reinit(phb->opal_id, - OPAL_REINIT_PCI_DEV, edev->config_addr); + OPAL_REINIT_PCI_DEV, config_addr); } if (ret) { pr_warn("%s: Can't reinit PCI dev 0x%x (%lld)\n", - __func__, edev->config_addr, ret); + __func__, config_addr, ret); return -EIO; } diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c index a553aeea7af6..443d5ca71995 100644 --- a/arch/powerpc/platforms/powernv/idle.c +++ b/arch/powerpc/platforms/powernv/idle.c @@ -69,7 +69,7 @@ static int pnv_save_sprs_for_deep_states(void) * all cpus at boot. Get these reg values of current cpu and use the * same across all cpus. */ - uint64_t lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1; + uint64_t lpcr_val = mfspr(SPRN_LPCR); uint64_t hid0_val = mfspr(SPRN_HID0); uint64_t hid1_val = mfspr(SPRN_HID1); uint64_t hid4_val = mfspr(SPRN_HID4); @@ -388,6 +388,20 @@ void power9_idle(void) } #ifdef CONFIG_HOTPLUG_CPU +static void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val) +{ + u64 pir = get_hard_smp_processor_id(cpu); + + mtspr(SPRN_LPCR, lpcr_val); + + /* + * Program the LPCR via stop-api only if the deepest stop state + * can lose hypervisor context. + */ + if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT) + opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val); +} + /* * pnv_cpu_offline: A function that puts the CPU into the deepest * available platform idle state on a CPU-Offline. @@ -397,6 +411,20 @@ unsigned long pnv_cpu_offline(unsigned int cpu) { unsigned long srr1; u32 idle_states = pnv_get_supported_cpuidle_states(); + u64 lpcr_val; + + /* + * We don't want to take decrementer interrupts while we are + * offline, so clear LPCR:PECE1. We keep PECE2 (and + * LPCR_PECE_HVEE on P9) enabled as to let IPIs in. + * + * If the CPU gets woken up by a special wakeup, ensure that + * the SLW engine sets LPCR with decrementer bit cleared, else + * the CPU will come back to the kernel due to a spurious + * wakeup. + */ + lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1; + pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val); __ppc64_runlatch_off(); @@ -428,6 +456,16 @@ unsigned long pnv_cpu_offline(unsigned int cpu) __ppc64_runlatch_on(); + /* + * Re-enable decrementer interrupts in LPCR. + * + * Further, we want stop states to be woken up by decrementer + * for non-hotplug cases. So program the LPCR via stop api as + * well. + */ + lpcr_val = mfspr(SPRN_LPCR) | (u64)LPCR_PECE1; + pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val); + return srr1; } #endif diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c new file mode 100644 index 000000000000..de470caf0784 --- /dev/null +++ b/arch/powerpc/platforms/powernv/memtrace.c @@ -0,0 +1,282 @@ +/* + * Copyright (C) IBM Corporation, 2014, 2017 + * Anton Blanchard, Rashmica Gupta. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#define pr_fmt(fmt) "memtrace: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* This enables us to keep track of the memory removed from each node. 
*/ +struct memtrace_entry { + void *mem; + u64 start; + u64 size; + u32 nid; + struct dentry *dir; + char name[16]; +}; + +static u64 memtrace_size; + +static struct memtrace_entry *memtrace_array; +static unsigned int memtrace_array_nr; + + +static ssize_t memtrace_read(struct file *filp, char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct memtrace_entry *ent = filp->private_data; + + return simple_read_from_buffer(ubuf, count, ppos, ent->mem, ent->size); +} + +static bool valid_memtrace_range(struct memtrace_entry *dev, + unsigned long start, unsigned long size) +{ + if ((start >= dev->start) && + ((start + size) <= (dev->start + dev->size))) + return true; + + return false; +} + +static int memtrace_mmap(struct file *filp, struct vm_area_struct *vma) +{ + unsigned long size = vma->vm_end - vma->vm_start; + struct memtrace_entry *dev = filp->private_data; + + if (!valid_memtrace_range(dev, vma->vm_pgoff << PAGE_SHIFT, size)) + return -EINVAL; + + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + + if (remap_pfn_range(vma, vma->vm_start, + vma->vm_pgoff + (dev->start >> PAGE_SHIFT), + size, vma->vm_page_prot)) + return -EAGAIN; + + return 0; +} + +static const struct file_operations memtrace_fops = { + .llseek = default_llseek, + .read = memtrace_read, + .mmap = memtrace_mmap, + .open = simple_open, +}; + +static void flush_memory_region(u64 base, u64 size) +{ + unsigned long line_size = ppc64_caches.l1d.size; + u64 end = base + size; + u64 addr; + + base = round_down(base, line_size); + end = round_up(end, line_size); + + for (addr = base; addr < end; addr += line_size) + asm volatile("dcbf 0,%0" : "=r" (addr) :: "memory"); +} + +static int check_memblock_online(struct memory_block *mem, void *arg) +{ + if (mem->state != MEM_ONLINE) + return -1; + + return 0; +} + +static int change_memblock_state(struct memory_block *mem, void *arg) +{ + unsigned long state = (unsigned long)arg; + + mem->state = state; + + return 0; +} + +static bool memtrace_offline_pages(u32 nid, u64 start_pfn, u64 nr_pages) +{ + u64 end_pfn = start_pfn + nr_pages - 1; + + if (walk_memory_range(start_pfn, end_pfn, NULL, + check_memblock_online)) + return false; + + walk_memory_range(start_pfn, end_pfn, (void *)MEM_GOING_OFFLINE, + change_memblock_state); + + if (offline_pages(start_pfn, nr_pages)) { + walk_memory_range(start_pfn, end_pfn, (void *)MEM_ONLINE, + change_memblock_state); + return false; + } + + walk_memory_range(start_pfn, end_pfn, (void *)MEM_OFFLINE, + change_memblock_state); + + /* RCU grace period? 
*/ + flush_memory_region((u64)__va(start_pfn << PAGE_SHIFT), + nr_pages << PAGE_SHIFT); + + lock_device_hotplug(); + remove_memory(nid, start_pfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT); + unlock_device_hotplug(); + + return true; +} + +static u64 memtrace_alloc_node(u32 nid, u64 size) +{ + u64 start_pfn, end_pfn, nr_pages; + u64 base_pfn; + + if (!NODE_DATA(nid) || !node_spanned_pages(nid)) + return 0; + + start_pfn = node_start_pfn(nid); + end_pfn = node_end_pfn(nid); + nr_pages = size >> PAGE_SHIFT; + + /* Trace memory needs to be aligned to the size */ + end_pfn = round_down(end_pfn - nr_pages, nr_pages); + + for (base_pfn = end_pfn; base_pfn > start_pfn; base_pfn -= nr_pages) { + if (memtrace_offline_pages(nid, base_pfn, nr_pages) == true) + return base_pfn << PAGE_SHIFT; + } + + return 0; +} + +static int memtrace_init_regions_runtime(u64 size) +{ + u32 nid; + u64 m; + + memtrace_array = kcalloc(num_online_nodes(), + sizeof(struct memtrace_entry), GFP_KERNEL); + if (!memtrace_array) { + pr_err("Failed to allocate memtrace_array\n"); + return -EINVAL; + } + + for_each_online_node(nid) { + m = memtrace_alloc_node(nid, size); + + /* + * A node might not have any local memory, so warn but + * continue on. + */ + if (!m) { + pr_err("Failed to allocate trace memory on node %d\n", nid); + continue; + } + + pr_info("Allocated trace memory on node %d at 0x%016llx\n", nid, m); + + memtrace_array[memtrace_array_nr].start = m; + memtrace_array[memtrace_array_nr].size = size; + memtrace_array[memtrace_array_nr].nid = nid; + memtrace_array_nr++; + } + + return 0; +} + +static struct dentry *memtrace_debugfs_dir; + +static int memtrace_init_debugfs(void) +{ + int ret = 0; + int i; + + for (i = 0; i < memtrace_array_nr; i++) { + struct dentry *dir; + struct memtrace_entry *ent = &memtrace_array[i]; + + ent->mem = ioremap(ent->start, ent->size); + /* Warn but continue on */ + if (!ent->mem) { + pr_err("Failed to map trace memory at 0x%llx\n", + ent->start); + ret = -1; + continue; + } + + snprintf(ent->name, 16, "%08x", ent->nid); + dir = debugfs_create_dir(ent->name, memtrace_debugfs_dir); + if (!dir) + return -1; + + ent->dir = dir; + debugfs_create_file("trace", 0400, dir, ent, &memtrace_fops); + debugfs_create_x64("start", 0400, dir, &ent->start); + debugfs_create_x64("size", 0400, dir, &ent->size); + } + + return ret; +} + +static int memtrace_enable_set(void *data, u64 val) +{ + if (memtrace_size) + return -EINVAL; + + if (!val) + return -EINVAL; + + /* Make sure size is aligned to a memory block */ + if (val & (memory_block_size_bytes() - 1)) + return -EINVAL; + + if (memtrace_init_regions_runtime(val)) + return -EINVAL; + + if (memtrace_init_debugfs()) + return -EINVAL; + + memtrace_size = val; + + return 0; +} + +static int memtrace_enable_get(void *data, u64 *val) +{ + *val = memtrace_size; + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(memtrace_init_fops, memtrace_enable_get, + memtrace_enable_set, "0x%016llx\n"); + +static int memtrace_init(void) +{ + memtrace_debugfs_dir = debugfs_create_dir("memtrace", + powerpc_debugfs_root); + if (!memtrace_debugfs_dir) + return -1; + + debugfs_create_file("enable", 0600, memtrace_debugfs_dir, + NULL, &memtrace_init_fops); + + return 0; +} +machine_device_initcall(powernv, memtrace_init); diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c index 4c7b8591f737..2cb6cbea4b3b 100644 --- a/arch/powerpc/platforms/powernv/npu-dma.c +++ b/arch/powerpc/platforms/powernv/npu-dma.c @@ -545,6 +545,12 @@ static void 
mmio_invalidate(struct npu_context *npu_context, int va, struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS]; unsigned long pid = npu_context->mm->context.id; + /* + * Unfortunately the nest mmu does not support flushing specific + * addresses so we have to flush the whole mm. + */ + flush_tlb_mm(npu_context->mm); + /* * Loop over all the NPUs this process is active on and launch * an invalidate. @@ -576,12 +582,6 @@ static void mmio_invalidate(struct npu_context *npu_context, int va, } } - /* - * Unfortunately the nest mmu does not support flushing specific - * addresses so we have to flush the whole mm. - */ - flush_tlb_mm(npu_context->mm); - mmio_invalidate_wait(mmio_atsd_reg, flush); if (flush) /* Wait for the flush to complete */ diff --git a/arch/powerpc/platforms/powernv/opal-async.c b/arch/powerpc/platforms/powernv/opal-async.c index 83bebeec0fea..cf33769a7b72 100644 --- a/arch/powerpc/platforms/powernv/opal-async.c +++ b/arch/powerpc/platforms/powernv/opal-async.c @@ -171,8 +171,8 @@ int __init opal_async_comp_init(void) async = of_get_property(opal_node, "opal-msg-async-num", NULL); if (!async) { - pr_err("%s: %s has no opal-msg-async-num\n", - __func__, opal_node->full_name); + pr_err("%s: %pOF has no opal-msg-async-num\n", + __func__, opal_node); err = -ENOENT; goto out_opal_node; } diff --git a/arch/powerpc/platforms/powernv/opal-flash.c b/arch/powerpc/platforms/powernv/opal-flash.c index 4ec6219287fc..2fa3ac80cb4e 100644 --- a/arch/powerpc/platforms/powernv/opal-flash.c +++ b/arch/powerpc/platforms/powernv/opal-flash.c @@ -520,7 +520,7 @@ static ssize_t image_data_write(struct file *filp, struct kobject *kobj, * update_flash : Flash new firmware image * */ -static struct bin_attribute image_data_attr = { +static const struct bin_attribute image_data_attr = { .attr = {.name = "image", .mode = 0200}, .size = MAX_IMAGE_SIZE, /* Limit image size */ .write = image_data_write, diff --git a/arch/powerpc/platforms/powernv/opal-hmi.c b/arch/powerpc/platforms/powernv/opal-hmi.c index 88f3c61eec95..d78fed728cdf 100644 --- a/arch/powerpc/platforms/powernv/opal-hmi.c +++ b/arch/powerpc/platforms/powernv/opal-hmi.c @@ -30,6 +30,8 @@ #include #include +#include "powernv.h" + static int opal_hmi_handler_nb_init; struct OpalHmiEvtNode { struct list_head list; @@ -267,8 +269,6 @@ static void hmi_event_handler(struct work_struct *work) spin_unlock_irqrestore(&opal_hmi_evt_lock, flags); if (unrecoverable) { - int ret; - /* Pull all HMI events from OPAL before we panic. */ while (opal_get_msg(__pa(&msg), sizeof(msg)) == OPAL_SUCCESS) { u32 type; @@ -284,23 +284,7 @@ static void hmi_event_handler(struct work_struct *work) print_hmi_event_info(hmi_evt); } - /* - * Unrecoverable HMI exception. We need to inform BMC/OCC - * about this error so that it can collect relevant data - * for error analysis before rebooting. - */ - ret = opal_cec_reboot2(OPAL_REBOOT_PLATFORM_ERROR, - "Unrecoverable HMI exception"); - if (ret == OPAL_UNSUPPORTED) { - pr_emerg("Reboot type %d not supported\n", - OPAL_REBOOT_PLATFORM_ERROR); - } - - /* - * Fall through and panic if opal_cec_reboot2() returns - * OPAL_UNSUPPORTED. 
- */ - panic("Unrecoverable HMI exception"); + pnv_platform_error_reboot(NULL, "Unrecoverable HMI exception"); } } diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c new file mode 100644 index 000000000000..21f6531fae20 --- /dev/null +++ b/arch/powerpc/platforms/powernv/opal-imc.c @@ -0,0 +1,226 @@ +/* + * OPAL IMC interface detection driver + * Supported on POWERNV platform + * + * Copyright (C) 2017 Madhavan Srinivasan, IBM Corporation. + * (C) 2017 Anju T Sudhakar, IBM Corporation. + * (C) 2017 Hemant K Shaw, IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or later version. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * imc_get_mem_addr_nest: Function to get nest counter memory region + * for each chip + */ +static int imc_get_mem_addr_nest(struct device_node *node, + struct imc_pmu *pmu_ptr, + u32 offset) +{ + int nr_chips = 0, i; + u64 *base_addr_arr, baddr; + u32 *chipid_arr; + + nr_chips = of_property_count_u32_elems(node, "chip-id"); + if (nr_chips <= 0) + return -ENODEV; + + base_addr_arr = kcalloc(nr_chips, sizeof(u64), GFP_KERNEL); + if (!base_addr_arr) + return -ENOMEM; + + chipid_arr = kcalloc(nr_chips, sizeof(u32), GFP_KERNEL); + if (!chipid_arr) + return -ENOMEM; + + if (of_property_read_u32_array(node, "chip-id", chipid_arr, nr_chips)) + goto error; + + if (of_property_read_u64_array(node, "base-addr", base_addr_arr, + nr_chips)) + goto error; + + pmu_ptr->mem_info = kcalloc(nr_chips, sizeof(struct imc_mem_info), + GFP_KERNEL); + if (!pmu_ptr->mem_info) + goto error; + + for (i = 0; i < nr_chips; i++) { + pmu_ptr->mem_info[i].id = chipid_arr[i]; + baddr = base_addr_arr[i] + offset; + pmu_ptr->mem_info[i].vbase = phys_to_virt(baddr); + } + + pmu_ptr->imc_counter_mmaped = true; + kfree(base_addr_arr); + kfree(chipid_arr); + return 0; + +error: + kfree(pmu_ptr->mem_info); + kfree(base_addr_arr); + kfree(chipid_arr); + return -1; +} + +/* + * imc_pmu_create : Takes the parent device which is the pmu unit, pmu_index + * and domain as the inputs. 
+ * Allocates memory for the struct imc_pmu, sets up its domain, size and offsets + */ +static int imc_pmu_create(struct device_node *parent, int pmu_index, int domain) +{ + int ret = 0; + struct imc_pmu *pmu_ptr; + u32 offset; + + /* memory for pmu */ + pmu_ptr = kzalloc(sizeof(struct imc_pmu), GFP_KERNEL); + if (!pmu_ptr) + return -ENOMEM; + + /* Set the domain */ + pmu_ptr->domain = domain; + + ret = of_property_read_u32(parent, "size", &pmu_ptr->counter_mem_size); + if (ret) { + ret = -EINVAL; + goto free_pmu; + } + + if (!of_property_read_u32(parent, "offset", &offset)) { + if (imc_get_mem_addr_nest(parent, pmu_ptr, offset)) { + ret = -EINVAL; + goto free_pmu; + } + } + + /* Function to register IMC pmu */ + ret = init_imc_pmu(parent, pmu_ptr, pmu_index); + if (ret) + pr_err("IMC PMU %s Register failed\n", pmu_ptr->pmu.name); + + return 0; + +free_pmu: + kfree(pmu_ptr); + return ret; +} + +static void disable_nest_pmu_counters(void) +{ + int nid, cpu; + const struct cpumask *l_cpumask; + + get_online_cpus(); + for_each_online_node(nid) { + l_cpumask = cpumask_of_node(nid); + cpu = cpumask_first(l_cpumask); + opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST, + get_hard_smp_processor_id(cpu)); + } + put_online_cpus(); +} + +static void disable_core_pmu_counters(void) +{ + cpumask_t cores_map; + int cpu, rc; + + get_online_cpus(); + /* Disable the IMC Core functions */ + cores_map = cpu_online_cores_map(); + for_each_cpu(cpu, &cores_map) { + rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE, + get_hard_smp_processor_id(cpu)); + if (rc) + pr_err("%s: Failed to stop Core (cpu = %d)\n", + __FUNCTION__, cpu); + } + put_online_cpus(); +} + +static int opal_imc_counters_probe(struct platform_device *pdev) +{ + struct device_node *imc_dev = pdev->dev.of_node; + int pmu_count = 0, domain; + u32 type; + + /* + * Check whether this is kdump kernel. If yes, force the engines to + * stop and return. + */ + if (is_kdump_kernel()) { + disable_nest_pmu_counters(); + disable_core_pmu_counters(); + return -ENODEV; + } + + for_each_compatible_node(imc_dev, NULL, IMC_DTB_UNIT_COMPAT) { + if (of_property_read_u32(imc_dev, "type", &type)) { + pr_warn("IMC Device without type property\n"); + continue; + } + + switch (type) { + case IMC_TYPE_CHIP: + domain = IMC_DOMAIN_NEST; + break; + case IMC_TYPE_CORE: + domain =IMC_DOMAIN_CORE; + break; + case IMC_TYPE_THREAD: + domain = IMC_DOMAIN_THREAD; + break; + default: + pr_warn("IMC Unknown Device type \n"); + domain = -1; + break; + } + + if (!imc_pmu_create(imc_dev, pmu_count, domain)) + pmu_count++; + } + + return 0; +} + +static void opal_imc_counters_shutdown(struct platform_device *pdev) +{ + /* + * Function only stops the engines which is bare minimum. + * TODO: Need to handle proper memory cleanup and pmu + * unregister. 
+ */ + disable_nest_pmu_counters(); + disable_core_pmu_counters(); +} + +static const struct of_device_id opal_imc_match[] = { + { .compatible = IMC_DTB_COMPAT }, + {}, +}; + +static struct platform_driver opal_imc_driver = { + .driver = { + .name = "opal-imc-counters", + .of_match_table = opal_imc_match, + }, + .probe = opal_imc_counters_probe, + .shutdown = opal_imc_counters_shutdown, +}; + +builtin_platform_driver(opal_imc_driver); diff --git a/arch/powerpc/platforms/powernv/opal-powercap.c b/arch/powerpc/platforms/powernv/opal-powercap.c new file mode 100644 index 000000000000..badb29bde93f --- /dev/null +++ b/arch/powerpc/platforms/powernv/opal-powercap.c @@ -0,0 +1,244 @@ +/* + * PowerNV OPAL Powercap interface + * + * Copyright 2017 IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#define pr_fmt(fmt) "opal-powercap: " fmt + +#include +#include +#include + +#include + +DEFINE_MUTEX(powercap_mutex); + +static struct kobject *powercap_kobj; + +struct powercap_attr { + u32 handle; + struct kobj_attribute attr; +}; + +static struct pcap { + struct attribute_group pg; + struct powercap_attr *pattrs; +} *pcaps; + +static ssize_t powercap_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf) +{ + struct powercap_attr *pcap_attr = container_of(attr, + struct powercap_attr, attr); + struct opal_msg msg; + u32 pcap; + int ret, token; + + token = opal_async_get_token_interruptible(); + if (token < 0) { + pr_devel("Failed to get token\n"); + return token; + } + + ret = mutex_lock_interruptible(&powercap_mutex); + if (ret) + goto out_token; + + ret = opal_get_powercap(pcap_attr->handle, token, (u32 *)__pa(&pcap)); + switch (ret) { + case OPAL_ASYNC_COMPLETION: + ret = opal_async_wait_response(token, &msg); + if (ret) { + pr_devel("Failed to wait for the async response\n"); + ret = -EIO; + goto out; + } + ret = opal_error_code(opal_get_async_rc(msg)); + if (!ret) { + ret = sprintf(buf, "%u\n", be32_to_cpu(pcap)); + if (ret < 0) + ret = -EIO; + } + break; + case OPAL_SUCCESS: + ret = sprintf(buf, "%u\n", be32_to_cpu(pcap)); + if (ret < 0) + ret = -EIO; + break; + default: + ret = opal_error_code(ret); + } + +out: + mutex_unlock(&powercap_mutex); +out_token: + opal_async_release_token(token); + return ret; +} + +static ssize_t powercap_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, + size_t count) +{ + struct powercap_attr *pcap_attr = container_of(attr, + struct powercap_attr, attr); + struct opal_msg msg; + u32 pcap; + int ret, token; + + ret = kstrtoint(buf, 0, &pcap); + if (ret) + return ret; + + token = opal_async_get_token_interruptible(); + if (token < 0) { + pr_devel("Failed to get token\n"); + return token; + } + + ret = mutex_lock_interruptible(&powercap_mutex); + if (ret) + goto out_token; + + ret = opal_set_powercap(pcap_attr->handle, token, pcap); + switch (ret) { + case OPAL_ASYNC_COMPLETION: + ret = opal_async_wait_response(token, &msg); + if (ret) { + pr_devel("Failed to wait for the async response\n"); + ret = -EIO; + goto out; + } + ret = opal_error_code(opal_get_async_rc(msg)); + if (!ret) + ret = count; + break; + case OPAL_SUCCESS: + ret = count; + break; + default: + ret = opal_error_code(ret); + } + +out: + mutex_unlock(&powercap_mutex); +out_token: + opal_async_release_token(token); + return ret; +} + 
+static void powercap_add_attr(int handle, const char *name, + struct powercap_attr *attr) +{ + attr->handle = handle; + sysfs_attr_init(&attr->attr.attr); + attr->attr.attr.name = name; + attr->attr.attr.mode = 0444; + attr->attr.show = powercap_show; +} + +void __init opal_powercap_init(void) +{ + struct device_node *powercap, *node; + int i = 0; + + powercap = of_find_compatible_node(NULL, NULL, "ibm,opal-powercap"); + if (!powercap) { + pr_devel("Powercap node not found\n"); + return; + } + + pcaps = kcalloc(of_get_child_count(powercap), sizeof(*pcaps), + GFP_KERNEL); + if (!pcaps) + return; + + powercap_kobj = kobject_create_and_add("powercap", opal_kobj); + if (!powercap_kobj) { + pr_warn("Failed to create powercap kobject\n"); + goto out_pcaps; + } + + i = 0; + for_each_child_of_node(powercap, node) { + u32 cur, min, max; + int j = 0; + bool has_cur = false, has_min = false, has_max = false; + + if (!of_property_read_u32(node, "powercap-min", &min)) { + j++; + has_min = true; + } + + if (!of_property_read_u32(node, "powercap-max", &max)) { + j++; + has_max = true; + } + + if (!of_property_read_u32(node, "powercap-current", &cur)) { + j++; + has_cur = true; + } + + pcaps[i].pattrs = kcalloc(j, sizeof(struct powercap_attr), + GFP_KERNEL); + if (!pcaps[i].pattrs) + goto out_pcaps_pattrs; + + pcaps[i].pg.attrs = kcalloc(j + 1, sizeof(struct attribute *), + GFP_KERNEL); + if (!pcaps[i].pg.attrs) { + kfree(pcaps[i].pattrs); + goto out_pcaps_pattrs; + } + + j = 0; + pcaps[i].pg.name = node->name; + if (has_min) { + powercap_add_attr(min, "powercap-min", + &pcaps[i].pattrs[j]); + pcaps[i].pg.attrs[j] = &pcaps[i].pattrs[j].attr.attr; + j++; + } + + if (has_max) { + powercap_add_attr(max, "powercap-max", + &pcaps[i].pattrs[j]); + pcaps[i].pg.attrs[j] = &pcaps[i].pattrs[j].attr.attr; + j++; + } + + if (has_cur) { + powercap_add_attr(cur, "powercap-current", + &pcaps[i].pattrs[j]); + pcaps[i].pattrs[j].attr.attr.mode |= 0220; + pcaps[i].pattrs[j].attr.store = powercap_store; + pcaps[i].pg.attrs[j] = &pcaps[i].pattrs[j].attr.attr; + j++; + } + + if (sysfs_create_group(powercap_kobj, &pcaps[i].pg)) { + pr_warn("Failed to create powercap attribute group %s\n", + pcaps[i].pg.name); + goto out_pcaps_pattrs; + } + i++; + } + + return; + +out_pcaps_pattrs: + while (--i >= 0) { + kfree(pcaps[i].pattrs); + kfree(pcaps[i].pg.attrs); + } + kobject_put(powercap_kobj); +out_pcaps: + kfree(pcaps); +} diff --git a/arch/powerpc/platforms/powernv/opal-prd.c b/arch/powerpc/platforms/powernv/opal-prd.c index 2d6ee1c5ad85..de4dd09f4a15 100644 --- a/arch/powerpc/platforms/powernv/opal-prd.c +++ b/arch/powerpc/platforms/powernv/opal-prd.c @@ -241,15 +241,9 @@ static ssize_t opal_prd_write(struct file *file, const char __user *buf, size = be16_to_cpu(hdr.size); - msg = kmalloc(size, GFP_KERNEL); - if (!msg) - return -ENOMEM; - - rc = copy_from_user(msg, buf, size); - if (rc) { - size = -EFAULT; - goto out_free; - } + msg = memdup_user(buf, size); + if (IS_ERR(msg)) + return PTR_ERR(msg); rc = opal_prd_msg(msg); if (rc) { @@ -257,7 +251,6 @@ static ssize_t opal_prd_write(struct file *file, const char __user *buf, size = -EIO; } -out_free: kfree(msg); return size; diff --git a/arch/powerpc/platforms/powernv/opal-psr.c b/arch/powerpc/platforms/powernv/opal-psr.c new file mode 100644 index 000000000000..7313b7fc9071 --- /dev/null +++ b/arch/powerpc/platforms/powernv/opal-psr.c @@ -0,0 +1,175 @@ +/* + * PowerNV OPAL Power-Shift-Ratio interface + * + * Copyright 2017 IBM Corp. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#define pr_fmt(fmt) "opal-psr: " fmt + +#include +#include +#include + +#include + +DEFINE_MUTEX(psr_mutex); + +static struct kobject *psr_kobj; + +struct psr_attr { + u32 handle; + struct kobj_attribute attr; +} *psr_attrs; + +static ssize_t psr_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf) +{ + struct psr_attr *psr_attr = container_of(attr, struct psr_attr, attr); + struct opal_msg msg; + int psr, ret, token; + + token = opal_async_get_token_interruptible(); + if (token < 0) { + pr_devel("Failed to get token\n"); + return token; + } + + ret = mutex_lock_interruptible(&psr_mutex); + if (ret) + goto out_token; + + ret = opal_get_power_shift_ratio(psr_attr->handle, token, + (u32 *)__pa(&psr)); + switch (ret) { + case OPAL_ASYNC_COMPLETION: + ret = opal_async_wait_response(token, &msg); + if (ret) { + pr_devel("Failed to wait for the async response\n"); + ret = -EIO; + goto out; + } + ret = opal_error_code(opal_get_async_rc(msg)); + if (!ret) { + ret = sprintf(buf, "%u\n", be32_to_cpu(psr)); + if (ret < 0) + ret = -EIO; + } + break; + case OPAL_SUCCESS: + ret = sprintf(buf, "%u\n", be32_to_cpu(psr)); + if (ret < 0) + ret = -EIO; + break; + default: + ret = opal_error_code(ret); + } + +out: + mutex_unlock(&psr_mutex); +out_token: + opal_async_release_token(token); + return ret; +} + +static ssize_t psr_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count) +{ + struct psr_attr *psr_attr = container_of(attr, struct psr_attr, attr); + struct opal_msg msg; + int psr, ret, token; + + ret = kstrtoint(buf, 0, &psr); + if (ret) + return ret; + + token = opal_async_get_token_interruptible(); + if (token < 0) { + pr_devel("Failed to get token\n"); + return token; + } + + ret = mutex_lock_interruptible(&psr_mutex); + if (ret) + goto out_token; + + ret = opal_set_power_shift_ratio(psr_attr->handle, token, psr); + switch (ret) { + case OPAL_ASYNC_COMPLETION: + ret = opal_async_wait_response(token, &msg); + if (ret) { + pr_devel("Failed to wait for the async response\n"); + ret = -EIO; + goto out; + } + ret = opal_error_code(opal_get_async_rc(msg)); + if (!ret) + ret = count; + break; + case OPAL_SUCCESS: + ret = count; + break; + default: + ret = opal_error_code(ret); + } + +out: + mutex_unlock(&psr_mutex); +out_token: + opal_async_release_token(token); + return ret; +} + +void __init opal_psr_init(void) +{ + struct device_node *psr, *node; + int i = 0; + + psr = of_find_compatible_node(NULL, NULL, + "ibm,opal-power-shift-ratio"); + if (!psr) { + pr_devel("Power-shift-ratio node not found\n"); + return; + } + + psr_attrs = kcalloc(of_get_child_count(psr), sizeof(struct psr_attr), + GFP_KERNEL); + if (!psr_attrs) + return; + + psr_kobj = kobject_create_and_add("psr", opal_kobj); + if (!psr_kobj) { + pr_warn("Failed to create psr kobject\n"); + goto out; + } + + for_each_child_of_node(psr, node) { + if (of_property_read_u32(node, "handle", + &psr_attrs[i].handle)) + goto out_kobj; + + sysfs_attr_init(&psr_attrs[i].attr.attr); + if (of_property_read_string(node, "label", + &psr_attrs[i].attr.attr.name)) + goto out_kobj; + psr_attrs[i].attr.attr.mode = 0664; + psr_attrs[i].attr.show = psr_show; + psr_attrs[i].attr.store = psr_store; + if (sysfs_create_file(psr_kobj, 
&psr_attrs[i].attr.attr)) { + pr_devel("Failed to create psr sysfs file %s\n", + psr_attrs[i].attr.attr.name); + goto out_kobj; + } + i++; + } + + return; +out_kobj: + kobject_put(psr_kobj); +out: + kfree(psr_attrs); +} diff --git a/arch/powerpc/platforms/powernv/opal-sensor-groups.c b/arch/powerpc/platforms/powernv/opal-sensor-groups.c new file mode 100644 index 000000000000..7e5a235ebf76 --- /dev/null +++ b/arch/powerpc/platforms/powernv/opal-sensor-groups.c @@ -0,0 +1,212 @@ +/* + * PowerNV OPAL Sensor-groups interface + * + * Copyright 2017 IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#define pr_fmt(fmt) "opal-sensor-groups: " fmt + +#include +#include +#include + +#include + +DEFINE_MUTEX(sg_mutex); + +static struct kobject *sg_kobj; + +struct sg_attr { + u32 handle; + struct kobj_attribute attr; +}; + +static struct sensor_group { + char name[20]; + struct attribute_group sg; + struct sg_attr *sgattrs; +} *sgs; + +static ssize_t sg_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count) +{ + struct sg_attr *sattr = container_of(attr, struct sg_attr, attr); + struct opal_msg msg; + u32 data; + int ret, token; + + ret = kstrtoint(buf, 0, &data); + if (ret) + return ret; + + if (data != 1) + return -EINVAL; + + token = opal_async_get_token_interruptible(); + if (token < 0) { + pr_devel("Failed to get token\n"); + return token; + } + + ret = mutex_lock_interruptible(&sg_mutex); + if (ret) + goto out_token; + + ret = opal_sensor_group_clear(sattr->handle, token); + switch (ret) { + case OPAL_ASYNC_COMPLETION: + ret = opal_async_wait_response(token, &msg); + if (ret) { + pr_devel("Failed to wait for the async response\n"); + ret = -EIO; + goto out; + } + ret = opal_error_code(opal_get_async_rc(msg)); + if (!ret) + ret = count; + break; + case OPAL_SUCCESS: + ret = count; + break; + default: + ret = opal_error_code(ret); + } + +out: + mutex_unlock(&sg_mutex); +out_token: + opal_async_release_token(token); + return ret; +} + +static struct sg_ops_info { + int opal_no; + const char *attr_name; + ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count); +} ops_info[] = { + { OPAL_SENSOR_GROUP_CLEAR, "clear", sg_store }, +}; + +static void add_attr(int handle, struct sg_attr *attr, int index) +{ + attr->handle = handle; + sysfs_attr_init(&attr->attr.attr); + attr->attr.attr.name = ops_info[index].attr_name; + attr->attr.attr.mode = 0220; + attr->attr.store = ops_info[index].store; +} + +static int add_attr_group(const __be32 *ops, int len, struct sensor_group *sg, + u32 handle) +{ + int i, j; + int count = 0; + + for (i = 0; i < len; i++) + for (j = 0; j < ARRAY_SIZE(ops_info); j++) + if (be32_to_cpu(ops[i]) == ops_info[j].opal_no) { + add_attr(handle, &sg->sgattrs[count], j); + sg->sg.attrs[count] = + &sg->sgattrs[count].attr.attr; + count++; + } + + return sysfs_create_group(sg_kobj, &sg->sg); +} + +static int get_nr_attrs(const __be32 *ops, int len) +{ + int i, j; + int nr_attrs = 0; + + for (i = 0; i < len; i++) + for (j = 0; j < ARRAY_SIZE(ops_info); j++) + if (be32_to_cpu(ops[i]) == ops_info[j].opal_no) + nr_attrs++; + + return nr_attrs; +} + +void __init opal_sensor_groups_init(void) +{ + struct device_node *sg, *node; + int i = 0; + + sg = of_find_compatible_node(NULL, NULL, 
"ibm,opal-sensor-group"); + if (!sg) { + pr_devel("Sensor groups node not found\n"); + return; + } + + sgs = kcalloc(of_get_child_count(sg), sizeof(*sgs), GFP_KERNEL); + if (!sgs) + return; + + sg_kobj = kobject_create_and_add("sensor_groups", opal_kobj); + if (!sg_kobj) { + pr_warn("Failed to create sensor group kobject\n"); + goto out_sgs; + } + + for_each_child_of_node(sg, node) { + const __be32 *ops; + u32 sgid, len, nr_attrs, chipid; + + ops = of_get_property(node, "ops", &len); + if (!ops) + continue; + + nr_attrs = get_nr_attrs(ops, len); + if (!nr_attrs) + continue; + + sgs[i].sgattrs = kcalloc(nr_attrs, sizeof(struct sg_attr), + GFP_KERNEL); + if (!sgs[i].sgattrs) + goto out_sgs_sgattrs; + + sgs[i].sg.attrs = kcalloc(nr_attrs + 1, + sizeof(struct attribute *), + GFP_KERNEL); + + if (!sgs[i].sg.attrs) { + kfree(sgs[i].sgattrs); + goto out_sgs_sgattrs; + } + + if (of_property_read_u32(node, "sensor-group-id", &sgid)) { + pr_warn("sensor-group-id property not found\n"); + goto out_sgs_sgattrs; + } + + if (!of_property_read_u32(node, "ibm,chip-id", &chipid)) + sprintf(sgs[i].name, "%s%d", node->name, chipid); + else + sprintf(sgs[i].name, "%s", node->name); + + sgs[i].sg.name = sgs[i].name; + if (add_attr_group(ops, len, &sgs[i], sgid)) { + pr_warn("Failed to create sensor attribute group %s\n", + sgs[i].sg.name); + goto out_sgs_sgattrs; + } + i++; + } + + return; + +out_sgs_sgattrs: + while (--i >= 0) { + kfree(sgs[i].sgattrs); + kfree(sgs[i].sg.attrs); + } + kobject_put(sg_kobj); +out_sgs: + kfree(sgs); +} diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S index 4ca6c26a56d5..8c1ede2d3f7e 100644 --- a/arch/powerpc/platforms/powernv/opal-wrappers.S +++ b/arch/powerpc/platforms/powernv/opal-wrappers.S @@ -27,7 +27,7 @@ .globl opal_tracepoint_refcount opal_tracepoint_refcount: - .llong 0 + .8byte 0 .section ".text" @@ -310,3 +310,12 @@ OPAL_CALL(opal_xive_dump, OPAL_XIVE_DUMP); OPAL_CALL(opal_npu_init_context, OPAL_NPU_INIT_CONTEXT); OPAL_CALL(opal_npu_destroy_context, OPAL_NPU_DESTROY_CONTEXT); OPAL_CALL(opal_npu_map_lpar, OPAL_NPU_MAP_LPAR); +OPAL_CALL(opal_imc_counters_init, OPAL_IMC_COUNTERS_INIT); +OPAL_CALL(opal_imc_counters_start, OPAL_IMC_COUNTERS_START); +OPAL_CALL(opal_imc_counters_stop, OPAL_IMC_COUNTERS_STOP); +OPAL_CALL(opal_pci_set_p2p, OPAL_PCI_SET_P2P); +OPAL_CALL(opal_get_powercap, OPAL_GET_POWERCAP); +OPAL_CALL(opal_set_powercap, OPAL_SET_POWERCAP); +OPAL_CALL(opal_get_power_shift_ratio, OPAL_GET_POWER_SHIFT_RATIO); +OPAL_CALL(opal_set_power_shift_ratio, OPAL_SET_POWER_SHIFT_RATIO); +OPAL_CALL(opal_sensor_group_clear, OPAL_SENSOR_GROUP_CLEAR); diff --git a/arch/powerpc/platforms/powernv/opal-xscom.c b/arch/powerpc/platforms/powernv/opal-xscom.c index 28651fb25417..81c0a943dea9 100644 --- a/arch/powerpc/platforms/powernv/opal-xscom.c +++ b/arch/powerpc/platforms/powernv/opal-xscom.c @@ -36,14 +36,14 @@ static scom_map_t opal_scom_map(struct device_node *dev, u64 reg, u64 count) const __be32 *gcid; if (!of_get_property(dev, "scom-controller", NULL)) { - pr_err("%s: device %s is not a SCOM controller\n", - __func__, dev->full_name); + pr_err("%s: device %pOF is not a SCOM controller\n", + __func__, dev); return SCOM_MAP_INVALID; } gcid = of_get_property(dev, "ibm,chip-id", NULL); if (!gcid) { - pr_err("%s: device %s has no ibm,chip-id\n", - __func__, dev->full_name); + pr_err("%s: device %pOF has no ibm,chip-id\n", + __func__, dev); return SCOM_MAP_INVALID; } m = kmalloc(sizeof(struct opal_scom_map), GFP_KERNEL); 
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index cad6b57ce494..65c79ecf5a4d 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -25,11 +26,17 @@ #include #include #include +#include +#include +#include +#include #include #include #include #include +#include +#include #include "powernv.h" @@ -162,12 +169,9 @@ int __init early_init_dt_scan_recoverable_ranges(unsigned long node, sizeof(struct mcheck_recoverable_range); /* - * Allocate a buffer to hold the MC recoverable ranges. We would be - * accessing them in real mode, hence it needs to be within - * RMO region. + * Allocate a buffer to hold the MC recoverable ranges. */ - mc_recoverable_range =__va(memblock_alloc_base(size, __alignof__(u64), - ppc64_rma_size)); + mc_recoverable_range =__va(memblock_alloc(size, __alignof__(u64))); memset(mc_recoverable_range, 0, size); for (i = 0; i < mc_recoverable_range_len; i++) { @@ -422,24 +426,88 @@ static int opal_recover_mce(struct pt_regs *regs, /* Fatal machine check */ pr_err("Machine check interrupt is fatal\n"); recovered = 0; - } else if ((evt->severity == MCE_SEV_ERROR_SYNC) && - (user_mode(regs) && !is_global_init(current))) { + } + + if (!recovered && evt->severity == MCE_SEV_ERROR_SYNC) { /* - * For now, kill the task if we have received exception when - * in userspace. + * Try to kill processes if we get a synchronous machine check + * (e.g., one caused by execution of this instruction). This + * will devolve into a panic if we try to kill init or are in + * an interrupt etc. * * TODO: Queue up this address for hwpoisioning later. + * TODO: This is not quite right for d-side machine + * checks ->nip is not necessarily the important + * address. */ - _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip); - recovered = 1; + if ((user_mode(regs))) { + _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip); + recovered = 1; + } else if (die_will_crash()) { + /* + * die() would kill the kernel, so better to go via + * the platform reboot code that will log the + * machine check. + */ + recovered = 0; + } else { + die("Machine check", regs, SIGBUS); + recovered = 1; + } } + return recovered; } +void pnv_platform_error_reboot(struct pt_regs *regs, const char *msg) +{ + /* + * This is mostly taken from kernel/panic.c, but tries to do + * relatively minimal work. Don't use delay functions (TB may + * be broken), don't crash dump (need to set a firmware log), + * don't run notifiers. We do want to get some information to + * Linux console. + */ + console_verbose(); + bust_spinlocks(1); + pr_emerg("Hardware platform error: %s\n", msg); + if (regs) + show_regs(regs); + smp_send_stop(); + printk_safe_flush_on_panic(); + kmsg_dump(KMSG_DUMP_PANIC); + bust_spinlocks(0); + debug_locks_off(); + console_flush_on_panic(); + + /* + * Don't bother to shut things down because this will + * xstop the system. + */ + if (opal_cec_reboot2(OPAL_REBOOT_PLATFORM_ERROR, msg) + == OPAL_UNSUPPORTED) { + pr_emerg("Reboot type %d not supported for %s\n", + OPAL_REBOOT_PLATFORM_ERROR, msg); + } + + /* + * We reached here. There can be three possibilities: + * 1. We are running on a firmware level that do not support + * opal_cec_reboot2() + * 2. We are running on a firmware level that do not support + * OPAL_REBOOT_PLATFORM_ERROR reboot type. + * 3. 
We are running on FSP based system that does not need + * opal to trigger checkstop explicitly for error analysis. + * The FSP PRD component would have already got notified + * about this error through other channels. + */ + + ppc_md.restart(NULL); +} + int opal_machine_check(struct pt_regs *regs) { struct machine_check_event evt; - int ret; if (!get_mce_event(&evt, MCE_EVENT_RELEASE)) return 0; @@ -455,43 +523,7 @@ int opal_machine_check(struct pt_regs *regs) if (opal_recover_mce(regs, &evt)) return 1; - /* - * Unrecovered machine check, we are heading to panic path. - * - * We may have hit this MCE in very early stage of kernel - * initialization even before opal-prd has started running. If - * this is the case then this MCE error may go un-noticed or - * un-analyzed if we go down panic path. We need to inform - * BMC/OCC about this error so that they can collect relevant - * data for error analysis before rebooting. - * Use opal_cec_reboot2(OPAL_REBOOT_PLATFORM_ERROR) to do so. - * This function may not return on BMC based system. - */ - ret = opal_cec_reboot2(OPAL_REBOOT_PLATFORM_ERROR, - "Unrecoverable Machine Check exception"); - if (ret == OPAL_UNSUPPORTED) { - pr_emerg("Reboot type %d not supported\n", - OPAL_REBOOT_PLATFORM_ERROR); - } - - /* - * We reached here. There can be three possibilities: - * 1. We are running on a firmware level that do not support - * opal_cec_reboot2() - * 2. We are running on a firmware level that do not support - * OPAL_REBOOT_PLATFORM_ERROR reboot type. - * 3. We are running on FSP based system that does not need opal - * to trigger checkstop explicitly for error analysis. The FSP - * PRD component would have already got notified about this - * error through other channels. - * - * If hardware marked this as an unrecoverable MCE, we are - * going to panic anyway. Even if it didn't, it's not safe to - * continue at this point, so we should explicitly panic. - */ - - panic("PowerNV Unrecovered Machine Check"); - return 0; + pnv_platform_error_reboot(regs, "Unrecoverable Machine Check exception"); } /* Early hmi handler called in real mode. 
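 * (HMI here is the Hypervisor Maintenance Interrupt.)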
*/ @@ -720,6 +752,15 @@ static void opal_pdev_init(const char *compatible) of_platform_device_create(np, NULL, NULL); } +static void __init opal_imc_init_dev(void) +{ + struct device_node *np; + + np = of_find_compatible_node(NULL, NULL, IMC_DTB_COMPAT); + if (np) + of_platform_device_create(np, NULL, NULL); +} + static int kopald(void *unused) { unsigned long timeout = msecs_to_jiffies(opal_heartbeat) + 1; @@ -793,6 +834,9 @@ static int __init opal_init(void) /* Setup a heatbeat thread if requested by OPAL */ opal_init_heartbeat(); + /* Detect In-Memory Collection counters and create devices*/ + opal_imc_init_dev(); + /* Create leds platform devices */ leds = of_find_node_by_path("/ibm,opal/leds"); if (leds) { @@ -836,6 +880,15 @@ static int __init opal_init(void) /* Initialise OPAL kmsg dumper for flushing console on panic */ opal_kmsg_init(); + /* Initialise OPAL powercap interface */ + opal_powercap_init(); + + /* Initialise OPAL Power-Shifting-Ratio interface */ + opal_psr_init(); + + /* Initialise OPAL sensor groups */ + opal_sensor_groups_init(); + return 0; } machine_subsys_initcall(powernv, opal_init); @@ -952,6 +1005,7 @@ int opal_error_code(int rc) case OPAL_UNSUPPORTED: return -EIO; case OPAL_HARDWARE: return -EIO; case OPAL_INTERNAL_ERROR: return -EIO; + case OPAL_TIMEOUT: return -ETIMEDOUT; default: pr_err("%s: unexpected OPAL error %d\n", __func__, rc); return -EIO; diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index b900eb1d5e17..57f9e55f4352 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -444,8 +444,8 @@ static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb) r = of_get_property(dn, "ibm,opal-m64-window", NULL); if (!r) { - pr_info(" No on %s\n", - dn->full_name); + pr_info(" No on %pOF\n", + dn); return; } @@ -1408,7 +1408,6 @@ static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs) static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group, int num); -static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable); static void pnv_pci_ioda2_release_dma_pe(struct pci_dev *dev, struct pnv_ioda_pe *pe) { @@ -2402,7 +2401,7 @@ static long pnv_pci_ioda2_set_window(struct iommu_table_group *table_group, return 0; } -static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable) +void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable) { uint16_t window_id = (pe->pe_number << 1 ) + 1; int64_t rc; @@ -3797,8 +3796,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, if (!of_device_is_available(np)) return; - pr_info("Initializing %s PHB (%s)\n", - pnv_phb_names[ioda_type], of_node_full_name(np)); + pr_info("Initializing %s PHB (%pOF)\n", pnv_phb_names[ioda_type], np); prop64 = of_get_property(np, "ibm,opal-phbid", NULL); if (!prop64) { @@ -3813,8 +3811,8 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, /* Allocate PCI controller */ phb->hose = hose = pcibios_alloc_controller(np); if (!phb->hose) { - pr_err(" Can't allocate PCI controller for %s\n", - np->full_name); + pr_err(" Can't allocate PCI controller for %pOF\n", + np); memblock_free(__pa(phb), sizeof(struct pnv_phb)); return; } @@ -3825,7 +3823,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, hose->first_busno = be32_to_cpu(prop32[0]); hose->last_busno = be32_to_cpu(prop32[1]); } else { - pr_warn(" Broken on %s\n", np->full_name); + pr_warn(" Broken on %pOF\n", np); hose->first_busno = 
0; hose->last_busno = 0xff; } @@ -4046,7 +4044,7 @@ void __init pnv_pci_init_ioda_hub(struct device_node *np) const __be64 *prop64; u64 hub_id; - pr_info("Probing IODA IO-Hub %s\n", np->full_name); + pr_info("Probing IODA IO-Hub %pOF\n", np); prop64 = of_get_property(np, "ibm,opal-hubid", NULL); if (!prop64) { diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index 7905d179d036..5422f4a6317c 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c @@ -37,6 +37,8 @@ #include "powernv.h" #include "pci.h" +static DEFINE_MUTEX(p2p_mutex); + int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id) { struct device_node *parent = np; @@ -1017,6 +1019,79 @@ void pnv_pci_dma_bus_setup(struct pci_bus *bus) } } +int pnv_pci_set_p2p(struct pci_dev *initiator, struct pci_dev *target, u64 desc) +{ + struct pci_controller *hose; + struct pnv_phb *phb_init, *phb_target; + struct pnv_ioda_pe *pe_init; + int rc; + + if (!opal_check_token(OPAL_PCI_SET_P2P)) + return -ENXIO; + + hose = pci_bus_to_host(initiator->bus); + phb_init = hose->private_data; + + hose = pci_bus_to_host(target->bus); + phb_target = hose->private_data; + + pe_init = pnv_ioda_get_pe(initiator); + if (!pe_init) + return -ENODEV; + + /* + * Configuring the initiator's PHB requires to adjust its + * TVE#1 setting. Since the same device can be an initiator + * several times for different target devices, we need to keep + * a reference count to know when we can restore the default + * bypass setting on its TVE#1 when disabling. Opal is not + * tracking PE states, so we add a reference count on the PE + * in linux. + * + * For the target, the configuration is per PHB, so we keep a + * target reference count on the PHB. + */ + mutex_lock(&p2p_mutex); + + if (desc & OPAL_PCI_P2P_ENABLE) { + /* always go to opal to validate the configuration */ + rc = opal_pci_set_p2p(phb_init->opal_id, phb_target->opal_id, + desc, pe_init->pe_number); + + if (rc != OPAL_SUCCESS) { + rc = -EIO; + goto out; + } + + pe_init->p2p_initiator_count++; + phb_target->p2p_target_count++; + } else { + if (!pe_init->p2p_initiator_count || + !phb_target->p2p_target_count) { + rc = -EINVAL; + goto out; + } + + if (--pe_init->p2p_initiator_count == 0) + pnv_pci_ioda2_set_bypass(pe_init, true); + + if (--phb_target->p2p_target_count == 0) { + rc = opal_pci_set_p2p(phb_init->opal_id, + phb_target->opal_id, desc, + pe_init->pe_number); + if (rc != OPAL_SUCCESS) { + rc = -EIO; + goto out; + } + } + } + rc = 0; +out: + mutex_unlock(&p2p_mutex); + return rc; +} +EXPORT_SYMBOL_GPL(pnv_pci_set_p2p); + void pnv_pci_shutdown(void) { struct pci_controller *hose; diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index f16bc403ec03..a95273c524f6 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h @@ -78,6 +78,9 @@ struct pnv_ioda_pe { struct pnv_ioda_pe *master; struct list_head slaves; + /* PCI peer-to-peer*/ + int p2p_initiator_count; + /* Link in list of PE#s */ struct list_head list; }; @@ -189,6 +192,7 @@ struct pnv_phb { #ifdef CONFIG_CXL_BASE struct cxl_afu *cxl_afu; #endif + int p2p_target_count; }; extern struct pci_ops pnv_pci_ops; @@ -229,6 +233,7 @@ extern void pnv_teardown_msi_irqs(struct pci_dev *pdev); extern struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev); extern void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq); extern bool pnv_pci_enable_device_hook(struct pci_dev *dev); +extern void 
pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable); extern void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level, const char *fmt, ...); diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h index 6dbc0a1da1f6..a159d48573d7 100644 --- a/arch/powerpc/platforms/powernv/powernv.h +++ b/arch/powerpc/platforms/powernv/powernv.h @@ -7,6 +7,8 @@ extern void pnv_smp_init(void); static inline void pnv_smp_init(void) { } #endif +extern void pnv_platform_error_reboot(struct pt_regs *regs, const char *msg) __noreturn; + struct pci_dev; #ifdef CONFIG_PCI diff --git a/arch/powerpc/platforms/powernv/rng.c b/arch/powerpc/platforms/powernv/rng.c index 1a9d84371a4d..718f50ed22f1 100644 --- a/arch/powerpc/platforms/powernv/rng.c +++ b/arch/powerpc/platforms/powernv/rng.c @@ -16,11 +16,13 @@ #include #include #include +#include #include #include #include #include +#define DARN_ERR 0xFFFFFFFFFFFFFFFFul struct powernv_rng { void __iomem *regs; @@ -67,6 +69,41 @@ int powernv_get_random_real_mode(unsigned long *v) return 1; } +int powernv_get_random_darn(unsigned long *v) +{ + unsigned long val; + + /* Using DARN with L=1 - 64-bit conditioned random number */ + asm volatile(PPC_DARN(%0, 1) : "=r"(val)); + + if (val == DARN_ERR) + return 0; + + *v = val; + + return 1; +} + +static int initialise_darn(void) +{ + unsigned long val; + int i; + + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + return -ENODEV; + + for (i = 0; i < 10; i++) { + if (powernv_get_random_darn(&val)) { + ppc_md.get_random_seed = powernv_get_random_darn; + return 0; + } + } + + pr_warn("Unable to use DARN for get_random_seed()\n"); + + return -EIO; +} + int powernv_get_random_long(unsigned long *v) { struct powernv_rng *rng; @@ -88,7 +125,7 @@ static __init void rng_init_per_cpu(struct powernv_rng *rng, chip_id = of_get_ibm_chip_id(dn); if (chip_id == -1) - pr_warn("No ibm,chip-id found for %s.\n", dn->full_name); + pr_warn("No ibm,chip-id found for %pOF.\n", dn); for_each_possible_cpu(cpu) { if (per_cpu(powernv_rng, cpu) == NULL || @@ -141,8 +178,8 @@ static __init int rng_init(void) for_each_compatible_node(dn, NULL, "ibm,power-rng") { rc = rng_create(dn); if (rc) { - pr_err("Failed creating rng for %s (%d).\n", - dn->full_name, rc); + pr_err("Failed creating rng for %pOF (%d).\n", + dn, rc); continue; } @@ -150,6 +187,8 @@ static __init int rng_init(void) of_platform_device_create(dn, NULL, NULL); } + initialise_darn(); + return 0; } machine_subsys_initcall(powernv, rng_init); diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index 897aa1400eb8..bbb73aa0eb8f 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c @@ -272,7 +272,15 @@ static void pnv_kexec_cpu_down(int crash_shutdown, int secondary) #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE static unsigned long pnv_memory_block_size(void) { - return 256UL * 1024 * 1024; + /* + * We map the kernel linear region with 1GB large pages on radix. For + * memory hot unplug to work our memory block size must be at least + * this size. 
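+ * (radix_enabled() below therefore returns 1UL << 30 == 1GB; the hash
+ * case keeps the historical 256MB == 1UL << 28.)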
+ */ + if (radix_enabled()) + return 1UL * 1024 * 1024 * 1024; + else + return 256UL * 1024 * 1024; } #endif diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c index 40dae96f7e20..c17f81e433f7 100644 --- a/arch/powerpc/platforms/powernv/smp.c +++ b/arch/powerpc/platforms/powernv/smp.c @@ -57,7 +57,7 @@ static void pnv_smp_setup_cpu(int cpu) static int pnv_smp_kick_cpu(int nr) { - unsigned int pcpu = get_hard_smp_processor_id(nr); + unsigned int pcpu; unsigned long start_here = __pa(ppc_function_entry(generic_secondary_smp_init)); long rc; @@ -66,6 +66,7 @@ static int pnv_smp_kick_cpu(int nr) if (nr < 0 || nr >= nr_cpu_ids) return -EINVAL; + pcpu = get_hard_smp_processor_id(nr); /* * If we already started or OPAL is not supported, we just * kick the CPU via the PACA @@ -164,12 +165,6 @@ static void pnv_smp_cpu_kill_self(void) if (cpu_has_feature(CPU_FTR_ARCH_207S)) wmask = SRR1_WAKEMASK_P8; - /* We don't want to take decrementer interrupts while we are offline, - * so clear LPCR:PECE1. We keep PECE2 (and LPCR_PECE_HVEE on P9) - * enabled as to let IPIs in. - */ - mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1); - while (!generic_check_cpu_restart(cpu)) { /* * Clear IPI flag, since we don't handle IPIs while @@ -219,8 +214,6 @@ static void pnv_smp_cpu_kill_self(void) } - /* Re-enable decrementer interrupts */ - mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1); DBG("CPU%d coming online...\n", cpu); } diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c new file mode 100644 index 000000000000..5aae845b8cd9 --- /dev/null +++ b/arch/powerpc/platforms/powernv/vas-window.c @@ -0,0 +1,1134 @@ +/* + * Copyright 2016-17 IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#define pr_fmt(fmt) "vas: " fmt + +#include +#include +#include +#include +#include +#include +#include + +#include "vas.h" +#include "copy-paste.h" + +/* + * Compute the paste address region for the window @window using the + * ->paste_base_addr and ->paste_win_id_shift we got from device tree. + */ +static void compute_paste_address(struct vas_window *window, u64 *addr, int *len) +{ + int winid; + u64 base, shift; + + base = window->vinst->paste_base_addr; + shift = window->vinst->paste_win_id_shift; + winid = window->winid; + + *addr = base + (winid << shift); + if (len) + *len = PAGE_SIZE; + + pr_debug("Txwin #%d: Paste addr 0x%llx\n", winid, *addr); +} + +static inline void get_hvwc_mmio_bar(struct vas_window *window, + u64 *start, int *len) +{ + u64 pbaddr; + + pbaddr = window->vinst->hvwc_bar_start; + *start = pbaddr + window->winid * VAS_HVWC_SIZE; + *len = VAS_HVWC_SIZE; +} + +static inline void get_uwc_mmio_bar(struct vas_window *window, + u64 *start, int *len) +{ + u64 pbaddr; + + pbaddr = window->vinst->uwc_bar_start; + *start = pbaddr + window->winid * VAS_UWC_SIZE; + *len = VAS_UWC_SIZE; +} + +/* + * Map the paste bus address of the given send window into kernel address + * space. Unlike MMIO regions (map_mmio_region() below), paste region must + * be mapped cache-able and is only applicable to send windows. 
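+ * (The bus address being mapped comes from compute_paste_address()
+ * above: paste_base_addr + (winid << paste_win_id_shift), one page per
+ * send window.)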
+ */ +static void *map_paste_region(struct vas_window *txwin) +{ + int len; + void *map; + char *name; + u64 start; + + name = kasprintf(GFP_KERNEL, "window-v%d-w%d", txwin->vinst->vas_id, + txwin->winid); + if (!name) + goto free_name; + + txwin->paste_addr_name = name; + compute_paste_address(txwin, &start, &len); + + if (!request_mem_region(start, len, name)) { + pr_devel("%s(): request_mem_region(0x%llx, %d) failed\n", + __func__, start, len); + goto free_name; + } + + map = ioremap_cache(start, len); + if (!map) { + pr_devel("%s(): ioremap_cache(0x%llx, %d) failed\n", __func__, + start, len); + goto free_name; + } + + pr_devel("Mapped paste addr 0x%llx to kaddr 0x%p\n", start, map); + return map; + +free_name: + kfree(name); + return ERR_PTR(-ENOMEM); +} + +static void *map_mmio_region(char *name, u64 start, int len) +{ + void *map; + + if (!request_mem_region(start, len, name)) { + pr_devel("%s(): request_mem_region(0x%llx, %d) failed\n", + __func__, start, len); + return NULL; + } + + map = ioremap(start, len); + if (!map) { + pr_devel("%s(): ioremap(0x%llx, %d) failed\n", __func__, start, + len); + return NULL; + } + + return map; +} + +static void unmap_region(void *addr, u64 start, int len) +{ + iounmap(addr); + release_mem_region((phys_addr_t)start, len); +} + +/* + * Unmap the paste address region for a window. + */ +static void unmap_paste_region(struct vas_window *window) +{ + int len; + u64 busaddr_start; + + if (window->paste_kaddr) { + compute_paste_address(window, &busaddr_start, &len); + unmap_region(window->paste_kaddr, busaddr_start, len); + window->paste_kaddr = NULL; + kfree(window->paste_addr_name); + window->paste_addr_name = NULL; + } +} + +/* + * Unmap the MMIO regions for a window. + */ +static void unmap_winctx_mmio_bars(struct vas_window *window) +{ + int len; + u64 busaddr_start; + + if (window->hvwc_map) { + get_hvwc_mmio_bar(window, &busaddr_start, &len); + unmap_region(window->hvwc_map, busaddr_start, len); + window->hvwc_map = NULL; + } + + if (window->uwc_map) { + get_uwc_mmio_bar(window, &busaddr_start, &len); + unmap_region(window->uwc_map, busaddr_start, len); + window->uwc_map = NULL; + } +} + +/* + * Find the Hypervisor Window Context (HVWC) MMIO Base Address Region and the + * OS/User Window Context (UWC) MMIO Base Address Region for the given window. + * Map these bus addresses and save the mapped kernel addresses in @window. + */ +int map_winctx_mmio_bars(struct vas_window *window) +{ + int len; + u64 start; + + get_hvwc_mmio_bar(window, &start, &len); + window->hvwc_map = map_mmio_region("HVWCM_Window", start, len); + + get_uwc_mmio_bar(window, &start, &len); + window->uwc_map = map_mmio_region("UWCM_Window", start, len); + + if (!window->hvwc_map || !window->uwc_map) { + unmap_winctx_mmio_bars(window); + return -1; + } + + return 0; +} + +/* + * Reset all valid registers in the HV and OS/User Window Contexts for + * the window identified by @window. + * + * NOTE: We cannot really use a for loop to reset window context. Not all + * offsets in a window context are valid registers and the valid + * registers are not sequential. And, we can only write to offsets + * with valid registers. 
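+ * Each VREG(reg) below presumably pairs the register name with its
+ * VAS_<reg>_OFFSET from vas.h, which is what write_hvwc_reg() needs to
+ * address the register being cleared.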
+ */ +void reset_window_regs(struct vas_window *window) +{ + write_hvwc_reg(window, VREG(LPID), 0ULL); + write_hvwc_reg(window, VREG(PID), 0ULL); + write_hvwc_reg(window, VREG(XLATE_MSR), 0ULL); + write_hvwc_reg(window, VREG(XLATE_LPCR), 0ULL); + write_hvwc_reg(window, VREG(XLATE_CTL), 0ULL); + write_hvwc_reg(window, VREG(AMR), 0ULL); + write_hvwc_reg(window, VREG(SEIDR), 0ULL); + write_hvwc_reg(window, VREG(FAULT_TX_WIN), 0ULL); + write_hvwc_reg(window, VREG(OSU_INTR_SRC_RA), 0ULL); + write_hvwc_reg(window, VREG(HV_INTR_SRC_RA), 0ULL); + write_hvwc_reg(window, VREG(PSWID), 0ULL); + write_hvwc_reg(window, VREG(LFIFO_BAR), 0ULL); + write_hvwc_reg(window, VREG(LDATA_STAMP_CTL), 0ULL); + write_hvwc_reg(window, VREG(LDMA_CACHE_CTL), 0ULL); + write_hvwc_reg(window, VREG(LRFIFO_PUSH), 0ULL); + write_hvwc_reg(window, VREG(CURR_MSG_COUNT), 0ULL); + write_hvwc_reg(window, VREG(LNOTIFY_AFTER_COUNT), 0ULL); + write_hvwc_reg(window, VREG(LRX_WCRED), 0ULL); + write_hvwc_reg(window, VREG(LRX_WCRED_ADDER), 0ULL); + write_hvwc_reg(window, VREG(TX_WCRED), 0ULL); + write_hvwc_reg(window, VREG(TX_WCRED_ADDER), 0ULL); + write_hvwc_reg(window, VREG(LFIFO_SIZE), 0ULL); + write_hvwc_reg(window, VREG(WINCTL), 0ULL); + write_hvwc_reg(window, VREG(WIN_STATUS), 0ULL); + write_hvwc_reg(window, VREG(WIN_CTX_CACHING_CTL), 0ULL); + write_hvwc_reg(window, VREG(TX_RSVD_BUF_COUNT), 0ULL); + write_hvwc_reg(window, VREG(LRFIFO_WIN_PTR), 0ULL); + write_hvwc_reg(window, VREG(LNOTIFY_CTL), 0ULL); + write_hvwc_reg(window, VREG(LNOTIFY_PID), 0ULL); + write_hvwc_reg(window, VREG(LNOTIFY_LPID), 0ULL); + write_hvwc_reg(window, VREG(LNOTIFY_TID), 0ULL); + write_hvwc_reg(window, VREG(LNOTIFY_SCOPE), 0ULL); + write_hvwc_reg(window, VREG(NX_UTIL_ADDER), 0ULL); + + /* Skip read-only registers: NX_UTIL and NX_UTIL_SE */ + + /* + * The send and receive window credit adder registers are also + * accessible from HVWC and have been initialized above. We don't + * need to initialize from the OS/User Window Context, so skip + * following calls: + * + * write_uwc_reg(window, VREG(TX_WCRED_ADDER), 0ULL); + * write_uwc_reg(window, VREG(LRX_WCRED_ADDER), 0ULL); + */ +} + +/* + * Initialize window context registers related to Address Translation. + * These registers are common to send/receive windows although they + * differ for user/kernel windows. As we resolve the TODOs we may + * want to add fields to vas_winctx and move the initialization to + * init_vas_winctx_regs(). + */ +static void init_xlate_regs(struct vas_window *window, bool user_win) +{ + u64 lpcr, val; + + /* + * MSR_TA, MSR_US are false for both kernel and user. + * MSR_DR and MSR_PR are false for kernel. + */ + val = 0ULL; + val = SET_FIELD(VAS_XLATE_MSR_HV, val, 1); + val = SET_FIELD(VAS_XLATE_MSR_SF, val, 1); + if (user_win) { + val = SET_FIELD(VAS_XLATE_MSR_DR, val, 1); + val = SET_FIELD(VAS_XLATE_MSR_PR, val, 1); + } + write_hvwc_reg(window, VREG(XLATE_MSR), val); + + lpcr = mfspr(SPRN_LPCR); + val = 0ULL; + /* + * NOTE: From Section 5.7.8.1 Segment Lookaside Buffer of the + * Power ISA, v3.0B, Page size encoding is 0 = 4KB, 5 = 64KB. + * + * NOTE: From Section 1.3.1, Address Translation Context of the + * Nest MMU Workbook, LPCR_SC should be 0 for Power9. 
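+ * (Hence the value 5 written just below selects the 64KB base page
+ * size.)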
+ */ + val = SET_FIELD(VAS_XLATE_LPCR_PAGE_SIZE, val, 5); + val = SET_FIELD(VAS_XLATE_LPCR_ISL, val, lpcr & LPCR_ISL); + val = SET_FIELD(VAS_XLATE_LPCR_TC, val, lpcr & LPCR_TC); + val = SET_FIELD(VAS_XLATE_LPCR_SC, val, 0); + write_hvwc_reg(window, VREG(XLATE_LPCR), val); + + /* + * Section 1.3.1 (Address translation Context) of NMMU workbook. + * 0b00 Hashed Page Table mode + * 0b01 Reserved + * 0b10 Radix on HPT + * 0b11 Radix on Radix + */ + val = 0ULL; + val = SET_FIELD(VAS_XLATE_MODE, val, radix_enabled() ? 3 : 2); + write_hvwc_reg(window, VREG(XLATE_CTL), val); + + /* + * TODO: Can we mfspr(AMR) even for user windows? + */ + val = 0ULL; + val = SET_FIELD(VAS_AMR, val, mfspr(SPRN_AMR)); + write_hvwc_reg(window, VREG(AMR), val); + + val = 0ULL; + val = SET_FIELD(VAS_SEIDR, val, 0); + write_hvwc_reg(window, VREG(SEIDR), val); +} + +/* + * Initialize Reserved Send Buffer Count for the send window. It involves + * writing to the register, reading it back to confirm that the hardware + * has enough buffers to reserve. See section 1.3.1.2.1 of VAS workbook. + * + * Since we can only make a best-effort attempt to fulfill the request, + * we don't return any errors if we cannot. + * + * TODO: Reserved (aka dedicated) send buffers are not supported yet. + */ +static void init_rsvd_tx_buf_count(struct vas_window *txwin, + struct vas_winctx *winctx) +{ + write_hvwc_reg(txwin, VREG(TX_RSVD_BUF_COUNT), 0ULL); +} + +/* + * init_winctx_regs() + * Initialize window context registers for a receive window. + * Except for caching control and marking window open, the registers + * are initialized in the order listed in Section 3.1.4 (Window Context + * Cache Register Details) of the VAS workbook although they don't need + * to be. + * + * Design note: For NX receive windows, NX allocates the FIFO buffer in OPAL + * (so that it can get a large contiguous area) and passes that buffer + * to kernel via device tree. We now write that buffer address to the + * FIFO BAR. Would it make sense to do this all in OPAL? i.e have OPAL + * write the per-chip RX FIFO addresses to the windows during boot-up + * as a one-time task? That could work for NX but what about other + * receivers? Let the receivers tell us the rx-fifo buffers for now. + */ +int init_winctx_regs(struct vas_window *window, struct vas_winctx *winctx) +{ + u64 val; + int fifo_size; + + reset_window_regs(window); + + val = 0ULL; + val = SET_FIELD(VAS_LPID, val, winctx->lpid); + write_hvwc_reg(window, VREG(LPID), val); + + val = 0ULL; + val = SET_FIELD(VAS_PID_ID, val, winctx->pidr); + write_hvwc_reg(window, VREG(PID), val); + + init_xlate_regs(window, winctx->user_win); + + val = 0ULL; + val = SET_FIELD(VAS_FAULT_TX_WIN, val, 0); + write_hvwc_reg(window, VREG(FAULT_TX_WIN), val); + + /* In PowerNV, interrupts go to HV. */ + write_hvwc_reg(window, VREG(OSU_INTR_SRC_RA), 0ULL); + + val = 0ULL; + val = SET_FIELD(VAS_HV_INTR_SRC_RA, val, winctx->irq_port); + write_hvwc_reg(window, VREG(HV_INTR_SRC_RA), val); + + val = 0ULL; + val = SET_FIELD(VAS_PSWID_EA_HANDLE, val, winctx->pswid); + write_hvwc_reg(window, VREG(PSWID), val); + + write_hvwc_reg(window, VREG(SPARE1), 0ULL); + write_hvwc_reg(window, VREG(SPARE2), 0ULL); + write_hvwc_reg(window, VREG(SPARE3), 0ULL); + + /* + * NOTE: VAS expects the FIFO address to be copied into the LFIFO_BAR + * register as is - do NOT shift the address into VAS_LFIFO_BAR + * bit fields! 
Ok to set the page migration select fields - + * VAS ignores the lower 10+ bits in the address anyway, because + * the minimum FIFO size is 1K? + * + * See also: Design note in function header. + */ + val = __pa(winctx->rx_fifo); + val = SET_FIELD(VAS_PAGE_MIGRATION_SELECT, val, 0); + write_hvwc_reg(window, VREG(LFIFO_BAR), val); + + val = 0ULL; + val = SET_FIELD(VAS_LDATA_STAMP, val, winctx->data_stamp); + write_hvwc_reg(window, VREG(LDATA_STAMP_CTL), val); + + val = 0ULL; + val = SET_FIELD(VAS_LDMA_TYPE, val, winctx->dma_type); + val = SET_FIELD(VAS_LDMA_FIFO_DISABLE, val, winctx->fifo_disable); + write_hvwc_reg(window, VREG(LDMA_CACHE_CTL), val); + + write_hvwc_reg(window, VREG(LRFIFO_PUSH), 0ULL); + write_hvwc_reg(window, VREG(CURR_MSG_COUNT), 0ULL); + write_hvwc_reg(window, VREG(LNOTIFY_AFTER_COUNT), 0ULL); + + val = 0ULL; + val = SET_FIELD(VAS_LRX_WCRED, val, winctx->wcreds_max); + write_hvwc_reg(window, VREG(LRX_WCRED), val); + + val = 0ULL; + val = SET_FIELD(VAS_TX_WCRED, val, winctx->wcreds_max); + write_hvwc_reg(window, VREG(TX_WCRED), val); + + write_hvwc_reg(window, VREG(LRX_WCRED_ADDER), 0ULL); + write_hvwc_reg(window, VREG(TX_WCRED_ADDER), 0ULL); + + fifo_size = winctx->rx_fifo_size / 1024; + + val = 0ULL; + val = SET_FIELD(VAS_LFIFO_SIZE, val, ilog2(fifo_size)); + write_hvwc_reg(window, VREG(LFIFO_SIZE), val); + + /* Update window control and caching control registers last so + * we mark the window open only after fully initializing it and + * pushing context to cache. + */ + + write_hvwc_reg(window, VREG(WIN_STATUS), 0ULL); + + init_rsvd_tx_buf_count(window, winctx); + + /* for a send window, point to the matching receive window */ + val = 0ULL; + val = SET_FIELD(VAS_LRX_WIN_ID, val, winctx->rx_win_id); + write_hvwc_reg(window, VREG(LRFIFO_WIN_PTR), val); + + write_hvwc_reg(window, VREG(SPARE4), 0ULL); + + val = 0ULL; + val = SET_FIELD(VAS_NOTIFY_DISABLE, val, winctx->notify_disable); + val = SET_FIELD(VAS_INTR_DISABLE, val, winctx->intr_disable); + val = SET_FIELD(VAS_NOTIFY_EARLY, val, winctx->notify_early); + val = SET_FIELD(VAS_NOTIFY_OSU_INTR, val, winctx->notify_os_intr_reg); + write_hvwc_reg(window, VREG(LNOTIFY_CTL), val); + + val = 0ULL; + val = SET_FIELD(VAS_LNOTIFY_PID, val, winctx->lnotify_pid); + write_hvwc_reg(window, VREG(LNOTIFY_PID), val); + + val = 0ULL; + val = SET_FIELD(VAS_LNOTIFY_LPID, val, winctx->lnotify_lpid); + write_hvwc_reg(window, VREG(LNOTIFY_LPID), val); + + val = 0ULL; + val = SET_FIELD(VAS_LNOTIFY_TID, val, winctx->lnotify_tid); + write_hvwc_reg(window, VREG(LNOTIFY_TID), val); + + val = 0ULL; + val = SET_FIELD(VAS_LNOTIFY_MIN_SCOPE, val, winctx->min_scope); + val = SET_FIELD(VAS_LNOTIFY_MAX_SCOPE, val, winctx->max_scope); + write_hvwc_reg(window, VREG(LNOTIFY_SCOPE), val); + + /* Skip read-only registers NX_UTIL and NX_UTIL_SE */ + + write_hvwc_reg(window, VREG(SPARE5), 0ULL); + write_hvwc_reg(window, VREG(NX_UTIL_ADDER), 0ULL); + write_hvwc_reg(window, VREG(SPARE6), 0ULL); + + /* Finally, push window context to memory and... */ + val = 0ULL; + val = SET_FIELD(VAS_PUSH_TO_MEM, val, 1); + write_hvwc_reg(window, VREG(WIN_CTX_CACHING_CTL), val); + + /* ... 
mark the window open for business */ + val = 0ULL; + val = SET_FIELD(VAS_WINCTL_REJ_NO_CREDIT, val, winctx->rej_no_credit); + val = SET_FIELD(VAS_WINCTL_PIN, val, winctx->pin_win); + val = SET_FIELD(VAS_WINCTL_TX_WCRED_MODE, val, winctx->tx_wcred_mode); + val = SET_FIELD(VAS_WINCTL_RX_WCRED_MODE, val, winctx->rx_wcred_mode); + val = SET_FIELD(VAS_WINCTL_TX_WORD_MODE, val, winctx->tx_word_mode); + val = SET_FIELD(VAS_WINCTL_RX_WORD_MODE, val, winctx->rx_word_mode); + val = SET_FIELD(VAS_WINCTL_FAULT_WIN, val, winctx->fault_win); + val = SET_FIELD(VAS_WINCTL_NX_WIN, val, winctx->nx_win); + val = SET_FIELD(VAS_WINCTL_OPEN, val, 1); + write_hvwc_reg(window, VREG(WINCTL), val); + + return 0; +} + +static DEFINE_SPINLOCK(vas_ida_lock); + +static void vas_release_window_id(struct ida *ida, int winid) +{ + spin_lock(&vas_ida_lock); + ida_remove(ida, winid); + spin_unlock(&vas_ida_lock); +} + +static int vas_assign_window_id(struct ida *ida) +{ + int rc, winid; + + do { + rc = ida_pre_get(ida, GFP_KERNEL); + if (!rc) + return -EAGAIN; + + spin_lock(&vas_ida_lock); + rc = ida_get_new(ida, &winid); + spin_unlock(&vas_ida_lock); + } while (rc == -EAGAIN); + + if (rc) + return rc; + + if (winid > VAS_WINDOWS_PER_CHIP) { + pr_err("Too many (%d) open windows\n", winid); + vas_release_window_id(ida, winid); + return -EAGAIN; + } + + return winid; +} + +static void vas_window_free(struct vas_window *window) +{ + int winid = window->winid; + struct vas_instance *vinst = window->vinst; + + unmap_winctx_mmio_bars(window); + kfree(window); + + vas_release_window_id(&vinst->ida, winid); +} + +static struct vas_window *vas_window_alloc(struct vas_instance *vinst) +{ + int winid; + struct vas_window *window; + + winid = vas_assign_window_id(&vinst->ida); + if (winid < 0) + return ERR_PTR(winid); + + window = kzalloc(sizeof(*window), GFP_KERNEL); + if (!window) + goto out_free; + + window->vinst = vinst; + window->winid = winid; + + if (map_winctx_mmio_bars(window)) + goto out_free; + + return window; + +out_free: + kfree(window); + vas_release_window_id(&vinst->ida, winid); + return ERR_PTR(-ENOMEM); +} + +static void put_rx_win(struct vas_window *rxwin) +{ + /* Better not be a send window! */ + WARN_ON_ONCE(rxwin->tx_win); + + atomic_dec(&rxwin->num_txwins); +} + +/* + * Get the VAS receive window associated with NX engine identified + * by @cop and if applicable, @pswid. + * + * See also function header of set_vinst_win(). + */ +static struct vas_window *get_vinst_rxwin(struct vas_instance *vinst, + enum vas_cop_type cop, u32 pswid) +{ + struct vas_window *rxwin; + + mutex_lock(&vinst->mutex); + + if (cop == VAS_COP_TYPE_842 || cop == VAS_COP_TYPE_842_HIPRI) + rxwin = vinst->rxwin[cop] ?: ERR_PTR(-EINVAL); + else + rxwin = ERR_PTR(-EINVAL); + + if (!IS_ERR(rxwin)) + atomic_inc(&rxwin->num_txwins); + + mutex_unlock(&vinst->mutex); + + return rxwin; +} + +/* + * We have two tables of windows in a VAS instance. The first one, + * ->windows[], contains all the windows in the instance and allows + * looking up a window by its id. It is used to look up send windows + * during fault handling and receive windows when pairing user space + * send/receive windows. + * + * The second table, ->rxwin[], contains receive windows that are + * associated with NX engines. This table has VAS_COP_TYPE_MAX + * entries and is used to look up a receive window by its + * coprocessor type. + * + * Here, we save @window in the ->windows[] table. If it is a receive + * window, we also save the window in the ->rxwin[] table. 
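+ *
+ * A lookup sketch (illustrative): a window is later found by id as
+ * vinst->windows[winid], while get_vinst_rxwin() above finds the NX
+ * receive window for a coprocessor type as vinst->rxwin[cop].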
+ */ +static void set_vinst_win(struct vas_instance *vinst, + struct vas_window *window) +{ + int id = window->winid; + + mutex_lock(&vinst->mutex); + + /* + * There should only be one receive window for a coprocessor type + * unless its a user (FTW) window. + */ + if (!window->user_win && !window->tx_win) { + WARN_ON_ONCE(vinst->rxwin[window->cop]); + vinst->rxwin[window->cop] = window; + } + + WARN_ON_ONCE(vinst->windows[id] != NULL); + vinst->windows[id] = window; + + mutex_unlock(&vinst->mutex); +} + +/* + * Clear this window from the table(s) of windows for this VAS instance. + * See also function header of set_vinst_win(). + */ +static void clear_vinst_win(struct vas_window *window) +{ + int id = window->winid; + struct vas_instance *vinst = window->vinst; + + mutex_lock(&vinst->mutex); + + if (!window->user_win && !window->tx_win) { + WARN_ON_ONCE(!vinst->rxwin[window->cop]); + vinst->rxwin[window->cop] = NULL; + } + + WARN_ON_ONCE(vinst->windows[id] != window); + vinst->windows[id] = NULL; + + mutex_unlock(&vinst->mutex); +} + +static void init_winctx_for_rxwin(struct vas_window *rxwin, + struct vas_rx_win_attr *rxattr, + struct vas_winctx *winctx) +{ + /* + * We first zero (memset()) all fields and only set non-zero fields. + * Following fields are 0/false but maybe deserve a comment: + * + * ->notify_os_intr_reg In powerNV, send intrs to HV + * ->notify_disable False for NX windows + * ->intr_disable False for Fault Windows + * ->xtra_write False for NX windows + * ->notify_early NA for NX windows + * ->rsvd_txbuf_count NA for Rx windows + * ->lpid, ->pid, ->tid NA for Rx windows + */ + + memset(winctx, 0, sizeof(struct vas_winctx)); + + winctx->rx_fifo = rxattr->rx_fifo; + winctx->rx_fifo_size = rxattr->rx_fifo_size; + winctx->wcreds_max = rxattr->wcreds_max ?: VAS_WCREDS_DEFAULT; + winctx->pin_win = rxattr->pin_win; + + winctx->nx_win = rxattr->nx_win; + winctx->fault_win = rxattr->fault_win; + winctx->rx_word_mode = rxattr->rx_win_ord_mode; + winctx->tx_word_mode = rxattr->tx_win_ord_mode; + winctx->rx_wcred_mode = rxattr->rx_wcred_mode; + winctx->tx_wcred_mode = rxattr->tx_wcred_mode; + + if (winctx->nx_win) { + winctx->data_stamp = true; + winctx->intr_disable = true; + winctx->pin_win = true; + + WARN_ON_ONCE(winctx->fault_win); + WARN_ON_ONCE(!winctx->rx_word_mode); + WARN_ON_ONCE(!winctx->tx_word_mode); + WARN_ON_ONCE(winctx->notify_after_count); + } else if (winctx->fault_win) { + winctx->notify_disable = true; + } else if (winctx->user_win) { + /* + * Section 1.8.1 Low Latency Core-Core Wake up of + * the VAS workbook: + * + * - disable credit checks ([tr]x_wcred_mode = false) + * - disable FIFO writes + * - enable ASB_Notify, disable interrupt + */ + winctx->fifo_disable = true; + winctx->intr_disable = true; + winctx->rx_fifo = NULL; + } + + winctx->lnotify_lpid = rxattr->lnotify_lpid; + winctx->lnotify_pid = rxattr->lnotify_pid; + winctx->lnotify_tid = rxattr->lnotify_tid; + winctx->pswid = rxattr->pswid; + winctx->dma_type = VAS_DMA_TYPE_INJECT; + winctx->tc_mode = rxattr->tc_mode; + + winctx->min_scope = VAS_SCOPE_LOCAL; + winctx->max_scope = VAS_SCOPE_VECTORED_GROUP; +} + +static bool rx_win_args_valid(enum vas_cop_type cop, + struct vas_rx_win_attr *attr) +{ + dump_rx_win_attr(attr); + + if (cop >= VAS_COP_TYPE_MAX) + return false; + + if (cop != VAS_COP_TYPE_FTW && + attr->rx_fifo_size < VAS_RX_FIFO_SIZE_MIN) + return false; + + if (attr->rx_fifo_size > VAS_RX_FIFO_SIZE_MAX) + return false; + + if (attr->nx_win) { + /* cannot be fault or user window if it is nx 
*/ + if (attr->fault_win || attr->user_win) + return false; + /* + * Section 3.1.4.32: NX Windows must not disable notification, + * and must not enable interrupts or early notification. + */ + if (attr->notify_disable || !attr->intr_disable || + attr->notify_early) + return false; + } else if (attr->fault_win) { + /* cannot be both fault and user window */ + if (attr->user_win) + return false; + + /* + * Section 3.1.4.32: Fault windows must disable notification + * but not interrupts. + */ + if (!attr->notify_disable || attr->intr_disable) + return false; + + } else if (attr->user_win) { + /* + * User receive windows are only for fast-thread-wakeup + * (FTW). They don't need a FIFO and must disable interrupts + */ + if (attr->rx_fifo || attr->rx_fifo_size || !attr->intr_disable) + return false; + } else { + /* Rx window must be one of NX or Fault or User window. */ + return false; + } + + return true; +} + +void vas_init_rx_win_attr(struct vas_rx_win_attr *rxattr, enum vas_cop_type cop) +{ + memset(rxattr, 0, sizeof(*rxattr)); + + if (cop == VAS_COP_TYPE_842 || cop == VAS_COP_TYPE_842_HIPRI) { + rxattr->pin_win = true; + rxattr->nx_win = true; + rxattr->fault_win = false; + rxattr->intr_disable = true; + rxattr->rx_wcred_mode = true; + rxattr->tx_wcred_mode = true; + rxattr->rx_win_ord_mode = true; + rxattr->tx_win_ord_mode = true; + } else if (cop == VAS_COP_TYPE_FAULT) { + rxattr->pin_win = true; + rxattr->fault_win = true; + rxattr->notify_disable = true; + rxattr->rx_wcred_mode = true; + rxattr->tx_wcred_mode = true; + rxattr->rx_win_ord_mode = true; + rxattr->tx_win_ord_mode = true; + } else if (cop == VAS_COP_TYPE_FTW) { + rxattr->user_win = true; + rxattr->intr_disable = true; + + /* + * As noted in the VAS Workbook we disable credit checks. + * If we enable credit checks in the future, we must also + * implement a mechanism to return the user credits or new + * paste operations will fail. 
+ */ + } +} +EXPORT_SYMBOL_GPL(vas_init_rx_win_attr); + +struct vas_window *vas_rx_win_open(int vasid, enum vas_cop_type cop, + struct vas_rx_win_attr *rxattr) +{ + struct vas_window *rxwin; + struct vas_winctx winctx; + struct vas_instance *vinst; + + if (!rx_win_args_valid(cop, rxattr)) + return ERR_PTR(-EINVAL); + + vinst = find_vas_instance(vasid); + if (!vinst) { + pr_devel("vasid %d not found!\n", vasid); + return ERR_PTR(-EINVAL); + } + pr_devel("Found instance %d\n", vasid); + + rxwin = vas_window_alloc(vinst); + if (IS_ERR(rxwin)) { + pr_devel("Unable to allocate memory for Rx window\n"); + return rxwin; + } + + rxwin->tx_win = false; + rxwin->nx_win = rxattr->nx_win; + rxwin->user_win = rxattr->user_win; + rxwin->cop = cop; + if (rxattr->user_win) + rxwin->pid = task_pid_vnr(current); + + init_winctx_for_rxwin(rxwin, rxattr, &winctx); + init_winctx_regs(rxwin, &winctx); + + set_vinst_win(vinst, rxwin); + + return rxwin; +} +EXPORT_SYMBOL_GPL(vas_rx_win_open); + +void vas_init_tx_win_attr(struct vas_tx_win_attr *txattr, enum vas_cop_type cop) +{ + memset(txattr, 0, sizeof(*txattr)); + + if (cop == VAS_COP_TYPE_842 || cop == VAS_COP_TYPE_842_HIPRI) { + txattr->rej_no_credit = false; + txattr->rx_wcred_mode = true; + txattr->tx_wcred_mode = true; + txattr->rx_win_ord_mode = true; + txattr->tx_win_ord_mode = true; + } else if (cop == VAS_COP_TYPE_FTW) { + txattr->user_win = true; + } +} +EXPORT_SYMBOL_GPL(vas_init_tx_win_attr); + +static void init_winctx_for_txwin(struct vas_window *txwin, + struct vas_tx_win_attr *txattr, + struct vas_winctx *winctx) +{ + /* + * We first zero all fields and only set non-zero ones. Following + * are some fields set to 0/false for the stated reason: + * + * ->notify_os_intr_reg In powernv, send intrs to HV + * ->rsvd_txbuf_count Not supported yet. 
+ * ->notify_disable False for NX windows + * ->xtra_write False for NX windows + * ->notify_early NA for NX windows + * ->lnotify_lpid NA for Tx windows + * ->lnotify_pid NA for Tx windows + * ->lnotify_tid NA for Tx windows + * ->tx_win_cred_mode Ignore for now for NX windows + * ->rx_win_cred_mode Ignore for now for NX windows + */ + memset(winctx, 0, sizeof(struct vas_winctx)); + + winctx->wcreds_max = txattr->wcreds_max ?: VAS_WCREDS_DEFAULT; + + winctx->user_win = txattr->user_win; + winctx->nx_win = txwin->rxwin->nx_win; + winctx->pin_win = txattr->pin_win; + + winctx->rx_wcred_mode = txattr->rx_wcred_mode; + winctx->tx_wcred_mode = txattr->tx_wcred_mode; + winctx->rx_word_mode = txattr->rx_win_ord_mode; + winctx->tx_word_mode = txattr->tx_win_ord_mode; + + if (winctx->nx_win) { + winctx->data_stamp = true; + winctx->intr_disable = true; + } + + winctx->lpid = txattr->lpid; + winctx->pidr = txattr->pidr; + winctx->rx_win_id = txwin->rxwin->winid; + + winctx->dma_type = VAS_DMA_TYPE_INJECT; + winctx->tc_mode = txattr->tc_mode; + winctx->min_scope = VAS_SCOPE_LOCAL; + winctx->max_scope = VAS_SCOPE_VECTORED_GROUP; + + winctx->pswid = 0; +} + +static bool tx_win_args_valid(enum vas_cop_type cop, + struct vas_tx_win_attr *attr) +{ + if (attr->tc_mode != VAS_THRESH_DISABLED) + return false; + + if (cop > VAS_COP_TYPE_MAX) + return false; + + if (attr->user_win && + (cop != VAS_COP_TYPE_FTW || attr->rsvd_txbuf_count)) + return false; + + return true; +} + +struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop, + struct vas_tx_win_attr *attr) +{ + int rc; + struct vas_window *txwin; + struct vas_window *rxwin; + struct vas_winctx winctx; + struct vas_instance *vinst; + + if (!tx_win_args_valid(cop, attr)) + return ERR_PTR(-EINVAL); + + vinst = find_vas_instance(vasid); + if (!vinst) { + pr_devel("vasid %d not found!\n", vasid); + return ERR_PTR(-EINVAL); + } + + rxwin = get_vinst_rxwin(vinst, cop, attr->pswid); + if (IS_ERR(rxwin)) { + pr_devel("No RxWin for vasid %d, cop %d\n", vasid, cop); + return rxwin; + } + + txwin = vas_window_alloc(vinst); + if (IS_ERR(txwin)) { + rc = PTR_ERR(txwin); + goto put_rxwin; + } + + txwin->tx_win = 1; + txwin->rxwin = rxwin; + txwin->nx_win = txwin->rxwin->nx_win; + txwin->pid = attr->pid; + txwin->user_win = attr->user_win; + + init_winctx_for_txwin(txwin, attr, &winctx); + + init_winctx_regs(txwin, &winctx); + + /* + * If its a kernel send window, map the window address into the + * kernel's address space. For user windows, user must issue an + * mmap() to map the window into their address space. + * + * NOTE: If kernel ever resubmits a user CRB after handling a page + * fault, we will need to map this into kernel as well. + */ + if (!txwin->user_win) { + txwin->paste_kaddr = map_paste_region(txwin); + if (IS_ERR(txwin->paste_kaddr)) { + rc = PTR_ERR(txwin->paste_kaddr); + goto free_window; + } + } + + set_vinst_win(vinst, txwin); + + return txwin; + +free_window: + vas_window_free(txwin); + +put_rxwin: + put_rx_win(rxwin); + return ERR_PTR(rc); + +} +EXPORT_SYMBOL_GPL(vas_tx_win_open); + +int vas_copy_crb(void *crb, int offset) +{ + return vas_copy(crb, offset); +} +EXPORT_SYMBOL_GPL(vas_copy_crb); + +#define RMA_LSMP_REPORT_ENABLE PPC_BIT(53) +int vas_paste_crb(struct vas_window *txwin, int offset, bool re) +{ + int rc; + void *addr; + uint64_t val; + + /* + * Only NX windows are supported for now and hardware assumes + * report-enable flag is set for NX windows. Ensure software + * complies too. 
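+ *
+ * A minimal submission sketch for an in-kernel sender (illustrative
+ * only; crb points to a completed, properly aligned Coprocessor Request
+ * Block and txwin came from vas_tx_win_open()):
+ *
+ *	vas_copy_crb(crb, 0);
+ *	rc = vas_paste_crb(txwin, 0, true);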
+ */ + WARN_ON_ONCE(txwin->nx_win && !re); + + addr = txwin->paste_kaddr; + if (re) { + /* + * Set the REPORT_ENABLE bit (equivalent to writing + * to 1K offset of the paste address) + */ + val = SET_FIELD(RMA_LSMP_REPORT_ENABLE, 0ULL, 1); + addr += val; + } + + /* + * Map the raw CR value from vas_paste() to an error code (there + * is just pass or fail for now though). + */ + rc = vas_paste(addr, offset); + if (rc == 2) + rc = 0; + else + rc = -EINVAL; + + print_fifo_msg_count(txwin); + + return rc; +} +EXPORT_SYMBOL_GPL(vas_paste_crb); + +static void poll_window_busy_state(struct vas_window *window) +{ + int busy; + u64 val; + +retry: + /* + * Poll Window Busy flag + */ + val = read_hvwc_reg(window, VREG(WIN_STATUS)); + busy = GET_FIELD(VAS_WIN_BUSY, val); + if (busy) { + val = 0; + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(HZ); + goto retry; + } +} + +static void poll_window_castout(struct vas_window *window) +{ + int cached; + u64 val; + + /* Cast window context out of the cache */ +retry: + val = read_hvwc_reg(window, VREG(WIN_CTX_CACHING_CTL)); + cached = GET_FIELD(VAS_WIN_CACHE_STATUS, val); + if (cached) { + val = 0ULL; + val = SET_FIELD(VAS_CASTOUT_REQ, val, 1); + val = SET_FIELD(VAS_PUSH_TO_MEM, val, 0); + write_hvwc_reg(window, VREG(WIN_CTX_CACHING_CTL), val); + + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(HZ); + goto retry; + } +} + +/* + * Close a window. + * + * See Section 1.12.1 of VAS workbook v1.05 for details on closing window: + * - Disable new paste operations (unmap paste address) + * - Poll for the "Window Busy" bit to be cleared + * - Clear the Open/Enable bit for the Window. + * - Poll for return of window Credits (implies FIFO empty for Rx win?) + * - Unpin and cast window context out of cache + * + * Besides the hardware, kernel has some bookkeeping of course. + */ +int vas_win_close(struct vas_window *window) +{ + u64 val; + + if (!window) + return 0; + + if (!window->tx_win && atomic_read(&window->num_txwins) != 0) { + pr_devel("Attempting to close an active Rx window!\n"); + WARN_ON_ONCE(1); + return -EBUSY; + } + + unmap_paste_region(window); + + clear_vinst_win(window); + + poll_window_busy_state(window); + + /* Unpin window from cache and close it */ + val = read_hvwc_reg(window, VREG(WINCTL)); + val = SET_FIELD(VAS_WINCTL_PIN, val, 0); + val = SET_FIELD(VAS_WINCTL_OPEN, val, 0); + write_hvwc_reg(window, VREG(WINCTL), val); + + poll_window_castout(window); + + /* if send window, drop reference to matching receive window */ + if (window->tx_win) + put_rx_win(window->rxwin); + + vas_window_free(window); + + return 0; +} +EXPORT_SYMBOL_GPL(vas_win_close); diff --git a/arch/powerpc/platforms/powernv/vas.c b/arch/powerpc/platforms/powernv/vas.c new file mode 100644 index 000000000000..565a4878fefa --- /dev/null +++ b/arch/powerpc/platforms/powernv/vas.c @@ -0,0 +1,151 @@ +/* + * Copyright 2016-17 IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#define pr_fmt(fmt) "vas: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "vas.h" + +static DEFINE_MUTEX(vas_mutex); +static LIST_HEAD(vas_instances); + +static int init_vas_instance(struct platform_device *pdev) +{ + int rc, vasid; + struct resource *res; + struct vas_instance *vinst; + struct device_node *dn = pdev->dev.of_node; + + rc = of_property_read_u32(dn, "ibm,vas-id", &vasid); + if (rc) { + pr_err("No ibm,vas-id property for %s?\n", pdev->name); + return -ENODEV; + } + + if (pdev->num_resources != 4) { + pr_err("Unexpected DT configuration for [%s, %d]\n", + pdev->name, vasid); + return -ENODEV; + } + + vinst = kzalloc(sizeof(*vinst), GFP_KERNEL); + if (!vinst) + return -ENOMEM; + + INIT_LIST_HEAD(&vinst->node); + ida_init(&vinst->ida); + mutex_init(&vinst->mutex); + vinst->vas_id = vasid; + vinst->pdev = pdev; + + res = &pdev->resource[0]; + vinst->hvwc_bar_start = res->start; + + res = &pdev->resource[1]; + vinst->uwc_bar_start = res->start; + + res = &pdev->resource[2]; + vinst->paste_base_addr = res->start; + + res = &pdev->resource[3]; + if (res->end > 62) { + pr_err("Bad 'paste_win_id_shift' in DT, %llx\n", res->end); + goto free_vinst; + } + + vinst->paste_win_id_shift = 63 - res->end; + + pr_devel("Initialized instance [%s, %d], paste_base 0x%llx, " + "paste_win_id_shift 0x%llx\n", pdev->name, vasid, + vinst->paste_base_addr, vinst->paste_win_id_shift); + + mutex_lock(&vas_mutex); + list_add(&vinst->node, &vas_instances); + mutex_unlock(&vas_mutex); + + dev_set_drvdata(&pdev->dev, vinst); + + return 0; + +free_vinst: + kfree(vinst); + return -ENODEV; + +} + +/* + * Although this is read/used multiple times, it is written to only + * during initialization. + */ +struct vas_instance *find_vas_instance(int vasid) +{ + struct list_head *ent; + struct vas_instance *vinst; + + mutex_lock(&vas_mutex); + list_for_each(ent, &vas_instances) { + vinst = list_entry(ent, struct vas_instance, node); + if (vinst->vas_id == vasid) { + mutex_unlock(&vas_mutex); + return vinst; + } + } + mutex_unlock(&vas_mutex); + + pr_devel("Instance %d not found\n", vasid); + return NULL; +} + +static int vas_probe(struct platform_device *pdev) +{ + return init_vas_instance(pdev); +} + +static const struct of_device_id powernv_vas_match[] = { + { .compatible = "ibm,vas",}, + {}, +}; + +static struct platform_driver vas_driver = { + .driver = { + .name = "vas", + .of_match_table = powernv_vas_match, + }, + .probe = vas_probe, +}; + +static int __init vas_init(void) +{ + int found = 0; + struct device_node *dn; + + platform_driver_register(&vas_driver); + + for_each_compatible_node(dn, NULL, "ibm,vas") { + of_platform_device_create(dn, NULL, NULL); + found++; + } + + if (!found) + return -ENODEV; + + pr_devel("Found %d instances\n", found); + + return 0; +} +device_initcall(vas_init); diff --git a/arch/powerpc/platforms/powernv/vas.h b/arch/powerpc/platforms/powernv/vas.h new file mode 100644 index 000000000000..38dee5d50f31 --- /dev/null +++ b/arch/powerpc/platforms/powernv/vas.h @@ -0,0 +1,467 @@ +/* + * Copyright 2016-17 IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#ifndef _VAS_H +#define _VAS_H +#include +#include +#include +#include + +/* + * Overview of Virtual Accelerator Switchboard (VAS). 
+ *
+ * VAS is a hardware "switchboard" that allows senders and receivers to
+ * exchange messages with _minimal_ kernel involvement. The receivers are
+ * typically NX coprocessor engines that perform compression or encryption
+ * in hardware, but receivers can also be other software threads.
+ *
+ * Senders are user/kernel threads that submit compression/encryption or
+ * other requests to the receivers. Senders must format their messages as
+ * Coprocessor Request Blocks (CRBs) and submit them using the "copy" and
+ * "paste" instructions which were introduced in Power9.
+ *
+ * A Power node can have (up to?) 8 Power chips. There is one instance of
+ * VAS in each Power9 chip. Each instance of VAS has 64K windows or ports.
+ * Senders and receivers must each connect to a separate window before they
+ * can exchange messages through the switchboard.
+ *
+ * Each window is described by two types of window contexts:
+ *
+ * Hypervisor Window Context (HVWC) of size VAS_HVWC_SIZE bytes
+ *
+ * OS/User Window Context (UWC) of size VAS_UWC_SIZE bytes.
+ *
+ * A window context can be viewed as a set of 64-bit registers. The settings
+ * in these registers configure/control/determine the behavior of the VAS
+ * hardware when messages are sent/received through the window. The registers
+ * in the HVWC are configured by the kernel while the registers in the UWC can
+ * be configured by the kernel or by the user space application that is using
+ * the window.
+ *
+ * The HVWCs for all windows on a specific instance of VAS are in a contiguous
+ * range of hardware addresses or Base Address Region (BAR) referred to as the
+ * HVWC BAR for the instance. Similarly, the UWCs for all windows on an instance
+ * are referred to as the UWC BAR for the instance.
+ *
+ * The two BARs for each instance are defined in the Power9 MMIO Ranges
+ * spreadsheet and are available to the kernel in the VAS node's "reg" property
+ * in the device tree:
+ *
+ * /proc/device-tree/vasm@.../reg
+ *
+ * (see vas_probe() for details on the reg property).
+ *
+ * The kernel maps the HVWC and UWC BAR regions into the kernel address
+ * space (hvwc_map and uwc_map). The kernel can then access the window
+ * contexts of a specific window using:
+ *
+ * hvwc = hvwc_map + winid * VAS_HVWC_SIZE.
+ * uwc = uwc_map + winid * VAS_UWC_SIZE.
+ *
+ * where winid is the window index (0..64K).
+ *
+ * As mentioned, a window context is used to "configure" a window. Besides
+ * this configuration address, each _send_ window also has a unique hardware
+ * "paste" address that is used to submit requests/CRBs (see vas_paste_crb()).
+ *
+ * The hardware paste address for a window is computed using the "paste
+ * base address" and "paste win id shift" reg properties in the VAS device
+ * tree node using:
+ *
+ * paste_addr = paste_base + ((winid << paste_win_id_shift))
+ *
+ * (again, see vas_probe() for ->paste_base_addr and ->paste_win_id_shift).
+ *
+ * The kernel maps this hardware address into the sender's address space,
+ * after which the sender can use the 'paste' instruction (new in Power9) to
+ * send a message (submit a request, aka a CRB) to the coprocessor.
+ *
+ * NOTE: In the initial version, senders can only be in-kernel drivers/threads.
+ * Support for user space threads will be added in follow-on patches.
+ *
+ * TODO: Do we need to map the UWC into user address space so they can return
+ * credits? It's N/A for NX but may be needed for other receive windows.
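+ *
+ * For example (hypothetical numbers, purely to illustrate the address
+ * arithmetic above): a send window with winid 5 on an instance whose
+ * paste_win_id_shift is 16 would have its HVWC at
+ * hvwc_map + 5 * VAS_HVWC_SIZE and its paste address at
+ * paste_base + (5 << 16) = paste_base + 0x50000.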
+ * + */ + +#define VAS_WINDOWS_PER_CHIP (64 << 10) + +/* + * Hypervisor and OS/USer Window Context sizes + */ +#define VAS_HVWC_SIZE 512 +#define VAS_UWC_SIZE PAGE_SIZE + +/* + * Initial per-process credits. + * Max send window credits: 4K-1 (12-bits in VAS_TX_WCRED) + * Max receive window credits: 64K-1 (16 bits in VAS_LRX_WCRED) + * + * TODO: Needs tuning for per-process credits + */ +#define VAS_WCREDS_MIN 16 +#define VAS_WCREDS_MAX ((64 << 10) - 1) +#define VAS_WCREDS_DEFAULT (1 << 10) + +/* + * VAS Window Context Register Offsets and bitmasks. + * See Section 3.1.4 of VAS Work book + */ +#define VAS_LPID_OFFSET 0x010 +#define VAS_LPID PPC_BITMASK(0, 11) + +#define VAS_PID_OFFSET 0x018 +#define VAS_PID_ID PPC_BITMASK(0, 19) + +#define VAS_XLATE_MSR_OFFSET 0x020 +#define VAS_XLATE_MSR_DR PPC_BIT(0) +#define VAS_XLATE_MSR_TA PPC_BIT(1) +#define VAS_XLATE_MSR_PR PPC_BIT(2) +#define VAS_XLATE_MSR_US PPC_BIT(3) +#define VAS_XLATE_MSR_HV PPC_BIT(4) +#define VAS_XLATE_MSR_SF PPC_BIT(5) + +#define VAS_XLATE_LPCR_OFFSET 0x028 +#define VAS_XLATE_LPCR_PAGE_SIZE PPC_BITMASK(0, 2) +#define VAS_XLATE_LPCR_ISL PPC_BIT(3) +#define VAS_XLATE_LPCR_TC PPC_BIT(4) +#define VAS_XLATE_LPCR_SC PPC_BIT(5) + +#define VAS_XLATE_CTL_OFFSET 0x030 +#define VAS_XLATE_MODE PPC_BITMASK(0, 1) + +#define VAS_AMR_OFFSET 0x040 +#define VAS_AMR PPC_BITMASK(0, 63) + +#define VAS_SEIDR_OFFSET 0x048 +#define VAS_SEIDR PPC_BITMASK(0, 63) + +#define VAS_FAULT_TX_WIN_OFFSET 0x050 +#define VAS_FAULT_TX_WIN PPC_BITMASK(48, 63) + +#define VAS_OSU_INTR_SRC_RA_OFFSET 0x060 +#define VAS_OSU_INTR_SRC_RA PPC_BITMASK(8, 63) + +#define VAS_HV_INTR_SRC_RA_OFFSET 0x070 +#define VAS_HV_INTR_SRC_RA PPC_BITMASK(8, 63) + +#define VAS_PSWID_OFFSET 0x078 +#define VAS_PSWID_EA_HANDLE PPC_BITMASK(0, 31) + +#define VAS_SPARE1_OFFSET 0x080 +#define VAS_SPARE2_OFFSET 0x088 +#define VAS_SPARE3_OFFSET 0x090 +#define VAS_SPARE4_OFFSET 0x130 +#define VAS_SPARE5_OFFSET 0x160 +#define VAS_SPARE6_OFFSET 0x188 + +#define VAS_LFIFO_BAR_OFFSET 0x0A0 +#define VAS_LFIFO_BAR PPC_BITMASK(8, 53) +#define VAS_PAGE_MIGRATION_SELECT PPC_BITMASK(54, 56) + +#define VAS_LDATA_STAMP_CTL_OFFSET 0x0A8 +#define VAS_LDATA_STAMP PPC_BITMASK(0, 1) +#define VAS_XTRA_WRITE PPC_BIT(2) + +#define VAS_LDMA_CACHE_CTL_OFFSET 0x0B0 +#define VAS_LDMA_TYPE PPC_BITMASK(0, 1) +#define VAS_LDMA_FIFO_DISABLE PPC_BIT(2) + +#define VAS_LRFIFO_PUSH_OFFSET 0x0B8 +#define VAS_LRFIFO_PUSH PPC_BITMASK(0, 15) + +#define VAS_CURR_MSG_COUNT_OFFSET 0x0C0 +#define VAS_CURR_MSG_COUNT PPC_BITMASK(0, 7) + +#define VAS_LNOTIFY_AFTER_COUNT_OFFSET 0x0C8 +#define VAS_LNOTIFY_AFTER_COUNT PPC_BITMASK(0, 7) + +#define VAS_LRX_WCRED_OFFSET 0x0E0 +#define VAS_LRX_WCRED PPC_BITMASK(0, 15) + +#define VAS_LRX_WCRED_ADDER_OFFSET 0x190 +#define VAS_LRX_WCRED_ADDER PPC_BITMASK(0, 15) + +#define VAS_TX_WCRED_OFFSET 0x0F0 +#define VAS_TX_WCRED PPC_BITMASK(4, 15) + +#define VAS_TX_WCRED_ADDER_OFFSET 0x1A0 +#define VAS_TX_WCRED_ADDER PPC_BITMASK(4, 15) + +#define VAS_LFIFO_SIZE_OFFSET 0x100 +#define VAS_LFIFO_SIZE PPC_BITMASK(0, 3) + +#define VAS_WINCTL_OFFSET 0x108 +#define VAS_WINCTL_OPEN PPC_BIT(0) +#define VAS_WINCTL_REJ_NO_CREDIT PPC_BIT(1) +#define VAS_WINCTL_PIN PPC_BIT(2) +#define VAS_WINCTL_TX_WCRED_MODE PPC_BIT(3) +#define VAS_WINCTL_RX_WCRED_MODE PPC_BIT(4) +#define VAS_WINCTL_TX_WORD_MODE PPC_BIT(5) +#define VAS_WINCTL_RX_WORD_MODE PPC_BIT(6) +#define VAS_WINCTL_RSVD_TXBUF PPC_BIT(7) +#define VAS_WINCTL_THRESH_CTL PPC_BITMASK(8, 9) +#define VAS_WINCTL_FAULT_WIN PPC_BIT(10) +#define VAS_WINCTL_NX_WIN PPC_BIT(11) + 
+#define VAS_WIN_STATUS_OFFSET 0x110
+#define VAS_WIN_BUSY PPC_BIT(1)
+
+#define VAS_WIN_CTX_CACHING_CTL_OFFSET 0x118
+#define VAS_CASTOUT_REQ PPC_BIT(0)
+#define VAS_PUSH_TO_MEM PPC_BIT(1)
+#define VAS_WIN_CACHE_STATUS PPC_BIT(4)
+
+#define VAS_TX_RSVD_BUF_COUNT_OFFSET 0x120
+#define VAS_RXVD_BUF_COUNT PPC_BITMASK(58, 63)
+
+#define VAS_LRFIFO_WIN_PTR_OFFSET 0x128
+#define VAS_LRX_WIN_ID PPC_BITMASK(0, 15)
+
+/*
+ * Local Notification Control Register controls what happens in _response_
+ * to a paste command and hence applies only to receive windows.
+ */
+#define VAS_LNOTIFY_CTL_OFFSET 0x138
+#define VAS_NOTIFY_DISABLE PPC_BIT(0)
+#define VAS_INTR_DISABLE PPC_BIT(1)
+#define VAS_NOTIFY_EARLY PPC_BIT(2)
+#define VAS_NOTIFY_OSU_INTR PPC_BIT(3)
+
+#define VAS_LNOTIFY_PID_OFFSET 0x140
+#define VAS_LNOTIFY_PID PPC_BITMASK(0, 19)
+
+#define VAS_LNOTIFY_LPID_OFFSET 0x148
+#define VAS_LNOTIFY_LPID PPC_BITMASK(0, 11)
+
+#define VAS_LNOTIFY_TID_OFFSET 0x150
+#define VAS_LNOTIFY_TID PPC_BITMASK(0, 15)
+
+#define VAS_LNOTIFY_SCOPE_OFFSET 0x158
+#define VAS_LNOTIFY_MIN_SCOPE PPC_BITMASK(0, 1)
+#define VAS_LNOTIFY_MAX_SCOPE PPC_BITMASK(2, 3)
+
+#define VAS_NX_UTIL_OFFSET 0x1B0
+#define VAS_NX_UTIL PPC_BITMASK(0, 63)
+
+/* SE: Side effects */
+#define VAS_NX_UTIL_SE_OFFSET 0x1B8
+#define VAS_NX_UTIL_SE PPC_BITMASK(0, 63)
+
+#define VAS_NX_UTIL_ADDER_OFFSET 0x180
+#define VAS_NX_UTIL_ADDER PPC_BITMASK(32, 63)
+
+/*
+ * Local Notify Scope Control Register. (Receive windows only).
+ */
+enum vas_notify_scope {
+ VAS_SCOPE_LOCAL,
+ VAS_SCOPE_GROUP,
+ VAS_SCOPE_VECTORED_GROUP,
+ VAS_SCOPE_UNUSED,
+};
+
+/*
+ * Local DMA Cache Control Register (Receive windows only).
+ */
+enum vas_dma_type {
+ VAS_DMA_TYPE_INJECT,
+ VAS_DMA_TYPE_WRITE,
+};
+
+/*
+ * Local Notify After Count Register. (Receive windows only).
+ * Not applicable to NX receive windows.
+ */
+enum vas_notify_after_count {
+ VAS_NOTIFY_AFTER_256 = 0,
+ VAS_NOTIFY_NONE,
+ VAS_NOTIFY_AFTER_2
+};
+
+/*
+ * One per instance of VAS. Each instance will have a separate set of
+ * receive windows, one per coprocessor type.
+ *
+ * See also function header of set_vinst_win() for details on ->windows[]
+ * and ->rxwin[] tables.
+ */
+struct vas_instance {
+ int vas_id;
+ struct ida ida;
+ struct list_head node;
+ struct platform_device *pdev;
+
+ u64 hvwc_bar_start;
+ u64 uwc_bar_start;
+ u64 paste_base_addr;
+ u64 paste_win_id_shift;
+
+ struct mutex mutex;
+ struct vas_window *rxwin[VAS_COP_TYPE_MAX];
+ struct vas_window *windows[VAS_WINDOWS_PER_CHIP];
+};
+
+/*
+ * In-kernel state of a VAS window. One per window.
+ */
+struct vas_window {
+ /* Fields common to send and receive windows */
+ struct vas_instance *vinst;
+ int winid;
+ bool tx_win; /* True if send window */
+ bool nx_win; /* True if NX window */
+ bool user_win; /* True if user space window */
+ void *hvwc_map; /* HV window context */
+ void *uwc_map; /* OS/User window context */
+ pid_t pid; /* Linux process id of owner */
+
+ /* Fields applicable only to send windows */
+ void *paste_kaddr;
+ char *paste_addr_name;
+ struct vas_window *rxwin;
+
+ /* Fields applicable only to receive windows */
+ enum vas_cop_type cop;
+ atomic_t num_txwins;
+};
+
+/*
+ * Container for the hardware state of a window. One per window.
+ *
+ * A VAS Window context is a 512-byte area in the hardware that contains
+ * a set of 64-bit registers. Individual bit-fields in these registers
+ * determine the configuration/operation of the hardware.
struct vas_winctx + * is a container for the register fields in the window context. + */ +struct vas_winctx { + void *rx_fifo; + int rx_fifo_size; + int wcreds_max; + int rsvd_txbuf_count; + + bool user_win; + bool nx_win; + bool fault_win; + bool rsvd_txbuf_enable; + bool pin_win; + bool rej_no_credit; + bool tx_wcred_mode; + bool rx_wcred_mode; + bool tx_word_mode; + bool rx_word_mode; + bool data_stamp; + bool xtra_write; + bool notify_disable; + bool intr_disable; + bool fifo_disable; + bool notify_early; + bool notify_os_intr_reg; + + int lpid; + int pidr; /* value from SPRN_PID, not linux pid */ + int lnotify_lpid; + int lnotify_pid; + int lnotify_tid; + u32 pswid; + int rx_win_id; + int fault_win_id; + int tc_mode; + + u64 irq_port; + + enum vas_dma_type dma_type; + enum vas_notify_scope min_scope; + enum vas_notify_scope max_scope; + enum vas_notify_after_count notify_after_count; +}; + +extern struct vas_instance *find_vas_instance(int vasid); + +/* + * VREG(x): + * Expand a register's short name (eg: LPID) into two parameters: + * - the register's short name in string form ("LPID"), and + * - the name of the macro (eg: VAS_LPID_OFFSET), defining the + * register's offset in the window context + */ +#define VREG_SFX(n, s) __stringify(n), VAS_##n##s +#define VREG(r) VREG_SFX(r, _OFFSET) + +#ifdef vas_debug +static inline void dump_rx_win_attr(struct vas_rx_win_attr *attr) +{ + pr_err("fault %d, notify %d, intr %d early %d\n", + attr->fault_win, attr->notify_disable, + attr->intr_disable, attr->notify_early); + + pr_err("rx_fifo_size %d, max value %d\n", + attr->rx_fifo_size, VAS_RX_FIFO_SIZE_MAX); +} + +static inline void vas_log_write(struct vas_window *win, char *name, + void *regptr, u64 val) +{ + if (val) + pr_err("%swin #%d: %s reg %p, val 0x%016llx\n", + win->tx_win ? 
"Tx" : "Rx", win->winid, name, + regptr, val); +} + +#else /* vas_debug */ + +#define vas_log_write(win, name, reg, val) +#define dump_rx_win_attr(attr) + +#endif /* vas_debug */ + +static inline void write_uwc_reg(struct vas_window *win, char *name, + s32 reg, u64 val) +{ + void *regptr; + + regptr = win->uwc_map + reg; + vas_log_write(win, name, regptr, val); + + out_be64(regptr, val); +} + +static inline void write_hvwc_reg(struct vas_window *win, char *name, + s32 reg, u64 val) +{ + void *regptr; + + regptr = win->hvwc_map + reg; + vas_log_write(win, name, regptr, val); + + out_be64(regptr, val); +} + +static inline u64 read_hvwc_reg(struct vas_window *win, + char *name __maybe_unused, s32 reg) +{ + return in_be64(win->hvwc_map+reg); +} + +#ifdef vas_debug + +static void print_fifo_msg_count(struct vas_window *txwin) +{ + uint64_t read_hvwc_reg(struct vas_window *w, char *n, uint64_t o); + pr_devel("Winid %d, Msg count %llu\n", txwin->winid, + (uint64_t)read_hvwc_reg(txwin, VREG(LRFIFO_PUSH))); +} +#else /* vas_debug */ + +#define print_fifo_msg_count(window) + +#endif /* vas_debug */ + +#endif /* _VAS_H */ diff --git a/arch/powerpc/platforms/ps3/repository.c b/arch/powerpc/platforms/ps3/repository.c index 814a7eaa7769..50dbaf24b1ee 100644 --- a/arch/powerpc/platforms/ps3/repository.c +++ b/arch/powerpc/platforms/ps3/repository.c @@ -170,14 +170,8 @@ int ps3_repository_read_bus_str(unsigned int bus_index, const char *bus_str, int ps3_repository_read_bus_id(unsigned int bus_index, u64 *bus_id) { - int result; - - result = read_node(PS3_LPAR_ID_PME, - make_first_field("bus", bus_index), - make_field("id", 0), - 0, 0, - bus_id, NULL); - return result; + return read_node(PS3_LPAR_ID_PME, make_first_field("bus", bus_index), + make_field("id", 0), 0, 0, bus_id, NULL); } int ps3_repository_read_bus_type(unsigned int bus_index, @@ -224,15 +218,9 @@ int ps3_repository_read_dev_str(unsigned int bus_index, int ps3_repository_read_dev_id(unsigned int bus_index, unsigned int dev_index, u64 *dev_id) { - int result; - - result = read_node(PS3_LPAR_ID_PME, - make_first_field("bus", bus_index), - make_field("dev", dev_index), - make_field("id", 0), - 0, - dev_id, NULL); - return result; + return read_node(PS3_LPAR_ID_PME, make_first_field("bus", bus_index), + make_field("dev", dev_index), make_field("id", 0), 0, + dev_id, NULL); } int ps3_repository_read_dev_type(unsigned int bus_index, diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c index 6244bc849469..9dabea6e1443 100644 --- a/arch/powerpc/platforms/ps3/setup.c +++ b/arch/powerpc/platforms/ps3/setup.c @@ -104,20 +104,6 @@ static void __noreturn ps3_halt(void) ps3_sys_manager_halt(); /* never returns */ } -static void ps3_panic(char *str) -{ - DBG("%s:%d %s\n", __func__, __LINE__, str); - - smp_send_stop(); - printk("\n"); - printk(" System does not reboot automatically.\n"); - printk(" Please press POWER button.\n"); - printk("\n"); - - while(1) - lv1_pause(1); -} - #if defined(CONFIG_FB_PS3) || defined(CONFIG_FB_PS3_MODULE) || \ defined(CONFIG_PS3_FLASH) || defined(CONFIG_PS3_FLASH_MODULE) static void __init prealloc(struct ps3_prealloc *p) @@ -269,7 +255,6 @@ define_machine(ps3) { .probe = ps3_probe, .setup_arch = ps3_setup_arch, .init_IRQ = ps3_init_IRQ, - .panic = ps3_panic, .get_boot_time = ps3_get_boot_time, .set_dabr = ps3_set_dabr, .calibrate_decr = ps3_calibrate_decr, diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig index 3a6dfd14f64b..71dd69d9ec64 100644 --- 
a/arch/powerpc/platforms/pseries/Kconfig +++ b/arch/powerpc/platforms/pseries/Kconfig @@ -7,6 +7,7 @@ config PPC_PSERIES select PCI select PCI_MSI select PPC_XICS + select PPC_XIVE_SPAPR select PPC_ICP_NATIVE select PPC_ICP_HV select PPC_ICS_RTAS diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index 39187696ee74..e45b5f10645a 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c @@ -254,22 +254,18 @@ struct device_node *dlpar_configure_connector(__be32 drc_index, return first_dn; } -int dlpar_attach_node(struct device_node *dn) +int dlpar_attach_node(struct device_node *dn, struct device_node *parent) { int rc; - dn->parent = pseries_of_derive_parent(dn->full_name); - if (IS_ERR(dn->parent)) - return PTR_ERR(dn->parent); + dn->parent = parent; rc = of_attach_node(dn); if (rc) { - printk(KERN_ERR "Failed to add device node %s\n", - dn->full_name); + printk(KERN_ERR "Failed to add device node %pOF\n", dn); return rc; } - of_node_put(dn->parent); return 0; } diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index 1eef46d9cf30..6b812ad990e4 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c @@ -247,14 +247,13 @@ static void *pseries_eeh_probe(struct pci_dn *pdn, void *data) /* Initialize the fake PE */ memset(&pe, 0, sizeof(struct eeh_pe)); - pe.phb = edev->phb; + pe.phb = pdn->phb; pe.config_addr = (pdn->busno << 16) | (pdn->devfn << 8); /* Enable EEH on the device */ ret = eeh_ops->set_option(&pe, EEH_OPT_ENABLE); if (!ret) { /* Retrieve PE address */ - edev->config_addr = (pdn->busno << 16) | (pdn->devfn << 8); edev->pe_config_addr = eeh_ops->get_pe_addr(&pe); pe.addr = edev->pe_config_addr; @@ -279,7 +278,6 @@ static void *pseries_eeh_probe(struct pci_dn *pdn, void *data) /* This device doesn't support EEH, but it may have an * EEH parent, in which case we mark it as supported. 
*/ - edev->config_addr = pdn_to_eeh_dev(pdn->parent)->config_addr; edev->pe_config_addr = pdn_to_eeh_dev(pdn->parent)->pe_config_addr; eeh_add_to_parent_pe(edev); } diff --git a/arch/powerpc/platforms/pseries/event_sources.c b/arch/powerpc/platforms/pseries/event_sources.c index 32187dc76730..6eeb0d4bab61 100644 --- a/arch/powerpc/platforms/pseries/event_sources.c +++ b/arch/powerpc/platforms/pseries/event_sources.c @@ -36,8 +36,8 @@ void request_event_sources_irqs(struct device_node *np, virqs[count] = irq_create_of_mapping(&oirq); if (!virqs[count]) { pr_err("event-sources: Unable to allocate " - "interrupt number for %s\n", - np->full_name); + "interrupt number for %pOF\n", + np); WARN_ON(1); } else { count++; @@ -48,7 +48,7 @@ void request_event_sources_irqs(struct device_node *np, for (i = 0; i < count; i++) { if (request_irq(virqs[i], handler, 0, name, NULL)) { pr_err("event-sources: Unable to request interrupt " - "%d for %s\n", virqs[i], np->full_name); + "%d for %pOF\n", virqs[i], np); WARN_ON(1); return; } diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index 6afd1efd3633..fadb95efbb9e 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include "pseries.h" @@ -109,7 +110,10 @@ static void pseries_mach_cpu_die(void) local_irq_disable(); idle_task_exit(); - xics_teardown_cpu(); + if (xive_enabled()) + xive_teardown_cpu(); + else + xics_teardown_cpu(); if (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) { set_cpu_current_state(cpu, CPU_STATE_INACTIVE); @@ -174,7 +178,10 @@ static int pseries_cpu_disable(void) boot_cpuid = cpumask_any(cpu_online_mask); /* FIXME: abstract this to not be platform specific later on */ - xics_migrate_irqs_away(); + if (xive_enabled()) + xive_smp_disable_cpu(); + else + xics_migrate_irqs_away(); return 0; } @@ -264,8 +271,8 @@ static int pseries_add_processor(struct device_node *np) /* If we get here, it most likely means that NR_CPUS is * less than the partition's max processors setting. 
*/ - printk(KERN_ERR "Cannot add cpu %s; this system configuration" - " supports %d logical cpus.\n", np->full_name, + printk(KERN_ERR "Cannot add cpu %pOF; this system configuration" + " supports %d logical cpus.\n", np, num_possible_cpus()); goto out_unlock; } @@ -455,15 +462,19 @@ static ssize_t dlpar_cpu_add(u32 drc_index) } dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent); - of_node_put(parent); if (!dn) { pr_warn("Failed call to configure-connector, drc index: %x\n", drc_index); dlpar_release_drc(drc_index); + of_node_put(parent); return -EINVAL; } - rc = dlpar_attach_node(dn); + rc = dlpar_attach_node(dn, parent); + + /* Regardless we are done with parent now */ + of_node_put(parent); + if (rc) { saved_rc = rc; pr_warn("Failed to attach node %s, rc: %d, drc index: %x\n", diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index ca9b2f4aaa22..1d48ab424bd9 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c @@ -336,7 +336,38 @@ static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb) return mem_block; } +static int dlpar_change_lmb_state(struct of_drconf_cell *lmb, bool online) +{ + struct memory_block *mem_block; + int rc; + + mem_block = lmb_to_memblock(lmb); + if (!mem_block) + return -EINVAL; + + if (online && mem_block->dev.offline) + rc = device_online(&mem_block->dev); + else if (!online && !mem_block->dev.offline) + rc = device_offline(&mem_block->dev); + else + rc = 0; + + put_device(&mem_block->dev); + + return rc; +} + +static int dlpar_online_lmb(struct of_drconf_cell *lmb) +{ + return dlpar_change_lmb_state(lmb, true); +} + #ifdef CONFIG_MEMORY_HOTREMOVE +static int dlpar_offline_lmb(struct of_drconf_cell *lmb) +{ + return dlpar_change_lmb_state(lmb, false); +} + static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size) { unsigned long block_sz, start_pfn; @@ -431,19 +462,13 @@ static int dlpar_add_lmb(struct of_drconf_cell *); static int dlpar_remove_lmb(struct of_drconf_cell *lmb) { - struct memory_block *mem_block; unsigned long block_sz; int nid, rc; if (!lmb_is_removable(lmb)) return -EINVAL; - mem_block = lmb_to_memblock(lmb); - if (!mem_block) - return -EINVAL; - - rc = device_offline(&mem_block->dev); - put_device(&mem_block->dev); + rc = dlpar_offline_lmb(lmb); if (rc) return rc; @@ -737,20 +762,6 @@ static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index, } #endif /* CONFIG_MEMORY_HOTREMOVE */ -static int dlpar_online_lmb(struct of_drconf_cell *lmb) -{ - struct memory_block *mem_block; - int rc; - - mem_block = lmb_to_memblock(lmb); - if (!mem_block) - return -EINVAL; - - rc = device_online(&mem_block->dev); - put_device(&mem_block->dev); - return rc; -} - static int dlpar_add_lmb(struct of_drconf_cell *lmb) { unsigned long block_sz; @@ -817,6 +828,9 @@ static int dlpar_memory_add_by_count(u32 lmbs_to_add, struct property *prop) return -EINVAL; for (i = 0; i < num_lmbs && lmbs_to_add != lmbs_added; i++) { + if (lmbs[i].flags & DRCONF_MEM_ASSIGNED) + continue; + rc = dlpar_acquire_drc(lmbs[i].drc_index); if (rc) continue; @@ -859,6 +873,7 @@ static int dlpar_memory_add_by_count(u32 lmbs_to_add, struct property *prop) lmbs[i].base_addr, lmbs[i].drc_index); lmbs[i].reserved = 0; } + rc = 0; } return rc; diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S index 74b5b8e239c8..c511a1743a44 100644 --- 
a/arch/powerpc/platforms/pseries/hvCall.S +++ b/arch/powerpc/platforms/pseries/hvCall.S @@ -23,7 +23,7 @@ .globl hcall_tracepoint_refcount hcall_tracepoint_refcount: - .llong 0 + .8byte 0 .section ".text" #endif diff --git a/arch/powerpc/platforms/pseries/ibmebus.c b/arch/powerpc/platforms/pseries/ibmebus.c index 52146b1356d2..408a86044133 100644 --- a/arch/powerpc/platforms/pseries/ibmebus.c +++ b/arch/powerpc/platforms/pseries/ibmebus.c @@ -150,8 +150,7 @@ static const struct dma_map_ops ibmebus_dma_ops = { static int ibmebus_match_path(struct device *dev, void *data) { struct device_node *dn = to_platform_device(dev)->dev.of_node; - return (dn->full_name && - (strcasecmp((char *)data, dn->full_name) == 0)); + return (of_find_node_by_path(data) == dn); } static int ibmebus_match_node(struct device *dev, void *data) @@ -395,7 +394,7 @@ static ssize_t devspec_show(struct device *dev, struct platform_device *ofdev; ofdev = to_platform_device(dev); - return sprintf(buf, "%s\n", ofdev->dev.of_node->full_name); + return sprintf(buf, "%pOF\n", ofdev->dev.of_node); } static DEVICE_ATTR_RO(devspec); diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index 8374adee27e3..7c181467d0ad 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -511,8 +511,8 @@ static void iommu_table_setparms(struct pci_controller *phb, basep = of_get_property(node, "linux,tce-base", NULL); sizep = of_get_property(node, "linux,tce-size", NULL); if (basep == NULL || sizep == NULL) { - printk(KERN_ERR "PCI_DMA: iommu_table_setparms: %s has " - "missing tce entries !\n", dn->full_name); + printk(KERN_ERR "PCI_DMA: iommu_table_setparms: %pOF has " + "missing tce entries !\n", dn); return; } @@ -587,7 +587,7 @@ static void pci_dma_bus_setup_pSeries(struct pci_bus *bus) dn = pci_bus_to_OF_node(bus); - pr_debug("pci_dma_bus_setup_pSeries: setting up bus %s\n", dn->full_name); + pr_debug("pci_dma_bus_setup_pSeries: setting up bus %pOF\n", dn); if (bus->self) { /* This is not a root bus, any setup will be done for the @@ -701,8 +701,8 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus) dn = pci_bus_to_OF_node(bus); - pr_debug("pci_dma_bus_setup_pSeriesLP: setting up bus %s\n", - dn->full_name); + pr_debug("pci_dma_bus_setup_pSeriesLP: setting up bus %pOF\n", + dn); /* Find nearest ibm,dma-window, walking up the device tree */ for (pdn = dn; pdn != NULL; pdn = pdn->parent) { @@ -718,8 +718,8 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus) ppci = PCI_DN(pdn); - pr_debug(" parent is %s, iommu_table: 0x%p\n", - pdn->full_name, ppci->table_group); + pr_debug(" parent is %pOF, iommu_table: 0x%p\n", + pdn, ppci->table_group); if (!ppci->table_group) { ppci->table_group = iommu_pseries_alloc_group(ppci->phb->node); @@ -817,28 +817,28 @@ static void remove_ddw(struct device_node *np, bool remove_prop) ret = tce_clearrange_multi_pSeriesLP(0, 1ULL << (be32_to_cpu(dwp->window_shift) - PAGE_SHIFT), dwp); if (ret) - pr_warning("%s failed to clear tces in window.\n", - np->full_name); + pr_warning("%pOF failed to clear tces in window.\n", + np); else - pr_debug("%s successfully cleared tces in window.\n", - np->full_name); + pr_debug("%pOF successfully cleared tces in window.\n", + np); ret = rtas_call(ddw_avail[2], 1, 1, NULL, liobn); if (ret) - pr_warning("%s: failed to remove direct window: rtas returned " + pr_warning("%pOF: failed to remove direct window: rtas returned " "%d to ibm,remove-pe-dma-window(%x) %llx\n", - 
np->full_name, ret, ddw_avail[2], liobn); + np, ret, ddw_avail[2], liobn); else - pr_debug("%s: successfully removed direct window: rtas returned " + pr_debug("%pOF: successfully removed direct window: rtas returned " "%d to ibm,remove-pe-dma-window(%x) %llx\n", - np->full_name, ret, ddw_avail[2], liobn); + np, ret, ddw_avail[2], liobn); delprop: if (remove_prop) ret = of_remove_property(np, win64); if (ret) - pr_warning("%s: failed to remove direct window property: %d\n", - np->full_name, ret); + pr_warning("%pOF: failed to remove direct window property: %d\n", + np, ret); } static u64 find_existing_ddw(struct device_node *pdn) @@ -1004,7 +1004,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn) * list. */ list_for_each_entry(fpdn, &failed_ddw_pdn_list, list) { - if (!strcmp(fpdn->pdn->full_name, pdn->full_name)) + if (fpdn->pdn == pdn) goto out_unlock; } @@ -1087,8 +1087,8 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn) ddwprop->tce_shift = cpu_to_be32(page_shift); ddwprop->window_shift = cpu_to_be32(len); - dev_dbg(&dev->dev, "created tce table LIOBN 0x%x for %s\n", - create.liobn, dn->full_name); + dev_dbg(&dev->dev, "created tce table LIOBN 0x%x for %pOF\n", + create.liobn, dn); window = kzalloc(sizeof(*window), GFP_KERNEL); if (!window) @@ -1097,15 +1097,15 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn) ret = walk_system_ram_range(0, memblock_end_of_DRAM() >> PAGE_SHIFT, win64->value, tce_setrange_multi_pSeriesLP_walk); if (ret) { - dev_info(&dev->dev, "failed to map direct window for %s: %d\n", - dn->full_name, ret); + dev_info(&dev->dev, "failed to map direct window for %pOF: %d\n", + dn, ret); goto out_free_window; } ret = of_add_property(pdn, win64); if (ret) { - dev_err(&dev->dev, "unable to add dma window property for %s: %d", - pdn->full_name, ret); + dev_err(&dev->dev, "unable to add dma window property for %pOF: %d", + pdn, ret); goto out_free_window; } @@ -1158,7 +1158,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) * already allocated. 
*/ dn = pci_device_to_OF_node(dev); - pr_debug(" node is %s\n", dn->full_name); + pr_debug(" node is %pOF\n", dn); for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->table_group; pdn = pdn->parent) { @@ -1169,11 +1169,11 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) if (!pdn || !PCI_DN(pdn)) { printk(KERN_WARNING "pci_dma_dev_setup_pSeriesLP: " - "no DMA window found for pci dev=%s dn=%s\n", - pci_name(dev), of_node_full_name(dn)); + "no DMA window found for pci dev=%s dn=%pOF\n", + pci_name(dev), dn); return; } - pr_debug(" parent is %s\n", pdn->full_name); + pr_debug(" parent is %pOF\n", pdn); pci = PCI_DN(pdn); if (!pci->table_group) { @@ -1213,7 +1213,7 @@ static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask) /* only attempt to use a new window if 64-bit DMA is requested */ if (!disable_ddw && dma_mask == DMA_BIT_MASK(64)) { dn = pci_device_to_OF_node(pdev); - dev_dbg(dev, "node is %s\n", dn->full_name); + dev_dbg(dev, "node is %pOF\n", dn); /* * the device tree might contain the dma-window properties diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c index 6681ac97fb18..eeb13429d685 100644 --- a/arch/powerpc/platforms/pseries/kexec.c +++ b/arch/powerpc/platforms/pseries/kexec.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -51,5 +52,8 @@ void pseries_kexec_cpu_down(int crash_shutdown, int secondary) } } - xics_kexec_teardown_cpu(secondary); + if (xive_enabled()) + xive_kexec_teardown_cpu(secondary); + else + xics_kexec_teardown_cpu(secondary); } diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c index 2da4851eff99..f7042ad492ba 100644 --- a/arch/powerpc/platforms/pseries/mobility.c +++ b/arch/powerpc/platforms/pseries/mobility.c @@ -226,10 +226,12 @@ static int add_dt_node(__be32 parent_phandle, __be32 drc_index) return -ENOENT; dn = dlpar_configure_connector(drc_index, parent_dn); - if (!dn) + if (!dn) { + of_node_put(parent_dn); return -ENOENT; + } - rc = dlpar_attach_node(dn); + rc = dlpar_attach_node(dn, parent_dn); if (rc) dlpar_free_cc_nodes(dn); diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c index 326ef0dd6038..b7496948129e 100644 --- a/arch/powerpc/platforms/pseries/msi.c +++ b/arch/powerpc/platforms/pseries/msi.c @@ -132,19 +132,14 @@ static void rtas_teardown_msi_irqs(struct pci_dev *pdev) static int check_req(struct pci_dev *pdev, int nvec, char *prop_name) { struct device_node *dn; - struct pci_dn *pdn; const __be32 *p; u32 req_msi; - pdn = pci_get_pdn(pdev); - if (!pdn) - return -ENODEV; - - dn = pdn->node; + dn = pci_device_to_OF_node(pdev); p = of_get_property(dn, prop_name, NULL); if (!p) { - pr_debug("rtas_msi: No %s on %s\n", prop_name, dn->full_name); + pr_debug("rtas_msi: No %s on %pOF\n", prop_name, dn); return -ENOENT; } @@ -182,8 +177,8 @@ static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total) while (dn) { p = of_get_property(dn, "ibm,pe-total-#msi", NULL); if (p) { - pr_debug("rtas_msi: found prop on dn %s\n", - dn->full_name); + pr_debug("rtas_msi: found prop on dn %pOF\n", + dn); *total = be32_to_cpup(p); return dn; } @@ -197,7 +192,6 @@ static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total) static struct device_node *find_pe_dn(struct pci_dev *dev, int *total) { struct device_node *dn; - struct pci_dn *pdn; struct eeh_dev *edev; /* Found our PE and assume 8 at that point. 
*/ @@ -210,8 +204,7 @@ static struct device_node *find_pe_dn(struct pci_dev *dev, int *total) edev = pdn_to_eeh_dev(PCI_DN(dn)); if (edev->pe) edev = list_first_entry(&edev->pe->edevs, struct eeh_dev, list); - pdn = eeh_dev_to_pdn(edev); - dn = pdn ? pdn->node : NULL; + dn = pci_device_to_OF_node(edev->pdev); if (!dn) return NULL; @@ -222,7 +215,7 @@ static struct device_node *find_pe_dn(struct pci_dev *dev, int *total) /* Hardcode of 8 for old firmwares */ *total = 8; - pr_debug("rtas_msi: using PE dn %s\n", dn->full_name); + pr_debug("rtas_msi: using PE dn %pOF\n", dn); return dn; } @@ -242,7 +235,7 @@ static void *count_non_bridge_devices(struct device_node *dn, void *data) const __be32 *p; u32 class; - pr_debug("rtas_msi: counting %s\n", dn->full_name); + pr_debug("rtas_msi: counting %pOF\n", dn); p = of_get_property(dn, "class-code", NULL); class = p ? be32_to_cpup(p) : 0; @@ -300,7 +293,7 @@ static int msi_quota_for_device(struct pci_dev *dev, int request) goto out; } - pr_debug("rtas_msi: found PE %s\n", pe_dn->full_name); + pr_debug("rtas_msi: found PE %pOF\n", pe_dn); memset(&counts, 0, sizeof(struct msi_counts)); diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c index 547fd13e4f8e..561917fa54a8 100644 --- a/arch/powerpc/platforms/pseries/pci_dlpar.c +++ b/arch/powerpc/platforms/pseries/pci_dlpar.c @@ -38,7 +38,7 @@ struct pci_controller *init_phb_dynamic(struct device_node *dn) { struct pci_controller *phb; - pr_debug("PCI: Initializing new hotplug PHB %s\n", dn->full_name); + pr_debug("PCI: Initializing new hotplug PHB %pOF\n", dn); phb = pcibios_alloc_controller(dn); if (!phb) diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h index 1361a9db534b..4470a3194311 100644 --- a/arch/powerpc/platforms/pseries/pseries.h +++ b/arch/powerpc/platforms/pseries/pseries.h @@ -46,7 +46,7 @@ extern void dlpar_free_cc_nodes(struct device_node *); extern void dlpar_free_cc_property(struct property *); extern struct device_node *dlpar_configure_connector(__be32, struct device_node *); -extern int dlpar_attach_node(struct device_node *); +extern int dlpar_attach_node(struct device_node *, struct device_node *); extern int dlpar_detach_node(struct device_node *); extern int dlpar_acquire_drc(u32 drc_index); extern int dlpar_release_drc(u32 drc_index); diff --git a/arch/powerpc/platforms/pseries/pseries_energy.c b/arch/powerpc/platforms/pseries/pseries_energy.c index 164a13d3998a..35c891aabef0 100644 --- a/arch/powerpc/platforms/pseries/pseries_energy.c +++ b/arch/powerpc/platforms/pseries/pseries_energy.c @@ -229,10 +229,9 @@ static int __init pseries_energy_init(void) int cpu, err; struct device *cpu_dev; - if (!firmware_has_feature(FW_FEATURE_BEST_ENERGY)) { - printk(KERN_INFO "Hypercall H_BEST_ENERGY not supported\n"); - return 0; - } + if (!firmware_has_feature(FW_FEATURE_BEST_ENERGY)) + return 0; /* H_BEST_ENERGY hcall not supported */ + /* Create the sysfs files */ err = device_create_file(cpu_subsys.dev_root, &attr_cpu_activate_hint_list); diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c index bb70b26334f0..4923ffe230cf 100644 --- a/arch/powerpc/platforms/pseries/ras.c +++ b/arch/powerpc/platforms/pseries/ras.c @@ -379,6 +379,21 @@ static void fwnmi_release_errinfo(void) int pSeries_system_reset_exception(struct pt_regs *regs) { +#ifdef __LITTLE_ENDIAN__ + /* + * Some firmware byteswaps SRR registers and gives incorrect SRR1. 
Try + * to detect the bad SRR1 pattern here. Flip the NIP back to correct + * endian for reporting purposes. Unfortunately the MSR can't be fixed, + * so clear it. It will be missing MSR_RI so we won't try to recover. + */ + if ((be64_to_cpu(regs->msr) & + (MSR_LE|MSR_RI|MSR_DR|MSR_IR|MSR_ME|MSR_PR| + MSR_ILE|MSR_HV|MSR_SF)) == (MSR_DR|MSR_SF)) { + regs->nip = be64_to_cpu((__be64)regs->nip); + regs->msr = 0; + } +#endif + if (fwnmi_active) { struct rtas_error_log *errhdr = fwnmi_get_errinfo(regs); if (errhdr) { diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c index 011ef2180fe6..296c188fd5ca 100644 --- a/arch/powerpc/platforms/pseries/reconfig.c +++ b/arch/powerpc/platforms/pseries/reconfig.c @@ -362,20 +362,13 @@ static int do_update_property(char *buf, size_t bufsize) static ssize_t ofdt_write(struct file *file, const char __user *buf, size_t count, loff_t *off) { - int rv = 0; + int rv; char *kbuf; char *tmp; - if (!(kbuf = kmalloc(count + 1, GFP_KERNEL))) { - rv = -ENOMEM; - goto out; - } - if (copy_from_user(kbuf, buf, count)) { - rv = -EFAULT; - goto out; - } - - kbuf[count] = '\0'; + kbuf = memdup_user_nul(buf, count); + if (IS_ERR(kbuf)) + return PTR_ERR(kbuf); tmp = strchr(kbuf, ' '); if (!tmp) { diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index b5d86426e97b..5f1beb8367ac 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -57,6 +57,7 @@ #include #include #include +#include #include #include #include @@ -176,8 +177,11 @@ static void __init pseries_setup_i8259_cascade(void) static void __init pseries_init_irq(void) { - xics_init(); - pseries_setup_i8259_cascade(); + /* Try using a XIVE if available, otherwise use a XICS */ + if (!xive_spapr_init()) { + xics_init(); + pseries_setup_i8259_cascade(); + } } static void pseries_lpar_enable_pmcs(void) @@ -722,7 +726,6 @@ define_machine(pseries) { .pcibios_fixup = pSeries_final_fixup, .restart = rtas_restart, .halt = rtas_halt, - .panic = rtas_os_term, .get_boot_time = rtas_get_boot_time, .get_rtc_time = rtas_get_rtc_time, .set_rtc_time = rtas_set_rtc_time, diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c index 24785f63fb40..2e184829e5d4 100644 --- a/arch/powerpc/platforms/pseries/smp.c +++ b/arch/powerpc/platforms/pseries/smp.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include #include @@ -136,7 +137,9 @@ static inline int smp_startup_cpu(unsigned int lcpu) static void smp_setup_cpu(int cpu) { - if (cpu != boot_cpuid) + if (xive_enabled()) + xive_smp_setup_cpu(); + else if (cpu != boot_cpuid) xics_setup_cpu(); if (firmware_has_feature(FW_FEATURE_SPLPAR)) @@ -181,6 +184,13 @@ static int smp_pSeries_kick_cpu(int nr) return 0; } +static int pseries_smp_prepare_cpu(int cpu) +{ + if (xive_enabled()) + return xive_smp_prepare_cpu(cpu); + return 0; +} + static void smp_pseries_cause_ipi(int cpu) { /* POWER9 should not use this handler */ @@ -211,7 +221,7 @@ static int pseries_cause_nmi_ipi(int cpu) return 0; } -static __init void pSeries_smp_probe(void) +static __init void pSeries_smp_probe_xics(void) { xics_smp_probe(); @@ -221,11 +231,24 @@ static __init void pSeries_smp_probe(void) smp_ops->cause_ipi = icp_ops->cause_ipi; } +static __init void pSeries_smp_probe(void) +{ + if (xive_enabled()) + /* + * Don't use P9 doorbells when XIVE is enabled. 
IPIs + * using MMIOs should be faster + */ + xive_smp_probe(); + else + pSeries_smp_probe_xics(); +} + static struct smp_ops_t pseries_smp_ops = { .message_pass = NULL, /* Use smp_muxed_ipi_message_pass */ .cause_ipi = NULL, /* Filled at runtime by pSeries_smp_probe() */ .cause_nmi_ipi = pseries_cause_nmi_ipi, .probe = pSeries_smp_probe, + .prepare_cpu = pseries_smp_prepare_cpu, .kick_cpu = smp_pSeries_kick_cpu, .setup_cpu = smp_setup_cpu, .cpu_bootable = smp_generic_cpu_bootable, diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c index e76aefae2aa2..89726f07d249 100644 --- a/arch/powerpc/platforms/pseries/suspend.c +++ b/arch/powerpc/platforms/pseries/suspend.c @@ -151,7 +151,7 @@ static ssize_t store_hibernate(struct device *dev, if (!capable(CAP_SYS_ADMIN)) return -EPERM; - if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY)) + if (!alloc_cpumask_var(&offline_mask, GFP_KERNEL)) return -ENOMEM; stream_id = simple_strtoul(buf, NULL, 16); diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c index 8a47f168476b..12277bc9fd9e 100644 --- a/arch/powerpc/platforms/pseries/vio.c +++ b/arch/powerpc/platforms/pseries/vio.c @@ -1357,14 +1357,14 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node) */ parent_node = of_get_parent(of_node); if (parent_node) { - if (!strcmp(parent_node->full_name, "/ibm,platform-facilities")) + if (!strcmp(parent_node->type, "ibm,platform-facilities")) family = PFO; - else if (!strcmp(parent_node->full_name, "/vdevice")) + else if (!strcmp(parent_node->type, "vdevice")) family = VDEVICE; else { - pr_warn("%s: parent(%s) of %s not recognized.\n", + pr_warn("%s: parent(%pOF) of %s not recognized.\n", __func__, - parent_node->full_name, + parent_node, of_node_name); of_node_put(parent_node); return NULL; @@ -1555,7 +1555,7 @@ static ssize_t devspec_show(struct device *dev, { struct device_node *of_node = dev->of_node; - return sprintf(buf, "%s\n", of_node_full_name(of_node)); + return sprintf(buf, "%pOF\n", of_node); } static DEVICE_ATTR_RO(devspec); diff --git a/arch/powerpc/purgatory/trampoline.S b/arch/powerpc/purgatory/trampoline.S index 3696ea6c4826..4aad9dd10ace 100644 --- a/arch/powerpc/purgatory/trampoline.S +++ b/arch/powerpc/purgatory/trampoline.S @@ -67,7 +67,7 @@ master: mr %r16,%r3 /* save dt address in reg16 */ li %r4,20 LWZX_BE %r6,%r3,%r4 /* fetch __be32 version number at byte 20 */ - cmpwi %r0,%r6,2 /* v2 or later? */ + cmpwi %cr0,%r6,2 /* v2 or later? */ blt 1f li %r4,28 STWX_BE %r17,%r3,%r4 /* Store my cpu as __be32 at byte 28 */ @@ -104,13 +104,13 @@ master: .balign 8 .globl kernel kernel: - .llong 0x0 + .8byte 0x0 .size kernel, . - kernel .balign 8 .globl dt_offset dt_offset: - .llong 0x0 + .8byte 0x0 .size dt_offset, . 
- dt_offset diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile index c0ae11d4f62f..79416fa2e3ba 100644 --- a/arch/powerpc/sysdev/Makefile +++ b/arch/powerpc/sysdev/Makefile @@ -36,25 +36,15 @@ obj-$(CONFIG_AXON_RAM) += axonram.o obj-$(CONFIG_PPC_INDIRECT_PCI) += indirect_pci.o obj-$(CONFIG_PPC_I8259) += i8259.o obj-$(CONFIG_IPIC) += ipic.o -obj-$(CONFIG_4xx) += uic.o -obj-$(CONFIG_PPC4xx_OCM) += ppc4xx_ocm.o -obj-$(CONFIG_4xx_SOC) += ppc4xx_soc.o obj-$(CONFIG_XILINX_VIRTEX) += xilinx_intc.o obj-$(CONFIG_XILINX_PCI) += xilinx_pci.o obj-$(CONFIG_OF_RTC) += of_rtc.o -ifeq ($(CONFIG_PCI),y) -obj-$(CONFIG_4xx) += ppc4xx_pci.o -endif -obj-$(CONFIG_PPC4xx_HSTA_MSI) += ppc4xx_hsta_msi.o -obj-$(CONFIG_PPC4xx_MSI) += ppc4xx_msi.o -obj-$(CONFIG_PPC4xx_CPM) += ppc4xx_cpm.o -obj-$(CONFIG_PPC4xx_GPIO) += ppc4xx_gpio.o obj-$(CONFIG_CPM) += cpm_common.o +obj-$(CONFIG_CPM1) += cpm1.o obj-$(CONFIG_CPM2) += cpm2.o cpm2_pic.o obj-$(CONFIG_QUICC_ENGINE) += cpm_common.o obj-$(CONFIG_PPC_DCR) += dcr.o -obj-$(CONFIG_8xx) += mpc8xx_pic.o cpm1.o obj-$(CONFIG_UCODE_PATCH) += micropatch.o obj-$(CONFIG_PPC_MPC512x) += mpc5xxx_clocks.o diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c index 2799706106c6..c60e84e4558d 100644 --- a/arch/powerpc/sysdev/axonram.c +++ b/arch/powerpc/sysdev/axonram.c @@ -110,7 +110,7 @@ axon_ram_irq_handler(int irq, void *dev) static blk_qc_t axon_ram_make_request(struct request_queue *queue, struct bio *bio) { - struct axon_ram_bank *bank = bio->bi_bdev->bd_disk->private_data; + struct axon_ram_bank *bank = bio->bi_disk->private_data; unsigned long phys_mem, phys_end; void *user_mem; struct bio_vec vec; @@ -188,15 +188,12 @@ static int axon_ram_probe(struct platform_device *device) axon_ram_bank_id++; - dev_info(&device->dev, "Found memory controller on %s\n", - device->dev.of_node->full_name); + dev_info(&device->dev, "Found memory controller on %pOF\n", + device->dev.of_node); - bank = kzalloc(sizeof(struct axon_ram_bank), GFP_KERNEL); - if (bank == NULL) { - dev_err(&device->dev, "Out of memory\n"); - rc = -ENOMEM; - goto failed; - } + bank = kzalloc(sizeof(*bank), GFP_KERNEL); + if (!bank) + return -ENOMEM; device->dev.platform_data = bank; @@ -292,25 +289,22 @@ static int axon_ram_probe(struct platform_device *device) return 0; failed: - if (bank != NULL) { - if (bank->irq_id) - free_irq(bank->irq_id, device); - if (bank->disk != NULL) { - if (bank->disk->major > 0) - unregister_blkdev(bank->disk->major, - bank->disk->disk_name); - if (bank->disk->flags & GENHD_FL_UP) - del_gendisk(bank->disk); - put_disk(bank->disk); - } - kill_dax(bank->dax_dev); - put_dax(bank->dax_dev); - device->dev.platform_data = NULL; - if (bank->io_addr != 0) - iounmap((void __iomem *) bank->io_addr); - kfree(bank); + if (bank->irq_id) + free_irq(bank->irq_id, device); + if (bank->disk != NULL) { + if (bank->disk->major > 0) + unregister_blkdev(bank->disk->major, + bank->disk->disk_name); + if (bank->disk->flags & GENHD_FL_UP) + del_gendisk(bank->disk); + put_disk(bank->disk); } - + kill_dax(bank->dax_dev); + put_dax(bank->dax_dev); + device->dev.platform_data = NULL; + if (bank->io_addr != 0) + iounmap((void __iomem *) bank->io_addr); + kfree(bank); return rc; } diff --git a/arch/powerpc/sysdev/dcr.c b/arch/powerpc/sysdev/dcr.c index 121e26fffd50..d72eda568b7d 100644 --- a/arch/powerpc/sysdev/dcr.c +++ b/arch/powerpc/sysdev/dcr.c @@ -195,8 +195,8 @@ dcr_host_mmio_t dcr_map_mmio(struct device_node *dev, dcr_host_mmio_t ret = { .token = NULL, .stride = 0, .base = 
dcr_n }; u64 addr; - pr_debug("dcr_map(%s, 0x%x, 0x%x)\n", - dev->full_name, dcr_n, dcr_c); + pr_debug("dcr_map(%pOF, 0x%x, 0x%x)\n", + dev, dcr_n, dcr_c); addr = of_translate_dcr_address(dev, dcr_n, &ret.stride); pr_debug("translates to addr: 0x%llx, stride: 0x%x\n", diff --git a/arch/powerpc/sysdev/fsl_85xx_cache_sram.c b/arch/powerpc/sysdev/fsl_85xx_cache_sram.c index 37a69097e022..00ccf3e4fcb4 100644 --- a/arch/powerpc/sysdev/fsl_85xx_cache_sram.c +++ b/arch/powerpc/sysdev/fsl_85xx_cache_sram.c @@ -101,8 +101,8 @@ int __init instantiate_cache_sram(struct platform_device *dev, if (!request_mem_region(cache_sram->base_phys, cache_sram->size, "fsl_85xx_cache_sram")) { - dev_err(&dev->dev, "%s: request memory failed\n", - dev->dev.of_node->full_name); + dev_err(&dev->dev, "%pOF: request memory failed\n", + dev->dev.of_node); ret = -ENXIO; goto out_free; } @@ -110,16 +110,16 @@ int __init instantiate_cache_sram(struct platform_device *dev, cache_sram->base_virt = ioremap_prot(cache_sram->base_phys, cache_sram->size, _PAGE_COHERENT | PAGE_KERNEL); if (!cache_sram->base_virt) { - dev_err(&dev->dev, "%s: ioremap_prot failed\n", - dev->dev.of_node->full_name); + dev_err(&dev->dev, "%pOF: ioremap_prot failed\n", + dev->dev.of_node); ret = -ENOMEM; goto out_release; } cache_sram->rh = rh_create(sizeof(unsigned int)); if (IS_ERR(cache_sram->rh)) { - dev_err(&dev->dev, "%s: Unable to create remote heap\n", - dev->dev.of_node->full_name); + dev_err(&dev->dev, "%pOF: Unable to create remote heap\n", + dev->dev.of_node); ret = PTR_ERR(cache_sram->rh); goto out_unmap; } diff --git a/arch/powerpc/sysdev/fsl_gtm.c b/arch/powerpc/sysdev/fsl_gtm.c index a6f0b96ce2c9..d902306f4718 100644 --- a/arch/powerpc/sysdev/fsl_gtm.c +++ b/arch/powerpc/sysdev/fsl_gtm.c @@ -388,8 +388,8 @@ static int __init fsl_gtm_init(void) gtm = kzalloc(sizeof(*gtm), GFP_KERNEL); if (!gtm) { - pr_err("%s: unable to allocate memory\n", - np->full_name); + pr_err("%pOF: unable to allocate memory\n", + np); continue; } @@ -397,7 +397,7 @@ static int __init fsl_gtm_init(void) clock = of_get_property(np, "clock-frequency", &size); if (!clock || size != sizeof(*clock)) { - pr_err("%s: no clock-frequency\n", np->full_name); + pr_err("%pOF: no clock-frequency\n", np); goto err; } gtm->clock = *clock; @@ -407,8 +407,8 @@ static int __init fsl_gtm_init(void) irq = irq_of_parse_and_map(np, i); if (!irq) { - pr_err("%s: not enough interrupts specified\n", - np->full_name); + pr_err("%pOF: not enough interrupts specified\n", + np); goto err; } gtm->timers[i].irq = irq; @@ -417,8 +417,8 @@ static int __init fsl_gtm_init(void) gtm->regs = of_iomap(np, 0); if (!gtm->regs) { - pr_err("%s: unable to iomap registers\n", - np->full_name); + pr_err("%pOF: unable to iomap registers\n", + np); goto err; } diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c index 8a244828782e..44cbf4c12ea1 100644 --- a/arch/powerpc/sysdev/fsl_msi.c +++ b/arch/powerpc/sysdev/fsl_msi.c @@ -214,8 +214,8 @@ static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) phandle = np->phandle; else { dev_err(&pdev->dev, - "node %s has an invalid fsl,msi phandle %u\n", - hose->dn->full_name, np->phandle); + "node %pOF has an invalid fsl,msi phandle %u\n", + hose->dn, np->phandle); return -EINVAL; } } @@ -438,16 +438,16 @@ static int fsl_of_msi_probe(struct platform_device *dev) if ((features->fsl_pic_ip & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC) { err = of_address_to_resource(dev->dev.of_node, 0, &res); if (err) { - dev_err(&dev->dev, "invalid 
resource for node %s\n", - dev->dev.of_node->full_name); + dev_err(&dev->dev, "invalid resource for node %pOF\n", + dev->dev.of_node); goto error_out; } msi->msi_regs = ioremap(res.start, resource_size(&res)); if (!msi->msi_regs) { err = -ENOMEM; - dev_err(&dev->dev, "could not map node %s\n", - dev->dev.of_node->full_name); + dev_err(&dev->dev, "could not map node %pOF\n", + dev->dev.of_node); goto error_out; } msi->msiir_offset = @@ -522,8 +522,8 @@ static int fsl_of_msi_probe(struct platform_device *dev) for (irq_index = 0, i = 0; i < len / (2 * sizeof(u32)); i++) { if (p[i * 2] % IRQS_PER_MSI_REG || p[i * 2 + 1] % IRQS_PER_MSI_REG) { - pr_warn("%s: %s: msi available range of %u at %u is not IRQ-aligned\n", - __func__, dev->dev.of_node->full_name, + pr_warn("%s: %pOF: msi available range of %u at %u is not IRQ-aligned\n", + __func__, dev->dev.of_node, p[i * 2 + 1], p[i * 2]); err = -EINVAL; goto error_out; diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c index d3a597456b6e..22d98057f773 100644 --- a/arch/powerpc/sysdev/fsl_pci.c +++ b/arch/powerpc/sysdev/fsl_pci.c @@ -202,7 +202,6 @@ static void setup_pci_atmu(struct pci_controller *hose) u32 pcicsrbar = 0, pcicsrbar_sz; u32 piwar = PIWAR_EN | PIWAR_PF | PIWAR_TGI_LOCAL | PIWAR_READ_SNOOP | PIWAR_WRITE_SNOOP; - const char *name = hose->dn->full_name; const u64 *reg; int len; bool setup_inbound; @@ -290,12 +289,12 @@ static void setup_pci_atmu(struct pci_controller *hose) paddr_lo -= offset; if (paddr_hi == paddr_lo) { - pr_err("%s: No outbound window space\n", name); + pr_err("%pOF: No outbound window space\n", hose->dn); return; } if (paddr_lo == 0) { - pr_err("%s: No space for inbound window\n", name); + pr_err("%pOF: No space for inbound window\n", hose->dn); return; } @@ -313,7 +312,7 @@ static void setup_pci_atmu(struct pci_controller *hose) paddr_lo = min(paddr_lo, (u64)pcicsrbar); - pr_info("%s: PCICSRBAR @ 0x%x\n", name, pcicsrbar); + pr_info("%pOF: PCICSRBAR @ 0x%x\n", hose->dn, pcicsrbar); /* Setup inbound mem window */ mem = memblock_end_of_DRAM(); @@ -336,12 +335,12 @@ static void setup_pci_atmu(struct pci_controller *hose) u64 address = be64_to_cpup(reg); if ((address >= mem) && (address < (mem + PAGE_SIZE))) { - pr_info("%s: extending DDR ATMU to cover MSIIR", name); + pr_info("%pOF: extending DDR ATMU to cover MSIIR", hose->dn); mem += PAGE_SIZE; } else { /* TODO: Create a new ATMU for MSIIR */ - pr_warn("%s: msi-address-64 address of %llx is " - "unsupported\n", name, address); + pr_warn("%pOF: msi-address-64 address of %llx is " + "unsupported\n", hose->dn, address); } } @@ -354,8 +353,8 @@ static void setup_pci_atmu(struct pci_controller *hose) if ((1ull << mem_log) != mem) { mem_log++; if ((1ull << mem_log) > mem) - pr_info("%s: Setting PCI inbound window " - "greater than memory size\n", name); + pr_info("%pOF: Setting PCI inbound window " + "greater than memory size\n", hose->dn); } piwar |= ((mem_log - 1) & PIWAR_SZ_MASK); @@ -402,7 +401,7 @@ static void setup_pci_atmu(struct pci_controller *hose) */ ppc_md.dma_set_mask = fsl_pci_dma_set_mask; - pr_info("%s: Setup 64-bit PCI DMA window\n", name); + pr_info("%pOF: Setup 64-bit PCI DMA window\n", hose->dn); } } else { u64 paddr = 0; @@ -443,18 +442,18 @@ static void setup_pci_atmu(struct pci_controller *hose) #ifdef CONFIG_SWIOTLB ppc_swiotlb_enable = 1; #else - pr_err("%s: ERROR: Memory size exceeds PCI ATMU ability to " + pr_err("%pOF: ERROR: Memory size exceeds PCI ATMU ability to " "map - enable CONFIG_SWIOTLB to avoid dma errors.\n", - 
name); + hose->dn); #endif /* adjusting outbound windows could reclaim space in mem map */ if (paddr_hi < 0xffffffffull) - pr_warning("%s: WARNING: Outbound window cfg leaves " + pr_warning("%pOF: WARNING: Outbound window cfg leaves " "gaps in memory map. Adjusting the memory map " "could reduce unnecessary bounce buffering.\n", - name); + hose->dn); - pr_info("%s: DMA window size is 0x%llx\n", name, + pr_info("%pOF: DMA window size is 0x%llx\n", hose->dn, (u64)hose->dma_window_size); } } @@ -532,11 +531,11 @@ int fsl_add_bridge(struct platform_device *pdev, int is_primary) dev = pdev->dev.of_node; if (!of_device_is_available(dev)) { - pr_warning("%s: disabled\n", dev->full_name); + pr_warning("%pOF: disabled\n", dev); return -ENODEV; } - pr_debug("Adding PCI host bridge %s\n", dev->full_name); + pr_debug("Adding PCI host bridge %pOF\n", dev); /* Fetch host bridge registers address */ if (of_address_to_resource(dev, 0, &rsrc)) { @@ -547,8 +546,8 @@ int fsl_add_bridge(struct platform_device *pdev, int is_primary) /* Get bus range if any */ bus_range = of_get_property(dev, "bus-range", &len); if (bus_range == NULL || len < 2 * sizeof(int)) - printk(KERN_WARNING "Can't get bus-range for %s, assume" - " bus 0\n", dev->full_name); + printk(KERN_WARNING "Can't get bus-range for %pOF, assume" + " bus 0\n", dev); pci_add_flags(PCI_REASSIGN_ALL_BUS); hose = pcibios_alloc_controller(dev); @@ -809,11 +808,11 @@ int __init mpc83xx_add_bridge(struct device_node *dev) is_mpc83xx_pci = 1; if (!of_device_is_available(dev)) { - pr_warning("%s: disabled by the firmware.\n", - dev->full_name); + pr_warning("%pOF: disabled by the firmware.\n", + dev); return -ENODEV; } - pr_debug("Adding PCI host bridge %s\n", dev->full_name); + pr_debug("Adding PCI host bridge %pOF\n", dev); /* Fetch host bridge registers address */ if (of_address_to_resource(dev, 0, &rsrc_reg)) { @@ -848,8 +847,8 @@ int __init mpc83xx_add_bridge(struct device_node *dev) /* Get bus range if any */ bus_range = of_get_property(dev, "bus-range", &len); if (bus_range == NULL || len < 2 * sizeof(int)) { - printk(KERN_WARNING "Can't get bus-range for %s, assume" - " bus 0\n", dev->full_name); + printk(KERN_WARNING "Can't get bus-range for %pOF, assume" + " bus 0\n", dev); } pci_add_flags(PCI_REASSIGN_ALL_BUS); diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c index 1c41c51f22cb..5011ffea4e4b 100644 --- a/arch/powerpc/sysdev/fsl_rio.c +++ b/arch/powerpc/sysdev/fsl_rio.c @@ -71,6 +71,8 @@ #define RIWAR_WRTYP_ALLOC 0x00006000 #define RIWAR_SIZE_MASK 0x0000003F +static DEFINE_SPINLOCK(fsl_rio_config_lock); + #define __fsl_read_rio_config(x, addr, err, op) \ __asm__ __volatile__( \ "1: "op" %1,0(%2)\n" \ @@ -184,6 +186,7 @@ fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid, u8 hopcount, u32 offset, int len, u32 *val) { struct rio_priv *priv = mport->priv; + unsigned long flags; u8 *data; u32 rval, err = 0; @@ -197,6 +200,8 @@ fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid, if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len)) return -EINVAL; + spin_lock_irqsave(&fsl_rio_config_lock, flags); + out_be32(&priv->maint_atmu_regs->rowtar, (destid << 22) | (hopcount << 12) | (offset >> 12)); out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10)); @@ -213,6 +218,7 @@ fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid, __fsl_read_rio_config(rval, data, err, "lwz"); break; default: + spin_unlock_irqrestore(&fsl_rio_config_lock, flags); return -EINVAL; } @@ -221,6 +227,7 @@ 
fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid, err, destid, hopcount, offset); } + spin_unlock_irqrestore(&fsl_rio_config_lock, flags); *val = rval; return err; @@ -244,7 +251,10 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid, u8 hopcount, u32 offset, int len, u32 val) { struct rio_priv *priv = mport->priv; + unsigned long flags; u8 *data; + int ret = 0; + pr_debug ("fsl_rio_config_write:" " index %d destid %d hopcount %d offset %8.8x len %d val %8.8x\n", @@ -255,6 +265,8 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid, if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len)) return -EINVAL; + spin_lock_irqsave(&fsl_rio_config_lock, flags); + out_be32(&priv->maint_atmu_regs->rowtar, (destid << 22) | (hopcount << 12) | (offset >> 12)); out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10)); @@ -271,10 +283,11 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid, out_be32((u32 *) data, val); break; default: - return -EINVAL; + ret = -EINVAL; } + spin_unlock_irqrestore(&fsl_rio_config_lock, flags); - return 0; + return ret; } static void fsl_rio_inbound_mem_init(struct rio_priv *priv) @@ -450,12 +463,12 @@ int fsl_rio_setup(struct platform_device *dev) rc = of_address_to_resource(dev->dev.of_node, 0, ®s); if (rc) { - dev_err(&dev->dev, "Can't get %s property 'reg'\n", - dev->dev.of_node->full_name); + dev_err(&dev->dev, "Can't get %pOF property 'reg'\n", + dev->dev.of_node); return -EFAULT; } - dev_info(&dev->dev, "Of-device full name %s\n", - dev->dev.of_node->full_name); + dev_info(&dev->dev, "Of-device full name %pOF\n", + dev->dev.of_node); dev_info(&dev->dev, "Regs: %pR\n", ®s); rio_regs_win = ioremap(regs.start, resource_size(®s)); @@ -494,8 +507,8 @@ int fsl_rio_setup(struct platform_device *dev) } rc = of_address_to_resource(rmu_node, 0, &rmu_regs); if (rc) { - dev_err(&dev->dev, "Can't get %s property 'reg'\n", - rmu_node->full_name); + dev_err(&dev->dev, "Can't get %pOF property 'reg'\n", + rmu_node); goto err_rmu; } rmu_regs_win = ioremap(rmu_regs.start, resource_size(&rmu_regs)); @@ -529,8 +542,8 @@ int fsl_rio_setup(struct platform_device *dev) aw = of_n_addr_cells(np); dt_range = of_get_property(np, "reg", &rlen); if (!dt_range) { - pr_err("%s: unable to find 'reg' property\n", - np->full_name); + pr_err("%pOF: unable to find 'reg' property\n", + np); rc = -ENOMEM; goto err_pw; } @@ -557,8 +570,8 @@ int fsl_rio_setup(struct platform_device *dev) aw = of_n_addr_cells(np); dt_range = of_get_property(np, "reg", &rlen); if (!dt_range) { - pr_err("%s: unable to find 'reg' property\n", - np->full_name); + pr_err("%pOF: unable to find 'reg' property\n", + np); rc = -ENOMEM; goto err; } @@ -569,15 +582,15 @@ int fsl_rio_setup(struct platform_device *dev) for_each_child_of_node(dev->dev.of_node, np) { port_index = of_get_property(np, "cell-index", NULL); if (!port_index) { - dev_err(&dev->dev, "Can't get %s property 'cell-index'\n", - np->full_name); + dev_err(&dev->dev, "Can't get %pOF property 'cell-index'\n", + np); continue; } dt_range = of_get_property(np, "ranges", &rlen); if (!dt_range) { - dev_err(&dev->dev, "Can't get %s property 'ranges'\n", - np->full_name); + dev_err(&dev->dev, "Can't get %pOF property 'ranges'\n", + np); continue; } @@ -598,8 +611,8 @@ int fsl_rio_setup(struct platform_device *dev) range_start = of_read_number(dt_range + aw, paw); range_size = of_read_number(dt_range + aw + paw, sw); - dev_info(&dev->dev, "%s: LAW start 0x%016llx, size 0x%016llx.\n", - np->full_name, 
range_start, range_size); + dev_info(&dev->dev, "%pOF: LAW start 0x%016llx, size 0x%016llx.\n", + np, range_start, range_size); port = kzalloc(sizeof(struct rio_mport), GFP_KERNEL); if (!port) @@ -757,8 +770,8 @@ int fsl_rio_setup(struct platform_device *dev) */ static int fsl_of_rio_rpn_probe(struct platform_device *dev) { - printk(KERN_INFO "Setting up RapidIO peer-to-peer network %s\n", - dev->dev.of_node->full_name); + printk(KERN_INFO "Setting up RapidIO peer-to-peer network %pOF\n", + dev->dev.of_node); return fsl_rio_setup(dev); }; diff --git a/arch/powerpc/sysdev/fsl_rmu.c b/arch/powerpc/sysdev/fsl_rmu.c index c1826de4e749..88b35a3dcdc5 100644 --- a/arch/powerpc/sysdev/fsl_rmu.c +++ b/arch/powerpc/sysdev/fsl_rmu.c @@ -104,6 +104,8 @@ #define DOORBELL_MESSAGE_SIZE 0x08 +static DEFINE_SPINLOCK(fsl_rio_doorbell_lock); + struct rio_msg_regs { u32 omr; u32 osr; @@ -626,9 +628,13 @@ int fsl_rio_port_write_init(struct fsl_rio_pw *pw) int fsl_rio_doorbell_send(struct rio_mport *mport, int index, u16 destid, u16 data) { + unsigned long flags; + pr_debug("fsl_doorbell_send: index %d destid %4.4x data %4.4x\n", index, destid, data); + spin_lock_irqsave(&fsl_rio_doorbell_lock, flags); + /* In the serial version silicons, such as MPC8548, MPC8641, * below operations is must be. */ @@ -638,6 +644,8 @@ int fsl_rio_doorbell_send(struct rio_mport *mport, out_be32(&dbell->dbell_regs->oddatr, (index << 20) | data); out_be32(&dbell->dbell_regs->odmr, 0x00000001); + spin_unlock_irqrestore(&fsl_rio_doorbell_lock, flags); + return 0; } @@ -1074,8 +1082,8 @@ int fsl_rio_setup_rmu(struct rio_mport *mport, struct device_node *node) priv = mport->priv; if (!node) { - dev_warn(priv->dev, "Can't get %s property 'fsl,rmu'\n", - priv->dev->of_node->full_name); + dev_warn(priv->dev, "Can't get %pOF property 'fsl,rmu'\n", + priv->dev->of_node); return -EINVAL; } @@ -1086,8 +1094,8 @@ int fsl_rio_setup_rmu(struct rio_mport *mport, struct device_node *node) aw = of_n_addr_cells(node); msg_addr = of_get_property(node, "reg", &mlen); if (!msg_addr) { - pr_err("%s: unable to find 'reg' property of message-unit\n", - node->full_name); + pr_err("%pOF: unable to find 'reg' property of message-unit\n", + node); kfree(rmu); return -ENOMEM; } @@ -1098,8 +1106,8 @@ int fsl_rio_setup_rmu(struct rio_mport *mport, struct device_node *node) rmu->txirq = irq_of_parse_and_map(node, 0); rmu->rxirq = irq_of_parse_and_map(node, 1); - printk(KERN_INFO "%s: txirq: %d, rxirq %d\n", - node->full_name, rmu->txirq, rmu->rxirq); + printk(KERN_INFO "%pOF: txirq: %d, rxirq %d\n", + node, rmu->txirq, rmu->rxirq); priv->rmm_handle = rmu; diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c index 19101f9cfcfc..1f614fb2be56 100644 --- a/arch/powerpc/sysdev/fsl_soc.c +++ b/arch/powerpc/sysdev/fsl_soc.c @@ -98,7 +98,7 @@ u32 fsl_get_sys_freq(void) } EXPORT_SYMBOL(fsl_get_sys_freq); -#if defined(CONFIG_CPM2) || defined(CONFIG_QUICC_ENGINE) || defined(CONFIG_8xx) +#if defined(CONFIG_CPM) || defined(CONFIG_QUICC_ENGINE) u32 get_brgfreq(void) { diff --git a/arch/powerpc/sysdev/fsl_soc.h b/arch/powerpc/sysdev/fsl_soc.h index d73daa4f0ccf..2640446f8bc4 100644 --- a/arch/powerpc/sysdev/fsl_soc.h +++ b/arch/powerpc/sysdev/fsl_soc.h @@ -7,7 +7,7 @@ struct spi_device; extern phys_addr_t get_immrbase(void); -#if defined(CONFIG_CPM2) || defined(CONFIG_QUICC_ENGINE) || defined(CONFIG_8xx) +#if defined(CONFIG_CPM) || defined(CONFIG_QUICC_ENGINE) extern u32 get_brgfreq(void); extern u32 get_baudrate(void); #else diff --git 
a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c index f267ee0afc08..16f1edd78c40 100644 --- a/arch/powerpc/sysdev/ipic.c +++ b/arch/powerpc/sysdev/ipic.c @@ -315,6 +315,7 @@ static struct ipic_info ipic_info[] = { .prio_mask = 7, }, [48] = { + .ack = IPIC_SEPNR, .mask = IPIC_SEMSR, .prio = IPIC_SMPRR_A, .force = IPIC_SEFCR, diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index b9aac951a90f..ead3e2549ebf 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c @@ -1650,8 +1650,8 @@ void __init mpic_init(struct mpic *mpic) if (mpic->flags & MPIC_SECONDARY) { int virq = irq_of_parse_and_map(mpic->node, 0); if (virq) { - printk(KERN_INFO "%s: hooking up to IRQ %d\n", - mpic->node->full_name, virq); + printk(KERN_INFO "%pOF: hooking up to IRQ %d\n", + mpic->node, virq); irq_set_handler_data(virq, mpic); irq_set_chained_handler(virq, &mpic_cascade); } diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c index db2286be5d9a..eb69a5186243 100644 --- a/arch/powerpc/sysdev/mpic_msgr.c +++ b/arch/powerpc/sysdev/mpic_msgr.c @@ -192,7 +192,7 @@ static int mpic_msgr_probe(struct platform_device *dev) return -ENOMEM; } } - dev_info(&dev->dev, "Of-device full name %s\n", np->full_name); + dev_info(&dev->dev, "Of-device full name %pOF\n", np); /* IO map the message register block. */ of_address_to_resource(np, 0, &rsrc); diff --git a/arch/powerpc/sysdev/mpic_msi.c b/arch/powerpc/sysdev/mpic_msi.c index 1d48a5385905..9ed860aee9c3 100644 --- a/arch/powerpc/sysdev/mpic_msi.c +++ b/arch/powerpc/sysdev/mpic_msi.c @@ -60,7 +60,7 @@ static int mpic_msi_reserve_u3_hwirqs(struct mpic *mpic) np = NULL; while ((np = of_find_all_nodes(np))) { - pr_debug("mpic: mapping hwirqs for %s\n", np->full_name); + pr_debug("mpic: mapping hwirqs for %pOF\n", np); index = 0; while (of_irq_parse_one(np, index++, &oirq) == 0) { diff --git a/arch/powerpc/sysdev/mpic_timer.c b/arch/powerpc/sysdev/mpic_timer.c index 9d9b06217f8b..a418579591be 100644 --- a/arch/powerpc/sysdev/mpic_timer.c +++ b/arch/powerpc/sysdev/mpic_timer.c @@ -466,8 +466,7 @@ static int timer_group_get_irq(struct device_node *np, p = of_get_property(np, "fsl,available-ranges", &len); if (p && len % (2 * sizeof(u32)) != 0) { - pr_err("%s: malformed available-ranges property.\n", - np->full_name); + pr_err("%pOF: malformed available-ranges property.\n", np); return -EINVAL; } @@ -484,8 +483,7 @@ static int timer_group_get_irq(struct device_node *np, for (j = 0; j < count; j++) { irq = irq_of_parse_and_map(np, irq_index); if (!irq) { - pr_err("%s: irq parse and map failed.\n", - np->full_name); + pr_err("%pOF: irq parse and map failed.\n", np); return -EINVAL; } @@ -508,8 +506,7 @@ static void timer_group_init(struct device_node *np) priv = kzalloc(sizeof(struct timer_group_priv), GFP_KERNEL); if (!priv) { - pr_err("%s: cannot allocate memory for group.\n", - np->full_name); + pr_err("%pOF: cannot allocate memory for group.\n", np); return; } @@ -518,29 +515,27 @@ static void timer_group_init(struct device_node *np) priv->regs = of_iomap(np, i++); if (!priv->regs) { - pr_err("%s: cannot ioremap timer register address.\n", - np->full_name); + pr_err("%pOF: cannot ioremap timer register address.\n", np); goto out; } if (priv->flags & FSL_GLOBAL_TIMER) { priv->group_tcr = of_iomap(np, i++); if (!priv->group_tcr) { - pr_err("%s: cannot ioremap tcr address.\n", - np->full_name); + pr_err("%pOF: cannot ioremap tcr address.\n", np); goto out; } } ret = timer_group_get_freq(np, priv); if (ret < 0) { - 
pr_err("%s: cannot get timer frequency.\n", np->full_name); + pr_err("%pOF: cannot get timer frequency.\n", np); goto out; } ret = timer_group_get_irq(np, priv); if (ret < 0) { - pr_err("%s: cannot get timer irqs.\n", np->full_name); + pr_err("%pOF: cannot get timer irqs.\n", np); goto out; } diff --git a/arch/powerpc/sysdev/msi_bitmap.c b/arch/powerpc/sysdev/msi_bitmap.c index 5ebd3f018295..c4dae27172b3 100644 --- a/arch/powerpc/sysdev/msi_bitmap.c +++ b/arch/powerpc/sysdev/msi_bitmap.c @@ -86,13 +86,13 @@ int msi_bitmap_reserve_dt_hwirqs(struct msi_bitmap *bmp) p = of_get_property(bmp->of_node, "msi-available-ranges", &len); if (!p) { pr_debug("msi_bitmap: no msi-available-ranges property " \ - "found on %s\n", bmp->of_node->full_name); + "found on %pOF\n", bmp->of_node); return 1; } if (len % (2 * sizeof(u32)) != 0) { printk(KERN_WARNING "msi_bitmap: Malformed msi-available-ranges" - " property on %s\n", bmp->of_node->full_name); + " property on %pOF\n", bmp->of_node); return -EINVAL; } diff --git a/arch/powerpc/sysdev/mv64x60_dev.c b/arch/powerpc/sysdev/mv64x60_dev.c index 026bbc3b2c47..185a67e742a6 100644 --- a/arch/powerpc/sysdev/mv64x60_dev.c +++ b/arch/powerpc/sysdev/mv64x60_dev.c @@ -452,8 +452,8 @@ static int __init mv64x60_device_setup(void) err = mv64x60_mpsc_device_setup(np, id++); if (err) printk(KERN_ERR "Failed to initialize MV64x60 " - "serial device %s: error %d.\n", - np->full_name, err); + "serial device %pOF: error %d.\n", + np, err); } id = 0; @@ -463,8 +463,8 @@ static int __init mv64x60_device_setup(void) if (IS_ERR(pdev)) { err = PTR_ERR(pdev); printk(KERN_ERR "Failed to initialize MV64x60 " - "network block %s: error %d.\n", - np->full_name, err); + "network block %pOF: error %d.\n", + np, err); continue; } for_each_child_of_node(np, np2) { @@ -474,9 +474,9 @@ static int __init mv64x60_device_setup(void) err = mv64x60_eth_device_setup(np2, id2++, pdev); if (err) printk(KERN_ERR "Failed to initialize " - "MV64x60 network device %s: " + "MV64x60 network device %pOF: " "error %d.\n", - np2->full_name, err); + np2, err); } } @@ -485,8 +485,8 @@ static int __init mv64x60_device_setup(void) err = mv64x60_i2c_device_setup(np, id++); if (err) printk(KERN_ERR "Failed to initialize MV64x60 I2C " - "bus %s: error %d.\n", - np->full_name, err); + "bus %pOF: error %d.\n", + np, err); } /* support up to one watchdog timer */ @@ -494,8 +494,8 @@ static int __init mv64x60_device_setup(void) if (np) { if ((err = mv64x60_wdt_device_setup(np, id))) printk(KERN_ERR "Failed to initialize MV64x60 " - "Watchdog %s: error %d.\n", - np->full_name, err); + "Watchdog %pOF: error %d.\n", + np, err); of_node_put(np); } diff --git a/arch/powerpc/sysdev/mv64x60_pci.c b/arch/powerpc/sysdev/mv64x60_pci.c index 330d56613c5a..d52b3b81e05f 100644 --- a/arch/powerpc/sysdev/mv64x60_pci.c +++ b/arch/powerpc/sysdev/mv64x60_pci.c @@ -70,7 +70,7 @@ static ssize_t mv64x60_hs_reg_write(struct file *filp, struct kobject *kobj, return count; } -static struct bin_attribute mv64x60_hs_reg_attr = { /* Hotswap register */ +static const struct bin_attribute mv64x60_hs_reg_attr = { /* Hotswap register */ .attr = { .name = "hs_reg", .mode = S_IRUGO | S_IWUSR, @@ -136,8 +136,8 @@ static int __init mv64x60_add_bridge(struct device_node *dev) /* Get bus range if any */ bus_range = of_get_property(dev, "bus-range", &len); if (bus_range == NULL || len < 2 * sizeof(int)) - printk(KERN_WARNING "Can't get bus-range for %s, assume" - " bus 0\n", dev->full_name); + printk(KERN_WARNING "Can't get bus-range for %pOF, assume" + 
" bus 0\n", dev); hose = pcibios_alloc_controller(dev); if (!hose) diff --git a/arch/powerpc/sysdev/of_rtc.c b/arch/powerpc/sysdev/of_rtc.c index 6f54b54b1328..153fdac4720f 100644 --- a/arch/powerpc/sysdev/of_rtc.c +++ b/arch/powerpc/sysdev/of_rtc.c @@ -38,21 +38,21 @@ void __init of_instantiate_rtc(void) res = kmalloc(sizeof(*res), GFP_KERNEL); if (!res) { printk(KERN_ERR "OF RTC: Out of memory " - "allocating resource structure for %s\n", - node->full_name); + "allocating resource structure for %pOF\n", + node); continue; } err = of_address_to_resource(node, 0, res); if (err) { printk(KERN_ERR "OF RTC: Error " - "translating resources for %s\n", - node->full_name); + "translating resources for %pOF\n", + node); continue; } - printk(KERN_INFO "OF_RTC: %s is a %s @ 0x%llx-0x%llx\n", - node->full_name, plat_name, + printk(KERN_INFO "OF_RTC: %pOF is a %s @ 0x%llx-0x%llx\n", + node, plat_name, (unsigned long long)res->start, (unsigned long long)res->end); platform_device_register_simple(plat_name, -1, res, 1); diff --git a/arch/powerpc/sysdev/scom.c b/arch/powerpc/sysdev/scom.c index 76ea32c1b664..0f6fd5d04d33 100644 --- a/arch/powerpc/sysdev/scom.c +++ b/arch/powerpc/sysdev/scom.c @@ -194,12 +194,13 @@ static int scom_debug_init_one(struct dentry *root, struct device_node *dn, ent->dn = of_node_get(dn); snprintf(ent->name, 16, "%08x", i); - ent->path.data = (void*) dn->full_name; - ent->path.size = strlen(dn->full_name); + ent->path.data = (void*)kasprintf(GFP_KERNEL, "%pOF", dn); + ent->path.size = strlen((char *)ent->path.data); dir = debugfs_create_dir(ent->name, root); if (!dir) { of_node_put(dn); + kfree(ent->path.data); kfree(ent); return -1; } diff --git a/arch/powerpc/sysdev/simple_gpio.c b/arch/powerpc/sysdev/simple_gpio.c index 6afddae2fb47..f02d4576138c 100644 --- a/arch/powerpc/sysdev/simple_gpio.c +++ b/arch/powerpc/sysdev/simple_gpio.c @@ -142,7 +142,6 @@ void __init simple_gpiochip_init(const char *compatible) } continue; err: - pr_err("%s: registration failed, status %d\n", - np->full_name, ret); + pr_err("%pOF: registration failed, status %d\n", np, ret); } } diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c index 5692dd569b9b..28ff1f53cefc 100644 --- a/arch/powerpc/sysdev/tsi108_pci.c +++ b/arch/powerpc/sysdev/tsi108_pci.c @@ -213,8 +213,8 @@ int __init tsi108_setup_pci(struct device_node *dev, u32 cfg_phys, int primary) /* Get bus range if any */ bus_range = of_get_property(dev, "bus-range", &len); if (bus_range == NULL || len < 2 * sizeof(int)) { - printk(KERN_WARNING "Can't get bus-range for %s, assume" - " bus 0\n", dev->full_name); + printk(KERN_WARNING "Can't get bus-range for %pOF, assume" + " bus 0\n", dev); } hose = pcibios_alloc_controller(dev); diff --git a/arch/powerpc/sysdev/xive/Kconfig b/arch/powerpc/sysdev/xive/Kconfig index 12ccd7373d2f..3e3e25b5e30d 100644 --- a/arch/powerpc/sysdev/xive/Kconfig +++ b/arch/powerpc/sysdev/xive/Kconfig @@ -9,3 +9,8 @@ config PPC_XIVE_NATIVE default n select PPC_XIVE depends on PPC_POWERNV + +config PPC_XIVE_SPAPR + bool + default n + select PPC_XIVE diff --git a/arch/powerpc/sysdev/xive/Makefile b/arch/powerpc/sysdev/xive/Makefile index 3fab303fc169..536d6e5706e3 100644 --- a/arch/powerpc/sysdev/xive/Makefile +++ b/arch/powerpc/sysdev/xive/Makefile @@ -2,3 +2,4 @@ subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror obj-y += common.o obj-$(CONFIG_PPC_XIVE_NATIVE) += native.o +obj-$(CONFIG_PPC_XIVE_SPAPR) += spapr.o diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c 
index 6595462b1fc8..a3b8d7d1316e 100644 --- a/arch/powerpc/sysdev/xive/common.c +++ b/arch/powerpc/sysdev/xive/common.c @@ -40,7 +40,8 @@ #undef DEBUG_ALL #ifdef DEBUG_ALL -#define DBG_VERBOSE(fmt...) pr_devel(fmt) +#define DBG_VERBOSE(fmt, ...) pr_devel("cpu %d - " fmt, \ + smp_processor_id(), ## __VA_ARGS__) #else #define DBG_VERBOSE(fmt...) do { } while(0) #endif @@ -190,7 +191,7 @@ static u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek) * This is used to perform the magic loads from an ESB * described in xive.h */ -static u8 xive_poke_esb(struct xive_irq_data *xd, u32 offset) +static notrace u8 xive_esb_read(struct xive_irq_data *xd, u32 offset) { u64 val; @@ -198,13 +199,28 @@ static u8 xive_poke_esb(struct xive_irq_data *xd, u32 offset) if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG) offset |= offset << 4; - val = in_be64(xd->eoi_mmio + offset); + if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw) + val = xive_ops->esb_rw(xd->hw_irq, offset, 0, 0); + else + val = in_be64(xd->eoi_mmio + offset); return (u8)val; } +static void xive_esb_write(struct xive_irq_data *xd, u32 offset, u64 data) +{ + /* Handle HW errata */ + if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG) + offset |= offset << 4; + + if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw) + xive_ops->esb_rw(xd->hw_irq, offset, data, 1); + else + out_be64(xd->eoi_mmio + offset, data); +} + #ifdef CONFIG_XMON -static void xive_dump_eq(const char *name, struct xive_q *q) +static notrace void xive_dump_eq(const char *name, struct xive_q *q) { u32 i0, i1, idx; @@ -218,7 +234,7 @@ static void xive_dump_eq(const char *name, struct xive_q *q) q->toggle, i0, i1); } -void xmon_xive_do_dump(int cpu) +notrace void xmon_xive_do_dump(int cpu) { struct xive_cpu *xc = per_cpu(xive_cpu, cpu); @@ -227,7 +243,7 @@ void xmon_xive_do_dump(int cpu) xive_dump_eq("IRQ", &xc->queue[xive_irq_priority]); #ifdef CONFIG_SMP { - u64 val = xive_poke_esb(&xc->ipi_data, XIVE_ESB_GET); + u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET); xmon_printf(" IPI state: %x:%c%c\n", xc->hw_ipi, val & XIVE_ESB_VAL_P ? 'P' : 'p', val & XIVE_ESB_VAL_P ? 'Q' : 'q'); @@ -297,7 +313,7 @@ void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd) { /* If the XIVE supports the new "store EOI facility, use it */ if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI) - out_be64(xd->eoi_mmio + XIVE_ESB_STORE_EOI, 0); + xive_esb_write(xd, XIVE_ESB_STORE_EOI, 0); else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) { /* * The FW told us to call it. This happens for some @@ -326,10 +342,10 @@ void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd) * properly. */ if (xd->flags & XIVE_IRQ_FLAG_LSI) - in_be64(xd->eoi_mmio); + xive_esb_read(xd, XIVE_ESB_LOAD_EOI); else { - eoi_val = xive_poke_esb(xd, XIVE_ESB_SET_PQ_00); - DBG_VERBOSE("eoi_val=%x\n", offset, eoi_val); + eoi_val = xive_esb_read(xd, XIVE_ESB_SET_PQ_00); + DBG_VERBOSE("eoi_val=%x\n", eoi_val); /* Re-trigger if needed */ if ((eoi_val & XIVE_ESB_VAL_Q) && xd->trig_mmio) @@ -383,12 +399,12 @@ static void xive_do_source_set_mask(struct xive_irq_data *xd, * ESB accordingly on unmask. 
*/ if (mask) { - val = xive_poke_esb(xd, XIVE_ESB_SET_PQ_01); + val = xive_esb_read(xd, XIVE_ESB_SET_PQ_01); xd->saved_p = !!(val & XIVE_ESB_VAL_P); } else if (xd->saved_p) - xive_poke_esb(xd, XIVE_ESB_SET_PQ_10); + xive_esb_read(xd, XIVE_ESB_SET_PQ_10); else - xive_poke_esb(xd, XIVE_ESB_SET_PQ_00); + xive_esb_read(xd, XIVE_ESB_SET_PQ_00); } /* @@ -447,7 +463,7 @@ static int xive_find_target_in_mask(const struct cpumask *mask, int cpu, first, num, i; /* Pick up a starting point CPU in the mask based on fuzz */ - num = cpumask_weight(mask); + num = min_t(int, cpumask_weight(mask), nr_cpu_ids); first = fuzz % num; /* Locate it */ @@ -672,6 +688,10 @@ static int xive_irq_set_affinity(struct irq_data *d, if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids) return -EINVAL; + /* Don't do anything if the interrupt isn't started */ + if (!irqd_is_started(d)) + return IRQ_SET_MASK_OK; + /* * If existing target is already in the new mask, and is * online then do nothing. @@ -768,7 +788,7 @@ static int xive_irq_retrigger(struct irq_data *d) * To perform a retrigger, we first set the PQ bits to * 11, then perform an EOI. */ - xive_poke_esb(xd, XIVE_ESB_SET_PQ_11); + xive_esb_read(xd, XIVE_ESB_SET_PQ_11); /* * Note: We pass "0" to the hw_irq argument in order to @@ -803,7 +823,7 @@ static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state) irqd_set_forwarded_to_vcpu(d); /* Set it to PQ=10 state to prevent further sends */ - pq = xive_poke_esb(xd, XIVE_ESB_SET_PQ_10); + pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_10); /* No target ? nothing to do */ if (xd->target == XIVE_INVALID_TARGET) { @@ -832,7 +852,7 @@ static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state) * for sure the queue slot is no longer in use. */ if (pq & 2) { - pq = xive_poke_esb(xd, XIVE_ESB_SET_PQ_11); + pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_11); xd->saved_p = true; /* @@ -989,6 +1009,9 @@ static void xive_ipi_eoi(struct irq_data *d) { struct xive_cpu *xc = __this_cpu_read(xive_cpu); + DBG_VERBOSE("IPI eoi: irq=%d [0x%lx] (HW IRQ 0x%x) pending=%02x\n", + d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio); + /* Handle possible race with unplug and drop stale IPIs */ if (!xc) return; @@ -1368,6 +1391,27 @@ void xive_flush_interrupt(void) #endif /* CONFIG_SMP */ +void xive_teardown_cpu(void) +{ + struct xive_cpu *xc = __this_cpu_read(xive_cpu); + unsigned int cpu = smp_processor_id(); + + /* Set CPPR to 0 to disable flow of interrupts */ + xc->cppr = 0; + out_8(xive_tima + xive_tima_offset + TM_CPPR, 0); + + if (xive_ops->teardown_cpu) + xive_ops->teardown_cpu(cpu, xc); + +#ifdef CONFIG_SMP + /* Get rid of IPI */ + xive_cleanup_cpu_ipi(cpu, xc); +#endif + + /* Disable and free the queues */ + xive_cleanup_cpu_queues(cpu, xc); +} + void xive_kexec_teardown_cpu(int secondary) { struct xive_cpu *xc = __this_cpu_read(xive_cpu); @@ -1395,8 +1439,8 @@ void xive_shutdown(void) xive_ops->shutdown(); } -bool xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset, - u8 max_prio) +bool __init xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset, + u8 max_prio) { xive_tima = area; xive_tima_offset = offset; @@ -1424,6 +1468,22 @@ bool xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset, return true; } +__be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift) +{ + unsigned int alloc_order; + struct page *pages; + __be32 *qpage; + + alloc_order = xive_alloc_order(queue_shift); + pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, alloc_order); 
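/*
 * Worked sizing example for xive_queue_page_alloc(), assuming 4kB pages
 * (PAGE_SHIFT == 12) and a 64kB event queue (queue_shift == 16):
 *
 *	alloc_order = xive_alloc_order(16) = 16 - 12 = 4, i.e. 2^4 = 16 pages
 *	memset(qpage, 0, 1 << 16) then clears the whole 64kB queue
 *
 * For queue_shift <= PAGE_SHIFT the order clamps to 0, i.e. a single page.
 */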
+ if (!pages) + return ERR_PTR(-ENOMEM); + qpage = (__be32 *)page_address(pages); + memset(qpage, 0, 1 << queue_shift); + + return qpage; +} + static int __init xive_off(char *arg) { xive_cmdline_disabled = true; diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c index 0f95476b01f6..ebc244b08d67 100644 --- a/arch/powerpc/sysdev/xive/native.c +++ b/arch/powerpc/sysdev/xive/native.c @@ -82,6 +82,8 @@ int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data) return -ENOMEM; } + data->hw_irq = hw_irq; + if (!data->trig_page) return 0; if (data->trig_page == data->eoi_page) { @@ -202,17 +204,12 @@ EXPORT_SYMBOL_GPL(xive_native_disable_queue); static int xive_native_setup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio) { struct xive_q *q = &xc->queue[prio]; - unsigned int alloc_order; - struct page *pages; __be32 *qpage; - alloc_order = (xive_queue_shift > PAGE_SHIFT) ? - (xive_queue_shift - PAGE_SHIFT) : 0; - pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, alloc_order); - if (!pages) - return -ENOMEM; - qpage = (__be32 *)page_address(pages); - memset(qpage, 0, 1 << xive_queue_shift); + qpage = xive_queue_page_alloc(cpu, xive_queue_shift); + if (IS_ERR(qpage)) + return PTR_ERR(qpage); + return xive_native_configure_queue(get_hard_smp_processor_id(cpu), q, prio, qpage, xive_queue_shift, false); } @@ -227,8 +224,7 @@ static void xive_native_cleanup_queue(unsigned int cpu, struct xive_cpu *xc, u8 * from an IPI and iounmap isn't safe */ __xive_native_disable_queue(get_hard_smp_processor_id(cpu), q, prio); - alloc_order = (xive_queue_shift > PAGE_SHIFT) ? - (xive_queue_shift - PAGE_SHIFT) : 0; + alloc_order = xive_alloc_order(xive_queue_shift); free_pages((unsigned long)q->qpage, alloc_order); q->qpage = NULL; } @@ -515,13 +511,13 @@ static bool xive_parse_provisioning(struct device_node *np) static void xive_native_setup_pools(void) { /* Allocate a pool big enough */ - pr_debug("XIVE: Allocating VP block for pool size %d\n", nr_cpu_ids); + pr_debug("XIVE: Allocating VP block for pool size %u\n", nr_cpu_ids); xive_pool_vps = xive_native_alloc_vp_block(nr_cpu_ids); if (WARN_ON(xive_pool_vps == XIVE_INVALID_VP)) pr_err("XIVE: Failed to allocate pool VP, KVM might not function\n"); - pr_debug("XIVE: Pool VPs allocated at 0x%x for %d max CPUs\n", + pr_debug("XIVE: Pool VPs allocated at 0x%x for %u max CPUs\n", xive_pool_vps, nr_cpu_ids); } @@ -531,7 +527,7 @@ u32 xive_native_default_eq_shift(void) } EXPORT_SYMBOL_GPL(xive_native_default_eq_shift); -bool xive_native_init(void) +bool __init xive_native_init(void) { struct device_node *np; struct resource r; @@ -551,7 +547,7 @@ bool xive_native_init(void) pr_devel("not found !\n"); return false; } - pr_devel("Found %s\n", np->full_name); + pr_devel("Found %pOF\n", np); /* Resource 1 is HV window */ if (of_address_to_resource(np, 1, &r)) { diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c new file mode 100644 index 000000000000..d9c4c9366049 --- /dev/null +++ b/arch/powerpc/sysdev/xive/spapr.c @@ -0,0 +1,666 @@ +/* + * Copyright 2016,2017 IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#define pr_fmt(fmt) "xive: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "xive-internal.h" + +static u32 xive_queue_shift; + +struct xive_irq_bitmap { + unsigned long *bitmap; + unsigned int base; + unsigned int count; + spinlock_t lock; + struct list_head list; +}; + +static LIST_HEAD(xive_irq_bitmaps); + +static int xive_irq_bitmap_add(int base, int count) +{ + struct xive_irq_bitmap *xibm; + + xibm = kzalloc(sizeof(*xibm), GFP_ATOMIC); + if (!xibm) + return -ENOMEM; + + spin_lock_init(&xibm->lock); + xibm->base = base; + xibm->count = count; + xibm->bitmap = kzalloc(xibm->count, GFP_KERNEL); + list_add(&xibm->list, &xive_irq_bitmaps); + + pr_info("Using IRQ range [%x-%x]", xibm->base, + xibm->base + xibm->count - 1); + return 0; +} + +static int __xive_irq_bitmap_alloc(struct xive_irq_bitmap *xibm) +{ + int irq; + + irq = find_first_zero_bit(xibm->bitmap, xibm->count); + if (irq != xibm->count) { + set_bit(irq, xibm->bitmap); + irq += xibm->base; + } else { + irq = -ENOMEM; + } + + return irq; +} + +static int xive_irq_bitmap_alloc(void) +{ + struct xive_irq_bitmap *xibm; + unsigned long flags; + int irq = -ENOENT; + + list_for_each_entry(xibm, &xive_irq_bitmaps, list) { + spin_lock_irqsave(&xibm->lock, flags); + irq = __xive_irq_bitmap_alloc(xibm); + spin_unlock_irqrestore(&xibm->lock, flags); + if (irq >= 0) + break; + } + return irq; +} + +static void xive_irq_bitmap_free(int irq) +{ + unsigned long flags; + struct xive_irq_bitmap *xibm; + + list_for_each_entry(xibm, &xive_irq_bitmaps, list) { + if ((irq >= xibm->base) && (irq < xibm->base + xibm->count)) { + spin_lock_irqsave(&xibm->lock, flags); + clear_bit(irq - xibm->base, xibm->bitmap); + spin_unlock_irqrestore(&xibm->lock, flags); + break; + } + } +} + +static long plpar_int_get_source_info(unsigned long flags, + unsigned long lisn, + unsigned long *src_flags, + unsigned long *eoi_page, + unsigned long *trig_page, + unsigned long *esb_shift) +{ + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + long rc; + + rc = plpar_hcall(H_INT_GET_SOURCE_INFO, retbuf, flags, lisn); + if (rc) { + pr_err("H_INT_GET_SOURCE_INFO lisn=%ld failed %ld\n", lisn, rc); + return rc; + } + + *src_flags = retbuf[0]; + *eoi_page = retbuf[1]; + *trig_page = retbuf[2]; + *esb_shift = retbuf[3]; + + pr_devel("H_INT_GET_SOURCE_INFO flags=%lx eoi=%lx trig=%lx shift=%lx\n", + retbuf[0], retbuf[1], retbuf[2], retbuf[3]); + + return 0; +} + +#define XIVE_SRC_SET_EISN (1ull << (63 - 62)) +#define XIVE_SRC_MASK (1ull << (63 - 63)) /* unused */ + +static long plpar_int_set_source_config(unsigned long flags, + unsigned long lisn, + unsigned long target, + unsigned long prio, + unsigned long sw_irq) +{ + long rc; + + + pr_devel("H_INT_SET_SOURCE_CONFIG flags=%lx lisn=%lx target=%lx prio=%lx sw_irq=%lx\n", + flags, lisn, target, prio, sw_irq); + + + rc = plpar_hcall_norets(H_INT_SET_SOURCE_CONFIG, flags, lisn, + target, prio, sw_irq); + if (rc) { + pr_err("H_INT_SET_SOURCE_CONFIG lisn=%ld target=%lx prio=%lx failed %ld\n", + lisn, target, prio, rc); + return rc; + } + + return 0; +} + +static long plpar_int_get_queue_info(unsigned long flags, + unsigned long target, + unsigned long priority, + unsigned long *esn_page, + unsigned long *esn_size) +{ + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + long rc; + + rc = plpar_hcall(H_INT_GET_QUEUE_INFO, retbuf, flags, target, priority); + if (rc) { + 
pr_err("H_INT_GET_QUEUE_INFO cpu=%ld prio=%ld failed %ld\n", + target, priority, rc); + return rc; + } + + *esn_page = retbuf[0]; + *esn_size = retbuf[1]; + + pr_devel("H_INT_GET_QUEUE_INFO page=%lx size=%lx\n", + retbuf[0], retbuf[1]); + + return 0; +} + +#define XIVE_EQ_ALWAYS_NOTIFY (1ull << (63 - 63)) + +static long plpar_int_set_queue_config(unsigned long flags, + unsigned long target, + unsigned long priority, + unsigned long qpage, + unsigned long qsize) +{ + long rc; + + pr_devel("H_INT_SET_QUEUE_CONFIG flags=%lx target=%lx priority=%lx qpage=%lx qsize=%lx\n", + flags, target, priority, qpage, qsize); + + rc = plpar_hcall_norets(H_INT_SET_QUEUE_CONFIG, flags, target, + priority, qpage, qsize); + if (rc) { + pr_err("H_INT_SET_QUEUE_CONFIG cpu=%ld prio=%ld qpage=%lx returned %ld\n", + target, priority, qpage, rc); + return rc; + } + + return 0; +} + +static long plpar_int_sync(unsigned long flags, unsigned long lisn) +{ + long rc; + + rc = plpar_hcall_norets(H_INT_SYNC, flags, lisn); + if (rc) { + pr_err("H_INT_SYNC lisn=%ld returned %ld\n", lisn, rc); + return rc; + } + + return 0; +} + +#define XIVE_ESB_FLAG_STORE (1ull << (63 - 63)) + +static long plpar_int_esb(unsigned long flags, + unsigned long lisn, + unsigned long offset, + unsigned long in_data, + unsigned long *out_data) +{ + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + long rc; + + pr_devel("H_INT_ESB flags=%lx lisn=%lx offset=%lx in=%lx\n", + flags, lisn, offset, in_data); + + rc = plpar_hcall(H_INT_ESB, retbuf, flags, lisn, offset, in_data); + if (rc) { + pr_err("H_INT_ESB lisn=%ld offset=%ld returned %ld\n", + lisn, offset, rc); + return rc; + } + + *out_data = retbuf[0]; + + return 0; +} + +static u64 xive_spapr_esb_rw(u32 lisn, u32 offset, u64 data, bool write) +{ + unsigned long read_data; + long rc; + + rc = plpar_int_esb(write ? XIVE_ESB_FLAG_STORE : 0, + lisn, offset, data, &read_data); + if (rc) + return -1; + + return write ? 0 : read_data; +} + +#define XIVE_SRC_H_INT_ESB (1ull << (63 - 60)) +#define XIVE_SRC_LSI (1ull << (63 - 61)) +#define XIVE_SRC_TRIGGER (1ull << (63 - 62)) +#define XIVE_SRC_STORE_EOI (1ull << (63 - 63)) + +static int xive_spapr_populate_irq_data(u32 hw_irq, struct xive_irq_data *data) +{ + long rc; + unsigned long flags; + unsigned long eoi_page; + unsigned long trig_page; + unsigned long esb_shift; + + memset(data, 0, sizeof(*data)); + + rc = plpar_int_get_source_info(0, hw_irq, &flags, &eoi_page, &trig_page, + &esb_shift); + if (rc) + return -EINVAL; + + if (flags & XIVE_SRC_H_INT_ESB) + data->flags |= XIVE_IRQ_FLAG_H_INT_ESB; + if (flags & XIVE_SRC_STORE_EOI) + data->flags |= XIVE_IRQ_FLAG_STORE_EOI; + if (flags & XIVE_SRC_LSI) + data->flags |= XIVE_IRQ_FLAG_LSI; + data->eoi_page = eoi_page; + data->esb_shift = esb_shift; + data->trig_page = trig_page; + + /* + * No chip-id for the sPAPR backend. This has an impact how we + * pick a target. See xive_pick_irq_target(). 
+ */ + data->src_chip = XIVE_INVALID_CHIP_ID; + + data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift); + if (!data->eoi_mmio) { + pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq); + return -ENOMEM; + } + + data->hw_irq = hw_irq; + + /* Full function page supports trigger */ + if (flags & XIVE_SRC_TRIGGER) { + data->trig_mmio = data->eoi_mmio; + return 0; + } + + data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift); + if (!data->trig_mmio) { + pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq); + return -ENOMEM; + } + return 0; +} + +static int xive_spapr_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq) +{ + long rc; + + rc = plpar_int_set_source_config(XIVE_SRC_SET_EISN, hw_irq, target, + prio, sw_irq); + + return rc == 0 ? 0 : -ENXIO; +} + +/* This can be called multiple time to change a queue configuration */ +static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio, + __be32 *qpage, u32 order) +{ + s64 rc = 0; + unsigned long esn_page; + unsigned long esn_size; + u64 flags, qpage_phys; + + /* If there's an actual queue page, clean it */ + if (order) { + if (WARN_ON(!qpage)) + return -EINVAL; + qpage_phys = __pa(qpage); + } else { + qpage_phys = 0; + } + + /* Initialize the rest of the fields */ + q->msk = order ? ((1u << (order - 2)) - 1) : 0; + q->idx = 0; + q->toggle = 0; + + rc = plpar_int_get_queue_info(0, target, prio, &esn_page, &esn_size); + if (rc) { + pr_err("Error %lld getting queue info prio %d\n", rc, prio); + rc = -EIO; + goto fail; + } + + /* TODO: add support for the notification page */ + q->eoi_phys = esn_page; + + /* Default is to always notify */ + flags = XIVE_EQ_ALWAYS_NOTIFY; + + /* Configure and enable the queue in HW */ + rc = plpar_int_set_queue_config(flags, target, prio, qpage_phys, order); + if (rc) { + pr_err("Error %lld setting queue for prio %d\n", rc, prio); + rc = -EIO; + } else { + q->qpage = qpage; + } +fail: + return rc; +} + +static int xive_spapr_setup_queue(unsigned int cpu, struct xive_cpu *xc, + u8 prio) +{ + struct xive_q *q = &xc->queue[prio]; + __be32 *qpage; + + qpage = xive_queue_page_alloc(cpu, xive_queue_shift); + if (IS_ERR(qpage)) + return PTR_ERR(qpage); + + return xive_spapr_configure_queue(cpu, q, prio, qpage, + xive_queue_shift); +} + +static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc, + u8 prio) +{ + struct xive_q *q = &xc->queue[prio]; + unsigned int alloc_order; + long rc; + + rc = plpar_int_set_queue_config(0, cpu, prio, 0, 0); + if (rc) + pr_err("Error %ld setting queue for prio %d\n", rc, prio); + + alloc_order = xive_alloc_order(xive_queue_shift); + free_pages((unsigned long)q->qpage, alloc_order); + q->qpage = NULL; +} + +static bool xive_spapr_match(struct device_node *node) +{ + /* Ignore cascaded controllers for the moment */ + return 1; +} + +#ifdef CONFIG_SMP +static int xive_spapr_get_ipi(unsigned int cpu, struct xive_cpu *xc) +{ + int irq = xive_irq_bitmap_alloc(); + + if (irq < 0) { + pr_err("Failed to allocate IPI on CPU %d\n", cpu); + return -ENXIO; + } + + xc->hw_ipi = irq; + return 0; +} + +static void xive_spapr_put_ipi(unsigned int cpu, struct xive_cpu *xc) +{ + if (!xc->hw_ipi) + return; + + xive_irq_bitmap_free(xc->hw_ipi); + xc->hw_ipi = 0; +} +#endif /* CONFIG_SMP */ + +static void xive_spapr_shutdown(void) +{ + long rc; + + rc = plpar_hcall_norets(H_INT_RESET, 0); + if (rc) + pr_err("H_INT_RESET failed %ld\n", rc); +} + +/* + * Perform an "ack" cycle on the current thread. 
Grab the pending + * active priorities and update the CPPR to the most favored one. + */ +static void xive_spapr_update_pending(struct xive_cpu *xc) +{ + u8 nsr, cppr; + u16 ack; + + /* + * Perform the "Acknowledge O/S to Register" cycle. + * + * Let's speedup the access to the TIMA using the raw I/O + * accessor as we don't need the synchronisation routine of + * the higher level ones + */ + ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_OS_REG)); + + /* Synchronize subsequent queue accesses */ + mb(); + + /* + * Grab the CPPR and the "NSR" field which indicates the source + * of the interrupt (if any) + */ + cppr = ack & 0xff; + nsr = ack >> 8; + + if (nsr & TM_QW1_NSR_EO) { + if (cppr == 0xff) + return; + /* Mark the priority pending */ + xc->pending_prio |= 1 << cppr; + + /* + * A new interrupt should never have a CPPR less favored + * than our current one. + */ + if (cppr >= xc->cppr) + pr_err("CPU %d odd ack CPPR, got %d at %d\n", + smp_processor_id(), cppr, xc->cppr); + + /* Update our idea of what the CPPR is */ + xc->cppr = cppr; + } +} + +static void xive_spapr_eoi(u32 hw_irq) +{ + /* Not used */; +} + +static void xive_spapr_setup_cpu(unsigned int cpu, struct xive_cpu *xc) +{ + /* Only some debug on the TIMA settings */ + pr_debug("(HW value: %08x %08x %08x)\n", + in_be32(xive_tima + TM_QW1_OS + TM_WORD0), + in_be32(xive_tima + TM_QW1_OS + TM_WORD1), + in_be32(xive_tima + TM_QW1_OS + TM_WORD2)); +} + +static void xive_spapr_teardown_cpu(unsigned int cpu, struct xive_cpu *xc) +{ + /* Nothing to do */; +} + +static void xive_spapr_sync_source(u32 hw_irq) +{ + /* Specs are unclear on what this is doing */ + plpar_int_sync(0, hw_irq); +} + +static const struct xive_ops xive_spapr_ops = { + .populate_irq_data = xive_spapr_populate_irq_data, + .configure_irq = xive_spapr_configure_irq, + .setup_queue = xive_spapr_setup_queue, + .cleanup_queue = xive_spapr_cleanup_queue, + .match = xive_spapr_match, + .shutdown = xive_spapr_shutdown, + .update_pending = xive_spapr_update_pending, + .eoi = xive_spapr_eoi, + .setup_cpu = xive_spapr_setup_cpu, + .teardown_cpu = xive_spapr_teardown_cpu, + .sync_source = xive_spapr_sync_source, + .esb_rw = xive_spapr_esb_rw, +#ifdef CONFIG_SMP + .get_ipi = xive_spapr_get_ipi, + .put_ipi = xive_spapr_put_ipi, +#endif /* CONFIG_SMP */ + .name = "spapr", +}; + +/* + * get max priority from "/ibm,plat-res-int-priorities" + */ +static bool xive_get_max_prio(u8 *max_prio) +{ + struct device_node *rootdn; + const __be32 *reg; + u32 len; + int prio, found; + + rootdn = of_find_node_by_path("/"); + if (!rootdn) { + pr_err("not root node found !\n"); + return false; + } + + reg = of_get_property(rootdn, "ibm,plat-res-int-priorities", &len); + if (!reg) { + pr_err("Failed to read 'ibm,plat-res-int-priorities' property\n"); + return false; + } + + if (len % (2 * sizeof(u32)) != 0) { + pr_err("invalid 'ibm,plat-res-int-priorities' property\n"); + return false; + } + + /* HW supports priorities in the range [0-7] and 0xFF is a + * wildcard priority used to mask. We scan the ranges reserved + * by the hypervisor to find the lowest priority we can use. 
+ */ + found = 0xFF; + for (prio = 0; prio < 8; prio++) { + int reserved = 0; + int i; + + for (i = 0; i < len / (2 * sizeof(u32)); i++) { + int base = be32_to_cpu(reg[2 * i]); + int range = be32_to_cpu(reg[2 * i + 1]); + + if (prio >= base && prio < base + range) + reserved++; + } + + if (!reserved) + found = prio; + } + + if (found == 0xFF) { + pr_err("no valid priority found in 'ibm,plat-res-int-priorities'\n"); + return false; + } + + *max_prio = found; + return true; +} + +bool __init xive_spapr_init(void) +{ + struct device_node *np; + struct resource r; + void __iomem *tima; + struct property *prop; + u8 max_prio; + u32 val; + u32 len; + const __be32 *reg; + int i; + + if (xive_cmdline_disabled) + return false; + + pr_devel("%s()\n", __func__); + np = of_find_compatible_node(NULL, NULL, "ibm,power-ivpe"); + if (!np) { + pr_devel("not found !\n"); + return false; + } + pr_devel("Found %s\n", np->full_name); + + /* Resource 1 is the OS ring TIMA */ + if (of_address_to_resource(np, 1, &r)) { + pr_err("Failed to get thread mgmnt area resource\n"); + return false; + } + tima = ioremap(r.start, resource_size(&r)); + if (!tima) { + pr_err("Failed to map thread mgmnt area\n"); + return false; + } + + if (!xive_get_max_prio(&max_prio)) + return false; + + /* Feed the IRQ number allocator with the ranges given in the DT */ + reg = of_get_property(np, "ibm,xive-lisn-ranges", &len); + if (!reg) { + pr_err("Failed to read 'ibm,xive-lisn-ranges' property\n"); + return false; + } + + if (len % (2 * sizeof(u32)) != 0) { + pr_err("invalid 'ibm,xive-lisn-ranges' property\n"); + return false; + } + + for (i = 0; i < len / (2 * sizeof(u32)); i++, reg += 2) + xive_irq_bitmap_add(be32_to_cpu(reg[0]), + be32_to_cpu(reg[1])); + + /* Iterate the EQ sizes and pick one */ + of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, reg, val) { + xive_queue_shift = val; + if (val == PAGE_SHIFT) + break; + } + + /* Initialize XIVE core with our backend */ + if (!xive_core_init(&xive_spapr_ops, tima, TM_QW1_OS, max_prio)) + return false; + + pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10)); + return true; +} diff --git a/arch/powerpc/sysdev/xive/xive-internal.h b/arch/powerpc/sysdev/xive/xive-internal.h index d07ef2d29caf..f34abed0c05f 100644 --- a/arch/powerpc/sysdev/xive/xive-internal.h +++ b/arch/powerpc/sysdev/xive/xive-internal.h @@ -47,6 +47,7 @@ struct xive_ops { void (*update_pending)(struct xive_cpu *xc); void (*eoi)(u32 hw_irq); void (*sync_source)(u32 hw_irq); + u64 (*esb_rw)(u32 hw_irq, u32 offset, u64 data, bool write); #ifdef CONFIG_SMP int (*get_ipi)(unsigned int cpu, struct xive_cpu *xc); void (*put_ipi)(unsigned int cpu, struct xive_cpu *xc); @@ -56,6 +57,12 @@ struct xive_ops { bool xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset, u8 max_prio); +__be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift); + +static inline u32 xive_alloc_order(u32 queue_shift) +{ + return (queue_shift > PAGE_SHIFT) ? 
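/*
 * A note on the (1ull << (63 - n)) pattern used for the hcall flag
 * definitions in spapr.c above: PAPR documents these flags in IBM
 * (MSB-first) bit numbering, where bit 0 is the most significant bit of
 * the 64-bit word.  Worked examples, not additional definitions:
 *
 *	XIVE_SRC_STORE_EOI = 1ull << (63 - 63) = 0x0000000000000001ULL
 *	XIVE_SRC_LSI       = 1ull << (63 - 61) = 0x0000000000000004ULL
 *	XIVE_SRC_H_INT_ESB = 1ull << (63 - 60) = 0x0000000000000008ULL
 */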
(queue_shift - PAGE_SHIFT) : 0; +} extern bool xive_cmdline_disabled; diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile index 0b2f771593eb..1dd88315cff4 100644 --- a/arch/powerpc/xmon/Makefile +++ b/arch/powerpc/xmon/Makefile @@ -5,6 +5,10 @@ subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror GCOV_PROFILE := n UBSAN_SANITIZE := n +# Disable ftrace for the entire directory +ORIG_CFLAGS := $(KBUILD_CFLAGS) +KBUILD_CFLAGS = $(subst -mno-sched-epilog,,$(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS))) + ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) obj-y += xmon.o nonstdio.o spr_access.o diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 08e367e3e8c3..33351c6704b1 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -89,6 +89,7 @@ static unsigned long nidump = 16; static unsigned long ncsum = 4096; static int termch; static char tmpstr[128]; +static int tracing_enabled; static long bus_error_jmp[JMP_BUF_LEN]; static int catch_memory_errors; @@ -234,6 +235,7 @@ Commands:\n\ "\ dr dump stream of raw bytes\n\ dt dump the tracing buffers (uses printk)\n\ + dtc dump the tracing buffers for current CPU (uses printk)\n\ " #ifdef CONFIG_PPC_POWERNV " dx# dump xive on CPU #\n\ @@ -461,6 +463,9 @@ static int xmon_core(struct pt_regs *regs, int fromipi) local_irq_save(flags); hard_irq_disable(); + tracing_enabled = tracing_is_on(); + tracing_off(); + bp = in_breakpoint_table(regs->nip, &offset); if (bp != NULL) { regs->nip = bp->address + offset; @@ -981,6 +986,8 @@ cmds(struct pt_regs *excp) break; case 'x': case 'X': + if (tracing_enabled) + tracing_on(); return cmd; case EOF: printf(" \n"); @@ -1732,23 +1739,25 @@ static void dump_206_sprs(void) /* Actually some of these pre-date 2.06, but whatevs */ - printf("srr0 = %.16x srr1 = %.16x dsisr = %.8x\n", + printf("srr0 = %.16lx srr1 = %.16lx dsisr = %.8x\n", mfspr(SPRN_SRR0), mfspr(SPRN_SRR1), mfspr(SPRN_DSISR)); - printf("dscr = %.16x ppr = %.16x pir = %.8x\n", + printf("dscr = %.16lx ppr = %.16lx pir = %.8x\n", mfspr(SPRN_DSCR), mfspr(SPRN_PPR), mfspr(SPRN_PIR)); + printf("amr = %.16lx uamor = %.16lx\n", + mfspr(SPRN_AMR), mfspr(SPRN_UAMOR)); if (!(mfmsr() & MSR_HV)) return; - printf("sdr1 = %.16x hdar = %.16x hdsisr = %.8x\n", + printf("sdr1 = %.16lx hdar = %.16lx hdsisr = %.8x\n", mfspr(SPRN_SDR1), mfspr(SPRN_HDAR), mfspr(SPRN_HDSISR)); - printf("hsrr0 = %.16x hsrr1 = %.16x hdec = %.8x\n", + printf("hsrr0 = %.16lx hsrr1 = %.16lx hdec = %.16lx\n", mfspr(SPRN_HSRR0), mfspr(SPRN_HSRR1), mfspr(SPRN_HDEC)); - printf("lpcr = %.16x pcr = %.16x lpidr = %.8x\n", + printf("lpcr = %.16lx pcr = %.16lx lpidr = %.8x\n", mfspr(SPRN_LPCR), mfspr(SPRN_PCR), mfspr(SPRN_LPID)); - printf("hsprg0 = %.16x hsprg1 = %.16x\n", - mfspr(SPRN_HSPRG0), mfspr(SPRN_HSPRG1)); - printf("dabr = %.16x dabrx = %.16x\n", + printf("hsprg0 = %.16lx hsprg1 = %.16lx amor = %.16lx\n", + mfspr(SPRN_HSPRG0), mfspr(SPRN_HSPRG1), mfspr(SPRN_AMOR)); + printf("dabr = %.16lx dabrx = %.16lx\n", mfspr(SPRN_DABR), mfspr(SPRN_DABRX)); #endif } @@ -1761,42 +1770,65 @@ static void dump_207_sprs(void) if (!cpu_has_feature(CPU_FTR_ARCH_207S)) return; - printf("dpdes = %.16x tir = %.16x cir = %.8x\n", + printf("dpdes = %.16lx tir = %.16lx cir = %.8x\n", mfspr(SPRN_DPDES), mfspr(SPRN_TIR), mfspr(SPRN_CIR)); - printf("fscr = %.16x tar = %.16x pspb = %.8x\n", + printf("fscr = %.16lx tar = %.16lx pspb = %.8x\n", mfspr(SPRN_FSCR), mfspr(SPRN_TAR), mfspr(SPRN_PSPB)); msr = mfmsr(); if (msr & MSR_TM) { /* Only if TM has been enabled in the kernel */ - printf("tfhar = 
%.16x tfiar = %.16x texasr = %.16x\n", + printf("tfhar = %.16lx tfiar = %.16lx texasr = %.16lx\n", mfspr(SPRN_TFHAR), mfspr(SPRN_TFIAR), mfspr(SPRN_TEXASR)); } - printf("mmcr0 = %.16x mmcr1 = %.16x mmcr2 = %.16x\n", + printf("mmcr0 = %.16lx mmcr1 = %.16lx mmcr2 = %.16lx\n", mfspr(SPRN_MMCR0), mfspr(SPRN_MMCR1), mfspr(SPRN_MMCR2)); printf("pmc1 = %.8x pmc2 = %.8x pmc3 = %.8x pmc4 = %.8x\n", mfspr(SPRN_PMC1), mfspr(SPRN_PMC2), mfspr(SPRN_PMC3), mfspr(SPRN_PMC4)); - printf("mmcra = %.16x siar = %.16x pmc5 = %.8x\n", + printf("mmcra = %.16lx siar = %.16lx pmc5 = %.8x\n", mfspr(SPRN_MMCRA), mfspr(SPRN_SIAR), mfspr(SPRN_PMC5)); - printf("sdar = %.16x sier = %.16x pmc6 = %.8x\n", + printf("sdar = %.16lx sier = %.16lx pmc6 = %.8x\n", mfspr(SPRN_SDAR), mfspr(SPRN_SIER), mfspr(SPRN_PMC6)); - printf("ebbhr = %.16x ebbrr = %.16x bescr = %.16x\n", + printf("ebbhr = %.16lx ebbrr = %.16lx bescr = %.16lx\n", mfspr(SPRN_EBBHR), mfspr(SPRN_EBBRR), mfspr(SPRN_BESCR)); + printf("iamr = %.16lx\n", mfspr(SPRN_IAMR)); if (!(msr & MSR_HV)) return; - printf("hfscr = %.16x dhdes = %.16x rpr = %.16x\n", + printf("hfscr = %.16lx dhdes = %.16lx rpr = %.16lx\n", mfspr(SPRN_HFSCR), mfspr(SPRN_DHDES), mfspr(SPRN_RPR)); - printf("dawr = %.16x dawrx = %.16x ciabr = %.16x\n", + printf("dawr = %.16lx dawrx = %.16lx ciabr = %.16lx\n", mfspr(SPRN_DAWR), mfspr(SPRN_DAWRX), mfspr(SPRN_CIABR)); #endif } +static void dump_300_sprs(void) +{ +#ifdef CONFIG_PPC64 + bool hv = mfmsr() & MSR_HV; + + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + return; + + printf("pidr = %.16lx tidr = %.16lx\n", + mfspr(SPRN_PID), mfspr(SPRN_TIDR)); + printf("asdr = %.16lx psscr = %.16lx\n", + mfspr(SPRN_ASDR), hv ? mfspr(SPRN_PSSCR) + : mfspr(SPRN_PSSCR_PR)); + + if (!hv) + return; + + printf("ptcr = %.16lx\n", + mfspr(SPRN_PTCR)); +#endif +} + static void dump_one_spr(int spr, bool show_unimplemented) { unsigned long val; @@ -1850,6 +1882,7 @@ static void super_regs(void) dump_206_sprs(); dump_207_sprs(); + dump_300_sprs(); return; } @@ -2231,6 +2264,17 @@ static void xmon_rawdump (unsigned long adrs, long ndump) printf("\n"); } +static void dump_tracing(void) +{ + int c; + + c = inchar(); + if (c == 'c') + ftrace_dump(DUMP_ORIG); + else + ftrace_dump(DUMP_ALL); +} + #ifdef CONFIG_PPC64 static void dump_one_paca(int cpu) { @@ -2507,6 +2551,11 @@ dump(void) } #endif + if (c == 't') { + dump_tracing(); + return; + } + if (c == '\n') termch = c; @@ -2525,9 +2574,6 @@ dump(void) dump_log_buf(); } else if (c == 'o') { dump_opal_msglog(); - } else if (c == 't') { - ftrace_dump(DUMP_ALL); - tracing_on(); } else if (c == 'r') { scanhex(&ndump); if (ndump == 0) diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h new file mode 100644 index 000000000000..c02f4aba88a6 --- /dev/null +++ b/arch/s390/include/asm/ap.h @@ -0,0 +1,126 @@ +/* + * Adjunct processor (AP) interfaces + * + * Copyright IBM Corp. 2017 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. + * + * Author(s): Tony Krowiak + * Martin Schwidefsky + * Harald Freudenberger + */ + +#ifndef _ASM_S390_AP_H_ +#define _ASM_S390_AP_H_ + +/** + * The ap_qid_t identifier of an ap queue. + * If the AP facilities test (APFT) facility is available, + * card and queue index are 8 bit values, otherwise + * card index is 6 bit and queue index a 4 bit value. 
+ */ +typedef unsigned int ap_qid_t; + +#define AP_MKQID(_card, _queue) (((_card) & 63) << 8 | ((_queue) & 255)) +#define AP_QID_CARD(_qid) (((_qid) >> 8) & 63) +#define AP_QID_QUEUE(_qid) ((_qid) & 255) + +/** + * struct ap_queue_status - Holds the AP queue status. + * @queue_empty: Shows if queue is empty + * @replies_waiting: Waiting replies + * @queue_full: Is 1 if the queue is full + * @irq_enabled: Shows if interrupts are enabled for the AP + * @response_code: Holds the 8 bit response code + * + * The ap queue status word is returned by all three AP functions + * (PQAP, NQAP and DQAP). There's a set of flags in the first + * byte, followed by a 1 byte response code. + */ +struct ap_queue_status { + unsigned int queue_empty : 1; + unsigned int replies_waiting : 1; + unsigned int queue_full : 1; + unsigned int _pad1 : 4; + unsigned int irq_enabled : 1; + unsigned int response_code : 8; + unsigned int _pad2 : 16; +}; + +/** + * ap_test_queue(): Test adjunct processor queue. + * @qid: The AP queue number + * @tbit: Test facilities bit + * @info: Pointer to queue descriptor + * + * Returns AP queue status structure. + */ +struct ap_queue_status ap_test_queue(ap_qid_t qid, + int tbit, + unsigned long *info); + +struct ap_config_info { + unsigned int apsc : 1; /* S bit */ + unsigned int apxa : 1; /* N bit */ + unsigned int qact : 1; /* C bit */ + unsigned int rc8a : 1; /* R bit */ + unsigned char _reserved1 : 4; + unsigned char _reserved2[3]; + unsigned char Na; /* max # of APs - 1 */ + unsigned char Nd; /* max # of Domains - 1 */ + unsigned char _reserved3[10]; + unsigned int apm[8]; /* AP ID mask */ + unsigned int aqm[8]; /* AP queue mask */ + unsigned int adm[8]; /* AP domain mask */ + unsigned char _reserved4[16]; +} __aligned(8); + +/* + * ap_query_configuration(): Fetch cryptographic config info + * + * Returns the ap configuration info fetched via PQAP(QCI). + * On success 0 is returned, on failure a negative errno + * is returned, e.g. if the PQAP(QCI) instruction is not + * available, the return value will be -EOPNOTSUPP. + */ +int ap_query_configuration(struct ap_config_info *info); + +/* + * struct ap_qirq_ctrl - convenient struct for easy invocation + * of the ap_queue_irq_ctrl() function. This struct is passed + * as GR1 parameter to the PQAP(AQIC) instruction. For details + * please see the AR documentation. + */ +struct ap_qirq_ctrl { + unsigned int _res1 : 8; + unsigned int zone : 8; /* zone info */ + unsigned int ir : 1; /* ir flag: enable (1) or disable (0) irq */ + unsigned int _res2 : 4; + unsigned int gisc : 3; /* guest isc field */ + unsigned int _res3 : 6; + unsigned int gf : 2; /* gisa format */ + unsigned int _res4 : 1; + unsigned int gisa : 27; /* gisa origin */ + unsigned int _res5 : 1; + unsigned int isc : 3; /* irq sub class */ +}; + +/** + * ap_queue_irq_ctrl(): Control interruption on a AP queue. + * @qid: The AP queue number + * @qirqctrl: struct ap_qirq_ctrl, see above + * @ind: The notification indicator byte + * + * Returns AP queue status. + * + * Control interruption on the given AP queue. + * Just a simple wrapper function for the low level PQAP(AQIC) + * instruction available for other kernel modules. 
+ */ +struct ap_queue_status ap_queue_irq_ctrl(ap_qid_t qid, + struct ap_qirq_ctrl qirqctrl, + void *ind); + +#endif /* _ASM_S390_AP_H_ */ diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index a409d5991934..51375e766e90 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -226,7 +226,9 @@ struct kvm_s390_sie_block { #define ECB3_RI 0x01 __u8 ecb3; /* 0x0063 */ __u32 scaol; /* 0x0064 */ - __u8 reserved68[4]; /* 0x0068 */ + __u8 reserved68; /* 0x0068 */ + __u8 epdx; /* 0x0069 */ + __u8 reserved6a[2]; /* 0x006a */ __u32 todpr; /* 0x006c */ __u8 reserved70[16]; /* 0x0070 */ __u64 mso; /* 0x0080 */ @@ -265,6 +267,7 @@ struct kvm_s390_sie_block { __u64 cbrlo; /* 0x01b8 */ __u8 reserved1c0[8]; /* 0x01c0 */ #define ECD_HOSTREGMGMT 0x20000000 +#define ECD_MEF 0x08000000 __u32 ecd; /* 0x01c8 */ __u8 reserved1cc[18]; /* 0x01cc */ __u64 pp; /* 0x01de */ @@ -739,6 +742,7 @@ struct kvm_arch{ struct kvm_s390_cpu_model model; struct kvm_s390_crypto crypto; struct kvm_s390_vsie vsie; + u8 epdx; u64 epoch; struct kvm_s390_migration_state *migration_state; /* subset of available cpu features enabled by user space */ diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h index bd6f30304518..3f46a6577b8d 100644 --- a/arch/s390/include/asm/mmu.h +++ b/arch/s390/include/asm/mmu.h @@ -5,12 +5,11 @@ #include typedef struct { + spinlock_t lock; cpumask_t cpu_attach_mask; atomic_t flush_count; unsigned int flush_mm; - spinlock_t pgtable_lock; struct list_head pgtable_list; - spinlock_t gmap_lock; struct list_head gmap_list; unsigned long gmap_asce; unsigned long asce; @@ -27,10 +26,8 @@ typedef struct { } mm_context_t; #define INIT_MM_CONTEXT(name) \ - .context.pgtable_lock = \ - __SPIN_LOCK_UNLOCKED(name.context.pgtable_lock), \ + .context.lock = __SPIN_LOCK_UNLOCKED(name.context.lock), \ .context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \ - .context.gmap_lock = __SPIN_LOCK_UNLOCKED(name.context.gmap_lock), \ .context.gmap_list = LIST_HEAD_INIT(name.context.gmap_list), static inline int tprot(unsigned long addr) diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index 72e9ca83a668..3c9abedc323c 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h @@ -17,9 +17,8 @@ static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm) { - spin_lock_init(&mm->context.pgtable_lock); + spin_lock_init(&mm->context.lock); INIT_LIST_HEAD(&mm->context.pgtable_list); - spin_lock_init(&mm->context.gmap_lock); INIT_LIST_HEAD(&mm->context.gmap_list); cpumask_clear(&mm->context.cpu_attach_mask); atomic_set(&mm->context.flush_count, 0); @@ -103,7 +102,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, if (prev == next) return; cpumask_set_cpu(cpu, &next->context.cpu_attach_mask); - cpumask_set_cpu(cpu, mm_cpumask(next)); /* Clear old ASCE by loading the kernel ASCE. 
*/ __ctl_load(S390_lowcore.kernel_asce, 1, 1); __ctl_load(S390_lowcore.kernel_asce, 7, 7); @@ -121,9 +119,8 @@ static inline void finish_arch_post_lock_switch(void) preempt_disable(); while (atomic_read(&mm->context.flush_count)) cpu_relax(); - - if (mm->context.flush_mm) - __tlb_flush_mm(mm); + cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); + __tlb_flush_mm_lazy(mm); preempt_enable(); } set_fs(current->thread.mm_segment); @@ -136,6 +133,7 @@ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) { switch_mm(prev, next, current); + cpumask_set_cpu(smp_processor_id(), mm_cpumask(next)); set_user_asce(next); } diff --git a/arch/s390/include/asm/page-states.h b/arch/s390/include/asm/page-states.h index ca21b28a7b17..22b0f49e87c1 100644 --- a/arch/s390/include/asm/page-states.h +++ b/arch/s390/include/asm/page-states.h @@ -15,6 +15,6 @@ #define ESSA_SET_STABLE_IF_RESIDENT 6 #define ESSA_SET_STABLE_NODAT 7 -#define ESSA_MAX ESSA_SET_STABLE_IF_RESIDENT +#define ESSA_MAX ESSA_SET_STABLE_NODAT #endif diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h index f36b4b726057..386df9adef0a 100644 --- a/arch/s390/include/asm/pci.h +++ b/arch/s390/include/asm/pci.h @@ -8,6 +8,7 @@ #include #include +#include #include #include #include @@ -122,6 +123,8 @@ struct zpci_dev { unsigned long iommu_pages; unsigned int next_bit; + struct iommu_device iommu_dev; /* IOMMU core handle */ + char res_name[16]; struct zpci_bar_struct bars[PCI_BAR_COUNT]; @@ -174,6 +177,10 @@ int clp_enable_fh(struct zpci_dev *, u8); int clp_disable_fh(struct zpci_dev *); int clp_get_state(u32 fid, enum zpci_state *state); +/* IOMMU Interface */ +int zpci_init_iommu(struct zpci_dev *zdev); +void zpci_destroy_iommu(struct zpci_dev *zdev); + #ifdef CONFIG_PCI /* Error handling and recovery */ void zpci_event_error(void *); diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index dce708e061ea..20e75a2ca93a 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -1507,7 +1507,9 @@ static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, static inline void pmdp_invalidate(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmdp) { - pmdp_xchg_direct(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY)); + pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID); + + pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd); } #define __HAVE_ARCH_PMDP_SET_WRPROTECT diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h index 4d759f8f4bc7..b08d5bc2666e 100644 --- a/arch/s390/include/asm/tlbflush.h +++ b/arch/s390/include/asm/tlbflush.h @@ -48,23 +48,6 @@ static inline void __tlb_flush_global(void) * Flush TLB entries for a specific mm on all CPUs (in case gmap is used * this implicates multiple ASCEs!). 
*/ -static inline void __tlb_flush_full(struct mm_struct *mm) -{ - preempt_disable(); - atomic_inc(&mm->context.flush_count); - if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) { - /* Local TLB flush */ - __tlb_flush_local(); - } else { - /* Global TLB flush */ - __tlb_flush_global(); - /* Reset TLB flush mask */ - cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask); - } - atomic_dec(&mm->context.flush_count); - preempt_enable(); -} - static inline void __tlb_flush_mm(struct mm_struct *mm) { unsigned long gmap_asce; @@ -76,16 +59,18 @@ static inline void __tlb_flush_mm(struct mm_struct *mm) */ preempt_disable(); atomic_inc(&mm->context.flush_count); + /* Reset TLB flush mask */ + cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask); + barrier(); gmap_asce = READ_ONCE(mm->context.gmap_asce); if (MACHINE_HAS_IDTE && gmap_asce != -1UL) { if (gmap_asce) __tlb_flush_idte(gmap_asce); __tlb_flush_idte(mm->context.asce); } else { - __tlb_flush_full(mm); + /* Global TLB flush */ + __tlb_flush_global(); } - /* Reset TLB flush mask */ - cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask); atomic_dec(&mm->context.flush_count); preempt_enable(); } @@ -99,7 +84,6 @@ static inline void __tlb_flush_kernel(void) } #else #define __tlb_flush_global() __tlb_flush_local() -#define __tlb_flush_full(mm) __tlb_flush_local() /* * Flush TLB entries for a specific ASCE on all CPUs. @@ -117,10 +101,12 @@ static inline void __tlb_flush_kernel(void) static inline void __tlb_flush_mm_lazy(struct mm_struct * mm) { + spin_lock(&mm->context.lock); if (mm->context.flush_mm) { - __tlb_flush_mm(mm); mm->context.flush_mm = 0; + __tlb_flush_mm(mm); } + spin_unlock(&mm->context.lock); } /* diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h index 69d09c39bbcd..cd7359e23d86 100644 --- a/arch/s390/include/uapi/asm/kvm.h +++ b/arch/s390/include/uapi/asm/kvm.h @@ -88,6 +88,12 @@ struct kvm_s390_io_adapter_req { /* kvm attributes for KVM_S390_VM_TOD */ #define KVM_S390_VM_TOD_LOW 0 #define KVM_S390_VM_TOD_HIGH 1 +#define KVM_S390_VM_TOD_EXT 2 + +struct kvm_s390_vm_tod_clock { + __u8 epoch_idx; + __u64 tod; +}; /* kvm attributes for KVM_S390_VM_CPU_MODEL */ /* processor related attributes are r/w */ diff --git a/arch/s390/include/uapi/asm/socket.h b/arch/s390/include/uapi/asm/socket.h index 52a63f4175cb..a56916c83565 100644 --- a/arch/s390/include/uapi/asm/socket.h +++ b/arch/s390/include/uapi/asm/socket.h @@ -108,4 +108,6 @@ #define SO_PEERGROUPS 59 +#define SO_ZEROCOPY 60 + #endif /* _ASM_SOCKET_H */ diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index c620049c61f2..f549c4657376 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c @@ -75,35 +75,34 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) at the same time. */ err = __put_user(from->si_signo, &to->si_signo); err |= __put_user(from->si_errno, &to->si_errno); - err |= __put_user((short)from->si_code, &to->si_code); + err |= __put_user(from->si_code, &to->si_code); if (from->si_code < 0) err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE); else { - switch (from->si_code >> 16) { - case __SI_RT >> 16: /* This is not generated by the kernel as of now. 
*/ - case __SI_MESGQ >> 16: + switch (siginfo_layout(from->si_signo, from->si_code)) { + case SIL_RT: err |= __put_user(from->si_int, &to->si_int); /* fallthrough */ - case __SI_KILL >> 16: + case SIL_KILL: err |= __put_user(from->si_pid, &to->si_pid); err |= __put_user(from->si_uid, &to->si_uid); break; - case __SI_CHLD >> 16: + case SIL_CHLD: err |= __put_user(from->si_pid, &to->si_pid); err |= __put_user(from->si_uid, &to->si_uid); err |= __put_user(from->si_utime, &to->si_utime); err |= __put_user(from->si_stime, &to->si_stime); err |= __put_user(from->si_status, &to->si_status); break; - case __SI_FAULT >> 16: + case SIL_FAULT: err |= __put_user((unsigned long) from->si_addr, &to->si_addr); break; - case __SI_POLL >> 16: + case SIL_POLL: err |= __put_user(from->si_band, &to->si_band); err |= __put_user(from->si_fd, &to->si_fd); break; - case __SI_TIMER >> 16: + case SIL_TIMER: err |= __put_user(from->si_tid, &to->si_tid); err |= __put_user(from->si_overrun, &to->si_overrun); err |= __put_user(from->si_int, &to->si_int); @@ -127,32 +126,31 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) if (to->si_code < 0) err |= __copy_from_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE); else { - switch (to->si_code >> 16) { - case __SI_RT >> 16: /* This is not generated by the kernel as of now. */ - case __SI_MESGQ >> 16: + switch (siginfo_layout(to->si_signo, to->si_code)) { + case SIL_RT: err |= __get_user(to->si_int, &from->si_int); /* fallthrough */ - case __SI_KILL >> 16: + case SIL_KILL: err |= __get_user(to->si_pid, &from->si_pid); err |= __get_user(to->si_uid, &from->si_uid); break; - case __SI_CHLD >> 16: + case SIL_CHLD: err |= __get_user(to->si_pid, &from->si_pid); err |= __get_user(to->si_uid, &from->si_uid); err |= __get_user(to->si_utime, &from->si_utime); err |= __get_user(to->si_stime, &from->si_stime); err |= __get_user(to->si_status, &from->si_status); break; - case __SI_FAULT >> 16: + case SIL_FAULT: err |= __get_user(tmp, &from->si_addr); to->si_addr = (void __force __user *) (u64) (tmp & PSW32_ADDR_INSN); break; - case __SI_POLL >> 16: + case SIL_POLL: err |= __get_user(to->si_band, &from->si_band); err |= __get_user(to->si_fd, &from->si_fd); break; - case __SI_TIMER >> 16: + case SIL_TIMER: err |= __get_user(to->si_tid, &from->si_tid); err |= __get_user(to->si_overrun, &from->si_overrun); err |= __get_user(to->si_int, &from->si_int); diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index ca8cd80e8feb..60181caf8e8a 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -404,18 +404,6 @@ static inline void save_vector_registers(void) #endif } -static int __init topology_setup(char *str) -{ - bool enabled; - int rc; - - rc = kstrtobool(str, &enabled); - if (!rc && !enabled) - S390_lowcore.machine_flags &= ~MACHINE_FLAG_TOPOLOGY; - return rc; -} -early_param("topology", topology_setup); - static int __init disable_vector_extension(char *str) { S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX; diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index c1bf75ffb875..7e1e40323b78 100644 --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c @@ -823,9 +823,12 @@ static int cpumsf_pmu_event_init(struct perf_event *event) } /* Check online status of the CPU to which the event is pinned */ - if ((unsigned int)event->cpu >= nr_cpumask_bits || - (event->cpu >= 0 && !cpu_online(event->cpu))) - return -ENODEV; + if (event->cpu >= 0) { + if ((unsigned int)event->cpu >= 
nr_cpumask_bits) + return -ENODEV; + if (!cpu_online(event->cpu)) + return -ENODEV; + } /* Force reset of idle/hv excludes regardless of what the * user requested. diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index bb47c92476f0..ed0bdd220e1a 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c @@ -8,6 +8,8 @@ #include #include +#include +#include #include #include #include @@ -29,12 +31,20 @@ #define PTF_VERTICAL (1UL) #define PTF_CHECK (2UL) +enum { + TOPOLOGY_MODE_HW, + TOPOLOGY_MODE_SINGLE, + TOPOLOGY_MODE_PACKAGE, + TOPOLOGY_MODE_UNINITIALIZED +}; + struct mask_info { struct mask_info *next; unsigned char id; cpumask_t mask; }; +static int topology_mode = TOPOLOGY_MODE_UNINITIALIZED; static void set_topology_timer(void); static void topology_work_fn(struct work_struct *work); static struct sysinfo_15_1_x *tl_info; @@ -59,11 +69,26 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu) cpumask_t mask; cpumask_copy(&mask, cpumask_of(cpu)); - if (!MACHINE_HAS_TOPOLOGY) - return mask; - for (; info; info = info->next) { - if (cpumask_test_cpu(cpu, &info->mask)) - return info->mask; + switch (topology_mode) { + case TOPOLOGY_MODE_HW: + while (info) { + if (cpumask_test_cpu(cpu, &info->mask)) { + mask = info->mask; + break; + } + info = info->next; + } + if (cpumask_empty(&mask)) + cpumask_copy(&mask, cpumask_of(cpu)); + break; + case TOPOLOGY_MODE_PACKAGE: + cpumask_copy(&mask, cpu_present_mask); + break; + default: + /* fallthrough */ + case TOPOLOGY_MODE_SINGLE: + cpumask_copy(&mask, cpumask_of(cpu)); + break; } return mask; } @@ -74,7 +99,7 @@ static cpumask_t cpu_thread_map(unsigned int cpu) int i; cpumask_copy(&mask, cpumask_of(cpu)); - if (!MACHINE_HAS_TOPOLOGY) + if (topology_mode != TOPOLOGY_MODE_HW) return mask; cpu -= cpu % (smp_cpu_mtid + 1); for (i = 0; i <= smp_cpu_mtid; i++) @@ -184,10 +209,8 @@ static void topology_update_polarization_simple(void) { int cpu; - mutex_lock(&smp_cpu_state_mutex); for_each_possible_cpu(cpu) smp_cpu_set_polarization(cpu, POLARIZATION_HRZ); - mutex_unlock(&smp_cpu_state_mutex); } static int ptf(unsigned long fc) @@ -223,7 +246,7 @@ int topology_set_cpu_management(int fc) static void update_cpu_masks(void) { struct cpu_topology_s390 *topo; - int cpu; + int cpu, id; for_each_possible_cpu(cpu) { topo = &cpu_topology[cpu]; @@ -231,12 +254,13 @@ static void update_cpu_masks(void) topo->core_mask = cpu_group_map(&socket_info, cpu); topo->book_mask = cpu_group_map(&book_info, cpu); topo->drawer_mask = cpu_group_map(&drawer_info, cpu); - if (!MACHINE_HAS_TOPOLOGY) { + if (topology_mode != TOPOLOGY_MODE_HW) { + id = topology_mode == TOPOLOGY_MODE_PACKAGE ? 
0 : cpu; topo->thread_id = cpu; topo->core_id = cpu; - topo->socket_id = cpu; - topo->book_id = cpu; - topo->drawer_id = cpu; + topo->socket_id = id; + topo->book_id = id; + topo->drawer_id = id; if (cpu_present(cpu)) cpumask_set_cpu(cpu, &cpus_with_topology); } @@ -254,6 +278,7 @@ static int __arch_update_cpu_topology(void) struct sysinfo_15_1_x *info = tl_info; int rc = 0; + mutex_lock(&smp_cpu_state_mutex); cpumask_clear(&cpus_with_topology); if (MACHINE_HAS_TOPOLOGY) { rc = 1; @@ -263,6 +288,7 @@ static int __arch_update_cpu_topology(void) update_cpu_masks(); if (!MACHINE_HAS_TOPOLOGY) topology_update_polarization_simple(); + mutex_unlock(&smp_cpu_state_mutex); return rc; } @@ -289,6 +315,11 @@ void topology_schedule_update(void) schedule_work(&topology_work); } +static void topology_flush_work(void) +{ + flush_work(&topology_work); +} + static void topology_timer_fn(unsigned long ignored) { if (ptf(PTF_CHECK)) @@ -459,6 +490,12 @@ void __init topology_init_early(void) struct sysinfo_15_1_x *info; set_sched_topology(s390_topology); + if (topology_mode == TOPOLOGY_MODE_UNINITIALIZED) { + if (MACHINE_HAS_TOPOLOGY) + topology_mode = TOPOLOGY_MODE_HW; + else + topology_mode = TOPOLOGY_MODE_SINGLE; + } if (!MACHINE_HAS_TOPOLOGY) goto out; tl_info = memblock_virt_alloc(PAGE_SIZE, PAGE_SIZE); @@ -474,12 +511,97 @@ void __init topology_init_early(void) __arch_update_cpu_topology(); } +static inline int topology_get_mode(int enabled) +{ + if (!enabled) + return TOPOLOGY_MODE_SINGLE; + return MACHINE_HAS_TOPOLOGY ? TOPOLOGY_MODE_HW : TOPOLOGY_MODE_PACKAGE; +} + +static inline int topology_is_enabled(void) +{ + return topology_mode != TOPOLOGY_MODE_SINGLE; +} + +static int __init topology_setup(char *str) +{ + bool enabled; + int rc; + + rc = kstrtobool(str, &enabled); + if (rc) + return rc; + topology_mode = topology_get_mode(enabled); + return 0; +} +early_param("topology", topology_setup); + +static int topology_ctl_handler(struct ctl_table *ctl, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + unsigned int len; + int new_mode; + char buf[2]; + + if (!*lenp || *ppos) { + *lenp = 0; + return 0; + } + if (!write) { + strncpy(buf, topology_is_enabled() ? "1\n" : "0\n", + ARRAY_SIZE(buf)); + len = strnlen(buf, ARRAY_SIZE(buf)); + if (len > *lenp) + len = *lenp; + if (copy_to_user(buffer, buf, len)) + return -EFAULT; + goto out; + } + len = *lenp; + if (copy_from_user(buf, buffer, len > sizeof(buf) ? 
sizeof(buf) : len)) + return -EFAULT; + if (buf[0] != '0' && buf[0] != '1') + return -EINVAL; + mutex_lock(&smp_cpu_state_mutex); + new_mode = topology_get_mode(buf[0] == '1'); + if (topology_mode != new_mode) { + topology_mode = new_mode; + topology_schedule_update(); + } + mutex_unlock(&smp_cpu_state_mutex); + topology_flush_work(); +out: + *lenp = len; + *ppos += len; + return 0; +} + +static struct ctl_table topology_ctl_table[] = { + { + .procname = "topology", + .mode = 0644, + .proc_handler = topology_ctl_handler, + }, + { }, +}; + +static struct ctl_table topology_dir_table[] = { + { + .procname = "s390", + .maxlen = 0, + .mode = 0555, + .child = topology_ctl_table, + }, + { }, +}; + static int __init topology_init(void) { if (MACHINE_HAS_TOPOLOGY) set_topology_timer(); else topology_update_polarization_simple(); + register_sysctl_table(topology_dir_table); return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching); } device_initcall(topology_init); diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c index e4d36094aceb..d93a2c0474bf 100644 --- a/arch/s390/kvm/diag.c +++ b/arch/s390/kvm/diag.c @@ -150,7 +150,7 @@ static int __diag_time_slice_end(struct kvm_vcpu *vcpu) { VCPU_EVENT(vcpu, 5, "%s", "diag time slice end"); vcpu->stat.diagnose_44++; - kvm_vcpu_on_spin(vcpu); + kvm_vcpu_on_spin(vcpu, true); return 0; } diff --git a/arch/s390/kvm/guestdbg.c b/arch/s390/kvm/guestdbg.c index c2e0ddc1356e..bcbd86621d01 100644 --- a/arch/s390/kvm/guestdbg.c +++ b/arch/s390/kvm/guestdbg.c @@ -308,7 +308,7 @@ static inline int in_addr_range(u64 addr, u64 a, u64 b) return (addr >= a) && (addr <= b); else /* "overflowing" interval */ - return (addr <= a) && (addr >= b); + return (addr >= a) || (addr <= b); } #define end_of_range(bp_info) (bp_info->addr + bp_info->len - 1) diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index a619ddae610d..a832ad031cee 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -2479,6 +2479,7 @@ void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu, struct kvm_s390_mchk_info *mchk; union mci mci; __u64 cr14 = 0; /* upper bits are not used */ + int rc; mci.val = mcck_info->mcic; if (mci.sr) @@ -2496,12 +2497,13 @@ void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu, if (mci.ck) { /* Inject the floating machine check */ inti.type = KVM_S390_MCHK; - WARN_ON_ONCE(__inject_vm(vcpu->kvm, &inti)); + rc = __inject_vm(vcpu->kvm, &inti); } else { /* Inject the machine check to specified vcpu */ irq.type = KVM_S390_MCHK; - WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq)); + rc = kvm_s390_inject_vcpu(vcpu, &irq); } + WARN_ON_ONCE(rc); } int kvm_set_routing_entry(struct kvm *kvm, diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index af09d3437631..40d0a1a97889 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -130,6 +130,12 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { NULL } }; +struct kvm_s390_tod_clock_ext { + __u8 epoch_idx; + __u64 tod; + __u8 reserved[7]; +} __packed; + /* allow nested virtualization in KVM (if enabled by user space) */ static int nested; module_param(nested, int, S_IRUGO); @@ -874,6 +880,26 @@ static int kvm_s390_vm_get_migration(struct kvm *kvm, return 0; } +static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr) +{ + struct kvm_s390_vm_tod_clock gtod; + + if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod))) + return -EFAULT; + + if (test_kvm_facility(kvm, 139)) + kvm_s390_set_tod_clock_ext(kvm, &gtod); + else if
(gtod.epoch_idx == 0) + kvm_s390_set_tod_clock(kvm, gtod.tod); + else + return -EINVAL; + + VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx", + gtod.epoch_idx, gtod.tod); + + return 0; +} + static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr) { u8 gtod_high; @@ -909,6 +935,9 @@ static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr) return -EINVAL; switch (attr->attr) { + case KVM_S390_VM_TOD_EXT: + ret = kvm_s390_set_tod_ext(kvm, attr); + break; case KVM_S390_VM_TOD_HIGH: ret = kvm_s390_set_tod_high(kvm, attr); break; @@ -922,6 +951,43 @@ static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr) return ret; } +static void kvm_s390_get_tod_clock_ext(struct kvm *kvm, + struct kvm_s390_vm_tod_clock *gtod) +{ + struct kvm_s390_tod_clock_ext htod; + + preempt_disable(); + + get_tod_clock_ext((char *)&htod); + + gtod->tod = htod.tod + kvm->arch.epoch; + gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx; + + if (gtod->tod < htod.tod) + gtod->epoch_idx += 1; + + preempt_enable(); +} + +static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr) +{ + struct kvm_s390_vm_tod_clock gtod; + + memset(&gtod, 0, sizeof(gtod)); + + if (test_kvm_facility(kvm, 139)) + kvm_s390_get_tod_clock_ext(kvm, &gtod); + else + gtod.tod = kvm_s390_get_tod_clock_fast(kvm); + + if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod))) + return -EFAULT; + + VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx", + gtod.epoch_idx, gtod.tod); + return 0; +} + static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr) { u8 gtod_high = 0; @@ -954,6 +1020,9 @@ static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr) return -EINVAL; switch (attr->attr) { + case KVM_S390_VM_TOD_EXT: + ret = kvm_s390_get_tod_ext(kvm, attr); + break; case KVM_S390_VM_TOD_HIGH: ret = kvm_s390_get_tod_high(kvm, attr); break; @@ -1505,7 +1574,7 @@ static int kvm_s390_get_cmma_bits(struct kvm *kvm, if (r < 0) pgstev = 0; /* save the value */ - res[i++] = (pgstev >> 24) & 0x3; + res[i++] = (pgstev >> 24) & 0x43; /* * if the next bit is too far away, stop.
* if we reached the previous "next", find the next one @@ -1583,7 +1652,7 @@ static int kvm_s390_set_cmma_bits(struct kvm *kvm, pgstev = bits[i]; pgstev = pgstev << 24; - mask &= _PGSTE_GPS_USAGE_MASK; + mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT; set_pgste_bits(kvm->mm, hva, mask, pgstev); } srcu_read_unlock(&kvm->srcu, srcu_idx); @@ -1858,8 +1927,16 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask, S390_ARCH_FAC_LIST_SIZE_BYTE); + /* we are always in czam mode - even on pre z14 machines */ + set_kvm_facility(kvm->arch.model.fac_mask, 138); + set_kvm_facility(kvm->arch.model.fac_list, 138); + /* we emulate STHYI in kvm */ set_kvm_facility(kvm->arch.model.fac_mask, 74); set_kvm_facility(kvm->arch.model.fac_list, 74); + if (MACHINE_HAS_TLB_GUEST) { + set_kvm_facility(kvm->arch.model.fac_mask, 147); + set_kvm_facility(kvm->arch.model.fac_list, 147); + } kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid(); kvm->arch.model.ibc = sclp.ibc & 0x0fff; @@ -2369,6 +2446,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) vcpu->arch.sie_block->eca |= ECA_VX; vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT; } + if (test_kvm_facility(vcpu->kvm, 139)) + vcpu->arch.sie_block->ecd |= ECD_MEF; + vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx) | SDNXC; vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb; @@ -2447,6 +2527,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) return kvm_s390_vcpu_has_irq(vcpu, 0); } +bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) +{ + return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE); +} + void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu) { atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); @@ -2855,6 +2940,35 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu) return 0; } +void kvm_s390_set_tod_clock_ext(struct kvm *kvm, + const struct kvm_s390_vm_tod_clock *gtod) +{ + struct kvm_vcpu *vcpu; + struct kvm_s390_tod_clock_ext htod; + int i; + + mutex_lock(&kvm->lock); + preempt_disable(); + + get_tod_clock_ext((char *)&htod); + + kvm->arch.epoch = gtod->tod - htod.tod; + kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx; + + if (kvm->arch.epoch > gtod->tod) + kvm->arch.epdx -= 1; + + kvm_s390_vcpu_block_all(kvm); + kvm_for_each_vcpu(i, vcpu, kvm) { + vcpu->arch.sie_block->epoch = kvm->arch.epoch; + vcpu->arch.sie_block->epdx = kvm->arch.epdx; + } + + kvm_s390_vcpu_unblock_all(kvm); + preempt_enable(); + mutex_unlock(&kvm->lock); +} + void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod) { struct kvm_vcpu *vcpu; diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 6fedc8bc7a37..9f8fdd7b2311 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -272,6 +272,8 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu); int handle_sthyi(struct kvm_vcpu *vcpu); /* implemented in kvm-s390.c */ +void kvm_s390_set_tod_clock_ext(struct kvm *kvm, + const struct kvm_s390_vm_tod_clock *gtod); void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod); long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable); int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr); diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 785ad028bde6..c954ac49eee4 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -988,6 +988,8 @@ static inline int do_essa(struct kvm_vcpu *vcpu, const int orc) if (pgstev & _PGSTE_GPS_ZERO) res |= 1; } + if (pgstev & _PGSTE_GPS_NODAT) + res |= 
0x20; vcpu->run->s.regs.gprs[r1] = res; /* * It is possible that all the normal 511 slots were full, in which case @@ -1027,7 +1029,9 @@ static int handle_essa(struct kvm_vcpu *vcpu) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); /* Check for invalid operation request code */ orc = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28; - if (orc > ESSA_MAX) + /* ORCs 0-6 are always valid */ + if (orc > (test_kvm_facility(vcpu->kvm, 147) ? ESSA_SET_STABLE_NODAT + : ESSA_SET_STABLE_IF_RESIDENT)) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); if (likely(!vcpu->kvm->arch.migration_state)) { diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index 1a252f537081..9d592ef4104b 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c @@ -155,29 +155,26 @@ static int __sigp_stop_and_store_status(struct kvm_vcpu *vcpu, return rc; } -static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter) +static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter, + u64 *status_reg) { - int rc; unsigned int i; struct kvm_vcpu *v; + bool all_stopped = true; - switch (parameter & 0xff) { - case 0: - rc = SIGP_CC_NOT_OPERATIONAL; - break; - case 1: - case 2: - kvm_for_each_vcpu(i, v, vcpu->kvm) { - v->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; - kvm_clear_async_pf_completion_queue(v); - } - - rc = SIGP_CC_ORDER_CODE_ACCEPTED; - break; - default: - rc = -EOPNOTSUPP; + kvm_for_each_vcpu(i, v, vcpu->kvm) { + if (v == vcpu) + continue; + if (!is_vcpu_stopped(v)) + all_stopped = false; } - return rc; + + *status_reg &= 0xffffffff00000000UL; + + /* Reject set arch order, with czam we're always in z/Arch mode. */ + *status_reg |= (all_stopped ? SIGP_STATUS_INVALID_PARAMETER : + SIGP_STATUS_INCORRECT_STATE); + return SIGP_CC_STATUS_STORED; } static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu, @@ -446,7 +443,8 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu) switch (order_code) { case SIGP_SET_ARCHITECTURE: vcpu->stat.instruction_sigp_arch++; - rc = __sigp_set_arch(vcpu, parameter); + rc = __sigp_set_arch(vcpu, parameter, + &vcpu->run->s.regs.gprs[r1]); break; default: rc = handle_sigp_dst(vcpu, order_code, cpu_addr, diff --git a/arch/s390/kvm/sthyi.c b/arch/s390/kvm/sthyi.c index a2e5c24f47a7..395926b8c1ed 100644 --- a/arch/s390/kvm/sthyi.c +++ b/arch/s390/kvm/sthyi.c @@ -436,14 +436,6 @@ int handle_sthyi(struct kvm_vcpu *vcpu) if (addr & ~PAGE_MASK) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - /* - * If the page has not yet been faulted in, we want to do that - * now and not after all the expensive calculations. 
- */ - r = write_guest(vcpu, addr, reg2, &cc, 1); - if (r) - return kvm_s390_inject_prog_cond(vcpu, r); - sctns = (void *)get_zeroed_page(GFP_KERNEL); if (!sctns) return -ENOMEM; diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index ba8203e4d516..b18b5652e5c5 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c @@ -349,6 +349,9 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) scb_s->eca |= scb_o->eca & ECA_IB; if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_CEI)) scb_s->eca |= scb_o->eca & ECA_CEI; + /* Epoch Extension */ + if (test_kvm_facility(vcpu->kvm, 139)) + scb_s->ecd |= scb_o->ecd & ECD_MEF; prepare_ibc(vcpu, vsie_page); rc = shadow_crycb(vcpu, vsie_page); @@ -806,8 +809,6 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) { struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; struct kvm_s390_sie_block *scb_o = vsie_page->scb_o; - struct mcck_volatile_info *mcck_info; - struct sie_page *sie_page; int rc; handle_last_fault(vcpu, vsie_page); @@ -831,9 +832,7 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) if (rc == -EINTR) { VCPU_EVENT(vcpu, 3, "%s", "machine check"); - sie_page = container_of(scb_s, struct sie_page, sie_block); - mcck_info = &sie_page->mcck_info; - kvm_s390_reinject_machine_check(vcpu, mcck_info); + kvm_s390_reinject_machine_check(vcpu, &vsie_page->mcck_info); return 0; } @@ -919,6 +918,13 @@ static void register_shadow_scb(struct kvm_vcpu *vcpu, */ preempt_disable(); scb_s->epoch += vcpu->kvm->arch.epoch; + + if (scb_s->ecd & ECD_MEF) { + scb_s->epdx += vcpu->kvm->arch.epdx; + if (scb_s->epoch < vcpu->kvm->arch.epoch) + scb_s->epdx += 1; + } + preempt_enable(); } diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c index 9e1494e3d849..2f66290c9b92 100644 --- a/arch/s390/mm/gmap.c +++ b/arch/s390/mm/gmap.c @@ -100,14 +100,14 @@ struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit) if (!gmap) return NULL; gmap->mm = mm; - spin_lock(&mm->context.gmap_lock); + spin_lock(&mm->context.lock); list_add_rcu(&gmap->list, &mm->context.gmap_list); if (list_is_singular(&mm->context.gmap_list)) gmap_asce = gmap->asce; else gmap_asce = -1UL; WRITE_ONCE(mm->context.gmap_asce, gmap_asce); - spin_unlock(&mm->context.gmap_lock); + spin_unlock(&mm->context.lock); return gmap; } EXPORT_SYMBOL_GPL(gmap_create); @@ -248,7 +248,7 @@ void gmap_remove(struct gmap *gmap) spin_unlock(&gmap->shadow_lock); } /* Remove gmap from the pre-mm list */ - spin_lock(&gmap->mm->context.gmap_lock); + spin_lock(&gmap->mm->context.lock); list_del_rcu(&gmap->list); if (list_empty(&gmap->mm->context.gmap_list)) gmap_asce = 0; @@ -258,7 +258,7 @@ void gmap_remove(struct gmap *gmap) else gmap_asce = -1UL; WRITE_ONCE(gmap->mm->context.gmap_asce, gmap_asce); - spin_unlock(&gmap->mm->context.gmap_lock); + spin_unlock(&gmap->mm->context.lock); synchronize_rcu(); /* Put reference */ gmap_put(gmap); diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c index 8ecc25e760fa..98ffe3ee9411 100644 --- a/arch/s390/mm/gup.c +++ b/arch/s390/mm/gup.c @@ -56,13 +56,12 @@ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr, static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { - unsigned long mask, result; struct page *head, *page; + unsigned long mask; int refs; - result = write ? 
0 : _SEGMENT_ENTRY_PROTECT; - mask = result | _SEGMENT_ENTRY_INVALID; - if ((pmd_val(pmd) & mask) != result) + mask = (write ? _SEGMENT_ENTRY_PROTECT : 0) | _SEGMENT_ENTRY_INVALID; + if ((pmd_val(pmd) & mask) != 0) return 0; VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT)); diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c index c5b74dd61197..05f1f27e6708 100644 --- a/arch/s390/mm/pgalloc.c +++ b/arch/s390/mm/pgalloc.c @@ -83,7 +83,7 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long end) int rc, notify; /* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */ - BUG_ON(mm->context.asce_limit < _REGION2_SIZE); + VM_BUG_ON(mm->context.asce_limit < _REGION2_SIZE); if (end >= TASK_SIZE_MAX) return -ENOMEM; rc = 0; @@ -124,7 +124,7 @@ void crst_table_downgrade(struct mm_struct *mm) pgd_t *pgd; /* downgrade should only happen from 3 to 2 levels (compat only) */ - BUG_ON(mm->context.asce_limit != _REGION2_SIZE); + VM_BUG_ON(mm->context.asce_limit != _REGION2_SIZE); if (current->active_mm == mm) { clear_user_asce(); @@ -188,7 +188,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm) /* Try to get a fragment of a 4K page as a 2K page table */ if (!mm_alloc_pgste(mm)) { table = NULL; - spin_lock_bh(&mm->context.pgtable_lock); + spin_lock_bh(&mm->context.lock); if (!list_empty(&mm->context.pgtable_list)) { page = list_first_entry(&mm->context.pgtable_list, struct page, lru); @@ -203,7 +203,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm) list_del(&page->lru); } } - spin_unlock_bh(&mm->context.pgtable_lock); + spin_unlock_bh(&mm->context.lock); if (table) return table; } @@ -227,9 +227,9 @@ unsigned long *page_table_alloc(struct mm_struct *mm) /* Return the first 2K fragment of the page */ atomic_set(&page->_mapcount, 1); clear_table(table, _PAGE_INVALID, PAGE_SIZE); - spin_lock_bh(&mm->context.pgtable_lock); + spin_lock_bh(&mm->context.lock); list_add(&page->lru, &mm->context.pgtable_list); - spin_unlock_bh(&mm->context.pgtable_lock); + spin_unlock_bh(&mm->context.lock); } return table; } @@ -243,13 +243,13 @@ void page_table_free(struct mm_struct *mm, unsigned long *table) if (!mm_alloc_pgste(mm)) { /* Free 2K page table fragment of a 4K page */ bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)); - spin_lock_bh(&mm->context.pgtable_lock); + spin_lock_bh(&mm->context.lock); mask = atomic_xor_bits(&page->_mapcount, 1U << bit); if (mask & 3) list_add(&page->lru, &mm->context.pgtable_list); else list_del(&page->lru); - spin_unlock_bh(&mm->context.pgtable_lock); + spin_unlock_bh(&mm->context.lock); if (mask != 0) return; } @@ -275,13 +275,13 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table, return; } bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)); - spin_lock_bh(&mm->context.pgtable_lock); + spin_lock_bh(&mm->context.lock); mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit); if (mask & 3) list_add_tail(&page->lru, &mm->context.pgtable_list); else list_del(&page->lru); - spin_unlock_bh(&mm->context.pgtable_lock); + spin_unlock_bh(&mm->context.lock); table = (unsigned long *) (__pa(table) | (1U << bit)); tlb_remove_table(tlb, table); } diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 4198a71b8fdd..ae677f814bc0 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -919,7 +919,7 @@ int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc, case ESSA_GET_STATE: break; case ESSA_SET_STABLE: - pgstev &= ~_PGSTE_GPS_USAGE_MASK; + pgstev &= 
~(_PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT); pgstev |= _PGSTE_GPS_USAGE_STABLE; break; case ESSA_SET_UNUSED: @@ -965,6 +965,10 @@ int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc, pgstev |= _PGSTE_GPS_USAGE_STABLE; } break; + case ESSA_SET_STABLE_NODAT: + pgstev &= ~_PGSTE_GPS_USAGE_MASK; + pgstev |= _PGSTE_GPS_USAGE_STABLE | _PGSTE_GPS_NODAT; + break; default: /* we should never get here! */ break; diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 1803797fc885..8ec88497a28d 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -1093,15 +1093,27 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i case BPF_JMP | BPF_JSGT | BPF_K: /* ((s64) dst > (s64) imm) */ mask = 0x2000; /* jh */ goto branch_ks; + case BPF_JMP | BPF_JSLT | BPF_K: /* ((s64) dst < (s64) imm) */ + mask = 0x4000; /* jl */ + goto branch_ks; case BPF_JMP | BPF_JSGE | BPF_K: /* ((s64) dst >= (s64) imm) */ mask = 0xa000; /* jhe */ goto branch_ks; + case BPF_JMP | BPF_JSLE | BPF_K: /* ((s64) dst <= (s64) imm) */ + mask = 0xc000; /* jle */ + goto branch_ks; case BPF_JMP | BPF_JGT | BPF_K: /* (dst_reg > imm) */ mask = 0x2000; /* jh */ goto branch_ku; + case BPF_JMP | BPF_JLT | BPF_K: /* (dst_reg < imm) */ + mask = 0x4000; /* jl */ + goto branch_ku; case BPF_JMP | BPF_JGE | BPF_K: /* (dst_reg >= imm) */ mask = 0xa000; /* jhe */ goto branch_ku; + case BPF_JMP | BPF_JLE | BPF_K: /* (dst_reg <= imm) */ + mask = 0xc000; /* jle */ + goto branch_ku; case BPF_JMP | BPF_JNE | BPF_K: /* (dst_reg != imm) */ mask = 0x7000; /* jne */ goto branch_ku; @@ -1119,15 +1131,27 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i case BPF_JMP | BPF_JSGT | BPF_X: /* ((s64) dst > (s64) src) */ mask = 0x2000; /* jh */ goto branch_xs; + case BPF_JMP | BPF_JSLT | BPF_X: /* ((s64) dst < (s64) src) */ + mask = 0x4000; /* jl */ + goto branch_xs; case BPF_JMP | BPF_JSGE | BPF_X: /* ((s64) dst >= (s64) src) */ mask = 0xa000; /* jhe */ goto branch_xs; + case BPF_JMP | BPF_JSLE | BPF_X: /* ((s64) dst <= (s64) src) */ + mask = 0xc000; /* jle */ + goto branch_xs; case BPF_JMP | BPF_JGT | BPF_X: /* (dst > src) */ mask = 0x2000; /* jh */ goto branch_xu; + case BPF_JMP | BPF_JLT | BPF_X: /* (dst < src) */ + mask = 0x4000; /* jl */ + goto branch_xu; case BPF_JMP | BPF_JGE | BPF_X: /* (dst >= src) */ mask = 0xa000; /* jhe */ goto branch_xu; + case BPF_JMP | BPF_JLE | BPF_X: /* (dst <= src) */ + mask = 0xc000; /* jle */ + goto branch_xu; case BPF_JMP | BPF_JNE | BPF_X: /* (dst != src) */ mask = 0x7000; /* jne */ goto branch_xu; diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 7b30af5da222..a25d95a6612d 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -262,10 +262,6 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len) return rc; } -void pcibios_fixup_bus(struct pci_bus *bus) -{ -} - resource_size_t pcibios_align_resource(void *data, const struct resource *res, resource_size_t size, resource_size_t align) @@ -776,6 +772,7 @@ void pcibios_remove_bus(struct pci_bus *bus) zpci_exit_slot(zdev); zpci_cleanup_bus_resources(zdev); + zpci_destroy_iommu(zdev); zpci_free_domain(zdev); spin_lock(&zpci_list_lock); @@ -848,11 +845,15 @@ int zpci_create_device(struct zpci_dev *zdev) if (rc) goto out; + rc = zpci_init_iommu(zdev); + if (rc) + goto out_free; + mutex_init(&zdev->lock); if (zdev->state == ZPCI_FN_STATE_CONFIGURED) { rc = zpci_enable_device(zdev); if (rc) - goto out_free; + goto 
out_destroy_iommu; } rc = zpci_scan_bus(zdev); if (rc) @@ -869,6 +870,8 @@ int zpci_create_device(struct zpci_dev *zdev) out_disable: if (zdev->state == ZPCI_FN_STATE_ONLINE) zpci_disable_device(zdev); +out_destroy_iommu: + zpci_destroy_iommu(zdev); out_free: zpci_free_domain(zdev); out: diff --git a/arch/s390/tools/gen_facilities.c b/arch/s390/tools/gen_facilities.c index 29d72bf8ed2b..70dd8f17d054 100644 --- a/arch/s390/tools/gen_facilities.c +++ b/arch/s390/tools/gen_facilities.c @@ -83,6 +83,7 @@ static struct facility_def facility_defs[] = { 78, /* enhanced-DAT 2 */ 130, /* instruction-execution-protection */ 131, /* enhanced-SOP 2 and side-effect */ + 139, /* multiple epoch facility */ 146, /* msa extension 8 */ -1 /* END */ } diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c index 5de60a77eaa1..0bcbe58b11e9 100644 --- a/arch/sh/boards/mach-migor/setup.c +++ b/arch/sh/boards/mach-migor/setup.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/sh/configs/ap325rxa_defconfig b/arch/sh/configs/ap325rxa_defconfig index e5335123b5e9..72b72e50a92e 100644 --- a/arch/sh/configs/ap325rxa_defconfig +++ b/arch/sh/configs/ap325rxa_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_BSD_PROCESS_ACCT=y @@ -28,14 +27,10 @@ CONFIG_IP_PNP_DHCP=y # CONFIG_INET_XFRM_MODE_TRANSPORT is not set # CONFIG_INET_XFRM_MODE_TUNNEL is not set # CONFIG_INET_XFRM_MODE_BEET is not set -# CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_MTD=y -CONFIG_MTD_CONCAT=y -CONFIG_MTD_PARTITIONS=y CONFIG_MTD_CMDLINE_PARTS=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_AMDSTD=y @@ -51,8 +46,6 @@ CONFIG_NETDEVICES=y CONFIG_SMSC_PHY=y CONFIG_NET_ETHERNET=y CONFIG_SMSC911X=y -# CONFIG_NETDEV_1000 is not set -# CONFIG_NETDEV_10000 is not set # CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set @@ -82,7 +75,6 @@ CONFIG_FB=y CONFIG_FB_SH_MOBILE_LCDC=y CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_LOGO=y -# CONFIG_HID_SUPPORT is not set # CONFIG_USB_SUPPORT is not set CONFIG_MMC=y CONFIG_MMC_SPI=y @@ -110,8 +102,6 @@ CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_CODEPAGE_932=y CONFIG_NLS_ISO8859_1=y # CONFIG_ENABLE_MUST_CHECK is not set -# CONFIG_RCU_CPU_STALL_DETECTOR is not set -CONFIG_SYSCTL_SYSCALL_CHECK=y CONFIG_CRYPTO=y CONFIG_CRYPTO_CBC=y # CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/sh/configs/apsh4a3a_defconfig b/arch/sh/configs/apsh4a3a_defconfig index 6cb327977d13..4710df43a5b5 100644 --- a/arch/sh/configs/apsh4a3a_defconfig +++ b/arch/sh/configs/apsh4a3a_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_IKCONFIG=y @@ -28,15 +27,11 @@ CONFIG_INET=y CONFIG_IP_ADVANCED_ROUTER=y CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y -# CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set # CONFIG_WIRELESS is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_FW_LOADER is not set CONFIG_MTD=y -CONFIG_MTD_CONCAT=y -CONFIG_MTD_PARTITIONS=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_AMDSTD=y @@ -46,8 +41,6 @@ CONFIG_BLK_DEV_RAM_SIZE=16384 CONFIG_NETDEVICES=y CONFIG_NET_ETHERNET=y CONFIG_SMSC911X=y -# CONFIG_NETDEV_1000 is not set -# CONFIG_NETDEV_10000 is not set # CONFIG_WLAN is not set # CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set @@ -66,7 +59,6 @@ CONFIG_FONTS=y 
CONFIG_FONT_8x8=y CONFIG_FONT_8x16=y CONFIG_LOGO=y -# CONFIG_HID_SUPPORT is not set # CONFIG_USB_SUPPORT is not set CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y @@ -96,7 +88,6 @@ CONFIG_DEBUG_KERNEL=y # CONFIG_DEBUG_PREEMPT is not set # CONFIG_DEBUG_BUGVERBOSE is not set CONFIG_DEBUG_INFO=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set # CONFIG_FTRACE is not set # CONFIG_CRYPTO_ANSI_CPRNG is not set # CONFIG_CRYPTO_HW is not set diff --git a/arch/sh/configs/apsh4ad0a_defconfig b/arch/sh/configs/apsh4ad0a_defconfig index fe45d2c9b151..825c641726c4 100644 --- a/arch/sh/configs/apsh4ad0a_defconfig +++ b/arch/sh/configs/apsh4ad0a_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_BSD_PROCESS_ACCT=y @@ -53,7 +52,6 @@ CONFIG_PACKET=y CONFIG_UNIX=y CONFIG_NET_KEY=y CONFIG_INET=y -# CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set # CONFIG_WIRELESS is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" @@ -70,8 +68,6 @@ CONFIG_NETDEVICES=y CONFIG_MDIO_BITBANG=y CONFIG_NET_ETHERNET=y CONFIG_SMSC911X=y -# CONFIG_NETDEV_1000 is not set -# CONFIG_NETDEV_10000 is not set # CONFIG_WLAN is not set CONFIG_INPUT_EVDEV=y # CONFIG_INPUT_KEYBOARD is not set @@ -83,7 +79,6 @@ CONFIG_SERIAL_SH_SCI_CONSOLE=y # CONFIG_LEGACY_PTYS is not set # CONFIG_HW_RANDOM is not set # CONFIG_HWMON is not set -CONFIG_VIDEO_OUTPUT_CONTROL=y CONFIG_FB=y CONFIG_FB_SH7785FB=y CONFIG_FRAMEBUFFER_CONSOLE=y @@ -124,6 +119,5 @@ CONFIG_DEBUG_SHIRQ=y CONFIG_DETECT_HUNG_TASK=y CONFIG_DEBUG_INFO=y CONFIG_DEBUG_VM=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set CONFIG_DWARF_UNWINDER=y # CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/sh/configs/cayman_defconfig b/arch/sh/configs/cayman_defconfig index 67e150631ea5..5a90e24aa8a6 100644 --- a/arch/sh/configs/cayman_defconfig +++ b/arch/sh/configs/cayman_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_POSIX_MQUEUE=y CONFIG_LOG_BUF_SHIFT=14 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set @@ -19,7 +18,6 @@ CONFIG_PACKET=y CONFIG_UNIX=y CONFIG_INET=y CONFIG_IP_PNP=y -# CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_FW_LOADER is not set @@ -38,7 +36,6 @@ CONFIG_NET_ETHERNET=y CONFIG_HW_RANDOM=y CONFIG_I2C=m CONFIG_WATCHDOG=y -CONFIG_VIDEO_OUTPUT_CONTROL=y CONFIG_FB=y CONFIG_FIRMWARE_EDID=y CONFIG_FB_MODE_HELPERS=y @@ -67,5 +64,4 @@ CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y CONFIG_SCHEDSTATS=y CONFIG_FRAME_POINTER=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set # CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/sh/configs/dreamcast_defconfig b/arch/sh/configs/dreamcast_defconfig index ec243ca29529..3f08dc54480b 100644 --- a/arch/sh/configs/dreamcast_defconfig +++ b/arch/sh/configs/dreamcast_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_LOG_BUF_SHIFT=14 @@ -32,7 +31,6 @@ CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y CONFIG_INET=y -# CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_STANDALONE is not set @@ -43,8 +41,6 @@ CONFIG_NET_ETHERNET=y CONFIG_NET_PCI=y CONFIG_8139TOO=y # CONFIG_8139TOO_PIO is not set -# CONFIG_NETDEV_1000 is not set -# CONFIG_NETDEV_10000 is not set # CONFIG_KEYBOARD_ATKBD is not set CONFIG_KEYBOARD_MAPLE=y # CONFIG_MOUSE_PS2 is not set @@ -56,7 +52,6 @@ CONFIG_HW_RANDOM=y # CONFIG_HWMON is not set CONFIG_WATCHDOG=y CONFIG_SH_WDT=y -CONFIG_VIDEO_OUTPUT_CONTROL=m CONFIG_FB=y CONFIG_FIRMWARE_EDID=y CONFIG_FB_PVR2=y @@ -74,5 +69,4 @@ CONFIG_LOGO=y CONFIG_PROC_KCORE=y 
CONFIG_TMPFS=y CONFIG_HUGETLBFS=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set # CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/sh/configs/ecovec24-romimage_defconfig b/arch/sh/configs/ecovec24-romimage_defconfig index 5fcb17bff24a..0c5dfccbfe37 100644 --- a/arch/sh/configs/ecovec24-romimage_defconfig +++ b/arch/sh/configs/ecovec24-romimage_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_BSD_PROCESS_ACCT=y @@ -26,19 +25,15 @@ CONFIG_INET=y # CONFIG_INET_XFRM_MODE_TRANSPORT is not set # CONFIG_INET_XFRM_MODE_TUNNEL is not set # CONFIG_INET_XFRM_MODE_BEET is not set -# CONFIG_INET_LRO is not set # CONFIG_INET_DIAG is not set # CONFIG_IPV6 is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" -# CONFIG_MISC_DEVICES is not set CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y # CONFIG_SCSI_LOWLEVEL is not set CONFIG_NETDEVICES=y CONFIG_NET_ETHERNET=y CONFIG_SH_ETH=y -# CONFIG_NETDEV_1000 is not set -# CONFIG_NETDEV_10000 is not set # CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set @@ -51,7 +46,6 @@ CONFIG_I2C=y CONFIG_I2C_SH_MOBILE=y CONFIG_GPIO_SYSFS=y # CONFIG_HWMON is not set -# CONFIG_HID_SUPPORT is not set CONFIG_USB=y CONFIG_USB_R8A66597_HCD=y CONFIG_USB_STORAGE=y @@ -64,4 +58,3 @@ CONFIG_TMPFS=y # CONFIG_NETWORK_FILESYSTEMS is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_DEBUG_FS=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set diff --git a/arch/sh/configs/ecovec24_defconfig b/arch/sh/configs/ecovec24_defconfig index 0b364e3b0ff8..3568310c2c2f 100644 --- a/arch/sh/configs/ecovec24_defconfig +++ b/arch/sh/configs/ecovec24_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_BSD_PROCESS_ACCT=y @@ -29,16 +28,12 @@ CONFIG_IP_PNP_DHCP=y # CONFIG_INET_XFRM_MODE_TRANSPORT is not set # CONFIG_INET_XFRM_MODE_TUNNEL is not set # CONFIG_INET_XFRM_MODE_BEET is not set -# CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set CONFIG_IRDA=y CONFIG_SH_SIR=y CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_MTD=y -CONFIG_MTD_CONCAT=y -CONFIG_MTD_PARTITIONS=y CONFIG_MTD_CMDLINE_PARTS=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_AMDSTD=y @@ -53,8 +48,6 @@ CONFIG_NETDEVICES=y CONFIG_SMSC_PHY=y CONFIG_NET_ETHERNET=y CONFIG_SH_ETH=y -# CONFIG_NETDEV_1000 is not set -# CONFIG_NETDEV_10000 is not set # CONFIG_INPUT_MOUSEDEV is not set CONFIG_INPUT_EVDEV=y # CONFIG_KEYBOARD_ATKBD is not set @@ -140,8 +133,6 @@ CONFIG_NLS_CODEPAGE_932=y CONFIG_NLS_ISO8859_1=y # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_DEBUG_FS=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set -CONFIG_SYSCTL_SYSCALL_CHECK=y CONFIG_CRYPTO=y CONFIG_CRYPTO_CBC=y # CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/sh/configs/edosk7705_defconfig b/arch/sh/configs/edosk7705_defconfig index 41fa3a7eed96..db756e099052 100644 --- a/arch/sh/configs/edosk7705_defconfig +++ b/arch/sh/configs/edosk7705_defconfig @@ -20,7 +20,6 @@ CONFIG_CPU_SUBTYPE_SH7705=y CONFIG_SH_EDOSK7705=y CONFIG_SH_PCLK_FREQ=31250000 # CONFIG_PREVENT_FIRMWARE_BUILD is not set -# CONFIG_MISC_DEVICES is not set # CONFIG_INPUT is not set # CONFIG_SERIO is not set # CONFIG_VT is not set @@ -35,5 +34,4 @@ CONFIG_SH_PCLK_FREQ=31250000 # CONFIG_SYSFS is not set # CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set -# CONFIG_RCU_CPU_STALL_DETECTOR is not set # CONFIG_CRC32 is not set diff --git a/arch/sh/configs/edosk7760_defconfig b/arch/sh/configs/edosk7760_defconfig index 
e1077a041ac3..aab4ff1e247c 100644 --- a/arch/sh/configs/edosk7760_defconfig +++ b/arch/sh/configs/edosk7760_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_LOCALVERSION="_edosk7760" CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y @@ -31,7 +30,6 @@ CONFIG_IP_PNP_BOOTP=y # CONFIG_INET_XFRM_MODE_TRANSPORT is not set # CONFIG_INET_XFRM_MODE_TUNNEL is not set # CONFIG_INET_XFRM_MODE_BEET is not set -# CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_FW_LOADER is not set @@ -39,10 +37,7 @@ CONFIG_DEBUG_DRIVER=y CONFIG_DEBUG_DEVRES=y CONFIG_MTD=y CONFIG_MTD_DEBUG=y -CONFIG_MTD_CONCAT=y -CONFIG_MTD_PARTITIONS=y CONFIG_MTD_CMDLINE_PARTS=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y CONFIG_MTD_JEDECPROBE=y @@ -62,12 +57,9 @@ CONFIG_MTD_ABSENT=y CONFIG_MTD_PHYSMAP=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=26000 -# CONFIG_MISC_DEVICES is not set CONFIG_NETDEVICES=y CONFIG_NET_ETHERNET=y CONFIG_SMC91X=y -# CONFIG_NETDEV_1000 is not set -# CONFIG_NETDEV_10000 is not set # CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set @@ -92,7 +84,6 @@ CONFIG_SND=y # CONFIG_SND_VERBOSE_PROCFS is not set CONFIG_SND_VERBOSE_PRINTK=y CONFIG_SND_SOC=y -# CONFIG_HID_SUPPORT is not set # CONFIG_USB_SUPPORT is not set CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y @@ -119,8 +110,6 @@ CONFIG_DETECT_HUNG_TASK=y # CONFIG_SCHED_DEBUG is not set CONFIG_TIMER_STATS=y CONFIG_DEBUG_INFO=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set -CONFIG_SYSCTL_SYSCALL_CHECK=y CONFIG_CRYPTO=y CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_DES=y diff --git a/arch/sh/configs/espt_defconfig b/arch/sh/configs/espt_defconfig index 67cb1094a033..2985fe7c6d50 100644 --- a/arch/sh/configs/espt_defconfig +++ b/arch/sh/configs/espt_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y @@ -26,13 +25,10 @@ CONFIG_INET=y CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y -# CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_MTD=y -CONFIG_MTD_PARTITIONS=y CONFIG_MTD_CMDLINE_PARTS=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y CONFIG_MTD_JEDECPROBE=y @@ -43,14 +39,11 @@ CONFIG_MTD_CFI_GEOMETRY=y CONFIG_MTD_CFI_AMDSTD=y CONFIG_MTD_COMPLEX_MAPPINGS=y CONFIG_MTD_PHYSMAP=y -# CONFIG_MISC_DEVICES is not set CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_NETDEVICES=y CONFIG_NET_ETHERNET=y CONFIG_SH_ETH=y -# CONFIG_NETDEV_1000 is not set -# CONFIG_NETDEV_10000 is not set # CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set @@ -65,7 +58,6 @@ CONFIG_FB_FOREIGN_ENDIAN=y CONFIG_FB_SH7760=y CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_LOGO=y -# CONFIG_HID_SUPPORT is not set CONFIG_USB=y CONFIG_USB_MON=y CONFIG_USB_OHCI_HCD=y @@ -73,7 +65,6 @@ CONFIG_USB_STORAGE=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -CONFIG_AUTOFS_FS=y CONFIG_AUTOFS4_FS=y CONFIG_PROC_KCORE=y CONFIG_TMPFS=y @@ -123,6 +114,5 @@ CONFIG_NLS_UTF8=y # CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_DEBUG_FS=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRC_T10DIF=y diff --git a/arch/sh/configs/hp6xx_defconfig b/arch/sh/configs/hp6xx_defconfig index 496edcdf95a3..4dcf7f552582 100644 --- a/arch/sh/configs/hp6xx_defconfig +++ b/arch/sh/configs/hp6xx_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_IKCONFIG=y 
CONFIG_IKCONFIG_PROC=y @@ -37,7 +36,6 @@ CONFIG_SERIAL_SH_SCI_NR_UARTS=3 CONFIG_SERIAL_SH_SCI_CONSOLE=y CONFIG_LEGACY_PTY_COUNT=64 # CONFIG_HWMON is not set -CONFIG_VIDEO_OUTPUT_CONTROL=y CONFIG_FB=y CONFIG_FIRMWARE_EDID=y CONFIG_FB_HIT=y @@ -46,7 +44,6 @@ CONFIG_BACKLIGHT_LCD_SUPPORT=y CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_FONTS=y CONFIG_FONT_PEARL_8x8=y -# CONFIG_HID_SUPPORT is not set # CONFIG_USB_SUPPORT is not set CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_SH=y @@ -55,7 +52,6 @@ CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y CONFIG_PROC_KCORE=y CONFIG_NLS_CODEPAGE_850=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set CONFIG_CRYPTO=y CONFIG_CRYPTO_CBC=y CONFIG_CRYPTO_ECB=y diff --git a/arch/sh/configs/kfr2r09-romimage_defconfig b/arch/sh/configs/kfr2r09-romimage_defconfig index 029a506ca325..9cc37f29e3b4 100644 --- a/arch/sh/configs/kfr2r09-romimage_defconfig +++ b/arch/sh/configs/kfr2r09-romimage_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_BSD_PROCESS_ACCT=y @@ -26,12 +25,10 @@ CONFIG_INET=y # CONFIG_INET_XFRM_MODE_TRANSPORT is not set # CONFIG_INET_XFRM_MODE_TUNNEL is not set # CONFIG_INET_XFRM_MODE_BEET is not set -# CONFIG_INET_LRO is not set # CONFIG_INET_DIAG is not set # CONFIG_IPV6 is not set # CONFIG_WIRELESS is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" -# CONFIG_MISC_DEVICES is not set # CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set @@ -44,7 +41,6 @@ CONFIG_I2C=y CONFIG_I2C_SH_MOBILE=y CONFIG_GPIO_SYSFS=y # CONFIG_HWMON is not set -# CONFIG_HID_SUPPORT is not set CONFIG_USB_GADGET=y CONFIG_USB_CDC_COMPOSITE=y # CONFIG_DNOTIFY is not set @@ -55,5 +51,4 @@ CONFIG_TMPFS=y # CONFIG_NETWORK_FILESYSTEMS is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_DEBUG_FS=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set # CONFIG_CRC32 is not set diff --git a/arch/sh/configs/kfr2r09_defconfig b/arch/sh/configs/kfr2r09_defconfig index fac13ded07b2..46693d033644 100644 --- a/arch/sh/configs/kfr2r09_defconfig +++ b/arch/sh/configs/kfr2r09_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_BSD_PROCESS_ACCT=y @@ -33,15 +32,12 @@ CONFIG_INET=y # CONFIG_INET_XFRM_MODE_TRANSPORT is not set # CONFIG_INET_XFRM_MODE_TUNNEL is not set # CONFIG_INET_XFRM_MODE_BEET is not set -# CONFIG_INET_LRO is not set # CONFIG_INET_DIAG is not set # CONFIG_IPV6 is not set # CONFIG_WIRELESS is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_MTD=y -CONFIG_MTD_CONCAT=y CONFIG_MTD_CMDLINE_PARTS=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_INTELEXT=y @@ -49,7 +45,6 @@ CONFIG_MTD_PHYSMAP=y CONFIG_MTD_ONENAND=y CONFIG_MTD_ONENAND_GENERIC=y CONFIG_MTD_UBI=y -# CONFIG_MISC_DEVICES is not set # CONFIG_INPUT_MOUSEDEV is not set CONFIG_INPUT_EVDEV=y # CONFIG_KEYBOARD_ATKBD is not set @@ -77,7 +72,6 @@ CONFIG_LOGO=y # CONFIG_LOGO_LINUX_CLUT224 is not set # CONFIG_LOGO_SUPERH_MONO is not set # CONFIG_LOGO_SUPERH_CLUT224 is not set -# CONFIG_HID_SUPPORT is not set CONFIG_USB_GADGET=y CONFIG_USB_CDC_COMPOSITE=m CONFIG_MMC=y @@ -91,4 +85,3 @@ CONFIG_TMPFS=y # CONFIG_NETWORK_FILESYSTEMS is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_DEBUG_FS=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set diff --git a/arch/sh/configs/landisk_defconfig b/arch/sh/configs/landisk_defconfig index 6783f31315c7..467f4d2d8e87 100644 --- a/arch/sh/configs/landisk_defconfig +++ b/arch/sh/configs/landisk_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y 
CONFIG_SYSVIPC=y CONFIG_LOG_BUF_SHIFT=14 # CONFIG_SYSCTL_SYSCALL is not set @@ -24,10 +23,8 @@ CONFIG_UNIX=y CONFIG_INET=y CONFIG_IP_ADVANCED_ROUTER=y CONFIG_IP_PNP=y -# CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set CONFIG_NETFILTER=y -CONFIG_IP_NF_QUEUE=m CONFIG_ATALK=m CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_BLK_DEV_LOOP=y @@ -118,7 +115,6 @@ CONFIG_NFSD_V3=y CONFIG_SMB_FS=m CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_CODEPAGE_932=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set CONFIG_SH_STANDARD_BIOS=y # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRC_T10DIF=y diff --git a/arch/sh/configs/lboxre2_defconfig b/arch/sh/configs/lboxre2_defconfig index e3c0894b1bb4..9e3edfdf9b2e 100644 --- a/arch/sh/configs/lboxre2_defconfig +++ b/arch/sh/configs/lboxre2_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_LOG_BUF_SHIFT=14 # CONFIG_SYSCTL_SYSCALL is not set @@ -28,7 +27,6 @@ CONFIG_UNIX=y CONFIG_INET=y CONFIG_IP_ADVANCED_ROUTER=y CONFIG_IP_PNP=y -# CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set CONFIG_NETFILTER=y CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" @@ -61,7 +59,6 @@ CONFIG_VFAT_FS=y CONFIG_TMPFS=y CONFIG_ROMFS_FS=y CONFIG_NLS_CODEPAGE_437=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set CONFIG_SH_STANDARD_BIOS=y # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRC_T10DIF=y diff --git a/arch/sh/configs/magicpanelr2_defconfig b/arch/sh/configs/magicpanelr2_defconfig index 9479872b1ae6..fb7415dbc102 100644 --- a/arch/sh/configs/magicpanelr2_defconfig +++ b/arch/sh/configs/magicpanelr2_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y @@ -35,16 +34,13 @@ CONFIG_IP_PNP_DHCP=y # CONFIG_INET_XFRM_MODE_TRANSPORT is not set # CONFIG_INET_XFRM_MODE_TUNNEL is not set # CONFIG_INET_XFRM_MODE_BEET is not set -# CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_STANDALONE is not set # CONFIG_PREVENT_FIRMWARE_BUILD is not set CONFIG_MTD=y -CONFIG_MTD_PARTITIONS=y CONFIG_MTD_REDBOOT_PARTS=y CONFIG_MTD_CMDLINE_PARTS=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_AMDSTD=y @@ -55,8 +51,6 @@ CONFIG_NETDEVICES=y CONFIG_SMSC_PHY=y CONFIG_NET_ETHERNET=y CONFIG_SMSC911X=y -# CONFIG_NETDEV_1000 is not set -# CONFIG_NETDEV_10000 is not set CONFIG_INPUT_EVDEV=y # CONFIG_MOUSE_PS2 is not set CONFIG_SERIAL_8250=y @@ -68,7 +62,6 @@ CONFIG_SERIAL_SH_SCI=y CONFIG_SERIAL_SH_SCI_CONSOLE=y # CONFIG_HW_RANDOM is not set # CONFIG_HWMON is not set -# CONFIG_HID_SUPPORT is not set # CONFIG_USB_SUPPORT is not set CONFIG_RTC_CLASS=y # CONFIG_RTC_HCTOSYS is not set @@ -96,7 +89,5 @@ CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_KOBJECT=y CONFIG_DEBUG_INFO=y CONFIG_FRAME_POINTER=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set -CONFIG_SYSCTL_SYSCALL_CHECK=y CONFIG_CRC_CCITT=m CONFIG_CRC16=m diff --git a/arch/sh/configs/microdev_defconfig b/arch/sh/configs/microdev_defconfig index f1d2e1b5ee41..c3f7d5899922 100644 --- a/arch/sh/configs/microdev_defconfig +++ b/arch/sh/configs/microdev_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y @@ -19,7 +18,6 @@ CONFIG_SUPERHYWAY=y CONFIG_NET=y CONFIG_INET=y CONFIG_IP_PNP=y -# CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_FW_LOADER is not set @@ -45,6 +43,5 @@ CONFIG_NFS_FS=y CONFIG_NFS_V3=y CONFIG_NFS_V4=y CONFIG_ROOT_NFS=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set CONFIG_CRYPTO_ECB=y # 
CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/sh/configs/migor_defconfig b/arch/sh/configs/migor_defconfig index cc61eda44922..e04f21be0756 100644 --- a/arch/sh/configs/migor_defconfig +++ b/arch/sh/configs/migor_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y @@ -26,15 +25,11 @@ CONFIG_UNIX=y CONFIG_INET=y CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y -# CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_FW_LOADER=m CONFIG_MTD=y -CONFIG_MTD_CONCAT=y -CONFIG_MTD_PARTITIONS=y CONFIG_MTD_CMDLINE_PARTS=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_AMDSTD=y @@ -47,8 +42,6 @@ CONFIG_BLK_DEV_SD=y CONFIG_NETDEVICES=y CONFIG_NET_ETHERNET=y CONFIG_SMC91X=y -# CONFIG_NETDEV_1000 is not set -# CONFIG_NETDEV_10000 is not set # CONFIG_INPUT_MOUSEDEV is not set CONFIG_INPUT_EVDEV=y # CONFIG_KEYBOARD_ATKBD is not set @@ -101,7 +94,6 @@ CONFIG_TMPFS=y CONFIG_NFS_FS=y CONFIG_ROOT_NFS=y CONFIG_DEBUG_FS=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set CONFIG_CRYPTO_MANAGER=y # CONFIG_CRYPTO_ANSI_CPRNG is not set # CONFIG_CRYPTO_HW is not set diff --git a/arch/sh/configs/polaris_defconfig b/arch/sh/configs/polaris_defconfig index f3d5d9f76310..0a432b5f50e7 100644 --- a/arch/sh/configs/polaris_defconfig +++ b/arch/sh/configs/polaris_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y # CONFIG_LOCALVERSION_AUTO is not set # CONFIG_SWAP is not set CONFIG_SYSVIPC=y @@ -37,14 +36,11 @@ CONFIG_IP_MULTICAST=y # CONFIG_INET_XFRM_MODE_TRANSPORT is not set # CONFIG_INET_XFRM_MODE_TUNNEL is not set # CONFIG_INET_XFRM_MODE_BEET is not set -# CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_FIRMWARE_IN_KERNEL is not set CONFIG_MTD=y -CONFIG_MTD_PARTITIONS=y CONFIG_MTD_CMDLINE_PARTS=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_ADV_OPTIONS=y @@ -57,8 +53,6 @@ CONFIG_NETDEVICES=y CONFIG_SMSC_PHY=y CONFIG_NET_ETHERNET=y CONFIG_SMSC911X=y -# CONFIG_NETDEV_1000 is not set -# CONFIG_NETDEV_10000 is not set # CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set @@ -71,7 +65,6 @@ CONFIG_SERIAL_SH_SCI_CONSOLE=y # CONFIG_LEGACY_PTYS is not set # CONFIG_HW_RANDOM is not set # CONFIG_HWMON is not set -# CONFIG_HID_SUPPORT is not set # CONFIG_USB_SUPPORT is not set CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_SH=y @@ -91,5 +84,3 @@ CONFIG_DEBUG_LOCK_ALLOC=y CONFIG_DEBUG_SPINLOCK_SLEEP=y CONFIG_DEBUG_INFO=y CONFIG_DEBUG_SG=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set -CONFIG_SYSCTL_SYSCALL_CHECK=y diff --git a/arch/sh/configs/r7780mp_defconfig b/arch/sh/configs/r7780mp_defconfig index 920b8471ceb7..435bcd66c667 100644 --- a/arch/sh/configs/r7780mp_defconfig +++ b/arch/sh/configs/r7780mp_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_IKCONFIG=y @@ -35,13 +34,11 @@ CONFIG_INET=y CONFIG_IP_ADVANCED_ROUTER=y CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y -# CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set CONFIG_BRIDGE=m CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_FW_LOADER=m CONFIG_MTD=y -CONFIG_MTD_PARTITIONS=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_AMDSTD=y CONFIG_MTD_COMPLEX_MAPPINGS=y @@ -110,7 +107,6 @@ CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y # CONFIG_DEBUG_PREEMPT is not set CONFIG_DEBUG_INFO=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set CONFIG_CRYPTO_ECB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_HMAC=y diff --git 
a/arch/sh/configs/r7785rp_defconfig b/arch/sh/configs/r7785rp_defconfig index c77da6be06b8..5877e6d1f285 100644 --- a/arch/sh/configs/r7785rp_defconfig +++ b/arch/sh/configs/r7785rp_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_BSD_PROCESS_ACCT=y @@ -42,7 +41,6 @@ CONFIG_INET=y CONFIG_IP_ADVANCED_ROUTER=y CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y -# CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set CONFIG_BRIDGE=m CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" @@ -104,7 +102,6 @@ CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_LOCK_ALLOC=y CONFIG_DEBUG_LOCKING_API_SELFTESTS=y CONFIG_DEBUG_INFO=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set CONFIG_SH_STANDARD_BIOS=y CONFIG_DEBUG_STACK_USAGE=y CONFIG_4KSTACKS=y diff --git a/arch/sh/configs/rsk7201_defconfig b/arch/sh/configs/rsk7201_defconfig index 5df916d931c5..b195bc01e406 100644 --- a/arch/sh/configs/rsk7201_defconfig +++ b/arch/sh/configs/rsk7201_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_BSD_PROCESS_ACCT=y @@ -37,10 +36,7 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_PREVENT_FIRMWARE_BUILD is not set # CONFIG_FW_LOADER is not set CONFIG_MTD=y -CONFIG_MTD_CONCAT=y -CONFIG_MTD_PARTITIONS=y CONFIG_MTD_REDBOOT_PARTS=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_AMDSTD=y @@ -58,8 +54,6 @@ CONFIG_SERIAL_SH_SCI_CONSOLE=y # CONFIG_HW_RANDOM is not set # CONFIG_HWMON is not set CONFIG_THERMAL=y -CONFIG_VIDEO_OUTPUT_CONTROL=y -# CONFIG_HID_SUPPORT is not set # CONFIG_USB_SUPPORT is not set CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_SH=y @@ -71,5 +65,3 @@ CONFIG_ROMFS_FS=y # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_FS=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set -CONFIG_SYSCTL_SYSCALL_CHECK=y diff --git a/arch/sh/configs/rsk7203_defconfig b/arch/sh/configs/rsk7203_defconfig index 3c4f6f4d52b0..8c471959bbc7 100644 --- a/arch/sh/configs/rsk7203_defconfig +++ b/arch/sh/configs/rsk7203_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y @@ -44,7 +43,6 @@ CONFIG_IP_PNP_DHCP=y # CONFIG_INET_XFRM_MODE_TRANSPORT is not set # CONFIG_INET_XFRM_MODE_TUNNEL is not set # CONFIG_INET_XFRM_MODE_BEET is not set -# CONFIG_INET_LRO is not set # CONFIG_INET_DIAG is not set # CONFIG_IPV6 is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" @@ -52,10 +50,7 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_PREVENT_FIRMWARE_BUILD is not set # CONFIG_FW_LOADER is not set CONFIG_MTD=y -CONFIG_MTD_CONCAT=y -CONFIG_MTD_PARTITIONS=y CONFIG_MTD_REDBOOT_PARTS=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_AMDSTD=y @@ -64,8 +59,6 @@ CONFIG_NETDEVICES=y CONFIG_SMSC_PHY=y CONFIG_NET_ETHERNET=y CONFIG_SMSC911X=y -# CONFIG_NETDEV_1000 is not set -# CONFIG_NETDEV_10000 is not set CONFIG_INPUT_FF_MEMLESS=m # CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set @@ -81,7 +74,6 @@ CONFIG_SERIAL_SH_SCI_CONSOLE=y # CONFIG_HWMON is not set CONFIG_THERMAL=y CONFIG_REGULATOR=y -CONFIG_VIDEO_OUTPUT_CONTROL=y CONFIG_HID_A4TECH=y CONFIG_HID_APPLE=y CONFIG_HID_BELKIN=y @@ -130,6 +122,4 @@ CONFIG_DEBUG_VM=y CONFIG_DEBUG_LIST=y CONFIG_DEBUG_SG=y CONFIG_FRAME_POINTER=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set -CONFIG_SYSCTL_SYSCALL_CHECK=y CONFIG_DEBUG_STACK_USAGE=y diff --git a/arch/sh/configs/rsk7264_defconfig b/arch/sh/configs/rsk7264_defconfig index eecdf65bb789..2b9b731fc86b 100644 --- a/arch/sh/configs/rsk7264_defconfig +++ 
b/arch/sh/configs/rsk7264_defconfig @@ -35,7 +35,6 @@ CONFIG_IP_PNP_DHCP=y # CONFIG_INET_XFRM_MODE_TRANSPORT is not set # CONFIG_INET_XFRM_MODE_TUNNEL is not set # CONFIG_INET_XFRM_MODE_BEET is not set -# CONFIG_INET_LRO is not set # CONFIG_INET_DIAG is not set # CONFIG_IPV6 is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" @@ -61,11 +60,9 @@ CONFIG_SERIAL_SH_SCI_CONSOLE=y # CONFIG_HWMON is not set CONFIG_USB=y CONFIG_USB_ANNOUNCE_NEW_DEVICES=y -# CONFIG_USB_DEVICE_CLASS is not set CONFIG_USB_R8A66597_HCD=y CONFIG_USB_STORAGE=y CONFIG_USB_STORAGE_DEBUG=y -CONFIG_USB_LIBUSUAL=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set diff --git a/arch/sh/configs/rsk7269_defconfig b/arch/sh/configs/rsk7269_defconfig index 8370b10df357..d041f7bcb84c 100644 --- a/arch/sh/configs/rsk7269_defconfig +++ b/arch/sh/configs/rsk7269_defconfig @@ -24,7 +24,6 @@ CONFIG_IP_PNP_DHCP=y # CONFIG_INET_XFRM_MODE_TRANSPORT is not set # CONFIG_INET_XFRM_MODE_TUNNEL is not set # CONFIG_INET_XFRM_MODE_BEET is not set -# CONFIG_INET_LRO is not set # CONFIG_INET_DIAG is not set # CONFIG_IPV6 is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" @@ -44,11 +43,9 @@ CONFIG_SERIAL_SH_SCI_CONSOLE=y # CONFIG_HWMON is not set CONFIG_USB=y CONFIG_USB_ANNOUNCE_NEW_DEVICES=y -# CONFIG_USB_DEVICE_CLASS is not set CONFIG_USB_R8A66597_HCD=y CONFIG_USB_STORAGE=y CONFIG_USB_STORAGE_DEBUG=y -CONFIG_USB_LIBUSUAL=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set @@ -60,5 +57,4 @@ CONFIG_PARTITION_ADVANCED=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y # CONFIG_ENABLE_MUST_CHECK is not set -CONFIG_SYSCTL_SYSCALL_CHECK=y # CONFIG_FTRACE is not set diff --git a/arch/sh/configs/rts7751r2d1_defconfig b/arch/sh/configs/rts7751r2d1_defconfig index a3d081095ce2..379d673f5ce8 100644 --- a/arch/sh/configs/rts7751r2d1_defconfig +++ b/arch/sh/configs/rts7751r2d1_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_LOG_BUF_SHIFT=14 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set @@ -22,7 +21,6 @@ CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y CONFIG_INET=y -# CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_FW_LOADER=m @@ -48,7 +46,6 @@ CONFIG_HW_RANDOM=y CONFIG_SPI=y CONFIG_SPI_SH_SCI=y CONFIG_MFD_SM501=y -CONFIG_VIDEO_OUTPUT_CONTROL=m CONFIG_FB=y CONFIG_FB_SH_MOBILE_LCDC=m CONFIG_FB_SM501=y @@ -83,7 +80,6 @@ CONFIG_USB=y CONFIG_USB_ANNOUNCE_NEW_DEVICES=y CONFIG_USB_OHCI_HCD=y CONFIG_USB_STORAGE=y -CONFIG_USB_LIBUSUAL=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_R9701=y CONFIG_EXT2_FS=y @@ -94,6 +90,5 @@ CONFIG_TMPFS=y CONFIG_MINIX_FS=y CONFIG_NLS_CODEPAGE_932=y CONFIG_DEBUG_FS=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRC_T10DIF=y diff --git a/arch/sh/configs/rts7751r2dplus_defconfig b/arch/sh/configs/rts7751r2dplus_defconfig index b1a04f3c598b..11177bceda83 100644 --- a/arch/sh/configs/rts7751r2dplus_defconfig +++ b/arch/sh/configs/rts7751r2dplus_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_LOG_BUF_SHIFT=14 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set @@ -22,15 +21,11 @@ CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y CONFIG_INET=y -# CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_FW_LOADER=m CONFIG_MTD=y -CONFIG_MTD_CONCAT=y -CONFIG_MTD_PARTITIONS=y CONFIG_MTD_CMDLINE_PARTS=y -CONFIG_MTD_CHAR=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_AMDSTD=y CONFIG_MTD_PHYSMAP=y @@ -56,7 +51,6 @@ CONFIG_HW_RANDOM=y CONFIG_SPI=y 
CONFIG_SPI_SH_SCI=y CONFIG_MFD_SM501=y -CONFIG_VIDEO_OUTPUT_CONTROL=m CONFIG_FB=y CONFIG_FB_SH_MOBILE_LCDC=m CONFIG_FB_SM501=y @@ -91,7 +85,6 @@ CONFIG_USB=y CONFIG_USB_ANNOUNCE_NEW_DEVICES=y CONFIG_USB_OHCI_HCD=y CONFIG_USB_STORAGE=y -CONFIG_USB_LIBUSUAL=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_R9701=y CONFIG_EXT2_FS=y @@ -102,6 +95,5 @@ CONFIG_TMPFS=y CONFIG_MINIX_FS=y CONFIG_NLS_CODEPAGE_932=y CONFIG_DEBUG_FS=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRC_T10DIF=y diff --git a/arch/sh/configs/sdk7780_defconfig b/arch/sh/configs/sdk7780_defconfig index bbd4c2298708..95e5208b8260 100644 --- a/arch/sh/configs/sdk7780_defconfig +++ b/arch/sh/configs/sdk7780_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_LOCALVERSION="_SDK7780" CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y @@ -39,7 +38,6 @@ CONFIG_IP_ADVANCED_ROUTER=y CONFIG_IP_PNP=y CONFIG_IP_PNP_BOOTP=y # CONFIG_INET_XFRM_MODE_BEET is not set -# CONFIG_INET_LRO is not set CONFIG_IPV6=y # CONFIG_INET6_XFRM_MODE_BEET is not set CONFIG_NET_SCHED=y @@ -47,7 +45,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_PARPORT=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y -# CONFIG_MISC_DEVICES is not set CONFIG_IDE=y CONFIG_BLK_DEV_IDECD=y CONFIG_BLK_DEV_PLATFORM=y @@ -63,8 +60,6 @@ CONFIG_BLK_DEV_DM=y CONFIG_NETDEVICES=y CONFIG_NET_ETHERNET=y CONFIG_SMC91X=y -# CONFIG_NETDEV_1000 is not set -# CONFIG_NETDEV_10000 is not set CONFIG_NETCONSOLE=y CONFIG_INPUT_FF_MEMLESS=m CONFIG_INPUT_EVDEV=y @@ -78,7 +73,6 @@ CONFIG_SSB=y CONFIG_SSB_DRIVER_PCICORE=y CONFIG_FB=y CONFIG_FB_SH_MOBILE_LCDC=m -CONFIG_DISPLAY_SUPPORT=y CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y CONFIG_LOGO=y @@ -101,7 +95,6 @@ CONFIG_HID_SAMSUNG=y CONFIG_HID_SONY=y CONFIG_HID_SUNPLUS=y CONFIG_USB=y -# CONFIG_USB_DEVICE_CLASS is not set CONFIG_USB_MON=y CONFIG_USB_EHCI_HCD=y # CONFIG_USB_EHCI_TT_NEWSCHED is not set @@ -144,8 +137,6 @@ CONFIG_DETECT_HUNG_TASK=y # CONFIG_SCHED_DEBUG is not set CONFIG_TIMER_STATS=y CONFIG_DEBUG_INFO=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set -CONFIG_SYSCTL_SYSCALL_CHECK=y CONFIG_SH_STANDARD_BIOS=y CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_DES=y diff --git a/arch/sh/configs/sdk7786_defconfig b/arch/sh/configs/sdk7786_defconfig index 36642ec2cb97..e9ee0c878ead 100644 --- a/arch/sh/configs/sdk7786_defconfig +++ b/arch/sh/configs/sdk7786_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_KERNEL_LZO=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y @@ -90,13 +89,11 @@ CONFIG_NET_KEY=y CONFIG_INET=y CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y -# CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set # CONFIG_WIRELESS is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_FW_LOADER is not set CONFIG_MTD=y -CONFIG_MTD_PARTITIONS=y CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_BLOCK=y CONFIG_FTL=y @@ -119,7 +116,6 @@ CONFIG_MTD_UBI_GLUEBI=m CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_CRYPTOLOOP=y CONFIG_BLK_DEV_RAM=y -# CONFIG_MISC_DEVICES is not set CONFIG_IDE=y CONFIG_BLK_DEV_IDECD=y CONFIG_BLK_DEV_PLATFORM=y @@ -140,8 +136,6 @@ CONFIG_MDIO_BITBANG=y CONFIG_NET_ETHERNET=y CONFIG_SMC91X=y CONFIG_SMSC911X=y -# CONFIG_NETDEV_1000 is not set -# CONFIG_NETDEV_10000 is not set # CONFIG_WLAN is not set CONFIG_VT_HW_CONSOLE_BINDING=y CONFIG_SERIAL_SH_SCI=y @@ -157,7 +151,6 @@ CONFIG_SPI=y # CONFIG_HWMON is not set CONFIG_WATCHDOG=y CONFIG_SH_WDT=y -CONFIG_VIDEO_OUTPUT_CONTROL=m CONFIG_USB=y CONFIG_USB_MON=y CONFIG_USB_OHCI_HCD=y @@ -223,9 +216,7 @@ CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y CONFIG_TIMER_STATS=y 
CONFIG_DEBUG_MEMORY_INIT=y -# CONFIG_RCU_CPU_STALL_VERBOSE is not set CONFIG_LATENCYTOP=y -CONFIG_SYSCTL_SYSCALL_CHECK=y CONFIG_FUNCTION_TRACER=y # CONFIG_FUNCTION_GRAPH_TRACER is not set CONFIG_DMA_API_DEBUG=y diff --git a/arch/sh/configs/se7206_defconfig b/arch/sh/configs/se7206_defconfig index 91853a67ec34..3553acd5edb1 100644 --- a/arch/sh/configs/se7206_defconfig +++ b/arch/sh/configs/se7206_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_BSD_PROCESS_ACCT=y @@ -57,7 +56,6 @@ CONFIG_IP_PNP_DHCP=y # CONFIG_INET_XFRM_MODE_TRANSPORT is not set # CONFIG_INET_XFRM_MODE_TUNNEL is not set # CONFIG_INET_XFRM_MODE_BEET is not set -# CONFIG_INET_LRO is not set # CONFIG_INET_DIAG is not set # CONFIG_IPV6 is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" @@ -65,9 +63,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_PREVENT_FIRMWARE_BUILD is not set # CONFIG_FW_LOADER is not set CONFIG_MTD=y -CONFIG_MTD_CONCAT=y -CONFIG_MTD_PARTITIONS=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_AMDSTD=y @@ -78,8 +73,6 @@ CONFIG_EEPROM_93CX6=y CONFIG_NETDEVICES=y CONFIG_NET_ETHERNET=y CONFIG_SMC91X=y -# CONFIG_NETDEV_1000 is not set -# CONFIG_NETDEV_10000 is not set # CONFIG_INPUT is not set # CONFIG_SERIO is not set # CONFIG_VT is not set @@ -109,7 +102,6 @@ CONFIG_DEBUG_SPINLOCK_SLEEP=y CONFIG_DEBUG_VM=y CONFIG_DEBUG_LIST=y CONFIG_FRAME_POINTER=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set CONFIG_DEBUG_STACK_USAGE=y CONFIG_CRYPTO_DEFLATE=y CONFIG_CRYPTO_LZO=y diff --git a/arch/sh/configs/se7343_defconfig b/arch/sh/configs/se7343_defconfig index 201acb4652f7..fc77a67b16e7 100644 --- a/arch/sh/configs/se7343_defconfig +++ b/arch/sh/configs/se7343_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y # CONFIG_SWAP is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y @@ -27,26 +26,19 @@ CONFIG_PACKET=y CONFIG_UNIX=y CONFIG_INET=y CONFIG_SYN_COOKIES=y -# CONFIG_INET_LRO is not set # CONFIG_INET_DIAG is not set # CONFIG_IPV6 is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_MTD=y -CONFIG_MTD_CONCAT=y -CONFIG_MTD_PARTITIONS=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_AMDSTD=y CONFIG_MTD_RAM=y CONFIG_MTD_PHYSMAP=y -# CONFIG_MISC_DEVICES is not set CONFIG_SCSI=y CONFIG_SCSI_MULTI_LUN=y # CONFIG_SCSI_LOWLEVEL is not set CONFIG_NETDEVICES=y -# CONFIG_NETDEV_1000 is not set -# CONFIG_NETDEV_10000 is not set CONFIG_USB_USBNET=y # CONFIG_USB_NET_AX8817X is not set CONFIG_USB_NET_DM9601=y @@ -104,5 +96,4 @@ CONFIG_CRAMFS=y CONFIG_NFS_FS=y CONFIG_NFS_V3=y CONFIG_NFSD=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set # CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/sh/configs/se7619_defconfig b/arch/sh/configs/se7619_defconfig index 9a9ad9adf959..f54722dbc8f5 100644 --- a/arch/sh/configs/se7619_defconfig +++ b/arch/sh/configs/se7619_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y # CONFIG_LOCALVERSION_AUTO is not set CONFIG_LOG_BUF_SHIFT=14 # CONFIG_UID16 is not set @@ -24,10 +23,7 @@ CONFIG_BINFMT_ZFLAT=y # CONFIG_STANDALONE is not set # CONFIG_PREVENT_FIRMWARE_BUILD is not set CONFIG_MTD=y -CONFIG_MTD_CONCAT=y -CONFIG_MTD_PARTITIONS=y CONFIG_MTD_REDBOOT_PARTS=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_AMDSTD=y @@ -48,4 +44,3 @@ CONFIG_SERIAL_SH_SCI_CONSOLE=y # CONFIG_SYSFS is not set CONFIG_ROMFS_FS=y # CONFIG_ENABLE_MUST_CHECK is not set -# CONFIG_RCU_CPU_STALL_DETECTOR is not set diff --git a/arch/sh/configs/se7705_defconfig b/arch/sh/configs/se7705_defconfig index 
044e0844fda1..ddfc69841955 100644 --- a/arch/sh/configs/se7705_defconfig +++ b/arch/sh/configs/se7705_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y # CONFIG_SWAP is not set CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y @@ -27,11 +26,8 @@ CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y CONFIG_IP_PNP_RARP=y -# CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set CONFIG_MTD=y -CONFIG_MTD_PARTITIONS=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_AMDSTD=y @@ -58,5 +54,4 @@ CONFIG_PROC_KCORE=y CONFIG_JFFS2_FS=y CONFIG_NFS_FS=y CONFIG_ROOT_NFS=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set # CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/sh/configs/se7712_defconfig b/arch/sh/configs/se7712_defconfig index 1248635e4f88..5a1097641247 100644 --- a/arch/sh/configs/se7712_defconfig +++ b/arch/sh/configs/se7712_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y # CONFIG_LOCALVERSION_AUTO is not set # CONFIG_SWAP is not set CONFIG_SYSVIPC=y @@ -47,7 +46,6 @@ CONFIG_SYN_COOKIES=y CONFIG_INET_AH=y CONFIG_INET_ESP=y CONFIG_INET_IPCOMP=y -# CONFIG_INET_LRO is not set # CONFIG_INET_DIAG is not set # CONFIG_IPV6 is not set CONFIG_NET_SCHED=y @@ -68,9 +66,6 @@ CONFIG_NET_CLS_FW=y CONFIG_NET_CLS_IND=y CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_MTD=y -CONFIG_MTD_CONCAT=y -CONFIG_MTD_PARTITIONS=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_AMDSTD=y @@ -104,8 +99,6 @@ CONFIG_ROOT_NFS=y CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_INFO=y CONFIG_FRAME_POINTER=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set -CONFIG_SYSCTL_SYSCALL_CHECK=y CONFIG_CRYPTO_ECB=m CONFIG_CRYPTO_PCBC=m # CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/sh/configs/se7721_defconfig b/arch/sh/configs/se7721_defconfig index c3ba6e8a9818..9c0ef13bee10 100644 --- a/arch/sh/configs/se7721_defconfig +++ b/arch/sh/configs/se7721_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y # CONFIG_LOCALVERSION_AUTO is not set # CONFIG_SWAP is not set CONFIG_SYSVIPC=y @@ -46,7 +45,6 @@ CONFIG_SYN_COOKIES=y CONFIG_INET_AH=y CONFIG_INET_ESP=y CONFIG_INET_IPCOMP=y -# CONFIG_INET_LRO is not set # CONFIG_INET_DIAG is not set # CONFIG_IPV6 is not set CONFIG_NET_SCHED=y @@ -67,9 +65,6 @@ CONFIG_NET_CLS_FW=y CONFIG_NET_CLS_IND=y CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_MTD=y -CONFIG_MTD_CONCAT=y -CONFIG_MTD_PARTITIONS=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_AMDSTD=y @@ -132,6 +127,5 @@ CONFIG_NLS_ISO8859_1=y CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_INFO=y CONFIG_FRAME_POINTER=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRC_CCITT=y diff --git a/arch/sh/configs/se7722_defconfig b/arch/sh/configs/se7722_defconfig index ae998c7e2ee0..ccc7fc423fde 100644 --- a/arch/sh/configs/se7722_defconfig +++ b/arch/sh/configs/se7722_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_IKCONFIG=y @@ -26,7 +25,6 @@ CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y CONFIG_INET=y -# CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_FW_LOADER is not set @@ -57,6 +55,5 @@ CONFIG_PRINTK_TIME=y # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_FS=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set CONFIG_SH_STANDARD_BIOS=y # CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/sh/configs/se7724_defconfig b/arch/sh/configs/se7724_defconfig index 1faa788aecae..aedb3a2d9a10 100644 --- a/arch/sh/configs/se7724_defconfig +++ 
b/arch/sh/configs/se7724_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_BSD_PROCESS_ACCT=y @@ -30,14 +29,10 @@ CONFIG_IP_PNP_DHCP=y # CONFIG_INET_XFRM_MODE_TRANSPORT is not set # CONFIG_INET_XFRM_MODE_TUNNEL is not set # CONFIG_INET_XFRM_MODE_BEET is not set -# CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_MTD=y -CONFIG_MTD_CONCAT=y -CONFIG_MTD_PARTITIONS=y CONFIG_MTD_CMDLINE_PARTS=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_AMDSTD=y @@ -53,8 +48,6 @@ CONFIG_SMSC_PHY=y CONFIG_NET_ETHERNET=y CONFIG_SH_ETH=y CONFIG_SMC91X=y -# CONFIG_NETDEV_1000 is not set -# CONFIG_NETDEV_10000 is not set # CONFIG_INPUT_MOUSEDEV is not set CONFIG_INPUT_EVDEV=y # CONFIG_KEYBOARD_ATKBD is not set @@ -137,8 +130,6 @@ CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_CODEPAGE_932=y CONFIG_NLS_ISO8859_1=y # CONFIG_ENABLE_MUST_CHECK is not set -# CONFIG_RCU_CPU_STALL_DETECTOR is not set -CONFIG_SYSCTL_SYSCALL_CHECK=y CONFIG_CRYPTO=y CONFIG_CRYPTO_CBC=y # CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/sh/configs/se7750_defconfig b/arch/sh/configs/se7750_defconfig index 912c98590e22..b23f67542728 100644 --- a/arch/sh/configs/se7750_defconfig +++ b/arch/sh/configs/se7750_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y # CONFIG_SWAP is not set CONFIG_SYSVIPC=y CONFIG_BSD_PROCESS_ACCT=y @@ -25,11 +24,8 @@ CONFIG_INET=y CONFIG_IP_MULTICAST=y CONFIG_IP_PNP=y CONFIG_IP_PNP_BOOTP=y -# CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set CONFIG_MTD=y -CONFIG_MTD_PARTITIONS=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_AMDSTD=y @@ -58,5 +54,4 @@ CONFIG_ROOT_NFS=y CONFIG_PARTITION_ADVANCED=y # CONFIG_MSDOS_PARTITION is not set # CONFIG_ENABLE_MUST_CHECK is not set -# CONFIG_RCU_CPU_STALL_DETECTOR is not set # CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/sh/configs/se7751_defconfig b/arch/sh/configs/se7751_defconfig index 75c92fc1876b..162343683937 100644 --- a/arch/sh/configs/se7751_defconfig +++ b/arch/sh/configs/se7751_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_LOG_BUF_SHIFT=14 @@ -25,13 +24,9 @@ CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y CONFIG_IP_PNP_RARP=y -# CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set CONFIG_NETFILTER=y -CONFIG_NETFILTER_DEBUG=y -CONFIG_IP_NF_QUEUE=y CONFIG_MTD=y -CONFIG_MTD_PARTITIONS=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_AMDSTD=y @@ -49,5 +44,4 @@ CONFIG_EXT2_FS=y CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_JFFS2_FS=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set # CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/sh/configs/se7780_defconfig b/arch/sh/configs/se7780_defconfig index b0ef63ce525a..ec32c82646ed 100644 --- a/arch/sh/configs/se7780_defconfig +++ b/arch/sh/configs/se7780_defconfig @@ -24,7 +24,6 @@ CONFIG_UNIX=y CONFIG_INET=y CONFIG_IP_MULTICAST=y CONFIG_IP_PNP=y -# CONFIG_INET_LRO is not set CONFIG_IPV6=y # CONFIG_INET6_XFRM_MODE_TRANSPORT is not set # CONFIG_INET6_XFRM_MODE_TUNNEL is not set @@ -32,8 +31,6 @@ CONFIG_IPV6=y # CONFIG_IPV6_SIT is not set # CONFIG_PREVENT_FIRMWARE_BUILD is not set CONFIG_MTD=y -CONFIG_MTD_PARTITIONS=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_ADV_OPTIONS=y @@ -54,8 +51,6 @@ CONFIG_SMSC_PHY=y CONFIG_NET_ETHERNET=y CONFIG_SMC91X=y CONFIG_NET_PCI=y -# CONFIG_NETDEV_1000 is not set -# CONFIG_NETDEV_10000 is not set CONFIG_INPUT_FF_MEMLESS=m # 
CONFIG_INPUT_MOUSEDEV_PSAUX is not set # CONFIG_INPUT_KEYBOARD is not set @@ -94,7 +89,6 @@ CONFIG_HID_SAMSUNG=y CONFIG_HID_SONY=y CONFIG_HID_SUNPLUS=y CONFIG_USB=y -# CONFIG_USB_DEVICE_CLASS is not set CONFIG_USB_MON=y CONFIG_USB_EHCI_HCD=y CONFIG_USB_OHCI_HCD=y @@ -110,5 +104,4 @@ CONFIG_NFS_FS=y CONFIG_NFS_V3=y CONFIG_ROOT_NFS=y CONFIG_DEBUG_FS=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set # CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/sh/configs/secureedge5410_defconfig b/arch/sh/configs/secureedge5410_defconfig index 7eae4e59d7f0..360592d63a2f 100644 --- a/arch/sh/configs/secureedge5410_defconfig +++ b/arch/sh/configs/secureedge5410_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y # CONFIG_SWAP is not set CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y @@ -18,12 +17,9 @@ CONFIG_INET=y # CONFIG_INET_XFRM_MODE_TRANSPORT is not set # CONFIG_INET_XFRM_MODE_TUNNEL is not set # CONFIG_INET_XFRM_MODE_BEET is not set -# CONFIG_INET_LRO is not set # CONFIG_INET_DIAG is not set # CONFIG_IPV6 is not set CONFIG_MTD=y -CONFIG_MTD_PARTITIONS=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK_RO=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_ADV_OPTIONS=y @@ -34,14 +30,11 @@ CONFIG_MTD_CFI_GEOMETRY=y CONFIG_MTD_CFI_INTELEXT=y CONFIG_MTD_PLATRAM=y CONFIG_BLK_DEV_RAM=y -# CONFIG_MISC_DEVICES is not set CONFIG_NETDEVICES=y CONFIG_NET_ETHERNET=y CONFIG_NET_PCI=y CONFIG_8139CP=y CONFIG_8139TOO=y -# CONFIG_NETDEV_1000 is not set -# CONFIG_NETDEV_10000 is not set # CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set @@ -51,7 +44,6 @@ CONFIG_SERIAL_SH_SCI=y CONFIG_SERIAL_SH_SCI_CONSOLE=y # CONFIG_HW_RANDOM is not set # CONFIG_HWMON is not set -# CONFIG_HID_SUPPORT is not set # CONFIG_USB_SUPPORT is not set CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_DS1302=y @@ -60,4 +52,3 @@ CONFIG_EXT2_FS=y CONFIG_TMPFS=y CONFIG_CRAMFS=y CONFIG_ROMFS_FS=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set diff --git a/arch/sh/configs/sh03_defconfig b/arch/sh/configs/sh03_defconfig index 0cf4097b71e8..2156223405a1 100644 --- a/arch/sh/configs/sh03_defconfig +++ b/arch/sh/configs/sh03_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_BSD_PROCESS_ACCT=y @@ -34,7 +33,6 @@ CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y CONFIG_IP_PNP_RARP=y -# CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_STANDALONE is not set @@ -70,7 +68,6 @@ CONFIG_EXT2_FS_XATTR=y CONFIG_EXT3_FS=y # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_AUTOFS_FS=y CONFIG_AUTOFS4_FS=y CONFIG_ISO9660_FS=m CONFIG_JOLIET=y @@ -126,7 +123,6 @@ CONFIG_NLS_KOI8_R=m CONFIG_NLS_KOI8_U=m CONFIG_NLS_UTF8=m CONFIG_DEBUG_FS=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set CONFIG_SH_STANDARD_BIOS=y CONFIG_CRYPTO_ECB=m CONFIG_CRYPTO_HMAC=y diff --git a/arch/sh/configs/sh2007_defconfig b/arch/sh/configs/sh2007_defconfig index df25ae774ee0..34094e05e892 100644 --- a/arch/sh/configs/sh2007_defconfig +++ b/arch/sh/configs/sh2007_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y @@ -42,7 +41,6 @@ CONFIG_NET_IPIP=y # CONFIG_INET_XFRM_MODE_TRANSPORT is not set # CONFIG_INET_XFRM_MODE_TUNNEL is not set # CONFIG_INET_XFRM_MODE_BEET is not set -# CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set CONFIG_NETWORK_SECMARK=y CONFIG_NET_PKTGEN=y @@ -50,7 +48,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y 
CONFIG_CDROM_PKTCDVD=y -# CONFIG_MISC_DEVICES is not set CONFIG_RAID_ATTRS=y CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y @@ -72,8 +69,6 @@ CONFIG_TUN=y CONFIG_VETH=y CONFIG_NET_ETHERNET=y CONFIG_SMSC911X=y -# CONFIG_NETDEV_1000 is not set -# CONFIG_NETDEV_10000 is not set # CONFIG_WLAN is not set CONFIG_INPUT_FF_MEMLESS=y # CONFIG_INPUT_MOUSEDEV is not set @@ -95,9 +90,7 @@ CONFIG_BACKLIGHT_LCD_SUPPORT=y CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y CONFIG_LOGO=y -# CONFIG_HID_SUPPORT is not set CONFIG_USB=y -# CONFIG_USB_DEVICE_CLASS is not set CONFIG_USB_MON=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y @@ -172,7 +165,6 @@ CONFIG_DEBUG_KERNEL=y # CONFIG_SCHED_DEBUG is not set CONFIG_DEBUG_INFO=y CONFIG_FRAME_POINTER=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set CONFIG_SH_STANDARD_BIOS=y CONFIG_CRYPTO_NULL=y CONFIG_CRYPTO_AUTHENC=y diff --git a/arch/sh/configs/sh7710voipgw_defconfig b/arch/sh/configs/sh7710voipgw_defconfig index f92ad17cd629..65a1aad899c8 100644 --- a/arch/sh/configs/sh7710voipgw_defconfig +++ b/arch/sh/configs/sh7710voipgw_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y # CONFIG_SWAP is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y @@ -24,7 +23,6 @@ CONFIG_PACKET=y CONFIG_UNIX=y CONFIG_INET=y CONFIG_SYN_COOKIES=y -# CONFIG_INET_LRO is not set # CONFIG_INET_DIAG is not set # CONFIG_IPV6 is not set CONFIG_NETFILTER=y @@ -36,8 +34,6 @@ CONFIG_NET_CLS_ROUTE4=y CONFIG_NET_CLS_U32=y CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_MTD=y -CONFIG_MTD_PARTITIONS=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_AMDSTD=y @@ -59,5 +55,4 @@ CONFIG_THERMAL=y # CONFIG_DNOTIFY is not set CONFIG_JFFS2_FS=y CONFIG_DEBUG_FS=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set # CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/sh/configs/sh7724_generic_defconfig b/arch/sh/configs/sh7724_generic_defconfig index f83ac7b0b031..d15e53647983 100644 --- a/arch/sh/configs/sh7724_generic_defconfig +++ b/arch/sh/configs/sh7724_generic_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_CGROUPS=y @@ -18,7 +17,6 @@ CONFIG_HIBERNATION=y CONFIG_CPU_IDLE=y CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_PREVENT_FIRMWARE_BUILD is not set -# CONFIG_MISC_DEVICES is not set # CONFIG_INPUT is not set # CONFIG_SERIO is not set # CONFIG_VT is not set @@ -44,5 +42,4 @@ CONFIG_UIO_PDRV_GENIRQ=y # CONFIG_MISC_FILESYSTEMS is not set # CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set -# CONFIG_RCU_CPU_STALL_DETECTOR is not set # CONFIG_CRC32 is not set diff --git a/arch/sh/configs/sh7757lcr_defconfig b/arch/sh/configs/sh7757lcr_defconfig index cfde98ddb29d..b0c4bc830fb8 100644 --- a/arch/sh/configs/sh7757lcr_defconfig +++ b/arch/sh/configs/sh7757lcr_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y # CONFIG_SWAP is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y @@ -32,13 +31,11 @@ CONFIG_INET=y CONFIG_IP_MULTICAST=y CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y -# CONFIG_INET_LRO is not set CONFIG_IPV6=y # CONFIG_WIRELESS is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_FW_LOADER is not set CONFIG_MTD=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_M25P80=y CONFIG_BLK_DEV_RAM=y @@ -48,7 +45,6 @@ CONFIG_NETDEVICES=y CONFIG_VITESSE_PHY=y CONFIG_NET_ETHERNET=y CONFIG_SH_ETH=y -# CONFIG_NETDEV_10000 is not set # CONFIG_WLAN is not set # CONFIG_KEYBOARD_ATKBD is not set # CONFIG_MOUSE_PS2 is not set diff --git a/arch/sh/configs/sh7763rdp_defconfig b/arch/sh/configs/sh7763rdp_defconfig index 
479536440264..2ef780fb9813 100644 --- a/arch/sh/configs/sh7763rdp_defconfig +++ b/arch/sh/configs/sh7763rdp_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y @@ -26,11 +25,9 @@ CONFIG_INET=y CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y -# CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_MTD=y -CONFIG_MTD_PARTITIONS=y CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_BLKDEVS=y CONFIG_MTD_CFI=y @@ -43,14 +40,11 @@ CONFIG_MTD_CFI_AMDSTD=y CONFIG_MTD_CFI_STAA=y CONFIG_MTD_COMPLEX_MAPPINGS=y CONFIG_MTD_PHYSMAP=y -# CONFIG_MISC_DEVICES is not set CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_NETDEVICES=y CONFIG_NET_ETHERNET=y CONFIG_SH_ETH=y -# CONFIG_NETDEV_1000 is not set -# CONFIG_NETDEV_10000 is not set # CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set @@ -65,7 +59,6 @@ CONFIG_FB_FOREIGN_ENDIAN=y CONFIG_FB_SH7760=y CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_LOGO=y -# CONFIG_HID_SUPPORT is not set CONFIG_USB=y CONFIG_USB_MON=y CONFIG_USB_OHCI_HCD=y @@ -74,7 +67,6 @@ CONFIG_MMC=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -CONFIG_AUTOFS_FS=y CONFIG_AUTOFS4_FS=y CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y @@ -124,6 +116,5 @@ CONFIG_NLS_UTF8=y # CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_DEBUG_FS=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRC_T10DIF=y diff --git a/arch/sh/configs/sh7770_generic_defconfig b/arch/sh/configs/sh7770_generic_defconfig index 025bd3ac5ab0..742634b37c0a 100644 --- a/arch/sh/configs/sh7770_generic_defconfig +++ b/arch/sh/configs/sh7770_generic_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_CGROUPS=y @@ -20,7 +19,6 @@ CONFIG_HIBERNATION=y CONFIG_CPU_IDLE=y CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_PREVENT_FIRMWARE_BUILD is not set -# CONFIG_MISC_DEVICES is not set # CONFIG_INPUT is not set # CONFIG_SERIO is not set # CONFIG_VT is not set @@ -46,5 +44,4 @@ CONFIG_UIO_PDRV_GENIRQ=y # CONFIG_MISC_FILESYSTEMS is not set # CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set -# CONFIG_RCU_CPU_STALL_DETECTOR is not set # CONFIG_CRC32 is not set diff --git a/arch/sh/configs/sh7785lcr_32bit_defconfig b/arch/sh/configs/sh7785lcr_32bit_defconfig index 2fce54d9c388..2ddf5ca7094e 100644 --- a/arch/sh/configs/sh7785lcr_32bit_defconfig +++ b/arch/sh/configs/sh7785lcr_32bit_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_BSD_PROCESS_ACCT=y @@ -44,13 +43,9 @@ CONFIG_INET=y CONFIG_IP_ADVANCED_ROUTER=y CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y -# CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_MTD=y -CONFIG_MTD_CONCAT=y -CONFIG_MTD_PARTITIONS=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_AMDSTD=y @@ -58,7 +53,6 @@ CONFIG_MTD_PHYSMAP=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_RAM=y -# CONFIG_MISC_DEVICES is not set # CONFIG_SCSI_PROC_FS is not set CONFIG_BLK_DEV_SD=y # CONFIG_SCSI_LOWLEVEL is not set @@ -69,7 +63,6 @@ CONFIG_NET_ETHERNET=y CONFIG_NET_VENDOR_3COM=y CONFIG_VORTEX=y CONFIG_R8169=y -# CONFIG_NETDEV_10000 is not set # CONFIG_WLAN is not set CONFIG_INPUT_FF_MEMLESS=m CONFIG_INPUT_EVDEV=y @@ -113,7 +106,6 @@ CONFIG_SND_CMIPCI=y CONFIG_SND_EMU10K1=y # CONFIG_SND_SUPERH is not 
set CONFIG_USB=y -# CONFIG_USB_DEVICE_CLASS is not set CONFIG_USB_R8A66597_HCD=y CONFIG_USB_STORAGE=y CONFIG_MMC=y @@ -154,9 +146,7 @@ CONFIG_DEBUG_SPINLOCK=y CONFIG_DEBUG_MUTEXES=y CONFIG_DEBUG_SPINLOCK_SLEEP=y CONFIG_DEBUG_INFO=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set CONFIG_LATENCYTOP=y -CONFIG_SYSCTL_SYSCALL_CHECK=y # CONFIG_FTRACE is not set CONFIG_CRYPTO_HMAC=y # CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/sh/configs/sh7785lcr_defconfig b/arch/sh/configs/sh7785lcr_defconfig index d29da4a0f6c2..7098828d392e 100644 --- a/arch/sh/configs/sh7785lcr_defconfig +++ b/arch/sh/configs/sh7785lcr_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_IKCONFIG=y @@ -26,27 +25,21 @@ CONFIG_INET=y CONFIG_IP_ADVANCED_ROUTER=y CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y -# CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_FW_LOADER is not set CONFIG_MTD=y -CONFIG_MTD_CONCAT=y -CONFIG_MTD_PARTITIONS=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_AMDSTD=y CONFIG_MTD_PHYSMAP=y CONFIG_BLK_DEV_RAM=y -# CONFIG_MISC_DEVICES is not set CONFIG_BLK_DEV_SD=y # CONFIG_SCSI_LOWLEVEL is not set CONFIG_ATA=y CONFIG_SATA_SIL=y CONFIG_NETDEVICES=y CONFIG_R8169=y -# CONFIG_NETDEV_10000 is not set CONFIG_INPUT_FF_MEMLESS=m # CONFIG_INPUT_MOUSEDEV_PSAUX is not set # CONFIG_KEYBOARD_ATKBD is not set @@ -121,8 +114,6 @@ CONFIG_NLS_ISO8859_1=y CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y # CONFIG_DEBUG_BUGVERBOSE is not set -# CONFIG_RCU_CPU_STALL_DETECTOR is not set -CONFIG_SYSCTL_SYSCALL_CHECK=y CONFIG_CRYPTO_HMAC=y # CONFIG_CRYPTO_ANSI_CPRNG is not set # CONFIG_CRYPTO_HW is not set diff --git a/arch/sh/configs/shmin_defconfig b/arch/sh/configs/shmin_defconfig index 4802e14a4649..d589cfdfb7eb 100644 --- a/arch/sh/configs/shmin_defconfig +++ b/arch/sh/configs/shmin_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y # CONFIG_SWAP is not set CONFIG_LOG_BUF_SHIFT=14 # CONFIG_UID16 is not set @@ -28,10 +27,8 @@ CONFIG_NET=y CONFIG_UNIX=y CONFIG_INET=y CONFIG_IP_PNP=y -# CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set CONFIG_MTD=y -CONFIG_MTD_PARTITIONS=y CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y @@ -53,6 +50,5 @@ CONFIG_CRAMFS=y CONFIG_NFS_FS=y CONFIG_NFS_V3=y CONFIG_ROOT_NFS=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set CONFIG_SH_STANDARD_BIOS=y # CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/sh/configs/shx3_defconfig b/arch/sh/configs/shx3_defconfig index 4a4269ad5b04..755c4f73c718 100644 --- a/arch/sh/configs/shx3_defconfig +++ b/arch/sh/configs/shx3_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_BSD_PROCESS_ACCT=y @@ -56,7 +55,6 @@ CONFIG_NET=y CONFIG_INET=y CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y -# CONFIG_INET_LRO is not set CONFIG_CAN=m CONFIG_CAN_RAW=m CONFIG_CAN_BCM=m @@ -70,8 +68,6 @@ CONFIG_PATA_PLATFORM=y CONFIG_NETDEVICES=y CONFIG_NET_ETHERNET=y CONFIG_SMC91X=y -# CONFIG_NETDEV_1000 is not set -# CONFIG_NETDEV_10000 is not set # CONFIG_INPUT is not set # CONFIG_SERIO is not set # CONFIG_VT is not set @@ -82,7 +78,6 @@ CONFIG_I2C=m CONFIG_SPI=y # CONFIG_HWMON is not set CONFIG_WATCHDOG=y -CONFIG_VIDEO_OUTPUT_CONTROL=m CONFIG_USB=y CONFIG_USB_MON=y CONFIG_USB_R8A66597_HCD=m @@ -104,7 +99,6 @@ CONFIG_DEBUG_SHIRQ=y CONFIG_DETECT_HUNG_TASK=y CONFIG_DEBUG_VM=y CONFIG_FRAME_POINTER=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set CONFIG_SH_STANDARD_BIOS=y CONFIG_DEBUG_STACK_USAGE=y # CONFIG_CRYPTO_ANSI_CPRNG is not 
set diff --git a/arch/sh/configs/titan_defconfig b/arch/sh/configs/titan_defconfig index a77b778c745b..ceb48e9b70f4 100644 --- a/arch/sh/configs/titan_defconfig +++ b/arch/sh/configs/titan_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y @@ -49,7 +48,6 @@ CONFIG_SYN_COOKIES=y CONFIG_INET_AH=y CONFIG_INET_ESP=y CONFIG_INET_IPCOMP=y -# CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_IPV6=y CONFIG_IPV6_PRIVACY=y @@ -79,7 +77,6 @@ CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_SCTP=m CONFIG_NETFILTER_XT_MATCH_STRING=m CONFIG_NETFILTER_XT_MATCH_TCPMSS=m -CONFIG_IP_NF_QUEUE=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_ADDRTYPE=m CONFIG_IP_NF_MATCH_AH=m @@ -88,7 +85,6 @@ CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m CONFIG_IP_NF_TARGET_LOG=m -CONFIG_IP_NF_TARGET_ULOG=m CONFIG_IP_NF_MANGLE=m CONFIG_IP_NF_TARGET_ECN=m CONFIG_IP_NF_TARGET_TTL=m @@ -96,7 +92,6 @@ CONFIG_IP_NF_RAW=m CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m -CONFIG_IP6_NF_QUEUE=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -106,7 +101,6 @@ CONFIG_IP6_NF_MATCH_HL=m CONFIG_IP6_NF_MATCH_IPV6HEADER=m CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m -CONFIG_IP6_NF_TARGET_LOG=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m CONFIG_IP6_NF_MANGLE=m @@ -154,7 +148,6 @@ CONFIG_FW_LOADER=m CONFIG_CONNECTOR=m CONFIG_MTD=m CONFIG_MTD_DEBUG=y -CONFIG_MTD_CHAR=m CONFIG_MTD_BLOCK=m CONFIG_FTL=m CONFIG_NFTL=m @@ -261,7 +254,6 @@ CONFIG_NLS_UTF8=m CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y # CONFIG_DEBUG_BUGVERBOSE is not set -# CONFIG_RCU_CPU_STALL_DETECTOR is not set CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_ECB=y CONFIG_CRYPTO_MD4=m diff --git a/arch/sh/configs/ul2_defconfig b/arch/sh/configs/ul2_defconfig index 2d288b887fbd..5f2921a85192 100644 --- a/arch/sh/configs/ul2_defconfig +++ b/arch/sh/configs/ul2_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_IKCONFIG=y @@ -29,7 +28,6 @@ CONFIG_UNIX=y CONFIG_INET=y CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y -# CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set CONFIG_CFG80211=y CONFIG_MAC80211=y @@ -37,9 +35,6 @@ CONFIG_MAC80211_RC_PID=y # CONFIG_MAC80211_RC_MINSTREL is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_MTD=y -CONFIG_MTD_CONCAT=y -CONFIG_MTD_PARTITIONS=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_AMDSTD=y @@ -50,8 +45,6 @@ CONFIG_ATA=y CONFIG_PATA_PLATFORM=y CONFIG_NETDEVICES=y CONFIG_NET_ETHERNET=y -# CONFIG_NETDEV_1000 is not set -# CONFIG_NETDEV_10000 is not set CONFIG_LIBERTAS=m CONFIG_LIBERTAS_SDIO=m CONFIG_LIBERTAS_DEBUG=y @@ -70,7 +63,6 @@ CONFIG_SERIAL_SH_SCI_CONSOLE=y # CONFIG_UNIX98_PTYS is not set # CONFIG_LEGACY_PTYS is not set # CONFIG_HW_RANDOM is not set -# CONFIG_HID_SUPPORT is not set CONFIG_USB=y CONFIG_USB_MON=y CONFIG_USB_R8A66597_HCD=y @@ -92,6 +84,5 @@ CONFIG_NLS_CODEPAGE_932=y CONFIG_NLS_ISO8859_1=y # CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set -# CONFIG_RCU_CPU_STALL_DETECTOR is not set CONFIG_CRYPTO_MICHAEL_MIC=y # CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/sh/configs/urquell_defconfig b/arch/sh/configs/urquell_defconfig index 01c9a91ee896..7d5591b7c088 100644 --- a/arch/sh/configs/urquell_defconfig +++ b/arch/sh/configs/urquell_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_BSD_PROCESS_ACCT=y @@ 
-46,20 +45,15 @@ CONFIG_INET=y CONFIG_IP_ADVANCED_ROUTER=y CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y -# CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_FW_LOADER is not set CONFIG_MTD=y -CONFIG_MTD_CONCAT=y -CONFIG_MTD_PARTITIONS=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_AMDSTD=y CONFIG_MTD_PHYSMAP=y CONFIG_BLK_DEV_RAM=y -# CONFIG_MISC_DEVICES is not set CONFIG_BLK_DEV_SD=y # CONFIG_SCSI_LOWLEVEL is not set CONFIG_ATA=y @@ -73,7 +67,6 @@ CONFIG_NET_PCI=y CONFIG_8139CP=y CONFIG_SKY2=y CONFIG_SKY2_DEBUG=y -# CONFIG_NETDEV_10000 is not set CONFIG_INPUT_FF_MEMLESS=m # CONFIG_INPUT_MOUSEDEV_PSAUX is not set # CONFIG_KEYBOARD_ATKBD is not set @@ -150,8 +143,6 @@ CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y CONFIG_DEBUG_INFO=y CONFIG_FRAME_POINTER=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set -CONFIG_SYSCTL_SYSCALL_CHECK=y # CONFIG_FTRACE is not set # CONFIG_DUMP_CODE is not set CONFIG_CRYPTO_HMAC=y diff --git a/arch/sh/drivers/pci/fixups-cayman.c b/arch/sh/drivers/pci/fixups-cayman.c index edc2fb7a5bb2..32467884d6f7 100644 --- a/arch/sh/drivers/pci/fixups-cayman.c +++ b/arch/sh/drivers/pci/fixups-cayman.c @@ -5,7 +5,7 @@ #include #include "pci-sh5.h" -int __init pcibios_map_platform_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_platform_irq(const struct pci_dev *dev, u8 slot, u8 pin) { int result = -1; diff --git a/arch/sh/drivers/pci/fixups-dreamcast.c b/arch/sh/drivers/pci/fixups-dreamcast.c index 1d1c5a227e50..48aaefd8f5d6 100644 --- a/arch/sh/drivers/pci/fixups-dreamcast.c +++ b/arch/sh/drivers/pci/fixups-dreamcast.c @@ -63,11 +63,10 @@ static void gapspci_fixup_resources(struct pci_dev *dev) res.end = GAPSPCI_DMA_BASE + GAPSPCI_DMA_SIZE - 1; res.flags = IORESOURCE_MEM; pcibios_resource_to_bus(dev->bus, &region, &res); - BUG_ON(!dma_declare_coherent_memory(&dev->dev, + BUG_ON(dma_declare_coherent_memory(&dev->dev, res.start, region.start, resource_size(&res), - DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE)); break; default: @@ -76,7 +75,7 @@ static void gapspci_fixup_resources(struct pci_dev *dev) } DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, gapspci_fixup_resources); -int __init pcibios_map_platform_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_platform_irq(const struct pci_dev *dev, u8 slot, u8 pin) { /* * The interrupt routing semantics here are quite trivial. 
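The fixups above, and the ones that follow, drop __init and __initdata from pcibios_map_platform_irq() and the board IRQ tables because the arch/sh/drivers/pci/pci.c hunk later in this series stops calling pci_fixup_irqs() at boot and instead installs the mapping as the host bridge's map_irq callback, which may run whenever a device is probed, after init memory has been freed. The following is a minimal sketch of that registration pattern under the generic pci_host_bridge API used by that hunk; the function name example_scan_hose is illustrative, error handling and window setup are abbreviated, and struct pci_channel / pcibios_map_platform_irq are the arch/sh symbols shown in this series.

static int __init example_scan_hose(struct pci_channel *hose)
{
	struct pci_host_bridge *bridge;
	int ret;

	/* Allocate a host bridge with no extra private data. */
	bridge = pci_alloc_host_bridge(0);
	if (!bridge)
		return -ENOMEM;

	/* The hose's window resources would be spliced onto
	 * &bridge->windows here, as pcibios_scanbus() does. */
	bridge->sysdata     = hose;
	bridge->busnr       = 0;
	bridge->ops         = hose->pci_ops;
	bridge->swizzle_irq = pci_common_swizzle;
	bridge->map_irq     = pcibios_map_platform_irq; /* may run after init */

	ret = pci_scan_root_bus_bridge(bridge);
	if (ret) {
		pci_free_host_bridge(bridge);
		return ret;
	}

	/* Size, assign and add devices on the freshly scanned root bus. */
	pci_bus_size_bridges(bridge->bus);
	pci_bus_assign_resources(bridge->bus);
	pci_bus_add_devices(bridge->bus);
	return 0;
}

Only the callbacks stored in the bridge outlive initialisation, which is why the per-board map functions and IRQ tables can no longer live in init sections.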
diff --git a/arch/sh/drivers/pci/fixups-r7780rp.c b/arch/sh/drivers/pci/fixups-r7780rp.c index 57ed3f09d0c2..2c9b58f848dd 100644 --- a/arch/sh/drivers/pci/fixups-r7780rp.c +++ b/arch/sh/drivers/pci/fixups-r7780rp.c @@ -15,7 +15,7 @@ #include #include "pci-sh4.h" -int __init pcibios_map_platform_irq(const struct pci_dev *pdev, u8 slot, u8 pin) +int pcibios_map_platform_irq(const struct pci_dev *pdev, u8 slot, u8 pin) { return evt2irq(0xa20) + slot; } diff --git a/arch/sh/drivers/pci/fixups-rts7751r2d.c b/arch/sh/drivers/pci/fixups-rts7751r2d.c index eaddb56c45c6..358ac104f08c 100644 --- a/arch/sh/drivers/pci/fixups-rts7751r2d.c +++ b/arch/sh/drivers/pci/fixups-rts7751r2d.c @@ -20,18 +20,18 @@ #define PCIMCR_MRSET_OFF 0xBFFFFFFF #define PCIMCR_RFSH_OFF 0xFFFFFFFB -static u8 rts7751r2d_irq_tab[] __initdata = { +static u8 rts7751r2d_irq_tab[] = { IRQ_PCI_INTA, IRQ_PCI_INTB, IRQ_PCI_INTC, IRQ_PCI_INTD, }; -static char lboxre2_irq_tab[] __initdata = { +static char lboxre2_irq_tab[] = { IRQ_ETH0, IRQ_ETH1, IRQ_INTA, IRQ_INTD, }; -int __init pcibios_map_platform_irq(const struct pci_dev *pdev, u8 slot, u8 pin) +int pcibios_map_platform_irq(const struct pci_dev *pdev, u8 slot, u8 pin) { if (mach_is_lboxre2()) return lboxre2_irq_tab[slot]; diff --git a/arch/sh/drivers/pci/fixups-sdk7780.c b/arch/sh/drivers/pci/fixups-sdk7780.c index c0a015ae6ecf..24e96dfbdb22 100644 --- a/arch/sh/drivers/pci/fixups-sdk7780.c +++ b/arch/sh/drivers/pci/fixups-sdk7780.c @@ -22,7 +22,7 @@ #define IRQ_INTD evt2irq(0xa80) /* IDSEL [16][17][18][19][20][21][22][23][24][25][26][27][28][29][30][31] */ -static char sdk7780_irq_tab[4][16] __initdata = { +static char sdk7780_irq_tab[4][16] = { /* INTA */ { IRQ_INTA, IRQ_INTD, IRQ_INTC, IRQ_INTD, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, @@ -37,7 +37,7 @@ static char sdk7780_irq_tab[4][16] __initdata = { -1, -1, -1 }, }; -int __init pcibios_map_platform_irq(const struct pci_dev *pdev, u8 slot, u8 pin) +int pcibios_map_platform_irq(const struct pci_dev *pdev, u8 slot, u8 pin) { return sdk7780_irq_tab[pin-1][slot]; } diff --git a/arch/sh/drivers/pci/fixups-se7751.c b/arch/sh/drivers/pci/fixups-se7751.c index 84a88ca92008..1cb8d0ac4fdb 100644 --- a/arch/sh/drivers/pci/fixups-se7751.c +++ b/arch/sh/drivers/pci/fixups-se7751.c @@ -7,7 +7,7 @@ #include #include "pci-sh4.h" -int __init pcibios_map_platform_irq(const struct pci_dev *, u8 slot, u8 pin) +int pcibios_map_platform_irq(const struct pci_dev *, u8 slot, u8 pin) { switch (slot) { case 0: return evt2irq(0x3a0); diff --git a/arch/sh/drivers/pci/fixups-sh03.c b/arch/sh/drivers/pci/fixups-sh03.c index 16207bef9f52..55ac1ba2c74f 100644 --- a/arch/sh/drivers/pci/fixups-sh03.c +++ b/arch/sh/drivers/pci/fixups-sh03.c @@ -4,7 +4,7 @@ #include #include -int __init pcibios_map_platform_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_platform_irq(const struct pci_dev *dev, u8 slot, u8 pin) { int irq; diff --git a/arch/sh/drivers/pci/fixups-snapgear.c b/arch/sh/drivers/pci/fixups-snapgear.c index 6e33ba4cd076..a931e5928f58 100644 --- a/arch/sh/drivers/pci/fixups-snapgear.c +++ b/arch/sh/drivers/pci/fixups-snapgear.c @@ -19,7 +19,7 @@ #include #include "pci-sh4.h" -int __init pcibios_map_platform_irq(const struct pci_dev *pdev, u8 slot, u8 pin) +int pcibios_map_platform_irq(const struct pci_dev *pdev, u8 slot, u8 pin) { int irq = -1; diff --git a/arch/sh/drivers/pci/fixups-titan.c b/arch/sh/drivers/pci/fixups-titan.c index bd1addb1b8be..a9d563e479d5 100644 --- a/arch/sh/drivers/pci/fixups-titan.c +++ 
b/arch/sh/drivers/pci/fixups-titan.c @@ -19,7 +19,7 @@ #include #include "pci-sh4.h" -static char titan_irq_tab[] __initdata = { +static char titan_irq_tab[] = { TITAN_IRQ_WAN, TITAN_IRQ_LAN, TITAN_IRQ_MPCIA, @@ -27,7 +27,7 @@ static char titan_irq_tab[] __initdata = { TITAN_IRQ_USB, }; -int __init pcibios_map_platform_irq(const struct pci_dev *pdev, u8 slot, u8 pin) +int pcibios_map_platform_irq(const struct pci_dev *pdev, u8 slot, u8 pin) { int irq = titan_irq_tab[slot]; diff --git a/arch/sh/drivers/pci/pci.c b/arch/sh/drivers/pci/pci.c index c99ee286b69f..5976a2c8a3e3 100644 --- a/arch/sh/drivers/pci/pci.c +++ b/arch/sh/drivers/pci/pci.c @@ -39,8 +39,12 @@ static void pcibios_scanbus(struct pci_channel *hose) LIST_HEAD(resources); struct resource *res; resource_size_t offset; - int i; - struct pci_bus *bus; + int i, ret; + struct pci_host_bridge *bridge; + + bridge = pci_alloc_host_bridge(0); + if (!bridge) + return; for (i = 0; i < hose->nr_resources; i++) { res = hose->resources + i; @@ -52,19 +56,26 @@ static void pcibios_scanbus(struct pci_channel *hose) pci_add_resource_offset(&resources, res, offset); } - bus = pci_scan_root_bus(NULL, next_busno, hose->pci_ops, hose, - &resources); - hose->bus = bus; + list_splice_init(&resources, &bridge->windows); + bridge->dev.parent = NULL; + bridge->sysdata = hose; + bridge->busnr = next_busno; + bridge->ops = hose->pci_ops; + bridge->swizzle_irq = pci_common_swizzle; + bridge->map_irq = pcibios_map_platform_irq; + + ret = pci_scan_root_bus_bridge(bridge); + if (ret) { + pci_free_host_bridge(bridge); + return; + } + + hose->bus = bridge->bus; need_domain_info = need_domain_info || hose->index; hose->need_domain_info = need_domain_info; - if (!bus) { - pci_free_resource_list(&resources); - return; - } - - next_busno = bus->busn_res.end + 1; + next_busno = hose->bus->busn_res.end + 1; /* Don't allow 8-bit bus number overflow inside the hose - reserve some space for bridges. */ if (next_busno > 224) { @@ -72,9 +83,9 @@ static void pcibios_scanbus(struct pci_channel *hose) need_domain_info = 1; } - pci_bus_size_bridges(bus); - pci_bus_assign_resources(bus); - pci_bus_add_devices(bus); + pci_bus_size_bridges(hose->bus); + pci_bus_assign_resources(hose->bus); + pci_bus_add_devices(hose->bus); } /* @@ -144,8 +155,6 @@ static int __init pcibios_init(void) for (hose = hose_head; hose; hose = hose->next) pcibios_scanbus(hose); - pci_fixup_irqs(pci_common_swizzle, pcibios_map_platform_irq); - dma_debug_add_bus(&pci_bus_type); pci_initialized = 1; @@ -154,14 +163,6 @@ static int __init pcibios_init(void) } subsys_initcall(pcibios_init); -/* - * Called after each bus is probed, but before its children - * are examined. 
- */ -void pcibios_fixup_bus(struct pci_bus *bus) -{ -} - /* * We need to avoid collisions with `mirrored' VGA ports * and other strange ISA hardware, so we always want the diff --git a/arch/sh/drivers/pci/pcie-sh7786.c b/arch/sh/drivers/pci/pcie-sh7786.c index a162a7f86b2e..0167a7352719 100644 --- a/arch/sh/drivers/pci/pcie-sh7786.c +++ b/arch/sh/drivers/pci/pcie-sh7786.c @@ -467,7 +467,7 @@ static int __init pcie_init(struct sh7786_pcie_port *port) return 0; } -int __init pcibios_map_platform_irq(const struct pci_dev *pdev, u8 slot, u8 pin) +int pcibios_map_platform_irq(const struct pci_dev *pdev, u8 slot, u8 pin) { return evt2irq(0xae0); } diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h index 18e0377f72bb..88ce1e22237b 100644 --- a/arch/sh/include/asm/processor_32.h +++ b/arch/sh/include/asm/processor_32.h @@ -136,10 +136,6 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_pc, unsigned lo /* Free all resources held by a thread. */ extern void release_thread(struct task_struct *); -/* Copy and release all segment info associated with a VM */ -#define copy_segments(p, mm) do { } while(0) -#define release_segments(mm) do { } while(0) - /* * FPU lazy state save handling. */ diff --git a/arch/sh/include/asm/processor_64.h b/arch/sh/include/asm/processor_64.h index eedd4f625d07..777a16318aff 100644 --- a/arch/sh/include/asm/processor_64.h +++ b/arch/sh/include/asm/processor_64.h @@ -170,10 +170,6 @@ struct mm_struct; /* Free all resources held by a thread. */ extern void release_thread(struct task_struct *); -/* Copy and release all segment info associated with a VM */ -#define copy_segments(p, mm) do { } while (0) -#define release_segments(mm) do { } while (0) -#define forget_segments() do { } while (0) /* * FPU lazy state save handling. 
*/ diff --git a/arch/sh/include/cpu-sh2a/cpu/sh7264.h b/arch/sh/include/cpu-sh2a/cpu/sh7264.h index 4d1ef6d74bd6..2ae0e938b657 100644 --- a/arch/sh/include/cpu-sh2a/cpu/sh7264.h +++ b/arch/sh/include/cpu-sh2a/cpu/sh7264.h @@ -43,9 +43,7 @@ enum { GPIO_PG7, GPIO_PG6, GPIO_PG5, GPIO_PG4, GPIO_PG3, GPIO_PG2, GPIO_PG1, GPIO_PG0, - /* Port H */ - GPIO_PH7, GPIO_PH6, GPIO_PH5, GPIO_PH4, - GPIO_PH3, GPIO_PH2, GPIO_PH1, GPIO_PH0, + /* Port H - Port H does not have a Data Register */ /* Port I - not on device */ diff --git a/arch/sh/include/cpu-sh2a/cpu/sh7269.h b/arch/sh/include/cpu-sh2a/cpu/sh7269.h index 2a0ca8780f0d..13c495a9fc00 100644 --- a/arch/sh/include/cpu-sh2a/cpu/sh7269.h +++ b/arch/sh/include/cpu-sh2a/cpu/sh7269.h @@ -45,9 +45,7 @@ enum { GPIO_PG7, GPIO_PG6, GPIO_PG5, GPIO_PG4, GPIO_PG3, GPIO_PG2, GPIO_PG1, GPIO_PG0, - /* Port H */ - GPIO_PH7, GPIO_PH6, GPIO_PH5, GPIO_PH4, - GPIO_PH3, GPIO_PH2, GPIO_PH1, GPIO_PH0, + /* Port H - Port H does not have a Data Register */ /* Port I - not on device */ diff --git a/arch/sh/include/cpu-sh4/cpu/sh7722.h b/arch/sh/include/cpu-sh4/cpu/sh7722.h index 3bb74e534d0f..78961ab78a5a 100644 --- a/arch/sh/include/cpu-sh4/cpu/sh7722.h +++ b/arch/sh/include/cpu-sh4/cpu/sh7722.h @@ -67,7 +67,7 @@ enum { GPIO_PTN3, GPIO_PTN2, GPIO_PTN1, GPIO_PTN0, /* PTQ */ - GPIO_PTQ7, GPIO_PTQ6, GPIO_PTQ5, GPIO_PTQ4, + GPIO_PTQ6, GPIO_PTQ5, GPIO_PTQ4, GPIO_PTQ3, GPIO_PTQ2, GPIO_PTQ1, GPIO_PTQ0, /* PTR */ diff --git a/arch/sh/include/cpu-sh4/cpu/sh7757.h b/arch/sh/include/cpu-sh4/cpu/sh7757.h index 5340f3bc1863..b40fb541e72a 100644 --- a/arch/sh/include/cpu-sh4/cpu/sh7757.h +++ b/arch/sh/include/cpu-sh4/cpu/sh7757.h @@ -40,7 +40,7 @@ enum { /* PTJ */ GPIO_PTJ0, GPIO_PTJ1, GPIO_PTJ2, GPIO_PTJ3, - GPIO_PTJ4, GPIO_PTJ5, GPIO_PTJ6, GPIO_PTJ7_RESV, + GPIO_PTJ4, GPIO_PTJ5, GPIO_PTJ6, /* PTK */ GPIO_PTK0, GPIO_PTK1, GPIO_PTK2, GPIO_PTK3, @@ -48,7 +48,7 @@ enum { /* PTL */ GPIO_PTL0, GPIO_PTL1, GPIO_PTL2, GPIO_PTL3, - GPIO_PTL4, GPIO_PTL5, GPIO_PTL6, GPIO_PTL7_RESV, + GPIO_PTL4, GPIO_PTL5, GPIO_PTL6, /* PTM */ GPIO_PTM0, GPIO_PTM1, GPIO_PTM2, GPIO_PTM3, @@ -56,7 +56,7 @@ enum { /* PTN */ GPIO_PTN0, GPIO_PTN1, GPIO_PTN2, GPIO_PTN3, - GPIO_PTN4, GPIO_PTN5, GPIO_PTN6, GPIO_PTN7_RESV, + GPIO_PTN4, GPIO_PTN5, GPIO_PTN6, /* PTO */ GPIO_PTO0, GPIO_PTO1, GPIO_PTO2, GPIO_PTO3, @@ -68,7 +68,7 @@ enum { /* PTQ */ GPIO_PTQ0, GPIO_PTQ1, GPIO_PTQ2, GPIO_PTQ3, - GPIO_PTQ4, GPIO_PTQ5, GPIO_PTQ6, GPIO_PTQ7_RESV, + GPIO_PTQ4, GPIO_PTQ5, GPIO_PTQ6, /* PTR */ GPIO_PTR0, GPIO_PTR1, GPIO_PTR2, GPIO_PTR3, diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index a4a626199c47..4e83f950713e 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -44,7 +44,6 @@ config SPARC select ARCH_HAS_SG_CHAIN select CPU_NO_EFFICIENT_FFS select LOCKDEP_SMALL if LOCKDEP - select ARCH_WANT_RELAX_ORDER config SPARC32 def_bool !64BIT @@ -97,6 +96,9 @@ config ARCH_PROC_KCORE_TEXT config CPU_BIG_ENDIAN def_bool y +config CPU_BIG_ENDIAN + def_bool y + config ARCH_ATU bool default y if SPARC64 diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig index ca8609d7292f..4d4e1cc6402f 100644 --- a/arch/sparc/configs/sparc64_defconfig +++ b/arch/sparc/configs/sparc64_defconfig @@ -238,3 +238,4 @@ CONFIG_CRYPTO_TWOFISH=m # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRC16=m CONFIG_LIBCRC32C=m +CONFIG_VCC=m diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c index c90930de76ba..3cd4f6b198b6 100644 --- a/arch/sparc/crypto/aes_glue.c +++ b/arch/sparc/crypto/aes_glue.c @@ -344,8 
+344,7 @@ static void ctr_crypt_final(struct crypto_sparc64_aes_ctx *ctx, ctx->ops->ecb_encrypt(&ctx->key[0], (const u64 *)ctrblk, keystream, AES_BLOCK_SIZE); - crypto_xor((u8 *) keystream, src, nbytes); - memcpy(dst, keystream, nbytes); + crypto_xor_cpy(dst, (u8 *) keystream, src, nbytes); crypto_inc(ctrblk, AES_BLOCK_SIZE); } diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h index d1f837dc77a4..0ca7caab1b06 100644 --- a/arch/sparc/include/asm/hugetlb.h +++ b/arch/sparc/include/asm/hugetlb.h @@ -4,6 +4,13 @@ #include #include +#ifdef CONFIG_HUGETLB_PAGE +struct pud_huge_patch_entry { + unsigned int addr; + unsigned int insn; +}; +extern struct pud_huge_patch_entry __pud_huge_patch, __pud_huge_patch_end; +#endif void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte); diff --git a/arch/sparc/include/asm/hypervisor.h b/arch/sparc/include/asm/hypervisor.h index 73cb8978df58..3dc9215d0357 100644 --- a/arch/sparc/include/asm/hypervisor.h +++ b/arch/sparc/include/asm/hypervisor.h @@ -298,6 +298,24 @@ unsigned long sun4v_cpu_stop(unsigned long cpuid); unsigned long sun4v_cpu_yield(void); #endif +/* cpu_poke() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_CPU_POKE + * RET0: status + * ERRORS: ENOCPU cpuid refers to a CPU that does not exist + * EINVAL cpuid is current CPU + * + * Poke CPU cpuid. If the target CPU is currently suspended having + * invoked the cpu-yield service, that vCPU will be resumed. + * Poke interrupts may only be sent to valid, non-local CPUs. + * It is not legal to poke the current vCPU. + */ +#define HV_FAST_CPU_POKE 0x13 + +#ifndef __ASSEMBLY__ +unsigned long sun4v_cpu_poke(unsigned long cpuid); +#endif + /* cpu_qconf() * TRAP: HV_FAST_TRAP * FUNCTION: HV_FAST_CPU_QCONF diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h index 5961b2d8398a..8ee1f97589a1 100644 --- a/arch/sparc/include/asm/page_64.h +++ b/arch/sparc/include/asm/page_64.h @@ -17,6 +17,7 @@ #define HPAGE_SHIFT 23 #define REAL_HPAGE_SHIFT 22 +#define HPAGE_16GB_SHIFT 34 #define HPAGE_2GB_SHIFT 31 #define HPAGE_256MB_SHIFT 28 #define HPAGE_64K_SHIFT 16 @@ -28,7 +29,7 @@ #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA #define REAL_HPAGE_PER_HPAGE (_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT)) -#define HUGE_MAX_HSTATE 4 +#define HUGE_MAX_HSTATE 5 #endif #ifndef __ASSEMBLY__ diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 6fbd931f0570..4fefe3762083 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -414,6 +414,11 @@ static inline bool is_hugetlb_pmd(pmd_t pmd) return !!(pmd_val(pmd) & _PAGE_PMD_HUGE); } +static inline bool is_hugetlb_pud(pud_t pud) +{ + return !!(pud_val(pud) & _PAGE_PUD_HUGE); +} + #ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline pmd_t pmd_mkhuge(pmd_t pmd) { @@ -687,6 +692,8 @@ static inline unsigned long pmd_write(pmd_t pmd) return pte_write(pte); } +#define pud_write(pud) pte_write(__pte(pud_val(pud))) + #ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline unsigned long pmd_dirty(pmd_t pmd) { @@ -823,9 +830,18 @@ static inline unsigned long __pmd_page(pmd_t pmd) return ((unsigned long) __va(pfn << PAGE_SHIFT)); } + +static inline unsigned long pud_page_vaddr(pud_t pud) +{ + pte_t pte = __pte(pud_val(pud)); + unsigned long pfn; + + pfn = pte_pfn(pte); + + return ((unsigned long) __va(pfn << PAGE_SHIFT)); +} + #define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd)) -#define 
pud_page_vaddr(pud) \ - ((unsigned long) __va(pud_val(pud))) #define pud_page(pud) virt_to_page((void *)pud_page_vaddr(pud)) #define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0UL) #define pud_present(pud) (pud_val(pud) != 0U) diff --git a/arch/sparc/include/asm/smp_64.h b/arch/sparc/include/asm/smp_64.h index ce2233f7e662..a75089285db8 100644 --- a/arch/sparc/include/asm/smp_64.h +++ b/arch/sparc/include/asm/smp_64.h @@ -33,6 +33,9 @@ DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); extern cpumask_t cpu_core_map[NR_CPUS]; +void smp_init_cpu_poke(void); +void scheduler_poke(void); + void arch_send_call_function_single_ipi(int cpu); void arch_send_call_function_ipi_mask(const struct cpumask *mask); @@ -74,6 +77,8 @@ void __cpu_die(unsigned int cpu); #define smp_fetch_global_regs() do { } while (0) #define smp_fetch_global_pmu() do { } while (0) #define smp_fill_in_cpu_possible_map() do { } while (0) +#define smp_init_cpu_poke() do { } while (0) +#define scheduler_poke() do { } while (0) #endif /* !(CONFIG_SMP) */ diff --git a/arch/sparc/include/asm/trap_block.h b/arch/sparc/include/asm/trap_block.h index ff05992dae7a..dfc538609eb2 100644 --- a/arch/sparc/include/asm/trap_block.h +++ b/arch/sparc/include/asm/trap_block.h @@ -73,6 +73,8 @@ struct sun4v_1insn_patch_entry { }; extern struct sun4v_1insn_patch_entry __sun4v_1insn_patch, __sun4v_1insn_patch_end; +extern struct sun4v_1insn_patch_entry __fast_win_ctrl_1insn_patch, + __fast_win_ctrl_1insn_patch_end; struct sun4v_2insn_patch_entry { unsigned int addr; diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h index 32258e08da03..acf55063aa3d 100644 --- a/arch/sparc/include/asm/tsb.h +++ b/arch/sparc/include/asm/tsb.h @@ -195,6 +195,41 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; nop; \ 699: + /* PUD has been loaded into REG1, interpret the value, seeing + * if it is a HUGE PUD or a normal one. If it is not valid + * then jump to FAIL_LABEL. If it is a HUGE PUD, and it + * translates to a valid PTE, branch to PTE_LABEL. + * + * We have to propagate bits [32:22] from the virtual address + * to resolve at 4M granularity. + */ +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) +#define USER_PGTABLE_CHECK_PUD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \ +700: ba 700f; \ + nop; \ + .section .pud_huge_patch, "ax"; \ + .word 700b; \ + nop; \ + .previous; \ + brz,pn REG1, FAIL_LABEL; \ + sethi %uhi(_PAGE_PUD_HUGE), REG2; \ + sllx REG2, 32, REG2; \ + andcc REG1, REG2, %g0; \ + be,pt %xcc, 700f; \ + sethi %hi(0x1ffc0000), REG2; \ + sllx REG2, 1, REG2; \ + brgez,pn REG1, FAIL_LABEL; \ + andn REG1, REG2, REG1; \ + and VADDR, REG2, REG2; \ + brlz,pt REG1, PTE_LABEL; \ + or REG1, REG2, REG1; \ +700: +#else +#define USER_PGTABLE_CHECK_PUD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \ + brz,pn REG1, FAIL_LABEL; \ + nop; +#endif + /* PMD has been loaded into REG1, interpret the value, seeing * if it is a HUGE PMD or a normal one. If it is not valid * then jump to FAIL_LABEL. 
If it is a HUGE PMD, and it @@ -242,6 +277,7 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; srlx REG2, 64 - PAGE_SHIFT, REG2; \ andn REG2, 0x7, REG2; \ ldxa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \ + USER_PGTABLE_CHECK_PUD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, 800f) \ brz,pn REG1, FAIL_LABEL; \ sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \ srlx REG2, 64 - PAGE_SHIFT, REG2; \ diff --git a/arch/sparc/include/asm/vga.h b/arch/sparc/include/asm/vga.h index ec0e9967d93d..f54e8b6fb197 100644 --- a/arch/sparc/include/asm/vga.h +++ b/arch/sparc/include/asm/vga.h @@ -8,9 +8,13 @@ #define _LINUX_ASM_VGA_H_ #include +#include #include #define VT_BUF_HAVE_RW +#define VT_BUF_HAVE_MEMSETW +#define VT_BUF_HAVE_MEMCPYW +#define VT_BUF_HAVE_MEMMOVEW #undef scr_writew #undef scr_readw @@ -29,6 +33,27 @@ static inline u16 scr_readw(const u16 *addr) return *addr; } +static inline void scr_memsetw(u16 *p, u16 v, unsigned int n) +{ + BUG_ON((long) p >= 0); + + memset16(p, cpu_to_le16(v), n / 2); +} + +static inline void scr_memcpyw(u16 *d, u16 *s, unsigned int n) +{ + BUG_ON((long) d >= 0); + + memcpy(d, s, n); +} + +static inline void scr_memmovew(u16 *d, u16 *s, unsigned int n) +{ + BUG_ON((long) d >= 0); + + memmove(d, s, n); +} + #define VGA_MAP_MEM(x,s) (x) #endif diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h index d1c47e9f0090..f3d4ac232690 100644 --- a/arch/sparc/include/asm/vio.h +++ b/arch/sparc/include/asm/vio.h @@ -52,6 +52,7 @@ struct vio_ver_info { #define VDEV_NETWORK_SWITCH 0x02 #define VDEV_DISK 0x03 #define VDEV_DISK_SERVER 0x04 +#define VDEV_CONSOLE_CON 0x05 u8 resv1[3]; u64 resv2[5]; @@ -282,6 +283,14 @@ struct vio_dring_state { struct ldc_trans_cookie cookies[VIO_MAX_RING_COOKIES]; }; +#define VIO_TAG_SIZE ((int)sizeof(struct vio_msg_tag)) +#define VIO_VCC_MTU_SIZE (LDC_PACKET_SIZE - VIO_TAG_SIZE) + +struct vio_vcc { + struct vio_msg_tag tag; + char data[VIO_VCC_MTU_SIZE]; +}; + static inline void *vio_dring_cur(struct vio_dring_state *dr) { return dr->base + (dr->entry_size * dr->prod); diff --git a/arch/sparc/include/uapi/asm/siginfo.h b/arch/sparc/include/uapi/asm/siginfo.h index 2d9b79ccaa50..157f46fe374f 100644 --- a/arch/sparc/include/uapi/asm/siginfo.h +++ b/arch/sparc/include/uapi/asm/siginfo.h @@ -16,10 +16,17 @@ #define SI_NOINFO 32767 /* no information in siginfo_t */ +/* + * SIGFPE si_codes + */ +#ifdef __KERNEL__ +#define FPE_FIXME 0 /* Broken dup of SI_USER */ +#endif /* __KERNEL__ */ + /* * SIGEMT si_codes */ -#define EMT_TAGOVF (__SI_FAULT|1) /* tag overflow */ +#define EMT_TAGOVF 1 /* tag overflow */ #define NSIGEMT 1 #endif /* _UAPI__SPARC_SIGINFO_H */ diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h index 186fd8199f54..b2f5c50d0947 100644 --- a/arch/sparc/include/uapi/asm/socket.h +++ b/arch/sparc/include/uapi/asm/socket.h @@ -98,6 +98,8 @@ #define SO_PEERGROUPS 0x003d +#define SO_ZEROCOPY 0x003e + /* Security levels - as per NRL IPv6 - don't actually do anything */ #define SO_SECURITY_AUTHENTICATION 0x5001 #define SO_SECURITY_ENCRYPTION_TRANSPORT 0x5002 diff --git a/arch/sparc/kernel/etrap_64.S b/arch/sparc/kernel/etrap_64.S index 1276ca2567ba..5c237467d156 100644 --- a/arch/sparc/kernel/etrap_64.S +++ b/arch/sparc/kernel/etrap_64.S @@ -38,7 +38,11 @@ etrap_syscall: TRAP_LOAD_THREAD_REG(%g6, %g1) or %g1, %g3, %g1 bne,pn %xcc, 1f sub %sp, STACKFRAME_SZ+TRACEREG_SZ-STACK_BIAS, %g2 - wrpr %g0, 7, %cleanwin +661: wrpr %g0, 7, %cleanwin + .section .fast_win_ctrl_1insn_patch, "ax" + 
.word 661b + .word 0x85880000 ! allclean + .previous sethi %hi(TASK_REGOFF), %g2 sethi %hi(TSTATE_PEF), %g3 @@ -88,16 +92,30 @@ etrap_save: save %g2, -STACK_BIAS, %sp bne,pn %xcc, 3f mov PRIMARY_CONTEXT, %l4 - rdpr %canrestore, %g3 +661: rdpr %canrestore, %g3 + .section .fast_win_ctrl_1insn_patch, "ax" + .word 661b + nop + .previous + rdpr %wstate, %g2 - wrpr %g0, 0, %canrestore +661: wrpr %g0, 0, %canrestore + .section .fast_win_ctrl_1insn_patch, "ax" + .word 661b + nop + .previous sll %g2, 3, %g2 /* Set TI_SYS_FPDEPTH to 1 and clear TI_SYS_NOERROR. */ mov 1, %l5 sth %l5, [%l6 + TI_SYS_NOERROR] - wrpr %g3, 0, %otherwin +661: wrpr %g3, 0, %otherwin + .section .fast_win_ctrl_1insn_patch, "ax" + .word 661b + .word 0x87880000 ! otherw + .previous + wrpr %g2, 0, %wstate sethi %hi(sparc64_kern_pri_context), %g2 ldx [%g2 + %lo(sparc64_kern_pri_context)], %g3 diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S index 78e0211753d2..4de9fbd1a177 100644 --- a/arch/sparc/kernel/head_64.S +++ b/arch/sparc/kernel/head_64.S @@ -603,10 +603,10 @@ niagara_tlb_fixup: be,pt %xcc, niagara4_patch nop cmp %g1, SUN4V_CHIP_SPARC_M7 - be,pt %xcc, niagara4_patch + be,pt %xcc, sparc_m7_patch nop cmp %g1, SUN4V_CHIP_SPARC_M8 - be,pt %xcc, niagara4_patch + be,pt %xcc, sparc_m7_patch nop cmp %g1, SUN4V_CHIP_SPARC_SN be,pt %xcc, niagara4_patch @@ -621,6 +621,18 @@ niagara_tlb_fixup: ba,a,pt %xcc, 80f nop + +sparc_m7_patch: + call m7_patch_copyops + nop + call m7_patch_bzero + nop + call m7_patch_pageops + nop + + ba,a,pt %xcc, 80f + nop + niagara4_patch: call niagara4_patch_copyops nop @@ -881,7 +893,6 @@ sparc64_boot_end: #include "misctrap.S" #include "syscalls.S" #include "helpers.S" -#include "hvcalls.S" #include "sun4v_tlb_miss.S" #include "sun4v_ivec.S" #include "ktlb.S" @@ -926,6 +937,7 @@ swapper_4m_tsb: ! 
0x0000000000428000 +#include "hvcalls.S" #include "systbls_64.S" .data diff --git a/arch/sparc/kernel/hvapi.c b/arch/sparc/kernel/hvapi.c index 267731234ce8..d41ce33d87d6 100644 --- a/arch/sparc/kernel/hvapi.c +++ b/arch/sparc/kernel/hvapi.c @@ -189,7 +189,7 @@ void __init sun4v_hvapi_init(void) group = HV_GRP_CORE; major = 1; - minor = 1; + minor = 6; if (sun4v_hvapi_register(group, major, &minor)) goto bad; diff --git a/arch/sparc/kernel/hvcalls.S b/arch/sparc/kernel/hvcalls.S index 4116ee5c7791..e57007ff7f8f 100644 --- a/arch/sparc/kernel/hvcalls.S +++ b/arch/sparc/kernel/hvcalls.S @@ -106,6 +106,17 @@ ENTRY(sun4v_cpu_yield) nop ENDPROC(sun4v_cpu_yield) + /* %o0: cpuid + * + * returns %o0: status + */ +ENTRY(sun4v_cpu_poke) + mov HV_FAST_CPU_POKE, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_cpu_poke) + /* %o0: type * %o1: queue paddr * %o2: num queue entries diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c index 840e0b21bfe3..acffbc894ab0 100644 --- a/arch/sparc/kernel/ldc.c +++ b/arch/sparc/kernel/ldc.c @@ -1480,6 +1480,7 @@ int ldc_rx_reset(struct ldc_channel *lp) { return __set_rx_head(lp, lp->rx_tail); } +EXPORT_SYMBOL(ldc_rx_reset); void __ldc_print(struct ldc_channel *lp, const char *caller) { @@ -1493,6 +1494,7 @@ void __ldc_print(struct ldc_channel *lp, const char *caller) lp->tx_head, lp->tx_tail, lp->tx_num_entries, lp->rcv_nxt, lp->snd_nxt); } +EXPORT_SYMBOL(__ldc_print); static int write_raw(struct ldc_channel *lp, const void *buf, unsigned int size) { diff --git a/arch/sparc/kernel/leon_pci.c b/arch/sparc/kernel/leon_pci.c index 4371f72ff025..98c223edac84 100644 --- a/arch/sparc/kernel/leon_pci.c +++ b/arch/sparc/kernel/leon_pci.c @@ -25,6 +25,12 @@ void leon_pci_init(struct platform_device *ofdev, struct leon_pci_info *info) { LIST_HEAD(resources); struct pci_bus *root_bus; + struct pci_host_bridge *bridge; + int ret; + + bridge = pci_alloc_host_bridge(0); + if (!bridge) + return; pci_add_resource_offset(&resources, &info->io_space, info->io_space.start - 0x1000); @@ -32,15 +38,21 @@ void leon_pci_init(struct platform_device *ofdev, struct leon_pci_info *info) info->busn.flags = IORESOURCE_BUS; pci_add_resource(&resources, &info->busn); - root_bus = pci_scan_root_bus(&ofdev->dev, 0, info->ops, info, - &resources); - if (!root_bus) { - pci_free_resource_list(&resources); + list_splice_init(&resources, &bridge->windows); + bridge->dev.parent = &ofdev->dev; + bridge->sysdata = info; + bridge->busnr = 0; + bridge->ops = info->ops; + bridge->swizzle_irq = pci_common_swizzle; + bridge->map_irq = info->map_irq; + + ret = pci_scan_root_bus_bridge(bridge); + if (ret) { + pci_free_host_bridge(bridge); return; } - /* Setup IRQs of all devices using custom routines */ - pci_fixup_irqs(pci_common_swizzle, info->map_irq); + root_bus = bridge->bus; /* Assign devices with resources */ pci_assign_unassigned_resources(); @@ -94,9 +106,3 @@ void pcibios_fixup_bus(struct pci_bus *pbus) } } } - -resource_size_t pcibios_align_resource(void *data, const struct resource *res, - resource_size_t size, resource_size_t align) -{ - return res->start; -} diff --git a/arch/sparc/kernel/leon_pci_grpci1.c b/arch/sparc/kernel/leon_pci_grpci1.c index 1e77128a8f88..83ba5005d44c 100644 --- a/arch/sparc/kernel/leon_pci_grpci1.c +++ b/arch/sparc/kernel/leon_pci_grpci1.c @@ -695,7 +695,7 @@ static int grpci1_of_probe(struct platform_device *ofdev) return err; } -static struct of_device_id grpci1_of_match[] = { +static const struct of_device_id grpci1_of_match[] __initconst = { { .name = 
"GAISLER_PCIFBRG", }, diff --git a/arch/sparc/kernel/leon_pci_grpci2.c b/arch/sparc/kernel/leon_pci_grpci2.c index f727c4de1316..ff0e5c90310f 100644 --- a/arch/sparc/kernel/leon_pci_grpci2.c +++ b/arch/sparc/kernel/leon_pci_grpci2.c @@ -886,7 +886,7 @@ static int grpci2_of_probe(struct platform_device *ofdev) return err; } -static struct of_device_id grpci2_of_match[] = { +static const struct of_device_id grpci2_of_match[] __initconst = { { .name = "GAISLER_GRPCI2", }, diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c index 7eceaa10836f..3f8670c92951 100644 --- a/arch/sparc/kernel/pci.c +++ b/arch/sparc/kernel/pci.c @@ -690,16 +690,6 @@ struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm, return bus; } -void pcibios_fixup_bus(struct pci_bus *pbus) -{ -} - -resource_size_t pcibios_align_resource(void *data, const struct resource *res, - resource_size_t size, resource_size_t align) -{ - return res->start; -} - int pcibios_enable_device(struct pci_dev *dev, int mask) { u16 cmd, oldcmd; diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c index 732af9a9f6dd..4a133c052af8 100644 --- a/arch/sparc/kernel/pcic.c +++ b/arch/sparc/kernel/pcic.c @@ -746,12 +746,6 @@ static void watchdog_reset() { } #endif -resource_size_t pcibios_align_resource(void *data, const struct resource *res, - resource_size_t size, resource_size_t align) -{ - return res->start; -} - int pcibios_enable_device(struct pci_dev *pdev, int mask) { return 0; diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index b96104da5bd6..44e5da405f96 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c @@ -77,8 +77,13 @@ void arch_cpu_idle(void) : "=&r" (pstate) : "i" (PSTATE_IE)); - if (!need_resched() && !cpu_is_offline(smp_processor_id())) + if (!need_resched() && !cpu_is_offline(smp_processor_id())) { sun4v_cpu_yield(); + /* If resumed by cpu_poke then we need to explicitly + * call scheduler_ipi(). + */ + scheduler_poke(); + } /* Re-enable interrupts. */ __asm__ __volatile__( diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S index 709a82ebd294..dff86fad0a1f 100644 --- a/arch/sparc/kernel/rtrap_64.S +++ b/arch/sparc/kernel/rtrap_64.S @@ -224,10 +224,19 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1 rdpr %otherwin, %l2 srl %l1, 3, %l1 - wrpr %l2, %g0, %canrestore +661: wrpr %l2, %g0, %canrestore + .section .fast_win_ctrl_1insn_patch, "ax" + .word 661b + .word 0x89880000 ! 
normalw + .previous + wrpr %l1, %g0, %wstate brnz,pt %l2, user_rtt_restore - wrpr %g0, %g0, %otherwin +661: wrpr %g0, %g0, %otherwin + .section .fast_win_ctrl_1insn_patch, "ax" + .word 661b + nop + .previous ldx [%g6 + TI_FLAGS], %g3 wr %g0, ASI_AIUP, %asi diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c index 150ee7d4b059..db4c4d7e28a0 100644 --- a/arch/sparc/kernel/setup_64.c +++ b/arch/sparc/kernel/setup_64.c @@ -300,6 +300,11 @@ static void __init sun4v_patch(void) break; } + if (sun4v_chip_type != SUN4V_CHIP_NIAGARA1) { + sun4v_patch_1insn_range(&__fast_win_ctrl_1insn_patch, + &__fast_win_ctrl_1insn_patch_end); + } + sun4v_hvapi_init(); } @@ -363,6 +368,7 @@ void __init start_early_boot(void) check_if_starfire(); per_cpu_patch(); sun4v_patch(); + smp_init_cpu_poke(); cpu = hard_smp_processor_id(); if (cpu >= NR_CPUS) { diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c index b4096bb665b2..0e4c08c45a37 100644 --- a/arch/sparc/kernel/signal32.c +++ b/arch/sparc/kernel/signal32.c @@ -85,34 +85,34 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) at the same time. */ err = __put_user(from->si_signo, &to->si_signo); err |= __put_user(from->si_errno, &to->si_errno); - err |= __put_user((short)from->si_code, &to->si_code); + err |= __put_user(from->si_code, &to->si_code); if (from->si_code < 0) err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE); else { - switch (from->si_code >> 16) { - case __SI_TIMER >> 16: + switch (siginfo_layout(from->si_signo, from->si_code)) { + case SIL_TIMER: err |= __put_user(from->si_tid, &to->si_tid); err |= __put_user(from->si_overrun, &to->si_overrun); err |= __put_user(from->si_int, &to->si_int); break; - case __SI_CHLD >> 16: + case SIL_CHLD: err |= __put_user(from->si_utime, &to->si_utime); err |= __put_user(from->si_stime, &to->si_stime); err |= __put_user(from->si_status, &to->si_status); default: + case SIL_KILL: err |= __put_user(from->si_pid, &to->si_pid); err |= __put_user(from->si_uid, &to->si_uid); break; - case __SI_FAULT >> 16: + case SIL_FAULT: err |= __put_user(from->si_trapno, &to->si_trapno); err |= __put_user((unsigned long)from->si_addr, &to->si_addr); break; - case __SI_POLL >> 16: + case SIL_POLL: err |= __put_user(from->si_band, &to->si_band); err |= __put_user(from->si_fd, &to->si_fd); break; - case __SI_RT >> 16: /* This is not generated by the kernel as of now. 
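[Editorial note] The 661:/.section .fast_win_ctrl_1insn_patch pattern used in etrap and rtrap above records, for each patch site, the address of the original instruction and a single replacement opcode word (or a nop where the instruction should simply be dropped on newer chips); setup_64.c then applies the whole section at boot via sun4v_patch_1insn_range() on everything except Niagara-1. A minimal C sketch of such a one-instruction patcher, assuming each entry is the pair of 32-bit words emitted by the .word directives (the real kernel structure and its I-cache flushing are not reproduced here; names below are illustrative only):

    #include <stdint.h>

    /* Hypothetical layout matching the two .word entries emitted per site. */
    struct one_insn_patch {
            uint32_t addr;   /* address of the instruction to patch       */
            uint32_t insn;   /* replacement opcode (or a nop, 0x01000000) */
    };

    /* Sketch only: walk the section and overwrite each recorded
     * instruction.  The real helper also flushes the instruction cache
     * for every word it rewrites, which is omitted here.
     */
    static void apply_one_insn_patches(struct one_insn_patch *p,
                                       struct one_insn_patch *end)
    {
            for (; p < end; p++) {
                    uint32_t *site = (uint32_t *)(uintptr_t)p->addr;

                    *site = p->insn;
            }
    }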
*/ - case __SI_MESGQ >> 16: + case SIL_RT: err |= __put_user(from->si_pid, &to->si_pid); err |= __put_user(from->si_uid, &to->si_uid); err |= __put_user(from->si_int, &to->si_int); diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index 3218bc43302e..4898329970c5 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -74,6 +74,9 @@ EXPORT_SYMBOL(cpu_core_sib_cache_map); static cpumask_t smp_commenced_mask; +static DEFINE_PER_CPU(bool, poke); +static bool cpu_poke; + void smp_info(struct seq_file *m) { int i; @@ -1439,15 +1442,86 @@ void __init smp_cpus_done(unsigned int max_cpus) { } +static void send_cpu_ipi(int cpu) +{ + xcall_deliver((u64) &xcall_receive_signal, + 0, 0, cpumask_of(cpu)); +} + +void scheduler_poke(void) +{ + if (!cpu_poke) + return; + + if (!__this_cpu_read(poke)) + return; + + __this_cpu_write(poke, false); + set_softint(1 << PIL_SMP_RECEIVE_SIGNAL); +} + +static unsigned long send_cpu_poke(int cpu) +{ + unsigned long hv_err; + + per_cpu(poke, cpu) = true; + hv_err = sun4v_cpu_poke(cpu); + if (hv_err != HV_EOK) { + per_cpu(poke, cpu) = false; + pr_err_ratelimited("%s: sun4v_cpu_poke() fails err=%lu\n", + __func__, hv_err); + } + + return hv_err; +} + void smp_send_reschedule(int cpu) { if (cpu == smp_processor_id()) { WARN_ON_ONCE(preemptible()); set_softint(1 << PIL_SMP_RECEIVE_SIGNAL); - } else { - xcall_deliver((u64) &xcall_receive_signal, - 0, 0, cpumask_of(cpu)); + return; } + + /* Use cpu poke to resume idle cpu if supported. */ + if (cpu_poke && idle_cpu(cpu)) { + unsigned long ret; + + ret = send_cpu_poke(cpu); + if (ret == HV_EOK) + return; + } + + /* Use IPI in following cases: + * - cpu poke not supported + * - cpu not idle + * - send_cpu_poke() returns with error + */ + send_cpu_ipi(cpu); +} + +void smp_init_cpu_poke(void) +{ + unsigned long major; + unsigned long minor; + int ret; + + if (tlb_type != hypervisor) + return; + + ret = sun4v_hvapi_get(HV_GRP_CORE, &major, &minor); + if (ret) { + pr_debug("HV_GRP_CORE is not registered\n"); + return; + } + + if (major == 1 && minor >= 6) { + /* CPU POKE is registered. */ + cpu_poke = true; + return; + } + + pr_debug("CPU_POKE not supported\n"); } void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs) diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c index 466d4aed06c7..581cf35ee7e3 100644 --- a/arch/sparc/kernel/traps_32.c +++ b/arch/sparc/kernel/traps_32.c @@ -306,7 +306,7 @@ void do_fpe_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc, info.si_errno = 0; info.si_addr = (void __user *)pc; info.si_trapno = 0; - info.si_code = __SI_FAULT; + info.si_code = FPE_FIXME; if ((fsr & 0x1c000) == (1 << 14)) { if (fsr & 0x10) info.si_code = FPE_FLTINV; diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index ad31af1dd726..0a56dc257cb9 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c @@ -265,6 +265,45 @@ void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, u sun4v_insn_access_exception(regs, addr, type_ctx); } +bool is_no_fault_exception(struct pt_regs *regs) +{ + unsigned char asi; + u32 insn; + + if (get_user(insn, (u32 __user *)regs->tpc) == -EFAULT) + return false; + + /* + * Must do a little instruction decoding here in order to + * decide on a course of action. 
The bits of interest are: + * insn[31:30] = op, where 3 indicates the load/store group + * insn[24:19] = op3, which identifies individual opcodes + * insn[13] indicates an immediate offset + * op3[4]=1 identifies alternate space instructions + * op3[5:4]=3 identifies floating point instructions + * op3[2]=1 identifies stores + * See "Opcode Maps" in the appendix of any Sparc V9 + * architecture spec for full details. + */ + if ((insn & 0xc0800000) == 0xc0800000) { /* op=3, op3[4]=1 */ + if (insn & 0x2000) /* immediate offset */ + asi = (regs->tstate >> 24); /* saved %asi */ + else + asi = (insn >> 5); /* immediate asi */ + if ((asi & 0xf2) == ASI_PNF) { + if (insn & 0x1000000) { /* op3[5:4]=3 */ + handle_ldf_stq(insn, regs); + return true; + } else if (insn & 0x200000) { /* op3[2], stores */ + return false; + } + handle_ld_nf(insn, regs); + return true; + } + } + return false; +} + void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar) { enum ctx_state prev_state = exception_enter(); @@ -296,6 +335,9 @@ void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, un die_if_kernel("Dax", regs); } + if (is_no_fault_exception(regs)) + return; + info.si_signo = SIGSEGV; info.si_errno = 0; info.si_code = SEGV_MAPERR; @@ -352,6 +394,9 @@ void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsig regs->tpc &= 0xffffffff; regs->tnpc &= 0xffffffff; } + if (is_no_fault_exception(regs)) + return; + info.si_signo = SIGSEGV; info.si_errno = 0; info.si_code = SEGV_MAPERR; @@ -2258,7 +2303,7 @@ static void do_fpe_common(struct pt_regs *regs) info.si_errno = 0; info.si_addr = (void __user *)regs->tpc; info.si_trapno = 0; - info.si_code = __SI_FAULT; + info.si_code = FPE_FIXME; if ((fsr & 0x1c000) == (1 << 14)) { if (fsr & 0x10) info.si_code = FPE_FLTINV; @@ -2575,6 +2620,9 @@ void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned lo kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc)); goto out; } + if (is_no_fault_exception(regs)) + return; + info.si_signo = SIGBUS; info.si_errno = 0; info.si_code = BUS_ADRALN; @@ -2597,6 +2645,9 @@ void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_c kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc)); return; } + if (is_no_fault_exception(regs)) + return; + info.si_signo = SIGBUS; info.si_errno = 0; info.si_code = BUS_ADRALN; diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S index db872dbfafe9..f74115364b1e 100644 --- a/arch/sparc/kernel/tsb.S +++ b/arch/sparc/kernel/tsb.S @@ -117,7 +117,7 @@ tsb_miss_page_table_walk_sun4v_fastpath: /* Valid PTE is now in %g5. 
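[Editorial note] The mask test in is_no_fault_exception() follows directly from the SPARC V9 format-3 encoding sketched in the comment above: 0xc0800000 selects op == 3 (the load/store group) with op3 bit 4 set (the alternate-space forms), and the ASI is then taken either from bits 12:5 of the instruction or, for the immediate-offset form, from the saved %asi held in TSTATE. A small illustrative decoder (field positions per the V9 opcode maps; this is a sketch, not the kernel helper):

    #include <stdint.h>
    #include <stdbool.h>

    struct v9_mem_insn {
            unsigned op;    /* insn[31:30]: 3 = load/store group           */
            unsigned op3;   /* insn[24:19]: opcode within the group        */
            bool     imm;   /* insn[13]:    immediate-offset form          */
            unsigned asi;   /* insn[12:5]:  ASI field (register form only) */
    };

    static struct v9_mem_insn v9_decode(uint32_t insn)
    {
            struct v9_mem_insn d;

            d.op  = insn >> 30;
            d.op3 = (insn >> 19) & 0x3f;
            d.imm = (insn >> 13) & 1;
            d.asi = (insn >> 5) & 0xff;
            return d;
    }

With ASI_PNF being 0x82, the (asi & 0xf2) == ASI_PNF comparison accepts the primary, secondary and little-endian no-fault variants (0x82, 0x83, 0x8a, 0x8b) in a single test.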
*/ #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) - sethi %uhi(_PAGE_PMD_HUGE), %g7 + sethi %uhi(_PAGE_PMD_HUGE | _PAGE_PUD_HUGE), %g7 sllx %g7, 32, %g7 andcc %g5, %g7, %g0 diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c index 1c8763c9c52b..da1ac3f22b24 100644 --- a/arch/sparc/kernel/vio.c +++ b/arch/sparc/kernel/vio.c @@ -246,6 +246,7 @@ u64 vio_vdev_node(struct mdesc_handle *hp, struct vio_dev *vdev) return node; } +EXPORT_SYMBOL(vio_vdev_node); static void vio_fill_channel_info(struct mdesc_handle *hp, u64 mp, struct vio_dev *vdev) diff --git a/arch/sparc/kernel/viohs.c b/arch/sparc/kernel/viohs.c index d4f13c037a40..dcd278f29573 100644 --- a/arch/sparc/kernel/viohs.c +++ b/arch/sparc/kernel/viohs.c @@ -814,15 +814,21 @@ int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev, case VDEV_NETWORK_SWITCH: case VDEV_DISK: case VDEV_DISK_SERVER: + case VDEV_CONSOLE_CON: break; default: return -EINVAL; } - if (!ops || !ops->send_attr || !ops->handle_attr || - !ops->handshake_complete) - return -EINVAL; + if (dev_class == VDEV_NETWORK || + dev_class == VDEV_NETWORK_SWITCH || + dev_class == VDEV_DISK || + dev_class == VDEV_DISK_SERVER) { + if (!ops || !ops->send_attr || !ops->handle_attr || + !ops->handshake_complete) + return -EINVAL; + } if (!ver_table || ver_table_size < 0) return -EINVAL; diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S index 03b3d65d1266..d78847d56a4b 100644 --- a/arch/sparc/kernel/vmlinux.lds.S +++ b/arch/sparc/kernel/vmlinux.lds.S @@ -154,6 +154,16 @@ SECTIONS *(.get_tick_patch) __get_tick_patch_end = .; } + .pud_huge_patch : { + __pud_huge_patch = .; + *(.pud_huge_patch) + __pud_huge_patch_end = .; + } + .fast_win_ctrl_1insn_patch : { + __fast_win_ctrl_1insn_patch = .; + *(.fast_win_ctrl_1insn_patch) + __fast_win_ctrl_1insn_patch_end = .; + } PERCPU_SECTION(SMP_CACHE_BYTES) #ifdef CONFIG_JUMP_LABEL diff --git a/arch/sparc/lib/M7copy_from_user.S b/arch/sparc/lib/M7copy_from_user.S new file mode 100644 index 000000000000..66464b3e3649 --- /dev/null +++ b/arch/sparc/lib/M7copy_from_user.S @@ -0,0 +1,40 @@ +/* + * M7copy_from_user.S: SPARC M7 optimized copy from userspace. + * + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + */ + + +#define EX_LD(x, y) \ +98: x; \ + .section __ex_table,"a"; \ + .align 4; \ + .word 98b, y; \ + .text; \ + .align 4; + +#define EX_LD_FP(x, y) \ +98: x; \ + .section __ex_table,"a"; \ + .align 4; \ + .word 98b, y##_fp; \ + .text; \ + .align 4; + +#ifndef ASI_AIUS +#define ASI_AIUS 0x11 +#endif + +#define FUNC_NAME M7copy_from_user +#define LOAD(type,addr,dest) type##a [addr] %asi, dest +#define EX_RETVAL(x) 0 + +#ifdef __KERNEL__ +#define PREAMBLE \ + rd %asi, %g1; \ + cmp %g1, ASI_AIUS; \ + bne,pn %icc, raw_copy_in_user; \ + nop +#endif + +#include "M7memcpy.S" diff --git a/arch/sparc/lib/M7copy_to_user.S b/arch/sparc/lib/M7copy_to_user.S new file mode 100644 index 000000000000..a60ac467f808 --- /dev/null +++ b/arch/sparc/lib/M7copy_to_user.S @@ -0,0 +1,51 @@ +/* + * M7copy_to_user.S: SPARC M7 optimized copy to userspace. + * + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. 
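[Editorial note] The EX_LD()/EX_ST() wrappers defined for these user-copy variants emit one __ex_table entry per user-space access: the address of the potentially faulting load or store paired with a fixup label. When such an access faults, the trap handler looks the trap PC up in that table and resumes at the fixup, which in these copy routines computes and returns the number of bytes left uncopied. A conceptual C model of the table and its lookup, purely illustrative (the real sparc entries are pairs of 32-bit words):

    #include <stdint.h>
    #include <stddef.h>

    struct ex_entry_model {
            uintptr_t insn;         /* PC of the access that may fault */
            uintptr_t fixup;        /* where to resume if it does      */
    };

    static uintptr_t ex_lookup(const struct ex_entry_model *tab, size_t n,
                               uintptr_t fault_pc)
    {
            for (size_t i = 0; i < n; i++)
                    if (tab[i].insn == fault_pc)
                            return tab[i].fixup;
            return 0;       /* no entry: the fault is fatal */
    }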
+ */ + + +#define EX_ST(x, y) \ +98: x; \ + .section __ex_table,"a"; \ + .align 4; \ + .word 98b, y; \ + .text; \ + .align 4; + +#define EX_ST_FP(x, y) \ +98: x; \ + .section __ex_table,"a"; \ + .align 4; \ + .word 98b, y##_fp; \ + .text; \ + .align 4; + + +#ifndef ASI_AIUS +#define ASI_AIUS 0x11 +#endif + +#ifndef ASI_BLK_INIT_QUAD_LDD_AIUS +#define ASI_BLK_INIT_QUAD_LDD_AIUS 0x23 +#endif + +#define FUNC_NAME M7copy_to_user +#define STORE(type,src,addr) type##a src, [addr] %asi +#define STORE_ASI ASI_BLK_INIT_QUAD_LDD_AIUS +#define STORE_MRU_ASI ASI_ST_BLKINIT_MRU_S +#define EX_RETVAL(x) 0 + +#ifdef __KERNEL__ + /* Writing to %asi is _expensive_ so we hardcode it. + * Reading %asi to check for KERNEL_DS is comparatively + * cheap. + */ +#define PREAMBLE \ + rd %asi, %g1; \ + cmp %g1, ASI_AIUS; \ + bne,pn %icc, raw_copy_in_user; \ + nop +#endif + +#include "M7memcpy.S" diff --git a/arch/sparc/lib/M7memcpy.S b/arch/sparc/lib/M7memcpy.S new file mode 100644 index 000000000000..cbd42ea7c3f7 --- /dev/null +++ b/arch/sparc/lib/M7memcpy.S @@ -0,0 +1,923 @@ +/* + * M7memcpy: Optimized SPARC M7 memcpy + * + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + */ + + .file "M7memcpy.S" + +/* + * memcpy(s1, s2, len) + * + * Copy s2 to s1, always copy n bytes. + * Note: this C code does not work for overlapped copies. + * + * Fast assembler language version of the following C-program for memcpy + * which represents the `standard' for the C-library. + * + * void * + * memcpy(void *s, const void *s0, size_t n) + * { + * if (n != 0) { + * char *s1 = s; + * const char *s2 = s0; + * do { + * *s1++ = *s2++; + * } while (--n != 0); + * } + * return (s); + * } + * + * + * SPARC T7/M7 Flow : + * + * if (count < SMALL_MAX) { + * if count < SHORTCOPY (SHORTCOPY=3) + * copy bytes; exit with dst addr + * if src & dst aligned on word boundary but not long word boundary, + * copy with ldw/stw; branch to finish_up + * if src & dst aligned on long word boundary + * copy with ldx/stx; branch to finish_up + * if src & dst not aligned and length <= SHORTCHECK (SHORTCHECK=14) + * copy bytes; exit with dst addr + * move enough bytes to get src to word boundary + * if dst now on word boundary + * move_words: + * copy words; branch to finish_up + * if dst now on half word boundary + * load words, shift half words, store words; branch to finish_up + * if dst on byte 1 + * load words, shift 3 bytes, store words; branch to finish_up + * if dst on byte 3 + * load words, shift 1 byte, store words; branch to finish_up + * finish_up: + * copy bytes; exit with dst addr + * } else { More than SMALL_MAX bytes + * move bytes until dst is on long word boundary + * if( src is on long word boundary ) { + * if (count < MED_MAX) { + * finish_long: src/dst aligned on 8 bytes + * copy with ldx/stx in 8-way unrolled loop; + * copy final 0-63 bytes; exit with dst addr + * } else { src/dst aligned; count > MED_MAX + * align dst on 64 byte boundary; for main data movement: + * prefetch src data to L2 cache; let HW prefetch move data to L1 cache + * Use BIS (block initializing store) to avoid copying store cache + * lines from memory. But pre-store first element of each cache line + * ST_CHUNK lines in advance of the rest of that cache line. That + * gives time for replacement cache lines to be written back without + * excess STQ and Miss Buffer filling. Repeat until near the end, + * then finish up storing before going to finish_long. 
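[Editorial note] As a rough illustration of the block-initializing-store pattern just described, the plain-C model below (no BIS; cache-line size and chunk count taken from the definitions that follow, names illustrative) shows the two-pass shape: first touch the leading doubleword of each of the next ST_CHUNK lines, then come back and fill the remaining seven doublewords of each line. The real code uses BIS so the destination lines never have to be read from memory first.

    #include <stdint.h>
    #include <stddef.h>

    #define LINE_WORDS      8       /* 64-byte cache line = 8 doublewords */
    #define ST_CHUNK_MODEL  24      /* matches ST_CHUNK below             */

    static void chunked_copy_model(uint64_t *dst, const uint64_t *src,
                                   size_t lines)
    {
            while (lines >= ST_CHUNK_MODEL) {
                    for (size_t i = 0; i < ST_CHUNK_MODEL; i++)  /* prestore pass */
                            dst[i * LINE_WORDS] = src[i * LINE_WORDS];
                    for (size_t i = 0; i < ST_CHUNK_MODEL; i++)  /* fill the rest */
                            for (size_t w = 1; w < LINE_WORDS; w++)
                                    dst[i * LINE_WORDS + w] =
                                            src[i * LINE_WORDS + w];
                    dst += ST_CHUNK_MODEL * LINE_WORDS;
                    src += ST_CHUNK_MODEL * LINE_WORDS;
                    lines -= ST_CHUNK_MODEL;
            }
            /* fewer than ST_CHUNK_MODEL lines remain; handled separately */
    }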
+ * } + * } else { src/dst not aligned on 8 bytes + * if src is word aligned and count < MED_WMAX + * move words in 8-way unrolled loop + * move final 0-31 bytes; exit with dst addr + * if count < MED_UMAX + * use alignaddr/faligndata combined with ldd/std in 8-way + * unrolled loop to move data. + * go to unalign_done + * else + * setup alignaddr for faligndata instructions + * align dst on 64 byte boundary; prefetch src data to L1 cache + * loadx8, falign, block-store, prefetch loop + * (only use block-init-store when src/dst on 8 byte boundaries.) + * unalign_done: + * move remaining bytes for unaligned cases. exit with dst addr. + * } + * + */ + +#include +#include + +#if !defined(EX_LD) && !defined(EX_ST) +#define NON_USER_COPY +#endif + +#ifndef EX_LD +#define EX_LD(x,y) x +#endif +#ifndef EX_LD_FP +#define EX_LD_FP(x,y) x +#endif + +#ifndef EX_ST +#define EX_ST(x,y) x +#endif +#ifndef EX_ST_FP +#define EX_ST_FP(x,y) x +#endif + +#ifndef EX_RETVAL +#define EX_RETVAL(x) x +#endif + +#ifndef LOAD +#define LOAD(type,addr,dest) type [addr], dest +#endif + +#ifndef STORE +#define STORE(type,src,addr) type src, [addr] +#endif + +/* + * ASI_BLK_INIT_QUAD_LDD_P/ASI_BLK_INIT_QUAD_LDD_S marks the cache + * line as "least recently used" which means if many threads are + * active, it has a high probability of being pushed out of the cache + * between the first initializing store and the final stores. + * Thus, we use ASI_ST_BLKINIT_MRU_P/ASI_ST_BLKINIT_MRU_S which + * marks the cache line as "most recently used" for all + * but the last cache line + */ +#ifndef STORE_ASI +#ifndef SIMULATE_NIAGARA_ON_NON_NIAGARA +#define STORE_ASI ASI_BLK_INIT_QUAD_LDD_P +#else +#define STORE_ASI 0x80 /* ASI_P */ +#endif +#endif + +#ifndef STORE_MRU_ASI +#ifndef SIMULATE_NIAGARA_ON_NON_NIAGARA +#define STORE_MRU_ASI ASI_ST_BLKINIT_MRU_P +#else +#define STORE_MRU_ASI 0x80 /* ASI_P */ +#endif +#endif + +#ifndef STORE_INIT +#define STORE_INIT(src,addr) stxa src, [addr] STORE_ASI +#endif + +#ifndef STORE_INIT_MRU +#define STORE_INIT_MRU(src,addr) stxa src, [addr] STORE_MRU_ASI +#endif + +#ifndef FUNC_NAME +#define FUNC_NAME M7memcpy +#endif + +#ifndef PREAMBLE +#define PREAMBLE +#endif + +#define BLOCK_SIZE 64 +#define SHORTCOPY 3 +#define SHORTCHECK 14 +#define SHORT_LONG 64 /* max copy for short longword-aligned case */ + /* must be at least 64 */ +#define SMALL_MAX 128 +#define MED_UMAX 1024 /* max copy for medium un-aligned case */ +#define MED_WMAX 1024 /* max copy for medium word-aligned case */ +#define MED_MAX 1024 /* max copy for medium longword-aligned case */ +#define ST_CHUNK 24 /* ST_CHUNK - block of values for BIS Store */ +#define ALIGN_PRE 24 /* distance for aligned prefetch loop */ + + .register %g2,#scratch + + .section ".text" + .global FUNC_NAME + .type FUNC_NAME, #function + .align 16 +FUNC_NAME: + srlx %o2, 31, %g2 + cmp %g2, 0 + tne %xcc, 5 + PREAMBLE + mov %o0, %g1 ! save %o0 + brz,pn %o2, .Lsmallx + cmp %o2, 3 + ble,pn %icc, .Ltiny_cp + cmp %o2, 19 + ble,pn %icc, .Lsmall_cp + or %o0, %o1, %g2 + cmp %o2, SMALL_MAX + bl,pn %icc, .Lmedium_cp + nop + +.Lmedium: + neg %o0, %o5 + andcc %o5, 7, %o5 ! bytes till DST 8 byte aligned + brz,pt %o5, .Ldst_aligned_on_8 + + ! %o5 has the bytes to be written in partial store. + sub %o2, %o5, %o2 + sub %o1, %o0, %o1 ! %o1 gets the difference +7: ! dst aligning loop + add %o1, %o0, %o4 + EX_LD(LOAD(ldub, %o4, %o4), memcpy_retl_o2_plus_o5) ! 
load one byte + subcc %o5, 1, %o5 + EX_ST(STORE(stb, %o4, %o0), memcpy_retl_o2_plus_o5_plus_1) + bgu,pt %xcc, 7b + add %o0, 1, %o0 ! advance dst + add %o1, %o0, %o1 ! restore %o1 +.Ldst_aligned_on_8: + andcc %o1, 7, %o5 + brnz,pt %o5, .Lsrc_dst_unaligned_on_8 + nop + +.Lsrc_dst_aligned_on_8: + ! check if we are copying MED_MAX or more bytes + set MED_MAX, %o3 + cmp %o2, %o3 ! limit to store buffer size + bgu,pn %xcc, .Llarge_align8_copy + nop + +/* + * Special case for handling when src and dest are both long word aligned + * and total data to move is less than MED_MAX bytes + */ +.Lmedlong: + subcc %o2, 63, %o2 ! adjust length to allow cc test + ble,pn %xcc, .Lmedl63 ! skip big loop if less than 64 bytes + nop +.Lmedl64: + EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2_plus_63) ! load + subcc %o2, 64, %o2 ! decrement length count + EX_ST(STORE(stx, %o4, %o0), memcpy_retl_o2_plus_63_64) ! and store + EX_LD(LOAD(ldx, %o1+8, %o3), memcpy_retl_o2_plus_63_56) ! a block of 64 + EX_ST(STORE(stx, %o3, %o0+8), memcpy_retl_o2_plus_63_56) + EX_LD(LOAD(ldx, %o1+16, %o4), memcpy_retl_o2_plus_63_48) + EX_ST(STORE(stx, %o4, %o0+16), memcpy_retl_o2_plus_63_48) + EX_LD(LOAD(ldx, %o1+24, %o3), memcpy_retl_o2_plus_63_40) + EX_ST(STORE(stx, %o3, %o0+24), memcpy_retl_o2_plus_63_40) + EX_LD(LOAD(ldx, %o1+32, %o4), memcpy_retl_o2_plus_63_32)! load and store + EX_ST(STORE(stx, %o4, %o0+32), memcpy_retl_o2_plus_63_32) + EX_LD(LOAD(ldx, %o1+40, %o3), memcpy_retl_o2_plus_63_24)! a block of 64 + add %o1, 64, %o1 ! increase src ptr by 64 + EX_ST(STORE(stx, %o3, %o0+40), memcpy_retl_o2_plus_63_24) + EX_LD(LOAD(ldx, %o1-16, %o4), memcpy_retl_o2_plus_63_16) + add %o0, 64, %o0 ! increase dst ptr by 64 + EX_ST(STORE(stx, %o4, %o0-16), memcpy_retl_o2_plus_63_16) + EX_LD(LOAD(ldx, %o1-8, %o3), memcpy_retl_o2_plus_63_8) + bgu,pt %xcc, .Lmedl64 ! repeat if at least 64 bytes left + EX_ST(STORE(stx, %o3, %o0-8), memcpy_retl_o2_plus_63_8) +.Lmedl63: + addcc %o2, 32, %o2 ! adjust remaining count + ble,pt %xcc, .Lmedl31 ! to skip if 31 or fewer bytes left + nop + EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2_plus_31) ! load + sub %o2, 32, %o2 ! decrement length count + EX_ST(STORE(stx, %o4, %o0), memcpy_retl_o2_plus_31_32) ! and store + EX_LD(LOAD(ldx, %o1+8, %o3), memcpy_retl_o2_plus_31_24) ! a block of 32 + add %o1, 32, %o1 ! increase src ptr by 32 + EX_ST(STORE(stx, %o3, %o0+8), memcpy_retl_o2_plus_31_24) + EX_LD(LOAD(ldx, %o1-16, %o4), memcpy_retl_o2_plus_31_16) + add %o0, 32, %o0 ! increase dst ptr by 32 + EX_ST(STORE(stx, %o4, %o0-16), memcpy_retl_o2_plus_31_16) + EX_LD(LOAD(ldx, %o1-8, %o3), memcpy_retl_o2_plus_31_8) + EX_ST(STORE(stx, %o3, %o0-8), memcpy_retl_o2_plus_31_8) +.Lmedl31: + addcc %o2, 16, %o2 ! adjust remaining count + ble,pt %xcc, .Lmedl15 ! skip if 15 or fewer bytes left + nop ! + EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2_plus_15) + add %o1, 16, %o1 ! increase src ptr by 16 + EX_ST(STORE(stx, %o4, %o0), memcpy_retl_o2_plus_15) + sub %o2, 16, %o2 ! decrease count by 16 + EX_LD(LOAD(ldx, %o1-8, %o3), memcpy_retl_o2_plus_15_8) + add %o0, 16, %o0 ! increase dst ptr by 16 + EX_ST(STORE(stx, %o3, %o0-8), memcpy_retl_o2_plus_15_8) +.Lmedl15: + addcc %o2, 15, %o2 ! restore count + bz,pt %xcc, .Lsmallx ! exit if finished + cmp %o2, 8 + blt,pt %xcc, .Lmedw7 ! skip if 7 or fewer bytes left + tst %o2 + EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2) ! load 8 bytes + add %o1, 8, %o1 ! increase src ptr by 8 + add %o0, 8, %o0 ! increase dst ptr by 8 + subcc %o2, 8, %o2 ! 
decrease count by 8 + bnz,pn %xcc, .Lmedw7 + EX_ST(STORE(stx, %o4, %o0-8), memcpy_retl_o2_plus_8) ! and store 8 + retl + mov EX_RETVAL(%g1), %o0 ! restore %o0 + + .align 16 +.Lsrc_dst_unaligned_on_8: + ! DST is 8-byte aligned, src is not +2: + andcc %o1, 0x3, %o5 ! test word alignment + bnz,pt %xcc, .Lunalignsetup ! branch to skip if not word aligned + nop + +/* + * Handle all cases where src and dest are aligned on word + * boundaries. Use unrolled loops for better performance. + * This option wins over standard large data move when + * source and destination is in cache for.Lmedium + * to short data moves. + */ + set MED_WMAX, %o3 + cmp %o2, %o3 ! limit to store buffer size + bge,pt %xcc, .Lunalignrejoin ! otherwise rejoin main loop + nop + + subcc %o2, 31, %o2 ! adjust length to allow cc test + ! for end of loop + ble,pt %xcc, .Lmedw31 ! skip big loop if less than 16 +.Lmedw32: + EX_LD(LOAD(ld, %o1, %o4), memcpy_retl_o2_plus_31)! move a block of 32 + sllx %o4, 32, %o5 + EX_LD(LOAD(ld, %o1+4, %o4), memcpy_retl_o2_plus_31) + or %o4, %o5, %o5 + EX_ST(STORE(stx, %o5, %o0), memcpy_retl_o2_plus_31) + subcc %o2, 32, %o2 ! decrement length count + EX_LD(LOAD(ld, %o1+8, %o4), memcpy_retl_o2_plus_31_24) + sllx %o4, 32, %o5 + EX_LD(LOAD(ld, %o1+12, %o4), memcpy_retl_o2_plus_31_24) + or %o4, %o5, %o5 + EX_ST(STORE(stx, %o5, %o0+8), memcpy_retl_o2_plus_31_24) + add %o1, 32, %o1 ! increase src ptr by 32 + EX_LD(LOAD(ld, %o1-16, %o4), memcpy_retl_o2_plus_31_16) + sllx %o4, 32, %o5 + EX_LD(LOAD(ld, %o1-12, %o4), memcpy_retl_o2_plus_31_16) + or %o4, %o5, %o5 + EX_ST(STORE(stx, %o5, %o0+16), memcpy_retl_o2_plus_31_16) + add %o0, 32, %o0 ! increase dst ptr by 32 + EX_LD(LOAD(ld, %o1-8, %o4), memcpy_retl_o2_plus_31_8) + sllx %o4, 32, %o5 + EX_LD(LOAD(ld, %o1-4, %o4), memcpy_retl_o2_plus_31_8) + or %o4, %o5, %o5 + bgu,pt %xcc, .Lmedw32 ! repeat if at least 32 bytes left + EX_ST(STORE(stx, %o5, %o0-8), memcpy_retl_o2_plus_31_8) +.Lmedw31: + addcc %o2, 31, %o2 ! restore count + + bz,pt %xcc, .Lsmallx ! exit if finished + nop + cmp %o2, 16 + blt,pt %xcc, .Lmedw15 + nop + EX_LD(LOAD(ld, %o1, %o4), memcpy_retl_o2)! move a block of 16 bytes + sllx %o4, 32, %o5 + subcc %o2, 16, %o2 ! decrement length count + EX_LD(LOAD(ld, %o1+4, %o4), memcpy_retl_o2_plus_16) + or %o4, %o5, %o5 + EX_ST(STORE(stx, %o5, %o0), memcpy_retl_o2_plus_16) + add %o1, 16, %o1 ! increase src ptr by 16 + EX_LD(LOAD(ld, %o1-8, %o4), memcpy_retl_o2_plus_8) + add %o0, 16, %o0 ! increase dst ptr by 16 + sllx %o4, 32, %o5 + EX_LD(LOAD(ld, %o1-4, %o4), memcpy_retl_o2_plus_8) + or %o4, %o5, %o5 + EX_ST(STORE(stx, %o5, %o0-8), memcpy_retl_o2_plus_8) +.Lmedw15: + bz,pt %xcc, .Lsmallx ! exit if finished + cmp %o2, 8 + blt,pn %xcc, .Lmedw7 ! skip if 7 or fewer bytes left + tst %o2 + EX_LD(LOAD(ld, %o1, %o4), memcpy_retl_o2) ! load 4 bytes + subcc %o2, 8, %o2 ! decrease count by 8 + EX_ST(STORE(stw, %o4, %o0), memcpy_retl_o2_plus_8)! and store 4 bytes + add %o1, 8, %o1 ! increase src ptr by 8 + EX_LD(LOAD(ld, %o1-4, %o3), memcpy_retl_o2_plus_4) ! load 4 bytes + add %o0, 8, %o0 ! increase dst ptr by 8 + EX_ST(STORE(stw, %o3, %o0-4), memcpy_retl_o2_plus_4)! and store 4 bytes + bz,pt %xcc, .Lsmallx ! exit if finished +.Lmedw7: ! count is ge 1, less than 8 + cmp %o2, 4 ! check for 4 bytes left + blt,pn %xcc, .Lsmallleft3 ! skip if 3 or fewer bytes left + nop ! + EX_LD(LOAD(ld, %o1, %o4), memcpy_retl_o2) ! load 4 bytes + add %o1, 4, %o1 ! increase src ptr by 4 + add %o0, 4, %o0 ! increase dst ptr by 4 + subcc %o2, 4, %o2 ! 
decrease count by 4 + bnz .Lsmallleft3 + EX_ST(STORE(stw, %o4, %o0-4), memcpy_retl_o2_plus_4)! and store 4 bytes + retl + mov EX_RETVAL(%g1), %o0 + + .align 16 +.Llarge_align8_copy: ! Src and dst share 8 byte alignment + ! align dst to 64 byte boundary + andcc %o0, 0x3f, %o3 ! %o3 == 0 means dst is 64 byte aligned + brz,pn %o3, .Laligned_to_64 + andcc %o0, 8, %o3 ! odd long words to move? + brz,pt %o3, .Laligned_to_16 + nop + EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2) + sub %o2, 8, %o2 + add %o1, 8, %o1 ! increment src ptr + add %o0, 8, %o0 ! increment dst ptr + EX_ST(STORE(stx, %o4, %o0-8), memcpy_retl_o2_plus_8) +.Laligned_to_16: + andcc %o0, 16, %o3 ! pair of long words to move? + brz,pt %o3, .Laligned_to_32 + nop + EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2) + sub %o2, 16, %o2 + EX_ST(STORE(stx, %o4, %o0), memcpy_retl_o2_plus_16) + add %o1, 16, %o1 ! increment src ptr + EX_LD(LOAD(ldx, %o1-8, %o4), memcpy_retl_o2_plus_8) + add %o0, 16, %o0 ! increment dst ptr + EX_ST(STORE(stx, %o4, %o0-8), memcpy_retl_o2_plus_8) +.Laligned_to_32: + andcc %o0, 32, %o3 ! four long words to move? + brz,pt %o3, .Laligned_to_64 + nop + EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2) + sub %o2, 32, %o2 + EX_ST(STORE(stx, %o4, %o0), memcpy_retl_o2_plus_32) + EX_LD(LOAD(ldx, %o1+8, %o4), memcpy_retl_o2_plus_24) + EX_ST(STORE(stx, %o4, %o0+8), memcpy_retl_o2_plus_24) + EX_LD(LOAD(ldx, %o1+16, %o4), memcpy_retl_o2_plus_16) + EX_ST(STORE(stx, %o4, %o0+16), memcpy_retl_o2_plus_16) + add %o1, 32, %o1 ! increment src ptr + EX_LD(LOAD(ldx, %o1-8, %o4), memcpy_retl_o2_plus_8) + add %o0, 32, %o0 ! increment dst ptr + EX_ST(STORE(stx, %o4, %o0-8), memcpy_retl_o2_plus_8) +.Laligned_to_64: +! +! Using block init store (BIS) instructions to avoid fetching cache +! lines from memory. Use ST_CHUNK stores to first element of each cache +! line (similar to prefetching) to avoid overfilling STQ or miss buffers. +! Gives existing cache lines time to be moved out of L1/L2/L3 cache. +! Initial stores using MRU version of BIS to keep cache line in +! cache until we are ready to store final element of cache line. +! Then store last element using the LRU version of BIS. +! + andn %o2, 0x3f, %o5 ! %o5 is multiple of block size + and %o2, 0x3f, %o2 ! residue bytes in %o2 +! +! We use STORE_MRU_ASI for the first seven stores to each cache line +! followed by STORE_ASI (mark as LRU) for the last store. That +! mixed approach reduces the probability that the cache line is removed +! before we finish setting it, while minimizing the effects on +! other cached values during a large memcpy +! +! ST_CHUNK batches up initial BIS operations for several cache lines +! to allow multiple requests to not be blocked by overflowing the +! the store miss buffer. Then the matching stores for all those +! BIS operations are executed. +! + + sub %o0, 8, %o0 ! adjust %o0 for ASI alignment +.Lalign_loop: + cmp %o5, ST_CHUNK*64 + blu,pt %xcc, .Lalign_loop_fin + mov ST_CHUNK,%o3 +.Lalign_loop_start: + prefetch [%o1 + (ALIGN_PRE * BLOCK_SIZE)], 21 + subcc %o3, 1, %o3 + EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2_plus_o5) + add %o1, 64, %o1 + add %o0, 8, %o0 + EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5) + bgu %xcc,.Lalign_loop_start + add %o0, 56, %o0 + + mov ST_CHUNK,%o3 + sllx %o3, 6, %o4 ! ST_CHUNK*64 + sub %o1, %o4, %o1 ! reset %o1 + sub %o0, %o4, %o0 ! 
reset %o0 + +.Lalign_loop_rest: + EX_LD(LOAD(ldx, %o1+8, %o4), memcpy_retl_o2_plus_o5) + add %o0, 16, %o0 + EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1+16, %o4), memcpy_retl_o2_plus_o5) + add %o0, 8, %o0 + EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5) + subcc %o3, 1, %o3 + EX_LD(LOAD(ldx, %o1+24, %o4), memcpy_retl_o2_plus_o5) + add %o0, 8, %o0 + EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1+32, %o4), memcpy_retl_o2_plus_o5) + add %o0, 8, %o0 + EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1+40, %o4), memcpy_retl_o2_plus_o5) + add %o0, 8, %o0 + EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1+48, %o4), memcpy_retl_o2_plus_o5) + add %o1, 64, %o1 + add %o0, 8, %o0 + EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5) + add %o0, 8, %o0 + EX_LD(LOAD(ldx, %o1-8, %o4), memcpy_retl_o2_plus_o5) + sub %o5, 64, %o5 + bgu %xcc,.Lalign_loop_rest + ! mark cache line as LRU + EX_ST(STORE_INIT(%o4, %o0), memcpy_retl_o2_plus_o5_plus_64) + + cmp %o5, ST_CHUNK*64 + bgu,pt %xcc, .Lalign_loop_start + mov ST_CHUNK,%o3 + + cmp %o5, 0 + beq .Lalign_done + nop +.Lalign_loop_fin: + EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2_plus_o5) + EX_ST(STORE(stx, %o4, %o0+8), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1+8, %o4), memcpy_retl_o2_plus_o5) + EX_ST(STORE(stx, %o4, %o0+8+8), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1+16, %o4), memcpy_retl_o2_plus_o5) + EX_ST(STORE(stx, %o4, %o0+8+16), memcpy_retl_o2_plus_o5) + subcc %o5, 64, %o5 + EX_LD(LOAD(ldx, %o1+24, %o4), memcpy_retl_o2_plus_o5_64) + EX_ST(STORE(stx, %o4, %o0+8+24), memcpy_retl_o2_plus_o5_64) + EX_LD(LOAD(ldx, %o1+32, %o4), memcpy_retl_o2_plus_o5_64) + EX_ST(STORE(stx, %o4, %o0+8+32), memcpy_retl_o2_plus_o5_64) + EX_LD(LOAD(ldx, %o1+40, %o4), memcpy_retl_o2_plus_o5_64) + EX_ST(STORE(stx, %o4, %o0+8+40), memcpy_retl_o2_plus_o5_64) + EX_LD(LOAD(ldx, %o1+48, %o4), memcpy_retl_o2_plus_o5_64) + add %o1, 64, %o1 + EX_ST(STORE(stx, %o4, %o0+8+48), memcpy_retl_o2_plus_o5_64) + add %o0, 64, %o0 + EX_LD(LOAD(ldx, %o1-8, %o4), memcpy_retl_o2_plus_o5_64) + bgu %xcc,.Lalign_loop_fin + EX_ST(STORE(stx, %o4, %o0), memcpy_retl_o2_plus_o5_64) + +.Lalign_done: + add %o0, 8, %o0 ! restore %o0 from ASI alignment + membar #StoreStore + sub %o2, 63, %o2 ! adjust length to allow cc test + ba .Lmedl63 ! in .Lmedl63 + nop + + .align 16 + ! Dst is on 8 byte boundary; src is not; remaining count > SMALL_MAX +.Lunalignsetup: +.Lunalignrejoin: + mov %g1, %o3 ! save %g1 as VISEntryHalf clobbers it +#ifdef NON_USER_COPY + VISEntryHalfFast(.Lmedium_vis_entry_fail_cp) +#else + VISEntryHalf +#endif + mov %o3, %g1 ! restore %g1 + + set MED_UMAX, %o3 + cmp %o2, %o3 ! check for.Lmedium unaligned limit + bge,pt %xcc,.Lunalign_large + prefetch [%o1 + (4 * BLOCK_SIZE)], 20 + andn %o2, 0x3f, %o5 ! %o5 is multiple of block size + and %o2, 0x3f, %o2 ! residue bytes in %o2 + cmp %o2, 8 ! Insure we do not load beyond + bgt .Lunalign_adjust ! end of source buffer + andn %o1, 0x7, %o4 ! %o4 has long word aligned src address + add %o2, 64, %o2 ! adjust to leave loop + sub %o5, 64, %o5 ! early if necessary +.Lunalign_adjust: + alignaddr %o1, %g0, %g0 ! generate %gsr + add %o1, %o5, %o1 ! 
advance %o1 to after blocks + EX_LD_FP(LOAD(ldd, %o4, %f0), memcpy_retl_o2_plus_o5) +.Lunalign_loop: + EX_LD_FP(LOAD(ldd, %o4+8, %f2), memcpy_retl_o2_plus_o5) + faligndata %f0, %f2, %f16 + EX_LD_FP(LOAD(ldd, %o4+16, %f4), memcpy_retl_o2_plus_o5) + subcc %o5, BLOCK_SIZE, %o5 + EX_ST_FP(STORE(std, %f16, %o0), memcpy_retl_o2_plus_o5_plus_64) + faligndata %f2, %f4, %f18 + EX_LD_FP(LOAD(ldd, %o4+24, %f6), memcpy_retl_o2_plus_o5_plus_56) + EX_ST_FP(STORE(std, %f18, %o0+8), memcpy_retl_o2_plus_o5_plus_56) + faligndata %f4, %f6, %f20 + EX_LD_FP(LOAD(ldd, %o4+32, %f8), memcpy_retl_o2_plus_o5_plus_48) + EX_ST_FP(STORE(std, %f20, %o0+16), memcpy_retl_o2_plus_o5_plus_48) + faligndata %f6, %f8, %f22 + EX_LD_FP(LOAD(ldd, %o4+40, %f10), memcpy_retl_o2_plus_o5_plus_40) + EX_ST_FP(STORE(std, %f22, %o0+24), memcpy_retl_o2_plus_o5_plus_40) + faligndata %f8, %f10, %f24 + EX_LD_FP(LOAD(ldd, %o4+48, %f12), memcpy_retl_o2_plus_o5_plus_32) + EX_ST_FP(STORE(std, %f24, %o0+32), memcpy_retl_o2_plus_o5_plus_32) + faligndata %f10, %f12, %f26 + EX_LD_FP(LOAD(ldd, %o4+56, %f14), memcpy_retl_o2_plus_o5_plus_24) + add %o4, BLOCK_SIZE, %o4 + EX_ST_FP(STORE(std, %f26, %o0+40), memcpy_retl_o2_plus_o5_plus_24) + faligndata %f12, %f14, %f28 + EX_LD_FP(LOAD(ldd, %o4, %f0), memcpy_retl_o2_plus_o5_plus_16) + EX_ST_FP(STORE(std, %f28, %o0+48), memcpy_retl_o2_plus_o5_plus_16) + faligndata %f14, %f0, %f30 + EX_ST_FP(STORE(std, %f30, %o0+56), memcpy_retl_o2_plus_o5_plus_8) + add %o0, BLOCK_SIZE, %o0 + bgu,pt %xcc, .Lunalign_loop + prefetch [%o4 + (5 * BLOCK_SIZE)], 20 + ba .Lunalign_done + nop + +.Lunalign_large: + andcc %o0, 0x3f, %o3 ! is dst 64-byte block aligned? + bz %xcc, .Lunalignsrc + sub %o3, 64, %o3 ! %o3 will be multiple of 8 + neg %o3 ! bytes until dest is 64 byte aligned + sub %o2, %o3, %o2 ! update cnt with bytes to be moved + ! Move bytes according to source alignment + andcc %o1, 0x1, %o5 + bnz %xcc, .Lunalignbyte ! check for byte alignment + nop + andcc %o1, 2, %o5 ! check for half word alignment + bnz %xcc, .Lunalignhalf + nop + ! Src is word aligned +.Lunalignword: + EX_LD_FP(LOAD(ld, %o1, %o4), memcpy_retl_o2_plus_o3) ! load 4 bytes + add %o1, 8, %o1 ! increase src ptr by 8 + EX_ST_FP(STORE(stw, %o4, %o0), memcpy_retl_o2_plus_o3) ! and store 4 + subcc %o3, 8, %o3 ! decrease count by 8 + EX_LD_FP(LOAD(ld, %o1-4, %o4), memcpy_retl_o2_plus_o3_plus_4)! load 4 + add %o0, 8, %o0 ! increase dst ptr by 8 + bnz %xcc, .Lunalignword + EX_ST_FP(STORE(stw, %o4, %o0-4), memcpy_retl_o2_plus_o3_plus_4) + ba .Lunalignsrc + nop + + ! Src is half-word aligned +.Lunalignhalf: + EX_LD_FP(LOAD(lduh, %o1, %o4), memcpy_retl_o2_plus_o3) ! load 2 bytes + sllx %o4, 32, %o5 ! shift left + EX_LD_FP(LOAD(lduw, %o1+2, %o4), memcpy_retl_o2_plus_o3) + or %o4, %o5, %o5 + sllx %o5, 16, %o5 + EX_LD_FP(LOAD(lduh, %o1+6, %o4), memcpy_retl_o2_plus_o3) + or %o4, %o5, %o5 + EX_ST_FP(STORE(stx, %o5, %o0), memcpy_retl_o2_plus_o3) + add %o1, 8, %o1 + subcc %o3, 8, %o3 + bnz %xcc, .Lunalignhalf + add %o0, 8, %o0 + ba .Lunalignsrc + nop + + ! Src is Byte aligned +.Lunalignbyte: + sub %o0, %o1, %o0 ! 
share pointer advance +.Lunalignbyte_loop: + EX_LD_FP(LOAD(ldub, %o1, %o4), memcpy_retl_o2_plus_o3) + sllx %o4, 56, %o5 + EX_LD_FP(LOAD(lduh, %o1+1, %o4), memcpy_retl_o2_plus_o3) + sllx %o4, 40, %o4 + or %o4, %o5, %o5 + EX_LD_FP(LOAD(lduh, %o1+3, %o4), memcpy_retl_o2_plus_o3) + sllx %o4, 24, %o4 + or %o4, %o5, %o5 + EX_LD_FP(LOAD(lduh, %o1+5, %o4), memcpy_retl_o2_plus_o3) + sllx %o4, 8, %o4 + or %o4, %o5, %o5 + EX_LD_FP(LOAD(ldub, %o1+7, %o4), memcpy_retl_o2_plus_o3) + or %o4, %o5, %o5 + add %o0, %o1, %o0 + EX_ST_FP(STORE(stx, %o5, %o0), memcpy_retl_o2_plus_o3) + sub %o0, %o1, %o0 + subcc %o3, 8, %o3 + bnz %xcc, .Lunalignbyte_loop + add %o1, 8, %o1 + add %o0,%o1, %o0 ! restore pointer + + ! Destination is now block (64 byte aligned) +.Lunalignsrc: + andn %o2, 0x3f, %o5 ! %o5 is multiple of block size + and %o2, 0x3f, %o2 ! residue bytes in %o2 + add %o2, 64, %o2 ! Insure we do not load beyond + sub %o5, 64, %o5 ! end of source buffer + + andn %o1, 0x7, %o4 ! %o4 has long word aligned src address + alignaddr %o1, %g0, %g0 ! generate %gsr + add %o1, %o5, %o1 ! advance %o1 to after blocks + + EX_LD_FP(LOAD(ldd, %o4, %f14), memcpy_retl_o2_plus_o5) + add %o4, 8, %o4 +.Lunalign_sloop: + EX_LD_FP(LOAD(ldd, %o4, %f16), memcpy_retl_o2_plus_o5) + faligndata %f14, %f16, %f0 + EX_LD_FP(LOAD(ldd, %o4+8, %f18), memcpy_retl_o2_plus_o5) + faligndata %f16, %f18, %f2 + EX_LD_FP(LOAD(ldd, %o4+16, %f20), memcpy_retl_o2_plus_o5) + faligndata %f18, %f20, %f4 + EX_ST_FP(STORE(std, %f0, %o0), memcpy_retl_o2_plus_o5) + subcc %o5, 64, %o5 + EX_LD_FP(LOAD(ldd, %o4+24, %f22), memcpy_retl_o2_plus_o5_plus_56) + faligndata %f20, %f22, %f6 + EX_ST_FP(STORE(std, %f2, %o0+8), memcpy_retl_o2_plus_o5_plus_56) + EX_LD_FP(LOAD(ldd, %o4+32, %f24), memcpy_retl_o2_plus_o5_plus_48) + faligndata %f22, %f24, %f8 + EX_ST_FP(STORE(std, %f4, %o0+16), memcpy_retl_o2_plus_o5_plus_48) + EX_LD_FP(LOAD(ldd, %o4+40, %f26), memcpy_retl_o2_plus_o5_plus_40) + faligndata %f24, %f26, %f10 + EX_ST_FP(STORE(std, %f6, %o0+24), memcpy_retl_o2_plus_o5_plus_40) + EX_LD_FP(LOAD(ldd, %o4+48, %f28), memcpy_retl_o2_plus_o5_plus_40) + faligndata %f26, %f28, %f12 + EX_ST_FP(STORE(std, %f8, %o0+32), memcpy_retl_o2_plus_o5_plus_40) + add %o4, 64, %o4 + EX_LD_FP(LOAD(ldd, %o4-8, %f30), memcpy_retl_o2_plus_o5_plus_40) + faligndata %f28, %f30, %f14 + EX_ST_FP(STORE(std, %f10, %o0+40), memcpy_retl_o2_plus_o5_plus_40) + EX_ST_FP(STORE(std, %f12, %o0+48), memcpy_retl_o2_plus_o5_plus_40) + add %o0, 64, %o0 + EX_ST_FP(STORE(std, %f14, %o0-8), memcpy_retl_o2_plus_o5_plus_40) + fsrc2 %f30, %f14 + bgu,pt %xcc, .Lunalign_sloop + prefetch [%o4 + (8 * BLOCK_SIZE)], 20 + +.Lunalign_done: + ! Handle trailing bytes, 64 to 127 + ! Dest long word aligned, Src not long word aligned + cmp %o2, 15 + bleu %xcc, .Lunalign_short + + andn %o2, 0x7, %o5 ! %o5 is multiple of 8 + and %o2, 0x7, %o2 ! residue bytes in %o2 + add %o2, 8, %o2 + sub %o5, 8, %o5 ! insure we do not load past end of src + andn %o1, 0x7, %o4 ! %o4 has long word aligned src address + add %o1, %o5, %o1 ! advance %o1 to after multiple of 8 + EX_LD_FP(LOAD(ldd, %o4, %f0), memcpy_retl_o2_plus_o5)! 
fetch partialword +.Lunalign_by8: + EX_LD_FP(LOAD(ldd, %o4+8, %f2), memcpy_retl_o2_plus_o5) + add %o4, 8, %o4 + faligndata %f0, %f2, %f16 + subcc %o5, 8, %o5 + EX_ST_FP(STORE(std, %f16, %o0), memcpy_retl_o2_plus_o5) + fsrc2 %f2, %f0 + bgu,pt %xcc, .Lunalign_by8 + add %o0, 8, %o0 + +.Lunalign_short: +#ifdef NON_USER_COPY + VISExitHalfFast +#else + VISExitHalf +#endif + ba .Lsmallrest + nop + +/* + * This is a special case of nested memcpy. This can happen when kernel + * calls unaligned memcpy back to back without saving FP registers. We need + * traps(context switch) to save/restore FP registers. If the kernel calls + * memcpy without this trap sequence we will hit FP corruption. Let's use + * the normal integer load/store method in this case. + */ + +#ifdef NON_USER_COPY +.Lmedium_vis_entry_fail_cp: + or %o0, %o1, %g2 +#endif +.Lmedium_cp: + LOAD(prefetch, %o1 + 0x40, #n_reads_strong) + andcc %g2, 0x7, %g0 + bne,pn %xcc, .Lmedium_unaligned_cp + nop + +.Lmedium_noprefetch_cp: + andncc %o2, 0x20 - 1, %o5 + be,pn %xcc, 2f + sub %o2, %o5, %o2 +1: EX_LD(LOAD(ldx, %o1 + 0x00, %o3), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1 + 0x08, %g2), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1 + 0x10, %g7), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1 + 0x18, %o4), memcpy_retl_o2_plus_o5) + add %o1, 0x20, %o1 + subcc %o5, 0x20, %o5 + EX_ST(STORE(stx, %o3, %o0 + 0x00), memcpy_retl_o2_plus_o5_plus_32) + EX_ST(STORE(stx, %g2, %o0 + 0x08), memcpy_retl_o2_plus_o5_plus_24) + EX_ST(STORE(stx, %g7, %o0 + 0x10), memcpy_retl_o2_plus_o5_plus_24) + EX_ST(STORE(stx, %o4, %o0 + 0x18), memcpy_retl_o2_plus_o5_plus_8) + bne,pt %xcc, 1b + add %o0, 0x20, %o0 +2: andcc %o2, 0x18, %o5 + be,pt %xcc, 3f + sub %o2, %o5, %o2 +1: EX_LD(LOAD(ldx, %o1 + 0x00, %o3), memcpy_retl_o2_plus_o5) + add %o1, 0x08, %o1 + add %o0, 0x08, %o0 + subcc %o5, 0x08, %o5 + bne,pt %xcc, 1b + EX_ST(STORE(stx, %o3, %o0 - 0x08), memcpy_retl_o2_plus_o5_plus_8) +3: brz,pt %o2, .Lexit_cp + cmp %o2, 0x04 + bl,pn %xcc, .Ltiny_cp + nop + EX_LD(LOAD(lduw, %o1 + 0x00, %o3), memcpy_retl_o2) + add %o1, 0x04, %o1 + add %o0, 0x04, %o0 + subcc %o2, 0x04, %o2 + bne,pn %xcc, .Ltiny_cp + EX_ST(STORE(stw, %o3, %o0 - 0x04), memcpy_retl_o2_plus_4) + ba,a,pt %xcc, .Lexit_cp + +.Lmedium_unaligned_cp: + /* First get dest 8 byte aligned. 
*/ + sub %g0, %o0, %o3 + and %o3, 0x7, %o3 + brz,pt %o3, 2f + sub %o2, %o3, %o2 + +1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), memcpy_retl_o2_plus_g1) + add %o1, 1, %o1 + subcc %o3, 1, %o3 + add %o0, 1, %o0 + bne,pt %xcc, 1b + EX_ST(STORE(stb, %g2, %o0 - 0x01), memcpy_retl_o2_plus_g1_plus_1) +2: + and %o1, 0x7, %o3 + brz,pn %o3, .Lmedium_noprefetch_cp + sll %o3, 3, %o3 + mov 64, %g2 + sub %g2, %o3, %g2 + andn %o1, 0x7, %o1 + EX_LD(LOAD(ldx, %o1 + 0x00, %o4), memcpy_retl_o2) + sllx %o4, %o3, %o4 + andn %o2, 0x08 - 1, %o5 + sub %o2, %o5, %o2 + +1: EX_LD(LOAD(ldx, %o1 + 0x08, %g3), memcpy_retl_o2_plus_o5) + add %o1, 0x08, %o1 + subcc %o5, 0x08, %o5 + srlx %g3, %g2, %g7 + or %g7, %o4, %g7 + EX_ST(STORE(stx, %g7, %o0 + 0x00), memcpy_retl_o2_plus_o5_plus_8) + add %o0, 0x08, %o0 + bne,pt %xcc, 1b + sllx %g3, %o3, %o4 + srl %o3, 3, %o3 + add %o1, %o3, %o1 + brz,pn %o2, .Lexit_cp + nop + ba,pt %xcc, .Lsmall_unaligned_cp + +.Ltiny_cp: + EX_LD(LOAD(ldub, %o1 + 0x00, %o3), memcpy_retl_o2) + subcc %o2, 1, %o2 + be,pn %xcc, .Lexit_cp + EX_ST(STORE(stb, %o3, %o0 + 0x00), memcpy_retl_o2_plus_1) + EX_LD(LOAD(ldub, %o1 + 0x01, %o3), memcpy_retl_o2) + subcc %o2, 1, %o2 + be,pn %xcc, .Lexit_cp + EX_ST(STORE(stb, %o3, %o0 + 0x01), memcpy_retl_o2_plus_1) + EX_LD(LOAD(ldub, %o1 + 0x02, %o3), memcpy_retl_o2) + ba,pt %xcc, .Lexit_cp + EX_ST(STORE(stb, %o3, %o0 + 0x02), memcpy_retl_o2) + +.Lsmall_cp: + andcc %g2, 0x3, %g0 + bne,pn %xcc, .Lsmall_unaligned_cp + andn %o2, 0x4 - 1, %o5 + sub %o2, %o5, %o2 +1: + EX_LD(LOAD(lduw, %o1 + 0x00, %o3), memcpy_retl_o2_plus_o5) + add %o1, 0x04, %o1 + subcc %o5, 0x04, %o5 + add %o0, 0x04, %o0 + bne,pt %xcc, 1b + EX_ST(STORE(stw, %o3, %o0 - 0x04), memcpy_retl_o2_plus_o5_plus_4) + brz,pt %o2, .Lexit_cp + nop + ba,a,pt %xcc, .Ltiny_cp + +.Lsmall_unaligned_cp: +1: EX_LD(LOAD(ldub, %o1 + 0x00, %o3), memcpy_retl_o2) + add %o1, 1, %o1 + add %o0, 1, %o0 + subcc %o2, 1, %o2 + bne,pt %xcc, 1b + EX_ST(STORE(stb, %o3, %o0 - 0x01), memcpy_retl_o2_plus_1) + ba,a,pt %xcc, .Lexit_cp + +.Lsmallrest: + tst %o2 + bz,pt %xcc, .Lsmallx + cmp %o2, 4 + blt,pn %xcc, .Lsmallleft3 + nop + sub %o2, 3, %o2 +.Lsmallnotalign4: + EX_LD(LOAD(ldub, %o1, %o3), memcpy_retl_o2_plus_3)! read byte + subcc %o2, 4, %o2 ! reduce count by 4 + EX_ST(STORE(stb, %o3, %o0), memcpy_retl_o2_plus_7)! write byte & repeat + EX_LD(LOAD(ldub, %o1+1, %o3), memcpy_retl_o2_plus_6)! for total of 4 + add %o1, 4, %o1 ! advance SRC by 4 + EX_ST(STORE(stb, %o3, %o0+1), memcpy_retl_o2_plus_6) + EX_LD(LOAD(ldub, %o1-2, %o3), memcpy_retl_o2_plus_5) + add %o0, 4, %o0 ! advance DST by 4 + EX_ST(STORE(stb, %o3, %o0-2), memcpy_retl_o2_plus_5) + EX_LD(LOAD(ldub, %o1-1, %o3), memcpy_retl_o2_plus_4) + bgu,pt %xcc, .Lsmallnotalign4 ! loop til 3 or fewer bytes remain + EX_ST(STORE(stb, %o3, %o0-1), memcpy_retl_o2_plus_4) + addcc %o2, 3, %o2 ! restore count + bz,pt %xcc, .Lsmallx +.Lsmallleft3: ! 1, 2, or 3 bytes remain + subcc %o2, 1, %o2 + EX_LD(LOAD(ldub, %o1, %o3), memcpy_retl_o2_plus_1) ! load one byte + bz,pt %xcc, .Lsmallx + EX_ST(STORE(stb, %o3, %o0), memcpy_retl_o2_plus_1) ! store one byte + EX_LD(LOAD(ldub, %o1+1, %o3), memcpy_retl_o2) ! load second byte + subcc %o2, 1, %o2 + bz,pt %xcc, .Lsmallx + EX_ST(STORE(stb, %o3, %o0+1), memcpy_retl_o2_plus_1)! store second byte + EX_LD(LOAD(ldub, %o1+2, %o3), memcpy_retl_o2) ! load third byte + EX_ST(STORE(stb, %o3, %o0+2), memcpy_retl_o2) ! store third byte +.Lsmallx: + retl + mov EX_RETVAL(%g1), %o0 +.Lsmallfin: + tst %o2 + bnz,pn %xcc, .Lsmallleft3 + nop + retl + mov EX_RETVAL(%g1), %o0 ! 
restore %o0 +.Lexit_cp: + retl + mov EX_RETVAL(%g1), %o0 + .size FUNC_NAME, .-FUNC_NAME diff --git a/arch/sparc/lib/M7memset.S b/arch/sparc/lib/M7memset.S new file mode 100644 index 000000000000..62ea91b3a6b8 --- /dev/null +++ b/arch/sparc/lib/M7memset.S @@ -0,0 +1,352 @@ +/* + * M7memset.S: SPARC M7 optimized memset. + * + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + */ + +/* + * M7memset.S: M7 optimized memset. + * + * char *memset(sp, c, n) + * + * Set an array of n chars starting at sp to the character c. + * Return sp. + * + * Fast assembler language version of the following C-program for memset + * which represents the `standard' for the C-library. + * + * void * + * memset(void *sp1, int c, size_t n) + * { + * if (n != 0) { + * char *sp = sp1; + * do { + * *sp++ = (char)c; + * } while (--n != 0); + * } + * return (sp1); + * } + * + * The algorithm is as follows : + * + * For small 6 or fewer bytes stores, bytes will be stored. + * + * For less than 32 bytes stores, align the address on 4 byte boundary. + * Then store as many 4-byte chunks, followed by trailing bytes. + * + * For sizes greater than 32 bytes, align the address on 8 byte boundary. + * if (count >= 64) { + * store 8-bytes chunks to align the address on 64 byte boundary + * if (value to be set is zero && count >= MIN_ZERO) { + * Using BIS stores, set the first long word of each + * 64-byte cache line to zero which will also clear the + * other seven long words of the cache line. + * } + * else if (count >= MIN_LOOP) { + * Using BIS stores, set the first long word of each of + * ST_CHUNK cache lines (64 bytes each) before the main + * loop is entered. + * In the main loop, continue pre-setting the first long + * word of each cache line ST_CHUNK lines in advance while + * setting the other seven long words (56 bytes) of each + * cache line until fewer than ST_CHUNK*64 bytes remain. + * Then set the remaining seven long words of each cache + * line that has already had its first long word set. + * } + * store remaining data in 64-byte chunks until less than + * 64 bytes remain. + * } + * Store as many 8-byte chunks, followed by trailing bytes. + * + * BIS = Block Init Store + * Doing the advance store of the first element of the cache line + * initiates the displacement of a cache line while only using a single + * instruction in the pipeline. That avoids various pipeline delays, + * such as filling the miss buffer. The performance effect is + * similar to prefetching for normal stores. + * The special case for zero fills runs faster and uses fewer instruction + * cycles than the normal memset loop. + * + * We only use BIS for memset of greater than MIN_LOOP bytes because a sequence + * BIS stores must be followed by a membar #StoreStore. The benefit of + * the BIS store must be balanced against the cost of the membar operation. + */ + +/* + * ASI_STBI_P marks the cache line as "least recently used" + * which means if many threads are active, it has a high chance + * of being pushed out of the cache between the first initializing + * store and the final stores. + * Thus, we use ASI_STBIMRU_P which marks the cache line as + * "most recently used" for all but the last store to the cache line. 
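[Editorial note] A functional C model of the size-class dispatch outlined in the header comment above: the thresholds (7, 32, 64) follow the description, but the BIS block-store stage and its zero-fill special case are reduced here to an ordinary 8-byte store loop, so this shows structure only, not performance (function name illustrative):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    static void *m7_memset_model(void *sp1, int c, size_t n)
    {
            unsigned char *p = sp1;
            unsigned char b = (unsigned char)c;
            uint64_t pattern = 0x0101010101010101ULL * b;
            size_t align_mask;

            if (n <= 7) {                           /* tiny: byte stores only */
                    while (n--)
                            *p++ = b;
                    return sp1;
            }

            align_mask = (n < 32) ? 3 : 7;          /* word vs. doubleword align */
            while (((uintptr_t)p & align_mask) && n) {
                    *p++ = b;
                    n--;
            }

            while (n >= 8) {                        /* stand-in for stx/BIS loops */
                    memcpy(p, &pattern, 8);
                    p += 8;
                    n -= 8;
            }

            while (n--)                             /* trailing bytes */
                    *p++ = b;
            return sp1;
    }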
+ */ + +#include +#include + +#define ASI_STBI_P ASI_BLK_INIT_QUAD_LDD_P +#define ASI_STBIMRU_P ASI_ST_BLKINIT_MRU_P + + +#define ST_CHUNK 24 /* multiple of 4 due to loop unrolling */ +#define MIN_LOOP 16320 +#define MIN_ZERO 512 + + .section ".text" + .align 32 + +/* + * Define clear_page(dest) as memset(dest, 0, PAGE_SIZE) + * (can create a more optimized version later.) + */ + .globl M7clear_page + .globl M7clear_user_page +M7clear_page: /* clear_page(dest) */ +M7clear_user_page: + set PAGE_SIZE, %o1 + /* fall through into bzero code */ + + .size M7clear_page,.-M7clear_page + .size M7clear_user_page,.-M7clear_user_page + +/* + * Define bzero(dest, n) as memset(dest, 0, n) + * (can create a more optimized version later.) + */ + .globl M7bzero +M7bzero: /* bzero(dest, size) */ + mov %o1, %o2 + mov 0, %o1 + /* fall through into memset code */ + + .size M7bzero,.-M7bzero + + .global M7memset + .type M7memset, #function + .register %g3, #scratch +M7memset: + mov %o0, %o5 ! copy sp1 before using it + cmp %o2, 7 ! if small counts, just write bytes + bleu,pn %xcc, .wrchar + and %o1, 0xff, %o1 ! o1 is (char)c + + sll %o1, 8, %o3 + or %o1, %o3, %o1 ! now o1 has 2 bytes of c + sll %o1, 16, %o3 + cmp %o2, 32 + blu,pn %xcc, .wdalign + or %o1, %o3, %o1 ! now o1 has 4 bytes of c + + sllx %o1, 32, %o3 + or %o1, %o3, %o1 ! now o1 has 8 bytes of c + +.dbalign: + andcc %o5, 7, %o3 ! is sp1 aligned on a 8 byte bound? + bz,pt %xcc, .blkalign ! already long word aligned + sub %o3, 8, %o3 ! -(bytes till long word aligned) + + add %o2, %o3, %o2 ! update o2 with new count + ! Set -(%o3) bytes till sp1 long word aligned +1: stb %o1, [%o5] ! there is at least 1 byte to set + inccc %o3 ! byte clearing loop + bl,pt %xcc, 1b + inc %o5 + + ! Now sp1 is long word aligned (sp1 is found in %o5) +.blkalign: + cmp %o2, 64 ! check if there are 64 bytes to set + blu,pn %xcc, .wrshort + mov %o2, %o3 + + andcc %o5, 63, %o3 ! is sp1 block aligned? + bz,pt %xcc, .blkwr ! now block aligned + sub %o3, 64, %o3 ! o3 is -(bytes till block aligned) + add %o2, %o3, %o2 ! o2 is the remainder + + ! Store -(%o3) bytes till dst is block (64 byte) aligned. + ! Use long word stores. + ! Recall that dst is already long word aligned +1: + addcc %o3, 8, %o3 + stx %o1, [%o5] + bl,pt %xcc, 1b + add %o5, 8, %o5 + + ! Now sp1 is block aligned +.blkwr: + andn %o2, 63, %o4 ! calculate size of blocks in bytes + brz,pn %o1, .wrzero ! special case if c == 0 + and %o2, 63, %o3 ! %o3 = bytes left after blk stores. + + set MIN_LOOP, %g1 + cmp %o4, %g1 ! check there are enough bytes to set + blu,pn %xcc, .short_set ! to justify cost of membar + ! must be > pre-cleared lines + nop + + ! initial cache-clearing stores + ! get store pipeline moving + rd %asi, %g3 ! save %asi to be restored later + wr %g0, ASI_STBIMRU_P, %asi + + ! Primary memset loop for large memsets +.wr_loop: + sub %o5, 8, %o5 ! adjust %o5 for ASI store alignment + mov ST_CHUNK, %g1 +.wr_loop_start: + stxa %o1, [%o5+8]%asi + subcc %g1, 4, %g1 + stxa %o1, [%o5+8+64]%asi + add %o5, 256, %o5 + stxa %o1, [%o5+8-128]%asi + bgu %xcc, .wr_loop_start + stxa %o1, [%o5+8-64]%asi + + sub %o5, ST_CHUNK*64, %o5 ! reset %o5 + mov ST_CHUNK, %g1 + +.wr_loop_rest: + stxa %o1, [%o5+8+8]%asi + sub %o4, 64, %o4 + stxa %o1, [%o5+16+8]%asi + subcc %g1, 1, %g1 + stxa %o1, [%o5+24+8]%asi + stxa %o1, [%o5+32+8]%asi + stxa %o1, [%o5+40+8]%asi + add %o5, 64, %o5 + stxa %o1, [%o5-8]%asi + bgu %xcc, .wr_loop_rest + stxa %o1, [%o5]ASI_STBI_P + + ! If more than ST_CHUNK*64 bytes remain to set, continue + ! 
setting the first long word of each cache line in advance + ! to keep the store pipeline moving. + + cmp %o4, ST_CHUNK*64 + bge,pt %xcc, .wr_loop_start + mov ST_CHUNK, %g1 + + brz,a,pn %o4, .asi_done + add %o5, 8, %o5 ! restore %o5 offset + +.wr_loop_small: + stxa %o1, [%o5+8]%asi + stxa %o1, [%o5+8+8]%asi + stxa %o1, [%o5+16+8]%asi + stxa %o1, [%o5+24+8]%asi + stxa %o1, [%o5+32+8]%asi + subcc %o4, 64, %o4 + stxa %o1, [%o5+40+8]%asi + add %o5, 64, %o5 + stxa %o1, [%o5-8]%asi + bgu,pt %xcc, .wr_loop_small + stxa %o1, [%o5]ASI_STBI_P + + ba .asi_done + add %o5, 8, %o5 ! restore %o5 offset + + ! Special case loop for zero fill memsets + ! For each 64 byte cache line, single STBI to first element + ! clears line +.wrzero: + cmp %o4, MIN_ZERO ! check if enough bytes to set + ! to pay %asi + membar cost + blu %xcc, .short_set + nop + sub %o4, 256, %o4 + +.wrzero_loop: + mov 64, %g3 + stxa %o1, [%o5]ASI_STBI_P + subcc %o4, 256, %o4 + stxa %o1, [%o5+%g3]ASI_STBI_P + add %o5, 256, %o5 + sub %g3, 192, %g3 + stxa %o1, [%o5+%g3]ASI_STBI_P + add %g3, 64, %g3 + bge,pt %xcc, .wrzero_loop + stxa %o1, [%o5+%g3]ASI_STBI_P + add %o4, 256, %o4 + + brz,pn %o4, .bsi_done + nop + +.wrzero_small: + stxa %o1, [%o5]ASI_STBI_P + subcc %o4, 64, %o4 + bgu,pt %xcc, .wrzero_small + add %o5, 64, %o5 + ba,a .bsi_done + +.asi_done: + wr %g3, 0x0, %asi ! restored saved %asi +.bsi_done: + membar #StoreStore ! required by use of Block Store Init + +.short_set: + cmp %o4, 64 ! check if 64 bytes to set + blu %xcc, 5f + nop +4: ! set final blocks of 64 bytes + stx %o1, [%o5] + stx %o1, [%o5+8] + stx %o1, [%o5+16] + stx %o1, [%o5+24] + subcc %o4, 64, %o4 + stx %o1, [%o5+32] + stx %o1, [%o5+40] + add %o5, 64, %o5 + stx %o1, [%o5-16] + bgu,pt %xcc, 4b + stx %o1, [%o5-8] + +5: + ! Set the remaining long words +.wrshort: + subcc %o3, 8, %o3 ! Can we store any long words? + blu,pn %xcc, .wrchars + and %o2, 7, %o2 ! calc bytes left after long words +6: + subcc %o3, 8, %o3 + stx %o1, [%o5] ! store the long words + bgeu,pt %xcc, 6b + add %o5, 8, %o5 + +.wrchars: ! check for extra chars + brnz %o2, .wrfin + nop + retl + nop + +.wdalign: + andcc %o5, 3, %o3 ! is sp1 aligned on a word boundary + bz,pn %xcc, .wrword + andn %o2, 3, %o3 ! create word sized count in %o3 + + dec %o2 ! decrement count + stb %o1, [%o5] ! clear a byte + b .wdalign + inc %o5 ! next byte + +.wrword: + subcc %o3, 4, %o3 + st %o1, [%o5] ! 4-byte writing loop + bnz,pt %xcc, .wrword + add %o5, 4, %o5 + + and %o2, 3, %o2 ! leftover count, if any + +.wrchar: + ! Set the remaining bytes, if any + brz %o2, .exit + nop +.wrfin: + deccc %o2 + stb %o1, [%o5] + bgu,pt %xcc, .wrfin + inc %o5 +.exit: + retl ! %o0 was preserved + nop + + .size M7memset,.-M7memset diff --git a/arch/sparc/lib/M7patch.S b/arch/sparc/lib/M7patch.S new file mode 100644 index 000000000000..9000b7bc5f2b --- /dev/null +++ b/arch/sparc/lib/M7patch.S @@ -0,0 +1,51 @@ +/* + * M7patch.S: Patch generic routines with M7 variant. + * + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. 
+ */ + +#include + +#define BRANCH_ALWAYS 0x10680000 +#define NOP 0x01000000 +#define NG_DO_PATCH(OLD, NEW) \ + sethi %hi(NEW), %g1; \ + or %g1, %lo(NEW), %g1; \ + sethi %hi(OLD), %g2; \ + or %g2, %lo(OLD), %g2; \ + sub %g1, %g2, %g1; \ + sethi %hi(BRANCH_ALWAYS), %g3; \ + sll %g1, 11, %g1; \ + srl %g1, 11 + 2, %g1; \ + or %g3, %lo(BRANCH_ALWAYS), %g3; \ + or %g3, %g1, %g3; \ + stw %g3, [%g2]; \ + sethi %hi(NOP), %g3; \ + or %g3, %lo(NOP), %g3; \ + stw %g3, [%g2 + 0x4]; \ + flush %g2; + +ENTRY(m7_patch_copyops) + NG_DO_PATCH(memcpy, M7memcpy) + NG_DO_PATCH(raw_copy_from_user, M7copy_from_user) + NG_DO_PATCH(raw_copy_to_user, M7copy_to_user) + retl + nop +ENDPROC(m7_patch_copyops) + +ENTRY(m7_patch_bzero) + NG_DO_PATCH(memset, M7memset) + NG_DO_PATCH(__bzero, M7bzero) + NG_DO_PATCH(__clear_user, NGclear_user) + NG_DO_PATCH(tsb_init, NGtsb_init) + retl + nop +ENDPROC(m7_patch_bzero) + +ENTRY(m7_patch_pageops) + NG_DO_PATCH(copy_user_page, NG4copy_user_page) + NG_DO_PATCH(_clear_page, M7clear_page) + NG_DO_PATCH(clear_user_page, M7clear_user_page) + retl + nop +ENDPROC(m7_patch_pageops) diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile index 07c03e72d812..a1a2d39ec96e 100644 --- a/arch/sparc/lib/Makefile +++ b/arch/sparc/lib/Makefile @@ -36,6 +36,11 @@ lib-$(CONFIG_SPARC64) += NG2patch.o lib-$(CONFIG_SPARC64) += NG4memcpy.o NG4copy_from_user.o NG4copy_to_user.o lib-$(CONFIG_SPARC64) += NG4patch.o NG4copy_page.o NG4clear_page.o NG4memset.o +lib-$(CONFIG_SPARC64) += Memcpy_utils.o + +lib-$(CONFIG_SPARC64) += M7memcpy.o M7copy_from_user.o M7copy_to_user.o +lib-$(CONFIG_SPARC64) += M7patch.o M7memset.o + lib-$(CONFIG_SPARC64) += GENmemcpy.o GENcopy_from_user.o GENcopy_to_user.o lib-$(CONFIG_SPARC64) += GENpatch.o GENpage.o GENbzero.o diff --git a/arch/sparc/lib/Memcpy_utils.S b/arch/sparc/lib/Memcpy_utils.S new file mode 100644 index 000000000000..64fbac28b3db --- /dev/null +++ b/arch/sparc/lib/Memcpy_utils.S @@ -0,0 +1,345 @@ +#ifndef __ASM_MEMCPY_UTILS +#define __ASM_MEMCPY_UTILS + +#include +#include +#include + +ENTRY(__restore_asi_fp) + VISExitHalf + retl + wr %g0, ASI_AIUS, %asi +ENDPROC(__restore_asi_fp) + +ENTRY(__restore_asi) + retl + wr %g0, ASI_AIUS, %asi +ENDPROC(__restore_asi) + +ENTRY(memcpy_retl_o2) + ba,pt %xcc, __restore_asi + mov %o2, %o0 +ENDPROC(memcpy_retl_o2) +ENTRY(memcpy_retl_o2_plus_1) + ba,pt %xcc, __restore_asi + add %o2, 1, %o0 +ENDPROC(memcpy_retl_o2_plus_1) +ENTRY(memcpy_retl_o2_plus_3) + ba,pt %xcc, __restore_asi + add %o2, 3, %o0 +ENDPROC(memcpy_retl_o2_plus_3) +ENTRY(memcpy_retl_o2_plus_4) + ba,pt %xcc, __restore_asi + add %o2, 4, %o0 +ENDPROC(memcpy_retl_o2_plus_4) +ENTRY(memcpy_retl_o2_plus_5) + ba,pt %xcc, __restore_asi + add %o2, 5, %o0 +ENDPROC(memcpy_retl_o2_plus_5) +ENTRY(memcpy_retl_o2_plus_6) + ba,pt %xcc, __restore_asi + add %o2, 6, %o0 +ENDPROC(memcpy_retl_o2_plus_6) +ENTRY(memcpy_retl_o2_plus_7) + ba,pt %xcc, __restore_asi + add %o2, 7, %o0 +ENDPROC(memcpy_retl_o2_plus_7) +ENTRY(memcpy_retl_o2_plus_8) + ba,pt %xcc, __restore_asi + add %o2, 8, %o0 +ENDPROC(memcpy_retl_o2_plus_8) +ENTRY(memcpy_retl_o2_plus_15) + ba,pt %xcc, __restore_asi + add %o2, 15, %o0 +ENDPROC(memcpy_retl_o2_plus_15) +ENTRY(memcpy_retl_o2_plus_15_8) + add %o2, 15, %o2 + ba,pt %xcc, __restore_asi + add %o2, 8, %o0 +ENDPROC(memcpy_retl_o2_plus_15_8) +ENTRY(memcpy_retl_o2_plus_16) + ba,pt %xcc, __restore_asi + add %o2, 16, %o0 +ENDPROC(memcpy_retl_o2_plus_16) +ENTRY(memcpy_retl_o2_plus_24) + ba,pt %xcc, __restore_asi + add %o2, 24, %o0 +ENDPROC(memcpy_retl_o2_plus_24) 
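For context, NG_DO_PATCH above redirects a generic routine to its M7 replacement by overwriting the routine's first two instructions with a branch-always to the new entry point plus a nop, then flushing the patched words. The sketch below is a rough C model of the instruction word it builds; m7_patch_sketch is a hypothetical name, and the sketch only mirrors the arithmetic rather than actually patching kernel text.

    #include <stdint.h>

    void m7_patch_sketch(uint32_t *old_fn, const uint32_t *new_fn)
    {
            /* Byte distance from OLD to NEW, reduced to a 19-bit word
             * displacement, mirroring the effect of the sll 11 / srl 13 pair.
             */
            uint32_t diff = (uint32_t)((uintptr_t)new_fn - (uintptr_t)old_fn);
            uint32_t disp19 = (diff << 11) >> 13;

            old_fn[0] = 0x10680000u | disp19;   /* BRANCH_ALWAYS ("ba,pt %xcc, ...") */
            old_fn[1] = 0x01000000u;            /* NOP */
            /* the real macro follows this with a "flush" of the patched address */
    }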
+ENTRY(memcpy_retl_o2_plus_31) + ba,pt %xcc, __restore_asi + add %o2, 31, %o0 +ENDPROC(memcpy_retl_o2_plus_31) +ENTRY(memcpy_retl_o2_plus_32) + ba,pt %xcc, __restore_asi + add %o2, 32, %o0 +ENDPROC(memcpy_retl_o2_plus_32) +ENTRY(memcpy_retl_o2_plus_31_32) + add %o2, 31, %o2 + ba,pt %xcc, __restore_asi + add %o2, 32, %o0 +ENDPROC(memcpy_retl_o2_plus_31_32) +ENTRY(memcpy_retl_o2_plus_31_24) + add %o2, 31, %o2 + ba,pt %xcc, __restore_asi + add %o2, 24, %o0 +ENDPROC(memcpy_retl_o2_plus_31_24) +ENTRY(memcpy_retl_o2_plus_31_16) + add %o2, 31, %o2 + ba,pt %xcc, __restore_asi + add %o2, 16, %o0 +ENDPROC(memcpy_retl_o2_plus_31_16) +ENTRY(memcpy_retl_o2_plus_31_8) + add %o2, 31, %o2 + ba,pt %xcc, __restore_asi + add %o2, 8, %o0 +ENDPROC(memcpy_retl_o2_plus_31_8) +ENTRY(memcpy_retl_o2_plus_63) + ba,pt %xcc, __restore_asi + add %o2, 63, %o0 +ENDPROC(memcpy_retl_o2_plus_63) +ENTRY(memcpy_retl_o2_plus_63_64) + add %o2, 63, %o2 + ba,pt %xcc, __restore_asi + add %o2, 64, %o0 +ENDPROC(memcpy_retl_o2_plus_63_64) +ENTRY(memcpy_retl_o2_plus_63_56) + add %o2, 63, %o2 + ba,pt %xcc, __restore_asi + add %o2, 56, %o0 +ENDPROC(memcpy_retl_o2_plus_63_56) +ENTRY(memcpy_retl_o2_plus_63_48) + add %o2, 63, %o2 + ba,pt %xcc, __restore_asi + add %o2, 48, %o0 +ENDPROC(memcpy_retl_o2_plus_63_48) +ENTRY(memcpy_retl_o2_plus_63_40) + add %o2, 63, %o2 + ba,pt %xcc, __restore_asi + add %o2, 40, %o0 +ENDPROC(memcpy_retl_o2_plus_63_40) +ENTRY(memcpy_retl_o2_plus_63_32) + add %o2, 63, %o2 + ba,pt %xcc, __restore_asi + add %o2, 32, %o0 +ENDPROC(memcpy_retl_o2_plus_63_32) +ENTRY(memcpy_retl_o2_plus_63_24) + add %o2, 63, %o2 + ba,pt %xcc, __restore_asi + add %o2, 24, %o0 +ENDPROC(memcpy_retl_o2_plus_63_24) +ENTRY(memcpy_retl_o2_plus_63_16) + add %o2, 63, %o2 + ba,pt %xcc, __restore_asi + add %o2, 16, %o0 +ENDPROC(memcpy_retl_o2_plus_63_16) +ENTRY(memcpy_retl_o2_plus_63_8) + add %o2, 63, %o2 + ba,pt %xcc, __restore_asi + add %o2, 8, %o0 +ENDPROC(memcpy_retl_o2_plus_63_8) +ENTRY(memcpy_retl_o2_plus_o5) + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5) +ENTRY(memcpy_retl_o2_plus_o5_plus_1) + add %o5, 1, %o5 + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_1) +ENTRY(memcpy_retl_o2_plus_o5_plus_4) + add %o5, 4, %o5 + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_4) +ENTRY(memcpy_retl_o2_plus_o5_plus_8) + add %o5, 8, %o5 + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_8) +ENTRY(memcpy_retl_o2_plus_o5_plus_16) + add %o5, 16, %o5 + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_16) +ENTRY(memcpy_retl_o2_plus_o5_plus_24) + add %o5, 24, %o5 + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_24) +ENTRY(memcpy_retl_o2_plus_o5_plus_32) + add %o5, 32, %o5 + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_32) +ENTRY(memcpy_retl_o2_plus_o5_64) + add %o5, 32, %o5 + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_64) +ENTRY(memcpy_retl_o2_plus_g1) + ba,pt %xcc, __restore_asi + add %o2, %g1, %o0 +ENDPROC(memcpy_retl_o2_plus_g1) +ENTRY(memcpy_retl_o2_plus_g1_plus_1) + add %g1, 1, %g1 + ba,pt %xcc, __restore_asi + add %o2, %g1, %o0 +ENDPROC(memcpy_retl_o2_plus_g1_plus_1) +ENTRY(memcpy_retl_o2_plus_g1_plus_8) + add %g1, 8, %g1 + ba,pt %xcc, __restore_asi + add %o2, %g1, %o0 +ENDPROC(memcpy_retl_o2_plus_g1_plus_8) +ENTRY(memcpy_retl_o2_plus_o4) + ba,pt %xcc, __restore_asi + add %o2, 
%o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4) +ENTRY(memcpy_retl_o2_plus_o4_plus_8) + add %o4, 8, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_8) +ENTRY(memcpy_retl_o2_plus_o4_plus_16) + add %o4, 16, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_16) +ENTRY(memcpy_retl_o2_plus_o4_plus_24) + add %o4, 24, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_24) +ENTRY(memcpy_retl_o2_plus_o4_plus_32) + add %o4, 32, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_32) +ENTRY(memcpy_retl_o2_plus_o4_plus_40) + add %o4, 40, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_40) +ENTRY(memcpy_retl_o2_plus_o4_plus_48) + add %o4, 48, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_48) +ENTRY(memcpy_retl_o2_plus_o4_plus_56) + add %o4, 56, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_56) +ENTRY(memcpy_retl_o2_plus_o4_plus_64) + add %o4, 64, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_64) +ENTRY(memcpy_retl_o2_plus_o5_plus_64) + add %o5, 64, %o5 + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_64) +ENTRY(memcpy_retl_o2_plus_o3_fp) + ba,pt %xcc, __restore_asi_fp + add %o2, %o3, %o0 +ENDPROC(memcpy_retl_o2_plus_o3_fp) +ENTRY(memcpy_retl_o2_plus_o3_plus_1_fp) + add %o3, 1, %o3 + ba,pt %xcc, __restore_asi_fp + add %o2, %o3, %o0 +ENDPROC(memcpy_retl_o2_plus_o3_plus_1_fp) +ENTRY(memcpy_retl_o2_plus_o3_plus_4_fp) + add %o3, 4, %o3 + ba,pt %xcc, __restore_asi_fp + add %o2, %o3, %o0 +ENDPROC(memcpy_retl_o2_plus_o3_plus_4_fp) +ENTRY(memcpy_retl_o2_plus_o4_fp) + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_fp) +ENTRY(memcpy_retl_o2_plus_o4_plus_8_fp) + add %o4, 8, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_8_fp) +ENTRY(memcpy_retl_o2_plus_o4_plus_16_fp) + add %o4, 16, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_16_fp) +ENTRY(memcpy_retl_o2_plus_o4_plus_24_fp) + add %o4, 24, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_24_fp) +ENTRY(memcpy_retl_o2_plus_o4_plus_32_fp) + add %o4, 32, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_32_fp) +ENTRY(memcpy_retl_o2_plus_o4_plus_40_fp) + add %o4, 40, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_40_fp) +ENTRY(memcpy_retl_o2_plus_o4_plus_48_fp) + add %o4, 48, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_48_fp) +ENTRY(memcpy_retl_o2_plus_o4_plus_56_fp) + add %o4, 56, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_56_fp) +ENTRY(memcpy_retl_o2_plus_o4_plus_64_fp) + add %o4, 64, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_64_fp) +ENTRY(memcpy_retl_o2_plus_o5_fp) + ba,pt %xcc, __restore_asi_fp + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_fp) +ENTRY(memcpy_retl_o2_plus_o5_plus_64_fp) + add %o5, 64, %o5 + ba,pt %xcc, __restore_asi_fp + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_64_fp) +ENTRY(memcpy_retl_o2_plus_o5_plus_56_fp) + add %o5, 56, %o5 + ba,pt %xcc, __restore_asi_fp + add %o2, %o5, %o0 
+ENDPROC(memcpy_retl_o2_plus_o5_plus_56_fp) +ENTRY(memcpy_retl_o2_plus_o5_plus_48_fp) + add %o5, 48, %o5 + ba,pt %xcc, __restore_asi_fp + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_48_fp) +ENTRY(memcpy_retl_o2_plus_o5_plus_40_fp) + add %o5, 40, %o5 + ba,pt %xcc, __restore_asi_fp + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_40_fp) +ENTRY(memcpy_retl_o2_plus_o5_plus_32_fp) + add %o5, 32, %o5 + ba,pt %xcc, __restore_asi_fp + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_32_fp) +ENTRY(memcpy_retl_o2_plus_o5_plus_24_fp) + add %o5, 24, %o5 + ba,pt %xcc, __restore_asi_fp + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_24_fp) +ENTRY(memcpy_retl_o2_plus_o5_plus_16_fp) + add %o5, 16, %o5 + ba,pt %xcc, __restore_asi_fp + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_16_fp) +ENTRY(memcpy_retl_o2_plus_o5_plus_8_fp) + add %o5, 8, %o5 + ba,pt %xcc, __restore_asi_fp + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_8_fp) + +#endif diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S index 78ea962edcbe..b5dacd1d2078 100644 --- a/arch/sparc/lib/NG4memcpy.S +++ b/arch/sparc/lib/NG4memcpy.S @@ -94,155 +94,6 @@ .text #ifndef EX_RETVAL #define EX_RETVAL(x) x -__restore_asi_fp: - VISExitHalf -__restore_asi: - retl - wr %g0, ASI_AIUS, %asi - -ENTRY(NG4_retl_o2) - ba,pt %xcc, __restore_asi - mov %o2, %o0 -ENDPROC(NG4_retl_o2) -ENTRY(NG4_retl_o2_plus_1) - ba,pt %xcc, __restore_asi - add %o2, 1, %o0 -ENDPROC(NG4_retl_o2_plus_1) -ENTRY(NG4_retl_o2_plus_4) - ba,pt %xcc, __restore_asi - add %o2, 4, %o0 -ENDPROC(NG4_retl_o2_plus_4) -ENTRY(NG4_retl_o2_plus_o5) - ba,pt %xcc, __restore_asi - add %o2, %o5, %o0 -ENDPROC(NG4_retl_o2_plus_o5) -ENTRY(NG4_retl_o2_plus_o5_plus_4) - add %o5, 4, %o5 - ba,pt %xcc, __restore_asi - add %o2, %o5, %o0 -ENDPROC(NG4_retl_o2_plus_o5_plus_4) -ENTRY(NG4_retl_o2_plus_o5_plus_8) - add %o5, 8, %o5 - ba,pt %xcc, __restore_asi - add %o2, %o5, %o0 -ENDPROC(NG4_retl_o2_plus_o5_plus_8) -ENTRY(NG4_retl_o2_plus_o5_plus_16) - add %o5, 16, %o5 - ba,pt %xcc, __restore_asi - add %o2, %o5, %o0 -ENDPROC(NG4_retl_o2_plus_o5_plus_16) -ENTRY(NG4_retl_o2_plus_o5_plus_24) - add %o5, 24, %o5 - ba,pt %xcc, __restore_asi - add %o2, %o5, %o0 -ENDPROC(NG4_retl_o2_plus_o5_plus_24) -ENTRY(NG4_retl_o2_plus_o5_plus_32) - add %o5, 32, %o5 - ba,pt %xcc, __restore_asi - add %o2, %o5, %o0 -ENDPROC(NG4_retl_o2_plus_o5_plus_32) -ENTRY(NG4_retl_o2_plus_g1) - ba,pt %xcc, __restore_asi - add %o2, %g1, %o0 -ENDPROC(NG4_retl_o2_plus_g1) -ENTRY(NG4_retl_o2_plus_g1_plus_1) - add %g1, 1, %g1 - ba,pt %xcc, __restore_asi - add %o2, %g1, %o0 -ENDPROC(NG4_retl_o2_plus_g1_plus_1) -ENTRY(NG4_retl_o2_plus_g1_plus_8) - add %g1, 8, %g1 - ba,pt %xcc, __restore_asi - add %o2, %g1, %o0 -ENDPROC(NG4_retl_o2_plus_g1_plus_8) -ENTRY(NG4_retl_o2_plus_o4) - ba,pt %xcc, __restore_asi - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4) -ENTRY(NG4_retl_o2_plus_o4_plus_8) - add %o4, 8, %o4 - ba,pt %xcc, __restore_asi - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_8) -ENTRY(NG4_retl_o2_plus_o4_plus_16) - add %o4, 16, %o4 - ba,pt %xcc, __restore_asi - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_16) -ENTRY(NG4_retl_o2_plus_o4_plus_24) - add %o4, 24, %o4 - ba,pt %xcc, __restore_asi - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_24) -ENTRY(NG4_retl_o2_plus_o4_plus_32) - add %o4, 32, %o4 - ba,pt %xcc, __restore_asi - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_32) -ENTRY(NG4_retl_o2_plus_o4_plus_40) - add %o4, 40, %o4 - ba,pt %xcc, __restore_asi - add %o2, %o4, 
%o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_40) -ENTRY(NG4_retl_o2_plus_o4_plus_48) - add %o4, 48, %o4 - ba,pt %xcc, __restore_asi - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_48) -ENTRY(NG4_retl_o2_plus_o4_plus_56) - add %o4, 56, %o4 - ba,pt %xcc, __restore_asi - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_56) -ENTRY(NG4_retl_o2_plus_o4_plus_64) - add %o4, 64, %o4 - ba,pt %xcc, __restore_asi - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_64) -ENTRY(NG4_retl_o2_plus_o4_fp) - ba,pt %xcc, __restore_asi_fp - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_fp) -ENTRY(NG4_retl_o2_plus_o4_plus_8_fp) - add %o4, 8, %o4 - ba,pt %xcc, __restore_asi_fp - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_8_fp) -ENTRY(NG4_retl_o2_plus_o4_plus_16_fp) - add %o4, 16, %o4 - ba,pt %xcc, __restore_asi_fp - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_16_fp) -ENTRY(NG4_retl_o2_plus_o4_plus_24_fp) - add %o4, 24, %o4 - ba,pt %xcc, __restore_asi_fp - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_24_fp) -ENTRY(NG4_retl_o2_plus_o4_plus_32_fp) - add %o4, 32, %o4 - ba,pt %xcc, __restore_asi_fp - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_32_fp) -ENTRY(NG4_retl_o2_plus_o4_plus_40_fp) - add %o4, 40, %o4 - ba,pt %xcc, __restore_asi_fp - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_40_fp) -ENTRY(NG4_retl_o2_plus_o4_plus_48_fp) - add %o4, 48, %o4 - ba,pt %xcc, __restore_asi_fp - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_48_fp) -ENTRY(NG4_retl_o2_plus_o4_plus_56_fp) - add %o4, 56, %o4 - ba,pt %xcc, __restore_asi_fp - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_56_fp) -ENTRY(NG4_retl_o2_plus_o4_plus_64_fp) - add %o4, 64, %o4 - ba,pt %xcc, __restore_asi_fp - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_64_fp) #endif .align 64 @@ -275,12 +126,12 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ sub %o2, %g1, %o2 -1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), NG4_retl_o2_plus_g1) +1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), memcpy_retl_o2_plus_g1) add %o1, 1, %o1 subcc %g1, 1, %g1 add %o0, 1, %o0 bne,pt %icc, 1b - EX_ST(STORE(stb, %g2, %o0 - 0x01), NG4_retl_o2_plus_g1_plus_1) + EX_ST(STORE(stb, %g2, %o0 - 0x01), memcpy_retl_o2_plus_g1_plus_1) 51: LOAD(prefetch, %o1 + 0x040, #n_reads_strong) LOAD(prefetch, %o1 + 0x080, #n_reads_strong) @@ -305,43 +156,43 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ brz,pt %g1, .Llarge_aligned sub %o2, %g1, %o2 -1: EX_LD(LOAD(ldx, %o1 + 0x00, %g2), NG4_retl_o2_plus_g1) +1: EX_LD(LOAD(ldx, %o1 + 0x00, %g2), memcpy_retl_o2_plus_g1) add %o1, 8, %o1 subcc %g1, 8, %g1 add %o0, 8, %o0 bne,pt %icc, 1b - EX_ST(STORE(stx, %g2, %o0 - 0x08), NG4_retl_o2_plus_g1_plus_8) + EX_ST(STORE(stx, %g2, %o0 - 0x08), memcpy_retl_o2_plus_g1_plus_8) .Llarge_aligned: /* len >= 0x80 && src 8-byte aligned && dest 8-byte aligned */ andn %o2, 0x3f, %o4 sub %o2, %o4, %o2 -1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), NG4_retl_o2_plus_o4) +1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), memcpy_retl_o2_plus_o4) add %o1, 0x40, %o1 - EX_LD(LOAD(ldx, %o1 - 0x38, %g2), NG4_retl_o2_plus_o4) + EX_LD(LOAD(ldx, %o1 - 0x38, %g2), memcpy_retl_o2_plus_o4) subcc %o4, 0x40, %o4 - EX_LD(LOAD(ldx, %o1 - 0x30, %g3), NG4_retl_o2_plus_o4_plus_64) - EX_LD(LOAD(ldx, %o1 - 0x28, GLOBAL_SPARE), NG4_retl_o2_plus_o4_plus_64) - EX_LD(LOAD(ldx, %o1 - 0x20, %o5), NG4_retl_o2_plus_o4_plus_64) - EX_ST(STORE_INIT(%g1, %o0), NG4_retl_o2_plus_o4_plus_64) + EX_LD(LOAD(ldx, %o1 - 0x30, %g3), memcpy_retl_o2_plus_o4_plus_64) + EX_LD(LOAD(ldx, %o1 - 0x28, GLOBAL_SPARE), memcpy_retl_o2_plus_o4_plus_64) + EX_LD(LOAD(ldx, %o1 - 
0x20, %o5), memcpy_retl_o2_plus_o4_plus_64) + EX_ST(STORE_INIT(%g1, %o0), memcpy_retl_o2_plus_o4_plus_64) add %o0, 0x08, %o0 - EX_ST(STORE_INIT(%g2, %o0), NG4_retl_o2_plus_o4_plus_56) + EX_ST(STORE_INIT(%g2, %o0), memcpy_retl_o2_plus_o4_plus_56) add %o0, 0x08, %o0 - EX_LD(LOAD(ldx, %o1 - 0x18, %g2), NG4_retl_o2_plus_o4_plus_48) - EX_ST(STORE_INIT(%g3, %o0), NG4_retl_o2_plus_o4_plus_48) + EX_LD(LOAD(ldx, %o1 - 0x18, %g2), memcpy_retl_o2_plus_o4_plus_48) + EX_ST(STORE_INIT(%g3, %o0), memcpy_retl_o2_plus_o4_plus_48) add %o0, 0x08, %o0 - EX_LD(LOAD(ldx, %o1 - 0x10, %g3), NG4_retl_o2_plus_o4_plus_40) - EX_ST(STORE_INIT(GLOBAL_SPARE, %o0), NG4_retl_o2_plus_o4_plus_40) + EX_LD(LOAD(ldx, %o1 - 0x10, %g3), memcpy_retl_o2_plus_o4_plus_40) + EX_ST(STORE_INIT(GLOBAL_SPARE, %o0), memcpy_retl_o2_plus_o4_plus_40) add %o0, 0x08, %o0 - EX_LD(LOAD(ldx, %o1 - 0x08, GLOBAL_SPARE), NG4_retl_o2_plus_o4_plus_32) - EX_ST(STORE_INIT(%o5, %o0), NG4_retl_o2_plus_o4_plus_32) + EX_LD(LOAD(ldx, %o1 - 0x08, GLOBAL_SPARE), memcpy_retl_o2_plus_o4_plus_32) + EX_ST(STORE_INIT(%o5, %o0), memcpy_retl_o2_plus_o4_plus_32) add %o0, 0x08, %o0 - EX_ST(STORE_INIT(%g2, %o0), NG4_retl_o2_plus_o4_plus_24) + EX_ST(STORE_INIT(%g2, %o0), memcpy_retl_o2_plus_o4_plus_24) add %o0, 0x08, %o0 - EX_ST(STORE_INIT(%g3, %o0), NG4_retl_o2_plus_o4_plus_16) + EX_ST(STORE_INIT(%g3, %o0), memcpy_retl_o2_plus_o4_plus_16) add %o0, 0x08, %o0 - EX_ST(STORE_INIT(GLOBAL_SPARE, %o0), NG4_retl_o2_plus_o4_plus_8) + EX_ST(STORE_INIT(GLOBAL_SPARE, %o0), memcpy_retl_o2_plus_o4_plus_8) add %o0, 0x08, %o0 bne,pt %icc, 1b LOAD(prefetch, %o1 + 0x200, #n_reads_strong) @@ -367,17 +218,17 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ sub %o2, %o4, %o2 alignaddr %o1, %g0, %g1 add %o1, %o4, %o1 - EX_LD_FP(LOAD(ldd, %g1 + 0x00, %f0), NG4_retl_o2_plus_o4) -1: EX_LD_FP(LOAD(ldd, %g1 + 0x08, %f2), NG4_retl_o2_plus_o4) + EX_LD_FP(LOAD(ldd, %g1 + 0x00, %f0), memcpy_retl_o2_plus_o4) +1: EX_LD_FP(LOAD(ldd, %g1 + 0x08, %f2), memcpy_retl_o2_plus_o4) subcc %o4, 0x40, %o4 - EX_LD_FP(LOAD(ldd, %g1 + 0x10, %f4), NG4_retl_o2_plus_o4_plus_64) - EX_LD_FP(LOAD(ldd, %g1 + 0x18, %f6), NG4_retl_o2_plus_o4_plus_64) - EX_LD_FP(LOAD(ldd, %g1 + 0x20, %f8), NG4_retl_o2_plus_o4_plus_64) - EX_LD_FP(LOAD(ldd, %g1 + 0x28, %f10), NG4_retl_o2_plus_o4_plus_64) - EX_LD_FP(LOAD(ldd, %g1 + 0x30, %f12), NG4_retl_o2_plus_o4_plus_64) - EX_LD_FP(LOAD(ldd, %g1 + 0x38, %f14), NG4_retl_o2_plus_o4_plus_64) + EX_LD_FP(LOAD(ldd, %g1 + 0x10, %f4), memcpy_retl_o2_plus_o4_plus_64) + EX_LD_FP(LOAD(ldd, %g1 + 0x18, %f6), memcpy_retl_o2_plus_o4_plus_64) + EX_LD_FP(LOAD(ldd, %g1 + 0x20, %f8), memcpy_retl_o2_plus_o4_plus_64) + EX_LD_FP(LOAD(ldd, %g1 + 0x28, %f10), memcpy_retl_o2_plus_o4_plus_64) + EX_LD_FP(LOAD(ldd, %g1 + 0x30, %f12), memcpy_retl_o2_plus_o4_plus_64) + EX_LD_FP(LOAD(ldd, %g1 + 0x38, %f14), memcpy_retl_o2_plus_o4_plus_64) faligndata %f0, %f2, %f16 - EX_LD_FP(LOAD(ldd, %g1 + 0x40, %f0), NG4_retl_o2_plus_o4_plus_64) + EX_LD_FP(LOAD(ldd, %g1 + 0x40, %f0), memcpy_retl_o2_plus_o4_plus_64) faligndata %f2, %f4, %f18 add %g1, 0x40, %g1 faligndata %f4, %f6, %f20 @@ -386,14 +237,14 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ faligndata %f10, %f12, %f26 faligndata %f12, %f14, %f28 faligndata %f14, %f0, %f30 - EX_ST_FP(STORE(std, %f16, %o0 + 0x00), NG4_retl_o2_plus_o4_plus_64) - EX_ST_FP(STORE(std, %f18, %o0 + 0x08), NG4_retl_o2_plus_o4_plus_56) - EX_ST_FP(STORE(std, %f20, %o0 + 0x10), NG4_retl_o2_plus_o4_plus_48) - EX_ST_FP(STORE(std, %f22, %o0 + 0x18), NG4_retl_o2_plus_o4_plus_40) - EX_ST_FP(STORE(std, %f24, %o0 + 
0x20), NG4_retl_o2_plus_o4_plus_32) - EX_ST_FP(STORE(std, %f26, %o0 + 0x28), NG4_retl_o2_plus_o4_plus_24) - EX_ST_FP(STORE(std, %f28, %o0 + 0x30), NG4_retl_o2_plus_o4_plus_16) - EX_ST_FP(STORE(std, %f30, %o0 + 0x38), NG4_retl_o2_plus_o4_plus_8) + EX_ST_FP(STORE(std, %f16, %o0 + 0x00), memcpy_retl_o2_plus_o4_plus_64) + EX_ST_FP(STORE(std, %f18, %o0 + 0x08), memcpy_retl_o2_plus_o4_plus_56) + EX_ST_FP(STORE(std, %f20, %o0 + 0x10), memcpy_retl_o2_plus_o4_plus_48) + EX_ST_FP(STORE(std, %f22, %o0 + 0x18), memcpy_retl_o2_plus_o4_plus_40) + EX_ST_FP(STORE(std, %f24, %o0 + 0x20), memcpy_retl_o2_plus_o4_plus_32) + EX_ST_FP(STORE(std, %f26, %o0 + 0x28), memcpy_retl_o2_plus_o4_plus_24) + EX_ST_FP(STORE(std, %f28, %o0 + 0x30), memcpy_retl_o2_plus_o4_plus_16) + EX_ST_FP(STORE(std, %f30, %o0 + 0x38), memcpy_retl_o2_plus_o4_plus_8) add %o0, 0x40, %o0 bne,pt %icc, 1b LOAD(prefetch, %g1 + 0x200, #n_reads_strong) @@ -421,38 +272,38 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ andncc %o2, 0x20 - 1, %o5 be,pn %icc, 2f sub %o2, %o5, %o2 -1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), NG4_retl_o2_plus_o5) - EX_LD(LOAD(ldx, %o1 + 0x08, %g2), NG4_retl_o2_plus_o5) - EX_LD(LOAD(ldx, %o1 + 0x10, GLOBAL_SPARE), NG4_retl_o2_plus_o5) - EX_LD(LOAD(ldx, %o1 + 0x18, %o4), NG4_retl_o2_plus_o5) +1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1 + 0x08, %g2), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1 + 0x10, GLOBAL_SPARE), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1 + 0x18, %o4), memcpy_retl_o2_plus_o5) add %o1, 0x20, %o1 subcc %o5, 0x20, %o5 - EX_ST(STORE(stx, %g1, %o0 + 0x00), NG4_retl_o2_plus_o5_plus_32) - EX_ST(STORE(stx, %g2, %o0 + 0x08), NG4_retl_o2_plus_o5_plus_24) - EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x10), NG4_retl_o2_plus_o5_plus_24) - EX_ST(STORE(stx, %o4, %o0 + 0x18), NG4_retl_o2_plus_o5_plus_8) + EX_ST(STORE(stx, %g1, %o0 + 0x00), memcpy_retl_o2_plus_o5_plus_32) + EX_ST(STORE(stx, %g2, %o0 + 0x08), memcpy_retl_o2_plus_o5_plus_24) + EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x10), memcpy_retl_o2_plus_o5_plus_24) + EX_ST(STORE(stx, %o4, %o0 + 0x18), memcpy_retl_o2_plus_o5_plus_8) bne,pt %icc, 1b add %o0, 0x20, %o0 2: andcc %o2, 0x18, %o5 be,pt %icc, 3f sub %o2, %o5, %o2 -1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), NG4_retl_o2_plus_o5) +1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), memcpy_retl_o2_plus_o5) add %o1, 0x08, %o1 add %o0, 0x08, %o0 subcc %o5, 0x08, %o5 bne,pt %icc, 1b - EX_ST(STORE(stx, %g1, %o0 - 0x08), NG4_retl_o2_plus_o5_plus_8) + EX_ST(STORE(stx, %g1, %o0 - 0x08), memcpy_retl_o2_plus_o5_plus_8) 3: brz,pt %o2, .Lexit cmp %o2, 0x04 bl,pn %icc, .Ltiny nop - EX_LD(LOAD(lduw, %o1 + 0x00, %g1), NG4_retl_o2) + EX_LD(LOAD(lduw, %o1 + 0x00, %g1), memcpy_retl_o2) add %o1, 0x04, %o1 add %o0, 0x04, %o0 subcc %o2, 0x04, %o2 bne,pn %icc, .Ltiny - EX_ST(STORE(stw, %g1, %o0 - 0x04), NG4_retl_o2_plus_4) + EX_ST(STORE(stw, %g1, %o0 - 0x04), memcpy_retl_o2_plus_4) ba,a,pt %icc, .Lexit .Lmedium_unaligned: /* First get dest 8 byte aligned. 
*/ @@ -461,12 +312,12 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ brz,pt %g1, 2f sub %o2, %g1, %o2 -1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), NG4_retl_o2_plus_g1) +1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), memcpy_retl_o2_plus_g1) add %o1, 1, %o1 subcc %g1, 1, %g1 add %o0, 1, %o0 bne,pt %icc, 1b - EX_ST(STORE(stb, %g2, %o0 - 0x01), NG4_retl_o2_plus_g1_plus_1) + EX_ST(STORE(stb, %g2, %o0 - 0x01), memcpy_retl_o2_plus_g1_plus_1) 2: and %o1, 0x7, %g1 brz,pn %g1, .Lmedium_noprefetch @@ -474,16 +325,16 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ mov 64, %g2 sub %g2, %g1, %g2 andn %o1, 0x7, %o1 - EX_LD(LOAD(ldx, %o1 + 0x00, %o4), NG4_retl_o2) + EX_LD(LOAD(ldx, %o1 + 0x00, %o4), memcpy_retl_o2) sllx %o4, %g1, %o4 andn %o2, 0x08 - 1, %o5 sub %o2, %o5, %o2 -1: EX_LD(LOAD(ldx, %o1 + 0x08, %g3), NG4_retl_o2_plus_o5) +1: EX_LD(LOAD(ldx, %o1 + 0x08, %g3), memcpy_retl_o2_plus_o5) add %o1, 0x08, %o1 subcc %o5, 0x08, %o5 srlx %g3, %g2, GLOBAL_SPARE or GLOBAL_SPARE, %o4, GLOBAL_SPARE - EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x00), NG4_retl_o2_plus_o5_plus_8) + EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x00), memcpy_retl_o2_plus_o5_plus_8) add %o0, 0x08, %o0 bne,pt %icc, 1b sllx %g3, %g1, %o4 @@ -494,17 +345,17 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ ba,pt %icc, .Lsmall_unaligned .Ltiny: - EX_LD(LOAD(ldub, %o1 + 0x00, %g1), NG4_retl_o2) + EX_LD(LOAD(ldub, %o1 + 0x00, %g1), memcpy_retl_o2) subcc %o2, 1, %o2 be,pn %icc, .Lexit - EX_ST(STORE(stb, %g1, %o0 + 0x00), NG4_retl_o2_plus_1) - EX_LD(LOAD(ldub, %o1 + 0x01, %g1), NG4_retl_o2) + EX_ST(STORE(stb, %g1, %o0 + 0x00), memcpy_retl_o2_plus_1) + EX_LD(LOAD(ldub, %o1 + 0x01, %g1), memcpy_retl_o2) subcc %o2, 1, %o2 be,pn %icc, .Lexit - EX_ST(STORE(stb, %g1, %o0 + 0x01), NG4_retl_o2_plus_1) - EX_LD(LOAD(ldub, %o1 + 0x02, %g1), NG4_retl_o2) + EX_ST(STORE(stb, %g1, %o0 + 0x01), memcpy_retl_o2_plus_1) + EX_LD(LOAD(ldub, %o1 + 0x02, %g1), memcpy_retl_o2) ba,pt %icc, .Lexit - EX_ST(STORE(stb, %g1, %o0 + 0x02), NG4_retl_o2) + EX_ST(STORE(stb, %g1, %o0 + 0x02), memcpy_retl_o2) .Lsmall: andcc %g2, 0x3, %g0 @@ -512,23 +363,23 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ andn %o2, 0x4 - 1, %o5 sub %o2, %o5, %o2 1: - EX_LD(LOAD(lduw, %o1 + 0x00, %g1), NG4_retl_o2_plus_o5) + EX_LD(LOAD(lduw, %o1 + 0x00, %g1), memcpy_retl_o2_plus_o5) add %o1, 0x04, %o1 subcc %o5, 0x04, %o5 add %o0, 0x04, %o0 bne,pt %icc, 1b - EX_ST(STORE(stw, %g1, %o0 - 0x04), NG4_retl_o2_plus_o5_plus_4) + EX_ST(STORE(stw, %g1, %o0 - 0x04), memcpy_retl_o2_plus_o5_plus_4) brz,pt %o2, .Lexit nop ba,a,pt %icc, .Ltiny .Lsmall_unaligned: -1: EX_LD(LOAD(ldub, %o1 + 0x00, %g1), NG4_retl_o2) +1: EX_LD(LOAD(ldub, %o1 + 0x00, %g1), memcpy_retl_o2) add %o1, 1, %o1 add %o0, 1, %o0 subcc %o2, 1, %o2 bne,pt %icc, 1b - EX_ST(STORE(stb, %g1, %o0 - 0x01), NG4_retl_o2_plus_1) + EX_ST(STORE(stb, %g1, %o0 - 0x01), memcpy_retl_o2_plus_1) ba,a,pt %icc, .Lexit nop .size FUNC_NAME, .-FUNC_NAME diff --git a/arch/sparc/lib/U3memcpy.S b/arch/sparc/lib/U3memcpy.S index 5a8cb37f0a3b..f9b42b3c63b0 100644 --- a/arch/sparc/lib/U3memcpy.S +++ b/arch/sparc/lib/U3memcpy.S @@ -168,18 +168,25 @@ ENDPROC(U3_retl_o2_and_7_plus_GS_plus_8) FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ srlx %o2, 31, %g2 cmp %g2, 0 + + /* software trap 5 "Range Check" if dst >= 0x80000000 */ tne %xcc, 5 PREAMBLE mov %o0, %o4 + + /* if len == 0 */ cmp %o2, 0 - be,pn %XCC, 85f + be,pn %XCC, end_return or %o0, %o1, %o3 + + /* if len < 16 */ cmp %o2, 16 - blu,a,pn %XCC, 80f + blu,a,pn %XCC, less_than_16 or %o3, %o2, %o3 + /* if len < 192 */ cmp %o2, (3 * 64) - blu,pt %XCC, 70f + 
blu,pt %XCC, less_than_192 andcc %o3, 0x7, %g0 /* Clobbers o5/g1/g2/g3/g7/icc/xcc. We must preserve @@ -362,7 +369,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ cmp %o2, 0 add %o1, %g1, %o1 VISExitHalf - be,pn %XCC, 85f + be,pn %XCC, end_return sub %o0, %o1, %o3 andcc %g1, 0x7, %g0 @@ -392,14 +399,15 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ sub %o2, 2, %o2 1: andcc %o2, 0x1, %g0 - be,pt %icc, 85f + be,pt %icc, end_return nop EX_LD(LOAD(ldub, %o1, %o5), U3_retl_o2) - ba,pt %xcc, 85f + ba,pt %xcc, end_return EX_ST(STORE(stb, %o5, %o1 + %o3), U3_retl_o2) .align 64 -70: /* 16 < len <= 64 */ + /* 16 <= len < 192 */ +less_than_192: bne,pn %XCC, 75f sub %o0, %o1, %o3 @@ -429,7 +437,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ EX_ST(STORE(stw, %o5, %o1 + %o3), U3_retl_o2_plus_4) add %o1, 0x4, %o1 1: cmp %o2, 0 - be,pt %XCC, 85f + be,pt %XCC, end_return nop ba,pt %xcc, 90f nop @@ -475,13 +483,14 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ srl %g1, 3, %g1 andcc %o2, 0x7, %o2 - be,pn %icc, 85f + be,pn %icc, end_return add %o1, %g1, %o1 ba,pt %xcc, 90f sub %o0, %o1, %o3 .align 64 -80: /* 0 < len <= 16 */ + /* 0 < len < 16 */ +less_than_16: andcc %o3, 0x3, %g0 bne,pn %XCC, 90f sub %o0, %o1, %o3 @@ -493,7 +502,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ bgu,pt %XCC, 1b add %o1, 4, %o1 -85: retl +end_return: + retl mov EX_RETVAL(%o4), %o0 .align 32 diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c index f80cfc64c55b..d809099ffd47 100644 --- a/arch/sparc/mm/gup.c +++ b/arch/sparc/mm/gup.c @@ -103,6 +103,45 @@ static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr, return 1; } +static int gup_huge_pud(pud_t *pudp, pud_t pud, unsigned long addr, + unsigned long end, int write, struct page **pages, + int *nr) +{ + struct page *head, *page; + int refs; + + if (!(pud_val(pud) & _PAGE_VALID)) + return 0; + + if (write && !pud_write(pud)) + return 0; + + refs = 0; + page = pud_page(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); + head = compound_head(page); + do { + VM_BUG_ON(compound_head(page) != head); + pages[*nr] = page; + (*nr)++; + page++; + refs++; + } while (addr += PAGE_SIZE, addr != end); + + if (!page_cache_add_speculative(head, refs)) { + *nr -= refs; + return 0; + } + + if (unlikely(pud_val(pud) != pud_val(*pudp))) { + *nr -= refs; + while (refs--) + put_page(head); + return 0; + } + + return 1; +} + static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { @@ -141,7 +180,11 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end, next = pud_addr_end(addr, end); if (pud_none(pud)) return 0; - if (!gup_pmd_range(pud, addr, next, write, pages, nr)) + if (unlikely(pud_large(pud))) { + if (!gup_huge_pud(pudp, pud, addr, next, + write, pages, nr)) + return 0; + } else if (!gup_pmd_range(pud, addr, next, write, pages, nr)) return 0; } while (pudp++, addr = next, addr != end); diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index 28ee8d8ffa07..bcd8cdbc377f 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -143,6 +143,10 @@ static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift) pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V; switch (shift) { + case HPAGE_16GB_SHIFT: + hugepage_size = _PAGE_SZ16GB_4V; + pte_val(entry) |= _PAGE_PUD_HUGE; + break; case HPAGE_2GB_SHIFT: hugepage_size = _PAGE_SZ2GB_4V; pte_val(entry) |= _PAGE_PMD_HUGE; @@ -187,6 +191,9 @@ static unsigned int sun4v_huge_tte_to_shift(pte_t entry) unsigned int shift; 
switch (tte_szbits) { + case _PAGE_SZ16GB_4V: + shift = HPAGE_16GB_SHIFT; + break; case _PAGE_SZ2GB_4V: shift = HPAGE_2GB_SHIFT; break; @@ -259,22 +266,19 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, pgd_t *pgd; pud_t *pud; pmd_t *pmd; - pte_t *pte = NULL; pgd = pgd_offset(mm, addr); pud = pud_alloc(mm, pgd, addr); - if (pud) { - pmd = pmd_alloc(mm, pud, addr); - if (!pmd) - return NULL; - - if (sz >= PMD_SIZE) - pte = (pte_t *)pmd; - else - pte = pte_alloc_map(mm, pmd, addr); - } - - return pte; + if (!pud) + return NULL; + if (sz >= PUD_SIZE) + return (pte_t *)pud; + pmd = pmd_alloc(mm, pud, addr); + if (!pmd) + return NULL; + if (sz >= PMD_SIZE) + return (pte_t *)pmd; + return pte_alloc_map(mm, pmd, addr); } pte_t *huge_pte_offset(struct mm_struct *mm, @@ -283,34 +287,40 @@ pte_t *huge_pte_offset(struct mm_struct *mm, pgd_t *pgd; pud_t *pud; pmd_t *pmd; - pte_t *pte = NULL; pgd = pgd_offset(mm, addr); - if (!pgd_none(*pgd)) { - pud = pud_offset(pgd, addr); - if (!pud_none(*pud)) { - pmd = pmd_offset(pud, addr); - if (!pmd_none(*pmd)) { - if (is_hugetlb_pmd(*pmd)) - pte = (pte_t *)pmd; - else - pte = pte_offset_map(pmd, addr); - } - } - } - - return pte; + if (pgd_none(*pgd)) + return NULL; + pud = pud_offset(pgd, addr); + if (pud_none(*pud)) + return NULL; + if (is_hugetlb_pud(*pud)) + return (pte_t *)pud; + pmd = pmd_offset(pud, addr); + if (pmd_none(*pmd)) + return NULL; + if (is_hugetlb_pmd(*pmd)) + return (pte_t *)pmd; + return pte_offset_map(pmd, addr); } void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t entry) { - unsigned int i, nptes, orig_shift, shift; - unsigned long size; + unsigned int nptes, orig_shift, shift; + unsigned long i, size; pte_t orig; size = huge_tte_to_size(entry); - shift = size >= HPAGE_SIZE ? PMD_SHIFT : PAGE_SHIFT; + + shift = PAGE_SHIFT; + if (size >= PUD_SIZE) + shift = PUD_SHIFT; + else if (size >= PMD_SIZE) + shift = PMD_SHIFT; + else + shift = PAGE_SHIFT; + nptes = size >> shift; if (!pte_present(*ptep) && pte_present(entry)) @@ -333,19 +343,23 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - unsigned int i, nptes, hugepage_shift; + unsigned int i, nptes, orig_shift, shift; unsigned long size; pte_t entry; entry = *ptep; size = huge_tte_to_size(entry); - if (size >= HPAGE_SIZE) - nptes = size >> PMD_SHIFT; - else - nptes = size >> PAGE_SHIFT; - hugepage_shift = pte_none(entry) ? PAGE_SHIFT : - huge_tte_to_shift(entry); + shift = PAGE_SHIFT; + if (size >= PUD_SIZE) + shift = PUD_SHIFT; + else if (size >= PMD_SIZE) + shift = PMD_SHIFT; + else + shift = PAGE_SHIFT; + + nptes = size >> shift; + orig_shift = pte_none(entry) ? 
PAGE_SHIFT : huge_tte_to_shift(entry); if (pte_present(entry)) mm->context.hugetlb_pte_count -= nptes; @@ -354,11 +368,11 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, for (i = 0; i < nptes; i++) ptep[i] = __pte(0UL); - maybe_tlb_batch_add(mm, addr, ptep, entry, 0, hugepage_shift); + maybe_tlb_batch_add(mm, addr, ptep, entry, 0, orig_shift); /* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */ if (size == HPAGE_SIZE) maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0, - hugepage_shift); + orig_shift); return entry; } @@ -371,7 +385,8 @@ int pmd_huge(pmd_t pmd) int pud_huge(pud_t pud) { - return 0; + return !pud_none(pud) && + (pud_val(pud) & (_PAGE_VALID|_PAGE_PUD_HUGE)) != _PAGE_VALID; } static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, @@ -435,8 +450,11 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, next = pud_addr_end(addr, end); if (pud_none_or_clear_bad(pud)) continue; - hugetlb_free_pmd_range(tlb, pud, addr, next, floor, - ceiling); + if (is_hugetlb_pud(*pud)) + pud_clear(pud); + else + hugetlb_free_pmd_range(tlb, pud, addr, next, floor, + ceiling); } while (pud++, addr = next, addr != end); start &= PGDIR_MASK; diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index afa0099f3748..b2ba410b26f4 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -348,6 +348,18 @@ static int __init hugetlbpage_init(void) arch_initcall(hugetlbpage_init); +static void __init pud_huge_patch(void) +{ + struct pud_huge_patch_entry *p; + unsigned long addr; + + p = &__pud_huge_patch; + addr = p->addr; + *(unsigned int *)addr = p->insn; + + __asm__ __volatile__("flush %0" : : "r" (addr)); +} + static int __init setup_hugepagesz(char *string) { unsigned long long hugepage_size; @@ -360,6 +372,11 @@ static int __init setup_hugepagesz(char *string) hugepage_shift = ilog2(hugepage_size); switch (hugepage_shift) { + case HPAGE_16GB_SHIFT: + hv_pgsz_mask = HV_PGSZ_MASK_16GB; + hv_pgsz_idx = HV_PGSZ_IDX_16GB; + pud_huge_patch(); + break; case HPAGE_2GB_SHIFT: hv_pgsz_mask = HV_PGSZ_MASK_2GB; hv_pgsz_idx = HV_PGSZ_IDX_2GB; @@ -400,6 +417,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t * { struct mm_struct *mm; unsigned long flags; + bool is_huge_tsb; pte_t pte = *ptep; if (tlb_type != hypervisor) { @@ -417,15 +435,37 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t * spin_lock_irqsave(&mm->context.lock, flags); + is_huge_tsb = false; #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) - if ((mm->context.hugetlb_pte_count || mm->context.thp_pte_count) && - is_hugetlb_pmd(__pmd(pte_val(pte)))) { - /* We are fabricating 8MB pages using 4MB real hw pages. */ - pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT)); - __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT, - address, pte_val(pte)); - } else + if (mm->context.hugetlb_pte_count || mm->context.thp_pte_count) { + unsigned long hugepage_size = PAGE_SIZE; + + if (is_vm_hugetlb_page(vma)) + hugepage_size = huge_page_size(hstate_vma(vma)); + + if (hugepage_size >= PUD_SIZE) { + unsigned long mask = 0x1ffc00000UL; + + /* Transfer bits [32:22] from address to resolve + * at 4M granularity. + */ + pte_val(pte) &= ~mask; + pte_val(pte) |= (address & mask); + } else if (hugepage_size >= PMD_SIZE) { + /* We are fabricating 8MB pages using 4MB + * real hw pages. 
+ */ + pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT)); + } + + if (hugepage_size >= PMD_SIZE) { + __update_mmu_tsb_insert(mm, MM_TSB_HUGE, + REAL_HPAGE_SHIFT, address, pte_val(pte)); + is_huge_tsb = true; + } + } #endif + if (!is_huge_tsb) __update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT, address, pte_val(pte)); diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c index 8799ae9a8788..c340af7b1371 100644 --- a/arch/sparc/net/bpf_jit_comp_64.c +++ b/arch/sparc/net/bpf_jit_comp_64.c @@ -128,6 +128,8 @@ static u32 WDISP10(u32 off) #define BA (BRANCH | CONDA) #define BG (BRANCH | CONDG) +#define BL (BRANCH | CONDL) +#define BLE (BRANCH | CONDLE) #define BGU (BRANCH | CONDGU) #define BLEU (BRANCH | CONDLEU) #define BGE (BRANCH | CONDGE) @@ -715,9 +717,15 @@ static int emit_compare_and_branch(const u8 code, const u8 dst, u8 src, case BPF_JGT: br_opcode = BGU; break; + case BPF_JLT: + br_opcode = BLU; + break; case BPF_JGE: br_opcode = BGEU; break; + case BPF_JLE: + br_opcode = BLEU; + break; case BPF_JSET: case BPF_JNE: br_opcode = BNE; @@ -725,9 +733,15 @@ static int emit_compare_and_branch(const u8 code, const u8 dst, u8 src, case BPF_JSGT: br_opcode = BG; break; + case BPF_JSLT: + br_opcode = BL; + break; case BPF_JSGE: br_opcode = BGE; break; + case BPF_JSLE: + br_opcode = BLE; + break; default: /* Make sure we dont leak kernel information to the * user. @@ -746,18 +760,30 @@ static int emit_compare_and_branch(const u8 code, const u8 dst, u8 src, case BPF_JGT: cbcond_opcode = CBCONDGU; break; + case BPF_JLT: + cbcond_opcode = CBCONDLU; + break; case BPF_JGE: cbcond_opcode = CBCONDGEU; break; + case BPF_JLE: + cbcond_opcode = CBCONDLEU; + break; case BPF_JNE: cbcond_opcode = CBCONDNE; break; case BPF_JSGT: cbcond_opcode = CBCONDG; break; + case BPF_JSLT: + cbcond_opcode = CBCONDL; + break; case BPF_JSGE: cbcond_opcode = CBCONDGE; break; + case BPF_JSLE: + cbcond_opcode = CBCONDLE; + break; default: /* Make sure we dont leak kernel information to the * user. 
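A quick check of the PUD-sized branch added to update_mmu_cache() above: the 0x1ffc00000UL mask selects address bits [32:22], so each 4 MB slice of a 16 GB huge page gets its own huge-TSB entry. The stand-alone snippet below is illustrative only, assumes a 64-bit unsigned long (LP64, as on sparc64), and uses a made-up faulting address.

    #include <stdio.h>

    int main(void)
    {
            unsigned long mask = 0x1ffc00000UL;

            /* highest and lowest set bits of the mask: expect 32 and 22 */
            printf("mask covers bits [%d:%d]\n",
                   63 - __builtin_clzl(mask), __builtin_ctzl(mask));
            /* a hypothetical faulting address and its 4 MB slice index */
            printf("slice index = %lu\n", (0x3c0123456UL & mask) >> 22);
            return 0;
    }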
@@ -1176,10 +1202,14 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) /* IF (dst COND src) JUMP off */ case BPF_JMP | BPF_JEQ | BPF_X: case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JLT | BPF_X: case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JLE | BPF_X: case BPF_JMP | BPF_JNE | BPF_X: case BPF_JMP | BPF_JSGT | BPF_X: + case BPF_JMP | BPF_JSLT | BPF_X: case BPF_JMP | BPF_JSGE | BPF_X: + case BPF_JMP | BPF_JSLE | BPF_X: case BPF_JMP | BPF_JSET | BPF_X: { int err; @@ -1191,10 +1221,14 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) /* IF (dst COND imm) JUMP off */ case BPF_JMP | BPF_JEQ | BPF_K: case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JLT | BPF_K: case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JLE | BPF_K: case BPF_JMP | BPF_JNE | BPF_K: case BPF_JMP | BPF_JSGT | BPF_K: + case BPF_JMP | BPF_JSLT | BPF_K: case BPF_JMP | BPF_JSGE | BPF_K: + case BPF_JMP | BPF_JSLE | BPF_K: case BPF_JMP | BPF_JSET | BPF_K: { int err; diff --git a/arch/tile/configs/tilegx_defconfig b/arch/tile/configs/tilegx_defconfig index 0d925fa0f0c1..9f94435cc44f 100644 --- a/arch/tile/configs/tilegx_defconfig +++ b/arch/tile/configs/tilegx_defconfig @@ -409,5 +409,4 @@ CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m -CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m diff --git a/arch/tile/configs/tilepro_defconfig b/arch/tile/configs/tilepro_defconfig index 149d8e8eacb8..1c5bd4f8ffca 100644 --- a/arch/tile/configs/tilepro_defconfig +++ b/arch/tile/configs/tilepro_defconfig @@ -189,7 +189,6 @@ CONFIG_IP_NF_MATCH_ECN=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=y CONFIG_IP_NF_TARGET_REJECT=y -CONFIG_IP_NF_TARGET_ULOG=m CONFIG_IP_NF_MANGLE=m CONFIG_IP_NF_TARGET_ECN=m CONFIG_IP_NF_TARGET_TTL=m @@ -521,7 +520,6 @@ CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m -CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m CONFIG_CRC_CCITT=m CONFIG_CRC7=m diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h index bbc71a29b2c6..7061dc8af43a 100644 --- a/arch/tile/include/asm/dma-mapping.h +++ b/arch/tile/include/asm/dma-mapping.h @@ -68,8 +68,8 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) int dma_set_mask(struct device *dev, u64 mask); /* - * dma_alloc_noncoherent() is #defined to return coherent memory, - * so there's no need to do any flushing here. + * dma_alloc_attrs() always returns non-cacheable memory, so there's no need to + * do any flushing here. 
*/ static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction) diff --git a/arch/tile/include/uapi/asm/siginfo.h b/arch/tile/include/uapi/asm/siginfo.h index 56d661bb010b..e83f931aa1f0 100644 --- a/arch/tile/include/uapi/asm/siginfo.h +++ b/arch/tile/include/uapi/asm/siginfo.h @@ -26,8 +26,8 @@ /* * Additional Tile-specific SIGILL si_codes */ -#define ILL_DBLFLT (__SI_FAULT|9) /* double fault */ -#define ILL_HARDWALL (__SI_FAULT|10) /* user networks hardwall violation */ +#define ILL_DBLFLT 9 /* double fault */ +#define ILL_HARDWALL 10 /* user networks hardwall violation */ #undef NSIGILL #define NSIGILL 10 diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c index 0e863f1ee08c..971d87a1d8cf 100644 --- a/arch/tile/kernel/compat_signal.c +++ b/arch/tile/kernel/compat_signal.c @@ -64,7 +64,7 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *to, const siginfo_t *fr 3 ints plus the relevant union member. */ err = __put_user(from->si_signo, &to->si_signo); err |= __put_user(from->si_errno, &to->si_errno); - err |= __put_user((short)from->si_code, &to->si_code); + err |= __put_user(from->si_code, &to->si_code); if (from->si_code < 0) { err |= __put_user(from->si_pid, &to->si_pid); @@ -77,28 +77,26 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *to, const siginfo_t *fr */ err |= __put_user(from->_sifields._pad[0], &to->_sifields._pad[0]); - switch (from->si_code >> 16) { - case __SI_FAULT >> 16: + switch (siginfo_layout(from->si_signo, from->si_code)) { + case SIL_FAULT: break; - case __SI_CHLD >> 16: + case SIL_CHLD: err |= __put_user(from->si_utime, &to->si_utime); err |= __put_user(from->si_stime, &to->si_stime); err |= __put_user(from->si_status, &to->si_status); /* FALL THROUGH */ default: - case __SI_KILL >> 16: + case SIL_KILL: err |= __put_user(from->si_uid, &to->si_uid); break; - case __SI_POLL >> 16: + case SIL_POLL: err |= __put_user(from->si_fd, &to->si_fd); break; - case __SI_TIMER >> 16: + case SIL_TIMER: err |= __put_user(from->si_overrun, &to->si_overrun); err |= __put_user(from->si_int, &to->si_int); break; - /* This is not generated by the kernel as of now. */ - case __SI_RT >> 16: - case __SI_MESGQ >> 16: + case SIL_RT: err |= __put_user(from->si_uid, &to->si_uid); err |= __put_user(from->si_int, &to->si_int); break; diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c index bc6656b5708b..bbf81579b1f8 100644 --- a/arch/tile/kernel/pci.c +++ b/arch/tile/kernel/pci.c @@ -66,16 +66,6 @@ static int pci_scan_flags[TILE_NUM_PCIE]; static struct pci_ops tile_cfg_ops; -/* - * We don't need to worry about the alignment of resources. - */ -resource_size_t pcibios_align_resource(void *data, const struct resource *res, - resource_size_t size, resource_size_t align) -{ - return res->start; -} -EXPORT_SYMBOL(pcibios_align_resource); - /* * Open a FD to the hypervisor PCI device. 
* @@ -274,6 +264,7 @@ static void fixup_read_and_payload_sizes(void) */ int __init pcibios_init(void) { + struct pci_host_bridge *bridge; int i; pr_info("PCI: Probing PCI hardware\n"); @@ -306,16 +297,26 @@ int __init pcibios_init(void) pci_add_resource(&resources, &ioport_resource); pci_add_resource(&resources, &iomem_resource); - bus = pci_scan_root_bus(NULL, 0, controller->ops, - controller, &resources); + + bridge = pci_alloc_host_bridge(0); + if (!bridge) + break; + + list_splice_init(&resources, &bridge->windows); + bridge->dev.parent = NULL; + bridge->sysdata = controller; + bridge->busnr = 0; + bridge->ops = controller->ops; + bridge->swizzle_irq = pci_common_swizzle; + bridge->map_irq = tile_map_irq; + + pci_scan_root_bus_bridge(bridge); + bus = bridge->bus; controller->root_bus = bus; controller->last_busno = bus->busn_res.end; } } - /* Do machine dependent PCI interrupt routing */ - pci_fixup_irqs(pci_common_swizzle, tile_map_irq); - /* * This comes from the generic Linux PCI driver. * @@ -369,14 +370,6 @@ int __init pcibios_init(void) } subsys_initcall(pcibios_init); -/* - * No bus fixups needed. - */ -void pcibios_fixup_bus(struct pci_bus *bus) -{ - /* Nothing needs to be done. */ -} - void pcibios_set_master(struct pci_dev *dev) { /* No special bus mastering setup handling. */ diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c index b554a68eea1b..9aa238ac7b35 100644 --- a/arch/tile/kernel/pci_gx.c +++ b/arch/tile/kernel/pci_gx.c @@ -108,15 +108,6 @@ static struct pci_ops tile_cfg_ops; /* Mask of CPUs that should receive PCIe interrupts. */ static struct cpumask intr_cpus_map; -/* We don't need to worry about the alignment of resources. */ -resource_size_t pcibios_align_resource(void *data, const struct resource *res, - resource_size_t size, - resource_size_t align) -{ - return res->start; -} -EXPORT_SYMBOL(pcibios_align_resource); - /* * Pick a CPU to receive and handle the PCIe interrupts, based on the IRQ #. * For now, we simply send interrupts to non-dataplane CPUs. @@ -669,6 +660,7 @@ int __init pcibios_init(void) resource_size_t offset; LIST_HEAD(resources); int next_busno; + struct pci_host_bridge *bridge; int i; tile_pci_init(); @@ -881,15 +873,25 @@ int __init pcibios_init(void) controller->mem_offset); pci_add_resource(&resources, &controller->io_space); controller->first_busno = next_busno; - bus = pci_scan_root_bus(NULL, next_busno, controller->ops, - controller, &resources); + + bridge = pci_alloc_host_bridge(0); + if (!bridge) + break; + + list_splice_init(&resources, &bridge->windows); + bridge->dev.parent = NULL; + bridge->sysdata = controller; + bridge->busnr = next_busno; + bridge->ops = controller->ops; + bridge->swizzle_irq = pci_common_swizzle; + bridge->map_irq = tile_map_irq; + + pci_scan_root_bus_bridge(bridge); + bus = bridge->bus; controller->root_bus = bus; next_busno = bus->busn_res.end + 1; } - /* Do machine dependent PCI interrupt routing */ - pci_fixup_irqs(pci_common_swizzle, tile_map_irq); - /* * This comes from the generic Linux PCI driver. * @@ -1038,11 +1040,6 @@ int __init pcibios_init(void) } subsys_initcall(pcibios_init); -/* No bus fixups needed. */ -void pcibios_fixup_bus(struct pci_bus *bus) -{ -} - /* Process any "pci=" kernel boot arguments. 
*/ char *__init pcibios_setup(char *str) { diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c index 443a70bccc1c..ad83c1e66dbd 100644 --- a/arch/tile/kernel/setup.c +++ b/arch/tile/kernel/setup.c @@ -140,7 +140,7 @@ static int __init setup_maxnodemem(char *str) { char *endp; unsigned long long maxnodemem; - long node; + unsigned long node; node = str ? simple_strtoul(str, &endp, 0) : INT_MAX; if (node >= MAX_NUMNODES || *endp != ':') @@ -1200,7 +1200,7 @@ static void __init validate_hv(void) * We use a struct cpumask for this, so it must be big enough. */ if ((smp_height * smp_width) > nr_cpu_ids) - early_panic("Hypervisor %d x %d grid too big for Linux NR_CPUS %d\n", + early_panic("Hypervisor %d x %d grid too big for Linux NR_CPUS %u\n", smp_height, smp_width, nr_cpu_ids); #endif diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c index 54804866f238..9b08c6055f15 100644 --- a/arch/tile/kernel/traps.c +++ b/arch/tile/kernel/traps.c @@ -188,7 +188,7 @@ static int special_ill(tile_bundle_bits bundle, int *sigp, int *codep) /* Make it the requested signal. */ *sigp = sig; - *codep = code | __SI_FAULT; + *codep = code; return 1; } diff --git a/arch/um/Kconfig.um b/arch/um/Kconfig.um index 4b2ed5858b2e..e26376ab5452 100644 --- a/arch/um/Kconfig.um +++ b/arch/um/Kconfig.um @@ -20,6 +20,7 @@ config LD_SCRIPT_DYN bool default y depends on !LD_SCRIPT_STATIC + select MODULE_REL_CRCS if MODVERSIONS source "fs/Kconfig.binfmt" diff --git a/arch/um/Makefile b/arch/um/Makefile index 6ca4f66085c1..b76fcce397a1 100644 --- a/arch/um/Makefile +++ b/arch/um/Makefile @@ -121,7 +121,7 @@ archheaders: archprepare: include/generated/user_constants.h LINK-$(CONFIG_LD_SCRIPT_STATIC) += -static -LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib +LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib $(call cc-option, -no-pie) CFLAGS_NO_HARDENING := $(call cc-option, -fno-PIC,) $(call cc-option, -fno-pic,) \ $(call cc-option, -fno-stack-protector,) \ diff --git a/arch/um/configs/i386_defconfig b/arch/um/configs/i386_defconfig index 5636221b8785..8f114e3b0a7a 100644 --- a/arch/um/configs/i386_defconfig +++ b/arch/um/configs/i386_defconfig @@ -53,7 +53,6 @@ CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y CONFIG_INET=y -# CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set CONFIG_UML_NET=y CONFIG_UML_NET_ETHERTAP=y diff --git a/arch/um/configs/x86_64_defconfig b/arch/um/configs/x86_64_defconfig index 7a67b7ac1a7e..5d0875fc0db2 100644 --- a/arch/um/configs/x86_64_defconfig +++ b/arch/um/configs/x86_64_defconfig @@ -51,7 +51,6 @@ CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y CONFIG_INET=y -# CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set CONFIG_UML_NET=y CONFIG_UML_NET_ETHERTAP=y diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c index af326fb6510d..c4d162a94be9 100644 --- a/arch/um/drivers/mconsole_kern.c +++ b/arch/um/drivers/mconsole_kern.c @@ -148,12 +148,7 @@ void mconsole_proc(struct mc_request *req) } do { - loff_t pos = file->f_pos; - mm_segment_t old_fs = get_fs(); - set_fs(KERNEL_DS); - len = vfs_read(file, buf, PAGE_SIZE - 1, &pos); - set_fs(old_fs); - file->f_pos = pos; + len = kernel_read(file, buf, PAGE_SIZE - 1, &file->f_pos); if (len < 0) { mconsole_reply(req, "Read of file failed", 1, 0); goto out_free; diff --git a/arch/um/include/asm/processor-generic.h b/arch/um/include/asm/processor-generic.h index f6d1a3f747a9..86942a492454 100644 --- a/arch/um/include/asm/processor-generic.h +++ b/arch/um/include/asm/processor-generic.h @@ -58,11 +58,6 @@ static inline void 
release_thread(struct task_struct *task) { } -static inline void mm_copy_segments(struct mm_struct *from_mm, - struct mm_struct *new_mm) -{ -} - #define init_stack (init_thread_union.stack) /* diff --git a/arch/um/include/asm/thread_info.h b/arch/um/include/asm/thread_info.h index 053baff03674..9300f7630d2a 100644 --- a/arch/um/include/asm/thread_info.h +++ b/arch/um/include/asm/thread_info.h @@ -11,6 +11,7 @@ #include #include #include +#include struct thread_info { struct task_struct *task; /* main task structure */ @@ -22,6 +23,8 @@ struct thread_info { 0-0xBFFFFFFF for user 0-0xFFFFFFFF for kernel */ struct thread_info *real_thread; /* Points to non-IRQ stack */ + unsigned long aux_fp_regs[FP_SIZE]; /* auxiliary fp_regs to save/restore + them out-of-band */ }; #define INIT_THREAD_INFO(tsk) \ diff --git a/arch/um/include/shared/os.h b/arch/um/include/shared/os.h index 574e03fc7ba2..d8ddaf9790d2 100644 --- a/arch/um/include/shared/os.h +++ b/arch/um/include/shared/os.h @@ -278,7 +278,7 @@ extern int protect(struct mm_id * mm_idp, unsigned long addr, extern int is_skas_winch(int pid, int fd, void *data); extern int start_userspace(unsigned long stub_stack); extern int copy_context_skas0(unsigned long stack, int pid); -extern void userspace(struct uml_pt_regs *regs); +extern void userspace(struct uml_pt_regs *regs, unsigned long *aux_fp_regs); extern int map_stub_pages(int fd, unsigned long code, unsigned long data, unsigned long stack); extern void new_thread(void *stack, jmp_buf *buf, void (*handler)(void)); diff --git a/arch/um/kernel/gmon_syms.c b/arch/um/kernel/gmon_syms.c index 1bf61266da8e..f138a4a0db99 100644 --- a/arch/um/kernel/gmon_syms.c +++ b/arch/um/kernel/gmon_syms.c @@ -7,3 +7,10 @@ extern void __bb_init_func(void *) __attribute__((weak)); EXPORT_SYMBOL(__bb_init_func); + +extern void __gcov_init(void *) __attribute__((weak)); +EXPORT_SYMBOL(__gcov_init); +extern void __gcov_merge_add(void *, unsigned int) __attribute__((weak)); +EXPORT_SYMBOL(__gcov_merge_add); +extern void __gcov_exit(void) __attribute__((weak)); +EXPORT_SYMBOL(__gcov_exit); diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c index 2c7f721eccbc..691b83b10649 100644 --- a/arch/um/kernel/process.c +++ b/arch/um/kernel/process.c @@ -131,7 +131,7 @@ void new_thread_handler(void) * callback returns only if the kernel thread execs a process */ n = fn(arg); - userspace(¤t->thread.regs.regs); + userspace(¤t->thread.regs.regs, current_thread_info()->aux_fp_regs); } /* Called magically, see new_thread_handler above */ @@ -150,7 +150,7 @@ void fork_handler(void) current->thread.prev_sched = NULL; - userspace(¤t->thread.regs.regs); + userspace(¤t->thread.regs.regs, current_thread_info()->aux_fp_regs); } int copy_thread(unsigned long clone_flags, unsigned long sp, diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c index 0b034ebbda2a..7f69d17de354 100644 --- a/arch/um/kernel/time.c +++ b/arch/um/kernel/time.c @@ -98,7 +98,7 @@ static struct clocksource timer_clocksource = { .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; -static void __init timer_setup(void) +static void __init um_timer_setup(void) { int err; @@ -132,5 +132,5 @@ void read_persistent_clock(struct timespec *ts) void __init time_init(void) { timer_set_signal_handler(); - late_time_init = timer_setup; + late_time_init = um_timer_setup; } diff --git a/arch/um/os-Linux/drivers/tuntap_user.c b/arch/um/os-Linux/drivers/tuntap_user.c index c2e6e1dad876..db24ce0d09a6 100644 --- a/arch/um/os-Linux/drivers/tuntap_user.c +++ 
b/arch/um/os-Linux/drivers/tuntap_user.c @@ -80,7 +80,7 @@ static int tuntap_open_tramp(char *gate, int *fd_out, int me, int remote, pid = run_helper(tuntap_pre_exec, &data, argv); if (pid < 0) - return -pid; + return pid; close(remote); diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c index 819d68656673..c94c3bd70ccd 100644 --- a/arch/um/os-Linux/skas/process.c +++ b/arch/um/os-Linux/skas/process.c @@ -88,12 +88,11 @@ void wait_stub_done(int pid) extern unsigned long current_stub_stack(void); -static void get_skas_faultinfo(int pid, struct faultinfo *fi) +static void get_skas_faultinfo(int pid, struct faultinfo *fi, unsigned long *aux_fp_regs) { int err; - unsigned long fpregs[FP_SIZE]; - err = get_fp_registers(pid, fpregs); + err = get_fp_registers(pid, aux_fp_regs); if (err < 0) { printk(UM_KERN_ERR "save_fp_registers returned %d\n", err); @@ -113,7 +112,7 @@ static void get_skas_faultinfo(int pid, struct faultinfo *fi) */ memcpy(fi, (void *)current_stub_stack(), sizeof(*fi)); - err = put_fp_registers(pid, fpregs); + err = put_fp_registers(pid, aux_fp_regs); if (err < 0) { printk(UM_KERN_ERR "put_fp_registers returned %d\n", err); @@ -121,9 +120,9 @@ static void get_skas_faultinfo(int pid, struct faultinfo *fi) } } -static void handle_segv(int pid, struct uml_pt_regs * regs) +static void handle_segv(int pid, struct uml_pt_regs *regs, unsigned long *aux_fp_regs) { - get_skas_faultinfo(pid, ®s->faultinfo); + get_skas_faultinfo(pid, ®s->faultinfo, aux_fp_regs); segv(regs->faultinfo, 0, 1, NULL); } @@ -332,7 +331,7 @@ int start_userspace(unsigned long stub_stack) return err; } -void userspace(struct uml_pt_regs *regs) +void userspace(struct uml_pt_regs *regs, unsigned long *aux_fp_regs) { int err, status, op, pid = userspace_pid[0]; /* To prevent races if using_sysemu changes under us.*/ @@ -407,11 +406,11 @@ void userspace(struct uml_pt_regs *regs) case SIGSEGV: if (PTRACE_FULL_FAULTINFO) { get_skas_faultinfo(pid, - ®s->faultinfo); + ®s->faultinfo, aux_fp_regs); (*sig_info[SIGSEGV])(SIGSEGV, (struct siginfo *)&si, regs); } - else handle_segv(pid, regs); + else handle_segv(pid, regs, aux_fp_regs); break; case SIGTRAP + 0x80: handle_trap(pid, regs, local_using_sysemu); diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c index b1b6b75c5b17..82bf5f8442ba 100644 --- a/arch/um/os-Linux/start_up.c +++ b/arch/um/os-Linux/start_up.c @@ -154,10 +154,10 @@ static int __init nosysemu_cmd_param(char *str, int* add) __uml_setup("nosysemu", nosysemu_cmd_param, "nosysemu\n" -" Turns off syscall emulation patch for ptrace (SYSEMU) on.\n" +" Turns off syscall emulation patch for ptrace (SYSEMU).\n" " SYSEMU is a performance-patch introduced by Laurent Vivier. 
It changes\n" -" behaviour of ptrace() and helps reducing host context switch rate.\n" -" To make it working, you need a kernel patch for your host, too.\n" +" behaviour of ptrace() and helps reduce host context switch rates.\n" +" To make it work, you need a kernel patch for your host, too.\n" " See http://perso.wanadoo.fr/laurent.vivier/UML/ for further \n" " information.\n\n"); diff --git a/arch/unicore32/kernel/pci.c b/arch/unicore32/kernel/pci.c index 1053bca1f8aa..9f26840e41b1 100644 --- a/arch/unicore32/kernel/pci.c +++ b/arch/unicore32/kernel/pci.c @@ -101,7 +101,7 @@ void pci_puv3_preinit(void) writel(readl(PCIBRI_CMD) | PCIBRI_CMD_IO | PCIBRI_CMD_MEM, PCIBRI_CMD); } -static int __init pci_puv3_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +static int pci_puv3_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { if (dev->bus->number == 0) { #ifdef CONFIG_ARCH_FPGA /* 4 pci slots */ @@ -252,19 +252,46 @@ void pcibios_fixup_bus(struct pci_bus *bus) } EXPORT_SYMBOL(pcibios_fixup_bus); +static struct resource busn_resource = { + .name = "PCI busn", + .start = 0, + .end = 255, + .flags = IORESOURCE_BUS, +}; + static int __init pci_common_init(void) { struct pci_bus *puv3_bus; + struct pci_host_bridge *bridge; + int ret; + + bridge = pci_alloc_host_bridge(0); + if (!bridge) + return -ENOMEM; pci_puv3_preinit(); - puv3_bus = pci_scan_bus(0, &pci_puv3_ops, NULL); + pci_add_resource(&bridge->windows, &ioport_resource); + pci_add_resource(&bridge->windows, &iomem_resource); + pci_add_resource(&bridge->windows, &busn_resource); + bridge->sysdata = NULL; + bridge->busnr = 0; + bridge->ops = &pci_puv3_ops; + bridge->swizzle_irq = pci_common_swizzle; + bridge->map_irq = pci_puv3_map_irq; + + /* Scan our single hose. */ + ret = pci_scan_root_bus_bridge(bridge); + if (ret) { + pci_free_host_bridge(bridge); + return; + } + + puv3_bus = bridge->bus; if (!puv3_bus) panic("PCI: unable to scan bus!"); - pci_fixup_irqs(pci_common_swizzle, pci_puv3_map_irq); - pci_bus_size_bridges(puv3_bus); pci_bus_assign_resources(puv3_bus); pci_bus_add_devices(puv3_bus); diff --git a/arch/x86/Kbuild b/arch/x86/Kbuild index f65a804b86f0..0038a2d10a7a 100644 --- a/arch/x86/Kbuild +++ b/arch/x86/Kbuild @@ -8,7 +8,7 @@ obj-$(CONFIG_KVM) += kvm/ obj-$(CONFIG_XEN) += xen/ # Hyper-V paravirtualization support -obj-$(CONFIG_HYPERVISOR_GUEST) += hyperv/ +obj-$(subst m,y,$(CONFIG_HYPERV)) += hyperv/ obj-y += realmode/ obj-y += kernel/ diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index acb366bf6bc1..971feac13506 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -53,7 +53,6 @@ config X86 select ARCH_HAS_FORTIFY_SOURCE select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_KCOV if X86_64 - select ARCH_HAS_MMIO_FLUSH select ARCH_HAS_PMEM_API if X86_64 # Causing hangs/crashes, see the commit that added this change for details. 
select ARCH_HAS_REFCOUNT if BROKEN @@ -1806,7 +1805,9 @@ config X86_SMAP config X86_INTEL_MPX prompt "Intel MPX (Memory Protection Extensions)" def_bool n - depends on CPU_SUP_INTEL + # Note: only available in 64-bit mode due to VMA flags shortage + depends on CPU_SUP_INTEL && X86_64 + select ARCH_USES_HIGH_VMA_FLAGS ---help--- MPX provides hardware features that can be used in conjunction with compiler-instrumented code to check @@ -2321,6 +2322,10 @@ source "kernel/livepatch/Kconfig" endmenu +config ARCH_HAS_ADD_PAGES + def_bool y + depends on X86_64 && ARCH_ENABLE_MEMORY_HOTPLUG + config ARCH_ENABLE_MEMORY_HOTPLUG def_bool y depends on X86_64 || (X86_32 && HIGHMEM) @@ -2341,6 +2346,10 @@ config ARCH_ENABLE_HUGEPAGE_MIGRATION def_bool y depends on X86_64 && HUGETLB_PAGE && MIGRATION +config ARCH_ENABLE_THP_MIGRATION + def_bool y + depends on X86_64 && TRANSPARENT_HUGEPAGE + menu "Power management and ACPI options" config ARCH_HIBERNATION_HEADER diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index 926c2cc4facc..e56dbc67e837 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c @@ -997,6 +997,9 @@ struct boot_params *efi_main(struct efi_config *c, if (boot_params->secure_boot == efi_secureboot_mode_unset) boot_params->secure_boot = efi_get_secureboot(sys_table); + /* Ask the firmware to clear memory on unclean shutdown */ + efi_enable_reset_attack_mitigation(sys_table); + setup_graphics(boot_params); setup_efi_pci(boot_params); diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index 4a55cdcdc008..5c15d6b57329 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -475,8 +475,8 @@ static void ctr_crypt_final(struct crypto_aes_ctx *ctx, unsigned int nbytes = walk->nbytes; aesni_enc(ctx, keystream, ctrblk); - crypto_xor(keystream, src, nbytes); - memcpy(dst, keystream, nbytes); + crypto_xor_cpy(dst, keystream, src, nbytes); + crypto_inc(ctrblk, AES_BLOCK_SIZE); } diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S index 246c67006ed0..8c1fcb6bad21 100644 --- a/arch/x86/crypto/blowfish-x86_64-asm_64.S +++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S @@ -33,7 +33,7 @@ #define s3 ((16 + 2 + (3 * 256)) * 4) /* register macros */ -#define CTX %rdi +#define CTX %r12 #define RIO %rsi #define RX0 %rax @@ -56,12 +56,12 @@ #define RX2bh %ch #define RX3bh %dh -#define RT0 %rbp +#define RT0 %rdi #define RT1 %rsi #define RT2 %r8 #define RT3 %r9 -#define RT0d %ebp +#define RT0d %edi #define RT1d %esi #define RT2d %r8d #define RT3d %r9d @@ -120,13 +120,14 @@ ENTRY(__blowfish_enc_blk) /* input: - * %rdi: ctx, CTX + * %rdi: ctx * %rsi: dst * %rdx: src * %rcx: bool, if true: xor output */ - movq %rbp, %r11; + movq %r12, %r11; + movq %rdi, CTX; movq %rsi, %r10; movq %rdx, RIO; @@ -142,7 +143,7 @@ ENTRY(__blowfish_enc_blk) round_enc(14); add_roundkey_enc(16); - movq %r11, %rbp; + movq %r11, %r12; movq %r10, RIO; test %cl, %cl; @@ -157,12 +158,13 @@ ENDPROC(__blowfish_enc_blk) ENTRY(blowfish_dec_blk) /* input: - * %rdi: ctx, CTX + * %rdi: ctx * %rsi: dst * %rdx: src */ - movq %rbp, %r11; + movq %r12, %r11; + movq %rdi, CTX; movq %rsi, %r10; movq %rdx, RIO; @@ -181,7 +183,7 @@ ENTRY(blowfish_dec_blk) movq %r10, RIO; write_block(); - movq %r11, %rbp; + movq %r11, %r12; ret; ENDPROC(blowfish_dec_blk) @@ -298,20 +300,21 @@ ENDPROC(blowfish_dec_blk) ENTRY(__blowfish_enc_blk_4way) /* input: - * %rdi: ctx, CTX + * %rdi: ctx * %rsi: dst * %rdx: 
src * %rcx: bool, if true: xor output */ - pushq %rbp; + pushq %r12; pushq %rbx; pushq %rcx; - preload_roundkey_enc(0); - + movq %rdi, CTX movq %rsi, %r11; movq %rdx, RIO; + preload_roundkey_enc(0); + read_block4(); round_enc4(0); @@ -324,39 +327,40 @@ ENTRY(__blowfish_enc_blk_4way) round_enc4(14); add_preloaded_roundkey4(); - popq %rbp; + popq %r12; movq %r11, RIO; - test %bpl, %bpl; + test %r12b, %r12b; jnz .L__enc_xor4; write_block4(); popq %rbx; - popq %rbp; + popq %r12; ret; .L__enc_xor4: xor_block4(); popq %rbx; - popq %rbp; + popq %r12; ret; ENDPROC(__blowfish_enc_blk_4way) ENTRY(blowfish_dec_blk_4way) /* input: - * %rdi: ctx, CTX + * %rdi: ctx * %rsi: dst * %rdx: src */ - pushq %rbp; + pushq %r12; pushq %rbx; - preload_roundkey_dec(17); - movq %rsi, %r11; + movq %rdi, CTX; + movq %rsi, %r11 movq %rdx, RIO; + preload_roundkey_dec(17); read_block4(); round_dec4(17); @@ -373,7 +377,7 @@ ENTRY(blowfish_dec_blk_4way) write_block4(); popq %rbx; - popq %rbp; + popq %r12; ret; ENDPROC(blowfish_dec_blk_4way) diff --git a/arch/x86/crypto/blowfish_glue.c b/arch/x86/crypto/blowfish_glue.c index 17c05531dfd1..f9eca34301e2 100644 --- a/arch/x86/crypto/blowfish_glue.c +++ b/arch/x86/crypto/blowfish_glue.c @@ -271,8 +271,7 @@ static void ctr_crypt_final(struct bf_ctx *ctx, struct blkcipher_walk *walk) unsigned int nbytes = walk->nbytes; blowfish_enc_blk(ctx, keystream, ctrblk); - crypto_xor(keystream, src, nbytes); - memcpy(dst, keystream, nbytes); + crypto_xor_cpy(dst, keystream, src, nbytes); crypto_inc(ctrblk, BF_BLOCK_SIZE); } diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S index 310319c601ed..95ba6956a7f6 100644 --- a/arch/x86/crypto/camellia-x86_64-asm_64.S +++ b/arch/x86/crypto/camellia-x86_64-asm_64.S @@ -75,17 +75,17 @@ #define RCD1bh %dh #define RT0 %rsi -#define RT1 %rbp +#define RT1 %r12 #define RT2 %r8 #define RT0d %esi -#define RT1d %ebp +#define RT1d %r12d #define RT2d %r8d #define RT2bl %r8b #define RXOR %r9 -#define RRBP %r10 +#define RR12 %r10 #define RDST %r11 #define RXORd %r9d @@ -197,7 +197,7 @@ ENTRY(__camellia_enc_blk) * %rdx: src * %rcx: bool xor */ - movq %rbp, RRBP; + movq %r12, RR12; movq %rcx, RXOR; movq %rsi, RDST; @@ -227,13 +227,13 @@ ENTRY(__camellia_enc_blk) enc_outunpack(mov, RT1); - movq RRBP, %rbp; + movq RR12, %r12; ret; .L__enc_xor: enc_outunpack(xor, RT1); - movq RRBP, %rbp; + movq RR12, %r12; ret; ENDPROC(__camellia_enc_blk) @@ -248,7 +248,7 @@ ENTRY(camellia_dec_blk) movl $24, RXORd; cmovel RXORd, RT2d; /* max */ - movq %rbp, RRBP; + movq %r12, RR12; movq %rsi, RDST; movq %rdx, RIO; @@ -271,7 +271,7 @@ ENTRY(camellia_dec_blk) dec_outunpack(); - movq RRBP, %rbp; + movq RR12, %r12; ret; ENDPROC(camellia_dec_blk) @@ -433,7 +433,7 @@ ENTRY(__camellia_enc_blk_2way) */ pushq %rbx; - movq %rbp, RRBP; + movq %r12, RR12; movq %rcx, RXOR; movq %rsi, RDST; movq %rdx, RIO; @@ -461,14 +461,14 @@ ENTRY(__camellia_enc_blk_2way) enc_outunpack2(mov, RT2); - movq RRBP, %rbp; + movq RR12, %r12; popq %rbx; ret; .L__enc2_xor: enc_outunpack2(xor, RT2); - movq RRBP, %rbp; + movq RR12, %r12; popq %rbx; ret; ENDPROC(__camellia_enc_blk_2way) @@ -485,7 +485,7 @@ ENTRY(camellia_dec_blk_2way) cmovel RXORd, RT2d; /* max */ movq %rbx, RXOR; - movq %rbp, RRBP; + movq %r12, RR12; movq %rsi, RDST; movq %rdx, RIO; @@ -508,7 +508,7 @@ ENTRY(camellia_dec_blk_2way) dec_outunpack2(); - movq RRBP, %rbp; + movq RR12, %r12; movq RXOR, %rbx; ret; ENDPROC(camellia_dec_blk_2way) diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S 
b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S index b4a8806234ea..86107c961bb4 100644 --- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S +++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S @@ -47,7 +47,7 @@ /********************************************************************** 16-way AVX cast5 **********************************************************************/ -#define CTX %rdi +#define CTX %r15 #define RL1 %xmm0 #define RR1 %xmm1 @@ -70,8 +70,8 @@ #define RTMP %xmm15 -#define RID1 %rbp -#define RID1d %ebp +#define RID1 %rdi +#define RID1d %edi #define RID2 %rsi #define RID2d %esi @@ -226,7 +226,7 @@ .align 16 __cast5_enc_blk16: /* input: - * %rdi: ctx, CTX + * %rdi: ctx * RL1: blocks 1 and 2 * RR1: blocks 3 and 4 * RL2: blocks 5 and 6 @@ -246,9 +246,11 @@ __cast5_enc_blk16: * RR4: encrypted blocks 15 and 16 */ - pushq %rbp; + pushq %r15; pushq %rbx; + movq %rdi, CTX; + vmovdqa .Lbswap_mask, RKM; vmovd .Lfirst_mask, R1ST; vmovd .L32_mask, R32; @@ -283,7 +285,7 @@ __cast5_enc_blk16: .L__skip_enc: popq %rbx; - popq %rbp; + popq %r15; vmovdqa .Lbswap_mask, RKM; @@ -298,7 +300,7 @@ ENDPROC(__cast5_enc_blk16) .align 16 __cast5_dec_blk16: /* input: - * %rdi: ctx, CTX + * %rdi: ctx * RL1: encrypted blocks 1 and 2 * RR1: encrypted blocks 3 and 4 * RL2: encrypted blocks 5 and 6 @@ -318,9 +320,11 @@ __cast5_dec_blk16: * RR4: decrypted blocks 15 and 16 */ - pushq %rbp; + pushq %r15; pushq %rbx; + movq %rdi, CTX; + vmovdqa .Lbswap_mask, RKM; vmovd .Lfirst_mask, R1ST; vmovd .L32_mask, R32; @@ -356,7 +360,7 @@ __cast5_dec_blk16: vmovdqa .Lbswap_mask, RKM; popq %rbx; - popq %rbp; + popq %r15; outunpack_blocks(RR1, RL1, RTMP, RX, RKM); outunpack_blocks(RR2, RL2, RTMP, RX, RKM); @@ -372,12 +376,14 @@ ENDPROC(__cast5_dec_blk16) ENTRY(cast5_ecb_enc_16way) /* input: - * %rdi: ctx, CTX + * %rdi: ctx * %rsi: dst * %rdx: src */ FRAME_BEGIN + pushq %r15; + movq %rdi, CTX; movq %rsi, %r11; vmovdqu (0*4*4)(%rdx), RL1; @@ -400,18 +406,22 @@ ENTRY(cast5_ecb_enc_16way) vmovdqu RR4, (6*4*4)(%r11); vmovdqu RL4, (7*4*4)(%r11); + popq %r15; FRAME_END ret; ENDPROC(cast5_ecb_enc_16way) ENTRY(cast5_ecb_dec_16way) /* input: - * %rdi: ctx, CTX + * %rdi: ctx * %rsi: dst * %rdx: src */ FRAME_BEGIN + pushq %r15; + + movq %rdi, CTX; movq %rsi, %r11; vmovdqu (0*4*4)(%rdx), RL1; @@ -434,20 +444,22 @@ ENTRY(cast5_ecb_dec_16way) vmovdqu RR4, (6*4*4)(%r11); vmovdqu RL4, (7*4*4)(%r11); + popq %r15; FRAME_END ret; ENDPROC(cast5_ecb_dec_16way) ENTRY(cast5_cbc_dec_16way) /* input: - * %rdi: ctx, CTX + * %rdi: ctx * %rsi: dst * %rdx: src */ FRAME_BEGIN - pushq %r12; + pushq %r15; + movq %rdi, CTX; movq %rsi, %r11; movq %rdx, %r12; @@ -483,23 +495,24 @@ ENTRY(cast5_cbc_dec_16way) vmovdqu RR4, (6*16)(%r11); vmovdqu RL4, (7*16)(%r11); + popq %r15; popq %r12; - FRAME_END ret; ENDPROC(cast5_cbc_dec_16way) ENTRY(cast5_ctr_16way) /* input: - * %rdi: ctx, CTX + * %rdi: ctx * %rsi: dst * %rdx: src * %rcx: iv (big endian, 64bit) */ FRAME_BEGIN - pushq %r12; + pushq %r15; + movq %rdi, CTX; movq %rsi, %r11; movq %rdx, %r12; @@ -558,8 +571,8 @@ ENTRY(cast5_ctr_16way) vmovdqu RR4, (6*16)(%r11); vmovdqu RL4, (7*16)(%r11); + popq %r15; popq %r12; - FRAME_END ret; ENDPROC(cast5_ctr_16way) diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c index 8648158f3916..dbea6020ffe7 100644 --- a/arch/x86/crypto/cast5_avx_glue.c +++ b/arch/x86/crypto/cast5_avx_glue.c @@ -256,8 +256,7 @@ static void ctr_crypt_final(struct blkcipher_desc *desc, unsigned int nbytes = walk->nbytes; __cast5_encrypt(ctx, keystream, ctrblk); - crypto_xor(keystream, 
src, nbytes); - memcpy(dst, keystream, nbytes); + crypto_xor_cpy(dst, keystream, src, nbytes); crypto_inc(ctrblk, CAST5_BLOCK_SIZE); } diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S index 952d3156a933..7f30b6f0d72c 100644 --- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S +++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S @@ -47,7 +47,7 @@ /********************************************************************** 8-way AVX cast6 **********************************************************************/ -#define CTX %rdi +#define CTX %r15 #define RA1 %xmm0 #define RB1 %xmm1 @@ -70,8 +70,8 @@ #define RTMP %xmm15 -#define RID1 %rbp -#define RID1d %ebp +#define RID1 %rdi +#define RID1d %edi #define RID2 %rsi #define RID2d %esi @@ -264,15 +264,17 @@ .align 8 __cast6_enc_blk8: /* input: - * %rdi: ctx, CTX + * %rdi: ctx * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: blocks * output: * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks */ - pushq %rbp; + pushq %r15; pushq %rbx; + movq %rdi, CTX; + vmovdqa .Lbswap_mask, RKM; vmovd .Lfirst_mask, R1ST; vmovd .L32_mask, R32; @@ -297,7 +299,7 @@ __cast6_enc_blk8: QBAR(11); popq %rbx; - popq %rbp; + popq %r15; vmovdqa .Lbswap_mask, RKM; @@ -310,15 +312,17 @@ ENDPROC(__cast6_enc_blk8) .align 8 __cast6_dec_blk8: /* input: - * %rdi: ctx, CTX + * %rdi: ctx * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks * output: * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: decrypted blocks */ - pushq %rbp; + pushq %r15; pushq %rbx; + movq %rdi, CTX; + vmovdqa .Lbswap_mask, RKM; vmovd .Lfirst_mask, R1ST; vmovd .L32_mask, R32; @@ -343,7 +347,7 @@ __cast6_dec_blk8: QBAR(0); popq %rbx; - popq %rbp; + popq %r15; vmovdqa .Lbswap_mask, RKM; outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM); @@ -354,12 +358,14 @@ ENDPROC(__cast6_dec_blk8) ENTRY(cast6_ecb_enc_8way) /* input: - * %rdi: ctx, CTX + * %rdi: ctx * %rsi: dst * %rdx: src */ FRAME_BEGIN + pushq %r15; + movq %rdi, CTX; movq %rsi, %r11; load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); @@ -368,18 +374,21 @@ ENTRY(cast6_ecb_enc_8way) store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); + popq %r15; FRAME_END ret; ENDPROC(cast6_ecb_enc_8way) ENTRY(cast6_ecb_dec_8way) /* input: - * %rdi: ctx, CTX + * %rdi: ctx * %rsi: dst * %rdx: src */ FRAME_BEGIN + pushq %r15; + movq %rdi, CTX; movq %rsi, %r11; load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); @@ -388,20 +397,22 @@ ENTRY(cast6_ecb_dec_8way) store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); + popq %r15; FRAME_END ret; ENDPROC(cast6_ecb_dec_8way) ENTRY(cast6_cbc_dec_8way) /* input: - * %rdi: ctx, CTX + * %rdi: ctx * %rsi: dst * %rdx: src */ FRAME_BEGIN - pushq %r12; + pushq %r15; + movq %rdi, CTX; movq %rsi, %r11; movq %rdx, %r12; @@ -411,8 +422,8 @@ ENTRY(cast6_cbc_dec_8way) store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); + popq %r15; popq %r12; - FRAME_END ret; ENDPROC(cast6_cbc_dec_8way) @@ -425,9 +436,10 @@ ENTRY(cast6_ctr_8way) * %rcx: iv (little endian, 128bit) */ FRAME_BEGIN - pushq %r12; + pushq %r15 + movq %rdi, CTX; movq %rsi, %r11; movq %rdx, %r12; @@ -438,8 +450,8 @@ ENTRY(cast6_ctr_8way) store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); + popq %r15; popq %r12; - FRAME_END ret; ENDPROC(cast6_ctr_8way) @@ -452,7 +464,9 @@ ENTRY(cast6_xts_enc_8way) * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) */ FRAME_BEGIN + pushq %r15; + movq %rdi, CTX movq %rsi, %r11; /* regs <= src, dst <= IVs, regs <= regs xor IVs */ @@ -464,6 +478,7 @@ ENTRY(cast6_xts_enc_8way) /* dst <= 
regs xor IVs(in dst) */ store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); + popq %r15; FRAME_END ret; ENDPROC(cast6_xts_enc_8way) @@ -476,7 +491,9 @@ ENTRY(cast6_xts_dec_8way) * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) */ FRAME_BEGIN + pushq %r15; + movq %rdi, CTX movq %rsi, %r11; /* regs <= src, dst <= IVs, regs <= regs xor IVs */ @@ -488,6 +505,7 @@ ENTRY(cast6_xts_dec_8way) /* dst <= regs xor IVs(in dst) */ store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); + popq %r15; FRAME_END ret; ENDPROC(cast6_xts_dec_8way) diff --git a/arch/x86/crypto/des3_ede-asm_64.S b/arch/x86/crypto/des3_ede-asm_64.S index f3e91647ca27..8e49ce117494 100644 --- a/arch/x86/crypto/des3_ede-asm_64.S +++ b/arch/x86/crypto/des3_ede-asm_64.S @@ -64,12 +64,12 @@ #define RW2bh %ch #define RT0 %r15 -#define RT1 %rbp +#define RT1 %rsi #define RT2 %r14 #define RT3 %rdx #define RT0d %r15d -#define RT1d %ebp +#define RT1d %esi #define RT2d %r14d #define RT3d %edx @@ -177,13 +177,14 @@ ENTRY(des3_ede_x86_64_crypt_blk) * %rsi: dst * %rdx: src */ - pushq %rbp; pushq %rbx; pushq %r12; pushq %r13; pushq %r14; pushq %r15; + pushq %rsi; /* dst */ + read_block(%rdx, RL0, RR0); initial_permutation(RL0, RR0); @@ -241,6 +242,8 @@ ENTRY(des3_ede_x86_64_crypt_blk) round1(32+15, RL0, RR0, dummy2); final_permutation(RR0, RL0); + + popq %rsi /* dst */ write_block(%rsi, RR0, RL0); popq %r15; @@ -248,7 +251,6 @@ ENTRY(des3_ede_x86_64_crypt_blk) popq %r13; popq %r12; popq %rbx; - popq %rbp; ret; ENDPROC(des3_ede_x86_64_crypt_blk) @@ -432,13 +434,14 @@ ENTRY(des3_ede_x86_64_crypt_blk_3way) * %rdx: src (3 blocks) */ - pushq %rbp; pushq %rbx; pushq %r12; pushq %r13; pushq %r14; pushq %r15; + pushq %rsi /* dst */ + /* load input */ movl 0 * 4(%rdx), RL0d; movl 1 * 4(%rdx), RR0d; @@ -520,6 +523,7 @@ ENTRY(des3_ede_x86_64_crypt_blk_3way) bswapl RR2d; bswapl RL2d; + popq %rsi /* dst */ movl RR0d, 0 * 4(%rsi); movl RL0d, 1 * 4(%rsi); movl RR1d, 2 * 4(%rsi); @@ -532,7 +536,6 @@ ENTRY(des3_ede_x86_64_crypt_blk_3way) popq %r13; popq %r12; popq %rbx; - popq %rbp; ret; ENDPROC(des3_ede_x86_64_crypt_blk_3way) diff --git a/arch/x86/crypto/des3_ede_glue.c b/arch/x86/crypto/des3_ede_glue.c index d6fc59aaaadf..30c0a37f4882 100644 --- a/arch/x86/crypto/des3_ede_glue.c +++ b/arch/x86/crypto/des3_ede_glue.c @@ -277,8 +277,7 @@ static void ctr_crypt_final(struct des3_ede_x86_ctx *ctx, unsigned int nbytes = walk->nbytes; des3_ede_enc_blk(ctx, keystream, ctrblk); - crypto_xor(keystream, src, nbytes); - memcpy(dst, keystream, nbytes); + crypto_xor_cpy(dst, keystream, src, nbytes); crypto_inc(ctrblk, DES3_EDE_BLOCK_SIZE); } diff --git a/arch/x86/crypto/sha1_avx2_x86_64_asm.S b/arch/x86/crypto/sha1_avx2_x86_64_asm.S index 1eab79c9ac48..9f712a7dfd79 100644 --- a/arch/x86/crypto/sha1_avx2_x86_64_asm.S +++ b/arch/x86/crypto/sha1_avx2_x86_64_asm.S @@ -89,7 +89,7 @@ #define REG_RE %rdx #define REG_RTA %r12 #define REG_RTB %rbx -#define REG_T1 %ebp +#define REG_T1 %r11d #define xmm_mov vmovups #define avx2_zeroupper vzeroupper #define RND_F1 1 @@ -637,7 +637,6 @@ _loop3: ENTRY(\name) push %rbx - push %rbp push %r12 push %r13 push %r14 @@ -673,7 +672,6 @@ _loop3: pop %r14 pop %r13 pop %r12 - pop %rbp pop %rbx ret diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S index a4109506a5e8..6204bd53528c 100644 --- a/arch/x86/crypto/sha1_ssse3_asm.S +++ b/arch/x86/crypto/sha1_ssse3_asm.S @@ -37,7 +37,7 @@ #define REG_A %ecx #define REG_B %esi #define REG_C %edi -#define REG_D %ebp +#define REG_D %r12d #define REG_E %edx #define REG_T1 %eax @@ 
-74,10 +74,10 @@ ENTRY(\name) push %rbx - push %rbp push %r12 + push %rbp + mov %rsp, %rbp - mov %rsp, %r12 sub $64, %rsp # allocate workspace and $~15, %rsp # align stack @@ -99,10 +99,9 @@ xor %rax, %rax rep stosq - mov %r12, %rsp # deallocate workspace - - pop %r12 + mov %rbp, %rsp # deallocate workspace pop %rbp + pop %r12 pop %rbx ret diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S index e08888a1a5f2..001bbcf93c79 100644 --- a/arch/x86/crypto/sha256-avx-asm.S +++ b/arch/x86/crypto/sha256-avx-asm.S @@ -103,7 +103,7 @@ SRND = %rsi # clobbers INP c = %ecx d = %r8d e = %edx -TBL = %rbp +TBL = %r12 a = %eax b = %ebx @@ -350,13 +350,13 @@ a = TMP_ ENTRY(sha256_transform_avx) .align 32 pushq %rbx - pushq %rbp + pushq %r12 pushq %r13 pushq %r14 pushq %r15 - pushq %r12 + pushq %rbp + movq %rsp, %rbp - mov %rsp, %r12 subq $STACK_SIZE, %rsp # allocate stack space and $~15, %rsp # align stack pointer @@ -452,13 +452,12 @@ loop2: done_hash: - mov %r12, %rsp - - popq %r12 + mov %rbp, %rsp + popq %rbp popq %r15 popq %r14 popq %r13 - popq %rbp + popq %r12 popq %rbx ret ENDPROC(sha256_transform_avx) diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S index 89c8f09787d2..1420db15dcdd 100644 --- a/arch/x86/crypto/sha256-avx2-asm.S +++ b/arch/x86/crypto/sha256-avx2-asm.S @@ -98,8 +98,6 @@ d = %r8d e = %edx # clobbers NUM_BLKS y3 = %esi # clobbers INP - -TBL = %rbp SRND = CTX # SRND is same register as CTX a = %eax @@ -531,7 +529,6 @@ STACK_SIZE = _RSP + _RSP_SIZE ENTRY(sha256_transform_rorx) .align 32 pushq %rbx - pushq %rbp pushq %r12 pushq %r13 pushq %r14 @@ -568,8 +565,6 @@ ENTRY(sha256_transform_rorx) mov CTX, _CTX(%rsp) loop0: - lea K256(%rip), TBL - ## Load first 16 dwords from two blocks VMOVDQ 0*32(INP),XTMP0 VMOVDQ 1*32(INP),XTMP1 @@ -597,19 +592,19 @@ last_block_enter: .align 16 loop1: - vpaddd 0*32(TBL, SRND), X0, XFER + vpaddd K256+0*32(SRND), X0, XFER vmovdqa XFER, 0*32+_XFER(%rsp, SRND) FOUR_ROUNDS_AND_SCHED _XFER + 0*32 - vpaddd 1*32(TBL, SRND), X0, XFER + vpaddd K256+1*32(SRND), X0, XFER vmovdqa XFER, 1*32+_XFER(%rsp, SRND) FOUR_ROUNDS_AND_SCHED _XFER + 1*32 - vpaddd 2*32(TBL, SRND), X0, XFER + vpaddd K256+2*32(SRND), X0, XFER vmovdqa XFER, 2*32+_XFER(%rsp, SRND) FOUR_ROUNDS_AND_SCHED _XFER + 2*32 - vpaddd 3*32(TBL, SRND), X0, XFER + vpaddd K256+3*32(SRND), X0, XFER vmovdqa XFER, 3*32+_XFER(%rsp, SRND) FOUR_ROUNDS_AND_SCHED _XFER + 3*32 @@ -619,10 +614,11 @@ loop1: loop2: ## Do last 16 rounds with no scheduling - vpaddd 0*32(TBL, SRND), X0, XFER + vpaddd K256+0*32(SRND), X0, XFER vmovdqa XFER, 0*32+_XFER(%rsp, SRND) DO_4ROUNDS _XFER + 0*32 - vpaddd 1*32(TBL, SRND), X1, XFER + + vpaddd K256+1*32(SRND), X1, XFER vmovdqa XFER, 1*32+_XFER(%rsp, SRND) DO_4ROUNDS _XFER + 1*32 add $2*32, SRND @@ -676,9 +672,6 @@ loop3: ja done_hash do_last_block: - #### do last block - lea K256(%rip), TBL - VMOVDQ 0*16(INP),XWORD0 VMOVDQ 1*16(INP),XWORD1 VMOVDQ 2*16(INP),XWORD2 @@ -718,7 +711,6 @@ done_hash: popq %r14 popq %r13 popq %r12 - popq %rbp popq %rbx ret ENDPROC(sha256_transform_rorx) diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S index 39b83c93e7fd..c6c05ed2c16a 100644 --- a/arch/x86/crypto/sha256-ssse3-asm.S +++ b/arch/x86/crypto/sha256-ssse3-asm.S @@ -95,7 +95,7 @@ SRND = %rsi # clobbers INP c = %ecx d = %r8d e = %edx -TBL = %rbp +TBL = %r12 a = %eax b = %ebx @@ -356,13 +356,13 @@ a = TMP_ ENTRY(sha256_transform_ssse3) .align 32 pushq %rbx - pushq %rbp + pushq %r12 pushq %r13 pushq %r14 pushq %r15 - 
pushq %r12 + pushq %rbp + mov %rsp, %rbp - mov %rsp, %r12 subq $STACK_SIZE, %rsp and $~15, %rsp @@ -462,13 +462,12 @@ loop2: done_hash: - mov %r12, %rsp - - popq %r12 + mov %rbp, %rsp + popq %rbp popq %r15 popq %r14 popq %r13 - popq %rbp + popq %r12 popq %rbx ret diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S index 7f5f6c6ec72e..b16d56005162 100644 --- a/arch/x86/crypto/sha512-avx2-asm.S +++ b/arch/x86/crypto/sha512-avx2-asm.S @@ -69,8 +69,9 @@ XFER = YTMP0 BYTE_FLIP_MASK = %ymm9 -# 1st arg -CTX = %rdi +# 1st arg is %rdi, which is saved to the stack and accessed later via %r12 +CTX1 = %rdi +CTX2 = %r12 # 2nd arg INP = %rsi # 3rd arg @@ -81,7 +82,7 @@ d = %r8 e = %rdx y3 = %rsi -TBL = %rbp +TBL = %rdi # clobbers CTX1 a = %rax b = %rbx @@ -91,26 +92,26 @@ g = %r10 h = %r11 old_h = %r11 -T1 = %r12 +T1 = %r12 # clobbers CTX2 y0 = %r13 y1 = %r14 y2 = %r15 -y4 = %r12 - # Local variables (stack frame) XFER_SIZE = 4*8 SRND_SIZE = 1*8 INP_SIZE = 1*8 INPEND_SIZE = 1*8 +CTX_SIZE = 1*8 RSPSAVE_SIZE = 1*8 -GPRSAVE_SIZE = 6*8 +GPRSAVE_SIZE = 5*8 frame_XFER = 0 frame_SRND = frame_XFER + XFER_SIZE frame_INP = frame_SRND + SRND_SIZE frame_INPEND = frame_INP + INP_SIZE -frame_RSPSAVE = frame_INPEND + INPEND_SIZE +frame_CTX = frame_INPEND + INPEND_SIZE +frame_RSPSAVE = frame_CTX + CTX_SIZE frame_GPRSAVE = frame_RSPSAVE + RSPSAVE_SIZE frame_size = frame_GPRSAVE + GPRSAVE_SIZE @@ -576,12 +577,11 @@ ENTRY(sha512_transform_rorx) mov %rax, frame_RSPSAVE(%rsp) # Save GPRs - mov %rbp, frame_GPRSAVE(%rsp) - mov %rbx, 8*1+frame_GPRSAVE(%rsp) - mov %r12, 8*2+frame_GPRSAVE(%rsp) - mov %r13, 8*3+frame_GPRSAVE(%rsp) - mov %r14, 8*4+frame_GPRSAVE(%rsp) - mov %r15, 8*5+frame_GPRSAVE(%rsp) + mov %rbx, 8*0+frame_GPRSAVE(%rsp) + mov %r12, 8*1+frame_GPRSAVE(%rsp) + mov %r13, 8*2+frame_GPRSAVE(%rsp) + mov %r14, 8*3+frame_GPRSAVE(%rsp) + mov %r15, 8*4+frame_GPRSAVE(%rsp) shl $7, NUM_BLKS # convert to bytes jz done_hash @@ -589,14 +589,17 @@ ENTRY(sha512_transform_rorx) mov NUM_BLKS, frame_INPEND(%rsp) ## load initial digest - mov 8*0(CTX),a - mov 8*1(CTX),b - mov 8*2(CTX),c - mov 8*3(CTX),d - mov 8*4(CTX),e - mov 8*5(CTX),f - mov 8*6(CTX),g - mov 8*7(CTX),h + mov 8*0(CTX1), a + mov 8*1(CTX1), b + mov 8*2(CTX1), c + mov 8*3(CTX1), d + mov 8*4(CTX1), e + mov 8*5(CTX1), f + mov 8*6(CTX1), g + mov 8*7(CTX1), h + + # save %rdi (CTX) before it gets clobbered + mov %rdi, frame_CTX(%rsp) vmovdqa PSHUFFLE_BYTE_FLIP_MASK(%rip), BYTE_FLIP_MASK @@ -652,14 +655,15 @@ loop2: subq $1, frame_SRND(%rsp) jne loop2 - addm 8*0(CTX),a - addm 8*1(CTX),b - addm 8*2(CTX),c - addm 8*3(CTX),d - addm 8*4(CTX),e - addm 8*5(CTX),f - addm 8*6(CTX),g - addm 8*7(CTX),h + mov frame_CTX(%rsp), CTX2 + addm 8*0(CTX2), a + addm 8*1(CTX2), b + addm 8*2(CTX2), c + addm 8*3(CTX2), d + addm 8*4(CTX2), e + addm 8*5(CTX2), f + addm 8*6(CTX2), g + addm 8*7(CTX2), h mov frame_INP(%rsp), INP add $128, INP @@ -669,12 +673,11 @@ loop2: done_hash: # Restore GPRs - mov frame_GPRSAVE(%rsp) ,%rbp - mov 8*1+frame_GPRSAVE(%rsp) ,%rbx - mov 8*2+frame_GPRSAVE(%rsp) ,%r12 - mov 8*3+frame_GPRSAVE(%rsp) ,%r13 - mov 8*4+frame_GPRSAVE(%rsp) ,%r14 - mov 8*5+frame_GPRSAVE(%rsp) ,%r15 + mov 8*0+frame_GPRSAVE(%rsp), %rbx + mov 8*1+frame_GPRSAVE(%rsp), %r12 + mov 8*2+frame_GPRSAVE(%rsp), %r13 + mov 8*3+frame_GPRSAVE(%rsp), %r14 + mov 8*4+frame_GPRSAVE(%rsp), %r15 # Restore Stack Pointer mov frame_RSPSAVE(%rsp), %rsp diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S index b3f49d286348..73b471da3622 100644 
--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S +++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S @@ -76,8 +76,8 @@ #define RT %xmm14 #define RR %xmm15 -#define RID1 %rbp -#define RID1d %ebp +#define RID1 %r13 +#define RID1d %r13d #define RID2 %rsi #define RID2d %esi @@ -259,7 +259,7 @@ __twofish_enc_blk8: vmovdqu w(CTX), RK1; - pushq %rbp; + pushq %r13; pushq %rbx; pushq %rcx; @@ -282,7 +282,7 @@ __twofish_enc_blk8: popq %rcx; popq %rbx; - popq %rbp; + popq %r13; outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2); outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2); @@ -301,7 +301,7 @@ __twofish_dec_blk8: vmovdqu (w+4*4)(CTX), RK1; - pushq %rbp; + pushq %r13; pushq %rbx; inpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2); @@ -322,7 +322,7 @@ __twofish_dec_blk8: vmovdqu (w)(CTX), RK1; popq %rbx; - popq %rbp; + popq %r13; outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2); outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2); diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 829e89cfcee2..9fb9a1f1e47b 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -4409,10 +4409,9 @@ static __init int fixup_ht_bug(void) return 0; } - if (lockup_detector_suspend() != 0) { - pr_debug("failed to disable PMU erratum BJ122, BV98, HSD29 workaround\n"); - return 0; - } + cpus_read_lock(); + + hardlockup_detector_perf_stop(); x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED); @@ -4420,9 +4419,7 @@ static __init int fixup_ht_bug(void) x86_pmu.commit_scheduling = NULL; x86_pmu.stop_scheduling = NULL; - lockup_detector_resume(); - - cpus_read_lock(); + hardlockup_detector_perf_restart(); for_each_online_cpu(c) free_excl_cntrs(c); diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c index 4cf100ff2a37..72db0664a53d 100644 --- a/arch/x86/events/intel/cstate.c +++ b/arch/x86/events/intel/cstate.c @@ -552,6 +552,7 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = { X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_MOBILE, snb_cstates), X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_X, snb_cstates), X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE, snb_cstates), X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, snb_cstates), @@ -560,6 +561,9 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = { X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNM, knl_cstates), X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT, glm_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_ATOM_DENVERTON, glm_cstates), + + X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GEMINI_LAKE, glm_cstates), { }, }; MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match); diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c index 8e2457cb6b4a..005908ee9333 100644 --- a/arch/x86/events/intel/rapl.c +++ b/arch/x86/events/intel/rapl.c @@ -775,6 +775,9 @@ static const struct x86_cpu_id rapl_cpu_match[] __initconst = { X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, skl_rapl_init), X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT, hsw_rapl_init), + X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_DENVERTON, hsw_rapl_init), + + X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GEMINI_LAKE, hsw_rapl_init), {}, }; diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index db1fe377e6dd..a7196818416a 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -3462,7 +3462,7 @@ static struct intel_uncore_ops skx_uncore_iio_ops = { static struct intel_uncore_type 
skx_uncore_iio = { .name = "iio", .num_counters = 4, - .num_boxes = 5, + .num_boxes = 6, .perf_ctr_bits = 48, .event_ctl = SKX_IIO0_MSR_PMON_CTL0, .perf_ctr = SKX_IIO0_MSR_PMON_CTR0, @@ -3492,7 +3492,7 @@ static const struct attribute_group skx_uncore_format_group = { static struct intel_uncore_type skx_uncore_irp = { .name = "irp", .num_counters = 2, - .num_boxes = 5, + .num_boxes = 6, .perf_ctr_bits = 48, .event_ctl = SKX_IRP0_MSR_PMON_CTL0, .perf_ctr = SKX_IRP0_MSR_PMON_CTR0, diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c index 4bb3ec69e8ea..06723671ae4e 100644 --- a/arch/x86/events/msr.c +++ b/arch/x86/events/msr.c @@ -63,6 +63,14 @@ static bool test_intel(int idx) case INTEL_FAM6_ATOM_SILVERMONT1: case INTEL_FAM6_ATOM_SILVERMONT2: case INTEL_FAM6_ATOM_AIRMONT: + + case INTEL_FAM6_ATOM_GOLDMONT: + case INTEL_FAM6_ATOM_DENVERTON: + + case INTEL_FAM6_ATOM_GEMINI_LAKE: + + case INTEL_FAM6_XEON_PHI_KNL: + case INTEL_FAM6_XEON_PHI_KNM: if (idx == PERF_MSR_SMI) return true; break; diff --git a/arch/x86/hyperv/Makefile b/arch/x86/hyperv/Makefile index 171ae09864d7..367a8203cfcf 100644 --- a/arch/x86/hyperv/Makefile +++ b/arch/x86/hyperv/Makefile @@ -1 +1 @@ -obj-y := hv_init.o +obj-y := hv_init.o mmu.o diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c index 5b882cc0c0e9..1a8eb550c40f 100644 --- a/arch/x86/hyperv/hv_init.c +++ b/arch/x86/hyperv/hv_init.c @@ -26,6 +26,8 @@ #include #include #include +#include +#include #ifdef CONFIG_HYPERV_TSCPAGE @@ -75,10 +77,25 @@ static struct clocksource hyperv_cs_msr = { .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; -static void *hypercall_pg; +void *hv_hypercall_pg; +EXPORT_SYMBOL_GPL(hv_hypercall_pg); struct clocksource *hyperv_cs; EXPORT_SYMBOL_GPL(hyperv_cs); +u32 *hv_vp_index; +EXPORT_SYMBOL_GPL(hv_vp_index); + +static int hv_cpu_init(unsigned int cpu) +{ + u64 msr_vp_index; + + hv_get_vp_index(msr_vp_index); + + hv_vp_index[smp_processor_id()] = msr_vp_index; + + return 0; +} + /* * This function is to be invoked early in the boot sequence after the * hypervisor has been detected. @@ -94,6 +111,16 @@ void hyperv_init(void) if (x86_hyper != &x86_hyper_ms_hyperv) return; + /* Allocate percpu VP index */ + hv_vp_index = kmalloc_array(num_possible_cpus(), sizeof(*hv_vp_index), + GFP_KERNEL); + if (!hv_vp_index) + return; + + if (cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/hyperv_init:online", + hv_cpu_init, NULL) < 0) + goto free_vp_index; + /* * Setup the hypercall page and enable hypercalls. * 1. Register the guest ID @@ -102,17 +129,19 @@ void hyperv_init(void) guest_id = generate_guest_id(0, LINUX_VERSION_CODE, 0); wrmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id); - hypercall_pg = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX); - if (hypercall_pg == NULL) { + hv_hypercall_pg = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX); + if (hv_hypercall_pg == NULL) { wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0); - return; + goto free_vp_index; } rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); hypercall_msr.enable = 1; - hypercall_msr.guest_physical_address = vmalloc_to_pfn(hypercall_pg); + hypercall_msr.guest_physical_address = vmalloc_to_pfn(hv_hypercall_pg); wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); + hyper_alloc_mmu(); + /* * Register Hyper-V specific clocksource. 
*/ @@ -148,6 +177,12 @@ void hyperv_init(void) hyperv_cs = &hyperv_cs_msr; if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE) clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100); + + return; + +free_vp_index: + kfree(hv_vp_index); + hv_vp_index = NULL; } /* @@ -170,51 +205,6 @@ void hyperv_cleanup(void) } EXPORT_SYMBOL_GPL(hyperv_cleanup); -/* - * hv_do_hypercall- Invoke the specified hypercall - */ -u64 hv_do_hypercall(u64 control, void *input, void *output) -{ - u64 input_address = (input) ? virt_to_phys(input) : 0; - u64 output_address = (output) ? virt_to_phys(output) : 0; -#ifdef CONFIG_X86_64 - u64 hv_status = 0; - - if (!hypercall_pg) - return (u64)ULLONG_MAX; - - __asm__ __volatile__("mov %0, %%r8" : : "r" (output_address) : "r8"); - __asm__ __volatile__("call *%3" : "=a" (hv_status) : - "c" (control), "d" (input_address), - "m" (hypercall_pg)); - - return hv_status; - -#else - - u32 control_hi = control >> 32; - u32 control_lo = control & 0xFFFFFFFF; - u32 hv_status_hi = 1; - u32 hv_status_lo = 1; - u32 input_address_hi = input_address >> 32; - u32 input_address_lo = input_address & 0xFFFFFFFF; - u32 output_address_hi = output_address >> 32; - u32 output_address_lo = output_address & 0xFFFFFFFF; - - if (!hypercall_pg) - return (u64)ULLONG_MAX; - - __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi), - "=a"(hv_status_lo) : "d" (control_hi), - "a" (control_lo), "b" (input_address_hi), - "c" (input_address_lo), "D"(output_address_hi), - "S"(output_address_lo), "m" (hypercall_pg)); - - return hv_status_lo | ((u64)hv_status_hi << 32); -#endif /* !x86_64 */ -} -EXPORT_SYMBOL_GPL(hv_do_hypercall); - void hyperv_report_panic(struct pt_regs *regs) { static bool panic_reported; diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c new file mode 100644 index 000000000000..39e7f6e50919 --- /dev/null +++ b/arch/x86/hyperv/mmu.c @@ -0,0 +1,272 @@ +#define pr_fmt(fmt) "Hyper-V: " fmt + +#include +#include +#include +#include + +#include +#include +#include +#include + +#define CREATE_TRACE_POINTS +#include + +/* HvFlushVirtualAddressSpace, HvFlushVirtualAddressList hypercalls */ +struct hv_flush_pcpu { + u64 address_space; + u64 flags; + u64 processor_mask; + u64 gva_list[]; +}; + +/* HvFlushVirtualAddressSpaceEx, HvFlushVirtualAddressListEx hypercalls */ +struct hv_flush_pcpu_ex { + u64 address_space; + u64 flags; + struct { + u64 format; + u64 valid_bank_mask; + u64 bank_contents[]; + } hv_vp_set; + u64 gva_list[]; +}; + +/* Each gva in gva_list encodes up to 4096 pages to flush */ +#define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE) + +static struct hv_flush_pcpu __percpu *pcpu_flush; + +static struct hv_flush_pcpu_ex __percpu *pcpu_flush_ex; + +/* + * Fills in gva_list starting from offset. Returns the number of items added. + */ +static inline int fill_gva_list(u64 gva_list[], int offset, + unsigned long start, unsigned long end) +{ + int gva_n = offset; + unsigned long cur = start, diff; + + do { + diff = end > cur ? end - cur : 0; + + gva_list[gva_n] = cur & PAGE_MASK; + /* + * Lower 12 bits encode the number of additional + * pages to flush (in addition to the 'cur' page). 
+ */ + if (diff >= HV_TLB_FLUSH_UNIT) + gva_list[gva_n] |= ~PAGE_MASK; + else if (diff) + gva_list[gva_n] |= (diff - 1) >> PAGE_SHIFT; + + cur += HV_TLB_FLUSH_UNIT; + gva_n++; + + } while (cur < end); + + return gva_n - offset; +} + +/* Return the number of banks in the resulting vp_set */ +static inline int cpumask_to_vp_set(struct hv_flush_pcpu_ex *flush, + const struct cpumask *cpus) +{ + int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1; + + /* + * Some banks may end up being empty but this is acceptable. + */ + for_each_cpu(cpu, cpus) { + vcpu = hv_cpu_number_to_vp_number(cpu); + vcpu_bank = vcpu / 64; + vcpu_offset = vcpu % 64; + + /* valid_bank_mask can represent up to 64 banks */ + if (vcpu_bank >= 64) + return 0; + + __set_bit(vcpu_offset, (unsigned long *) + &flush->hv_vp_set.bank_contents[vcpu_bank]); + if (vcpu_bank >= nr_bank) + nr_bank = vcpu_bank + 1; + } + flush->hv_vp_set.valid_bank_mask = GENMASK_ULL(nr_bank - 1, 0); + + return nr_bank; +} + +static void hyperv_flush_tlb_others(const struct cpumask *cpus, + const struct flush_tlb_info *info) +{ + int cpu, vcpu, gva_n, max_gvas; + struct hv_flush_pcpu *flush; + u64 status = U64_MAX; + unsigned long flags; + + trace_hyperv_mmu_flush_tlb_others(cpus, info); + + if (!pcpu_flush || !hv_hypercall_pg) + goto do_native; + + if (cpumask_empty(cpus)) + return; + + local_irq_save(flags); + + flush = this_cpu_ptr(pcpu_flush); + + if (info->mm) { + flush->address_space = virt_to_phys(info->mm->pgd); + flush->flags = 0; + } else { + flush->address_space = 0; + flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES; + } + + flush->processor_mask = 0; + if (cpumask_equal(cpus, cpu_present_mask)) { + flush->flags |= HV_FLUSH_ALL_PROCESSORS; + } else { + for_each_cpu(cpu, cpus) { + vcpu = hv_cpu_number_to_vp_number(cpu); + if (vcpu >= 64) + goto do_native; + + __set_bit(vcpu, (unsigned long *) + &flush->processor_mask); + } + } + + /* + * We can flush not more than max_gvas with one hypercall. Flush the + * whole address space if we were asked to do more. 
+ */ + max_gvas = (PAGE_SIZE - sizeof(*flush)) / sizeof(flush->gva_list[0]); + + if (info->end == TLB_FLUSH_ALL) { + flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY; + status = hv_do_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE, + flush, NULL); + } else if (info->end && + ((info->end - info->start)/HV_TLB_FLUSH_UNIT) > max_gvas) { + status = hv_do_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE, + flush, NULL); + } else { + gva_n = fill_gva_list(flush->gva_list, 0, + info->start, info->end); + status = hv_do_rep_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST, + gva_n, 0, flush, NULL); + } + + local_irq_restore(flags); + + if (!(status & HV_HYPERCALL_RESULT_MASK)) + return; +do_native: + native_flush_tlb_others(cpus, info); +} + +static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus, + const struct flush_tlb_info *info) +{ + int nr_bank = 0, max_gvas, gva_n; + struct hv_flush_pcpu_ex *flush; + u64 status = U64_MAX; + unsigned long flags; + + trace_hyperv_mmu_flush_tlb_others(cpus, info); + + if (!pcpu_flush_ex || !hv_hypercall_pg) + goto do_native; + + if (cpumask_empty(cpus)) + return; + + local_irq_save(flags); + + flush = this_cpu_ptr(pcpu_flush_ex); + + if (info->mm) { + flush->address_space = virt_to_phys(info->mm->pgd); + flush->flags = 0; + } else { + flush->address_space = 0; + flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES; + } + + flush->hv_vp_set.valid_bank_mask = 0; + + if (!cpumask_equal(cpus, cpu_present_mask)) { + flush->hv_vp_set.format = HV_GENERIC_SET_SPARCE_4K; + nr_bank = cpumask_to_vp_set(flush, cpus); + } + + if (!nr_bank) { + flush->hv_vp_set.format = HV_GENERIC_SET_ALL; + flush->flags |= HV_FLUSH_ALL_PROCESSORS; + } + + /* + * We can flush not more than max_gvas with one hypercall. Flush the + * whole address space if we were asked to do more. 
+ */ + max_gvas = + (PAGE_SIZE - sizeof(*flush) - nr_bank * + sizeof(flush->hv_vp_set.bank_contents[0])) / + sizeof(flush->gva_list[0]); + + if (info->end == TLB_FLUSH_ALL) { + flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY; + status = hv_do_rep_hypercall( + HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX, + 0, nr_bank + 2, flush, NULL); + } else if (info->end && + ((info->end - info->start)/HV_TLB_FLUSH_UNIT) > max_gvas) { + status = hv_do_rep_hypercall( + HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX, + 0, nr_bank + 2, flush, NULL); + } else { + gva_n = fill_gva_list(flush->gva_list, nr_bank, + info->start, info->end); + status = hv_do_rep_hypercall( + HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX, + gva_n, nr_bank + 2, flush, NULL); + } + + local_irq_restore(flags); + + if (!(status & HV_HYPERCALL_RESULT_MASK)) + return; +do_native: + native_flush_tlb_others(cpus, info); +} + +void hyperv_setup_mmu_ops(void) +{ + if (!(ms_hyperv.hints & HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED)) + return; + + setup_clear_cpu_cap(X86_FEATURE_PCID); + + if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED)) { + pr_info("Using hypercall for remote TLB flush\n"); + pv_mmu_ops.flush_tlb_others = hyperv_flush_tlb_others; + } else { + pr_info("Using ext hypercall for remote TLB flush\n"); + pv_mmu_ops.flush_tlb_others = hyperv_flush_tlb_others_ex; + } +} + +void hyper_alloc_mmu(void) +{ + if (!(ms_hyperv.hints & HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED)) + return; + + if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED)) + pcpu_flush = __alloc_percpu(PAGE_SIZE, PAGE_SIZE); + else + pcpu_flush_ex = __alloc_percpu(PAGE_SIZE, PAGE_SIZE); +} diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c index 8d0879f1d42c..8e02b30cf08e 100644 --- a/arch/x86/ia32/ia32_aout.c +++ b/arch/x86/ia32/ia32_aout.c @@ -407,10 +407,10 @@ static int load_aout_library(struct file *file) unsigned long bss, start_addr, len, error; int retval; struct exec ex; - + loff_t pos = 0; retval = -ENOEXEC; - error = kernel_read(file, 0, (char *) &ex, sizeof(ex)); + error = kernel_read(file, &ex, sizeof(ex), &pos); if (error != sizeof(ex)) goto out; diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c index e0bb46c02857..0e2a5edbce00 100644 --- a/arch/x86/ia32/ia32_signal.c +++ b/arch/x86/ia32/ia32_signal.c @@ -231,7 +231,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, ksig->ka.sa.sa_restorer) sp = (unsigned long) ksig->ka.sa.sa_restorer; - if (fpu->fpstate_active) { + if (fpu->initialized) { unsigned long fx_aligned, math_size; sp = fpu__alloc_mathframe(sp, 1, &fx_aligned, &math_size); diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index 1b020381ab38..c096624137ae 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h @@ -218,10 +218,9 @@ static inline int alternatives_text_reserved(void *start, void *end) #define alternative_call_2(oldfunc, newfunc1, feature1, newfunc2, feature2, \ output, input...) 
\ { \ - register void *__sp asm(_ASM_SP); \ asm volatile (ALTERNATIVE_2("call %P[old]", "call %P[new1]", feature1,\ "call %P[new2]", feature2) \ - : output, "+r" (__sp) \ + : output, ASM_CALL_CONSTRAINT \ : [old] "i" (oldfunc), [new1] "i" (newfunc1), \ [new2] "i" (newfunc2), ## input); \ } diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h index 676ee5807d86..b0dc91f4bedc 100644 --- a/arch/x86/include/asm/asm.h +++ b/arch/x86/include/asm/asm.h @@ -11,10 +11,12 @@ # define __ASM_FORM_COMMA(x) " " #x "," #endif -#ifdef CONFIG_X86_32 +#ifndef __x86_64__ +/* 32 bit */ # define __ASM_SEL(a,b) __ASM_FORM(a) # define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(a) #else +/* 64 bit */ # define __ASM_SEL(a,b) __ASM_FORM(b) # define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(b) #endif @@ -132,4 +134,15 @@ /* For C file, we already have NOKPROBE_SYMBOL macro */ #endif +#ifndef __ASSEMBLY__ +/* + * This output constraint should be used for any inline asm which has a "call" + * instruction. Otherwise the asm may be inserted before the frame pointer + * gets set up by the containing function. If you forget to do this, objtool + * may print a "call without frame pointer save/setup" warning. + */ +register unsigned long current_stack_pointer asm(_ASM_SP); +#define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer) +#endif + #endif /* _ASM_X86_ASM_H */ diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h index 8b4140f6724f..cb9a1af109b4 100644 --- a/arch/x86/include/asm/cacheflush.h +++ b/arch/x86/include/asm/cacheflush.h @@ -7,6 +7,4 @@ void clflush_cache_range(void *addr, unsigned int size); -#define mmio_flush_range(addr, size) clflush_cache_range(addr, size) - #endif /* _ASM_X86_CACHEFLUSH_H */ diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 42bbbf0f173d..2519c6c801c9 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -288,6 +288,7 @@ #define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */ #define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */ #define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */ +#define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */ /* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */ #define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/ diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h index 1a2ba368da39..9d0e13738ed3 100644 --- a/arch/x86/include/asm/desc.h +++ b/arch/x86/include/asm/desc.h @@ -121,7 +121,6 @@ static inline int desc_empty(const void *ptr) #define load_ldt(ldt) asm volatile("lldt %0"::"m" (ldt)) #define store_gdt(dtr) native_store_gdt(dtr) -#define store_idt(dtr) native_store_idt(dtr) #define store_tr(tr) (tr = native_store_tr()) #define load_TLS(t, cpu) native_load_tls(t, cpu) @@ -228,7 +227,7 @@ static inline void native_store_gdt(struct desc_ptr *dtr) asm volatile("sgdt %0":"=m" (*dtr)); } -static inline void native_store_idt(struct desc_ptr *dtr) +static inline void store_idt(struct desc_ptr *dtr) { asm volatile("sidt %0":"=m" (*dtr)); } diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h index 554cdb205d17..e3221ffa304e 100644 --- a/arch/x86/include/asm/fpu/internal.h +++ b/arch/x86/include/asm/fpu/internal.h @@ -23,11 +23,9 @@ /* * High level FPU state handling functions: */ -extern void fpu__activate_curr(struct fpu *fpu); -extern void fpu__activate_fpstate_read(struct fpu *fpu); -extern 
void fpu__activate_fpstate_write(struct fpu *fpu); -extern void fpu__current_fpstate_write_begin(void); -extern void fpu__current_fpstate_write_end(void); +extern void fpu__initialize(struct fpu *fpu); +extern void fpu__prepare_read(struct fpu *fpu); +extern void fpu__prepare_write(struct fpu *fpu); extern void fpu__save(struct fpu *fpu); extern void fpu__restore(struct fpu *fpu); extern int fpu__restore_sig(void __user *buf, int ia32_frame); @@ -120,20 +118,11 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu); err; \ }) -#define check_insn(insn, output, input...) \ -({ \ - int err; \ +#define kernel_insn(insn, output, input...) \ asm volatile("1:" #insn "\n\t" \ "2:\n" \ - ".section .fixup,\"ax\"\n" \ - "3: movl $-1,%[err]\n" \ - " jmp 2b\n" \ - ".previous\n" \ - _ASM_EXTABLE(1b, 3b) \ - : [err] "=r" (err), output \ - : "0"(0), input); \ - err; \ -}) + _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_fprestore) \ + : output : input) static inline int copy_fregs_to_user(struct fregs_state __user *fx) { @@ -153,20 +142,16 @@ static inline int copy_fxregs_to_user(struct fxregs_state __user *fx) static inline void copy_kernel_to_fxregs(struct fxregs_state *fx) { - int err; - if (IS_ENABLED(CONFIG_X86_32)) { - err = check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx)); + kernel_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx)); } else { if (IS_ENABLED(CONFIG_AS_FXSAVEQ)) { - err = check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx)); + kernel_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx)); } else { /* See comment in copy_fxregs_to_kernel() below. */ - err = check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), "m" (*fx)); + kernel_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), "m" (*fx)); } } - /* Copying from a kernel buffer to FPU registers should never fail: */ - WARN_ON_FPU(err); } static inline int copy_user_to_fxregs(struct fxregs_state __user *fx) @@ -183,9 +168,7 @@ static inline int copy_user_to_fxregs(struct fxregs_state __user *fx) static inline void copy_kernel_to_fregs(struct fregs_state *fx) { - int err = check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx)); - - WARN_ON_FPU(err); + kernel_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx)); } static inline int copy_user_to_fregs(struct fregs_state __user *fx) @@ -281,18 +264,13 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu) * Use XRSTORS to restore context if it is enabled. XRSTORS supports compact * XSAVE area format. */ -#define XSTATE_XRESTORE(st, lmask, hmask, err) \ +#define XSTATE_XRESTORE(st, lmask, hmask) \ asm volatile(ALTERNATIVE(XRSTOR, \ XRSTORS, X86_FEATURE_XSAVES) \ "\n" \ - "xor %[err], %[err]\n" \ "3:\n" \ - ".pushsection .fixup,\"ax\"\n" \ - "4: movl $-2, %[err]\n" \ - "jmp 3b\n" \ - ".popsection\n" \ - _ASM_EXTABLE(661b, 4b) \ - : [err] "=r" (err) \ + _ASM_EXTABLE_HANDLE(661b, 3b, ex_handler_fprestore)\ + : \ : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \ : "memory") @@ -336,7 +314,10 @@ static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate) else XSTATE_OP(XRSTOR, xstate, lmask, hmask, err); - /* We should never fault when copying from a kernel buffer: */ + /* + * We should never fault when copying from a kernel buffer, and the FPU + * state we set at boot time should be valid. 
+ */ WARN_ON_FPU(err); } @@ -350,7 +331,7 @@ static inline void copy_xregs_to_kernel(struct xregs_state *xstate) u32 hmask = mask >> 32; int err; - WARN_ON(!alternatives_patched); + WARN_ON_FPU(!alternatives_patched); XSTATE_XSAVE(xstate, lmask, hmask, err); @@ -365,12 +346,8 @@ static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask) { u32 lmask = mask; u32 hmask = mask >> 32; - int err; - XSTATE_XRESTORE(xstate, lmask, hmask, err); - - /* We should never fault when copying from a kernel buffer: */ - WARN_ON_FPU(err); + XSTATE_XRESTORE(xstate, lmask, hmask); } /* @@ -526,37 +503,16 @@ static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu) */ static inline void fpregs_deactivate(struct fpu *fpu) { - WARN_ON_FPU(!fpu->fpregs_active); - - fpu->fpregs_active = 0; this_cpu_write(fpu_fpregs_owner_ctx, NULL); trace_x86_fpu_regs_deactivated(fpu); } static inline void fpregs_activate(struct fpu *fpu) { - WARN_ON_FPU(fpu->fpregs_active); - - fpu->fpregs_active = 1; this_cpu_write(fpu_fpregs_owner_ctx, fpu); trace_x86_fpu_regs_activated(fpu); } -/* - * The question "does this thread have fpu access?" - * is slightly racy, since preemption could come in - * and revoke it immediately after the test. - * - * However, even in that very unlikely scenario, - * we can just assume we have FPU access - typically - * to save the FP state - we'll just take a #NM - * fault and get the FPU access back. - */ -static inline int fpregs_active(void) -{ - return current->thread.fpu.fpregs_active; -} - /* * FPU state switching for scheduling. * @@ -571,14 +527,13 @@ static inline int fpregs_active(void) static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu) { - if (old_fpu->fpregs_active) { + if (old_fpu->initialized) { if (!copy_fpregs_to_fpstate(old_fpu)) old_fpu->last_cpu = -1; else old_fpu->last_cpu = cpu; /* But leave fpu_fpregs_owner_ctx! */ - old_fpu->fpregs_active = 0; trace_x86_fpu_regs_deactivated(old_fpu); } else old_fpu->last_cpu = -1; @@ -595,7 +550,7 @@ switch_fpu_prepare(struct fpu *old_fpu, int cpu) static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu) { bool preload = static_cpu_has(X86_FEATURE_FPU) && - new_fpu->fpstate_active; + new_fpu->initialized; if (preload) { if (!fpregs_state_valid(new_fpu, cpu)) @@ -617,8 +572,7 @@ static inline void user_fpu_begin(void) struct fpu *fpu = ¤t->thread.fpu; preempt_disable(); - if (!fpregs_active()) - fpregs_activate(fpu); + fpregs_activate(fpu); preempt_enable(); } diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h index 3c80f5b9c09d..a1520575d86b 100644 --- a/arch/x86/include/asm/fpu/types.h +++ b/arch/x86/include/asm/fpu/types.h @@ -68,6 +68,9 @@ struct fxregs_state { /* Default value for fxregs_state.mxcsr: */ #define MXCSR_DEFAULT 0x1f80 +/* Copy both mxcsr & mxcsr_flags with a single u64 memcpy: */ +#define MXCSR_AND_FLAGS_SIZE sizeof(u64) + /* * Software based FPU emulation state. This is arbitrary really, * it matches the x87 format to make it easier to understand: @@ -290,36 +293,13 @@ struct fpu { unsigned int last_cpu; /* - * @fpstate_active: + * @initialized: * - * This flag indicates whether this context is active: if the task + * This flag indicates whether this context is initialized: if the task * is not running then we can restore from this context, if the task * is running then we should save into this context. 
*/ - unsigned char fpstate_active; - - /* - * @fpregs_active: - * - * This flag determines whether a given context is actively - * loaded into the FPU's registers and that those registers - * represent the task's current FPU state. - * - * Note the interaction with fpstate_active: - * - * # task does not use the FPU: - * fpstate_active == 0 - * - * # task uses the FPU and regs are active: - * fpstate_active == 1 && fpregs_active == 1 - * - * # the regs are inactive but still match fpstate: - * fpstate_active == 1 && fpregs_active == 0 && fpregs_owner == fpu - * - * The third state is what we use for the lazy restore optimization - * on lazy-switching CPUs. - */ - unsigned char fpregs_active; + unsigned char initialized; /* * @state: diff --git a/arch/x86/include/asm/fpu/xstate.h b/arch/x86/include/asm/fpu/xstate.h index 1b2799e0699a..83fee2469eb7 100644 --- a/arch/x86/include/asm/fpu/xstate.h +++ b/arch/x86/include/asm/fpu/xstate.h @@ -48,8 +48,12 @@ void fpu__xstate_clear_all_cpu_caps(void); void *get_xsave_addr(struct xregs_state *xsave, int xstate); const void *get_xsave_field_ptr(int xstate_field); int using_compacted_format(void); -int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf, - void __user *ubuf, struct xregs_state *xsave); -int copyin_to_xsaves(const void *kbuf, const void __user *ubuf, - struct xregs_state *xsave); +int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int offset, unsigned int size); +int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned int offset, unsigned int size); +int copy_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf); +int copy_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf); + +/* Validate an xstate header supplied by userspace (ptrace or sigreturn) */ +extern int validate_xstate_header(const struct xstate_header *hdr); + #endif diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h index fde36f189836..fa2558e12024 100644 --- a/arch/x86/include/asm/kvm_emulate.h +++ b/arch/x86/include/asm/kvm_emulate.h @@ -219,8 +219,8 @@ struct x86_emulate_ops { struct x86_instruction_info *info, enum x86_intercept_stage stage); - void (*get_cpuid)(struct x86_emulate_ctxt *ctxt, - u32 *eax, u32 *ebx, u32 *ecx, u32 *edx); + bool (*get_cpuid)(struct x86_emulate_ctxt *ctxt, u32 *eax, u32 *ebx, + u32 *ecx, u32 *edx, bool check_limit); void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked); unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt); diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 369e41c23f07..c73e493adf07 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -79,15 +79,14 @@ | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \ | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG)) -#define CR3_L_MODE_RESERVED_BITS 0xFFFFFF0000000000ULL #define CR3_PCID_INVD BIT_64(63) #define CR4_RESERVED_BITS \ (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\ | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \ | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \ | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \ - | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE | X86_CR4_SMAP \ - | X86_CR4_PKE)) + | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_VMXE \ + | X86_CR4_SMAP | X86_CR4_PKE)) #define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR) @@ -204,7 +203,6 @@ enum { #define PFERR_GUEST_PAGE_MASK (1ULL << PFERR_GUEST_PAGE_BIT) #define PFERR_NESTED_GUEST_PAGE 
(PFERR_GUEST_PAGE_MASK | \ - PFERR_USER_MASK | \ PFERR_WRITE_MASK | \ PFERR_PRESENT_MASK) @@ -317,15 +315,17 @@ struct kvm_pio_request { int size; }; +#define PT64_ROOT_MAX_LEVEL 5 + struct rsvd_bits_validate { - u64 rsvd_bits_mask[2][4]; + u64 rsvd_bits_mask[2][PT64_ROOT_MAX_LEVEL]; u64 bad_mt_xwr; }; /* - * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level - * 32-bit). The kvm_mmu structure abstracts the details of the current mmu - * mode. + * x86 supports 4 paging modes (5-level 64-bit, 4-level 64-bit, 3-level 32-bit, + * and 2-level 32-bit). The kvm_mmu structure abstracts the details of the + * current mmu mode. */ struct kvm_mmu { void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root); @@ -548,8 +548,8 @@ struct kvm_vcpu_arch { struct kvm_queued_exception { bool pending; + bool injected; bool has_error_code; - bool reinject; u8 nr; u32 error_code; u8 nested_apf; @@ -687,8 +687,12 @@ struct kvm_vcpu_arch { int pending_ioapic_eoi; int pending_external_vector; - /* GPA available (AMD only) */ + /* GPA available */ bool gpa_available; + gpa_t gpa_val; + + /* be preempted when it's in kernel-mode(cpl=0) */ + bool preempted_in_kernel; }; struct kvm_lpage_info { @@ -947,7 +951,6 @@ struct kvm_x86_ops { void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg); unsigned long (*get_rflags)(struct kvm_vcpu *vcpu); void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags); - u32 (*get_pkru)(struct kvm_vcpu *vcpu); void (*tlb_flush)(struct kvm_vcpu *vcpu); @@ -969,7 +972,7 @@ struct kvm_x86_ops { void (*enable_nmi_window)(struct kvm_vcpu *vcpu); void (*enable_irq_window)(struct kvm_vcpu *vcpu); void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr); - bool (*get_enable_apicv)(void); + bool (*get_enable_apicv)(struct kvm_vcpu *vcpu); void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu); void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr); void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr); @@ -979,7 +982,7 @@ struct kvm_x86_ops { void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector); int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu); int (*set_tss_addr)(struct kvm *kvm, unsigned int addr); - int (*get_tdp_level)(void); + int (*get_tdp_level)(struct kvm_vcpu *vcpu); u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio); int (*get_lpage_level)(void); bool (*rdtscp_supported)(void); @@ -1297,20 +1300,6 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code) kvm_queue_exception_e(vcpu, GP_VECTOR, error_code); } -static inline u64 get_canonical(u64 la) -{ - return ((int64_t)la << 16) >> 16; -} - -static inline bool is_noncanonical_address(u64 la) -{ -#ifdef CONFIG_X86_64 - return get_canonical(la) != la; -#else - return false; -#endif -} - #define TSS_IOPB_BASE_OFFSET 0x66 #define TSS_BASE_SIZE 0x68 #define TSS_IOPB_SIZE (65536 / 8) diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h index bc62e7cbf1b1..59ad3d132353 100644 --- a/arch/x86/include/asm/kvm_para.h +++ b/arch/x86/include/asm/kvm_para.h @@ -88,7 +88,7 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1, bool kvm_para_available(void); unsigned int kvm_arch_para_features(void); void __init kvm_guest_init(void); -void kvm_async_pf_task_wait(u32 token); +void kvm_async_pf_task_wait(u32 token, int interrupt_kernel); void kvm_async_pf_task_wake(u32 token); u32 kvm_read_and_reset_pf_reason(void); extern void kvm_disable_steal_time(void); @@ -103,7 +103,7 @@ static inline void 
kvm_spinlock_init(void) #else /* CONFIG_KVM_GUEST */ #define kvm_guest_init() do {} while (0) -#define kvm_async_pf_task_wait(T) do {} while(0) +#define kvm_async_pf_task_wait(T, I) do {} while(0) #define kvm_async_pf_task_wake(T) do {} while(0) static inline bool kvm_para_available(void) diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h index 8e618fcf1f7c..6a77c63540f7 100644 --- a/arch/x86/include/asm/mem_encrypt.h +++ b/arch/x86/include/asm/mem_encrypt.h @@ -21,7 +21,7 @@ #ifdef CONFIG_AMD_MEM_ENCRYPT -extern unsigned long sme_me_mask; +extern u64 sme_me_mask; void sme_encrypt_execute(unsigned long encrypted_kernel_vaddr, unsigned long decrypted_kernel_vaddr, @@ -49,7 +49,7 @@ void swiotlb_set_mem_attributes(void *vaddr, unsigned long size); #else /* !CONFIG_AMD_MEM_ENCRYPT */ -#define sme_me_mask 0UL +#define sme_me_mask 0ULL static inline void __init sme_early_encrypt(resource_size_t paddr, unsigned long size) { } diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index 7ae318c340d9..c120b5db178a 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -286,6 +286,32 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, return __pkru_allows_pkey(vma_pkey(vma), write); } +/* + * If PCID is on, ASID-aware code paths put the ASID+1 into the PCID + * bits. This serves two purposes. It prevents a nasty situation in + * which PCID-unaware code saves CR3, loads some other value (with PCID + * == 0), and then restores CR3, thus corrupting the TLB for ASID 0 if + * the saved ASID was nonzero. It also means that any bugs involving + * loading a PCID-enabled CR3 with CR4.PCIDE off will trigger + * deterministically. + */ + +static inline unsigned long build_cr3(struct mm_struct *mm, u16 asid) +{ + if (static_cpu_has(X86_FEATURE_PCID)) { + VM_WARN_ON_ONCE(asid > 4094); + return __sme_pa(mm->pgd) | (asid + 1); + } else { + VM_WARN_ON_ONCE(asid != 0); + return __sme_pa(mm->pgd); + } +} + +static inline unsigned long build_cr3_noflush(struct mm_struct *mm, u16 asid) +{ + VM_WARN_ON_ONCE(asid > 4094); + return __sme_pa(mm->pgd) | (asid + 1) | CR3_NOFLUSH; +} /* * This can be used from process context to figure out what the value of @@ -296,10 +322,8 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, */ static inline unsigned long __get_current_cr3_fast(void) { - unsigned long cr3 = __pa(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd); - - if (static_cpu_has(X86_FEATURE_PCID)) - cr3 |= this_cpu_read(cpu_tlbstate.loaded_mm_asid); + unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm), + this_cpu_read(cpu_tlbstate.loaded_mm_asid)); /* For now, be very restrictive about when this can be called. 
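/*
 * Editorial sketch, not part of the patch: user-space arithmetic mirroring
 * build_cr3()/build_cr3_noflush() above -- the page-table base is OR-ed with
 * ASID + 1 in the 12-bit PCID field, and the no-flush variant additionally
 * sets bit 63. The bit positions are restated here as assumptions.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_CR3_NOFLUSH (1ULL << 63)

static uint64_t toy_build_cr3(uint64_t pgd_pa, uint16_t asid, int noflush)
{
	assert(asid <= 4094);	/* PCID 0 is reserved for the ASID-unaware case */
	return pgd_pa | (uint64_t)(asid + 1) | (noflush ? TOY_CR3_NOFLUSH : 0);
}

int main(void)
{
	/* ASID 0 still becomes PCID 1, so code that blindly saves and restores
	 * CR3 cannot silently corrupt the TLB entries tagged with ASID 0. */
	printf("cr3=%#llx cr3_noflush=%#llx\n",
	       (unsigned long long)toy_build_cr3(0x1000, 0, 0),
	       (unsigned long long)toy_build_cr3(0x1000, 0, 1));
	return 0;
}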
*/ VM_WARN_ON(in_nmi() || preemptible()); diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h index 831eb7895535..c471ca1f9412 100644 --- a/arch/x86/include/asm/mpspec.h +++ b/arch/x86/include/asm/mpspec.h @@ -86,7 +86,6 @@ static inline void e820__memblock_alloc_reserved_mpc_new(void) { } #endif int generic_processor_info(int apicid, int version); -int __generic_processor_info(int apicid, int version, bool enabled); #define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_LOCAL_APIC) diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index 58b9291b46d8..738503e1f80c 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -3,6 +3,8 @@ #include #include +#include +#include #include /* @@ -170,12 +172,153 @@ void hv_remove_crash_handler(void); #if IS_ENABLED(CONFIG_HYPERV) extern struct clocksource *hyperv_cs; +extern void *hv_hypercall_pg; + +static inline u64 hv_do_hypercall(u64 control, void *input, void *output) +{ + u64 input_address = input ? virt_to_phys(input) : 0; + u64 output_address = output ? virt_to_phys(output) : 0; + u64 hv_status; + +#ifdef CONFIG_X86_64 + if (!hv_hypercall_pg) + return U64_MAX; + + __asm__ __volatile__("mov %4, %%r8\n" + "call *%5" + : "=a" (hv_status), ASM_CALL_CONSTRAINT, + "+c" (control), "+d" (input_address) + : "r" (output_address), "m" (hv_hypercall_pg) + : "cc", "memory", "r8", "r9", "r10", "r11"); +#else + u32 input_address_hi = upper_32_bits(input_address); + u32 input_address_lo = lower_32_bits(input_address); + u32 output_address_hi = upper_32_bits(output_address); + u32 output_address_lo = lower_32_bits(output_address); + + if (!hv_hypercall_pg) + return U64_MAX; + + __asm__ __volatile__("call *%7" + : "=A" (hv_status), + "+c" (input_address_lo), ASM_CALL_CONSTRAINT + : "A" (control), + "b" (input_address_hi), + "D"(output_address_hi), "S"(output_address_lo), + "m" (hv_hypercall_pg) + : "cc", "memory"); +#endif /* !x86_64 */ + return hv_status; +} + +#define HV_HYPERCALL_RESULT_MASK GENMASK_ULL(15, 0) +#define HV_HYPERCALL_FAST_BIT BIT(16) +#define HV_HYPERCALL_VARHEAD_OFFSET 17 +#define HV_HYPERCALL_REP_COMP_OFFSET 32 +#define HV_HYPERCALL_REP_COMP_MASK GENMASK_ULL(43, 32) +#define HV_HYPERCALL_REP_START_OFFSET 48 +#define HV_HYPERCALL_REP_START_MASK GENMASK_ULL(59, 48) + +/* Fast hypercall with 8 bytes of input and no output */ +static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1) +{ + u64 hv_status, control = (u64)code | HV_HYPERCALL_FAST_BIT; + +#ifdef CONFIG_X86_64 + { + __asm__ __volatile__("call *%4" + : "=a" (hv_status), ASM_CALL_CONSTRAINT, + "+c" (control), "+d" (input1) + : "m" (hv_hypercall_pg) + : "cc", "r8", "r9", "r10", "r11"); + } +#else + { + u32 input1_hi = upper_32_bits(input1); + u32 input1_lo = lower_32_bits(input1); + + __asm__ __volatile__ ("call *%5" + : "=A"(hv_status), + "+c"(input1_lo), + ASM_CALL_CONSTRAINT + : "A" (control), + "b" (input1_hi), + "m" (hv_hypercall_pg) + : "cc", "edi", "esi"); + } +#endif + return hv_status; +} + +/* + * Rep hypercalls. Callers of this functions are supposed to ensure that + * rep_count and varhead_size comply with Hyper-V hypercall definition. 
+ */ +static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size, + void *input, void *output) +{ + u64 control = code; + u64 status; + u16 rep_comp; + + control |= (u64)varhead_size << HV_HYPERCALL_VARHEAD_OFFSET; + control |= (u64)rep_count << HV_HYPERCALL_REP_COMP_OFFSET; + + do { + status = hv_do_hypercall(control, input, output); + if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS) + return status; + + /* Bits 32-43 of status have 'Reps completed' data. */ + rep_comp = (status & HV_HYPERCALL_REP_COMP_MASK) >> + HV_HYPERCALL_REP_COMP_OFFSET; + + control &= ~HV_HYPERCALL_REP_START_MASK; + control |= (u64)rep_comp << HV_HYPERCALL_REP_START_OFFSET; + + touch_nmi_watchdog(); + } while (rep_comp < rep_count); + + return status; +} + +/* + * Hypervisor's notion of virtual processor ID is different from + * Linux' notion of CPU ID. This information can only be retrieved + * in the context of the calling CPU. Setup a map for easy access + * to this information. + */ +extern u32 *hv_vp_index; + +/** + * hv_cpu_number_to_vp_number() - Map CPU to VP. + * @cpu_number: CPU number in Linux terms + * + * This function returns the mapping between the Linux processor + * number and the hypervisor's virtual processor number, useful + * in making hypercalls and such that talk about specific + * processors. + * + * Return: Virtual processor number in Hyper-V terms + */ +static inline int hv_cpu_number_to_vp_number(int cpu_number) +{ + return hv_vp_index[cpu_number]; +} void hyperv_init(void); +void hyperv_setup_mmu_ops(void); +void hyper_alloc_mmu(void); void hyperv_report_panic(struct pt_regs *regs); bool hv_is_hypercall_page_setup(void); void hyperv_cleanup(void); -#endif +#else /* CONFIG_HYPERV */ +static inline void hyperv_init(void) {} +static inline bool hv_is_hypercall_page_setup(void) { return false; } +static inline void hyperv_cleanup(void) {} +static inline void hyperv_setup_mmu_ops(void) {} +#endif /* CONFIG_HYPERV */ + #ifdef CONFIG_HYPERV_TSCPAGE struct ms_hyperv_tsc_page *hv_get_tsc_page(void); static inline u64 hv_read_tsc_page(const struct ms_hyperv_tsc_page *tsc_pg) diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index c25dd22f7c70..12deec722cf0 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -71,11 +71,6 @@ static inline void write_cr3(unsigned long x) PVOP_VCALL1(pv_mmu_ops.write_cr3, x); } -static inline unsigned long __read_cr4(void) -{ - return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4); -} - static inline void __write_cr4(unsigned long x) { PVOP_VCALL1(pv_cpu_ops.write_cr4, x); @@ -228,10 +223,6 @@ static inline void set_ldt(const void *addr, unsigned entries) { PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries); } -static inline void store_idt(struct desc_ptr *dtr) -{ - PVOP_VCALL1(pv_cpu_ops.store_idt, dtr); -} static inline unsigned long paravirt_store_tr(void) { return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr); @@ -365,12 +356,6 @@ static inline void paravirt_release_p4d(unsigned long pfn) PVOP_VCALL1(pv_mmu_ops.release_p4d, pfn); } -static inline void pte_update(struct mm_struct *mm, unsigned long addr, - pte_t *ptep) -{ - PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep); -} - static inline pte_t __pte(pteval_t val) { pteval_t ret; @@ -472,28 +457,6 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte); } -static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, - pmd_t *pmdp, 
pmd_t pmd) -{ - if (sizeof(pmdval_t) > sizeof(long)) - /* 5 arg words */ - pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd); - else - PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp, - native_pmd_val(pmd)); -} - -static inline void set_pud_at(struct mm_struct *mm, unsigned long addr, - pud_t *pudp, pud_t pud) -{ - if (sizeof(pudval_t) > sizeof(long)) - /* 5 arg words */ - pv_mmu_ops.set_pud_at(mm, addr, pudp, pud); - else - PVOP_VCALL4(pv_mmu_ops.set_pud_at, mm, addr, pudp, - native_pud_val(pud)); -} - static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) { pmdval_t val = native_pmd_val(pmd); diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index 6b64fc6367f2..280d94c36dad 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -107,7 +107,6 @@ struct pv_cpu_ops { unsigned long (*read_cr0)(void); void (*write_cr0)(unsigned long); - unsigned long (*read_cr4)(void); void (*write_cr4)(unsigned long); #ifdef CONFIG_X86_64 @@ -119,8 +118,6 @@ struct pv_cpu_ops { void (*load_tr_desc)(void); void (*load_gdt)(const struct desc_ptr *); void (*load_idt)(const struct desc_ptr *); - /* store_gdt has been removed. */ - void (*store_idt)(struct desc_ptr *); void (*set_ldt)(const void *desc, unsigned entries); unsigned long (*store_tr)(void); void (*load_tls)(struct thread_struct *t, unsigned int cpu); @@ -245,12 +242,6 @@ struct pv_mmu_ops { void (*set_pte_at)(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval); void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval); - void (*set_pmd_at)(struct mm_struct *mm, unsigned long addr, - pmd_t *pmdp, pmd_t pmdval); - void (*set_pud_at)(struct mm_struct *mm, unsigned long addr, - pud_t *pudp, pud_t pudval); - void (*pte_update)(struct mm_struct *mm, unsigned long addr, - pte_t *ptep); pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr, pte_t *ptep); @@ -468,8 +459,8 @@ int paravirt_disable_iospace(void); */ #ifdef CONFIG_X86_32 #define PVOP_VCALL_ARGS \ - unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; \ - register void *__sp asm("esp") + unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; + #define PVOP_CALL_ARGS PVOP_VCALL_ARGS #define PVOP_CALL_ARG1(x) "a" ((unsigned long)(x)) @@ -489,8 +480,8 @@ int paravirt_disable_iospace(void); /* [re]ax isn't an arg, but the return val */ #define PVOP_VCALL_ARGS \ unsigned long __edi = __edi, __esi = __esi, \ - __edx = __edx, __ecx = __ecx, __eax = __eax; \ - register void *__sp asm("rsp") + __edx = __edx, __ecx = __ecx, __eax = __eax; + #define PVOP_CALL_ARGS PVOP_VCALL_ARGS #define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x)) @@ -541,7 +532,7 @@ int paravirt_disable_iospace(void); asm volatile(pre \ paravirt_alt(PARAVIRT_CALL) \ post \ - : call_clbr, "+r" (__sp) \ + : call_clbr, ASM_CALL_CONSTRAINT \ : paravirt_type(op), \ paravirt_clobber(clbr), \ ##__VA_ARGS__ \ @@ -551,7 +542,7 @@ int paravirt_disable_iospace(void); asm volatile(pre \ paravirt_alt(PARAVIRT_CALL) \ post \ - : call_clbr, "+r" (__sp) \ + : call_clbr, ASM_CALL_CONSTRAINT \ : paravirt_type(op), \ paravirt_clobber(clbr), \ ##__VA_ARGS__ \ @@ -578,7 +569,7 @@ int paravirt_disable_iospace(void); asm volatile(pre \ paravirt_alt(PARAVIRT_CALL) \ post \ - : call_clbr, "+r" (__sp) \ + : call_clbr, ASM_CALL_CONSTRAINT \ : paravirt_type(op), \ paravirt_clobber(clbr), \ ##__VA_ARGS__ \ diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index bbeae4a2bd01..b714934512b3 100644 --- a/arch/x86/include/asm/pgtable.h +++ 
b/arch/x86/include/asm/pgtable.h @@ -55,8 +55,6 @@ extern pmdval_t early_pmd_flags; #else /* !CONFIG_PARAVIRT */ #define set_pte(ptep, pte) native_set_pte(ptep, pte) #define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte) -#define set_pmd_at(mm, addr, pmdp, pmd) native_set_pmd_at(mm, addr, pmdp, pmd) -#define set_pud_at(mm, addr, pudp, pud) native_set_pud_at(mm, addr, pudp, pud) #define set_pte_atomic(ptep, pte) \ native_set_pte_atomic(ptep, pte) @@ -87,8 +85,6 @@ extern pmdval_t early_pmd_flags; #define pte_clear(mm, addr, ptep) native_pte_clear(mm, addr, ptep) #define pmd_clear(pmd) native_pmd_clear(pmd) -#define pte_update(mm, addr, ptep) do { } while (0) - #define pgd_val(x) native_pgd_val(x) #define __pgd(x) native_make_pgd(x) @@ -979,31 +975,18 @@ static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr, native_set_pte(ptep, pte); } -static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr, - pmd_t *pmdp , pmd_t pmd) +static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, pmd_t pmd) { native_set_pmd(pmdp, pmd); } -static inline void native_set_pud_at(struct mm_struct *mm, unsigned long addr, - pud_t *pudp, pud_t pud) +static inline void set_pud_at(struct mm_struct *mm, unsigned long addr, + pud_t *pudp, pud_t pud) { native_set_pud(pudp, pud); } -#ifndef CONFIG_PARAVIRT -/* - * Rules for using pte_update - it must be called after any PTE update which - * has not been done using the set_pte / clear_pte interfaces. It is used by - * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE - * updates should either be sets, clears, or set_pte_atomic for P->P - * transitions, which means this hook should only be called for user PTEs. - * This hook implies a P->P protection or access change has taken place, which - * requires a subsequent TLB flush. - */ -#define pte_update(mm, addr, ptep) do { } while (0) -#endif - /* * We only update the dirty/accessed state if we set * the dirty bit by hand in the kernel, since the hardware @@ -1031,7 +1014,6 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { pte_t pte = native_ptep_get_and_clear(ptep); - pte_update(mm, addr, ptep); return pte; } @@ -1058,7 +1040,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte); - pte_update(mm, addr, ptep); } #define flush_tlb_fix_spurious_fault(vma, address) do { } while (0) @@ -1172,6 +1153,23 @@ static inline pte_t pte_swp_clear_soft_dirty(pte_t pte) { return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY); } + +#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION +static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd) +{ + return pmd_set_flags(pmd, _PAGE_SWP_SOFT_DIRTY); +} + +static inline int pmd_swp_soft_dirty(pmd_t pmd) +{ + return pmd_flags(pmd) & _PAGE_SWP_SOFT_DIRTY; +} + +static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd) +{ + return pmd_clear_flags(pmd, _PAGE_SWP_SOFT_DIRTY); +} +#endif #endif #define PKRU_AD_BIT 0x1 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 2160c1fee920..972a4698c530 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -180,15 +180,21 @@ static inline int pgd_large(pgd_t pgd) { return 0; } /* * Encode and de-code a swap entry * - * | ... | 11| 10| 9|8|7|6|5| 4| 3|2|1|0| <- bit number - * | ... 
|SW3|SW2|SW1|G|L|D|A|CD|WT|U|W|P| <- bit names - * | OFFSET (14->63) | TYPE (9-13) |0|X|X|X| X| X|X|X|0| <- swp entry + * | ... | 11| 10| 9|8|7|6|5| 4| 3|2| 1|0| <- bit number + * | ... |SW3|SW2|SW1|G|L|D|A|CD|WT|U| W|P| <- bit names + * | OFFSET (14->63) | TYPE (9-13) |0|0|X|X| X| X|X|SD|0| <- swp entry * * G (8) is aliased and used as a PROT_NONE indicator for * !present ptes. We need to start storing swap entries above * there. We also need to avoid using A and D because of an * erratum where they can be incorrectly set by hardware on * non-present PTEs. + * + * SD (1) in swp entry is used to store soft dirty bit, which helps us + * remember soft dirty over page migration + * + * Bit 7 in swp entry should be 0 because pmd_present checks not only P, + * but also L and G. */ #define SWP_TYPE_FIRST_BIT (_PAGE_BIT_PROTNONE + 1) #define SWP_TYPE_BITS 5 @@ -204,7 +210,9 @@ static inline int pgd_large(pgd_t pgd) { return 0; } ((type) << (SWP_TYPE_FIRST_BIT)) \ | ((offset) << SWP_OFFSET_FIRST_BIT) }) #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) }) +#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val((pmd)) }) #define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val }) +#define __swp_entry_to_pmd(x) ((pmd_t) { .pmd = (x).val }) extern int kern_addr_valid(unsigned long addr); extern void cleanup_highmap(void); diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index 399261ce904c..f1492473f10e 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -99,15 +99,15 @@ /* * Tracking soft dirty bit when a page goes to a swap is tricky. * We need a bit which can be stored in pte _and_ not conflict - * with swap entry format. On x86 bits 6 and 7 are *not* involved - * into swap entry computation, but bit 6 is used for nonlinear - * file mapping, so we borrow bit 7 for soft dirty tracking. + * with swap entry format. On x86 bits 1-4 are *not* involved + * into swap entry computation, but bit 7 is used for thp migration, + * so we borrow bit 1 for soft dirty tracking. * * Please note that this bit must be treated as swap dirty page - * mark if and only if the PTE has present bit clear! + * mark if and only if the PTE/PMD has present bit clear! 
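/*
 * Editorial sketch, not part of the patch: a user-space model of the
 * non-present swap entry layout documented above -- type in bits 9-13,
 * offset from bit 14 upward, and the soft-dirty marker borrowed from bit 1
 * (_PAGE_RW) now that bit 7 is needed for THP migration. The numeric values
 * are taken from the comments above and restated here as assumptions.
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_SWP_TYPE_FIRST_BIT   9
#define TOY_SWP_TYPE_BITS        5
#define TOY_SWP_OFFSET_FIRST_BIT 14
#define TOY_SWP_SOFT_DIRTY       (1ULL << 1)	/* borrowed _PAGE_RW bit */

static uint64_t toy_swp_entry(unsigned int type, uint64_t offset)
{
	return ((uint64_t)type << TOY_SWP_TYPE_FIRST_BIT) |
	       (offset << TOY_SWP_OFFSET_FIRST_BIT);
}

int main(void)
{
	uint64_t e = toy_swp_entry(3, 0x1234) | TOY_SWP_SOFT_DIRTY;
	unsigned int type = (e >> TOY_SWP_TYPE_FIRST_BIT) &
			    ((1u << TOY_SWP_TYPE_BITS) - 1);

	printf("type=%u offset=%#llx soft_dirty=%d present_bit=%d\n",
	       type, (unsigned long long)(e >> TOY_SWP_OFFSET_FIRST_BIT),
	       !!(e & TOY_SWP_SOFT_DIRTY), (int)(e & 1));
	return 0;
}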
*/ #ifdef CONFIG_MEM_SOFT_DIRTY -#define _PAGE_SWP_SOFT_DIRTY _PAGE_PSE +#define _PAGE_SWP_SOFT_DIRTY _PAGE_RW #else #define _PAGE_SWP_SOFT_DIRTY (_AT(pteval_t, 0)) #endif diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h index ec1f3c651150..4f44505dbf87 100644 --- a/arch/x86/include/asm/preempt.h +++ b/arch/x86/include/asm/preempt.h @@ -100,19 +100,14 @@ static __always_inline bool should_resched(int preempt_offset) #ifdef CONFIG_PREEMPT extern asmlinkage void ___preempt_schedule(void); -# define __preempt_schedule() \ -({ \ - register void *__sp asm(_ASM_SP); \ - asm volatile ("call ___preempt_schedule" : "+r"(__sp)); \ -}) +# define __preempt_schedule() \ + asm volatile ("call ___preempt_schedule" : ASM_CALL_CONSTRAINT) extern asmlinkage void preempt_schedule(void); extern asmlinkage void ___preempt_schedule_notrace(void); -# define __preempt_schedule_notrace() \ -({ \ - register void *__sp asm(_ASM_SP); \ - asm volatile ("call ___preempt_schedule_notrace" : "+r"(__sp)); \ -}) +# define __preempt_schedule_notrace() \ + asm volatile ("call ___preempt_schedule_notrace" : ASM_CALL_CONSTRAINT) + extern asmlinkage void preempt_schedule_notrace(void); #endif diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 3fa26a61eabc..b390ff76e58f 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -677,8 +677,6 @@ static inline void sync_core(void) * Like all of Linux's memory ordering operations, this is a * compiler barrier as well. */ - register void *__sp asm(_ASM_SP); - #ifdef CONFIG_X86_32 asm volatile ( "pushfl\n\t" @@ -686,7 +684,7 @@ static inline void sync_core(void) "pushl $1f\n\t" "iret\n\t" "1:" - : "+r" (__sp) : : "memory"); + : ASM_CALL_CONSTRAINT : : "memory"); #else unsigned int tmp; @@ -703,7 +701,7 @@ static inline void sync_core(void) "iretq\n\t" UNWIND_HINT_RESTORE "1:" - : "=&r" (tmp), "+r" (__sp) : : "cc", "memory"); + : "=&r" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory"); #endif } diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h index a34e0d4b957d..7116b7931c7b 100644 --- a/arch/x86/include/asm/rwsem.h +++ b/arch/x86/include/asm/rwsem.h @@ -103,7 +103,6 @@ static inline bool __down_read_trylock(struct rw_semaphore *sem) ({ \ long tmp; \ struct rw_semaphore* ret; \ - register void *__sp asm(_ASM_SP); \ \ asm volatile("# beginning down_write\n\t" \ LOCK_PREFIX " xadd %1,(%4)\n\t" \ @@ -114,7 +113,8 @@ static inline bool __down_read_trylock(struct rw_semaphore *sem) " call " slow_path "\n" \ "1:\n" \ "# ending down_write" \ - : "+m" (sem->count), "=d" (tmp), "=a" (ret), "+r" (__sp) \ + : "+m" (sem->count), "=d" (tmp), \ + "=a" (ret), ASM_CALL_CONSTRAINT \ : "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS) \ : "memory", "cc"); \ ret; \ diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index e4585a393965..a65cf544686a 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h @@ -39,6 +39,7 @@ static inline void vsmp_init(void) { } #endif void setup_bios_corruption_check(void); +void early_platform_quirks(void); extern unsigned long saved_video_mode; diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h index 9efaabf5b54b..a24dfcf79f4a 100644 --- a/arch/x86/include/asm/special_insns.h +++ b/arch/x86/include/asm/special_insns.h @@ -135,6 +135,11 @@ static inline void native_wbinvd(void) extern asmlinkage void native_load_gs_index(unsigned); +static inline unsigned long __read_cr4(void) +{ + return 
native_read_cr4(); +} + #ifdef CONFIG_PARAVIRT #include #else @@ -173,11 +178,6 @@ static inline void write_cr3(unsigned long x) native_write_cr3(x); } -static inline unsigned long __read_cr4(void) -{ - return native_read_cr4(); -} - static inline void __write_cr4(unsigned long x) { native_write_cr4(x); diff --git a/arch/x86/include/asm/string_32.h b/arch/x86/include/asm/string_32.h index e9ee84873de5..e371e7229042 100644 --- a/arch/x86/include/asm/string_32.h +++ b/arch/x86/include/asm/string_32.h @@ -340,6 +340,30 @@ extern void *memset(void *, int, size_t); #endif #endif /* !CONFIG_FORTIFY_SOURCE */ +#define __HAVE_ARCH_MEMSET16 +static inline void *memset16(uint16_t *s, uint16_t v, size_t n) +{ + int d0, d1; + asm volatile("rep\n\t" + "stosw" + : "=&c" (d0), "=&D" (d1) + : "a" (v), "1" (s), "0" (n) + : "memory"); + return s; +} + +#define __HAVE_ARCH_MEMSET32 +static inline void *memset32(uint32_t *s, uint32_t v, size_t n) +{ + int d0, d1; + asm volatile("rep\n\t" + "stosl" + : "=&c" (d0), "=&D" (d1) + : "a" (v), "1" (s), "0" (n) + : "memory"); + return s; +} + /* * find the first occurrence of byte 'c', or 1 past the area if none */ diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h index 2a8c822de1fc..f372a70a523f 100644 --- a/arch/x86/include/asm/string_64.h +++ b/arch/x86/include/asm/string_64.h @@ -58,6 +58,42 @@ extern void *__memcpy(void *to, const void *from, size_t len); void *memset(void *s, int c, size_t n); void *__memset(void *s, int c, size_t n); +#define __HAVE_ARCH_MEMSET16 +static inline void *memset16(uint16_t *s, uint16_t v, size_t n) +{ + long d0, d1; + asm volatile("rep\n\t" + "stosw" + : "=&c" (d0), "=&D" (d1) + : "a" (v), "1" (s), "0" (n) + : "memory"); + return s; +} + +#define __HAVE_ARCH_MEMSET32 +static inline void *memset32(uint32_t *s, uint32_t v, size_t n) +{ + long d0, d1; + asm volatile("rep\n\t" + "stosl" + : "=&c" (d0), "=&D" (d1) + : "a" (v), "1" (s), "0" (n) + : "memory"); + return s; +} + +#define __HAVE_ARCH_MEMSET64 +static inline void *memset64(uint64_t *s, uint64_t v, size_t n) +{ + long d0, d1; + asm volatile("rep\n\t" + "stosq" + : "=&c" (d0), "=&D" (d1) + : "a" (v), "1" (s), "0" (n) + : "memory"); + return s; +} + #define __HAVE_ARCH_MEMMOVE void *memmove(void *dest, const void *src, size_t count); void *__memmove(void *dest, const void *src, size_t count); diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h index 58fffe79e417..14835dd205a5 100644 --- a/arch/x86/include/asm/svm.h +++ b/arch/x86/include/asm/svm.h @@ -107,6 +107,9 @@ struct __attribute__ ((__packed__)) vmcb_control_area { #define V_IRQ_SHIFT 8 #define V_IRQ_MASK (1 << V_IRQ_SHIFT) +#define V_GIF_SHIFT 9 +#define V_GIF_MASK (1 << V_GIF_SHIFT) + #define V_INTR_PRIO_SHIFT 16 #define V_INTR_PRIO_MASK (0x0f << V_INTR_PRIO_SHIFT) @@ -116,6 +119,9 @@ struct __attribute__ ((__packed__)) vmcb_control_area { #define V_INTR_MASKING_SHIFT 24 #define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT) +#define V_GIF_ENABLE_SHIFT 25 +#define V_GIF_ENABLE_MASK (1 << V_GIF_ENABLE_SHIFT) + #define AVIC_ENABLE_SHIFT 31 #define AVIC_ENABLE_MASK (1 << AVIC_ENABLE_SHIFT) diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index 5161da1a0fa0..89e7eeb5cec1 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -158,17 +158,6 @@ struct thread_info { */ #ifndef __ASSEMBLY__ -static inline unsigned long current_stack_pointer(void) -{ - unsigned long sp; -#ifdef CONFIG_X86_64 - asm("mov 
%%rsp,%0" : "=g" (sp)); -#else - asm("mov %%esp,%0" : "=g" (sp)); -#endif - return sp; -} - /* * Walks up the stack frames to make sure that the specified object is * entirely contained by a single stack frame. diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index d23e61dc0640..4893abf7f74f 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -198,6 +198,8 @@ static inline void cr4_set_bits_and_update_boot(unsigned long mask) cr4_set_bits(mask); } +extern void initialize_tlbstate_and_flush(void); + static inline void __native_flush_tlb(void) { /* diff --git a/arch/x86/include/asm/trace/fpu.h b/arch/x86/include/asm/trace/fpu.h index 342e59789fcd..39f7a27bef13 100644 --- a/arch/x86/include/asm/trace/fpu.h +++ b/arch/x86/include/asm/trace/fpu.h @@ -12,25 +12,22 @@ DECLARE_EVENT_CLASS(x86_fpu, TP_STRUCT__entry( __field(struct fpu *, fpu) - __field(bool, fpregs_active) - __field(bool, fpstate_active) + __field(bool, initialized) __field(u64, xfeatures) __field(u64, xcomp_bv) ), TP_fast_assign( __entry->fpu = fpu; - __entry->fpregs_active = fpu->fpregs_active; - __entry->fpstate_active = fpu->fpstate_active; + __entry->initialized = fpu->initialized; if (boot_cpu_has(X86_FEATURE_OSXSAVE)) { __entry->xfeatures = fpu->state.xsave.header.xfeatures; __entry->xcomp_bv = fpu->state.xsave.header.xcomp_bv; } ), - TP_printk("x86/fpu: %p fpregs_active: %d fpstate_active: %d xfeatures: %llx xcomp_bv: %llx", + TP_printk("x86/fpu: %p initialized: %d xfeatures: %llx xcomp_bv: %llx", __entry->fpu, - __entry->fpregs_active, - __entry->fpstate_active, + __entry->initialized, __entry->xfeatures, __entry->xcomp_bv ) diff --git a/arch/x86/include/asm/trace/hyperv.h b/arch/x86/include/asm/trace/hyperv.h new file mode 100644 index 000000000000..4253bca99989 --- /dev/null +++ b/arch/x86/include/asm/trace/hyperv.h @@ -0,0 +1,40 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM hyperv + +#if !defined(_TRACE_HYPERV_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_HYPERV_H + +#include + +#if IS_ENABLED(CONFIG_HYPERV) + +TRACE_EVENT(hyperv_mmu_flush_tlb_others, + TP_PROTO(const struct cpumask *cpus, + const struct flush_tlb_info *info), + TP_ARGS(cpus, info), + TP_STRUCT__entry( + __field(unsigned int, ncpus) + __field(struct mm_struct *, mm) + __field(unsigned long, addr) + __field(unsigned long, end) + ), + TP_fast_assign(__entry->ncpus = cpumask_weight(cpus); + __entry->mm = info->mm; + __entry->addr = info->start; + __entry->end = info->end; + ), + TP_printk("ncpus %d mm %p addr %lx, end %lx", + __entry->ncpus, __entry->mm, + __entry->addr, __entry->end) + ); + +#endif /* CONFIG_HYPERV */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH asm/trace/ +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE hyperv +#endif /* _TRACE_HYPERV_H */ + +/* This part must be outside protection */ +#include diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 184eb9894dae..4b892917edeb 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -166,11 +166,11 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL)) ({ \ int __ret_gu; \ register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \ - register void *__sp asm(_ASM_SP); \ __chk_user_ptr(ptr); \ might_fault(); \ asm volatile("call __get_user_%P4" \ - : "=a" (__ret_gu), "=r" (__val_gu), "+r" (__sp) \ + : "=a" (__ret_gu), "=r" (__val_gu), \ + ASM_CALL_CONSTRAINT \ : "0" (ptr), "i" (sizeof(*(ptr)))); \ (x) = (__force __typeof__(*(ptr))) 
__val_gu; \ __builtin_expect(__ret_gu, 0); \ @@ -337,7 +337,7 @@ do { \ _ASM_EXTABLE(1b, 4b) \ _ASM_EXTABLE(2b, 4b) \ : "=r" (retval), "=&A"(x) \ - : "m" (__m(__ptr)), "m" __m(((u32 *)(__ptr)) + 1), \ + : "m" (__m(__ptr)), "m" __m(((u32 __user *)(__ptr)) + 1), \ "i" (errret), "0" (retval)); \ }) diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 35cd06f636ab..caec8417539f 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -72,6 +72,7 @@ #define SECONDARY_EXEC_PAUSE_LOOP_EXITING 0x00000400 #define SECONDARY_EXEC_RDRAND 0x00000800 #define SECONDARY_EXEC_ENABLE_INVPCID 0x00001000 +#define SECONDARY_EXEC_ENABLE_VMFUNC 0x00002000 #define SECONDARY_EXEC_SHADOW_VMCS 0x00004000 #define SECONDARY_EXEC_RDSEED 0x00010000 #define SECONDARY_EXEC_ENABLE_PML 0x00020000 @@ -114,6 +115,10 @@ #define VMX_MISC_SAVE_EFER_LMA 0x00000020 #define VMX_MISC_ACTIVITY_HLT 0x00000040 +/* VMFUNC functions */ +#define VMX_VMFUNC_EPTP_SWITCHING 0x00000001 +#define VMFUNC_EPTP_ENTRIES 512 + static inline u32 vmx_basic_vmcs_revision_id(u64 vmx_basic) { return vmx_basic & GENMASK_ULL(30, 0); @@ -187,6 +192,8 @@ enum vmcs_field { APIC_ACCESS_ADDR_HIGH = 0x00002015, POSTED_INTR_DESC_ADDR = 0x00002016, POSTED_INTR_DESC_ADDR_HIGH = 0x00002017, + VM_FUNCTION_CONTROL = 0x00002018, + VM_FUNCTION_CONTROL_HIGH = 0x00002019, EPT_POINTER = 0x0000201a, EPT_POINTER_HIGH = 0x0000201b, EOI_EXIT_BITMAP0 = 0x0000201c, @@ -197,6 +204,8 @@ enum vmcs_field { EOI_EXIT_BITMAP2_HIGH = 0x00002021, EOI_EXIT_BITMAP3 = 0x00002022, EOI_EXIT_BITMAP3_HIGH = 0x00002023, + EPTP_LIST_ADDRESS = 0x00002024, + EPTP_LIST_ADDRESS_HIGH = 0x00002025, VMREAD_BITMAP = 0x00002026, VMWRITE_BITMAP = 0x00002028, XSS_EXIT_BITMAP = 0x0000202C, @@ -444,6 +453,7 @@ enum vmcs_field { #define VMX_EPT_EXECUTE_ONLY_BIT (1ull) #define VMX_EPT_PAGE_WALK_4_BIT (1ull << 6) +#define VMX_EPT_PAGE_WALK_5_BIT (1ull << 7) #define VMX_EPTP_UC_BIT (1ull << 8) #define VMX_EPTP_WB_BIT (1ull << 14) #define VMX_EPT_2MB_PAGE_BIT (1ull << 16) @@ -459,12 +469,14 @@ enum vmcs_field { #define VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT (1ull << 10) /* (42 - 32) */ #define VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT (1ull << 11) /* (43 - 32) */ -#define VMX_EPT_DEFAULT_GAW 3 -#define VMX_EPT_MAX_GAW 0x4 #define VMX_EPT_MT_EPTE_SHIFT 3 -#define VMX_EPT_GAW_EPTP_SHIFT 3 -#define VMX_EPT_AD_ENABLE_BIT (1ull << 6) -#define VMX_EPT_DEFAULT_MT 0x6ull +#define VMX_EPTP_PWL_MASK 0x38ull +#define VMX_EPTP_PWL_4 0x18ull +#define VMX_EPTP_PWL_5 0x20ull +#define VMX_EPTP_AD_ENABLE_BIT (1ull << 6) +#define VMX_EPTP_MT_MASK 0x7ull +#define VMX_EPTP_MT_WB 0x6ull +#define VMX_EPTP_MT_UC 0x0ull #define VMX_EPT_READABLE_MASK 0x1ull #define VMX_EPT_WRITABLE_MASK 0x2ull #define VMX_EPT_EXECUTABLE_MASK 0x4ull diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h index 9606688caa4b..7cb282e9e587 100644 --- a/arch/x86/include/asm/xen/hypercall.h +++ b/arch/x86/include/asm/xen/hypercall.h @@ -113,10 +113,9 @@ extern struct { char _entry[32]; } hypercall_page[]; register unsigned long __arg2 asm(__HYPERCALL_ARG2REG) = __arg2; \ register unsigned long __arg3 asm(__HYPERCALL_ARG3REG) = __arg3; \ register unsigned long __arg4 asm(__HYPERCALL_ARG4REG) = __arg4; \ - register unsigned long __arg5 asm(__HYPERCALL_ARG5REG) = __arg5; \ - register void *__sp asm(_ASM_SP); + register unsigned long __arg5 asm(__HYPERCALL_ARG5REG) = __arg5; -#define __HYPERCALL_0PARAM "=r" (__res), "+r" (__sp) +#define __HYPERCALL_0PARAM "=r" (__res), ASM_CALL_CONSTRAINT #define 
__HYPERCALL_1PARAM __HYPERCALL_0PARAM, "+r" (__arg1) #define __HYPERCALL_2PARAM __HYPERCALL_1PARAM, "+r" (__arg2) #define __HYPERCALL_3PARAM __HYPERCALL_2PARAM, "+r" (__arg3) @@ -552,13 +551,13 @@ static inline void MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr, struct desc_struct desc) { - u32 *p = (u32 *) &desc; - mcl->op = __HYPERVISOR_update_descriptor; if (sizeof(maddr) == sizeof(long)) { mcl->args[0] = maddr; mcl->args[1] = *(unsigned long *)&desc; } else { + u32 *p = (u32 *)&desc; + mcl->args[0] = maddr; mcl->args[1] = maddr >> 32; mcl->args[2] = *p++; diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h index 8417ef7c3885..07b6531813c4 100644 --- a/arch/x86/include/asm/xen/page.h +++ b/arch/x86/include/asm/xen/page.h @@ -158,9 +158,6 @@ static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn) unsigned long pfn; int ret; - if (xen_feature(XENFEAT_auto_translated_physmap)) - return mfn; - if (unlikely(mfn >= machine_to_phys_nr)) return ~0; @@ -317,8 +314,6 @@ static inline pte_t __pte_ma(pteval_t x) #define p4d_val_ma(x) ((x).p4d) #endif -void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid); - xmaddr_t arbitrary_virt_to_machine(void *address); unsigned long arbitrary_virt_to_mfn(void *vaddr); void make_lowmem_page_readonly(void *vaddr); diff --git a/arch/x86/include/uapi/asm/hyperv.h b/arch/x86/include/uapi/asm/hyperv.h index 127ddadee1a5..f65d12504e80 100644 --- a/arch/x86/include/uapi/asm/hyperv.h +++ b/arch/x86/include/uapi/asm/hyperv.h @@ -149,12 +149,9 @@ */ #define HV_X64_DEPRECATING_AEOI_RECOMMENDED (1 << 9) -/* - * HV_VP_SET available - */ +/* Recommend using the newer ExProcessorMasks interface */ #define HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED (1 << 11) - /* * Crash notification flag. */ @@ -242,7 +239,11 @@ (~((1ull << HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT) - 1)) /* Declare the various hypercall operations. 
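/*
 * Editorial sketch, not part of the patch: how the 64-bit control word used
 * by hv_do_rep_hypercall() earlier in this patch is assembled, and how the
 * "reps completed" field is pulled back out of the returned status. The
 * offsets and masks mirror the HV_HYPERCALL_* definitions above, restated
 * here as assumptions; the hypercall itself is mocked.
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_VARHEAD_OFFSET   17
#define TOY_REP_COMP_OFFSET  32
#define TOY_REP_COMP_MASK    (0xfffULL << TOY_REP_COMP_OFFSET)	/* bits 43:32 */
#define TOY_REP_START_OFFSET 48
#define TOY_RESULT_MASK      0xffffULL				/* bits 15:0 */

static uint64_t toy_rep_control(uint16_t code, uint16_t varhead,
				uint16_t rep_count, uint16_t rep_start)
{
	return (uint64_t)code |
	       ((uint64_t)varhead << TOY_VARHEAD_OFFSET) |
	       ((uint64_t)rep_count << TOY_REP_COMP_OFFSET) |
	       ((uint64_t)rep_start << TOY_REP_START_OFFSET);
}

int main(void)
{
	/* e.g. an address-list flush (code 0x0003) resumed at rep 3 of 8 */
	uint64_t control = toy_rep_control(0x0003, 0, 8, 3);
	uint64_t status = 5ULL << TOY_REP_COMP_OFFSET;	/* mock: success, 5 reps done */

	printf("control=%#llx result=%llu reps_done=%llu\n",
	       (unsigned long long)control,
	       (unsigned long long)(status & TOY_RESULT_MASK),
	       (unsigned long long)((status & TOY_REP_COMP_MASK) >> TOY_REP_COMP_OFFSET));
	return 0;
}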
*/ +#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE 0x0002 +#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST 0x0003 #define HVCALL_NOTIFY_LONG_SPIN_WAIT 0x0008 +#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX 0x0013 +#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX 0x0014 #define HVCALL_POST_MESSAGE 0x005c #define HVCALL_SIGNAL_EVENT 0x005d @@ -259,6 +260,16 @@ #define HV_PROCESSOR_POWER_STATE_C2 2 #define HV_PROCESSOR_POWER_STATE_C3 3 +#define HV_FLUSH_ALL_PROCESSORS BIT(0) +#define HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES BIT(1) +#define HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY BIT(2) +#define HV_FLUSH_USE_EXTENDED_RANGE_FORMAT BIT(3) + +enum HV_GENERIC_SET_FORMAT { + HV_GENERIC_SET_SPARCE_4K, + HV_GENERIC_SET_ALL, +}; + /* hypercall status code */ #define HV_STATUS_SUCCESS 0 #define HV_STATUS_INVALID_HYPERCALL_CODE 2 diff --git a/arch/x86/include/uapi/asm/mman.h b/arch/x86/include/uapi/asm/mman.h index 39bca7fac087..3be08f07695c 100644 --- a/arch/x86/include/uapi/asm/mman.h +++ b/arch/x86/include/uapi/asm/mman.h @@ -3,9 +3,6 @@ #define MAP_32BIT 0x40 /* only give out 32bit addresses */ -#define MAP_HUGE_2MB (21 << MAP_HUGE_SHIFT) -#define MAP_HUGE_1GB (30 << MAP_HUGE_SHIFT) - #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS /* * Take the 4 protection key bits out of the vma->vm_flags diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 97bb2caf3428..079535e53e2a 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -118,7 +118,7 @@ static u32 isa_irq_to_gsi[NR_IRQS_LEGACY] __read_mostly = { * This is just a simple wrapper around early_memremap(), * with sanity checks for phys == 0 and size == 0. */ -char *__init __acpi_map_table(unsigned long phys, unsigned long size) +void __init __iomem *__acpi_map_table(unsigned long phys, unsigned long size) { if (!phys || !size) @@ -127,7 +127,7 @@ char *__init __acpi_map_table(unsigned long phys, unsigned long size) return early_memremap(phys, size); } -void __init __acpi_unmap_table(char *map, unsigned long size) +void __init __acpi_unmap_table(void __iomem *map, unsigned long size) { if (!map || !size) return; @@ -199,8 +199,10 @@ static int __init acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end) { struct acpi_madt_local_x2apic *processor = NULL; +#ifdef CONFIG_X86_X2APIC int apic_id; u8 enabled; +#endif processor = (struct acpi_madt_local_x2apic *)header; @@ -209,9 +211,10 @@ acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end) acpi_table_print_madt_entry(header); +#ifdef CONFIG_X86_X2APIC apic_id = processor->local_apic_id; enabled = processor->lapic_flags & ACPI_MADT_ENABLED; -#ifdef CONFIG_X86_X2APIC + /* * We need to register disabled CPU as well to permit * counting disabled CPUs. This allows us to size @@ -1083,7 +1086,7 @@ static void __init mp_config_acpi_legacy_irqs(void) mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA; #endif set_bit(MP_ISA_BUS, mp_bus_not_pci); - pr_debug("Bus #%d is ISA\n", MP_ISA_BUS); + pr_debug("Bus #%d is ISA (nIRQs: %d)\n", MP_ISA_BUS, nr_legacy_irqs()); /* * Use the default configuration for the IRQs 0-15. 
Unless @@ -1370,7 +1373,7 @@ static void __init acpi_reduced_hw_init(void) * If your system is blacklisted here, but you find that acpi=force * works for you, please contact linux-acpi@vger.kernel.org */ -static struct dmi_system_id __initdata acpi_dmi_table[] = { +static const struct dmi_system_id acpi_dmi_table[] __initconst = { /* * Boxes that need ACPI disabled */ @@ -1445,7 +1448,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = { }; /* second table for DMI checks that should run after early-quirks */ -static struct dmi_system_id __initdata acpi_dmi_table_late[] = { +static const struct dmi_system_id acpi_dmi_table_late[] __initconst = { /* * HP laptops which use a DSDT reporting as HP/SB400/10000, * which includes some code which overrides all temperature diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 7834f73efbf1..d705c769f77d 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -2097,7 +2097,7 @@ static int allocate_logical_cpuid(int apicid) /* Allocate a new cpuid. */ if (nr_logical_cpuids >= nr_cpu_ids) { - WARN_ONCE(1, "APIC: NR_CPUS/possible_cpus limit of %i reached. " + WARN_ONCE(1, "APIC: NR_CPUS/possible_cpus limit of %u reached. " "Processor %d/0x%x and the rest are ignored.\n", nr_cpu_ids, nr_logical_cpuids, apicid); return -EINVAL; @@ -2130,7 +2130,7 @@ int generic_processor_info(int apicid, int version) * Since fixing handling of boot_cpu_physical_apicid requires * another discussion and tests on each platform, we leave it * for now and here we use read_apic_id() directly in this - * function, __generic_processor_info(). + * function, generic_processor_info(). */ if (disabled_cpu_apicid != BAD_APICID && disabled_cpu_apicid != read_apic_id() && diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index 446b0d3d4932..e4b0d92b3ae0 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c @@ -2043,7 +2043,7 @@ static int __init swab_apm_power_in_minutes(const struct dmi_system_id *d) return 0; } -static struct dmi_system_id __initdata apm_dmi_table[] = { +static const struct dmi_system_id apm_dmi_table[] __initconst = { { print_if_true, KERN_WARNING "IBM T23 - BIOS 1.03b+ and controller firmware 1.02+ may be needed for Linux APM.", diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 9862e2cd6d93..d58184b7cd44 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -763,6 +763,16 @@ static void init_amd_bd(struct cpuinfo_x86 *c) } } +static void init_amd_zn(struct cpuinfo_x86 *c) +{ + /* + * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects + * all up to and including B1. + */ + if (c->x86_model <= 1 && c->x86_mask <= 1) + set_cpu_cap(c, X86_FEATURE_CPB); +} + static void init_amd(struct cpuinfo_x86 *c) { early_init_amd(c); @@ -791,6 +801,7 @@ static void init_amd(struct cpuinfo_x86 *c) case 0x10: init_amd_gh(c); break; case 0x12: init_amd_ln(c); break; case 0x15: init_amd_bd(c); break; + case 0x17: init_amd_zn(c); break; } /* Enable workaround for FXSAVE leak */ diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index db684880d74a..0af86d9242da 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -21,14 +21,6 @@ void __init check_bugs(void) { -#ifdef CONFIG_X86_32 - /* - * Regardless of whether PCID is enumerated, the SDM says - * that it can't be enabled in 32-bit mode. 
- */ - setup_clear_cpu_cap(X86_FEATURE_PCID); -#endif - identify_boot_cpu(); if (!IS_ENABLED(CONFIG_SMP)) { diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index efba8e3da3e2..c9176bae7fd8 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -169,21 +169,21 @@ static int __init x86_mpx_setup(char *s) __setup("nompx", x86_mpx_setup); #ifdef CONFIG_X86_64 -static int __init x86_pcid_setup(char *s) +static int __init x86_nopcid_setup(char *s) { - /* require an exact match without trailing characters */ - if (strlen(s)) - return 0; + /* nopcid doesn't accept parameters */ + if (s) + return -EINVAL; /* do not emit a message if the feature is not present */ if (!boot_cpu_has(X86_FEATURE_PCID)) - return 1; + return 0; setup_clear_cpu_cap(X86_FEATURE_PCID); pr_info("nopcid: PCID feature disabled\n"); - return 1; + return 0; } -__setup("nopcid", x86_pcid_setup); +early_param("nopcid", x86_nopcid_setup); #endif static int __init x86_noinvpcid_setup(char *s) @@ -329,25 +329,6 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c) } } -static void setup_pcid(struct cpuinfo_x86 *c) -{ - if (cpu_has(c, X86_FEATURE_PCID)) { - if (cpu_has(c, X86_FEATURE_PGE)) { - cr4_set_bits(X86_CR4_PCIDE); - } else { - /* - * flush_tlb_all(), as currently implemented, won't - * work if PCID is on but PGE is not. Since that - * combination doesn't exist on real hardware, there's - * no reason to try to fully support it, but it's - * polite to avoid corrupting data if we're on - * an improperly configured VM. - */ - clear_cpu_cap(c, X86_FEATURE_PCID); - } - } -} - /* * Protection Keys are not available in 32-bit mode. */ @@ -923,6 +904,14 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) setup_force_cpu_cap(X86_FEATURE_ALWAYS); fpu__init_system(c); + +#ifdef CONFIG_X86_32 + /* + * Regardless of whether PCID is enumerated, the SDM says + * that it can't be enabled in 32-bit mode. + */ + setup_clear_cpu_cap(X86_FEATURE_PCID); +#endif } void __init early_cpu_init(void) @@ -1162,9 +1151,6 @@ static void identify_cpu(struct cpuinfo_x86 *c) setup_smep(c); setup_smap(c); - /* Set up PCID */ - setup_pcid(c); - /* * The vendor-specific functions might have changed features. * Now we do "generic changes." @@ -1583,6 +1569,7 @@ void cpu_init(void) mmgrab(&init_mm); me->active_mm = &init_mm; BUG_ON(me->mm); + initialize_tlbstate_and_flush(); enter_lazy_tlb(&init_mm, me); load_sp0(t, &current->thread); @@ -1637,6 +1624,7 @@ void cpu_init(void) mmgrab(&init_mm); curr->active_mm = &init_mm; BUG_ON(curr->mm); + initialize_tlbstate_and_flush(); enter_lazy_tlb(&init_mm, curr); load_sp0(t, thread); diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index fbafd24174af..236324e83a3a 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -59,8 +59,6 @@ void hyperv_vector_handler(struct pt_regs *regs) void hv_setup_vmbus_irq(void (*handler)(void)) { vmbus_handler = handler; - /* Setup the IDT for hypervisor callback */ - alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector); } void hv_remove_vmbus_irq(void) @@ -250,6 +248,9 @@ static void __init ms_hyperv_init_platform(void) * Setup the hook to get control post apic initialization.
*/ x86_platform.apic_post_init = hyperv_init; + hyperv_setup_mmu_ops(); + /* Setup the IDT for hypervisor callback */ + alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector); #endif } diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index a4516ca4c4f3..927abeaf63e2 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -12,10 +12,10 @@ #include #include #include -#include #include #include #include +#include #include #include #include @@ -594,7 +594,7 @@ static void __init apple_airport_reset(int bus, int slot, int func) u64 addr; int i; - if (!dmi_match(DMI_SYS_VENDOR, "Apple Inc.")) + if (!x86_apple_machine) return; /* Card may have been put into PCI_D3hot by grub quirk */ diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c index e1114f070c2d..f92a6593de1e 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c @@ -100,7 +100,7 @@ void __kernel_fpu_begin(void) kernel_fpu_disable(); - if (fpu->fpregs_active) { + if (fpu->initialized) { /* * Ignore return value -- we don't care if reg state * is clobbered. @@ -116,7 +116,7 @@ void __kernel_fpu_end(void) { struct fpu *fpu = &current->thread.fpu; - if (fpu->fpregs_active) + if (fpu->initialized) copy_kernel_to_fpregs(&fpu->state); kernel_fpu_enable(); @@ -148,7 +148,7 @@ void fpu__save(struct fpu *fpu) preempt_disable(); trace_x86_fpu_before_save(fpu); - if (fpu->fpregs_active) { + if (fpu->initialized) { if (!copy_fpregs_to_fpstate(fpu)) { copy_kernel_to_fpregs(&fpu->state); } @@ -189,10 +189,9 @@ EXPORT_SYMBOL_GPL(fpstate_init); int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu) { - dst_fpu->fpregs_active = 0; dst_fpu->last_cpu = -1; - if (!src_fpu->fpstate_active || !static_cpu_has(X86_FEATURE_FPU)) + if (!src_fpu->initialized || !static_cpu_has(X86_FEATURE_FPU)) return 0; WARN_ON_FPU(src_fpu != &current->thread.fpu); @@ -206,26 +205,14 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu) /* * Save current FPU registers directly into the child * FPU context, without any memory-to-memory copying. - * In lazy mode, if the FPU context isn't loaded into - * fpregs, CR0.TS will be set and do_device_not_available - * will load the FPU context. * - * We have to do all this with preemption disabled, - * mostly because of the FNSAVE case, because in that - * case we must not allow preemption in the window - * between the FNSAVE and us marking the context lazy. - * - * It shouldn't be an issue as even FNSAVE is plenty - * fast in terms of critical section length. + * ( The function 'fails' in the FNSAVE case, which destroys + * register contents so we have to copy them back.
) */ - preempt_disable(); if (!copy_fpregs_to_fpstate(dst_fpu)) { - memcpy(&src_fpu->state, &dst_fpu->state, - fpu_kernel_xstate_size); - + memcpy(&src_fpu->state, &dst_fpu->state, fpu_kernel_xstate_size); copy_kernel_to_fpregs(&src_fpu->state); } - preempt_enable(); trace_x86_fpu_copy_src(src_fpu); trace_x86_fpu_copy_dst(dst_fpu); @@ -237,45 +224,48 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu) * Activate the current task's in-memory FPU context, * if it has not been used before: */ -void fpu__activate_curr(struct fpu *fpu) +void fpu__initialize(struct fpu *fpu) { WARN_ON_FPU(fpu != &current->thread.fpu); - if (!fpu->fpstate_active) { + if (!fpu->initialized) { fpstate_init(&fpu->state); trace_x86_fpu_init_state(fpu); trace_x86_fpu_activate_state(fpu); /* Safe to do for the current task: */ - fpu->fpstate_active = 1; + fpu->initialized = 1; } } -EXPORT_SYMBOL_GPL(fpu__activate_curr); +EXPORT_SYMBOL_GPL(fpu__initialize); /* * This function must be called before we read a task's fpstate. * - * If the task has not used the FPU before then initialize its - * fpstate. + * There's two cases where this gets called: + * + * - for the current task (when coredumping), in which case we have + * to save the latest FPU registers into the fpstate, + * + * - or it's called for stopped tasks (ptrace), in which case the + * registers were already saved by the context-switch code when + * the task scheduled out - we only have to initialize the registers + * if they've never been initialized. * * If the task has used the FPU before then save it. */ -void fpu__activate_fpstate_read(struct fpu *fpu) +void fpu__prepare_read(struct fpu *fpu) { - /* - * If fpregs are active (in the current CPU), then - * copy them to the fpstate: - */ - if (fpu->fpregs_active) { + if (fpu == &current->thread.fpu) { fpu__save(fpu); } else { - if (!fpu->fpstate_active) { + if (!fpu->initialized) { fpstate_init(&fpu->state); trace_x86_fpu_init_state(fpu); trace_x86_fpu_activate_state(fpu); /* Safe to do for current and for stopped child tasks: */ - fpu->fpstate_active = 1; + fpu->initialized = 1; } } } @@ -283,17 +273,17 @@ void fpu__activate_fpstate_read(struct fpu *fpu) /* * This function must be called before we write a task's fpstate. * - * If the task has used the FPU before then unlazy it. + * If the task has used the FPU before then invalidate any cached FPU registers. * If the task has not used the FPU before then initialize its fpstate. * * After this function call, after registers in the fpstate are * modified and the child task has woken up, the child task will * restore the modified FPU state from the modified context. If we - * didn't clear its lazy status here then the lazy in-registers + * didn't clear its cached status here then the cached in-registers * state pending on its former CPU could be restored, corrupting * the modifications.
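/*
 * Editorial sketch, not part of the patch: a user-space model of the two
 * cases documented for fpu__prepare_read() above -- the current task must
 * first spill its live registers into the fpstate, while a stopped task was
 * already saved at context switch and only needs its fpstate initialized if
 * it never used the FPU. Names are toy stand-ins, not kernel APIs.
 */
#include <stdbool.h>
#include <stdio.h>

static int live_regs = 42;	/* models the hardware FPU registers */

struct toy_task {
	bool is_current;
	bool initialized;
	int fpstate;		/* models the in-memory FPU context */
};

static void toy_prepare_read(struct toy_task *t)
{
	if (t->is_current) {
		if (t->initialized)
			t->fpstate = live_regs;	/* like fpu__save() */
	} else if (!t->initialized) {
		t->fpstate = 0;			/* like fpstate_init() */
		t->initialized = true;
	}
	/* a stopped, initialized task already holds its saved registers */
}

int main(void)
{
	struct toy_task cur = { .is_current = true, .initialized = true, .fpstate = -1 };
	struct toy_task stopped = { .is_current = false, .initialized = false, .fpstate = -1 };

	toy_prepare_read(&cur);
	toy_prepare_read(&stopped);
	printf("current fpstate=%d, stopped fpstate=%d\n", cur.fpstate, stopped.fpstate);
	return 0;
}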
*/ -void fpu__activate_fpstate_write(struct fpu *fpu) +void fpu__prepare_write(struct fpu *fpu) { /* * Only stopped child tasks can be used to modify the FPU @@ -301,8 +291,8 @@ void fpu__activate_fpstate_write(struct fpu *fpu) */ WARN_ON_FPU(fpu == ¤t->thread.fpu); - if (fpu->fpstate_active) { - /* Invalidate any lazy state: */ + if (fpu->initialized) { + /* Invalidate any cached state: */ __fpu_invalidate_fpregs_state(fpu); } else { fpstate_init(&fpu->state); @@ -310,73 +300,10 @@ void fpu__activate_fpstate_write(struct fpu *fpu) trace_x86_fpu_activate_state(fpu); /* Safe to do for stopped child tasks: */ - fpu->fpstate_active = 1; + fpu->initialized = 1; } } -/* - * This function must be called before we write the current - * task's fpstate. - * - * This call gets the current FPU register state and moves - * it in to the 'fpstate'. Preemption is disabled so that - * no writes to the 'fpstate' can occur from context - * swiches. - * - * Must be followed by a fpu__current_fpstate_write_end(). - */ -void fpu__current_fpstate_write_begin(void) -{ - struct fpu *fpu = ¤t->thread.fpu; - - /* - * Ensure that the context-switching code does not write - * over the fpstate while we are doing our update. - */ - preempt_disable(); - - /* - * Move the fpregs in to the fpu's 'fpstate'. - */ - fpu__activate_fpstate_read(fpu); - - /* - * The caller is about to write to 'fpu'. Ensure that no - * CPU thinks that its fpregs match the fpstate. This - * ensures we will not be lazy and skip a XRSTOR in the - * future. - */ - __fpu_invalidate_fpregs_state(fpu); -} - -/* - * This function must be paired with fpu__current_fpstate_write_begin() - * - * This will ensure that the modified fpstate gets placed back in - * the fpregs if necessary. - * - * Note: This function may be called whether or not an _actual_ - * write to the fpstate occurred. - */ -void fpu__current_fpstate_write_end(void) -{ - struct fpu *fpu = ¤t->thread.fpu; - - /* - * 'fpu' now has an updated copy of the state, but the - * registers may still be out of date. Update them with - * an XRSTOR if they are active. - */ - if (fpregs_active()) - copy_kernel_to_fpregs(&fpu->state); - - /* - * Our update is done and the fpregs/fpstate are in sync - * if necessary. Context switches can happen again. - */ - preempt_enable(); -} - /* * 'fpu__restore()' is called to copy FPU registers from * the FPU fpstate to the live hw registers and to activate @@ -389,7 +316,7 @@ void fpu__current_fpstate_write_end(void) */ void fpu__restore(struct fpu *fpu) { - fpu__activate_curr(fpu); + fpu__initialize(fpu); /* Avoid __kernel_fpu_begin() right after fpregs_activate() */ kernel_fpu_disable(); @@ -414,15 +341,17 @@ void fpu__drop(struct fpu *fpu) { preempt_disable(); - if (fpu->fpregs_active) { - /* Ignore delayed exceptions from user space */ - asm volatile("1: fwait\n" - "2:\n" - _ASM_EXTABLE(1b, 2b)); - fpregs_deactivate(fpu); + if (fpu == ¤t->thread.fpu) { + if (fpu->initialized) { + /* Ignore delayed exceptions from user space */ + asm volatile("1: fwait\n" + "2:\n" + _ASM_EXTABLE(1b, 2b)); + fpregs_deactivate(fpu); + } } - fpu->fpstate_active = 0; + fpu->initialized = 0; trace_x86_fpu_dropped(fpu); @@ -462,9 +391,11 @@ void fpu__clear(struct fpu *fpu) * Make sure fpstate is cleared and initialized. 
*/ if (static_cpu_has(X86_FEATURE_FPU)) { - fpu__activate_curr(fpu); + preempt_disable(); + fpu__initialize(fpu); user_fpu_begin(); copy_init_fpstate_to_fpregs(); + preempt_enable(); } } diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c index d5d44c452624..7affb7e3d9a5 100644 --- a/arch/x86/kernel/fpu/init.c +++ b/arch/x86/kernel/fpu/init.c @@ -240,7 +240,7 @@ static void __init fpu__init_system_ctx_switch(void) WARN_ON_FPU(!on_boot_cpu); on_boot_cpu = 0; - WARN_ON_FPU(current->thread.fpu.fpstate_active); + WARN_ON_FPU(current->thread.fpu.initialized); } /* diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c index b188b16841e3..3ea151372389 100644 --- a/arch/x86/kernel/fpu/regset.c +++ b/arch/x86/kernel/fpu/regset.c @@ -16,14 +16,14 @@ int regset_fpregs_active(struct task_struct *target, const struct user_regset *r { struct fpu *target_fpu = &target->thread.fpu; - return target_fpu->fpstate_active ? regset->n : 0; + return target_fpu->initialized ? regset->n : 0; } int regset_xregset_fpregs_active(struct task_struct *target, const struct user_regset *regset) { struct fpu *target_fpu = &target->thread.fpu; - if (boot_cpu_has(X86_FEATURE_FXSR) && target_fpu->fpstate_active) + if (boot_cpu_has(X86_FEATURE_FXSR) && target_fpu->initialized) return regset->n; else return 0; @@ -38,7 +38,7 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset, if (!boot_cpu_has(X86_FEATURE_FXSR)) return -ENODEV; - fpu__activate_fpstate_read(fpu); + fpu__prepare_read(fpu); fpstate_sanitize_xstate(fpu); return user_regset_copyout(&pos, &count, &kbuf, &ubuf, @@ -55,7 +55,7 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset, if (!boot_cpu_has(X86_FEATURE_FXSR)) return -ENODEV; - fpu__activate_fpstate_write(fpu); + fpu__prepare_write(fpu); fpstate_sanitize_xstate(fpu); ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, @@ -89,10 +89,13 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset, xsave = &fpu->state.xsave; - fpu__activate_fpstate_read(fpu); + fpu__prepare_read(fpu); if (using_compacted_format()) { - ret = copyout_from_xsaves(pos, count, kbuf, ubuf, xsave); + if (kbuf) + ret = copy_xstate_to_kernel(kbuf, xsave, pos, count); + else + ret = copy_xstate_to_user(ubuf, xsave, pos, count); } else { fpstate_sanitize_xstate(fpu); /* @@ -129,12 +132,23 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset, xsave = &fpu->state.xsave; - fpu__activate_fpstate_write(fpu); + fpu__prepare_write(fpu); - if (boot_cpu_has(X86_FEATURE_XSAVES)) - ret = copyin_to_xsaves(kbuf, ubuf, xsave); - else + if (using_compacted_format()) { + if (kbuf) + ret = copy_kernel_to_xstate(xsave, kbuf); + else + ret = copy_user_to_xstate(xsave, ubuf); + } else { ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1); + if (!ret) + ret = validate_xstate_header(&xsave->header); + } + + /* + * mxcsr reserved bits must be masked to zero for security reasons. + */ + xsave->i387.mxcsr &= mxcsr_feature_mask; /* * In case of failure, mark all states as init: @@ -142,16 +156,6 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset, if (ret) fpstate_init(&fpu->state); - /* - * mxcsr reserved bits must be masked to zero for security reasons. - */ - xsave->i387.mxcsr &= mxcsr_feature_mask; - xsave->header.xfeatures &= xfeatures_mask; - /* - * These bits must be zero. 
- */ - memset(&xsave->header.reserved, 0, 48); - return ret; } @@ -299,7 +303,7 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset, struct fpu *fpu = &target->thread.fpu; struct user_i387_ia32_struct env; - fpu__activate_fpstate_read(fpu); + fpu__prepare_read(fpu); if (!boot_cpu_has(X86_FEATURE_FPU)) return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf); @@ -329,7 +333,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset, struct user_i387_ia32_struct env; int ret; - fpu__activate_fpstate_write(fpu); + fpu__prepare_write(fpu); fpstate_sanitize_xstate(fpu); if (!boot_cpu_has(X86_FEATURE_FPU)) @@ -369,7 +373,7 @@ int dump_fpu(struct pt_regs *regs, struct user_i387_struct *ufpu) struct fpu *fpu = &tsk->thread.fpu; int fpvalid; - fpvalid = fpu->fpstate_active; + fpvalid = fpu->initialized; if (fpvalid) fpvalid = !fpregs_get(tsk, NULL, 0, sizeof(struct user_i387_ia32_struct), diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c index 83c23c230b4c..fb639e70048f 100644 --- a/arch/x86/kernel/fpu/signal.c +++ b/arch/x86/kernel/fpu/signal.c @@ -155,7 +155,8 @@ static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf) */ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size) { - struct xregs_state *xsave = ¤t->thread.fpu.state.xsave; + struct fpu *fpu = ¤t->thread.fpu; + struct xregs_state *xsave = &fpu->state.xsave; struct task_struct *tsk = current; int ia32_fxstate = (buf != buf_fx); @@ -170,13 +171,13 @@ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size) sizeof(struct user_i387_ia32_struct), NULL, (struct _fpstate_32 __user *) buf) ? -1 : 1; - if (fpregs_active() || using_compacted_format()) { + if (fpu->initialized || using_compacted_format()) { /* Save the live register state to the user directly. */ if (copy_fpregs_to_sigframe(buf_fx)) return -1; /* Update the thread's fxstate to save the fsave header. */ if (ia32_fxstate) - copy_fxregs_to_kernel(&tsk->thread.fpu); + copy_fxregs_to_kernel(fpu); } else { /* * It is a *bug* if kernel uses compacted-format for xsave @@ -189,7 +190,7 @@ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size) return -1; } - fpstate_sanitize_xstate(&tsk->thread.fpu); + fpstate_sanitize_xstate(fpu); if (__copy_to_user(buf_fx, xsave, fpu_user_xstate_size)) return -1; } @@ -213,8 +214,11 @@ sanitize_restored_xstate(struct task_struct *tsk, struct xstate_header *header = &xsave->header; if (use_xsave()) { - /* These bits must be zero. */ - memset(header->reserved, 0, 48); + /* + * Note: we don't need to zero the reserved bits in the + * xstate_header here because we either didn't copy them at all, + * or we checked earlier that they aren't set. 
+ */ /* * Init the state that is not present in the memory @@ -223,7 +227,7 @@ sanitize_restored_xstate(struct task_struct *tsk, if (fx_only) header->xfeatures = XFEATURE_MASK_FPSSE; else - header->xfeatures &= (xfeatures_mask & xfeatures); + header->xfeatures &= xfeatures; } if (use_fxsr()) { @@ -279,7 +283,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size) if (!access_ok(VERIFY_READ, buf, size)) return -EACCES; - fpu__activate_curr(fpu); + fpu__initialize(fpu); if (!static_cpu_has(X86_FEATURE_FPU)) return fpregs_soft_set(current, NULL, @@ -307,28 +311,29 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size) /* * For 32-bit frames with fxstate, copy the user state to the * thread's fpu state, reconstruct fxstate from the fsave - * header. Sanitize the copied state etc. + * header. Validate and sanitize the copied state. */ struct fpu *fpu = &tsk->thread.fpu; struct user_i387_ia32_struct env; int err = 0; /* - * Drop the current fpu which clears fpu->fpstate_active. This ensures + * Drop the current fpu which clears fpu->initialized. This ensures * that any context-switch during the copy of the new state, * avoids the intermediate state from getting restored/saved. * Thus avoiding the new restored state from getting corrupted. * We will be ready to restore/save the state only after - * fpu->fpstate_active is again set. + * fpu->initialized is again set. */ fpu__drop(fpu); if (using_compacted_format()) { - err = copyin_to_xsaves(NULL, buf_fx, - &fpu->state.xsave); + err = copy_user_to_xstate(&fpu->state.xsave, buf_fx); } else { - err = __copy_from_user(&fpu->state.xsave, - buf_fx, state_size); + err = __copy_from_user(&fpu->state.xsave, buf_fx, state_size); + + if (!err && state_size > offsetof(struct xregs_state, header)) + err = validate_xstate_header(&fpu->state.xsave.header); } if (err || __copy_from_user(&env, buf, sizeof(env))) { @@ -339,7 +344,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size) sanitize_restored_xstate(tsk, &env, xfeatures, fx_only); } - fpu->fpstate_active = 1; + fpu->initialized = 1; preempt_disable(); fpu__restore(fpu); preempt_enable(); diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index c24ac1efb12d..f1d5476c9022 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -483,6 +483,30 @@ int using_compacted_format(void) return boot_cpu_has(X86_FEATURE_XSAVES); } +/* Validate an xstate header supplied by userspace (ptrace or sigreturn) */ +int validate_xstate_header(const struct xstate_header *hdr) +{ + /* No unknown or supervisor features may be set */ + if (hdr->xfeatures & (~xfeatures_mask | XFEATURE_MASK_SUPERVISOR)) + return -EINVAL; + + /* Userspace must use the uncompacted format */ + if (hdr->xcomp_bv) + return -EINVAL; + + /* + * If 'reserved' is shrunken to add a new field, make sure to validate + * that new field here! + */ + BUILD_BUG_ON(sizeof(hdr->reserved) != 48); + + /* No reserved bits may be set */ + if (memchr_inv(hdr->reserved, 0, sizeof(hdr->reserved))) + return -EINVAL; + + return 0; +} + static void __xstate_dump_leaves(void) { int i; @@ -867,7 +891,7 @@ const void *get_xsave_field_ptr(int xsave_state) { struct fpu *fpu = ¤t->thread.fpu; - if (!fpu->fpstate_active) + if (!fpu->initialized) return NULL; /* * fpu__save() takes the CPU's xstate registers @@ -920,48 +944,55 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, } #endif /* ! 
CONFIG_ARCH_HAS_PKEYS */ +/* + * Weird legacy quirk: SSE and YMM states store information in the + * MXCSR and MXCSR_FLAGS fields of the FP area. That means if the FP + * area is marked as unused in the xfeatures header, we need to copy + * MXCSR and MXCSR_FLAGS if either SSE or YMM are in use. + */ +static inline bool xfeatures_mxcsr_quirk(u64 xfeatures) +{ + if (!(xfeatures & (XFEATURE_MASK_SSE|XFEATURE_MASK_YMM))) + return false; + + if (xfeatures & XFEATURE_MASK_FP) + return false; + + return true; +} + /* * This is similar to user_regset_copyout(), but will not add offset to * the source data pointer or increment pos, count, kbuf, and ubuf. */ -static inline int xstate_copyout(unsigned int pos, unsigned int count, - void *kbuf, void __user *ubuf, - const void *data, const int start_pos, - const int end_pos) +static inline void +__copy_xstate_to_kernel(void *kbuf, const void *data, + unsigned int offset, unsigned int size, unsigned int size_total) { - if ((count == 0) || (pos < start_pos)) - return 0; + if (offset < size_total) { + unsigned int copy = min(size, size_total - offset); - if (end_pos < 0 || pos < end_pos) { - unsigned int copy = (end_pos < 0 ? count : min(count, end_pos - pos)); - - if (kbuf) { - memcpy(kbuf + pos, data, copy); - } else { - if (__copy_to_user(ubuf + pos, data, copy)) - return -EFAULT; - } + memcpy(kbuf + offset, data, copy); } - return 0; } /* * Convert from kernel XSAVES compacted format to standard format and copy - * to a ptrace buffer. It supports partial copy but pos always starts from - * zero. This is called from xstateregs_get() and there we check the CPU - * has XSAVES. + * to a kernel-space ptrace buffer. + * + * It supports partial copy but pos always starts from zero. This is called + * from xstateregs_get() and there we check the CPU has XSAVES. 
*/ -int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf, - void __user *ubuf, struct xregs_state *xsave) +int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int offset_start, unsigned int size_total) { unsigned int offset, size; - int ret, i; struct xstate_header header; + int i; /* * Currently copy_regset_to_user() starts from pos 0: */ - if (unlikely(pos != 0)) + if (unlikely(offset_start != 0)) return -EFAULT; /* @@ -977,8 +1008,91 @@ int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf, offset = offsetof(struct xregs_state, header); size = sizeof(header); - ret = xstate_copyout(offset, size, kbuf, ubuf, &header, 0, count); + __copy_xstate_to_kernel(kbuf, &header, offset, size, size_total); + for (i = 0; i < XFEATURE_MAX; i++) { + /* + * Copy only in-use xstates: + */ + if ((header.xfeatures >> i) & 1) { + void *src = __raw_xsave_addr(xsave, 1 << i); + + offset = xstate_offsets[i]; + size = xstate_sizes[i]; + + /* The next component has to fit fully into the output buffer: */ + if (offset + size > size_total) + break; + + __copy_xstate_to_kernel(kbuf, src, offset, size, size_total); + } + + } + + if (xfeatures_mxcsr_quirk(header.xfeatures)) { + offset = offsetof(struct fxregs_state, mxcsr); + size = MXCSR_AND_FLAGS_SIZE; + __copy_xstate_to_kernel(kbuf, &xsave->i387.mxcsr, offset, size, size_total); + } + + /* + * Fill xsave->i387.sw_reserved value for ptrace frame: + */ + offset = offsetof(struct fxregs_state, sw_reserved); + size = sizeof(xstate_fx_sw_bytes); + + __copy_xstate_to_kernel(kbuf, xstate_fx_sw_bytes, offset, size, size_total); + + return 0; +} + +static inline int +__copy_xstate_to_user(void __user *ubuf, const void *data, unsigned int offset, unsigned int size, unsigned int size_total) +{ + if (!size) + return 0; + + if (offset < size_total) { + unsigned int copy = min(size, size_total - offset); + + if (__copy_to_user(ubuf + offset, data, copy)) + return -EFAULT; + } + return 0; +} + +/* + * Convert from kernel XSAVES compacted format to standard format and copy + * to a user-space buffer. It supports partial copy but pos always starts from + * zero. This is called from xstateregs_get() and there we check the CPU + * has XSAVES. 
+ */ +int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned int offset_start, unsigned int size_total) +{ + unsigned int offset, size; + int ret, i; + struct xstate_header header; + + /* + * Currently copy_regset_to_user() starts from pos 0: + */ + if (unlikely(offset_start != 0)) + return -EFAULT; + + /* + * The destination is a ptrace buffer; we put in only user xstates: + */ + memset(&header, 0, sizeof(header)); + header.xfeatures = xsave->header.xfeatures; + header.xfeatures &= ~XFEATURE_MASK_SUPERVISOR; + + /* + * Copy xregs_state->header: + */ + offset = offsetof(struct xregs_state, header); + size = sizeof(header); + + ret = __copy_xstate_to_user(ubuf, &header, offset, size, size_total); if (ret) return ret; @@ -992,25 +1106,30 @@ int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf, offset = xstate_offsets[i]; size = xstate_sizes[i]; - ret = xstate_copyout(offset, size, kbuf, ubuf, src, 0, count); + /* The next component has to fit fully into the output buffer: */ + if (offset + size > size_total) + break; + ret = __copy_xstate_to_user(ubuf, src, offset, size, size_total); if (ret) return ret; - - if (offset + size >= count) - break; } } + if (xfeatures_mxcsr_quirk(header.xfeatures)) { + offset = offsetof(struct fxregs_state, mxcsr); + size = MXCSR_AND_FLAGS_SIZE; + __copy_xstate_to_user(ubuf, &xsave->i387.mxcsr, offset, size, size_total); + } + /* * Fill xsave->i387.sw_reserved value for ptrace frame: */ offset = offsetof(struct fxregs_state, sw_reserved); size = sizeof(xstate_fx_sw_bytes); - ret = xstate_copyout(offset, size, kbuf, ubuf, xstate_fx_sw_bytes, 0, count); - + ret = __copy_xstate_to_user(ubuf, xstate_fx_sw_bytes, offset, size, size_total); if (ret) return ret; @@ -1018,55 +1137,42 @@ int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf, } /* - * Convert from a ptrace standard-format buffer to kernel XSAVES format - * and copy to the target thread. This is called from xstateregs_set() and - * there we check the CPU has XSAVES and a whole standard-sized buffer - * exists. + * Convert from a ptrace standard-format kernel buffer to kernel XSAVES format + * and copy to the target thread. This is called from xstateregs_set(). 
*/ -int copyin_to_xsaves(const void *kbuf, const void __user *ubuf, - struct xregs_state *xsave) +int copy_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf) { unsigned int offset, size; int i; - u64 xfeatures; - u64 allowed_features; + struct xstate_header hdr; offset = offsetof(struct xregs_state, header); - size = sizeof(xfeatures); + size = sizeof(hdr); - if (kbuf) { - memcpy(&xfeatures, kbuf + offset, size); - } else { - if (__copy_from_user(&xfeatures, ubuf + offset, size)) - return -EFAULT; - } + memcpy(&hdr, kbuf + offset, size); - /* - * Reject if the user sets any disabled or supervisor features: - */ - allowed_features = xfeatures_mask & ~XFEATURE_MASK_SUPERVISOR; - - if (xfeatures & ~allowed_features) + if (validate_xstate_header(&hdr)) return -EINVAL; for (i = 0; i < XFEATURE_MAX; i++) { u64 mask = ((u64)1 << i); - if (xfeatures & mask) { + if (hdr.xfeatures & mask) { void *dst = __raw_xsave_addr(xsave, 1 << i); offset = xstate_offsets[i]; size = xstate_sizes[i]; - if (kbuf) { - memcpy(dst, kbuf + offset, size); - } else { - if (__copy_from_user(dst, ubuf + offset, size)) - return -EFAULT; - } + memcpy(dst, kbuf + offset, size); } } + if (xfeatures_mxcsr_quirk(hdr.xfeatures)) { + offset = offsetof(struct fxregs_state, mxcsr); + size = MXCSR_AND_FLAGS_SIZE; + memcpy(&xsave->i387.mxcsr, kbuf + offset, size); + } + /* * The state that came in from userspace was user-state only. * Mask all the user states out of 'xfeatures': @@ -1076,7 +1182,63 @@ int copyin_to_xsaves(const void *kbuf, const void __user *ubuf, /* * Add back in the features that came in from userspace: */ - xsave->header.xfeatures |= xfeatures; + xsave->header.xfeatures |= hdr.xfeatures; + + return 0; +} + +/* + * Convert from a ptrace or sigreturn standard-format user-space buffer to + * kernel XSAVES format and copy to the target thread. This is called from + * xstateregs_set(), as well as potentially from the sigreturn() and + * rt_sigreturn() system calls. + */ +int copy_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf) +{ + unsigned int offset, size; + int i; + struct xstate_header hdr; + + offset = offsetof(struct xregs_state, header); + size = sizeof(hdr); + + if (__copy_from_user(&hdr, ubuf + offset, size)) + return -EFAULT; + + if (validate_xstate_header(&hdr)) + return -EINVAL; + + for (i = 0; i < XFEATURE_MAX; i++) { + u64 mask = ((u64)1 << i); + + if (hdr.xfeatures & mask) { + void *dst = __raw_xsave_addr(xsave, 1 << i); + + offset = xstate_offsets[i]; + size = xstate_sizes[i]; + + if (__copy_from_user(dst, ubuf + offset, size)) + return -EFAULT; + } + } + + if (xfeatures_mxcsr_quirk(hdr.xfeatures)) { + offset = offsetof(struct fxregs_state, mxcsr); + size = MXCSR_AND_FLAGS_SIZE; + if (__copy_from_user(&xsave->i387.mxcsr, ubuf + offset, size)) + return -EFAULT; + } + + /* + * The state that came in from userspace was user-state only. + * Mask all the user states out of 'xfeatures': + */ + xsave->header.xfeatures &= XFEATURE_MASK_SUPERVISOR; + + /* + * Add back in the features that came in from userspace: + */ + xsave->header.xfeatures |= hdr.xfeatures; return 0; } diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c index 50c89e8a95f2..7ebcc4a74438 100644 --- a/arch/x86/kernel/io_delay.c +++ b/arch/x86/kernel/io_delay.c @@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id) * Quirk table for systems that misbehave (lock up, etc.) 
if port * 0x80 is used: */ -static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = { +static const struct dmi_system_id io_delay_0xed_port_dmi_table[] __initconst = { { .callback = dmi_io_delay_0xed_port, .ident = "Compaq Presario V6000", diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index 1f38d9a4d9de..d4eb450144fd 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c @@ -64,7 +64,7 @@ static void call_on_stack(void *func, void *stack) static inline void *current_stack(void) { - return (void *)(current_stack_pointer() & ~(THREAD_SIZE - 1)); + return (void *)(current_stack_pointer & ~(THREAD_SIZE - 1)); } static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc) @@ -88,7 +88,7 @@ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc) /* Save the next esp at the bottom of the stack */ prev_esp = (u32 *)irqstk; - *prev_esp = current_stack_pointer(); + *prev_esp = current_stack_pointer; if (unlikely(overflow)) call_on_stack(print_stack_overflow, isp); @@ -139,7 +139,7 @@ void do_softirq_own_stack(void) /* Push the previous esp onto the stack */ prev_esp = (u32 *)irqstk; - *prev_esp = current_stack_pointer(); + *prev_esp = current_stack_pointer; call_on_stack(__do_softirq, isp); } diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c index 4b0592ca9e47..8c1cc08f514f 100644 --- a/arch/x86/kernel/ksysfs.c +++ b/arch/x86/kernel/ksysfs.c @@ -299,7 +299,7 @@ static int __init create_setup_data_nodes(struct kobject *parent) return 0; out_clean_nodes: - for (j = i - 1; j > 0; j--) + for (j = i - 1; j >= 0; j--) cleanup_setup_data_node(*(kobjp + j)); kfree(kobjp); out_setup_data_kobj: diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 874827b0d7ca..8bb9594d0761 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -117,7 +117,11 @@ static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b, return NULL; } -void kvm_async_pf_task_wait(u32 token) +/* + * @interrupt_kernel: Is this called from a routine which interrupts the kernel + * (other than user space)? + */ +void kvm_async_pf_task_wait(u32 token, int interrupt_kernel) { u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS); struct kvm_task_sleep_head *b = &async_pf_sleepers[key]; @@ -140,7 +144,10 @@ void kvm_async_pf_task_wait(u32 token) n.token = token; n.cpu = smp_processor_id(); - n.halted = is_idle_task(current) || preempt_count() > 1; + n.halted = is_idle_task(current) || + (IS_ENABLED(CONFIG_PREEMPT_COUNT) + ? preempt_count() > 1 || rcu_preempt_depth() + : interrupt_kernel); init_swait_queue_head(&n.wq); hlist_add_head(&n.link, &b->list); raw_spin_unlock(&b->lock); @@ -180,7 +187,7 @@ static void apf_task_wake_one(struct kvm_task_sleep_node *n) hlist_del_init(&n->link); if (n->halted) smp_send_reschedule(n->cpu); - else if (swait_active(&n->wq)) + else if (swq_has_sleeper(&n->wq)) swake_up(&n->wq); } @@ -268,7 +275,7 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code) case KVM_PV_REASON_PAGE_NOT_PRESENT: /* page is swapped out by the host. 
*/ prev_state = exception_enter(); - kvm_async_pf_task_wait((u32)read_cr2()); + kvm_async_pf_task_wait((u32)read_cr2(), !user_mode(regs)); exception_exit(prev_state); break; case KVM_PV_REASON_PAGE_READY: diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index a14df9eecfed..19a3e8f961c7 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -327,7 +327,6 @@ __visible struct pv_cpu_ops pv_cpu_ops = { .set_debugreg = native_set_debugreg, .read_cr0 = native_read_cr0, .write_cr0 = native_write_cr0, - .read_cr4 = native_read_cr4, .write_cr4 = native_write_cr4, #ifdef CONFIG_X86_64 .read_cr8 = native_read_cr8, @@ -343,7 +342,6 @@ __visible struct pv_cpu_ops pv_cpu_ops = { .set_ldt = native_set_ldt, .load_gdt = native_load_gdt, .load_idt = native_load_idt, - .store_idt = native_store_idt, .store_tr = native_store_tr, .load_tls = native_load_tls, #ifdef CONFIG_X86_64 @@ -411,8 +409,6 @@ struct pv_mmu_ops pv_mmu_ops __ro_after_init = { .set_pte = native_set_pte, .set_pte_at = native_set_pte_at, .set_pmd = native_set_pmd, - .set_pmd_at = native_set_pmd_at, - .pte_update = paravirt_nop, .ptep_modify_prot_start = __ptep_modify_prot_start, .ptep_modify_prot_commit = __ptep_modify_prot_commit, @@ -424,7 +420,6 @@ struct pv_mmu_ops pv_mmu_ops __ro_after_init = { .pmd_clear = native_pmd_clear, #endif .set_pud = native_set_pud, - .set_pud_at = native_set_pud_at, .pmd_val = PTE_IDENT, .make_pmd = PTE_IDENT, diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c index 0bee04d41bed..eaa591cfd98b 100644 --- a/arch/x86/kernel/quirks.c +++ b/arch/x86/kernel/quirks.c @@ -1,6 +1,7 @@ /* * This file contains work-arounds for x86 and x86_64 platform bugs. */ +#include #include #include @@ -656,3 +657,12 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, quirk_intel_brickland_xeon_ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2083, quirk_intel_purley_xeon_ras_cap); #endif #endif + +bool x86_apple_machine; +EXPORT_SYMBOL(x86_apple_machine); + +void __init early_platform_quirks(void) +{ + x86_apple_machine = dmi_match(DMI_SYS_VENDOR, "Apple Inc.") || + dmi_match(DMI_SYS_VENDOR, "Apple Computer, Inc."); +} diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 54984b142641..54180fa6f66f 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -150,7 +150,7 @@ static int __init set_kbd_reboot(const struct dmi_system_id *d) /* * This is a single dmi_table handling all reboot quirks. */ -static struct dmi_system_id __initdata reboot_dmi_table[] = { +static const struct dmi_system_id reboot_dmi_table[] __initconst = { /* Acer */ { /* Handle reboot issue on Acer Aspire one */ diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 9cc16a841745..0957dd73d127 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -1178,8 +1178,11 @@ void __init setup_arch(char **cmdline_p) * with the current CR4 value. This may not be necessary, but * auditing all the early-boot CR4 manipulation would be needed to * rule it out. + * + * Mask off features that don't work outside long mode (just + * PCIDE for now). */ - mmu_cr4_features = __read_cr4(); + mmu_cr4_features = __read_cr4() & ~X86_CR4_PCIDE; memblock_set_current_limit(get_max_mapped()); @@ -1216,6 +1219,8 @@ void __init setup_arch(char **cmdline_p) io_delay_init(); + early_platform_quirks(); + /* * Parse the ACPI tables for possible boot-time SMP configuration. 
*/ diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 6e8fcb6f7e1e..28dafed6c682 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -168,7 +168,7 @@ void __init setup_per_cpu_areas(void) unsigned long delta; int rc; - pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n", + pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%u nr_node_ids:%d\n", NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids); /* diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index e04442345fc0..4e188fda5961 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -263,7 +263,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, sp = (unsigned long) ka->sa.sa_restorer; } - if (fpu->fpstate_active) { + if (fpu->initialized) { sp = fpu__alloc_mathframe(sp, IS_ENABLED(CONFIG_X86_32), &buf_fx, &math_size); *fpstate = (void __user *)sp; @@ -279,7 +279,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, return (void __user *)-1L; /* save i387 and extended state */ - if (fpu->fpstate_active && + if (fpu->initialized && copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size) < 0) return (void __user *)-1L; @@ -755,7 +755,7 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs) /* * Ensure the signal handler starts with the new fpu state. */ - if (fpu->fpstate_active) + if (fpu->initialized) fpu__clear(fpu); } signal_setup_done(failed, ksig, stepping); diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c index 71beb28600d4..ab9feb5887b1 100644 --- a/arch/x86/kernel/signal_compat.c +++ b/arch/x86/kernel/signal_compat.c @@ -129,7 +129,7 @@ int __copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from, 3 ints plus the relevant union member. */ put_user_ex(from->si_signo, &to->si_signo); put_user_ex(from->si_errno, &to->si_errno); - put_user_ex((short)from->si_code, &to->si_code); + put_user_ex(from->si_code, &to->si_code); if (from->si_code < 0) { put_user_ex(from->si_pid, &to->si_pid); @@ -142,8 +142,8 @@ int __copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from, */ put_user_ex(from->_sifields._pad[0], &to->_sifields._pad[0]); - switch (from->si_code >> 16) { - case __SI_FAULT >> 16: + switch (siginfo_layout(from->si_signo, from->si_code)) { + case SIL_FAULT: if (from->si_signo == SIGBUS && (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)) @@ -160,11 +160,11 @@ int __copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from, put_user_ex(from->si_pkey, &to->si_pkey); } break; - case __SI_SYS >> 16: + case SIL_SYS: put_user_ex(from->si_syscall, &to->si_syscall); put_user_ex(from->si_arch, &to->si_arch); break; - case __SI_CHLD >> 16: + case SIL_CHLD: if (!x32_ABI) { put_user_ex(from->si_utime, &to->si_utime); put_user_ex(from->si_stime, &to->si_stime); @@ -174,21 +174,18 @@ int __copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from, } put_user_ex(from->si_status, &to->si_status); /* FALL THROUGH */ - default: - case __SI_KILL >> 16: + case SIL_KILL: put_user_ex(from->si_uid, &to->si_uid); break; - case __SI_POLL >> 16: + case SIL_POLL: put_user_ex(from->si_fd, &to->si_fd); break; - case __SI_TIMER >> 16: + case SIL_TIMER: put_user_ex(from->si_overrun, &to->si_overrun); put_user_ex(ptr_to_compat(from->si_ptr), &to->si_ptr); break; - /* This is not generated by the kernel as of now. 
*/ - case __SI_RT >> 16: - case __SI_MESGQ >> 16: + case SIL_RT: put_user_ex(from->si_uid, &to->si_uid); put_user_ex(from->si_int, &to->si_int); break; diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 54b9e89d4d6b..ad59edd84de7 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -226,16 +226,12 @@ static int enable_start_cpu0; static void notrace start_secondary(void *unused) { /* - * Don't put *anything* before cpu_init(), SMP booting is too - * fragile that we want to limit the things done here to the - * most necessary things. + * Don't put *anything* except direct CPU state initialization + * before cpu_init(), SMP booting is too fragile that we want to + * limit the things done here to the most necessary things. */ - cpu_init(); - x86_cpuinit.early_percpu_clock_init(); - preempt_disable(); - smp_callin(); - - enable_start_cpu0 = 0; + if (boot_cpu_has(X86_FEATURE_PCID)) + __write_cr4(__read_cr4() | X86_CR4_PCIDE); #ifdef CONFIG_X86_32 /* switch away from the initial page table */ @@ -243,6 +239,13 @@ static void notrace start_secondary(void *unused) __flush_tlb_all(); #endif + cpu_init(); + x86_cpuinit.early_percpu_clock_init(); + preempt_disable(); + smp_callin(); + + enable_start_cpu0 = 0; + /* otherwise gcc will move up smp_processor_id before the cpu_init */ barrier(); /* @@ -1461,7 +1464,7 @@ __init void prefill_possible_map(void) /* nr_cpu_ids could be reduced via nr_cpus= */ if (possible > nr_cpu_ids) { - pr_warn("%d Processors exceeds NR_CPUS limit of %d\n", + pr_warn("%d Processors exceeds NR_CPUS limit of %u\n", possible, nr_cpu_ids); possible = nr_cpu_ids; } diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 34ea3651362e..67db4f43309e 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -142,7 +142,7 @@ void ist_begin_non_atomic(struct pt_regs *regs) * from double_fault. */ BUG_ON((unsigned long)(current_top_of_stack() - - current_stack_pointer()) >= THREAD_SIZE); + current_stack_pointer) >= THREAD_SIZE); preempt_enable_no_resched(); } diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index 3ea624452f93..3c48bc8bf08c 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -23,6 +23,7 @@ config KVM depends on HIGH_RES_TIMERS # for TASKSTATS/TASK_DELAY_ACCT: depends on NET && MULTIUSER + depends on X86_LOCAL_APIC select PREEMPT_NOTIFIERS select MMU_NOTIFIER select ANON_INODES diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 19adbb418443..0099e10eb045 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -126,16 +126,20 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu) best->ebx = xstate_required_size(vcpu->arch.xcr0, true); /* - * The existing code assumes virtual address is 48-bit in the canonical - * address checks; exit if it is ever changed. + * The existing code assumes virtual address is 48-bit or 57-bit in the + * canonical address checks; exit if it is ever changed. 
*/ best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0); - if (best && ((best->eax & 0xff00) >> 8) != 48 && - ((best->eax & 0xff00) >> 8) != 0) - return -EINVAL; + if (best) { + int vaddr_bits = (best->eax & 0xff00) >> 8; + + if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0) + return -EINVAL; + } /* Update physical-address width */ vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); + kvm_mmu_reset_context(vcpu); kvm_pmu_refresh(vcpu); return 0; @@ -383,7 +387,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, /* cpuid 7.0.ecx*/ const u32 kvm_cpuid_7_0_ecx_x86_features = - F(AVX512VBMI) | F(PKU) | 0 /*OSPKE*/ | F(AVX512_VPOPCNTDQ); + F(AVX512VBMI) | F(LA57) | F(PKU) | + 0 /*OSPKE*/ | F(AVX512_VPOPCNTDQ); /* cpuid 7.0.edx*/ const u32 kvm_cpuid_7_0_edx_x86_features = @@ -853,16 +858,24 @@ static struct kvm_cpuid_entry2* check_cpuid_limit(struct kvm_vcpu *vcpu, return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index); } -void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx) +bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, + u32 *ecx, u32 *edx, bool check_limit) { u32 function = *eax, index = *ecx; struct kvm_cpuid_entry2 *best; + bool entry_found = true; best = kvm_find_cpuid_entry(vcpu, function, index); - if (!best) - best = check_cpuid_limit(vcpu, function, index); + if (!best) { + entry_found = false; + if (!check_limit) + goto out; + best = check_cpuid_limit(vcpu, function, index); + } + +out: if (best) { *eax = best->eax; *ebx = best->ebx; @@ -870,7 +883,8 @@ void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx) *edx = best->edx; } else *eax = *ebx = *ecx = *edx = 0; - trace_kvm_cpuid(function, *eax, *ebx, *ecx, *edx); + trace_kvm_cpuid(function, *eax, *ebx, *ecx, *edx, entry_found); + return entry_found; } EXPORT_SYMBOL_GPL(kvm_cpuid); @@ -883,7 +897,7 @@ int kvm_emulate_cpuid(struct kvm_vcpu *vcpu) eax = kvm_register_read(vcpu, VCPU_REGS_RAX); ecx = kvm_register_read(vcpu, VCPU_REGS_RCX); - kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx); + kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, true); kvm_register_write(vcpu, VCPU_REGS_RAX, eax); kvm_register_write(vcpu, VCPU_REGS_RBX, ebx); kvm_register_write(vcpu, VCPU_REGS_RCX, ecx); diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h index da6728383052..0bc5c1315708 100644 --- a/arch/x86/kvm/cpuid.h +++ b/arch/x86/kvm/cpuid.h @@ -3,6 +3,7 @@ #include "x86.h" #include +#include int kvm_update_cpuid(struct kvm_vcpu *vcpu); bool kvm_mpx_supported(void); @@ -20,7 +21,8 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu, int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 __user *entries); -void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx); +bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, + u32 *ecx, u32 *edx, bool check_limit); int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu); @@ -29,95 +31,86 @@ static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu) return vcpu->arch.maxphyaddr; } -static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu) -{ - struct kvm_cpuid_entry2 *best; +struct cpuid_reg { + u32 function; + u32 index; + int reg; +}; - if (!static_cpu_has(X86_FEATURE_XSAVE)) +static const struct cpuid_reg reverse_cpuid[] = { + [CPUID_1_EDX] = { 1, 0, CPUID_EDX}, + [CPUID_8000_0001_EDX] = {0x80000001, 0, CPUID_EDX}, + [CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX}, + [CPUID_1_ECX] = { 1, 0, CPUID_ECX}, + [CPUID_C000_0001_EDX] = {0xc0000001, 0, 
CPUID_EDX}, + [CPUID_8000_0001_ECX] = {0xc0000001, 0, CPUID_ECX}, + [CPUID_7_0_EBX] = { 7, 0, CPUID_EBX}, + [CPUID_D_1_EAX] = { 0xd, 1, CPUID_EAX}, + [CPUID_F_0_EDX] = { 0xf, 0, CPUID_EDX}, + [CPUID_F_1_EDX] = { 0xf, 1, CPUID_EDX}, + [CPUID_8000_0008_EBX] = {0x80000008, 0, CPUID_EBX}, + [CPUID_6_EAX] = { 6, 0, CPUID_EAX}, + [CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX}, + [CPUID_7_ECX] = { 7, 0, CPUID_ECX}, + [CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX}, +}; + +static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned x86_feature) +{ + unsigned x86_leaf = x86_feature / 32; + + BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid)); + BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0); + + return reverse_cpuid[x86_leaf]; +} + +static __always_inline int *guest_cpuid_get_register(struct kvm_vcpu *vcpu, unsigned x86_feature) +{ + struct kvm_cpuid_entry2 *entry; + const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature); + + entry = kvm_find_cpuid_entry(vcpu, cpuid.function, cpuid.index); + if (!entry) + return NULL; + + switch (cpuid.reg) { + case CPUID_EAX: + return &entry->eax; + case CPUID_EBX: + return &entry->ebx; + case CPUID_ECX: + return &entry->ecx; + case CPUID_EDX: + return &entry->edx; + default: + BUILD_BUG(); + return NULL; + } +} + +static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu, unsigned x86_feature) +{ + int *reg; + + if (x86_feature == X86_FEATURE_XSAVE && + !static_cpu_has(X86_FEATURE_XSAVE)) return false; - best = kvm_find_cpuid_entry(vcpu, 1, 0); - return best && (best->ecx & bit(X86_FEATURE_XSAVE)); + reg = guest_cpuid_get_register(vcpu, x86_feature); + if (!reg) + return false; + + return *reg & bit(x86_feature); } -static inline bool guest_cpuid_has_mtrr(struct kvm_vcpu *vcpu) +static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu, unsigned x86_feature) { - struct kvm_cpuid_entry2 *best; + int *reg; - best = kvm_find_cpuid_entry(vcpu, 1, 0); - return best && (best->edx & bit(X86_FEATURE_MTRR)); -} - -static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu) -{ - struct kvm_cpuid_entry2 *best; - - best = kvm_find_cpuid_entry(vcpu, 7, 0); - return best && (best->ebx & bit(X86_FEATURE_TSC_ADJUST)); -} - -static inline bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu) -{ - struct kvm_cpuid_entry2 *best; - - best = kvm_find_cpuid_entry(vcpu, 7, 0); - return best && (best->ebx & bit(X86_FEATURE_SMEP)); -} - -static inline bool guest_cpuid_has_smap(struct kvm_vcpu *vcpu) -{ - struct kvm_cpuid_entry2 *best; - - best = kvm_find_cpuid_entry(vcpu, 7, 0); - return best && (best->ebx & bit(X86_FEATURE_SMAP)); -} - -static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu) -{ - struct kvm_cpuid_entry2 *best; - - best = kvm_find_cpuid_entry(vcpu, 7, 0); - return best && (best->ebx & bit(X86_FEATURE_FSGSBASE)); -} - -static inline bool guest_cpuid_has_pku(struct kvm_vcpu *vcpu) -{ - struct kvm_cpuid_entry2 *best; - - best = kvm_find_cpuid_entry(vcpu, 7, 0); - return best && (best->ecx & bit(X86_FEATURE_PKU)); -} - -static inline bool guest_cpuid_has_longmode(struct kvm_vcpu *vcpu) -{ - struct kvm_cpuid_entry2 *best; - - best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); - return best && (best->edx & bit(X86_FEATURE_LM)); -} - -static inline bool guest_cpuid_has_osvw(struct kvm_vcpu *vcpu) -{ - struct kvm_cpuid_entry2 *best; - - best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); - return best && (best->ecx & bit(X86_FEATURE_OSVW)); -} - -static inline bool guest_cpuid_has_pcid(struct kvm_vcpu *vcpu) -{ - struct 
kvm_cpuid_entry2 *best; - - best = kvm_find_cpuid_entry(vcpu, 1, 0); - return best && (best->ecx & bit(X86_FEATURE_PCID)); -} - -static inline bool guest_cpuid_has_x2apic(struct kvm_vcpu *vcpu) -{ - struct kvm_cpuid_entry2 *best; - - best = kvm_find_cpuid_entry(vcpu, 1, 0); - return best && (best->ecx & bit(X86_FEATURE_X2APIC)); + reg = guest_cpuid_get_register(vcpu, x86_feature); + if (reg) + *reg &= ~bit(x86_feature); } static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu) @@ -128,58 +121,6 @@ static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu) return best && best->ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx; } -static inline bool guest_cpuid_has_gbpages(struct kvm_vcpu *vcpu) -{ - struct kvm_cpuid_entry2 *best; - - best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); - return best && (best->edx & bit(X86_FEATURE_GBPAGES)); -} - -static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu) -{ - struct kvm_cpuid_entry2 *best; - - best = kvm_find_cpuid_entry(vcpu, 7, 0); - return best && (best->ebx & bit(X86_FEATURE_RTM)); -} - -static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu) -{ - struct kvm_cpuid_entry2 *best; - - best = kvm_find_cpuid_entry(vcpu, 7, 0); - return best && (best->ebx & bit(X86_FEATURE_MPX)); -} - -static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu) -{ - struct kvm_cpuid_entry2 *best; - - best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); - return best && (best->edx & bit(X86_FEATURE_RDTSCP)); -} - -/* - * NRIPS is provided through cpuidfn 0x8000000a.edx bit 3 - */ -#define BIT_NRIPS 3 - -static inline bool guest_cpuid_has_nrips(struct kvm_vcpu *vcpu) -{ - struct kvm_cpuid_entry2 *best; - - best = kvm_find_cpuid_entry(vcpu, 0x8000000a, 0); - - /* - * NRIPS is a scattered cpuid feature, so we can't use - * X86_FEATURE_NRIPS here (X86_FEATURE_NRIPS would be bit - * position 8, not 3). 
- */ - return best && (best->edx & bit(BIT_NRIPS)); -} -#undef BIT_NRIPS - static inline int guest_cpuid_family(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *best; diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index fb0055953fbc..d90cdc77e077 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -28,6 +28,7 @@ #include "x86.h" #include "tss.h" +#include "mmu.h" /* * Operand types @@ -424,8 +425,10 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *)); #op " %al \n\t" \ FOP_RET -asm(".global kvm_fastop_exception \n" - "kvm_fastop_exception: xor %esi, %esi; ret"); +asm(".pushsection .fixup, \"ax\"\n" + ".global kvm_fastop_exception \n" + "kvm_fastop_exception: xor %esi, %esi; ret\n" + ".popsection"); FOP_START(setcc) FOP_SETCC(seto) @@ -688,16 +691,18 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt, ulong la; u32 lim; u16 sel; + u8 va_bits; la = seg_base(ctxt, addr.seg) + addr.ea; *max_size = 0; switch (mode) { case X86EMUL_MODE_PROT64: *linear = la; - if (is_noncanonical_address(la)) + va_bits = ctxt_virt_addr_bits(ctxt); + if (get_canonical(la, va_bits) != la) goto bad; - *max_size = min_t(u64, ~0u, (1ull << 48) - la); + *max_size = min_t(u64, ~0u, (1ull << va_bits) - la); if (size > *max_size) goto bad; break; @@ -1748,8 +1753,8 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, sizeof(base3), &ctxt->exception); if (ret != X86EMUL_CONTINUE) return ret; - if (is_noncanonical_address(get_desc_base(&seg_desc) | - ((u64)base3 << 32))) + if (emul_is_noncanonical_address(get_desc_base(&seg_desc) | + ((u64)base3 << 32), ctxt)) return emulate_gp(ctxt, 0); } load: @@ -2333,7 +2338,7 @@ static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt) eax = 0x80000001; ecx = 0; - ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); + ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false); return edx & bit(X86_FEATURE_LM); } @@ -2636,7 +2641,7 @@ static bool vendor_intel(struct x86_emulate_ctxt *ctxt) u32 eax, ebx, ecx, edx; eax = ecx = 0; - ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); + ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false); return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx; @@ -2656,7 +2661,7 @@ static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt) eax = 0x00000000; ecx = 0x00000000; - ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); + ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false); /* * Intel ("GenuineIntel") * remark: Intel CPUs only support "syscall" in 64bit @@ -2840,8 +2845,8 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt) ss_sel = cs_sel + 8; cs.d = 0; cs.l = 1; - if (is_noncanonical_address(rcx) || - is_noncanonical_address(rdx)) + if (emul_is_noncanonical_address(rcx, ctxt) || + emul_is_noncanonical_address(rdx, ctxt)) return emulate_gp(ctxt, 0); break; } @@ -3551,7 +3556,7 @@ static int em_movbe(struct x86_emulate_ctxt *ctxt) /* * Check MOVBE is set in the guest-visible CPUID leaf. 
*/ - ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); + ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false); if (!(ecx & FFL(MOVBE))) return emulate_ud(ctxt); @@ -3756,7 +3761,7 @@ static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt) if (rc != X86EMUL_CONTINUE) return rc; if (ctxt->mode == X86EMUL_MODE_PROT64 && - is_noncanonical_address(desc_ptr.address)) + emul_is_noncanonical_address(desc_ptr.address, ctxt)) return emulate_gp(ctxt, 0); if (lgdt) ctxt->ops->set_gdt(ctxt, &desc_ptr); @@ -3865,7 +3870,7 @@ static int em_cpuid(struct x86_emulate_ctxt *ctxt) eax = reg_read(ctxt, VCPU_REGS_RAX); ecx = reg_read(ctxt, VCPU_REGS_RCX); - ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); + ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true); *reg_write(ctxt, VCPU_REGS_RAX) = eax; *reg_write(ctxt, VCPU_REGS_RBX) = ebx; *reg_write(ctxt, VCPU_REGS_RCX) = ecx; @@ -3924,7 +3929,7 @@ static int check_fxsr(struct x86_emulate_ctxt *ctxt) { u32 eax = 1, ebx, ecx = 0, edx; - ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); + ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false); if (!(edx & FFL(FXSR))) return emulate_ud(ctxt); @@ -4097,8 +4102,19 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt) u64 rsvd = 0; ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); - if (efer & EFER_LMA) - rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD; + if (efer & EFER_LMA) { + u64 maxphyaddr; + u32 eax, ebx, ecx, edx; + + eax = 0x80000008; + ecx = 0; + if (ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, + &edx, false)) + maxphyaddr = eax & 0xff; + else + maxphyaddr = 36; + rsvd = rsvd_bits(maxphyaddr, 62); + } if (new_val & rsvd) return emulate_gp(ctxt, 0); @@ -5284,7 +5300,6 @@ static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt, static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *)) { - register void *__sp asm(_ASM_SP); ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF; if (!(ctxt->d & ByteOp)) @@ -5292,7 +5307,7 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *)) asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n" : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags), - [fastop]"+S"(fop), "+r"(__sp) + [fastop]"+S"(fop), ASM_CALL_CONSTRAINT : "c"(ctxt->src2.val)); ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK); diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 337b6d2730fa..dc97f2544b6f 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -1160,6 +1160,12 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) return stimer_get_count(vcpu_to_stimer(vcpu, timer_index), pdata); } + case HV_X64_MSR_TSC_FREQUENCY: + data = (u64)vcpu->arch.virtual_tsc_khz * 1000; + break; + case HV_X64_MSR_APIC_FREQUENCY: + data = APIC_BUS_FREQUENCY; + break; default: vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); return 1; @@ -1268,7 +1274,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) switch (code) { case HVCALL_NOTIFY_LONG_SPIN_WAIT: - kvm_vcpu_on_spin(vcpu); + kvm_vcpu_on_spin(vcpu, true); break; case HVCALL_POST_MESSAGE: case HVCALL_SIGNAL_EVENT: diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h index e1e89ee4af75..9add410f195f 100644 --- a/arch/x86/kvm/kvm_cache_regs.h +++ b/arch/x86/kvm/kvm_cache_regs.h @@ -4,7 +4,7 @@ #define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS #define KVM_POSSIBLE_CR4_GUEST_BITS \ (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \ - | X86_CR4_OSXMMEXCPT | X86_CR4_PGE) 
+ | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE) static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, enum kvm_reg reg) diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 589dcc117086..69c5612be786 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -54,8 +54,6 @@ #define PRIu64 "u" #define PRIo64 "o" -#define APIC_BUS_CYCLE_NS 1 - /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */ #define apic_debug(fmt, arg...) @@ -1326,6 +1324,10 @@ static void apic_timer_expired(struct kvm_lapic *apic) atomic_inc(&apic->lapic_timer.pending); kvm_set_pending_timer(vcpu); + /* + * For x86, the atomic_inc() is serialized, thus + * using swait_active() is safe. + */ if (swait_active(q)) swake_up(q); diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h index 29caa2c3dff9..215721e1426a 100644 --- a/arch/x86/kvm/lapic.h +++ b/arch/x86/kvm/lapic.h @@ -12,6 +12,9 @@ #define KVM_APIC_SHORT_MASK 0xc0000 #define KVM_APIC_DEST_MASK 0x800 +#define APIC_BUS_CYCLE_NS 1 +#define APIC_BUS_FREQUENCY (1000000000ULL / APIC_BUS_CYCLE_NS) + struct kvm_timer { struct hrtimer timer; s64 period; /* unit: ns */ diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 04d750813c9d..106d4a029a8a 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -2169,8 +2169,8 @@ static bool kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, } struct mmu_page_path { - struct kvm_mmu_page *parent[PT64_ROOT_LEVEL]; - unsigned int idx[PT64_ROOT_LEVEL]; + struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL]; + unsigned int idx[PT64_ROOT_MAX_LEVEL]; }; #define for_each_sp(pvec, sp, parents, i) \ @@ -2385,8 +2385,8 @@ static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator, iterator->shadow_addr = vcpu->arch.mmu.root_hpa; iterator->level = vcpu->arch.mmu.shadow_root_level; - if (iterator->level == PT64_ROOT_LEVEL && - vcpu->arch.mmu.root_level < PT64_ROOT_LEVEL && + if (iterator->level == PT64_ROOT_4LEVEL && + vcpu->arch.mmu.root_level < PT64_ROOT_4LEVEL && !vcpu->arch.mmu.direct_map) --iterator->level; @@ -2610,9 +2610,7 @@ static bool prepare_zap_oldest_mmu_page(struct kvm *kvm, sp = list_last_entry(&kvm->arch.active_mmu_pages, struct kvm_mmu_page, link); - kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); - - return true; + return kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); } /* @@ -3262,7 +3260,7 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level, static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, gva_t gva, kvm_pfn_t *pfn, bool write, bool *writable); -static void make_mmu_pages_available(struct kvm_vcpu *vcpu); +static int make_mmu_pages_available(struct kvm_vcpu *vcpu); static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code, gfn_t gfn, bool prefault) @@ -3302,7 +3300,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code, spin_lock(&vcpu->kvm->mmu_lock); if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) goto out_unlock; - make_mmu_pages_available(vcpu); + if (make_mmu_pages_available(vcpu) < 0) + goto out_unlock; if (likely(!force_pt_level)) transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level); r = __direct_map(vcpu, write, map_writable, level, gfn, pfn, prefault); @@ -3326,8 +3325,8 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu) if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) return; - if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL && - (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL || + if (vcpu->arch.mmu.shadow_root_level >= PT64_ROOT_4LEVEL && + 
(vcpu->arch.mmu.root_level >= PT64_ROOT_4LEVEL || vcpu->arch.mmu.direct_map)) { hpa_t root = vcpu->arch.mmu.root_hpa; @@ -3379,10 +3378,14 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu) struct kvm_mmu_page *sp; unsigned i; - if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) { + if (vcpu->arch.mmu.shadow_root_level >= PT64_ROOT_4LEVEL) { spin_lock(&vcpu->kvm->mmu_lock); - make_mmu_pages_available(vcpu); - sp = kvm_mmu_get_page(vcpu, 0, 0, PT64_ROOT_LEVEL, 1, ACC_ALL); + if(make_mmu_pages_available(vcpu) < 0) { + spin_unlock(&vcpu->kvm->mmu_lock); + return 1; + } + sp = kvm_mmu_get_page(vcpu, 0, 0, + vcpu->arch.mmu.shadow_root_level, 1, ACC_ALL); ++sp->root_count; spin_unlock(&vcpu->kvm->mmu_lock); vcpu->arch.mmu.root_hpa = __pa(sp->spt); @@ -3392,7 +3395,10 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu) MMU_WARN_ON(VALID_PAGE(root)); spin_lock(&vcpu->kvm->mmu_lock); - make_mmu_pages_available(vcpu); + if (make_mmu_pages_available(vcpu) < 0) { + spin_unlock(&vcpu->kvm->mmu_lock); + return 1; + } sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT), i << 30, PT32_ROOT_LEVEL, 1, ACC_ALL); root = __pa(sp->spt); @@ -3423,15 +3429,18 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) * Do we shadow a long mode page table? If so we need to * write-protect the guests page table root. */ - if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) { + if (vcpu->arch.mmu.root_level >= PT64_ROOT_4LEVEL) { hpa_t root = vcpu->arch.mmu.root_hpa; MMU_WARN_ON(VALID_PAGE(root)); spin_lock(&vcpu->kvm->mmu_lock); - make_mmu_pages_available(vcpu); - sp = kvm_mmu_get_page(vcpu, root_gfn, 0, PT64_ROOT_LEVEL, - 0, ACC_ALL); + if (make_mmu_pages_available(vcpu) < 0) { + spin_unlock(&vcpu->kvm->mmu_lock); + return 1; + } + sp = kvm_mmu_get_page(vcpu, root_gfn, 0, + vcpu->arch.mmu.shadow_root_level, 0, ACC_ALL); root = __pa(sp->spt); ++sp->root_count; spin_unlock(&vcpu->kvm->mmu_lock); @@ -3445,7 +3454,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) * the shadow page table may be a PAE or a long mode page table. */ pm_mask = PT_PRESENT_MASK; - if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) + if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_4LEVEL) pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK; for (i = 0; i < 4; ++i) { @@ -3463,7 +3472,10 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) return 1; } spin_lock(&vcpu->kvm->mmu_lock); - make_mmu_pages_available(vcpu); + if (make_mmu_pages_available(vcpu) < 0) { + spin_unlock(&vcpu->kvm->mmu_lock); + return 1; + } sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, PT32_ROOT_LEVEL, 0, ACC_ALL); root = __pa(sp->spt); @@ -3478,7 +3490,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) * If we shadow a 32 bit page table with a long mode page * table we enter this path. 
*/ - if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) { + if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_4LEVEL) { if (vcpu->arch.mmu.lm_root == NULL) { /* * The additional page necessary for this is only @@ -3523,7 +3535,7 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu) vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY); kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC); - if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) { + if (vcpu->arch.mmu.root_level >= PT64_ROOT_4LEVEL) { hpa_t root = vcpu->arch.mmu.root_hpa; sp = page_header(root); mmu_sync_children(vcpu, sp); @@ -3588,6 +3600,13 @@ static bool is_shadow_zero_bits_set(struct kvm_mmu *mmu, u64 spte, int level) static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct) { + /* + * A nested guest cannot use the MMIO cache if it is using nested + * page tables, because cr2 is a nGPA while the cache stores GPAs. + */ + if (mmu_is_nested(vcpu)) + return false; + if (direct) return vcpu_match_mmio_gpa(vcpu, addr); @@ -3599,7 +3618,7 @@ static bool walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep) { struct kvm_shadow_walk_iterator iterator; - u64 sptes[PT64_ROOT_LEVEL], spte = 0ull; + u64 sptes[PT64_ROOT_MAX_LEVEL], spte = 0ull; int root, leaf; bool reserved = false; @@ -3640,7 +3659,23 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep) return reserved; } -int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct) +/* + * Return values of handle_mmio_page_fault: + * RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the instruction + * directly. + * RET_MMIO_PF_INVALID: invalid spte is detected then let the real page + * fault path update the mmio spte. + * RET_MMIO_PF_RETRY: let CPU fault again on the address. + * RET_MMIO_PF_BUG: a bug was detected (and a WARN was printed). 
+ */ +enum { + RET_MMIO_PF_EMULATE = 1, + RET_MMIO_PF_INVALID = 2, + RET_MMIO_PF_RETRY = 0, + RET_MMIO_PF_BUG = -1 +}; + +static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct) { u64 spte; bool reserved; @@ -3802,7 +3837,7 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code, case KVM_PV_REASON_PAGE_NOT_PRESENT: vcpu->arch.apf.host_apf_reason = 0; local_irq_disable(); - kvm_async_pf_task_wait(fault_address); + kvm_async_pf_task_wait(fault_address, 0); local_irq_enable(); break; case KVM_PV_REASON_PAGE_READY: @@ -3872,7 +3907,8 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code, spin_lock(&vcpu->kvm->mmu_lock); if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) goto out_unlock; - make_mmu_pages_available(vcpu); + if (make_mmu_pages_available(vcpu) < 0) + goto out_unlock; if (likely(!force_pt_level)) transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level); r = __direct_map(vcpu, write, map_writable, level, gfn, pfn, prefault); @@ -4025,7 +4061,13 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0]; break; - case PT64_ROOT_LEVEL: + case PT64_ROOT_5LEVEL: + rsvd_check->rsvd_bits_mask[0][4] = exb_bit_rsvd | + nonleaf_bit8_rsvd | rsvd_bits(7, 7) | + rsvd_bits(maxphyaddr, 51); + rsvd_check->rsvd_bits_mask[1][4] = + rsvd_check->rsvd_bits_mask[0][4]; + case PT64_ROOT_4LEVEL: rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd | nonleaf_bit8_rsvd | rsvd_bits(7, 7) | rsvd_bits(maxphyaddr, 51); @@ -4055,7 +4097,8 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, { __reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check, cpuid_maxphyaddr(vcpu), context->root_level, - context->nx, guest_cpuid_has_gbpages(vcpu), + context->nx, + guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES), is_pse(vcpu), guest_cpuid_is_amd(vcpu)); } @@ -4065,6 +4108,8 @@ __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check, { u64 bad_mt_xwr; + rsvd_check->rsvd_bits_mask[0][4] = + rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7); rsvd_check->rsvd_bits_mask[0][3] = rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7); rsvd_check->rsvd_bits_mask[0][2] = @@ -4074,6 +4119,7 @@ __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check, rsvd_check->rsvd_bits_mask[0][0] = rsvd_bits(maxphyaddr, 51); /* large page */ + rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4]; rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3]; rsvd_check->rsvd_bits_mask[1][2] = rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 29); @@ -4120,8 +4166,8 @@ reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context) __reset_rsvds_bits_mask(vcpu, shadow_zero_check, boot_cpu_data.x86_phys_bits, context->shadow_root_level, uses_nx, - guest_cpuid_has_gbpages(vcpu), is_pse(vcpu), - true); + guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES), + is_pse(vcpu), true); if (!shadow_me_mask) return; @@ -4185,66 +4231,85 @@ reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, boot_cpu_data.x86_phys_bits, execonly); } +#define BYTE_MASK(access) \ + ((1 & (access) ? 2 : 0) | \ + (2 & (access) ? 4 : 0) | \ + (3 & (access) ? 8 : 0) | \ + (4 & (access) ? 16 : 0) | \ + (5 & (access) ? 32 : 0) | \ + (6 & (access) ? 64 : 0) | \ + (7 & (access) ? 
128 : 0)) + + static void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, bool ept) { - unsigned bit, byte, pfec; - u8 map; - bool fault, x, w, u, wf, uf, ff, smapf, cr4_smap, cr4_smep, smap = 0; + unsigned byte; + + const u8 x = BYTE_MASK(ACC_EXEC_MASK); + const u8 w = BYTE_MASK(ACC_WRITE_MASK); + const u8 u = BYTE_MASK(ACC_USER_MASK); + + bool cr4_smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP) != 0; + bool cr4_smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP) != 0; + bool cr0_wp = is_write_protection(vcpu); - cr4_smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP); - cr4_smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP); for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) { - pfec = byte << 1; - map = 0; - wf = pfec & PFERR_WRITE_MASK; - uf = pfec & PFERR_USER_MASK; - ff = pfec & PFERR_FETCH_MASK; + unsigned pfec = byte << 1; + /* - * PFERR_RSVD_MASK bit is set in PFEC if the access is not - * subject to SMAP restrictions, and cleared otherwise. The - * bit is only meaningful if the SMAP bit is set in CR4. + * Each "*f" variable has a 1 bit for each UWX value + * that causes a fault with the given PFEC. */ - smapf = !(pfec & PFERR_RSVD_MASK); - for (bit = 0; bit < 8; ++bit) { - x = bit & ACC_EXEC_MASK; - w = bit & ACC_WRITE_MASK; - u = bit & ACC_USER_MASK; - if (!ept) { - /* Not really needed: !nx will cause pte.nx to fault */ - x |= !mmu->nx; - /* Allow supervisor writes if !cr0.wp */ - w |= !is_write_protection(vcpu) && !uf; - /* Disallow supervisor fetches of user code if cr4.smep */ - x &= !(cr4_smep && u && !uf); + /* Faults from writes to non-writable pages */ + u8 wf = (pfec & PFERR_WRITE_MASK) ? ~w : 0; + /* Faults from user mode accesses to supervisor pages */ + u8 uf = (pfec & PFERR_USER_MASK) ? ~u : 0; + /* Faults from fetches of non-executable pages*/ + u8 ff = (pfec & PFERR_FETCH_MASK) ? ~x : 0; + /* Faults from kernel mode fetches of user pages */ + u8 smepf = 0; + /* Faults from kernel mode accesses of user pages */ + u8 smapf = 0; - /* - * SMAP:kernel-mode data accesses from user-mode - * mappings should fault. A fault is considered - * as a SMAP violation if all of the following - * conditions are ture: - * - X86_CR4_SMAP is set in CR4 - * - A user page is accessed - * - Page fault in kernel mode - * - if CPL = 3 or X86_EFLAGS_AC is clear - * - * Here, we cover the first three conditions. - * The fourth is computed dynamically in - * permission_fault() and is in smapf. - * - * Also, SMAP does not affect instruction - * fetches, add the !ff check here to make it - * clearer. - */ - smap = cr4_smap && u && !uf && !ff; - } + if (!ept) { + /* Faults from kernel mode accesses to user pages */ + u8 kf = (pfec & PFERR_USER_MASK) ? 0 : u; - fault = (ff && !x) || (uf && !u) || (wf && !w) || - (smapf && smap); - map |= fault << bit; + /* Not really needed: !nx will cause pte.nx to fault */ + if (!mmu->nx) + ff = 0; + + /* Allow supervisor writes if !cr0.wp */ + if (!cr0_wp) + wf = (pfec & PFERR_USER_MASK) ? wf : 0; + + /* Disallow supervisor fetches of user code if cr4.smep */ + if (cr4_smep) + smepf = (pfec & PFERR_FETCH_MASK) ? kf : 0; + + /* + * SMAP:kernel-mode data accesses from user-mode + * mappings should fault. A fault is considered + * as a SMAP violation if all of the following + * conditions are ture: + * - X86_CR4_SMAP is set in CR4 + * - A user page is accessed + * - The access is not a fetch + * - Page fault in kernel mode + * - if CPL = 3 or X86_EFLAGS_AC is clear + * + * Here, we cover the first three conditions. 
+ * The fourth is computed dynamically in permission_fault(); + * PFERR_RSVD_MASK bit will be set in PFEC if the access is + * *not* subject to SMAP restrictions. + */ + if (cr4_smap) + smapf = (pfec & (PFERR_RSVD_MASK|PFERR_FETCH_MASK)) ? 0 : kf; } - mmu->permissions[byte] = map; + + mmu->permissions[byte] = ff | uf | wf | smepf | smapf; } } @@ -4358,7 +4423,10 @@ static void paging64_init_context_common(struct kvm_vcpu *vcpu, static void paging64_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context) { - paging64_init_context_common(vcpu, context, PT64_ROOT_LEVEL); + int root_level = is_la57_mode(vcpu) ? + PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL; + + paging64_init_context_common(vcpu, context, root_level); } static void paging32_init_context(struct kvm_vcpu *vcpu, @@ -4399,7 +4467,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) context->sync_page = nonpaging_sync_page; context->invlpg = nonpaging_invlpg; context->update_pte = nonpaging_update_pte; - context->shadow_root_level = kvm_x86_ops->get_tdp_level(); + context->shadow_root_level = kvm_x86_ops->get_tdp_level(vcpu); context->root_hpa = INVALID_PAGE; context->direct_map = true; context->set_cr3 = kvm_x86_ops->set_tdp_cr3; @@ -4413,7 +4481,8 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) context->root_level = 0; } else if (is_long_mode(vcpu)) { context->nx = is_nx(vcpu); - context->root_level = PT64_ROOT_LEVEL; + context->root_level = is_la57_mode(vcpu) ? + PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL; reset_rsvds_bits_mask(vcpu, context); context->gva_to_gpa = paging64_gva_to_gpa; } else if (is_pae(vcpu)) { @@ -4470,7 +4539,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly, MMU_WARN_ON(VALID_PAGE(context->root_hpa)); - context->shadow_root_level = kvm_x86_ops->get_tdp_level(); + context->shadow_root_level = PT64_ROOT_4LEVEL; context->nx = true; context->ept_ad = accessed_dirty; @@ -4479,7 +4548,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly, context->sync_page = ept_sync_page; context->invlpg = ept_invlpg; context->update_pte = ept_update_pte; - context->root_level = context->shadow_root_level; + context->root_level = PT64_ROOT_4LEVEL; context->root_hpa = INVALID_PAGE; context->direct_map = false; context->base_role.ad_disabled = !accessed_dirty; @@ -4524,7 +4593,8 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu) g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested; } else if (is_long_mode(vcpu)) { g_context->nx = is_nx(vcpu); - g_context->root_level = PT64_ROOT_LEVEL; + g_context->root_level = is_la57_mode(vcpu) ? 
+ PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL; reset_rsvds_bits_mask(vcpu, g_context); g_context->gva_to_gpa = paging64_gva_to_gpa_nested; } else if (is_pae(vcpu)) { @@ -4814,12 +4884,12 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva) } EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt); -static void make_mmu_pages_available(struct kvm_vcpu *vcpu) +static int make_mmu_pages_available(struct kvm_vcpu *vcpu) { LIST_HEAD(invalid_list); if (likely(kvm_mmu_available_pages(vcpu->kvm) >= KVM_MIN_FREE_MMU_PAGES)) - return; + return 0; while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES) { if (!prepare_zap_oldest_mmu_page(vcpu->kvm, &invalid_list)) @@ -4828,6 +4898,10 @@ static void make_mmu_pages_available(struct kvm_vcpu *vcpu) ++vcpu->kvm->stat.mmu_recycled; } kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); + + if (!kvm_mmu_available_pages(vcpu->kvm)) + return -ENOSPC; + return 0; } int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code, @@ -4835,7 +4909,13 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code, { int r, emulation_type = EMULTYPE_RETRY; enum emulation_result er; - bool direct = vcpu->arch.mmu.direct_map || mmu_is_nested(vcpu); + bool direct = vcpu->arch.mmu.direct_map; + + /* With shadow page tables, fault_address contains a GVA or nGPA. */ + if (vcpu->arch.mmu.direct_map) { + vcpu->arch.gpa_available = true; + vcpu->arch.gpa_val = cr2; + } if (unlikely(error_code & PFERR_RSVD_MASK)) { r = handle_mmio_page_fault(vcpu, cr2, direct); @@ -4847,6 +4927,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code, return 1; if (r < 0) return r; + /* Must be RET_MMIO_PF_INVALID. */ } r = vcpu->arch.mmu.page_fault(vcpu, cr2, lower_32_bits(error_code), @@ -4862,11 +4943,9 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code, * This can occur when using nested virtualization with nested * paging in both guests. If true, we simply unprotect the page * and resume the guest. - * - * Note: AMD only (since it supports the PFERR_GUEST_PAGE_MASK used - * in PFERR_NEXT_GUEST_PAGE) */ - if (error_code == PFERR_NESTED_GUEST_PAGE) { + if (vcpu->arch.mmu.direct_map && + (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) { kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2)); return 1; } diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 4b9a3ae6b725..64a2dbd2b1af 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -37,7 +37,8 @@ #define PT32_DIR_PSE36_MASK \ (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT) -#define PT64_ROOT_LEVEL 4 +#define PT64_ROOT_5LEVEL 5 +#define PT64_ROOT_4LEVEL 4 #define PT32_ROOT_LEVEL 2 #define PT32E_ROOT_LEVEL 3 @@ -48,6 +49,9 @@ static inline u64 rsvd_bits(int s, int e) { + if (e < s) + return 0; + return ((1ULL << (e - s + 1)) - 1) << s; } @@ -56,23 +60,6 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value); void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context); -/* - * Return values of handle_mmio_page_fault: - * RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the instruction - * directly. - * RET_MMIO_PF_INVALID: invalid spte is detected then let the real page - * fault path update the mmio spte. - * RET_MMIO_PF_RETRY: let CPU fault again on the address. - * RET_MMIO_PF_BUG: a bug was detected (and a WARN was printed). 
- */ -enum { - RET_MMIO_PF_EMULATE = 1, - RET_MMIO_PF_INVALID = 2, - RET_MMIO_PF_RETRY = 0, - RET_MMIO_PF_BUG = -1 -}; - -int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct); void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu); void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly, bool accessed_dirty); diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c index dcce533d420c..d22ddbdf5e6e 100644 --- a/arch/x86/kvm/mmu_audit.c +++ b/arch/x86/kvm/mmu_audit.c @@ -62,11 +62,11 @@ static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn) if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) return; - if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) { + if (vcpu->arch.mmu.root_level >= PT64_ROOT_4LEVEL) { hpa_t root = vcpu->arch.mmu.root_hpa; sp = page_header(root); - __mmu_spte_walk(vcpu, sp, fn, PT64_ROOT_LEVEL); + __mmu_spte_walk(vcpu, sp, fn, vcpu->arch.mmu.root_level); return; } diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c index 0149ac59c273..e9ea2d45ae66 100644 --- a/arch/x86/kvm/mtrr.c +++ b/arch/x86/kvm/mtrr.c @@ -130,7 +130,7 @@ static u8 mtrr_disabled_type(struct kvm_vcpu *vcpu) * enable MTRRs and it is obviously undesirable to run the * guest entirely with UC memory and we use WB. */ - if (guest_cpuid_has_mtrr(vcpu)) + if (guest_cpuid_has(vcpu, X86_FEATURE_MTRR)) return MTRR_TYPE_UNCACHABLE; else return MTRR_TYPE_WRBACK; diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index b0454c7e4cff..86b68dc5a649 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -790,8 +790,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code, &map_writable)) return 0; - if (handle_abnormal_pfn(vcpu, mmu_is_nested(vcpu) ? 0 : addr, - walker.gfn, pfn, walker.pte_access, &r)) + if (handle_abnormal_pfn(vcpu, addr, walker.gfn, pfn, walker.pte_access, &r)) return r; /* @@ -819,7 +818,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code, goto out_unlock; kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT); - make_mmu_pages_available(vcpu); + if (make_mmu_pages_available(vcpu) < 0) + goto out_unlock; if (!force_pt_level) transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level); r = FNAME(fetch)(vcpu, addr, &walker, write_fault, diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 8dbd8dbc83eb..0e68f0b3cbf7 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -280,9 +280,9 @@ module_param(avic, int, S_IRUGO); static int vls = true; module_param(vls, int, 0444); -/* AVIC VM ID bit masks and lock */ -static DECLARE_BITMAP(avic_vm_id_bitmap, AVIC_VM_ID_NR); -static DEFINE_SPINLOCK(avic_vm_id_lock); +/* enable/disable Virtual GIF */ +static int vgif = true; +module_param(vgif, int, 0444); static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0); static void svm_flush_tlb(struct kvm_vcpu *vcpu); @@ -479,19 +479,33 @@ static inline void clr_intercept(struct vcpu_svm *svm, int bit) recalc_intercepts(svm); } +static inline bool vgif_enabled(struct vcpu_svm *svm) +{ + return !!(svm->vmcb->control.int_ctl & V_GIF_ENABLE_MASK); +} + static inline void enable_gif(struct vcpu_svm *svm) { - svm->vcpu.arch.hflags |= HF_GIF_MASK; + if (vgif_enabled(svm)) + svm->vmcb->control.int_ctl |= V_GIF_MASK; + else + svm->vcpu.arch.hflags |= HF_GIF_MASK; } static inline void disable_gif(struct vcpu_svm *svm) { - svm->vcpu.arch.hflags &= ~HF_GIF_MASK; + if (vgif_enabled(svm)) + svm->vmcb->control.int_ctl &= ~V_GIF_MASK; + else + svm->vcpu.arch.hflags &= ~HF_GIF_MASK; } static 
inline bool gif_set(struct vcpu_svm *svm) { - return !!(svm->vcpu.arch.hflags & HF_GIF_MASK); + if (vgif_enabled(svm)) + return !!(svm->vmcb->control.int_ctl & V_GIF_MASK); + else + return !!(svm->vcpu.arch.hflags & HF_GIF_MASK); } static unsigned long iopm_base; @@ -567,10 +581,10 @@ static inline void invlpga(unsigned long addr, u32 asid) asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid)); } -static int get_npt_level(void) +static int get_npt_level(struct kvm_vcpu *vcpu) { #ifdef CONFIG_X86_64 - return PT64_ROOT_LEVEL; + return PT64_ROOT_4LEVEL; #else return PT32E_ROOT_LEVEL; #endif @@ -641,7 +655,7 @@ static void svm_queue_exception(struct kvm_vcpu *vcpu) struct vcpu_svm *svm = to_svm(vcpu); unsigned nr = vcpu->arch.exception.nr; bool has_error_code = vcpu->arch.exception.has_error_code; - bool reinject = vcpu->arch.exception.reinject; + bool reinject = vcpu->arch.exception.injected; u32 error_code = vcpu->arch.exception.error_code; /* @@ -973,6 +987,7 @@ static void svm_disable_lbrv(struct vcpu_svm *svm) static void disable_nmi_singlestep(struct vcpu_svm *svm) { svm->nmi_singlestep = false; + if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) { /* Clear our flags if they were not set by the guest */ if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF)) @@ -989,6 +1004,8 @@ static void disable_nmi_singlestep(struct vcpu_svm *svm) */ #define SVM_VM_DATA_HASH_BITS 8 static DEFINE_HASHTABLE(svm_vm_data_hash, SVM_VM_DATA_HASH_BITS); +static u32 next_vm_id = 0; +static bool next_vm_id_wrapped = 0; static DEFINE_SPINLOCK(svm_vm_data_hash_lock); /* Note: @@ -1108,6 +1125,13 @@ static __init int svm_hardware_setup(void) } } + if (vgif) { + if (!boot_cpu_has(X86_FEATURE_VGIF)) + vgif = false; + else + pr_info("Virtual GIF supported\n"); + } + return 0; err: @@ -1176,7 +1200,6 @@ static void avic_init_vmcb(struct vcpu_svm *svm) vmcb->control.avic_physical_id = ppa & AVIC_HPA_MASK; vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID_COUNT; vmcb->control.int_ctl |= AVIC_ENABLE_MASK; - svm->vcpu.arch.apicv_active = true; } static void init_vmcb(struct vcpu_svm *svm) @@ -1292,7 +1315,7 @@ static void init_vmcb(struct vcpu_svm *svm) set_intercept(svm, INTERCEPT_PAUSE); } - if (avic) + if (kvm_vcpu_apicv_active(&svm->vcpu)) avic_init_vmcb(svm); /* @@ -1305,6 +1328,12 @@ static void init_vmcb(struct vcpu_svm *svm) svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK; } + if (vgif) { + clr_intercept(svm, INTERCEPT_STGI); + clr_intercept(svm, INTERCEPT_CLGI); + svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK; + } + mark_all_dirty(svm->vmcb); enable_gif(svm); @@ -1387,34 +1416,6 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu) return 0; } -static inline int avic_get_next_vm_id(void) -{ - int id; - - spin_lock(&avic_vm_id_lock); - - /* AVIC VM ID is one-based. 
*/ - id = find_next_zero_bit(avic_vm_id_bitmap, AVIC_VM_ID_NR, 1); - if (id <= AVIC_VM_ID_MASK) - __set_bit(id, avic_vm_id_bitmap); - else - id = -EAGAIN; - - spin_unlock(&avic_vm_id_lock); - return id; -} - -static inline int avic_free_vm_id(int id) -{ - if (id <= 0 || id > AVIC_VM_ID_MASK) - return -EINVAL; - - spin_lock(&avic_vm_id_lock); - __clear_bit(id, avic_vm_id_bitmap); - spin_unlock(&avic_vm_id_lock); - return 0; -} - static void avic_vm_destroy(struct kvm *kvm) { unsigned long flags; @@ -1423,8 +1424,6 @@ static void avic_vm_destroy(struct kvm *kvm) if (!avic) return; - avic_free_vm_id(vm_data->avic_vm_id); - if (vm_data->avic_logical_id_table_page) __free_page(vm_data->avic_logical_id_table_page); if (vm_data->avic_physical_id_table_page) @@ -1438,19 +1437,16 @@ static void avic_vm_destroy(struct kvm *kvm) static int avic_vm_init(struct kvm *kvm) { unsigned long flags; - int vm_id, err = -ENOMEM; + int err = -ENOMEM; struct kvm_arch *vm_data = &kvm->arch; struct page *p_page; struct page *l_page; + struct kvm_arch *ka; + u32 vm_id; if (!avic) return 0; - vm_id = avic_get_next_vm_id(); - if (vm_id < 0) - return vm_id; - vm_data->avic_vm_id = (u32)vm_id; - /* Allocating physical APIC ID table (4KB) */ p_page = alloc_page(GFP_KERNEL); if (!p_page) @@ -1468,6 +1464,22 @@ static int avic_vm_init(struct kvm *kvm) clear_page(page_address(l_page)); spin_lock_irqsave(&svm_vm_data_hash_lock, flags); + again: + vm_id = next_vm_id = (next_vm_id + 1) & AVIC_VM_ID_MASK; + if (vm_id == 0) { /* id is 1-based, zero is not okay */ + next_vm_id_wrapped = 1; + goto again; + } + /* Is it still in use? Only possible if wrapped at least once */ + if (next_vm_id_wrapped) { + hash_for_each_possible(svm_vm_data_hash, ka, hnode, vm_id) { + struct kvm *k2 = container_of(ka, struct kvm, arch); + struct kvm_arch *vd2 = &k2->arch; + if (vd2->avic_vm_id == vm_id) + goto again; + } + } + vm_data->avic_vm_id = vm_id; hash_add(svm_vm_data_hash, &vm_data->hnode, vm_data->avic_vm_id); spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags); @@ -1580,13 +1592,30 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) } init_vmcb(svm); - kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy); + kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy, true); kvm_register_write(vcpu, VCPU_REGS_RDX, eax); if (kvm_vcpu_apicv_active(vcpu) && !init_event) avic_update_vapic_bar(svm, APIC_DEFAULT_PHYS_BASE); } +static int avic_init_vcpu(struct vcpu_svm *svm) +{ + int ret; + + if (!kvm_vcpu_apicv_active(&svm->vcpu)) + return 0; + + ret = avic_init_backing_page(&svm->vcpu); + if (ret) + return ret; + + INIT_LIST_HEAD(&svm->ir_list); + spin_lock_init(&svm->ir_list_lock); + + return ret; +} + static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id) { struct vcpu_svm *svm; @@ -1623,14 +1652,9 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id) if (!hsave_page) goto free_page3; - if (avic) { - err = avic_init_backing_page(&svm->vcpu); - if (err) - goto free_page4; - - INIT_LIST_HEAD(&svm->ir_list); - spin_lock_init(&svm->ir_list_lock); - } + err = avic_init_vcpu(svm); + if (err) + goto free_page4; /* We initialize this flag to true to make sure that the is_running * bit would be set the first time the vcpu is loaded. 
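[Editor's illustrative sketch, not part of the patch.] The hunks above replace the bitmap-based AVIC VM ID allocator with a wrapping counter (next_vm_id) plus a wrap flag (next_vm_id_wrapped): IDs are handed out by incrementing the counter under the existing hash-table lock, zero is skipped because IDs are 1-based, and the table of live VMs is only scanned for collisions once the counter has wrapped at least once. The standalone C sketch below shows the same allocation pattern in simplified form; ID_MASK, live[] and alloc_vm_id() are made-up stand-ins for AVIC_VM_ID_MASK, the svm_vm_data_hash hashtable and the inline allocation done under svm_vm_data_hash_lock, and exhaustion handling is omitted just as in the patch.

/*
 * Simplified userspace sketch of the wrapping VM ID allocation scheme.
 * Assumptions: single-threaded (no lock needed here), 8-bit ID space for
 * brevity, and a plain bool array standing in for the kernel hashtable.
 */
#include <stdbool.h>
#include <stdio.h>

#define ID_MASK 0xff                 /* stand-in for AVIC_VM_ID_MASK */

static unsigned int next_id;         /* last ID handed out */
static bool wrapped;                 /* set once next_id wraps past ID_MASK */
static bool live[ID_MASK + 1];       /* stand-in for the live-VM hashtable */

static unsigned int alloc_vm_id(void)
{
	unsigned int id;

again:
	id = next_id = (next_id + 1) & ID_MASK;
	if (id == 0) {               /* IDs are 1-based, zero is never valid */
		wrapped = true;
		goto again;
	}
	/* A collision is only possible after the counter has wrapped once. */
	if (wrapped && live[id])
		goto again;

	live[id] = true;
	return id;
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		printf("allocated VM id %u\n", alloc_vm_id());
	return 0;
}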
@@ -2384,7 +2408,7 @@ static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu) vcpu->arch.mmu.get_cr3 = nested_svm_get_tdp_cr3; vcpu->arch.mmu.get_pdptr = nested_svm_get_tdp_pdptr; vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit; - vcpu->arch.mmu.shadow_root_level = get_npt_level(); + vcpu->arch.mmu.shadow_root_level = get_npt_level(vcpu); reset_shadow_zero_bits_mask(vcpu, &vcpu->arch.mmu); vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu; } @@ -3147,6 +3171,13 @@ static int stgi_interception(struct vcpu_svm *svm) if (nested_svm_check_permissions(svm)) return 1; + /* + * If VGIF is enabled, the STGI intercept is only added to + * detect the opening of the NMI window; remove it now. + */ + if (vgif_enabled(svm)) + clr_intercept(svm, INTERCEPT_STGI); + svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; ret = kvm_skip_emulated_instruction(&svm->vcpu); kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); @@ -3744,7 +3775,10 @@ static int interrupt_window_interception(struct vcpu_svm *svm) static int pause_interception(struct vcpu_svm *svm) { - kvm_vcpu_on_spin(&(svm->vcpu)); + struct kvm_vcpu *vcpu = &svm->vcpu; + bool in_kernel = (svm_get_cpl(vcpu) == 0); + + kvm_vcpu_on_spin(vcpu, in_kernel); return 1; } @@ -4228,8 +4262,6 @@ static int handle_exit(struct kvm_vcpu *vcpu) trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM); - vcpu->arch.gpa_available = (exit_code == SVM_EXIT_NPF); - if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE)) vcpu->arch.cr0 = svm->vmcb->save.cr0; if (npt_enabled) @@ -4374,9 +4406,9 @@ static void svm_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set) return; } -static bool svm_get_enable_apicv(void) +static bool svm_get_enable_apicv(struct kvm_vcpu *vcpu) { - return avic; + return avic && irqchip_split(vcpu->kvm); } static void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr) @@ -4393,7 +4425,7 @@ static void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu) struct vcpu_svm *svm = to_svm(vcpu); struct vmcb *vmcb = svm->vmcb; - if (!avic) + if (!kvm_vcpu_apicv_active(&svm->vcpu)) return; vmcb->control.int_ctl &= ~AVIC_ENABLE_MASK; @@ -4682,9 +4714,11 @@ static void enable_irq_window(struct kvm_vcpu *vcpu) * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes * 1, because that's a separate STGI/VMRUN intercept. The next time we * get that intercept, this function will be called again though and - * we'll get the vintr intercept. + * we'll get the vintr intercept. However, if the vGIF feature is + * enabled, the STGI interception will not occur. Enable the irq + * window under the assumption that the hardware will set the GIF. 
*/ - if (gif_set(svm) && nested_svm_intr(svm)) { + if ((vgif_enabled(svm) || gif_set(svm)) && nested_svm_intr(svm)) { svm_set_vintr(svm); svm_inject_irq(svm, 0x0); } @@ -4698,8 +4732,11 @@ static void enable_nmi_window(struct kvm_vcpu *vcpu) == HF_NMI_MASK) return; /* IRET will cause a vm exit */ - if ((svm->vcpu.arch.hflags & HF_GIF_MASK) == 0) + if (!gif_set(svm)) { + if (vgif_enabled(svm)) + set_intercept(svm, INTERCEPT_STGI); return; /* STGI will cause a vm exit */ + } if (svm->nested.exit_required) return; /* we're not going to run the guest yet */ @@ -5071,17 +5108,14 @@ static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) static void svm_cpuid_update(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); - struct kvm_cpuid_entry2 *entry; /* Update nrips enabled cache */ - svm->nrips_enabled = !!guest_cpuid_has_nrips(&svm->vcpu); + svm->nrips_enabled = !!guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS); if (!kvm_vcpu_apicv_active(vcpu)) return; - entry = kvm_find_cpuid_entry(vcpu, 1, 0); - if (entry) - entry->ecx &= ~bit(X86_FEATURE_X2APIC); + guest_cpuid_clear(vcpu, X86_FEATURE_X2APIC); } static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry) @@ -5279,6 +5313,7 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu, */ if (info->rep_prefix != REPE_PREFIX) goto out; + break; case SVM_EXIT_IOIO: { u64 exit_info; u32 bytes; diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h index 0a6cc6754ec5..8a202c49e2a0 100644 --- a/arch/x86/kvm/trace.h +++ b/arch/x86/kvm/trace.h @@ -151,8 +151,8 @@ TRACE_EVENT(kvm_fast_mmio, */ TRACE_EVENT(kvm_cpuid, TP_PROTO(unsigned int function, unsigned long rax, unsigned long rbx, - unsigned long rcx, unsigned long rdx), - TP_ARGS(function, rax, rbx, rcx, rdx), + unsigned long rcx, unsigned long rdx, bool found), + TP_ARGS(function, rax, rbx, rcx, rdx, found), TP_STRUCT__entry( __field( unsigned int, function ) @@ -160,6 +160,7 @@ TRACE_EVENT(kvm_cpuid, __field( unsigned long, rbx ) __field( unsigned long, rcx ) __field( unsigned long, rdx ) + __field( bool, found ) ), TP_fast_assign( @@ -168,11 +169,13 @@ TRACE_EVENT(kvm_cpuid, __entry->rbx = rbx; __entry->rcx = rcx; __entry->rdx = rdx; + __entry->found = found; ), - TP_printk("func %x rax %lx rbx %lx rcx %lx rdx %lx", + TP_printk("func %x rax %lx rbx %lx rcx %lx rdx %lx, cpuid entry %s", __entry->function, __entry->rax, - __entry->rbx, __entry->rcx, __entry->rdx) + __entry->rbx, __entry->rcx, __entry->rdx, + __entry->found ? 
"found" : "not found") ); #define AREG(x) { APIC_##x, "APIC_" #x } diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 70b90c0810d0..a2b804e10c95 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -122,7 +122,7 @@ module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO); (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE) #define KVM_CR4_GUEST_OWNED_BITS \ (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \ - | X86_CR4_OSXMMEXCPT | X86_CR4_TSD) + | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_TSD) #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE) #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE) @@ -200,6 +200,8 @@ struct loaded_vmcs { int cpu; bool launched; bool nmi_known_unmasked; + unsigned long vmcs_host_cr3; /* May not match real cr3 */ + unsigned long vmcs_host_cr4; /* May not match real cr4 */ struct list_head loaded_vmcss_on_cpu_link; }; @@ -243,11 +245,13 @@ struct __packed vmcs12 { u64 virtual_apic_page_addr; u64 apic_access_addr; u64 posted_intr_desc_addr; + u64 vm_function_control; u64 ept_pointer; u64 eoi_exit_bitmap0; u64 eoi_exit_bitmap1; u64 eoi_exit_bitmap2; u64 eoi_exit_bitmap3; + u64 eptp_list_address; u64 xss_exit_bitmap; u64 guest_physical_address; u64 vmcs_link_pointer; @@ -481,6 +485,7 @@ struct nested_vmx { u64 nested_vmx_cr4_fixed0; u64 nested_vmx_cr4_fixed1; u64 nested_vmx_vmcs_enum; + u64 nested_vmx_vmfunc_controls; }; #define POSTED_INTR_ON 0 @@ -573,6 +578,8 @@ struct vcpu_vmx { #endif u32 vm_entry_controls_shadow; u32 vm_exit_controls_shadow; + u32 secondary_exec_control; + /* * loaded_vmcs points to the VMCS currently used in this vcpu. For a * non-nested (L1) guest, it always points to vmcs01. For a nested @@ -595,8 +602,6 @@ struct vcpu_vmx { int gs_ldt_reload_needed; int fs_reload_needed; u64 msr_host_bndcfgs; - unsigned long vmcs_host_cr3; /* May not match real cr3 */ - unsigned long vmcs_host_cr4; /* May not match real cr4 */ } host_state; struct { int vm86_active; @@ -761,11 +766,13 @@ static const unsigned short vmcs_field_to_offset_table[] = { FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr), FIELD64(APIC_ACCESS_ADDR, apic_access_addr), FIELD64(POSTED_INTR_DESC_ADDR, posted_intr_desc_addr), + FIELD64(VM_FUNCTION_CONTROL, vm_function_control), FIELD64(EPT_POINTER, ept_pointer), FIELD64(EOI_EXIT_BITMAP0, eoi_exit_bitmap0), FIELD64(EOI_EXIT_BITMAP1, eoi_exit_bitmap1), FIELD64(EOI_EXIT_BITMAP2, eoi_exit_bitmap2), FIELD64(EOI_EXIT_BITMAP3, eoi_exit_bitmap3), + FIELD64(EPTP_LIST_ADDRESS, eptp_list_address), FIELD64(XSS_EXIT_BITMAP, xss_exit_bitmap), FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address), FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer), @@ -889,25 +896,6 @@ static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu) return to_vmx(vcpu)->nested.cached_vmcs12; } -static struct page *nested_get_page(struct kvm_vcpu *vcpu, gpa_t addr) -{ - struct page *page = kvm_vcpu_gfn_to_page(vcpu, addr >> PAGE_SHIFT); - if (is_error_page(page)) - return NULL; - - return page; -} - -static void nested_release_page(struct page *page) -{ - kvm_release_page_dirty(page); -} - -static void nested_release_page_clean(struct page *page) -{ - kvm_release_page_clean(page); -} - static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu); static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu); static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa); @@ -1212,6 +1200,16 @@ static inline bool cpu_has_vmx_ept_4levels(void) return 
vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT; } +static inline bool cpu_has_vmx_ept_mt_wb(void) +{ + return vmx_capability.ept & VMX_EPTP_WB_BIT; +} + +static inline bool cpu_has_vmx_ept_5levels(void) +{ + return vmx_capability.ept & VMX_EPT_PAGE_WALK_5_BIT; +} + static inline bool cpu_has_vmx_ept_ad_bits(void) { return vmx_capability.ept & VMX_EPT_AD_BIT; @@ -1317,6 +1315,12 @@ static inline bool cpu_has_vmx_tsc_scaling(void) SECONDARY_EXEC_TSC_SCALING; } +static inline bool cpu_has_vmx_vmfunc(void) +{ + return vmcs_config.cpu_based_2nd_exec_ctrl & + SECONDARY_EXEC_ENABLE_VMFUNC; +} + static inline bool report_flexpriority(void) { return flexpriority_enabled; @@ -1357,8 +1361,7 @@ static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12) static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12) { - return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES) && - vmx_xsaves_supported(); + return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES); } static inline bool nested_cpu_has_pml(struct vmcs12 *vmcs12) @@ -1391,6 +1394,18 @@ static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12) return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR; } +static inline bool nested_cpu_has_vmfunc(struct vmcs12 *vmcs12) +{ + return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VMFUNC); +} + +static inline bool nested_cpu_has_eptp_switching(struct vmcs12 *vmcs12) +{ + return nested_cpu_has_vmfunc(vmcs12) && + (vmcs12->vm_function_control & + VMX_VMFUNC_EPTP_SWITCHING); +} + static inline bool is_nmi(u32 intr_info) { return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK)) @@ -2187,46 +2202,44 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu) struct pi_desc old, new; unsigned int dest; - if (!kvm_arch_has_assigned_device(vcpu->kvm) || - !irq_remapping_cap(IRQ_POSTING_CAP) || - !kvm_vcpu_apicv_active(vcpu)) + /* + * In case of hot-plug or hot-unplug, we may have to undo + * vmx_vcpu_pi_put even if there is no assigned device. And we + * always keep PI.NDST up to date for simplicity: it makes the + * code easier, and CPU migration is not a fast path. + */ + if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu) return; + /* + * First handle the simple case where no cmpxchg is necessary; just + * allow posting non-urgent interrupts. + * + * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change + * PI.NDST: pi_post_block will do it for us and the wakeup_handler + * expects the VCPU to be on the blocked_vcpu_list that matches + * PI.NDST. + */ + if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR || + vcpu->cpu == cpu) { + pi_clear_sn(pi_desc); + return; + } + + /* The full case. */ do { old.control = new.control = pi_desc->control; - /* - * If 'nv' field is POSTED_INTR_WAKEUP_VECTOR, there - * are two possible cases: - * 1. After running 'pre_block', context switch - * happened. For this case, 'sn' was set in - * vmx_vcpu_put(), so we need to clear it here. - * 2. After running 'pre_block', we were blocked, - * and woken up by some other guy. For this case, - * we don't need to do anything, 'pi_post_block' - * will do everything for us. However, we cannot - * check whether it is case #1 or case #2 here - * (maybe, not needed), so we also clear sn here, - * I think it is not a big deal. 
- */ - if (pi_desc->nv != POSTED_INTR_WAKEUP_VECTOR) { - if (vcpu->cpu != cpu) { - dest = cpu_physical_id(cpu); + dest = cpu_physical_id(cpu); - if (x2apic_enabled()) - new.ndst = dest; - else - new.ndst = (dest << 8) & 0xFF00; - } + if (x2apic_enabled()) + new.ndst = dest; + else + new.ndst = (dest << 8) & 0xFF00; - /* set 'NV' to 'notification vector' */ - new.nv = POSTED_INTR_VECTOR; - } - - /* Allow posting non-urgent interrupts */ new.sn = 0; - } while (cmpxchg(&pi_desc->control, old.control, - new.control) != old.control); + } while (cmpxchg64(&pi_desc->control, old.control, + new.control) != old.control); } static void decache_tsc_multiplier(struct vcpu_vmx *vmx) @@ -2450,15 +2463,14 @@ static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu, * KVM wants to inject page-faults which it got to the guest. This function * checks whether in a nested guest, we need to inject them to L1 or L2. */ -static int nested_vmx_check_exception(struct kvm_vcpu *vcpu) +static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); unsigned int nr = vcpu->arch.exception.nr; if (nr == PF_VECTOR) { if (vcpu->arch.exception.nested_apf) { - nested_vmx_inject_exception_vmexit(vcpu, - vcpu->arch.apf.nested_apf_token); + *exit_qual = vcpu->arch.apf.nested_apf_token; return 1; } /* @@ -2472,16 +2484,15 @@ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu) */ if (nested_vmx_is_page_fault_vmexit(vmcs12, vcpu->arch.exception.error_code)) { - nested_vmx_inject_exception_vmexit(vcpu, vcpu->arch.cr2); + *exit_qual = vcpu->arch.cr2; return 1; } } else { - unsigned long exit_qual = 0; - if (nr == DB_VECTOR) - exit_qual = vcpu->arch.dr6; - if (vmcs12->exception_bitmap & (1u << nr)) { - nested_vmx_inject_exception_vmexit(vcpu, exit_qual); + if (nr == DB_VECTOR) + *exit_qual = vcpu->arch.dr6; + else + *exit_qual = 0; return 1; } } @@ -2494,14 +2505,9 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu) struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned nr = vcpu->arch.exception.nr; bool has_error_code = vcpu->arch.exception.has_error_code; - bool reinject = vcpu->arch.exception.reinject; u32 error_code = vcpu->arch.exception.error_code; u32 intr_info = nr | INTR_INFO_VALID_MASK; - if (!reinject && is_guest_mode(vcpu) && - nested_vmx_check_exception(vcpu)) - return; - if (has_error_code) { vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code); intr_info |= INTR_INFO_DELIVER_CODE_MASK; @@ -2600,7 +2606,7 @@ static void setup_msrs(struct vcpu_vmx *vmx) if (index >= 0) move_msr_up(vmx, index, save_nmsrs++); index = __find_msr_index(vmx, MSR_TSC_AUX); - if (index >= 0 && guest_cpuid_has_rdtscp(&vmx->vcpu)) + if (index >= 0 && guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP)) move_msr_up(vmx, index, save_nmsrs++); /* * MSR_STAR is only needed on long mode guests, and only @@ -2660,12 +2666,6 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) } } -static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu) -{ - struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0); - return best && (best->ecx & (1 << (X86_FEATURE_VMX & 31))); -} - /* * nested_vmx_allowed() checks whether a guest should be allowed to use VMX * instructions and MSRs (i.e., nested VMX). 
Nested VMX is disabled for @@ -2674,7 +2674,7 @@ static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu) */ static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu) { - return nested && guest_cpuid_has_vmx(vcpu); + return nested && guest_cpuid_has(vcpu, X86_FEATURE_VMX); } /* @@ -2797,21 +2797,21 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx) vmx->nested.nested_vmx_procbased_ctls_low &= ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING); - /* secondary cpu-based controls */ + /* + * secondary cpu-based controls. Do not include those that + * depend on CPUID bits, they are added later by vmx_cpuid_update. + */ rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, vmx->nested.nested_vmx_secondary_ctls_low, vmx->nested.nested_vmx_secondary_ctls_high); vmx->nested.nested_vmx_secondary_ctls_low = 0; vmx->nested.nested_vmx_secondary_ctls_high &= - SECONDARY_EXEC_RDRAND | SECONDARY_EXEC_RDSEED | SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | - SECONDARY_EXEC_RDTSCP | SECONDARY_EXEC_DESC | SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | SECONDARY_EXEC_APIC_REGISTER_VIRT | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | - SECONDARY_EXEC_WBINVD_EXITING | - SECONDARY_EXEC_XSAVES; + SECONDARY_EXEC_WBINVD_EXITING; if (enable_ept) { /* nested EPT: emulate EPT also to L1 */ @@ -2834,6 +2834,17 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx) } else vmx->nested.nested_vmx_ept_caps = 0; + if (cpu_has_vmx_vmfunc()) { + vmx->nested.nested_vmx_secondary_ctls_high |= + SECONDARY_EXEC_ENABLE_VMFUNC; + /* + * Advertise EPTP switching unconditionally + * since we emulate it + */ + vmx->nested.nested_vmx_vmfunc_controls = + VMX_VMFUNC_EPTP_SWITCHING; + } + /* * Old versions of KVM use the single-context version without * checking for support, so declare that it is supported even @@ -3203,6 +3214,9 @@ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) *pdata = vmx->nested.nested_vmx_ept_caps | ((u64)vmx->nested.nested_vmx_vpid_caps << 32); break; + case MSR_IA32_VMX_VMFUNC: + *pdata = vmx->nested.nested_vmx_vmfunc_controls; + break; default: return 1; } @@ -3256,7 +3270,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) break; case MSR_IA32_BNDCFGS: if (!kvm_mpx_supported() || - (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu))) + (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_MPX))) return 1; msr_info->data = vmcs_read64(GUEST_BNDCFGS); break; @@ -3280,7 +3295,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) msr_info->data = vcpu->arch.ia32_xss; break; case MSR_TSC_AUX: - if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated) + if (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP)) return 1; /* Otherwise falls through */ default: @@ -3339,9 +3355,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) break; case MSR_IA32_BNDCFGS: if (!kvm_mpx_supported() || - (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu))) + (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_MPX))) return 1; - if (is_noncanonical_address(data & PAGE_MASK) || + if (is_noncanonical_address(data & PAGE_MASK, vcpu) || (data & MSR_IA32_BNDCFGS_RSVD)) return 1; vmcs_write64(GUEST_BNDCFGS, data); @@ -3402,7 +3419,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) clear_atomic_switch_msr(vmx, MSR_IA32_XSS); break; case MSR_TSC_AUX: - if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated) + if (!msr_info->host_initiated && + 
!guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP)) return 1; /* Check reserved bit, higher 32 bits should be zero */ if ((data >> 32) != 0) @@ -3639,8 +3657,11 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | SECONDARY_EXEC_SHADOW_VMCS | SECONDARY_EXEC_XSAVES | + SECONDARY_EXEC_RDSEED | + SECONDARY_EXEC_RDRAND | SECONDARY_EXEC_ENABLE_PML | - SECONDARY_EXEC_TSC_SCALING; + SECONDARY_EXEC_TSC_SCALING | + SECONDARY_EXEC_ENABLE_VMFUNC; if (adjust_vmx_controls(min2, opt2, MSR_IA32_VMX_PROCBASED_CTLS2, &_cpu_based_2nd_exec_control) < 0) @@ -4272,16 +4293,22 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) vmx->emulation_required = emulation_required(vcpu); } +static int get_ept_level(struct kvm_vcpu *vcpu) +{ + if (cpu_has_vmx_ept_5levels() && (cpuid_maxphyaddr(vcpu) > 48)) + return 5; + return 4; +} + static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa) { - u64 eptp; + u64 eptp = VMX_EPTP_MT_WB; + + eptp |= (get_ept_level(vcpu) == 5) ? VMX_EPTP_PWL_5 : VMX_EPTP_PWL_4; - /* TODO write the value reading from MSR */ - eptp = VMX_EPT_DEFAULT_MT | - VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT; if (enable_ept_ad_bits && (!is_guest_mode(vcpu) || nested_ept_ad_enabled(vcpu))) - eptp |= VMX_EPT_AD_ENABLE_BIT; + eptp |= VMX_EPTP_AD_ENABLE_BIT; eptp |= (root_hpa & PAGE_MASK); return eptp; @@ -4983,7 +5010,7 @@ static void vmx_disable_intercept_msr_x2apic(u32 msr, int type, bool apicv_activ } } -static bool vmx_get_enable_apicv(void) +static bool vmx_get_enable_apicv(struct kvm_vcpu *vcpu) { return enable_apicv; } @@ -5048,21 +5075,30 @@ static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu, int pi_vec = nested ? POSTED_INTR_NESTED_VECTOR : POSTED_INTR_VECTOR; if (vcpu->mode == IN_GUEST_MODE) { - struct vcpu_vmx *vmx = to_vmx(vcpu); - /* - * Currently, we don't support urgent interrupt, - * all interrupts are recognized as non-urgent - * interrupt, so we cannot post interrupts when - * 'SN' is set. + * The vector of interrupt to be delivered to vcpu had + * been set in PIR before this function. * - * If the vcpu is in guest mode, it means it is - * running instead of being scheduled out and - * waiting in the run queue, and that's the only - * case when 'SN' is set currently, warning if - * 'SN' is set. + * Following cases will be reached in this block, and + * we always send a notification event in all cases as + * explained below. + * + * Case 1: vcpu keeps in non-root mode. Sending a + * notification event posts the interrupt to vcpu. + * + * Case 2: vcpu exits to root mode and is still + * runnable. PIR will be synced to vIRR before the + * next vcpu entry. Sending a notification event in + * this case has no effect, as vcpu is not in root + * mode. + * + * Case 3: vcpu exits to root mode and is blocked. + * vcpu_block() has already synced PIR to vIRR and + * never blocks vcpu if vIRR is not cleared. Therefore, + * a blocked vcpu here does not wait for any requested + * interrupts in PIR, and sending a notification event + * which has no effect is safe here. */ - WARN_ON_ONCE(pi_test_sn(&vmx->pi_desc)); apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec); return true; @@ -5140,12 +5176,12 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx) */ cr3 = __read_cr3(); vmcs_writel(HOST_CR3, cr3); /* 22.2.3 FIXME: shadow tables */ - vmx->host_state.vmcs_host_cr3 = cr3; + vmx->loaded_vmcs->vmcs_host_cr3 = cr3; /* Save the most likely value for this task's CR4 in the VMCS. 
*/ cr4 = cr4_read_shadow(); vmcs_writel(HOST_CR4, cr4); /* 22.2.3, 22.2.5 */ - vmx->host_state.vmcs_host_cr4 = cr4; + vmx->loaded_vmcs->vmcs_host_cr4 = cr4; vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ #ifdef CONFIG_X86_64 @@ -5163,7 +5199,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx) vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */ - native_store_idt(&dt); + store_idt(&dt); vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */ vmx->host_idt_base = dt.address; @@ -5243,10 +5279,24 @@ static u32 vmx_exec_control(struct vcpu_vmx *vmx) return exec_control; } -static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx) +static bool vmx_rdrand_supported(void) { + return vmcs_config.cpu_based_2nd_exec_ctrl & + SECONDARY_EXEC_RDRAND; +} + +static bool vmx_rdseed_supported(void) +{ + return vmcs_config.cpu_based_2nd_exec_ctrl & + SECONDARY_EXEC_RDSEED; +} + +static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx) +{ + struct kvm_vcpu *vcpu = &vmx->vcpu; + u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl; - if (!cpu_need_virtualize_apic_accesses(&vmx->vcpu)) + if (!cpu_need_virtualize_apic_accesses(vcpu)) exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; if (vmx->vpid == 0) exec_control &= ~SECONDARY_EXEC_ENABLE_VPID; @@ -5260,7 +5310,7 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx) exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST; if (!ple_gap) exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING; - if (!kvm_vcpu_apicv_active(&vmx->vcpu)) + if (!kvm_vcpu_apicv_active(vcpu)) exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; @@ -5274,7 +5324,92 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx) if (!enable_pml) exec_control &= ~SECONDARY_EXEC_ENABLE_PML; - return exec_control; + if (vmx_xsaves_supported()) { + /* Exposing XSAVES only when XSAVE is exposed */ + bool xsaves_enabled = + guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && + guest_cpuid_has(vcpu, X86_FEATURE_XSAVES); + + if (!xsaves_enabled) + exec_control &= ~SECONDARY_EXEC_XSAVES; + + if (nested) { + if (xsaves_enabled) + vmx->nested.nested_vmx_secondary_ctls_high |= + SECONDARY_EXEC_XSAVES; + else + vmx->nested.nested_vmx_secondary_ctls_high &= + ~SECONDARY_EXEC_XSAVES; + } + } + + if (vmx_rdtscp_supported()) { + bool rdtscp_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP); + if (!rdtscp_enabled) + exec_control &= ~SECONDARY_EXEC_RDTSCP; + + if (nested) { + if (rdtscp_enabled) + vmx->nested.nested_vmx_secondary_ctls_high |= + SECONDARY_EXEC_RDTSCP; + else + vmx->nested.nested_vmx_secondary_ctls_high &= + ~SECONDARY_EXEC_RDTSCP; + } + } + + if (vmx_invpcid_supported()) { + /* Exposing INVPCID only when PCID is exposed */ + bool invpcid_enabled = + guest_cpuid_has(vcpu, X86_FEATURE_INVPCID) && + guest_cpuid_has(vcpu, X86_FEATURE_PCID); + + if (!invpcid_enabled) { + exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID; + guest_cpuid_clear(vcpu, X86_FEATURE_INVPCID); + } + + if (nested) { + if (invpcid_enabled) + vmx->nested.nested_vmx_secondary_ctls_high |= + SECONDARY_EXEC_ENABLE_INVPCID; + else + vmx->nested.nested_vmx_secondary_ctls_high &= + ~SECONDARY_EXEC_ENABLE_INVPCID; + } + } + + if (vmx_rdrand_supported()) { + bool rdrand_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDRAND); + if (rdrand_enabled) + exec_control &= ~SECONDARY_EXEC_RDRAND; + + if (nested) { + if (rdrand_enabled) + 
vmx->nested.nested_vmx_secondary_ctls_high |= + SECONDARY_EXEC_RDRAND; + else + vmx->nested.nested_vmx_secondary_ctls_high &= + ~SECONDARY_EXEC_RDRAND; + } + } + + if (vmx_rdseed_supported()) { + bool rdseed_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDSEED); + if (rdseed_enabled) + exec_control &= ~SECONDARY_EXEC_RDSEED; + + if (nested) { + if (rdseed_enabled) + vmx->nested.nested_vmx_secondary_ctls_high |= + SECONDARY_EXEC_RDSEED; + else + vmx->nested.nested_vmx_secondary_ctls_high &= + ~SECONDARY_EXEC_RDSEED; + } + } + + vmx->secondary_exec_control = exec_control; } static void ept_set_mmio_spte_mask(void) @@ -5318,8 +5453,9 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx_exec_control(vmx)); if (cpu_has_secondary_exec_ctrls()) { + vmx_compute_secondary_exec_control(vmx); vmcs_write32(SECONDARY_VM_EXEC_CONTROL, - vmx_secondary_exec_control(vmx)); + vmx->secondary_exec_control); } if (kvm_vcpu_apicv_active(&vmx->vcpu)) { @@ -5357,6 +5493,9 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */ #endif + if (cpu_has_vmx_vmfunc()) + vmcs_write64(VM_FUNCTION_CONTROL, 0); + vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0); vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host)); @@ -5835,6 +5974,7 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu) static int handle_triple_fault(struct kvm_vcpu *vcpu) { vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; + vcpu->mmio_needed = 0; return 0; } @@ -6330,7 +6470,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu) { unsigned long exit_qualification; gpa_t gpa; - u32 error_code; + u64 error_code; exit_qualification = vmcs_readl(EXIT_QUALIFICATION); @@ -6362,9 +6502,10 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu) EPT_VIOLATION_EXECUTABLE)) ? PFERR_PRESENT_MASK : 0; - vcpu->arch.gpa_available = true; - vcpu->arch.exit_qualification = exit_qualification; + error_code |= (exit_qualification & 0x100) != 0 ? + PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK; + vcpu->arch.exit_qualification = exit_qualification; return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0); } @@ -6373,23 +6514,20 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu) int ret; gpa_t gpa; + /* + * A nested guest cannot optimize MMIO vmexits, because we have an + * nGPA here instead of the required GPA. 
+ */ gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); - if (!kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) { + if (!is_guest_mode(vcpu) && + !kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) { trace_kvm_fast_mmio(gpa); return kvm_skip_emulated_instruction(vcpu); } - ret = handle_mmio_page_fault(vcpu, gpa, true); - vcpu->arch.gpa_available = true; - if (likely(ret == RET_MMIO_PF_EMULATE)) - return x86_emulate_instruction(vcpu, gpa, 0, NULL, 0) == - EMULATE_DONE; - - if (unlikely(ret == RET_MMIO_PF_INVALID)) - return kvm_mmu_page_fault(vcpu, gpa, 0, NULL, 0); - - if (unlikely(ret == RET_MMIO_PF_RETRY)) - return 1; + ret = kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0); + if (ret >= 0) + return ret; /* It is the real ept misconfig */ WARN_ON(1); @@ -6611,7 +6749,8 @@ static __init int hardware_setup(void) init_vmcs_shadow_fields(); if (!cpu_has_vmx_ept() || - !cpu_has_vmx_ept_4levels()) { + !cpu_has_vmx_ept_4levels() || + !cpu_has_vmx_ept_mt_wb()) { enable_ept = 0; enable_unrestricted_guest = 0; enable_ept_ad_bits = 0; @@ -6754,7 +6893,13 @@ static int handle_pause(struct kvm_vcpu *vcpu) if (ple_gap) grow_ple_window(vcpu); - kvm_vcpu_on_spin(vcpu); + /* + * Intel sdm vol3 ch-25.1.3 says: The "PAUSE-loop exiting" + * VM-execution control is ignored if CPL > 0. OTOH, KVM + * never set PAUSE_EXITING and just set PLE if supported, + * so the vcpu must be CPL=0 if it gets a PAUSE exit. + */ + kvm_vcpu_on_spin(vcpu, true); return kvm_skip_emulated_instruction(vcpu); } @@ -6769,6 +6914,12 @@ static int handle_mwait(struct kvm_vcpu *vcpu) return handle_nop(vcpu); } +static int handle_invalid_op(struct kvm_vcpu *vcpu) +{ + kvm_queue_exception(vcpu, UD_VECTOR); + return 1; +} + static int handle_monitor_trap(struct kvm_vcpu *vcpu) { return 1; @@ -6985,7 +7136,7 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu, * non-canonical form. This is the only check on the memory * destination for long mode! 
*/ - exn = is_noncanonical_address(*ret); + exn = is_noncanonical_address(*ret, vcpu); } else if (is_protmode(vcpu)) { /* Protected mode: apply checks for segment validity in the * following order: @@ -7149,19 +7300,19 @@ static int handle_vmon(struct kvm_vcpu *vcpu) return kvm_skip_emulated_instruction(vcpu); } - page = nested_get_page(vcpu, vmptr); - if (page == NULL) { + page = kvm_vcpu_gpa_to_page(vcpu, vmptr); + if (is_error_page(page)) { nested_vmx_failInvalid(vcpu); return kvm_skip_emulated_instruction(vcpu); } if (*(u32 *)kmap(page) != VMCS12_REVISION) { kunmap(page); - nested_release_page_clean(page); + kvm_release_page_clean(page); nested_vmx_failInvalid(vcpu); return kvm_skip_emulated_instruction(vcpu); } kunmap(page); - nested_release_page_clean(page); + kvm_release_page_clean(page); vmx->nested.vmxon_ptr = vmptr; ret = enter_vmx_operation(vcpu); @@ -7242,16 +7393,16 @@ static void free_nested(struct vcpu_vmx *vmx) kfree(vmx->nested.cached_vmcs12); /* Unpin physical memory we referred to in current vmcs02 */ if (vmx->nested.apic_access_page) { - nested_release_page(vmx->nested.apic_access_page); + kvm_release_page_dirty(vmx->nested.apic_access_page); vmx->nested.apic_access_page = NULL; } if (vmx->nested.virtual_apic_page) { - nested_release_page(vmx->nested.virtual_apic_page); + kvm_release_page_dirty(vmx->nested.virtual_apic_page); vmx->nested.virtual_apic_page = NULL; } if (vmx->nested.pi_desc_page) { kunmap(vmx->nested.pi_desc_page); - nested_release_page(vmx->nested.pi_desc_page); + kvm_release_page_dirty(vmx->nested.pi_desc_page); vmx->nested.pi_desc_page = NULL; vmx->nested.pi_desc = NULL; } @@ -7618,15 +7769,15 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu) if (vmx->nested.current_vmptr != vmptr) { struct vmcs12 *new_vmcs12; struct page *page; - page = nested_get_page(vcpu, vmptr); - if (page == NULL) { + page = kvm_vcpu_gpa_to_page(vcpu, vmptr); + if (is_error_page(page)) { nested_vmx_failInvalid(vcpu); return kvm_skip_emulated_instruction(vcpu); } new_vmcs12 = kmap(page); if (new_vmcs12->revision_id != VMCS12_REVISION) { kunmap(page); - nested_release_page_clean(page); + kvm_release_page_clean(page); nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); return kvm_skip_emulated_instruction(vcpu); @@ -7639,7 +7790,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu) */ memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE); kunmap(page); - nested_release_page_clean(page); + kvm_release_page_clean(page); set_current_vmptr(vmx, vmptr); } @@ -7790,7 +7941,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) switch (type) { case VMX_VPID_EXTENT_INDIVIDUAL_ADDR: - if (is_noncanonical_address(operand.gla)) { + if (is_noncanonical_address(operand.gla, vcpu)) { nested_vmx_failValid(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); return kvm_skip_emulated_instruction(vcpu); @@ -7847,6 +7998,124 @@ static int handle_preemption_timer(struct kvm_vcpu *vcpu) return 1; } +static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + int maxphyaddr = cpuid_maxphyaddr(vcpu); + + /* Check for memory type validity */ + switch (address & VMX_EPTP_MT_MASK) { + case VMX_EPTP_MT_UC: + if (!(vmx->nested.nested_vmx_ept_caps & VMX_EPTP_UC_BIT)) + return false; + break; + case VMX_EPTP_MT_WB: + if (!(vmx->nested.nested_vmx_ept_caps & VMX_EPTP_WB_BIT)) + return false; + break; + default: + return false; + } + + /* only 4 levels page-walk length are valid */ + if ((address & VMX_EPTP_PWL_MASK) != VMX_EPTP_PWL_4) + return 
false; + + /* Reserved bits should not be set */ + if (address >> maxphyaddr || ((address >> 7) & 0x1f)) + return false; + + /* AD, if set, should be supported */ + if (address & VMX_EPTP_AD_ENABLE_BIT) { + if (!(vmx->nested.nested_vmx_ept_caps & VMX_EPT_AD_BIT)) + return false; + } + + return true; +} + +static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12) +{ + u32 index = vcpu->arch.regs[VCPU_REGS_RCX]; + u64 address; + bool accessed_dirty; + struct kvm_mmu *mmu = vcpu->arch.walk_mmu; + + if (!nested_cpu_has_eptp_switching(vmcs12) || + !nested_cpu_has_ept(vmcs12)) + return 1; + + if (index >= VMFUNC_EPTP_ENTRIES) + return 1; + + + if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT, + &address, index * 8, 8)) + return 1; + + accessed_dirty = !!(address & VMX_EPTP_AD_ENABLE_BIT); + + /* + * If the (L2) guest does a vmfunc to the currently + * active ept pointer, we don't have to do anything else + */ + if (vmcs12->ept_pointer != address) { + if (!valid_ept_address(vcpu, address)) + return 1; + + kvm_mmu_unload(vcpu); + mmu->ept_ad = accessed_dirty; + mmu->base_role.ad_disabled = !accessed_dirty; + vmcs12->ept_pointer = address; + /* + * TODO: Check what's the correct approach in case + * mmu reload fails. Currently, we just let the next + * reload potentially fail + */ + kvm_mmu_reload(vcpu); + } + + return 0; +} + +static int handle_vmfunc(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + struct vmcs12 *vmcs12; + u32 function = vcpu->arch.regs[VCPU_REGS_RAX]; + + /* + * VMFUNC is only supported for nested guests, but we always enable the + * secondary control for simplicity; for non-nested mode, fake that we + * didn't by injecting #UD. + */ + if (!is_guest_mode(vcpu)) { + kvm_queue_exception(vcpu, UD_VECTOR); + return 1; + } + + vmcs12 = get_vmcs12(vcpu); + if ((vmcs12->vm_function_control & (1 << function)) == 0) + goto fail; + + switch (function) { + case 0: + if (nested_vmx_eptp_switching(vcpu, vmcs12)) + goto fail; + break; + default: + goto fail; + } + return kvm_skip_emulated_instruction(vcpu); + +fail: + nested_vmx_vmexit(vcpu, vmx->exit_reason, + vmcs_read32(VM_EXIT_INTR_INFO), + vmcs_readl(EXIT_QUALIFICATION)); + return 1; +} + /* * The exit handlers return 1 if the exit was handled fully and guest execution * may resume. 
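/*
 * A minimal, self-contained sketch of the EPTP validation performed by
 * valid_ept_address() above, assuming the SDM layout of the EPT pointer
 * (bits 2:0 memory type, bits 5:3 page-walk length minus one, bit 6 A/D
 * enable, bits 11:7 reserved).  The EPTP_* constants, struct ept_caps and
 * eptp_is_valid() are illustrative stand-ins, not kernel symbols.
 */
#include <stdbool.h>
#include <stdint.h>

#define EPTP_MT_MASK	0x7ull		/* memory type field */
#define EPTP_MT_UC	0x0ull
#define EPTP_MT_WB	0x6ull
#define EPTP_PWL_MASK	0x38ull		/* page-walk length field */
#define EPTP_PWL_4	0x18ull		/* (4 - 1) << 3 */
#define EPTP_AD_ENABLE	(1ull << 6)

struct ept_caps {
	bool uc, wb, ad;		/* supported memory types / A and D bits */
	int  maxphyaddr;		/* e.g. 46 on typical hardware */
};

static bool eptp_is_valid(uint64_t eptp, const struct ept_caps *caps)
{
	uint64_t mt = eptp & EPTP_MT_MASK;

	/* memory type must be UC or WB, and the CPU must support it */
	if ((mt != EPTP_MT_UC && mt != EPTP_MT_WB) ||
	    (mt == EPTP_MT_UC && !caps->uc) ||
	    (mt == EPTP_MT_WB && !caps->wb))
		return false;

	/* only 4-level page walks are accepted */
	if ((eptp & EPTP_PWL_MASK) != EPTP_PWL_4)
		return false;

	/* no bits above MAXPHYADDR, no reserved bits 11:7 */
	if ((eptp >> caps->maxphyaddr) || ((eptp >> 7) & 0x1f))
		return false;

	/* accessed/dirty tracking may only be requested if supported */
	if ((eptp & EPTP_AD_ENABLE) && !caps->ad)
		return false;

	return true;
}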
Otherwise they set the kvm_run parameter to indicate what needs @@ -7894,9 +8163,12 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { [EXIT_REASON_MONITOR_INSTRUCTION] = handle_monitor, [EXIT_REASON_INVEPT] = handle_invept, [EXIT_REASON_INVVPID] = handle_invvpid, + [EXIT_REASON_RDRAND] = handle_invalid_op, + [EXIT_REASON_RDSEED] = handle_invalid_op, [EXIT_REASON_XSAVES] = handle_xsaves, [EXIT_REASON_XRSTORS] = handle_xrstors, [EXIT_REASON_PML_FULL] = handle_pml_full, + [EXIT_REASON_VMFUNC] = handle_vmfunc, [EXIT_REASON_PREEMPTION_TIMER] = handle_preemption_timer, }; @@ -8079,12 +8351,14 @@ static bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason) struct vcpu_vmx *vmx = to_vmx(vcpu); struct vmcs12 *vmcs12 = get_vmcs12(vcpu); - trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason, - vmcs_readl(EXIT_QUALIFICATION), - vmx->idt_vectoring_info, - intr_info, - vmcs_read32(VM_EXIT_INTR_ERROR_CODE), - KVM_ISA_VMX); + if (vmx->nested.nested_run_pending) + return false; + + if (unlikely(vmx->fail)) { + pr_info_ratelimited("%s failed vm entry %x\n", __func__, + vmcs_read32(VM_INSTRUCTION_ERROR)); + return true; + } /* * The host physical addresses of some pages of guest memory @@ -8098,14 +8372,12 @@ static bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason) */ nested_mark_vmcs12_pages_dirty(vcpu); - if (vmx->nested.nested_run_pending) - return false; - - if (unlikely(vmx->fail)) { - pr_info_ratelimited("%s failed vm entry %x\n", __func__, - vmcs_read32(VM_INSTRUCTION_ERROR)); - return true; - } + trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason, + vmcs_readl(EXIT_QUALIFICATION), + vmx->idt_vectoring_info, + intr_info, + vmcs_read32(VM_EXIT_INTR_ERROR_CODE), + KVM_ISA_VMX); switch (exit_reason) { case EXIT_REASON_EXCEPTION_NMI: @@ -8212,6 +8484,10 @@ static bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason) * table is L0's fault. */ return false; + case EXIT_REASON_INVPCID: + return + nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) && + nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING); case EXIT_REASON_WBINVD: return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING); case EXIT_REASON_XSETBV: @@ -8229,6 +8505,9 @@ static bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason) case EXIT_REASON_PML_FULL: /* We emulate PML support to L1. */ return false; + case EXIT_REASON_VMFUNC: + /* VM functions are emulated through L2->L0 vmexits. 
*/ + return false; default: return true; } @@ -8487,7 +8766,6 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu) u32 vectoring_info = vmx->idt_vectoring_info; trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX); - vcpu->arch.gpa_available = false; /* * Flush logged GPAs PML buffer, this will make dirty_bitmap more @@ -8765,7 +9043,6 @@ static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx) static void vmx_handle_external_intr(struct kvm_vcpu *vcpu) { u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); - register void *__sp asm(_ASM_SP); if ((exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK)) == (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR)) { @@ -8794,7 +9071,7 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu) #ifdef CONFIG_X86_64 [sp]"=&r"(tmp), #endif - "+r"(__sp) + ASM_CALL_CONSTRAINT : [entry]"r"(entry), [ss]"i"(__KERNEL_DS), @@ -8994,15 +9271,15 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); cr3 = __get_current_cr3_fast(); - if (unlikely(cr3 != vmx->host_state.vmcs_host_cr3)) { + if (unlikely(cr3 != vmx->loaded_vmcs->vmcs_host_cr3)) { vmcs_writel(HOST_CR3, cr3); - vmx->host_state.vmcs_host_cr3 = cr3; + vmx->loaded_vmcs->vmcs_host_cr3 = cr3; } cr4 = cr4_read_shadow(); - if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) { + if (unlikely(cr4 != vmx->loaded_vmcs->vmcs_host_cr4)) { vmcs_writel(HOST_CR4, cr4); - vmx->host_state.vmcs_host_cr4 = cr4; + vmx->loaded_vmcs->vmcs_host_cr4 = cr4; } /* When single-stepping over STI and MOV SS, we must clear the @@ -9153,12 +9430,6 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) | (1 << VCPU_EXREG_CR3)); vcpu->arch.regs_dirty = 0; - vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); - - vmx->loaded_vmcs->launched = 1; - - vmx->exit_reason = vmcs_read32(VM_EXIT_REASON); - /* * eager fpu is enabled if PKEY is supported and CR4 is switched * back on host, so it is safe to read guest PKRU from current @@ -9180,6 +9451,14 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) kvm_make_request(KVM_REQ_EVENT, vcpu); vmx->nested.nested_run_pending = 0; + vmx->idt_vectoring_info = 0; + + vmx->exit_reason = vmx->fail ? 0xdead : vmcs_read32(VM_EXIT_REASON); + if (vmx->fail || (vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) + return; + + vmx->loaded_vmcs->launched = 1; + vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); vmx_complete_atomic_exit(vmx); vmx_recover_nmi_blocking(vmx); @@ -9310,6 +9589,13 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) vmx->msr_ia32_feature_control_valid_bits = FEATURE_CONTROL_LOCKED; + /* + * Enforce invariant: pi_desc.nv is always either POSTED_INTR_VECTOR + * or POSTED_INTR_WAKEUP_VECTOR. 
+ */ + vmx->pi_desc.nv = POSTED_INTR_VECTOR; + vmx->pi_desc.sn = 1; + return &vmx->vcpu; free_vmcs: @@ -9341,11 +9627,6 @@ static void __init vmx_check_processor_compat(void *rtn) } } -static int get_ept_level(void) -{ - return VMX_EPT_DEFAULT_GAW + 1; -} - static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) { u8 cache; @@ -9462,39 +9743,13 @@ static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu) static void vmx_cpuid_update(struct kvm_vcpu *vcpu) { - struct kvm_cpuid_entry2 *best; struct vcpu_vmx *vmx = to_vmx(vcpu); - u32 secondary_exec_ctl = vmx_secondary_exec_control(vmx); - if (vmx_rdtscp_supported()) { - bool rdtscp_enabled = guest_cpuid_has_rdtscp(vcpu); - if (!rdtscp_enabled) - secondary_exec_ctl &= ~SECONDARY_EXEC_RDTSCP; - - if (nested) { - if (rdtscp_enabled) - vmx->nested.nested_vmx_secondary_ctls_high |= - SECONDARY_EXEC_RDTSCP; - else - vmx->nested.nested_vmx_secondary_ctls_high &= - ~SECONDARY_EXEC_RDTSCP; - } + if (cpu_has_secondary_exec_ctrls()) { + vmx_compute_secondary_exec_control(vmx); + vmcs_set_secondary_exec_control(vmx->secondary_exec_control); } - /* Exposing INVPCID only when PCID is exposed */ - best = kvm_find_cpuid_entry(vcpu, 0x7, 0); - if (vmx_invpcid_supported() && - (!best || !(best->ebx & bit(X86_FEATURE_INVPCID)) || - !guest_cpuid_has_pcid(vcpu))) { - secondary_exec_ctl &= ~SECONDARY_EXEC_ENABLE_INVPCID; - - if (best) - best->ebx &= ~bit(X86_FEATURE_INVPCID); - } - - if (cpu_has_secondary_exec_ctrls()) - vmcs_set_secondary_exec_control(secondary_exec_ctl); - if (nested_vmx_allowed(vcpu)) to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; @@ -9535,7 +9790,7 @@ static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu, static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu) { - return nested_ept_get_cr3(vcpu) & VMX_EPT_AD_ENABLE_BIT; + return nested_ept_get_cr3(vcpu) & VMX_EPTP_AD_ENABLE_BIT; } /* Callbacks for nested_ept_init_mmu_context: */ @@ -9548,18 +9803,15 @@ static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu) static int nested_ept_init_mmu_context(struct kvm_vcpu *vcpu) { - bool wants_ad; - WARN_ON(mmu_is_nested(vcpu)); - wants_ad = nested_ept_ad_enabled(vcpu); - if (wants_ad && !enable_ept_ad_bits) + if (!valid_ept_address(vcpu, nested_ept_get_cr3(vcpu))) return 1; kvm_mmu_unload(vcpu); kvm_init_shadow_ept_mmu(vcpu, to_vmx(vcpu)->nested.nested_vmx_ept_caps & VMX_EPT_EXECUTE_ONLY_BIT, - wants_ad); + nested_ept_ad_enabled(vcpu)); vcpu->arch.mmu.set_cr3 = vmx_set_cr3; vcpu->arch.mmu.get_cr3 = nested_ept_get_cr3; vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault; @@ -9592,7 +9844,8 @@ static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu, WARN_ON(!is_guest_mode(vcpu)); - if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code)) { + if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) && + !to_vmx(vcpu)->nested.nested_run_pending) { vmcs12->vm_exit_intr_error_code = fault->error_code; nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, PF_VECTOR | INTR_TYPE_HARD_EXCEPTION | @@ -9610,6 +9863,7 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { struct vcpu_vmx *vmx = to_vmx(vcpu); + struct page *page; u64 hpa; if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { @@ -9619,17 +9873,19 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu, * physical address remains valid. We keep a reference * to it so we can release it later. 
*/ - if (vmx->nested.apic_access_page) /* shouldn't happen */ - nested_release_page(vmx->nested.apic_access_page); - vmx->nested.apic_access_page = - nested_get_page(vcpu, vmcs12->apic_access_addr); + if (vmx->nested.apic_access_page) { /* shouldn't happen */ + kvm_release_page_dirty(vmx->nested.apic_access_page); + vmx->nested.apic_access_page = NULL; + } + page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr); /* * If translation failed, no matter: This feature asks * to exit when accessing the given address, and if it * can never be accessed, this feature won't do * anything anyway. */ - if (vmx->nested.apic_access_page) { + if (!is_error_page(page)) { + vmx->nested.apic_access_page = page; hpa = page_to_phys(vmx->nested.apic_access_page); vmcs_write64(APIC_ACCESS_ADDR, hpa); } else { @@ -9644,10 +9900,11 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu, } if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) { - if (vmx->nested.virtual_apic_page) /* shouldn't happen */ - nested_release_page(vmx->nested.virtual_apic_page); - vmx->nested.virtual_apic_page = - nested_get_page(vcpu, vmcs12->virtual_apic_page_addr); + if (vmx->nested.virtual_apic_page) { /* shouldn't happen */ + kvm_release_page_dirty(vmx->nested.virtual_apic_page); + vmx->nested.virtual_apic_page = NULL; + } + page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->virtual_apic_page_addr); /* * If translation failed, VM entry will fail because @@ -9662,7 +9919,8 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu, * control. But such a configuration is useless, so * let's keep the code simple. */ - if (vmx->nested.virtual_apic_page) { + if (!is_error_page(page)) { + vmx->nested.virtual_apic_page = page; hpa = page_to_phys(vmx->nested.virtual_apic_page); vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, hpa); } @@ -9671,16 +9929,14 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu, if (nested_cpu_has_posted_intr(vmcs12)) { if (vmx->nested.pi_desc_page) { /* shouldn't happen */ kunmap(vmx->nested.pi_desc_page); - nested_release_page(vmx->nested.pi_desc_page); + kvm_release_page_dirty(vmx->nested.pi_desc_page); + vmx->nested.pi_desc_page = NULL; } - vmx->nested.pi_desc_page = - nested_get_page(vcpu, vmcs12->posted_intr_desc_addr); - vmx->nested.pi_desc = - (struct pi_desc *)kmap(vmx->nested.pi_desc_page); - if (!vmx->nested.pi_desc) { - nested_release_page_clean(vmx->nested.pi_desc_page); + page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->posted_intr_desc_addr); + if (is_error_page(page)) return; - } + vmx->nested.pi_desc_page = page; + vmx->nested.pi_desc = kmap(vmx->nested.pi_desc_page); vmx->nested.pi_desc = (struct pi_desc *)((void *)vmx->nested.pi_desc + (unsigned long)(vmcs12->posted_intr_desc_addr & @@ -9746,6 +10002,18 @@ static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu, return 0; } +static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12) +{ + if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) + return 0; + + if (!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr)) + return -EINVAL; + + return 0; +} + /* * Merge L0's and L1's MSR bitmap, return false to indicate that * we do not use the hardware. 
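/*
 * Small sketch of the address sanity check that the new
 * nested_vmx_check_tpr_shadow_controls() above depends on: a guest-physical
 * address handed over by L1 must be page aligned and must fit within the
 * guest's physical-address width.  PAGE_SIZE and gpa_is_usable() here are
 * local, illustrative definitions rather than kernel code.
 */
#include <stdbool.h>
#include <stdint.h>

#define PAGE_SIZE	4096ull

static bool gpa_is_usable(uint64_t gpa, unsigned int guest_phys_bits)
{
	if (gpa & (PAGE_SIZE - 1))
		return false;			/* not page aligned */
	if (guest_phys_bits < 64 && (gpa >> guest_phys_bits))
		return false;			/* beyond MAXPHYADDR */
	return true;
}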
@@ -9762,8 +10030,8 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu, if (!nested_cpu_has_virt_x2apic_mode(vmcs12)) return false; - page = nested_get_page(vcpu, vmcs12->msr_bitmap); - if (!page) + page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->msr_bitmap); + if (is_error_page(page)) return false; msr_bitmap_l1 = (unsigned long *)kmap(page); @@ -9793,7 +10061,7 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu, } } kunmap(page); - nested_release_page_clean(page); + kvm_release_page_clean(page); return true; } @@ -10187,13 +10455,16 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, enable_ept ? vmcs12->page_fault_error_code_match : 0); if (cpu_has_secondary_exec_ctrls()) { - exec_control = vmx_secondary_exec_control(vmx); + exec_control = vmx->secondary_exec_control; /* Take the following fields only from vmcs12 */ exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + SECONDARY_EXEC_ENABLE_INVPCID | SECONDARY_EXEC_RDTSCP | + SECONDARY_EXEC_XSAVES | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | - SECONDARY_EXEC_APIC_REGISTER_VIRT); + SECONDARY_EXEC_APIC_REGISTER_VIRT | + SECONDARY_EXEC_ENABLE_VMFUNC); if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) { vmcs12_exec_ctrl = vmcs12->secondary_vm_exec_control & @@ -10201,6 +10472,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, exec_control |= vmcs12_exec_ctrl; } + /* All VMFUNCs are currently emulated through L0 vmexits. */ + if (exec_control & SECONDARY_EXEC_ENABLE_VMFUNC) + vmcs_write64(VM_FUNCTION_CONTROL, 0); + if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) { vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0); @@ -10266,6 +10541,11 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, if (exec_control & CPU_BASED_TPR_SHADOW) { vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull); vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold); + } else { +#ifdef CONFIG_X86_64 + exec_control |= CPU_BASED_CR8_LOAD_EXITING | + CPU_BASED_CR8_STORE_EXITING; +#endif } /* @@ -10426,6 +10706,9 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12)) return VMXERR_ENTRY_INVALID_CONTROL_FIELD; + if (nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12)) + return VMXERR_ENTRY_INVALID_CONTROL_FIELD; + if (nested_vmx_check_apicv_controls(vcpu, vmcs12)) return VMXERR_ENTRY_INVALID_CONTROL_FIELD; @@ -10453,6 +10736,18 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) vmx->nested.nested_vmx_entry_ctls_high)) return VMXERR_ENTRY_INVALID_CONTROL_FIELD; + if (nested_cpu_has_vmfunc(vmcs12)) { + if (vmcs12->vm_function_control & + ~vmx->nested.nested_vmx_vmfunc_controls) + return VMXERR_ENTRY_INVALID_CONTROL_FIELD; + + if (nested_cpu_has_eptp_switching(vmcs12)) { + if (!nested_cpu_has_ept(vmcs12) || + !page_address_valid(vcpu, vmcs12->eptp_list_address)) + return VMXERR_ENTRY_INVALID_CONTROL_FIELD; + } + } + if (vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu)) return VMXERR_ENTRY_INVALID_CONTROL_FIELD; @@ -10699,7 +10994,7 @@ static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu, u32 idt_vectoring; unsigned int nr; - if (vcpu->arch.exception.pending && vcpu->arch.exception.reinject) { + if (vcpu->arch.exception.injected) { nr = vcpu->arch.exception.nr; idt_vectoring = nr | VECTORING_INFO_VALID_MASK; @@ -10738,12 +11033,20 @@ static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu, static int 
vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) { struct vcpu_vmx *vmx = to_vmx(vcpu); + unsigned long exit_qual; - if (vcpu->arch.exception.pending || - vcpu->arch.nmi_injected || - vcpu->arch.interrupt.pending) + if (kvm_event_needs_reinjection(vcpu)) return -EBUSY; + if (vcpu->arch.exception.pending && + nested_vmx_check_exception(vcpu, &exit_qual)) { + if (vmx->nested.nested_run_pending) + return -EBUSY; + nested_vmx_inject_exception_vmexit(vcpu, exit_qual); + vcpu->arch.exception.pending = false; + return 0; + } + if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) && vmx->nested.preemption_timer_expired) { if (vmx->nested.nested_run_pending) @@ -11106,46 +11409,30 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, { struct vcpu_vmx *vmx = to_vmx(vcpu); struct vmcs12 *vmcs12 = get_vmcs12(vcpu); - u32 vm_inst_error = 0; /* trying to cancel vmlaunch/vmresume is a bug */ WARN_ON_ONCE(vmx->nested.nested_run_pending); - leave_guest_mode(vcpu); - prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info, - exit_qualification); - - if (nested_vmx_store_msr(vcpu, vmcs12->vm_exit_msr_store_addr, - vmcs12->vm_exit_msr_store_count)) - nested_vmx_abort(vcpu, VMX_ABORT_SAVE_GUEST_MSR_FAIL); - - if (unlikely(vmx->fail)) - vm_inst_error = vmcs_read32(VM_INSTRUCTION_ERROR); - - vmx_switch_vmcs(vcpu, &vmx->vmcs01); - /* - * TODO: SDM says that with acknowledge interrupt on exit, bit 31 of - * the VM-exit interrupt information (valid interrupt) is always set to - * 1 on EXIT_REASON_EXTERNAL_INTERRUPT, so we shouldn't need - * kvm_cpu_has_interrupt(). See the commit message for details. + * The only expected VM-instruction error is "VM entry with + * invalid control field(s)." Anything else indicates a + * problem with L0. 
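/*
 * Sketch of the control merging done in prepare_vmcs02() (see the hunk
 * above): start from L0's secondary execution controls, drop the bits that
 * must come only from vmcs12, then OR in whatever L1 enabled, filtered by
 * what L0 allows.  The CTRL_* bit values are placeholders chosen for the
 * example, not the architectural encodings.
 */
#include <stdbool.h>
#include <stdint.h>

#define CTRL_VIRT_APIC	(1u << 0)
#define CTRL_RDTSCP	(1u << 3)
#define CTRL_VMFUNC	(1u << 13)

static uint32_t merge_secondary_controls(uint32_t l0_ctrl, uint32_t l1_ctrl,
					 uint32_t l0_allowed,
					 bool l1_uses_secondary_ctrls)
{
	/* bits that are taken from vmcs12, never inherited from L0 */
	const uint32_t only_from_l1 = CTRL_VIRT_APIC | CTRL_RDTSCP | CTRL_VMFUNC;
	uint32_t ctrl = l0_ctrl & ~only_from_l1;

	if (l1_uses_secondary_ctrls)
		ctrl |= l1_ctrl & l0_allowed;
	return ctrl;
}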
*/ - if (nested_exit_intr_ack_set(vcpu) && - exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT && - kvm_cpu_has_interrupt(vcpu)) { - int irq = kvm_cpu_get_interrupt(vcpu); - WARN_ON(irq < 0); - vmcs12->vm_exit_intr_info = irq | - INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR; + WARN_ON_ONCE(vmx->fail && (vmcs_read32(VM_INSTRUCTION_ERROR) != + VMXERR_ENTRY_INVALID_CONTROL_FIELD)); + + leave_guest_mode(vcpu); + + if (likely(!vmx->fail)) { + prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info, + exit_qualification); + + if (nested_vmx_store_msr(vcpu, vmcs12->vm_exit_msr_store_addr, + vmcs12->vm_exit_msr_store_count)) + nested_vmx_abort(vcpu, VMX_ABORT_SAVE_GUEST_MSR_FAIL); } - trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason, - vmcs12->exit_qualification, - vmcs12->idt_vectoring_info_field, - vmcs12->vm_exit_intr_info, - vmcs12->vm_exit_intr_error_code, - KVM_ISA_VMX); - + vmx_switch_vmcs(vcpu, &vmx->vmcs01); vm_entry_controls_reset_shadow(vmx); vm_exit_controls_reset_shadow(vmx); vmx_segment_cache_clear(vmx); @@ -11154,8 +11441,6 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, if (VMCS02_POOL_SIZE == 0) nested_free_vmcs02(vmx, vmx->nested.current_vmptr); - load_vmcs12_host_state(vcpu, vmcs12); - /* Update any VMCS fields that might have changed while L2 ran */ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr); vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr); @@ -11184,16 +11469,16 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, /* Unpin physical memory we referred to in vmcs02 */ if (vmx->nested.apic_access_page) { - nested_release_page(vmx->nested.apic_access_page); + kvm_release_page_dirty(vmx->nested.apic_access_page); vmx->nested.apic_access_page = NULL; } if (vmx->nested.virtual_apic_page) { - nested_release_page(vmx->nested.virtual_apic_page); + kvm_release_page_dirty(vmx->nested.virtual_apic_page); vmx->nested.virtual_apic_page = NULL; } if (vmx->nested.pi_desc_page) { kunmap(vmx->nested.pi_desc_page); - nested_release_page(vmx->nested.pi_desc_page); + kvm_release_page_dirty(vmx->nested.pi_desc_page); vmx->nested.pi_desc_page = NULL; vmx->nested.pi_desc = NULL; } @@ -11204,21 +11489,57 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, */ kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); - /* - * Exiting from L2 to L1, we're now back to L1 which thinks it just - * finished a VMLAUNCH or VMRESUME instruction, so we need to set the - * success or failure flag accordingly. - */ - if (unlikely(vmx->fail)) { - vmx->fail = 0; - nested_vmx_failValid(vcpu, vm_inst_error); - } else - nested_vmx_succeed(vcpu); if (enable_shadow_vmcs) vmx->nested.sync_shadow_vmcs = true; /* in case we halted in L2 */ vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; + + if (likely(!vmx->fail)) { + /* + * TODO: SDM says that with acknowledge interrupt on + * exit, bit 31 of the VM-exit interrupt information + * (valid interrupt) is always set to 1 on + * EXIT_REASON_EXTERNAL_INTERRUPT, so we shouldn't + * need kvm_cpu_has_interrupt(). See the commit + * message for details. 
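/*
 * Sketch of how the VM-exit interruption-information value written by the
 * acknowledge-interrupt-on-exit path just below is assembled, following the
 * SDM layout (bits 7:0 vector, bits 10:8 type, bit 31 valid).  The constants
 * are spelled out locally for illustration.
 */
#include <stdint.h>

#define INTR_INFO_VALID		(1u << 31)
#define INTR_TYPE_EXT_INT	(0u << 8)	/* external interrupt */

static uint32_t make_ext_intr_info(uint8_t vector)
{
	return (uint32_t)vector | INTR_TYPE_EXT_INT | INTR_INFO_VALID;
}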
+ */ + if (nested_exit_intr_ack_set(vcpu) && + exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT && + kvm_cpu_has_interrupt(vcpu)) { + int irq = kvm_cpu_get_interrupt(vcpu); + WARN_ON(irq < 0); + vmcs12->vm_exit_intr_info = irq | + INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR; + } + + trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason, + vmcs12->exit_qualification, + vmcs12->idt_vectoring_info_field, + vmcs12->vm_exit_intr_info, + vmcs12->vm_exit_intr_error_code, + KVM_ISA_VMX); + + load_vmcs12_host_state(vcpu, vmcs12); + + return; + } + + /* + * After an early L2 VM-entry failure, we're now back + * in L1 which thinks it just finished a VMLAUNCH or + * VMRESUME instruction, so we need to set the failure + * flag and the VM-instruction error field of the VMCS + * accordingly. + */ + nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); + /* + * The emulated instruction was already skipped in + * nested_vmx_run, but the updated RIP was never + * written back to the vmcs01. + */ + skip_emulated_instruction(vcpu); + vmx->fail = 0; } /* @@ -11369,14 +11690,14 @@ static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu) gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull; - page = nested_get_page(vcpu, vmcs12->pml_address); - if (!page) + page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->pml_address); + if (is_error_page(page)) return 0; pml_address = kmap(page); pml_address[vmcs12->guest_pml_index--] = gpa; kunmap(page); - nested_release_page_clean(page); + kvm_release_page_clean(page); } return 0; @@ -11389,6 +11710,37 @@ static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm, kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask); } +static void __pi_post_block(struct kvm_vcpu *vcpu) +{ + struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); + struct pi_desc old, new; + unsigned int dest; + + do { + old.control = new.control = pi_desc->control; + WARN(old.nv != POSTED_INTR_WAKEUP_VECTOR, + "Wakeup handler not enabled while the VCPU is blocked\n"); + + dest = cpu_physical_id(vcpu->cpu); + + if (x2apic_enabled()) + new.ndst = dest; + else + new.ndst = (dest << 8) & 0xFF00; + + /* set 'NV' to 'notification vector' */ + new.nv = POSTED_INTR_VECTOR; + } while (cmpxchg64(&pi_desc->control, old.control, + new.control) != old.control); + + if (!WARN_ON_ONCE(vcpu->pre_pcpu == -1)) { + spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); + list_del(&vcpu->blocked_vcpu_list); + spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); + vcpu->pre_pcpu = -1; + } +} + /* * This routine does the following things for vCPU which is going * to be blocked if VT-d PI is enabled. 
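/*
 * User-space sketch of the cmpxchg64() retry loop used by __pi_post_block()
 * and pi_pre_block() below to update the posted-interrupt descriptor's
 * notification vector and destination atomically.  C11 atomics stand in for
 * the kernel's cmpxchg64(), and the bit layout is a simplified illustration,
 * not the real struct pi_desc.
 */
#include <stdatomic.h>
#include <stdint.h>

struct pi_control {
	_Atomic uint64_t control;
};

#define PI_NV_MASK	0xffull		/* notification vector (illustrative) */
#define PI_NDST_SHIFT	32		/* destination id (illustrative) */

static void pi_set_vector_and_dest(struct pi_control *pi, uint8_t nv, uint32_t dest)
{
	uint64_t old, new;

	old = atomic_load(&pi->control);
	do {
		new = old;
		new &= ~(PI_NV_MASK | ((uint64_t)UINT32_MAX << PI_NDST_SHIFT));
		new |= nv | ((uint64_t)dest << PI_NDST_SHIFT);
		/* retry until no other CPU modified the descriptor meanwhile */
	} while (!atomic_compare_exchange_weak(&pi->control, &old, new));
}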
@@ -11404,7 +11756,6 @@ static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm, */ static int pi_pre_block(struct kvm_vcpu *vcpu) { - unsigned long flags; unsigned int dest; struct pi_desc old, new; struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); @@ -11414,34 +11765,20 @@ static int pi_pre_block(struct kvm_vcpu *vcpu) !kvm_vcpu_apicv_active(vcpu)) return 0; - vcpu->pre_pcpu = vcpu->cpu; - spin_lock_irqsave(&per_cpu(blocked_vcpu_on_cpu_lock, - vcpu->pre_pcpu), flags); - list_add_tail(&vcpu->blocked_vcpu_list, - &per_cpu(blocked_vcpu_on_cpu, - vcpu->pre_pcpu)); - spin_unlock_irqrestore(&per_cpu(blocked_vcpu_on_cpu_lock, - vcpu->pre_pcpu), flags); + WARN_ON(irqs_disabled()); + local_irq_disable(); + if (!WARN_ON_ONCE(vcpu->pre_pcpu != -1)) { + vcpu->pre_pcpu = vcpu->cpu; + spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); + list_add_tail(&vcpu->blocked_vcpu_list, + &per_cpu(blocked_vcpu_on_cpu, + vcpu->pre_pcpu)); + spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); + } do { old.control = new.control = pi_desc->control; - /* - * We should not block the vCPU if - * an interrupt is posted for it. - */ - if (pi_test_on(pi_desc) == 1) { - spin_lock_irqsave(&per_cpu(blocked_vcpu_on_cpu_lock, - vcpu->pre_pcpu), flags); - list_del(&vcpu->blocked_vcpu_list); - spin_unlock_irqrestore( - &per_cpu(blocked_vcpu_on_cpu_lock, - vcpu->pre_pcpu), flags); - vcpu->pre_pcpu = -1; - - return 1; - } - WARN((pi_desc->sn == 1), "Warning: SN field of posted-interrupts " "is set before blocking\n"); @@ -11463,10 +11800,15 @@ static int pi_pre_block(struct kvm_vcpu *vcpu) /* set 'NV' to 'wakeup vector' */ new.nv = POSTED_INTR_WAKEUP_VECTOR; - } while (cmpxchg(&pi_desc->control, old.control, - new.control) != old.control); + } while (cmpxchg64(&pi_desc->control, old.control, + new.control) != old.control); - return 0; + /* We should not block the vCPU if an interrupt is posted for it. 
*/ + if (pi_test_on(pi_desc) == 1) + __pi_post_block(vcpu); + + local_irq_enable(); + return (vcpu->pre_pcpu == -1); } static int vmx_pre_block(struct kvm_vcpu *vcpu) @@ -11482,44 +11824,13 @@ static int vmx_pre_block(struct kvm_vcpu *vcpu) static void pi_post_block(struct kvm_vcpu *vcpu) { - struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); - struct pi_desc old, new; - unsigned int dest; - unsigned long flags; - - if (!kvm_arch_has_assigned_device(vcpu->kvm) || - !irq_remapping_cap(IRQ_POSTING_CAP) || - !kvm_vcpu_apicv_active(vcpu)) + if (vcpu->pre_pcpu == -1) return; - do { - old.control = new.control = pi_desc->control; - - dest = cpu_physical_id(vcpu->cpu); - - if (x2apic_enabled()) - new.ndst = dest; - else - new.ndst = (dest << 8) & 0xFF00; - - /* Allow posting non-urgent interrupts */ - new.sn = 0; - - /* set 'NV' to 'notification vector' */ - new.nv = POSTED_INTR_VECTOR; - } while (cmpxchg(&pi_desc->control, old.control, - new.control) != old.control); - - if(vcpu->pre_pcpu != -1) { - spin_lock_irqsave( - &per_cpu(blocked_vcpu_on_cpu_lock, - vcpu->pre_pcpu), flags); - list_del(&vcpu->blocked_vcpu_list); - spin_unlock_irqrestore( - &per_cpu(blocked_vcpu_on_cpu_lock, - vcpu->pre_pcpu), flags); - vcpu->pre_pcpu = -1; - } + WARN_ON(irqs_disabled()); + local_irq_disable(); + __pi_post_block(vcpu); + local_irq_enable(); } static void vmx_post_block(struct kvm_vcpu *vcpu) @@ -11547,7 +11858,7 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq, struct kvm_lapic_irq irq; struct kvm_vcpu *vcpu; struct vcpu_data vcpu_info; - int idx, ret = -EINVAL; + int idx, ret = 0; if (!kvm_arch_has_assigned_device(kvm) || !irq_remapping_cap(IRQ_POSTING_CAP) || @@ -11556,7 +11867,12 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq, idx = srcu_read_lock(&kvm->irq_srcu); irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu); - BUG_ON(guest_irq >= irq_rt->nr_rt_entries); + if (guest_irq >= irq_rt->nr_rt_entries || + hlist_empty(&irq_rt->map[guest_irq])) { + pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n", + guest_irq, irq_rt->nr_rt_entries); + goto out; + } hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) { if (e->type != KVM_IRQ_ROUTING_MSI) @@ -11599,12 +11915,8 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq, if (set) ret = irq_set_vcpu_affinity(host_irq, &vcpu_info); - else { - /* suppress notification event before unposting */ - pi_set_sn(vcpu_to_pi_desc(vcpu)); + else ret = irq_set_vcpu_affinity(host_irq, NULL); - pi_clear_sn(vcpu_to_pi_desc(vcpu)); - } if (ret < 0) { printk(KERN_INFO "%s: failed to update PI IRTE\n", diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ef5102f80497..03869eb7fcd6 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -311,13 +311,13 @@ int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info) (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE); u64 new_state = msr_info->data & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE); - u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) | - 0x2ff | (guest_cpuid_has_x2apic(vcpu) ? 0 : X2APIC_ENABLE); + u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) | 0x2ff | + (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) ? 
0 : X2APIC_ENABLE); + if ((msr_info->data & reserved_bits) || new_state == X2APIC_ENABLE) + return 1; if (!msr_info->host_initiated && - ((msr_info->data & reserved_bits) != 0 || - new_state == X2APIC_ENABLE || - (new_state == MSR_IA32_APICBASE_ENABLE && + ((new_state == MSR_IA32_APICBASE_ENABLE && old_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) || (new_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE) && old_state == 0))) @@ -390,15 +390,28 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu, kvm_make_request(KVM_REQ_EVENT, vcpu); - if (!vcpu->arch.exception.pending) { + if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) { queue: if (has_error && !is_protmode(vcpu)) has_error = false; - vcpu->arch.exception.pending = true; + if (reinject) { + /* + * On vmentry, vcpu->arch.exception.pending is only + * true if an event injection was blocked by + * nested_run_pending. In that case, however, + * vcpu_enter_guest requests an immediate exit, + * and the guest shouldn't proceed far enough to + * need reinjection. + */ + WARN_ON_ONCE(vcpu->arch.exception.pending); + vcpu->arch.exception.injected = true; + } else { + vcpu->arch.exception.pending = true; + vcpu->arch.exception.injected = false; + } vcpu->arch.exception.has_error_code = has_error; vcpu->arch.exception.nr = nr; vcpu->arch.exception.error_code = error_code; - vcpu->arch.exception.reinject = reinject; return; } @@ -413,8 +426,13 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu, class2 = exception_class(nr); if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY) || (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) { - /* generate double fault per SDM Table 5-5 */ + /* + * Generate double fault per SDM Table 5-5. Set + * exception.pending = true so that the double fault + * can trigger a nested vmexit. 
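/*
 * Sketch of the reserved-bit mask computed in kvm_set_apic_base() above:
 * everything above MAXPHYADDR, the low reserved bits, and the x2APIC enable
 * bit when the guest's CPUID does not advertise x2APIC.  The constants are
 * written out locally for the example.
 */
#include <stdbool.h>
#include <stdint.h>

#define APIC_BASE_LOW_RESERVED	0x2ffull	/* bits 9 and 7:0 */
#define X2APIC_ENABLE_BIT	(1ull << 10)

static uint64_t apic_base_reserved_bits(unsigned int maxphyaddr, bool has_x2apic)
{
	return (~0ull << maxphyaddr) | APIC_BASE_LOW_RESERVED |
	       (has_x2apic ? 0 : X2APIC_ENABLE_BIT);
}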
+ */ vcpu->arch.exception.pending = true; + vcpu->arch.exception.injected = false; vcpu->arch.exception.has_error_code = true; vcpu->arch.exception.nr = DF_VECTOR; vcpu->arch.exception.error_code = 0; @@ -755,19 +773,22 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) if (cr4 & CR4_RESERVED_BITS) return 1; - if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE)) + if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && (cr4 & X86_CR4_OSXSAVE)) return 1; - if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP)) + if (!guest_cpuid_has(vcpu, X86_FEATURE_SMEP) && (cr4 & X86_CR4_SMEP)) return 1; - if (!guest_cpuid_has_smap(vcpu) && (cr4 & X86_CR4_SMAP)) + if (!guest_cpuid_has(vcpu, X86_FEATURE_SMAP) && (cr4 & X86_CR4_SMAP)) return 1; - if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_FSGSBASE)) + if (!guest_cpuid_has(vcpu, X86_FEATURE_FSGSBASE) && (cr4 & X86_CR4_FSGSBASE)) return 1; - if (!guest_cpuid_has_pku(vcpu) && (cr4 & X86_CR4_PKE)) + if (!guest_cpuid_has(vcpu, X86_FEATURE_PKU) && (cr4 & X86_CR4_PKE)) + return 1; + + if (!guest_cpuid_has(vcpu, X86_FEATURE_LA57) && (cr4 & X86_CR4_LA57)) return 1; if (is_long_mode(vcpu)) { @@ -780,7 +801,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) return 1; if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) { - if (!guest_cpuid_has_pcid(vcpu)) + if (!guest_cpuid_has(vcpu, X86_FEATURE_PCID)) return 1; /* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */ @@ -814,10 +835,10 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) return 0; } - if (is_long_mode(vcpu)) { - if (cr3 & CR3_L_MODE_RESERVED_BITS) - return 1; - } else if (is_pae(vcpu) && is_paging(vcpu) && + if (is_long_mode(vcpu) && + (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 62))) + return 1; + else if (is_pae(vcpu) && is_paging(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) return 1; @@ -884,7 +905,7 @@ static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu) { u64 fixed = DR6_FIXED_1; - if (!guest_cpuid_has_rtm(vcpu)) + if (!guest_cpuid_has(vcpu, X86_FEATURE_RTM)) fixed |= DR6_RTM; return fixed; } @@ -994,6 +1015,7 @@ static u32 emulated_msrs[] = { MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW, HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL, HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC, + HV_X64_MSR_TSC_FREQUENCY, HV_X64_MSR_APIC_FREQUENCY, HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2, HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL, HV_X64_MSR_RESET, @@ -1022,21 +1044,11 @@ bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) if (efer & efer_reserved_bits) return false; - if (efer & EFER_FFXSR) { - struct kvm_cpuid_entry2 *feat; - - feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); - if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) + if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT)) return false; - } - if (efer & EFER_SVME) { - struct kvm_cpuid_entry2 *feat; - - feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); - if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) + if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM)) return false; - } return true; } @@ -1084,7 +1096,7 @@ int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) case MSR_KERNEL_GS_BASE: case MSR_CSTAR: case MSR_LSTAR: - if (is_noncanonical_address(msr->data)) + if (is_noncanonical_address(msr->data, vcpu)) return 1; break; case MSR_IA32_SYSENTER_EIP: @@ -1101,7 +1113,7 @@ int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) * value, and that something deterministic happens if the guest * 
invokes 64-bit SYSENTER. */ - msr->data = get_canonical(msr->data); + msr->data = get_canonical(msr->data, vcpu_virt_addr_bits(vcpu)); } return kvm_x86_ops->set_msr(vcpu, msr); } @@ -1534,8 +1546,9 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr) vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec; vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write; - if (guest_cpuid_has_tsc_adjust(vcpu) && !msr->host_initiated) + if (!msr->host_initiated && guest_cpuid_has(vcpu, X86_FEATURE_TSC_ADJUST)) update_ia32_tsc_adjust_msr(vcpu, offset); + kvm_vcpu_write_tsc_offset(vcpu, offset); raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); @@ -2185,7 +2198,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) kvm_set_lapic_tscdeadline_msr(vcpu, data); break; case MSR_IA32_TSC_ADJUST: - if (guest_cpuid_has_tsc_adjust(vcpu)) { + if (guest_cpuid_has(vcpu, X86_FEATURE_TSC_ADJUST)) { if (!msr_info->host_initiated) { s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr; adjust_tsc_offset_guest(vcpu, adj); @@ -2307,12 +2320,12 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n", msr, data); break; case MSR_AMD64_OSVW_ID_LENGTH: - if (!guest_cpuid_has_osvw(vcpu)) + if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) return 1; vcpu->arch.osvw.length = data; break; case MSR_AMD64_OSVW_STATUS: - if (!guest_cpuid_has_osvw(vcpu)) + if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) return 1; vcpu->arch.osvw.status = data; break; @@ -2537,12 +2550,12 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) msr_info->data = 0xbe702111; break; case MSR_AMD64_OSVW_ID_LENGTH: - if (!guest_cpuid_has_osvw(vcpu)) + if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) return 1; msr_info->data = vcpu->arch.osvw.length; break; case MSR_AMD64_OSVW_STATUS: - if (!guest_cpuid_has_osvw(vcpu)) + if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) return 1; msr_info->data = vcpu->arch.osvw.status; break; @@ -2882,6 +2895,10 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { int idx; + + if (vcpu->preempted) + vcpu->arch.preempted_in_kernel = !kvm_x86_ops->get_cpl(vcpu); + /* * Disable page faults because we're in atomic context here. * kvm_write_guest_offset_cached() would call might_fault() @@ -3074,8 +3091,14 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, struct kvm_vcpu_events *events) { process_nmi(vcpu); + /* + * FIXME: pass injected and pending separately. This is only + * needed for nested virtualization, whose state cannot be + * migrated yet. For now we can combine them. 
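/*
 * Standalone sketch of the canonical-address handling used above (the
 * get_canonical()/is_noncanonical_address() helpers are added to x86.h in a
 * later hunk): sign-extend from the implemented virtual-address width, which
 * is 48 bits, or 57 bits when LA57 is enabled, and compare with the original
 * value.
 */
#include <stdbool.h>
#include <stdint.h>

static uint64_t canonical(uint64_t la, unsigned int vaddr_bits)
{
	return (uint64_t)(((int64_t)la << (64 - vaddr_bits)) >> (64 - vaddr_bits));
}

static bool noncanonical(uint64_t la, bool la57)
{
	return canonical(la, la57 ? 57 : 48) != la;
}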
+ */ events->exception.injected = - vcpu->arch.exception.pending && + (vcpu->arch.exception.pending || + vcpu->arch.exception.injected) && !kvm_exception_is_soft(vcpu->arch.exception.nr); events->exception.nr = vcpu->arch.exception.nr; events->exception.has_error_code = vcpu->arch.exception.has_error_code; @@ -3130,6 +3153,7 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, return -EINVAL; process_nmi(vcpu); + vcpu->arch.exception.injected = false; vcpu->arch.exception.pending = events->exception.injected; vcpu->arch.exception.nr = events->exception.nr; vcpu->arch.exception.has_error_code = events->exception.has_error_code; @@ -4671,25 +4695,18 @@ static int emulator_read_write_onepage(unsigned long addr, void *val, */ if (vcpu->arch.gpa_available && emulator_can_use_gpa(ctxt) && - vcpu_is_mmio_gpa(vcpu, addr, exception->address, write) && - (addr & ~PAGE_MASK) == (exception->address & ~PAGE_MASK)) { - gpa = exception->address; - goto mmio; + (addr & ~PAGE_MASK) == (vcpu->arch.gpa_val & ~PAGE_MASK)) { + gpa = vcpu->arch.gpa_val; + ret = vcpu_is_mmio_gpa(vcpu, addr, gpa, write); + } else { + ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write); + if (ret < 0) + return X86EMUL_PROPAGATE_FAULT; } - ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write); - - if (ret < 0) - return X86EMUL_PROPAGATE_FAULT; - - /* For APIC access vmexit */ - if (ret) - goto mmio; - - if (ops->read_write_emulate(vcpu, gpa, val, bytes)) + if (!ret && ops->read_write_emulate(vcpu, gpa, val, bytes)) return X86EMUL_CONTINUE; -mmio: /* * Is this MMIO handled locally? */ @@ -5227,10 +5244,10 @@ static int emulator_intercept(struct x86_emulate_ctxt *ctxt, return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage); } -static void emulator_get_cpuid(struct x86_emulate_ctxt *ctxt, - u32 *eax, u32 *ebx, u32 *ecx, u32 *edx) +static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt, + u32 *eax, u32 *ebx, u32 *ecx, u32 *edx, bool check_limit) { - kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx); + return kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx, check_limit); } static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg) @@ -6362,11 +6379,42 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win) int r; /* try to reinject previous events if any */ + if (vcpu->arch.exception.injected) { + kvm_x86_ops->queue_exception(vcpu); + return 0; + } + + /* + * Exceptions must be injected immediately, or the exception + * frame will have the address of the NMI or interrupt handler. 
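/*
 * Condensed sketch of the ordering that the reworked inject_pending_event()
 * around this hunk implements: events that were already injected are
 * re-injected first, then newly pending events are delivered (a nested
 * hypervisor would be consulted in between, and SMIs are left out here).
 * The structure and enum are simplified stand-ins for vcpu->arch state.
 */
#include <stdbool.h>

enum inject_action {
	INJECT_NONE,
	REINJECT_EXCEPTION, REINJECT_NMI, REINJECT_IRQ,
	INJECT_EXCEPTION, INJECT_NMI, INJECT_IRQ,
};

struct pending_events {
	bool exception_injected, exception_pending;
	bool nmi_injected, irq_injected;
	bool nmi_pending, irq_pending;
};

static enum inject_action pick_event(const struct pending_events *e)
{
	/* exceptions that were already injected win over everything else */
	if (e->exception_injected)
		return REINJECT_EXCEPTION;

	/* NMIs/IRQs are only re-injected if no exception is pending */
	if (!e->exception_pending) {
		if (e->nmi_injected)
			return REINJECT_NMI;
		if (e->irq_injected)
			return REINJECT_IRQ;
	}

	/* ... check_nested_events() would run here for a nested guest ... */

	if (e->exception_pending)
		return INJECT_EXCEPTION;
	if (e->nmi_pending)
		return INJECT_NMI;
	if (e->irq_pending)
		return INJECT_IRQ;
	return INJECT_NONE;
}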
+ */ + if (!vcpu->arch.exception.pending) { + if (vcpu->arch.nmi_injected) { + kvm_x86_ops->set_nmi(vcpu); + return 0; + } + + if (vcpu->arch.interrupt.pending) { + kvm_x86_ops->set_irq(vcpu); + return 0; + } + } + + if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) { + r = kvm_x86_ops->check_nested_events(vcpu, req_int_win); + if (r != 0) + return r; + } + + /* try to inject new event if pending */ if (vcpu->arch.exception.pending) { trace_kvm_inj_exception(vcpu->arch.exception.nr, vcpu->arch.exception.has_error_code, vcpu->arch.exception.error_code); + vcpu->arch.exception.pending = false; + vcpu->arch.exception.injected = true; + if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT) __kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) | X86_EFLAGS_RF); @@ -6378,27 +6426,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win) } kvm_x86_ops->queue_exception(vcpu); - return 0; - } - - if (vcpu->arch.nmi_injected) { - kvm_x86_ops->set_nmi(vcpu); - return 0; - } - - if (vcpu->arch.interrupt.pending) { - kvm_x86_ops->set_irq(vcpu); - return 0; - } - - if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) { - r = kvm_x86_ops->check_nested_events(vcpu, req_int_win); - if (r != 0) - return r; - } - - /* try to inject new event if pending */ - if (vcpu->arch.smi_pending && !is_smm(vcpu)) { + } else if (vcpu->arch.smi_pending && !is_smm(vcpu)) { vcpu->arch.smi_pending = false; enter_smm(vcpu); } else if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) { @@ -6615,7 +6643,7 @@ static void enter_smm(struct kvm_vcpu *vcpu) trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true); vcpu->arch.hflags |= HF_SMM_MASK; memset(buf, 0, 512); - if (guest_cpuid_has_longmode(vcpu)) + if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) enter_smm_save_state_64(vcpu, buf); else enter_smm_save_state_32(vcpu, buf); @@ -6667,7 +6695,7 @@ static void enter_smm(struct kvm_vcpu *vcpu) kvm_set_segment(vcpu, &ds, VCPU_SREG_GS); kvm_set_segment(vcpu, &ds, VCPU_SREG_SS); - if (guest_cpuid_has_longmode(vcpu)) + if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) kvm_x86_ops->set_efer(vcpu, 0); kvm_update_cpuid(vcpu); @@ -6774,6 +6802,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) } if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; + vcpu->mmio_needed = 0; r = 0; goto out; } @@ -6862,6 +6891,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) kvm_x86_ops->enable_nmi_window(vcpu); if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win) kvm_x86_ops->enable_irq_window(vcpu); + WARN_ON(vcpu->arch.exception.pending); } if (kvm_lapic_enabled(vcpu)) { @@ -7004,6 +7034,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) if (vcpu->arch.apic_attention) kvm_lapic_sync_from_vapic(vcpu); + vcpu->arch.gpa_available = false; r = kvm_x86_ops->handle_exit(vcpu); return r; @@ -7194,16 +7225,25 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) int r; sigset_t sigsaved; - fpu__activate_curr(fpu); + fpu__initialize(fpu); if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { + if (kvm_run->immediate_exit) { + r = -EINTR; + goto out; + } kvm_vcpu_block(vcpu); kvm_apic_accept_events(vcpu); kvm_clear_request(KVM_REQ_UNHALT, vcpu); r = -EAGAIN; + if (signal_pending(current)) { + r = -EINTR; + vcpu->run->exit_reason = KVM_EXIT_INTR; + ++vcpu->stat.signal_exits; + } goto out; } @@ -7422,7 +7462,13 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct 
kvm_vcpu *vcpu, int pending_vec, max_bits, idx; struct desc_ptr dt; - if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE)) + if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && + (sregs->cr4 & X86_CR4_OSXSAVE)) + return -EINVAL; + + apic_base_msr.data = sregs->apic_base; + apic_base_msr.host_initiated = true; + if (kvm_set_apic_base(vcpu, &apic_base_msr)) return -EINVAL; dt.size = sregs->idt.limit; @@ -7441,9 +7487,6 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, mmu_reset_needed |= vcpu->arch.efer != sregs->efer; kvm_x86_ops->set_efer(vcpu, sregs->efer); - apic_base_msr.data = sregs->apic_base; - apic_base_msr.host_initiated = true; - kvm_set_apic_base(vcpu, &apic_base_msr); mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0; kvm_x86_ops->set_cr0(vcpu, sregs->cr0); @@ -7734,6 +7777,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) vcpu->arch.nmi_injected = false; kvm_clear_interrupt_queue(vcpu); kvm_clear_exception_queue(vcpu); + vcpu->arch.exception.pending = false; memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); kvm_update_dr0123(vcpu); @@ -7936,7 +7980,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) BUG_ON(vcpu->kvm == NULL); kvm = vcpu->kvm; - vcpu->arch.apicv_active = kvm_x86_ops->get_enable_apicv(); + vcpu->arch.apicv_active = kvm_x86_ops->get_enable_apicv(vcpu); vcpu->arch.pv.pv_unhalted = false; vcpu->arch.emulate_ctxt.ops = &emulate_ops; if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_reset_bsp(vcpu)) @@ -7993,6 +8037,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) kvm_pmu_init(vcpu); vcpu->arch.pending_external_vector = -1; + vcpu->arch.preempted_in_kernel = false; kvm_hv_vcpu_init(vcpu); @@ -8416,6 +8461,9 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) if (vcpu->arch.pv.pv_unhalted) return true; + if (vcpu->arch.exception.pending) + return true; + if (kvm_test_request(KVM_REQ_NMI, vcpu) || (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu))) @@ -8440,6 +8488,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu); } +bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.preempted_in_kernel; +} + int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) { return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; @@ -8578,6 +8631,13 @@ static int apf_put_user(struct kvm_vcpu *vcpu, u32 val) sizeof(val)); } +static int apf_get_user(struct kvm_vcpu *vcpu, u32 *val) +{ + + return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, val, + sizeof(u32)); +} + void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) { @@ -8605,6 +8665,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) { struct x86_exception fault; + u32 val; if (work->wakeup_all) work->arch.token = ~0; /* broadcast wakeup */ @@ -8612,15 +8673,26 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, kvm_del_async_pf_gfn(vcpu, work->arch.gfn); trace_kvm_async_pf_ready(work->arch.token, work->gva); - if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) && - !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) { - fault.vector = PF_VECTOR; - fault.error_code_valid = true; - fault.error_code = 0; - fault.nested_page_fault = false; - fault.address = work->arch.token; - fault.async_page_fault = true; - kvm_inject_page_fault(vcpu, &fault); + if (vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED && + !apf_get_user(vcpu, &val)) { + if (val == KVM_PV_REASON_PAGE_NOT_PRESENT && + vcpu->arch.exception.pending && + 
vcpu->arch.exception.nr == PF_VECTOR && + !apf_put_user(vcpu, 0)) { + vcpu->arch.exception.injected = false; + vcpu->arch.exception.pending = false; + vcpu->arch.exception.nr = 0; + vcpu->arch.exception.has_error_code = false; + vcpu->arch.exception.error_code = 0; + } else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) { + fault.vector = PF_VECTOR; + fault.error_code_valid = true; + fault.error_code = 0; + fault.nested_page_fault = false; + fault.address = work->arch.token; + fault.async_page_fault = true; + kvm_inject_page_fault(vcpu, &fault); + } } vcpu->arch.apf.halted = false; vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 612067074905..51e349cf5f45 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -11,7 +11,7 @@ static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu) { - vcpu->arch.exception.pending = false; + vcpu->arch.exception.injected = false; } static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector, @@ -29,7 +29,7 @@ static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu) static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu) { - return vcpu->arch.exception.pending || vcpu->arch.interrupt.pending || + return vcpu->arch.exception.injected || vcpu->arch.interrupt.pending || vcpu->arch.nmi_injected; } @@ -62,6 +62,16 @@ static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu) return cs_l; } +static inline bool is_la57_mode(struct kvm_vcpu *vcpu) +{ +#ifdef CONFIG_X86_64 + return (vcpu->arch.efer & EFER_LMA) && + kvm_read_cr4_bits(vcpu, X86_CR4_LA57); +#else + return 0; +#endif +} + static inline bool mmu_is_nested(struct kvm_vcpu *vcpu) { return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu; @@ -87,10 +97,48 @@ static inline u32 bit(int bitno) return 1 << (bitno & 31); } +static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu) +{ + return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48; +} + +static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt) +{ + return (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_LA57) ? 57 : 48; +} + +static inline u64 get_canonical(u64 la, u8 vaddr_bits) +{ + return ((int64_t)la << (64 - vaddr_bits)) >> (64 - vaddr_bits); +} + +static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu) +{ +#ifdef CONFIG_X86_64 + return get_canonical(la, vcpu_virt_addr_bits(vcpu)) != la; +#else + return false; +#endif +} + +static inline bool emul_is_noncanonical_address(u64 la, + struct x86_emulate_ctxt *ctxt) +{ +#ifdef CONFIG_X86_64 + return get_canonical(la, ctxt_virt_addr_bits(ctxt)) != la; +#else + return false; +#endif +} + static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, unsigned access) { - vcpu->arch.mmio_gva = gva & PAGE_MASK; + /* + * If this is a shadow nested page table, the "GVA" is + * actually a nGPA. + */ + vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 
0 : gva & PAGE_MASK; vcpu->arch.access = access; vcpu->arch.mmio_gfn = gfn; vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation; diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c index d4a7df2205b8..220638a4cb94 100644 --- a/arch/x86/math-emu/fpu_entry.c +++ b/arch/x86/math-emu/fpu_entry.c @@ -114,7 +114,7 @@ void math_emulate(struct math_emu_info *info) struct desc_struct code_descriptor; struct fpu *fpu = ¤t->thread.fpu; - fpu__activate_curr(fpu); + fpu__initialize(fpu); #ifdef RE_ENTRANT_CHECKING if (emulating) { diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c index c076f710de4c..c3521e2be396 100644 --- a/arch/x86/mm/extable.c +++ b/arch/x86/mm/extable.c @@ -2,6 +2,7 @@ #include #include +#include #include #include @@ -78,6 +79,29 @@ bool ex_handler_refcount(const struct exception_table_entry *fixup, } EXPORT_SYMBOL_GPL(ex_handler_refcount); +/* + * Handler for when we fail to restore a task's FPU state. We should never get + * here because the FPU state of a task using the FPU (task->thread.fpu.state) + * should always be valid. However, past bugs have allowed userspace to set + * reserved bits in the XSAVE area using PTRACE_SETREGSET or sys_rt_sigreturn(). + * These caused XRSTOR to fail when switching to the task, leaking the FPU + * registers of the task previously executing on the CPU. Mitigate this class + * of vulnerability by restoring from the initial state (essentially, zeroing + * out all the FPU registers) if we can't restore from the task's FPU state. + */ +bool ex_handler_fprestore(const struct exception_table_entry *fixup, + struct pt_regs *regs, int trapnr) +{ + regs->ip = ex_fixup_addr(fixup); + + WARN_ONCE(1, "Bad FPU state detected at %pB, reinitializing FPU registers.", + (void *)instruction_pointer(regs)); + + __copy_kernel_to_fpregs(&init_fpstate, -1); + return true; +} +EXPORT_SYMBOL_GPL(ex_handler_fprestore); + bool ex_handler_ext(const struct exception_table_entry *fixup, struct pt_regs *regs, int trapnr) { diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index b836a7274e12..e2baeaa053a5 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -192,8 +192,7 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr) * 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really * faulted on a pte with its pkey=4. */ -static void fill_sig_info_pkey(int si_code, siginfo_t *info, - struct vm_area_struct *vma) +static void fill_sig_info_pkey(int si_code, siginfo_t *info, u32 *pkey) { /* This is effectively an #ifdef */ if (!boot_cpu_has(X86_FEATURE_OSPKE)) @@ -209,7 +208,7 @@ static void fill_sig_info_pkey(int si_code, siginfo_t *info, * valid VMA, so we should never reach this without a * valid VMA. */ - if (!vma) { + if (!pkey) { WARN_ONCE(1, "PKU fault with no VMA passed in"); info->si_pkey = 0; return; @@ -219,13 +218,12 @@ static void fill_sig_info_pkey(int si_code, siginfo_t *info, * absolutely guranteed to be 100% accurate because of * the race explained above. 
*/ - info->si_pkey = vma_pkey(vma); + info->si_pkey = *pkey; } static void force_sig_info_fault(int si_signo, int si_code, unsigned long address, - struct task_struct *tsk, struct vm_area_struct *vma, - int fault) + struct task_struct *tsk, u32 *pkey, int fault) { unsigned lsb = 0; siginfo_t info; @@ -240,7 +238,7 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address, lsb = PAGE_SHIFT; info.si_addr_lsb = lsb; - fill_sig_info_pkey(si_code, &info, vma); + fill_sig_info_pkey(si_code, &info, pkey); force_sig_info(si_signo, &info, tsk); } @@ -762,8 +760,6 @@ no_context(struct pt_regs *regs, unsigned long error_code, struct task_struct *tsk = current; unsigned long flags; int sig; - /* No context means no VMA to pass down */ - struct vm_area_struct *vma = NULL; /* Are we prepared to handle this kernel fault? */ if (fixup_exception(regs, X86_TRAP_PF)) { @@ -788,7 +784,7 @@ no_context(struct pt_regs *regs, unsigned long error_code, /* XXX: hwpoison faults will set the wrong code. */ force_sig_info_fault(signal, si_code, address, - tsk, vma, 0); + tsk, NULL, 0); } /* @@ -806,7 +802,6 @@ no_context(struct pt_regs *regs, unsigned long error_code, if (is_vmalloc_addr((void *)address) && (((unsigned long)tsk->stack - 1 - address < PAGE_SIZE) || address - ((unsigned long)tsk->stack + THREAD_SIZE) < PAGE_SIZE)) { - register void *__sp asm("rsp"); unsigned long stack = this_cpu_read(orig_ist.ist[DOUBLEFAULT_STACK]) - sizeof(void *); /* * We're likely to be running with very little stack space @@ -821,7 +816,7 @@ no_context(struct pt_regs *regs, unsigned long error_code, asm volatile ("movq %[stack], %%rsp\n\t" "call handle_stack_overflow\n\t" "1: jmp 1b" - : "+r" (__sp) + : ASM_CALL_CONSTRAINT : "D" ("kernel stack overflow (page fault)"), "S" (regs), "d" (address), [stack] "rm" (stack)); @@ -897,8 +892,7 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code, static void __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, - unsigned long address, struct vm_area_struct *vma, - int si_code) + unsigned long address, u32 *pkey, int si_code) { struct task_struct *tsk = current; @@ -946,7 +940,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, tsk->thread.error_code = error_code; tsk->thread.trap_nr = X86_TRAP_PF; - force_sig_info_fault(SIGSEGV, si_code, address, tsk, vma, 0); + force_sig_info_fault(SIGSEGV, si_code, address, tsk, pkey, 0); return; } @@ -959,9 +953,9 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, static noinline void bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, - unsigned long address, struct vm_area_struct *vma) + unsigned long address, u32 *pkey) { - __bad_area_nosemaphore(regs, error_code, address, vma, SEGV_MAPERR); + __bad_area_nosemaphore(regs, error_code, address, pkey, SEGV_MAPERR); } static void @@ -969,6 +963,10 @@ __bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address, struct vm_area_struct *vma, int si_code) { struct mm_struct *mm = current->mm; + u32 pkey; + + if (vma) + pkey = vma_pkey(vma); /* * Something tried to access memory that isn't in our memory map.. @@ -976,7 +974,8 @@ __bad_area(struct pt_regs *regs, unsigned long error_code, */ up_read(&mm->mmap_sem); - __bad_area_nosemaphore(regs, error_code, address, vma, si_code); + __bad_area_nosemaphore(regs, error_code, address, + (vma) ? 
&pkey : NULL, si_code); } static noinline void @@ -1019,7 +1018,7 @@ bad_area_access_error(struct pt_regs *regs, unsigned long error_code, static void do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address, - struct vm_area_struct *vma, unsigned int fault) + u32 *pkey, unsigned int fault) { struct task_struct *tsk = current; int code = BUS_ADRERR; @@ -1046,13 +1045,12 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address, code = BUS_MCEERR_AR; } #endif - force_sig_info_fault(SIGBUS, code, address, tsk, vma, fault); + force_sig_info_fault(SIGBUS, code, address, tsk, pkey, fault); } static noinline void mm_fault_error(struct pt_regs *regs, unsigned long error_code, - unsigned long address, struct vm_area_struct *vma, - unsigned int fault) + unsigned long address, u32 *pkey, unsigned int fault) { if (fatal_signal_pending(current) && !(error_code & PF_USER)) { no_context(regs, error_code, address, 0, 0); @@ -1076,9 +1074,9 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code, } else { if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| VM_FAULT_HWPOISON_LARGE)) - do_sigbus(regs, error_code, address, vma, fault); + do_sigbus(regs, error_code, address, pkey, fault); else if (fault & VM_FAULT_SIGSEGV) - bad_area_nosemaphore(regs, error_code, address, vma); + bad_area_nosemaphore(regs, error_code, address, pkey); else BUG(); } @@ -1268,6 +1266,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, struct mm_struct *mm; int fault, major = 0; unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; + u32 pkey; tsk = current; mm = tsk->mm; @@ -1468,9 +1467,10 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, return; } + pkey = vma_pkey(vma); up_read(&mm->mmap_sem); if (unlikely(fault & VM_FAULT_ERROR)) { - mm_fault_error(regs, error_code, address, vma, fault); + mm_fault_error(regs, error_code, address, &pkey, fault); return; } diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 7777ccc0e9f9..af5c1ed21d43 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -19,6 +19,7 @@ #include #include #include +#include /* * We need to define the tracepoints somewhere, and tlb.c @@ -193,6 +194,38 @@ static void __init probe_page_size_mask(void) } } +static void setup_pcid(void) +{ +#ifdef CONFIG_X86_64 + if (boot_cpu_has(X86_FEATURE_PCID)) { + if (boot_cpu_has(X86_FEATURE_PGE)) { + /* + * This can't be cr4_set_bits_and_update_boot() -- + * the trampoline code can't handle CR4.PCIDE and + * it wouldn't do any good anyway. Despite the name, + * cr4_set_bits_and_update_boot() doesn't actually + * cause the bits in question to remain set all the + * way through the secondary boot asm. + * + * Instead, we brute-force it and set CR4.PCIDE + * manually in start_secondary(). + */ + cr4_set_bits(X86_CR4_PCIDE); + } else { + /* + * flush_tlb_all(), as currently implemented, won't + * work if PCID is on but PGE is not. Since that + * combination doesn't exist on real hardware, there's + * no reason to try to fully support it, but it's + * polite to avoid corrupting data if we're on + * an improperly configured VM. 
+ */ + setup_clear_cpu_cap(X86_FEATURE_PCID); + } + } +#endif +} + #ifdef CONFIG_X86_32 #define NR_RANGE_MR 3 #else /* CONFIG_X86_64 */ @@ -592,6 +625,7 @@ void __init init_mem_mapping(void) unsigned long end; probe_page_size_mask(); + setup_pcid(); #ifdef CONFIG_X86_64 end = max_pfn << PAGE_SHIFT; diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 136422d7d539..048fbe8fc274 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -761,7 +761,7 @@ void __init paging_init(void) * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need * updating. */ -static void update_end_of_memory_vars(u64 start, u64 size) +static void update_end_of_memory_vars(u64 start, u64 size) { unsigned long end_pfn = PFN_UP(start + size); @@ -772,22 +772,30 @@ static void update_end_of_memory_vars(u64 start, u64 size) } } -int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock) +int add_pages(int nid, unsigned long start_pfn, + unsigned long nr_pages, bool want_memblock) { - unsigned long start_pfn = start >> PAGE_SHIFT; - unsigned long nr_pages = size >> PAGE_SHIFT; int ret; - init_memory_mapping(start, start + size); - ret = __add_pages(nid, start_pfn, nr_pages, want_memblock); WARN_ON_ONCE(ret); /* update max_pfn, max_low_pfn and high_memory */ - update_end_of_memory_vars(start, size); + update_end_of_memory_vars(start_pfn << PAGE_SHIFT, + nr_pages << PAGE_SHIFT); return ret; } + +int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock) +{ + unsigned long start_pfn = start >> PAGE_SHIFT; + unsigned long nr_pages = size >> PAGE_SHIFT; + + init_memory_mapping(start, start + size); + + return add_pages(nid, start_pfn, nr_pages, want_memblock); +} EXPORT_SYMBOL_GPL(arch_add_memory); #define PAGE_INUSE 0xFD diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index 0fbd09269757..16c5f37933a2 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c @@ -10,6 +10,8 @@ * published by the Free Software Foundation. */ +#define DISABLE_BRANCH_PROFILING + #include #include #include @@ -37,7 +39,7 @@ static char sme_cmdline_off[] __initdata = "off"; * reside in the .data section so as not to be zeroed out when the .bss * section is later cleared. */ -unsigned long sme_me_mask __section(.data) = 0; +u64 sme_me_mask __section(.data) = 0; EXPORT_SYMBOL_GPL(sme_me_mask); /* Buffer used for early in-place encryption by BSP, no locking needed */ diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 218834a3e9ad..b372f3442bbf 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -426,10 +426,8 @@ int ptep_set_access_flags(struct vm_area_struct *vma, { int changed = !pte_same(*ptep, entry); - if (changed && dirty) { + if (changed && dirty) *ptep = entry; - pte_update(vma->vm_mm, address, ptep); - } return changed; } @@ -486,9 +484,6 @@ int ptep_test_and_clear_young(struct vm_area_struct *vma, ret = test_and_clear_bit(_PAGE_BIT_ACCESSED, (unsigned long *) &ptep->pte); - if (ret) - pte_update(vma->vm_mm, addr, ptep); - return ret; } diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c index 2dab69a706ec..d7bc0eea20a5 100644 --- a/arch/x86/mm/pkeys.c +++ b/arch/x86/mm/pkeys.c @@ -18,7 +18,6 @@ #include /* boot_cpu_has, ... 
*/ #include /* vma_pkey() */ -#include /* fpregs_active() */ int __execute_only_pkey(struct mm_struct *mm) { @@ -45,7 +44,7 @@ int __execute_only_pkey(struct mm_struct *mm) */ preempt_disable(); if (!need_to_set_mm_pkey && - fpregs_active() && + current->thread.fpu.initialized && !__pkru_allows_read(read_pkru(), execute_only_pkey)) { preempt_enable(); return execute_only_pkey; diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index ce104b962a17..49d9778376d7 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -121,8 +121,27 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, * hypothetical buggy code that directly switches to swapper_pg_dir * without going through leave_mm() / switch_mm_irqs_off() or that * does something like write_cr3(read_cr3_pa()). + * + * Only do this check if CONFIG_DEBUG_VM=y because __read_cr3() + * isn't free. */ - VM_BUG_ON(__read_cr3() != (__sme_pa(real_prev->pgd) | prev_asid)); +#ifdef CONFIG_DEBUG_VM + if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev, prev_asid))) { + /* + * If we were to BUG here, we'd be very likely to kill + * the system so hard that we don't see the call trace. + * Try to recover instead by ignoring the error and doing + * a global flush to minimize the chance of corruption. + * + * (This is far from being a fully correct recovery. + * Architecturally, the CPU could prefetch something + * back into an incorrect ASID slot and leave it there + * to cause trouble down the road. It's better than + * nothing, though.) + */ + __flush_tlb_all(); + } +#endif if (real_prev == next) { VM_BUG_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) != @@ -152,7 +171,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, */ this_cpu_write(cpu_tlbstate.ctxs[prev_asid].tlb_gen, next_tlb_gen); - write_cr3(__sme_pa(next->pgd) | prev_asid); + write_cr3(build_cr3(next, prev_asid)); trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); } @@ -172,7 +191,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, * mapped in the new pgd, we'll double-fault. Forcibly * map it. */ - unsigned int index = pgd_index(current_stack_pointer()); + unsigned int index = pgd_index(current_stack_pointer); pgd_t *pgd = next->pgd + index; if (unlikely(pgd_none(*pgd))) @@ -196,12 +215,12 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, if (need_flush) { this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id); this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen); - write_cr3(__sme_pa(next->pgd) | new_asid); + write_cr3(build_cr3(next, new_asid)); trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); } else { /* The new ASID is already up to date. */ - write_cr3(__sme_pa(next->pgd) | new_asid | CR3_NOFLUSH); + write_cr3(build_cr3_noflush(next, new_asid)); trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, 0); } @@ -213,6 +232,50 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, switch_ldt(real_prev, next); } +/* + * Call this when reinitializing a CPU. It fixes the following potential + * problems: + * + * - The ASID changed from what cpu_tlbstate thinks it is (most likely + * because the CPU was taken down and came back up with CR3's PCID + * bits clear. CPU hotplug can do this. + * + * - The TLB contains junk in slots corresponding to inactive ASIDs. + * + * - The CPU went so far out to lunch that it may have missed a TLB + * flush. 
+ */ +void initialize_tlbstate_and_flush(void) +{ + int i; + struct mm_struct *mm = this_cpu_read(cpu_tlbstate.loaded_mm); + u64 tlb_gen = atomic64_read(&init_mm.context.tlb_gen); + unsigned long cr3 = __read_cr3(); + + /* Assert that CR3 already references the right mm. */ + WARN_ON((cr3 & CR3_ADDR_MASK) != __pa(mm->pgd)); + + /* + * Assert that CR4.PCIDE is set if needed. (CR4.PCIDE initialization + * doesn't work like other CR4 bits because it can only be set from + * long mode.) + */ + WARN_ON(boot_cpu_has(X86_FEATURE_PCID) && + !(cr4_read_shadow() & X86_CR4_PCIDE)); + + /* Force ASID 0 and force a TLB flush. */ + write_cr3(build_cr3(mm, 0)); + + /* Reinitialize tlbstate. */ + this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0); + this_cpu_write(cpu_tlbstate.next_asid, 1); + this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id); + this_cpu_write(cpu_tlbstate.ctxs[0].tlb_gen, tlb_gen); + + for (i = 1; i < TLB_NR_DYN_ASIDS; i++) + this_cpu_write(cpu_tlbstate.ctxs[i].ctx_id, 0); +} + /* * flush_tlb_func_common()'s memory ordering requirement is that any * TLB fills that happen after we flush the TLB are ordered after we diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index e1324f280e06..0554e8aef4d5 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -94,7 +94,9 @@ static int bpf_size_to_x86_bytes(int bpf_size) #define X86_JNE 0x75 #define X86_JBE 0x76 #define X86_JA 0x77 +#define X86_JL 0x7C #define X86_JGE 0x7D +#define X86_JLE 0x7E #define X86_JG 0x7F static void bpf_flush_icache(void *start, void *end) @@ -282,10 +284,10 @@ static void emit_bpf_tail_call(u8 **pprog) /* if (index >= array->map.max_entries) * goto out; */ - EMIT4(0x48, 0x8B, 0x46, /* mov rax, qword ptr [rsi + 16] */ + EMIT2(0x89, 0xD2); /* mov edx, edx */ + EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */ offsetof(struct bpf_array, map.max_entries)); - EMIT3(0x48, 0x39, 0xD0); /* cmp rax, rdx */ -#define OFFSET1 47 /* number of bytes to jump */ +#define OFFSET1 43 /* number of bytes to jump */ EMIT2(X86_JBE, OFFSET1); /* jbe out */ label1 = cnt; @@ -294,21 +296,20 @@ static void emit_bpf_tail_call(u8 **pprog) */ EMIT2_off32(0x8B, 0x85, 36); /* mov eax, dword ptr [rbp + 36] */ EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ -#define OFFSET2 36 +#define OFFSET2 32 EMIT2(X86_JA, OFFSET2); /* ja out */ label2 = cnt; EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ EMIT2_off32(0x89, 0x85, 36); /* mov dword ptr [rbp + 36], eax */ /* prog = array->ptrs[index]; */ - EMIT4_off32(0x48, 0x8D, 0x84, 0xD6, /* lea rax, [rsi + rdx * 8 + offsetof(...)] */ + EMIT4_off32(0x48, 0x8B, 0x84, 0xD6, /* mov rax, [rsi + rdx * 8 + offsetof(...)] */ offsetof(struct bpf_array, ptrs)); - EMIT3(0x48, 0x8B, 0x00); /* mov rax, qword ptr [rax] */ /* if (prog == NULL) * goto out; */ - EMIT4(0x48, 0x83, 0xF8, 0x00); /* cmp rax, 0 */ + EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */ #define OFFSET3 10 EMIT2(X86_JE, OFFSET3); /* je out */ label3 = cnt; @@ -888,9 +889,13 @@ xadd: if (is_imm8(insn->off)) case BPF_JMP | BPF_JEQ | BPF_X: case BPF_JMP | BPF_JNE | BPF_X: case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JLT | BPF_X: case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JLE | BPF_X: case BPF_JMP | BPF_JSGT | BPF_X: + case BPF_JMP | BPF_JSLT | BPF_X: case BPF_JMP | BPF_JSGE | BPF_X: + case BPF_JMP | BPF_JSLE | BPF_X: /* cmp dst_reg, src_reg */ EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39, add_2reg(0xC0, dst_reg, src_reg)); @@ -911,9 +916,13 @@ xadd: if (is_imm8(insn->off)) 
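Reading aid, not part of the patch: the bpf_jit_comp.c hunks above and below this point teach the x86-64 JIT the four new eBPF conditional jumps (BPF_JLT, BPF_JLE, BPF_JSLT, BPF_JSLE). The predicates they encode, and the x86 condition codes chosen for them, restated as a stand-alone sketch with invented helper names:

#include <stdbool.h>
#include <stdint.h>

/* Unsigned comparisons: the JIT emits JB (0x72) and JBE (0x76). */
static bool bpf_jlt(uint64_t dst, uint64_t src)  { return dst <  src; }
static bool bpf_jle(uint64_t dst, uint64_t src)  { return dst <= src; }

/* Signed comparisons: the JIT emits JL (0x7C) and JLE (0x7E), added above as X86_JL/X86_JLE. */
static bool bpf_jslt(int64_t dst, int64_t src)   { return dst <  src; }
static bool bpf_jsle(int64_t dst, int64_t src)   { return dst <= src; }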
case BPF_JMP | BPF_JEQ | BPF_K: case BPF_JMP | BPF_JNE | BPF_K: case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JLT | BPF_K: case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JLE | BPF_K: case BPF_JMP | BPF_JSGT | BPF_K: + case BPF_JMP | BPF_JSLT | BPF_K: case BPF_JMP | BPF_JSGE | BPF_K: + case BPF_JMP | BPF_JSLE | BPF_K: /* cmp dst_reg, imm8/32 */ EMIT1(add_1mod(0x48, dst_reg)); @@ -935,18 +944,34 @@ xadd: if (is_imm8(insn->off)) /* GT is unsigned '>', JA in x86 */ jmp_cond = X86_JA; break; + case BPF_JLT: + /* LT is unsigned '<', JB in x86 */ + jmp_cond = X86_JB; + break; case BPF_JGE: /* GE is unsigned '>=', JAE in x86 */ jmp_cond = X86_JAE; break; + case BPF_JLE: + /* LE is unsigned '<=', JBE in x86 */ + jmp_cond = X86_JBE; + break; case BPF_JSGT: /* signed '>', GT in x86 */ jmp_cond = X86_JG; break; + case BPF_JSLT: + /* signed '<', LT in x86 */ + jmp_cond = X86_JL; + break; case BPF_JSGE: /* signed '>=', GE in x86 */ jmp_cond = X86_JGE; break; + case BPF_JSLE: + /* signed '<=', LE in x86 */ + jmp_cond = X86_JLE; + break; default: /* to silence gcc warning */ return -EFAULT; } diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c index 11e407489db0..f2228b150faa 100644 --- a/arch/x86/pci/fixup.c +++ b/arch/x86/pci/fixup.c @@ -618,3 +618,20 @@ static void quirk_apple_mbp_poweroff(struct pci_dev *pdev) dev_info(dev, "can't work around MacBook Pro poweroff issue\n"); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8c10, quirk_apple_mbp_poweroff); + +/* + * VMD-enabled root ports will change the source ID for all messages + * to the VMD device. Rather than doing device matching with the source + * ID, the AER driver should traverse the child device tree, reading + * AER registers to find the faulting device. + */ +static void quirk_no_aersid(struct pci_dev *pdev) +{ + /* VMD Domain */ + if (is_vmd(pdev->bus)) + pdev->bus->bus_flags |= PCI_BUS_FLAGS_NO_AERSID; +} +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2030, quirk_no_aersid); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2031, quirk_no_aersid); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2032, quirk_no_aersid); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2033, quirk_no_aersid); diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c index 5a18aedcb341..b901ece278dd 100644 --- a/arch/x86/pci/intel_mid_pci.c +++ b/arch/x86/pci/intel_mid_pci.c @@ -215,16 +215,23 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev) struct irq_alloc_info info; int polarity; int ret; + u8 gsi; if (dev->irq_managed && dev->irq > 0) return 0; + ret = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi); + if (ret < 0) { + dev_warn(&dev->dev, "Failed to read interrupt line: %d\n", ret); + return ret; + } + switch (intel_mid_identify_cpu()) { case INTEL_MID_CPU_CHIP_TANGIER: polarity = IOAPIC_POL_HIGH; /* Special treatment for IRQ0 */ - if (dev->irq == 0) { + if (gsi == 0) { /* * Skip HS UART common registers device since it has * IRQ0 assigned and not used by the kernel. @@ -253,10 +260,11 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev) * MRST only have IOAPIC, the PCI irq lines are 1:1 mapped to * IOAPIC RTE entries, so we just enable RTE for the device. 
*/ - ret = mp_map_gsi_to_irq(dev->irq, IOAPIC_MAP_ALLOC, &info); + ret = mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC, &info); if (ret < 0) return ret; + dev->irq = ret; dev->irq_managed = 1; return 0; diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c index 9bd115484745..0f5f60b14f48 100644 --- a/arch/x86/pci/irq.c +++ b/arch/x86/pci/irq.c @@ -1092,7 +1092,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d) return 0; } -static struct dmi_system_id __initdata pciirq_dmi_table[] = { +static const struct dmi_system_id pciirq_dmi_table[] __initconst = { { .callback = fix_broken_hp_bios_irq9, .ident = "HP Pavilion N5400 Series Laptop", diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 6217b23e85f6..928b6dceeca0 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -1032,25 +1032,6 @@ void __init efi_enter_virtual_mode(void) efi_dump_pagetable(); } -/* - * Convenience functions to obtain memory types and attributes - */ -int efi_mem_type(unsigned long phys_addr) -{ - efi_memory_desc_t *md; - - if (!efi_enabled(EFI_MEMMAP)) - return -ENOTSUPP; - - for_each_efi_memory_desc(md) { - if ((md->phys_addr <= phys_addr) && - (phys_addr < (md->phys_addr + - (md->num_pages << EFI_PAGE_SHIFT)))) - return md->type; - } - return -EINVAL; -} - static int __init arch_parse_efi_cmdline(char *str) { if (!str) { diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bt.c b/arch/x86/platform/intel-mid/device_libs/platform_bt.c index 5a0483e7bf66..dc036e511f48 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_bt.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_bt.c @@ -60,7 +60,7 @@ static int __init tng_bt_sfi_setup(struct bt_sfi_data *ddata) return 0; } -static struct bt_sfi_data tng_bt_sfi_data __initdata = { +static const struct bt_sfi_data tng_bt_sfi_data __initdata = { .setup = tng_bt_sfi_setup, }; diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c index 9e304e2ea4f5..4f5fa65a1011 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c @@ -30,13 +30,13 @@ static int tangier_probe(struct platform_device *pdev) { struct irq_alloc_info info; struct intel_mid_wdt_pdata *pdata = pdev->dev.platform_data; - int gsi, irq; + int gsi = TANGIER_EXT_TIMER0_MSI; + int irq; if (!pdata) return -EINVAL; /* IOAPIC builds identity mapping between GSI and IRQ on MID */ - gsi = pdata->irq; ioapic_set_alloc_attr(&info, cpu_to_node(0), 1, 0); irq = mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC, &info); if (irq < 0) { @@ -44,11 +44,11 @@ static int tangier_probe(struct platform_device *pdev) return irq; } + pdata->irq = irq; return 0; } static struct intel_mid_wdt_pdata tangier_pdata = { - .irq = TANGIER_EXT_TIMER0_MSI, .probe = tangier_probe, }; diff --git a/arch/x86/platform/intel-mid/device_libs/platform_tc35876x.c b/arch/x86/platform/intel-mid/device_libs/platform_tc35876x.c index b1526b95fd43..2905376559f1 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_tc35876x.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_tc35876x.c @@ -11,7 +11,7 @@ */ #include -#include +#include #include /*tc35876x DSI_LVDS bridge chip and panel platform data*/ diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c index 12a272582cdc..86676cec99a1 100644 --- a/arch/x86/platform/intel-mid/intel-mid.c +++ 
b/arch/x86/platform/intel-mid/intel-mid.c @@ -183,6 +183,7 @@ void __init x86_intel_mid_early_setup(void) x86_init.timers.timer_init = intel_mid_time_init; x86_init.timers.setup_percpu_clockev = x86_init_noop; + x86_init.timers.wallclock_init = intel_mid_rtc_init; x86_init.irqs.pre_vector_init = x86_init_noop; @@ -191,7 +192,6 @@ void __init x86_intel_mid_early_setup(void) x86_cpuinit.setup_percpu_clockev = apbt_setup_secondary_clock; x86_platform.calibrate_tsc = intel_mid_calibrate_tsc; - x86_init.timers.wallclock_init = intel_mid_rtc_init; x86_platform.get_nmi_reason = intel_mid_get_nmi_reason; x86_init.pci.init = intel_mid_pci_init; diff --git a/arch/x86/platform/intel-mid/pwr.c b/arch/x86/platform/intel-mid/pwr.c index ef03852ea6e8..49ec5b94c71f 100644 --- a/arch/x86/platform/intel-mid/pwr.c +++ b/arch/x86/platform/intel-mid/pwr.c @@ -444,7 +444,7 @@ static int mid_set_initial_state(struct mid_pwr *pwr, const u32 *states) static int pnw_set_initial_state(struct mid_pwr *pwr) { /* On Penwell SRAM must stay powered on */ - const u32 states[] = { + static const u32 states[] = { 0xf00fffff, /* PM_SSC(0) */ 0xffffffff, /* PM_SSC(1) */ 0xffffffff, /* PM_SSC(2) */ @@ -455,7 +455,7 @@ static int pnw_set_initial_state(struct mid_pwr *pwr) static int tng_set_initial_state(struct mid_pwr *pwr) { - const u32 states[] = { + static const u32 states[] = { 0xffffffff, /* PM_SSC(0) */ 0xffffffff, /* PM_SSC(1) */ 0xffffffff, /* PM_SSC(2) */ diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c index 78459a6d455a..84fcfde53f8f 100644 --- a/arch/x86/power/cpu.c +++ b/arch/x86/power/cpu.c @@ -181,6 +181,7 @@ static void fix_processor_context(void) #endif load_TR_desc(); /* This does ltr */ load_mm_ldt(current->active_mm); /* This does lldt */ + initialize_tlbstate_and_flush(); fpu__resume_cpu(); @@ -427,7 +428,7 @@ static int msr_initialize_bdw(const struct dmi_system_id *d) return msr_init_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id)); } -static struct dmi_system_id msr_save_dmi_table[] = { +static const struct dmi_system_id msr_save_dmi_table[] = { { .callback = msr_initialize_bdw, .ident = "BROADWELL BDX_EP", diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c index f2598d81cd55..f910c514438f 100644 --- a/arch/x86/power/hibernate_64.c +++ b/arch/x86/power/hibernate_64.c @@ -295,7 +295,26 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size) return -EOVERFLOW; rdr->jump_address = (unsigned long)restore_registers; rdr->jump_address_phys = __pa_symbol(restore_registers); - rdr->cr3 = restore_cr3; + + /* + * The restore code fixes up CR3 and CR4 in the following sequence: + * + * [in hibernation asm] + * 1. CR3 <= temporary page tables + * 2. CR4 <= mmu_cr4_features (from the kernel that restores us) + * 3. CR3 <= rdr->cr3 + * 4. CR4 <= mmu_cr4_features (from us, i.e. the image kernel) + * [in restore_processor_state()] + * 5. CR4 <= saved CR4 + * 6. CR3 <= saved CR3 + * + * Our mmu_cr4_features has CR4.PCIDE=0, and toggling + * CR4.PCIDE while CR3's PCID bits are nonzero is illegal, so + * rdr->cr3 needs to point to valid page tables but must not + * have any of the PCID bits set. 
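Reading aid, not part of the patch: the hibernate_64.c comment above motivates the rdr->cr3 masking that follows just below. The CR3 layout it relies on, sketched with invented names: with CR4.PCIDE=1, bits 11:0 of CR3 select the PCID, the bits above them hold the physical address of the top-level page table, and bit 63 is the "do not flush this PCID" hint. Clearing the low 12 bits therefore keeps the same page tables while forcing PCID 0, so CR4.PCIDE can be toggled legally.

#define SKETCH_CR3_PCID_MASK	0xFFFUL		/* CR3 bits 11:0 = PCID */

static unsigned long sketch_strip_pcid(unsigned long cr3)
{
	/* Keep the page-table base, drop the PCID bits. */
	return cr3 & ~SKETCH_CR3_PCID_MASK;
}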
+ */ + rdr->cr3 = restore_cr3 & ~CR3_PCID_MASK; + rdr->magic = RESTORE_MAGIC; hibernation_e820_save(rdr->e820_digest); diff --git a/arch/x86/um/os-Linux/registers.c b/arch/x86/um/os-Linux/registers.c index 28775f55bde2..3c423dfcd78b 100644 --- a/arch/x86/um/os-Linux/registers.c +++ b/arch/x86/um/os-Linux/registers.c @@ -5,6 +5,7 @@ */ #include +#include #include #ifdef __i386__ #include @@ -31,7 +32,7 @@ int save_fp_registers(int pid, unsigned long *fp_regs) if (have_xstate_support) { iov.iov_base = fp_regs; - iov.iov_len = sizeof(struct _xstate); + iov.iov_len = FP_SIZE * sizeof(unsigned long); if (ptrace(PTRACE_GETREGSET, pid, NT_X86_XSTATE, &iov) < 0) return -errno; return 0; @@ -51,10 +52,9 @@ int restore_fp_registers(int pid, unsigned long *fp_regs) { #ifdef PTRACE_SETREGSET struct iovec iov; - if (have_xstate_support) { iov.iov_base = fp_regs; - iov.iov_len = sizeof(struct _xstate); + iov.iov_len = FP_SIZE * sizeof(unsigned long); if (ptrace(PTRACE_SETREGSET, pid, NT_X86_XSTATE, &iov) < 0) return -errno; return 0; @@ -125,13 +125,19 @@ int put_fp_registers(int pid, unsigned long *regs) void arch_init_registers(int pid) { #ifdef PTRACE_GETREGSET - struct _xstate fp_regs; + void * fp_regs; struct iovec iov; - iov.iov_base = &fp_regs; - iov.iov_len = sizeof(struct _xstate); + fp_regs = malloc(FP_SIZE * sizeof(unsigned long)); + if(fp_regs == NULL) + return; + + iov.iov_base = fp_regs; + iov.iov_len = FP_SIZE * sizeof(unsigned long); if (ptrace(PTRACE_GETREGSET, pid, NT_X86_XSTATE, &iov) == 0) have_xstate_support = 1; + + free(fp_regs); #endif } #endif diff --git a/arch/x86/um/os-Linux/tls.c b/arch/x86/um/os-Linux/tls.c index 9d94b3b76c74..ed8ea90967dc 100644 --- a/arch/x86/um/os-Linux/tls.c +++ b/arch/x86/um/os-Linux/tls.c @@ -37,7 +37,7 @@ void check_host_supports_tls(int *supports_tls, int *tls_min) continue; else if (errno == ENOSYS) *supports_tls = 0; - return; + return; } } diff --git a/arch/x86/um/user-offsets.c b/arch/x86/um/user-offsets.c index 02250b2633b8..3099c209546f 100644 --- a/arch/x86/um/user-offsets.c +++ b/arch/x86/um/user-offsets.c @@ -51,7 +51,7 @@ void foo(void) DEFINE(HOST_ORIG_AX, ORIG_EAX); #else #ifdef FP_XSTATE_MAGIC1 - DEFINE(HOST_FP_SIZE, sizeof(struct _xstate) / sizeof(unsigned long)); + DEFINE_LONGS(HOST_FP_SIZE, 2696); #else DEFINE(HOST_FP_SIZE, sizeof(struct _fpstate) / sizeof(unsigned long)); #endif diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index ae2a2e2d6362..69b9deff7e5c 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -1038,7 +1038,6 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = { .read_cr0 = xen_read_cr0, .write_cr0 = xen_write_cr0, - .read_cr4 = native_read_cr4, .write_cr4 = xen_write_cr4, #ifdef CONFIG_X86_64 @@ -1073,7 +1072,6 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = { .alloc_ldt = xen_alloc_ldt, .free_ldt = xen_free_ldt, - .store_idt = native_store_idt, .store_tr = xen_store_tr, .write_ldt_entry = xen_write_ldt_entry, diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 3be06f3caf3c..3e15345abfe7 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -84,7 +84,7 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token, else rmd->mfn++; - rmd->mmu_update->ptr = virt_to_machine(ptep).maddr; + rmd->mmu_update->ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE; rmd->mmu_update->val = pte_val_ma(pte); rmd->mmu_update++; diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index e437714750f8..71495f1a86d7 100644 --- 
a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c @@ -162,26 +162,6 @@ static bool xen_page_pinned(void *ptr) return PagePinned(page); } -void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid) -{ - struct multicall_space mcs; - struct mmu_update *u; - - trace_xen_mmu_set_domain_pte(ptep, pteval, domid); - - mcs = xen_mc_entry(sizeof(*u)); - u = mcs.args; - - /* ptep might be kmapped when using 32-bit HIGHPTE */ - u->ptr = virt_to_machine(ptep).maddr; - u->val = pte_val_ma(pteval); - - MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid); - - xen_mc_issue(PARAVIRT_LAZY_MMU); -} -EXPORT_SYMBOL_GPL(xen_set_domain_pte); - static void xen_extend_mmu_update(const struct mmu_update *update) { struct multicall_space mcs; @@ -1258,21 +1238,16 @@ static void __init xen_pagetable_cleanhighmap(void) * from _brk_limit way up to the max_pfn_mapped (which is the end of * the ramdisk). We continue on, erasing PMD entries that point to page * tables - do note that they are accessible at this stage via __va. - * For good measure we also round up to the PMD - which means that if + * As Xen is aligning the memory end to a 4MB boundary, for good + * measure we also round up to PMD_SIZE * 2 - which means that if * anybody is using __ka address to the initial boot-stack - and try * to use it - they are going to crash. The xen_start_info has been * taken care of already in xen_setup_kernel_pagetable. */ addr = xen_start_info->pt_base; - size = roundup(xen_start_info->nr_pt_frames * PAGE_SIZE, PMD_SIZE); + size = xen_start_info->nr_pt_frames * PAGE_SIZE; - xen_cleanhighmap(addr, addr + size); + xen_cleanhighmap(addr, roundup(addr + size, PMD_SIZE * 2)); xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base)); -#ifdef DEBUG - /* This is superfluous and is not necessary, but you know what - * lets do it. The MODULES_VADDR -> MODULES_END should be clear of - * anything at this stage. */ - xen_cleanhighmap(MODULES_VADDR, roundup(MODULES_VADDR, PUD_SIZE) - 1); -#endif } #endif @@ -2240,7 +2215,7 @@ static void __init xen_write_cr3_init(unsigned long cr3) * not the first page table in the page table pool. * Iterate through the initial page tables to find the real page table base. 
*/ -static phys_addr_t xen_find_pt_base(pmd_t *pmd) +static phys_addr_t __init xen_find_pt_base(pmd_t *pmd) { phys_addr_t pt_base, paddr; unsigned pmdidx; @@ -2429,8 +2404,6 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = { .flush_tlb_single = xen_flush_tlb_single, .flush_tlb_others = xen_flush_tlb_others, - .pte_update = paravirt_nop, - .pgd_alloc = xen_pgd_alloc, .pgd_free = xen_pgd_free, diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index 276da636dd39..6083ba462f35 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -212,8 +212,7 @@ void __ref xen_build_mfn_list_list(void) unsigned int level, topidx, mididx; unsigned long *mid_mfn_p; - if (xen_feature(XENFEAT_auto_translated_physmap) || - xen_start_info->flags & SIF_VIRT_P2M_4TOOLS) + if (xen_start_info->flags & SIF_VIRT_P2M_4TOOLS) return; /* Pre-initialize p2m_top_mfn to be completely missing */ @@ -269,9 +268,6 @@ void __ref xen_build_mfn_list_list(void) void xen_setup_mfn_list_list(void) { - if (xen_feature(XENFEAT_auto_translated_physmap)) - return; - BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info); if (xen_start_info->flags & SIF_VIRT_P2M_4TOOLS) @@ -291,9 +287,6 @@ void __init xen_build_dynamic_phys_to_machine(void) { unsigned long pfn; - if (xen_feature(XENFEAT_auto_translated_physmap)) - return; - xen_p2m_addr = (unsigned long *)xen_start_info->mfn_list; xen_p2m_size = ALIGN(xen_start_info->nr_pages, P2M_PER_PAGE); @@ -540,9 +533,6 @@ int xen_alloc_p2m_entry(unsigned long pfn) unsigned long addr = (unsigned long)(xen_p2m_addr + pfn); unsigned long p2m_pfn; - if (xen_feature(XENFEAT_auto_translated_physmap)) - return 0; - ptep = lookup_address(addr, &level); BUG_ON(!ptep || level != PG_LEVEL_4K); pte_pg = (pte_t *)((unsigned long)ptep & ~(PAGE_SIZE - 1)); @@ -640,9 +630,6 @@ unsigned long __init set_phys_range_identity(unsigned long pfn_s, if (unlikely(pfn_s >= xen_p2m_size)) return 0; - if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) - return pfn_e - pfn_s; - if (pfn_s > pfn_e) return 0; @@ -660,10 +647,6 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn) pte_t *ptep; unsigned int level; - /* don't track P2M changes in autotranslate guests */ - if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) - return true; - if (unlikely(pfn >= xen_p2m_size)) { BUG_ON(mfn != INVALID_P2M_ENTRY); return true; @@ -711,9 +694,6 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, int i, ret = 0; pte_t *pte; - if (xen_feature(XENFEAT_auto_translated_physmap)) - return 0; - if (kmap_ops) { ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, kmap_ops, count); @@ -756,9 +736,6 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, { int i, ret = 0; - if (xen_feature(XENFEAT_auto_translated_physmap)) - return 0; - for (i = 0; i < count; i++) { unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i])); unsigned long pfn = page_to_pfn(pages[i]); diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index c81046323ebc..ac55c02f98e9 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -340,8 +340,6 @@ static void __init xen_do_set_identity_and_remap_chunk( WARN_ON(size == 0); - BUG_ON(xen_feature(XENFEAT_auto_translated_physmap)); - mfn_save = virt_to_mfn(buf); for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn; @@ -1024,8 +1022,7 @@ void __init xen_pvmmu_arch_setup(void) void __init xen_arch_setup(void) { xen_panic_handler_init(); - if (!xen_feature(XENFEAT_auto_translated_physmap)) - xen_pvmmu_arch_setup(); + 
xen_pvmmu_arch_setup(); #ifdef CONFIG_ACPI if (!(xen_start_info->flags & SIF_INITDOMAIN)) { diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h index 30ee8c608853..5b0027d4ecc0 100644 --- a/arch/xtensa/include/asm/processor.h +++ b/arch/xtensa/include/asm/processor.h @@ -208,11 +208,6 @@ struct mm_struct; /* Free all resources held by a thread. */ #define release_thread(thread) do { } while(0) -/* Copy and release all segment info associated with a VM */ -#define copy_segments(p, mm) do { } while(0) -#define release_segments(mm) do { } while(0) -#define forget_segments() do { } while (0) - extern unsigned long get_wchan(struct task_struct *p); #define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc) diff --git a/arch/xtensa/include/uapi/asm/mman.h b/arch/xtensa/include/uapi/asm/mman.h index 24365b30aae9..b15b278aa314 100644 --- a/arch/xtensa/include/uapi/asm/mman.h +++ b/arch/xtensa/include/uapi/asm/mman.h @@ -103,20 +103,12 @@ overrides the coredump filter bits */ #define MADV_DODUMP 17 /* Clear the MADV_NODUMP flag */ +#define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */ +#define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */ + /* compatibility flags */ #define MAP_FILE 0 -/* - * When MAP_HUGETLB is set bits [26:31] encode the log2 of the huge page size. - * This gives us 6 bits, which is enough until someone invents 128 bit address - * spaces. - * - * Assume these are all power of twos. - * When 0 use the default page size. - */ -#define MAP_HUGE_SHIFT 26 -#define MAP_HUGE_MASK 0x3f - #define PKEY_DISABLE_ACCESS 0x1 #define PKEY_DISABLE_WRITE 0x2 #define PKEY_ACCESS_MASK (PKEY_DISABLE_ACCESS |\ diff --git a/arch/xtensa/include/uapi/asm/socket.h b/arch/xtensa/include/uapi/asm/socket.h index 3eed2761c149..220059999e74 100644 --- a/arch/xtensa/include/uapi/asm/socket.h +++ b/arch/xtensa/include/uapi/asm/socket.h @@ -113,4 +113,6 @@ #define SO_PEERGROUPS 59 +#define SO_ZEROCOPY 60 + #endif /* _XTENSA_SOCKET_H */ diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c index 78b2e0db4fb2..ceefb9a706d6 100644 --- a/block/bfq-cgroup.c +++ b/block/bfq-cgroup.c @@ -206,7 +206,7 @@ static void bfqg_get(struct bfq_group *bfqg) bfqg->ref++; } -void bfqg_put(struct bfq_group *bfqg) +static void bfqg_put(struct bfq_group *bfqg) { bfqg->ref--; @@ -385,7 +385,7 @@ static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg) return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq)); } -struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp) +static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp) { struct bfq_group_data *bgd; @@ -395,7 +395,7 @@ struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp) return &bgd->pd; } -void bfq_cpd_init(struct blkcg_policy_data *cpd) +static void bfq_cpd_init(struct blkcg_policy_data *cpd) { struct bfq_group_data *d = cpd_to_bfqgd(cpd); @@ -403,12 +403,12 @@ void bfq_cpd_init(struct blkcg_policy_data *cpd) CGROUP_WEIGHT_DFL : BFQ_WEIGHT_LEGACY_DFL; } -void bfq_cpd_free(struct blkcg_policy_data *cpd) +static void bfq_cpd_free(struct blkcg_policy_data *cpd) { kfree(cpd_to_bfqgd(cpd)); } -struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node) +static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node) { struct bfq_group *bfqg; @@ -426,7 +426,7 @@ struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node) return &bfqg->pd; } -void bfq_pd_init(struct blkg_policy_data *pd) +static void bfq_pd_init(struct blkg_policy_data *pd) { struct blkcg_gq *blkg = pd_to_blkg(pd); struct bfq_group *bfqg = blkg_to_bfqg(blkg); @@ -445,7 +445,7 @@ 
void bfq_pd_init(struct blkg_policy_data *pd) bfqg->rq_pos_tree = RB_ROOT; } -void bfq_pd_free(struct blkg_policy_data *pd) +static void bfq_pd_free(struct blkg_policy_data *pd) { struct bfq_group *bfqg = pd_to_bfqg(pd); @@ -453,7 +453,7 @@ void bfq_pd_free(struct blkg_policy_data *pd) bfqg_put(bfqg); } -void bfq_pd_reset_stats(struct blkg_policy_data *pd) +static void bfq_pd_reset_stats(struct blkg_policy_data *pd) { struct bfq_group *bfqg = pd_to_bfqg(pd); @@ -740,7 +740,7 @@ static void bfq_reparent_active_entities(struct bfq_data *bfqd, * blkio already grabs the queue_lock for us, so no need to use * RCU-based magic */ -void bfq_pd_offline(struct blkg_policy_data *pd) +static void bfq_pd_offline(struct blkg_policy_data *pd) { struct bfq_service_tree *st; struct bfq_group *bfqg = pd_to_bfqg(pd); diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 436b6ca6b175..a4783da90ba8 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -128,7 +128,7 @@ BFQ_BFQQ_FNS(busy); BFQ_BFQQ_FNS(wait_request); BFQ_BFQQ_FNS(non_blocking_wait_rq); BFQ_BFQQ_FNS(fifo_expire); -BFQ_BFQQ_FNS(idle_window); +BFQ_BFQQ_FNS(has_short_ttime); BFQ_BFQQ_FNS(sync); BFQ_BFQQ_FNS(IO_bound); BFQ_BFQQ_FNS(in_large_burst); @@ -239,7 +239,7 @@ static int T_slow[2]; static int T_fast[2]; static int device_speed_thresh[2]; -#define RQ_BIC(rq) ((struct bfq_io_cq *) (rq)->elv.priv[0]) +#define RQ_BIC(rq) icq_to_bic((rq)->elv.priv[0]) #define RQ_BFQQ(rq) ((rq)->elv.priv[1]) struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync) @@ -720,7 +720,7 @@ static void bfq_updated_next_req(struct bfq_data *bfqd, entity->budget = new_budget; bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu", new_budget); - bfq_requeue_bfqq(bfqd, bfqq); + bfq_requeue_bfqq(bfqd, bfqq, false); } } @@ -731,10 +731,10 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd, unsigned int old_wr_coeff = bfqq->wr_coeff; bool busy = bfq_already_existing && bfq_bfqq_busy(bfqq); - if (bic->saved_idle_window) - bfq_mark_bfqq_idle_window(bfqq); + if (bic->saved_has_short_ttime) + bfq_mark_bfqq_has_short_ttime(bfqq); else - bfq_clear_bfqq_idle_window(bfqq); + bfq_clear_bfqq_has_short_ttime(bfqq); if (bic->saved_IO_bound) bfq_mark_bfqq_IO_bound(bfqq); @@ -2012,7 +2012,7 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq) return; bic->saved_ttime = bfqq->ttime; - bic->saved_idle_window = bfq_bfqq_idle_window(bfqq); + bic->saved_has_short_ttime = bfq_bfqq_has_short_ttime(bfqq); bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq); bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq); bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node); @@ -2563,7 +2563,7 @@ static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq) bfq_del_bfqq_busy(bfqd, bfqq, true); } else { - bfq_requeue_bfqq(bfqd, bfqq); + bfq_requeue_bfqq(bfqd, bfqq, true); /* * Resort priority tree of potential close cooperators. 
*/ @@ -3038,8 +3038,8 @@ void bfq_bfqq_expire(struct bfq_data *bfqd, } bfq_log_bfqq(bfqd, bfqq, - "expire (%d, slow %d, num_disp %d, idle_win %d)", reason, - slow, bfqq->dispatched, bfq_bfqq_idle_window(bfqq)); + "expire (%d, slow %d, num_disp %d, short_ttime %d)", reason, + slow, bfqq->dispatched, bfq_bfqq_has_short_ttime(bfqq)); /* * Increase, decrease or leave budget unchanged according to @@ -3114,35 +3114,56 @@ static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq) static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) { struct bfq_data *bfqd = bfqq->bfqd; - bool idling_boosts_thr, idling_boosts_thr_without_issues, + bool rot_without_queueing = + !blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag, + bfqq_sequential_and_IO_bound, + idling_boosts_thr, idling_boosts_thr_without_issues, idling_needed_for_service_guarantees, asymmetric_scenario; if (bfqd->strict_guarantees) return true; + /* + * Idling is performed only if slice_idle > 0. In addition, we + * do not idle if + * (a) bfqq is async + * (b) bfqq is in the idle io prio class: in this case we do + * not idle because we want to minimize the bandwidth that + * queues in this class can steal to higher-priority queues + */ + if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) || + bfq_class_idle(bfqq)) + return false; + + bfqq_sequential_and_IO_bound = !BFQQ_SEEKY(bfqq) && + bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_has_short_ttime(bfqq); + /* * The next variable takes into account the cases where idling * boosts the throughput. * * The value of the variable is computed considering, first, that * idling is virtually always beneficial for the throughput if: - * (a) the device is not NCQ-capable, or - * (b) regardless of the presence of NCQ, the device is rotational - * and the request pattern for bfqq is I/O-bound and sequential. + * (a) the device is not NCQ-capable and rotational, or + * (b) regardless of the presence of NCQ, the device is rotational and + * the request pattern for bfqq is I/O-bound and sequential, or + * (c) regardless of whether it is rotational, the device is + * not NCQ-capable and the request pattern for bfqq is + * I/O-bound and sequential. * * Secondly, and in contrast to the above item (b), idling an * NCQ-capable flash-based device would not boost the * throughput even with sequential I/O; rather it would lower * the throughput in proportion to how fast the device * is. Accordingly, the next variable is true if any of the - * above conditions (a) and (b) is true, and, in particular, - * happens to be false if bfqd is an NCQ-capable flash-based - * device. + * above conditions (a), (b) or (c) is true, and, in + * particular, happens to be false if bfqd is an NCQ-capable + * flash-based device. */ - idling_boosts_thr = !bfqd->hw_tag || - (!blk_queue_nonrot(bfqd->queue) && bfq_bfqq_IO_bound(bfqq) && - bfq_bfqq_idle_window(bfqq)); + idling_boosts_thr = rot_without_queueing || + ((!blk_queue_nonrot(bfqd->queue) || !bfqd->hw_tag) && + bfqq_sequential_and_IO_bound); /* * The value of the next variable, @@ -3313,16 +3334,13 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) asymmetric_scenario && !bfq_bfqq_in_large_burst(bfqq); /* - * We have now all the components we need to compute the return - * value of the function, which is true only if both the following - * conditions hold: - * 1) bfqq is sync, because idling make sense only for sync queues; - * 2) idling either boosts the throughput (without issues), or - * is necessary to preserve service guarantees. 
+ * We have now all the components we need to compute the + * return value of the function, which is true only if idling + * either boosts the throughput (without issues), or is + * necessary to preserve service guarantees. */ - return bfq_bfqq_sync(bfqq) && - (idling_boosts_thr_without_issues || - idling_needed_for_service_guarantees); + return idling_boosts_thr_without_issues || + idling_needed_for_service_guarantees; } /* @@ -3338,10 +3356,7 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) */ static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq) { - struct bfq_data *bfqd = bfqq->bfqd; - - return RB_EMPTY_ROOT(&bfqq->sort_list) && bfqd->bfq_slice_idle != 0 && - bfq_bfqq_may_idle(bfqq); + return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_may_idle(bfqq); } /* @@ -3765,6 +3780,7 @@ bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic) default: dev_err(bfqq->bfqd->queue->backing_dev_info->dev, "bfq: bad prio class %d\n", ioprio_class); + /* fall through */ case IOPRIO_CLASS_NONE: /* * No prio set, inherit CPU scheduling settings. @@ -3783,7 +3799,6 @@ bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic) case IOPRIO_CLASS_IDLE: bfqq->new_ioprio_class = IOPRIO_CLASS_IDLE; bfqq->new_ioprio = 7; - bfq_clear_bfqq_idle_window(bfqq); break; } @@ -3843,8 +3858,14 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, bfq_set_next_ioprio_data(bfqq, bic); if (is_sync) { + /* + * No need to mark as has_short_ttime if in + * idle_class, because no device idling is performed + * for queues in idle class + */ if (!bfq_class_idle(bfqq)) - bfq_mark_bfqq_idle_window(bfqq); + /* tentatively mark as has_short_ttime */ + bfq_mark_bfqq_has_short_ttime(bfqq); bfq_mark_bfqq_sync(bfqq); bfq_mark_bfqq_just_created(bfqq); } else @@ -3985,18 +4006,19 @@ bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq, blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT); } -/* - * Disable idle window if the process thinks too long or seeks so much that - * it doesn't matter. - */ -static void bfq_update_idle_window(struct bfq_data *bfqd, - struct bfq_queue *bfqq, - struct bfq_io_cq *bic) +static void bfq_update_has_short_ttime(struct bfq_data *bfqd, + struct bfq_queue *bfqq, + struct bfq_io_cq *bic) { - int enable_idle; + bool has_short_ttime = true; - /* Don't idle for async or idle io prio class. */ - if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq)) + /* + * No need to update has_short_ttime if bfqq is async or in + * idle io prio class, or if bfq_slice_idle is zero, because + * no device idling is performed for bfqq in this case. + */ + if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq) || + bfqd->bfq_slice_idle == 0) return; /* Idle window just restored, statistics are meaningless. */ @@ -4004,27 +4026,22 @@ static void bfq_update_idle_window(struct bfq_data *bfqd, bfqd->bfq_wr_min_idle_time)) return; - enable_idle = bfq_bfqq_idle_window(bfqq); - + /* Think time is infinite if no process is linked to + * bfqq. 
Otherwise check average think time to + * decide whether to mark as has_short_ttime + */ if (atomic_read(&bic->icq.ioc->active_ref) == 0 || - bfqd->bfq_slice_idle == 0 || - (bfqd->hw_tag && BFQQ_SEEKY(bfqq) && - bfqq->wr_coeff == 1)) - enable_idle = 0; - else if (bfq_sample_valid(bfqq->ttime.ttime_samples)) { - if (bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle && - bfqq->wr_coeff == 1) - enable_idle = 0; - else - enable_idle = 1; - } - bfq_log_bfqq(bfqd, bfqq, "update_idle_window: enable_idle %d", - enable_idle); + (bfq_sample_valid(bfqq->ttime.ttime_samples) && + bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle)) + has_short_ttime = false; - if (enable_idle) - bfq_mark_bfqq_idle_window(bfqq); + bfq_log_bfqq(bfqd, bfqq, "update_has_short_ttime: has_short_ttime %d", + has_short_ttime); + + if (has_short_ttime) + bfq_mark_bfqq_has_short_ttime(bfqq); else - bfq_clear_bfqq_idle_window(bfqq); + bfq_clear_bfqq_has_short_ttime(bfqq); } /* @@ -4040,14 +4057,12 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, bfqq->meta_pending++; bfq_update_io_thinktime(bfqd, bfqq); + bfq_update_has_short_ttime(bfqd, bfqq, bic); bfq_update_io_seektime(bfqd, bfqq, rq); - if (bfqq->entity.service > bfq_max_budget(bfqd) / 8 || - !BFQQ_SEEKY(bfqq)) - bfq_update_idle_window(bfqd, bfqq, bic); bfq_log_bfqq(bfqd, bfqq, - "rq_enqueued: idle_window=%d (seeky %d)", - bfq_bfqq_idle_window(bfqq), BFQQ_SEEKY(bfqq)); + "rq_enqueued: has_short_ttime=%d (seeky %d)", + bfq_bfqq_has_short_ttime(bfqq), BFQQ_SEEKY(bfqq)); bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq); @@ -4787,16 +4802,15 @@ static ssize_t bfq_var_show(unsigned int var, char *page) return sprintf(page, "%u\n", var); } -static ssize_t bfq_var_store(unsigned long *var, const char *page, - size_t count) +static int bfq_var_store(unsigned long *var, const char *page) { unsigned long new_val; int ret = kstrtoul(page, 10, &new_val); - if (ret == 0) - *var = new_val; - - return count; + if (ret) + return ret; + *var = new_val; + return 0; } #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ @@ -4837,19 +4851,23 @@ static ssize_t \ __FUNC(struct elevator_queue *e, const char *page, size_t count) \ { \ struct bfq_data *bfqd = e->elevator_data; \ - unsigned long uninitialized_var(__data); \ - int ret = bfq_var_store(&__data, (page), count); \ - if (__data < (MIN)) \ - __data = (MIN); \ - else if (__data > (MAX)) \ - __data = (MAX); \ + unsigned long __data, __min = (MIN), __max = (MAX); \ + int ret; \ + \ + ret = bfq_var_store(&__data, (page)); \ + if (ret) \ + return ret; \ + if (__data < __min) \ + __data = __min; \ + else if (__data > __max) \ + __data = __max; \ if (__CONV == 1) \ *(__PTR) = msecs_to_jiffies(__data); \ else if (__CONV == 2) \ *(__PTR) = (u64)__data * NSEC_PER_MSEC; \ else \ *(__PTR) = __data; \ - return ret; \ + return count; \ } STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1, INT_MAX, 2); @@ -4865,14 +4883,18 @@ STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 2); static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)\ { \ struct bfq_data *bfqd = e->elevator_data; \ - unsigned long uninitialized_var(__data); \ - int ret = bfq_var_store(&__data, (page), count); \ - if (__data < (MIN)) \ - __data = (MIN); \ - else if (__data > (MAX)) \ - __data = (MAX); \ + unsigned long __data, __min = (MIN), __max = (MAX); \ + int ret; \ + \ + ret = bfq_var_store(&__data, (page)); \ + if (ret) \ + return ret; \ + if (__data < __min) \ + __data = __min; \ + else if 
(__data > __max) \ + __data = __max; \ *(__PTR) = (u64)__data * NSEC_PER_USEC; \ - return ret; \ + return count; \ } USEC_STORE_FUNCTION(bfq_slice_idle_us_store, &bfqd->bfq_slice_idle, 0, UINT_MAX); @@ -4882,8 +4904,12 @@ static ssize_t bfq_max_budget_store(struct elevator_queue *e, const char *page, size_t count) { struct bfq_data *bfqd = e->elevator_data; - unsigned long uninitialized_var(__data); - int ret = bfq_var_store(&__data, (page), count); + unsigned long __data; + int ret; + + ret = bfq_var_store(&__data, (page)); + if (ret) + return ret; if (__data == 0) bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd); @@ -4895,7 +4921,7 @@ static ssize_t bfq_max_budget_store(struct elevator_queue *e, bfqd->bfq_user_max_budget = __data; - return ret; + return count; } /* @@ -4906,8 +4932,12 @@ static ssize_t bfq_timeout_sync_store(struct elevator_queue *e, const char *page, size_t count) { struct bfq_data *bfqd = e->elevator_data; - unsigned long uninitialized_var(__data); - int ret = bfq_var_store(&__data, (page), count); + unsigned long __data; + int ret; + + ret = bfq_var_store(&__data, (page)); + if (ret) + return ret; if (__data < 1) __data = 1; @@ -4918,15 +4948,19 @@ static ssize_t bfq_timeout_sync_store(struct elevator_queue *e, if (bfqd->bfq_user_max_budget == 0) bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd); - return ret; + return count; } static ssize_t bfq_strict_guarantees_store(struct elevator_queue *e, const char *page, size_t count) { struct bfq_data *bfqd = e->elevator_data; - unsigned long uninitialized_var(__data); - int ret = bfq_var_store(&__data, (page), count); + unsigned long __data; + int ret; + + ret = bfq_var_store(&__data, (page)); + if (ret) + return ret; if (__data > 1) __data = 1; @@ -4936,15 +4970,19 @@ static ssize_t bfq_strict_guarantees_store(struct elevator_queue *e, bfqd->strict_guarantees = __data; - return ret; + return count; } static ssize_t bfq_low_latency_store(struct elevator_queue *e, const char *page, size_t count) { struct bfq_data *bfqd = e->elevator_data; - unsigned long uninitialized_var(__data); - int ret = bfq_var_store(&__data, (page), count); + unsigned long __data; + int ret; + + ret = bfq_var_store(&__data, (page)); + if (ret) + return ret; if (__data > 1) __data = 1; @@ -4952,7 +4990,7 @@ static ssize_t bfq_low_latency_store(struct elevator_queue *e, bfq_end_wr(bfqd); bfqd->low_latency = __data; - return ret; + return count; } #define BFQ_ATTR(name) \ @@ -4998,6 +5036,7 @@ static struct elevator_type iosched_bfq_mq = { .elevator_name = "bfq", .elevator_owner = THIS_MODULE, }; +MODULE_ALIAS("bfq-iosched"); static int __init bfq_init(void) { @@ -5048,10 +5087,12 @@ static int __init bfq_init(void) ret = elv_register(&iosched_bfq_mq); if (ret) - goto err_pol_unreg; + goto slab_kill; return 0; +slab_kill: + bfq_slab_kill(); err_pol_unreg: #ifdef CONFIG_BFQ_GROUP_IOSCHED blkcg_policy_unregister(&blkcg_policy_bfq); diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h index 859f0a8c97c8..ac0809c72c98 100644 --- a/block/bfq-iosched.h +++ b/block/bfq-iosched.h @@ -360,11 +360,11 @@ struct bfq_io_cq { uint64_t blkcg_serial_nr; /* the current blkcg serial */ #endif /* - * Snapshot of the idle window before merging; taken to - * remember this value while the queue is merged, so as to be - * able to restore it in case of split. + * Snapshot of the has_short_time flag before merging; taken + * to remember its value while the queue is merged, so as to + * be able to restore it in case of split. 
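Reading aid, not part of the patch: the bfq STORE_FUNCTION / USEC_STORE_FUNCTION hunks above are macro-generated and hard to read through the line continuations. They all converge on the store-hook shape sketched here with an invented name: parse first, propagate the parse error, clamp to the attribute's range, apply, and only then return count to signal that the whole buffer was consumed.

static ssize_t sketch_store(struct elevator_queue *e,
			    const char *page, size_t count)
{
	struct bfq_data *bfqd = e->elevator_data;
	unsigned long val;
	int ret;

	ret = kstrtoul(page, 10, &val);
	if (ret)
		return ret;		/* malformed input: report the error */

	if (val > 1)
		val = 1;		/* clamp to this attribute's range */

	bfqd->low_latency = val;	/* apply the parsed value */

	return count;			/* whole buffer consumed */
}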
*/ - bool saved_idle_window; + bool saved_has_short_ttime; /* * Same purpose as the previous two fields for the I/O bound * classification of a queue. @@ -638,7 +638,7 @@ enum bfqq_state_flags { * without idling the device */ BFQQF_fifo_expire, /* FIFO checked in this slice */ - BFQQF_idle_window, /* slice idling enabled */ + BFQQF_has_short_ttime, /* queue has a short think time */ BFQQF_sync, /* synchronous queue */ BFQQF_IO_bound, /* * bfqq has timed-out at least once @@ -667,7 +667,7 @@ BFQ_BFQQ_FNS(busy); BFQ_BFQQ_FNS(wait_request); BFQ_BFQQ_FNS(non_blocking_wait_rq); BFQ_BFQQ_FNS(fifo_expire); -BFQ_BFQQ_FNS(idle_window); +BFQ_BFQQ_FNS(has_short_ttime); BFQ_BFQQ_FNS(sync); BFQ_BFQQ_FNS(IO_bound); BFQ_BFQQ_FNS(in_large_burst); @@ -817,7 +817,6 @@ extern const int bfq_timeout; struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync); void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync); struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic); -void bfq_requeue_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq); void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq); void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity, struct rb_root *root); @@ -917,7 +916,8 @@ void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd); void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, bool ins_into_idle_tree, bool expiration); void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq); -void bfq_requeue_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq); +void bfq_requeue_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, + bool expiration); void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq, bool expiration); void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq); @@ -929,13 +929,16 @@ void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq); struct bfq_group *bfqq_group(struct bfq_queue *bfqq); #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \ - blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, (bfqq)->pid,\ - bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \ - bfqq_group(bfqq)->blkg_path, ##args); \ + blk_add_cgroup_trace_msg((bfqd)->queue, \ + bfqg_to_blkg(bfqq_group(bfqq))->blkcg, \ + "bfq%d%c " fmt, (bfqq)->pid, \ + bfq_bfqq_sync((bfqq)) ? 'S' : 'A', ##args); \ } while (0) -#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) \ - blk_add_trace_msg((bfqd)->queue, "%s " fmt, (bfqg)->blkg_path, ##args) +#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \ + blk_add_cgroup_trace_msg((bfqd)->queue, \ + bfqg_to_blkg(bfqg)->blkcg, fmt, ##args); \ +} while (0) #else /* CONFIG_BFQ_GROUP_IOSCHED */ diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c index 911aa7431dbe..414ba686a847 100644 --- a/block/bfq-wf2q.c +++ b/block/bfq-wf2q.c @@ -44,7 +44,8 @@ static unsigned int bfq_class_idx(struct bfq_entity *entity) BFQ_DEFAULT_GRP_CLASS - 1; } -static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd); +static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd, + bool expiration); static bool bfq_update_parent_budget(struct bfq_entity *next_in_service); @@ -54,6 +55,8 @@ static bool bfq_update_parent_budget(struct bfq_entity *next_in_service); * @new_entity: if not NULL, pointer to the entity whose activation, * requeueing or repositionig triggered the invocation of * this function. 
+ * @expiration: if true, this function is being invoked after the + * expiration of the in-service entity * * This function is called to update sd->next_in_service, which, in * its turn, may change as a consequence of the insertion or @@ -72,19 +75,20 @@ static bool bfq_update_parent_budget(struct bfq_entity *next_in_service); * entity. */ static bool bfq_update_next_in_service(struct bfq_sched_data *sd, - struct bfq_entity *new_entity) + struct bfq_entity *new_entity, + bool expiration) { struct bfq_entity *next_in_service = sd->next_in_service; bool parent_sched_may_change = false; + bool change_without_lookup = false; /* * If this update is triggered by the activation, requeueing * or repositioning of an entity that does not coincide with * sd->next_in_service, then a full lookup in the active tree * can be avoided. In fact, it is enough to check whether the - * just-modified entity has a higher priority than - * sd->next_in_service, or, even if it has the same priority - * as sd->next_in_service, is eligible and has a lower virtual + * just-modified entity has the same priority as + * sd->next_in_service, is eligible and has a lower virtual * finish time than sd->next_in_service. If this compound * condition holds, then the new entity becomes the new * next_in_service. Otherwise no change is needed. @@ -96,13 +100,12 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd, * set to true, and left as true if * sd->next_in_service is NULL. */ - bool replace_next = true; + change_without_lookup = true; /* * If there is already a next_in_service candidate - * entity, then compare class priorities or timestamps - * to decide whether to replace sd->service_tree with - * new_entity. + * entity, then compare timestamps to decide whether + * to replace sd->service_tree with new_entity. */ if (next_in_service) { unsigned int new_entity_class_idx = @@ -110,32 +113,26 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd, struct bfq_service_tree *st = sd->service_tree + new_entity_class_idx; - /* - * For efficiency, evaluate the most likely - * sub-condition first.
- */ - replace_next = + change_without_lookup = (new_entity_class_idx == bfq_class_idx(next_in_service) && !bfq_gt(new_entity->start, st->vtime) && bfq_gt(next_in_service->finish, - new_entity->finish)) - || - new_entity_class_idx < - bfq_class_idx(next_in_service); + new_entity->finish)); } - if (replace_next) + if (change_without_lookup) next_in_service = new_entity; - } else /* invoked because of a deactivation: lookup needed */ - next_in_service = bfq_lookup_next_entity(sd); + } - if (next_in_service) { + if (!change_without_lookup) /* lookup needed */ + next_in_service = bfq_lookup_next_entity(sd, expiration); + + if (next_in_service) parent_sched_may_change = !sd->next_in_service || bfq_update_parent_budget(next_in_service); - } sd->next_in_service = next_in_service; @@ -1127,10 +1124,12 @@ static void __bfq_activate_requeue_entity(struct bfq_entity *entity, * @requeue: true if this is a requeue, which implies that bfqq is * being expired; thus ALL its ancestors stop being served and must * therefore be requeued + * @expiration: true if this function is being invoked in the expiration path + * of the in-service queue */ static void bfq_activate_requeue_entity(struct bfq_entity *entity, bool non_blocking_wait_rq, - bool requeue) + bool requeue, bool expiration) { struct bfq_sched_data *sd; @@ -1138,7 +1137,8 @@ static void bfq_activate_requeue_entity(struct bfq_entity *entity, sd = entity->sched_data; __bfq_activate_requeue_entity(entity, sd, non_blocking_wait_rq); - if (!bfq_update_next_in_service(sd, entity) && !requeue) + if (!bfq_update_next_in_service(sd, entity, expiration) && + !requeue) break; } } @@ -1194,6 +1194,8 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree) * bfq_deactivate_entity - deactivate an entity representing a bfq_queue. * @entity: the entity to deactivate. * @ins_into_idle_tree: true if the entity can be put into the idle tree + * @expiration: true if this function is being invoked in the expiration path + * of the in-service queue */ static void bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree, @@ -1222,7 +1224,7 @@ static void bfq_deactivate_entity(struct bfq_entity *entity, * then, since entity has just been * deactivated, a new one must be found. */ - bfq_update_next_in_service(sd, NULL); + bfq_update_next_in_service(sd, NULL, expiration); if (sd->next_in_service || sd->in_service_entity) { /* @@ -1281,7 +1283,7 @@ static void bfq_deactivate_entity(struct bfq_entity *entity, __bfq_requeue_entity(entity); sd = entity->sched_data; - if (!bfq_update_next_in_service(sd, entity) && + if (!bfq_update_next_in_service(sd, entity, expiration) && !expiration) /* * next_in_service unchanged or not causing @@ -1416,12 +1418,14 @@ __bfq_lookup_next_entity(struct bfq_service_tree *st, bool in_service) /** * bfq_lookup_next_entity - return the first eligible entity in @sd. * @sd: the sched_data. + * @expiration: true if we are on the expiration path of the in-service queue * * This function is invoked when there has been a change in the trees - * for sd, and we need know what is the new next entity after this - * change. + * for sd, and we need to know what is the new next entity to serve + * after this change. 
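The reworked bfq_update_next_in_service() above skips the full lookup in the active tree only when the just-modified entity belongs to the same I/O class as the current next_in_service, is eligible, and has an earlier virtual finish time. The standalone C model below restates that compound condition; the struct layout, the plain uint64_t timestamps and the helper names are illustrative assumptions, not the kernel's actual types.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct entity {
	unsigned int class_idx;	/* ioprio class index */
	uint64_t start;		/* virtual start time */
	uint64_t finish;	/* virtual finish time */
};

/* bfq-style "greater than" that tolerates wraparound of virtual times */
static bool vt_gt(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) > 0;
}

/*
 * Can the just-activated/requeued entity replace next_in_service without
 * a full lookup?  Yes if there is no candidate yet, or if it has the same
 * class, is eligible (start not after the tree's vtime) and finishes
 * earlier than the current candidate.
 */
static bool change_without_lookup(const struct entity *new_entity,
				  const struct entity *next_in_service,
				  uint64_t st_vtime)
{
	if (!next_in_service)
		return true;

	return new_entity->class_idx == next_in_service->class_idx &&
	       !vt_gt(new_entity->start, st_vtime) &&
	       vt_gt(next_in_service->finish, new_entity->finish);
}

int main(void)
{
	struct entity cur = { .class_idx = 1, .start = 100, .finish = 300 };
	struct entity new = { .class_idx = 1, .start = 110, .finish = 250 };

	printf("replace without lookup: %d\n",
	       change_without_lookup(&new, &cur, 120));
	return 0;
}

For the sample values the program prints 1: the new entity is eligible and finishes earlier, so it becomes next_in_service with no tree walk, which is the fast path the rewritten comment describes.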
*/ -static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd) +static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd, + bool expiration) { struct bfq_service_tree *st = sd->service_tree; struct bfq_service_tree *idle_class_st = st + (BFQ_IOPRIO_CLASSES - 1); @@ -1448,8 +1452,24 @@ static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd) * class, unless the idle class needs to be served. */ for (; class_idx < BFQ_IOPRIO_CLASSES; class_idx++) { + /* + * If expiration is true, then bfq_lookup_next_entity + * is being invoked as a part of the expiration path + * of the in-service queue. In this case, even if + * sd->in_service_entity is not NULL, + * sd->in_service_entity at this point is actually not + * in service any more, and, if needed, has already + * been properly queued or requeued into the right + * tree. The reason why sd->in_service_entity is still + * not NULL here, even if expiration is true, is that + * sd->in_service_entity is reset as a last step in the + * expiration path. So, if expiration is true, tell + * __bfq_lookup_next_entity that there is no + * sd->in_service_entity. + */ entity = __bfq_lookup_next_entity(st + class_idx, - sd->in_service_entity); + sd->in_service_entity && + !expiration); if (entity) break; @@ -1562,7 +1582,7 @@ struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd) for_each_entity(entity) { struct bfq_sched_data *sd = entity->sched_data; - if (!bfq_update_next_in_service(sd, NULL)) + if (!bfq_update_next_in_service(sd, NULL, false)) break; } @@ -1610,16 +1630,17 @@ void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) struct bfq_entity *entity = &bfqq->entity; bfq_activate_requeue_entity(entity, bfq_bfqq_non_blocking_wait_rq(bfqq), - false); + false, false); bfq_clear_bfqq_non_blocking_wait_rq(bfqq); } -void bfq_requeue_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) +void bfq_requeue_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, + bool expiration) { struct bfq_entity *entity = &bfqq->entity; bfq_activate_requeue_entity(entity, false, - bfqq == bfqd->in_service_queue); + bfqq == bfqd->in_service_queue, expiration); } /* diff --git a/block/bio-integrity.c b/block/bio-integrity.c index 9b1ea478577b..5df32907ff3b 100644 --- a/block/bio-integrity.c +++ b/block/bio-integrity.c @@ -146,7 +146,7 @@ int bio_integrity_add_page(struct bio *bio, struct page *page, iv = bip->bip_vec + bip->bip_vcnt; if (bip->bip_vcnt && - bvec_gap_to_prev(bdev_get_queue(bio->bi_bdev), + bvec_gap_to_prev(bio->bi_disk->queue, &bip->bip_vec[bip->bip_vcnt - 1], offset)) return 0; @@ -190,7 +190,7 @@ static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi, static blk_status_t bio_integrity_process(struct bio *bio, struct bvec_iter *proc_iter, integrity_processing_fn *proc_fn) { - struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); + struct blk_integrity *bi = blk_get_integrity(bio->bi_disk); struct blk_integrity_iter iter; struct bvec_iter bviter; struct bio_vec bv; @@ -199,7 +199,7 @@ static blk_status_t bio_integrity_process(struct bio *bio, void *prot_buf = page_address(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset; - iter.disk_name = bio->bi_bdev->bd_disk->disk_name; + iter.disk_name = bio->bi_disk->disk_name; iter.interval = 1 << bi->interval_exp; iter.seed = proc_iter->bi_sector; iter.prot_buf = prot_buf; @@ -236,8 +236,8 @@ static blk_status_t bio_integrity_process(struct bio *bio, bool bio_integrity_prep(struct bio *bio) { struct bio_integrity_payload *bip; -
struct blk_integrity *bi; - struct request_queue *q; + struct blk_integrity *bi = blk_get_integrity(bio->bi_disk); + struct request_queue *q = bio->bi_disk->queue; void *buf; unsigned long start, end; unsigned int len, nr_pages; @@ -245,8 +245,9 @@ bool bio_integrity_prep(struct bio *bio) unsigned int intervals; blk_status_t status; - bi = bdev_get_integrity(bio->bi_bdev); - q = bdev_get_queue(bio->bi_bdev); + if (!bi) + return true; + if (bio_op(bio) != REQ_OP_READ && bio_op(bio) != REQ_OP_WRITE) return true; @@ -257,9 +258,6 @@ bool bio_integrity_prep(struct bio *bio) if (bio_integrity(bio)) return true; - if (bi == NULL) - return true; - if (bio_data_dir(bio) == READ) { if (!bi->profile->verify_fn || !(bi->flags & BLK_INTEGRITY_VERIFY)) @@ -354,7 +352,7 @@ static void bio_integrity_verify_fn(struct work_struct *work) struct bio_integrity_payload *bip = container_of(work, struct bio_integrity_payload, bip_work); struct bio *bio = bip->bip_bio; - struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); + struct blk_integrity *bi = blk_get_integrity(bio->bi_disk); struct bvec_iter iter = bio->bi_iter; /* @@ -387,7 +385,7 @@ static void bio_integrity_verify_fn(struct work_struct *work) */ bool __bio_integrity_endio(struct bio *bio) { - struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); + struct blk_integrity *bi = blk_get_integrity(bio->bi_disk); struct bio_integrity_payload *bip = bio_integrity(bio); if (bio_op(bio) == REQ_OP_READ && !bio->bi_status && @@ -413,7 +411,7 @@ bool __bio_integrity_endio(struct bio *bio) void bio_integrity_advance(struct bio *bio, unsigned int bytes_done) { struct bio_integrity_payload *bip = bio_integrity(bio); - struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); + struct blk_integrity *bi = blk_get_integrity(bio->bi_disk); unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9); bip->bip_iter.bi_sector += bytes_done >> 9; @@ -430,7 +428,7 @@ EXPORT_SYMBOL(bio_integrity_advance); void bio_integrity_trim(struct bio *bio) { struct bio_integrity_payload *bip = bio_integrity(bio); - struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); + struct blk_integrity *bi = blk_get_integrity(bio->bi_disk); bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio)); } diff --git a/block/bio.c b/block/bio.c index 9a63597aaacc..101c2a9b5481 100644 --- a/block/bio.c +++ b/block/bio.c @@ -593,10 +593,10 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src) BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio)); /* - * most users will be overriding ->bi_bdev with a new target, + * most users will be overriding ->bi_disk with a new target, * so we don't set nor calculate new physical/hw segment counts here */ - bio->bi_bdev = bio_src->bi_bdev; + bio->bi_disk = bio_src->bi_disk; bio_set_flag(bio, BIO_CLONED); bio->bi_opf = bio_src->bi_opf; bio->bi_write_hint = bio_src->bi_write_hint; @@ -681,7 +681,7 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask, bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs); if (!bio) return NULL; - bio->bi_bdev = bio_src->bi_bdev; + bio->bi_disk = bio_src->bi_disk; bio->bi_opf = bio_src->bi_opf; bio->bi_write_hint = bio_src->bi_write_hint; bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector; @@ -936,6 +936,10 @@ static void submit_bio_wait_endio(struct bio *bio) * * Simple wrapper around submit_bio(). Returns 0 on success, or the error from * bio_endio() on failure. + * + * WARNING: Unlike how submit_bio() is usually used, this function does not + * consume the bio reference.
The caller must drop the reference + * on its own. */ int submit_bio_wait(struct bio *bio) { @@ -1235,8 +1239,8 @@ struct bio *bio_copy_user_iov(struct request_queue *q, */ bmd->is_our_pages = map_data ? 0 : 1; memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs); - iov_iter_init(&bmd->iter, iter->type, bmd->iov, - iter->nr_segs, iter->count); + bmd->iter = *iter; + bmd->iter.iov = bmd->iov; ret = -ENOMEM; bio = bio_kmalloc(gfp_mask, nr_pages); @@ -1327,6 +1331,7 @@ struct bio *bio_map_user_iov(struct request_queue *q, int ret, offset; struct iov_iter i; struct iovec iov; + struct bio_vec *bvec; iov_for_each(iov, i, *iter) { unsigned long uaddr = (unsigned long) iov.iov_base; @@ -1371,7 +1376,12 @@ struct bio *bio_map_user_iov(struct request_queue *q, ret = get_user_pages_fast(uaddr, local_nr_pages, (iter->type & WRITE) != WRITE, &pages[cur_page]); - if (ret < local_nr_pages) { + if (unlikely(ret < local_nr_pages)) { + for (j = cur_page; j < page_limit; j++) { + if (!pages[j]) + break; + put_page(pages[j]); + } ret = -EFAULT; goto out_unmap; } @@ -1379,6 +1389,7 @@ struct bio *bio_map_user_iov(struct request_queue *q, offset = offset_in_page(uaddr); for (j = cur_page; j < page_limit; j++) { unsigned int bytes = PAGE_SIZE - offset; + unsigned short prev_bi_vcnt = bio->bi_vcnt; if (len <= 0) break; @@ -1393,6 +1404,13 @@ struct bio *bio_map_user_iov(struct request_queue *q, bytes) break; + /* + * check if vector was merged with previous + * drop page reference if needed + */ + if (bio->bi_vcnt == prev_bi_vcnt) + put_page(pages[j]); + len -= bytes; offset = 0; } @@ -1419,10 +1437,8 @@ struct bio *bio_map_user_iov(struct request_queue *q, return bio; out_unmap: - for (j = 0; j < nr_pages; j++) { - if (!pages[j]) - break; - put_page(pages[j]); + bio_for_each_segment_all(bvec, bio, j) { + put_page(bvec->bv_page); } out: kfree(pages); @@ -1732,29 +1748,29 @@ void bio_check_pages_dirty(struct bio *bio) } } -void generic_start_io_acct(int rw, unsigned long sectors, - struct hd_struct *part) +void generic_start_io_acct(struct request_queue *q, int rw, + unsigned long sectors, struct hd_struct *part) { int cpu = part_stat_lock(); - part_round_stats(cpu, part); + part_round_stats(q, cpu, part); part_stat_inc(cpu, part, ios[rw]); part_stat_add(cpu, part, sectors[rw], sectors); - part_inc_in_flight(part, rw); + part_inc_in_flight(q, part, rw); part_stat_unlock(); } EXPORT_SYMBOL(generic_start_io_acct); -void generic_end_io_acct(int rw, struct hd_struct *part, - unsigned long start_time) +void generic_end_io_acct(struct request_queue *q, int rw, + struct hd_struct *part, unsigned long start_time) { unsigned long duration = jiffies - start_time; int cpu = part_stat_lock(); part_stat_add(cpu, part, ticks[rw], duration); - part_round_stats(cpu, part); - part_dec_in_flight(part, rw); + part_round_stats(q, cpu, part); + part_dec_in_flight(q, part, rw); part_stat_unlock(); } @@ -1826,8 +1842,8 @@ void bio_endio(struct bio *bio) goto again; } - if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) { - trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio, + if (bio->bi_disk && bio_flagged(bio, BIO_TRACE_COMPLETION)) { + trace_block_bio_complete(bio->bi_disk->queue, bio, blk_status_to_errno(bio->bi_status)); bio_clear_flag(bio, BIO_TRACE_COMPLETION); } @@ -2085,7 +2101,7 @@ void bio_clone_blkcg_association(struct bio *dst, struct bio *src) if (src->bi_css) WARN_ON(bio_associate_blkcg(dst, src->bi_css)); } - +EXPORT_SYMBOL_GPL(bio_clone_blkcg_association); #endif /* CONFIG_BLK_CGROUP */
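The new warning on submit_bio_wait() above spells out that, unlike plain submit_bio(), the synchronous helper does not consume the caller's reference to the bio. A minimal kernel-style sketch of the resulting calling pattern is shown below; the helper name is made up for illustration, and it simply mirrors the blkdev_issue_flush() hunk later in this series, with the explicit bio_put() the comment calls for.

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical helper: synchronously flush the volatile cache of @bdev. */
static int example_issue_flush(struct block_device *bdev)
{
	struct bio *bio;
	int ret;

	bio = bio_alloc(GFP_KERNEL, 0);
	if (!bio)
		return -ENOMEM;

	bio_set_dev(bio, bdev);		/* helper introduced by this series */
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

	ret = submit_bio_wait(bio);	/* waits, but does not drop the ref */
	bio_put(bio);			/* the caller still owns the reference */

	return ret;
}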
static void __init biovec_init_slabs(void) diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 0480892e97e5..d3f56baee936 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -1067,7 +1067,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css) blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL); if (!blkcg) { ret = ERR_PTR(-ENOMEM); - goto free_blkcg; + goto unlock; } } @@ -1111,8 +1111,10 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css) for (i--; i >= 0; i--) if (blkcg->cpd[i]) blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]); -free_blkcg: - kfree(blkcg); + + if (blkcg != &blkcg_root) + kfree(blkcg); +unlock: mutex_unlock(&blkcg_pol_mutex); return ret; } diff --git a/block/blk-core.c b/block/blk-core.c index dbecbf4a64e0..048be4aa6024 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -280,7 +280,7 @@ EXPORT_SYMBOL(blk_start_queue_async); void blk_start_queue(struct request_queue *q) { lockdep_assert_held(q->queue_lock); - WARN_ON(!irqs_disabled()); + WARN_ON(!in_interrupt() && !irqs_disabled()); WARN_ON_ONCE(q->mq_ops); queue_flag_clear(QUEUE_FLAG_STOPPED, q); @@ -854,6 +854,9 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) kobject_init(&q->kobj, &blk_queue_ktype); +#ifdef CONFIG_BLK_DEV_IO_TRACE + mutex_init(&q->blk_trace_mutex); +#endif mutex_init(&q->sysfs_lock); spin_lock_init(&q->__queue_lock); @@ -1469,15 +1472,10 @@ static void add_acct_request(struct request_queue *q, struct request *rq, __elv_add_request(q, rq, where); } -static void part_round_stats_single(int cpu, struct hd_struct *part, - unsigned long now) +static void part_round_stats_single(struct request_queue *q, int cpu, + struct hd_struct *part, unsigned long now, + unsigned int inflight) { - int inflight; - - if (now == part->stamp) - return; - - inflight = part_in_flight(part); if (inflight) { __part_stat_add(cpu, part, time_in_queue, inflight * (now - part->stamp)); @@ -1488,6 +1486,7 @@ static void part_round_stats_single(int cpu, struct hd_struct *part, /** * part_round_stats() - Round off the performance stats on a struct disk_stats. + * @q: target block queue * @cpu: cpu number for stats access * @part: target partition * @@ -1502,13 +1501,31 @@ static void part_round_stats_single(int cpu, struct hd_struct *part, * /proc/diskstats. This accounts immediately for all queue usage up to * the current jiffies and restarts the counters again. */ -void part_round_stats(int cpu, struct hd_struct *part) +void part_round_stats(struct request_queue *q, int cpu, struct hd_struct *part) { + struct hd_struct *part2 = NULL; unsigned long now = jiffies; + unsigned int inflight[2]; + int stats = 0; - if (part->partno) - part_round_stats_single(cpu, &part_to_disk(part)->part0, now); - part_round_stats_single(cpu, part, now); + if (part->stamp != now) + stats |= 1; + + if (part->partno) { + part2 = &part_to_disk(part)->part0; + if (part2->stamp != now) + stats |= 2; + } + + if (!stats) + return; + + part_in_flight(q, part, inflight); + + if (stats & 2) + part_round_stats_single(q, cpu, part2, now, inflight[1]); + if (stats & 1) + part_round_stats_single(q, cpu, part, now, inflight[0]); } EXPORT_SYMBOL_GPL(part_round_stats); @@ -1896,40 +1913,15 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio) return BLK_QC_T_NONE; } -/* - * If bio->bi_dev is a partition, remap the location - */ -static inline void blk_partition_remap(struct bio *bio) -{ - struct block_device *bdev = bio->bi_bdev; - - /* - * Zone reset does not include bi_size so bio_sectors() is always 0. 
- * Include a test for the reset op code and perform the remap if needed. - */ - if (bdev != bdev->bd_contains && - (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)) { - struct hd_struct *p = bdev->bd_part; - - bio->bi_iter.bi_sector += p->start_sect; - bio->bi_bdev = bdev->bd_contains; - - trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio, - bdev->bd_dev, - bio->bi_iter.bi_sector - p->start_sect); - } -} - static void handle_bad_sector(struct bio *bio) { char b[BDEVNAME_SIZE]; printk(KERN_INFO "attempt to access beyond end of device\n"); printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n", - bdevname(bio->bi_bdev, b), - bio->bi_opf, + bio_devname(bio, b), bio->bi_opf, (unsigned long long)bio_end_sector(bio), - (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9)); + (long long)get_capacity(bio->bi_disk)); } #ifdef CONFIG_FAIL_MAKE_REQUEST @@ -1967,6 +1959,38 @@ static inline bool should_fail_request(struct hd_struct *part, #endif /* CONFIG_FAIL_MAKE_REQUEST */ +/* + * Remap block n of partition p to block n+start(p) of the disk. + */ +static inline int blk_partition_remap(struct bio *bio) +{ + struct hd_struct *p; + int ret = 0; + + /* + * Zone reset does not include bi_size so bio_sectors() is always 0. + * Include a test for the reset op code and perform the remap if needed. + */ + if (!bio->bi_partno || + (!bio_sectors(bio) && bio_op(bio) != REQ_OP_ZONE_RESET)) + return 0; + + rcu_read_lock(); + p = __disk_get_part(bio->bi_disk, bio->bi_partno); + if (likely(p && !should_fail_request(p, bio->bi_iter.bi_size))) { + bio->bi_iter.bi_sector += p->start_sect; + bio->bi_partno = 0; + trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p), + bio->bi_iter.bi_sector - p->start_sect); + } else { + printk("%s: fail for partition %d\n", __func__, bio->bi_partno); + ret = -EIO; + } + rcu_read_unlock(); + + return ret; +} + /* * Check whether this bio extends beyond the end of the device. */ @@ -1978,7 +2002,7 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors) return 0; /* Test device or partition size, when known. */ - maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9; + maxsector = get_capacity(bio->bi_disk); if (maxsector) { sector_t sector = bio->bi_iter.bi_sector; @@ -2003,20 +2027,18 @@ generic_make_request_checks(struct bio *bio) int nr_sectors = bio_sectors(bio); blk_status_t status = BLK_STS_IOERR; char b[BDEVNAME_SIZE]; - struct hd_struct *part; might_sleep(); if (bio_check_eod(bio, nr_sectors)) goto end_io; - q = bdev_get_queue(bio->bi_bdev); + q = bio->bi_disk->queue; if (unlikely(!q)) { printk(KERN_ERR "generic_make_request: Trying to access " "nonexistent block-device %s (%Lu)\n", - bdevname(bio->bi_bdev, b), - (long long) bio->bi_iter.bi_sector); + bio_devname(bio, b), (long long)bio->bi_iter.bi_sector); goto end_io; } @@ -2028,17 +2050,11 @@ generic_make_request_checks(struct bio *bio) if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_rq_based(q)) goto not_supported; - part = bio->bi_bdev->bd_part; - if (should_fail_request(part, bio->bi_iter.bi_size) || - should_fail_request(&part_to_disk(part)->part0, - bio->bi_iter.bi_size)) + if (should_fail_request(&bio->bi_disk->part0, bio->bi_iter.bi_size)) goto end_io; - /* - * If this device has partitions, remap block n - * of partition p to block n+start(p) of the disk. 
- */ - blk_partition_remap(bio); + if (blk_partition_remap(bio)) + goto end_io; if (bio_check_eod(bio, nr_sectors)) goto end_io; @@ -2067,16 +2083,16 @@ generic_make_request_checks(struct bio *bio) goto not_supported; break; case REQ_OP_WRITE_SAME: - if (!bdev_write_same(bio->bi_bdev)) + if (!q->limits.max_write_same_sectors) goto not_supported; break; case REQ_OP_ZONE_REPORT: case REQ_OP_ZONE_RESET: - if (!bdev_is_zoned(bio->bi_bdev)) + if (!blk_queue_is_zoned(q)) goto not_supported; break; case REQ_OP_WRITE_ZEROES: - if (!bdev_write_zeroes_sectors(bio->bi_bdev)) + if (!q->limits.max_write_zeroes_sectors) goto not_supported; break; default: @@ -2183,7 +2199,7 @@ blk_qc_t generic_make_request(struct bio *bio) bio_list_init(&bio_list_on_stack[0]); current->bio_list = bio_list_on_stack; do { - struct request_queue *q = bdev_get_queue(bio->bi_bdev); + struct request_queue *q = bio->bi_disk->queue; if (likely(blk_queue_enter(q, bio->bi_opf & REQ_NOWAIT) == 0)) { struct bio_list lower, same; @@ -2201,7 +2217,7 @@ blk_qc_t generic_make_request(struct bio *bio) bio_list_init(&lower); bio_list_init(&same); while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL) - if (q == bdev_get_queue(bio->bi_bdev)) + if (q == bio->bi_disk->queue) bio_list_add(&same, bio); else bio_list_add(&lower, bio); @@ -2244,7 +2260,7 @@ blk_qc_t submit_bio(struct bio *bio) unsigned int count; if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) - count = bdev_logical_block_size(bio->bi_bdev) >> 9; + count = queue_logical_block_size(bio->bi_disk->queue); else count = bio_sectors(bio); @@ -2261,8 +2277,7 @@ blk_qc_t submit_bio(struct bio *bio) current->comm, task_pid_nr(current), op_is_write(bio_op(bio)) ? "WRITE" : "READ", (unsigned long long)bio->bi_iter.bi_sector, - bdevname(bio->bi_bdev, b), - count); + bio_devname(bio, b), count); } } @@ -2330,7 +2345,12 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request * if (q->mq_ops) { if (blk_queue_io_stat(q)) blk_account_io_start(rq, true); - blk_mq_sched_insert_request(rq, false, true, false, false); + /* + * Since we have a scheduler attached on the top device, + * bypass a potential scheduler on the bottom device for + * insert. + */ + blk_mq_request_bypass_insert(rq); return BLK_STS_OK; } @@ -2431,8 +2451,8 @@ void blk_account_io_done(struct request *req) part_stat_inc(cpu, part, ios[rw]); part_stat_add(cpu, part, ticks[rw], duration); - part_round_stats(cpu, part); - part_dec_in_flight(part, rw); + part_round_stats(req->q, cpu, part); + part_dec_in_flight(req->q, part, rw); hd_struct_put(part); part_stat_unlock(); @@ -2489,8 +2509,8 @@ void blk_account_io_start(struct request *rq, bool new_io) part = &rq->rq_disk->part0; hd_struct_get(part); } - part_round_stats(cpu, part); - part_inc_in_flight(part, rw); + part_round_stats(rq->q, cpu, part); + part_inc_in_flight(rq->q, part, rw); rq->part = part; } @@ -2603,7 +2623,7 @@ struct request *blk_peek_request(struct request_queue *q) } EXPORT_SYMBOL(blk_peek_request); -void blk_dequeue_request(struct request *rq) +static void blk_dequeue_request(struct request *rq) { struct request_queue *q = rq->q; @@ -2630,9 +2650,6 @@ void blk_dequeue_request(struct request *rq) * Description: * Dequeue @req and start timeout timer on it. This hands off the * request to the driver. - * - * Block internal functions which don't want to start timer should - * call blk_dequeue_request(). 
*/ void blk_start_request(struct request *req) { @@ -3035,8 +3052,8 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq, rq->__data_len = bio->bi_iter.bi_size; rq->bio = rq->biotail = bio; - if (bio->bi_bdev) - rq->rq_disk = bio->bi_bdev->bd_disk; + if (bio->bi_disk) + rq->rq_disk = bio->bi_disk; } #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE diff --git a/block/blk-flush.c b/block/blk-flush.c index ed5fe322abba..4938bec8cfef 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c @@ -1,12 +1,12 @@ /* - * Functions to sequence FLUSH and FUA writes. + * Functions to sequence PREFLUSH and FUA writes. * * Copyright (C) 2011 Max Planck Institute for Gravitational Physics * Copyright (C) 2011 Tejun Heo * * This file is released under the GPLv2. * - * REQ_{FLUSH|FUA} requests are decomposed to sequences consisted of three + * REQ_{PREFLUSH|FUA} requests are decomposed to sequences consisted of three * optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request * properties and hardware capability. * @@ -16,9 +16,9 @@ * REQ_FUA means that the data must be on non-volatile media on request * completion. * - * If the device doesn't have writeback cache, FLUSH and FUA don't make any - * difference. The requests are either completed immediately if there's no - * data or executed as normal requests otherwise. + * If the device doesn't have writeback cache, PREFLUSH and FUA don't make any + * difference. The requests are either completed immediately if there's no data + * or executed as normal requests otherwise. * * If the device has writeback cache and supports FUA, REQ_PREFLUSH is * translated to PREFLUSH but REQ_FUA is passed down directly with DATA. @@ -31,7 +31,7 @@ * fq->flush_queue[fq->flush_pending_idx]. Once certain criteria are met, a * REQ_OP_FLUSH is issued and the pending_idx is toggled. When the flush * completes, all the requests which were pending are proceeded to the next - * step. This allows arbitrary merging of different types of FLUSH/FUA + * step. This allows arbitrary merging of different types of PREFLUSH/FUA * requests. * * Currently, the following conditions are used to determine when to issue @@ -47,19 +47,19 @@ * C3. The second condition is ignored if there is a request which has * waited longer than FLUSH_PENDING_TIMEOUT. This is to avoid * starvation in the unlikely case where there are continuous stream of - * FUA (without FLUSH) requests. + * FUA (without PREFLUSH) requests. * * For devices which support FUA, it isn't clear whether C2 (and thus C3) * is beneficial. * - * Note that a sequenced FLUSH/FUA request with DATA is completed twice. + * Note that a sequenced PREFLUSH/FUA request with DATA is completed twice. * Once while executing DATA and again after the whole sequence is * complete. The first completion updates the contained bio but doesn't * finish it so that the bio submitter is notified only after the whole * sequence is complete. This is implemented by testing RQF_FLUSH_SEQ in * req_bio_endio(). * - * The above peculiarity requires that each FLUSH/FUA request has only one + * The above peculiarity requires that each PREFLUSH/FUA request has only one * bio attached to it, which is guaranteed as they aren't allowed to be * merged in the usual way. 
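The reworded header comment above describes how a REQ_PREFLUSH/REQ_FUA write is decomposed into optional PREFLUSH, DATA and POSTFLUSH steps depending on the device's cache capabilities. The standalone sketch below restates that decision table; the REQ_FSEQ_* bit values follow the enum in this file, while the helper itself is a simplified stand-in for the kernel's internal policy code, not a quote of it.

#include <stdbool.h>
#include <stdio.h>

#define REQ_FSEQ_PREFLUSH  (1 << 0)
#define REQ_FSEQ_DATA      (1 << 1)
#define REQ_FSEQ_POSTFLUSH (1 << 2)

/*
 * Which sequence steps does a write need?  With no volatile writeback
 * cache there is nothing to flush; with a cache, PREFLUSH is honoured
 * directly and FUA on non-FUA hardware is emulated by a POSTFLUSH.
 */
static unsigned int flush_policy(bool has_data, bool req_preflush,
				 bool req_fua, bool queue_has_wb_cache,
				 bool queue_has_fua)
{
	unsigned int policy = 0;

	if (has_data)
		policy |= REQ_FSEQ_DATA;

	if (queue_has_wb_cache) {
		if (req_preflush)
			policy |= REQ_FSEQ_PREFLUSH;
		if (req_fua && !queue_has_fua)
			policy |= REQ_FSEQ_POSTFLUSH;
	}
	return policy;
}

int main(void)
{
	/* FUA write, writeback cache, no hardware FUA: all three steps */
	printf("policy = 0x%x\n",
	       flush_policy(true, true, true, true, false));
	return 0;
}

This prints 0x7, the worst case the comment walks through; on a device that advertises FUA the same request would come out as 0x3 (PREFLUSH plus DATA), with REQ_FUA passed down alongside the data write.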
*/ @@ -76,7 +76,7 @@ #include "blk-mq-tag.h" #include "blk-mq-sched.h" -/* FLUSH/FUA sequences */ +/* PREFLUSH/FUA sequences */ enum { REQ_FSEQ_PREFLUSH = (1 << 0), /* pre-flushing in progress */ REQ_FSEQ_DATA = (1 << 1), /* data write in progress */ @@ -148,7 +148,7 @@ static bool blk_flush_queue_rq(struct request *rq, bool add_front) /** * blk_flush_complete_seq - complete flush sequence - * @rq: FLUSH/FUA request being sequenced + * @rq: PREFLUSH/FUA request being sequenced * @fq: flush queue * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero) * @error: whether an error occurred @@ -406,7 +406,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error) } /** - * blk_insert_flush - insert a new FLUSH/FUA request + * blk_insert_flush - insert a new PREFLUSH/FUA request * @rq: request to insert * * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions. @@ -525,7 +525,7 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask, return -ENXIO; bio = bio_alloc(gfp_mask, 0); - bio->bi_bdev = bdev; + bio_set_dev(bio, bdev); bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; ret = submit_bio_wait(bio); diff --git a/block/blk-lib.c b/block/blk-lib.c index 3fe0aec90597..62240f8832ca 100644 --- a/block/blk-lib.c +++ b/block/blk-lib.c @@ -77,7 +77,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, bio = next_bio(bio, 0, gfp_mask); bio->bi_iter.bi_sector = sector; - bio->bi_bdev = bdev; + bio_set_dev(bio, bdev); bio_set_op_attrs(bio, op, 0); bio->bi_iter.bi_size = req_sects << 9; @@ -168,7 +168,7 @@ static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector, while (nr_sects) { bio = next_bio(bio, 1, gfp_mask); bio->bi_iter.bi_sector = sector; - bio->bi_bdev = bdev; + bio_set_dev(bio, bdev); bio->bi_vcnt = 1; bio->bi_io_vec->bv_page = page; bio->bi_io_vec->bv_offset = 0; @@ -241,7 +241,7 @@ static int __blkdev_issue_write_zeroes(struct block_device *bdev, while (nr_sects) { bio = next_bio(bio, 0, gfp_mask); bio->bi_iter.bi_sector = sector; - bio->bi_bdev = bdev; + bio_set_dev(bio, bdev); bio->bi_opf = REQ_OP_WRITE_ZEROES; if (flags & BLKDEV_ZERO_NOUNMAP) bio->bi_opf |= REQ_NOUNMAP; @@ -269,9 +269,9 @@ static int __blkdev_issue_write_zeroes(struct block_device *bdev, */ static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects) { - sector_t bytes = (nr_sects << 9) + PAGE_SIZE - 1; + sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512); - return min(bytes >> PAGE_SHIFT, (sector_t)BIO_MAX_PAGES); + return min(pages, (sector_t)BIO_MAX_PAGES); } /** @@ -323,7 +323,7 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects), gfp_mask); bio->bi_iter.bi_sector = sector; - bio->bi_bdev = bdev; + bio_set_dev(bio, bdev); bio_set_op_attrs(bio, REQ_OP_WRITE, 0); while (nr_sects != 0) { diff --git a/block/blk-merge.c b/block/blk-merge.c index 99038830fb42..aa524cad5bea 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -633,8 +633,8 @@ static void blk_account_io_merge(struct request *req) cpu = part_stat_lock(); part = req->part; - part_round_stats(cpu, part); - part_dec_in_flight(part, rq_data_dir(req)); + part_round_stats(req->q, cpu, part); + part_dec_in_flight(req->q, part, rq_data_dir(req)); hd_struct_put(part); part_stat_unlock(); @@ -786,7 +786,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio) return false; /* must be same device and not a special request */ - if (rq->rq_disk != 
bio->bi_bdev->bd_disk || req_no_special_merge(rq)) + if (rq->rq_disk != bio->bi_disk || req_no_special_merge(rq)) return false; /* only merge integrity protected bio into ditto rq */ diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 4f927a58dff8..de294d775acf 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -48,8 +48,6 @@ static int blk_flags_show(struct seq_file *m, const unsigned long flags, static const char *const blk_queue_flag_name[] = { QUEUE_FLAG_NAME(QUEUED), QUEUE_FLAG_NAME(STOPPED), - QUEUE_FLAG_NAME(SYNCFULL), - QUEUE_FLAG_NAME(ASYNCFULL), QUEUE_FLAG_NAME(DYING), QUEUE_FLAG_NAME(BYPASS), QUEUE_FLAG_NAME(BIDI), @@ -744,7 +742,7 @@ static int blk_mq_debugfs_release(struct inode *inode, struct file *file) return seq_release(inode, file); } -const struct file_operations blk_mq_debugfs_fops = { +static const struct file_operations blk_mq_debugfs_fops = { .open = blk_mq_debugfs_open, .read = seq_read, .write = blk_mq_debugfs_write, @@ -817,10 +815,14 @@ int blk_mq_debugfs_register(struct request_queue *q) goto err; /* - * blk_mq_init_hctx() attempted to do this already, but q->debugfs_dir + * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir * didn't exist yet (because we don't know what to name the directory * until the queue is registered to a gendisk). */ + if (q->elevator && !q->sched_debugfs_dir) + blk_mq_debugfs_register_sched(q); + + /* Similarly, blk_mq_init_hctx() couldn't do this previously. */ queue_for_each_hw_ctx(q, hctx, i) { if (!hctx->debugfs_dir && blk_mq_debugfs_register_hctx(q, hctx)) goto err; diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index d0be72ccb091..6714507aa6c7 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -214,7 +214,11 @@ static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data) bitnr += tags->nr_reserved_tags; rq = tags->rqs[bitnr]; - if (rq->q == hctx->queue) + /* + * We can hit rq == NULL here, because the tagging functions + * test and set the bit before assigning ->rqs[]. + */ + if (rq && rq->q == hctx->queue) iter_data->fn(hctx, rq, iter_data->data, reserved); return true; } @@ -248,9 +252,15 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data) if (!reserved) bitnr += tags->nr_reserved_tags; - rq = tags->rqs[bitnr]; - iter_data->fn(rq, iter_data->data, reserved); + /* + * We can hit rq == NULL here, because the tagging functions + * test and set the bit before assigning ->rqs[].
+ */ + rq = tags->rqs[bitnr]; + if (rq) + iter_data->fn(rq, iter_data->data, reserved); + return true; } @@ -288,11 +298,12 @@ void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset, } EXPORT_SYMBOL(blk_mq_tagset_busy_iter); -int blk_mq_reinit_tagset(struct blk_mq_tag_set *set) +int blk_mq_reinit_tagset(struct blk_mq_tag_set *set, + int (reinit_request)(void *, struct request *)) { int i, j, ret = 0; - if (!set->ops->reinit_request) + if (WARN_ON_ONCE(!reinit_request)) goto out; for (i = 0; i < set->nr_hw_queues; i++) { @@ -305,8 +316,8 @@ int blk_mq_reinit_tagset(struct blk_mq_tag_set *set) if (!tags->static_rqs[j]) continue; - ret = set->ops->reinit_request(set->driver_data, - tags->static_rqs[j]); + ret = reinit_request(set->driver_data, + tags->static_rqs[j]); if (ret) goto out; } diff --git a/block/blk-mq.c b/block/blk-mq.c index 4603b115e234..98a18609755e 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -83,6 +83,41 @@ static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx, sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw); } +struct mq_inflight { + struct hd_struct *part; + unsigned int *inflight; +}; + +static void blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx, + struct request *rq, void *priv, + bool reserved) +{ + struct mq_inflight *mi = priv; + + if (test_bit(REQ_ATOM_STARTED, &rq->atomic_flags) && + !test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags)) { + /* + * index[0] counts the specific partition that was asked + * for. index[1] counts the ones that are active on the + * whole device, so increment that if mi->part is indeed + * a partition, and not a whole device. + */ + if (rq->part == mi->part) + mi->inflight[0]++; + if (mi->part->partno) + mi->inflight[1]++; + } +} + +void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part, + unsigned int inflight[2]) +{ + struct mq_inflight mi = { .part = part, .inflight = inflight, }; + + inflight[0] = inflight[1] = 0; + blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi); +} + void blk_freeze_queue_start(struct request_queue *q) { int freeze_depth; @@ -624,11 +659,10 @@ static void blk_mq_requeue_work(struct work_struct *work) container_of(work, struct request_queue, requeue_work.work); LIST_HEAD(rq_list); struct request *rq, *next; - unsigned long flags; - spin_lock_irqsave(&q->requeue_lock, flags); + spin_lock_irq(&q->requeue_lock); list_splice_init(&q->requeue_list, &rq_list); - spin_unlock_irqrestore(&q->requeue_lock, flags); + spin_unlock_irq(&q->requeue_lock); list_for_each_entry_safe(rq, next, &rq_list, queuelist) { if (!(rq->rq_flags & RQF_SOFTBARRIER)) @@ -1102,9 +1136,19 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) { int srcu_idx; + /* + * We should be running this queue from one of the CPUs that + * are mapped to it. + */ WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) && cpu_online(hctx->next_cpu)); + /* + * We can't run the queue inline with ints disabled. Ensure that + * we catch bad users of this early. + */ + WARN_ON_ONCE(in_interrupt()); + if (!(hctx->flags & BLK_MQ_F_BLOCKING)) { rcu_read_lock(); blk_mq_sched_dispatch_requests(hctx); @@ -1218,7 +1262,7 @@ EXPORT_SYMBOL(blk_mq_queue_stopped); /* * This function is often used for pausing .queue_rq() by driver when * there isn't enough resource or some conditions aren't satisfied, and - * BLK_MQ_RQ_QUEUE_BUSY is usually returned. + * BLK_STS_RESOURCE is usually returned. * * We do not guarantee that dispatch can be drained or blocked * after blk_mq_stop_hw_queue() returns. 
Please use @@ -1235,7 +1279,7 @@ EXPORT_SYMBOL(blk_mq_stop_hw_queue); /* * This function is often used for pausing .queue_rq() by driver when * there isn't enough resource or some conditions aren't satisfied, and - * BLK_MQ_RQ_QUEUE_BUSY is usually returned. + * BLK_STS_RESOURCE is usually returned. * * We do not guarantee that dispatch can be drained or blocked * after blk_mq_stop_hw_queues() returns. Please use @@ -1357,6 +1401,22 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, blk_mq_hctx_mark_pending(hctx, ctx); } +/* + * Should only be used carefully, when the caller knows we want to + * bypass a potential IO scheduler on the target device. + */ +void blk_mq_request_bypass_insert(struct request *rq) +{ + struct blk_mq_ctx *ctx = rq->mq_ctx; + struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu); + + spin_lock(&hctx->lock); + list_add_tail(&rq->queuelist, &hctx->dispatch); + spin_unlock(&hctx->lock); + + blk_mq_run_hw_queue(hctx, false); +} + void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, struct list_head *list) diff --git a/block/blk-mq.h b/block/blk-mq.h index 60b01c0309bc..ef15b3414da5 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -54,6 +54,7 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, */ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, bool at_head); +void blk_mq_request_bypass_insert(struct request *rq); void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, struct list_head *list); @@ -133,4 +134,7 @@ static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx) return hctx->nr_ctx && hctx->tags; } +void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part, + unsigned int inflight[2]); + #endif diff --git a/block/blk-settings.c b/block/blk-settings.c index be1f115b538b..8559e9563c52 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -68,6 +68,7 @@ EXPORT_SYMBOL_GPL(blk_queue_rq_timeout); void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn) { + WARN_ON_ONCE(q->mq_ops); q->rq_timed_out_fn = fn; } EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out); diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 27aceab1cc31..b8362c0df51d 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -931,7 +931,9 @@ void blk_unregister_queue(struct gendisk *disk) if (WARN_ON(!q)) return; + mutex_lock(&q->sysfs_lock); queue_flag_clear_unlocked(QUEUE_FLAG_REGISTERED, q); + mutex_unlock(&q->sysfs_lock); wbt_exit(q); diff --git a/block/blk-tag.c b/block/blk-tag.c index 2290f65b9d73..e1a9c15eb1b8 100644 --- a/block/blk-tag.c +++ b/block/blk-tag.c @@ -290,7 +290,6 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq) */ clear_bit_unlock(tag, bqt->tag_map); } -EXPORT_SYMBOL(blk_queue_end_tag); /** * blk_queue_start_tag - find a free tag and assign it diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 80f5481fe9f6..17816a028dcb 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -373,10 +373,8 @@ static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw) if (likely(!blk_trace_note_message_enabled(__td->queue))) \ break; \ if ((__tg)) { \ - char __pbuf[128]; \ - \ - blkg_path(tg_to_blkg(__tg), __pbuf, sizeof(__pbuf)); \ - blk_add_trace_msg(__td->queue, "throtl %s " fmt, __pbuf, ##args); \ + blk_add_cgroup_trace_msg(__td->queue, \ + tg_to_blkg(__tg)->blkcg, "throtl " fmt, ##args);\ } else { \ blk_add_trace_msg(__td->queue, "throtl " fmt, ##args); \ 
} \ @@ -1913,11 +1911,11 @@ static void throtl_upgrade_state(struct throtl_data *td) tg->disptime = jiffies - 1; throtl_select_dispatch(sq); - throtl_schedule_next_dispatch(sq, false); + throtl_schedule_next_dispatch(sq, true); } rcu_read_unlock(); throtl_select_dispatch(&td->service_queue); - throtl_schedule_next_dispatch(&td->service_queue, false); + throtl_schedule_next_dispatch(&td->service_queue, true); queue_work(kthrotld_workqueue, &td->dispatch_work); } @@ -2114,14 +2112,9 @@ static inline void throtl_update_latency_buckets(struct throtl_data *td) static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio) { #ifdef CONFIG_BLK_DEV_THROTTLING_LOW - int ret; - - ret = bio_associate_current(bio); - if (ret == 0 || ret == -EBUSY) + if (bio->bi_css) bio->bi_cg_private = tg; blk_stat_set_issue(&bio->bi_issue_stat, bio_sectors(bio)); -#else - bio_associate_current(bio); #endif } diff --git a/block/blk-zoned.c b/block/blk-zoned.c index 3bd15d8095b1..ff57fb51b338 100644 --- a/block/blk-zoned.c +++ b/block/blk-zoned.c @@ -116,7 +116,7 @@ int blkdev_report_zones(struct block_device *bdev, if (!bio) return -ENOMEM; - bio->bi_bdev = bdev; + bio_set_dev(bio, bdev); bio->bi_iter.bi_sector = blk_zone_start(q, sector); bio_set_op_attrs(bio, REQ_OP_ZONE_REPORT, 0); @@ -234,7 +234,7 @@ int blkdev_reset_zones(struct block_device *bdev, bio = bio_alloc(gfp_mask, 0); bio->bi_iter.bi_sector = sector; - bio->bi_bdev = bdev; + bio_set_dev(bio, bdev); bio_set_op_attrs(bio, REQ_OP_ZONE_RESET, 0); ret = submit_bio_wait(bio); diff --git a/block/blk.h b/block/blk.h index 3a3d715bd725..fcb9775b997d 100644 --- a/block/blk.h +++ b/block/blk.h @@ -64,7 +64,6 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq, struct bio *bio); void blk_queue_bypass_start(struct request_queue *q); void blk_queue_bypass_end(struct request_queue *q); -void blk_dequeue_request(struct request *rq); void __blk_queue_free_tags(struct request_queue *q); void blk_freeze_queue(struct request_queue *q); @@ -204,6 +203,8 @@ static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq e->type->ops.sq.elevator_deactivate_req_fn(q, rq); } +struct hd_struct *__disk_get_part(struct gendisk *disk, int partno); + #ifdef CONFIG_FAIL_IO_TIMEOUT int blk_should_fake_timeout(struct request_queue *); ssize_t part_timeout_show(struct device *, struct device_attribute *, char *); diff --git a/block/bsg-lib.c b/block/bsg-lib.c index dd56d7460cb9..15d25ccd51a5 100644 --- a/block/bsg-lib.c +++ b/block/bsg-lib.c @@ -154,7 +154,6 @@ static int bsg_prepare_job(struct device *dev, struct request *req) failjob_rls_rqst_payload: kfree(job->request_payload.sg_list); failjob_rls_job: - kfree(job); return -ENOMEM; } @@ -208,20 +207,34 @@ static int bsg_init_rq(struct request_queue *q, struct request *req, gfp_t gfp) struct bsg_job *job = blk_mq_rq_to_pdu(req); struct scsi_request *sreq = &job->sreq; - memset(job, 0, sizeof(*job)); + /* called right after the request is allocated for the request_queue */ - scsi_req_init(sreq); - sreq->sense_len = SCSI_SENSE_BUFFERSIZE; - sreq->sense = kzalloc(sreq->sense_len, gfp); + sreq->sense = kzalloc(SCSI_SENSE_BUFFERSIZE, gfp); if (!sreq->sense) return -ENOMEM; + return 0; +} + +static void bsg_initialize_rq(struct request *req) +{ + struct bsg_job *job = blk_mq_rq_to_pdu(req); + struct scsi_request *sreq = &job->sreq; + void *sense = sreq->sense; + + /* called right before the request is given to the request_queue user */ + + memset(job, 0, sizeof(*job)); + + scsi_req_init(sreq); + 
+ sreq->sense = sense; + sreq->sense_len = SCSI_SENSE_BUFFERSIZE; + job->req = req; - job->reply = sreq->sense; + job->reply = sense; job->reply_len = sreq->sense_len; job->dd_data = job + 1; - - return 0; } static void bsg_exit_rq(struct request_queue *q, struct request *req) @@ -239,8 +252,9 @@ static void bsg_exit_rq(struct request_queue *q, struct request *req) * @job_fn: bsg job handler * @dd_job_size: size of LLD data needed for each job */ -struct request_queue *bsg_setup_queue(struct device *dev, char *name, - bsg_job_fn *job_fn, int dd_job_size) +struct request_queue *bsg_setup_queue(struct device *dev, const char *name, + bsg_job_fn *job_fn, int dd_job_size, + void (*release)(struct device *)) { struct request_queue *q; int ret; @@ -251,6 +265,7 @@ struct request_queue *bsg_setup_queue(struct device *dev, char *name, q->cmd_size = sizeof(struct bsg_job) + dd_job_size; q->init_rq_fn = bsg_init_rq; q->exit_rq_fn = bsg_exit_rq; + q->initialize_rq_fn = bsg_initialize_rq; q->request_fn = bsg_request_fn; ret = blk_init_allocated_queue(q); @@ -264,7 +279,7 @@ struct request_queue *bsg_setup_queue(struct device *dev, char *name, blk_queue_softirq_done(q, bsg_softirq_done); blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT); - ret = bsg_register_queue(q, dev, name, NULL); + ret = bsg_register_queue(q, dev, name, release); if (ret) { printk(KERN_ERR "%s: bsg interface failed to " "initialize - register queue\n", dev->kobj.name); diff --git a/block/bsg.c b/block/bsg.c index 37663b664666..ee1335c68de7 100644 --- a/block/bsg.c +++ b/block/bsg.c @@ -932,15 +932,8 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return ret; } - /* - * block device ioctls - */ default: -#if 0 - return ioctl_by_bdev(bd->bdev, cmd, arg); -#else return -ENOTTY; -#endif } } diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 3d5c28945719..9f342ef1ad42 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -93,13 +93,14 @@ struct cfq_ttime { * move this into the elevator for the rq sorting as well. */ struct cfq_rb_root { - struct rb_root rb; - struct rb_node *left; + struct rb_root_cached rb; + struct rb_node *rb_rightmost; unsigned count; u64 min_vdisktime; struct cfq_ttime ttime; }; -#define CFQ_RB_ROOT (struct cfq_rb_root) { .rb = RB_ROOT, \ +#define CFQ_RB_ROOT (struct cfq_rb_root) { .rb = RB_ROOT_CACHED, \ + .rb_rightmost = NULL, \ .ttime = {.last_end_request = ktime_get_ns(),},} /* @@ -656,20 +657,17 @@ static inline void cfqg_put(struct cfq_group *cfqg) } #define cfq_log_cfqq(cfqd, cfqq, fmt, args...) do { \ - char __pbuf[128]; \ - \ - blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf)); \ - blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c %s " fmt, (cfqq)->pid, \ + blk_add_cgroup_trace_msg((cfqd)->queue, \ + cfqg_to_blkg((cfqq)->cfqg)->blkcg, \ + "cfq%d%c%c " fmt, (cfqq)->pid, \ cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \ cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\ - __pbuf, ##args); \ + ##args); \ } while (0) #define cfq_log_cfqg(cfqd, cfqg, fmt, args...) 
do { \ - char __pbuf[128]; \ - \ - blkg_path(cfqg_to_blkg(cfqg), __pbuf, sizeof(__pbuf)); \ - blk_add_trace_msg((cfqd)->queue, "%s " fmt, __pbuf, ##args); \ + blk_add_cgroup_trace_msg((cfqd)->queue, \ + cfqg_to_blkg(cfqg)->blkcg, fmt, ##args); \ } while (0) static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg, @@ -984,10 +982,9 @@ static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime) static void update_min_vdisktime(struct cfq_rb_root *st) { - struct cfq_group *cfqg; + if (!RB_EMPTY_ROOT(&st->rb.rb_root)) { + struct cfq_group *cfqg = rb_entry_cfqg(st->rb.rb_leftmost); - if (st->left) { - cfqg = rb_entry_cfqg(st->left); st->min_vdisktime = max_vdisktime(st->min_vdisktime, cfqg->vdisktime); } @@ -1169,46 +1166,28 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, } } -/* - * The below is leftmost cache rbtree addon - */ static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root) { /* Service tree is empty */ if (!root->count) return NULL; - if (!root->left) - root->left = rb_first(&root->rb); - - if (root->left) - return rb_entry(root->left, struct cfq_queue, rb_node); - - return NULL; + return rb_entry(rb_first_cached(&root->rb), struct cfq_queue, rb_node); } static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root) { - if (!root->left) - root->left = rb_first(&root->rb); - - if (root->left) - return rb_entry_cfqg(root->left); - - return NULL; -} - -static void rb_erase_init(struct rb_node *n, struct rb_root *root) -{ - rb_erase(n, root); - RB_CLEAR_NODE(n); + return rb_entry_cfqg(rb_first_cached(&root->rb)); } static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root) { - if (root->left == n) - root->left = NULL; - rb_erase_init(n, &root->rb); + if (root->rb_rightmost == n) + root->rb_rightmost = rb_prev(n); + + rb_erase_cached(n, &root->rb); + RB_CLEAR_NODE(n); + --root->count; } @@ -1258,29 +1237,30 @@ cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg) static void __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg) { - struct rb_node **node = &st->rb.rb_node; + struct rb_node **node = &st->rb.rb_root.rb_node; struct rb_node *parent = NULL; struct cfq_group *__cfqg; s64 key = cfqg_key(st, cfqg); - int left = 1; + bool leftmost = true, rightmost = true; while (*node != NULL) { parent = *node; __cfqg = rb_entry_cfqg(parent); - if (key < cfqg_key(st, __cfqg)) + if (key < cfqg_key(st, __cfqg)) { node = &parent->rb_left; - else { + rightmost = false; + } else { node = &parent->rb_right; - left = 0; + leftmost = false; } } - if (left) - st->left = &cfqg->rb_node; + if (rightmost) + st->rb_rightmost = &cfqg->rb_node; rb_link_node(&cfqg->rb_node, parent, node); - rb_insert_color(&cfqg->rb_node, &st->rb); + rb_insert_color_cached(&cfqg->rb_node, &st->rb, leftmost); } /* @@ -1381,7 +1361,7 @@ cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg) * so that groups get lesser vtime based on their weights, so that * if group does not loose all if it was not continuously backlogged. 
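The CFQ changes above replace the hand-maintained ->left pointer with rb_root_cached, which tracks the leftmost (earliest) node itself, so only the rightmost node still needs a manual cache for the vdisktime backlogging logic. A condensed kernel-style sketch of the insert/lookup pattern being adopted is shown below; the item structure and function names are illustrative, but the rbtree calls are the same ones the patch switches to.

#include <linux/rbtree.h>

struct item {
	struct rb_node node;
	u64 key;
};

/* Insert @it into @tree, telling rbtree whether it became the leftmost node. */
static void item_insert(struct rb_root_cached *tree, struct item *it)
{
	struct rb_node **p = &tree->rb_root.rb_node, *parent = NULL;
	bool leftmost = true;

	while (*p) {
		struct item *cur = rb_entry(*p, struct item, node);

		parent = *p;
		if (it->key < cur->key) {
			p = &parent->rb_left;
		} else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}
	rb_link_node(&it->node, parent, p);
	rb_insert_color_cached(&it->node, tree, leftmost);
}

/* O(1) access to the smallest key, replacing the old ->left cache. */
static struct item *item_first(struct rb_root_cached *tree)
{
	struct rb_node *n = rb_first_cached(tree);

	return n ? rb_entry(n, struct item, node) : NULL;
}

static void item_erase(struct rb_root_cached *tree, struct item *it)
{
	rb_erase_cached(&it->node, tree);
	RB_CLEAR_NODE(&it->node);
}

A service tree would be declared as struct rb_root_cached tree = RB_ROOT_CACHED; after that, rb_first_cached() is what cfq_rb_first() and update_min_vdisktime() rely on instead of the removed leftmost-caching code.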
*/ - n = rb_last(&st->rb); + n = st->rb_rightmost; if (n) { __cfqg = rb_entry_cfqg(n); cfqg->vdisktime = __cfqg->vdisktime + @@ -2223,14 +2203,14 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq, struct cfq_queue *__cfqq; u64 rb_key; struct cfq_rb_root *st; - int left; + bool leftmost = true; int new_cfqq = 1; u64 now = ktime_get_ns(); st = st_for(cfqq->cfqg, cfqq_class(cfqq), cfqq_type(cfqq)); if (cfq_class_idle(cfqq)) { rb_key = CFQ_IDLE_DELAY; - parent = rb_last(&st->rb); + parent = st->rb_rightmost; if (parent && parent != &cfqq->rb_node) { __cfqq = rb_entry(parent, struct cfq_queue, rb_node); rb_key += __cfqq->rb_key; @@ -2264,10 +2244,9 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq, cfqq->service_tree = NULL; } - left = 1; parent = NULL; cfqq->service_tree = st; - p = &st->rb.rb_node; + p = &st->rb.rb_root.rb_node; while (*p) { parent = *p; __cfqq = rb_entry(parent, struct cfq_queue, rb_node); @@ -2279,16 +2258,13 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq, p = &parent->rb_left; else { p = &parent->rb_right; - left = 0; + leftmost = false; } } - if (left) - st->left = &cfqq->rb_node; - cfqq->rb_key = rb_key; rb_link_node(&cfqq->rb_node, parent, p); - rb_insert_color(&cfqq->rb_node, &st->rb); + rb_insert_color_cached(&cfqq->rb_node, &st->rb, leftmost); st->count++; if (add_front || !new_cfqq) return; @@ -2735,7 +2711,7 @@ static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd) /* There is nothing to dispatch */ if (!st) return NULL; - if (RB_EMPTY_ROOT(&st->rb)) + if (RB_EMPTY_ROOT(&st->rb.rb_root)) return NULL; return cfq_rb_first(st); } @@ -2937,7 +2913,8 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd) * for devices that support queuing, otherwise we still have a problem * with sync vs async workloads. 
*/ - if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag) + if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag && + !cfqd->cfq_group_idle) return; WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list)); @@ -3221,7 +3198,7 @@ static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd) struct cfq_rb_root *st = &cfqd->grp_service_tree; struct cfq_group *cfqg; - if (RB_EMPTY_ROOT(&st->rb)) + if (RB_EMPTY_ROOT(&st->rb.rb_root)) return NULL; cfqg = cfq_rb_first_group(st); update_min_vdisktime(st); @@ -4714,13 +4691,12 @@ cfq_var_show(unsigned int var, char *page) return sprintf(page, "%u\n", var); } -static ssize_t -cfq_var_store(unsigned int *var, const char *page, size_t count) +static void +cfq_var_store(unsigned int *var, const char *page) { char *p = (char *) page; *var = simple_strtoul(p, &p, 10); - return count; } #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ @@ -4766,7 +4742,7 @@ static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) { \ struct cfq_data *cfqd = e->elevator_data; \ unsigned int __data; \ - int ret = cfq_var_store(&__data, (page), count); \ + cfq_var_store(&__data, (page)); \ if (__data < (MIN)) \ __data = (MIN); \ else if (__data > (MAX)) \ @@ -4775,7 +4751,7 @@ static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) *(__PTR) = (u64)__data * NSEC_PER_MSEC; \ else \ *(__PTR) = __data; \ - return ret; \ + return count; \ } STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0); STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, @@ -4800,13 +4776,13 @@ static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) { \ struct cfq_data *cfqd = e->elevator_data; \ unsigned int __data; \ - int ret = cfq_var_store(&__data, (page), count); \ + cfq_var_store(&__data, (page)); \ if (__data < (MIN)) \ __data = (MIN); \ else if (__data > (MAX)) \ __data = (MAX); \ *(__PTR) = (u64)__data * NSEC_PER_USEC; \ - return ret; \ + return count; \ } USEC_STORE_FUNCTION(cfq_slice_idle_us_store, &cfqd->cfq_slice_idle, 0, UINT_MAX); USEC_STORE_FUNCTION(cfq_group_idle_us_store, &cfqd->cfq_group_idle, 0, UINT_MAX); diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c index c68f6bbc0dcd..b83f77460d28 100644 --- a/block/deadline-iosched.c +++ b/block/deadline-iosched.c @@ -373,13 +373,12 @@ deadline_var_show(int var, char *page) return sprintf(page, "%d\n", var); } -static ssize_t -deadline_var_store(int *var, const char *page, size_t count) +static void +deadline_var_store(int *var, const char *page) { char *p = (char *) page; *var = simple_strtol(p, &p, 10); - return count; } #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ @@ -403,7 +402,7 @@ static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) { \ struct deadline_data *dd = e->elevator_data; \ int __data; \ - int ret = deadline_var_store(&__data, (page), count); \ + deadline_var_store(&__data, (page)); \ if (__data < (MIN)) \ __data = (MIN); \ else if (__data > (MAX)) \ @@ -412,7 +411,7 @@ static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) *(__PTR) = msecs_to_jiffies(__data); \ else \ *(__PTR) = __data; \ - return ret; \ + return count; \ } STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1); STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1); diff --git a/block/elevator.c b/block/elevator.c index 4bb2f0c93fa6..153926a90901 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -1055,6 +1055,10 @@ static int 
__elevator_change(struct request_queue *q, const char *name) char elevator_name[ELV_NAME_MAX]; struct elevator_type *e; + /* Make sure queue is not in the middle of being removed */ + if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags)) + return -ENOENT; + /* * Special case for mq, turn off scheduling */ diff --git a/block/genhd.c b/block/genhd.c index 51c1d407d93c..dd305c65ffb0 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -45,6 +45,52 @@ static void disk_add_events(struct gendisk *disk); static void disk_del_events(struct gendisk *disk); static void disk_release_events(struct gendisk *disk); +void part_inc_in_flight(struct request_queue *q, struct hd_struct *part, int rw) +{ + if (q->mq_ops) + return; + + atomic_inc(&part->in_flight[rw]); + if (part->partno) + atomic_inc(&part_to_disk(part)->part0.in_flight[rw]); +} + +void part_dec_in_flight(struct request_queue *q, struct hd_struct *part, int rw) +{ + if (q->mq_ops) + return; + + atomic_dec(&part->in_flight[rw]); + if (part->partno) + atomic_dec(&part_to_disk(part)->part0.in_flight[rw]); +} + +void part_in_flight(struct request_queue *q, struct hd_struct *part, + unsigned int inflight[2]) +{ + if (q->mq_ops) { + blk_mq_in_flight(q, part, inflight); + return; + } + + inflight[0] = atomic_read(&part->in_flight[0]) + + atomic_read(&part->in_flight[1]); + if (part->partno) { + part = &part_to_disk(part)->part0; + inflight[1] = atomic_read(&part->in_flight[0]) + + atomic_read(&part->in_flight[1]); + } +} + +struct hd_struct *__disk_get_part(struct gendisk *disk, int partno) +{ + struct disk_part_tbl *ptbl = rcu_dereference(disk->part_tbl); + + if (unlikely(partno < 0 || partno >= ptbl->len)) + return NULL; + return rcu_dereference(ptbl->part[partno]); +} + /** * disk_get_part - get partition * @disk: disk to look partition from @@ -61,21 +107,12 @@ static void disk_release_events(struct gendisk *disk); */ struct hd_struct *disk_get_part(struct gendisk *disk, int partno) { - struct hd_struct *part = NULL; - struct disk_part_tbl *ptbl; - - if (unlikely(partno < 0)) - return NULL; + struct hd_struct *part; rcu_read_lock(); - - ptbl = rcu_dereference(disk->part_tbl); - if (likely(partno < ptbl->len)) { - part = rcu_dereference(ptbl->part[partno]); - if (part) - get_device(part_to_dev(part)); - } - + part = __disk_get_part(disk, partno); + if (part) + get_device(part_to_dev(part)); rcu_read_unlock(); return part; @@ -1098,12 +1135,13 @@ static const struct attribute_group *disk_attr_groups[] = { * original ptbl is freed using RCU callback. * * LOCKING: - * Matching bd_mutx locked. + * Matching bd_mutex locked or the caller is the only user of @disk. */ static void disk_replace_part_tbl(struct gendisk *disk, struct disk_part_tbl *new_ptbl) { - struct disk_part_tbl *old_ptbl = disk->part_tbl; + struct disk_part_tbl *old_ptbl = + rcu_dereference_protected(disk->part_tbl, 1); rcu_assign_pointer(disk->part_tbl, new_ptbl); @@ -1122,14 +1160,16 @@ static void disk_replace_part_tbl(struct gendisk *disk, * uses RCU to allow unlocked dereferencing for stats and other stuff. * * LOCKING: - * Matching bd_mutex locked, might sleep. + * Matching bd_mutex locked or the caller is the only user of @disk. + * Might sleep. * * RETURNS: * 0 on success, -errno on failure. */ int disk_expand_part_tbl(struct gendisk *disk, int partno) { - struct disk_part_tbl *old_ptbl = disk->part_tbl; + struct disk_part_tbl *old_ptbl = + rcu_dereference_protected(disk->part_tbl, 1); struct disk_part_tbl *new_ptbl; int len = old_ptbl ? 
old_ptbl->len : 0; int i, target; @@ -1212,6 +1252,7 @@ static int diskstats_show(struct seq_file *seqf, void *v) struct disk_part_iter piter; struct hd_struct *hd; char buf[BDEVNAME_SIZE]; + unsigned int inflight[2]; int cpu; /* @@ -1225,8 +1266,9 @@ static int diskstats_show(struct seq_file *seqf, void *v) disk_part_iter_init(&piter, gp, DISK_PITER_INCL_EMPTY_PART0); while ((hd = disk_part_iter_next(&piter))) { cpu = part_stat_lock(); - part_round_stats(cpu, hd); + part_round_stats(gp->queue, cpu, hd); part_stat_unlock(); + part_in_flight(gp->queue, hd, inflight); seq_printf(seqf, "%4d %7d %s %lu %lu %lu " "%u %lu %lu %lu %u %u %u %u\n", MAJOR(part_devt(hd)), MINOR(part_devt(hd)), @@ -1239,7 +1281,7 @@ static int diskstats_show(struct seq_file *seqf, void *v) part_stat_read(hd, merges[WRITE]), part_stat_read(hd, sectors[WRITE]), jiffies_to_msecs(part_stat_read(hd, ticks[WRITE])), - part_in_flight(hd), + inflight[0], jiffies_to_msecs(part_stat_read(hd, io_ticks)), jiffies_to_msecs(part_stat_read(hd, time_in_queue)) ); @@ -1321,6 +1363,14 @@ EXPORT_SYMBOL(alloc_disk); struct gendisk *alloc_disk_node(int minors, int node_id) { struct gendisk *disk; + struct disk_part_tbl *ptbl; + + if (minors > DISK_MAX_PARTS) { + printk(KERN_ERR + "block: can't allocated more than %d partitions\n", + DISK_MAX_PARTS); + minors = DISK_MAX_PARTS; + } disk = kzalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id); if (disk) { @@ -1334,7 +1384,8 @@ struct gendisk *alloc_disk_node(int minors, int node_id) kfree(disk); return NULL; } - disk->part_tbl->part[0] = &disk->part0; + ptbl = rcu_dereference_protected(disk->part_tbl, 1); + rcu_assign_pointer(ptbl->part[0], &disk->part0); /* * set_capacity() and get_capacity() currently don't use diff --git a/block/mq-deadline.c b/block/mq-deadline.c index 1b964a387afe..a1cad4331edd 100644 --- a/block/mq-deadline.c +++ b/block/mq-deadline.c @@ -457,13 +457,12 @@ deadline_var_show(int var, char *page) return sprintf(page, "%d\n", var); } -static ssize_t -deadline_var_store(int *var, const char *page, size_t count) +static void +deadline_var_store(int *var, const char *page) { char *p = (char *) page; *var = simple_strtol(p, &p, 10); - return count; } #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ @@ -487,7 +486,7 @@ static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) { \ struct deadline_data *dd = e->elevator_data; \ int __data; \ - int ret = deadline_var_store(&__data, (page), count); \ + deadline_var_store(&__data, (page)); \ if (__data < (MIN)) \ __data = (MIN); \ else if (__data > (MAX)) \ @@ -496,7 +495,7 @@ static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) *(__PTR) = msecs_to_jiffies(__data); \ else \ *(__PTR) = __data; \ - return ret; \ + return count; \ } STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1); STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1); @@ -660,6 +659,7 @@ static struct elevator_type mq_deadline = { .elevator_name = "mq-deadline", .elevator_owner = THIS_MODULE, }; +MODULE_ALIAS("mq-deadline-iosched"); static int __init deadline_init(void) { diff --git a/block/opal_proto.h b/block/opal_proto.h index f40c9acf8895..e20be8258854 100644 --- a/block/opal_proto.h +++ b/block/opal_proto.h @@ -46,6 +46,7 @@ enum opal_response_token { #define GENERIC_HOST_SESSION_NUM 0x41 #define TPER_SYNC_SUPPORTED 0x01 +#define MBR_ENABLED_MASK 0x10 #define TINY_ATOM_DATA_MASK 0x3F #define TINY_ATOM_SIGNED 0x40 diff --git 
a/block/partition-generic.c b/block/partition-generic.c index c5ec8246e25e..88c555db4e5d 100644 --- a/block/partition-generic.c +++ b/block/partition-generic.c @@ -112,11 +112,14 @@ ssize_t part_stat_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hd_struct *p = dev_to_part(dev); + struct request_queue *q = part_to_disk(p)->queue; + unsigned int inflight[2]; int cpu; cpu = part_stat_lock(); - part_round_stats(cpu, p); + part_round_stats(q, cpu, p); part_stat_unlock(); + part_in_flight(q, p, inflight); return sprintf(buf, "%8lu %8lu %8llu %8u " "%8lu %8lu %8llu %8u " @@ -130,7 +133,7 @@ ssize_t part_stat_show(struct device *dev, part_stat_read(p, merges[WRITE]), (unsigned long long)part_stat_read(p, sectors[WRITE]), jiffies_to_msecs(part_stat_read(p, ticks[WRITE])), - part_in_flight(p), + inflight[0], jiffies_to_msecs(part_stat_read(p, io_ticks)), jiffies_to_msecs(part_stat_read(p, time_in_queue))); } @@ -249,15 +252,20 @@ void __delete_partition(struct percpu_ref *ref) call_rcu(&part->rcu_head, delete_partition_rcu_cb); } +/* + * Must be called either with bd_mutex held, before a disk can be opened or + * after all disk users are gone. + */ void delete_partition(struct gendisk *disk, int partno) { - struct disk_part_tbl *ptbl = disk->part_tbl; + struct disk_part_tbl *ptbl = + rcu_dereference_protected(disk->part_tbl, 1); struct hd_struct *part; if (partno >= ptbl->len) return; - part = ptbl->part[partno]; + part = rcu_dereference_protected(ptbl->part[partno], 1); if (!part) return; @@ -277,6 +285,10 @@ static ssize_t whole_disk_show(struct device *dev, static DEVICE_ATTR(whole_disk, S_IRUSR | S_IRGRP | S_IROTH, whole_disk_show, NULL); +/* + * Must be called either with bd_mutex held, before a disk can be opened or + * after all disk users are gone. 
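For context only (this snippet is not part of the diff): the bd_mutex rule stated in the comments being added here is already satisfied by the BLKPG ioctl path, which takes bd_mutex around add_partition() and delete_partition(), so a user-space caller never handles the lock itself. A minimal sketch of exercising that path, assuming a scratch whole-disk node such as /dev/loop0 and illustrative offsets; error handling is omitted.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/blkpg.h>

int main(void)
{
	struct blkpg_partition part = {
		.start  = 1 * 1024 * 1024,	/* byte offset of the new partition */
		.length = 64 * 1024 * 1024,	/* byte length of the new partition */
		.pno    = 1,
	};
	struct blkpg_ioctl_arg arg = {
		.op      = BLKPG_ADD_PARTITION,
		.datalen = sizeof(part),
		.data    = &part,
	};
	int fd = open("/dev/loop0", O_RDWR);	/* hypothetical whole-disk node */

	if (fd < 0)
		return 1;

	/* Reaches add_partition() with bd_mutex held by the ioctl code. */
	ioctl(fd, BLKPG, &arg);

	/* Tearing it down reaches delete_partition() under the same lock. */
	arg.op = BLKPG_DEL_PARTITION;
	ioctl(fd, BLKPG, &arg);

	close(fd);
	return 0;
}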
+ */ struct hd_struct *add_partition(struct gendisk *disk, int partno, sector_t start, sector_t len, int flags, struct partition_meta_info *info) @@ -292,7 +304,7 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno, err = disk_expand_part_tbl(disk, partno); if (err) return ERR_PTR(err); - ptbl = disk->part_tbl; + ptbl = rcu_dereference_protected(disk->part_tbl, 1); if (ptbl->part[partno]) return ERR_PTR(-EBUSY); @@ -391,7 +403,6 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno, device_del(pdev); out_put: put_device(pdev); - blk_free_devt(devt); return ERR_PTR(err); } diff --git a/block/sed-opal.c b/block/sed-opal.c index 9b30ae5ab843..9ed51d0c6b1d 100644 --- a/block/sed-opal.c +++ b/block/sed-opal.c @@ -80,6 +80,7 @@ struct parsed_resp { struct opal_dev { bool supported; + bool mbr_enabled; void *data; sec_send_recv *send_recv; @@ -283,6 +284,14 @@ static bool check_tper(const void *data) return true; } +static bool check_mbrenabled(const void *data) +{ + const struct d0_locking_features *lfeat = data; + u8 sup_feat = lfeat->supported_features; + + return !!(sup_feat & MBR_ENABLED_MASK); +} + static bool check_sum(const void *data) { const struct d0_single_user_mode *sum = data; @@ -417,6 +426,7 @@ static int opal_discovery0_end(struct opal_dev *dev) u32 hlen = be32_to_cpu(hdr->length); print_buffer(dev->resp, hlen); + dev->mbr_enabled = false; if (hlen > IO_BUFFER_LENGTH - sizeof(*hdr)) { pr_debug("Discovery length overflows buffer (%zu+%u)/%u\n", @@ -442,6 +452,8 @@ static int opal_discovery0_end(struct opal_dev *dev) check_geometry(dev, body); break; case FC_LOCKING: + dev->mbr_enabled = check_mbrenabled(body->features); + break; case FC_ENTERPRISE: case FC_DATASTORE: /* some ignored properties */ @@ -2190,6 +2202,21 @@ static int __opal_lock_unlock(struct opal_dev *dev, return next(dev); } +static int __opal_set_mbr_done(struct opal_dev *dev, struct opal_key *key) +{ + u8 mbr_done_tf = 1; + const struct opal_step mbrdone_step [] = { + { opal_discovery0, }, + { start_admin1LSP_opal_session, key }, + { set_mbr_done, &mbr_done_tf }, + { end_opal_session, }, + { NULL, } + }; + + dev->steps = mbrdone_step; + return next(dev); +} + static int opal_lock_unlock(struct opal_dev *dev, struct opal_lock_unlock *lk_unlk) { @@ -2345,6 +2372,11 @@ bool opal_unlock_from_suspend(struct opal_dev *dev) suspend->unlk.session.sum); was_failure = true; } + if (dev->mbr_enabled) { + ret = __opal_set_mbr_done(dev, &suspend->unlk.session.opal_key); + if (ret) + pr_debug("Failed to set MBR Done in S3 resume\n"); + } } mutex_unlock(&dev->dev_lock); return was_failure; diff --git a/crypto/Kconfig b/crypto/Kconfig index caa770e535a2..0a121f9ddf8e 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1753,6 +1753,8 @@ config CRYPTO_USER_API_AEAD tristate "User-space interface for AEAD cipher algorithms" depends on NET select CRYPTO_AEAD + select CRYPTO_BLKCIPHER + select CRYPTO_NULL select CRYPTO_USER_API help This option enables the user-spaces interface for AEAD diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 92a3d540d920..337cf382718e 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -21,6 +21,7 @@ #include #include #include +#include #include struct alg_type_list { @@ -507,6 +508,696 @@ void af_alg_complete(struct crypto_async_request *req, int err) } EXPORT_SYMBOL_GPL(af_alg_complete); +/** + * af_alg_alloc_tsgl - allocate the TX SGL + * + * @sk socket of connection to user space + * @return: 0 upon success, < 0 upon error + */ +int af_alg_alloc_tsgl(struct sock *sk) 
+{ + struct alg_sock *ask = alg_sk(sk); + struct af_alg_ctx *ctx = ask->private; + struct af_alg_tsgl *sgl; + struct scatterlist *sg = NULL; + + sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl, list); + if (!list_empty(&ctx->tsgl_list)) + sg = sgl->sg; + + if (!sg || sgl->cur >= MAX_SGL_ENTS) { + sgl = sock_kmalloc(sk, sizeof(*sgl) + + sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1), + GFP_KERNEL); + if (!sgl) + return -ENOMEM; + + sg_init_table(sgl->sg, MAX_SGL_ENTS + 1); + sgl->cur = 0; + + if (sg) + sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg); + + list_add_tail(&sgl->list, &ctx->tsgl_list); + } + + return 0; +} +EXPORT_SYMBOL_GPL(af_alg_alloc_tsgl); + +/** + * aead_count_tsgl - Count number of TX SG entries + * + * The counting starts from the beginning of the SGL to @bytes. If + * an offset is provided, the counting of the SG entries starts at the offset. + * + * @sk socket of connection to user space + * @bytes Count the number of SG entries holding given number of bytes. + * @offset Start the counting of SG entries from the given offset. + * @return Number of TX SG entries found given the constraints + */ +unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset) +{ + struct alg_sock *ask = alg_sk(sk); + struct af_alg_ctx *ctx = ask->private; + struct af_alg_tsgl *sgl, *tmp; + unsigned int i; + unsigned int sgl_count = 0; + + if (!bytes) + return 0; + + list_for_each_entry_safe(sgl, tmp, &ctx->tsgl_list, list) { + struct scatterlist *sg = sgl->sg; + + for (i = 0; i < sgl->cur; i++) { + size_t bytes_count; + + /* Skip offset */ + if (offset >= sg[i].length) { + offset -= sg[i].length; + bytes -= sg[i].length; + continue; + } + + bytes_count = sg[i].length - offset; + + offset = 0; + sgl_count++; + + /* If we have seen requested number of bytes, stop */ + if (bytes_count >= bytes) + return sgl_count; + + bytes -= bytes_count; + } + } + + return sgl_count; +} +EXPORT_SYMBOL_GPL(af_alg_count_tsgl); + +/** + * aead_pull_tsgl - Release the specified buffers from TX SGL + * + * If @dst is non-null, reassign the pages to dst. The caller must release + * the pages. If @dst_offset is given only reassign the pages to @dst starting + * at the @dst_offset (byte). The caller must ensure that @dst is large + * enough (e.g. by using af_alg_count_tsgl with the same offset). + * + * @sk socket of connection to user space + * @used Number of bytes to pull from TX SGL + * @dst If non-NULL, buffer is reassigned to dst SGL instead of releasing. The + * caller must release the buffers in dst. + * @dst_offset Reassign the TX SGL from given offset. All buffers before + * reaching the offset is released. + */ +void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst, + size_t dst_offset) +{ + struct alg_sock *ask = alg_sk(sk); + struct af_alg_ctx *ctx = ask->private; + struct af_alg_tsgl *sgl; + struct scatterlist *sg; + unsigned int i, j = 0; + + while (!list_empty(&ctx->tsgl_list)) { + sgl = list_first_entry(&ctx->tsgl_list, struct af_alg_tsgl, + list); + sg = sgl->sg; + + for (i = 0; i < sgl->cur; i++) { + size_t plen = min_t(size_t, used, sg[i].length); + struct page *page = sg_page(sg + i); + + if (!page) + continue; + + /* + * Assumption: caller created af_alg_count_tsgl(len) + * SG entries in dst. 
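Illustration only (not part of the diff): the count-then-pull pairing that the assumption stated just above requires, condensed into a hypothetical helper. The name detach_tail and the parameters processed and taglen are invented for the example; essentially the same sequence appears later in this patch in algif_aead.c when the authentication tag is detached from the TX SGL.

#include <net/sock.h>
#include <crypto/if_alg.h>
#include <linux/scatterlist.h>

/*
 * Detach the trailing @taglen bytes of the next @processed bytes of the
 * socket's TX SGL into a scatterlist owned by the caller.
 */
static struct scatterlist *detach_tail(struct sock *sk, size_t processed,
				       size_t taglen)
{
	/*
	 * Size the destination first: af_alg_pull_tsgl() expects exactly
	 * af_alg_count_tsgl() entries, counted from the same offset.
	 */
	unsigned int n = af_alg_count_tsgl(sk, processed, processed - taglen);
	struct scatterlist *dst;

	if (!n)
		n = 1;
	dst = sock_kmalloc(sk, n * sizeof(*dst), GFP_KERNEL);
	if (!dst)
		return NULL;
	sg_init_table(dst, n);

	/*
	 * Bytes before the offset are released; the trailing pages move into
	 * dst with an extra page reference that the caller must drop.
	 */
	af_alg_pull_tsgl(sk, processed, dst, processed - taglen);
	return dst;
}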
+ */ + if (dst) { + if (dst_offset >= plen) { + /* discard page before offset */ + dst_offset -= plen; + } else { + /* reassign page to dst after offset */ + get_page(page); + sg_set_page(dst + j, page, + plen - dst_offset, + sg[i].offset + dst_offset); + dst_offset = 0; + j++; + } + } + + sg[i].length -= plen; + sg[i].offset += plen; + + used -= plen; + ctx->used -= plen; + + if (sg[i].length) + return; + + put_page(page); + sg_assign_page(sg + i, NULL); + } + + list_del(&sgl->list); + sock_kfree_s(sk, sgl, sizeof(*sgl) + sizeof(sgl->sg[0]) * + (MAX_SGL_ENTS + 1)); + } + + if (!ctx->used) + ctx->merge = 0; +} +EXPORT_SYMBOL_GPL(af_alg_pull_tsgl); + +/** + * af_alg_free_areq_sgls - Release TX and RX SGLs of the request + * + * @areq Request holding the TX and RX SGL + */ +void af_alg_free_areq_sgls(struct af_alg_async_req *areq) +{ + struct sock *sk = areq->sk; + struct alg_sock *ask = alg_sk(sk); + struct af_alg_ctx *ctx = ask->private; + struct af_alg_rsgl *rsgl, *tmp; + struct scatterlist *tsgl; + struct scatterlist *sg; + unsigned int i; + + list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) { + ctx->rcvused -= rsgl->sg_num_bytes; + af_alg_free_sg(&rsgl->sgl); + list_del(&rsgl->list); + if (rsgl != &areq->first_rsgl) + sock_kfree_s(sk, rsgl, sizeof(*rsgl)); + } + + tsgl = areq->tsgl; + for_each_sg(tsgl, sg, areq->tsgl_entries, i) { + if (!sg_page(sg)) + continue; + put_page(sg_page(sg)); + } + + if (areq->tsgl && areq->tsgl_entries) + sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl)); +} +EXPORT_SYMBOL_GPL(af_alg_free_areq_sgls); + +/** + * af_alg_wait_for_wmem - wait for availability of writable memory + * + * @sk socket of connection to user space + * @flags If MSG_DONTWAIT is set, then only report if function would sleep + * @return 0 when writable memory is available, < 0 upon error + */ +int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags) +{ + DEFINE_WAIT_FUNC(wait, woken_wake_function); + int err = -ERESTARTSYS; + long timeout; + + if (flags & MSG_DONTWAIT) + return -EAGAIN; + + sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); + + add_wait_queue(sk_sleep(sk), &wait); + for (;;) { + if (signal_pending(current)) + break; + timeout = MAX_SCHEDULE_TIMEOUT; + if (sk_wait_event(sk, &timeout, af_alg_writable(sk), &wait)) { + err = 0; + break; + } + } + remove_wait_queue(sk_sleep(sk), &wait); + + return err; +} +EXPORT_SYMBOL_GPL(af_alg_wait_for_wmem); + +/** + * af_alg_wmem_wakeup - wakeup caller when writable memory is available + * + * @sk socket of connection to user space + */ +void af_alg_wmem_wakeup(struct sock *sk) +{ + struct socket_wq *wq; + + if (!af_alg_writable(sk)) + return; + + rcu_read_lock(); + wq = rcu_dereference(sk->sk_wq); + if (skwq_has_sleeper(wq)) + wake_up_interruptible_sync_poll(&wq->wait, POLLIN | + POLLRDNORM | + POLLRDBAND); + sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); + rcu_read_unlock(); +} +EXPORT_SYMBOL_GPL(af_alg_wmem_wakeup); + +/** + * af_alg_wait_for_data - wait for availability of TX data + * + * @sk socket of connection to user space + * @flags If MSG_DONTWAIT is set, then only report if function would sleep + * @return 0 when writable memory is available, < 0 upon error + */ +int af_alg_wait_for_data(struct sock *sk, unsigned flags) +{ + DEFINE_WAIT_FUNC(wait, woken_wake_function); + struct alg_sock *ask = alg_sk(sk); + struct af_alg_ctx *ctx = ask->private; + long timeout; + int err = -ERESTARTSYS; + + if (flags & MSG_DONTWAIT) + return -EAGAIN; + + sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); + + add_wait_queue(sk_sleep(sk), &wait); 
+ for (;;) { + if (signal_pending(current)) + break; + timeout = MAX_SCHEDULE_TIMEOUT; + if (sk_wait_event(sk, &timeout, (ctx->used || !ctx->more), + &wait)) { + err = 0; + break; + } + } + remove_wait_queue(sk_sleep(sk), &wait); + + sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); + + return err; +} +EXPORT_SYMBOL_GPL(af_alg_wait_for_data); + +/** + * af_alg_data_wakeup - wakeup caller when new data can be sent to kernel + * + * @sk socket of connection to user space + */ + +void af_alg_data_wakeup(struct sock *sk) +{ + struct alg_sock *ask = alg_sk(sk); + struct af_alg_ctx *ctx = ask->private; + struct socket_wq *wq; + + if (!ctx->used) + return; + + rcu_read_lock(); + wq = rcu_dereference(sk->sk_wq); + if (skwq_has_sleeper(wq)) + wake_up_interruptible_sync_poll(&wq->wait, POLLOUT | + POLLRDNORM | + POLLRDBAND); + sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); + rcu_read_unlock(); +} +EXPORT_SYMBOL_GPL(af_alg_data_wakeup); + +/** + * af_alg_sendmsg - implementation of sendmsg system call handler + * + * The sendmsg system call handler obtains the user data and stores it + * in ctx->tsgl_list. This implies allocation of the required numbers of + * struct af_alg_tsgl. + * + * In addition, the ctx is filled with the information sent via CMSG. + * + * @sock socket of connection to user space + * @msg message from user space + * @size size of message from user space + * @ivsize the size of the IV for the cipher operation to verify that the + * user-space-provided IV has the right size + * @return the number of copied data upon success, < 0 upon error + */ +int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, + unsigned int ivsize) +{ + struct sock *sk = sock->sk; + struct alg_sock *ask = alg_sk(sk); + struct af_alg_ctx *ctx = ask->private; + struct af_alg_tsgl *sgl; + struct af_alg_control con = {}; + long copied = 0; + bool enc = 0; + bool init = 0; + int err = 0; + + if (msg->msg_controllen) { + err = af_alg_cmsg_send(msg, &con); + if (err) + return err; + + init = 1; + switch (con.op) { + case ALG_OP_ENCRYPT: + enc = 1; + break; + case ALG_OP_DECRYPT: + enc = 0; + break; + default: + return -EINVAL; + } + + if (con.iv && con.iv->ivlen != ivsize) + return -EINVAL; + } + + lock_sock(sk); + if (!ctx->more && ctx->used) { + err = -EINVAL; + goto unlock; + } + + if (init) { + ctx->enc = enc; + if (con.iv) + memcpy(ctx->iv, con.iv->iv, ivsize); + + ctx->aead_assoclen = con.aead_assoclen; + } + + while (size) { + struct scatterlist *sg; + size_t len = size; + size_t plen; + + /* use the existing memory in an allocated page */ + if (ctx->merge) { + sgl = list_entry(ctx->tsgl_list.prev, + struct af_alg_tsgl, list); + sg = sgl->sg + sgl->cur - 1; + len = min_t(size_t, len, + PAGE_SIZE - sg->offset - sg->length); + + err = memcpy_from_msg(page_address(sg_page(sg)) + + sg->offset + sg->length, + msg, len); + if (err) + goto unlock; + + sg->length += len; + ctx->merge = (sg->offset + sg->length) & + (PAGE_SIZE - 1); + + ctx->used += len; + copied += len; + size -= len; + continue; + } + + if (!af_alg_writable(sk)) { + err = af_alg_wait_for_wmem(sk, msg->msg_flags); + if (err) + goto unlock; + } + + /* allocate a new page */ + len = min_t(unsigned long, len, af_alg_sndbuf(sk)); + + err = af_alg_alloc_tsgl(sk); + if (err) + goto unlock; + + sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl, + list); + sg = sgl->sg; + if (sgl->cur) + sg_unmark_end(sg + sgl->cur - 1); + + do { + unsigned int i = sgl->cur; + + plen = min_t(size_t, len, PAGE_SIZE); + + sg_assign_page(sg + i, 
alloc_page(GFP_KERNEL)); + if (!sg_page(sg + i)) { + err = -ENOMEM; + goto unlock; + } + + err = memcpy_from_msg(page_address(sg_page(sg + i)), + msg, plen); + if (err) { + __free_page(sg_page(sg + i)); + sg_assign_page(sg + i, NULL); + goto unlock; + } + + sg[i].length = plen; + len -= plen; + ctx->used += plen; + copied += plen; + size -= plen; + sgl->cur++; + } while (len && sgl->cur < MAX_SGL_ENTS); + + if (!size) + sg_mark_end(sg + sgl->cur - 1); + + ctx->merge = plen & (PAGE_SIZE - 1); + } + + err = 0; + + ctx->more = msg->msg_flags & MSG_MORE; + +unlock: + af_alg_data_wakeup(sk); + release_sock(sk); + + return copied ?: err; +} +EXPORT_SYMBOL_GPL(af_alg_sendmsg); + +/** + * af_alg_sendpage - sendpage system call handler + * + * This is a generic implementation of sendpage to fill ctx->tsgl_list. + */ +ssize_t af_alg_sendpage(struct socket *sock, struct page *page, + int offset, size_t size, int flags) +{ + struct sock *sk = sock->sk; + struct alg_sock *ask = alg_sk(sk); + struct af_alg_ctx *ctx = ask->private; + struct af_alg_tsgl *sgl; + int err = -EINVAL; + + if (flags & MSG_SENDPAGE_NOTLAST) + flags |= MSG_MORE; + + lock_sock(sk); + if (!ctx->more && ctx->used) + goto unlock; + + if (!size) + goto done; + + if (!af_alg_writable(sk)) { + err = af_alg_wait_for_wmem(sk, flags); + if (err) + goto unlock; + } + + err = af_alg_alloc_tsgl(sk); + if (err) + goto unlock; + + ctx->merge = 0; + sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl, list); + + if (sgl->cur) + sg_unmark_end(sgl->sg + sgl->cur - 1); + + sg_mark_end(sgl->sg + sgl->cur); + + get_page(page); + sg_set_page(sgl->sg + sgl->cur, page, size, offset); + sgl->cur++; + ctx->used += size; + +done: + ctx->more = flags & MSG_MORE; + +unlock: + af_alg_data_wakeup(sk); + release_sock(sk); + + return err ?: size; +} +EXPORT_SYMBOL_GPL(af_alg_sendpage); + +/** + * af_alg_async_cb - AIO callback handler + * + * This handler cleans up the struct af_alg_async_req upon completion of the + * AIO operation. + * + * The number of bytes to be generated with the AIO operation must be set + * in areq->outlen before the AIO callback handler is invoked. + */ +void af_alg_async_cb(struct crypto_async_request *_req, int err) +{ + struct af_alg_async_req *areq = _req->data; + struct sock *sk = areq->sk; + struct kiocb *iocb = areq->iocb; + unsigned int resultlen; + + lock_sock(sk); + + /* Buffer size written by crypto operation. */ + resultlen = areq->outlen; + + af_alg_free_areq_sgls(areq); + sock_kfree_s(sk, areq, areq->areqlen); + __sock_put(sk); + + iocb->ki_complete(iocb, err ? 
err : resultlen, 0); + + release_sock(sk); +} +EXPORT_SYMBOL_GPL(af_alg_async_cb); + +/** + * af_alg_poll - poll system call handler + */ +unsigned int af_alg_poll(struct file *file, struct socket *sock, + poll_table *wait) +{ + struct sock *sk = sock->sk; + struct alg_sock *ask = alg_sk(sk); + struct af_alg_ctx *ctx = ask->private; + unsigned int mask; + + sock_poll_wait(file, sk_sleep(sk), wait); + mask = 0; + + if (!ctx->more || ctx->used) + mask |= POLLIN | POLLRDNORM; + + if (af_alg_writable(sk)) + mask |= POLLOUT | POLLWRNORM | POLLWRBAND; + + return mask; +} +EXPORT_SYMBOL_GPL(af_alg_poll); + +/** + * af_alg_alloc_areq - allocate struct af_alg_async_req + * + * @sk socket of connection to user space + * @areqlen size of struct af_alg_async_req + crypto_*_reqsize + * @return allocated data structure or ERR_PTR upon error + */ +struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk, + unsigned int areqlen) +{ + struct af_alg_async_req *areq = sock_kmalloc(sk, areqlen, GFP_KERNEL); + + if (unlikely(!areq)) + return ERR_PTR(-ENOMEM); + + areq->areqlen = areqlen; + areq->sk = sk; + areq->last_rsgl = NULL; + INIT_LIST_HEAD(&areq->rsgl_list); + areq->tsgl = NULL; + areq->tsgl_entries = 0; + + return areq; +} +EXPORT_SYMBOL_GPL(af_alg_alloc_areq); + +/** + * af_alg_get_rsgl - create the RX SGL for the output data from the crypto + * operation + * + * @sk socket of connection to user space + * @msg user space message + * @flags flags used to invoke recvmsg with + * @areq instance of the cryptographic request that will hold the RX SGL + * @maxsize maximum number of bytes to be pulled from user space + * @outlen number of bytes in the RX SGL + * @return 0 on success, < 0 upon error + */ +int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags, + struct af_alg_async_req *areq, size_t maxsize, + size_t *outlen) +{ + struct alg_sock *ask = alg_sk(sk); + struct af_alg_ctx *ctx = ask->private; + size_t len = 0; + + while (maxsize > len && msg_data_left(msg)) { + struct af_alg_rsgl *rsgl; + size_t seglen; + int err; + + /* limit the amount of readable buffers */ + if (!af_alg_readable(sk)) + break; + + if (!ctx->used) { + err = af_alg_wait_for_data(sk, flags); + if (err) + return err; + } + + seglen = min_t(size_t, (maxsize - len), + msg_data_left(msg)); + + if (list_empty(&areq->rsgl_list)) { + rsgl = &areq->first_rsgl; + } else { + rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL); + if (unlikely(!rsgl)) + return -ENOMEM; + } + + rsgl->sgl.npages = 0; + list_add_tail(&rsgl->list, &areq->rsgl_list); + + /* make one iovec available as scatterlist */ + err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen); + if (err < 0) + return err; + + /* chain the new scatterlist with previous one */ + if (areq->last_rsgl) + af_alg_link_sg(&areq->last_rsgl->sgl, &rsgl->sgl); + + areq->last_rsgl = rsgl; + len += err; + ctx->rcvused += err; + rsgl->sg_num_bytes = err; + iov_iter_advance(&msg->msg_iter, err); + } + + *outlen = len; + return 0; +} +EXPORT_SYMBOL_GPL(af_alg_get_rsgl); + static int __init af_alg_init(void) { int err = proto_register(&alg_proto, 0); diff --git a/crypto/ahash.c b/crypto/ahash.c index 826cd7ab4d4a..5e8666e6ccae 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -588,6 +588,35 @@ int crypto_unregister_ahash(struct ahash_alg *alg) } EXPORT_SYMBOL_GPL(crypto_unregister_ahash); +int crypto_register_ahashes(struct ahash_alg *algs, int count) +{ + int i, ret; + + for (i = 0; i < count; i++) { + ret = crypto_register_ahash(&algs[i]); + if (ret) + goto err; + } + + return 0; + 
+err: + for (--i; i >= 0; --i) + crypto_unregister_ahash(&algs[i]); + + return ret; +} +EXPORT_SYMBOL_GPL(crypto_register_ahashes); + +void crypto_unregister_ahashes(struct ahash_alg *algs, int count) +{ + int i; + + for (i = count - 1; i >= 0; --i) + crypto_unregister_ahash(&algs[i]); +} +EXPORT_SYMBOL_GPL(crypto_unregister_ahashes); + int ahash_register_instance(struct crypto_template *tmpl, struct ahash_instance *inst) { diff --git a/crypto/algapi.c b/crypto/algapi.c index e4cc7615a139..aa699ff6c876 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -975,13 +975,15 @@ void crypto_inc(u8 *a, unsigned int size) } EXPORT_SYMBOL_GPL(crypto_inc); -void __crypto_xor(u8 *dst, const u8 *src, unsigned int len) +void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int len) { int relalign = 0; if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) { int size = sizeof(unsigned long); - int d = ((unsigned long)dst ^ (unsigned long)src) & (size - 1); + int d = (((unsigned long)dst ^ (unsigned long)src1) | + ((unsigned long)dst ^ (unsigned long)src2)) & + (size - 1); relalign = d ? 1 << __ffs(d) : size; @@ -992,34 +994,37 @@ void __crypto_xor(u8 *dst, const u8 *src, unsigned int len) * process the remainder of the input using optimal strides. */ while (((unsigned long)dst & (relalign - 1)) && len > 0) { - *dst++ ^= *src++; + *dst++ = *src1++ ^ *src2++; len--; } } while (IS_ENABLED(CONFIG_64BIT) && len >= 8 && !(relalign & 7)) { - *(u64 *)dst ^= *(u64 *)src; + *(u64 *)dst = *(u64 *)src1 ^ *(u64 *)src2; dst += 8; - src += 8; + src1 += 8; + src2 += 8; len -= 8; } while (len >= 4 && !(relalign & 3)) { - *(u32 *)dst ^= *(u32 *)src; + *(u32 *)dst = *(u32 *)src1 ^ *(u32 *)src2; dst += 4; - src += 4; + src1 += 4; + src2 += 4; len -= 4; } while (len >= 2 && !(relalign & 1)) { - *(u16 *)dst ^= *(u16 *)src; + *(u16 *)dst = *(u16 *)src1 ^ *(u16 *)src2; dst += 2; - src += 2; + src1 += 2; + src2 += 2; len -= 2; } while (len--) - *dst++ ^= *src++; + *dst++ = *src1++ ^ *src2++; } EXPORT_SYMBOL_GPL(__crypto_xor); diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index be117495eb43..516b38c3a169 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -5,88 +5,56 @@ * * This file provides the user-space API for AEAD ciphers. * - * This file is derived from algif_skcipher.c. - * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. + * + * The following concept of the memory management is used: + * + * The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is + * filled by user space with the data submitted via sendpage/sendmsg. Filling + * up the TX SGL does not cause a crypto operation -- the data will only be + * tracked by the kernel. Upon receipt of one recvmsg call, the caller must + * provide a buffer which is tracked with the RX SGL. + * + * During the processing of the recvmsg operation, the cipher request is + * allocated and prepared. As part of the recvmsg operation, the processed + * TX buffers are extracted from the TX SGL into a separate SGL. + * + * After the completion of the crypto operation, the RX SGL and the cipher + * request is released. The extracted TX SGL parts are released together with + * the RX SGL release. 
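For illustration (not part of the diff): a minimal user-space sketch of the flow the new comment block above describes -- sendmsg() with the ALG_SET_* control messages only fills the TX SGL, and the final read() supplies the RX buffer whose arrival triggers the actual cipher operation. The gcm(aes) algorithm, the all-zero key and IV, the buffer sizes and the lack of error handling are all arbitrary choices of the example.

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/if_alg.h>

#ifndef SOL_ALG
#define SOL_ALG 279
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "aead",
		.salg_name   = "gcm(aes)",
	};
	unsigned char key[16] = { 0 };
	unsigned char iv[12] = { 0 };
	unsigned char aad[8] = "assocdat";
	unsigned char pt[16] = "sixteen byte msg";
	unsigned char out[8 + 16 + 16];		/* AAD || CT || 16-byte tag */
	char cbuf[CMSG_SPACE(4) + CMSG_SPACE(4) +
		  CMSG_SPACE(sizeof(struct af_alg_iv) + sizeof(iv))];
	struct iovec iov[2] = {
		{ .iov_base = aad, .iov_len = sizeof(aad) },
		{ .iov_base = pt,  .iov_len = sizeof(pt)  },
	};
	struct msghdr msg = {
		.msg_iov        = iov,
		.msg_iovlen     = 2,
		.msg_control    = cbuf,
		.msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	struct af_alg_iv *algiv;
	int tfmfd, opfd;

	memset(cbuf, 0, sizeof(cbuf));

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	opfd = accept(tfmfd, NULL, NULL);

	cmsg = CMSG_FIRSTHDR(&msg);		/* ALG_SET_OP: encrypt */
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_OP;
	cmsg->cmsg_len = CMSG_LEN(4);
	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	cmsg = CMSG_NXTHDR(&msg, cmsg);		/* first 8 payload bytes are AAD */
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_AEAD_ASSOCLEN;
	cmsg->cmsg_len = CMSG_LEN(4);
	*(__u32 *)CMSG_DATA(cmsg) = sizeof(aad);

	cmsg = CMSG_NXTHDR(&msg, cmsg);		/* ALG_SET_IV */
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_IV;
	cmsg->cmsg_len = CMSG_LEN(sizeof(struct af_alg_iv) + sizeof(iv));
	algiv = (struct af_alg_iv *)CMSG_DATA(cmsg);
	algiv->ivlen = sizeof(iv);
	memcpy(algiv->iv, iv, sizeof(iv));

	sendmsg(opfd, &msg, 0);		/* fills the TX SGL, no crypto yet */
	read(opfd, out, sizeof(out));	/* RX buffer: runs the cipher and
					   returns AAD || ciphertext || tag */

	close(opfd);
	close(tfmfd);
	return 0;
}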
*/ #include #include #include +#include +#include #include #include #include -#include #include #include #include #include -struct aead_sg_list { - unsigned int cur; - struct scatterlist sg[ALG_MAX_PAGES]; -}; - -struct aead_async_rsgl { - struct af_alg_sgl sgl; - struct list_head list; -}; - -struct aead_async_req { - struct scatterlist *tsgl; - struct aead_async_rsgl first_rsgl; - struct list_head list; - struct kiocb *iocb; - struct sock *sk; - unsigned int tsgls; - char iv[]; -}; - struct aead_tfm { struct crypto_aead *aead; bool has_key; + struct crypto_skcipher *null_tfm; }; -struct aead_ctx { - struct aead_sg_list tsgl; - struct aead_async_rsgl first_rsgl; - struct list_head list; - - void *iv; - - struct af_alg_completion completion; - - unsigned long used; - - unsigned int len; - bool more; - bool merge; - bool enc; - - size_t aead_assoclen; - struct aead_request aead_req; -}; - -static inline int aead_sndbuf(struct sock *sk) +static inline bool aead_sufficient_data(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); - struct aead_ctx *ctx = ask->private; - - return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) - - ctx->used, 0); -} - -static inline bool aead_writable(struct sock *sk) -{ - return PAGE_SIZE <= aead_sndbuf(sk); -} - -static inline bool aead_sufficient_data(struct aead_ctx *ctx) -{ - unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req)); + struct sock *psk = ask->parent; + struct alg_sock *pask = alg_sk(psk); + struct af_alg_ctx *ctx = ask->private; + struct aead_tfm *aeadc = pask->private; + struct crypto_aead *tfm = aeadc->aead; + unsigned int as = crypto_aead_authsize(tfm); /* * The minimum amount of memory needed for an AEAD cipher is @@ -95,484 +63,58 @@ static inline bool aead_sufficient_data(struct aead_ctx *ctx) return ctx->used >= ctx->aead_assoclen + (ctx->enc ? 
0 : as); } -static void aead_reset_ctx(struct aead_ctx *ctx) -{ - struct aead_sg_list *sgl = &ctx->tsgl; - - sg_init_table(sgl->sg, ALG_MAX_PAGES); - sgl->cur = 0; - ctx->used = 0; - ctx->more = 0; - ctx->merge = 0; -} - -static void aead_put_sgl(struct sock *sk) -{ - struct alg_sock *ask = alg_sk(sk); - struct aead_ctx *ctx = ask->private; - struct aead_sg_list *sgl = &ctx->tsgl; - struct scatterlist *sg = sgl->sg; - unsigned int i; - - for (i = 0; i < sgl->cur; i++) { - if (!sg_page(sg + i)) - continue; - - put_page(sg_page(sg + i)); - sg_assign_page(sg + i, NULL); - } - aead_reset_ctx(ctx); -} - -static void aead_wmem_wakeup(struct sock *sk) -{ - struct socket_wq *wq; - - if (!aead_writable(sk)) - return; - - rcu_read_lock(); - wq = rcu_dereference(sk->sk_wq); - if (skwq_has_sleeper(wq)) - wake_up_interruptible_sync_poll(&wq->wait, POLLIN | - POLLRDNORM | - POLLRDBAND); - sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); - rcu_read_unlock(); -} - -static int aead_wait_for_data(struct sock *sk, unsigned flags) -{ - DEFINE_WAIT_FUNC(wait, woken_wake_function); - struct alg_sock *ask = alg_sk(sk); - struct aead_ctx *ctx = ask->private; - long timeout; - int err = -ERESTARTSYS; - - if (flags & MSG_DONTWAIT) - return -EAGAIN; - - sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); - add_wait_queue(sk_sleep(sk), &wait); - for (;;) { - if (signal_pending(current)) - break; - timeout = MAX_SCHEDULE_TIMEOUT; - if (sk_wait_event(sk, &timeout, !ctx->more, &wait)) { - err = 0; - break; - } - } - remove_wait_queue(sk_sleep(sk), &wait); - - sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); - - return err; -} - -static void aead_data_wakeup(struct sock *sk) -{ - struct alg_sock *ask = alg_sk(sk); - struct aead_ctx *ctx = ask->private; - struct socket_wq *wq; - - if (ctx->more) - return; - if (!ctx->used) - return; - - rcu_read_lock(); - wq = rcu_dereference(sk->sk_wq); - if (skwq_has_sleeper(wq)) - wake_up_interruptible_sync_poll(&wq->wait, POLLOUT | - POLLRDNORM | - POLLRDBAND); - sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); - rcu_read_unlock(); -} - static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); - struct aead_ctx *ctx = ask->private; - unsigned ivsize = - crypto_aead_ivsize(crypto_aead_reqtfm(&ctx->aead_req)); - struct aead_sg_list *sgl = &ctx->tsgl; - struct af_alg_control con = {}; - long copied = 0; - bool enc = 0; - bool init = 0; - int err = -EINVAL; + struct sock *psk = ask->parent; + struct alg_sock *pask = alg_sk(psk); + struct aead_tfm *aeadc = pask->private; + struct crypto_aead *tfm = aeadc->aead; + unsigned int ivsize = crypto_aead_ivsize(tfm); - if (msg->msg_controllen) { - err = af_alg_cmsg_send(msg, &con); - if (err) - return err; - - init = 1; - switch (con.op) { - case ALG_OP_ENCRYPT: - enc = 1; - break; - case ALG_OP_DECRYPT: - enc = 0; - break; - default: - return -EINVAL; - } - - if (con.iv && con.iv->ivlen != ivsize) - return -EINVAL; - } - - lock_sock(sk); - if (!ctx->more && ctx->used) - goto unlock; - - if (init) { - ctx->enc = enc; - if (con.iv) - memcpy(ctx->iv, con.iv->iv, ivsize); - - ctx->aead_assoclen = con.aead_assoclen; - } - - while (size) { - size_t len = size; - struct scatterlist *sg = NULL; - - /* use the existing memory in an allocated page */ - if (ctx->merge) { - sg = sgl->sg + sgl->cur - 1; - len = min_t(unsigned long, len, - PAGE_SIZE - sg->offset - sg->length); - err = memcpy_from_msg(page_address(sg_page(sg)) + - sg->offset + sg->length, - msg, len); - if (err) - goto unlock; - - 
sg->length += len; - ctx->merge = (sg->offset + sg->length) & - (PAGE_SIZE - 1); - - ctx->used += len; - copied += len; - size -= len; - continue; - } - - if (!aead_writable(sk)) { - /* user space sent too much data */ - aead_put_sgl(sk); - err = -EMSGSIZE; - goto unlock; - } - - /* allocate a new page */ - len = min_t(unsigned long, size, aead_sndbuf(sk)); - while (len) { - size_t plen = 0; - - if (sgl->cur >= ALG_MAX_PAGES) { - aead_put_sgl(sk); - err = -E2BIG; - goto unlock; - } - - sg = sgl->sg + sgl->cur; - plen = min_t(size_t, len, PAGE_SIZE); - - sg_assign_page(sg, alloc_page(GFP_KERNEL)); - err = -ENOMEM; - if (!sg_page(sg)) - goto unlock; - - err = memcpy_from_msg(page_address(sg_page(sg)), - msg, plen); - if (err) { - __free_page(sg_page(sg)); - sg_assign_page(sg, NULL); - goto unlock; - } - - sg->offset = 0; - sg->length = plen; - len -= plen; - ctx->used += plen; - copied += plen; - sgl->cur++; - size -= plen; - ctx->merge = plen & (PAGE_SIZE - 1); - } - } - - err = 0; - - ctx->more = msg->msg_flags & MSG_MORE; - if (!ctx->more && !aead_sufficient_data(ctx)) { - aead_put_sgl(sk); - err = -EMSGSIZE; - } - -unlock: - aead_data_wakeup(sk); - release_sock(sk); - - return err ?: copied; + return af_alg_sendmsg(sock, msg, size, ivsize); } -static ssize_t aead_sendpage(struct socket *sock, struct page *page, - int offset, size_t size, int flags) +static int crypto_aead_copy_sgl(struct crypto_skcipher *null_tfm, + struct scatterlist *src, + struct scatterlist *dst, unsigned int len) +{ + SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm); + + skcipher_request_set_tfm(skreq, null_tfm); + skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_BACKLOG, + NULL, NULL); + skcipher_request_set_crypt(skreq, src, dst, len, NULL); + + return crypto_skcipher_encrypt(skreq); +} + +static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, + size_t ignored, int flags) { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); - struct aead_ctx *ctx = ask->private; - struct aead_sg_list *sgl = &ctx->tsgl; - int err = -EINVAL; - - if (flags & MSG_SENDPAGE_NOTLAST) - flags |= MSG_MORE; - - if (sgl->cur >= ALG_MAX_PAGES) - return -E2BIG; - - lock_sock(sk); - if (!ctx->more && ctx->used) - goto unlock; - - if (!size) - goto done; - - if (!aead_writable(sk)) { - /* user space sent too much data */ - aead_put_sgl(sk); - err = -EMSGSIZE; - goto unlock; - } - - ctx->merge = 0; - - get_page(page); - sg_set_page(sgl->sg + sgl->cur, page, size, offset); - sgl->cur++; - ctx->used += size; - - err = 0; - -done: - ctx->more = flags & MSG_MORE; - if (!ctx->more && !aead_sufficient_data(ctx)) { - aead_put_sgl(sk); - err = -EMSGSIZE; - } - -unlock: - aead_data_wakeup(sk); - release_sock(sk); - - return err ?: size; -} - -#define GET_ASYM_REQ(req, tfm) (struct aead_async_req *) \ - ((char *)req + sizeof(struct aead_request) + \ - crypto_aead_reqsize(tfm)) - - #define GET_REQ_SIZE(tfm) sizeof(struct aead_async_req) + \ - crypto_aead_reqsize(tfm) + crypto_aead_ivsize(tfm) + \ - sizeof(struct aead_request) - -static void aead_async_cb(struct crypto_async_request *_req, int err) -{ - struct aead_request *req = _req->data; - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct aead_async_req *areq = GET_ASYM_REQ(req, tfm); - struct sock *sk = areq->sk; - struct scatterlist *sg = areq->tsgl; - struct aead_async_rsgl *rsgl; - struct kiocb *iocb = areq->iocb; - unsigned int i, reqlen = GET_REQ_SIZE(tfm); - - list_for_each_entry(rsgl, &areq->list, list) { - af_alg_free_sg(&rsgl->sgl); - if (rsgl != 
&areq->first_rsgl) - sock_kfree_s(sk, rsgl, sizeof(*rsgl)); - } - - for (i = 0; i < areq->tsgls; i++) - put_page(sg_page(sg + i)); - - sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls); - sock_kfree_s(sk, req, reqlen); - __sock_put(sk); - iocb->ki_complete(iocb, err, err); -} - -static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg, - int flags) -{ - struct sock *sk = sock->sk; - struct alg_sock *ask = alg_sk(sk); - struct aead_ctx *ctx = ask->private; - struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req); - struct aead_async_req *areq; - struct aead_request *req = NULL; - struct aead_sg_list *sgl = &ctx->tsgl; - struct aead_async_rsgl *last_rsgl = NULL, *rsgl; + struct sock *psk = ask->parent; + struct alg_sock *pask = alg_sk(psk); + struct af_alg_ctx *ctx = ask->private; + struct aead_tfm *aeadc = pask->private; + struct crypto_aead *tfm = aeadc->aead; + struct crypto_skcipher *null_tfm = aeadc->null_tfm; unsigned int as = crypto_aead_authsize(tfm); - unsigned int i, reqlen = GET_REQ_SIZE(tfm); - int err = -ENOMEM; - unsigned long used; - size_t outlen = 0; - size_t usedpages = 0; - - lock_sock(sk); - if (ctx->more) { - err = aead_wait_for_data(sk, flags); - if (err) - goto unlock; - } - - if (!aead_sufficient_data(ctx)) - goto unlock; - - used = ctx->used; - if (ctx->enc) - outlen = used + as; - else - outlen = used - as; - - req = sock_kmalloc(sk, reqlen, GFP_KERNEL); - if (unlikely(!req)) - goto unlock; - - areq = GET_ASYM_REQ(req, tfm); - memset(&areq->first_rsgl, '\0', sizeof(areq->first_rsgl)); - INIT_LIST_HEAD(&areq->list); - areq->iocb = msg->msg_iocb; - areq->sk = sk; - memcpy(areq->iv, ctx->iv, crypto_aead_ivsize(tfm)); - aead_request_set_tfm(req, tfm); - aead_request_set_ad(req, ctx->aead_assoclen); - aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - aead_async_cb, req); - used -= ctx->aead_assoclen; - - /* take over all tx sgls from ctx */ - areq->tsgl = sock_kmalloc(sk, - sizeof(*areq->tsgl) * max_t(u32, sgl->cur, 1), - GFP_KERNEL); - if (unlikely(!areq->tsgl)) - goto free; - - sg_init_table(areq->tsgl, max_t(u32, sgl->cur, 1)); - for (i = 0; i < sgl->cur; i++) - sg_set_page(&areq->tsgl[i], sg_page(&sgl->sg[i]), - sgl->sg[i].length, sgl->sg[i].offset); - - areq->tsgls = sgl->cur; - - /* create rx sgls */ - while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) { - size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter), - (outlen - usedpages)); - - if (list_empty(&areq->list)) { - rsgl = &areq->first_rsgl; - - } else { - rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL); - if (unlikely(!rsgl)) { - err = -ENOMEM; - goto free; - } - } - rsgl->sgl.npages = 0; - list_add_tail(&rsgl->list, &areq->list); - - /* make one iovec available as scatterlist */ - err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen); - if (err < 0) - goto free; - - usedpages += err; - - /* chain the new scatterlist with previous one */ - if (last_rsgl) - af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl); - - last_rsgl = rsgl; - - iov_iter_advance(&msg->msg_iter, err); - } - - /* ensure output buffer is sufficiently large */ - if (usedpages < outlen) { - err = -EINVAL; - goto unlock; - } - - aead_request_set_crypt(req, areq->tsgl, areq->first_rsgl.sgl.sg, used, - areq->iv); - err = ctx->enc ? 
crypto_aead_encrypt(req) : crypto_aead_decrypt(req); - if (err) { - if (err == -EINPROGRESS) { - sock_hold(sk); - err = -EIOCBQUEUED; - aead_reset_ctx(ctx); - goto unlock; - } else if (err == -EBADMSG) { - aead_put_sgl(sk); - } - goto free; - } - aead_put_sgl(sk); - -free: - list_for_each_entry(rsgl, &areq->list, list) { - af_alg_free_sg(&rsgl->sgl); - if (rsgl != &areq->first_rsgl) - sock_kfree_s(sk, rsgl, sizeof(*rsgl)); - } - if (areq->tsgl) - sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls); - if (req) - sock_kfree_s(sk, req, reqlen); -unlock: - aead_wmem_wakeup(sk); - release_sock(sk); - return err ? err : outlen; -} - -static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags) -{ - struct sock *sk = sock->sk; - struct alg_sock *ask = alg_sk(sk); - struct aead_ctx *ctx = ask->private; - unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req)); - struct aead_sg_list *sgl = &ctx->tsgl; - struct aead_async_rsgl *last_rsgl = NULL; - struct aead_async_rsgl *rsgl, *tmp; - int err = -EINVAL; - unsigned long used = 0; - size_t outlen = 0; - size_t usedpages = 0; - - lock_sock(sk); + struct af_alg_async_req *areq; + struct af_alg_tsgl *tsgl; + struct scatterlist *src; + int err = 0; + size_t used = 0; /* [in] TX bufs to be en/decrypted */ + size_t outlen = 0; /* [out] RX bufs produced by kernel */ + size_t usedpages = 0; /* [in] RX bufs to be used from user */ + size_t processed = 0; /* [in] TX bufs to be consumed */ /* - * Please see documentation of aead_request_set_crypt for the - * description of the AEAD memory structure expected from the caller. + * Data length provided by caller via sendmsg/sendpage that has not + * yet been processed. */ - - if (ctx->more) { - err = aead_wait_for_data(sk, flags); - if (err) - goto unlock; - } - - /* data length provided by caller via sendmsg/sendpage */ used = ctx->used; /* @@ -584,8 +126,8 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags) * the error message in sendmsg/sendpage and still call recvmsg. This * check here protects the kernel integrity. */ - if (!aead_sufficient_data(ctx)) - goto unlock; + if (!aead_sufficient_data(sk)) + return -EINVAL; /* * Calculate the minimum output buffer size holding the result of the @@ -606,104 +148,191 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags) */ used -= ctx->aead_assoclen; - /* convert iovecs of output buffers into scatterlists */ - while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) { - size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter), - (outlen - usedpages)); + /* Allocate cipher request for current operation. 
*/ + areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) + + crypto_aead_reqsize(tfm)); + if (IS_ERR(areq)) + return PTR_ERR(areq); - if (list_empty(&ctx->list)) { - rsgl = &ctx->first_rsgl; - } else { - rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL); - if (unlikely(!rsgl)) { - err = -ENOMEM; - goto unlock; - } - } - rsgl->sgl.npages = 0; - list_add_tail(&rsgl->list, &ctx->list); + /* convert iovecs of output buffers into RX SGL */ + err = af_alg_get_rsgl(sk, msg, flags, areq, outlen, &usedpages); + if (err) + goto free; - /* make one iovec available as scatterlist */ - err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen); - if (err < 0) - goto unlock; - usedpages += err; - /* chain the new scatterlist with previous one */ - if (last_rsgl) - af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl); - - last_rsgl = rsgl; - - iov_iter_advance(&msg->msg_iter, err); - } - - /* ensure output buffer is sufficiently large */ + /* + * Ensure output buffer is sufficiently large. If the caller provides + * less buffer space, only use the relative required input size. This + * allows AIO operation where the caller sent all data to be processed + * and the AIO operation performs the operation on the different chunks + * of the input data. + */ if (usedpages < outlen) { - err = -EINVAL; - goto unlock; + size_t less = outlen - usedpages; + + if (used < less) { + err = -EINVAL; + goto free; + } + used -= less; + outlen -= less; } - sg_mark_end(sgl->sg + sgl->cur - 1); - aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->first_rsgl.sgl.sg, - used, ctx->iv); - aead_request_set_ad(&ctx->aead_req, ctx->aead_assoclen); + processed = used + ctx->aead_assoclen; + tsgl = list_first_entry(&ctx->tsgl_list, struct af_alg_tsgl, list); - err = af_alg_wait_for_completion(ctx->enc ? - crypto_aead_encrypt(&ctx->aead_req) : - crypto_aead_decrypt(&ctx->aead_req), - &ctx->completion); + /* + * Copy of AAD from source to destination + * + * The AAD is copied to the destination buffer without change. Even + * when user space uses an in-place cipher operation, the kernel + * will copy the data as it does not see whether such in-place operation + * is initiated. + * + * To ensure efficiency, the following implementation ensure that the + * ciphers are invoked to perform a crypto operation in-place. This + * is achieved by memory management specified as follows. + */ - if (err) { - /* EBADMSG implies a valid cipher operation took place */ - if (err == -EBADMSG) - aead_put_sgl(sk); + /* Use the RX SGL as source (and destination) for crypto op. */ + src = areq->first_rsgl.sgl.sg; - goto unlock; + if (ctx->enc) { + /* + * Encryption operation - The in-place cipher operation is + * achieved by the following operation: + * + * TX SGL: AAD || PT + * | | + * | copy | + * v v + * RX SGL: AAD || PT || Tag + */ + err = crypto_aead_copy_sgl(null_tfm, tsgl->sg, + areq->first_rsgl.sgl.sg, processed); + if (err) + goto free; + af_alg_pull_tsgl(sk, processed, NULL, 0); + } else { + /* + * Decryption operation - To achieve an in-place cipher + * operation, the following SGL structure is used: + * + * TX SGL: AAD || CT || Tag + * | | ^ + * | copy | | Create SGL link. + * v v | + * RX SGL: AAD || CT ----+ + */ + + /* Copy AAD || CT to RX SGL buffer for in-place operation. */ + err = crypto_aead_copy_sgl(null_tfm, tsgl->sg, + areq->first_rsgl.sgl.sg, outlen); + if (err) + goto free; + + /* Create TX SGL for tag and chain it to RX SGL. 
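To make the size bookkeeping above concrete, here is a worked example with illustrative numbers (not taken from the patch): a gcm(aes) request with a 16-byte tag (as = 16), 8 bytes of AAD and 32 bytes of plaintext.

Encryption: ctx->used = 8 + 32 = 40 bytes sit in the TX SGL; outlen = 40 + 16 = 56 bytes of RX space hold AAD || CT || Tag; the cipher runs over used = 40 - 8 = 32 bytes; processed = 40 bytes are copied into the RX SGL and then pulled from the TX SGL.

Decryption: ctx->used = 8 + 32 + 16 = 56; outlen = 56 - 16 = 40 bytes of RX space hold AAD || PT; the cipher runs over used = 56 - 8 = 48 bytes (CT || Tag); processed = 56, of which the first outlen = 40 bytes are copied into the RX SGL while the trailing 16 tag bytes are pulled at offset processed - as = 40 into areq->tsgl and chained behind the RX SGL.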
*/ + areq->tsgl_entries = af_alg_count_tsgl(sk, processed, + processed - as); + if (!areq->tsgl_entries) + areq->tsgl_entries = 1; + areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * + areq->tsgl_entries, + GFP_KERNEL); + if (!areq->tsgl) { + err = -ENOMEM; + goto free; + } + sg_init_table(areq->tsgl, areq->tsgl_entries); + + /* Release TX SGL, except for tag data and reassign tag data. */ + af_alg_pull_tsgl(sk, processed, areq->tsgl, processed - as); + + /* chain the areq TX SGL holding the tag with RX SGL */ + if (usedpages) { + /* RX SGL present */ + struct af_alg_sgl *sgl_prev = &areq->last_rsgl->sgl; + + sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1); + sg_chain(sgl_prev->sg, sgl_prev->npages + 1, + areq->tsgl); + } else + /* no RX SGL present (e.g. authentication only) */ + src = areq->tsgl; } - aead_put_sgl(sk); - err = 0; + /* Initialize the crypto operation */ + aead_request_set_crypt(&areq->cra_u.aead_req, src, + areq->first_rsgl.sgl.sg, used, ctx->iv); + aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen); + aead_request_set_tfm(&areq->cra_u.aead_req, tfm); -unlock: - list_for_each_entry_safe(rsgl, tmp, &ctx->list, list) { - af_alg_free_sg(&rsgl->sgl); - list_del(&rsgl->list); - if (rsgl != &ctx->first_rsgl) - sock_kfree_s(sk, rsgl, sizeof(*rsgl)); + if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) { + /* AIO operation */ + areq->iocb = msg->msg_iocb; + aead_request_set_callback(&areq->cra_u.aead_req, + CRYPTO_TFM_REQ_MAY_BACKLOG, + af_alg_async_cb, areq); + err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) : + crypto_aead_decrypt(&areq->cra_u.aead_req); + } else { + /* Synchronous operation */ + aead_request_set_callback(&areq->cra_u.aead_req, + CRYPTO_TFM_REQ_MAY_BACKLOG, + af_alg_complete, &ctx->completion); + err = af_alg_wait_for_completion(ctx->enc ? + crypto_aead_encrypt(&areq->cra_u.aead_req) : + crypto_aead_decrypt(&areq->cra_u.aead_req), + &ctx->completion); } - INIT_LIST_HEAD(&ctx->list); - aead_wmem_wakeup(sk); - release_sock(sk); + + /* AIO operation in progress */ + if (err == -EINPROGRESS) { + sock_hold(sk); + + /* Remember output size that will be generated. */ + areq->outlen = outlen; + + return -EIOCBQUEUED; + } + +free: + af_alg_free_areq_sgls(areq); + sock_kfree_s(sk, areq, areq->areqlen); return err ? err : outlen; } -static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored, - int flags) -{ - return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ? - aead_recvmsg_async(sock, msg, flags) : - aead_recvmsg_sync(sock, msg, flags); -} - -static unsigned int aead_poll(struct file *file, struct socket *sock, - poll_table *wait) +static int aead_recvmsg(struct socket *sock, struct msghdr *msg, + size_t ignored, int flags) { struct sock *sk = sock->sk; - struct alg_sock *ask = alg_sk(sk); - struct aead_ctx *ctx = ask->private; - unsigned int mask; + int ret = 0; - sock_poll_wait(file, sk_sleep(sk), wait); - mask = 0; + lock_sock(sk); + while (msg_data_left(msg)) { + int err = _aead_recvmsg(sock, msg, ignored, flags); - if (!ctx->more) - mask |= POLLIN | POLLRDNORM; + /* + * This error covers -EIOCBQUEUED which implies that we can + * only handle one AIO request. If the caller wants to have + * multiple AIO requests in parallel, he must make multiple + * separate AIO calls. + * + * Also return the error if no data has been processed so far. 
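Illustration only (not from the diff): one way user space drives exactly one AIO cipher operation per iocb, matching the one-recvmsg-per-request rule spelled out above. The helper reuses the opfd and out buffer from the earlier user-space sketch and calls the raw io_* syscalls so it does not depend on libaio; both of those are assumptions of the example, as is the name aio_one_op.

#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>

/* The TX data for this request must already have been queued via sendmsg(). */
static long aio_one_op(int opfd, void *out, size_t outlen)
{
	aio_context_t aio_ctx = 0;
	struct iocb cb;
	struct iocb *cbs[1] = { &cb };
	struct io_event ev;

	if (syscall(SYS_io_setup, 1, &aio_ctx))
		return -1;

	memset(&cb, 0, sizeof(cb));
	cb.aio_fildes     = opfd;
	cb.aio_lio_opcode = IOCB_CMD_PREAD;	/* an AIO read on the op socket
						   ends up in recvmsg() */
	cb.aio_buf        = (__u64)(unsigned long)out;
	cb.aio_nbytes     = outlen;

	/*
	 * One iocb == one recvmsg == one cipher operation.  Running several
	 * operations in parallel means one sendmsg() plus one iocb per
	 * operation, never one iocb covering several of them.
	 */
	if (syscall(SYS_io_submit, aio_ctx, 1, cbs) != 1)
		return -1;
	if (syscall(SYS_io_getevents, aio_ctx, 1, 1, &ev, NULL) != 1)
		return -1;

	syscall(SYS_io_destroy, aio_ctx);
	return (long)ev.res;	/* bytes produced, or a negative error code */
}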
+ */ + if (err <= 0) { + if (err == -EIOCBQUEUED || err == -EBADMSG || !ret) + ret = err; + goto out; + } - if (aead_writable(sk)) - mask |= POLLOUT | POLLWRNORM | POLLWRBAND; + ret += err; + } - return mask; +out: + af_alg_wmem_wakeup(sk); + release_sock(sk); + return ret; } static struct proto_ops algif_aead_ops = { @@ -723,9 +352,9 @@ static struct proto_ops algif_aead_ops = { .release = af_alg_release, .sendmsg = aead_sendmsg, - .sendpage = aead_sendpage, + .sendpage = af_alg_sendpage, .recvmsg = aead_recvmsg, - .poll = aead_poll, + .poll = af_alg_poll, }; static int aead_check_key(struct socket *sock) @@ -787,7 +416,7 @@ static ssize_t aead_sendpage_nokey(struct socket *sock, struct page *page, if (err) return err; - return aead_sendpage(sock, page, offset, size, flags); + return af_alg_sendpage(sock, page, offset, size, flags); } static int aead_recvmsg_nokey(struct socket *sock, struct msghdr *msg, @@ -821,13 +450,14 @@ static struct proto_ops algif_aead_ops_nokey = { .sendmsg = aead_sendmsg_nokey, .sendpage = aead_sendpage_nokey, .recvmsg = aead_recvmsg_nokey, - .poll = aead_poll, + .poll = af_alg_poll, }; static void *aead_bind(const char *name, u32 type, u32 mask) { struct aead_tfm *tfm; struct crypto_aead *aead; + struct crypto_skcipher *null_tfm; tfm = kzalloc(sizeof(*tfm), GFP_KERNEL); if (!tfm) @@ -839,7 +469,15 @@ static void *aead_bind(const char *name, u32 type, u32 mask) return ERR_CAST(aead); } + null_tfm = crypto_get_default_null_skcipher2(); + if (IS_ERR(null_tfm)) { + crypto_free_aead(aead); + kfree(tfm); + return ERR_CAST(null_tfm); + } + tfm->aead = aead; + tfm->null_tfm = null_tfm; return tfm; } @@ -873,12 +511,15 @@ static int aead_setkey(void *private, const u8 *key, unsigned int keylen) static void aead_sock_destruct(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); - struct aead_ctx *ctx = ask->private; - unsigned int ivlen = crypto_aead_ivsize( - crypto_aead_reqtfm(&ctx->aead_req)); + struct af_alg_ctx *ctx = ask->private; + struct sock *psk = ask->parent; + struct alg_sock *pask = alg_sk(psk); + struct aead_tfm *aeadc = pask->private; + struct crypto_aead *tfm = aeadc->aead; + unsigned int ivlen = crypto_aead_ivsize(tfm); - WARN_ON(refcount_read(&sk->sk_refcnt) != 0); - aead_put_sgl(sk); + af_alg_pull_tsgl(sk, ctx->used, NULL, 0); + crypto_put_default_null_skcipher2(); sock_kzfree_s(sk, ctx->iv, ivlen); sock_kfree_s(sk, ctx, ctx->len); af_alg_release_parent(sk); @@ -886,11 +527,11 @@ static void aead_sock_destruct(struct sock *sk) static int aead_accept_parent_nokey(void *private, struct sock *sk) { - struct aead_ctx *ctx; + struct af_alg_ctx *ctx; struct alg_sock *ask = alg_sk(sk); struct aead_tfm *tfm = private; struct crypto_aead *aead = tfm->aead; - unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(aead); + unsigned int len = sizeof(*ctx); unsigned int ivlen = crypto_aead_ivsize(aead); ctx = sock_kmalloc(sk, len, GFP_KERNEL); @@ -905,23 +546,18 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk) } memset(ctx->iv, 0, ivlen); + INIT_LIST_HEAD(&ctx->tsgl_list); ctx->len = len; ctx->used = 0; + ctx->rcvused = 0; ctx->more = 0; ctx->merge = 0; ctx->enc = 0; - ctx->tsgl.cur = 0; ctx->aead_assoclen = 0; af_alg_init_completion(&ctx->completion); - sg_init_table(ctx->tsgl.sg, ALG_MAX_PAGES); - INIT_LIST_HEAD(&ctx->list); ask->private = ctx; - aead_request_set_tfm(&ctx->aead_req, aead); - aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG, - af_alg_complete, &ctx->completion); - sk->sk_destruct = aead_sock_destruct; 
return 0; diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index 903605dbc1a5..8ae4170aaeb4 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -10,6 +10,21 @@ * Software Foundation; either version 2 of the License, or (at your option) * any later version. * + * The following concept of the memory management is used: + * + * The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is + * filled by user space with the data submitted via sendpage/sendmsg. Filling + * up the TX SGL does not cause a crypto operation -- the data will only be + * tracked by the kernel. Upon receipt of one recvmsg call, the caller must + * provide a buffer which is tracked with the RX SGL. + * + * During the processing of the recvmsg operation, the cipher request is + * allocated and prepared. As part of the recvmsg operation, the processed + * TX buffers are extracted from the TX SGL into a separate SGL. + * + * After the completion of the crypto operation, the RX SGL and the cipher + * request is released. The extracted TX SGL parts are released together with + * the RX SGL release. */ #include @@ -18,284 +33,16 @@ #include #include #include -#include #include #include #include #include -struct skcipher_sg_list { - struct list_head list; - - int cur; - - struct scatterlist sg[0]; -}; - struct skcipher_tfm { struct crypto_skcipher *skcipher; bool has_key; }; -struct skcipher_ctx { - struct list_head tsgl; - struct af_alg_sgl rsgl; - - void *iv; - - struct af_alg_completion completion; - - atomic_t inflight; - size_t used; - - unsigned int len; - bool more; - bool merge; - bool enc; - - struct skcipher_request req; -}; - -struct skcipher_async_rsgl { - struct af_alg_sgl sgl; - struct list_head list; -}; - -struct skcipher_async_req { - struct kiocb *iocb; - struct skcipher_async_rsgl first_sgl; - struct list_head list; - struct scatterlist *tsg; - atomic_t *inflight; - struct skcipher_request req; -}; - -#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \ - sizeof(struct scatterlist) - 1) - -static void skcipher_free_async_sgls(struct skcipher_async_req *sreq) -{ - struct skcipher_async_rsgl *rsgl, *tmp; - struct scatterlist *sgl; - struct scatterlist *sg; - int i, n; - - list_for_each_entry_safe(rsgl, tmp, &sreq->list, list) { - af_alg_free_sg(&rsgl->sgl); - if (rsgl != &sreq->first_sgl) - kfree(rsgl); - } - sgl = sreq->tsg; - n = sg_nents(sgl); - for_each_sg(sgl, sg, n, i) { - struct page *page = sg_page(sg); - - /* some SGs may not have a page mapped */ - if (page && page_ref_count(page)) - put_page(page); - } - - kfree(sreq->tsg); -} - -static void skcipher_async_cb(struct crypto_async_request *req, int err) -{ - struct skcipher_async_req *sreq = req->data; - struct kiocb *iocb = sreq->iocb; - - atomic_dec(sreq->inflight); - skcipher_free_async_sgls(sreq); - kzfree(sreq); - iocb->ki_complete(iocb, err, err); -} - -static inline int skcipher_sndbuf(struct sock *sk) -{ - struct alg_sock *ask = alg_sk(sk); - struct skcipher_ctx *ctx = ask->private; - - return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) - - ctx->used, 0); -} - -static inline bool skcipher_writable(struct sock *sk) -{ - return PAGE_SIZE <= skcipher_sndbuf(sk); -} - -static int skcipher_alloc_sgl(struct sock *sk) -{ - struct alg_sock *ask = alg_sk(sk); - struct skcipher_ctx *ctx = ask->private; - struct skcipher_sg_list *sgl; - struct scatterlist *sg = NULL; - - sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list); - if (!list_empty(&ctx->tsgl)) - sg = sgl->sg; - 
- if (!sg || sgl->cur >= MAX_SGL_ENTS) { - sgl = sock_kmalloc(sk, sizeof(*sgl) + - sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1), - GFP_KERNEL); - if (!sgl) - return -ENOMEM; - - sg_init_table(sgl->sg, MAX_SGL_ENTS + 1); - sgl->cur = 0; - - if (sg) - sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg); - - list_add_tail(&sgl->list, &ctx->tsgl); - } - - return 0; -} - -static void skcipher_pull_sgl(struct sock *sk, size_t used, int put) -{ - struct alg_sock *ask = alg_sk(sk); - struct skcipher_ctx *ctx = ask->private; - struct skcipher_sg_list *sgl; - struct scatterlist *sg; - int i; - - while (!list_empty(&ctx->tsgl)) { - sgl = list_first_entry(&ctx->tsgl, struct skcipher_sg_list, - list); - sg = sgl->sg; - - for (i = 0; i < sgl->cur; i++) { - size_t plen = min_t(size_t, used, sg[i].length); - - if (!sg_page(sg + i)) - continue; - - sg[i].length -= plen; - sg[i].offset += plen; - - used -= plen; - ctx->used -= plen; - - if (sg[i].length) - return; - if (put) - put_page(sg_page(sg + i)); - sg_assign_page(sg + i, NULL); - } - - list_del(&sgl->list); - sock_kfree_s(sk, sgl, - sizeof(*sgl) + sizeof(sgl->sg[0]) * - (MAX_SGL_ENTS + 1)); - } - - if (!ctx->used) - ctx->merge = 0; -} - -static void skcipher_free_sgl(struct sock *sk) -{ - struct alg_sock *ask = alg_sk(sk); - struct skcipher_ctx *ctx = ask->private; - - skcipher_pull_sgl(sk, ctx->used, 1); -} - -static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags) -{ - DEFINE_WAIT_FUNC(wait, woken_wake_function); - int err = -ERESTARTSYS; - long timeout; - - if (flags & MSG_DONTWAIT) - return -EAGAIN; - - sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); - - add_wait_queue(sk_sleep(sk), &wait); - for (;;) { - if (signal_pending(current)) - break; - timeout = MAX_SCHEDULE_TIMEOUT; - if (sk_wait_event(sk, &timeout, skcipher_writable(sk), &wait)) { - err = 0; - break; - } - } - remove_wait_queue(sk_sleep(sk), &wait); - - return err; -} - -static void skcipher_wmem_wakeup(struct sock *sk) -{ - struct socket_wq *wq; - - if (!skcipher_writable(sk)) - return; - - rcu_read_lock(); - wq = rcu_dereference(sk->sk_wq); - if (skwq_has_sleeper(wq)) - wake_up_interruptible_sync_poll(&wq->wait, POLLIN | - POLLRDNORM | - POLLRDBAND); - sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); - rcu_read_unlock(); -} - -static int skcipher_wait_for_data(struct sock *sk, unsigned flags) -{ - DEFINE_WAIT_FUNC(wait, woken_wake_function); - struct alg_sock *ask = alg_sk(sk); - struct skcipher_ctx *ctx = ask->private; - long timeout; - int err = -ERESTARTSYS; - - if (flags & MSG_DONTWAIT) { - return -EAGAIN; - } - - sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); - - add_wait_queue(sk_sleep(sk), &wait); - for (;;) { - if (signal_pending(current)) - break; - timeout = MAX_SCHEDULE_TIMEOUT; - if (sk_wait_event(sk, &timeout, ctx->used, &wait)) { - err = 0; - break; - } - } - remove_wait_queue(sk_sleep(sk), &wait); - - sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); - - return err; -} - -static void skcipher_data_wakeup(struct sock *sk) -{ - struct alg_sock *ask = alg_sk(sk); - struct skcipher_ctx *ctx = ask->private; - struct socket_wq *wq; - - if (!ctx->used) - return; - - rcu_read_lock(); - wq = rcu_dereference(sk->sk_wq); - if (skwq_has_sleeper(wq)) - wake_up_interruptible_sync_poll(&wq->wait, POLLOUT | - POLLRDNORM | - POLLRDBAND); - sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); - rcu_read_unlock(); -} - static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) { @@ -303,446 +50,144 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg, struct alg_sock *ask = alg_sk(sk); 
struct sock *psk = ask->parent; struct alg_sock *pask = alg_sk(psk); - struct skcipher_ctx *ctx = ask->private; struct skcipher_tfm *skc = pask->private; struct crypto_skcipher *tfm = skc->skcipher; unsigned ivsize = crypto_skcipher_ivsize(tfm); - struct skcipher_sg_list *sgl; - struct af_alg_control con = {}; - long copied = 0; - bool enc = 0; - bool init = 0; - int err; - int i; - if (msg->msg_controllen) { - err = af_alg_cmsg_send(msg, &con); - if (err) - return err; - - init = 1; - switch (con.op) { - case ALG_OP_ENCRYPT: - enc = 1; - break; - case ALG_OP_DECRYPT: - enc = 0; - break; - default: - return -EINVAL; - } - - if (con.iv && con.iv->ivlen != ivsize) - return -EINVAL; - } - - err = -EINVAL; - - lock_sock(sk); - if (!ctx->more && ctx->used) - goto unlock; - - if (init) { - ctx->enc = enc; - if (con.iv) - memcpy(ctx->iv, con.iv->iv, ivsize); - } - - while (size) { - struct scatterlist *sg; - unsigned long len = size; - size_t plen; - - if (ctx->merge) { - sgl = list_entry(ctx->tsgl.prev, - struct skcipher_sg_list, list); - sg = sgl->sg + sgl->cur - 1; - len = min_t(unsigned long, len, - PAGE_SIZE - sg->offset - sg->length); - - err = memcpy_from_msg(page_address(sg_page(sg)) + - sg->offset + sg->length, - msg, len); - if (err) - goto unlock; - - sg->length += len; - ctx->merge = (sg->offset + sg->length) & - (PAGE_SIZE - 1); - - ctx->used += len; - copied += len; - size -= len; - continue; - } - - if (!skcipher_writable(sk)) { - err = skcipher_wait_for_wmem(sk, msg->msg_flags); - if (err) - goto unlock; - } - - len = min_t(unsigned long, len, skcipher_sndbuf(sk)); - - err = skcipher_alloc_sgl(sk); - if (err) - goto unlock; - - sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list); - sg = sgl->sg; - if (sgl->cur) - sg_unmark_end(sg + sgl->cur - 1); - do { - i = sgl->cur; - plen = min_t(size_t, len, PAGE_SIZE); - - sg_assign_page(sg + i, alloc_page(GFP_KERNEL)); - err = -ENOMEM; - if (!sg_page(sg + i)) - goto unlock; - - err = memcpy_from_msg(page_address(sg_page(sg + i)), - msg, plen); - if (err) { - __free_page(sg_page(sg + i)); - sg_assign_page(sg + i, NULL); - goto unlock; - } - - sg[i].length = plen; - len -= plen; - ctx->used += plen; - copied += plen; - size -= plen; - sgl->cur++; - } while (len && sgl->cur < MAX_SGL_ENTS); - - if (!size) - sg_mark_end(sg + sgl->cur - 1); - - ctx->merge = plen & (PAGE_SIZE - 1); - } - - err = 0; - - ctx->more = msg->msg_flags & MSG_MORE; - -unlock: - skcipher_data_wakeup(sk); - release_sock(sk); - - return copied ?: err; + return af_alg_sendmsg(sock, msg, size, ivsize); } -static ssize_t skcipher_sendpage(struct socket *sock, struct page *page, - int offset, size_t size, int flags) +static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, + size_t ignored, int flags) { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); - struct skcipher_ctx *ctx = ask->private; - struct skcipher_sg_list *sgl; - int err = -EINVAL; + struct sock *psk = ask->parent; + struct alg_sock *pask = alg_sk(psk); + struct af_alg_ctx *ctx = ask->private; + struct skcipher_tfm *skc = pask->private; + struct crypto_skcipher *tfm = skc->skcipher; + unsigned int bs = crypto_skcipher_blocksize(tfm); + struct af_alg_async_req *areq; + int err = 0; + size_t len = 0; - if (flags & MSG_SENDPAGE_NOTLAST) - flags |= MSG_MORE; + /* Allocate cipher request for current operation. 
*/ + areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) + + crypto_skcipher_reqsize(tfm)); + if (IS_ERR(areq)) + return PTR_ERR(areq); - lock_sock(sk); - if (!ctx->more && ctx->used) - goto unlock; - - if (!size) - goto done; - - if (!skcipher_writable(sk)) { - err = skcipher_wait_for_wmem(sk, flags); - if (err) - goto unlock; - } - - err = skcipher_alloc_sgl(sk); + /* convert iovecs of output buffers into RX SGL */ + err = af_alg_get_rsgl(sk, msg, flags, areq, -1, &len); if (err) - goto unlock; + goto free; - ctx->merge = 0; - sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list); + /* Process only as much RX buffers for which we have TX data */ + if (len > ctx->used) + len = ctx->used; - if (sgl->cur) - sg_unmark_end(sgl->sg + sgl->cur - 1); + /* + * If more buffers are to be expected to be processed, process only + * full block size buffers. + */ + if (ctx->more || len < ctx->used) + len -= len % bs; - sg_mark_end(sgl->sg + sgl->cur); - get_page(page); - sg_set_page(sgl->sg + sgl->cur, page, size, offset); - sgl->cur++; - ctx->used += size; - -done: - ctx->more = flags & MSG_MORE; - -unlock: - skcipher_data_wakeup(sk); - release_sock(sk); - - return err ?: size; -} - -static int skcipher_all_sg_nents(struct skcipher_ctx *ctx) -{ - struct skcipher_sg_list *sgl; - struct scatterlist *sg; - int nents = 0; - - list_for_each_entry(sgl, &ctx->tsgl, list) { - sg = sgl->sg; - - while (!sg->length) - sg++; - - nents += sg_nents(sg); + /* + * Create a per request TX SGL for this request which tracks the + * SG entries from the global TX SGL. + */ + areq->tsgl_entries = af_alg_count_tsgl(sk, len, 0); + if (!areq->tsgl_entries) + areq->tsgl_entries = 1; + areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * areq->tsgl_entries, + GFP_KERNEL); + if (!areq->tsgl) { + err = -ENOMEM; + goto free; } - return nents; -} + sg_init_table(areq->tsgl, areq->tsgl_entries); + af_alg_pull_tsgl(sk, len, areq->tsgl, 0); -static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg, - int flags) -{ - struct sock *sk = sock->sk; - struct alg_sock *ask = alg_sk(sk); - struct sock *psk = ask->parent; - struct alg_sock *pask = alg_sk(psk); - struct skcipher_ctx *ctx = ask->private; - struct skcipher_tfm *skc = pask->private; - struct crypto_skcipher *tfm = skc->skcipher; - struct skcipher_sg_list *sgl; - struct scatterlist *sg; - struct skcipher_async_req *sreq; - struct skcipher_request *req; - struct skcipher_async_rsgl *last_rsgl = NULL; - unsigned int txbufs = 0, len = 0, tx_nents; - unsigned int reqsize = crypto_skcipher_reqsize(tfm); - unsigned int ivsize = crypto_skcipher_ivsize(tfm); - int err = -ENOMEM; - bool mark = false; - char *iv; + /* Initialize the crypto operation */ + skcipher_request_set_tfm(&areq->cra_u.skcipher_req, tfm); + skcipher_request_set_crypt(&areq->cra_u.skcipher_req, areq->tsgl, + areq->first_rsgl.sgl.sg, len, ctx->iv); - sreq = kzalloc(sizeof(*sreq) + reqsize + ivsize, GFP_KERNEL); - if (unlikely(!sreq)) - goto out; - - req = &sreq->req; - iv = (char *)(req + 1) + reqsize; - sreq->iocb = msg->msg_iocb; - INIT_LIST_HEAD(&sreq->list); - sreq->inflight = &ctx->inflight; - - lock_sock(sk); - tx_nents = skcipher_all_sg_nents(ctx); - sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL); - if (unlikely(!sreq->tsg)) - goto unlock; - sg_init_table(sreq->tsg, tx_nents); - memcpy(iv, ctx->iv, ivsize); - skcipher_request_set_tfm(req, tfm); - skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, - skcipher_async_cb, sreq); - - while 
(iov_iter_count(&msg->msg_iter)) { - struct skcipher_async_rsgl *rsgl; - int used; - - if (!ctx->used) { - err = skcipher_wait_for_data(sk, flags); - if (err) - goto free; - } - sgl = list_first_entry(&ctx->tsgl, - struct skcipher_sg_list, list); - sg = sgl->sg; - - while (!sg->length) - sg++; - - used = min_t(unsigned long, ctx->used, - iov_iter_count(&msg->msg_iter)); - used = min_t(unsigned long, used, sg->length); - - if (txbufs == tx_nents) { - struct scatterlist *tmp; - int x; - /* Ran out of tx slots in async request - * need to expand */ - tmp = kcalloc(tx_nents * 2, sizeof(*tmp), - GFP_KERNEL); - if (!tmp) { - err = -ENOMEM; - goto free; - } - - sg_init_table(tmp, tx_nents * 2); - for (x = 0; x < tx_nents; x++) - sg_set_page(&tmp[x], sg_page(&sreq->tsg[x]), - sreq->tsg[x].length, - sreq->tsg[x].offset); - kfree(sreq->tsg); - sreq->tsg = tmp; - tx_nents *= 2; - mark = true; - } - /* Need to take over the tx sgl from ctx - * to the asynch req - these sgls will be freed later */ - sg_set_page(sreq->tsg + txbufs++, sg_page(sg), sg->length, - sg->offset); - - if (list_empty(&sreq->list)) { - rsgl = &sreq->first_sgl; - list_add_tail(&rsgl->list, &sreq->list); - } else { - rsgl = kmalloc(sizeof(*rsgl), GFP_KERNEL); - if (!rsgl) { - err = -ENOMEM; - goto free; - } - list_add_tail(&rsgl->list, &sreq->list); - } - - used = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, used); - err = used; - if (used < 0) - goto free; - if (last_rsgl) - af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl); - - last_rsgl = rsgl; - len += used; - skcipher_pull_sgl(sk, used, 0); - iov_iter_advance(&msg->msg_iter, used); + if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) { + /* AIO operation */ + areq->iocb = msg->msg_iocb; + skcipher_request_set_callback(&areq->cra_u.skcipher_req, + CRYPTO_TFM_REQ_MAY_SLEEP, + af_alg_async_cb, areq); + err = ctx->enc ? + crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) : + crypto_skcipher_decrypt(&areq->cra_u.skcipher_req); + } else { + /* Synchronous operation */ + skcipher_request_set_callback(&areq->cra_u.skcipher_req, + CRYPTO_TFM_REQ_MAY_SLEEP | + CRYPTO_TFM_REQ_MAY_BACKLOG, + af_alg_complete, + &ctx->completion); + err = af_alg_wait_for_completion(ctx->enc ? + crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) : + crypto_skcipher_decrypt(&areq->cra_u.skcipher_req), + &ctx->completion); } - if (mark) - sg_mark_end(sreq->tsg + txbufs - 1); - - skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg, - len, iv); - err = ctx->enc ? crypto_skcipher_encrypt(req) : - crypto_skcipher_decrypt(req); + /* AIO operation in progress */ if (err == -EINPROGRESS) { - atomic_inc(&ctx->inflight); - err = -EIOCBQUEUED; - sreq = NULL; - goto unlock; - } -free: - skcipher_free_async_sgls(sreq); -unlock: - skcipher_wmem_wakeup(sk); - release_sock(sk); - kzfree(sreq); -out: - return err; -} + sock_hold(sk); -static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg, - int flags) -{ - struct sock *sk = sock->sk; - struct alg_sock *ask = alg_sk(sk); - struct sock *psk = ask->parent; - struct alg_sock *pask = alg_sk(psk); - struct skcipher_ctx *ctx = ask->private; - struct skcipher_tfm *skc = pask->private; - struct crypto_skcipher *tfm = skc->skcipher; - unsigned bs = crypto_skcipher_blocksize(tfm); - struct skcipher_sg_list *sgl; - struct scatterlist *sg; - int err = -EAGAIN; - int used; - long copied = 0; + /* Remember output size that will be generated. 
*/ + areq->outlen = len; - lock_sock(sk); - while (msg_data_left(msg)) { - if (!ctx->used) { - err = skcipher_wait_for_data(sk, flags); - if (err) - goto unlock; - } - - used = min_t(unsigned long, ctx->used, msg_data_left(msg)); - - used = af_alg_make_sg(&ctx->rsgl, &msg->msg_iter, used); - err = used; - if (err < 0) - goto unlock; - - if (ctx->more || used < ctx->used) - used -= used % bs; - - err = -EINVAL; - if (!used) - goto free; - - sgl = list_first_entry(&ctx->tsgl, - struct skcipher_sg_list, list); - sg = sgl->sg; - - while (!sg->length) - sg++; - - skcipher_request_set_crypt(&ctx->req, sg, ctx->rsgl.sg, used, - ctx->iv); - - err = af_alg_wait_for_completion( - ctx->enc ? - crypto_skcipher_encrypt(&ctx->req) : - crypto_skcipher_decrypt(&ctx->req), - &ctx->completion); - -free: - af_alg_free_sg(&ctx->rsgl); - - if (err) - goto unlock; - - copied += used; - skcipher_pull_sgl(sk, used, 1); - iov_iter_advance(&msg->msg_iter, used); + return -EIOCBQUEUED; } - err = 0; +free: + af_alg_free_areq_sgls(areq); + sock_kfree_s(sk, areq, areq->areqlen); -unlock: - skcipher_wmem_wakeup(sk); - release_sock(sk); - - return copied ?: err; + return err ? err : len; } static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored, int flags) -{ - return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ? - skcipher_recvmsg_async(sock, msg, flags) : - skcipher_recvmsg_sync(sock, msg, flags); -} - -static unsigned int skcipher_poll(struct file *file, struct socket *sock, - poll_table *wait) { struct sock *sk = sock->sk; - struct alg_sock *ask = alg_sk(sk); - struct skcipher_ctx *ctx = ask->private; - unsigned int mask; + int ret = 0; - sock_poll_wait(file, sk_sleep(sk), wait); - mask = 0; + lock_sock(sk); + while (msg_data_left(msg)) { + int err = _skcipher_recvmsg(sock, msg, ignored, flags); - if (ctx->used) - mask |= POLLIN | POLLRDNORM; + /* + * This error covers -EIOCBQUEUED which implies that we can + * only handle one AIO request. If the caller wants to have + * multiple AIO requests in parallel, he must make multiple + * separate AIO calls. + * + * Also return the error if no data has been processed so far. 
+ */ + if (err <= 0) { + if (err == -EIOCBQUEUED || !ret) + ret = err; + goto out; + } - if (skcipher_writable(sk)) - mask |= POLLOUT | POLLWRNORM | POLLWRBAND; + ret += err; + } - return mask; +out: + af_alg_wmem_wakeup(sk); + release_sock(sk); + return ret; } + static struct proto_ops algif_skcipher_ops = { .family = PF_ALG, @@ -760,9 +205,9 @@ static struct proto_ops algif_skcipher_ops = { .release = af_alg_release, .sendmsg = skcipher_sendmsg, - .sendpage = skcipher_sendpage, + .sendpage = af_alg_sendpage, .recvmsg = skcipher_recvmsg, - .poll = skcipher_poll, + .poll = af_alg_poll, }; static int skcipher_check_key(struct socket *sock) @@ -824,7 +269,7 @@ static ssize_t skcipher_sendpage_nokey(struct socket *sock, struct page *page, if (err) return err; - return skcipher_sendpage(sock, page, offset, size, flags); + return af_alg_sendpage(sock, page, offset, size, flags); } static int skcipher_recvmsg_nokey(struct socket *sock, struct msghdr *msg, @@ -858,7 +303,7 @@ static struct proto_ops algif_skcipher_ops_nokey = { .sendmsg = skcipher_sendmsg_nokey, .sendpage = skcipher_sendpage_nokey, .recvmsg = skcipher_recvmsg_nokey, - .poll = skcipher_poll, + .poll = af_alg_poll, }; static void *skcipher_bind(const char *name, u32 type, u32 mask) @@ -900,26 +345,16 @@ static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen) return err; } -static void skcipher_wait(struct sock *sk) -{ - struct alg_sock *ask = alg_sk(sk); - struct skcipher_ctx *ctx = ask->private; - int ctr = 0; - - while (atomic_read(&ctx->inflight) && ctr++ < 100) - msleep(100); -} - static void skcipher_sock_destruct(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); - struct skcipher_ctx *ctx = ask->private; - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req); + struct af_alg_ctx *ctx = ask->private; + struct sock *psk = ask->parent; + struct alg_sock *pask = alg_sk(psk); + struct skcipher_tfm *skc = pask->private; + struct crypto_skcipher *tfm = skc->skcipher; - if (atomic_read(&ctx->inflight)) - skcipher_wait(sk); - - skcipher_free_sgl(sk); + af_alg_pull_tsgl(sk, ctx->used, NULL, 0); sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm)); sock_kfree_s(sk, ctx, ctx->len); af_alg_release_parent(sk); @@ -927,11 +362,11 @@ static void skcipher_sock_destruct(struct sock *sk) static int skcipher_accept_parent_nokey(void *private, struct sock *sk) { - struct skcipher_ctx *ctx; + struct af_alg_ctx *ctx; struct alg_sock *ask = alg_sk(sk); struct skcipher_tfm *tfm = private; struct crypto_skcipher *skcipher = tfm->skcipher; - unsigned int len = sizeof(*ctx) + crypto_skcipher_reqsize(skcipher); + unsigned int len = sizeof(*ctx); ctx = sock_kmalloc(sk, len, GFP_KERNEL); if (!ctx) @@ -946,22 +381,17 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk) memset(ctx->iv, 0, crypto_skcipher_ivsize(skcipher)); - INIT_LIST_HEAD(&ctx->tsgl); + INIT_LIST_HEAD(&ctx->tsgl_list); ctx->len = len; ctx->used = 0; + ctx->rcvused = 0; ctx->more = 0; ctx->merge = 0; ctx->enc = 0; - atomic_set(&ctx->inflight, 0); af_alg_init_completion(&ctx->completion); ask->private = ctx; - skcipher_request_set_tfm(&ctx->req, skcipher); - skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_SLEEP | - CRYPTO_TFM_REQ_MAY_BACKLOG, - af_alg_complete, &ctx->completion); - sk->sk_destruct = skcipher_sock_destruct; return 0; diff --git a/crypto/ctr.c b/crypto/ctr.c index 477d9226ccaa..854d924f9d8e 100644 --- a/crypto/ctr.c +++ b/crypto/ctr.c @@ -65,8 +65,7 @@ static void crypto_ctr_crypt_final(struct 
blkcipher_walk *walk, unsigned int nbytes = walk->nbytes; crypto_cipher_encrypt_one(tfm, keystream, ctrblk); - crypto_xor(keystream, src, nbytes); - memcpy(dst, keystream, nbytes); + crypto_xor_cpy(dst, keystream, src, nbytes); crypto_inc(ctrblk, bsize); } diff --git a/crypto/drbg.c b/crypto/drbg.c index 633a88e93ab0..70018397e59a 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -1133,10 +1133,10 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg) { if (!drbg) return; - kzfree(drbg->V); - drbg->Vbuf = NULL; - kzfree(drbg->C); - drbg->Cbuf = NULL; + kzfree(drbg->Vbuf); + drbg->V = NULL; + kzfree(drbg->Cbuf); + drbg->C = NULL; kzfree(drbg->scratchpadbuf); drbg->scratchpadbuf = NULL; drbg->reseed_ctr = 0; diff --git a/crypto/ecdh.c b/crypto/ecdh.c index 61c7708905d0..4271fc77d261 100644 --- a/crypto/ecdh.c +++ b/crypto/ecdh.c @@ -20,8 +20,6 @@ struct ecdh_ctx { unsigned int curve_id; unsigned int ndigits; u64 private_key[ECC_MAX_DIGITS]; - u64 public_key[2 * ECC_MAX_DIGITS]; - u64 shared_secret[ECC_MAX_DIGITS]; }; static inline struct ecdh_ctx *ecdh_get_ctx(struct crypto_kpp *tfm) @@ -70,41 +68,58 @@ static int ecdh_set_secret(struct crypto_kpp *tfm, const void *buf, static int ecdh_compute_value(struct kpp_request *req) { - int ret = 0; struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); struct ecdh_ctx *ctx = ecdh_get_ctx(tfm); - size_t copied, nbytes; + u64 *public_key; + u64 *shared_secret = NULL; void *buf; + size_t copied, nbytes, public_key_sz; + int ret = -ENOMEM; nbytes = ctx->ndigits << ECC_DIGITS_TO_BYTES_SHIFT; + /* Public part is a point thus it has both coordinates */ + public_key_sz = 2 * nbytes; + + public_key = kmalloc(public_key_sz, GFP_KERNEL); + if (!public_key) + return -ENOMEM; if (req->src) { - copied = sg_copy_to_buffer(req->src, 1, ctx->public_key, - 2 * nbytes); - if (copied != 2 * nbytes) - return -EINVAL; + shared_secret = kmalloc(nbytes, GFP_KERNEL); + if (!shared_secret) + goto free_pubkey; + + copied = sg_copy_to_buffer(req->src, 1, public_key, + public_key_sz); + if (copied != public_key_sz) { + ret = -EINVAL; + goto free_all; + } ret = crypto_ecdh_shared_secret(ctx->curve_id, ctx->ndigits, - ctx->private_key, - ctx->public_key, - ctx->shared_secret); + ctx->private_key, public_key, + shared_secret); - buf = ctx->shared_secret; + buf = shared_secret; } else { ret = ecc_make_pub_key(ctx->curve_id, ctx->ndigits, - ctx->private_key, ctx->public_key); - buf = ctx->public_key; - /* Public part is a point thus it has both coordinates */ - nbytes *= 2; + ctx->private_key, public_key); + buf = public_key; + nbytes = public_key_sz; } if (ret < 0) - return ret; + goto free_all; copied = sg_copy_from_buffer(req->dst, 1, buf, nbytes); if (copied != nbytes) - return -EINVAL; + ret = -EINVAL; + /* fall through */ +free_all: + kzfree(shared_secret); +free_pubkey: + kfree(public_key); return ret; } diff --git a/crypto/pcbc.c b/crypto/pcbc.c index 29dd2b4a3b85..d9e45a958720 100644 --- a/crypto/pcbc.c +++ b/crypto/pcbc.c @@ -55,8 +55,7 @@ static int crypto_pcbc_encrypt_segment(struct skcipher_request *req, do { crypto_xor(iv, src, bsize); crypto_cipher_encrypt_one(tfm, dst, iv); - memcpy(iv, dst, bsize); - crypto_xor(iv, src, bsize); + crypto_xor_cpy(iv, dst, src, bsize); src += bsize; dst += bsize; @@ -79,8 +78,7 @@ static int crypto_pcbc_encrypt_inplace(struct skcipher_request *req, memcpy(tmpbuf, src, bsize); crypto_xor(iv, src, bsize); crypto_cipher_encrypt_one(tfm, src, iv); - memcpy(iv, tmpbuf, bsize); - crypto_xor(iv, src, bsize); + crypto_xor_cpy(iv, tmpbuf, 
src, bsize); src += bsize; } while ((nbytes -= bsize) >= bsize); @@ -127,8 +125,7 @@ static int crypto_pcbc_decrypt_segment(struct skcipher_request *req, do { crypto_cipher_decrypt_one(tfm, dst, src); crypto_xor(dst, iv, bsize); - memcpy(iv, src, bsize); - crypto_xor(iv, dst, bsize); + crypto_xor_cpy(iv, dst, src, bsize); src += bsize; dst += bsize; @@ -153,8 +150,7 @@ static int crypto_pcbc_decrypt_inplace(struct skcipher_request *req, memcpy(tmpbuf, src, bsize); crypto_cipher_decrypt_one(tfm, src, src); crypto_xor(src, iv, bsize); - memcpy(iv, tmpbuf, bsize); - crypto_xor(iv, src, bsize); + crypto_xor_cpy(iv, src, tmpbuf, bsize); src += bsize; } while ((nbytes -= bsize) >= bsize); diff --git a/crypto/rng.c b/crypto/rng.c index 5e8469244960..b4a618668161 100644 --- a/crypto/rng.c +++ b/crypto/rng.c @@ -43,12 +43,14 @@ int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen) if (!buf) return -ENOMEM; - get_random_bytes(buf, slen); + err = get_random_bytes_wait(buf, slen); + if (err) + goto out; seed = buf; } err = crypto_rng_alg(tfm)->seed(tfm, seed, slen); - +out: kzfree(buf); return err; } diff --git a/crypto/scompress.c b/crypto/scompress.c index ae1d3cf209e4..2075e2c4e7df 100644 --- a/crypto/scompress.c +++ b/crypto/scompress.c @@ -65,11 +65,6 @@ static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg) seq_puts(m, "type : scomp\n"); } -static int crypto_scomp_init_tfm(struct crypto_tfm *tfm) -{ - return 0; -} - static void crypto_scomp_free_scratches(void * __percpu *scratches) { int i; @@ -125,12 +120,26 @@ static int crypto_scomp_alloc_all_scratches(void) if (!scomp_src_scratches) return -ENOMEM; scomp_dst_scratches = crypto_scomp_alloc_scratches(); - if (!scomp_dst_scratches) + if (!scomp_dst_scratches) { + crypto_scomp_free_scratches(scomp_src_scratches); + scomp_src_scratches = NULL; return -ENOMEM; + } } return 0; } +static int crypto_scomp_init_tfm(struct crypto_tfm *tfm) +{ + int ret; + + mutex_lock(&scomp_lock); + ret = crypto_scomp_alloc_all_scratches(); + mutex_unlock(&scomp_lock); + + return ret; +} + static void crypto_scomp_sg_free(struct scatterlist *sgl) { int i, n; @@ -211,9 +220,7 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir) scratch_dst, &req->dlen, *ctx); if (!ret) { if (!req->dst) { - req->dst = crypto_scomp_sg_alloc(req->dlen, - req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? 
- GFP_KERNEL : GFP_ATOMIC); + req->dst = crypto_scomp_sg_alloc(req->dlen, GFP_ATOMIC); if (!req->dst) goto out; } @@ -240,6 +247,10 @@ static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm) struct crypto_scomp **ctx = crypto_tfm_ctx(tfm); crypto_free_scomp(*ctx); + + mutex_lock(&scomp_lock); + crypto_scomp_free_all_scratches(); + mutex_unlock(&scomp_lock); } int crypto_init_scomp_ops_async(struct crypto_tfm *tfm) @@ -316,40 +327,18 @@ static const struct crypto_type crypto_scomp_type = { int crypto_register_scomp(struct scomp_alg *alg) { struct crypto_alg *base = &alg->base; - int ret = -ENOMEM; - - mutex_lock(&scomp_lock); - if (crypto_scomp_alloc_all_scratches()) - goto error; base->cra_type = &crypto_scomp_type; base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS; - ret = crypto_register_alg(base); - if (ret) - goto error; - - mutex_unlock(&scomp_lock); - return ret; - -error: - crypto_scomp_free_all_scratches(); - mutex_unlock(&scomp_lock); - return ret; + return crypto_register_alg(base); } EXPORT_SYMBOL_GPL(crypto_register_scomp); int crypto_unregister_scomp(struct scomp_alg *alg) { - int ret; - - mutex_lock(&scomp_lock); - ret = crypto_unregister_alg(&alg->base); - crypto_scomp_free_all_scratches(); - mutex_unlock(&scomp_lock); - - return ret; + return crypto_unregister_alg(&alg->base); } EXPORT_SYMBOL_GPL(crypto_unregister_scomp); diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c index 94970a794975..7c3382facc82 100644 --- a/crypto/serpent_generic.c +++ b/crypto/serpent_generic.c @@ -229,6 +229,46 @@ x4 ^= x2; \ }) +static void __serpent_setkey_sbox(u32 r0, u32 r1, u32 r2, u32 r3, u32 r4, u32 *k) +{ + k += 100; + S3(r3, r4, r0, r1, r2); store_and_load_keys(r1, r2, r4, r3, 28, 24); + S4(r1, r2, r4, r3, r0); store_and_load_keys(r2, r4, r3, r0, 24, 20); + S5(r2, r4, r3, r0, r1); store_and_load_keys(r1, r2, r4, r0, 20, 16); + S6(r1, r2, r4, r0, r3); store_and_load_keys(r4, r3, r2, r0, 16, 12); + S7(r4, r3, r2, r0, r1); store_and_load_keys(r1, r2, r0, r4, 12, 8); + S0(r1, r2, r0, r4, r3); store_and_load_keys(r0, r2, r4, r1, 8, 4); + S1(r0, r2, r4, r1, r3); store_and_load_keys(r3, r4, r1, r0, 4, 0); + S2(r3, r4, r1, r0, r2); store_and_load_keys(r2, r4, r3, r0, 0, -4); + S3(r2, r4, r3, r0, r1); store_and_load_keys(r0, r1, r4, r2, -4, -8); + S4(r0, r1, r4, r2, r3); store_and_load_keys(r1, r4, r2, r3, -8, -12); + S5(r1, r4, r2, r3, r0); store_and_load_keys(r0, r1, r4, r3, -12, -16); + S6(r0, r1, r4, r3, r2); store_and_load_keys(r4, r2, r1, r3, -16, -20); + S7(r4, r2, r1, r3, r0); store_and_load_keys(r0, r1, r3, r4, -20, -24); + S0(r0, r1, r3, r4, r2); store_and_load_keys(r3, r1, r4, r0, -24, -28); + k -= 50; + S1(r3, r1, r4, r0, r2); store_and_load_keys(r2, r4, r0, r3, 22, 18); + S2(r2, r4, r0, r3, r1); store_and_load_keys(r1, r4, r2, r3, 18, 14); + S3(r1, r4, r2, r3, r0); store_and_load_keys(r3, r0, r4, r1, 14, 10); + S4(r3, r0, r4, r1, r2); store_and_load_keys(r0, r4, r1, r2, 10, 6); + S5(r0, r4, r1, r2, r3); store_and_load_keys(r3, r0, r4, r2, 6, 2); + S6(r3, r0, r4, r2, r1); store_and_load_keys(r4, r1, r0, r2, 2, -2); + S7(r4, r1, r0, r2, r3); store_and_load_keys(r3, r0, r2, r4, -2, -6); + S0(r3, r0, r2, r4, r1); store_and_load_keys(r2, r0, r4, r3, -6, -10); + S1(r2, r0, r4, r3, r1); store_and_load_keys(r1, r4, r3, r2, -10, -14); + S2(r1, r4, r3, r2, r0); store_and_load_keys(r0, r4, r1, r2, -14, -18); + S3(r0, r4, r1, r2, r3); store_and_load_keys(r2, r3, r4, r0, -18, -22); + k -= 50; + S4(r2, r3, r4, r0, r1); 
store_and_load_keys(r3, r4, r0, r1, 28, 24); + S5(r3, r4, r0, r1, r2); store_and_load_keys(r2, r3, r4, r1, 24, 20); + S6(r2, r3, r4, r1, r0); store_and_load_keys(r4, r0, r3, r1, 20, 16); + S7(r4, r0, r3, r1, r2); store_and_load_keys(r2, r3, r1, r4, 16, 12); + S0(r2, r3, r1, r4, r0); store_and_load_keys(r1, r3, r4, r2, 12, 8); + S1(r1, r3, r4, r2, r0); store_and_load_keys(r0, r4, r2, r1, 8, 4); + S2(r0, r4, r2, r1, r3); store_and_load_keys(r3, r4, r0, r1, 4, 0); + S3(r3, r4, r0, r1, r2); storekeys(r1, r2, r4, r3, 0); +} + int __serpent_setkey(struct serpent_ctx *ctx, const u8 *key, unsigned int keylen) { @@ -395,42 +435,7 @@ int __serpent_setkey(struct serpent_ctx *ctx, const u8 *key, keyiter(k[23], r1, r0, r3, 131, 31); /* Apply S-boxes */ - - S3(r3, r4, r0, r1, r2); store_and_load_keys(r1, r2, r4, r3, 28, 24); - S4(r1, r2, r4, r3, r0); store_and_load_keys(r2, r4, r3, r0, 24, 20); - S5(r2, r4, r3, r0, r1); store_and_load_keys(r1, r2, r4, r0, 20, 16); - S6(r1, r2, r4, r0, r3); store_and_load_keys(r4, r3, r2, r0, 16, 12); - S7(r4, r3, r2, r0, r1); store_and_load_keys(r1, r2, r0, r4, 12, 8); - S0(r1, r2, r0, r4, r3); store_and_load_keys(r0, r2, r4, r1, 8, 4); - S1(r0, r2, r4, r1, r3); store_and_load_keys(r3, r4, r1, r0, 4, 0); - S2(r3, r4, r1, r0, r2); store_and_load_keys(r2, r4, r3, r0, 0, -4); - S3(r2, r4, r3, r0, r1); store_and_load_keys(r0, r1, r4, r2, -4, -8); - S4(r0, r1, r4, r2, r3); store_and_load_keys(r1, r4, r2, r3, -8, -12); - S5(r1, r4, r2, r3, r0); store_and_load_keys(r0, r1, r4, r3, -12, -16); - S6(r0, r1, r4, r3, r2); store_and_load_keys(r4, r2, r1, r3, -16, -20); - S7(r4, r2, r1, r3, r0); store_and_load_keys(r0, r1, r3, r4, -20, -24); - S0(r0, r1, r3, r4, r2); store_and_load_keys(r3, r1, r4, r0, -24, -28); - k -= 50; - S1(r3, r1, r4, r0, r2); store_and_load_keys(r2, r4, r0, r3, 22, 18); - S2(r2, r4, r0, r3, r1); store_and_load_keys(r1, r4, r2, r3, 18, 14); - S3(r1, r4, r2, r3, r0); store_and_load_keys(r3, r0, r4, r1, 14, 10); - S4(r3, r0, r4, r1, r2); store_and_load_keys(r0, r4, r1, r2, 10, 6); - S5(r0, r4, r1, r2, r3); store_and_load_keys(r3, r0, r4, r2, 6, 2); - S6(r3, r0, r4, r2, r1); store_and_load_keys(r4, r1, r0, r2, 2, -2); - S7(r4, r1, r0, r2, r3); store_and_load_keys(r3, r0, r2, r4, -2, -6); - S0(r3, r0, r2, r4, r1); store_and_load_keys(r2, r0, r4, r3, -6, -10); - S1(r2, r0, r4, r3, r1); store_and_load_keys(r1, r4, r3, r2, -10, -14); - S2(r1, r4, r3, r2, r0); store_and_load_keys(r0, r4, r1, r2, -14, -18); - S3(r0, r4, r1, r2, r3); store_and_load_keys(r2, r3, r4, r0, -18, -22); - k -= 50; - S4(r2, r3, r4, r0, r1); store_and_load_keys(r3, r4, r0, r1, 28, 24); - S5(r3, r4, r0, r1, r2); store_and_load_keys(r2, r3, r4, r1, 24, 20); - S6(r2, r3, r4, r1, r0); store_and_load_keys(r4, r0, r3, r1, 20, 16); - S7(r4, r0, r3, r1, r2); store_and_load_keys(r2, r3, r1, r4, 16, 12); - S0(r2, r3, r1, r4, r0); store_and_load_keys(r1, r3, r4, r2, 12, 8); - S1(r1, r3, r4, r2, r0); store_and_load_keys(r0, r4, r2, r1, 8, 4); - S2(r0, r4, r2, r1, r3); store_and_load_keys(r3, r4, r0, r1, 4, 0); - S3(r3, r4, r0, r1, r2); storekeys(r1, r2, r4, r3, 0); + __serpent_setkey_sbox(r0, r1, r2, r3, r4, ctx->expkey); return 0; } diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 0dd6a432d6ca..0022a18d36ee 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -1404,9 +1404,9 @@ static int do_test(const char *alg, u32 type, u32 mask, int m) test_cipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0, speed_template_32_40_48); test_cipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0, - speed_template_32_48_64); + 
speed_template_32_64); test_cipher_speed("xts(aes)", DECRYPT, sec, NULL, 0, - speed_template_32_48_64); + speed_template_32_64); test_cipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0, speed_template_16_24_32); test_cipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0, @@ -1837,9 +1837,9 @@ static int do_test(const char *alg, u32 type, u32 mask, int m) test_acipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0, speed_template_32_40_48); test_acipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0, - speed_template_32_48_64); + speed_template_32_64); test_acipher_speed("xts(aes)", DECRYPT, sec, NULL, 0, - speed_template_32_48_64); + speed_template_32_64); test_acipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0, speed_template_16_24_32); test_acipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0, diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index b1aacfc62b1f..90265ab4437a 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -50,6 +50,7 @@ acpi-$(CONFIG_ACPI_REDUCED_HARDWARE_ONLY) += evged.o acpi-y += sysfs.o acpi-y += property.o acpi-$(CONFIG_X86) += acpi_cmos_rtc.o +acpi-$(CONFIG_X86) += x86/apple.o acpi-$(CONFIG_X86) += x86/utils.o acpi-$(CONFIG_DEBUG_FS) += debugfs.o acpi-$(CONFIG_ACPI_NUMA) += numa.o diff --git a/drivers/acpi/acpi_lpat.c b/drivers/acpi/acpi_lpat.c index c1c4877ca96c..2cd9f738812b 100644 --- a/drivers/acpi/acpi_lpat.c +++ b/drivers/acpi/acpi_lpat.c @@ -25,7 +25,7 @@ * @raw: the raw value, used as a key to get the temerature from the * above mapping table * - * A positive converted temperarure value will be returned on success, + * A positive converted temperature value will be returned on success, * a negative errno will be returned in error cases. */ int acpi_lpat_raw_to_temp(struct acpi_lpat_conversion_table *lpat_table, @@ -55,11 +55,11 @@ EXPORT_SYMBOL_GPL(acpi_lpat_raw_to_temp); * acpi_lpat_temp_to_raw(): Return raw value from temperature through * LPAT conversion table * - * @lpat: the temperature_raw mapping table + * @lpat_table: the temperature_raw mapping table * @temp: the temperature, used as a key to get the raw value from the * above mapping table * - * A positive converted temperature value will be returned on success, + * The raw value will be returned on success, * a negative errno will be returned in error cases. */ int acpi_lpat_temp_to_raw(struct acpi_lpat_conversion_table *lpat_table, diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index f88caf5aab76..032ae44710e5 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -465,7 +465,8 @@ static int acpi_lpss_create_device(struct acpi_device *adev, acpi_dev_free_resource_list(&resource_list); if (!pdata->mmio_base) { - ret = -ENOMEM; + /* Skip the device, but continue the namespace scan. 
*/ + ret = 0; goto err_out; } diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c index f098e25b6b41..86c10599d9f8 100644 --- a/drivers/acpi/acpi_processor.c +++ b/drivers/acpi/acpi_processor.c @@ -670,7 +670,7 @@ static acpi_status __init acpi_processor_ids_walk(acpi_handle handle, } -void __init acpi_processor_check_duplicates(void) +static void __init acpi_processor_check_duplicates(void) { /* check the correctness for all processors in ACPI namespace */ acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c index e88fe3632dd6..0972ec0e2eb8 100644 --- a/drivers/acpi/acpi_video.c +++ b/drivers/acpi/acpi_video.c @@ -418,7 +418,7 @@ static int video_set_report_key_events(const struct dmi_system_id *id) return 0; } -static struct dmi_system_id video_dmi_table[] = { +static const struct dmi_system_id video_dmi_table[] = { /* * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121 */ diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c index bf22c29d2517..11b113f8e367 100644 --- a/drivers/acpi/acpi_watchdog.c +++ b/drivers/acpi/acpi_watchdog.c @@ -66,7 +66,7 @@ void __init acpi_watchdog_init(void) for (i = 0; i < wdat->entries; i++) { const struct acpi_generic_address *gas; struct resource_entry *rentry; - struct resource res; + struct resource res = {}; bool found; gas = &entries[i].register_region; diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile index b125bdd3d58b..1709551bc4aa 100644 --- a/drivers/acpi/acpica/Makefile +++ b/drivers/acpi/acpica/Makefile @@ -18,6 +18,7 @@ acpi-y := \ dsmthdat.o \ dsobject.o \ dsopcode.o \ + dspkginit.o \ dsutils.o \ dswexec.o \ dswload.o \ diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h index bb6a84b0b4b3..7a1a68b5ac5c 100644 --- a/drivers/acpi/acpica/acapps.h +++ b/drivers/acpi/acpica/acapps.h @@ -114,6 +114,8 @@ ac_get_all_tables_from_file(char *filename, u8 get_only_aml_tables, struct acpi_new_table_desc **return_list_head); +void ac_delete_table_list(struct acpi_new_table_desc *list_head); + u8 ac_is_file_binary(FILE * file); acpi_status ac_validate_table_header(FILE * file, long table_offset); diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h index 0d95c85cce06..f8f3a6e74128 100644 --- a/drivers/acpi/acpica/acdispat.h +++ b/drivers/acpi/acpica/acdispat.h @@ -237,6 +237,11 @@ acpi_ds_initialize_objects(u32 table_index, * dsobject - Parser/Interpreter interface - object initialization and conversion */ acpi_status +acpi_ds_build_internal_object(struct acpi_walk_state *walk_state, + union acpi_parse_object *op, + union acpi_operand_object **obj_desc_ptr); + +acpi_status acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state, union acpi_parse_object *op, u32 buffer_length, @@ -258,6 +263,14 @@ acpi_ds_create_node(struct acpi_walk_state *walk_state, struct acpi_namespace_node *node, union acpi_parse_object *op); +/* + * dspkginit - Package object initialization + */ +acpi_status +acpi_ds_init_package_element(u8 object_type, + union acpi_operand_object *source_object, + union acpi_generic_state *state, void *context); + /* * dsutils - Parser/Interpreter interface utility routines */ diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index 8ddd3b20e0c6..0d45b8bb1678 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h @@ -199,6 +199,7 @@ struct acpi_namespace_node { #define ANOBJ_EVALUATED 
0x20 /* Set on first evaluation of node */ #define ANOBJ_ALLOCATED_BUFFER 0x40 /* Method AML buffer is dynamic (install_method) */ +#define IMPLICIT_EXTERNAL 0x02 /* iASL only: This object created implicitly via External */ #define ANOBJ_IS_EXTERNAL 0x08 /* iASL only: This object created via External() */ #define ANOBJ_METHOD_NO_RETVAL 0x10 /* iASL only: Method has no return value */ #define ANOBJ_METHOD_SOME_NO_RETVAL 0x20 /* iASL only: Method has at least one return value */ @@ -604,7 +605,7 @@ struct acpi_update_state { * Pkg state - used to traverse nested package structures */ struct acpi_pkg_state { - ACPI_STATE_COMMON u16 index; + ACPI_STATE_COMMON u32 index; union acpi_operand_object *source_object; union acpi_operand_object *dest_object; struct acpi_walk_state *walk_state; @@ -867,7 +868,7 @@ struct acpi_parse_obj_named { /* This version is used by the iASL compiler only */ -#define ACPI_MAX_PARSEOP_NAME 20 +#define ACPI_MAX_PARSEOP_NAME 20 struct acpi_parse_obj_asl { ACPI_PARSE_COMMON union acpi_parse_object *child; @@ -907,7 +908,7 @@ union acpi_parse_object { struct asl_comment_state { u8 comment_type; u32 spaces_before; - union acpi_parse_object *latest_parse_node; + union acpi_parse_object *latest_parse_op; union acpi_parse_object *parsing_paren_brace_node; u8 capture_comments; }; diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h index 27c3f982d810..5226146190bf 100644 --- a/drivers/acpi/acpica/acobject.h +++ b/drivers/acpi/acpica/acobject.h @@ -122,7 +122,9 @@ struct acpi_object_integer { _type *pointer; \ u32 length; -struct acpi_object_string { /* Null terminated, ASCII characters only */ +/* Null terminated, ASCII characters only */ + +struct acpi_object_string { ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_BUFFER_INFO(char) /* String in AML stream or allocated string */ }; @@ -211,7 +213,9 @@ struct acpi_object_method { union acpi_operand_object *notify_list[2]; /* Handlers for system/device notifies */\ union acpi_operand_object *handler; /* Handler for Address space */ -struct acpi_object_notify_common { /* COMMON NOTIFY for POWER, PROCESSOR, DEVICE, and THERMAL */ +/* COMMON NOTIFY for POWER, PROCESSOR, DEVICE, and THERMAL */ + +struct acpi_object_notify_common { ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO}; struct acpi_object_device { @@ -258,7 +262,9 @@ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO}; u8 access_length; /* For serial regions/fields */ -struct acpi_object_field_common { /* COMMON FIELD (for BUFFER, REGION, BANK, and INDEX fields) */ +/* COMMON FIELD (for BUFFER, REGION, BANK, and INDEX fields) */ + +struct acpi_object_field_common { ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO union acpi_operand_object *region_obj; /* Parent Operation Region object (REGION/BANK fields only) */ }; @@ -333,11 +339,12 @@ struct acpi_object_addr_handler { struct acpi_object_reference { ACPI_OBJECT_COMMON_HEADER u8 class; /* Reference Class */ u8 target_type; /* Used for Index Op */ - u8 reserved; + u8 resolved; /* Reference has been resolved to a value */ void *object; /* name_op=>HANDLE to obj, index_op=>union acpi_operand_object */ struct acpi_namespace_node *node; /* ref_of or Namepath */ union acpi_operand_object **where; /* Target of Index */ u8 *index_pointer; /* Used for Buffers and Strings */ + u8 *aml; /* Used for deferred resolution of the ref */ u32 value; /* Used for Local/Arg/Index/ddb_handle */ }; diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h index c8da453bd960..84a3ceb6e384 100644 --- 
a/drivers/acpi/acpica/actables.h +++ b/drivers/acpi/acpica/actables.h @@ -76,7 +76,8 @@ void acpi_tb_release_temp_table(struct acpi_table_desc *table_desc); acpi_status acpi_tb_validate_temp_table(struct acpi_table_desc *table_desc); acpi_status -acpi_tb_verify_temp_table(struct acpi_table_desc *table_desc, char *signature); +acpi_tb_verify_temp_table(struct acpi_table_desc *table_desc, + char *signature, u32 *table_index); u8 acpi_tb_is_table_loaded(u32 table_index); @@ -132,6 +133,8 @@ acpi_tb_install_and_load_table(acpi_physical_address address, acpi_status acpi_tb_unload_table(u32 table_index); +void acpi_tb_notify_table(u32 event, void *table); + void acpi_tb_terminate(void); acpi_status acpi_tb_delete_namespace_by_owner(u32 table_index); diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h index 2a3cc4296481..745134ade35f 100644 --- a/drivers/acpi/acpica/acutils.h +++ b/drivers/acpi/acpica/acutils.h @@ -516,7 +516,7 @@ union acpi_generic_state *acpi_ut_create_update_state(union acpi_operand_object union acpi_generic_state *acpi_ut_create_pkg_state(void *internal_object, void *external_object, - u16 index); + u32 index); acpi_status acpi_ut_create_update_state_and_push(union acpi_operand_object *object, @@ -538,6 +538,13 @@ acpi_status acpi_ut_short_divide(u64 in_dividend, u32 divisor, u64 *out_quotient, u32 *out_remainder); +acpi_status +acpi_ut_short_multiply(u64 in_multiplicand, u32 multiplier, u64 *outproduct); + +acpi_status acpi_ut_short_shift_left(u64 operand, u32 count, u64 *out_result); + +acpi_status acpi_ut_short_shift_right(u64 operand, u32 count, u64 *out_result); + /* * utmisc */ diff --git a/drivers/acpi/acpica/dbdisply.c b/drivers/acpi/acpica/dbdisply.c index 46bf270ac525..5a606eac0c22 100644 --- a/drivers/acpi/acpica/dbdisply.c +++ b/drivers/acpi/acpica/dbdisply.c @@ -310,7 +310,7 @@ void acpi_db_decode_and_display_object(char *target, char *output_type) } else { - acpi_os_printf("Object (%p) Pathname: %s\n", + acpi_os_printf("Object %p: Namespace Node - Pathname: %s\n", node, (char *)ret_buf.pointer); } @@ -326,7 +326,7 @@ void acpi_db_decode_and_display_object(char *target, char *output_type) obj_desc = acpi_ns_get_attached_object(node); if (obj_desc) { - acpi_os_printf("\nAttached Object (%p):\n", obj_desc); + acpi_os_printf("\nAttached Object %p:", obj_desc); if (!acpi_os_readable (obj_desc, sizeof(union acpi_operand_object))) { acpi_os_printf @@ -335,9 +335,36 @@ void acpi_db_decode_and_display_object(char *target, char *output_type) return; } - acpi_ut_debug_dump_buffer((void *)obj_desc, - sizeof(union acpi_operand_object), - display, ACPI_UINT32_MAX); + if (ACPI_GET_DESCRIPTOR_TYPE(((struct acpi_namespace_node *) + obj_desc)) == + ACPI_DESC_TYPE_NAMED) { + acpi_os_printf(" Namespace Node - "); + status = + acpi_get_name((struct acpi_namespace_node *) + obj_desc, + ACPI_FULL_PATHNAME_NO_TRAILING, + &ret_buf); + if (ACPI_FAILURE(status)) { + acpi_os_printf + ("Could not convert name to pathname\n"); + } else { + acpi_os_printf("Pathname: %s", + (char *)ret_buf.pointer); + } + + acpi_os_printf("\n"); + acpi_ut_debug_dump_buffer((void *)obj_desc, + sizeof(struct + acpi_namespace_node), + display, ACPI_UINT32_MAX); + } else { + acpi_os_printf("\n"); + acpi_ut_debug_dump_buffer((void *)obj_desc, + sizeof(union + acpi_operand_object), + display, ACPI_UINT32_MAX); + } + acpi_ex_dump_object_descriptor(obj_desc, 1); } } diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c index c5dccc54307d..7bcf5f5ea029 100644 --- 
a/drivers/acpi/acpica/dsfield.c +++ b/drivers/acpi/acpica/dsfield.c @@ -184,6 +184,7 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op, /* Execute flag should always be set when this function is entered */ if (!(walk_state->parse_flags & ACPI_PARSE_EXECUTE)) { + ACPI_ERROR((AE_INFO, "Parse execute mode is not set")); return_ACPI_STATUS(AE_AML_INTERNAL); } @@ -556,6 +557,7 @@ acpi_ds_init_field_objects(union acpi_parse_object *op, return_ACPI_STATUS(AE_OK); } + ACPI_ERROR((AE_INFO, "Parse deferred mode is not set")); return_ACPI_STATUS(AE_AML_INTERNAL); } diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c index 7df3152ed856..82448551781b 100644 --- a/drivers/acpi/acpica/dsobject.c +++ b/drivers/acpi/acpica/dsobject.c @@ -52,12 +52,6 @@ #define _COMPONENT ACPI_DISPATCHER ACPI_MODULE_NAME("dsobject") -/* Local prototypes */ -static acpi_status -acpi_ds_build_internal_object(struct acpi_walk_state *walk_state, - union acpi_parse_object *op, - union acpi_operand_object **obj_desc_ptr); - #ifndef ACPI_NO_METHOD_EXECUTION /******************************************************************************* * @@ -73,15 +67,13 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state, * Simple objects are any objects other than a package object! * ******************************************************************************/ - -static acpi_status +acpi_status acpi_ds_build_internal_object(struct acpi_walk_state *walk_state, union acpi_parse_object *op, union acpi_operand_object **obj_desc_ptr) { union acpi_operand_object *obj_desc; acpi_status status; - acpi_object_type type; ACPI_FUNCTION_TRACE(ds_build_internal_object); @@ -89,140 +81,47 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state, if (op->common.aml_opcode == AML_INT_NAMEPATH_OP) { /* * This is a named object reference. If this name was - * previously looked up in the namespace, it was stored in this op. - * Otherwise, go ahead and look it up now + * previously looked up in the namespace, it was stored in + * this op. Otherwise, go ahead and look it up now */ if (!op->common.node) { - status = acpi_ns_lookup(walk_state->scope_info, - op->common.value.string, - ACPI_TYPE_ANY, - ACPI_IMODE_EXECUTE, - ACPI_NS_SEARCH_PARENT | - ACPI_NS_DONT_OPEN_SCOPE, NULL, - ACPI_CAST_INDIRECT_PTR(struct - acpi_namespace_node, - &(op-> - common. - node))); - if (ACPI_FAILURE(status)) { - /* Check if we are resolving a named reference within a package */ + /* Check if we are resolving a named reference within a package */ - if ((status == AE_NOT_FOUND) - && (acpi_gbl_enable_interpreter_slack) - && - ((op->common.parent->common.aml_opcode == - AML_PACKAGE_OP) - || (op->common.parent->common.aml_opcode == - AML_VARIABLE_PACKAGE_OP))) { - /* - * We didn't find the target and we are populating elements - * of a package - ignore if slack enabled. Some ASL code - * contains dangling invalid references in packages and - * expects that no exception will be issued. Leave the - * element as a null element. It cannot be used, but it - * can be overwritten by subsequent ASL code - this is - * typically the case. - */ - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Ignoring unresolved reference in package [%4.4s]\n", - walk_state-> - scope_info->scope. 
- node->name.ascii)); - - return_ACPI_STATUS(AE_OK); - } else { + if ((op->common.parent->common.aml_opcode == + AML_PACKAGE_OP) + || (op->common.parent->common.aml_opcode == + AML_VARIABLE_PACKAGE_OP)) { + /* + * We won't resolve package elements here, we will do this + * after all ACPI tables are loaded into the namespace. This + * behavior supports both forward references to named objects + * and external references to objects in other tables. + */ + goto create_new_object; + } else { + status = acpi_ns_lookup(walk_state->scope_info, + op->common.value.string, + ACPI_TYPE_ANY, + ACPI_IMODE_EXECUTE, + ACPI_NS_SEARCH_PARENT | + ACPI_NS_DONT_OPEN_SCOPE, + NULL, + ACPI_CAST_INDIRECT_PTR + (struct + acpi_namespace_node, + &(op->common.node))); + if (ACPI_FAILURE(status)) { ACPI_ERROR_NAMESPACE(op->common.value. string, status); + return_ACPI_STATUS(status); } - - return_ACPI_STATUS(status); - } - } - - /* Special object resolution for elements of a package */ - - if ((op->common.parent->common.aml_opcode == AML_PACKAGE_OP) || - (op->common.parent->common.aml_opcode == - AML_VARIABLE_PACKAGE_OP)) { - /* - * Attempt to resolve the node to a value before we insert it into - * the package. If this is a reference to a common data type, - * resolve it immediately. According to the ACPI spec, package - * elements can only be "data objects" or method references. - * Attempt to resolve to an Integer, Buffer, String or Package. - * If cannot, return the named reference (for things like Devices, - * Methods, etc.) Buffer Fields and Fields will resolve to simple - * objects (int/buf/str/pkg). - * - * NOTE: References to things like Devices, Methods, Mutexes, etc. - * will remain as named references. This behavior is not described - * in the ACPI spec, but it appears to be an oversight. - */ - obj_desc = - ACPI_CAST_PTR(union acpi_operand_object, - op->common.node); - - status = - acpi_ex_resolve_node_to_value(ACPI_CAST_INDIRECT_PTR - (struct - acpi_namespace_node, - &obj_desc), - walk_state); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - /* - * Special handling for Alias objects. We need to setup the type - * and the Op->Common.Node to point to the Alias target. Note, - * Alias has at most one level of indirection internally. - */ - type = op->common.node->type; - if (type == ACPI_TYPE_LOCAL_ALIAS) { - type = obj_desc->common.type; - op->common.node = - ACPI_CAST_PTR(struct acpi_namespace_node, - op->common.node->object); - } - - switch (type) { - /* - * For these types, we need the actual node, not the subobject. - * However, the subobject did not get an extra reference count above. - * - * TBD: should ex_resolve_node_to_value be changed to fix this? - */ - case ACPI_TYPE_DEVICE: - case ACPI_TYPE_THERMAL: - - acpi_ut_add_reference(op->common.node->object); - - /*lint -fallthrough */ - /* - * For these types, we need the actual node, not the subobject. - * The subobject got an extra reference count in ex_resolve_node_to_value. - */ - case ACPI_TYPE_MUTEX: - case ACPI_TYPE_METHOD: - case ACPI_TYPE_POWER: - case ACPI_TYPE_PROCESSOR: - case ACPI_TYPE_EVENT: - case ACPI_TYPE_REGION: - - /* We will create a reference object for these types below */ - break; - - default: - /* - * All other types - the node was resolved to an actual - * object, we are done. 
- */ - goto exit; } } } +create_new_object: + /* Create and init a new internal ACPI object */ obj_desc = acpi_ut_create_internal_object((acpi_ps_get_opcode_info @@ -240,7 +139,27 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state, return_ACPI_STATUS(status); } -exit: + /* + * Handling for unresolved package reference elements. + * These are elements that are namepaths. + */ + if ((op->common.parent->common.aml_opcode == AML_PACKAGE_OP) || + (op->common.parent->common.aml_opcode == AML_VARIABLE_PACKAGE_OP)) { + obj_desc->reference.resolved = TRUE; + + if ((op->common.aml_opcode == AML_INT_NAMEPATH_OP) && + !obj_desc->reference.node) { + /* + * Name was unresolved above. + * Get the prefix node for later lookup + */ + obj_desc->reference.node = + walk_state->scope_info->scope.node; + obj_desc->reference.aml = op->common.aml; + obj_desc->reference.resolved = FALSE; + } + } + *obj_desc_ptr = obj_desc; return_ACPI_STATUS(status); } @@ -349,200 +268,6 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state, return_ACPI_STATUS(AE_OK); } -/******************************************************************************* - * - * FUNCTION: acpi_ds_build_internal_package_obj - * - * PARAMETERS: walk_state - Current walk state - * op - Parser object to be translated - * element_count - Number of elements in the package - this is - * the num_elements argument to Package() - * obj_desc_ptr - Where the ACPI internal object is returned - * - * RETURN: Status - * - * DESCRIPTION: Translate a parser Op package object to the equivalent - * namespace object - * - * NOTE: The number of elements in the package will be always be the num_elements - * count, regardless of the number of elements in the package list. If - * num_elements is smaller, only that many package list elements are used. - * if num_elements is larger, the Package object is padded out with - * objects of type Uninitialized (as per ACPI spec.) - * - * Even though the ASL compilers do not allow num_elements to be smaller - * than the Package list length (for the fixed length package opcode), some - * BIOS code modifies the AML on the fly to adjust the num_elements, and - * this code compensates for that. This also provides compatibility with - * other AML interpreters. - * - ******************************************************************************/ - -acpi_status -acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state, - union acpi_parse_object *op, - u32 element_count, - union acpi_operand_object **obj_desc_ptr) -{ - union acpi_parse_object *arg; - union acpi_parse_object *parent; - union acpi_operand_object *obj_desc = NULL; - acpi_status status = AE_OK; - u32 i; - u16 index; - u16 reference_count; - - ACPI_FUNCTION_TRACE(ds_build_internal_package_obj); - - /* Find the parent of a possibly nested package */ - - parent = op->common.parent; - while ((parent->common.aml_opcode == AML_PACKAGE_OP) || - (parent->common.aml_opcode == AML_VARIABLE_PACKAGE_OP)) { - parent = parent->common.parent; - } - - /* - * If we are evaluating a Named package object "Name (xxxx, Package)", - * the package object already exists, otherwise it must be created. 
- */ - obj_desc = *obj_desc_ptr; - if (!obj_desc) { - obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_PACKAGE); - *obj_desc_ptr = obj_desc; - if (!obj_desc) { - return_ACPI_STATUS(AE_NO_MEMORY); - } - - obj_desc->package.node = parent->common.node; - } - - /* - * Allocate the element array (array of pointers to the individual - * objects) based on the num_elements parameter. Add an extra pointer slot - * so that the list is always null terminated. - */ - obj_desc->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size) - element_count + - 1) * sizeof(void *)); - - if (!obj_desc->package.elements) { - acpi_ut_delete_object_desc(obj_desc); - return_ACPI_STATUS(AE_NO_MEMORY); - } - - obj_desc->package.count = element_count; - - /* - * Initialize the elements of the package, up to the num_elements count. - * Package is automatically padded with uninitialized (NULL) elements - * if num_elements is greater than the package list length. Likewise, - * Package is truncated if num_elements is less than the list length. - */ - arg = op->common.value.arg; - arg = arg->common.next; - for (i = 0; arg && (i < element_count); i++) { - if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) { - if (arg->common.node->type == ACPI_TYPE_METHOD) { - /* - * A method reference "looks" to the parser to be a method - * invocation, so we special case it here - */ - arg->common.aml_opcode = AML_INT_NAMEPATH_OP; - status = - acpi_ds_build_internal_object(walk_state, - arg, - &obj_desc-> - package. - elements[i]); - } else { - /* This package element is already built, just get it */ - - obj_desc->package.elements[i] = - ACPI_CAST_PTR(union acpi_operand_object, - arg->common.node); - } - } else { - status = - acpi_ds_build_internal_object(walk_state, arg, - &obj_desc->package. - elements[i]); - } - - if (*obj_desc_ptr) { - - /* Existing package, get existing reference count */ - - reference_count = - (*obj_desc_ptr)->common.reference_count; - if (reference_count > 1) { - - /* Make new element ref count match original ref count */ - - for (index = 0; index < (reference_count - 1); - index++) { - acpi_ut_add_reference((obj_desc-> - package. - elements[i])); - } - } - } - - arg = arg->common.next; - } - - /* Check for match between num_elements and actual length of package_list */ - - if (arg) { - /* - * num_elements was exhausted, but there are remaining elements in the - * package_list. Truncate the package to num_elements. - * - * Note: technically, this is an error, from ACPI spec: "It is an error - * for NumElements to be less than the number of elements in the - * PackageList". However, we just print a message and - * no exception is returned. This provides Windows compatibility. Some - * BIOSs will alter the num_elements on the fly, creating this type - * of ill-formed package object. - */ - while (arg) { - /* - * We must delete any package elements that were created earlier - * and are not going to be used because of the package truncation. - */ - if (arg->common.node) { - acpi_ut_remove_reference(ACPI_CAST_PTR - (union - acpi_operand_object, - arg->common.node)); - arg->common.node = NULL; - } - - /* Find out how many elements there really are */ - - i++; - arg = arg->common.next; - } - - ACPI_INFO(("Actual Package length (%u) is larger than " - "NumElements field (%u), truncated", - i, element_count)); - } else if (i < element_count) { - /* - * Arg list (elements) was exhausted, but we did not reach num_elements count. - * Note: this is not an error, the package is padded out with NULLs. 
- */ - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Package List length (%u) smaller than NumElements " - "count (%u), padded with null elements\n", - i, element_count)); - } - - obj_desc->package.flags |= AOPOBJ_DATA_VALID; - op->common.node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_desc); - return_ACPI_STATUS(status); -} - /******************************************************************************* * * FUNCTION: acpi_ds_create_node @@ -662,11 +387,20 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state, case ACPI_TYPE_PACKAGE: /* - * Defer evaluation of Package term_arg operand + * Defer evaluation of Package term_arg operand and all + * package elements. (01/2017): We defer the element + * resolution to allow forward references from the package + * in order to provide compatibility with other ACPI + * implementations. */ obj_desc->package.node = ACPI_CAST_PTR(struct acpi_namespace_node, walk_state->operands[0]); + + if (!op->named.data) { + return_ACPI_STATUS(AE_OK); + } + obj_desc->package.aml_start = op->named.data; obj_desc->package.aml_length = op->named.length; break; @@ -818,9 +552,11 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state, /* Node was saved in Op */ obj_desc->reference.node = op->common.node; - obj_desc->reference.object = - op->common.node->object; obj_desc->reference.class = ACPI_REFCLASS_NAME; + if (op->common.node) { + obj_desc->reference.object = + op->common.node->object; + } break; case AML_DEBUG_OP: diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c index dfc3c25a083d..0336df7ac47d 100644 --- a/drivers/acpi/acpica/dsopcode.c +++ b/drivers/acpi/acpica/dsopcode.c @@ -599,6 +599,15 @@ acpi_ds_eval_data_object_operands(struct acpi_walk_state *walk_state, */ walk_state->operand_index = walk_state->num_operands; + /* Ignore if child is not valid */ + + if (!op->common.value.arg) { + ACPI_ERROR((AE_INFO, + "Dispatch: Missing child while executing TermArg for %X", + op->common.aml_opcode)); + return_ACPI_STATUS(AE_OK); + } + status = acpi_ds_create_operand(walk_state, op->common.value.arg, 1); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); diff --git a/drivers/acpi/acpica/dspkginit.c b/drivers/acpi/acpica/dspkginit.c new file mode 100644 index 000000000000..6d487edfe2de --- /dev/null +++ b/drivers/acpi/acpica/dspkginit.c @@ -0,0 +1,496 @@ +/****************************************************************************** + * + * Module Name: dspkginit - Completion of deferred package initialization + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2017, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#include +#include "accommon.h" +#include "acnamesp.h" +#include "amlcode.h" +#include "acdispat.h" +#include "acinterp.h" + +#define _COMPONENT ACPI_NAMESPACE +ACPI_MODULE_NAME("dspkginit") + +/* Local prototypes */ +static void +acpi_ds_resolve_package_element(union acpi_operand_object **element); + +/******************************************************************************* + * + * FUNCTION: acpi_ds_build_internal_package_obj + * + * PARAMETERS: walk_state - Current walk state + * op - Parser object to be translated + * element_count - Number of elements in the package - this is + * the num_elements argument to Package() + * obj_desc_ptr - Where the ACPI internal object is returned + * + * RETURN: Status + * + * DESCRIPTION: Translate a parser Op package object to the equivalent + * namespace object + * + * NOTE: The number of elements in the package will be always be the num_elements + * count, regardless of the number of elements in the package list. If + * num_elements is smaller, only that many package list elements are used. + * if num_elements is larger, the Package object is padded out with + * objects of type Uninitialized (as per ACPI spec.) + * + * Even though the ASL compilers do not allow num_elements to be smaller + * than the Package list length (for the fixed length package opcode), some + * BIOS code modifies the AML on the fly to adjust the num_elements, and + * this code compensates for that. This also provides compatibility with + * other AML interpreters. + * + ******************************************************************************/ + +acpi_status +acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state, + union acpi_parse_object *op, + u32 element_count, + union acpi_operand_object **obj_desc_ptr) +{ + union acpi_parse_object *arg; + union acpi_parse_object *parent; + union acpi_operand_object *obj_desc = NULL; + acpi_status status = AE_OK; + u16 reference_count; + u32 index; + u32 i; + + ACPI_FUNCTION_TRACE(ds_build_internal_package_obj); + + /* Find the parent of a possibly nested package */ + + parent = op->common.parent; + while ((parent->common.aml_opcode == AML_PACKAGE_OP) || + (parent->common.aml_opcode == AML_VARIABLE_PACKAGE_OP)) { + parent = parent->common.parent; + } + + /* + * If we are evaluating a Named package object of the form: + * Name (xxxx, Package) + * the package object already exists, otherwise it must be created. 
+ */ + obj_desc = *obj_desc_ptr; + if (!obj_desc) { + obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_PACKAGE); + *obj_desc_ptr = obj_desc; + if (!obj_desc) { + return_ACPI_STATUS(AE_NO_MEMORY); + } + + obj_desc->package.node = parent->common.node; + } + + if (obj_desc->package.flags & AOPOBJ_DATA_VALID) { /* Just in case */ + return_ACPI_STATUS(AE_OK); + } + + /* + * Allocate the element array (array of pointers to the individual + * objects) based on the num_elements parameter. Add an extra pointer slot + * so that the list is always null terminated. + */ + obj_desc->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size) + element_count + + 1) * sizeof(void *)); + + if (!obj_desc->package.elements) { + acpi_ut_delete_object_desc(obj_desc); + return_ACPI_STATUS(AE_NO_MEMORY); + } + + obj_desc->package.count = element_count; + arg = op->common.value.arg; + arg = arg->common.next; + + if (arg) { + obj_desc->package.flags |= AOPOBJ_DATA_VALID; + } + + /* + * Initialize the elements of the package, up to the num_elements count. + * Package is automatically padded with uninitialized (NULL) elements + * if num_elements is greater than the package list length. Likewise, + * Package is truncated if num_elements is less than the list length. + */ + for (i = 0; arg && (i < element_count); i++) { + if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) { + if (arg->common.node->type == ACPI_TYPE_METHOD) { + /* + * A method reference "looks" to the parser to be a method + * invocation, so we special case it here + */ + arg->common.aml_opcode = AML_INT_NAMEPATH_OP; + status = + acpi_ds_build_internal_object(walk_state, + arg, + &obj_desc-> + package. + elements[i]); + } else { + /* This package element is already built, just get it */ + + obj_desc->package.elements[i] = + ACPI_CAST_PTR(union acpi_operand_object, + arg->common.node); + } + } else { + status = + acpi_ds_build_internal_object(walk_state, arg, + &obj_desc->package. + elements[i]); + if (status == AE_NOT_FOUND) { + ACPI_ERROR((AE_INFO, "%-48s", + "****DS namepath not found")); + } + + /* + * Initialize this package element. This function handles the + * resolution of named references within the package. + */ + acpi_ds_init_package_element(0, + obj_desc->package. + elements[i], NULL, + &obj_desc->package. + elements[i]); + } + + if (*obj_desc_ptr) { + + /* Existing package, get existing reference count */ + + reference_count = + (*obj_desc_ptr)->common.reference_count; + if (reference_count > 1) { + + /* Make new element ref count match original ref count */ + /* TBD: Probably need an acpi_ut_add_references function */ + + for (index = 0; + index < ((u32)reference_count - 1); + index++) { + acpi_ut_add_reference((obj_desc-> + package. + elements[i])); + } + } + } + + arg = arg->common.next; + } + + /* Check for match between num_elements and actual length of package_list */ + + if (arg) { + /* + * num_elements was exhausted, but there are remaining elements in + * the package_list. Truncate the package to num_elements. + * + * Note: technically, this is an error, from ACPI spec: "It is an + * error for NumElements to be less than the number of elements in + * the PackageList". However, we just print a message and no + * exception is returned. This provides compatibility with other + * ACPI implementations. Some firmware implementations will alter + * the num_elements on the fly, possibly creating this type of + * ill-formed package object. 
+ */ + while (arg) { + /* + * We must delete any package elements that were created earlier + * and are not going to be used because of the package truncation. + */ + if (arg->common.node) { + acpi_ut_remove_reference(ACPI_CAST_PTR + (union + acpi_operand_object, + arg->common.node)); + arg->common.node = NULL; + } + + /* Find out how many elements there really are */ + + i++; + arg = arg->common.next; + } + + ACPI_INFO(("Actual Package length (%u) is larger than " + "NumElements field (%u), truncated", + i, element_count)); + } else if (i < element_count) { + /* + * Arg list (elements) was exhausted, but we did not reach + * num_elements count. + * + * Note: this is not an error, the package is padded out + * with NULLs. + */ + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Package List length (%u) smaller than NumElements " + "count (%u), padded with null elements\n", + i, element_count)); + } + + obj_desc->package.flags |= AOPOBJ_DATA_VALID; + op->common.node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_desc); + return_ACPI_STATUS(status); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ds_init_package_element + * + * PARAMETERS: acpi_pkg_callback + * + * RETURN: Status + * + * DESCRIPTION: Resolve a named reference element within a package object + * + ******************************************************************************/ + +acpi_status +acpi_ds_init_package_element(u8 object_type, + union acpi_operand_object *source_object, + union acpi_generic_state *state, void *context) +{ + union acpi_operand_object **element_ptr; + + if (!source_object) { + return (AE_OK); + } + + /* + * The following code is a bit of a hack to workaround a (current) + * limitation of the acpi_pkg_callback interface. We need a pointer + * to the location within the element array because a new object + * may be created and stored there. + */ + if (context) { + + /* A direct call was made to this function */ + + element_ptr = (union acpi_operand_object **)context; + } else { + /* Call came from acpi_ut_walk_package_tree */ + + element_ptr = state->pkg.this_target_obj; + } + + /* We are only interested in reference objects/elements */ + + if (source_object->common.type == ACPI_TYPE_LOCAL_REFERENCE) { + + /* Attempt to resolve the (named) reference to a namespace node */ + + acpi_ds_resolve_package_element(element_ptr); + } else if (source_object->common.type == ACPI_TYPE_PACKAGE) { + source_object->package.flags |= AOPOBJ_DATA_VALID; + } + + return (AE_OK); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ds_resolve_package_element + * + * PARAMETERS: element_ptr - Pointer to a reference object + * + * RETURN: Possible new element is stored to the indirect element_ptr + * + * DESCRIPTION: Resolve a package element that is a reference to a named + * object. 
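acpi_ds_init_package_element() above accepts the target element slot either directly through its context argument or indirectly through the walk state when invoked from acpi_ut_walk_package_tree. A small, self-contained sketch of that dual-entry callback pattern, using plain C stand-ins rather than ACPICA types:

#include <stdio.h>

/* Stand-in for the walk state that carries the current target slot */
struct walk_state { int **this_target_obj; };

static void init_element(int *source, struct walk_state *state, void *context)
{
	int **element_ptr;

	if (context) {
		/* Direct call: the caller handed us the slot to update */
		element_ptr = context;
	} else {
		/* Call came from a tree walk: the slot is in the walk state */
		element_ptr = state->this_target_obj;
	}

	*element_ptr = source;	/* a new object could be stored here */
}

int main(void)
{
	int obj = 42;
	int *slot = NULL;
	struct walk_state st = { &slot };

	init_element(&obj, NULL, &slot);	/* direct call */
	init_element(&obj, &st, NULL);		/* via tree walk */
	printf("slot -> %d\n", *slot);
	return 0;
}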
+ * + ******************************************************************************/ + +static void +acpi_ds_resolve_package_element(union acpi_operand_object **element_ptr) +{ + acpi_status status; + union acpi_generic_state scope_info; + union acpi_operand_object *element = *element_ptr; + struct acpi_namespace_node *resolved_node; + char *external_path = NULL; + acpi_object_type type; + + ACPI_FUNCTION_TRACE(ds_resolve_package_element); + + /* Check if reference element is already resolved */ + + if (element->reference.resolved) { + return_VOID; + } + + /* Element must be a reference object of correct type */ + + scope_info.scope.node = element->reference.node; /* Prefix node */ + + status = acpi_ns_lookup(&scope_info, (char *)element->reference.aml, /* Pointer to AML path */ + ACPI_TYPE_ANY, ACPI_IMODE_EXECUTE, + ACPI_NS_SEARCH_PARENT | ACPI_NS_DONT_OPEN_SCOPE, + NULL, &resolved_node); + if (ACPI_FAILURE(status)) { + status = acpi_ns_externalize_name(ACPI_UINT32_MAX, + (char *)element->reference. + aml, NULL, &external_path); + + ACPI_EXCEPTION((AE_INFO, status, + "Could not find/resolve named package element: %s", + external_path)); + + ACPI_FREE(external_path); + *element_ptr = NULL; + return_VOID; + } else if (resolved_node->type == ACPI_TYPE_ANY) { + + /* Named reference not resolved, return a NULL package element */ + + ACPI_ERROR((AE_INFO, + "Could not resolve named package element [%4.4s] in [%4.4s]", + resolved_node->name.ascii, + scope_info.scope.node->name.ascii)); + *element_ptr = NULL; + return_VOID; + } +#if 0 + else if (resolved_node->flags & ANOBJ_TEMPORARY) { + /* + * A temporary node found here indicates that the reference is + * to a node that was created within this method. We are not + * going to allow it (especially if the package is returned + * from the method) -- the temporary node will be deleted out + * from under the method. (05/2017). + */ + ACPI_ERROR((AE_INFO, + "Package element refers to a temporary name [%4.4s], " + "inserting a NULL element", + resolved_node->name.ascii)); + *element_ptr = NULL; + return_VOID; + } +#endif + + /* + * Special handling for Alias objects. We need resolved_node to point + * to the Alias target. This effectively "resolves" the alias. + */ + if (resolved_node->type == ACPI_TYPE_LOCAL_ALIAS) { + resolved_node = ACPI_CAST_PTR(struct acpi_namespace_node, + resolved_node->object); + } + + /* Update the reference object */ + + element->reference.resolved = TRUE; + element->reference.node = resolved_node; + type = element->reference.node->type; + + /* + * Attempt to resolve the node to a value before we insert it into + * the package. If this is a reference to a common data type, + * resolve it immediately. According to the ACPI spec, package + * elements can only be "data objects" or method references. + * Attempt to resolve to an Integer, Buffer, String or Package. + * If cannot, return the named reference (for things like Devices, + * Methods, etc.) Buffer Fields and Fields will resolve to simple + * objects (int/buf/str/pkg). + * + * NOTE: References to things like Devices, Methods, Mutexes, etc. + * will remain as named references. This behavior is not described + * in the ACPI spec, but it appears to be an oversight. + */ + status = acpi_ex_resolve_node_to_value(&resolved_node, NULL); + if (ACPI_FAILURE(status)) { + return_VOID; + } +#if 0 +/* TBD - alias support */ + /* + * Special handling for Alias objects. We need to setup the type + * and the Op->Common.Node to point to the Alias target. 
Note, + * Alias has at most one level of indirection internally. + */ + type = op->common.node->type; + if (type == ACPI_TYPE_LOCAL_ALIAS) { + type = obj_desc->common.type; + op->common.node = ACPI_CAST_PTR(struct acpi_namespace_node, + op->common.node->object); + } +#endif + + switch (type) { + /* + * These object types are a result of named references, so we will + * leave them as reference objects. In other words, these types + * have no intrinsic "value". + */ + case ACPI_TYPE_DEVICE: + case ACPI_TYPE_THERMAL: + + /* TBD: This may not be necesssary */ + + acpi_ut_add_reference(resolved_node->object); + break; + + case ACPI_TYPE_MUTEX: + case ACPI_TYPE_METHOD: + case ACPI_TYPE_POWER: + case ACPI_TYPE_PROCESSOR: + case ACPI_TYPE_EVENT: + case ACPI_TYPE_REGION: + + break; + + default: + /* + * For all other types - the node was resolved to an actual + * operand object with a value, return the object + */ + *element_ptr = (union acpi_operand_object *)resolved_node; + break; + } + + return_VOID; +} diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c index 9c941947a063..3a3cb8624f41 100644 --- a/drivers/acpi/acpica/evgpeblk.c +++ b/drivers/acpi/acpica/evgpeblk.c @@ -440,9 +440,11 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, void *ignored) { acpi_status status; + acpi_event_status event_status; struct acpi_gpe_event_info *gpe_event_info; u32 gpe_enabled_count; u32 gpe_index; + u32 gpe_number; u32 i; u32 j; @@ -470,30 +472,40 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j; gpe_event_info = &gpe_block->event_info[gpe_index]; + gpe_number = gpe_block->block_base_number + gpe_index; /* * Ignore GPEs that have no corresponding _Lxx/_Exx method - * and GPEs that are used to wake the system + * and GPEs that are used for wakeup */ - if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) == - ACPI_GPE_DISPATCH_NONE) - || (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) == - ACPI_GPE_DISPATCH_HANDLER) - || (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) == - ACPI_GPE_DISPATCH_RAW_HANDLER) + if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) != + ACPI_GPE_DISPATCH_METHOD) || (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) { continue; } + event_status = 0; + (void)acpi_hw_get_gpe_status(gpe_event_info, + &event_status); + status = acpi_ev_add_gpe_reference(gpe_event_info); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Could not enable GPE 0x%02X", - gpe_index + - gpe_block->block_base_number)); + gpe_number)); continue; } + gpe_event_info->flags |= ACPI_GPE_AUTO_ENABLED; + + if (event_status & ACPI_EVENT_FLAG_STATUS_SET) { + ACPI_INFO(("GPE 0x%02X active on init", + gpe_number)); + (void)acpi_ev_gpe_dispatch(gpe_block->node, + gpe_event_info, + gpe_number); + } + gpe_enabled_count++; } } diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c index 57718a3e029a..67c7c4ce276c 100644 --- a/drivers/acpi/acpica/evxfgpe.c +++ b/drivers/acpi/acpica/evxfgpe.c @@ -435,6 +435,14 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device, */ gpe_event_info->flags = (ACPI_GPE_DISPATCH_NOTIFY | ACPI_GPE_LEVEL_TRIGGERED); + } else if (gpe_event_info->flags & ACPI_GPE_AUTO_ENABLED) { + /* + * A reference to this GPE has been added during the GPE block + * initialization, so drop it now to prevent the GPE from being + * permanently enabled and clear its ACPI_GPE_AUTO_ENABLED flag. 
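The evgpeblk.c change above samples the GPE status before taking the enable reference and, if the status bit is already set, dispatches the GPE immediately so an event latched before init is not lost. A generic, self-contained sketch of that "sample status, enable, then service anything already pending" pattern; the register and helpers are fakes, not the ACPICA GPE API:

#include <stdint.h>
#include <stdio.h>

#define EVT_STATUS_SET 0x01u

static uint8_t fake_status_reg = EVT_STATUS_SET;	/* event latched before init */

static uint8_t read_status(void) { return fake_status_reg; }
static void enable_event(void) { puts("event enabled"); }
static void handle_event(void) { puts("dispatching event that was pending at init"); }

int main(void)
{
	/* Sample status before enabling, mirroring the order in the hunk above */
	uint8_t status = read_status();

	enable_event();

	if (status & EVT_STATUS_SET)
		handle_event();

	return 0;
}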
+ */ + (void)acpi_ev_remove_gpe_reference(gpe_event_info); + gpe_event_info->flags &= ~ACPI_GPE_AUTO_ENABLED; } /* diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c index d43d7da4c734..b8adb11f1b07 100644 --- a/drivers/acpi/acpica/excreate.c +++ b/drivers/acpi/acpica/excreate.c @@ -87,32 +87,27 @@ acpi_status acpi_ex_create_alias(struct acpi_walk_state *walk_state) target_node->object); } - /* - * For objects that can never change (i.e., the NS node will - * permanently point to the same object), we can simply attach - * the object to the new NS node. For other objects (such as - * Integers, buffers, etc.), we have to point the Alias node - * to the original Node. - */ + /* Ensure that the target node is valid */ + + if (!target_node) { + return_ACPI_STATUS(AE_NULL_OBJECT); + } + + /* Construct the alias object (a namespace node) */ + switch (target_node->type) { - - /* For these types, the sub-object can change dynamically via a Store */ - - case ACPI_TYPE_INTEGER: - case ACPI_TYPE_STRING: - case ACPI_TYPE_BUFFER: - case ACPI_TYPE_PACKAGE: - case ACPI_TYPE_BUFFER_FIELD: + case ACPI_TYPE_METHOD: /* - * These types open a new scope, so we need the NS node in order to access - * any children. + * Control method aliases need to be differentiated with + * a special type */ - case ACPI_TYPE_DEVICE: - case ACPI_TYPE_POWER: - case ACPI_TYPE_PROCESSOR: - case ACPI_TYPE_THERMAL: - case ACPI_TYPE_LOCAL_SCOPE: + alias_node->type = ACPI_TYPE_LOCAL_METHOD_ALIAS; + break; + + default: /* + * All other object types. + * * The new alias has the type ALIAS and points to the original * NS node, not the object itself. */ @@ -120,35 +115,12 @@ acpi_status acpi_ex_create_alias(struct acpi_walk_state *walk_state) alias_node->object = ACPI_CAST_PTR(union acpi_operand_object, target_node); break; - - case ACPI_TYPE_METHOD: - /* - * Control method aliases need to be differentiated - */ - alias_node->type = ACPI_TYPE_LOCAL_METHOD_ALIAS; - alias_node->object = - ACPI_CAST_PTR(union acpi_operand_object, target_node); - break; - - default: - - /* Attach the original source object to the new Alias Node */ - - /* - * The new alias assumes the type of the target, and it points - * to the same object. 
The reference count of the object has an - * additional reference to prevent deletion out from under either the - * target node or the alias Node - */ - status = acpi_ns_attach_object(alias_node, - acpi_ns_get_attached_object - (target_node), - target_node->type); - break; } /* Since both operands are Nodes, we don't need to delete them */ + alias_node->object = + ACPI_CAST_PTR(union acpi_operand_object, target_node); return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c index 44092f744477..83398dc4b7c2 100644 --- a/drivers/acpi/acpica/exdump.c +++ b/drivers/acpi/acpica/exdump.c @@ -102,7 +102,7 @@ static struct acpi_exdump_info acpi_ex_dump_package[6] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_package), NULL}, {ACPI_EXD_NODE, ACPI_EXD_OFFSET(package.node), "Parent Node"}, {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(package.flags), "Flags"}, - {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(package.count), "Elements"}, + {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(package.count), "Element Count"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(package.elements), "Element List"}, {ACPI_EXD_PACKAGE, 0, NULL} }; @@ -384,6 +384,10 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc, count = info->offset; while (count) { + if (!obj_desc) { + return; + } + target = ACPI_ADD_PTR(u8, obj_desc, info->offset); name = info->name; @@ -469,9 +473,9 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc, start = *ACPI_CAST_PTR(void *, target); next = start; - acpi_os_printf("%20s : %p", name, next); + acpi_os_printf("%20s : %p ", name, next); if (next) { - acpi_os_printf("(%s %2.2X)", + acpi_os_printf("%s (Type %2.2X)", acpi_ut_get_object_type_name (next), next->common.type); @@ -493,6 +497,8 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc, break; } } + } else { + acpi_os_printf("- No attached objects"); } acpi_os_printf("\n"); @@ -1129,7 +1135,9 @@ acpi_ex_dump_package_obj(union acpi_operand_object *obj_desc, default: - acpi_os_printf("[Unknown Type] %X\n", obj_desc->common.type); + acpi_os_printf("[%s] Type: %2.2X\n", + acpi_ut_get_type_name(obj_desc->common.type), + obj_desc->common.type); break; } } @@ -1167,11 +1175,17 @@ acpi_ex_dump_object_descriptor(union acpi_operand_object *obj_desc, u32 flags) acpi_ex_dump_namespace_node((struct acpi_namespace_node *) obj_desc, flags); - acpi_os_printf("\nAttached Object (%p):\n", - ((struct acpi_namespace_node *)obj_desc)-> - object); - obj_desc = ((struct acpi_namespace_node *)obj_desc)->object; + if (!obj_desc) { + return_VOID; + } + + acpi_os_printf("\nAttached Object %p", obj_desc); + if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) == ACPI_DESC_TYPE_NAMED) { + acpi_os_printf(" - Namespace Node"); + } + + acpi_os_printf(":\n"); goto dump_object; } @@ -1191,6 +1205,10 @@ acpi_ex_dump_object_descriptor(union acpi_operand_object *obj_desc, u32 flags) dump_object: + if (!obj_desc) { + return_VOID; + } + /* Common Fields */ acpi_ex_dump_object(obj_desc, acpi_ex_dump_common); diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c index f222a80ca38e..1e7649ce0a7b 100644 --- a/drivers/acpi/acpica/exmisc.c +++ b/drivers/acpi/acpica/exmisc.c @@ -265,6 +265,8 @@ acpi_ex_do_logical_numeric_op(u16 opcode, default: + ACPI_ERROR((AE_INFO, + "Invalid numeric logical opcode: %X", opcode)); status = AE_AML_INTERNAL; break; } @@ -345,6 +347,9 @@ acpi_ex_do_logical_op(u16 opcode, default: + ACPI_ERROR((AE_INFO, + "Invalid object type for logical operator: %X", + operand0->common.type)); status = AE_AML_INTERNAL; break; } @@ -388,6 
+393,8 @@ acpi_ex_do_logical_op(u16 opcode, default: + ACPI_ERROR((AE_INFO, + "Invalid comparison opcode: %X", opcode)); status = AE_AML_INTERNAL; break; } @@ -456,6 +463,8 @@ acpi_ex_do_logical_op(u16 opcode, default: + ACPI_ERROR((AE_INFO, + "Invalid comparison opcode: %X", opcode)); status = AE_AML_INTERNAL; break; } diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c index eecb3bff7fd7..57980b7d3594 100644 --- a/drivers/acpi/acpica/exoparg2.c +++ b/drivers/acpi/acpica/exoparg2.c @@ -414,6 +414,9 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state) default: + ACPI_ERROR((AE_INFO, + "Invalid object type: %X", + (operand[0])->common.type)); status = AE_AML_INTERNAL; goto cleanup; } diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c index de74a4c25085..acb417b58bbb 100644 --- a/drivers/acpi/acpica/hwregs.c +++ b/drivers/acpi/acpica/hwregs.c @@ -107,7 +107,7 @@ acpi_hw_get_access_bit_width(u64 address, ACPI_IS_ALIGNED(reg->bit_width, 8)) { access_bit_width = reg->bit_width; } else if (reg->access_width) { - access_bit_width = (1 << (reg->access_width + 2)); + access_bit_width = ACPI_ACCESS_BIT_WIDTH(reg->access_width); } else { access_bit_width = ACPI_ROUND_UP_POWER_OF_TWO_8(reg->bit_offset + diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c index 7ef13934968f..e5c095ca6083 100644 --- a/drivers/acpi/acpica/hwxfsleep.c +++ b/drivers/acpi/acpica/hwxfsleep.c @@ -72,13 +72,16 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id); static struct acpi_sleep_functions acpi_sleep_dispatch[] = { {ACPI_STRUCT_INIT(legacy_function, ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep)), - ACPI_STRUCT_INIT(extended_function, acpi_hw_extended_sleep) }, + ACPI_STRUCT_INIT(extended_function, + acpi_hw_extended_sleep)}, {ACPI_STRUCT_INIT(legacy_function, ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep)), - ACPI_STRUCT_INIT(extended_function, acpi_hw_extended_wake_prep) }, + ACPI_STRUCT_INIT(extended_function, + acpi_hw_extended_wake_prep)}, {ACPI_STRUCT_INIT(legacy_function, ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake)), - ACPI_STRUCT_INIT(extended_function, acpi_hw_extended_wake) } + ACPI_STRUCT_INIT(extended_function, + acpi_hw_extended_wake)} }; /* diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c index e5f4fa496572..f2733f51ca8d 100644 --- a/drivers/acpi/acpica/nsaccess.c +++ b/drivers/acpi/acpica/nsaccess.c @@ -292,6 +292,7 @@ acpi_ns_lookup(union acpi_generic_state *scope_info, { acpi_status status; char *path = pathname; + char *external_path; struct acpi_namespace_node *prefix_node; struct acpi_namespace_node *current_node = NULL; struct acpi_namespace_node *this_node = NULL; @@ -427,13 +428,22 @@ acpi_ns_lookup(union acpi_generic_state *scope_info, num_carats++; this_node = this_node->parent; if (!this_node) { + /* + * Current scope has no parent scope. Externalize + * the internal path for error message. 
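The hwregs.c hunk above replaces the open-coded (1 << (reg->access_width + 2)) with the ACPI_ACCESS_BIT_WIDTH() macro; the removed line shows the arithmetic is unchanged. A standalone illustration of the access-size encoding that formula implements (1 = byte up to 4 = qword); the local macro name here simply assumes that equivalence:

#include <stdio.h>

/* Same formula as the removed open-coded expression */
#define ACCESS_BIT_WIDTH(access_width) (1u << ((access_width) + 2))

int main(void)
{
	unsigned int w;

	for (w = 1; w <= 4; w++)
		printf("access_width %u -> %2u bits\n", w, ACCESS_BIT_WIDTH(w));
	/* prints 8, 16, 32, 64 */
	return 0;
}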
+ */ + status = + acpi_ns_externalize_name + (ACPI_UINT32_MAX, pathname, NULL, + &external_path); + if (ACPI_SUCCESS(status)) { + ACPI_ERROR((AE_INFO, + "%s: Path has too many parent prefixes (^)", + external_path)); - /* Current scope has no parent scope */ + ACPI_FREE(external_path); + } - ACPI_ERROR((AE_INFO, - "%s: Path has too many parent prefixes (^) " - "- reached beyond root node", - pathname)); return_ACPI_STATUS(AE_NOT_FOUND); } } @@ -634,6 +644,12 @@ acpi_ns_lookup(union acpi_generic_state *scope_info, this_node->object; } } +#ifdef ACPI_ASL_COMPILER + if (!acpi_gbl_disasm_flag && + (this_node->flags & ANOBJ_IS_EXTERNAL)) { + this_node->flags |= IMPLICIT_EXTERNAL; + } +#endif } /* Special handling for the last segment (num_segments == 0) */ diff --git a/drivers/acpi/acpica/nsarguments.c b/drivers/acpi/acpica/nsarguments.c index 9095d51f6b37..67b7370dcae5 100644 --- a/drivers/acpi/acpica/nsarguments.c +++ b/drivers/acpi/acpica/nsarguments.c @@ -69,9 +69,14 @@ void acpi_ns_check_argument_types(struct acpi_evaluate_info *info) u8 user_arg_type; u32 i; - /* If not a predefined name, cannot typecheck args */ - - if (!info->predefined) { + /* + * If not a predefined name, cannot typecheck args, because + * we have no idea what argument types are expected. + * Also, ignore typecheck if warnings/errors if this method + * has already been evaluated at least once -- in order + * to suppress repetitive messages. + */ + if (!info->predefined || (info->node->flags & ANOBJ_EVALUATED)) { return; } @@ -93,6 +98,10 @@ void acpi_ns_check_argument_types(struct acpi_evaluate_info *info) acpi_ut_get_type_name (user_arg_type), acpi_ut_get_type_name(arg_type))); + + /* Prevent any additional typechecking for this method */ + + info->node->flags |= ANOBJ_EVALUATED; } } } @@ -121,7 +130,7 @@ acpi_ns_check_acpi_compliance(char *pathname, u32 aml_param_count; u32 required_param_count; - if (!predefined) { + if (!predefined || (node->flags & ANOBJ_EVALUATED)) { return; } @@ -215,6 +224,10 @@ acpi_ns_check_argument_count(char *pathname, u32 aml_param_count; u32 required_param_count; + if (node->flags & ANOBJ_EVALUATED) { + return; + } + if (!predefined) { /* * Not a predefined name. Check the incoming user argument count diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c index ce33e7297ea7..9c6297949712 100644 --- a/drivers/acpi/acpica/nsinit.c +++ b/drivers/acpi/acpica/nsinit.c @@ -396,6 +396,20 @@ acpi_ns_init_one_object(acpi_handle obj_handle, info->package_init++; status = acpi_ds_get_package_arguments(obj_desc); + if (ACPI_FAILURE(status)) { + break; + } + + /* + * Resolve all named references in package objects (and all + * sub-packages). This action has been deferred until the entire + * namespace has been loaded, in order to support external and + * forward references from individual package elements (05/2017). 
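The nsarguments.c changes above gate the argument typecheck messages on an ANOBJ_EVALUATED node flag so a given method warns at most once. A minimal warn-once sketch of the same idea, independent of ACPICA types:

#include <stdio.h>

#define NODE_EVALUATED 0x01u

struct node { const char *name; unsigned int flags; };

static void check_arguments(struct node *n, int type_mismatch)
{
	/* Skip the check entirely once this node has already warned */
	if (n->flags & NODE_EVALUATED)
		return;

	if (type_mismatch) {
		printf("%s: argument type mismatch\n", n->name);
		n->flags |= NODE_EVALUATED;	/* suppress repeats */
	}
}

int main(void)
{
	struct node m = { "MTH0", 0 };

	check_arguments(&m, 1);	/* warns */
	check_arguments(&m, 1);	/* silent */
	return 0;
}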
+ */ + status = acpi_ut_walk_package_tree(obj_desc, NULL, + acpi_ds_init_package_element, + NULL); + obj_desc->package.flags |= AOPOBJ_DATA_VALID; break; default: diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c index aa16aeaa8937..a410760a0308 100644 --- a/drivers/acpi/acpica/nsnames.c +++ b/drivers/acpi/acpica/nsnames.c @@ -89,7 +89,14 @@ acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node) { acpi_size size; - ACPI_FUNCTION_ENTRY(); + /* Validate the Node */ + + if (ACPI_GET_DESCRIPTOR_TYPE(node) != ACPI_DESC_TYPE_NAMED) { + ACPI_ERROR((AE_INFO, + "Invalid/cached reference target node: %p, descriptor type %d", + node, ACPI_GET_DESCRIPTOR_TYPE(node))); + return (0); + } size = acpi_ns_build_normalized_path(node, NULL, 0, FALSE); return (size); diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c index 4954cb6c9090..a8ea8fb1d299 100644 --- a/drivers/acpi/acpica/nsprepkg.c +++ b/drivers/acpi/acpica/nsprepkg.c @@ -614,6 +614,8 @@ acpi_ns_check_package_list(struct acpi_evaluate_info *info, default: /* Should not get here, type was validated by caller */ + ACPI_ERROR((AE_INFO, "Invalid Package type: %X", + package->ret_info.type)); return (AE_AML_INTERNAL); } diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c index b4224005783c..bb04dec168ad 100644 --- a/drivers/acpi/acpica/psloop.c +++ b/drivers/acpi/acpica/psloop.c @@ -164,6 +164,11 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state, INCREMENT_ARG_LIST(walk_state->arg_types); } + ACPI_DEBUG_PRINT((ACPI_DB_PARSE, + "Final argument count: %u pass %u\n", + walk_state->arg_count, + walk_state->pass_number)); + /* * Handle executable code at "module-level". This refers to * executable opcodes that appear outside of any control method. @@ -277,6 +282,11 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state, AML_NAME_OP) && (walk_state->pass_number <= ACPI_IMODE_LOAD_PASS2)) { + ACPI_DEBUG_PRINT((ACPI_DB_PARSE, + "Setup Package/Buffer: Pass %u, AML Ptr: %p\n", + walk_state->pass_number, + aml_op_start)); + /* * Skip parsing of Buffers and Packages because we don't have * enough info in the first pass to parse them correctly. @@ -570,6 +580,10 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state) /* Check for arguments that need to be processed */ + ACPI_DEBUG_PRINT((ACPI_DB_PARSE, + "Parseloop: argument count: %u\n", + walk_state->arg_count)); + if (walk_state->arg_count) { /* * There are arguments (complex ones), push Op and diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c index ef6384e374fc..0bef6df71bba 100644 --- a/drivers/acpi/acpica/psobject.c +++ b/drivers/acpi/acpica/psobject.c @@ -359,6 +359,32 @@ acpi_ps_create_op(struct acpi_walk_state *walk_state, acpi_ps_build_named_op(walk_state, aml_op_start, op, &named_op); acpi_ps_free_op(op); + +#ifdef ACPI_ASL_COMPILER + if (acpi_gbl_disasm_flag + && walk_state->opcode == AML_EXTERNAL_OP + && status == AE_NOT_FOUND) { + /* + * If parsing of AML_EXTERNAL_OP's name path fails, then skip + * past this opcode and keep parsing. This is a much better + * alternative than to abort the entire disassembler. At this + * point, the parser_state is at the end of the namepath of the + * external declaration opcode. Setting walk_state->Aml to + * walk_state->parser_state.Aml + 2 moves increments the + * walk_state->Aml past the object type and the paramcount of the + * external opcode. For the error message, only print the AML + * offset. 
We could attempt to print the name but this may cause + * a segmentation fault when printing the namepath because the + * AML may be incorrect. + */ + acpi_os_printf + ("// Invalid external declaration at AML offset 0x%x.\n", + walk_state->aml - + walk_state->parser_state.aml_start); + walk_state->aml = walk_state->parser_state.aml + 2; + return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE); + } +#endif if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c index 59a4f9ed06a7..be65e65e216e 100644 --- a/drivers/acpi/acpica/rsxface.c +++ b/drivers/acpi/acpica/rsxface.c @@ -615,7 +615,7 @@ ACPI_EXPORT_SYMBOL(acpi_walk_resource_buffer) * device we are querying * name - Method name of the resources we want. * (METHOD_NAME__CRS, METHOD_NAME__PRS, or - * METHOD_NAME__AEI) + * METHOD_NAME__AEI or METHOD_NAME__DMA) * user_function - Called for each resource * context - Passed to user_function * @@ -641,11 +641,12 @@ acpi_walk_resources(acpi_handle device_handle, if (!device_handle || !user_function || !name || (!ACPI_COMPARE_NAME(name, METHOD_NAME__CRS) && !ACPI_COMPARE_NAME(name, METHOD_NAME__PRS) && - !ACPI_COMPARE_NAME(name, METHOD_NAME__AEI))) { + !ACPI_COMPARE_NAME(name, METHOD_NAME__AEI) && + !ACPI_COMPARE_NAME(name, METHOD_NAME__DMA))) { return_ACPI_STATUS(AE_BAD_PARAMETER); } - /* Get the _CRS/_PRS/_AEI resource list */ + /* Get the _CRS/_PRS/_AEI/_DMA resource list */ buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER; status = acpi_rs_get_method_data(device_handle, name, &buffer); diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c index c9d6fa6d7cc6..b19a2f0ea331 100644 --- a/drivers/acpi/acpica/tbdata.c +++ b/drivers/acpi/acpica/tbdata.c @@ -50,6 +50,57 @@ #define _COMPONENT ACPI_TABLES ACPI_MODULE_NAME("tbdata") +/* Local prototypes */ +static acpi_status +acpi_tb_check_duplication(struct acpi_table_desc *table_desc, u32 *table_index); + +static u8 +acpi_tb_compare_tables(struct acpi_table_desc *table_desc, u32 table_index); + +/******************************************************************************* + * + * FUNCTION: acpi_tb_compare_tables + * + * PARAMETERS: table_desc - Table 1 descriptor to be compared + * table_index - Index of table 2 to be compared + * + * RETURN: TRUE if both tables are identical. + * + * DESCRIPTION: This function compares a table with another table that has + * already been installed in the root table list. + * + ******************************************************************************/ + +static u8 +acpi_tb_compare_tables(struct acpi_table_desc *table_desc, u32 table_index) +{ + acpi_status status = AE_OK; + u8 is_identical; + struct acpi_table_header *table; + u32 table_length; + u8 table_flags; + + status = + acpi_tb_acquire_table(&acpi_gbl_root_table_list.tables[table_index], + &table, &table_length, &table_flags); + if (ACPI_FAILURE(status)) { + return (FALSE); + } + + /* + * Check for a table match on the entire table length, + * not just the header. + */ + is_identical = (u8)((table_desc->length != table_length || + memcmp(table_desc->pointer, table, table_length)) ? 
+ FALSE : TRUE); + + /* Release the acquired table */ + + acpi_tb_release_table(table, table_length, table_flags); + return (is_identical); +} + /******************************************************************************* * * FUNCTION: acpi_tb_init_table_descriptor @@ -64,6 +115,7 @@ ACPI_MODULE_NAME("tbdata") * DESCRIPTION: Initialize a new table descriptor * ******************************************************************************/ + void acpi_tb_init_table_descriptor(struct acpi_table_desc *table_desc, acpi_physical_address address, @@ -338,7 +390,7 @@ void acpi_tb_invalidate_table(struct acpi_table_desc *table_desc) acpi_status acpi_tb_validate_temp_table(struct acpi_table_desc *table_desc) { - if (!table_desc->pointer && !acpi_gbl_verify_table_checksum) { + if (!table_desc->pointer && !acpi_gbl_enable_table_validation) { /* * Only validates the header of the table. * Note that Length contains the size of the mapping after invoking @@ -354,22 +406,100 @@ acpi_status acpi_tb_validate_temp_table(struct acpi_table_desc *table_desc) return (acpi_tb_validate_table(table_desc)); } +/******************************************************************************* + * + * FUNCTION: acpi_tb_check_duplication + * + * PARAMETERS: table_desc - Table descriptor + * table_index - Where the table index is returned + * + * RETURN: Status + * + * DESCRIPTION: Avoid installing duplicated tables. However table override and + * user aided dynamic table load is allowed, thus comparing the + * address of the table is not sufficient, and checking the entire + * table content is required. + * + ******************************************************************************/ + +static acpi_status +acpi_tb_check_duplication(struct acpi_table_desc *table_desc, u32 *table_index) +{ + u32 i; + + ACPI_FUNCTION_TRACE(tb_check_duplication); + + /* Check if table is already registered */ + + for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) { + + /* Do not compare with unverified tables */ + + if (! + (acpi_gbl_root_table_list.tables[i]. + flags & ACPI_TABLE_IS_VERIFIED)) { + continue; + } + + /* + * Check for a table match on the entire table length, + * not just the header. + */ + if (!acpi_tb_compare_tables(table_desc, i)) { + continue; + } + + /* + * Note: the current mechanism does not unregister a table if it is + * dynamically unloaded. The related namespace entries are deleted, + * but the table remains in the root table list. + * + * The assumption here is that the number of different tables that + * will be loaded is actually small, and there is minimal overhead + * in just keeping the table in case it is needed again. + * + * If this assumption changes in the future (perhaps on large + * machines with many table load/unload operations), tables will + * need to be unregistered when they are unloaded, and slots in the + * root table list should be reused when empty. 
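The rsxface.c hunks earlier in this patch let acpi_walk_resources() accept _DMA alongside _CRS, _PRS and _AEI. A hedged usage sketch of that public entry point, assuming the usual ACPICA header location and callback prototype; the device handle and callback body are placeholders:

#include <acpi/acpi.h>	/* assumed ACPICA public header */

/* Called once per resource descriptor found in the device's _DMA */
static acpi_status dma_resource_cb(struct acpi_resource *resource, void *context)
{
	(void)context;
	/* inspect resource->type / resource->data here */
	return AE_OK;
}

static acpi_status walk_dma_resources(acpi_handle device)
{
	/* METHOD_NAME__DMA ("_DMA") is now accepted alongside _CRS/_PRS/_AEI */
	return acpi_walk_resources(device, METHOD_NAME__DMA,
				   dma_resource_cb, NULL);
}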
+ */ + if (acpi_gbl_root_table_list.tables[i].flags & + ACPI_TABLE_IS_LOADED) { + + /* Table is still loaded, this is an error */ + + return_ACPI_STATUS(AE_ALREADY_EXISTS); + } else { + *table_index = i; + return_ACPI_STATUS(AE_CTRL_TERMINATE); + } + } + + /* Indicate no duplication to the caller */ + + return_ACPI_STATUS(AE_OK); +} + /****************************************************************************** * * FUNCTION: acpi_tb_verify_temp_table * * PARAMETERS: table_desc - Table descriptor * signature - Table signature to verify + * table_index - Where the table index is returned * * RETURN: Status * * DESCRIPTION: This function is called to validate and verify the table, the * returned table descriptor is in "VALIDATED" state. + * Note that 'TableIndex' is required to be set to !NULL to + * enable duplication check. * *****************************************************************************/ acpi_status -acpi_tb_verify_temp_table(struct acpi_table_desc *table_desc, char *signature) +acpi_tb_verify_temp_table(struct acpi_table_desc *table_desc, + char *signature, u32 *table_index) { acpi_status status = AE_OK; @@ -392,9 +522,10 @@ acpi_tb_verify_temp_table(struct acpi_table_desc *table_desc, char *signature) goto invalidate_and_exit; } - /* Verify the checksum */ + if (acpi_gbl_enable_table_validation) { + + /* Verify the checksum */ - if (acpi_gbl_verify_table_checksum) { status = acpi_tb_verify_checksum(table_desc->pointer, table_desc->length); @@ -411,9 +542,34 @@ acpi_tb_verify_temp_table(struct acpi_table_desc *table_desc, char *signature) goto invalidate_and_exit; } + + /* Avoid duplications */ + + if (table_index) { + status = + acpi_tb_check_duplication(table_desc, table_index); + if (ACPI_FAILURE(status)) { + if (status != AE_CTRL_TERMINATE) { + ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY, + "%4.4s 0x%8.8X%8.8X" + " Table is duplicated", + acpi_ut_valid_nameseg + (table_desc->signature. + ascii) ? table_desc-> + signature. 
+ ascii : "????", + ACPI_FORMAT_UINT64 + (table_desc->address))); + } + + goto invalidate_and_exit; + } + } + + table_desc->flags |= ACPI_TABLE_IS_VERIFIED; } - return_ACPI_STATUS(AE_OK); + return_ACPI_STATUS(status); invalidate_and_exit: acpi_tb_invalidate_table(table_desc); @@ -436,6 +592,8 @@ acpi_status acpi_tb_resize_root_table_list(void) { struct acpi_table_desc *tables; u32 table_count; + u32 current_table_count, max_table_count; + u32 i; ACPI_FUNCTION_TRACE(tb_resize_root_table_list); @@ -455,8 +613,8 @@ acpi_status acpi_tb_resize_root_table_list(void) table_count = acpi_gbl_root_table_list.current_table_count; } - tables = ACPI_ALLOCATE_ZEROED(((acpi_size)table_count + - ACPI_ROOT_TABLE_SIZE_INCREMENT) * + max_table_count = table_count + ACPI_ROOT_TABLE_SIZE_INCREMENT; + tables = ACPI_ALLOCATE_ZEROED(((acpi_size)max_table_count) * sizeof(struct acpi_table_desc)); if (!tables) { ACPI_ERROR((AE_INFO, @@ -466,9 +624,16 @@ acpi_status acpi_tb_resize_root_table_list(void) /* Copy and free the previous table array */ + current_table_count = 0; if (acpi_gbl_root_table_list.tables) { - memcpy(tables, acpi_gbl_root_table_list.tables, - (acpi_size)table_count * sizeof(struct acpi_table_desc)); + for (i = 0; i < table_count; i++) { + if (acpi_gbl_root_table_list.tables[i].address) { + memcpy(tables + current_table_count, + acpi_gbl_root_table_list.tables + i, + sizeof(struct acpi_table_desc)); + current_table_count++; + } + } if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) { ACPI_FREE(acpi_gbl_root_table_list.tables); @@ -476,8 +641,8 @@ acpi_status acpi_tb_resize_root_table_list(void) } acpi_gbl_root_table_list.tables = tables; - acpi_gbl_root_table_list.max_table_count = - table_count + ACPI_ROOT_TABLE_SIZE_INCREMENT; + acpi_gbl_root_table_list.max_table_count = max_table_count; + acpi_gbl_root_table_list.current_table_count = current_table_count; acpi_gbl_root_table_list.flags |= ACPI_ROOT_ORIGIN_ALLOCATED; return_ACPI_STATUS(AE_OK); @@ -818,13 +983,9 @@ acpi_tb_load_table(u32 table_index, struct acpi_namespace_node *parent_node) acpi_ev_update_gpes(owner_id); } - /* Invoke table handler if present */ - - if (acpi_gbl_table_handler) { - (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_LOAD, table, - acpi_gbl_table_handler_context); - } + /* Invoke table handler */ + acpi_tb_notify_table(ACPI_TABLE_EVENT_LOAD, table); return_ACPI_STATUS(status); } @@ -894,15 +1055,11 @@ acpi_status acpi_tb_unload_table(u32 table_index) return_ACPI_STATUS(AE_NOT_EXIST); } - /* Invoke table handler if present */ + /* Invoke table handler */ - if (acpi_gbl_table_handler) { - status = acpi_get_table_by_index(table_index, &table); - if (ACPI_SUCCESS(status)) { - (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_UNLOAD, - table, - acpi_gbl_table_handler_context); - } + status = acpi_get_table_by_index(table_index, &table); + if (ACPI_SUCCESS(status)) { + acpi_tb_notify_table(ACPI_TABLE_EVENT_UNLOAD, table); } /* Delete the portion of the namespace owned by this table */ @@ -918,3 +1075,26 @@ acpi_status acpi_tb_unload_table(u32 table_index) } ACPI_EXPORT_SYMBOL(acpi_tb_unload_table) + +/******************************************************************************* + * + * FUNCTION: acpi_tb_notify_table + * + * PARAMETERS: event - Table event + * table - Validated table pointer + * + * RETURN: None + * + * DESCRIPTION: Notify a table event to the users. 
+ * + ******************************************************************************/ + +void acpi_tb_notify_table(u32 event, void *table) +{ + /* Invoke table handler if present */ + + if (acpi_gbl_table_handler) { + (void)acpi_gbl_table_handler(event, table, + acpi_gbl_table_handler_context); + } +} diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c index 4620f3c68c13..0dfc0ac3c141 100644 --- a/drivers/acpi/acpica/tbinstal.c +++ b/drivers/acpi/acpica/tbinstal.c @@ -48,54 +48,6 @@ #define _COMPONENT ACPI_TABLES ACPI_MODULE_NAME("tbinstal") -/* Local prototypes */ -static u8 -acpi_tb_compare_tables(struct acpi_table_desc *table_desc, u32 table_index); - -/******************************************************************************* - * - * FUNCTION: acpi_tb_compare_tables - * - * PARAMETERS: table_desc - Table 1 descriptor to be compared - * table_index - Index of table 2 to be compared - * - * RETURN: TRUE if both tables are identical. - * - * DESCRIPTION: This function compares a table with another table that has - * already been installed in the root table list. - * - ******************************************************************************/ - -static u8 -acpi_tb_compare_tables(struct acpi_table_desc *table_desc, u32 table_index) -{ - acpi_status status = AE_OK; - u8 is_identical; - struct acpi_table_header *table; - u32 table_length; - u8 table_flags; - - status = - acpi_tb_acquire_table(&acpi_gbl_root_table_list.tables[table_index], - &table, &table_length, &table_flags); - if (ACPI_FAILURE(status)) { - return (FALSE); - } - - /* - * Check for a table match on the entire table length, - * not just the header. - */ - is_identical = (u8)((table_desc->length != table_length || - memcmp(table_desc->pointer, table, table_length)) ? - FALSE : TRUE); - - /* Release the acquired table */ - - acpi_tb_release_table(table, table_length, table_flags); - return (is_identical); -} - /******************************************************************************* * * FUNCTION: acpi_tb_install_table_with_override @@ -112,7 +64,6 @@ acpi_tb_compare_tables(struct acpi_table_desc *table_desc, u32 table_index) * table array. * ******************************************************************************/ - void acpi_tb_install_table_with_override(struct acpi_table_desc *new_table_desc, u8 override, u32 *table_index) @@ -210,95 +161,29 @@ acpi_tb_install_standard_table(acpi_physical_address address, goto release_and_exit; } - /* Validate and verify a table before installation */ - - status = acpi_tb_verify_temp_table(&new_table_desc, NULL); - if (ACPI_FAILURE(status)) { - goto release_and_exit; - } - /* Acquire the table lock */ (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); - if (reload) { - /* - * Validate the incoming table signature. - * - * 1) Originally, we checked the table signature for "SSDT" or "PSDT". - * 2) We added support for OEMx tables, signature "OEM". - * 3) Valid tables were encountered with a null signature, so we just - * gave up on validating the signature, (05/2008). - * 4) We encountered non-AML tables such as the MADT, which caused - * interpreter errors and kernel faults. So now, we once again allow - * only "SSDT", "OEMx", and now, also a null signature. (05/2011). 
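acpi_tb_notify_table() above centralizes the "invoke table handler if present" pattern for INSTALL/LOAD/UNLOAD events. A hedged sketch of how a host would register such a handler through the public interface; it assumes the usual ACPICA handler prototype and the event constants that appear in the hunks above:

#include <acpi/acpi.h>	/* assumed ACPICA public header */

/* Invoked by the table manager (via acpi_tb_notify_table) on table events */
static acpi_status my_table_handler(u32 event, void *table, void *context)
{
	(void)table;
	(void)context;

	switch (event) {
	case ACPI_TABLE_EVENT_LOAD:
		/* a table, e.g. a dynamically loaded SSDT, entered the namespace */
		break;
	case ACPI_TABLE_EVENT_UNLOAD:
		/* its namespace objects were removed */
		break;
	default:
		break;
	}
	return AE_OK;
}

static acpi_status register_table_handler(void)
{
	return acpi_install_table_handler(my_table_handler, NULL);
}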
- */ - if ((new_table_desc.signature.ascii[0] != 0x00) && - (!ACPI_COMPARE_NAME - (&new_table_desc.signature, ACPI_SIG_SSDT)) - && (strncmp(new_table_desc.signature.ascii, "OEM", 3))) { - ACPI_BIOS_ERROR((AE_INFO, - "Table has invalid signature [%4.4s] (0x%8.8X), " - "must be SSDT or OEMx", - acpi_ut_valid_nameseg(new_table_desc. - signature. - ascii) ? - new_table_desc.signature. - ascii : "????", - new_table_desc.signature.integer)); + /* Validate and verify a table before installation */ - status = AE_BAD_SIGNATURE; - goto unlock_and_exit; - } - - /* Check if table is already registered */ - - for (i = 0; i < acpi_gbl_root_table_list.current_table_count; - ++i) { + status = acpi_tb_verify_temp_table(&new_table_desc, NULL, &i); + if (ACPI_FAILURE(status)) { + if (status == AE_CTRL_TERMINATE) { /* - * Check for a table match on the entire table length, - * not just the header. + * Table was unloaded, allow it to be reloaded. + * As we are going to return AE_OK to the caller, we should + * take the responsibility of freeing the input descriptor. + * Refill the input descriptor to ensure + * acpi_tb_install_table_with_override() can be called again to + * indicate the re-installation. */ - if (!acpi_tb_compare_tables(&new_table_desc, i)) { - continue; - } - - /* - * Note: the current mechanism does not unregister a table if it is - * dynamically unloaded. The related namespace entries are deleted, - * but the table remains in the root table list. - * - * The assumption here is that the number of different tables that - * will be loaded is actually small, and there is minimal overhead - * in just keeping the table in case it is needed again. - * - * If this assumption changes in the future (perhaps on large - * machines with many table load/unload operations), tables will - * need to be unregistered when they are unloaded, and slots in the - * root table list should be reused when empty. - */ - if (acpi_gbl_root_table_list.tables[i].flags & - ACPI_TABLE_IS_LOADED) { - - /* Table is still loaded, this is an error */ - - status = AE_ALREADY_EXISTS; - goto unlock_and_exit; - } else { - /* - * Table was unloaded, allow it to be reloaded. - * As we are going to return AE_OK to the caller, we should - * take the responsibility of freeing the input descriptor. - * Refill the input descriptor to ensure - * acpi_tb_install_table_with_override() can be called again to - * indicate the re-installation. 
- */ - acpi_tb_uninstall_table(&new_table_desc); - (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); - *table_index = i; - return_ACPI_STATUS(AE_OK); - } + acpi_tb_uninstall_table(&new_table_desc); + (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); + *table_index = i; + return_ACPI_STATUS(AE_OK); } + goto unlock_and_exit; } /* Add the table to the global root table list */ @@ -306,14 +191,10 @@ acpi_tb_install_standard_table(acpi_physical_address address, acpi_tb_install_table_with_override(&new_table_desc, override, table_index); - /* Invoke table handler if present */ + /* Invoke table handler */ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); - if (acpi_gbl_table_handler) { - (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_INSTALL, - new_table_desc.pointer, - acpi_gbl_table_handler_context); - } + acpi_tb_notify_table(ACPI_TABLE_EVENT_INSTALL, new_table_desc.pointer); (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); unlock_and_exit: @@ -382,9 +263,11 @@ void acpi_tb_override_table(struct acpi_table_desc *old_table_desc) finish_override: - /* Validate and verify a table before overriding */ - - status = acpi_tb_verify_temp_table(&new_table_desc, NULL); + /* + * Validate and verify a table before overriding, no nested table + * duplication check as it's too complicated and unnecessary. + */ + status = acpi_tb_verify_temp_table(&new_table_desc, NULL, NULL); if (ACPI_FAILURE(status)) { return; } diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c index 010b1c43df92..26ad596c973e 100644 --- a/drivers/acpi/acpica/tbxface.c +++ b/drivers/acpi/acpica/tbxface.c @@ -167,7 +167,8 @@ ACPI_EXPORT_SYMBOL_INIT(acpi_initialize_tables) acpi_status ACPI_INIT_FUNCTION acpi_reallocate_root_table(void) { acpi_status status; - u32 i; + struct acpi_table_desc *table_desc; + u32 i, j; ACPI_FUNCTION_TRACE(acpi_reallocate_root_table); @@ -179,6 +180,8 @@ acpi_status ACPI_INIT_FUNCTION acpi_reallocate_root_table(void) return_ACPI_STATUS(AE_SUPPORT); } + (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); + /* * Ensure OS early boot logic, which is required by some hosts. If the * table state is reported to be wrong, developers should fix the @@ -186,17 +189,39 @@ acpi_status ACPI_INIT_FUNCTION acpi_reallocate_root_table(void) * early stage. */ for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) { - if (acpi_gbl_root_table_list.tables[i].pointer) { + table_desc = &acpi_gbl_root_table_list.tables[i]; + if (table_desc->pointer) { ACPI_ERROR((AE_INFO, "Table [%4.4s] is not invalidated during early boot stage", - acpi_gbl_root_table_list.tables[i]. - signature.ascii)); + table_desc->signature.ascii)); + } + } + + if (!acpi_gbl_enable_table_validation) { + /* + * Now it's safe to do full table validation. We can do deferred + * table initilization here once the flag is set. 
+ */ + acpi_gbl_enable_table_validation = TRUE; + for (i = 0; i < acpi_gbl_root_table_list.current_table_count; + ++i) { + table_desc = &acpi_gbl_root_table_list.tables[i]; + if (!(table_desc->flags & ACPI_TABLE_IS_VERIFIED)) { + status = + acpi_tb_verify_temp_table(table_desc, NULL, + &j); + if (ACPI_FAILURE(status)) { + acpi_tb_uninstall_table(table_desc); + } + } } } acpi_gbl_root_table_list.flags |= ACPI_ROOT_ALLOW_RESIZE; - status = acpi_tb_resize_root_table_list(); + acpi_gbl_root_table_list.flags |= ACPI_ROOT_ORIGIN_ALLOCATED; + + (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); return_ACPI_STATUS(status); } @@ -369,6 +394,10 @@ void acpi_put_table(struct acpi_table_header *table) ACPI_FUNCTION_TRACE(acpi_put_table); + if (!table) { + return_VOID; + } + (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); /* Walk the root table list */ diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c index b71ce3b817ea..d81f442228b8 100644 --- a/drivers/acpi/acpica/tbxfload.c +++ b/drivers/acpi/acpica/tbxfload.c @@ -206,7 +206,7 @@ acpi_status acpi_tb_load_namespace(void) for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) { table = &acpi_gbl_root_table_list.tables[i]; - if (!acpi_gbl_root_table_list.tables[i].address || + if (!table->address || (!ACPI_COMPARE_NAME(table->signature.ascii, ACPI_SIG_SSDT) && !ACPI_COMPARE_NAME(table->signature.ascii, ACPI_SIG_PSDT) diff --git a/drivers/acpi/acpica/uthex.c b/drivers/acpi/acpica/uthex.c index 6600bc257516..fb406daf47fa 100644 --- a/drivers/acpi/acpica/uthex.c +++ b/drivers/acpi/acpica/uthex.c @@ -69,8 +69,10 @@ static const char acpi_gbl_hex_to_ascii[] = { char acpi_ut_hex_to_ascii_char(u64 integer, u32 position) { + u64 index; - return (acpi_gbl_hex_to_ascii[(integer >> position) & 0xF]); + acpi_ut_short_shift_right(integer, position, &index); + return (acpi_gbl_hex_to_ascii[index & 0xF]); } /******************************************************************************* diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c index aa0502d1d019..5f9c680076c4 100644 --- a/drivers/acpi/acpica/utmath.c +++ b/drivers/acpi/acpica/utmath.c @@ -47,15 +47,6 @@ #define _COMPONENT ACPI_UTILITIES ACPI_MODULE_NAME("utmath") -/* - * Optional support for 64-bit double-precision integer divide. This code - * is configurable and is implemented in order to support 32-bit kernel - * environments where a 64-bit double-precision math library is not available. - * - * Support for a more normal 64-bit divide/modulo (with check for a divide- - * by-zero) appears after this optional section of code. - */ -#ifndef ACPI_USE_NATIVE_DIVIDE /* Structures used only for 64-bit divide */ typedef struct uint64_struct { u32 lo; @@ -69,6 +60,217 @@ typedef union uint64_overlay { } uint64_overlay; +/* + * Optional support for 64-bit double-precision integer multiply and shift. + * This code is configurable and is implemented in order to support 32-bit + * kernel environments where a 64-bit double-precision math library is not + * available. + */ +#ifndef ACPI_USE_NATIVE_MATH64 + +/******************************************************************************* + * + * FUNCTION: acpi_ut_short_multiply + * + * PARAMETERS: multiplicand - 64-bit multiplicand + * multiplier - 32-bit multiplier + * out_product - Pointer to where the product is returned + * + * DESCRIPTION: Perform a short multiply. 
+ * + ******************************************************************************/ + +acpi_status +acpi_ut_short_multiply(u64 multiplicand, u32 multiplier, u64 *out_product) +{ + union uint64_overlay multiplicand_ovl; + union uint64_overlay product; + u32 carry32; + + ACPI_FUNCTION_TRACE(ut_short_multiply); + + multiplicand_ovl.full = multiplicand; + + /* + * The Product is 64 bits, the carry is always 32 bits, + * and is generated by the second multiply. + */ + ACPI_MUL_64_BY_32(0, multiplicand_ovl.part.hi, multiplier, + product.part.hi, carry32); + + ACPI_MUL_64_BY_32(0, multiplicand_ovl.part.lo, multiplier, + product.part.lo, carry32); + + product.part.hi += carry32; + + /* Return only what was requested */ + + if (out_product) { + *out_product = product.full; + } + + return_ACPI_STATUS(AE_OK); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_short_shift_left + * + * PARAMETERS: operand - 64-bit shift operand + * count - 32-bit shift count + * out_result - Pointer to where the result is returned + * + * DESCRIPTION: Perform a short left shift. + * + ******************************************************************************/ + +acpi_status acpi_ut_short_shift_left(u64 operand, u32 count, u64 *out_result) +{ + union uint64_overlay operand_ovl; + + ACPI_FUNCTION_TRACE(ut_short_shift_left); + + operand_ovl.full = operand; + + if ((count & 63) >= 32) { + operand_ovl.part.hi = operand_ovl.part.lo; + operand_ovl.part.lo ^= operand_ovl.part.lo; + count = (count & 63) - 32; + } + ACPI_SHIFT_LEFT_64_BY_32(operand_ovl.part.hi, + operand_ovl.part.lo, count); + + /* Return only what was requested */ + + if (out_result) { + *out_result = operand_ovl.full; + } + + return_ACPI_STATUS(AE_OK); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_short_shift_right + * + * PARAMETERS: operand - 64-bit shift operand + * count - 32-bit shift count + * out_result - Pointer to where the result is returned + * + * DESCRIPTION: Perform a short right shift. + * + ******************************************************************************/ + +acpi_status acpi_ut_short_shift_right(u64 operand, u32 count, u64 *out_result) +{ + union uint64_overlay operand_ovl; + + ACPI_FUNCTION_TRACE(ut_short_shift_right); + + operand_ovl.full = operand; + + if ((count & 63) >= 32) { + operand_ovl.part.lo = operand_ovl.part.hi; + operand_ovl.part.hi ^= operand_ovl.part.hi; + count = (count & 63) - 32; + } + ACPI_SHIFT_RIGHT_64_BY_32(operand_ovl.part.hi, + operand_ovl.part.lo, count); + + /* Return only what was requested */ + + if (out_result) { + *out_result = operand_ovl.full; + } + + return_ACPI_STATUS(AE_OK); +} +#else + +/******************************************************************************* + * + * FUNCTION: acpi_ut_short_multiply + * + * PARAMETERS: See function headers above + * + * DESCRIPTION: Native version of the ut_short_multiply function. 
+ * + ******************************************************************************/ + +acpi_status +acpi_ut_short_multiply(u64 multiplicand, u32 multiplier, u64 *out_product) +{ + + ACPI_FUNCTION_TRACE(ut_short_multiply); + + /* Return only what was requested */ + + if (out_product) { + *out_product = multiplicand * multiplier; + } + + return_ACPI_STATUS(AE_OK); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_short_shift_left + * + * PARAMETERS: See function headers above + * + * DESCRIPTION: Native version of the ut_short_shift_left function. + * + ******************************************************************************/ + +acpi_status acpi_ut_short_shift_left(u64 operand, u32 count, u64 *out_result) +{ + + ACPI_FUNCTION_TRACE(ut_short_shift_left); + + /* Return only what was requested */ + + if (out_result) { + *out_result = operand << count; + } + + return_ACPI_STATUS(AE_OK); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_short_shift_right + * + * PARAMETERS: See function headers above + * + * DESCRIPTION: Native version of the ut_short_shift_right function. + * + ******************************************************************************/ + +acpi_status acpi_ut_short_shift_right(u64 operand, u32 count, u64 *out_result) +{ + + ACPI_FUNCTION_TRACE(ut_short_shift_right); + + /* Return only what was requested */ + + if (out_result) { + *out_result = operand >> count; + } + + return_ACPI_STATUS(AE_OK); +} +#endif + +/* + * Optional support for 64-bit double-precision integer divide. This code + * is configurable and is implemented in order to support 32-bit kernel + * environments where a 64-bit double-precision math library is not available. + * + * Support for a more normal 64-bit divide/modulo (with check for a divide- + * by-zero) appears after this optional section of code. + */ +#ifndef ACPI_USE_NATIVE_DIVIDE + /******************************************************************************* * * FUNCTION: acpi_ut_short_divide @@ -258,6 +460,7 @@ acpi_ut_divide(u64 in_dividend, } #else + /******************************************************************************* * * FUNCTION: acpi_ut_short_divide, acpi_ut_divide @@ -272,6 +475,7 @@ acpi_ut_divide(u64 in_dividend, * perform the divide. 
* ******************************************************************************/ + acpi_status acpi_ut_short_divide(u64 in_dividend, u32 divisor, u64 *out_quotient, u32 *out_remainder) diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c index 443ffad01209..45c78c2adbf0 100644 --- a/drivers/acpi/acpica/utmisc.c +++ b/drivers/acpi/acpica/utmisc.c @@ -224,7 +224,7 @@ acpi_ut_create_update_state_and_push(union acpi_operand_object *object, * * RETURN: Status * - * DESCRIPTION: Walk through a package + * DESCRIPTION: Walk through a package, including subpackages * ******************************************************************************/ @@ -236,8 +236,8 @@ acpi_ut_walk_package_tree(union acpi_operand_object *source_object, acpi_status status = AE_OK; union acpi_generic_state *state_list = NULL; union acpi_generic_state *state; - u32 this_index; union acpi_operand_object *this_source_obj; + u32 this_index; ACPI_FUNCTION_TRACE(ut_walk_package_tree); @@ -251,8 +251,10 @@ acpi_ut_walk_package_tree(union acpi_operand_object *source_object, /* Get one element of the package */ this_index = state->pkg.index; - this_source_obj = (union acpi_operand_object *) + this_source_obj = state->pkg.source_object->package.elements[this_index]; + state->pkg.this_target_obj = + &state->pkg.source_object->package.elements[this_index]; /* * Check for: @@ -339,6 +341,8 @@ acpi_ut_walk_package_tree(union acpi_operand_object *source_object, /* We should never get here */ + ACPI_ERROR((AE_INFO, "State list did not terminate correctly")); + return_ACPI_STATUS(AE_AML_INTERNAL); } diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c index 64e6641bfe82..cb3db9fed50d 100644 --- a/drivers/acpi/acpica/utobject.c +++ b/drivers/acpi/acpica/utobject.c @@ -483,6 +483,11 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object, /* A namespace node should never get here */ + ACPI_ERROR((AE_INFO, + "Received a namespace node [%4.4s] " + "where an operand object is required", + ACPI_CAST_PTR(struct acpi_namespace_node, + internal_object)->name.ascii)); return_ACPI_STATUS(AE_AML_INTERNAL); } diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c index 7e6e1ae6140f..c008589b41bd 100644 --- a/drivers/acpi/acpica/utprint.c +++ b/drivers/acpi/acpica/utprint.c @@ -176,7 +176,7 @@ const char *acpi_ut_scan_number(const char *string, u64 *number_ptr) u64 number = 0; while (isdigit((int)*string)) { - number *= 10; + acpi_ut_short_multiply(number, 10, &number); number += *(string++) - '0'; } @@ -286,7 +286,7 @@ static char *acpi_ut_format_number(char *string, /* Generate full string in reverse order */ pos = acpi_ut_put_number(reversed_string, number, base, upper); - i = ACPI_PTR_DIFF(pos, reversed_string); + i = (s32)ACPI_PTR_DIFF(pos, reversed_string); /* Printing 100 using %2d gives "100", not "00" */ @@ -475,7 +475,7 @@ int vsnprintf(char *string, acpi_size size, const char *format, va_list args) if (!s) { s = ""; } - length = acpi_ut_bound_string_length(s, precision); + length = (s32)acpi_ut_bound_string_length(s, precision); if (!(type & ACPI_FORMAT_LEFT)) { while (length < width--) { pos = @@ -579,7 +579,7 @@ int vsnprintf(char *string, acpi_size size, const char *format, va_list args) } } - return (ACPI_PTR_DIFF(pos, string)); + return ((int)ACPI_PTR_DIFF(pos, string)); } /******************************************************************************* diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c index 
70f78a4bf13b..f9801d13547f 100644 --- a/drivers/acpi/acpica/utresrc.c +++ b/drivers/acpi/acpica/utresrc.c @@ -237,6 +237,13 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state, return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG); } + /* + * Don't attempt to perform any validation on the 2nd byte. + * Although all known ASL compilers insert a zero for the 2nd + * byte, it can also be a checksum (as per the ACPI spec), + * and this is occasionally seen in the field. July 2017. + */ + /* Return the pointer to the end_tag if requested */ if (!user_function) { diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c index 64308c304ade..eafabcd2fada 100644 --- a/drivers/acpi/acpica/utstate.c +++ b/drivers/acpi/acpica/utstate.c @@ -226,7 +226,7 @@ union acpi_generic_state *acpi_ut_create_update_state(union acpi_operand_object union acpi_generic_state *acpi_ut_create_pkg_state(void *internal_object, void *external_object, - u16 index) + u32 index) { union acpi_generic_state *state; diff --git a/drivers/acpi/acpica/utstrtoul64.c b/drivers/acpi/acpica/utstrtoul64.c index f42be01d99fd..9633ee142855 100644 --- a/drivers/acpi/acpica/utstrtoul64.c +++ b/drivers/acpi/acpica/utstrtoul64.c @@ -276,8 +276,8 @@ static u64 acpi_ut_strtoul_base10(char *string, u32 flags) /* Convert and insert (add) the decimal digit */ - next_value = - (return_value * 10) + (ascii_digit - ACPI_ASCII_ZERO); + acpi_ut_short_multiply(return_value, 10, &next_value); + next_value += (ascii_digit - ACPI_ASCII_ZERO); /* Check for overflow (32 or 64 bit) - return current converted value */ @@ -335,9 +335,8 @@ static u64 acpi_ut_strtoul_base16(char *string, u32 flags) /* Convert and insert the hex digit */ - return_value = - (return_value << 4) | - acpi_ut_ascii_char_to_hex(ascii_digit); + acpi_ut_short_shift_left(return_value, 4, &return_value); + return_value |= acpi_ut_ascii_char_to_hex(ascii_digit); string++; valid_digits++; diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c index 9a07a42cae34..3c8de88ecbd5 100644 --- a/drivers/acpi/acpica/uttrack.c +++ b/drivers/acpi/acpica/uttrack.c @@ -591,6 +591,10 @@ void acpi_ut_dump_allocations(u32 component, const char *module) return_VOID; } + if (!acpi_gbl_global_list) { + goto exit; + } + element = acpi_gbl_global_list->list_head; while (element) { if ((element->component & component) && @@ -602,7 +606,7 @@ void acpi_ut_dump_allocations(u32 component, const char *module) if (element->size < sizeof(struct acpi_common_descriptor)) { - acpi_os_printf("%p Length 0x%04X %9.9s-%u " + acpi_os_printf("%p Length 0x%04X %9.9s-%4.4u " "[Not a Descriptor - too small]\n", descriptor, element->size, element->module, element->line); @@ -612,7 +616,7 @@ void acpi_ut_dump_allocations(u32 component, const char *module) if (ACPI_GET_DESCRIPTOR_TYPE(descriptor) != ACPI_DESC_TYPE_CACHED) { acpi_os_printf - ("%p Length 0x%04X %9.9s-%u [%s] ", + ("%p Length 0x%04X %9.9s-%4.4u [%s] ", descriptor, element->size, element->module, element->line, acpi_ut_get_descriptor_name @@ -705,6 +709,7 @@ void acpi_ut_dump_allocations(u32 component, const char *module) element = element->next; } +exit: (void)acpi_ut_release_mutex(ACPI_MTX_MEMORY); /* Print summary */ diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h index 6e9f14c0a71b..cb4126051f62 100644 --- a/drivers/acpi/apei/apei-internal.h +++ b/drivers/acpi/apei/apei-internal.h @@ -120,11 +120,6 @@ int apei_exec_collect_resources(struct apei_exec_context *ctx, struct dentry; struct dentry 
*apei_get_debugfs_dir(void); -#define apei_estatus_for_each_section(estatus, section) \ - for (section = (struct acpi_hest_generic_data *)(estatus + 1); \ - (void *)section - (void *)estatus < estatus->data_length; \ - section = (void *)(section+1) + section->error_data_length) - static inline u32 cper_estatus_len(struct acpi_hest_generic_status *estatus) { if (estatus->raw_data_length) diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c index ec50c32ea3da..b38737c83a24 100644 --- a/drivers/acpi/apei/einj.c +++ b/drivers/acpi/apei/einj.c @@ -281,7 +281,7 @@ static struct acpi_generic_address *einj_get_trigger_parameter_region( ((char *)trigger_tab + sizeof(struct acpi_einj_trigger)); for (i = 0; i < trigger_tab->entry_count; i++) { if (entry->action == ACPI_EINJ_TRIGGER_ERROR && - entry->instruction == ACPI_EINJ_WRITE_REGISTER_VALUE && + entry->instruction <= ACPI_EINJ_WRITE_REGISTER_VALUE && entry->register_region.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && (entry->register_region.address & param2) == (param1 & param2)) diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index d661d452b238..3c3a37b8503b 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -743,17 +743,19 @@ static int ghes_proc(struct ghes *ghes) } ghes_do_proc(ghes, ghes->estatus); +out: + ghes_clear_estatus(ghes); + + if (rc == -ENOENT) + return rc; + /* * GHESv2 type HEST entries introduce support for error acknowledgment, * so only acknowledge the error if this support is present. */ - if (is_hest_type_generic_v2(ghes)) { - rc = ghes_ack_error(ghes->generic_v2); - if (rc) - return rc; - } -out: - ghes_clear_estatus(ghes); + if (is_hest_type_generic_v2(ghes)) + return ghes_ack_error(ghes->generic_v2); + return rc; } @@ -1157,7 +1159,8 @@ static int ghes_probe(struct platform_device *ghes_dev) generic->header.source_id); goto err_edac_unreg; } - rc = request_irq(ghes->irq, ghes_irq_func, 0, "GHES IRQ", ghes); + rc = request_irq(ghes->irq, ghes_irq_func, IRQF_SHARED, + "GHES IRQ", ghes); if (rc) { pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n", generic->header.source_id); @@ -1265,9 +1268,14 @@ static int __init ghes_init(void) if (acpi_disabled) return -ENODEV; - if (hest_disable) { + switch (hest_disable) { + case HEST_NOT_FOUND: + return -ENODEV; + case HEST_DISABLED: pr_info(GHES_PFX "HEST is not enabled!\n"); return -EINVAL; + default: + break; } if (ghes_disable) { diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c index 456b488eb1df..9cb74115a43d 100644 --- a/drivers/acpi/apei/hest.c +++ b/drivers/acpi/apei/hest.c @@ -37,7 +37,7 @@ #define HEST_PFX "HEST: " -bool hest_disable; +int hest_disable; EXPORT_SYMBOL_GPL(hest_disable); /* HEST table parsing */ @@ -213,7 +213,7 @@ static int __init hest_ghes_dev_register(unsigned int ghes_count) static int __init setup_hest_disable(char *str) { - hest_disable = 1; + hest_disable = HEST_DISABLED; return 0; } @@ -232,9 +232,10 @@ void __init acpi_hest_init(void) status = acpi_get_table(ACPI_SIG_HEST, 0, (struct acpi_table_header **)&hest_tab); - if (status == AE_NOT_FOUND) - goto err; - else if (ACPI_FAILURE(status)) { + if (status == AE_NOT_FOUND) { + hest_disable = HEST_NOT_FOUND; + return; + } else if (ACPI_FAILURE(status)) { const char *msg = acpi_format_exception(status); pr_err(HEST_PFX "Failed to get table, %s\n", msg); rc = -EINVAL; @@ -257,5 +258,5 @@ void __init acpi_hest_init(void) pr_info(HEST_PFX "Table parsing has been initialized.\n"); return; err: - hest_disable = 1; 
+ hest_disable = HEST_DISABLED; } diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index 736783c67ea0..de56394dd161 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -693,13 +693,36 @@ static int iort_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data) return iort_iommu_xlate(info->dev, parent, streamid); } +static int nc_dma_get_range(struct device *dev, u64 *size) +{ + struct acpi_iort_node *node; + struct acpi_iort_named_component *ncomp; + + node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT, + iort_match_node_callback, dev); + if (!node) + return -ENODEV; + + ncomp = (struct acpi_iort_named_component *)node->node_data; + + *size = ncomp->memory_address_limit >= 64 ? U64_MAX : + 1ULL<<ncomp->memory_address_limit; + + return 0; +} + /** - * iort_set_dma_mask - Set-up dma mask for a device. + * iort_dma_setup() - Set-up device DMA parameters. * * @dev: device to configure + * @dma_addr: device DMA address result pointer + * @size: DMA range size result pointer */ -void iort_set_dma_mask(struct device *dev) +void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size) { + u64 mask, dmaaddr = 0, size = 0, offset = 0; + int ret, msb; + /* * Set default coherent_dma_mask to 32 bit. Drivers are expected to * setup the correct supported mask. @@ -713,6 +736,36 @@ void iort_set_dma_mask(struct device *dev) */ if (!dev->dma_mask) dev->dma_mask = &dev->coherent_dma_mask; + + size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1); + + if (dev_is_pci(dev)) + ret = acpi_dma_get_range(dev, &dmaaddr, &offset, &size); + else + ret = nc_dma_get_range(dev, &size); + + if (!ret) { + msb = fls64(dmaaddr + size - 1); + /* + * Round-up to the power-of-two mask or set + * the mask to the whole 64-bit address space + * in case the DMA region covers the full + * memory window. + */ + mask = msb == 64 ? U64_MAX : (1ULL << msb) - 1; + /* + * Limit coherent and dma mask based on size + * retrieved from firmware. + */ + dev->coherent_dma_mask = mask; + *dev->dma_mask = mask; + } + + *dma_addr = dmaaddr; + *dma_size = size; + + dev->dma_pfn_offset = PFN_DOWN(offset); + dev_dbg(dev, "dma_pfn_offset(%#08llx)\n", offset); } /** @@ -1125,12 +1178,44 @@ static int __init iort_add_smmu_platform_device(struct acpi_iort_node *node) return ret; } +static bool __init iort_enable_acs(struct acpi_iort_node *iort_node) +{ + if (iort_node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) { + struct acpi_iort_node *parent; + struct acpi_iort_id_mapping *map; + int i; + + map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, iort_node, + iort_node->mapping_offset); + + for (i = 0; i < iort_node->mapping_count; i++, map++) { + if (!map->output_reference) + continue; + + parent = ACPI_ADD_PTR(struct acpi_iort_node, + iort_table, map->output_reference); + /* + * If we detect a RC->SMMU mapping, make sure + * we enable ACS on the system.
+ */ + if ((parent->type == ACPI_IORT_NODE_SMMU) || + (parent->type == ACPI_IORT_NODE_SMMU_V3)) { + pci_request_acs(); + return true; + } + } + } + + return false; +} + static void __init iort_init_platform_devices(void) { struct acpi_iort_node *iort_node, *iort_end; struct acpi_table_iort *iort; struct fwnode_handle *fwnode; int i, ret; + bool acs_enabled = false; /* * iort_table and iort both point to the start of IORT table, but @@ -1150,6 +1235,9 @@ static void __init iort_init_platform_devices(void) return; } + if (!acs_enabled) + acs_enabled = iort_enable_acs(iort_node); + if ((iort_node->type == ACPI_IORT_NODE_SMMU) || (iort_node->type == ACPI_IORT_NODE_SMMU_V3)) { diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 1cbb88d938e5..13e7b56e33ae 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -620,7 +620,7 @@ static ssize_t acpi_battery_alarm_store(struct device *dev, return count; } -static struct device_attribute alarm_attr = { +static const struct device_attribute alarm_attr = { .attr = {.name = "alarm", .mode = 0644}, .show = acpi_battery_alarm_show, .store = acpi_battery_alarm_store, diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c index bb542acc0574..995c4d8922b1 100644 --- a/drivers/acpi/blacklist.c +++ b/drivers/acpi/blacklist.c @@ -30,30 +30,13 @@ #include "internal.h" -enum acpi_blacklist_predicates { - all_versions, - less_than_or_equal, - equal, - greater_than_or_equal, -}; - -struct acpi_blacklist_item { - char oem_id[7]; - char oem_table_id[9]; - u32 oem_revision; - char *table; - enum acpi_blacklist_predicates oem_revision_predicate; - char *reason; - u32 is_critical_error; -}; - -static struct dmi_system_id acpi_rev_dmi_table[] __initdata; +static const struct dmi_system_id acpi_rev_dmi_table[] __initconst; /* * POLICY: If *anything* doesn't work, put it on the blacklist. * If they are critical errors, mark it critical, and abort driver load. */ -static struct acpi_blacklist_item acpi_blacklist[] __initdata = { +static struct acpi_platform_list acpi_blacklist[] __initdata = { /* Compaq Presario 1700 */ {"PTLTD ", " DSDT ", 0x06040000, ACPI_SIG_DSDT, less_than_or_equal, "Multiple problems", 1}, @@ -67,65 +50,27 @@ static struct acpi_blacklist_item acpi_blacklist[] __initdata = { {"IBM ", "TP600E ", 0x00000105, ACPI_SIG_DSDT, less_than_or_equal, "Incorrect _ADR", 1}, - {""} + { } }; int __init acpi_blacklisted(void) { - int i = 0; + int i; int blacklisted = 0; - struct acpi_table_header table_header; - while (acpi_blacklist[i].oem_id[0] != '\0') { - if (acpi_get_table_header(acpi_blacklist[i].table, 0, &table_header)) { - i++; - continue; - } + i = acpi_match_platform_list(acpi_blacklist); + if (i >= 0) { + pr_err(PREFIX "Vendor \"%6.6s\" System \"%8.8s\" Revision 0x%x has a known ACPI BIOS problem.\n", + acpi_blacklist[i].oem_id, + acpi_blacklist[i].oem_table_id, + acpi_blacklist[i].oem_revision); - if (strncmp(acpi_blacklist[i].oem_id, table_header.oem_id, 6)) { - i++; - continue; - } + pr_err(PREFIX "Reason: %s. This is a %s error\n", + acpi_blacklist[i].reason, + (acpi_blacklist[i].data ? 
+ "non-recoverable" : "recoverable")); - if (strncmp - (acpi_blacklist[i].oem_table_id, table_header.oem_table_id, - 8)) { - i++; - continue; - } - - if ((acpi_blacklist[i].oem_revision_predicate == all_versions) - || (acpi_blacklist[i].oem_revision_predicate == - less_than_or_equal - && table_header.oem_revision <= - acpi_blacklist[i].oem_revision) - || (acpi_blacklist[i].oem_revision_predicate == - greater_than_or_equal - && table_header.oem_revision >= - acpi_blacklist[i].oem_revision) - || (acpi_blacklist[i].oem_revision_predicate == equal - && table_header.oem_revision == - acpi_blacklist[i].oem_revision)) { - - printk(KERN_ERR PREFIX - "Vendor \"%6.6s\" System \"%8.8s\" " - "Revision 0x%x has a known ACPI BIOS problem.\n", - acpi_blacklist[i].oem_id, - acpi_blacklist[i].oem_table_id, - acpi_blacklist[i].oem_revision); - - printk(KERN_ERR PREFIX - "Reason: %s. This is a %s error\n", - acpi_blacklist[i].reason, - (acpi_blacklist[i]. - is_critical_error ? "non-recoverable" : - "recoverable")); - - blacklisted = acpi_blacklist[i].is_critical_error; - break; - } else { - i++; - } + blacklisted = acpi_blacklist[i].data; } (void)early_acpi_osi_init(); @@ -144,7 +89,7 @@ static int __init dmi_enable_rev_override(const struct dmi_system_id *d) } #endif -static struct dmi_system_id acpi_rev_dmi_table[] __initdata = { +static const struct dmi_system_id acpi_rev_dmi_table[] __initconst = { #ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE /* * DELL XPS 13 (2015) switches sound between HDA and I2S diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index af74b420ec83..4d0979e02a28 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -67,7 +67,7 @@ static int set_copy_dsdt(const struct dmi_system_id *id) } #endif -static struct dmi_system_id dsdt_dmi_table[] __initdata = { +static const struct dmi_system_id dsdt_dmi_table[] __initconst = { /* * Invoke DSDT corruption work-around on all Toshiba Satellite. * https://bugzilla.kernel.org/show_bug.cgi?id=14679 @@ -83,7 +83,7 @@ static struct dmi_system_id dsdt_dmi_table[] __initdata = { {} }; #else -static struct dmi_system_id dsdt_dmi_table[] __initdata = { +static const struct dmi_system_id dsdt_dmi_table[] __initconst = { {} }; #endif @@ -995,9 +995,6 @@ void __init acpi_early_init(void) printk(KERN_INFO PREFIX "Core revision %08x\n", ACPI_CA_VERSION); - /* It's safe to verify table checksums during late stage */ - acpi_gbl_verify_table_checksum = TRUE; - /* enable workarounds, unless strict ACPI spec. 
compliance */ if (!acpi_strict) acpi_gbl_enable_interpreter_slack = TRUE; diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index 2ed6935d4483..fbcc73f7a099 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -401,6 +401,8 @@ static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used) if (val != ACPI_NOTIFY_DEVICE_WAKE) return; + acpi_handle_debug(handle, "Wake notify\n"); + adev = acpi_bus_get_acpi_device(handle); if (!adev) return; @@ -409,8 +411,12 @@ static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used) if (adev->wakeup.flags.notifier_present) { pm_wakeup_ws_event(adev->wakeup.ws, 0, acpi_s2idle_wakeup()); - if (adev->wakeup.context.func) + if (adev->wakeup.context.func) { + acpi_handle_debug(handle, "Running %pF for %s\n", + adev->wakeup.context.func, + dev_name(adev->wakeup.context.dev)); adev->wakeup.context.func(&adev->wakeup.context); + } } mutex_unlock(&acpi_pm_notifier_lock); @@ -682,55 +688,88 @@ static void acpi_pm_notify_work_func(struct acpi_device_wakeup_context *context) } } +static DEFINE_MUTEX(acpi_wakeup_lock); + +static int __acpi_device_wakeup_enable(struct acpi_device *adev, + u32 target_state, int max_count) +{ + struct acpi_device_wakeup *wakeup = &adev->wakeup; + acpi_status status; + int error = 0; + + mutex_lock(&acpi_wakeup_lock); + + if (wakeup->enable_count >= max_count) + goto out; + + if (wakeup->enable_count > 0) + goto inc; + + error = acpi_enable_wakeup_device_power(adev, target_state); + if (error) + goto out; + + status = acpi_enable_gpe(wakeup->gpe_device, wakeup->gpe_number); + if (ACPI_FAILURE(status)) { + acpi_disable_wakeup_device_power(adev); + error = -EIO; + goto out; + } + +inc: + wakeup->enable_count++; + +out: + mutex_unlock(&acpi_wakeup_lock); + return error; +} + /** - * acpi_device_wakeup - Enable/disable wakeup functionality for device. - * @adev: ACPI device to enable/disable wakeup functionality for. + * acpi_device_wakeup_enable - Enable wakeup functionality for device. + * @adev: ACPI device to enable wakeup functionality for. * @target_state: State the system is transitioning into. - * @enable: Whether to enable or disable the wakeup functionality. * - * Enable/disable the GPE associated with @adev so that it can generate - * wakeup signals for the device in response to external (remote) events and - * enable/disable device wakeup power. + * Enable the GPE associated with @adev so that it can generate wakeup signals + * for the device in response to external (remote) events and enable wakeup + * power for it. * * Callers must ensure that @adev is a valid ACPI device node before executing * this function. 
*/ -static int acpi_device_wakeup(struct acpi_device *adev, u32 target_state, - bool enable) +static int acpi_device_wakeup_enable(struct acpi_device *adev, u32 target_state) { - struct acpi_device_wakeup *wakeup = &adev->wakeup; - - if (enable) { - acpi_status res; - int error; - - if (adev->wakeup.flags.enabled) - return 0; - - error = acpi_enable_wakeup_device_power(adev, target_state); - if (error) - return error; - - res = acpi_enable_gpe(wakeup->gpe_device, wakeup->gpe_number); - if (ACPI_FAILURE(res)) { - acpi_disable_wakeup_device_power(adev); - return -EIO; - } - adev->wakeup.flags.enabled = 1; - } else if (adev->wakeup.flags.enabled) { - acpi_disable_gpe(wakeup->gpe_device, wakeup->gpe_number); - acpi_disable_wakeup_device_power(adev); - adev->wakeup.flags.enabled = 0; - } - return 0; + return __acpi_device_wakeup_enable(adev, target_state, 1); } /** - * acpi_pm_set_device_wakeup - Enable/disable remote wakeup for given device. - * @dev: Device to enable/disable to generate wakeup events. - * @enable: Whether to enable or disable the wakeup functionality. + * acpi_device_wakeup_disable - Disable wakeup functionality for device. + * @adev: ACPI device to disable wakeup functionality for. + * + * Disable the GPE associated with @adev and disable wakeup power for it. + * + * Callers must ensure that @adev is a valid ACPI device node before executing + * this function. */ -int acpi_pm_set_device_wakeup(struct device *dev, bool enable) +static void acpi_device_wakeup_disable(struct acpi_device *adev) +{ + struct acpi_device_wakeup *wakeup = &adev->wakeup; + + mutex_lock(&acpi_wakeup_lock); + + if (!wakeup->enable_count) + goto out; + + acpi_disable_gpe(wakeup->gpe_device, wakeup->gpe_number); + acpi_disable_wakeup_device_power(adev); + + wakeup->enable_count--; + +out: + mutex_unlock(&acpi_wakeup_lock); +} + +static int __acpi_pm_set_device_wakeup(struct device *dev, bool enable, + int max_count) { struct acpi_device *adev; int error; @@ -744,13 +783,41 @@ int acpi_pm_set_device_wakeup(struct device *dev, bool enable) if (!acpi_device_can_wakeup(adev)) return -EINVAL; - error = acpi_device_wakeup(adev, acpi_target_system_state(), enable); + if (!enable) { + acpi_device_wakeup_disable(adev); + dev_dbg(dev, "Wakeup disabled by ACPI\n"); + return 0; + } + + error = __acpi_device_wakeup_enable(adev, acpi_target_system_state(), + max_count); if (!error) - dev_dbg(dev, "Wakeup %s by ACPI\n", enable ? "enabled" : "disabled"); + dev_dbg(dev, "Wakeup enabled by ACPI\n"); return error; } -EXPORT_SYMBOL(acpi_pm_set_device_wakeup); + +/** + * acpi_pm_set_device_wakeup - Enable/disable remote wakeup for given device. + * @dev: Device to enable/disable to generate wakeup events. + * @enable: Whether to enable or disable the wakeup functionality. + */ +int acpi_pm_set_device_wakeup(struct device *dev, bool enable) +{ + return __acpi_pm_set_device_wakeup(dev, enable, 1); +} +EXPORT_SYMBOL_GPL(acpi_pm_set_device_wakeup); + +/** + * acpi_pm_set_bridge_wakeup - Enable/disable remote wakeup for given bridge. + * @dev: Bridge device to enable/disable to generate wakeup events. + * @enable: Whether to enable or disable the wakeup functionality. + */ +int acpi_pm_set_bridge_wakeup(struct device *dev, bool enable) +{ + return __acpi_pm_set_device_wakeup(dev, enable, INT_MAX); +} +EXPORT_SYMBOL_GPL(acpi_pm_set_bridge_wakeup); /** * acpi_dev_pm_low_power - Put ACPI device into a low-power state. 
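The device_pm.c hunks above replace the old boolean "wakeup enabled" flag with a per-device enable_count, so that acpi_pm_set_device_wakeup() (capped at one reference) and acpi_pm_set_bridge_wakeup() (capped at INT_MAX) can share the same helper without one caller undoing the other. The following is a minimal standalone C sketch of that reference-counting pattern only; fake_hw_enable()/fake_hw_disable() and the pthread mutex are hypothetical stand-ins, and the disable path is deliberately simplified (the patch above disables the GPE on every balanced disable call while the count is non-zero, not only on the last one).

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t wakeup_lock = PTHREAD_MUTEX_INITIALIZER;
static int enable_count;

/* Hypothetical stand-ins for the GPE/wakeup-power enable and disable calls. */
static int fake_hw_enable(void) { printf("wakeup hardware enabled\n"); return 0; }
static void fake_hw_disable(void) { printf("wakeup hardware disabled\n"); }

static int wakeup_enable(int max_count)
{
	int error = 0;

	pthread_mutex_lock(&wakeup_lock);
	if (enable_count >= max_count)
		goto out;		/* cap reached (1 for devices, INT_MAX for bridges) */
	if (enable_count > 0)
		goto inc;		/* already enabled, just take another reference */
	error = fake_hw_enable();	/* first caller does the real work */
	if (error)
		goto out;
inc:
	enable_count++;
out:
	pthread_mutex_unlock(&wakeup_lock);
	return error;
}

static void wakeup_disable(void)
{
	pthread_mutex_lock(&wakeup_lock);
	if (enable_count > 0 && --enable_count == 0)
		fake_hw_disable();	/* last reference dropped: tear down */
	pthread_mutex_unlock(&wakeup_lock);
}

int main(void)
{
	wakeup_enable(1);	/* enables the hardware, count becomes 1 */
	wakeup_enable(1);	/* cap of 1 already reached: no extra reference taken */
	wakeup_disable();	/* count drops to 0: hardware disabled */
	return 0;
}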
@@ -800,13 +867,15 @@ int acpi_dev_runtime_suspend(struct device *dev) remote_wakeup = dev_pm_qos_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP) > PM_QOS_FLAGS_NONE; - error = acpi_device_wakeup(adev, ACPI_STATE_S0, remote_wakeup); - if (remote_wakeup && error) - return -EAGAIN; + if (remote_wakeup) { + error = acpi_device_wakeup_enable(adev, ACPI_STATE_S0); + if (error) + return -EAGAIN; + } error = acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0); - if (error) - acpi_device_wakeup(adev, ACPI_STATE_S0, false); + if (error && remote_wakeup) + acpi_device_wakeup_disable(adev); return error; } @@ -829,7 +898,7 @@ int acpi_dev_runtime_resume(struct device *dev) return 0; error = acpi_dev_pm_full_power(adev); - acpi_device_wakeup(adev, ACPI_STATE_S0, false); + acpi_device_wakeup_disable(adev); return error; } EXPORT_SYMBOL_GPL(acpi_dev_runtime_resume); @@ -884,13 +953,15 @@ int acpi_dev_suspend_late(struct device *dev) target_state = acpi_target_system_state(); wakeup = device_may_wakeup(dev) && acpi_device_can_wakeup(adev); - error = acpi_device_wakeup(adev, target_state, wakeup); - if (wakeup && error) - return error; + if (wakeup) { + error = acpi_device_wakeup_enable(adev, target_state); + if (error) + return error; + } error = acpi_dev_pm_low_power(dev, adev, target_state); - if (error) - acpi_device_wakeup(adev, ACPI_STATE_UNKNOWN, false); + if (error && wakeup) + acpi_device_wakeup_disable(adev); return error; } @@ -913,7 +984,7 @@ int acpi_dev_resume_early(struct device *dev) return 0; error = acpi_dev_pm_full_power(adev); - acpi_device_wakeup(adev, ACPI_STATE_UNKNOWN, false); + acpi_device_wakeup_disable(adev); return error; } EXPORT_SYMBOL_GPL(acpi_dev_resume_early); @@ -1056,7 +1127,7 @@ static void acpi_dev_pm_detach(struct device *dev, bool power_off) */ dev_pm_qos_hide_latency_limit(dev); dev_pm_qos_hide_flags(dev); - acpi_device_wakeup(adev, ACPI_STATE_S0, false); + acpi_device_wakeup_disable(adev); acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0); } } @@ -1100,7 +1171,7 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on) dev_pm_domain_set(dev, &acpi_general_pm_domain); if (power_on) { acpi_dev_pm_full_power(adev); - acpi_device_wakeup(adev, ACPI_STATE_S0, false); + acpi_device_wakeup_disable(adev); } dev->pm_domain->detach = acpi_dev_pm_detach; diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c index 0c00208b423e..2305e1ab978e 100644 --- a/drivers/acpi/dock.c +++ b/drivers/acpi/dock.c @@ -585,7 +585,7 @@ static struct attribute *dock_attributes[] = { NULL }; -static struct attribute_group dock_attribute_group = { +static const struct attribute_group dock_attribute_group = { .attrs = dock_attributes }; diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index ae3d6d152633..236b14324780 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -112,8 +112,7 @@ enum { EC_FLAGS_EVT_HANDLER_INSTALLED, /* _Qxx handlers installed */ EC_FLAGS_STARTED, /* Driver is started */ EC_FLAGS_STOPPED, /* Driver is stopped */ - EC_FLAGS_COMMAND_STORM, /* GPE storms occurred to the - * current command processing */ + EC_FLAGS_GPE_MASKED, /* GPE masked */ }; #define ACPI_EC_COMMAND_POLL 0x01 /* Available for command byte */ @@ -425,19 +424,19 @@ static void acpi_ec_complete_request(struct acpi_ec *ec) wake_up(&ec->wait); } -static void acpi_ec_set_storm(struct acpi_ec *ec, u8 flag) +static void acpi_ec_mask_gpe(struct acpi_ec *ec) { - if (!test_bit(flag, &ec->flags)) { + if (!test_bit(EC_FLAGS_GPE_MASKED, &ec->flags)) { acpi_ec_disable_gpe(ec, false); ec_dbg_drv("Polling enabled"); - 
set_bit(flag, &ec->flags); + set_bit(EC_FLAGS_GPE_MASKED, &ec->flags); } } -static void acpi_ec_clear_storm(struct acpi_ec *ec, u8 flag) +static void acpi_ec_unmask_gpe(struct acpi_ec *ec) { - if (test_bit(flag, &ec->flags)) { - clear_bit(flag, &ec->flags); + if (test_bit(EC_FLAGS_GPE_MASKED, &ec->flags)) { + clear_bit(EC_FLAGS_GPE_MASKED, &ec->flags); acpi_ec_enable_gpe(ec, false); ec_dbg_drv("Polling disabled"); } @@ -464,7 +463,7 @@ static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec) static void acpi_ec_submit_query(struct acpi_ec *ec) { - acpi_ec_set_storm(ec, EC_FLAGS_COMMAND_STORM); + acpi_ec_mask_gpe(ec); if (!acpi_ec_event_enabled(ec)) return; if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) { @@ -480,7 +479,7 @@ static void acpi_ec_complete_query(struct acpi_ec *ec) if (test_and_clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) ec_dbg_evt("Command(%s) unblocked", acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY)); - acpi_ec_clear_storm(ec, EC_FLAGS_COMMAND_STORM); + acpi_ec_unmask_gpe(ec); } static inline void __acpi_ec_enable_event(struct acpi_ec *ec) @@ -700,7 +699,7 @@ static void advance_transaction(struct acpi_ec *ec) ++t->irq_count; /* Allow triggering on 0 threshold */ if (t->irq_count == ec_storm_threshold) - acpi_ec_set_storm(ec, EC_FLAGS_COMMAND_STORM); + acpi_ec_mask_gpe(ec); } } out: @@ -798,7 +797,7 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, spin_lock_irqsave(&ec->lock, tmp); if (t->irq_count == ec_storm_threshold) - acpi_ec_clear_storm(ec, EC_FLAGS_COMMAND_STORM); + acpi_ec_unmask_gpe(ec); ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command)); ec->curr = NULL; /* Disable GPE for command processing (IBF=0/OBF=1) */ @@ -1586,9 +1585,7 @@ static bool acpi_is_boot_ec(struct acpi_ec *ec) { if (!boot_ec) return false; - if (ec->handle == boot_ec->handle && - ec->gpe == boot_ec->gpe && - ec->command_addr == boot_ec->command_addr && + if (ec->command_addr == boot_ec->command_addr && ec->data_addr == boot_ec->data_addr) return true; return false; @@ -1613,6 +1610,13 @@ static int acpi_ec_add(struct acpi_device *device) if (acpi_is_boot_ec(ec)) { boot_ec_is_ecdt = false; + /* + * Trust PNP0C09 namespace location rather than ECDT ID. + * + * But trust ECDT GPE rather than _GPE because of ASUS quirks, + * so do not change boot_ec->gpe to ec->gpe. + */ + boot_ec->handle = ec->handle; acpi_handle_debug(ec->handle, "duplicated.\n"); acpi_ec_free(ec); ec = boot_ec; @@ -1747,18 +1751,20 @@ static int __init acpi_ec_ecdt_start(void) if (!boot_ec) return -ENODEV; - /* - * The DSDT EC should have already been started in - * acpi_ec_add(). - */ + /* In case acpi_ec_ecdt_start() is called after acpi_ec_add() */ if (!boot_ec_is_ecdt) return -ENODEV; /* * At this point, the namespace and the GPE is initialized, so * start to find the namespace objects and handle the events. + * + * Note: ec->handle can be valid if this function is called after + * acpi_ec_add(), hence the fast path. 
*/ - if (!acpi_ec_ecdt_get_handle(&handle)) + if (boot_ec->handle != ACPI_ROOT_OBJECT) + handle = boot_ec->handle; + else if (!acpi_ec_ecdt_get_handle(&handle)) return -ENODEV; return acpi_config_boot_ec(boot_ec, handle, true, true); } @@ -1803,7 +1809,7 @@ static int ec_honor_ecdt_gpe(const struct dmi_system_id *id) return 0; } -static struct dmi_system_id ec_dmi_table[] __initdata = { +static const struct dmi_system_id ec_dmi_table[] __initconst = { { ec_correct_ecdt, "MSI MS-171F", { DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"), @@ -2011,8 +2017,8 @@ int __init acpi_ec_init(void) return result; /* Drivers must be started after acpi_ec_query_init() */ - ecdt_fail = acpi_ec_ecdt_start(); dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver); + ecdt_fail = acpi_ec_ecdt_start(); return ecdt_fail && dsdt_fail ? -ENODEV : 0; } diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 3f5af4d7a739..4361c4415b4f 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -232,6 +232,12 @@ static inline void suspend_nvs_restore(void) {} void acpi_init_properties(struct acpi_device *adev); void acpi_free_properties(struct acpi_device *adev); +#ifdef CONFIG_X86 +void acpi_extract_apple_properties(struct acpi_device *adev); +#else +static inline void acpi_extract_apple_properties(struct acpi_device *adev) {} +#endif + /*-------------------------------------------------------------------------- Watchdog -------------------------------------------------------------------------- */ diff --git a/drivers/acpi/nfit/Kconfig b/drivers/acpi/nfit/Kconfig index 6d3351452ea2..929ba4da0b30 100644 --- a/drivers/acpi/nfit/Kconfig +++ b/drivers/acpi/nfit/Kconfig @@ -2,7 +2,7 @@ config ACPI_NFIT tristate "ACPI NVDIMM Firmware Interface Table (NFIT)" depends on PHYS_ADDR_T_64BIT depends on BLK_DEV - depends on ARCH_HAS_MMIO_FLUSH + depends on ARCH_HAS_PMEM_API select LIBNVDIMM help Infrastructure to probe ACPI 6 compliant platforms for diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index 1893e416e7c0..9c2c49b6a240 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -228,6 +228,10 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, if (cmd == ND_CMD_CALL) { call_pkg = buf; func = call_pkg->nd_command; + + for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++) + if (call_pkg->nd_reserved2[i]) + return -EINVAL; } if (nvdimm) { @@ -1674,8 +1678,19 @@ static ssize_t range_index_show(struct device *dev, } static DEVICE_ATTR_RO(range_index); +static ssize_t ecc_unit_size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nd_region *nd_region = to_nd_region(dev); + struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region); + + return sprintf(buf, "%d\n", nfit_spa->clear_err_unit); +} +static DEVICE_ATTR_RO(ecc_unit_size); + static struct attribute *acpi_nfit_region_attributes[] = { &dev_attr_range_index.attr, + &dev_attr_ecc_unit_size.attr, NULL, }; @@ -1804,6 +1819,7 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc, struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc, spa->range_index, i); + struct acpi_nfit_control_region *dcr = nfit_mem->dcr; if (!memdev || !nfit_mem->dcr) { dev_err(dev, "%s: failed to find DCR\n", __func__); @@ -1811,13 +1827,13 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc, } map->region_offset = memdev->region_offset; - map->serial_number = 
nfit_mem->dcr->serial_number; + map->serial_number = dcr->serial_number; map2->region_offset = memdev->region_offset; - map2->serial_number = nfit_mem->dcr->serial_number; - map2->vendor_id = nfit_mem->dcr->vendor_id; - map2->manufacturing_date = nfit_mem->dcr->manufacturing_date; - map2->manufacturing_location = nfit_mem->dcr->manufacturing_location; + map2->serial_number = dcr->serial_number; + map2->vendor_id = dcr->vendor_id; + map2->manufacturing_date = dcr->manufacturing_date; + map2->manufacturing_location = dcr->manufacturing_location; } /* v1.1 namespaces */ @@ -1835,6 +1851,28 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc, cmp_map_compat, NULL); nd_set->altcookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0); + /* record the result of the sort for the mapping position */ + for (i = 0; i < nr; i++) { + struct nfit_set_info_map2 *map2 = &info2->mapping[i]; + int j; + + for (j = 0; j < nr; j++) { + struct nd_mapping_desc *mapping = &ndr_desc->mapping[j]; + struct nvdimm *nvdimm = mapping->nvdimm; + struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); + struct acpi_nfit_control_region *dcr = nfit_mem->dcr; + + if (map2->serial_number == dcr->serial_number && + map2->vendor_id == dcr->vendor_id && + map2->manufacturing_date == dcr->manufacturing_date && + map2->manufacturing_location + == dcr->manufacturing_location) { + mapping->position = i; + break; + } + } + } + ndr_desc->nd_set = nd_set; devm_kfree(dev, info); devm_kfree(dev, info2); @@ -1930,7 +1968,7 @@ static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk, memcpy_flushcache(mmio->addr.aperture + offset, iobuf + copied, c); else { if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH) - mmio_flush_range((void __force *) + arch_invalidate_pmem((void __force *) mmio->addr.aperture + offset, c); memcpy(iobuf + copied, mmio->addr.aperture + offset, c); diff --git a/drivers/acpi/osi.c b/drivers/acpi/osi.c index 723bee58bbcf..76998a51bf99 100644 --- a/drivers/acpi/osi.c +++ b/drivers/acpi/osi.c @@ -27,6 +27,7 @@ #include #include #include +#include #include "internal.h" @@ -257,12 +258,11 @@ bool acpi_osi_is_win8(void) } EXPORT_SYMBOL(acpi_osi_is_win8); -static void __init acpi_osi_dmi_darwin(bool enable, - const struct dmi_system_id *d) +static void __init acpi_osi_dmi_darwin(void) { - pr_notice("DMI detected to setup _OSI(\"Darwin\"): %s\n", d->ident); + pr_notice("DMI detected to setup _OSI(\"Darwin\"): Apple hardware\n"); osi_config.darwin_dmi = 1; - __acpi_osi_setup_darwin(enable); + __acpi_osi_setup_darwin(true); } static void __init acpi_osi_dmi_linux(bool enable, @@ -273,13 +273,6 @@ static void __init acpi_osi_dmi_linux(bool enable, __acpi_osi_setup_linux(enable); } -static int __init dmi_enable_osi_darwin(const struct dmi_system_id *d) -{ - acpi_osi_dmi_darwin(true, d); - - return 0; -} - static int __init dmi_enable_osi_linux(const struct dmi_system_id *d) { acpi_osi_dmi_linux(true, d); @@ -319,7 +312,7 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d) * Note that _OSI("Linux")/_OSI("Darwin") determined here can be overridden * by acpi_osi=!Linux/acpi_osi=!Darwin command line options. 
*/ -static struct dmi_system_id acpi_osi_dmi_table[] __initdata = { +static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = { { .callback = dmi_disable_osi_vista, .ident = "Fujitsu Siemens", @@ -481,30 +474,16 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = { DMI_MATCH(DMI_PRODUCT_NAME, "1015PX"), }, }, - - /* - * Enable _OSI("Darwin") for all apple platforms. - */ - { - .callback = dmi_enable_osi_darwin, - .ident = "Apple hardware", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), - }, - }, - { - .callback = dmi_enable_osi_darwin, - .ident = "Apple hardware", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."), - }, - }, {} }; static __init void acpi_osi_dmi_blacklisted(void) { dmi_check_system(acpi_osi_dmi_table); + + /* Enable _OSI("Darwin") for Apple platforms. */ + if (x86_apple_machine) + acpi_osi_dmi_darwin(); } int __init early_acpi_osi_init(void) diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index 9eec3095e6c3..6fc204a52493 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -33,6 +33,7 @@ #include #include #include +#include #include /* for acpi_hest_init() */ #include "internal.h" @@ -431,8 +432,7 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm) * been called successfully. We know the feature set supported by the * platform, so avoid calling _OSC at all */ - - if (dmi_match(DMI_SYS_VENDOR, "Apple Inc.")) { + if (x86_apple_machine) { root->osc_control_set = ~OSC_PCI_EXPRESS_PME_CONTROL; decode_osc_control(root, "OS assumes control of", root->osc_control_set); diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c index f62c68e24317..e90b61f7d2db 100644 --- a/drivers/acpi/pci_slot.c +++ b/drivers/acpi/pci_slot.c @@ -174,7 +174,7 @@ static int do_sta_before_sun(const struct dmi_system_id *d) return 0; } -static struct dmi_system_id acpi_pci_slot_dmi_table[] __initdata = { +static const struct dmi_system_id acpi_pci_slot_dmi_table[] __initconst = { /* * Fujitsu Primequest machines will return 1023 to indicate an * error if the _SUN method is evaluated on SxFy objects that diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c index 3b7d5be5b7ed..6c99d3f81095 100644 --- a/drivers/acpi/pmic/intel_pmic_xpower.c +++ b/drivers/acpi/pmic/intel_pmic_xpower.c @@ -27,6 +27,9 @@ #define GPI1_LDO_ON (3 << 0) #define GPI1_LDO_OFF (4 << 0) +#define AXP288_ADC_TS_PIN_GPADC 0xf2 +#define AXP288_ADC_TS_PIN_ON 0xf3 + static struct pmic_table power_table[] = { { .address = 0x00, @@ -209,11 +212,23 @@ static int intel_xpower_pmic_update_power(struct regmap *regmap, int reg, static int intel_xpower_pmic_get_raw_temp(struct regmap *regmap, int reg) { u8 buf[2]; + int ret; - if (regmap_bulk_read(regmap, AXP288_GP_ADC_H, buf, 2)) - return -EIO; + ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, + AXP288_ADC_TS_PIN_GPADC); + if (ret) + return ret; - return (buf[0] << 4) + ((buf[1] >> 4) & 0x0F); + /* After switching to the GPADC pin give things some time to settle */ + usleep_range(6000, 10000); + + ret = regmap_bulk_read(regmap, AXP288_GP_ADC_H, buf, 2); + if (ret == 0) + ret = (buf[0] << 4) + ((buf[1] >> 4) & 0x0f); + + regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON); + + return ret; } static struct intel_pmic_opregion_data intel_xpower_pmic_opregion_data = { diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index 591d1dd3f04e..9d6aff22684e 100644 --- a/drivers/acpi/processor_driver.c +++ 
b/drivers/acpi/processor_driver.c @@ -237,7 +237,7 @@ static int __acpi_processor_start(struct acpi_device *device) result = acpi_cppc_processor_probe(pr); if (result && !IS_ENABLED(CONFIG_ACPI_CPU_FREQ_PSS)) - dev_warn(&device->dev, "CPPC data invalid or not present\n"); + dev_dbg(&device->dev, "CPPC data invalid or not present\n"); if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver) acpi_processor_power_init(pr); diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index fe3d2a40f311..2736e25e9dc6 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -48,6 +48,8 @@ #define _COMPONENT ACPI_PROCESSOR_COMPONENT ACPI_MODULE_NAME("processor_idle"); +#define ACPI_IDLE_STATE_START (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX) ? 1 : 0) + static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER; module_param(max_cstate, uint, 0000); static unsigned int nocst __read_mostly; @@ -759,7 +761,7 @@ static int acpi_idle_enter(struct cpuidle_device *dev, if (cx->type != ACPI_STATE_C1) { if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) { - index = CPUIDLE_DRIVER_STATE_START; + index = ACPI_IDLE_STATE_START; cx = per_cpu(acpi_cstate[index], dev->cpu); } else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) { if (cx->bm_sts_skip || !acpi_idle_bm_check()) { @@ -787,7 +789,7 @@ static int acpi_idle_enter(struct cpuidle_device *dev, return index; } -static void acpi_idle_enter_freeze(struct cpuidle_device *dev, +static void acpi_idle_enter_s2idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); @@ -811,7 +813,7 @@ static void acpi_idle_enter_freeze(struct cpuidle_device *dev, static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr, struct cpuidle_device *dev) { - int i, count = CPUIDLE_DRIVER_STATE_START; + int i, count = ACPI_IDLE_STATE_START; struct acpi_processor_cx *cx; if (max_cstate == 0) @@ -838,7 +840,7 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr, static int acpi_processor_setup_cstates(struct acpi_processor *pr) { - int i, count = CPUIDLE_DRIVER_STATE_START; + int i, count; struct acpi_processor_cx *cx; struct cpuidle_state *state; struct cpuidle_driver *drv = &acpi_idle_driver; @@ -846,6 +848,13 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr) if (max_cstate == 0) max_cstate = 1; + if (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX)) { + cpuidle_poll_state_init(drv); + count = 1; + } else { + count = 0; + } + for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) { cx = &pr->power.states[i]; @@ -865,14 +874,14 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr) drv->safe_state_index = count; } /* - * Halt-induced C1 is not good for ->enter_freeze, because it + * Halt-induced C1 is not good for ->enter_s2idle, because it * re-enables interrupts on exit. Moreover, C1 is generally not * particularly interesting from the suspend-to-idle angle, so * avoid C1 and the situations in which we may need to fall back * to it altogether. 
*/ if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr)) - state->enter_freeze = acpi_idle_enter_freeze; + state->enter_s2idle = acpi_idle_enter_s2idle; count++; if (count == CPUIDLE_STATE_MAX) @@ -1289,7 +1298,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr) return -EINVAL; drv->safe_state_index = -1; - for (i = CPUIDLE_DRIVER_STATE_START; i < CPUIDLE_STATE_MAX; i++) { + for (i = ACPI_IDLE_STATE_START; i < CPUIDLE_STATE_MAX; i++) { drv->states[i].name[0] = '\0'; drv->states[i].desc[0] = '\0'; } diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c index 7cfbda4d7c51..74f738cb6073 100644 --- a/drivers/acpi/processor_pdc.c +++ b/drivers/acpi/processor_pdc.c @@ -173,7 +173,7 @@ static int __init set_no_mwait(const struct dmi_system_id *id) return 0; } -static struct dmi_system_id processor_idle_dmi_table[] __initdata = { +static const struct dmi_system_id processor_idle_dmi_table[] __initconst = { { set_no_mwait, "Extensa 5220", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index 476a52c60cf3..3fb8ff513461 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c @@ -19,21 +19,19 @@ #include "internal.h" -static int acpi_data_get_property_array(struct acpi_device_data *data, +static int acpi_data_get_property_array(const struct acpi_device_data *data, const char *name, acpi_object_type type, const union acpi_object **obj); -/* ACPI _DSD device properties UUID: daffd814-6eba-4d8c-8a91-bc9bbf4aa301 */ -static const u8 prp_uuid[16] = { - 0x14, 0xd8, 0xff, 0xda, 0xba, 0x6e, 0x8c, 0x4d, - 0x8a, 0x91, 0xbc, 0x9b, 0xbf, 0x4a, 0xa3, 0x01 -}; -/* ACPI _DSD data subnodes UUID: dbb8e3e6-5886-4ba6-8795-1319f52a966b */ -static const u8 ads_uuid[16] = { - 0xe6, 0xe3, 0xb8, 0xdb, 0x86, 0x58, 0xa6, 0x4b, - 0x87, 0x95, 0x13, 0x19, 0xf5, 0x2a, 0x96, 0x6b -}; +/* ACPI _DSD device properties GUID: daffd814-6eba-4d8c-8a91-bc9bbf4aa301 */ +static const guid_t prp_guid = + GUID_INIT(0xdaffd814, 0x6eba, 0x4d8c, + 0x8a, 0x91, 0xbc, 0x9b, 0xbf, 0x4a, 0xa3, 0x01); +/* ACPI _DSD data subnodes GUID: dbb8e3e6-5886-4ba6-8795-1319f52a966b */ +static const guid_t ads_guid = + GUID_INIT(0xdbb8e3e6, 0x5886, 0x4ba6, + 0x87, 0x95, 0x13, 0x19, 0xf5, 0x2a, 0x96, 0x6b); static bool acpi_enumerate_nondev_subnodes(acpi_handle scope, const union acpi_object *desc, @@ -56,8 +54,7 @@ static bool acpi_nondev_subnode_extract(const union acpi_object *desc, return false; dn->name = link->package.elements[0].string.pointer; - dn->fwnode.type = FWNODE_ACPI_DATA; - dn->fwnode.ops = &acpi_fwnode_ops; + dn->fwnode.ops = &acpi_data_fwnode_ops; dn->parent = parent; INIT_LIST_HEAD(&dn->data.subnodes); @@ -190,22 +187,23 @@ static bool acpi_enumerate_nondev_subnodes(acpi_handle scope, { int i; - /* Look for the ACPI data subnodes UUID. */ + /* Look for the ACPI data subnodes GUID. */ for (i = 0; i < desc->package.count; i += 2) { - const union acpi_object *uuid, *links; + const union acpi_object *guid, *links; - uuid = &desc->package.elements[i]; + guid = &desc->package.elements[i]; links = &desc->package.elements[i + 1]; /* - * The first element must be a UUID and the second one must be + * The first element must be a GUID and the second one must be * a package. 
*/ - if (uuid->type != ACPI_TYPE_BUFFER || uuid->buffer.length != 16 - || links->type != ACPI_TYPE_PACKAGE) + if (guid->type != ACPI_TYPE_BUFFER || + guid->buffer.length != 16 || + links->type != ACPI_TYPE_PACKAGE) break; - if (memcmp(uuid->buffer.pointer, ads_uuid, sizeof(ads_uuid))) + if (!guid_equal((guid_t *)guid->buffer.pointer, &ads_guid)) continue; return acpi_add_nondev_subnodes(scope, links, &data->subnodes, @@ -298,26 +296,27 @@ static bool acpi_extract_properties(const union acpi_object *desc, if (desc->package.count % 2) return false; - /* Look for the device properties UUID. */ + /* Look for the device properties GUID. */ for (i = 0; i < desc->package.count; i += 2) { - const union acpi_object *uuid, *properties; + const union acpi_object *guid, *properties; - uuid = &desc->package.elements[i]; + guid = &desc->package.elements[i]; properties = &desc->package.elements[i + 1]; /* - * The first element must be a UUID and the second one must be + * The first element must be a GUID and the second one must be * a package. */ - if (uuid->type != ACPI_TYPE_BUFFER || uuid->buffer.length != 16 - || properties->type != ACPI_TYPE_PACKAGE) + if (guid->type != ACPI_TYPE_BUFFER || + guid->buffer.length != 16 || + properties->type != ACPI_TYPE_PACKAGE) break; - if (memcmp(uuid->buffer.pointer, prp_uuid, sizeof(prp_uuid))) + if (!guid_equal((guid_t *)guid->buffer.pointer, &prp_guid)) continue; /* - * We found the matching UUID. Now validate the format of the + * We found the matching GUID. Now validate the format of the * package immediately following it. */ if (!acpi_properties_format_valid(properties)) @@ -339,6 +338,9 @@ void acpi_init_properties(struct acpi_device *adev) INIT_LIST_HEAD(&adev->data.subnodes); + if (!adev->handle) + return; + /* * Check if ACPI_DT_NAMESPACE_HID is present and inthat case we fill in * Device Tree compatible properties for this device. @@ -373,6 +375,9 @@ void acpi_init_properties(struct acpi_device *adev) if (acpi_of && !adev->flags.of_compatible_ok) acpi_handle_info(adev->handle, ACPI_DT_NAMESPACE_HID " requires 'compatible' property\n"); + + if (!adev->data.pointer) + acpi_extract_apple_properties(adev); } static void acpi_destroy_nondev_subnodes(struct list_head *list) @@ -418,7 +423,7 @@ void acpi_free_properties(struct acpi_device *adev) * %-EINVAL if the property doesn't exist, * %-EPROTO if the property value type doesn't match @type. */ -static int acpi_data_get_property(struct acpi_device_data *data, +static int acpi_data_get_property(const struct acpi_device_data *data, const char *name, acpi_object_type type, const union acpi_object **obj) { @@ -460,20 +465,21 @@ static int acpi_data_get_property(struct acpi_device_data *data, * @type: Expected property type. * @obj: Location to store the property value (if not %NULL). */ -int acpi_dev_get_property(struct acpi_device *adev, const char *name, +int acpi_dev_get_property(const struct acpi_device *adev, const char *name, acpi_object_type type, const union acpi_object **obj) { return adev ? 
acpi_data_get_property(&adev->data, name, type, obj) : -EINVAL; } EXPORT_SYMBOL_GPL(acpi_dev_get_property); -static struct acpi_device_data *acpi_device_data_of_node(struct fwnode_handle *fwnode) +static const struct acpi_device_data * +acpi_device_data_of_node(const struct fwnode_handle *fwnode) { - if (fwnode->type == FWNODE_ACPI) { - struct acpi_device *adev = to_acpi_device_node(fwnode); + if (is_acpi_device_node(fwnode)) { + const struct acpi_device *adev = to_acpi_device_node(fwnode); return &adev->data; - } else if (fwnode->type == FWNODE_ACPI_DATA) { - struct acpi_data_node *dn = to_acpi_data_node(fwnode); + } else if (is_acpi_data_node(fwnode)) { + const struct acpi_data_node *dn = to_acpi_data_node(fwnode); return &dn->data; } return NULL; @@ -485,8 +491,8 @@ static struct acpi_device_data *acpi_device_data_of_node(struct fwnode_handle *f * @propname: Name of the property. * @valptr: Location to store a pointer to the property value (if not %NULL). */ -int acpi_node_prop_get(struct fwnode_handle *fwnode, const char *propname, - void **valptr) +int acpi_node_prop_get(const struct fwnode_handle *fwnode, + const char *propname, void **valptr) { return acpi_data_get_property(acpi_device_data_of_node(fwnode), propname, ACPI_TYPE_ANY, @@ -512,7 +518,7 @@ int acpi_node_prop_get(struct fwnode_handle *fwnode, const char *propname, * %-EPROTO if the property is not a package or the type of its elements * doesn't match @type. */ -static int acpi_data_get_property_array(struct acpi_device_data *data, +static int acpi_data_get_property_array(const struct acpi_device_data *data, const char *name, acpi_object_type type, const union acpi_object **obj) @@ -572,13 +578,13 @@ static int acpi_data_get_property_array(struct acpi_device_data *data, * * Return: %0 on success, negative error code on failure. */ -int __acpi_node_get_property_reference(struct fwnode_handle *fwnode, +int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, const char *propname, size_t index, size_t num_args, struct acpi_reference_args *args) { const union acpi_object *element, *end; const union acpi_object *obj; - struct acpi_device_data *data; + const struct acpi_device_data *data; struct acpi_device *device; int ret, idx = 0; @@ -674,7 +680,7 @@ int __acpi_node_get_property_reference(struct fwnode_handle *fwnode, } EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference); -static int acpi_data_prop_read_single(struct acpi_device_data *data, +static int acpi_data_prop_read_single(const struct acpi_device_data *data, const char *propname, enum dev_prop_type proptype, void *val) { @@ -813,7 +819,7 @@ static int acpi_copy_property_array_string(const union acpi_object *items, return nval; } -static int acpi_data_prop_read(struct acpi_device_data *data, +static int acpi_data_prop_read(const struct acpi_device_data *data, const char *propname, enum dev_prop_type proptype, void *val, size_t nval) @@ -867,7 +873,7 @@ static int acpi_data_prop_read(struct acpi_device_data *data, return ret; } -int acpi_dev_prop_read(struct acpi_device *adev, const char *propname, +int acpi_dev_prop_read(const struct acpi_device *adev, const char *propname, enum dev_prop_type proptype, void *val, size_t nval) { return adev ? acpi_data_prop_read(&adev->data, propname, proptype, val, nval) : -EINVAL; @@ -885,8 +891,9 @@ int acpi_dev_prop_read(struct acpi_device *adev, const char *propname, * of the property. Otherwise, read at most @nval values to the array at the * location pointed to by @val. 
*/ -int acpi_node_prop_read(struct fwnode_handle *fwnode, const char *propname, - enum dev_prop_type proptype, void *val, size_t nval) +int acpi_node_prop_read(const struct fwnode_handle *fwnode, + const char *propname, enum dev_prop_type proptype, + void *val, size_t nval) { return acpi_data_prop_read(acpi_device_data_of_node(fwnode), propname, proptype, val, nval); @@ -897,13 +904,16 @@ int acpi_node_prop_read(struct fwnode_handle *fwnode, const char *propname, * @fwnode: Firmware node to find the next child node for. * @child: Handle to one of the device's child nodes or a null handle. */ -struct fwnode_handle *acpi_get_next_subnode(struct fwnode_handle *fwnode, +struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode, struct fwnode_handle *child) { - struct acpi_device *adev = to_acpi_device_node(fwnode); - struct list_head *head, *next; + const struct acpi_device *adev = to_acpi_device_node(fwnode); + const struct list_head *head; + struct list_head *next; + + if (!child || is_acpi_device_node(child)) { + struct acpi_device *child_adev; - if (!child || child->type == FWNODE_ACPI) { if (adev) head = &adev->children; else @@ -919,16 +929,17 @@ struct fwnode_handle *acpi_get_next_subnode(struct fwnode_handle *fwnode, child = NULL; goto nondev; } - adev = list_entry(next, struct acpi_device, node); + child_adev = list_entry(next, struct acpi_device, node); } else { - adev = list_first_entry(head, struct acpi_device, node); + child_adev = list_first_entry(head, struct acpi_device, + node); } - return acpi_fwnode_handle(adev); + return acpi_fwnode_handle(child_adev); } nondev: - if (!child || child->type == FWNODE_ACPI_DATA) { - struct acpi_data_node *data = to_acpi_data_node(fwnode); + if (!child || is_acpi_data_node(child)) { + const struct acpi_data_node *data = to_acpi_data_node(fwnode); struct acpi_data_node *dn; if (adev) @@ -963,7 +974,7 @@ struct fwnode_handle *acpi_get_next_subnode(struct fwnode_handle *fwnode, * Returns parent node of an ACPI device or data firmware node or %NULL if * not available. */ -struct fwnode_handle *acpi_node_get_parent(struct fwnode_handle *fwnode) +struct fwnode_handle *acpi_node_get_parent(const struct fwnode_handle *fwnode) { if (is_acpi_data_node(fwnode)) { /* All data nodes have parent pointer so just return that */ @@ -992,8 +1003,8 @@ struct fwnode_handle *acpi_node_get_parent(struct fwnode_handle *fwnode) * %NULL if there is no next endpoint, ERR_PTR() in case of error. In case * of success the next endpoint is returned. */ -struct fwnode_handle *acpi_graph_get_next_endpoint(struct fwnode_handle *fwnode, - struct fwnode_handle *prev) +struct fwnode_handle *acpi_graph_get_next_endpoint( + const struct fwnode_handle *fwnode, struct fwnode_handle *prev) { struct fwnode_handle *port = NULL; struct fwnode_handle *endpoint; @@ -1040,7 +1051,8 @@ struct fwnode_handle *acpi_graph_get_next_endpoint(struct fwnode_handle *fwnode, * the child node on success, NULL otherwise. */ static struct fwnode_handle *acpi_graph_get_child_prop_value( - struct fwnode_handle *fwnode, const char *prop_name, unsigned int val) + const struct fwnode_handle *fwnode, const char *prop_name, + unsigned int val) { struct fwnode_handle *child; @@ -1069,17 +1081,18 @@ static struct fwnode_handle *acpi_graph_get_child_prop_value( * fields requested by the caller. Returns %0 in case of success and * negative errno otherwise. 
*/ -int acpi_graph_get_remote_endpoint(struct fwnode_handle *fwnode, +int acpi_graph_get_remote_endpoint(const struct fwnode_handle *__fwnode, struct fwnode_handle **parent, struct fwnode_handle **port, struct fwnode_handle **endpoint) { + struct fwnode_handle *fwnode; unsigned int port_nr, endpoint_nr; struct acpi_reference_args args; int ret; memset(&args, 0, sizeof(args)); - ret = acpi_node_get_property_reference(fwnode, "remote-endpoint", 0, + ret = acpi_node_get_property_reference(__fwnode, "remote-endpoint", 0, &args); if (ret) return ret; @@ -1121,7 +1134,7 @@ int acpi_graph_get_remote_endpoint(struct fwnode_handle *fwnode, return 0; } -static bool acpi_fwnode_device_is_available(struct fwnode_handle *fwnode) +static bool acpi_fwnode_device_is_available(const struct fwnode_handle *fwnode) { if (!is_acpi_device_node(fwnode)) return false; @@ -1129,16 +1142,17 @@ static bool acpi_fwnode_device_is_available(struct fwnode_handle *fwnode) return acpi_device_is_present(to_acpi_device_node(fwnode)); } -static bool acpi_fwnode_property_present(struct fwnode_handle *fwnode, +static bool acpi_fwnode_property_present(const struct fwnode_handle *fwnode, const char *propname) { return !acpi_node_prop_get(fwnode, propname, NULL); } -static int acpi_fwnode_property_read_int_array(struct fwnode_handle *fwnode, - const char *propname, - unsigned int elem_size, - void *val, size_t nval) +static int +acpi_fwnode_property_read_int_array(const struct fwnode_handle *fwnode, + const char *propname, + unsigned int elem_size, void *val, + size_t nval) { enum dev_prop_type type; @@ -1162,16 +1176,17 @@ static int acpi_fwnode_property_read_int_array(struct fwnode_handle *fwnode, return acpi_node_prop_read(fwnode, propname, type, val, nval); } -static int acpi_fwnode_property_read_string_array(struct fwnode_handle *fwnode, - const char *propname, - const char **val, size_t nval) +static int +acpi_fwnode_property_read_string_array(const struct fwnode_handle *fwnode, + const char *propname, const char **val, + size_t nval) { return acpi_node_prop_read(fwnode, propname, DEV_PROP_STRING, val, nval); } static struct fwnode_handle * -acpi_fwnode_get_named_child_node(struct fwnode_handle *fwnode, +acpi_fwnode_get_named_child_node(const struct fwnode_handle *fwnode, const char *childname) { struct fwnode_handle *child; @@ -1187,8 +1202,34 @@ acpi_fwnode_get_named_child_node(struct fwnode_handle *fwnode, return NULL; } +static int +acpi_fwnode_get_reference_args(const struct fwnode_handle *fwnode, + const char *prop, const char *nargs_prop, + unsigned int args_count, unsigned int index, + struct fwnode_reference_args *args) +{ + struct acpi_reference_args acpi_args; + unsigned int i; + int ret; + + ret = __acpi_node_get_property_reference(fwnode, prop, index, + args_count, &acpi_args); + if (ret < 0) + return ret; + if (!args) + return 0; + + args->nargs = acpi_args.nargs; + args->fwnode = acpi_fwnode_handle(acpi_args.adev); + + for (i = 0; i < NR_FWNODE_REFERENCE_ARGS; i++) + args->args[i] = i < acpi_args.nargs ? 
acpi_args.args[i] : 0; + + return 0; +} + static struct fwnode_handle * -acpi_fwnode_graph_get_next_endpoint(struct fwnode_handle *fwnode, +acpi_fwnode_graph_get_next_endpoint(const struct fwnode_handle *fwnode, struct fwnode_handle *prev) { struct fwnode_handle *endpoint; @@ -1201,7 +1242,7 @@ acpi_fwnode_graph_get_next_endpoint(struct fwnode_handle *fwnode, } static struct fwnode_handle * -acpi_fwnode_graph_get_remote_endpoint(struct fwnode_handle *fwnode) +acpi_fwnode_graph_get_remote_endpoint(const struct fwnode_handle *fwnode) { struct fwnode_handle *endpoint = NULL; @@ -1210,7 +1251,13 @@ acpi_fwnode_graph_get_remote_endpoint(struct fwnode_handle *fwnode) return endpoint; } -static int acpi_fwnode_graph_parse_endpoint(struct fwnode_handle *fwnode, +static struct fwnode_handle * +acpi_fwnode_get_parent(struct fwnode_handle *fwnode) +{ + return acpi_node_get_parent(fwnode); +} + +static int acpi_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode, struct fwnode_endpoint *endpoint) { struct fwnode_handle *port_fwnode = fwnode_get_parent(fwnode); @@ -1223,16 +1270,40 @@ static int acpi_fwnode_graph_parse_endpoint(struct fwnode_handle *fwnode, return 0; } -const struct fwnode_operations acpi_fwnode_ops = { - .device_is_available = acpi_fwnode_device_is_available, - .property_present = acpi_fwnode_property_present, - .property_read_int_array = acpi_fwnode_property_read_int_array, - .property_read_string_array = acpi_fwnode_property_read_string_array, - .get_parent = acpi_node_get_parent, - .get_next_child_node = acpi_get_next_subnode, - .get_named_child_node = acpi_fwnode_get_named_child_node, - .graph_get_next_endpoint = acpi_fwnode_graph_get_next_endpoint, - .graph_get_remote_endpoint = acpi_fwnode_graph_get_remote_endpoint, - .graph_get_port_parent = acpi_node_get_parent, - .graph_parse_endpoint = acpi_fwnode_graph_parse_endpoint, -}; +#define DECLARE_ACPI_FWNODE_OPS(ops) \ + const struct fwnode_operations ops = { \ + .device_is_available = acpi_fwnode_device_is_available, \ + .property_present = acpi_fwnode_property_present, \ + .property_read_int_array = \ + acpi_fwnode_property_read_int_array, \ + .property_read_string_array = \ + acpi_fwnode_property_read_string_array, \ + .get_parent = acpi_node_get_parent, \ + .get_next_child_node = acpi_get_next_subnode, \ + .get_named_child_node = acpi_fwnode_get_named_child_node, \ + .get_reference_args = acpi_fwnode_get_reference_args, \ + .graph_get_next_endpoint = \ + acpi_fwnode_graph_get_next_endpoint, \ + .graph_get_remote_endpoint = \ + acpi_fwnode_graph_get_remote_endpoint, \ + .graph_get_port_parent = acpi_fwnode_get_parent, \ + .graph_parse_endpoint = acpi_fwnode_graph_parse_endpoint, \ + }; \ + EXPORT_SYMBOL_GPL(ops) + +DECLARE_ACPI_FWNODE_OPS(acpi_device_fwnode_ops); +DECLARE_ACPI_FWNODE_OPS(acpi_data_fwnode_ops); +const struct fwnode_operations acpi_static_fwnode_ops; + +bool is_acpi_device_node(const struct fwnode_handle *fwnode) +{ + return !IS_ERR_OR_NULL(fwnode) && + fwnode->ops == &acpi_device_fwnode_ops; +} +EXPORT_SYMBOL(is_acpi_device_node); + +bool is_acpi_data_node(const struct fwnode_handle *fwnode) +{ + return !IS_ERR_OR_NULL(fwnode) && fwnode->ops == &acpi_data_fwnode_ops; +} +EXPORT_SYMBOL(is_acpi_data_node); diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index cd4c4271dc4c..d85e010ee2cc 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -573,6 +573,35 @@ static acpi_status acpi_dev_process_resource(struct acpi_resource *ares, return AE_OK; } +static int 
__acpi_dev_get_resources(struct acpi_device *adev, + struct list_head *list, + int (*preproc)(struct acpi_resource *, void *), + void *preproc_data, char *method) +{ + struct res_proc_context c; + acpi_status status; + + if (!adev || !adev->handle || !list_empty(list)) + return -EINVAL; + + if (!acpi_has_method(adev->handle, method)) + return 0; + + c.list = list; + c.preproc = preproc; + c.preproc_data = preproc_data; + c.count = 0; + c.error = 0; + status = acpi_walk_resources(adev->handle, method, + acpi_dev_process_resource, &c); + if (ACPI_FAILURE(status)) { + acpi_dev_free_resource_list(list); + return c.error ? c.error : -EIO; + } + + return c.count; +} + /** * acpi_dev_get_resources - Get current resources of a device. * @adev: ACPI device node to get the resources for. @@ -601,31 +630,46 @@ int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list, int (*preproc)(struct acpi_resource *, void *), void *preproc_data) { - struct res_proc_context c; - acpi_status status; - - if (!adev || !adev->handle || !list_empty(list)) - return -EINVAL; - - if (!acpi_has_method(adev->handle, METHOD_NAME__CRS)) - return 0; - - c.list = list; - c.preproc = preproc; - c.preproc_data = preproc_data; - c.count = 0; - c.error = 0; - status = acpi_walk_resources(adev->handle, METHOD_NAME__CRS, - acpi_dev_process_resource, &c); - if (ACPI_FAILURE(status)) { - acpi_dev_free_resource_list(list); - return c.error ? c.error : -EIO; - } - - return c.count; + return __acpi_dev_get_resources(adev, list, preproc, preproc_data, + METHOD_NAME__CRS); } EXPORT_SYMBOL_GPL(acpi_dev_get_resources); +static int is_memory(struct acpi_resource *ares, void *not_used) +{ + struct resource_win win; + struct resource *res = &win.res; + + memset(&win, 0, sizeof(win)); + + return !(acpi_dev_resource_memory(ares, res) + || acpi_dev_resource_address_space(ares, &win) + || acpi_dev_resource_ext_address_space(ares, &win)); +} + +/** + * acpi_dev_get_dma_resources - Get current DMA resources of a device. + * @adev: ACPI device node to get the resources for. + * @list: Head of the resultant list of resources (must be empty). + * + * Evaluate the _DMA method for the given device node and process its + * output. + * + * The resultant struct resource objects are put on the list pointed to + * by @list, that must be empty initially, as members of struct + * resource_entry objects. Callers of this routine should use + * %acpi_dev_free_resource_list() to free that list. + * + * The number of resources in the output list is returned on success, + * an error code reflecting the error condition is returned otherwise. 
+ */ +int acpi_dev_get_dma_resources(struct acpi_device *adev, struct list_head *list) +{ + return __acpi_dev_get_resources(adev, list, is_memory, NULL, + METHOD_NAME__DMA); +} +EXPORT_SYMBOL_GPL(acpi_dev_get_dma_resources); + /** * acpi_dev_filter_resource_type - Filter ACPI resource according to resource * types diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c index ad0b13ad4bbb..a2428e9462dd 100644 --- a/drivers/acpi/sbs.c +++ b/drivers/acpi/sbs.c @@ -31,7 +31,7 @@ #include #include #include -#include +#include #include "sbshc.h" #include "battery.h" @@ -58,8 +58,6 @@ static unsigned int cache_time = 1000; module_param(cache_time, uint, 0644); MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); -static bool sbs_manager_broken; - #define MAX_SBS_BAT 4 #define ACPI_SBS_BLOCK_MAX 32 @@ -476,7 +474,7 @@ static ssize_t acpi_battery_alarm_store(struct device *dev, return count; } -static struct device_attribute alarm_attr = { +static const struct device_attribute alarm_attr = { .attr = {.name = "alarm", .mode = 0644}, .show = acpi_battery_alarm_show, .store = acpi_battery_alarm_store, @@ -632,31 +630,12 @@ static void acpi_sbs_callback(void *context) } } -static int disable_sbs_manager(const struct dmi_system_id *d) -{ - sbs_manager_broken = true; - return 0; -} - -static struct dmi_system_id acpi_sbs_dmi_table[] = { - { - .callback = disable_sbs_manager, - .ident = "Apple", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc.") - }, - }, - { }, -}; - static int acpi_sbs_add(struct acpi_device *device) { struct acpi_sbs *sbs; int result = 0; int id; - dmi_check_system(acpi_sbs_dmi_table); - sbs = kzalloc(sizeof(struct acpi_sbs), GFP_KERNEL); if (!sbs) { result = -ENOMEM; @@ -677,7 +656,7 @@ static int acpi_sbs_add(struct acpi_device *device) result = 0; - if (!sbs_manager_broken) { + if (!x86_apple_machine) { result = acpi_manager_get_info(sbs); if (!result) { sbs->manager_present = 1; diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 70fd5502c284..602f8ff212f2 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -13,6 +13,7 @@ #include #include #include +#include #include @@ -1359,6 +1360,85 @@ enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev) return DEV_DMA_NON_COHERENT; } +/** + * acpi_dma_get_range() - Get device DMA parameters. + * + * @dev: device to configure + * @dma_addr: pointer device DMA address result + * @offset: pointer to the DMA offset result + * @size: pointer to DMA range size result + * + * Evaluate DMA regions and return respectively DMA region start, offset + * and size in dma_addr, offset and size on parsing success; it does not + * update the passed in values on failure. + * + * Return 0 on success, < 0 on failure. + */ +int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset, + u64 *size) +{ + struct acpi_device *adev; + LIST_HEAD(list); + struct resource_entry *rentry; + int ret; + struct device *dma_dev = dev; + u64 len, dma_start = U64_MAX, dma_end = 0, dma_offset = 0; + + /* + * Walk the device tree chasing an ACPI companion with a _DMA + * object while we go. Stop if we find a device with an ACPI + * companion containing a _DMA method. 
+ */ + do { + adev = ACPI_COMPANION(dma_dev); + if (adev && acpi_has_method(adev->handle, METHOD_NAME__DMA)) + break; + + dma_dev = dma_dev->parent; + } while (dma_dev); + + if (!dma_dev) + return -ENODEV; + + if (!acpi_has_method(adev->handle, METHOD_NAME__CRS)) { + acpi_handle_warn(adev->handle, "_DMA is valid only if _CRS is present\n"); + return -EINVAL; + } + + ret = acpi_dev_get_dma_resources(adev, &list); + if (ret > 0) { + list_for_each_entry(rentry, &list, node) { + if (dma_offset && rentry->offset != dma_offset) { + ret = -EINVAL; + dev_warn(dma_dev, "Can't handle multiple windows with different offsets\n"); + goto out; + } + dma_offset = rentry->offset; + + /* Take lower and upper limits */ + if (rentry->res->start < dma_start) + dma_start = rentry->res->start; + if (rentry->res->end > dma_end) + dma_end = rentry->res->end; + } + + if (dma_start >= dma_end) { + ret = -EINVAL; + dev_dbg(dma_dev, "Invalid DMA regions configuration\n"); + goto out; + } + + *dma_addr = dma_start - dma_offset; + len = dma_end - dma_start; + *size = max(len, len + 1); + *offset = dma_offset; + } + out: + acpi_dev_free_resource_list(&list); + + return ret >= 0 ? 0 : ret; +} + /** * acpi_dma_configure - Set-up DMA configuration for the device. * @dev: The pointer to the device @@ -1367,20 +1447,16 @@ enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev) int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr) { const struct iommu_ops *iommu; - u64 size; + u64 dma_addr = 0, size = 0; - iort_set_dma_mask(dev); + iort_dma_setup(dev, &dma_addr, &size); iommu = iort_iommu_configure(dev); if (IS_ERR(iommu) && PTR_ERR(iommu) == -EPROBE_DEFER) return -EPROBE_DEFER; - size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1); - /* - * Assume dma valid range starts at 0 and covers the whole - * coherent_dma_mask. - */ - arch_setup_dma_ops(dev, 0, size, iommu, attr == DEV_DMA_COHERENT); + arch_setup_dma_ops(dev, dma_addr, size, + iommu, attr == DEV_DMA_COHERENT); return 0; } @@ -1452,6 +1528,12 @@ static bool acpi_is_spi_i2c_slave(struct acpi_device *device) struct list_head resource_list; bool is_spi_i2c_slave = false; + /* Macs use device properties in lieu of _CRS resources */ + if (x86_apple_machine && + (fwnode_property_present(&device->fwnode, "spiSclkPeriod") || + fwnode_property_present(&device->fwnode, "i2cAddress"))) + return true; + INIT_LIST_HEAD(&resource_list); acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave, &is_spi_i2c_slave); @@ -1467,8 +1549,7 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle, device->device_type = type; device->handle = handle; device->parent = acpi_bus_get_parent(handle); - device->fwnode.type = FWNODE_ACPI; - device->fwnode.ops = &acpi_fwnode_ops; + device->fwnode.ops = &acpi_device_fwnode_ops; acpi_set_device_status(device, sta); acpi_device_get_busid(device); acpi_set_pnp_ids(handle, &device->pnp, type); @@ -2058,6 +2139,9 @@ int __init acpi_scan_init(void) acpi_get_spcr_uart_addr(); } + acpi_gpe_apply_masked_gpes(); + acpi_update_all_gpes(); + mutex_lock(&acpi_scan_lock); /* * Enumerate devices in the ACPI namespace. 
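As an illustrative sketch only, not part of the patch: a caller of the two _DMA helpers introduced above, acpi_dev_get_dma_resources() and acpi_dma_get_range(), might look like the following. The example_* function names are invented, and the declarations are assumed to be available from <linux/acpi.h> as updated elsewhere in this series.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/resource_ext.h>

static void example_report_dma_windows(struct acpi_device *adev)
{
	struct resource_entry *entry;
	LIST_HEAD(dma_list);
	int count;

	/* Evaluate _DMA and collect the windows as resource_entry objects. */
	count = acpi_dev_get_dma_resources(adev, &dma_list);
	if (count <= 0)
		return;

	list_for_each_entry(entry, &dma_list, node)
		acpi_handle_info(adev->handle, "DMA window %pR, offset %#llx\n",
				 entry->res,
				 (unsigned long long)entry->offset);

	/* The caller owns the list and must free it. */
	acpi_dev_free_resource_list(&dma_list);
}

static void example_report_dma_range(struct device *dev)
{
	u64 dma_addr, offset, size;

	/* Collapses all windows into one start/offset/size triplet. */
	if (!acpi_dma_get_range(dev, &dma_addr, &offset, &size))
		dev_info(dev, "dma_addr %#llx, offset %#llx, size %#llx\n",
			 dma_addr, offset, size);
}

Note that acpi_dma_get_range() only updates its output parameters on success, so the sketch prints them only when the call returns 0.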
@@ -2082,9 +2166,6 @@ int __init acpi_scan_init(void) } } - acpi_gpe_apply_masked_gpes(); - acpi_update_all_gpes(); - acpi_scan_initialized = true; out: diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index fa8243c5c062..6804ddab3052 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -160,7 +160,7 @@ static int __init init_nvs_nosave(const struct dmi_system_id *d) return 0; } -static struct dmi_system_id acpisleep_dmi_table[] __initdata = { +static const struct dmi_system_id acpisleep_dmi_table[] __initconst = { { .callback = init_old_suspend_ordering, .ident = "Abit KN9 (nForce4 variant)", @@ -669,6 +669,7 @@ static const struct acpi_device_id lps0_device_ids[] = { #define ACPI_LPS0_DSM_UUID "c4eb40a0-6cd2-11e2-bcfd-0800200c9a66" +#define ACPI_LPS0_GET_DEVICE_CONSTRAINTS 1 #define ACPI_LPS0_SCREEN_OFF 3 #define ACPI_LPS0_SCREEN_ON 4 #define ACPI_LPS0_ENTRY 5 @@ -680,6 +681,166 @@ static acpi_handle lps0_device_handle; static guid_t lps0_dsm_guid; static char lps0_dsm_func_mask; +/* Device constraint entry structure */ +struct lpi_device_info { + char *name; + int enabled; + union acpi_object *package; +}; + +/* Constraint package structure */ +struct lpi_device_constraint { + int uid; + int min_dstate; + int function_states; +}; + +struct lpi_constraints { + acpi_handle handle; + int min_dstate; +}; + +static struct lpi_constraints *lpi_constraints_table; +static int lpi_constraints_table_size; + +static void lpi_device_get_constraints(void) +{ + union acpi_object *out_obj; + int i; + + out_obj = acpi_evaluate_dsm_typed(lps0_device_handle, &lps0_dsm_guid, + 1, ACPI_LPS0_GET_DEVICE_CONSTRAINTS, + NULL, ACPI_TYPE_PACKAGE); + + acpi_handle_debug(lps0_device_handle, "_DSM function 1 eval %s\n", + out_obj ? "successful" : "failed"); + + if (!out_obj) + return; + + lpi_constraints_table = kcalloc(out_obj->package.count, + sizeof(*lpi_constraints_table), + GFP_KERNEL); + if (!lpi_constraints_table) + goto free_acpi_buffer; + + acpi_handle_debug(lps0_device_handle, "LPI: constraints list begin:\n"); + + for (i = 0; i < out_obj->package.count; i++) { + struct lpi_constraints *constraint; + acpi_status status; + union acpi_object *package = &out_obj->package.elements[i]; + struct lpi_device_info info = { }; + int package_count = 0, j; + + if (!package) + continue; + + for (j = 0; j < package->package.count; ++j) { + union acpi_object *element = + &(package->package.elements[j]); + + switch (element->type) { + case ACPI_TYPE_INTEGER: + info.enabled = element->integer.value; + break; + case ACPI_TYPE_STRING: + info.name = element->string.pointer; + break; + case ACPI_TYPE_PACKAGE: + package_count = element->package.count; + info.package = element->package.elements; + break; + } + } + + if (!info.enabled || !info.package || !info.name) + continue; + + constraint = &lpi_constraints_table[lpi_constraints_table_size]; + + status = acpi_get_handle(NULL, info.name, &constraint->handle); + if (ACPI_FAILURE(status)) + continue; + + acpi_handle_debug(lps0_device_handle, + "index:%d Name:%s\n", i, info.name); + + constraint->min_dstate = -1; + + for (j = 0; j < package_count; ++j) { + union acpi_object *info_obj = &info.package[j]; + union acpi_object *cnstr_pkg; + union acpi_object *obj; + struct lpi_device_constraint dev_info; + + switch (info_obj->type) { + case ACPI_TYPE_INTEGER: + /* version */ + break; + case ACPI_TYPE_PACKAGE: + if (info_obj->package.count < 2) + break; + + cnstr_pkg = info_obj->package.elements; + obj = &cnstr_pkg[0]; + dev_info.uid = obj->integer.value; + obj = 
&cnstr_pkg[1]; + dev_info.min_dstate = obj->integer.value; + + acpi_handle_debug(lps0_device_handle, + "uid:%d min_dstate:%s\n", + dev_info.uid, + acpi_power_state_string(dev_info.min_dstate)); + + constraint->min_dstate = dev_info.min_dstate; + break; + } + } + + if (constraint->min_dstate < 0) { + acpi_handle_debug(lps0_device_handle, + "Incomplete constraint defined\n"); + continue; + } + + lpi_constraints_table_size++; + } + + acpi_handle_debug(lps0_device_handle, "LPI: constraints list end\n"); + +free_acpi_buffer: + ACPI_FREE(out_obj); +} + +static void lpi_check_constraints(void) +{ + int i; + + for (i = 0; i < lpi_constraints_table_size; ++i) { + struct acpi_device *adev; + + if (acpi_bus_get_device(lpi_constraints_table[i].handle, &adev)) + continue; + + acpi_handle_debug(adev->handle, + "LPI: required min power state:%s current power state:%s\n", + acpi_power_state_string(lpi_constraints_table[i].min_dstate), + acpi_power_state_string(adev->power.state)); + + if (!adev->flags.power_manageable) { + acpi_handle_info(adev->handle, "LPI: Device not power manageable\n"); + continue; + } + + if (adev->power.state < lpi_constraints_table[i].min_dstate) + acpi_handle_info(adev->handle, + "LPI: Constraint not met; min power state:%s current power state:%s\n", + acpi_power_state_string(lpi_constraints_table[i].min_dstate), + acpi_power_state_string(adev->power.state)); + } +} + static void acpi_sleep_run_lps0_dsm(unsigned int func) { union acpi_object *out_obj; @@ -714,6 +875,12 @@ static int lps0_device_attach(struct acpi_device *adev, if ((bitmask & ACPI_S2IDLE_FUNC_MASK) == ACPI_S2IDLE_FUNC_MASK) { lps0_dsm_func_mask = bitmask; lps0_device_handle = adev->handle; + /* + * Use suspend-to-idle by default if the default + * suspend mode was not set from the command line. + */ + if (mem_sleep_default > PM_SUSPEND_MEM) + mem_sleep_current = PM_SUSPEND_TO_IDLE; } acpi_handle_debug(adev->handle, "_DSM function mask: 0x%x\n", @@ -723,6 +890,9 @@ static int lps0_device_attach(struct acpi_device *adev, "_DSM function 0 evaluation failed\n"); } ACPI_FREE(out_obj); + + lpi_device_get_constraints(); + return 0; } @@ -731,14 +901,14 @@ static struct acpi_scan_handler lps0_handler = { .attach = lps0_device_attach, }; -static int acpi_freeze_begin(void) +static int acpi_s2idle_begin(void) { acpi_scan_lock_acquire(); s2idle_in_progress = true; return 0; } -static int acpi_freeze_prepare(void) +static int acpi_s2idle_prepare(void) { if (lps0_device_handle) { acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF); @@ -758,8 +928,12 @@ static int acpi_freeze_prepare(void) return 0; } -static void acpi_freeze_wake(void) +static void acpi_s2idle_wake(void) { + + if (pm_debug_messages_on) + lpi_check_constraints(); + /* * If IRQD_WAKEUP_ARMED is not set for the SCI at this point, it means * that the SCI has triggered while suspended, so cancel the wakeup in @@ -772,7 +946,7 @@ static void acpi_freeze_wake(void) } } -static void acpi_freeze_sync(void) +static void acpi_s2idle_sync(void) { /* * Process all pending events in case there are any wakeup ones.
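As an illustrative sketch only, not part of the patch: the constraint table built by lpi_device_get_constraints() above could be consulted per device with a small helper like the one below. The helper name is invented, and it is assumed to live next to the code above in drivers/acpi/sleep.c, since it touches the file-static table.

static int example_min_dstate_for(acpi_handle handle)
{
	int i;

	/* lpi_constraints_table holds one entry per constrained device. */
	for (i = 0; i < lpi_constraints_table_size; i++)
		if (lpi_constraints_table[i].handle == handle)
			return lpi_constraints_table[i].min_dstate;

	return -ENODEV;	/* the platform lists no constraint for this device */
}

In the patch itself the table is only read by lpi_check_constraints(), which dumps every entry during suspend-to-idle wakeup when PM debug messages are enabled.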
@@ -785,7 +959,7 @@ static void acpi_freeze_sync(void) s2idle_wakeup = false; } -static void acpi_freeze_restore(void) +static void acpi_s2idle_restore(void) { if (acpi_sci_irq_valid()) disable_irq_wake(acpi_sci_irq); @@ -798,19 +972,19 @@ static void acpi_freeze_restore(void) } } -static void acpi_freeze_end(void) +static void acpi_s2idle_end(void) { s2idle_in_progress = false; acpi_scan_lock_release(); } -static const struct platform_freeze_ops acpi_freeze_ops = { - .begin = acpi_freeze_begin, - .prepare = acpi_freeze_prepare, - .wake = acpi_freeze_wake, - .sync = acpi_freeze_sync, - .restore = acpi_freeze_restore, - .end = acpi_freeze_end, +static const struct platform_s2idle_ops acpi_s2idle_ops = { + .begin = acpi_s2idle_begin, + .prepare = acpi_s2idle_prepare, + .wake = acpi_s2idle_wake, + .sync = acpi_s2idle_sync, + .restore = acpi_s2idle_restore, + .end = acpi_s2idle_end, }; static void acpi_sleep_suspend_setup(void) @@ -825,7 +999,7 @@ static void acpi_sleep_suspend_setup(void) &acpi_suspend_ops_old : &acpi_suspend_ops); acpi_scan_add_handler(&lps0_handler); - freeze_set_ops(&acpi_freeze_ops); + s2idle_set_ops(&acpi_s2idle_ops); } #else /* !CONFIG_SUSPEND */ @@ -870,7 +1044,7 @@ static struct syscore_ops acpi_sleep_syscore_ops = { .resume = acpi_restore_bm_rld, }; -void acpi_sleep_syscore_init(void) +static void acpi_sleep_syscore_init(void) { register_syscore_ops(&acpi_sleep_syscore_ops); } diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c index 98aa8c808a33..324b35bfe781 100644 --- a/drivers/acpi/spcr.c +++ b/drivers/acpi/spcr.c @@ -53,17 +53,24 @@ static bool qdf2400_erratum_44_present(struct acpi_table_header *h) */ static bool xgene_8250_erratum_present(struct acpi_table_spcr *tb) { + bool xgene_8250 = false; + if (tb->interface_type != ACPI_DBG2_16550_COMPATIBLE) return false; - if (memcmp(tb->header.oem_id, "APMC0D", ACPI_OEM_ID_SIZE)) + if (memcmp(tb->header.oem_id, "APMC0D", ACPI_OEM_ID_SIZE) && + memcmp(tb->header.oem_id, "HPE ", ACPI_OEM_ID_SIZE)) return false; if (!memcmp(tb->header.oem_table_id, "XGENESPC", ACPI_OEM_TABLE_ID_SIZE) && tb->header.oem_revision == 0) - return true; + xgene_8250 = true; - return false; + if (!memcmp(tb->header.oem_table_id, "ProLiant", + ACPI_OEM_TABLE_ID_SIZE) && tb->header.oem_revision == 1) + xgene_8250 = true; + + return xgene_8250; } /** @@ -105,16 +112,17 @@ int __init parse_spcr(bool earlycon) } if (table->serial_port.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { - switch (table->serial_port.access_width) { + switch (ACPI_ACCESS_BIT_WIDTH(( + table->serial_port.access_width))) { default: pr_err("Unexpected SPCR Access Width. 
Defaulting to byte size\n"); - case ACPI_ACCESS_SIZE_BYTE: + case 8: iotype = "mmio"; break; - case ACPI_ACCESS_SIZE_WORD: + case 16: iotype = "mmio16"; break; - case ACPI_ACCESS_SIZE_DWORD: + case 32: iotype = "mmio32"; break; } @@ -181,11 +189,19 @@ int __init parse_spcr(bool earlycon) uart = "qdf2400_e44"; } - if (xgene_8250_erratum_present(table)) + if (xgene_8250_erratum_present(table)) { iotype = "mmio32"; - snprintf(opts, sizeof(opts), "%s,%s,0x%llx,%d", uart, iotype, - table->serial_port.address, baud_rate); + /* for xgene v1 and v2 we don't know the clock rate of the + * UART so don't attempt to change to the baud rate state + * in the table because driver cannot calculate the dividers + */ + snprintf(opts, sizeof(opts), "%s,%s,0x%llx", uart, iotype, + table->serial_port.address); + } else { + snprintf(opts, sizeof(opts), "%s,%s,0x%llx,%d", uart, iotype, + table->serial_port.address, baud_rate); + } pr_info("console: %s\n", opts); diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c index e414fabf7315..78a5a23010ab 100644 --- a/drivers/acpi/sysfs.c +++ b/drivers/acpi/sysfs.c @@ -2,6 +2,8 @@ * sysfs.c - ACPI sysfs interface to userspace. */ +#define pr_fmt(fmt) "ACPI: " fmt + #include #include #include @@ -306,11 +308,13 @@ module_param_call(acpica_version, NULL, param_get_acpica_version, NULL, 0444); /* * ACPI table sysfs I/F: * /sys/firmware/acpi/tables/ + * /sys/firmware/acpi/tables/data/ * /sys/firmware/acpi/tables/dynamic/ */ static LIST_HEAD(acpi_table_attr_list); static struct kobject *tables_kobj; +static struct kobject *tables_data_kobj; static struct kobject *dynamic_tables_kobj; static struct kobject *hotplug_kobj; @@ -325,6 +329,11 @@ struct acpi_table_attr { struct list_head node; }; +struct acpi_data_attr { + struct bin_attribute attr; + u64 addr; +}; + static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t offset, size_t count) @@ -420,6 +429,70 @@ acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context) return AE_OK; } +static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buf, + loff_t offset, size_t count) +{ + struct acpi_data_attr *data_attr; + void __iomem *base; + ssize_t rc; + + data_attr = container_of(bin_attr, struct acpi_data_attr, attr); + + base = acpi_os_map_memory(data_attr->addr, data_attr->attr.size); + if (!base) + return -ENOMEM; + rc = memory_read_from_buffer(buf, count, &offset, base, + data_attr->attr.size); + acpi_os_unmap_memory(base, data_attr->attr.size); + + return rc; +} + +static int acpi_bert_data_init(void *th, struct acpi_data_attr *data_attr) +{ + struct acpi_table_bert *bert = th; + + if (bert->header.length < sizeof(struct acpi_table_bert) || + bert->region_length < sizeof(struct acpi_hest_generic_status)) { + kfree(data_attr); + return -EINVAL; + } + data_attr->addr = bert->address; + data_attr->attr.size = bert->region_length; + data_attr->attr.attr.name = "BERT"; + + return sysfs_create_bin_file(tables_data_kobj, &data_attr->attr); +} + +static struct acpi_data_obj { + char *name; + int (*fn)(void *, struct acpi_data_attr *); +} acpi_data_objs[] = { + { ACPI_SIG_BERT, acpi_bert_data_init }, +}; + +#define NUM_ACPI_DATA_OBJS ARRAY_SIZE(acpi_data_objs) + +static int acpi_table_data_init(struct acpi_table_header *th) +{ + struct acpi_data_attr *data_attr; + int i; + + for (i = 0; i < NUM_ACPI_DATA_OBJS; i++) { + if (ACPI_COMPARE_NAME(th->signature, acpi_data_objs[i].name)) { + data_attr 
= kzalloc(sizeof(*data_attr), GFP_KERNEL); + if (!data_attr) + return -ENOMEM; + sysfs_attr_init(&data_attr->attr.attr); + data_attr->attr.read = acpi_data_show; + data_attr->attr.attr.mode = 0400; + return acpi_data_objs[i].fn(th, data_attr); + } + } + return 0; +} + static int acpi_tables_sysfs_init(void) { struct acpi_table_attr *table_attr; @@ -432,6 +505,10 @@ static int acpi_tables_sysfs_init(void) if (!tables_kobj) goto err; + tables_data_kobj = kobject_create_and_add("data", tables_kobj); + if (!tables_data_kobj) + goto err_tables_data; + dynamic_tables_kobj = kobject_create_and_add("dynamic", tables_kobj); if (!dynamic_tables_kobj) goto err_dynamic_tables; @@ -456,13 +533,17 @@ static int acpi_tables_sysfs_init(void) return ret; } list_add_tail(&table_attr->node, &acpi_table_attr_list); + acpi_table_data_init(table_header); } kobject_uevent(tables_kobj, KOBJ_ADD); + kobject_uevent(tables_data_kobj, KOBJ_ADD); kobject_uevent(dynamic_tables_kobj, KOBJ_ADD); return 0; err_dynamic_tables: + kobject_put(tables_data_kobj); +err_tables_data: kobject_put(tables_kobj); err: return -ENOMEM; @@ -552,11 +633,15 @@ static void fixed_event_count(u32 event_number) static void acpi_global_event_handler(u32 event_type, acpi_handle device, u32 event_number, void *context) { - if (event_type == ACPI_EVENT_TYPE_GPE) + if (event_type == ACPI_EVENT_TYPE_GPE) { gpe_count(event_number); - - if (event_type == ACPI_EVENT_TYPE_FIXED) + pr_debug("GPE event 0x%02x\n", event_number); + } else if (event_type == ACPI_EVENT_TYPE_FIXED) { fixed_event_count(event_number); + pr_debug("Fixed event 0x%02x\n", event_number); + } else { + pr_debug("Other event 0x%02x\n", event_number); + } } static int get_status(u32 index, acpi_event_status *status, diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c index ff425390bfa8..80ce2a7d224b 100644 --- a/drivers/acpi/tables.c +++ b/drivers/acpi/tables.c @@ -740,10 +740,10 @@ int __init acpi_table_init(void) if (acpi_verify_table_checksum) { pr_info("Early table checksum verification enabled\n"); - acpi_gbl_verify_table_checksum = TRUE; + acpi_gbl_enable_table_validation = TRUE; } else { pr_info("Early table checksum verification disabled\n"); - acpi_gbl_verify_table_checksum = FALSE; + acpi_gbl_enable_table_validation = FALSE; } status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0); diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index 1d0417b87cb7..551b71a24b85 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c @@ -1209,7 +1209,7 @@ static int thermal_psv(const struct dmi_system_id *d) { return 0; } -static struct dmi_system_id thermal_dmi_table[] __initdata = { +static const struct dmi_system_id thermal_dmi_table[] __initconst = { /* * Award BIOS on this AOpen makes thermal control almost worthless. * http://bugzilla.kernel.org/show_bug.cgi?id=8842 diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c index b9d956c916f5..0a9e5979aaa9 100644 --- a/drivers/acpi/utils.c +++ b/drivers/acpi/utils.c @@ -816,3 +816,39 @@ static int __init acpi_backlight(char *str) return 1; } __setup("acpi_backlight=", acpi_backlight); + +/** + * acpi_match_platform_list - Check if the system matches with a given list + * @plat: pointer to acpi_platform_list table terminated by a NULL entry + * + * Return the matched index if the system is found in the platform list. + * Otherwise, return a negative error code. 
+ */ +int acpi_match_platform_list(const struct acpi_platform_list *plat) +{ + struct acpi_table_header hdr; + int idx = 0; + + if (acpi_disabled) + return -ENODEV; + + for (; plat->oem_id[0]; plat++, idx++) { + if (ACPI_FAILURE(acpi_get_table_header(plat->table, 0, &hdr))) + continue; + + if (strncmp(plat->oem_id, hdr.oem_id, ACPI_OEM_ID_SIZE)) + continue; + + if (strncmp(plat->oem_table_id, hdr.oem_table_id, ACPI_OEM_TABLE_ID_SIZE)) + continue; + + if ((plat->pred == all_versions) || + (plat->pred == less_than_or_equal && hdr.oem_revision <= plat->oem_revision) || + (plat->pred == greater_than_or_equal && hdr.oem_revision >= plat->oem_revision) || + (plat->pred == equal && hdr.oem_revision == plat->oem_revision)) + return idx; + } + + return -ENODEV; +} +EXPORT_SYMBOL(acpi_match_platform_list); diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index d179e8d9177d..601e5d372887 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -103,6 +103,12 @@ static int video_detect_force_native(const struct dmi_system_id *d) return 0; } +static int video_detect_force_none(const struct dmi_system_id *d) +{ + acpi_backlight_dmi = acpi_backlight_none; + return 0; +} + static const struct dmi_system_id video_detect_dmi_table[] = { /* On Samsung X360, the BIOS will set a flag (VDRV) if generic * ACPI backlight device is used. This flag will definitively break @@ -313,6 +319,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Precision 7510"), }, }, + { + .callback = video_detect_force_none, + .ident = "Dell OptiPlex 9020M", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 9020M"), + }, + }, { }, }; diff --git a/drivers/acpi/x86/apple.c b/drivers/acpi/x86/apple.c new file mode 100644 index 000000000000..51b4cf9f25da --- /dev/null +++ b/drivers/acpi/x86/apple.c @@ -0,0 +1,141 @@ +/* + * apple.c - Apple ACPI quirks + * Copyright (C) 2017 Lukas Wunner + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2) as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include + +/* Apple _DSM device properties GUID */ +static const guid_t apple_prp_guid = + GUID_INIT(0xa0b5b7c6, 0x1318, 0x441c, + 0xb0, 0xc9, 0xfe, 0x69, 0x5e, 0xaf, 0x94, 0x9b); + +/** + * acpi_extract_apple_properties - retrieve and convert Apple _DSM properties + * @adev: ACPI device for which to retrieve the properties + * + * Invoke Apple's custom _DSM once to check the protocol version and once more + * to retrieve the properties. They are marshalled up in a single package as + * alternating key/value elements, unlike _DSD which stores them as a package + * of 2-element packages. Convert to _DSD format and make them available under + * the primary fwnode. 
+ */ +void acpi_extract_apple_properties(struct acpi_device *adev) +{ + unsigned int i, j = 0, newsize = 0, numprops, numvalid; + union acpi_object *props, *newprops; + unsigned long *valid = NULL; + void *free_space; + + if (!x86_apple_machine) + return; + + props = acpi_evaluate_dsm_typed(adev->handle, &apple_prp_guid, 1, 0, + NULL, ACPI_TYPE_BUFFER); + if (!props) + return; + + if (!props->buffer.length) + goto out_free; + + if (props->buffer.pointer[0] != 3) { + acpi_handle_info(adev->handle, FW_INFO + "unsupported properties version %*ph\n", + props->buffer.length, props->buffer.pointer); + goto out_free; + } + + ACPI_FREE(props); + props = acpi_evaluate_dsm_typed(adev->handle, &apple_prp_guid, 1, 1, + NULL, ACPI_TYPE_PACKAGE); + if (!props) + return; + + numprops = props->package.count / 2; + if (!numprops) + goto out_free; + + valid = kcalloc(BITS_TO_LONGS(numprops), sizeof(long), GFP_KERNEL); + if (!valid) + goto out_free; + + /* newsize = key length + value length of each tuple */ + for (i = 0; i < numprops; i++) { + union acpi_object *key = &props->package.elements[i * 2]; + union acpi_object *val = &props->package.elements[i * 2 + 1]; + + if ( key->type != ACPI_TYPE_STRING || + (val->type != ACPI_TYPE_INTEGER && + val->type != ACPI_TYPE_BUFFER)) + continue; /* skip invalid properties */ + + __set_bit(i, valid); + newsize += key->string.length + 1; + if ( val->type == ACPI_TYPE_BUFFER) + newsize += val->buffer.length; + } + + numvalid = bitmap_weight(valid, numprops); + if (numprops > numvalid) + acpi_handle_info(adev->handle, FW_INFO + "skipped %u properties: wrong type\n", + numprops - numvalid); + if (numvalid == 0) + goto out_free; + + /* newsize += top-level package + 3 objects for each key/value tuple */ + newsize += (1 + 3 * numvalid) * sizeof(union acpi_object); + newprops = ACPI_ALLOCATE_ZEROED(newsize); + if (!newprops) + goto out_free; + + /* layout: top-level package | packages | key/value tuples | strings */ + newprops->type = ACPI_TYPE_PACKAGE; + newprops->package.count = numvalid; + newprops->package.elements = &newprops[1]; + free_space = &newprops[1 + 3 * numvalid]; + + for_each_set_bit(i, valid, numprops) { + union acpi_object *key = &props->package.elements[i * 2]; + union acpi_object *val = &props->package.elements[i * 2 + 1]; + unsigned int k = 1 + numvalid + j * 2; /* index into newprops */ + unsigned int v = k + 1; + + newprops[1 + j].type = ACPI_TYPE_PACKAGE; + newprops[1 + j].package.count = 2; + newprops[1 + j].package.elements = &newprops[k]; + + newprops[k].type = ACPI_TYPE_STRING; + newprops[k].string.length = key->string.length; + newprops[k].string.pointer = free_space; + memcpy(free_space, key->string.pointer, key->string.length); + free_space += key->string.length + 1; + + newprops[v].type = val->type; + if (val->type == ACPI_TYPE_INTEGER) { + newprops[v].integer.value = val->integer.value; + } else { + newprops[v].buffer.length = val->buffer.length; + newprops[v].buffer.pointer = free_space; + memcpy(free_space, val->buffer.pointer, + val->buffer.length); + free_space += val->buffer.length; + } + j++; /* count valid properties */ + } + WARN_ON(free_space != (void *)newprops + newsize); + + adev->data.properties = newprops; + adev->data.pointer = newprops; + +out_free: + ACPI_FREE(props); + kfree(valid); +} diff --git a/drivers/android/binder.c b/drivers/android/binder.c index d055b3f2a207..ab34239a76ee 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -2217,7 +2217,7 @@ static void binder_transaction_buffer_release(struct 
binder_proc *proc, debug_id, (u64)fda->num_fds); continue; } - fd_array = (u32 *)(parent_buffer + fda->parent_offset); + fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset); for (fd_index = 0; fd_index < fda->num_fds; fd_index++) task_close_fd(proc, fd_array[fd_index]); } break; @@ -2326,7 +2326,6 @@ static int binder_translate_handle(struct flat_binder_object *fp, (u64)node->ptr); binder_node_unlock(node); } else { - int ret; struct binder_ref_data dest_rdata; binder_node_unlock(node); @@ -2442,7 +2441,7 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda, */ parent_buffer = parent->buffer - binder_alloc_get_user_buffer_offset(&target_proc->alloc); - fd_array = (u32 *)(parent_buffer + fda->parent_offset); + fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset); if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) { binder_user_error("%d:%d parent offset not aligned correctly.\n", proc->pid, thread->pid); @@ -2508,7 +2507,7 @@ static int binder_fixup_parent(struct binder_transaction *t, proc->pid, thread->pid); return -EINVAL; } - parent_buffer = (u8 *)(parent->buffer - + parent_buffer = (u8 *)((uintptr_t)parent->buffer - binder_alloc_get_user_buffer_offset( &target_proc->alloc)); *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer; @@ -3083,6 +3082,7 @@ static void binder_transaction(struct binder_proc *proc, err_dead_proc_or_thread: return_error = BR_DEAD_REPLY; return_error_line = __LINE__; + binder_dequeue_work(proc, tcomplete); err_translate_failed: err_bad_object_type: err_bad_offset: diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index 8fe165844e47..064f5e31ec55 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -913,6 +913,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item, struct binder_alloc *alloc; uintptr_t page_addr; size_t index; + struct vm_area_struct *vma; alloc = page->alloc; if (!mutex_trylock(&alloc->mutex)) @@ -923,16 +924,22 @@ enum lru_status binder_alloc_free_page(struct list_head *item, index = page - alloc->pages; page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; - if (alloc->vma) { + vma = alloc->vma; + if (vma) { mm = get_task_mm(alloc->tsk); if (!mm) goto err_get_task_mm_failed; if (!down_write_trylock(&mm->mmap_sem)) goto err_down_write_mmap_sem_failed; + } + list_lru_isolate(lru, item); + spin_unlock(lock); + + if (vma) { trace_binder_unmap_user_start(alloc, index); - zap_page_range(alloc->vma, + zap_page_range(vma, page_addr + alloc->user_buffer_offset, PAGE_SIZE); @@ -950,13 +957,12 @@ enum lru_status binder_alloc_free_page(struct list_head *item, trace_binder_unmap_kernel_end(alloc, index); - list_lru_isolate(lru, item); - + spin_lock(lock); mutex_unlock(&alloc->mutex); - return LRU_REMOVED; + return LRU_REMOVED_RETRY; err_down_write_mmap_sem_failed: - mmput(mm); + mmput_async(mm); err_get_task_mm_failed: err_page_already_freed: mutex_unlock(&alloc->mutex); diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index 363fc5330c21..488c93724220 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -153,6 +153,16 @@ config AHCI_CEVA If unsure, say N. +config AHCI_MTK + tristate "MediaTek AHCI SATA support" + depends on ARCH_MEDIATEK + select MFD_SYSCON + help + This option enables support for the MediaTek SoC's + onboard AHCI SATA controller. + + If unsure, say N. 
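Returning to the binder hunks above, which add (uintptr_t) casts before mixing 64-bit buffer offsets into pointer arithmetic: the sketch below is not part of the patch and uses invented names; it illustrates the assumed motivation, namely that on a 32-bit build the u64 offset promotes the whole expression to 64 bits and the conversion back to a pointer then narrows.

#include <linux/types.h>

/* parent_offset stands in for binder's u64 object offsets. */
static u32 *example_fd_array(uintptr_t parent_buffer, u64 parent_offset)
{
	/*
	 * (u32 *)(parent_buffer + parent_offset) would do the addition in
	 * 64 bits and then cast a 64-bit value to a 32-bit pointer on
	 * 32-bit kernels; narrowing the offset first keeps the expression
	 * at native pointer width.
	 */
	return (u32 *)(parent_buffer + (uintptr_t)parent_offset);
}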
+ config AHCI_MVEBU tristate "Marvell EBU AHCI SATA support" depends on ARCH_MVEBU diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile index a26ef5a93919..ff9cd2e37458 100644 --- a/drivers/ata/Makefile +++ b/drivers/ata/Makefile @@ -17,6 +17,7 @@ obj-$(CONFIG_AHCI_CEVA) += ahci_ceva.o libahci.o libahci_platform.o obj-$(CONFIG_AHCI_DA850) += ahci_da850.o libahci.o libahci_platform.o obj-$(CONFIG_AHCI_DM816) += ahci_dm816.o libahci.o libahci_platform.o obj-$(CONFIG_AHCI_IMX) += ahci_imx.o libahci.o libahci_platform.o +obj-$(CONFIG_AHCI_MTK) += ahci_mtk.o libahci.o libahci_platform.o obj-$(CONFIG_AHCI_MVEBU) += ahci_mvebu.o libahci.o libahci_platform.o obj-$(CONFIG_AHCI_OCTEON) += ahci_octeon.o obj-$(CONFIG_AHCI_SUNXI) += ahci_sunxi.o libahci.o libahci_platform.o diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 5a5fd0b404eb..9f78bb03bb76 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -621,8 +621,11 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev, static int ahci_pci_reset_controller(struct ata_host *host) { struct pci_dev *pdev = to_pci_dev(host->dev); + int rc; - ahci_reset_controller(host); + rc = ahci_reset_controller(host); + if (rc) + return rc; if (pdev->vendor == PCI_VENDOR_ID_INTEL) { struct ahci_host_priv *hpriv = host->private_data; @@ -1469,7 +1472,14 @@ static void ahci_remap_check(struct pci_dev *pdev, int bar, return; dev_warn(&pdev->dev, "Found %d remapped NVMe devices.\n", count); - dev_warn(&pdev->dev, "Switch your BIOS from RAID to AHCI mode to use them.\n"); + dev_warn(&pdev->dev, + "Switch your BIOS from RAID to AHCI mode to use them.\n"); + + /* + * Don't rely on the msi-x capability in the remap case, + * share the legacy interrupt across ahci and remapped devices. + */ + hpriv->flags |= AHCI_HFLAG_NO_MSI; } static int ahci_get_irq_vector(struct ata_host *host, int port) diff --git a/drivers/ata/ahci_mtk.c b/drivers/ata/ahci_mtk.c new file mode 100644 index 000000000000..80854f71559a --- /dev/null +++ b/drivers/ata/ahci_mtk.c @@ -0,0 +1,196 @@ +/* + * MediaTek AHCI SATA driver + * + * Copyright (c) 2017 MediaTek Inc. + * Author: Ryder Lee + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ahci.h" + +#define DRV_NAME "ahci" + +#define SYS_CFG 0x14 +#define SYS_CFG_SATA_MSK GENMASK(31, 30) +#define SYS_CFG_SATA_EN BIT(31) + +struct mtk_ahci_plat { + struct regmap *mode; + struct reset_control *axi_rst; + struct reset_control *sw_rst; + struct reset_control *reg_rst; +}; + +static const struct ata_port_info ahci_port_info = { + .flags = AHCI_FLAG_COMMON, + .pio_mask = ATA_PIO4, + .udma_mask = ATA_UDMA6, + .port_ops = &ahci_platform_ops, +}; + +static struct scsi_host_template ahci_platform_sht = { + AHCI_SHT(DRV_NAME), +}; + +static int mtk_ahci_platform_resets(struct ahci_host_priv *hpriv, + struct device *dev) +{ + struct mtk_ahci_plat *plat = hpriv->plat_data; + int err; + + /* reset AXI bus and PHY part */ + plat->axi_rst = devm_reset_control_get_optional_exclusive(dev, "axi"); + if (PTR_ERR(plat->axi_rst) == -EPROBE_DEFER) + return PTR_ERR(plat->axi_rst); + + plat->sw_rst = devm_reset_control_get_optional_exclusive(dev, "sw"); + if (PTR_ERR(plat->sw_rst) == -EPROBE_DEFER) + return PTR_ERR(plat->sw_rst); + + plat->reg_rst = devm_reset_control_get_optional_exclusive(dev, "reg"); + if (PTR_ERR(plat->reg_rst) == -EPROBE_DEFER) + return PTR_ERR(plat->reg_rst); + + err = reset_control_assert(plat->axi_rst); + if (err) { + dev_err(dev, "failed to assert AXI bus\n"); + return err; + } + + err = reset_control_assert(plat->sw_rst); + if (err) { + dev_err(dev, "failed to assert PHY digital part\n"); + return err; + } + + err = reset_control_assert(plat->reg_rst); + if (err) { + dev_err(dev, "failed to assert PHY register part\n"); + return err; + } + + err = reset_control_deassert(plat->reg_rst); + if (err) { + dev_err(dev, "failed to deassert PHY register part\n"); + return err; + } + + err = reset_control_deassert(plat->sw_rst); + if (err) { + dev_err(dev, "failed to deassert PHY digital part\n"); + return err; + } + + err = reset_control_deassert(plat->axi_rst); + if (err) { + dev_err(dev, "failed to deassert AXI bus\n"); + return err; + } + + return 0; +} + +static int mtk_ahci_parse_property(struct ahci_host_priv *hpriv, + struct device *dev) +{ + struct mtk_ahci_plat *plat = hpriv->plat_data; + struct device_node *np = dev->of_node; + + /* enable SATA function if needed */ + if (of_find_property(np, "mediatek,phy-mode", NULL)) { + plat->mode = syscon_regmap_lookup_by_phandle( + np, "mediatek,phy-mode"); + if (IS_ERR(plat->mode)) { + dev_err(dev, "missing phy-mode phandle\n"); + return PTR_ERR(plat->mode); + } + + regmap_update_bits(plat->mode, SYS_CFG, SYS_CFG_SATA_MSK, + SYS_CFG_SATA_EN); + } + + of_property_read_u32(np, "ports-implemented", &hpriv->force_port_map); + + return 0; +} + +static int mtk_ahci_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mtk_ahci_plat *plat; + struct ahci_host_priv *hpriv; + int err; + + plat = devm_kzalloc(dev, sizeof(*plat), GFP_KERNEL); + if (!plat) + return -ENOMEM; + + hpriv = ahci_platform_get_resources(pdev); + if (IS_ERR(hpriv)) + return PTR_ERR(hpriv); + + hpriv->plat_data = plat; + + err = mtk_ahci_parse_property(hpriv, dev); + if (err) + return err; + + err = mtk_ahci_platform_resets(hpriv, dev); + if (err) + return err; + + err = ahci_platform_enable_resources(hpriv); + if (err) + return err; + + err = ahci_platform_init_host(pdev, hpriv, &ahci_port_info, + &ahci_platform_sht); + if (err) + goto disable_resources; + + return 0; + +disable_resources: + ahci_platform_disable_resources(hpriv); + 
return err; +} + +static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_platform_suspend, + ahci_platform_resume); + +static const struct of_device_id ahci_of_match[] = { + { .compatible = "mediatek,mtk-ahci", }, + {}, +}; +MODULE_DEVICE_TABLE(of, ahci_of_match); + +static struct platform_driver mtk_ahci_driver = { + .probe = mtk_ahci_probe, + .remove = ata_platform_remove_one, + .driver = { + .name = DRV_NAME, + .of_match_table = ahci_of_match, + .pm = &ahci_pm_ops, + }, +}; +module_platform_driver(mtk_ahci_driver); + +MODULE_DESCRIPTION("MediaTek SATA AHCI Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c index 62a04c8fb5c9..99f9a895a459 100644 --- a/drivers/ata/ahci_platform.c +++ b/drivers/ata/ahci_platform.c @@ -93,6 +93,7 @@ MODULE_DEVICE_TABLE(acpi, ahci_acpi_match); static struct platform_driver ahci_driver = { .probe = ahci_probe, .remove = ata_platform_remove_one, + .shutdown = ahci_platform_shutdown, .driver = { .name = DRV_NAME, .of_match_table = ahci_of_match, diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index 8401c3b5be92..b702c20fbc2b 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c @@ -492,6 +492,7 @@ static const struct ich_laptop ich_laptop[] = { { 0x27DF, 0x152D, 0x0778 }, /* ICH7 on unknown Intel */ { 0x24CA, 0x1025, 0x0061 }, /* ICH4 on ACER Aspire 2023WLMi */ { 0x24CA, 0x1025, 0x003d }, /* ICH4 on ACER TM290 */ + { 0x24CA, 0x10CF, 0x11AB }, /* ICH4M on Fujitsu-Siemens Lifebook S6120 */ { 0x266F, 0x1025, 0x0066 }, /* ICH6 on ACER Aspire 1694WLMi */ { 0x2653, 0x1043, 0x82D8 }, /* ICH6M on Asus Eee 701 */ { 0x27df, 0x104d, 0x900e }, /* ICH7 on Sony TZ-90 */ diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c index cd2eab6aa92e..a270a1173c8c 100644 --- a/drivers/ata/libahci_platform.c +++ b/drivers/ata/libahci_platform.c @@ -602,6 +602,40 @@ static void ahci_host_stop(struct ata_host *host) ahci_platform_disable_resources(hpriv); } +/** + * ahci_platform_shutdown - Disable interrupts and stop DMA for host ports + * @dev: platform device pointer for the host + * + * This function is called during system shutdown and performs the minimal + * deconfiguration required to ensure that an ahci_platform host cannot + * corrupt or otherwise interfere with a new kernel being started with kexec.
+ */ +void ahci_platform_shutdown(struct platform_device *pdev) +{ + struct ata_host *host = platform_get_drvdata(pdev); + struct ahci_host_priv *hpriv = host->private_data; + void __iomem *mmio = hpriv->mmio; + int i; + + for (i = 0; i < host->n_ports; i++) { + struct ata_port *ap = host->ports[i]; + + /* Disable port interrupts */ + if (ap->ops->freeze) + ap->ops->freeze(ap); + + /* Stop the port DMA engines */ + if (ap->ops->port_stop) + ap->ops->port_stop(ap); + } + + /* Disable and clear host interrupts */ + writel(readl(mmio + HOST_CTL) & ~HOST_IRQ_EN, mmio + HOST_CTL); + readl(mmio + HOST_CTL); /* flush */ + writel(GENMASK(host->n_ports, 0), mmio + HOST_IRQ_STAT); +} +EXPORT_SYMBOL_GPL(ahci_platform_shutdown); + #ifdef CONFIG_PM_SLEEP /** * ahci_platform_suspend_host - Suspend an ahci-platform host diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 1945a8ea2099..ee4c1ec9dca0 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -3234,19 +3234,19 @@ static const struct ata_timing ata_timing[] = { }; #define ENOUGH(v, unit) (((v)-1)/(unit)+1) -#define EZ(v, unit) ((v)?ENOUGH(v, unit):0) +#define EZ(v, unit) ((v)?ENOUGH(((v) * 1000), unit):0) static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT) { - q->setup = EZ(t->setup * 1000, T); - q->act8b = EZ(t->act8b * 1000, T); - q->rec8b = EZ(t->rec8b * 1000, T); - q->cyc8b = EZ(t->cyc8b * 1000, T); - q->active = EZ(t->active * 1000, T); - q->recover = EZ(t->recover * 1000, T); - q->dmack_hold = EZ(t->dmack_hold * 1000, T); - q->cycle = EZ(t->cycle * 1000, T); - q->udma = EZ(t->udma * 1000, UT); + q->setup = EZ(t->setup, T); + q->act8b = EZ(t->act8b, T); + q->rec8b = EZ(t->rec8b, T); + q->cyc8b = EZ(t->cyc8b, T); + q->active = EZ(t->active, T); + q->recover = EZ(t->recover, T); + q->dmack_hold = EZ(t->dmack_hold, T); + q->cycle = EZ(t->cycle, T); + q->udma = EZ(t->udma, UT); } void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b, diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c index 8a01d09ac4db..23a62e4015d0 100644 --- a/drivers/ata/libata-zpodd.c +++ b/drivers/ata/libata-zpodd.c @@ -34,7 +34,7 @@ struct zpodd { static int eject_tray(struct ata_device *dev) { struct ata_taskfile tf; - const char cdb[] = { GPCMD_START_STOP_UNIT, + static const char cdb[] = { GPCMD_START_STOP_UNIT, 0, 0, 0, 0x02, /* LoEj */ 0, 0, 0, 0, 0, 0, 0, @@ -55,7 +55,7 @@ static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev) unsigned int ret; struct rm_feature_desc *desc = (void *)(buf + 8); struct ata_taskfile tf; - char cdb[] = { GPCMD_GET_CONFIGURATION, + static const char cdb[] = { GPCMD_GET_CONFIGURATION, 2, /* only 1 feature descriptor requested */ 0, 3, /* 3, removable medium feature */ 0, 0, 0,/* reserved */ diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c index 8d4d959a821c..8706533db57b 100644 --- a/drivers/ata/pata_amd.c +++ b/drivers/ata/pata_amd.c @@ -616,6 +616,7 @@ static const struct pci_device_id amd[] = { { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE), 8 }, { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE), 8 }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), 9 }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_DEV_IDE), 9 }, { }, }; diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c index 6c15a554efbe..dc1255294628 100644 --- a/drivers/ata/pata_cs5536.c +++ b/drivers/ata/pata_cs5536.c @@ -289,6 +289,7 @@ static int cs5536_init_one(struct pci_dev *dev, 
const struct pci_device_id *id) static const struct pci_device_id cs5536[] = { { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_DEV_IDE), }, { }, }; diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c index 1ba03d6df951..d3d851b014a3 100644 --- a/drivers/ata/pata_octeon_cf.c +++ b/drivers/ata/pata_octeon_cf.c @@ -840,7 +840,6 @@ static int octeon_cf_probe(struct platform_device *pdev) struct property *reg_prop; int n_addr, n_size, reg_len; struct device_node *node; - const void *prop; void __iomem *cs0; void __iomem *cs1 = NULL; struct ata_host *host; @@ -850,7 +849,7 @@ static int octeon_cf_probe(struct platform_device *pdev) void __iomem *base; struct octeon_cf_port *cf_port; int rv = -ENOMEM; - + u32 bus_width; node = pdev->dev.of_node; if (node == NULL) @@ -860,11 +859,10 @@ static int octeon_cf_probe(struct platform_device *pdev) if (!cf_port) return -ENOMEM; - cf_port->is_true_ide = (of_find_property(node, "cavium,true-ide", NULL) != NULL); + cf_port->is_true_ide = of_property_read_bool(node, "cavium,true-ide"); - prop = of_get_property(node, "cavium,bus-width", NULL); - if (prop) - is_16bit = (be32_to_cpup(prop) == 16); + if (of_property_read_u32(node, "cavium,bus-width", &bus_width) == 0) + is_16bit = (bus_width == 16); else is_16bit = false; diff --git a/drivers/ata/sata_gemini.c b/drivers/ata/sata_gemini.c index 8c704523bae7..46950e0267e0 100644 --- a/drivers/ata/sata_gemini.c +++ b/drivers/ata/sata_gemini.c @@ -15,6 +15,7 @@ #include #include #include +#include #include "sata_gemini.h" #define DRV_NAME "gemini_sata_bridge" @@ -43,17 +44,6 @@ struct sata_gemini { struct clk *sata1_pclk; }; -/* Global IDE PAD Skew Control Register */ -#define GEMINI_GLOBAL_IDE_SKEW_CTRL 0x18 -#define GEMINI_IDE1_HOST_STROBE_DELAY_SHIFT 28 -#define GEMINI_IDE1_DEVICE_STROBE_DELAY_SHIFT 24 -#define GEMINI_IDE1_OUTPUT_IO_SKEW_SHIFT 20 -#define GEMINI_IDE1_INPUT_IO_SKEW_SHIFT 16 -#define GEMINI_IDE0_HOST_STROBE_DELAY_SHIFT 12 -#define GEMINI_IDE0_DEVICE_STROBE_DELAY_SHIFT 8 -#define GEMINI_IDE0_OUTPUT_IO_SKEW_SHIFT 4 -#define GEMINI_IDE0_INPUT_IO_SKEW_SHIFT 0 - /* Miscellaneous Control Register */ #define GEMINI_GLOBAL_MISC_CTRL 0x30 /* @@ -91,8 +81,6 @@ struct sata_gemini { #define GEMINI_IDE_IOMUX_MODE2 (2 << 24) #define GEMINI_IDE_IOMUX_MODE3 (3 << 24) #define GEMINI_IDE_IOMUX_SHIFT (24) -#define GEMINI_IDE_PADS_ENABLE BIT(4) -#define GEMINI_PFLASH_PADS_DISABLE BIT(1) /* * Registers directly controlling the PATA<->SATA adapters @@ -274,14 +262,14 @@ static int gemini_sata_bridge_init(struct sata_gemini *sg) return ret; } - sg->sata0_reset = devm_reset_control_get(dev, "sata0"); + sg->sata0_reset = devm_reset_control_get_exclusive(dev, "sata0"); if (IS_ERR(sg->sata0_reset)) { dev_err(dev, "no SATA0 reset controller\n"); clk_disable_unprepare(sg->sata1_pclk); clk_disable_unprepare(sg->sata0_pclk); return PTR_ERR(sg->sata0_reset); } - sg->sata1_reset = devm_reset_control_get(dev, "sata1"); + sg->sata1_reset = devm_reset_control_get_exclusive(dev, "sata1"); if (IS_ERR(sg->sata1_reset)) { dev_err(dev, "no SATA1 reset controller\n"); clk_disable_unprepare(sg->sata1_pclk); @@ -300,17 +288,39 @@ static int gemini_sata_bridge_init(struct sata_gemini *sg) return 0; } +static int gemini_setup_ide_pins(struct device *dev) +{ + struct pinctrl *p; + struct pinctrl_state *ide_state; + int ret; + + p = devm_pinctrl_get(dev); + if (IS_ERR(p)) + return PTR_ERR(p); + + ide_state = pinctrl_lookup_state(p, "ide"); + if (IS_ERR(ide_state)) + return 
PTR_ERR(ide_state); + + ret = pinctrl_select_state(p, ide_state); + if (ret) { + dev_err(dev, "could not select IDE state\n"); + return ret; + } + + return 0; +} + static int gemini_sata_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct sata_gemini *sg; - static struct regmap *map; + struct regmap *map; struct resource *res; enum gemini_muxmode muxmode; u32 gmode; u32 gmask; - u32 val; int ret; sg = devm_kzalloc(dev, sizeof(*sg), GFP_KERNEL); @@ -362,16 +372,6 @@ static int gemini_sata_probe(struct platform_device *pdev) gmask = GEMINI_IDE_IOMUX_MASK; gmode = (muxmode << GEMINI_IDE_IOMUX_SHIFT); - /* - * If we mux out the IDE, parallel flash must be disabled. - * SATA0 and SATA1 have dedicated pins and may coexist with - * parallel flash. - */ - if (sg->ide_pins) - gmode |= GEMINI_IDE_PADS_ENABLE | GEMINI_PFLASH_PADS_DISABLE; - else - gmask |= GEMINI_IDE_PADS_ENABLE; - ret = regmap_update_bits(map, GEMINI_GLOBAL_MISC_CTRL, gmask, gmode); if (ret) { dev_err(dev, "unable to set up IDE muxing\n"); @@ -379,14 +379,15 @@ static int gemini_sata_probe(struct platform_device *pdev) goto out_unprep_clk; } - /* FIXME: add more elaborate IDE skew control handling */ + /* + * Route out the IDE pins if desired. + * This is done by looking up a special pin control state called + * "ide" that will route out the IDE pins. + */ if (sg->ide_pins) { - ret = regmap_read(map, GEMINI_GLOBAL_IDE_SKEW_CTRL, &val); - if (ret) { - dev_err(dev, "cannot read IDE skew control register\n"); + ret = gemini_setup_ide_pins(dev); + if (ret) return ret; - } - dev_info(dev, "IDE skew control: %08x\n", val); } dev_info(dev, "set up the Gemini IDE/SATA nexus\n"); diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c index 0fd6ac7e57ba..a9d692c6c182 100644 --- a/drivers/ata/sata_svw.c +++ b/drivers/ata/sata_svw.c @@ -339,7 +339,7 @@ static int k2_sata_show_info(struct seq_file *m, struct Scsi_Host *shost) if (!reg) continue; if (index == *reg) { - seq_printf(m, "devspec: %s\n", np->full_name); + seq_printf(m, "devspec: %pOF\n", np); break; } } diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c index 1fd25e872ece..8d98130ecd40 100644 --- a/drivers/atm/adummy.c +++ b/drivers/atm/adummy.c @@ -71,7 +71,7 @@ static struct attribute *adummy_attrs[] = { NULL }; -static struct attribute_group adummy_group_attrs = { +static const struct attribute_group adummy_group_attrs = { .name = NULL, /* We want them in dev's root folder */ .attrs = adummy_attrs }; @@ -130,7 +130,7 @@ adummy_proc_read(struct atm_dev *dev, loff_t *pos, char *page) return 0; } -static struct atmdev_ops adummy_ops = +static const struct atmdev_ops adummy_ops = { .open = adummy_open, .close = adummy_close, diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c index 906705e5f776..acf16c323e38 100644 --- a/drivers/atm/ambassador.c +++ b/drivers/atm/ambassador.c @@ -2374,7 +2374,7 @@ MODULE_PARM_DESC(pci_lat, "PCI latency in bus cycles"); /********** module entry **********/ -static struct pci_device_id amb_pci_tbl[] = { +static const struct pci_device_id amb_pci_tbl[] = { { PCI_VDEVICE(MADGE, PCI_DEVICE_ID_MADGE_AMBASSADOR), 0 }, { PCI_VDEVICE(MADGE, PCI_DEVICE_ID_MADGE_AMBASSADOR_BAD), 0 }, { 0, } diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c index 56fa16c85ebf..afebeb1c3e1e 100644 --- a/drivers/atm/atmtcp.c +++ b/drivers/atm/atmtcp.c @@ -342,7 +342,7 @@ static struct atmdev_ops atmtcp_v_dev_ops = { */ -static struct atmdev_ops atmtcp_c_dev_ops = { +static const struct 
atmdev_ops atmtcp_c_dev_ops = { .close = atmtcp_c_close, .send = atmtcp_c_send }; diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c index b042ec458544..ce47eb17901d 100644 --- a/drivers/atm/eni.c +++ b/drivers/atm/eni.c @@ -2292,7 +2292,7 @@ static int eni_init_one(struct pci_dev *pci_dev, } -static struct pci_device_id eni_pci_tbl[] = { +static const struct pci_device_id eni_pci_tbl[] = { { PCI_VDEVICE(EF, PCI_DEVICE_ID_EF_ATM_FPGA), 0 /* FPGA */ }, { PCI_VDEVICE(EF, PCI_DEVICE_ID_EF_ATM_ASIC), 1 /* ASIC */ }, { 0, } diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c index 22dcab952a24..6b6368a56526 100644 --- a/drivers/atm/firestream.c +++ b/drivers/atm/firestream.c @@ -2030,7 +2030,7 @@ static void firestream_remove_one(struct pci_dev *pdev) func_exit (); } -static struct pci_device_id firestream_pci_tbl[] = { +static const struct pci_device_id firestream_pci_tbl[] = { { PCI_VDEVICE(FUJITSU_ME, PCI_DEVICE_ID_FUJITSU_FS50), FS_IS50}, { PCI_VDEVICE(FUJITSU_ME, PCI_DEVICE_ID_FUJITSU_FS155), FS_IS155}, { 0, } diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c index f0433adcd8fc..f8b7e86907cc 100644 --- a/drivers/atm/fore200e.c +++ b/drivers/atm/fore200e.c @@ -2757,7 +2757,7 @@ static void fore200e_pca_remove_one(struct pci_dev *pci_dev) } -static struct pci_device_id fore200e_pca_tbl[] = { +static const struct pci_device_id fore200e_pca_tbl[] = { { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &fore200e_bus[0] }, { 0, } diff --git a/drivers/atm/he.c b/drivers/atm/he.c index 37ee21c5a5ca..e58538c29377 100644 --- a/drivers/atm/he.c +++ b/drivers/atm/he.c @@ -161,7 +161,7 @@ static unsigned int clocktab[] = { CLK_LOW }; -static struct atmdev_ops he_ops = +static const struct atmdev_ops he_ops = { .open = he_open, .close = he_close, @@ -2851,7 +2851,7 @@ MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)"); module_param(sdh, bool, 0); MODULE_PARM_DESC(sdh, "use SDH framing (default 0)"); -static struct pci_device_id he_pci_tbl[] = { +static const struct pci_device_id he_pci_tbl[] = { { PCI_VDEVICE(FORE, PCI_DEVICE_ID_FORE_HE), 0 }, { 0, } }; diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c index 0f18480b33b5..7e76b35f422c 100644 --- a/drivers/atm/horizon.c +++ b/drivers/atm/horizon.c @@ -2867,7 +2867,7 @@ MODULE_PARM_DESC(max_tx_size, "maximum size of TX AAL5 frames"); MODULE_PARM_DESC(max_rx_size, "maximum size of RX AAL5 frames"); MODULE_PARM_DESC(pci_lat, "PCI latency in bus cycles"); -static struct pci_device_id hrz_pci_tbl[] = { +static const struct pci_device_id hrz_pci_tbl[] = { { PCI_VENDOR_ID_MADGE, PCI_DEVICE_ID_MADGE_HORIZON, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { 0, } diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c index 60bacba03d17..47f3c4ae0594 100644 --- a/drivers/atm/idt77252.c +++ b/drivers/atm/idt77252.c @@ -134,7 +134,7 @@ static int idt77252_proc_read(struct atm_dev *dev, loff_t * pos, static void idt77252_softint(struct work_struct *work); -static struct atmdev_ops idt77252_ops = +static const struct atmdev_ops idt77252_ops = { .dev_close = idt77252_dev_close, .open = idt77252_open, @@ -3725,7 +3725,7 @@ static int idt77252_init_one(struct pci_dev *pcidev, return err; } -static struct pci_device_id idt77252_pci_tbl[] = +static const struct pci_device_id idt77252_pci_tbl[] = { { PCI_VDEVICE(IDT, PCI_DEVICE_ID_IDT_IDT77252), 0 }, { 0, } diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c index a4fa6c82261e..fc72b763fdd7 100644 --- a/drivers/atm/iphase.c +++ 
b/drivers/atm/iphase.c @@ -3266,7 +3266,7 @@ static void ia_remove_one(struct pci_dev *pdev) kfree(iadev); } -static struct pci_device_id ia_pci_tbl[] = { +static const struct pci_device_id ia_pci_tbl[] = { { PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, }, { PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, }, { 0,} diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c index 1a9bc51284b0..2351dad78ff5 100644 --- a/drivers/atm/lanai.c +++ b/drivers/atm/lanai.c @@ -2589,7 +2589,7 @@ static int lanai_init_one(struct pci_dev *pci, return result; } -static struct pci_device_id lanai_pci_tbl[] = { +static const struct pci_device_id lanai_pci_tbl[] = { { PCI_VDEVICE(EF, PCI_DEVICE_ID_EF_ATM_LANAI2) }, { PCI_VDEVICE(EF, PCI_DEVICE_ID_EF_ATM_LANAIHB) }, { 0, } /* terminal entry */ diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c index d879f3bca107..a9702836cbae 100644 --- a/drivers/atm/nicstar.c +++ b/drivers/atm/nicstar.c @@ -154,7 +154,7 @@ static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr); static struct ns_dev *cards[NS_MAX_CARDS]; static unsigned num_cards; -static struct atmdev_ops atm_ops = { +static const struct atmdev_ops atm_ops = { .open = ns_open, .close = ns_close, .ioctl = ns_ioctl, @@ -253,7 +253,7 @@ static void nicstar_remove_one(struct pci_dev *pcidev) kfree(card); } -static struct pci_device_id nicstar_pci_tbl[] = { +static const struct pci_device_id nicstar_pci_tbl[] = { { PCI_VDEVICE(IDT, PCI_DEVICE_ID_IDT_IDT77201), 0 }, {0,} /* terminate list */ }; diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c index c8f2ca6d8b29..0df1a1c80b00 100644 --- a/drivers/atm/solos-pci.c +++ b/drivers/atm/solos-pci.c @@ -611,7 +611,7 @@ static struct attribute *solos_attrs[] = { NULL }; -static struct attribute_group solos_attr_group = { +static const struct attribute_group solos_attr_group = { .attrs = solos_attrs, .name = "parameters", }; @@ -628,7 +628,7 @@ static struct attribute *gpio_attrs[] = { NULL }; -static struct attribute_group gpio_attr_group = { +static const struct attribute_group gpio_attr_group = { .attrs = gpio_attrs, .name = "gpio", }; @@ -1187,7 +1187,7 @@ static int psend(struct atm_vcc *vcc, struct sk_buff *skb) return 0; } -static struct atmdev_ops fpga_ops = { +static const struct atmdev_ops fpga_ops = { .open = popen, .close = pclose, .ioctl = NULL, @@ -1476,7 +1476,7 @@ static void fpga_remove(struct pci_dev *dev) kfree(card); } -static struct pci_device_id fpga_pci_tbl[] = { +static const struct pci_device_id fpga_pci_tbl[] = { { 0x10ee, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { 0, } }; diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c index 07bdd51b3b9a..1ef67db03c8e 100644 --- a/drivers/atm/zatm.c +++ b/drivers/atm/zatm.c @@ -1642,7 +1642,7 @@ static int zatm_init_one(struct pci_dev *pci_dev, MODULE_LICENSE("GPL"); -static struct pci_device_id zatm_pci_tbl[] = { +static const struct pci_device_id zatm_pci_tbl[] = { { PCI_VDEVICE(ZEITNET, PCI_DEVICE_ID_ZEITNET_1221), ZATM_COPPER }, { PCI_VDEVICE(ZEITNET, PCI_DEVICE_ID_ZEITNET_1225), 0 }, { 0, } diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c index cfeb049a01ef..642afd88870b 100644 --- a/drivers/auxdisplay/charlcd.c +++ b/drivers/auxdisplay/charlcd.c @@ -647,18 +647,25 @@ static ssize_t charlcd_write(struct file *file, const char __user *buf, static int charlcd_open(struct inode *inode, struct file *file) { struct charlcd_priv *priv = to_priv(the_charlcd); + int ret; + ret = -EBUSY; if (!atomic_dec_and_test(&charlcd_available)) - 
return -EBUSY; /* open only once at a time */ + goto fail; /* open only once at a time */ + ret = -EPERM; if (file->f_mode & FMODE_READ) /* device is write-only */ - return -EPERM; + goto fail; if (priv->must_clear) { charlcd_clear_display(&priv->lcd); priv->must_clear = false; } return nonseekable_open(inode, file); + + fail: + atomic_inc(&charlcd_available); + return ret; } static int charlcd_release(struct inode *inode, struct file *file) diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c index df126dcdaf18..6911acd896d9 100644 --- a/drivers/auxdisplay/panel.c +++ b/drivers/auxdisplay/panel.c @@ -1105,14 +1105,21 @@ static ssize_t keypad_read(struct file *file, static int keypad_open(struct inode *inode, struct file *file) { - if (!atomic_dec_and_test(&keypad_available)) - return -EBUSY; /* open only once at a time */ + int ret; + ret = -EBUSY; + if (!atomic_dec_and_test(&keypad_available)) + goto fail; /* open only once at a time */ + + ret = -EPERM; if (file->f_mode & FMODE_WRITE) /* device is read-only */ - return -EPERM; + goto fail; keypad_buflen = 0; /* flush the buffer on opening */ return 0; + fail: + atomic_inc(&keypad_available); + return ret; } static int keypad_release(struct inode *inode, struct file *file) diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index f046d21de57d..1a5f6a157a57 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -140,13 +140,10 @@ config EXTRA_FIRMWARE config EXTRA_FIRMWARE_DIR string "Firmware blobs root directory" depends on EXTRA_FIRMWARE != "" - default "firmware" + default "/lib/firmware" help This option controls the directory in which the kernel build system looks for the firmware files listed in the EXTRA_FIRMWARE option. - The default is firmware/ in the kernel source tree, but by changing - this option you can point it elsewhere, such as /lib/firmware/ or - some other directory containing the firmware files. 
config FW_LOADER_USER_HELPER bool diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index 41be9ff7d70a..6df7d6676a48 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c @@ -166,11 +166,11 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu) } #ifdef CONFIG_CPU_FREQ -static cpumask_var_t cpus_to_visit; -static void parsing_done_workfn(struct work_struct *work); -static DECLARE_WORK(parsing_done_work, parsing_done_workfn); +static cpumask_var_t cpus_to_visit __initdata; +static void __init parsing_done_workfn(struct work_struct *work); +static __initdata DECLARE_WORK(parsing_done_work, parsing_done_workfn); -static int +static int __init init_cpu_capacity_callback(struct notifier_block *nb, unsigned long val, void *data) @@ -206,7 +206,7 @@ init_cpu_capacity_callback(struct notifier_block *nb, return 0; } -static struct notifier_block init_cpu_capacity_notifier = { +static struct notifier_block init_cpu_capacity_notifier __initdata = { .notifier_call = init_cpu_capacity_callback, }; @@ -232,7 +232,7 @@ static int __init register_cpufreq_notifier(void) } core_initcall(register_cpufreq_notifier); -static void parsing_done_workfn(struct work_struct *work) +static void __init parsing_done_workfn(struct work_struct *work) { cpufreq_unregister_notifier(&init_cpu_capacity_notifier, CPUFREQ_POLICY_NOTIFIER); diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 2c3b359b3536..321cd7b4d817 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -256,9 +256,9 @@ static ssize_t print_cpus_offline(struct device *dev, buf[n++] = ','; if (nr_cpu_ids == total_cpus-1) - n += snprintf(&buf[n], len - n, "%d", nr_cpu_ids); + n += snprintf(&buf[n], len - n, "%u", nr_cpu_ids); else - n += snprintf(&buf[n], len - n, "%d-%d", + n += snprintf(&buf[n], len - n, "%u-%d", nr_cpu_ids, total_cpus-1); } diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c index 1c152aed6b82..744f64f43454 100644 --- a/drivers/base/dma-coherent.c +++ b/drivers/base/dma-coherent.c @@ -37,7 +37,7 @@ static inline dma_addr_t dma_get_device_base(struct device *dev, return mem->device_base; } -static bool dma_init_coherent_memory( +static int dma_init_coherent_memory( phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags, struct dma_coherent_mem **mem) { @@ -45,25 +45,28 @@ static bool dma_init_coherent_memory( void __iomem *mem_base = NULL; int pages = size >> PAGE_SHIFT; int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long); + int ret; - if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0) - goto out; - if (!size) + if (!size) { + ret = -EINVAL; goto out; + } - if (flags & DMA_MEMORY_MAP) - mem_base = memremap(phys_addr, size, MEMREMAP_WC); - else - mem_base = ioremap(phys_addr, size); - if (!mem_base) + mem_base = memremap(phys_addr, size, MEMREMAP_WC); + if (!mem_base) { + ret = -EINVAL; goto out; - + } dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL); - if (!dma_mem) + if (!dma_mem) { + ret = -ENOMEM; goto out; + } dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL); - if (!dma_mem->bitmap) + if (!dma_mem->bitmap) { + ret = -ENOMEM; goto out; + } dma_mem->virt_base = mem_base; dma_mem->device_base = device_addr; @@ -73,17 +76,13 @@ static bool dma_init_coherent_memory( spin_lock_init(&dma_mem->spinlock); *mem = dma_mem; - return true; + return 0; out: kfree(dma_mem); - if (mem_base) { - if (flags & DMA_MEMORY_MAP) - memunmap(mem_base); - else - iounmap(mem_base); - } - return false; + if (mem_base) + 
memunmap(mem_base); + return ret; } static void dma_release_coherent_memory(struct dma_coherent_mem *mem) @@ -91,10 +90,7 @@ static void dma_release_coherent_memory(struct dma_coherent_mem *mem) if (!mem) return; - if (mem->flags & DMA_MEMORY_MAP) - memunmap(mem->virt_base); - else - iounmap(mem->virt_base); + memunmap(mem->virt_base); kfree(mem->bitmap); kfree(mem); } @@ -109,8 +105,6 @@ static int dma_assign_coherent_memory(struct device *dev, return -EBUSY; dev->dma_mem = mem; - /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */ - return 0; } @@ -118,16 +112,16 @@ int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags) { struct dma_coherent_mem *mem; + int ret; - if (!dma_init_coherent_memory(phys_addr, device_addr, size, flags, - &mem)) - return 0; + ret = dma_init_coherent_memory(phys_addr, device_addr, size, flags, &mem); + if (ret) + return ret; - if (dma_assign_coherent_memory(dev, mem) == 0) - return flags & DMA_MEMORY_MAP ? DMA_MEMORY_MAP : DMA_MEMORY_IO; - - dma_release_coherent_memory(mem); - return 0; + ret = dma_assign_coherent_memory(dev, mem); + if (ret) + dma_release_coherent_memory(mem); + return ret; } EXPORT_SYMBOL(dma_declare_coherent_memory); @@ -171,7 +165,6 @@ static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem, int order = get_order(size); unsigned long flags; int pageno; - int dma_memory_map; void *ret; spin_lock_irqsave(&mem->spinlock, flags); @@ -188,15 +181,9 @@ static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem, */ *dma_handle = mem->device_base + (pageno << PAGE_SHIFT); ret = mem->virt_base + (pageno << PAGE_SHIFT); - dma_memory_map = (mem->flags & DMA_MEMORY_MAP); spin_unlock_irqrestore(&mem->spinlock, flags); - if (dma_memory_map) - memset(ret, 0, size); - else - memset_io(ret, 0, size); - + memset(ret, 0, size); return ret; - err: spin_unlock_irqrestore(&mem->spinlock, flags); return NULL; @@ -359,14 +346,17 @@ static struct reserved_mem *dma_reserved_default_memory __initdata; static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev) { struct dma_coherent_mem *mem = rmem->priv; + int ret; - if (!mem && - !dma_init_coherent_memory(rmem->base, rmem->base, rmem->size, - DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE, - &mem)) { - pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n", - &rmem->base, (unsigned long)rmem->size / SZ_1M); - return -ENODEV; + if (!mem) { + ret = dma_init_coherent_memory(rmem->base, rmem->base, + rmem->size, + DMA_MEMORY_EXCLUSIVE, &mem); + if (ret) { + pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n", + &rmem->base, (unsigned long)rmem->size / SZ_1M); + return ret; + } } mem->use_dev_dma_pfn_offset = true; rmem->priv = mem; diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c index b555ff9dd8fc..e584eddef0a7 100644 --- a/drivers/base/dma-mapping.c +++ b/drivers/base/dma-mapping.c @@ -176,13 +176,10 @@ int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size, flags); - if (rc) { + if (!rc) devres_add(dev, res); - rc = 0; - } else { + else devres_free(res); - rc = -ENOMEM; - } return rc; } diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index a5fb884a136d..4b57cf5bc81d 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -258,38 +258,6 @@ static int fw_cache_piggyback_on_request(const char 
*name); * guarding for corner cases a global lock should be OK */ static DEFINE_MUTEX(fw_lock); -static bool __enable_firmware = false; - -static void enable_firmware(void) -{ - mutex_lock(&fw_lock); - __enable_firmware = true; - mutex_unlock(&fw_lock); -} - -static void disable_firmware(void) -{ - mutex_lock(&fw_lock); - __enable_firmware = false; - mutex_unlock(&fw_lock); -} - -/* - * When disabled only the built-in firmware and the firmware cache will be - * used to look for firmware. - */ -static bool firmware_enabled(void) -{ - bool enabled = false; - - mutex_lock(&fw_lock); - if (__enable_firmware) - enabled = true; - mutex_unlock(&fw_lock); - - return enabled; -} - static struct firmware_cache fw_cache; static struct firmware_buf *__allocate_fw_buf(const char *fw_name, @@ -1246,12 +1214,6 @@ _request_firmware(const struct firmware **firmware_p, const char *name, if (ret <= 0) /* error or already assigned */ goto out; - if (!firmware_enabled()) { - WARN(1, "firmware request while host is not available\n"); - ret = -EHOSTDOWN; - goto out; - } - ret = fw_get_filesystem_firmware(device, fw->priv); if (ret) { if (!(opt_flags & FW_OPT_NO_WARN)) @@ -1762,62 +1724,6 @@ static void device_uncache_fw_images_delay(unsigned long delay) msecs_to_jiffies(delay)); } -/** - * fw_pm_notify - notifier for suspend/resume - * @notify_block: unused - * @mode: mode we are switching to - * @unused: unused - * - * Used to modify the firmware_class state as we move in between states. - * The firmware_class implements a firmware cache to enable device driver - * to fetch firmware upon resume before the root filesystem is ready. We - * disable API calls which do not use the built-in firmware or the firmware - * cache when we know these calls will not work. - * - * The inner logic behind all this is a bit complex so it is worth summarizing - * the kernel's own suspend/resume process with context and focus on how this - * can impact the firmware API. - * - * First a review on how we go to suspend:: - * - * pm_suspend() --> enter_state() --> - * sys_sync() - * suspend_prepare() --> - * __pm_notifier_call_chain(PM_SUSPEND_PREPARE, ...); - * suspend_freeze_processes() --> - * freeze_processes() --> - * __usermodehelper_set_disable_depth(UMH_DISABLED); - * freeze all tasks ... - * freeze_kernel_threads() - * suspend_devices_and_enter() --> - * dpm_suspend_start() --> - * dpm_prepare() - * dpm_suspend() - * suspend_enter() --> - * platform_suspend_prepare() - * dpm_suspend_late() - * freeze_enter() - * syscore_suspend() - * - * When we resume we bail out of a loop from suspend_devices_and_enter() and - * unwind back out to the caller enter_state() where we were before as follows:: - * - * enter_state() --> - * suspend_devices_and_enter() --> (bail from loop) - * dpm_resume_end() --> - * dpm_resume() - * dpm_complete() - * suspend_finish() --> - * suspend_thaw_processes() --> - * thaw_processes() --> - * __usermodehelper_set_disable_depth(UMH_FREEZING); - * thaw_workqueues(); - * thaw all processes ... - * usermodehelper_enable(); - * pm_notifier_call_chain(PM_POST_SUSPEND); - * - * fw_pm_notify() works through pm_notifier_call_chain(). 
- */ static int fw_pm_notify(struct notifier_block *notify_block, unsigned long mode, void *unused) { @@ -1831,7 +1737,6 @@ static int fw_pm_notify(struct notifier_block *notify_block, */ kill_pending_fw_fallback_reqs(true); device_cache_fw_images(); - disable_firmware(); break; case PM_POST_SUSPEND: @@ -1844,7 +1749,6 @@ static int fw_pm_notify(struct notifier_block *notify_block, mutex_lock(&fw_lock); fw_cache.state = FW_LOADER_NO_CACHE; mutex_unlock(&fw_lock); - enable_firmware(); device_uncache_fw_images_delay(10 * MSEC_PER_SEC); break; @@ -1893,7 +1797,6 @@ static void __init fw_cache_init(void) static int fw_shutdown_notify(struct notifier_block *unused1, unsigned long unused2, void *unused3) { - disable_firmware(); /* * Kill all pending fallback requests to avoid both stalling shutdown, * and avoid a deadlock with the usermode_lock. @@ -1909,7 +1812,6 @@ static struct notifier_block fw_shutdown_nb = { static int __init firmware_class_init(void) { - enable_firmware(); fw_cache_init(); register_reboot_notifier(&fw_shutdown_nb); #ifdef CONFIG_FW_LOADER_USER_HELPER @@ -1921,7 +1823,6 @@ static int __init firmware_class_init(void) static void __exit firmware_class_exit(void) { - disable_firmware(); #ifdef CONFIG_PM_SLEEP unregister_syscore_ops(&fw_syscore_ops); unregister_pm_notifier(&fw_cache.pm_notify); diff --git a/drivers/base/memory.c b/drivers/base/memory.c index c7c4e0325cdb..4e3b61cda520 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -388,6 +388,19 @@ static ssize_t show_phys_device(struct device *dev, } #ifdef CONFIG_MEMORY_HOTREMOVE +static void print_allowed_zone(char *buf, int nid, unsigned long start_pfn, + unsigned long nr_pages, int online_type, + struct zone *default_zone) +{ + struct zone *zone; + + zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages); + if (zone != default_zone) { + strcat(buf, " "); + strcat(buf, zone->name); + } +} + static ssize_t show_valid_zones(struct device *dev, struct device_attribute *attr, char *buf) { @@ -395,7 +408,7 @@ static ssize_t show_valid_zones(struct device *dev, unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr); unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; unsigned long valid_start_pfn, valid_end_pfn; - bool append = false; + struct zone *default_zone; int nid; /* @@ -418,16 +431,13 @@ static ssize_t show_valid_zones(struct device *dev, } nid = pfn_to_nid(start_pfn); - if (allow_online_pfn_range(nid, start_pfn, nr_pages, MMOP_ONLINE_KERNEL)) { - strcat(buf, default_zone_for_pfn(nid, start_pfn, nr_pages)->name); - append = true; - } + default_zone = zone_for_pfn_range(MMOP_ONLINE_KEEP, nid, start_pfn, nr_pages); + strcat(buf, default_zone->name); - if (allow_online_pfn_range(nid, start_pfn, nr_pages, MMOP_ONLINE_MOVABLE)) { - if (append) - strcat(buf, " "); - strcat(buf, NODE_DATA(nid)->node_zones[ZONE_MOVABLE].name); - } + print_allowed_zone(buf, nid, start_pfn, nr_pages, MMOP_ONLINE_KERNEL, + default_zone); + print_allowed_zone(buf, nid, start_pfn, nr_pages, MMOP_ONLINE_MOVABLE, + default_zone); out: strcat(buf, "\n"); diff --git a/drivers/base/node.c b/drivers/base/node.c index d8dc83017d8d..3855902f2c5b 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -160,12 +160,12 @@ static ssize_t node_read_numastat(struct device *dev, "interleave_hit %lu\n" "local_node %lu\n" "other_node %lu\n", - sum_zone_node_page_state(dev->id, NUMA_HIT), - sum_zone_node_page_state(dev->id, NUMA_MISS), - sum_zone_node_page_state(dev->id, NUMA_FOREIGN), - 
sum_zone_node_page_state(dev->id, NUMA_INTERLEAVE_HIT), - sum_zone_node_page_state(dev->id, NUMA_LOCAL), - sum_zone_node_page_state(dev->id, NUMA_OTHER)); + sum_zone_numa_state(dev->id, NUMA_HIT), + sum_zone_numa_state(dev->id, NUMA_MISS), + sum_zone_numa_state(dev->id, NUMA_FOREIGN), + sum_zone_numa_state(dev->id, NUMA_INTERLEAVE_HIT), + sum_zone_numa_state(dev->id, NUMA_LOCAL), + sum_zone_numa_state(dev->id, NUMA_OTHER)); } static DEVICE_ATTR(numastat, S_IRUGO, node_read_numastat, NULL); @@ -181,9 +181,17 @@ static ssize_t node_read_vmstat(struct device *dev, n += sprintf(buf+n, "%s %lu\n", vmstat_text[i], sum_zone_node_page_state(nid, i)); - for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) +#ifdef CONFIG_NUMA + for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++) n += sprintf(buf+n, "%s %lu\n", vmstat_text[i + NR_VM_ZONE_STAT_ITEMS], + sum_zone_numa_state(nid, i)); +#endif + + for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) + n += sprintf(buf+n, "%s %lu\n", + vmstat_text[i + NR_VM_ZONE_STAT_ITEMS + + NR_VM_NUMA_STAT_ITEMS], node_page_state(pgdat, i)); return n; diff --git a/drivers/base/platform.c b/drivers/base/platform.c index d1bd99271066..9045c5f3734e 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -868,7 +868,8 @@ static ssize_t driver_override_store(struct device *dev, struct platform_device *pdev = to_platform_device(dev); char *driver_override, *old, *cp; - if (count > PATH_MAX) + /* We need to keep extra room for a newline */ + if (count >= (PAGE_SIZE - 1)) return -EINVAL; driver_override = kstrndup(buf, count, GFP_KERNEL); diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 60303aa28587..e8ca5e2cf1e5 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -209,6 +209,34 @@ static void genpd_sd_counter_inc(struct generic_pm_domain *genpd) smp_mb__after_atomic(); } +#ifdef CONFIG_DEBUG_FS +static void genpd_update_accounting(struct generic_pm_domain *genpd) +{ + ktime_t delta, now; + + now = ktime_get(); + delta = ktime_sub(now, genpd->accounting_time); + + /* + * If genpd->status is active, it means we are just + * out of off and so update the idle time and vice + * versa. 
+ */ + if (genpd->status == GPD_STATE_ACTIVE) { + int state_idx = genpd->state_idx; + + genpd->states[state_idx].idle_time = + ktime_add(genpd->states[state_idx].idle_time, delta); + } else { + genpd->on_time = ktime_add(genpd->on_time, delta); + } + + genpd->accounting_time = now; +} +#else +static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {} +#endif + static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed) { unsigned int state_idx = genpd->state_idx; @@ -361,6 +389,7 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on, } genpd->status = GPD_STATE_POWER_OFF; + genpd_update_accounting(genpd); list_for_each_entry(link, &genpd->slave_links, slave_node) { genpd_sd_counter_dec(link->master); @@ -413,6 +442,8 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth) goto err; genpd->status = GPD_STATE_ACTIVE; + genpd_update_accounting(genpd); + return 0; err: @@ -1540,6 +1571,7 @@ int pm_genpd_init(struct generic_pm_domain *genpd, genpd->max_off_time_changed = true; genpd->provider = NULL; genpd->has_provider = false; + genpd->accounting_time = ktime_get(); genpd->domain.ops.runtime_suspend = genpd_runtime_suspend; genpd->domain.ops.runtime_resume = genpd_runtime_resume; genpd->domain.ops.prepare = pm_genpd_prepare; @@ -1743,7 +1775,7 @@ static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate, mutex_lock(&of_genpd_mutex); list_add(&cp->link, &of_genpd_providers); mutex_unlock(&of_genpd_mutex); - pr_debug("Added domain provider from %s\n", np->full_name); + pr_debug("Added domain provider from %pOF\n", np); return 0; } @@ -2149,16 +2181,16 @@ static int genpd_parse_state(struct genpd_power_state *genpd_state, err = of_property_read_u32(state_node, "entry-latency-us", &entry_latency); if (err) { - pr_debug(" * %s missing entry-latency-us property\n", - state_node->full_name); + pr_debug(" * %pOF missing entry-latency-us property\n", + state_node); return -EINVAL; } err = of_property_read_u32(state_node, "exit-latency-us", &exit_latency); if (err) { - pr_debug(" * %s missing exit-latency-us property\n", - state_node->full_name); + pr_debug(" * %pOF missing exit-latency-us property\n", + state_node); return -EINVAL; } @@ -2212,8 +2244,8 @@ int of_genpd_parse_idle_states(struct device_node *dn, ret = genpd_parse_state(&st[i++], np); if (ret) { pr_err - ("Parsing idle state node %s failed with err %d\n", - np->full_name, ret); + ("Parsing idle state node %pOF failed with err %d\n", + np, ret); of_node_put(np); kfree(st); return ret; @@ -2327,7 +2359,7 @@ static int pm_genpd_summary_one(struct seq_file *s, return 0; } -static int pm_genpd_summary_show(struct seq_file *s, void *data) +static int genpd_summary_show(struct seq_file *s, void *data) { struct generic_pm_domain *genpd; int ret = 0; @@ -2350,21 +2382,187 @@ static int pm_genpd_summary_show(struct seq_file *s, void *data) return ret; } -static int pm_genpd_summary_open(struct inode *inode, struct file *file) +static int genpd_status_show(struct seq_file *s, void *data) { - return single_open(file, pm_genpd_summary_show, NULL); + static const char * const status_lookup[] = { + [GPD_STATE_ACTIVE] = "on", + [GPD_STATE_POWER_OFF] = "off" + }; + + struct generic_pm_domain *genpd = s->private; + int ret = 0; + + ret = genpd_lock_interruptible(genpd); + if (ret) + return -ERESTARTSYS; + + if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup))) + goto exit; + + if (genpd->status == GPD_STATE_POWER_OFF) + seq_printf(s, "%s-%u\n", 
status_lookup[genpd->status], + genpd->state_idx); + else + seq_printf(s, "%s\n", status_lookup[genpd->status]); +exit: + genpd_unlock(genpd); + return ret; } -static const struct file_operations pm_genpd_summary_fops = { - .open = pm_genpd_summary_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +static int genpd_sub_domains_show(struct seq_file *s, void *data) +{ + struct generic_pm_domain *genpd = s->private; + struct gpd_link *link; + int ret = 0; + + ret = genpd_lock_interruptible(genpd); + if (ret) + return -ERESTARTSYS; + + list_for_each_entry(link, &genpd->master_links, master_node) + seq_printf(s, "%s\n", link->slave->name); + + genpd_unlock(genpd); + return ret; +} + +static int genpd_idle_states_show(struct seq_file *s, void *data) +{ + struct generic_pm_domain *genpd = s->private; + unsigned int i; + int ret = 0; + + ret = genpd_lock_interruptible(genpd); + if (ret) + return -ERESTARTSYS; + + seq_puts(s, "State Time Spent(ms)\n"); + + for (i = 0; i < genpd->state_count; i++) { + ktime_t delta = 0; + s64 msecs; + + if ((genpd->status == GPD_STATE_POWER_OFF) && + (genpd->state_idx == i)) + delta = ktime_sub(ktime_get(), genpd->accounting_time); + + msecs = ktime_to_ms( + ktime_add(genpd->states[i].idle_time, delta)); + seq_printf(s, "S%-13i %lld\n", i, msecs); + } + + genpd_unlock(genpd); + return ret; +} + +static int genpd_active_time_show(struct seq_file *s, void *data) +{ + struct generic_pm_domain *genpd = s->private; + ktime_t delta = 0; + int ret = 0; + + ret = genpd_lock_interruptible(genpd); + if (ret) + return -ERESTARTSYS; + + if (genpd->status == GPD_STATE_ACTIVE) + delta = ktime_sub(ktime_get(), genpd->accounting_time); + + seq_printf(s, "%lld ms\n", ktime_to_ms( + ktime_add(genpd->on_time, delta))); + + genpd_unlock(genpd); + return ret; +} + +static int genpd_total_idle_time_show(struct seq_file *s, void *data) +{ + struct generic_pm_domain *genpd = s->private; + ktime_t delta = 0, total = 0; + unsigned int i; + int ret = 0; + + ret = genpd_lock_interruptible(genpd); + if (ret) + return -ERESTARTSYS; + + for (i = 0; i < genpd->state_count; i++) { + + if ((genpd->status == GPD_STATE_POWER_OFF) && + (genpd->state_idx == i)) + delta = ktime_sub(ktime_get(), genpd->accounting_time); + + total = ktime_add(total, genpd->states[i].idle_time); + } + total = ktime_add(total, delta); + + seq_printf(s, "%lld ms\n", ktime_to_ms(total)); + + genpd_unlock(genpd); + return ret; +} + + +static int genpd_devices_show(struct seq_file *s, void *data) +{ + struct generic_pm_domain *genpd = s->private; + struct pm_domain_data *pm_data; + const char *kobj_path; + int ret = 0; + + ret = genpd_lock_interruptible(genpd); + if (ret) + return -ERESTARTSYS; + + list_for_each_entry(pm_data, &genpd->dev_list, list_node) { + kobj_path = kobject_get_path(&pm_data->dev->kobj, + genpd_is_irq_safe(genpd) ? 
+ GFP_ATOMIC : GFP_KERNEL); + if (kobj_path == NULL) + continue; + + seq_printf(s, "%s\n", kobj_path); + kfree(kobj_path); + } + + genpd_unlock(genpd); + return ret; +} + +#define define_genpd_open_function(name) \ +static int genpd_##name##_open(struct inode *inode, struct file *file) \ +{ \ + return single_open(file, genpd_##name##_show, inode->i_private); \ +} + +define_genpd_open_function(summary); +define_genpd_open_function(status); +define_genpd_open_function(sub_domains); +define_genpd_open_function(idle_states); +define_genpd_open_function(active_time); +define_genpd_open_function(total_idle_time); +define_genpd_open_function(devices); + +#define define_genpd_debugfs_fops(name) \ +static const struct file_operations genpd_##name##_fops = { \ + .open = genpd_##name##_open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ +} + +define_genpd_debugfs_fops(summary); +define_genpd_debugfs_fops(status); +define_genpd_debugfs_fops(sub_domains); +define_genpd_debugfs_fops(idle_states); +define_genpd_debugfs_fops(active_time); +define_genpd_debugfs_fops(total_idle_time); +define_genpd_debugfs_fops(devices); static int __init pm_genpd_debug_init(void) { struct dentry *d; + struct generic_pm_domain *genpd; pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL); @@ -2372,10 +2570,29 @@ static int __init pm_genpd_debug_init(void) return -ENOMEM; d = debugfs_create_file("pm_genpd_summary", S_IRUGO, - pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops); + pm_genpd_debugfs_dir, NULL, &genpd_summary_fops); if (!d) return -ENOMEM; + list_for_each_entry(genpd, &gpd_list, gpd_list_node) { + d = debugfs_create_dir(genpd->name, pm_genpd_debugfs_dir); + if (!d) + return -ENOMEM; + + debugfs_create_file("current_state", 0444, + d, genpd, &genpd_status_fops); + debugfs_create_file("sub_domains", 0444, + d, genpd, &genpd_sub_domains_fops); + debugfs_create_file("idle_states", 0444, + d, genpd, &genpd_idle_states_fops); + debugfs_create_file("active_time", 0444, + d, genpd, &genpd_active_time_fops); + debugfs_create_file("total_idle_time", 0444, + d, genpd, &genpd_total_idle_time_fops); + debugfs_create_file("devices", 0444, + d, genpd, &genpd_devices_fops); + } + return 0; } late_initcall(pm_genpd_debug_init); diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index c99f8730de82..770b1539a083 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -418,8 +418,7 @@ static void pm_dev_err(struct device *dev, pm_message_t state, const char *info, dev_name(dev), pm_verb(state.event), info, error); } -#ifdef CONFIG_PM_DEBUG -static void dpm_show_time(ktime_t starttime, pm_message_t state, +static void dpm_show_time(ktime_t starttime, pm_message_t state, int error, const char *info) { ktime_t calltime; @@ -432,14 +431,12 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state, usecs = usecs64; if (usecs == 0) usecs = 1; - pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n", - info ?: "", info ? " " : "", pm_verb(state.event), - usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC); + + pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n", + info ?: "", info ? " " : "", pm_verb(state.event), + error ? 
"aborted" : "complete", + usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC); } -#else -static inline void dpm_show_time(ktime_t starttime, pm_message_t state, - const char *info) {} -#endif /* CONFIG_PM_DEBUG */ static int dpm_run_callback(pm_callback_t cb, struct device *dev, pm_message_t state, const char *info) @@ -602,14 +599,7 @@ static void async_resume_noirq(void *data, async_cookie_t cookie) put_device(dev); } -/** - * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices. - * @state: PM transition of the system being carried out. - * - * Call the "noirq" resume handlers for all devices in dpm_noirq_list and - * enable device drivers to receive interrupts. - */ -void dpm_resume_noirq(pm_message_t state) +void dpm_noirq_resume_devices(pm_message_t state) { struct device *dev; ktime_t starttime = ktime_get(); @@ -654,11 +644,28 @@ void dpm_resume_noirq(pm_message_t state) } mutex_unlock(&dpm_list_mtx); async_synchronize_full(); - dpm_show_time(starttime, state, "noirq"); + dpm_show_time(starttime, state, 0, "noirq"); + trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false); +} + +void dpm_noirq_end(void) +{ resume_device_irqs(); device_wakeup_disarm_wake_irqs(); cpuidle_resume(); - trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false); +} + +/** + * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices. + * @state: PM transition of the system being carried out. + * + * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and + * allow device drivers' interrupt handlers to be called. + */ +void dpm_resume_noirq(pm_message_t state) +{ + dpm_noirq_resume_devices(state); + dpm_noirq_end(); } /** @@ -776,7 +783,7 @@ void dpm_resume_early(pm_message_t state) } mutex_unlock(&dpm_list_mtx); async_synchronize_full(); - dpm_show_time(starttime, state, "early"); + dpm_show_time(starttime, state, 0, "early"); trace_suspend_resume(TPS("dpm_resume_early"), state.event, false); } @@ -948,7 +955,7 @@ void dpm_resume(pm_message_t state) } mutex_unlock(&dpm_list_mtx); async_synchronize_full(); - dpm_show_time(starttime, state, NULL); + dpm_show_time(starttime, state, 0, NULL); cpufreq_resume(); trace_suspend_resume(TPS("dpm_resume"), state.event, false); @@ -1098,6 +1105,11 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a if (async_error) goto Complete; + if (pm_wakeup_pending()) { + async_error = -EBUSY; + goto Complete; + } + if (dev->power.syscore || dev->power.direct_complete) goto Complete; @@ -1158,22 +1170,19 @@ static int device_suspend_noirq(struct device *dev) return __device_suspend_noirq(dev, pm_transition, false); } -/** - * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices. - * @state: PM transition of the system being carried out. - * - * Prevent device drivers from receiving interrupts and call the "noirq" suspend - * handlers for all non-sysdev devices. 
- */ -int dpm_suspend_noirq(pm_message_t state) +void dpm_noirq_begin(void) +{ + cpuidle_pause(); + device_wakeup_arm_wake_irqs(); + suspend_device_irqs(); +} + +int dpm_noirq_suspend_devices(pm_message_t state) { ktime_t starttime = ktime_get(); int error = 0; trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true); - cpuidle_pause(); - device_wakeup_arm_wake_irqs(); - suspend_device_irqs(); mutex_lock(&dpm_list_mtx); pm_transition = state; async_error = 0; @@ -1208,14 +1217,31 @@ int dpm_suspend_noirq(pm_message_t state) if (error) { suspend_stats.failed_suspend_noirq++; dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ); - dpm_resume_noirq(resume_event(state)); - } else { - dpm_show_time(starttime, state, "noirq"); } + dpm_show_time(starttime, state, error, "noirq"); trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false); return error; } +/** + * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices. + * @state: PM transition of the system being carried out. + * + * Prevent device drivers' interrupt handlers from being called and invoke + * "noirq" suspend callbacks for all non-sysdev devices. + */ +int dpm_suspend_noirq(pm_message_t state) +{ + int ret; + + dpm_noirq_begin(); + ret = dpm_noirq_suspend_devices(state); + if (ret) + dpm_resume_noirq(resume_event(state)); + + return ret; +} + /** * device_suspend_late - Execute a "late suspend" callback for given device. * @dev: Device to handle. @@ -1350,9 +1376,8 @@ int dpm_suspend_late(pm_message_t state) suspend_stats.failed_suspend_late++; dpm_save_failed_step(SUSPEND_SUSPEND_LATE); dpm_resume_early(resume_event(state)); - } else { - dpm_show_time(starttime, state, "late"); } + dpm_show_time(starttime, state, error, "late"); trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false); return error; } @@ -1618,8 +1643,8 @@ int dpm_suspend(pm_message_t state) if (error) { suspend_stats.failed_suspend++; dpm_save_failed_step(SUSPEND_SUSPEND); - } else - dpm_show_time(starttime, state, NULL); + } + dpm_show_time(starttime, state, error, NULL); trace_suspend_resume(TPS("dpm_suspend"), state.event, false); return error; } @@ -1835,10 +1860,13 @@ void device_pm_check_callbacks(struct device *dev) { spin_lock_irq(&dev->power.lock); dev->power.no_pm_callbacks = - (!dev->bus || pm_ops_is_empty(dev->bus->pm)) && - (!dev->class || pm_ops_is_empty(dev->class->pm)) && + (!dev->bus || (pm_ops_is_empty(dev->bus->pm) && + !dev->bus->suspend && !dev->bus->resume)) && + (!dev->class || (pm_ops_is_empty(dev->class->pm) && + !dev->class->suspend && !dev->class->resume)) && (!dev->type || pm_ops_is_empty(dev->type->pm)) && (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) && - (!dev->driver || pm_ops_is_empty(dev->driver->pm)); + (!dev->driver || (pm_ops_is_empty(dev->driver->pm) && + !dev->driver->suspend && !dev->driver->resume)); spin_unlock_irq(&dev->power.lock); } diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c index a8cc14fd8ae4..a6de32530693 100644 --- a/drivers/base/power/opp/core.c +++ b/drivers/base/power/opp/core.c @@ -1581,6 +1581,9 @@ static int _opp_set_availability(struct device *dev, unsigned long freq, opp->available = availability_req; + dev_pm_opp_get(opp); + mutex_unlock(&opp_table->lock); + /* Notify the change of the OPP availability */ if (availability_req) blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE, @@ -1589,8 +1592,12 @@ static int _opp_set_availability(struct device *dev, unsigned long freq, blocking_notifier_call_chain(&opp_table->head, 
OPP_EVENT_DISABLE, opp); + dev_pm_opp_put(opp); + goto put_table; + unlock: mutex_unlock(&opp_table->lock); +put_table: dev_pm_opp_put_opp_table(opp_table); return r; } diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c index 57eec1ca0569..0b718886479b 100644 --- a/drivers/base/power/opp/of.c +++ b/drivers/base/power/opp/of.c @@ -248,15 +248,22 @@ void dev_pm_opp_of_remove_table(struct device *dev) } EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table); -/* Returns opp descriptor node for a device, caller must do of_node_put() */ -struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev) +/* Returns opp descriptor node for a device node, caller must + * do of_node_put() */ +static struct device_node *_opp_of_get_opp_desc_node(struct device_node *np) { /* * There should be only ONE phandle present in "operating-points-v2" * property. */ - return of_parse_phandle(dev->of_node, "operating-points-v2", 0); + return of_parse_phandle(np, "operating-points-v2", 0); +} + +/* Returns opp descriptor node for a device, caller must do of_node_put() */ +struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev) +{ + return _opp_of_get_opp_desc_node(dev->of_node); } EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_opp_desc_node); @@ -539,8 +546,12 @@ int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask) ret = dev_pm_opp_of_add_table(cpu_dev); if (ret) { - pr_err("%s: couldn't find opp table for cpu:%d, %d\n", - __func__, cpu, ret); + /* + * OPP may get registered dynamically, don't print error + * message here. + */ + pr_debug("%s: couldn't find opp table for cpu:%d, %d\n", + __func__, cpu, ret); /* Free all other OPPs */ dev_pm_opp_of_cpumask_remove_table(cpumask); @@ -572,8 +583,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table); int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) { - struct device_node *np, *tmp_np; - struct device *tcpu_dev; + struct device_node *np, *tmp_np, *cpu_np; int cpu, ret = 0; /* Get OPP descriptor node */ @@ -593,19 +603,18 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, if (cpu == cpu_dev->id) continue; - tcpu_dev = get_cpu_device(cpu); - if (!tcpu_dev) { - dev_err(cpu_dev, "%s: failed to get cpu%d device\n", + cpu_np = of_get_cpu_node(cpu, NULL); + if (!cpu_np) { + dev_err(cpu_dev, "%s: failed to get cpu%d node\n", __func__, cpu); - ret = -ENODEV; + ret = -ENOENT; goto put_cpu_node; } /* Get OPP descriptor node */ - tmp_np = dev_pm_opp_of_get_opp_desc_node(tcpu_dev); + tmp_np = _opp_of_get_opp_desc_node(cpu_np); if (!tmp_np) { - dev_err(tcpu_dev, "%s: Couldn't find opp node.\n", - __func__); + pr_err("%pOF: Couldn't find opp node\n", cpu_np); ret = -ENOENT; goto put_cpu_node; } diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index f850daeffba4..277d43a83f53 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@ -277,11 +277,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev) mutex_unlock(&dev_pm_qos_sysfs_mtx); } -static bool dev_pm_qos_invalid_request(struct device *dev, - struct dev_pm_qos_request *req) +static bool dev_pm_qos_invalid_req_type(struct device *dev, + enum dev_pm_qos_req_type type) { - return !req || (req->type == DEV_PM_QOS_LATENCY_TOLERANCE - && !dev->power.set_latency_tolerance); + return type == DEV_PM_QOS_LATENCY_TOLERANCE && + !dev->power.set_latency_tolerance; } static int __dev_pm_qos_add_request(struct device *dev, @@ -290,7 +290,7 @@ static int __dev_pm_qos_add_request(struct device *dev, { int ret = 0; - if (!dev 
|| dev_pm_qos_invalid_request(dev, req)) + if (!dev || !req || dev_pm_qos_invalid_req_type(dev, type)) return -EINVAL; if (WARN(dev_pm_qos_request_active(req), diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 144e6d8fafc8..cdd6f256da59 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -412,15 +412,17 @@ void device_set_wakeup_capable(struct device *dev, bool capable) if (!!dev->power.can_wakeup == !!capable) return; + dev->power.can_wakeup = capable; if (device_is_registered(dev) && !list_empty(&dev->power.entry)) { if (capable) { - if (wakeup_sysfs_add(dev)) - return; + int ret = wakeup_sysfs_add(dev); + + if (ret) + dev_info(dev, "Wakeup sysfs attributes not added\n"); } else { wakeup_sysfs_remove(dev); } } - dev->power.can_wakeup = capable; } EXPORT_SYMBOL_GPL(device_set_wakeup_capable); @@ -863,7 +865,7 @@ bool pm_wakeup_pending(void) void pm_system_wakeup(void) { atomic_inc(&pm_abort_suspend); - freeze_wake(); + s2idle_wake(); } EXPORT_SYMBOL_GPL(pm_system_wakeup); diff --git a/drivers/base/property.c b/drivers/base/property.c index edf02c1b5845..d0b65bbe7e15 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -25,19 +25,25 @@ struct property_set { const struct property_entry *properties; }; -static inline bool is_pset_node(struct fwnode_handle *fwnode) +static const struct fwnode_operations pset_fwnode_ops; + +static inline bool is_pset_node(const struct fwnode_handle *fwnode) { - return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_PDATA; + return !IS_ERR_OR_NULL(fwnode) && fwnode->ops == &pset_fwnode_ops; } -static inline struct property_set *to_pset_node(struct fwnode_handle *fwnode) -{ - return is_pset_node(fwnode) ? - container_of(fwnode, struct property_set, fwnode) : NULL; -} +#define to_pset_node(__fwnode) \ + ({ \ + typeof(__fwnode) __to_pset_node_fwnode = __fwnode; \ + \ + is_pset_node(__to_pset_node_fwnode) ? 
\ + container_of(__to_pset_node_fwnode, \ + struct property_set, fwnode) : \ + NULL; \ + }) -static const struct property_entry *pset_prop_get(struct property_set *pset, - const char *name) +static const struct property_entry * +pset_prop_get(const struct property_set *pset, const char *name) { const struct property_entry *prop; @@ -51,7 +57,7 @@ static const struct property_entry *pset_prop_get(struct property_set *pset, return NULL; } -static const void *pset_prop_find(struct property_set *pset, +static const void *pset_prop_find(const struct property_set *pset, const char *propname, size_t length) { const struct property_entry *prop; @@ -71,7 +77,7 @@ static const void *pset_prop_find(struct property_set *pset, return pointer; } -static int pset_prop_read_u8_array(struct property_set *pset, +static int pset_prop_read_u8_array(const struct property_set *pset, const char *propname, u8 *values, size_t nval) { @@ -86,7 +92,7 @@ static int pset_prop_read_u8_array(struct property_set *pset, return 0; } -static int pset_prop_read_u16_array(struct property_set *pset, +static int pset_prop_read_u16_array(const struct property_set *pset, const char *propname, u16 *values, size_t nval) { @@ -101,7 +107,7 @@ static int pset_prop_read_u16_array(struct property_set *pset, return 0; } -static int pset_prop_read_u32_array(struct property_set *pset, +static int pset_prop_read_u32_array(const struct property_set *pset, const char *propname, u32 *values, size_t nval) { @@ -116,7 +122,7 @@ static int pset_prop_read_u32_array(struct property_set *pset, return 0; } -static int pset_prop_read_u64_array(struct property_set *pset, +static int pset_prop_read_u64_array(const struct property_set *pset, const char *propname, u64 *values, size_t nval) { @@ -131,7 +137,7 @@ static int pset_prop_read_u64_array(struct property_set *pset, return 0; } -static int pset_prop_count_elems_of_size(struct property_set *pset, +static int pset_prop_count_elems_of_size(const struct property_set *pset, const char *propname, size_t length) { const struct property_entry *prop; @@ -143,7 +149,7 @@ static int pset_prop_count_elems_of_size(struct property_set *pset, return prop->length / length; } -static int pset_prop_read_string_array(struct property_set *pset, +static int pset_prop_read_string_array(const struct property_set *pset, const char *propname, const char **strings, size_t nval) { @@ -187,18 +193,18 @@ struct fwnode_handle *dev_fwnode(struct device *dev) } EXPORT_SYMBOL_GPL(dev_fwnode); -static bool pset_fwnode_property_present(struct fwnode_handle *fwnode, +static bool pset_fwnode_property_present(const struct fwnode_handle *fwnode, const char *propname) { return !!pset_prop_get(to_pset_node(fwnode), propname); } -static int pset_fwnode_read_int_array(struct fwnode_handle *fwnode, +static int pset_fwnode_read_int_array(const struct fwnode_handle *fwnode, const char *propname, unsigned int elem_size, void *val, size_t nval) { - struct property_set *node = to_pset_node(fwnode); + const struct property_set *node = to_pset_node(fwnode); if (!val) return pset_prop_count_elems_of_size(node, propname, elem_size); @@ -217,9 +223,10 @@ static int pset_fwnode_read_int_array(struct fwnode_handle *fwnode, return -ENXIO; } -static int pset_fwnode_property_read_string_array(struct fwnode_handle *fwnode, - const char *propname, - const char **val, size_t nval) +static int +pset_fwnode_property_read_string_array(const struct fwnode_handle *fwnode, + const char *propname, + const char **val, size_t nval) { return 
pset_prop_read_string_array(to_pset_node(fwnode), propname, val, nval); @@ -249,7 +256,8 @@ EXPORT_SYMBOL_GPL(device_property_present); * @fwnode: Firmware node whose property to check * @propname: Name of the property */ -bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname) +bool fwnode_property_present(const struct fwnode_handle *fwnode, + const char *propname) { bool ret; @@ -431,7 +439,7 @@ int device_property_match_string(struct device *dev, const char *propname, } EXPORT_SYMBOL_GPL(device_property_match_string); -static int fwnode_property_read_int_array(struct fwnode_handle *fwnode, +static int fwnode_property_read_int_array(const struct fwnode_handle *fwnode, const char *propname, unsigned int elem_size, void *val, size_t nval) @@ -467,7 +475,7 @@ static int fwnode_property_read_int_array(struct fwnode_handle *fwnode, * %-EOVERFLOW if the size of the property is not as expected, * %-ENXIO if no suitable firmware interface is present. */ -int fwnode_property_read_u8_array(struct fwnode_handle *fwnode, +int fwnode_property_read_u8_array(const struct fwnode_handle *fwnode, const char *propname, u8 *val, size_t nval) { return fwnode_property_read_int_array(fwnode, propname, sizeof(u8), @@ -493,7 +501,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_u8_array); * %-EOVERFLOW if the size of the property is not as expected, * %-ENXIO if no suitable firmware interface is present. */ -int fwnode_property_read_u16_array(struct fwnode_handle *fwnode, +int fwnode_property_read_u16_array(const struct fwnode_handle *fwnode, const char *propname, u16 *val, size_t nval) { return fwnode_property_read_int_array(fwnode, propname, sizeof(u16), @@ -519,7 +527,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_u16_array); * %-EOVERFLOW if the size of the property is not as expected, * %-ENXIO if no suitable firmware interface is present. */ -int fwnode_property_read_u32_array(struct fwnode_handle *fwnode, +int fwnode_property_read_u32_array(const struct fwnode_handle *fwnode, const char *propname, u32 *val, size_t nval) { return fwnode_property_read_int_array(fwnode, propname, sizeof(u32), @@ -545,7 +553,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_u32_array); * %-EOVERFLOW if the size of the property is not as expected, * %-ENXIO if no suitable firmware interface is present. */ -int fwnode_property_read_u64_array(struct fwnode_handle *fwnode, +int fwnode_property_read_u64_array(const struct fwnode_handle *fwnode, const char *propname, u64 *val, size_t nval) { return fwnode_property_read_int_array(fwnode, propname, sizeof(u64), @@ -571,7 +579,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_u64_array); * %-EOVERFLOW if the size of the property is not as expected, * %-ENXIO if no suitable firmware interface is present. */ -int fwnode_property_read_string_array(struct fwnode_handle *fwnode, +int fwnode_property_read_string_array(const struct fwnode_handle *fwnode, const char *propname, const char **val, size_t nval) { @@ -603,7 +611,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_string_array); * %-EPROTO or %-EILSEQ if the property is not a string, * %-ENXIO if no suitable firmware interface is present. 
*/ -int fwnode_property_read_string(struct fwnode_handle *fwnode, +int fwnode_property_read_string(const struct fwnode_handle *fwnode, const char *propname, const char **val) { int ret = fwnode_property_read_string_array(fwnode, propname, val, 1); @@ -627,7 +635,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_string); * %-EPROTO if the property is not an array of strings, * %-ENXIO if no suitable firmware interface is present. */ -int fwnode_property_match_string(struct fwnode_handle *fwnode, +int fwnode_property_match_string(const struct fwnode_handle *fwnode, const char *propname, const char *string) { const char **values; @@ -657,6 +665,34 @@ int fwnode_property_match_string(struct fwnode_handle *fwnode, } EXPORT_SYMBOL_GPL(fwnode_property_match_string); +/** + * fwnode_property_get_reference_args() - Find a reference with arguments + * @fwnode: Firmware node where to look for the reference + * @prop: The name of the property + * @nargs_prop: The name of the property telling the number of + * arguments in the referred node. NULL if @nargs is known, + * otherwise @nargs is ignored. Only relevant on OF. + * @nargs: Number of arguments. Ignored if @nargs_prop is non-NULL. + * @index: Index of the reference, from zero onwards. + * @args: Result structure with reference and integer arguments. + * + * Obtain a reference based on a named property in an fwnode, with + * integer arguments. + * + * Caller is responsible to call fwnode_handle_put() on the returned + * args->fwnode pointer. + * + */ +int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode, + const char *prop, const char *nargs_prop, + unsigned int nargs, unsigned int index, + struct fwnode_reference_args *args) +{ + return fwnode_call_int_op(fwnode, get_reference_args, prop, nargs_prop, + nargs, index, args); +} +EXPORT_SYMBOL_GPL(fwnode_property_get_reference_args); + static int property_copy_string_array(struct property_entry *dst, const struct property_entry *src) { @@ -900,7 +936,6 @@ int device_add_properties(struct device *dev, if (IS_ERR(p)) return PTR_ERR(p); - p->fwnode.type = FWNODE_PDATA; p->fwnode.ops = &pset_fwnode_ops; set_secondary_fwnode(dev, &p->fwnode); return 0; @@ -935,7 +970,7 @@ EXPORT_SYMBOL_GPL(fwnode_get_next_parent); * Return parent firmware node of the given node if possible or %NULL if no * parent was available. */ -struct fwnode_handle *fwnode_get_parent(struct fwnode_handle *fwnode) +struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode) { return fwnode_call_ptr_op(fwnode, get_parent); } @@ -946,8 +981,9 @@ EXPORT_SYMBOL_GPL(fwnode_get_parent); * @fwnode: Firmware node to find the next child node for. * @child: Handle to one of the node's child nodes or a %NULL handle. */ -struct fwnode_handle *fwnode_get_next_child_node(struct fwnode_handle *fwnode, - struct fwnode_handle *child) +struct fwnode_handle * +fwnode_get_next_child_node(const struct fwnode_handle *fwnode, + struct fwnode_handle *child) { return fwnode_call_ptr_op(fwnode, get_next_child_node, child); } @@ -978,8 +1014,9 @@ EXPORT_SYMBOL_GPL(device_get_next_child_node); * @fwnode: Firmware node to find the named child node for. * @childname: String to match child node name against. 
*/ -struct fwnode_handle *fwnode_get_named_child_node(struct fwnode_handle *fwnode, - const char *childname) +struct fwnode_handle * +fwnode_get_named_child_node(const struct fwnode_handle *fwnode, + const char *childname) { return fwnode_call_ptr_op(fwnode, get_named_child_node, childname); } @@ -1025,7 +1062,7 @@ EXPORT_SYMBOL_GPL(fwnode_handle_put); * fwnode_device_is_available - check if a device is available for use * @fwnode: Pointer to the fwnode of the device. */ -bool fwnode_device_is_available(struct fwnode_handle *fwnode) +bool fwnode_device_is_available(const struct fwnode_handle *fwnode) { return fwnode_call_bool_op(fwnode, device_is_available); } @@ -1163,7 +1200,7 @@ EXPORT_SYMBOL(device_get_mac_address); * are available. */ struct fwnode_handle * -fwnode_graph_get_next_endpoint(struct fwnode_handle *fwnode, +fwnode_graph_get_next_endpoint(const struct fwnode_handle *fwnode, struct fwnode_handle *prev) { return fwnode_call_ptr_op(fwnode, graph_get_next_endpoint, prev); @@ -1177,7 +1214,7 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_next_endpoint); * Return: the firmware node of the device the @endpoint belongs to. */ struct fwnode_handle * -fwnode_graph_get_port_parent(struct fwnode_handle *endpoint) +fwnode_graph_get_port_parent(const struct fwnode_handle *endpoint) { struct fwnode_handle *port, *parent; @@ -1197,7 +1234,7 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_port_parent); * Extracts firmware node of a remote device the @fwnode points to. */ struct fwnode_handle * -fwnode_graph_get_remote_port_parent(struct fwnode_handle *fwnode) +fwnode_graph_get_remote_port_parent(const struct fwnode_handle *fwnode) { struct fwnode_handle *endpoint, *parent; @@ -1216,7 +1253,8 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_port_parent); * * Extracts firmware node of a remote port the @fwnode points to. */ -struct fwnode_handle *fwnode_graph_get_remote_port(struct fwnode_handle *fwnode) +struct fwnode_handle * +fwnode_graph_get_remote_port(const struct fwnode_handle *fwnode) { return fwnode_get_next_parent(fwnode_graph_get_remote_endpoint(fwnode)); } @@ -1229,7 +1267,7 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_port); * Extracts firmware node of a remote endpoint the @fwnode points to. */ struct fwnode_handle * -fwnode_graph_get_remote_endpoint(struct fwnode_handle *fwnode) +fwnode_graph_get_remote_endpoint(const struct fwnode_handle *fwnode) { return fwnode_call_ptr_op(fwnode, graph_get_remote_endpoint); } @@ -1244,8 +1282,9 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_endpoint); * Return: Remote fwnode handle associated with remote endpoint node linked * to @node. Use fwnode_node_put() on it when done. */ -struct fwnode_handle *fwnode_graph_get_remote_node(struct fwnode_handle *fwnode, - u32 port_id, u32 endpoint_id) +struct fwnode_handle * +fwnode_graph_get_remote_node(const struct fwnode_handle *fwnode, u32 port_id, + u32 endpoint_id) { struct fwnode_handle *endpoint = NULL; @@ -1281,7 +1320,7 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_node); * information in @endpoint. The caller must hold a reference to * @fwnode. 
*/ -int fwnode_graph_parse_endpoint(struct fwnode_handle *fwnode, +int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode, struct fwnode_endpoint *endpoint) { memset(endpoint, 0, sizeof(*endpoint)); diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig index b5c48a8d485f..54f81c554815 100644 --- a/drivers/bcma/Kconfig +++ b/drivers/bcma/Kconfig @@ -3,11 +3,8 @@ config BCMA_POSSIBLE depends on HAS_IOMEM && HAS_DMA default y -menu "Broadcom specific AMBA" - depends on BCMA_POSSIBLE - -config BCMA - tristate "BCMA support" +menuconfig BCMA + tristate "Broadcom specific AMBA" depends on BCMA_POSSIBLE help Bus driver for Broadcom specific Advanced Microcontroller Bus @@ -117,5 +114,3 @@ config BCMA_DEBUG This turns on additional debugging messages. If unsure, say N - -endmenu diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c index 7bde8d7a2816..982d5781d3ce 100644 --- a/drivers/bcma/driver_gpio.c +++ b/drivers/bcma/driver_gpio.c @@ -191,6 +191,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc) case BCMA_CHIP_ID_BCM4707: case BCMA_CHIP_ID_BCM5357: case BCMA_CHIP_ID_BCM53572: + case BCMA_CHIP_ID_BCM53573: case BCMA_CHIP_ID_BCM47094: chip->ngpio = 32; break; diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c index 245a879b036e..255591ab3716 100644 --- a/drivers/block/DAC960.c +++ b/drivers/block/DAC960.c @@ -1678,9 +1678,12 @@ static bool DAC960_V1_ReadControllerConfiguration(DAC960_Controller_T Enquiry2->FirmwareID.FirmwareType = '0'; Enquiry2->FirmwareID.TurnID = 0; } - sprintf(Controller->FirmwareVersion, "%d.%02d-%c-%02d", - Enquiry2->FirmwareID.MajorVersion, Enquiry2->FirmwareID.MinorVersion, - Enquiry2->FirmwareID.FirmwareType, Enquiry2->FirmwareID.TurnID); + snprintf(Controller->FirmwareVersion, sizeof(Controller->FirmwareVersion), + "%d.%02d-%c-%02d", + Enquiry2->FirmwareID.MajorVersion, + Enquiry2->FirmwareID.MinorVersion, + Enquiry2->FirmwareID.FirmwareType, + Enquiry2->FirmwareID.TurnID); if (!((Controller->FirmwareVersion[0] == '5' && strcmp(Controller->FirmwareVersion, "5.06") >= 0) || (Controller->FirmwareVersion[0] == '4' && @@ -6588,7 +6591,8 @@ static void DAC960_CreateProcEntries(DAC960_Controller_T *Controller) &dac960_proc_fops); } - sprintf(Controller->ControllerName, "c%d", Controller->ControllerNumber); + snprintf(Controller->ControllerName, sizeof(Controller->ControllerName), + "c%d", Controller->ControllerNumber); ControllerProcEntry = proc_mkdir(Controller->ControllerName, DAC960_ProcDirectoryEntry); proc_create_data("initial_status", 0, ControllerProcEntry, &dac960_initial_status_proc_fops, Controller); diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index 80aaf3420e12..2dfe99b328f8 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -17,6 +17,7 @@ if BLK_DEV config BLK_DEV_NULL_BLK tristate "Null test block driver" + select CONFIGFS_FS config BLK_DEV_FD tristate "Normal floppy disk support" @@ -111,33 +112,6 @@ source "drivers/block/mtip32xx/Kconfig" source "drivers/block/zram/Kconfig" -config BLK_CPQ_CISS_DA - tristate "Compaq Smart Array 5xxx support" - depends on PCI - select CHECK_SIGNATURE - select BLK_SCSI_REQUEST - help - This is the driver for Compaq Smart Array 5xxx controllers. - Everyone using these boards should say Y here. - See for the current list of - boards supported by this driver, and for further information - on the use of this driver. 
- -config CISS_SCSI_TAPE - bool "SCSI tape drive support for Smart Array 5xxx" - depends on BLK_CPQ_CISS_DA && PROC_FS - depends on SCSI=y || SCSI=BLK_CPQ_CISS_DA - help - When enabled (Y), this option allows SCSI tape drives and SCSI medium - changers (tape robots) to be accessed via a Compaq 5xxx array - controller. (See for more details.) - - "SCSI support" and "SCSI tape support" must also be enabled for this - option to work. - - When this option is disabled (N), the SCSI portion of the driver - is not compiled. - config BLK_DEV_DAC960 tristate "Mylex DAC960/DAC1100 PCI RAID Controller support" depends on PCI diff --git a/drivers/block/Makefile b/drivers/block/Makefile index ec8c36897b75..1f456d86a190 100644 --- a/drivers/block/Makefile +++ b/drivers/block/Makefile @@ -15,7 +15,6 @@ obj-$(CONFIG_ATARI_FLOPPY) += ataflop.o obj-$(CONFIG_AMIGA_Z2RAM) += z2ram.o obj-$(CONFIG_BLK_DEV_RAM) += brd.o obj-$(CONFIG_BLK_DEV_LOOP) += loop.o -obj-$(CONFIG_BLK_CPQ_CISS_DA) += cciss.o obj-$(CONFIG_BLK_DEV_DAC960) += DAC960.o obj-$(CONFIG_XILINX_SYSACE) += xsysace.o obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o diff --git a/drivers/block/brd.c b/drivers/block/brd.c index 104b71c0490d..2d7178f7754e 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -294,14 +294,13 @@ static int brd_do_bvec(struct brd_device *brd, struct page *page, static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio) { - struct block_device *bdev = bio->bi_bdev; - struct brd_device *brd = bdev->bd_disk->private_data; + struct brd_device *brd = bio->bi_disk->private_data; struct bio_vec bvec; sector_t sector; struct bvec_iter iter; sector = bio->bi_iter.bi_sector; - if (bio_end_sector(bio) > get_capacity(bdev->bd_disk)) + if (bio_end_sector(bio) > get_capacity(bio->bi_disk)) goto io_error; bio_for_each_segment(bvec, bio, iter) { @@ -326,7 +325,11 @@ static int brd_rw_page(struct block_device *bdev, sector_t sector, struct page *page, bool is_write) { struct brd_device *brd = bdev->bd_disk->private_data; - int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, is_write, sector); + int err; + + if (PageTransHuge(page)) + return -ENOTSUPP; + err = brd_do_bvec(brd, page, PAGE_SIZE, 0, is_write, sector); page_endio(page, is_write, err); return err; } @@ -339,7 +342,7 @@ static long __brd_direct_access(struct brd_device *brd, pgoff_t pgoff, if (!brd) return -ENODEV; - page = brd_insert_page(brd, PFN_PHYS(pgoff) / 512); + page = brd_insert_page(brd, (sector_t)pgoff << PAGE_SECTORS_SHIFT); if (!page) return -ENOSPC; *kaddr = page_address(page); diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c deleted file mode 100644 index 678af946be30..000000000000 --- a/drivers/block/cciss.c +++ /dev/null @@ -1,5415 +0,0 @@ -/* - * Disk Array driver for HP Smart Array controllers. - * (C) Copyright 2000, 2007 Hewlett-Packard Development Company, L.P. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA - * 02111-1307, USA. 
- * - * Questions/Comments/Bugfixes to iss_storagedev@hp.com - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin)) -#define DRIVER_NAME "HP CISS Driver (v 3.6.26)" -#define DRIVER_VERSION CCISS_DRIVER_VERSION(3, 6, 26) - -/* Embedded module documentation macros - see modules.h */ -MODULE_AUTHOR("Hewlett-Packard Company"); -MODULE_DESCRIPTION("Driver for HP Smart Array Controllers"); -MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers"); -MODULE_VERSION("3.6.26"); -MODULE_LICENSE("GPL"); -static int cciss_tape_cmds = 6; -module_param(cciss_tape_cmds, int, 0644); -MODULE_PARM_DESC(cciss_tape_cmds, - "number of commands to allocate for tape devices (default: 6)"); -static int cciss_simple_mode; -module_param(cciss_simple_mode, int, S_IRUGO|S_IWUSR); -MODULE_PARM_DESC(cciss_simple_mode, - "Use 'simple mode' rather than 'performant mode'"); - -static int cciss_allow_hpsa; -module_param(cciss_allow_hpsa, int, S_IRUGO|S_IWUSR); -MODULE_PARM_DESC(cciss_allow_hpsa, - "Prevent cciss driver from accessing hardware known to be " - " supported by the hpsa driver"); - -static DEFINE_MUTEX(cciss_mutex); -static struct proc_dir_entry *proc_cciss; - -#include "cciss_cmd.h" -#include "cciss.h" -#include - -/* define the PCI info for the cards we can control */ -static const struct pci_device_id cciss_pci_device_id[] = { - {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS, 0x0E11, 0x4070}, - {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4080}, - {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4082}, - {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4083}, - {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x4091}, - {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409A}, - {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409B}, - {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409C}, - {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409D}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA, 0x103C, 0x3225}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3235}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3211}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3212}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3213}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3214}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3215}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3237}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x323D}, - {0,} -}; - -MODULE_DEVICE_TABLE(pci, cciss_pci_device_id); - -/* board_id = Subsystem Device ID & Vendor ID - * product = Marketing Name for the board - * access = Address of the struct of function pointers - */ -static struct board_type products[] = { - {0x40700E11, "Smart Array 5300", &SA5_access}, - {0x40800E11, "Smart Array 5i", &SA5B_access}, - {0x40820E11, "Smart Array 532", &SA5B_access}, - {0x40830E11, "Smart Array 5312", &SA5B_access}, - {0x409A0E11, "Smart Array 641", &SA5_access}, - {0x409B0E11, "Smart Array 
642", &SA5_access}, - {0x409C0E11, "Smart Array 6400", &SA5_access}, - {0x409D0E11, "Smart Array 6400 EM", &SA5_access}, - {0x40910E11, "Smart Array 6i", &SA5_access}, - {0x3225103C, "Smart Array P600", &SA5_access}, - {0x3223103C, "Smart Array P800", &SA5_access}, - {0x3234103C, "Smart Array P400", &SA5_access}, - {0x3235103C, "Smart Array P400i", &SA5_access}, - {0x3211103C, "Smart Array E200i", &SA5_access}, - {0x3212103C, "Smart Array E200", &SA5_access}, - {0x3213103C, "Smart Array E200i", &SA5_access}, - {0x3214103C, "Smart Array E200i", &SA5_access}, - {0x3215103C, "Smart Array E200i", &SA5_access}, - {0x3237103C, "Smart Array E500", &SA5_access}, - {0x323D103C, "Smart Array P700m", &SA5_access}, -}; - -/* How long to wait (in milliseconds) for board to go into simple mode */ -#define MAX_CONFIG_WAIT 30000 -#define MAX_IOCTL_CONFIG_WAIT 1000 - -/*define how many times we will try a command because of bus resets */ -#define MAX_CMD_RETRIES 3 - -#define MAX_CTLR 32 - -/* Originally cciss driver only supports 8 major numbers */ -#define MAX_CTLR_ORIG 8 - -static ctlr_info_t *hba[MAX_CTLR]; - -static struct task_struct *cciss_scan_thread; -static DEFINE_MUTEX(scan_mutex); -static LIST_HEAD(scan_q); - -static void do_cciss_request(struct request_queue *q); -static irqreturn_t do_cciss_intx(int irq, void *dev_id); -static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id); -static int cciss_open(struct block_device *bdev, fmode_t mode); -static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode); -static void cciss_release(struct gendisk *disk, fmode_t mode); -static int cciss_ioctl(struct block_device *bdev, fmode_t mode, - unsigned int cmd, unsigned long arg); -static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo); - -static int cciss_revalidate(struct gendisk *disk); -static int rebuild_lun_table(ctlr_info_t *h, int first_time, int via_ioctl); -static int deregister_disk(ctlr_info_t *h, int drv_index, - int clear_all, int via_ioctl); - -static void cciss_read_capacity(ctlr_info_t *h, int logvol, - sector_t *total_size, unsigned int *block_size); -static void cciss_read_capacity_16(ctlr_info_t *h, int logvol, - sector_t *total_size, unsigned int *block_size); -static void cciss_geometry_inquiry(ctlr_info_t *h, int logvol, - sector_t total_size, - unsigned int block_size, InquiryData_struct *inq_buff, - drive_info_struct *drv); -static void cciss_interrupt_mode(ctlr_info_t *); -static int cciss_enter_simple_mode(struct ctlr_info *h); -static void start_io(ctlr_info_t *h); -static int sendcmd_withirq(ctlr_info_t *h, __u8 cmd, void *buff, size_t size, - __u8 page_code, unsigned char scsi3addr[], - int cmd_type); -static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c, - int attempt_retry); -static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c); - -static int add_to_scan_list(struct ctlr_info *h); -static int scan_thread(void *data); -static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c); -static void cciss_hba_release(struct device *dev); -static void cciss_device_release(struct device *dev); -static void cciss_free_gendisk(ctlr_info_t *h, int drv_index); -static void cciss_free_drive_info(ctlr_info_t *h, int drv_index); -static inline u32 next_command(ctlr_info_t *h); -static int cciss_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr, - u32 *cfg_base_addr, u64 *cfg_base_addr_index, - u64 *cfg_offset); -static int cciss_pci_find_memory_BAR(struct pci_dev *pdev, - unsigned long *memory_bar); 
-static inline u32 cciss_tag_discard_error_bits(ctlr_info_t *h, u32 tag); -static int write_driver_ver_to_cfgtable(CfgTable_struct __iomem *cfgtable); - -/* performant mode helper functions */ -static void calc_bucket_map(int *bucket, int num_buckets, int nsgs, - int *bucket_map); -static void cciss_put_controller_into_performant_mode(ctlr_info_t *h); - -#ifdef CONFIG_PROC_FS -static void cciss_procinit(ctlr_info_t *h); -#else -static void cciss_procinit(ctlr_info_t *h) -{ -} -#endif /* CONFIG_PROC_FS */ - -#ifdef CONFIG_COMPAT -static int cciss_compat_ioctl(struct block_device *, fmode_t, - unsigned, unsigned long); -#endif - -static const struct block_device_operations cciss_fops = { - .owner = THIS_MODULE, - .open = cciss_unlocked_open, - .release = cciss_release, - .ioctl = cciss_ioctl, - .getgeo = cciss_getgeo, -#ifdef CONFIG_COMPAT - .compat_ioctl = cciss_compat_ioctl, -#endif - .revalidate_disk = cciss_revalidate, -}; - -/* set_performant_mode: Modify the tag for cciss performant - * set bit 0 for pull model, bits 3-1 for block fetch - * register number - */ -static void set_performant_mode(ctlr_info_t *h, CommandList_struct *c) -{ - if (likely(h->transMethod & CFGTBL_Trans_Performant)) - c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); -} - -/* - * Enqueuing and dequeuing functions for cmdlists. - */ -static inline void addQ(struct list_head *list, CommandList_struct *c) -{ - list_add_tail(&c->list, list); -} - -static inline void removeQ(CommandList_struct *c) -{ - /* - * After kexec/dump some commands might still - * be in flight, which the firmware will try - * to complete. Resetting the firmware doesn't work - * with old fw revisions, so we have to mark - * them off as 'stale' to prevent the driver from - * falling over. - */ - if (WARN_ON(list_empty(&c->list))) { - c->cmd_type = CMD_MSG_STALE; - return; - } - - list_del_init(&c->list); -} - -static void enqueue_cmd_and_start_io(ctlr_info_t *h, - CommandList_struct *c) -{ - unsigned long flags; - set_performant_mode(h, c); - spin_lock_irqsave(&h->lock, flags); - addQ(&h->reqQ, c); - h->Qdepth++; - if (h->Qdepth > h->maxQsinceinit) - h->maxQsinceinit = h->Qdepth; - start_io(h); - spin_unlock_irqrestore(&h->lock, flags); -} - -static void cciss_free_sg_chain_blocks(SGDescriptor_struct **cmd_sg_list, - int nr_cmds) -{ - int i; - - if (!cmd_sg_list) - return; - for (i = 0; i < nr_cmds; i++) { - kfree(cmd_sg_list[i]); - cmd_sg_list[i] = NULL; - } - kfree(cmd_sg_list); -} - -static SGDescriptor_struct **cciss_allocate_sg_chain_blocks( - ctlr_info_t *h, int chainsize, int nr_cmds) -{ - int j; - SGDescriptor_struct **cmd_sg_list; - - if (chainsize <= 0) - return NULL; - - cmd_sg_list = kmalloc(sizeof(*cmd_sg_list) * nr_cmds, GFP_KERNEL); - if (!cmd_sg_list) - return NULL; - - /* Build up chain blocks for each command */ - for (j = 0; j < nr_cmds; j++) { - /* Need a block of chainsized s/g elements. 
*/ - cmd_sg_list[j] = kmalloc((chainsize * - sizeof(*cmd_sg_list[j])), GFP_KERNEL); - if (!cmd_sg_list[j]) { - dev_err(&h->pdev->dev, "Cannot get memory " - "for s/g chains.\n"); - goto clean; - } - } - return cmd_sg_list; -clean: - cciss_free_sg_chain_blocks(cmd_sg_list, nr_cmds); - return NULL; -} - -static void cciss_unmap_sg_chain_block(ctlr_info_t *h, CommandList_struct *c) -{ - SGDescriptor_struct *chain_sg; - u64bit temp64; - - if (c->Header.SGTotal <= h->max_cmd_sgentries) - return; - - chain_sg = &c->SG[h->max_cmd_sgentries - 1]; - temp64.val32.lower = chain_sg->Addr.lower; - temp64.val32.upper = chain_sg->Addr.upper; - pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE); -} - -static int cciss_map_sg_chain_block(ctlr_info_t *h, CommandList_struct *c, - SGDescriptor_struct *chain_block, int len) -{ - SGDescriptor_struct *chain_sg; - u64bit temp64; - - chain_sg = &c->SG[h->max_cmd_sgentries - 1]; - chain_sg->Ext = CCISS_SG_CHAIN; - chain_sg->Len = len; - temp64.val = pci_map_single(h->pdev, chain_block, len, - PCI_DMA_TODEVICE); - if (dma_mapping_error(&h->pdev->dev, temp64.val)) { - dev_warn(&h->pdev->dev, - "%s: error mapping chain block for DMA\n", - __func__); - return -1; - } - chain_sg->Addr.lower = temp64.val32.lower; - chain_sg->Addr.upper = temp64.val32.upper; - - return 0; -} - -#include "cciss_scsi.c" /* For SCSI tape support */ - -static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", - "UNKNOWN" -}; -#define RAID_UNKNOWN (ARRAY_SIZE(raid_label)-1) - -#ifdef CONFIG_PROC_FS - -/* - * Report information about this controller. - */ -#define ENG_GIG 1000000000 -#define ENG_GIG_FACTOR (ENG_GIG/512) -#define ENGAGE_SCSI "engage scsi" - -static void cciss_seq_show_header(struct seq_file *seq) -{ - ctlr_info_t *h = seq->private; - - seq_printf(seq, "%s: HP %s Controller\n" - "Board ID: 0x%08lx\n" - "Firmware Version: %c%c%c%c\n" - "IRQ: %d\n" - "Logical drives: %d\n" - "Current Q depth: %d\n" - "Current # commands on controller: %d\n" - "Max Q depth since init: %d\n" - "Max # commands on controller since init: %d\n" - "Max SG entries since init: %d\n", - h->devname, - h->product_name, - (unsigned long)h->board_id, - h->firm_ver[0], h->firm_ver[1], h->firm_ver[2], - h->firm_ver[3], (unsigned int)h->intr[h->intr_mode], - h->num_luns, - h->Qdepth, h->commands_outstanding, - h->maxQsinceinit, h->max_outstanding, h->maxSG); - -#ifdef CONFIG_CISS_SCSI_TAPE - cciss_seq_tape_report(seq, h); -#endif /* CONFIG_CISS_SCSI_TAPE */ -} - -static void *cciss_seq_start(struct seq_file *seq, loff_t *pos) -{ - ctlr_info_t *h = seq->private; - unsigned long flags; - - /* prevent displaying bogus info during configuration - * or deconfiguration of a logical volume - */ - spin_lock_irqsave(&h->lock, flags); - if (h->busy_configuring) { - spin_unlock_irqrestore(&h->lock, flags); - return ERR_PTR(-EBUSY); - } - h->busy_configuring = 1; - spin_unlock_irqrestore(&h->lock, flags); - - if (*pos == 0) - cciss_seq_show_header(seq); - - return pos; -} - -static int cciss_seq_show(struct seq_file *seq, void *v) -{ - sector_t vol_sz, vol_sz_frac; - ctlr_info_t *h = seq->private; - unsigned ctlr = h->ctlr; - loff_t *pos = v; - drive_info_struct *drv = h->drv[*pos]; - - if (*pos > h->highest_lun) - return 0; - - if (drv == NULL) /* it's possible for h->drv[] to have holes. 
*/ - return 0; - - if (drv->heads == 0) - return 0; - - vol_sz = drv->nr_blocks; - vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR); - vol_sz_frac *= 100; - sector_div(vol_sz_frac, ENG_GIG_FACTOR); - - if (drv->raid_level < 0 || drv->raid_level > RAID_UNKNOWN) - drv->raid_level = RAID_UNKNOWN; - seq_printf(seq, "cciss/c%dd%d:" - "\t%4u.%02uGB\tRAID %s\n", - ctlr, (int) *pos, (int)vol_sz, (int)vol_sz_frac, - raid_label[drv->raid_level]); - return 0; -} - -static void *cciss_seq_next(struct seq_file *seq, void *v, loff_t *pos) -{ - ctlr_info_t *h = seq->private; - - if (*pos > h->highest_lun) - return NULL; - *pos += 1; - - return pos; -} - -static void cciss_seq_stop(struct seq_file *seq, void *v) -{ - ctlr_info_t *h = seq->private; - - /* Only reset h->busy_configuring if we succeeded in setting - * it during cciss_seq_start. */ - if (v == ERR_PTR(-EBUSY)) - return; - - h->busy_configuring = 0; -} - -static const struct seq_operations cciss_seq_ops = { - .start = cciss_seq_start, - .show = cciss_seq_show, - .next = cciss_seq_next, - .stop = cciss_seq_stop, -}; - -static int cciss_seq_open(struct inode *inode, struct file *file) -{ - int ret = seq_open(file, &cciss_seq_ops); - struct seq_file *seq = file->private_data; - - if (!ret) - seq->private = PDE_DATA(inode); - - return ret; -} - -static ssize_t -cciss_proc_write(struct file *file, const char __user *buf, - size_t length, loff_t *ppos) -{ - int err; - char *buffer; - -#ifndef CONFIG_CISS_SCSI_TAPE - return -EINVAL; -#endif - - if (!buf || length > PAGE_SIZE - 1) - return -EINVAL; - - buffer = memdup_user_nul(buf, length); - if (IS_ERR(buffer)) - return PTR_ERR(buffer); - -#ifdef CONFIG_CISS_SCSI_TAPE - if (strncmp(ENGAGE_SCSI, buffer, sizeof ENGAGE_SCSI - 1) == 0) { - struct seq_file *seq = file->private_data; - ctlr_info_t *h = seq->private; - - err = cciss_engage_scsi(h); - if (err == 0) - err = length; - } else -#endif /* CONFIG_CISS_SCSI_TAPE */ - err = -EINVAL; - /* might be nice to have "disengage" too, but it's not - safely possible. (only 1 module use count, lock issues.) 
*/ - - kfree(buffer); - return err; -} - -static const struct file_operations cciss_proc_fops = { - .owner = THIS_MODULE, - .open = cciss_seq_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, - .write = cciss_proc_write, -}; - -static void cciss_procinit(ctlr_info_t *h) -{ - struct proc_dir_entry *pde; - - if (proc_cciss == NULL) - proc_cciss = proc_mkdir("driver/cciss", NULL); - if (!proc_cciss) - return; - pde = proc_create_data(h->devname, S_IWUSR | S_IRUSR | S_IRGRP | - S_IROTH, proc_cciss, - &cciss_proc_fops, h); -} -#endif /* CONFIG_PROC_FS */ - -#define MAX_PRODUCT_NAME_LEN 19 - -#define to_hba(n) container_of(n, struct ctlr_info, dev) -#define to_drv(n) container_of(n, drive_info_struct, dev) - -/* List of controllers which cannot be hard reset on kexec with reset_devices */ -static u32 unresettable_controller[] = { - 0x3223103C, /* Smart Array P800 */ - 0x3234103C, /* Smart Array P400 */ - 0x3235103C, /* Smart Array P400i */ - 0x3211103C, /* Smart Array E200i */ - 0x3212103C, /* Smart Array E200 */ - 0x3213103C, /* Smart Array E200i */ - 0x3214103C, /* Smart Array E200i */ - 0x3215103C, /* Smart Array E200i */ - 0x3237103C, /* Smart Array E500 */ - 0x323D103C, /* Smart Array P700m */ - 0x40800E11, /* Smart Array 5i */ - 0x409C0E11, /* Smart Array 6400 */ - 0x409D0E11, /* Smart Array 6400 EM */ - 0x40700E11, /* Smart Array 5300 */ - 0x40820E11, /* Smart Array 532 */ - 0x40830E11, /* Smart Array 5312 */ - 0x409A0E11, /* Smart Array 641 */ - 0x409B0E11, /* Smart Array 642 */ - 0x40910E11, /* Smart Array 6i */ -}; - -/* List of controllers which cannot even be soft reset */ -static u32 soft_unresettable_controller[] = { - 0x40800E11, /* Smart Array 5i */ - 0x40700E11, /* Smart Array 5300 */ - 0x40820E11, /* Smart Array 532 */ - 0x40830E11, /* Smart Array 5312 */ - 0x409A0E11, /* Smart Array 641 */ - 0x409B0E11, /* Smart Array 642 */ - 0x40910E11, /* Smart Array 6i */ - /* Exclude 640x boards. These are two pci devices in one slot - * which share a battery backed cache module. One controls the - * cache, the other accesses the cache through the one that controls - * it. If we reset the one controlling the cache, the other will - * likely not be happy. Just forbid resetting this conjoined mess. 
- */ - 0x409C0E11, /* Smart Array 6400 */ - 0x409D0E11, /* Smart Array 6400 EM */ -}; - -static int ctlr_is_hard_resettable(u32 board_id) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++) - if (unresettable_controller[i] == board_id) - return 0; - return 1; -} - -static int ctlr_is_soft_resettable(u32 board_id) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++) - if (soft_unresettable_controller[i] == board_id) - return 0; - return 1; -} - -static int ctlr_is_resettable(u32 board_id) -{ - return ctlr_is_hard_resettable(board_id) || - ctlr_is_soft_resettable(board_id); -} - -static ssize_t host_show_resettable(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct ctlr_info *h = to_hba(dev); - - return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id)); -} -static DEVICE_ATTR(resettable, S_IRUGO, host_show_resettable, NULL); - -static ssize_t host_store_rescan(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct ctlr_info *h = to_hba(dev); - - add_to_scan_list(h); - wake_up_process(cciss_scan_thread); - wait_for_completion_interruptible(&h->scan_wait); - - return count; -} -static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan); - -static ssize_t host_show_transport_mode(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct ctlr_info *h = to_hba(dev); - - return snprintf(buf, 20, "%s\n", - h->transMethod & CFGTBL_Trans_Performant ? - "performant" : "simple"); -} -static DEVICE_ATTR(transport_mode, S_IRUGO, host_show_transport_mode, NULL); - -static ssize_t dev_show_unique_id(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - drive_info_struct *drv = to_drv(dev); - struct ctlr_info *h = to_hba(drv->dev.parent); - __u8 sn[16]; - unsigned long flags; - int ret = 0; - - spin_lock_irqsave(&h->lock, flags); - if (h->busy_configuring) - ret = -EBUSY; - else - memcpy(sn, drv->serial_no, sizeof(sn)); - spin_unlock_irqrestore(&h->lock, flags); - - if (ret) - return ret; - else - return snprintf(buf, 16 * 2 + 2, - "%02X%02X%02X%02X%02X%02X%02X%02X" - "%02X%02X%02X%02X%02X%02X%02X%02X\n", - sn[0], sn[1], sn[2], sn[3], - sn[4], sn[5], sn[6], sn[7], - sn[8], sn[9], sn[10], sn[11], - sn[12], sn[13], sn[14], sn[15]); -} -static DEVICE_ATTR(unique_id, S_IRUGO, dev_show_unique_id, NULL); - -static ssize_t dev_show_vendor(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - drive_info_struct *drv = to_drv(dev); - struct ctlr_info *h = to_hba(drv->dev.parent); - char vendor[VENDOR_LEN + 1]; - unsigned long flags; - int ret = 0; - - spin_lock_irqsave(&h->lock, flags); - if (h->busy_configuring) - ret = -EBUSY; - else - memcpy(vendor, drv->vendor, VENDOR_LEN + 1); - spin_unlock_irqrestore(&h->lock, flags); - - if (ret) - return ret; - else - return snprintf(buf, sizeof(vendor) + 1, "%s\n", drv->vendor); -} -static DEVICE_ATTR(vendor, S_IRUGO, dev_show_vendor, NULL); - -static ssize_t dev_show_model(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - drive_info_struct *drv = to_drv(dev); - struct ctlr_info *h = to_hba(drv->dev.parent); - char model[MODEL_LEN + 1]; - unsigned long flags; - int ret = 0; - - spin_lock_irqsave(&h->lock, flags); - if (h->busy_configuring) - ret = -EBUSY; - else - memcpy(model, drv->model, MODEL_LEN + 1); - spin_unlock_irqrestore(&h->lock, flags); - - if (ret) - return ret; - else - return snprintf(buf, sizeof(model) + 1, "%s\n", drv->model); -} -static DEVICE_ATTR(model, 
S_IRUGO, dev_show_model, NULL); - -static ssize_t dev_show_rev(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - drive_info_struct *drv = to_drv(dev); - struct ctlr_info *h = to_hba(drv->dev.parent); - char rev[REV_LEN + 1]; - unsigned long flags; - int ret = 0; - - spin_lock_irqsave(&h->lock, flags); - if (h->busy_configuring) - ret = -EBUSY; - else - memcpy(rev, drv->rev, REV_LEN + 1); - spin_unlock_irqrestore(&h->lock, flags); - - if (ret) - return ret; - else - return snprintf(buf, sizeof(rev) + 1, "%s\n", drv->rev); -} -static DEVICE_ATTR(rev, S_IRUGO, dev_show_rev, NULL); - -static ssize_t cciss_show_lunid(struct device *dev, - struct device_attribute *attr, char *buf) -{ - drive_info_struct *drv = to_drv(dev); - struct ctlr_info *h = to_hba(drv->dev.parent); - unsigned long flags; - unsigned char lunid[8]; - - spin_lock_irqsave(&h->lock, flags); - if (h->busy_configuring) { - spin_unlock_irqrestore(&h->lock, flags); - return -EBUSY; - } - if (!drv->heads) { - spin_unlock_irqrestore(&h->lock, flags); - return -ENOTTY; - } - memcpy(lunid, drv->LunID, sizeof(lunid)); - spin_unlock_irqrestore(&h->lock, flags); - return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", - lunid[0], lunid[1], lunid[2], lunid[3], - lunid[4], lunid[5], lunid[6], lunid[7]); -} -static DEVICE_ATTR(lunid, S_IRUGO, cciss_show_lunid, NULL); - -static ssize_t cciss_show_raid_level(struct device *dev, - struct device_attribute *attr, char *buf) -{ - drive_info_struct *drv = to_drv(dev); - struct ctlr_info *h = to_hba(drv->dev.parent); - int raid; - unsigned long flags; - - spin_lock_irqsave(&h->lock, flags); - if (h->busy_configuring) { - spin_unlock_irqrestore(&h->lock, flags); - return -EBUSY; - } - raid = drv->raid_level; - spin_unlock_irqrestore(&h->lock, flags); - if (raid < 0 || raid > RAID_UNKNOWN) - raid = RAID_UNKNOWN; - - return snprintf(buf, strlen(raid_label[raid]) + 7, "RAID %s\n", - raid_label[raid]); -} -static DEVICE_ATTR(raid_level, S_IRUGO, cciss_show_raid_level, NULL); - -static ssize_t cciss_show_usage_count(struct device *dev, - struct device_attribute *attr, char *buf) -{ - drive_info_struct *drv = to_drv(dev); - struct ctlr_info *h = to_hba(drv->dev.parent); - unsigned long flags; - int count; - - spin_lock_irqsave(&h->lock, flags); - if (h->busy_configuring) { - spin_unlock_irqrestore(&h->lock, flags); - return -EBUSY; - } - count = drv->usage_count; - spin_unlock_irqrestore(&h->lock, flags); - return snprintf(buf, 20, "%d\n", count); -} -static DEVICE_ATTR(usage_count, S_IRUGO, cciss_show_usage_count, NULL); - -static struct attribute *cciss_host_attrs[] = { - &dev_attr_rescan.attr, - &dev_attr_resettable.attr, - &dev_attr_transport_mode.attr, - NULL -}; - -static struct attribute_group cciss_host_attr_group = { - .attrs = cciss_host_attrs, -}; - -static const struct attribute_group *cciss_host_attr_groups[] = { - &cciss_host_attr_group, - NULL -}; - -static struct device_type cciss_host_type = { - .name = "cciss_host", - .groups = cciss_host_attr_groups, - .release = cciss_hba_release, -}; - -static struct attribute *cciss_dev_attrs[] = { - &dev_attr_unique_id.attr, - &dev_attr_model.attr, - &dev_attr_vendor.attr, - &dev_attr_rev.attr, - &dev_attr_lunid.attr, - &dev_attr_raid_level.attr, - &dev_attr_usage_count.attr, - NULL -}; - -static struct attribute_group cciss_dev_attr_group = { - .attrs = cciss_dev_attrs, -}; - -static const struct attribute_group *cciss_dev_attr_groups[] = { - &cciss_dev_attr_group, - NULL -}; - -static struct device_type cciss_dev_type 
= { - .name = "cciss_device", - .groups = cciss_dev_attr_groups, - .release = cciss_device_release, -}; - -static struct bus_type cciss_bus_type = { - .name = "cciss", -}; - -/* - * cciss_hba_release is called when the reference count - * of h->dev goes to zero. - */ -static void cciss_hba_release(struct device *dev) -{ - /* - * nothing to do, but need this to avoid a warning - * about not having a release handler from lib/kref.c. - */ -} - -/* - * Initialize sysfs entry for each controller. This sets up and registers - * the 'cciss#' directory for each individual controller under - * /sys/bus/pci/devices//. - */ -static int cciss_create_hba_sysfs_entry(struct ctlr_info *h) -{ - device_initialize(&h->dev); - h->dev.type = &cciss_host_type; - h->dev.bus = &cciss_bus_type; - dev_set_name(&h->dev, "%s", h->devname); - h->dev.parent = &h->pdev->dev; - - return device_add(&h->dev); -} - -/* - * Remove sysfs entries for an hba. - */ -static void cciss_destroy_hba_sysfs_entry(struct ctlr_info *h) -{ - device_del(&h->dev); - put_device(&h->dev); /* final put. */ -} - -/* cciss_device_release is called when the reference count - * of h->drv[x]dev goes to zero. - */ -static void cciss_device_release(struct device *dev) -{ - drive_info_struct *drv = to_drv(dev); - kfree(drv); -} - -/* - * Initialize sysfs for each logical drive. This sets up and registers - * the 'c#d#' directory for each individual logical drive under - * /sys/bus/pci/devices/drv[drv_index]->device_initialized) - return 0; - - dev = &h->drv[drv_index]->dev; - device_initialize(dev); - dev->type = &cciss_dev_type; - dev->bus = &cciss_bus_type; - dev_set_name(dev, "c%dd%d", h->ctlr, drv_index); - dev->parent = &h->dev; - h->drv[drv_index]->device_initialized = 1; - return device_add(dev); -} - -/* - * Remove sysfs entries for a logical drive. - */ -static void cciss_destroy_ld_sysfs_entry(struct ctlr_info *h, int drv_index, - int ctlr_exiting) -{ - struct device *dev = &h->drv[drv_index]->dev; - - /* special case for c*d0, we only destroy it on controller exit */ - if (drv_index == 0 && !ctlr_exiting) - return; - - device_del(dev); - put_device(dev); /* the "final" put. */ - h->drv[drv_index] = NULL; -} - -/* - * For operations that cannot sleep, a command block is allocated at init, - * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track - * which ones are free or in use. - */ -static CommandList_struct *cmd_alloc(ctlr_info_t *h) -{ - CommandList_struct *c; - int i; - u64bit temp64; - dma_addr_t cmd_dma_handle, err_dma_handle; - - do { - i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds); - if (i == h->nr_cmds) - return NULL; - } while (test_and_set_bit(i, h->cmd_pool_bits) != 0); - c = h->cmd_pool + i; - memset(c, 0, sizeof(CommandList_struct)); - cmd_dma_handle = h->cmd_pool_dhandle + i * sizeof(CommandList_struct); - c->err_info = h->errinfo_pool + i; - memset(c->err_info, 0, sizeof(ErrorInfo_struct)); - err_dma_handle = h->errinfo_pool_dhandle - + i * sizeof(ErrorInfo_struct); - h->nr_allocs++; - - c->cmdindex = i; - - INIT_LIST_HEAD(&c->list); - c->busaddr = (__u32) cmd_dma_handle; - temp64.val = (__u64) err_dma_handle; - c->ErrDesc.Addr.lower = temp64.val32.lower; - c->ErrDesc.Addr.upper = temp64.val32.upper; - c->ErrDesc.Len = sizeof(ErrorInfo_struct); - - c->ctlr = h->ctlr; - return c; -} - -/* allocate a command using pci_alloc_consistent, used for ioctls, - * etc., not for the main i/o path. 
- */ -static CommandList_struct *cmd_special_alloc(ctlr_info_t *h) -{ - CommandList_struct *c; - u64bit temp64; - dma_addr_t cmd_dma_handle, err_dma_handle; - - c = pci_zalloc_consistent(h->pdev, sizeof(CommandList_struct), - &cmd_dma_handle); - if (c == NULL) - return NULL; - - c->cmdindex = -1; - - c->err_info = pci_zalloc_consistent(h->pdev, sizeof(ErrorInfo_struct), - &err_dma_handle); - - if (c->err_info == NULL) { - pci_free_consistent(h->pdev, - sizeof(CommandList_struct), c, cmd_dma_handle); - return NULL; - } - - INIT_LIST_HEAD(&c->list); - c->busaddr = (__u32) cmd_dma_handle; - temp64.val = (__u64) err_dma_handle; - c->ErrDesc.Addr.lower = temp64.val32.lower; - c->ErrDesc.Addr.upper = temp64.val32.upper; - c->ErrDesc.Len = sizeof(ErrorInfo_struct); - - c->ctlr = h->ctlr; - return c; -} - -static void cmd_free(ctlr_info_t *h, CommandList_struct *c) -{ - int i; - - i = c - h->cmd_pool; - clear_bit(i, h->cmd_pool_bits); - h->nr_frees++; -} - -static void cmd_special_free(ctlr_info_t *h, CommandList_struct *c) -{ - u64bit temp64; - - temp64.val32.lower = c->ErrDesc.Addr.lower; - temp64.val32.upper = c->ErrDesc.Addr.upper; - pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct), - c->err_info, (dma_addr_t) temp64.val); - pci_free_consistent(h->pdev, sizeof(CommandList_struct), c, - (dma_addr_t) cciss_tag_discard_error_bits(h, (u32) c->busaddr)); -} - -static inline ctlr_info_t *get_host(struct gendisk *disk) -{ - return disk->queue->queuedata; -} - -static inline drive_info_struct *get_drv(struct gendisk *disk) -{ - return disk->private_data; -} - -/* - * Open. Make sure the device is really there. - */ -static int cciss_open(struct block_device *bdev, fmode_t mode) -{ - ctlr_info_t *h = get_host(bdev->bd_disk); - drive_info_struct *drv = get_drv(bdev->bd_disk); - - dev_dbg(&h->pdev->dev, "cciss_open %s\n", bdev->bd_disk->disk_name); - if (drv->busy_configuring) - return -EBUSY; - /* - * Root is allowed to open raw volume zero even if it's not configured - * so array config can still work. Root is also allowed to open any - * volume that has a LUN ID, so it can issue IOCTL to reread the - * disk information. I don't think I really like this - * but I'm already using way to many device nodes to claim another one - * for "raw controller". - */ - if (drv->heads == 0) { - if (MINOR(bdev->bd_dev) != 0) { /* not node 0? */ - /* if not node 0 make sure it is a partition = 0 */ - if (MINOR(bdev->bd_dev) & 0x0f) { - return -ENXIO; - /* if it is, make sure we have a LUN ID */ - } else if (memcmp(drv->LunID, CTLR_LUNID, - sizeof(drv->LunID))) { - return -ENXIO; - } - } - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - } - drv->usage_count++; - h->usage_count++; - return 0; -} - -static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode) -{ - int ret; - - mutex_lock(&cciss_mutex); - ret = cciss_open(bdev, mode); - mutex_unlock(&cciss_mutex); - - return ret; -} - -/* - * Close. Sync first. 
- */ -static void cciss_release(struct gendisk *disk, fmode_t mode) -{ - ctlr_info_t *h; - drive_info_struct *drv; - - mutex_lock(&cciss_mutex); - h = get_host(disk); - drv = get_drv(disk); - dev_dbg(&h->pdev->dev, "cciss_release %s\n", disk->disk_name); - drv->usage_count--; - h->usage_count--; - mutex_unlock(&cciss_mutex); -} - -#ifdef CONFIG_COMPAT - -static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode, - unsigned cmd, unsigned long arg); -static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode, - unsigned cmd, unsigned long arg); - -static int cciss_compat_ioctl(struct block_device *bdev, fmode_t mode, - unsigned cmd, unsigned long arg) -{ - switch (cmd) { - case CCISS_GETPCIINFO: - case CCISS_GETINTINFO: - case CCISS_SETINTINFO: - case CCISS_GETNODENAME: - case CCISS_SETNODENAME: - case CCISS_GETHEARTBEAT: - case CCISS_GETBUSTYPES: - case CCISS_GETFIRMVER: - case CCISS_GETDRIVVER: - case CCISS_REVALIDVOLS: - case CCISS_DEREGDISK: - case CCISS_REGNEWDISK: - case CCISS_REGNEWD: - case CCISS_RESCANDISK: - case CCISS_GETLUNINFO: - return cciss_ioctl(bdev, mode, cmd, arg); - - case CCISS_PASSTHRU32: - return cciss_ioctl32_passthru(bdev, mode, cmd, arg); - case CCISS_BIG_PASSTHRU32: - return cciss_ioctl32_big_passthru(bdev, mode, cmd, arg); - - default: - return -ENOIOCTLCMD; - } -} - -static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode, - unsigned cmd, unsigned long arg) -{ - IOCTL32_Command_struct __user *arg32 = - (IOCTL32_Command_struct __user *) arg; - IOCTL_Command_struct arg64; - IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64)); - int err; - u32 cp; - - memset(&arg64, 0, sizeof(arg64)); - err = 0; - err |= - copy_from_user(&arg64.LUN_info, &arg32->LUN_info, - sizeof(arg64.LUN_info)); - err |= - copy_from_user(&arg64.Request, &arg32->Request, - sizeof(arg64.Request)); - err |= - copy_from_user(&arg64.error_info, &arg32->error_info, - sizeof(arg64.error_info)); - err |= get_user(arg64.buf_size, &arg32->buf_size); - err |= get_user(cp, &arg32->buf); - arg64.buf = compat_ptr(cp); - err |= copy_to_user(p, &arg64, sizeof(arg64)); - - if (err) - return -EFAULT; - - err = cciss_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p); - if (err) - return err; - err |= - copy_in_user(&arg32->error_info, &p->error_info, - sizeof(arg32->error_info)); - if (err) - return -EFAULT; - return err; -} - -static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode, - unsigned cmd, unsigned long arg) -{ - BIG_IOCTL32_Command_struct __user *arg32 = - (BIG_IOCTL32_Command_struct __user *) arg; - BIG_IOCTL_Command_struct arg64; - BIG_IOCTL_Command_struct __user *p = - compat_alloc_user_space(sizeof(arg64)); - int err; - u32 cp; - - memset(&arg64, 0, sizeof(arg64)); - err = 0; - err |= - copy_from_user(&arg64.LUN_info, &arg32->LUN_info, - sizeof(arg64.LUN_info)); - err |= - copy_from_user(&arg64.Request, &arg32->Request, - sizeof(arg64.Request)); - err |= - copy_from_user(&arg64.error_info, &arg32->error_info, - sizeof(arg64.error_info)); - err |= get_user(arg64.buf_size, &arg32->buf_size); - err |= get_user(arg64.malloc_size, &arg32->malloc_size); - err |= get_user(cp, &arg32->buf); - arg64.buf = compat_ptr(cp); - err |= copy_to_user(p, &arg64, sizeof(arg64)); - - if (err) - return -EFAULT; - - err = cciss_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p); - if (err) - return err; - err |= - copy_in_user(&arg32->error_info, &p->error_info, - sizeof(arg32->error_info)); - if (err) - return -EFAULT; - 
return err; -} -#endif - -static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo) -{ - drive_info_struct *drv = get_drv(bdev->bd_disk); - - if (!drv->cylinders) - return -ENXIO; - - geo->heads = drv->heads; - geo->sectors = drv->sectors; - geo->cylinders = drv->cylinders; - return 0; -} - -static void check_ioctl_unit_attention(ctlr_info_t *h, CommandList_struct *c) -{ - if (c->err_info->CommandStatus == CMD_TARGET_STATUS && - c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) - (void)check_for_unit_attention(h, c); -} - -static int cciss_getpciinfo(ctlr_info_t *h, void __user *argp) -{ - cciss_pci_info_struct pciinfo; - - if (!argp) - return -EINVAL; - pciinfo.domain = pci_domain_nr(h->pdev->bus); - pciinfo.bus = h->pdev->bus->number; - pciinfo.dev_fn = h->pdev->devfn; - pciinfo.board_id = h->board_id; - if (copy_to_user(argp, &pciinfo, sizeof(cciss_pci_info_struct))) - return -EFAULT; - return 0; -} - -static int cciss_getintinfo(ctlr_info_t *h, void __user *argp) -{ - cciss_coalint_struct intinfo; - unsigned long flags; - - if (!argp) - return -EINVAL; - spin_lock_irqsave(&h->lock, flags); - intinfo.delay = readl(&h->cfgtable->HostWrite.CoalIntDelay); - intinfo.count = readl(&h->cfgtable->HostWrite.CoalIntCount); - spin_unlock_irqrestore(&h->lock, flags); - if (copy_to_user - (argp, &intinfo, sizeof(cciss_coalint_struct))) - return -EFAULT; - return 0; -} - -static int cciss_setintinfo(ctlr_info_t *h, void __user *argp) -{ - cciss_coalint_struct intinfo; - unsigned long flags; - int i; - - if (!argp) - return -EINVAL; - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - if (copy_from_user(&intinfo, argp, sizeof(intinfo))) - return -EFAULT; - if ((intinfo.delay == 0) && (intinfo.count == 0)) - return -EINVAL; - spin_lock_irqsave(&h->lock, flags); - /* Update the field, and then ring the doorbell */ - writel(intinfo.delay, &(h->cfgtable->HostWrite.CoalIntDelay)); - writel(intinfo.count, &(h->cfgtable->HostWrite.CoalIntCount)); - writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); - - for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) { - if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq)) - break; - udelay(1000); /* delay and try again */ - } - spin_unlock_irqrestore(&h->lock, flags); - if (i >= MAX_IOCTL_CONFIG_WAIT) - return -EAGAIN; - return 0; -} - -static int cciss_getnodename(ctlr_info_t *h, void __user *argp) -{ - NodeName_type NodeName; - unsigned long flags; - int i; - - if (!argp) - return -EINVAL; - spin_lock_irqsave(&h->lock, flags); - for (i = 0; i < 16; i++) - NodeName[i] = readb(&h->cfgtable->ServerName[i]); - spin_unlock_irqrestore(&h->lock, flags); - if (copy_to_user(argp, NodeName, sizeof(NodeName_type))) - return -EFAULT; - return 0; -} - -static int cciss_setnodename(ctlr_info_t *h, void __user *argp) -{ - NodeName_type NodeName; - unsigned long flags; - int i; - - if (!argp) - return -EINVAL; - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - if (copy_from_user(NodeName, argp, sizeof(NodeName_type))) - return -EFAULT; - spin_lock_irqsave(&h->lock, flags); - /* Update the field, and then ring the doorbell */ - for (i = 0; i < 16; i++) - writeb(NodeName[i], &h->cfgtable->ServerName[i]); - writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); - for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) { - if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq)) - break; - udelay(1000); /* delay and try again */ - } - spin_unlock_irqrestore(&h->lock, flags); - if (i >= MAX_IOCTL_CONFIG_WAIT) - return -EAGAIN; - return 0; -} - -static int cciss_getheartbeat(ctlr_info_t *h, 
void __user *argp) -{ - Heartbeat_type heartbeat; - unsigned long flags; - - if (!argp) - return -EINVAL; - spin_lock_irqsave(&h->lock, flags); - heartbeat = readl(&h->cfgtable->HeartBeat); - spin_unlock_irqrestore(&h->lock, flags); - if (copy_to_user(argp, &heartbeat, sizeof(Heartbeat_type))) - return -EFAULT; - return 0; -} - -static int cciss_getbustypes(ctlr_info_t *h, void __user *argp) -{ - BusTypes_type BusTypes; - unsigned long flags; - - if (!argp) - return -EINVAL; - spin_lock_irqsave(&h->lock, flags); - BusTypes = readl(&h->cfgtable->BusTypes); - spin_unlock_irqrestore(&h->lock, flags); - if (copy_to_user(argp, &BusTypes, sizeof(BusTypes_type))) - return -EFAULT; - return 0; -} - -static int cciss_getfirmver(ctlr_info_t *h, void __user *argp) -{ - FirmwareVer_type firmware; - - if (!argp) - return -EINVAL; - memcpy(firmware, h->firm_ver, 4); - - if (copy_to_user - (argp, firmware, sizeof(FirmwareVer_type))) - return -EFAULT; - return 0; -} - -static int cciss_getdrivver(ctlr_info_t *h, void __user *argp) -{ - DriverVer_type DriverVer = DRIVER_VERSION; - - if (!argp) - return -EINVAL; - if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type))) - return -EFAULT; - return 0; -} - -static int cciss_getluninfo(ctlr_info_t *h, - struct gendisk *disk, void __user *argp) -{ - LogvolInfo_struct luninfo; - drive_info_struct *drv = get_drv(disk); - - if (!argp) - return -EINVAL; - memcpy(&luninfo.LunID, drv->LunID, sizeof(luninfo.LunID)); - luninfo.num_opens = drv->usage_count; - luninfo.num_parts = 0; - if (copy_to_user(argp, &luninfo, sizeof(LogvolInfo_struct))) - return -EFAULT; - return 0; -} - -static int cciss_passthru(ctlr_info_t *h, void __user *argp) -{ - IOCTL_Command_struct iocommand; - CommandList_struct *c; - char *buff = NULL; - u64bit temp64; - DECLARE_COMPLETION_ONSTACK(wait); - - if (!argp) - return -EINVAL; - - if (!capable(CAP_SYS_RAWIO)) - return -EPERM; - - if (copy_from_user - (&iocommand, argp, sizeof(IOCTL_Command_struct))) - return -EFAULT; - if ((iocommand.buf_size < 1) && - (iocommand.Request.Type.Direction != XFER_NONE)) { - return -EINVAL; - } - if (iocommand.buf_size > 0) { - buff = kmalloc(iocommand.buf_size, GFP_KERNEL); - if (buff == NULL) - return -EFAULT; - } - if (iocommand.Request.Type.Direction == XFER_WRITE) { - /* Copy the data into the buffer we created */ - if (copy_from_user(buff, iocommand.buf, iocommand.buf_size)) { - kfree(buff); - return -EFAULT; - } - } else { - memset(buff, 0, iocommand.buf_size); - } - c = cmd_special_alloc(h); - if (!c) { - kfree(buff); - return -ENOMEM; - } - /* Fill in the command type */ - c->cmd_type = CMD_IOCTL_PEND; - /* Fill in Command Header */ - c->Header.ReplyQueue = 0; /* unused in simple mode */ - if (iocommand.buf_size > 0) { /* buffer to fill */ - c->Header.SGList = 1; - c->Header.SGTotal = 1; - } else { /* no buffers to fill */ - c->Header.SGList = 0; - c->Header.SGTotal = 0; - } - c->Header.LUN = iocommand.LUN_info; - /* use the kernel address the cmd block for tag */ - c->Header.Tag.lower = c->busaddr; - - /* Fill in Request block */ - c->Request = iocommand.Request; - - /* Fill in the scatter gather information */ - if (iocommand.buf_size > 0) { - temp64.val = pci_map_single(h->pdev, buff, - iocommand.buf_size, PCI_DMA_BIDIRECTIONAL); - c->SG[0].Addr.lower = temp64.val32.lower; - c->SG[0].Addr.upper = temp64.val32.upper; - c->SG[0].Len = iocommand.buf_size; - c->SG[0].Ext = 0; /* we are not chaining */ - } - c->waiting = &wait; - - enqueue_cmd_and_start_io(h, c); - wait_for_completion(&wait); - - /* 
unlock the buffers from DMA */ - temp64.val32.lower = c->SG[0].Addr.lower; - temp64.val32.upper = c->SG[0].Addr.upper; - pci_unmap_single(h->pdev, (dma_addr_t) temp64.val, iocommand.buf_size, - PCI_DMA_BIDIRECTIONAL); - check_ioctl_unit_attention(h, c); - - /* Copy the error information out */ - iocommand.error_info = *(c->err_info); - if (copy_to_user(argp, &iocommand, sizeof(IOCTL_Command_struct))) { - kfree(buff); - cmd_special_free(h, c); - return -EFAULT; - } - - if (iocommand.Request.Type.Direction == XFER_READ) { - /* Copy the data out of the buffer we created */ - if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) { - kfree(buff); - cmd_special_free(h, c); - return -EFAULT; - } - } - kfree(buff); - cmd_special_free(h, c); - return 0; -} - -static int cciss_bigpassthru(ctlr_info_t *h, void __user *argp) -{ - BIG_IOCTL_Command_struct *ioc; - CommandList_struct *c; - unsigned char **buff = NULL; - int *buff_size = NULL; - u64bit temp64; - BYTE sg_used = 0; - int status = 0; - int i; - DECLARE_COMPLETION_ONSTACK(wait); - __u32 left; - __u32 sz; - BYTE __user *data_ptr; - - if (!argp) - return -EINVAL; - if (!capable(CAP_SYS_RAWIO)) - return -EPERM; - ioc = kmalloc(sizeof(*ioc), GFP_KERNEL); - if (!ioc) { - status = -ENOMEM; - goto cleanup1; - } - if (copy_from_user(ioc, argp, sizeof(*ioc))) { - status = -EFAULT; - goto cleanup1; - } - if ((ioc->buf_size < 1) && - (ioc->Request.Type.Direction != XFER_NONE)) { - status = -EINVAL; - goto cleanup1; - } - /* Check kmalloc limits using all SGs */ - if (ioc->malloc_size > MAX_KMALLOC_SIZE) { - status = -EINVAL; - goto cleanup1; - } - if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) { - status = -EINVAL; - goto cleanup1; - } - buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL); - if (!buff) { - status = -ENOMEM; - goto cleanup1; - } - buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL); - if (!buff_size) { - status = -ENOMEM; - goto cleanup1; - } - left = ioc->buf_size; - data_ptr = ioc->buf; - while (left) { - sz = (left > ioc->malloc_size) ? 
ioc->malloc_size : left; - buff_size[sg_used] = sz; - buff[sg_used] = kmalloc(sz, GFP_KERNEL); - if (buff[sg_used] == NULL) { - status = -ENOMEM; - goto cleanup1; - } - if (ioc->Request.Type.Direction == XFER_WRITE) { - if (copy_from_user(buff[sg_used], data_ptr, sz)) { - status = -EFAULT; - goto cleanup1; - } - } else { - memset(buff[sg_used], 0, sz); - } - left -= sz; - data_ptr += sz; - sg_used++; - } - c = cmd_special_alloc(h); - if (!c) { - status = -ENOMEM; - goto cleanup1; - } - c->cmd_type = CMD_IOCTL_PEND; - c->Header.ReplyQueue = 0; - c->Header.SGList = sg_used; - c->Header.SGTotal = sg_used; - c->Header.LUN = ioc->LUN_info; - c->Header.Tag.lower = c->busaddr; - - c->Request = ioc->Request; - for (i = 0; i < sg_used; i++) { - temp64.val = pci_map_single(h->pdev, buff[i], buff_size[i], - PCI_DMA_BIDIRECTIONAL); - c->SG[i].Addr.lower = temp64.val32.lower; - c->SG[i].Addr.upper = temp64.val32.upper; - c->SG[i].Len = buff_size[i]; - c->SG[i].Ext = 0; /* we are not chaining */ - } - c->waiting = &wait; - enqueue_cmd_and_start_io(h, c); - wait_for_completion(&wait); - /* unlock the buffers from DMA */ - for (i = 0; i < sg_used; i++) { - temp64.val32.lower = c->SG[i].Addr.lower; - temp64.val32.upper = c->SG[i].Addr.upper; - pci_unmap_single(h->pdev, - (dma_addr_t) temp64.val, buff_size[i], - PCI_DMA_BIDIRECTIONAL); - } - check_ioctl_unit_attention(h, c); - /* Copy the error information out */ - ioc->error_info = *(c->err_info); - if (copy_to_user(argp, ioc, sizeof(*ioc))) { - cmd_special_free(h, c); - status = -EFAULT; - goto cleanup1; - } - if (ioc->Request.Type.Direction == XFER_READ) { - /* Copy the data out of the buffer we created */ - BYTE __user *ptr = ioc->buf; - for (i = 0; i < sg_used; i++) { - if (copy_to_user(ptr, buff[i], buff_size[i])) { - cmd_special_free(h, c); - status = -EFAULT; - goto cleanup1; - } - ptr += buff_size[i]; - } - } - cmd_special_free(h, c); - status = 0; -cleanup1: - if (buff) { - for (i = 0; i < sg_used; i++) - kfree(buff[i]); - kfree(buff); - } - kfree(buff_size); - kfree(ioc); - return status; -} - -static int cciss_ioctl(struct block_device *bdev, fmode_t mode, - unsigned int cmd, unsigned long arg) -{ - struct gendisk *disk = bdev->bd_disk; - ctlr_info_t *h = get_host(disk); - void __user *argp = (void __user *)arg; - - dev_dbg(&h->pdev->dev, "cciss_ioctl: Called with cmd=%x %lx\n", - cmd, arg); - switch (cmd) { - case CCISS_GETPCIINFO: - return cciss_getpciinfo(h, argp); - case CCISS_GETINTINFO: - return cciss_getintinfo(h, argp); - case CCISS_SETINTINFO: - return cciss_setintinfo(h, argp); - case CCISS_GETNODENAME: - return cciss_getnodename(h, argp); - case CCISS_SETNODENAME: - return cciss_setnodename(h, argp); - case CCISS_GETHEARTBEAT: - return cciss_getheartbeat(h, argp); - case CCISS_GETBUSTYPES: - return cciss_getbustypes(h, argp); - case CCISS_GETFIRMVER: - return cciss_getfirmver(h, argp); - case CCISS_GETDRIVVER: - return cciss_getdrivver(h, argp); - case CCISS_DEREGDISK: - case CCISS_REGNEWD: - case CCISS_REVALIDVOLS: - return rebuild_lun_table(h, 0, 1); - case CCISS_GETLUNINFO: - return cciss_getluninfo(h, disk, argp); - case CCISS_PASSTHRU: - return cciss_passthru(h, argp); - case CCISS_BIG_PASSTHRU: - return cciss_bigpassthru(h, argp); - - /* scsi_cmd_blk_ioctl handles these, below, though some are not */ - /* very meaningful for cciss. SG_IO is the main one people want. 
*/ - - case SG_GET_VERSION_NUM: - case SG_SET_TIMEOUT: - case SG_GET_TIMEOUT: - case SG_GET_RESERVED_SIZE: - case SG_SET_RESERVED_SIZE: - case SG_EMULATED_HOST: - case SG_IO: - case SCSI_IOCTL_SEND_COMMAND: - return scsi_cmd_blk_ioctl(bdev, mode, cmd, argp); - - /* scsi_cmd_blk_ioctl would normally handle these, below, but */ - /* they aren't a good fit for cciss, as CD-ROMs are */ - /* not supported, and we don't have any bus/target/lun */ - /* which we present to the kernel. */ - - case CDROM_SEND_PACKET: - case CDROMCLOSETRAY: - case CDROMEJECT: - case SCSI_IOCTL_GET_IDLUN: - case SCSI_IOCTL_GET_BUS_NUMBER: - default: - return -ENOTTY; - } -} - -static void cciss_check_queues(ctlr_info_t *h) -{ - int start_queue = h->next_to_run; - int i; - - /* check to see if we have maxed out the number of commands that can - * be placed on the queue. If so then exit. We do this check here - * in case the interrupt we serviced was from an ioctl and did not - * free any new commands. - */ - if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds) - return; - - /* We have room on the queue for more commands. Now we need to queue - * them up. We will also keep track of the next queue to run so - * that every queue gets a chance to be started first. - */ - for (i = 0; i < h->highest_lun + 1; i++) { - int curr_queue = (start_queue + i) % (h->highest_lun + 1); - /* make sure the disk has been added and the drive is real - * because this can be called from the middle of init_one. - */ - if (!h->drv[curr_queue]) - continue; - if (!(h->drv[curr_queue]->queue) || - !(h->drv[curr_queue]->heads)) - continue; - blk_start_queue(h->gendisk[curr_queue]->queue); - - /* check to see if we have maxed out the number of commands - * that can be placed on the queue. - */ - if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds) { - if (curr_queue == start_queue) { - h->next_to_run = - (start_queue + 1) % (h->highest_lun + 1); - break; - } else { - h->next_to_run = curr_queue; - break; - } - } - } -} - -static void cciss_softirq_done(struct request *rq) -{ - CommandList_struct *c = rq->completion_data; - ctlr_info_t *h = hba[c->ctlr]; - SGDescriptor_struct *curr_sg = c->SG; - u64bit temp64; - unsigned long flags; - int i, ddir; - int sg_index = 0; - - if (c->Request.Type.Direction == XFER_READ) - ddir = PCI_DMA_FROMDEVICE; - else - ddir = PCI_DMA_TODEVICE; - - /* command did not need to be retried */ - /* unmap the DMA mapping for all the scatter gather elements */ - for (i = 0; i < c->Header.SGList; i++) { - if (curr_sg[sg_index].Ext == CCISS_SG_CHAIN) { - cciss_unmap_sg_chain_block(h, c); - /* Point to the next block */ - curr_sg = h->cmd_sg_list[c->cmdindex]; - sg_index = 0; - } - temp64.val32.lower = curr_sg[sg_index].Addr.lower; - temp64.val32.upper = curr_sg[sg_index].Addr.upper; - pci_unmap_page(h->pdev, temp64.val, curr_sg[sg_index].Len, - ddir); - ++sg_index; - } - - dev_dbg(&h->pdev->dev, "Done with %p\n", rq); - - /* set the residual count for pc requests */ - if (blk_rq_is_passthrough(rq)) - scsi_req(rq)->resid_len = c->err_info->ResidualCnt; - blk_end_request_all(rq, scsi_req(rq)->result ? 
- BLK_STS_IOERR : BLK_STS_OK); - - spin_lock_irqsave(&h->lock, flags); - cmd_free(h, c); - cciss_check_queues(h); - spin_unlock_irqrestore(&h->lock, flags); -} - -static inline void log_unit_to_scsi3addr(ctlr_info_t *h, - unsigned char scsi3addr[], uint32_t log_unit) -{ - memcpy(scsi3addr, h->drv[log_unit]->LunID, - sizeof(h->drv[log_unit]->LunID)); -} - -/* This function gets the SCSI vendor, model, and revision of a logical drive - * via the inquiry page 0. Model, vendor, and rev are set to empty strings if - * they cannot be read. - */ -static void cciss_get_device_descr(ctlr_info_t *h, int logvol, - char *vendor, char *model, char *rev) -{ - int rc; - InquiryData_struct *inq_buf; - unsigned char scsi3addr[8]; - - *vendor = '\0'; - *model = '\0'; - *rev = '\0'; - - inq_buf = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL); - if (!inq_buf) - return; - - log_unit_to_scsi3addr(h, scsi3addr, logvol); - rc = sendcmd_withirq(h, CISS_INQUIRY, inq_buf, sizeof(*inq_buf), 0, - scsi3addr, TYPE_CMD); - if (rc == IO_OK) { - memcpy(vendor, &inq_buf->data_byte[8], VENDOR_LEN); - vendor[VENDOR_LEN] = '\0'; - memcpy(model, &inq_buf->data_byte[16], MODEL_LEN); - model[MODEL_LEN] = '\0'; - memcpy(rev, &inq_buf->data_byte[32], REV_LEN); - rev[REV_LEN] = '\0'; - } - - kfree(inq_buf); - return; -} - -/* This function gets the serial number of a logical drive via - * inquiry page 0x83. Serial no. is 16 bytes. If the serial - * number cannot be had, for whatever reason, 16 bytes of 0xff - * are returned instead. - */ -static void cciss_get_serial_no(ctlr_info_t *h, int logvol, - unsigned char *serial_no, int buflen) -{ -#define PAGE_83_INQ_BYTES 64 - int rc; - unsigned char *buf; - unsigned char scsi3addr[8]; - - if (buflen > 16) - buflen = 16; - memset(serial_no, 0xff, buflen); - buf = kzalloc(PAGE_83_INQ_BYTES, GFP_KERNEL); - if (!buf) - return; - memset(serial_no, 0, buflen); - log_unit_to_scsi3addr(h, scsi3addr, logvol); - rc = sendcmd_withirq(h, CISS_INQUIRY, buf, - PAGE_83_INQ_BYTES, 0x83, scsi3addr, TYPE_CMD); - if (rc == IO_OK) - memcpy(serial_no, &buf[8], buflen); - kfree(buf); - return; -} - -static void cciss_initialize_rq(struct request *rq) -{ - struct scsi_request *sreq = blk_mq_rq_to_pdu(rq); - - scsi_req_init(sreq); -} - -/* - * cciss_add_disk sets up the block device queue for a logical drive - */ -static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk, - int drv_index) -{ - disk->queue = blk_alloc_queue(GFP_KERNEL); - if (!disk->queue) - goto init_queue_failure; - - disk->queue->cmd_size = sizeof(struct scsi_request); - disk->queue->request_fn = do_cciss_request; - disk->queue->initialize_rq_fn = cciss_initialize_rq; - disk->queue->queue_lock = &h->lock; - queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, disk->queue); - if (blk_init_allocated_queue(disk->queue) < 0) - goto cleanup_queue; - - sprintf(disk->disk_name, "cciss/c%dd%d", h->ctlr, drv_index); - disk->major = h->major; - disk->first_minor = drv_index << NWD_SHIFT; - disk->fops = &cciss_fops; - if (cciss_create_ld_sysfs_entry(h, drv_index)) - goto cleanup_queue; - disk->private_data = h->drv[drv_index]; - - /* Set up queue information */ - blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask); - - /* This is a hardware imposed limit. 
*/ - blk_queue_max_segments(disk->queue, h->maxsgentries); - - blk_queue_max_hw_sectors(disk->queue, h->cciss_max_sectors); - - blk_queue_softirq_done(disk->queue, cciss_softirq_done); - - disk->queue->queuedata = h; - - blk_queue_logical_block_size(disk->queue, - h->drv[drv_index]->block_size); - - /* Make sure all queue data is written out before */ - /* setting h->drv[drv_index]->queue, as setting this */ - /* allows the interrupt handler to start the queue */ - wmb(); - h->drv[drv_index]->queue = disk->queue; - device_add_disk(&h->drv[drv_index]->dev, disk); - return 0; - -cleanup_queue: - blk_cleanup_queue(disk->queue); - disk->queue = NULL; -init_queue_failure: - return -1; -} - -/* This function will check the usage_count of the drive to be updated/added. - * If the usage_count is zero and it is a heretofore unknown drive, or, - * the drive's capacity, geometry, or serial number has changed, - * then the drive information will be updated and the disk will be - * re-registered with the kernel. If these conditions don't hold, - * then it will be left alone for the next reboot. The exception to this - * is disk 0 which will always be left registered with the kernel since it - * is also the controller node. Any changes to disk 0 will show up on - * the next reboot. - */ -static void cciss_update_drive_info(ctlr_info_t *h, int drv_index, - int first_time, int via_ioctl) -{ - struct gendisk *disk; - InquiryData_struct *inq_buff = NULL; - unsigned int block_size; - sector_t total_size; - unsigned long flags = 0; - int ret = 0; - drive_info_struct *drvinfo; - - /* Get information about the disk and modify the driver structure */ - inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL); - drvinfo = kzalloc(sizeof(*drvinfo), GFP_KERNEL); - if (inq_buff == NULL || drvinfo == NULL) - goto mem_msg; - - /* testing to see if 16-byte CDBs are already being used */ - if (h->cciss_read == CCISS_READ_16) { - cciss_read_capacity_16(h, drv_index, - &total_size, &block_size); - - } else { - cciss_read_capacity(h, drv_index, &total_size, &block_size); - /* if read_capacity returns all F's this volume is >2TB */ - /* in size so we switch to 16-byte CDB's for all */ - /* read/write ops */ - if (total_size == 0xFFFFFFFFULL) { - cciss_read_capacity_16(h, drv_index, - &total_size, &block_size); - h->cciss_read = CCISS_READ_16; - h->cciss_write = CCISS_WRITE_16; - } else { - h->cciss_read = CCISS_READ_10; - h->cciss_write = CCISS_WRITE_10; - } - } - - cciss_geometry_inquiry(h, drv_index, total_size, block_size, - inq_buff, drvinfo); - drvinfo->block_size = block_size; - drvinfo->nr_blocks = total_size + 1; - - cciss_get_device_descr(h, drv_index, drvinfo->vendor, - drvinfo->model, drvinfo->rev); - cciss_get_serial_no(h, drv_index, drvinfo->serial_no, - sizeof(drvinfo->serial_no)); - /* Save the lunid in case we deregister the disk, below. */ - memcpy(drvinfo->LunID, h->drv[drv_index]->LunID, - sizeof(drvinfo->LunID)); - - /* Is it the same disk we already know, and nothing's changed? 
*/ - if (h->drv[drv_index]->raid_level != -1 && - ((memcmp(drvinfo->serial_no, - h->drv[drv_index]->serial_no, 16) == 0) && - drvinfo->block_size == h->drv[drv_index]->block_size && - drvinfo->nr_blocks == h->drv[drv_index]->nr_blocks && - drvinfo->heads == h->drv[drv_index]->heads && - drvinfo->sectors == h->drv[drv_index]->sectors && - drvinfo->cylinders == h->drv[drv_index]->cylinders)) - /* The disk is unchanged, nothing to update */ - goto freeret; - - /* If we get here it's not the same disk, or something's changed, - * so we need to * deregister it, and re-register it, if it's not - * in use. - * If the disk already exists then deregister it before proceeding - * (unless it's the first disk (for the controller node). - */ - if (h->drv[drv_index]->raid_level != -1 && drv_index != 0) { - dev_warn(&h->pdev->dev, "disk %d has changed.\n", drv_index); - spin_lock_irqsave(&h->lock, flags); - h->drv[drv_index]->busy_configuring = 1; - spin_unlock_irqrestore(&h->lock, flags); - - /* deregister_disk sets h->drv[drv_index]->queue = NULL - * which keeps the interrupt handler from starting - * the queue. - */ - ret = deregister_disk(h, drv_index, 0, via_ioctl); - } - - /* If the disk is in use return */ - if (ret) - goto freeret; - - /* Save the new information from cciss_geometry_inquiry - * and serial number inquiry. If the disk was deregistered - * above, then h->drv[drv_index] will be NULL. - */ - if (h->drv[drv_index] == NULL) { - drvinfo->device_initialized = 0; - h->drv[drv_index] = drvinfo; - drvinfo = NULL; /* so it won't be freed below. */ - } else { - /* special case for cxd0 */ - h->drv[drv_index]->block_size = drvinfo->block_size; - h->drv[drv_index]->nr_blocks = drvinfo->nr_blocks; - h->drv[drv_index]->heads = drvinfo->heads; - h->drv[drv_index]->sectors = drvinfo->sectors; - h->drv[drv_index]->cylinders = drvinfo->cylinders; - h->drv[drv_index]->raid_level = drvinfo->raid_level; - memcpy(h->drv[drv_index]->serial_no, drvinfo->serial_no, 16); - memcpy(h->drv[drv_index]->vendor, drvinfo->vendor, - VENDOR_LEN + 1); - memcpy(h->drv[drv_index]->model, drvinfo->model, MODEL_LEN + 1); - memcpy(h->drv[drv_index]->rev, drvinfo->rev, REV_LEN + 1); - } - - ++h->num_luns; - disk = h->gendisk[drv_index]; - set_capacity(disk, h->drv[drv_index]->nr_blocks); - - /* If it's not disk 0 (drv_index != 0) - * or if it was disk 0, but there was previously - * no actual corresponding configured logical drive - * (raid_leve == -1) then we want to update the - * logical drive's information. - */ - if (drv_index || first_time) { - if (cciss_add_disk(h, disk, drv_index) != 0) { - cciss_free_gendisk(h, drv_index); - cciss_free_drive_info(h, drv_index); - dev_warn(&h->pdev->dev, "could not update disk %d\n", - drv_index); - --h->num_luns; - } - } - -freeret: - kfree(inq_buff); - kfree(drvinfo); - return; -mem_msg: - dev_err(&h->pdev->dev, "out of memory\n"); - goto freeret; -} - -/* This function will find the first index of the controllers drive array - * that has a null drv pointer and allocate the drive info struct and - * will return that index This is where new drives will be added. - * If the index to be returned is greater than the highest_lun index for - * the controller then highest_lun is set * to this new index. - * If there are no available indexes or if tha allocation fails, then -1 - * is returned. * "controller_node" is used to know if this is a real - * logical drive, or just the controller node, which determines if this - * counts towards highest_lun. 
- */ -static int cciss_alloc_drive_info(ctlr_info_t *h, int controller_node) -{ - int i; - drive_info_struct *drv; - - /* Search for an empty slot for our drive info */ - for (i = 0; i < CISS_MAX_LUN; i++) { - - /* if not cxd0 case, and it's occupied, skip it. */ - if (h->drv[i] && i != 0) - continue; - /* - * If it's cxd0 case, and drv is alloc'ed already, and a - * disk is configured there, skip it. - */ - if (i == 0 && h->drv[i] && h->drv[i]->raid_level != -1) - continue; - - /* - * We've found an empty slot. Update highest_lun - * provided this isn't just the fake cxd0 controller node. - */ - if (i > h->highest_lun && !controller_node) - h->highest_lun = i; - - /* If adding a real disk at cxd0, and it's already alloc'ed */ - if (i == 0 && h->drv[i] != NULL) - return i; - - /* - * Found an empty slot, not already alloc'ed. Allocate it. - * Mark it with raid_level == -1, so we know it's new later on. - */ - drv = kzalloc(sizeof(*drv), GFP_KERNEL); - if (!drv) - return -1; - drv->raid_level = -1; /* so we know it's new */ - h->drv[i] = drv; - return i; - } - return -1; -} - -static void cciss_free_drive_info(ctlr_info_t *h, int drv_index) -{ - kfree(h->drv[drv_index]); - h->drv[drv_index] = NULL; -} - -static void cciss_free_gendisk(ctlr_info_t *h, int drv_index) -{ - put_disk(h->gendisk[drv_index]); - h->gendisk[drv_index] = NULL; -} - -/* cciss_add_gendisk finds a free hba[]->drv structure - * and allocates a gendisk if needed, and sets the lunid - * in the drvinfo structure. It returns the index into - * the ->drv[] array, or -1 if none are free. - * is_controller_node indicates whether highest_lun should - * count this disk, or if it's only being added to provide - * a means to talk to the controller in case no logical - * drives have yet been configured. - */ -static int cciss_add_gendisk(ctlr_info_t *h, unsigned char lunid[], - int controller_node) -{ - int drv_index; - - drv_index = cciss_alloc_drive_info(h, controller_node); - if (drv_index == -1) - return -1; - - /*Check if the gendisk needs to be allocated */ - if (!h->gendisk[drv_index]) { - h->gendisk[drv_index] = - alloc_disk(1 << NWD_SHIFT); - if (!h->gendisk[drv_index]) { - dev_err(&h->pdev->dev, - "could not allocate a new disk %d\n", - drv_index); - goto err_free_drive_info; - } - } - memcpy(h->drv[drv_index]->LunID, lunid, - sizeof(h->drv[drv_index]->LunID)); - if (cciss_create_ld_sysfs_entry(h, drv_index)) - goto err_free_disk; - /* Don't need to mark this busy because nobody */ - /* else knows about this disk yet to contend */ - /* for access to it. */ - h->drv[drv_index]->busy_configuring = 0; - wmb(); - return drv_index; - -err_free_disk: - cciss_free_gendisk(h, drv_index); -err_free_drive_info: - cciss_free_drive_info(h, drv_index); - return -1; -} - -/* This is for the special case of a controller which - * has no logical drives. In this case, we still need - * to register a disk so the controller can be accessed - * by the Array Config Utility. - */ -static void cciss_add_controller_node(ctlr_info_t *h) -{ - struct gendisk *disk; - int drv_index; - - if (h->gendisk[0] != NULL) /* already did this? Then bail. 
*/ - return; - - drv_index = cciss_add_gendisk(h, CTLR_LUNID, 1); - if (drv_index == -1) - goto error; - h->drv[drv_index]->block_size = 512; - h->drv[drv_index]->nr_blocks = 0; - h->drv[drv_index]->heads = 0; - h->drv[drv_index]->sectors = 0; - h->drv[drv_index]->cylinders = 0; - h->drv[drv_index]->raid_level = -1; - memset(h->drv[drv_index]->serial_no, 0, 16); - disk = h->gendisk[drv_index]; - if (cciss_add_disk(h, disk, drv_index) == 0) - return; - cciss_free_gendisk(h, drv_index); - cciss_free_drive_info(h, drv_index); -error: - dev_warn(&h->pdev->dev, "could not add disk 0.\n"); - return; -} - -/* This function will add and remove logical drives from the Logical - * drive array of the controller and maintain persistency of ordering - * so that mount points are preserved until the next reboot. This allows - * for the removal of logical drives in the middle of the drive array - * without a re-ordering of those drives. - * INPUT - * h = The controller to perform the operations on - */ -static int rebuild_lun_table(ctlr_info_t *h, int first_time, - int via_ioctl) -{ - int num_luns; - ReportLunData_struct *ld_buff = NULL; - int return_code; - int listlength = 0; - int i; - int drv_found; - int drv_index = 0; - unsigned char lunid[8] = CTLR_LUNID; - unsigned long flags; - - if (!capable(CAP_SYS_RAWIO)) - return -EPERM; - - /* Set busy_configuring flag for this operation */ - spin_lock_irqsave(&h->lock, flags); - if (h->busy_configuring) { - spin_unlock_irqrestore(&h->lock, flags); - return -EBUSY; - } - h->busy_configuring = 1; - spin_unlock_irqrestore(&h->lock, flags); - - ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL); - if (ld_buff == NULL) - goto mem_msg; - - return_code = sendcmd_withirq(h, CISS_REPORT_LOG, ld_buff, - sizeof(ReportLunData_struct), - 0, CTLR_LUNID, TYPE_CMD); - - if (return_code == IO_OK) - listlength = be32_to_cpu(*(__be32 *) ld_buff->LUNListLength); - else { /* reading number of logical volumes failed */ - dev_warn(&h->pdev->dev, - "report logical volume command failed\n"); - listlength = 0; - goto freeret; - } - - num_luns = listlength / 8; /* 8 bytes per entry */ - if (num_luns > CISS_MAX_LUN) { - num_luns = CISS_MAX_LUN; - dev_warn(&h->pdev->dev, "more luns configured" - " on controller than can be handled by" - " this driver.\n"); - } - - if (num_luns == 0) - cciss_add_controller_node(h); - - /* Compare controller drive array to driver's drive array - * to see if any drives are missing on the controller due - * to action of Array Config Utility (user deletes drive) - * and deregister logical drives which have disappeared. - */ - for (i = 0; i <= h->highest_lun; i++) { - int j; - drv_found = 0; - - /* skip holes in the array from already deleted drives */ - if (h->drv[i] == NULL) - continue; - - for (j = 0; j < num_luns; j++) { - memcpy(lunid, &ld_buff->LUN[j][0], sizeof(lunid)); - if (memcmp(h->drv[i]->LunID, lunid, - sizeof(lunid)) == 0) { - drv_found = 1; - break; - } - } - if (!drv_found) { - /* Deregister it from the OS, it's gone. */ - spin_lock_irqsave(&h->lock, flags); - h->drv[i]->busy_configuring = 1; - spin_unlock_irqrestore(&h->lock, flags); - return_code = deregister_disk(h, i, 1, via_ioctl); - if (h->drv[i] != NULL) - h->drv[i]->busy_configuring = 0; - } - } - - /* Compare controller drive array to driver's drive array. - * Check for updates in the drive information and any new drives - * on the controller due to ACU adding logical drives, or changing - * a logical drive's size, etc. 
Reregister any new/changed drives - */ - for (i = 0; i < num_luns; i++) { - int j; - - drv_found = 0; - - memcpy(lunid, &ld_buff->LUN[i][0], sizeof(lunid)); - /* Find if the LUN is already in the drive array - * of the driver. If so then update its info - * if not in use. If it does not exist then find - * the first free index and add it. - */ - for (j = 0; j <= h->highest_lun; j++) { - if (h->drv[j] != NULL && - memcmp(h->drv[j]->LunID, lunid, - sizeof(h->drv[j]->LunID)) == 0) { - drv_index = j; - drv_found = 1; - break; - } - } - - /* check if the drive was found already in the array */ - if (!drv_found) { - drv_index = cciss_add_gendisk(h, lunid, 0); - if (drv_index == -1) - goto freeret; - } - cciss_update_drive_info(h, drv_index, first_time, via_ioctl); - } /* end for */ - -freeret: - kfree(ld_buff); - h->busy_configuring = 0; - /* We return -1 here to tell the ACU that we have registered/updated - * all of the drives that we can and to keep it from calling us - * additional times. - */ - return -1; -mem_msg: - dev_err(&h->pdev->dev, "out of memory\n"); - h->busy_configuring = 0; - goto freeret; -} - -static void cciss_clear_drive_info(drive_info_struct *drive_info) -{ - /* zero out the disk size info */ - drive_info->nr_blocks = 0; - drive_info->block_size = 0; - drive_info->heads = 0; - drive_info->sectors = 0; - drive_info->cylinders = 0; - drive_info->raid_level = -1; - memset(drive_info->serial_no, 0, sizeof(drive_info->serial_no)); - memset(drive_info->model, 0, sizeof(drive_info->model)); - memset(drive_info->rev, 0, sizeof(drive_info->rev)); - memset(drive_info->vendor, 0, sizeof(drive_info->vendor)); - /* - * don't clear the LUNID though, we need to remember which - * one this one is. - */ -} - -/* This function will deregister the disk and it's queue from the - * kernel. It must be called with the controller lock held and the - * drv structures busy_configuring flag set. It's parameters are: - * - * disk = This is the disk to be deregistered - * drv = This is the drive_info_struct associated with the disk to be - * deregistered. It contains information about the disk used - * by the driver. - * clear_all = This flag determines whether or not the disk information - * is going to be completely cleared out and the highest_lun - * reset. Sometimes we want to clear out information about - * the disk in preparation for re-adding it. In this case - * the highest_lun should be left unchanged and the LunID - * should not be cleared. - * via_ioctl - * This indicates whether we've reached this path via ioctl. - * This affects the maximum usage count allowed for c0d0 to be messed with. - * If this path is reached via ioctl(), then the max_usage_count will - * be 1, as the process calling ioctl() has got to have the device open. - * If we get here via sysfs, then the max usage count will be zero. -*/ -static int deregister_disk(ctlr_info_t *h, int drv_index, - int clear_all, int via_ioctl) -{ - int i; - struct gendisk *disk; - drive_info_struct *drv; - int recalculate_highest_lun; - - if (!capable(CAP_SYS_RAWIO)) - return -EPERM; - - drv = h->drv[drv_index]; - disk = h->gendisk[drv_index]; - - /* make sure logical volume is NOT is use */ - if (clear_all || (h->gendisk[0] == disk)) { - if (drv->usage_count > via_ioctl) - return -EBUSY; - } else if (drv->usage_count > 0) - return -EBUSY; - - recalculate_highest_lun = (drv == h->drv[h->highest_lun]); - - /* invalidate the devices and deregister the disk. If it is disk - * zero do not deregister it but just zero out it's values. 
This - * allows us to delete disk zero but keep the controller registered. - */ - if (h->gendisk[0] != disk) { - struct request_queue *q = disk->queue; - if (disk->flags & GENHD_FL_UP) { - cciss_destroy_ld_sysfs_entry(h, drv_index, 0); - del_gendisk(disk); - } - if (q) - blk_cleanup_queue(q); - /* If clear_all is set then we are deleting the logical - * drive, not just refreshing its info. For drives - * other than disk 0 we will call put_disk. We do not - * do this for disk 0 as we need it to be able to - * configure the controller. - */ - if (clear_all){ - /* This isn't pretty, but we need to find the - * disk in our array and NULL our the pointer. - * This is so that we will call alloc_disk if - * this index is used again later. - */ - for (i=0; i < CISS_MAX_LUN; i++){ - if (h->gendisk[i] == disk) { - h->gendisk[i] = NULL; - break; - } - } - put_disk(disk); - } - } else { - set_capacity(disk, 0); - cciss_clear_drive_info(drv); - } - - --h->num_luns; - - /* if it was the last disk, find the new hightest lun */ - if (clear_all && recalculate_highest_lun) { - int newhighest = -1; - for (i = 0; i <= h->highest_lun; i++) { - /* if the disk has size > 0, it is available */ - if (h->drv[i] && h->drv[i]->heads) - newhighest = i; - } - h->highest_lun = newhighest; - } - return 0; -} - -static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff, - size_t size, __u8 page_code, unsigned char *scsi3addr, - int cmd_type) -{ - u64bit buff_dma_handle; - int status = IO_OK; - - c->cmd_type = CMD_IOCTL_PEND; - c->Header.ReplyQueue = 0; - if (buff != NULL) { - c->Header.SGList = 1; - c->Header.SGTotal = 1; - } else { - c->Header.SGList = 0; - c->Header.SGTotal = 0; - } - c->Header.Tag.lower = c->busaddr; - memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8); - - c->Request.Type.Type = cmd_type; - if (cmd_type == TYPE_CMD) { - switch (cmd) { - case CISS_INQUIRY: - /* are we trying to read a vital product page */ - if (page_code != 0) { - c->Request.CDB[1] = 0x01; - c->Request.CDB[2] = page_code; - } - c->Request.CDBLen = 6; - c->Request.Type.Attribute = ATTR_SIMPLE; - c->Request.Type.Direction = XFER_READ; - c->Request.Timeout = 0; - c->Request.CDB[0] = CISS_INQUIRY; - c->Request.CDB[4] = size & 0xFF; - break; - case CISS_REPORT_LOG: - case CISS_REPORT_PHYS: - /* Talking to controller so It's a physical command - mode = 00 target = 0. Nothing to write. 
- */ - c->Request.CDBLen = 12; - c->Request.Type.Attribute = ATTR_SIMPLE; - c->Request.Type.Direction = XFER_READ; - c->Request.Timeout = 0; - c->Request.CDB[0] = cmd; - c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ - c->Request.CDB[7] = (size >> 16) & 0xFF; - c->Request.CDB[8] = (size >> 8) & 0xFF; - c->Request.CDB[9] = size & 0xFF; - break; - - case CCISS_READ_CAPACITY: - c->Request.CDBLen = 10; - c->Request.Type.Attribute = ATTR_SIMPLE; - c->Request.Type.Direction = XFER_READ; - c->Request.Timeout = 0; - c->Request.CDB[0] = cmd; - break; - case CCISS_READ_CAPACITY_16: - c->Request.CDBLen = 16; - c->Request.Type.Attribute = ATTR_SIMPLE; - c->Request.Type.Direction = XFER_READ; - c->Request.Timeout = 0; - c->Request.CDB[0] = cmd; - c->Request.CDB[1] = 0x10; - c->Request.CDB[10] = (size >> 24) & 0xFF; - c->Request.CDB[11] = (size >> 16) & 0xFF; - c->Request.CDB[12] = (size >> 8) & 0xFF; - c->Request.CDB[13] = size & 0xFF; - c->Request.Timeout = 0; - c->Request.CDB[0] = cmd; - break; - case CCISS_CACHE_FLUSH: - c->Request.CDBLen = 12; - c->Request.Type.Attribute = ATTR_SIMPLE; - c->Request.Type.Direction = XFER_WRITE; - c->Request.Timeout = 0; - c->Request.CDB[0] = BMIC_WRITE; - c->Request.CDB[6] = BMIC_CACHE_FLUSH; - c->Request.CDB[7] = (size >> 8) & 0xFF; - c->Request.CDB[8] = size & 0xFF; - break; - case TEST_UNIT_READY: - c->Request.CDBLen = 6; - c->Request.Type.Attribute = ATTR_SIMPLE; - c->Request.Type.Direction = XFER_NONE; - c->Request.Timeout = 0; - break; - default: - dev_warn(&h->pdev->dev, "Unknown Command 0x%c\n", cmd); - return IO_ERROR; - } - } else if (cmd_type == TYPE_MSG) { - switch (cmd) { - case CCISS_ABORT_MSG: - c->Request.CDBLen = 12; - c->Request.Type.Attribute = ATTR_SIMPLE; - c->Request.Type.Direction = XFER_WRITE; - c->Request.Timeout = 0; - c->Request.CDB[0] = cmd; /* abort */ - c->Request.CDB[1] = 0; /* abort a command */ - /* buff contains the tag of the command to abort */ - memcpy(&c->Request.CDB[4], buff, 8); - break; - case CCISS_RESET_MSG: - c->Request.CDBLen = 16; - c->Request.Type.Attribute = ATTR_SIMPLE; - c->Request.Type.Direction = XFER_NONE; - c->Request.Timeout = 0; - memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB)); - c->Request.CDB[0] = cmd; /* reset */ - c->Request.CDB[1] = CCISS_RESET_TYPE_TARGET; - break; - case CCISS_NOOP_MSG: - c->Request.CDBLen = 1; - c->Request.Type.Attribute = ATTR_SIMPLE; - c->Request.Type.Direction = XFER_WRITE; - c->Request.Timeout = 0; - c->Request.CDB[0] = cmd; - break; - default: - dev_warn(&h->pdev->dev, - "unknown message type %d\n", cmd); - return IO_ERROR; - } - } else { - dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type); - return IO_ERROR; - } - /* Fill in the scatter gather information */ - if (size > 0) { - buff_dma_handle.val = (__u64) pci_map_single(h->pdev, - buff, size, - PCI_DMA_BIDIRECTIONAL); - c->SG[0].Addr.lower = buff_dma_handle.val32.lower; - c->SG[0].Addr.upper = buff_dma_handle.val32.upper; - c->SG[0].Len = size; - c->SG[0].Ext = 0; /* we are not chaining */ - } - return status; -} - -static int cciss_send_reset(ctlr_info_t *h, unsigned char *scsi3addr, - u8 reset_type) -{ - CommandList_struct *c; - int return_status; - - c = cmd_alloc(h); - if (!c) - return -ENOMEM; - return_status = fill_cmd(h, c, CCISS_RESET_MSG, NULL, 0, 0, - CTLR_LUNID, TYPE_MSG); - c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */ - if (return_status != IO_OK) { - cmd_special_free(h, c); - return return_status; - } - c->waiting = NULL; - enqueue_cmd_and_start_io(h, c); - /* Don't 
wait for completion, the reset won't complete. Don't free - * the command either. This is the last command we will send before - * re-initializing everything, so it doesn't matter and won't leak. - */ - return 0; -} - -static int check_target_status(ctlr_info_t *h, CommandList_struct *c) -{ - switch (c->err_info->ScsiStatus) { - case SAM_STAT_GOOD: - return IO_OK; - case SAM_STAT_CHECK_CONDITION: - switch (0xf & c->err_info->SenseInfo[2]) { - case 0: return IO_OK; /* no sense */ - case 1: return IO_OK; /* recovered error */ - default: - if (check_for_unit_attention(h, c)) - return IO_NEEDS_RETRY; - dev_warn(&h->pdev->dev, "cmd 0x%02x " - "check condition, sense key = 0x%02x\n", - c->Request.CDB[0], c->err_info->SenseInfo[2]); - } - break; - default: - dev_warn(&h->pdev->dev, "cmd 0x%02x" - "scsi status = 0x%02x\n", - c->Request.CDB[0], c->err_info->ScsiStatus); - break; - } - return IO_ERROR; -} - -static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c) -{ - int return_status = IO_OK; - - if (c->err_info->CommandStatus == CMD_SUCCESS) - return IO_OK; - - switch (c->err_info->CommandStatus) { - case CMD_TARGET_STATUS: - return_status = check_target_status(h, c); - break; - case CMD_DATA_UNDERRUN: - case CMD_DATA_OVERRUN: - /* expected for inquiry and report lun commands */ - break; - case CMD_INVALID: - dev_warn(&h->pdev->dev, "cmd 0x%02x is " - "reported invalid\n", c->Request.CDB[0]); - return_status = IO_ERROR; - break; - case CMD_PROTOCOL_ERR: - dev_warn(&h->pdev->dev, "cmd 0x%02x has " - "protocol error\n", c->Request.CDB[0]); - return_status = IO_ERROR; - break; - case CMD_HARDWARE_ERR: - dev_warn(&h->pdev->dev, "cmd 0x%02x had " - " hardware error\n", c->Request.CDB[0]); - return_status = IO_ERROR; - break; - case CMD_CONNECTION_LOST: - dev_warn(&h->pdev->dev, "cmd 0x%02x had " - "connection lost\n", c->Request.CDB[0]); - return_status = IO_ERROR; - break; - case CMD_ABORTED: - dev_warn(&h->pdev->dev, "cmd 0x%02x was " - "aborted\n", c->Request.CDB[0]); - return_status = IO_ERROR; - break; - case CMD_ABORT_FAILED: - dev_warn(&h->pdev->dev, "cmd 0x%02x reports " - "abort failed\n", c->Request.CDB[0]); - return_status = IO_ERROR; - break; - case CMD_UNSOLICITED_ABORT: - dev_warn(&h->pdev->dev, "unsolicited abort 0x%02x\n", - c->Request.CDB[0]); - return_status = IO_NEEDS_RETRY; - break; - case CMD_UNABORTABLE: - dev_warn(&h->pdev->dev, "cmd unabortable\n"); - return_status = IO_ERROR; - break; - default: - dev_warn(&h->pdev->dev, "cmd 0x%02x returned " - "unknown status %x\n", c->Request.CDB[0], - c->err_info->CommandStatus); - return_status = IO_ERROR; - } - return return_status; -} - -static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c, - int attempt_retry) -{ - DECLARE_COMPLETION_ONSTACK(wait); - u64bit buff_dma_handle; - int return_status = IO_OK; - -resend_cmd2: - c->waiting = &wait; - enqueue_cmd_and_start_io(h, c); - - wait_for_completion(&wait); - - if (c->err_info->CommandStatus == 0 || !attempt_retry) - goto command_done; - - return_status = process_sendcmd_error(h, c); - - if (return_status == IO_NEEDS_RETRY && - c->retry_count < MAX_CMD_RETRIES) { - dev_warn(&h->pdev->dev, "retrying 0x%02x\n", - c->Request.CDB[0]); - c->retry_count++; - /* erase the old error information */ - memset(c->err_info, 0, sizeof(ErrorInfo_struct)); - return_status = IO_OK; - reinit_completion(&wait); - goto resend_cmd2; - } - -command_done: - /* unlock the buffers from DMA */ - buff_dma_handle.val32.lower = c->SG[0].Addr.lower; - buff_dma_handle.val32.upper = 
c->SG[0].Addr.upper; - pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val, - c->SG[0].Len, PCI_DMA_BIDIRECTIONAL); - return return_status; -} - -static int sendcmd_withirq(ctlr_info_t *h, __u8 cmd, void *buff, size_t size, - __u8 page_code, unsigned char scsi3addr[], - int cmd_type) -{ - CommandList_struct *c; - int return_status; - - c = cmd_special_alloc(h); - if (!c) - return -ENOMEM; - return_status = fill_cmd(h, c, cmd, buff, size, page_code, - scsi3addr, cmd_type); - if (return_status == IO_OK) - return_status = sendcmd_withirq_core(h, c, 1); - - cmd_special_free(h, c); - return return_status; -} - -static void cciss_geometry_inquiry(ctlr_info_t *h, int logvol, - sector_t total_size, - unsigned int block_size, - InquiryData_struct *inq_buff, - drive_info_struct *drv) -{ - int return_code; - unsigned long t; - unsigned char scsi3addr[8]; - - memset(inq_buff, 0, sizeof(InquiryData_struct)); - log_unit_to_scsi3addr(h, scsi3addr, logvol); - return_code = sendcmd_withirq(h, CISS_INQUIRY, inq_buff, - sizeof(*inq_buff), 0xC1, scsi3addr, TYPE_CMD); - if (return_code == IO_OK) { - if (inq_buff->data_byte[8] == 0xFF) { - dev_warn(&h->pdev->dev, - "reading geometry failed, volume " - "does not support reading geometry\n"); - drv->heads = 255; - drv->sectors = 32; /* Sectors per track */ - drv->cylinders = total_size + 1; - drv->raid_level = RAID_UNKNOWN; - } else { - drv->heads = inq_buff->data_byte[6]; - drv->sectors = inq_buff->data_byte[7]; - drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8; - drv->cylinders += inq_buff->data_byte[5]; - drv->raid_level = inq_buff->data_byte[8]; - } - drv->block_size = block_size; - drv->nr_blocks = total_size + 1; - t = drv->heads * drv->sectors; - if (t > 1) { - sector_t real_size = total_size + 1; - unsigned long rem = sector_div(real_size, t); - if (rem) - real_size++; - drv->cylinders = real_size; - } - } else { /* Get geometry failed */ - dev_warn(&h->pdev->dev, "reading geometry failed\n"); - } -} - -static void -cciss_read_capacity(ctlr_info_t *h, int logvol, sector_t *total_size, - unsigned int *block_size) -{ - ReadCapdata_struct *buf; - int return_code; - unsigned char scsi3addr[8]; - - buf = kzalloc(sizeof(ReadCapdata_struct), GFP_KERNEL); - if (!buf) { - dev_warn(&h->pdev->dev, "out of memory\n"); - return; - } - - log_unit_to_scsi3addr(h, scsi3addr, logvol); - return_code = sendcmd_withirq(h, CCISS_READ_CAPACITY, buf, - sizeof(ReadCapdata_struct), 0, scsi3addr, TYPE_CMD); - if (return_code == IO_OK) { - *total_size = be32_to_cpu(*(__be32 *) buf->total_size); - *block_size = be32_to_cpu(*(__be32 *) buf->block_size); - } else { /* read capacity command failed */ - dev_warn(&h->pdev->dev, "read capacity failed\n"); - *total_size = 0; - *block_size = BLOCK_SIZE; - } - kfree(buf); -} - -static void cciss_read_capacity_16(ctlr_info_t *h, int logvol, - sector_t *total_size, unsigned int *block_size) -{ - ReadCapdata_struct_16 *buf; - int return_code; - unsigned char scsi3addr[8]; - - buf = kzalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL); - if (!buf) { - dev_warn(&h->pdev->dev, "out of memory\n"); - return; - } - - log_unit_to_scsi3addr(h, scsi3addr, logvol); - return_code = sendcmd_withirq(h, CCISS_READ_CAPACITY_16, - buf, sizeof(ReadCapdata_struct_16), - 0, scsi3addr, TYPE_CMD); - if (return_code == IO_OK) { - *total_size = be64_to_cpu(*(__be64 *) buf->total_size); - *block_size = be32_to_cpu(*(__be32 *) buf->block_size); - } else { /* read capacity command failed */ - dev_warn(&h->pdev->dev, "read capacity failed\n"); - *total_size = 
0; - *block_size = BLOCK_SIZE; - } - dev_info(&h->pdev->dev, " blocks= %llu block_size= %d\n", - (unsigned long long)*total_size+1, *block_size); - kfree(buf); -} - -static int cciss_revalidate(struct gendisk *disk) -{ - ctlr_info_t *h = get_host(disk); - drive_info_struct *drv = get_drv(disk); - int logvol; - int FOUND = 0; - unsigned int block_size; - sector_t total_size; - InquiryData_struct *inq_buff = NULL; - - for (logvol = 0; logvol <= h->highest_lun; logvol++) { - if (!h->drv[logvol]) - continue; - if (memcmp(h->drv[logvol]->LunID, drv->LunID, - sizeof(drv->LunID)) == 0) { - FOUND = 1; - break; - } - } - - if (!FOUND) - return 1; - - inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL); - if (inq_buff == NULL) { - dev_warn(&h->pdev->dev, "out of memory\n"); - return 1; - } - if (h->cciss_read == CCISS_READ_10) { - cciss_read_capacity(h, logvol, - &total_size, &block_size); - } else { - cciss_read_capacity_16(h, logvol, - &total_size, &block_size); - } - cciss_geometry_inquiry(h, logvol, total_size, block_size, - inq_buff, drv); - - blk_queue_logical_block_size(drv->queue, drv->block_size); - set_capacity(disk, drv->nr_blocks); - - kfree(inq_buff); - return 0; -} - -/* - * Map (physical) PCI mem into (virtual) kernel space - */ -static void __iomem *remap_pci_mem(ulong base, ulong size) -{ - ulong page_base = ((ulong) base) & PAGE_MASK; - ulong page_offs = ((ulong) base) - page_base; - void __iomem *page_remapped = ioremap(page_base, page_offs + size); - - return page_remapped ? (page_remapped + page_offs) : NULL; -} - -/* - * Takes jobs of the Q and sends them to the hardware, then puts it on - * the Q to wait for completion. - */ -static void start_io(ctlr_info_t *h) -{ - CommandList_struct *c; - - while (!list_empty(&h->reqQ)) { - c = list_entry(h->reqQ.next, CommandList_struct, list); - /* can't do anything if fifo is full */ - if ((h->access.fifo_full(h))) { - dev_warn(&h->pdev->dev, "fifo full\n"); - break; - } - - /* Get the first entry from the Request Q */ - removeQ(c); - h->Qdepth--; - - /* Tell the controller execute command */ - h->access.submit_command(h, c); - - /* Put job onto the completed Q */ - addQ(&h->cmpQ, c); - } -} - -/* Assumes that h->lock is held. */ -/* Zeros out the error record and then resends the command back */ -/* to the controller */ -static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c) -{ - /* erase the old error information */ - memset(c->err_info, 0, sizeof(ErrorInfo_struct)); - - /* add it to software queue and then send it to the controller */ - addQ(&h->reqQ, c); - h->Qdepth++; - if (h->Qdepth > h->maxQsinceinit) - h->maxQsinceinit = h->Qdepth; - - start_io(h); -} - -static inline unsigned int make_status_bytes(unsigned int scsi_status_byte, - unsigned int msg_byte, unsigned int host_byte, - unsigned int driver_byte) -{ - /* inverse of macros in scsi.h */ - return (scsi_status_byte & 0xff) | - ((msg_byte & 0xff) << 8) | - ((host_byte & 0xff) << 16) | - ((driver_byte & 0xff) << 24); -} - -static inline int evaluate_target_status(ctlr_info_t *h, - CommandList_struct *cmd, int *retry_cmd) -{ - unsigned char sense_key; - unsigned char status_byte, msg_byte, host_byte, driver_byte; - int error_value; - - *retry_cmd = 0; - /* If we get in here, it means we got "target status", that is, scsi status */ - status_byte = cmd->err_info->ScsiStatus; - driver_byte = DRIVER_OK; - msg_byte = cmd->err_info->CommandStatus; /* correct? 
seems too device specific */ - - if (blk_rq_is_passthrough(cmd->rq)) - host_byte = DID_PASSTHROUGH; - else - host_byte = DID_OK; - - error_value = make_status_bytes(status_byte, msg_byte, - host_byte, driver_byte); - - if (cmd->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) { - if (!blk_rq_is_passthrough(cmd->rq)) - dev_warn(&h->pdev->dev, "cmd %p " - "has SCSI Status 0x%x\n", - cmd, cmd->err_info->ScsiStatus); - return error_value; - } - - /* check the sense key */ - sense_key = 0xf & cmd->err_info->SenseInfo[2]; - /* no status or recovered error */ - if (((sense_key == 0x0) || (sense_key == 0x1)) && - !blk_rq_is_passthrough(cmd->rq)) - error_value = 0; - - if (check_for_unit_attention(h, cmd)) { - *retry_cmd = !blk_rq_is_passthrough(cmd->rq); - return 0; - } - - /* Not SG_IO or similar? */ - if (!blk_rq_is_passthrough(cmd->rq)) { - if (error_value != 0) - dev_warn(&h->pdev->dev, "cmd %p has CHECK CONDITION" - " sense key = 0x%x\n", cmd, sense_key); - return error_value; - } - - scsi_req(cmd->rq)->sense_len = cmd->err_info->SenseLen; - return error_value; -} - -/* checks the status of the job and calls complete buffers to mark all - * buffers for the completed job. Note that this function does not need - * to hold the hba/queue lock. - */ -static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd, - int timeout) -{ - int retry_cmd = 0; - struct request *rq = cmd->rq; - struct scsi_request *sreq = scsi_req(rq); - - sreq->result = 0; - - if (timeout) - sreq->result = make_status_bytes(0, 0, 0, DRIVER_TIMEOUT); - - if (cmd->err_info->CommandStatus == 0) /* no error has occurred */ - goto after_error_processing; - - switch (cmd->err_info->CommandStatus) { - case CMD_TARGET_STATUS: - sreq->result = evaluate_target_status(h, cmd, &retry_cmd); - break; - case CMD_DATA_UNDERRUN: - if (!blk_rq_is_passthrough(cmd->rq)) { - dev_warn(&h->pdev->dev, "cmd %p has" - " completed with data underrun " - "reported\n", cmd); - } - break; - case CMD_DATA_OVERRUN: - if (!blk_rq_is_passthrough(cmd->rq)) - dev_warn(&h->pdev->dev, "cciss: cmd %p has" - " completed with data overrun " - "reported\n", cmd); - break; - case CMD_INVALID: - dev_warn(&h->pdev->dev, "cciss: cmd %p is " - "reported invalid\n", cmd); - sreq->result = make_status_bytes(SAM_STAT_GOOD, - cmd->err_info->CommandStatus, DRIVER_OK, - blk_rq_is_passthrough(cmd->rq) ? - DID_PASSTHROUGH : DID_ERROR); - break; - case CMD_PROTOCOL_ERR: - dev_warn(&h->pdev->dev, "cciss: cmd %p has " - "protocol error\n", cmd); - sreq->result = make_status_bytes(SAM_STAT_GOOD, - cmd->err_info->CommandStatus, DRIVER_OK, - blk_rq_is_passthrough(cmd->rq) ? - DID_PASSTHROUGH : DID_ERROR); - break; - case CMD_HARDWARE_ERR: - dev_warn(&h->pdev->dev, "cciss: cmd %p had " - " hardware error\n", cmd); - sreq->result = make_status_bytes(SAM_STAT_GOOD, - cmd->err_info->CommandStatus, DRIVER_OK, - blk_rq_is_passthrough(cmd->rq) ? - DID_PASSTHROUGH : DID_ERROR); - break; - case CMD_CONNECTION_LOST: - dev_warn(&h->pdev->dev, "cciss: cmd %p had " - "connection lost\n", cmd); - sreq->result = make_status_bytes(SAM_STAT_GOOD, - cmd->err_info->CommandStatus, DRIVER_OK, - blk_rq_is_passthrough(cmd->rq) ? - DID_PASSTHROUGH : DID_ERROR); - break; - case CMD_ABORTED: - dev_warn(&h->pdev->dev, "cciss: cmd %p was " - "aborted\n", cmd); - sreq->result = make_status_bytes(SAM_STAT_GOOD, - cmd->err_info->CommandStatus, DRIVER_OK, - blk_rq_is_passthrough(cmd->rq) ? 
- DID_PASSTHROUGH : DID_ABORT); - break; - case CMD_ABORT_FAILED: - dev_warn(&h->pdev->dev, "cciss: cmd %p reports " - "abort failed\n", cmd); - sreq->result = make_status_bytes(SAM_STAT_GOOD, - cmd->err_info->CommandStatus, DRIVER_OK, - blk_rq_is_passthrough(cmd->rq) ? - DID_PASSTHROUGH : DID_ERROR); - break; - case CMD_UNSOLICITED_ABORT: - dev_warn(&h->pdev->dev, "cciss%d: unsolicited " - "abort %p\n", h->ctlr, cmd); - if (cmd->retry_count < MAX_CMD_RETRIES) { - retry_cmd = 1; - dev_warn(&h->pdev->dev, "retrying %p\n", cmd); - cmd->retry_count++; - } else - dev_warn(&h->pdev->dev, - "%p retried too many times\n", cmd); - sreq->result = make_status_bytes(SAM_STAT_GOOD, - cmd->err_info->CommandStatus, DRIVER_OK, - blk_rq_is_passthrough(cmd->rq) ? - DID_PASSTHROUGH : DID_ABORT); - break; - case CMD_TIMEOUT: - dev_warn(&h->pdev->dev, "cmd %p timedout\n", cmd); - sreq->result = make_status_bytes(SAM_STAT_GOOD, - cmd->err_info->CommandStatus, DRIVER_OK, - blk_rq_is_passthrough(cmd->rq) ? - DID_PASSTHROUGH : DID_ERROR); - break; - case CMD_UNABORTABLE: - dev_warn(&h->pdev->dev, "cmd %p unabortable\n", cmd); - sreq->result = make_status_bytes(SAM_STAT_GOOD, - cmd->err_info->CommandStatus, DRIVER_OK, - blk_rq_is_passthrough(cmd->rq) ? - DID_PASSTHROUGH : DID_ERROR); - break; - default: - dev_warn(&h->pdev->dev, "cmd %p returned " - "unknown status %x\n", cmd, - cmd->err_info->CommandStatus); - sreq->result = make_status_bytes(SAM_STAT_GOOD, - cmd->err_info->CommandStatus, DRIVER_OK, - blk_rq_is_passthrough(cmd->rq) ? - DID_PASSTHROUGH : DID_ERROR); - } - -after_error_processing: - - /* We need to return this command */ - if (retry_cmd) { - resend_cciss_cmd(h, cmd); - return; - } - cmd->rq->completion_data = cmd; - blk_complete_request(cmd->rq); -} - -static inline u32 cciss_tag_contains_index(u32 tag) -{ -#define DIRECT_LOOKUP_BIT 0x10 - return tag & DIRECT_LOOKUP_BIT; -} - -static inline u32 cciss_tag_to_index(u32 tag) -{ -#define DIRECT_LOOKUP_SHIFT 5 - return tag >> DIRECT_LOOKUP_SHIFT; -} - -static inline u32 cciss_tag_discard_error_bits(ctlr_info_t *h, u32 tag) -{ -#define CCISS_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1) -#define CCISS_SIMPLE_ERROR_BITS 0x03 - if (likely(h->transMethod & CFGTBL_Trans_Performant)) - return tag & ~CCISS_PERF_ERROR_BITS; - return tag & ~CCISS_SIMPLE_ERROR_BITS; -} - -static inline void cciss_mark_tag_indexed(u32 *tag) -{ - *tag |= DIRECT_LOOKUP_BIT; -} - -static inline void cciss_set_tag_index(u32 *tag, u32 index) -{ - *tag |= (index << DIRECT_LOOKUP_SHIFT); -} - -/* - * Get a request and submit it to the controller. - */ -static void do_cciss_request(struct request_queue *q) -{ - ctlr_info_t *h = q->queuedata; - CommandList_struct *c; - sector_t start_blk; - int seg; - struct request *creq; - u64bit temp64; - struct scatterlist *tmp_sg; - SGDescriptor_struct *curr_sg; - drive_info_struct *drv; - int i, dir; - int sg_index = 0; - int chained = 0; - - queue: - creq = blk_peek_request(q); - if (!creq) - goto startio; - - BUG_ON(creq->nr_phys_segments > h->maxsgentries); - - c = cmd_alloc(h); - if (!c) - goto full; - - blk_start_request(creq); - - tmp_sg = h->scatter_list[c->cmdindex]; - spin_unlock_irq(q->queue_lock); - - c->cmd_type = CMD_RWREQ; - c->rq = creq; - - /* fill in the request */ - drv = creq->rq_disk->private_data; - c->Header.ReplyQueue = 0; /* unused in simple mode */ - /* got command from pool, so use the command block index instead */ - /* for direct lookups. */ - /* The first 2 bits are reserved for controller error reporting. 
*/ - cciss_set_tag_index(&c->Header.Tag.lower, c->cmdindex); - cciss_mark_tag_indexed(&c->Header.Tag.lower); - memcpy(&c->Header.LUN, drv->LunID, sizeof(drv->LunID)); - c->Request.CDBLen = 10; /* 12 byte commands not in FW yet; */ - c->Request.Type.Type = TYPE_CMD; /* It is a command. */ - c->Request.Type.Attribute = ATTR_SIMPLE; - c->Request.Type.Direction = - (rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE; - c->Request.Timeout = 0; /* Don't time out */ - c->Request.CDB[0] = - (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write; - start_blk = blk_rq_pos(creq); - dev_dbg(&h->pdev->dev, "sector =%d nr_sectors=%d\n", - (int)blk_rq_pos(creq), (int)blk_rq_sectors(creq)); - sg_init_table(tmp_sg, h->maxsgentries); - seg = blk_rq_map_sg(q, creq, tmp_sg); - - /* get the DMA records for the setup */ - if (c->Request.Type.Direction == XFER_READ) - dir = PCI_DMA_FROMDEVICE; - else - dir = PCI_DMA_TODEVICE; - - curr_sg = c->SG; - sg_index = 0; - chained = 0; - - for (i = 0; i < seg; i++) { - if (((sg_index+1) == (h->max_cmd_sgentries)) && - !chained && ((seg - i) > 1)) { - /* Point to next chain block. */ - curr_sg = h->cmd_sg_list[c->cmdindex]; - sg_index = 0; - chained = 1; - } - curr_sg[sg_index].Len = tmp_sg[i].length; - temp64.val = (__u64) pci_map_page(h->pdev, sg_page(&tmp_sg[i]), - tmp_sg[i].offset, - tmp_sg[i].length, dir); - if (dma_mapping_error(&h->pdev->dev, temp64.val)) { - dev_warn(&h->pdev->dev, - "%s: error mapping page for DMA\n", __func__); - scsi_req(creq)->result = - make_status_bytes(SAM_STAT_GOOD, 0, DRIVER_OK, - DID_SOFT_ERROR); - cmd_free(h, c); - return; - } - curr_sg[sg_index].Addr.lower = temp64.val32.lower; - curr_sg[sg_index].Addr.upper = temp64.val32.upper; - curr_sg[sg_index].Ext = 0; /* we are not chaining */ - ++sg_index; - } - if (chained) { - if (cciss_map_sg_chain_block(h, c, h->cmd_sg_list[c->cmdindex], - (seg - (h->max_cmd_sgentries - 1)) * - sizeof(SGDescriptor_struct))) { - scsi_req(creq)->result = - make_status_bytes(SAM_STAT_GOOD, 0, DRIVER_OK, - DID_SOFT_ERROR); - cmd_free(h, c); - return; - } - } - - /* track how many SG entries we are using */ - if (seg > h->maxSG) - h->maxSG = seg; - - dev_dbg(&h->pdev->dev, "Submitting %u sectors in %d segments " - "chained[%d]\n", - blk_rq_sectors(creq), seg, chained); - - c->Header.SGTotal = seg + chained; - if (seg <= h->max_cmd_sgentries) - c->Header.SGList = c->Header.SGTotal; - else - c->Header.SGList = h->max_cmd_sgentries; - set_performant_mode(h, c); - - switch (req_op(creq)) { - case REQ_OP_READ: - case REQ_OP_WRITE: - if(h->cciss_read == CCISS_READ_10) { - c->Request.CDB[1] = 0; - c->Request.CDB[2] = (start_blk >> 24) & 0xff; /* MSB */ - c->Request.CDB[3] = (start_blk >> 16) & 0xff; - c->Request.CDB[4] = (start_blk >> 8) & 0xff; - c->Request.CDB[5] = start_blk & 0xff; - c->Request.CDB[6] = 0; /* (sect >> 24) & 0xff; MSB */ - c->Request.CDB[7] = (blk_rq_sectors(creq) >> 8) & 0xff; - c->Request.CDB[8] = blk_rq_sectors(creq) & 0xff; - c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0; - } else { - u32 upper32 = upper_32_bits(start_blk); - - c->Request.CDBLen = 16; - c->Request.CDB[1]= 0; - c->Request.CDB[2]= (upper32 >> 24) & 0xff; /* MSB */ - c->Request.CDB[3]= (upper32 >> 16) & 0xff; - c->Request.CDB[4]= (upper32 >> 8) & 0xff; - c->Request.CDB[5]= upper32 & 0xff; - c->Request.CDB[6]= (start_blk >> 24) & 0xff; - c->Request.CDB[7]= (start_blk >> 16) & 0xff; - c->Request.CDB[8]= (start_blk >> 8) & 0xff; - c->Request.CDB[9]= start_blk & 0xff; - c->Request.CDB[10]= (blk_rq_sectors(creq) 
>> 24) & 0xff; - c->Request.CDB[11]= (blk_rq_sectors(creq) >> 16) & 0xff; - c->Request.CDB[12]= (blk_rq_sectors(creq) >> 8) & 0xff; - c->Request.CDB[13]= blk_rq_sectors(creq) & 0xff; - c->Request.CDB[14] = c->Request.CDB[15] = 0; - } - break; - case REQ_OP_SCSI_IN: - case REQ_OP_SCSI_OUT: - c->Request.CDBLen = scsi_req(creq)->cmd_len; - memcpy(c->Request.CDB, scsi_req(creq)->cmd, BLK_MAX_CDB); - scsi_req(creq)->sense = c->err_info->SenseInfo; - break; - default: - dev_warn(&h->pdev->dev, "bad request type %d\n", - creq->cmd_flags); - BUG(); - } - - spin_lock_irq(q->queue_lock); - - addQ(&h->reqQ, c); - h->Qdepth++; - if (h->Qdepth > h->maxQsinceinit) - h->maxQsinceinit = h->Qdepth; - - goto queue; -full: - blk_stop_queue(q); -startio: - /* We will already have the driver lock here so not need - * to lock it. - */ - start_io(h); -} - -static inline unsigned long get_next_completion(ctlr_info_t *h) -{ - return h->access.command_completed(h); -} - -static inline int interrupt_pending(ctlr_info_t *h) -{ - return h->access.intr_pending(h); -} - -static inline long interrupt_not_for_us(ctlr_info_t *h) -{ - return ((h->access.intr_pending(h) == 0) || - (h->interrupts_enabled == 0)); -} - -static inline int bad_tag(ctlr_info_t *h, u32 tag_index, - u32 raw_tag) -{ - if (unlikely(tag_index >= h->nr_cmds)) { - dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag); - return 1; - } - return 0; -} - -static inline void finish_cmd(ctlr_info_t *h, CommandList_struct *c, - u32 raw_tag) -{ - removeQ(c); - if (likely(c->cmd_type == CMD_RWREQ)) - complete_command(h, c, 0); - else if (c->cmd_type == CMD_IOCTL_PEND) - complete(c->waiting); -#ifdef CONFIG_CISS_SCSI_TAPE - else if (c->cmd_type == CMD_SCSI) - complete_scsi_command(c, 0, raw_tag); -#endif -} - -static inline u32 next_command(ctlr_info_t *h) -{ - u32 a; - - if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) - return h->access.command_completed(h); - - if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) { - a = *(h->reply_pool_head); /* Next cmd in ring buffer */ - (h->reply_pool_head)++; - h->commands_outstanding--; - } else { - a = FIFO_EMPTY; - } - /* Check for wraparound */ - if (h->reply_pool_head == (h->reply_pool + h->max_commands)) { - h->reply_pool_head = h->reply_pool; - h->reply_pool_wraparound ^= 1; - } - return a; -} - -/* process completion of an indexed ("direct lookup") command */ -static inline u32 process_indexed_cmd(ctlr_info_t *h, u32 raw_tag) -{ - u32 tag_index; - CommandList_struct *c; - - tag_index = cciss_tag_to_index(raw_tag); - if (bad_tag(h, tag_index, raw_tag)) - return next_command(h); - c = h->cmd_pool + tag_index; - finish_cmd(h, c, raw_tag); - return next_command(h); -} - -/* process completion of a non-indexed command */ -static inline u32 process_nonindexed_cmd(ctlr_info_t *h, u32 raw_tag) -{ - CommandList_struct *c = NULL; - __u32 busaddr_masked, tag_masked; - - tag_masked = cciss_tag_discard_error_bits(h, raw_tag); - list_for_each_entry(c, &h->cmpQ, list) { - busaddr_masked = cciss_tag_discard_error_bits(h, c->busaddr); - if (busaddr_masked == tag_masked) { - finish_cmd(h, c, raw_tag); - return next_command(h); - } - } - bad_tag(h, h->nr_cmds + 1, raw_tag); - return next_command(h); -} - -/* Some controllers, like p400, will give us one interrupt - * after a soft reset, even if we turned interrupts off. - * Only need to check for this in the cciss_xxx_discard_completions - * functions. 
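For readers without the rest of the driver in front of them, the performant-mode completion path that next_command() implements above reduces to a short standalone sketch: the controller writes 64-bit tags into a reply ring, and the low bit of each slot, compared against a flag that flips on every wraparound, tells the consumer whether the slot has been refilled since the last pass. The names below (reply_ring, ring_pop, RING_EMPTY) are illustrative only, not driver symbols.

#include <stdint.h>

/* Plain-memory stand-in for the DMA reply buffer the hardware fills. */
struct reply_ring {
	uint64_t *slots;	/* completion tags written by the controller */
	unsigned int size;	/* number of 64-bit slots in the ring */
	unsigned int head;	/* next slot the consumer will examine */
	unsigned int toggle;	/* expected low bit; the spec says start at 1 */
};

#define RING_EMPTY 0xffffffffu

static uint32_t ring_pop(struct reply_ring *r)
{
	uint64_t slot = r->slots[r->head];
	uint32_t tag;

	/* A slot is only valid once its low bit matches the current toggle. */
	if ((slot & 1) != (r->toggle & 1))
		return RING_EMPTY;

	tag = (uint32_t)slot;		/* completed command tag (low 32 bits) */
	if (++r->head == r->size) {	/* wrapped: next pass expects the other bit */
		r->head = 0;
		r->toggle ^= 1;
	}
	return tag;
}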
- */ -static int ignore_bogus_interrupt(ctlr_info_t *h) -{ - if (likely(!reset_devices)) - return 0; - - if (likely(h->interrupts_enabled)) - return 0; - - dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled " - "(known firmware bug.) Ignoring.\n"); - - return 1; -} - -static irqreturn_t cciss_intx_discard_completions(int irq, void *dev_id) -{ - ctlr_info_t *h = dev_id; - unsigned long flags; - u32 raw_tag; - - if (ignore_bogus_interrupt(h)) - return IRQ_NONE; - - if (interrupt_not_for_us(h)) - return IRQ_NONE; - spin_lock_irqsave(&h->lock, flags); - while (interrupt_pending(h)) { - raw_tag = get_next_completion(h); - while (raw_tag != FIFO_EMPTY) - raw_tag = next_command(h); - } - spin_unlock_irqrestore(&h->lock, flags); - return IRQ_HANDLED; -} - -static irqreturn_t cciss_msix_discard_completions(int irq, void *dev_id) -{ - ctlr_info_t *h = dev_id; - unsigned long flags; - u32 raw_tag; - - if (ignore_bogus_interrupt(h)) - return IRQ_NONE; - - spin_lock_irqsave(&h->lock, flags); - raw_tag = get_next_completion(h); - while (raw_tag != FIFO_EMPTY) - raw_tag = next_command(h); - spin_unlock_irqrestore(&h->lock, flags); - return IRQ_HANDLED; -} - -static irqreturn_t do_cciss_intx(int irq, void *dev_id) -{ - ctlr_info_t *h = dev_id; - unsigned long flags; - u32 raw_tag; - - if (interrupt_not_for_us(h)) - return IRQ_NONE; - spin_lock_irqsave(&h->lock, flags); - while (interrupt_pending(h)) { - raw_tag = get_next_completion(h); - while (raw_tag != FIFO_EMPTY) { - if (cciss_tag_contains_index(raw_tag)) - raw_tag = process_indexed_cmd(h, raw_tag); - else - raw_tag = process_nonindexed_cmd(h, raw_tag); - } - } - spin_unlock_irqrestore(&h->lock, flags); - return IRQ_HANDLED; -} - -/* Add a second interrupt handler for MSI/MSI-X mode. In this mode we never - * check the interrupt pending register because it is not set. - */ -static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id) -{ - ctlr_info_t *h = dev_id; - unsigned long flags; - u32 raw_tag; - - spin_lock_irqsave(&h->lock, flags); - raw_tag = get_next_completion(h); - while (raw_tag != FIFO_EMPTY) { - if (cciss_tag_contains_index(raw_tag)) - raw_tag = process_indexed_cmd(h, raw_tag); - else - raw_tag = process_nonindexed_cmd(h, raw_tag); - } - spin_unlock_irqrestore(&h->lock, flags); - return IRQ_HANDLED; -} - -/** - * add_to_scan_list() - add controller to rescan queue - * @h: Pointer to the controller. - * - * Adds the controller to the rescan queue if not already on the queue. - * - * returns 1 if added to the queue, 0 if skipped (could be on the - * queue already, or the controller could be initializing or shutting - * down). - **/ -static int add_to_scan_list(struct ctlr_info *h) -{ - struct ctlr_info *test_h; - int found = 0; - int ret = 0; - - if (h->busy_initializing) - return 0; - - if (!mutex_trylock(&h->busy_shutting_down)) - return 0; - - mutex_lock(&scan_mutex); - list_for_each_entry(test_h, &scan_q, scan_list) { - if (test_h == h) { - found = 1; - break; - } - } - if (!found && !h->busy_scanning) { - reinit_completion(&h->scan_wait); - list_add_tail(&h->scan_list, &scan_q); - ret = 1; - } - mutex_unlock(&scan_mutex); - mutex_unlock(&h->busy_shutting_down); - - return ret; -} - -/** - * remove_from_scan_list() - remove controller from rescan queue - * @h: Pointer to the controller. - * - * Removes the controller from the rescan queue if present. Blocks if - * the controller is currently conducting a rescan. The controller - * can be in one of three states: - * 1. Doesn't need a scan - * 2. 
On the scan list, but not scanning yet (we remove it) - * 3. Busy scanning (and not on the list). In this case we want to wait for - * the scan to complete to make sure the scanning thread for this - * controller is completely idle. - **/ -static void remove_from_scan_list(struct ctlr_info *h) -{ - struct ctlr_info *test_h, *tmp_h; - - mutex_lock(&scan_mutex); - list_for_each_entry_safe(test_h, tmp_h, &scan_q, scan_list) { - if (test_h == h) { /* state 2. */ - list_del(&h->scan_list); - complete_all(&h->scan_wait); - mutex_unlock(&scan_mutex); - return; - } - } - if (h->busy_scanning) { /* state 3. */ - mutex_unlock(&scan_mutex); - wait_for_completion(&h->scan_wait); - } else { /* state 1, nothing to do. */ - mutex_unlock(&scan_mutex); - } -} - -/** - * scan_thread() - kernel thread used to rescan controllers - * @data: Ignored. - * - * A kernel thread used scan for drive topology changes on - * controllers. The thread processes only one controller at a time - * using a queue. Controllers are added to the queue using - * add_to_scan_list() and removed from the queue either after done - * processing or using remove_from_scan_list(). - * - * returns 0. - **/ -static int scan_thread(void *data) -{ - struct ctlr_info *h; - - while (1) { - set_current_state(TASK_INTERRUPTIBLE); - schedule(); - if (kthread_should_stop()) - break; - - while (1) { - mutex_lock(&scan_mutex); - if (list_empty(&scan_q)) { - mutex_unlock(&scan_mutex); - break; - } - - h = list_entry(scan_q.next, - struct ctlr_info, - scan_list); - list_del(&h->scan_list); - h->busy_scanning = 1; - mutex_unlock(&scan_mutex); - - rebuild_lun_table(h, 0, 0); - complete_all(&h->scan_wait); - mutex_lock(&scan_mutex); - h->busy_scanning = 0; - mutex_unlock(&scan_mutex); - } - } - - return 0; -} - -static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c) -{ - if (c->err_info->SenseInfo[2] != UNIT_ATTENTION) - return 0; - - switch (c->err_info->SenseInfo[12]) { - case STATE_CHANGED: - dev_warn(&h->pdev->dev, "a state change " - "detected, command retried\n"); - return 1; - break; - case LUN_FAILED: - dev_warn(&h->pdev->dev, "LUN failure " - "detected, action required\n"); - return 1; - break; - case REPORT_LUNS_CHANGED: - dev_warn(&h->pdev->dev, "report LUN data changed\n"); - /* - * Here, we could call add_to_scan_list and wake up the scan thread, - * except that it's quite likely that we will get more than one - * REPORT_LUNS_CHANGED condition in quick succession, which means - * that those which occur after the first one will likely happen - * *during* the scan_thread's rescan. And the rescan code is not - * robust enough to restart in the middle, undoing what it has already - * done, and it's not clear that it's even possible to do this, since - * part of what it does is notify the block layer, which starts - * doing it's own i/o to read partition tables and so on, and the - * driver doesn't have visibility to know what might need undoing. - * In any event, if possible, it is horribly complicated to get right - * so we just don't do it for now. - * - * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012. 
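The unit-attention classification here leans on the fixed-format SCSI sense layout: byte 2 carries the sense key and byte 12 the additional sense code that names the event (state changed, LUN failed, report-LUNs data changed, and so on). A minimal decoder as a sketch, assuming a raw fixed-format sense buffer; masking the low nibble of byte 2 is standard SCSI practice, whereas the driver compares the byte directly.

#include <stdint.h>

#define SENSE_KEY_UNIT_ATTENTION 0x06	/* standard SCSI sense key value */

static int is_unit_attention(const uint8_t *sense)
{
	/* Sense key lives in the low nibble of byte 2 of fixed-format sense data. */
	return (sense[2] & 0x0f) == SENSE_KEY_UNIT_ATTENTION;
}

static uint8_t unit_attention_reason(const uint8_t *sense)
{
	/* Additional sense code: the value the switch statement classifies. */
	return sense[12];
}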
- */ - return 1; - break; - case POWER_OR_RESET: - dev_warn(&h->pdev->dev, - "a power on or device reset detected\n"); - return 1; - break; - case UNIT_ATTENTION_CLEARED: - dev_warn(&h->pdev->dev, - "unit attention cleared by another initiator\n"); - return 1; - break; - default: - dev_warn(&h->pdev->dev, "unknown unit attention detected\n"); - return 1; - } -} - -/* - * We cannot read the structure directly, for portability we must use - * the io functions. - * This is for debug only. - */ -static void print_cfg_table(ctlr_info_t *h) -{ - int i; - char temp_name[17]; - CfgTable_struct *tb = h->cfgtable; - - dev_dbg(&h->pdev->dev, "Controller Configuration information\n"); - dev_dbg(&h->pdev->dev, "------------------------------------\n"); - for (i = 0; i < 4; i++) - temp_name[i] = readb(&(tb->Signature[i])); - temp_name[4] = '\0'; - dev_dbg(&h->pdev->dev, " Signature = %s\n", temp_name); - dev_dbg(&h->pdev->dev, " Spec Number = %d\n", - readl(&(tb->SpecValence))); - dev_dbg(&h->pdev->dev, " Transport methods supported = 0x%x\n", - readl(&(tb->TransportSupport))); - dev_dbg(&h->pdev->dev, " Transport methods active = 0x%x\n", - readl(&(tb->TransportActive))); - dev_dbg(&h->pdev->dev, " Requested transport Method = 0x%x\n", - readl(&(tb->HostWrite.TransportRequest))); - dev_dbg(&h->pdev->dev, " Coalesce Interrupt Delay = 0x%x\n", - readl(&(tb->HostWrite.CoalIntDelay))); - dev_dbg(&h->pdev->dev, " Coalesce Interrupt Count = 0x%x\n", - readl(&(tb->HostWrite.CoalIntCount))); - dev_dbg(&h->pdev->dev, " Max outstanding commands = 0x%x\n", - readl(&(tb->CmdsOutMax))); - dev_dbg(&h->pdev->dev, " Bus Types = 0x%x\n", - readl(&(tb->BusTypes))); - for (i = 0; i < 16; i++) - temp_name[i] = readb(&(tb->ServerName[i])); - temp_name[16] = '\0'; - dev_dbg(&h->pdev->dev, " Server Name = %s\n", temp_name); - dev_dbg(&h->pdev->dev, " Heartbeat Counter = 0x%x\n\n\n", - readl(&(tb->HeartBeat))); -} - -static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr) -{ - int i, offset, mem_type, bar_type; - if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */ - return 0; - offset = 0; - for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { - bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE; - if (bar_type == PCI_BASE_ADDRESS_SPACE_IO) - offset += 4; - else { - mem_type = pci_resource_flags(pdev, i) & - PCI_BASE_ADDRESS_MEM_TYPE_MASK; - switch (mem_type) { - case PCI_BASE_ADDRESS_MEM_TYPE_32: - case PCI_BASE_ADDRESS_MEM_TYPE_1M: - offset += 4; /* 32 bit */ - break; - case PCI_BASE_ADDRESS_MEM_TYPE_64: - offset += 8; - break; - default: /* reserved in PCI 2.2 */ - dev_warn(&pdev->dev, - "Base address is invalid\n"); - return -1; - break; - } - } - if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0) - return i + 1; - } - return -1; -} - -/* Fill in bucket_map[], given nsgs (the max number of - * scatter gather elements supported) and bucket[], - * which is an array of 8 integers. The bucket[] array - * contains 8 different DMA transfer sizes (in 16 - * byte increments) which the controller uses to fetch - * commands. This function fills in bucket_map[], which - * maps a given number of scatter gather elements to one of - * the 8 DMA transfer sizes. The point of it is to allow the - * controller to only do as much DMA as needed to fetch the - * command, with the DMA transfer size encoded in the lower - * bits of the command address. 
- */ -static void calc_bucket_map(int bucket[], int num_buckets, - int nsgs, int *bucket_map) -{ - int i, j, b, size; - - /* even a command with 0 SGs requires 4 blocks */ -#define MINIMUM_TRANSFER_BLOCKS 4 -#define NUM_BUCKETS 8 - /* Note, bucket_map must have nsgs+1 entries. */ - for (i = 0; i <= nsgs; i++) { - /* Compute size of a command with i SG entries */ - size = i + MINIMUM_TRANSFER_BLOCKS; - b = num_buckets; /* Assume the biggest bucket */ - /* Find the bucket that is just big enough */ - for (j = 0; j < 8; j++) { - if (bucket[j] >= size) { - b = j; - break; - } - } - /* for a command with i SG entries, use bucket b. */ - bucket_map[i] = b; - } -} - -static void cciss_wait_for_mode_change_ack(ctlr_info_t *h) -{ - int i; - - /* under certain very rare conditions, this can take awhile. - * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right - * as we enter this code.) */ - for (i = 0; i < MAX_CONFIG_WAIT; i++) { - if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq)) - break; - usleep_range(10000, 20000); - } -} - -static void cciss_enter_performant_mode(ctlr_info_t *h, u32 use_short_tags) -{ - /* This is a bit complicated. There are 8 registers on - * the controller which we write to to tell it 8 different - * sizes of commands which there may be. It's a way of - * reducing the DMA done to fetch each command. Encoded into - * each command's tag are 3 bits which communicate to the controller - * which of the eight sizes that command fits within. The size of - * each command depends on how many scatter gather entries there are. - * Each SG entry requires 16 bytes. The eight registers are programmed - * with the number of 16-byte blocks a command of that size requires. - * The smallest command possible requires 5 such 16 byte blocks. - * the largest command possible requires MAXSGENTRIES + 4 16-byte - * blocks. Note, this only extends to the SG entries contained - * within the command block, and does not extend to chained blocks - * of SG elements. bft[] contains the eight values we write to - * the registers. They are not evenly distributed, but have more - * sizes for small commands, and fewer sizes for larger commands. - */ - __u32 trans_offset; - int bft[8] = { 5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4}; - /* - * 5 = 1 s/g entry or 4k - * 6 = 2 s/g entry or 8k - * 8 = 4 s/g entry or 16k - * 10 = 6 s/g entry or 24k - */ - unsigned long register_value; - BUILD_BUG_ON(28 > MAXSGENTRIES + 4); - - h->reply_pool_wraparound = 1; /* spec: init to 1 */ - - /* Controller spec: zero out this buffer. 
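The block-fetch sizing that calc_bucket_map() and bft[] establish is easiest to see with a concrete number. A userspace-style sketch, assuming MAXSGENTRIES is 32 (so the largest bucket is 36 blocks); bucket_for() is a hypothetical helper, not a driver function.

#include <stdio.h>

#define MAXSG 32	/* stands in for MAXSGENTRIES in this example */

static const int bft[8] = { 5, 6, 8, 10, 12, 20, 28, MAXSG + 4 };

static int bucket_for(int nsgs)
{
	int blocks = nsgs + 4;		/* a command with n SG entries spans n + 4 blocks */
	int b;

	for (b = 0; b < 8; b++)
		if (bft[b] >= blocks)
			return b;	/* smallest programmed size that fits */
	return 7;			/* fall back to the largest bucket */
}

int main(void)
{
	int nsgs = 10;	/* 14 blocks, so the 20-block register wins */

	printf("%d SG entries use the %d-block fetch size\n",
	       nsgs, bft[bucket_for(nsgs)]);
	return 0;
}

So a request with 10 scatter/gather entries is fetched as 20 sixteen-byte blocks rather than the worst-case command size, which is the point of the eight BlockFetch registers.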
*/ - memset(h->reply_pool, 0, h->max_commands * sizeof(__u64)); - h->reply_pool_head = h->reply_pool; - - trans_offset = readl(&(h->cfgtable->TransMethodOffset)); - calc_bucket_map(bft, ARRAY_SIZE(bft), h->maxsgentries, - h->blockFetchTable); - writel(bft[0], &h->transtable->BlockFetch0); - writel(bft[1], &h->transtable->BlockFetch1); - writel(bft[2], &h->transtable->BlockFetch2); - writel(bft[3], &h->transtable->BlockFetch3); - writel(bft[4], &h->transtable->BlockFetch4); - writel(bft[5], &h->transtable->BlockFetch5); - writel(bft[6], &h->transtable->BlockFetch6); - writel(bft[7], &h->transtable->BlockFetch7); - - /* size of controller ring buffer */ - writel(h->max_commands, &h->transtable->RepQSize); - writel(1, &h->transtable->RepQCount); - writel(0, &h->transtable->RepQCtrAddrLow32); - writel(0, &h->transtable->RepQCtrAddrHigh32); - writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32); - writel(0, &h->transtable->RepQAddr0High32); - writel(CFGTBL_Trans_Performant | use_short_tags, - &(h->cfgtable->HostWrite.TransportRequest)); - - writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); - cciss_wait_for_mode_change_ack(h); - register_value = readl(&(h->cfgtable->TransportActive)); - if (!(register_value & CFGTBL_Trans_Performant)) - dev_warn(&h->pdev->dev, "cciss: unable to get board into" - " performant mode\n"); -} - -static void cciss_put_controller_into_performant_mode(ctlr_info_t *h) -{ - __u32 trans_support; - - if (cciss_simple_mode) - return; - - dev_dbg(&h->pdev->dev, "Trying to put board into Performant mode\n"); - /* Attempt to put controller into performant mode if supported */ - /* Does board support performant mode? */ - trans_support = readl(&(h->cfgtable->TransportSupport)); - if (!(trans_support & PERFORMANT_MODE)) - return; - - dev_dbg(&h->pdev->dev, "Placing controller into performant mode\n"); - /* Performant mode demands commands on a 32 byte boundary - * pci_alloc_consistent aligns on page boundarys already. - * Just need to check if divisible by 32 - */ - if ((sizeof(CommandList_struct) % 32) != 0) { - dev_warn(&h->pdev->dev, "%s %d %s\n", - "cciss info: command size[", - (int)sizeof(CommandList_struct), - "] not divisible by 32, no performant mode..\n"); - return; - } - - /* Performant mode ring buffer and supporting data structures */ - h->reply_pool = (__u64 *)pci_alloc_consistent( - h->pdev, h->max_commands * sizeof(__u64), - &(h->reply_pool_dhandle)); - - /* Need a block fetch table for performant mode */ - h->blockFetchTable = kmalloc(((h->maxsgentries+1) * - sizeof(__u32)), GFP_KERNEL); - - if ((h->reply_pool == NULL) || (h->blockFetchTable == NULL)) - goto clean_up; - - cciss_enter_performant_mode(h, - trans_support & CFGTBL_Trans_use_short_tags); - - /* Change the access methods to the performant access methods */ - h->access = SA5_performant_access; - h->transMethod = CFGTBL_Trans_Performant; - - return; -clean_up: - kfree(h->blockFetchTable); - if (h->reply_pool) - pci_free_consistent(h->pdev, - h->max_commands * sizeof(__u64), - h->reply_pool, - h->reply_pool_dhandle); - return; - -} /* cciss_put_controller_into_performant_mode */ - -/* If MSI/MSI-X is supported by the kernel we will try to enable it on - * controllers that are capable. If not, we use IO-APIC mode. 
- */ - -static void cciss_interrupt_mode(ctlr_info_t *h) -{ - int ret; - - /* Some boards advertise MSI but don't really support it */ - if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) || - (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11)) - goto default_int_mode; - - ret = pci_alloc_irq_vectors(h->pdev, 4, 4, PCI_IRQ_MSIX); - if (ret >= 0) { - h->intr[0] = pci_irq_vector(h->pdev, 0); - h->intr[1] = pci_irq_vector(h->pdev, 1); - h->intr[2] = pci_irq_vector(h->pdev, 2); - h->intr[3] = pci_irq_vector(h->pdev, 3); - return; - } - - ret = pci_alloc_irq_vectors(h->pdev, 1, 1, PCI_IRQ_MSI); - -default_int_mode: - /* if we get here we're going to use the default interrupt mode */ - h->intr[h->intr_mode] = pci_irq_vector(h->pdev, 0); - return; -} - -static int cciss_lookup_board_id(struct pci_dev *pdev, u32 *board_id) -{ - int i; - u32 subsystem_vendor_id, subsystem_device_id; - - subsystem_vendor_id = pdev->subsystem_vendor; - subsystem_device_id = pdev->subsystem_device; - *board_id = ((subsystem_device_id << 16) & 0xffff0000) | - subsystem_vendor_id; - - for (i = 0; i < ARRAY_SIZE(products); i++) { - /* Stand aside for hpsa driver on request */ - if (cciss_allow_hpsa) - return -ENODEV; - if (*board_id == products[i].board_id) - return i; - } - dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x, ignoring.\n", - *board_id); - return -ENODEV; -} - -static inline bool cciss_board_disabled(ctlr_info_t *h) -{ - u16 command; - - (void) pci_read_config_word(h->pdev, PCI_COMMAND, &command); - return ((command & PCI_COMMAND_MEMORY) == 0); -} - -static int cciss_pci_find_memory_BAR(struct pci_dev *pdev, - unsigned long *memory_bar) -{ - int i; - - for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) - if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { - /* addressing mode bits already removed */ - *memory_bar = pci_resource_start(pdev, i); - dev_dbg(&pdev->dev, "memory BAR = %lx\n", - *memory_bar); - return 0; - } - dev_warn(&pdev->dev, "no memory BAR found\n"); - return -ENODEV; -} - -static int cciss_wait_for_board_state(struct pci_dev *pdev, - void __iomem *vaddr, int wait_for_ready) -#define BOARD_READY 1 -#define BOARD_NOT_READY 0 -{ - int i, iterations; - u32 scratchpad; - - if (wait_for_ready) - iterations = CCISS_BOARD_READY_ITERATIONS; - else - iterations = CCISS_BOARD_NOT_READY_ITERATIONS; - - for (i = 0; i < iterations; i++) { - scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET); - if (wait_for_ready) { - if (scratchpad == CCISS_FIRMWARE_READY) - return 0; - } else { - if (scratchpad != CCISS_FIRMWARE_READY) - return 0; - } - msleep(CCISS_BOARD_READY_POLL_INTERVAL_MSECS); - } - dev_warn(&pdev->dev, "board not ready, timed out.\n"); - return -ENODEV; -} - -static int cciss_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr, - u32 *cfg_base_addr, u64 *cfg_base_addr_index, - u64 *cfg_offset) -{ - *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET); - *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET); - *cfg_base_addr &= (u32) 0x0000ffff; - *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr); - if (*cfg_base_addr_index == -1) { - dev_warn(&pdev->dev, "cannot find cfg_base_addr_index, " - "*cfg_base_addr = 0x%08x\n", *cfg_base_addr); - return -ENODEV; - } - return 0; -} - -static int cciss_find_cfgtables(ctlr_info_t *h) -{ - u64 cfg_offset; - u32 cfg_base_addr; - u64 cfg_base_addr_index; - u32 trans_offset; - int rc; - - rc = cciss_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr, - &cfg_base_addr_index, &cfg_offset); - if (rc) - return rc; - h->cfgtable = 
remap_pci_mem(pci_resource_start(h->pdev, - cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable)); - if (!h->cfgtable) - return -ENOMEM; - rc = write_driver_ver_to_cfgtable(h->cfgtable); - if (rc) - return rc; - /* Find performant mode table. */ - trans_offset = readl(&h->cfgtable->TransMethodOffset); - h->transtable = remap_pci_mem(pci_resource_start(h->pdev, - cfg_base_addr_index)+cfg_offset+trans_offset, - sizeof(*h->transtable)); - if (!h->transtable) - return -ENOMEM; - return 0; -} - -static void cciss_get_max_perf_mode_cmds(struct ctlr_info *h) -{ - h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands)); - - /* Limit commands in memory limited kdump scenario. */ - if (reset_devices && h->max_commands > 32) - h->max_commands = 32; - - if (h->max_commands < 16) { - dev_warn(&h->pdev->dev, "Controller reports " - "max supported commands of %d, an obvious lie. " - "Using 16. Ensure that firmware is up to date.\n", - h->max_commands); - h->max_commands = 16; - } -} - -/* Interrogate the hardware for some limits: - * max commands, max SG elements without chaining, and with chaining, - * SG chain block size, etc. - */ -static void cciss_find_board_params(ctlr_info_t *h) -{ - cciss_get_max_perf_mode_cmds(h); - h->nr_cmds = h->max_commands - 4 - cciss_tape_cmds; - h->maxsgentries = readl(&(h->cfgtable->MaxSGElements)); - /* - * The P600 may exhibit poor performnace under some workloads - * if we use the value in the configuration table. Limit this - * controller to MAXSGENTRIES (32) instead. - */ - if (h->board_id == 0x3225103C) - h->maxsgentries = MAXSGENTRIES; - /* - * Limit in-command s/g elements to 32 save dma'able memory. - * Howvever spec says if 0, use 31 - */ - h->max_cmd_sgentries = 31; - if (h->maxsgentries > 512) { - h->max_cmd_sgentries = 32; - h->chainsize = h->maxsgentries - h->max_cmd_sgentries + 1; - h->maxsgentries--; /* save one for chain pointer */ - } else { - h->maxsgentries = 31; /* default to traditional values */ - h->chainsize = 0; - } -} - -static inline bool CISS_signature_present(ctlr_info_t *h) -{ - if (!check_signature(h->cfgtable->Signature, "CISS", 4)) { - dev_warn(&h->pdev->dev, "not a valid CISS config table\n"); - return false; - } - return true; -} - -/* Need to enable prefetch in the SCSI core for 6400 in x86 */ -static inline void cciss_enable_scsi_prefetch(ctlr_info_t *h) -{ -#ifdef CONFIG_X86 - u32 prefetch; - - prefetch = readl(&(h->cfgtable->SCSI_Prefetch)); - prefetch |= 0x100; - writel(prefetch, &(h->cfgtable->SCSI_Prefetch)); -#endif -} - -/* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result - * in a prefetch beyond physical memory. 
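The scatter/gather limits derived in cciss_find_board_params() are easier to follow as a plain function with an example input; sg_limits_for() and the figure of 528 reported elements are illustrative only.

struct sg_limits {
	int per_command;	/* descriptors embedded in the command block */
	int chain_block;	/* descriptors in the separate chained block */
	int usable;		/* total SG entries a request may carry */
};

static struct sg_limits sg_limits_for(int reported_max)
{
	struct sg_limits l;

	if (reported_max > 512) {
		l.per_command = 32;
		l.chain_block = reported_max - l.per_command + 1;
		l.usable = reported_max - 1;	/* one slot feeds the chain pointer */
	} else {
		l.per_command = 31;		/* traditional limit, no chaining */
		l.chain_block = 0;
		l.usable = 31;
	}
	return l;
}

With reported_max = 528 this yields 32 in-command descriptors, a 497-entry chain block, and 527 usable entries overall.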
- */ -static inline void cciss_p600_dma_prefetch_quirk(ctlr_info_t *h) -{ - u32 dma_prefetch; - __u32 dma_refetch; - - if (h->board_id != 0x3225103C) - return; - dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG); - dma_prefetch |= 0x8000; - writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG); - pci_read_config_dword(h->pdev, PCI_COMMAND_PARITY, &dma_refetch); - dma_refetch |= 0x1; - pci_write_config_dword(h->pdev, PCI_COMMAND_PARITY, dma_refetch); -} - -static int cciss_pci_init(ctlr_info_t *h) -{ - int prod_index, err; - - prod_index = cciss_lookup_board_id(h->pdev, &h->board_id); - if (prod_index < 0) - return -ENODEV; - h->product_name = products[prod_index].product_name; - h->access = *(products[prod_index].access); - - if (cciss_board_disabled(h)) { - dev_warn(&h->pdev->dev, "controller appears to be disabled\n"); - return -ENODEV; - } - - pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S | - PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); - - err = pci_enable_device(h->pdev); - if (err) { - dev_warn(&h->pdev->dev, "Unable to Enable PCI device\n"); - return err; - } - - err = pci_request_regions(h->pdev, "cciss"); - if (err) { - dev_warn(&h->pdev->dev, - "Cannot obtain PCI resources, aborting\n"); - return err; - } - - dev_dbg(&h->pdev->dev, "irq = %x\n", h->pdev->irq); - dev_dbg(&h->pdev->dev, "board_id = %x\n", h->board_id); - -/* If the kernel supports MSI/MSI-X we will try to enable that functionality, - * else we use the IO-APIC interrupt assigned to us by system ROM. - */ - cciss_interrupt_mode(h); - err = cciss_pci_find_memory_BAR(h->pdev, &h->paddr); - if (err) - goto err_out_free_res; - h->vaddr = remap_pci_mem(h->paddr, 0x250); - if (!h->vaddr) { - err = -ENOMEM; - goto err_out_free_res; - } - err = cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); - if (err) - goto err_out_free_res; - err = cciss_find_cfgtables(h); - if (err) - goto err_out_free_res; - print_cfg_table(h); - cciss_find_board_params(h); - - if (!CISS_signature_present(h)) { - err = -ENODEV; - goto err_out_free_res; - } - cciss_enable_scsi_prefetch(h); - cciss_p600_dma_prefetch_quirk(h); - err = cciss_enter_simple_mode(h); - if (err) - goto err_out_free_res; - cciss_put_controller_into_performant_mode(h); - return 0; - -err_out_free_res: - /* - * Deliberately omit pci_disable_device(): it does something nasty to - * Smart Array controllers that pci_enable_device does not undo - */ - if (h->transtable) - iounmap(h->transtable); - if (h->cfgtable) - iounmap(h->cfgtable); - if (h->vaddr) - iounmap(h->vaddr); - pci_release_regions(h->pdev); - return err; -} - -/* Function to find the first free pointer into our hba[] array - * Returns -1 if no free entries are left. - */ -static int alloc_cciss_hba(struct pci_dev *pdev) -{ - int i; - - for (i = 0; i < MAX_CTLR; i++) { - if (!hba[i]) { - ctlr_info_t *h; - - h = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL); - if (!h) - goto Enomem; - hba[i] = h; - return i; - } - } - dev_warn(&pdev->dev, "This driver supports a maximum" - " of %d controllers.\n", MAX_CTLR); - return -1; -Enomem: - dev_warn(&pdev->dev, "out of memory.\n"); - return -1; -} - -static void free_hba(ctlr_info_t *h) -{ - int i; - - hba[h->ctlr] = NULL; - for (i = 0; i < h->highest_lun + 1; i++) - if (h->gendisk[i] != NULL) - put_disk(h->gendisk[i]); - kfree(h); -} - -/* Send a message CDB to the firmware. 
*/ -static int cciss_message(struct pci_dev *pdev, unsigned char opcode, - unsigned char type) -{ - typedef struct { - CommandListHeader_struct CommandHeader; - RequestBlock_struct Request; - ErrDescriptor_struct ErrorDescriptor; - } Command; - static const size_t cmd_sz = sizeof(Command) + sizeof(ErrorInfo_struct); - Command *cmd; - dma_addr_t paddr64; - uint32_t paddr32, tag; - void __iomem *vaddr; - int i, err; - - vaddr = ioremap_nocache(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); - if (vaddr == NULL) - return -ENOMEM; - - /* The Inbound Post Queue only accepts 32-bit physical addresses for the - CCISS commands, so they must be allocated from the lower 4GiB of - memory. */ - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - if (err) { - iounmap(vaddr); - return -ENOMEM; - } - - cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64); - if (cmd == NULL) { - iounmap(vaddr); - return -ENOMEM; - } - - /* This must fit, because of the 32-bit consistent DMA mask. Also, - although there's no guarantee, we assume that the address is at - least 4-byte aligned (most likely, it's page-aligned). */ - paddr32 = paddr64; - - cmd->CommandHeader.ReplyQueue = 0; - cmd->CommandHeader.SGList = 0; - cmd->CommandHeader.SGTotal = 0; - cmd->CommandHeader.Tag.lower = paddr32; - cmd->CommandHeader.Tag.upper = 0; - memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8); - - cmd->Request.CDBLen = 16; - cmd->Request.Type.Type = TYPE_MSG; - cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE; - cmd->Request.Type.Direction = XFER_NONE; - cmd->Request.Timeout = 0; /* Don't time out */ - cmd->Request.CDB[0] = opcode; - cmd->Request.CDB[1] = type; - memset(&cmd->Request.CDB[2], 0, 14); /* the rest of the CDB is reserved */ - - cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(Command); - cmd->ErrorDescriptor.Addr.upper = 0; - cmd->ErrorDescriptor.Len = sizeof(ErrorInfo_struct); - - writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET); - - for (i = 0; i < 10; i++) { - tag = readl(vaddr + SA5_REPLY_PORT_OFFSET); - if ((tag & ~3) == paddr32) - break; - msleep(CCISS_POST_RESET_NOOP_TIMEOUT_MSECS); - } - - iounmap(vaddr); - - /* we leak the DMA buffer here ... no choice since the controller could - still complete the command. */ - if (i == 10) { - dev_err(&pdev->dev, - "controller message %02x:%02x timed out\n", - opcode, type); - return -ETIMEDOUT; - } - - pci_free_consistent(pdev, cmd_sz, cmd, paddr64); - - if (tag & 2) { - dev_err(&pdev->dev, "controller message %02x:%02x failed\n", - opcode, type); - return -EIO; - } - - dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n", - opcode, type); - return 0; -} - -#define cciss_noop(p) cciss_message(p, 3, 0) - -static int cciss_controller_hard_reset(struct pci_dev *pdev, - void * __iomem vaddr, u32 use_doorbell) -{ - u16 pmcsr; - int pos; - - if (use_doorbell) { - /* For everything after the P600, the PCI power state method - * of resetting the controller doesn't work, so we have this - * other way using the doorbell register. - */ - dev_info(&pdev->dev, "using doorbell to reset controller\n"); - writel(use_doorbell, vaddr + SA5_DOORBELL); - } else { /* Try to do it the PCI power state way */ - - /* Quoting from the Open CISS Specification: "The Power - * Management Control/Status Register (CSR) controls the power - * state of the device. The normal operating state is D0, - * CSR=00h. The software off state is D3, CSR=03h. 
To reset - * the controller, place the interface device in D3 then to D0, - * this causes a secondary PCI reset which will reset the - * controller." */ - - pos = pci_find_capability(pdev, PCI_CAP_ID_PM); - if (pos == 0) { - dev_err(&pdev->dev, - "cciss_controller_hard_reset: " - "PCI PM not supported\n"); - return -ENODEV; - } - dev_info(&pdev->dev, "using PCI PM to reset controller\n"); - /* enter the D3hot power management state */ - pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr); - pmcsr &= ~PCI_PM_CTRL_STATE_MASK; - pmcsr |= PCI_D3hot; - pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); - - msleep(500); - - /* enter the D0 power management state */ - pmcsr &= ~PCI_PM_CTRL_STATE_MASK; - pmcsr |= PCI_D0; - pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); - - /* - * The P600 requires a small delay when changing states. - * Otherwise we may think the board did not reset and we bail. - * This for kdump only and is particular to the P600. - */ - msleep(500); - } - return 0; -} - -static void init_driver_version(char *driver_version, int len) -{ - memset(driver_version, 0, len); - strncpy(driver_version, "cciss " DRIVER_NAME, len - 1); -} - -static int write_driver_ver_to_cfgtable(CfgTable_struct __iomem *cfgtable) -{ - char *driver_version; - int i, size = sizeof(cfgtable->driver_version); - - driver_version = kmalloc(size, GFP_KERNEL); - if (!driver_version) - return -ENOMEM; - - init_driver_version(driver_version, size); - for (i = 0; i < size; i++) - writeb(driver_version[i], &cfgtable->driver_version[i]); - kfree(driver_version); - return 0; -} - -static void read_driver_ver_from_cfgtable(CfgTable_struct __iomem *cfgtable, - unsigned char *driver_ver) -{ - int i; - - for (i = 0; i < sizeof(cfgtable->driver_version); i++) - driver_ver[i] = readb(&cfgtable->driver_version[i]); -} - -static int controller_reset_failed(CfgTable_struct __iomem *cfgtable) -{ - - char *driver_ver, *old_driver_ver; - int rc, size = sizeof(cfgtable->driver_version); - - old_driver_ver = kmalloc(2 * size, GFP_KERNEL); - if (!old_driver_ver) - return -ENOMEM; - driver_ver = old_driver_ver + size; - - /* After a reset, the 32 bytes of "driver version" in the cfgtable - * should have been changed, otherwise we know the reset failed. - */ - init_driver_version(old_driver_ver, size); - read_driver_ver_from_cfgtable(cfgtable, driver_ver); - rc = !memcmp(driver_ver, old_driver_ver, size); - kfree(old_driver_ver); - return rc; -} - -/* This does a hard reset of the controller using PCI power management - * states or using the doorbell register. */ -static int cciss_kdump_hard_reset_controller(struct pci_dev *pdev) -{ - u64 cfg_offset; - u32 cfg_base_addr; - u64 cfg_base_addr_index; - void __iomem *vaddr; - unsigned long paddr; - u32 misc_fw_support; - int rc; - CfgTable_struct __iomem *cfgtable; - u32 use_doorbell; - u32 board_id; - u16 command_register; - - /* For controllers as old a the p600, this is very nearly - * the same thing as - * - * pci_save_state(pci_dev); - * pci_set_power_state(pci_dev, PCI_D3hot); - * pci_set_power_state(pci_dev, PCI_D0); - * pci_restore_state(pci_dev); - * - * For controllers newer than the P600, the pci power state - * method of resetting doesn't work so we have another way - * using the doorbell register. - */ - - /* Exclude 640x boards. These are two pci devices in one slot - * which share a battery backed cache module. One controls the - * cache, the other accesses the cache through the one that controls - * it. 
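The reset-detection handshake used by write_driver_ver_to_cfgtable() and controller_reset_failed() is worth isolating: the driver stamps its version string into a scratch field of the config table before the reset, and finding the same bytes afterwards means the firmware never actually restarted. A minimal sketch with ordinary memory standing in for the ioremap()ed table; fake_cfgtable and the helper names are assumptions for the example.

#include <string.h>

#define VER_LEN 32

struct fake_cfgtable {
	char driver_version[VER_LEN];	/* scratch area the firmware rewrites on a real reset */
};

static void stamp_version(struct fake_cfgtable *tb, const char *ver)
{
	memset(tb->driver_version, 0, VER_LEN);
	strncpy(tb->driver_version, ver, VER_LEN - 1);
}

/* Nonzero means the stamp survived, i.e. the reset apparently failed. */
static int reset_failed(const struct fake_cfgtable *tb, const char *ver)
{
	char expected[VER_LEN];

	memset(expected, 0, VER_LEN);
	strncpy(expected, ver, VER_LEN - 1);
	return memcmp(tb->driver_version, expected, VER_LEN) == 0;
}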
If we reset the one controlling the cache, the other will - * likely not be happy. Just forbid resetting this conjoined mess. - */ - cciss_lookup_board_id(pdev, &board_id); - if (!ctlr_is_resettable(board_id)) { - dev_warn(&pdev->dev, "Controller not resettable\n"); - return -ENODEV; - } - - /* if controller is soft- but not hard resettable... */ - if (!ctlr_is_hard_resettable(board_id)) - return -ENOTSUPP; /* try soft reset later. */ - - /* Save the PCI command register */ - pci_read_config_word(pdev, 4, &command_register); - /* Turn the board off. This is so that later pci_restore_state() - * won't turn the board on before the rest of config space is ready. - */ - pci_disable_device(pdev); - pci_save_state(pdev); - - /* find the first memory BAR, so we can find the cfg table */ - rc = cciss_pci_find_memory_BAR(pdev, &paddr); - if (rc) - return rc; - vaddr = remap_pci_mem(paddr, 0x250); - if (!vaddr) - return -ENOMEM; - - /* find cfgtable in order to check if reset via doorbell is supported */ - rc = cciss_find_cfg_addrs(pdev, vaddr, &cfg_base_addr, - &cfg_base_addr_index, &cfg_offset); - if (rc) - goto unmap_vaddr; - cfgtable = remap_pci_mem(pci_resource_start(pdev, - cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable)); - if (!cfgtable) { - rc = -ENOMEM; - goto unmap_vaddr; - } - rc = write_driver_ver_to_cfgtable(cfgtable); - if (rc) - goto unmap_vaddr; - - /* If reset via doorbell register is supported, use that. - * There are two such methods. Favor the newest method. - */ - misc_fw_support = readl(&cfgtable->misc_fw_support); - use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2; - if (use_doorbell) { - use_doorbell = DOORBELL_CTLR_RESET2; - } else { - use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; - if (use_doorbell) { - dev_warn(&pdev->dev, "Controller claims that " - "'Bit 2 doorbell reset' is " - "supported, but not 'bit 5 doorbell reset'. " - "Firmware update is recommended.\n"); - rc = -ENOTSUPP; /* use the soft reset */ - goto unmap_cfgtable; - } - } - - rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell); - if (rc) - goto unmap_cfgtable; - pci_restore_state(pdev); - rc = pci_enable_device(pdev); - if (rc) { - dev_warn(&pdev->dev, "failed to enable device.\n"); - goto unmap_cfgtable; - } - pci_write_config_word(pdev, 4, command_register); - - /* Some devices (notably the HP Smart Array 5i Controller) - need a little pause here */ - msleep(CCISS_POST_RESET_PAUSE_MSECS); - - /* Wait for board to become not ready, then ready. */ - dev_info(&pdev->dev, "Waiting for board to reset.\n"); - rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY); - if (rc) { - dev_warn(&pdev->dev, "Failed waiting for board to hard reset." - " Will try soft reset.\n"); - rc = -ENOTSUPP; /* Not expected, but try soft reset later */ - goto unmap_cfgtable; - } - rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_READY); - if (rc) { - dev_warn(&pdev->dev, - "failed waiting for board to become ready " - "after hard reset\n"); - goto unmap_cfgtable; - } - - rc = controller_reset_failed(vaddr); - if (rc < 0) - goto unmap_cfgtable; - if (rc) { - dev_warn(&pdev->dev, "Unable to successfully hard reset " - "controller. 
Will try soft reset.\n"); - rc = -ENOTSUPP; /* Not expected, but try soft reset later */ - } else { - dev_info(&pdev->dev, "Board ready after hard reset.\n"); - } - -unmap_cfgtable: - iounmap(cfgtable); - -unmap_vaddr: - iounmap(vaddr); - return rc; -} - -static int cciss_init_reset_devices(struct pci_dev *pdev) -{ - int rc, i; - - if (!reset_devices) - return 0; - - /* Reset the controller with a PCI power-cycle or via doorbell */ - rc = cciss_kdump_hard_reset_controller(pdev); - - /* -ENOTSUPP here means we cannot reset the controller - * but it's already (and still) up and running in - * "performant mode". Or, it might be 640x, which can't reset - * due to concerns about shared bbwc between 6402/6404 pair. - */ - if (rc == -ENOTSUPP) - return rc; /* just try to do the kdump anyhow. */ - if (rc) - return -ENODEV; - - /* Now try to get the controller to respond to a no-op */ - dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n"); - for (i = 0; i < CCISS_POST_RESET_NOOP_RETRIES; i++) { - if (cciss_noop(pdev) == 0) - break; - else - dev_warn(&pdev->dev, "no-op failed%s\n", - (i < CCISS_POST_RESET_NOOP_RETRIES - 1 ? - "; re-trying" : "")); - msleep(CCISS_POST_RESET_NOOP_INTERVAL_MSECS); - } - return 0; -} - -static int cciss_allocate_cmd_pool(ctlr_info_t *h) -{ - h->cmd_pool_bits = kmalloc(BITS_TO_LONGS(h->nr_cmds) * - sizeof(unsigned long), GFP_KERNEL); - h->cmd_pool = pci_alloc_consistent(h->pdev, - h->nr_cmds * sizeof(CommandList_struct), - &(h->cmd_pool_dhandle)); - h->errinfo_pool = pci_alloc_consistent(h->pdev, - h->nr_cmds * sizeof(ErrorInfo_struct), - &(h->errinfo_pool_dhandle)); - if ((h->cmd_pool_bits == NULL) - || (h->cmd_pool == NULL) - || (h->errinfo_pool == NULL)) { - dev_err(&h->pdev->dev, "out of memory"); - return -ENOMEM; - } - return 0; -} - -static int cciss_allocate_scatterlists(ctlr_info_t *h) -{ - int i; - - /* zero it, so that on free we need not know how many were alloc'ed */ - h->scatter_list = kzalloc(h->max_commands * - sizeof(struct scatterlist *), GFP_KERNEL); - if (!h->scatter_list) - return -ENOMEM; - - for (i = 0; i < h->nr_cmds; i++) { - h->scatter_list[i] = kmalloc(sizeof(struct scatterlist) * - h->maxsgentries, GFP_KERNEL); - if (h->scatter_list[i] == NULL) { - dev_err(&h->pdev->dev, "could not allocate " - "s/g lists\n"); - return -ENOMEM; - } - } - return 0; -} - -static void cciss_free_scatterlists(ctlr_info_t *h) -{ - int i; - - if (h->scatter_list) { - for (i = 0; i < h->nr_cmds; i++) - kfree(h->scatter_list[i]); - kfree(h->scatter_list); - } -} - -static void cciss_free_cmd_pool(ctlr_info_t *h) -{ - kfree(h->cmd_pool_bits); - if (h->cmd_pool) - pci_free_consistent(h->pdev, - h->nr_cmds * sizeof(CommandList_struct), - h->cmd_pool, h->cmd_pool_dhandle); - if (h->errinfo_pool) - pci_free_consistent(h->pdev, - h->nr_cmds * sizeof(ErrorInfo_struct), - h->errinfo_pool, h->errinfo_pool_dhandle); -} - -static int cciss_request_irq(ctlr_info_t *h, - irqreturn_t (*msixhandler)(int, void *), - irqreturn_t (*intxhandler)(int, void *)) -{ - if (h->pdev->msi_enabled || h->pdev->msix_enabled) { - if (!request_irq(h->intr[h->intr_mode], msixhandler, - 0, h->devname, h)) - return 0; - dev_err(&h->pdev->dev, "Unable to get msi irq %d" - " for %s\n", h->intr[h->intr_mode], - h->devname); - return -1; - } - - if (!request_irq(h->intr[h->intr_mode], intxhandler, - IRQF_SHARED, h->devname, h)) - return 0; - dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n", - h->intr[h->intr_mode], h->devname); - return -1; -} - -static int 
cciss_kdump_soft_reset(ctlr_info_t *h) -{ - if (cciss_send_reset(h, CTLR_LUNID, CCISS_RESET_TYPE_CONTROLLER)) { - dev_warn(&h->pdev->dev, "Resetting array controller failed.\n"); - return -EIO; - } - - dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n"); - if (cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) { - dev_warn(&h->pdev->dev, "Soft reset had no effect.\n"); - return -1; - } - - dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n"); - if (cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) { - dev_warn(&h->pdev->dev, "Board failed to become ready " - "after soft reset.\n"); - return -1; - } - - return 0; -} - -static void cciss_undo_allocations_after_kdump_soft_reset(ctlr_info_t *h) -{ - int ctlr = h->ctlr; - - free_irq(h->intr[h->intr_mode], h); - pci_free_irq_vectors(h->pdev); - cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds); - cciss_free_scatterlists(h); - cciss_free_cmd_pool(h); - kfree(h->blockFetchTable); - if (h->reply_pool) - pci_free_consistent(h->pdev, h->max_commands * sizeof(__u64), - h->reply_pool, h->reply_pool_dhandle); - if (h->transtable) - iounmap(h->transtable); - if (h->cfgtable) - iounmap(h->cfgtable); - if (h->vaddr) - iounmap(h->vaddr); - unregister_blkdev(h->major, h->devname); - cciss_destroy_hba_sysfs_entry(h); - pci_release_regions(h->pdev); - kfree(h); - hba[ctlr] = NULL; -} - -/* - * This is it. Find all the controllers and register them. I really hate - * stealing all these major device numbers. - * returns the number of block devices registered. - */ -static int cciss_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) -{ - int i; - int j = 0; - int rc; - int try_soft_reset = 0; - int dac, return_code; - InquiryData_struct *inq_buff; - ctlr_info_t *h; - unsigned long flags; - - /* - * By default the cciss driver is used for all older HP Smart Array - * controllers. There are module paramaters that allow a user to - * override this behavior and instead use the hpsa SCSI driver. If - * this is the case cciss may be loaded first from the kdump initrd - * image and cause a kernel panic. So if reset_devices is true and - * cciss_allow_hpsa is set just bail. - */ - if ((reset_devices) && (cciss_allow_hpsa == 1)) - return -ENODEV; - rc = cciss_init_reset_devices(pdev); - if (rc) { - if (rc != -ENOTSUPP) - return rc; - /* If the reset fails in a particular way (it has no way to do - * a proper hard reset, so returns -ENOTSUPP) we can try to do - * a soft reset once we get the controller configured up to the - * point that it can accept a command. - */ - try_soft_reset = 1; - rc = 0; - } - -reinit_after_soft_reset: - - i = alloc_cciss_hba(pdev); - if (i < 0) - return -ENOMEM; - - h = hba[i]; - h->pdev = pdev; - h->busy_initializing = 1; - h->intr_mode = cciss_simple_mode ? 
SIMPLE_MODE_INT : PERF_MODE_INT; - INIT_LIST_HEAD(&h->cmpQ); - INIT_LIST_HEAD(&h->reqQ); - mutex_init(&h->busy_shutting_down); - - if (cciss_pci_init(h) != 0) - goto clean_no_release_regions; - - sprintf(h->devname, "cciss%d", i); - h->ctlr = i; - - if (cciss_tape_cmds < 2) - cciss_tape_cmds = 2; - if (cciss_tape_cmds > 16) - cciss_tape_cmds = 16; - - init_completion(&h->scan_wait); - - if (cciss_create_hba_sysfs_entry(h)) - goto clean0; - - /* configure PCI DMA stuff */ - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) - dac = 1; - else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) - dac = 0; - else { - dev_err(&h->pdev->dev, "no suitable DMA available\n"); - goto clean1; - } - - /* - * register with the major number, or get a dynamic major number - * by passing 0 as argument. This is done for greater than - * 8 controller support. - */ - if (i < MAX_CTLR_ORIG) - h->major = COMPAQ_CISS_MAJOR + i; - rc = register_blkdev(h->major, h->devname); - if (rc == -EBUSY || rc == -EINVAL) { - dev_err(&h->pdev->dev, - "Unable to get major number %d for %s " - "on hba %d\n", h->major, h->devname, i); - goto clean1; - } else { - if (i >= MAX_CTLR_ORIG) - h->major = rc; - } - - /* make sure the board interrupts are off */ - h->access.set_intr_mask(h, CCISS_INTR_OFF); - rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx); - if (rc) - goto clean2; - - dev_info(&h->pdev->dev, "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n", - h->devname, pdev->device, pci_name(pdev), - h->intr[h->intr_mode], dac ? "" : " not"); - - if (cciss_allocate_cmd_pool(h)) - goto clean4; - - if (cciss_allocate_scatterlists(h)) - goto clean4; - - h->cmd_sg_list = cciss_allocate_sg_chain_blocks(h, - h->chainsize, h->nr_cmds); - if (!h->cmd_sg_list && h->chainsize > 0) - goto clean4; - - spin_lock_init(&h->lock); - - /* Initialize the pdev driver private data. - have it point to h. */ - pci_set_drvdata(pdev, h); - /* command and error info recs zeroed out before - they are used */ - bitmap_zero(h->cmd_pool_bits, h->nr_cmds); - - h->num_luns = 0; - h->highest_lun = -1; - for (j = 0; j < CISS_MAX_LUN; j++) { - h->drv[j] = NULL; - h->gendisk[j] = NULL; - } - - /* At this point, the controller is ready to take commands. - * Now, if reset_devices and the hard reset didn't work, try - * the soft reset and see if that works. - */ - if (try_soft_reset) { - - /* This is kind of gross. We may or may not get a completion - * from the soft reset command, and if we do, then the value - * from the fifo may or may not be valid. So, we wait 10 secs - * after the reset throwing away any completions we get during - * that time. Unregister the interrupt handler and register - * fake ones to scoop up any residual completions. 
- */ - spin_lock_irqsave(&h->lock, flags); - h->access.set_intr_mask(h, CCISS_INTR_OFF); - spin_unlock_irqrestore(&h->lock, flags); - free_irq(h->intr[h->intr_mode], h); - rc = cciss_request_irq(h, cciss_msix_discard_completions, - cciss_intx_discard_completions); - if (rc) { - dev_warn(&h->pdev->dev, "Failed to request_irq after " - "soft reset.\n"); - goto clean4; - } - - rc = cciss_kdump_soft_reset(h); - if (rc) { - dev_warn(&h->pdev->dev, "Soft reset failed.\n"); - goto clean4; - } - - dev_info(&h->pdev->dev, "Board READY.\n"); - dev_info(&h->pdev->dev, - "Waiting for stale completions to drain.\n"); - h->access.set_intr_mask(h, CCISS_INTR_ON); - msleep(10000); - h->access.set_intr_mask(h, CCISS_INTR_OFF); - - rc = controller_reset_failed(h->cfgtable); - if (rc) - dev_info(&h->pdev->dev, - "Soft reset appears to have failed.\n"); - - /* since the controller's reset, we have to go back and re-init - * everything. Easiest to just forget what we've done and do it - * all over again. - */ - cciss_undo_allocations_after_kdump_soft_reset(h); - try_soft_reset = 0; - if (rc) - /* don't go to clean4, we already unallocated */ - return -ENODEV; - - goto reinit_after_soft_reset; - } - - cciss_scsi_setup(h); - - /* Turn the interrupts on so we can service requests */ - h->access.set_intr_mask(h, CCISS_INTR_ON); - - /* Get the firmware version */ - inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL); - if (inq_buff == NULL) { - dev_err(&h->pdev->dev, "out of memory\n"); - goto clean4; - } - - return_code = sendcmd_withirq(h, CISS_INQUIRY, inq_buff, - sizeof(InquiryData_struct), 0, CTLR_LUNID, TYPE_CMD); - if (return_code == IO_OK) { - h->firm_ver[0] = inq_buff->data_byte[32]; - h->firm_ver[1] = inq_buff->data_byte[33]; - h->firm_ver[2] = inq_buff->data_byte[34]; - h->firm_ver[3] = inq_buff->data_byte[35]; - } else { /* send command failed */ - dev_warn(&h->pdev->dev, "unable to determine firmware" - " version of controller\n"); - } - kfree(inq_buff); - - cciss_procinit(h); - - h->cciss_max_sectors = 8192; - - rebuild_lun_table(h, 1, 0); - cciss_engage_scsi(h); - h->busy_initializing = 0; - return 0; - -clean4: - cciss_free_cmd_pool(h); - cciss_free_scatterlists(h); - cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds); - free_irq(h->intr[h->intr_mode], h); -clean2: - unregister_blkdev(h->major, h->devname); -clean1: - cciss_destroy_hba_sysfs_entry(h); -clean0: - pci_release_regions(pdev); -clean_no_release_regions: - h->busy_initializing = 0; - - /* - * Deliberately omit pci_disable_device(): it does something nasty to - * Smart Array controllers that pci_enable_device does not undo - */ - pci_set_drvdata(pdev, NULL); - free_hba(h); - return -ENODEV; -} - -static void cciss_shutdown(struct pci_dev *pdev) -{ - ctlr_info_t *h; - char *flush_buf; - int return_code; - - h = pci_get_drvdata(pdev); - flush_buf = kzalloc(4, GFP_KERNEL); - if (!flush_buf) { - dev_warn(&h->pdev->dev, "cache not flushed, out of memory.\n"); - return; - } - /* write all data in the battery backed cache to disk */ - return_code = sendcmd_withirq(h, CCISS_CACHE_FLUSH, flush_buf, - 4, 0, CTLR_LUNID, TYPE_CMD); - kfree(flush_buf); - if (return_code != IO_OK) - dev_warn(&h->pdev->dev, "Error flushing cache\n"); - h->access.set_intr_mask(h, CCISS_INTR_OFF); - free_irq(h->intr[h->intr_mode], h); -} - -static int cciss_enter_simple_mode(struct ctlr_info *h) -{ - u32 trans_support; - - trans_support = readl(&(h->cfgtable->TransportSupport)); - if (!(trans_support & SIMPLE_MODE)) - return -ENOTSUPP; - - h->max_commands = 
readl(&(h->cfgtable->CmdsOutMax)); - writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest)); - writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); - cciss_wait_for_mode_change_ack(h); - print_cfg_table(h); - if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) { - dev_warn(&h->pdev->dev, "unable to get board into simple mode\n"); - return -ENODEV; - } - h->transMethod = CFGTBL_Trans_Simple; - return 0; -} - - -static void cciss_remove_one(struct pci_dev *pdev) -{ - ctlr_info_t *h; - int i, j; - - if (pci_get_drvdata(pdev) == NULL) { - dev_err(&pdev->dev, "Unable to remove device\n"); - return; - } - - h = pci_get_drvdata(pdev); - i = h->ctlr; - if (hba[i] == NULL) { - dev_err(&pdev->dev, "device appears to already be removed\n"); - return; - } - - mutex_lock(&h->busy_shutting_down); - - remove_from_scan_list(h); - remove_proc_entry(h->devname, proc_cciss); - unregister_blkdev(h->major, h->devname); - - /* remove it from the disk list */ - for (j = 0; j < CISS_MAX_LUN; j++) { - struct gendisk *disk = h->gendisk[j]; - if (disk) { - struct request_queue *q = disk->queue; - - if (disk->flags & GENHD_FL_UP) { - cciss_destroy_ld_sysfs_entry(h, j, 1); - del_gendisk(disk); - } - if (q) - blk_cleanup_queue(q); - } - } - -#ifdef CONFIG_CISS_SCSI_TAPE - cciss_unregister_scsi(h); /* unhook from SCSI subsystem */ -#endif - - cciss_shutdown(pdev); - - pci_free_irq_vectors(h->pdev); - - iounmap(h->transtable); - iounmap(h->cfgtable); - iounmap(h->vaddr); - - cciss_free_cmd_pool(h); - /* Free up sg elements */ - for (j = 0; j < h->nr_cmds; j++) - kfree(h->scatter_list[j]); - kfree(h->scatter_list); - cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds); - kfree(h->blockFetchTable); - if (h->reply_pool) - pci_free_consistent(h->pdev, h->max_commands * sizeof(__u64), - h->reply_pool, h->reply_pool_dhandle); - /* - * Deliberately omit pci_disable_device(): it does something nasty to - * Smart Array controllers that pci_enable_device does not undo - */ - pci_release_regions(pdev); - pci_set_drvdata(pdev, NULL); - cciss_destroy_hba_sysfs_entry(h); - mutex_unlock(&h->busy_shutting_down); - free_hba(h); -} - -static struct pci_driver cciss_pci_driver = { - .name = "cciss", - .probe = cciss_init_one, - .remove = cciss_remove_one, - .id_table = cciss_pci_device_id, /* id_table */ - .shutdown = cciss_shutdown, -}; - -/* - * This is it. Register the PCI driver information for the cards we control - * the OS will call our registered routines when it finds one of our cards. - */ -static int __init cciss_init(void) -{ - int err; - - /* - * The hardware requires that commands are aligned on a 64-bit - * boundary. Given that we use pci_alloc_consistent() to allocate an - * array of them, the size must be a multiple of 8 bytes. 
- */ - BUILD_BUG_ON(sizeof(CommandList_struct) % COMMANDLIST_ALIGNMENT); - printk(KERN_INFO DRIVER_NAME "\n"); - - err = bus_register(&cciss_bus_type); - if (err) - return err; - - /* Start the scan thread */ - cciss_scan_thread = kthread_run(scan_thread, NULL, "cciss_scan"); - if (IS_ERR(cciss_scan_thread)) { - err = PTR_ERR(cciss_scan_thread); - goto err_bus_unregister; - } - - /* Register for our PCI devices */ - err = pci_register_driver(&cciss_pci_driver); - if (err) - goto err_thread_stop; - - return err; - -err_thread_stop: - kthread_stop(cciss_scan_thread); -err_bus_unregister: - bus_unregister(&cciss_bus_type); - - return err; -} - -static void __exit cciss_cleanup(void) -{ - int i; - - pci_unregister_driver(&cciss_pci_driver); - /* double check that all controller entrys have been removed */ - for (i = 0; i < MAX_CTLR; i++) { - if (hba[i] != NULL) { - dev_warn(&hba[i]->pdev->dev, - "had to remove controller\n"); - cciss_remove_one(hba[i]->pdev); - } - } - kthread_stop(cciss_scan_thread); - if (proc_cciss) - remove_proc_entry("driver/cciss", NULL); - bus_unregister(&cciss_bus_type); -} - -module_init(cciss_init); -module_exit(cciss_cleanup); diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h deleted file mode 100644 index 24b5fd75501a..000000000000 --- a/drivers/block/cciss.h +++ /dev/null @@ -1,433 +0,0 @@ -#ifndef CCISS_H -#define CCISS_H - -#include -#include - -#include "cciss_cmd.h" - - -#define NWD_SHIFT 4 -#define MAX_PART (1 << NWD_SHIFT) - -#define IO_OK 0 -#define IO_ERROR 1 -#define IO_NEEDS_RETRY 3 - -#define VENDOR_LEN 8 -#define MODEL_LEN 16 -#define REV_LEN 4 - -struct ctlr_info; -typedef struct ctlr_info ctlr_info_t; - -struct access_method { - void (*submit_command)(ctlr_info_t *h, CommandList_struct *c); - void (*set_intr_mask)(ctlr_info_t *h, unsigned long val); - unsigned long (*fifo_full)(ctlr_info_t *h); - bool (*intr_pending)(ctlr_info_t *h); - unsigned long (*command_completed)(ctlr_info_t *h); -}; -typedef struct _drive_info_struct -{ - unsigned char LunID[8]; - int usage_count; - struct request_queue *queue; - sector_t nr_blocks; - int block_size; - int heads; - int sectors; - int cylinders; - int raid_level; /* set to -1 to indicate that - * the drive is not in use/configured - */ - int busy_configuring; /* This is set when a drive is being removed - * to prevent it from being opened or it's - * queue from being started. - */ - struct device dev; - __u8 serial_no[16]; /* from inquiry page 0x83, - * not necc. null terminated. 
- */ - char vendor[VENDOR_LEN + 1]; /* SCSI vendor string */ - char model[MODEL_LEN + 1]; /* SCSI model string */ - char rev[REV_LEN + 1]; /* SCSI revision string */ - char device_initialized; /* indicates whether dev is initialized */ -} drive_info_struct; - -struct ctlr_info -{ - int ctlr; - char devname[8]; - char *product_name; - char firm_ver[4]; /* Firmware version */ - struct pci_dev *pdev; - __u32 board_id; - void __iomem *vaddr; - unsigned long paddr; - int nr_cmds; /* Number of commands allowed on this controller */ - CfgTable_struct __iomem *cfgtable; - int interrupts_enabled; - int major; - int max_commands; - int commands_outstanding; - int max_outstanding; /* Debug */ - int num_luns; - int highest_lun; - int usage_count; /* number of opens all all minor devices */ - /* Need space for temp sg list - * number of scatter/gathers supported - * number of scatter/gathers in chained block - */ - struct scatterlist **scatter_list; - int maxsgentries; - int chainsize; - int max_cmd_sgentries; - SGDescriptor_struct **cmd_sg_list; - -# define PERF_MODE_INT 0 -# define DOORBELL_INT 1 -# define SIMPLE_MODE_INT 2 -# define MEMQ_MODE_INT 3 - unsigned int intr[4]; - int intr_mode; - int cciss_max_sectors; - BYTE cciss_read; - BYTE cciss_write; - BYTE cciss_read_capacity; - - /* information about each logical volume */ - drive_info_struct *drv[CISS_MAX_LUN]; - - struct access_method access; - - /* queue and queue Info */ - struct list_head reqQ; - struct list_head cmpQ; - unsigned int Qdepth; - unsigned int maxQsinceinit; - unsigned int maxSG; - spinlock_t lock; - - /* pointers to command and error info pool */ - CommandList_struct *cmd_pool; - dma_addr_t cmd_pool_dhandle; - ErrorInfo_struct *errinfo_pool; - dma_addr_t errinfo_pool_dhandle; - unsigned long *cmd_pool_bits; - int nr_allocs; - int nr_frees; - int busy_configuring; - int busy_initializing; - int busy_scanning; - struct mutex busy_shutting_down; - - /* This element holds the zero based queue number of the last - * queue to be started. It is used for fairness. - */ - int next_to_run; - - /* Disk structures we need to pass back */ - struct gendisk *gendisk[CISS_MAX_LUN]; -#ifdef CONFIG_CISS_SCSI_TAPE - struct cciss_scsi_adapter_data_t *scsi_ctlr; -#endif - unsigned char alive; - struct list_head scan_list; - struct completion scan_wait; - struct device dev; - /* - * Performant mode tables. - */ - u32 trans_support; - u32 trans_offset; - struct TransTable_struct *transtable; - unsigned long transMethod; - - /* - * Performant mode completion buffer - */ - u64 *reply_pool; - dma_addr_t reply_pool_dhandle; - u64 *reply_pool_head; - size_t reply_pool_size; - unsigned char reply_pool_wraparound; - u32 *blockFetchTable; -}; - -/* Defining the diffent access_methods - * - * Memory mapped FIFO interface (SMART 53xx cards) - */ -#define SA5_DOORBELL 0x20 -#define SA5_REQUEST_PORT_OFFSET 0x40 -#define SA5_REPLY_INTR_MASK_OFFSET 0x34 -#define SA5_REPLY_PORT_OFFSET 0x44 -#define SA5_INTR_STATUS 0x30 -#define SA5_SCRATCHPAD_OFFSET 0xB0 - -#define SA5_CTCFG_OFFSET 0xB4 -#define SA5_CTMEM_OFFSET 0xB8 - -#define SA5_INTR_OFF 0x08 -#define SA5B_INTR_OFF 0x04 -#define SA5_INTR_PENDING 0x08 -#define SA5B_INTR_PENDING 0x04 -#define FIFO_EMPTY 0xffffffff -#define CCISS_FIRMWARE_READY 0xffff0000 /* value in scratchpad register */ -/* Perf. 
mode flags */ -#define SA5_PERF_INTR_PENDING 0x04 -#define SA5_PERF_INTR_OFF 0x05 -#define SA5_OUTDB_STATUS_PERF_BIT 0x01 -#define SA5_OUTDB_CLEAR_PERF_BIT 0x01 -#define SA5_OUTDB_CLEAR 0xA0 -#define SA5_OUTDB_CLEAR_PERF_BIT 0x01 -#define SA5_OUTDB_STATUS 0x9C - - -#define CISS_ERROR_BIT 0x02 - -#define CCISS_INTR_ON 1 -#define CCISS_INTR_OFF 0 - - -/* CCISS_BOARD_READY_WAIT_SECS is how long to wait for a board - * to become ready, in seconds, before giving up on it. - * CCISS_BOARD_READY_POLL_INTERVAL_MSECS * is how long to wait - * between polling the board to see if it is ready, in - * milliseconds. CCISS_BOARD_READY_ITERATIONS is derived - * the above. - */ -#define CCISS_BOARD_READY_WAIT_SECS (120) -#define CCISS_BOARD_NOT_READY_WAIT_SECS (100) -#define CCISS_BOARD_READY_POLL_INTERVAL_MSECS (100) -#define CCISS_BOARD_READY_ITERATIONS \ - ((CCISS_BOARD_READY_WAIT_SECS * 1000) / \ - CCISS_BOARD_READY_POLL_INTERVAL_MSECS) -#define CCISS_BOARD_NOT_READY_ITERATIONS \ - ((CCISS_BOARD_NOT_READY_WAIT_SECS * 1000) / \ - CCISS_BOARD_READY_POLL_INTERVAL_MSECS) -#define CCISS_POST_RESET_PAUSE_MSECS (3000) -#define CCISS_POST_RESET_NOOP_INTERVAL_MSECS (4000) -#define CCISS_POST_RESET_NOOP_RETRIES (12) -#define CCISS_POST_RESET_NOOP_TIMEOUT_MSECS (10000) - -/* - Send the command to the hardware -*/ -static void SA5_submit_command( ctlr_info_t *h, CommandList_struct *c) -{ -#ifdef CCISS_DEBUG - printk(KERN_WARNING "cciss%d: Sending %08x - down to controller\n", - h->ctlr, c->busaddr); -#endif /* CCISS_DEBUG */ - writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); - readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); - h->commands_outstanding++; - if ( h->commands_outstanding > h->max_outstanding) - h->max_outstanding = h->commands_outstanding; -} - -/* - * This card is the opposite of the other cards. - * 0 turns interrupts on... - * 0x08 turns them off... - */ -static void SA5_intr_mask(ctlr_info_t *h, unsigned long val) -{ - if (val) - { /* Turn interrupts on */ - h->interrupts_enabled = 1; - writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); - (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); - } else /* Turn them off */ - { - h->interrupts_enabled = 0; - writel( SA5_INTR_OFF, - h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); - (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); - } -} -/* - * This card is the opposite of the other cards. - * 0 turns interrupts on... - * 0x04 turns them off... - */ -static void SA5B_intr_mask(ctlr_info_t *h, unsigned long val) -{ - if (val) - { /* Turn interrupts on */ - h->interrupts_enabled = 1; - writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); - (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); - } else /* Turn them off */ - { - h->interrupts_enabled = 0; - writel( SA5B_INTR_OFF, - h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); - (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); - } -} - -/* Performant mode intr_mask */ -static void SA5_performant_intr_mask(ctlr_info_t *h, unsigned long val) -{ - if (val) { /* turn on interrupts */ - h->interrupts_enabled = 1; - writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); - (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); - } else { - h->interrupts_enabled = 0; - writel(SA5_PERF_INTR_OFF, - h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); - (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); - } -} - -/* - * Returns true if fifo is full. - * - */ -static unsigned long SA5_fifo_full(ctlr_info_t *h) -{ - if( h->commands_outstanding >= h->max_commands) - return(1); - else - return(0); - -} -/* - * returns value read from hardware. 
- * returns FIFO_EMPTY if there is nothing to read - */ -static unsigned long SA5_completed(ctlr_info_t *h) -{ - unsigned long register_value - = readl(h->vaddr + SA5_REPLY_PORT_OFFSET); - if(register_value != FIFO_EMPTY) - { - h->commands_outstanding--; -#ifdef CCISS_DEBUG - printk("cciss: Read %lx back from board\n", register_value); -#endif /* CCISS_DEBUG */ - } -#ifdef CCISS_DEBUG - else - { - printk("cciss: FIFO Empty read\n"); - } -#endif - return ( register_value); - -} - -/* Performant mode command completed */ -static unsigned long SA5_performant_completed(ctlr_info_t *h) -{ - unsigned long register_value = FIFO_EMPTY; - - /* flush the controller write of the reply queue by reading - * outbound doorbell status register. - */ - register_value = readl(h->vaddr + SA5_OUTDB_STATUS); - /* msi auto clears the interrupt pending bit. */ - if (!(h->pdev->msi_enabled || h->pdev->msix_enabled)) { - writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR); - /* Do a read in order to flush the write to the controller - * (as per spec.) - */ - register_value = readl(h->vaddr + SA5_OUTDB_STATUS); - } - - if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) { - register_value = *(h->reply_pool_head); - (h->reply_pool_head)++; - h->commands_outstanding--; - } else { - register_value = FIFO_EMPTY; - } - /* Check for wraparound */ - if (h->reply_pool_head == (h->reply_pool + h->max_commands)) { - h->reply_pool_head = h->reply_pool; - h->reply_pool_wraparound ^= 1; - } - - return register_value; -} -/* - * Returns true if an interrupt is pending.. - */ -static bool SA5_intr_pending(ctlr_info_t *h) -{ - unsigned long register_value = - readl(h->vaddr + SA5_INTR_STATUS); -#ifdef CCISS_DEBUG - printk("cciss: intr_pending %lx\n", register_value); -#endif /* CCISS_DEBUG */ - if( register_value & SA5_INTR_PENDING) - return 1; - return 0 ; -} - -/* - * Returns true if an interrupt is pending.. - */ -static bool SA5B_intr_pending(ctlr_info_t *h) -{ - unsigned long register_value = - readl(h->vaddr + SA5_INTR_STATUS); -#ifdef CCISS_DEBUG - printk("cciss: intr_pending %lx\n", register_value); -#endif /* CCISS_DEBUG */ - if( register_value & SA5B_INTR_PENDING) - return 1; - return 0 ; -} - -static bool SA5_performant_intr_pending(ctlr_info_t *h) -{ - unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS); - - if (!register_value) - return false; - - if (h->pdev->msi_enabled || h->pdev->msix_enabled) - return true; - - /* Read outbound doorbell to flush */ - register_value = readl(h->vaddr + SA5_OUTDB_STATUS); - return register_value & SA5_OUTDB_STATUS_PERF_BIT; -} - -static struct access_method SA5_access = { - .submit_command = SA5_submit_command, - .set_intr_mask = SA5_intr_mask, - .fifo_full = SA5_fifo_full, - .intr_pending = SA5_intr_pending, - .command_completed = SA5_completed, -}; - -static struct access_method SA5B_access = { - .submit_command = SA5_submit_command, - .set_intr_mask = SA5B_intr_mask, - .fifo_full = SA5_fifo_full, - .intr_pending = SA5B_intr_pending, - .command_completed = SA5_completed, -}; - -static struct access_method SA5_performant_access = { - .submit_command = SA5_submit_command, - .set_intr_mask = SA5_performant_intr_mask, - .fifo_full = SA5_fifo_full, - .intr_pending = SA5_performant_intr_pending, - .command_completed = SA5_performant_completed, -}; - -struct board_type { - __u32 board_id; - char *product_name; - struct access_method *access; - int nr_cmds; /* Max cmds this kind of ctlr can handle. 
*/ -}; - -#endif /* CCISS_H */ diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h deleted file mode 100644 index d9be6b4d49a6..000000000000 --- a/drivers/block/cciss_cmd.h +++ /dev/null @@ -1,269 +0,0 @@ -#ifndef CCISS_CMD_H -#define CCISS_CMD_H - -#include - -/* DEFINES */ -#define CISS_VERSION "1.00" - -/* general boundary definitions */ -#define MAXSGENTRIES 32 -#define CCISS_SG_CHAIN 0x80000000 -#define MAXREPLYQS 256 - -/* Unit Attentions ASC's as defined for the MSA2012sa */ -#define POWER_OR_RESET 0x29 -#define STATE_CHANGED 0x2a -#define UNIT_ATTENTION_CLEARED 0x2f -#define LUN_FAILED 0x3e -#define REPORT_LUNS_CHANGED 0x3f - -/* Unit Attentions ASCQ's as defined for the MSA2012sa */ - - /* These ASCQ's defined for ASC = POWER_OR_RESET */ -#define POWER_ON_RESET 0x00 -#define POWER_ON_REBOOT 0x01 -#define SCSI_BUS_RESET 0x02 -#define MSA_TARGET_RESET 0x03 -#define CONTROLLER_FAILOVER 0x04 -#define TRANSCEIVER_SE 0x05 -#define TRANSCEIVER_LVD 0x06 - - /* These ASCQ's defined for ASC = STATE_CHANGED */ -#define RESERVATION_PREEMPTED 0x03 -#define ASYM_ACCESS_CHANGED 0x06 -#define LUN_CAPACITY_CHANGED 0x09 - -/* config space register offsets */ -#define CFG_VENDORID 0x00 -#define CFG_DEVICEID 0x02 -#define CFG_I2OBAR 0x10 -#define CFG_MEM1BAR 0x14 - -/* i2o space register offsets */ -#define I2O_IBDB_SET 0x20 -#define I2O_IBDB_CLEAR 0x70 -#define I2O_INT_STATUS 0x30 -#define I2O_INT_MASK 0x34 -#define I2O_IBPOST_Q 0x40 -#define I2O_OBPOST_Q 0x44 -#define I2O_DMA1_CFG 0x214 - -/* Configuration Table */ -#define CFGTBL_ChangeReq 0x00000001l -#define CFGTBL_AccCmds 0x00000001l -#define DOORBELL_CTLR_RESET 0x00000004l -#define DOORBELL_CTLR_RESET2 0x00000020l - -#define CFGTBL_Trans_Simple 0x00000002l -#define CFGTBL_Trans_Performant 0x00000004l -#define CFGTBL_Trans_use_short_tags 0x20000000l - -#define CFGTBL_BusType_Ultra2 0x00000001l -#define CFGTBL_BusType_Ultra3 0x00000002l -#define CFGTBL_BusType_Fibre1G 0x00000100l -#define CFGTBL_BusType_Fibre2G 0x00000200l -typedef struct _vals32 -{ - __u32 lower; - __u32 upper; -} vals32; - -typedef union _u64bit -{ - vals32 val32; - __u64 val; -} u64bit; - -/* Type defs used in the following structs */ -#define QWORD vals32 - -/* STRUCTURES */ -#define CISS_MAX_PHYS_LUN 1024 -/* SCSI-3 Cmmands */ - -#pragma pack(1) - -#define CISS_INQUIRY 0x12 -/* Date returned */ -typedef struct _InquiryData_struct -{ - BYTE data_byte[36]; -} InquiryData_struct; - -#define CISS_REPORT_LOG 0xc2 /* Report Logical LUNs */ -#define CISS_REPORT_PHYS 0xc3 /* Report Physical LUNs */ -/* Data returned */ -typedef struct _ReportLUNdata_struct -{ - BYTE LUNListLength[4]; - DWORD reserved; - BYTE LUN[CISS_MAX_LUN][8]; -} ReportLunData_struct; - -#define CCISS_READ_CAPACITY 0x25 /* Read Capacity */ -typedef struct _ReadCapdata_struct -{ - BYTE total_size[4]; /* Total size in blocks */ - BYTE block_size[4]; /* Size of blocks in bytes */ -} ReadCapdata_struct; - -#define CCISS_READ_CAPACITY_16 0x9e /* Read Capacity 16 */ - -/* service action to differentiate a 16 byte read capacity from - other commands that use the 0x9e SCSI op code */ - -#define CCISS_READ_CAPACITY_16_SERVICE_ACT 0x10 - -typedef struct _ReadCapdata_struct_16 -{ - BYTE total_size[8]; /* Total size in blocks */ - BYTE block_size[4]; /* Size of blocks in bytes */ - BYTE prot_en:1; /* protection enable bit */ - BYTE rto_en:1; /* reference tag own enable bit */ - BYTE reserved:6; /* reserved bits */ - BYTE reserved2[18]; /* reserved bytes per spec */ -} ReadCapdata_struct_16; - -/* Define the 
supported read/write commands for cciss based controllers */ - -#define CCISS_READ_10 0x28 /* Read(10) */ -#define CCISS_WRITE_10 0x2a /* Write(10) */ -#define CCISS_READ_16 0x88 /* Read(16) */ -#define CCISS_WRITE_16 0x8a /* Write(16) */ - -/* Define the CDB lengths supported by cciss based controllers */ - -#define CDB_LEN10 10 -#define CDB_LEN16 16 - -/* BMIC commands */ -#define BMIC_READ 0x26 -#define BMIC_WRITE 0x27 -#define BMIC_CACHE_FLUSH 0xc2 -#define CCISS_CACHE_FLUSH 0x01 /* C2 was already being used by CCISS */ - -#define CCISS_ABORT_MSG 0x00 -#define CCISS_RESET_MSG 0x01 -#define CCISS_RESET_TYPE_CONTROLLER 0x00 -#define CCISS_RESET_TYPE_BUS 0x01 -#define CCISS_RESET_TYPE_TARGET 0x03 -#define CCISS_RESET_TYPE_LUN 0x04 -#define CCISS_NOOP_MSG 0x03 - -/* Command List Structure */ -#define CTLR_LUNID "\0\0\0\0\0\0\0\0" - -typedef struct _CommandListHeader_struct { - BYTE ReplyQueue; - BYTE SGList; - HWORD SGTotal; - QWORD Tag; - LUNAddr_struct LUN; -} CommandListHeader_struct; -typedef struct _ErrDescriptor_struct { - QWORD Addr; - DWORD Len; -} ErrDescriptor_struct; -typedef struct _SGDescriptor_struct { - QWORD Addr; - DWORD Len; - DWORD Ext; -} SGDescriptor_struct; - -/* Command types */ -#define CMD_RWREQ 0x00 -#define CMD_IOCTL_PEND 0x01 -#define CMD_SCSI 0x03 -#define CMD_MSG_DONE 0x04 -#define CMD_MSG_TIMEOUT 0x05 -#define CMD_MSG_STALE 0xff - -/* This structure needs to be divisible by COMMANDLIST_ALIGNMENT - * because low bits of the address are used to to indicate that - * whether the tag contains an index or an address. PAD_32 and - * PAD_64 can be adjusted independently as needed for 32-bit - * and 64-bits systems. - */ -#define COMMANDLIST_ALIGNMENT (32) -#define IS_64_BIT ((sizeof(long) - 4)/4) -#define IS_32_BIT (!IS_64_BIT) -#define PAD_32 (0) -#define PAD_64 (4) -#define PADSIZE (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64) -#define DIRECT_LOOKUP_BIT 0x10 -#define DIRECT_LOOKUP_SHIFT 5 - -typedef struct _CommandList_struct { - CommandListHeader_struct Header; - RequestBlock_struct Request; - ErrDescriptor_struct ErrDesc; - SGDescriptor_struct SG[MAXSGENTRIES]; - /* information associated with the command */ - __u32 busaddr; /* physical address of this record */ - ErrorInfo_struct * err_info; /* pointer to the allocated mem */ - int ctlr; - int cmd_type; - long cmdindex; - struct list_head list; - struct request * rq; - struct completion *waiting; - int retry_count; - void * scsi_cmd; - char pad[PADSIZE]; -} CommandList_struct; - -/* Configuration Table Structure */ -typedef struct _HostWrite_struct { - DWORD TransportRequest; - DWORD Reserved; - DWORD CoalIntDelay; - DWORD CoalIntCount; -} HostWrite_struct; - -typedef struct _CfgTable_struct { - BYTE Signature[4]; - DWORD SpecValence; -#define SIMPLE_MODE 0x02 -#define PERFORMANT_MODE 0x04 -#define MEMQ_MODE 0x08 - DWORD TransportSupport; - DWORD TransportActive; - HostWrite_struct HostWrite; - DWORD CmdsOutMax; - DWORD BusTypes; - DWORD TransMethodOffset; - BYTE ServerName[16]; - DWORD HeartBeat; - DWORD SCSI_Prefetch; - DWORD MaxSGElements; - DWORD MaxLogicalUnits; - DWORD MaxPhysicalDrives; - DWORD MaxPhysicalDrivesPerLogicalUnit; - DWORD MaxPerformantModeCommands; - u8 reserved[0x78 - 0x58]; - u32 misc_fw_support; /* offset 0x78 */ -#define MISC_FW_DOORBELL_RESET (0x02) -#define MISC_FW_DOORBELL_RESET2 (0x10) - u8 driver_version[32]; -} CfgTable_struct; - -struct TransTable_struct { - u32 BlockFetch0; - u32 BlockFetch1; - u32 BlockFetch2; - u32 BlockFetch3; - u32 BlockFetch4; - u32 BlockFetch5; - u32 
BlockFetch6; - u32 BlockFetch7; - u32 RepQSize; - u32 RepQCount; - u32 RepQCtrAddrLow32; - u32 RepQCtrAddrHigh32; - u32 RepQAddr0Low32; - u32 RepQAddr0High32; -}; - -#pragma pack() -#endif /* CCISS_CMD_H */ diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c deleted file mode 100644 index 01a1f7e24978..000000000000 --- a/drivers/block/cciss_scsi.c +++ /dev/null @@ -1,1653 +0,0 @@ -/* - * Disk Array driver for HP Smart Array controllers, SCSI Tape module. - * (C) Copyright 2001, 2007 Hewlett-Packard Development Company, L.P. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 300, Boston, MA - * 02111-1307, USA. - * - * Questions/Comments/Bugfixes to iss_storagedev@hp.com - * - * Author: Stephen M. Cameron - */ -#ifdef CONFIG_CISS_SCSI_TAPE - -/* Here we have code to present the driver as a scsi driver - as it is simultaneously presented as a block driver. The - reason for doing this is to allow access to SCSI tape drives - through the array controller. Note in particular, neither - physical nor logical disks are presented through the scsi layer. */ - -#include -#include -#include -#include - -#include - -#include -#include -#include - -#include "cciss_scsi.h" - -#define CCISS_ABORT_MSG 0x00 -#define CCISS_RESET_MSG 0x01 - -static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff, - size_t size, - __u8 page_code, unsigned char *scsi3addr, - int cmd_type); - -static CommandList_struct *cmd_alloc(ctlr_info_t *h); -static CommandList_struct *cmd_special_alloc(ctlr_info_t *h); -static void cmd_free(ctlr_info_t *h, CommandList_struct *c); -static void cmd_special_free(ctlr_info_t *h, CommandList_struct *c); - -static int cciss_scsi_write_info(struct Scsi_Host *sh, - char *buffer, /* data buffer */ - int length); /* length of data in buffer */ -static int cciss_scsi_show_info(struct seq_file *m, - struct Scsi_Host *sh); - -static int cciss_scsi_queue_command (struct Scsi_Host *h, - struct scsi_cmnd *cmd); -static int cciss_eh_device_reset_handler(struct scsi_cmnd *); -static int cciss_eh_abort_handler(struct scsi_cmnd *); - -static struct cciss_scsi_hba_t ccissscsi[MAX_CTLR] = { - { .name = "cciss0", .ndevices = 0 }, - { .name = "cciss1", .ndevices = 0 }, - { .name = "cciss2", .ndevices = 0 }, - { .name = "cciss3", .ndevices = 0 }, - { .name = "cciss4", .ndevices = 0 }, - { .name = "cciss5", .ndevices = 0 }, - { .name = "cciss6", .ndevices = 0 }, - { .name = "cciss7", .ndevices = 0 }, -}; - -static struct scsi_host_template cciss_driver_template = { - .module = THIS_MODULE, - .name = "cciss", - .proc_name = "cciss", - .write_info = cciss_scsi_write_info, - .show_info = cciss_scsi_show_info, - .queuecommand = cciss_scsi_queue_command, - .this_id = 7, - .use_clustering = DISABLE_CLUSTERING, - /* Can't have eh_bus_reset_handler or eh_host_reset_handler for cciss */ - .eh_device_reset_handler= cciss_eh_device_reset_handler, - .eh_abort_handler = cciss_eh_abort_handler, -}; - 
-#pragma pack(1) - -#define SCSI_PAD_32 8 -#define SCSI_PAD_64 8 - -struct cciss_scsi_cmd_stack_elem_t { - CommandList_struct cmd; - ErrorInfo_struct Err; - __u32 busaddr; - int cmdindex; - u8 pad[IS_32_BIT * SCSI_PAD_32 + IS_64_BIT * SCSI_PAD_64]; -}; - -#pragma pack() - -#pragma pack(1) -struct cciss_scsi_cmd_stack_t { - struct cciss_scsi_cmd_stack_elem_t *pool; - struct cciss_scsi_cmd_stack_elem_t **elem; - dma_addr_t cmd_pool_handle; - int top; - int nelems; -}; -#pragma pack() - -struct cciss_scsi_adapter_data_t { - struct Scsi_Host *scsi_host; - struct cciss_scsi_cmd_stack_t cmd_stack; - SGDescriptor_struct **cmd_sg_list; - int registered; - spinlock_t lock; // to protect ccissscsi[ctlr]; -}; - -#define CPQ_TAPE_LOCK(h, flags) spin_lock_irqsave( \ - &h->scsi_ctlr->lock, flags); -#define CPQ_TAPE_UNLOCK(h, flags) spin_unlock_irqrestore( \ - &h->scsi_ctlr->lock, flags); - -static CommandList_struct * -scsi_cmd_alloc(ctlr_info_t *h) -{ - /* assume only one process in here at a time, locking done by caller. */ - /* use h->lock */ - /* might be better to rewrite how we allocate scsi commands in a way that */ - /* needs no locking at all. */ - - /* take the top memory chunk off the stack and return it, if any. */ - struct cciss_scsi_cmd_stack_elem_t *c; - struct cciss_scsi_adapter_data_t *sa; - struct cciss_scsi_cmd_stack_t *stk; - u64bit temp64; - - sa = h->scsi_ctlr; - stk = &sa->cmd_stack; - - if (stk->top < 0) - return NULL; - c = stk->elem[stk->top]; - /* memset(c, 0, sizeof(*c)); */ - memset(&c->cmd, 0, sizeof(c->cmd)); - memset(&c->Err, 0, sizeof(c->Err)); - /* set physical addr of cmd and addr of scsi parameters */ - c->cmd.busaddr = c->busaddr; - c->cmd.cmdindex = c->cmdindex; - /* (__u32) (stk->cmd_pool_handle + - (sizeof(struct cciss_scsi_cmd_stack_elem_t)*stk->top)); */ - - temp64.val = (__u64) (c->busaddr + sizeof(CommandList_struct)); - /* (__u64) (stk->cmd_pool_handle + - (sizeof(struct cciss_scsi_cmd_stack_elem_t)*stk->top) + - sizeof(CommandList_struct)); */ - stk->top--; - c->cmd.ErrDesc.Addr.lower = temp64.val32.lower; - c->cmd.ErrDesc.Addr.upper = temp64.val32.upper; - c->cmd.ErrDesc.Len = sizeof(ErrorInfo_struct); - - c->cmd.ctlr = h->ctlr; - c->cmd.err_info = &c->Err; - - return (CommandList_struct *) c; -} - -static void -scsi_cmd_free(ctlr_info_t *h, CommandList_struct *c) -{ - /* assume only one process in here at a time, locking done by caller. */ - /* use h->lock */ - /* drop the free memory chunk on top of the stack. */ - - struct cciss_scsi_adapter_data_t *sa; - struct cciss_scsi_cmd_stack_t *stk; - - sa = h->scsi_ctlr; - stk = &sa->cmd_stack; - stk->top++; - if (stk->top >= stk->nelems) { - dev_err(&h->pdev->dev, - "scsi_cmd_free called too many times.\n"); - BUG(); - } - stk->elem[stk->top] = (struct cciss_scsi_cmd_stack_elem_t *) c; -} - -static int -scsi_cmd_stack_setup(ctlr_info_t *h, struct cciss_scsi_adapter_data_t *sa) -{ - int i; - struct cciss_scsi_cmd_stack_t *stk; - size_t size; - - stk = &sa->cmd_stack; - stk->nelems = cciss_tape_cmds + 2; - sa->cmd_sg_list = cciss_allocate_sg_chain_blocks(h, - h->chainsize, stk->nelems); - if (!sa->cmd_sg_list && h->chainsize > 0) - return -ENOMEM; - - size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * stk->nelems; - - /* Check alignment, see cciss_cmd.h near CommandList_struct def. 
*/ - BUILD_BUG_ON((sizeof(*stk->pool) % COMMANDLIST_ALIGNMENT) != 0); - /* pci_alloc_consistent guarantees 32-bit DMA address will be used */ - stk->pool = (struct cciss_scsi_cmd_stack_elem_t *) - pci_alloc_consistent(h->pdev, size, &stk->cmd_pool_handle); - - if (stk->pool == NULL) { - cciss_free_sg_chain_blocks(sa->cmd_sg_list, stk->nelems); - sa->cmd_sg_list = NULL; - return -ENOMEM; - } - stk->elem = kmalloc(sizeof(stk->elem[0]) * stk->nelems, GFP_KERNEL); - if (!stk->elem) { - pci_free_consistent(h->pdev, size, stk->pool, - stk->cmd_pool_handle); - return -1; - } - for (i = 0; i < stk->nelems; i++) { - stk->elem[i] = &stk->pool[i]; - stk->elem[i]->busaddr = (__u32) (stk->cmd_pool_handle + - (sizeof(struct cciss_scsi_cmd_stack_elem_t) * i)); - stk->elem[i]->cmdindex = i; - } - stk->top = stk->nelems-1; - return 0; -} - -static void -scsi_cmd_stack_free(ctlr_info_t *h) -{ - struct cciss_scsi_adapter_data_t *sa; - struct cciss_scsi_cmd_stack_t *stk; - size_t size; - - sa = h->scsi_ctlr; - stk = &sa->cmd_stack; - if (stk->top != stk->nelems-1) { - dev_warn(&h->pdev->dev, - "bug: %d scsi commands are still outstanding.\n", - stk->nelems - stk->top); - } - size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * stk->nelems; - - pci_free_consistent(h->pdev, size, stk->pool, stk->cmd_pool_handle); - stk->pool = NULL; - cciss_free_sg_chain_blocks(sa->cmd_sg_list, stk->nelems); - kfree(stk->elem); - stk->elem = NULL; -} - -#if 0 -static void -print_cmd(CommandList_struct *cp) -{ - printk("queue:%d\n", cp->Header.ReplyQueue); - printk("sglist:%d\n", cp->Header.SGList); - printk("sgtot:%d\n", cp->Header.SGTotal); - printk("Tag:0x%08x/0x%08x\n", cp->Header.Tag.upper, - cp->Header.Tag.lower); - printk("LUN:0x%8phN\n", cp->Header.LUN.LunAddrBytes); - printk("CDBLen:%d\n", cp->Request.CDBLen); - printk("Type:%d\n",cp->Request.Type.Type); - printk("Attr:%d\n",cp->Request.Type.Attribute); - printk(" Dir:%d\n",cp->Request.Type.Direction); - printk("Timeout:%d\n",cp->Request.Timeout); - printk("CDB: %16ph\n", cp->Request.CDB); - printk("edesc.Addr: 0x%08x/0%08x, Len = %d\n", - cp->ErrDesc.Addr.upper, cp->ErrDesc.Addr.lower, - cp->ErrDesc.Len); - printk("sgs..........Errorinfo:\n"); - printk("scsistatus:%d\n", cp->err_info->ScsiStatus); - printk("senselen:%d\n", cp->err_info->SenseLen); - printk("cmd status:%d\n", cp->err_info->CommandStatus); - printk("resid cnt:%d\n", cp->err_info->ResidualCnt); - printk("offense size:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_size); - printk("offense byte:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_num); - printk("offense value:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_value); -} -#endif - -static int -find_bus_target_lun(ctlr_info_t *h, int *bus, int *target, int *lun) -{ - /* finds an unused bus, target, lun for a new device */ - /* assumes h->scsi_ctlr->lock is held */ - int i, found=0; - unsigned char target_taken[CCISS_MAX_SCSI_DEVS_PER_HBA]; - - memset(&target_taken[0], 0, CCISS_MAX_SCSI_DEVS_PER_HBA); - - target_taken[SELF_SCSI_ID] = 1; - for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++) - target_taken[ccissscsi[h->ctlr].dev[i].target] = 1; - - for (i = 0; i < CCISS_MAX_SCSI_DEVS_PER_HBA; i++) { - if (!target_taken[i]) { - *bus = 0; *target=i; *lun = 0; found=1; - break; - } - } - return (!found); -} -struct scsi2map { - char scsi3addr[8]; - int bus, target, lun; -}; - -static int -cciss_scsi_add_entry(ctlr_info_t *h, int hostno, - struct cciss_scsi_dev_t *device, - struct scsi2map *added, int *nadded) -{ - /* assumes h->scsi_ctlr->lock 
is held */ - int n = ccissscsi[h->ctlr].ndevices; - struct cciss_scsi_dev_t *sd; - int i, bus, target, lun; - unsigned char addr1[8], addr2[8]; - - if (n >= CCISS_MAX_SCSI_DEVS_PER_HBA) { - dev_warn(&h->pdev->dev, "Too many devices, " - "some will be inaccessible.\n"); - return -1; - } - - bus = target = -1; - lun = 0; - /* Is this device a non-zero lun of a multi-lun device */ - /* byte 4 of the 8-byte LUN addr will contain the logical unit no. */ - if (device->scsi3addr[4] != 0) { - /* Search through our list and find the device which */ - /* has the same 8 byte LUN address, excepting byte 4. */ - /* Assign the same bus and target for this new LUN. */ - /* Use the logical unit number from the firmware. */ - memcpy(addr1, device->scsi3addr, 8); - addr1[4] = 0; - for (i = 0; i < n; i++) { - sd = &ccissscsi[h->ctlr].dev[i]; - memcpy(addr2, sd->scsi3addr, 8); - addr2[4] = 0; - /* differ only in byte 4? */ - if (memcmp(addr1, addr2, 8) == 0) { - bus = sd->bus; - target = sd->target; - lun = device->scsi3addr[4]; - break; - } - } - } - - sd = &ccissscsi[h->ctlr].dev[n]; - if (lun == 0) { - if (find_bus_target_lun(h, - &sd->bus, &sd->target, &sd->lun) != 0) - return -1; - } else { - sd->bus = bus; - sd->target = target; - sd->lun = lun; - } - added[*nadded].bus = sd->bus; - added[*nadded].target = sd->target; - added[*nadded].lun = sd->lun; - (*nadded)++; - - memcpy(sd->scsi3addr, device->scsi3addr, 8); - memcpy(sd->vendor, device->vendor, sizeof(sd->vendor)); - memcpy(sd->revision, device->revision, sizeof(sd->revision)); - memcpy(sd->device_id, device->device_id, sizeof(sd->device_id)); - sd->devtype = device->devtype; - - ccissscsi[h->ctlr].ndevices++; - - /* initially, (before registering with scsi layer) we don't - know our hostno and we don't want to print anything first - time anyway (the scsi layer's inquiries will show that info) */ - if (hostno != -1) - dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n", - scsi_device_type(sd->devtype), hostno, - sd->bus, sd->target, sd->lun); - return 0; -} - -static void -cciss_scsi_remove_entry(ctlr_info_t *h, int hostno, int entry, - struct scsi2map *removed, int *nremoved) -{ - /* assumes h->ctlr]->scsi_ctlr->lock is held */ - int i; - struct cciss_scsi_dev_t sd; - - if (entry < 0 || entry >= CCISS_MAX_SCSI_DEVS_PER_HBA) return; - sd = ccissscsi[h->ctlr].dev[entry]; - removed[*nremoved].bus = sd.bus; - removed[*nremoved].target = sd.target; - removed[*nremoved].lun = sd.lun; - (*nremoved)++; - for (i = entry; i < ccissscsi[h->ctlr].ndevices-1; i++) - ccissscsi[h->ctlr].dev[i] = ccissscsi[h->ctlr].dev[i+1]; - ccissscsi[h->ctlr].ndevices--; - dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n", - scsi_device_type(sd.devtype), hostno, - sd.bus, sd.target, sd.lun); -} - - -#define SCSI3ADDR_EQ(a,b) ( \ - (a)[7] == (b)[7] && \ - (a)[6] == (b)[6] && \ - (a)[5] == (b)[5] && \ - (a)[4] == (b)[4] && \ - (a)[3] == (b)[3] && \ - (a)[2] == (b)[2] && \ - (a)[1] == (b)[1] && \ - (a)[0] == (b)[0]) - -static void fixup_botched_add(ctlr_info_t *h, char *scsi3addr) -{ - /* called when scsi_add_device fails in order to re-adjust */ - /* ccissscsi[] to match the mid layer's view. 
*/ - unsigned long flags; - int i, j; - CPQ_TAPE_LOCK(h, flags); - for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++) { - if (memcmp(scsi3addr, - ccissscsi[h->ctlr].dev[i].scsi3addr, 8) == 0) { - for (j = i; j < ccissscsi[h->ctlr].ndevices-1; j++) - ccissscsi[h->ctlr].dev[j] = - ccissscsi[h->ctlr].dev[j+1]; - ccissscsi[h->ctlr].ndevices--; - break; - } - } - CPQ_TAPE_UNLOCK(h, flags); -} - -static int device_is_the_same(struct cciss_scsi_dev_t *dev1, - struct cciss_scsi_dev_t *dev2) -{ - return dev1->devtype == dev2->devtype && - memcmp(dev1->scsi3addr, dev2->scsi3addr, - sizeof(dev1->scsi3addr)) == 0 && - memcmp(dev1->device_id, dev2->device_id, - sizeof(dev1->device_id)) == 0 && - memcmp(dev1->vendor, dev2->vendor, - sizeof(dev1->vendor)) == 0 && - memcmp(dev1->model, dev2->model, - sizeof(dev1->model)) == 0 && - memcmp(dev1->revision, dev2->revision, - sizeof(dev1->revision)) == 0; -} - -static int -adjust_cciss_scsi_table(ctlr_info_t *h, int hostno, - struct cciss_scsi_dev_t sd[], int nsds) -{ - /* sd contains scsi3 addresses and devtypes, but - bus target and lun are not filled in. This funciton - takes what's in sd to be the current and adjusts - ccissscsi[] to be in line with what's in sd. */ - - int i,j, found, changes=0; - struct cciss_scsi_dev_t *csd; - unsigned long flags; - struct scsi2map *added, *removed; - int nadded, nremoved; - struct Scsi_Host *sh = NULL; - - added = kzalloc(sizeof(*added) * CCISS_MAX_SCSI_DEVS_PER_HBA, - GFP_KERNEL); - removed = kzalloc(sizeof(*removed) * CCISS_MAX_SCSI_DEVS_PER_HBA, - GFP_KERNEL); - - if (!added || !removed) { - dev_warn(&h->pdev->dev, - "Out of memory in adjust_cciss_scsi_table\n"); - goto free_and_out; - } - - CPQ_TAPE_LOCK(h, flags); - - if (hostno != -1) /* if it's not the first time... */ - sh = h->scsi_ctlr->scsi_host; - - /* find any devices in ccissscsi[] that are not in - sd[] and remove them from ccissscsi[] */ - - i = 0; - nremoved = 0; - nadded = 0; - while (i < ccissscsi[h->ctlr].ndevices) { - csd = &ccissscsi[h->ctlr].dev[i]; - found=0; - for (j=0;jscsi3addr)) { - if (device_is_the_same(&sd[j], csd)) - found=2; - else - found=1; - break; - } - } - - if (found == 0) { /* device no longer present. */ - changes++; - cciss_scsi_remove_entry(h, hostno, i, - removed, &nremoved); - /* remove ^^^, hence i not incremented */ - } else if (found == 1) { /* device is different in some way */ - changes++; - dev_info(&h->pdev->dev, - "device c%db%dt%dl%d has changed.\n", - hostno, csd->bus, csd->target, csd->lun); - cciss_scsi_remove_entry(h, hostno, i, - removed, &nremoved); - /* remove ^^^, hence i not incremented */ - if (cciss_scsi_add_entry(h, hostno, &sd[j], - added, &nadded) != 0) - /* we just removed one, so add can't fail. */ - BUG(); - csd->devtype = sd[j].devtype; - memcpy(csd->device_id, sd[j].device_id, - sizeof(csd->device_id)); - memcpy(csd->vendor, sd[j].vendor, - sizeof(csd->vendor)); - memcpy(csd->model, sd[j].model, - sizeof(csd->model)); - memcpy(csd->revision, sd[j].revision, - sizeof(csd->revision)); - } else /* device is same as it ever was, */ - i++; /* so just move along. */ - } - - /* Now, make sure every device listed in sd[] is also - listed in ccissscsi[], adding them if they aren't found */ - - for (i=0;ictlr].ndevices; j++) { - csd = &ccissscsi[h->ctlr].dev[j]; - if (SCSI3ADDR_EQ(sd[i].scsi3addr, - csd->scsi3addr)) { - if (device_is_the_same(&sd[i], csd)) - found=2; /* found device */ - else - found=1; /* found a bug. 
*/ - break; - } - } - if (!found) { - changes++; - if (cciss_scsi_add_entry(h, hostno, &sd[i], - added, &nadded) != 0) - break; - } else if (found == 1) { - /* should never happen... */ - changes++; - dev_warn(&h->pdev->dev, - "device unexpectedly changed\n"); - /* but if it does happen, we just ignore that device */ - } - } - CPQ_TAPE_UNLOCK(h, flags); - - /* Don't notify scsi mid layer of any changes the first time through */ - /* (or if there are no changes) scsi_scan_host will do it later the */ - /* first time through. */ - if (hostno == -1 || !changes) - goto free_and_out; - - /* Notify scsi mid layer of any removed devices */ - for (i = 0; i < nremoved; i++) { - struct scsi_device *sdev = - scsi_device_lookup(sh, removed[i].bus, - removed[i].target, removed[i].lun); - if (sdev != NULL) { - scsi_remove_device(sdev); - scsi_device_put(sdev); - } else { - /* We don't expect to get here. */ - /* future cmds to this device will get selection */ - /* timeout as if the device was gone. */ - dev_warn(&h->pdev->dev, "didn't find " - "c%db%dt%dl%d\n for removal.", - hostno, removed[i].bus, - removed[i].target, removed[i].lun); - } - } - - /* Notify scsi mid layer of any added devices */ - for (i = 0; i < nadded; i++) { - int rc; - rc = scsi_add_device(sh, added[i].bus, - added[i].target, added[i].lun); - if (rc == 0) - continue; - dev_warn(&h->pdev->dev, "scsi_add_device " - "c%db%dt%dl%d failed, device not added.\n", - hostno, added[i].bus, added[i].target, added[i].lun); - /* now we have to remove it from ccissscsi, */ - /* since it didn't get added to scsi mid layer */ - fixup_botched_add(h, added[i].scsi3addr); - } - -free_and_out: - kfree(added); - kfree(removed); - return 0; -} - -static int -lookup_scsi3addr(ctlr_info_t *h, int bus, int target, int lun, char *scsi3addr) -{ - int i; - struct cciss_scsi_dev_t *sd; - unsigned long flags; - - CPQ_TAPE_LOCK(h, flags); - for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++) { - sd = &ccissscsi[h->ctlr].dev[i]; - if (sd->bus == bus && - sd->target == target && - sd->lun == lun) { - memcpy(scsi3addr, &sd->scsi3addr[0], 8); - CPQ_TAPE_UNLOCK(h, flags); - return 0; - } - } - CPQ_TAPE_UNLOCK(h, flags); - return -1; -} - -static void -cciss_scsi_setup(ctlr_info_t *h) -{ - struct cciss_scsi_adapter_data_t * shba; - - ccissscsi[h->ctlr].ndevices = 0; - shba = kmalloc(sizeof(*shba), GFP_KERNEL); - if (shba == NULL) - return; - shba->scsi_host = NULL; - spin_lock_init(&shba->lock); - shba->registered = 0; - if (scsi_cmd_stack_setup(h, shba) != 0) { - kfree(shba); - shba = NULL; - } - h->scsi_ctlr = shba; - return; -} - -static void complete_scsi_command(CommandList_struct *c, int timeout, - __u32 tag) -{ - struct scsi_cmnd *cmd; - ctlr_info_t *h; - ErrorInfo_struct *ei; - - ei = c->err_info; - - /* First, see if it was a message rather than a command */ - if (c->Request.Type.Type == TYPE_MSG) { - c->cmd_type = CMD_MSG_DONE; - return; - } - - cmd = (struct scsi_cmnd *) c->scsi_cmd; - h = hba[c->ctlr]; - - scsi_dma_unmap(cmd); - if (c->Header.SGTotal > h->max_cmd_sgentries) - cciss_unmap_sg_chain_block(h, c); - - cmd->result = (DID_OK << 16); /* host byte */ - cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */ - /* cmd->result |= (GOOD < 1); */ /* status byte */ - - cmd->result |= (ei->ScsiStatus); - /* printk("Scsistatus is 0x%02x\n", ei->ScsiStatus); */ - - /* copy the sense data whether we need to or not. */ - - memcpy(cmd->sense_buffer, ei->SenseInfo, - ei->SenseLen > SCSI_SENSE_BUFFERSIZE ? 
- SCSI_SENSE_BUFFERSIZE : - ei->SenseLen); - scsi_set_resid(cmd, ei->ResidualCnt); - - if (ei->CommandStatus != 0) { /* an error has occurred */ - switch (ei->CommandStatus) { - case CMD_TARGET_STATUS: - /* Pass it up to the upper layers... */ - if (!ei->ScsiStatus) { - - /* Ordinarily, this case should never happen, but there is a bug - in some released firmware revisions that allows it to happen - if, for example, a 4100 backplane loses power and the tape - drive is in it. We assume that it's a fatal error of some - kind because we can't show that it wasn't. We will make it - look like selection timeout since that is the most common - reason for this to occur, and it's severe enough. */ - - cmd->result = DID_NO_CONNECT << 16; - } - break; - case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ - break; - case CMD_DATA_OVERRUN: - dev_warn(&h->pdev->dev, "%p has" - " completed with data overrun " - "reported\n", c); - break; - case CMD_INVALID: { - /* - print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, c, sizeof(*c), false); - print_cmd(c); - */ - /* We get CMD_INVALID if you address a non-existent tape drive instead - of a selection timeout (no response). You will see this if you yank - out a tape drive, then try to access it. This is kind of a shame - because it means that any other CMD_INVALID (e.g. driver bug) will - get interpreted as a missing target. */ - cmd->result = DID_NO_CONNECT << 16; - } - break; - case CMD_PROTOCOL_ERR: - cmd->result = DID_ERROR << 16; - dev_warn(&h->pdev->dev, - "%p has protocol error\n", c); - break; - case CMD_HARDWARE_ERR: - cmd->result = DID_ERROR << 16; - dev_warn(&h->pdev->dev, - "%p had hardware error\n", c); - break; - case CMD_CONNECTION_LOST: - cmd->result = DID_ERROR << 16; - dev_warn(&h->pdev->dev, - "%p had connection lost\n", c); - break; - case CMD_ABORTED: - cmd->result = DID_ABORT << 16; - dev_warn(&h->pdev->dev, "%p was aborted\n", c); - break; - case CMD_ABORT_FAILED: - cmd->result = DID_ERROR << 16; - dev_warn(&h->pdev->dev, - "%p reports abort failed\n", c); - break; - case CMD_UNSOLICITED_ABORT: - cmd->result = DID_ABORT << 16; - dev_warn(&h->pdev->dev, "%p aborted due to an " - "unsolicited abort\n", c); - break; - case CMD_TIMEOUT: - cmd->result = DID_TIME_OUT << 16; - dev_warn(&h->pdev->dev, "%p timedout\n", c); - break; - case CMD_UNABORTABLE: - cmd->result = DID_ERROR << 16; - dev_warn(&h->pdev->dev, "c %p command " - "unabortable\n", c); - break; - default: - cmd->result = DID_ERROR << 16; - dev_warn(&h->pdev->dev, - "%p returned unknown status %x\n", c, - ei->CommandStatus); - } - } - cmd->scsi_done(cmd); - scsi_cmd_free(h, c); -} - -static int -cciss_scsi_detect(ctlr_info_t *h) -{ - struct Scsi_Host *sh; - int error; - - sh = scsi_host_alloc(&cciss_driver_template, sizeof(struct ctlr_info *)); - if (sh == NULL) - goto fail; - sh->io_port = 0; // good enough? FIXME, - sh->n_io_port = 0; // I don't think we use these two... 
- sh->this_id = SELF_SCSI_ID; - sh->can_queue = cciss_tape_cmds; - sh->sg_tablesize = h->maxsgentries; - sh->max_cmd_len = MAX_COMMAND_SIZE; - sh->max_sectors = h->cciss_max_sectors; - - ((struct cciss_scsi_adapter_data_t *) - h->scsi_ctlr)->scsi_host = sh; - sh->hostdata[0] = (unsigned long) h; - sh->irq = h->intr[SIMPLE_MODE_INT]; - sh->unique_id = sh->irq; - error = scsi_add_host(sh, &h->pdev->dev); - if (error) - goto fail_host_put; - scsi_scan_host(sh); - return 1; - - fail_host_put: - scsi_host_put(sh); - fail: - return 0; -} - -static void -cciss_unmap_one(struct pci_dev *pdev, - CommandList_struct *c, - size_t buflen, - int data_direction) -{ - u64bit addr64; - - addr64.val32.lower = c->SG[0].Addr.lower; - addr64.val32.upper = c->SG[0].Addr.upper; - pci_unmap_single(pdev, (dma_addr_t) addr64.val, buflen, data_direction); -} - -static void -cciss_map_one(struct pci_dev *pdev, - CommandList_struct *c, - unsigned char *buf, - size_t buflen, - int data_direction) -{ - __u64 addr64; - - addr64 = (__u64) pci_map_single(pdev, buf, buflen, data_direction); - c->SG[0].Addr.lower = - (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF); - c->SG[0].Addr.upper = - (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF); - c->SG[0].Len = buflen; - c->Header.SGList = (__u8) 1; /* no. SGs contig in this cmd */ - c->Header.SGTotal = (__u16) 1; /* total sgs in this cmd list */ -} - -static int -cciss_scsi_do_simple_cmd(ctlr_info_t *h, - CommandList_struct *c, - unsigned char *scsi3addr, - unsigned char *cdb, - unsigned char cdblen, - unsigned char *buf, int bufsize, - int direction) -{ - DECLARE_COMPLETION_ONSTACK(wait); - - c->cmd_type = CMD_IOCTL_PEND; /* treat this like an ioctl */ - c->scsi_cmd = NULL; - c->Header.ReplyQueue = 0; /* unused in simple mode */ - memcpy(&c->Header.LUN, scsi3addr, sizeof(c->Header.LUN)); - c->Header.Tag.lower = c->busaddr; /* Use k. address of cmd as tag */ - // Fill in the request block... - - /* printk("Using scsi3addr 0x%02x%0x2%0x2%0x2%0x2%0x2%0x2%0x2\n", - scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3], - scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]); */ - - memset(c->Request.CDB, 0, sizeof(c->Request.CDB)); - memcpy(c->Request.CDB, cdb, cdblen); - c->Request.Timeout = 0; - c->Request.CDBLen = cdblen; - c->Request.Type.Type = TYPE_CMD; - c->Request.Type.Attribute = ATTR_SIMPLE; - c->Request.Type.Direction = direction; - - /* Fill in the SG list and do dma mapping */ - cciss_map_one(h->pdev, c, (unsigned char *) buf, - bufsize, DMA_FROM_DEVICE); - - c->waiting = &wait; - enqueue_cmd_and_start_io(h, c); - wait_for_completion(&wait); - - /* undo the dma mapping */ - cciss_unmap_one(h->pdev, c, bufsize, DMA_FROM_DEVICE); - return(0); -} - -static void -cciss_scsi_interpret_error(ctlr_info_t *h, CommandList_struct *c) -{ - ErrorInfo_struct *ei; - - ei = c->err_info; - switch (ei->CommandStatus) { - case CMD_TARGET_STATUS: - dev_warn(&h->pdev->dev, - "cmd %p has completed with errors\n", c); - dev_warn(&h->pdev->dev, - "cmd %p has SCSI Status = %x\n", - c, ei->ScsiStatus); - if (ei->ScsiStatus == 0) - dev_warn(&h->pdev->dev, - "SCSI status is abnormally zero. " - "(probably indicates selection timeout " - "reported incorrectly due to a known " - "firmware bug, circa July, 2001.)\n"); - break; - case CMD_DATA_UNDERRUN: /* let mid layer handle it. 
*/ - dev_info(&h->pdev->dev, "UNDERRUN\n"); - break; - case CMD_DATA_OVERRUN: - dev_warn(&h->pdev->dev, "%p has" - " completed with data overrun " - "reported\n", c); - break; - case CMD_INVALID: { - /* controller unfortunately reports SCSI passthru's */ - /* to non-existent targets as invalid commands. */ - dev_warn(&h->pdev->dev, - "%p is reported invalid (probably means " - "target device no longer present)\n", c); - /* - print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, c, sizeof(*c), false); - print_cmd(c); - */ - } - break; - case CMD_PROTOCOL_ERR: - dev_warn(&h->pdev->dev, "%p has protocol error\n", c); - break; - case CMD_HARDWARE_ERR: - /* cmd->result = DID_ERROR << 16; */ - dev_warn(&h->pdev->dev, "%p had hardware error\n", c); - break; - case CMD_CONNECTION_LOST: - dev_warn(&h->pdev->dev, "%p had connection lost\n", c); - break; - case CMD_ABORTED: - dev_warn(&h->pdev->dev, "%p was aborted\n", c); - break; - case CMD_ABORT_FAILED: - dev_warn(&h->pdev->dev, - "%p reports abort failed\n", c); - break; - case CMD_UNSOLICITED_ABORT: - dev_warn(&h->pdev->dev, - "%p aborted due to an unsolicited abort\n", c); - break; - case CMD_TIMEOUT: - dev_warn(&h->pdev->dev, "%p timedout\n", c); - break; - case CMD_UNABORTABLE: - dev_warn(&h->pdev->dev, - "%p unabortable\n", c); - break; - default: - dev_warn(&h->pdev->dev, - "%p returned unknown status %x\n", - c, ei->CommandStatus); - } -} - -static int -cciss_scsi_do_inquiry(ctlr_info_t *h, unsigned char *scsi3addr, - unsigned char page, unsigned char *buf, - unsigned char bufsize) -{ - int rc; - CommandList_struct *c; - char cdb[6]; - ErrorInfo_struct *ei; - unsigned long flags; - - spin_lock_irqsave(&h->lock, flags); - c = scsi_cmd_alloc(h); - spin_unlock_irqrestore(&h->lock, flags); - - if (c == NULL) { /* trouble... */ - printk("cmd_alloc returned NULL!\n"); - return -1; - } - - ei = c->err_info; - - cdb[0] = CISS_INQUIRY; - cdb[1] = (page != 0); - cdb[2] = page; - cdb[3] = 0; - cdb[4] = bufsize; - cdb[5] = 0; - rc = cciss_scsi_do_simple_cmd(h, c, scsi3addr, cdb, - 6, buf, bufsize, XFER_READ); - - if (rc != 0) return rc; /* something went wrong */ - - if (ei->CommandStatus != 0 && - ei->CommandStatus != CMD_DATA_UNDERRUN) { - cciss_scsi_interpret_error(h, c); - rc = -1; - } - spin_lock_irqsave(&h->lock, flags); - scsi_cmd_free(h, c); - spin_unlock_irqrestore(&h->lock, flags); - return rc; -} - -/* Get the device id from inquiry page 0x83 */ -static int cciss_scsi_get_device_id(ctlr_info_t *h, unsigned char *scsi3addr, - unsigned char *device_id, int buflen) -{ - int rc; - unsigned char *buf; - - if (buflen > 16) - buflen = 16; - buf = kzalloc(64, GFP_KERNEL); - if (!buf) - return -1; - rc = cciss_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64); - if (rc == 0) - memcpy(device_id, &buf[8], buflen); - kfree(buf); - return rc != 0; -} - -static int -cciss_scsi_do_report_phys_luns(ctlr_info_t *h, - ReportLunData_struct *buf, int bufsize) -{ - int rc; - CommandList_struct *c; - unsigned char cdb[12]; - unsigned char scsi3addr[8]; - ErrorInfo_struct *ei; - unsigned long flags; - - spin_lock_irqsave(&h->lock, flags); - c = scsi_cmd_alloc(h); - spin_unlock_irqrestore(&h->lock, flags); - if (c == NULL) { /* trouble... 
*/ - printk("cmd_alloc returned NULL!\n"); - return -1; - } - - memset(&scsi3addr[0], 0, 8); /* address the controller */ - cdb[0] = CISS_REPORT_PHYS; - cdb[1] = 0; - cdb[2] = 0; - cdb[3] = 0; - cdb[4] = 0; - cdb[5] = 0; - cdb[6] = (bufsize >> 24) & 0xFF; //MSB - cdb[7] = (bufsize >> 16) & 0xFF; - cdb[8] = (bufsize >> 8) & 0xFF; - cdb[9] = bufsize & 0xFF; - cdb[10] = 0; - cdb[11] = 0; - - rc = cciss_scsi_do_simple_cmd(h, c, scsi3addr, - cdb, 12, - (unsigned char *) buf, - bufsize, XFER_READ); - - if (rc != 0) return rc; /* something went wrong */ - - ei = c->err_info; - if (ei->CommandStatus != 0 && - ei->CommandStatus != CMD_DATA_UNDERRUN) { - cciss_scsi_interpret_error(h, c); - rc = -1; - } - spin_lock_irqsave(&h->lock, flags); - scsi_cmd_free(h, c); - spin_unlock_irqrestore(&h->lock, flags); - return rc; -} - -static void -cciss_update_non_disk_devices(ctlr_info_t *h, int hostno) -{ - /* the idea here is we could get notified from /proc - that some devices have changed, so we do a report - physical luns cmd, and adjust our list of devices - accordingly. (We can't rely on the scsi-mid layer just - doing inquiries, because the "busses" that the scsi - mid-layer probes are totally fabricated by this driver, - so new devices wouldn't show up. - - the scsi3addr's of devices won't change so long as the - adapter is not reset. That means we can rescan and - tell which devices we already know about, vs. new - devices, vs. disappearing devices. - - Also, if you yank out a tape drive, then put in a disk - in it's place, (say, a configured volume from another - array controller for instance) _don't_ poke this driver - (so it thinks it's still a tape, but _do_ poke the scsi - mid layer, so it does an inquiry... the scsi mid layer - will see the physical disk. This would be bad. Need to - think about how to prevent that. One idea would be to - snoop all scsi responses and if an inquiry repsonse comes - back that reports a disk, chuck it an return selection - timeout instead and adjust our table... Not sure i like - that though. - - */ -#define OBDR_TAPE_INQ_SIZE 49 -#define OBDR_TAPE_SIG "$DR-10" - ReportLunData_struct *ld_buff; - unsigned char *inq_buff; - unsigned char scsi3addr[8]; - __u32 num_luns=0; - unsigned char *ch; - struct cciss_scsi_dev_t *currentsd, *this_device; - int ncurrent=0; - int reportlunsize = sizeof(*ld_buff) + CISS_MAX_PHYS_LUN * 8; - int i; - - ld_buff = kzalloc(reportlunsize, GFP_KERNEL); - inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); - currentsd = kzalloc(sizeof(*currentsd) * - (CCISS_MAX_SCSI_DEVS_PER_HBA+1), GFP_KERNEL); - if (ld_buff == NULL || inq_buff == NULL || currentsd == NULL) { - printk(KERN_ERR "cciss: out of memory\n"); - goto out; - } - this_device = ¤tsd[CCISS_MAX_SCSI_DEVS_PER_HBA]; - if (cciss_scsi_do_report_phys_luns(h, ld_buff, reportlunsize) == 0) { - ch = &ld_buff->LUNListLength[0]; - num_luns = ((ch[0]<<24) | (ch[1]<<16) | (ch[2]<<8) | ch[3]) / 8; - if (num_luns > CISS_MAX_PHYS_LUN) { - printk(KERN_WARNING - "cciss: Maximum physical LUNs (%d) exceeded. 
" - "%d LUNs ignored.\n", CISS_MAX_PHYS_LUN, - num_luns - CISS_MAX_PHYS_LUN); - num_luns = CISS_MAX_PHYS_LUN; - } - } - else { - printk(KERN_ERR "cciss: Report physical LUNs failed.\n"); - goto out; - } - - - /* adjust our table of devices */ - for (i = 0; i < num_luns; i++) { - /* for each physical lun, do an inquiry */ - if (ld_buff->LUN[i][3] & 0xC0) continue; - memset(inq_buff, 0, OBDR_TAPE_INQ_SIZE); - memcpy(&scsi3addr[0], &ld_buff->LUN[i][0], 8); - - if (cciss_scsi_do_inquiry(h, scsi3addr, 0, inq_buff, - (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) - /* Inquiry failed (msg printed already) */ - continue; /* so we will skip this device. */ - - this_device->devtype = (inq_buff[0] & 0x1f); - this_device->bus = -1; - this_device->target = -1; - this_device->lun = -1; - memcpy(this_device->scsi3addr, scsi3addr, 8); - memcpy(this_device->vendor, &inq_buff[8], - sizeof(this_device->vendor)); - memcpy(this_device->model, &inq_buff[16], - sizeof(this_device->model)); - memcpy(this_device->revision, &inq_buff[32], - sizeof(this_device->revision)); - memset(this_device->device_id, 0, - sizeof(this_device->device_id)); - cciss_scsi_get_device_id(h, scsi3addr, - this_device->device_id, sizeof(this_device->device_id)); - - switch (this_device->devtype) { - case 0x05: /* CD-ROM */ { - - /* We don't *really* support actual CD-ROM devices, - * just this "One Button Disaster Recovery" tape drive - * which temporarily pretends to be a CD-ROM drive. - * So we check that the device is really an OBDR tape - * device by checking for "$DR-10" in bytes 43-48 of - * the inquiry data. - */ - char obdr_sig[7]; - - strncpy(obdr_sig, &inq_buff[43], 6); - obdr_sig[6] = '\0'; - if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0) - /* Not OBDR device, ignore it. */ - break; - } - /* fall through . . . */ - case 0x01: /* sequential access, (tape) */ - case 0x08: /* medium changer */ - if (ncurrent >= CCISS_MAX_SCSI_DEVS_PER_HBA) { - printk(KERN_INFO "cciss%d: %s ignored, " - "too many devices.\n", h->ctlr, - scsi_device_type(this_device->devtype)); - break; - } - currentsd[ncurrent] = *this_device; - ncurrent++; - break; - default: - break; - } - } - - adjust_cciss_scsi_table(h, hostno, currentsd, ncurrent); -out: - kfree(inq_buff); - kfree(ld_buff); - kfree(currentsd); - return; -} - -static int -is_keyword(char *ptr, int len, char *verb) // Thanks to ncr53c8xx.c -{ - int verb_len = strlen(verb); - if (len >= verb_len && !memcmp(verb,ptr,verb_len)) - return verb_len; - else - return 0; -} - -static int -cciss_scsi_user_command(ctlr_info_t *h, int hostno, char *buffer, int length) -{ - int arg_len; - - if ((arg_len = is_keyword(buffer, length, "rescan")) != 0) - cciss_update_non_disk_devices(h, hostno); - else - return -EINVAL; - return length; -} - -static int -cciss_scsi_write_info(struct Scsi_Host *sh, - char *buffer, /* data buffer */ - int length) /* length of data in buffer */ -{ - ctlr_info_t *h = (ctlr_info_t *) sh->hostdata[0]; - if (h == NULL) /* This really shouldn't ever happen. */ - return -EINVAL; - - return cciss_scsi_user_command(h, sh->host_no, - buffer, length); -} - -static int -cciss_scsi_show_info(struct seq_file *m, struct Scsi_Host *sh) -{ - - ctlr_info_t *h = (ctlr_info_t *) sh->hostdata[0]; - int i; - - if (h == NULL) /* This really shouldn't ever happen. 
*/ - return -EINVAL; - - seq_printf(m, "cciss%d: SCSI host: %d\n", - h->ctlr, sh->host_no); - - /* this information is needed by apps to know which cciss - device corresponds to which scsi host number without - having to open a scsi target device node. The device - information is not a duplicate of /proc/scsi/scsi because - the two may be out of sync due to scsi hotplug, rather - this info is for an app to be able to use to know how to - get them back in sync. */ - - for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++) { - struct cciss_scsi_dev_t *sd = - &ccissscsi[h->ctlr].dev[i]; - seq_printf(m, "c%db%dt%dl%d %02d " - "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", - sh->host_no, sd->bus, sd->target, sd->lun, - sd->devtype, - sd->scsi3addr[0], sd->scsi3addr[1], - sd->scsi3addr[2], sd->scsi3addr[3], - sd->scsi3addr[4], sd->scsi3addr[5], - sd->scsi3addr[6], sd->scsi3addr[7]); - } - return 0; -} - -/* cciss_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci - dma mapping and fills in the scatter gather entries of the - cciss command, c. */ - -static void cciss_scatter_gather(ctlr_info_t *h, CommandList_struct *c, - struct scsi_cmnd *cmd) -{ - unsigned int len; - struct scatterlist *sg; - __u64 addr64; - int request_nsgs, i, chained, sg_index; - struct cciss_scsi_adapter_data_t *sa = h->scsi_ctlr; - SGDescriptor_struct *curr_sg; - - BUG_ON(scsi_sg_count(cmd) > h->maxsgentries); - - chained = 0; - sg_index = 0; - curr_sg = c->SG; - request_nsgs = scsi_dma_map(cmd); - if (request_nsgs) { - scsi_for_each_sg(cmd, sg, request_nsgs, i) { - if (sg_index + 1 == h->max_cmd_sgentries && - !chained && request_nsgs - i > 1) { - chained = 1; - sg_index = 0; - curr_sg = sa->cmd_sg_list[c->cmdindex]; - } - addr64 = (__u64) sg_dma_address(sg); - len = sg_dma_len(sg); - curr_sg[sg_index].Addr.lower = - (__u32) (addr64 & 0x0FFFFFFFFULL); - curr_sg[sg_index].Addr.upper = - (__u32) ((addr64 >> 32) & 0x0FFFFFFFFULL); - curr_sg[sg_index].Len = len; - curr_sg[sg_index].Ext = 0; - ++sg_index; - } - if (chained) - cciss_map_sg_chain_block(h, c, - sa->cmd_sg_list[c->cmdindex], - (request_nsgs - (h->max_cmd_sgentries - 1)) * - sizeof(SGDescriptor_struct)); - } - /* track how many SG entries we are using */ - if (request_nsgs > h->maxSG) - h->maxSG = request_nsgs; - c->Header.SGTotal = (u16) request_nsgs + chained; - if (request_nsgs > h->max_cmd_sgentries) - c->Header.SGList = h->max_cmd_sgentries; - else - c->Header.SGList = c->Header.SGTotal; - return; -} - - -static int -cciss_scsi_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) -{ - ctlr_info_t *h; - int rc; - unsigned char scsi3addr[8]; - CommandList_struct *c; - unsigned long flags; - - // Get the ptr to our adapter structure (hba[i]) out of cmd->host. - // We violate cmd->host privacy here. (Is there another way?) - h = (ctlr_info_t *) cmd->device->host->hostdata[0]; - - rc = lookup_scsi3addr(h, cmd->device->channel, cmd->device->id, - cmd->device->lun, scsi3addr); - if (rc != 0) { - /* the scsi nexus does not match any that we presented... */ - /* pretend to mid layer that we got selection timeout */ - cmd->result = DID_NO_CONNECT << 16; - done(cmd); - /* we might want to think about registering controller itself - as a processor device on the bus so sg binds to it. */ - return 0; - } - - /* Ok, we have a reasonable scsi nexus, so send the cmd down, and - see what the device thinks of it. */ - - spin_lock_irqsave(&h->lock, flags); - c = scsi_cmd_alloc(h); - spin_unlock_irqrestore(&h->lock, flags); - if (c == NULL) { /* trouble... 
*/ - dev_warn(&h->pdev->dev, "scsi_cmd_alloc returned NULL!\n"); - /* FIXME: next 3 lines are -> BAD! <- */ - cmd->result = DID_NO_CONNECT << 16; - done(cmd); - return 0; - } - - // Fill in the command list header - - cmd->scsi_done = done; // save this for use by completion code - - /* save c in case we have to abort it */ - cmd->host_scribble = (unsigned char *) c; - - c->cmd_type = CMD_SCSI; - c->scsi_cmd = cmd; - c->Header.ReplyQueue = 0; /* unused in simple mode */ - memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8); - c->Header.Tag.lower = c->busaddr; /* Use k. address of cmd as tag */ - - // Fill in the request block... - - c->Request.Timeout = 0; - memset(c->Request.CDB, 0, sizeof(c->Request.CDB)); - BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB)); - c->Request.CDBLen = cmd->cmd_len; - memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len); - c->Request.Type.Type = TYPE_CMD; - c->Request.Type.Attribute = ATTR_SIMPLE; - switch (cmd->sc_data_direction) { - case DMA_TO_DEVICE: - c->Request.Type.Direction = XFER_WRITE; - break; - case DMA_FROM_DEVICE: - c->Request.Type.Direction = XFER_READ; - break; - case DMA_NONE: - c->Request.Type.Direction = XFER_NONE; - break; - case DMA_BIDIRECTIONAL: - // This can happen if a buggy application does a scsi passthru - // and sets both inlen and outlen to non-zero. ( see - // ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() ) - - c->Request.Type.Direction = XFER_RSVD; - // This is technically wrong, and cciss controllers should - // reject it with CMD_INVALID, which is the most correct - // response, but non-fibre backends appear to let it - // slide by, and give the same results as if this field - // were set correctly. Either way is acceptable for - // our purposes here. - - break; - - default: - dev_warn(&h->pdev->dev, "unknown data direction: %d\n", - cmd->sc_data_direction); - BUG(); - break; - } - cciss_scatter_gather(h, c, cmd); - enqueue_cmd_and_start_io(h, c); - /* the cmd'll come back via intr handler in complete_scsi_command() */ - return 0; -} - -static DEF_SCSI_QCMD(cciss_scsi_queue_command) - -static void cciss_unregister_scsi(ctlr_info_t *h) -{ - struct cciss_scsi_adapter_data_t *sa; - struct cciss_scsi_cmd_stack_t *stk; - unsigned long flags; - - /* we are being forcibly unloaded, and may not refuse. 
*/ - - spin_lock_irqsave(&h->lock, flags); - sa = h->scsi_ctlr; - stk = &sa->cmd_stack; - - /* if we weren't ever actually registered, don't unregister */ - if (sa->registered) { - spin_unlock_irqrestore(&h->lock, flags); - scsi_remove_host(sa->scsi_host); - scsi_host_put(sa->scsi_host); - spin_lock_irqsave(&h->lock, flags); - } - - /* set scsi_host to NULL so our detect routine will - find us on register */ - sa->scsi_host = NULL; - spin_unlock_irqrestore(&h->lock, flags); - scsi_cmd_stack_free(h); - kfree(sa); -} - -static int cciss_engage_scsi(ctlr_info_t *h) -{ - struct cciss_scsi_adapter_data_t *sa; - struct cciss_scsi_cmd_stack_t *stk; - unsigned long flags; - - spin_lock_irqsave(&h->lock, flags); - sa = h->scsi_ctlr; - stk = &sa->cmd_stack; - - if (sa->registered) { - dev_info(&h->pdev->dev, "SCSI subsystem already engaged.\n"); - spin_unlock_irqrestore(&h->lock, flags); - return -ENXIO; - } - sa->registered = 1; - spin_unlock_irqrestore(&h->lock, flags); - cciss_update_non_disk_devices(h, -1); - cciss_scsi_detect(h); - return 0; -} - -static void -cciss_seq_tape_report(struct seq_file *seq, ctlr_info_t *h) -{ - unsigned long flags; - - CPQ_TAPE_LOCK(h, flags); - seq_printf(seq, - "Sequential access devices: %d\n\n", - ccissscsi[h->ctlr].ndevices); - CPQ_TAPE_UNLOCK(h, flags); -} - -static int wait_for_device_to_become_ready(ctlr_info_t *h, - unsigned char lunaddr[]) -{ - int rc; - int count = 0; - int waittime = HZ; - CommandList_struct *c; - - c = cmd_alloc(h); - if (!c) { - dev_warn(&h->pdev->dev, "out of memory in " - "wait_for_device_to_become_ready.\n"); - return IO_ERROR; - } - - /* Send test unit ready until device ready, or give up. */ - while (count < 20) { - - /* Wait for a bit. do this first, because if we send - * the TUR right away, the reset will just abort it. - */ - schedule_timeout_uninterruptible(waittime); - count++; - - /* Increase wait time with each try, up to a point. */ - if (waittime < (HZ * 30)) - waittime = waittime * 2; - - /* Send the Test Unit Ready */ - rc = fill_cmd(h, c, TEST_UNIT_READY, NULL, 0, 0, - lunaddr, TYPE_CMD); - if (rc == 0) - rc = sendcmd_withirq_core(h, c, 0); - - (void) process_sendcmd_error(h, c); - - if (rc != 0) - goto retry_tur; - - if (c->err_info->CommandStatus == CMD_SUCCESS) - break; - - if (c->err_info->CommandStatus == CMD_TARGET_STATUS && - c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION) { - if (c->err_info->SenseInfo[2] == NO_SENSE) - break; - if (c->err_info->SenseInfo[2] == UNIT_ATTENTION) { - unsigned char asc; - asc = c->err_info->SenseInfo[12]; - check_for_unit_attention(h, c); - if (asc == POWER_OR_RESET) - break; - } - } -retry_tur: - dev_warn(&h->pdev->dev, "Waiting %d secs " - "for device to become ready.\n", - waittime / HZ); - rc = 1; /* device not ready. */ - } - - if (rc) - dev_warn(&h->pdev->dev, "giving up on device.\n"); - else - dev_warn(&h->pdev->dev, "device is ready.\n"); - - cmd_free(h, c); - return rc; -} - -/* Need at least one of these error handlers to keep ../scsi/hosts.c from - * complaining. Doing a host- or bus-reset can't do anything good here. - * Despite what it might say in scsi_error.c, there may well be commands - * on the controller, as the cciss driver registers twice, once as a block - * device for the logical drives, and once as a scsi device, for any tape - * drives. 
So we know there are no commands out on the tape drives, but we - * don't know there are no commands on the controller, and it is likely - * that there probably are, as the cciss block device is most commonly used - * as a boot device (embedded controller on HP/Compaq systems.) -*/ - -static int cciss_eh_device_reset_handler(struct scsi_cmnd *scsicmd) -{ - int rc; - CommandList_struct *cmd_in_trouble; - unsigned char lunaddr[8]; - ctlr_info_t *h; - - /* find the controller to which the command to be aborted was sent */ - h = (ctlr_info_t *) scsicmd->device->host->hostdata[0]; - if (h == NULL) /* paranoia */ - return FAILED; - dev_warn(&h->pdev->dev, "resetting tape drive or medium changer.\n"); - /* find the command that's giving us trouble */ - cmd_in_trouble = (CommandList_struct *) scsicmd->host_scribble; - if (cmd_in_trouble == NULL) /* paranoia */ - return FAILED; - memcpy(lunaddr, &cmd_in_trouble->Header.LUN.LunAddrBytes[0], 8); - /* send a reset to the SCSI LUN which the command was sent to */ - rc = sendcmd_withirq(h, CCISS_RESET_MSG, NULL, 0, 0, lunaddr, - TYPE_MSG); - if (rc == 0 && wait_for_device_to_become_ready(h, lunaddr) == 0) - return SUCCESS; - dev_warn(&h->pdev->dev, "resetting device failed.\n"); - return FAILED; -} - -static int cciss_eh_abort_handler(struct scsi_cmnd *scsicmd) -{ - int rc; - CommandList_struct *cmd_to_abort; - unsigned char lunaddr[8]; - ctlr_info_t *h; - - /* find the controller to which the command to be aborted was sent */ - h = (ctlr_info_t *) scsicmd->device->host->hostdata[0]; - if (h == NULL) /* paranoia */ - return FAILED; - dev_warn(&h->pdev->dev, "aborting tardy SCSI cmd\n"); - - /* find the command to be aborted */ - cmd_to_abort = (CommandList_struct *) scsicmd->host_scribble; - if (cmd_to_abort == NULL) /* paranoia */ - return FAILED; - memcpy(lunaddr, &cmd_to_abort->Header.LUN.LunAddrBytes[0], 8); - rc = sendcmd_withirq(h, CCISS_ABORT_MSG, &cmd_to_abort->Header.Tag, - 0, 0, lunaddr, TYPE_MSG); - if (rc == 0) - return SUCCESS; - return FAILED; - -} - -#else /* no CONFIG_CISS_SCSI_TAPE */ - -/* If no tape support, then these become defined out of existence */ - -#define cciss_scsi_setup(cntl_num) -#define cciss_engage_scsi(h) - -#endif /* CONFIG_CISS_SCSI_TAPE */ diff --git a/drivers/block/cciss_scsi.h b/drivers/block/cciss_scsi.h deleted file mode 100644 index e71d986727ca..000000000000 --- a/drivers/block/cciss_scsi.h +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Disk Array driver for HP Smart Array controllers, SCSI Tape module. - * (C) Copyright 2001, 2007 Hewlett-Packard Development Company, L.P. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 300, Boston, MA - * 02111-1307, USA. - * - * Questions/Comments/Bugfixes to iss_storagedev@hp.com - * - */ -#ifdef CONFIG_CISS_SCSI_TAPE -#ifndef _CCISS_SCSI_H_ -#define _CCISS_SCSI_H_ - -#include /* possibly irrelevant, since we don't show disks */ - - /* the scsi id of the adapter... 
*/ -#define SELF_SCSI_ID 15 - /* 15 is somewhat arbitrary, since the scsi-2 bus - that's presented by the driver to the OS is - fabricated. The "real" scsi-3 bus the - hardware presents is fabricated too. - The actual, honest-to-goodness physical - bus that the devices are attached to is not - addressible natively, and may in fact turn - out to be not scsi at all. */ - - -/* - -If the upper scsi layer tries to track how many commands we have -outstanding, it will be operating under the misapprehension that it is -the only one sending us requests. We also have the block interface, -which is where most requests must surely come from, so the upper layer's -notion of how many requests we have outstanding will be wrong most or -all of the time. - -Note, the normal SCSI mid-layer error handling doesn't work well -for this driver because 1) it takes the io_request_lock before -calling error handlers and uses a local variable to store flags, -so the io_request_lock cannot be released and interrupts enabled -inside the error handlers, and, the error handlers cannot poll -for command completion because they might get commands from the -block half of the driver completing, and not know what to do -with them. That's what we get for making a hybrid scsi/block -driver, I suppose. - -*/ - -struct cciss_scsi_dev_t { - int devtype; - int bus, target, lun; /* as presented to the OS */ - unsigned char scsi3addr[8]; /* as presented to the HW */ - unsigned char device_id[16]; /* from inquiry pg. 0x83 */ - unsigned char vendor[8]; /* bytes 8-15 of inquiry data */ - unsigned char model[16]; /* bytes 16-31 of inquiry data */ - unsigned char revision[4]; /* bytes 32-35 of inquiry data */ -}; - -struct cciss_scsi_hba_t { - char *name; - int ndevices; -#define CCISS_MAX_SCSI_DEVS_PER_HBA 16 - struct cciss_scsi_dev_t dev[CCISS_MAX_SCSI_DEVS_PER_HBA]; -}; - -#endif /* _CCISS_SCSI_H_ */ -#endif /* CONFIG_CISS_SCSI_TAPE */ diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c index e02c45cd3c5a..5f0eaee8c8a7 100644 --- a/drivers/block/drbd/drbd_actlog.c +++ b/drivers/block/drbd/drbd_actlog.c @@ -151,7 +151,7 @@ static int _drbd_md_sync_page_io(struct drbd_device *device, op_flags |= REQ_SYNC; bio = bio_alloc_drbd(GFP_NOIO); - bio->bi_bdev = bdev->md_bdev; + bio_set_dev(bio, bdev->md_bdev); bio->bi_iter.bi_sector = sector; err = -EIO; if (bio_add_page(bio, device->md_io.page, size, 0) != size) diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index 809fd245c3dc..bd97908c766f 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c @@ -1019,7 +1019,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho bm_store_page_idx(page, page_nr); } else page = b->bm_pages[page_nr]; - bio->bi_bdev = device->ldev->md_bdev; + bio_set_dev(bio, device->ldev->md_bdev); bio->bi_iter.bi_sector = on_disk_sector; /* bio_add_page of a single page to an empty bio will always succeed, * according to api. Do we want to assert that? 
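A minimal sketch, not taken from this patch, of the bio_set_dev() conversion that the drbd_actlog.c and drbd_bitmap.c hunks above apply; the example_submit_flush() name and its arguments are illustrative only.

#include <linux/bio.h>
#include <linux/blkdev.h>

/* New-style way to point a bio at a block device before submitting it. */
static void example_submit_flush(struct block_device *bdev,
				 bio_end_io_t *done, void *private)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 0);

	bio_set_dev(bio, bdev);		/* was: bio->bi_bdev = bdev; */
	bio->bi_opf = REQ_OP_FLUSH | REQ_PREFLUSH;
	bio->bi_private = private;
	bio->bi_end_io = done;
	submit_bio(bio);
}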
*/ diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index d17b6e6393c7..7e8589ce631c 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -63,19 +63,15 @@ # define __must_hold(x) #endif -/* module parameter, defined in drbd_main.c */ -extern unsigned int minor_count; -extern bool disable_sendpage; -extern bool allow_oos; -void tl_abort_disk_io(struct drbd_device *device); - +/* shared module parameters, defined in drbd_main.c */ #ifdef CONFIG_DRBD_FAULT_INJECTION -extern int enable_faults; -extern int fault_rate; -extern int fault_devs; +extern int drbd_enable_faults; +extern int drbd_fault_rate; #endif -extern char usermode_helper[]; +extern unsigned int drbd_minor_count; +extern char drbd_usermode_helper[]; +extern int drbd_proc_details; /* This is used to stop/restart our threads. @@ -181,8 +177,8 @@ _drbd_insert_fault(struct drbd_device *device, unsigned int type); static inline int drbd_insert_fault(struct drbd_device *device, unsigned int type) { #ifdef CONFIG_DRBD_FAULT_INJECTION - return fault_rate && - (enable_faults & (1<bi_bdev) { - drbd_err(device, "drbd_generic_make_request: bio->bi_bdev == NULL\n"); + if (!bio->bi_disk) { + drbd_err(device, "drbd_generic_make_request: bio->bi_disk == NULL\n"); bio->bi_status = BLK_STS_IOERR; bio_endio(bio); return; diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index e2ed28d45ce1..8cb3791898ae 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -77,41 +77,41 @@ MODULE_PARM_DESC(minor_count, "Approximate number of drbd devices (" MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR); #include -/* allow_open_on_secondary */ -MODULE_PARM_DESC(allow_oos, "DONT USE!"); /* thanks to these macros, if compiled into the kernel (not-module), - * this becomes the boot parameter drbd.minor_count */ -module_param(minor_count, uint, 0444); -module_param(disable_sendpage, bool, 0644); -module_param(allow_oos, bool, 0); -module_param(proc_details, int, 0644); + * these become boot parameters (e.g., drbd.minor_count) */ #ifdef CONFIG_DRBD_FAULT_INJECTION -int enable_faults; -int fault_rate; -static int fault_count; -int fault_devs; +int drbd_enable_faults; +int drbd_fault_rate; +static int drbd_fault_count; +static int drbd_fault_devs; /* bitmap of enabled faults */ -module_param(enable_faults, int, 0664); +module_param_named(enable_faults, drbd_enable_faults, int, 0664); /* fault rate % value - applies to all enabled faults */ -module_param(fault_rate, int, 0664); +module_param_named(fault_rate, drbd_fault_rate, int, 0664); /* count of faults inserted */ -module_param(fault_count, int, 0664); +module_param_named(fault_count, drbd_fault_count, int, 0664); /* bitmap of devices to insert faults on */ -module_param(fault_devs, int, 0644); +module_param_named(fault_devs, drbd_fault_devs, int, 0644); #endif -/* module parameter, defined */ -unsigned int minor_count = DRBD_MINOR_COUNT_DEF; -bool disable_sendpage; -bool allow_oos; -int proc_details; /* Detail level in proc drbd*/ +/* module parameters we can keep static */ +static bool drbd_allow_oos; /* allow_open_on_secondary */ +static bool drbd_disable_sendpage; +MODULE_PARM_DESC(allow_oos, "DONT USE!"); +module_param_named(allow_oos, drbd_allow_oos, bool, 0); +module_param_named(disable_sendpage, drbd_disable_sendpage, bool, 0644); +/* module parameters we share */ +int drbd_proc_details; /* Detail level in proc drbd*/ +module_param_named(proc_details, drbd_proc_details, int, 0644); +/* module parameters 
shared with defaults */ +unsigned int drbd_minor_count = DRBD_MINOR_COUNT_DEF; /* Module parameter for setting the user mode helper program * to run. Default is /sbin/drbdadm */ -char usermode_helper[80] = "/sbin/drbdadm"; - -module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644); +char drbd_usermode_helper[80] = "/sbin/drbdadm"; +module_param_named(minor_count, drbd_minor_count, uint, 0444); +module_param_string(usermode_helper, drbd_usermode_helper, sizeof(drbd_usermode_helper), 0644); /* in 2.6.x, our device mapping and config info contains our virtual gendisks * as member "struct gendisk *vdisk;" @@ -923,7 +923,9 @@ void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *peer_device) } /* communicated if (agreed_features & DRBD_FF_WSAME) */ -void assign_p_sizes_qlim(struct drbd_device *device, struct p_sizes *p, struct request_queue *q) +static void +assign_p_sizes_qlim(struct drbd_device *device, struct p_sizes *p, + struct request_queue *q) { if (q) { p->qlim->physical_block_size = cpu_to_be32(queue_physical_block_size(q)); @@ -1560,7 +1562,7 @@ static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *pa * put_page(); and would cause either a VM_BUG directly, or * __page_cache_release a page that would actually still be referenced * by someone, leading to some obscure delayed Oops somewhere else. */ - if (disable_sendpage || (page_count(page) < 1) || PageSlab(page)) + if (drbd_disable_sendpage || (page_count(page) < 1) || PageSlab(page)) return _drbd_no_send_page(peer_device, page, offset, size, msg_flags); msg_flags |= MSG_NOSIGNAL; @@ -1932,7 +1934,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode) if (device->state.role != R_PRIMARY) { if (mode & FMODE_WRITE) rv = -EROFS; - else if (!allow_oos) + else if (!drbd_allow_oos) rv = -EMEDIUMTYPE; } @@ -1952,6 +1954,19 @@ static void drbd_release(struct gendisk *gd, fmode_t mode) mutex_unlock(&drbd_main_mutex); } +/* need to hold resource->req_lock */ +void drbd_queue_unplug(struct drbd_device *device) +{ + if (device->state.pdsk >= D_INCONSISTENT && device->state.conn >= C_CONNECTED) { + D_ASSERT(device, device->state.role == R_PRIMARY); + if (test_and_clear_bit(UNPLUG_REMOTE, &device->flags)) { + drbd_queue_work_if_unqueued( + &first_peer_device(device)->connection->sender_work, + &device->unplug_work); + } + } +} + static void drbd_set_defaults(struct drbd_device *device) { /* Beware! 
The actual layout differs @@ -2008,18 +2023,14 @@ void drbd_init_set_defaults(struct drbd_device *device) device->unplug_work.cb = w_send_write_hint; device->bm_io_work.w.cb = w_bitmap_io; - init_timer(&device->resync_timer); - init_timer(&device->md_sync_timer); - init_timer(&device->start_resync_timer); - init_timer(&device->request_timer); - device->resync_timer.function = resync_timer_fn; - device->resync_timer.data = (unsigned long) device; - device->md_sync_timer.function = md_sync_timer_fn; - device->md_sync_timer.data = (unsigned long) device; - device->start_resync_timer.function = start_resync_timer_fn; - device->start_resync_timer.data = (unsigned long) device; - device->request_timer.function = request_timer_fn; - device->request_timer.data = (unsigned long) device; + setup_timer(&device->resync_timer, resync_timer_fn, + (unsigned long)device); + setup_timer(&device->md_sync_timer, md_sync_timer_fn, + (unsigned long)device); + setup_timer(&device->start_resync_timer, start_resync_timer_fn, + (unsigned long)device); + setup_timer(&device->request_timer, request_timer_fn, + (unsigned long)device); init_waitqueue_head(&device->misc_wait); init_waitqueue_head(&device->state_wait); @@ -2131,7 +2142,7 @@ static void drbd_destroy_mempools(void) static int drbd_create_mempools(void) { struct page *page; - const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count; + const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count; int i; /* prepare our caches and mempools */ @@ -2167,13 +2178,12 @@ static int drbd_create_mempools(void) goto Enomem; /* mempools */ - drbd_io_bio_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_RESCUER); + drbd_io_bio_set = bioset_create(BIO_POOL_SIZE, 0, 0); if (drbd_io_bio_set == NULL) goto Enomem; drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0, - BIOSET_NEED_BVECS | - BIOSET_NEED_RESCUER); + BIOSET_NEED_BVECS); if (drbd_md_io_bio_set == NULL) goto Enomem; @@ -2409,7 +2419,6 @@ static void drbd_cleanup(void) destroy_workqueue(retry.wq); drbd_genl_unregister(); - drbd_debugfs_cleanup(); idr_for_each_entry(&drbd_devices, device, i) drbd_delete_device(device); @@ -2420,6 +2429,8 @@ static void drbd_cleanup(void) drbd_free_resource(resource); } + drbd_debugfs_cleanup(); + drbd_destroy_mempools(); unregister_blkdev(DRBD_MAJOR, "drbd"); @@ -2972,12 +2983,12 @@ static int __init drbd_init(void) { int err; - if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) { - pr_err("invalid minor_count (%d)\n", minor_count); + if (drbd_minor_count < DRBD_MINOR_COUNT_MIN || drbd_minor_count > DRBD_MINOR_COUNT_MAX) { + pr_err("invalid minor_count (%d)\n", drbd_minor_count); #ifdef MODULE return -EINVAL; #else - minor_count = DRBD_MINOR_COUNT_DEF; + drbd_minor_count = DRBD_MINOR_COUNT_DEF; #endif } @@ -3900,12 +3911,12 @@ _drbd_insert_fault(struct drbd_device *device, unsigned int type) static struct fault_random_state rrs = {0, 0}; unsigned int ret = ( - (fault_devs == 0 || - ((1 << device_to_minor(device)) & fault_devs) != 0) && - (((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate)); + (drbd_fault_devs == 0 || + ((1 << device_to_minor(device)) & drbd_fault_devs) != 0) && + (((_drbd_fault_random(&rrs) % 100) + 1) <= drbd_fault_rate)); if (ret) { - fault_count++; + drbd_fault_count++; if (__ratelimit(&drbd_ratelimit_state)) drbd_warn(device, "***Simulating %s failure\n", diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index ad0fcb43e45c..a12f77e6891e 100644 --- a/drivers/block/drbd/drbd_nl.c +++ 
b/drivers/block/drbd/drbd_nl.c @@ -344,7 +344,7 @@ int drbd_khelper(struct drbd_device *device, char *cmd) (char[60]) { }, /* address */ NULL }; char mb[14]; - char *argv[] = {usermode_helper, cmd, mb, NULL }; + char *argv[] = {drbd_usermode_helper, cmd, mb, NULL }; struct drbd_connection *connection = first_peer_device(device)->connection; struct sib_info sib; int ret; @@ -359,19 +359,19 @@ int drbd_khelper(struct drbd_device *device, char *cmd) * write out any unsynced meta data changes now */ drbd_md_sync(device); - drbd_info(device, "helper command: %s %s %s\n", usermode_helper, cmd, mb); + drbd_info(device, "helper command: %s %s %s\n", drbd_usermode_helper, cmd, mb); sib.sib_reason = SIB_HELPER_PRE; sib.helper_name = cmd; drbd_bcast_event(device, &sib); notify_helper(NOTIFY_CALL, device, connection, cmd, 0); - ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC); + ret = call_usermodehelper(drbd_usermode_helper, argv, envp, UMH_WAIT_PROC); if (ret) drbd_warn(device, "helper command: %s %s %s exit code %u (0x%x)\n", - usermode_helper, cmd, mb, + drbd_usermode_helper, cmd, mb, (ret >> 8) & 0xff, ret); else drbd_info(device, "helper command: %s %s %s exit code %u (0x%x)\n", - usermode_helper, cmd, mb, + drbd_usermode_helper, cmd, mb, (ret >> 8) & 0xff, ret); sib.sib_reason = SIB_HELPER_POST; sib.helper_exit_code = ret; @@ -396,24 +396,24 @@ enum drbd_peer_state conn_khelper(struct drbd_connection *connection, char *cmd) (char[60]) { }, /* address */ NULL }; char *resource_name = connection->resource->name; - char *argv[] = {usermode_helper, cmd, resource_name, NULL }; + char *argv[] = {drbd_usermode_helper, cmd, resource_name, NULL }; int ret; setup_khelper_env(connection, envp); conn_md_sync(connection); - drbd_info(connection, "helper command: %s %s %s\n", usermode_helper, cmd, resource_name); + drbd_info(connection, "helper command: %s %s %s\n", drbd_usermode_helper, cmd, resource_name); /* TODO: conn_bcast_event() ?? */ notify_helper(NOTIFY_CALL, NULL, connection, cmd, 0); - ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC); + ret = call_usermodehelper(drbd_usermode_helper, argv, envp, UMH_WAIT_PROC); if (ret) drbd_warn(connection, "helper command: %s %s %s exit code %u (0x%x)\n", - usermode_helper, cmd, resource_name, + drbd_usermode_helper, cmd, resource_name, (ret >> 8) & 0xff, ret); else drbd_info(connection, "helper command: %s %s %s exit code %u (0x%x)\n", - usermode_helper, cmd, resource_name, + drbd_usermode_helper, cmd, resource_name, (ret >> 8) & 0xff, ret); /* TODO: conn_bcast_event() ?? */ notify_helper(NOTIFY_RESPONSE, NULL, connection, cmd, ret); @@ -1236,12 +1236,18 @@ static void fixup_discard_if_not_supported(struct request_queue *q) static void decide_on_write_same_support(struct drbd_device *device, struct request_queue *q, - struct request_queue *b, struct o_qlim *o) + struct request_queue *b, struct o_qlim *o, + bool disable_write_same) { struct drbd_peer_device *peer_device = first_peer_device(device); struct drbd_connection *connection = peer_device->connection; bool can_do = b ? 
b->limits.max_write_same_sectors : true; + if (can_do && disable_write_same) { + can_do = false; + drbd_info(peer_device, "WRITE_SAME disabled by config\n"); + } + if (can_do && connection->cstate >= C_CONNECTED && !(connection->agreed_features & DRBD_FF_WSAME)) { can_do = false; drbd_info(peer_device, "peer does not support WRITE_SAME\n"); @@ -1302,6 +1308,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi struct request_queue *b = NULL; struct disk_conf *dc; bool discard_zeroes_if_aligned = true; + bool disable_write_same = false; if (bdev) { b = bdev->backing_bdev->bd_disk->queue; @@ -1311,6 +1318,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi dc = rcu_dereference(device->ldev->disk_conf); max_segments = dc->max_bio_bvecs; discard_zeroes_if_aligned = dc->discard_zeroes_if_aligned; + disable_write_same = dc->disable_write_same; rcu_read_unlock(); blk_set_stacking_limits(&q->limits); @@ -1321,7 +1329,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS); blk_queue_segment_boundary(q, PAGE_SIZE-1); decide_on_discard_support(device, q, b, discard_zeroes_if_aligned); - decide_on_write_same_support(device, q, b, o); + decide_on_write_same_support(device, q, b, o, disable_write_same); if (b) { blk_queue_stack_limits(q, b); @@ -1612,7 +1620,8 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info) if (write_ordering_changed(old_disk_conf, new_disk_conf)) drbd_bump_write_ordering(device->resource, NULL, WO_BDEV_FLUSH); - if (old_disk_conf->discard_zeroes_if_aligned != new_disk_conf->discard_zeroes_if_aligned) + if (old_disk_conf->discard_zeroes_if_aligned != new_disk_conf->discard_zeroes_if_aligned + || old_disk_conf->disable_write_same != new_disk_conf->disable_write_same) drbd_reconsider_queue_parameters(device, device->ldev, NULL); drbd_md_sync(device); @@ -2140,34 +2149,13 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) static int adm_detach(struct drbd_device *device, int force) { - enum drbd_state_rv retcode; - void *buffer; - int ret; - if (force) { set_bit(FORCE_DETACH, &device->flags); drbd_force_state(device, NS(disk, D_FAILED)); - retcode = SS_SUCCESS; - goto out; + return SS_SUCCESS; } - drbd_suspend_io(device); /* so no-one is stuck in drbd_al_begin_io */ - buffer = drbd_md_get_buffer(device, __func__); /* make sure there is no in-flight meta-data IO */ - if (buffer) { - retcode = drbd_request_state(device, NS(disk, D_FAILED)); - drbd_md_put_buffer(device); - } else /* already <= D_FAILED */ - retcode = SS_NOTHING_TO_DO; - /* D_FAILED will transition to DISKLESS. */ - drbd_resume_io(device); - ret = wait_event_interruptible(device->misc_wait, - device->state.disk != D_FAILED); - if ((int)retcode == (int)SS_IS_DISKLESS) - retcode = SS_NOTHING_TO_DO; - if (ret) - retcode = ERR_INTR; -out: - return retcode; + return drbd_request_detach_interruptible(device); } /* Detaching the disk is a process in multiple stages. 
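drbd_setup_queue_param() above samples disk_conf under rcu_read_lock() and copies out the scalars it needs before dropping the lock. A minimal sketch of that read-side pattern; the example_* types and names are illustrative only.

#include <linux/rcupdate.h>
#include <linux/types.h>

struct example_conf {
	int max_bio_bvecs;
	bool disable_write_same;
};

struct example_device {
	struct example_conf __rcu *conf; /* writers publish via rcu_assign_pointer() */
};

static void example_sample_conf(struct example_device *dev,
				int *max_bvecs, bool *no_write_same)
{
	struct example_conf *c;

	rcu_read_lock();
	c = rcu_dereference(dev->conf);
	*max_bvecs = c->max_bio_bvecs;		/* copy while inside the read section */
	*no_write_same = c->disable_write_same;
	rcu_read_unlock();
}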
First we need to lock diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c index 8378142f7a55..582caeb0de86 100644 --- a/drivers/block/drbd/drbd_proc.c +++ b/drivers/block/drbd/drbd_proc.c @@ -127,7 +127,7 @@ static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *se seq_putc(seq, '='); seq_putc(seq, '>'); for (i = 0; i < y; i++) - seq_printf(seq, "."); + seq_putc(seq, '.'); seq_puts(seq, "] "); if (state.conn == C_VERIFY_S || state.conn == C_VERIFY_T) @@ -179,7 +179,7 @@ static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *se seq_printf_with_thousands_grouping(seq, dbdt); seq_puts(seq, " ("); /* ------------------------- ~3s average ------------------------ */ - if (proc_details >= 1) { + if (drbd_proc_details >= 1) { /* this is what drbd_rs_should_slow_down() uses */ i = (device->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS; dt = (jiffies - device->rs_mark_time[i]) / HZ; @@ -209,7 +209,7 @@ static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *se } seq_printf(seq, " K/sec%s\n", stalled ? " (stalled)" : ""); - if (proc_details >= 1) { + if (drbd_proc_details >= 1) { /* 64 bit: * we convert to sectors in the display below. */ unsigned long bm_bits = drbd_bm_bits(device); @@ -332,13 +332,13 @@ static int drbd_seq_show(struct seq_file *seq, void *v) state.conn == C_VERIFY_T) drbd_syncer_progress(device, seq, state); - if (proc_details >= 1 && get_ldev_if_state(device, D_FAILED)) { + if (drbd_proc_details >= 1 && get_ldev_if_state(device, D_FAILED)) { lc_seq_printf_stats(seq, device->resync); lc_seq_printf_stats(seq, device->act_log); put_ldev(device); } - if (proc_details >= 2) + if (drbd_proc_details >= 2) seq_printf(seq, "\tblocked on activity log: %d\n", atomic_read(&device->ap_actlog_cnt)); } rcu_read_unlock(); diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index c7e95e6380fb..796eaf347dc0 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -332,7 +332,7 @@ static void drbd_free_pages(struct drbd_device *device, struct page *page, int i if (page == NULL) return; - if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count) + if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count) i = page_chain_free(page); else { struct page *tmp; @@ -1100,7 +1100,10 @@ static int conn_connect(struct drbd_connection *connection) idr_for_each_entry(&connection->peer_devices, peer_device, vnr) mutex_lock(peer_device->device->state_mutex); + /* avoid a race with conn_request_state( C_DISCONNECTING ) */ + spin_lock_irq(&connection->resource->req_lock); set_bit(STATE_SENT, &connection->flags); + spin_unlock_irq(&connection->resource->req_lock); idr_for_each_entry(&connection->peer_devices, peer_device, vnr) mutex_unlock(peer_device->device->state_mutex); @@ -1194,6 +1197,14 @@ static int decode_header(struct drbd_connection *connection, void *header, struc return 0; } +static void drbd_unplug_all_devices(struct drbd_connection *connection) +{ + if (current->plug == &connection->receiver_plug) { + blk_finish_plug(&connection->receiver_plug); + blk_start_plug(&connection->receiver_plug); + } /* else: maybe just schedule() ?? 
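drbd_unplug_all_devices() above cycles the receiver's plug (blk_finish_plug() followed immediately by blk_start_plug()) to push out whatever has been batched so far without giving up batching. A minimal sketch of the underlying blk_plug pattern; example_submit_batched() is illustrative only.

#include <linux/bio.h>
#include <linux/blkdev.h>

static void example_submit_batched(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);		/* batch on the per-task plug list */
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]);
	blk_finish_plug(&plug);		/* flush the whole batch downstream */
}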
*/ +} + static int drbd_recv_header(struct drbd_connection *connection, struct packet_info *pi) { void *buffer = connection->data.rbuf; @@ -1209,6 +1220,36 @@ static int drbd_recv_header(struct drbd_connection *connection, struct packet_in return err; } +static int drbd_recv_header_maybe_unplug(struct drbd_connection *connection, struct packet_info *pi) +{ + void *buffer = connection->data.rbuf; + unsigned int size = drbd_header_size(connection); + int err; + + err = drbd_recv_short(connection->data.socket, buffer, size, MSG_NOSIGNAL|MSG_DONTWAIT); + if (err != size) { + /* If we have nothing in the receive buffer now, to reduce + * application latency, try to drain the backend queues as + * quickly as possible, and let remote TCP know what we have + * received so far. */ + if (err == -EAGAIN) { + drbd_tcp_quickack(connection->data.socket); + drbd_unplug_all_devices(connection); + } + if (err > 0) { + buffer += err; + size -= err; + } + err = drbd_recv_all_warn(connection, buffer, size); + if (err) + return err; + } + + err = decode_header(connection, connection->data.rbuf, pi); + connection->last_received = jiffies; + + return err; +} /* This is blkdev_issue_flush, but asynchronous. * We want to submit to all component volumes in parallel, * then wait for all completions. @@ -1223,7 +1264,7 @@ struct one_flush_context { struct issue_flush_context *ctx; }; -void one_flush_endio(struct bio *bio) +static void one_flush_endio(struct bio *bio) { struct one_flush_context *octx = bio->bi_private; struct drbd_device *device = octx->device; @@ -1265,7 +1306,7 @@ static void submit_one_flush(struct drbd_device *device, struct issue_flush_cont octx->device = device; octx->ctx = ctx; - bio->bi_bdev = device->ldev->backing_bdev; + bio_set_dev(bio, device->ldev->backing_bdev); bio->bi_private = octx; bio->bi_end_io = one_flush_endio; bio->bi_opf = REQ_OP_FLUSH | REQ_PREFLUSH; @@ -1548,7 +1589,7 @@ int drbd_submit_peer_request(struct drbd_device *device, } /* > peer_req->i.sector, unless this is the first bio */ bio->bi_iter.bi_sector = sector; - bio->bi_bdev = device->ldev->backing_bdev; + bio_set_dev(bio, device->ldev->backing_bdev); bio_set_op_attrs(bio, op, op_flags); bio->bi_private = peer_req; bio->bi_end_io = drbd_peer_request_endio; @@ -4085,7 +4126,7 @@ static int receive_uuids(struct drbd_connection *connection, struct packet_info return config_unknown_volume(connection, pi); device = peer_device->device; - p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO); + p_uuid = kmalloc_array(UI_EXTENDED_SIZE, sizeof(*p_uuid), GFP_NOIO); if (!p_uuid) { drbd_err(device, "kmalloc of p_uuid failed\n"); return false; @@ -4882,8 +4923,8 @@ static void drbdd(struct drbd_connection *connection) struct data_cmd const *cmd; drbd_thread_current_set_cpu(&connection->receiver); - update_receiver_timing_details(connection, drbd_recv_header); - if (drbd_recv_header(connection, &pi)) + update_receiver_timing_details(connection, drbd_recv_header_maybe_unplug); + if (drbd_recv_header_maybe_unplug(connection, &pi)) goto err_out; cmd = &drbd_cmd_handler[pi.cmd]; @@ -5375,8 +5416,11 @@ int drbd_receiver(struct drbd_thread *thi) } } while (h == 0); - if (h > 0) + if (h > 0) { + blk_start_plug(&connection->receiver_plug); drbdd(connection); + blk_finish_plug(&connection->receiver_plug); + } conn_disconnect(connection); diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index f6e865b2d543..de8566e55334 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -36,14 +36,18 
@@ static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, /* Update disk stats at start of I/O request */ static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request *req) { - generic_start_io_acct(bio_data_dir(req->master_bio), req->i.size >> 9, - &device->vdisk->part0); + struct request_queue *q = device->rq_queue; + + generic_start_io_acct(q, bio_data_dir(req->master_bio), + req->i.size >> 9, &device->vdisk->part0); } /* Update disk stats when completing request upwards */ static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *req) { - generic_end_io_acct(bio_data_dir(req->master_bio), + struct request_queue *q = device->rq_queue; + + generic_end_io_acct(q, bio_data_dir(req->master_bio), &device->vdisk->part0, req->start_jif); } @@ -1175,7 +1179,7 @@ drbd_submit_req_private_bio(struct drbd_request *req) else type = DRBD_FAULT_DT_RD; - bio->bi_bdev = device->ldev->backing_bdev; + bio_set_dev(bio, device->ldev->backing_bdev); /* State may have changed since we grabbed our reference on the * ->ldev member. Double check, and short-circuit to endio. @@ -1275,6 +1279,57 @@ static bool may_do_writes(struct drbd_device *device) return s.disk == D_UP_TO_DATE || s.pdsk == D_UP_TO_DATE; } +struct drbd_plug_cb { + struct blk_plug_cb cb; + struct drbd_request *most_recent_req; + /* do we need more? */ +}; + +static void drbd_unplug(struct blk_plug_cb *cb, bool from_schedule) +{ + struct drbd_plug_cb *plug = container_of(cb, struct drbd_plug_cb, cb); + struct drbd_resource *resource = plug->cb.data; + struct drbd_request *req = plug->most_recent_req; + + kfree(cb); + if (!req) + return; + + spin_lock_irq(&resource->req_lock); + /* In case the sender did not process it yet, raise the flag to + * have it followed with P_UNPLUG_REMOTE just after. */ + req->rq_state |= RQ_UNPLUG; + /* but also queue a generic unplug */ + drbd_queue_unplug(req->device); + kref_put(&req->kref, drbd_req_destroy); + spin_unlock_irq(&resource->req_lock); +} + +static struct drbd_plug_cb* drbd_check_plugged(struct drbd_resource *resource) +{ + /* A lot of text to say + * return (struct drbd_plug_cb*)blk_check_plugged(); */ + struct drbd_plug_cb *plug; + struct blk_plug_cb *cb = blk_check_plugged(drbd_unplug, resource, sizeof(*plug)); + + if (cb) + plug = container_of(cb, struct drbd_plug_cb, cb); + else + plug = NULL; + return plug; +} + +static void drbd_update_plug(struct drbd_plug_cb *plug, struct drbd_request *req) +{ + struct drbd_request *tmp = plug->most_recent_req; + /* Will be sent to some peer. + * Remember to tag it with UNPLUG_REMOTE on unplug */ + kref_get(&req->kref); + plug->most_recent_req = req; + if (tmp) + kref_put(&tmp->kref, drbd_req_destroy); +} + static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req) { struct drbd_resource *resource = device->resource; @@ -1347,6 +1402,12 @@ static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request no_remote = true; } + if (no_remote == false) { + struct drbd_plug_cb *plug = drbd_check_plugged(resource); + if (plug) + drbd_update_plug(plug, req); + } + /* If it took the fast path in drbd_request_prepare, add it here. * The slow path has added it already. 
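drbd_check_plugged() and drbd_unplug() above build on blk_check_plugged(), which attaches a caller-defined callback to the current task's plug and runs it when the plug is flushed. A minimal sketch of that idiom using only the block-layer API; the example_* names are illustrative only.

#include <linux/blkdev.h>
#include <linux/slab.h>

struct example_plug_cb {
	struct blk_plug_cb cb;	/* embedded so container_of() can recover us */
	int pending;		/* whatever state gets batched per plug */
};

/* Runs once when the task's plug is flushed; the callback owns the memory. */
static void example_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct example_plug_cb *pcb =
		container_of(cb, struct example_plug_cb, cb);

	/* act on pcb->pending here */
	kfree(pcb);
}

static void example_note_io(void *per_queue_cookie)
{
	struct blk_plug_cb *cb;

	/* Returns the existing callback for this (function, cookie) pair,
	 * allocates a new one, or returns NULL if the task has no plug. */
	cb = blk_check_plugged(example_unplug, per_queue_cookie,
			       sizeof(struct example_plug_cb));
	if (cb)
		container_of(cb, struct example_plug_cb, cb)->pending++;
}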
*/ if (list_empty(&req->req_pending_master_completion)) @@ -1395,7 +1456,10 @@ void __drbd_make_request(struct drbd_device *device, struct bio *bio, unsigned l static void submit_fast_path(struct drbd_device *device, struct list_head *incoming) { + struct blk_plug plug; struct drbd_request *req, *tmp; + + blk_start_plug(&plug); list_for_each_entry_safe(req, tmp, incoming, tl_requests) { const int rw = bio_data_dir(req->master_bio); @@ -1413,6 +1477,7 @@ static void submit_fast_path(struct drbd_device *device, struct list_head *incom list_del_init(&req->tl_requests); drbd_send_and_submit(device, req); } + blk_finish_plug(&plug); } static bool prepare_al_transaction_nonblock(struct drbd_device *device, @@ -1420,12 +1485,12 @@ static bool prepare_al_transaction_nonblock(struct drbd_device *device, struct list_head *pending, struct list_head *later) { - struct drbd_request *req, *tmp; + struct drbd_request *req; int wake = 0; int err; spin_lock_irq(&device->al_lock); - list_for_each_entry_safe(req, tmp, incoming, tl_requests) { + while ((req = list_first_entry_or_null(incoming, struct drbd_request, tl_requests))) { err = drbd_al_begin_io_nonblock(device, &req->i); if (err == -ENOBUFS) break; @@ -1442,17 +1507,20 @@ static bool prepare_al_transaction_nonblock(struct drbd_device *device, return !list_empty(pending); } -void send_and_submit_pending(struct drbd_device *device, struct list_head *pending) +static void send_and_submit_pending(struct drbd_device *device, struct list_head *pending) { - struct drbd_request *req, *tmp; + struct blk_plug plug; + struct drbd_request *req; - list_for_each_entry_safe(req, tmp, pending, tl_requests) { + blk_start_plug(&plug); + while ((req = list_first_entry_or_null(pending, struct drbd_request, tl_requests))) { req->rq_state |= RQ_IN_ACT_LOG; req->in_actlog_jif = jiffies; atomic_dec(&device->ap_actlog_cnt); list_del_init(&req->tl_requests); drbd_send_and_submit(device, req); } + blk_finish_plug(&plug); } void do_submit(struct work_struct *ws) diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h index 9e1866ab238f..a2254f825601 100644 --- a/drivers/block/drbd/drbd_req.h +++ b/drivers/block/drbd/drbd_req.h @@ -212,6 +212,11 @@ enum drbd_req_state_bits { /* Should call drbd_al_complete_io() for this request... */ __RQ_IN_ACT_LOG, + /* This was the most recent request during some blk_finish_plug() + * or its implicit from-schedule equivalent. 
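prepare_al_transaction_nonblock() and send_and_submit_pending() above switch from list_for_each_entry_safe() to popping the head with list_first_entry_or_null(), which re-reads the head on every iteration instead of caching a next pointer. A minimal sketch of that drain loop with illustrative example_* names.

#include <linux/list.h>
#include <linux/slab.h>

struct example_item {
	struct list_head node;
	int payload;
};

static void example_drain(struct list_head *pending)
{
	struct example_item *it;

	while ((it = list_first_entry_or_null(pending, struct example_item, node))) {
		list_del_init(&it->node);
		/* process it->payload ... */
		kfree(it);
	}
}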
+ * We may use it as hint to send a P_UNPLUG_REMOTE */ + __RQ_UNPLUG, + /* The peer has sent a retry ACK */ __RQ_POSTPONED, @@ -249,6 +254,7 @@ enum drbd_req_state_bits { #define RQ_WSAME (1UL << __RQ_WSAME) #define RQ_UNMAP (1UL << __RQ_UNMAP) #define RQ_IN_ACT_LOG (1UL << __RQ_IN_ACT_LOG) +#define RQ_UNPLUG (1UL << __RQ_UNPLUG) #define RQ_POSTPONED (1UL << __RQ_POSTPONED) #define RQ_COMPLETION_SUSP (1UL << __RQ_COMPLETION_SUSP) #define RQ_EXP_RECEIVE_ACK (1UL << __RQ_EXP_RECEIVE_ACK) diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c index eea0c4aec978..0813c654c893 100644 --- a/drivers/block/drbd/drbd_state.c +++ b/drivers/block/drbd/drbd_state.c @@ -346,7 +346,7 @@ static enum drbd_role min_role(enum drbd_role role1, enum drbd_role role2) enum drbd_role conn_highest_role(struct drbd_connection *connection) { - enum drbd_role role = R_UNKNOWN; + enum drbd_role role = R_SECONDARY; struct drbd_peer_device *peer_device; int vnr; @@ -579,11 +579,14 @@ drbd_req_state(struct drbd_device *device, union drbd_state mask, unsigned long flags; union drbd_state os, ns; enum drbd_state_rv rv; + void *buffer = NULL; init_completion(&done); if (f & CS_SERIALIZE) mutex_lock(device->state_mutex); + if (f & CS_INHIBIT_MD_IO) + buffer = drbd_md_get_buffer(device, __func__); spin_lock_irqsave(&device->resource->req_lock, flags); os = drbd_read_state(device); @@ -636,6 +639,8 @@ drbd_req_state(struct drbd_device *device, union drbd_state mask, } abort: + if (buffer) + drbd_md_put_buffer(device); if (f & CS_SERIALIZE) mutex_unlock(device->state_mutex); @@ -664,6 +669,47 @@ _drbd_request_state(struct drbd_device *device, union drbd_state mask, return rv; } +/* + * We grab drbd_md_get_buffer(), because we don't want to "fail" the disk while + * there is IO in-flight: the transition into D_FAILED for detach purposes + * may get misinterpreted as actual IO error in a confused endio function. + * + * We wrap it all into wait_event(), to retry in case the drbd_req_state() + * returns SS_IN_TRANSIENT_STATE. + * + * To avoid potential deadlock with e.g. the receiver thread trying to grab + * drbd_md_get_buffer() while trying to get out of the "transient state", we + * need to grab and release the meta data buffer inside of that wait_event loop. 
+ */ +static enum drbd_state_rv +request_detach(struct drbd_device *device) +{ + return drbd_req_state(device, NS(disk, D_FAILED), + CS_VERBOSE | CS_ORDERED | CS_INHIBIT_MD_IO); +} + +enum drbd_state_rv +drbd_request_detach_interruptible(struct drbd_device *device) +{ + enum drbd_state_rv rv; + int ret; + + drbd_suspend_io(device); /* so no-one is stuck in drbd_al_begin_io */ + wait_event_interruptible(device->state_wait, + (rv = request_detach(device)) != SS_IN_TRANSIENT_STATE); + drbd_resume_io(device); + + ret = wait_event_interruptible(device->misc_wait, + device->state.disk != D_FAILED); + + if (rv == SS_IS_DISKLESS) + rv = SS_NOTHING_TO_DO; + if (ret) + rv = ERR_INTR; + + return rv; +} + enum drbd_state_rv _drbd_request_state_holding_state_mutex(struct drbd_device *device, union drbd_state mask, union drbd_state val, enum chg_state_flags f) diff --git a/drivers/block/drbd/drbd_state.h b/drivers/block/drbd/drbd_state.h index 6c9d5d4a8a75..0276c98fbbdd 100644 --- a/drivers/block/drbd/drbd_state.h +++ b/drivers/block/drbd/drbd_state.h @@ -71,6 +71,10 @@ enum chg_state_flags { CS_DC_SUSP = 1 << 10, CS_DC_MASK = CS_DC_ROLE + CS_DC_PEER + CS_DC_CONN + CS_DC_DISK + CS_DC_PDSK, CS_IGN_OUTD_FAIL = 1 << 11, + + /* Make sure no meta data IO is in flight, by calling + * drbd_md_get_buffer(). Used for graceful detach. */ + CS_INHIBIT_MD_IO = 1 << 12, }; /* drbd_dev_state and drbd_state are different types. This is to stress the @@ -156,6 +160,10 @@ static inline int drbd_request_state(struct drbd_device *device, return _drbd_request_state(device, mask, val, CS_VERBOSE + CS_ORDERED); } +/* for use in adm_detach() (drbd_adm_detach(), drbd_adm_down()) */ +enum drbd_state_rv +drbd_request_detach_interruptible(struct drbd_device *device); + enum drbd_role conn_highest_role(struct drbd_connection *connection); enum drbd_role conn_highest_peer(struct drbd_connection *connection); enum drbd_disk_state conn_highest_disk(struct drbd_connection *connection); diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 1d8726a8df34..03471b3fce86 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -65,6 +65,11 @@ void drbd_md_endio(struct bio *bio) device = bio->bi_private; device->md_io.error = blk_status_to_errno(bio->bi_status); + /* special case: drbd_md_read() during drbd_adm_attach() */ + if (device->ldev) + put_ldev(device); + bio_put(bio); + /* We grabbed an extra reference in _drbd_md_sync_page_io() to be able * to timeout on the lower level device, and eventually detach from it. * If this io completion runs after that timeout expired, this @@ -79,9 +84,6 @@ void drbd_md_endio(struct bio *bio) drbd_md_put_buffer(device); device->md_io.done = 1; wake_up(&device->misc_wait); - bio_put(bio); - if (device->ldev) /* special case: drbd_md_read() during drbd_adm_attach() */ - put_ldev(device); } /* reads on behalf of the partner, @@ -128,6 +130,14 @@ void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(l block_id = peer_req->block_id; peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO; + if (peer_req->flags & EE_WAS_ERROR) { + /* In protocol != C, we usually do not send write acks. + * In case of a write error, send the neg ack anyways. 
*/ + if (!__test_and_set_bit(__EE_SEND_WRITE_ACK, &peer_req->flags)) + inc_unacked(device); + drbd_set_out_of_sync(device, peer_req->i.sector, peer_req->i.size); + } + spin_lock_irqsave(&device->resource->req_lock, flags); device->writ_cnt += peer_req->i.size >> 9; list_move_tail(&peer_req->w.list, &device->done_ee); @@ -195,7 +205,8 @@ void drbd_peer_request_endio(struct bio *bio) } } -void drbd_panic_after_delayed_completion_of_aborted_request(struct drbd_device *device) +static void +drbd_panic_after_delayed_completion_of_aborted_request(struct drbd_device *device) { panic("drbd%u %s/%u potential random memory corruption caused by delayed completion of aborted local request\n", device->minor, device->resource->name, device->vnr); @@ -1382,18 +1393,22 @@ static int drbd_send_barrier(struct drbd_connection *connection) return conn_send_command(connection, sock, P_BARRIER, sizeof(*p), NULL, 0); } +static int pd_send_unplug_remote(struct drbd_peer_device *pd) +{ + struct drbd_socket *sock = &pd->connection->data; + if (!drbd_prepare_command(pd, sock)) + return -EIO; + return drbd_send_command(pd, sock, P_UNPLUG_REMOTE, 0, NULL, 0); +} + int w_send_write_hint(struct drbd_work *w, int cancel) { struct drbd_device *device = container_of(w, struct drbd_device, unplug_work); - struct drbd_socket *sock; if (cancel) return 0; - sock = &first_peer_device(device)->connection->data; - if (!drbd_prepare_command(first_peer_device(device), sock)) - return -EIO; - return drbd_send_command(first_peer_device(device), sock, P_UNPLUG_REMOTE, 0, NULL, 0); + return pd_send_unplug_remote(first_peer_device(device)); } static void re_init_if_first_write(struct drbd_connection *connection, unsigned int epoch) @@ -1455,6 +1470,7 @@ int w_send_dblock(struct drbd_work *w, int cancel) struct drbd_device *device = req->device; struct drbd_peer_device *const peer_device = first_peer_device(device); struct drbd_connection *connection = peer_device->connection; + bool do_send_unplug = req->rq_state & RQ_UNPLUG; int err; if (unlikely(cancel)) { @@ -1470,6 +1486,9 @@ int w_send_dblock(struct drbd_work *w, int cancel) err = drbd_send_dblock(peer_device, req); req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK); + if (do_send_unplug && !err) + pd_send_unplug_remote(peer_device); + return err; } @@ -1484,6 +1503,7 @@ int w_send_read_req(struct drbd_work *w, int cancel) struct drbd_device *device = req->device; struct drbd_peer_device *const peer_device = first_peer_device(device); struct drbd_connection *connection = peer_device->connection; + bool do_send_unplug = req->rq_state & RQ_UNPLUG; int err; if (unlikely(cancel)) { @@ -1501,6 +1521,9 @@ int w_send_read_req(struct drbd_work *w, int cancel) req_mod(req, err ? 
SEND_FAILED : HANDED_OVER_TO_NETWORK); + if (do_send_unplug && !err) + pd_send_unplug_remote(peer_device); + return err; } @@ -1513,7 +1536,7 @@ int w_restart_disk_io(struct drbd_work *w, int cancel) drbd_al_begin_io(device, &req->i); drbd_req_make_private_bio(req, req->master_bio); - req->private_bio->bi_bdev = device->ldev->backing_bdev; + bio_set_dev(req->private_bio, device->ldev->backing_bdev); generic_make_request(req->private_bio); return 0; @@ -1733,6 +1756,11 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side) return; } + if (!connection) { + drbd_err(device, "No connection to peer, aborting!\n"); + return; + } + if (!test_bit(B_RS_H_DONE, &device->flags)) { if (side == C_SYNC_TARGET) { /* Since application IO was locked out during C_WF_BITMAP_T and diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 9c00f29e40c1..60c086a53609 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -4134,7 +4134,7 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive) cbdata.drive = drive; bio_init(&bio, &bio_vec, 1); - bio.bi_bdev = bdev; + bio_set_dev(&bio, bdev); bio_add_page(&bio, page, size, 0); bio.bi_iter.bi_sector = 0; diff --git a/drivers/block/loop.c b/drivers/block/loop.c index f321b96405f5..85de67334695 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -213,10 +213,13 @@ static void __loop_update_dio(struct loop_device *lo, bool dio) */ blk_mq_freeze_queue(lo->lo_queue); lo->use_dio = use_dio; - if (use_dio) + if (use_dio) { + queue_flag_clear_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue); lo->lo_flags |= LO_FLAGS_DIRECT_IO; - else + } else { + queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue); lo->lo_flags &= ~LO_FLAGS_DIRECT_IO; + } blk_mq_unfreeze_queue(lo->lo_queue); } @@ -460,12 +463,21 @@ static void lo_complete_rq(struct request *rq) blk_mq_end_request(rq, cmd->ret < 0 ? BLK_STS_IOERR : BLK_STS_OK); } +static void lo_rw_aio_do_completion(struct loop_cmd *cmd) +{ + if (!atomic_dec_and_test(&cmd->ref)) + return; + kfree(cmd->bvec); + cmd->bvec = NULL; + blk_mq_complete_request(cmd->rq); +} + static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2) { struct loop_cmd *cmd = container_of(iocb, struct loop_cmd, iocb); cmd->ret = ret; - blk_mq_complete_request(cmd->rq); + lo_rw_aio_do_completion(cmd); } static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd, @@ -473,22 +485,51 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd, { struct iov_iter iter; struct bio_vec *bvec; - struct bio *bio = cmd->rq->bio; + struct request *rq = cmd->rq; + struct bio *bio = rq->bio; struct file *file = lo->lo_backing_file; + unsigned int offset; + int segments = 0; int ret; - /* nomerge for loop request queue */ - WARN_ON(cmd->rq->bio != cmd->rq->biotail); + if (rq->bio != rq->biotail) { + struct req_iterator iter; + struct bio_vec tmp; + + __rq_for_each_bio(bio, rq) + segments += bio_segments(bio); + bvec = kmalloc(sizeof(struct bio_vec) * segments, GFP_NOIO); + if (!bvec) + return -EIO; + cmd->bvec = bvec; + + /* + * The bios of the request may be started from the middle of + * the 'bvec' because of bio splitting, so we can't directly + * copy bio->bi_iov_vec to new bvec. The rq_for_each_segment + * API will take care of all details for us. 
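The reworked lo_rw_aio() above flattens a multi-bio request into one bvec array with rq_for_each_segment(), which hands back each segment already adjusted for bio splitting. A minimal sketch of that walk; example_count_request_bytes() is illustrative only.

#include <linux/bio.h>
#include <linux/blkdev.h>

static unsigned int example_count_request_bytes(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec bvec;
	unsigned int bytes = 0;

	rq_for_each_segment(bvec, rq, iter)
		bytes += bvec.bv_len;	/* each bvec is a ready-to-use segment */

	return bytes;	/* equals blk_rq_bytes(rq) for a regular read/write */
}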
+ */ + rq_for_each_segment(tmp, rq, iter) { + *bvec = tmp; + bvec++; + } + bvec = cmd->bvec; + offset = 0; + } else { + /* + * Same here, this bio may be started from the middle of the + * 'bvec' because of bio splitting, so offset from the bvec + * must be passed to iov iterator + */ + offset = bio->bi_iter.bi_bvec_done; + bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter); + segments = bio_segments(bio); + } + atomic_set(&cmd->ref, 2); - bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter); iov_iter_bvec(&iter, ITER_BVEC | rw, bvec, - bio_segments(bio), blk_rq_bytes(cmd->rq)); - /* - * This bio may be started from the middle of the 'bvec' - * because of bio splitting, so offset from the bvec must - * be passed to iov iterator - */ - iter.iov_offset = bio->bi_iter.bi_bvec_done; + segments, blk_rq_bytes(rq)); + iter.iov_offset = offset; cmd->iocb.ki_pos = pos; cmd->iocb.ki_filp = file; @@ -500,6 +541,8 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd, else ret = call_read_iter(file, &cmd->iocb, &iter); + lo_rw_aio_do_completion(cmd); + if (ret != -EIOCBQUEUED) cmd->iocb.ki_complete(&cmd->iocb, ret, 0); return 0; @@ -546,74 +589,12 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq) } } -struct switch_request { - struct file *file; - struct completion wait; -}; - static inline void loop_update_dio(struct loop_device *lo) { __loop_update_dio(lo, io_is_direct(lo->lo_backing_file) | lo->use_dio); } -/* - * Do the actual switch; called from the BIO completion routine - */ -static void do_loop_switch(struct loop_device *lo, struct switch_request *p) -{ - struct file *file = p->file; - struct file *old_file = lo->lo_backing_file; - struct address_space *mapping; - - /* if no new file, only flush of queued bios requested */ - if (!file) - return; - - mapping = file->f_mapping; - mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask); - lo->lo_backing_file = file; - lo->lo_blocksize = S_ISBLK(mapping->host->i_mode) ? - mapping->host->i_bdev->bd_block_size : PAGE_SIZE; - lo->old_gfp_mask = mapping_gfp_mask(mapping); - mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); - loop_update_dio(lo); -} - -/* - * loop_switch performs the hard work of switching a backing store. - * First it needs to flush existing IO, it does this by sending a magic - * BIO down the pipe. The completion of this BIO does the actual switch. - */ -static int loop_switch(struct loop_device *lo, struct file *file) -{ - struct switch_request w; - - w.file = file; - - /* freeze queue and wait for completion of scheduled requests */ - blk_mq_freeze_queue(lo->lo_queue); - - /* do the switch action */ - do_loop_switch(lo, &w); - - /* unfreeze */ - blk_mq_unfreeze_queue(lo->lo_queue); - - return 0; -} - -/* - * Helper to flush the IOs in loop, but keeping loop thread running - */ -static int loop_flush(struct loop_device *lo) -{ - /* loop not yet configured, no running thread, nothing to flush */ - if (lo->lo_state != Lo_bound) - return 0; - return loop_switch(lo, NULL); -} - static void loop_reread_partitions(struct loop_device *lo, struct block_device *bdev) { @@ -678,9 +659,14 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, goto out_putf; /* and ... 
switch */ - error = loop_switch(lo, file); - if (error) - goto out_putf; + blk_mq_freeze_queue(lo->lo_queue); + mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask); + lo->lo_backing_file = file; + lo->old_gfp_mask = mapping_gfp_mask(file->f_mapping); + mapping_set_gfp_mask(file->f_mapping, + lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); + loop_update_dio(lo); + blk_mq_unfreeze_queue(lo->lo_queue); fput(old_file); if (lo->lo_flags & LO_FLAGS_PARTSCAN) @@ -867,7 +853,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, struct file *file, *f; struct inode *inode; struct address_space *mapping; - unsigned lo_blocksize; int lo_flags = 0; int error; loff_t size; @@ -911,9 +896,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, !file->f_op->write_iter) lo_flags |= LO_FLAGS_READ_ONLY; - lo_blocksize = S_ISBLK(inode->i_mode) ? - inode->i_bdev->bd_block_size : PAGE_SIZE; - error = -EFBIG; size = get_loop_size(lo, file); if ((loff_t)(sector_t)size != size) @@ -927,7 +909,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0); lo->use_dio = false; - lo->lo_blocksize = lo_blocksize; lo->lo_device = bdev; lo->lo_flags = lo_flags; lo->lo_backing_file = file; @@ -947,7 +928,8 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, /* let user-space know about the new size */ kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); - set_blocksize(bdev, lo_blocksize); + set_blocksize(bdev, S_ISBLK(inode->i_mode) ? + block_size(inode->i_bdev) : PAGE_SIZE); lo->lo_state = Lo_bound; if (part_shift) @@ -1053,6 +1035,9 @@ static int loop_clr_fd(struct loop_device *lo) memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE); memset(lo->lo_crypt_name, 0, LO_NAME_SIZE); memset(lo->lo_file_name, 0, LO_NAME_SIZE); + blk_queue_logical_block_size(lo->lo_queue, 512); + blk_queue_physical_block_size(lo->lo_queue, 512); + blk_queue_io_min(lo->lo_queue, 512); if (bdev) { bdput(bdev); invalidate_bdev(bdev); @@ -1336,6 +1321,26 @@ static int loop_set_dio(struct loop_device *lo, unsigned long arg) return error; } +static int loop_set_block_size(struct loop_device *lo, unsigned long arg) +{ + if (lo->lo_state != Lo_bound) + return -ENXIO; + + if (arg < 512 || arg > PAGE_SIZE || !is_power_of_2(arg)) + return -EINVAL; + + blk_mq_freeze_queue(lo->lo_queue); + + blk_queue_logical_block_size(lo->lo_queue, arg); + blk_queue_physical_block_size(lo->lo_queue, arg); + blk_queue_io_min(lo->lo_queue, arg); + loop_update_dio(lo); + + blk_mq_unfreeze_queue(lo->lo_queue); + + return 0; +} + static int lo_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { @@ -1384,6 +1389,11 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode, if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) err = loop_set_dio(lo, arg); break; + case LOOP_SET_BLOCK_SIZE: + err = -EPERM; + if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) + err = loop_set_block_size(lo, arg); + break; default: err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL; } @@ -1583,12 +1593,13 @@ static void lo_release(struct gendisk *disk, fmode_t mode) err = loop_clr_fd(lo); if (!err) return; - } else { + } else if (lo->lo_state == Lo_bound) { /* * Otherwise keep thread (if running) and config, * but flush possible ongoing bios in thread. 
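A minimal user-space sketch of driving the LOOP_SET_BLOCK_SIZE ioctl added above, assuming a <linux/loop.h> that already carries the new define and an already-bound /dev/loop0; per the new loop_set_block_size(), values outside 512..PAGE_SIZE or non-powers-of-two are rejected with EINVAL.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/loop.h>

int main(void)
{
	int fd = open("/dev/loop0", O_RDWR);

	if (fd < 0) {
		perror("open /dev/loop0");
		return 1;
	}
	/* Request a 4 KiB logical/physical block size on the loop device. */
	if (ioctl(fd, LOOP_SET_BLOCK_SIZE, 4096UL) < 0)
		perror("LOOP_SET_BLOCK_SIZE");
	close(fd);
	return 0;
}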
*/ - loop_flush(lo); + blk_mq_freeze_queue(lo->lo_queue); + blk_mq_unfreeze_queue(lo->lo_queue); } mutex_unlock(&lo->lo_ctl_mutex); @@ -1770,9 +1781,13 @@ static int loop_add(struct loop_device **l, int i) } lo->lo_queue->queuedata = lo; + blk_queue_max_hw_sectors(lo->lo_queue, BLK_DEF_MAX_SECTORS); + /* - * It doesn't make sense to enable merge because the I/O - * submitted to backing file is handled page by page. + * By default, we do buffer IO, so it doesn't make sense to enable + * merge because the I/O submitted to backing file is handled page by + * page. For directio mode, merge does help to dispatch bigger request + * to underlayer disk. We will enable merge once directio is enabled. */ queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue); @@ -1966,10 +1981,6 @@ static int __init loop_init(void) struct loop_device *lo; int err; - err = misc_register(&loop_misc); - if (err < 0) - return err; - part_shift = 0; if (max_part > 0) { part_shift = fls(max_part); @@ -1987,12 +1998,12 @@ static int __init loop_init(void) if ((1UL << part_shift) > DISK_MAX_PARTS) { err = -EINVAL; - goto misc_out; + goto err_out; } if (max_loop > 1UL << (MINORBITS - part_shift)) { err = -EINVAL; - goto misc_out; + goto err_out; } /* @@ -2011,6 +2022,11 @@ static int __init loop_init(void) range = 1UL << MINORBITS; } + err = misc_register(&loop_misc); + if (err < 0) + goto err_out; + + if (register_blkdev(LOOP_MAJOR, "loop")) { err = -EIO; goto misc_out; @@ -2030,6 +2046,7 @@ static int __init loop_init(void) misc_out: misc_deregister(&loop_misc); +err_out: return err; } diff --git a/drivers/block/loop.h b/drivers/block/loop.h index fecd3f97ef8c..1f3956702993 100644 --- a/drivers/block/loop.h +++ b/drivers/block/loop.h @@ -48,7 +48,6 @@ struct loop_device { struct file * lo_backing_file; struct block_device *lo_device; - unsigned lo_blocksize; void *key_data; gfp_t old_gfp_mask; @@ -68,10 +67,11 @@ struct loop_device { struct loop_cmd { struct kthread_work work; struct request *rq; - struct list_head list; - bool use_aio; /* use AIO interface to handle I/O */ + bool use_aio; /* use AIO interface to handle I/O */ + atomic_t ref; /* only for aio */ long ret; struct kiocb iocb; + struct bio_vec *bvec; }; /* Support for loadable transfer modules */ diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 5bdf923294a5..883dfebd3014 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -128,7 +128,7 @@ static struct dentry *nbd_dbg_dir; #define NBD_MAGIC 0x68797548 static unsigned int nbds_max = 16; -static int max_part; +static int max_part = 16; static struct workqueue_struct *recv_workqueue; static int part_shift; @@ -165,7 +165,7 @@ static ssize_t pid_show(struct device *dev, return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv)); } -static struct device_attribute pid_attr = { +static const struct device_attribute pid_attr = { .attr = { .name = "pid", .mode = S_IRUGO}, .show = pid_show, }; @@ -820,9 +820,13 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx, * appropriate. */ ret = nbd_handle_cmd(cmd, hctx->queue_num); + if (ret < 0) + ret = BLK_STS_IOERR; + else if (!ret) + ret = BLK_STS_OK; complete(&cmd->send_complete); - return ret < 0 ? 
BLK_STS_IOERR : BLK_STS_OK; + return ret; } static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg, @@ -1194,6 +1198,12 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode, if (!capable(CAP_SYS_ADMIN)) return -EPERM; + /* The block layer will pass back some non-nbd ioctls in case we have + * special handling for them, but we don't so just return an error. + */ + if (_IOC_TYPE(cmd) != 0xab) + return -EINVAL; + mutex_lock(&nbd->config_lock); /* Don't allow ioctl operations on a nbd device that was created with @@ -1584,6 +1594,15 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info) } } else { nbd = idr_find(&nbd_index_idr, index); + if (!nbd) { + ret = nbd_dev_add(index); + if (ret < 0) { + mutex_unlock(&nbd_index_mutex); + printk(KERN_ERR "nbd: failed to add new device\n"); + return ret; + } + nbd = idr_find(&nbd_index_idr, index); + } } if (!nbd) { printk(KERN_ERR "nbd: couldn't find device at index %d\n", @@ -2137,4 +2156,4 @@ MODULE_LICENSE("GPL"); module_param(nbds_max, int, 0444); MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)"); module_param(max_part, int, 0444); -MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)"); +MODULE_PARM_DESC(max_part, "number of partitions per device (default: 16)"); diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 81142ce781da..8042c26ea9e6 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -1,3 +1,7 @@ +/* + * Add configfs and memory store: Kyungchan Koh and + * Shaohua Li + */ #include #include @@ -9,6 +13,24 @@ #include #include #include +#include +#include + +#define SECTOR_SHIFT 9 +#define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT) +#define PAGE_SECTORS (1 << PAGE_SECTORS_SHIFT) +#define SECTOR_SIZE (1 << SECTOR_SHIFT) +#define SECTOR_MASK (PAGE_SECTORS - 1) + +#define FREE_BATCH 16 + +#define TICKS_PER_SEC 50ULL +#define TIMER_INTERVAL (NSEC_PER_SEC / TICKS_PER_SEC) + +static inline u64 mb_per_tick(int mbps) +{ + return (1 << 20) / TICKS_PER_SEC * ((u64) mbps); +} struct nullb_cmd { struct list_head list; @@ -19,17 +41,82 @@ struct nullb_cmd { unsigned int tag; struct nullb_queue *nq; struct hrtimer timer; + blk_status_t error; }; struct nullb_queue { unsigned long *tag_map; wait_queue_head_t wait; unsigned int queue_depth; + struct nullb_device *dev; struct nullb_cmd *cmds; }; +/* + * Status flags for nullb_device. + * + * CONFIGURED: Device has been configured and turned on. Cannot reconfigure. + * UP: Device is currently on and visible in userspace. + * THROTTLED: Device is being throttled. + * CACHE: Device is using a write-back cache. + */ +enum nullb_device_flags { + NULLB_DEV_FL_CONFIGURED = 0, + NULLB_DEV_FL_UP = 1, + NULLB_DEV_FL_THROTTLED = 2, + NULLB_DEV_FL_CACHE = 3, +}; + +/* + * nullb_page is a page in memory for nullb devices. + * + * @page: The page holding the data. + * @bitmap: The bitmap represents which sector in the page has data. + * Each bit represents one block size. For example, sector 8 + * will use the 7th bit + * The highest 2 bits of bitmap are for special purpose. LOCK means the cache + * page is being flushing to storage. FREE means the cache page is freed and + * should be skipped from flushing to storage. 
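For the memory-backed store introduced above, each radix-tree slot holds one page plus a bitmap with one data bit per 512-byte sector, and the top two bitmap bits are reserved for the LOCK/FREE flags described in the comment. A small standalone sketch of the index arithmetic, assuming 4 KiB pages:

#include <assert.h>
#include <stdio.h>

#define SECTOR_SHIFT            9
#define PAGE_SHIFT              12                      /* assumption: 4 KiB pages */
#define PAGE_SECTORS_SHIFT      (PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS            (1 << PAGE_SECTORS_SHIFT)
#define SECTOR_MASK             (PAGE_SECTORS - 1)

int main(void)
{
        unsigned long long sector = 12345;
        unsigned long long idx = sector >> PAGE_SECTORS_SHIFT;  /* radix tree slot */
        unsigned int bit = sector & SECTOR_MASK;                /* data bit inside that page */

        /* the top two bitmap bits are reserved for LOCK/FREE, so all
         * PAGE_SECTORS data bits must fit below them (8 <= 62 here) */
        assert(PAGE_SECTORS <= (int)(sizeof(unsigned long) * 8 - 2));

        printf("sector %llu -> page index %llu, bit %u of %d\n",
               sector, idx, bit, PAGE_SECTORS);
        return 0;
}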
Please see + * null_make_cache_space + */ +struct nullb_page { + struct page *page; + unsigned long bitmap; +}; +#define NULLB_PAGE_LOCK (sizeof(unsigned long) * 8 - 1) +#define NULLB_PAGE_FREE (sizeof(unsigned long) * 8 - 2) + +struct nullb_device { + struct nullb *nullb; + struct config_item item; + struct radix_tree_root data; /* data stored in the disk */ + struct radix_tree_root cache; /* disk cache data */ + unsigned long flags; /* device flags */ + unsigned int curr_cache; + struct badblocks badblocks; + + unsigned long size; /* device size in MB */ + unsigned long completion_nsec; /* time in ns to complete a request */ + unsigned long cache_size; /* disk cache size in MB */ + unsigned int submit_queues; /* number of submission queues */ + unsigned int home_node; /* home node for the device */ + unsigned int queue_mode; /* block interface */ + unsigned int blocksize; /* block size */ + unsigned int irqmode; /* IRQ completion handler */ + unsigned int hw_queue_depth; /* queue depth */ + unsigned int index; /* index of the disk, only valid with a disk */ + unsigned int mbps; /* Bandwidth throttle cap (in MB/s) */ + bool use_lightnvm; /* register as a LightNVM device */ + bool blocking; /* blocking blk-mq device */ + bool use_per_node_hctx; /* use per-node allocation for hardware context */ + bool power; /* power on/off the device */ + bool memory_backed; /* if data is stored in memory */ + bool discard; /* if support discard */ +}; + struct nullb { + struct nullb_device *dev; struct list_head list; unsigned int index; struct request_queue *q; @@ -37,8 +124,10 @@ struct nullb { struct nvm_dev *ndev; struct blk_mq_tag_set *tag_set; struct blk_mq_tag_set __tag_set; - struct hrtimer timer; unsigned int queue_depth; + atomic_long_t cur_bytes; + struct hrtimer bw_timer; + unsigned long cache_flush_pos; spinlock_t lock; struct nullb_queue *queues; @@ -49,7 +138,7 @@ struct nullb { static LIST_HEAD(nullb_list); static struct mutex lock; static int null_major; -static int nullb_indexes; +static DEFINE_IDA(nullb_indexes); static struct kmem_cache *ppa_cache; static struct blk_mq_tag_set tag_set; @@ -65,15 +154,15 @@ enum { NULL_Q_MQ = 2, }; -static int submit_queues; -module_param(submit_queues, int, S_IRUGO); +static int g_submit_queues = 1; +module_param_named(submit_queues, g_submit_queues, int, S_IRUGO); MODULE_PARM_DESC(submit_queues, "Number of submission queues"); -static int home_node = NUMA_NO_NODE; -module_param(home_node, int, S_IRUGO); +static int g_home_node = NUMA_NO_NODE; +module_param_named(home_node, g_home_node, int, S_IRUGO); MODULE_PARM_DESC(home_node, "Home node for the device"); -static int queue_mode = NULL_Q_MQ; +static int g_queue_mode = NULL_Q_MQ; static int null_param_store_val(const char *str, int *val, int min, int max) { @@ -92,7 +181,7 @@ static int null_param_store_val(const char *str, int *val, int min, int max) static int null_set_queue_mode(const char *str, const struct kernel_param *kp) { - return null_param_store_val(str, &queue_mode, NULL_Q_BIO, NULL_Q_MQ); + return null_param_store_val(str, &g_queue_mode, NULL_Q_BIO, NULL_Q_MQ); } static const struct kernel_param_ops null_queue_mode_param_ops = { @@ -100,38 +189,38 @@ static const struct kernel_param_ops null_queue_mode_param_ops = { .get = param_get_int, }; -device_param_cb(queue_mode, &null_queue_mode_param_ops, &queue_mode, S_IRUGO); +device_param_cb(queue_mode, &null_queue_mode_param_ops, &g_queue_mode, S_IRUGO); MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)"); -static 
int gb = 250; -module_param(gb, int, S_IRUGO); +static int g_gb = 250; +module_param_named(gb, g_gb, int, S_IRUGO); MODULE_PARM_DESC(gb, "Size in GB"); -static int bs = 512; -module_param(bs, int, S_IRUGO); +static int g_bs = 512; +module_param_named(bs, g_bs, int, S_IRUGO); MODULE_PARM_DESC(bs, "Block size (in bytes)"); static int nr_devices = 1; module_param(nr_devices, int, S_IRUGO); MODULE_PARM_DESC(nr_devices, "Number of devices to register"); -static bool use_lightnvm; -module_param(use_lightnvm, bool, S_IRUGO); +static bool g_use_lightnvm; +module_param_named(use_lightnvm, g_use_lightnvm, bool, S_IRUGO); MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device"); -static bool blocking; -module_param(blocking, bool, S_IRUGO); +static bool g_blocking; +module_param_named(blocking, g_blocking, bool, S_IRUGO); MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device"); static bool shared_tags; module_param(shared_tags, bool, S_IRUGO); MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq"); -static int irqmode = NULL_IRQ_SOFTIRQ; +static int g_irqmode = NULL_IRQ_SOFTIRQ; static int null_set_irqmode(const char *str, const struct kernel_param *kp) { - return null_param_store_val(str, &irqmode, NULL_IRQ_NONE, + return null_param_store_val(str, &g_irqmode, NULL_IRQ_NONE, NULL_IRQ_TIMER); } @@ -140,21 +229,358 @@ static const struct kernel_param_ops null_irqmode_param_ops = { .get = param_get_int, }; -device_param_cb(irqmode, &null_irqmode_param_ops, &irqmode, S_IRUGO); +device_param_cb(irqmode, &null_irqmode_param_ops, &g_irqmode, S_IRUGO); MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer"); -static unsigned long completion_nsec = 10000; -module_param(completion_nsec, ulong, S_IRUGO); +static unsigned long g_completion_nsec = 10000; +module_param_named(completion_nsec, g_completion_nsec, ulong, S_IRUGO); MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns"); -static int hw_queue_depth = 64; -module_param(hw_queue_depth, int, S_IRUGO); +static int g_hw_queue_depth = 64; +module_param_named(hw_queue_depth, g_hw_queue_depth, int, S_IRUGO); MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64"); -static bool use_per_node_hctx = false; -module_param(use_per_node_hctx, bool, S_IRUGO); +static bool g_use_per_node_hctx; +module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, S_IRUGO); MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false"); +static struct nullb_device *null_alloc_dev(void); +static void null_free_dev(struct nullb_device *dev); +static void null_del_dev(struct nullb *nullb); +static int null_add_dev(struct nullb_device *dev); +static void null_free_device_storage(struct nullb_device *dev, bool is_cache); + +static inline struct nullb_device *to_nullb_device(struct config_item *item) +{ + return item ? 
container_of(item, struct nullb_device, item) : NULL; +} + +static inline ssize_t nullb_device_uint_attr_show(unsigned int val, char *page) +{ + return snprintf(page, PAGE_SIZE, "%u\n", val); +} + +static inline ssize_t nullb_device_ulong_attr_show(unsigned long val, + char *page) +{ + return snprintf(page, PAGE_SIZE, "%lu\n", val); +} + +static inline ssize_t nullb_device_bool_attr_show(bool val, char *page) +{ + return snprintf(page, PAGE_SIZE, "%u\n", val); +} + +static ssize_t nullb_device_uint_attr_store(unsigned int *val, + const char *page, size_t count) +{ + unsigned int tmp; + int result; + + result = kstrtouint(page, 0, &tmp); + if (result) + return result; + + *val = tmp; + return count; +} + +static ssize_t nullb_device_ulong_attr_store(unsigned long *val, + const char *page, size_t count) +{ + int result; + unsigned long tmp; + + result = kstrtoul(page, 0, &tmp); + if (result) + return result; + + *val = tmp; + return count; +} + +static ssize_t nullb_device_bool_attr_store(bool *val, const char *page, + size_t count) +{ + bool tmp; + int result; + + result = kstrtobool(page, &tmp); + if (result) + return result; + + *val = tmp; + return count; +} + +/* The following macro should only be used with TYPE = {uint, ulong, bool}. */ +#define NULLB_DEVICE_ATTR(NAME, TYPE) \ +static ssize_t \ +nullb_device_##NAME##_show(struct config_item *item, char *page) \ +{ \ + return nullb_device_##TYPE##_attr_show( \ + to_nullb_device(item)->NAME, page); \ +} \ +static ssize_t \ +nullb_device_##NAME##_store(struct config_item *item, const char *page, \ + size_t count) \ +{ \ + if (test_bit(NULLB_DEV_FL_CONFIGURED, &to_nullb_device(item)->flags)) \ + return -EBUSY; \ + return nullb_device_##TYPE##_attr_store( \ + &to_nullb_device(item)->NAME, page, count); \ +} \ +CONFIGFS_ATTR(nullb_device_, NAME); + +NULLB_DEVICE_ATTR(size, ulong); +NULLB_DEVICE_ATTR(completion_nsec, ulong); +NULLB_DEVICE_ATTR(submit_queues, uint); +NULLB_DEVICE_ATTR(home_node, uint); +NULLB_DEVICE_ATTR(queue_mode, uint); +NULLB_DEVICE_ATTR(blocksize, uint); +NULLB_DEVICE_ATTR(irqmode, uint); +NULLB_DEVICE_ATTR(hw_queue_depth, uint); +NULLB_DEVICE_ATTR(index, uint); +NULLB_DEVICE_ATTR(use_lightnvm, bool); +NULLB_DEVICE_ATTR(blocking, bool); +NULLB_DEVICE_ATTR(use_per_node_hctx, bool); +NULLB_DEVICE_ATTR(memory_backed, bool); +NULLB_DEVICE_ATTR(discard, bool); +NULLB_DEVICE_ATTR(mbps, uint); +NULLB_DEVICE_ATTR(cache_size, ulong); + +static ssize_t nullb_device_power_show(struct config_item *item, char *page) +{ + return nullb_device_bool_attr_show(to_nullb_device(item)->power, page); +} + +static ssize_t nullb_device_power_store(struct config_item *item, + const char *page, size_t count) +{ + struct nullb_device *dev = to_nullb_device(item); + bool newp = false; + ssize_t ret; + + ret = nullb_device_bool_attr_store(&newp, page, count); + if (ret < 0) + return ret; + + if (!dev->power && newp) { + if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags)) + return count; + if (null_add_dev(dev)) { + clear_bit(NULLB_DEV_FL_UP, &dev->flags); + return -ENOMEM; + } + + set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags); + dev->power = newp; + } else if (dev->power && !newp) { + mutex_lock(&lock); + dev->power = newp; + null_del_dev(dev->nullb); + mutex_unlock(&lock); + clear_bit(NULLB_DEV_FL_UP, &dev->flags); + } + + return count; +} + +CONFIGFS_ATTR(nullb_device_, power); + +static ssize_t nullb_device_badblocks_show(struct config_item *item, char *page) +{ + struct nullb_device *t_dev = to_nullb_device(item); + + return 
badblocks_show(&t_dev->badblocks, page, 0); +} + +static ssize_t nullb_device_badblocks_store(struct config_item *item, + const char *page, size_t count) +{ + struct nullb_device *t_dev = to_nullb_device(item); + char *orig, *buf, *tmp; + u64 start, end; + int ret; + + orig = kstrndup(page, count, GFP_KERNEL); + if (!orig) + return -ENOMEM; + + buf = strstrip(orig); + + ret = -EINVAL; + if (buf[0] != '+' && buf[0] != '-') + goto out; + tmp = strchr(&buf[1], '-'); + if (!tmp) + goto out; + *tmp = '\0'; + ret = kstrtoull(buf + 1, 0, &start); + if (ret) + goto out; + ret = kstrtoull(tmp + 1, 0, &end); + if (ret) + goto out; + ret = -EINVAL; + if (start > end) + goto out; + /* enable badblocks */ + cmpxchg(&t_dev->badblocks.shift, -1, 0); + if (buf[0] == '+') + ret = badblocks_set(&t_dev->badblocks, start, + end - start + 1, 1); + else + ret = badblocks_clear(&t_dev->badblocks, start, + end - start + 1); + if (ret == 0) + ret = count; +out: + kfree(orig); + return ret; +} +CONFIGFS_ATTR(nullb_device_, badblocks); + +static struct configfs_attribute *nullb_device_attrs[] = { + &nullb_device_attr_size, + &nullb_device_attr_completion_nsec, + &nullb_device_attr_submit_queues, + &nullb_device_attr_home_node, + &nullb_device_attr_queue_mode, + &nullb_device_attr_blocksize, + &nullb_device_attr_irqmode, + &nullb_device_attr_hw_queue_depth, + &nullb_device_attr_index, + &nullb_device_attr_use_lightnvm, + &nullb_device_attr_blocking, + &nullb_device_attr_use_per_node_hctx, + &nullb_device_attr_power, + &nullb_device_attr_memory_backed, + &nullb_device_attr_discard, + &nullb_device_attr_mbps, + &nullb_device_attr_cache_size, + &nullb_device_attr_badblocks, + NULL, +}; + +static void nullb_device_release(struct config_item *item) +{ + struct nullb_device *dev = to_nullb_device(item); + + badblocks_exit(&dev->badblocks); + null_free_device_storage(dev, false); + null_free_dev(dev); +} + +static struct configfs_item_operations nullb_device_ops = { + .release = nullb_device_release, +}; + +static struct config_item_type nullb_device_type = { + .ct_item_ops = &nullb_device_ops, + .ct_attrs = nullb_device_attrs, + .ct_owner = THIS_MODULE, +}; + +static struct +config_item *nullb_group_make_item(struct config_group *group, const char *name) +{ + struct nullb_device *dev; + + dev = null_alloc_dev(); + if (!dev) + return ERR_PTR(-ENOMEM); + + config_item_init_type_name(&dev->item, name, &nullb_device_type); + + return &dev->item; +} + +static void +nullb_group_drop_item(struct config_group *group, struct config_item *item) +{ + struct nullb_device *dev = to_nullb_device(item); + + if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) { + mutex_lock(&lock); + dev->power = false; + null_del_dev(dev->nullb); + mutex_unlock(&lock); + } + + config_item_put(item); +} + +static ssize_t memb_group_features_show(struct config_item *item, char *page) +{ + return snprintf(page, PAGE_SIZE, "memory_backed,discard,bandwidth,cache,badblocks\n"); +} + +CONFIGFS_ATTR_RO(memb_group_, features); + +static struct configfs_attribute *nullb_group_attrs[] = { + &memb_group_attr_features, + NULL, +}; + +static struct configfs_group_operations nullb_group_ops = { + .make_item = nullb_group_make_item, + .drop_item = nullb_group_drop_item, +}; + +static struct config_item_type nullb_group_type = { + .ct_group_ops = &nullb_group_ops, + .ct_attrs = nullb_group_attrs, + .ct_owner = THIS_MODULE, +}; + +static struct configfs_subsystem nullb_subsys = { + .su_group = { + .cg_item = { + .ci_namebuf = "nullb", + .ci_type = &nullb_group_type, + 
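The configfs pieces above are the whole user-space interface: creating a directory under the nullb subsystem allocates a nullb_device, the NULLB_DEVICE_ATTR files refuse writes with -EBUSY once the device is configured, badblocks accepts "+start-end" to mark and "-start-end" to clear an inclusive sector range, and writing 1 to power calls null_add_dev(), after which the disk typically shows up as /dev/nullb<index>. A rough sketch, assuming configfs is mounted at the conventional /sys/kernel/config and using an arbitrary directory name:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

static void set_attr(const char *path, const char *val)
{
        int fd = open(path, O_WRONLY);

        if (fd < 0 || write(fd, val, strlen(val)) < 0)
                perror(path);
        if (fd >= 0)
                close(fd);
}

int main(void)
{
        /* directory name is arbitrary; each mkdir creates one nullb_device */
        if (mkdir("/sys/kernel/config/nullb/demo", 0755) && errno != EEXIST) {
                perror("mkdir");
                return 1;
        }
        set_attr("/sys/kernel/config/nullb/demo/memory_backed", "1");
        set_attr("/sys/kernel/config/nullb/demo/blocksize", "4096");
        set_attr("/sys/kernel/config/nullb/demo/size", "1024");         /* in MB */
        set_attr("/sys/kernel/config/nullb/demo/badblocks", "+0-15");   /* sectors 0..15 report errors */
        set_attr("/sys/kernel/config/nullb/demo/power", "1");           /* triggers null_add_dev() */
        return 0;
}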
}, + }, +}; + +static inline int null_cache_active(struct nullb *nullb) +{ + return test_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags); +} + +static struct nullb_device *null_alloc_dev(void) +{ + struct nullb_device *dev; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return NULL; + INIT_RADIX_TREE(&dev->data, GFP_ATOMIC); + INIT_RADIX_TREE(&dev->cache, GFP_ATOMIC); + if (badblocks_init(&dev->badblocks, 0)) { + kfree(dev); + return NULL; + } + + dev->size = g_gb * 1024; + dev->completion_nsec = g_completion_nsec; + dev->submit_queues = g_submit_queues; + dev->home_node = g_home_node; + dev->queue_mode = g_queue_mode; + dev->blocksize = g_bs; + dev->irqmode = g_irqmode; + dev->hw_queue_depth = g_hw_queue_depth; + dev->use_lightnvm = g_use_lightnvm; + dev->blocking = g_blocking; + dev->use_per_node_hctx = g_use_per_node_hctx; + return dev; +} + +static void null_free_dev(struct nullb_device *dev) +{ + kfree(dev); +} + static void put_tag(struct nullb_queue *nq, unsigned int tag) { clear_bit_unlock(tag, nq->tag_map); @@ -193,7 +619,7 @@ static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq) cmd = &nq->cmds[tag]; cmd->tag = tag; cmd->nq = nq; - if (irqmode == NULL_IRQ_TIMER) { + if (nq->dev->irqmode == NULL_IRQ_TIMER) { hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); cmd->timer.function = null_cmd_timer_expired; @@ -229,19 +655,21 @@ static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait) static void end_cmd(struct nullb_cmd *cmd) { struct request_queue *q = NULL; + int queue_mode = cmd->nq->dev->queue_mode; if (cmd->rq) q = cmd->rq->q; switch (queue_mode) { case NULL_Q_MQ: - blk_mq_end_request(cmd->rq, BLK_STS_OK); + blk_mq_end_request(cmd->rq, cmd->error); return; case NULL_Q_RQ: INIT_LIST_HEAD(&cmd->rq->queuelist); - blk_end_request_all(cmd->rq, BLK_STS_OK); + blk_end_request_all(cmd->rq, cmd->error); break; case NULL_Q_BIO: + cmd->bio->bi_status = cmd->error; bio_endio(cmd->bio); break; } @@ -267,25 +695,582 @@ static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer) static void null_cmd_end_timer(struct nullb_cmd *cmd) { - ktime_t kt = completion_nsec; + ktime_t kt = cmd->nq->dev->completion_nsec; hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL); } static void null_softirq_done_fn(struct request *rq) { - if (queue_mode == NULL_Q_MQ) + struct nullb *nullb = rq->q->queuedata; + + if (nullb->dev->queue_mode == NULL_Q_MQ) end_cmd(blk_mq_rq_to_pdu(rq)); else end_cmd(rq->special); } -static inline void null_handle_cmd(struct nullb_cmd *cmd) +static struct nullb_page *null_alloc_page(gfp_t gfp_flags) { + struct nullb_page *t_page; + + t_page = kmalloc(sizeof(struct nullb_page), gfp_flags); + if (!t_page) + goto out; + + t_page->page = alloc_pages(gfp_flags, 0); + if (!t_page->page) + goto out_freepage; + + t_page->bitmap = 0; + return t_page; +out_freepage: + kfree(t_page); +out: + return NULL; +} + +static void null_free_page(struct nullb_page *t_page) +{ + __set_bit(NULLB_PAGE_FREE, &t_page->bitmap); + if (test_bit(NULLB_PAGE_LOCK, &t_page->bitmap)) + return; + __free_page(t_page->page); + kfree(t_page); +} + +static void null_free_sector(struct nullb *nullb, sector_t sector, + bool is_cache) +{ + unsigned int sector_bit; + u64 idx; + struct nullb_page *t_page, *ret; + struct radix_tree_root *root; + + root = is_cache ? 
&nullb->dev->cache : &nullb->dev->data; + idx = sector >> PAGE_SECTORS_SHIFT; + sector_bit = (sector & SECTOR_MASK); + + t_page = radix_tree_lookup(root, idx); + if (t_page) { + __clear_bit(sector_bit, &t_page->bitmap); + + if (!t_page->bitmap) { + ret = radix_tree_delete_item(root, idx, t_page); + WARN_ON(ret != t_page); + null_free_page(ret); + if (is_cache) + nullb->dev->curr_cache -= PAGE_SIZE; + } + } +} + +static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx, + struct nullb_page *t_page, bool is_cache) +{ + struct radix_tree_root *root; + + root = is_cache ? &nullb->dev->cache : &nullb->dev->data; + + if (radix_tree_insert(root, idx, t_page)) { + null_free_page(t_page); + t_page = radix_tree_lookup(root, idx); + WARN_ON(!t_page || t_page->page->index != idx); + } else if (is_cache) + nullb->dev->curr_cache += PAGE_SIZE; + + return t_page; +} + +static void null_free_device_storage(struct nullb_device *dev, bool is_cache) +{ + unsigned long pos = 0; + int nr_pages; + struct nullb_page *ret, *t_pages[FREE_BATCH]; + struct radix_tree_root *root; + + root = is_cache ? &dev->cache : &dev->data; + + do { + int i; + + nr_pages = radix_tree_gang_lookup(root, + (void **)t_pages, pos, FREE_BATCH); + + for (i = 0; i < nr_pages; i++) { + pos = t_pages[i]->page->index; + ret = radix_tree_delete_item(root, pos, t_pages[i]); + WARN_ON(ret != t_pages[i]); + null_free_page(ret); + } + + pos++; + } while (nr_pages == FREE_BATCH); + + if (is_cache) + dev->curr_cache = 0; +} + +static struct nullb_page *__null_lookup_page(struct nullb *nullb, + sector_t sector, bool for_write, bool is_cache) +{ + unsigned int sector_bit; + u64 idx; + struct nullb_page *t_page; + struct radix_tree_root *root; + + idx = sector >> PAGE_SECTORS_SHIFT; + sector_bit = (sector & SECTOR_MASK); + + root = is_cache ? 
&nullb->dev->cache : &nullb->dev->data; + t_page = radix_tree_lookup(root, idx); + WARN_ON(t_page && t_page->page->index != idx); + + if (t_page && (for_write || test_bit(sector_bit, &t_page->bitmap))) + return t_page; + + return NULL; +} + +static struct nullb_page *null_lookup_page(struct nullb *nullb, + sector_t sector, bool for_write, bool ignore_cache) +{ + struct nullb_page *page = NULL; + + if (!ignore_cache) + page = __null_lookup_page(nullb, sector, for_write, true); + if (page) + return page; + return __null_lookup_page(nullb, sector, for_write, false); +} + +static struct nullb_page *null_insert_page(struct nullb *nullb, + sector_t sector, bool ignore_cache) +{ + u64 idx; + struct nullb_page *t_page; + + t_page = null_lookup_page(nullb, sector, true, ignore_cache); + if (t_page) + return t_page; + + spin_unlock_irq(&nullb->lock); + + t_page = null_alloc_page(GFP_NOIO); + if (!t_page) + goto out_lock; + + if (radix_tree_preload(GFP_NOIO)) + goto out_freepage; + + spin_lock_irq(&nullb->lock); + idx = sector >> PAGE_SECTORS_SHIFT; + t_page->page->index = idx; + t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache); + radix_tree_preload_end(); + + return t_page; +out_freepage: + null_free_page(t_page); +out_lock: + spin_lock_irq(&nullb->lock); + return null_lookup_page(nullb, sector, true, ignore_cache); +} + +static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page) +{ + int i; + unsigned int offset; + u64 idx; + struct nullb_page *t_page, *ret; + void *dst, *src; + + idx = c_page->page->index; + + t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true); + + __clear_bit(NULLB_PAGE_LOCK, &c_page->bitmap); + if (test_bit(NULLB_PAGE_FREE, &c_page->bitmap)) { + null_free_page(c_page); + if (t_page && t_page->bitmap == 0) { + ret = radix_tree_delete_item(&nullb->dev->data, + idx, t_page); + null_free_page(t_page); + } + return 0; + } + + if (!t_page) + return -ENOMEM; + + src = kmap_atomic(c_page->page); + dst = kmap_atomic(t_page->page); + + for (i = 0; i < PAGE_SECTORS; + i += (nullb->dev->blocksize >> SECTOR_SHIFT)) { + if (test_bit(i, &c_page->bitmap)) { + offset = (i << SECTOR_SHIFT); + memcpy(dst + offset, src + offset, + nullb->dev->blocksize); + __set_bit(i, &t_page->bitmap); + } + } + + kunmap_atomic(dst); + kunmap_atomic(src); + + ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page); + null_free_page(ret); + nullb->dev->curr_cache -= PAGE_SIZE; + + return 0; +} + +static int null_make_cache_space(struct nullb *nullb, unsigned long n) +{ + int i, err, nr_pages; + struct nullb_page *c_pages[FREE_BATCH]; + unsigned long flushed = 0, one_round; + +again: + if ((nullb->dev->cache_size * 1024 * 1024) > + nullb->dev->curr_cache + n || nullb->dev->curr_cache == 0) + return 0; + + nr_pages = radix_tree_gang_lookup(&nullb->dev->cache, + (void **)c_pages, nullb->cache_flush_pos, FREE_BATCH); + /* + * nullb_flush_cache_page could unlock before using the c_pages. 
To + * avoid race, we don't allow page free + */ + for (i = 0; i < nr_pages; i++) { + nullb->cache_flush_pos = c_pages[i]->page->index; + /* + * We found the page which is being flushed to disk by other + * threads + */ + if (test_bit(NULLB_PAGE_LOCK, &c_pages[i]->bitmap)) + c_pages[i] = NULL; + else + __set_bit(NULLB_PAGE_LOCK, &c_pages[i]->bitmap); + } + + one_round = 0; + for (i = 0; i < nr_pages; i++) { + if (c_pages[i] == NULL) + continue; + err = null_flush_cache_page(nullb, c_pages[i]); + if (err) + return err; + one_round++; + } + flushed += one_round << PAGE_SHIFT; + + if (n > flushed) { + if (nr_pages == 0) + nullb->cache_flush_pos = 0; + if (one_round == 0) { + /* give other threads a chance */ + spin_unlock_irq(&nullb->lock); + spin_lock_irq(&nullb->lock); + } + goto again; + } + return 0; +} + +static int copy_to_nullb(struct nullb *nullb, struct page *source, + unsigned int off, sector_t sector, size_t n, bool is_fua) +{ + size_t temp, count = 0; + unsigned int offset; + struct nullb_page *t_page; + void *dst, *src; + + while (count < n) { + temp = min_t(size_t, nullb->dev->blocksize, n - count); + + if (null_cache_active(nullb) && !is_fua) + null_make_cache_space(nullb, PAGE_SIZE); + + offset = (sector & SECTOR_MASK) << SECTOR_SHIFT; + t_page = null_insert_page(nullb, sector, + !null_cache_active(nullb) || is_fua); + if (!t_page) + return -ENOSPC; + + src = kmap_atomic(source); + dst = kmap_atomic(t_page->page); + memcpy(dst + offset, src + off + count, temp); + kunmap_atomic(dst); + kunmap_atomic(src); + + __set_bit(sector & SECTOR_MASK, &t_page->bitmap); + + if (is_fua) + null_free_sector(nullb, sector, true); + + count += temp; + sector += temp >> SECTOR_SHIFT; + } + return 0; +} + +static int copy_from_nullb(struct nullb *nullb, struct page *dest, + unsigned int off, sector_t sector, size_t n) +{ + size_t temp, count = 0; + unsigned int offset; + struct nullb_page *t_page; + void *dst, *src; + + while (count < n) { + temp = min_t(size_t, nullb->dev->blocksize, n - count); + + offset = (sector & SECTOR_MASK) << SECTOR_SHIFT; + t_page = null_lookup_page(nullb, sector, false, + !null_cache_active(nullb)); + + dst = kmap_atomic(dest); + if (!t_page) { + memset(dst + off + count, 0, temp); + goto next; + } + src = kmap_atomic(t_page->page); + memcpy(dst + off + count, src + offset, temp); + kunmap_atomic(src); +next: + kunmap_atomic(dst); + + count += temp; + sector += temp >> SECTOR_SHIFT; + } + return 0; +} + +static void null_handle_discard(struct nullb *nullb, sector_t sector, size_t n) +{ + size_t temp; + + spin_lock_irq(&nullb->lock); + while (n > 0) { + temp = min_t(size_t, n, nullb->dev->blocksize); + null_free_sector(nullb, sector, false); + if (null_cache_active(nullb)) + null_free_sector(nullb, sector, true); + sector += temp >> SECTOR_SHIFT; + n -= temp; + } + spin_unlock_irq(&nullb->lock); +} + +static int null_handle_flush(struct nullb *nullb) +{ + int err; + + if (!null_cache_active(nullb)) + return 0; + + spin_lock_irq(&nullb->lock); + while (true) { + err = null_make_cache_space(nullb, + nullb->dev->cache_size * 1024 * 1024); + if (err || nullb->dev->curr_cache == 0) + break; + } + + WARN_ON(!radix_tree_empty(&nullb->dev->cache)); + spin_unlock_irq(&nullb->lock); + return err; +} + +static int null_transfer(struct nullb *nullb, struct page *page, + unsigned int len, unsigned int off, bool is_write, sector_t sector, + bool is_fua) +{ + int err = 0; + + if (!is_write) { + err = copy_from_nullb(nullb, page, off, sector, len); + flush_dcache_page(page); + } else 
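null_make_cache_space() above starts flushing only when admitting n more bytes would overflow cache_size; it then walks the cache tree from cache_flush_pos in FREE_BATCH batches, skips pages another thread already holds LOCKed, and loops until enough has been written back into the data tree. Its opening admission check, restated as a standalone helper:

#include <stdbool.h>
#include <stdio.h>

/* mirrors: (cache_size * 1024 * 1024) > curr_cache + n || curr_cache == 0 */
static bool cache_has_room(unsigned long cache_size_mb,
                           unsigned long curr_cache_bytes, unsigned long n)
{
        return cache_size_mb * 1024 * 1024 > curr_cache_bytes + n ||
               curr_cache_bytes == 0;
}

int main(void)
{
        /* a 2 MB cache that is already full: a 4 KiB write must flush first */
        printf("%s\n", cache_has_room(2, 2UL << 20, 4096) ?
               "no flush needed" : "flush first");
        return 0;
}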
{ + flush_dcache_page(page); + err = copy_to_nullb(nullb, page, off, sector, len, is_fua); + } + + return err; +} + +static int null_handle_rq(struct nullb_cmd *cmd) +{ + struct request *rq = cmd->rq; + struct nullb *nullb = cmd->nq->dev->nullb; + int err; + unsigned int len; + sector_t sector; + struct req_iterator iter; + struct bio_vec bvec; + + sector = blk_rq_pos(rq); + + if (req_op(rq) == REQ_OP_DISCARD) { + null_handle_discard(nullb, sector, blk_rq_bytes(rq)); + return 0; + } + + spin_lock_irq(&nullb->lock); + rq_for_each_segment(bvec, rq, iter) { + len = bvec.bv_len; + err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset, + op_is_write(req_op(rq)), sector, + req_op(rq) & REQ_FUA); + if (err) { + spin_unlock_irq(&nullb->lock); + return err; + } + sector += len >> SECTOR_SHIFT; + } + spin_unlock_irq(&nullb->lock); + + return 0; +} + +static int null_handle_bio(struct nullb_cmd *cmd) +{ + struct bio *bio = cmd->bio; + struct nullb *nullb = cmd->nq->dev->nullb; + int err; + unsigned int len; + sector_t sector; + struct bio_vec bvec; + struct bvec_iter iter; + + sector = bio->bi_iter.bi_sector; + + if (bio_op(bio) == REQ_OP_DISCARD) { + null_handle_discard(nullb, sector, + bio_sectors(bio) << SECTOR_SHIFT); + return 0; + } + + spin_lock_irq(&nullb->lock); + bio_for_each_segment(bvec, bio, iter) { + len = bvec.bv_len; + err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset, + op_is_write(bio_op(bio)), sector, + bio_op(bio) & REQ_FUA); + if (err) { + spin_unlock_irq(&nullb->lock); + return err; + } + sector += len >> SECTOR_SHIFT; + } + spin_unlock_irq(&nullb->lock); + return 0; +} + +static void null_stop_queue(struct nullb *nullb) +{ + struct request_queue *q = nullb->q; + + if (nullb->dev->queue_mode == NULL_Q_MQ) + blk_mq_stop_hw_queues(q); + else { + spin_lock_irq(q->queue_lock); + blk_stop_queue(q); + spin_unlock_irq(q->queue_lock); + } +} + +static void null_restart_queue_async(struct nullb *nullb) +{ + struct request_queue *q = nullb->q; + unsigned long flags; + + if (nullb->dev->queue_mode == NULL_Q_MQ) + blk_mq_start_stopped_hw_queues(q, true); + else { + spin_lock_irqsave(q->queue_lock, flags); + blk_start_queue_async(q); + spin_unlock_irqrestore(q->queue_lock, flags); + } +} + +static blk_status_t null_handle_cmd(struct nullb_cmd *cmd) +{ + struct nullb_device *dev = cmd->nq->dev; + struct nullb *nullb = dev->nullb; + int err = 0; + + if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) { + struct request *rq = cmd->rq; + + if (!hrtimer_active(&nullb->bw_timer)) + hrtimer_restart(&nullb->bw_timer); + + if (atomic_long_sub_return(blk_rq_bytes(rq), + &nullb->cur_bytes) < 0) { + null_stop_queue(nullb); + /* race with timer */ + if (atomic_long_read(&nullb->cur_bytes) > 0) + null_restart_queue_async(nullb); + if (dev->queue_mode == NULL_Q_RQ) { + struct request_queue *q = nullb->q; + + spin_lock_irq(q->queue_lock); + rq->rq_flags |= RQF_DONTPREP; + blk_requeue_request(q, rq); + spin_unlock_irq(q->queue_lock); + return BLK_STS_OK; + } else + /* requeue request */ + return BLK_STS_RESOURCE; + } + } + + if (nullb->dev->badblocks.shift != -1) { + int bad_sectors; + sector_t sector, size, first_bad; + bool is_flush = true; + + if (dev->queue_mode == NULL_Q_BIO && + bio_op(cmd->bio) != REQ_OP_FLUSH) { + is_flush = false; + sector = cmd->bio->bi_iter.bi_sector; + size = bio_sectors(cmd->bio); + } + if (dev->queue_mode != NULL_Q_BIO && + req_op(cmd->rq) != REQ_OP_FLUSH) { + is_flush = false; + sector = blk_rq_pos(cmd->rq); + size = blk_rq_sectors(cmd->rq); + } + if (!is_flush 
&& badblocks_check(&nullb->dev->badblocks, sector, + size, &first_bad, &bad_sectors)) { + cmd->error = BLK_STS_IOERR; + goto out; + } + } + + if (dev->memory_backed) { + if (dev->queue_mode == NULL_Q_BIO) { + if (bio_op(cmd->bio) == REQ_OP_FLUSH) + err = null_handle_flush(nullb); + else + err = null_handle_bio(cmd); + } else { + if (req_op(cmd->rq) == REQ_OP_FLUSH) + err = null_handle_flush(nullb); + else + err = null_handle_rq(cmd); + } + } + cmd->error = errno_to_blk_status(err); +out: /* Complete IO by inline, softirq or timer */ - switch (irqmode) { + switch (dev->irqmode) { case NULL_IRQ_SOFTIRQ: - switch (queue_mode) { + switch (dev->queue_mode) { case NULL_Q_MQ: blk_mq_complete_request(cmd->rq); break; @@ -307,6 +1292,34 @@ static inline void null_handle_cmd(struct nullb_cmd *cmd) null_cmd_end_timer(cmd); break; } + return BLK_STS_OK; +} + +static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer) +{ + struct nullb *nullb = container_of(timer, struct nullb, bw_timer); + ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL); + unsigned int mbps = nullb->dev->mbps; + + if (atomic_long_read(&nullb->cur_bytes) == mb_per_tick(mbps)) + return HRTIMER_NORESTART; + + atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps)); + null_restart_queue_async(nullb); + + hrtimer_forward_now(&nullb->bw_timer, timer_interval); + + return HRTIMER_RESTART; +} + +static void nullb_setup_bwtimer(struct nullb *nullb) +{ + ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL); + + hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + nullb->bw_timer.function = nullb_bwtimer_fn; + atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps)); + hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL); } static struct nullb_queue *nullb_to_queue(struct nullb *nullb) @@ -366,20 +1379,20 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd) { struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq); + struct nullb_queue *nq = hctx->driver_data; might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING); - if (irqmode == NULL_IRQ_TIMER) { + if (nq->dev->irqmode == NULL_IRQ_TIMER) { hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); cmd->timer.function = null_cmd_timer_expired; } cmd->rq = bd->rq; - cmd->nq = hctx->driver_data; + cmd->nq = nq; blk_mq_start_request(bd->rq); - null_handle_cmd(cmd); - return BLK_STS_OK; + return null_handle_cmd(cmd); } static const struct blk_mq_ops null_mq_ops = { @@ -438,7 +1451,8 @@ static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd) static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id) { - sector_t size = gb * 1024 * 1024 * 1024ULL; + struct nullb *nullb = dev->q->queuedata; + sector_t size = (sector_t)nullb->dev->size * 1024 * 1024ULL; sector_t blksize; struct nvm_id_group *grp; @@ -460,7 +1474,7 @@ static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id) id->ppaf.ch_offset = 56; id->ppaf.ch_len = 8; - sector_div(size, bs); /* convert size to pages */ + sector_div(size, nullb->dev->blocksize); /* convert size to pages */ size >>= 8; /* concert size to pgs pr blk */ grp = &id->grp; grp->mtype = 0; @@ -474,8 +1488,8 @@ static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id) grp->num_blk = blksize; grp->num_pln = 1; - grp->fpg_sz = bs; - grp->csecs = bs; + grp->fpg_sz = nullb->dev->blocksize; + grp->csecs = nullb->dev->blocksize; grp->trdt = 25000; grp->trdm = 25000; grp->tprt = 500000; @@ -483,7 +1497,7 @@ static int null_lnvm_id(struct nvm_dev *dev, struct 
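The throttling above is a simple per-tick byte budget: nullb_setup_bwtimer() arms a 20 ms timer (TICKS_PER_SEC is 50), null_handle_cmd() subtracts each request's bytes from cur_bytes and stops the queue once the budget goes negative, and nullb_bwtimer_fn() refills the budget to mb_per_tick(mbps) and restarts the queue. The refill arithmetic on its own:

#include <stdio.h>

#define TICKS_PER_SEC 50ULL                     /* so TIMER_INTERVAL is 20 ms */

/* mirrors mb_per_tick() from the hunk above */
static unsigned long long mb_per_tick(int mbps)
{
        return (1ULL << 20) / TICKS_PER_SEC * (unsigned long long)mbps;
}

int main(void)
{
        int mbps = 100;

        printf("%d MB/s cap -> refill cur_bytes to %llu bytes every %llu ms\n",
               mbps, mb_per_tick(mbps), 1000 / TICKS_PER_SEC);
        return 0;
}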
nvm_id *id) grp->tbet = 1500000; grp->tbem = 1500000; grp->mpos = 0x010101; /* single plane rwe */ - grp->cpar = hw_queue_depth; + grp->cpar = nullb->dev->hw_queue_depth; return 0; } @@ -568,19 +1582,44 @@ static void null_nvm_unregister(struct nullb *nullb) {} static void null_del_dev(struct nullb *nullb) { + struct nullb_device *dev = nullb->dev; + + ida_simple_remove(&nullb_indexes, nullb->index); + list_del_init(&nullb->list); - if (use_lightnvm) + if (dev->use_lightnvm) null_nvm_unregister(nullb); else del_gendisk(nullb->disk); + + if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) { + hrtimer_cancel(&nullb->bw_timer); + atomic_long_set(&nullb->cur_bytes, LONG_MAX); + null_restart_queue_async(nullb); + } + blk_cleanup_queue(nullb->q); - if (queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set) + if (dev->queue_mode == NULL_Q_MQ && + nullb->tag_set == &nullb->__tag_set) blk_mq_free_tag_set(nullb->tag_set); - if (!use_lightnvm) + if (!dev->use_lightnvm) put_disk(nullb->disk); cleanup_queues(nullb); + if (null_cache_active(nullb)) + null_free_device_storage(nullb->dev, true); kfree(nullb); + dev->nullb = NULL; +} + +static void null_config_discard(struct nullb *nullb) +{ + if (nullb->dev->discard == false) + return; + nullb->q->limits.discard_granularity = nullb->dev->blocksize; + nullb->q->limits.discard_alignment = nullb->dev->blocksize; + blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9); + queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nullb->q); } static int null_open(struct block_device *bdev, fmode_t mode) @@ -605,6 +1644,7 @@ static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq) init_waitqueue_head(&nq->wait); nq->queue_depth = nullb->queue_depth; + nq->dev = nullb->dev; } static void null_init_queues(struct nullb *nullb) @@ -652,13 +1692,13 @@ static int setup_commands(struct nullb_queue *nq) static int setup_queues(struct nullb *nullb) { - nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue), - GFP_KERNEL); + nullb->queues = kzalloc(nullb->dev->submit_queues * + sizeof(struct nullb_queue), GFP_KERNEL); if (!nullb->queues) return -ENOMEM; nullb->nr_queues = 0; - nullb->queue_depth = hw_queue_depth; + nullb->queue_depth = nullb->dev->hw_queue_depth; return 0; } @@ -668,7 +1708,7 @@ static int init_driver_queues(struct nullb *nullb) struct nullb_queue *nq; int i, ret = 0; - for (i = 0; i < submit_queues; i++) { + for (i = 0; i < nullb->dev->submit_queues; i++) { nq = &nullb->queues[i]; null_init_queue(nullb, nq); @@ -686,10 +1726,10 @@ static int null_gendisk_register(struct nullb *nullb) struct gendisk *disk; sector_t size; - disk = nullb->disk = alloc_disk_node(1, home_node); + disk = nullb->disk = alloc_disk_node(1, nullb->dev->home_node); if (!disk) return -ENOMEM; - size = gb * 1024 * 1024 * 1024ULL; + size = (sector_t)nullb->dev->size * 1024 * 1024ULL; set_capacity(disk, size >> 9); disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO; @@ -704,49 +1744,86 @@ static int null_gendisk_register(struct nullb *nullb) return 0; } -static int null_init_tag_set(struct blk_mq_tag_set *set) +static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set) { set->ops = &null_mq_ops; - set->nr_hw_queues = submit_queues; - set->queue_depth = hw_queue_depth; - set->numa_node = home_node; + set->nr_hw_queues = nullb ? nullb->dev->submit_queues : + g_submit_queues; + set->queue_depth = nullb ? nullb->dev->hw_queue_depth : + g_hw_queue_depth; + set->numa_node = nullb ? 
nullb->dev->home_node : g_home_node; set->cmd_size = sizeof(struct nullb_cmd); set->flags = BLK_MQ_F_SHOULD_MERGE; set->driver_data = NULL; - if (blocking) + if ((nullb && nullb->dev->blocking) || g_blocking) set->flags |= BLK_MQ_F_BLOCKING; return blk_mq_alloc_tag_set(set); } -static int null_add_dev(void) +static void null_validate_conf(struct nullb_device *dev) +{ + dev->blocksize = round_down(dev->blocksize, 512); + dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096); + if (dev->use_lightnvm && dev->blocksize != 4096) + dev->blocksize = 4096; + + if (dev->use_lightnvm && dev->queue_mode != NULL_Q_MQ) + dev->queue_mode = NULL_Q_MQ; + + if (dev->queue_mode == NULL_Q_MQ && dev->use_per_node_hctx) { + if (dev->submit_queues != nr_online_nodes) + dev->submit_queues = nr_online_nodes; + } else if (dev->submit_queues > nr_cpu_ids) + dev->submit_queues = nr_cpu_ids; + else if (dev->submit_queues == 0) + dev->submit_queues = 1; + + dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ); + dev->irqmode = min_t(unsigned int, dev->irqmode, NULL_IRQ_TIMER); + + /* Do memory allocation, so set blocking */ + if (dev->memory_backed) + dev->blocking = true; + else /* cache is meaningless */ + dev->cache_size = 0; + dev->cache_size = min_t(unsigned long, ULONG_MAX / 1024 / 1024, + dev->cache_size); + dev->mbps = min_t(unsigned int, 1024 * 40, dev->mbps); + /* can not stop a queue */ + if (dev->queue_mode == NULL_Q_BIO) + dev->mbps = 0; +} + +static int null_add_dev(struct nullb_device *dev) { struct nullb *nullb; int rv; - nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node); + null_validate_conf(dev); + + nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node); if (!nullb) { rv = -ENOMEM; goto out; } + nullb->dev = dev; + dev->nullb = nullb; spin_lock_init(&nullb->lock); - if (queue_mode == NULL_Q_MQ && use_per_node_hctx) - submit_queues = nr_online_nodes; - rv = setup_queues(nullb); if (rv) goto out_free_nullb; - if (queue_mode == NULL_Q_MQ) { + if (dev->queue_mode == NULL_Q_MQ) { if (shared_tags) { nullb->tag_set = &tag_set; rv = 0; } else { nullb->tag_set = &nullb->__tag_set; - rv = null_init_tag_set(nullb->tag_set); + rv = null_init_tag_set(nullb, nullb->tag_set); } if (rv) @@ -758,8 +1835,8 @@ static int null_add_dev(void) goto out_cleanup_tags; } null_init_queues(nullb); - } else if (queue_mode == NULL_Q_BIO) { - nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node); + } else if (dev->queue_mode == NULL_Q_BIO) { + nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node); if (!nullb->q) { rv = -ENOMEM; goto out_cleanup_queues; @@ -769,7 +1846,8 @@ static int null_add_dev(void) if (rv) goto out_cleanup_blk_queue; } else { - nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node); + nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, + dev->home_node); if (!nullb->q) { rv = -ENOMEM; goto out_cleanup_queues; @@ -781,20 +1859,34 @@ static int null_add_dev(void) goto out_cleanup_blk_queue; } + if (dev->mbps) { + set_bit(NULLB_DEV_FL_THROTTLED, &dev->flags); + nullb_setup_bwtimer(nullb); + } + + if (dev->cache_size > 0) { + set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags); + blk_queue_write_cache(nullb->q, true, true); + blk_queue_flush_queueable(nullb->q, true); + } + nullb->q->queuedata = nullb; queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q); queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q); mutex_lock(&lock); - nullb->index = nullb_indexes++; + nullb->index = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL); + 
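null_validate_conf() above silently normalizes whatever was configured instead of rejecting it: blocksize is rounded down to a 512-byte multiple and clamped to 512..4096, submit_queues is bounded by the node/CPU counts, memory_backed forces blocking mode, cache_size is dropped when there is no memory backing, and a BIO-mode device cannot be throttled because its queue cannot be stopped. The blocksize normalization as a standalone sketch:

#include <stdio.h>

/* mirrors round_down(bs, 512) followed by clamp_t(..., 512, 4096) */
static unsigned int normalize_bs(unsigned int bs)
{
        bs &= ~511u;                    /* round down to a 512-byte multiple */
        if (bs < 512)
                bs = 512;
        if (bs > 4096)
                bs = 4096;
        return bs;
}

int main(void)
{
        unsigned int in[] = { 100, 520, 4096, 8192 };
        unsigned int i;

        for (i = 0; i < sizeof(in) / sizeof(in[0]); i++)
                printf("%u -> %u\n", in[i], normalize_bs(in[i]));
        return 0;
}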
dev->index = nullb->index; mutex_unlock(&lock); - blk_queue_logical_block_size(nullb->q, bs); - blk_queue_physical_block_size(nullb->q, bs); + blk_queue_logical_block_size(nullb->q, dev->blocksize); + blk_queue_physical_block_size(nullb->q, dev->blocksize); + + null_config_discard(nullb); sprintf(nullb->disk_name, "nullb%d", nullb->index); - if (use_lightnvm) + if (dev->use_lightnvm) rv = null_nvm_register(nullb); else rv = null_gendisk_register(nullb); @@ -810,7 +1902,7 @@ static int null_add_dev(void) out_cleanup_blk_queue: blk_cleanup_queue(nullb->q); out_cleanup_tags: - if (queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set) + if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set) blk_mq_free_tag_set(nullb->tag_set); out_cleanup_queues: cleanup_queues(nullb); @@ -825,51 +1917,63 @@ static int __init null_init(void) int ret = 0; unsigned int i; struct nullb *nullb; + struct nullb_device *dev; - if (bs > PAGE_SIZE) { + /* check for nullb_page.bitmap */ + if (sizeof(unsigned long) * 8 - 2 < (PAGE_SIZE >> SECTOR_SHIFT)) + return -EINVAL; + + if (g_bs > PAGE_SIZE) { pr_warn("null_blk: invalid block size\n"); pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE); - bs = PAGE_SIZE; + g_bs = PAGE_SIZE; } - if (use_lightnvm && bs != 4096) { + if (g_use_lightnvm && g_bs != 4096) { pr_warn("null_blk: LightNVM only supports 4k block size\n"); pr_warn("null_blk: defaults block size to 4k\n"); - bs = 4096; + g_bs = 4096; } - if (use_lightnvm && queue_mode != NULL_Q_MQ) { + if (g_use_lightnvm && g_queue_mode != NULL_Q_MQ) { pr_warn("null_blk: LightNVM only supported for blk-mq\n"); pr_warn("null_blk: defaults queue mode to blk-mq\n"); - queue_mode = NULL_Q_MQ; + g_queue_mode = NULL_Q_MQ; } - if (queue_mode == NULL_Q_MQ && use_per_node_hctx) { - if (submit_queues < nr_online_nodes) { - pr_warn("null_blk: submit_queues param is set to %u.", + if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) { + if (g_submit_queues != nr_online_nodes) { + pr_warn("null_blk: submit_queues param is set to %u.\n", nr_online_nodes); - submit_queues = nr_online_nodes; + g_submit_queues = nr_online_nodes; } - } else if (submit_queues > nr_cpu_ids) - submit_queues = nr_cpu_ids; - else if (!submit_queues) - submit_queues = 1; + } else if (g_submit_queues > nr_cpu_ids) + g_submit_queues = nr_cpu_ids; + else if (g_submit_queues <= 0) + g_submit_queues = 1; - if (queue_mode == NULL_Q_MQ && shared_tags) { - ret = null_init_tag_set(&tag_set); + if (g_queue_mode == NULL_Q_MQ && shared_tags) { + ret = null_init_tag_set(NULL, &tag_set); if (ret) return ret; } + config_group_init(&nullb_subsys.su_group); + mutex_init(&nullb_subsys.su_mutex); + + ret = configfs_register_subsystem(&nullb_subsys); + if (ret) + goto err_tagset; + mutex_init(&lock); null_major = register_blkdev(0, "nullb"); if (null_major < 0) { ret = null_major; - goto err_tagset; + goto err_conf; } - if (use_lightnvm) { + if (g_use_lightnvm) { ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64), 0, 0, NULL); if (!ppa_cache) { @@ -880,9 +1984,14 @@ static int __init null_init(void) } for (i = 0; i < nr_devices; i++) { - ret = null_add_dev(); - if (ret) + dev = null_alloc_dev(); + if (!dev) goto err_dev; + ret = null_add_dev(dev); + if (ret) { + null_free_dev(dev); + goto err_dev; + } } pr_info("null: module loaded\n"); @@ -891,13 +2000,17 @@ static int __init null_init(void) err_dev: while (!list_empty(&nullb_list)) { nullb = list_entry(nullb_list.next, struct nullb, list); + dev = nullb->dev; null_del_dev(nullb); + 
null_free_dev(dev); } kmem_cache_destroy(ppa_cache); err_ppa: unregister_blkdev(null_major, "nullb"); +err_conf: + configfs_unregister_subsystem(&nullb_subsys); err_tagset: - if (queue_mode == NULL_Q_MQ && shared_tags) + if (g_queue_mode == NULL_Q_MQ && shared_tags) blk_mq_free_tag_set(&tag_set); return ret; } @@ -906,16 +2019,22 @@ static void __exit null_exit(void) { struct nullb *nullb; + configfs_unregister_subsystem(&nullb_subsys); + unregister_blkdev(null_major, "nullb"); mutex_lock(&lock); while (!list_empty(&nullb_list)) { + struct nullb_device *dev; + nullb = list_entry(nullb_list.next, struct nullb, list); + dev = nullb->dev; null_del_dev(nullb); + null_free_dev(dev); } mutex_unlock(&lock); - if (queue_mode == NULL_Q_MQ && shared_tags) + if (g_queue_mode == NULL_Q_MQ && shared_tags) blk_mq_free_tag_set(&tag_set); kmem_cache_destroy(ppa_cache); @@ -924,5 +2043,5 @@ static void __exit null_exit(void) module_init(null_init); module_exit(null_exit); -MODULE_AUTHOR("Jens Axboe "); +MODULE_AUTHOR("Jens Axboe "); MODULE_LICENSE("GPL"); diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 6b8b097abbb9..67974796c350 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -1028,7 +1028,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt) bio = pkt->r_bios[f]; bio_reset(bio); bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9); - bio->bi_bdev = pd->bdev; + bio_set_dev(bio, pd->bdev); bio->bi_end_io = pkt_end_io_read; bio->bi_private = pkt; @@ -1122,7 +1122,7 @@ static int pkt_start_recovery(struct packet_data *pkt) pkt->sector = new_sector; bio_reset(pkt->bio); - pkt->bio->bi_bdev = pd->bdev; + bio_set_dev(pkt->bio, pd->bdev); bio_set_op_attrs(pkt->bio, REQ_OP_WRITE, 0); pkt->bio->bi_iter.bi_sector = new_sector; pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE; @@ -1267,7 +1267,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) bio_reset(pkt->w_bio); pkt->w_bio->bi_iter.bi_sector = pkt->sector; - pkt->w_bio->bi_bdev = pd->bdev; + bio_set_dev(pkt->w_bio, pd->bdev); pkt->w_bio->bi_end_io = pkt_end_io_packet_write; pkt->w_bio->bi_private = pkt; @@ -2314,7 +2314,7 @@ static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio) psd->pd = pd; psd->bio = bio; - cloned_bio->bi_bdev = pd->bdev; + bio_set_dev(cloned_bio, pd->bdev); cloned_bio->bi_private = psd; cloned_bio->bi_end_io = pkt_end_io_read_cloned; pd->stats.secs_r += bio_sectors(bio); @@ -2415,8 +2415,7 @@ static blk_qc_t pkt_make_request(struct request_queue *q, struct bio *bio) pd = q->queuedata; if (!pd) { - pr_err("%s incorrect request queue\n", - bdevname(bio->bi_bdev, b)); + pr_err("%s incorrect request queue\n", bio_devname(bio, b)); goto end_io; } diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c index e0e81cacd781..6a55959cbf78 100644 --- a/drivers/block/ps3vram.c +++ b/drivers/block/ps3vram.c @@ -409,10 +409,8 @@ static int ps3vram_cache_init(struct ps3_system_bus_device *dev) priv->cache.page_size = CACHE_PAGE_SIZE; priv->cache.tags = kzalloc(sizeof(struct ps3vram_tag) * CACHE_PAGE_COUNT, GFP_KERNEL); - if (priv->cache.tags == NULL) { - dev_err(&dev->core, "Could not allocate cache tags\n"); + if (!priv->cache.tags) return -ENOMEM; - } dev_info(&dev->core, "Created ram cache: %d entries, %d KiB each\n", CACHE_PAGE_COUNT, CACHE_PAGE_SIZE / 1024); @@ -743,7 +741,11 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev) goto out_unmap_reports; } - ps3vram_cache_init(dev); + error = 
ps3vram_cache_init(dev); + if (error < 0) { + goto out_unmap_reports; + } + ps3vram_proc_init(dev); queue = blk_alloc_queue(GFP_KERNEL); diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index b008b6a98098..b640ad8a6d20 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -3435,7 +3435,7 @@ static void rbd_acquire_lock(struct work_struct *work) struct rbd_device *rbd_dev = container_of(to_delayed_work(work), struct rbd_device, lock_dwork); enum rbd_lock_state lock_state; - int ret; + int ret = 0; dout("%s rbd_dev %p\n", __func__, rbd_dev); again: diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c index 7f4acebf4657..e397d3ee7308 100644 --- a/drivers/block/rsxx/dev.c +++ b/drivers/block/rsxx/dev.c @@ -112,7 +112,7 @@ static const struct block_device_operations rsxx_fops = { static void disk_stats_start(struct rsxx_cardinfo *card, struct bio *bio) { - generic_start_io_acct(bio_data_dir(bio), bio_sectors(bio), + generic_start_io_acct(card->queue, bio_data_dir(bio), bio_sectors(bio), &card->gendisk->part0); } @@ -120,8 +120,8 @@ static void disk_stats_complete(struct rsxx_cardinfo *card, struct bio *bio, unsigned long start_time) { - generic_end_io_acct(bio_data_dir(bio), &card->gendisk->part0, - start_time); + generic_end_io_acct(card->queue, bio_data_dir(bio), + &card->gendisk->part0, start_time); } static void bio_dma_done_cb(struct rsxx_cardinfo *card, diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c index d0368682bd43..7cedb4295e9d 100644 --- a/drivers/block/skd_main.c +++ b/drivers/block/skd_main.c @@ -1,19 +1,12 @@ -/* Copyright 2012 STEC, Inc. +/* + * Driver for sTec s1120 PCIe SSDs. sTec was acquired in 2013 by HGST and HGST + * was acquired by Western Digital in 2012. * - * This file is licensed under the terms of the 3-clause - * BSD License (http://opensource.org/licenses/BSD-3-Clause) - * or the GNU GPL-2.0 (http://www.gnu.org/licenses/gpl-2.0.html), - * at your option. Both licenses are also available in the LICENSE file - * distributed with this project. This file may not be copied, modified, - * or distributed except in accordance with those terms. - * Gordoni Waidhofer - * Initial Driver Design! - * Thomas Swann - * Interrupt handling. - * Ramprasad Chinthekindi - * biomode implementation. - * Akhil Bhansali - * Added support for DISCARD / FLUSH and FUA. + * Copyright 2012 sTec, Inc. + * Copyright (c) 2017 Western Digital Corporation or its affiliates. + * + * This file is part of the Linux kernel, and is made available under + * the terms of the GNU General Public License version 2. */ #include @@ -23,11 +16,11 @@ #include #include #include +#include #include #include #include #include -#include #include #include #include @@ -37,9 +30,9 @@ #include #include #include -#include #include -#include +#include +#include #include #include #include @@ -51,19 +44,6 @@ static int skd_dbg_level; static int skd_isr_comp_limit = 4; -enum { - STEC_LINK_2_5GTS = 0, - STEC_LINK_5GTS = 1, - STEC_LINK_8GTS = 2, - STEC_LINK_UNKNOWN = 0xFF -}; - -enum { - SKD_FLUSH_INITIALIZER, - SKD_FLUSH_ZERO_SIZE_FIRST, - SKD_FLUSH_DATA_SECOND, -}; - #define SKD_ASSERT(expr) \ do { \ if (unlikely(!(expr))) { \ @@ -73,17 +53,11 @@ enum { } while (0) #define DRV_NAME "skd" -#define DRV_VERSION "2.2.1" -#define DRV_BUILD_ID "0260" #define PFX DRV_NAME ": " -#define DRV_BIN_VERSION 0x100 -#define DRV_VER_COMPL "2.2.1." 
DRV_BUILD_ID -MODULE_AUTHOR("bug-reports: support@stec-inc.com"); -MODULE_LICENSE("Dual BSD/GPL"); +MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver (b" DRV_BUILD_ID ")"); -MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID); +MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver"); #define PCI_VENDOR_ID_STEC 0x1B39 #define PCI_DEVICE_ID_S1120 0x0001 @@ -96,34 +70,32 @@ MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID); #define SKD_PAUSE_TIMEOUT (5 * 1000) #define SKD_N_FITMSG_BYTES (512u) +#define SKD_MAX_REQ_PER_MSG 14 -#define SKD_N_SPECIAL_CONTEXT 32u #define SKD_N_SPECIAL_FITMSG_BYTES (128u) /* SG elements are 32 bytes, so we can make this 4096 and still be under the * 128KB limit. That allows 4096*4K = 16M xfer size */ #define SKD_N_SG_PER_REQ_DEFAULT 256u -#define SKD_N_SG_PER_SPECIAL 256u #define SKD_N_COMPLETION_ENTRY 256u #define SKD_N_READ_CAP_BYTES (8u) #define SKD_N_INTERNAL_BYTES (512u) +#define SKD_SKCOMP_SIZE \ + ((sizeof(struct fit_completion_entry_v1) + \ + sizeof(struct fit_comp_error_info)) * SKD_N_COMPLETION_ENTRY) + /* 5 bits of uniqifier, 0xF800 */ -#define SKD_ID_INCR (0x400) #define SKD_ID_TABLE_MASK (3u << 8u) #define SKD_ID_RW_REQUEST (0u << 8u) #define SKD_ID_INTERNAL (1u << 8u) -#define SKD_ID_SPECIAL_REQUEST (2u << 8u) #define SKD_ID_FIT_MSG (3u << 8u) #define SKD_ID_SLOT_MASK 0x00FFu #define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu -#define SKD_N_TIMEOUT_SLOT 4u -#define SKD_TIMEOUT_SLOT_MASK 3u - #define SKD_N_MAX_SECTORS 2048u #define SKD_MAX_RETRIES 2u @@ -141,7 +113,6 @@ enum skd_drvr_state { SKD_DRVR_STATE_ONLINE, SKD_DRVR_STATE_PAUSING, SKD_DRVR_STATE_PAUSED, - SKD_DRVR_STATE_DRAINING_TIMEOUT, SKD_DRVR_STATE_RESTARTING, SKD_DRVR_STATE_RESUMING, SKD_DRVR_STATE_STOPPING, @@ -158,7 +129,6 @@ enum skd_drvr_state { #define SKD_WAIT_BOOT_TIMO SKD_TIMER_SECONDS(90u) #define SKD_STARTING_TIMO SKD_TIMER_SECONDS(8u) #define SKD_RESTARTING_TIMO SKD_TIMER_MINUTES(4u) -#define SKD_DRAINING_TIMO SKD_TIMER_SECONDS(6u) #define SKD_BUSY_TIMO SKD_TIMER_MINUTES(20u) #define SKD_STARTED_BUSY_TIMO SKD_TIMER_SECONDS(60u) #define SKD_START_WAIT_SECONDS 90u @@ -169,12 +139,6 @@ enum skd_req_state { SKD_REQ_STATE_BUSY, SKD_REQ_STATE_COMPLETED, SKD_REQ_STATE_TIMEOUT, - SKD_REQ_STATE_ABORTED, -}; - -enum skd_fit_msg_state { - SKD_MSG_STATE_IDLE, - SKD_MSG_STATE_BUSY, }; enum skd_check_status_action { @@ -185,34 +149,29 @@ enum skd_check_status_action { SKD_CHECK_STATUS_BUSY_IMMINENT, }; +struct skd_msg_buf { + struct fit_msg_hdr fmh; + struct skd_scsi_request scsi[SKD_MAX_REQ_PER_MSG]; +}; + struct skd_fitmsg_context { - enum skd_fit_msg_state state; - - struct skd_fitmsg_context *next; - u32 id; - u16 outstanding; u32 length; - u32 offset; - u8 *msg_buf; + struct skd_msg_buf *msg_buf; dma_addr_t mb_dma_address; }; struct skd_request_context { enum skd_req_state state; - struct skd_request_context *next; - u16 id; u32 fitmsg_id; - struct request *req; u8 flush_cmd; - u32 timeout_stamp; - u8 sg_data_dir; + enum dma_data_direction data_dir; struct scatterlist *sg; u32 n_sg; u32 sg_byte_count; @@ -224,38 +183,19 @@ struct skd_request_context { struct fit_comp_error_info err_info; + blk_status_t status; }; -#define SKD_DATA_DIR_HOST_TO_CARD 1 -#define SKD_DATA_DIR_CARD_TO_HOST 2 struct skd_special_context { struct skd_request_context req; - u8 orphaned; - void *data_buf; dma_addr_t db_dma_address; - u8 *msg_buf; + struct skd_msg_buf *msg_buf; dma_addr_t mb_dma_address; }; -struct skd_sg_io { - fmode_t mode; - void __user *argp; - - struct sg_io_hdr sg; - - u8 cdb[16]; - - 
u32 dxfer_len; - u32 iovcnt; - struct sg_iovec *iov; - struct sg_iovec no_iov_iov; - - struct skd_special_context *skspcl; -}; - typedef enum skd_irq_type { SKD_IRQ_LEGACY, SKD_IRQ_MSI, @@ -265,7 +205,7 @@ typedef enum skd_irq_type { #define SKD_MAX_BARS 2 struct skd_device { - volatile void __iomem *mem_map[SKD_MAX_BARS]; + void __iomem *mem_map[SKD_MAX_BARS]; resource_size_t mem_phys[SKD_MAX_BARS]; u32 mem_size[SKD_MAX_BARS]; @@ -276,21 +216,20 @@ struct skd_device { spinlock_t lock; struct gendisk *disk; + struct blk_mq_tag_set tag_set; struct request_queue *queue; + struct skd_fitmsg_context *skmsg; struct device *class_dev; int gendisk_on; int sync_done; - atomic_t device_count; u32 devno; u32 major; - char name[32]; char isr_name[30]; enum skd_drvr_state state; u32 drive_state; - u32 in_flight; u32 cur_max_queue_depth; u32 queue_low_water_mark; u32 dev_max_queue_depth; @@ -298,27 +237,20 @@ struct skd_device { u32 num_fitmsg_context; u32 num_req_context; - u32 timeout_slot[SKD_N_TIMEOUT_SLOT]; - u32 timeout_stamp; - struct skd_fitmsg_context *skmsg_free_list; struct skd_fitmsg_context *skmsg_table; - struct skd_request_context *skreq_free_list; - struct skd_request_context *skreq_table; - - struct skd_special_context *skspcl_free_list; - struct skd_special_context *skspcl_table; - struct skd_special_context internal_skspcl; u32 read_cap_blocksize; u32 read_cap_last_lba; int read_cap_is_valid; int inquiry_is_valid; u8 inq_serial_num[13]; /*12 chars plus null term */ - u8 id_str[80]; /* holds a composite name (pci + sernum) */ u8 skcomp_cycle; u32 skcomp_ix; + struct kmem_cache *msgbuf_cache; + struct kmem_cache *sglist_cache; + struct kmem_cache *databuf_cache; struct fit_completion_entry_v1 *skcomp_table; struct fit_comp_error_info *skerr_table; dma_addr_t cq_dma_address; @@ -329,7 +261,6 @@ struct skd_device { u32 timer_countdown; u32 timer_substate; - int n_special; int sgs_per_request; u32 last_mtd; @@ -343,7 +274,7 @@ struct skd_device { u32 timo_slot; - + struct work_struct start_queue; struct work_struct completion_worker; }; @@ -353,53 +284,32 @@ struct skd_device { static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset) { - u32 val; - - if (likely(skdev->dbg_level < 2)) - return readl(skdev->mem_map[1] + offset); - else { - barrier(); - val = readl(skdev->mem_map[1] + offset); - barrier(); - pr_debug("%s:%s:%d offset %x = %x\n", - skdev->name, __func__, __LINE__, offset, val); - return val; - } + u32 val = readl(skdev->mem_map[1] + offset); + if (unlikely(skdev->dbg_level >= 2)) + dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val); + return val; } static inline void skd_reg_write32(struct skd_device *skdev, u32 val, u32 offset) { - if (likely(skdev->dbg_level < 2)) { - writel(val, skdev->mem_map[1] + offset); - barrier(); - } else { - barrier(); - writel(val, skdev->mem_map[1] + offset); - barrier(); - pr_debug("%s:%s:%d offset %x = %x\n", - skdev->name, __func__, __LINE__, offset, val); - } + writel(val, skdev->mem_map[1] + offset); + if (unlikely(skdev->dbg_level >= 2)) + dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val); } static inline void skd_reg_write64(struct skd_device *skdev, u64 val, u32 offset) { - if (likely(skdev->dbg_level < 2)) { - writeq(val, skdev->mem_map[1] + offset); - barrier(); - } else { - barrier(); - writeq(val, skdev->mem_map[1] + offset); - barrier(); - pr_debug("%s:%s:%d offset %x = %016llx\n", - skdev->name, __func__, __LINE__, offset, val); - } + writeq(val, skdev->mem_map[1] + offset); + if 
(unlikely(skdev->dbg_level >= 2)) + dev_dbg(&skdev->pdev->dev, "offset %x = %016llx\n", offset, + val); } -#define SKD_IRQ_DEFAULT SKD_IRQ_MSI +#define SKD_IRQ_DEFAULT SKD_IRQ_MSIX static int skd_isr_type = SKD_IRQ_DEFAULT; module_param(skd_isr_type, int, 0444); @@ -412,7 +322,7 @@ static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT; module_param(skd_max_req_per_msg, int, 0444); MODULE_PARM_DESC(skd_max_req_per_msg, "Maximum SCSI requests packed in a single message." - " (1-14, default==1)"); + " (1-" __stringify(SKD_MAX_REQ_PER_MSG) ", default==1)"); #define SKD_MAX_QUEUE_DEPTH_DEFAULT 64 #define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64" @@ -429,10 +339,10 @@ MODULE_PARM_DESC(skd_sgs_per_request, "Maximum SG elements per block request." " (1-4096, default==256)"); -static int skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT; +static int skd_max_pass_thru = 1; module_param(skd_max_pass_thru, int, 0444); MODULE_PARM_DESC(skd_max_pass_thru, - "Maximum SCSI pass-thru at a time." " (1-50, default==32)"); + "Maximum SCSI pass-thru at a time. IGNORED"); module_param(skd_dbg_level, int, 0444); MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)"); @@ -449,9 +359,6 @@ static void skd_send_fitmsg(struct skd_device *skdev, struct skd_fitmsg_context *skmsg); static void skd_send_special_fitmsg(struct skd_device *skdev, struct skd_special_context *skspcl); -static void skd_request_fn(struct request_queue *rq); -static void skd_end_request(struct skd_device *skdev, - struct skd_request_context *skreq, blk_status_t status); static bool skd_preop_sg_list(struct skd_device *skdev, struct skd_request_context *skreq); static void skd_postop_sg_list(struct skd_device *skdev, @@ -460,19 +367,14 @@ static void skd_postop_sg_list(struct skd_device *skdev, static void skd_restart_device(struct skd_device *skdev); static int skd_quiesce_dev(struct skd_device *skdev); static int skd_unquiesce_dev(struct skd_device *skdev); -static void skd_release_special(struct skd_device *skdev, - struct skd_special_context *skspcl); static void skd_disable_interrupts(struct skd_device *skdev); static void skd_isr_fwstate(struct skd_device *skdev); -static void skd_recover_requests(struct skd_device *skdev, int requeue); +static void skd_recover_requests(struct skd_device *skdev); static void skd_soft_reset(struct skd_device *skdev); -static const char *skd_name(struct skd_device *skdev); const char *skd_drive_state_to_str(int state); const char *skd_skdev_state_to_str(enum skd_drvr_state state); static void skd_log_skdev(struct skd_device *skdev, const char *event); -static void skd_log_skmsg(struct skd_device *skdev, - struct skd_fitmsg_context *skmsg, const char *event); static void skd_log_skreq(struct skd_device *skdev, struct skd_request_context *skreq, const char *event); @@ -481,18 +383,20 @@ static void skd_log_skreq(struct skd_device *skdev, * READ/WRITE REQUESTS ***************************************************************************** */ -static void skd_fail_all_pending(struct skd_device *skdev) +static void skd_inc_in_flight(struct request *rq, void *data, bool reserved) { - struct request_queue *q = skdev->queue; - struct request *req; + int *count = data; - for (;; ) { - req = blk_peek_request(q); - if (req == NULL) - break; - blk_start_request(req); - __blk_end_request_all(req, BLK_STS_IOERR); - } + count++; +} + +static int skd_in_flight(struct skd_device *skdev) +{ + int count = 0; + + blk_mq_tagset_busy_iter(&skdev->tag_set, skd_inc_in_flight, &count); + + return count; } static void @@ -501,9 +405,9 
@@ skd_prep_rw_cdb(struct skd_scsi_request *scsi_req, unsigned count) { if (data_dir == READ) - scsi_req->cdb[0] = 0x28; + scsi_req->cdb[0] = READ_10; else - scsi_req->cdb[0] = 0x2a; + scsi_req->cdb[0] = WRITE_10; scsi_req->cdb[1] = 0; scsi_req->cdb[2] = (lba & 0xff000000) >> 24; @@ -522,7 +426,7 @@ skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req, { skreq->flush_cmd = 1; - scsi_req->cdb[0] = 0x35; + scsi_req->cdb[0] = SYNCHRONIZE_CACHE; scsi_req->cdb[1] = 0; scsi_req->cdb[2] = 0; scsi_req->cdb[3] = 0; @@ -534,374 +438,12 @@ skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req, scsi_req->cdb[9] = 0; } -static void skd_request_fn_not_online(struct request_queue *q); - -static void skd_request_fn(struct request_queue *q) +/* + * Return true if and only if all pending requests should be failed. + */ +static bool skd_fail_all(struct request_queue *q) { struct skd_device *skdev = q->queuedata; - struct skd_fitmsg_context *skmsg = NULL; - struct fit_msg_hdr *fmh = NULL; - struct skd_request_context *skreq; - struct request *req = NULL; - struct skd_scsi_request *scsi_req; - unsigned long io_flags; - u32 lba; - u32 count; - int data_dir; - u32 be_lba; - u32 be_count; - u64 be_dmaa; - u64 cmdctxt; - u32 timo_slot; - void *cmd_ptr; - int flush, fua; - - if (skdev->state != SKD_DRVR_STATE_ONLINE) { - skd_request_fn_not_online(q); - return; - } - - if (blk_queue_stopped(skdev->queue)) { - if (skdev->skmsg_free_list == NULL || - skdev->skreq_free_list == NULL || - skdev->in_flight >= skdev->queue_low_water_mark) - /* There is still some kind of shortage */ - return; - - queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue); - } - - /* - * Stop conditions: - * - There are no more native requests - * - There are already the maximum number of requests in progress - * - There are no more skd_request_context entries - * - There are no more FIT msg buffers - */ - for (;; ) { - - flush = fua = 0; - - req = blk_peek_request(q); - - /* Are there any native requests to start? */ - if (req == NULL) - break; - - lba = (u32)blk_rq_pos(req); - count = blk_rq_sectors(req); - data_dir = rq_data_dir(req); - io_flags = req->cmd_flags; - - if (req_op(req) == REQ_OP_FLUSH) - flush++; - - if (io_flags & REQ_FUA) - fua++; - - pr_debug("%s:%s:%d new req=%p lba=%u(0x%x) " - "count=%u(0x%x) dir=%d\n", - skdev->name, __func__, __LINE__, - req, lba, lba, count, count, data_dir); - - /* At this point we know there is a request */ - - /* Are too many requets already in progress? */ - if (skdev->in_flight >= skdev->cur_max_queue_depth) { - pr_debug("%s:%s:%d qdepth %d, limit %d\n", - skdev->name, __func__, __LINE__, - skdev->in_flight, skdev->cur_max_queue_depth); - break; - } - - /* Is a skd_request_context available? */ - skreq = skdev->skreq_free_list; - if (skreq == NULL) { - pr_debug("%s:%s:%d Out of req=%p\n", - skdev->name, __func__, __LINE__, q); - break; - } - SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE); - SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0); - - /* Now we check to see if we can get a fit msg */ - if (skmsg == NULL) { - if (skdev->skmsg_free_list == NULL) { - pr_debug("%s:%s:%d Out of msg\n", - skdev->name, __func__, __LINE__); - break; - } - } - - skreq->flush_cmd = 0; - skreq->n_sg = 0; - skreq->sg_byte_count = 0; - - /* - * OK to now dequeue request from q. - * - * At this point we are comitted to either start or reject - * the native request. Note that skd_request_context is - * available but is still at the head of the free list. 
- */ - blk_start_request(req); - skreq->req = req; - skreq->fitmsg_id = 0; - - /* Either a FIT msg is in progress or we have to start one. */ - if (skmsg == NULL) { - /* Are there any FIT msg buffers available? */ - skmsg = skdev->skmsg_free_list; - if (skmsg == NULL) { - pr_debug("%s:%s:%d Out of msg skdev=%p\n", - skdev->name, __func__, __LINE__, - skdev); - break; - } - SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE); - SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0); - - skdev->skmsg_free_list = skmsg->next; - - skmsg->state = SKD_MSG_STATE_BUSY; - skmsg->id += SKD_ID_INCR; - - /* Initialize the FIT msg header */ - fmh = (struct fit_msg_hdr *)skmsg->msg_buf; - memset(fmh, 0, sizeof(*fmh)); - fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT; - skmsg->length = sizeof(*fmh); - } - - skreq->fitmsg_id = skmsg->id; - - /* - * Note that a FIT msg may have just been started - * but contains no SoFIT requests yet. - */ - - /* - * Transcode the request, checking as we go. The outcome of - * the transcoding is represented by the error variable. - */ - cmd_ptr = &skmsg->msg_buf[skmsg->length]; - memset(cmd_ptr, 0, 32); - - be_lba = cpu_to_be32(lba); - be_count = cpu_to_be32(count); - be_dmaa = cpu_to_be64((u64)skreq->sksg_dma_address); - cmdctxt = skreq->id + SKD_ID_INCR; - - scsi_req = cmd_ptr; - scsi_req->hdr.tag = cmdctxt; - scsi_req->hdr.sg_list_dma_address = be_dmaa; - - if (data_dir == READ) - skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST; - else - skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD; - - if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) { - skd_prep_zerosize_flush_cdb(scsi_req, skreq); - SKD_ASSERT(skreq->flush_cmd == 1); - - } else { - skd_prep_rw_cdb(scsi_req, data_dir, lba, count); - } - - if (fua) - scsi_req->cdb[1] |= SKD_FUA_NV; - - if (!req->bio) - goto skip_sg; - - if (!skd_preop_sg_list(skdev, skreq)) { - /* - * Complete the native request with error. - * Note that the request context is still at the - * head of the free list, and that the SoFIT request - * was encoded into the FIT msg buffer but the FIT - * msg length has not been updated. In short, the - * only resource that has been allocated but might - * not be used is that the FIT msg could be empty. - */ - pr_debug("%s:%s:%d error Out\n", - skdev->name, __func__, __LINE__); - skd_end_request(skdev, skreq, BLK_STS_RESOURCE); - continue; - } - -skip_sg: - scsi_req->hdr.sg_list_len_bytes = - cpu_to_be32(skreq->sg_byte_count); - - /* Complete resource allocations. */ - skdev->skreq_free_list = skreq->next; - skreq->state = SKD_REQ_STATE_BUSY; - skreq->id += SKD_ID_INCR; - - skmsg->length += sizeof(struct skd_scsi_request); - fmh->num_protocol_cmds_coalesced++; - - /* - * Update the active request counts. - * Capture the timeout timestamp. - */ - skreq->timeout_stamp = skdev->timeout_stamp; - timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK; - skdev->timeout_slot[timo_slot]++; - skdev->in_flight++; - pr_debug("%s:%s:%d req=0x%x busy=%d\n", - skdev->name, __func__, __LINE__, - skreq->id, skdev->in_flight); - - /* - * If the FIT msg buffer is full send it. - */ - if (skmsg->length >= SKD_N_FITMSG_BYTES || - fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) { - skd_send_fitmsg(skdev, skmsg); - skmsg = NULL; - fmh = NULL; - } - } - - /* - * Is a FIT msg in progress? If it is empty put the buffer back - * on the free list. If it is non-empty send what we got. - * This minimizes latency when there are fewer requests than - * what fits in a FIT msg. - */ - if (skmsg != NULL) { - /* Bigger than just a FIT msg header? 
*/ - if (skmsg->length > sizeof(struct fit_msg_hdr)) { - pr_debug("%s:%s:%d sending msg=%p, len %d\n", - skdev->name, __func__, __LINE__, - skmsg, skmsg->length); - skd_send_fitmsg(skdev, skmsg); - } else { - /* - * The FIT msg is empty. It means we got started - * on the msg, but the requests were rejected. - */ - skmsg->state = SKD_MSG_STATE_IDLE; - skmsg->id += SKD_ID_INCR; - skmsg->next = skdev->skmsg_free_list; - skdev->skmsg_free_list = skmsg; - } - skmsg = NULL; - fmh = NULL; - } - - /* - * If req is non-NULL it means there is something to do but - * we are out of a resource. - */ - if (req) - blk_stop_queue(skdev->queue); -} - -static void skd_end_request(struct skd_device *skdev, - struct skd_request_context *skreq, blk_status_t error) -{ - if (unlikely(error)) { - struct request *req = skreq->req; - char *cmd = (rq_data_dir(req) == READ) ? "read" : "write"; - u32 lba = (u32)blk_rq_pos(req); - u32 count = blk_rq_sectors(req); - - pr_err("(%s): Error cmd=%s sect=%u count=%u id=0x%x\n", - skd_name(skdev), cmd, lba, count, skreq->id); - } else - pr_debug("%s:%s:%d id=0x%x error=%d\n", - skdev->name, __func__, __LINE__, skreq->id, error); - - __blk_end_request_all(skreq->req, error); -} - -static bool skd_preop_sg_list(struct skd_device *skdev, - struct skd_request_context *skreq) -{ - struct request *req = skreq->req; - int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD; - int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE; - struct scatterlist *sg = &skreq->sg[0]; - int n_sg; - int i; - - skreq->sg_byte_count = 0; - - /* SKD_ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD || - skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); */ - - n_sg = blk_rq_map_sg(skdev->queue, req, sg); - if (n_sg <= 0) - return false; - - /* - * Map scatterlist to PCI bus addresses. - * Note PCI might change the number of entries. - */ - n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir); - if (n_sg <= 0) - return false; - - SKD_ASSERT(n_sg <= skdev->sgs_per_request); - - skreq->n_sg = n_sg; - - for (i = 0; i < n_sg; i++) { - struct fit_sg_descriptor *sgd = &skreq->sksg_list[i]; - u32 cnt = sg_dma_len(&sg[i]); - uint64_t dma_addr = sg_dma_address(&sg[i]); - - sgd->control = FIT_SGD_CONTROL_NOT_LAST; - sgd->byte_count = cnt; - skreq->sg_byte_count += cnt; - sgd->host_side_addr = dma_addr; - sgd->dev_side_addr = 0; - } - - skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL; - skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST; - - if (unlikely(skdev->dbg_level > 1)) { - pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n", - skdev->name, __func__, __LINE__, - skreq->id, skreq->sksg_list, skreq->sksg_dma_address); - for (i = 0; i < n_sg; i++) { - struct fit_sg_descriptor *sgd = &skreq->sksg_list[i]; - pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x " - "addr=0x%llx next=0x%llx\n", - skdev->name, __func__, __LINE__, - i, sgd->byte_count, sgd->control, - sgd->host_side_addr, sgd->next_desc_ptr); - } - } - - return true; -} - -static void skd_postop_sg_list(struct skd_device *skdev, - struct skd_request_context *skreq) -{ - int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD; - int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE; - - /* - * restore the next ptr for next IO request so we - * don't have to set it every time. 
- */ - skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr = - skreq->sksg_dma_address + - ((skreq->n_sg) * sizeof(struct fit_sg_descriptor)); - pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir); -} - -static void skd_request_fn_not_online(struct request_queue *q) -{ - struct skd_device *skdev = q->queuedata; - int error; SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE); @@ -922,8 +464,7 @@ static void skd_request_fn_not_online(struct request_queue *q) case SKD_DRVR_STATE_BUSY: case SKD_DRVR_STATE_BUSY_IMMINENT: case SKD_DRVR_STATE_BUSY_ERASE: - case SKD_DRVR_STATE_DRAINING_TIMEOUT: - return; + return false; case SKD_DRVR_STATE_BUSY_SANITIZE: case SKD_DRVR_STATE_STOPPING: @@ -931,15 +472,218 @@ static void skd_request_fn_not_online(struct request_queue *q) case SKD_DRVR_STATE_FAULT: case SKD_DRVR_STATE_DISAPPEARED: default: - error = -EIO; - break; + return true; + } +} + +static blk_status_t skd_mq_queue_rq(struct blk_mq_hw_ctx *hctx, + const struct blk_mq_queue_data *mqd) +{ + struct request *const req = mqd->rq; + struct request_queue *const q = req->q; + struct skd_device *skdev = q->queuedata; + struct skd_fitmsg_context *skmsg; + struct fit_msg_hdr *fmh; + const u32 tag = blk_mq_unique_tag(req); + struct skd_request_context *const skreq = blk_mq_rq_to_pdu(req); + struct skd_scsi_request *scsi_req; + unsigned long flags = 0; + const u32 lba = blk_rq_pos(req); + const u32 count = blk_rq_sectors(req); + const int data_dir = rq_data_dir(req); + + if (unlikely(skdev->state != SKD_DRVR_STATE_ONLINE)) + return skd_fail_all(q) ? BLK_STS_IOERR : BLK_STS_RESOURCE; + + blk_mq_start_request(req); + + WARN_ONCE(tag >= skd_max_queue_depth, "%#x > %#x (nr_requests = %lu)\n", + tag, skd_max_queue_depth, q->nr_requests); + + SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE); + + dev_dbg(&skdev->pdev->dev, + "new req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req, lba, + lba, count, count, data_dir); + + skreq->id = tag + SKD_ID_RW_REQUEST; + skreq->flush_cmd = 0; + skreq->n_sg = 0; + skreq->sg_byte_count = 0; + + skreq->fitmsg_id = 0; + + skreq->data_dir = data_dir == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE; + + if (req->bio && !skd_preop_sg_list(skdev, skreq)) { + dev_dbg(&skdev->pdev->dev, "error Out\n"); + skreq->status = BLK_STS_RESOURCE; + blk_mq_complete_request(req); + return BLK_STS_OK; } - /* If we get here, terminate all pending block requeusts - * with EIO and any scsi pass thru with appropriate sense - */ + dma_sync_single_for_device(&skdev->pdev->dev, skreq->sksg_dma_address, + skreq->n_sg * + sizeof(struct fit_sg_descriptor), + DMA_TO_DEVICE); - skd_fail_all_pending(skdev); + /* Either a FIT msg is in progress or we have to start one. 
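+ * With skd_max_req_per_msg > 1 the partially filled message is cached
+ * in skdev->skmsg and shared across calls, so it is looked up and
+ * updated under skdev->lock; it is submitted below once it is full or
+ * once mqd->last marks the end of the current batch.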
*/ + if (skd_max_req_per_msg == 1) { + skmsg = NULL; + } else { + spin_lock_irqsave(&skdev->lock, flags); + skmsg = skdev->skmsg; + } + if (!skmsg) { + skmsg = &skdev->skmsg_table[tag]; + skdev->skmsg = skmsg; + + /* Initialize the FIT msg header */ + fmh = &skmsg->msg_buf->fmh; + memset(fmh, 0, sizeof(*fmh)); + fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT; + skmsg->length = sizeof(*fmh); + } else { + fmh = &skmsg->msg_buf->fmh; + } + + skreq->fitmsg_id = skmsg->id; + + scsi_req = &skmsg->msg_buf->scsi[fmh->num_protocol_cmds_coalesced]; + memset(scsi_req, 0, sizeof(*scsi_req)); + + scsi_req->hdr.tag = skreq->id; + scsi_req->hdr.sg_list_dma_address = + cpu_to_be64(skreq->sksg_dma_address); + + if (req_op(req) == REQ_OP_FLUSH) { + skd_prep_zerosize_flush_cdb(scsi_req, skreq); + SKD_ASSERT(skreq->flush_cmd == 1); + } else { + skd_prep_rw_cdb(scsi_req, data_dir, lba, count); + } + + if (req->cmd_flags & REQ_FUA) + scsi_req->cdb[1] |= SKD_FUA_NV; + + scsi_req->hdr.sg_list_len_bytes = cpu_to_be32(skreq->sg_byte_count); + + /* Complete resource allocations. */ + skreq->state = SKD_REQ_STATE_BUSY; + + skmsg->length += sizeof(struct skd_scsi_request); + fmh->num_protocol_cmds_coalesced++; + + dev_dbg(&skdev->pdev->dev, "req=0x%x busy=%d\n", skreq->id, + skd_in_flight(skdev)); + + /* + * If the FIT msg buffer is full send it. + */ + if (skd_max_req_per_msg == 1) { + skd_send_fitmsg(skdev, skmsg); + } else { + if (mqd->last || + fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) { + skd_send_fitmsg(skdev, skmsg); + skdev->skmsg = NULL; + } + spin_unlock_irqrestore(&skdev->lock, flags); + } + + return BLK_STS_OK; +} + +static enum blk_eh_timer_return skd_timed_out(struct request *req, + bool reserved) +{ + struct skd_device *skdev = req->q->queuedata; + + dev_err(&skdev->pdev->dev, "request with tag %#x timed out\n", + blk_mq_unique_tag(req)); + + return BLK_EH_RESET_TIMER; +} + +static void skd_complete_rq(struct request *req) +{ + struct skd_request_context *skreq = blk_mq_rq_to_pdu(req); + + blk_mq_end_request(req, skreq->status); +} + +static bool skd_preop_sg_list(struct skd_device *skdev, + struct skd_request_context *skreq) +{ + struct request *req = blk_mq_rq_from_pdu(skreq); + struct scatterlist *sgl = &skreq->sg[0], *sg; + int n_sg; + int i; + + skreq->sg_byte_count = 0; + + WARN_ON_ONCE(skreq->data_dir != DMA_TO_DEVICE && + skreq->data_dir != DMA_FROM_DEVICE); + + n_sg = blk_rq_map_sg(skdev->queue, req, sgl); + if (n_sg <= 0) + return false; + + /* + * Map scatterlist to PCI bus addresses. + * Note PCI might change the number of entries. 
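+ * Adjacent segments may be merged (for example by an IOMMU), so only
+ * the element count returned by pci_map_sg() is used to set
+ * skreq->n_sg and to walk sksg_list from here on.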
+ */ + n_sg = pci_map_sg(skdev->pdev, sgl, n_sg, skreq->data_dir); + if (n_sg <= 0) + return false; + + SKD_ASSERT(n_sg <= skdev->sgs_per_request); + + skreq->n_sg = n_sg; + + for_each_sg(sgl, sg, n_sg, i) { + struct fit_sg_descriptor *sgd = &skreq->sksg_list[i]; + u32 cnt = sg_dma_len(sg); + uint64_t dma_addr = sg_dma_address(sg); + + sgd->control = FIT_SGD_CONTROL_NOT_LAST; + sgd->byte_count = cnt; + skreq->sg_byte_count += cnt; + sgd->host_side_addr = dma_addr; + sgd->dev_side_addr = 0; + } + + skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL; + skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST; + + if (unlikely(skdev->dbg_level > 1)) { + dev_dbg(&skdev->pdev->dev, + "skreq=%x sksg_list=%p sksg_dma=%llx\n", + skreq->id, skreq->sksg_list, skreq->sksg_dma_address); + for (i = 0; i < n_sg; i++) { + struct fit_sg_descriptor *sgd = &skreq->sksg_list[i]; + + dev_dbg(&skdev->pdev->dev, + " sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n", + i, sgd->byte_count, sgd->control, + sgd->host_side_addr, sgd->next_desc_ptr); + } + } + + return true; +} + +static void skd_postop_sg_list(struct skd_device *skdev, + struct skd_request_context *skreq) +{ + /* + * restore the next ptr for next IO request so we + * don't have to set it every time. + */ + skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr = + skreq->sksg_dma_address + + ((skreq->n_sg) * sizeof(struct fit_sg_descriptor)); + pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, skreq->data_dir); } /* @@ -950,12 +694,22 @@ static void skd_request_fn_not_online(struct request_queue *q) static void skd_timer_tick_not_online(struct skd_device *skdev); +static void skd_start_queue(struct work_struct *work) +{ + struct skd_device *skdev = container_of(work, typeof(*skdev), + start_queue); + + /* + * Although it is safe to call blk_start_queue() from interrupt + * context, blk_mq_start_hw_queues() must not be called from + * interrupt context. + */ + blk_mq_start_hw_queues(skdev->queue); +} + static void skd_timer_tick(ulong arg) { struct skd_device *skdev = (struct skd_device *)arg; - - u32 timo_slot; - u32 overdue_timestamp; unsigned long reqflags; u32 state; @@ -972,37 +726,9 @@ static void skd_timer_tick(ulong arg) if (state != skdev->drive_state) skd_isr_fwstate(skdev); - if (skdev->state != SKD_DRVR_STATE_ONLINE) { + if (skdev->state != SKD_DRVR_STATE_ONLINE) skd_timer_tick_not_online(skdev); - goto timer_func_out; - } - skdev->timeout_stamp++; - timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK; - /* - * All requests that happened during the previous use of - * this slot should be done by now. The previous use was - * over 7 seconds ago. 
- */ - if (skdev->timeout_slot[timo_slot] == 0) - goto timer_func_out; - - /* Something is overdue */ - overdue_timestamp = skdev->timeout_stamp - SKD_N_TIMEOUT_SLOT; - - pr_debug("%s:%s:%d found %d timeouts, draining busy=%d\n", - skdev->name, __func__, __LINE__, - skdev->timeout_slot[timo_slot], skdev->in_flight); - pr_err("(%s): Overdue IOs (%d), busy %d\n", - skd_name(skdev), skdev->timeout_slot[timo_slot], - skdev->in_flight); - - skdev->timer_countdown = SKD_DRAINING_TIMO; - skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT; - skdev->timo_slot = timo_slot; - blk_stop_queue(skdev->queue); - -timer_func_out: mod_timer(&skdev->timer, (jiffies + HZ)); spin_unlock_irqrestore(&skdev->lock, reqflags); @@ -1015,9 +741,9 @@ static void skd_timer_tick_not_online(struct skd_device *skdev) case SKD_DRVR_STATE_LOAD: break; case SKD_DRVR_STATE_BUSY_SANITIZE: - pr_debug("%s:%s:%d drive busy sanitize[%x], driver[%x]\n", - skdev->name, __func__, __LINE__, - skdev->drive_state, skdev->state); + dev_dbg(&skdev->pdev->dev, + "drive busy sanitize[%x], driver[%x]\n", + skdev->drive_state, skdev->state); /* If we've been in sanitize for 3 seconds, we figure we're not * going to get anymore completions, so recover requests now */ @@ -1025,22 +751,21 @@ static void skd_timer_tick_not_online(struct skd_device *skdev) skdev->timer_countdown--; return; } - skd_recover_requests(skdev, 0); + skd_recover_requests(skdev); break; case SKD_DRVR_STATE_BUSY: case SKD_DRVR_STATE_BUSY_IMMINENT: case SKD_DRVR_STATE_BUSY_ERASE: - pr_debug("%s:%s:%d busy[%x], countdown=%d\n", - skdev->name, __func__, __LINE__, - skdev->state, skdev->timer_countdown); + dev_dbg(&skdev->pdev->dev, "busy[%x], countdown=%d\n", + skdev->state, skdev->timer_countdown); if (skdev->timer_countdown > 0) { skdev->timer_countdown--; return; } - pr_debug("%s:%s:%d busy[%x], timedout=%d, restarting device.", - skdev->name, __func__, __LINE__, - skdev->state, skdev->timer_countdown); + dev_dbg(&skdev->pdev->dev, + "busy[%x], timedout=%d, restarting device.", + skdev->state, skdev->timer_countdown); skd_restart_device(skdev); break; @@ -1054,12 +779,12 @@ static void skd_timer_tick_not_online(struct skd_device *skdev) * revcover at some point. 
*/ skdev->state = SKD_DRVR_STATE_FAULT; - pr_err("(%s): DriveFault Connect Timeout (%x)\n", - skd_name(skdev), skdev->drive_state); + dev_err(&skdev->pdev->dev, "DriveFault Connect Timeout (%x)\n", + skdev->drive_state); /*start the queue so we can respond with error to requests */ /* wakeup anyone waiting for startup complete */ - blk_start_queue(skdev->queue); + schedule_work(&skdev->start_queue); skdev->gendisk_on = -1; wake_up_interruptible(&skdev->waitq); break; @@ -1072,29 +797,6 @@ static void skd_timer_tick_not_online(struct skd_device *skdev) case SKD_DRVR_STATE_PAUSED: break; - case SKD_DRVR_STATE_DRAINING_TIMEOUT: - pr_debug("%s:%s:%d " - "draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n", - skdev->name, __func__, __LINE__, - skdev->timo_slot, - skdev->timer_countdown, - skdev->in_flight, - skdev->timeout_slot[skdev->timo_slot]); - /* if the slot has cleared we can let the I/O continue */ - if (skdev->timeout_slot[skdev->timo_slot] == 0) { - pr_debug("%s:%s:%d Slot drained, starting queue.\n", - skdev->name, __func__, __LINE__); - skdev->state = SKD_DRVR_STATE_ONLINE; - blk_start_queue(skdev->queue); - return; - } - if (skdev->timer_countdown > 0) { - skdev->timer_countdown--; - return; - } - skd_restart_device(skdev); - break; - case SKD_DRVR_STATE_RESTARTING: if (skdev->timer_countdown > 0) { skdev->timer_countdown--; @@ -1103,8 +805,9 @@ static void skd_timer_tick_not_online(struct skd_device *skdev) /* For now, we fault the drive. Could attempt resets to * revcover at some point. */ skdev->state = SKD_DRVR_STATE_FAULT; - pr_err("(%s): DriveFault Reconnect Timeout (%x)\n", - skd_name(skdev), skdev->drive_state); + dev_err(&skdev->pdev->dev, + "DriveFault Reconnect Timeout (%x)\n", + skdev->drive_state); /* * Recovering does two things: @@ -1124,18 +827,18 @@ static void skd_timer_tick_not_online(struct skd_device *skdev) /* It never came out of soft reset. Try to * recover the requests and then let them * fail. This is to mitigate hung processes. 
*/ - skd_recover_requests(skdev, 0); + skd_recover_requests(skdev); else { - pr_err("(%s): Disable BusMaster (%x)\n", - skd_name(skdev), skdev->drive_state); + dev_err(&skdev->pdev->dev, "Disable BusMaster (%x)\n", + skdev->drive_state); pci_disable_device(skdev->pdev); skd_disable_interrupts(skdev); - skd_recover_requests(skdev, 0); + skd_recover_requests(skdev); } /*start the queue so we can respond with error to requests */ /* wakeup anyone waiting for startup complete */ - blk_start_queue(skdev->queue); + schedule_work(&skdev->start_queue); skdev->gendisk_on = -1; wake_up_interruptible(&skdev->waitq); break; @@ -1154,13 +857,11 @@ static int skd_start_timer(struct skd_device *skdev) { int rc; - init_timer(&skdev->timer); setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev); rc = mod_timer(&skdev->timer, (jiffies + HZ)); if (rc) - pr_err("%s: failed to start timer %d\n", - __func__, rc); + dev_err(&skdev->pdev->dev, "failed to start timer %d\n", rc); return rc; } @@ -1169,634 +870,6 @@ static void skd_kill_timer(struct skd_device *skdev) del_timer_sync(&skdev->timer); } -/* - ***************************************************************************** - * IOCTL - ***************************************************************************** - */ -static int skd_ioctl_sg_io(struct skd_device *skdev, - fmode_t mode, void __user *argp); -static int skd_sg_io_get_and_check_args(struct skd_device *skdev, - struct skd_sg_io *sksgio); -static int skd_sg_io_obtain_skspcl(struct skd_device *skdev, - struct skd_sg_io *sksgio); -static int skd_sg_io_prep_buffering(struct skd_device *skdev, - struct skd_sg_io *sksgio); -static int skd_sg_io_copy_buffer(struct skd_device *skdev, - struct skd_sg_io *sksgio, int dxfer_dir); -static int skd_sg_io_send_fitmsg(struct skd_device *skdev, - struct skd_sg_io *sksgio); -static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio); -static int skd_sg_io_release_skspcl(struct skd_device *skdev, - struct skd_sg_io *sksgio); -static int skd_sg_io_put_status(struct skd_device *skdev, - struct skd_sg_io *sksgio); - -static void skd_complete_special(struct skd_device *skdev, - volatile struct fit_completion_entry_v1 - *skcomp, - volatile struct fit_comp_error_info *skerr, - struct skd_special_context *skspcl); - -static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode, - uint cmd_in, ulong arg) -{ - static const int sg_version_num = 30527; - int rc = 0, timeout; - struct gendisk *disk = bdev->bd_disk; - struct skd_device *skdev = disk->private_data; - int __user *p = (int __user *)arg; - - pr_debug("%s:%s:%d %s: CMD[%s] ioctl mode 0x%x, cmd 0x%x arg %0lx\n", - skdev->name, __func__, __LINE__, - disk->disk_name, current->comm, mode, cmd_in, arg); - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - switch (cmd_in) { - case SG_SET_TIMEOUT: - rc = get_user(timeout, p); - if (!rc) - disk->queue->sg_timeout = clock_t_to_jiffies(timeout); - break; - case SG_GET_TIMEOUT: - rc = jiffies_to_clock_t(disk->queue->sg_timeout); - break; - case SG_GET_VERSION_NUM: - rc = put_user(sg_version_num, p); - break; - case SG_IO: - rc = skd_ioctl_sg_io(skdev, mode, (void __user *)arg); - break; - - default: - rc = -ENOTTY; - break; - } - - pr_debug("%s:%s:%d %s: completion rc %d\n", - skdev->name, __func__, __LINE__, disk->disk_name, rc); - return rc; -} - -static int skd_ioctl_sg_io(struct skd_device *skdev, fmode_t mode, - void __user *argp) -{ - int rc; - struct skd_sg_io sksgio; - - memset(&sksgio, 0, sizeof(sksgio)); - sksgio.mode = mode; - 
sksgio.argp = argp; - sksgio.iov = &sksgio.no_iov_iov; - - switch (skdev->state) { - case SKD_DRVR_STATE_ONLINE: - case SKD_DRVR_STATE_BUSY_IMMINENT: - break; - - default: - pr_debug("%s:%s:%d drive not online\n", - skdev->name, __func__, __LINE__); - rc = -ENXIO; - goto out; - } - - rc = skd_sg_io_get_and_check_args(skdev, &sksgio); - if (rc) - goto out; - - rc = skd_sg_io_obtain_skspcl(skdev, &sksgio); - if (rc) - goto out; - - rc = skd_sg_io_prep_buffering(skdev, &sksgio); - if (rc) - goto out; - - rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_TO_DEV); - if (rc) - goto out; - - rc = skd_sg_io_send_fitmsg(skdev, &sksgio); - if (rc) - goto out; - - rc = skd_sg_io_await(skdev, &sksgio); - if (rc) - goto out; - - rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_FROM_DEV); - if (rc) - goto out; - - rc = skd_sg_io_put_status(skdev, &sksgio); - if (rc) - goto out; - - rc = 0; - -out: - skd_sg_io_release_skspcl(skdev, &sksgio); - - if (sksgio.iov != NULL && sksgio.iov != &sksgio.no_iov_iov) - kfree(sksgio.iov); - return rc; -} - -static int skd_sg_io_get_and_check_args(struct skd_device *skdev, - struct skd_sg_io *sksgio) -{ - struct sg_io_hdr *sgp = &sksgio->sg; - int i, acc; - - if (!access_ok(VERIFY_WRITE, sksgio->argp, sizeof(sg_io_hdr_t))) { - pr_debug("%s:%s:%d access sg failed %p\n", - skdev->name, __func__, __LINE__, sksgio->argp); - return -EFAULT; - } - - if (__copy_from_user(sgp, sksgio->argp, sizeof(sg_io_hdr_t))) { - pr_debug("%s:%s:%d copy_from_user sg failed %p\n", - skdev->name, __func__, __LINE__, sksgio->argp); - return -EFAULT; - } - - if (sgp->interface_id != SG_INTERFACE_ID_ORIG) { - pr_debug("%s:%s:%d interface_id invalid 0x%x\n", - skdev->name, __func__, __LINE__, sgp->interface_id); - return -EINVAL; - } - - if (sgp->cmd_len > sizeof(sksgio->cdb)) { - pr_debug("%s:%s:%d cmd_len invalid %d\n", - skdev->name, __func__, __LINE__, sgp->cmd_len); - return -EINVAL; - } - - if (sgp->iovec_count > 256) { - pr_debug("%s:%s:%d iovec_count invalid %d\n", - skdev->name, __func__, __LINE__, sgp->iovec_count); - return -EINVAL; - } - - if (sgp->dxfer_len > (PAGE_SIZE * SKD_N_SG_PER_SPECIAL)) { - pr_debug("%s:%s:%d dxfer_len invalid %d\n", - skdev->name, __func__, __LINE__, sgp->dxfer_len); - return -EINVAL; - } - - switch (sgp->dxfer_direction) { - case SG_DXFER_NONE: - acc = -1; - break; - - case SG_DXFER_TO_DEV: - acc = VERIFY_READ; - break; - - case SG_DXFER_FROM_DEV: - case SG_DXFER_TO_FROM_DEV: - acc = VERIFY_WRITE; - break; - - default: - pr_debug("%s:%s:%d dxfer_dir invalid %d\n", - skdev->name, __func__, __LINE__, sgp->dxfer_direction); - return -EINVAL; - } - - if (copy_from_user(sksgio->cdb, sgp->cmdp, sgp->cmd_len)) { - pr_debug("%s:%s:%d copy_from_user cmdp failed %p\n", - skdev->name, __func__, __LINE__, sgp->cmdp); - return -EFAULT; - } - - if (sgp->mx_sb_len != 0) { - if (!access_ok(VERIFY_WRITE, sgp->sbp, sgp->mx_sb_len)) { - pr_debug("%s:%s:%d access sbp failed %p\n", - skdev->name, __func__, __LINE__, sgp->sbp); - return -EFAULT; - } - } - - if (sgp->iovec_count == 0) { - sksgio->iov[0].iov_base = sgp->dxferp; - sksgio->iov[0].iov_len = sgp->dxfer_len; - sksgio->iovcnt = 1; - sksgio->dxfer_len = sgp->dxfer_len; - } else { - struct sg_iovec *iov; - uint nbytes = sizeof(*iov) * sgp->iovec_count; - size_t iov_data_len; - - iov = kmalloc(nbytes, GFP_KERNEL); - if (iov == NULL) { - pr_debug("%s:%s:%d alloc iovec failed %d\n", - skdev->name, __func__, __LINE__, - sgp->iovec_count); - return -ENOMEM; - } - sksgio->iov = iov; - sksgio->iovcnt = sgp->iovec_count; - - 
if (copy_from_user(iov, sgp->dxferp, nbytes)) { - pr_debug("%s:%s:%d copy_from_user iovec failed %p\n", - skdev->name, __func__, __LINE__, sgp->dxferp); - return -EFAULT; - } - - /* - * Sum up the vecs, making sure they don't overflow - */ - iov_data_len = 0; - for (i = 0; i < sgp->iovec_count; i++) { - if (iov_data_len + iov[i].iov_len < iov_data_len) - return -EINVAL; - iov_data_len += iov[i].iov_len; - } - - /* SG_IO howto says that the shorter of the two wins */ - if (sgp->dxfer_len < iov_data_len) { - sksgio->iovcnt = iov_shorten((struct iovec *)iov, - sgp->iovec_count, - sgp->dxfer_len); - sksgio->dxfer_len = sgp->dxfer_len; - } else - sksgio->dxfer_len = iov_data_len; - } - - if (sgp->dxfer_direction != SG_DXFER_NONE) { - struct sg_iovec *iov = sksgio->iov; - for (i = 0; i < sksgio->iovcnt; i++, iov++) { - if (!access_ok(acc, iov->iov_base, iov->iov_len)) { - pr_debug("%s:%s:%d access data failed %p/%d\n", - skdev->name, __func__, __LINE__, - iov->iov_base, (int)iov->iov_len); - return -EFAULT; - } - } - } - - return 0; -} - -static int skd_sg_io_obtain_skspcl(struct skd_device *skdev, - struct skd_sg_io *sksgio) -{ - struct skd_special_context *skspcl = NULL; - int rc; - - for (;;) { - ulong flags; - - spin_lock_irqsave(&skdev->lock, flags); - skspcl = skdev->skspcl_free_list; - if (skspcl != NULL) { - skdev->skspcl_free_list = - (struct skd_special_context *)skspcl->req.next; - skspcl->req.id += SKD_ID_INCR; - skspcl->req.state = SKD_REQ_STATE_SETUP; - skspcl->orphaned = 0; - skspcl->req.n_sg = 0; - } - spin_unlock_irqrestore(&skdev->lock, flags); - - if (skspcl != NULL) { - rc = 0; - break; - } - - pr_debug("%s:%s:%d blocking\n", - skdev->name, __func__, __LINE__); - - rc = wait_event_interruptible_timeout( - skdev->waitq, - (skdev->skspcl_free_list != NULL), - msecs_to_jiffies(sksgio->sg.timeout)); - - pr_debug("%s:%s:%d unblocking, rc=%d\n", - skdev->name, __func__, __LINE__, rc); - - if (rc <= 0) { - if (rc == 0) - rc = -ETIMEDOUT; - else - rc = -EINTR; - break; - } - /* - * If we get here rc > 0 meaning the timeout to - * wait_event_interruptible_timeout() had time left, hence the - * sought event -- non-empty free list -- happened. - * Retry the allocation. - */ - } - sksgio->skspcl = skspcl; - - return rc; -} - -static int skd_skreq_prep_buffering(struct skd_device *skdev, - struct skd_request_context *skreq, - u32 dxfer_len) -{ - u32 resid = dxfer_len; - - /* - * The DMA engine must have aligned addresses and byte counts. - */ - resid += (-resid) & 3; - skreq->sg_byte_count = resid; - - skreq->n_sg = 0; - - while (resid > 0) { - u32 nbytes = PAGE_SIZE; - u32 ix = skreq->n_sg; - struct scatterlist *sg = &skreq->sg[ix]; - struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix]; - struct page *page; - - if (nbytes > resid) - nbytes = resid; - - page = alloc_page(GFP_KERNEL); - if (page == NULL) - return -ENOMEM; - - sg_set_page(sg, page, nbytes, 0); - - /* TODO: This should be going through a pci_???() - * routine to do proper mapping. 
*/ - sksg->control = FIT_SGD_CONTROL_NOT_LAST; - sksg->byte_count = nbytes; - - sksg->host_side_addr = sg_phys(sg); - - sksg->dev_side_addr = 0; - sksg->next_desc_ptr = skreq->sksg_dma_address + - (ix + 1) * sizeof(*sksg); - - skreq->n_sg++; - resid -= nbytes; - } - - if (skreq->n_sg > 0) { - u32 ix = skreq->n_sg - 1; - struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix]; - - sksg->control = FIT_SGD_CONTROL_LAST; - sksg->next_desc_ptr = 0; - } - - if (unlikely(skdev->dbg_level > 1)) { - u32 i; - - pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n", - skdev->name, __func__, __LINE__, - skreq->id, skreq->sksg_list, skreq->sksg_dma_address); - for (i = 0; i < skreq->n_sg; i++) { - struct fit_sg_descriptor *sgd = &skreq->sksg_list[i]; - - pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x " - "addr=0x%llx next=0x%llx\n", - skdev->name, __func__, __LINE__, - i, sgd->byte_count, sgd->control, - sgd->host_side_addr, sgd->next_desc_ptr); - } - } - - return 0; -} - -static int skd_sg_io_prep_buffering(struct skd_device *skdev, - struct skd_sg_io *sksgio) -{ - struct skd_special_context *skspcl = sksgio->skspcl; - struct skd_request_context *skreq = &skspcl->req; - u32 dxfer_len = sksgio->dxfer_len; - int rc; - - rc = skd_skreq_prep_buffering(skdev, skreq, dxfer_len); - /* - * Eventually, errors or not, skd_release_special() is called - * to recover allocations including partial allocations. - */ - return rc; -} - -static int skd_sg_io_copy_buffer(struct skd_device *skdev, - struct skd_sg_io *sksgio, int dxfer_dir) -{ - struct skd_special_context *skspcl = sksgio->skspcl; - u32 iov_ix = 0; - struct sg_iovec curiov; - u32 sksg_ix = 0; - u8 *bufp = NULL; - u32 buf_len = 0; - u32 resid = sksgio->dxfer_len; - int rc; - - curiov.iov_len = 0; - curiov.iov_base = NULL; - - if (dxfer_dir != sksgio->sg.dxfer_direction) { - if (dxfer_dir != SG_DXFER_TO_DEV || - sksgio->sg.dxfer_direction != SG_DXFER_TO_FROM_DEV) - return 0; - } - - while (resid > 0) { - u32 nbytes = PAGE_SIZE; - - if (curiov.iov_len == 0) { - curiov = sksgio->iov[iov_ix++]; - continue; - } - - if (buf_len == 0) { - struct page *page; - page = sg_page(&skspcl->req.sg[sksg_ix++]); - bufp = page_address(page); - buf_len = PAGE_SIZE; - } - - nbytes = min_t(u32, nbytes, resid); - nbytes = min_t(u32, nbytes, curiov.iov_len); - nbytes = min_t(u32, nbytes, buf_len); - - if (dxfer_dir == SG_DXFER_TO_DEV) - rc = __copy_from_user(bufp, curiov.iov_base, nbytes); - else - rc = __copy_to_user(curiov.iov_base, bufp, nbytes); - - if (rc) - return -EFAULT; - - resid -= nbytes; - curiov.iov_len -= nbytes; - curiov.iov_base += nbytes; - buf_len -= nbytes; - } - - return 0; -} - -static int skd_sg_io_send_fitmsg(struct skd_device *skdev, - struct skd_sg_io *sksgio) -{ - struct skd_special_context *skspcl = sksgio->skspcl; - struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf; - struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1]; - - memset(skspcl->msg_buf, 0, SKD_N_SPECIAL_FITMSG_BYTES); - - /* Initialize the FIT msg header */ - fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT; - fmh->num_protocol_cmds_coalesced = 1; - - /* Initialize the SCSI request */ - if (sksgio->sg.dxfer_direction != SG_DXFER_NONE) - scsi_req->hdr.sg_list_dma_address = - cpu_to_be64(skspcl->req.sksg_dma_address); - scsi_req->hdr.tag = skspcl->req.id; - scsi_req->hdr.sg_list_len_bytes = - cpu_to_be32(skspcl->req.sg_byte_count); - memcpy(scsi_req->cdb, sksgio->cdb, sizeof(scsi_req->cdb)); - - skspcl->req.state = SKD_REQ_STATE_BUSY; - skd_send_special_fitmsg(skdev, 
skspcl); - - return 0; -} - -static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio) -{ - unsigned long flags; - int rc; - - rc = wait_event_interruptible_timeout(skdev->waitq, - (sksgio->skspcl->req.state != - SKD_REQ_STATE_BUSY), - msecs_to_jiffies(sksgio->sg. - timeout)); - - spin_lock_irqsave(&skdev->lock, flags); - - if (sksgio->skspcl->req.state == SKD_REQ_STATE_ABORTED) { - pr_debug("%s:%s:%d skspcl %p aborted\n", - skdev->name, __func__, __LINE__, sksgio->skspcl); - - /* Build check cond, sense and let command finish. */ - /* For a timeout, we must fabricate completion and sense - * data to complete the command */ - sksgio->skspcl->req.completion.status = - SAM_STAT_CHECK_CONDITION; - - memset(&sksgio->skspcl->req.err_info, 0, - sizeof(sksgio->skspcl->req.err_info)); - sksgio->skspcl->req.err_info.type = 0x70; - sksgio->skspcl->req.err_info.key = ABORTED_COMMAND; - sksgio->skspcl->req.err_info.code = 0x44; - sksgio->skspcl->req.err_info.qual = 0; - rc = 0; - } else if (sksgio->skspcl->req.state != SKD_REQ_STATE_BUSY) - /* No longer on the adapter. We finish. */ - rc = 0; - else { - /* Something's gone wrong. Still busy. Timeout or - * user interrupted (control-C). Mark as an orphan - * so it will be disposed when completed. */ - sksgio->skspcl->orphaned = 1; - sksgio->skspcl = NULL; - if (rc == 0) { - pr_debug("%s:%s:%d timed out %p (%u ms)\n", - skdev->name, __func__, __LINE__, - sksgio, sksgio->sg.timeout); - rc = -ETIMEDOUT; - } else { - pr_debug("%s:%s:%d cntlc %p\n", - skdev->name, __func__, __LINE__, sksgio); - rc = -EINTR; - } - } - - spin_unlock_irqrestore(&skdev->lock, flags); - - return rc; -} - -static int skd_sg_io_put_status(struct skd_device *skdev, - struct skd_sg_io *sksgio) -{ - struct sg_io_hdr *sgp = &sksgio->sg; - struct skd_special_context *skspcl = sksgio->skspcl; - int resid = 0; - - u32 nb = be32_to_cpu(skspcl->req.completion.num_returned_bytes); - - sgp->status = skspcl->req.completion.status; - resid = sksgio->dxfer_len - nb; - - sgp->masked_status = sgp->status & STATUS_MASK; - sgp->msg_status = 0; - sgp->host_status = 0; - sgp->driver_status = 0; - sgp->resid = resid; - if (sgp->masked_status || sgp->host_status || sgp->driver_status) - sgp->info |= SG_INFO_CHECK; - - pr_debug("%s:%s:%d status %x masked %x resid 0x%x\n", - skdev->name, __func__, __LINE__, - sgp->status, sgp->masked_status, sgp->resid); - - if (sgp->masked_status == SAM_STAT_CHECK_CONDITION) { - if (sgp->mx_sb_len > 0) { - struct fit_comp_error_info *ei = &skspcl->req.err_info; - u32 nbytes = sizeof(*ei); - - nbytes = min_t(u32, nbytes, sgp->mx_sb_len); - - sgp->sb_len_wr = nbytes; - - if (__copy_to_user(sgp->sbp, ei, nbytes)) { - pr_debug("%s:%s:%d copy_to_user sense failed %p\n", - skdev->name, __func__, __LINE__, - sgp->sbp); - return -EFAULT; - } - } - } - - if (__copy_to_user(sksgio->argp, sgp, sizeof(sg_io_hdr_t))) { - pr_debug("%s:%s:%d copy_to_user sg failed %p\n", - skdev->name, __func__, __LINE__, sksgio->argp); - return -EFAULT; - } - - return 0; -} - -static int skd_sg_io_release_skspcl(struct skd_device *skdev, - struct skd_sg_io *sksgio) -{ - struct skd_special_context *skspcl = sksgio->skspcl; - - if (skspcl != NULL) { - ulong flags; - - sksgio->skspcl = NULL; - - spin_lock_irqsave(&skdev->lock, flags); - skd_release_special(skdev, skspcl); - spin_unlock_irqrestore(&skdev->lock, flags); - } - - return 0; -} - /* ***************************************************************************** * INTERNAL REQUESTS -- generated by driver itself @@ -1811,14 
+884,15 @@ static int skd_format_internal_skspcl(struct skd_device *skdev) uint64_t dma_address; struct skd_scsi_request *scsi; - fmh = (struct fit_msg_hdr *)&skspcl->msg_buf[0]; + fmh = &skspcl->msg_buf->fmh; fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT; fmh->num_protocol_cmds_coalesced = 1; - scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64]; + scsi = &skspcl->msg_buf->scsi[0]; memset(scsi, 0, sizeof(*scsi)); dma_address = skspcl->req.sksg_dma_address; scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address); + skspcl->req.n_sg = 1; sgd->control = FIT_SGD_CONTROL_LAST; sgd->byte_count = 0; sgd->host_side_addr = skspcl->db_dma_address; @@ -1846,11 +920,9 @@ static void skd_send_internal_skspcl(struct skd_device *skdev, */ return; - SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0); skspcl->req.state = SKD_REQ_STATE_BUSY; - skspcl->req.id += SKD_ID_INCR; - scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64]; + scsi = &skspcl->msg_buf->scsi[0]; scsi->hdr.tag = skspcl->req.id; memset(scsi->cdb, 0, sizeof(scsi->cdb)); @@ -1940,32 +1012,35 @@ static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key, /* If the check condition is of special interest, log a message */ if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02) && (code == 0x04) && (qual == 0x06)) { - pr_err("(%s): *** LOST_WRITE_DATA ERROR *** key/asc/" - "ascq/fruc %02x/%02x/%02x/%02x\n", - skd_name(skdev), key, code, qual, fruc); + dev_err(&skdev->pdev->dev, + "*** LOST_WRITE_DATA ERROR *** key/asc/ascq/fruc %02x/%02x/%02x/%02x\n", + key, code, qual, fruc); } } static void skd_complete_internal(struct skd_device *skdev, - volatile struct fit_completion_entry_v1 - *skcomp, - volatile struct fit_comp_error_info *skerr, + struct fit_completion_entry_v1 *skcomp, + struct fit_comp_error_info *skerr, struct skd_special_context *skspcl) { u8 *buf = skspcl->data_buf; u8 status; int i; - struct skd_scsi_request *scsi = - (struct skd_scsi_request *)&skspcl->msg_buf[64]; + struct skd_scsi_request *scsi = &skspcl->msg_buf->scsi[0]; + + lockdep_assert_held(&skdev->lock); SKD_ASSERT(skspcl == &skdev->internal_skspcl); - pr_debug("%s:%s:%d complete internal %x\n", - skdev->name, __func__, __LINE__, scsi->cdb[0]); + dev_dbg(&skdev->pdev->dev, "complete internal %x\n", scsi->cdb[0]); + + dma_sync_single_for_cpu(&skdev->pdev->dev, + skspcl->db_dma_address, + skspcl->req.sksg_list[0].byte_count, + DMA_BIDIRECTIONAL); skspcl->req.completion = *skcomp; skspcl->req.state = SKD_REQ_STATE_IDLE; - skspcl->req.id += SKD_ID_INCR; status = skspcl->req.completion.status; @@ -1981,14 +1056,15 @@ static void skd_complete_internal(struct skd_device *skdev, skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER); else { if (skdev->state == SKD_DRVR_STATE_STOPPING) { - pr_debug("%s:%s:%d TUR failed, don't send anymore state 0x%x\n", - skdev->name, __func__, __LINE__, - skdev->state); + dev_dbg(&skdev->pdev->dev, + "TUR failed, don't send anymore state 0x%x\n", + skdev->state); return; } - pr_debug("%s:%s:%d **** TUR failed, retry skerr\n", - skdev->name, __func__, __LINE__); - skd_send_internal_skspcl(skdev, skspcl, 0x00); + dev_dbg(&skdev->pdev->dev, + "**** TUR failed, retry skerr\n"); + skd_send_internal_skspcl(skdev, skspcl, + TEST_UNIT_READY); } break; @@ -1997,14 +1073,15 @@ static void skd_complete_internal(struct skd_device *skdev, skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER); else { if (skdev->state == SKD_DRVR_STATE_STOPPING) { - pr_debug("%s:%s:%d write buffer failed, don't send anymore state 0x%x\n", - skdev->name, 
__func__, __LINE__, - skdev->state); + dev_dbg(&skdev->pdev->dev, + "write buffer failed, don't send anymore state 0x%x\n", + skdev->state); return; } - pr_debug("%s:%s:%d **** write buffer failed, retry skerr\n", - skdev->name, __func__, __LINE__); - skd_send_internal_skspcl(skdev, skspcl, 0x00); + dev_dbg(&skdev->pdev->dev, + "**** write buffer failed, retry skerr\n"); + skd_send_internal_skspcl(skdev, skspcl, + TEST_UNIT_READY); } break; @@ -2014,33 +1091,31 @@ static void skd_complete_internal(struct skd_device *skdev, skd_send_internal_skspcl(skdev, skspcl, READ_CAPACITY); else { - pr_err( - "(%s):*** W/R Buffer mismatch %d ***\n", - skd_name(skdev), skdev->connect_retries); + dev_err(&skdev->pdev->dev, + "*** W/R Buffer mismatch %d ***\n", + skdev->connect_retries); if (skdev->connect_retries < SKD_MAX_CONNECT_RETRIES) { skdev->connect_retries++; skd_soft_reset(skdev); } else { - pr_err( - "(%s): W/R Buffer Connect Error\n", - skd_name(skdev)); + dev_err(&skdev->pdev->dev, + "W/R Buffer Connect Error\n"); return; } } } else { if (skdev->state == SKD_DRVR_STATE_STOPPING) { - pr_debug("%s:%s:%d " - "read buffer failed, don't send anymore state 0x%x\n", - skdev->name, __func__, __LINE__, - skdev->state); + dev_dbg(&skdev->pdev->dev, + "read buffer failed, don't send anymore state 0x%x\n", + skdev->state); return; } - pr_debug("%s:%s:%d " - "**** read buffer failed, retry skerr\n", - skdev->name, __func__, __LINE__); - skd_send_internal_skspcl(skdev, skspcl, 0x00); + dev_dbg(&skdev->pdev->dev, + "**** read buffer failed, retry skerr\n"); + skd_send_internal_skspcl(skdev, skspcl, + TEST_UNIT_READY); } break; @@ -2054,10 +1129,9 @@ static void skd_complete_internal(struct skd_device *skdev, (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7]; - pr_debug("%s:%s:%d last lba %d, bs %d\n", - skdev->name, __func__, __LINE__, - skdev->read_cap_last_lba, - skdev->read_cap_blocksize); + dev_dbg(&skdev->pdev->dev, "last lba %d, bs %d\n", + skdev->read_cap_last_lba, + skdev->read_cap_blocksize); set_capacity(skdev->disk, skdev->read_cap_last_lba + 1); @@ -2068,13 +1142,10 @@ static void skd_complete_internal(struct skd_device *skdev, (skerr->key == MEDIUM_ERROR)) { skdev->read_cap_last_lba = ~0; set_capacity(skdev->disk, skdev->read_cap_last_lba + 1); - pr_debug("%s:%s:%d " - "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n", - skdev->name, __func__, __LINE__); + dev_dbg(&skdev->pdev->dev, "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n"); skd_send_internal_skspcl(skdev, skspcl, INQUIRY); } else { - pr_debug("%s:%s:%d **** READCAP failed, retry TUR\n", - skdev->name, __func__, __LINE__); + dev_dbg(&skdev->pdev->dev, "**** READCAP failed, retry TUR\n"); skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY); } @@ -2091,8 +1162,7 @@ static void skd_complete_internal(struct skd_device *skdev, } if (skd_unquiesce_dev(skdev) < 0) - pr_debug("%s:%s:%d **** failed, to ONLINE device\n", - skdev->name, __func__, __LINE__); + dev_dbg(&skdev->pdev->dev, "**** failed, to ONLINE device\n"); /* connection is complete */ skdev->connect_retries = 0; break; @@ -2120,27 +1190,20 @@ static void skd_send_fitmsg(struct skd_device *skdev, struct skd_fitmsg_context *skmsg) { u64 qcmd; - struct fit_msg_hdr *fmh; - pr_debug("%s:%s:%d dma address 0x%llx, busy=%d\n", - skdev->name, __func__, __LINE__, - skmsg->mb_dma_address, skdev->in_flight); - pr_debug("%s:%s:%d msg_buf 0x%p, offset %x\n", - skdev->name, __func__, __LINE__, - skmsg->msg_buf, 
skmsg->offset); + dev_dbg(&skdev->pdev->dev, "dma address 0x%llx, busy=%d\n", + skmsg->mb_dma_address, skd_in_flight(skdev)); + dev_dbg(&skdev->pdev->dev, "msg_buf %p\n", skmsg->msg_buf); qcmd = skmsg->mb_dma_address; qcmd |= FIT_QCMD_QID_NORMAL; - fmh = (struct fit_msg_hdr *)skmsg->msg_buf; - skmsg->outstanding = fmh->num_protocol_cmds_coalesced; - if (unlikely(skdev->dbg_level > 1)) { u8 *bp = (u8 *)skmsg->msg_buf; int i; for (i = 0; i < skmsg->length; i += 8) { - pr_debug("%s:%s:%d msg[%2d] %8ph\n", - skdev->name, __func__, __LINE__, i, &bp[i]); + dev_dbg(&skdev->pdev->dev, "msg[%2d] %8ph\n", i, + &bp[i]); if (i == 0) i = 64 - 8; } @@ -2160,6 +1223,12 @@ static void skd_send_fitmsg(struct skd_device *skdev, */ qcmd |= FIT_QCMD_MSGSIZE_64; + dma_sync_single_for_device(&skdev->pdev->dev, skmsg->mb_dma_address, + skmsg->length, DMA_TO_DEVICE); + + /* Make sure skd_msg_buf is written before the doorbell is triggered. */ + smp_wmb(); + SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND); } @@ -2168,30 +1237,31 @@ static void skd_send_special_fitmsg(struct skd_device *skdev, { u64 qcmd; + WARN_ON_ONCE(skspcl->req.n_sg != 1); + if (unlikely(skdev->dbg_level > 1)) { u8 *bp = (u8 *)skspcl->msg_buf; int i; for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) { - pr_debug("%s:%s:%d spcl[%2d] %8ph\n", - skdev->name, __func__, __LINE__, i, &bp[i]); + dev_dbg(&skdev->pdev->dev, " spcl[%2d] %8ph\n", i, + &bp[i]); if (i == 0) i = 64 - 8; } - pr_debug("%s:%s:%d skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n", - skdev->name, __func__, __LINE__, - skspcl, skspcl->req.id, skspcl->req.sksg_list, - skspcl->req.sksg_dma_address); + dev_dbg(&skdev->pdev->dev, + "skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n", + skspcl, skspcl->req.id, skspcl->req.sksg_list, + skspcl->req.sksg_dma_address); for (i = 0; i < skspcl->req.n_sg; i++) { struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[i]; - pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x " - "addr=0x%llx next=0x%llx\n", - skdev->name, __func__, __LINE__, - i, sgd->byte_count, sgd->control, - sgd->host_side_addr, sgd->next_desc_ptr); + dev_dbg(&skdev->pdev->dev, + " sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n", + i, sgd->byte_count, sgd->control, + sgd->host_side_addr, sgd->next_desc_ptr); } } @@ -2202,6 +1272,20 @@ static void skd_send_special_fitmsg(struct skd_device *skdev, qcmd = skspcl->mb_dma_address; qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128; + dma_sync_single_for_device(&skdev->pdev->dev, skspcl->mb_dma_address, + SKD_N_SPECIAL_FITMSG_BYTES, DMA_TO_DEVICE); + dma_sync_single_for_device(&skdev->pdev->dev, + skspcl->req.sksg_dma_address, + 1 * sizeof(struct fit_sg_descriptor), + DMA_TO_DEVICE); + dma_sync_single_for_device(&skdev->pdev->dev, + skspcl->db_dma_address, + skspcl->req.sksg_list[0].byte_count, + DMA_BIDIRECTIONAL); + + /* Make sure skd_msg_buf is written before the doorbell is triggered. 
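+ * The barrier keeps the stores that fill the message buffer ordered
+ * before the FIT_Q_COMMAND doorbell write, so the device cannot fetch
+ * a partially written message.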
*/ + smp_wmb(); + SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND); } @@ -2212,8 +1296,8 @@ static void skd_send_special_fitmsg(struct skd_device *skdev, */ static void skd_complete_other(struct skd_device *skdev, - volatile struct fit_completion_entry_v1 *skcomp, - volatile struct fit_comp_error_info *skerr); + struct fit_completion_entry_v1 *skcomp, + struct fit_comp_error_info *skerr); struct sns_info { u8 type; @@ -2262,21 +1346,20 @@ static struct sns_info skd_chkstat_table[] = { static enum skd_check_status_action skd_check_status(struct skd_device *skdev, - u8 cmp_status, volatile struct fit_comp_error_info *skerr) + u8 cmp_status, struct fit_comp_error_info *skerr) { - int i, n; + int i; - pr_err("(%s): key/asc/ascq/fruc %02x/%02x/%02x/%02x\n", - skd_name(skdev), skerr->key, skerr->code, skerr->qual, - skerr->fruc); + dev_err(&skdev->pdev->dev, "key/asc/ascq/fruc %02x/%02x/%02x/%02x\n", + skerr->key, skerr->code, skerr->qual, skerr->fruc); - pr_debug("%s:%s:%d stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n", - skdev->name, __func__, __LINE__, skerr->type, cmp_status, - skerr->key, skerr->code, skerr->qual, skerr->fruc); + dev_dbg(&skdev->pdev->dev, + "stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n", + skerr->type, cmp_status, skerr->key, skerr->code, skerr->qual, + skerr->fruc); /* Does the info match an entry in the good category? */ - n = sizeof(skd_chkstat_table) / sizeof(skd_chkstat_table[0]); - for (i = 0; i < n; i++) { + for (i = 0; i < ARRAY_SIZE(skd_chkstat_table); i++) { struct sns_info *sns = &skd_chkstat_table[i]; if (sns->mask & 0x10) @@ -2300,10 +1383,9 @@ skd_check_status(struct skd_device *skdev, continue; if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) { - pr_err("(%s): SMART Alert: sense key/asc/ascq " - "%02x/%02x/%02x\n", - skd_name(skdev), skerr->key, - skerr->code, skerr->qual); + dev_err(&skdev->pdev->dev, + "SMART Alert: sense key/asc/ascq %02x/%02x/%02x\n", + skerr->key, skerr->code, skerr->qual); } return sns->action; } @@ -2312,335 +1394,80 @@ skd_check_status(struct skd_device *skdev, * zero status means good */ if (cmp_status) { - pr_debug("%s:%s:%d status check: error\n", - skdev->name, __func__, __LINE__); + dev_dbg(&skdev->pdev->dev, "status check: error\n"); return SKD_CHECK_STATUS_REPORT_ERROR; } - pr_debug("%s:%s:%d status check good default\n", - skdev->name, __func__, __LINE__); + dev_dbg(&skdev->pdev->dev, "status check good default\n"); return SKD_CHECK_STATUS_REPORT_GOOD; } static void skd_resolve_req_exception(struct skd_device *skdev, - struct skd_request_context *skreq) + struct skd_request_context *skreq, + struct request *req) { u8 cmp_status = skreq->completion.status; switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) { case SKD_CHECK_STATUS_REPORT_GOOD: case SKD_CHECK_STATUS_REPORT_SMART_ALERT: - skd_end_request(skdev, skreq, BLK_STS_OK); + skreq->status = BLK_STS_OK; + blk_mq_complete_request(req); break; case SKD_CHECK_STATUS_BUSY_IMMINENT: skd_log_skreq(skdev, skreq, "retry(busy)"); - blk_requeue_request(skdev->queue, skreq->req); - pr_info("(%s) drive BUSY imminent\n", skd_name(skdev)); + blk_requeue_request(skdev->queue, req); + dev_info(&skdev->pdev->dev, "drive BUSY imminent\n"); skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT; skdev->timer_countdown = SKD_TIMER_MINUTES(20); skd_quiesce_dev(skdev); break; case SKD_CHECK_STATUS_REQUEUE_REQUEST: - if ((unsigned long) ++skreq->req->special < SKD_MAX_RETRIES) { + if ((unsigned long) ++req->special < SKD_MAX_RETRIES) { skd_log_skreq(skdev, skreq, "retry"); - 
blk_requeue_request(skdev->queue, skreq->req); + blk_requeue_request(skdev->queue, req); break; } - /* fall through to report error */ + /* fall through */ case SKD_CHECK_STATUS_REPORT_ERROR: default: - skd_end_request(skdev, skreq, BLK_STS_IOERR); + skreq->status = BLK_STS_IOERR; + blk_mq_complete_request(req); break; } } -/* assume spinlock is already held */ static void skd_release_skreq(struct skd_device *skdev, struct skd_request_context *skreq) { - u32 msg_slot; - struct skd_fitmsg_context *skmsg; - - u32 timo_slot; - - /* - * Reclaim the FIT msg buffer if this is - * the first of the requests it carried to - * be completed. The FIT msg buffer used to - * send this request cannot be reused until - * we are sure the s1120 card has copied - * it to its memory. The FIT msg might have - * contained several requests. As soon as - * any of them are completed we know that - * the entire FIT msg was transferred. - * Only the first completed request will - * match the FIT msg buffer id. The FIT - * msg buffer id is immediately updated. - * When subsequent requests complete the FIT - * msg buffer id won't match, so we know - * quite cheaply that it is already done. - */ - msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK; - SKD_ASSERT(msg_slot < skdev->num_fitmsg_context); - - skmsg = &skdev->skmsg_table[msg_slot]; - if (skmsg->id == skreq->fitmsg_id) { - SKD_ASSERT(skmsg->state == SKD_MSG_STATE_BUSY); - SKD_ASSERT(skmsg->outstanding > 0); - skmsg->outstanding--; - if (skmsg->outstanding == 0) { - skmsg->state = SKD_MSG_STATE_IDLE; - skmsg->id += SKD_ID_INCR; - skmsg->next = skdev->skmsg_free_list; - skdev->skmsg_free_list = skmsg; - } - } - - /* - * Decrease the number of active requests. - * Also decrements the count in the timeout slot. - */ - SKD_ASSERT(skdev->in_flight > 0); - skdev->in_flight -= 1; - - timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK; - SKD_ASSERT(skdev->timeout_slot[timo_slot] > 0); - skdev->timeout_slot[timo_slot] -= 1; - - /* - * Reset backpointer - */ - skreq->req = NULL; - /* * Reclaim the skd_request_context */ skreq->state = SKD_REQ_STATE_IDLE; - skreq->id += SKD_ID_INCR; - skreq->next = skdev->skreq_free_list; - skdev->skreq_free_list = skreq; } -#define DRIVER_INQ_EVPD_PAGE_CODE 0xDA - -static void skd_do_inq_page_00(struct skd_device *skdev, - volatile struct fit_completion_entry_v1 *skcomp, - volatile struct fit_comp_error_info *skerr, - uint8_t *cdb, uint8_t *buf) -{ - uint16_t insert_pt, max_bytes, drive_pages, drive_bytes, new_size; - - /* Caller requested "supported pages". The driver needs to insert - * its page. - */ - pr_debug("%s:%s:%d skd_do_driver_inquiry: modify supported pages.\n", - skdev->name, __func__, __LINE__); - - /* If the device rejected the request because the CDB was - * improperly formed, then just leave. - */ - if (skcomp->status == SAM_STAT_CHECK_CONDITION && - skerr->key == ILLEGAL_REQUEST && skerr->code == 0x24) - return; - - /* Get the amount of space the caller allocated */ - max_bytes = (cdb[3] << 8) | cdb[4]; - - /* Get the number of pages actually returned by the device */ - drive_pages = (buf[2] << 8) | buf[3]; - drive_bytes = drive_pages + 4; - new_size = drive_pages + 1; - - /* Supported pages must be in numerical order, so find where - * the driver page needs to be inserted into the list of - * pages returned by the device. - */ - for (insert_pt = 4; insert_pt < drive_bytes; insert_pt++) { - if (buf[insert_pt] == DRIVER_INQ_EVPD_PAGE_CODE) - return; /* Device using this page code. 
abort */ - else if (buf[insert_pt] > DRIVER_INQ_EVPD_PAGE_CODE) - break; - } - - if (insert_pt < max_bytes) { - uint16_t u; - - /* Shift everything up one byte to make room. */ - for (u = new_size + 3; u > insert_pt; u--) - buf[u] = buf[u - 1]; - buf[insert_pt] = DRIVER_INQ_EVPD_PAGE_CODE; - - /* SCSI byte order increment of num_returned_bytes by 1 */ - skcomp->num_returned_bytes = - be32_to_cpu(skcomp->num_returned_bytes) + 1; - skcomp->num_returned_bytes = - be32_to_cpu(skcomp->num_returned_bytes); - } - - /* update page length field to reflect the driver's page too */ - buf[2] = (uint8_t)((new_size >> 8) & 0xFF); - buf[3] = (uint8_t)((new_size >> 0) & 0xFF); -} - -static void skd_get_link_info(struct pci_dev *pdev, u8 *speed, u8 *width) -{ - int pcie_reg; - u16 pci_bus_speed; - u8 pci_lanes; - - pcie_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP); - if (pcie_reg) { - u16 linksta; - pci_read_config_word(pdev, pcie_reg + PCI_EXP_LNKSTA, &linksta); - - pci_bus_speed = linksta & 0xF; - pci_lanes = (linksta & 0x3F0) >> 4; - } else { - *speed = STEC_LINK_UNKNOWN; - *width = 0xFF; - return; - } - - switch (pci_bus_speed) { - case 1: - *speed = STEC_LINK_2_5GTS; - break; - case 2: - *speed = STEC_LINK_5GTS; - break; - case 3: - *speed = STEC_LINK_8GTS; - break; - default: - *speed = STEC_LINK_UNKNOWN; - break; - } - - if (pci_lanes <= 0x20) - *width = pci_lanes; - else - *width = 0xFF; -} - -static void skd_do_inq_page_da(struct skd_device *skdev, - volatile struct fit_completion_entry_v1 *skcomp, - volatile struct fit_comp_error_info *skerr, - uint8_t *cdb, uint8_t *buf) -{ - struct pci_dev *pdev = skdev->pdev; - unsigned max_bytes; - struct driver_inquiry_data inq; - u16 val; - - pr_debug("%s:%s:%d skd_do_driver_inquiry: return driver page\n", - skdev->name, __func__, __LINE__); - - memset(&inq, 0, sizeof(inq)); - - inq.page_code = DRIVER_INQ_EVPD_PAGE_CODE; - - skd_get_link_info(pdev, &inq.pcie_link_speed, &inq.pcie_link_lanes); - inq.pcie_bus_number = cpu_to_be16(pdev->bus->number); - inq.pcie_device_number = PCI_SLOT(pdev->devfn); - inq.pcie_function_number = PCI_FUNC(pdev->devfn); - - pci_read_config_word(pdev, PCI_VENDOR_ID, &val); - inq.pcie_vendor_id = cpu_to_be16(val); - - pci_read_config_word(pdev, PCI_DEVICE_ID, &val); - inq.pcie_device_id = cpu_to_be16(val); - - pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &val); - inq.pcie_subsystem_vendor_id = cpu_to_be16(val); - - pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &val); - inq.pcie_subsystem_device_id = cpu_to_be16(val); - - /* Driver version, fixed lenth, padded with spaces on the right */ - inq.driver_version_length = sizeof(inq.driver_version); - memset(&inq.driver_version, ' ', sizeof(inq.driver_version)); - memcpy(inq.driver_version, DRV_VER_COMPL, - min(sizeof(inq.driver_version), strlen(DRV_VER_COMPL))); - - inq.page_length = cpu_to_be16((sizeof(inq) - 4)); - - /* Clear the error set by the device */ - skcomp->status = SAM_STAT_GOOD; - memset((void *)skerr, 0, sizeof(*skerr)); - - /* copy response into output buffer */ - max_bytes = (cdb[3] << 8) | cdb[4]; - memcpy(buf, &inq, min_t(unsigned, max_bytes, sizeof(inq))); - - skcomp->num_returned_bytes = - be32_to_cpu(min_t(uint16_t, max_bytes, sizeof(inq))); -} - -static void skd_do_driver_inq(struct skd_device *skdev, - volatile struct fit_completion_entry_v1 *skcomp, - volatile struct fit_comp_error_info *skerr, - uint8_t *cdb, uint8_t *buf) -{ - if (!buf) - return; - else if (cdb[0] != INQUIRY) - return; /* Not an INQUIRY */ - else if ((cdb[1] & 1) == 0) - return; /* 
EVPD not set */ - else if (cdb[2] == 0) - /* Need to add driver's page to supported pages list */ - skd_do_inq_page_00(skdev, skcomp, skerr, cdb, buf); - else if (cdb[2] == DRIVER_INQ_EVPD_PAGE_CODE) - /* Caller requested driver's page */ - skd_do_inq_page_da(skdev, skcomp, skerr, cdb, buf); -} - -static unsigned char *skd_sg_1st_page_ptr(struct scatterlist *sg) -{ - if (!sg) - return NULL; - if (!sg_page(sg)) - return NULL; - return sg_virt(sg); -} - -static void skd_process_scsi_inq(struct skd_device *skdev, - volatile struct fit_completion_entry_v1 - *skcomp, - volatile struct fit_comp_error_info *skerr, - struct skd_special_context *skspcl) -{ - uint8_t *buf; - struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf; - struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1]; - - dma_sync_sg_for_cpu(skdev->class_dev, skspcl->req.sg, skspcl->req.n_sg, - skspcl->req.sg_data_dir); - buf = skd_sg_1st_page_ptr(skspcl->req.sg); - - if (buf) - skd_do_driver_inq(skdev, skcomp, skerr, scsi_req->cdb, buf); -} - - static int skd_isr_completion_posted(struct skd_device *skdev, int limit, int *enqueued) { - volatile struct fit_completion_entry_v1 *skcmp = NULL; - volatile struct fit_comp_error_info *skerr; + struct fit_completion_entry_v1 *skcmp; + struct fit_comp_error_info *skerr; u16 req_id; - u32 req_slot; + u32 tag; + u16 hwq = 0; + struct request *rq; struct skd_request_context *skreq; - u16 cmp_cntxt = 0; - u8 cmp_status = 0; - u8 cmp_cycle = 0; - u32 cmp_bytes = 0; + u16 cmp_cntxt; + u8 cmp_status; + u8 cmp_cycle; + u32 cmp_bytes; int rc = 0; int processed = 0; + lockdep_assert_held(&skdev->lock); + for (;; ) { SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY); @@ -2652,16 +1479,14 @@ static int skd_isr_completion_posted(struct skd_device *skdev, skerr = &skdev->skerr_table[skdev->skcomp_ix]; - pr_debug("%s:%s:%d " - "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d " - "busy=%d rbytes=0x%x proto=%d\n", - skdev->name, __func__, __LINE__, skdev->skcomp_cycle, - skdev->skcomp_ix, cmp_cycle, cmp_cntxt, cmp_status, - skdev->in_flight, cmp_bytes, skdev->proto_ver); + dev_dbg(&skdev->pdev->dev, + "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d busy=%d rbytes=0x%x proto=%d\n", + skdev->skcomp_cycle, skdev->skcomp_ix, cmp_cycle, + cmp_cntxt, cmp_status, skd_in_flight(skdev), + cmp_bytes, skdev->proto_ver); if (cmp_cycle != skdev->skcomp_cycle) { - pr_debug("%s:%s:%d end of completions\n", - skdev->name, __func__, __LINE__); + dev_dbg(&skdev->pdev->dev, "end of completions\n"); break; } /* @@ -2680,49 +1505,38 @@ static int skd_isr_completion_posted(struct skd_device *skdev, * r/w request (see skd_start() above) or a special request. */ req_id = cmp_cntxt; - req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK; + tag = req_id & SKD_ID_SLOT_AND_TABLE_MASK; /* Is this other than a r/w request? */ - if (req_slot >= skdev->num_req_context) { + if (tag >= skdev->num_req_context) { /* * This is not a completion for a r/w request. */ + WARN_ON_ONCE(blk_mq_tag_to_rq(skdev->tag_set.tags[hwq], + tag)); skd_complete_other(skdev, skcmp, skerr); continue; } - skreq = &skdev->skreq_table[req_slot]; + rq = blk_mq_tag_to_rq(skdev->tag_set.tags[hwq], tag); + if (WARN(!rq, "No request for tag %#x -> %#x\n", cmp_cntxt, + tag)) + continue; + skreq = blk_mq_rq_to_pdu(rq); /* * Make sure the request ID for the slot matches. 
*/ if (skreq->id != req_id) { - pr_debug("%s:%s:%d mismatch comp_id=0x%x req_id=0x%x\n", - skdev->name, __func__, __LINE__, - req_id, skreq->id); - { - u16 new_id = cmp_cntxt; - pr_err("(%s): Completion mismatch " - "comp_id=0x%04x skreq=0x%04x new=0x%04x\n", - skd_name(skdev), req_id, - skreq->id, new_id); + dev_err(&skdev->pdev->dev, + "Completion mismatch comp_id=0x%04x skreq=0x%04x new=0x%04x\n", + req_id, skreq->id, cmp_cntxt); - continue; - } + continue; } SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY); - if (skreq->state == SKD_REQ_STATE_ABORTED) { - pr_debug("%s:%s:%d reclaim req %p id=%04x\n", - skdev->name, __func__, __LINE__, - skreq, skreq->id); - /* a previously timed out command can - * now be cleaned up */ - skd_release_skreq(skdev, skreq); - continue; - } - skreq->completion = *skcmp; if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) { skreq->err_info = *skerr; @@ -2734,27 +1548,17 @@ static int skd_isr_completion_posted(struct skd_device *skdev, if (skreq->n_sg > 0) skd_postop_sg_list(skdev, skreq); - if (!skreq->req) { - pr_debug("%s:%s:%d NULL backptr skdreq %p, " - "req=0x%x req_id=0x%x\n", - skdev->name, __func__, __LINE__, - skreq, skreq->id, req_id); - } else { - /* - * Capture the outcome and post it back to the - * native request. - */ - if (likely(cmp_status == SAM_STAT_GOOD)) - skd_end_request(skdev, skreq, BLK_STS_OK); - else - skd_resolve_req_exception(skdev, skreq); - } + skd_release_skreq(skdev, skreq); /* - * Release the skreq, its FIT msg (if one), timeout slot, - * and queue depth. + * Capture the outcome and post it back to the native request. */ - skd_release_skreq(skdev, skreq); + if (likely(cmp_status == SAM_STAT_GOOD)) { + skreq->status = BLK_STS_OK; + blk_mq_complete_request(rq); + } else { + skd_resolve_req_exception(skdev, skreq, rq); + } /* skd_isr_comp_limit equal zero means no limit */ if (limit) { @@ -2765,8 +1569,8 @@ static int skd_isr_completion_posted(struct skd_device *skdev, } } - if ((skdev->state == SKD_DRVR_STATE_PAUSING) - && (skdev->in_flight) == 0) { + if (skdev->state == SKD_DRVR_STATE_PAUSING && + skd_in_flight(skdev) == 0) { skdev->state = SKD_DRVR_STATE_PAUSED; wake_up_interruptible(&skdev->waitq); } @@ -2775,21 +1579,22 @@ static int skd_isr_completion_posted(struct skd_device *skdev, } static void skd_complete_other(struct skd_device *skdev, - volatile struct fit_completion_entry_v1 *skcomp, - volatile struct fit_comp_error_info *skerr) + struct fit_completion_entry_v1 *skcomp, + struct fit_comp_error_info *skerr) { u32 req_id = 0; u32 req_table; u32 req_slot; struct skd_special_context *skspcl; + lockdep_assert_held(&skdev->lock); + req_id = skcomp->tag; req_table = req_id & SKD_ID_TABLE_MASK; req_slot = req_id & SKD_ID_SLOT_MASK; - pr_debug("%s:%s:%d table=0x%x id=0x%x slot=%d\n", - skdev->name, __func__, __LINE__, - req_table, req_id, req_slot); + dev_dbg(&skdev->pdev->dev, "table=0x%x id=0x%x slot=%d\n", req_table, + req_id, req_slot); /* * Based on the request id, determine how to dispatch this completion. @@ -2799,28 +1604,12 @@ static void skd_complete_other(struct skd_device *skdev, switch (req_table) { case SKD_ID_RW_REQUEST: /* - * The caller, skd_completion_posted_isr() above, + * The caller, skd_isr_completion_posted() above, * handles r/w requests. The only way we get here * is if the req_slot is out of bounds. */ break; - case SKD_ID_SPECIAL_REQUEST: - /* - * Make sure the req_slot is in bounds and that the id - * matches. 
- */ - if (req_slot < skdev->n_special) { - skspcl = &skdev->skspcl_table[req_slot]; - if (skspcl->req.id == req_id && - skspcl->req.state == SKD_REQ_STATE_BUSY) { - skd_complete_special(skdev, - skcomp, skerr, skspcl); - return; - } - } - break; - case SKD_ID_INTERNAL: if (req_slot == 0) { skspcl = &skdev->internal_skspcl; @@ -2851,72 +1640,9 @@ static void skd_complete_other(struct skd_device *skdev, */ } -static void skd_complete_special(struct skd_device *skdev, - volatile struct fit_completion_entry_v1 - *skcomp, - volatile struct fit_comp_error_info *skerr, - struct skd_special_context *skspcl) -{ - pr_debug("%s:%s:%d completing special request %p\n", - skdev->name, __func__, __LINE__, skspcl); - if (skspcl->orphaned) { - /* Discard orphaned request */ - /* ?: Can this release directly or does it need - * to use a worker? */ - pr_debug("%s:%s:%d release orphaned %p\n", - skdev->name, __func__, __LINE__, skspcl); - skd_release_special(skdev, skspcl); - return; - } - - skd_process_scsi_inq(skdev, skcomp, skerr, skspcl); - - skspcl->req.state = SKD_REQ_STATE_COMPLETED; - skspcl->req.completion = *skcomp; - skspcl->req.err_info = *skerr; - - skd_log_check_status(skdev, skspcl->req.completion.status, skerr->key, - skerr->code, skerr->qual, skerr->fruc); - - wake_up_interruptible(&skdev->waitq); -} - -/* assume spinlock is already held */ -static void skd_release_special(struct skd_device *skdev, - struct skd_special_context *skspcl) -{ - int i, was_depleted; - - for (i = 0; i < skspcl->req.n_sg; i++) { - struct page *page = sg_page(&skspcl->req.sg[i]); - __free_page(page); - } - - was_depleted = (skdev->skspcl_free_list == NULL); - - skspcl->req.state = SKD_REQ_STATE_IDLE; - skspcl->req.id += SKD_ID_INCR; - skspcl->req.next = - (struct skd_request_context *)skdev->skspcl_free_list; - skdev->skspcl_free_list = (struct skd_special_context *)skspcl; - - if (was_depleted) { - pr_debug("%s:%s:%d skspcl was depleted\n", - skdev->name, __func__, __LINE__); - /* Free list was depleted. Their might be waiters. */ - wake_up_interruptible(&skdev->waitq); - } -} - static void skd_reset_skcomp(struct skd_device *skdev) { - u32 nbytes; - struct fit_completion_entry_v1 *skcomp; - - nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY; - nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY; - - memset(skdev->skcomp_table, 0, nbytes); + memset(skdev->skcomp_table, 0, SKD_SKCOMP_SIZE); skdev->skcomp_ix = 0; skdev->skcomp_cycle = 1; @@ -2941,7 +1667,7 @@ static void skd_completion_worker(struct work_struct *work) * process everything in compq */ skd_isr_completion_posted(skdev, 0, &flush_enqueued); - skd_request_fn(skdev->queue); + schedule_work(&skdev->start_queue); spin_unlock_irqrestore(&skdev->lock, flags); } @@ -2951,14 +1677,13 @@ static void skd_isr_msg_from_dev(struct skd_device *skdev); static irqreturn_t skd_isr(int irq, void *ptr) { - struct skd_device *skdev; + struct skd_device *skdev = ptr; u32 intstat; u32 ack; int rc = 0; int deferred = 0; int flush_enqueued = 0; - skdev = (struct skd_device *)ptr; spin_lock(&skdev->lock); for (;; ) { @@ -2967,8 +1692,8 @@ skd_isr(int irq, void *ptr) ack = FIT_INT_DEF_MASK; ack &= intstat; - pr_debug("%s:%s:%d intstat=0x%x ack=0x%x\n", - skdev->name, __func__, __LINE__, intstat, ack); + dev_dbg(&skdev->pdev->dev, "intstat=0x%x ack=0x%x\n", intstat, + ack); /* As long as there is an int pending on device, keep * running loop. 
When none, get out, but if we've never @@ -3018,12 +1743,12 @@ skd_isr(int irq, void *ptr) } if (unlikely(flush_enqueued)) - skd_request_fn(skdev->queue); + schedule_work(&skdev->start_queue); if (deferred) schedule_work(&skdev->completion_worker); else if (!flush_enqueued) - skd_request_fn(skdev->queue); + schedule_work(&skdev->start_queue); spin_unlock(&skdev->lock); @@ -3033,13 +1758,13 @@ skd_isr(int irq, void *ptr) static void skd_drive_fault(struct skd_device *skdev) { skdev->state = SKD_DRVR_STATE_FAULT; - pr_err("(%s): Drive FAULT\n", skd_name(skdev)); + dev_err(&skdev->pdev->dev, "Drive FAULT\n"); } static void skd_drive_disappeared(struct skd_device *skdev) { skdev->state = SKD_DRVR_STATE_DISAPPEARED; - pr_err("(%s): Drive DISAPPEARED\n", skd_name(skdev)); + dev_err(&skdev->pdev->dev, "Drive DISAPPEARED\n"); } static void skd_isr_fwstate(struct skd_device *skdev) @@ -3052,10 +1777,9 @@ static void skd_isr_fwstate(struct skd_device *skdev) sense = SKD_READL(skdev, FIT_STATUS); state = sense & FIT_SR_DRIVE_STATE_MASK; - pr_err("(%s): s1120 state %s(%d)=>%s(%d)\n", - skd_name(skdev), - skd_drive_state_to_str(skdev->drive_state), skdev->drive_state, - skd_drive_state_to_str(state), state); + dev_err(&skdev->pdev->dev, "s1120 state %s(%d)=>%s(%d)\n", + skd_drive_state_to_str(skdev->drive_state), skdev->drive_state, + skd_drive_state_to_str(state), state); skdev->drive_state = state; @@ -3066,7 +1790,7 @@ static void skd_isr_fwstate(struct skd_device *skdev) break; } if (skdev->state == SKD_DRVR_STATE_RESTARTING) - skd_recover_requests(skdev, 0); + skd_recover_requests(skdev); if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) { skdev->timer_countdown = SKD_STARTING_TIMO; skdev->state = SKD_DRVR_STATE_STARTING; @@ -3087,11 +1811,11 @@ static void skd_isr_fwstate(struct skd_device *skdev) skdev->cur_max_queue_depth * 2 / 3 + 1; if (skdev->queue_low_water_mark < 1) skdev->queue_low_water_mark = 1; - pr_info( - "(%s): Queue depth limit=%d dev=%d lowat=%d\n", - skd_name(skdev), - skdev->cur_max_queue_depth, - skdev->dev_max_queue_depth, skdev->queue_low_water_mark); + dev_info(&skdev->pdev->dev, + "Queue depth limit=%d dev=%d lowat=%d\n", + skdev->cur_max_queue_depth, + skdev->dev_max_queue_depth, + skdev->queue_low_water_mark); skd_refresh_device_data(skdev); break; @@ -3107,7 +1831,7 @@ static void skd_isr_fwstate(struct skd_device *skdev) */ skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE; skdev->timer_countdown = SKD_TIMER_SECONDS(3); - blk_start_queue(skdev->queue); + schedule_work(&skdev->start_queue); break; case FIT_SR_DRIVE_BUSY_ERASE: skdev->state = SKD_DRVR_STATE_BUSY_ERASE; @@ -3128,8 +1852,7 @@ static void skd_isr_fwstate(struct skd_device *skdev) } break; case FIT_SR_DRIVE_FW_BOOTING: - pr_debug("%s:%s:%d ISR FIT_SR_DRIVE_FW_BOOTING %s\n", - skdev->name, __func__, __LINE__, skdev->name); + dev_dbg(&skdev->pdev->dev, "ISR FIT_SR_DRIVE_FW_BOOTING\n"); skdev->state = SKD_DRVR_STATE_WAIT_BOOT; skdev->timer_countdown = SKD_WAIT_BOOT_TIMO; break; @@ -3141,17 +1864,17 @@ static void skd_isr_fwstate(struct skd_device *skdev) case FIT_SR_DRIVE_FAULT: skd_drive_fault(skdev); - skd_recover_requests(skdev, 0); - blk_start_queue(skdev->queue); + skd_recover_requests(skdev); + schedule_work(&skdev->start_queue); break; /* PCIe bus returned all Fs? 
*/ case 0xFF: - pr_info("(%s): state=0x%x sense=0x%x\n", - skd_name(skdev), state, sense); + dev_info(&skdev->pdev->dev, "state=0x%x sense=0x%x\n", state, + sense); skd_drive_disappeared(skdev); - skd_recover_requests(skdev, 0); - blk_start_queue(skdev->queue); + skd_recover_requests(skdev); + schedule_work(&skdev->start_queue); break; default: /* @@ -3159,92 +1882,33 @@ static void skd_isr_fwstate(struct skd_device *skdev) */ break; } - pr_err("(%s): Driver state %s(%d)=>%s(%d)\n", - skd_name(skdev), - skd_skdev_state_to_str(prev_driver_state), prev_driver_state, - skd_skdev_state_to_str(skdev->state), skdev->state); + dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n", + skd_skdev_state_to_str(prev_driver_state), prev_driver_state, + skd_skdev_state_to_str(skdev->state), skdev->state); } -static void skd_recover_requests(struct skd_device *skdev, int requeue) +static void skd_recover_request(struct request *req, void *data, bool reserved) { - int i; + struct skd_device *const skdev = data; + struct skd_request_context *skreq = blk_mq_rq_to_pdu(req); - for (i = 0; i < skdev->num_req_context; i++) { - struct skd_request_context *skreq = &skdev->skreq_table[i]; + if (skreq->state != SKD_REQ_STATE_BUSY) + return; - if (skreq->state == SKD_REQ_STATE_BUSY) { - skd_log_skreq(skdev, skreq, "recover"); + skd_log_skreq(skdev, skreq, "recover"); - SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0); - SKD_ASSERT(skreq->req != NULL); + /* Release DMA resources for the request. */ + if (skreq->n_sg > 0) + skd_postop_sg_list(skdev, skreq); - /* Release DMA resources for the request. */ - if (skreq->n_sg > 0) - skd_postop_sg_list(skdev, skreq); + skreq->state = SKD_REQ_STATE_IDLE; + skreq->status = BLK_STS_IOERR; + blk_mq_complete_request(req); +} - if (requeue && - (unsigned long) ++skreq->req->special < - SKD_MAX_RETRIES) - blk_requeue_request(skdev->queue, skreq->req); - else - skd_end_request(skdev, skreq, BLK_STS_IOERR); - - skreq->req = NULL; - - skreq->state = SKD_REQ_STATE_IDLE; - skreq->id += SKD_ID_INCR; - } - if (i > 0) - skreq[-1].next = skreq; - skreq->next = NULL; - } - skdev->skreq_free_list = skdev->skreq_table; - - for (i = 0; i < skdev->num_fitmsg_context; i++) { - struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i]; - - if (skmsg->state == SKD_MSG_STATE_BUSY) { - skd_log_skmsg(skdev, skmsg, "salvaged"); - SKD_ASSERT((skmsg->id & SKD_ID_INCR) != 0); - skmsg->state = SKD_MSG_STATE_IDLE; - skmsg->id += SKD_ID_INCR; - } - if (i > 0) - skmsg[-1].next = skmsg; - skmsg->next = NULL; - } - skdev->skmsg_free_list = skdev->skmsg_table; - - for (i = 0; i < skdev->n_special; i++) { - struct skd_special_context *skspcl = &skdev->skspcl_table[i]; - - /* If orphaned, reclaim it because it has already been reported - * to the process as an error (it was just waiting for - * a completion that didn't come, and now it will never come) - * If busy, change to a state that will cause it to error - * out in the wait routine and let it do the normal - * reporting and reclaiming - */ - if (skspcl->req.state == SKD_REQ_STATE_BUSY) { - if (skspcl->orphaned) { - pr_debug("%s:%s:%d orphaned %p\n", - skdev->name, __func__, __LINE__, - skspcl); - skd_release_special(skdev, skspcl); - } else { - pr_debug("%s:%s:%d not orphaned %p\n", - skdev->name, __func__, __LINE__, - skspcl); - skspcl->req.state = SKD_REQ_STATE_ABORTED; - } - } - } - skdev->skspcl_free_list = skdev->skspcl_table; - - for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++) - skdev->timeout_slot[i] = 0; - - skdev->in_flight = 0; +static void 
skd_recover_requests(struct skd_device *skdev) +{ + blk_mq_tagset_busy_iter(&skdev->tag_set, skd_recover_request, skdev); } static void skd_isr_msg_from_dev(struct skd_device *skdev) @@ -3255,8 +1919,8 @@ static void skd_isr_msg_from_dev(struct skd_device *skdev) mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE); - pr_debug("%s:%s:%d mfd=0x%x last_mtd=0x%x\n", - skdev->name, __func__, __LINE__, mfd, skdev->last_mtd); + dev_dbg(&skdev->pdev->dev, "mfd=0x%x last_mtd=0x%x\n", mfd, + skdev->last_mtd); /* ignore any mtd that is an ack for something we didn't send */ if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd)) @@ -3267,13 +1931,10 @@ static void skd_isr_msg_from_dev(struct skd_device *skdev) skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd); if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) { - pr_err("(%s): protocol mismatch\n", - skdev->name); - pr_err("(%s): got=%d support=%d\n", - skdev->name, skdev->proto_ver, - FIT_PROTOCOL_VERSION_1); - pr_err("(%s): please upgrade driver\n", - skdev->name); + dev_err(&skdev->pdev->dev, "protocol mismatch\n"); + dev_err(&skdev->pdev->dev, " got=%d support=%d\n", + skdev->proto_ver, FIT_PROTOCOL_VERSION_1); + dev_err(&skdev->pdev->dev, " please upgrade driver\n"); skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH; skd_soft_reset(skdev); break; @@ -3327,9 +1988,8 @@ static void skd_isr_msg_from_dev(struct skd_device *skdev) SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); skdev->last_mtd = mtd; - pr_err("(%s): Time sync driver=0x%x device=0x%x\n", - skd_name(skdev), - skdev->connect_time_stamp, skdev->drive_jiffies); + dev_err(&skdev->pdev->dev, "Time sync driver=0x%x device=0x%x\n", + skdev->connect_time_stamp, skdev->drive_jiffies); break; case FIT_MTD_ARM_QUEUE: @@ -3351,8 +2011,7 @@ static void skd_disable_interrupts(struct skd_device *skdev) sense = SKD_READL(skdev, FIT_CONTROL); sense &= ~FIT_CR_ENABLE_INTERRUPTS; SKD_WRITEL(skdev, sense, FIT_CONTROL); - pr_debug("%s:%s:%d sense 0x%x\n", - skdev->name, __func__, __LINE__, sense); + dev_dbg(&skdev->pdev->dev, "sense 0x%x\n", sense); /* Note that the 1s is written. A 1-bit means * disable, a 0 means enable. @@ -3371,13 +2030,11 @@ static void skd_enable_interrupts(struct skd_device *skdev) /* Note that the compliment of mask is written. A 1-bit means * disable, a 0 means enable. 
*/ SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST); - pr_debug("%s:%s:%d interrupt mask=0x%x\n", - skdev->name, __func__, __LINE__, ~val); + dev_dbg(&skdev->pdev->dev, "interrupt mask=0x%x\n", ~val); val = SKD_READL(skdev, FIT_CONTROL); val |= FIT_CR_ENABLE_INTERRUPTS; - pr_debug("%s:%s:%d control=0x%x\n", - skdev->name, __func__, __LINE__, val); + dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val); SKD_WRITEL(skdev, val, FIT_CONTROL); } @@ -3393,8 +2050,7 @@ static void skd_soft_reset(struct skd_device *skdev) val = SKD_READL(skdev, FIT_CONTROL); val |= (FIT_CR_SOFT_RESET); - pr_debug("%s:%s:%d control=0x%x\n", - skdev->name, __func__, __LINE__, val); + dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val); SKD_WRITEL(skdev, val, FIT_CONTROL); } @@ -3411,8 +2067,7 @@ static void skd_start_device(struct skd_device *skdev) sense = SKD_READL(skdev, FIT_STATUS); - pr_debug("%s:%s:%d initial status=0x%x\n", - skdev->name, __func__, __LINE__, sense); + dev_dbg(&skdev->pdev->dev, "initial status=0x%x\n", sense); state = sense & FIT_SR_DRIVE_STATE_MASK; skdev->drive_state = state; @@ -3425,25 +2080,23 @@ static void skd_start_device(struct skd_device *skdev) switch (skdev->drive_state) { case FIT_SR_DRIVE_OFFLINE: - pr_err("(%s): Drive offline...\n", skd_name(skdev)); + dev_err(&skdev->pdev->dev, "Drive offline...\n"); break; case FIT_SR_DRIVE_FW_BOOTING: - pr_debug("%s:%s:%d FIT_SR_DRIVE_FW_BOOTING %s\n", - skdev->name, __func__, __LINE__, skdev->name); + dev_dbg(&skdev->pdev->dev, "FIT_SR_DRIVE_FW_BOOTING\n"); skdev->state = SKD_DRVR_STATE_WAIT_BOOT; skdev->timer_countdown = SKD_WAIT_BOOT_TIMO; break; case FIT_SR_DRIVE_BUSY_SANITIZE: - pr_info("(%s): Start: BUSY_SANITIZE\n", - skd_name(skdev)); + dev_info(&skdev->pdev->dev, "Start: BUSY_SANITIZE\n"); skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE; skdev->timer_countdown = SKD_STARTED_BUSY_TIMO; break; case FIT_SR_DRIVE_BUSY_ERASE: - pr_info("(%s): Start: BUSY_ERASE\n", skd_name(skdev)); + dev_info(&skdev->pdev->dev, "Start: BUSY_ERASE\n"); skdev->state = SKD_DRVR_STATE_BUSY_ERASE; skdev->timer_countdown = SKD_STARTED_BUSY_TIMO; break; @@ -3454,14 +2107,13 @@ static void skd_start_device(struct skd_device *skdev) break; case FIT_SR_DRIVE_BUSY: - pr_err("(%s): Drive Busy...\n", skd_name(skdev)); + dev_err(&skdev->pdev->dev, "Drive Busy...\n"); skdev->state = SKD_DRVR_STATE_BUSY; skdev->timer_countdown = SKD_STARTED_BUSY_TIMO; break; case FIT_SR_DRIVE_SOFT_RESET: - pr_err("(%s) drive soft reset in prog\n", - skd_name(skdev)); + dev_err(&skdev->pdev->dev, "drive soft reset in prog\n"); break; case FIT_SR_DRIVE_FAULT: @@ -3471,9 +2123,8 @@ static void skd_start_device(struct skd_device *skdev) */ skd_drive_fault(skdev); /*start the queue so we can respond with error to requests */ - pr_debug("%s:%s:%d starting %s queue\n", - skdev->name, __func__, __LINE__, skdev->name); - blk_start_queue(skdev->queue); + dev_dbg(&skdev->pdev->dev, "starting queue\n"); + schedule_work(&skdev->start_queue); skdev->gendisk_on = -1; wake_up_interruptible(&skdev->waitq); break; @@ -3483,38 +2134,33 @@ static void skd_start_device(struct skd_device *skdev) * to the BAR1 addresses. 
*/ skd_drive_disappeared(skdev); /*start the queue so we can respond with error to requests */ - pr_debug("%s:%s:%d starting %s queue to error-out reqs\n", - skdev->name, __func__, __LINE__, skdev->name); - blk_start_queue(skdev->queue); + dev_dbg(&skdev->pdev->dev, + "starting queue to error-out reqs\n"); + schedule_work(&skdev->start_queue); skdev->gendisk_on = -1; wake_up_interruptible(&skdev->waitq); break; default: - pr_err("(%s) Start: unknown state %x\n", - skd_name(skdev), skdev->drive_state); + dev_err(&skdev->pdev->dev, "Start: unknown state %x\n", + skdev->drive_state); break; } state = SKD_READL(skdev, FIT_CONTROL); - pr_debug("%s:%s:%d FIT Control Status=0x%x\n", - skdev->name, __func__, __LINE__, state); + dev_dbg(&skdev->pdev->dev, "FIT Control Status=0x%x\n", state); state = SKD_READL(skdev, FIT_INT_STATUS_HOST); - pr_debug("%s:%s:%d Intr Status=0x%x\n", - skdev->name, __func__, __LINE__, state); + dev_dbg(&skdev->pdev->dev, "Intr Status=0x%x\n", state); state = SKD_READL(skdev, FIT_INT_MASK_HOST); - pr_debug("%s:%s:%d Intr Mask=0x%x\n", - skdev->name, __func__, __LINE__, state); + dev_dbg(&skdev->pdev->dev, "Intr Mask=0x%x\n", state); state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE); - pr_debug("%s:%s:%d Msg from Dev=0x%x\n", - skdev->name, __func__, __LINE__, state); + dev_dbg(&skdev->pdev->dev, "Msg from Dev=0x%x\n", state); state = SKD_READL(skdev, FIT_HW_VERSION); - pr_debug("%s:%s:%d HW version=0x%x\n", - skdev->name, __func__, __LINE__, state); + dev_dbg(&skdev->pdev->dev, "HW version=0x%x\n", state); spin_unlock_irqrestore(&skdev->lock, flags); } @@ -3529,14 +2175,12 @@ static void skd_stop_device(struct skd_device *skdev) spin_lock_irqsave(&skdev->lock, flags); if (skdev->state != SKD_DRVR_STATE_ONLINE) { - pr_err("(%s): skd_stop_device not online no sync\n", - skd_name(skdev)); + dev_err(&skdev->pdev->dev, "%s not online no sync\n", __func__); goto stop_out; } if (skspcl->req.state != SKD_REQ_STATE_IDLE) { - pr_err("(%s): skd_stop_device no special\n", - skd_name(skdev)); + dev_err(&skdev->pdev->dev, "%s no special\n", __func__); goto stop_out; } @@ -3554,16 +2198,13 @@ static void skd_stop_device(struct skd_device *skdev) switch (skdev->sync_done) { case 0: - pr_err("(%s): skd_stop_device no sync\n", - skd_name(skdev)); + dev_err(&skdev->pdev->dev, "%s no sync\n", __func__); break; case 1: - pr_err("(%s): skd_stop_device sync done\n", - skd_name(skdev)); + dev_err(&skdev->pdev->dev, "%s sync done\n", __func__); break; default: - pr_err("(%s): skd_stop_device sync error\n", - skd_name(skdev)); + dev_err(&skdev->pdev->dev, "%s sync error\n", __func__); } stop_out: @@ -3593,8 +2234,8 @@ static void skd_stop_device(struct skd_device *skdev) } if (dev_state != FIT_SR_DRIVE_INIT) - pr_err("(%s): skd_stop_device state error 0x%02x\n", - skd_name(skdev), dev_state); + dev_err(&skdev->pdev->dev, "%s state error 0x%02x\n", __func__, + dev_state); } /* assume spinlock is held */ @@ -3607,8 +2248,7 @@ static void skd_restart_device(struct skd_device *skdev) state = SKD_READL(skdev, FIT_STATUS); - pr_debug("%s:%s:%d drive status=0x%x\n", - skdev->name, __func__, __LINE__, state); + dev_dbg(&skdev->pdev->dev, "drive status=0x%x\n", state); state &= FIT_SR_DRIVE_STATE_MASK; skdev->drive_state = state; @@ -3628,9 +2268,8 @@ static int skd_quiesce_dev(struct skd_device *skdev) switch (skdev->state) { case SKD_DRVR_STATE_BUSY: case SKD_DRVR_STATE_BUSY_IMMINENT: - pr_debug("%s:%s:%d stopping %s queue\n", - skdev->name, __func__, __LINE__, skdev->name); - blk_stop_queue(skdev->queue); 
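A minimal sketch of the blk_mq_tagset_busy_iter() recovery pattern that the skd_recover_requests() hunk above adopts: every started request owned by the tag set is visited, its driver PDU is updated, and the request is completed. struct example_request and the function names are hypothetical; the block-layer calls are the ones used in the hunk:

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

struct example_request {
	blk_status_t status;		/* per-request PDU, as in skd */
};

static void example_fail_request(struct request *rq, void *data, bool reserved)
{
	struct example_request *ereq = blk_mq_rq_to_pdu(rq);

	/* Record the outcome in the PDU and let ->complete() finish it. */
	ereq->status = BLK_STS_IOERR;
	blk_mq_complete_request(rq);
}

static void example_fail_all_requests(struct blk_mq_tag_set *set)
{
	/* Walks every in-flight request owned by this tag set. */
	blk_mq_tagset_busy_iter(set, example_fail_request, NULL);
}

The driver's ->complete() callback (skd_complete_rq in this patch) then presumably ends each request with the status recorded in the PDU.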
+ dev_dbg(&skdev->pdev->dev, "stopping queue\n"); + blk_mq_stop_hw_queues(skdev->queue); break; case SKD_DRVR_STATE_ONLINE: case SKD_DRVR_STATE_STOPPING: @@ -3642,8 +2281,8 @@ static int skd_quiesce_dev(struct skd_device *skdev) case SKD_DRVR_STATE_RESUMING: default: rc = -EINVAL; - pr_debug("%s:%s:%d state [%d] not implemented\n", - skdev->name, __func__, __LINE__, skdev->state); + dev_dbg(&skdev->pdev->dev, "state [%d] not implemented\n", + skdev->state); } return rc; } @@ -3655,8 +2294,7 @@ static int skd_unquiesce_dev(struct skd_device *skdev) skd_log_skdev(skdev, "unquiesce"); if (skdev->state == SKD_DRVR_STATE_ONLINE) { - pr_debug("%s:%s:%d **** device already ONLINE\n", - skdev->name, __func__, __LINE__); + dev_dbg(&skdev->pdev->dev, "**** device already ONLINE\n"); return 0; } if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) { @@ -3669,8 +2307,7 @@ static int skd_unquiesce_dev(struct skd_device *skdev) * to become available. */ skdev->state = SKD_DRVR_STATE_BUSY; - pr_debug("%s:%s:%d drive BUSY state\n", - skdev->name, __func__, __LINE__); + dev_dbg(&skdev->pdev->dev, "drive BUSY state\n"); return 0; } @@ -3689,26 +2326,24 @@ static int skd_unquiesce_dev(struct skd_device *skdev) case SKD_DRVR_STATE_IDLE: case SKD_DRVR_STATE_LOAD: skdev->state = SKD_DRVR_STATE_ONLINE; - pr_err("(%s): Driver state %s(%d)=>%s(%d)\n", - skd_name(skdev), - skd_skdev_state_to_str(prev_driver_state), - prev_driver_state, skd_skdev_state_to_str(skdev->state), - skdev->state); - pr_debug("%s:%s:%d **** device ONLINE...starting block queue\n", - skdev->name, __func__, __LINE__); - pr_debug("%s:%s:%d starting %s queue\n", - skdev->name, __func__, __LINE__, skdev->name); - pr_info("(%s): STEC s1120 ONLINE\n", skd_name(skdev)); - blk_start_queue(skdev->queue); + dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n", + skd_skdev_state_to_str(prev_driver_state), + prev_driver_state, skd_skdev_state_to_str(skdev->state), + skdev->state); + dev_dbg(&skdev->pdev->dev, + "**** device ONLINE...starting block queue\n"); + dev_dbg(&skdev->pdev->dev, "starting queue\n"); + dev_info(&skdev->pdev->dev, "STEC s1120 ONLINE\n"); + schedule_work(&skdev->start_queue); skdev->gendisk_on = 1; wake_up_interruptible(&skdev->waitq); break; case SKD_DRVR_STATE_DISAPPEARED: default: - pr_debug("%s:%s:%d **** driver state %d, not implemented \n", - skdev->name, __func__, __LINE__, - skdev->state); + dev_dbg(&skdev->pdev->dev, + "**** driver state %d, not implemented\n", + skdev->state); return -EBUSY; } return 0; @@ -3726,11 +2361,10 @@ static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data) unsigned long flags; spin_lock_irqsave(&skdev->lock, flags); - pr_debug("%s:%s:%d MSIX = 0x%x\n", - skdev->name, __func__, __LINE__, - SKD_READL(skdev, FIT_INT_STATUS_HOST)); - pr_err("(%s): MSIX reserved irq %d = 0x%x\n", skd_name(skdev), - irq, SKD_READL(skdev, FIT_INT_STATUS_HOST)); + dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n", + SKD_READL(skdev, FIT_INT_STATUS_HOST)); + dev_err(&skdev->pdev->dev, "MSIX reserved irq %d = 0x%x\n", irq, + SKD_READL(skdev, FIT_INT_STATUS_HOST)); SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST); spin_unlock_irqrestore(&skdev->lock, flags); return IRQ_HANDLED; @@ -3742,9 +2376,8 @@ static irqreturn_t skd_statec_isr(int irq, void *skd_host_data) unsigned long flags; spin_lock_irqsave(&skdev->lock, flags); - pr_debug("%s:%s:%d MSIX = 0x%x\n", - skdev->name, __func__, __LINE__, - SKD_READL(skdev, FIT_INT_STATUS_HOST)); + dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n", + SKD_READL(skdev, 
FIT_INT_STATUS_HOST)); SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST); skd_isr_fwstate(skdev); spin_unlock_irqrestore(&skdev->lock, flags); @@ -3759,19 +2392,18 @@ static irqreturn_t skd_comp_q(int irq, void *skd_host_data) int deferred; spin_lock_irqsave(&skdev->lock, flags); - pr_debug("%s:%s:%d MSIX = 0x%x\n", - skdev->name, __func__, __LINE__, - SKD_READL(skdev, FIT_INT_STATUS_HOST)); + dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n", + SKD_READL(skdev, FIT_INT_STATUS_HOST)); SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST); deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit, &flush_enqueued); if (flush_enqueued) - skd_request_fn(skdev->queue); + schedule_work(&skdev->start_queue); if (deferred) schedule_work(&skdev->completion_worker); else if (!flush_enqueued) - skd_request_fn(skdev->queue); + schedule_work(&skdev->start_queue); spin_unlock_irqrestore(&skdev->lock, flags); @@ -3784,9 +2416,8 @@ static irqreturn_t skd_msg_isr(int irq, void *skd_host_data) unsigned long flags; spin_lock_irqsave(&skdev->lock, flags); - pr_debug("%s:%s:%d MSIX = 0x%x\n", - skdev->name, __func__, __LINE__, - SKD_READL(skdev, FIT_INT_STATUS_HOST)); + dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n", + SKD_READL(skdev, FIT_INT_STATUS_HOST)); SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST); skd_isr_msg_from_dev(skdev); spin_unlock_irqrestore(&skdev->lock, flags); @@ -3799,9 +2430,8 @@ static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data) unsigned long flags; spin_lock_irqsave(&skdev->lock, flags); - pr_debug("%s:%s:%d MSIX = 0x%x\n", - skdev->name, __func__, __LINE__, - SKD_READL(skdev, FIT_INT_STATUS_HOST)); + dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n", + SKD_READL(skdev, FIT_INT_STATUS_HOST)); SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST); spin_unlock_irqrestore(&skdev->lock, flags); return IRQ_HANDLED; @@ -3850,8 +2480,7 @@ static int skd_acquire_msix(struct skd_device *skdev) rc = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT, SKD_MAX_MSIX_COUNT, PCI_IRQ_MSIX); if (rc < 0) { - pr_err("(%s): failed to enable MSI-X %d\n", - skd_name(skdev), rc); + dev_err(&skdev->pdev->dev, "failed to enable MSI-X %d\n", rc); goto out; } @@ -3859,8 +2488,7 @@ static int skd_acquire_msix(struct skd_device *skdev) sizeof(struct skd_msix_entry), GFP_KERNEL); if (!skdev->msix_entries) { rc = -ENOMEM; - pr_err("(%s): msix table allocation error\n", - skd_name(skdev)); + dev_err(&skdev->pdev->dev, "msix table allocation error\n"); goto out; } @@ -3877,16 +2505,15 @@ static int skd_acquire_msix(struct skd_device *skdev) msix_entries[i].handler, 0, qentry->isr_name, skdev); if (rc) { - pr_err("(%s): Unable to register(%d) MSI-X " - "handler %d: %s\n", - skd_name(skdev), rc, i, qentry->isr_name); + dev_err(&skdev->pdev->dev, + "Unable to register(%d) MSI-X handler %d: %s\n", + rc, i, qentry->isr_name); goto msix_out; } } - pr_debug("%s:%s:%d %s: <%s> msix %d irq(s) enabled\n", - skdev->name, __func__, __LINE__, - pci_name(pdev), skdev->name, SKD_MAX_MSIX_COUNT); + dev_dbg(&skdev->pdev->dev, "%d msix irq(s) enabled\n", + SKD_MAX_MSIX_COUNT); return 0; msix_out: @@ -3909,8 +2536,8 @@ static int skd_acquire_irq(struct skd_device *skdev) if (!rc) return 0; - pr_err("(%s): failed to enable MSI-X, re-trying with MSI %d\n", - skd_name(skdev), rc); + dev_err(&skdev->pdev->dev, + "failed to enable MSI-X, re-trying with MSI %d\n", rc); } snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d", DRV_NAME, @@ -3920,8 +2547,8 @@ static int skd_acquire_irq(struct skd_device 
*skdev) irq_flag |= PCI_IRQ_MSI; rc = pci_alloc_irq_vectors(pdev, 1, 1, irq_flag); if (rc < 0) { - pr_err("(%s): failed to allocate the MSI interrupt %d\n", - skd_name(skdev), rc); + dev_err(&skdev->pdev->dev, + "failed to allocate the MSI interrupt %d\n", rc); return rc; } @@ -3930,8 +2557,8 @@ static int skd_acquire_irq(struct skd_device *skdev) skdev->isr_name, skdev); if (rc) { pci_free_irq_vectors(pdev); - pr_err("(%s): failed to allocate interrupt %d\n", - skd_name(skdev), rc); + dev_err(&skdev->pdev->dev, "failed to allocate interrupt %d\n", + rc); return rc; } @@ -3965,20 +2592,45 @@ static void skd_release_irq(struct skd_device *skdev) ***************************************************************************** */ +static void *skd_alloc_dma(struct skd_device *skdev, struct kmem_cache *s, + dma_addr_t *dma_handle, gfp_t gfp, + enum dma_data_direction dir) +{ + struct device *dev = &skdev->pdev->dev; + void *buf; + + buf = kmem_cache_alloc(s, gfp); + if (!buf) + return NULL; + *dma_handle = dma_map_single(dev, buf, s->size, dir); + if (dma_mapping_error(dev, *dma_handle)) { + kfree(buf); + buf = NULL; + } + return buf; +} + +static void skd_free_dma(struct skd_device *skdev, struct kmem_cache *s, + void *vaddr, dma_addr_t dma_handle, + enum dma_data_direction dir) +{ + if (!vaddr) + return; + + dma_unmap_single(&skdev->pdev->dev, dma_handle, s->size, dir); + kmem_cache_free(s, vaddr); +} + static int skd_cons_skcomp(struct skd_device *skdev) { int rc = 0; struct fit_completion_entry_v1 *skcomp; - u32 nbytes; - nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY; - nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY; + dev_dbg(&skdev->pdev->dev, + "comp pci_alloc, total bytes %zd entries %d\n", + SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY); - pr_debug("%s:%s:%d comp pci_alloc, total bytes %d entries %d\n", - skdev->name, __func__, __LINE__, - nbytes, SKD_N_COMPLETION_ENTRY); - - skcomp = pci_zalloc_consistent(skdev->pdev, nbytes, + skcomp = pci_zalloc_consistent(skdev->pdev, SKD_SKCOMP_SIZE, &skdev->cq_dma_address); if (skcomp == NULL) { @@ -4000,14 +2652,14 @@ static int skd_cons_skmsg(struct skd_device *skdev) int rc = 0; u32 i; - pr_debug("%s:%s:%d skmsg_table kzalloc, struct %lu, count %u total %lu\n", - skdev->name, __func__, __LINE__, - sizeof(struct skd_fitmsg_context), - skdev->num_fitmsg_context, - sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context); + dev_dbg(&skdev->pdev->dev, + "skmsg_table kcalloc, struct %lu, count %u total %lu\n", + sizeof(struct skd_fitmsg_context), skdev->num_fitmsg_context, + sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context); - skdev->skmsg_table = kzalloc(sizeof(struct skd_fitmsg_context) - *skdev->num_fitmsg_context, GFP_KERNEL); + skdev->skmsg_table = kcalloc(skdev->num_fitmsg_context, + sizeof(struct skd_fitmsg_context), + GFP_KERNEL); if (skdev->skmsg_table == NULL) { rc = -ENOMEM; goto err_out; @@ -4020,9 +2672,8 @@ static int skd_cons_skmsg(struct skd_device *skdev) skmsg->id = i + SKD_ID_FIT_MSG; - skmsg->state = SKD_MSG_STATE_IDLE; skmsg->msg_buf = pci_alloc_consistent(skdev->pdev, - SKD_N_FITMSG_BYTES + 64, + SKD_N_FITMSG_BYTES, &skmsg->mb_dma_address); if (skmsg->msg_buf == NULL) { @@ -4030,22 +2681,13 @@ static int skd_cons_skmsg(struct skd_device *skdev) goto err_out; } - skmsg->offset = (u32)((u64)skmsg->msg_buf & - (~FIT_QCMD_BASE_ADDRESS_MASK)); - skmsg->msg_buf += ~FIT_QCMD_BASE_ADDRESS_MASK; - skmsg->msg_buf = (u8 *)((u64)skmsg->msg_buf & - FIT_QCMD_BASE_ADDRESS_MASK); - skmsg->mb_dma_address 
+= ~FIT_QCMD_BASE_ADDRESS_MASK; - skmsg->mb_dma_address &= FIT_QCMD_BASE_ADDRESS_MASK; + WARN(((uintptr_t)skmsg->msg_buf | skmsg->mb_dma_address) & + (FIT_QCMD_ALIGN - 1), + "not aligned: msg_buf %p mb_dma_address %#llx\n", + skmsg->msg_buf, skmsg->mb_dma_address); memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES); - - skmsg->next = &skmsg[1]; } - /* Free list is in order starting with the 0th entry. */ - skdev->skmsg_table[i - 1].next = NULL; - skdev->skmsg_free_list = skdev->skmsg_table; - err_out: return rc; } @@ -4055,18 +2697,14 @@ static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev, dma_addr_t *ret_dma_addr) { struct fit_sg_descriptor *sg_list; - u32 nbytes; - nbytes = sizeof(*sg_list) * n_sg; - - sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr); + sg_list = skd_alloc_dma(skdev, skdev->sglist_cache, ret_dma_addr, + GFP_DMA | __GFP_ZERO, DMA_TO_DEVICE); if (sg_list != NULL) { uint64_t dma_address = *ret_dma_addr; u32 i; - memset(sg_list, 0, nbytes); - for (i = 0; i < n_sg - 1; i++) { uint64_t ndp_off; ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor); @@ -4079,153 +2717,63 @@ static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev, return sg_list; } -static int skd_cons_skreq(struct skd_device *skdev) +static void skd_free_sg_list(struct skd_device *skdev, + struct fit_sg_descriptor *sg_list, + dma_addr_t dma_addr) { - int rc = 0; - u32 i; + if (WARN_ON_ONCE(!sg_list)) + return; - pr_debug("%s:%s:%d skreq_table kzalloc, struct %lu, count %u total %lu\n", - skdev->name, __func__, __LINE__, - sizeof(struct skd_request_context), - skdev->num_req_context, - sizeof(struct skd_request_context) * skdev->num_req_context); - - skdev->skreq_table = kzalloc(sizeof(struct skd_request_context) - * skdev->num_req_context, GFP_KERNEL); - if (skdev->skreq_table == NULL) { - rc = -ENOMEM; - goto err_out; - } - - pr_debug("%s:%s:%d alloc sg_table sg_per_req %u scatlist %lu total %lu\n", - skdev->name, __func__, __LINE__, - skdev->sgs_per_request, sizeof(struct scatterlist), - skdev->sgs_per_request * sizeof(struct scatterlist)); - - for (i = 0; i < skdev->num_req_context; i++) { - struct skd_request_context *skreq; - - skreq = &skdev->skreq_table[i]; - - skreq->id = i + SKD_ID_RW_REQUEST; - skreq->state = SKD_REQ_STATE_IDLE; - - skreq->sg = kzalloc(sizeof(struct scatterlist) * - skdev->sgs_per_request, GFP_KERNEL); - if (skreq->sg == NULL) { - rc = -ENOMEM; - goto err_out; - } - sg_init_table(skreq->sg, skdev->sgs_per_request); - - skreq->sksg_list = skd_cons_sg_list(skdev, - skdev->sgs_per_request, - &skreq->sksg_dma_address); - - if (skreq->sksg_list == NULL) { - rc = -ENOMEM; - goto err_out; - } - - skreq->next = &skreq[1]; - } - - /* Free list is in order starting with the 0th entry. 
*/ - skdev->skreq_table[i - 1].next = NULL; - skdev->skreq_free_list = skdev->skreq_table; - -err_out: - return rc; + skd_free_dma(skdev, skdev->sglist_cache, sg_list, dma_addr, + DMA_TO_DEVICE); } -static int skd_cons_skspcl(struct skd_device *skdev) +static int skd_init_request(struct blk_mq_tag_set *set, struct request *rq, + unsigned int hctx_idx, unsigned int numa_node) { - int rc = 0; - u32 i, nbytes; + struct skd_device *skdev = set->driver_data; + struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq); - pr_debug("%s:%s:%d skspcl_table kzalloc, struct %lu, count %u total %lu\n", - skdev->name, __func__, __LINE__, - sizeof(struct skd_special_context), - skdev->n_special, - sizeof(struct skd_special_context) * skdev->n_special); + skreq->state = SKD_REQ_STATE_IDLE; + skreq->sg = (void *)(skreq + 1); + sg_init_table(skreq->sg, skd_sgs_per_request); + skreq->sksg_list = skd_cons_sg_list(skdev, skd_sgs_per_request, + &skreq->sksg_dma_address); - skdev->skspcl_table = kzalloc(sizeof(struct skd_special_context) - * skdev->n_special, GFP_KERNEL); - if (skdev->skspcl_table == NULL) { - rc = -ENOMEM; - goto err_out; - } + return skreq->sksg_list ? 0 : -ENOMEM; +} - for (i = 0; i < skdev->n_special; i++) { - struct skd_special_context *skspcl; +static void skd_exit_request(struct blk_mq_tag_set *set, struct request *rq, + unsigned int hctx_idx) +{ + struct skd_device *skdev = set->driver_data; + struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq); - skspcl = &skdev->skspcl_table[i]; - - skspcl->req.id = i + SKD_ID_SPECIAL_REQUEST; - skspcl->req.state = SKD_REQ_STATE_IDLE; - - skspcl->req.next = &skspcl[1].req; - - nbytes = SKD_N_SPECIAL_FITMSG_BYTES; - - skspcl->msg_buf = - pci_zalloc_consistent(skdev->pdev, nbytes, - &skspcl->mb_dma_address); - if (skspcl->msg_buf == NULL) { - rc = -ENOMEM; - goto err_out; - } - - skspcl->req.sg = kzalloc(sizeof(struct scatterlist) * - SKD_N_SG_PER_SPECIAL, GFP_KERNEL); - if (skspcl->req.sg == NULL) { - rc = -ENOMEM; - goto err_out; - } - - skspcl->req.sksg_list = skd_cons_sg_list(skdev, - SKD_N_SG_PER_SPECIAL, - &skspcl->req. - sksg_dma_address); - if (skspcl->req.sksg_list == NULL) { - rc = -ENOMEM; - goto err_out; - } - } - - /* Free list is in order starting with the 0th entry. 
*/ - skdev->skspcl_table[i - 1].req.next = NULL; - skdev->skspcl_free_list = skdev->skspcl_table; - - return rc; - -err_out: - return rc; + skd_free_sg_list(skdev, skreq->sksg_list, skreq->sksg_dma_address); } static int skd_cons_sksb(struct skd_device *skdev) { int rc = 0; struct skd_special_context *skspcl; - u32 nbytes; skspcl = &skdev->internal_skspcl; skspcl->req.id = 0 + SKD_ID_INTERNAL; skspcl->req.state = SKD_REQ_STATE_IDLE; - nbytes = SKD_N_INTERNAL_BYTES; - - skspcl->data_buf = pci_zalloc_consistent(skdev->pdev, nbytes, - &skspcl->db_dma_address); + skspcl->data_buf = skd_alloc_dma(skdev, skdev->databuf_cache, + &skspcl->db_dma_address, + GFP_DMA | __GFP_ZERO, + DMA_BIDIRECTIONAL); if (skspcl->data_buf == NULL) { rc = -ENOMEM; goto err_out; } - nbytes = SKD_N_SPECIAL_FITMSG_BYTES; - skspcl->msg_buf = pci_zalloc_consistent(skdev->pdev, nbytes, - &skspcl->mb_dma_address); + skspcl->msg_buf = skd_alloc_dma(skdev, skdev->msgbuf_cache, + &skspcl->mb_dma_address, + GFP_DMA | __GFP_ZERO, DMA_TO_DEVICE); if (skspcl->msg_buf == NULL) { rc = -ENOMEM; goto err_out; @@ -4247,6 +2795,14 @@ static int skd_cons_sksb(struct skd_device *skdev) return rc; } +static const struct blk_mq_ops skd_mq_ops = { + .queue_rq = skd_mq_queue_rq, + .complete = skd_complete_rq, + .timeout = skd_timed_out, + .init_request = skd_init_request, + .exit_request = skd_exit_request, +}; + static int skd_cons_disk(struct skd_device *skdev) { int rc = 0; @@ -4268,31 +2824,46 @@ static int skd_cons_disk(struct skd_device *skdev) disk->fops = &skd_blockdev_ops; disk->private_data = skdev; - q = blk_init_queue(skd_request_fn, &skdev->lock); - if (!q) { - rc = -ENOMEM; + memset(&skdev->tag_set, 0, sizeof(skdev->tag_set)); + skdev->tag_set.ops = &skd_mq_ops; + skdev->tag_set.nr_hw_queues = 1; + skdev->tag_set.queue_depth = skd_max_queue_depth; + skdev->tag_set.cmd_size = sizeof(struct skd_request_context) + + skdev->sgs_per_request * sizeof(struct scatterlist); + skdev->tag_set.numa_node = NUMA_NO_NODE; + skdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | + BLK_MQ_F_SG_MERGE | + BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_FIFO); + skdev->tag_set.driver_data = skdev; + rc = blk_mq_alloc_tag_set(&skdev->tag_set); + if (rc) + goto err_out; + q = blk_mq_init_queue(&skdev->tag_set); + if (IS_ERR(q)) { + blk_mq_free_tag_set(&skdev->tag_set); + rc = PTR_ERR(q); goto err_out; } - blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); + q->queuedata = skdev; skdev->queue = q; disk->queue = q; - q->queuedata = skdev; blk_queue_write_cache(q, true, true); blk_queue_max_segments(q, skdev->sgs_per_request); blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS); - /* set sysfs ptimal_io_size to 8K */ + /* set optimal I/O size to 8KB */ blk_queue_io_opt(q, 8192); queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q); + blk_queue_rq_timeout(q, 8 * HZ); + spin_lock_irqsave(&skdev->lock, flags); - pr_debug("%s:%s:%d stopping %s queue\n", - skdev->name, __func__, __LINE__, skdev->name); - blk_stop_queue(skdev->queue); + dev_dbg(&skdev->pdev->dev, "stopping queue\n"); + blk_mq_stop_hw_queues(skdev->queue); spin_unlock_irqrestore(&skdev->lock, flags); err_out: @@ -4306,13 +2877,13 @@ static struct skd_device *skd_construct(struct pci_dev *pdev) { struct skd_device *skdev; int blk_major = skd_major; + size_t size; int rc; skdev = kzalloc(sizeof(*skdev), GFP_KERNEL); if (!skdev) { - pr_err(PFX "(%s): memory alloc failure\n", - pci_name(pdev)); + dev_err(&pdev->dev, "memory alloc failure\n"); return NULL; } @@ -4320,60 +2891,71 
@@ static struct skd_device *skd_construct(struct pci_dev *pdev) skdev->pdev = pdev; skdev->devno = skd_next_devno++; skdev->major = blk_major; - sprintf(skdev->name, DRV_NAME "%d", skdev->devno); skdev->dev_max_queue_depth = 0; skdev->num_req_context = skd_max_queue_depth; skdev->num_fitmsg_context = skd_max_queue_depth; - skdev->n_special = skd_max_pass_thru; skdev->cur_max_queue_depth = 1; skdev->queue_low_water_mark = 1; skdev->proto_ver = 99; skdev->sgs_per_request = skd_sgs_per_request; skdev->dbg_level = skd_dbg_level; - atomic_set(&skdev->device_count, 0); - spin_lock_init(&skdev->lock); + INIT_WORK(&skdev->start_queue, skd_start_queue); INIT_WORK(&skdev->completion_worker, skd_completion_worker); - pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__); + size = max(SKD_N_FITMSG_BYTES, SKD_N_SPECIAL_FITMSG_BYTES); + skdev->msgbuf_cache = kmem_cache_create("skd-msgbuf", size, 0, + SLAB_HWCACHE_ALIGN, NULL); + if (!skdev->msgbuf_cache) + goto err_out; + WARN_ONCE(kmem_cache_size(skdev->msgbuf_cache) < size, + "skd-msgbuf: %d < %zd\n", + kmem_cache_size(skdev->msgbuf_cache), size); + size = skd_sgs_per_request * sizeof(struct fit_sg_descriptor); + skdev->sglist_cache = kmem_cache_create("skd-sglist", size, 0, + SLAB_HWCACHE_ALIGN, NULL); + if (!skdev->sglist_cache) + goto err_out; + WARN_ONCE(kmem_cache_size(skdev->sglist_cache) < size, + "skd-sglist: %d < %zd\n", + kmem_cache_size(skdev->sglist_cache), size); + size = SKD_N_INTERNAL_BYTES; + skdev->databuf_cache = kmem_cache_create("skd-databuf", size, 0, + SLAB_HWCACHE_ALIGN, NULL); + if (!skdev->databuf_cache) + goto err_out; + WARN_ONCE(kmem_cache_size(skdev->databuf_cache) < size, + "skd-databuf: %d < %zd\n", + kmem_cache_size(skdev->databuf_cache), size); + + dev_dbg(&skdev->pdev->dev, "skcomp\n"); rc = skd_cons_skcomp(skdev); if (rc < 0) goto err_out; - pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__); + dev_dbg(&skdev->pdev->dev, "skmsg\n"); rc = skd_cons_skmsg(skdev); if (rc < 0) goto err_out; - pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__); - rc = skd_cons_skreq(skdev); - if (rc < 0) - goto err_out; - - pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__); - rc = skd_cons_skspcl(skdev); - if (rc < 0) - goto err_out; - - pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__); + dev_dbg(&skdev->pdev->dev, "sksb\n"); rc = skd_cons_sksb(skdev); if (rc < 0) goto err_out; - pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__); + dev_dbg(&skdev->pdev->dev, "disk\n"); rc = skd_cons_disk(skdev); if (rc < 0) goto err_out; - pr_debug("%s:%s:%d VICTORY\n", skdev->name, __func__, __LINE__); + dev_dbg(&skdev->pdev->dev, "VICTORY\n"); return skdev; err_out: - pr_debug("%s:%s:%d construct failed\n", - skdev->name, __func__, __LINE__); + dev_dbg(&skdev->pdev->dev, "construct failed\n"); skd_destruct(skdev); return NULL; } @@ -4386,14 +2968,9 @@ static struct skd_device *skd_construct(struct pci_dev *pdev) static void skd_free_skcomp(struct skd_device *skdev) { - if (skdev->skcomp_table != NULL) { - u32 nbytes; - - nbytes = sizeof(skdev->skcomp_table[0]) * - SKD_N_COMPLETION_ENTRY; - pci_free_consistent(skdev->pdev, nbytes, + if (skdev->skcomp_table) + pci_free_consistent(skdev->pdev, SKD_SKCOMP_SIZE, skdev->skcomp_table, skdev->cq_dma_address); - } skdev->skcomp_table = NULL; skdev->cq_dma_address = 0; @@ -4412,8 +2989,6 @@ static void skd_free_skmsg(struct skd_device *skdev) skmsg = &skdev->skmsg_table[i]; if (skmsg->msg_buf != NULL) { - skmsg->msg_buf += skmsg->offset; - 
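A sketch of the single-hardware-queue tag-set setup used in the skd_cons_disk() hunk above, where cmd_size reserves the per-request driver context plus a trailing scatterlist array so blk-mq allocates them with each request. example_mq_ops, struct example_request, and the depth/segment parameters are placeholders:

#include <linux/blk-mq.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/numa.h>

struct example_request {
	blk_status_t status;
	struct scatterlist *sg;		/* points at the trailing array */
};

static int example_init_tag_set(struct blk_mq_tag_set *set,
				const struct blk_mq_ops *example_mq_ops,
				unsigned int depth, unsigned int sgs_per_req,
				void *driver_data)
{
	memset(set, 0, sizeof(*set));
	set->ops = example_mq_ops;
	set->nr_hw_queues = 1;
	set->queue_depth = depth;
	/* PDU = driver context followed by the request's scatterlist. */
	set->cmd_size = sizeof(struct example_request) +
			sgs_per_req * sizeof(struct scatterlist);
	set->numa_node = NUMA_NO_NODE;
	set->flags = BLK_MQ_F_SHOULD_MERGE;
	set->driver_data = driver_data;

	return blk_mq_alloc_tag_set(set);
}

On success the driver then calls blk_mq_init_queue() on the tag set to obtain its request_queue, and the per-request pieces are initialized in the ->init_request() callback, as skd_init_request() does above.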
skmsg->mb_dma_address += skmsg->offset; pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES, skmsg->msg_buf, skmsg->mb_dma_address); @@ -4426,109 +3001,23 @@ static void skd_free_skmsg(struct skd_device *skdev) skdev->skmsg_table = NULL; } -static void skd_free_sg_list(struct skd_device *skdev, - struct fit_sg_descriptor *sg_list, - u32 n_sg, dma_addr_t dma_addr) -{ - if (sg_list != NULL) { - u32 nbytes; - - nbytes = sizeof(*sg_list) * n_sg; - - pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr); - } -} - -static void skd_free_skreq(struct skd_device *skdev) -{ - u32 i; - - if (skdev->skreq_table == NULL) - return; - - for (i = 0; i < skdev->num_req_context; i++) { - struct skd_request_context *skreq; - - skreq = &skdev->skreq_table[i]; - - skd_free_sg_list(skdev, skreq->sksg_list, - skdev->sgs_per_request, - skreq->sksg_dma_address); - - skreq->sksg_list = NULL; - skreq->sksg_dma_address = 0; - - kfree(skreq->sg); - } - - kfree(skdev->skreq_table); - skdev->skreq_table = NULL; -} - -static void skd_free_skspcl(struct skd_device *skdev) -{ - u32 i; - u32 nbytes; - - if (skdev->skspcl_table == NULL) - return; - - for (i = 0; i < skdev->n_special; i++) { - struct skd_special_context *skspcl; - - skspcl = &skdev->skspcl_table[i]; - - if (skspcl->msg_buf != NULL) { - nbytes = SKD_N_SPECIAL_FITMSG_BYTES; - pci_free_consistent(skdev->pdev, nbytes, - skspcl->msg_buf, - skspcl->mb_dma_address); - } - - skspcl->msg_buf = NULL; - skspcl->mb_dma_address = 0; - - skd_free_sg_list(skdev, skspcl->req.sksg_list, - SKD_N_SG_PER_SPECIAL, - skspcl->req.sksg_dma_address); - - skspcl->req.sksg_list = NULL; - skspcl->req.sksg_dma_address = 0; - - kfree(skspcl->req.sg); - } - - kfree(skdev->skspcl_table); - skdev->skspcl_table = NULL; -} - static void skd_free_sksb(struct skd_device *skdev) { - struct skd_special_context *skspcl; - u32 nbytes; + struct skd_special_context *skspcl = &skdev->internal_skspcl; - skspcl = &skdev->internal_skspcl; - - if (skspcl->data_buf != NULL) { - nbytes = SKD_N_INTERNAL_BYTES; - - pci_free_consistent(skdev->pdev, nbytes, - skspcl->data_buf, skspcl->db_dma_address); - } + skd_free_dma(skdev, skdev->databuf_cache, skspcl->data_buf, + skspcl->db_dma_address, DMA_BIDIRECTIONAL); skspcl->data_buf = NULL; skspcl->db_dma_address = 0; - if (skspcl->msg_buf != NULL) { - nbytes = SKD_N_SPECIAL_FITMSG_BYTES; - pci_free_consistent(skdev->pdev, nbytes, - skspcl->msg_buf, skspcl->mb_dma_address); - } + skd_free_dma(skdev, skdev->msgbuf_cache, skspcl->msg_buf, + skspcl->mb_dma_address, DMA_TO_DEVICE); skspcl->msg_buf = NULL; skspcl->mb_dma_address = 0; - skd_free_sg_list(skdev, skspcl->req.sksg_list, 1, + skd_free_sg_list(skdev, skspcl->req.sksg_list, skspcl->req.sksg_dma_address); skspcl->req.sksg_list = NULL; @@ -4539,15 +3028,20 @@ static void skd_free_disk(struct skd_device *skdev) { struct gendisk *disk = skdev->disk; - if (disk != NULL) { - struct request_queue *q = disk->queue; + if (disk && (disk->flags & GENHD_FL_UP)) + del_gendisk(disk); - if (disk->flags & GENHD_FL_UP) - del_gendisk(disk); - if (q) - blk_cleanup_queue(q); - put_disk(disk); + if (skdev->queue) { + blk_cleanup_queue(skdev->queue); + skdev->queue = NULL; + if (disk) + disk->queue = NULL; } + + if (skdev->tag_set.tags) + blk_mq_free_tag_set(&skdev->tag_set); + + put_disk(disk); skdev->disk = NULL; } @@ -4556,26 +3050,25 @@ static void skd_destruct(struct skd_device *skdev) if (skdev == NULL) return; + cancel_work_sync(&skdev->start_queue); - pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__); + 
dev_dbg(&skdev->pdev->dev, "disk\n"); skd_free_disk(skdev); - pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__); + dev_dbg(&skdev->pdev->dev, "sksb\n"); skd_free_sksb(skdev); - pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__); - skd_free_skspcl(skdev); - - pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__); - skd_free_skreq(skdev); - - pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__); + dev_dbg(&skdev->pdev->dev, "skmsg\n"); skd_free_skmsg(skdev); - pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__); + dev_dbg(&skdev->pdev->dev, "skcomp\n"); skd_free_skcomp(skdev); - pr_debug("%s:%s:%d skdev\n", skdev->name, __func__, __LINE__); + kmem_cache_destroy(skdev->databuf_cache); + kmem_cache_destroy(skdev->sglist_cache); + kmem_cache_destroy(skdev->msgbuf_cache); + + dev_dbg(&skdev->pdev->dev, "skdev\n"); kfree(skdev); } @@ -4592,9 +3085,8 @@ static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo) skdev = bdev->bd_disk->private_data; - pr_debug("%s:%s:%d %s: CMD[%s] getgeo device\n", - skdev->name, __func__, __LINE__, - bdev->bd_disk->disk_name, current->comm); + dev_dbg(&skdev->pdev->dev, "%s: CMD[%s] getgeo device\n", + bdev->bd_disk->disk_name, current->comm); if (skdev->read_cap_is_valid) { capacity = get_capacity(skdev->disk); @@ -4609,18 +3101,16 @@ static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo) static int skd_bdev_attach(struct device *parent, struct skd_device *skdev) { - pr_debug("%s:%s:%d add_disk\n", skdev->name, __func__, __LINE__); + dev_dbg(&skdev->pdev->dev, "add_disk\n"); device_add_disk(parent, skdev->disk); return 0; } static const struct block_device_operations skd_blockdev_ops = { .owner = THIS_MODULE, - .ioctl = skd_bdev_ioctl, .getgeo = skd_bdev_getgeo, }; - /* ***************************************************************************** * PCIe DRIVER GLUE @@ -4671,10 +3161,8 @@ static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) char pci_str[32]; struct skd_device *skdev; - pr_info("STEC s1120 Driver(%s) version %s-b%s\n", - DRV_NAME, DRV_VERSION, DRV_BUILD_ID); - pr_info("(skd?:??:[%s]): vendor=%04X device=%04x\n", - pci_name(pdev), pdev->vendor, pdev->device); + dev_dbg(&pdev->dev, "vendor=%04X device=%04x\n", pdev->vendor, + pdev->device); rc = pci_enable_device(pdev); if (rc) @@ -4685,16 +3173,13 @@ static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); if (!rc) { if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { - - pr_err("(%s): consistent DMA mask error %d\n", - pci_name(pdev), rc); + dev_err(&pdev->dev, "consistent DMA mask error %d\n", + rc); } } else { - (rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))); + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) { - - pr_err("(%s): DMA mask error %d\n", - pci_name(pdev), rc); + dev_err(&pdev->dev, "DMA mask error %d\n", rc); goto err_out_regions; } } @@ -4714,19 +3199,17 @@ static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } skd_pci_info(skdev, pci_str); - pr_info("(%s): %s 64bit\n", skd_name(skdev), pci_str); + dev_info(&pdev->dev, "%s 64bit\n", pci_str); pci_set_master(pdev); rc = pci_enable_pcie_error_reporting(pdev); if (rc) { - pr_err( - "(%s): bad enable of PCIe error reporting rc=%d\n", - skd_name(skdev), rc); + dev_err(&pdev->dev, + "bad enable of PCIe error reporting rc=%d\n", rc); skdev->pcie_error_reporting_is_enabled = 0; } else 
skdev->pcie_error_reporting_is_enabled = 1; - pci_set_drvdata(pdev, skdev); for (i = 0; i < SKD_MAX_BARS; i++) { @@ -4735,21 +3218,19 @@ static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) skdev->mem_map[i] = ioremap(skdev->mem_phys[i], skdev->mem_size[i]); if (!skdev->mem_map[i]) { - pr_err("(%s): Unable to map adapter memory!\n", - skd_name(skdev)); + dev_err(&pdev->dev, + "Unable to map adapter memory!\n"); rc = -ENODEV; goto err_out_iounmap; } - pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n", - skdev->name, __func__, __LINE__, - skdev->mem_map[i], - (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]); + dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n", + skdev->mem_map[i], (uint64_t)skdev->mem_phys[i], + skdev->mem_size[i]); } rc = skd_acquire_irq(skdev); if (rc) { - pr_err("(%s): interrupt resource error %d\n", - skd_name(skdev), rc); + dev_err(&pdev->dev, "interrupt resource error %d\n", rc); goto err_out_iounmap; } @@ -4771,29 +3252,14 @@ static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } else { /* we timed out, something is wrong with the device, don't add the disk structure */ - pr_err( - "(%s): error: waiting for s1120 timed out %d!\n", - skd_name(skdev), rc); + dev_err(&pdev->dev, "error: waiting for s1120 timed out %d!\n", + rc); /* in case of no error; we timeout with ENXIO */ if (!rc) rc = -ENXIO; goto err_out_timer; } - -#ifdef SKD_VMK_POLL_HANDLER - if (skdev->irq_type == SKD_IRQ_MSIX) { - /* MSIX completion handler is being used for coredump */ - vmklnx_scsi_register_poll_handler(skdev->scsi_host, - skdev->msix_entries[5].vector, - skd_comp_q, skdev); - } else { - vmklnx_scsi_register_poll_handler(skdev->scsi_host, - skdev->pdev->irq, skd_isr, - skdev); - } -#endif /* SKD_VMK_POLL_HANDLER */ - return rc; err_out_timer: @@ -4826,7 +3292,7 @@ static void skd_pci_remove(struct pci_dev *pdev) skdev = pci_get_drvdata(pdev); if (!skdev) { - pr_err("%s: no device data for PCI\n", pci_name(pdev)); + dev_err(&pdev->dev, "no device data for PCI\n"); return; } skd_stop_device(skdev); @@ -4834,7 +3300,7 @@ static void skd_pci_remove(struct pci_dev *pdev) for (i = 0; i < SKD_MAX_BARS; i++) if (skdev->mem_map[i]) - iounmap((u32 *)skdev->mem_map[i]); + iounmap(skdev->mem_map[i]); if (skdev->pcie_error_reporting_is_enabled) pci_disable_pcie_error_reporting(pdev); @@ -4855,7 +3321,7 @@ static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state) skdev = pci_get_drvdata(pdev); if (!skdev) { - pr_err("%s: no device data for PCI\n", pci_name(pdev)); + dev_err(&pdev->dev, "no device data for PCI\n"); return -EIO; } @@ -4865,7 +3331,7 @@ static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state) for (i = 0; i < SKD_MAX_BARS; i++) if (skdev->mem_map[i]) - iounmap((u32 *)skdev->mem_map[i]); + iounmap(skdev->mem_map[i]); if (skdev->pcie_error_reporting_is_enabled) pci_disable_pcie_error_reporting(pdev); @@ -4885,7 +3351,7 @@ static int skd_pci_resume(struct pci_dev *pdev) skdev = pci_get_drvdata(pdev); if (!skdev) { - pr_err("%s: no device data for PCI\n", pci_name(pdev)); + dev_err(&pdev->dev, "no device data for PCI\n"); return -1; } @@ -4903,15 +3369,14 @@ static int skd_pci_resume(struct pci_dev *pdev) if (!rc) { if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { - pr_err("(%s): consistent DMA mask error %d\n", - pci_name(pdev), rc); + dev_err(&pdev->dev, "consistent DMA mask error %d\n", + rc); } } else { rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) { - pr_err("(%s): DMA mask error 
%d\n", - pci_name(pdev), rc); + dev_err(&pdev->dev, "DMA mask error %d\n", rc); goto err_out_regions; } } @@ -4919,8 +3384,8 @@ static int skd_pci_resume(struct pci_dev *pdev) pci_set_master(pdev); rc = pci_enable_pcie_error_reporting(pdev); if (rc) { - pr_err("(%s): bad enable of PCIe error reporting rc=%d\n", - skdev->name, rc); + dev_err(&pdev->dev, + "bad enable of PCIe error reporting rc=%d\n", rc); skdev->pcie_error_reporting_is_enabled = 0; } else skdev->pcie_error_reporting_is_enabled = 1; @@ -4932,21 +3397,17 @@ static int skd_pci_resume(struct pci_dev *pdev) skdev->mem_map[i] = ioremap(skdev->mem_phys[i], skdev->mem_size[i]); if (!skdev->mem_map[i]) { - pr_err("(%s): Unable to map adapter memory!\n", - skd_name(skdev)); + dev_err(&pdev->dev, "Unable to map adapter memory!\n"); rc = -ENODEV; goto err_out_iounmap; } - pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n", - skdev->name, __func__, __LINE__, - skdev->mem_map[i], - (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]); + dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n", + skdev->mem_map[i], (uint64_t)skdev->mem_phys[i], + skdev->mem_size[i]); } rc = skd_acquire_irq(skdev); if (rc) { - - pr_err("(%s): interrupt resource error %d\n", - pci_name(pdev), rc); + dev_err(&pdev->dev, "interrupt resource error %d\n", rc); goto err_out_iounmap; } @@ -4984,15 +3445,15 @@ static void skd_pci_shutdown(struct pci_dev *pdev) { struct skd_device *skdev; - pr_err("skd_pci_shutdown called\n"); + dev_err(&pdev->dev, "%s called\n", __func__); skdev = pci_get_drvdata(pdev); if (!skdev) { - pr_err("%s: no device data for PCI\n", pci_name(pdev)); + dev_err(&pdev->dev, "no device data for PCI\n"); return; } - pr_err("%s: calling stop\n", skd_name(skdev)); + dev_err(&pdev->dev, "calling stop\n"); skd_stop_device(skdev); } @@ -5012,21 +3473,6 @@ static struct pci_driver skd_driver = { ***************************************************************************** */ -static const char *skd_name(struct skd_device *skdev) -{ - memset(skdev->id_str, 0, sizeof(skdev->id_str)); - - if (skdev->inquiry_is_valid) - snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:%s:[%s]", - skdev->name, skdev->inq_serial_num, - pci_name(skdev->pdev)); - else - snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:??:[%s]", - skdev->name, pci_name(skdev->pdev)); - - return skdev->id_str; -} - const char *skd_drive_state_to_str(int state) { switch (state) { @@ -5078,8 +3524,6 @@ const char *skd_skdev_state_to_str(enum skd_drvr_state state) return "PAUSING"; case SKD_DRVR_STATE_PAUSED: return "PAUSED"; - case SKD_DRVR_STATE_DRAINING_TIMEOUT: - return "DRAINING_TIMEOUT"; case SKD_DRVR_STATE_RESTARTING: return "RESTARTING"; case SKD_DRVR_STATE_RESUMING: @@ -5106,18 +3550,6 @@ const char *skd_skdev_state_to_str(enum skd_drvr_state state) } } -static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state) -{ - switch (state) { - case SKD_MSG_STATE_IDLE: - return "IDLE"; - case SKD_MSG_STATE_BUSY: - return "BUSY"; - default: - return "???"; - } -} - static const char *skd_skreq_state_to_str(enum skd_req_state state) { switch (state) { @@ -5131,8 +3563,6 @@ static const char *skd_skreq_state_to_str(enum skd_req_state state) return "COMPLETED"; case SKD_REQ_STATE_TIMEOUT: return "TIMEOUT"; - case SKD_REQ_STATE_ABORTED: - return "ABORTED"; default: return "???"; } @@ -5140,58 +3570,34 @@ static const char *skd_skreq_state_to_str(enum skd_req_state state) static void skd_log_skdev(struct skd_device *skdev, const char *event) { - pr_debug("%s:%s:%d (%s) skdev=%p 
event='%s'\n", - skdev->name, __func__, __LINE__, skdev->name, skdev, event); - pr_debug("%s:%s:%d drive_state=%s(%d) driver_state=%s(%d)\n", - skdev->name, __func__, __LINE__, - skd_drive_state_to_str(skdev->drive_state), skdev->drive_state, - skd_skdev_state_to_str(skdev->state), skdev->state); - pr_debug("%s:%s:%d busy=%d limit=%d dev=%d lowat=%d\n", - skdev->name, __func__, __LINE__, - skdev->in_flight, skdev->cur_max_queue_depth, - skdev->dev_max_queue_depth, skdev->queue_low_water_mark); - pr_debug("%s:%s:%d timestamp=0x%x cycle=%d cycle_ix=%d\n", - skdev->name, __func__, __LINE__, - skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix); -} - -static void skd_log_skmsg(struct skd_device *skdev, - struct skd_fitmsg_context *skmsg, const char *event) -{ - pr_debug("%s:%s:%d (%s) skmsg=%p event='%s'\n", - skdev->name, __func__, __LINE__, skdev->name, skmsg, event); - pr_debug("%s:%s:%d state=%s(%d) id=0x%04x length=%d\n", - skdev->name, __func__, __LINE__, - skd_skmsg_state_to_str(skmsg->state), skmsg->state, - skmsg->id, skmsg->length); + dev_dbg(&skdev->pdev->dev, "skdev=%p event='%s'\n", skdev, event); + dev_dbg(&skdev->pdev->dev, " drive_state=%s(%d) driver_state=%s(%d)\n", + skd_drive_state_to_str(skdev->drive_state), skdev->drive_state, + skd_skdev_state_to_str(skdev->state), skdev->state); + dev_dbg(&skdev->pdev->dev, " busy=%d limit=%d dev=%d lowat=%d\n", + skd_in_flight(skdev), skdev->cur_max_queue_depth, + skdev->dev_max_queue_depth, skdev->queue_low_water_mark); + dev_dbg(&skdev->pdev->dev, " cycle=%d cycle_ix=%d\n", + skdev->skcomp_cycle, skdev->skcomp_ix); } static void skd_log_skreq(struct skd_device *skdev, struct skd_request_context *skreq, const char *event) { - pr_debug("%s:%s:%d (%s) skreq=%p event='%s'\n", - skdev->name, __func__, __LINE__, skdev->name, skreq, event); - pr_debug("%s:%s:%d state=%s(%d) id=0x%04x fitmsg=0x%04x\n", - skdev->name, __func__, __LINE__, - skd_skreq_state_to_str(skreq->state), skreq->state, - skreq->id, skreq->fitmsg_id); - pr_debug("%s:%s:%d timo=0x%x sg_dir=%d n_sg=%d\n", - skdev->name, __func__, __LINE__, - skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg); + struct request *req = blk_mq_rq_from_pdu(skreq); + u32 lba = blk_rq_pos(req); + u32 count = blk_rq_sectors(req); - if (skreq->req != NULL) { - struct request *req = skreq->req; - u32 lba = (u32)blk_rq_pos(req); - u32 count = blk_rq_sectors(req); + dev_dbg(&skdev->pdev->dev, "skreq=%p event='%s'\n", skreq, event); + dev_dbg(&skdev->pdev->dev, " state=%s(%d) id=0x%04x fitmsg=0x%04x\n", + skd_skreq_state_to_str(skreq->state), skreq->state, skreq->id, + skreq->fitmsg_id); + dev_dbg(&skdev->pdev->dev, " sg_dir=%d n_sg=%d\n", + skreq->data_dir, skreq->n_sg); - pr_debug("%s:%s:%d " - "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", - skdev->name, __func__, __LINE__, - req, lba, lba, count, count, - (int)rq_data_dir(req)); - } else - pr_debug("%s:%s:%d req=NULL\n", - skdev->name, __func__, __LINE__); + dev_dbg(&skdev->pdev->dev, + "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req, lba, lba, + count, count, (int)rq_data_dir(req)); } /* @@ -5202,7 +3608,14 @@ static void skd_log_skreq(struct skd_device *skdev, static int __init skd_init(void) { - pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID); + BUILD_BUG_ON(sizeof(struct fit_completion_entry_v1) != 8); + BUILD_BUG_ON(sizeof(struct fit_comp_error_info) != 32); + BUILD_BUG_ON(sizeof(struct skd_command_header) != 16); + BUILD_BUG_ON(sizeof(struct skd_scsi_request) != 32); + BUILD_BUG_ON(sizeof(struct driver_inquiry_data) 
!= 44); + BUILD_BUG_ON(offsetof(struct skd_msg_buf, fmh) != 0); + BUILD_BUG_ON(offsetof(struct skd_msg_buf, scsi) != 64); + BUILD_BUG_ON(sizeof(struct skd_msg_buf) != SKD_N_FITMSG_BYTES); switch (skd_isr_type) { case SKD_IRQ_LEGACY: @@ -5222,7 +3635,8 @@ static int __init skd_init(void) skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT; } - if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > 14) { + if (skd_max_req_per_msg < 1 || + skd_max_req_per_msg > SKD_MAX_REQ_PER_MSG) { pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n", skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT); skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT; @@ -5246,19 +3660,11 @@ static int __init skd_init(void) skd_isr_comp_limit = 0; } - if (skd_max_pass_thru < 1 || skd_max_pass_thru > 50) { - pr_err(PFX "skd_max_pass_thru %d invalid, re-set to %d\n", - skd_max_pass_thru, SKD_N_SPECIAL_CONTEXT); - skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT; - } - return pci_register_driver(&skd_driver); } static void __exit skd_exit(void) { - pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID); - pci_unregister_driver(&skd_driver); if (skd_major) diff --git a/drivers/block/skd_s1120.h b/drivers/block/skd_s1120.h index 61c757ff0161..de35f47e953c 100644 --- a/drivers/block/skd_s1120.h +++ b/drivers/block/skd_s1120.h @@ -1,19 +1,15 @@ -/* Copyright 2012 STEC, Inc. +/* + * Copyright 2012 STEC, Inc. + * Copyright (c) 2017 Western Digital Corporation or its affiliates. * - * This file is licensed under the terms of the 3-clause - * BSD License (http://opensource.org/licenses/BSD-3-Clause) - * or the GNU GPL-2.0 (http://www.gnu.org/licenses/gpl-2.0.html), - * at your option. Both licenses are also available in the LICENSE file - * distributed with this project. This file may not be copied, modified, - * or distributed except in accordance with those terms. + * This file is part of the Linux kernel, and is made available under + * the terms of the GNU General Public License version 2. */ #ifndef SKD_S1120_H #define SKD_S1120_H -#pragma pack(push, s1120_h, 1) - /* * Q-channel, 64-bit r/w */ @@ -30,7 +26,7 @@ #define FIT_QCMD_MSGSIZE_128 (0x1 << 4) #define FIT_QCMD_MSGSIZE_256 (0x2 << 4) #define FIT_QCMD_MSGSIZE_512 (0x3 << 4) -#define FIT_QCMD_BASE_ADDRESS_MASK (0xFFFFFFFFFFFFFFC0ull) +#define FIT_QCMD_ALIGN L1_CACHE_BYTES /* * Control, 32-bit r/w @@ -250,7 +246,7 @@ struct fit_msg_hdr { * 20-23 of the FIT_MTD_FITFW_INIT response. 
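The BUILD_BUG_ON() checks added to skd_init() above pin the sizes and field offsets of the FIT wire structures at compile time; together with switching the on-wire fields in skd_s1120.h to __be16/__be32/__be64 types, this is what lets the old #pragma pack go away without risking a silent layout change. A self-contained userspace analog of the same idea (the structure and its fields are invented for illustration; build with -std=c11):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Invented example of an on-wire layout, not a structure from the driver. */
struct example_completion {
	uint32_t num_returned_bytes;	/* big-endian on the wire */
	uint16_t tag;
	uint8_t  status;
	uint8_t  cycle;
};

/* Break the build, rather than the protocol, if the layout ever drifts. */
static_assert(sizeof(struct example_completion) == 8,
	      "completion entry must stay 8 bytes");
static_assert(offsetof(struct example_completion, tag) == 4,
	      "tag must sit at byte offset 4");

int main(void)
{
	return 0;
}

In the kernel the equivalent compile-time check is BUILD_BUG_ON(), and carrying the byte order in the type (__be32 rather than uint32_t) lets sparse flag any missing cpu_to_be32()/be32_to_cpu() conversion.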
*/ struct fit_completion_entry_v1 { - uint32_t num_returned_bytes; + __be32 num_returned_bytes; uint16_t tag; uint8_t status; /* SCSI status */ uint8_t cycle; @@ -278,7 +274,7 @@ struct fit_comp_error_info { uint16_t sks_low; /* 10: Sense Key Specific (LSW) */ uint16_t reserved3; /* 12: Part of additional sense bytes (unused) */ uint16_t uec; /* 14: Additional Sense Bytes */ - uint64_t per; /* 16: Additional Sense Bytes */ + uint64_t per __packed; /* 16: Additional Sense Bytes */ uint8_t reserved4[2]; /* 1E: Additional Sense Bytes (unused) */ }; @@ -292,11 +288,11 @@ struct fit_comp_error_info { * Version one has the last 32 bits sg_list_len_bytes; */ struct skd_command_header { - uint64_t sg_list_dma_address; + __be64 sg_list_dma_address; uint16_t tag; uint8_t attribute; uint8_t add_cdb_len; /* In 32 bit words */ - uint32_t sg_list_len_bytes; + __be32 sg_list_len_bytes; }; struct skd_scsi_request { @@ -309,22 +305,20 @@ struct driver_inquiry_data { uint8_t peripheral_device_type:5; uint8_t qualifier:3; uint8_t page_code; - uint16_t page_length; - uint16_t pcie_bus_number; + __be16 page_length; + __be16 pcie_bus_number; uint8_t pcie_device_number; uint8_t pcie_function_number; uint8_t pcie_link_speed; uint8_t pcie_link_lanes; - uint16_t pcie_vendor_id; - uint16_t pcie_device_id; - uint16_t pcie_subsystem_vendor_id; - uint16_t pcie_subsystem_device_id; + __be16 pcie_vendor_id; + __be16 pcie_device_id; + __be16 pcie_subsystem_vendor_id; + __be16 pcie_subsystem_device_id; uint8_t reserved1[2]; uint8_t reserved2[3]; uint8_t driver_version_length; uint8_t driver_version[0x14]; }; -#pragma pack(pop, s1120_h) - #endif /* SKD_S1120_H */ diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index d3d5523862c2..34e17ee799be 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -265,7 +265,7 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx, } spin_lock_irqsave(&vblk->vqs[qid].lock, flags); - if (req_op(req) == REQ_OP_SCSI_IN || req_op(req) == REQ_OP_SCSI_OUT) + if (blk_rq_is_scsi(req)) err = virtblk_add_req_scsi(vblk->vqs[qid].vq, vbr, vbr->sg, num); else err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num); diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index fe7cd58c43d0..987d665e82de 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -705,9 +705,9 @@ static unsigned int xen_blkbk_unmap_prepare( GNTMAP_host_map, pages[i]->handle); pages[i]->handle = BLKBACK_INVALID_HANDLE; invcount++; - } + } - return invcount; + return invcount; } static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_queue_data *data) @@ -1251,6 +1251,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring, break; case BLKIF_OP_WRITE_BARRIER: drain = true; + /* fall through */ case BLKIF_OP_FLUSH_DISKCACHE: ring->st_f_req++; operation = REQ_OP_WRITE; @@ -1362,7 +1363,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring, goto fail_put_bio; biolist[nbio++] = bio; - bio->bi_bdev = preq.bdev; + bio_set_dev(bio, preq.bdev); bio->bi_private = pending_req; bio->bi_end_io = end_block_io_op; bio->bi_iter.bi_sector = preq.sector_number; @@ -1381,7 +1382,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring, goto fail_put_bio; biolist[nbio++] = bio; - bio->bi_bdev = preq.bdev; + bio_set_dev(bio, preq.bdev); bio->bi_private = pending_req; bio->bi_end_io = end_block_io_op; bio_set_op_attrs(bio, operation, operation_flags); diff --git 
a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index 2adb8599be93..21c1be1eb226 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -816,7 +816,8 @@ static void frontend_changed(struct xenbus_device *dev, xenbus_switch_state(dev, XenbusStateClosed); if (xenbus_dev_is_online(dev)) break; - /* fall through if not online */ + /* fall through */ + /* if not online */ case XenbusStateUnknown: /* implies xen_blkif_disconnect() via xen_blkbk_remove() */ device_unregister(&dev->dev); diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 2468c28d4771..891265acb10e 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -2456,7 +2456,7 @@ static void blkback_changed(struct xenbus_device *dev, case XenbusStateClosed: if (dev->state == XenbusStateClosed) break; - /* Missed the backend's Closing state -- fallthrough */ + /* fall through */ case XenbusStateClosing: if (info) blkfront_closing(info); diff --git a/drivers/block/zram/Kconfig b/drivers/block/zram/Kconfig index b8ecba6dcd3b..7cd4a8ec3c8f 100644 --- a/drivers/block/zram/Kconfig +++ b/drivers/block/zram/Kconfig @@ -13,3 +13,15 @@ config ZRAM disks and maybe many more. See zram.txt for more information. + +config ZRAM_WRITEBACK + bool "Write back incompressible page to backing device" + depends on ZRAM + default n + help + With incompressible page, there is no memory saving to keep it + in memory. Instead, write it out to backing device. + For this feature, admin should set up backing device via + /sys/block/zramX/backing_dev. + + See zram.txt for more infomration. diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 3b1b6340ba13..f149d3e61234 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -175,20 +175,11 @@ static inline void update_used_max(struct zram *zram, } while (old_max != cur_max); } -static inline void zram_fill_page(char *ptr, unsigned long len, +static inline void zram_fill_page(void *ptr, unsigned long len, unsigned long value) { - int i; - unsigned long *page = (unsigned long *)ptr; - WARN_ON_ONCE(!IS_ALIGNED(len, sizeof(unsigned long))); - - if (likely(value == 0)) { - memset(ptr, 0, len); - } else { - for (i = 0; i < len / sizeof(*page); i++) - page[i] = value; - } + memset_l(ptr, value, len / sizeof(unsigned long)); } static bool page_same_filled(void *ptr, unsigned long *element) @@ -270,6 +261,349 @@ static ssize_t mem_used_max_store(struct device *dev, return len; } +#ifdef CONFIG_ZRAM_WRITEBACK +static bool zram_wb_enabled(struct zram *zram) +{ + return zram->backing_dev; +} + +static void reset_bdev(struct zram *zram) +{ + struct block_device *bdev; + + if (!zram_wb_enabled(zram)) + return; + + bdev = zram->bdev; + if (zram->old_block_size) + set_blocksize(bdev, zram->old_block_size); + blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); + /* hope filp_close flush all of IO */ + filp_close(zram->backing_dev, NULL); + zram->backing_dev = NULL; + zram->old_block_size = 0; + zram->bdev = NULL; + + kvfree(zram->bitmap); + zram->bitmap = NULL; +} + +static ssize_t backing_dev_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct zram *zram = dev_to_zram(dev); + struct file *file = zram->backing_dev; + char *p; + ssize_t ret; + + down_read(&zram->init_lock); + if (!zram_wb_enabled(zram)) { + memcpy(buf, "none\n", 5); + up_read(&zram->init_lock); + return 5; + } + + p = file_path(file, buf, PAGE_SIZE - 1); + if (IS_ERR(p)) { + 
ret = PTR_ERR(p); + goto out; + } + + ret = strlen(p); + memmove(buf, p, ret); + buf[ret++] = '\n'; +out: + up_read(&zram->init_lock); + return ret; +} + +static ssize_t backing_dev_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t len) +{ + char *file_name; + struct file *backing_dev = NULL; + struct inode *inode; + struct address_space *mapping; + unsigned int bitmap_sz, old_block_size = 0; + unsigned long nr_pages, *bitmap = NULL; + struct block_device *bdev = NULL; + int err; + struct zram *zram = dev_to_zram(dev); + + file_name = kmalloc(PATH_MAX, GFP_KERNEL); + if (!file_name) + return -ENOMEM; + + down_write(&zram->init_lock); + if (init_done(zram)) { + pr_info("Can't setup backing device for initialized device\n"); + err = -EBUSY; + goto out; + } + + strlcpy(file_name, buf, len); + + backing_dev = filp_open(file_name, O_RDWR|O_LARGEFILE, 0); + if (IS_ERR(backing_dev)) { + err = PTR_ERR(backing_dev); + backing_dev = NULL; + goto out; + } + + mapping = backing_dev->f_mapping; + inode = mapping->host; + + /* Support only block device in this moment */ + if (!S_ISBLK(inode->i_mode)) { + err = -ENOTBLK; + goto out; + } + + bdev = bdgrab(I_BDEV(inode)); + err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram); + if (err < 0) + goto out; + + nr_pages = i_size_read(inode) >> PAGE_SHIFT; + bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long); + bitmap = kvzalloc(bitmap_sz, GFP_KERNEL); + if (!bitmap) { + err = -ENOMEM; + goto out; + } + + old_block_size = block_size(bdev); + err = set_blocksize(bdev, PAGE_SIZE); + if (err) + goto out; + + reset_bdev(zram); + spin_lock_init(&zram->bitmap_lock); + + zram->old_block_size = old_block_size; + zram->bdev = bdev; + zram->backing_dev = backing_dev; + zram->bitmap = bitmap; + zram->nr_pages = nr_pages; + up_write(&zram->init_lock); + + pr_info("setup backing device %s\n", file_name); + kfree(file_name); + + return len; +out: + if (bitmap) + kvfree(bitmap); + + if (bdev) + blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); + + if (backing_dev) + filp_close(backing_dev, NULL); + + up_write(&zram->init_lock); + + kfree(file_name); + + return err; +} + +static unsigned long get_entry_bdev(struct zram *zram) +{ + unsigned long entry; + + spin_lock(&zram->bitmap_lock); + /* skip 0 bit to confuse zram.handle = 0 */ + entry = find_next_zero_bit(zram->bitmap, zram->nr_pages, 1); + if (entry == zram->nr_pages) { + spin_unlock(&zram->bitmap_lock); + return 0; + } + + set_bit(entry, zram->bitmap); + spin_unlock(&zram->bitmap_lock); + + return entry; +} + +static void put_entry_bdev(struct zram *zram, unsigned long entry) +{ + int was_set; + + spin_lock(&zram->bitmap_lock); + was_set = test_and_clear_bit(entry, zram->bitmap); + spin_unlock(&zram->bitmap_lock); + WARN_ON_ONCE(!was_set); +} + +void zram_page_end_io(struct bio *bio) +{ + struct page *page = bio->bi_io_vec[0].bv_page; + + page_endio(page, op_is_write(bio_op(bio)), + blk_status_to_errno(bio->bi_status)); + bio_put(bio); +} + +/* + * Returns 1 if the submission is successful. 
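backing_dev_store() above has to turn a user-supplied path into an exclusively claimed block device before it can be wired up as the writeback target: open the file, insist on S_ISBLK, take a reference with bdgrab() and claim it with blkdev_get(..., FMODE_EXCL, holder). That resolution step, reduced to a sketch (kernel context assumed; locking, blocksize handling and the rest of the store path are omitted, and example_claim_bdev() is not a function from the driver):

#include <linux/blkdev.h>
#include <linux/err.h>
#include <linux/fs.h>

static struct block_device *example_claim_bdev(const char *path, void *holder,
					       struct file **filp_out)
{
	struct file *file;
	struct inode *inode;
	struct block_device *bdev;
	int err;

	file = filp_open(path, O_RDWR | O_LARGEFILE, 0);
	if (IS_ERR(file))
		return ERR_CAST(file);

	inode = file->f_mapping->host;
	if (!S_ISBLK(inode->i_mode)) {
		filp_close(file, NULL);
		return ERR_PTR(-ENOTBLK);
	}

	/* Take a reference and claim the device exclusively for 'holder'. */
	bdev = bdgrab(I_BDEV(inode));
	err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, holder);
	if (err < 0) {
		/* blkdev_get() drops the bdgrab() reference itself on failure. */
		filp_close(file, NULL);
		return ERR_PTR(err);
	}

	*filp_out = file;
	return bdev;
}

The matching teardown is reset_bdev() above: blkdev_put() with the same FMODE_* flags, then filp_close() on the backing file.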
+ */ +static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec, + unsigned long entry, struct bio *parent) +{ + struct bio *bio; + + bio = bio_alloc(GFP_ATOMIC, 1); + if (!bio) + return -ENOMEM; + + bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9); + bio_set_dev(bio, zram->bdev); + if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) { + bio_put(bio); + return -EIO; + } + + if (!parent) { + bio->bi_opf = REQ_OP_READ; + bio->bi_end_io = zram_page_end_io; + } else { + bio->bi_opf = parent->bi_opf; + bio_chain(bio, parent); + } + + submit_bio(bio); + return 1; +} + +struct zram_work { + struct work_struct work; + struct zram *zram; + unsigned long entry; + struct bio *bio; +}; + +#if PAGE_SIZE != 4096 +static void zram_sync_read(struct work_struct *work) +{ + struct bio_vec bvec; + struct zram_work *zw = container_of(work, struct zram_work, work); + struct zram *zram = zw->zram; + unsigned long entry = zw->entry; + struct bio *bio = zw->bio; + + read_from_bdev_async(zram, &bvec, entry, bio); +} + +/* + * Block layer want one ->make_request_fn to be active at a time + * so if we use chained IO with parent IO in same context, + * it's a deadlock. To avoid, it, it uses worker thread context. + */ +static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec, + unsigned long entry, struct bio *bio) +{ + struct zram_work work; + + work.zram = zram; + work.entry = entry; + work.bio = bio; + + INIT_WORK_ONSTACK(&work.work, zram_sync_read); + queue_work(system_unbound_wq, &work.work); + flush_work(&work.work); + destroy_work_on_stack(&work.work); + + return 1; +} +#else +static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec, + unsigned long entry, struct bio *bio) +{ + WARN_ON(1); + return -EIO; +} +#endif + +static int read_from_bdev(struct zram *zram, struct bio_vec *bvec, + unsigned long entry, struct bio *parent, bool sync) +{ + if (sync) + return read_from_bdev_sync(zram, bvec, entry, parent); + else + return read_from_bdev_async(zram, bvec, entry, parent); +} + +static int write_to_bdev(struct zram *zram, struct bio_vec *bvec, + u32 index, struct bio *parent, + unsigned long *pentry) +{ + struct bio *bio; + unsigned long entry; + + bio = bio_alloc(GFP_ATOMIC, 1); + if (!bio) + return -ENOMEM; + + entry = get_entry_bdev(zram); + if (!entry) { + bio_put(bio); + return -ENOSPC; + } + + bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9); + bio_set_dev(bio, zram->bdev); + if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, + bvec->bv_offset)) { + bio_put(bio); + put_entry_bdev(zram, entry); + return -EIO; + } + + if (!parent) { + bio->bi_opf = REQ_OP_WRITE | REQ_SYNC; + bio->bi_end_io = zram_page_end_io; + } else { + bio->bi_opf = parent->bi_opf; + bio_chain(bio, parent); + } + + submit_bio(bio); + *pentry = entry; + + return 0; +} + +static void zram_wb_clear(struct zram *zram, u32 index) +{ + unsigned long entry; + + zram_clear_flag(zram, index, ZRAM_WB); + entry = zram_get_element(zram, index); + zram_set_element(zram, index, 0); + put_entry_bdev(zram, entry); +} + +#else +static bool zram_wb_enabled(struct zram *zram) { return false; } +static inline void reset_bdev(struct zram *zram) {}; +static int write_to_bdev(struct zram *zram, struct bio_vec *bvec, + u32 index, struct bio *parent, + unsigned long *pentry) + +{ + return -EIO; +} + +static int read_from_bdev(struct zram *zram, struct bio_vec *bvec, + unsigned long entry, struct bio *parent, bool sync) +{ + return -EIO; +} +static void zram_wb_clear(struct zram *zram, u32 index) {} 
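get_entry_bdev() and put_entry_bdev() above hand out page-sized slots on the backing device from a spinlock-protected bitmap, deliberately never returning slot 0 so that a stored value of zero keeps meaning "no writeback entry". A runnable userspace analog of that allocator (the pthread mutex and the linear scan stand in for the kernel's spinlock and find_next_zero_bit(); nothing here is zram code):

#include <limits.h>
#include <pthread.h>
#include <stdio.h>

#define NR_SLOTS	1024UL
#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)

static unsigned long bitmap[(NR_SLOTS + BITS_PER_LONG - 1) / BITS_PER_LONG];
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns a free slot index, or 0 when the backing space is exhausted
 * (slot 0 is never handed out, mirroring get_entry_bdev()). */
static unsigned long get_slot(void)
{
	unsigned long i;

	pthread_mutex_lock(&lock);
	for (i = 1; i < NR_SLOTS; i++) {
		if (!(bitmap[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)))) {
			bitmap[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
			pthread_mutex_unlock(&lock);
			return i;
		}
	}
	pthread_mutex_unlock(&lock);
	return 0;
}

static void put_slot(unsigned long slot)
{
	pthread_mutex_lock(&lock);
	bitmap[slot / BITS_PER_LONG] &= ~(1UL << (slot % BITS_PER_LONG));
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	unsigned long a = get_slot(), b = get_slot();

	printf("got slots %lu and %lu\n", a, b);	/* 1 and 2 */
	put_slot(a);
	printf("reused slot %lu\n", get_slot());	/* 1 again */
	put_slot(b);
	return 0;
}

In the kernel version the slot number doubles as the sector offset on the backing device (entry * (PAGE_SIZE >> 9)), which is why write_to_bdev() releases the slot again if bio_add_page() fails.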
+#endif + + /* * We switched to per-cpu streams and this attr is not needed anymore. * However, we will keep it around for some time, because: @@ -432,51 +766,6 @@ static void zram_slot_unlock(struct zram *zram, u32 index) bit_spin_unlock(ZRAM_ACCESS, &zram->table[index].value); } -static bool zram_same_page_read(struct zram *zram, u32 index, - struct page *page, - unsigned int offset, unsigned int len) -{ - zram_slot_lock(zram, index); - if (unlikely(!zram_get_handle(zram, index) || - zram_test_flag(zram, index, ZRAM_SAME))) { - void *mem; - - zram_slot_unlock(zram, index); - mem = kmap_atomic(page); - zram_fill_page(mem + offset, len, - zram_get_element(zram, index)); - kunmap_atomic(mem); - return true; - } - zram_slot_unlock(zram, index); - - return false; -} - -static bool zram_same_page_write(struct zram *zram, u32 index, - struct page *page) -{ - unsigned long element; - void *mem = kmap_atomic(page); - - if (page_same_filled(mem, &element)) { - kunmap_atomic(mem); - /* Free memory associated with this sector now. */ - zram_slot_lock(zram, index); - zram_free_page(zram, index); - zram_set_flag(zram, index, ZRAM_SAME); - zram_set_element(zram, index, element); - zram_slot_unlock(zram, index); - - atomic64_inc(&zram->stats.same_pages); - atomic64_inc(&zram->stats.pages_stored); - return true; - } - kunmap_atomic(mem); - - return false; -} - static void zram_meta_free(struct zram *zram, u64 disksize) { size_t num_pages = disksize >> PAGE_SHIFT; @@ -515,7 +804,13 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize) */ static void zram_free_page(struct zram *zram, size_t index) { - unsigned long handle = zram_get_handle(zram, index); + unsigned long handle; + + if (zram_wb_enabled(zram) && zram_test_flag(zram, index, ZRAM_WB)) { + zram_wb_clear(zram, index); + atomic64_dec(&zram->stats.pages_stored); + return; + } /* * No memory is allocated for same element filled pages. @@ -529,6 +824,7 @@ static void zram_free_page(struct zram *zram, size_t index) return; } + handle = zram_get_handle(zram, index); if (!handle) return; @@ -542,18 +838,45 @@ static void zram_free_page(struct zram *zram, size_t index) zram_set_obj_size(zram, index, 0); } -static int zram_decompress_page(struct zram *zram, struct page *page, u32 index) +static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index, + struct bio *bio, bool partial_io) { int ret; unsigned long handle; unsigned int size; void *src, *dst; - if (zram_same_page_read(zram, index, page, 0, PAGE_SIZE)) - return 0; + if (zram_wb_enabled(zram)) { + zram_slot_lock(zram, index); + if (zram_test_flag(zram, index, ZRAM_WB)) { + struct bio_vec bvec; + + zram_slot_unlock(zram, index); + + bvec.bv_page = page; + bvec.bv_len = PAGE_SIZE; + bvec.bv_offset = 0; + return read_from_bdev(zram, &bvec, + zram_get_element(zram, index), + bio, partial_io); + } + zram_slot_unlock(zram, index); + } zram_slot_lock(zram, index); handle = zram_get_handle(zram, index); + if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) { + unsigned long value; + void *mem; + + value = handle ? 
zram_get_element(zram, index) : 0; + mem = kmap_atomic(page); + zram_fill_page(mem, PAGE_SIZE, value); + kunmap_atomic(mem); + zram_slot_unlock(zram, index); + return 0; + } + size = zram_get_obj_size(zram, index); src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO); @@ -581,7 +904,7 @@ static int zram_decompress_page(struct zram *zram, struct page *page, u32 index) } static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec, - u32 index, int offset) + u32 index, int offset, struct bio *bio) { int ret; struct page *page; @@ -594,7 +917,7 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec, return -ENOMEM; } - ret = zram_decompress_page(zram, page, index); + ret = __zram_bvec_read(zram, page, index, bio, is_partial_io(bvec)); if (unlikely(ret)) goto out; @@ -613,30 +936,57 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec, return ret; } -static int zram_compress(struct zram *zram, struct zcomp_strm **zstrm, - struct page *page, - unsigned long *out_handle, unsigned int *out_comp_len) +static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec, + u32 index, struct bio *bio) { - int ret; - unsigned int comp_len; - void *src; + int ret = 0; unsigned long alloced_pages; unsigned long handle = 0; + unsigned int comp_len = 0; + void *src, *dst, *mem; + struct zcomp_strm *zstrm; + struct page *page = bvec->bv_page; + unsigned long element = 0; + enum zram_pageflags flags = 0; + bool allow_wb = true; + + mem = kmap_atomic(page); + if (page_same_filled(mem, &element)) { + kunmap_atomic(mem); + /* Free memory associated with this sector now. */ + flags = ZRAM_SAME; + atomic64_inc(&zram->stats.same_pages); + goto out; + } + kunmap_atomic(mem); compress_again: + zstrm = zcomp_stream_get(zram->comp); src = kmap_atomic(page); - ret = zcomp_compress(*zstrm, src, &comp_len); + ret = zcomp_compress(zstrm, src, &comp_len); kunmap_atomic(src); if (unlikely(ret)) { + zcomp_stream_put(zram->comp); pr_err("Compression failed! 
err=%d\n", ret); - if (handle) - zs_free(zram->mem_pool, handle); + zs_free(zram->mem_pool, handle); return ret; } - if (unlikely(comp_len > max_zpage_size)) + if (unlikely(comp_len > max_zpage_size)) { + if (zram_wb_enabled(zram) && allow_wb) { + zcomp_stream_put(zram->comp); + ret = write_to_bdev(zram, bvec, index, bio, &element); + if (!ret) { + flags = ZRAM_WB; + ret = 1; + goto out; + } + allow_wb = false; + goto compress_again; + } comp_len = PAGE_SIZE; + } /* * handle allocation has 2 paths: @@ -663,7 +1013,6 @@ static int zram_compress(struct zram *zram, struct zcomp_strm **zstrm, handle = zs_malloc(zram->mem_pool, comp_len, GFP_NOIO | __GFP_HIGHMEM | __GFP_MOVABLE); - *zstrm = zcomp_stream_get(zram->comp); if (handle) goto compress_again; return -ENOMEM; @@ -673,34 +1022,11 @@ static int zram_compress(struct zram *zram, struct zcomp_strm **zstrm, update_used_max(zram, alloced_pages); if (zram->limit_pages && alloced_pages > zram->limit_pages) { + zcomp_stream_put(zram->comp); zs_free(zram->mem_pool, handle); return -ENOMEM; } - *out_handle = handle; - *out_comp_len = comp_len; - return 0; -} - -static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index) -{ - int ret; - unsigned long handle; - unsigned int comp_len; - void *src, *dst; - struct zcomp_strm *zstrm; - struct page *page = bvec->bv_page; - - if (zram_same_page_write(zram, index, page)) - return 0; - - zstrm = zcomp_stream_get(zram->comp); - ret = zram_compress(zram, &zstrm, page, &handle, &comp_len); - if (ret) { - zcomp_stream_put(zram->comp); - return ret; - } - dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO); src = zstrm->buffer; @@ -712,25 +1038,31 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index) zcomp_stream_put(zram->comp); zs_unmap_object(zram->mem_pool, handle); - + atomic64_add(comp_len, &zram->stats.compr_data_size); +out: /* * Free memory associated with this sector * before overwriting unused sectors. */ zram_slot_lock(zram, index); zram_free_page(zram, index); - zram_set_handle(zram, index, handle); - zram_set_obj_size(zram, index, comp_len); + + if (flags) { + zram_set_flag(zram, index, flags); + zram_set_element(zram, index, element); + } else { + zram_set_handle(zram, index, handle); + zram_set_obj_size(zram, index, comp_len); + } zram_slot_unlock(zram, index); /* Update stats */ - atomic64_add(comp_len, &zram->stats.compr_data_size); atomic64_inc(&zram->stats.pages_stored); - return 0; + return ret; } static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, - u32 index, int offset) + u32 index, int offset, struct bio *bio) { int ret; struct page *page = NULL; @@ -748,7 +1080,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, if (!page) return -ENOMEM; - ret = zram_decompress_page(zram, page, index); + ret = __zram_bvec_read(zram, page, index, bio, true); if (ret) goto out; @@ -763,7 +1095,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, vec.bv_offset = 0; } - ret = __zram_bvec_write(zram, &vec, index); + ret = __zram_bvec_write(zram, &vec, index, bio); out: if (is_partial_io(bvec)) __free_page(page); @@ -808,28 +1140,34 @@ static void zram_bio_discard(struct zram *zram, u32 index, } } +/* + * Returns errno if it has some problem. Otherwise return 0 or 1. + * Returns 0 if IO request was done synchronously + * Returns 1 if IO request was successfully submitted. 
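The reworked __zram_bvec_write() visible in this hunk only falls back to the backing device when compression does not pay off: compress first, and if the result is still larger than max_zpage_size, try one writeback; if that fails, recompress and store the page uncompressed in the pool. A condensed, runnable sketch of just that decision (the example_* helpers are stubs invented for illustration, standing in for zcomp_compress() and write_to_bdev()):

#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE	4096u
#define EXAMPLE_MAX_ZPAGE	(EXAMPLE_PAGE_SIZE / 4 * 3)

/* Stub: pretend the page is incompressible for the purpose of the demo. */
static int example_compress(const void *page, unsigned int *comp_len)
{
	(void)page;
	*comp_len = 4000;
	return 0;
}

/* Stub: pretend the backing-device write succeeds. */
static int example_writeback(const void *page)
{
	(void)page;
	return 0;
}

/* Returns 0 if stored in the pool, 1 if written back, <0 on error. */
static int example_store(const void *page)
{
	unsigned int comp_len = 0;
	bool allow_wb = true;
	int ret;

compress_again:
	ret = example_compress(page, &comp_len);
	if (ret)
		return ret;

	if (comp_len > EXAMPLE_MAX_ZPAGE) {
		if (allow_wb && example_writeback(page) == 0)
			return 1;		/* page now lives on the backing device */
		if (allow_wb) {
			allow_wb = false;	/* writeback failed: one more pass */
			goto compress_again;
		}
		comp_len = EXAMPLE_PAGE_SIZE;	/* store the page uncompressed */
	}

	printf("storing %u bytes in the pool\n", comp_len);
	return 0;
}

int main(void)
{
	int ret = example_store("dummy page contents");

	printf("example_store() returned %d\n", ret);	/* 1 with these stubs */
	return ret < 0;
}

The real function additionally has to drop and re-acquire the compression stream around the writeback attempt, which is why the patch moves zcomp_stream_get() inside the compress_again loop.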
+ */ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index, - int offset, bool is_write) + int offset, bool is_write, struct bio *bio) { unsigned long start_time = jiffies; int rw_acct = is_write ? REQ_OP_WRITE : REQ_OP_READ; + struct request_queue *q = zram->disk->queue; int ret; - generic_start_io_acct(rw_acct, bvec->bv_len >> SECTOR_SHIFT, + generic_start_io_acct(q, rw_acct, bvec->bv_len >> SECTOR_SHIFT, &zram->disk->part0); if (!is_write) { atomic64_inc(&zram->stats.num_reads); - ret = zram_bvec_read(zram, bvec, index, offset); + ret = zram_bvec_read(zram, bvec, index, offset, bio); flush_dcache_page(bvec->bv_page); } else { atomic64_inc(&zram->stats.num_writes); - ret = zram_bvec_write(zram, bvec, index, offset); + ret = zram_bvec_write(zram, bvec, index, offset, bio); } - generic_end_io_acct(rw_acct, &zram->disk->part0, start_time); + generic_end_io_acct(q, rw_acct, &zram->disk->part0, start_time); - if (unlikely(ret)) { + if (unlikely(ret < 0)) { if (!is_write) atomic64_inc(&zram->stats.failed_reads); else @@ -868,7 +1206,7 @@ static void __zram_make_request(struct zram *zram, struct bio *bio) bv.bv_len = min_t(unsigned int, PAGE_SIZE - offset, unwritten); if (zram_bvec_rw(zram, &bv, index, offset, - op_is_write(bio_op(bio))) < 0) + op_is_write(bio_op(bio)), bio) < 0) goto out; bv.bv_offset += bv.bv_len; @@ -922,16 +1260,18 @@ static void zram_slot_free_notify(struct block_device *bdev, static int zram_rw_page(struct block_device *bdev, sector_t sector, struct page *page, bool is_write) { - int offset, err = -EIO; + int offset, ret; u32 index; struct zram *zram; struct bio_vec bv; + if (PageTransHuge(page)) + return -ENOTSUPP; zram = bdev->bd_disk->private_data; if (!valid_io_request(zram, sector, PAGE_SIZE)) { atomic64_inc(&zram->stats.invalid_io); - err = -EINVAL; + ret = -EINVAL; goto out; } @@ -942,7 +1282,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector, bv.bv_len = PAGE_SIZE; bv.bv_offset = 0; - err = zram_bvec_rw(zram, &bv, index, offset, is_write); + ret = zram_bvec_rw(zram, &bv, index, offset, is_write, NULL); out: /* * If I/O fails, just return error(ie, non-zero) without @@ -952,9 +1292,20 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector, * bio->bi_end_io does things to handle the error * (e.g., SetPageError, set_page_dirty and extra works). 
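zram_bvec_rw() in this hunk also adopts the new calling convention of the generic I/O accounting helpers, which now take the request_queue as their first argument. The bracketing pattern, reduced to a kernel-style sketch (hypothetical example_ function; the actual transfer is elided):

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/jiffies.h>
#include <linux/types.h>

static void example_account_segment(struct request_queue *q,
				    struct gendisk *disk,
				    struct bio_vec *bvec, bool is_write)
{
	unsigned long start = jiffies;
	int rw = is_write ? REQ_OP_WRITE : REQ_OP_READ;

	/* bv_len is in bytes; the accounting helpers want 512-byte sectors. */
	generic_start_io_acct(q, rw, bvec->bv_len >> 9, &disk->part0);

	/* ... perform the read or write for this segment ... */

	generic_end_io_acct(q, rw, &disk->part0, start);
}

Failed operations are still counted separately by the driver (failed_reads/failed_writes); the generic helpers only track issued I/O and its duration.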
*/ - if (err == 0) + if (unlikely(ret < 0)) + return ret; + + switch (ret) { + case 0: page_endio(page, is_write, 0); - return err; + break; + case 1: + ret = 0; + break; + default: + WARN_ON(1); + } + return ret; } static void zram_reset_device(struct zram *zram) @@ -983,6 +1334,7 @@ static void zram_reset_device(struct zram *zram) zram_meta_free(zram, disksize); memset(&zram->stats, 0, sizeof(zram->stats)); zcomp_destroy(comp); + reset_bdev(zram); } static ssize_t disksize_store(struct device *dev, @@ -1108,6 +1460,9 @@ static DEVICE_ATTR_WO(mem_limit); static DEVICE_ATTR_WO(mem_used_max); static DEVICE_ATTR_RW(max_comp_streams); static DEVICE_ATTR_RW(comp_algorithm); +#ifdef CONFIG_ZRAM_WRITEBACK +static DEVICE_ATTR_RW(backing_dev); +#endif static struct attribute *zram_disk_attrs[] = { &dev_attr_disksize.attr, @@ -1118,6 +1473,9 @@ static struct attribute *zram_disk_attrs[] = { &dev_attr_mem_used_max.attr, &dev_attr_max_comp_streams.attr, &dev_attr_comp_algorithm.attr, +#ifdef CONFIG_ZRAM_WRITEBACK + &dev_attr_backing_dev.attr, +#endif &dev_attr_io_stat.attr, &dev_attr_mm_stat.attr, &dev_attr_debug_stat.attr, diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h index e34e44d02e3e..31762db861e3 100644 --- a/drivers/block/zram/zram_drv.h +++ b/drivers/block/zram/zram_drv.h @@ -60,9 +60,10 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3; /* Flags for zram pages (table[page_no].value) */ enum zram_pageflags { - /* Page consists entirely of zeros */ + /* Page consists the same element */ ZRAM_SAME = ZRAM_FLAG_SHIFT, ZRAM_ACCESS, /* page is now accessed */ + ZRAM_WB, /* page is stored on backing_device */ __NR_ZRAM_PAGEFLAGS, }; @@ -115,5 +116,13 @@ struct zram { * zram is claimed so open request will be failed */ bool claim; /* Protected by bdev->bd_mutex */ +#ifdef CONFIG_ZRAM_WRITEBACK + struct file *backing_dev; + struct block_device *bdev; + unsigned int old_block_size; + unsigned long *bitmap; + unsigned long nr_pages; + spinlock_t bitmap_lock; +#endif }; #endif diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index 35952a94875e..fae5a74dc737 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig @@ -98,6 +98,7 @@ config BT_HCIUART_NOKIA depends on BT_HCIUART_SERDEV depends on PM select BT_HCIUART_H4 + select BT_BCM help Nokia H4+ is serial protocol for communication between Bluetooth device and host. 
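The new backing_dev attribute above is only present when CONFIG_ZRAM_WRITEBACK is enabled, which is handled by guarding both the DEVICE_ATTR_RW() definition and its slot in the attribute array with the same #ifdef. A stripped-down sketch of that pattern with an invented attribute and config symbol (CONFIG_EXAMPLE_FEATURE and the example_* handlers do not exist anywhere):

#include <linux/device.h>
#include <linux/sysfs.h>

#ifdef CONFIG_EXAMPLE_FEATURE
static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "none\n");
}

static ssize_t example_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t len)
{
	return len;
}

/* Expands to dev_attr_example with 0644 permissions. */
static DEVICE_ATTR_RW(example);
#endif

static struct attribute *example_disk_attrs[] = {
#ifdef CONFIG_EXAMPLE_FEATURE
	&dev_attr_example.attr,
#endif
	NULL,
};

Keeping the attribute out of the array entirely, rather than returning an error from its handlers, means a kernel built without the feature simply never shows the file in sysfs.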
This protocol is required for Bluetooth devices @@ -167,6 +168,7 @@ config BT_HCIUART_INTEL config BT_HCIUART_BCM bool "Broadcom protocol support" depends on BT_HCIUART + depends on BT_HCIUART_SERDEV select BT_HCIUART_H4 select BT_BCM help diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index b793853ff05f..204afe66de92 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -140,7 +140,8 @@ MODULE_DEVICE_TABLE(usb, ath3k_table); #define BTUSB_ATH3012 0x80 /* This table is to load patch and sysconfig files - * for AR3012 */ + * for AR3012 + */ static const struct usb_device_id ath3k_blist_tbl[] = { /* Atheros AR3012 with sflash firmware*/ diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c index d4b0b655dde6..b07ca9565291 100644 --- a/drivers/bluetooth/bluecard_cs.c +++ b/drivers/bluetooth/bluecard_cs.c @@ -93,6 +93,7 @@ static void bluecard_detach(struct pcmcia_device *p_dev); /* Hardware states */ #define CARD_READY 1 +#define CARD_ACTIVITY 2 #define CARD_HAS_PCCARD_ID 4 #define CARD_HAS_POWER_LED 5 #define CARD_HAS_ACTIVITY_LED 6 @@ -160,16 +161,14 @@ static void bluecard_activity_led_timeout(u_long arg) struct bluecard_info *info = (struct bluecard_info *)arg; unsigned int iobase = info->p_dev->resource[0]->start; - if (!test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) - return; - - if (test_bit(CARD_HAS_ACTIVITY_LED, &(info->hw_state))) { - /* Disable activity LED */ - outb(0x08 | 0x20, iobase + 0x30); - } else { - /* Disable power LED */ - outb(0x00, iobase + 0x30); + if (test_bit(CARD_ACTIVITY, &(info->hw_state))) { + /* leave LED in inactive state for HZ/10 for blink effect */ + clear_bit(CARD_ACTIVITY, &(info->hw_state)); + mod_timer(&(info->timer), jiffies + HZ / 10); } + + /* Disable activity LED, enable power LED */ + outb(0x08 | 0x20, iobase + 0x30); } @@ -177,22 +176,22 @@ static void bluecard_enable_activity_led(struct bluecard_info *info) { unsigned int iobase = info->p_dev->resource[0]->start; - if (!test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) + /* don't disturb running blink timer */ + if (timer_pending(&(info->timer))) return; + set_bit(CARD_ACTIVITY, &(info->hw_state)); + if (test_bit(CARD_HAS_ACTIVITY_LED, &(info->hw_state))) { - /* Enable activity LED */ - outb(0x10 | 0x40, iobase + 0x30); - - /* Stop the LED after HZ/4 */ - mod_timer(&(info->timer), jiffies + HZ / 4); + /* Enable activity LED, keep power LED enabled */ + outb(0x18 | 0x60, iobase + 0x30); } else { - /* Enable power LED */ - outb(0x08 | 0x20, iobase + 0x30); - - /* Stop the LED after HZ/2 */ - mod_timer(&(info->timer), jiffies + HZ / 2); + /* Disable power LED */ + outb(0x00, iobase + 0x30); } + + /* Stop the LED after HZ/10 */ + mod_timer(&(info->timer), jiffies + HZ / 10); } @@ -625,16 +624,13 @@ static int bluecard_hci_flush(struct hci_dev *hdev) static int bluecard_hci_open(struct hci_dev *hdev) { struct bluecard_info *info = hci_get_drvdata(hdev); + unsigned int iobase = info->p_dev->resource[0]->start; if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) bluecard_hci_set_baud_rate(hdev, DEFAULT_BAUD_RATE); - if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) { - unsigned int iobase = info->p_dev->resource[0]->start; - - /* Enable LED */ - outb(0x08 | 0x20, iobase + 0x30); - } + /* Enable power LED */ + outb(0x08 | 0x20, iobase + 0x30); return 0; } @@ -643,15 +639,15 @@ static int bluecard_hci_open(struct hci_dev *hdev) static int bluecard_hci_close(struct hci_dev *hdev) { struct bluecard_info *info = hci_get_drvdata(hdev); + 
unsigned int iobase = info->p_dev->resource[0]->start; bluecard_hci_flush(hdev); - if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) { - unsigned int iobase = info->p_dev->resource[0]->start; + /* Stop LED timer */ + del_timer_sync(&(info->timer)); - /* Disable LED */ - outb(0x00, iobase + 0x30); - } + /* Disable power LED */ + outb(0x00, iobase + 0x30); return 0; } diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c index 32dcac017395..194788739a83 100644 --- a/drivers/bluetooth/bt3c_cs.c +++ b/drivers/bluetooth/bt3c_cs.c @@ -684,14 +684,16 @@ static int bt3c_config(struct pcmcia_device *link) unsigned long try; /* First pass: look for a config entry that looks normal. - Two tries: without IO aliases, then with aliases */ + * Two tries: without IO aliases, then with aliases + */ for (try = 0; try < 2; try++) if (!pcmcia_loop_config(link, bt3c_check_config, (void *) try)) goto found_port; /* Second pass: try to find an entry that isn't picky about - its base address, then try to grab any standard serial port - address, and finally try to get any free port. */ + * its base address, then try to grab any standard serial port + * address, and finally try to get any free port. + */ if (!pcmcia_loop_config(link, bt3c_check_config_notpicky, NULL)) goto found_port; diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c index 9ab6cfbb831d..cc4bdefa6648 100644 --- a/drivers/bluetooth/btbcm.c +++ b/drivers/bluetooth/btbcm.c @@ -287,6 +287,37 @@ static struct sk_buff *btbcm_read_usb_product(struct hci_dev *hdev) return skb; } +static int btbcm_read_info(struct hci_dev *hdev) +{ + struct sk_buff *skb; + + /* Read Verbose Config Version Info */ + skb = btbcm_read_verbose_config(hdev); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + BT_INFO("%s: BCM: chip id %u", hdev->name, skb->data[1]); + kfree_skb(skb); + + /* Read Controller Features */ + skb = btbcm_read_controller_features(hdev); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + BT_INFO("%s: BCM: features 0x%2.2x", hdev->name, skb->data[1]); + kfree_skb(skb); + + /* Read Local Name */ + skb = btbcm_read_local_name(hdev); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + BT_INFO("%s: %s", hdev->name, (char *)(skb->data + 1)); + kfree_skb(skb); + + return 0; +} + static const struct { u16 subver; const char *name; @@ -322,13 +353,10 @@ int btbcm_initialize(struct hci_dev *hdev, char *fw_name, size_t len) subver = le16_to_cpu(ver->lmp_subver); kfree_skb(skb); - /* Read Verbose Config Version Info */ - skb = btbcm_read_verbose_config(hdev); - if (IS_ERR(skb)) - return PTR_ERR(skb); - - BT_INFO("%s: BCM: chip id %u", hdev->name, skb->data[1]); - kfree_skb(skb); + /* Read controller information */ + err = btbcm_read_info(hdev); + if (err) + return err; switch ((rev & 0xf000) >> 12) { case 0: @@ -431,29 +459,10 @@ int btbcm_setup_patchram(struct hci_dev *hdev) subver = le16_to_cpu(ver->lmp_subver); kfree_skb(skb); - /* Read Verbose Config Version Info */ - skb = btbcm_read_verbose_config(hdev); - if (IS_ERR(skb)) - return PTR_ERR(skb); - - BT_INFO("%s: BCM: chip id %u", hdev->name, skb->data[1]); - kfree_skb(skb); - - /* Read Controller Features */ - skb = btbcm_read_controller_features(hdev); - if (IS_ERR(skb)) - return PTR_ERR(skb); - - BT_INFO("%s: BCM: features 0x%2.2x", hdev->name, skb->data[1]); - kfree_skb(skb); - - /* Read Local Name */ - skb = btbcm_read_local_name(hdev); - if (IS_ERR(skb)) - return PTR_ERR(skb); - - BT_INFO("%s: %s", hdev->name, (char *)(skb->data + 1)); - kfree_skb(skb); + /* Read controller information 
*/ + err = btbcm_read_info(hdev); + if (err) + return err; switch ((rev & 0xf000) >> 12) { case 0: diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c index eb794f08b238..03341ce98c32 100644 --- a/drivers/bluetooth/btmrvl_sdio.c +++ b/drivers/bluetooth/btmrvl_sdio.c @@ -1455,7 +1455,8 @@ static void btmrvl_sdio_dump_firmware(struct btmrvl_private *priv) fw_dump_ptr = fw_dump_data; /* Dump all the memory data into single file, a userspace script will - be used to split all the memory data to multiple files*/ + * be used to split all the memory data to multiple files + */ BT_INFO("== btmrvl firmware dump to /sys/class/devcoredump start"); for (idx = 0; idx < dump_num; idx++) { struct memory_type_mapping *entry = &mem_type_mapping_tbl[idx]; @@ -1482,7 +1483,8 @@ static void btmrvl_sdio_dump_firmware(struct btmrvl_private *priv) } /* fw_dump_data will be free in device coredump release function - after 5 min*/ + * after 5 min + */ dev_coredumpv(&card->func->dev, fw_dump_data, fw_dump_len, GFP_KERNEL); BT_INFO("== btmrvl firmware dump to /sys/class/devcoredump end"); } diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c index 28afd5d585f9..0bbdfcef2aa8 100644 --- a/drivers/bluetooth/btqca.c +++ b/drivers/bluetooth/btqca.c @@ -81,7 +81,7 @@ static int rome_patch_ver_req(struct hci_dev *hdev, u32 *rome_version) * and lower 2 bytes from patch will be used. */ *rome_version = (le32_to_cpu(ver->soc_id) << 16) | - (le16_to_cpu(ver->rome_ver) & 0x0000ffff); + (le16_to_cpu(ver->rome_ver) & 0x0000ffff); out: kfree_skb(skb); diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c index 8279094dd713..d9a99b4302ea 100644 --- a/drivers/bluetooth/btrtl.c +++ b/drivers/bluetooth/btrtl.c @@ -279,6 +279,8 @@ static int rtl_load_config(struct hci_dev *hdev, const char *name, u8 **buff) return ret; ret = fw->size; *buff = kmemdup(fw->data, ret, GFP_KERNEL); + if (!*buff) + ret = -ENOMEM; release_firmware(fw); diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c index 1cb958e199eb..c8e945d19ffe 100644 --- a/drivers/bluetooth/btsdio.c +++ b/drivers/bluetooth/btsdio.c @@ -144,7 +144,8 @@ static int btsdio_rx_packet(struct btsdio_data *data) if (!skb) { /* Out of memory. Prepare a read retry and just * return with the expectation that the next time - * we're called we'll have more memory. */ + * we're called we'll have more memory. + */ return -ENOMEM; } diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c index 7df79bb12350..310e9c2e09b6 100644 --- a/drivers/bluetooth/btuart_cs.c +++ b/drivers/bluetooth/btuart_cs.c @@ -614,14 +614,16 @@ static int btuart_config(struct pcmcia_device *link) int try; /* First pass: look for a config entry that looks normal. - Two tries: without IO aliases, then with aliases */ + * Two tries: without IO aliases, then with aliases + */ for (try = 0; try < 2; try++) if (!pcmcia_loop_config(link, btuart_check_config, &try)) goto found_port; /* Second pass: try to find an entry that isn't picky about - its base address, then try to grab any standard serial port - address, and finally try to get any free port. */ + * its base address, then try to grab any standard serial port + * address, and finally try to get any free port. 
+ */ if (!pcmcia_loop_config(link, btuart_check_config_notpicky, NULL)) goto found_port; diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index fa24d693af24..7a5c06aaa181 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -66,6 +66,7 @@ static struct usb_driver btusb_driver; #define BTUSB_BCM2045 0x40000 #define BTUSB_IFNUM_2 0x80000 #define BTUSB_CW6622 0x100000 +#define BTUSB_BCM_NO_PRODID 0x200000 static const struct usb_device_id btusb_table[] = { /* Generic Bluetooth USB device */ @@ -131,7 +132,8 @@ static const struct usb_device_id btusb_table[] = { { USB_DEVICE(0x19ff, 0x0239), .driver_info = BTUSB_BCM_PATCHRAM }, /* Broadcom BCM43142A0 (Foxconn/Lenovo) */ - { USB_DEVICE(0x105b, 0xe065), .driver_info = BTUSB_BCM_PATCHRAM }, + { USB_VENDOR_AND_INTERFACE_INFO(0x105b, 0xff, 0x01, 0x01), + .driver_info = BTUSB_BCM_PATCHRAM }, /* Broadcom BCM920703 (HTC Vive) */ { USB_VENDOR_AND_INTERFACE_INFO(0x0bb4, 0xff, 0x01, 0x01), @@ -169,6 +171,10 @@ static const struct usb_device_id btusb_table[] = { { USB_VENDOR_AND_INTERFACE_INFO(0x0930, 0xff, 0x01, 0x01), .driver_info = BTUSB_BCM_PATCHRAM }, + /* Broadcom devices with missing product id */ + { USB_DEVICE_AND_INTERFACE_INFO(0x0000, 0x0000, 0xff, 0x01, 0x01), + .driver_info = BTUSB_BCM_PATCHRAM | BTUSB_BCM_NO_PRODID }, + /* Intel Bluetooth USB Bootloader (RAM module) */ { USB_DEVICE(0x8087, 0x0a5a), .driver_info = BTUSB_INTEL_BOOT | BTUSB_BROKEN_ISOC }, @@ -268,6 +274,7 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x0489, 0xe092), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x0489, 0xe0a2), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x04ca, 0x3011), .driver_info = BTUSB_QCA_ROME }, + { USB_DEVICE(0x04ca, 0x3016), .driver_info = BTUSB_QCA_ROME }, /* Broadcom BCM2035 */ { USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 }, @@ -357,6 +364,7 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x13d3, 0x3410), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x13d3, 0x3416), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x13d3, 0x3459), .driver_info = BTUSB_REALTEK }, + { USB_DEVICE(0x13d3, 0x3494), .driver_info = BTUSB_REALTEK }, /* Additional Realtek 8821AE Bluetooth devices */ { USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK }, @@ -656,7 +664,8 @@ static void btusb_intr_complete(struct urb *urb) err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) { /* -EPERM: urb is being killed; - * -ENODEV: device got disconnected */ + * -ENODEV: device got disconnected + */ if (err != -EPERM && err != -ENODEV) BT_ERR("%s urb %p failed to resubmit (%d)", hdev->name, urb, -err); @@ -745,7 +754,8 @@ static void btusb_bulk_complete(struct urb *urb) err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) { /* -EPERM: urb is being killed; - * -ENODEV: device got disconnected */ + * -ENODEV: device got disconnected + */ if (err != -EPERM && err != -ENODEV) BT_ERR("%s urb %p failed to resubmit (%d)", hdev->name, urb, -err); @@ -840,7 +850,8 @@ static void btusb_isoc_complete(struct urb *urb) err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) { /* -EPERM: urb is being killed; - * -ENODEV: device got disconnected */ + * -ENODEV: device got disconnected + */ if (err != -EPERM && err != -ENODEV) BT_ERR("%s urb %p failed to resubmit (%d)", hdev->name, urb, -err); @@ -952,7 +963,8 @@ static void btusb_diag_complete(struct urb *urb) err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) { /* -EPERM: urb is being killed; - * -ENODEV: device got disconnected */ + * -ENODEV: 
device got disconnected + */ if (err != -EPERM && err != -ENODEV) BT_ERR("%s urb %p failed to resubmit (%d)", hdev->name, urb, -err); @@ -1076,6 +1088,10 @@ static int btusb_open(struct hci_dev *hdev) } data->intf->needs_remote_wakeup = 1; + /* device specific wakeup source enabled and required for USB + * remote wakeup while host is suspended + */ + device_wakeup_enable(&data->udev->dev); if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags)) goto done; @@ -1139,6 +1155,7 @@ static int btusb_close(struct hci_dev *hdev) goto failed; data->intf->needs_remote_wakeup = 0; + device_wakeup_disable(&data->udev->dev); usb_autopm_put_interface(data->intf); failed: @@ -2892,11 +2909,25 @@ static int btusb_probe(struct usb_interface *intf, if (id->driver_info == BTUSB_IGNORE) return -ENODEV; + if (id->driver_info & BTUSB_BCM_NO_PRODID) { + struct usb_device *udev = interface_to_usbdev(intf); + + /* For the broken Broadcom devices that show 0000:0000 + * as USB vendor and product information, check that the + * manufacturer string identifies them as Broadcom based + * devices. + */ + if (!udev->manufacturer || + strcmp(udev->manufacturer, "Broadcom Corp")) + return -ENODEV; + } + if (id->driver_info & BTUSB_ATH3012) { struct usb_device *udev = interface_to_usbdev(intf); /* Old firmware would otherwise let ath3k driver load - * patch and sysconfig files */ + * patch and sysconfig files + */ if (le16_to_cpu(udev->descriptor.bcdDevice) <= 0x0001) return -ENODEV; } @@ -3067,6 +3098,12 @@ static int btusb_probe(struct usb_interface *intf, if (id->driver_info & BTUSB_QCA_ROME) { data->setup_on_usb = btusb_setup_qca; hdev->set_bdaddr = btusb_set_bdaddr_ath3012; + + /* QCA Rome devices lose their updated firmware over suspend, + * but the USB hub doesn't notice any status change. + * Explicitly request a device reset on resume. + */ + set_bit(BTUSB_RESET_RESUME, &data->flags); } #ifdef CONFIG_BT_HCIBTUSB_RTL @@ -3259,13 +3296,28 @@ static void play_deferred(struct btusb_data *data) int err; while ((urb = usb_get_from_anchor(&data->deferred))) { + usb_anchor_urb(urb, &data->tx_anchor); + err = usb_submit_urb(urb, GFP_ATOMIC); - if (err < 0) + if (err < 0) { + if (err != -EPERM && err != -ENODEV) + BT_ERR("%s urb %p submission failed (%d)", + data->hdev->name, urb, -err); + kfree(urb->setup_packet); + usb_unanchor_urb(urb); + usb_free_urb(urb); break; + } data->tx_in_flight++; + usb_free_urb(urb); + } + + /* Cleanup the rest deferred urbs. */ + while ((urb = usb_get_from_anchor(&data->deferred))) { + kfree(urb->setup_packet); + usb_free_urb(urb); } - usb_scuttle_anchored_urbs(&data->deferred); } static int btusb_resume(struct usb_interface *intf) diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c index 85a3978b064f..5ef8000f90a9 100644 --- a/drivers/bluetooth/btwilink.c +++ b/drivers/bluetooth/btwilink.c @@ -93,8 +93,7 @@ static void st_reg_completion_cb(void *priv_data, int data) complete(&lhst->wait_reg_completion); } -/* Called by Shared Transport layer when receive data is - * available */ +/* Called by Shared Transport layer when receive data is available */ static long st_receive(void *priv_data, struct sk_buff *skb) { struct ti_st *lhst = priv_data; @@ -198,7 +197,8 @@ static int ti_st_open(struct hci_dev *hdev) } /* Is ST registration callback - * called with ERROR status? */ + * called with ERROR status? 
+ */ if (hst->reg_status != 0) { BT_ERR("ST registration completed with invalid " "status %d", hst->reg_status); @@ -276,7 +276,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb) static int bt_ti_probe(struct platform_device *pdev) { - static struct ti_st *hst; + struct ti_st *hst; struct hci_dev *hdev; int err; diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c index 6a662d0161b4..e2540113d0da 100644 --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c @@ -27,6 +27,8 @@ #include #include #include +#include +#include #include #include #include @@ -34,6 +36,7 @@ #include #include #include +#include #include #include @@ -41,11 +44,15 @@ #include "btbcm.h" #include "hci_uart.h" +#define BCM_NULL_PKT 0x00 +#define BCM_NULL_SIZE 0 + #define BCM_LM_DIAG_PKT 0x07 #define BCM_LM_DIAG_SIZE 63 #define BCM_AUTOSUSPEND_DELAY 5000 /* default autosleep delay */ +/* platform device driver resources */ struct bcm_device { struct list_head list; @@ -59,6 +66,7 @@ struct bcm_device { bool clk_enabled; u32 init_speed; + u32 oper_speed; int irq; u8 irq_polarity; @@ -68,6 +76,12 @@ struct bcm_device { #endif }; +/* serdev driver resources */ +struct bcm_serdev { + struct hci_uart hu; +}; + +/* generic bcm uart resources */ struct bcm_data { struct sk_buff *rx_skb; struct sk_buff_head txq; @@ -79,6 +93,14 @@ struct bcm_data { static DEFINE_MUTEX(bcm_device_lock); static LIST_HEAD(bcm_device_list); +static inline void host_set_baudrate(struct hci_uart *hu, unsigned int speed) +{ + if (hu->serdev) + serdev_device_set_baudrate(hu->serdev, speed); + else + hci_uart_set_baudrate(hu, speed); +} + static int bcm_set_baudrate(struct hci_uart *hu, unsigned int speed) { struct hci_dev *hdev = hu->hdev; @@ -176,7 +198,7 @@ static irqreturn_t bcm_host_wake(int irq, void *data) static int bcm_request_irq(struct bcm_data *bcm) { struct bcm_device *bdev = bcm->dev; - int err = 0; + int err; /* If this is not a platform device, do not enable PM functionalities */ mutex_lock(&bcm_device_lock); @@ -185,22 +207,24 @@ static int bcm_request_irq(struct bcm_data *bcm) goto unlock; } - if (bdev->irq > 0) { - err = devm_request_irq(&bdev->pdev->dev, bdev->irq, - bcm_host_wake, IRQF_TRIGGER_RISING, - "host_wake", bdev); - if (err) - goto unlock; - - device_init_wakeup(&bdev->pdev->dev, true); - - pm_runtime_set_autosuspend_delay(&bdev->pdev->dev, - BCM_AUTOSUSPEND_DELAY); - pm_runtime_use_autosuspend(&bdev->pdev->dev); - pm_runtime_set_active(&bdev->pdev->dev); - pm_runtime_enable(&bdev->pdev->dev); + if (bdev->irq <= 0) { + err = -EOPNOTSUPP; + goto unlock; } + err = devm_request_irq(&bdev->pdev->dev, bdev->irq, bcm_host_wake, + IRQF_TRIGGER_RISING, "host_wake", bdev); + if (err) + goto unlock; + + device_init_wakeup(&bdev->pdev->dev, true); + + pm_runtime_set_autosuspend_delay(&bdev->pdev->dev, + BCM_AUTOSUSPEND_DELAY); + pm_runtime_use_autosuspend(&bdev->pdev->dev); + pm_runtime_set_active(&bdev->pdev->dev); + pm_runtime_enable(&bdev->pdev->dev); + unlock: mutex_unlock(&bcm_device_lock); @@ -287,6 +311,14 @@ static int bcm_open(struct hci_uart *hu) hu->priv = bcm; + /* If this is a serdev defined device, then only use + * serdev open primitive and skip the rest. 
+ */ + if (hu->serdev) { + serdev_device_open(hu->serdev); + goto out; + } + if (!hu->tty->dev) goto out; @@ -301,6 +333,7 @@ static int bcm_open(struct hci_uart *hu) if (hu->tty->dev->parent == dev->pdev->dev.parent) { bcm->dev = dev; hu->init_speed = dev->init_speed; + hu->oper_speed = dev->oper_speed; #ifdef CONFIG_PM dev->hu = hu; #endif @@ -321,6 +354,12 @@ static int bcm_close(struct hci_uart *hu) bt_dev_dbg(hu->hdev, "hu %p", hu); + /* If this is a serdev defined device, only use serdev + * close primitive and then continue as usual. + */ + if (hu->serdev) + serdev_device_close(hu->serdev); + /* Protect bcm->dev against removal of the device or driver */ mutex_lock(&bcm_device_lock); if (bcm_device_exists(bdev)) { @@ -396,7 +435,7 @@ static int bcm_setup(struct hci_uart *hu) speed = 0; if (speed) - hci_uart_set_baudrate(hu, speed); + host_set_baudrate(hu, speed); /* Operational speed if any */ if (hu->oper_speed) @@ -409,7 +448,7 @@ static int bcm_setup(struct hci_uart *hu) if (speed) { err = bcm_set_baudrate(hu, speed); if (!err) - hci_uart_set_baudrate(hu, speed); + host_set_baudrate(hu, speed); } finalize: @@ -432,11 +471,19 @@ static int bcm_setup(struct hci_uart *hu) .lsize = 0, \ .maxlen = BCM_LM_DIAG_SIZE +#define BCM_RECV_NULL \ + .type = BCM_NULL_PKT, \ + .hlen = BCM_NULL_SIZE, \ + .loff = 0, \ + .lsize = 0, \ + .maxlen = BCM_NULL_SIZE + static const struct h4_recv_pkt bcm_recv_pkts[] = { { H4_RECV_ACL, .recv = hci_recv_frame }, { H4_RECV_SCO, .recv = hci_recv_frame }, { H4_RECV_EVENT, .recv = hci_recv_frame }, { BCM_RECV_LM_DIAG, .recv = hci_recv_diag }, + { BCM_RECV_NULL, .recv = hci_recv_diag }, }; static int bcm_recv(struct hci_uart *hu, const void *data, int count) @@ -697,8 +744,10 @@ static int bcm_resource(struct acpi_resource *ares, void *data) case ACPI_RESOURCE_TYPE_SERIAL_BUS: sb = &ares->data.uart_serial_bus; - if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_UART) + if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_UART) { dev->init_speed = sb->default_baud_rate; + dev->oper_speed = 4000000; + } break; default: @@ -851,7 +900,6 @@ static const struct hci_uart_proto bcm_proto = { .name = "Broadcom", .manufacturer = 15, .init_speed = 115200, - .oper_speed = 4000000, .open = bcm_open, .close = bcm_close, .flush = bcm_flush, @@ -901,9 +949,57 @@ static struct platform_driver bcm_driver = { }, }; +static int bcm_serdev_probe(struct serdev_device *serdev) +{ + struct bcm_serdev *bcmdev; + u32 speed; + int err; + + bcmdev = devm_kzalloc(&serdev->dev, sizeof(*bcmdev), GFP_KERNEL); + if (!bcmdev) + return -ENOMEM; + + bcmdev->hu.serdev = serdev; + serdev_device_set_drvdata(serdev, bcmdev); + + err = device_property_read_u32(&serdev->dev, "max-speed", &speed); + if (!err) + bcmdev->hu.oper_speed = speed; + + return hci_uart_register_device(&bcmdev->hu, &bcm_proto); +} + +static void bcm_serdev_remove(struct serdev_device *serdev) +{ + struct bcm_serdev *bcmdev = serdev_device_get_drvdata(serdev); + + hci_uart_unregister_device(&bcmdev->hu); +} + +#ifdef CONFIG_OF +static const struct of_device_id bcm_bluetooth_of_match[] = { + { .compatible = "brcm,bcm43438-bt" }, + { }, +}; +MODULE_DEVICE_TABLE(of, bcm_bluetooth_of_match); +#endif + +static struct serdev_device_driver bcm_serdev_driver = { + .probe = bcm_serdev_probe, + .remove = bcm_serdev_remove, + .driver = { + .name = "hci_uart_bcm", + .of_match_table = of_match_ptr(bcm_bluetooth_of_match), + }, +}; + int __init bcm_init(void) { + /* For now, we need to keep both platform device + * driver (ACPI generated) and serdev driver (DT). 
+ */ platform_driver_register(&bcm_driver); + serdev_device_driver_register(&bcm_serdev_driver); return hci_uart_register_proto(&bcm_proto); } @@ -911,6 +1007,7 @@ int __init bcm_init(void) int __exit bcm_deinit(void) { platform_driver_unregister(&bcm_driver); + serdev_device_driver_unregister(&bcm_serdev_driver); return hci_uart_unregister_proto(&bcm_proto); } diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c index 4e328d7d47bb..3b82a87224a9 100644 --- a/drivers/bluetooth/hci_h4.c +++ b/drivers/bluetooth/hci_h4.c @@ -172,7 +172,7 @@ struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb, const struct h4_recv_pkt *pkts, int pkts_count) { struct hci_uart *hu = hci_get_drvdata(hdev); - u8 alignment = hu->alignment; + u8 alignment = hu->alignment ? hu->alignment : 1; while (count) { int i, len; diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index 8397b716fa65..a746627e784e 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -457,7 +457,8 @@ static int hci_uart_tty_open(struct tty_struct *tty) BT_DBG("tty %p", tty); /* Error if the tty has no write op instead of leaving an exploitable - hole */ + * hole + */ if (tty->ops->write == NULL) return -EOPNOTSUPP; diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c index c982943f0747..424c15aa7bb7 100644 --- a/drivers/bluetooth/hci_ll.c +++ b/drivers/bluetooth/hci_ll.c @@ -622,7 +622,8 @@ static int download_firmware(struct ll_device *lldev) cmd = (struct hci_command *)action_ptr; if (cmd->opcode == 0xff36) { /* ignore remote change - * baud rate HCI VS command */ + * baud rate HCI VS command + */ bt_dev_warn(lldev->hu.hdev, "change remote baud rate command in firmware"); break; } @@ -742,14 +743,8 @@ static int hci_ti_probe(struct serdev_device *serdev) static void hci_ti_remove(struct serdev_device *serdev) { struct ll_device *lldev = serdev_device_get_drvdata(serdev); - struct hci_uart *hu = &lldev->hu; - struct hci_dev *hdev = hu->hdev; - cancel_work_sync(&hu->write_work); - - hci_unregister_dev(hdev); - hci_free_dev(hdev); - hu->proto->close(hu); + hci_uart_unregister_device(&lldev->hu); } static const struct of_device_id hci_ti_of_match[] = { diff --git a/drivers/bluetooth/hci_nokia.c b/drivers/bluetooth/hci_nokia.c index 181a15b549e5..3539fd03f47e 100644 --- a/drivers/bluetooth/hci_nokia.c +++ b/drivers/bluetooth/hci_nokia.c @@ -767,16 +767,8 @@ static int nokia_bluetooth_serdev_probe(struct serdev_device *serdev) static void nokia_bluetooth_serdev_remove(struct serdev_device *serdev) { struct nokia_bt_dev *btdev = serdev_device_get_drvdata(serdev); - struct hci_uart *hu = &btdev->hu; - struct hci_dev *hdev = hu->hdev; - cancel_work_sync(&hu->write_work); - - hci_unregister_dev(hdev); - hci_free_dev(hdev); - hu->proto->close(hu); - - pm_runtime_disable(&btdev->serdev->dev); + hci_uart_unregister_device(&btdev->hu); } static int nokia_bluetooth_runtime_suspend(struct device *dev) diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c index aea930101dd2..b725ac4f7ff6 100644 --- a/drivers/bluetooth/hci_serdev.c +++ b/drivers/bluetooth/hci_serdev.c @@ -354,3 +354,16 @@ int hci_uart_register_device(struct hci_uart *hu, return err; } EXPORT_SYMBOL_GPL(hci_uart_register_device); + +void hci_uart_unregister_device(struct hci_uart *hu) +{ + struct hci_dev *hdev = hu->hdev; + + hci_unregister_dev(hdev); + hci_free_dev(hdev); + + cancel_work_sync(&hu->write_work); + + hu->proto->close(hu); +} 
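The hci_uart_unregister_device() helper defined above is the counterpart of hci_uart_register_device(): it unregisters and frees the hci_dev, cancels any pending write work and closes the protocol, replacing the sequence that hci_ll and hci_nokia used to open-code in their serdev remove paths. A minimal sketch of how a serdev-attached driver pairs the two calls follows; my_proto and the my_* names are placeholders and the protocol callbacks are omitted, so this is an illustration of the API shape rather than a working driver. The serdev_device_driver and of_device_id boilerplate would be added exactly as bcm_serdev_driver does above.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/serdev.h>

#include "hci_uart.h"	/* driver-local header, as used by the drivers in this series */

/* placeholder: a real driver fills in .open/.close/.recv/.enqueue/.dequeue */
static const struct hci_uart_proto my_proto;

struct my_serdev {
	struct hci_uart hu;
};

static int my_serdev_probe(struct serdev_device *serdev)
{
	struct my_serdev *mydev;

	mydev = devm_kzalloc(&serdev->dev, sizeof(*mydev), GFP_KERNEL);
	if (!mydev)
		return -ENOMEM;

	mydev->hu.serdev = serdev;
	serdev_device_set_drvdata(serdev, mydev);

	/* registers the hci_dev and attaches the uart protocol */
	return hci_uart_register_device(&mydev->hu, &my_proto);
}

static void my_serdev_remove(struct serdev_device *serdev)
{
	struct my_serdev *mydev = serdev_device_get_drvdata(serdev);

	/* one call instead of unregister/free/cancel_work_sync/close */
	hci_uart_unregister_device(&mydev->hu);
}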
+EXPORT_SYMBOL_GPL(hci_uart_unregister_device); diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h index c6e9e1cf63f8..d9cd95d81149 100644 --- a/drivers/bluetooth/hci_uart.h +++ b/drivers/bluetooth/hci_uart.h @@ -112,6 +112,7 @@ struct hci_uart { int hci_uart_register_proto(const struct hci_uart_proto *p); int hci_uart_unregister_proto(const struct hci_uart_proto *p); int hci_uart_register_device(struct hci_uart *hu, const struct hci_uart_proto *p); +void hci_uart_unregister_device(struct hci_uart *hu); int hci_uart_tx_wakeup(struct hci_uart *hu); int hci_uart_init_ready(struct hci_uart *hu); diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig index 2408ea38a39c..ae3d8f3444b9 100644 --- a/drivers/bus/Kconfig +++ b/drivers/bus/Kconfig @@ -132,7 +132,7 @@ config SIMPLE_PM_BUS config SUNXI_RSB tristate "Allwinner sunXi Reduced Serial Bus Driver" - default MACH_SUN8I || MACH_SUN9I + default MACH_SUN8I || MACH_SUN9I || ARM64 depends on ARCH_SUNXI select REGMAP help diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c index c49da15d9790..3c29d36702a8 100644 --- a/drivers/bus/arm-cci.c +++ b/drivers/bus/arm-cci.c @@ -2124,8 +2124,8 @@ int notrace __cci_control_port_by_device(struct device_node *dn, bool enable) return -ENODEV; port = __cci_ace_get_port(dn, ACE_LITE_PORT); - if (WARN_ONCE(port < 0, "node %s ACE lite port look-up failure\n", - dn->full_name)) + if (WARN_ONCE(port < 0, "node %pOF ACE lite port look-up failure\n", + dn)) return -ENODEV; cci_port_control(port, enable); return 0; @@ -2200,14 +2200,14 @@ static int cci_probe_ports(struct device_node *np) if (of_property_read_string(cp, "interface-type", &match_str)) { - WARN(1, "node %s missing interface-type property\n", - cp->full_name); + WARN(1, "node %pOF missing interface-type property\n", + cp); continue; } is_ace = strcmp(match_str, "ace") == 0; if (!is_ace && strcmp(match_str, "ace-lite")) { - WARN(1, "node %s containing invalid interface-type property, skipping it\n", - cp->full_name); + WARN(1, "node %pOF containing invalid interface-type property, skipping it\n", + cp); continue; } diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c index 4bd361d64270..3d56ebcda720 100644 --- a/drivers/bus/imx-weim.c +++ b/drivers/bus/imx-weim.c @@ -156,8 +156,8 @@ static int __init weim_parse_dt(struct platform_device *pdev, ret = weim_timing_setup(child, base, devtype); if (ret) - dev_warn(&pdev->dev, "%s set timing failed.\n", - child->full_name); + dev_warn(&pdev->dev, "%pOF set timing failed.\n", + child); else have_child = 1; } @@ -166,8 +166,8 @@ static int __init weim_parse_dt(struct platform_device *pdev, ret = of_platform_default_populate(pdev->dev.of_node, NULL, &pdev->dev); if (ret) - dev_err(&pdev->dev, "%s fail to create devices.\n", - pdev->dev.of_node->full_name); + dev_err(&pdev->dev, "%pOF fail to create devices.\n", + pdev->dev.of_node); return ret; } diff --git a/drivers/bus/omap-ocp2scp.c b/drivers/bus/omap-ocp2scp.c index bf500e0e7362..77791f3dcfc6 100644 --- a/drivers/bus/omap-ocp2scp.c +++ b/drivers/bus/omap-ocp2scp.c @@ -70,8 +70,10 @@ static int omap_ocp2scp_probe(struct platform_device *pdev) if (!of_device_is_compatible(np, "ti,am437x-ocp2scp")) { res = platform_get_resource(pdev, IORESOURCE_MEM, 0); regs = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(regs)) - goto err0; + if (IS_ERR(regs)) { + ret = PTR_ERR(regs); + goto err1; + } pm_runtime_get_sync(&pdev->dev); reg = readl_relaxed(regs + OCP2SCP_TIMING); @@ -83,6 +85,9 @@ static int omap_ocp2scp_probe(struct 
platform_device *pdev) return 0; +err1: + pm_runtime_disable(&pdev->dev); + err0: device_for_each_child(&pdev->dev, NULL, ocp2scp_remove_devices); diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c index 795c9d9c96a6..328ca93781cf 100644 --- a/drivers/bus/sunxi-rsb.c +++ b/drivers/bus/sunxi-rsb.c @@ -556,20 +556,20 @@ static int of_rsb_register_devices(struct sunxi_rsb *rsb) /* Runtime addresses for all slaves should be set first */ for_each_available_child_of_node(np, child) { - dev_dbg(dev, "setting child %s runtime address\n", - child->full_name); + dev_dbg(dev, "setting child %pOF runtime address\n", + child); ret = of_property_read_u32(child, "reg", &hwaddr); if (ret) { - dev_err(dev, "%s: invalid 'reg' property: %d\n", - child->full_name, ret); + dev_err(dev, "%pOF: invalid 'reg' property: %d\n", + child, ret); continue; } rtaddr = sunxi_rsb_get_rtaddr(hwaddr); if (!rtaddr) { - dev_err(dev, "%s: unknown hardware device address\n", - child->full_name); + dev_err(dev, "%pOF: unknown hardware device address\n", + child); continue; } @@ -586,15 +586,15 @@ static int of_rsb_register_devices(struct sunxi_rsb *rsb) /* send command */ ret = _sunxi_rsb_run_xfer(rsb); if (ret) - dev_warn(dev, "%s: set runtime address failed: %d\n", - child->full_name, ret); + dev_warn(dev, "%pOF: set runtime address failed: %d\n", + child, ret); } /* Then we start adding devices and probing them */ for_each_available_child_of_node(np, child) { struct sunxi_rsb_device *rdev; - dev_dbg(dev, "adding child %s\n", child->full_name); + dev_dbg(dev, "adding child %pOF\n", child); ret = of_property_read_u32(child, "reg", &hwaddr); if (ret) @@ -606,8 +606,8 @@ static int of_rsb_register_devices(struct sunxi_rsb *rsb) rdev = sunxi_rsb_device_create(rsb, child, hwaddr, rtaddr); if (IS_ERR(rdev)) - dev_err(dev, "failed to add child device %s: %ld\n", - child->full_name, PTR_ERR(rdev)); + dev_err(dev, "failed to add child device %pOF: %ld\n", + child, PTR_ERR(rdev)); } return 0; diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 1b223c32a8ae..95a031e9eced 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -13,10 +13,8 @@ menuconfig HW_RANDOM that's usually called /dev/hwrng, and which exposes one of possibly several hardware random number generators. - These hardware random number generators do not feed directly - into the kernel's random number generator. That is usually - handled by the "rngd" daemon. Documentation/hw_random.txt - has more information. + These hardware random number generators do feed into the + kernel's random number generator entropy pool. If unsure, say Y. @@ -255,6 +253,20 @@ config HW_RANDOM_MXC_RNGA If unsure, say Y. +config HW_RANDOM_IMX_RNGC + tristate "Freescale i.MX RNGC Random Number Generator" + depends on ARCH_MXC + default HW_RANDOM + ---help--- + This driver provides kernel-side support for the Random Number + Generator Version C hardware found on some Freescale i.MX + processors. Version B is also supported by this driver. + + To compile this driver as a module, choose M here: the + module will be called imx-rngc. + + If unsure, say Y. 
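Besides feeding the kernel's entropy pool, the hw_random core exposes whichever backend is currently selected through the /dev/hwrng character device mentioned in the help text above. A small userspace sketch that pulls a few bytes from it, assuming the node exists (HW_RANDOM enabled, at least one backend registered) and the caller has read permission:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	ssize_t n, i;
	int fd;

	fd = open("/dev/hwrng", O_RDONLY);
	if (fd < 0) {
		perror("open /dev/hwrng");
		return EXIT_FAILURE;
	}

	n = read(fd, buf, sizeof(buf));
	if (n < 0) {
		perror("read /dev/hwrng");
		close(fd);
		return EXIT_FAILURE;
	}

	for (i = 0; i < n; i++)
		printf("%02x", buf[i]);
	printf("\n");

	close(fd);
	return EXIT_SUCCESS;
}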
+ config HW_RANDOM_NOMADIK tristate "ST-Ericsson Nomadik Random Number Generator support" depends on ARCH_NOMADIK diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile index b085975ec1d2..39a67defac67 100644 --- a/drivers/char/hw_random/Makefile +++ b/drivers/char/hw_random/Makefile @@ -20,6 +20,7 @@ obj-$(CONFIG_HW_RANDOM_PASEMI) += pasemi-rng.o obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o +obj-$(CONFIG_HW_RANDOM_IMX_RNGC) += imx-rngc.o obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index 503a41dfa193..9701ac7d8b47 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -28,7 +28,10 @@ #define RNG_MODULE_NAME "hw_random" static struct hwrng *current_rng; +/* the current rng has been explicitly chosen by user via sysfs */ +static int cur_rng_set_by_user; static struct task_struct *hwrng_fill; +/* list of registered rngs, sorted decending by quality */ static LIST_HEAD(rng_list); /* Protects rng_list and current_rng */ static DEFINE_MUTEX(rng_mutex); @@ -303,6 +306,7 @@ static ssize_t hwrng_attr_current_store(struct device *dev, list_for_each_entry(rng, &rng_list, list) { if (sysfs_streq(rng->name, buf)) { err = 0; + cur_rng_set_by_user = 1; if (rng != current_rng) err = set_current_rng(rng); break; @@ -351,16 +355,27 @@ static ssize_t hwrng_attr_available_show(struct device *dev, return strlen(buf); } +static ssize_t hwrng_attr_selected_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", cur_rng_set_by_user); +} + static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR, hwrng_attr_current_show, hwrng_attr_current_store); static DEVICE_ATTR(rng_available, S_IRUGO, hwrng_attr_available_show, NULL); +static DEVICE_ATTR(rng_selected, S_IRUGO, + hwrng_attr_selected_show, + NULL); static struct attribute *rng_dev_attrs[] = { &dev_attr_rng_current.attr, &dev_attr_rng_available.attr, + &dev_attr_rng_selected.attr, NULL }; @@ -417,6 +432,7 @@ int hwrng_register(struct hwrng *rng) { int err = -EINVAL; struct hwrng *old_rng, *tmp; + struct list_head *rng_list_ptr; if (!rng->name || (!rng->data_read && !rng->read)) goto out; @@ -432,14 +448,27 @@ int hwrng_register(struct hwrng *rng) init_completion(&rng->cleanup_done); complete(&rng->cleanup_done); + /* rng_list is sorted by decreasing quality */ + list_for_each(rng_list_ptr, &rng_list) { + tmp = list_entry(rng_list_ptr, struct hwrng, list); + if (tmp->quality < rng->quality) + break; + } + list_add_tail(&rng->list, rng_list_ptr); + old_rng = current_rng; err = 0; - if (!old_rng) { + if (!old_rng || + (!cur_rng_set_by_user && rng->quality > old_rng->quality)) { + /* + * Set new rng as current as the new rng source + * provides better entropy quality and was not + * chosen by userspace. 
+ */ err = set_current_rng(rng); if (err) goto out_unlock; } - list_add_tail(&rng->list, &rng_list); if (old_rng && !rng->init) { /* @@ -466,12 +495,13 @@ void hwrng_unregister(struct hwrng *rng) list_del(&rng->list); if (current_rng == rng) { drop_current_rng(); + cur_rng_set_by_user = 0; + /* rng_list is sorted by quality, use the best (=first) one */ if (!list_empty(&rng_list)) { - struct hwrng *tail; + struct hwrng *new_rng; - tail = list_entry(rng_list.prev, struct hwrng, list); - - set_current_rng(tail); + new_rng = list_entry(rng_list.next, struct hwrng, list); + set_current_rng(new_rng); } } diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c new file mode 100644 index 000000000000..88db42d30760 --- /dev/null +++ b/drivers/char/hw_random/imx-rngc.c @@ -0,0 +1,331 @@ +/* + * RNG driver for Freescale RNGC + * + * Copyright (C) 2008-2012 Freescale Semiconductor, Inc. + * Copyright (C) 2017 Martin Kaiser + * + * The code contained herein is licensed under the GNU General Public + * License. You may obtain a copy of the GNU General Public License + * Version 2 or later at the following locations: + * + * http://www.opensource.org/licenses/gpl-license.html + * http://www.gnu.org/copyleft/gpl.html + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define RNGC_COMMAND 0x0004 +#define RNGC_CONTROL 0x0008 +#define RNGC_STATUS 0x000C +#define RNGC_ERROR 0x0010 +#define RNGC_FIFO 0x0014 + +#define RNGC_CMD_CLR_ERR 0x00000020 +#define RNGC_CMD_CLR_INT 0x00000010 +#define RNGC_CMD_SEED 0x00000002 +#define RNGC_CMD_SELF_TEST 0x00000001 + +#define RNGC_CTRL_MASK_ERROR 0x00000040 +#define RNGC_CTRL_MASK_DONE 0x00000020 + +#define RNGC_STATUS_ERROR 0x00010000 +#define RNGC_STATUS_FIFO_LEVEL_MASK 0x00000f00 +#define RNGC_STATUS_FIFO_LEVEL_SHIFT 8 +#define RNGC_STATUS_SEED_DONE 0x00000020 +#define RNGC_STATUS_ST_DONE 0x00000010 + +#define RNGC_ERROR_STATUS_STAT_ERR 0x00000008 + +#define RNGC_TIMEOUT 3000 /* 3 sec */ + + +static bool self_test = true; +module_param(self_test, bool, 0); + +struct imx_rngc { + struct device *dev; + struct clk *clk; + void __iomem *base; + struct hwrng rng; + struct completion rng_op_done; + /* + * err_reg is written only by the irq handler and read only + * when interrupts are masked, we need no spinlock + */ + u32 err_reg; +}; + + +static inline void imx_rngc_irq_mask_clear(struct imx_rngc *rngc) +{ + u32 ctrl, cmd; + + /* mask interrupts */ + ctrl = readl(rngc->base + RNGC_CONTROL); + ctrl |= RNGC_CTRL_MASK_DONE | RNGC_CTRL_MASK_ERROR; + writel(ctrl, rngc->base + RNGC_CONTROL); + + /* + * CLR_INT clears the interrupt only if there's no error + * CLR_ERR clear the interrupt and the error register if there + * is an error + */ + cmd = readl(rngc->base + RNGC_COMMAND); + cmd |= RNGC_CMD_CLR_INT | RNGC_CMD_CLR_ERR; + writel(cmd, rngc->base + RNGC_COMMAND); +} + +static inline void imx_rngc_irq_unmask(struct imx_rngc *rngc) +{ + u32 ctrl; + + ctrl = readl(rngc->base + RNGC_CONTROL); + ctrl &= ~(RNGC_CTRL_MASK_DONE | RNGC_CTRL_MASK_ERROR); + writel(ctrl, rngc->base + RNGC_CONTROL); +} + +static int imx_rngc_self_test(struct imx_rngc *rngc) +{ + u32 cmd; + int ret; + + imx_rngc_irq_unmask(rngc); + + /* run self test */ + cmd = readl(rngc->base + RNGC_COMMAND); + writel(cmd | RNGC_CMD_SELF_TEST, rngc->base + RNGC_COMMAND); + + ret = wait_for_completion_timeout(&rngc->rng_op_done, RNGC_TIMEOUT); + if (!ret) { + imx_rngc_irq_mask_clear(rngc); + return -ETIMEDOUT; + } + + if (rngc->err_reg 
!= 0) + return -EIO; + + return 0; +} + +static int imx_rngc_read(struct hwrng *rng, void *data, size_t max, bool wait) +{ + struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng); + unsigned int status; + unsigned int level; + int retval = 0; + + while (max >= sizeof(u32)) { + status = readl(rngc->base + RNGC_STATUS); + + /* is there some error while reading this random number? */ + if (status & RNGC_STATUS_ERROR) + break; + + /* how many random numbers are in FIFO? [0-16] */ + level = (status & RNGC_STATUS_FIFO_LEVEL_MASK) >> + RNGC_STATUS_FIFO_LEVEL_SHIFT; + + if (level) { + /* retrieve a random number from FIFO */ + *(u32 *)data = readl(rngc->base + RNGC_FIFO); + + retval += sizeof(u32); + data += sizeof(u32); + max -= sizeof(u32); + } + } + + return retval ? retval : -EIO; +} + +static irqreturn_t imx_rngc_irq(int irq, void *priv) +{ + struct imx_rngc *rngc = (struct imx_rngc *)priv; + u32 status; + + /* + * clearing the interrupt will also clear the error register + * read error and status before clearing + */ + status = readl(rngc->base + RNGC_STATUS); + rngc->err_reg = readl(rngc->base + RNGC_ERROR); + + imx_rngc_irq_mask_clear(rngc); + + if (status & (RNGC_STATUS_SEED_DONE | RNGC_STATUS_ST_DONE)) + complete(&rngc->rng_op_done); + + return IRQ_HANDLED; +} + +static int imx_rngc_init(struct hwrng *rng) +{ + struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng); + u32 cmd; + int ret; + + /* clear error */ + cmd = readl(rngc->base + RNGC_COMMAND); + writel(cmd | RNGC_CMD_CLR_ERR, rngc->base + RNGC_COMMAND); + + /* create seed, repeat while there is some statistical error */ + do { + imx_rngc_irq_unmask(rngc); + + /* seed creation */ + cmd = readl(rngc->base + RNGC_COMMAND); + writel(cmd | RNGC_CMD_SEED, rngc->base + RNGC_COMMAND); + + ret = wait_for_completion_timeout(&rngc->rng_op_done, + RNGC_TIMEOUT); + + if (!ret) { + imx_rngc_irq_mask_clear(rngc); + return -ETIMEDOUT; + } + + } while (rngc->err_reg == RNGC_ERROR_STATUS_STAT_ERR); + + return rngc->err_reg ? 
-EIO : 0; +} + +static int imx_rngc_probe(struct platform_device *pdev) +{ + struct imx_rngc *rngc; + struct resource *res; + int ret; + int irq; + + rngc = devm_kzalloc(&pdev->dev, sizeof(*rngc), GFP_KERNEL); + if (!rngc) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + rngc->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(rngc->base)) + return PTR_ERR(rngc->base); + + rngc->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(rngc->clk)) { + dev_err(&pdev->dev, "Can not get rng_clk\n"); + return PTR_ERR(rngc->clk); + } + + irq = platform_get_irq(pdev, 0); + if (irq <= 0) { + dev_err(&pdev->dev, "Couldn't get irq %d\n", irq); + return irq; + } + + ret = clk_prepare_enable(rngc->clk); + if (ret) + return ret; + + ret = devm_request_irq(&pdev->dev, + irq, imx_rngc_irq, 0, pdev->name, (void *)rngc); + if (ret) { + dev_err(rngc->dev, "Can't get interrupt working.\n"); + goto err; + } + + init_completion(&rngc->rng_op_done); + + rngc->rng.name = pdev->name; + rngc->rng.init = imx_rngc_init; + rngc->rng.read = imx_rngc_read; + + rngc->dev = &pdev->dev; + platform_set_drvdata(pdev, rngc); + + imx_rngc_irq_mask_clear(rngc); + + if (self_test) { + ret = imx_rngc_self_test(rngc); + if (ret) { + dev_err(rngc->dev, "FSL RNGC self test failed.\n"); + goto err; + } + } + + ret = hwrng_register(&rngc->rng); + if (ret) { + dev_err(&pdev->dev, "FSL RNGC registering failed (%d)\n", ret); + goto err; + } + + dev_info(&pdev->dev, "Freescale RNGC registered.\n"); + return 0; + +err: + clk_disable_unprepare(rngc->clk); + + return ret; +} + +static int __exit imx_rngc_remove(struct platform_device *pdev) +{ + struct imx_rngc *rngc = platform_get_drvdata(pdev); + + hwrng_unregister(&rngc->rng); + + clk_disable_unprepare(rngc->clk); + + return 0; +} + +#ifdef CONFIG_PM +static int imx_rngc_suspend(struct device *dev) +{ + struct imx_rngc *rngc = dev_get_drvdata(dev); + + clk_disable_unprepare(rngc->clk); + + return 0; +} + +static int imx_rngc_resume(struct device *dev) +{ + struct imx_rngc *rngc = dev_get_drvdata(dev); + + clk_prepare_enable(rngc->clk); + + return 0; +} + +static const struct dev_pm_ops imx_rngc_pm_ops = { + .suspend = imx_rngc_suspend, + .resume = imx_rngc_resume, +}; +#endif + +static const struct of_device_id imx_rngc_dt_ids[] = { + { .compatible = "fsl,imx25-rngb", .data = NULL, }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, imx_rngc_dt_ids); + +static struct platform_driver imx_rngc_driver = { + .driver = { + .name = "imx_rngc", +#ifdef CONFIG_PM + .pm = &imx_rngc_pm_ops, +#endif + .of_match_table = imx_rngc_dt_ids, + }, + .remove = __exit_p(imx_rngc_remove), +}; + +module_platform_driver_probe(imx_rngc_driver, imx_rngc_probe); + +MODULE_AUTHOR("Freescale Semiconductor, Inc."); +MODULE_DESCRIPTION("H/W RNGC driver for i.MX"); +MODULE_LICENSE("GPL"); diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c index f4f866ee54bc..d3a979e25724 100644 --- a/drivers/char/sonypi.c +++ b/drivers/char/sonypi.c @@ -1491,7 +1491,7 @@ static struct platform_driver sonypi_driver = { static struct platform_device *sonypi_platform_device; -static struct dmi_system_id __initdata sonypi_dmi_table[] = { +static const struct dmi_system_id sonypi_dmi_table[] __initconst = { { .ident = "Sony Vaio", .matches = { diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index fe597e6c55c4..1d6729be4cd6 100644 --- a/drivers/char/tpm/tpm-interface.c +++ b/drivers/char/tpm/tpm-interface.c @@ -455,7 +455,7 @@ ssize_t tpm_transmit(struct tpm_chip 
*chip, struct tpm_space *space, goto out; } - msleep(TPM_TIMEOUT); /* CHECK */ + tpm_msleep(TPM_TIMEOUT); rmb(); } while (time_before(jiffies, stop)); @@ -970,7 +970,7 @@ int tpm_do_selftest(struct tpm_chip *chip) dev_info( &chip->dev, HW_ERR "TPM command timed out during continue self test"); - msleep(delay_msec); + tpm_msleep(delay_msec); continue; } @@ -985,7 +985,7 @@ int tpm_do_selftest(struct tpm_chip *chip) } if (rc != TPM_WARN_DOING_SELFTEST) return rc; - msleep(delay_msec); + tpm_msleep(delay_msec); } while (--loops > 0); return rc; @@ -1085,7 +1085,7 @@ int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout, } } else { do { - msleep(TPM_TIMEOUT); + tpm_msleep(TPM_TIMEOUT); status = chip->ops->status(chip); if ((status & mask) == mask) return 0; @@ -1150,7 +1150,7 @@ int tpm_pm_suspend(struct device *dev) */ if (rc != TPM_WARN_RETRY) break; - msleep(TPM_TIMEOUT_RETRY); + tpm_msleep(TPM_TIMEOUT_RETRY); } if (rc) diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index 04fbff2edbf3..2d5466a72e40 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -50,7 +50,8 @@ enum tpm_const { enum tpm_timeout { TPM_TIMEOUT = 5, /* msecs */ - TPM_TIMEOUT_RETRY = 100 /* msecs */ + TPM_TIMEOUT_RETRY = 100, /* msecs */ + TPM_TIMEOUT_RANGE_US = 300 /* usecs */ }; /* TPM addresses */ @@ -527,6 +528,12 @@ int tpm_pm_resume(struct device *dev); int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout, wait_queue_head_t *queue, bool check_cancel); +static inline void tpm_msleep(unsigned int delay_msec) +{ + usleep_range(delay_msec * 1000, + (delay_msec * 1000) + TPM_TIMEOUT_RANGE_US); +}; + struct tpm_chip *tpm_chip_find_get(int chip_num); __must_check int tpm_try_get_ops(struct tpm_chip *chip); void tpm_put_ops(struct tpm_chip *chip); diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c index f7f34b2aa981..e1a41b788f08 100644 --- a/drivers/char/tpm/tpm2-cmd.c +++ b/drivers/char/tpm/tpm2-cmd.c @@ -899,7 +899,7 @@ static int tpm2_do_selftest(struct tpm_chip *chip) if (rc != TPM2_RC_TESTING) break; - msleep(delay_msec); + tpm_msleep(delay_msec); } return rc; diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c index a4ac63a21d8a..8f0a98dea327 100644 --- a/drivers/char/tpm/tpm_crb.c +++ b/drivers/char/tpm/tpm_crb.c @@ -665,7 +665,7 @@ static const struct dev_pm_ops crb_pm = { SET_RUNTIME_PM_OPS(crb_pm_runtime_suspend, crb_pm_runtime_resume, NULL) }; -static struct acpi_device_id crb_device_ids[] = { +static const struct acpi_device_id crb_device_ids[] = { {"MSFT0101", 0}, {"", 0}, }; diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c index f01d083eced2..25f6e2665385 100644 --- a/drivers/char/tpm/tpm_ibmvtpm.c +++ b/drivers/char/tpm/tpm_ibmvtpm.c @@ -32,26 +32,70 @@ static const char tpm_ibmvtpm_driver_name[] = "tpm_ibmvtpm"; -static struct vio_device_id tpm_ibmvtpm_device_table[] = { +static const struct vio_device_id tpm_ibmvtpm_device_table[] = { { "IBM,vtpm", "IBM,vtpm"}, { "", "" } }; MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table); /** + * + * ibmvtpm_send_crq_word - Send a CRQ request + * @vdev: vio device struct + * @w1: pre-constructed first word of tpm crq (second word is reserved) + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +static int ibmvtpm_send_crq_word(struct vio_dev *vdev, u64 w1) +{ + return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, 0); +} + +/** + * * ibmvtpm_send_crq - Send a CRQ request * * @vdev: vio device struct - * @w1: first word 
- * @w2: second word + * @valid: Valid field + * @msg: Type field + * @len: Length field + * @data: Data field + * + * The ibmvtpm crq is defined as follows: + * + * Byte | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 + * ----------------------------------------------------------------------- + * Word0 | Valid | Type | Length | Data + * ----------------------------------------------------------------------- + * Word1 | Reserved + * ----------------------------------------------------------------------- + * + * Which matches the following structure (on bigendian host): + * + * struct ibmvtpm_crq { + * u8 valid; + * u8 msg; + * __be16 len; + * __be32 data; + * __be64 reserved; + * } __attribute__((packed, aligned(8))); + * + * However, the value is passed in a register so just compute the numeric value + * to load into the register avoiding byteswap altogether. Endian only affects + * memory loads and stores - registers are internally represented the same. * * Return: - * 0 -Sucess + * 0 (H_SUCCESS) - Success * Non-zero - Failure */ -static int ibmvtpm_send_crq(struct vio_dev *vdev, u64 w1, u64 w2) +static int ibmvtpm_send_crq(struct vio_dev *vdev, + u8 valid, u8 msg, u16 len, u32 data) { - return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, w2); + u64 w1 = ((u64)valid << 56) | ((u64)msg << 48) | ((u64)len << 32) | + (u64)data; + return ibmvtpm_send_crq_word(vdev, w1); } /** @@ -109,8 +153,6 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count) static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) { struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); - struct ibmvtpm_crq crq; - __be64 *word = (__be64 *)&crq; int rc, sig; if (!ibmvtpm->rtce_buf) { @@ -137,10 +179,6 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) spin_lock(&ibmvtpm->rtce_lock); ibmvtpm->res_len = 0; memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count); - crq.valid = (u8)IBMVTPM_VALID_CMD; - crq.msg = (u8)VTPM_TPM_COMMAND; - crq.len = cpu_to_be16(count); - crq.data = cpu_to_be32(ibmvtpm->rtce_dma_handle); /* * set the processing flag before the Hcall, since we may get the @@ -148,8 +186,9 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) */ ibmvtpm->tpm_processing_cmd = true; - rc = ibmvtpm_send_crq(ibmvtpm->vdev, be64_to_cpu(word[0]), - be64_to_cpu(word[1])); + rc = ibmvtpm_send_crq(ibmvtpm->vdev, + IBMVTPM_VALID_CMD, VTPM_TPM_COMMAND, + count, ibmvtpm->rtce_dma_handle); if (rc != H_SUCCESS) { dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc); rc = 0; @@ -182,15 +221,10 @@ static u8 tpm_ibmvtpm_status(struct tpm_chip *chip) */ static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm) { - struct ibmvtpm_crq crq; - u64 *buf = (u64 *) &crq; int rc; - crq.valid = (u8)IBMVTPM_VALID_CMD; - crq.msg = (u8)VTPM_GET_RTCE_BUFFER_SIZE; - - rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]), - cpu_to_be64(buf[1])); + rc = ibmvtpm_send_crq(ibmvtpm->vdev, + IBMVTPM_VALID_CMD, VTPM_GET_RTCE_BUFFER_SIZE, 0, 0); if (rc != H_SUCCESS) dev_err(ibmvtpm->dev, "ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc); @@ -210,15 +244,10 @@ static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm) */ static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm) { - struct ibmvtpm_crq crq; - u64 *buf = (u64 *) &crq; int rc; - crq.valid = (u8)IBMVTPM_VALID_CMD; - crq.msg = (u8)VTPM_GET_VERSION; - - rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]), - cpu_to_be64(buf[1])); + rc = ibmvtpm_send_crq(ibmvtpm->vdev, + 
IBMVTPM_VALID_CMD, VTPM_GET_VERSION, 0, 0); if (rc != H_SUCCESS) dev_err(ibmvtpm->dev, "ibmvtpm_crq_get_version failed rc=%d\n", rc); @@ -238,7 +267,7 @@ static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm) { int rc; - rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_COMP_CMD, 0); + rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_COMP_CMD); if (rc != H_SUCCESS) dev_err(ibmvtpm->dev, "ibmvtpm_crq_send_init_complete failed rc=%d\n", rc); @@ -258,7 +287,7 @@ static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm) { int rc; - rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_CMD, 0); + rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_CMD); if (rc != H_SUCCESS) dev_err(ibmvtpm->dev, "ibmvtpm_crq_send_init failed rc=%d\n", rc); @@ -340,15 +369,10 @@ static int tpm_ibmvtpm_suspend(struct device *dev) { struct tpm_chip *chip = dev_get_drvdata(dev); struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); - struct ibmvtpm_crq crq; - u64 *buf = (u64 *) &crq; int rc = 0; - crq.valid = (u8)IBMVTPM_VALID_CMD; - crq.msg = (u8)VTPM_PREPARE_TO_SUSPEND; - - rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]), - cpu_to_be64(buf[1])); + rc = ibmvtpm_send_crq(ibmvtpm->vdev, + IBMVTPM_VALID_CMD, VTPM_PREPARE_TO_SUSPEND, 0, 0); if (rc != H_SUCCESS) dev_err(ibmvtpm->dev, "tpm_ibmvtpm_suspend failed rc=%d\n", rc); diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c index 3b1b9f9322d5..d8f10047fbba 100644 --- a/drivers/char/tpm/tpm_infineon.c +++ b/drivers/char/tpm/tpm_infineon.c @@ -191,7 +191,7 @@ static int wait(struct tpm_chip *chip, int wait_for_bit) /* check the status-register if wait_for_bit is set */ if (status & 1 << wait_for_bit) break; - msleep(TPM_MSLEEP_TIME); + tpm_msleep(TPM_MSLEEP_TIME); } if (i == TPM_MAX_TRIES) { /* timeout occurs */ if (wait_for_bit == STAT_XFE) @@ -226,7 +226,7 @@ static void tpm_wtx(struct tpm_chip *chip) wait_and_send(chip, TPM_CTRL_WTX); wait_and_send(chip, 0x00); wait_and_send(chip, 0x00); - msleep(TPM_WTX_MSLEEP_TIME); + tpm_msleep(TPM_WTX_MSLEEP_TIME); } static void tpm_wtx_abort(struct tpm_chip *chip) @@ -237,7 +237,7 @@ static void tpm_wtx_abort(struct tpm_chip *chip) wait_and_send(chip, 0x00); wait_and_send(chip, 0x00); number_of_wtx = 0; - msleep(TPM_WTX_MSLEEP_TIME); + tpm_msleep(TPM_WTX_MSLEEP_TIME); } static int tpm_inf_recv(struct tpm_chip *chip, u8 * buf, size_t count) diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index b617b2eeb080..63bc6c3b949e 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -51,7 +51,7 @@ static int wait_startup(struct tpm_chip *chip, int l) if (access & TPM_ACCESS_VALID) return 0; - msleep(TPM_TIMEOUT); + tpm_msleep(TPM_TIMEOUT); } while (time_before(jiffies, stop)); return -1; } @@ -117,7 +117,7 @@ static int request_locality(struct tpm_chip *chip, int l) do { if (check_locality(chip, l)) return l; - msleep(TPM_TIMEOUT); + tpm_msleep(TPM_TIMEOUT); } while (time_before(jiffies, stop)); } return -1; @@ -164,7 +164,7 @@ static int get_burstcount(struct tpm_chip *chip) burstcnt = (value >> 8) & 0xFFFF; if (burstcnt) return burstcnt; - msleep(TPM_TIMEOUT); + tpm_msleep(TPM_TIMEOUT); } while (time_before(jiffies, stop)); return -EBUSY; } @@ -396,7 +396,7 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len) priv->irq = irq; chip->flags |= TPM_CHIP_FLAG_IRQ; if (!priv->irq_tested) - msleep(1); + tpm_msleep(1); if (!priv->irq_tested) disable_interrupts(chip); priv->irq_tested = true; diff --git 
a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 23f33f95d4a6..d1aed2513bd9 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -451,9 +451,6 @@ static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size, * device is created by remoteproc, the DMA memory is * associated with the grandparent device: * vdev => rproc => platform-dev. - * The code here would have been less quirky if - * DMA_MEMORY_INCLUDES_CHILDREN had been supported - * in dma-coherent.c */ if (!vq->vdev->dev.parent || !vq->vdev->dev.parent->parent) goto free_buf; diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index 68ca2d9fcd73..1c4e1aa6767e 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -31,6 +31,13 @@ config COMMON_CLK_WM831X source "drivers/clk/versatile/Kconfig" +config CLK_HSDK + bool "PLL Driver for HSDK platform" + depends on OF || COMPILE_TEST + ---help--- + This driver supports the HSDK core, system, ddr, tunnel and hdmi PLLs + control. + config COMMON_CLK_MAX77686 tristate "Clock driver for Maxim 77620/77686/77802 MFD" depends on MFD_MAX77686 || MFD_MAX77620 || COMPILE_TEST @@ -39,10 +46,10 @@ config COMMON_CLK_MAX77686 clock. config COMMON_CLK_RK808 - tristate "Clock driver for RK808/RK818" + tristate "Clock driver for RK805/RK808/RK818" depends on MFD_RK808 ---help--- - This driver supports RK808 and RK818 crystal oscillator clock. These + This driver supports RK805, RK808 and RK818 crystal oscillator clock. These multi-function devices have two fixed-rate oscillators, clocked at 32KHz each. Clkout1 is always on, Clkout2 can off by control register. @@ -210,14 +217,14 @@ config COMMON_CLK_OXNAS Support for the OXNAS SoC Family clocks. config COMMON_CLK_VC5 - tristate "Clock driver for IDT VersaClock5 devices" + tristate "Clock driver for IDT VersaClock 5,6 devices" depends on I2C depends on OF select REGMAP_I2C help ---help--- - This driver supports the IDT VersaClock5 programmable clock - generator. + This driver supports the IDT VersaClock 5 and VersaClock 6 + programmable clock generators. 
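Looking back at the TPM hunks above: the tpm_msleep() helper added to tpm.h earlier in this diff is a thin wrapper around usleep_range() with a 300 us window, and the loops in tpm-interface.c, tpm_infineon.c and tpm_tis_core.c now use it instead of msleep(). For short delays, usleep_range() typically wakes much closer to the requested time than msleep(), whose argument is rounded up to whole jiffies. A self-contained sketch of the resulting polling pattern; poll_ready() and the caller-supplied ready() predicate are illustrative names, not part of the patch:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

/* Poll a caller-supplied predicate the way the converted TPM loops do:
 * a nominal 5 ms wait gets a 5.0 to 5.3 ms window instead of being
 * rounded up to the next jiffy boundary by msleep().
 */
static int poll_ready(bool (*ready)(void), unsigned long timeout_ms)
{
	unsigned long stop = jiffies + msecs_to_jiffies(timeout_ms);

	do {
		if (ready())
			return 0;
		usleep_range(5 * 1000, 5 * 1000 + 300);	/* like tpm_msleep(5) */
	} while (time_before(jiffies, stop));

	return -ETIMEDOUT;
}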
source "drivers/clk/bcm/Kconfig" source "drivers/clk/hisilicon/Kconfig" diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index cd376b3fb47a..c99f363826f0 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile @@ -27,8 +27,8 @@ obj-$(CONFIG_COMMON_CLK_CS2000_CP) += clk-cs2000-cp.o obj-$(CONFIG_ARCH_EFM32) += clk-efm32gg.o obj-$(CONFIG_COMMON_CLK_GEMINI) += clk-gemini.o obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o +obj-$(CONFIG_CLK_HSDK) += clk-hsdk-pll.o obj-$(CONFIG_COMMON_CLK_MAX77686) += clk-max77686.o -obj-$(CONFIG_ARCH_MB86S7X) += clk-mb86s7x.o obj-$(CONFIG_ARCH_MOXART) += clk-moxart.o obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o obj-$(CONFIG_ARCH_NSPIRE) += clk-nspire.o @@ -44,6 +44,7 @@ obj-$(CONFIG_COMMON_CLK_SI5351) += clk-si5351.o obj-$(CONFIG_COMMON_CLK_SI514) += clk-si514.o obj-$(CONFIG_COMMON_CLK_SI570) += clk-si570.o obj-$(CONFIG_ARCH_STM32) += clk-stm32f4.o +obj-$(CONFIG_ARCH_STM32) += clk-stm32h7.o obj-$(CONFIG_ARCH_TANGO) += clk-tango4.o obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o obj-$(CONFIG_ARCH_U300) += clk-u300.o diff --git a/drivers/clk/at91/Makefile b/drivers/clk/at91/Makefile index 13e67bd35cff..c68947b65a4c 100644 --- a/drivers/clk/at91/Makefile +++ b/drivers/clk/at91/Makefile @@ -6,6 +6,7 @@ obj-y += pmc.o sckc.o obj-y += clk-slow.o clk-main.o clk-pll.o clk-plldiv.o clk-master.o obj-y += clk-system.o clk-peripheral.o clk-programmable.o +obj-$(CONFIG_HAVE_AT91_AUDIO_PLL) += clk-audio-pll.o obj-$(CONFIG_HAVE_AT91_UTMI) += clk-utmi.o obj-$(CONFIG_HAVE_AT91_USB_CLK) += clk-usb.o obj-$(CONFIG_HAVE_AT91_SMD) += clk-smd.o diff --git a/drivers/clk/at91/clk-audio-pll.c b/drivers/clk/at91/clk-audio-pll.c new file mode 100644 index 000000000000..da7bafcfbe70 --- /dev/null +++ b/drivers/clk/at91/clk-audio-pll.c @@ -0,0 +1,536 @@ +/* + * Copyright (C) 2016 Atmel Corporation, + * Songjun Wu , + * Nicolas Ferre + * Copyright (C) 2017 Free Electrons, + * Quentin Schulz + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * The Sama5d2 SoC has two audio PLLs (PMC and PAD) that shares the same parent + * (FRAC). FRAC can output between 620 and 700MHz and only multiply the rate of + * its own parent. PMC and PAD can then divide the FRAC rate to best match the + * asked rate. + * + * Traits of FRAC clock: + * enable - clk_enable writes nd, fracr parameters and enables PLL + * rate - rate is adjustable. + * clk->rate = parent->rate * ((nd + 1) + (fracr / 2^22)) + * parent - fixed parent. No clk_set_parent support + * + * Traits of PMC clock: + * enable - clk_enable writes qdpmc, and enables PMC output + * rate - rate is adjustable. + * clk->rate = parent->rate / (qdpmc + 1) + * parent - fixed parent. No clk_set_parent support + * + * Traits of PAD clock: + * enable - clk_enable writes divisors and enables PAD output + * rate - rate is adjustable. + * clk->rate = parent->rate / (qdaudio * div)) + * parent - fixed parent. 
No clk_set_parent support + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#define AUDIO_PLL_DIV_FRAC BIT(22) +#define AUDIO_PLL_ND_MAX (AT91_PMC_AUDIO_PLL_ND_MASK >> \ + AT91_PMC_AUDIO_PLL_ND_OFFSET) + +#define AUDIO_PLL_QDPAD(qd, div) ((AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV(qd) & \ + AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_MASK) | \ + (AT91_PMC_AUDIO_PLL_QDPAD_DIV(div) & \ + AT91_PMC_AUDIO_PLL_QDPAD_DIV_MASK)) + +#define AUDIO_PLL_QDPMC_MAX (AT91_PMC_AUDIO_PLL_QDPMC_MASK >> \ + AT91_PMC_AUDIO_PLL_QDPMC_OFFSET) + +#define AUDIO_PLL_FOUT_MIN 620000000UL +#define AUDIO_PLL_FOUT_MAX 700000000UL + +struct clk_audio_frac { + struct clk_hw hw; + struct regmap *regmap; + u32 fracr; + u8 nd; +}; + +struct clk_audio_pad { + struct clk_hw hw; + struct regmap *regmap; + u8 qdaudio; + u8 div; +}; + +struct clk_audio_pmc { + struct clk_hw hw; + struct regmap *regmap; + u8 qdpmc; +}; + +#define to_clk_audio_frac(hw) container_of(hw, struct clk_audio_frac, hw) +#define to_clk_audio_pad(hw) container_of(hw, struct clk_audio_pad, hw) +#define to_clk_audio_pmc(hw) container_of(hw, struct clk_audio_pmc, hw) + +static int clk_audio_pll_frac_enable(struct clk_hw *hw) +{ + struct clk_audio_frac *frac = to_clk_audio_frac(hw); + + regmap_update_bits(frac->regmap, AT91_PMC_AUDIO_PLL0, + AT91_PMC_AUDIO_PLL_RESETN, 0); + regmap_update_bits(frac->regmap, AT91_PMC_AUDIO_PLL0, + AT91_PMC_AUDIO_PLL_RESETN, + AT91_PMC_AUDIO_PLL_RESETN); + regmap_update_bits(frac->regmap, AT91_PMC_AUDIO_PLL1, + AT91_PMC_AUDIO_PLL_FRACR_MASK, frac->fracr); + + /* + * reset and enable have to be done in 2 separated writes + * for AT91_PMC_AUDIO_PLL0 + */ + regmap_update_bits(frac->regmap, AT91_PMC_AUDIO_PLL0, + AT91_PMC_AUDIO_PLL_PLLEN | + AT91_PMC_AUDIO_PLL_ND_MASK, + AT91_PMC_AUDIO_PLL_PLLEN | + AT91_PMC_AUDIO_PLL_ND(frac->nd)); + + return 0; +} + +static int clk_audio_pll_pad_enable(struct clk_hw *hw) +{ + struct clk_audio_pad *apad_ck = to_clk_audio_pad(hw); + + regmap_update_bits(apad_ck->regmap, AT91_PMC_AUDIO_PLL1, + AT91_PMC_AUDIO_PLL_QDPAD_MASK, + AUDIO_PLL_QDPAD(apad_ck->qdaudio, apad_ck->div)); + regmap_update_bits(apad_ck->regmap, AT91_PMC_AUDIO_PLL0, + AT91_PMC_AUDIO_PLL_PADEN, AT91_PMC_AUDIO_PLL_PADEN); + + return 0; +} + +static int clk_audio_pll_pmc_enable(struct clk_hw *hw) +{ + struct clk_audio_pmc *apmc_ck = to_clk_audio_pmc(hw); + + regmap_update_bits(apmc_ck->regmap, AT91_PMC_AUDIO_PLL0, + AT91_PMC_AUDIO_PLL_PMCEN | + AT91_PMC_AUDIO_PLL_QDPMC_MASK, + AT91_PMC_AUDIO_PLL_PMCEN | + AT91_PMC_AUDIO_PLL_QDPMC(apmc_ck->qdpmc)); + return 0; +} + +static void clk_audio_pll_frac_disable(struct clk_hw *hw) +{ + struct clk_audio_frac *frac = to_clk_audio_frac(hw); + + regmap_update_bits(frac->regmap, AT91_PMC_AUDIO_PLL0, + AT91_PMC_AUDIO_PLL_PLLEN, 0); + /* do it in 2 separated writes */ + regmap_update_bits(frac->regmap, AT91_PMC_AUDIO_PLL0, + AT91_PMC_AUDIO_PLL_RESETN, 0); +} + +static void clk_audio_pll_pad_disable(struct clk_hw *hw) +{ + struct clk_audio_pad *apad_ck = to_clk_audio_pad(hw); + + regmap_update_bits(apad_ck->regmap, AT91_PMC_AUDIO_PLL0, + AT91_PMC_AUDIO_PLL_PADEN, 0); +} + +static void clk_audio_pll_pmc_disable(struct clk_hw *hw) +{ + struct clk_audio_pmc *apmc_ck = to_clk_audio_pmc(hw); + + regmap_update_bits(apmc_ck->regmap, AT91_PMC_AUDIO_PLL0, + AT91_PMC_AUDIO_PLL_PMCEN, 0); +} + +static unsigned long clk_audio_pll_fout(unsigned long parent_rate, + unsigned long nd, unsigned long fracr) +{ + unsigned long long fr = (unsigned long long)parent_rate * fracr; + + pr_debug("A PLL: %s, fr = 
%llu\n", __func__, fr); + + fr = DIV_ROUND_CLOSEST_ULL(fr, AUDIO_PLL_DIV_FRAC); + + pr_debug("A PLL: %s, fr = %llu\n", __func__, fr); + + return parent_rate * (nd + 1) + fr; +} + +static unsigned long clk_audio_pll_frac_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_audio_frac *frac = to_clk_audio_frac(hw); + unsigned long fout; + + fout = clk_audio_pll_fout(parent_rate, frac->nd, frac->fracr); + + pr_debug("A PLL: %s, fout = %lu (nd = %u, fracr = %lu)\n", __func__, + fout, frac->nd, (unsigned long)frac->fracr); + + return fout; +} + +static unsigned long clk_audio_pll_pad_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_audio_pad *apad_ck = to_clk_audio_pad(hw); + unsigned long apad_rate = 0; + + if (apad_ck->qdaudio && apad_ck->div) + apad_rate = parent_rate / (apad_ck->qdaudio * apad_ck->div); + + pr_debug("A PLL/PAD: %s, apad_rate = %lu (div = %u, qdaudio = %u)\n", + __func__, apad_rate, apad_ck->div, apad_ck->qdaudio); + + return apad_rate; +} + +static unsigned long clk_audio_pll_pmc_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_audio_pmc *apmc_ck = to_clk_audio_pmc(hw); + unsigned long apmc_rate = 0; + + apmc_rate = parent_rate / (apmc_ck->qdpmc + 1); + + pr_debug("A PLL/PMC: %s, apmc_rate = %lu (qdpmc = %u)\n", __func__, + apmc_rate, apmc_ck->qdpmc); + + return apmc_rate; +} + +static int clk_audio_pll_frac_compute_frac(unsigned long rate, + unsigned long parent_rate, + unsigned long *nd, + unsigned long *fracr) +{ + unsigned long long tmp, rem; + + if (!rate) + return -EINVAL; + + tmp = rate; + rem = do_div(tmp, parent_rate); + if (!tmp || tmp >= AUDIO_PLL_ND_MAX) + return -EINVAL; + + *nd = tmp - 1; + + tmp = rem * AUDIO_PLL_DIV_FRAC; + tmp = DIV_ROUND_CLOSEST_ULL(tmp, parent_rate); + if (tmp > AT91_PMC_AUDIO_PLL_FRACR_MASK) + return -EINVAL; + + /* we can cast here as we verified the bounds just above */ + *fracr = (unsigned long)tmp; + + return 0; +} + +static int clk_audio_pll_frac_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + unsigned long fracr, nd; + int ret; + + pr_debug("A PLL: %s, rate = %lu (parent_rate = %lu)\n", __func__, + req->rate, req->best_parent_rate); + + req->rate = clamp(req->rate, AUDIO_PLL_FOUT_MIN, AUDIO_PLL_FOUT_MAX); + + req->min_rate = max(req->min_rate, AUDIO_PLL_FOUT_MIN); + req->max_rate = min(req->max_rate, AUDIO_PLL_FOUT_MAX); + + ret = clk_audio_pll_frac_compute_frac(req->rate, req->best_parent_rate, + &nd, &fracr); + if (ret) + return ret; + + req->rate = clk_audio_pll_fout(req->best_parent_rate, nd, fracr); + + req->best_parent_hw = clk_hw_get_parent(hw); + + pr_debug("A PLL: %s, best_rate = %lu (nd = %lu, fracr = %lu)\n", + __func__, req->rate, nd, fracr); + + return 0; +} + +static long clk_audio_pll_pad_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + struct clk_hw *pclk = clk_hw_get_parent(hw); + long best_rate = -EINVAL; + unsigned long best_parent_rate; + unsigned long tmp_qd; + u32 div; + long tmp_rate; + int tmp_diff; + int best_diff = -1; + + pr_debug("A PLL/PAD: %s, rate = %lu (parent_rate = %lu)\n", __func__, + rate, *parent_rate); + + /* + * Rate divisor is actually made of two different divisors, multiplied + * between themselves before dividing the rate. + * tmp_qd goes from 1 to 31 and div is either 2 or 3. + * In order to avoid testing twice the rate divisor (e.g. 
divisor 12 can + * be found with (tmp_qd, div) = (2, 6) or (3, 4)), we remove any loop + * for a rate divisor when div is 2 and tmp_qd is a multiple of 3. + * We cannot inverse it (condition div is 3 and tmp_qd is even) or we + * would miss some rate divisor that aren't reachable with div being 2 + * (e.g. rate divisor 90 is made with div = 3 and tmp_qd = 30, thus + * tmp_qd is even so we skip it because we think div 2 could make this + * rate divisor which isn't possible since tmp_qd has to be <= 31). + */ + for (tmp_qd = 1; tmp_qd < AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_MAX; tmp_qd++) + for (div = 2; div <= 3; div++) { + if (div == 2 && tmp_qd % 3 == 0) + continue; + + best_parent_rate = clk_hw_round_rate(pclk, + rate * tmp_qd * div); + tmp_rate = best_parent_rate / (div * tmp_qd); + tmp_diff = abs(rate - tmp_rate); + + if (best_diff < 0 || best_diff > tmp_diff) { + *parent_rate = best_parent_rate; + best_rate = tmp_rate; + best_diff = tmp_diff; + } + } + + pr_debug("A PLL/PAD: %s, best_rate = %ld, best_parent_rate = %lu\n", + __func__, best_rate, best_parent_rate); + + return best_rate; +} + +static long clk_audio_pll_pmc_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + struct clk_hw *pclk = clk_hw_get_parent(hw); + long best_rate = -EINVAL; + unsigned long best_parent_rate = 0; + u32 tmp_qd = 0, div; + long tmp_rate; + int tmp_diff; + int best_diff = -1; + + pr_debug("A PLL/PMC: %s, rate = %lu (parent_rate = %lu)\n", __func__, + rate, *parent_rate); + + for (div = 1; div <= AUDIO_PLL_QDPMC_MAX; div++) { + best_parent_rate = clk_round_rate(pclk->clk, rate * div); + tmp_rate = best_parent_rate / div; + tmp_diff = abs(rate - tmp_rate); + + if (best_diff < 0 || best_diff > tmp_diff) { + *parent_rate = best_parent_rate; + best_rate = tmp_rate; + best_diff = tmp_diff; + tmp_qd = div; + } + } + + pr_debug("A PLL/PMC: %s, best_rate = %ld, best_parent_rate = %lu (qd = %d)\n", + __func__, best_rate, *parent_rate, tmp_qd - 1); + + return best_rate; +} + +static int clk_audio_pll_frac_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct clk_audio_frac *frac = to_clk_audio_frac(hw); + unsigned long fracr, nd; + int ret; + + pr_debug("A PLL: %s, rate = %lu (parent_rate = %lu)\n", __func__, rate, + parent_rate); + + if (rate < AUDIO_PLL_FOUT_MIN || rate > AUDIO_PLL_FOUT_MAX) + return -EINVAL; + + ret = clk_audio_pll_frac_compute_frac(rate, parent_rate, &nd, &fracr); + if (ret) + return ret; + + frac->nd = nd; + frac->fracr = fracr; + + return 0; +} + +static int clk_audio_pll_pad_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct clk_audio_pad *apad_ck = to_clk_audio_pad(hw); + u8 tmp_div; + + pr_debug("A PLL/PAD: %s, rate = %lu (parent_rate = %lu)\n", __func__, + rate, parent_rate); + + if (!rate) + return -EINVAL; + + tmp_div = parent_rate / rate; + if (tmp_div % 3 == 0) { + apad_ck->qdaudio = tmp_div / 3; + apad_ck->div = 3; + } else { + apad_ck->qdaudio = tmp_div / 2; + apad_ck->div = 2; + } + + return 0; +} + +static int clk_audio_pll_pmc_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct clk_audio_pmc *apmc_ck = to_clk_audio_pmc(hw); + + if (!rate) + return -EINVAL; + + pr_debug("A PLL/PMC: %s, rate = %lu (parent_rate = %lu)\n", __func__, + rate, parent_rate); + + apmc_ck->qdpmc = parent_rate / rate - 1; + + return 0; +} + +static const struct clk_ops audio_pll_frac_ops = { + .enable = clk_audio_pll_frac_enable, + .disable = clk_audio_pll_frac_disable, + 
.recalc_rate = clk_audio_pll_frac_recalc_rate, + .determine_rate = clk_audio_pll_frac_determine_rate, + .set_rate = clk_audio_pll_frac_set_rate, +}; + +static const struct clk_ops audio_pll_pad_ops = { + .enable = clk_audio_pll_pad_enable, + .disable = clk_audio_pll_pad_disable, + .recalc_rate = clk_audio_pll_pad_recalc_rate, + .round_rate = clk_audio_pll_pad_round_rate, + .set_rate = clk_audio_pll_pad_set_rate, +}; + +static const struct clk_ops audio_pll_pmc_ops = { + .enable = clk_audio_pll_pmc_enable, + .disable = clk_audio_pll_pmc_disable, + .recalc_rate = clk_audio_pll_pmc_recalc_rate, + .round_rate = clk_audio_pll_pmc_round_rate, + .set_rate = clk_audio_pll_pmc_set_rate, +}; + +static int of_sama5d2_clk_audio_pll_setup(struct device_node *np, + struct clk_init_data *init, + struct clk_hw *hw, + struct regmap **clk_audio_regmap) +{ + struct regmap *regmap; + const char *parent_names[1]; + int ret; + + regmap = syscon_node_to_regmap(of_get_parent(np)); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + init->name = np->name; + of_clk_parent_fill(np, parent_names, 1); + init->parent_names = parent_names; + init->num_parents = 1; + + hw->init = init; + *clk_audio_regmap = regmap; + + ret = clk_hw_register(NULL, hw); + if (ret) + return ret; + + return of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw); +} + +static void __init of_sama5d2_clk_audio_pll_frac_setup(struct device_node *np) +{ + struct clk_audio_frac *frac_ck; + struct clk_init_data init = {}; + + frac_ck = kzalloc(sizeof(*frac_ck), GFP_KERNEL); + if (!frac_ck) + return; + + init.ops = &audio_pll_frac_ops; + init.flags = CLK_SET_RATE_GATE; + + if (of_sama5d2_clk_audio_pll_setup(np, &init, &frac_ck->hw, + &frac_ck->regmap)) + kfree(frac_ck); +} + +static void __init of_sama5d2_clk_audio_pll_pad_setup(struct device_node *np) +{ + struct clk_audio_pad *apad_ck; + struct clk_init_data init = {}; + + apad_ck = kzalloc(sizeof(*apad_ck), GFP_KERNEL); + if (!apad_ck) + return; + + init.ops = &audio_pll_pad_ops; + init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE | + CLK_SET_RATE_PARENT; + + if (of_sama5d2_clk_audio_pll_setup(np, &init, &apad_ck->hw, + &apad_ck->regmap)) + kfree(apad_ck); +} + +static void __init of_sama5d2_clk_audio_pll_pmc_setup(struct device_node *np) +{ + struct clk_audio_pad *apmc_ck; + struct clk_init_data init = {}; + + apmc_ck = kzalloc(sizeof(*apmc_ck), GFP_KERNEL); + if (!apmc_ck) + return; + + init.ops = &audio_pll_pmc_ops; + init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE | + CLK_SET_RATE_PARENT; + + if (of_sama5d2_clk_audio_pll_setup(np, &init, &apmc_ck->hw, + &apmc_ck->regmap)) + kfree(apmc_ck); +} + +CLK_OF_DECLARE(of_sama5d2_clk_audio_pll_frac_setup, + "atmel,sama5d2-clk-audio-pll-frac", + of_sama5d2_clk_audio_pll_frac_setup); +CLK_OF_DECLARE(of_sama5d2_clk_audio_pll_pad_setup, + "atmel,sama5d2-clk-audio-pll-pad", + of_sama5d2_clk_audio_pll_pad_setup); +CLK_OF_DECLARE(of_sama5d2_clk_audio_pll_pmc_setup, + "atmel,sama5d2-clk-audio-pll-pmc", + of_sama5d2_clk_audio_pll_pmc_setup); diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c index f0b7ae904ce2..33481368740e 100644 --- a/drivers/clk/at91/clk-generated.c +++ b/drivers/clk/at91/clk-generated.c @@ -26,6 +26,13 @@ #define GENERATED_SOURCE_MAX 6 #define GENERATED_MAX_DIV 255 +#define GCK_ID_SSC0 43 +#define GCK_ID_SSC1 44 +#define GCK_ID_I2S0 54 +#define GCK_ID_I2S1 55 +#define GCK_ID_CLASSD 59 +#define GCK_INDEX_DT_AUDIO_PLL 5 + struct clk_generated { struct clk_hw hw; struct regmap *regmap; @@ -34,6 +41,7 @@ 
struct clk_generated { u32 id; u32 gckdiv; u8 parent_id; + bool audio_pll_allowed; }; #define to_clk_generated(hw) \ @@ -99,21 +107,41 @@ clk_generated_recalc_rate(struct clk_hw *hw, return DIV_ROUND_CLOSEST(parent_rate, gck->gckdiv + 1); } +static void clk_generated_best_diff(struct clk_rate_request *req, + struct clk_hw *parent, + unsigned long parent_rate, u32 div, + int *best_diff, long *best_rate) +{ + unsigned long tmp_rate; + int tmp_diff; + + if (!div) + tmp_rate = parent_rate; + else + tmp_rate = parent_rate / div; + tmp_diff = abs(req->rate - tmp_rate); + + if (*best_diff < 0 || *best_diff > tmp_diff) { + *best_rate = tmp_rate; + *best_diff = tmp_diff; + req->best_parent_rate = parent_rate; + req->best_parent_hw = parent; + } +} + static int clk_generated_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) { struct clk_generated *gck = to_clk_generated(hw); struct clk_hw *parent = NULL; + struct clk_rate_request req_parent = *req; long best_rate = -EINVAL; - unsigned long tmp_rate, min_rate; + unsigned long min_rate, parent_rate; int best_diff = -1; - int tmp_diff; int i; + u32 div; - for (i = 0; i < clk_hw_get_num_parents(hw); i++) { - u32 div; - unsigned long parent_rate; - + for (i = 0; i < clk_hw_get_num_parents(hw) - 1; i++) { parent = clk_hw_get_parent_by_index(hw, i); if (!parent) continue; @@ -124,25 +152,43 @@ static int clk_generated_determine_rate(struct clk_hw *hw, (gck->range.max && min_rate > gck->range.max)) continue; - for (div = 1; div < GENERATED_MAX_DIV + 2; div++) { - tmp_rate = DIV_ROUND_CLOSEST(parent_rate, div); - tmp_diff = abs(req->rate - tmp_rate); + div = DIV_ROUND_CLOSEST(parent_rate, req->rate); - if (best_diff < 0 || best_diff > tmp_diff) { - best_rate = tmp_rate; - best_diff = tmp_diff; - req->best_parent_rate = parent_rate; - req->best_parent_hw = parent; - } - - if (!best_diff || tmp_rate < req->rate) - break; - } + clk_generated_best_diff(req, parent, parent_rate, div, + &best_diff, &best_rate); if (!best_diff) break; } + /* + * The audio_pll rate can be modified, unlike the five other clocks + * that should never be altered. + * The audio_pll can technically be used by multiple consumers. However, + * with the rate locking, the first consumer to enable the clock will be + * the one definitely setting the rate of the clock. + * Since audio IPs are most likely to request the same rate, we enforce + * that the only clks able to modify gck rate are those of audio IPs.
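+ * As an illustration (hypothetical request): for a 12.288 MHz request, the + * loop below asks the audio PLL, via __clk_determine_rate(), what it can + * deliver for 12.288 MHz * div with div ranging from 1 to 256, and keeps + * the divider that yields the smallest rate error.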
+ */ + + if (!gck->audio_pll_allowed) + goto end; + + parent = clk_hw_get_parent_by_index(hw, GCK_INDEX_DT_AUDIO_PLL); + if (!parent) + goto end; + + for (div = 1; div < GENERATED_MAX_DIV + 2; div++) { + req_parent.rate = req->rate * div; + __clk_determine_rate(parent, &req_parent); + clk_generated_best_diff(req, parent, req_parent.rate, div, + &best_diff, &best_rate); + + if (!best_diff) + break; + } + +end: pr_debug("GCLK: %s, best_rate = %ld, parent clk: %s @ %ld\n", __func__, best_rate, __clk_get_name((req->best_parent_hw)->clk), @@ -252,7 +298,8 @@ at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock, init.ops = &generated_ops; init.parent_names = parent_names; init.num_parents = num_parents; - init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE; + init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE | + CLK_SET_RATE_PARENT; gck->id = id; gck->hw.init = &init; @@ -284,6 +331,7 @@ static void __init of_sama5d2_clk_generated_setup(struct device_node *np) struct device_node *gcknp; struct clk_range range = CLK_RANGE(0, 0); struct regmap *regmap; + struct clk_generated *gck; num_parents = of_clk_get_parent_count(np); if (num_parents == 0 || num_parents > GENERATED_SOURCE_MAX) @@ -315,6 +363,21 @@ static void __init of_sama5d2_clk_generated_setup(struct device_node *np) hw = at91_clk_register_generated(regmap, &pmc_pcr_lock, name, parent_names, num_parents, id, &range); + + gck = to_clk_generated(hw); + + if (of_device_is_compatible(np, + "atmel,sama5d2-clk-generated")) { + if (gck->id == GCK_ID_SSC0 || gck->id == GCK_ID_SSC1 || + gck->id == GCK_ID_I2S0 || gck->id == GCK_ID_I2S1 || + gck->id == GCK_ID_CLASSD) + gck->audio_pll_allowed = true; + else + gck->audio_pll_allowed = false; + } else { + gck->audio_pll_allowed = false; + } + if (IS_ERR(hw)) continue; diff --git a/drivers/clk/axs10x/Makefile b/drivers/clk/axs10x/Makefile index 01996b871b06..d747deafbf1e 100644 --- a/drivers/clk/axs10x/Makefile +++ b/drivers/clk/axs10x/Makefile @@ -1 +1,2 @@ obj-y += i2s_pll_clock.o +obj-y += pll_clock.o diff --git a/drivers/clk/axs10x/pll_clock.c b/drivers/clk/axs10x/pll_clock.c new file mode 100644 index 000000000000..25d8c240ddfb --- /dev/null +++ b/drivers/clk/axs10x/pll_clock.c @@ -0,0 +1,346 @@ +/* + * Synopsys AXS10X SDP Generic PLL clock driver + * + * Copyright (C) 2017 Synopsys + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* PLL registers addresses */ +#define PLL_REG_IDIV 0x0 +#define PLL_REG_FBDIV 0x4 +#define PLL_REG_ODIV 0x8 + +/* + * Bit fields of the PLL IDIV/FBDIV/ODIV registers: + * ________________________________________________________________________ + * |31 15| 14 | 13 | 12 |11 6|5 0| + * |-------RESRVED------|-NOUPDATE-|-BYPASS-|-EDGE-|--HIGHTIME--|--LOWTIME--| + * |____________________|__________|________|______|____________|___________| + * + * Following macros determine the way of access to these registers + * They should be set up only using the macros. + * reg should be an u32 variable. + */ + +#define PLL_REG_GET_LOW(reg) \ + (((reg) & (0x3F << 0)) >> 0) +#define PLL_REG_GET_HIGH(reg) \ + (((reg) & (0x3F << 6)) >> 6) +#define PLL_REG_GET_EDGE(reg) \ + (((reg) & (BIT(12))) ? 1 : 0) +#define PLL_REG_GET_BYPASS(reg) \ + (((reg) & (BIT(13))) ? 
1 : 0) +#define PLL_REG_GET_NOUPD(reg) \ + (((reg) & (BIT(14))) ? 1 : 0) +#define PLL_REG_GET_PAD(reg) \ + (((reg) & (0x1FFFF << 15)) >> 15) + +#define PLL_REG_SET_LOW(reg, value) \ + { reg |= (((value) & 0x3F) << 0); } +#define PLL_REG_SET_HIGH(reg, value) \ + { reg |= (((value) & 0x3F) << 6); } +#define PLL_REG_SET_EDGE(reg, value) \ + { reg |= (((value) & 0x01) << 12); } +#define PLL_REG_SET_BYPASS(reg, value) \ + { reg |= (((value) & 0x01) << 13); } +#define PLL_REG_SET_NOUPD(reg, value) \ + { reg |= (((value) & 0x01) << 14); } +#define PLL_REG_SET_PAD(reg, value) \ + { reg |= (((value) & 0x1FFFF) << 15); } + +#define PLL_LOCK BIT(0) +#define PLL_ERROR BIT(1) +#define PLL_MAX_LOCK_TIME 100 /* 100 us */ + +struct axs10x_pll_cfg { + u32 rate; + u32 idiv; + u32 fbdiv; + u32 odiv; +}; + +static const struct axs10x_pll_cfg arc_pll_cfg[] = { + { 33333333, 1, 1, 1 }, + { 50000000, 1, 30, 20 }, + { 75000000, 2, 45, 10 }, + { 90000000, 2, 54, 10 }, + { 100000000, 1, 30, 10 }, + { 125000000, 2, 45, 6 }, + {} +}; + +static const struct axs10x_pll_cfg pgu_pll_cfg[] = { + { 25200000, 1, 84, 90 }, + { 50000000, 1, 100, 54 }, + { 74250000, 1, 44, 16 }, + {} +}; + +struct axs10x_pll_clk { + struct clk_hw hw; + void __iomem *base; + void __iomem *lock; + const struct axs10x_pll_cfg *pll_cfg; + struct device *dev; +}; + +static inline void axs10x_pll_write(struct axs10x_pll_clk *clk, u32 reg, + u32 val) +{ + iowrite32(val, clk->base + reg); +} + +static inline u32 axs10x_pll_read(struct axs10x_pll_clk *clk, u32 reg) +{ + return ioread32(clk->base + reg); +} + +static inline struct axs10x_pll_clk *to_axs10x_pll_clk(struct clk_hw *hw) +{ + return container_of(hw, struct axs10x_pll_clk, hw); +} + +static inline u32 axs10x_div_get_value(u32 reg) +{ + if (PLL_REG_GET_BYPASS(reg)) + return 1; + + return PLL_REG_GET_HIGH(reg) + PLL_REG_GET_LOW(reg); +} + +static inline u32 axs10x_encode_div(unsigned int id, int upd) +{ + u32 div = 0; + + PLL_REG_SET_LOW(div, (id % 2 == 0) ? id >> 1 : (id >> 1) + 1); + PLL_REG_SET_HIGH(div, id >> 1); + PLL_REG_SET_EDGE(div, id % 2); + PLL_REG_SET_BYPASS(div, id == 1 ? 1 : 0); + PLL_REG_SET_NOUPD(div, upd == 0 ? 
1 : 0); + + return div; +} + +static unsigned long axs10x_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + u64 rate; + u32 idiv, fbdiv, odiv; + struct axs10x_pll_clk *clk = to_axs10x_pll_clk(hw); + + idiv = axs10x_div_get_value(axs10x_pll_read(clk, PLL_REG_IDIV)); + fbdiv = axs10x_div_get_value(axs10x_pll_read(clk, PLL_REG_FBDIV)); + odiv = axs10x_div_get_value(axs10x_pll_read(clk, PLL_REG_ODIV)); + + rate = (u64)parent_rate * fbdiv; + do_div(rate, idiv * odiv); + + return rate; +} + +static long axs10x_pll_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + int i; + long best_rate; + struct axs10x_pll_clk *clk = to_axs10x_pll_clk(hw); + const struct axs10x_pll_cfg *pll_cfg = clk->pll_cfg; + + if (pll_cfg[0].rate == 0) + return -EINVAL; + + best_rate = pll_cfg[0].rate; + + for (i = 1; pll_cfg[i].rate != 0; i++) { + if (abs(rate - pll_cfg[i].rate) < abs(rate - best_rate)) + best_rate = pll_cfg[i].rate; + } + + return best_rate; +} + +static int axs10x_pll_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + int i; + struct axs10x_pll_clk *clk = to_axs10x_pll_clk(hw); + const struct axs10x_pll_cfg *pll_cfg = clk->pll_cfg; + + for (i = 0; pll_cfg[i].rate != 0; i++) { + if (pll_cfg[i].rate == rate) { + axs10x_pll_write(clk, PLL_REG_IDIV, + axs10x_encode_div(pll_cfg[i].idiv, 0)); + axs10x_pll_write(clk, PLL_REG_FBDIV, + axs10x_encode_div(pll_cfg[i].fbdiv, 0)); + axs10x_pll_write(clk, PLL_REG_ODIV, + axs10x_encode_div(pll_cfg[i].odiv, 1)); + + /* + * Wait until CGU relocks and check error status. + * If after timeout CGU is unlocked yet return error + */ + udelay(PLL_MAX_LOCK_TIME); + if (!(ioread32(clk->lock) & PLL_LOCK)) + return -ETIMEDOUT; + + if (ioread32(clk->lock) & PLL_ERROR) + return -EINVAL; + + return 0; + } + } + + dev_err(clk->dev, "invalid rate=%ld, parent_rate=%ld\n", rate, + parent_rate); + return -EINVAL; +} + +static const struct clk_ops axs10x_pll_ops = { + .recalc_rate = axs10x_pll_recalc_rate, + .round_rate = axs10x_pll_round_rate, + .set_rate = axs10x_pll_set_rate, +}; + +static int axs10x_pll_clk_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + const char *parent_name; + struct axs10x_pll_clk *pll_clk; + struct resource *mem; + struct clk_init_data init = { }; + int ret; + + pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL); + if (!pll_clk) + return -ENOMEM; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + pll_clk->base = devm_ioremap_resource(dev, mem); + if (IS_ERR(pll_clk->base)) + return PTR_ERR(pll_clk->base); + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 1); + pll_clk->lock = devm_ioremap_resource(dev, mem); + if (IS_ERR(pll_clk->lock)) + return PTR_ERR(pll_clk->lock); + + init.name = dev->of_node->name; + init.ops = &axs10x_pll_ops; + parent_name = of_clk_get_parent_name(dev->of_node, 0); + init.parent_names = &parent_name; + init.num_parents = 1; + pll_clk->hw.init = &init; + pll_clk->dev = dev; + pll_clk->pll_cfg = of_device_get_match_data(dev); + + if (!pll_clk->pll_cfg) { + dev_err(dev, "No OF match data provided\n"); + return -EINVAL; + } + + ret = devm_clk_hw_register(dev, &pll_clk->hw); + if (ret) { + dev_err(dev, "failed to register %s clock\n", init.name); + return ret; + } + + return of_clk_add_hw_provider(dev->of_node, of_clk_hw_simple_get, + &pll_clk->hw); +} + +static int axs10x_pll_clk_remove(struct platform_device *pdev) +{ + of_clk_del_provider(pdev->dev.of_node); + return 0; +} + +static void __init 
of_axs10x_pll_clk_setup(struct device_node *node) +{ + const char *parent_name; + struct axs10x_pll_clk *pll_clk; + struct clk_init_data init = { }; + int ret; + + pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL); + if (!pll_clk) + return; + + pll_clk->base = of_iomap(node, 0); + if (!pll_clk->base) { + pr_err("failed to map pll div registers\n"); + goto err_free_pll_clk; + } + + pll_clk->lock = of_iomap(node, 1); + if (!pll_clk->lock) { + pr_err("failed to map pll lock register\n"); + goto err_unmap_base; + } + + init.name = node->name; + init.ops = &axs10x_pll_ops; + parent_name = of_clk_get_parent_name(node, 0); + init.parent_names = &parent_name; + init.num_parents = parent_name ? 1 : 0; + pll_clk->hw.init = &init; + pll_clk->pll_cfg = arc_pll_cfg; + + ret = clk_hw_register(NULL, &pll_clk->hw); + if (ret) { + pr_err("failed to register %s clock\n", node->name); + goto err_unmap_lock; + } + + ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, &pll_clk->hw); + if (ret) { + pr_err("failed to add hw provider for %s clock\n", node->name); + goto err_unregister_clk; + } + + return; + +err_unregister_clk: + clk_hw_unregister(&pll_clk->hw); +err_unmap_lock: + iounmap(pll_clk->lock); +err_unmap_base: + iounmap(pll_clk->base); +err_free_pll_clk: + kfree(pll_clk); +} +CLK_OF_DECLARE(axs10x_pll_clock, "snps,axs10x-arc-pll-clock", + of_axs10x_pll_clk_setup); + +static const struct of_device_id axs10x_pll_clk_id[] = { + { .compatible = "snps,axs10x-pgu-pll-clock", .data = &pgu_pll_cfg}, + { } +}; +MODULE_DEVICE_TABLE(of, axs10x_pll_clk_id); + +static struct platform_driver axs10x_pll_clk_driver = { + .driver = { + .name = "axs10x-pll-clock", + .of_match_table = axs10x_pll_clk_id, + }, + .probe = axs10x_pll_clk_probe, + .remove = axs10x_pll_clk_remove, +}; +builtin_platform_driver(axs10x_pll_clk_driver); + +MODULE_AUTHOR("Vlad Zakharov "); +MODULE_DESCRIPTION("Synopsys AXS10X SDP Generic PLL Clock Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/clk/berlin/bg2.c b/drivers/clk/berlin/bg2.c index 1d99292e2039..e7331ace0337 100644 --- a/drivers/clk/berlin/bg2.c +++ b/drivers/clk/berlin/bg2.c @@ -679,8 +679,7 @@ static void __init berlin2_clock_setup(struct device_node *np) if (!IS_ERR(hws[n])) continue; - pr_err("%s: Unable to register leaf clock %d\n", - np->full_name, n); + pr_err("%pOF: Unable to register leaf clock %d\n", np, n); goto bg2_fail; } diff --git a/drivers/clk/berlin/bg2q.c b/drivers/clk/berlin/bg2q.c index 3b784b593afd..67c270b143f7 100644 --- a/drivers/clk/berlin/bg2q.c +++ b/drivers/clk/berlin/bg2q.c @@ -304,14 +304,14 @@ static void __init berlin2q_clock_setup(struct device_node *np) gbase = of_iomap(parent_np, 0); if (!gbase) { - pr_err("%s: Unable to map global base\n", np->full_name); + pr_err("%pOF: Unable to map global base\n", np); return; } /* BG2Q CPU PLL is not part of global registers */ cpupll_base = of_iomap(parent_np, 1); if (!cpupll_base) { - pr_err("%s: Unable to map cpupll base\n", np->full_name); + pr_err("%pOF: Unable to map cpupll base\n", np); iounmap(gbase); return; } @@ -376,8 +376,7 @@ static void __init berlin2q_clock_setup(struct device_node *np) if (!IS_ERR(hws[n])) continue; - pr_err("%s: Unable to register leaf clock %d\n", - np->full_name, n); + pr_err("%pOF: Unable to register leaf clock %d\n", np, n); goto bg2q_fail; } diff --git a/drivers/clk/clk-asm9260.c b/drivers/clk/clk-asm9260.c index ea8568536193..bf0582cbbf38 100644 --- a/drivers/clk/clk-asm9260.c +++ b/drivers/clk/clk-asm9260.c @@ -338,8 +338,8 @@ static void __init 
asm9260_acc_init(struct device_node *np) if (!IS_ERR(hws[n])) continue; - pr_err("%s: Unable to register leaf clock %d\n", - np->full_name, n); + pr_err("%pOF: Unable to register leaf clock %d\n", + np, n); goto fail; } diff --git a/drivers/clk/clk-bulk.c b/drivers/clk/clk-bulk.c index c834f5abfc49..4c10456f8a32 100644 --- a/drivers/clk/clk-bulk.c +++ b/drivers/clk/clk-bulk.c @@ -105,6 +105,7 @@ int __must_check clk_bulk_prepare(int num_clks, return ret; } +EXPORT_SYMBOL_GPL(clk_bulk_prepare); #endif /* CONFIG_HAVE_CLK_PREPARE */ diff --git a/drivers/clk/clk-conf.c b/drivers/clk/clk-conf.c index 7ec36722f8ab..49819b546134 100644 --- a/drivers/clk/clk-conf.c +++ b/drivers/clk/clk-conf.c @@ -23,8 +23,8 @@ static int __set_clk_parents(struct device_node *node, bool clk_supplier) num_parents = of_count_phandle_with_args(node, "assigned-clock-parents", "#clock-cells"); if (num_parents == -EINVAL) - pr_err("clk: invalid value of clock-parents property at %s\n", - node->full_name); + pr_err("clk: invalid value of clock-parents property at %pOF\n", + node); for (index = 0; index < num_parents; index++) { rc = of_parse_phandle_with_args(node, "assigned-clock-parents", @@ -41,8 +41,8 @@ static int __set_clk_parents(struct device_node *node, bool clk_supplier) pclk = of_clk_get_from_provider(&clkspec); if (IS_ERR(pclk)) { if (PTR_ERR(pclk) != -EPROBE_DEFER) - pr_warn("clk: couldn't get parent clock %d for %s\n", - index, node->full_name); + pr_warn("clk: couldn't get parent clock %d for %pOF\n", + index, node); return PTR_ERR(pclk); } @@ -57,8 +57,8 @@ static int __set_clk_parents(struct device_node *node, bool clk_supplier) clk = of_clk_get_from_provider(&clkspec); if (IS_ERR(clk)) { if (PTR_ERR(clk) != -EPROBE_DEFER) - pr_warn("clk: couldn't get assigned clock %d for %s\n", - index, node->full_name); + pr_warn("clk: couldn't get assigned clock %d for %pOF\n", + index, node); rc = PTR_ERR(clk); goto err; } @@ -102,8 +102,8 @@ static int __set_clk_rates(struct device_node *node, bool clk_supplier) clk = of_clk_get_from_provider(&clkspec); if (IS_ERR(clk)) { if (PTR_ERR(clk) != -EPROBE_DEFER) - pr_warn("clk: couldn't get clock %d for %s\n", - index, node->full_name); + pr_warn("clk: couldn't get clock %d for %pOF\n", + index, node); return PTR_ERR(clk); } diff --git a/drivers/clk/clk-cs2000-cp.c b/drivers/clk/clk-cs2000-cp.c index c54baede4d68..e8ea81c30f0c 100644 --- a/drivers/clk/clk-cs2000-cp.c +++ b/drivers/clk/clk-cs2000-cp.c @@ -343,6 +343,15 @@ static int cs2000_set_rate(struct clk_hw *hw, return __cs2000_set_rate(priv, ch, rate, parent_rate); } +static int cs2000_set_saved_rate(struct cs2000_priv *priv) +{ + int ch = 0; /* it uses ch0 only at this point */ + + return __cs2000_set_rate(priv, ch, + priv->saved_rate, + priv->saved_parent_rate); +} + static int cs2000_enable(struct clk_hw *hw) { struct cs2000_priv *priv = hw_to_priv(hw); @@ -535,11 +544,8 @@ static int cs2000_probe(struct i2c_client *client, static int cs2000_resume(struct device *dev) { struct cs2000_priv *priv = dev_get_drvdata(dev); - int ch = 0; /* it uses ch0 only at this point */ - return __cs2000_set_rate(priv, ch, - priv->saved_rate, - priv->saved_parent_rate); + return cs2000_set_saved_rate(priv); } static const struct dev_pm_ops cs2000_pm_ops = { diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c index 9bb472cccca6..4ed516cb7276 100644 --- a/drivers/clk/clk-divider.c +++ b/drivers/clk/clk-divider.c @@ -385,12 +385,14 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long 
parent_rate) { struct clk_divider *divider = to_clk_divider(hw); - unsigned int value; + int value; unsigned long flags = 0; u32 val; value = divider_get_val(rate, parent_rate, divider->table, divider->width, divider->flags); + if (value < 0) + return value; if (divider->lock) spin_lock_irqsave(divider->lock, flags); @@ -403,7 +405,7 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, val = clk_readl(divider->reg); val &= ~(div_mask(divider->width) << divider->shift); } - val |= value << divider->shift; + val |= (u32)value << divider->shift; clk_writel(val, divider->reg); if (divider->lock) diff --git a/drivers/clk/clk-fractional-divider.c b/drivers/clk/clk-fractional-divider.c index aab904618eb6..fdf625fb10fa 100644 --- a/drivers/clk/clk-fractional-divider.c +++ b/drivers/clk/clk-fractional-divider.c @@ -49,16 +49,12 @@ static unsigned long clk_fd_recalc_rate(struct clk_hw *hw, return ret; } -static long clk_fd_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *parent_rate) +static void clk_fd_general_approximation(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate, + unsigned long *m, unsigned long *n) { struct clk_fractional_divider *fd = to_clk_fd(hw); unsigned long scale; - unsigned long m, n; - u64 ret; - - if (!rate || rate >= *parent_rate) - return *parent_rate; /* * Get rate closer to *parent_rate to guarantee there is no overflow @@ -71,7 +67,23 @@ static long clk_fd_round_rate(struct clk_hw *hw, unsigned long rate, rational_best_approximation(rate, *parent_rate, GENMASK(fd->mwidth - 1, 0), GENMASK(fd->nwidth - 1, 0), - &m, &n); + m, n); +} + +static long clk_fd_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + struct clk_fractional_divider *fd = to_clk_fd(hw); + unsigned long m, n; + u64 ret; + + if (!rate || rate >= *parent_rate) + return *parent_rate; + + if (fd->approximation) + fd->approximation(hw, rate, parent_rate, &m, &n); + else + clk_fd_general_approximation(hw, rate, parent_rate, &m, &n); ret = (u64)*parent_rate * m; do_div(ret, n); diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c index 4e0c054a787c..dd82485e09a1 100644 --- a/drivers/clk/clk-gate.c +++ b/drivers/clk/clk-gate.c @@ -86,7 +86,7 @@ static void clk_gate_disable(struct clk_hw *hw) clk_gate_endisable(hw, 0); } -static int clk_gate_is_enabled(struct clk_hw *hw) +int clk_gate_is_enabled(struct clk_hw *hw) { u32 reg; struct clk_gate *gate = to_clk_gate(hw); @@ -101,6 +101,7 @@ static int clk_gate_is_enabled(struct clk_hw *hw) return reg ? 
1 : 0; } +EXPORT_SYMBOL_GPL(clk_gate_is_enabled); const struct clk_ops clk_gate_ops = { .enable = clk_gate_enable, diff --git a/drivers/clk/clk-gemini.c b/drivers/clk/clk-gemini.c index b4cf2f699a21..f940e5af845b 100644 --- a/drivers/clk/clk-gemini.c +++ b/drivers/clk/clk-gemini.c @@ -37,7 +37,6 @@ static DEFINE_SPINLOCK(gemini_clk_lock); #define GEMINI_GLOBAL_MISC_CONTROL 0x30 #define PCI_CLK_66MHZ BIT(18) -#define PCI_CLK_OE BIT(17) #define GEMINI_GLOBAL_CLOCK_CONTROL 0x34 #define PCI_CLKRUN_EN BIT(16) @@ -159,9 +158,6 @@ static int gemini_pci_enable(struct clk_hw *hw) regmap_update_bits(pciclk->map, GEMINI_GLOBAL_CLOCK_CONTROL, 0, PCI_CLKRUN_EN); - regmap_update_bits(pciclk->map, - GEMINI_GLOBAL_MISC_CONTROL, - 0, PCI_CLK_OE); return 0; } @@ -169,9 +165,6 @@ static void gemini_pci_disable(struct clk_hw *hw) { struct clk_gemini_pci *pciclk = to_pciclk(hw); - regmap_update_bits(pciclk->map, - GEMINI_GLOBAL_MISC_CONTROL, - PCI_CLK_OE, 0); regmap_update_bits(pciclk->map, GEMINI_GLOBAL_CLOCK_CONTROL, PCI_CLKRUN_EN, 0); } diff --git a/drivers/clk/clk-hsdk-pll.c b/drivers/clk/clk-hsdk-pll.c new file mode 100644 index 000000000000..bbf237173b37 --- /dev/null +++ b/drivers/clk/clk-hsdk-pll.c @@ -0,0 +1,431 @@ +/* + * Synopsys HSDK SDP Generic PLL clock driver + * + * Copyright (C) 2017 Synopsys + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CGU_PLL_CTRL 0x000 /* ARC PLL control register */ +#define CGU_PLL_STATUS 0x004 /* ARC PLL status register */ +#define CGU_PLL_FMEAS 0x008 /* ARC PLL frequency measurement register */ +#define CGU_PLL_MON 0x00C /* ARC PLL monitor register */ + +#define CGU_PLL_CTRL_ODIV_SHIFT 2 +#define CGU_PLL_CTRL_IDIV_SHIFT 4 +#define CGU_PLL_CTRL_FBDIV_SHIFT 9 +#define CGU_PLL_CTRL_BAND_SHIFT 20 + +#define CGU_PLL_CTRL_ODIV_MASK GENMASK(3, CGU_PLL_CTRL_ODIV_SHIFT) +#define CGU_PLL_CTRL_IDIV_MASK GENMASK(8, CGU_PLL_CTRL_IDIV_SHIFT) +#define CGU_PLL_CTRL_FBDIV_MASK GENMASK(15, CGU_PLL_CTRL_FBDIV_SHIFT) + +#define CGU_PLL_CTRL_PD BIT(0) +#define CGU_PLL_CTRL_BYPASS BIT(1) + +#define CGU_PLL_STATUS_LOCK BIT(0) +#define CGU_PLL_STATUS_ERR BIT(1) + +#define HSDK_PLL_MAX_LOCK_TIME 100 /* 100 us */ + +#define CGU_PLL_SOURCE_MAX 1 + +#define CORE_IF_CLK_THRESHOLD_HZ 500000000 +#define CREG_CORE_IF_CLK_DIV_1 0x0 +#define CREG_CORE_IF_CLK_DIV_2 0x1 + +struct hsdk_pll_cfg { + u32 rate; + u32 idiv; + u32 fbdiv; + u32 odiv; + u32 band; +}; + +static const struct hsdk_pll_cfg asdt_pll_cfg[] = { + { 100000000, 0, 11, 3, 0 }, + { 133000000, 0, 15, 3, 0 }, + { 200000000, 1, 47, 3, 0 }, + { 233000000, 1, 27, 2, 0 }, + { 300000000, 1, 35, 2, 0 }, + { 333000000, 1, 39, 2, 0 }, + { 400000000, 1, 47, 2, 0 }, + { 500000000, 0, 14, 1, 0 }, + { 600000000, 0, 17, 1, 0 }, + { 700000000, 0, 20, 1, 0 }, + { 800000000, 0, 23, 1, 0 }, + { 900000000, 1, 26, 0, 0 }, + { 1000000000, 1, 29, 0, 0 }, + { 1100000000, 1, 32, 0, 0 }, + { 1200000000, 1, 35, 0, 0 }, + { 1300000000, 1, 38, 0, 0 }, + { 1400000000, 1, 41, 0, 0 }, + { 1500000000, 1, 44, 0, 0 }, + { 1600000000, 1, 47, 0, 0 }, + {} +}; + +static const struct hsdk_pll_cfg hdmi_pll_cfg[] = { + { 297000000, 0, 21, 2, 0 }, + { 540000000, 0, 19, 1, 0 }, + { 594000000, 0, 21, 1, 0 }, + {} +}; + +struct hsdk_pll_clk { + struct clk_hw hw; + void __iomem *regs; + void __iomem *spec_regs; + const struct 
hsdk_pll_devdata *pll_devdata; + struct device *dev; +}; + +struct hsdk_pll_devdata { + const struct hsdk_pll_cfg *pll_cfg; + int (*update_rate)(struct hsdk_pll_clk *clk, unsigned long rate, + const struct hsdk_pll_cfg *cfg); +}; + +static int hsdk_pll_core_update_rate(struct hsdk_pll_clk *, unsigned long, + const struct hsdk_pll_cfg *); +static int hsdk_pll_comm_update_rate(struct hsdk_pll_clk *, unsigned long, + const struct hsdk_pll_cfg *); + +static const struct hsdk_pll_devdata core_pll_devdata = { + .pll_cfg = asdt_pll_cfg, + .update_rate = hsdk_pll_core_update_rate, +}; + +static const struct hsdk_pll_devdata sdt_pll_devdata = { + .pll_cfg = asdt_pll_cfg, + .update_rate = hsdk_pll_comm_update_rate, +}; + +static const struct hsdk_pll_devdata hdmi_pll_devdata = { + .pll_cfg = hdmi_pll_cfg, + .update_rate = hsdk_pll_comm_update_rate, +}; + +static inline void hsdk_pll_write(struct hsdk_pll_clk *clk, u32 reg, u32 val) +{ + iowrite32(val, clk->regs + reg); +} + +static inline u32 hsdk_pll_read(struct hsdk_pll_clk *clk, u32 reg) +{ + return ioread32(clk->regs + reg); +} + +static inline void hsdk_pll_set_cfg(struct hsdk_pll_clk *clk, + const struct hsdk_pll_cfg *cfg) +{ + u32 val = 0; + + /* Powerdown and Bypass bits should be cleared */ + val |= cfg->idiv << CGU_PLL_CTRL_IDIV_SHIFT; + val |= cfg->fbdiv << CGU_PLL_CTRL_FBDIV_SHIFT; + val |= cfg->odiv << CGU_PLL_CTRL_ODIV_SHIFT; + val |= cfg->band << CGU_PLL_CTRL_BAND_SHIFT; + + dev_dbg(clk->dev, "write configuration: %#x\n", val); + + hsdk_pll_write(clk, CGU_PLL_CTRL, val); +} + +static inline bool hsdk_pll_is_locked(struct hsdk_pll_clk *clk) +{ + return !!(hsdk_pll_read(clk, CGU_PLL_STATUS) & CGU_PLL_STATUS_LOCK); +} + +static inline bool hsdk_pll_is_err(struct hsdk_pll_clk *clk) +{ + return !!(hsdk_pll_read(clk, CGU_PLL_STATUS) & CGU_PLL_STATUS_ERR); +} + +static inline struct hsdk_pll_clk *to_hsdk_pll_clk(struct clk_hw *hw) +{ + return container_of(hw, struct hsdk_pll_clk, hw); +} + +static unsigned long hsdk_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + u32 val; + u64 rate; + u32 idiv, fbdiv, odiv; + struct hsdk_pll_clk *clk = to_hsdk_pll_clk(hw); + + val = hsdk_pll_read(clk, CGU_PLL_CTRL); + + dev_dbg(clk->dev, "current configuration: %#x\n", val); + + /* Check if PLL is disabled */ + if (val & CGU_PLL_CTRL_PD) + return 0; + + /* Check if PLL is bypassed */ + if (val & CGU_PLL_CTRL_BYPASS) + return parent_rate; + + /* input divider = reg.idiv + 1 */ + idiv = 1 + ((val & CGU_PLL_CTRL_IDIV_MASK) >> CGU_PLL_CTRL_IDIV_SHIFT); + /* fb divider = 2*(reg.fbdiv + 1) */ + fbdiv = 2 * (1 + ((val & CGU_PLL_CTRL_FBDIV_MASK) >> CGU_PLL_CTRL_FBDIV_SHIFT)); + /* output divider = 2^(reg.odiv) */ + odiv = 1 << ((val & CGU_PLL_CTRL_ODIV_MASK) >> CGU_PLL_CTRL_ODIV_SHIFT); + + rate = (u64)parent_rate * fbdiv; + do_div(rate, idiv * odiv); + + return rate; +} + +static long hsdk_pll_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + int i; + unsigned long best_rate; + struct hsdk_pll_clk *clk = to_hsdk_pll_clk(hw); + const struct hsdk_pll_cfg *pll_cfg = clk->pll_devdata->pll_cfg; + + if (pll_cfg[0].rate == 0) + return -EINVAL; + + best_rate = pll_cfg[0].rate; + + for (i = 1; pll_cfg[i].rate != 0; i++) { + if (abs(rate - pll_cfg[i].rate) < abs(rate - best_rate)) + best_rate = pll_cfg[i].rate; + } + + dev_dbg(clk->dev, "chosen best rate: %lu\n", best_rate); + + return best_rate; +} + +static int hsdk_pll_comm_update_rate(struct hsdk_pll_clk *clk, + unsigned long rate, + const struct hsdk_pll_cfg *cfg) +{ + 
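/* + * The cfg values come from the per-PLL table selected via pll_devdata. As + * an illustration (assuming the 33.33 MHz reference implied by that table), + * the 1 GHz asdt entry { 1000000000, 1, 29, 0, 0 } gives + * 33333333 * 2 * (29 + 1) / ((1 + 1) * 2^0) ~= 1 GHz, matching the formula + * in hsdk_pll_recalc_rate(). + */ + 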
hsdk_pll_set_cfg(clk, cfg); + + /* + * Wait until CGU relocks and check error status. + * If the CGU is still not locked after the timeout, return an error. + */ + udelay(HSDK_PLL_MAX_LOCK_TIME); + if (!hsdk_pll_is_locked(clk)) + return -ETIMEDOUT; + + if (hsdk_pll_is_err(clk)) + return -EINVAL; + + return 0; +} + +static int hsdk_pll_core_update_rate(struct hsdk_pll_clk *clk, + unsigned long rate, + const struct hsdk_pll_cfg *cfg) +{ + /* + * When core clock exceeds 500MHz, the divider for the interface + * clock must be programmed to div-by-2. + */ + if (rate > CORE_IF_CLK_THRESHOLD_HZ) + iowrite32(CREG_CORE_IF_CLK_DIV_2, clk->spec_regs); + + hsdk_pll_set_cfg(clk, cfg); + + /* + * Wait until CGU relocks and check error status. + * If the CGU is still not locked after the timeout, return an error. + */ + udelay(HSDK_PLL_MAX_LOCK_TIME); + if (!hsdk_pll_is_locked(clk)) + return -ETIMEDOUT; + + if (hsdk_pll_is_err(clk)) + return -EINVAL; + + /* + * Program the divider back to div-by-1 if we successfully set the core + * clock below the 500MHz threshold. + */ + if (rate <= CORE_IF_CLK_THRESHOLD_HZ) + iowrite32(CREG_CORE_IF_CLK_DIV_1, clk->spec_regs); + + return 0; +} + +static int hsdk_pll_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + int i; + struct hsdk_pll_clk *clk = to_hsdk_pll_clk(hw); + const struct hsdk_pll_cfg *pll_cfg = clk->pll_devdata->pll_cfg; + + for (i = 0; pll_cfg[i].rate != 0; i++) { + if (pll_cfg[i].rate == rate) { + return clk->pll_devdata->update_rate(clk, rate, + &pll_cfg[i]); + } + } + + dev_err(clk->dev, "invalid rate=%ld, parent_rate=%ld\n", rate, + parent_rate); + + return -EINVAL; +} + +static const struct clk_ops hsdk_pll_ops = { + .recalc_rate = hsdk_pll_recalc_rate, + .round_rate = hsdk_pll_round_rate, + .set_rate = hsdk_pll_set_rate, +}; + +static int hsdk_pll_clk_probe(struct platform_device *pdev) +{ + int ret; + struct resource *mem; + const char *parent_name; + unsigned int num_parents; + struct hsdk_pll_clk *pll_clk; + struct clk_init_data init = { }; + struct device *dev = &pdev->dev; + + pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL); + if (!pll_clk) + return -ENOMEM; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + pll_clk->regs = devm_ioremap_resource(dev, mem); + if (IS_ERR(pll_clk->regs)) + return PTR_ERR(pll_clk->regs); + + init.name = dev->of_node->name; + init.ops = &hsdk_pll_ops; + parent_name = of_clk_get_parent_name(dev->of_node, 0); + init.parent_names = &parent_name; + num_parents = of_clk_get_parent_count(dev->of_node); + if (num_parents == 0 || num_parents > CGU_PLL_SOURCE_MAX) { + dev_err(dev, "wrong number of clock parents: %u\n", num_parents); + return -EINVAL; + } + init.num_parents = num_parents; + + pll_clk->hw.init = &init; + pll_clk->dev = dev; + pll_clk->pll_devdata = of_device_get_match_data(dev); + + if (!pll_clk->pll_devdata) { + dev_err(dev, "No OF match data provided\n"); + return -EINVAL; + } + + ret = devm_clk_hw_register(dev, &pll_clk->hw); + if (ret) { + dev_err(dev, "failed to register %s clock\n", init.name); + return ret; + } + + return of_clk_add_hw_provider(dev->of_node, of_clk_hw_simple_get, + &pll_clk->hw); +} + +static int hsdk_pll_clk_remove(struct platform_device *pdev) +{ + of_clk_del_provider(pdev->dev.of_node); + return 0; +} + +static void __init of_hsdk_pll_clk_setup(struct device_node *node) +{ + int ret; + const char *parent_name; + unsigned int num_parents; + struct hsdk_pll_clk *pll_clk; + struct clk_init_data init = { }; + + pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL); + if (!pll_clk) + return; + + 
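/* + * Early registration path (CLK_OF_DECLARE) used for the core PLL: reg + * index 0 is the CGU PLL register block, reg index 1 the CREG divider + * that hsdk_pll_core_update_rate() programs for the core interface clock. + */ + 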
pll_clk->regs = of_iomap(node, 0); + if (!pll_clk->regs) { + pr_err("failed to map pll registers\n"); + goto err_free_pll_clk; + } + + pll_clk->spec_regs = of_iomap(node, 1); + if (!pll_clk->spec_regs) { + pr_err("failed to map pll registers\n"); + goto err_unmap_comm_regs; + } + + init.name = node->name; + init.ops = &hsdk_pll_ops; + parent_name = of_clk_get_parent_name(node, 0); + init.parent_names = &parent_name; + num_parents = of_clk_get_parent_count(node); + if (num_parents > CGU_PLL_SOURCE_MAX) { + pr_err("too much clock parents: %u\n", num_parents); + goto err_unmap_spec_regs; + } + init.num_parents = num_parents; + + pll_clk->hw.init = &init; + pll_clk->pll_devdata = &core_pll_devdata; + + ret = clk_hw_register(NULL, &pll_clk->hw); + if (ret) { + pr_err("failed to register %s clock\n", node->name); + goto err_unmap_spec_regs; + } + + ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, &pll_clk->hw); + if (ret) { + pr_err("failed to add hw provider for %s clock\n", node->name); + goto err_unmap_spec_regs; + } + + return; + +err_unmap_spec_regs: + iounmap(pll_clk->spec_regs); +err_unmap_comm_regs: + iounmap(pll_clk->regs); +err_free_pll_clk: + kfree(pll_clk); +} + +/* Core PLL needed early for ARC cpus timers */ +CLK_OF_DECLARE(hsdk_pll_clock, "snps,hsdk-core-pll-clock", +of_hsdk_pll_clk_setup); + +static const struct of_device_id hsdk_pll_clk_id[] = { + { .compatible = "snps,hsdk-gp-pll-clock", .data = &sdt_pll_devdata}, + { .compatible = "snps,hsdk-hdmi-pll-clock", .data = &hdmi_pll_devdata}, + { } +}; + +static struct platform_driver hsdk_pll_clk_driver = { + .driver = { + .name = "hsdk-gp-pll-clock", + .of_match_table = hsdk_pll_clk_id, + }, + .probe = hsdk_pll_clk_probe, + .remove = hsdk_pll_clk_remove, +}; +builtin_platform_driver(hsdk_pll_clk_driver); diff --git a/drivers/clk/clk-mb86s7x.c b/drivers/clk/clk-mb86s7x.c deleted file mode 100644 index 2a83a3ff1d09..000000000000 --- a/drivers/clk/clk-mb86s7x.c +++ /dev/null @@ -1,390 +0,0 @@ -/* - * Copyright (C) 2013-2015 FUJITSU SEMICONDUCTOR LIMITED - * Copyright (C) 2015 Linaro Ltd. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#define to_crg_clk(p) container_of(p, struct crg_clk, hw) -#define to_clc_clk(p) container_of(p, struct cl_clk, hw) - -struct mb86s7x_peri_clk { - u32 payload_size; - u32 cntrlr; - u32 domain; - u32 port; - u32 en; - u64 frequency; -} __packed __aligned(4); - -struct hack_rate { - unsigned clk_id; - unsigned long rate; - int gated; -}; - -struct crg_clk { - struct clk_hw hw; - u8 cntrlr, domain, port; -}; - -static int crg_gate_control(struct clk_hw *hw, int en) -{ - struct crg_clk *crgclk = to_crg_clk(hw); - struct mb86s7x_peri_clk cmd; - int ret; - - cmd.payload_size = sizeof(cmd); - cmd.cntrlr = crgclk->cntrlr; - cmd.domain = crgclk->domain; - cmd.port = crgclk->port; - cmd.en = en; - - /* Port is UngatedCLK */ - if (cmd.port == 8) - return en ? 
0 : -EINVAL; - - pr_debug("%s:%d CMD Cntrlr-%u Dom-%u Port-%u En-%u}\n", - __func__, __LINE__, cmd.cntrlr, - cmd.domain, cmd.port, cmd.en); - - ret = mb86s7x_send_packet(CMD_PERI_CLOCK_GATE_SET_REQ, - &cmd, sizeof(cmd)); - if (ret < 0) { - pr_err("%s:%d failed!\n", __func__, __LINE__); - return ret; - } - - pr_debug("%s:%d REP Cntrlr-%u Dom-%u Port-%u En-%u}\n", - __func__, __LINE__, cmd.cntrlr, - cmd.domain, cmd.port, cmd.en); - - /* If the request was rejected */ - if (cmd.en != en) - ret = -EINVAL; - else - ret = 0; - - return ret; -} - -static int crg_port_prepare(struct clk_hw *hw) -{ - return crg_gate_control(hw, 1); -} - -static void crg_port_unprepare(struct clk_hw *hw) -{ - crg_gate_control(hw, 0); -} - -static int -crg_rate_control(struct clk_hw *hw, int set, unsigned long *rate) -{ - struct crg_clk *crgclk = to_crg_clk(hw); - struct mb86s7x_peri_clk cmd; - int code, ret; - - cmd.payload_size = sizeof(cmd); - cmd.cntrlr = crgclk->cntrlr; - cmd.domain = crgclk->domain; - cmd.port = crgclk->port; - cmd.frequency = *rate; - - if (set) { - code = CMD_PERI_CLOCK_RATE_SET_REQ; - pr_debug("%s:%d CMD Cntrlr-%u Dom-%u Port-%u Rate-SET %lluHz}\n", - __func__, __LINE__, cmd.cntrlr, - cmd.domain, cmd.port, cmd.frequency); - } else { - code = CMD_PERI_CLOCK_RATE_GET_REQ; - pr_debug("%s:%d CMD Cntrlr-%u Dom-%u Port-%u Rate-GET}\n", - __func__, __LINE__, cmd.cntrlr, - cmd.domain, cmd.port); - } - - ret = mb86s7x_send_packet(code, &cmd, sizeof(cmd)); - if (ret < 0) { - pr_err("%s:%d failed!\n", __func__, __LINE__); - return ret; - } - - if (set) - pr_debug("%s:%d REP Cntrlr-%u Dom-%u Port-%u Rate-SET %lluHz}\n", - __func__, __LINE__, cmd.cntrlr, - cmd.domain, cmd.port, cmd.frequency); - else - pr_debug("%s:%d REP Cntrlr-%u Dom-%u Port-%u Rate-GOT %lluHz}\n", - __func__, __LINE__, cmd.cntrlr, - cmd.domain, cmd.port, cmd.frequency); - - *rate = cmd.frequency; - return 0; -} - -static unsigned long -crg_port_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) -{ - unsigned long rate; - - crg_rate_control(hw, 0, &rate); - - return rate; -} - -static long -crg_port_round_rate(struct clk_hw *hw, - unsigned long rate, unsigned long *pr) -{ - return rate; -} - -static int -crg_port_set_rate(struct clk_hw *hw, - unsigned long rate, unsigned long parent_rate) -{ - return crg_rate_control(hw, 1, &rate); -} - -const struct clk_ops crg_port_ops = { - .prepare = crg_port_prepare, - .unprepare = crg_port_unprepare, - .recalc_rate = crg_port_recalc_rate, - .round_rate = crg_port_round_rate, - .set_rate = crg_port_set_rate, -}; - -struct mb86s70_crg11 { - struct mutex lock; /* protects CLK populating and searching */ -}; - -static struct clk *crg11_get(struct of_phandle_args *clkspec, void *data) -{ - struct mb86s70_crg11 *crg11 = data; - struct clk_init_data init; - u32 cntrlr, domain, port; - struct crg_clk *crgclk; - struct clk *clk; - char clkp[20]; - - if (clkspec->args_count != 3) - return ERR_PTR(-EINVAL); - - cntrlr = clkspec->args[0]; - domain = clkspec->args[1]; - port = clkspec->args[2]; - - if (port > 7) - snprintf(clkp, 20, "UngatedCLK%d_%X", cntrlr, domain); - else - snprintf(clkp, 20, "CLK%d_%X_%d", cntrlr, domain, port); - - mutex_lock(&crg11->lock); - - clk = __clk_lookup(clkp); - if (clk) { - mutex_unlock(&crg11->lock); - return clk; - } - - crgclk = kzalloc(sizeof(*crgclk), GFP_KERNEL); - if (!crgclk) { - mutex_unlock(&crg11->lock); - return ERR_PTR(-ENOMEM); - } - - init.name = clkp; - init.num_parents = 0; - init.ops = &crg_port_ops; - init.flags = 0; - crgclk->hw.init = &init; - 
crgclk->cntrlr = cntrlr; - crgclk->domain = domain; - crgclk->port = port; - clk = clk_register(NULL, &crgclk->hw); - if (IS_ERR(clk)) - pr_err("%s:%d Error!\n", __func__, __LINE__); - else - pr_debug("Registered %s\n", clkp); - - clk_register_clkdev(clk, clkp, NULL); - mutex_unlock(&crg11->lock); - return clk; -} - -static void __init crg_port_init(struct device_node *node) -{ - struct mb86s70_crg11 *crg11; - - crg11 = kzalloc(sizeof(*crg11), GFP_KERNEL); - if (!crg11) - return; - - mutex_init(&crg11->lock); - - of_clk_add_provider(node, crg11_get, crg11); -} -CLK_OF_DECLARE(crg11_gate, "fujitsu,mb86s70-crg11", crg_port_init); - -struct cl_clk { - struct clk_hw hw; - int cluster; -}; - -struct mb86s7x_cpu_freq { - u32 payload_size; - u32 cluster_class; - u32 cluster_id; - u32 cpu_id; - u64 frequency; -}; - -static void mhu_cluster_rate(struct clk_hw *hw, unsigned long *rate, int get) -{ - struct cl_clk *clc = to_clc_clk(hw); - struct mb86s7x_cpu_freq cmd; - int code, ret; - - cmd.payload_size = sizeof(cmd); - cmd.cluster_class = 0; - cmd.cluster_id = clc->cluster; - cmd.cpu_id = 0; - cmd.frequency = *rate; - - if (get) - code = CMD_CPU_CLOCK_RATE_GET_REQ; - else - code = CMD_CPU_CLOCK_RATE_SET_REQ; - - pr_debug("%s:%d CMD Cl_Class-%u CL_ID-%u CPU_ID-%u Freq-%llu}\n", - __func__, __LINE__, cmd.cluster_class, - cmd.cluster_id, cmd.cpu_id, cmd.frequency); - - ret = mb86s7x_send_packet(code, &cmd, sizeof(cmd)); - if (ret < 0) { - pr_err("%s:%d failed!\n", __func__, __LINE__); - return; - } - - pr_debug("%s:%d REP Cl_Class-%u CL_ID-%u CPU_ID-%u Freq-%llu}\n", - __func__, __LINE__, cmd.cluster_class, - cmd.cluster_id, cmd.cpu_id, cmd.frequency); - - *rate = cmd.frequency; -} - -static unsigned long -clc_recalc_rate(struct clk_hw *hw, unsigned long unused) -{ - unsigned long rate; - - mhu_cluster_rate(hw, &rate, 1); - return rate; -} - -static long -clc_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *unused) -{ - return rate; -} - -static int -clc_set_rate(struct clk_hw *hw, unsigned long rate, - unsigned long unused) -{ - unsigned long res = rate; - - mhu_cluster_rate(hw, &res, 0); - - return (res == rate) ? 
0 : -EINVAL; -} - -static struct clk_ops clk_clc_ops = { - .recalc_rate = clc_recalc_rate, - .round_rate = clc_round_rate, - .set_rate = clc_set_rate, -}; - -static struct clk_hw *mb86s7x_clclk_register(struct device *cpu_dev) -{ - struct clk_init_data init; - struct cl_clk *clc; - int ret; - - clc = kzalloc(sizeof(*clc), GFP_KERNEL); - if (!clc) - return ERR_PTR(-ENOMEM); - - clc->hw.init = &init; - clc->cluster = topology_physical_package_id(cpu_dev->id); - - init.name = dev_name(cpu_dev); - init.ops = &clk_clc_ops; - init.flags = CLK_GET_RATE_NOCACHE; - init.num_parents = 0; - - ret = devm_clk_hw_register(cpu_dev, &clc->hw); - if (ret) - return ERR_PTR(ret); - return &clc->hw; -} - -static int mb86s7x_clclk_of_init(void) -{ - int cpu, ret = -ENODEV; - struct device_node *np; - struct clk_hw *hw; - - np = of_find_compatible_node(NULL, NULL, "fujitsu,mb86s70-scb-1.0"); - if (!np || !of_device_is_available(np)) - goto exit; - - for_each_possible_cpu(cpu) { - struct device *cpu_dev = get_cpu_device(cpu); - - if (!cpu_dev) { - pr_err("failed to get cpu%d device\n", cpu); - continue; - } - - hw = mb86s7x_clclk_register(cpu_dev); - if (IS_ERR(hw)) { - pr_err("failed to register cpu%d clock\n", cpu); - continue; - } - if (clk_hw_register_clkdev(hw, NULL, dev_name(cpu_dev))) { - pr_err("failed to register cpu%d clock lookup\n", cpu); - continue; - } - pr_debug("registered clk for %s\n", dev_name(cpu_dev)); - } - ret = 0; - - platform_device_register_simple("arm-bL-cpufreq-dt", -1, NULL, 0); -exit: - of_node_put(np); - return ret; -} -module_init(mb86s7x_clclk_of_init); diff --git a/drivers/clk/clk-moxart.c b/drivers/clk/clk-moxart.c index b86dac851116..58428d0043fd 100644 --- a/drivers/clk/clk-moxart.c +++ b/drivers/clk/clk-moxart.c @@ -18,7 +18,7 @@ static void __init moxart_of_pll_clk_init(struct device_node *node) { - static void __iomem *base; + void __iomem *base; struct clk_hw *hw; struct clk *ref_clk; unsigned int mul; @@ -30,7 +30,7 @@ static void __init moxart_of_pll_clk_init(struct device_node *node) base = of_iomap(node, 0); if (!base) { - pr_err("%s: of_iomap failed\n", node->full_name); + pr_err("%pOF: of_iomap failed\n", node); return; } @@ -39,13 +39,13 @@ static void __init moxart_of_pll_clk_init(struct device_node *node) ref_clk = of_clk_get(node, 0); if (IS_ERR(ref_clk)) { - pr_err("%s: of_clk_get failed\n", node->full_name); + pr_err("%pOF: of_clk_get failed\n", node); return; } hw = clk_hw_register_fixed_factor(NULL, name, parent_name, 0, mul, 1); if (IS_ERR(hw)) { - pr_err("%s: failed to register clock\n", node->full_name); + pr_err("%pOF: failed to register clock\n", node); return; } @@ -57,7 +57,7 @@ CLK_OF_DECLARE(moxart_pll_clock, "moxa,moxart-pll-clock", static void __init moxart_of_apb_clk_init(struct device_node *node) { - static void __iomem *base; + void __iomem *base; struct clk_hw *hw; struct clk *pll_clk; unsigned int div, val; @@ -70,7 +70,7 @@ static void __init moxart_of_apb_clk_init(struct device_node *node) base = of_iomap(node, 0); if (!base) { - pr_err("%s: of_iomap failed\n", node->full_name); + pr_err("%pOF: of_iomap failed\n", node); return; } @@ -83,13 +83,13 @@ static void __init moxart_of_apb_clk_init(struct device_node *node) pll_clk = of_clk_get(node, 0); if (IS_ERR(pll_clk)) { - pr_err("%s: of_clk_get failed\n", node->full_name); + pr_err("%pOF: of_clk_get failed\n", node); return; } hw = clk_hw_register_fixed_factor(NULL, name, parent_name, 0, 1, div); if (IS_ERR(hw)) { - pr_err("%s: failed to register clock\n", node->full_name); + pr_err("%pOF: 
failed to register clock\n", node); return; } diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c index f3931e38fac0..b0ea753b8709 100644 --- a/drivers/clk/clk-qoriq.c +++ b/drivers/clk/clk-qoriq.c @@ -12,6 +12,7 @@ #include #include +#include #include #include #include @@ -536,6 +537,17 @@ static const struct clockgen_chipinfo chipinfo[] = { .pll_mask = 0x07, .flags = CG_PLL_8BIT, }, + { + .compat = "fsl,ls1088a-clockgen", + .cmux_groups = { + &clockgen2_cmux_cga12 + }, + .cmux_to_group = { + 0, 0, -1 + }, + .pll_mask = 0x07, + .flags = CG_VER3 | CG_LITTLE_ENDIAN, + }, { .compat = "fsl,ls1012a-clockgen", .cmux_groups = { @@ -1113,6 +1125,7 @@ static void __init create_one_pll(struct clockgen *cg, int idx) for (i = 0; i < ARRAY_SIZE(pll->div); i++) { struct clk *clk; + int ret; snprintf(pll->div[i].name, sizeof(pll->div[i].name), "cg-pll%d-div%d", idx, i + 1); @@ -1126,6 +1139,11 @@ static void __init create_one_pll(struct clockgen *cg, int idx) } pll->div[i].clk = clk; + ret = clk_register_clkdev(clk, pll->div[i].name, NULL); + if (ret != 0) + pr_err("%s: %s: register to lookup table failed %ld\n", + __func__, pll->div[i].name, PTR_ERR(clk)); + } } @@ -1348,8 +1366,7 @@ static void __init clockgen_init(struct device_node *np) } if (i == ARRAY_SIZE(chipinfo)) { - pr_err("%s: unknown clockgen node %s\n", __func__, - np->full_name); + pr_err("%s: unknown clockgen node %pOF\n", __func__, np); goto err; } clockgen.info = chipinfo[i]; @@ -1362,8 +1379,8 @@ static void __init clockgen_init(struct device_node *np) if (guts) { clockgen.guts = of_iomap(guts, 0); if (!clockgen.guts) { - pr_err("%s: Couldn't map %s regs\n", __func__, - guts->full_name); + pr_err("%s: Couldn't map %pOF regs\n", __func__, + guts); } } @@ -1398,6 +1415,7 @@ CLK_OF_DECLARE(qoriq_clockgen_ls1012a, "fsl,ls1012a-clockgen", clockgen_init); CLK_OF_DECLARE(qoriq_clockgen_ls1021a, "fsl,ls1021a-clockgen", clockgen_init); CLK_OF_DECLARE(qoriq_clockgen_ls1043a, "fsl,ls1043a-clockgen", clockgen_init); CLK_OF_DECLARE(qoriq_clockgen_ls1046a, "fsl,ls1046a-clockgen", clockgen_init); +CLK_OF_DECLARE(qoriq_clockgen_ls1088a, "fsl,ls1088a-clockgen", clockgen_init); CLK_OF_DECLARE(qoriq_clockgen_ls2080a, "fsl,ls2080a-clockgen", clockgen_init); /* Legacy nodes */ diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c index 2492442eea77..20d90769cced 100644 --- a/drivers/clk/clk-si5351.c +++ b/drivers/clk/clk-si5351.c @@ -519,6 +519,11 @@ static int si5351_pll_set_rate(struct clk_hw *hw, unsigned long rate, SI5351_CLK_INTEGER_MODE, (hwdata->params.p2 == 0) ? SI5351_CLK_INTEGER_MODE : 0); + /* Do a pll soft reset on the affected pll */ + si5351_reg_write(hwdata->drvdata, SI5351_PLL_RESET, + hwdata->num == 0 ? SI5351_PLL_RESET_A : + SI5351_PLL_RESET_B); + dev_dbg(&hwdata->drvdata->client->dev, "%s - %s: p1 = %lu, p2 = %lu, p3 = %lu, parent_rate = %lu, rate = %lu\n", __func__, clk_hw_get_name(hw), @@ -1091,13 +1096,6 @@ static int si5351_clkout_set_rate(struct clk_hw *hw, unsigned long rate, si5351_set_bits(hwdata->drvdata, SI5351_CLK0_CTRL + hwdata->num, SI5351_CLK_POWERDOWN, 0); - /* - * Do a pll soft reset on both plls, needed in some cases to get - * all outputs running. 
- */ - si5351_reg_write(hwdata->drvdata, SI5351_PLL_RESET, - SI5351_PLL_RESET_A | SI5351_PLL_RESET_B); - dev_dbg(&hwdata->drvdata->client->dev, "%s - %s: rdiv = %u, parent_rate = %lu, rate = %lu\n", __func__, clk_hw_get_name(hw), (1 << rdiv), diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c index 68e2a4e499f1..96c6b6bc8f0e 100644 --- a/drivers/clk/clk-stm32f4.c +++ b/drivers/clk/clk-stm32f4.c @@ -1541,8 +1541,8 @@ static void __init stm32f4_rcc_init(struct device_node *np) base + gd->offset, gd->bit_idx, 0, &stm32f4_clk_lock); if (IS_ERR(clks[idx])) { - pr_err("%s: Unable to register leaf clock %s\n", - np->full_name, gd->name); + pr_err("%pOF: Unable to register leaf clock %s\n", + np, gd->name); goto fail; } } diff --git a/drivers/clk/clk-stm32h7.c b/drivers/clk/clk-stm32h7.c new file mode 100644 index 000000000000..a94c3f56c590 --- /dev/null +++ b/drivers/clk/clk-stm32h7.c @@ -0,0 +1,1410 @@ +/* + * Copyright (C) Gabriel Fernandez 2017 + * Author: Gabriel Fernandez + * + * License terms: GPL V2.0. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/* Reset Clock Control Registers */ +#define RCC_CR 0x00 +#define RCC_CFGR 0x10 +#define RCC_D1CFGR 0x18 +#define RCC_D2CFGR 0x1C +#define RCC_D3CFGR 0x20 +#define RCC_PLLCKSELR 0x28 +#define RCC_PLLCFGR 0x2C +#define RCC_PLL1DIVR 0x30 +#define RCC_PLL1FRACR 0x34 +#define RCC_PLL2DIVR 0x38 +#define RCC_PLL2FRACR 0x3C +#define RCC_PLL3DIVR 0x40 +#define RCC_PLL3FRACR 0x44 +#define RCC_D1CCIPR 0x4C +#define RCC_D2CCIP1R 0x50 +#define RCC_D2CCIP2R 0x54 +#define RCC_D3CCIPR 0x58 +#define RCC_BDCR 0x70 +#define RCC_CSR 0x74 +#define RCC_AHB3ENR 0xD4 +#define RCC_AHB1ENR 0xD8 +#define RCC_AHB2ENR 0xDC +#define RCC_AHB4ENR 0xE0 +#define RCC_APB3ENR 0xE4 +#define RCC_APB1LENR 0xE8 +#define RCC_APB1HENR 0xEC +#define RCC_APB2ENR 0xF0 +#define RCC_APB4ENR 0xF4 + +static DEFINE_SPINLOCK(stm32rcc_lock); + +static void __iomem *base; +static struct clk_hw **hws; + +/* System clock parent */ +static const char * const sys_src[] = { + "hsi_ck", "csi_ck", "hse_ck", "pll1_p" }; + +static const char * const tracein_src[] = { + "hsi_ck", "csi_ck", "hse_ck", "pll1_r" }; + +static const char * const per_src[] = { + "hsi_ker", "csi_ker", "hse_ck", "disabled" }; + +static const char * const pll_src[] = { + "hsi_ck", "csi_ck", "hse_ck", "no clock" }; + +static const char * const sdmmc_src[] = { "pll1_q", "pll2_r" }; + +static const char * const dsi_src[] = { "ck_dsi_phy", "pll2_q" }; + +static const char * const qspi_src[] = { + "hclk", "pll1_q", "pll2_r", "per_ck" }; + +static const char * const fmc_src[] = { + "hclk", "pll1_q", "pll2_r", "per_ck" }; + +/* Kernel clock parent */ +static const char * const swp_src[] = { "pclk1", "hsi_ker" }; + +static const char * const fdcan_src[] = { "hse_ck", "pll1_q", "pll2_q" }; + +static const char * const dfsdm1_src[] = { "pclk2", "sys_ck" }; + +static const char * const 
spdifrx_src[] = { + "pll1_q", "pll2_r", "pll3_r", "hsi_ker" }; + +static const char *spi_src1[5] = { + "pll1_q", "pll2_p", "pll3_p", NULL, "per_ck" }; + +static const char * const spi_src2[] = { + "pclk2", "pll2_q", "pll3_q", "hsi_ker", "csi_ker", "hse_ck" }; + +static const char * const spi_src3[] = { + "pclk4", "pll2_q", "pll3_q", "hsi_ker", "csi_ker", "hse_ck" }; + +static const char * const lptim_src1[] = { + "pclk1", "pll2_p", "pll3_r", "lse_ck", "lsi_ck", "per_ck" }; + +static const char * const lptim_src2[] = { + "pclk4", "pll2_p", "pll3_r", "lse_ck", "lsi_ck", "per_ck" }; + +static const char * const cec_src[] = {"lse_ck", "lsi_ck", "csi_ker_div122" }; + +static const char * const usbotg_src[] = {"pll1_q", "pll3_q", "rc48_ck" }; + +/* i2c 1,2,3 src */ +static const char * const i2c_src1[] = { + "pclk1", "pll3_r", "hsi_ker", "csi_ker" }; + +static const char * const i2c_src2[] = { + "pclk4", "pll3_r", "hsi_ker", "csi_ker" }; + +static const char * const rng_src[] = { + "rc48_ck", "pll1_q", "lse_ck", "lsi_ck" }; + +/* usart 1,6 src */ +static const char * const usart_src1[] = { + "pclk2", "pll2_q", "pll3_q", "hsi_ker", "csi_ker", "lse_ck" }; + +/* usart 2,3,4,5,7,8 src */ +static const char * const usart_src2[] = { + "pclk1", "pll2_q", "pll3_q", "hsi_ker", "csi_ker", "lse_ck" }; + +static const char *sai_src[5] = { + "pll1_q", "pll2_p", "pll3_p", NULL, "per_ck" }; + +static const char * const adc_src[] = { "pll2_p", "pll3_r", "per_ck" }; + +/* lptim 2,3,4,5 src */ +static const char * const lpuart1_src[] = { + "pclk3", "pll2_q", "pll3_q", "csi_ker", "lse_ck" }; + +static const char * const hrtim_src[] = { "tim2_ker", "d1cpre" }; + +/* RTC clock parent */ +static const char * const rtc_src[] = { "off", "lse_ck", "lsi_ck", "hse_1M" }; + +/* Micro-controller output clock parent */ +static const char * const mco_src1[] = { + "hsi_ck", "lse_ck", "hse_ck", "pll1_q", "rc48_ck" }; + +static const char * const mco_src2[] = { + "sys_ck", "pll2_p", "hse_ck", "pll1_p", "csi_ck", "lsi_ck" }; + +/* LCD clock */ +static const char * const ltdc_src[] = {"pll3_r"}; + +/* Gate clock with ready bit and backup domain management */ +struct stm32_ready_gate { + struct clk_gate gate; + u8 bit_rdy; +}; + +#define to_ready_gate_clk(_rgate) container_of(_rgate, struct stm32_ready_gate,\ + gate) + +#define RGATE_TIMEOUT 10000 + +static int ready_gate_clk_enable(struct clk_hw *hw) +{ + struct clk_gate *gate = to_clk_gate(hw); + struct stm32_ready_gate *rgate = to_ready_gate_clk(gate); + int bit_status; + unsigned int timeout = RGATE_TIMEOUT; + + if (clk_gate_ops.is_enabled(hw)) + return 0; + + clk_gate_ops.enable(hw); + + /* We can't use readl_poll_timeout() because we can blocked if + * someone enables this clock before clocksource changes. + * Only jiffies counter is available. Jiffies are incremented by + * interruptions and enable op does not allow to be interrupted. 
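+ * With RGATE_TIMEOUT at 10000 iterations of udelay(100), the worst-case + * busy-wait before giving up is therefore about one second.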
+ */ + do { + bit_status = !(readl(gate->reg) & BIT(rgate->bit_rdy)); + + if (bit_status) + udelay(100); + + } while (bit_status && --timeout); + + return bit_status; +} + +static void ready_gate_clk_disable(struct clk_hw *hw) +{ + struct clk_gate *gate = to_clk_gate(hw); + struct stm32_ready_gate *rgate = to_ready_gate_clk(gate); + int bit_status; + unsigned int timeout = RGATE_TIMEOUT; + + if (!clk_gate_ops.is_enabled(hw)) + return; + + clk_gate_ops.disable(hw); + + do { + bit_status = !!(readl(gate->reg) & BIT(rgate->bit_rdy)); + + if (bit_status) + udelay(100); + + } while (bit_status && --timeout); +} + +static const struct clk_ops ready_gate_clk_ops = { + .enable = ready_gate_clk_enable, + .disable = ready_gate_clk_disable, + .is_enabled = clk_gate_is_enabled, +}; + +static struct clk_hw *clk_register_ready_gate(struct device *dev, + const char *name, const char *parent_name, + void __iomem *reg, u8 bit_idx, u8 bit_rdy, + unsigned long flags, spinlock_t *lock) +{ + struct stm32_ready_gate *rgate; + struct clk_init_data init = { NULL }; + struct clk_hw *hw; + int ret; + + rgate = kzalloc(sizeof(*rgate), GFP_KERNEL); + if (!rgate) + return ERR_PTR(-ENOMEM); + + init.name = name; + init.ops = &ready_gate_clk_ops; + init.flags = flags; + init.parent_names = &parent_name; + init.num_parents = 1; + + rgate->bit_rdy = bit_rdy; + rgate->gate.lock = lock; + rgate->gate.reg = reg; + rgate->gate.bit_idx = bit_idx; + rgate->gate.hw.init = &init; + + hw = &rgate->gate.hw; + ret = clk_hw_register(dev, hw); + if (ret) { + kfree(rgate); + hw = ERR_PTR(ret); + } + + return hw; +} + +struct gate_cfg { + u32 offset; + u8 bit_idx; +}; + +struct muxdiv_cfg { + u32 offset; + u8 shift; + u8 width; +}; + +struct composite_clk_cfg { + struct gate_cfg *gate; + struct muxdiv_cfg *mux; + struct muxdiv_cfg *div; + const char *name; + const char * const *parent_name; + int num_parents; + u32 flags; +}; + +struct composite_clk_gcfg_t { + u8 flags; + const struct clk_ops *ops; +}; + +/* + * General config definition of a composite clock (only clock diviser for rate) + */ +struct composite_clk_gcfg { + struct composite_clk_gcfg_t *mux; + struct composite_clk_gcfg_t *div; + struct composite_clk_gcfg_t *gate; +}; + +#define M_CFG_MUX(_mux_ops, _mux_flags)\ + .mux = &(struct composite_clk_gcfg_t) { _mux_flags, _mux_ops} + +#define M_CFG_DIV(_rate_ops, _rate_flags)\ + .div = &(struct composite_clk_gcfg_t) {_rate_flags, _rate_ops} + +#define M_CFG_GATE(_gate_ops, _gate_flags)\ + .gate = &(struct composite_clk_gcfg_t) { _gate_flags, _gate_ops} + +static struct clk_mux *_get_cmux(void __iomem *reg, u8 shift, u8 width, + u32 flags, spinlock_t *lock) +{ + struct clk_mux *mux; + + mux = kzalloc(sizeof(*mux), GFP_KERNEL); + if (!mux) + return ERR_PTR(-ENOMEM); + + mux->reg = reg; + mux->shift = shift; + mux->mask = (1 << width) - 1; + mux->flags = flags; + mux->lock = lock; + + return mux; +} + +static struct clk_divider *_get_cdiv(void __iomem *reg, u8 shift, u8 width, + u32 flags, spinlock_t *lock) +{ + struct clk_divider *div; + + div = kzalloc(sizeof(*div), GFP_KERNEL); + + if (!div) + return ERR_PTR(-ENOMEM); + + div->reg = reg; + div->shift = shift; + div->width = width; + div->flags = flags; + div->lock = lock; + + return div; +} + +static struct clk_gate *_get_cgate(void __iomem *reg, u8 bit_idx, u32 flags, + spinlock_t *lock) +{ + struct clk_gate *gate; + + gate = kzalloc(sizeof(*gate), GFP_KERNEL); + if (!gate) + return ERR_PTR(-ENOMEM); + + gate->reg = reg; + gate->bit_idx = bit_idx; + gate->flags = flags; + 
gate->lock = lock; + + return gate; +} + +struct composite_cfg { + struct clk_hw *mux_hw; + struct clk_hw *div_hw; + struct clk_hw *gate_hw; + + const struct clk_ops *mux_ops; + const struct clk_ops *div_ops; + const struct clk_ops *gate_ops; +}; + +static void get_cfg_composite_div(const struct composite_clk_gcfg *gcfg, + const struct composite_clk_cfg *cfg, + struct composite_cfg *composite, spinlock_t *lock) +{ + struct clk_mux *mux = NULL; + struct clk_divider *div = NULL; + struct clk_gate *gate = NULL; + const struct clk_ops *mux_ops, *div_ops, *gate_ops; + struct clk_hw *mux_hw; + struct clk_hw *div_hw; + struct clk_hw *gate_hw; + + mux_ops = div_ops = gate_ops = NULL; + mux_hw = div_hw = gate_hw = NULL; + + if (gcfg->mux && cfg->mux) { + mux = _get_cmux(base + cfg->mux->offset, + cfg->mux->shift, + cfg->mux->width, + gcfg->mux->flags, lock); + + if (!IS_ERR(mux)) { + mux_hw = &mux->hw; + mux_ops = gcfg->mux->ops ? + gcfg->mux->ops : &clk_mux_ops; + } + } + + if (gcfg->div && cfg->div) { + div = _get_cdiv(base + cfg->div->offset, + cfg->div->shift, + cfg->div->width, + gcfg->div->flags, lock); + + if (!IS_ERR(div)) { + div_hw = &div->hw; + div_ops = gcfg->div->ops ? + gcfg->div->ops : &clk_divider_ops; + } + } + + if (gcfg->gate && cfg->gate) { + gate = _get_cgate(base + cfg->gate->offset, + cfg->gate->bit_idx, + gcfg->gate->flags, lock); + + if (!IS_ERR(gate)) { + gate_hw = &gate->hw; + gate_ops = gcfg->gate->ops ? + gcfg->gate->ops : &clk_gate_ops; + } + } + + composite->mux_hw = mux_hw; + composite->mux_ops = mux_ops; + + composite->div_hw = div_hw; + composite->div_ops = div_ops; + + composite->gate_hw = gate_hw; + composite->gate_ops = gate_ops; +} + +/* Kernel Timer */ +struct timer_ker { + u8 dppre_shift; + struct clk_hw hw; + spinlock_t *lock; +}; + +#define to_timer_ker(_hw) container_of(_hw, struct timer_ker, hw) + +static unsigned long timer_ker_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct timer_ker *clk_elem = to_timer_ker(hw); + u32 timpre; + u32 dppre_shift = clk_elem->dppre_shift; + u32 prescaler; + u32 mul; + + timpre = (readl(base + RCC_CFGR) >> 15) & 0x01; + + prescaler = (readl(base + RCC_D2CFGR) >> dppre_shift) & 0x03; + + mul = 2; + + if (prescaler < 4) + mul = 1; + + else if (timpre && prescaler > 4) + mul = 4; + + return parent_rate * mul; +} + +static const struct clk_ops timer_ker_ops = { + .recalc_rate = timer_ker_recalc_rate, +}; + +static struct clk_hw *clk_register_stm32_timer_ker(struct device *dev, + const char *name, const char *parent_name, + unsigned long flags, + u8 dppre_shift, + spinlock_t *lock) +{ + struct timer_ker *element; + struct clk_init_data init; + struct clk_hw *hw; + int err; + + element = kzalloc(sizeof(*element), GFP_KERNEL); + if (!element) + return ERR_PTR(-ENOMEM); + + init.name = name; + init.ops = &timer_ker_ops; + init.flags = flags; + init.parent_names = &parent_name; + init.num_parents = 1; + + element->hw.init = &init; + element->lock = lock; + element->dppre_shift = dppre_shift; + + hw = &element->hw; + err = clk_hw_register(dev, hw); + + if (err) { + kfree(element); + return ERR_PTR(err); + } + + return hw; +} + +static const struct clk_div_table d1cpre_div_table[] = { + { 0, 1 }, { 1, 1 }, { 2, 1 }, { 3, 1}, + { 4, 1 }, { 5, 1 }, { 6, 1 }, { 7, 1}, + { 8, 2 }, { 9, 4 }, { 10, 8 }, { 11, 16 }, + { 12, 64 }, { 13, 128 }, { 14, 256 }, + { 15, 512 }, + { 0 }, +}; + +static const struct clk_div_table ppre_div_table[] = { + { 0, 1 }, { 1, 1 }, { 2, 1 }, { 3, 1}, + { 4, 2 }, { 5, 4 }, { 6, 8 }, { 7,
16 }, + { 0 }, +}; + +static void register_core_and_bus_clocks(void) +{ + /* CORE AND BUS */ + hws[SYS_D1CPRE] = clk_hw_register_divider_table(NULL, "d1cpre", + "sys_ck", CLK_IGNORE_UNUSED, base + RCC_D1CFGR, 8, 4, 0, + d1cpre_div_table, &stm32rcc_lock); + + hws[HCLK] = clk_hw_register_divider_table(NULL, "hclk", "d1cpre", + CLK_IGNORE_UNUSED, base + RCC_D1CFGR, 0, 4, 0, + d1cpre_div_table, &stm32rcc_lock); + + /* D1 DOMAIN */ + /* * CPU Systick */ + hws[CPU_SYSTICK] = clk_hw_register_fixed_factor(NULL, "systick", + "d1cpre", 0, 1, 8); + + /* * APB3 peripheral */ + hws[PCLK3] = clk_hw_register_divider_table(NULL, "pclk3", "hclk", 0, + base + RCC_D1CFGR, 4, 3, 0, + ppre_div_table, &stm32rcc_lock); + + /* D2 DOMAIN */ + /* * APB1 peripheral */ + hws[PCLK1] = clk_hw_register_divider_table(NULL, "pclk1", "hclk", 0, + base + RCC_D2CFGR, 4, 3, 0, + ppre_div_table, &stm32rcc_lock); + + /* Timers prescaler clocks */ + clk_register_stm32_timer_ker(NULL, "tim1_ker", "pclk1", 0, + 4, &stm32rcc_lock); + + /* * APB2 peripheral */ + hws[PCLK2] = clk_hw_register_divider_table(NULL, "pclk2", "hclk", 0, + base + RCC_D2CFGR, 8, 3, 0, ppre_div_table, + &stm32rcc_lock); + + clk_register_stm32_timer_ker(NULL, "tim2_ker", "pclk2", 0, 8, + &stm32rcc_lock); + + /* D3 DOMAIN */ + /* * APB4 peripheral */ + hws[PCLK4] = clk_hw_register_divider_table(NULL, "pclk4", "hclk", 0, + base + RCC_D3CFGR, 4, 3, 0, + ppre_div_table, &stm32rcc_lock); +} + +/* MUX clock configuration */ +struct stm32_mux_clk { + const char *name; + const char * const *parents; + u8 num_parents; + u32 offset; + u8 shift; + u8 width; + u32 flags; +}; + +#define M_MCLOCF(_name, _parents, _mux_offset, _mux_shift, _mux_width, _flags)\ +{\ + .name = _name,\ + .parents = _parents,\ + .num_parents = ARRAY_SIZE(_parents),\ + .offset = _mux_offset,\ + .shift = _mux_shift,\ + .width = _mux_width,\ + .flags = _flags,\ +} + +#define M_MCLOC(_name, _parents, _mux_offset, _mux_shift, _mux_width)\ + M_MCLOCF(_name, _parents, _mux_offset, _mux_shift, _mux_width, 0)\ + +static const struct stm32_mux_clk stm32_mclk[] __initconst = { + M_MCLOC("per_ck", per_src, RCC_D1CCIPR, 28, 3), + M_MCLOC("pllsrc", pll_src, RCC_PLLCKSELR, 0, 3), + M_MCLOC("sys_ck", sys_src, RCC_CFGR, 0, 3), + M_MCLOC("tracein_ck", tracein_src, RCC_CFGR, 0, 3), +}; + +/* Oscillary clock configuration */ +struct stm32_osc_clk { + const char *name; + const char *parent; + u32 gate_offset; + u8 bit_idx; + u8 bit_rdy; + u32 flags; +}; + +#define OSC_CLKF(_name, _parent, _gate_offset, _bit_idx, _bit_rdy, _flags)\ +{\ + .name = _name,\ + .parent = _parent,\ + .gate_offset = _gate_offset,\ + .bit_idx = _bit_idx,\ + .bit_rdy = _bit_rdy,\ + .flags = _flags,\ +} + +#define OSC_CLK(_name, _parent, _gate_offset, _bit_idx, _bit_rdy)\ + OSC_CLKF(_name, _parent, _gate_offset, _bit_idx, _bit_rdy, 0) + +static const struct stm32_osc_clk stm32_oclk[] __initconst = { + OSC_CLKF("hsi_ck", "hsidiv", RCC_CR, 0, 2, CLK_IGNORE_UNUSED), + OSC_CLKF("hsi_ker", "hsidiv", RCC_CR, 1, 2, CLK_IGNORE_UNUSED), + OSC_CLKF("csi_ck", "clk-csi", RCC_CR, 7, 8, CLK_IGNORE_UNUSED), + OSC_CLKF("csi_ker", "clk-csi", RCC_CR, 9, 8, CLK_IGNORE_UNUSED), + OSC_CLKF("rc48_ck", "clk-rc48", RCC_CR, 12, 13, CLK_IGNORE_UNUSED), + OSC_CLKF("lsi_ck", "clk-lsi", RCC_CSR, 0, 1, CLK_IGNORE_UNUSED), +}; + +/* PLL configuration */ +struct st32h7_pll_cfg { + u8 bit_idx; + u32 offset_divr; + u8 bit_frac_en; + u32 offset_frac; + u8 divm; +}; + +struct stm32_pll_data { + const char *name; + const char *parent_name; + unsigned long flags; + const struct 
st32h7_pll_cfg *cfg; +}; + +static const struct st32h7_pll_cfg stm32h7_pll1 = { + .bit_idx = 24, + .offset_divr = RCC_PLL1DIVR, + .bit_frac_en = 0, + .offset_frac = RCC_PLL1FRACR, + .divm = 4, +}; + +static const struct st32h7_pll_cfg stm32h7_pll2 = { + .bit_idx = 26, + .offset_divr = RCC_PLL2DIVR, + .bit_frac_en = 4, + .offset_frac = RCC_PLL2FRACR, + .divm = 12, +}; + +static const struct st32h7_pll_cfg stm32h7_pll3 = { + .bit_idx = 28, + .offset_divr = RCC_PLL3DIVR, + .bit_frac_en = 8, + .offset_frac = RCC_PLL3FRACR, + .divm = 20, +}; + +static const struct stm32_pll_data stm32_pll[] = { + { "vco1", "pllsrc", CLK_IGNORE_UNUSED, &stm32h7_pll1 }, + { "vco2", "pllsrc", 0, &stm32h7_pll2 }, + { "vco3", "pllsrc", 0, &stm32h7_pll3 }, +}; + +struct stm32_fractional_divider { + void __iomem *mreg; + u8 mshift; + u8 mwidth; + u32 mmask; + + void __iomem *nreg; + u8 nshift; + u8 nwidth; + + void __iomem *freg_status; + u8 freg_bit; + void __iomem *freg_value; + u8 fshift; + u8 fwidth; + + u8 flags; + struct clk_hw hw; + spinlock_t *lock; +}; + +struct stm32_pll_obj { + spinlock_t *lock; + struct stm32_fractional_divider div; + struct stm32_ready_gate rgate; + struct clk_hw hw; +}; + +#define to_pll(_hw) container_of(_hw, struct stm32_pll_obj, hw) + +static int pll_is_enabled(struct clk_hw *hw) +{ + struct stm32_pll_obj *clk_elem = to_pll(hw); + struct clk_hw *_hw = &clk_elem->rgate.gate.hw; + + __clk_hw_set_clk(_hw, hw); + + return ready_gate_clk_ops.is_enabled(_hw); +} + +static int pll_enable(struct clk_hw *hw) +{ + struct stm32_pll_obj *clk_elem = to_pll(hw); + struct clk_hw *_hw = &clk_elem->rgate.gate.hw; + + __clk_hw_set_clk(_hw, hw); + + return ready_gate_clk_ops.enable(_hw); +} + +static void pll_disable(struct clk_hw *hw) +{ + struct stm32_pll_obj *clk_elem = to_pll(hw); + struct clk_hw *_hw = &clk_elem->rgate.gate.hw; + + __clk_hw_set_clk(_hw, hw); + + ready_gate_clk_ops.disable(_hw); +} + +static int pll_frac_is_enabled(struct clk_hw *hw) +{ + struct stm32_pll_obj *clk_elem = to_pll(hw); + struct stm32_fractional_divider *fd = &clk_elem->div; + + return (readl(fd->freg_status) >> fd->freg_bit) & 0x01; +} + +static unsigned long pll_read_frac(struct clk_hw *hw) +{ + struct stm32_pll_obj *clk_elem = to_pll(hw); + struct stm32_fractional_divider *fd = &clk_elem->div; + + return (readl(fd->freg_value) >> fd->fshift) & + GENMASK(fd->fwidth - 1, 0); +} + +static unsigned long pll_fd_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct stm32_pll_obj *clk_elem = to_pll(hw); + struct stm32_fractional_divider *fd = &clk_elem->div; + unsigned long m, n; + u32 val, mask; + u64 rate, rate1 = 0; + + val = readl(fd->mreg); + mask = GENMASK(fd->mwidth - 1, 0) << fd->mshift; + m = (val & mask) >> fd->mshift; + + val = readl(fd->nreg); + mask = GENMASK(fd->nwidth - 1, 0) << fd->nshift; + n = ((val & mask) >> fd->nshift) + 1; + + if (!n || !m) + return parent_rate; + + rate = (u64)parent_rate * n; + do_div(rate, m); + + if (pll_frac_is_enabled(hw)) { + val = pll_read_frac(hw); + rate1 = (u64)parent_rate * (u64)val; + do_div(rate1, (m * 8191)); + } + + return rate + rate1; +} + +static const struct clk_ops pll_ops = { + .enable = pll_enable, + .disable = pll_disable, + .is_enabled = pll_is_enabled, + .recalc_rate = pll_fd_recalc_rate, +}; + +static struct clk_hw *clk_register_stm32_pll(struct device *dev, + const char *name, + const char *parent, + unsigned long flags, + const struct st32h7_pll_cfg *cfg, + spinlock_t *lock) +{ + struct stm32_pll_obj *pll; + struct clk_init_data init = { 
NULL }; + struct clk_hw *hw; + int ret; + struct stm32_fractional_divider *div = NULL; + struct stm32_ready_gate *rgate; + + pll = kzalloc(sizeof(*pll), GFP_KERNEL); + if (!pll) + return ERR_PTR(-ENOMEM); + + init.name = name; + init.ops = &pll_ops; + init.flags = flags; + init.parent_names = &parent; + init.num_parents = 1; + pll->hw.init = &init; + + hw = &pll->hw; + rgate = &pll->rgate; + + rgate->bit_rdy = cfg->bit_idx + 1; + rgate->gate.lock = lock; + rgate->gate.reg = base + RCC_CR; + rgate->gate.bit_idx = cfg->bit_idx; + + div = &pll->div; + div->flags = 0; + div->mreg = base + RCC_PLLCKSELR; + div->mshift = cfg->divm; + div->mwidth = 6; + div->nreg = base + cfg->offset_divr; + div->nshift = 0; + div->nwidth = 9; + + div->freg_status = base + RCC_PLLCFGR; + div->freg_bit = cfg->bit_frac_en; + div->freg_value = base + cfg->offset_frac; + div->fshift = 3; + div->fwidth = 13; + + div->lock = lock; + + ret = clk_hw_register(dev, hw); + if (ret) { + kfree(pll); + hw = ERR_PTR(ret); + } + + return hw; +} + +/* ODF CLOCKS */ +static unsigned long odf_divider_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + return clk_divider_ops.recalc_rate(hw, parent_rate); +} + +static long odf_divider_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + return clk_divider_ops.round_rate(hw, rate, prate); +} + +static int odf_divider_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct clk_hw *hwp; + int pll_status; + int ret; + + hwp = clk_hw_get_parent(hw); + + pll_status = pll_is_enabled(hwp); + + if (pll_status) + pll_disable(hwp); + + ret = clk_divider_ops.set_rate(hw, rate, parent_rate); + + if (pll_status) + pll_enable(hwp); + + return ret; +} + +static const struct clk_ops odf_divider_ops = { + .recalc_rate = odf_divider_recalc_rate, + .round_rate = odf_divider_round_rate, + .set_rate = odf_divider_set_rate, +}; + +static int odf_gate_enable(struct clk_hw *hw) +{ + struct clk_hw *hwp; + int pll_status; + int ret; + + if (clk_gate_ops.is_enabled(hw)) + return 0; + + hwp = clk_hw_get_parent(hw); + + pll_status = pll_is_enabled(hwp); + + if (pll_status) + pll_disable(hwp); + + ret = clk_gate_ops.enable(hw); + + if (pll_status) + pll_enable(hwp); + + return ret; +} + +static void odf_gate_disable(struct clk_hw *hw) +{ + struct clk_hw *hwp; + int pll_status; + + if (!clk_gate_ops.is_enabled(hw)) + return; + + hwp = clk_hw_get_parent(hw); + + pll_status = pll_is_enabled(hwp); + + if (pll_status) + pll_disable(hwp); + + clk_gate_ops.disable(hw); + + if (pll_status) + pll_enable(hwp); +} + +static const struct clk_ops odf_gate_ops = { + .enable = odf_gate_enable, + .disable = odf_gate_disable, + .is_enabled = clk_gate_is_enabled, +}; + +static struct composite_clk_gcfg odf_clk_gcfg = { + M_CFG_DIV(&odf_divider_ops, 0), + M_CFG_GATE(&odf_gate_ops, 0), +}; + +#define M_ODF_F(_name, _parent, _gate_offset, _bit_idx, _rate_offset,\ + _rate_shift, _rate_width, _flags)\ +{\ + .mux = NULL,\ + .div = &(struct muxdiv_cfg) {_rate_offset, _rate_shift, _rate_width},\ + .gate = &(struct gate_cfg) {_gate_offset, _bit_idx },\ + .name = _name,\ + .parent_name = &(const char *) {_parent},\ + .num_parents = 1,\ + .flags = _flags,\ +} + +#define M_ODF(_name, _parent, _gate_offset, _bit_idx, _rate_offset,\ + _rate_shift, _rate_width)\ +M_ODF_F(_name, _parent, _gate_offset, _bit_idx, _rate_offset,\ + _rate_shift, _rate_width, 0)\ + +static const struct composite_clk_cfg stm32_odf[3][3] = { + { + M_ODF_F("pll1_p", "vco1", RCC_PLLCFGR, 16, 
RCC_PLL1DIVR, 9, 7, + CLK_IGNORE_UNUSED), + M_ODF_F("pll1_q", "vco1", RCC_PLLCFGR, 17, RCC_PLL1DIVR, 16, 7, + CLK_IGNORE_UNUSED), + M_ODF_F("pll1_r", "vco1", RCC_PLLCFGR, 18, RCC_PLL1DIVR, 24, 7, + CLK_IGNORE_UNUSED), + }, + + { + M_ODF("pll2_p", "vco2", RCC_PLLCFGR, 19, RCC_PLL2DIVR, 9, 7), + M_ODF("pll2_q", "vco2", RCC_PLLCFGR, 20, RCC_PLL2DIVR, 16, 7), + M_ODF("pll2_r", "vco2", RCC_PLLCFGR, 21, RCC_PLL2DIVR, 24, 7), + }, + { + M_ODF("pll3_p", "vco3", RCC_PLLCFGR, 22, RCC_PLL3DIVR, 9, 7), + M_ODF("pll3_q", "vco3", RCC_PLLCFGR, 23, RCC_PLL3DIVR, 16, 7), + M_ODF("pll3_r", "vco3", RCC_PLLCFGR, 24, RCC_PLL3DIVR, 24, 7), + } +}; + +/* PERIF CLOCKS */ +struct pclk_t { + u32 gate_offset; + u8 bit_idx; + const char *name; + const char *parent; + u32 flags; +}; + +#define PER_CLKF(_gate_offset, _bit_idx, _name, _parent, _flags)\ +{\ + .gate_offset = _gate_offset,\ + .bit_idx = _bit_idx,\ + .name = _name,\ + .parent = _parent,\ + .flags = _flags,\ +} + +#define PER_CLK(_gate_offset, _bit_idx, _name, _parent)\ + PER_CLKF(_gate_offset, _bit_idx, _name, _parent, 0) + +static const struct pclk_t pclk[] = { + PER_CLK(RCC_AHB3ENR, 31, "d1sram1", "hclk"), + PER_CLK(RCC_AHB3ENR, 30, "itcm", "hclk"), + PER_CLK(RCC_AHB3ENR, 29, "dtcm2", "hclk"), + PER_CLK(RCC_AHB3ENR, 28, "dtcm1", "hclk"), + PER_CLK(RCC_AHB3ENR, 8, "flitf", "hclk"), + PER_CLK(RCC_AHB3ENR, 5, "jpgdec", "hclk"), + PER_CLK(RCC_AHB3ENR, 4, "dma2d", "hclk"), + PER_CLK(RCC_AHB3ENR, 0, "mdma", "hclk"), + PER_CLK(RCC_AHB1ENR, 28, "usb2ulpi", "hclk"), + PER_CLK(RCC_AHB1ENR, 26, "usb1ulpi", "hclk"), + PER_CLK(RCC_AHB1ENR, 17, "eth1rx", "hclk"), + PER_CLK(RCC_AHB1ENR, 16, "eth1tx", "hclk"), + PER_CLK(RCC_AHB1ENR, 15, "eth1mac", "hclk"), + PER_CLK(RCC_AHB1ENR, 14, "art", "hclk"), + PER_CLK(RCC_AHB1ENR, 1, "dma2", "hclk"), + PER_CLK(RCC_AHB1ENR, 0, "dma1", "hclk"), + PER_CLK(RCC_AHB2ENR, 31, "d2sram3", "hclk"), + PER_CLK(RCC_AHB2ENR, 30, "d2sram2", "hclk"), + PER_CLK(RCC_AHB2ENR, 29, "d2sram1", "hclk"), + PER_CLK(RCC_AHB2ENR, 5, "hash", "hclk"), + PER_CLK(RCC_AHB2ENR, 4, "crypt", "hclk"), + PER_CLK(RCC_AHB2ENR, 0, "camitf", "hclk"), + PER_CLK(RCC_AHB4ENR, 28, "bkpram", "hclk"), + PER_CLK(RCC_AHB4ENR, 25, "hsem", "hclk"), + PER_CLK(RCC_AHB4ENR, 21, "bdma", "hclk"), + PER_CLK(RCC_AHB4ENR, 19, "crc", "hclk"), + PER_CLK(RCC_AHB4ENR, 10, "gpiok", "hclk"), + PER_CLK(RCC_AHB4ENR, 9, "gpioj", "hclk"), + PER_CLK(RCC_AHB4ENR, 8, "gpioi", "hclk"), + PER_CLK(RCC_AHB4ENR, 7, "gpioh", "hclk"), + PER_CLK(RCC_AHB4ENR, 6, "gpiog", "hclk"), + PER_CLK(RCC_AHB4ENR, 5, "gpiof", "hclk"), + PER_CLK(RCC_AHB4ENR, 4, "gpioe", "hclk"), + PER_CLK(RCC_AHB4ENR, 3, "gpiod", "hclk"), + PER_CLK(RCC_AHB4ENR, 2, "gpioc", "hclk"), + PER_CLK(RCC_AHB4ENR, 1, "gpiob", "hclk"), + PER_CLK(RCC_AHB4ENR, 0, "gpioa", "hclk"), + PER_CLK(RCC_APB3ENR, 6, "wwdg1", "pclk3"), + PER_CLK(RCC_APB1LENR, 29, "dac12", "pclk1"), + PER_CLK(RCC_APB1LENR, 11, "wwdg2", "pclk1"), + PER_CLK(RCC_APB1LENR, 8, "tim14", "tim1_ker"), + PER_CLK(RCC_APB1LENR, 7, "tim13", "tim1_ker"), + PER_CLK(RCC_APB1LENR, 6, "tim12", "tim1_ker"), + PER_CLK(RCC_APB1LENR, 5, "tim7", "tim1_ker"), + PER_CLK(RCC_APB1LENR, 4, "tim6", "tim1_ker"), + PER_CLK(RCC_APB1LENR, 3, "tim5", "tim1_ker"), + PER_CLK(RCC_APB1LENR, 2, "tim4", "tim1_ker"), + PER_CLK(RCC_APB1LENR, 1, "tim3", "tim1_ker"), + PER_CLK(RCC_APB1LENR, 0, "tim2", "tim1_ker"), + PER_CLK(RCC_APB1HENR, 5, "mdios", "pclk1"), + PER_CLK(RCC_APB1HENR, 4, "opamp", "pclk1"), + PER_CLK(RCC_APB1HENR, 1, "crs", "pclk1"), + PER_CLK(RCC_APB2ENR, 18, "tim17", "tim2_ker"), + PER_CLK(RCC_APB2ENR, 17, 
"tim16", "tim2_ker"), + PER_CLK(RCC_APB2ENR, 16, "tim15", "tim2_ker"), + PER_CLK(RCC_APB2ENR, 1, "tim8", "tim2_ker"), + PER_CLK(RCC_APB2ENR, 0, "tim1", "tim2_ker"), + PER_CLK(RCC_APB4ENR, 26, "tmpsens", "pclk4"), + PER_CLK(RCC_APB4ENR, 16, "rtcapb", "pclk4"), + PER_CLK(RCC_APB4ENR, 15, "vref", "pclk4"), + PER_CLK(RCC_APB4ENR, 14, "comp12", "pclk4"), + PER_CLK(RCC_APB4ENR, 1, "syscfg", "pclk4"), +}; + +/* KERNEL CLOCKS */ +#define KER_CLKF(_gate_offset, _bit_idx,\ + _mux_offset, _mux_shift, _mux_width,\ + _name, _parent_name,\ + _flags) \ +{ \ + .gate = &(struct gate_cfg) {_gate_offset, _bit_idx},\ + .mux = &(struct muxdiv_cfg) {_mux_offset, _mux_shift, _mux_width },\ + .name = _name, \ + .parent_name = _parent_name, \ + .num_parents = ARRAY_SIZE(_parent_name),\ + .flags = _flags,\ +} + +#define KER_CLK(_gate_offset, _bit_idx, _mux_offset, _mux_shift, _mux_width,\ + _name, _parent_name) \ +KER_CLKF(_gate_offset, _bit_idx, _mux_offset, _mux_shift, _mux_width,\ + _name, _parent_name, 0)\ + +#define KER_CLKF_NOMUX(_gate_offset, _bit_idx,\ + _name, _parent_name,\ + _flags) \ +{ \ + .gate = &(struct gate_cfg) {_gate_offset, _bit_idx},\ + .mux = NULL,\ + .name = _name, \ + .parent_name = _parent_name, \ + .num_parents = 1,\ + .flags = _flags,\ +} + +static const struct composite_clk_cfg kclk[] = { + KER_CLK(RCC_AHB3ENR, 16, RCC_D1CCIPR, 16, 1, "sdmmc1", sdmmc_src), + KER_CLKF(RCC_AHB3ENR, 14, RCC_D1CCIPR, 4, 2, "quadspi", qspi_src, + CLK_IGNORE_UNUSED), + KER_CLKF(RCC_AHB3ENR, 12, RCC_D1CCIPR, 0, 2, "fmc", fmc_src, + CLK_IGNORE_UNUSED), + KER_CLK(RCC_AHB1ENR, 27, RCC_D2CCIP2R, 20, 2, "usb2otg", usbotg_src), + KER_CLK(RCC_AHB1ENR, 25, RCC_D2CCIP2R, 20, 2, "usb1otg", usbotg_src), + KER_CLK(RCC_AHB1ENR, 5, RCC_D3CCIPR, 16, 2, "adc12", adc_src), + KER_CLK(RCC_AHB2ENR, 9, RCC_D1CCIPR, 16, 1, "sdmmc2", sdmmc_src), + KER_CLK(RCC_AHB2ENR, 6, RCC_D2CCIP2R, 8, 2, "rng", rng_src), + KER_CLK(RCC_AHB4ENR, 24, RCC_D3CCIPR, 16, 2, "adc3", adc_src), + KER_CLKF(RCC_APB3ENR, 4, RCC_D1CCIPR, 8, 1, "dsi", dsi_src, + CLK_SET_RATE_PARENT), + KER_CLKF_NOMUX(RCC_APB3ENR, 3, "ltdc", ltdc_src, CLK_SET_RATE_PARENT), + KER_CLK(RCC_APB1LENR, 31, RCC_D2CCIP2R, 0, 3, "usart8", usart_src2), + KER_CLK(RCC_APB1LENR, 30, RCC_D2CCIP2R, 0, 3, "usart7", usart_src2), + KER_CLK(RCC_APB1LENR, 27, RCC_D2CCIP2R, 22, 2, "hdmicec", cec_src), + KER_CLK(RCC_APB1LENR, 23, RCC_D2CCIP2R, 12, 2, "i2c3", i2c_src1), + KER_CLK(RCC_APB1LENR, 22, RCC_D2CCIP2R, 12, 2, "i2c2", i2c_src1), + KER_CLK(RCC_APB1LENR, 21, RCC_D2CCIP2R, 12, 2, "i2c1", i2c_src1), + KER_CLK(RCC_APB1LENR, 20, RCC_D2CCIP2R, 0, 3, "uart5", usart_src2), + KER_CLK(RCC_APB1LENR, 19, RCC_D2CCIP2R, 0, 3, "uart4", usart_src2), + KER_CLK(RCC_APB1LENR, 18, RCC_D2CCIP2R, 0, 3, "usart3", usart_src2), + KER_CLK(RCC_APB1LENR, 17, RCC_D2CCIP2R, 0, 3, "usart2", usart_src2), + KER_CLK(RCC_APB1LENR, 16, RCC_D2CCIP1R, 20, 2, "spdifrx", spdifrx_src), + KER_CLK(RCC_APB1LENR, 15, RCC_D2CCIP1R, 16, 3, "spi3", spi_src1), + KER_CLK(RCC_APB1LENR, 14, RCC_D2CCIP1R, 16, 3, "spi2", spi_src1), + KER_CLK(RCC_APB1LENR, 9, RCC_D2CCIP2R, 28, 3, "lptim1", lptim_src1), + KER_CLK(RCC_APB1HENR, 8, RCC_D2CCIP1R, 28, 2, "fdcan", fdcan_src), + KER_CLK(RCC_APB1HENR, 2, RCC_D2CCIP1R, 31, 1, "swp", swp_src), + KER_CLK(RCC_APB2ENR, 29, RCC_CFGR, 14, 1, "hrtim", hrtim_src), + KER_CLK(RCC_APB2ENR, 28, RCC_D2CCIP1R, 24, 1, "dfsdm1", dfsdm1_src), + KER_CLKF(RCC_APB2ENR, 24, RCC_D2CCIP1R, 6, 3, "sai3", sai_src, + CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT), + KER_CLKF(RCC_APB2ENR, 23, RCC_D2CCIP1R, 6, 3, "sai2", sai_src, + 
CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT), + KER_CLKF(RCC_APB2ENR, 22, RCC_D2CCIP1R, 0, 3, "sai1", sai_src, + CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT), + KER_CLK(RCC_APB2ENR, 20, RCC_D2CCIP1R, 16, 3, "spi5", spi_src2), + KER_CLK(RCC_APB2ENR, 13, RCC_D2CCIP1R, 16, 3, "spi4", spi_src2), + KER_CLK(RCC_APB2ENR, 12, RCC_D2CCIP1R, 16, 3, "spi1", spi_src1), + KER_CLK(RCC_APB2ENR, 5, RCC_D2CCIP2R, 3, 3, "usart6", usart_src1), + KER_CLK(RCC_APB2ENR, 4, RCC_D2CCIP2R, 3, 3, "usart1", usart_src1), + KER_CLK(RCC_APB4ENR, 21, RCC_D3CCIPR, 24, 3, "sai4b", sai_src), + KER_CLK(RCC_APB4ENR, 21, RCC_D3CCIPR, 21, 3, "sai4a", sai_src), + KER_CLK(RCC_APB4ENR, 12, RCC_D3CCIPR, 13, 3, "lptim5", lptim_src2), + KER_CLK(RCC_APB4ENR, 11, RCC_D3CCIPR, 13, 3, "lptim4", lptim_src2), + KER_CLK(RCC_APB4ENR, 10, RCC_D3CCIPR, 13, 3, "lptim3", lptim_src2), + KER_CLK(RCC_APB4ENR, 9, RCC_D3CCIPR, 10, 3, "lptim2", lptim_src2), + KER_CLK(RCC_APB4ENR, 7, RCC_D3CCIPR, 8, 2, "i2c4", i2c_src2), + KER_CLK(RCC_APB4ENR, 5, RCC_D3CCIPR, 28, 3, "spi6", spi_src3), + KER_CLK(RCC_APB4ENR, 3, RCC_D3CCIPR, 0, 3, "lpuart1", lpuart1_src), +}; + +static struct composite_clk_gcfg kernel_clk_cfg = { + M_CFG_MUX(NULL, 0), + M_CFG_GATE(NULL, 0), +}; + +/* RTC clock */ +/* + * RTC & LSE registers are protected against parasitic write access. + * PWR_CR_DBP bit must be set to enable write access to RTC registers. + */ +/* STM32_PWR_CR */ +#define PWR_CR 0x00 +/* STM32_PWR_CR bit field */ +#define PWR_CR_DBP BIT(8) + +static struct composite_clk_gcfg rtc_clk_cfg = { + M_CFG_MUX(NULL, 0), + M_CFG_GATE(NULL, 0), +}; + +static const struct composite_clk_cfg rtc_clk = + KER_CLK(RCC_BDCR, 15, RCC_BDCR, 8, 2, "rtc_ck", rtc_src); + +/* Micro-controller output clock */ +static struct composite_clk_gcfg mco_clk_cfg = { + M_CFG_MUX(NULL, 0), + M_CFG_DIV(NULL, CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO), +}; + +#define M_MCO_F(_name, _parents, _mux_offset, _mux_shift, _mux_width,\ + _rate_offset, _rate_shift, _rate_width,\ + _flags)\ +{\ + .mux = &(struct muxdiv_cfg) {_mux_offset, _mux_shift, _mux_width },\ + .div = &(struct muxdiv_cfg) {_rate_offset, _rate_shift, _rate_width},\ + .gate = NULL,\ + .name = _name,\ + .parent_name = _parents,\ + .num_parents = ARRAY_SIZE(_parents),\ + .flags = _flags,\ +} + +static const struct composite_clk_cfg mco_clk[] = { + M_MCO_F("mco1", mco_src1, RCC_CFGR, 22, 4, RCC_CFGR, 18, 4, 0), + M_MCO_F("mco2", mco_src2, RCC_CFGR, 29, 3, RCC_CFGR, 25, 4, 0), +}; + +static void __init stm32h7_rcc_init(struct device_node *np) +{ + struct clk_hw_onecell_data *clk_data; + struct composite_cfg c_cfg; + int n; + const char *hse_clk, *lse_clk, *i2s_clk; + struct regmap *pdrm; + + clk_data = kzalloc(sizeof(*clk_data) + + sizeof(*clk_data->hws) * STM32H7_MAX_CLKS, + GFP_KERNEL); + if (!clk_data) + return; + + clk_data->num = STM32H7_MAX_CLKS; + + hws = clk_data->hws; + + for (n = 0; n < STM32H7_MAX_CLKS; n++) + hws[n] = ERR_PTR(-ENOENT); + + /* get RCC base @ from DT */ + base = of_iomap(np, 0); + if (!base) { + pr_err("%s: unable to map resource", np->name); + goto err_free_clks; + } + + pdrm = syscon_regmap_lookup_by_phandle(np, "st,syscfg"); + if (IS_ERR(pdrm)) + pr_warn("%s: Unable to get syscfg\n", __func__); + else + /* In any case disable backup domain write protection + * and will never be enabled. + * Needed by LSE & RTC clocks. 
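For illustration only (the helper and pwr_base below are invented for the sketch, not part of the patch): the regmap call that follows amounts to a read-modify-write setting DBP (bit 8) of the PWR control register reached through the "st,syscfg" syscon.

static void example_unlock_backup_domain(void __iomem *pwr_base)
{
	u32 val = readl(pwr_base + PWR_CR);

	/* set DBP; the driver never clears it again */
	writel(val | PWR_CR_DBP, pwr_base + PWR_CR);
}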
+ */ + regmap_update_bits(pdrm, PWR_CR, PWR_CR_DBP, PWR_CR_DBP); + + /* Put parent names from DT */ + hse_clk = of_clk_get_parent_name(np, 0); + lse_clk = of_clk_get_parent_name(np, 1); + i2s_clk = of_clk_get_parent_name(np, 2); + + sai_src[3] = i2s_clk; + spi_src1[3] = i2s_clk; + + /* Register Internal oscillators */ + clk_hw_register_fixed_rate(NULL, "clk-hsi", NULL, 0, 64000000); + clk_hw_register_fixed_rate(NULL, "clk-csi", NULL, 0, 4000000); + clk_hw_register_fixed_rate(NULL, "clk-lsi", NULL, 0, 32000); + clk_hw_register_fixed_rate(NULL, "clk-rc48", NULL, 0, 48000); + + /* This clock is coming from outside. Frequencies unknown */ + hws[CK_DSI_PHY] = clk_hw_register_fixed_rate(NULL, "ck_dsi_phy", NULL, + 0, 0); + + hws[HSI_DIV] = clk_hw_register_divider(NULL, "hsidiv", "clk-hsi", 0, + base + RCC_CR, 3, 2, CLK_DIVIDER_POWER_OF_TWO, + &stm32rcc_lock); + + hws[HSE_1M] = clk_hw_register_divider(NULL, "hse_1M", "hse_ck", 0, + base + RCC_CFGR, 8, 6, CLK_DIVIDER_ONE_BASED | + CLK_DIVIDER_ALLOW_ZERO, + &stm32rcc_lock); + + /* Mux system clocks */ + for (n = 0; n < ARRAY_SIZE(stm32_mclk); n++) + hws[MCLK_BANK + n] = clk_hw_register_mux(NULL, + stm32_mclk[n].name, + stm32_mclk[n].parents, + stm32_mclk[n].num_parents, + stm32_mclk[n].flags, + stm32_mclk[n].offset + base, + stm32_mclk[n].shift, + stm32_mclk[n].width, + 0, + &stm32rcc_lock); + + register_core_and_bus_clocks(); + + /* Oscillary clocks */ + for (n = 0; n < ARRAY_SIZE(stm32_oclk); n++) + hws[OSC_BANK + n] = clk_register_ready_gate(NULL, + stm32_oclk[n].name, + stm32_oclk[n].parent, + stm32_oclk[n].gate_offset + base, + stm32_oclk[n].bit_idx, + stm32_oclk[n].bit_rdy, + stm32_oclk[n].flags, + &stm32rcc_lock); + + hws[HSE_CK] = clk_register_ready_gate(NULL, + "hse_ck", + hse_clk, + RCC_CR + base, + 16, 17, + 0, + &stm32rcc_lock); + + hws[LSE_CK] = clk_register_ready_gate(NULL, + "lse_ck", + lse_clk, + RCC_BDCR + base, + 0, 1, + 0, + &stm32rcc_lock); + + hws[CSI_KER_DIV122 + n] = clk_hw_register_fixed_factor(NULL, + "csi_ker_div122", "csi_ker", 0, 1, 122); + + /* PLLs */ + for (n = 0; n < ARRAY_SIZE(stm32_pll); n++) { + int odf; + + /* Register the VCO */ + clk_register_stm32_pll(NULL, stm32_pll[n].name, + stm32_pll[n].parent_name, stm32_pll[n].flags, + stm32_pll[n].cfg, + &stm32rcc_lock); + + /* Register the 3 output dividers */ + for (odf = 0; odf < 3; odf++) { + int idx = n * 3 + odf; + + get_cfg_composite_div(&odf_clk_gcfg, &stm32_odf[n][odf], + &c_cfg, &stm32rcc_lock); + + hws[ODF_BANK + idx] = clk_hw_register_composite(NULL, + stm32_odf[n][odf].name, + stm32_odf[n][odf].parent_name, + stm32_odf[n][odf].num_parents, + c_cfg.mux_hw, c_cfg.mux_ops, + c_cfg.div_hw, c_cfg.div_ops, + c_cfg.gate_hw, c_cfg.gate_ops, + stm32_odf[n][odf].flags); + } + } + + /* Peripheral clocks */ + for (n = 0; n < ARRAY_SIZE(pclk); n++) + hws[PERIF_BANK + n] = clk_hw_register_gate(NULL, pclk[n].name, + pclk[n].parent, + pclk[n].flags, base + pclk[n].gate_offset, + pclk[n].bit_idx, pclk[n].flags, &stm32rcc_lock); + + /* Kernel clocks */ + for (n = 0; n < ARRAY_SIZE(kclk); n++) { + get_cfg_composite_div(&kernel_clk_cfg, &kclk[n], &c_cfg, + &stm32rcc_lock); + + hws[KERN_BANK + n] = clk_hw_register_composite(NULL, + kclk[n].name, + kclk[n].parent_name, + kclk[n].num_parents, + c_cfg.mux_hw, c_cfg.mux_ops, + c_cfg.div_hw, c_cfg.div_ops, + c_cfg.gate_hw, c_cfg.gate_ops, + kclk[n].flags); + } + + /* RTC clock (default state is off) */ + clk_hw_register_fixed_rate(NULL, "off", NULL, 0, 0); + + get_cfg_composite_div(&rtc_clk_cfg, &rtc_clk, &c_cfg, &stm32rcc_lock); + 
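A sketch of how the rtc_clk definition above maps onto the hardware (helper name invented, not part of the patch): the gate is RCC_BDCR bit 15 and the 2-bit source mux sits at RCC_BDCR[9:8], indexing rtc_src[].

static const char *example_rtc_parent_name(void)
{
	u32 sel = (readl(base + RCC_BDCR) >> 8) & 0x3;

	/* 0 = "off", 1 = "lse_ck", 2 = "lsi_ck", 3 = "hse_1M" */
	return rtc_src[sel];
}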
+ hws[RTC_CK] = clk_hw_register_composite(NULL, + rtc_clk.name, + rtc_clk.parent_name, + rtc_clk.num_parents, + c_cfg.mux_hw, c_cfg.mux_ops, + c_cfg.div_hw, c_cfg.div_ops, + c_cfg.gate_hw, c_cfg.gate_ops, + rtc_clk.flags); + + /* Micro-controller clocks */ + for (n = 0; n < ARRAY_SIZE(mco_clk); n++) { + get_cfg_composite_div(&mco_clk_cfg, &mco_clk[n], &c_cfg, + &stm32rcc_lock); + + hws[MCO_BANK + n] = clk_hw_register_composite(NULL, + mco_clk[n].name, + mco_clk[n].parent_name, + mco_clk[n].num_parents, + c_cfg.mux_hw, c_cfg.mux_ops, + c_cfg.div_hw, c_cfg.div_ops, + c_cfg.gate_hw, c_cfg.gate_ops, + mco_clk[n].flags); + } + + of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data); + + return; + +err_free_clks: + kfree(clk_data); +} + +/* The RCC node is a clock and reset controller, and these + * functionalities are supported by different drivers that + * matches the same compatible strings. + */ +CLK_OF_DECLARE_DRIVER(stm32h7_rcc, "st,stm32h743-rcc", stm32h7_rcc_init); diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c index ea7d552a2f2b..decffb3826ec 100644 --- a/drivers/clk/clk-versaclock5.c +++ b/drivers/clk/clk-versaclock5.c @@ -57,6 +57,7 @@ #define VC5_PRIM_SRC_SHDN 0x10 #define VC5_PRIM_SRC_SHDN_EN_XTAL BIT(7) #define VC5_PRIM_SRC_SHDN_EN_CLKIN BIT(6) +#define VC5_PRIM_SRC_SHDN_EN_DOUBLE_XTAL_FREQ BIT(3) #define VC5_PRIM_SRC_SHDN_SP BIT(1) #define VC5_PRIM_SRC_SHDN_EN_GBL_SHDN BIT(0) @@ -122,12 +123,16 @@ /* flags to describe chip features */ /* chip has built-in oscilator */ #define VC5_HAS_INTERNAL_XTAL BIT(0) +/* chip has PFD requency doubler */ +#define VC5_HAS_PFD_FREQ_DBL BIT(1) /* Supported IDT VC5 models. */ enum vc5_model { IDT_VC5_5P49V5923, + IDT_VC5_5P49V5925, IDT_VC5_5P49V5933, IDT_VC5_5P49V5935, + IDT_VC6_5P49V6901, }; /* Structure to describe features of a particular VC5 model */ @@ -157,6 +162,8 @@ struct vc5_driver_data { struct clk *pin_clkin; unsigned char clk_mux_ins; struct clk_hw clk_mux; + struct clk_hw clk_mul; + struct clk_hw clk_pfd; struct vc5_hw_data clk_pll; struct vc5_hw_data clk_fod[VC5_MAX_FOD_NUM]; struct vc5_hw_data clk_out[VC5_MAX_CLK_OUT_NUM]; @@ -166,6 +173,14 @@ static const char * const vc5_mux_names[] = { "mux" }; +static const char * const vc5_dbl_names[] = { + "dbl" +}; + +static const char * const vc5_pfd_names[] = { + "pfd" +}; + static const char * const vc5_pll_names[] = { "pll" }; @@ -254,11 +269,64 @@ static int vc5_mux_set_parent(struct clk_hw *hw, u8 index) return regmap_update_bits(vc5->regmap, VC5_PRIM_SRC_SHDN, mask, src); } -static unsigned long vc5_mux_recalc_rate(struct clk_hw *hw, +static const struct clk_ops vc5_mux_ops = { + .set_parent = vc5_mux_set_parent, + .get_parent = vc5_mux_get_parent, +}; + +static unsigned long vc5_dbl_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct vc5_driver_data *vc5 = - container_of(hw, struct vc5_driver_data, clk_mux); + container_of(hw, struct vc5_driver_data, clk_mul); + unsigned int premul; + + regmap_read(vc5->regmap, VC5_PRIM_SRC_SHDN, &premul); + if (premul & VC5_PRIM_SRC_SHDN_EN_DOUBLE_XTAL_FREQ) + parent_rate *= 2; + + return parent_rate; +} + +static long vc5_dbl_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + if ((*parent_rate == rate) || ((*parent_rate * 2) == rate)) + return rate; + else + return -EINVAL; +} + +static int vc5_dbl_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct vc5_driver_data *vc5 = + container_of(hw, struct vc5_driver_data, clk_mul); + u32 
mask; + + if ((parent_rate * 2) == rate) + mask = VC5_PRIM_SRC_SHDN_EN_DOUBLE_XTAL_FREQ; + else + mask = 0; + + regmap_update_bits(vc5->regmap, VC5_PRIM_SRC_SHDN, + VC5_PRIM_SRC_SHDN_EN_DOUBLE_XTAL_FREQ, + mask); + + return 0; +} + +static const struct clk_ops vc5_dbl_ops = { + .recalc_rate = vc5_dbl_recalc_rate, + .round_rate = vc5_dbl_round_rate, + .set_rate = vc5_dbl_set_rate, +}; + +static unsigned long vc5_pfd_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct vc5_driver_data *vc5 = + container_of(hw, struct vc5_driver_data, clk_pfd); unsigned int prediv, div; regmap_read(vc5->regmap, VC5_VCO_CTRL_AND_PREDIV, &prediv); @@ -276,7 +344,7 @@ static unsigned long vc5_mux_recalc_rate(struct clk_hw *hw, return parent_rate / VC5_REF_DIVIDER_REF_DIV(div); } -static long vc5_mux_round_rate(struct clk_hw *hw, unsigned long rate, +static long vc5_pfd_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate) { unsigned long idiv; @@ -296,11 +364,11 @@ static long vc5_mux_round_rate(struct clk_hw *hw, unsigned long rate, return *parent_rate / idiv; } -static int vc5_mux_set_rate(struct clk_hw *hw, unsigned long rate, +static int vc5_pfd_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { struct vc5_driver_data *vc5 = - container_of(hw, struct vc5_driver_data, clk_mux); + container_of(hw, struct vc5_driver_data, clk_pfd); unsigned long idiv; u8 div; @@ -328,12 +396,10 @@ static int vc5_mux_set_rate(struct clk_hw *hw, unsigned long rate, return 0; } -static const struct clk_ops vc5_mux_ops = { - .set_parent = vc5_mux_set_parent, - .get_parent = vc5_mux_get_parent, - .recalc_rate = vc5_mux_recalc_rate, - .round_rate = vc5_mux_round_rate, - .set_rate = vc5_mux_set_rate, +static const struct clk_ops vc5_pfd_ops = { + .recalc_rate = vc5_pfd_recalc_rate, + .round_rate = vc5_pfd_round_rate, + .set_rate = vc5_pfd_set_rate, }; /* @@ -426,6 +492,10 @@ static unsigned long vc5_fod_recalc_rate(struct clk_hw *hw, div_frc = (od_frc[0] << 22) | (od_frc[1] << 14) | (od_frc[2] << 6) | (od_frc[3] >> 2); + /* Avoid division by zero if the output is not configured. */ + if (div_int == 0 && div_frc == 0) + return 0; + /* The PLL divider has 12 integer bits and 30 fractional bits */ return div64_u64((u64)f_in << 24ULL, ((u64)div_int << 24ULL) + div_frc); } @@ -503,6 +573,25 @@ static int vc5_clk_out_prepare(struct clk_hw *hw) { struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw); struct vc5_driver_data *vc5 = hwdata->vc5; + const u8 mask = VC5_OUT_DIV_CONTROL_SELB_NORM | + VC5_OUT_DIV_CONTROL_SEL_EXT | + VC5_OUT_DIV_CONTROL_EN_FOD; + unsigned int src; + int ret; + + /* + * If the input mux is disabled, enable it first and + * select source from matching FOD. 
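A sketch of what "input mux is disabled" means at register level (helper name invented, not part of the patch): output N counts as disabled when none of the SELB_NORM/SEL_EXT/EN_FOD source bits are set, which is exactly what the prepare hook below tests before routing the matching FOD.

static bool example_out_mux_disabled(struct vc5_driver_data *vc5, int n)
{
	unsigned int src;

	regmap_read(vc5->regmap, VC5_OUT_DIV_CONTROL(n), &src);

	return !(src & (VC5_OUT_DIV_CONTROL_SELB_NORM |
			VC5_OUT_DIV_CONTROL_SEL_EXT |
			VC5_OUT_DIV_CONTROL_EN_FOD));
}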
+ */ + regmap_read(vc5->regmap, VC5_OUT_DIV_CONTROL(hwdata->num), &src); + if ((src & mask) == 0) { + src = VC5_OUT_DIV_CONTROL_RESET | VC5_OUT_DIV_CONTROL_EN_FOD; + ret = regmap_update_bits(vc5->regmap, + VC5_OUT_DIV_CONTROL(hwdata->num), + mask | VC5_OUT_DIV_CONTROL_RESET, src); + if (ret) + return ret; + } /* Enable the clock buffer */ regmap_update_bits(vc5->regmap, VC5_CLK_OUTPUT_CFG(hwdata->num, 1), @@ -516,7 +605,7 @@ static void vc5_clk_out_unprepare(struct clk_hw *hw) struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw); struct vc5_driver_data *vc5 = hwdata->vc5; - /* Enable the clock buffer */ + /* Disable the clock buffer */ regmap_update_bits(vc5->regmap, VC5_CLK_OUTPUT_CFG(hwdata->num, 1), VC5_CLK_OUTPUT_CFG1_EN_CLKBUF, 0); } @@ -537,6 +626,9 @@ static unsigned char vc5_clk_out_get_parent(struct clk_hw *hw) regmap_read(vc5->regmap, VC5_OUT_DIV_CONTROL(hwdata->num), &src); src &= mask; + if (src == 0) /* Input mux set to DISABLED */ + return 0; + if ((src & fodclkmask) == VC5_OUT_DIV_CONTROL_EN_FOD) return 0; @@ -595,7 +687,9 @@ static int vc5_map_index_to_output(const enum vc5_model model, case IDT_VC5_5P49V5933: return (n == 0) ? 0 : 3; case IDT_VC5_5P49V5923: + case IDT_VC5_5P49V5925: case IDT_VC5_5P49V5935: + case IDT_VC6_5P49V6901: default: return n; } @@ -672,12 +766,46 @@ static int vc5_probe(struct i2c_client *client, goto err_clk; } + if (vc5->chip_info->flags & VC5_HAS_PFD_FREQ_DBL) { + /* Register frequency doubler */ + memset(&init, 0, sizeof(init)); + init.name = vc5_dbl_names[0]; + init.ops = &vc5_dbl_ops; + init.flags = CLK_SET_RATE_PARENT; + init.parent_names = vc5_mux_names; + init.num_parents = 1; + vc5->clk_mul.init = &init; + ret = devm_clk_hw_register(&client->dev, &vc5->clk_mul); + if (ret) { + dev_err(&client->dev, "unable to register %s\n", + init.name); + goto err_clk; + } + } + + /* Register PFD */ + memset(&init, 0, sizeof(init)); + init.name = vc5_pfd_names[0]; + init.ops = &vc5_pfd_ops; + init.flags = CLK_SET_RATE_PARENT; + if (vc5->chip_info->flags & VC5_HAS_PFD_FREQ_DBL) + init.parent_names = vc5_dbl_names; + else + init.parent_names = vc5_mux_names; + init.num_parents = 1; + vc5->clk_pfd.init = &init; + ret = devm_clk_hw_register(&client->dev, &vc5->clk_pfd); + if (ret) { + dev_err(&client->dev, "unable to register %s\n", init.name); + goto err_clk; + } + /* Register PLL */ memset(&init, 0, sizeof(init)); init.name = vc5_pll_names[0]; init.ops = &vc5_pll_ops; init.flags = CLK_SET_RATE_PARENT; - init.parent_names = vc5_mux_names; + init.parent_names = vc5_pfd_names; init.num_parents = 1; vc5->clk_pll.num = 0; vc5->clk_pll.vc5 = vc5; @@ -785,6 +913,13 @@ static const struct vc5_chip_info idt_5p49v5923_info = { .flags = 0, }; +static const struct vc5_chip_info idt_5p49v5925_info = { + .model = IDT_VC5_5P49V5925, + .clk_fod_cnt = 4, + .clk_out_cnt = 5, + .flags = 0, +}; + static const struct vc5_chip_info idt_5p49v5933_info = { .model = IDT_VC5_5P49V5933, .clk_fod_cnt = 2, @@ -799,18 +934,29 @@ static const struct vc5_chip_info idt_5p49v5935_info = { .flags = VC5_HAS_INTERNAL_XTAL, }; +static const struct vc5_chip_info idt_5p49v6901_info = { + .model = IDT_VC6_5P49V6901, + .clk_fod_cnt = 4, + .clk_out_cnt = 5, + .flags = VC5_HAS_PFD_FREQ_DBL, +}; + static const struct i2c_device_id vc5_id[] = { { "5p49v5923", .driver_data = IDT_VC5_5P49V5923 }, + { "5p49v5925", .driver_data = IDT_VC5_5P49V5925 }, { "5p49v5933", .driver_data = IDT_VC5_5P49V5933 }, { "5p49v5935", .driver_data = IDT_VC5_5P49V5935 }, + { "5p49v6901", .driver_data = 
IDT_VC6_5P49V6901 }, { } }; MODULE_DEVICE_TABLE(i2c, vc5_id); static const struct of_device_id clk_vc5_of_match[] = { { .compatible = "idt,5p49v5923", .data = &idt_5p49v5923_info }, + { .compatible = "idt,5p49v5925", .data = &idt_5p49v5925_info }, { .compatible = "idt,5p49v5933", .data = &idt_5p49v5933_info }, { .compatible = "idt,5p49v5935", .data = &idt_5p49v5935_info }, + { .compatible = "idt,5p49v6901", .data = &idt_5p49v6901_info }, { }, }; MODULE_DEVICE_TABLE(of, clk_vc5_of_match); diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c index bc37030e38ba..4c75821a3933 100644 --- a/drivers/clk/clk-xgene.c +++ b/drivers/clk/clk-xgene.c @@ -192,7 +192,7 @@ static void xgene_pllclk_init(struct device_node *np, enum xgene_pll_type pll_ty reg = of_iomap(np, 0); if (reg == NULL) { - pr_err("Unable to map CSR register for %s\n", np->full_name); + pr_err("Unable to map CSR register for %pOF\n", np); return; } of_property_read_string(np, "clock-output-names", &clk_name); @@ -409,12 +409,12 @@ static void xgene_pmdclk_init(struct device_node *np) /* Parse the DTS register for resource */ rc = of_address_to_resource(np, 0, &res); if (rc != 0) { - pr_err("no DTS register for %s\n", np->full_name); + pr_err("no DTS register for %pOF\n", np); return; } csr_reg = of_iomap(np, 0); if (!csr_reg) { - pr_err("Unable to map resource for %s\n", np->full_name); + pr_err("Unable to map resource for %pOF\n", np); return; } of_property_read_string(np, "clock-output-names", &clk_name); @@ -703,16 +703,14 @@ static void __init xgene_devclk_init(struct device_node *np) rc = of_address_to_resource(np, i, &res); if (rc != 0) { if (i == 0) { - pr_err("no DTS register for %s\n", - np->full_name); + pr_err("no DTS register for %pOF\n", np); return; } break; } map_res = of_iomap(np, i); if (map_res == NULL) { - pr_err("Unable to map resource %d for %s\n", - i, np->full_name); + pr_err("Unable to map resource %d for %pOF\n", i, np); goto err; } if (strcmp(res.name, "div-reg") == 0) @@ -747,8 +745,7 @@ static void __init xgene_devclk_init(struct device_node *np) pr_debug("Add %s clock\n", clk_name); rc = of_clk_add_provider(np, of_clk_src_simple_get, clk); if (rc != 0) - pr_err("%s: could register provider clk %s\n", __func__, - np->full_name); + pr_err("%s: could register provider clk %pOF\n", __func__, np); return; diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index fc58c52a26b4..c8d83acda006 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -3132,7 +3132,7 @@ int of_clk_add_provider(struct device_node *np, mutex_lock(&of_clk_mutex); list_add(&cp->link, &of_clk_providers); mutex_unlock(&of_clk_mutex); - pr_debug("Added clock from %s\n", np->full_name); + pr_debug("Added clock from %pOF\n", np); ret = of_clk_set_defaults(np, true); if (ret < 0) @@ -3167,7 +3167,7 @@ int of_clk_add_hw_provider(struct device_node *np, mutex_lock(&of_clk_mutex); list_add(&cp->link, &of_clk_providers); mutex_unlock(&of_clk_mutex); - pr_debug("Added clk_hw provider from %s\n", np->full_name); + pr_debug("Added clk_hw provider from %pOF\n", np); ret = of_clk_set_defaults(np, true); if (ret < 0) diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c index bb8a77a5985f..6b2f29df3f70 100644 --- a/drivers/clk/clkdev.c +++ b/drivers/clk/clkdev.c @@ -77,8 +77,8 @@ static struct clk *__of_clk_get_by_name(struct device_node *np, break; } else if (name && index >= 0) { if (PTR_ERR(clk) != -EPROBE_DEFER) - pr_err("ERROR: could not get clock %s:%s(%i)\n", - np->full_name, name ? 
name : "", index); + pr_err("ERROR: could not get clock %pOF:%s(%i)\n", + np, name ? name : "", index); return clk; } diff --git a/drivers/clk/hisilicon/clk-hi6220.c b/drivers/clk/hisilicon/clk-hi6220.c index 4181b6808545..e786d717f75d 100644 --- a/drivers/clk/hisilicon/clk-hi6220.c +++ b/drivers/clk/hisilicon/clk-hi6220.c @@ -55,9 +55,9 @@ static struct hisi_fixed_factor_clock hi6220_fixed_factor_clks[] __initdata = { }; static struct hisi_gate_clock hi6220_separated_gate_clks_ao[] __initdata = { - { HI6220_WDT0_PCLK, "wdt0_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 12, 0, }, - { HI6220_WDT1_PCLK, "wdt1_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 13, 0, }, - { HI6220_WDT2_PCLK, "wdt2_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 14, 0, }, + { HI6220_WDT0_PCLK, "wdt0_pclk", "ref32k", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 12, 0, }, + { HI6220_WDT1_PCLK, "wdt1_pclk", "ref32k", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 13, 0, }, + { HI6220_WDT2_PCLK, "wdt2_pclk", "ref32k", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 14, 0, }, { HI6220_TIMER0_PCLK, "timer0_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 15, 0, }, { HI6220_TIMER1_PCLK, "timer1_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 16, 0, }, { HI6220_TIMER2_PCLK, "timer2_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 17, 0, }, diff --git a/drivers/clk/imx/clk-imx51-imx53.c b/drivers/clk/imx/clk-imx51-imx53.c index 1e3c9ea5f9dc..7bcaf270db11 100644 --- a/drivers/clk/imx/clk-imx51-imx53.c +++ b/drivers/clk/imx/clk-imx51-imx53.c @@ -416,10 +416,10 @@ static void __init mx51_clocks_init(struct device_node *np) clk[IMX5_CLK_LP_APM] = imx_clk_mux("lp_apm", MXC_CCM_CCSR, 9, 1, lp_apm_sel, ARRAY_SIZE(lp_apm_sel)); - clk[IMX5_CLK_IPU_DI0_SEL] = imx_clk_mux("ipu_di0_sel", MXC_CCM_CSCMR2, 26, 3, - mx51_ipu_di0_sel, ARRAY_SIZE(mx51_ipu_di0_sel)); - clk[IMX5_CLK_IPU_DI1_SEL] = imx_clk_mux("ipu_di1_sel", MXC_CCM_CSCMR2, 29, 3, - mx51_ipu_di1_sel, ARRAY_SIZE(mx51_ipu_di1_sel)); + clk[IMX5_CLK_IPU_DI0_SEL] = imx_clk_mux_flags("ipu_di0_sel", MXC_CCM_CSCMR2, 26, 3, + mx51_ipu_di0_sel, ARRAY_SIZE(mx51_ipu_di0_sel), CLK_SET_RATE_PARENT); + clk[IMX5_CLK_IPU_DI1_SEL] = imx_clk_mux_flags("ipu_di1_sel", MXC_CCM_CSCMR2, 29, 3, + mx51_ipu_di1_sel, ARRAY_SIZE(mx51_ipu_di1_sel), CLK_SET_RATE_PARENT); clk[IMX5_CLK_TVE_EXT_SEL] = imx_clk_mux_flags("tve_ext_sel", MXC_CCM_CSCMR1, 6, 1, mx51_tve_ext_sel, ARRAY_SIZE(mx51_tve_ext_sel), CLK_SET_RATE_PARENT); clk[IMX5_CLK_TVE_SEL] = imx_clk_mux("tve_sel", MXC_CCM_CSCMR1, 7, 1, diff --git a/drivers/clk/imx/clk-imx6sl.c b/drivers/clk/imx/clk-imx6sl.c index 5fd4ddac1bf1..9642cdf0fb88 100644 --- a/drivers/clk/imx/clk-imx6sl.c +++ b/drivers/clk/imx/clk-imx6sl.c @@ -71,7 +71,7 @@ static const char *pll5_bypass_sels[] = { "pll5", "pll5_bypass_src", }; static const char *pll6_bypass_sels[] = { "pll6", "pll6_bypass_src", }; static const char *pll7_bypass_sels[] = { "pll7", "pll7_bypass_src", }; -static struct clk_div_table clk_enet_ref_table[] = { +static const struct clk_div_table clk_enet_ref_table[] = { { .val = 0, .div = 20, }, { .val = 1, .div = 10, }, { .val = 2, .div = 5, }, @@ -79,14 +79,14 @@ static struct clk_div_table clk_enet_ref_table[] = { { } }; -static struct clk_div_table post_div_table[] = { +static const struct clk_div_table post_div_table[] = { { .val = 2, .div = 1, }, { .val = 1, .div = 2, }, { .val = 0, .div = 4, }, { } }; -static struct clk_div_table video_div_table[] = { +static 
const struct clk_div_table video_div_table[] = { { .val = 0, .div = 1, }, { .val = 1, .div = 2, }, { .val = 2, .div = 1, }, diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c index b5c96de41ccf..e6d389e333d7 100644 --- a/drivers/clk/imx/clk-imx6sx.c +++ b/drivers/clk/imx/clk-imx6sx.c @@ -105,7 +105,7 @@ static int const clks_init_on[] __initconst = { IMX6SX_CLK_EPIT2, }; -static struct clk_div_table clk_enet_ref_table[] = { +static const struct clk_div_table clk_enet_ref_table[] = { { .val = 0, .div = 20, }, { .val = 1, .div = 10, }, { .val = 2, .div = 5, }, @@ -113,14 +113,14 @@ static struct clk_div_table clk_enet_ref_table[] = { { } }; -static struct clk_div_table post_div_table[] = { +static const struct clk_div_table post_div_table[] = { { .val = 2, .div = 1, }, { .val = 1, .div = 2, }, { .val = 0, .div = 4, }, { } }; -static struct clk_div_table video_div_table[] = { +static const struct clk_div_table video_div_table[] = { { .val = 0, .div = 1, }, { .val = 1, .div = 2, }, { .val = 2, .div = 1, }, diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c index b4e0dff3c8c2..5e8c18afce9a 100644 --- a/drivers/clk/imx/clk-imx6ul.c +++ b/drivers/clk/imx/clk-imx6ul.c @@ -78,7 +78,7 @@ static int const clks_init_on[] __initconst = { IMX6UL_CLK_MMDC_P0_FAST, IMX6UL_CLK_MMDC_P0_IPG, }; -static struct clk_div_table clk_enet_ref_table[] = { +static const struct clk_div_table clk_enet_ref_table[] = { { .val = 0, .div = 20, }, { .val = 1, .div = 10, }, { .val = 2, .div = 5, }, @@ -86,14 +86,14 @@ static struct clk_div_table clk_enet_ref_table[] = { { } }; -static struct clk_div_table post_div_table[] = { +static const struct clk_div_table post_div_table[] = { { .val = 2, .div = 1, }, { .val = 1, .div = 2, }, { .val = 0, .div = 4, }, { } }; -static struct clk_div_table video_div_table[] = { +static const struct clk_div_table video_div_table[] = { { .val = 0, .div = 1, }, { .val = 1, .div = 2, }, { .val = 2, .div = 1, }, diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c index 3da121826b1b..2305699db467 100644 --- a/drivers/clk/imx/clk-imx7d.c +++ b/drivers/clk/imx/clk-imx7d.c @@ -27,7 +27,7 @@ static u32 share_count_sai2; static u32 share_count_sai3; static u32 share_count_nand; -static struct clk_div_table test_div_table[] = { +static const struct clk_div_table test_div_table[] = { { .val = 3, .div = 1, }, { .val = 2, .div = 1, }, { .val = 1, .div = 2, }, @@ -35,7 +35,7 @@ static struct clk_div_table test_div_table[] = { { } }; -static struct clk_div_table post_div_table[] = { +static const struct clk_div_table post_div_table[] = { { .val = 3, .div = 4, }, { .val = 2, .div = 1, }, { .val = 1, .div = 2, }, diff --git a/drivers/clk/imx/clk-vf610.c b/drivers/clk/imx/clk-vf610.c index 59b1863deb88..6dae54325a91 100644 --- a/drivers/clk/imx/clk-vf610.c +++ b/drivers/clk/imx/clk-vf610.c @@ -102,7 +102,7 @@ static const char *ftm_ext_sels[] = {"sirc_128k", "sxosc", "fxosc_half", "audio_ static const char *ftm_fix_sels[] = { "sxosc", "ipg_bus", }; -static struct clk_div_table pll4_audio_div_table[] = { +static const struct clk_div_table pll4_audio_div_table[] = { { .val = 0, .div = 1 }, { .val = 1, .div = 2 }, { .val = 2, .div = 6 }, diff --git a/drivers/clk/mediatek/clk-cpumux.c b/drivers/clk/mediatek/clk-cpumux.c index edd8e6918050..16e56772d280 100644 --- a/drivers/clk/mediatek/clk-cpumux.c +++ b/drivers/clk/mediatek/clk-cpumux.c @@ -27,7 +27,6 @@ static inline struct mtk_clk_cpumux *to_mtk_clk_cpumux(struct clk_hw *_hw) static u8 
clk_cpumux_get_parent(struct clk_hw *hw) { struct mtk_clk_cpumux *mux = to_mtk_clk_cpumux(hw); - int num_parents = clk_hw_get_num_parents(hw); unsigned int val; regmap_read(mux->regmap, mux->reg, &val); @@ -35,9 +34,6 @@ static u8 clk_cpumux_get_parent(struct clk_hw *hw) val >>= mux->shift; val &= mux->mask; - if (val >= num_parents) - return -EINVAL; - return val; } @@ -98,7 +94,7 @@ int __init mtk_clk_register_cpumuxes(struct device_node *node, regmap = syscon_node_to_regmap(node); if (IS_ERR(regmap)) { - pr_err("Cannot find regmap for %s: %ld\n", node->full_name, + pr_err("Cannot find regmap for %pOF: %ld\n", node, PTR_ERR(regmap)); return PTR_ERR(regmap); } diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c index 0541df78141c..9c0ae4278a94 100644 --- a/drivers/clk/mediatek/clk-mtk.c +++ b/drivers/clk/mediatek/clk-mtk.c @@ -114,7 +114,7 @@ int mtk_clk_register_gates(struct device_node *node, regmap = syscon_node_to_regmap(node); if (IS_ERR(regmap)) { - pr_err("Cannot find regmap for %s: %ld\n", node->full_name, + pr_err("Cannot find regmap for %pOF: %ld\n", node, PTR_ERR(regmap)); return PTR_ERR(regmap); } diff --git a/drivers/clk/mediatek/reset.c b/drivers/clk/mediatek/reset.c index 309049d41f1b..d3551d5efef2 100644 --- a/drivers/clk/mediatek/reset.c +++ b/drivers/clk/mediatek/reset.c @@ -72,7 +72,7 @@ void mtk_register_reset_controller(struct device_node *np, regmap = syscon_node_to_regmap(np); if (IS_ERR(regmap)) { - pr_err("Cannot find regmap for %s: %ld\n", np->full_name, + pr_err("Cannot find regmap for %pOF: %ld\n", np, PTR_ERR(regmap)); return; } diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig index 5588f75a8414..d2d0174a6eca 100644 --- a/drivers/clk/meson/Kconfig +++ b/drivers/clk/meson/Kconfig @@ -6,6 +6,7 @@ config COMMON_CLK_AMLOGIC config COMMON_CLK_MESON8B bool depends on COMMON_CLK_AMLOGIC + select RESET_CONTROLLER help Support for the clock controller on AmLogic S802 (Meson8), S805 (Meson8b) and S812 (Meson8m2) devices. Say Y if you diff --git a/drivers/clk/meson/Makefile b/drivers/clk/meson/Makefile index 83b6d9d65aa1..b139d41b25da 100644 --- a/drivers/clk/meson/Makefile +++ b/drivers/clk/meson/Makefile @@ -4,4 +4,4 @@ obj-$(CONFIG_COMMON_CLK_AMLOGIC) += clk-pll.o clk-cpu.o clk-mpll.o clk-audio-divider.o obj-$(CONFIG_COMMON_CLK_MESON8B) += meson8b.o -obj-$(CONFIG_COMMON_CLK_GXBB) += gxbb.o gxbb-aoclk.o +obj-$(CONFIG_COMMON_CLK_GXBB) += gxbb.o gxbb-aoclk.o gxbb-aoclk-regmap.o gxbb-aoclk-32k.o diff --git a/drivers/clk/meson/gxbb-aoclk-32k.c b/drivers/clk/meson/gxbb-aoclk-32k.c new file mode 100644 index 000000000000..491634dbc985 --- /dev/null +++ b/drivers/clk/meson/gxbb-aoclk-32k.c @@ -0,0 +1,194 @@ +/* + * Copyright (c) 2017 BayLibre, SAS. + * Author: Neil Armstrong + * + * SPDX-License-Identifier: GPL-2.0+ + */ + +#include +#include +#include +#include "gxbb-aoclk.h" + +/* + * The AO Domain embeds a dual/divider to generate a more precise + * 32,768KHz clock for low-power suspend mode and CEC. + * ______ ______ + * | | | | + * ______ | Div1 |-| Cnt1 | ______ + * | | /|______| |______|\ | | + * Xtal-->| Gate |---| ______ ______ X-X--| Gate |--> + * |______| | \| | | |/ | |______| + * | | Div2 |-| Cnt2 | | + * | |______| |______| | + * |_______________________| + * + * The dividing can be switched to single or dual, with a counter + * for each divider to set when the switching is done. + * The entire dividing mechanism can be also bypassed. 
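As a worked check of the single table entry used below (values taken from aoclk_cec_32k_table[]; the helper name is invented, not part of the patch): alternating N1 = 733 for M1 = 8 cycles and N2 = 732 for M2 = 11 cycles gives an average division ratio of (8*733 + 11*732)/19 = 13916/19 ~= 732.421, against the ideal 24000000/32768 = 732.421875, so the output lands within a fraction of a hertz of 32768 Hz.

static unsigned long example_cec_32k_average_rate(void)
{
	/* evaluates to 32768 for a 24 MHz parent */
	return DIV_ROUND_CLOSEST(24000000UL * (8 + 11), 8 * 733 + 11 * 732);
}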
+ */ + +#define CLK_CNTL0_N1_MASK GENMASK(11, 0) +#define CLK_CNTL0_N2_MASK GENMASK(23, 12) +#define CLK_CNTL0_DUALDIV_EN BIT(28) +#define CLK_CNTL0_OUT_GATE_EN BIT(30) +#define CLK_CNTL0_IN_GATE_EN BIT(31) + +#define CLK_CNTL1_M1_MASK GENMASK(11, 0) +#define CLK_CNTL1_M2_MASK GENMASK(23, 12) +#define CLK_CNTL1_BYPASS_EN BIT(24) +#define CLK_CNTL1_SELECT_OSC BIT(27) + +#define PWR_CNTL_ALT_32K_SEL GENMASK(13, 10) + +struct cec_32k_freq_table { + unsigned long parent_rate; + unsigned long target_rate; + bool dualdiv; + unsigned int n1; + unsigned int n2; + unsigned int m1; + unsigned int m2; +}; + +static const struct cec_32k_freq_table aoclk_cec_32k_table[] = { + [0] = { + .parent_rate = 24000000, + .target_rate = 32768, + .dualdiv = true, + .n1 = 733, + .n2 = 732, + .m1 = 8, + .m2 = 11, + }, +}; + +/* + * If CLK_CNTL0_DUALDIV_EN == 0 + * - will use N1 divider only + * If CLK_CNTL0_DUALDIV_EN == 1 + * - hold M1 cycles of N1 divider then changes to N2 + * - hold M2 cycles of N2 divider then changes to N1 + * Then we can get more accurate division. + */ +static unsigned long aoclk_cec_32k_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct aoclk_cec_32k *cec_32k = to_aoclk_cec_32k(hw); + unsigned long n1; + u32 reg0, reg1; + + regmap_read(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL0, ®0); + regmap_read(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL1, ®1); + + if (reg1 & CLK_CNTL1_BYPASS_EN) + return parent_rate; + + if (reg0 & CLK_CNTL0_DUALDIV_EN) { + unsigned long n2, m1, m2, f1, f2, p1, p2; + + n1 = FIELD_GET(CLK_CNTL0_N1_MASK, reg0) + 1; + n2 = FIELD_GET(CLK_CNTL0_N2_MASK, reg0) + 1; + + m1 = FIELD_GET(CLK_CNTL1_M1_MASK, reg1) + 1; + m2 = FIELD_GET(CLK_CNTL1_M2_MASK, reg1) + 1; + + f1 = DIV_ROUND_CLOSEST(parent_rate, n1); + f2 = DIV_ROUND_CLOSEST(parent_rate, n2); + + p1 = DIV_ROUND_CLOSEST(100000000 * m1, f1 * (m1 + m2)); + p2 = DIV_ROUND_CLOSEST(100000000 * m2, f2 * (m1 + m2)); + + return DIV_ROUND_UP(100000000, p1 + p2); + } + + n1 = FIELD_GET(CLK_CNTL0_N1_MASK, reg0) + 1; + + return DIV_ROUND_CLOSEST(parent_rate, n1); +} + +static const struct cec_32k_freq_table *find_cec_32k_freq(unsigned long rate, + unsigned long prate) +{ + int i; + + for (i = 0 ; i < ARRAY_SIZE(aoclk_cec_32k_table) ; ++i) + if (aoclk_cec_32k_table[i].parent_rate == prate && + aoclk_cec_32k_table[i].target_rate == rate) + return &aoclk_cec_32k_table[i]; + + return NULL; +} + +static long aoclk_cec_32k_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + const struct cec_32k_freq_table *freq = find_cec_32k_freq(rate, + *prate); + + /* If invalid return first one */ + if (!freq) + return aoclk_cec_32k_table[0].target_rate; + + return freq->target_rate; +} + +/* + * From the Amlogic init procedure, the IN and OUT gates needs to be handled + * in the init procedure to avoid any glitches. 
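A consumer-side sketch (not part of the patch; the device, clock lookup name and helper are assumptions): a CEC controller driver would simply request 32768 Hz and let the set_rate hook below perform the glitch-free reprogramming described above.

static int example_cec_clk_setup(struct device *dev)
{
	struct clk *clk = devm_clk_get(dev, "core");	/* name is hypothetical */

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	clk_set_rate(clk, 32768);	/* ends up in aoclk_cec_32k_set_rate() */

	return clk_prepare_enable(clk);
}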
+ */ + +static int aoclk_cec_32k_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + const struct cec_32k_freq_table *freq = find_cec_32k_freq(rate, + parent_rate); + struct aoclk_cec_32k *cec_32k = to_aoclk_cec_32k(hw); + u32 reg = 0; + + if (!freq) + return -EINVAL; + + /* Disable clock */ + regmap_update_bits(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL0, + CLK_CNTL0_IN_GATE_EN | CLK_CNTL0_OUT_GATE_EN, 0); + + reg = FIELD_PREP(CLK_CNTL0_N1_MASK, freq->n1 - 1); + if (freq->dualdiv) + reg |= CLK_CNTL0_DUALDIV_EN | + FIELD_PREP(CLK_CNTL0_N2_MASK, freq->n2 - 1); + + regmap_write(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL0, reg); + + reg = FIELD_PREP(CLK_CNTL1_M1_MASK, freq->m1 - 1); + if (freq->dualdiv) + reg |= FIELD_PREP(CLK_CNTL1_M2_MASK, freq->m2 - 1); + + regmap_write(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL1, reg); + + /* Enable clock */ + regmap_update_bits(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL0, + CLK_CNTL0_IN_GATE_EN, CLK_CNTL0_IN_GATE_EN); + + udelay(200); + + regmap_update_bits(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL0, + CLK_CNTL0_OUT_GATE_EN, CLK_CNTL0_OUT_GATE_EN); + + regmap_update_bits(cec_32k->regmap, AO_CRT_CLK_CNTL1, + CLK_CNTL1_SELECT_OSC, CLK_CNTL1_SELECT_OSC); + + /* Select 32k from XTAL */ + regmap_update_bits(cec_32k->regmap, + AO_RTI_PWR_CNTL_REG0, + PWR_CNTL_ALT_32K_SEL, + FIELD_PREP(PWR_CNTL_ALT_32K_SEL, 4)); + + return 0; +} + +const struct clk_ops meson_aoclk_cec_32k_ops = { + .recalc_rate = aoclk_cec_32k_recalc_rate, + .round_rate = aoclk_cec_32k_round_rate, + .set_rate = aoclk_cec_32k_set_rate, +}; diff --git a/drivers/clk/meson/gxbb-aoclk-regmap.c b/drivers/clk/meson/gxbb-aoclk-regmap.c new file mode 100644 index 000000000000..2515fbfa0467 --- /dev/null +++ b/drivers/clk/meson/gxbb-aoclk-regmap.c @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2017 BayLibre, SAS. 
+ * Author: Neil Armstrong + * + * SPDX-License-Identifier: GPL-2.0+ + */ + +#include +#include +#include +#include "gxbb-aoclk.h" + +static int aoclk_gate_regmap_enable(struct clk_hw *hw) +{ + struct aoclk_gate_regmap *gate = to_aoclk_gate_regmap(hw); + + return regmap_update_bits(gate->regmap, AO_RTI_GEN_CNTL_REG0, + BIT(gate->bit_idx), BIT(gate->bit_idx)); +} + +static void aoclk_gate_regmap_disable(struct clk_hw *hw) +{ + struct aoclk_gate_regmap *gate = to_aoclk_gate_regmap(hw); + + regmap_update_bits(gate->regmap, AO_RTI_GEN_CNTL_REG0, + BIT(gate->bit_idx), 0); +} + +static int aoclk_gate_regmap_is_enabled(struct clk_hw *hw) +{ + struct aoclk_gate_regmap *gate = to_aoclk_gate_regmap(hw); + unsigned int val; + int ret; + + ret = regmap_read(gate->regmap, AO_RTI_GEN_CNTL_REG0, &val); + if (ret) + return ret; + + return (val & BIT(gate->bit_idx)) != 0; +} + +const struct clk_ops meson_aoclk_gate_regmap_ops = { + .enable = aoclk_gate_regmap_enable, + .disable = aoclk_gate_regmap_disable, + .is_enabled = aoclk_gate_regmap_is_enabled, +}; diff --git a/drivers/clk/meson/gxbb-aoclk.c b/drivers/clk/meson/gxbb-aoclk.c index b45c5fba7e35..6c161e0a8e59 100644 --- a/drivers/clk/meson/gxbb-aoclk.c +++ b/drivers/clk/meson/gxbb-aoclk.c @@ -56,16 +56,20 @@ #include #include #include +#include +#include #include +#include #include #include +#include "gxbb-aoclk.h" static DEFINE_SPINLOCK(gxbb_aoclk_lock); struct gxbb_aoclk_reset_controller { struct reset_controller_dev reset; unsigned int *data; - void __iomem *base; + struct regmap *regmap; }; static int gxbb_aoclk_do_reset(struct reset_controller_dev *rcdev, @@ -74,9 +78,8 @@ static int gxbb_aoclk_do_reset(struct reset_controller_dev *rcdev, struct gxbb_aoclk_reset_controller *reset = container_of(rcdev, struct gxbb_aoclk_reset_controller, reset); - writel(BIT(reset->data[id]), reset->base); - - return 0; + return regmap_write(reset->regmap, AO_RTI_GEN_CNTL_REG0, + BIT(reset->data[id])); } static const struct reset_control_ops gxbb_aoclk_reset_ops = { @@ -84,13 +87,12 @@ static const struct reset_control_ops gxbb_aoclk_reset_ops = { }; #define GXBB_AO_GATE(_name, _bit) \ -static struct clk_gate _name##_ao = { \ - .reg = (void __iomem *)0, \ +static struct aoclk_gate_regmap _name##_ao = { \ .bit_idx = (_bit), \ .lock = &gxbb_aoclk_lock, \ .hw.init = &(struct clk_init_data) { \ .name = #_name "_ao", \ - .ops = &clk_gate_ops, \ + .ops = &meson_aoclk_gate_regmap_ops, \ .parent_names = (const char *[]){ "clk81" }, \ .num_parents = 1, \ .flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED), \ @@ -104,6 +106,17 @@ GXBB_AO_GATE(uart1, 3); GXBB_AO_GATE(uart2, 5); GXBB_AO_GATE(ir_blaster, 6); +static struct aoclk_cec_32k cec_32k_ao = { + .lock = &gxbb_aoclk_lock, + .hw.init = &(struct clk_init_data) { + .name = "cec_32k_ao", + .ops = &meson_aoclk_cec_32k_ops, + .parent_names = (const char *[]){ "xtal" }, + .num_parents = 1, + .flags = CLK_IGNORE_UNUSED, + }, +}; + static unsigned int gxbb_aoclk_reset[] = { [RESET_AO_REMOTE] = 16, [RESET_AO_I2C_MASTER] = 18, @@ -113,7 +126,7 @@ static unsigned int gxbb_aoclk_reset[] = { [RESET_AO_IR_BLASTER] = 23, }; -static struct clk_gate *gxbb_aoclk_gate[] = { +static struct aoclk_gate_regmap *gxbb_aoclk_gate[] = { [CLKID_AO_REMOTE] = &remote_ao, [CLKID_AO_I2C_MASTER] = &i2c_master_ao, [CLKID_AO_I2C_SLAVE] = &i2c_slave_ao, @@ -130,30 +143,30 @@ static struct clk_hw_onecell_data gxbb_aoclk_onecell_data = { [CLKID_AO_UART1] = &uart1_ao.hw, [CLKID_AO_UART2] = &uart2_ao.hw, [CLKID_AO_IR_BLASTER] = &ir_blaster_ao.hw, + 
[CLKID_AO_CEC_32K] = &cec_32k_ao.hw, }, - .num = ARRAY_SIZE(gxbb_aoclk_gate), + .num = 7, }; static int gxbb_aoclkc_probe(struct platform_device *pdev) { - struct resource *res; - void __iomem *base; - int ret, clkid; - struct device *dev = &pdev->dev; struct gxbb_aoclk_reset_controller *rstc; + struct device *dev = &pdev->dev; + struct regmap *regmap; + int ret, clkid; rstc = devm_kzalloc(dev, sizeof(*rstc), GFP_KERNEL); if (!rstc) return -ENOMEM; - /* Generic clocks */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - base = devm_ioremap_resource(dev, res); - if (IS_ERR(base)) - return PTR_ERR(base); + regmap = syscon_node_to_regmap(of_get_parent(dev->of_node)); + if (IS_ERR(regmap)) { + dev_err(dev, "failed to get regmap\n"); + return -ENODEV; + } /* Reset Controller */ - rstc->base = base; + rstc->regmap = regmap; rstc->data = gxbb_aoclk_reset; rstc->reset.ops = &gxbb_aoclk_reset_ops; rstc->reset.nr_resets = ARRAY_SIZE(gxbb_aoclk_reset); @@ -161,10 +174,10 @@ static int gxbb_aoclkc_probe(struct platform_device *pdev) ret = devm_reset_controller_register(dev, &rstc->reset); /* - * Populate base address and register all clks + * Populate regmap and register all clks */ - for (clkid = 0; clkid < gxbb_aoclk_onecell_data.num; clkid++) { - gxbb_aoclk_gate[clkid]->reg = base; + for (clkid = 0; clkid < ARRAY_SIZE(gxbb_aoclk_gate); clkid++) { + gxbb_aoclk_gate[clkid]->regmap = regmap; ret = devm_clk_hw_register(dev, gxbb_aoclk_onecell_data.hws[clkid]); @@ -172,12 +185,18 @@ static int gxbb_aoclkc_probe(struct platform_device *pdev) return ret; } + /* Specific clocks */ + cec_32k_ao.regmap = regmap; + ret = devm_clk_hw_register(dev, &cec_32k_ao.hw); + if (ret) + return ret; + return of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get, &gxbb_aoclk_onecell_data); } static const struct of_device_id gxbb_aoclkc_match_table[] = { - { .compatible = "amlogic,gxbb-aoclkc" }, + { .compatible = "amlogic,meson-gx-aoclkc" }, { } }; diff --git a/drivers/clk/meson/gxbb-aoclk.h b/drivers/clk/meson/gxbb-aoclk.h new file mode 100644 index 000000000000..e8604c8f7eee --- /dev/null +++ b/drivers/clk/meson/gxbb-aoclk.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2017 BayLibre, SAS + * Author: Neil Armstrong + * + * SPDX-License-Identifier: GPL-2.0+ + */ + +#ifndef __GXBB_AOCLKC_H +#define __GXBB_AOCLKC_H + +/* AO Configuration Clock registers offsets */ +#define AO_RTI_PWR_CNTL_REG1 0x0c +#define AO_RTI_PWR_CNTL_REG0 0x10 +#define AO_RTI_GEN_CNTL_REG0 0x40 +#define AO_OSCIN_CNTL 0x58 +#define AO_CRT_CLK_CNTL1 0x68 +#define AO_RTC_ALT_CLK_CNTL0 0x94 +#define AO_RTC_ALT_CLK_CNTL1 0x98 + +struct aoclk_gate_regmap { + struct clk_hw hw; + unsigned bit_idx; + struct regmap *regmap; + spinlock_t *lock; +}; + +#define to_aoclk_gate_regmap(_hw) \ + container_of(_hw, struct aoclk_gate_regmap, hw) + +extern const struct clk_ops meson_aoclk_gate_regmap_ops; + +struct aoclk_cec_32k { + struct clk_hw hw; + struct regmap *regmap; + spinlock_t *lock; +}; + +#define to_aoclk_cec_32k(_hw) container_of(_hw, struct aoclk_cec_32k, hw) + +extern const struct clk_ops meson_aoclk_cec_32k_ops; + +#endif /* __GXBB_AOCLKC_H */ diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c index a7ea5f3da89d..b2d1e8ed7152 100644 --- a/drivers/clk/meson/gxbb.c +++ b/drivers/clk/meson/gxbb.c @@ -850,13 +850,14 @@ static struct meson_clk_audio_divider gxbb_cts_amclk_div = { .shift = 0, .width = 8, }, + .flags = CLK_DIVIDER_ROUND_CLOSEST, .lock = &clk_lock, .hw.init = &(struct clk_init_data){ .name = "cts_amclk_div", .ops = 
&meson_clk_audio_divider_ops, .parent_names = (const char *[]){ "cts_amclk_sel" }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT | CLK_DIVIDER_ROUND_CLOSEST, + .flags = CLK_SET_RATE_PARENT, }, }; @@ -880,7 +881,7 @@ static struct clk_mux gxbb_cts_mclk_i958_sel = { /* Default parent unknown (register reset value: 0) */ .table = (u32[]){ 1, 2, 3 }, .lock = &clk_lock, - .hw.init = &(struct clk_init_data){ + .hw.init = &(struct clk_init_data) { .name = "cts_mclk_i958_sel", .ops = &clk_mux_ops, .parent_names = (const char *[]){ "mpll0", "mpll1", "mpll2" }, @@ -894,12 +895,13 @@ static struct clk_divider gxbb_cts_mclk_i958_div = { .shift = 16, .width = 8, .lock = &clk_lock, - .hw.init = &(struct clk_init_data){ + .flags = CLK_DIVIDER_ROUND_CLOSEST, + .hw.init = &(struct clk_init_data) { .name = "cts_mclk_i958_div", .ops = &clk_divider_ops, .parent_names = (const char *[]){ "cts_mclk_i958_sel" }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT | CLK_DIVIDER_ROUND_CLOSEST, + .flags = CLK_SET_RATE_PARENT, }, }; @@ -979,6 +981,156 @@ static struct clk_mux gxbb_32k_clk_sel = { }, }; +static const char * const gxbb_sd_emmc_clk0_parent_names[] = { + "xtal", "fclk_div2", "fclk_div3", "fclk_div5", "fclk_div7", + + /* + * Following these parent clocks, we should also have had mpll2, mpll3 + * and gp0_pll, but these clocks are too precious to be used here. All + * the necessary rates for MMC and NAND operation can be achieved using + * xtal or fclk_div clocks. + */ +}; + +/* SDIO clock */ +static struct clk_mux gxbb_sd_emmc_a_clk0_sel = { + .reg = (void *)HHI_SD_EMMC_CLK_CNTL, + .mask = 0x7, + .shift = 9, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data) { + .name = "sd_emmc_a_clk0_sel", + .ops = &clk_mux_ops, + .parent_names = gxbb_sd_emmc_clk0_parent_names, + .num_parents = ARRAY_SIZE(gxbb_sd_emmc_clk0_parent_names), + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_divider gxbb_sd_emmc_a_clk0_div = { + .reg = (void *)HHI_SD_EMMC_CLK_CNTL, + .shift = 0, + .width = 7, + .lock = &clk_lock, + .flags = CLK_DIVIDER_ROUND_CLOSEST, + .hw.init = &(struct clk_init_data) { + .name = "sd_emmc_a_clk0_div", + .ops = &clk_divider_ops, + .parent_names = (const char *[]){ "sd_emmc_a_clk0_sel" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_gate gxbb_sd_emmc_a_clk0 = { + .reg = (void *)HHI_SD_EMMC_CLK_CNTL, + .bit_idx = 7, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "sd_emmc_a_clk0", + .ops = &clk_gate_ops, + .parent_names = (const char *[]){ "sd_emmc_a_clk0_div" }, + .num_parents = 1, + + /* + * FIXME: + * We need CLK_IGNORE_UNUSED because the mmc DT node points to xtal + * instead of this clock. CCF would gate this on boot, killing + * the mmc controller.
Please remove this flag once DT properly + * point to this clock instead of xtal + * + * Same goes for emmc B and C clocks + */ + .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, + }, +}; + +/* SDcard clock */ +static struct clk_mux gxbb_sd_emmc_b_clk0_sel = { + .reg = (void *)HHI_SD_EMMC_CLK_CNTL, + .mask = 0x7, + .shift = 25, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data) { + .name = "sd_emmc_b_clk0_sel", + .ops = &clk_mux_ops, + .parent_names = gxbb_sd_emmc_clk0_parent_names, + .num_parents = ARRAY_SIZE(gxbb_sd_emmc_clk0_parent_names), + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_divider gxbb_sd_emmc_b_clk0_div = { + .reg = (void *)HHI_SD_EMMC_CLK_CNTL, + .shift = 16, + .width = 7, + .lock = &clk_lock, + .flags = CLK_DIVIDER_ROUND_CLOSEST, + .hw.init = &(struct clk_init_data) { + .name = "sd_emmc_b_clk0_div", + .ops = &clk_divider_ops, + .parent_names = (const char *[]){ "sd_emmc_b_clk0_sel" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_gate gxbb_sd_emmc_b_clk0 = { + .reg = (void *)HHI_SD_EMMC_CLK_CNTL, + .bit_idx = 23, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "sd_emmc_b_clk0", + .ops = &clk_gate_ops, + .parent_names = (const char *[]){ "sd_emmc_b_clk0_div" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, + }, +}; + +/* EMMC/NAND clock */ +static struct clk_mux gxbb_sd_emmc_c_clk0_sel = { + .reg = (void *)HHI_NAND_CLK_CNTL, + .mask = 0x7, + .shift = 9, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data) { + .name = "sd_emmc_c_clk0_sel", + .ops = &clk_mux_ops, + .parent_names = gxbb_sd_emmc_clk0_parent_names, + .num_parents = ARRAY_SIZE(gxbb_sd_emmc_clk0_parent_names), + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_divider gxbb_sd_emmc_c_clk0_div = { + .reg = (void *)HHI_NAND_CLK_CNTL, + .shift = 0, + .width = 7, + .lock = &clk_lock, + .flags = CLK_DIVIDER_ROUND_CLOSEST, + .hw.init = &(struct clk_init_data) { + .name = "sd_emmc_c_clk0_div", + .ops = &clk_divider_ops, + .parent_names = (const char *[]){ "sd_emmc_c_clk0_sel" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_gate gxbb_sd_emmc_c_clk0 = { + .reg = (void *)HHI_NAND_CLK_CNTL, + .bit_idx = 7, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "sd_emmc_c_clk0", + .ops = &clk_gate_ops, + .parent_names = (const char *[]){ "sd_emmc_c_clk0_div" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, + }, +}; + /* Everything Else (EE) domain gates */ static MESON_GATE(gxbb_ddr, HHI_GCLK_MPEG0, 0); static MESON_GATE(gxbb_dos, HHI_GCLK_MPEG0, 1); @@ -1188,6 +1340,16 @@ static struct clk_hw_onecell_data gxbb_hw_onecell_data = { [CLKID_32K_CLK] = &gxbb_32k_clk.hw, [CLKID_32K_CLK_SEL] = &gxbb_32k_clk_sel.hw, [CLKID_32K_CLK_DIV] = &gxbb_32k_clk_div.hw, + [CLKID_SD_EMMC_A_CLK0_SEL] = &gxbb_sd_emmc_a_clk0_sel.hw, + [CLKID_SD_EMMC_A_CLK0_DIV] = &gxbb_sd_emmc_a_clk0_div.hw, + [CLKID_SD_EMMC_A_CLK0] = &gxbb_sd_emmc_a_clk0.hw, + [CLKID_SD_EMMC_B_CLK0_SEL] = &gxbb_sd_emmc_b_clk0_sel.hw, + [CLKID_SD_EMMC_B_CLK0_DIV] = &gxbb_sd_emmc_b_clk0_div.hw, + [CLKID_SD_EMMC_B_CLK0] = &gxbb_sd_emmc_b_clk0.hw, + [CLKID_SD_EMMC_C_CLK0_SEL] = &gxbb_sd_emmc_c_clk0_sel.hw, + [CLKID_SD_EMMC_C_CLK0_DIV] = &gxbb_sd_emmc_c_clk0_div.hw, + [CLKID_SD_EMMC_C_CLK0] = &gxbb_sd_emmc_c_clk0.hw, + [NR_CLKS] = NULL, }, .num = NR_CLKS, }; @@ -1310,6 +1472,16 @@ static struct clk_hw_onecell_data gxl_hw_onecell_data = { [CLKID_32K_CLK] = &gxbb_32k_clk.hw, 
[CLKID_32K_CLK_SEL] = &gxbb_32k_clk_sel.hw, [CLKID_32K_CLK_DIV] = &gxbb_32k_clk_div.hw, + [CLKID_SD_EMMC_A_CLK0_SEL] = &gxbb_sd_emmc_a_clk0_sel.hw, + [CLKID_SD_EMMC_A_CLK0_DIV] = &gxbb_sd_emmc_a_clk0_div.hw, + [CLKID_SD_EMMC_A_CLK0] = &gxbb_sd_emmc_a_clk0.hw, + [CLKID_SD_EMMC_B_CLK0_SEL] = &gxbb_sd_emmc_b_clk0_sel.hw, + [CLKID_SD_EMMC_B_CLK0_DIV] = &gxbb_sd_emmc_b_clk0_div.hw, + [CLKID_SD_EMMC_B_CLK0] = &gxbb_sd_emmc_b_clk0.hw, + [CLKID_SD_EMMC_C_CLK0_SEL] = &gxbb_sd_emmc_c_clk0_sel.hw, + [CLKID_SD_EMMC_C_CLK0_DIV] = &gxbb_sd_emmc_c_clk0_div.hw, + [CLKID_SD_EMMC_C_CLK0] = &gxbb_sd_emmc_c_clk0.hw, + [NR_CLKS] = NULL, }, .num = NR_CLKS, }; @@ -1425,6 +1597,9 @@ static struct clk_gate *const gxbb_clk_gates[] = { &gxbb_cts_amclk, &gxbb_cts_mclk_i958, &gxbb_32k_clk, + &gxbb_sd_emmc_a_clk0, + &gxbb_sd_emmc_b_clk0, + &gxbb_sd_emmc_c_clk0, }; static struct clk_mux *const gxbb_clk_muxes[] = { @@ -1437,6 +1612,9 @@ static struct clk_mux *const gxbb_clk_muxes[] = { &gxbb_cts_mclk_i958_sel, &gxbb_cts_i958, &gxbb_32k_clk_sel, + &gxbb_sd_emmc_a_clk0_sel, + &gxbb_sd_emmc_b_clk0_sel, + &gxbb_sd_emmc_c_clk0_sel, }; static struct clk_divider *const gxbb_clk_dividers[] = { @@ -1446,6 +1624,9 @@ static struct clk_divider *const gxbb_clk_dividers[] = { &gxbb_mali_1_div, &gxbb_cts_mclk_i958_div, &gxbb_32k_clk_div, + &gxbb_sd_emmc_a_clk0_div, + &gxbb_sd_emmc_b_clk0_div, + &gxbb_sd_emmc_c_clk0_div, }; static struct meson_clk_audio_divider *const gxbb_audio_dividers[] = { diff --git a/drivers/clk/meson/gxbb.h b/drivers/clk/meson/gxbb.h index d63e77e8433d..5b1d4b374d1c 100644 --- a/drivers/clk/meson/gxbb.h +++ b/drivers/clk/meson/gxbb.h @@ -167,130 +167,33 @@ * CLKID index values * * These indices are entirely contrived and do not map onto the hardware. - * Migrate them out of this header and into the DT header file when they need - * to be exposed to client nodes in DT: include/dt-bindings/clock/gxbb-clkc.h + * It has now been decided to expose everything by default in the DT header: + * include/dt-bindings/clock/gxbb-clkc.h. Only the clocks ids we don't want + * to expose, such as the internal muxes and dividers of composite clocks, + * will remain defined here. 
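+ * For example, the sd_emmc composite clocks added in this series keep their + * internal ids here (CLKID_SD_EMMC_A_CLK0_SEL, CLKID_SD_EMMC_A_CLK0_DIV and + * the B/C equivalents), while the leaf gate ids such as CLKID_SD_EMMC_A_CLK0 + * come from the DT binding header.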
*/ -#define CLKID_SYS_PLL 0 /* ID 1 is unused (it was used by the non-existing CLKID_CPUCLK before) */ -/* CLKID_HDMI_PLL */ -#define CLKID_FIXED_PLL 3 -/* CLKID_FCLK_DIV2 */ -/* CLKID_FCLK_DIV3 */ -/* CLKID_FCLK_DIV4 */ -#define CLKID_FCLK_DIV5 7 -#define CLKID_FCLK_DIV7 8 -/* CLKID_GP0_PLL */ #define CLKID_MPEG_SEL 10 #define CLKID_MPEG_DIV 11 -/* CLKID_CLK81 */ -#define CLKID_MPLL0 13 -#define CLKID_MPLL1 14 -/* CLKID_MPLL2 */ -#define CLKID_DDR 16 -#define CLKID_DOS 17 -#define CLKID_ISA 18 -#define CLKID_PL301 19 -#define CLKID_PERIPHS 20 -/* CLKID_SPICC */ -/* CLKID_I2C */ -/* #define CLKID_SAR_ADC */ -#define CLKID_SMART_CARD 24 -/* CLKID_RNG0 */ -/* CLKID_UART0 */ -#define CLKID_SDHC 27 -#define CLKID_STREAM 28 -#define CLKID_ASYNC_FIFO 29 -#define CLKID_SDIO 30 -#define CLKID_ABUF 31 -#define CLKID_HIU_IFACE 32 -#define CLKID_ASSIST_MISC 33 -/* CLKID_SPI */ -#define CLKID_I2S_SPDIF 35 -/* CLKID_ETH */ -#define CLKID_DEMUX 37 -/* CLKID_AIU_GLUE */ -/* CLKID_IEC958 */ -/* CLKID_I2S_OUT */ -#define CLKID_AMCLK 41 -#define CLKID_AIFIFO2 42 -#define CLKID_MIXER 43 -/* CLKID_MIXER_IFACE */ -#define CLKID_ADC 45 -#define CLKID_BLKMV 46 -/* CLKID_AIU */ -/* CLKID_UART1 */ -#define CLKID_G2D 49 -/* CLKID_USB0 */ -/* CLKID_USB1 */ -#define CLKID_RESET 52 -#define CLKID_NAND 53 -#define CLKID_DOS_PARSER 54 -/* CLKID_USB */ -#define CLKID_VDIN1 56 -#define CLKID_AHB_ARB0 57 -#define CLKID_EFUSE 58 -#define CLKID_BOOT_ROM 59 -#define CLKID_AHB_DATA_BUS 60 -#define CLKID_AHB_CTRL_BUS 61 -#define CLKID_HDMI_INTR_SYNC 62 -/* CLKID_HDMI_PCLK */ -/* CLKID_USB1_DDR_BRIDGE */ -/* CLKID_USB0_DDR_BRIDGE */ -#define CLKID_MMC_PCLK 66 -#define CLKID_DVIN 67 -/* CLKID_UART2 */ -/* #define CLKID_SANA */ -#define CLKID_VPU_INTR 70 -#define CLKID_SEC_AHB_AHB3_BRIDGE 71 -#define CLKID_CLK81_A53 72 -#define CLKID_VCLK2_VENCI0 73 -#define CLKID_VCLK2_VENCI1 74 -#define CLKID_VCLK2_VENCP0 75 -#define CLKID_VCLK2_VENCP1 76 -/* CLKID_GCLK_VENCI_INT0 */ -#define CLKID_GCLK_VENCI_INT 78 -#define CLKID_DAC_CLK 79 -/* CLKID_AOCLK_GATE */ -/* CLKID_IEC958_GATE */ -#define CLKID_ENC480P 82 -#define CLKID_RNG1 83 -#define CLKID_GCLK_VENCI_INT1 84 -#define CLKID_VCLK2_VENCLMCC 85 -#define CLKID_VCLK2_VENCL 86 -#define CLKID_VCLK_OTHER 87 -#define CLKID_EDP 88 -#define CLKID_AO_MEDIA_CPU 89 -#define CLKID_AO_AHB_SRAM 90 -#define CLKID_AO_AHB_BUS 91 -#define CLKID_AO_IFACE 92 -/* CLKID_AO_I2C */ -/* CLKID_SD_EMMC_A */ -/* CLKID_SD_EMMC_B */ -/* CLKID_SD_EMMC_C */ -/* CLKID_SAR_ADC_CLK */ -/* CLKID_SAR_ADC_SEL */ #define CLKID_SAR_ADC_DIV 99 -/* CLKID_MALI_0_SEL */ -#define CLKID_MALI_0_DIV 101 -/* CLKID_MALI_0 */ -/* CLKID_MALI_1_SEL */ -#define CLKID_MALI_1_DIV 104 -/* CLKID_MALI_1 */ -/* CLKID_MALI */ -/* CLKID_CTS_AMCLK */ +#define CLKID_MALI_0_DIV 101 +#define CLKID_MALI_1_DIV 104 #define CLKID_CTS_AMCLK_SEL 108 #define CLKID_CTS_AMCLK_DIV 109 -/* CLKID_CTS_MCLK_I958 */ #define CLKID_CTS_MCLK_I958_SEL 111 #define CLKID_CTS_MCLK_I958_DIV 112 -/* CLKID_CTS_I958 */ -#define CLKID_32K_CLK 114 #define CLKID_32K_CLK_SEL 115 #define CLKID_32K_CLK_DIV 116 +#define CLKID_SD_EMMC_A_CLK0_SEL 117 +#define CLKID_SD_EMMC_A_CLK0_DIV 118 +#define CLKID_SD_EMMC_B_CLK0_SEL 120 +#define CLKID_SD_EMMC_B_CLK0_DIV 121 +#define CLKID_SD_EMMC_C_CLK0_SEL 123 +#define CLKID_SD_EMMC_C_CLK0_DIV 124 -#define NR_CLKS 117 +#define NR_CLKS 126 -/* include the CLKIDs that have been made part of the stable DT binding */ +/* include the CLKIDs that have been made part of the DT binding */ #include #endif /* __GXBB_H */ diff --git 
a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c index 6ec512ad2598..20ab7190d328 100644 --- a/drivers/clk/meson/meson8b.c +++ b/drivers/clk/meson/meson8b.c @@ -25,6 +25,8 @@ #include #include #include +#include +#include #include #include "clkc.h" @@ -32,6 +34,13 @@ static DEFINE_SPINLOCK(clk_lock); +static void __iomem *clk_base; + +struct meson8b_clk_reset { + struct reset_controller_dev reset; + void __iomem *base; +}; + static const struct pll_rate_table sys_pll_rate_table[] = { PLL_RATE(312000000, 52, 1, 2), PLL_RATE(336000000, 56, 1, 2), @@ -590,6 +599,7 @@ static struct clk_hw_onecell_data meson8b_hw_onecell_data = { [CLKID_MPLL0] = &meson8b_mpll0.hw, [CLKID_MPLL1] = &meson8b_mpll1.hw, [CLKID_MPLL2] = &meson8b_mpll2.hw, + [CLK_NR_CLKS] = NULL, }, .num = CLK_NR_CLKS, }; @@ -695,20 +705,114 @@ static struct clk_divider *const meson8b_clk_dividers[] = { &meson8b_mpeg_clk_div, }; +static const struct meson8b_clk_reset_line { + u32 reg; + u8 bit_idx; +} meson8b_clk_reset_bits[] = { + [CLKC_RESET_L2_CACHE_SOFT_RESET] = { + .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 30 + }, + [CLKC_RESET_AXI_64_TO_128_BRIDGE_A5_SOFT_RESET] = { + .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 29 + }, + [CLKC_RESET_SCU_SOFT_RESET] = { + .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 28 + }, + [CLKC_RESET_CPU3_SOFT_RESET] = { + .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 27 + }, + [CLKC_RESET_CPU2_SOFT_RESET] = { + .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 26 + }, + [CLKC_RESET_CPU1_SOFT_RESET] = { + .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 25 + }, + [CLKC_RESET_CPU0_SOFT_RESET] = { + .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 24 + }, + [CLKC_RESET_A5_GLOBAL_RESET] = { + .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 18 + }, + [CLKC_RESET_A5_AXI_SOFT_RESET] = { + .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 17 + }, + [CLKC_RESET_A5_ABP_SOFT_RESET] = { + .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 16 + }, + [CLKC_RESET_AXI_64_TO_128_BRIDGE_MMC_SOFT_RESET] = { + .reg = HHI_SYS_CPU_CLK_CNTL1, .bit_idx = 30 + }, + [CLKC_RESET_VID_CLK_CNTL_SOFT_RESET] = { + .reg = HHI_VID_CLK_CNTL, .bit_idx = 15 + }, + [CLKC_RESET_VID_DIVIDER_CNTL_SOFT_RESET_POST] = { + .reg = HHI_VID_DIVIDER_CNTL, .bit_idx = 7 + }, + [CLKC_RESET_VID_DIVIDER_CNTL_SOFT_RESET_PRE] = { + .reg = HHI_VID_DIVIDER_CNTL, .bit_idx = 3 + }, + [CLKC_RESET_VID_DIVIDER_CNTL_RESET_N_POST] = { + .reg = HHI_VID_DIVIDER_CNTL, .bit_idx = 1 + }, + [CLKC_RESET_VID_DIVIDER_CNTL_RESET_N_PRE] = { + .reg = HHI_VID_DIVIDER_CNTL, .bit_idx = 0 + }, +}; + +static int meson8b_clk_reset_update(struct reset_controller_dev *rcdev, + unsigned long id, bool assert) +{ + struct meson8b_clk_reset *meson8b_clk_reset = + container_of(rcdev, struct meson8b_clk_reset, reset); + unsigned long flags; + const struct meson8b_clk_reset_line *reset; + u32 val; + + if (id >= ARRAY_SIZE(meson8b_clk_reset_bits)) + return -EINVAL; + + reset = &meson8b_clk_reset_bits[id]; + + spin_lock_irqsave(&clk_lock, flags); + + val = readl(meson8b_clk_reset->base + reset->reg); + if (assert) + val |= BIT(reset->bit_idx); + else + val &= ~BIT(reset->bit_idx); + writel(val, meson8b_clk_reset->base + reset->reg); + + spin_unlock_irqrestore(&clk_lock, flags); + + return 0; +} + +static int meson8b_clk_reset_assert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + return meson8b_clk_reset_update(rcdev, id, true); +} + +static int meson8b_clk_reset_deassert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + return meson8b_clk_reset_update(rcdev, id, false); +} + +static const struct reset_control_ops 
meson8b_clk_reset_ops = { + .assert = meson8b_clk_reset_assert, + .deassert = meson8b_clk_reset_deassert, +}; + static int meson8b_clkc_probe(struct platform_device *pdev) { - void __iomem *clk_base; int ret, clkid, i; struct clk_hw *parent_hw; struct clk *parent_clk; struct device *dev = &pdev->dev; - /* Generic clocks and PLLs */ - clk_base = of_iomap(dev->of_node, 1); - if (!clk_base) { - pr_err("%s: Unable to map clk base\n", __func__); + if (!clk_base) return -ENXIO; - } /* Populate base address for PLLs */ for (i = 0; i < ARRAY_SIZE(meson8b_clk_plls); i++) @@ -748,7 +852,7 @@ static int meson8b_clkc_probe(struct platform_device *pdev) /* FIXME convert to devm_clk_register */ ret = devm_clk_hw_register(dev, meson8b_hw_onecell_data.hws[clkid]); if (ret) - goto iounmap; + return ret; } /* @@ -771,15 +875,11 @@ static int meson8b_clkc_probe(struct platform_device *pdev) if (ret) { pr_err("%s: failed to register clock notifier for cpu_clk\n", __func__); - goto iounmap; + return ret; } return of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get, &meson8b_hw_onecell_data); - -iounmap: - iounmap(clk_base); - return ret; } static const struct of_device_id meson8b_clkc_match_table[] = { @@ -798,3 +898,39 @@ static struct platform_driver meson8b_driver = { }; builtin_platform_driver(meson8b_driver); + +static void __init meson8b_clkc_reset_init(struct device_node *np) +{ + struct meson8b_clk_reset *rstc; + int ret; + + /* Generic clocks, PLLs and some of the reset-bits */ + clk_base = of_iomap(np, 1); + if (!clk_base) { + pr_err("%s: Unable to map clk base\n", __func__); + return; + } + + rstc = kzalloc(sizeof(*rstc), GFP_KERNEL); + if (!rstc) + return; + + /* Reset Controller */ + rstc->base = clk_base; + rstc->reset.ops = &meson8b_clk_reset_ops; + rstc->reset.nr_resets = ARRAY_SIZE(meson8b_clk_reset_bits); + rstc->reset.of_node = np; + ret = reset_controller_register(&rstc->reset); + if (ret) { + pr_err("%s: Failed to register clkc reset controller: %d\n", + __func__, ret); + return; + } +} + +CLK_OF_DECLARE_DRIVER(meson8_clkc, "amlogic,meson8-clkc", + meson8b_clkc_reset_init); +CLK_OF_DECLARE_DRIVER(meson8b_clkc, "amlogic,meson8b-clkc", + meson8b_clkc_reset_init); +CLK_OF_DECLARE_DRIVER(meson8m2_clkc, "amlogic,meson8m2-clkc", + meson8b_clkc_reset_init); diff --git a/drivers/clk/meson/meson8b.h b/drivers/clk/meson/meson8b.h index a687e02547dc..2eaf8a52e7dd 100644 --- a/drivers/clk/meson/meson8b.h +++ b/drivers/clk/meson/meson8b.h @@ -37,6 +37,9 @@ #define HHI_GCLK_AO 0x154 /* 0x55 offset in data sheet */ #define HHI_SYS_CPU_CLK_CNTL1 0x15c /* 0x57 offset in data sheet */ #define HHI_MPEG_CLK_CNTL 0x174 /* 0x5d offset in data sheet */ +#define HHI_VID_CLK_CNTL 0x17c /* 0x5f offset in data sheet */ +#define HHI_VID_DIVIDER_CNTL 0x198 /* 0x66 offset in data sheet */ +#define HHI_SYS_CPU_CLK_CNTL0 0x19c /* 0x67 offset in data sheet */ #define HHI_MPLL_CNTL 0x280 /* 0xa0 offset in data sheet */ #define HHI_SYS_PLL_CNTL 0x300 /* 0xc0 offset in data sheet */ #define HHI_VID_PLL_CNTL 0x320 /* 0xc8 offset in data sheet */ @@ -60,110 +63,19 @@ * CLKID index values * * These indices are entirely contrived and do not map onto the hardware. - * Migrate them out of this header and into the DT header file when they need - * to be exposed to client nodes in DT: include/dt-bindings/clock/meson8b-clkc.h + * It has now been decided to expose everything by default in the DT header: + * include/dt-bindings/clock/gxbb-clkc.h. 
Only the clocks ids we don't want + * to expose, such as the internal muxes and dividers of composite clocks, + * will remain defined here. */ -/* CLKID_UNUSED */ -/* CLKID_XTAL */ -/* CLKID_PLL_FIXED */ -/* CLKID_PLL_VID */ -/* CLKID_PLL_SYS */ -/* CLKID_FCLK_DIV2 */ -/* CLKID_FCLK_DIV3 */ -/* CLKID_FCLK_DIV4 */ -/* CLKID_FCLK_DIV5 */ -/* CLKID_FCLK_DIV7 */ -/* CLKID_CLK81 */ -/* CLKID_MALI */ -/* CLKID_CPUCLK */ -/* CLKID_ZERO */ -/* CLKID_MPEG_SEL */ -/* CLKID_MPEG_DIV */ -#define CLKID_DDR 16 -#define CLKID_DOS 17 -#define CLKID_ISA 18 -#define CLKID_PL301 19 -#define CLKID_PERIPHS 20 -#define CLKID_SPICC 21 -#define CLKID_I2C 22 -/* #define CLKID_SAR_ADC */ -#define CLKID_SMART_CARD 24 -/* #define CLKID_RNG0 */ -#define CLKID_UART0 26 -#define CLKID_SDHC 27 -#define CLKID_STREAM 28 -#define CLKID_ASYNC_FIFO 29 -/* #define CLKID_SDIO */ -#define CLKID_ABUF 31 -#define CLKID_HIU_IFACE 32 -#define CLKID_ASSIST_MISC 33 -#define CLKID_SPI 34 -#define CLKID_I2S_SPDIF 35 -/* #define CLKID_ETH */ -#define CLKID_DEMUX 37 -#define CLKID_AIU_GLUE 38 -#define CLKID_IEC958 39 -#define CLKID_I2S_OUT 40 -#define CLKID_AMCLK 41 -#define CLKID_AIFIFO2 42 -#define CLKID_MIXER 43 -#define CLKID_MIXER_IFACE 44 -#define CLKID_ADC 45 -#define CLKID_BLKMV 46 -#define CLKID_AIU 47 -#define CLKID_UART1 48 -#define CLKID_G2D 49 -/* #define CLKID_USB0 */ -/* #define CLKID_USB1 */ -#define CLKID_RESET 52 -#define CLKID_NAND 53 -#define CLKID_DOS_PARSER 54 -/* #define CLKID_USB */ -#define CLKID_VDIN1 56 -#define CLKID_AHB_ARB0 57 -#define CLKID_EFUSE 58 -#define CLKID_BOOT_ROM 59 -#define CLKID_AHB_DATA_BUS 60 -#define CLKID_AHB_CTRL_BUS 61 -#define CLKID_HDMI_INTR_SYNC 62 -#define CLKID_HDMI_PCLK 63 -/* CLKID_USB1_DDR_BRIDGE */ -/* CLKID_USB0_DDR_BRIDGE */ -#define CLKID_MMC_PCLK 66 -#define CLKID_DVIN 67 -#define CLKID_UART2 68 -/* #define CLKID_SANA */ -#define CLKID_VPU_INTR 70 -#define CLKID_SEC_AHB_AHB3_BRIDGE 71 -#define CLKID_CLK81_A9 72 -#define CLKID_VCLK2_VENCI0 73 -#define CLKID_VCLK2_VENCI1 74 -#define CLKID_VCLK2_VENCP0 75 -#define CLKID_VCLK2_VENCP1 76 -#define CLKID_GCLK_VENCI_INT 77 -#define CLKID_GCLK_VENCP_INT 78 -#define CLKID_DAC_CLK 79 -#define CLKID_AOCLK_GATE 80 -#define CLKID_IEC958_GATE 81 -#define CLKID_ENC480P 82 -#define CLKID_RNG1 83 -#define CLKID_GCLK_VENCL_INT 84 -#define CLKID_VCLK2_VENCLMCC 85 -#define CLKID_VCLK2_VENCL 86 -#define CLKID_VCLK2_OTHER 87 -#define CLKID_EDP 88 -#define CLKID_AO_MEDIA_CPU 89 -#define CLKID_AO_AHB_SRAM 90 -#define CLKID_AO_AHB_BUS 91 -#define CLKID_AO_IFACE 92 -#define CLKID_MPLL0 93 -#define CLKID_MPLL1 94 -#define CLKID_MPLL2 95 - #define CLK_NR_CLKS 96 -/* include the CLKIDs that have been made part of the stable DT binding */ +/* + * include the CLKID and RESETID that have + * been made part of the stable DT binding + */ #include +#include #endif /* __MESON8B_H */ diff --git a/drivers/clk/mmp/clk.c b/drivers/clk/mmp/clk.c index 61893fe73251..089927e4cda2 100644 --- a/drivers/clk/mmp/clk.c +++ b/drivers/clk/mmp/clk.c @@ -9,7 +9,7 @@ void mmp_clk_init(struct device_node *np, struct mmp_clk_unit *unit, int nr_clks) { - static struct clk **clk_table; + struct clk **clk_table; clk_table = kcalloc(nr_clks, sizeof(struct clk *), GFP_KERNEL); if (!clk_table) diff --git a/drivers/clk/nxp/clk-lpc32xx.c b/drivers/clk/nxp/clk-lpc32xx.c index 5b98ff9076f3..7b359afd620e 100644 --- a/drivers/clk/nxp/clk-lpc32xx.c +++ b/drivers/clk/nxp/clk-lpc32xx.c @@ -885,7 +885,7 @@ static const struct clk_ops clk_usb_i2c_ops = { .recalc_rate = clk_usb_i2c_recalc_rate, }; 
-static int clk_gate_enable(struct clk_hw *hw) +static int lpc32xx_clk_gate_enable(struct clk_hw *hw) { struct lpc32xx_clk_gate *clk = to_lpc32xx_gate(hw); u32 mask = BIT(clk->bit_idx); @@ -894,7 +894,7 @@ static int clk_gate_enable(struct clk_hw *hw) return regmap_update_bits(clk_regmap, clk->reg, mask, val); } -static void clk_gate_disable(struct clk_hw *hw) +static void lpc32xx_clk_gate_disable(struct clk_hw *hw) { struct lpc32xx_clk_gate *clk = to_lpc32xx_gate(hw); u32 mask = BIT(clk->bit_idx); @@ -903,7 +903,7 @@ static void clk_gate_disable(struct clk_hw *hw) regmap_update_bits(clk_regmap, clk->reg, mask, val); } -static int clk_gate_is_enabled(struct clk_hw *hw) +static int lpc32xx_clk_gate_is_enabled(struct clk_hw *hw) { struct lpc32xx_clk_gate *clk = to_lpc32xx_gate(hw); u32 val; @@ -916,9 +916,9 @@ static int clk_gate_is_enabled(struct clk_hw *hw) } static const struct clk_ops lpc32xx_clk_gate_ops = { - .enable = clk_gate_enable, - .disable = clk_gate_disable, - .is_enabled = clk_gate_is_enabled, + .enable = lpc32xx_clk_gate_enable, + .disable = lpc32xx_clk_gate_disable, + .is_enabled = lpc32xx_clk_gate_is_enabled, }; #define div_mask(width) ((1 << (width)) - 1) diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c index d990fe44aef3..cc03d5508627 100644 --- a/drivers/clk/qcom/clk-smd-rpm.c +++ b/drivers/clk/qcom/clk-smd-rpm.c @@ -412,8 +412,6 @@ static const struct clk_ops clk_smd_rpm_ops = { static const struct clk_ops clk_smd_rpm_branch_ops = { .prepare = clk_smd_rpm_prepare, .unprepare = clk_smd_rpm_unprepare, - .round_rate = clk_smd_rpm_round_rate, - .recalc_rate = clk_smd_rpm_recalc_rate, }; /* msm8916 */ diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c index 2cfe7000fc60..3410ee68d4bc 100644 --- a/drivers/clk/qcom/gcc-msm8916.c +++ b/drivers/clk/qcom/gcc-msm8916.c @@ -1176,7 +1176,7 @@ static struct clk_rcg2 bimc_gpu_clk_src = { .parent_names = gcc_xo_gpll0_bimc, .num_parents = 3, .flags = CLK_GET_RATE_NOCACHE, - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }, }; diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c index 8abc200d4fd3..7ddec886fcd3 100644 --- a/drivers/clk/qcom/gcc-msm8996.c +++ b/drivers/clk/qcom/gcc-msm8996.c @@ -2730,6 +2730,32 @@ static struct clk_fixed_factor ufs_rx_cfg_clk_src = { }, }; +static struct clk_branch gcc_hlos1_vote_lpass_core_smmu_clk = { + .halt_reg = 0x7d010, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x7d010, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "hlos1_vote_lpass_core_smmu_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_hlos1_vote_lpass_adsp_smmu_clk = { + .halt_reg = 0x7d014, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x7d014, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "hlos1_vote_lpass_adsp_smmu_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + static struct clk_branch gcc_ufs_rx_cfg_clk = { .halt_reg = 0x75014, .clkr = { @@ -3307,6 +3333,8 @@ static struct clk_regmap *gcc_msm8996_clocks[] = { [GCC_UFS_AHB_CLK] = &gcc_ufs_ahb_clk.clkr, [GCC_UFS_TX_CFG_CLK] = &gcc_ufs_tx_cfg_clk.clkr, [GCC_UFS_RX_CFG_CLK] = &gcc_ufs_rx_cfg_clk.clkr, + [GCC_HLOS1_VOTE_LPASS_CORE_SMMU_CLK] = &gcc_hlos1_vote_lpass_core_smmu_clk.clkr, + [GCC_HLOS1_VOTE_LPASS_ADSP_SMMU_CLK] = &gcc_hlos1_vote_lpass_adsp_smmu_clk.clkr, [GCC_UFS_TX_SYMBOL_0_CLK] = &gcc_ufs_tx_symbol_0_clk.clkr, [GCC_UFS_RX_SYMBOL_0_CLK] = &gcc_ufs_rx_symbol_0_clk.clkr, 
[GCC_UFS_RX_SYMBOL_1_CLK] = &gcc_ufs_rx_symbol_1_clk.clkr, diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig index 78d1df9112ba..acbb38151ba1 100644 --- a/drivers/clk/renesas/Kconfig +++ b/drivers/clk/renesas/Kconfig @@ -15,6 +15,7 @@ config CLK_RENESAS select CLK_R8A7794 if ARCH_R8A7794 select CLK_R8A7795 if ARCH_R8A7795 select CLK_R8A7796 if ARCH_R8A7796 + select CLK_R8A77995 if ARCH_R8A77995 select CLK_SH73A0 if ARCH_SH73A0 if CLK_RENESAS @@ -34,94 +35,103 @@ config CLK_EMEV2 bool "Emma Mobile EV2 clock support" if COMPILE_TEST config CLK_RZA1 - bool + bool "RZ/A1H clock support" if COMPILE_TEST select CLK_RENESAS_CPG_MSTP config CLK_R8A73A4 - bool + bool "R-Mobile APE6 clock support" if COMPILE_TEST select CLK_RENESAS_CPG_MSTP select CLK_RENESAS_DIV6 config CLK_R8A7740 - bool + bool "R-Mobile A1 clock support" if COMPILE_TEST select CLK_RENESAS_CPG_MSTP select CLK_RENESAS_DIV6 config CLK_R8A7743 - bool + bool "RZ/G1M clock support" if COMPILE_TEST select CLK_RCAR_GEN2_CPG config CLK_R8A7745 - bool + bool "RZ/G1E clock support" if COMPILE_TEST select CLK_RCAR_GEN2_CPG config CLK_R8A7778 - bool + bool "R-Car M1A clock support" if COMPILE_TEST select CLK_RENESAS_CPG_MSTP config CLK_R8A7779 - bool + bool "R-Car H1 clock support" if COMPILE_TEST select CLK_RENESAS_CPG_MSTP config CLK_R8A7790 - bool + bool "R-Car H2 clock support" if COMPILE_TEST select CLK_RCAR_GEN2 if CLK_RENESAS_LEGACY select CLK_RCAR_GEN2_CPG select CLK_RENESAS_DIV6 config CLK_R8A7791 - bool + bool "R-Car M2-W/N clock support" if COMPILE_TEST select CLK_RCAR_GEN2 if CLK_RENESAS_LEGACY select CLK_RCAR_GEN2_CPG select CLK_RENESAS_DIV6 config CLK_R8A7792 - bool + bool "R-Car V2H clock support" if COMPILE_TEST select CLK_RCAR_GEN2 if CLK_RENESAS_LEGACY select CLK_RCAR_GEN2_CPG config CLK_R8A7794 - bool + bool "R-Car E2 clock support" if COMPILE_TEST select CLK_RCAR_GEN2 if CLK_RENESAS_LEGACY select CLK_RCAR_GEN2_CPG select CLK_RENESAS_DIV6 config CLK_R8A7795 - bool + bool "R-Car H3 clock support" if COMPILE_TEST select CLK_RCAR_GEN3_CPG config CLK_R8A7796 - bool + bool "R-Car M3-W clock support" if COMPILE_TEST + select CLK_RCAR_GEN3_CPG + +config CLK_R8A77995 + bool "R-Car D3 clock support" if COMPILE_TEST select CLK_RCAR_GEN3_CPG config CLK_SH73A0 - bool + bool "SH-Mobile AG5 clock support" if COMPILE_TEST select CLK_RENESAS_CPG_MSTP select CLK_RENESAS_DIV6 # Family config CLK_RCAR_GEN2 - bool + bool "R-Car Gen2 legacy clock support" if COMPILE_TEST select CLK_RENESAS_CPG_MSTP select CLK_RENESAS_DIV6 config CLK_RCAR_GEN2_CPG - bool + bool "R-Car Gen2 CPG clock support" if COMPILE_TEST select CLK_RENESAS_CPG_MSSR config CLK_RCAR_GEN3_CPG - bool + bool "R-Car Gen3 CPG clock support" if COMPILE_TEST select CLK_RENESAS_CPG_MSSR +config CLK_RCAR_USB2_CLOCK_SEL + bool "Renesas R-Car USB2 clock selector support" + depends on ARCH_RENESAS || COMPILE_TEST + help + This is a driver for R-Car USB2 clock selector # Generic config CLK_RENESAS_CPG_MSSR - bool + bool "CPG/MSSR clock support" if COMPILE_TEST select CLK_RENESAS_DIV6 config CLK_RENESAS_CPG_MSTP - bool + bool "MSTP clock support" if COMPILE_TEST config CLK_RENESAS_DIV6 bool "DIV6 clock support" if COMPILE_TEST diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile index 02d04124371f..9bda3ec5b199 100644 --- a/drivers/clk/renesas/Makefile +++ b/drivers/clk/renesas/Makefile @@ -13,12 +13,14 @@ obj-$(CONFIG_CLK_R8A7792) += r8a7792-cpg-mssr.o obj-$(CONFIG_CLK_R8A7794) += r8a7794-cpg-mssr.o obj-$(CONFIG_CLK_R8A7795) += r8a7795-cpg-mssr.o 
obj-$(CONFIG_CLK_R8A7796) += r8a7796-cpg-mssr.o +obj-$(CONFIG_CLK_R8A77995) += r8a77995-cpg-mssr.o obj-$(CONFIG_CLK_SH73A0) += clk-sh73a0.o # Family obj-$(CONFIG_CLK_RCAR_GEN2) += clk-rcar-gen2.o obj-$(CONFIG_CLK_RCAR_GEN2_CPG) += rcar-gen2-cpg.o obj-$(CONFIG_CLK_RCAR_GEN3_CPG) += rcar-gen3-cpg.o +obj-$(CONFIG_CLK_RCAR_USB2_CLOCK_SEL) += rcar-usb2-clock-sel.o # Generic obj-$(CONFIG_CLK_RENESAS_CPG_MSSR) += renesas-cpg-mssr.o diff --git a/drivers/clk/renesas/clk-div6.c b/drivers/clk/renesas/clk-div6.c index 0627860233cb..3e0040c0ac87 100644 --- a/drivers/clk/renesas/clk-div6.c +++ b/drivers/clk/renesas/clk-div6.c @@ -29,6 +29,9 @@ * @hw: handle between common and hardware-specific interfaces * @reg: IO-remapped register * @div: divisor value (1-64) + * @src_shift: Shift to access the register bits to select the parent clock + * @src_width: Number of register bits to select the parent clock (may be 0) + * @parents: Array to map from valid parent clocks indices to hardware indices */ struct div6_clock { struct clk_hw hw; diff --git a/drivers/clk/renesas/clk-mstp.c b/drivers/clk/renesas/clk-mstp.c index f1617dd044cb..500a9e4e03c4 100644 --- a/drivers/clk/renesas/clk-mstp.c +++ b/drivers/clk/renesas/clk-mstp.c @@ -335,7 +335,7 @@ void __init cpg_mstp_add_clk_domain(struct device_node *np) u32 ncells; if (of_property_read_u32(np, "#power-domain-cells", &ncells)) { - pr_warn("%s lacks #power-domain-cells\n", np->full_name); + pr_warn("%pOF lacks #power-domain-cells\n", np); return; } diff --git a/drivers/clk/renesas/clk-rcar-gen2.c b/drivers/clk/renesas/clk-rcar-gen2.c index 51a2479ed5d7..0b2e56d0d94b 100644 --- a/drivers/clk/renesas/clk-rcar-gen2.c +++ b/drivers/clk/renesas/clk-rcar-gen2.c @@ -407,8 +407,7 @@ static void __init rcar_gen2_cpg_clocks_init(struct device_node *np) if (rcar_rst_read_mode_pins(&cpg_mode)) { /* Backward-compatibility with old DT */ - pr_warn("%s: failed to obtain mode pins from RST\n", - np->full_name); + pr_warn("%pOF: failed to obtain mode pins from RST\n", np); cpg_mode = rcar_gen2_read_mode_pins(); } diff --git a/drivers/clk/renesas/r8a7792-cpg-mssr.c b/drivers/clk/renesas/r8a7792-cpg-mssr.c index a832b9b6f7b0..7f85bbf20bf7 100644 --- a/drivers/clk/renesas/r8a7792-cpg-mssr.c +++ b/drivers/clk/renesas/r8a7792-cpg-mssr.c @@ -118,6 +118,13 @@ static const struct mssr_mod_clk r8a7792_mod_clks[] __initconst = { DEF_MOD("vin1", 810, R8A7792_CLK_ZG), DEF_MOD("vin0", 811, R8A7792_CLK_ZG), DEF_MOD("etheravb", 812, R8A7792_CLK_HP), + DEF_MOD("imr-lx3", 821, R8A7792_CLK_ZG), + DEF_MOD("imr-lsx3-1", 822, R8A7792_CLK_ZG), + DEF_MOD("imr-lsx3-0", 823, R8A7792_CLK_ZG), + DEF_MOD("imr-lsx3-5", 825, R8A7792_CLK_ZG), + DEF_MOD("imr-lsx3-4", 826, R8A7792_CLK_ZG), + DEF_MOD("imr-lsx3-3", 827, R8A7792_CLK_ZG), + DEF_MOD("imr-lsx3-2", 828, R8A7792_CLK_ZG), DEF_MOD("gyro-adc", 901, R8A7792_CLK_P), DEF_MOD("gpio7", 904, R8A7792_CLK_CP), DEF_MOD("gpio6", 905, R8A7792_CLK_CP), diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c index c091a8e024b8..762b2f8824f1 100644 --- a/drivers/clk/renesas/r8a7795-cpg-mssr.c +++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c @@ -305,23 +305,23 @@ static const unsigned int r8a7795_crit_mod_clks[] __initconst = { (((md) & BIT(17)) >> 17)) static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[16] __initconst = { - /* EXTAL div PLL1 mult PLL3 mult */ - { 1, 192, 192, }, - { 1, 192, 128, }, - { 0, /* Prohibited setting */ }, - { 1, 192, 192, }, - { 1, 160, 160, }, - { 1, 160, 106, }, - { 0, /* Prohibited setting */ }, 
- { 1, 160, 160, }, - { 1, 128, 128, }, - { 1, 128, 84, }, - { 0, /* Prohibited setting */ }, - { 1, 128, 128, }, - { 2, 192, 192, }, - { 2, 192, 128, }, - { 0, /* Prohibited setting */ }, - { 2, 192, 192, }, + /* EXTAL div PLL1 mult/div PLL3 mult/div */ + { 1, 192, 1, 192, 1, }, + { 1, 192, 1, 128, 1, }, + { 0, /* Prohibited setting */ }, + { 1, 192, 1, 192, 1, }, + { 1, 160, 1, 160, 1, }, + { 1, 160, 1, 106, 1, }, + { 0, /* Prohibited setting */ }, + { 1, 160, 1, 160, 1, }, + { 1, 128, 1, 128, 1, }, + { 1, 128, 1, 84, 1, }, + { 0, /* Prohibited setting */ }, + { 1, 128, 1, 128, 1, }, + { 2, 192, 1, 192, 1, }, + { 2, 192, 1, 128, 1, }, + { 0, /* Prohibited setting */ }, + { 2, 192, 1, 192, 1, }, }; static const struct soc_device_attribute r8a7795es1[] __initconst = { diff --git a/drivers/clk/renesas/r8a7796-cpg-mssr.c b/drivers/clk/renesas/r8a7796-cpg-mssr.c index acc6d0f153e1..e5e7fb212288 100644 --- a/drivers/clk/renesas/r8a7796-cpg-mssr.c +++ b/drivers/clk/renesas/r8a7796-cpg-mssr.c @@ -138,6 +138,7 @@ static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = { DEF_MOD("sdif0", 314, R8A7796_CLK_SD0), DEF_MOD("pcie1", 318, R8A7796_CLK_S3D1), DEF_MOD("pcie0", 319, R8A7796_CLK_S3D1), + DEF_MOD("usb3-if0", 328, R8A7796_CLK_S3D1), DEF_MOD("usb-dmac0", 330, R8A7796_CLK_S3D1), DEF_MOD("usb-dmac1", 331, R8A7796_CLK_S3D1), DEF_MOD("rwdt", 402, R8A7796_CLK_R), @@ -277,23 +278,23 @@ static const unsigned int r8a7796_crit_mod_clks[] __initconst = { (((md) & BIT(17)) >> 17)) static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[16] __initconst = { - /* EXTAL div PLL1 mult PLL3 mult */ - { 1, 192, 192, }, - { 1, 192, 128, }, - { 0, /* Prohibited setting */ }, - { 1, 192, 192, }, - { 1, 160, 160, }, - { 1, 160, 106, }, - { 0, /* Prohibited setting */ }, - { 1, 160, 160, }, - { 1, 128, 128, }, - { 1, 128, 84, }, - { 0, /* Prohibited setting */ }, - { 1, 128, 128, }, - { 2, 192, 192, }, - { 2, 192, 128, }, - { 0, /* Prohibited setting */ }, - { 2, 192, 192, }, + /* EXTAL div PLL1 mult/div PLL3 mult/div */ + { 1, 192, 1, 192, 1, }, + { 1, 192, 1, 128, 1, }, + { 0, /* Prohibited setting */ }, + { 1, 192, 1, 192, 1, }, + { 1, 160, 1, 160, 1, }, + { 1, 160, 1, 106, 1, }, + { 0, /* Prohibited setting */ }, + { 1, 160, 1, 160, 1, }, + { 1, 128, 1, 128, 1, }, + { 1, 128, 1, 84, 1, }, + { 0, /* Prohibited setting */ }, + { 1, 128, 1, 128, 1, }, + { 2, 192, 1, 192, 1, }, + { 2, 192, 1, 128, 1, }, + { 0, /* Prohibited setting */ }, + { 2, 192, 1, 192, 1, }, }; static int __init r8a7796_cpg_mssr_init(struct device *dev) diff --git a/drivers/clk/renesas/r8a77995-cpg-mssr.c b/drivers/clk/renesas/r8a77995-cpg-mssr.c new file mode 100644 index 000000000000..e594cf8ee63b --- /dev/null +++ b/drivers/clk/renesas/r8a77995-cpg-mssr.c @@ -0,0 +1,236 @@ +/* + * r8a77995 Clock Pulse Generator / Module Standby and Software Reset + * + * Copyright (C) 2017 Glider bvba + * + * Based on r8a7795-cpg-mssr.c + * + * Copyright (C) 2015 Glider bvba + * Copyright (C) 2015 Renesas Electronics Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. 
+ */ + +#include +#include +#include +#include + +#include + +#include "renesas-cpg-mssr.h" +#include "rcar-gen3-cpg.h" + +enum clk_ids { + /* Core Clock Outputs exported to DT */ + LAST_DT_CORE_CLK = R8A77995_CLK_CP, + + /* External Input Clocks */ + CLK_EXTAL, + + /* Internal Core Clocks */ + CLK_MAIN, + CLK_PLL0, + CLK_PLL1, + CLK_PLL3, + CLK_PLL0D2, + CLK_PLL0D3, + CLK_PLL0D5, + CLK_PLL1D2, + CLK_PE, + CLK_S0, + CLK_S1, + CLK_S2, + CLK_S3, + CLK_SDSRC, + CLK_SSPSRC, + + /* Module Clocks */ + MOD_CLK_BASE +}; + +static const struct cpg_core_clk r8a77995_core_clks[] __initconst = { + /* External Clock Inputs */ + DEF_INPUT("extal", CLK_EXTAL), + + /* Internal Core Clocks */ + DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN3_MAIN, CLK_EXTAL), + DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN3_PLL1, CLK_MAIN), + DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN3_PLL3, CLK_MAIN), + + DEF_FIXED(".pll0", CLK_PLL0, CLK_MAIN, 4, 250), + DEF_FIXED(".pll0d2", CLK_PLL0D2, CLK_PLL0, 2, 1), + DEF_FIXED(".pll0d3", CLK_PLL0D3, CLK_PLL0, 3, 1), + DEF_FIXED(".pll0d5", CLK_PLL0D5, CLK_PLL0, 5, 1), + DEF_FIXED(".pll1d2", CLK_PLL1D2, CLK_PLL1, 2, 1), + DEF_FIXED(".pe", CLK_PE, CLK_PLL0D3, 4, 1), + DEF_FIXED(".s0", CLK_S0, CLK_PLL1, 2, 1), + DEF_FIXED(".s1", CLK_S1, CLK_PLL1, 3, 1), + DEF_FIXED(".s2", CLK_S2, CLK_PLL1, 4, 1), + DEF_FIXED(".s3", CLK_S3, CLK_PLL1, 6, 1), + DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1, 2, 1), + + /* Core Clock Outputs */ + DEF_FIXED("z2", R8A77995_CLK_Z2, CLK_PLL0D3, 1, 1), + DEF_FIXED("ztr", R8A77995_CLK_ZTR, CLK_PLL1, 6, 1), + DEF_FIXED("zt", R8A77995_CLK_ZT, CLK_PLL1, 4, 1), + DEF_FIXED("zx", R8A77995_CLK_ZX, CLK_PLL1, 3, 1), + DEF_FIXED("s0d1", R8A77995_CLK_S0D1, CLK_S0, 1, 1), + DEF_FIXED("s1d1", R8A77995_CLK_S1D1, CLK_S1, 1, 1), + DEF_FIXED("s1d2", R8A77995_CLK_S1D2, CLK_S1, 2, 1), + DEF_FIXED("s1d4", R8A77995_CLK_S1D4, CLK_S1, 4, 1), + DEF_FIXED("s2d1", R8A77995_CLK_S2D1, CLK_S2, 1, 1), + DEF_FIXED("s2d2", R8A77995_CLK_S2D2, CLK_S2, 2, 1), + DEF_FIXED("s2d4", R8A77995_CLK_S2D4, CLK_S2, 4, 1), + DEF_FIXED("s3d1", R8A77995_CLK_S3D1, CLK_S3, 1, 1), + DEF_FIXED("s3d2", R8A77995_CLK_S3D2, CLK_S3, 2, 1), + DEF_FIXED("s3d4", R8A77995_CLK_S3D4, CLK_S3, 4, 1), + + DEF_FIXED("cl", R8A77995_CLK_CL, CLK_PLL1, 48, 1), + DEF_FIXED("cp", R8A77995_CLK_CP, CLK_EXTAL, 2, 1), + DEF_FIXED("osc", R8A77995_CLK_OSC, CLK_EXTAL, 384, 1), + DEF_FIXED("r", R8A77995_CLK_R, CLK_EXTAL, 1536, 1), + + DEF_GEN3_PE("s1d4c", R8A77995_CLK_S1D4C, CLK_S1, 4, CLK_PE, 2), + DEF_GEN3_PE("s3d1c", R8A77995_CLK_S3D1C, CLK_S3, 1, CLK_PE, 1), + DEF_GEN3_PE("s3d2c", R8A77995_CLK_S3D2C, CLK_S3, 2, CLK_PE, 2), + DEF_GEN3_PE("s3d4c", R8A77995_CLK_S3D4C, CLK_S3, 4, CLK_PE, 4), + + DEF_GEN3_SD("sd0", R8A77995_CLK_SD0, CLK_SDSRC, 0x268), + + DEF_DIV6P1("canfd", R8A77995_CLK_CANFD, CLK_PLL0D3, 0x244), + DEF_DIV6P1("mso", R8A77995_CLK_MSO, CLK_PLL1D2, 0x014), +}; + +static const struct mssr_mod_clk r8a77995_mod_clks[] __initconst = { + DEF_MOD("scif5", 202, R8A77995_CLK_S3D4C), + DEF_MOD("scif4", 203, R8A77995_CLK_S3D4C), + DEF_MOD("scif3", 204, R8A77995_CLK_S3D4C), + DEF_MOD("scif1", 206, R8A77995_CLK_S3D4C), + DEF_MOD("scif0", 207, R8A77995_CLK_S3D4C), + DEF_MOD("msiof3", 208, R8A77995_CLK_MSO), + DEF_MOD("msiof2", 209, R8A77995_CLK_MSO), + DEF_MOD("msiof1", 210, R8A77995_CLK_MSO), + DEF_MOD("msiof0", 211, R8A77995_CLK_MSO), + DEF_MOD("sys-dmac2", 217, R8A77995_CLK_S3D1), + DEF_MOD("sys-dmac1", 218, R8A77995_CLK_S3D1), + DEF_MOD("sys-dmac0", 219, R8A77995_CLK_S3D1), + DEF_MOD("cmt3", 300, R8A77995_CLK_R), + DEF_MOD("cmt2", 301, R8A77995_CLK_R), + 
DEF_MOD("cmt1", 302, R8A77995_CLK_R), + DEF_MOD("cmt0", 303, R8A77995_CLK_R), + DEF_MOD("scif2", 310, R8A77995_CLK_S3D4C), + DEF_MOD("emmc0", 312, R8A77995_CLK_SD0), + DEF_MOD("usb-dmac0", 330, R8A77995_CLK_S3D1), + DEF_MOD("usb-dmac1", 331, R8A77995_CLK_S3D1), + DEF_MOD("rwdt", 402, R8A77995_CLK_R), + DEF_MOD("intc-ex", 407, R8A77995_CLK_CP), + DEF_MOD("intc-ap", 408, R8A77995_CLK_S3D1), + DEF_MOD("audmac0", 502, R8A77995_CLK_S3D1), + DEF_MOD("hscif3", 517, R8A77995_CLK_S3D1C), + DEF_MOD("hscif0", 520, R8A77995_CLK_S3D1C), + DEF_MOD("thermal", 522, R8A77995_CLK_CP), + DEF_MOD("pwm", 523, R8A77995_CLK_S3D4C), + DEF_MOD("fcpvd1", 602, R8A77995_CLK_S1D2), + DEF_MOD("fcpvd0", 603, R8A77995_CLK_S1D2), + DEF_MOD("fcpvbs", 607, R8A77995_CLK_S0D1), + DEF_MOD("vspd1", 622, R8A77995_CLK_S1D2), + DEF_MOD("vspd0", 623, R8A77995_CLK_S1D2), + DEF_MOD("vspbs", 627, R8A77995_CLK_S0D1), + DEF_MOD("ehci0", 703, R8A77995_CLK_S3D2), + DEF_MOD("hsusb", 704, R8A77995_CLK_S3D2), + DEF_MOD("du1", 723, R8A77995_CLK_S2D1), + DEF_MOD("du0", 724, R8A77995_CLK_S2D1), + DEF_MOD("lvds", 727, R8A77995_CLK_S2D1), + DEF_MOD("vin7", 804, R8A77995_CLK_S1D2), + DEF_MOD("vin6", 805, R8A77995_CLK_S1D2), + DEF_MOD("vin5", 806, R8A77995_CLK_S1D2), + DEF_MOD("vin4", 807, R8A77995_CLK_S1D2), + DEF_MOD("etheravb", 812, R8A77995_CLK_S3D2), + DEF_MOD("imr0", 823, R8A77995_CLK_S1D2), + DEF_MOD("gpio6", 906, R8A77995_CLK_S3D4), + DEF_MOD("gpio5", 907, R8A77995_CLK_S3D4), + DEF_MOD("gpio4", 908, R8A77995_CLK_S3D4), + DEF_MOD("gpio3", 909, R8A77995_CLK_S3D4), + DEF_MOD("gpio2", 910, R8A77995_CLK_S3D4), + DEF_MOD("gpio1", 911, R8A77995_CLK_S3D4), + DEF_MOD("gpio0", 912, R8A77995_CLK_S3D4), + DEF_MOD("can-fd", 914, R8A77995_CLK_S3D2), + DEF_MOD("can-if1", 915, R8A77995_CLK_S3D4), + DEF_MOD("can-if0", 916, R8A77995_CLK_S3D4), + DEF_MOD("i2c3", 928, R8A77995_CLK_S3D2), + DEF_MOD("i2c2", 929, R8A77995_CLK_S3D2), + DEF_MOD("i2c1", 930, R8A77995_CLK_S3D2), + DEF_MOD("i2c0", 931, R8A77995_CLK_S3D2), + DEF_MOD("ssi-all", 1005, R8A77995_CLK_S3D4), + DEF_MOD("ssi4", 1011, MOD_CLK_ID(1005)), + DEF_MOD("ssi3", 1012, MOD_CLK_ID(1005)), + DEF_MOD("scu-all", 1017, R8A77995_CLK_S3D4), + DEF_MOD("scu-dvc1", 1018, MOD_CLK_ID(1017)), + DEF_MOD("scu-dvc0", 1019, MOD_CLK_ID(1017)), + DEF_MOD("scu-ctu1-mix1", 1020, MOD_CLK_ID(1017)), + DEF_MOD("scu-ctu0-mix0", 1021, MOD_CLK_ID(1017)), + DEF_MOD("scu-src6", 1025, MOD_CLK_ID(1017)), + DEF_MOD("scu-src5", 1026, MOD_CLK_ID(1017)), +}; + +static const unsigned int r8a77995_crit_mod_clks[] __initconst = { + MOD_CLK_ID(408), /* INTC-AP (GIC) */ +}; + + +/* + * CPG Clock Data + */ + +/* + * MD19 EXTAL (MHz) PLL0 PLL1 PLL3 + *-------------------------------------------------------------------- + * 0 48 x 1 x250/4 x100/3 x100/3 + * 1 48 x 1 x250/4 x100/3 x116/6 + */ +#define CPG_PLL_CONFIG_INDEX(md) (((md) & BIT(19)) >> 19) + +static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[2] __initconst = { + /* EXTAL div PLL1 mult/div PLL3 mult/div */ + { 1, 100, 3, 100, 3, }, + { 1, 100, 3, 116, 6, }, +}; + +static int __init r8a77995_cpg_mssr_init(struct device *dev) +{ + const struct rcar_gen3_cpg_pll_config *cpg_pll_config; + u32 cpg_mode; + int error; + + error = rcar_rst_read_mode_pins(&cpg_mode); + if (error) + return error; + + cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)]; + + return rcar_gen3_cpg_init(cpg_pll_config, 0, cpg_mode); +} + +const struct cpg_mssr_info r8a77995_cpg_mssr_info __initconst = { + /* Core Clocks */ + .core_clks = r8a77995_core_clks, + .num_core_clks = 
ARRAY_SIZE(r8a77995_core_clks), + .last_dt_core_clk = LAST_DT_CORE_CLK, + .num_total_core_clks = MOD_CLK_BASE, + + /* Module Clocks */ + .mod_clks = r8a77995_mod_clks, + .num_mod_clks = ARRAY_SIZE(r8a77995_mod_clks), + .num_hw_mod_clks = 12 * 32, + + /* Critical Module Clocks */ + .crit_mod_clks = r8a77995_crit_mod_clks, + .num_crit_mod_clks = ARRAY_SIZE(r8a77995_crit_mod_clks), + + /* Callbacks */ + .init = r8a77995_cpg_mssr_init, + .cpg_clk_register = rcar_gen3_cpg_clk_register, +}; diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c index 3dee900522b7..951105816547 100644 --- a/drivers/clk/renesas/rcar-gen3-cpg.c +++ b/drivers/clk/renesas/rcar-gen3-cpg.c @@ -60,6 +60,7 @@ struct sd_clock { unsigned int div_num; unsigned int div_min; unsigned int div_max; + unsigned int cur_div_idx; }; /* SDn divider @@ -96,21 +97,10 @@ static const struct sd_div_table cpg_sd_div_table[] = { static int cpg_sd_clock_enable(struct clk_hw *hw) { struct sd_clock *clock = to_sd_clock(hw); - u32 val, sd_fc; - unsigned int i; - - val = readl(clock->reg); - - sd_fc = val & CPG_SD_FC_MASK; - for (i = 0; i < clock->div_num; i++) - if (sd_fc == (clock->div_table[i].val & CPG_SD_FC_MASK)) - break; - - if (i >= clock->div_num) - return -EINVAL; + u32 val = readl(clock->reg); val &= ~(CPG_SD_STP_MASK); - val |= clock->div_table[i].val & CPG_SD_STP_MASK; + val |= clock->div_table[clock->cur_div_idx].val & CPG_SD_STP_MASK; writel(val, clock->reg); @@ -135,21 +125,9 @@ static unsigned long cpg_sd_clock_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct sd_clock *clock = to_sd_clock(hw); - unsigned long rate = parent_rate; - u32 val, sd_fc; - unsigned int i; - val = readl(clock->reg); - - sd_fc = val & CPG_SD_FC_MASK; - for (i = 0; i < clock->div_num; i++) - if (sd_fc == (clock->div_table[i].val & CPG_SD_FC_MASK)) - break; - - if (i >= clock->div_num) - return -EINVAL; - - return DIV_ROUND_CLOSEST(rate, clock->div_table[i].div); + return DIV_ROUND_CLOSEST(parent_rate, + clock->div_table[clock->cur_div_idx].div); } static unsigned int cpg_sd_clock_calc_div(struct sd_clock *clock, @@ -190,6 +168,8 @@ static int cpg_sd_clock_set_rate(struct clk_hw *hw, unsigned long rate, if (i >= clock->div_num) return -EINVAL; + clock->cur_div_idx = i; + val = readl(clock->reg); val &= ~(CPG_SD_STP_MASK | CPG_SD_FC_MASK); val |= clock->div_table[i].val & (CPG_SD_STP_MASK | CPG_SD_FC_MASK); @@ -215,6 +195,7 @@ static struct clk * __init cpg_sd_clk_register(const struct cpg_core_clk *core, struct sd_clock *clock; struct clk *clk; unsigned int i; + u32 sd_fc; clock = kzalloc(sizeof(*clock), GFP_KERNEL); if (!clock) @@ -231,6 +212,18 @@ static struct clk * __init cpg_sd_clk_register(const struct cpg_core_clk *core, clock->div_table = cpg_sd_div_table; clock->div_num = ARRAY_SIZE(cpg_sd_div_table); + sd_fc = readl(clock->reg) & CPG_SD_FC_MASK; + for (i = 0; i < clock->div_num; i++) + if (sd_fc == (clock->div_table[i].val & CPG_SD_FC_MASK)) + break; + + if (WARN_ON(i >= clock->div_num)) { + kfree(clock); + return ERR_PTR(-EINVAL); + } + + clock->cur_div_idx = i; + clock->div_max = clock->div_table[0].div; clock->div_min = clock->div_max; for (i = 1; i < clock->div_num; i++) { @@ -279,7 +272,7 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev, unsigned int div = 1; u32 value; - parent = clks[core->parent]; + parent = clks[core->parent & 0xffff]; /* CLK_TYPE_PE uses high bits */ if (IS_ERR(parent)) return ERR_CAST(parent); @@ -303,6 +296,7 @@ struct clk * __init 
rcar_gen3_cpg_clk_register(struct device *dev, case CLK_TYPE_GEN3_PLL1: mult = cpg_pll_config->pll1_mult; + div = cpg_pll_config->pll1_div; break; case CLK_TYPE_GEN3_PLL2: @@ -320,6 +314,7 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev, case CLK_TYPE_GEN3_PLL3: mult = cpg_pll_config->pll3_mult; + div = cpg_pll_config->pll3_div; break; case CLK_TYPE_GEN3_PLL4: @@ -360,6 +355,24 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev, parent = clks[cpg_clk_extalr]; break; + case CLK_TYPE_GEN3_PE: + /* + * Peripheral clock with a fixed divider, selectable between + * clean and spread spectrum parents using MD12 + */ + if (cpg_mode & BIT(12)) { + /* Clean */ + div = core->div & 0xffff; + } else { + /* SCCG */ + parent = clks[core->parent >> 16]; + if (IS_ERR(parent)) + return ERR_CAST(parent); + div = core->div >> 16; + } + mult = 1; + break; + default: return ERR_PTR(-EINVAL); } diff --git a/drivers/clk/renesas/rcar-gen3-cpg.h b/drivers/clk/renesas/rcar-gen3-cpg.h index 073be54b5d03..d756ef8b78eb 100644 --- a/drivers/clk/renesas/rcar-gen3-cpg.h +++ b/drivers/clk/renesas/rcar-gen3-cpg.h @@ -20,15 +20,24 @@ enum rcar_gen3_clk_types { CLK_TYPE_GEN3_PLL4, CLK_TYPE_GEN3_SD, CLK_TYPE_GEN3_R, + CLK_TYPE_GEN3_PE, }; #define DEF_GEN3_SD(_name, _id, _parent, _offset) \ DEF_BASE(_name, _id, CLK_TYPE_GEN3_SD, _parent, .offset = _offset) +#define DEF_GEN3_PE(_name, _id, _parent_sscg, _div_sscg, _parent_clean, \ + _div_clean) \ + DEF_BASE(_name, _id, CLK_TYPE_GEN3_PE, \ + (_parent_sscg) << 16 | (_parent_clean), \ + .div = (_div_sscg) << 16 | (_div_clean)) + struct rcar_gen3_cpg_pll_config { - unsigned int extal_div; - unsigned int pll1_mult; - unsigned int pll3_mult; + u8 extal_div; + u8 pll1_mult; + u8 pll1_div; + u8 pll3_mult; + u8 pll3_div; }; #define CPG_RCKCR 0x240 diff --git a/drivers/clk/renesas/rcar-usb2-clock-sel.c b/drivers/clk/renesas/rcar-usb2-clock-sel.c new file mode 100644 index 000000000000..6cd030a58964 --- /dev/null +++ b/drivers/clk/renesas/rcar-usb2-clock-sel.c @@ -0,0 +1,188 @@ +/* + * Renesas R-Car USB2.0 clock selector + * + * Copyright (C) 2017 Renesas Electronics Corp. + * + * Based on renesas-cpg-mssr.c + * + * Copyright (C) 2015 Glider bvba + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. 
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define USB20_CLKSET0 0x00
+#define CLKSET0_INTCLK_EN BIT(11)
+#define CLKSET0_PRIVATE BIT(0)
+#define CLKSET0_EXTAL_ONLY (CLKSET0_INTCLK_EN | CLKSET0_PRIVATE)
+
+struct usb2_clock_sel_priv {
+ void __iomem *base;
+ struct clk_hw hw;
+ bool extal;
+ bool xtal;
+};
+#define to_priv(_hw) container_of(_hw, struct usb2_clock_sel_priv, hw)
+
+static void usb2_clock_sel_enable_extal_only(struct usb2_clock_sel_priv *priv)
+{
+ u16 val = readw(priv->base + USB20_CLKSET0);
+
+ pr_debug("%s: enter %d %d %x\n", __func__,
+ priv->extal, priv->xtal, val);
+
+ if (priv->extal && !priv->xtal && val != CLKSET0_EXTAL_ONLY)
+ writew(CLKSET0_EXTAL_ONLY, priv->base + USB20_CLKSET0);
+}
+
+static void usb2_clock_sel_disable_extal_only(struct usb2_clock_sel_priv *priv)
+{
+ if (priv->extal && !priv->xtal)
+ writew(CLKSET0_PRIVATE, priv->base + USB20_CLKSET0);
+}
+
+static int usb2_clock_sel_enable(struct clk_hw *hw)
+{
+ usb2_clock_sel_enable_extal_only(to_priv(hw));
+
+ return 0;
+}
+
+static void usb2_clock_sel_disable(struct clk_hw *hw)
+{
+ usb2_clock_sel_disable_extal_only(to_priv(hw));
+}
+
+/*
+ * This module seems a mux, but this driver assumes a gate because
+ * ehci/ohci platform drivers don't support clk_set_parent() for now.
+ * If this driver acts as a gate, ehci/ohci-platform drivers don't need
+ * any modification.
+ */
+static const struct clk_ops usb2_clock_sel_clock_ops = {
+ .enable = usb2_clock_sel_enable,
+ .disable = usb2_clock_sel_disable,
+};
+
+static const struct of_device_id rcar_usb2_clock_sel_match[] = {
+ { .compatible = "renesas,rcar-gen3-usb2-clock-sel" },
+ { }
+};
+
+static int rcar_usb2_clock_sel_suspend(struct device *dev)
+{
+ struct usb2_clock_sel_priv *priv = dev_get_drvdata(dev);
+
+ usb2_clock_sel_disable_extal_only(priv);
+ pm_runtime_put(dev);
+
+ return 0;
+}
+
+static int rcar_usb2_clock_sel_resume(struct device *dev)
+{
+ struct usb2_clock_sel_priv *priv = dev_get_drvdata(dev);
+
+ pm_runtime_get_sync(dev);
+ usb2_clock_sel_enable_extal_only(priv);
+
+ return 0;
+}
+
+static int rcar_usb2_clock_sel_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct usb2_clock_sel_priv *priv = platform_get_drvdata(pdev);
+
+ of_clk_del_provider(dev->of_node);
+ clk_hw_unregister(&priv->hw);
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+
+ return 0;
+}
+
+static int rcar_usb2_clock_sel_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct usb2_clock_sel_priv *priv;
+ struct resource *res;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ clk = devm_clk_get(dev, "usb_extal");
+ if (!IS_ERR(clk) && !clk_prepare_enable(clk)) {
+ priv->extal = !!clk_get_rate(clk);
+ clk_disable_unprepare(clk);
+ }
+ clk = devm_clk_get(dev, "usb_xtal");
+ if (!IS_ERR(clk) && !clk_prepare_enable(clk)) {
+ priv->xtal = !!clk_get_rate(clk);
+ clk_disable_unprepare(clk);
+ }
+
+ if (!priv->extal && !priv->xtal) {
+ dev_err(dev, "This driver needs usb_extal or usb_xtal\n");
+ return -ENOENT;
+ }
+
+ platform_set_drvdata(pdev, priv);
+ dev_set_drvdata(dev, priv);
+
+ init.name = "rcar_usb2_clock_sel";
+ init.ops = &usb2_clock_sel_clock_ops;
+ init.flags = 0;
+ init.parent_names = NULL;
+ init.num_parents = 0;
+ priv->hw.init = &init;
+
+ clk = clk_register(NULL, &priv->hw);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ return of_clk_add_hw_provider(np, of_clk_hw_simple_get, &priv->hw);
+}
+
+static const struct dev_pm_ops rcar_usb2_clock_sel_pm_ops = {
+ .suspend = rcar_usb2_clock_sel_suspend,
+ .resume = rcar_usb2_clock_sel_resume,
+};
+
+static struct platform_driver rcar_usb2_clock_sel_driver = {
+ .driver = {
+ .name = "rcar-usb2-clock-sel",
+ .of_match_table = rcar_usb2_clock_sel_match,
+ .pm = &rcar_usb2_clock_sel_pm_ops,
+ },
+ .probe = rcar_usb2_clock_sel_probe,
+ .remove = rcar_usb2_clock_sel_remove,
+};
+builtin_platform_driver(rcar_usb2_clock_sel_driver);
+
+MODULE_DESCRIPTION("Renesas R-Car USB2 clock selector Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index 1f607c806f9b..e580a5e6346c 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -679,6 +679,12 @@ static const struct of_device_id cpg_mssr_match[] = {
 .compatible = "renesas,r8a7796-cpg-mssr",
 .data = &r8a7796_cpg_mssr_info,
 },
+#endif
+#ifdef CONFIG_CLK_R8A77995
+ {
+ .compatible = "renesas,r8a77995-cpg-mssr",
+ .data = &r8a77995_cpg_mssr_info,
+ },
 #endif
 { /* sentinel */ }
 };
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.h b/drivers/clk/renesas/renesas-cpg-mssr.h
index 43d7c7f6832d..94b9071d1061 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.h
+++ b/drivers/clk/renesas/renesas-cpg-mssr.h
@@ -138,6 +138,7 @@ extern const struct cpg_mssr_info r8a7792_cpg_mssr_info;
 extern const struct cpg_mssr_info r8a7794_cpg_mssr_info;
 extern const struct cpg_mssr_info r8a7795_cpg_mssr_info;
 extern const struct cpg_mssr_info r8a7796_cpg_mssr_info;
+extern const struct cpg_mssr_info r8a77995_cpg_mssr_info;
 /*
diff --git a/drivers/clk/rockchip/clk-rk3128.c b/drivers/clk/rockchip/clk-rk3128.c
index e243f2eae68f..5970a50671b9 100644
--- a/drivers/clk/rockchip/clk-rk3128.c
+++ b/drivers/clk/rockchip/clk-rk3128.c
@@ -201,7 +201,7 @@ static struct rockchip_clk_branch rk3128_uart2_fracmux __initdata =
 MUX(SCLK_UART2, "sclk_uart2", mux_uart2_p, CLK_SET_RATE_PARENT,
 RK2928_CLKSEL_CON(15), 8, 2, MFLAGS);
-static struct rockchip_clk_branch rk3128_clk_branches[] __initdata = {
+static struct rockchip_clk_branch common_clk_branches[] __initdata = {
 /*
 * Clock-Architecture Diagram 1
 */
@@ -315,13 +315,13 @@ static struct rockchip_clk_branch rk3128_clk_branches[] __initdata = {
 RK2928_CLKGATE_CON(10), 8, GFLAGS),
 GATE(SCLK_PVTM_CORE, "clk_pvtm_core", "xin24m", 0,
- RK2928_CLKGATE_CON(10), 8, GFLAGS),
+ RK2928_CLKGATE_CON(10), 0, GFLAGS),
 GATE(SCLK_PVTM_GPU, "clk_pvtm_gpu", "xin24m", 0,
- RK2928_CLKGATE_CON(10), 8, GFLAGS),
+ RK2928_CLKGATE_CON(10), 1, GFLAGS),
 GATE(SCLK_PVTM_FUNC, "clk_pvtm_func", "xin24m", 0,
- RK2928_CLKGATE_CON(10), 8, GFLAGS),
+ RK2928_CLKGATE_CON(10), 2, GFLAGS),
 GATE(SCLK_MIPI_24M, "clk_mipi_24m", "xin24m", CLK_IGNORE_UNUSED,
- RK2928_CLKGATE_CON(10), 8, GFLAGS),
+ RK2928_CLKGATE_CON(2), 15, GFLAGS),
 COMPOSITE(SCLK_SDMMC, "sclk_sdmmc0", mux_mmc_src_p, 0,
 RK2928_CLKSEL_CON(11), 6, 2, MFLAGS, 0, 6, DFLAGS,
@@ -459,10 +459,6 @@ static struct rockchip_clk_branch rk3128_clk_branches[] __initdata = {
 RK2928_CLKSEL_CON(2), 14, 2, MFLAGS, 8, 5, DFLAGS,
 RK2928_CLKGATE_CON(10), 15, GFLAGS),
- COMPOSITE(SCLK_SFC, "sclk_sfc", mux_sclk_sfc_src_p, 0,
- RK2928_CLKSEL_CON(11), 14,
2, MFLAGS, 8, 5, DFLAGS, - RK2928_CLKGATE_CON(3), 15, GFLAGS), - COMPOSITE_NOMUX(PCLK_PMU_PRE, "pclk_pmu_pre", "cpll", 0, RK2928_CLKSEL_CON(29), 8, 6, DFLAGS, RK2928_CLKGATE_CON(1), 0, GFLAGS), @@ -495,7 +491,6 @@ static struct rockchip_clk_branch rk3128_clk_branches[] __initdata = { GATE(ACLK_DMAC, "aclk_dmac", "aclk_peri", 0, RK2928_CLKGATE_CON(5), 1, GFLAGS), GATE(0, "aclk_peri_niu", "aclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 15, GFLAGS), GATE(0, "aclk_cpu_to_peri", "aclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 2, GFLAGS), - GATE(HCLK_GPS, "hclk_gps", "aclk_peri", 0, RK2928_CLKGATE_CON(3), 14, GFLAGS), GATE(HCLK_I2S_8CH, "hclk_i2s_8ch", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 4, GFLAGS), GATE(0, "hclk_peri_matrix", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 0, GFLAGS), @@ -541,13 +536,12 @@ static struct rockchip_clk_branch rk3128_clk_branches[] __initdata = { GATE(0, "hclk_rom", "hclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 6, GFLAGS), GATE(HCLK_CRYPTO, "hclk_crypto", "hclk_cpu", 0, RK2928_CLKGATE_CON(3), 5, GFLAGS), - GATE(PCLK_HDMI, "pclk_hdmi", "pclk_cpu", 0, RK2928_CLKGATE_CON(3), 8, GFLAGS), GATE(PCLK_ACODEC, "pclk_acodec", "pclk_cpu", 0, RK2928_CLKGATE_CON(5), 14, GFLAGS), GATE(0, "pclk_ddrupctl", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 7, GFLAGS), GATE(0, "pclk_grf", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 4, GFLAGS), GATE(0, "pclk_mipiphy", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 0, GFLAGS), - GATE(0, "pclk_pmu", "pclk_pmu_pre", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 2, GFLAGS), + GATE(0, "pclk_pmu", "pclk_pmu_pre", 0, RK2928_CLKGATE_CON(9), 2, GFLAGS), GATE(0, "pclk_pmu_niu", "pclk_pmu_pre", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 3, GFLAGS), /* PD_MMC */ @@ -561,6 +555,21 @@ static struct rockchip_clk_branch rk3128_clk_branches[] __initdata = { MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "sclk_emmc", RK3228_EMMC_CON1, 0), }; +static struct rockchip_clk_branch rk3126_clk_branches[] __initdata = { + GATE(0, "pclk_stimer", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(3), 15, GFLAGS), + GATE(0, "pclk_s_efuse", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(3), 14, GFLAGS), + GATE(0, "pclk_sgrf", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(3), 8, GFLAGS), +}; + +static struct rockchip_clk_branch rk3128_clk_branches[] __initdata = { + COMPOSITE(SCLK_SFC, "sclk_sfc", mux_sclk_sfc_src_p, 0, + RK2928_CLKSEL_CON(11), 14, 2, MFLAGS, 8, 5, DFLAGS, + RK2928_CLKGATE_CON(3), 15, GFLAGS), + + GATE(HCLK_GPS, "hclk_gps", "aclk_peri", 0, RK2928_CLKGATE_CON(3), 14, GFLAGS), + GATE(PCLK_HDMI, "pclk_hdmi", "pclk_cpu", 0, RK2928_CLKGATE_CON(3), 8, GFLAGS), +}; + static const char *const rk3128_critical_clocks[] __initconst = { "aclk_cpu", "hclk_cpu", @@ -568,9 +577,11 @@ static const char *const rk3128_critical_clocks[] __initconst = { "aclk_peri", "hclk_peri", "pclk_peri", + "pclk_pmu", + "sclk_timer5", }; -static void __init rk3128_clk_init(struct device_node *np) +static struct rockchip_clk_provider *__init rk3128_common_clk_init(struct device_node *np) { struct rockchip_clk_provider *ctx; void __iomem *reg_base; @@ -578,23 +589,21 @@ static void __init rk3128_clk_init(struct device_node *np) reg_base = of_iomap(np, 0); if (!reg_base) { pr_err("%s: could not map cru region\n", __func__); - return; + return ERR_PTR(-ENOMEM); } ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS); if (IS_ERR(ctx)) { pr_err("%s: rockchip clk init failed\n", __func__); iounmap(reg_base); - return; + return ERR_PTR(-ENOMEM); } 
rockchip_clk_register_plls(ctx, rk3128_pll_clks, ARRAY_SIZE(rk3128_pll_clks), RK3128_GRF_SOC_STATUS0); - rockchip_clk_register_branches(ctx, rk3128_clk_branches, - ARRAY_SIZE(rk3128_clk_branches)); - rockchip_clk_protect_critical(rk3128_critical_clocks, - ARRAY_SIZE(rk3128_critical_clocks)); + rockchip_clk_register_branches(ctx, common_clk_branches, + ARRAY_SIZE(common_clk_branches)); rockchip_clk_register_armclk(ctx, ARMCLK, "armclk", mux_armclk_p, ARRAY_SIZE(mux_armclk_p), @@ -606,6 +615,40 @@ static void __init rk3128_clk_init(struct device_node *np) rockchip_register_restart_notifier(ctx, RK2928_GLB_SRST_FST, NULL); + return ctx; +} + +static void __init rk3126_clk_init(struct device_node *np) +{ + struct rockchip_clk_provider *ctx; + + ctx = rk3128_common_clk_init(np); + if (IS_ERR(ctx)) + return; + + rockchip_clk_register_branches(ctx, rk3126_clk_branches, + ARRAY_SIZE(rk3126_clk_branches)); + rockchip_clk_protect_critical(rk3128_critical_clocks, + ARRAY_SIZE(rk3128_critical_clocks)); + + rockchip_clk_of_add_provider(np, ctx); +} + +CLK_OF_DECLARE(rk3126_cru, "rockchip,rk3126-cru", rk3126_clk_init); + +static void __init rk3128_clk_init(struct device_node *np) +{ + struct rockchip_clk_provider *ctx; + + ctx = rk3128_common_clk_init(np); + if (IS_ERR(ctx)) + return; + + rockchip_clk_register_branches(ctx, rk3128_clk_branches, + ARRAY_SIZE(rk3128_clk_branches)); + rockchip_clk_protect_critical(rk3128_critical_clocks, + ARRAY_SIZE(rk3128_critical_clocks)); + rockchip_clk_of_add_provider(np, ctx); } diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c index bb405d9044a3..11e7f2d1c054 100644 --- a/drivers/clk/rockchip/clk-rk3228.c +++ b/drivers/clk/rockchip/clk-rk3228.c @@ -391,7 +391,7 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = { RK2928_CLKSEL_CON(11), 8, 2, MFLAGS, 0, 8, DFLAGS, RK2928_CLKGATE_CON(2), 11, GFLAGS), - COMPOSITE_NODIV(0, "sclk_sdio_src", mux_mmc_src_p, 0, + COMPOSITE_NODIV(SCLK_SDIO_SRC, "sclk_sdio_src", mux_mmc_src_p, 0, RK2928_CLKSEL_CON(11), 10, 2, MFLAGS, RK2928_CLKGATE_CON(2), 13, GFLAGS), DIV(SCLK_SDIO, "sclk_sdio", "sclk_sdio_src", 0, diff --git a/drivers/clk/rockchip/clk-rv1108.c b/drivers/clk/rockchip/clk-rv1108.c index 7c05ab366348..089cb17925e5 100644 --- a/drivers/clk/rockchip/clk-rv1108.c +++ b/drivers/clk/rockchip/clk-rv1108.c @@ -93,9 +93,24 @@ static struct rockchip_pll_rate_table rv1108_pll_rates[] = { } static struct rockchip_cpuclk_rate_table rv1108_cpuclk_rates[] __initdata = { - RV1108_CPUCLK_RATE(816000000, 4), - RV1108_CPUCLK_RATE(600000000, 4), - RV1108_CPUCLK_RATE(312000000, 4), + RV1108_CPUCLK_RATE(1608000000, 7), + RV1108_CPUCLK_RATE(1512000000, 7), + RV1108_CPUCLK_RATE(1488000000, 5), + RV1108_CPUCLK_RATE(1416000000, 5), + RV1108_CPUCLK_RATE(1392000000, 5), + RV1108_CPUCLK_RATE(1296000000, 5), + RV1108_CPUCLK_RATE(1200000000, 5), + RV1108_CPUCLK_RATE(1104000000, 5), + RV1108_CPUCLK_RATE(1008000000, 5), + RV1108_CPUCLK_RATE(912000000, 5), + RV1108_CPUCLK_RATE(816000000, 3), + RV1108_CPUCLK_RATE(696000000, 3), + RV1108_CPUCLK_RATE(600000000, 3), + RV1108_CPUCLK_RATE(500000000, 3), + RV1108_CPUCLK_RATE(408000000, 1), + RV1108_CPUCLK_RATE(312000000, 1), + RV1108_CPUCLK_RATE(216000000, 1), + RV1108_CPUCLK_RATE(96000000, 1), }; static const struct rockchip_cpuclk_reg_data rv1108_cpuclk_data = { @@ -105,7 +120,7 @@ static const struct rockchip_cpuclk_reg_data rv1108_cpuclk_data = { .mux_core_alt = 1, .mux_core_main = 0, .mux_core_shift = 8, - .mux_core_mask = 0x1, + .mux_core_mask = 0x3, }; 
PNAME(mux_pll_p) = { "xin24m", "xin24m"}; @@ -114,30 +129,42 @@ PNAME(mux_armclk_p) = { "apll_core", "gpll_core", "dpll_core" }; PNAME(mux_usb480m_pre_p) = { "usbphy", "xin24m" }; PNAME(mux_hdmiphy_phy_p) = { "hdmiphy", "xin24m" }; PNAME(mux_dclk_hdmiphy_pre_p) = { "dclk_hdmiphy_src_gpll", "dclk_hdmiphy_src_dpll" }; -PNAME(mux_pll_src_4plls_p) = { "dpll", "hdmiphy", "gpll", "usb480m" }; +PNAME(mux_pll_src_4plls_p) = { "dpll", "gpll", "hdmiphy", "usb480m" }; PNAME(mux_pll_src_3plls_p) = { "apll", "gpll", "dpll" }; PNAME(mux_pll_src_2plls_p) = { "dpll", "gpll" }; PNAME(mux_pll_src_apll_gpll_p) = { "apll", "gpll" }; -PNAME(mux_aclk_peri_src_p) = { "aclk_peri_src_dpll", "aclk_peri_src_gpll" }; +PNAME(mux_aclk_peri_src_p) = { "aclk_peri_src_gpll", "aclk_peri_src_dpll" }; PNAME(mux_aclk_bus_src_p) = { "aclk_bus_src_gpll", "aclk_bus_src_apll", "aclk_bus_src_dpll" }; PNAME(mux_mmc_src_p) = { "dpll", "gpll", "xin24m", "usb480m" }; PNAME(mux_pll_src_dpll_gpll_usb480m_p) = { "dpll", "gpll", "usb480m" }; PNAME(mux_uart0_p) = { "uart0_src", "uart0_frac", "xin24m" }; PNAME(mux_uart1_p) = { "uart1_src", "uart1_frac", "xin24m" }; PNAME(mux_uart2_p) = { "uart2_src", "uart2_frac", "xin24m" }; -PNAME(mux_sclk_macphy_p) = { "sclk_macphy_pre", "ext_gmac" }; +PNAME(mux_sclk_mac_p) = { "sclk_mac_pre", "ext_gmac" }; PNAME(mux_i2s0_pre_p) = { "i2s0_src", "i2s0_frac", "ext_i2s", "xin12m" }; PNAME(mux_i2s_out_p) = { "i2s0_pre", "xin12m" }; -PNAME(mux_i2s1_p) = { "i2s1_src", "i2s1_frac", "xin12m" }; -PNAME(mux_i2s2_p) = { "i2s2_src", "i2s2_frac", "xin12m" }; +PNAME(mux_i2s1_p) = { "i2s1_src", "i2s1_frac", "dummy", "xin12m" }; +PNAME(mux_i2s2_p) = { "i2s2_src", "i2s2_frac", "dummy", "xin12m" }; +PNAME(mux_wifi_src_p) = { "gpll", "xin24m" }; +PNAME(mux_cifout_src_p) = { "hdmiphy", "gpll" }; +PNAME(mux_cifout_p) = { "sclk_cifout_src", "xin24m" }; +PNAME(mux_sclk_cif0_src_p) = { "pclk_vip", "clk_cif0_chn_out", "pclkin_cvbs2cif" }; +PNAME(mux_sclk_cif1_src_p) = { "pclk_vip", "clk_cif1_chn_out", "pclkin_cvbs2cif" }; +PNAME(mux_sclk_cif2_src_p) = { "pclk_vip", "clk_cif2_chn_out", "pclkin_cvbs2cif" }; +PNAME(mux_sclk_cif3_src_p) = { "pclk_vip", "clk_cif3_chn_out", "pclkin_cvbs2cif" }; +PNAME(mux_dsp_src_p) = { "dpll", "gpll", "apll", "usb480m" }; +PNAME(mux_dclk_hdmiphy_p) = { "hdmiphy", "xin24m" }; +PNAME(mux_dclk_vop_p) = { "dclk_hdmiphy", "dclk_vop_src" }; +PNAME(mux_hdmi_cec_src_p) = { "dpll", "gpll", "xin24m" }; +PNAME(mux_cvbs_src_p) = { "apll", "io_cvbs_clkin", "hdmiphy", "gpll" }; static struct rockchip_pll_clock rv1108_pll_clks[] __initdata = { [apll] = PLL(pll_rk3399, PLL_APLL, "apll", mux_pll_p, 0, RV1108_PLL_CON(0), - RV1108_PLL_CON(3), 8, 31, 0, rv1108_pll_rates), + RV1108_PLL_CON(3), 8, 0, 0, rv1108_pll_rates), [dpll] = PLL(pll_rk3399, PLL_DPLL, "dpll", mux_pll_p, 0, RV1108_PLL_CON(8), - RV1108_PLL_CON(11), 8, 31, 0, NULL), + RV1108_PLL_CON(11), 8, 1, 0, NULL), [gpll] = PLL(pll_rk3399, PLL_GPLL, "gpll", mux_pll_p, 0, RV1108_PLL_CON(16), - RV1108_PLL_CON(19), 8, 31, ROCKCHIP_PLL_SYNC_RATE, rv1108_pll_rates), + RV1108_PLL_CON(19), 8, 2, 0, rv1108_pll_rates), }; #define MFLAGS CLK_MUX_HIWORD_MASK @@ -170,10 +197,10 @@ static struct rockchip_clk_branch rv1108_i2s2_fracmux __initdata = RV1108_CLKSEL_CON(7), 12, 2, MFLAGS); static struct rockchip_clk_branch rv1108_clk_branches[] __initdata = { - MUX(0, "hdmi_phy", mux_hdmiphy_phy_p, CLK_SET_RATE_PARENT, - RV1108_MISC_CON, 13, 2, MFLAGS), + MUX(0, "hdmiphy", mux_hdmiphy_phy_p, CLK_SET_RATE_PARENT, + RV1108_MISC_CON, 13, 1, MFLAGS), MUX(0, "usb480m", mux_usb480m_pre_p, 
CLK_SET_RATE_PARENT, - RV1108_MISC_CON, 15, 2, MFLAGS), + RV1108_MISC_CON, 15, 1, MFLAGS), /* * Clock-Architecture Diagram 2 */ @@ -197,50 +224,212 @@ static struct rockchip_clk_branch rv1108_clk_branches[] __initdata = { RV1108_CLKGATE_CON(11), 1, GFLAGS), /* PD_RKVENC */ + COMPOSITE(0, "aclk_rkvenc_pre", mux_pll_src_4plls_p, 0, + RV1108_CLKSEL_CON(37), 6, 2, MFLAGS, 0, 5, DFLAGS, + RV1108_CLKGATE_CON(8), 8, GFLAGS), + FACTOR_GATE(0, "hclk_rkvenc_pre", "aclk_rkvenc_pre", 0, 1, 4, + RV1108_CLKGATE_CON(8), 10, GFLAGS), + COMPOSITE(SCLK_VENC_CORE, "clk_venc_core", mux_pll_src_4plls_p, 0, + RV1108_CLKSEL_CON(37), 14, 2, MFLAGS, 8, 5, DFLAGS, + RV1108_CLKGATE_CON(8), 9, GFLAGS), + GATE(ACLK_RKVENC, "aclk_rkvenc", "aclk_rkvenc_pre", 0, + RV1108_CLKGATE_CON(19), 8, GFLAGS), + GATE(HCLK_RKVENC, "hclk_rkvenc", "hclk_rkvenc_pre", 0, + RV1108_CLKGATE_CON(19), 9, GFLAGS), + GATE(0, "aclk_rkvenc_niu", "aclk_rkvenc_pre", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(19), 11, GFLAGS), + GATE(0, "hclk_rkvenc_niu", "hclk_rkvenc_pre", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(19), 10, GFLAGS), /* PD_RKVDEC */ + COMPOSITE(SCLK_HEVC_CORE, "sclk_hevc_core", mux_pll_src_4plls_p, 0, + RV1108_CLKSEL_CON(36), 6, 2, MFLAGS, 0, 5, DFLAGS, + RV1108_CLKGATE_CON(8), 2, GFLAGS), + FACTOR_GATE(0, "hclk_rkvdec_pre", "sclk_hevc_core", 0, 1, 4, + RV1108_CLKGATE_CON(8), 10, GFLAGS), + COMPOSITE(SCLK_HEVC_CABAC, "clk_hevc_cabac", mux_pll_src_4plls_p, 0, + RV1108_CLKSEL_CON(35), 14, 2, MFLAGS, 8, 5, DFLAGS, + RV1108_CLKGATE_CON(8), 1, GFLAGS), + + COMPOSITE(0, "aclk_rkvdec_pre", mux_pll_src_4plls_p, 0, + RV1108_CLKSEL_CON(35), 6, 2, MFLAGS, 0, 5, DFLAGS, + RV1108_CLKGATE_CON(8), 0, GFLAGS), + COMPOSITE(0, "aclk_vpu_pre", mux_pll_src_4plls_p, 0, + RV1108_CLKSEL_CON(36), 14, 2, MFLAGS, 8, 5, DFLAGS, + RV1108_CLKGATE_CON(8), 3, GFLAGS), + GATE(ACLK_RKVDEC, "aclk_rkvdec", "aclk_rkvdec_pre", 0, + RV1108_CLKGATE_CON(19), 0, GFLAGS), + GATE(ACLK_VPU, "aclk_vpu", "aclk_vpu_pre", 0, + RV1108_CLKGATE_CON(19), 1, GFLAGS), + GATE(HCLK_RKVDEC, "hclk_rkvdec", "hclk_rkvdec_pre", 0, + RV1108_CLKGATE_CON(19), 2, GFLAGS), + GATE(HCLK_VPU, "hclk_vpu", "hclk_rkvdec_pre", 0, + RV1108_CLKGATE_CON(19), 3, GFLAGS), + GATE(0, "aclk_rkvdec_niu", "aclk_rkvdec_pre", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(19), 4, GFLAGS), + GATE(0, "hclk_rkvdec_niu", "hclk_rkvdec_pre", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(19), 5, GFLAGS), + GATE(0, "aclk_vpu_niu", "aclk_vpu_pre", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(19), 6, GFLAGS), /* PD_PMU_wrapper */ COMPOSITE_NOMUX(0, "pmu_24m_ena", "gpll", CLK_IGNORE_UNUSED, RV1108_CLKSEL_CON(38), 0, 5, DFLAGS, RV1108_CLKGATE_CON(8), 12, GFLAGS), - GATE(0, "pmu", "pmu_24m_ena", CLK_IGNORE_UNUSED, + GATE(0, "pclk_pmu", "pmu_24m_ena", CLK_IGNORE_UNUSED, RV1108_CLKGATE_CON(10), 0, GFLAGS), - GATE(0, "intmem1", "pmu_24m_ena", CLK_IGNORE_UNUSED, + GATE(0, "pclk_intmem1", "pmu_24m_ena", CLK_IGNORE_UNUSED, RV1108_CLKGATE_CON(10), 1, GFLAGS), - GATE(0, "gpio0_pmu", "pmu_24m_ena", CLK_IGNORE_UNUSED, + GATE(PCLK_GPIO0_PMU, "pclk_gpio0_pmu", "pmu_24m_ena", 0, RV1108_CLKGATE_CON(10), 2, GFLAGS), - GATE(0, "pmugrf", "pmu_24m_ena", CLK_IGNORE_UNUSED, + GATE(0, "pclk_pmugrf", "pmu_24m_ena", CLK_IGNORE_UNUSED, RV1108_CLKGATE_CON(10), 3, GFLAGS), - GATE(0, "pmu_noc", "pmu_24m_ena", CLK_IGNORE_UNUSED, + GATE(0, "pclk_pmu_niu", "pmu_24m_ena", CLK_IGNORE_UNUSED, RV1108_CLKGATE_CON(10), 4, GFLAGS), - GATE(0, "i2c0_pmu_pclk", "pmu_24m_ena", CLK_IGNORE_UNUSED, + GATE(PCLK_I2C0_PMU, "pclk_i2c0_pmu", "pmu_24m_ena", 0, RV1108_CLKGATE_CON(10), 5, GFLAGS), - 
GATE(0, "pwm0_pmu_pclk", "pmu_24m_ena", CLK_IGNORE_UNUSED, + GATE(PCLK_PWM0_PMU, "pclk_pwm0_pmu", "pmu_24m_ena", 0, RV1108_CLKGATE_CON(10), 6, GFLAGS), - COMPOSITE(0, "pwm0_pmu_clk", mux_pll_src_2plls_p, CLK_IGNORE_UNUSED, + COMPOSITE(SCLK_PWM0_PMU, "sclk_pwm0_pmu", mux_pll_src_2plls_p, 0, RV1108_CLKSEL_CON(12), 7, 1, MFLAGS, 0, 7, DFLAGS, RV1108_CLKGATE_CON(8), 15, GFLAGS), - COMPOSITE(0, "i2c0_pmu_clk", mux_pll_src_2plls_p, CLK_IGNORE_UNUSED, + COMPOSITE(SCLK_I2C0_PMU, "sclk_i2c0_pmu", mux_pll_src_2plls_p, 0, RV1108_CLKSEL_CON(19), 7, 1, MFLAGS, 0, 7, DFLAGS, RV1108_CLKGATE_CON(8), 14, GFLAGS), GATE(0, "pvtm_pmu", "xin24m", CLK_IGNORE_UNUSED, RV1108_CLKGATE_CON(8), 13, GFLAGS), + /* + * Clock-Architecture Diagram 3 + */ + COMPOSITE(SCLK_WIFI, "sclk_wifi", mux_wifi_src_p, 0, + RV1108_CLKSEL_CON(28), 15, 1, MFLAGS, 8, 6, DFLAGS, + RV1108_CLKGATE_CON(9), 8, GFLAGS), + COMPOSITE_NODIV(0, "sclk_cifout_src", mux_cifout_src_p, 0, + RV1108_CLKSEL_CON(40), 8, 1, MFLAGS, + RV1108_CLKGATE_CON(9), 11, GFLAGS), + COMPOSITE_NOGATE(SCLK_CIFOUT, "sclk_cifout", mux_cifout_p, 0, + RV1108_CLKSEL_CON(40), 12, 1, MFLAGS, 0, 5, DFLAGS), + COMPOSITE_NOMUX(SCLK_MIPI_CSI_OUT, "sclk_mipi_csi_out", "xin24m", 0, + RV1108_CLKSEL_CON(41), 0, 5, DFLAGS, + RV1108_CLKGATE_CON(9), 12, GFLAGS), + + GATE(0, "pclk_acodecphy", "pclk_top_pre", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(14), 6, GFLAGS), + GATE(0, "pclk_usbgrf", "pclk_top_pre", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(14), 14, GFLAGS), + + GATE(ACLK_CIF0, "aclk_cif0", "aclk_vio1_pre", 0, + RV1108_CLKGATE_CON(18), 10, GFLAGS), + GATE(HCLK_CIF0, "hclk_cif0", "hclk_vio_pre", 0, + RV1108_CLKGATE_CON(18), 10, GFLAGS), + COMPOSITE_NODIV(SCLK_CIF0, "sclk_cif0", mux_sclk_cif0_src_p, 0, + RV1108_CLKSEL_CON(31), 0, 2, MFLAGS, + RV1108_CLKGATE_CON(7), 9, GFLAGS), + GATE(ACLK_CIF1, "aclk_cif1", "aclk_vio1_pre", 0, + RV1108_CLKGATE_CON(17), 6, GFLAGS), + GATE(HCLK_CIF1, "hclk_cif1", "hclk_vio_pre", 0, + RV1108_CLKGATE_CON(17), 7, GFLAGS), + COMPOSITE_NODIV(SCLK_CIF1, "sclk_cif1", mux_sclk_cif1_src_p, 0, + RV1108_CLKSEL_CON(31), 2, 2, MFLAGS, + RV1108_CLKGATE_CON(7), 10, GFLAGS), + GATE(ACLK_CIF2, "aclk_cif2", "aclk_vio1_pre", 0, + RV1108_CLKGATE_CON(17), 8, GFLAGS), + GATE(HCLK_CIF2, "hclk_cif2", "hclk_vio_pre", 0, + RV1108_CLKGATE_CON(17), 9, GFLAGS), + COMPOSITE_NODIV(SCLK_CIF2, "sclk_cif2", mux_sclk_cif2_src_p, 0, + RV1108_CLKSEL_CON(31), 4, 2, MFLAGS, + RV1108_CLKGATE_CON(7), 11, GFLAGS), + GATE(ACLK_CIF3, "aclk_cif3", "aclk_vio1_pre", 0, + RV1108_CLKGATE_CON(17), 10, GFLAGS), + GATE(HCLK_CIF3, "hclk_cif3", "hclk_vio_pre", 0, + RV1108_CLKGATE_CON(17), 11, GFLAGS), + COMPOSITE_NODIV(SCLK_CIF3, "sclk_cif3", mux_sclk_cif3_src_p, 0, + RV1108_CLKSEL_CON(31), 6, 2, MFLAGS, + RV1108_CLKGATE_CON(7), 12, GFLAGS), + GATE(0, "pclk_cif1to4", "pclk_vip", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(7), 8, GFLAGS), + + /* PD_DSP_wrapper */ + COMPOSITE(SCLK_DSP, "sclk_dsp", mux_dsp_src_p, 0, + RV1108_CLKSEL_CON(42), 8, 2, MFLAGS, 0, 5, DFLAGS, + RV1108_CLKGATE_CON(9), 0, GFLAGS), + GATE(0, "clk_dsp_sys_wd", "sclk_dsp", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(16), 0, GFLAGS), + GATE(0, "clk_dsp_epp_wd", "sclk_dsp", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(16), 1, GFLAGS), + GATE(0, "clk_dsp_edp_wd", "sclk_dsp", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(16), 2, GFLAGS), + GATE(0, "clk_dsp_iop_wd", "sclk_dsp", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(16), 3, GFLAGS), + GATE(0, "clk_dsp_free", "sclk_dsp", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(16), 13, GFLAGS), + COMPOSITE_NOMUX(SCLK_DSP_IOP, 
"sclk_dsp_iop", "sclk_dsp", 0, + RV1108_CLKSEL_CON(44), 0, 5, DFLAGS, + RV1108_CLKGATE_CON(9), 1, GFLAGS), + COMPOSITE_NOMUX(SCLK_DSP_EPP, "sclk_dsp_epp", "sclk_dsp", 0, + RV1108_CLKSEL_CON(44), 8, 5, DFLAGS, + RV1108_CLKGATE_CON(9), 2, GFLAGS), + COMPOSITE_NOMUX(SCLK_DSP_EDP, "sclk_dsp_edp", "sclk_dsp", 0, + RV1108_CLKSEL_CON(45), 0, 5, DFLAGS, + RV1108_CLKGATE_CON(9), 3, GFLAGS), + COMPOSITE_NOMUX(SCLK_DSP_EDAP, "sclk_dsp_edap", "sclk_dsp", 0, + RV1108_CLKSEL_CON(45), 8, 5, DFLAGS, + RV1108_CLKGATE_CON(9), 4, GFLAGS), + GATE(0, "pclk_dsp_iop_niu", "sclk_dsp_iop", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(16), 4, GFLAGS), + GATE(0, "aclk_dsp_epp_niu", "sclk_dsp_epp", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(16), 5, GFLAGS), + GATE(0, "aclk_dsp_edp_niu", "sclk_dsp_edp", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(16), 6, GFLAGS), + GATE(0, "pclk_dsp_dbg_niu", "sclk_dsp", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(16), 7, GFLAGS), + GATE(0, "aclk_dsp_edap_niu", "sclk_dsp_edap", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(16), 14, GFLAGS), + COMPOSITE_NOMUX(SCLK_DSP_PFM, "sclk_dsp_pfm", "sclk_dsp", 0, + RV1108_CLKSEL_CON(43), 0, 5, DFLAGS, + RV1108_CLKGATE_CON(9), 5, GFLAGS), + COMPOSITE_NOMUX(PCLK_DSP_CFG, "pclk_dsp_cfg", "sclk_dsp", 0, + RV1108_CLKSEL_CON(43), 8, 5, DFLAGS, + RV1108_CLKGATE_CON(9), 6, GFLAGS), + GATE(0, "pclk_dsp_cfg_niu", "pclk_dsp_cfg", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(16), 8, GFLAGS), + GATE(0, "pclk_dsp_pfm_mon", "pclk_dsp_cfg", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(16), 9, GFLAGS), + GATE(0, "pclk_intc", "pclk_dsp_cfg", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(16), 10, GFLAGS), + GATE(0, "pclk_dsp_grf", "pclk_dsp_cfg", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(16), 11, GFLAGS), + GATE(0, "pclk_mailbox", "pclk_dsp_cfg", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(16), 12, GFLAGS), + GATE(0, "aclk_dsp_epp_perf", "sclk_dsp_epp", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(16), 15, GFLAGS), + GATE(0, "aclk_dsp_edp_perf", "sclk_dsp_edp", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(11), 8, GFLAGS), + /* * Clock-Architecture Diagram 4 */ - COMPOSITE(0, "aclk_vio0_2wrap_occ", mux_pll_src_4plls_p, CLK_IGNORE_UNUSED, + COMPOSITE(0, "aclk_vio0_pre", mux_pll_src_4plls_p, CLK_IGNORE_UNUSED, RV1108_CLKSEL_CON(28), 6, 2, MFLAGS, 0, 5, DFLAGS, RV1108_CLKGATE_CON(6), 0, GFLAGS), - GATE(0, "aclk_vio0_pre", "aclk_vio0_2wrap_occ", CLK_IGNORE_UNUSED, + GATE(ACLK_VIO0, "aclk_vio0", "aclk_vio0_pre", 0, RV1108_CLKGATE_CON(17), 0, GFLAGS), COMPOSITE_NOMUX(0, "hclk_vio_pre", "aclk_vio0_pre", 0, RV1108_CLKSEL_CON(29), 0, 5, DFLAGS, RV1108_CLKGATE_CON(7), 2, GFLAGS), + GATE(HCLK_VIO, "hclk_vio", "hclk_vio_pre", 0, + RV1108_CLKGATE_CON(17), 2, GFLAGS), COMPOSITE_NOMUX(0, "pclk_vio_pre", "aclk_vio0_pre", 0, RV1108_CLKSEL_CON(29), 8, 5, DFLAGS, RV1108_CLKGATE_CON(7), 3, GFLAGS), + GATE(PCLK_VIO, "pclk_vio", "pclk_vio_pre", 0, + RV1108_CLKGATE_CON(17), 3, GFLAGS), + COMPOSITE(0, "aclk_vio1_pre", mux_pll_src_4plls_p, CLK_IGNORE_UNUSED, + RV1108_CLKSEL_CON(28), 14, 2, MFLAGS, 8, 5, DFLAGS, + RV1108_CLKGATE_CON(6), 1, GFLAGS), + GATE(ACLK_VIO1, "aclk_vio1", "aclk_vio1_pre", 0, + RV1108_CLKGATE_CON(17), 1, GFLAGS), INVERTER(0, "pclk_vip", "ext_vip", RV1108_CLKSEL_CON(31), 8, IFLAGS), @@ -252,8 +441,63 @@ static struct rockchip_clk_branch rv1108_clk_branches[] __initdata = { RV1108_CLKGATE_CON(6), 5, GFLAGS), GATE(0, "dclk_hdmiphy_src_dpll", "dpll", CLK_IGNORE_UNUSED, RV1108_CLKGATE_CON(6), 4, GFLAGS), - COMPOSITE_NOGATE(0, "dclk_hdmiphy", mux_dclk_hdmiphy_pre_p, 0, - RV1108_CLKSEL_CON(32), 6, 2, MFLAGS, 8, 6, DFLAGS), + 
COMPOSITE_NOGATE(0, "dclk_hdmiphy_pre", mux_dclk_hdmiphy_pre_p, 0, + RV1108_CLKSEL_CON(32), 6, 1, MFLAGS, 8, 6, DFLAGS), + COMPOSITE_NOGATE(DCLK_VOP_SRC, "dclk_vop_src", mux_dclk_hdmiphy_pre_p, 0, + RV1108_CLKSEL_CON(32), 6, 1, MFLAGS, 0, 6, DFLAGS), + MUX(DCLK_HDMIPHY, "dclk_hdmiphy", mux_dclk_hdmiphy_p, CLK_SET_RATE_PARENT, + RV1108_CLKSEL_CON(32), 15, 1, MFLAGS), + MUX(DCLK_VOP, "dclk_vop", mux_dclk_vop_p, CLK_SET_RATE_PARENT, + RV1108_CLKSEL_CON(32), 7, 1, MFLAGS), + GATE(ACLK_VOP, "aclk_vop", "aclk_vio0_pre", 0, + RV1108_CLKGATE_CON(18), 0, GFLAGS), + GATE(HCLK_VOP, "hclk_vop", "hclk_vio_pre", 0, + RV1108_CLKGATE_CON(18), 1, GFLAGS), + GATE(ACLK_IEP, "aclk_iep", "aclk_vio0_pre", 0, + RV1108_CLKGATE_CON(18), 2, GFLAGS), + GATE(HCLK_IEP, "hclk_iep", "hclk_vio_pre", 0, + RV1108_CLKGATE_CON(18), 3, GFLAGS), + + GATE(ACLK_RGA, "aclk_rga", "aclk_vio1_pre", 0, + RV1108_CLKGATE_CON(18), 4, GFLAGS), + GATE(HCLK_RGA, "hclk_rga", "hclk_vio_pre", 0, + RV1108_CLKGATE_CON(18), 5, GFLAGS), + COMPOSITE(SCLK_RGA, "sclk_rga", mux_pll_src_4plls_p, 0, + RV1108_CLKSEL_CON(33), 6, 2, MFLAGS, 0, 5, DFLAGS, + RV1108_CLKGATE_CON(6), 6, GFLAGS), + + COMPOSITE(SCLK_CVBS_HOST, "sclk_cvbs_host", mux_cvbs_src_p, 0, + RV1108_CLKSEL_CON(33), 13, 2, MFLAGS, 8, 5, DFLAGS, + RV1108_CLKGATE_CON(6), 7, GFLAGS), + FACTOR(0, "sclk_cvbs_27m", "sclk_cvbs_host", 0, 1, 2), + + GATE(SCLK_HDMI_SFR, "sclk_hdmi_sfr", "xin24m", 0, + RV1108_CLKGATE_CON(6), 8, GFLAGS), + + COMPOSITE(SCLK_HDMI_CEC, "sclk_hdmi_cec", mux_hdmi_cec_src_p, 0, + RV1108_CLKSEL_CON(34), 14, 2, MFLAGS, 0, 14, DFLAGS, + RV1108_CLKGATE_CON(6), 9, GFLAGS), + GATE(PCLK_MIPI_DSI, "pclk_mipi_dsi", "pclk_vio_pre", 0, + RV1108_CLKGATE_CON(18), 8, GFLAGS), + GATE(PCLK_HDMI_CTRL, "pclk_hdmi_ctrl", "pclk_vio_pre", 0, + RV1108_CLKGATE_CON(18), 9, GFLAGS), + + GATE(ACLK_ISP, "aclk_isp", "aclk_vio1_pre", 0, + RV1108_CLKGATE_CON(18), 12, GFLAGS), + GATE(HCLK_ISP, "hclk_isp", "hclk_vio_pre", 0, + RV1108_CLKGATE_CON(18), 11, GFLAGS), + COMPOSITE(SCLK_ISP, "sclk_isp", mux_pll_src_4plls_p, 0, + RV1108_CLKSEL_CON(30), 14, 2, MFLAGS, 8, 5, DFLAGS, + RV1108_CLKGATE_CON(6), 3, GFLAGS), + + GATE(0, "clk_dsiphy24m", "xin24m", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(9), 10, GFLAGS), + GATE(0, "pclk_vdacphy", "pclk_top_pre", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(14), 9, GFLAGS), + GATE(0, "pclk_mipi_dsiphy", "pclk_top_pre", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(14), 11, GFLAGS), + GATE(0, "pclk_mipi_csiphy", "pclk_top_pre", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(14), 12, GFLAGS), /* * Clock-Architecture Diagram 5 @@ -261,10 +505,11 @@ static struct rockchip_clk_branch rv1108_clk_branches[] __initdata = { FACTOR(0, "xin12m", "xin24m", 0, 1, 2), - COMPOSITE(0, "i2s0_src", mux_pll_src_2plls_p, 0, + + COMPOSITE(SCLK_I2S0_SRC, "i2s0_src", mux_pll_src_2plls_p, 0, RV1108_CLKSEL_CON(5), 8, 1, MFLAGS, 0, 7, DFLAGS, RV1108_CLKGATE_CON(2), 0, GFLAGS), - COMPOSITE_FRACMUX(0, "i2s1_frac", "i2s1_src", CLK_SET_RATE_PARENT, + COMPOSITE_FRACMUX(0, "i2s0_frac", "i2s0_src", CLK_SET_RATE_PARENT, RV1108_CLKSEL_CON(8), 0, RV1108_CLKGATE_CON(2), 1, GFLAGS, &rv1108_i2s0_fracmux), @@ -274,7 +519,7 @@ static struct rockchip_clk_branch rv1108_clk_branches[] __initdata = { RV1108_CLKSEL_CON(5), 15, 1, MFLAGS, RV1108_CLKGATE_CON(2), 3, GFLAGS), - COMPOSITE(0, "i2s1_src", mux_pll_src_2plls_p, 0, + COMPOSITE(SCLK_I2S1_SRC, "i2s1_src", mux_pll_src_2plls_p, 0, RV1108_CLKSEL_CON(6), 8, 1, MFLAGS, 0, 7, DFLAGS, RV1108_CLKGATE_CON(2), 4, GFLAGS), COMPOSITE_FRACMUX(0, "i2s1_frac", "i2s1_src", CLK_SET_RATE_PARENT, @@ 
-284,7 +529,7 @@ static struct rockchip_clk_branch rv1108_clk_branches[] __initdata = { GATE(SCLK_I2S1, "sclk_i2s1", "i2s1_pre", CLK_SET_RATE_PARENT, RV1108_CLKGATE_CON(2), 6, GFLAGS), - COMPOSITE(0, "i2s2_src", mux_pll_src_2plls_p, 0, + COMPOSITE(SCLK_I2S2_SRC, "i2s2_src", mux_pll_src_2plls_p, 0, RV1108_CLKSEL_CON(7), 8, 1, MFLAGS, 0, 7, DFLAGS, RV1108_CLKGATE_CON(3), 8, GFLAGS), COMPOSITE_FRACMUX(0, "i2s2_frac", "i2s2_src", CLK_SET_RATE_PARENT, @@ -303,32 +548,53 @@ static struct rockchip_clk_branch rv1108_clk_branches[] __initdata = { RV1108_CLKGATE_CON(1), 2, GFLAGS), COMPOSITE_NOGATE(ACLK_PRE, "aclk_bus_pre", mux_aclk_bus_src_p, 0, RV1108_CLKSEL_CON(2), 8, 2, MFLAGS, 0, 5, DFLAGS), - COMPOSITE_NOMUX(0, "hclk_bus_pre", "aclk_bus_2wrap_occ", 0, + COMPOSITE_NOMUX(HCLK_BUS, "hclk_bus_pre", "aclk_bus_pre", 0, RV1108_CLKSEL_CON(3), 0, 5, DFLAGS, RV1108_CLKGATE_CON(1), 4, GFLAGS), - COMPOSITE_NOMUX(0, "pclken_bus", "aclk_bus_2wrap_occ", 0, + COMPOSITE_NOMUX(0, "pclk_bus_pre", "aclk_bus_pre", 0, RV1108_CLKSEL_CON(3), 8, 5, DFLAGS, RV1108_CLKGATE_CON(1), 5, GFLAGS), - GATE(0, "pclk_bus_pre", "pclken_bus", CLK_IGNORE_UNUSED, + GATE(PCLK_BUS, "pclk_bus", "pclk_bus_pre", 0, RV1108_CLKGATE_CON(1), 6, GFLAGS), - GATE(0, "pclk_top_pre", "pclken_bus", CLK_IGNORE_UNUSED, + GATE(0, "pclk_top_pre", "pclk_bus_pre", CLK_IGNORE_UNUSED, RV1108_CLKGATE_CON(1), 7, GFLAGS), - GATE(0, "pclk_ddr_pre", "pclken_bus", CLK_IGNORE_UNUSED, + GATE(0, "pclk_ddr_pre", "pclk_bus_pre", CLK_IGNORE_UNUSED, RV1108_CLKGATE_CON(1), 8, GFLAGS), - GATE(0, "clk_timer0", "mux_pll_p", CLK_IGNORE_UNUSED, + GATE(SCLK_TIMER0, "clk_timer0", "xin24m", 0, RV1108_CLKGATE_CON(1), 9, GFLAGS), - GATE(0, "clk_timer1", "mux_pll_p", CLK_IGNORE_UNUSED, + GATE(SCLK_TIMER1, "clk_timer1", "xin24m", CLK_IGNORE_UNUSED, RV1108_CLKGATE_CON(1), 10, GFLAGS), - GATE(0, "pclk_timer", "pclk_bus_pre", CLK_IGNORE_UNUSED, + GATE(PCLK_TIMER, "pclk_timer", "pclk_bus_pre", CLK_IGNORE_UNUSED, RV1108_CLKGATE_CON(13), 4, GFLAGS), - COMPOSITE(0, "uart0_src", mux_pll_src_dpll_gpll_usb480m_p, CLK_IGNORE_UNUSED, + GATE(HCLK_I2S0_8CH, "hclk_i2s0_8ch", "hclk_bus_pre", 0, + RV1108_CLKGATE_CON(12), 7, GFLAGS), + GATE(HCLK_I2S1_2CH, "hclk_i2s1_2ch", "hclk_bus_pre", 0, + RV1108_CLKGATE_CON(12), 8, GFLAGS), + GATE(HCLK_I2S2_2CH, "hclk_i2s2_2ch", "hclk_bus_pre", 0, + RV1108_CLKGATE_CON(12), 9, GFLAGS), + + GATE(HCLK_CRYPTO_MST, "hclk_crypto_mst", "hclk_bus_pre", 0, + RV1108_CLKGATE_CON(12), 10, GFLAGS), + GATE(HCLK_CRYPTO_SLV, "hclk_crypto_slv", "hclk_bus_pre", 0, + RV1108_CLKGATE_CON(12), 11, GFLAGS), + COMPOSITE(SCLK_CRYPTO, "sclk_crypto", mux_pll_src_2plls_p, 0, + RV1108_CLKSEL_CON(11), 7, 1, MFLAGS, 0, 5, DFLAGS, + RV1108_CLKGATE_CON(2), 12, GFLAGS), + + COMPOSITE(SCLK_SPI, "sclk_spi", mux_pll_src_2plls_p, 0, + RV1108_CLKSEL_CON(11), 15, 1, MFLAGS, 8, 5, DFLAGS, + RV1108_CLKGATE_CON(3), 0, GFLAGS), + GATE(PCLK_SPI, "pclk_spi", "pclk_bus_pre", 0, + RV1108_CLKGATE_CON(13), 5, GFLAGS), + + COMPOSITE(SCLK_UART0_SRC, "uart0_src", mux_pll_src_dpll_gpll_usb480m_p, CLK_IGNORE_UNUSED, RV1108_CLKSEL_CON(13), 12, 2, MFLAGS, 0, 7, DFLAGS, RV1108_CLKGATE_CON(3), 1, GFLAGS), - COMPOSITE(0, "uart1_src", mux_pll_src_dpll_gpll_usb480m_p, CLK_IGNORE_UNUSED, + COMPOSITE(SCLK_UART1_SRC, "uart1_src", mux_pll_src_dpll_gpll_usb480m_p, CLK_IGNORE_UNUSED, RV1108_CLKSEL_CON(14), 12, 2, MFLAGS, 0, 7, DFLAGS, RV1108_CLKGATE_CON(3), 3, GFLAGS), - COMPOSITE(0, "uart21_src", mux_pll_src_dpll_gpll_usb480m_p, CLK_IGNORE_UNUSED, + COMPOSITE(SCLK_UART2_SRC, "uart2_src", mux_pll_src_dpll_gpll_usb480m_p, 
CLK_IGNORE_UNUSED, RV1108_CLKSEL_CON(15), 12, 2, MFLAGS, 0, 7, DFLAGS, RV1108_CLKGATE_CON(3), 5, GFLAGS), @@ -344,44 +610,58 @@ static struct rockchip_clk_branch rv1108_clk_branches[] __initdata = { RV1108_CLKSEL_CON(18), 0, RV1108_CLKGATE_CON(3), 6, GFLAGS, &rv1108_uart2_fracmux), - GATE(PCLK_UART0, "pclk_uart0", "pclk_bus_pre", CLK_IGNORE_UNUSED, + GATE(PCLK_UART0, "pclk_uart0", "pclk_bus_pre", 0, RV1108_CLKGATE_CON(13), 10, GFLAGS), - GATE(PCLK_UART1, "pclk_uart1", "pclk_bus_pre", CLK_IGNORE_UNUSED, + GATE(PCLK_UART1, "pclk_uart1", "pclk_bus_pre", 0, RV1108_CLKGATE_CON(13), 11, GFLAGS), - GATE(PCLK_UART2, "pclk_uart2", "pclk_bus_pre", CLK_IGNORE_UNUSED, + GATE(PCLK_UART2, "pclk_uart2", "pclk_bus_pre", 0, RV1108_CLKGATE_CON(13), 12, GFLAGS), - COMPOSITE(0, "clk_i2c1", mux_pll_src_2plls_p, CLK_IGNORE_UNUSED, - RV1108_CLKSEL_CON(19), 15, 2, MFLAGS, 8, 7, DFLAGS, + COMPOSITE(SCLK_I2C1, "clk_i2c1", mux_pll_src_2plls_p, 0, + RV1108_CLKSEL_CON(19), 15, 1, MFLAGS, 8, 7, DFLAGS, RV1108_CLKGATE_CON(3), 7, GFLAGS), - COMPOSITE(0, "clk_i2c2", mux_pll_src_2plls_p, CLK_IGNORE_UNUSED, - RV1108_CLKSEL_CON(20), 7, 2, MFLAGS, 0, 7, DFLAGS, + COMPOSITE(SCLK_I2C2, "clk_i2c2", mux_pll_src_2plls_p, 0, + RV1108_CLKSEL_CON(20), 7, 1, MFLAGS, 0, 7, DFLAGS, RV1108_CLKGATE_CON(3), 8, GFLAGS), - COMPOSITE(0, "clk_i2c3", mux_pll_src_2plls_p, CLK_IGNORE_UNUSED, - RV1108_CLKSEL_CON(20), 15, 2, MFLAGS, 8, 7, DFLAGS, + COMPOSITE(SCLK_I2C3, "clk_i2c3", mux_pll_src_2plls_p, 0, + RV1108_CLKSEL_CON(20), 15, 1, MFLAGS, 8, 7, DFLAGS, RV1108_CLKGATE_CON(3), 9, GFLAGS), - GATE(0, "pclk_i2c1", "pclk_bus_pre", CLK_IGNORE_UNUSED, + GATE(PCLK_I2C1, "pclk_i2c1", "pclk_bus_pre", 0, RV1108_CLKGATE_CON(13), 0, GFLAGS), - GATE(0, "pclk_i2c2", "pclk_bus_pre", CLK_IGNORE_UNUSED, + GATE(PCLK_I2C2, "pclk_i2c2", "pclk_bus_pre", 0, RV1108_CLKGATE_CON(13), 1, GFLAGS), - GATE(0, "pclk_i2c3", "pclk_bus_pre", CLK_IGNORE_UNUSED, + GATE(PCLK_I2C3, "pclk_i2c3", "pclk_bus_pre", 0, RV1108_CLKGATE_CON(13), 2, GFLAGS), - COMPOSITE(0, "clk_pwm1", mux_pll_src_2plls_p, CLK_IGNORE_UNUSED, + COMPOSITE(SCLK_PWM, "clk_pwm", mux_pll_src_2plls_p, 0, RV1108_CLKSEL_CON(12), 15, 2, MFLAGS, 8, 7, DFLAGS, RV1108_CLKGATE_CON(3), 10, GFLAGS), - GATE(0, "pclk_pwm1", "pclk_bus_pre", CLK_IGNORE_UNUSED, + GATE(PCLK_PWM, "pclk_pwm", "pclk_bus_pre", 0, RV1108_CLKGATE_CON(13), 6, GFLAGS), - GATE(0, "pclk_wdt", "pclk_bus_pre", CLK_IGNORE_UNUSED, + GATE(PCLK_WDT, "pclk_wdt", "pclk_bus_pre", 0, RV1108_CLKGATE_CON(13), 3, GFLAGS), - GATE(0, "pclk_gpio1", "pclk_bus_pre", CLK_IGNORE_UNUSED, + GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_bus_pre", 0, RV1108_CLKGATE_CON(13), 7, GFLAGS), - GATE(0, "pclk_gpio2", "pclk_bus_pre", CLK_IGNORE_UNUSED, + GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_bus_pre", 0, RV1108_CLKGATE_CON(13), 8, GFLAGS), - GATE(0, "pclk_gpio3", "pclk_bus_pre", CLK_IGNORE_UNUSED, + GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_bus_pre", 0, RV1108_CLKGATE_CON(13), 9, GFLAGS), GATE(0, "pclk_grf", "pclk_bus_pre", CLK_IGNORE_UNUSED, RV1108_CLKGATE_CON(14), 0, GFLAGS), + GATE(PCLK_EFUSE0, "pclk_efuse0", "pclk_bus_pre", 0, + RV1108_CLKGATE_CON(12), 12, GFLAGS), + GATE(PCLK_EFUSE1, "pclk_efuse1", "pclk_bus_pre", 0, + RV1108_CLKGATE_CON(12), 13, GFLAGS), + GATE(PCLK_TSADC, "pclk_tsadc", "pclk_bus_pre", 0, + RV1108_CLKGATE_CON(13), 13, GFLAGS), + COMPOSITE_NOMUX(SCLK_TSADC, "sclk_tsadc", "xin24m", 0, + RV1108_CLKSEL_CON(21), 0, 10, DFLAGS, + RV1108_CLKGATE_CON(3), 11, GFLAGS), + GATE(PCLK_SARADC, "pclk_saradc", "pclk_bus_pre", 0, + RV1108_CLKGATE_CON(13), 14, GFLAGS), + COMPOSITE_NOMUX(SCLK_SARADC, 
"sclk_saradc", "xin24m", 0, + RV1108_CLKSEL_CON(22), 0, 10, DFLAGS, + RV1108_CLKGATE_CON(3), 12, GFLAGS), GATE(ACLK_DMAC, "aclk_dmac", "aclk_bus_pre", 0, RV1108_CLKGATE_CON(12), 2, GFLAGS), @@ -397,18 +677,24 @@ static struct rockchip_clk_branch rv1108_clk_branches[] __initdata = { RV1108_CLKGATE_CON(0), 9, GFLAGS), GATE(0, "gpll_ddr", "gpll", CLK_IGNORE_UNUSED, RV1108_CLKGATE_CON(0), 10, GFLAGS), - COMPOSITE(0, "ddrphy4x", mux_ddrphy_p, CLK_IGNORE_UNUSED, + COMPOSITE_NOGATE(0, "clk_ddrphy_src", mux_ddrphy_p, CLK_IGNORE_UNUSED, RV1108_CLKSEL_CON(4), 8, 2, MFLAGS, 0, 3, - DFLAGS | CLK_DIVIDER_POWER_OF_TWO, + DFLAGS | CLK_DIVIDER_POWER_OF_TWO), + FACTOR(0, "clk_ddr", "clk_ddrphy_src", 0, 1, 2), + GATE(0, "clk_ddrphy4x", "clk_ddr", CLK_IGNORE_UNUSED, RV1108_CLKGATE_CON(10), 9, GFLAGS), - GATE(0, "ddrupctl", "ddrphy_pre", CLK_IGNORE_UNUSED, + GATE(0, "pclk_ddrupctl", "pclk_ddr_pre", CLK_IGNORE_UNUSED, RV1108_CLKGATE_CON(12), 4, GFLAGS), - GATE(0, "ddrc", "ddrphy", CLK_IGNORE_UNUSED, + GATE(0, "nclk_ddrupctl", "clk_ddr", CLK_IGNORE_UNUSED, RV1108_CLKGATE_CON(12), 5, GFLAGS), - GATE(0, "ddrmon", "ddrphy_pre", CLK_IGNORE_UNUSED, + GATE(0, "pclk_ddrmon", "pclk_ddr_pre", CLK_IGNORE_UNUSED, RV1108_CLKGATE_CON(12), 6, GFLAGS), GATE(0, "timer_clk", "xin24m", CLK_IGNORE_UNUSED, RV1108_CLKGATE_CON(0), 11, GFLAGS), + GATE(0, "pclk_mschniu", "pclk_ddr_pre", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(14), 2, GFLAGS), + GATE(0, "pclk_ddrphy", "pclk_ddr_pre", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(14), 4, GFLAGS), /* * Clock-Architecture Diagram 6 @@ -418,23 +704,23 @@ static struct rockchip_clk_branch rv1108_clk_branches[] __initdata = { COMPOSITE_NOMUX(0, "pclk_periph_pre", "gpll", 0, RV1108_CLKSEL_CON(23), 10, 5, DFLAGS, RV1108_CLKGATE_CON(4), 5, GFLAGS), - GATE(0, "pclk_periph", "pclk_periph_pre", CLK_IGNORE_UNUSED, + GATE(PCLK_PERI, "pclk_periph", "pclk_periph_pre", CLK_IGNORE_UNUSED, RV1108_CLKGATE_CON(15), 13, GFLAGS), COMPOSITE_NOMUX(0, "hclk_periph_pre", "gpll", 0, RV1108_CLKSEL_CON(23), 5, 5, DFLAGS, RV1108_CLKGATE_CON(4), 4, GFLAGS), - GATE(0, "hclk_periph", "hclk_periph_pre", CLK_IGNORE_UNUSED, + GATE(HCLK_PERI, "hclk_periph", "hclk_periph_pre", CLK_IGNORE_UNUSED, RV1108_CLKGATE_CON(15), 12, GFLAGS), GATE(0, "aclk_peri_src_dpll", "dpll", CLK_IGNORE_UNUSED, RV1108_CLKGATE_CON(4), 1, GFLAGS), GATE(0, "aclk_peri_src_gpll", "gpll", CLK_IGNORE_UNUSED, RV1108_CLKGATE_CON(4), 2, GFLAGS), - COMPOSITE(0, "aclk_periph", mux_aclk_peri_src_p, CLK_IGNORE_UNUSED, - RV1108_CLKSEL_CON(23), 15, 2, MFLAGS, 0, 5, DFLAGS, + COMPOSITE(ACLK_PERI, "aclk_periph", mux_aclk_peri_src_p, 0, + RV1108_CLKSEL_CON(23), 15, 1, MFLAGS, 0, 5, DFLAGS, RV1108_CLKGATE_CON(15), 11, GFLAGS), - COMPOSITE(SCLK_SDMMC, "sclk_sdmmc0", mux_mmc_src_p, 0, + COMPOSITE(SCLK_SDMMC, "sclk_sdmmc", mux_mmc_src_p, 0, RV1108_CLKSEL_CON(25), 8, 2, MFLAGS, 0, 8, DFLAGS, RV1108_CLKGATE_CON(5), 0, GFLAGS), @@ -454,23 +740,31 @@ static struct rockchip_clk_branch rv1108_clk_branches[] __initdata = { GATE(HCLK_EMMC, "hclk_emmc", "hclk_periph", 0, RV1108_CLKGATE_CON(15), 2, GFLAGS), COMPOSITE(SCLK_NANDC, "sclk_nandc", mux_pll_src_2plls_p, 0, - RV1108_CLKSEL_CON(27), 14, 2, MFLAGS, 8, 5, DFLAGS, + RV1108_CLKSEL_CON(27), 14, 1, MFLAGS, 8, 5, DFLAGS, RV1108_CLKGATE_CON(5), 3, GFLAGS), GATE(HCLK_NANDC, "hclk_nandc", "hclk_periph", 0, RV1108_CLKGATE_CON(15), 3, GFLAGS), + GATE(HCLK_HOST0, "hclk_host0", "hclk_periph", 0, RV1108_CLKGATE_CON(15), 6, GFLAGS), + GATE(0, "hclk_host0_arb", "hclk_periph", CLK_IGNORE_UNUSED, RV1108_CLKGATE_CON(15), 7, GFLAGS), + 
GATE(HCLK_OTG, "hclk_otg", "hclk_periph", 0, RV1108_CLKGATE_CON(15), 8, GFLAGS), + GATE(0, "hclk_otg_pmu", "hclk_periph", CLK_IGNORE_UNUSED, RV1108_CLKGATE_CON(15), 9, GFLAGS), + GATE(SCLK_USBPHY, "clk_usbphy", "xin24m", CLK_IGNORE_UNUSED, RV1108_CLKGATE_CON(5), 5, GFLAGS), + COMPOSITE(SCLK_SFC, "sclk_sfc", mux_pll_src_2plls_p, 0, - RV1108_CLKSEL_CON(27), 7, 2, MFLAGS, 0, 7, DFLAGS, + RV1108_CLKSEL_CON(27), 7, 1, MFLAGS, 0, 7, DFLAGS, RV1108_CLKGATE_CON(5), 4, GFLAGS), GATE(HCLK_SFC, "hclk_sfc", "hclk_periph", 0, RV1108_CLKGATE_CON(15), 10, GFLAGS), - COMPOSITE(0, "sclk_macphy_pre", mux_pll_src_apll_gpll_p, 0, - RV1108_CLKSEL_CON(24), 12, 2, MFLAGS, 0, 5, DFLAGS, + COMPOSITE(SCLK_MAC_PRE, "sclk_mac_pre", mux_pll_src_apll_gpll_p, 0, + RV1108_CLKSEL_CON(24), 12, 1, MFLAGS, 0, 5, DFLAGS, RV1108_CLKGATE_CON(4), 10, GFLAGS), - MUX(0, "sclk_macphy", mux_sclk_macphy_p, CLK_SET_RATE_PARENT, - RV1108_CLKSEL_CON(24), 8, 2, MFLAGS), - GATE(0, "sclk_macphy_rx", "sclk_macphy", 0, RV1108_CLKGATE_CON(4), 8, GFLAGS), - GATE(0, "sclk_mac_ref", "sclk_macphy", 0, RV1108_CLKGATE_CON(4), 6, GFLAGS), - GATE(0, "sclk_mac_refout", "sclk_macphy", 0, RV1108_CLKGATE_CON(4), 7, GFLAGS), + MUX(SCLK_MAC, "sclk_mac", mux_sclk_mac_p, CLK_SET_RATE_PARENT, + RV1108_CLKSEL_CON(24), 8, 1, MFLAGS), + GATE(SCLK_MAC_RX, "sclk_mac_rx", "sclk_mac", 0, RV1108_CLKGATE_CON(4), 8, GFLAGS), + GATE(SCLK_MAC_REF, "sclk_mac_ref", "sclk_mac", 0, RV1108_CLKGATE_CON(4), 6, GFLAGS), + GATE(SCLK_MAC_REFOUT, "sclk_mac_refout", "sclk_mac", 0, RV1108_CLKGATE_CON(4), 7, GFLAGS), + GATE(ACLK_GMAC, "aclk_gmac", "aclk_periph", 0, RV1108_CLKGATE_CON(15), 4, GFLAGS), + GATE(PCLK_GMAC, "pclk_gmac", "pclk_periph", 0, RV1108_CLKGATE_CON(15), 5, GFLAGS), MMC(SCLK_SDMMC_DRV, "sdmmc_drv", "sclk_sdmmc", RV1108_SDMMC_CON0, 1), MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "sclk_sdmmc", RV1108_SDMMC_CON1, 1), @@ -484,10 +778,16 @@ static struct rockchip_clk_branch rv1108_clk_branches[] __initdata = { static const char *const rv1108_critical_clocks[] __initconst = { "aclk_core", - "aclk_bus_src_gpll", + "aclk_bus", + "hclk_bus", + "pclk_bus", "aclk_periph", "hclk_periph", "pclk_periph", + "nclk_ddrupctl", + "pclk_ddrmon", + "pclk_acodecphy", + "pclk_pmu", }; static void __init rv1108_clk_init(struct device_node *np) diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c index fe1d393cf678..35dbd63c2f49 100644 --- a/drivers/clk/rockchip/clk.c +++ b/drivers/clk/rockchip/clk.c @@ -29,6 +29,7 @@ #include #include #include +#include #include "clk.h" /** @@ -164,6 +165,40 @@ static int rockchip_clk_frac_notifier_cb(struct notifier_block *nb, return notifier_from_errno(ret); } +/** + * fractional divider must set that denominator is 20 times larger than + * numerator to generate precise clock frequency. + */ +static void rockchip_fractional_approximation(struct clk_hw *hw, + unsigned long rate, unsigned long *parent_rate, + unsigned long *m, unsigned long *n) +{ + struct clk_fractional_divider *fd = to_clk_fd(hw); + unsigned long p_rate, p_parent_rate; + struct clk_hw *p_parent; + unsigned long scale; + + p_rate = clk_hw_get_rate(clk_hw_get_parent(hw)); + if ((rate * 20 > p_rate) && (p_rate % rate != 0)) { + p_parent = clk_hw_get_parent(clk_hw_get_parent(hw)); + p_parent_rate = clk_hw_get_rate(p_parent); + *parent_rate = p_parent_rate; + } + + /* + * Get rate closer to *parent_rate to guarantee there is no overflow + * for m and n. In the result it will be the nearest rate left shifted + * by (scale - fd->nwidth) bits. 
+ */ + scale = fls_long(*parent_rate / rate - 1); + if (scale > fd->nwidth) + rate <<= scale - fd->nwidth; + + rational_best_approximation(rate, *parent_rate, + GENMASK(fd->mwidth - 1, 0), GENMASK(fd->nwidth - 1, 0), + m, n); +} + static struct clk *rockchip_clk_register_frac_branch( struct rockchip_clk_provider *ctx, const char *name, const char *const *parent_names, u8 num_parents, @@ -210,6 +245,7 @@ static struct clk *rockchip_clk_register_frac_branch( div->nwidth = 16; div->nmask = GENMASK(div->nwidth - 1, 0) << div->nshift; div->lock = lock; + div->approximation = rockchip_fractional_approximation; div_ops = &clk_fractional_divider_ops; clk = clk_register_composite(NULL, name, parent_names, num_parents, diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c index 1fab56f396d4..b117783ed404 100644 --- a/drivers/clk/samsung/clk-exynos-audss.c +++ b/drivers/clk/samsung/clk-exynos-audss.c @@ -180,7 +180,7 @@ static int exynos_audss_clk_probe(struct platform_device *pdev) } clk_table[EXYNOS_MOUT_AUDSS] = clk_hw_register_mux(NULL, "mout_audss", mout_audss_p, ARRAY_SIZE(mout_audss_p), - CLK_SET_RATE_NO_REPARENT, + CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT, reg_base + ASS_CLK_SRC, 0, 1, 0, &lock); cdclk = devm_clk_get(&pdev->dev, "cdclk"); @@ -195,11 +195,11 @@ static int exynos_audss_clk_probe(struct platform_device *pdev) reg_base + ASS_CLK_SRC, 2, 2, 0, &lock); clk_table[EXYNOS_DOUT_SRP] = clk_hw_register_divider(NULL, "dout_srp", - "mout_audss", 0, reg_base + ASS_CLK_DIV, 0, 4, - 0, &lock); + "mout_audss", CLK_SET_RATE_PARENT, + reg_base + ASS_CLK_DIV, 0, 4, 0, &lock); clk_table[EXYNOS_DOUT_AUD_BUS] = clk_hw_register_divider(NULL, - "dout_aud_bus", "dout_srp", 0, + "dout_aud_bus", "dout_srp", CLK_SET_RATE_PARENT, reg_base + ASS_CLK_DIV, 4, 4, 0, &lock); clk_table[EXYNOS_DOUT_I2S] = clk_hw_register_divider(NULL, "dout_i2s", diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c index e40b77583c47..d8d3cb67b402 100644 --- a/drivers/clk/samsung/clk-exynos4.c +++ b/drivers/clk/samsung/clk-exynos4.c @@ -294,6 +294,18 @@ static const struct samsung_clk_reg_dump src_mask_suspend_e4210[] = { #define PLL_ENABLED (1 << 31) #define PLL_LOCKED (1 << 29) +static void exynos4_clk_enable_pll(u32 reg) +{ + u32 pll_con = readl(reg_base + reg); + pll_con |= PLL_ENABLED; + writel(pll_con, reg_base + reg); + + while (!(pll_con & PLL_LOCKED)) { + cpu_relax(); + pll_con = readl(reg_base + reg); + } +} + static void exynos4_clk_wait_for_pll(u32 reg) { u32 pll_con; @@ -315,6 +327,9 @@ static int exynos4_clk_suspend(void) samsung_clk_save(reg_base, exynos4_save_pll, ARRAY_SIZE(exynos4_clk_pll_regs)); + exynos4_clk_enable_pll(EPLL_CON0); + exynos4_clk_enable_pll(VPLL_CON0); + if (exynos4_soc == EXYNOS4210) { samsung_clk_save(reg_base, exynos4_save_soc, ARRAY_SIZE(exynos4210_clk_save)); diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c index 9a6476aa7d81..25601967d1cd 100644 --- a/drivers/clk/samsung/clk-exynos5420.c +++ b/drivers/clk/samsung/clk-exynos5420.c @@ -537,8 +537,8 @@ static const struct samsung_mux_clock exynos5800_mux_clks[] __initconst = { MUX(CLK_MOUT_MX_MSPLL_CCORE, "mout_mx_mspll_ccore", mout_mx_mspll_ccore_p, SRC_TOP7, 16, 2), - MUX(CLK_MOUT_MAU_EPLL, "mout_mau_epll_clk", mout_mau_epll_clk_5800_p, - SRC_TOP7, 20, 2), + MUX_F(CLK_MOUT_MAU_EPLL, "mout_mau_epll_clk", mout_mau_epll_clk_5800_p, + SRC_TOP7, 20, 2, CLK_SET_RATE_PARENT, 0), MUX(0, "sclk_bpll", mout_bpll_p, SRC_TOP7, 24, 1), 
MUX(0, "mout_epll2", mout_epll2_5800_p, SRC_TOP7, 28, 1), @@ -547,8 +547,8 @@ static const struct samsung_mux_clock exynos5800_mux_clks[] __initconst = { MUX(0, "mout_aclk432_cam", mout_group6_5800_p, SRC_TOP8, 24, 2), MUX(0, "mout_aclk432_scaler", mout_group6_5800_p, SRC_TOP8, 28, 2), - MUX(CLK_MOUT_USER_MAU_EPLL, "mout_user_mau_epll", mout_group16_5800_p, - SRC_TOP9, 8, 1), + MUX_F(CLK_MOUT_USER_MAU_EPLL, "mout_user_mau_epll", mout_group16_5800_p, + SRC_TOP9, 8, 1, CLK_SET_RATE_PARENT, 0), MUX(0, "mout_user_aclk550_cam", mout_group15_5800_p, SRC_TOP9, 16, 1), MUX(0, "mout_user_aclkfl1_550_cam", mout_group13_5800_p, @@ -590,6 +590,8 @@ static const struct samsung_gate_clock exynos5800_gate_clks[] __initconst = { GATE_BUS_TOP, 24, 0, 0), GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler", GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0), + GATE(CLK_MAU_EPLL, "mau_epll", "mout_user_mau_epll", + SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0), }; static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = { @@ -629,6 +631,11 @@ static const struct samsung_div_clock exynos5420_div_clks[] __initconst = { "mout_aclk400_wcore_bpll", DIV_TOP0, 16, 3), }; +static const struct samsung_gate_clock exynos5420_gate_clks[] __initconst = { + GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk", + SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0), +}; + static const struct samsung_mux_clock exynos5x_mux_clks[] __initconst = { MUX(0, "mout_user_pclk66_gpio", mout_user_pclk66_gpio_p, SRC_TOP7, 4, 1), @@ -706,7 +713,8 @@ static const struct samsung_mux_clock exynos5x_mux_clks[] __initconst = { MUX(0, "mout_sclk_spll", mout_spll_p, SRC_TOP6, 8, 1), MUX(0, "mout_sclk_ipll", mout_ipll_p, SRC_TOP6, 12, 1), MUX(0, "mout_sclk_rpll", mout_rpll_p, SRC_TOP6, 16, 1), - MUX(CLK_MOUT_EPLL, "mout_sclk_epll", mout_epll_p, SRC_TOP6, 20, 1), + MUX_F(CLK_MOUT_EPLL, "mout_sclk_epll", mout_epll_p, SRC_TOP6, 20, 1, + CLK_SET_RATE_PARENT, 0), MUX(0, "mout_sclk_dpll", mout_dpll_p, SRC_TOP6, 24, 1), MUX(0, "mout_sclk_cpll", mout_cpll_p, SRC_TOP6, 28, 1), @@ -1001,9 +1009,6 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = { GATE(0, "aclk300_disp1", "mout_user_aclk300_disp1", SRC_MASK_TOP2, 24, CLK_IS_CRITICAL, 0), - GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk", - SRC_MASK_TOP7, 20, 0, 0), - /* sclk */ GATE(CLK_SCLK_UART0, "sclk_uart0", "dout_uart0", GATE_TOP_SCLK_PERIC, 0, CLK_SET_RATE_PARENT, 0), @@ -1440,6 +1445,8 @@ static void __init exynos5x_clk_init(struct device_node *np, ARRAY_SIZE(exynos5420_mux_clks)); samsung_clk_register_div(ctx, exynos5420_div_clks, ARRAY_SIZE(exynos5420_div_clks)); + samsung_clk_register_gate(ctx, exynos5420_gate_clks, + ARRAY_SIZE(exynos5420_gate_clks)); } else { samsung_clk_register_fixed_factor( ctx, exynos5800_fixed_factor_clks, diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig index 7342928c35cd..6427d0ebe2de 100644 --- a/drivers/clk/sunxi-ng/Kconfig +++ b/drivers/clk/sunxi-ng/Kconfig @@ -11,6 +11,19 @@ config SUN50I_A64_CCU default ARM64 && ARCH_SUNXI depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST +config SUN4I_A10_CCU + bool "Support for the Allwinner A10/A20 CCU" + select SUNXI_CCU_DIV + select SUNXI_CCU_MULT + select SUNXI_CCU_NK + select SUNXI_CCU_NKM + select SUNXI_CCU_NM + select SUNXI_CCU_MP + select SUNXI_CCU_PHASE + default MACH_SUN4I + default MACH_SUN7I + depends on MACH_SUN4I || MACH_SUN7I || COMPILE_TEST + config SUN5I_CCU bool "Support for the Allwinner sun5i family CCM" default MACH_SUN5I @@ -48,6 +61,11 @@ config 
SUN8I_V3S_CCU config SUN8I_DE2_CCU bool "Support for the Allwinner SoCs DE2 CCU" +config SUN8I_R40_CCU + bool "Support for the Allwinner R40 CCU" + default MACH_SUN8I + depends on MACH_SUN8I || COMPILE_TEST + config SUN9I_A80_CCU bool "Support for the Allwinner A80 CCU" default MACH_SUN9I diff --git a/drivers/clk/sunxi-ng/Makefile b/drivers/clk/sunxi-ng/Makefile index 0c45fa50283d..85a0633c1eac 100644 --- a/drivers/clk/sunxi-ng/Makefile +++ b/drivers/clk/sunxi-ng/Makefile @@ -1,5 +1,6 @@ # Common objects lib-$(CONFIG_SUNXI_CCU) += ccu_common.o +lib-$(CONFIG_SUNXI_CCU) += ccu_mmc_timing.o lib-$(CONFIG_SUNXI_CCU) += ccu_reset.o # Base clock types @@ -19,6 +20,7 @@ lib-$(CONFIG_SUNXI_CCU) += ccu_mp.o # SoC support obj-$(CONFIG_SUN50I_A64_CCU) += ccu-sun50i-a64.o +obj-$(CONFIG_SUN4I_A10_CCU) += ccu-sun4i-a10.o obj-$(CONFIG_SUN5I_CCU) += ccu-sun5i.o obj-$(CONFIG_SUN6I_A31_CCU) += ccu-sun6i-a31.o obj-$(CONFIG_SUN8I_A23_CCU) += ccu-sun8i-a23.o @@ -28,6 +30,7 @@ obj-$(CONFIG_SUN8I_H3_CCU) += ccu-sun8i-h3.o obj-$(CONFIG_SUN8I_V3S_CCU) += ccu-sun8i-v3s.o obj-$(CONFIG_SUN8I_DE2_CCU) += ccu-sun8i-de2.o obj-$(CONFIG_SUN8I_R_CCU) += ccu-sun8i-r.o +obj-$(CONFIG_SUN8I_R40_CCU) += ccu-sun8i-r40.o obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80.o obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80-de.o obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80-usb.o diff --git a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c new file mode 100644 index 000000000000..286b0049b7b6 --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c @@ -0,0 +1,1456 @@ +/* + * Copyright (c) 2017 Priit Laes . + * Copyright (c) 2017 Maxime Ripard. + * Copyright (c) 2017 Jonathan Liu. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include + +#include "ccu_common.h" +#include "ccu_reset.h" + +#include "ccu_div.h" +#include "ccu_gate.h" +#include "ccu_mp.h" +#include "ccu_mult.h" +#include "ccu_nk.h" +#include "ccu_nkm.h" +#include "ccu_nkmp.h" +#include "ccu_nm.h" +#include "ccu_phase.h" + +#include "ccu-sun4i-a10.h" + +static struct ccu_nkmp pll_core_clk = { + .enable = BIT(31), + .n = _SUNXI_CCU_MULT_OFFSET(8, 5, 0), + .k = _SUNXI_CCU_MULT(4, 2), + .m = _SUNXI_CCU_DIV(0, 2), + .p = _SUNXI_CCU_DIV(16, 2), + .common = { + .reg = 0x000, + .hw.init = CLK_HW_INIT("pll-core", + "hosc", + &ccu_nkmp_ops, + 0), + }, +}; + +/* + * The Audio PLL is supposed to have 4 outputs: 3 fixed factors from + * the base (2x, 4x and 8x), and one variable divider (the one true + * pll audio). + * + * We don't have any need for the variable divider for now, so we just + * hardcode it to match with the clock names. 
+ */ +#define SUN4I_PLL_AUDIO_REG 0x008 +static struct ccu_nm pll_audio_base_clk = { + .enable = BIT(31), + .n = _SUNXI_CCU_MULT_OFFSET(8, 7, 0), + .m = _SUNXI_CCU_DIV_OFFSET(0, 5, 0), + .common = { + .reg = 0x008, + .hw.init = CLK_HW_INIT("pll-audio-base", + "hosc", + &ccu_nm_ops, + 0), + }, + +}; + +static struct ccu_mult pll_video0_clk = { + .enable = BIT(31), + .mult = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(0, 7, 0, 9, 127), + .frac = _SUNXI_CCU_FRAC(BIT(15), BIT(14), + 270000000, 297000000), + .common = { + .reg = 0x010, + .features = (CCU_FEATURE_FRACTIONAL | + CCU_FEATURE_ALL_PREDIV), + .prediv = 8, + .hw.init = CLK_HW_INIT("pll-video0", + "hosc", + &ccu_mult_ops, + 0), + }, +}; + +static struct ccu_nkmp pll_ve_sun4i_clk = { + .enable = BIT(31), + .n = _SUNXI_CCU_MULT_OFFSET(8, 5, 0), + .k = _SUNXI_CCU_MULT(4, 2), + .m = _SUNXI_CCU_DIV(0, 2), + .p = _SUNXI_CCU_DIV(16, 2), + .common = { + .reg = 0x018, + .hw.init = CLK_HW_INIT("pll-ve", + "hosc", + &ccu_nkmp_ops, + 0), + }, +}; + +static struct ccu_nk pll_ve_sun7i_clk = { + .enable = BIT(31), + .n = _SUNXI_CCU_MULT_OFFSET(8, 5, 0), + .k = _SUNXI_CCU_MULT(4, 2), + .common = { + .reg = 0x018, + .hw.init = CLK_HW_INIT("pll-ve", + "hosc", + &ccu_nk_ops, + 0), + }, +}; + +static struct ccu_nk pll_ddr_base_clk = { + .enable = BIT(31), + .n = _SUNXI_CCU_MULT_OFFSET(8, 5, 0), + .k = _SUNXI_CCU_MULT(4, 2), + .common = { + .reg = 0x020, + .hw.init = CLK_HW_INIT("pll-ddr-base", + "hosc", + &ccu_nk_ops, + 0), + }, +}; + +static SUNXI_CCU_M(pll_ddr_clk, "pll-ddr", "pll-ddr-base", 0x020, 0, 2, + CLK_IS_CRITICAL); + +static struct ccu_div pll_ddr_other_clk = { + .div = _SUNXI_CCU_DIV_FLAGS(16, 2, CLK_DIVIDER_POWER_OF_TWO), + .common = { + .reg = 0x020, + .hw.init = CLK_HW_INIT("pll-ddr-other", "pll-ddr-base", + &ccu_div_ops, + 0), + }, +}; + +static struct ccu_nk pll_periph_base_clk = { + .enable = BIT(31), + .n = _SUNXI_CCU_MULT_OFFSET(8, 5, 0), + .k = _SUNXI_CCU_MULT(4, 2), + .common = { + .reg = 0x028, + .hw.init = CLK_HW_INIT("pll-periph-base", + "hosc", + &ccu_nk_ops, + 0), + }, +}; + +static CLK_FIXED_FACTOR(pll_periph_clk, "pll-periph", "pll-periph-base", + 2, 1, CLK_SET_RATE_PARENT); + +/* Not documented on A10 */ +static struct ccu_div pll_periph_sata_clk = { + .enable = BIT(14), + .div = _SUNXI_CCU_DIV(0, 2), + .fixed_post_div = 6, + .common = { + .reg = 0x028, + .features = CCU_FEATURE_FIXED_POSTDIV, + .hw.init = CLK_HW_INIT("pll-periph-sata", + "pll-periph-base", + &ccu_div_ops, 0), + }, +}; + +static struct ccu_mult pll_video1_clk = { + .enable = BIT(31), + .mult = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(0, 7, 0, 9, 127), + .frac = _SUNXI_CCU_FRAC(BIT(15), BIT(14), + 270000000, 297000000), + .common = { + .reg = 0x030, + .features = (CCU_FEATURE_FRACTIONAL | + CCU_FEATURE_ALL_PREDIV), + .prediv = 8, + .hw.init = CLK_HW_INIT("pll-video1", + "hosc", + &ccu_mult_ops, + 0), + }, +}; + +/* Not present on A10 */ +static struct ccu_nk pll_gpu_clk = { + .enable = BIT(31), + .n = _SUNXI_CCU_MULT_OFFSET(8, 5, 0), + .k = _SUNXI_CCU_MULT(4, 2), + .common = { + .reg = 0x040, + .hw.init = CLK_HW_INIT("pll-gpu", + "hosc", + &ccu_nk_ops, + 0), + }, +}; + +static SUNXI_CCU_GATE(hosc_clk, "hosc", "osc24M", 0x050, BIT(0), 0); + +static const char *const cpu_parents[] = { "osc32k", "hosc", + "pll-core", "pll-periph" }; +static const struct ccu_mux_fixed_prediv cpu_predivs[] = { + { .index = 3, .div = 3, }, +}; + +#define SUN4I_AHB_REG 0x054 +static struct ccu_mux cpu_clk = { + .mux = { + .shift = 16, + .width = 2, + .fixed_predivs = cpu_predivs, + .n_predivs = 
ARRAY_SIZE(cpu_predivs), + }, + .common = { + .reg = 0x054, + .features = CCU_FEATURE_FIXED_PREDIV, + .hw.init = CLK_HW_INIT_PARENTS("cpu", + cpu_parents, + &ccu_mux_ops, + CLK_IS_CRITICAL), + } +}; + +static SUNXI_CCU_M(axi_clk, "axi", "cpu", 0x054, 0, 2, 0); + +static struct ccu_div ahb_sun4i_clk = { + .div = _SUNXI_CCU_DIV_FLAGS(4, 2, CLK_DIVIDER_POWER_OF_TWO), + .common = { + .reg = 0x054, + .hw.init = CLK_HW_INIT("ahb", "axi", &ccu_div_ops, 0), + }, +}; + +static const char *const ahb_sun7i_parents[] = { "axi", "pll-periph", + "pll-periph" }; +static const struct ccu_mux_fixed_prediv ahb_sun7i_predivs[] = { + { .index = 1, .div = 2, }, + { /* Sentinel */ }, +}; +static struct ccu_div ahb_sun7i_clk = { + .div = _SUNXI_CCU_DIV_FLAGS(4, 2, CLK_DIVIDER_POWER_OF_TWO), + .mux = { + .shift = 6, + .width = 2, + .fixed_predivs = ahb_sun7i_predivs, + .n_predivs = ARRAY_SIZE(ahb_sun7i_predivs), + }, + + .common = { + .reg = 0x054, + .hw.init = CLK_HW_INIT_PARENTS("ahb", + ahb_sun7i_parents, + &ccu_div_ops, + 0), + }, +}; + +static struct clk_div_table apb0_div_table[] = { + { .val = 0, .div = 2 }, + { .val = 1, .div = 2 }, + { .val = 2, .div = 4 }, + { .val = 3, .div = 8 }, + { /* Sentinel */ }, +}; +static SUNXI_CCU_DIV_TABLE(apb0_clk, "apb0", "ahb", + 0x054, 8, 2, apb0_div_table, 0); + +static const char *const apb1_parents[] = { "hosc", "pll-periph", "osc32k" }; +static SUNXI_CCU_MP_WITH_MUX(apb1_clk, "apb1", apb1_parents, 0x058, + 0, 5, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + 0); + +/* Not present on A20 */ +static SUNXI_CCU_GATE(axi_dram_clk, "axi-dram", "ahb", + 0x05c, BIT(31), 0); + +static SUNXI_CCU_GATE(ahb_otg_clk, "ahb-otg", "ahb", + 0x060, BIT(0), 0); +static SUNXI_CCU_GATE(ahb_ehci0_clk, "ahb-ehci0", "ahb", + 0x060, BIT(1), 0); +static SUNXI_CCU_GATE(ahb_ohci0_clk, "ahb-ohci0", "ahb", + 0x060, BIT(2), 0); +static SUNXI_CCU_GATE(ahb_ehci1_clk, "ahb-ehci1", "ahb", + 0x060, BIT(3), 0); +static SUNXI_CCU_GATE(ahb_ohci1_clk, "ahb-ohci1", "ahb", + 0x060, BIT(4), 0); +static SUNXI_CCU_GATE(ahb_ss_clk, "ahb-ss", "ahb", + 0x060, BIT(5), 0); +static SUNXI_CCU_GATE(ahb_dma_clk, "ahb-dma", "ahb", + 0x060, BIT(6), 0); +static SUNXI_CCU_GATE(ahb_bist_clk, "ahb-bist", "ahb", + 0x060, BIT(7), 0); +static SUNXI_CCU_GATE(ahb_mmc0_clk, "ahb-mmc0", "ahb", + 0x060, BIT(8), 0); +static SUNXI_CCU_GATE(ahb_mmc1_clk, "ahb-mmc1", "ahb", + 0x060, BIT(9), 0); +static SUNXI_CCU_GATE(ahb_mmc2_clk, "ahb-mmc2", "ahb", + 0x060, BIT(10), 0); +static SUNXI_CCU_GATE(ahb_mmc3_clk, "ahb-mmc3", "ahb", + 0x060, BIT(11), 0); +static SUNXI_CCU_GATE(ahb_ms_clk, "ahb-ms", "ahb", + 0x060, BIT(12), 0); +static SUNXI_CCU_GATE(ahb_nand_clk, "ahb-nand", "ahb", + 0x060, BIT(13), 0); +static SUNXI_CCU_GATE(ahb_sdram_clk, "ahb-sdram", "ahb", + 0x060, BIT(14), CLK_IS_CRITICAL); + +static SUNXI_CCU_GATE(ahb_ace_clk, "ahb-ace", "ahb", + 0x060, BIT(16), 0); +static SUNXI_CCU_GATE(ahb_emac_clk, "ahb-emac", "ahb", + 0x060, BIT(17), 0); +static SUNXI_CCU_GATE(ahb_ts_clk, "ahb-ts", "ahb", + 0x060, BIT(18), 0); +static SUNXI_CCU_GATE(ahb_spi0_clk, "ahb-spi0", "ahb", + 0x060, BIT(20), 0); +static SUNXI_CCU_GATE(ahb_spi1_clk, "ahb-spi1", "ahb", + 0x060, BIT(21), 0); +static SUNXI_CCU_GATE(ahb_spi2_clk, "ahb-spi2", "ahb", + 0x060, BIT(22), 0); +static SUNXI_CCU_GATE(ahb_spi3_clk, "ahb-spi3", "ahb", + 0x060, BIT(23), 0); +static SUNXI_CCU_GATE(ahb_pata_clk, "ahb-pata", "ahb", + 0x060, BIT(24), 0); +/* Not documented on A20 */ +static SUNXI_CCU_GATE(ahb_sata_clk, "ahb-sata", "ahb", + 0x060, BIT(25), 0); +/* Not present on A20 */ +static 
SUNXI_CCU_GATE(ahb_gps_clk, "ahb-gps", "ahb", + 0x060, BIT(26), 0); +/* Not present on A10 */ +static SUNXI_CCU_GATE(ahb_hstimer_clk, "ahb-hstimer", "ahb", + 0x060, BIT(28), 0); + +static SUNXI_CCU_GATE(ahb_ve_clk, "ahb-ve", "ahb", + 0x064, BIT(0), 0); +static SUNXI_CCU_GATE(ahb_tvd_clk, "ahb-tvd", "ahb", + 0x064, BIT(1), 0); +static SUNXI_CCU_GATE(ahb_tve0_clk, "ahb-tve0", "ahb", + 0x064, BIT(2), 0); +static SUNXI_CCU_GATE(ahb_tve1_clk, "ahb-tve1", "ahb", + 0x064, BIT(3), 0); +static SUNXI_CCU_GATE(ahb_lcd0_clk, "ahb-lcd0", "ahb", + 0x064, BIT(4), 0); +static SUNXI_CCU_GATE(ahb_lcd1_clk, "ahb-lcd1", "ahb", + 0x064, BIT(5), 0); +static SUNXI_CCU_GATE(ahb_csi0_clk, "ahb-csi0", "ahb", + 0x064, BIT(8), 0); +static SUNXI_CCU_GATE(ahb_csi1_clk, "ahb-csi1", "ahb", + 0x064, BIT(9), 0); +/* Not present on A10 */ +static SUNXI_CCU_GATE(ahb_hdmi1_clk, "ahb-hdmi1", "ahb", + 0x064, BIT(10), 0); +static SUNXI_CCU_GATE(ahb_hdmi0_clk, "ahb-hdmi0", "ahb", + 0x064, BIT(11), 0); +static SUNXI_CCU_GATE(ahb_de_be0_clk, "ahb-de-be0", "ahb", + 0x064, BIT(12), 0); +static SUNXI_CCU_GATE(ahb_de_be1_clk, "ahb-de-be1", "ahb", + 0x064, BIT(13), 0); +static SUNXI_CCU_GATE(ahb_de_fe0_clk, "ahb-de-fe0", "ahb", + 0x064, BIT(14), 0); +static SUNXI_CCU_GATE(ahb_de_fe1_clk, "ahb-de-fe1", "ahb", + 0x064, BIT(15), 0); +/* Not present on A10 */ +static SUNXI_CCU_GATE(ahb_gmac_clk, "ahb-gmac", "ahb", + 0x064, BIT(17), 0); +static SUNXI_CCU_GATE(ahb_mp_clk, "ahb-mp", "ahb", + 0x064, BIT(18), 0); +static SUNXI_CCU_GATE(ahb_gpu_clk, "ahb-gpu", "ahb", + 0x064, BIT(20), 0); + +static SUNXI_CCU_GATE(apb0_codec_clk, "apb0-codec", "apb0", + 0x068, BIT(0), 0); +static SUNXI_CCU_GATE(apb0_spdif_clk, "apb0-spdif", "apb0", + 0x068, BIT(1), 0); +static SUNXI_CCU_GATE(apb0_ac97_clk, "apb0-ac97", "apb0", + 0x068, BIT(2), 0); +static SUNXI_CCU_GATE(apb0_i2s0_clk, "apb0-i2s0", "apb0", + 0x068, BIT(3), 0); +/* Not present on A10 */ +static SUNXI_CCU_GATE(apb0_i2s1_clk, "apb0-i2s1", "apb0", + 0x068, BIT(4), 0); +static SUNXI_CCU_GATE(apb0_pio_clk, "apb0-pio", "apb0", + 0x068, BIT(5), 0); +static SUNXI_CCU_GATE(apb0_ir0_clk, "apb0-ir0", "apb0", + 0x068, BIT(6), 0); +static SUNXI_CCU_GATE(apb0_ir1_clk, "apb0-ir1", "apb0", + 0x068, BIT(7), 0); +/* Not present on A10 */ +static SUNXI_CCU_GATE(apb0_i2s2_clk, "apb0-i2s2", "apb0", + 0x068, BIT(8), 0); +static SUNXI_CCU_GATE(apb0_keypad_clk, "apb0-keypad", "apb0", + 0x068, BIT(10), 0); + +static SUNXI_CCU_GATE(apb1_i2c0_clk, "apb1-i2c0", "apb1", + 0x06c, BIT(0), 0); +static SUNXI_CCU_GATE(apb1_i2c1_clk, "apb1-i2c1", "apb1", + 0x06c, BIT(1), 0); +static SUNXI_CCU_GATE(apb1_i2c2_clk, "apb1-i2c2", "apb1", + 0x06c, BIT(2), 0); +/* Not present on A10 */ +static SUNXI_CCU_GATE(apb1_i2c3_clk, "apb1-i2c3", "apb1", + 0x06c, BIT(3), 0); +static SUNXI_CCU_GATE(apb1_can_clk, "apb1-can", "apb1", + 0x06c, BIT(4), 0); +static SUNXI_CCU_GATE(apb1_scr_clk, "apb1-scr", "apb1", + 0x06c, BIT(5), 0); +static SUNXI_CCU_GATE(apb1_ps20_clk, "apb1-ps20", "apb1", + 0x06c, BIT(6), 0); +static SUNXI_CCU_GATE(apb1_ps21_clk, "apb1-ps21", "apb1", + 0x06c, BIT(7), 0); +/* Not present on A10 */ +static SUNXI_CCU_GATE(apb1_i2c4_clk, "apb1-i2c4", "apb1", + 0x06c, BIT(15), 0); +static SUNXI_CCU_GATE(apb1_uart0_clk, "apb1-uart0", "apb1", + 0x06c, BIT(16), 0); +static SUNXI_CCU_GATE(apb1_uart1_clk, "apb1-uart1", "apb1", + 0x06c, BIT(17), 0); +static SUNXI_CCU_GATE(apb1_uart2_clk, "apb1-uart2", "apb1", + 0x06c, BIT(18), 0); +static SUNXI_CCU_GATE(apb1_uart3_clk, "apb1-uart3", "apb1", + 0x06c, BIT(19), 0); +static 
SUNXI_CCU_GATE(apb1_uart4_clk, "apb1-uart4", "apb1", + 0x06c, BIT(20), 0); +static SUNXI_CCU_GATE(apb1_uart5_clk, "apb1-uart5", "apb1", + 0x06c, BIT(21), 0); +static SUNXI_CCU_GATE(apb1_uart6_clk, "apb1-uart6", "apb1", + 0x06c, BIT(22), 0); +static SUNXI_CCU_GATE(apb1_uart7_clk, "apb1-uart7", "apb1", + 0x06c, BIT(23), 0); + +static const char *const mod0_default_parents[] = { "hosc", "pll-periph", + "pll-ddr-other" }; +static SUNXI_CCU_MP_WITH_MUX_GATE(nand_clk, "nand", mod0_default_parents, 0x080, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +/* Undocumented on A10 */ +static SUNXI_CCU_MP_WITH_MUX_GATE(ms_clk, "ms", mod0_default_parents, 0x084, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(mmc0_clk, "mmc0", mod0_default_parents, 0x088, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +/* MMC output and sample clocks are not present on A10 */ +static SUNXI_CCU_PHASE(mmc0_output_clk, "mmc0_output", "mmc0", + 0x088, 8, 3, 0); +static SUNXI_CCU_PHASE(mmc0_sample_clk, "mmc0_sample", "mmc0", + 0x088, 20, 3, 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(mmc1_clk, "mmc1", mod0_default_parents, 0x08c, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +/* MMC output and sample clocks are not present on A10 */ +static SUNXI_CCU_PHASE(mmc1_output_clk, "mmc1_output", "mmc1", + 0x08c, 8, 3, 0); +static SUNXI_CCU_PHASE(mmc1_sample_clk, "mmc1_sample", "mmc1", + 0x08c, 20, 3, 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(mmc2_clk, "mmc2", mod0_default_parents, 0x090, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +/* MMC output and sample clocks are not present on A10 */ +static SUNXI_CCU_PHASE(mmc2_output_clk, "mmc2_output", "mmc2", + 0x090, 8, 3, 0); +static SUNXI_CCU_PHASE(mmc2_sample_clk, "mmc2_sample", "mmc2", + 0x090, 20, 3, 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(mmc3_clk, "mmc3", mod0_default_parents, 0x094, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +/* MMC output and sample clocks are not present on A10 */ +static SUNXI_CCU_PHASE(mmc3_output_clk, "mmc3_output", "mmc3", + 0x094, 8, 3, 0); +static SUNXI_CCU_PHASE(mmc3_sample_clk, "mmc3_sample", "mmc3", + 0x094, 20, 3, 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(ts_clk, "ts", mod0_default_parents, 0x098, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(ss_clk, "ss", mod0_default_parents, 0x09c, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(spi0_clk, "spi0", mod0_default_parents, 0x0a0, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(spi1_clk, "spi1", mod0_default_parents, 0x0a4, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(spi2_clk, "spi2", mod0_default_parents, 0x0a8, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +/* Undocumented on A10 */ +static SUNXI_CCU_MP_WITH_MUX_GATE(pata_clk, "pata", mod0_default_parents, 0x0ac, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +/* TODO: Check whether A10 actually supports osc32k as 4th parent? 
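The mod0-style clocks above (nand, ms, mmc, ts, ss, spi, pata) all share the same M/P/mux/gate layout: as far as the ccu_mp helper is concerned, the output is parent / (M + 1) / 2^P, with the 2-bit mux picking hosc, pll-periph or pll-ddr-other. The MMC output/sample clocks are separate phase clocks that an MMC driver would typically tune through clk_set_phase(). A minimal sketch of the divider arithmetic, assuming an illustrative 600 MHz pll-periph rate:

#include <stdio.h>

/* rate = parent / (M + 1) / 2^P, per the M and P fields used above */
static unsigned long mp_rate(unsigned long parent, unsigned int m, unsigned int p)
{
	return parent / (m + 1) / (1UL << p);
}

int main(void)
{
	/* e.g. mmc0 parented to an assumed 600 MHz pll-periph */
	printf("M=11, P=0: %lu Hz\n", mp_rate(600000000UL, 11, 0));	/* 50 MHz   */
	printf("M=11, P=2: %lu Hz\n", mp_rate(600000000UL, 11, 2));	/* 12.5 MHz */
	return 0;
}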
*/ +static const char *const ir_parents_sun4i[] = { "hosc", "pll-periph", + "pll-ddr-other" }; +static SUNXI_CCU_MP_WITH_MUX_GATE(ir0_sun4i_clk, "ir0", ir_parents_sun4i, 0x0b0, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(ir1_sun4i_clk, "ir1", ir_parents_sun4i, 0x0b4, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); +static const char *const ir_parents_sun7i[] = { "hosc", "pll-periph", + "pll-ddr-other", "osc32k" }; +static SUNXI_CCU_MP_WITH_MUX_GATE(ir0_sun7i_clk, "ir0", ir_parents_sun7i, 0x0b0, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(ir1_sun7i_clk, "ir1", ir_parents_sun7i, 0x0b4, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static const char *const audio_parents[] = { "pll-audio-8x", "pll-audio-4x", + "pll-audio-2x", "pll-audio" }; +static SUNXI_CCU_MUX_WITH_GATE(i2s0_clk, "i2s0", audio_parents, + 0x0b8, 16, 2, BIT(31), CLK_SET_RATE_PARENT); + +static SUNXI_CCU_MUX_WITH_GATE(ac97_clk, "ac97", audio_parents, + 0x0bc, 16, 2, BIT(31), CLK_SET_RATE_PARENT); + +/* Undocumented on A10 */ +static SUNXI_CCU_MUX_WITH_GATE(spdif_clk, "spdif", audio_parents, + 0x0c0, 16, 2, BIT(31), CLK_SET_RATE_PARENT); + +static const char *const keypad_parents[] = { "hosc", "losc"}; +static const u8 keypad_table[] = { 0, 2 }; +static struct ccu_mp keypad_clk = { + .enable = BIT(31), + .m = _SUNXI_CCU_DIV(0, 5), + .p = _SUNXI_CCU_DIV(16, 2), + .mux = _SUNXI_CCU_MUX_TABLE(24, 2, keypad_table), + .common = { + .reg = 0x0c4, + .hw.init = CLK_HW_INIT_PARENTS("keypad", + keypad_parents, + &ccu_mp_ops, + 0), + }, +}; + +/* + * SATA supports external clock as parent via BIT(24) and is probably an + * optional crystal or oscillator that can be connected to the + * SATA-CLKM / SATA-CLKP pins. 
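Because of the mux described in the SATA comment above (defined just below as sata_clk), a board that routes an external reference into the SATA-CLKM/SATA-CLKP pins could switch the SATA module clock away from pll-periph-sata with the generic clk API. A hypothetical consumer-side sketch; the "sata" and "sata-ext" lookup names are assumptions for illustration, not names defined by this patch:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int sata_use_external_ref(struct device *dev)
{
	struct clk *sata, *ext;
	int ret;

	sata = devm_clk_get(dev, "sata");	/* hypothetical lookup name */
	if (IS_ERR(sata))
		return PTR_ERR(sata);

	ext = devm_clk_get(dev, "sata-ext");	/* board-provided external reference */
	if (IS_ERR(ext))
		return PTR_ERR(ext);

	ret = clk_set_parent(sata, ext);	/* flips the mux bit of the sata clock */
	if (ret)
		return ret;

	return clk_prepare_enable(sata);	/* gate bit of the sata clock */
}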
+ */ +static const char *const sata_parents[] = {"pll-periph-sata", "sata-ext"}; +static SUNXI_CCU_MUX_WITH_GATE(sata_clk, "sata", sata_parents, + 0x0c8, 24, 1, BIT(31), CLK_SET_RATE_PARENT); + + +static SUNXI_CCU_GATE(usb_ohci0_clk, "usb-ohci0", "pll-periph", + 0x0cc, BIT(6), 0); +static SUNXI_CCU_GATE(usb_ohci1_clk, "usb-ohci1", "pll-periph", + 0x0cc, BIT(7), 0); +static SUNXI_CCU_GATE(usb_phy_clk, "usb-phy", "pll-periph", + 0x0cc, BIT(8), 0); + +/* TODO: GPS CLK 0x0d0 */ + +static SUNXI_CCU_MP_WITH_MUX_GATE(spi3_clk, "spi3", mod0_default_parents, 0x0d4, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +/* Not present on A10 */ +static SUNXI_CCU_MUX_WITH_GATE(i2s1_clk, "i2s1", audio_parents, + 0x0d8, 16, 2, BIT(31), CLK_SET_RATE_PARENT); + +/* Not present on A10 */ +static SUNXI_CCU_MUX_WITH_GATE(i2s2_clk, "i2s2", audio_parents, + 0x0dc, 16, 2, BIT(31), CLK_SET_RATE_PARENT); + +static SUNXI_CCU_GATE(dram_ve_clk, "dram-ve", "pll-ddr", + 0x100, BIT(0), 0); +static SUNXI_CCU_GATE(dram_csi0_clk, "dram-csi0", "pll-ddr", + 0x100, BIT(1), 0); +static SUNXI_CCU_GATE(dram_csi1_clk, "dram-csi1", "pll-ddr", + 0x100, BIT(2), 0); +static SUNXI_CCU_GATE(dram_ts_clk, "dram-ts", "pll-ddr", + 0x100, BIT(3), 0); +static SUNXI_CCU_GATE(dram_tvd_clk, "dram-tvd", "pll-ddr", + 0x100, BIT(4), 0); +static SUNXI_CCU_GATE(dram_tve0_clk, "dram-tve0", "pll-ddr", + 0x100, BIT(5), 0); +static SUNXI_CCU_GATE(dram_tve1_clk, "dram-tve1", "pll-ddr", + 0x100, BIT(6), 0); + +/* Clock seems to be critical only on sun4i */ +static SUNXI_CCU_GATE(dram_out_clk, "dram-out", "pll-ddr", + 0x100, BIT(15), CLK_IS_CRITICAL); +static SUNXI_CCU_GATE(dram_de_fe1_clk, "dram-de-fe1", "pll-ddr", + 0x100, BIT(24), 0); +static SUNXI_CCU_GATE(dram_de_fe0_clk, "dram-de-fe0", "pll-ddr", + 0x100, BIT(25), 0); +static SUNXI_CCU_GATE(dram_de_be0_clk, "dram-de-be0", "pll-ddr", + 0x100, BIT(26), 0); +static SUNXI_CCU_GATE(dram_de_be1_clk, "dram-de-be1", "pll-ddr", + 0x100, BIT(27), 0); +static SUNXI_CCU_GATE(dram_mp_clk, "dram-mp", "pll-ddr", + 0x100, BIT(28), 0); +static SUNXI_CCU_GATE(dram_ace_clk, "dram-ace", "pll-ddr", + 0x100, BIT(29), 0); + +static const char *const de_parents[] = { "pll-video0", "pll-video1", + "pll-ddr-other" }; +static SUNXI_CCU_M_WITH_MUX_GATE(de_be0_clk, "de-be0", de_parents, + 0x104, 0, 4, 24, 2, BIT(31), 0); + +static SUNXI_CCU_M_WITH_MUX_GATE(de_be1_clk, "de-be1", de_parents, + 0x108, 0, 4, 24, 2, BIT(31), 0); + +static SUNXI_CCU_M_WITH_MUX_GATE(de_fe0_clk, "de-fe0", de_parents, + 0x10c, 0, 4, 24, 2, BIT(31), 0); + +static SUNXI_CCU_M_WITH_MUX_GATE(de_fe1_clk, "de-fe1", de_parents, + 0x110, 0, 4, 24, 2, BIT(31), 0); + +/* Undocumented on A10 */ +static SUNXI_CCU_M_WITH_MUX_GATE(de_mp_clk, "de-mp", de_parents, + 0x114, 0, 4, 24, 2, BIT(31), 0); + +static const char *const disp_parents[] = { "pll-video0", "pll-video1", + "pll-video0-2x", "pll-video1-2x" }; +static SUNXI_CCU_MUX_WITH_GATE(tcon0_ch0_clk, "tcon0-ch0-sclk", disp_parents, + 0x118, 24, 2, BIT(31), CLK_SET_RATE_PARENT); +static SUNXI_CCU_MUX_WITH_GATE(tcon1_ch0_clk, "tcon1-ch0-sclk", disp_parents, + 0x11c, 24, 2, BIT(31), CLK_SET_RATE_PARENT); + +static const char *const csi_sclk_parents[] = { "pll-video0", "pll-ve", + "pll-ddr-other", "pll-periph" }; + +static SUNXI_CCU_M_WITH_MUX_GATE(csi_sclk_clk, "csi-sclk", + csi_sclk_parents, + 0x120, 0, 4, 24, 2, BIT(31), 0); + +/* TVD clock setup for A10 */ +static const char *const tvd_parents[] = { "pll-video0", "pll-video1" }; +static SUNXI_CCU_MUX_WITH_GATE(tvd_sun4i_clk, "tvd", 
tvd_parents, + 0x128, 24, 1, BIT(31), 0); + +/* TVD clock setup for A20 */ +static SUNXI_CCU_MP_WITH_MUX_GATE(tvd_sclk2_sun7i_clk, + "tvd-sclk2", tvd_parents, + 0x128, + 0, 4, /* M */ + 16, 4, /* P */ + 8, 1, /* mux */ + BIT(15), /* gate */ + 0); + +static SUNXI_CCU_M_WITH_GATE(tvd_sclk1_sun7i_clk, "tvd-sclk1", "tvd-sclk2", + 0x128, 0, 4, BIT(31), 0); + +static SUNXI_CCU_M_WITH_MUX_GATE(tcon0_ch1_sclk2_clk, "tcon0-ch1-sclk2", + disp_parents, + 0x12c, 0, 4, 24, 2, BIT(31), + CLK_SET_RATE_PARENT); + +static SUNXI_CCU_M_WITH_GATE(tcon0_ch1_clk, + "tcon0-ch1-sclk1", "tcon0-ch1-sclk2", + 0x12c, 11, 1, BIT(15), + CLK_SET_RATE_PARENT); + +static SUNXI_CCU_M_WITH_MUX_GATE(tcon1_ch1_sclk2_clk, "tcon1-ch1-sclk2", + disp_parents, + 0x130, 0, 4, 24, 2, BIT(31), + CLK_SET_RATE_PARENT); + +static SUNXI_CCU_M_WITH_GATE(tcon1_ch1_clk, + "tcon1-ch1-sclk1", "tcon1-ch1-sclk2", + 0x130, 11, 1, BIT(15), + CLK_SET_RATE_PARENT); + +static const char *const csi_parents[] = { "hosc", "pll-video0", "pll-video1", + "pll-video0-2x", "pll-video1-2x"}; +static const u8 csi_table[] = { 0, 1, 2, 5, 6}; +static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(csi0_clk, "csi0", + csi_parents, csi_table, + 0x134, 0, 5, 24, 3, BIT(31), 0); + +static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(csi1_clk, "csi1", + csi_parents, csi_table, + 0x138, 0, 5, 24, 3, BIT(31), 0); + +static SUNXI_CCU_M_WITH_GATE(ve_clk, "ve", "pll-ve", 0x13c, 16, 8, BIT(31), 0); + +static SUNXI_CCU_GATE(codec_clk, "codec", "pll-audio", + 0x140, BIT(31), CLK_SET_RATE_PARENT); + +static SUNXI_CCU_GATE(avs_clk, "avs", "hosc", 0x144, BIT(31), 0); + +static const char *const ace_parents[] = { "pll-ve", "pll-ddr-other" }; +static SUNXI_CCU_M_WITH_MUX_GATE(ace_clk, "ace", ace_parents, + 0x148, 0, 4, 24, 1, BIT(31), 0); + +static SUNXI_CCU_M_WITH_MUX_GATE(hdmi_clk, "hdmi", disp_parents, + 0x150, 0, 4, 24, 2, BIT(31), + CLK_SET_RATE_PARENT); + +static const char *const gpu_parents_sun4i[] = { "pll-video0", "pll-ve", + "pll-ddr-other", + "pll-video1" }; +static SUNXI_CCU_M_WITH_MUX_GATE(gpu_sun4i_clk, "gpu", gpu_parents_sun4i, + 0x154, 0, 4, 24, 2, BIT(31), + CLK_SET_RATE_PARENT); + +static const char *const gpu_parents_sun7i[] = { "pll-video0", "pll-ve", + "pll-ddr-other", "pll-video1", + "pll-gpu" }; +static const u8 gpu_table_sun7i[] = { 0, 1, 2, 3, 4 }; +static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(gpu_sun7i_clk, "gpu", + gpu_parents_sun7i, gpu_table_sun7i, + 0x154, 0, 4, 24, 3, BIT(31), + CLK_SET_RATE_PARENT); + +static const char *const mbus_sun4i_parents[] = { "hosc", "pll-periph", + "pll-ddr-other" }; +static SUNXI_CCU_MP_WITH_MUX_GATE(mbus_sun4i_clk, "mbus", mbus_sun4i_parents, + 0x15c, 0, 4, 16, 2, 24, 2, BIT(31), + 0); +static const char *const mbus_sun7i_parents[] = { "hosc", "pll-periph-base", + "pll-ddr-other" }; +static SUNXI_CCU_MP_WITH_MUX_GATE(mbus_sun7i_clk, "mbus", mbus_sun7i_parents, + 0x15c, 0, 4, 16, 2, 24, 2, BIT(31), + CLK_IS_CRITICAL); + +static SUNXI_CCU_GATE(hdmi1_slow_clk, "hdmi1-slow", "hosc", 0x178, BIT(31), 0); + +static const char *const hdmi1_parents[] = { "pll-video0", "pll-video1" }; +static const u8 hdmi1_table[] = { 0, 1}; +static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(hdmi1_clk, "hdmi1", + hdmi1_parents, hdmi1_table, + 0x17c, 0, 4, 24, 2, BIT(31), + CLK_SET_RATE_PARENT); + +static const char *const out_parents[] = { "hosc", "osc32k", "hosc" }; +static const struct ccu_mux_fixed_prediv clk_out_predivs[] = { + { .index = 0, .div = 750, }, +}; + +static struct ccu_mp out_a_clk = { + .enable = BIT(31), + .m = _SUNXI_CCU_DIV(8, 5), + .p = _SUNXI_CCU_DIV(20, 2), + .mux = 
{ + .shift = 24, + .width = 2, + .fixed_predivs = clk_out_predivs, + .n_predivs = ARRAY_SIZE(clk_out_predivs), + }, + .common = { + .reg = 0x1f0, + .features = CCU_FEATURE_FIXED_PREDIV, + .hw.init = CLK_HW_INIT_PARENTS("out-a", + out_parents, + &ccu_mp_ops, + 0), + }, +}; +static struct ccu_mp out_b_clk = { + .enable = BIT(31), + .m = _SUNXI_CCU_DIV(8, 5), + .p = _SUNXI_CCU_DIV(20, 2), + .mux = { + .shift = 24, + .width = 2, + .fixed_predivs = clk_out_predivs, + .n_predivs = ARRAY_SIZE(clk_out_predivs), + }, + .common = { + .reg = 0x1f4, + .features = CCU_FEATURE_FIXED_PREDIV, + .hw.init = CLK_HW_INIT_PARENTS("out-b", + out_parents, + &ccu_mp_ops, + 0), + }, +}; + +static struct ccu_common *sun4i_sun7i_ccu_clks[] = { + &hosc_clk.common, + &pll_core_clk.common, + &pll_audio_base_clk.common, + &pll_video0_clk.common, + &pll_ve_sun4i_clk.common, + &pll_ve_sun7i_clk.common, + &pll_ddr_base_clk.common, + &pll_ddr_clk.common, + &pll_ddr_other_clk.common, + &pll_periph_base_clk.common, + &pll_periph_sata_clk.common, + &pll_video1_clk.common, + &pll_gpu_clk.common, + &cpu_clk.common, + &axi_clk.common, + &axi_dram_clk.common, + &ahb_sun4i_clk.common, + &ahb_sun7i_clk.common, + &apb0_clk.common, + &apb1_clk.common, + &ahb_otg_clk.common, + &ahb_ehci0_clk.common, + &ahb_ohci0_clk.common, + &ahb_ehci1_clk.common, + &ahb_ohci1_clk.common, + &ahb_ss_clk.common, + &ahb_dma_clk.common, + &ahb_bist_clk.common, + &ahb_mmc0_clk.common, + &ahb_mmc1_clk.common, + &ahb_mmc2_clk.common, + &ahb_mmc3_clk.common, + &ahb_ms_clk.common, + &ahb_nand_clk.common, + &ahb_sdram_clk.common, + &ahb_ace_clk.common, + &ahb_emac_clk.common, + &ahb_ts_clk.common, + &ahb_spi0_clk.common, + &ahb_spi1_clk.common, + &ahb_spi2_clk.common, + &ahb_spi3_clk.common, + &ahb_pata_clk.common, + &ahb_sata_clk.common, + &ahb_gps_clk.common, + &ahb_hstimer_clk.common, + &ahb_ve_clk.common, + &ahb_tvd_clk.common, + &ahb_tve0_clk.common, + &ahb_tve1_clk.common, + &ahb_lcd0_clk.common, + &ahb_lcd1_clk.common, + &ahb_csi0_clk.common, + &ahb_csi1_clk.common, + &ahb_hdmi1_clk.common, + &ahb_hdmi0_clk.common, + &ahb_de_be0_clk.common, + &ahb_de_be1_clk.common, + &ahb_de_fe0_clk.common, + &ahb_de_fe1_clk.common, + &ahb_gmac_clk.common, + &ahb_mp_clk.common, + &ahb_gpu_clk.common, + &apb0_codec_clk.common, + &apb0_spdif_clk.common, + &apb0_ac97_clk.common, + &apb0_i2s0_clk.common, + &apb0_i2s1_clk.common, + &apb0_pio_clk.common, + &apb0_ir0_clk.common, + &apb0_ir1_clk.common, + &apb0_i2s2_clk.common, + &apb0_keypad_clk.common, + &apb1_i2c0_clk.common, + &apb1_i2c1_clk.common, + &apb1_i2c2_clk.common, + &apb1_i2c3_clk.common, + &apb1_can_clk.common, + &apb1_scr_clk.common, + &apb1_ps20_clk.common, + &apb1_ps21_clk.common, + &apb1_i2c4_clk.common, + &apb1_uart0_clk.common, + &apb1_uart1_clk.common, + &apb1_uart2_clk.common, + &apb1_uart3_clk.common, + &apb1_uart4_clk.common, + &apb1_uart5_clk.common, + &apb1_uart6_clk.common, + &apb1_uart7_clk.common, + &nand_clk.common, + &ms_clk.common, + &mmc0_clk.common, + &mmc0_output_clk.common, + &mmc0_sample_clk.common, + &mmc1_clk.common, + &mmc1_output_clk.common, + &mmc1_sample_clk.common, + &mmc2_clk.common, + &mmc2_output_clk.common, + &mmc2_sample_clk.common, + &mmc3_clk.common, + &mmc3_output_clk.common, + &mmc3_sample_clk.common, + &ts_clk.common, + &ss_clk.common, + &spi0_clk.common, + &spi1_clk.common, + &spi2_clk.common, + &pata_clk.common, + &ir0_sun4i_clk.common, + &ir1_sun4i_clk.common, + &ir0_sun7i_clk.common, + &ir1_sun7i_clk.common, + &i2s0_clk.common, + &ac97_clk.common, + &spdif_clk.common, + 
&keypad_clk.common, + &sata_clk.common, + &usb_ohci0_clk.common, + &usb_ohci1_clk.common, + &usb_phy_clk.common, + &spi3_clk.common, + &i2s1_clk.common, + &i2s2_clk.common, + &dram_ve_clk.common, + &dram_csi0_clk.common, + &dram_csi1_clk.common, + &dram_ts_clk.common, + &dram_tvd_clk.common, + &dram_tve0_clk.common, + &dram_tve1_clk.common, + &dram_out_clk.common, + &dram_de_fe1_clk.common, + &dram_de_fe0_clk.common, + &dram_de_be0_clk.common, + &dram_de_be1_clk.common, + &dram_mp_clk.common, + &dram_ace_clk.common, + &de_be0_clk.common, + &de_be1_clk.common, + &de_fe0_clk.common, + &de_fe1_clk.common, + &de_mp_clk.common, + &tcon0_ch0_clk.common, + &tcon1_ch0_clk.common, + &csi_sclk_clk.common, + &tvd_sun4i_clk.common, + &tvd_sclk1_sun7i_clk.common, + &tvd_sclk2_sun7i_clk.common, + &tcon0_ch1_sclk2_clk.common, + &tcon0_ch1_clk.common, + &tcon1_ch1_sclk2_clk.common, + &tcon1_ch1_clk.common, + &csi0_clk.common, + &csi1_clk.common, + &ve_clk.common, + &codec_clk.common, + &avs_clk.common, + &ace_clk.common, + &hdmi_clk.common, + &gpu_sun4i_clk.common, + &gpu_sun7i_clk.common, + &mbus_sun4i_clk.common, + &mbus_sun7i_clk.common, + &hdmi1_slow_clk.common, + &hdmi1_clk.common, + &out_a_clk.common, + &out_b_clk.common +}; + +/* Post-divider for pll-audio is hardcoded to 4 */ +static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio", + "pll-audio-base", 4, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x", + "pll-audio-base", 2, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x", + "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR(pll_audio_8x_clk, "pll-audio-8x", + "pll-audio-base", 1, 2, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR(pll_video0_2x_clk, "pll-video0-2x", + "pll-video0", 1, 2, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR(pll_video1_2x_clk, "pll-video1-2x", + "pll-video1", 1, 2, CLK_SET_RATE_PARENT); + + +static struct clk_hw_onecell_data sun4i_a10_hw_clks = { + .hws = { + [CLK_HOSC] = &hosc_clk.common.hw, + [CLK_PLL_CORE] = &pll_core_clk.common.hw, + [CLK_PLL_AUDIO_BASE] = &pll_audio_base_clk.common.hw, + [CLK_PLL_AUDIO] = &pll_audio_clk.hw, + [CLK_PLL_AUDIO_2X] = &pll_audio_2x_clk.hw, + [CLK_PLL_AUDIO_4X] = &pll_audio_4x_clk.hw, + [CLK_PLL_AUDIO_8X] = &pll_audio_8x_clk.hw, + [CLK_PLL_VIDEO0] = &pll_video0_clk.common.hw, + [CLK_PLL_VIDEO0_2X] = &pll_video0_2x_clk.hw, + [CLK_PLL_VE] = &pll_ve_sun4i_clk.common.hw, + [CLK_PLL_DDR_BASE] = &pll_ddr_base_clk.common.hw, + [CLK_PLL_DDR] = &pll_ddr_clk.common.hw, + [CLK_PLL_DDR_OTHER] = &pll_ddr_other_clk.common.hw, + [CLK_PLL_PERIPH_BASE] = &pll_periph_base_clk.common.hw, + [CLK_PLL_PERIPH] = &pll_periph_clk.hw, + [CLK_PLL_PERIPH_SATA] = &pll_periph_sata_clk.common.hw, + [CLK_PLL_VIDEO1] = &pll_video1_clk.common.hw, + [CLK_PLL_VIDEO1_2X] = &pll_video1_2x_clk.hw, + [CLK_CPU] = &cpu_clk.common.hw, + [CLK_AXI] = &axi_clk.common.hw, + [CLK_AXI_DRAM] = &axi_dram_clk.common.hw, + [CLK_AHB] = &ahb_sun4i_clk.common.hw, + [CLK_APB0] = &apb0_clk.common.hw, + [CLK_APB1] = &apb1_clk.common.hw, + [CLK_AHB_OTG] = &ahb_otg_clk.common.hw, + [CLK_AHB_EHCI0] = &ahb_ehci0_clk.common.hw, + [CLK_AHB_OHCI0] = &ahb_ohci0_clk.common.hw, + [CLK_AHB_EHCI1] = &ahb_ehci1_clk.common.hw, + [CLK_AHB_OHCI1] = &ahb_ohci1_clk.common.hw, + [CLK_AHB_SS] = &ahb_ss_clk.common.hw, + [CLK_AHB_DMA] = &ahb_dma_clk.common.hw, + [CLK_AHB_BIST] = &ahb_bist_clk.common.hw, + [CLK_AHB_MMC0] = &ahb_mmc0_clk.common.hw, + [CLK_AHB_MMC1] = &ahb_mmc1_clk.common.hw, + [CLK_AHB_MMC2] = &ahb_mmc2_clk.common.hw, + 
[CLK_AHB_MMC3] = &ahb_mmc3_clk.common.hw, + [CLK_AHB_MS] = &ahb_ms_clk.common.hw, + [CLK_AHB_NAND] = &ahb_nand_clk.common.hw, + [CLK_AHB_SDRAM] = &ahb_sdram_clk.common.hw, + [CLK_AHB_ACE] = &ahb_ace_clk.common.hw, + [CLK_AHB_EMAC] = &ahb_emac_clk.common.hw, + [CLK_AHB_TS] = &ahb_ts_clk.common.hw, + [CLK_AHB_SPI0] = &ahb_spi0_clk.common.hw, + [CLK_AHB_SPI1] = &ahb_spi1_clk.common.hw, + [CLK_AHB_SPI2] = &ahb_spi2_clk.common.hw, + [CLK_AHB_SPI3] = &ahb_spi3_clk.common.hw, + [CLK_AHB_PATA] = &ahb_pata_clk.common.hw, + [CLK_AHB_SATA] = &ahb_sata_clk.common.hw, + [CLK_AHB_GPS] = &ahb_gps_clk.common.hw, + [CLK_AHB_VE] = &ahb_ve_clk.common.hw, + [CLK_AHB_TVD] = &ahb_tvd_clk.common.hw, + [CLK_AHB_TVE0] = &ahb_tve0_clk.common.hw, + [CLK_AHB_TVE1] = &ahb_tve1_clk.common.hw, + [CLK_AHB_LCD0] = &ahb_lcd0_clk.common.hw, + [CLK_AHB_LCD1] = &ahb_lcd1_clk.common.hw, + [CLK_AHB_CSI0] = &ahb_csi0_clk.common.hw, + [CLK_AHB_CSI1] = &ahb_csi1_clk.common.hw, + [CLK_AHB_HDMI0] = &ahb_hdmi0_clk.common.hw, + [CLK_AHB_DE_BE0] = &ahb_de_be0_clk.common.hw, + [CLK_AHB_DE_BE1] = &ahb_de_be1_clk.common.hw, + [CLK_AHB_DE_FE0] = &ahb_de_fe0_clk.common.hw, + [CLK_AHB_DE_FE1] = &ahb_de_fe1_clk.common.hw, + [CLK_AHB_MP] = &ahb_mp_clk.common.hw, + [CLK_AHB_GPU] = &ahb_gpu_clk.common.hw, + [CLK_APB0_CODEC] = &apb0_codec_clk.common.hw, + [CLK_APB0_SPDIF] = &apb0_spdif_clk.common.hw, + [CLK_APB0_AC97] = &apb0_ac97_clk.common.hw, + [CLK_APB0_I2S0] = &apb0_i2s0_clk.common.hw, + [CLK_APB0_PIO] = &apb0_pio_clk.common.hw, + [CLK_APB0_IR0] = &apb0_ir0_clk.common.hw, + [CLK_APB0_IR1] = &apb0_ir1_clk.common.hw, + [CLK_APB0_KEYPAD] = &apb0_keypad_clk.common.hw, + [CLK_APB1_I2C0] = &apb1_i2c0_clk.common.hw, + [CLK_APB1_I2C1] = &apb1_i2c1_clk.common.hw, + [CLK_APB1_I2C2] = &apb1_i2c2_clk.common.hw, + [CLK_APB1_CAN] = &apb1_can_clk.common.hw, + [CLK_APB1_SCR] = &apb1_scr_clk.common.hw, + [CLK_APB1_PS20] = &apb1_ps20_clk.common.hw, + [CLK_APB1_PS21] = &apb1_ps21_clk.common.hw, + [CLK_APB1_UART0] = &apb1_uart0_clk.common.hw, + [CLK_APB1_UART1] = &apb1_uart1_clk.common.hw, + [CLK_APB1_UART2] = &apb1_uart2_clk.common.hw, + [CLK_APB1_UART3] = &apb1_uart3_clk.common.hw, + [CLK_APB1_UART4] = &apb1_uart4_clk.common.hw, + [CLK_APB1_UART5] = &apb1_uart5_clk.common.hw, + [CLK_APB1_UART6] = &apb1_uart6_clk.common.hw, + [CLK_APB1_UART7] = &apb1_uart7_clk.common.hw, + [CLK_NAND] = &nand_clk.common.hw, + [CLK_MS] = &ms_clk.common.hw, + [CLK_MMC0] = &mmc0_clk.common.hw, + [CLK_MMC1] = &mmc1_clk.common.hw, + [CLK_MMC2] = &mmc2_clk.common.hw, + [CLK_MMC3] = &mmc3_clk.common.hw, + [CLK_TS] = &ts_clk.common.hw, + [CLK_SS] = &ss_clk.common.hw, + [CLK_SPI0] = &spi0_clk.common.hw, + [CLK_SPI1] = &spi1_clk.common.hw, + [CLK_SPI2] = &spi2_clk.common.hw, + [CLK_PATA] = &pata_clk.common.hw, + [CLK_IR0] = &ir0_sun4i_clk.common.hw, + [CLK_IR1] = &ir1_sun4i_clk.common.hw, + [CLK_I2S0] = &i2s0_clk.common.hw, + [CLK_AC97] = &ac97_clk.common.hw, + [CLK_SPDIF] = &spdif_clk.common.hw, + [CLK_KEYPAD] = &keypad_clk.common.hw, + [CLK_SATA] = &sata_clk.common.hw, + [CLK_USB_OHCI0] = &usb_ohci0_clk.common.hw, + [CLK_USB_OHCI1] = &usb_ohci1_clk.common.hw, + [CLK_USB_PHY] = &usb_phy_clk.common.hw, + /* CLK_GPS is unimplemented */ + [CLK_SPI3] = &spi3_clk.common.hw, + [CLK_DRAM_VE] = &dram_ve_clk.common.hw, + [CLK_DRAM_CSI0] = &dram_csi0_clk.common.hw, + [CLK_DRAM_CSI1] = &dram_csi1_clk.common.hw, + [CLK_DRAM_TS] = &dram_ts_clk.common.hw, + [CLK_DRAM_TVD] = &dram_tvd_clk.common.hw, + [CLK_DRAM_TVE0] = &dram_tve0_clk.common.hw, + [CLK_DRAM_TVE1] = &dram_tve1_clk.common.hw, + 
[CLK_DRAM_OUT] = &dram_out_clk.common.hw, + [CLK_DRAM_DE_FE1] = &dram_de_fe1_clk.common.hw, + [CLK_DRAM_DE_FE0] = &dram_de_fe0_clk.common.hw, + [CLK_DRAM_DE_BE0] = &dram_de_be0_clk.common.hw, + [CLK_DRAM_DE_BE1] = &dram_de_be1_clk.common.hw, + [CLK_DRAM_MP] = &dram_mp_clk.common.hw, + [CLK_DRAM_ACE] = &dram_ace_clk.common.hw, + [CLK_DE_BE0] = &de_be0_clk.common.hw, + [CLK_DE_BE1] = &de_be1_clk.common.hw, + [CLK_DE_FE0] = &de_fe0_clk.common.hw, + [CLK_DE_FE1] = &de_fe1_clk.common.hw, + [CLK_DE_MP] = &de_mp_clk.common.hw, + [CLK_TCON0_CH0] = &tcon0_ch0_clk.common.hw, + [CLK_TCON1_CH0] = &tcon1_ch0_clk.common.hw, + [CLK_CSI_SCLK] = &csi_sclk_clk.common.hw, + [CLK_TVD] = &tvd_sun4i_clk.common.hw, + [CLK_TCON0_CH1_SCLK2] = &tcon0_ch1_sclk2_clk.common.hw, + [CLK_TCON0_CH1] = &tcon0_ch1_clk.common.hw, + [CLK_TCON1_CH1_SCLK2] = &tcon1_ch1_sclk2_clk.common.hw, + [CLK_TCON1_CH1] = &tcon1_ch1_clk.common.hw, + [CLK_CSI0] = &csi0_clk.common.hw, + [CLK_CSI1] = &csi1_clk.common.hw, + [CLK_VE] = &ve_clk.common.hw, + [CLK_CODEC] = &codec_clk.common.hw, + [CLK_AVS] = &avs_clk.common.hw, + [CLK_ACE] = &ace_clk.common.hw, + [CLK_HDMI] = &hdmi_clk.common.hw, + [CLK_GPU] = &gpu_sun7i_clk.common.hw, + [CLK_MBUS] = &mbus_sun4i_clk.common.hw, + }, + .num = CLK_NUMBER_SUN4I, +}; +static struct clk_hw_onecell_data sun7i_a20_hw_clks = { + .hws = { + [CLK_HOSC] = &hosc_clk.common.hw, + [CLK_PLL_CORE] = &pll_core_clk.common.hw, + [CLK_PLL_AUDIO_BASE] = &pll_audio_base_clk.common.hw, + [CLK_PLL_AUDIO] = &pll_audio_clk.hw, + [CLK_PLL_AUDIO_2X] = &pll_audio_2x_clk.hw, + [CLK_PLL_AUDIO_4X] = &pll_audio_4x_clk.hw, + [CLK_PLL_AUDIO_8X] = &pll_audio_8x_clk.hw, + [CLK_PLL_VIDEO0] = &pll_video0_clk.common.hw, + [CLK_PLL_VIDEO0_2X] = &pll_video0_2x_clk.hw, + [CLK_PLL_VE] = &pll_ve_sun7i_clk.common.hw, + [CLK_PLL_DDR_BASE] = &pll_ddr_base_clk.common.hw, + [CLK_PLL_DDR] = &pll_ddr_clk.common.hw, + [CLK_PLL_DDR_OTHER] = &pll_ddr_other_clk.common.hw, + [CLK_PLL_PERIPH_BASE] = &pll_periph_base_clk.common.hw, + [CLK_PLL_PERIPH] = &pll_periph_clk.hw, + [CLK_PLL_PERIPH_SATA] = &pll_periph_sata_clk.common.hw, + [CLK_PLL_VIDEO1] = &pll_video1_clk.common.hw, + [CLK_PLL_VIDEO1_2X] = &pll_video1_2x_clk.hw, + [CLK_PLL_GPU] = &pll_gpu_clk.common.hw, + [CLK_CPU] = &cpu_clk.common.hw, + [CLK_AXI] = &axi_clk.common.hw, + [CLK_AHB] = &ahb_sun7i_clk.common.hw, + [CLK_APB0] = &apb0_clk.common.hw, + [CLK_APB1] = &apb1_clk.common.hw, + [CLK_AHB_OTG] = &ahb_otg_clk.common.hw, + [CLK_AHB_EHCI0] = &ahb_ehci0_clk.common.hw, + [CLK_AHB_OHCI0] = &ahb_ohci0_clk.common.hw, + [CLK_AHB_EHCI1] = &ahb_ehci1_clk.common.hw, + [CLK_AHB_OHCI1] = &ahb_ohci1_clk.common.hw, + [CLK_AHB_SS] = &ahb_ss_clk.common.hw, + [CLK_AHB_DMA] = &ahb_dma_clk.common.hw, + [CLK_AHB_BIST] = &ahb_bist_clk.common.hw, + [CLK_AHB_MMC0] = &ahb_mmc0_clk.common.hw, + [CLK_AHB_MMC1] = &ahb_mmc1_clk.common.hw, + [CLK_AHB_MMC2] = &ahb_mmc2_clk.common.hw, + [CLK_AHB_MMC3] = &ahb_mmc3_clk.common.hw, + [CLK_AHB_MS] = &ahb_ms_clk.common.hw, + [CLK_AHB_NAND] = &ahb_nand_clk.common.hw, + [CLK_AHB_SDRAM] = &ahb_sdram_clk.common.hw, + [CLK_AHB_ACE] = &ahb_ace_clk.common.hw, + [CLK_AHB_EMAC] = &ahb_emac_clk.common.hw, + [CLK_AHB_TS] = &ahb_ts_clk.common.hw, + [CLK_AHB_SPI0] = &ahb_spi0_clk.common.hw, + [CLK_AHB_SPI1] = &ahb_spi1_clk.common.hw, + [CLK_AHB_SPI2] = &ahb_spi2_clk.common.hw, + [CLK_AHB_SPI3] = &ahb_spi3_clk.common.hw, + [CLK_AHB_PATA] = &ahb_pata_clk.common.hw, + [CLK_AHB_SATA] = &ahb_sata_clk.common.hw, + [CLK_AHB_HSTIMER] = &ahb_hstimer_clk.common.hw, + [CLK_AHB_VE] = &ahb_ve_clk.common.hw, + 
[CLK_AHB_TVD] = &ahb_tvd_clk.common.hw, + [CLK_AHB_TVE0] = &ahb_tve0_clk.common.hw, + [CLK_AHB_TVE1] = &ahb_tve1_clk.common.hw, + [CLK_AHB_LCD0] = &ahb_lcd0_clk.common.hw, + [CLK_AHB_LCD1] = &ahb_lcd1_clk.common.hw, + [CLK_AHB_CSI0] = &ahb_csi0_clk.common.hw, + [CLK_AHB_CSI1] = &ahb_csi1_clk.common.hw, + [CLK_AHB_HDMI1] = &ahb_hdmi1_clk.common.hw, + [CLK_AHB_HDMI0] = &ahb_hdmi0_clk.common.hw, + [CLK_AHB_DE_BE0] = &ahb_de_be0_clk.common.hw, + [CLK_AHB_DE_BE1] = &ahb_de_be1_clk.common.hw, + [CLK_AHB_DE_FE0] = &ahb_de_fe0_clk.common.hw, + [CLK_AHB_DE_FE1] = &ahb_de_fe1_clk.common.hw, + [CLK_AHB_GMAC] = &ahb_gmac_clk.common.hw, + [CLK_AHB_MP] = &ahb_mp_clk.common.hw, + [CLK_AHB_GPU] = &ahb_gpu_clk.common.hw, + [CLK_APB0_CODEC] = &apb0_codec_clk.common.hw, + [CLK_APB0_SPDIF] = &apb0_spdif_clk.common.hw, + [CLK_APB0_AC97] = &apb0_ac97_clk.common.hw, + [CLK_APB0_I2S0] = &apb0_i2s0_clk.common.hw, + [CLK_APB0_I2S1] = &apb0_i2s1_clk.common.hw, + [CLK_APB0_PIO] = &apb0_pio_clk.common.hw, + [CLK_APB0_IR0] = &apb0_ir0_clk.common.hw, + [CLK_APB0_IR1] = &apb0_ir1_clk.common.hw, + [CLK_APB0_I2S2] = &apb0_i2s2_clk.common.hw, + [CLK_APB0_KEYPAD] = &apb0_keypad_clk.common.hw, + [CLK_APB1_I2C0] = &apb1_i2c0_clk.common.hw, + [CLK_APB1_I2C1] = &apb1_i2c1_clk.common.hw, + [CLK_APB1_I2C2] = &apb1_i2c2_clk.common.hw, + [CLK_APB1_I2C3] = &apb1_i2c3_clk.common.hw, + [CLK_APB1_CAN] = &apb1_can_clk.common.hw, + [CLK_APB1_SCR] = &apb1_scr_clk.common.hw, + [CLK_APB1_PS20] = &apb1_ps20_clk.common.hw, + [CLK_APB1_PS21] = &apb1_ps21_clk.common.hw, + [CLK_APB1_I2C4] = &apb1_i2c4_clk.common.hw, + [CLK_APB1_UART0] = &apb1_uart0_clk.common.hw, + [CLK_APB1_UART1] = &apb1_uart1_clk.common.hw, + [CLK_APB1_UART2] = &apb1_uart2_clk.common.hw, + [CLK_APB1_UART3] = &apb1_uart3_clk.common.hw, + [CLK_APB1_UART4] = &apb1_uart4_clk.common.hw, + [CLK_APB1_UART5] = &apb1_uart5_clk.common.hw, + [CLK_APB1_UART6] = &apb1_uart6_clk.common.hw, + [CLK_APB1_UART7] = &apb1_uart7_clk.common.hw, + [CLK_NAND] = &nand_clk.common.hw, + [CLK_MS] = &ms_clk.common.hw, + [CLK_MMC0] = &mmc0_clk.common.hw, + [CLK_MMC0_OUTPUT] = &mmc0_output_clk.common.hw, + [CLK_MMC0_SAMPLE] = &mmc0_sample_clk.common.hw, + [CLK_MMC1] = &mmc1_clk.common.hw, + [CLK_MMC1_OUTPUT] = &mmc1_output_clk.common.hw, + [CLK_MMC1_SAMPLE] = &mmc1_sample_clk.common.hw, + [CLK_MMC2] = &mmc2_clk.common.hw, + [CLK_MMC2_OUTPUT] = &mmc2_output_clk.common.hw, + [CLK_MMC2_SAMPLE] = &mmc2_sample_clk.common.hw, + [CLK_MMC3] = &mmc3_clk.common.hw, + [CLK_MMC3_OUTPUT] = &mmc3_output_clk.common.hw, + [CLK_MMC3_SAMPLE] = &mmc3_sample_clk.common.hw, + [CLK_TS] = &ts_clk.common.hw, + [CLK_SS] = &ss_clk.common.hw, + [CLK_SPI0] = &spi0_clk.common.hw, + [CLK_SPI1] = &spi1_clk.common.hw, + [CLK_SPI2] = &spi2_clk.common.hw, + [CLK_PATA] = &pata_clk.common.hw, + [CLK_IR0] = &ir0_sun7i_clk.common.hw, + [CLK_IR1] = &ir1_sun7i_clk.common.hw, + [CLK_I2S0] = &i2s0_clk.common.hw, + [CLK_AC97] = &ac97_clk.common.hw, + [CLK_SPDIF] = &spdif_clk.common.hw, + [CLK_KEYPAD] = &keypad_clk.common.hw, + [CLK_SATA] = &sata_clk.common.hw, + [CLK_USB_OHCI0] = &usb_ohci0_clk.common.hw, + [CLK_USB_OHCI1] = &usb_ohci1_clk.common.hw, + [CLK_USB_PHY] = &usb_phy_clk.common.hw, + /* CLK_GPS is unimplemented */ + [CLK_SPI3] = &spi3_clk.common.hw, + [CLK_I2S1] = &i2s1_clk.common.hw, + [CLK_I2S2] = &i2s2_clk.common.hw, + [CLK_DRAM_VE] = &dram_ve_clk.common.hw, + [CLK_DRAM_CSI0] = &dram_csi0_clk.common.hw, + [CLK_DRAM_CSI1] = &dram_csi1_clk.common.hw, + [CLK_DRAM_TS] = &dram_ts_clk.common.hw, + [CLK_DRAM_TVD] = &dram_tvd_clk.common.hw, + 
[CLK_DRAM_TVE0] = &dram_tve0_clk.common.hw, + [CLK_DRAM_TVE1] = &dram_tve1_clk.common.hw, + [CLK_DRAM_OUT] = &dram_out_clk.common.hw, + [CLK_DRAM_DE_FE1] = &dram_de_fe1_clk.common.hw, + [CLK_DRAM_DE_FE0] = &dram_de_fe0_clk.common.hw, + [CLK_DRAM_DE_BE0] = &dram_de_be0_clk.common.hw, + [CLK_DRAM_DE_BE1] = &dram_de_be1_clk.common.hw, + [CLK_DRAM_MP] = &dram_mp_clk.common.hw, + [CLK_DRAM_ACE] = &dram_ace_clk.common.hw, + [CLK_DE_BE0] = &de_be0_clk.common.hw, + [CLK_DE_BE1] = &de_be1_clk.common.hw, + [CLK_DE_FE0] = &de_fe0_clk.common.hw, + [CLK_DE_FE1] = &de_fe1_clk.common.hw, + [CLK_DE_MP] = &de_mp_clk.common.hw, + [CLK_TCON0_CH0] = &tcon0_ch0_clk.common.hw, + [CLK_TCON1_CH0] = &tcon1_ch0_clk.common.hw, + [CLK_CSI_SCLK] = &csi_sclk_clk.common.hw, + [CLK_TVD_SCLK2] = &tvd_sclk2_sun7i_clk.common.hw, + [CLK_TVD] = &tvd_sclk1_sun7i_clk.common.hw, + [CLK_TCON0_CH1_SCLK2] = &tcon0_ch1_sclk2_clk.common.hw, + [CLK_TCON0_CH1] = &tcon0_ch1_clk.common.hw, + [CLK_TCON1_CH1_SCLK2] = &tcon1_ch1_sclk2_clk.common.hw, + [CLK_TCON1_CH1] = &tcon1_ch1_clk.common.hw, + [CLK_CSI0] = &csi0_clk.common.hw, + [CLK_CSI1] = &csi1_clk.common.hw, + [CLK_VE] = &ve_clk.common.hw, + [CLK_CODEC] = &codec_clk.common.hw, + [CLK_AVS] = &avs_clk.common.hw, + [CLK_ACE] = &ace_clk.common.hw, + [CLK_HDMI] = &hdmi_clk.common.hw, + [CLK_GPU] = &gpu_sun7i_clk.common.hw, + [CLK_MBUS] = &mbus_sun7i_clk.common.hw, + [CLK_HDMI1_SLOW] = &hdmi1_slow_clk.common.hw, + [CLK_HDMI1] = &hdmi1_clk.common.hw, + [CLK_OUT_A] = &out_a_clk.common.hw, + [CLK_OUT_B] = &out_b_clk.common.hw, + }, + .num = CLK_NUMBER_SUN7I, +}; + +static struct ccu_reset_map sunxi_a10_a20_ccu_resets[] = { + [RST_USB_PHY0] = { 0x0cc, BIT(0) }, + [RST_USB_PHY1] = { 0x0cc, BIT(1) }, + [RST_USB_PHY2] = { 0x0cc, BIT(2) }, + [RST_GPS] = { 0x0d0, BIT(0) }, + [RST_DE_BE0] = { 0x104, BIT(30) }, + [RST_DE_BE1] = { 0x108, BIT(30) }, + [RST_DE_FE0] = { 0x10c, BIT(30) }, + [RST_DE_FE1] = { 0x110, BIT(30) }, + [RST_DE_MP] = { 0x114, BIT(30) }, + [RST_TVE0] = { 0x118, BIT(29) }, + [RST_TCON0] = { 0x118, BIT(30) }, + [RST_TVE1] = { 0x11c, BIT(29) }, + [RST_TCON1] = { 0x11c, BIT(30) }, + [RST_CSI0] = { 0x134, BIT(30) }, + [RST_CSI1] = { 0x138, BIT(30) }, + [RST_VE] = { 0x13c, BIT(0) }, + [RST_ACE] = { 0x148, BIT(16) }, + [RST_LVDS] = { 0x14c, BIT(0) }, + [RST_GPU] = { 0x154, BIT(30) }, + [RST_HDMI_H] = { 0x170, BIT(0) }, + [RST_HDMI_SYS] = { 0x170, BIT(1) }, + [RST_HDMI_AUDIO_DMA] = { 0x170, BIT(2) }, +}; + +static const struct sunxi_ccu_desc sun4i_a10_ccu_desc = { + .ccu_clks = sun4i_sun7i_ccu_clks, + .num_ccu_clks = ARRAY_SIZE(sun4i_sun7i_ccu_clks), + + .hw_clks = &sun4i_a10_hw_clks, + + .resets = sunxi_a10_a20_ccu_resets, + .num_resets = ARRAY_SIZE(sunxi_a10_a20_ccu_resets), +}; + +static const struct sunxi_ccu_desc sun7i_a20_ccu_desc = { + .ccu_clks = sun4i_sun7i_ccu_clks, + .num_ccu_clks = ARRAY_SIZE(sun4i_sun7i_ccu_clks), + + .hw_clks = &sun7i_a20_hw_clks, + + .resets = sunxi_a10_a20_ccu_resets, + .num_resets = ARRAY_SIZE(sunxi_a10_a20_ccu_resets), +}; + +static void __init sun4i_ccu_init(struct device_node *node, + const struct sunxi_ccu_desc *desc) +{ + void __iomem *reg; + u32 val; + + reg = of_io_request_and_map(node, 0, of_node_full_name(node)); + if (IS_ERR(reg)) { + pr_err("%s: Could not map the clock registers\n", + of_node_full_name(node)); + return; + } + + /* Force the PLL-Audio-1x divider to 4 */ + val = readl(reg + SUN4I_PLL_AUDIO_REG); + val &= ~GENMASK(29, 26); + writel(val | (4 << 26), reg + SUN4I_PLL_AUDIO_REG); + + /* + * Use the peripheral PLL6 as the AHB parent, 
instead of CPU / + * AXI which have rate changes due to cpufreq. + * + * This is especially a big deal for the HS timer whose parent + * clock is AHB. + * + * NB! These bits are undocumented in A10 manual. + */ + val = readl(reg + SUN4I_AHB_REG); + val &= ~GENMASK(7, 6); + writel(val | (2 << 6), reg + SUN4I_AHB_REG); + + sunxi_ccu_probe(node, reg, desc); +} + +static void __init sun4i_a10_ccu_setup(struct device_node *node) +{ + sun4i_ccu_init(node, &sun4i_a10_ccu_desc); +} +CLK_OF_DECLARE(sun4i_a10_ccu, "allwinner,sun4i-a10-ccu", + sun4i_a10_ccu_setup); + +static void __init sun7i_a20_ccu_setup(struct device_node *node) +{ + sun4i_ccu_init(node, &sun7i_a20_ccu_desc); +} +CLK_OF_DECLARE(sun7i_a20_ccu, "allwinner,sun7i-a20-ccu", + sun7i_a20_ccu_setup); diff --git a/drivers/clk/sunxi-ng/ccu-sun4i-a10.h b/drivers/clk/sunxi-ng/ccu-sun4i-a10.h new file mode 100644 index 000000000000..c5947c7c050e --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu-sun4i-a10.h @@ -0,0 +1,61 @@ +/* + * Copyright 2017 Priit Laes + * + * Priit Laes + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _CCU_SUN4I_A10_H_ +#define _CCU_SUN4I_A10_H_ + +#include +#include +#include + +/* The HOSC is exported */ +#define CLK_PLL_CORE 2 +#define CLK_PLL_AUDIO_BASE 3 +#define CLK_PLL_AUDIO 4 +#define CLK_PLL_AUDIO_2X 5 +#define CLK_PLL_AUDIO_4X 6 +#define CLK_PLL_AUDIO_8X 7 +#define CLK_PLL_VIDEO0 8 +#define CLK_PLL_VIDEO0_2X 9 +#define CLK_PLL_VE 10 +#define CLK_PLL_DDR_BASE 11 +#define CLK_PLL_DDR 12 +#define CLK_PLL_DDR_OTHER 13 +#define CLK_PLL_PERIPH_BASE 14 +#define CLK_PLL_PERIPH 15 +#define CLK_PLL_PERIPH_SATA 16 +#define CLK_PLL_VIDEO1 17 +#define CLK_PLL_VIDEO1_2X 18 +#define CLK_PLL_GPU 19 + +/* The CPU clock is exported */ +#define CLK_AXI 21 +#define CLK_AXI_DRAM 22 +#define CLK_AHB 23 +#define CLK_APB0 24 +#define CLK_APB1 25 + +/* AHB gates are exported (23..68) */ +/* APB0 gates are exported (69..78) */ +/* APB1 gates are exported (79..95) */ +/* IP module clocks are exported (96..128) */ +/* DRAM gates are exported (129..142)*/ +/* Media (display engine clocks & etc) are exported (143..169) */ + +#define CLK_NUMBER_SUN4I (CLK_MBUS + 1) +#define CLK_NUMBER_SUN7I (CLK_OUT_B + 1) + +#endif /* _CCU_SUN4I_A10_H_ */ diff --git a/drivers/clk/sunxi-ng/ccu-sun5i.c b/drivers/clk/sunxi-ng/ccu-sun5i.c index 31d7ffda9aab..ab9e850b3707 100644 --- a/drivers/clk/sunxi-ng/ccu-sun5i.c +++ b/drivers/clk/sunxi-ng/ccu-sun5i.c @@ -976,8 +976,7 @@ static void __init sun5i_ccu_init(struct device_node *node, reg = of_io_request_and_map(node, 0, of_node_full_name(node)); if (IS_ERR(reg)) { - pr_err("%s: Could not map the clock registers\n", - of_node_full_name(node)); + pr_err("%pOF: Could not map the clock registers\n", node); return; } diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c index 4d6078fca9ac..8af434815fba 100644 --- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c +++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c @@ -1217,8 +1217,7 @@ static void __init sun6i_a31_ccu_setup(struct device_node *node) reg = of_io_request_and_map(node, 0, 
of_node_full_name(node)); if (IS_ERR(reg)) { - pr_err("%s: Could not map the clock registers\n", - of_node_full_name(node)); + pr_err("%pOF: Could not map the clock registers\n", node); return; } diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c index 8a753ed0426d..d93b452f0df9 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c @@ -716,8 +716,7 @@ static void __init sun8i_a23_ccu_setup(struct device_node *node) reg = of_io_request_and_map(node, 0, of_node_full_name(node)); if (IS_ERR(reg)) { - pr_err("%s: Could not map the clock registers\n", - of_node_full_name(node)); + pr_err("%pOF: Could not map the clock registers\n", node); return; } diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c index 10b38dc46f75..13eb5b23c5e7 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c @@ -777,8 +777,7 @@ static void __init sun8i_a33_ccu_setup(struct device_node *node) reg = of_io_request_and_map(node, 0, of_node_full_name(node)); if (IS_ERR(reg)) { - pr_err("%s: Could not map the clock registers\n", - of_node_full_name(node)); + pr_err("%pOF: Could not map the clock registers\n", node); return; } diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c index 947f9f6e05d2..e43acebdfbcd 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c @@ -418,14 +418,8 @@ static SUNXI_CCU_PHASE(mmc1_sample_clk, "mmc1-sample", "mmc1", static SUNXI_CCU_PHASE(mmc1_output_clk, "mmc1-output", "mmc1", 0x08c, 8, 3, 0); -/* TODO Support MMC2 clock's new timing mode. */ -static SUNXI_CCU_MP_WITH_MUX_GATE(mmc2_clk, "mmc2", mod0_default_parents, - 0x090, - 0, 4, /* M */ - 16, 2, /* P */ - 24, 2, /* mux */ - BIT(31), /* gate */ - 0); +static SUNXI_CCU_MP_MMC_WITH_MUX_GATE(mmc2_clk, "mmc2", mod0_default_parents, + 0x090, 0); static SUNXI_CCU_PHASE(mmc2_sample_clk, "mmc2-sample", "mmc2", 0x090, 20, 3, 0); diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c index 62e4f0d2b2fc..1729ff6a5aae 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c @@ -135,7 +135,7 @@ static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_de_clk, "pll-de", static const char * const cpux_parents[] = { "osc32k", "osc24M", "pll-cpux" , "pll-cpux" }; static SUNXI_CCU_MUX(cpux_clk, "cpux", cpux_parents, - 0x050, 16, 2, CLK_IS_CRITICAL); + 0x050, 16, 2, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT); static SUNXI_CCU_M(axi_clk, "axi", "cpux", 0x050, 0, 2, 0); @@ -1103,6 +1103,13 @@ static const struct sunxi_ccu_desc sun50i_h5_ccu_desc = { .num_resets = ARRAY_SIZE(sun50i_h5_ccu_resets), }; +static struct ccu_pll_nb sun8i_h3_pll_cpu_nb = { + .common = &pll_cpux_clk.common, + /* copy from pll_cpux_clk */ + .enable = BIT(31), + .lock = BIT(28), +}; + static struct ccu_mux_nb sun8i_h3_cpu_nb = { .common = &cpux_clk.common, .cm = &cpux_clk.mux, @@ -1118,8 +1125,7 @@ static void __init sunxi_h3_h5_ccu_init(struct device_node *node, reg = of_io_request_and_map(node, 0, of_node_full_name(node)); if (IS_ERR(reg)) { - pr_err("%s: Could not map the clock registers\n", - of_node_full_name(node)); + pr_err("%pOF: Could not map the clock registers\n", node); return; } @@ -1130,6 +1136,10 @@ static void __init sunxi_h3_h5_ccu_init(struct device_node *node, sunxi_ccu_probe(node, reg, desc); + /* Gate then ungate PLL CPU after any rate changes */ + 
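The two registrations that follow hook the H3/H5 CPU clocks into the common clock framework's rate-change notifiers: as I read the sunxi-ng helpers, the PLL notifier gates and ungates pll-cpux after a rate change so it relocks cleanly, while the mux notifier parks the cpux mux on the stable 24 MHz oscillator for the duration of the change. A generic sketch of that PRE/POST pattern using the stock clk notifier API (not the sunxi-ng implementation itself):

#include <linux/clk.h>
#include <linux/notifier.h>
#include <linux/printk.h>

static int cpu_rate_change_cb(struct notifier_block *nb,
			      unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;

	switch (event) {
	case PRE_RATE_CHANGE:
		/* e.g. reparent the CPU mux to a stable oscillator */
		pr_debug("cpu clk: %lu -> %lu Hz\n",
			 ndata->old_rate, ndata->new_rate);
		break;
	case POST_RATE_CHANGE:
		/* e.g. wait for the PLL to lock, then switch the mux back */
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block cpu_rate_nb = {
	.notifier_call = cpu_rate_change_cb,
};

/* registered against the PLL clock with clk_notifier_register(pll_clk, &cpu_rate_nb) */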
ccu_pll_notifier_register(&sun8i_h3_pll_cpu_nb); + + /* Reparent CPU during PLL CPU rate changes */ ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk, &sun8i_h3_cpu_nb); } diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r.c b/drivers/clk/sunxi-ng/ccu-sun8i-r.c index e54816ec1dbe..71feb7b24e8a 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-r.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-r.c @@ -290,8 +290,7 @@ static void __init sunxi_r_ccu_init(struct device_node *node, reg = of_io_request_and_map(node, 0, of_node_full_name(node)); if (IS_ERR(reg)) { - pr_err("%s: Could not map the clock registers\n", - of_node_full_name(node)); + pr_err("%pOF: Could not map the clock registers\n", node); return; } diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r.h b/drivers/clk/sunxi-ng/ccu-sun8i-r.h index a7a407f12b56..fb01bffb929d 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-r.h +++ b/drivers/clk/sunxi-ng/ccu-sun8i-r.h @@ -13,7 +13,7 @@ */ #ifndef _CCU_SUN8I_R_H -#define _CCU_SUN8I_R_H_ +#define _CCU_SUN8I_R_H #include #include diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c new file mode 100644 index 000000000000..933f2e68f42a --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c @@ -0,0 +1,1290 @@ +/* + * Copyright (c) 2017 Icenowy Zheng + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include + +#include "ccu_common.h" +#include "ccu_reset.h" + +#include "ccu_div.h" +#include "ccu_gate.h" +#include "ccu_mp.h" +#include "ccu_mult.h" +#include "ccu_nk.h" +#include "ccu_nkm.h" +#include "ccu_nkmp.h" +#include "ccu_nm.h" +#include "ccu_phase.h" + +#include "ccu-sun8i-r40.h" + +/* TODO: The result of N*K is required to be in [10, 88] range. */ +static struct ccu_nkmp pll_cpu_clk = { + .enable = BIT(31), + .lock = BIT(28), + .n = _SUNXI_CCU_MULT(8, 5), + .k = _SUNXI_CCU_MULT(4, 2), + .m = _SUNXI_CCU_DIV(0, 2), + .p = _SUNXI_CCU_DIV_MAX(16, 2, 4), + .common = { + .reg = 0x000, + .hw.init = CLK_HW_INIT("pll-cpu", + "osc24M", + &ccu_nkmp_ops, + CLK_SET_RATE_UNGATE), + }, +}; + +/* + * The Audio PLL is supposed to have 4 outputs: 3 fixed factors from + * the base (2x, 4x and 8x), and one variable divider (the one true + * pll audio). + * + * We don't have any need for the variable divider for now, so we just + * hardcode it to match with the clock names + */ +#define SUN8I_R40_PLL_AUDIO_REG 0x008 + +static SUNXI_CCU_NM_WITH_GATE_LOCK(pll_audio_base_clk, "pll-audio-base", + "osc24M", 0x008, + 8, 7, /* N */ + 0, 5, /* M */ + BIT(31), /* gate */ + BIT(28), /* lock */ + CLK_SET_RATE_UNGATE); + +/* TODO: The result of N/M is required to be in [8, 25] range. */ +static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video0_clk, "pll-video0", + "osc24M", 0x0010, + 8, 7, /* N */ + 0, 4, /* M */ + BIT(24), /* frac enable */ + BIT(25), /* frac select */ + 270000000, /* frac rate 0 */ + 297000000, /* frac rate 1 */ + BIT(31), /* gate */ + BIT(28), /* lock */ + CLK_SET_RATE_UNGATE); + +/* TODO: The result of N/M is required to be in [8, 25] range. 
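The range TODOs spread over these R40 PLLs (N*K within [10, 88] for pll-cpu, N/M within [8, 25] for the video/VE PLLs) are hardware constraints the factor helpers do not enforce yet. A quick arithmetic sketch of what the pll-cpu constraint means, assuming M = P = 1 so the rate is simply 24 MHz * N * K; the target rates are illustrative values, not taken from the patch:

#include <stdio.h>

/* pll-cpu: rate = 24 MHz * N * K (assuming M = P = 1), with N * K in [10, 88] */
static int nk_in_range(unsigned long target_hz)
{
	unsigned long nk = target_hz / 24000000UL;

	return nk >= 10 && nk <= 88;
}

int main(void)
{
	printf("1008 MHz: %s\n", nk_in_range(1008000000UL) ? "reachable" : "needs M/P dividers");
	printf(" 120 MHz: %s\n", nk_in_range(120000000UL) ? "reachable" : "needs M/P dividers");
	return 0;
}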
*/ +static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_ve_clk, "pll-ve", + "osc24M", 0x0018, + 8, 7, /* N */ + 0, 4, /* M */ + BIT(24), /* frac enable */ + BIT(25), /* frac select */ + 270000000, /* frac rate 0 */ + 297000000, /* frac rate 1 */ + BIT(31), /* gate */ + BIT(28), /* lock */ + CLK_SET_RATE_UNGATE); + +/* TODO: The result of N*K is required to be in [10, 77] range. */ +static SUNXI_CCU_NKM_WITH_GATE_LOCK(pll_ddr0_clk, "pll-ddr0", + "osc24M", 0x020, + 8, 5, /* N */ + 4, 2, /* K */ + 0, 2, /* M */ + BIT(31), /* gate */ + BIT(28), /* lock */ + CLK_SET_RATE_UNGATE); + +/* TODO: The result of N*K is required to be in [21, 58] range. */ +static struct ccu_nk pll_periph0_clk = { + .enable = BIT(31), + .lock = BIT(28), + .n = _SUNXI_CCU_MULT(8, 5), + .k = _SUNXI_CCU_MULT(4, 2), + .fixed_post_div = 2, + .common = { + .reg = 0x028, + .features = CCU_FEATURE_FIXED_POSTDIV, + .hw.init = CLK_HW_INIT("pll-periph0", "osc24M", + &ccu_nk_ops, + CLK_SET_RATE_UNGATE), + }, +}; + +static struct ccu_div pll_periph0_sata_clk = { + .enable = BIT(24), + .div = _SUNXI_CCU_DIV(0, 2), + /* + * The formula of pll-periph0 (1x) is 24MHz*N*K/2, and the formula + * of pll-periph0-sata is 24MHz*N*K/M/6, so the postdiv here is + * 6/2 = 3. + */ + .fixed_post_div = 3, + .common = { + .reg = 0x028, + .features = CCU_FEATURE_FIXED_POSTDIV, + .hw.init = CLK_HW_INIT("pll-periph0-sata", + "pll-periph0", + &ccu_div_ops, 0), + }, +}; + +/* TODO: The result of N*K is required to be in [21, 58] range. */ +static struct ccu_nk pll_periph1_clk = { + .enable = BIT(31), + .lock = BIT(28), + .n = _SUNXI_CCU_MULT(8, 5), + .k = _SUNXI_CCU_MULT(4, 2), + .fixed_post_div = 2, + .common = { + .reg = 0x02c, + .features = CCU_FEATURE_FIXED_POSTDIV, + .hw.init = CLK_HW_INIT("pll-periph1", "osc24M", + &ccu_nk_ops, + CLK_SET_RATE_UNGATE), + }, +}; + +/* TODO: The result of N/M is required to be in [8, 25] range. */ +static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video1_clk, "pll-video1", + "osc24M", 0x030, + 8, 7, /* N */ + 0, 4, /* M */ + BIT(24), /* frac enable */ + BIT(25), /* frac select */ + 270000000, /* frac rate 0 */ + 297000000, /* frac rate 1 */ + BIT(31), /* gate */ + BIT(28), /* lock */ + CLK_SET_RATE_UNGATE); + +static struct ccu_nkm pll_sata_clk = { + .enable = BIT(31), + .lock = BIT(28), + .n = _SUNXI_CCU_MULT(8, 5), + .k = _SUNXI_CCU_MULT(4, 2), + .m = _SUNXI_CCU_DIV(0, 2), + .fixed_post_div = 6, + .common = { + .reg = 0x034, + .features = CCU_FEATURE_FIXED_POSTDIV, + .hw.init = CLK_HW_INIT("pll-sata", "osc24M", + &ccu_nkm_ops, + CLK_SET_RATE_UNGATE), + }, +}; + +static const char * const pll_sata_out_parents[] = { "pll-sata", + "pll-periph0-sata" }; +static SUNXI_CCU_MUX_WITH_GATE(pll_sata_out_clk, "pll-sata-out", + pll_sata_out_parents, 0x034, + 30, 1, /* mux */ + BIT(14), /* gate */ + CLK_SET_RATE_PARENT); + +/* TODO: The result of N/M is required to be in [8, 25] range. */ +static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_gpu_clk, "pll-gpu", + "osc24M", 0x038, + 8, 7, /* N */ + 0, 4, /* M */ + BIT(24), /* frac enable */ + BIT(25), /* frac select */ + 270000000, /* frac rate 0 */ + 297000000, /* frac rate 1 */ + BIT(31), /* gate */ + BIT(28), /* lock */ + CLK_SET_RATE_UNGATE); + +/* + * The MIPI PLL has 2 modes: "MIPI" and "HDMI". + * + * The MIPI mode is a standard NKM-style clock. The HDMI mode is an + * integer / fractional clock with switchable multipliers and dividers. + * This is not supported here. We hardcode the PLL to MIPI mode. 
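Stepping back briefly from the MIPI PLL comment: the fixed post-divider on pll-periph0-sata earlier in this hunk is worth spelling out. With pll-periph0 = 24 MHz * N * K / 2 and pll-periph0-sata = 24 MHz * N * K / (M * 6), the SATA branch expressed as a child of pll-periph0 is parent / M / 3, i.e. the 2-bit M divider plus the fixed post-divider of 3 that the ccu_div encodes. A quick numeric check, assuming an illustrative N * K = 50 and M = 2:

#include <stdio.h>

int main(void)
{
	unsigned long osc = 24000000UL, nk = 50, m = 2;
	unsigned long direct   = osc * nk / (m * 6);	/* 24 MHz * N * K / (M * 6)     */
	unsigned long periph0  = osc * nk / 2;		/* 24 MHz * N * K / 2           */
	unsigned long modelled = periph0 / m / 3;	/* M divider + fixed post-div 3 */

	printf("direct: %lu Hz, modelled: %lu Hz\n", direct, modelled);
	return 0;
}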
+ * + * TODO: In the MIPI mode, M/N is required to be equal or lesser than 3, + * which cannot be implemented now. + */ +#define SUN8I_R40_PLL_MIPI_REG 0x040 + +static const char * const pll_mipi_parents[] = { "pll-video0" }; +static struct ccu_nkm pll_mipi_clk = { + .enable = BIT(31) | BIT(23) | BIT(22), + .lock = BIT(28), + .n = _SUNXI_CCU_MULT(8, 4), + .k = _SUNXI_CCU_MULT_MIN(4, 2, 2), + .m = _SUNXI_CCU_DIV(0, 4), + .mux = _SUNXI_CCU_MUX(21, 1), + .common = { + .reg = 0x040, + .hw.init = CLK_HW_INIT_PARENTS("pll-mipi", + pll_mipi_parents, + &ccu_nkm_ops, + CLK_SET_RATE_UNGATE) + }, +}; + +/* TODO: The result of N/M is required to be in [8, 25] range. */ +static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_de_clk, "pll-de", + "osc24M", 0x048, + 8, 7, /* N */ + 0, 4, /* M */ + BIT(24), /* frac enable */ + BIT(25), /* frac select */ + 270000000, /* frac rate 0 */ + 297000000, /* frac rate 1 */ + BIT(31), /* gate */ + BIT(28), /* lock */ + CLK_SET_RATE_UNGATE); + +/* TODO: The N factor is required to be in [16, 75] range. */ +static SUNXI_CCU_NM_WITH_GATE_LOCK(pll_ddr1_clk, "pll-ddr1", + "osc24M", 0x04c, + 8, 7, /* N */ + 0, 2, /* M */ + BIT(31), /* gate */ + BIT(28), /* lock */ + CLK_SET_RATE_UNGATE); + +static const char * const cpu_parents[] = { "osc32k", "osc24M", + "pll-cpu", "pll-cpu" }; +static SUNXI_CCU_MUX(cpu_clk, "cpu", cpu_parents, + 0x050, 16, 2, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT); + +static SUNXI_CCU_M(axi_clk, "axi", "cpu", 0x050, 0, 2, 0); + +static const char * const ahb1_parents[] = { "osc32k", "osc24M", + "axi", "pll-periph0" }; +static const struct ccu_mux_var_prediv ahb1_predivs[] = { + { .index = 3, .shift = 6, .width = 2 }, +}; +static struct ccu_div ahb1_clk = { + .div = _SUNXI_CCU_DIV_FLAGS(4, 2, CLK_DIVIDER_POWER_OF_TWO), + + .mux = { + .shift = 12, + .width = 2, + + .var_predivs = ahb1_predivs, + .n_var_predivs = ARRAY_SIZE(ahb1_predivs), + }, + + .common = { + .reg = 0x054, + .features = CCU_FEATURE_VARIABLE_PREDIV, + .hw.init = CLK_HW_INIT_PARENTS("ahb1", + ahb1_parents, + &ccu_div_ops, + 0), + }, +}; + +static struct clk_div_table apb1_div_table[] = { + { .val = 0, .div = 2 }, + { .val = 1, .div = 2 }, + { .val = 2, .div = 4 }, + { .val = 3, .div = 8 }, + { /* Sentinel */ }, +}; +static SUNXI_CCU_DIV_TABLE(apb1_clk, "apb1", "ahb1", + 0x054, 8, 2, apb1_div_table, 0); + +static const char * const apb2_parents[] = { "osc32k", "osc24M", + "pll-periph0-2x", + "pll-periph0-2x" }; +static SUNXI_CCU_MP_WITH_MUX(apb2_clk, "apb2", apb2_parents, 0x058, + 0, 5, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + 0); + +static SUNXI_CCU_GATE(bus_mipi_dsi_clk, "bus-mipi-dsi", "ahb1", + 0x060, BIT(1), 0); +static SUNXI_CCU_GATE(bus_ce_clk, "bus-ce", "ahb1", + 0x060, BIT(5), 0); +static SUNXI_CCU_GATE(bus_dma_clk, "bus-dma", "ahb1", + 0x060, BIT(6), 0); +static SUNXI_CCU_GATE(bus_mmc0_clk, "bus-mmc0", "ahb1", + 0x060, BIT(8), 0); +static SUNXI_CCU_GATE(bus_mmc1_clk, "bus-mmc1", "ahb1", + 0x060, BIT(9), 0); +static SUNXI_CCU_GATE(bus_mmc2_clk, "bus-mmc2", "ahb1", + 0x060, BIT(10), 0); +static SUNXI_CCU_GATE(bus_mmc3_clk, "bus-mmc3", "ahb1", + 0x060, BIT(11), 0); +static SUNXI_CCU_GATE(bus_nand_clk, "bus-nand", "ahb1", + 0x060, BIT(13), 0); +static SUNXI_CCU_GATE(bus_dram_clk, "bus-dram", "ahb1", + 0x060, BIT(14), 0); +static SUNXI_CCU_GATE(bus_emac_clk, "bus-emac", "ahb1", + 0x060, BIT(17), 0); +static SUNXI_CCU_GATE(bus_ts_clk, "bus-ts", "ahb1", + 0x060, BIT(18), 0); +static SUNXI_CCU_GATE(bus_hstimer_clk, "bus-hstimer", "ahb1", + 0x060, BIT(19), 0); +static 
SUNXI_CCU_GATE(bus_spi0_clk, "bus-spi0", "ahb1", + 0x060, BIT(20), 0); +static SUNXI_CCU_GATE(bus_spi1_clk, "bus-spi1", "ahb1", + 0x060, BIT(21), 0); +static SUNXI_CCU_GATE(bus_spi2_clk, "bus-spi2", "ahb1", + 0x060, BIT(22), 0); +static SUNXI_CCU_GATE(bus_spi3_clk, "bus-spi3", "ahb1", + 0x060, BIT(23), 0); +static SUNXI_CCU_GATE(bus_sata_clk, "bus-sata", "ahb1", + 0x060, BIT(24), 0); +static SUNXI_CCU_GATE(bus_otg_clk, "bus-otg", "ahb1", + 0x060, BIT(25), 0); +static SUNXI_CCU_GATE(bus_ehci0_clk, "bus-ehci0", "ahb1", + 0x060, BIT(26), 0); +static SUNXI_CCU_GATE(bus_ehci1_clk, "bus-ehci1", "ahb1", + 0x060, BIT(27), 0); +static SUNXI_CCU_GATE(bus_ehci2_clk, "bus-ehci2", "ahb1", + 0x060, BIT(28), 0); +static SUNXI_CCU_GATE(bus_ohci0_clk, "bus-ohci0", "ahb1", + 0x060, BIT(29), 0); +static SUNXI_CCU_GATE(bus_ohci1_clk, "bus-ohci1", "ahb1", + 0x060, BIT(30), 0); +static SUNXI_CCU_GATE(bus_ohci2_clk, "bus-ohci2", "ahb1", + 0x060, BIT(31), 0); + +static SUNXI_CCU_GATE(bus_ve_clk, "bus-ve", "ahb1", + 0x064, BIT(0), 0); +static SUNXI_CCU_GATE(bus_mp_clk, "bus-mp", "ahb1", + 0x064, BIT(2), 0); +static SUNXI_CCU_GATE(bus_deinterlace_clk, "bus-deinterlace", "ahb1", + 0x064, BIT(5), 0); +static SUNXI_CCU_GATE(bus_csi0_clk, "bus-csi0", "ahb1", + 0x064, BIT(8), 0); +static SUNXI_CCU_GATE(bus_csi1_clk, "bus-csi1", "ahb1", + 0x064, BIT(9), 0); +static SUNXI_CCU_GATE(bus_hdmi0_clk, "bus-hdmi0", "ahb1", + 0x064, BIT(10), 0); +static SUNXI_CCU_GATE(bus_hdmi1_clk, "bus-hdmi1", "ahb1", + 0x064, BIT(11), 0); +static SUNXI_CCU_GATE(bus_de_clk, "bus-de", "ahb1", + 0x064, BIT(12), 0); +static SUNXI_CCU_GATE(bus_tve0_clk, "bus-tve0", "ahb1", + 0x064, BIT(13), 0); +static SUNXI_CCU_GATE(bus_tve1_clk, "bus-tve1", "ahb1", + 0x064, BIT(14), 0); +static SUNXI_CCU_GATE(bus_tve_top_clk, "bus-tve-top", "ahb1", + 0x064, BIT(15), 0); +static SUNXI_CCU_GATE(bus_gmac_clk, "bus-gmac", "ahb1", + 0x064, BIT(17), 0); +static SUNXI_CCU_GATE(bus_gpu_clk, "bus-gpu", "ahb1", + 0x064, BIT(20), 0); +static SUNXI_CCU_GATE(bus_tvd0_clk, "bus-tvd0", "ahb1", + 0x064, BIT(21), 0); +static SUNXI_CCU_GATE(bus_tvd1_clk, "bus-tvd1", "ahb1", + 0x064, BIT(22), 0); +static SUNXI_CCU_GATE(bus_tvd2_clk, "bus-tvd2", "ahb1", + 0x064, BIT(23), 0); +static SUNXI_CCU_GATE(bus_tvd3_clk, "bus-tvd3", "ahb1", + 0x064, BIT(24), 0); +static SUNXI_CCU_GATE(bus_tvd_top_clk, "bus-tvd-top", "ahb1", + 0x064, BIT(25), 0); +static SUNXI_CCU_GATE(bus_tcon_lcd0_clk, "bus-tcon-lcd0", "ahb1", + 0x064, BIT(26), 0); +static SUNXI_CCU_GATE(bus_tcon_lcd1_clk, "bus-tcon-lcd1", "ahb1", + 0x064, BIT(27), 0); +static SUNXI_CCU_GATE(bus_tcon_tv0_clk, "bus-tcon-tv0", "ahb1", + 0x064, BIT(28), 0); +static SUNXI_CCU_GATE(bus_tcon_tv1_clk, "bus-tcon-tv1", "ahb1", + 0x064, BIT(29), 0); +static SUNXI_CCU_GATE(bus_tcon_top_clk, "bus-tcon-top", "ahb1", + 0x064, BIT(30), 0); + +static SUNXI_CCU_GATE(bus_codec_clk, "bus-codec", "apb1", + 0x068, BIT(0), 0); +static SUNXI_CCU_GATE(bus_spdif_clk, "bus-spdif", "apb1", + 0x068, BIT(1), 0); +static SUNXI_CCU_GATE(bus_ac97_clk, "bus-ac97", "apb1", + 0x068, BIT(2), 0); +static SUNXI_CCU_GATE(bus_pio_clk, "bus-pio", "apb1", + 0x068, BIT(5), 0); +static SUNXI_CCU_GATE(bus_ir0_clk, "bus-ir0", "apb1", + 0x068, BIT(6), 0); +static SUNXI_CCU_GATE(bus_ir1_clk, "bus-ir1", "apb1", + 0x068, BIT(7), 0); +static SUNXI_CCU_GATE(bus_ths_clk, "bus-ths", "apb1", + 0x068, BIT(8), 0); +static SUNXI_CCU_GATE(bus_keypad_clk, "bus-keypad", "apb1", + 0x068, BIT(10), 0); +static SUNXI_CCU_GATE(bus_i2s0_clk, "bus-i2s0", "apb1", + 0x068, BIT(12), 0); +static 
SUNXI_CCU_GATE(bus_i2s1_clk, "bus-i2s1", "apb1", + 0x068, BIT(13), 0); +static SUNXI_CCU_GATE(bus_i2s2_clk, "bus-i2s2", "apb1", + 0x068, BIT(14), 0); + +static SUNXI_CCU_GATE(bus_i2c0_clk, "bus-i2c0", "apb2", + 0x06c, BIT(0), 0); +static SUNXI_CCU_GATE(bus_i2c1_clk, "bus-i2c1", "apb2", + 0x06c, BIT(1), 0); +static SUNXI_CCU_GATE(bus_i2c2_clk, "bus-i2c2", "apb2", + 0x06c, BIT(2), 0); +static SUNXI_CCU_GATE(bus_i2c3_clk, "bus-i2c3", "apb2", + 0x06c, BIT(3), 0); +/* + * In the datasheet this bit is "Reserved", however the gate exists in + * the BSP source code. + */ +static SUNXI_CCU_GATE(bus_can_clk, "bus-can", "apb2", + 0x06c, BIT(4), 0); +static SUNXI_CCU_GATE(bus_scr_clk, "bus-scr", "apb2", + 0x06c, BIT(5), 0); +static SUNXI_CCU_GATE(bus_ps20_clk, "bus-ps20", "apb2", + 0x06c, BIT(6), 0); +static SUNXI_CCU_GATE(bus_ps21_clk, "bus-ps21", "apb2", + 0x06c, BIT(7), 0); +static SUNXI_CCU_GATE(bus_i2c4_clk, "bus-i2c4", "apb2", + 0x06c, BIT(15), 0); +static SUNXI_CCU_GATE(bus_uart0_clk, "bus-uart0", "apb2", + 0x06c, BIT(16), 0); +static SUNXI_CCU_GATE(bus_uart1_clk, "bus-uart1", "apb2", + 0x06c, BIT(17), 0); +static SUNXI_CCU_GATE(bus_uart2_clk, "bus-uart2", "apb2", + 0x06c, BIT(18), 0); +static SUNXI_CCU_GATE(bus_uart3_clk, "bus-uart3", "apb2", + 0x06c, BIT(19), 0); +static SUNXI_CCU_GATE(bus_uart4_clk, "bus-uart4", "apb2", + 0x06c, BIT(20), 0); +static SUNXI_CCU_GATE(bus_uart5_clk, "bus-uart5", "apb2", + 0x06c, BIT(21), 0); +static SUNXI_CCU_GATE(bus_uart6_clk, "bus-uart6", "apb2", + 0x06c, BIT(22), 0); +static SUNXI_CCU_GATE(bus_uart7_clk, "bus-uart7", "apb2", + 0x06c, BIT(23), 0); + +static SUNXI_CCU_GATE(bus_dbg_clk, "bus-dbg", "ahb1", + 0x070, BIT(7), 0); + +static const char * const ths_parents[] = { "osc24M" }; +static struct ccu_div ths_clk = { + .enable = BIT(31), + .div = _SUNXI_CCU_DIV_FLAGS(0, 2, CLK_DIVIDER_POWER_OF_TWO), + .mux = _SUNXI_CCU_MUX(24, 2), + .common = { + .reg = 0x074, + .hw.init = CLK_HW_INIT_PARENTS("ths", + ths_parents, + &ccu_div_ops, + 0), + }, +}; + +static const char * const mod0_default_parents[] = { "osc24M", "pll-periph0", + "pll-periph1" }; +static SUNXI_CCU_MP_WITH_MUX_GATE(nand_clk, "nand", mod0_default_parents, 0x080, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(mmc0_clk, "mmc0", mod0_default_parents, 0x088, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(mmc1_clk, "mmc1", mod0_default_parents, 0x08c, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(mmc2_clk, "mmc2", mod0_default_parents, 0x090, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(mmc3_clk, "mmc3", mod0_default_parents, 0x094, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static const char * const ts_parents[] = { "osc24M", "pll-periph0", }; +static SUNXI_CCU_MP_WITH_MUX_GATE(ts_clk, "ts", ts_parents, 0x098, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 4, /* mux */ + BIT(31), /* gate */ + 0); + +static const char * const ce_parents[] = { "osc24M", "pll-periph0-2x", + "pll-periph1-2x" }; +static SUNXI_CCU_MP_WITH_MUX_GATE(ce_clk, "ce", ce_parents, 0x09c, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(spi0_clk, "spi0", mod0_default_parents, 0x0a0, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static 
SUNXI_CCU_MP_WITH_MUX_GATE(spi1_clk, "spi1", mod0_default_parents, 0x0a4, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(spi2_clk, "spi2", mod0_default_parents, 0x0a8, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(spi3_clk, "spi3", mod0_default_parents, 0x0ac, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static const char * const i2s_parents[] = { "pll-audio-8x", "pll-audio-4x", + "pll-audio-2x", "pll-audio" }; +static SUNXI_CCU_MUX_WITH_GATE(i2s0_clk, "i2s0", i2s_parents, + 0x0b0, 16, 2, BIT(31), CLK_SET_RATE_PARENT); + +static SUNXI_CCU_MUX_WITH_GATE(i2s1_clk, "i2s1", i2s_parents, + 0x0b4, 16, 2, BIT(31), CLK_SET_RATE_PARENT); + +static SUNXI_CCU_MUX_WITH_GATE(i2s2_clk, "i2s2", i2s_parents, + 0x0b8, 16, 2, BIT(31), CLK_SET_RATE_PARENT); + +static SUNXI_CCU_MUX_WITH_GATE(ac97_clk, "ac97", i2s_parents, + 0x0bc, 16, 2, BIT(31), CLK_SET_RATE_PARENT); + +static SUNXI_CCU_MUX_WITH_GATE(spdif_clk, "spdif", i2s_parents, + 0x0c0, 16, 2, BIT(31), CLK_SET_RATE_PARENT); + +static const char * const keypad_parents[] = { "osc24M", "osc32k" }; +static const u8 keypad_table[] = { 0, 2 }; +static struct ccu_mp keypad_clk = { + .enable = BIT(31), + .m = _SUNXI_CCU_DIV(0, 5), + .p = _SUNXI_CCU_DIV(16, 2), + .mux = _SUNXI_CCU_MUX_TABLE(24, 2, keypad_table), + .common = { + .reg = 0x0c4, + .hw.init = CLK_HW_INIT_PARENTS("keypad", + keypad_parents, + &ccu_mp_ops, + 0), + } +}; + +static const char * const sata_parents[] = { "pll-sata-out", "sata-ext" }; +static SUNXI_CCU_MUX_WITH_GATE(sata_clk, "sata", sata_parents, + 0x0c8, 24, 1, BIT(31), CLK_SET_RATE_PARENT); + +/* + * There are 3 OHCI 12M clock source selection bits in this register. + * We will force them to 0 (12M divided from 48M). 
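+ * The actual write is done in sun8i_r40_ccu_setup() below, which clears
+ * bits [25:20] of this register before the clocks are registered.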
+ */ +#define SUN8I_R40_USB_CLK_REG 0x0cc + +static SUNXI_CCU_GATE(usb_phy0_clk, "usb-phy0", "osc24M", + 0x0cc, BIT(8), 0); +static SUNXI_CCU_GATE(usb_phy1_clk, "usb-phy1", "osc24M", + 0x0cc, BIT(9), 0); +static SUNXI_CCU_GATE(usb_phy2_clk, "usb-phy2", "osc24M", + 0x0cc, BIT(10), 0); +static SUNXI_CCU_GATE(usb_ohci0_clk, "usb-ohci0", "osc12M", + 0x0cc, BIT(16), 0); +static SUNXI_CCU_GATE(usb_ohci1_clk, "usb-ohci1", "osc12M", + 0x0cc, BIT(17), 0); +static SUNXI_CCU_GATE(usb_ohci2_clk, "usb-ohci2", "osc12M", + 0x0cc, BIT(18), 0); + +static const char * const ir_parents[] = { "osc24M", "pll-periph0", + "pll-periph1", "osc32k" }; +static SUNXI_CCU_MP_WITH_MUX_GATE(ir0_clk, "ir0", ir_parents, 0x0d0, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(ir1_clk, "ir1", ir_parents, 0x0d4, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static const char * const dram_parents[] = { "pll-ddr0", "pll-ddr1" }; +static SUNXI_CCU_M_WITH_MUX(dram_clk, "dram", dram_parents, + 0x0f4, 0, 2, 20, 2, CLK_IS_CRITICAL); + +static SUNXI_CCU_GATE(dram_ve_clk, "dram-ve", "dram", + 0x100, BIT(0), 0); +static SUNXI_CCU_GATE(dram_csi0_clk, "dram-csi0", "dram", + 0x100, BIT(1), 0); +static SUNXI_CCU_GATE(dram_csi1_clk, "dram-csi1", "dram", + 0x100, BIT(2), 0); +static SUNXI_CCU_GATE(dram_ts_clk, "dram-ts", "dram", + 0x100, BIT(3), 0); +static SUNXI_CCU_GATE(dram_tvd_clk, "dram-tvd", "dram", + 0x100, BIT(4), 0); +static SUNXI_CCU_GATE(dram_mp_clk, "dram-mp", "dram", + 0x100, BIT(5), 0); +static SUNXI_CCU_GATE(dram_deinterlace_clk, "dram-deinterlace", "dram", + 0x100, BIT(6), 0); + +static const char * const de_parents[] = { "pll-periph0-2x", "pll-de" }; +static SUNXI_CCU_M_WITH_MUX_GATE(de_clk, "de", de_parents, + 0x104, 0, 4, 24, 3, BIT(31), 0); +static SUNXI_CCU_M_WITH_MUX_GATE(mp_clk, "mp", de_parents, + 0x108, 0, 4, 24, 3, BIT(31), 0); + +static const char * const tcon_parents[] = { "pll-video0", "pll-video1", + "pll-video0-2x", "pll-video1-2x", + "pll-mipi" }; +static SUNXI_CCU_MUX_WITH_GATE(tcon_lcd0_clk, "tcon-lcd0", tcon_parents, + 0x110, 24, 3, BIT(31), CLK_SET_RATE_PARENT); +static SUNXI_CCU_MUX_WITH_GATE(tcon_lcd1_clk, "tcon-lcd1", tcon_parents, + 0x114, 24, 3, BIT(31), CLK_SET_RATE_PARENT); +static SUNXI_CCU_M_WITH_MUX_GATE(tcon_tv0_clk, "tcon-tv0", tcon_parents, + 0x118, 0, 4, 24, 3, BIT(31), 0); +static SUNXI_CCU_M_WITH_MUX_GATE(tcon_tv1_clk, "tcon-tv1", tcon_parents, + 0x11c, 0, 4, 24, 3, BIT(31), 0); + +static const char * const deinterlace_parents[] = { "pll-periph0", + "pll-periph1" }; +static SUNXI_CCU_M_WITH_MUX_GATE(deinterlace_clk, "deinterlace", + deinterlace_parents, 0x124, 0, 4, 24, 3, + BIT(31), 0); + +static const char * const csi_mclk_parents[] = { "osc24M", "pll-video1", + "pll-periph1" }; +static SUNXI_CCU_M_WITH_MUX_GATE(csi1_mclk_clk, "csi1-mclk", csi_mclk_parents, + 0x130, 0, 5, 8, 3, BIT(15), 0); + +static const char * const csi_sclk_parents[] = { "pll-periph0", "pll-periph1" }; +static SUNXI_CCU_M_WITH_MUX_GATE(csi_sclk_clk, "csi-sclk", csi_sclk_parents, + 0x134, 16, 4, 24, 3, BIT(31), 0); + +static SUNXI_CCU_M_WITH_MUX_GATE(csi0_mclk_clk, "csi0-mclk", csi_mclk_parents, + 0x134, 0, 5, 8, 3, BIT(15), 0); + +static SUNXI_CCU_M_WITH_GATE(ve_clk, "ve", "pll-ve", + 0x13c, 16, 3, BIT(31), CLK_SET_RATE_PARENT); + +static SUNXI_CCU_GATE(codec_clk, "codec", "pll-audio", + 0x140, BIT(31), CLK_SET_RATE_PARENT); +static SUNXI_CCU_GATE(avs_clk, "avs", "osc24M", + 0x144, BIT(31), 0); + +static const 
char * const hdmi_parents[] = { "pll-video0", "pll-video1" }; +static SUNXI_CCU_M_WITH_MUX_GATE(hdmi_clk, "hdmi", hdmi_parents, + 0x150, 0, 4, 24, 2, BIT(31), 0); + +static SUNXI_CCU_GATE(hdmi_slow_clk, "hdmi-slow", "osc24M", + 0x154, BIT(31), 0); + +/* + * In the SoC's user manual, the P factor is mentioned, but not used in + * the frequency formula. + * + * Here the factor is included, according to the BSP kernel source, + * which contains the P factor of this clock. + */ +static const char * const mbus_parents[] = { "osc24M", "pll-periph0-2x", + "pll-ddr0" }; +static SUNXI_CCU_MP_WITH_MUX_GATE(mbus_clk, "mbus", mbus_parents, 0x15c, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + CLK_IS_CRITICAL); + +static const char * const dsi_dphy_parents[] = { "pll-video0", "pll-video1", + "pll-periph0" }; +static SUNXI_CCU_M_WITH_MUX_GATE(dsi_dphy_clk, "dsi-dphy", dsi_dphy_parents, + 0x168, 0, 4, 8, 2, BIT(15), 0); + +static SUNXI_CCU_M_WITH_MUX_GATE(tve0_clk, "tve0", tcon_parents, + 0x180, 0, 4, 24, 3, BIT(31), 0); +static SUNXI_CCU_M_WITH_MUX_GATE(tve1_clk, "tve1", tcon_parents, + 0x184, 0, 4, 24, 3, BIT(31), 0); + +static const char * const tvd_parents[] = { "pll-video0", "pll-video1", + "pll-video0-2x", "pll-video1-2x" }; +static SUNXI_CCU_M_WITH_MUX_GATE(tvd0_clk, "tvd0", tvd_parents, + 0x188, 0, 4, 24, 3, BIT(31), 0); +static SUNXI_CCU_M_WITH_MUX_GATE(tvd1_clk, "tvd1", tvd_parents, + 0x18c, 0, 4, 24, 3, BIT(31), 0); +static SUNXI_CCU_M_WITH_MUX_GATE(tvd2_clk, "tvd2", tvd_parents, + 0x190, 0, 4, 24, 3, BIT(31), 0); +static SUNXI_CCU_M_WITH_MUX_GATE(tvd3_clk, "tvd3", tvd_parents, + 0x194, 0, 4, 24, 3, BIT(31), 0); + +static SUNXI_CCU_M_WITH_GATE(gpu_clk, "gpu", "pll-gpu", + 0x1a0, 0, 3, BIT(31), CLK_SET_RATE_PARENT); + +static const char * const out_parents[] = { "osc24M", "osc32k", "osc24M" }; +static const struct ccu_mux_fixed_prediv out_predivs[] = { + { .index = 0, .div = 750, }, +}; + +static struct ccu_mp outa_clk = { + .enable = BIT(31), + .m = _SUNXI_CCU_DIV(8, 5), + .p = _SUNXI_CCU_DIV(20, 2), + .mux = { + .shift = 24, + .width = 2, + .fixed_predivs = out_predivs, + .n_predivs = ARRAY_SIZE(out_predivs), + }, + .common = { + .reg = 0x1f0, + .features = CCU_FEATURE_FIXED_PREDIV, + .hw.init = CLK_HW_INIT_PARENTS("outa", out_parents, + &ccu_mp_ops, 0), + } +}; + +static struct ccu_mp outb_clk = { + .enable = BIT(31), + .m = _SUNXI_CCU_DIV(8, 5), + .p = _SUNXI_CCU_DIV(20, 2), + .mux = { + .shift = 24, + .width = 2, + .fixed_predivs = out_predivs, + .n_predivs = ARRAY_SIZE(out_predivs), + }, + .common = { + .reg = 0x1f4, + .features = CCU_FEATURE_FIXED_PREDIV, + .hw.init = CLK_HW_INIT_PARENTS("outb", out_parents, + &ccu_mp_ops, 0), + } +}; + +static struct ccu_common *sun8i_r40_ccu_clks[] = { + &pll_cpu_clk.common, + &pll_audio_base_clk.common, + &pll_video0_clk.common, + &pll_ve_clk.common, + &pll_ddr0_clk.common, + &pll_periph0_clk.common, + &pll_periph0_sata_clk.common, + &pll_periph1_clk.common, + &pll_video1_clk.common, + &pll_sata_clk.common, + &pll_sata_out_clk.common, + &pll_gpu_clk.common, + &pll_mipi_clk.common, + &pll_de_clk.common, + &pll_ddr1_clk.common, + &cpu_clk.common, + &axi_clk.common, + &ahb1_clk.common, + &apb1_clk.common, + &apb2_clk.common, + &bus_mipi_dsi_clk.common, + &bus_ce_clk.common, + &bus_dma_clk.common, + &bus_mmc0_clk.common, + &bus_mmc1_clk.common, + &bus_mmc2_clk.common, + &bus_mmc3_clk.common, + &bus_nand_clk.common, + &bus_dram_clk.common, + &bus_emac_clk.common, + &bus_ts_clk.common, + &bus_hstimer_clk.common, + 
&bus_spi0_clk.common, + &bus_spi1_clk.common, + &bus_spi2_clk.common, + &bus_spi3_clk.common, + &bus_sata_clk.common, + &bus_otg_clk.common, + &bus_ehci0_clk.common, + &bus_ehci1_clk.common, + &bus_ehci2_clk.common, + &bus_ohci0_clk.common, + &bus_ohci1_clk.common, + &bus_ohci2_clk.common, + &bus_ve_clk.common, + &bus_mp_clk.common, + &bus_deinterlace_clk.common, + &bus_csi0_clk.common, + &bus_csi1_clk.common, + &bus_hdmi0_clk.common, + &bus_hdmi1_clk.common, + &bus_de_clk.common, + &bus_tve0_clk.common, + &bus_tve1_clk.common, + &bus_tve_top_clk.common, + &bus_gmac_clk.common, + &bus_gpu_clk.common, + &bus_tvd0_clk.common, + &bus_tvd1_clk.common, + &bus_tvd2_clk.common, + &bus_tvd3_clk.common, + &bus_tvd_top_clk.common, + &bus_tcon_lcd0_clk.common, + &bus_tcon_lcd1_clk.common, + &bus_tcon_tv0_clk.common, + &bus_tcon_tv1_clk.common, + &bus_tcon_top_clk.common, + &bus_codec_clk.common, + &bus_spdif_clk.common, + &bus_ac97_clk.common, + &bus_pio_clk.common, + &bus_ir0_clk.common, + &bus_ir1_clk.common, + &bus_ths_clk.common, + &bus_keypad_clk.common, + &bus_i2s0_clk.common, + &bus_i2s1_clk.common, + &bus_i2s2_clk.common, + &bus_i2c0_clk.common, + &bus_i2c1_clk.common, + &bus_i2c2_clk.common, + &bus_i2c3_clk.common, + &bus_can_clk.common, + &bus_scr_clk.common, + &bus_ps20_clk.common, + &bus_ps21_clk.common, + &bus_i2c4_clk.common, + &bus_uart0_clk.common, + &bus_uart1_clk.common, + &bus_uart2_clk.common, + &bus_uart3_clk.common, + &bus_uart4_clk.common, + &bus_uart5_clk.common, + &bus_uart6_clk.common, + &bus_uart7_clk.common, + &bus_dbg_clk.common, + &ths_clk.common, + &nand_clk.common, + &mmc0_clk.common, + &mmc1_clk.common, + &mmc2_clk.common, + &mmc3_clk.common, + &ts_clk.common, + &ce_clk.common, + &spi0_clk.common, + &spi1_clk.common, + &spi2_clk.common, + &spi3_clk.common, + &i2s0_clk.common, + &i2s1_clk.common, + &i2s2_clk.common, + &ac97_clk.common, + &spdif_clk.common, + &keypad_clk.common, + &sata_clk.common, + &usb_phy0_clk.common, + &usb_phy1_clk.common, + &usb_phy2_clk.common, + &usb_ohci0_clk.common, + &usb_ohci1_clk.common, + &usb_ohci2_clk.common, + &ir0_clk.common, + &ir1_clk.common, + &dram_clk.common, + &dram_ve_clk.common, + &dram_csi0_clk.common, + &dram_csi1_clk.common, + &dram_ts_clk.common, + &dram_tvd_clk.common, + &dram_mp_clk.common, + &dram_deinterlace_clk.common, + &de_clk.common, + &mp_clk.common, + &tcon_lcd0_clk.common, + &tcon_lcd1_clk.common, + &tcon_tv0_clk.common, + &tcon_tv1_clk.common, + &deinterlace_clk.common, + &csi1_mclk_clk.common, + &csi_sclk_clk.common, + &csi0_mclk_clk.common, + &ve_clk.common, + &codec_clk.common, + &avs_clk.common, + &hdmi_clk.common, + &hdmi_slow_clk.common, + &mbus_clk.common, + &dsi_dphy_clk.common, + &tve0_clk.common, + &tve1_clk.common, + &tvd0_clk.common, + &tvd1_clk.common, + &tvd2_clk.common, + &tvd3_clk.common, + &gpu_clk.common, + &outa_clk.common, + &outb_clk.common, +}; + +/* Fixed Factor clocks */ +static CLK_FIXED_FACTOR(osc12M_clk, "osc12M", "osc24M", 2, 1, 0); + +/* We hardcode the divider to 4 for now */ +static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio", + "pll-audio-base", 4, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x", + "pll-audio-base", 2, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x", + "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR(pll_audio_8x_clk, "pll-audio-8x", + "pll-audio-base", 1, 2, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR(pll_periph0_2x_clk, "pll-periph0-2x", + "pll-periph0", 1, 2, 0); 
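+/*
+ * A worked example for the fixed factors above (the rate itself is
+ * illustrative): sun8i_r40_ccu_setup() below forces the PLL-Audio-1x
+ * divider to 4 via bits [19:16] of the PLL_AUDIO register, which is the
+ * /4 assumed by pll-audio. If pll-audio-base were running at 98.304 MHz,
+ * pll-audio would be 24.576 MHz, pll-audio-2x 49.152 MHz, pll-audio-4x
+ * 98.304 MHz and pll-audio-8x 196.608 MHz, while pll-periph0-2x and
+ * pll-periph1-2x are simply twice their respective parents.
+ */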
+static CLK_FIXED_FACTOR(pll_periph1_2x_clk, "pll-periph1-2x", + "pll-periph1", 1, 2, 0); +static CLK_FIXED_FACTOR(pll_video0_2x_clk, "pll-video0-2x", + "pll-video0", 1, 2, 0); +static CLK_FIXED_FACTOR(pll_video1_2x_clk, "pll-video1-2x", + "pll-video1", 1, 2, 0); + +static struct clk_hw_onecell_data sun8i_r40_hw_clks = { + .hws = { + [CLK_OSC_12M] = &osc12M_clk.hw, + [CLK_PLL_CPU] = &pll_cpu_clk.common.hw, + [CLK_PLL_AUDIO_BASE] = &pll_audio_base_clk.common.hw, + [CLK_PLL_AUDIO] = &pll_audio_clk.hw, + [CLK_PLL_AUDIO_2X] = &pll_audio_2x_clk.hw, + [CLK_PLL_AUDIO_4X] = &pll_audio_4x_clk.hw, + [CLK_PLL_AUDIO_8X] = &pll_audio_8x_clk.hw, + [CLK_PLL_VIDEO0] = &pll_video0_clk.common.hw, + [CLK_PLL_VIDEO0_2X] = &pll_video0_2x_clk.hw, + [CLK_PLL_VE] = &pll_ve_clk.common.hw, + [CLK_PLL_DDR0] = &pll_ddr0_clk.common.hw, + [CLK_PLL_PERIPH0] = &pll_periph0_clk.common.hw, + [CLK_PLL_PERIPH0_SATA] = &pll_periph0_sata_clk.common.hw, + [CLK_PLL_PERIPH0_2X] = &pll_periph0_2x_clk.hw, + [CLK_PLL_PERIPH1] = &pll_periph1_clk.common.hw, + [CLK_PLL_PERIPH1_2X] = &pll_periph1_2x_clk.hw, + [CLK_PLL_VIDEO1] = &pll_video1_clk.common.hw, + [CLK_PLL_VIDEO1_2X] = &pll_video1_2x_clk.hw, + [CLK_PLL_SATA] = &pll_sata_clk.common.hw, + [CLK_PLL_SATA_OUT] = &pll_sata_out_clk.common.hw, + [CLK_PLL_GPU] = &pll_gpu_clk.common.hw, + [CLK_PLL_MIPI] = &pll_mipi_clk.common.hw, + [CLK_PLL_DE] = &pll_de_clk.common.hw, + [CLK_PLL_DDR1] = &pll_ddr1_clk.common.hw, + [CLK_CPU] = &cpu_clk.common.hw, + [CLK_AXI] = &axi_clk.common.hw, + [CLK_AHB1] = &ahb1_clk.common.hw, + [CLK_APB1] = &apb1_clk.common.hw, + [CLK_APB2] = &apb2_clk.common.hw, + [CLK_BUS_MIPI_DSI] = &bus_mipi_dsi_clk.common.hw, + [CLK_BUS_CE] = &bus_ce_clk.common.hw, + [CLK_BUS_DMA] = &bus_dma_clk.common.hw, + [CLK_BUS_MMC0] = &bus_mmc0_clk.common.hw, + [CLK_BUS_MMC1] = &bus_mmc1_clk.common.hw, + [CLK_BUS_MMC2] = &bus_mmc2_clk.common.hw, + [CLK_BUS_MMC3] = &bus_mmc3_clk.common.hw, + [CLK_BUS_NAND] = &bus_nand_clk.common.hw, + [CLK_BUS_DRAM] = &bus_dram_clk.common.hw, + [CLK_BUS_EMAC] = &bus_emac_clk.common.hw, + [CLK_BUS_TS] = &bus_ts_clk.common.hw, + [CLK_BUS_HSTIMER] = &bus_hstimer_clk.common.hw, + [CLK_BUS_SPI0] = &bus_spi0_clk.common.hw, + [CLK_BUS_SPI1] = &bus_spi1_clk.common.hw, + [CLK_BUS_SPI2] = &bus_spi2_clk.common.hw, + [CLK_BUS_SPI3] = &bus_spi3_clk.common.hw, + [CLK_BUS_SATA] = &bus_sata_clk.common.hw, + [CLK_BUS_OTG] = &bus_otg_clk.common.hw, + [CLK_BUS_EHCI0] = &bus_ehci0_clk.common.hw, + [CLK_BUS_EHCI1] = &bus_ehci1_clk.common.hw, + [CLK_BUS_EHCI2] = &bus_ehci2_clk.common.hw, + [CLK_BUS_OHCI0] = &bus_ohci0_clk.common.hw, + [CLK_BUS_OHCI1] = &bus_ohci1_clk.common.hw, + [CLK_BUS_OHCI2] = &bus_ohci2_clk.common.hw, + [CLK_BUS_VE] = &bus_ve_clk.common.hw, + [CLK_BUS_MP] = &bus_mp_clk.common.hw, + [CLK_BUS_DEINTERLACE] = &bus_deinterlace_clk.common.hw, + [CLK_BUS_CSI0] = &bus_csi0_clk.common.hw, + [CLK_BUS_CSI1] = &bus_csi1_clk.common.hw, + [CLK_BUS_HDMI0] = &bus_hdmi0_clk.common.hw, + [CLK_BUS_HDMI1] = &bus_hdmi1_clk.common.hw, + [CLK_BUS_DE] = &bus_de_clk.common.hw, + [CLK_BUS_TVE0] = &bus_tve0_clk.common.hw, + [CLK_BUS_TVE1] = &bus_tve1_clk.common.hw, + [CLK_BUS_TVE_TOP] = &bus_tve_top_clk.common.hw, + [CLK_BUS_GMAC] = &bus_gmac_clk.common.hw, + [CLK_BUS_GPU] = &bus_gpu_clk.common.hw, + [CLK_BUS_TVD0] = &bus_tvd0_clk.common.hw, + [CLK_BUS_TVD1] = &bus_tvd1_clk.common.hw, + [CLK_BUS_TVD2] = &bus_tvd2_clk.common.hw, + [CLK_BUS_TVD3] = &bus_tvd3_clk.common.hw, + [CLK_BUS_TVD_TOP] = &bus_tvd_top_clk.common.hw, + [CLK_BUS_TCON_LCD0] = &bus_tcon_lcd0_clk.common.hw, + 
[CLK_BUS_TCON_LCD1] = &bus_tcon_lcd1_clk.common.hw, + [CLK_BUS_TCON_TV0] = &bus_tcon_tv0_clk.common.hw, + [CLK_BUS_TCON_TV1] = &bus_tcon_tv1_clk.common.hw, + [CLK_BUS_TCON_TOP] = &bus_tcon_top_clk.common.hw, + [CLK_BUS_CODEC] = &bus_codec_clk.common.hw, + [CLK_BUS_SPDIF] = &bus_spdif_clk.common.hw, + [CLK_BUS_AC97] = &bus_ac97_clk.common.hw, + [CLK_BUS_PIO] = &bus_pio_clk.common.hw, + [CLK_BUS_IR0] = &bus_ir0_clk.common.hw, + [CLK_BUS_IR1] = &bus_ir1_clk.common.hw, + [CLK_BUS_THS] = &bus_ths_clk.common.hw, + [CLK_BUS_KEYPAD] = &bus_keypad_clk.common.hw, + [CLK_BUS_I2S0] = &bus_i2s0_clk.common.hw, + [CLK_BUS_I2S1] = &bus_i2s1_clk.common.hw, + [CLK_BUS_I2S2] = &bus_i2s2_clk.common.hw, + [CLK_BUS_I2C0] = &bus_i2c0_clk.common.hw, + [CLK_BUS_I2C1] = &bus_i2c1_clk.common.hw, + [CLK_BUS_I2C2] = &bus_i2c2_clk.common.hw, + [CLK_BUS_I2C3] = &bus_i2c3_clk.common.hw, + [CLK_BUS_CAN] = &bus_can_clk.common.hw, + [CLK_BUS_SCR] = &bus_scr_clk.common.hw, + [CLK_BUS_PS20] = &bus_ps20_clk.common.hw, + [CLK_BUS_PS21] = &bus_ps21_clk.common.hw, + [CLK_BUS_I2C4] = &bus_i2c4_clk.common.hw, + [CLK_BUS_UART0] = &bus_uart0_clk.common.hw, + [CLK_BUS_UART1] = &bus_uart1_clk.common.hw, + [CLK_BUS_UART2] = &bus_uart2_clk.common.hw, + [CLK_BUS_UART3] = &bus_uart3_clk.common.hw, + [CLK_BUS_UART4] = &bus_uart4_clk.common.hw, + [CLK_BUS_UART5] = &bus_uart5_clk.common.hw, + [CLK_BUS_UART6] = &bus_uart6_clk.common.hw, + [CLK_BUS_UART7] = &bus_uart7_clk.common.hw, + [CLK_BUS_DBG] = &bus_dbg_clk.common.hw, + [CLK_THS] = &ths_clk.common.hw, + [CLK_NAND] = &nand_clk.common.hw, + [CLK_MMC0] = &mmc0_clk.common.hw, + [CLK_MMC1] = &mmc1_clk.common.hw, + [CLK_MMC2] = &mmc2_clk.common.hw, + [CLK_MMC3] = &mmc3_clk.common.hw, + [CLK_TS] = &ts_clk.common.hw, + [CLK_CE] = &ce_clk.common.hw, + [CLK_SPI0] = &spi0_clk.common.hw, + [CLK_SPI1] = &spi1_clk.common.hw, + [CLK_SPI2] = &spi2_clk.common.hw, + [CLK_SPI3] = &spi3_clk.common.hw, + [CLK_I2S0] = &i2s0_clk.common.hw, + [CLK_I2S1] = &i2s1_clk.common.hw, + [CLK_I2S2] = &i2s2_clk.common.hw, + [CLK_AC97] = &ac97_clk.common.hw, + [CLK_SPDIF] = &spdif_clk.common.hw, + [CLK_KEYPAD] = &keypad_clk.common.hw, + [CLK_SATA] = &sata_clk.common.hw, + [CLK_USB_PHY0] = &usb_phy0_clk.common.hw, + [CLK_USB_PHY1] = &usb_phy1_clk.common.hw, + [CLK_USB_PHY2] = &usb_phy2_clk.common.hw, + [CLK_USB_OHCI0] = &usb_ohci0_clk.common.hw, + [CLK_USB_OHCI1] = &usb_ohci1_clk.common.hw, + [CLK_USB_OHCI2] = &usb_ohci2_clk.common.hw, + [CLK_IR0] = &ir0_clk.common.hw, + [CLK_IR1] = &ir1_clk.common.hw, + [CLK_DRAM] = &dram_clk.common.hw, + [CLK_DRAM_VE] = &dram_ve_clk.common.hw, + [CLK_DRAM_CSI0] = &dram_csi0_clk.common.hw, + [CLK_DRAM_CSI1] = &dram_csi1_clk.common.hw, + [CLK_DRAM_TS] = &dram_ts_clk.common.hw, + [CLK_DRAM_TVD] = &dram_tvd_clk.common.hw, + [CLK_DRAM_MP] = &dram_mp_clk.common.hw, + [CLK_DRAM_DEINTERLACE] = &dram_deinterlace_clk.common.hw, + [CLK_DE] = &de_clk.common.hw, + [CLK_MP] = &mp_clk.common.hw, + [CLK_TCON_LCD0] = &tcon_lcd0_clk.common.hw, + [CLK_TCON_LCD1] = &tcon_lcd1_clk.common.hw, + [CLK_TCON_TV0] = &tcon_tv0_clk.common.hw, + [CLK_TCON_TV1] = &tcon_tv1_clk.common.hw, + [CLK_DEINTERLACE] = &deinterlace_clk.common.hw, + [CLK_CSI1_MCLK] = &csi1_mclk_clk.common.hw, + [CLK_CSI_SCLK] = &csi_sclk_clk.common.hw, + [CLK_CSI0_MCLK] = &csi0_mclk_clk.common.hw, + [CLK_VE] = &ve_clk.common.hw, + [CLK_CODEC] = &codec_clk.common.hw, + [CLK_AVS] = &avs_clk.common.hw, + [CLK_HDMI] = &hdmi_clk.common.hw, + [CLK_HDMI_SLOW] = &hdmi_slow_clk.common.hw, + [CLK_MBUS] = &mbus_clk.common.hw, + [CLK_DSI_DPHY] = 
&dsi_dphy_clk.common.hw, + [CLK_TVE0] = &tve0_clk.common.hw, + [CLK_TVE1] = &tve1_clk.common.hw, + [CLK_TVD0] = &tvd0_clk.common.hw, + [CLK_TVD1] = &tvd1_clk.common.hw, + [CLK_TVD2] = &tvd2_clk.common.hw, + [CLK_TVD3] = &tvd3_clk.common.hw, + [CLK_GPU] = &gpu_clk.common.hw, + [CLK_OUTA] = &outa_clk.common.hw, + [CLK_OUTB] = &outb_clk.common.hw, + }, + .num = CLK_NUMBER, +}; + +static struct ccu_reset_map sun8i_r40_ccu_resets[] = { + [RST_USB_PHY0] = { 0x0cc, BIT(0) }, + [RST_USB_PHY1] = { 0x0cc, BIT(1) }, + [RST_USB_PHY2] = { 0x0cc, BIT(2) }, + + [RST_DRAM] = { 0x0f4, BIT(31) }, + [RST_MBUS] = { 0x0fc, BIT(31) }, + + [RST_BUS_MIPI_DSI] = { 0x2c0, BIT(1) }, + [RST_BUS_CE] = { 0x2c0, BIT(5) }, + [RST_BUS_DMA] = { 0x2c0, BIT(6) }, + [RST_BUS_MMC0] = { 0x2c0, BIT(8) }, + [RST_BUS_MMC1] = { 0x2c0, BIT(9) }, + [RST_BUS_MMC2] = { 0x2c0, BIT(10) }, + [RST_BUS_MMC3] = { 0x2c0, BIT(11) }, + [RST_BUS_NAND] = { 0x2c0, BIT(13) }, + [RST_BUS_DRAM] = { 0x2c0, BIT(14) }, + [RST_BUS_EMAC] = { 0x2c0, BIT(17) }, + [RST_BUS_TS] = { 0x2c0, BIT(18) }, + [RST_BUS_HSTIMER] = { 0x2c0, BIT(19) }, + [RST_BUS_SPI0] = { 0x2c0, BIT(20) }, + [RST_BUS_SPI1] = { 0x2c0, BIT(21) }, + [RST_BUS_SPI2] = { 0x2c0, BIT(22) }, + [RST_BUS_SPI3] = { 0x2c0, BIT(23) }, + [RST_BUS_SATA] = { 0x2c0, BIT(24) }, + [RST_BUS_OTG] = { 0x2c0, BIT(25) }, + [RST_BUS_EHCI0] = { 0x2c0, BIT(26) }, + [RST_BUS_EHCI1] = { 0x2c0, BIT(27) }, + [RST_BUS_EHCI2] = { 0x2c0, BIT(28) }, + [RST_BUS_OHCI0] = { 0x2c0, BIT(29) }, + [RST_BUS_OHCI1] = { 0x2c0, BIT(30) }, + [RST_BUS_OHCI2] = { 0x2c0, BIT(31) }, + + [RST_BUS_VE] = { 0x2c4, BIT(0) }, + [RST_BUS_MP] = { 0x2c4, BIT(2) }, + [RST_BUS_DEINTERLACE] = { 0x2c4, BIT(5) }, + [RST_BUS_CSI0] = { 0x2c4, BIT(8) }, + [RST_BUS_CSI1] = { 0x2c4, BIT(9) }, + [RST_BUS_HDMI0] = { 0x2c4, BIT(10) }, + [RST_BUS_HDMI1] = { 0x2c4, BIT(11) }, + [RST_BUS_DE] = { 0x2c4, BIT(12) }, + [RST_BUS_TVE0] = { 0x2c4, BIT(13) }, + [RST_BUS_TVE1] = { 0x2c4, BIT(14) }, + [RST_BUS_TVE_TOP] = { 0x2c4, BIT(15) }, + [RST_BUS_GMAC] = { 0x2c4, BIT(17) }, + [RST_BUS_GPU] = { 0x2c4, BIT(20) }, + [RST_BUS_TVD0] = { 0x2c4, BIT(21) }, + [RST_BUS_TVD1] = { 0x2c4, BIT(22) }, + [RST_BUS_TVD2] = { 0x2c4, BIT(23) }, + [RST_BUS_TVD3] = { 0x2c4, BIT(24) }, + [RST_BUS_TVD_TOP] = { 0x2c4, BIT(25) }, + [RST_BUS_TCON_LCD0] = { 0x2c4, BIT(26) }, + [RST_BUS_TCON_LCD1] = { 0x2c4, BIT(27) }, + [RST_BUS_TCON_TV0] = { 0x2c4, BIT(28) }, + [RST_BUS_TCON_TV1] = { 0x2c4, BIT(29) }, + [RST_BUS_TCON_TOP] = { 0x2c4, BIT(30) }, + [RST_BUS_DBG] = { 0x2c4, BIT(31) }, + + [RST_BUS_LVDS] = { 0x2c8, BIT(0) }, + + [RST_BUS_CODEC] = { 0x2d0, BIT(0) }, + [RST_BUS_SPDIF] = { 0x2d0, BIT(1) }, + [RST_BUS_AC97] = { 0x2d0, BIT(2) }, + [RST_BUS_IR0] = { 0x2d0, BIT(6) }, + [RST_BUS_IR1] = { 0x2d0, BIT(7) }, + [RST_BUS_THS] = { 0x2d0, BIT(8) }, + [RST_BUS_KEYPAD] = { 0x2d0, BIT(10) }, + [RST_BUS_I2S0] = { 0x2d0, BIT(12) }, + [RST_BUS_I2S1] = { 0x2d0, BIT(13) }, + [RST_BUS_I2S2] = { 0x2d0, BIT(14) }, + + [RST_BUS_I2C0] = { 0x2d8, BIT(0) }, + [RST_BUS_I2C1] = { 0x2d8, BIT(1) }, + [RST_BUS_I2C2] = { 0x2d8, BIT(2) }, + [RST_BUS_I2C3] = { 0x2d8, BIT(3) }, + [RST_BUS_CAN] = { 0x2d8, BIT(4) }, + [RST_BUS_SCR] = { 0x2d8, BIT(5) }, + [RST_BUS_PS20] = { 0x2d8, BIT(6) }, + [RST_BUS_PS21] = { 0x2d8, BIT(7) }, + [RST_BUS_I2C4] = { 0x2d8, BIT(15) }, + [RST_BUS_UART0] = { 0x2d8, BIT(16) }, + [RST_BUS_UART1] = { 0x2d8, BIT(17) }, + [RST_BUS_UART2] = { 0x2d8, BIT(18) }, + [RST_BUS_UART3] = { 0x2d8, BIT(19) }, + [RST_BUS_UART4] = { 0x2d8, BIT(20) }, + [RST_BUS_UART5] = { 0x2d8, BIT(21) }, + [RST_BUS_UART6] = { 
0x2d8, BIT(22) }, + [RST_BUS_UART7] = { 0x2d8, BIT(23) }, +}; + +static const struct sunxi_ccu_desc sun8i_r40_ccu_desc = { + .ccu_clks = sun8i_r40_ccu_clks, + .num_ccu_clks = ARRAY_SIZE(sun8i_r40_ccu_clks), + + .hw_clks = &sun8i_r40_hw_clks, + + .resets = sun8i_r40_ccu_resets, + .num_resets = ARRAY_SIZE(sun8i_r40_ccu_resets), +}; + +static struct ccu_pll_nb sun8i_r40_pll_cpu_nb = { + .common = &pll_cpu_clk.common, + /* copy from pll_cpu_clk */ + .enable = BIT(31), + .lock = BIT(28), +}; + +static struct ccu_mux_nb sun8i_r40_cpu_nb = { + .common = &cpu_clk.common, + .cm = &cpu_clk.mux, + .delay_us = 1, /* > 8 clock cycles at 24 MHz */ + .bypass_index = 1, /* index of 24 MHz oscillator */ +}; + +static void __init sun8i_r40_ccu_setup(struct device_node *node) +{ + void __iomem *reg; + u32 val; + + reg = of_io_request_and_map(node, 0, of_node_full_name(node)); + if (IS_ERR(reg)) { + pr_err("%s: Could not map the clock registers\n", + of_node_full_name(node)); + return; + } + + /* Force the PLL-Audio-1x divider to 4 */ + val = readl(reg + SUN8I_R40_PLL_AUDIO_REG); + val &= ~GENMASK(19, 16); + writel(val | (3 << 16), reg + SUN8I_R40_PLL_AUDIO_REG); + + /* Force PLL-MIPI to MIPI mode */ + val = readl(reg + SUN8I_R40_PLL_MIPI_REG); + val &= ~BIT(16); + writel(val, reg + SUN8I_R40_PLL_MIPI_REG); + + /* Force OHCI 12M parent to 12M divided from 48M */ + val = readl(reg + SUN8I_R40_USB_CLK_REG); + val &= ~GENMASK(25, 20); + writel(val, reg + SUN8I_R40_USB_CLK_REG); + + sunxi_ccu_probe(node, reg, &sun8i_r40_ccu_desc); + + /* Gate then ungate PLL CPU after any rate changes */ + ccu_pll_notifier_register(&sun8i_r40_pll_cpu_nb); + + /* Reparent CPU during PLL CPU rate changes */ + ccu_mux_notifier_register(pll_cpu_clk.common.hw.clk, + &sun8i_r40_cpu_nb); +} +CLK_OF_DECLARE(sun8i_r40_ccu, "allwinner,sun8i-r40-ccu", + sun8i_r40_ccu_setup); diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r40.h b/drivers/clk/sunxi-ng/ccu-sun8i-r40.h new file mode 100644 index 000000000000..0db8e1e97af8 --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu-sun8i-r40.h @@ -0,0 +1,69 @@ +/* + * Copyright 2017 Icenowy Zheng + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _CCU_SUN8I_R40_H_ +#define _CCU_SUN8I_R40_H_ + +#include +#include + +#define CLK_OSC_12M 0 +#define CLK_PLL_CPU 1 +#define CLK_PLL_AUDIO_BASE 2 +#define CLK_PLL_AUDIO 3 +#define CLK_PLL_AUDIO_2X 4 +#define CLK_PLL_AUDIO_4X 5 +#define CLK_PLL_AUDIO_8X 6 +#define CLK_PLL_VIDEO0 7 +#define CLK_PLL_VIDEO0_2X 8 +#define CLK_PLL_VE 9 +#define CLK_PLL_DDR0 10 +#define CLK_PLL_PERIPH0 11 +#define CLK_PLL_PERIPH0_SATA 12 +#define CLK_PLL_PERIPH0_2X 13 +#define CLK_PLL_PERIPH1 14 +#define CLK_PLL_PERIPH1_2X 15 +#define CLK_PLL_VIDEO1 16 +#define CLK_PLL_VIDEO1_2X 17 +#define CLK_PLL_SATA 18 +#define CLK_PLL_SATA_OUT 19 +#define CLK_PLL_GPU 20 +#define CLK_PLL_MIPI 21 +#define CLK_PLL_DE 22 +#define CLK_PLL_DDR1 23 + +/* The CPU clock is exported */ + +#define CLK_AXI 25 +#define CLK_AHB1 26 +#define CLK_APB1 27 +#define CLK_APB2 28 + +/* All the bus gates are exported */ + +/* The first bunch of module clocks are exported */ + +#define CLK_DRAM 132 + +/* All the DRAM gates are exported */ + +/* Some more module clocks are exported */ + +#define CLK_MBUS 155 + +/* Another bunch of module clocks are exported */ + +#define CLK_NUMBER (CLK_OUTB + 1) + +#endif /* _CCU_SUN8I_R40_H_ */ diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c index a34a78d7fb28..621b1cd996db 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c @@ -575,8 +575,7 @@ static void __init sun8i_v3s_ccu_setup(struct device_node *node) reg = of_io_request_and_map(node, 0, of_node_full_name(node)); if (IS_ERR(reg)) { - pr_err("%s: Could not map the clock registers\n", - of_node_full_name(node)); + pr_err("%pOF: Could not map the clock registers\n", node); return; } diff --git a/drivers/clk/sunxi-ng/ccu_common.h b/drivers/clk/sunxi-ng/ccu_common.h index d6fdd7a789aa..cadd1a9f93b6 100644 --- a/drivers/clk/sunxi-ng/ccu_common.h +++ b/drivers/clk/sunxi-ng/ccu_common.h @@ -23,6 +23,10 @@ #define CCU_FEATURE_FIXED_POSTDIV BIT(3) #define CCU_FEATURE_ALL_PREDIV BIT(4) #define CCU_FEATURE_LOCK_REG BIT(5) +#define CCU_FEATURE_MMC_TIMING_SWITCH BIT(6) + +/* MMC timing mode switch bit */ +#define CCU_MMC_NEW_TIMING_MODE BIT(30) struct device_node; diff --git a/drivers/clk/sunxi-ng/ccu_div.c b/drivers/clk/sunxi-ng/ccu_div.c index c0e5c10d0091..baa3cf96507b 100644 --- a/drivers/clk/sunxi-ng/ccu_div.c +++ b/drivers/clk/sunxi-ng/ccu_div.c @@ -21,10 +21,18 @@ static unsigned long ccu_div_round_rate(struct ccu_mux_internal *mux, { struct ccu_div *cd = data; - return divider_round_rate_parent(&cd->common.hw, parent, + if (cd->common.features & CCU_FEATURE_FIXED_POSTDIV) + rate *= cd->fixed_post_div; + + rate = divider_round_rate_parent(&cd->common.hw, parent, rate, parent_rate, cd->div.table, cd->div.width, cd->div.flags); + + if (cd->common.features & CCU_FEATURE_FIXED_POSTDIV) + rate /= cd->fixed_post_div; + + return rate; } static void ccu_div_disable(struct clk_hw *hw) @@ -62,8 +70,13 @@ static unsigned long ccu_div_recalc_rate(struct clk_hw *hw, parent_rate = ccu_mux_helper_apply_prediv(&cd->common, &cd->mux, -1, parent_rate); - return divider_recalc_rate(hw, parent_rate, val, cd->div.table, - cd->div.flags); + val = divider_recalc_rate(hw, parent_rate, val, cd->div.table, + cd->div.flags); + + if (cd->common.features & CCU_FEATURE_FIXED_POSTDIV) + val /= cd->fixed_post_div; + + return val; } static int ccu_div_determine_rate(struct clk_hw *hw, @@ -86,6 +99,9 @@ static int ccu_div_set_rate(struct clk_hw *hw, unsigned long rate, parent_rate = 
ccu_mux_helper_apply_prediv(&cd->common, &cd->mux, -1, parent_rate); + if (cd->common.features & CCU_FEATURE_FIXED_POSTDIV) + rate *= cd->fixed_post_div; + val = divider_get_val(rate, parent_rate, cd->div.table, cd->div.width, cd->div.flags); diff --git a/drivers/clk/sunxi-ng/ccu_div.h b/drivers/clk/sunxi-ng/ccu_div.h index 08d074451204..f3a5028dcd14 100644 --- a/drivers/clk/sunxi-ng/ccu_div.h +++ b/drivers/clk/sunxi-ng/ccu_div.h @@ -86,9 +86,10 @@ struct ccu_div_internal { struct ccu_div { u32 enable; - struct ccu_div_internal div; + struct ccu_div_internal div; struct ccu_mux_internal mux; struct ccu_common common; + unsigned int fixed_post_div; }; #define SUNXI_CCU_DIV_TABLE_WITH_GATE(_struct, _name, _parent, _reg, \ diff --git a/drivers/clk/sunxi-ng/ccu_frac.c b/drivers/clk/sunxi-ng/ccu_frac.c index 8b5eb7756bf7..d1d168d4c4f0 100644 --- a/drivers/clk/sunxi-ng/ccu_frac.c +++ b/drivers/clk/sunxi-ng/ccu_frac.c @@ -67,25 +67,25 @@ unsigned long ccu_frac_helper_read_rate(struct ccu_common *common, { u32 reg; - printk("%s: Read fractional\n", clk_hw_get_name(&common->hw)); + pr_debug("%s: Read fractional\n", clk_hw_get_name(&common->hw)); if (!(common->features & CCU_FEATURE_FRACTIONAL)) return 0; - printk("%s: clock is fractional (rates %lu and %lu)\n", - clk_hw_get_name(&common->hw), cf->rates[0], cf->rates[1]); + pr_debug("%s: clock is fractional (rates %lu and %lu)\n", + clk_hw_get_name(&common->hw), cf->rates[0], cf->rates[1]); reg = readl(common->base + common->reg); - printk("%s: clock reg is 0x%x (select is 0x%x)\n", - clk_hw_get_name(&common->hw), reg, cf->select); + pr_debug("%s: clock reg is 0x%x (select is 0x%x)\n", + clk_hw_get_name(&common->hw), reg, cf->select); return (reg & cf->select) ? cf->rates[1] : cf->rates[0]; } int ccu_frac_helper_set_rate(struct ccu_common *common, struct ccu_frac_internal *cf, - unsigned long rate) + unsigned long rate, u32 lock) { unsigned long flags; u32 reg, sel; @@ -106,5 +106,7 @@ int ccu_frac_helper_set_rate(struct ccu_common *common, writel(reg | sel, common->base + common->reg); spin_unlock_irqrestore(common->lock, flags); + ccu_helper_wait_for_lock(common, lock); + return 0; } diff --git a/drivers/clk/sunxi-ng/ccu_frac.h b/drivers/clk/sunxi-ng/ccu_frac.h index 7b1ee380156f..efe2dd6bac01 100644 --- a/drivers/clk/sunxi-ng/ccu_frac.h +++ b/drivers/clk/sunxi-ng/ccu_frac.h @@ -48,6 +48,6 @@ unsigned long ccu_frac_helper_read_rate(struct ccu_common *common, int ccu_frac_helper_set_rate(struct ccu_common *common, struct ccu_frac_internal *cf, - unsigned long rate); + unsigned long rate, u32 lock); #endif /* _CCU_FRAC_H_ */ diff --git a/drivers/clk/sunxi-ng/ccu_mmc_timing.c b/drivers/clk/sunxi-ng/ccu_mmc_timing.c new file mode 100644 index 000000000000..f9869f7353c0 --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu_mmc_timing.c @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2017 Chen-Yu Tsai. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include + +#include "ccu_common.h" + +/** + * sunxi_ccu_set_mmc_timing_mode: Configure the MMC clock timing mode + * @clk: clock to be configured + * @new_mode: true for new timing mode introduced in A83T and later + * + * Returns 0 on success, -ENOTSUPP if the clock does not support + * switching modes. + */ +int sunxi_ccu_set_mmc_timing_mode(struct clk *clk, bool new_mode) +{ + struct clk_hw *hw = __clk_get_hw(clk); + struct ccu_common *cm = hw_to_ccu_common(hw); + unsigned long flags; + u32 val; + + if (!(cm->features & CCU_FEATURE_MMC_TIMING_SWITCH)) + return -ENOTSUPP; + + spin_lock_irqsave(cm->lock, flags); + + val = readl(cm->base + cm->reg); + if (new_mode) + val |= CCU_MMC_NEW_TIMING_MODE; + else + val &= ~CCU_MMC_NEW_TIMING_MODE; + writel(val, cm->base + cm->reg); + + spin_unlock_irqrestore(cm->lock, flags); + + return 0; +} +EXPORT_SYMBOL_GPL(sunxi_ccu_set_mmc_timing_mode); + +/** + * sunxi_ccu_get_mmc_timing_mode: Get the current MMC clock timing mode + * @clk: clock to query + * + * Returns 0 if the clock is in old timing mode, > 0 if it is in + * new timing mode, and -ENOTSUPP if the clock does not support + * this function. + */ +int sunxi_ccu_get_mmc_timing_mode(struct clk *clk) +{ + struct clk_hw *hw = __clk_get_hw(clk); + struct ccu_common *cm = hw_to_ccu_common(hw); + + if (!(cm->features & CCU_FEATURE_MMC_TIMING_SWITCH)) + return -ENOTSUPP; + + return !!(readl(cm->base + cm->reg) & CCU_MMC_NEW_TIMING_MODE); +} +EXPORT_SYMBOL_GPL(sunxi_ccu_get_mmc_timing_mode); diff --git a/drivers/clk/sunxi-ng/ccu_mp.c b/drivers/clk/sunxi-ng/ccu_mp.c index b917ad7a386c..688855e7dc8c 100644 --- a/drivers/clk/sunxi-ng/ccu_mp.c +++ b/drivers/clk/sunxi-ng/ccu_mp.c @@ -172,3 +172,83 @@ const struct clk_ops ccu_mp_ops = { .recalc_rate = ccu_mp_recalc_rate, .set_rate = ccu_mp_set_rate, }; + +/* + * Support for MMC timing mode switching + * + * The MMC clocks on some SoCs support switching between old and + * new timing modes. A platform specific API is provided to query + * and set the timing mode on supported SoCs. + * + * In addition, a special class of ccu_mp_ops is provided, which + * takes into account the timing mode switch. When the new timing + * mode is active, the clock output rate is halved. This new class + * is a wrapper around the generic ccu_mp_ops. When clock rates + * are passed through to ccu_mp_ops callbacks, they are doubled + * if the new timing mode bit is set, to account for the post + * divider. Conversely, when clock rates are passed back, they + * are halved if the mode bit is set.
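+ *
+ * For example, with the new timing mode bit set, a 52 MHz request from
+ * an MMC driver is doubled to 104 MHz before it reaches the generic M-P
+ * code, the M and P factors are programmed for 104 MHz, and the mode's
+ * post-divider brings the rate seen by the consumer back to 52 MHz.
+ *
+ * A host driver would use the exported
+ * sunxi_ccu_{set,get}_mmc_timing_mode() helpers roughly as follows
+ * (an illustrative sketch only; host->clk_mmc and use_new_timings are
+ * placeholder names, not taken from an existing driver):
+ *
+ *	ret = sunxi_ccu_set_mmc_timing_mode(host->clk_mmc, true);
+ *	if (ret && ret != -ENOTSUPP)
+ *		return ret;
+ *	host->use_new_timings =
+ *		sunxi_ccu_get_mmc_timing_mode(host->clk_mmc) > 0;
+ *	clk_set_rate(host->clk_mmc, 52000000);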
+ */ + +static unsigned long ccu_mp_mmc_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + unsigned long rate = ccu_mp_recalc_rate(hw, parent_rate); + struct ccu_common *cm = hw_to_ccu_common(hw); + u32 val = readl(cm->base + cm->reg); + + if (val & CCU_MMC_NEW_TIMING_MODE) + return rate / 2; + return rate; +} + +static int ccu_mp_mmc_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct ccu_common *cm = hw_to_ccu_common(hw); + u32 val = readl(cm->base + cm->reg); + int ret; + + /* adjust the requested clock rate */ + if (val & CCU_MMC_NEW_TIMING_MODE) { + req->rate *= 2; + req->min_rate *= 2; + req->max_rate *= 2; + } + + ret = ccu_mp_determine_rate(hw, req); + + /* re-adjust the requested clock rate back */ + if (val & CCU_MMC_NEW_TIMING_MODE) { + req->rate /= 2; + req->min_rate /= 2; + req->max_rate /= 2; + } + + return ret; +} + +static int ccu_mp_mmc_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct ccu_common *cm = hw_to_ccu_common(hw); + u32 val = readl(cm->base + cm->reg); + + if (val & CCU_MMC_NEW_TIMING_MODE) + rate *= 2; + + return ccu_mp_set_rate(hw, rate, parent_rate); +} + +const struct clk_ops ccu_mp_mmc_ops = { + .disable = ccu_mp_disable, + .enable = ccu_mp_enable, + .is_enabled = ccu_mp_is_enabled, + + .get_parent = ccu_mp_get_parent, + .set_parent = ccu_mp_set_parent, + + .determine_rate = ccu_mp_mmc_determine_rate, + .recalc_rate = ccu_mp_mmc_recalc_rate, + .set_rate = ccu_mp_mmc_set_rate, +}; diff --git a/drivers/clk/sunxi-ng/ccu_mp.h b/drivers/clk/sunxi-ng/ccu_mp.h index 915625e97d98..aaef11d747ea 100644 --- a/drivers/clk/sunxi-ng/ccu_mp.h +++ b/drivers/clk/sunxi-ng/ccu_mp.h @@ -14,6 +14,7 @@ #ifndef _CCU_MP_H_ #define _CCU_MP_H_ +#include #include #include "ccu_common.h" @@ -74,4 +75,33 @@ static inline struct ccu_mp *hw_to_ccu_mp(struct clk_hw *hw) extern const struct clk_ops ccu_mp_ops; +/* + * Special class of M-P clock that supports MMC timing modes + * + * Since the MMC clock registers all follow the same layout, we can + * simplify the macro for this particular case. In addition, as + * switching modes also affects the output clock rate, we need to + * have CLK_GET_RATE_NOCACHE for all these types of clocks. 
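+ *
+ * A clock is then described with a single invocation, for instance
+ * (illustrative only; mmc0_parents stands for whatever parent list the
+ * SoC uses):
+ *
+ *	static SUNXI_CCU_MP_MMC_WITH_MUX_GATE(mmc0_clk, "mmc0",
+ *					      mmc0_parents, 0x088, 0);
+ *
+ * This gives the usual M (bits [3:0]), P (bits [17:16]), mux
+ * (bits [25:24]) and gate (bit 31) layout, but wires up ccu_mp_mmc_ops
+ * and marks the clock with CCU_FEATURE_MMC_TIMING_SWITCH.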
+ */ + +#define SUNXI_CCU_MP_MMC_WITH_MUX_GATE(_struct, _name, _parents, _reg, \ + _flags) \ + struct ccu_mp _struct = { \ + .enable = BIT(31), \ + .m = _SUNXI_CCU_DIV(0, 4), \ + .p = _SUNXI_CCU_DIV(16, 2), \ + .mux = _SUNXI_CCU_MUX(24, 2), \ + .common = { \ + .reg = _reg, \ + .features = CCU_FEATURE_MMC_TIMING_SWITCH, \ + .hw.init = CLK_HW_INIT_PARENTS(_name, \ + _parents, \ + &ccu_mp_mmc_ops, \ + CLK_GET_RATE_NOCACHE | \ + _flags), \ + } \ + } + +extern const struct clk_ops ccu_mp_mmc_ops; + #endif /* _CCU_MP_H_ */ diff --git a/drivers/clk/sunxi-ng/ccu_mult.c b/drivers/clk/sunxi-ng/ccu_mult.c index 20d0300867f2..12e0783caee6 100644 --- a/drivers/clk/sunxi-ng/ccu_mult.c +++ b/drivers/clk/sunxi-ng/ccu_mult.c @@ -111,10 +111,14 @@ static int ccu_mult_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long flags; u32 reg; - if (ccu_frac_helper_has_rate(&cm->common, &cm->frac, rate)) - return ccu_frac_helper_set_rate(&cm->common, &cm->frac, rate); - else + if (ccu_frac_helper_has_rate(&cm->common, &cm->frac, rate)) { + ccu_frac_helper_enable(&cm->common, &cm->frac); + + return ccu_frac_helper_set_rate(&cm->common, &cm->frac, + rate, cm->lock); + } else { ccu_frac_helper_disable(&cm->common, &cm->frac); + } parent_rate = ccu_mux_helper_apply_prediv(&cm->common, &cm->mux, -1, parent_rate); diff --git a/drivers/clk/sunxi-ng/ccu_nkm.c b/drivers/clk/sunxi-ng/ccu_nkm.c index 44b16dc8fea6..841840e35e61 100644 --- a/drivers/clk/sunxi-ng/ccu_nkm.c +++ b/drivers/clk/sunxi-ng/ccu_nkm.c @@ -75,7 +75,7 @@ static unsigned long ccu_nkm_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct ccu_nkm *nkm = hw_to_ccu_nkm(hw); - unsigned long n, m, k; + unsigned long n, m, k, rate; u32 reg; reg = readl(nkm->common.base + nkm->common.reg); @@ -98,7 +98,12 @@ static unsigned long ccu_nkm_recalc_rate(struct clk_hw *hw, if (!m) m++; - return parent_rate * n * k / m; + rate = parent_rate * n * k / m; + + if (nkm->common.features & CCU_FEATURE_FIXED_POSTDIV) + rate /= nkm->fixed_post_div; + + return rate; } static unsigned long ccu_nkm_round_rate(struct ccu_mux_internal *mux, @@ -117,9 +122,17 @@ static unsigned long ccu_nkm_round_rate(struct ccu_mux_internal *mux, _nkm.min_m = 1; _nkm.max_m = nkm->m.max ?: 1 << nkm->m.width; + if (nkm->common.features & CCU_FEATURE_FIXED_POSTDIV) + rate *= nkm->fixed_post_div; + ccu_nkm_find_best(*parent_rate, rate, &_nkm); - return *parent_rate * _nkm.n * _nkm.k / _nkm.m; + rate = *parent_rate * _nkm.n * _nkm.k / _nkm.m; + + if (nkm->common.features & CCU_FEATURE_FIXED_POSTDIV) + rate /= nkm->fixed_post_div; + + return rate; } static int ccu_nkm_determine_rate(struct clk_hw *hw, @@ -139,6 +152,9 @@ static int ccu_nkm_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long flags; u32 reg; + if (nkm->common.features & CCU_FEATURE_FIXED_POSTDIV) + rate *= nkm->fixed_post_div; + _nkm.min_n = nkm->n.min ?: 1; _nkm.max_n = nkm->n.max ?: 1 << nkm->n.width; _nkm.min_k = nkm->k.min ?: 1; diff --git a/drivers/clk/sunxi-ng/ccu_nkm.h b/drivers/clk/sunxi-ng/ccu_nkm.h index 34580894f4d1..cc6efb70a102 100644 --- a/drivers/clk/sunxi-ng/ccu_nkm.h +++ b/drivers/clk/sunxi-ng/ccu_nkm.h @@ -34,6 +34,8 @@ struct ccu_nkm { struct ccu_div_internal m; struct ccu_mux_internal mux; + unsigned int fixed_post_div; + struct ccu_common common; }; diff --git a/drivers/clk/sunxi-ng/ccu_nm.c b/drivers/clk/sunxi-ng/ccu_nm.c index 5e5e90a4a50c..a32158e8f2e3 100644 --- a/drivers/clk/sunxi-ng/ccu_nm.c +++ b/drivers/clk/sunxi-ng/ccu_nm.c @@ -117,10 +117,23 @@ static int ccu_nm_set_rate(struct 
clk_hw *hw, unsigned long rate, unsigned long flags; u32 reg; - if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate)) - return ccu_frac_helper_set_rate(&nm->common, &nm->frac, rate); - else + if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate)) { + spin_lock_irqsave(nm->common.lock, flags); + + /* most SoCs require M to be 0 if fractional mode is used */ + reg = readl(nm->common.base + nm->common.reg); + reg &= ~GENMASK(nm->m.width + nm->m.shift - 1, nm->m.shift); + writel(reg, nm->common.base + nm->common.reg); + + spin_unlock_irqrestore(nm->common.lock, flags); + + ccu_frac_helper_enable(&nm->common, &nm->frac); + + return ccu_frac_helper_set_rate(&nm->common, &nm->frac, + rate, nm->lock); + } else { ccu_frac_helper_disable(&nm->common, &nm->frac); + } _nm.min_n = nm->n.min ?: 1; _nm.max_n = nm->n.max ?: 1 << nm->n.width; diff --git a/drivers/clk/sunxi-ng/ccu_reset.c b/drivers/clk/sunxi-ng/ccu_reset.c index 6c31d48783a7..1dc4e98ea802 100644 --- a/drivers/clk/sunxi-ng/ccu_reset.c +++ b/drivers/clk/sunxi-ng/ccu_reset.c @@ -8,6 +8,7 @@ * the License, or (at your option) any later version. */ +#include #include #include @@ -49,7 +50,18 @@ static int ccu_reset_deassert(struct reset_controller_dev *rcdev, return 0; } +static int ccu_reset_reset(struct reset_controller_dev *rcdev, + unsigned long id) +{ + ccu_reset_assert(rcdev, id); + udelay(10); + ccu_reset_deassert(rcdev, id); + + return 0; +} + const struct reset_control_ops ccu_reset_ops = { .assert = ccu_reset_assert, .deassert = ccu_reset_deassert, + .reset = ccu_reset_reset, }; diff --git a/drivers/clk/sunxi/clk-sun8i-bus-gates.c b/drivers/clk/sunxi/clk-sun8i-bus-gates.c index 63fdb790df29..bee305bdddbe 100644 --- a/drivers/clk/sunxi/clk-sun8i-bus-gates.c +++ b/drivers/clk/sunxi/clk-sun8i-bus-gates.c @@ -78,6 +78,10 @@ static void __init sun8i_h3_bus_gates_init(struct device_node *node) clk_parent = APB1; else if (index >= 96 && index <= 127) clk_parent = APB2; + else { + WARN_ON(true); + continue; + } clk_reg = reg + 4 * (index / 32); clk_bit = index % 32; diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c index f2c9274b8bd5..aa4add580516 100644 --- a/drivers/clk/sunxi/clk-sunxi.c +++ b/drivers/clk/sunxi/clk-sunxi.c @@ -666,15 +666,14 @@ static struct clk * __init sunxi_mux_clk_setup(struct device_node *node, reg = of_iomap(node, 0); if (!reg) { - pr_err("Could not map registers for mux-clk: %s\n", - of_node_full_name(node)); + pr_err("Could not map registers for mux-clk: %pOF\n", node); return NULL; } i = of_clk_parent_fill(node, parents, SUNXI_MAX_PARENTS); if (of_property_read_string(node, "clock-output-names", &clk_name)) { - pr_err("%s: could not read clock-output-names from \"%s\"\n", - __func__, of_node_full_name(node)); + pr_err("%s: could not read clock-output-names from \"%pOF\"\n", + __func__, node); goto out_unmap; } @@ -797,16 +796,15 @@ static void __init sunxi_divider_clk_setup(struct device_node *node, reg = of_iomap(node, 0); if (!reg) { - pr_err("Could not map registers for mux-clk: %s\n", - of_node_full_name(node)); + pr_err("Could not map registers for mux-clk: %pOF\n", node); return; } clk_parent = of_clk_get_parent_name(node, 0); if (of_property_read_string(node, "clock-output-names", &clk_name)) { - pr_err("%s: could not read clock-output-names from \"%s\"\n", - __func__, of_node_full_name(node)); + pr_err("%s: could not read clock-output-names from \"%pOF\"\n", + __func__, node); goto out_unmap; } @@ -1010,8 +1008,7 @@ static struct clk ** __init sunxi_divs_clk_setup(struct 
device_node *node, reg = of_iomap(node, 0); if (!reg) { - pr_err("Could not map registers for divs-clk: %s\n", - of_node_full_name(node)); + pr_err("Could not map registers for divs-clk: %pOF\n", node); return NULL; } diff --git a/drivers/clk/tegra/clk-emc.c b/drivers/clk/tegra/clk-emc.c index 74e7544f861b..11a5066e5c27 100644 --- a/drivers/clk/tegra/clk-emc.c +++ b/drivers/clk/tegra/clk-emc.c @@ -378,7 +378,7 @@ static int load_one_timing_from_dt(struct tegra_clk_emc *tegra, err = of_property_read_u32(node, "clock-frequency", &tmp); if (err) { - pr_err("timing %s: failed to read rate\n", node->full_name); + pr_err("timing %pOF: failed to read rate\n", node); return err; } @@ -386,8 +386,7 @@ static int load_one_timing_from_dt(struct tegra_clk_emc *tegra, err = of_property_read_u32(node, "nvidia,parent-clock-frequency", &tmp); if (err) { - pr_err("timing %s: failed to read parent rate\n", - node->full_name); + pr_err("timing %pOF: failed to read parent rate\n", node); return err; } @@ -395,8 +394,7 @@ static int load_one_timing_from_dt(struct tegra_clk_emc *tegra, timing->parent = of_clk_get_by_name(node, "emc-parent"); if (IS_ERR(timing->parent)) { - pr_err("timing %s: failed to get parent clock\n", - node->full_name); + pr_err("timing %pOF: failed to get parent clock\n", node); return PTR_ERR(timing->parent); } @@ -409,8 +407,8 @@ static int load_one_timing_from_dt(struct tegra_clk_emc *tegra, } } if (timing->parent_index == 0xff) { - pr_err("timing %s: %s is not a valid parent\n", - node->full_name, __clk_get_name(timing->parent)); + pr_err("timing %pOF: %s is not a valid parent\n", + node, __clk_get_name(timing->parent)); clk_put(timing->parent); return -EINVAL; } diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c index 159a854779e6..7c369e21c91c 100644 --- a/drivers/clk/tegra/clk-pll.c +++ b/drivers/clk/tegra/clk-pll.c @@ -363,7 +363,7 @@ static void _clk_pll_enable(struct clk_hw *hw) val = pll_readl(pll->params->iddq_reg, pll); val &= ~BIT(pll->params->iddq_bit_idx); pll_writel(val, pll->params->iddq_reg, pll); - udelay(2); + udelay(5); } if (pll->params->reset_reg) { @@ -418,6 +418,26 @@ static void _clk_pll_disable(struct clk_hw *hw) } } +static void pll_clk_start_ss(struct tegra_clk_pll *pll) +{ + if (pll->params->defaults_set && pll->params->ssc_ctrl_reg) { + u32 val = pll_readl(pll->params->ssc_ctrl_reg, pll); + + val |= pll->params->ssc_ctrl_en_mask; + pll_writel(val, pll->params->ssc_ctrl_reg, pll); + } +} + +static void pll_clk_stop_ss(struct tegra_clk_pll *pll) +{ + if (pll->params->defaults_set && pll->params->ssc_ctrl_reg) { + u32 val = pll_readl(pll->params->ssc_ctrl_reg, pll); + + val &= ~pll->params->ssc_ctrl_en_mask; + pll_writel(val, pll->params->ssc_ctrl_reg, pll); + } +} + static int clk_pll_enable(struct clk_hw *hw) { struct tegra_clk_pll *pll = to_clk_pll(hw); @@ -431,6 +451,8 @@ static int clk_pll_enable(struct clk_hw *hw) ret = clk_pll_wait_for_lock(pll); + pll_clk_start_ss(pll); + if (pll->lock) spin_unlock_irqrestore(pll->lock, flags); @@ -445,6 +467,8 @@ static void clk_pll_disable(struct clk_hw *hw) if (pll->lock) spin_lock_irqsave(pll->lock, flags); + pll_clk_stop_ss(pll); + _clk_pll_disable(hw); if (pll->lock) @@ -666,6 +690,8 @@ static void _get_pll_mnp(struct tegra_clk_pll *pll, struct tegra_clk_pll_params *params = pll->params; struct div_nmp *div_nmp = params->div_nmp; + *cfg = (struct tegra_clk_pll_freq_table) { }; + if ((params->flags & (TEGRA_PLLM | TEGRA_PLLMB)) && (pll_override_readl(PMC_PLLP_WB0_OVERRIDE, pll) & 
PMC_PLLP_WB0_OVERRIDE_PLLM_OVERRIDE)) { @@ -716,26 +742,6 @@ static void _update_pll_cpcon(struct tegra_clk_pll *pll, pll_writel_misc(val, pll); } -static void pll_clk_start_ss(struct tegra_clk_pll *pll) -{ - if (pll->params->defaults_set && pll->params->ssc_ctrl_reg) { - u32 val = pll_readl(pll->params->ssc_ctrl_reg, pll); - - val |= pll->params->ssc_ctrl_en_mask; - pll_writel(val, pll->params->ssc_ctrl_reg, pll); - } -} - -static void pll_clk_stop_ss(struct tegra_clk_pll *pll) -{ - if (pll->params->defaults_set && pll->params->ssc_ctrl_reg) { - u32 val = pll_readl(pll->params->ssc_ctrl_reg, pll); - - val &= ~pll->params->ssc_ctrl_en_mask; - pll_writel(val, pll->params->ssc_ctrl_reg, pll); - } -} - static int _program_pll(struct clk_hw *hw, struct tegra_clk_pll_freq_table *cfg, unsigned long rate) { @@ -2251,7 +2257,7 @@ tegra_clk_register_pllu_tegra114(const char *name, const char *parent_name, } #endif -#if defined(CONFIG_ARCH_TEGRA_124_SOC) || defined(CONFIG_ARCH_TEGRA_132_SOC) +#if defined(CONFIG_ARCH_TEGRA_124_SOC) || defined(CONFIG_ARCH_TEGRA_132_SOC) || defined(CONFIG_ARCH_TEGRA_210_SOC) static const struct clk_ops tegra_clk_pllss_ops = { .is_enabled = clk_pll_is_enabled, .enable = clk_pll_enable, @@ -2349,7 +2355,6 @@ struct clk *tegra_clk_register_pllre_tegra210(const char *name, struct tegra_clk_pll_params *pll_params, spinlock_t *lock, unsigned long parent_rate) { - u32 val; struct tegra_clk_pll *pll; struct clk *clk; @@ -2363,26 +2368,8 @@ struct clk *tegra_clk_register_pllre_tegra210(const char *name, if (IS_ERR(pll)) return ERR_CAST(pll); - /* program minimum rate by default */ - - val = pll_readl_base(pll); - if (val & PLL_BASE_ENABLE) - WARN_ON(readl_relaxed(clk_base + pll_params->iddq_reg) & - BIT(pll_params->iddq_bit_idx)); - else { - val = 0x4 << divm_shift(pll); - val |= 0x41 << divn_shift(pll); - pll_writel_base(val, pll); - } - - /* disable lock override */ - - val = pll_readl_misc(pll); - val &= ~BIT(29); - pll_writel_misc(val, pll); - clk = _tegra_clk_register_pll(pll, name, parent_name, flags, - &tegra_clk_pllre_ops); + &tegra_clk_pll_ops); if (IS_ERR(clk)) kfree(pll); @@ -2604,46 +2591,6 @@ struct clk *tegra_clk_register_pllc_tegra210(const char *name, return clk; } -struct clk *tegra_clk_register_pllxc_tegra210(const char *name, - const char *parent_name, void __iomem *clk_base, - void __iomem *pmc, unsigned long flags, - struct tegra_clk_pll_params *pll_params, - spinlock_t *lock) -{ - struct tegra_clk_pll *pll; - struct clk *clk, *parent; - unsigned long parent_rate; - - parent = __clk_lookup(parent_name); - if (!parent) { - WARN(1, "parent clk %s of %s must be registered first\n", - name, parent_name); - return ERR_PTR(-EINVAL); - } - - if (!pll_params->pdiv_tohw) - return ERR_PTR(-EINVAL); - - parent_rate = clk_get_rate(parent); - - pll_params->vco_min = _clip_vco_min(pll_params->vco_min, parent_rate); - - if (pll_params->adjust_vco) - pll_params->vco_min = pll_params->adjust_vco(pll_params, - parent_rate); - - pll = _tegra_init_pll(clk_base, pmc, pll_params, lock); - if (IS_ERR(pll)) - return ERR_CAST(pll); - - clk = _tegra_clk_register_pll(pll, name, parent_name, flags, - &tegra_clk_pll_ops); - if (IS_ERR(clk)) - kfree(pll); - - return clk; -} - struct clk *tegra_clk_register_pllss_tegra210(const char *name, const char *parent_name, void __iomem *clk_base, unsigned long flags, @@ -2652,10 +2599,8 @@ struct clk *tegra_clk_register_pllss_tegra210(const char *name, { struct tegra_clk_pll *pll; struct clk *clk, *parent; - struct tegra_clk_pll_freq_table cfg; 
unsigned long parent_rate; u32 val; - int i; if (!pll_params->div_nmp) return ERR_PTR(-EINVAL); @@ -2667,13 +2612,11 @@ struct clk *tegra_clk_register_pllss_tegra210(const char *name, return ERR_PTR(-EINVAL); } - pll = _tegra_init_pll(clk_base, NULL, pll_params, lock); - if (IS_ERR(pll)) - return ERR_CAST(pll); - - val = pll_readl_base(pll); - val &= ~PLLSS_REF_SRC_SEL_MASK; - pll_writel_base(val, pll); + val = readl_relaxed(clk_base + pll_params->base_reg); + if (val & PLLSS_REF_SRC_SEL_MASK) { + WARN(1, "not supported reference clock for %s\n", name); + return ERR_PTR(-EINVAL); + } parent_rate = clk_get_rate(parent); @@ -2683,36 +2626,10 @@ struct clk *tegra_clk_register_pllss_tegra210(const char *name, pll_params->vco_min = pll_params->adjust_vco(pll_params, parent_rate); - /* initialize PLL to minimum rate */ - - cfg.m = _pll_fixed_mdiv(pll_params, parent_rate); - cfg.n = cfg.m * pll_params->vco_min / parent_rate; - - for (i = 0; pll_params->pdiv_tohw[i].pdiv; i++) - ; - if (!i) { - kfree(pll); - return ERR_PTR(-EINVAL); - } - - cfg.p = pll_params->pdiv_tohw[i-1].hw_val; - - _update_pll_mnp(pll, &cfg); - - pll_writel_misc(PLLSS_MISC_DEFAULT, pll); - - val = pll_readl_base(pll); - if (val & PLL_BASE_ENABLE) { - if (val & BIT(pll_params->iddq_bit_idx)) { - WARN(1, "%s is on but IDDQ set\n", name); - kfree(pll); - return ERR_PTR(-EINVAL); - } - } else - val |= BIT(pll_params->iddq_bit_idx); - - val &= ~PLLSS_LOCK_OVERRIDE; - pll_writel_base(val, pll); + pll_params->flags |= TEGRA_PLL_BYPASS; + pll = _tegra_init_pll(clk_base, NULL, pll_params, lock); + if (IS_ERR(pll)) + return ERR_CAST(pll); clk = _tegra_clk_register_pll(pll, name, parent_name, flags, &tegra_clk_pll_ops); diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c index 294bfe40a4f5..848255cc0209 100644 --- a/drivers/clk/tegra/clk-tegra-periph.c +++ b/drivers/clk/tegra/clk-tegra-periph.c @@ -216,7 +216,8 @@ _clk_num, _clk_id) \ TEGRA_INIT_DATA_TABLE(_name, NULL, NULL, _parents, _offset,\ 30, MASK(2), 0, 0, 16, 0, TEGRA_DIVIDER_ROUND_UP,\ - _clk_num, 0, _clk_id, _parents##_idx, 0, NULL) + _clk_num, TEGRA_PERIPH_ON_APB, _clk_id, \ + _parents##_idx, 0, NULL) #define XUSB(_name, _parents, _offset, \ _clk_num, _gate_flags, _clk_id) \ diff --git a/drivers/clk/tegra/clk-tegra-super-gen4.c b/drivers/clk/tegra/clk-tegra-super-gen4.c index 474de0f0c26d..4f6fd307cb70 100644 --- a/drivers/clk/tegra/clk-tegra-super-gen4.c +++ b/drivers/clk/tegra/clk-tegra-super-gen4.c @@ -232,8 +232,15 @@ static void __init tegra_super_clk_init(void __iomem *clk_base, if (!dt_clk) return; - clk = tegra_clk_register_pllxc("pll_x", "pll_ref", clk_base, - pmc_base, CLK_IGNORE_UNUSED, params, NULL); +#if defined(CONFIG_ARCH_TEGRA_210_SOC) + if (gen_info->gen == gen5) + clk = tegra_clk_register_pllc_tegra210("pll_x", "pll_ref", + clk_base, pmc_base, CLK_IGNORE_UNUSED, params, NULL); + else +#endif + clk = tegra_clk_register_pllxc("pll_x", "pll_ref", clk_base, + pmc_base, CLK_IGNORE_UNUSED, params, NULL); + *dt_clk = clk; /* PLLX_OUT0 */ diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c index 1024e853ea65..6d7a613f2656 100644 --- a/drivers/clk/tegra/clk-tegra210.c +++ b/drivers/clk/tegra/clk-tegra210.c @@ -146,7 +146,7 @@ #define PLLD_SDM_EN_MASK BIT(16) #define PLLD2_SDM_EN_MASK BIT(31) -#define PLLD2_SSC_EN_MASK BIT(30) +#define PLLD2_SSC_EN_MASK 0 #define PLLDP_SS_CFG 0x598 #define PLLDP_SDM_EN_MASK BIT(31) @@ -241,6 +241,9 @@ #define PLL_SDM_COEFF BIT(13) #define sdin_din_to_data(din) 
((u16)((din) ? : 0xFFFFU)) #define sdin_data_to_din(dat) (((dat) == 0xFFFFU) ? 0 : (s16)dat) +/* This macro returns ndiv effective scaled to SDM range */ +#define sdin_get_n_eff(cfg) ((cfg)->n * PLL_SDM_COEFF + ((cfg)->sdm_data ? \ + (PLL_SDM_COEFF/2 + sdin_data_to_din((cfg)->sdm_data)) : 0)) /* Tegra CPU clock and reset control regs */ #define CLK_RST_CONTROLLER_CPU_CMPLX_STATUS 0x470 @@ -715,8 +718,6 @@ static void plldss_defaults(const char *pll_name, struct tegra_clk_pll *plldss, plldss->params->defaults_set = true; if (val & PLL_ENABLE) { - pr_warn("%s already enabled. Postponing set full defaults\n", - pll_name); /* * PLL is ON: check if defaults already set, then set those @@ -755,6 +756,10 @@ static void plldss_defaults(const char *pll_name, struct tegra_clk_pll *plldss, (~PLLDSS_MISC1_CFG_EN_SDM)); } + if (!plldss->params->defaults_set) + pr_warn("%s already enabled. Postponing set full defaults\n", + pll_name); + /* Enable lock detect */ if (val & PLLDSS_BASE_LOCK_OVERRIDE) { val &= ~PLLDSS_BASE_LOCK_OVERRIDE; @@ -1288,8 +1293,7 @@ static int tegra210_pll_fixed_mdiv_cfg(struct clk_hw *hw, s -= PLL_SDM_COEFF / 2; cfg->sdm_data = sdin_din_to_data(s); } - cfg->output_rate *= cfg->n * PLL_SDM_COEFF + PLL_SDM_COEFF/2 + - sdin_data_to_din(cfg->sdm_data); + cfg->output_rate *= sdin_get_n_eff(cfg); cfg->output_rate /= p * cfg->m * PLL_SDM_COEFF; } else { cfg->output_rate *= cfg->n; @@ -1314,8 +1318,7 @@ static int tegra210_pll_fixed_mdiv_cfg(struct clk_hw *hw, */ static void tegra210_clk_pll_set_gain(struct tegra_clk_pll_freq_table *cfg) { - cfg->n = cfg->n * PLL_SDM_COEFF + PLL_SDM_COEFF/2 + - sdin_data_to_din(cfg->sdm_data); + cfg->n = sdin_get_n_eff(cfg); cfg->m *= PLL_SDM_COEFF; } @@ -2204,7 +2207,6 @@ static struct tegra_clk tegra210_clks[tegra_clk_max] __initdata = { [tegra_clk_gpu] = { .dt_id = TEGRA210_CLK_GPU, .present = true }, [tegra_clk_pll_g_ref] = { .dt_id = TEGRA210_CLK_PLL_G_REF, .present = true, }, [tegra_clk_uartb_8] = { .dt_id = TEGRA210_CLK_UARTB, .present = true }, - [tegra_clk_vfir] = { .dt_id = TEGRA210_CLK_VFIR, .present = true }, [tegra_clk_spdif_in_8] = { .dt_id = TEGRA210_CLK_SPDIF_IN, .present = true }, [tegra_clk_spdif_out] = { .dt_id = TEGRA210_CLK_SPDIF_OUT, .present = true }, [tegra_clk_vi_10] = { .dt_id = TEGRA210_CLK_VI, .present = true }, @@ -2470,15 +2472,14 @@ static void tegra210_utmi_param_configure(void) reg |= UTMIP_PLL_CFG2_STABLE_COUNT(utmi_parameters[i].stable_count); reg &= ~UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(~0); - reg |= UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(utmi_parameters[i].active_delay_count); writel_relaxed(reg, clk_base + UTMIP_PLL_CFG2); /* Program UTMIP PLL delay and oscillator frequency counts */ reg = readl_relaxed(clk_base + UTMIP_PLL_CFG1); - reg &= ~UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(~0); + reg &= ~UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(~0); reg |= UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(utmi_parameters[i].enable_delay_count); @@ -2494,7 +2495,8 @@ static void tegra210_utmi_param_configure(void) reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN; reg |= UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERUP; writel_relaxed(reg, clk_base + UTMIP_PLL_CFG1); - udelay(1); + + udelay(20); /* Enable samplers for SNPS, XUSB_HOST, XUSB_DEV */ reg = readl_relaxed(clk_base + UTMIP_PLL_CFG2); @@ -2552,6 +2554,7 @@ static int tegra210_enable_pllu(void) reg = readl_relaxed(clk_base + pllu.params->ext_misc_reg[0]); reg &= ~BIT(pllu.params->iddq_bit_idx); writel_relaxed(reg, clk_base + pllu.params->ext_misc_reg[0]); + udelay(5); reg = readl_relaxed(clk_base + PLLU_BASE); reg &= 
~GENMASK(20, 0); @@ -2559,6 +2562,7 @@ static int tegra210_enable_pllu(void) reg |= fentry->n << 8; reg |= fentry->p << 16; writel(reg, clk_base + PLLU_BASE); + udelay(1); reg |= PLL_ENABLE; writel(reg, clk_base + PLLU_BASE); @@ -2699,7 +2703,7 @@ static void __init tegra210_pll_init(void __iomem *clk_base, struct clk *clk; /* PLLC */ - clk = tegra_clk_register_pllxc_tegra210("pll_c", "pll_ref", clk_base, + clk = tegra_clk_register_pllc_tegra210("pll_c", "pll_ref", clk_base, pmc, 0, &pll_c_params, NULL); if (!WARN_ON(IS_ERR(clk))) clk_register_clkdev(clk, "pll_c", NULL); @@ -2798,14 +2802,14 @@ static void __init tegra210_pll_init(void __iomem *clk_base, /* PLLU_60M */ clk = clk_register_gate(NULL, "pll_u_60M", "pll_u_out2", CLK_SET_RATE_PARENT, clk_base + PLLU_BASE, - 23, 0, NULL); + 23, 0, &pll_u_lock); clk_register_clkdev(clk, "pll_u_60M", NULL); clks[TEGRA210_CLK_PLL_U_60M] = clk; /* PLLU_48M */ clk = clk_register_gate(NULL, "pll_u_48M", "pll_u_out1", CLK_SET_RATE_PARENT, clk_base + PLLU_BASE, - 25, 0, NULL); + 25, 0, &pll_u_lock); clk_register_clkdev(clk, "pll_u_48M", NULL); clks[TEGRA210_CLK_PLL_U_48M] = clk; diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h index 945b07093afa..872f1189ad7f 100644 --- a/drivers/clk/tegra/clk.h +++ b/drivers/clk/tegra/clk.h @@ -362,12 +362,6 @@ struct clk *tegra_clk_register_pllxc(const char *name, const char *parent_name, struct tegra_clk_pll_params *pll_params, spinlock_t *lock); -struct clk *tegra_clk_register_pllxc_tegra210(const char *name, - const char *parent_name, void __iomem *clk_base, - void __iomem *pmc, unsigned long flags, - struct tegra_clk_pll_params *pll_params, - spinlock_t *lock); - struct clk *tegra_clk_register_pllm(const char *name, const char *parent_name, void __iomem *clk_base, void __iomem *pmc, unsigned long flags, diff --git a/drivers/clk/ti/adpll.c b/drivers/clk/ti/adpll.c index 255cafb18336..d6036c788fab 100644 --- a/drivers/clk/ti/adpll.c +++ b/drivers/clk/ti/adpll.c @@ -222,7 +222,7 @@ static int ti_adpll_setup_clock(struct ti_adpll_data *d, struct clk *clock, /* Separate con_id in format "pll040dcoclkldo" to fit MAX_CON_ID */ postfix = strrchr(name, '.'); - if (strlen(postfix) > 1) { + if (postfix && strlen(postfix) > 1) { if (strlen(postfix) > ADPLL_MAX_CON_ID) dev_warn(d->dev, "clock %s con_id lookup may fail\n", name); @@ -486,7 +486,7 @@ static u8 ti_adpll_get_parent(struct clk_hw *hw) return 0; } -static struct clk_ops ti_adpll_ops = { +static const struct clk_ops ti_adpll_ops = { .prepare = ti_adpll_prepare, .unprepare = ti_adpll_unprepare, .is_prepared = ti_adpll_is_prepared, diff --git a/drivers/clk/ti/apll.c b/drivers/clk/ti/apll.c index 06f486b3488c..83b148f8037c 100644 --- a/drivers/clk/ti/apll.c +++ b/drivers/clk/ti/apll.c @@ -304,7 +304,7 @@ static void omap2_apll_disable(struct clk_hw *hw) ti_clk_ll_ops->clk_writel(v, &ad->control_reg); } -static struct clk_ops omap2_apll_ops = { +static const struct clk_ops omap2_apll_ops = { .enable = &omap2_apll_enable, .disable = &omap2_apll_disable, .is_enabled = &omap2_apll_is_enabled, diff --git a/drivers/clk/ti/clockdomain.c b/drivers/clk/ti/clockdomain.c index fbedc6a9fed0..07a805125e98 100644 --- a/drivers/clk/ti/clockdomain.c +++ b/drivers/clk/ti/clockdomain.c @@ -138,8 +138,8 @@ static void __init of_ti_clockdomain_setup(struct device_node *node) for (i = 0; i < num_clks; i++) { clk = of_clk_get(node, i); if (IS_ERR(clk)) { - pr_err("%s: Failed get %s' clock nr %d (%ld)\n", - __func__, node->full_name, i, PTR_ERR(clk)); + pr_err("%s: Failed get 
%pOF' clock nr %d (%ld)\n", + __func__, node, i, PTR_ERR(clk)); continue; } clk_hw = __clk_get_hw(clk); diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c index 66a0d0ed8b55..071af44b1ba8 100644 --- a/drivers/clk/ti/fapll.c +++ b/drivers/clk/ti/fapll.c @@ -268,7 +268,7 @@ static int ti_fapll_set_rate(struct clk_hw *hw, unsigned long rate, return 0; } -static struct clk_ops ti_fapll_ops = { +static const struct clk_ops ti_fapll_ops = { .enable = ti_fapll_enable, .disable = ti_fapll_disable, .is_enabled = ti_fapll_is_enabled, @@ -478,7 +478,7 @@ static int ti_fapll_synth_set_rate(struct clk_hw *hw, unsigned long rate, return 0; } -static struct clk_ops ti_fapll_synt_ops = { +static const struct clk_ops ti_fapll_synt_ops = { .enable = ti_fapll_synth_enable, .disable = ti_fapll_synth_disable, .is_enabled = ti_fapll_synth_is_enabled, diff --git a/drivers/clk/uniphier/clk-uniphier-core.c b/drivers/clk/uniphier/clk-uniphier-core.c index 2cf386347f0c..e09f3dd46318 100644 --- a/drivers/clk/uniphier/clk-uniphier-core.c +++ b/drivers/clk/uniphier/clk-uniphier-core.c @@ -110,10 +110,6 @@ static int uniphier_clk_remove(struct platform_device *pdev) static const struct of_device_id uniphier_clk_match[] = { /* System clock */ - { - .compatible = "socionext,uniphier-sld3-clock", - .data = uniphier_sld3_sys_clk_data, - }, { .compatible = "socionext,uniphier-ld4-clock", .data = uniphier_ld4_sys_clk_data, @@ -142,22 +138,22 @@ static const struct of_device_id uniphier_clk_match[] = { .compatible = "socionext,uniphier-ld20-clock", .data = uniphier_ld20_sys_clk_data, }, + { + .compatible = "socionext,uniphier-pxs3-clock", + .data = uniphier_pxs3_sys_clk_data, + }, /* Media I/O clock, SD clock */ - { - .compatible = "socionext,uniphier-sld3-mio-clock", - .data = uniphier_sld3_mio_clk_data, - }, { .compatible = "socionext,uniphier-ld4-mio-clock", - .data = uniphier_sld3_mio_clk_data, + .data = uniphier_ld4_mio_clk_data, }, { .compatible = "socionext,uniphier-pro4-mio-clock", - .data = uniphier_sld3_mio_clk_data, + .data = uniphier_ld4_mio_clk_data, }, { .compatible = "socionext,uniphier-sld8-mio-clock", - .data = uniphier_sld3_mio_clk_data, + .data = uniphier_ld4_mio_clk_data, }, { .compatible = "socionext,uniphier-pro5-sd-clock", @@ -169,12 +165,16 @@ static const struct of_device_id uniphier_clk_match[] = { }, { .compatible = "socionext,uniphier-ld11-mio-clock", - .data = uniphier_sld3_mio_clk_data, + .data = uniphier_ld4_mio_clk_data, }, { .compatible = "socionext,uniphier-ld20-sd-clock", .data = uniphier_pro5_sd_clk_data, }, + { + .compatible = "socionext,uniphier-pxs3-sd-clock", + .data = uniphier_pro5_sd_clk_data, + }, /* Peripheral clock */ { .compatible = "socionext,uniphier-ld4-peri-clock", @@ -204,6 +204,10 @@ static const struct of_device_id uniphier_clk_match[] = { .compatible = "socionext,uniphier-ld20-peri-clock", .data = uniphier_pro4_peri_clk_data, }, + { + .compatible = "socionext,uniphier-pxs3-peri-clock", + .data = uniphier_pro4_peri_clk_data, + }, { /* sentinel */ } }; diff --git a/drivers/clk/uniphier/clk-uniphier-mio.c b/drivers/clk/uniphier/clk-uniphier-mio.c index 218d20f099ce..16e4d303f535 100644 --- a/drivers/clk/uniphier/clk-uniphier-mio.c +++ b/drivers/clk/uniphier/clk-uniphier-mio.c @@ -76,7 +76,7 @@ #define UNIPHIER_MIO_CLK_DMAC(idx) \ UNIPHIER_CLK_GATE("miodmac", (idx), "stdmac", 0x20, 25) -const struct uniphier_clk_data uniphier_sld3_mio_clk_data[] = { +const struct uniphier_clk_data uniphier_ld4_mio_clk_data[] = { UNIPHIER_MIO_CLK_SD_FIXED, UNIPHIER_MIO_CLK_SD(0, 0), 
UNIPHIER_MIO_CLK_SD(1, 1), @@ -85,11 +85,9 @@ const struct uniphier_clk_data uniphier_sld3_mio_clk_data[] = { UNIPHIER_MIO_CLK_USB2(8, 0), UNIPHIER_MIO_CLK_USB2(9, 1), UNIPHIER_MIO_CLK_USB2(10, 2), - UNIPHIER_MIO_CLK_USB2(11, 3), UNIPHIER_MIO_CLK_USB2_PHY(12, 0), UNIPHIER_MIO_CLK_USB2_PHY(13, 1), UNIPHIER_MIO_CLK_USB2_PHY(14, 2), - UNIPHIER_MIO_CLK_USB2_PHY(15, 3), { /* sentinel */ } }; diff --git a/drivers/clk/uniphier/clk-uniphier-sys.c b/drivers/clk/uniphier/clk-uniphier-sys.c index ad0218182a9f..0e396f3da526 100644 --- a/drivers/clk/uniphier/clk-uniphier-sys.c +++ b/drivers/clk/uniphier/clk-uniphier-sys.c @@ -17,7 +17,7 @@ #include "clk-uniphier.h" -#define UNIPHIER_SLD3_SYS_CLK_SD \ +#define UNIPHIER_LD4_SYS_CLK_SD \ UNIPHIER_CLK_FACTOR("sd-200m", -1, "spll", 1, 8), \ UNIPHIER_CLK_FACTOR("sd-133m", -1, "vpll27a", 1, 2) @@ -30,7 +30,7 @@ UNIPHIER_CLK_FACTOR("sd-133m", -1, "spll", 1, 15) /* Denali driver requires clk_x rate (clk: 50MHz, clk_x & ecc_clk: 200MHz) */ -#define UNIPHIER_SLD3_SYS_CLK_NAND(idx) \ +#define UNIPHIER_LD4_SYS_CLK_NAND(idx) \ UNIPHIER_CLK_FACTOR("nand-200m", -1, "spll", 1, 8), \ UNIPHIER_CLK_GATE("nand", (idx), "nand-200m", 0x2104, 2) @@ -45,7 +45,7 @@ #define UNIPHIER_LD11_SYS_CLK_EMMC(idx) \ UNIPHIER_CLK_GATE("emmc", (idx), NULL, 0x210c, 2) -#define UNIPHIER_SLD3_SYS_CLK_STDMAC(idx) \ +#define UNIPHIER_LD4_SYS_CLK_STDMAC(idx) \ UNIPHIER_CLK_GATE("stdmac", (idx), NULL, 0x2104, 10) #define UNIPHIER_LD11_SYS_CLK_STDMAC(idx) \ @@ -57,19 +57,23 @@ #define UNIPHIER_PRO4_SYS_CLK_USB3(idx, ch) \ UNIPHIER_CLK_GATE("usb3" #ch, (idx), NULL, 0x2104, 16 + (ch)) -const struct uniphier_clk_data uniphier_sld3_sys_clk_data[] = { - UNIPHIER_CLK_FACTOR("spll", -1, "ref", 65, 1), /* 1597.44 MHz */ - UNIPHIER_CLK_FACTOR("upll", -1, "ref", 6000, 512), /* 288 MHz */ - UNIPHIER_CLK_FACTOR("a2pll", -1, "ref", 24, 1), /* 589.824 MHz */ - UNIPHIER_CLK_FACTOR("vpll27a", -1, "ref", 5625, 512), /* 270 MHz */ - UNIPHIER_CLK_FACTOR("uart", 0, "a2pll", 1, 16), - UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 16), - UNIPHIER_SLD3_SYS_CLK_NAND(2), - UNIPHIER_SLD3_SYS_CLK_SD, - UNIPHIER_CLK_FACTOR("usb2", -1, "upll", 1, 12), - UNIPHIER_SLD3_SYS_CLK_STDMAC(8), - { /* sentinel */ } -}; +#define UNIPHIER_LD11_SYS_CLK_AIO(idx) \ + UNIPHIER_CLK_FACTOR("aio-io200m", -1, "spll", 1, 10), \ + UNIPHIER_CLK_GATE("aio", (idx), "aio-io200m", 0x2108, 0) + +#define UNIPHIER_LD11_SYS_CLK_EVEA(idx) \ + UNIPHIER_CLK_FACTOR("evea-io100m", -1, "spll", 1, 20), \ + UNIPHIER_CLK_GATE("evea", (idx), "evea-io100m", 0x2108, 1) + +#define UNIPHIER_LD11_SYS_CLK_EXIV(idx) \ + UNIPHIER_CLK_FACTOR("exiv-io200m", -1, "spll", 1, 10), \ + UNIPHIER_CLK_GATE("exiv", (idx), "exiv-io200m", 0x2110, 2) + +#define UNIPHIER_PRO4_SYS_CLK_ETHER(idx) \ + UNIPHIER_CLK_GATE("ether", (idx), NULL, 0x2104, 12) + +#define UNIPHIER_LD11_SYS_CLK_ETHER(idx) \ + UNIPHIER_CLK_GATE("ether", (idx), NULL, 0x210c, 6) const struct uniphier_clk_data uniphier_ld4_sys_clk_data[] = { UNIPHIER_CLK_FACTOR("spll", -1, "ref", 65, 1), /* 1597.44 MHz */ @@ -78,10 +82,10 @@ const struct uniphier_clk_data uniphier_ld4_sys_clk_data[] = { UNIPHIER_CLK_FACTOR("vpll27a", -1, "ref", 5625, 512), /* 270 MHz */ UNIPHIER_CLK_FACTOR("uart", 0, "a2pll", 1, 16), UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 16), - UNIPHIER_SLD3_SYS_CLK_NAND(2), - UNIPHIER_SLD3_SYS_CLK_SD, + UNIPHIER_LD4_SYS_CLK_NAND(2), + UNIPHIER_LD4_SYS_CLK_SD, UNIPHIER_CLK_FACTOR("usb2", -1, "upll", 1, 12), - UNIPHIER_SLD3_SYS_CLK_STDMAC(8), /* Ether, HSC, MIO */ + UNIPHIER_LD4_SYS_CLK_STDMAC(8), /* Ether, HSC, MIO */ { /* 
sentinel */ } }; @@ -92,10 +96,11 @@ const struct uniphier_clk_data uniphier_pro4_sys_clk_data[] = { UNIPHIER_CLK_FACTOR("vpll27a", -1, "ref", 270, 25), /* 270 MHz */ UNIPHIER_CLK_FACTOR("uart", 0, "a2pll", 1, 8), UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 32), - UNIPHIER_SLD3_SYS_CLK_NAND(2), - UNIPHIER_SLD3_SYS_CLK_SD, + UNIPHIER_LD4_SYS_CLK_NAND(2), + UNIPHIER_LD4_SYS_CLK_SD, UNIPHIER_CLK_FACTOR("usb2", -1, "upll", 1, 12), - UNIPHIER_SLD3_SYS_CLK_STDMAC(8), /* HSC, MIO, RLE */ + UNIPHIER_PRO4_SYS_CLK_ETHER(6), + UNIPHIER_LD4_SYS_CLK_STDMAC(8), /* HSC, MIO, RLE */ UNIPHIER_PRO4_SYS_CLK_GIO(12), /* Ether, SATA, USB3 */ UNIPHIER_PRO4_SYS_CLK_USB3(14, 0), UNIPHIER_PRO4_SYS_CLK_USB3(15, 1), @@ -108,10 +113,10 @@ const struct uniphier_clk_data uniphier_sld8_sys_clk_data[] = { UNIPHIER_CLK_FACTOR("vpll27a", -1, "ref", 270, 25), /* 270 MHz */ UNIPHIER_CLK_FACTOR("uart", 0, "spll", 1, 20), UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 16), - UNIPHIER_SLD3_SYS_CLK_NAND(2), - UNIPHIER_SLD3_SYS_CLK_SD, + UNIPHIER_LD4_SYS_CLK_NAND(2), + UNIPHIER_LD4_SYS_CLK_SD, UNIPHIER_CLK_FACTOR("usb2", -1, "upll", 1, 12), - UNIPHIER_SLD3_SYS_CLK_STDMAC(8), /* Ether, HSC, MIO */ + UNIPHIER_LD4_SYS_CLK_STDMAC(8), /* Ether, HSC, MIO */ { /* sentinel */ } }; @@ -123,7 +128,7 @@ const struct uniphier_clk_data uniphier_pro5_sys_clk_data[] = { UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 48), UNIPHIER_PRO5_SYS_CLK_NAND(2), UNIPHIER_PRO5_SYS_CLK_SD, - UNIPHIER_SLD3_SYS_CLK_STDMAC(8), /* HSC */ + UNIPHIER_LD4_SYS_CLK_STDMAC(8), /* HSC */ UNIPHIER_PRO4_SYS_CLK_GIO(12), /* PCIe, USB3 */ UNIPHIER_PRO4_SYS_CLK_USB3(14, 0), UNIPHIER_PRO4_SYS_CLK_USB3(15, 1), @@ -136,7 +141,8 @@ const struct uniphier_clk_data uniphier_pxs2_sys_clk_data[] = { UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 48), UNIPHIER_PRO5_SYS_CLK_NAND(2), UNIPHIER_PRO5_SYS_CLK_SD, - UNIPHIER_SLD3_SYS_CLK_STDMAC(8), /* HSC, RLE */ + UNIPHIER_PRO4_SYS_CLK_ETHER(6), + UNIPHIER_LD4_SYS_CLK_STDMAC(8), /* HSC, RLE */ /* GIO is always clock-enabled: no function for 0x2104 bit6 */ UNIPHIER_PRO4_SYS_CLK_USB3(14, 0), UNIPHIER_PRO4_SYS_CLK_USB3(15, 1), @@ -156,8 +162,12 @@ const struct uniphier_clk_data uniphier_ld11_sys_clk_data[] = { UNIPHIER_LD11_SYS_CLK_NAND(2), UNIPHIER_LD11_SYS_CLK_EMMC(4), /* Index 5 reserved for eMMC PHY */ + UNIPHIER_LD11_SYS_CLK_ETHER(6), UNIPHIER_LD11_SYS_CLK_STDMAC(8), /* HSC, MIO */ UNIPHIER_CLK_FACTOR("usb2", -1, "ref", 24, 25), + UNIPHIER_LD11_SYS_CLK_AIO(40), + UNIPHIER_LD11_SYS_CLK_EVEA(41), + UNIPHIER_LD11_SYS_CLK_EXIV(42), /* CPU gears */ UNIPHIER_CLK_DIV4("cpll", 2, 3, 4, 8), UNIPHIER_CLK_DIV4("mpll", 2, 3, 4, 8), @@ -185,6 +195,7 @@ const struct uniphier_clk_data uniphier_ld20_sys_clk_data[] = { UNIPHIER_LD11_SYS_CLK_EMMC(4), /* Index 5 reserved for eMMC PHY */ UNIPHIER_LD20_SYS_CLK_SD, + UNIPHIER_LD11_SYS_CLK_ETHER(6), UNIPHIER_LD11_SYS_CLK_STDMAC(8), /* HSC */ /* GIO is always clock-enabled: no function for 0x210c bit5 */ /* @@ -194,6 +205,9 @@ const struct uniphier_clk_data uniphier_ld20_sys_clk_data[] = { UNIPHIER_CLK_GATE("usb30", 14, NULL, 0x210c, 14), UNIPHIER_CLK_GATE("usb30-phy0", 16, NULL, 0x210c, 12), UNIPHIER_CLK_GATE("usb30-phy1", 17, NULL, 0x210c, 13), + UNIPHIER_LD11_SYS_CLK_AIO(40), + UNIPHIER_LD11_SYS_CLK_EVEA(41), + UNIPHIER_LD11_SYS_CLK_EXIV(42), /* CPU gears */ UNIPHIER_CLK_DIV4("cpll", 2, 3, 4, 8), UNIPHIER_CLK_DIV4("spll", 2, 3, 4, 8), @@ -209,3 +223,33 @@ const struct uniphier_clk_data uniphier_ld20_sys_clk_data[] = { "spll/4", "spll/8", "s2pll/4", "s2pll/8"), { /* sentinel */ } }; + +const struct uniphier_clk_data 
uniphier_pxs3_sys_clk_data[] = { + UNIPHIER_CLK_FACTOR("cpll", -1, "ref", 104, 1), /* ARM: 2600 MHz */ + UNIPHIER_CLK_FACTOR("spll", -1, "ref", 80, 1), /* 2000 MHz */ + UNIPHIER_CLK_FACTOR("s2pll", -1, "ref", 88, 1), /* IPP: 2400 MHz */ + UNIPHIER_CLK_FACTOR("uart", 0, "spll", 1, 34), + UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 40), + UNIPHIER_LD20_SYS_CLK_SD, + UNIPHIER_LD11_SYS_CLK_NAND(2), + UNIPHIER_LD11_SYS_CLK_EMMC(4), + UNIPHIER_CLK_GATE("usb30", 12, NULL, 0x2104, 4), /* =GIO0 */ + UNIPHIER_CLK_GATE("usb31-0", 13, NULL, 0x2104, 5), /* =GIO1 */ + UNIPHIER_CLK_GATE("usb31-1", 14, NULL, 0x2104, 6), /* =GIO1-1 */ + UNIPHIER_CLK_GATE("usb30-phy0", 16, NULL, 0x210c, 16), + UNIPHIER_CLK_GATE("usb30-phy1", 17, NULL, 0x210c, 18), + UNIPHIER_CLK_GATE("usb30-phy2", 18, NULL, 0x210c, 20), + UNIPHIER_CLK_GATE("usb31-phy0", 20, NULL, 0x210c, 17), + UNIPHIER_CLK_GATE("usb31-phy1", 21, NULL, 0x210c, 19), + /* CPU gears */ + UNIPHIER_CLK_DIV4("cpll", 2, 3, 4, 8), + UNIPHIER_CLK_DIV4("spll", 2, 3, 4, 8), + UNIPHIER_CLK_DIV4("s2pll", 2, 3, 4, 8), + UNIPHIER_CLK_CPUGEAR("cpu-ca53", 33, 0x8080, 0xf, 8, + "cpll/2", "spll/2", "cpll/3", "spll/3", + "spll/4", "spll/8", "cpll/4", "cpll/8"), + UNIPHIER_CLK_CPUGEAR("cpu-ipp", 34, 0x8100, 0xf, 8, + "s2pll/2", "spll/2", "s2pll/3", "spll/3", + "spll/4", "spll/8", "s2pll/4", "s2pll/8"), + { /* sentinel */ } +}; diff --git a/drivers/clk/uniphier/clk-uniphier.h b/drivers/clk/uniphier/clk-uniphier.h index 01c16ecec48f..d10a009ada96 100644 --- a/drivers/clk/uniphier/clk-uniphier.h +++ b/drivers/clk/uniphier/clk-uniphier.h @@ -147,7 +147,6 @@ struct clk_hw *uniphier_clk_register_mux(struct device *dev, const char *name, const struct uniphier_clk_mux_data *data); -extern const struct uniphier_clk_data uniphier_sld3_sys_clk_data[]; extern const struct uniphier_clk_data uniphier_ld4_sys_clk_data[]; extern const struct uniphier_clk_data uniphier_pro4_sys_clk_data[]; extern const struct uniphier_clk_data uniphier_sld8_sys_clk_data[]; @@ -155,7 +154,8 @@ extern const struct uniphier_clk_data uniphier_pro5_sys_clk_data[]; extern const struct uniphier_clk_data uniphier_pxs2_sys_clk_data[]; extern const struct uniphier_clk_data uniphier_ld11_sys_clk_data[]; extern const struct uniphier_clk_data uniphier_ld20_sys_clk_data[]; -extern const struct uniphier_clk_data uniphier_sld3_mio_clk_data[]; +extern const struct uniphier_clk_data uniphier_pxs3_sys_clk_data[]; +extern const struct uniphier_clk_data uniphier_ld4_mio_clk_data[]; extern const struct uniphier_clk_data uniphier_pro5_sd_clk_data[]; extern const struct uniphier_clk_data uniphier_ld4_peri_clk_data[]; extern const struct uniphier_clk_data uniphier_pro4_peri_clk_data[]; diff --git a/drivers/clk/ux500/clk-prcc.c b/drivers/clk/ux500/clk-prcc.c index 0e950769ed03..f50592775c9d 100644 --- a/drivers/clk/ux500/clk-prcc.c +++ b/drivers/clk/ux500/clk-prcc.c @@ -79,13 +79,13 @@ static int clk_prcc_is_enabled(struct clk_hw *hw) return clk->is_enabled; } -static struct clk_ops clk_prcc_pclk_ops = { +static const struct clk_ops clk_prcc_pclk_ops = { .enable = clk_prcc_pclk_enable, .disable = clk_prcc_pclk_disable, .is_enabled = clk_prcc_is_enabled, }; -static struct clk_ops clk_prcc_kclk_ops = { +static const struct clk_ops clk_prcc_kclk_ops = { .enable = clk_prcc_kclk_enable, .disable = clk_prcc_kclk_disable, .is_enabled = clk_prcc_is_enabled, @@ -96,7 +96,7 @@ static struct clk *clk_reg_prcc(const char *name, resource_size_t phy_base, u32 cg_sel, unsigned long flags, - struct clk_ops *clk_prcc_ops) + const struct clk_ops 
*clk_prcc_ops) { struct clk_prcc *clk; struct clk_init_data clk_prcc_init; diff --git a/drivers/clk/ux500/clk-prcmu.c b/drivers/clk/ux500/clk-prcmu.c index 7f343821f4e4..6e3e16b2e5ca 100644 --- a/drivers/clk/ux500/clk-prcmu.c +++ b/drivers/clk/ux500/clk-prcmu.c @@ -186,7 +186,7 @@ static void clk_prcmu_opp_volt_unprepare(struct clk_hw *hw) clk->is_prepared = 0; } -static struct clk_ops clk_prcmu_scalable_ops = { +static const struct clk_ops clk_prcmu_scalable_ops = { .prepare = clk_prcmu_prepare, .unprepare = clk_prcmu_unprepare, .is_prepared = clk_prcmu_is_prepared, @@ -198,7 +198,7 @@ static struct clk_ops clk_prcmu_scalable_ops = { .set_rate = clk_prcmu_set_rate, }; -static struct clk_ops clk_prcmu_gate_ops = { +static const struct clk_ops clk_prcmu_gate_ops = { .prepare = clk_prcmu_prepare, .unprepare = clk_prcmu_unprepare, .is_prepared = clk_prcmu_is_prepared, @@ -208,19 +208,19 @@ static struct clk_ops clk_prcmu_gate_ops = { .recalc_rate = clk_prcmu_recalc_rate, }; -static struct clk_ops clk_prcmu_scalable_rate_ops = { +static const struct clk_ops clk_prcmu_scalable_rate_ops = { .is_enabled = clk_prcmu_is_enabled, .recalc_rate = clk_prcmu_recalc_rate, .round_rate = clk_prcmu_round_rate, .set_rate = clk_prcmu_set_rate, }; -static struct clk_ops clk_prcmu_rate_ops = { +static const struct clk_ops clk_prcmu_rate_ops = { .is_enabled = clk_prcmu_is_enabled, .recalc_rate = clk_prcmu_recalc_rate, }; -static struct clk_ops clk_prcmu_opp_gate_ops = { +static const struct clk_ops clk_prcmu_opp_gate_ops = { .prepare = clk_prcmu_opp_prepare, .unprepare = clk_prcmu_opp_unprepare, .is_prepared = clk_prcmu_is_prepared, @@ -230,7 +230,7 @@ static struct clk_ops clk_prcmu_opp_gate_ops = { .recalc_rate = clk_prcmu_recalc_rate, }; -static struct clk_ops clk_prcmu_opp_volt_scalable_ops = { +static const struct clk_ops clk_prcmu_opp_volt_scalable_ops = { .prepare = clk_prcmu_opp_volt_prepare, .unprepare = clk_prcmu_opp_volt_unprepare, .is_prepared = clk_prcmu_is_prepared, @@ -247,7 +247,7 @@ static struct clk *clk_reg_prcmu(const char *name, u8 cg_sel, unsigned long rate, unsigned long flags, - struct clk_ops *clk_prcmu_ops) + const struct clk_ops *clk_prcmu_ops) { struct clk_prcmu *clk; struct clk_init_data clk_prcmu_init; diff --git a/drivers/clk/ux500/clk-sysctrl.c b/drivers/clk/ux500/clk-sysctrl.c index 266ddea630d2..8a4e93ce1e42 100644 --- a/drivers/clk/ux500/clk-sysctrl.c +++ b/drivers/clk/ux500/clk-sysctrl.c @@ -98,18 +98,18 @@ static u8 clk_sysctrl_get_parent(struct clk_hw *hw) return clk->parent_index; } -static struct clk_ops clk_sysctrl_gate_ops = { +static const struct clk_ops clk_sysctrl_gate_ops = { .prepare = clk_sysctrl_prepare, .unprepare = clk_sysctrl_unprepare, }; -static struct clk_ops clk_sysctrl_gate_fixed_rate_ops = { +static const struct clk_ops clk_sysctrl_gate_fixed_rate_ops = { .prepare = clk_sysctrl_prepare, .unprepare = clk_sysctrl_unprepare, .recalc_rate = clk_sysctrl_recalc_rate, }; -static struct clk_ops clk_sysctrl_set_parent_ops = { +static const struct clk_ops clk_sysctrl_set_parent_ops = { .set_parent = clk_sysctrl_set_parent, .get_parent = clk_sysctrl_get_parent, }; @@ -124,7 +124,7 @@ static struct clk *clk_reg_sysctrl(struct device *dev, unsigned long rate, unsigned long enable_delay_us, unsigned long flags, - struct clk_ops *clk_sysctrl_ops) + const struct clk_ops *clk_sysctrl_ops) { struct clk_sysctrl *clk; struct clk_init_data clk_sysctrl_init; diff --git a/drivers/clk/versatile/clk-vexpress-osc.c b/drivers/clk/versatile/clk-vexpress-osc.c index 
7e5add7d7752..e7a868b83fe5 100644 --- a/drivers/clk/versatile/clk-vexpress-osc.c +++ b/drivers/clk/versatile/clk-vexpress-osc.c @@ -61,7 +61,7 @@ static int vexpress_osc_set_rate(struct clk_hw *hw, unsigned long rate, return regmap_write(osc->reg, 0, rate); } -static struct clk_ops vexpress_osc_ops = { +static const struct clk_ops vexpress_osc_ops = { .recalc_rate = vexpress_osc_recalc_rate, .round_rate = vexpress_osc_round_rate, .set_rate = vexpress_osc_set_rate, diff --git a/drivers/clk/zte/clk-zx296718.c b/drivers/clk/zte/clk-zx296718.c index 27f853d4c76b..354dd508c516 100644 --- a/drivers/clk/zte/clk-zx296718.c +++ b/drivers/clk/zte/clk-zx296718.c @@ -451,7 +451,7 @@ static struct zx_clk_fixed_factor top_ffactor_clk[] = { FFACTOR(0, "emmc_mux_div2", "emmc_mux", 1, 2, CLK_SET_RATE_PARENT), }; -static struct clk_div_table noc_div_table[] = { +static const struct clk_div_table noc_div_table[] = { { .val = 1, .div = 2, }, { .val = 3, .div = 4, }, }; @@ -644,7 +644,7 @@ static int __init top_clocks_init(struct device_node *np) return 0; } -static struct clk_div_table common_even_div_table[] = { +static const struct clk_div_table common_even_div_table[] = { { .val = 0, .div = 1, }, { .val = 1, .div = 2, }, { .val = 3, .div = 4, }, @@ -656,7 +656,7 @@ static struct clk_div_table common_even_div_table[] = { { .val = 15, .div = 16, }, }; -static struct clk_div_table common_div_table[] = { +static const struct clk_div_table common_div_table[] = { { .val = 0, .div = 1, }, { .val = 1, .div = 2, }, { .val = 2, .div = 3, }, diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c index 17b861ea2626..ae3167c28b12 100644 --- a/drivers/clocksource/mips-gic-timer.c +++ b/drivers/clocksource/mips-gic-timer.c @@ -10,25 +10,45 @@ #include #include #include -#include #include #include #include #include #include +#include static DEFINE_PER_CPU(struct clock_event_device, gic_clockevent_device); static int gic_timer_irq; static unsigned int gic_frequency; +static u64 notrace gic_read_count(void) +{ + unsigned int hi, hi2, lo; + + if (mips_cm_is64) + return read_gic_counter(); + + do { + hi = read_gic_counter_32h(); + lo = read_gic_counter_32l(); + hi2 = read_gic_counter_32h(); + } while (hi2 != hi); + + return (((u64) hi) << 32) + lo; +} + static int gic_next_event(unsigned long delta, struct clock_event_device *evt) { + unsigned long flags; u64 cnt; int res; cnt = gic_read_count(); cnt += (u64)delta; - gic_write_cpu_compare(cnt, cpumask_first(evt->cpumask)); + local_irq_save(flags); + write_gic_vl_other(mips_cm_vp_id(cpumask_first(evt->cpumask))); + write_gic_vo_compare(cnt); + local_irq_restore(flags); res = ((int)(gic_read_count() - cnt) >= 0) ? -ETIME : 0; return res; } @@ -37,7 +57,7 @@ static irqreturn_t gic_compare_interrupt(int irq, void *dev_id) { struct clock_event_device *cd = dev_id; - gic_write_compare(gic_read_compare()); + write_gic_vl_compare(read_gic_vl_compare()); cd->event_handler(cd); return IRQ_HANDLED; } @@ -139,10 +159,15 @@ static struct clocksource gic_clocksource = { static int __init __gic_clocksource_init(void) { + unsigned int count_width; int ret; /* Set clocksource mask. */ - gic_clocksource.mask = CLOCKSOURCE_MASK(gic_get_count_width()); + count_width = read_gic_config() & GIC_CONFIG_COUNTBITS; + count_width >>= __fls(GIC_CONFIG_COUNTBITS); + count_width *= 4; + count_width += 32; + gic_clocksource.mask = CLOCKSOURCE_MASK(count_width); /* Calculate a somewhat reasonable rating value. 
*/ gic_clocksource.rating = 200 + gic_frequency / 10000000; @@ -159,7 +184,7 @@ static int __init gic_clocksource_of_init(struct device_node *node) struct clk *clk; int ret; - if (!gic_present || !node->parent || + if (!mips_gic_present() || !node->parent || !of_device_is_compatible(node->parent, "mti,gic")) { pr_warn("No DT definition for the mips gic driver\n"); return -ENXIO; @@ -197,7 +222,7 @@ static int __init gic_clocksource_of_init(struct device_node *node) } /* And finally start the counter */ - gic_start_count(); + clear_gic_config(GIC_CONFIG_COUNTSTOP); return 0; } diff --git a/drivers/clocksource/numachip.c b/drivers/clocksource/numachip.c index 6a20dc8b253f..9a7d7f0f23fe 100644 --- a/drivers/clocksource/numachip.c +++ b/drivers/clocksource/numachip.c @@ -43,7 +43,7 @@ static int numachip2_set_next_event(unsigned long delta, struct clock_event_devi return 0; } -static struct clock_event_device numachip2_clockevent = { +static const struct clock_event_device numachip2_clockevent __initconst = { .name = "numachip2", .rating = 400, .set_next_event = numachip2_set_next_event, diff --git a/drivers/clocksource/timer-integrator-ap.c b/drivers/clocksource/timer-integrator-ap.c index 2ff64d9d4fb3..62d24690ba02 100644 --- a/drivers/clocksource/timer-integrator-ap.c +++ b/drivers/clocksource/timer-integrator-ap.c @@ -36,8 +36,8 @@ static u64 notrace integrator_read_sched_clock(void) return -readl(sched_clk_base + TIMER_VALUE); } -static int integrator_clocksource_init(unsigned long inrate, - void __iomem *base) +static int __init integrator_clocksource_init(unsigned long inrate, + void __iomem *base) { u32 ctrl = TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC; unsigned long rate = inrate; diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 2011fec2d6ad..bdce4488ded1 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -71,15 +71,6 @@ config ARM_HIGHBANK_CPUFREQ If in doubt, say N. -config ARM_DB8500_CPUFREQ - tristate "ST-Ericsson DB8500 cpufreq" if COMPILE_TEST && !ARCH_U8500 - default ARCH_U8500 - depends on HAS_IOMEM - depends on !CPU_THERMAL || THERMAL - help - This adds the CPUFreq driver for ST-Ericsson Ux500 (DB8500) SoC - series. - config ARM_IMX6Q_CPUFREQ tristate "Freescale i.MX6 cpufreq support" depends on ARCH_MXC @@ -96,14 +87,13 @@ config ARM_KIRKWOOD_CPUFREQ This adds the CPUFreq driver for Marvell Kirkwood SoCs. -config ARM_MT8173_CPUFREQ - tristate "Mediatek MT8173 CPUFreq support" +config ARM_MEDIATEK_CPUFREQ + tristate "CPU Frequency scaling support for MediaTek SoCs" depends on ARCH_MEDIATEK && REGULATOR - depends on ARM64 || (ARM_CPU_TOPOLOGY && COMPILE_TEST) depends on !CPU_THERMAL || THERMAL select PM_OPP help - This adds the CPUFreq driver support for Mediatek MT8173 SoC. + This adds the CPUFreq driver support for MediaTek SoCs. config ARM_OMAP2PLUS_CPUFREQ bool "TI OMAP2+" @@ -242,6 +232,11 @@ config ARM_STI_CPUFREQ this config option if you wish to add CPUFreq support for STi based SoCs. 
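The Tango entry added just below is a common Kconfig pattern: a promptless bool that defaults to y whenever its dependencies are satisfied, so it is built automatically with the platform and never appears as a user-visible choice. A minimal sketch of the same pattern, using a hypothetical ARM_SOMEPLATFORM_CPUFREQ symbol and platform symbol ARCH_SOMEPLATFORM (only CPUFREQ_DT is taken from the patch), could equally be written with the def_bool shorthand:

# Sketch only, not part of the patch: hidden helper option for platform glue.
config ARM_SOMEPLATFORM_CPUFREQ
	# No prompt string, so the option cannot be toggled from menuconfig;
	# it becomes y automatically when both dependencies are enabled.
	def_bool y
	depends on CPUFREQ_DT && ARCH_SOMEPLATFORM

Because there is no prompt, the only way to turn such an option off is to disable one of its dependencies, which is the intended behaviour for glue code that must always be present when the platform and the generic cpufreq-dt driver are selected.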
+config ARM_TANGO_CPUFREQ + bool + depends on CPUFREQ_DT && ARCH_TANGO + default y + config ARM_TEGRA20_CPUFREQ bool "Tegra20 CPUFreq support" depends on ARCH_TEGRA diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index ab3a42cd29ef..c7af9b2a255e 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -53,12 +53,11 @@ obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ) += brcmstb-avs-cpufreq.o obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o -obj-$(CONFIG_ARM_DB8500_CPUFREQ) += dbx500-cpufreq.o obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o -obj-$(CONFIG_ARM_MT8173_CPUFREQ) += mt8173-cpufreq.o +obj-$(CONFIG_ARM_MEDIATEK_CPUFREQ) += mediatek-cpufreq.o obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o obj-$(CONFIG_ARM_PXA2xx_CPUFREQ) += pxa2xx-cpufreq.o obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o @@ -75,6 +74,7 @@ obj-$(CONFIG_ARM_SA1110_CPUFREQ) += sa1110-cpufreq.o obj-$(CONFIG_ARM_SCPI_CPUFREQ) += scpi-cpufreq.o obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o obj-$(CONFIG_ARM_STI_CPUFREQ) += sti-cpufreq.o +obj-$(CONFIG_ARM_TANGO_CPUFREQ) += tango-cpufreq.o obj-$(CONFIG_ARM_TEGRA20_CPUFREQ) += tegra20-cpufreq.o obj-$(CONFIG_ARM_TEGRA124_CPUFREQ) += tegra124-cpufreq.o obj-$(CONFIG_ARM_TEGRA186_CPUFREQ) += tegra186-cpufreq.o diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c index ea6d62547b10..17504129fd77 100644 --- a/drivers/cpufreq/arm_big_little.c +++ b/drivers/cpufreq/arm_big_little.c @@ -483,11 +483,8 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy) return ret; } - if (arm_bL_ops->get_transition_latency) - policy->cpuinfo.transition_latency = - arm_bL_ops->get_transition_latency(cpu_dev); - else - policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; + policy->cpuinfo.transition_latency = + arm_bL_ops->get_transition_latency(cpu_dev); if (is_bL_switching_enabled()) per_cpu(cpu_last_req_freq, policy->cpu) = clk_get_cpu_rate(policy->cpu); @@ -622,7 +619,8 @@ int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops) return -EBUSY; } - if (!ops || !strlen(ops->name) || !ops->init_opp_table) { + if (!ops || !strlen(ops->name) || !ops->init_opp_table || + !ops->get_transition_latency) { pr_err("%s: Invalid arm_bL_ops, exiting\n", __func__); return -ENODEV; } diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index 10be285c9055..a1c3025f9df7 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c @@ -172,7 +172,6 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy) return -EFAULT; } - cpumask_set_cpu(policy->cpu, policy->cpus); cpu->cur_policy = policy; /* Set policy->cur to max now. The governors will adjust later. */ diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index 1c262923fe58..a753c50e9e41 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -9,11 +9,16 @@ #include #include +#include #include #include "cpufreq-dt.h" -static const struct of_device_id machines[] __initconst = { +/* + * Machines for which the cpufreq device is *always* created, mostly used for + * platforms using "operating-points" (V1) property. 
+ */ +static const struct of_device_id whitelist[] __initconst = { { .compatible = "allwinner,sun4i-a10", }, { .compatible = "allwinner,sun5i-a10s", }, { .compatible = "allwinner,sun5i-a13", }, @@ -22,7 +27,6 @@ static const struct of_device_id machines[] __initconst = { { .compatible = "allwinner,sun6i-a31s", }, { .compatible = "allwinner,sun7i-a20", }, { .compatible = "allwinner,sun8i-a23", }, - { .compatible = "allwinner,sun8i-a33", }, { .compatible = "allwinner,sun8i-a83t", }, { .compatible = "allwinner,sun8i-h3", }, @@ -32,7 +36,6 @@ static const struct of_device_id machines[] __initconst = { { .compatible = "arm,integrator-cp", }, { .compatible = "hisilicon,hi3660", }, - { .compatible = "hisilicon,hi6220", }, { .compatible = "fsl,imx27", }, { .compatible = "fsl,imx51", }, @@ -46,11 +49,8 @@ static const struct of_device_id machines[] __initconst = { { .compatible = "samsung,exynos3250", }, { .compatible = "samsung,exynos4210", }, { .compatible = "samsung,exynos4212", }, - { .compatible = "samsung,exynos4412", }, { .compatible = "samsung,exynos5250", }, #ifndef CONFIG_BL_SWITCHER - { .compatible = "samsung,exynos5420", }, - { .compatible = "samsung,exynos5433", }, { .compatible = "samsung,exynos5800", }, #endif @@ -67,6 +67,8 @@ static const struct of_device_id machines[] __initconst = { { .compatible = "renesas,r8a7792", }, { .compatible = "renesas,r8a7793", }, { .compatible = "renesas,r8a7794", }, + { .compatible = "renesas,r8a7795", }, + { .compatible = "renesas,r8a7796", }, { .compatible = "renesas,sh73a0", }, { .compatible = "rockchip,rk2928", }, @@ -76,17 +78,17 @@ static const struct of_device_id machines[] __initconst = { { .compatible = "rockchip,rk3188", }, { .compatible = "rockchip,rk3228", }, { .compatible = "rockchip,rk3288", }, + { .compatible = "rockchip,rk3328", }, { .compatible = "rockchip,rk3366", }, { .compatible = "rockchip,rk3368", }, { .compatible = "rockchip,rk3399", }, - { .compatible = "sigma,tango4" }, - - { .compatible = "socionext,uniphier-pro5", }, - { .compatible = "socionext,uniphier-pxs2", }, { .compatible = "socionext,uniphier-ld6b", }, - { .compatible = "socionext,uniphier-ld11", }, - { .compatible = "socionext,uniphier-ld20", }, + + { .compatible = "st-ericsson,u8500", }, + { .compatible = "st-ericsson,u8540", }, + { .compatible = "st-ericsson,u9500", }, + { .compatible = "st-ericsson,u9540", }, { .compatible = "ti,omap2", }, { .compatible = "ti,omap3", }, @@ -94,27 +96,72 @@ static const struct of_device_id machines[] __initconst = { { .compatible = "ti,omap5", }, { .compatible = "xlnx,zynq-7000", }, - - { .compatible = "zte,zx296718", }, + { .compatible = "xlnx,zynqmp", }, { } }; +/* + * Machines for which the cpufreq device is *not* created, mostly used for + * platforms using "operating-points-v2" property. 
+ */ +static const struct of_device_id blacklist[] __initconst = { + { .compatible = "calxeda,highbank", }, + { .compatible = "calxeda,ecx-2000", }, + + { .compatible = "marvell,armadaxp", }, + + { .compatible = "nvidia,tegra124", }, + + { .compatible = "st,stih407", }, + { .compatible = "st,stih410", }, + + { .compatible = "sigma,tango4", }, + + { .compatible = "ti,am33xx", }, + { .compatible = "ti,am43", }, + { .compatible = "ti,dra7", }, + + { } +}; + +static bool __init cpu0_node_has_opp_v2_prop(void) +{ + struct device_node *np = of_cpu_device_node_get(0); + bool ret = false; + + if (of_get_property(np, "operating-points-v2", NULL)) + ret = true; + + of_node_put(np); + return ret; +} + static int __init cpufreq_dt_platdev_init(void) { struct device_node *np = of_find_node_by_path("/"); const struct of_device_id *match; + const void *data = NULL; if (!np) return -ENODEV; - match = of_match_node(machines, np); - of_node_put(np); - if (!match) - return -ENODEV; + match = of_match_node(whitelist, np); + if (match) { + data = match->data; + goto create_pdev; + } + if (cpu0_node_has_opp_v2_prop() && !of_match_node(blacklist, np)) + goto create_pdev; + + of_node_put(np); + return -ENODEV; + +create_pdev: + of_node_put(np); return PTR_ERR_OR_ZERO(platform_device_register_data(NULL, "cpufreq-dt", - -1, match->data, + -1, data, sizeof(struct cpufreq_dt_platform_data))); } device_initcall(cpufreq_dt_platdev_init); diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index fef3c2160691..d83ab94d041a 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -274,6 +274,7 @@ static int cpufreq_init(struct cpufreq_policy *policy) transition_latency = CPUFREQ_ETERNAL; policy->cpuinfo.transition_latency = transition_latency; + policy->dvfs_possible_from_any_cpu = true; return 0; diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c index 5503d491b016..dbf82f36d270 100644 --- a/drivers/cpufreq/cpufreq-nforce2.c +++ b/drivers/cpufreq/cpufreq-nforce2.c @@ -357,7 +357,6 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy) /* cpuinfo and default policy values */ policy->min = policy->cpuinfo.min_freq = min_fsb * fid * 100; policy->max = policy->cpuinfo.max_freq = max_fsb * fid * 100; - policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; return 0; } @@ -369,6 +368,7 @@ static int nforce2_cpu_exit(struct cpufreq_policy *policy) static struct cpufreq_driver nforce2_driver = { .name = "nforce2", + .flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING, .verify = nforce2_verify, .target = nforce2_target, .get = nforce2_get, diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 9bf97a366029..ea43b147a7fe 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -524,6 +524,32 @@ unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy, } EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq); +unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy) +{ + unsigned int latency; + + if (policy->transition_delay_us) + return policy->transition_delay_us; + + latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC; + if (latency) { + /* + * For platforms that can change the frequency very fast (< 10 + * us), the above formula gives a decent transition delay. But + * for platforms where transition_latency is in milliseconds, it + * ends up giving unrealistic values. 
+ * + * Cap the default transition delay to 10 ms, which seems to be + * a reasonable amount of time after which we should reevaluate + * the frequency. + */ + return min(latency * LATENCY_MULTIPLIER, (unsigned int)10000); + } + + return LATENCY_MULTIPLIER; +} +EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us); + /********************************************************************* * SYSFS INTERFACE * *********************************************************************/ @@ -1817,9 +1843,10 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier); * twice in parallel for the same policy and that it will never be called in * parallel with either ->target() or ->target_index() for the same policy. * - * If CPUFREQ_ENTRY_INVALID is returned by the driver's ->fast_switch() - * callback to indicate an error condition, the hardware configuration must be - * preserved. + * Returns the actual frequency set for the CPU. + * + * If 0 is returned by the driver's ->fast_switch() callback to indicate an + * error condition, the hardware configuration must be preserved. */ unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy, unsigned int target_freq) @@ -1988,13 +2015,13 @@ static int cpufreq_init_governor(struct cpufreq_policy *policy) if (!policy->governor) return -EINVAL; - if (policy->governor->max_transition_latency && - policy->cpuinfo.transition_latency > - policy->governor->max_transition_latency) { + /* Platform doesn't want dynamic frequency switching ? */ + if (policy->governor->dynamic_switching && + cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) { struct cpufreq_governor *gov = cpufreq_fallback_governor(); if (gov) { - pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n", + pr_warn("Can't use %s governor as dynamic switching is disallowed. 
Fallback to %s governor\n", policy->governor->name, gov->name); policy->governor = gov; } else { diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 88220ff3e1c2..f20f20a77d4d 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -246,7 +246,6 @@ gov_show_one_common(sampling_rate); gov_show_one_common(sampling_down_factor); gov_show_one_common(up_threshold); gov_show_one_common(ignore_nice_load); -gov_show_one_common(min_sampling_rate); gov_show_one(cs, down_threshold); gov_show_one(cs, freq_step); @@ -254,12 +253,10 @@ gov_attr_rw(sampling_rate); gov_attr_rw(sampling_down_factor); gov_attr_rw(up_threshold); gov_attr_rw(ignore_nice_load); -gov_attr_ro(min_sampling_rate); gov_attr_rw(down_threshold); gov_attr_rw(freq_step); static struct attribute *cs_attributes[] = { - &min_sampling_rate.attr, &sampling_rate.attr, &sampling_down_factor.attr, &up_threshold.attr, @@ -297,10 +294,7 @@ static int cs_init(struct dbs_data *dbs_data) dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD; dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR; dbs_data->ignore_nice_load = 0; - dbs_data->tuners = tuners; - dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO * - jiffies_to_usecs(10); return 0; } diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index 47e24b5384b3..58d4f4e1ad6a 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -47,14 +47,11 @@ ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf, { struct dbs_data *dbs_data = to_dbs_data(attr_set); struct policy_dbs_info *policy_dbs; - unsigned int rate; int ret; - ret = sscanf(buf, "%u", &rate); + ret = sscanf(buf, "%u", &dbs_data->sampling_rate); if (ret != 1) return -EINVAL; - dbs_data->sampling_rate = max(rate, dbs_data->min_sampling_rate); - /* * We are operating under dbs_data->mutex and so the list and its * entries can't be freed concurrently. @@ -275,6 +272,9 @@ static void dbs_update_util_handler(struct update_util_data *data, u64 time, struct policy_dbs_info *policy_dbs = cdbs->policy_dbs; u64 delta_ns, lst; + if (!cpufreq_can_do_remote_dvfs(policy_dbs->policy)) + return; + /* * The work may not be allowed to be queued up right now. * Possible reasons: @@ -392,7 +392,6 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy) struct dbs_governor *gov = dbs_governor_of(policy); struct dbs_data *dbs_data; struct policy_dbs_info *policy_dbs; - unsigned int latency; int ret = 0; /* State should be equivalent to EXIT */ @@ -431,16 +430,7 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy) if (ret) goto free_policy_dbs_info; - /* policy latency is in ns. 
Convert it to us first */ - latency = policy->cpuinfo.transition_latency / 1000; - if (latency == 0) - latency = 1; - - /* Bring kernel and HW constraints together */ - dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate, - MIN_LATENCY_MULTIPLIER * latency); - dbs_data->sampling_rate = max(dbs_data->min_sampling_rate, - LATENCY_MULTIPLIER * latency); + dbs_data->sampling_rate = cpufreq_policy_transition_delay_us(policy); if (!have_governor_per_policy()) gov->gdbs_data = dbs_data; diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h index 0236ec2cd654..8463f5def0f5 100644 --- a/drivers/cpufreq/cpufreq_governor.h +++ b/drivers/cpufreq/cpufreq_governor.h @@ -41,7 +41,6 @@ enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE}; struct dbs_data { struct gov_attr_set attr_set; void *tuners; - unsigned int min_sampling_rate; unsigned int ignore_nice_load; unsigned int sampling_rate; unsigned int sampling_down_factor; @@ -160,7 +159,7 @@ void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy); #define CPUFREQ_DBS_GOVERNOR_INITIALIZER(_name_) \ { \ .name = _name_, \ - .max_transition_latency = TRANSITION_LATENCY_LIMIT, \ + .dynamic_switching = true, \ .owner = THIS_MODULE, \ .init = cpufreq_dbs_governor_init, \ .exit = cpufreq_dbs_governor_exit, \ diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 3937acf7e026..6b423eebfd5d 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -319,7 +319,6 @@ gov_show_one_common(sampling_rate); gov_show_one_common(up_threshold); gov_show_one_common(sampling_down_factor); gov_show_one_common(ignore_nice_load); -gov_show_one_common(min_sampling_rate); gov_show_one_common(io_is_busy); gov_show_one(od, powersave_bias); @@ -329,10 +328,8 @@ gov_attr_rw(up_threshold); gov_attr_rw(sampling_down_factor); gov_attr_rw(ignore_nice_load); gov_attr_rw(powersave_bias); -gov_attr_ro(min_sampling_rate); static struct attribute *od_attributes[] = { - &min_sampling_rate.attr, &sampling_rate.attr, &up_threshold.attr, &sampling_down_factor.attr, @@ -373,17 +370,8 @@ static int od_init(struct dbs_data *dbs_data) if (idle_time != -1ULL) { /* Idle micro accounting is supported. Use finer thresholds */ dbs_data->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; - /* - * In nohz/micro accounting case we set the minimum frequency - * not depending on HZ, but fixed (very low). 
- */ - dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE; } else { dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD; - - /* For correct statistics, we need 10 ticks for each measure */ - dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO * - jiffies_to_usecs(10); } dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR; diff --git a/drivers/cpufreq/dbx500-cpufreq.c b/drivers/cpufreq/dbx500-cpufreq.c deleted file mode 100644 index 4ee0431579c1..000000000000 --- a/drivers/cpufreq/dbx500-cpufreq.c +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Copyright (C) STMicroelectronics 2009 - * Copyright (C) ST-Ericsson SA 2010-2012 - * - * License Terms: GNU General Public License v2 - * Author: Sundar Iyer - * Author: Martin Persson - * Author: Jonas Aaberg - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -static struct cpufreq_frequency_table *freq_table; -static struct clk *armss_clk; -static struct thermal_cooling_device *cdev; - -static int dbx500_cpufreq_target(struct cpufreq_policy *policy, - unsigned int index) -{ - /* update armss clk frequency */ - return clk_set_rate(armss_clk, freq_table[index].frequency * 1000); -} - -static int dbx500_cpufreq_init(struct cpufreq_policy *policy) -{ - policy->clk = armss_clk; - return cpufreq_generic_init(policy, freq_table, 20 * 1000); -} - -static int dbx500_cpufreq_exit(struct cpufreq_policy *policy) -{ - if (!IS_ERR(cdev)) - cpufreq_cooling_unregister(cdev); - return 0; -} - -static void dbx500_cpufreq_ready(struct cpufreq_policy *policy) -{ - cdev = cpufreq_cooling_register(policy); - if (IS_ERR(cdev)) - pr_err("Failed to register cooling device %ld\n", PTR_ERR(cdev)); - else - pr_info("Cooling device registered: %s\n", cdev->type); -} - -static struct cpufreq_driver dbx500_cpufreq_driver = { - .flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS | - CPUFREQ_NEED_INITIAL_FREQ_CHECK, - .verify = cpufreq_generic_frequency_table_verify, - .target_index = dbx500_cpufreq_target, - .get = cpufreq_generic_get, - .init = dbx500_cpufreq_init, - .exit = dbx500_cpufreq_exit, - .ready = dbx500_cpufreq_ready, - .name = "DBX500", - .attr = cpufreq_generic_attr, -}; - -static int dbx500_cpufreq_probe(struct platform_device *pdev) -{ - struct cpufreq_frequency_table *pos; - - freq_table = dev_get_platdata(&pdev->dev); - if (!freq_table) { - pr_err("dbx500-cpufreq: Failed to fetch cpufreq table\n"); - return -ENODEV; - } - - armss_clk = clk_get(&pdev->dev, "armss"); - if (IS_ERR(armss_clk)) { - pr_err("dbx500-cpufreq: Failed to get armss clk\n"); - return PTR_ERR(armss_clk); - } - - pr_info("dbx500-cpufreq: Available frequencies:\n"); - cpufreq_for_each_entry(pos, freq_table) - pr_info(" %d Mhz\n", pos->frequency / 1000); - - return cpufreq_register_driver(&dbx500_cpufreq_driver); -} - -static struct platform_driver dbx500_cpufreq_plat_driver = { - .driver = { - .name = "cpufreq-ux500", - }, - .probe = dbx500_cpufreq_probe, -}; - -static int __init dbx500_cpufreq_register(void) -{ - return platform_driver_register(&dbx500_cpufreq_plat_driver); -} -device_initcall(dbx500_cpufreq_register); - -MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("cpufreq driver for DBX500"); diff --git a/drivers/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c index bfce11cba1df..45e2ca62515e 100644 --- a/drivers/cpufreq/elanfreq.c +++ b/drivers/cpufreq/elanfreq.c @@ -165,9 +165,6 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy) if (pos->frequency > max_freq) pos->frequency = CPUFREQ_ENTRY_INVALID; - /* cpuinfo and default policy 
values */ - policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; - return cpufreq_table_validate_and_show(policy, elanfreq_table); } @@ -196,6 +193,7 @@ __setup("elanfreq=", elanfreq_setup); static struct cpufreq_driver elanfreq_driver = { .get = elanfreq_get_cpu_frequency, + .flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING, .verify = cpufreq_generic_frequency_table_verify, .target_index = elanfreq_target, .init = elanfreq_cpu_init, diff --git a/drivers/cpufreq/gx-suspmod.c b/drivers/cpufreq/gx-suspmod.c index 3488c9c175eb..8f52a06664e3 100644 --- a/drivers/cpufreq/gx-suspmod.c +++ b/drivers/cpufreq/gx-suspmod.c @@ -428,7 +428,6 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy) policy->max = maxfreq; policy->cpuinfo.min_freq = maxfreq / max_duration; policy->cpuinfo.max_freq = maxfreq; - policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; return 0; } @@ -438,6 +437,7 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy) * MediaGX/Geode GX initialize cpufreq driver */ static struct cpufreq_driver gx_suspmod_driver = { + .flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING, .get = gx_get_cpuspeed, .verify = cpufreq_gx_verify, .target = cpufreq_gx_target, diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c index b6edd3ccaa55..14466a9b01c0 100644 --- a/drivers/cpufreq/imx6q-cpufreq.c +++ b/drivers/cpufreq/imx6q-cpufreq.c @@ -47,6 +47,7 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index) struct dev_pm_opp *opp; unsigned long freq_hz, volt, volt_old; unsigned int old_freq, new_freq; + bool pll1_sys_temp_enabled = false; int ret; new_freq = freq_table[index].frequency; @@ -124,6 +125,10 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index) if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) { clk_set_rate(pll1_sys_clk, new_freq * 1000); clk_set_parent(pll1_sw_clk, pll1_sys_clk); + } else { + /* pll1_sys needs to be enabled for divider rate change to work. */ + pll1_sys_temp_enabled = true; + clk_prepare_enable(pll1_sys_clk); } } @@ -135,6 +140,10 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index) return ret; } + /* PLL1 is only needed until after ARM-PODF is set. */ + if (pll1_sys_temp_enabled) + clk_disable_unprepare(pll1_sys_clk); + /* scaling down? scale voltage after frequency */ if (new_freq < old_freq) { ret = regulator_set_voltage_tol(arm_reg, volt, 0); diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 65ee4fcace1f..93a0e88bef76 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -37,8 +37,7 @@ #include #include -#define INTEL_PSTATE_DEFAULT_SAMPLING_INTERVAL (10 * NSEC_PER_MSEC) -#define INTEL_PSTATE_HWP_SAMPLING_INTERVAL (50 * NSEC_PER_MSEC) +#define INTEL_PSTATE_SAMPLING_INTERVAL (10 * NSEC_PER_MSEC) #define INTEL_CPUFREQ_TRANSITION_LATENCY 20000 #define INTEL_CPUFREQ_TRANSITION_DELAY 500 @@ -172,28 +171,6 @@ struct vid_data { int32_t ratio; }; -/** - * struct _pid - Stores PID data - * @setpoint: Target set point for busyness or performance - * @integral: Storage for accumulated error values - * @p_gain: PID proportional gain - * @i_gain: PID integral gain - * @d_gain: PID derivative gain - * @deadband: PID deadband - * @last_err: Last error storage for integral part of PID calculation - * - * Stores PID coefficients and last error for PID controller. 
- */ -struct _pid { - int setpoint; - int32_t integral; - int32_t p_gain; - int32_t i_gain; - int32_t d_gain; - int deadband; - int32_t last_err; -}; - /** * struct global_params - Global parameters, mostly tunable via sysfs. * @no_turbo: Whether or not to use turbo P-states. @@ -223,7 +200,6 @@ struct global_params { * @last_update: Time of the last update. * @pstate: Stores P state limits for this CPU * @vid: Stores VID limits for this CPU - * @pid: Stores PID parameters for this CPU * @last_sample_time: Last Sample time * @aperf_mperf_shift: Number of clock cycles after aperf, merf is incremented * This shift is a multiplier to mperf delta to @@ -258,7 +234,6 @@ struct cpudata { struct pstate_data pstate; struct vid_data vid; - struct _pid pid; u64 last_update; u64 last_sample_time; @@ -283,28 +258,6 @@ struct cpudata { static struct cpudata **all_cpu_data; -/** - * struct pstate_adjust_policy - Stores static PID configuration data - * @sample_rate_ms: PID calculation sample rate in ms - * @sample_rate_ns: Sample rate calculation in ns - * @deadband: PID deadband - * @setpoint: PID Setpoint - * @p_gain_pct: PID proportional gain - * @i_gain_pct: PID integral gain - * @d_gain_pct: PID derivative gain - * - * Stores per CPU model static PID configuration data. - */ -struct pstate_adjust_policy { - int sample_rate_ms; - s64 sample_rate_ns; - int deadband; - int setpoint; - int p_gain_pct; - int d_gain_pct; - int i_gain_pct; -}; - /** * struct pstate_funcs - Per CPU model specific callbacks * @get_max: Callback to get maximum non turbo effective P state @@ -314,7 +267,6 @@ struct pstate_adjust_policy { * @get_scaling: Callback to get frequency scaling factor * @get_val: Callback to convert P state to actual MSR write value * @get_vid: Callback to get VID data for Atom platforms - * @update_util: Active mode utilization update callback. * * Core and Atom CPU models have different way to get P State limits. This * structure is used to store those callbacks. @@ -328,20 +280,9 @@ struct pstate_funcs { int (*get_aperf_mperf_shift)(void); u64 (*get_val)(struct cpudata*, int pstate); void (*get_vid)(struct cpudata *); - void (*update_util)(struct update_util_data *data, u64 time, - unsigned int flags); }; static struct pstate_funcs pstate_funcs __read_mostly; -static struct pstate_adjust_policy pid_params __read_mostly = { - .sample_rate_ms = 10, - .sample_rate_ns = 10 * NSEC_PER_MSEC, - .deadband = 0, - .setpoint = 97, - .p_gain_pct = 20, - .d_gain_pct = 0, - .i_gain_pct = 0, -}; static int hwp_active __read_mostly; static bool per_cpu_limits __read_mostly; @@ -509,56 +450,6 @@ static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) } #endif -static signed int pid_calc(struct _pid *pid, int32_t busy) -{ - signed int result; - int32_t pterm, dterm, fp_error; - int32_t integral_limit; - - fp_error = pid->setpoint - busy; - - if (abs(fp_error) <= pid->deadband) - return 0; - - pterm = mul_fp(pid->p_gain, fp_error); - - pid->integral += fp_error; - - /* - * We limit the integral here so that it will never - * get higher than 30. This prevents it from becoming - * too large an input over long periods of time and allows - * it to get factored out sooner. - * - * The value of 30 was chosen through experimentation. 
- */ - integral_limit = int_tofp(30); - if (pid->integral > integral_limit) - pid->integral = integral_limit; - if (pid->integral < -integral_limit) - pid->integral = -integral_limit; - - dterm = mul_fp(pid->d_gain, fp_error - pid->last_err); - pid->last_err = fp_error; - - result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm; - result = result + (1 << (FRAC_BITS-1)); - return (signed int)fp_toint(result); -} - -static inline void intel_pstate_pid_reset(struct cpudata *cpu) -{ - struct _pid *pid = &cpu->pid; - - pid->p_gain = percent_fp(pid_params.p_gain_pct); - pid->d_gain = percent_fp(pid_params.d_gain_pct); - pid->i_gain = percent_fp(pid_params.i_gain_pct); - pid->setpoint = int_tofp(pid_params.setpoint); - pid->last_err = pid->setpoint - int_tofp(100); - pid->deadband = int_tofp(pid_params.deadband); - pid->integral = 0; -} - static inline void update_turbo_state(void) { u64 misc_en; @@ -911,82 +802,6 @@ static void intel_pstate_update_policies(void) cpufreq_update_policy(cpu); } -/************************** debugfs begin ************************/ -static int pid_param_set(void *data, u64 val) -{ - unsigned int cpu; - - *(u32 *)data = val; - pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC; - for_each_possible_cpu(cpu) - if (all_cpu_data[cpu]) - intel_pstate_pid_reset(all_cpu_data[cpu]); - - return 0; -} - -static int pid_param_get(void *data, u64 *val) -{ - *val = *(u32 *)data; - return 0; -} -DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n"); - -static struct dentry *debugfs_parent; - -struct pid_param { - char *name; - void *value; - struct dentry *dentry; -}; - -static struct pid_param pid_files[] = { - {"sample_rate_ms", &pid_params.sample_rate_ms, }, - {"d_gain_pct", &pid_params.d_gain_pct, }, - {"i_gain_pct", &pid_params.i_gain_pct, }, - {"deadband", &pid_params.deadband, }, - {"setpoint", &pid_params.setpoint, }, - {"p_gain_pct", &pid_params.p_gain_pct, }, - {NULL, NULL, } -}; - -static void intel_pstate_debug_expose_params(void) -{ - int i; - - debugfs_parent = debugfs_create_dir("pstate_snb", NULL); - if (IS_ERR_OR_NULL(debugfs_parent)) - return; - - for (i = 0; pid_files[i].name; i++) { - struct dentry *dentry; - - dentry = debugfs_create_file(pid_files[i].name, 0660, - debugfs_parent, pid_files[i].value, - &fops_pid_param); - if (!IS_ERR(dentry)) - pid_files[i].dentry = dentry; - } -} - -static void intel_pstate_debug_hide_params(void) -{ - int i; - - if (IS_ERR_OR_NULL(debugfs_parent)) - return; - - for (i = 0; pid_files[i].name; i++) { - debugfs_remove(pid_files[i].dentry); - pid_files[i].dentry = NULL; - } - - debugfs_remove(debugfs_parent); - debugfs_parent = NULL; -} - -/************************** debugfs end ************************/ - /************************** sysfs begin ************************/ #define show_one(file_name, object) \ static ssize_t show_##file_name \ @@ -1622,7 +1437,7 @@ static inline int32_t get_avg_pstate(struct cpudata *cpu) cpu->sample.core_avg_perf); } -static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu) +static inline int32_t get_target_pstate(struct cpudata *cpu) { struct sample *sample = &cpu->sample; int32_t busy_frac, boost; @@ -1660,44 +1475,6 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu) return target; } -static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu) -{ - int32_t perf_scaled, max_pstate, current_pstate, sample_ratio; - u64 duration_ns; - - /* - * perf_scaled is the ratio of the average P-state 
during the last - * sampling period to the P-state requested last time (in percent). - * - * That measures the system's response to the previous P-state - * selection. - */ - max_pstate = cpu->pstate.max_pstate_physical; - current_pstate = cpu->pstate.current_pstate; - perf_scaled = mul_ext_fp(cpu->sample.core_avg_perf, - div_fp(100 * max_pstate, current_pstate)); - - /* - * Since our utilization update callback will not run unless we are - * in C0, check if the actual elapsed time is significantly greater (3x) - * than our sample interval. If it is, then we were idle for a long - * enough period of time to adjust our performance metric. - */ - duration_ns = cpu->sample.time - cpu->last_sample_time; - if ((s64)duration_ns > pid_params.sample_rate_ns * 3) { - sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns); - perf_scaled = mul_fp(perf_scaled, sample_ratio); - } else { - sample_ratio = div_fp(100 * (cpu->sample.mperf << cpu->aperf_mperf_shift), - cpu->sample.tsc); - if (sample_ratio < int_tofp(1)) - perf_scaled = 0; - } - - cpu->sample.busy_scaled = perf_scaled; - return cpu->pstate.current_pstate - pid_calc(&cpu->pid, perf_scaled); -} - static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate) { int max_pstate = intel_pstate_get_base_pstate(cpu); @@ -1717,13 +1494,15 @@ static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate) wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate)); } -static void intel_pstate_adjust_pstate(struct cpudata *cpu, int target_pstate) +static void intel_pstate_adjust_pstate(struct cpudata *cpu) { int from = cpu->pstate.current_pstate; struct sample *sample; + int target_pstate; update_turbo_state(); + target_pstate = get_target_pstate(cpu); target_pstate = intel_pstate_prepare_request(cpu, target_pstate); trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu); intel_pstate_update_pstate(cpu, target_pstate); @@ -1740,31 +1519,27 @@ static void intel_pstate_adjust_pstate(struct cpudata *cpu, int target_pstate) fp_toint(cpu->iowait_boost * 100)); } -static void intel_pstate_update_util_pid(struct update_util_data *data, - u64 time, unsigned int flags) -{ - struct cpudata *cpu = container_of(data, struct cpudata, update_util); - u64 delta_ns = time - cpu->sample.time; - - if ((s64)delta_ns < pid_params.sample_rate_ns) - return; - - if (intel_pstate_sample(cpu, time)) { - int target_pstate; - - target_pstate = get_target_pstate_use_performance(cpu); - intel_pstate_adjust_pstate(cpu, target_pstate); - } -} - static void intel_pstate_update_util(struct update_util_data *data, u64 time, unsigned int flags) { struct cpudata *cpu = container_of(data, struct cpudata, update_util); u64 delta_ns; + /* Don't allow remote callbacks */ + if (smp_processor_id() != cpu->cpu) + return; + if (flags & SCHED_CPUFREQ_IOWAIT) { cpu->iowait_boost = int_tofp(1); + cpu->last_update = time; + /* + * The last time the busy was 100% so P-state was max anyway + * so avoid overhead of computation. + */ + if (fp_toint(cpu->sample.busy_scaled) == 100) + return; + + goto set_pstate; } else if (cpu->iowait_boost) { /* Clear iowait_boost if the CPU may have been idle. 
*/ delta_ns = time - cpu->last_update; @@ -1773,15 +1548,12 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time, } cpu->last_update = time; delta_ns = time - cpu->sample.time; - if ((s64)delta_ns < INTEL_PSTATE_DEFAULT_SAMPLING_INTERVAL) + if ((s64)delta_ns < INTEL_PSTATE_SAMPLING_INTERVAL) return; - if (intel_pstate_sample(cpu, time)) { - int target_pstate; - - target_pstate = get_target_pstate_use_cpu_load(cpu); - intel_pstate_adjust_pstate(cpu, target_pstate); - } +set_pstate: + if (intel_pstate_sample(cpu, time)) + intel_pstate_adjust_pstate(cpu); } static struct pstate_funcs core_funcs = { @@ -1791,7 +1563,6 @@ static struct pstate_funcs core_funcs = { .get_turbo = core_get_turbo_pstate, .get_scaling = core_get_scaling, .get_val = core_get_val, - .update_util = intel_pstate_update_util_pid, }; static const struct pstate_funcs silvermont_funcs = { @@ -1802,7 +1573,6 @@ static const struct pstate_funcs silvermont_funcs = { .get_val = atom_get_val, .get_scaling = silvermont_get_scaling, .get_vid = atom_get_vid, - .update_util = intel_pstate_update_util, }; static const struct pstate_funcs airmont_funcs = { @@ -1813,7 +1583,6 @@ static const struct pstate_funcs airmont_funcs = { .get_val = atom_get_val, .get_scaling = airmont_get_scaling, .get_vid = atom_get_vid, - .update_util = intel_pstate_update_util, }; static const struct pstate_funcs knl_funcs = { @@ -1824,7 +1593,6 @@ static const struct pstate_funcs knl_funcs = { .get_aperf_mperf_shift = knl_get_aperf_mperf_shift, .get_scaling = core_get_scaling, .get_val = core_get_val, - .update_util = intel_pstate_update_util_pid, }; static const struct pstate_funcs bxt_funcs = { @@ -1834,7 +1602,6 @@ static const struct pstate_funcs bxt_funcs = { .get_turbo = core_get_turbo_pstate, .get_scaling = core_get_scaling, .get_val = core_get_val, - .update_util = intel_pstate_update_util, }; #define ICPU(model, policy) \ @@ -1878,8 +1645,6 @@ static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = { {} }; -static bool pid_in_use(void); - static int intel_pstate_init_cpu(unsigned int cpunum) { struct cpudata *cpu; @@ -1910,8 +1675,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum) intel_pstate_disable_ee(cpunum); intel_pstate_hwp_enable(cpu); - } else if (pid_in_use()) { - intel_pstate_pid_reset(cpu); } intel_pstate_get_cpu_pstates(cpu); @@ -1934,7 +1697,7 @@ static void intel_pstate_set_update_util_hook(unsigned int cpu_num) /* Prevent intel_pstate_update_util() from using stale data. 
*/ cpu->sample.time = 0; cpufreq_add_update_util_hook(cpu_num, &cpu->update_util, - pstate_funcs.update_util); + intel_pstate_update_util); cpu->update_util_set = true; } @@ -2132,7 +1895,6 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy) policy->cpuinfo.max_freq *= cpu->pstate.scaling; intel_pstate_init_acpi_perf_limits(policy); - cpumask_set_cpu(policy->cpu, policy->cpus); policy->fast_switch_possible = true; @@ -2146,7 +1908,6 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy) if (ret) return ret; - policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE)) policy->policy = CPUFREQ_POLICY_PERFORMANCE; else @@ -2261,12 +2022,6 @@ static struct cpufreq_driver intel_cpufreq = { static struct cpufreq_driver *default_driver = &intel_pstate; -static bool pid_in_use(void) -{ - return intel_pstate_driver == &intel_pstate && - pstate_funcs.update_util == intel_pstate_update_util_pid; -} - static void intel_pstate_driver_cleanup(void) { unsigned int cpu; @@ -2301,9 +2056,6 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver) global.min_perf_pct = min_perf_pct_min(); - if (pid_in_use()) - intel_pstate_debug_expose_params(); - return 0; } @@ -2312,9 +2064,6 @@ static int intel_pstate_unregister_driver(void) if (hwp_active) return -EBUSY; - if (pid_in_use()) - intel_pstate_debug_hide_params(); - cpufreq_unregister_driver(intel_pstate_driver); intel_pstate_driver_cleanup(); @@ -2382,24 +2131,6 @@ static int __init intel_pstate_msrs_not_valid(void) return 0; } -#ifdef CONFIG_ACPI -static void intel_pstate_use_acpi_profile(void) -{ - switch (acpi_gbl_FADT.preferred_profile) { - case PM_MOBILE: - case PM_TABLET: - case PM_APPLIANCE_PC: - case PM_DESKTOP: - case PM_WORKSTATION: - pstate_funcs.update_util = intel_pstate_update_util; - } -} -#else -static void intel_pstate_use_acpi_profile(void) -{ -} -#endif - static void __init copy_cpu_funcs(struct pstate_funcs *funcs) { pstate_funcs.get_max = funcs->get_max; @@ -2409,10 +2140,7 @@ static void __init copy_cpu_funcs(struct pstate_funcs *funcs) pstate_funcs.get_scaling = funcs->get_scaling; pstate_funcs.get_val = funcs->get_val; pstate_funcs.get_vid = funcs->get_vid; - pstate_funcs.update_util = funcs->update_util; pstate_funcs.get_aperf_mperf_shift = funcs->get_aperf_mperf_shift; - - intel_pstate_use_acpi_profile(); } #ifdef CONFIG_ACPI @@ -2466,39 +2194,31 @@ enum { PPC, }; -struct hw_vendor_info { - u16 valid; - char oem_id[ACPI_OEM_ID_SIZE]; - char oem_table_id[ACPI_OEM_TABLE_ID_SIZE]; - int oem_pwr_table; -}; - /* Hardware vendor-specific info that has its own power management modes */ -static struct hw_vendor_info vendor_info[] __initdata = { - {1, "HP ", "ProLiant", PSS}, - {1, "ORACLE", "X4-2 ", PPC}, - {1, "ORACLE", "X4-2L ", PPC}, - {1, "ORACLE", "X4-2B ", PPC}, - {1, "ORACLE", "X3-2 ", PPC}, - {1, "ORACLE", "X3-2L ", PPC}, - {1, "ORACLE", "X3-2B ", PPC}, - {1, "ORACLE", "X4470M2 ", PPC}, - {1, "ORACLE", "X4270M3 ", PPC}, - {1, "ORACLE", "X4270M2 ", PPC}, - {1, "ORACLE", "X4170M2 ", PPC}, - {1, "ORACLE", "X4170 M3", PPC}, - {1, "ORACLE", "X4275 M3", PPC}, - {1, "ORACLE", "X6-2 ", PPC}, - {1, "ORACLE", "Sudbury ", PPC}, - {0, "", ""}, +static struct acpi_platform_list plat_info[] __initdata = { + {"HP ", "ProLiant", 0, ACPI_SIG_FADT, all_versions, 0, PSS}, + {"ORACLE", "X4-2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC}, + {"ORACLE", "X4-2L ", 0, ACPI_SIG_FADT, all_versions, 0, PPC}, + {"ORACLE", "X4-2B ", 0, ACPI_SIG_FADT, all_versions, 0, 
PPC}, + {"ORACLE", "X3-2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC}, + {"ORACLE", "X3-2L ", 0, ACPI_SIG_FADT, all_versions, 0, PPC}, + {"ORACLE", "X3-2B ", 0, ACPI_SIG_FADT, all_versions, 0, PPC}, + {"ORACLE", "X4470M2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC}, + {"ORACLE", "X4270M3 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC}, + {"ORACLE", "X4270M2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC}, + {"ORACLE", "X4170M2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC}, + {"ORACLE", "X4170 M3", 0, ACPI_SIG_FADT, all_versions, 0, PPC}, + {"ORACLE", "X4275 M3", 0, ACPI_SIG_FADT, all_versions, 0, PPC}, + {"ORACLE", "X6-2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC}, + {"ORACLE", "Sudbury ", 0, ACPI_SIG_FADT, all_versions, 0, PPC}, + { } /* End */ }; static bool __init intel_pstate_platform_pwr_mgmt_exists(void) { - struct acpi_table_header hdr; - struct hw_vendor_info *v_info; const struct x86_cpu_id *id; u64 misc_pwr; + int idx; id = x86_match_cpu(intel_pstate_cpu_oob_ids); if (id) { @@ -2507,21 +2227,15 @@ static bool __init intel_pstate_platform_pwr_mgmt_exists(void) return true; } - if (acpi_disabled || - ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr))) + idx = acpi_match_platform_list(plat_info); + if (idx < 0) return false; - for (v_info = vendor_info; v_info->valid; v_info++) { - if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) && - !strncmp(hdr.oem_table_id, v_info->oem_table_id, - ACPI_OEM_TABLE_ID_SIZE)) - switch (v_info->oem_pwr_table) { - case PSS: - return intel_pstate_no_acpi_pss(); - case PPC: - return intel_pstate_has_acpi_ppc() && - (!force_load); - } + switch (plat_info[idx].data) { + case PSS: + return intel_pstate_no_acpi_pss(); + case PPC: + return intel_pstate_has_acpi_ppc() && !force_load; } return false; @@ -2556,9 +2270,7 @@ static int __init intel_pstate_init(void) if (x86_match_cpu(hwp_support_ids)) { copy_cpu_funcs(&core_funcs); - if (no_hwp) { - pstate_funcs.update_util = intel_pstate_update_util; - } else { + if (!no_hwp) { hwp_active++; intel_pstate.attr = hwp_cpufreq_attrs; goto hwp_cpu_matched; diff --git a/drivers/cpufreq/longrun.c b/drivers/cpufreq/longrun.c index 074971b12635..542aa9adba1a 100644 --- a/drivers/cpufreq/longrun.c +++ b/drivers/cpufreq/longrun.c @@ -270,7 +270,6 @@ static int longrun_cpu_init(struct cpufreq_policy *policy) /* cpuinfo and default policy values */ policy->cpuinfo.min_freq = longrun_low_freq; policy->cpuinfo.max_freq = longrun_high_freq; - policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; longrun_get_policy(policy); return 0; diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c index 9ac27b22476c..da344696beed 100644 --- a/drivers/cpufreq/loongson2_cpufreq.c +++ b/drivers/cpufreq/loongson2_cpufreq.c @@ -114,7 +114,7 @@ static struct cpufreq_driver loongson2_cpufreq_driver = { .attr = cpufreq_generic_attr, }; -static struct platform_device_id platform_device_ids[] = { +static const struct platform_device_id platform_device_ids[] = { { .name = "loongson2_cpufreq", }, diff --git a/drivers/cpufreq/mt8173-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c similarity index 95% rename from drivers/cpufreq/mt8173-cpufreq.c rename to drivers/cpufreq/mediatek-cpufreq.c index f9f00fb4bc3a..18c4bd9a5c65 100644 --- a/drivers/cpufreq/mt8173-cpufreq.c +++ b/drivers/cpufreq/mediatek-cpufreq.c @@ -507,7 +507,7 @@ static int mtk_cpufreq_exit(struct cpufreq_policy *policy) return 0; } -static struct cpufreq_driver mt8173_cpufreq_driver = { +static struct cpufreq_driver mtk_cpufreq_driver = { .flags = 
CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK | CPUFREQ_HAVE_GOVERNOR_PER_POLICY, .verify = cpufreq_generic_frequency_table_verify, @@ -520,7 +520,7 @@ static struct cpufreq_driver mt8173_cpufreq_driver = { .attr = cpufreq_generic_attr, }; -static int mt8173_cpufreq_probe(struct platform_device *pdev) +static int mtk_cpufreq_probe(struct platform_device *pdev) { struct mtk_cpu_dvfs_info *info, *tmp; int cpu, ret; @@ -547,7 +547,7 @@ static int mt8173_cpufreq_probe(struct platform_device *pdev) list_add(&info->list_head, &dvfs_info_list); } - ret = cpufreq_register_driver(&mt8173_cpufreq_driver); + ret = cpufreq_register_driver(&mtk_cpufreq_driver); if (ret) { dev_err(&pdev->dev, "failed to register mtk cpufreq driver\n"); goto release_dvfs_info_list; @@ -564,15 +564,18 @@ static int mt8173_cpufreq_probe(struct platform_device *pdev) return ret; } -static struct platform_driver mt8173_cpufreq_platdrv = { +static struct platform_driver mtk_cpufreq_platdrv = { .driver = { - .name = "mt8173-cpufreq", + .name = "mtk-cpufreq", }, - .probe = mt8173_cpufreq_probe, + .probe = mtk_cpufreq_probe, }; /* List of machines supported by this driver */ -static const struct of_device_id mt8173_cpufreq_machines[] __initconst = { +static const struct of_device_id mtk_cpufreq_machines[] __initconst = { + { .compatible = "mediatek,mt2701", }, + { .compatible = "mediatek,mt7622", }, + { .compatible = "mediatek,mt7623", }, { .compatible = "mediatek,mt817x", }, { .compatible = "mediatek,mt8173", }, { .compatible = "mediatek,mt8176", }, @@ -580,7 +583,7 @@ static const struct of_device_id mt8173_cpufreq_machines[] __initconst = { { } }; -static int __init mt8173_cpufreq_driver_init(void) +static int __init mtk_cpufreq_driver_init(void) { struct device_node *np; const struct of_device_id *match; @@ -591,14 +594,14 @@ static int __init mt8173_cpufreq_driver_init(void) if (!np) return -ENODEV; - match = of_match_node(mt8173_cpufreq_machines, np); + match = of_match_node(mtk_cpufreq_machines, np); of_node_put(np); if (!match) { - pr_warn("Machine is not compatible with mt8173-cpufreq\n"); + pr_warn("Machine is not compatible with mtk-cpufreq\n"); return -ENODEV; } - err = platform_driver_register(&mt8173_cpufreq_platdrv); + err = platform_driver_register(&mtk_cpufreq_platdrv); if (err) return err; @@ -608,7 +611,7 @@ static int __init mt8173_cpufreq_driver_init(void) * and the device registration codes are put here to handle defer * probing. 
*/ - pdev = platform_device_register_simple("mt8173-cpufreq", -1, NULL, 0); + pdev = platform_device_register_simple("mtk-cpufreq", -1, NULL, 0); if (IS_ERR(pdev)) { pr_err("failed to register mtk-cpufreq platform device\n"); return PTR_ERR(pdev); @@ -616,4 +619,4 @@ static int __init mt8173_cpufreq_driver_init(void) return 0; } -device_initcall(mt8173_cpufreq_driver_init); +device_initcall(mtk_cpufreq_driver_init); diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c index ff44016ea031..61ae06ca008e 100644 --- a/drivers/cpufreq/pmac32-cpufreq.c +++ b/drivers/cpufreq/pmac32-cpufreq.c @@ -442,7 +442,8 @@ static struct cpufreq_driver pmac_cpufreq_driver = { .init = pmac_cpufreq_cpu_init, .suspend = pmac_cpufreq_suspend, .resume = pmac_cpufreq_resume, - .flags = CPUFREQ_PM_NO_WARN, + .flags = CPUFREQ_PM_NO_WARN | + CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING, .attr = cpufreq_generic_attr, .name = "powermac", }; @@ -626,14 +627,16 @@ static int __init pmac_cpufreq_setup(void) if (!value) goto out; cur_freq = (*value) / 1000; - transition_latency = CPUFREQ_ETERNAL; /* Check for 7447A based MacRISC3 */ if (of_machine_is_compatible("MacRISC3") && of_get_property(cpunode, "dynamic-power-step", NULL) && PVR_VER(mfspr(SPRN_PVR)) == 0x8003) { pmac_cpufreq_init_7447A(cpunode); + + /* Allow dynamic switching */ transition_latency = 8000000; + pmac_cpufreq_driver.flags &= ~CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING; /* Check for other MacRISC3 machines */ } else if (of_machine_is_compatible("PowerBook3,4") || of_machine_is_compatible("PowerBook3,5") || diff --git a/drivers/cpufreq/pmac64-cpufreq.c b/drivers/cpufreq/pmac64-cpufreq.c index 267e0894c62d..be623dd7b9f2 100644 --- a/drivers/cpufreq/pmac64-cpufreq.c +++ b/drivers/cpufreq/pmac64-cpufreq.c @@ -516,7 +516,7 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode) goto bail; } - DBG("cpufreq: i2c clock chip found: %s\n", hwclock->full_name); + DBG("cpufreq: i2c clock chip found: %pOF\n", hwclock); /* Now get all the platform functions */ pfunc_cpu_getfreq = diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c index 9f013ed42977..80ac313e6c59 100644 --- a/drivers/cpufreq/powernow-k7.c +++ b/drivers/cpufreq/powernow-k7.c @@ -578,7 +578,7 @@ static int acer_cpufreq_pst(const struct dmi_system_id *d) * A BIOS update is all that can save them. * Mention this, and disable cpufreq. 
*/ -static struct dmi_system_id powernow_dmi_table[] = { +static const struct dmi_system_id powernow_dmi_table[] = { { .callback = acer_cpufreq_pst, .ident = "Acer Aspire", diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c index f82074eea779..5d31c2db12a3 100644 --- a/drivers/cpufreq/s5pv210-cpufreq.c +++ b/drivers/cpufreq/s5pv210-cpufreq.c @@ -602,6 +602,7 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev) } clk_base = of_iomap(np, 0); + of_node_put(np); if (!clk_base) { pr_err("%s: failed to map clock registers\n", __func__); return -EFAULT; @@ -612,6 +613,7 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev) if (id < 0 || id >= ARRAY_SIZE(dmc_base)) { pr_err("%s: failed to get alias of dmc node '%s'\n", __func__, np->name); + of_node_put(np); return id; } @@ -619,6 +621,7 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev) if (!dmc_base[id]) { pr_err("%s: failed to map dmc%d registers\n", __func__, id); + of_node_put(np); return -EFAULT; } } diff --git a/drivers/cpufreq/sa1100-cpufreq.c b/drivers/cpufreq/sa1100-cpufreq.c index 728eab77e8e0..e2d8a77c36d5 100644 --- a/drivers/cpufreq/sa1100-cpufreq.c +++ b/drivers/cpufreq/sa1100-cpufreq.c @@ -197,11 +197,12 @@ static int sa1100_target(struct cpufreq_policy *policy, unsigned int ppcr) static int __init sa1100_cpu_init(struct cpufreq_policy *policy) { - return cpufreq_generic_init(policy, sa11x0_freq_table, CPUFREQ_ETERNAL); + return cpufreq_generic_init(policy, sa11x0_freq_table, 0); } static struct cpufreq_driver sa1100_driver __refdata = { - .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK | + CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING, .verify = cpufreq_generic_frequency_table_verify, .target_index = sa1100_target, .get = sa11x0_getspeed, diff --git a/drivers/cpufreq/sa1110-cpufreq.c b/drivers/cpufreq/sa1110-cpufreq.c index 2bac9b6cfeea..66e5fb088ecc 100644 --- a/drivers/cpufreq/sa1110-cpufreq.c +++ b/drivers/cpufreq/sa1110-cpufreq.c @@ -306,13 +306,14 @@ static int sa1110_target(struct cpufreq_policy *policy, unsigned int ppcr) static int __init sa1110_cpu_init(struct cpufreq_policy *policy) { - return cpufreq_generic_init(policy, sa11x0_freq_table, CPUFREQ_ETERNAL); + return cpufreq_generic_init(policy, sa11x0_freq_table, 0); } /* sa1110_driver needs __refdata because it must remain after init registers * it with cpufreq_register_driver() */ static struct cpufreq_driver sa1110_driver __refdata = { - .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK | + CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING, .verify = cpufreq_generic_frequency_table_verify, .target_index = sa1110_target, .get = sa11x0_getspeed, diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c index 719c3d9f07fb..28893d435cf5 100644 --- a/drivers/cpufreq/sh-cpufreq.c +++ b/drivers/cpufreq/sh-cpufreq.c @@ -137,8 +137,6 @@ static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy) (clk_round_rate(cpuclk, ~0UL) + 500) / 1000; } - policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; - dev_info(dev, "CPU Frequencies - Minimum %u.%03u MHz, " "Maximum %u.%03u MHz.\n", policy->min / 1000, policy->min % 1000, @@ -159,6 +157,7 @@ static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy) static struct cpufreq_driver sh_cpufreq_driver = { .name = "sh", + .flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING, .get = sh_cpufreq_get, .target = sh_cpufreq_target, .verify = 
sh_cpufreq_verify, diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c index b86953a3ddc4..0412a246a785 100644 --- a/drivers/cpufreq/speedstep-ich.c +++ b/drivers/cpufreq/speedstep-ich.c @@ -207,7 +207,7 @@ static unsigned int speedstep_detect_chipset(void) * 8100 which use a pretty old revision of the 82815 * host bridge. Abort on these systems. */ - static struct pci_dev *hostbridge; + struct pci_dev *hostbridge; hostbridge = pci_get_subsys(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_MC, diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c index 1b8062182c81..ccab452a4ef5 100644 --- a/drivers/cpufreq/speedstep-lib.c +++ b/drivers/cpufreq/speedstep-lib.c @@ -35,7 +35,7 @@ static int relaxed_check; static unsigned int pentium3_get_frequency(enum speedstep_processor processor) { /* See table 14 of p3_ds.pdf and table 22 of 29834003.pdf */ - struct { + static const struct { unsigned int ratio; /* Frequency Multiplier (x10) */ u8 bitmap; /* power on configuration bits [27, 25:22] (in MSR 0x2a) */ @@ -58,7 +58,7 @@ static unsigned int pentium3_get_frequency(enum speedstep_processor processor) }; /* PIII(-M) FSB settings: see table b1-b of 24547206.pdf */ - struct { + static const struct { unsigned int value; /* Front Side Bus speed in MHz */ u8 bitmap; /* power on configuration bits [18: 19] (in MSR 0x2a) */ diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c index 37b30071c220..d23f24ccff38 100644 --- a/drivers/cpufreq/speedstep-smi.c +++ b/drivers/cpufreq/speedstep-smi.c @@ -266,7 +266,6 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) pr_debug("workaround worked.\n"); } - policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; return cpufreq_table_validate_and_show(policy, speedstep_freqs); } @@ -290,6 +289,7 @@ static int speedstep_resume(struct cpufreq_policy *policy) static struct cpufreq_driver speedstep_driver = { .name = "speedstep-smi", + .flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING, .verify = cpufreq_generic_frequency_table_verify, .target_index = speedstep_target, .init = speedstep_cpu_init, diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c index d2d0430d09d4..47105735df12 100644 --- a/drivers/cpufreq/sti-cpufreq.c +++ b/drivers/cpufreq/sti-cpufreq.c @@ -65,8 +65,8 @@ static int sti_cpufreq_fetch_major(void) { ret = of_property_read_u32_index(np, "st,syscfg", MAJOR_ID_INDEX, &major_offset); if (ret) { - dev_err(dev, "No major number offset provided in %s [%d]\n", - np->full_name, ret); + dev_err(dev, "No major number offset provided in %pOF [%d]\n", + np, ret); return ret; } @@ -92,8 +92,8 @@ static int sti_cpufreq_fetch_minor(void) MINOR_ID_INDEX, &minor_offset); if (ret) { dev_err(dev, - "No minor number offset provided %s [%d]\n", - np->full_name, ret); + "No minor number offset provided %pOF [%d]\n", + np, ret); return ret; } diff --git a/drivers/cpufreq/tango-cpufreq.c b/drivers/cpufreq/tango-cpufreq.c new file mode 100644 index 000000000000..89a7f860bfe8 --- /dev/null +++ b/drivers/cpufreq/tango-cpufreq.c @@ -0,0 +1,38 @@ +#include <linux/of.h> +#include <linux/cpu.h> +#include <linux/clk.h> +#include <linux/pm_opp.h> +#include <linux/platform_device.h> + +static const struct of_device_id machines[] __initconst = { + { .compatible = "sigma,tango4" }, + { /* sentinel */ } +}; + +static int __init tango_cpufreq_init(void) +{ + struct device *cpu_dev = get_cpu_device(0); + unsigned long max_freq; + struct clk *cpu_clk; + void *res; + + if (!of_match_node(machines, of_root)) + return -ENODEV; + + cpu_clk = clk_get(cpu_dev, NULL); + if
(IS_ERR(cpu_clk)) + return -ENODEV; + + max_freq = clk_get_rate(cpu_clk); + + dev_pm_opp_add(cpu_dev, max_freq / 1, 0); + dev_pm_opp_add(cpu_dev, max_freq / 2, 0); + dev_pm_opp_add(cpu_dev, max_freq / 3, 0); + dev_pm_opp_add(cpu_dev, max_freq / 5, 0); + dev_pm_opp_add(cpu_dev, max_freq / 9, 0); + + res = platform_device_register_data(NULL, "cpufreq-dt", -1, NULL, 0); + + return PTR_ERR_OR_ZERO(res); +} +device_initcall(tango_cpufreq_init); diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c index a7b5658c0460..4bf47de6101f 100644 --- a/drivers/cpufreq/ti-cpufreq.c +++ b/drivers/cpufreq/ti-cpufreq.c @@ -190,7 +190,7 @@ static int ti_cpufreq_setup_syscon_register(struct ti_cpufreq_data *opp_data) static const struct of_device_id ti_cpufreq_of_match[] = { { .compatible = "ti,am33xx", .data = &am3x_soc_data, }, - { .compatible = "ti,am4372", .data = &am4x_soc_data, }, + { .compatible = "ti,am43", .data = &am4x_soc_data, }, { .compatible = "ti,dra7", .data = &dra7_soc_data }, {}, }; @@ -245,8 +245,6 @@ static int ti_cpufreq_init(void) if (ret) goto fail_put_node; - of_node_put(opp_data->opp_node); - ret = PTR_ERR_OR_ZERO(dev_pm_opp_set_supported_hw(opp_data->cpu_dev, version, VERSION_COUNT)); if (ret) { @@ -255,6 +253,8 @@ static int ti_cpufreq_init(void) goto fail_put_node; } + of_node_put(opp_data->opp_node); + register_cpufreq_dt: platform_device_register_simple("cpufreq-dt", -1, NULL, 0); diff --git a/drivers/cpufreq/unicore2-cpufreq.c b/drivers/cpufreq/unicore2-cpufreq.c index 6f9dfa80563a..db62d9844751 100644 --- a/drivers/cpufreq/unicore2-cpufreq.c +++ b/drivers/cpufreq/unicore2-cpufreq.c @@ -58,13 +58,12 @@ static int __init ucv2_cpu_init(struct cpufreq_policy *policy) policy->min = policy->cpuinfo.min_freq = 250000; policy->max = policy->cpuinfo.max_freq = 1000000; - policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; policy->clk = clk_get(NULL, "MAIN_CLK"); return PTR_ERR_OR_ZERO(policy->clk); } static struct cpufreq_driver ucv2_driver = { - .flags = CPUFREQ_STICKY, + .flags = CPUFREQ_STICKY | CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING, .verify = ucv2_verify_speed, .target = ucv2_target, .get = cpufreq_generic_get, diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile index 3ba81b1dffad..0b67a05a7aae 100644 --- a/drivers/cpuidle/Makefile +++ b/drivers/cpuidle/Makefile @@ -5,6 +5,7 @@ obj-y += cpuidle.o driver.o governor.o sysfs.o governors/ obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o obj-$(CONFIG_DT_IDLE_STATES) += dt_idle_states.o +obj-$(CONFIG_ARCH_HAS_CPU_RELAX) += poll_state.o ################################################################################## # ARM SoC drivers diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c index 7080c384ad5d..52a75053ee03 100644 --- a/drivers/cpuidle/cpuidle-arm.c +++ b/drivers/cpuidle/cpuidle-arm.c @@ -104,13 +104,13 @@ static int __init arm_idle_init(void) ret = dt_init_idle_driver(drv, arm_idle_state_match, 1); if (ret <= 0) { ret = ret ? 
: -ENODEV; - goto out_fail; + goto init_fail; } ret = cpuidle_register_driver(drv); if (ret) { pr_err("Failed to register cpuidle driver\n"); - goto out_fail; + goto init_fail; } /* @@ -149,6 +149,8 @@ static int __init arm_idle_init(void) } return 0; +init_fail: + kfree(drv); out_fail: while (--cpu >= 0) { dev = per_cpu(cpuidle_devices, cpu); diff --git a/drivers/cpuidle/cpuidle-cps.c b/drivers/cpuidle/cpuidle-cps.c index 12b9145913de..72b5e47286b4 100644 --- a/drivers/cpuidle/cpuidle-cps.c +++ b/drivers/cpuidle/cpuidle-cps.c @@ -37,7 +37,7 @@ static int cps_nc_enter(struct cpuidle_device *dev, * TODO: don't treat core 0 specially, just prevent the final core * TODO: remap interrupt affinity temporarily */ - if (!cpu_data[dev->cpu].core && (index > STATE_NC_WAIT)) + if (cpus_are_siblings(0, dev->cpu) && (index > STATE_NC_WAIT)) index = STATE_NC_WAIT; /* Select the appropriate cps_pm_state */ diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 60bb64f4329d..484cc8909d5c 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -77,7 +77,7 @@ static int find_deepest_state(struct cpuidle_driver *drv, struct cpuidle_device *dev, unsigned int max_latency, unsigned int forbidden_flags, - bool freeze) + bool s2idle) { unsigned int latency_req = 0; int i, ret = 0; @@ -89,7 +89,7 @@ static int find_deepest_state(struct cpuidle_driver *drv, if (s->disabled || su->disable || s->exit_latency <= latency_req || s->exit_latency > max_latency || (s->flags & forbidden_flags) - || (freeze && !s->enter_freeze)) + || (s2idle && !s->enter_s2idle)) continue; latency_req = s->exit_latency; @@ -128,7 +128,7 @@ int cpuidle_find_deepest_state(struct cpuidle_driver *drv, } #ifdef CONFIG_SUSPEND -static void enter_freeze_proper(struct cpuidle_driver *drv, +static void enter_s2idle_proper(struct cpuidle_driver *drv, struct cpuidle_device *dev, int index) { /* @@ -143,7 +143,7 @@ static void enter_freeze_proper(struct cpuidle_driver *drv, * suspended is generally unsafe. */ stop_critical_timings(); - drv->states[index].enter_freeze(dev, drv, index); + drv->states[index].enter_s2idle(dev, drv, index); WARN_ON(!irqs_disabled()); /* * timekeeping_resume() that will be called by tick_unfreeze() for the @@ -155,25 +155,25 @@ static void enter_freeze_proper(struct cpuidle_driver *drv, } /** - * cpuidle_enter_freeze - Enter an idle state suitable for suspend-to-idle. + * cpuidle_enter_s2idle - Enter an idle state suitable for suspend-to-idle. * @drv: cpuidle driver for the given CPU. * @dev: cpuidle device for the given CPU. * - * If there are states with the ->enter_freeze callback, find the deepest of + * If there are states with the ->enter_s2idle callback, find the deepest of * them and enter it with frozen tick. */ -int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev) +int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev) { int index; /* - * Find the deepest state with ->enter_freeze present, which guarantees + * Find the deepest state with ->enter_s2idle present, which guarantees * that interrupts won't be enabled when it exits and allows the tick to * be frozen safely. 
*/ index = find_deepest_state(drv, dev, UINT_MAX, 0, true); if (index > 0) - enter_freeze_proper(drv, dev, index); + enter_s2idle_proper(drv, dev, index); return index; } diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c index e53fb861beb0..dc32f34e68d9 100644 --- a/drivers/cpuidle/driver.c +++ b/drivers/cpuidle/driver.c @@ -179,36 +179,6 @@ static void __cpuidle_driver_init(struct cpuidle_driver *drv) } } -#ifdef CONFIG_ARCH_HAS_CPU_RELAX -static int __cpuidle poll_idle(struct cpuidle_device *dev, - struct cpuidle_driver *drv, int index) -{ - local_irq_enable(); - if (!current_set_polling_and_test()) { - while (!need_resched()) - cpu_relax(); - } - current_clr_polling(); - - return index; -} - -static void poll_idle_init(struct cpuidle_driver *drv) -{ - struct cpuidle_state *state = &drv->states[0]; - - snprintf(state->name, CPUIDLE_NAME_LEN, "POLL"); - snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE"); - state->exit_latency = 0; - state->target_residency = 0; - state->power_usage = -1; - state->enter = poll_idle; - state->disabled = false; -} -#else -static void poll_idle_init(struct cpuidle_driver *drv) {} -#endif /* !CONFIG_ARCH_HAS_CPU_RELAX */ - /** * __cpuidle_register_driver: register the driver * @drv: a valid pointer to a struct cpuidle_driver @@ -246,8 +216,6 @@ static int __cpuidle_register_driver(struct cpuidle_driver *drv) on_each_cpu_mask(drv->cpumask, cpuidle_setup_broadcast_timer, (void *)1, 1); - poll_idle_init(drv); - return 0; } diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c index ae8eb0359889..53342b7f1010 100644 --- a/drivers/cpuidle/dt_idle_states.c +++ b/drivers/cpuidle/dt_idle_states.c @@ -41,9 +41,9 @@ static int init_state_node(struct cpuidle_state *idle_state, /* * Since this is not a "coupled" state, it's safe to assume interrupts * won't be enabled when it exits allowing the tick to be frozen - * safely. So enter() can be also enter_freeze() callback. + * safely. So enter() can be also enter_s2idle() callback. 
*/ - idle_state->enter_freeze = match_id->data; + idle_state->enter_s2idle = match_id->data; err = of_property_read_u32(state_node, "wakeup-latency-us", &idle_state->exit_latency); @@ -53,16 +53,16 @@ static int init_state_node(struct cpuidle_state *idle_state, err = of_property_read_u32(state_node, "entry-latency-us", &entry_latency); if (err) { - pr_debug(" * %s missing entry-latency-us property\n", - state_node->full_name); + pr_debug(" * %pOF missing entry-latency-us property\n", + state_node); return -EINVAL; } err = of_property_read_u32(state_node, "exit-latency-us", &exit_latency); if (err) { - pr_debug(" * %s missing exit-latency-us property\n", - state_node->full_name); + pr_debug(" * %pOF missing exit-latency-us property\n", + state_node); return -EINVAL; } /* @@ -75,8 +75,8 @@ static int init_state_node(struct cpuidle_state *idle_state, err = of_property_read_u32(state_node, "min-residency-us", &idle_state->target_residency); if (err) { - pr_debug(" * %s missing min-residency-us property\n", - state_node->full_name); + pr_debug(" * %pOF missing min-residency-us property\n", + state_node); return -EINVAL; } @@ -186,8 +186,8 @@ int dt_init_idle_driver(struct cpuidle_driver *drv, } if (!idle_state_valid(state_node, i, cpumask)) { - pr_warn("%s idle state not valid, bailing out\n", - state_node->full_name); + pr_warn("%pOF idle state not valid, bailing out\n", + state_node); err = -EINVAL; break; } @@ -200,8 +200,8 @@ int dt_init_idle_driver(struct cpuidle_driver *drv, idle_state = &drv->states[state_idx++]; err = init_state_node(idle_state, matches, state_node); if (err) { - pr_err("Parsing idle state node %s failed with err %d\n", - state_node->full_name, err); + pr_err("Parsing idle state node %pOF failed with err %d\n", + state_node, err); err = -EINVAL; break; } diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c index ac321f09e717..ce1a2ffffb2a 100644 --- a/drivers/cpuidle/governors/ladder.c +++ b/drivers/cpuidle/governors/ladder.c @@ -69,6 +69,7 @@ static int ladder_select_state(struct cpuidle_driver *drv, struct ladder_device *ldev = this_cpu_ptr(&ladder_devices); struct ladder_device_state *last_state; int last_residency, last_idx = ldev->last_state_idx; + int first_idx = drv->states[0].flags & CPUIDLE_FLAG_POLLING ? 1 : 0; int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY); /* Special case when user has set very strict latency requirement */ @@ -96,13 +97,13 @@ static int ladder_select_state(struct cpuidle_driver *drv, } /* consider demotion */ - if (last_idx > CPUIDLE_DRIVER_STATE_START && + if (last_idx > first_idx && (drv->states[last_idx].disabled || dev->states_usage[last_idx].disable || drv->states[last_idx].exit_latency > latency_req)) { int i; - for (i = last_idx - 1; i > CPUIDLE_DRIVER_STATE_START; i--) { + for (i = last_idx - 1; i > first_idx; i--) { if (drv->states[i].exit_latency <= latency_req) break; } @@ -110,7 +111,7 @@ static int ladder_select_state(struct cpuidle_driver *drv, return i; } - if (last_idx > CPUIDLE_DRIVER_STATE_START && + if (last_idx > first_idx && last_residency < last_state->threshold.demotion_time) { last_state->stats.demotion_count++; last_state->stats.promotion_count = 0; @@ -133,13 +134,14 @@ static int ladder_enable_device(struct cpuidle_driver *drv, struct cpuidle_device *dev) { int i; + int first_idx = drv->states[0].flags & CPUIDLE_FLAG_POLLING ? 
1 : 0; struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu); struct ladder_device_state *lstate; struct cpuidle_state *state; - ldev->last_state_idx = CPUIDLE_DRIVER_STATE_START; + ldev->last_state_idx = first_idx; - for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) { + for (i = first_idx; i < drv->state_count; i++) { state = &drv->states[i]; lstate = &ldev->states[i]; @@ -151,7 +153,7 @@ static int ladder_enable_device(struct cpuidle_driver *drv, if (i < drv->state_count - 1) lstate->threshold.promotion_time = state->exit_latency; - if (i > CPUIDLE_DRIVER_STATE_START) + if (i > first_idx) lstate->threshold.demotion_time = state->exit_latency; } diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 61b64c2b2cb8..48eaf2879228 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -324,8 +324,9 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) expected_interval = get_typical_interval(data); expected_interval = min(expected_interval, data->next_timer_us); - if (CPUIDLE_DRIVER_STATE_START > 0) { - struct cpuidle_state *s = &drv->states[CPUIDLE_DRIVER_STATE_START]; + first_idx = 0; + if (drv->states[0].flags & CPUIDLE_FLAG_POLLING) { + struct cpuidle_state *s = &drv->states[1]; unsigned int polling_threshold; /* @@ -336,12 +337,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) polling_threshold = max_t(unsigned int, 20, s->target_residency); if (data->next_timer_us > polling_threshold && latency_req > s->exit_latency && !s->disabled && - !dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable) - first_idx = CPUIDLE_DRIVER_STATE_START; - else - first_idx = CPUIDLE_DRIVER_STATE_START - 1; - } else { - first_idx = 0; + !dev->states_usage[1].disable) + first_idx = 1; } /* diff --git a/drivers/cpuidle/poll_state.c b/drivers/cpuidle/poll_state.c new file mode 100644 index 000000000000..7416b16287de --- /dev/null +++ b/drivers/cpuidle/poll_state.c @@ -0,0 +1,37 @@ +/* + * poll_state.c - Polling idle state + * + * This file is released under the GPLv2. + */ + +#include <linux/cpuidle.h> +#include <linux/sched.h> +#include <linux/sched/idle.h> + +static int __cpuidle poll_idle(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) +{ + local_irq_enable(); + if (!current_set_polling_and_test()) { + while (!need_resched()) + cpu_relax(); + } + current_clr_polling(); + + return index; +} + +void cpuidle_poll_state_init(struct cpuidle_driver *drv) +{ + struct cpuidle_state *state = &drv->states[0]; + + snprintf(state->name, CPUIDLE_NAME_LEN, "POLL"); + snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE"); + state->exit_latency = 0; + state->target_residency = 0; + state->power_usage = -1; + state->enter = poll_idle; + state->disabled = false; + state->flags = CPUIDLE_FLAG_POLLING; +} +EXPORT_SYMBOL_GPL(cpuidle_poll_state_init); diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 4b75084fabad..fe33c199fc1a 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -525,12 +525,26 @@ config CRYPTO_DEV_ATMEL_SHA To compile this driver as a module, choose M here: the module will be called atmel-sha. +config CRYPTO_DEV_ATMEL_ECC + tristate "Support for Microchip / Atmel ECC hw accelerator" + depends on ARCH_AT91 || COMPILE_TEST + depends on I2C + select CRYPTO_ECDH + select CRC16 + help + Microchip / Atmel ECC hw accelerator. + Select this if you want to use the Microchip / Atmel module for + the ECDH algorithm.
+ + To compile this driver as a module, choose M here: the module + will be called atmel-ecc. + config CRYPTO_DEV_CCP - bool "Support for AMD Cryptographic Coprocessor" + bool "Support for AMD Secure Processor" depends on ((X86 && PCI) || (ARM64 && (OF_ADDRESS || ACPI))) && HAS_IOMEM help - The AMD Cryptographic Coprocessor provides hardware offload support - for encryption, hashing and related operations. + The AMD Secure Processor provides support for the Cryptographic Coprocessor + (CCP) and the Platform Security Processor (PSP) devices. if CRYPTO_DEV_CCP source "drivers/crypto/ccp/Kconfig" @@ -616,6 +630,14 @@ config CRYPTO_DEV_SUN4I_SS To compile this driver as a module, choose M here: the module will be called sun4i-ss. +config CRYPTO_DEV_SUN4I_SS_PRNG + bool "Support for Allwinner Security System PRNG" + depends on CRYPTO_DEV_SUN4I_SS + select CRYPTO_RNG + help + Select this option if you want to provide kernel-side support for + the Pseudo-Random Number Generator found in the Security System. + config CRYPTO_DEV_ROCKCHIP tristate "Rockchip's Cryptographic Engine driver" depends on OF && ARCH_ROCKCHIP @@ -686,4 +708,25 @@ config CRYPTO_DEV_SAFEXCEL chain mode, AES cipher mode and SHA1/SHA224/SHA256/SHA512 hash algorithms. +config CRYPTO_DEV_ARTPEC6 + tristate "Support for Axis ARTPEC-6/7 hardware crypto acceleration." + depends on ARM && (ARCH_ARTPEC || COMPILE_TEST) + depends on HAS_DMA + depends on OF + select CRYPTO_AEAD + select CRYPTO_AES + select CRYPTO_ALGAPI + select CRYPTO_BLKCIPHER + select CRYPTO_CTR + select CRYPTO_HASH + select CRYPTO_SHA1 + select CRYPTO_SHA256 + select CRYPTO_SHA384 + select CRYPTO_SHA512 + help + Enables the driver for the on-chip crypto accelerator + of Axis ARTPEC SoCs. + + To compile this driver as a module, choose M here. + endif # CRYPTO_HW diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index 2c555a3393b2..808432b44c6b 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -1,6 +1,7 @@ obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o +obj-$(CONFIG_CRYPTO_DEV_ATMEL_ECC) += atmel-ecc.o obj-$(CONFIG_CRYPTO_DEV_BFIN_CRC) += bfin_crc.o obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += cavium/ obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/ @@ -35,7 +36,7 @@ obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/ obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/ obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o -obj-$(CONFIG_CRYPTO_DEV_STM32) += stm32/ +obj-$(CONFIG_ARCH_STM32) += stm32/ obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/ obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/ @@ -43,3 +44,4 @@ obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio/ obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/ obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/ obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/ +obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/ diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c new file mode 100644 index 000000000000..e66f18a0ddd0 --- /dev/null +++ b/drivers/crypto/atmel-ecc.c @@ -0,0 +1,781 @@ +/* + * Microchip / Atmel ECC (I2C) driver. + * + * Copyright (c) 2017, Microchip Technology Inc. + * Author: Tudor Ambarus + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "atmel-ecc.h" + +/* Used for binding tfm objects to i2c clients. */ +struct atmel_ecc_driver_data { + struct list_head i2c_client_list; + spinlock_t i2c_list_lock; +} ____cacheline_aligned; + +static struct atmel_ecc_driver_data driver_data; + +/** + * atmel_ecc_i2c_client_priv - i2c_client private data + * @client : pointer to i2c client device + * @i2c_client_list_node: part of i2c_client_list + * @lock : lock for sending i2c commands + * @wake_token : wake token array of zeros + * @wake_token_sz : size in bytes of the wake_token + * @tfm_count : number of active crypto transformations on i2c client + * + * Reads and writes from/to the i2c client are sequential. The first byte + * transmitted to the device is treated as the byte size. Any attempt to send + * more than this number of bytes will cause the device to not ACK those bytes. + * After the host writes a single command byte to the input buffer, reads are + * prohibited until after the device completes command execution. Use a mutex + * when sending i2c commands. + */ +struct atmel_ecc_i2c_client_priv { + struct i2c_client *client; + struct list_head i2c_client_list_node; + struct mutex lock; + u8 wake_token[WAKE_TOKEN_MAX_SIZE]; + size_t wake_token_sz; + atomic_t tfm_count ____cacheline_aligned; +}; + +/** + * atmel_ecdh_ctx - transformation context + * @client : pointer to i2c client device + * @fallback : used for unsupported curves or when user wants to use its own + * private key. + * @public_key : generated when calling set_secret(). It's the responsibility + * of the user to not call set_secret() while + * generate_public_key() or compute_shared_secret() are in flight. + * @curve_id : elliptic curve id + * @n_sz : size in bytes of the n prime + * @do_fallback: true when the device doesn't support the curve or when the user + * wants to use its own private key. + */ +struct atmel_ecdh_ctx { + struct i2c_client *client; + struct crypto_kpp *fallback; + const u8 *public_key; + unsigned int curve_id; + size_t n_sz; + bool do_fallback; +}; + +/** + * atmel_ecc_work_data - data structure representing the work + * @ctx : transformation context. + * @cbk : pointer to a callback function to be invoked upon completion of this + * request. This has the form: + * callback(struct atmel_ecc_work_data *work_data, void *areq, u8 status) + * where: + * @work_data: data structure representing the work + * @areq : optional pointer to an argument passed with the original + * request. + * @status : status returned from the i2c client device or i2c error. + * @areq: optional pointer to a user argument for use at callback time. + * @work: describes the task to be executed. + * @cmd : structure used for communicating with the device. 
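+ * + * The work item is queued on the system workqueue via schedule_work(), so @cbk is invoked from process context once atmel_ecc_send_receive() has finished talking to the device.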
+ */ +struct atmel_ecc_work_data { + struct atmel_ecdh_ctx *ctx; + void (*cbk)(struct atmel_ecc_work_data *work_data, void *areq, + int status); + void *areq; + struct work_struct work; + struct atmel_ecc_cmd cmd; +}; + +static u16 atmel_ecc_crc16(u16 crc, const u8 *buffer, size_t len) +{ + return cpu_to_le16(bitrev16(crc16(crc, buffer, len))); +} + +/** + * atmel_ecc_checksum() - Generate 16-bit CRC as required by ATMEL ECC. + * CRC16 verification of the count, opcode, param1, param2 and data bytes. + * The checksum is saved in little-endian format in the least significant + * two bytes of the command. CRC polynomial is 0x8005 and the initial register + * value should be zero. + * + * @cmd : structure used for communicating with the device. + */ +static void atmel_ecc_checksum(struct atmel_ecc_cmd *cmd) +{ + u8 *data = &cmd->count; + size_t len = cmd->count - CRC_SIZE; + u16 *crc16 = (u16 *)(data + len); + + *crc16 = atmel_ecc_crc16(0, data, len); +} + +static void atmel_ecc_init_read_cmd(struct atmel_ecc_cmd *cmd) +{ + cmd->word_addr = COMMAND; + cmd->opcode = OPCODE_READ; + /* + * Read the word from Configuration zone that contains the lock bytes + * (UserExtra, Selector, LockValue, LockConfig). + */ + cmd->param1 = CONFIG_ZONE; + cmd->param2 = DEVICE_LOCK_ADDR; + cmd->count = READ_COUNT; + + atmel_ecc_checksum(cmd); + + cmd->msecs = MAX_EXEC_TIME_READ; + cmd->rxsize = READ_RSP_SIZE; +} + +static void atmel_ecc_init_genkey_cmd(struct atmel_ecc_cmd *cmd, u16 keyid) +{ + cmd->word_addr = COMMAND; + cmd->count = GENKEY_COUNT; + cmd->opcode = OPCODE_GENKEY; + cmd->param1 = GENKEY_MODE_PRIVATE; + /* a random private key will be generated and stored in slot keyID */ + cmd->param2 = cpu_to_le16(keyid); + + atmel_ecc_checksum(cmd); + + cmd->msecs = MAX_EXEC_TIME_GENKEY; + cmd->rxsize = GENKEY_RSP_SIZE; +} + +static int atmel_ecc_init_ecdh_cmd(struct atmel_ecc_cmd *cmd, + struct scatterlist *pubkey) +{ + size_t copied; + + cmd->word_addr = COMMAND; + cmd->count = ECDH_COUNT; + cmd->opcode = OPCODE_ECDH; + cmd->param1 = ECDH_PREFIX_MODE; + /* private key slot */ + cmd->param2 = cpu_to_le16(DATA_SLOT_2); + + /* + * The device only supports NIST P256 ECC keys. The public key size will + * always be the same. Use a macro for the key size to avoid unnecessary + * computations. + */ + copied = sg_copy_to_buffer(pubkey, 1, cmd->data, ATMEL_ECC_PUBKEY_SIZE); + if (copied != ATMEL_ECC_PUBKEY_SIZE) + return -EINVAL; + + atmel_ecc_checksum(cmd); + + cmd->msecs = MAX_EXEC_TIME_ECDH; + cmd->rxsize = ECDH_RSP_SIZE; + + return 0; +} + +/* + * After wake and after execution of a command, there will be error, status, or + * result bytes in the device's output register that can be retrieved by the + * system. When the length of that group is four bytes, the codes returned are + * detailed in error_list. 
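+ * + * atmel_ecc_status() below implements exactly that: a group whose count byte equals STATUS_SIZE is treated as a status packet, wake-successful and no-error codes are reported as success, codes found in error_list are logged and returned, and anything else is ignored.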
+ */ +static int atmel_ecc_status(struct device *dev, u8 *status) +{ + size_t err_list_len = ARRAY_SIZE(error_list); + int i; + u8 err_id = status[1]; + + if (*status != STATUS_SIZE) + return 0; + + if (err_id == STATUS_WAKE_SUCCESSFUL || err_id == STATUS_NOERR) + return 0; + + for (i = 0; i < err_list_len; i++) + if (error_list[i].value == err_id) + break; + + /* if err_id is not in the error_list then ignore it */ + if (i != err_list_len) { + dev_err(dev, "%02x: %s:\n", err_id, error_list[i].error_text); + return err_id; + } + + return 0; +} + +static int atmel_ecc_wakeup(struct i2c_client *client) +{ + struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client); + u8 status[STATUS_RSP_SIZE]; + int ret; + + /* + * The device ignores any levels or transitions on the SCL pin when the + * device is idle, asleep or during waking up. Don't check for error + * when waking up the device. + */ + i2c_master_send(client, i2c_priv->wake_token, i2c_priv->wake_token_sz); + + /* + * Wait to wake the device. Typical execution times for ecdh and genkey + * are around tens of milliseconds. Delta is chosen to 50 microseconds. + */ + usleep_range(TWHI_MIN, TWHI_MAX); + + ret = i2c_master_recv(client, status, STATUS_SIZE); + if (ret < 0) + return ret; + + return atmel_ecc_status(&client->dev, status); +} + +static int atmel_ecc_sleep(struct i2c_client *client) +{ + u8 sleep = SLEEP_TOKEN; + + return i2c_master_send(client, &sleep, 1); +} + +static void atmel_ecdh_done(struct atmel_ecc_work_data *work_data, void *areq, + int status) +{ + struct kpp_request *req = areq; + struct atmel_ecdh_ctx *ctx = work_data->ctx; + struct atmel_ecc_cmd *cmd = &work_data->cmd; + size_t copied; + size_t n_sz = ctx->n_sz; + + if (status) + goto free_work_data; + + /* copy the shared secret */ + copied = sg_copy_from_buffer(req->dst, 1, &cmd->data[RSP_DATA_IDX], + n_sz); + if (copied != n_sz) + status = -EINVAL; + + /* fall through */ +free_work_data: + kzfree(work_data); + kpp_request_complete(req, status); +} + +/* + * atmel_ecc_send_receive() - send a command to the device and receive its + * response. + * @client: i2c client device + * @cmd : structure used to communicate with the device + * + * After the device receives a Wake token, a watchdog counter starts within the + * device. After the watchdog timer expires, the device enters sleep mode + * regardless of whether some I/O transmission or command execution is in + * progress. If a command is attempted when insufficient time remains prior to + * watchdog timer execution, the device will return the watchdog timeout error + * code without attempting to execute the command. There is no way to reset the + * counter other than to put the device into sleep or idle mode and then + * wake it up again. 
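+ * + * Accordingly, every exchange below follows the same fixed sequence under the per-client mutex: wake the device, send the command, sleep for cmd->msecs while it executes, read back cmd->rxsize bytes and put the device back into low-power mode before parsing the returned status.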
+ */ +static int atmel_ecc_send_receive(struct i2c_client *client, + struct atmel_ecc_cmd *cmd) +{ + struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client); + int ret; + + mutex_lock(&i2c_priv->lock); + + ret = atmel_ecc_wakeup(client); + if (ret) + goto err; + + /* send the command */ + ret = i2c_master_send(client, (u8 *)cmd, cmd->count + WORD_ADDR_SIZE); + if (ret < 0) + goto err; + + /* delay the appropriate amount of time for command to execute */ + msleep(cmd->msecs); + + /* receive the response */ + ret = i2c_master_recv(client, cmd->data, cmd->rxsize); + if (ret < 0) + goto err; + + /* put the device into low-power mode */ + ret = atmel_ecc_sleep(client); + if (ret < 0) + goto err; + + mutex_unlock(&i2c_priv->lock); + return atmel_ecc_status(&client->dev, cmd->data); +err: + mutex_unlock(&i2c_priv->lock); + return ret; +} + +static void atmel_ecc_work_handler(struct work_struct *work) +{ + struct atmel_ecc_work_data *work_data = + container_of(work, struct atmel_ecc_work_data, work); + struct atmel_ecc_cmd *cmd = &work_data->cmd; + struct i2c_client *client = work_data->ctx->client; + int status; + + status = atmel_ecc_send_receive(client, cmd); + work_data->cbk(work_data, work_data->areq, status); +} + +static void atmel_ecc_enqueue(struct atmel_ecc_work_data *work_data, + void (*cbk)(struct atmel_ecc_work_data *work_data, + void *areq, int status), + void *areq) +{ + work_data->cbk = (void *)cbk; + work_data->areq = areq; + + INIT_WORK(&work_data->work, atmel_ecc_work_handler); + schedule_work(&work_data->work); +} + +static unsigned int atmel_ecdh_supported_curve(unsigned int curve_id) +{ + if (curve_id == ECC_CURVE_NIST_P256) + return ATMEL_ECC_NIST_P256_N_SIZE; + + return 0; +} + +/* + * A random private key is generated and stored in the device. The device + * returns the pair public key. + */ +static int atmel_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf, + unsigned int len) +{ + struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm); + struct atmel_ecc_cmd *cmd; + void *public_key; + struct ecdh params; + int ret = -ENOMEM; + + /* free the old public key, if any */ + kfree(ctx->public_key); + /* make sure you don't free the old public key twice */ + ctx->public_key = NULL; + + if (crypto_ecdh_decode_key(buf, len, ¶ms) < 0) { + dev_err(&ctx->client->dev, "crypto_ecdh_decode_key failed\n"); + return -EINVAL; + } + + ctx->n_sz = atmel_ecdh_supported_curve(params.curve_id); + if (!ctx->n_sz || params.key_size) { + /* fallback to ecdh software implementation */ + ctx->do_fallback = true; + return crypto_kpp_set_secret(ctx->fallback, buf, len); + } + + cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + /* + * The device only supports NIST P256 ECC keys. The public key size will + * always be the same. Use a macro for the key size to avoid unnecessary + * computations. 
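+ * For NIST P256 that size is ATMEL_ECC_PUBKEY_SIZE, i.e. 2 * 32 = 64
+ * bytes for the X and Y coordinates of the public point.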
+ */ + public_key = kmalloc(ATMEL_ECC_PUBKEY_SIZE, GFP_KERNEL); + if (!public_key) + goto free_cmd; + + ctx->do_fallback = false; + ctx->curve_id = params.curve_id; + + atmel_ecc_init_genkey_cmd(cmd, DATA_SLOT_2); + + ret = atmel_ecc_send_receive(ctx->client, cmd); + if (ret) + goto free_public_key; + + /* save the public key */ + memcpy(public_key, &cmd->data[RSP_DATA_IDX], ATMEL_ECC_PUBKEY_SIZE); + ctx->public_key = public_key; + + kfree(cmd); + return 0; + +free_public_key: + kfree(public_key); +free_cmd: + kfree(cmd); + return ret; +} + +static int atmel_ecdh_generate_public_key(struct kpp_request *req) +{ + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm); + size_t copied; + int ret = 0; + + if (ctx->do_fallback) { + kpp_request_set_tfm(req, ctx->fallback); + return crypto_kpp_generate_public_key(req); + } + + /* public key was saved at private key generation */ + copied = sg_copy_from_buffer(req->dst, 1, ctx->public_key, + ATMEL_ECC_PUBKEY_SIZE); + if (copied != ATMEL_ECC_PUBKEY_SIZE) + ret = -EINVAL; + + return ret; +} + +static int atmel_ecdh_compute_shared_secret(struct kpp_request *req) +{ + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm); + struct atmel_ecc_work_data *work_data; + gfp_t gfp; + int ret; + + if (ctx->do_fallback) { + kpp_request_set_tfm(req, ctx->fallback); + return crypto_kpp_compute_shared_secret(req); + } + + gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : + GFP_ATOMIC; + + work_data = kmalloc(sizeof(*work_data), gfp); + if (!work_data) + return -ENOMEM; + + work_data->ctx = ctx; + + ret = atmel_ecc_init_ecdh_cmd(&work_data->cmd, req->src); + if (ret) + goto free_work_data; + + atmel_ecc_enqueue(work_data, atmel_ecdh_done, req); + + return -EINPROGRESS; + +free_work_data: + kfree(work_data); + return ret; +} + +static struct i2c_client *atmel_ecc_i2c_client_alloc(void) +{ + struct atmel_ecc_i2c_client_priv *i2c_priv, *min_i2c_priv = NULL; + struct i2c_client *client = ERR_PTR(-ENODEV); + int min_tfm_cnt = INT_MAX; + int tfm_cnt; + + spin_lock(&driver_data.i2c_list_lock); + + if (list_empty(&driver_data.i2c_client_list)) { + spin_unlock(&driver_data.i2c_list_lock); + return ERR_PTR(-ENODEV); + } + + list_for_each_entry(i2c_priv, &driver_data.i2c_client_list, + i2c_client_list_node) { + tfm_cnt = atomic_read(&i2c_priv->tfm_count); + if (tfm_cnt < min_tfm_cnt) { + min_tfm_cnt = tfm_cnt; + min_i2c_priv = i2c_priv; + } + if (!min_tfm_cnt) + break; + } + + if (min_i2c_priv) { + atomic_inc(&min_i2c_priv->tfm_count); + client = min_i2c_priv->client; + } + + spin_unlock(&driver_data.i2c_list_lock); + + return client; +} + +static void atmel_ecc_i2c_client_free(struct i2c_client *client) +{ + struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client); + + atomic_dec(&i2c_priv->tfm_count); +} + +static int atmel_ecdh_init_tfm(struct crypto_kpp *tfm) +{ + const char *alg = kpp_alg_name(tfm); + struct crypto_kpp *fallback; + struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm); + + ctx->client = atmel_ecc_i2c_client_alloc(); + if (IS_ERR(ctx->client)) { + pr_err("tfm - i2c_client binding failed\n"); + return PTR_ERR(ctx->client); + } + + fallback = crypto_alloc_kpp(alg, 0, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(fallback)) { + dev_err(&ctx->client->dev, "Failed to allocate transformation for '%s': %ld\n", + alg, PTR_ERR(fallback)); + return PTR_ERR(fallback); + } + + crypto_kpp_set_flags(fallback, crypto_kpp_get_flags(tfm)); + + 
dev_info(&ctx->client->dev, "Using '%s' as fallback implementation.\n", + crypto_tfm_alg_driver_name(crypto_kpp_tfm(fallback))); + + ctx->fallback = fallback; + + return 0; +} + +static void atmel_ecdh_exit_tfm(struct crypto_kpp *tfm) +{ + struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm); + + kfree(ctx->public_key); + crypto_free_kpp(ctx->fallback); + atmel_ecc_i2c_client_free(ctx->client); +} + +static unsigned int atmel_ecdh_max_size(struct crypto_kpp *tfm) +{ + struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm); + + if (ctx->fallback) + return crypto_kpp_maxsize(ctx->fallback); + + /* + * The device only supports NIST P256 ECC keys. The public key size will + * always be the same. Use a macro for the key size to avoid unnecessary + * computations. + */ + return ATMEL_ECC_PUBKEY_SIZE; +} + +static struct kpp_alg atmel_ecdh = { + .set_secret = atmel_ecdh_set_secret, + .generate_public_key = atmel_ecdh_generate_public_key, + .compute_shared_secret = atmel_ecdh_compute_shared_secret, + .init = atmel_ecdh_init_tfm, + .exit = atmel_ecdh_exit_tfm, + .max_size = atmel_ecdh_max_size, + .base = { + .cra_flags = CRYPTO_ALG_NEED_FALLBACK, + .cra_name = "ecdh", + .cra_driver_name = "atmel-ecdh", + .cra_priority = ATMEL_ECC_PRIORITY, + .cra_module = THIS_MODULE, + .cra_ctxsize = sizeof(struct atmel_ecdh_ctx), + }, +}; + +static inline size_t atmel_ecc_wake_token_sz(u32 bus_clk_rate) +{ + u32 no_of_bits = DIV_ROUND_UP(TWLO_USEC * bus_clk_rate, USEC_PER_SEC); + + /* return the size of the wake_token in bytes */ + return DIV_ROUND_UP(no_of_bits, 8); +} + +static int device_sanity_check(struct i2c_client *client) +{ + struct atmel_ecc_cmd *cmd; + int ret; + + cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + atmel_ecc_init_read_cmd(cmd); + + ret = atmel_ecc_send_receive(client, cmd); + if (ret) + goto free_cmd; + + /* + * It is vital that the Configuration, Data and OTP zones be locked + * prior to release into the field of the system containing the device. + * Failure to lock these zones may permit modification of any secret + * keys and may lead to other security problems. + */ + if (cmd->data[LOCK_CONFIG_IDX] || cmd->data[LOCK_VALUE_IDX]) { + dev_err(&client->dev, "Configuration or Data and OTP zones are unlocked!\n"); + ret = -ENOTSUPP; + } + + /* fall through */ +free_cmd: + kfree(cmd); + return ret; +} + +static int atmel_ecc_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct atmel_ecc_i2c_client_priv *i2c_priv; + struct device *dev = &client->dev; + int ret; + u32 bus_clk_rate; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + dev_err(dev, "I2C_FUNC_I2C not supported\n"); + return -ENODEV; + } + + ret = of_property_read_u32(client->adapter->dev.of_node, + "clock-frequency", &bus_clk_rate); + if (ret) { + dev_err(dev, "of: failed to read clock-frequency property\n"); + return ret; + } + + if (bus_clk_rate > 1000000L) { + dev_err(dev, "%d exceeds maximum supported clock frequency (1MHz)\n", + bus_clk_rate); + return -EINVAL; + } + + i2c_priv = devm_kmalloc(dev, sizeof(*i2c_priv), GFP_KERNEL); + if (!i2c_priv) + return -ENOMEM; + + i2c_priv->client = client; + mutex_init(&i2c_priv->lock); + + /* + * WAKE_TOKEN_MAX_SIZE was calculated for the maximum bus_clk_rate - + * 1MHz. The previous bus_clk_rate check ensures us that wake_token_sz + * will always be smaller than or equal to WAKE_TOKEN_MAX_SIZE. 
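+ * For example, at the 1 MHz maximum this is DIV_ROUND_UP(60 * 1000000,
+ * USEC_PER_SEC) = 60 bits, i.e. 8 bytes (WAKE_TOKEN_MAX_SIZE), while a
+ * 100 kHz bus needs only 6 bits, i.e. a single zero byte.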
+ */ + i2c_priv->wake_token_sz = atmel_ecc_wake_token_sz(bus_clk_rate); + + memset(i2c_priv->wake_token, 0, sizeof(i2c_priv->wake_token)); + + atomic_set(&i2c_priv->tfm_count, 0); + + i2c_set_clientdata(client, i2c_priv); + + ret = device_sanity_check(client); + if (ret) + return ret; + + spin_lock(&driver_data.i2c_list_lock); + list_add_tail(&i2c_priv->i2c_client_list_node, + &driver_data.i2c_client_list); + spin_unlock(&driver_data.i2c_list_lock); + + ret = crypto_register_kpp(&atmel_ecdh); + if (ret) { + spin_lock(&driver_data.i2c_list_lock); + list_del(&i2c_priv->i2c_client_list_node); + spin_unlock(&driver_data.i2c_list_lock); + + dev_err(dev, "%s alg registration failed\n", + atmel_ecdh.base.cra_driver_name); + } else { + dev_info(dev, "atmel ecc algorithms registered in /proc/crypto\n"); + } + + return ret; +} + +static int atmel_ecc_remove(struct i2c_client *client) +{ + struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client); + + /* Return EBUSY if i2c client already allocated. */ + if (atomic_read(&i2c_priv->tfm_count)) { + dev_err(&client->dev, "Device is busy\n"); + return -EBUSY; + } + + crypto_unregister_kpp(&atmel_ecdh); + + spin_lock(&driver_data.i2c_list_lock); + list_del(&i2c_priv->i2c_client_list_node); + spin_unlock(&driver_data.i2c_list_lock); + + return 0; +} + +#ifdef CONFIG_OF +static const struct of_device_id atmel_ecc_dt_ids[] = { + { + .compatible = "atmel,atecc508a", + }, { + /* sentinel */ + } +}; +MODULE_DEVICE_TABLE(of, atmel_ecc_dt_ids); +#endif + +static const struct i2c_device_id atmel_ecc_id[] = { + { "atecc508a", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, atmel_ecc_id); + +static struct i2c_driver atmel_ecc_driver = { + .driver = { + .name = "atmel-ecc", + .of_match_table = of_match_ptr(atmel_ecc_dt_ids), + }, + .probe = atmel_ecc_probe, + .remove = atmel_ecc_remove, + .id_table = atmel_ecc_id, +}; + +static int __init atmel_ecc_init(void) +{ + spin_lock_init(&driver_data.i2c_list_lock); + INIT_LIST_HEAD(&driver_data.i2c_client_list); + return i2c_add_driver(&atmel_ecc_driver); +} + +static void __exit atmel_ecc_exit(void) +{ + flush_scheduled_work(); + i2c_del_driver(&atmel_ecc_driver); +} + +module_init(atmel_ecc_init); +module_exit(atmel_ecc_exit); + +MODULE_AUTHOR("Tudor Ambarus "); +MODULE_DESCRIPTION("Microchip / Atmel ECC (I2C) driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/crypto/atmel-ecc.h b/drivers/crypto/atmel-ecc.h new file mode 100644 index 000000000000..25232c8abcc2 --- /dev/null +++ b/drivers/crypto/atmel-ecc.h @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2017, Microchip Technology Inc. + * Author: Tudor Ambarus + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ * + */ + +#ifndef __ATMEL_ECC_H__ +#define __ATMEL_ECC_H__ + +#define ATMEL_ECC_PRIORITY 300 + +#define COMMAND 0x03 /* packet function */ +#define SLEEP_TOKEN 0x01 +#define WAKE_TOKEN_MAX_SIZE 8 + +/* Definitions of Data and Command sizes */ +#define WORD_ADDR_SIZE 1 +#define COUNT_SIZE 1 +#define CRC_SIZE 2 +#define CMD_OVERHEAD_SIZE (COUNT_SIZE + CRC_SIZE) + +/* size in bytes of the n prime */ +#define ATMEL_ECC_NIST_P256_N_SIZE 32 +#define ATMEL_ECC_PUBKEY_SIZE (2 * ATMEL_ECC_NIST_P256_N_SIZE) + +#define STATUS_RSP_SIZE 4 +#define ECDH_RSP_SIZE (32 + CMD_OVERHEAD_SIZE) +#define GENKEY_RSP_SIZE (ATMEL_ECC_PUBKEY_SIZE + \ + CMD_OVERHEAD_SIZE) +#define READ_RSP_SIZE (4 + CMD_OVERHEAD_SIZE) +#define MAX_RSP_SIZE GENKEY_RSP_SIZE + +/** + * atmel_ecc_cmd - structure used for communicating with the device. + * @word_addr: indicates the function of the packet sent to the device. This + * byte should have a value of COMMAND for normal operation. + * @count : number of bytes to be transferred to (or from) the device. + * @opcode : the command code. + * @param1 : the first parameter; always present. + * @param2 : the second parameter; always present. + * @data : optional remaining input data. Includes a 2-byte CRC. + * @rxsize : size of the data received from i2c client. + * @msecs : command execution time in milliseconds + */ +struct atmel_ecc_cmd { + u8 word_addr; + u8 count; + u8 opcode; + u8 param1; + u16 param2; + u8 data[MAX_RSP_SIZE]; + u8 msecs; + u16 rxsize; +} __packed; + +/* Status/Error codes */ +#define STATUS_SIZE 0x04 +#define STATUS_NOERR 0x00 +#define STATUS_WAKE_SUCCESSFUL 0x11 + +static const struct { + u8 value; + const char *error_text; +} error_list[] = { + { 0x01, "CheckMac or Verify miscompare" }, + { 0x03, "Parse Error" }, + { 0x05, "ECC Fault" }, + { 0x0F, "Execution Error" }, + { 0xEE, "Watchdog about to expire" }, + { 0xFF, "CRC or other communication error" }, +}; + +/* Definitions for eeprom organization */ +#define CONFIG_ZONE 0 + +/* Definitions for Indexes common to all commands */ +#define RSP_DATA_IDX 1 /* buffer index of data in response */ +#define DATA_SLOT_2 2 /* used for ECDH private key */ + +/* Definitions for the device lock state */ +#define DEVICE_LOCK_ADDR 0x15 +#define LOCK_VALUE_IDX (RSP_DATA_IDX + 2) +#define LOCK_CONFIG_IDX (RSP_DATA_IDX + 3) + +/* + * Wake High delay to data communication (microseconds). SDA should be stable + * high for this entire duration. 
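+ * atmel_ecc_wakeup() uses these values as the bounds of its
+ * usleep_range(), i.e. it waits 1500-1550 us after sending the wake
+ * token before reading the status block.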
+ */ +#define TWHI_MIN 1500 +#define TWHI_MAX 1550 + +/* Wake Low duration */ +#define TWLO_USEC 60 + +/* Command execution time (milliseconds) */ +#define MAX_EXEC_TIME_ECDH 58 +#define MAX_EXEC_TIME_GENKEY 115 +#define MAX_EXEC_TIME_READ 1 + +/* Command opcode */ +#define OPCODE_ECDH 0x43 +#define OPCODE_GENKEY 0x40 +#define OPCODE_READ 0x02 + +/* Definitions for the READ Command */ +#define READ_COUNT 7 + +/* Definitions for the GenKey Command */ +#define GENKEY_COUNT 7 +#define GENKEY_MODE_PRIVATE 0x04 + +/* Definitions for the ECDH Command */ +#define ECDH_COUNT 71 +#define ECDH_PREFIX_MODE 0x00 + +#endif /* __ATMEL_ECC_H__ */ diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c index dad4e5bad827..3e2f41b3eaf3 100644 --- a/drivers/crypto/atmel-sha.c +++ b/drivers/crypto/atmel-sha.c @@ -2883,7 +2883,7 @@ static int atmel_sha_probe(struct platform_device *pdev) static int atmel_sha_remove(struct platform_device *pdev) { - static struct atmel_sha_dev *sha_dd; + struct atmel_sha_dev *sha_dd; sha_dd = platform_get_drvdata(pdev); if (!sha_dd) diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c index b25f1b3c981f..f4b335dda568 100644 --- a/drivers/crypto/atmel-tdes.c +++ b/drivers/crypto/atmel-tdes.c @@ -1487,7 +1487,7 @@ static int atmel_tdes_probe(struct platform_device *pdev) static int atmel_tdes_remove(struct platform_device *pdev) { - static struct atmel_tdes_dev *tdes_dd; + struct atmel_tdes_dev *tdes_dd; tdes_dd = platform_get_drvdata(pdev); if (!tdes_dd) diff --git a/drivers/crypto/axis/Makefile b/drivers/crypto/axis/Makefile new file mode 100644 index 000000000000..be9a84a4b667 --- /dev/null +++ b/drivers/crypto/axis/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) := artpec6_crypto.o diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c new file mode 100644 index 000000000000..d9fbbf01062b --- /dev/null +++ b/drivers/crypto/axis/artpec6_crypto.c @@ -0,0 +1,3192 @@ +/* + * Driver for ARTPEC-6 crypto block using the kernel asynchronous crypto api. + * + * Copyright (C) 2014-2017 Axis Communications AB + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +/* Max length of a line in all cache levels for Artpec SoCs. 
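+ * Buffers handed to the IN DMA channel are bounced unless they are
+ * aligned to and sized in multiples of this value, so that no cache
+ * line is shared between DMA data and unrelated CPU data (see
+ * artpec6_crypto_setup_sg_descrs_in()).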
*/ +#define ARTPEC_CACHE_LINE_MAX 32 + +#define PDMA_OUT_CFG 0x0000 +#define PDMA_OUT_BUF_CFG 0x0004 +#define PDMA_OUT_CMD 0x0008 +#define PDMA_OUT_DESCRQ_PUSH 0x0010 +#define PDMA_OUT_DESCRQ_STAT 0x0014 + +#define A6_PDMA_IN_CFG 0x0028 +#define A6_PDMA_IN_BUF_CFG 0x002c +#define A6_PDMA_IN_CMD 0x0030 +#define A6_PDMA_IN_STATQ_PUSH 0x0038 +#define A6_PDMA_IN_DESCRQ_PUSH 0x0044 +#define A6_PDMA_IN_DESCRQ_STAT 0x0048 +#define A6_PDMA_INTR_MASK 0x0068 +#define A6_PDMA_ACK_INTR 0x006c +#define A6_PDMA_MASKED_INTR 0x0074 + +#define A7_PDMA_IN_CFG 0x002c +#define A7_PDMA_IN_BUF_CFG 0x0030 +#define A7_PDMA_IN_CMD 0x0034 +#define A7_PDMA_IN_STATQ_PUSH 0x003c +#define A7_PDMA_IN_DESCRQ_PUSH 0x0048 +#define A7_PDMA_IN_DESCRQ_STAT 0x004C +#define A7_PDMA_INTR_MASK 0x006c +#define A7_PDMA_ACK_INTR 0x0070 +#define A7_PDMA_MASKED_INTR 0x0078 + +#define PDMA_OUT_CFG_EN BIT(0) + +#define PDMA_OUT_BUF_CFG_DATA_BUF_SIZE GENMASK(4, 0) +#define PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE GENMASK(9, 5) + +#define PDMA_OUT_CMD_START BIT(0) +#define A6_PDMA_OUT_CMD_STOP BIT(3) +#define A7_PDMA_OUT_CMD_STOP BIT(2) + +#define PDMA_OUT_DESCRQ_PUSH_LEN GENMASK(5, 0) +#define PDMA_OUT_DESCRQ_PUSH_ADDR GENMASK(31, 6) + +#define PDMA_OUT_DESCRQ_STAT_LEVEL GENMASK(3, 0) +#define PDMA_OUT_DESCRQ_STAT_SIZE GENMASK(7, 4) + +#define PDMA_IN_CFG_EN BIT(0) + +#define PDMA_IN_BUF_CFG_DATA_BUF_SIZE GENMASK(4, 0) +#define PDMA_IN_BUF_CFG_DESCR_BUF_SIZE GENMASK(9, 5) +#define PDMA_IN_BUF_CFG_STAT_BUF_SIZE GENMASK(14, 10) + +#define PDMA_IN_CMD_START BIT(0) +#define A6_PDMA_IN_CMD_FLUSH_STAT BIT(2) +#define A6_PDMA_IN_CMD_STOP BIT(3) +#define A7_PDMA_IN_CMD_FLUSH_STAT BIT(1) +#define A7_PDMA_IN_CMD_STOP BIT(2) + +#define PDMA_IN_STATQ_PUSH_LEN GENMASK(5, 0) +#define PDMA_IN_STATQ_PUSH_ADDR GENMASK(31, 6) + +#define PDMA_IN_DESCRQ_PUSH_LEN GENMASK(5, 0) +#define PDMA_IN_DESCRQ_PUSH_ADDR GENMASK(31, 6) + +#define PDMA_IN_DESCRQ_STAT_LEVEL GENMASK(3, 0) +#define PDMA_IN_DESCRQ_STAT_SIZE GENMASK(7, 4) + +#define A6_PDMA_INTR_MASK_IN_DATA BIT(2) +#define A6_PDMA_INTR_MASK_IN_EOP BIT(3) +#define A6_PDMA_INTR_MASK_IN_EOP_FLUSH BIT(4) + +#define A7_PDMA_INTR_MASK_IN_DATA BIT(3) +#define A7_PDMA_INTR_MASK_IN_EOP BIT(4) +#define A7_PDMA_INTR_MASK_IN_EOP_FLUSH BIT(5) + +#define A6_CRY_MD_OPER GENMASK(19, 16) + +#define A6_CRY_MD_HASH_SEL_CTX GENMASK(21, 20) +#define A6_CRY_MD_HASH_HMAC_FIN BIT(23) + +#define A6_CRY_MD_CIPHER_LEN GENMASK(21, 20) +#define A6_CRY_MD_CIPHER_DECR BIT(22) +#define A6_CRY_MD_CIPHER_TWEAK BIT(23) +#define A6_CRY_MD_CIPHER_DSEQ BIT(24) + +#define A7_CRY_MD_OPER GENMASK(11, 8) + +#define A7_CRY_MD_HASH_SEL_CTX GENMASK(13, 12) +#define A7_CRY_MD_HASH_HMAC_FIN BIT(15) + +#define A7_CRY_MD_CIPHER_LEN GENMASK(13, 12) +#define A7_CRY_MD_CIPHER_DECR BIT(14) +#define A7_CRY_MD_CIPHER_TWEAK BIT(15) +#define A7_CRY_MD_CIPHER_DSEQ BIT(16) + +/* DMA metadata constants */ +#define regk_crypto_aes_cbc 0x00000002 +#define regk_crypto_aes_ctr 0x00000003 +#define regk_crypto_aes_ecb 0x00000001 +#define regk_crypto_aes_gcm 0x00000004 +#define regk_crypto_aes_xts 0x00000005 +#define regk_crypto_cache 0x00000002 +#define a6_regk_crypto_dlkey 0x0000000a +#define a7_regk_crypto_dlkey 0x0000000e +#define regk_crypto_ext 0x00000001 +#define regk_crypto_hmac_sha1 0x00000007 +#define regk_crypto_hmac_sha256 0x00000009 +#define regk_crypto_hmac_sha384 0x0000000b +#define regk_crypto_hmac_sha512 0x0000000d +#define regk_crypto_init 0x00000000 +#define regk_crypto_key_128 0x00000000 +#define regk_crypto_key_192 0x00000001 +#define regk_crypto_key_256 
0x00000002 +#define regk_crypto_null 0x00000000 +#define regk_crypto_sha1 0x00000006 +#define regk_crypto_sha256 0x00000008 +#define regk_crypto_sha384 0x0000000a +#define regk_crypto_sha512 0x0000000c + +/* DMA descriptor structures */ +struct pdma_descr_ctrl { + unsigned char short_descr : 1; + unsigned char pad1 : 1; + unsigned char eop : 1; + unsigned char intr : 1; + unsigned char short_len : 3; + unsigned char pad2 : 1; +} __packed; + +struct pdma_data_descr { + unsigned int len : 24; + unsigned int buf : 32; +} __packed; + +struct pdma_short_descr { + unsigned char data[7]; +} __packed; + +struct pdma_descr { + struct pdma_descr_ctrl ctrl; + union { + struct pdma_data_descr data; + struct pdma_short_descr shrt; + }; +}; + +struct pdma_stat_descr { + unsigned char pad1 : 1; + unsigned char pad2 : 1; + unsigned char eop : 1; + unsigned char pad3 : 5; + unsigned int len : 24; +}; + +/* Each descriptor array can hold max 64 entries */ +#define PDMA_DESCR_COUNT 64 + +#define MODULE_NAME "Artpec-6 CA" + +/* Hash modes (including HMAC variants) */ +#define ARTPEC6_CRYPTO_HASH_SHA1 1 +#define ARTPEC6_CRYPTO_HASH_SHA256 2 +#define ARTPEC6_CRYPTO_HASH_SHA384 3 +#define ARTPEC6_CRYPTO_HASH_SHA512 4 + +/* Crypto modes */ +#define ARTPEC6_CRYPTO_CIPHER_AES_ECB 1 +#define ARTPEC6_CRYPTO_CIPHER_AES_CBC 2 +#define ARTPEC6_CRYPTO_CIPHER_AES_CTR 3 +#define ARTPEC6_CRYPTO_CIPHER_AES_XTS 5 + +/* The PDMA is a DMA-engine tightly coupled with a ciphering engine. + * It operates on a descriptor array with up to 64 descriptor entries. + * The arrays must be 64 byte aligned in memory. + * + * The ciphering unit has no registers and is completely controlled by + * a 4-byte metadata that is inserted at the beginning of each dma packet. + * + * A dma packet is a sequence of descriptors terminated by setting the .eop + * field in the final descriptor of the packet. + * + * Multiple packets are used for providing context data, key data and + * the plain/ciphertext. + * + * PDMA Descriptors (Array) + * +------+------+------+~~+-------+------+---- + * | 0 | 1 | 2 |~~| 11 EOP| 12 | .... + * +--+---+--+---+----+-+~~+-------+----+-+---- + * | | | | | + * | | | | | + * __|__ +-------++-------++-------+ +----+ + * | MD | |Payload||Payload||Payload| | MD | + * +-----+ +-------++-------++-------+ +----+ + */ + +struct artpec6_crypto_bounce_buffer { + struct list_head list; + size_t length; + struct scatterlist *sg; + size_t offset; + /* buf is aligned to ARTPEC_CACHE_LINE_MAX and + * holds up to ARTPEC_CACHE_LINE_MAX bytes data. + */ + void *buf; +}; + +struct artpec6_crypto_dma_map { + dma_addr_t dma_addr; + size_t size; + enum dma_data_direction dir; +}; + +struct artpec6_crypto_dma_descriptors { + struct pdma_descr out[PDMA_DESCR_COUNT] __aligned(64); + struct pdma_descr in[PDMA_DESCR_COUNT] __aligned(64); + u32 stat[PDMA_DESCR_COUNT] __aligned(64); + struct list_head bounce_buffers; + /* Enough maps for all out/in buffers, and all three descr. 
arrays */ + struct artpec6_crypto_dma_map maps[PDMA_DESCR_COUNT * 2 + 2]; + dma_addr_t out_dma_addr; + dma_addr_t in_dma_addr; + dma_addr_t stat_dma_addr; + size_t out_cnt; + size_t in_cnt; + size_t map_count; +}; + +enum artpec6_crypto_variant { + ARTPEC6_CRYPTO, + ARTPEC7_CRYPTO, +}; + +struct artpec6_crypto { + void __iomem *base; + spinlock_t queue_lock; + struct list_head queue; /* waiting for pdma fifo space */ + struct list_head pending; /* submitted to pdma fifo */ + struct tasklet_struct task; + struct kmem_cache *dma_cache; + int pending_count; + struct timer_list timer; + enum artpec6_crypto_variant variant; + void *pad_buffer; /* cache-aligned block padding buffer */ + void *zero_buffer; +}; + +enum artpec6_crypto_hash_flags { + HASH_FLAG_INIT_CTX = 2, + HASH_FLAG_UPDATE = 4, + HASH_FLAG_FINALIZE = 8, + HASH_FLAG_HMAC = 16, + HASH_FLAG_UPDATE_KEY = 32, +}; + +struct artpec6_crypto_req_common { + struct list_head list; + struct artpec6_crypto_dma_descriptors *dma; + struct crypto_async_request *req; + void (*complete)(struct crypto_async_request *req); + gfp_t gfp_flags; +}; + +struct artpec6_hash_request_context { + char partial_buffer[SHA512_BLOCK_SIZE]; + char partial_buffer_out[SHA512_BLOCK_SIZE]; + char key_buffer[SHA512_BLOCK_SIZE]; + char pad_buffer[SHA512_BLOCK_SIZE + 32]; + unsigned char digeststate[SHA512_DIGEST_SIZE]; + size_t partial_bytes; + u64 digcnt; + u32 key_md; + u32 hash_md; + enum artpec6_crypto_hash_flags hash_flags; + struct artpec6_crypto_req_common common; +}; + +struct artpec6_hash_export_state { + char partial_buffer[SHA512_BLOCK_SIZE]; + unsigned char digeststate[SHA512_DIGEST_SIZE]; + size_t partial_bytes; + u64 digcnt; + int oper; + unsigned int hash_flags; +}; + +struct artpec6_hashalg_context { + char hmac_key[SHA512_BLOCK_SIZE]; + size_t hmac_key_length; + struct crypto_shash *child_hash; +}; + +struct artpec6_crypto_request_context { + u32 cipher_md; + bool decrypt; + struct artpec6_crypto_req_common common; +}; + +struct artpec6_cryptotfm_context { + unsigned char aes_key[2*AES_MAX_KEY_SIZE]; + size_t key_length; + u32 key_md; + int crypto_type; + struct crypto_skcipher *fallback; +}; + +struct artpec6_crypto_aead_hw_ctx { + __be64 aad_length_bits; + __be64 text_length_bits; + __u8 J0[AES_BLOCK_SIZE]; +}; + +struct artpec6_crypto_aead_req_ctx { + struct artpec6_crypto_aead_hw_ctx hw_ctx; + u32 cipher_md; + bool decrypt; + struct artpec6_crypto_req_common common; + __u8 decryption_tag[AES_BLOCK_SIZE] ____cacheline_aligned; +}; + +/* The crypto framework makes it hard to avoid this global. 
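+ * The transform and request callbacks only receive crypto API handles,
+ * so they look up the driver state with
+ * dev_get_drvdata(artpec6_crypto_dev).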
*/ +static struct device *artpec6_crypto_dev; + +static struct dentry *dbgfs_root; + +#ifdef CONFIG_FAULT_INJECTION +static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read); +static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full); +#endif + +enum { + ARTPEC6_CRYPTO_PREPARE_HASH_NO_START, + ARTPEC6_CRYPTO_PREPARE_HASH_START, +}; + +static int artpec6_crypto_prepare_aead(struct aead_request *areq); +static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq); +static int artpec6_crypto_prepare_hash(struct ahash_request *areq); + +static void +artpec6_crypto_complete_crypto(struct crypto_async_request *req); +static void +artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req); +static void +artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req); +static void +artpec6_crypto_complete_aead(struct crypto_async_request *req); +static void +artpec6_crypto_complete_hash(struct crypto_async_request *req); + +static int +artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common); + +static void +artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common); + +struct artpec6_crypto_walk { + struct scatterlist *sg; + size_t offset; +}; + +static void artpec6_crypto_walk_init(struct artpec6_crypto_walk *awalk, + struct scatterlist *sg) +{ + awalk->sg = sg; + awalk->offset = 0; +} + +static size_t artpec6_crypto_walk_advance(struct artpec6_crypto_walk *awalk, + size_t nbytes) +{ + while (nbytes && awalk->sg) { + size_t piece; + + WARN_ON(awalk->offset > awalk->sg->length); + + piece = min(nbytes, (size_t)awalk->sg->length - awalk->offset); + nbytes -= piece; + awalk->offset += piece; + if (awalk->offset == awalk->sg->length) { + awalk->sg = sg_next(awalk->sg); + awalk->offset = 0; + } + + } + + return nbytes; +} + +static size_t +artpec6_crypto_walk_chunklen(const struct artpec6_crypto_walk *awalk) +{ + WARN_ON(awalk->sg->length == awalk->offset); + + return awalk->sg->length - awalk->offset; +} + +static dma_addr_t +artpec6_crypto_walk_chunk_phys(const struct artpec6_crypto_walk *awalk) +{ + return sg_phys(awalk->sg) + awalk->offset; +} + +static void +artpec6_crypto_copy_bounce_buffers(struct artpec6_crypto_req_common *common) +{ + struct artpec6_crypto_dma_descriptors *dma = common->dma; + struct artpec6_crypto_bounce_buffer *b; + struct artpec6_crypto_bounce_buffer *next; + + list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) { + pr_debug("bounce entry %p: %zu bytes @ %zu from %p\n", + b, b->length, b->offset, b->buf); + sg_pcopy_from_buffer(b->sg, + 1, + b->buf, + b->length, + b->offset); + + list_del(&b->list); + kfree(b); + } +} + +static inline bool artpec6_crypto_busy(void) +{ + struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); + int fifo_count = ac->pending_count; + + return fifo_count > 6; +} + +static int artpec6_crypto_submit(struct artpec6_crypto_req_common *req) +{ + struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); + int ret = -EBUSY; + + spin_lock_bh(&ac->queue_lock); + + if (!artpec6_crypto_busy()) { + list_add_tail(&req->list, &ac->pending); + artpec6_crypto_start_dma(req); + ret = -EINPROGRESS; + } else if (req->req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) { + list_add_tail(&req->list, &ac->queue); + } else { + artpec6_crypto_common_destroy(req); + } + + spin_unlock_bh(&ac->queue_lock); + + return ret; +} + +static void artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common) +{ + struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); + enum 
artpec6_crypto_variant variant = ac->variant; + void __iomem *base = ac->base; + struct artpec6_crypto_dma_descriptors *dma = common->dma; + u32 ind, statd, outd; + + /* Make descriptor content visible to the DMA before starting it. */ + wmb(); + + ind = FIELD_PREP(PDMA_IN_DESCRQ_PUSH_LEN, dma->in_cnt - 1) | + FIELD_PREP(PDMA_IN_DESCRQ_PUSH_ADDR, dma->in_dma_addr >> 6); + + statd = FIELD_PREP(PDMA_IN_STATQ_PUSH_LEN, dma->in_cnt - 1) | + FIELD_PREP(PDMA_IN_STATQ_PUSH_ADDR, dma->stat_dma_addr >> 6); + + outd = FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_LEN, dma->out_cnt - 1) | + FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_ADDR, dma->out_dma_addr >> 6); + + if (variant == ARTPEC6_CRYPTO) { + writel_relaxed(ind, base + A6_PDMA_IN_DESCRQ_PUSH); + writel_relaxed(statd, base + A6_PDMA_IN_STATQ_PUSH); + writel_relaxed(PDMA_IN_CMD_START, base + A6_PDMA_IN_CMD); + } else { + writel_relaxed(ind, base + A7_PDMA_IN_DESCRQ_PUSH); + writel_relaxed(statd, base + A7_PDMA_IN_STATQ_PUSH); + writel_relaxed(PDMA_IN_CMD_START, base + A7_PDMA_IN_CMD); + } + + writel_relaxed(outd, base + PDMA_OUT_DESCRQ_PUSH); + writel_relaxed(PDMA_OUT_CMD_START, base + PDMA_OUT_CMD); + + ac->pending_count++; +} + +static void +artpec6_crypto_init_dma_operation(struct artpec6_crypto_req_common *common) +{ + struct artpec6_crypto_dma_descriptors *dma = common->dma; + + dma->out_cnt = 0; + dma->in_cnt = 0; + dma->map_count = 0; + INIT_LIST_HEAD(&dma->bounce_buffers); +} + +static bool fault_inject_dma_descr(void) +{ +#ifdef CONFIG_FAULT_INJECTION + return should_fail(&artpec6_crypto_fail_dma_array_full, 1); +#else + return false; +#endif +} + +/** artpec6_crypto_setup_out_descr_phys - Setup an out channel with a + * physical address + * + * @addr: The physical address of the data buffer + * @len: The length of the data buffer + * @eop: True if this is the last buffer in the packet + * + * @return 0 on success or -ENOSPC if there are no more descriptors available + */ +static int +artpec6_crypto_setup_out_descr_phys(struct artpec6_crypto_req_common *common, + dma_addr_t addr, size_t len, bool eop) +{ + struct artpec6_crypto_dma_descriptors *dma = common->dma; + struct pdma_descr *d; + + if (dma->out_cnt >= PDMA_DESCR_COUNT || + fault_inject_dma_descr()) { + pr_err("No free OUT DMA descriptors available!\n"); + return -ENOSPC; + } + + d = &dma->out[dma->out_cnt++]; + memset(d, 0, sizeof(*d)); + + d->ctrl.short_descr = 0; + d->ctrl.eop = eop; + d->data.len = len; + d->data.buf = addr; + return 0; +} + +/** artpec6_crypto_setup_out_descr_short - Setup a short out descriptor + * + * @dst: The virtual address of the data + * @len: The length of the data, must be between 1 to 7 bytes + * @eop: True if this is the last buffer in the packet + * + * @return 0 on success + * -ENOSPC if no more descriptors are available + * -EINVAL if the data length exceeds 7 bytes + */ +static int +artpec6_crypto_setup_out_descr_short(struct artpec6_crypto_req_common *common, + void *dst, unsigned int len, bool eop) +{ + struct artpec6_crypto_dma_descriptors *dma = common->dma; + struct pdma_descr *d; + + if (dma->out_cnt >= PDMA_DESCR_COUNT || + fault_inject_dma_descr()) { + pr_err("No free OUT DMA descriptors available!\n"); + return -ENOSPC; + } else if (len > 7 || len < 1) { + return -EINVAL; + } + d = &dma->out[dma->out_cnt++]; + memset(d, 0, sizeof(*d)); + + d->ctrl.short_descr = 1; + d->ctrl.short_len = len; + d->ctrl.eop = eop; + memcpy(d->shrt.data, dst, len); + return 0; +} + +static int artpec6_crypto_dma_map_page(struct artpec6_crypto_req_common *common, + struct page 
*page, size_t offset, + size_t size, + enum dma_data_direction dir, + dma_addr_t *dma_addr_out) +{ + struct artpec6_crypto_dma_descriptors *dma = common->dma; + struct device *dev = artpec6_crypto_dev; + struct artpec6_crypto_dma_map *map; + dma_addr_t dma_addr; + + *dma_addr_out = 0; + + if (dma->map_count >= ARRAY_SIZE(dma->maps)) + return -ENOMEM; + + dma_addr = dma_map_page(dev, page, offset, size, dir); + if (dma_mapping_error(dev, dma_addr)) + return -ENOMEM; + + map = &dma->maps[dma->map_count++]; + map->size = size; + map->dma_addr = dma_addr; + map->dir = dir; + + *dma_addr_out = dma_addr; + + return 0; +} + +static int +artpec6_crypto_dma_map_single(struct artpec6_crypto_req_common *common, + void *ptr, size_t size, + enum dma_data_direction dir, + dma_addr_t *dma_addr_out) +{ + struct page *page = virt_to_page(ptr); + size_t offset = (uintptr_t)ptr & ~PAGE_MASK; + + return artpec6_crypto_dma_map_page(common, page, offset, size, dir, + dma_addr_out); +} + +static int +artpec6_crypto_dma_map_descs(struct artpec6_crypto_req_common *common) +{ + struct artpec6_crypto_dma_descriptors *dma = common->dma; + int ret; + + ret = artpec6_crypto_dma_map_single(common, dma->in, + sizeof(dma->in[0]) * dma->in_cnt, + DMA_TO_DEVICE, &dma->in_dma_addr); + if (ret) + return ret; + + ret = artpec6_crypto_dma_map_single(common, dma->out, + sizeof(dma->out[0]) * dma->out_cnt, + DMA_TO_DEVICE, &dma->out_dma_addr); + if (ret) + return ret; + + /* We only read one stat descriptor */ + dma->stat[dma->in_cnt - 1] = 0; + + /* + * DMA_BIDIRECTIONAL since we need our zeroing of the stat descriptor + * to be written. + */ + return artpec6_crypto_dma_map_single(common, + dma->stat + dma->in_cnt - 1, + sizeof(dma->stat[0]), + DMA_BIDIRECTIONAL, + &dma->stat_dma_addr); +} + +static void +artpec6_crypto_dma_unmap_all(struct artpec6_crypto_req_common *common) +{ + struct artpec6_crypto_dma_descriptors *dma = common->dma; + struct device *dev = artpec6_crypto_dev; + int i; + + for (i = 0; i < dma->map_count; i++) { + struct artpec6_crypto_dma_map *map = &dma->maps[i]; + + dma_unmap_page(dev, map->dma_addr, map->size, map->dir); + } + + dma->map_count = 0; +} + +/** artpec6_crypto_setup_out_descr - Setup an out descriptor + * + * @dst: The virtual address of the data + * @len: The length of the data + * @eop: True if this is the last buffer in the packet + * @use_short: If this is true and the data length is 7 bytes or less then + * a short descriptor will be used + * + * @return 0 on success + * Any errors from artpec6_crypto_setup_out_descr_short() or + * setup_out_descr_phys() + */ +static int +artpec6_crypto_setup_out_descr(struct artpec6_crypto_req_common *common, + void *dst, unsigned int len, bool eop, + bool use_short) +{ + if (use_short && len < 7) { + return artpec6_crypto_setup_out_descr_short(common, dst, len, + eop); + } else { + int ret; + dma_addr_t dma_addr; + + ret = artpec6_crypto_dma_map_single(common, dst, len, + DMA_TO_DEVICE, + &dma_addr); + if (ret) + return ret; + + return artpec6_crypto_setup_out_descr_phys(common, dma_addr, + len, eop); + } +} + +/** artpec6_crypto_setup_in_descr_phys - Setup an in channel with a + * physical address + * + * @addr: The physical address of the data buffer + * @len: The length of the data buffer + * @intr: True if an interrupt should be fired after HW processing of this + * descriptor + * + */ +static int +artpec6_crypto_setup_in_descr_phys(struct artpec6_crypto_req_common *common, + dma_addr_t addr, unsigned int len, bool intr) +{ + struct 
artpec6_crypto_dma_descriptors *dma = common->dma; + struct pdma_descr *d; + + if (dma->in_cnt >= PDMA_DESCR_COUNT || + fault_inject_dma_descr()) { + pr_err("No free IN DMA descriptors available!\n"); + return -ENOSPC; + } + d = &dma->in[dma->in_cnt++]; + memset(d, 0, sizeof(*d)); + + d->ctrl.intr = intr; + d->data.len = len; + d->data.buf = addr; + return 0; +} + +/** artpec6_crypto_setup_in_descr - Setup an in channel descriptor + * + * @buffer: The virtual address to of the data buffer + * @len: The length of the data buffer + * @last: If this is the last data buffer in the request (i.e. an interrupt + * is needed + * + * Short descriptors are not used for the in channel + */ +static int +artpec6_crypto_setup_in_descr(struct artpec6_crypto_req_common *common, + void *buffer, unsigned int len, bool last) +{ + dma_addr_t dma_addr; + int ret; + + ret = artpec6_crypto_dma_map_single(common, buffer, len, + DMA_FROM_DEVICE, &dma_addr); + if (ret) + return ret; + + return artpec6_crypto_setup_in_descr_phys(common, dma_addr, len, last); +} + +static struct artpec6_crypto_bounce_buffer * +artpec6_crypto_alloc_bounce(gfp_t flags) +{ + void *base; + size_t alloc_size = sizeof(struct artpec6_crypto_bounce_buffer) + + 2 * ARTPEC_CACHE_LINE_MAX; + struct artpec6_crypto_bounce_buffer *bbuf = kzalloc(alloc_size, flags); + + if (!bbuf) + return NULL; + + base = bbuf + 1; + bbuf->buf = PTR_ALIGN(base, ARTPEC_CACHE_LINE_MAX); + return bbuf; +} + +static int setup_bounce_buffer_in(struct artpec6_crypto_req_common *common, + struct artpec6_crypto_walk *walk, size_t size) +{ + struct artpec6_crypto_bounce_buffer *bbuf; + int ret; + + bbuf = artpec6_crypto_alloc_bounce(common->gfp_flags); + if (!bbuf) + return -ENOMEM; + + bbuf->length = size; + bbuf->sg = walk->sg; + bbuf->offset = walk->offset; + + ret = artpec6_crypto_setup_in_descr(common, bbuf->buf, size, false); + if (ret) { + kfree(bbuf); + return ret; + } + + pr_debug("BOUNCE %zu offset %zu\n", size, walk->offset); + list_add_tail(&bbuf->list, &common->dma->bounce_buffers); + return 0; +} + +static int +artpec6_crypto_setup_sg_descrs_in(struct artpec6_crypto_req_common *common, + struct artpec6_crypto_walk *walk, + size_t count) +{ + size_t chunk; + int ret; + dma_addr_t addr; + + while (walk->sg && count) { + chunk = min(count, artpec6_crypto_walk_chunklen(walk)); + addr = artpec6_crypto_walk_chunk_phys(walk); + + /* When destination buffers are not aligned to the cache line + * size we need bounce buffers. The DMA-API requires that the + * entire line is owned by the DMA buffer and this holds also + * for the case when coherent DMA is used. 
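+ * As an illustration, with ARTPEC_CACHE_LINE_MAX = 32, a 100 byte
+ * chunk starting 20 bytes into a cache line is split below into a
+ * 12 byte bounce buffer, a 64 byte direct DMA mapping and a trailing
+ * 24 byte bounce buffer.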
+ */ + if (!IS_ALIGNED(addr, ARTPEC_CACHE_LINE_MAX)) { + chunk = min_t(dma_addr_t, chunk, + ALIGN(addr, ARTPEC_CACHE_LINE_MAX) - + addr); + + pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk); + ret = setup_bounce_buffer_in(common, walk, chunk); + } else if (chunk < ARTPEC_CACHE_LINE_MAX) { + pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk); + ret = setup_bounce_buffer_in(common, walk, chunk); + } else { + dma_addr_t dma_addr; + + chunk = chunk & ~(ARTPEC_CACHE_LINE_MAX-1); + + pr_debug("CHUNK %pad:%zu\n", &addr, chunk); + + ret = artpec6_crypto_dma_map_page(common, + sg_page(walk->sg), + walk->sg->offset + + walk->offset, + chunk, + DMA_FROM_DEVICE, + &dma_addr); + if (ret) + return ret; + + ret = artpec6_crypto_setup_in_descr_phys(common, + dma_addr, + chunk, false); + } + + if (ret) + return ret; + + count = count - chunk; + artpec6_crypto_walk_advance(walk, chunk); + } + + if (count) + pr_err("EOL unexpected %zu bytes left\n", count); + + return count ? -EINVAL : 0; +} + +static int +artpec6_crypto_setup_sg_descrs_out(struct artpec6_crypto_req_common *common, + struct artpec6_crypto_walk *walk, + size_t count) +{ + size_t chunk; + int ret; + dma_addr_t addr; + + while (walk->sg && count) { + chunk = min(count, artpec6_crypto_walk_chunklen(walk)); + addr = artpec6_crypto_walk_chunk_phys(walk); + + pr_debug("OUT-CHUNK %pad:%zu\n", &addr, chunk); + + if (addr & 3) { + char buf[3]; + + chunk = min_t(size_t, chunk, (4-(addr&3))); + + sg_pcopy_to_buffer(walk->sg, 1, buf, chunk, + walk->offset); + + ret = artpec6_crypto_setup_out_descr_short(common, buf, + chunk, + false); + } else { + dma_addr_t dma_addr; + + ret = artpec6_crypto_dma_map_page(common, + sg_page(walk->sg), + walk->sg->offset + + walk->offset, + chunk, + DMA_TO_DEVICE, + &dma_addr); + if (ret) + return ret; + + ret = artpec6_crypto_setup_out_descr_phys(common, + dma_addr, + chunk, false); + } + + if (ret) + return ret; + + count = count - chunk; + artpec6_crypto_walk_advance(walk, chunk); + } + + if (count) + pr_err("EOL unexpected %zu bytes left\n", count); + + return count ? -EINVAL : 0; +} + + +/** artpec6_crypto_terminate_out_descrs - Set the EOP on the last out descriptor + * + * If the out descriptor list is non-empty, then the eop flag on the + * last used out descriptor will be set. + * + * @return 0 on success + * -EINVAL if the out descriptor is empty or has overflown + */ +static int +artpec6_crypto_terminate_out_descrs(struct artpec6_crypto_req_common *common) +{ + struct artpec6_crypto_dma_descriptors *dma = common->dma; + struct pdma_descr *d; + + if (!dma->out_cnt || dma->out_cnt > PDMA_DESCR_COUNT) { + pr_err("%s: OUT descriptor list is %s\n", + MODULE_NAME, dma->out_cnt ? "empty" : "full"); + return -EINVAL; + + } + + d = &dma->out[dma->out_cnt-1]; + d->ctrl.eop = 1; + + return 0; +} + +/** artpec6_crypto_terminate_in_descrs - Set the interrupt flag on the last + * in descriptor + * + * See artpec6_crypto_terminate_out_descrs() for return values + */ +static int +artpec6_crypto_terminate_in_descrs(struct artpec6_crypto_req_common *common) +{ + struct artpec6_crypto_dma_descriptors *dma = common->dma; + struct pdma_descr *d; + + if (!dma->in_cnt || dma->in_cnt > PDMA_DESCR_COUNT) { + pr_err("%s: IN descriptor list is %s\n", + MODULE_NAME, dma->in_cnt ? "empty" : "full"); + return -EINVAL; + } + + d = &dma->in[dma->in_cnt-1]; + d->ctrl.intr = 1; + return 0; +} + +/** create_hash_pad - Create a Secure Hash conformant pad + * + * @dst: The destination buffer to write the pad. 
Must be at least 64 bytes + * @dgstlen: The total length of the hash digest in bytes + * @bitcount: The total length of the digest in bits + * + * @return The total number of padding bytes written to @dst + */ +static size_t +create_hash_pad(int oper, unsigned char *dst, u64 dgstlen, u64 bitcount) +{ + unsigned int mod, target, diff, pad_bytes, size_bytes; + __be64 bits = __cpu_to_be64(bitcount); + + switch (oper) { + case regk_crypto_sha1: + case regk_crypto_sha256: + case regk_crypto_hmac_sha1: + case regk_crypto_hmac_sha256: + target = 448 / 8; + mod = 512 / 8; + size_bytes = 8; + break; + default: + target = 896 / 8; + mod = 1024 / 8; + size_bytes = 16; + break; + } + + target -= 1; + diff = dgstlen & (mod - 1); + pad_bytes = diff > target ? target + mod - diff : target - diff; + + memset(dst + 1, 0, pad_bytes); + dst[0] = 0x80; + + if (size_bytes == 16) { + memset(dst + 1 + pad_bytes, 0, 8); + memcpy(dst + 1 + pad_bytes + 8, &bits, 8); + } else { + memcpy(dst + 1 + pad_bytes, &bits, 8); + } + + return pad_bytes + size_bytes + 1; +} + +static int artpec6_crypto_common_init(struct artpec6_crypto_req_common *common, + struct crypto_async_request *parent, + void (*complete)(struct crypto_async_request *req), + struct scatterlist *dstsg, unsigned int nbytes) +{ + gfp_t flags; + struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); + + flags = (parent->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? + GFP_KERNEL : GFP_ATOMIC; + + common->gfp_flags = flags; + common->dma = kmem_cache_alloc(ac->dma_cache, flags); + if (!common->dma) + return -ENOMEM; + + common->req = parent; + common->complete = complete; + return 0; +} + +static void +artpec6_crypto_bounce_destroy(struct artpec6_crypto_dma_descriptors *dma) +{ + struct artpec6_crypto_bounce_buffer *b; + struct artpec6_crypto_bounce_buffer *next; + + list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) { + kfree(b); + } +} + +static int +artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common) +{ + struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); + + artpec6_crypto_dma_unmap_all(common); + artpec6_crypto_bounce_destroy(common->dma); + kmem_cache_free(ac->dma_cache, common->dma); + common->dma = NULL; + return 0; +} + +/* + * Ciphering functions. 
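+ * Each entry point fills in a request context, sets up the PDMA
+ * descriptor lists via artpec6_crypto_prepare_crypto() and hands the
+ * job to artpec6_crypto_submit().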
+ */ +static int artpec6_crypto_encrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); + struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher); + struct artpec6_crypto_request_context *req_ctx = NULL; + void (*complete)(struct crypto_async_request *req); + int ret; + + req_ctx = skcipher_request_ctx(req); + + switch (ctx->crypto_type) { + case ARTPEC6_CRYPTO_CIPHER_AES_CBC: + case ARTPEC6_CRYPTO_CIPHER_AES_ECB: + case ARTPEC6_CRYPTO_CIPHER_AES_XTS: + req_ctx->decrypt = 0; + break; + default: + break; + } + + switch (ctx->crypto_type) { + case ARTPEC6_CRYPTO_CIPHER_AES_CBC: + complete = artpec6_crypto_complete_cbc_encrypt; + break; + default: + complete = artpec6_crypto_complete_crypto; + break; + } + + ret = artpec6_crypto_common_init(&req_ctx->common, + &req->base, + complete, + req->dst, req->cryptlen); + if (ret) + return ret; + + ret = artpec6_crypto_prepare_crypto(req); + if (ret) { + artpec6_crypto_common_destroy(&req_ctx->common); + return ret; + } + + return artpec6_crypto_submit(&req_ctx->common); +} + +static int artpec6_crypto_decrypt(struct skcipher_request *req) +{ + int ret; + struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); + struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher); + struct artpec6_crypto_request_context *req_ctx = NULL; + void (*complete)(struct crypto_async_request *req); + + req_ctx = skcipher_request_ctx(req); + + switch (ctx->crypto_type) { + case ARTPEC6_CRYPTO_CIPHER_AES_CBC: + case ARTPEC6_CRYPTO_CIPHER_AES_ECB: + case ARTPEC6_CRYPTO_CIPHER_AES_XTS: + req_ctx->decrypt = 1; + break; + default: + break; + } + + + switch (ctx->crypto_type) { + case ARTPEC6_CRYPTO_CIPHER_AES_CBC: + complete = artpec6_crypto_complete_cbc_decrypt; + break; + default: + complete = artpec6_crypto_complete_crypto; + break; + } + + ret = artpec6_crypto_common_init(&req_ctx->common, &req->base, + complete, + req->dst, req->cryptlen); + if (ret) + return ret; + + ret = artpec6_crypto_prepare_crypto(req); + if (ret) { + artpec6_crypto_common_destroy(&req_ctx->common); + return ret; + } + + return artpec6_crypto_submit(&req_ctx->common); +} + +static int +artpec6_crypto_ctr_crypt(struct skcipher_request *req, bool encrypt) +{ + struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); + struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher); + size_t iv_len = crypto_skcipher_ivsize(cipher); + unsigned int counter = be32_to_cpup((__be32 *) + (req->iv + iv_len - 4)); + unsigned int nblks = ALIGN(req->cryptlen, AES_BLOCK_SIZE) / + AES_BLOCK_SIZE; + + /* + * The hardware uses only the last 32-bits as the counter while the + * kernel tests (aes_ctr_enc_tv_template[4] for example) expect that + * the whole IV is a counter. So fallback if the counter is going to + * overlow. + */ + if (counter + nblks < counter) { + int ret; + + pr_debug("counter %x will overflow (nblks %u), falling back\n", + counter, counter + nblks); + + ret = crypto_skcipher_setkey(ctx->fallback, ctx->aes_key, + ctx->key_length); + if (ret) + return ret; + + { + SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); + + skcipher_request_set_tfm(subreq, ctx->fallback); + skcipher_request_set_callback(subreq, req->base.flags, + NULL, NULL); + skcipher_request_set_crypt(subreq, req->src, req->dst, + req->cryptlen, req->iv); + ret = encrypt ? crypto_skcipher_encrypt(subreq) + : crypto_skcipher_decrypt(subreq); + skcipher_request_zero(subreq); + } + return ret; + } + + return encrypt ? 
artpec6_crypto_encrypt(req) + : artpec6_crypto_decrypt(req); +} + +static int artpec6_crypto_ctr_encrypt(struct skcipher_request *req) +{ + return artpec6_crypto_ctr_crypt(req, true); +} + +static int artpec6_crypto_ctr_decrypt(struct skcipher_request *req) +{ + return artpec6_crypto_ctr_crypt(req, false); +} + +/* + * AEAD functions + */ +static int artpec6_crypto_aead_init(struct crypto_aead *tfm) +{ + struct artpec6_cryptotfm_context *tfm_ctx = crypto_aead_ctx(tfm); + + memset(tfm_ctx, 0, sizeof(*tfm_ctx)); + + crypto_aead_set_reqsize(tfm, + sizeof(struct artpec6_crypto_aead_req_ctx)); + + return 0; +} + +static int artpec6_crypto_aead_set_key(struct crypto_aead *tfm, const u8 *key, + unsigned int len) +{ + struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(&tfm->base); + + if (len != 16 && len != 24 && len != 32) { + crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -1; + } + + ctx->key_length = len; + + memcpy(ctx->aes_key, key, len); + return 0; +} + +static int artpec6_crypto_aead_encrypt(struct aead_request *req) +{ + int ret; + struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req); + + req_ctx->decrypt = false; + ret = artpec6_crypto_common_init(&req_ctx->common, &req->base, + artpec6_crypto_complete_aead, + NULL, 0); + if (ret) + return ret; + + ret = artpec6_crypto_prepare_aead(req); + if (ret) { + artpec6_crypto_common_destroy(&req_ctx->common); + return ret; + } + + return artpec6_crypto_submit(&req_ctx->common); +} + +static int artpec6_crypto_aead_decrypt(struct aead_request *req) +{ + int ret; + struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req); + + req_ctx->decrypt = true; + if (req->cryptlen < AES_BLOCK_SIZE) + return -EINVAL; + + ret = artpec6_crypto_common_init(&req_ctx->common, + &req->base, + artpec6_crypto_complete_aead, + NULL, 0); + if (ret) + return ret; + + ret = artpec6_crypto_prepare_aead(req); + if (ret) { + artpec6_crypto_common_destroy(&req_ctx->common); + return ret; + } + + return artpec6_crypto_submit(&req_ctx->common); +} + +static int artpec6_crypto_prepare_hash(struct ahash_request *areq) +{ + struct artpec6_hashalg_context *ctx = crypto_tfm_ctx(areq->base.tfm); + struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(areq); + size_t digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq)); + size_t contextsize = digestsize == SHA384_DIGEST_SIZE ? 
+ SHA512_DIGEST_SIZE : digestsize; + size_t blocksize = crypto_tfm_alg_blocksize( + crypto_ahash_tfm(crypto_ahash_reqtfm(areq))); + struct artpec6_crypto_req_common *common = &req_ctx->common; + struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); + enum artpec6_crypto_variant variant = ac->variant; + u32 sel_ctx; + bool ext_ctx = false; + bool run_hw = false; + int error = 0; + + artpec6_crypto_init_dma_operation(common); + + /* Upload HMAC key, must be first the first packet */ + if (req_ctx->hash_flags & HASH_FLAG_HMAC) { + if (variant == ARTPEC6_CRYPTO) { + req_ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER, + a6_regk_crypto_dlkey); + } else { + req_ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER, + a7_regk_crypto_dlkey); + } + + /* Copy and pad up the key */ + memcpy(req_ctx->key_buffer, ctx->hmac_key, + ctx->hmac_key_length); + memset(req_ctx->key_buffer + ctx->hmac_key_length, 0, + blocksize - ctx->hmac_key_length); + + error = artpec6_crypto_setup_out_descr(common, + (void *)&req_ctx->key_md, + sizeof(req_ctx->key_md), false, false); + if (error) + return error; + + error = artpec6_crypto_setup_out_descr(common, + req_ctx->key_buffer, blocksize, + true, false); + if (error) + return error; + } + + if (!(req_ctx->hash_flags & HASH_FLAG_INIT_CTX)) { + /* Restore context */ + sel_ctx = regk_crypto_ext; + ext_ctx = true; + } else { + sel_ctx = regk_crypto_init; + } + + if (variant == ARTPEC6_CRYPTO) { + req_ctx->hash_md &= ~A6_CRY_MD_HASH_SEL_CTX; + req_ctx->hash_md |= FIELD_PREP(A6_CRY_MD_HASH_SEL_CTX, sel_ctx); + + /* If this is the final round, set the final flag */ + if (req_ctx->hash_flags & HASH_FLAG_FINALIZE) + req_ctx->hash_md |= A6_CRY_MD_HASH_HMAC_FIN; + } else { + req_ctx->hash_md &= ~A7_CRY_MD_HASH_SEL_CTX; + req_ctx->hash_md |= FIELD_PREP(A7_CRY_MD_HASH_SEL_CTX, sel_ctx); + + /* If this is the final round, set the final flag */ + if (req_ctx->hash_flags & HASH_FLAG_FINALIZE) + req_ctx->hash_md |= A7_CRY_MD_HASH_HMAC_FIN; + } + + /* Setup up metadata descriptors */ + error = artpec6_crypto_setup_out_descr(common, + (void *)&req_ctx->hash_md, + sizeof(req_ctx->hash_md), false, false); + if (error) + return error; + + error = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false); + if (error) + return error; + + if (ext_ctx) { + error = artpec6_crypto_setup_out_descr(common, + req_ctx->digeststate, + contextsize, false, false); + + if (error) + return error; + } + + if (req_ctx->hash_flags & HASH_FLAG_UPDATE) { + size_t done_bytes = 0; + size_t total_bytes = areq->nbytes + req_ctx->partial_bytes; + size_t ready_bytes = round_down(total_bytes, blocksize); + struct artpec6_crypto_walk walk; + + run_hw = ready_bytes > 0; + if (req_ctx->partial_bytes && ready_bytes) { + /* We have a partial buffer and will at least some bytes + * to the HW. 
Empty this partial buffer before tackling + * the SG lists + */ + memcpy(req_ctx->partial_buffer_out, + req_ctx->partial_buffer, + req_ctx->partial_bytes); + + error = artpec6_crypto_setup_out_descr(common, + req_ctx->partial_buffer_out, + req_ctx->partial_bytes, + false, true); + if (error) + return error; + + /* Reset partial buffer */ + done_bytes += req_ctx->partial_bytes; + req_ctx->partial_bytes = 0; + } + + artpec6_crypto_walk_init(&walk, areq->src); + + error = artpec6_crypto_setup_sg_descrs_out(common, &walk, + ready_bytes - + done_bytes); + if (error) + return error; + + if (walk.sg) { + size_t sg_skip = ready_bytes - done_bytes; + size_t sg_rem = areq->nbytes - sg_skip; + + sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), + req_ctx->partial_buffer + + req_ctx->partial_bytes, + sg_rem, sg_skip); + + req_ctx->partial_bytes += sg_rem; + } + + req_ctx->digcnt += ready_bytes; + req_ctx->hash_flags &= ~(HASH_FLAG_UPDATE); + } + + /* Finalize */ + if (req_ctx->hash_flags & HASH_FLAG_FINALIZE) { + bool needtrim = contextsize != digestsize; + size_t hash_pad_len; + u64 digest_bits; + u32 oper; + + if (variant == ARTPEC6_CRYPTO) + oper = FIELD_GET(A6_CRY_MD_OPER, req_ctx->hash_md); + else + oper = FIELD_GET(A7_CRY_MD_OPER, req_ctx->hash_md); + + /* Write out the partial buffer if present */ + if (req_ctx->partial_bytes) { + memcpy(req_ctx->partial_buffer_out, + req_ctx->partial_buffer, + req_ctx->partial_bytes); + error = artpec6_crypto_setup_out_descr(common, + req_ctx->partial_buffer_out, + req_ctx->partial_bytes, + false, true); + if (error) + return error; + + req_ctx->digcnt += req_ctx->partial_bytes; + req_ctx->partial_bytes = 0; + } + + if (req_ctx->hash_flags & HASH_FLAG_HMAC) + digest_bits = 8 * (req_ctx->digcnt + blocksize); + else + digest_bits = 8 * req_ctx->digcnt; + + /* Add the hash pad */ + hash_pad_len = create_hash_pad(oper, req_ctx->pad_buffer, + req_ctx->digcnt, digest_bits); + error = artpec6_crypto_setup_out_descr(common, + req_ctx->pad_buffer, + hash_pad_len, false, + true); + req_ctx->digcnt = 0; + + if (error) + return error; + + /* Descriptor for the final result */ + error = artpec6_crypto_setup_in_descr(common, areq->result, + digestsize, + !needtrim); + if (error) + return error; + + if (needtrim) { + /* Discard the extra context bytes for SHA-384 */ + error = artpec6_crypto_setup_in_descr(common, + req_ctx->partial_buffer, + digestsize - contextsize, true); + if (error) + return error; + } + + } else { /* This is not the final operation for this request */ + if (!run_hw) + return ARTPEC6_CRYPTO_PREPARE_HASH_NO_START; + + /* Save the result to the context */ + error = artpec6_crypto_setup_in_descr(common, + req_ctx->digeststate, + contextsize, false); + if (error) + return error; + /* fall through */ + } + + req_ctx->hash_flags &= ~(HASH_FLAG_INIT_CTX | HASH_FLAG_UPDATE | + HASH_FLAG_FINALIZE); + + error = artpec6_crypto_terminate_in_descrs(common); + if (error) + return error; + + error = artpec6_crypto_terminate_out_descrs(common); + if (error) + return error; + + error = artpec6_crypto_dma_map_descs(common); + if (error) + return error; + + return ARTPEC6_CRYPTO_PREPARE_HASH_START; +} + + +static int artpec6_crypto_aes_ecb_init(struct crypto_skcipher *tfm) +{ + struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm); + + tfm->reqsize = sizeof(struct artpec6_crypto_request_context); + ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_ECB; + + return 0; +} + +static int artpec6_crypto_aes_ctr_init(struct crypto_skcipher *tfm) +{ + struct 
artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm); + + ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base), + 0, + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->fallback)) + return PTR_ERR(ctx->fallback); + + tfm->reqsize = sizeof(struct artpec6_crypto_request_context); + ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CTR; + + return 0; +} + +static int artpec6_crypto_aes_cbc_init(struct crypto_skcipher *tfm) +{ + struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm); + + tfm->reqsize = sizeof(struct artpec6_crypto_request_context); + ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CBC; + + return 0; +} + +static int artpec6_crypto_aes_xts_init(struct crypto_skcipher *tfm) +{ + struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm); + + tfm->reqsize = sizeof(struct artpec6_crypto_request_context); + ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_XTS; + + return 0; +} + +static void artpec6_crypto_aes_exit(struct crypto_skcipher *tfm) +{ + struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm); + + memset(ctx, 0, sizeof(*ctx)); +} + +static void artpec6_crypto_aes_ctr_exit(struct crypto_skcipher *tfm) +{ + struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm); + + crypto_free_skcipher(ctx->fallback); + artpec6_crypto_aes_exit(tfm); +} + +static int +artpec6_crypto_cipher_set_key(struct crypto_skcipher *cipher, const u8 *key, + unsigned int keylen) +{ + struct artpec6_cryptotfm_context *ctx = + crypto_skcipher_ctx(cipher); + + switch (keylen) { + case 16: + case 24: + case 32: + break; + default: + crypto_skcipher_set_flags(cipher, + CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + memcpy(ctx->aes_key, key, keylen); + ctx->key_length = keylen; + return 0; +} + +static int +artpec6_crypto_xts_set_key(struct crypto_skcipher *cipher, const u8 *key, + unsigned int keylen) +{ + struct artpec6_cryptotfm_context *ctx = + crypto_skcipher_ctx(cipher); + int ret; + + ret = xts_check_key(&cipher->base, key, keylen); + if (ret) + return ret; + + switch (keylen) { + case 32: + case 48: + case 64: + break; + default: + crypto_skcipher_set_flags(cipher, + CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + memcpy(ctx->aes_key, key, keylen); + ctx->key_length = keylen; + return 0; +} + +/** artpec6_crypto_process_crypto - Prepare an async block cipher crypto request + * + * @req: The asynch request to process + * + * @return 0 if the dma job was successfully prepared + * <0 on error + * + * This function sets up the PDMA descriptors for a block cipher request. + * + * The required padding is added for AES-CTR using a statically defined + * buffer. 
+ * + * The PDMA descriptor list will be as follows: + * + * OUT: [KEY_MD][KEY][EOP][IV]...[data_n][AES-CTR_pad] + * IN: ...[data_n] + * + */ +static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq) +{ + int ret; + struct artpec6_crypto_walk walk; + struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq); + struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher); + struct artpec6_crypto_request_context *req_ctx = NULL; + size_t iv_len = crypto_skcipher_ivsize(cipher); + struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); + enum artpec6_crypto_variant variant = ac->variant; + struct artpec6_crypto_req_common *common; + bool cipher_decr = false; + size_t cipher_klen; + u32 cipher_len = 0; /* Same as regk_crypto_key_128 for NULL crypto */ + u32 oper; + + req_ctx = skcipher_request_ctx(areq); + common = &req_ctx->common; + + artpec6_crypto_init_dma_operation(common); + + if (variant == ARTPEC6_CRYPTO) + ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER, a6_regk_crypto_dlkey); + else + ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER, a7_regk_crypto_dlkey); + + ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md, + sizeof(ctx->key_md), false, false); + if (ret) + return ret; + + ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key, + ctx->key_length, true, false); + if (ret) + return ret; + + req_ctx->cipher_md = 0; + + if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS) + cipher_klen = ctx->key_length/2; + else + cipher_klen = ctx->key_length; + + /* Metadata */ + switch (cipher_klen) { + case 16: + cipher_len = regk_crypto_key_128; + break; + case 24: + cipher_len = regk_crypto_key_192; + break; + case 32: + cipher_len = regk_crypto_key_256; + break; + default: + pr_err("%s: Invalid key length %d!\n", + MODULE_NAME, ctx->key_length); + return -EINVAL; + } + + switch (ctx->crypto_type) { + case ARTPEC6_CRYPTO_CIPHER_AES_ECB: + oper = regk_crypto_aes_ecb; + cipher_decr = req_ctx->decrypt; + break; + + case ARTPEC6_CRYPTO_CIPHER_AES_CBC: + oper = regk_crypto_aes_cbc; + cipher_decr = req_ctx->decrypt; + break; + + case ARTPEC6_CRYPTO_CIPHER_AES_CTR: + oper = regk_crypto_aes_ctr; + cipher_decr = false; + break; + + case ARTPEC6_CRYPTO_CIPHER_AES_XTS: + oper = regk_crypto_aes_xts; + cipher_decr = req_ctx->decrypt; + + if (variant == ARTPEC6_CRYPTO) + req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DSEQ; + else + req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DSEQ; + break; + + default: + pr_err("%s: Invalid cipher mode %d!\n", + MODULE_NAME, ctx->crypto_type); + return -EINVAL; + } + + if (variant == ARTPEC6_CRYPTO) { + req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER, oper); + req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN, + cipher_len); + if (cipher_decr) + req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR; + } else { + req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER, oper); + req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN, + cipher_len); + if (cipher_decr) + req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR; + } + + ret = artpec6_crypto_setup_out_descr(common, + &req_ctx->cipher_md, + sizeof(req_ctx->cipher_md), + false, false); + if (ret) + return ret; + + ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false); + if (ret) + return ret; + + if (iv_len) { + ret = artpec6_crypto_setup_out_descr(common, areq->iv, iv_len, + false, false); + if (ret) + return ret; + } + /* Data out */ + artpec6_crypto_walk_init(&walk, areq->src); + ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, areq->cryptlen); + if (ret) + return ret; + + /* Data in */ + 
artpec6_crypto_walk_init(&walk, areq->dst); + ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, areq->cryptlen); + if (ret) + return ret; + + /* CTR-mode padding required by the HW. */ + if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_CTR || + ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS) { + size_t pad = ALIGN(areq->cryptlen, AES_BLOCK_SIZE) - + areq->cryptlen; + + if (pad) { + ret = artpec6_crypto_setup_out_descr(common, + ac->pad_buffer, + pad, false, false); + if (ret) + return ret; + + ret = artpec6_crypto_setup_in_descr(common, + ac->pad_buffer, pad, + false); + if (ret) + return ret; + } + } + + ret = artpec6_crypto_terminate_out_descrs(common); + if (ret) + return ret; + + ret = artpec6_crypto_terminate_in_descrs(common); + if (ret) + return ret; + + return artpec6_crypto_dma_map_descs(common); +} + +static int artpec6_crypto_prepare_aead(struct aead_request *areq) +{ + size_t count; + int ret; + size_t input_length; + struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(areq->base.tfm); + struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq); + struct crypto_aead *cipher = crypto_aead_reqtfm(areq); + struct artpec6_crypto_req_common *common = &req_ctx->common; + struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); + enum artpec6_crypto_variant variant = ac->variant; + u32 md_cipher_len; + + artpec6_crypto_init_dma_operation(common); + + /* Key */ + if (variant == ARTPEC6_CRYPTO) { + ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER, + a6_regk_crypto_dlkey); + } else { + ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER, + a7_regk_crypto_dlkey); + } + ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md, + sizeof(ctx->key_md), false, false); + if (ret) + return ret; + + ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key, + ctx->key_length, true, false); + if (ret) + return ret; + + req_ctx->cipher_md = 0; + + switch (ctx->key_length) { + case 16: + md_cipher_len = regk_crypto_key_128; + break; + case 24: + md_cipher_len = regk_crypto_key_192; + break; + case 32: + md_cipher_len = regk_crypto_key_256; + break; + default: + return -EINVAL; + } + + if (variant == ARTPEC6_CRYPTO) { + req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER, + regk_crypto_aes_gcm); + req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN, + md_cipher_len); + if (req_ctx->decrypt) + req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR; + } else { + req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER, + regk_crypto_aes_gcm); + req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN, + md_cipher_len); + if (req_ctx->decrypt) + req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR; + } + + ret = artpec6_crypto_setup_out_descr(common, + (void *) &req_ctx->cipher_md, + sizeof(req_ctx->cipher_md), false, + false); + if (ret) + return ret; + + ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false); + if (ret) + return ret; + + /* For the decryption, cryptlen includes the tag. */ + input_length = areq->cryptlen; + if (req_ctx->decrypt) + input_length -= AES_BLOCK_SIZE; + + /* Prepare the context buffer */ + req_ctx->hw_ctx.aad_length_bits = + __cpu_to_be64(8*areq->assoclen); + + req_ctx->hw_ctx.text_length_bits = + __cpu_to_be64(8*input_length); + + memcpy(req_ctx->hw_ctx.J0, areq->iv, crypto_aead_ivsize(cipher)); + // The HW omits the initial increment of the counter field. 
+ crypto_inc(req_ctx->hw_ctx.J0+12, 4); + + ret = artpec6_crypto_setup_out_descr(common, &req_ctx->hw_ctx, + sizeof(struct artpec6_crypto_aead_hw_ctx), false, false); + if (ret) + return ret; + + { + struct artpec6_crypto_walk walk; + + artpec6_crypto_walk_init(&walk, areq->src); + + /* Associated data */ + count = areq->assoclen; + ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count); + if (ret) + return ret; + + if (!IS_ALIGNED(areq->assoclen, 16)) { + size_t assoc_pad = 16 - (areq->assoclen % 16); + /* The HW mandates zero padding here */ + ret = artpec6_crypto_setup_out_descr(common, + ac->zero_buffer, + assoc_pad, false, + false); + if (ret) + return ret; + } + + /* Data to crypto */ + count = input_length; + ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count); + if (ret) + return ret; + + if (!IS_ALIGNED(input_length, 16)) { + size_t crypto_pad = 16 - (input_length % 16); + /* The HW mandates zero padding here */ + ret = artpec6_crypto_setup_out_descr(common, + ac->zero_buffer, + crypto_pad, + false, + false); + if (ret) + return ret; + } + } + + /* Data from crypto */ + { + struct artpec6_crypto_walk walk; + size_t output_len = areq->cryptlen; + + if (req_ctx->decrypt) + output_len -= AES_BLOCK_SIZE; + + artpec6_crypto_walk_init(&walk, areq->dst); + + /* skip associated data in the output */ + count = artpec6_crypto_walk_advance(&walk, areq->assoclen); + if (count) + return -EINVAL; + + count = output_len; + ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, count); + if (ret) + return ret; + + /* Put padding between the cryptotext and the auth tag */ + if (!IS_ALIGNED(output_len, 16)) { + size_t crypto_pad = 16 - (output_len % 16); + + ret = artpec6_crypto_setup_in_descr(common, + ac->pad_buffer, + crypto_pad, false); + if (ret) + return ret; + } + + /* The authentication tag shall follow immediately after + * the output ciphertext. For decryption it is put in a context + * buffer for later compare against the input tag. + */ + count = AES_BLOCK_SIZE; + + if (req_ctx->decrypt) { + ret = artpec6_crypto_setup_in_descr(common, + req_ctx->decryption_tag, count, false); + if (ret) + return ret; + + } else { + ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, + count); + if (ret) + return ret; + } + + } + + ret = artpec6_crypto_terminate_in_descrs(common); + if (ret) + return ret; + + ret = artpec6_crypto_terminate_out_descrs(common); + if (ret) + return ret; + + return artpec6_crypto_dma_map_descs(common); +} + +static void artpec6_crypto_process_queue(struct artpec6_crypto *ac) +{ + struct artpec6_crypto_req_common *req; + + while (!list_empty(&ac->queue) && !artpec6_crypto_busy()) { + req = list_first_entry(&ac->queue, + struct artpec6_crypto_req_common, + list); + list_move_tail(&req->list, &ac->pending); + artpec6_crypto_start_dma(req); + + req->req->complete(req->req, -EINPROGRESS); + } + + /* + * In some cases, the hardware can raise an in_eop_flush interrupt + * before actually updating the status, so we have an timer which will + * recheck the status on timeout. Since the cases are expected to be + * very rare, we use a relatively large timeout value. There should be + * no noticeable negative effect if we timeout spuriously. 
+ */ + if (ac->pending_count) + mod_timer(&ac->timer, jiffies + msecs_to_jiffies(100)); + else + del_timer(&ac->timer); +} + +static void artpec6_crypto_timeout(unsigned long data) +{ + struct artpec6_crypto *ac = (struct artpec6_crypto *) data; + + dev_info_ratelimited(artpec6_crypto_dev, "timeout\n"); + + tasklet_schedule(&ac->task); +} + +static void artpec6_crypto_task(unsigned long data) +{ + struct artpec6_crypto *ac = (struct artpec6_crypto *)data; + struct artpec6_crypto_req_common *req; + struct artpec6_crypto_req_common *n; + + if (list_empty(&ac->pending)) { + pr_debug("Spurious IRQ\n"); + return; + } + + spin_lock_bh(&ac->queue_lock); + + list_for_each_entry_safe(req, n, &ac->pending, list) { + struct artpec6_crypto_dma_descriptors *dma = req->dma; + u32 stat; + + dma_sync_single_for_cpu(artpec6_crypto_dev, dma->stat_dma_addr, + sizeof(dma->stat[0]), + DMA_BIDIRECTIONAL); + + stat = req->dma->stat[req->dma->in_cnt-1]; + + /* A non-zero final status descriptor indicates + * this job has finished. + */ + pr_debug("Request %p status is %X\n", req, stat); + if (!stat) + break; + + /* Allow testing of timeout handling with fault injection */ +#ifdef CONFIG_FAULT_INJECTION + if (should_fail(&artpec6_crypto_fail_status_read, 1)) + continue; +#endif + + pr_debug("Completing request %p\n", req); + + list_del(&req->list); + + artpec6_crypto_dma_unmap_all(req); + artpec6_crypto_copy_bounce_buffers(req); + + ac->pending_count--; + artpec6_crypto_common_destroy(req); + req->complete(req->req); + } + + artpec6_crypto_process_queue(ac); + + spin_unlock_bh(&ac->queue_lock); +} + +static void artpec6_crypto_complete_crypto(struct crypto_async_request *req) +{ + req->complete(req, 0); +} + +static void +artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req) +{ + struct skcipher_request *cipher_req = container_of(req, + struct skcipher_request, base); + + scatterwalk_map_and_copy(cipher_req->iv, cipher_req->src, + cipher_req->cryptlen - AES_BLOCK_SIZE, + AES_BLOCK_SIZE, 0); + req->complete(req, 0); +} + +static void +artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req) +{ + struct skcipher_request *cipher_req = container_of(req, + struct skcipher_request, base); + + scatterwalk_map_and_copy(cipher_req->iv, cipher_req->dst, + cipher_req->cryptlen - AES_BLOCK_SIZE, + AES_BLOCK_SIZE, 0); + req->complete(req, 0); +} + +static void artpec6_crypto_complete_aead(struct crypto_async_request *req) +{ + int result = 0; + + /* Verify GCM hashtag. 
*/ + struct aead_request *areq = container_of(req, + struct aead_request, base); + struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq); + + if (req_ctx->decrypt) { + u8 input_tag[AES_BLOCK_SIZE]; + + sg_pcopy_to_buffer(areq->src, + sg_nents(areq->src), + input_tag, + AES_BLOCK_SIZE, + areq->assoclen + areq->cryptlen - + AES_BLOCK_SIZE); + + if (memcmp(req_ctx->decryption_tag, + input_tag, + AES_BLOCK_SIZE)) { + pr_debug("***EBADMSG:\n"); + print_hex_dump_debug("ref:", DUMP_PREFIX_ADDRESS, 32, 1, + input_tag, AES_BLOCK_SIZE, true); + print_hex_dump_debug("out:", DUMP_PREFIX_ADDRESS, 32, 1, + req_ctx->decryption_tag, + AES_BLOCK_SIZE, true); + + result = -EBADMSG; + } + } + + req->complete(req, result); +} + +static void artpec6_crypto_complete_hash(struct crypto_async_request *req) +{ + req->complete(req, 0); +} + + +/*------------------- Hash functions -----------------------------------------*/ +static int +artpec6_crypto_hash_set_key(struct crypto_ahash *tfm, + const u8 *key, unsigned int keylen) +{ + struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(&tfm->base); + size_t blocksize; + int ret; + + if (!keylen) { + pr_err("Invalid length (%d) of HMAC key\n", + keylen); + return -EINVAL; + } + + memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key)); + + blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); + + if (keylen > blocksize) { + SHASH_DESC_ON_STACK(hdesc, tfm_ctx->child_hash); + + hdesc->tfm = tfm_ctx->child_hash; + hdesc->flags = crypto_ahash_get_flags(tfm) & + CRYPTO_TFM_REQ_MAY_SLEEP; + + tfm_ctx->hmac_key_length = blocksize; + ret = crypto_shash_digest(hdesc, key, keylen, + tfm_ctx->hmac_key); + if (ret) + return ret; + + } else { + memcpy(tfm_ctx->hmac_key, key, keylen); + tfm_ctx->hmac_key_length = keylen; + } + + return 0; +} + +static int +artpec6_crypto_init_hash(struct ahash_request *req, u8 type, int hmac) +{ + struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); + enum artpec6_crypto_variant variant = ac->variant; + struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); + u32 oper; + + memset(req_ctx, 0, sizeof(*req_ctx)); + + req_ctx->hash_flags = HASH_FLAG_INIT_CTX; + if (hmac) + req_ctx->hash_flags |= (HASH_FLAG_HMAC | HASH_FLAG_UPDATE_KEY); + + switch (type) { + case ARTPEC6_CRYPTO_HASH_SHA1: + oper = hmac ? regk_crypto_hmac_sha1 : regk_crypto_sha1; + break; + case ARTPEC6_CRYPTO_HASH_SHA256: + oper = hmac ? regk_crypto_hmac_sha256 : regk_crypto_sha256; + break; + case ARTPEC6_CRYPTO_HASH_SHA384: + oper = hmac ? regk_crypto_hmac_sha384 : regk_crypto_sha384; + break; + case ARTPEC6_CRYPTO_HASH_SHA512: + oper = hmac ? 
regk_crypto_hmac_sha512 : regk_crypto_sha512; + break; + + default: + pr_err("%s: Unsupported hash type 0x%x\n", MODULE_NAME, type); + return -EINVAL; + } + + if (variant == ARTPEC6_CRYPTO) + req_ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, oper); + else + req_ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, oper); + + return 0; +} + +static int artpec6_crypto_prepare_submit_hash(struct ahash_request *req) +{ + struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); + int ret; + + if (!req_ctx->common.dma) { + ret = artpec6_crypto_common_init(&req_ctx->common, + &req->base, + artpec6_crypto_complete_hash, + NULL, 0); + + if (ret) + return ret; + } + + ret = artpec6_crypto_prepare_hash(req); + switch (ret) { + case ARTPEC6_CRYPTO_PREPARE_HASH_START: + ret = artpec6_crypto_submit(&req_ctx->common); + break; + + case ARTPEC6_CRYPTO_PREPARE_HASH_NO_START: + ret = 0; + /* Fallthrough */ + + default: + artpec6_crypto_common_destroy(&req_ctx->common); + break; + } + + return ret; +} + +static int artpec6_crypto_hash_final(struct ahash_request *req) +{ + struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); + + req_ctx->hash_flags |= HASH_FLAG_FINALIZE; + + return artpec6_crypto_prepare_submit_hash(req); +} + +static int artpec6_crypto_hash_update(struct ahash_request *req) +{ + struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); + + req_ctx->hash_flags |= HASH_FLAG_UPDATE; + + return artpec6_crypto_prepare_submit_hash(req); +} + +static int artpec6_crypto_sha1_init(struct ahash_request *req) +{ + return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0); +} + +static int artpec6_crypto_sha1_digest(struct ahash_request *req) +{ + struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); + + artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0); + + req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE; + + return artpec6_crypto_prepare_submit_hash(req); +} + +static int artpec6_crypto_sha256_init(struct ahash_request *req) +{ + return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0); +} + +static int artpec6_crypto_sha256_digest(struct ahash_request *req) +{ + struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); + + artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0); + req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE; + + return artpec6_crypto_prepare_submit_hash(req); +} + +static int __maybe_unused artpec6_crypto_sha384_init(struct ahash_request *req) +{ + return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 0); +} + +static int __maybe_unused +artpec6_crypto_sha384_digest(struct ahash_request *req) +{ + struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); + + artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 0); + req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE; + + return artpec6_crypto_prepare_submit_hash(req); +} + +static int artpec6_crypto_sha512_init(struct ahash_request *req) +{ + return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 0); +} + +static int artpec6_crypto_sha512_digest(struct ahash_request *req) +{ + struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); + + artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 0); + req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE; + + return artpec6_crypto_prepare_submit_hash(req); +} + +static int artpec6_crypto_hmac_sha256_init(struct ahash_request *req) +{ + return artpec6_crypto_init_hash(req, 
ARTPEC6_CRYPTO_HASH_SHA256, 1); +} + +static int __maybe_unused +artpec6_crypto_hmac_sha384_init(struct ahash_request *req) +{ + return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 1); +} + +static int artpec6_crypto_hmac_sha512_init(struct ahash_request *req) +{ + return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 1); +} + +static int artpec6_crypto_hmac_sha256_digest(struct ahash_request *req) +{ + struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); + + artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1); + req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE; + + return artpec6_crypto_prepare_submit_hash(req); +} + +static int __maybe_unused +artpec6_crypto_hmac_sha384_digest(struct ahash_request *req) +{ + struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); + + artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 1); + req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE; + + return artpec6_crypto_prepare_submit_hash(req); +} + +static int artpec6_crypto_hmac_sha512_digest(struct ahash_request *req) +{ + struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); + + artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 1); + req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE; + + return artpec6_crypto_prepare_submit_hash(req); +} + +static int artpec6_crypto_ahash_init_common(struct crypto_tfm *tfm, + const char *base_hash_name) +{ + struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm); + + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + sizeof(struct artpec6_hash_request_context)); + memset(tfm_ctx, 0, sizeof(*tfm_ctx)); + + if (base_hash_name) { + struct crypto_shash *child; + + child = crypto_alloc_shash(base_hash_name, 0, + CRYPTO_ALG_NEED_FALLBACK); + + if (IS_ERR(child)) + return PTR_ERR(child); + + tfm_ctx->child_hash = child; + } + + return 0; +} + +static int artpec6_crypto_ahash_init(struct crypto_tfm *tfm) +{ + return artpec6_crypto_ahash_init_common(tfm, NULL); +} + +static int artpec6_crypto_ahash_init_hmac_sha256(struct crypto_tfm *tfm) +{ + return artpec6_crypto_ahash_init_common(tfm, "sha256"); +} + +static int __maybe_unused +artpec6_crypto_ahash_init_hmac_sha384(struct crypto_tfm *tfm) +{ + return artpec6_crypto_ahash_init_common(tfm, "sha384"); +} + +static int artpec6_crypto_ahash_init_hmac_sha512(struct crypto_tfm *tfm) +{ + return artpec6_crypto_ahash_init_common(tfm, "sha512"); +} + +static void artpec6_crypto_ahash_exit(struct crypto_tfm *tfm) +{ + struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm); + + if (tfm_ctx->child_hash) + crypto_free_shash(tfm_ctx->child_hash); + + memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key)); + tfm_ctx->hmac_key_length = 0; +} + +static int artpec6_crypto_hash_export(struct ahash_request *req, void *out) +{ + const struct artpec6_hash_request_context *ctx = ahash_request_ctx(req); + struct artpec6_hash_export_state *state = out; + struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); + enum artpec6_crypto_variant variant = ac->variant; + + BUILD_BUG_ON(sizeof(state->partial_buffer) != + sizeof(ctx->partial_buffer)); + BUILD_BUG_ON(sizeof(state->digeststate) != sizeof(ctx->digeststate)); + + state->digcnt = ctx->digcnt; + state->partial_bytes = ctx->partial_bytes; + state->hash_flags = ctx->hash_flags; + + if (variant == ARTPEC6_CRYPTO) + state->oper = FIELD_GET(A6_CRY_MD_OPER, ctx->hash_md); + else + state->oper = FIELD_GET(A7_CRY_MD_OPER, ctx->hash_md); + + 
memcpy(state->partial_buffer, ctx->partial_buffer,
+ sizeof(state->partial_buffer));
+ memcpy(state->digeststate, ctx->digeststate,
+ sizeof(state->digeststate));
+
+ return 0;
+}
+
+static int artpec6_crypto_hash_import(struct ahash_request *req, const void *in)
+{
+ struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
+ const struct artpec6_hash_export_state *state = in;
+ struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+ enum artpec6_crypto_variant variant = ac->variant;
+
+ memset(ctx, 0, sizeof(*ctx));
+
+ ctx->digcnt = state->digcnt;
+ ctx->partial_bytes = state->partial_bytes;
+ ctx->hash_flags = state->hash_flags;
+
+ if (variant == ARTPEC6_CRYPTO)
+ ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, state->oper);
+ else
+ ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, state->oper);
+
+ memcpy(ctx->partial_buffer, state->partial_buffer,
+ sizeof(state->partial_buffer));
+ memcpy(ctx->digeststate, state->digeststate,
+ sizeof(state->digeststate));
+
+ return 0;
+}
+
+static int init_crypto_hw(struct artpec6_crypto *ac)
+{
+ enum artpec6_crypto_variant variant = ac->variant;
+ void __iomem *base = ac->base;
+ u32 out_descr_buf_size;
+ u32 out_data_buf_size;
+ u32 in_data_buf_size;
+ u32 in_descr_buf_size;
+ u32 in_stat_buf_size;
+ u32 in, out;
+
+ /*
+ * The PDMA unit contains 1984 bytes of internal memory for the OUT
+ * channels and 1024 bytes for the IN channel. This is an elastic
+ * memory used to internally store the descriptors and data. The values
+ * are specified in 64 byte increments. TrustZone buffers are not
+ * used at this stage.
+ */
+ out_data_buf_size = 16; /* 1024 bytes for data */
+ out_descr_buf_size = 15; /* 960 bytes for descriptors */
+ in_data_buf_size = 8; /* 512 bytes for data */
+ in_descr_buf_size = 4; /* 256 bytes for descriptors */
+ in_stat_buf_size = 4; /* 256 bytes for stat descrs */
+
+ BUILD_BUG_ON_MSG((out_data_buf_size +
+ out_descr_buf_size) * 64 > 1984,
+ "Invalid OUT configuration");
+
+ BUILD_BUG_ON_MSG((in_data_buf_size +
+ in_descr_buf_size +
+ in_stat_buf_size) * 64 > 1024,
+ "Invalid IN configuration");
+
+ in = FIELD_PREP(PDMA_IN_BUF_CFG_DATA_BUF_SIZE, in_data_buf_size) |
+ FIELD_PREP(PDMA_IN_BUF_CFG_DESCR_BUF_SIZE, in_descr_buf_size) |
+ FIELD_PREP(PDMA_IN_BUF_CFG_STAT_BUF_SIZE, in_stat_buf_size);
+
+ out = FIELD_PREP(PDMA_OUT_BUF_CFG_DATA_BUF_SIZE, out_data_buf_size) |
+ FIELD_PREP(PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE, out_descr_buf_size);
+
+ writel_relaxed(out, base + PDMA_OUT_BUF_CFG);
+ writel_relaxed(PDMA_OUT_CFG_EN, base + PDMA_OUT_CFG);
+
+ if (variant == ARTPEC6_CRYPTO) {
+ writel_relaxed(in, base + A6_PDMA_IN_BUF_CFG);
+ writel_relaxed(PDMA_IN_CFG_EN, base + A6_PDMA_IN_CFG);
+ writel_relaxed(A6_PDMA_INTR_MASK_IN_DATA |
+ A6_PDMA_INTR_MASK_IN_EOP_FLUSH,
+ base + A6_PDMA_INTR_MASK);
+ } else {
+ writel_relaxed(in, base + A7_PDMA_IN_BUF_CFG);
+ writel_relaxed(PDMA_IN_CFG_EN, base + A7_PDMA_IN_CFG);
+ writel_relaxed(A7_PDMA_INTR_MASK_IN_DATA |
+ A7_PDMA_INTR_MASK_IN_EOP_FLUSH,
+ base + A7_PDMA_INTR_MASK);
+ }
+
+ return 0;
+}
+
+static void artpec6_crypto_disable_hw(struct artpec6_crypto *ac)
+{
+ enum artpec6_crypto_variant variant = ac->variant;
+ void __iomem *base = ac->base;
+
+ if (variant == ARTPEC6_CRYPTO) {
+ writel_relaxed(A6_PDMA_IN_CMD_STOP, base + A6_PDMA_IN_CMD);
+ writel_relaxed(0, base + A6_PDMA_IN_CFG);
+ writel_relaxed(A6_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
+ } else {
+ writel_relaxed(A7_PDMA_IN_CMD_STOP, base + A7_PDMA_IN_CMD);
+ writel_relaxed(0, base + A7_PDMA_IN_CFG);
+
writel_relaxed(A7_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD); + } + + writel_relaxed(0, base + PDMA_OUT_CFG); + +} + +static irqreturn_t artpec6_crypto_irq(int irq, void *dev_id) +{ + struct artpec6_crypto *ac = dev_id; + enum artpec6_crypto_variant variant = ac->variant; + void __iomem *base = ac->base; + u32 mask_in_data, mask_in_eop_flush; + u32 in_cmd_flush_stat, in_cmd_reg; + u32 ack_intr_reg; + u32 ack = 0; + u32 intr; + + if (variant == ARTPEC6_CRYPTO) { + intr = readl_relaxed(base + A6_PDMA_MASKED_INTR); + mask_in_data = A6_PDMA_INTR_MASK_IN_DATA; + mask_in_eop_flush = A6_PDMA_INTR_MASK_IN_EOP_FLUSH; + in_cmd_flush_stat = A6_PDMA_IN_CMD_FLUSH_STAT; + in_cmd_reg = A6_PDMA_IN_CMD; + ack_intr_reg = A6_PDMA_ACK_INTR; + } else { + intr = readl_relaxed(base + A7_PDMA_MASKED_INTR); + mask_in_data = A7_PDMA_INTR_MASK_IN_DATA; + mask_in_eop_flush = A7_PDMA_INTR_MASK_IN_EOP_FLUSH; + in_cmd_flush_stat = A7_PDMA_IN_CMD_FLUSH_STAT; + in_cmd_reg = A7_PDMA_IN_CMD; + ack_intr_reg = A7_PDMA_ACK_INTR; + } + + /* We get two interrupt notifications from each job. + * The in_data means all data was sent to memory and then + * we request a status flush command to write the per-job + * status to its status vector. This ensures that the + * tasklet can detect exactly how many submitted jobs + * that have finished. + */ + if (intr & mask_in_data) + ack |= mask_in_data; + + if (intr & mask_in_eop_flush) + ack |= mask_in_eop_flush; + else + writel_relaxed(in_cmd_flush_stat, base + in_cmd_reg); + + writel_relaxed(ack, base + ack_intr_reg); + + if (intr & mask_in_eop_flush) + tasklet_schedule(&ac->task); + + return IRQ_HANDLED; +} + +/*------------------- Algorithm definitions ----------------------------------*/ + +/* Hashes */ +static struct ahash_alg hash_algos[] = { + /* SHA-1 */ + { + .init = artpec6_crypto_sha1_init, + .update = artpec6_crypto_hash_update, + .final = artpec6_crypto_hash_final, + .digest = artpec6_crypto_sha1_digest, + .import = artpec6_crypto_hash_import, + .export = artpec6_crypto_hash_export, + .halg.digestsize = SHA1_DIGEST_SIZE, + .halg.statesize = sizeof(struct artpec6_hash_export_state), + .halg.base = { + .cra_name = "sha1", + .cra_driver_name = "artpec-sha1", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct artpec6_hashalg_context), + .cra_alignmask = 3, + .cra_module = THIS_MODULE, + .cra_init = artpec6_crypto_ahash_init, + .cra_exit = artpec6_crypto_ahash_exit, + } + }, + /* SHA-256 */ + { + .init = artpec6_crypto_sha256_init, + .update = artpec6_crypto_hash_update, + .final = artpec6_crypto_hash_final, + .digest = artpec6_crypto_sha256_digest, + .import = artpec6_crypto_hash_import, + .export = artpec6_crypto_hash_export, + .halg.digestsize = SHA256_DIGEST_SIZE, + .halg.statesize = sizeof(struct artpec6_hash_export_state), + .halg.base = { + .cra_name = "sha256", + .cra_driver_name = "artpec-sha256", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct artpec6_hashalg_context), + .cra_alignmask = 3, + .cra_module = THIS_MODULE, + .cra_init = artpec6_crypto_ahash_init, + .cra_exit = artpec6_crypto_ahash_exit, + } + }, + /* HMAC SHA-256 */ + { + .init = artpec6_crypto_hmac_sha256_init, + .update = artpec6_crypto_hash_update, + .final = artpec6_crypto_hash_final, + .digest = artpec6_crypto_hmac_sha256_digest, + .import = artpec6_crypto_hash_import, + .export = artpec6_crypto_hash_export, + 
.setkey = artpec6_crypto_hash_set_key, + .halg.digestsize = SHA256_DIGEST_SIZE, + .halg.statesize = sizeof(struct artpec6_hash_export_state), + .halg.base = { + .cra_name = "hmac(sha256)", + .cra_driver_name = "artpec-hmac-sha256", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct artpec6_hashalg_context), + .cra_alignmask = 3, + .cra_module = THIS_MODULE, + .cra_init = artpec6_crypto_ahash_init_hmac_sha256, + .cra_exit = artpec6_crypto_ahash_exit, + } + }, +}; + +static struct ahash_alg artpec7_hash_algos[] = { + /* SHA-384 */ + { + .init = artpec6_crypto_sha384_init, + .update = artpec6_crypto_hash_update, + .final = artpec6_crypto_hash_final, + .digest = artpec6_crypto_sha384_digest, + .import = artpec6_crypto_hash_import, + .export = artpec6_crypto_hash_export, + .halg.digestsize = SHA384_DIGEST_SIZE, + .halg.statesize = sizeof(struct artpec6_hash_export_state), + .halg.base = { + .cra_name = "sha384", + .cra_driver_name = "artpec-sha384", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC, + .cra_blocksize = SHA384_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct artpec6_hashalg_context), + .cra_alignmask = 3, + .cra_module = THIS_MODULE, + .cra_init = artpec6_crypto_ahash_init, + .cra_exit = artpec6_crypto_ahash_exit, + } + }, + /* HMAC SHA-384 */ + { + .init = artpec6_crypto_hmac_sha384_init, + .update = artpec6_crypto_hash_update, + .final = artpec6_crypto_hash_final, + .digest = artpec6_crypto_hmac_sha384_digest, + .import = artpec6_crypto_hash_import, + .export = artpec6_crypto_hash_export, + .setkey = artpec6_crypto_hash_set_key, + .halg.digestsize = SHA384_DIGEST_SIZE, + .halg.statesize = sizeof(struct artpec6_hash_export_state), + .halg.base = { + .cra_name = "hmac(sha384)", + .cra_driver_name = "artpec-hmac-sha384", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC, + .cra_blocksize = SHA384_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct artpec6_hashalg_context), + .cra_alignmask = 3, + .cra_module = THIS_MODULE, + .cra_init = artpec6_crypto_ahash_init_hmac_sha384, + .cra_exit = artpec6_crypto_ahash_exit, + } + }, + /* SHA-512 */ + { + .init = artpec6_crypto_sha512_init, + .update = artpec6_crypto_hash_update, + .final = artpec6_crypto_hash_final, + .digest = artpec6_crypto_sha512_digest, + .import = artpec6_crypto_hash_import, + .export = artpec6_crypto_hash_export, + .halg.digestsize = SHA512_DIGEST_SIZE, + .halg.statesize = sizeof(struct artpec6_hash_export_state), + .halg.base = { + .cra_name = "sha512", + .cra_driver_name = "artpec-sha512", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC, + .cra_blocksize = SHA512_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct artpec6_hashalg_context), + .cra_alignmask = 3, + .cra_module = THIS_MODULE, + .cra_init = artpec6_crypto_ahash_init, + .cra_exit = artpec6_crypto_ahash_exit, + } + }, + /* HMAC SHA-512 */ + { + .init = artpec6_crypto_hmac_sha512_init, + .update = artpec6_crypto_hash_update, + .final = artpec6_crypto_hash_final, + .digest = artpec6_crypto_hmac_sha512_digest, + .import = artpec6_crypto_hash_import, + .export = artpec6_crypto_hash_export, + .setkey = artpec6_crypto_hash_set_key, + .halg.digestsize = SHA512_DIGEST_SIZE, + .halg.statesize = sizeof(struct artpec6_hash_export_state), + .halg.base = { + .cra_name = "hmac(sha512)", + .cra_driver_name = "artpec-hmac-sha512", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC, 
+ .cra_blocksize = SHA512_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct artpec6_hashalg_context), + .cra_alignmask = 3, + .cra_module = THIS_MODULE, + .cra_init = artpec6_crypto_ahash_init_hmac_sha512, + .cra_exit = artpec6_crypto_ahash_exit, + } + }, +}; + +/* Crypto */ +static struct skcipher_alg crypto_algos[] = { + /* AES - ECB */ + { + .base = { + .cra_name = "ecb(aes)", + .cra_driver_name = "artpec6-ecb-aes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context), + .cra_alignmask = 3, + .cra_module = THIS_MODULE, + }, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = artpec6_crypto_cipher_set_key, + .encrypt = artpec6_crypto_encrypt, + .decrypt = artpec6_crypto_decrypt, + .init = artpec6_crypto_aes_ecb_init, + .exit = artpec6_crypto_aes_exit, + }, + /* AES - CTR */ + { + .base = { + .cra_name = "ctr(aes)", + .cra_driver_name = "artpec6-ctr-aes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context), + .cra_alignmask = 3, + .cra_module = THIS_MODULE, + }, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = artpec6_crypto_cipher_set_key, + .encrypt = artpec6_crypto_ctr_encrypt, + .decrypt = artpec6_crypto_ctr_decrypt, + .init = artpec6_crypto_aes_ctr_init, + .exit = artpec6_crypto_aes_ctr_exit, + }, + /* AES - CBC */ + { + .base = { + .cra_name = "cbc(aes)", + .cra_driver_name = "artpec6-cbc-aes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context), + .cra_alignmask = 3, + .cra_module = THIS_MODULE, + }, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = artpec6_crypto_cipher_set_key, + .encrypt = artpec6_crypto_encrypt, + .decrypt = artpec6_crypto_decrypt, + .init = artpec6_crypto_aes_cbc_init, + .exit = artpec6_crypto_aes_exit + }, + /* AES - XTS */ + { + .base = { + .cra_name = "xts(aes)", + .cra_driver_name = "artpec6-xts-aes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context), + .cra_alignmask = 3, + .cra_module = THIS_MODULE, + }, + .min_keysize = 2*AES_MIN_KEY_SIZE, + .max_keysize = 2*AES_MAX_KEY_SIZE, + .ivsize = 16, + .setkey = artpec6_crypto_xts_set_key, + .encrypt = artpec6_crypto_encrypt, + .decrypt = artpec6_crypto_decrypt, + .init = artpec6_crypto_aes_xts_init, + .exit = artpec6_crypto_aes_exit, + }, +}; + +static struct aead_alg aead_algos[] = { + { + .init = artpec6_crypto_aead_init, + .setkey = artpec6_crypto_aead_set_key, + .encrypt = artpec6_crypto_aead_encrypt, + .decrypt = artpec6_crypto_aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = AES_BLOCK_SIZE, + + .base = { + .cra_name = "gcm(aes)", + .cra_driver_name = "artpec-gcm-aes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context), + .cra_alignmask = 3, + .cra_module = THIS_MODULE, + }, + } +}; + +#ifdef CONFIG_DEBUG_FS + +struct dbgfs_u32 { + char *name; + mode_t mode; + u32 *flag; + char *desc; +}; + +static void 
artpec6_crypto_init_debugfs(void) +{ + dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL); + + if (!dbgfs_root || IS_ERR(dbgfs_root)) { + dbgfs_root = NULL; + pr_err("%s: Could not initialise debugfs!\n", MODULE_NAME); + return; + } + +#ifdef CONFIG_FAULT_INJECTION + fault_create_debugfs_attr("fail_status_read", dbgfs_root, + &artpec6_crypto_fail_status_read); + + fault_create_debugfs_attr("fail_dma_array_full", dbgfs_root, + &artpec6_crypto_fail_dma_array_full); +#endif +} + +static void artpec6_crypto_free_debugfs(void) +{ + if (!dbgfs_root) + return; + + debugfs_remove_recursive(dbgfs_root); + dbgfs_root = NULL; +} +#endif + +static const struct of_device_id artpec6_crypto_of_match[] = { + { .compatible = "axis,artpec6-crypto", .data = (void *)ARTPEC6_CRYPTO }, + { .compatible = "axis,artpec7-crypto", .data = (void *)ARTPEC7_CRYPTO }, + {} +}; +MODULE_DEVICE_TABLE(of, artpec6_crypto_of_match); + +static int artpec6_crypto_probe(struct platform_device *pdev) +{ + const struct of_device_id *match; + enum artpec6_crypto_variant variant; + struct artpec6_crypto *ac; + struct device *dev = &pdev->dev; + void __iomem *base; + struct resource *res; + int irq; + int err; + + if (artpec6_crypto_dev) + return -ENODEV; + + match = of_match_node(artpec6_crypto_of_match, dev->of_node); + if (!match) + return -EINVAL; + + variant = (enum artpec6_crypto_variant)match->data; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return -ENODEV; + + ac = devm_kzalloc(&pdev->dev, sizeof(struct artpec6_crypto), + GFP_KERNEL); + if (!ac) + return -ENOMEM; + + platform_set_drvdata(pdev, ac); + ac->variant = variant; + + spin_lock_init(&ac->queue_lock); + INIT_LIST_HEAD(&ac->queue); + INIT_LIST_HEAD(&ac->pending); + setup_timer(&ac->timer, artpec6_crypto_timeout, (unsigned long) ac); + + ac->base = base; + + ac->dma_cache = kmem_cache_create("artpec6_crypto_dma", + sizeof(struct artpec6_crypto_dma_descriptors), + 64, + 0, + NULL); + if (!ac->dma_cache) + return -ENOMEM; + +#ifdef CONFIG_DEBUG_FS + artpec6_crypto_init_debugfs(); +#endif + + tasklet_init(&ac->task, artpec6_crypto_task, + (unsigned long)ac); + + ac->pad_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX, + GFP_KERNEL); + if (!ac->pad_buffer) + return -ENOMEM; + ac->pad_buffer = PTR_ALIGN(ac->pad_buffer, ARTPEC_CACHE_LINE_MAX); + + ac->zero_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX, + GFP_KERNEL); + if (!ac->zero_buffer) + return -ENOMEM; + ac->zero_buffer = PTR_ALIGN(ac->zero_buffer, ARTPEC_CACHE_LINE_MAX); + + err = init_crypto_hw(ac); + if (err) + goto free_cache; + + err = devm_request_irq(&pdev->dev, irq, artpec6_crypto_irq, 0, + "artpec6-crypto", ac); + if (err) + goto disable_hw; + + artpec6_crypto_dev = &pdev->dev; + + err = crypto_register_ahashes(hash_algos, ARRAY_SIZE(hash_algos)); + if (err) { + dev_err(dev, "Failed to register ahashes\n"); + goto disable_hw; + } + + if (variant != ARTPEC6_CRYPTO) { + err = crypto_register_ahashes(artpec7_hash_algos, + ARRAY_SIZE(artpec7_hash_algos)); + if (err) { + dev_err(dev, "Failed to register ahashes\n"); + goto unregister_ahashes; + } + } + + err = crypto_register_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos)); + if (err) { + dev_err(dev, "Failed to register ciphers\n"); + goto unregister_a7_ahashes; + } + + err = crypto_register_aeads(aead_algos, ARRAY_SIZE(aead_algos)); + if 
(err) { + dev_err(dev, "Failed to register aeads\n"); + goto unregister_algs; + } + + return 0; + +unregister_algs: + crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos)); +unregister_a7_ahashes: + if (variant != ARTPEC6_CRYPTO) + crypto_unregister_ahashes(artpec7_hash_algos, + ARRAY_SIZE(artpec7_hash_algos)); +unregister_ahashes: + crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos)); +disable_hw: + artpec6_crypto_disable_hw(ac); +free_cache: + kmem_cache_destroy(ac->dma_cache); + return err; +} + +static int artpec6_crypto_remove(struct platform_device *pdev) +{ + struct artpec6_crypto *ac = platform_get_drvdata(pdev); + int irq = platform_get_irq(pdev, 0); + + crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos)); + if (ac->variant != ARTPEC6_CRYPTO) + crypto_unregister_ahashes(artpec7_hash_algos, + ARRAY_SIZE(artpec7_hash_algos)); + crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos)); + crypto_unregister_aeads(aead_algos, ARRAY_SIZE(aead_algos)); + + tasklet_disable(&ac->task); + devm_free_irq(&pdev->dev, irq, ac); + tasklet_kill(&ac->task); + del_timer_sync(&ac->timer); + + artpec6_crypto_disable_hw(ac); + + kmem_cache_destroy(ac->dma_cache); +#ifdef CONFIG_DEBUG_FS + artpec6_crypto_free_debugfs(); +#endif + return 0; +} + +static struct platform_driver artpec6_crypto_driver = { + .probe = artpec6_crypto_probe, + .remove = artpec6_crypto_remove, + .driver = { + .name = "artpec6-crypto", + .owner = THIS_MODULE, + .of_match_table = artpec6_crypto_of_match, + }, +}; + +module_platform_driver(artpec6_crypto_driver); + +MODULE_AUTHOR("Axis Communications AB"); +MODULE_DESCRIPTION("ARTPEC-6 Crypto driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index 9cfd36c1bcb6..8685c7e4debd 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c @@ -90,8 +90,6 @@ static int aead_pri = 150; module_param(aead_pri, int, 0644); MODULE_PARM_DESC(aead_pri, "Priority for AEAD algos"); -#define MAX_SPUS 16 - /* A type 3 BCM header, expected to precede the SPU header for SPU-M. * Bits 3 and 4 in the first byte encode the channel number (the dma ringset). 
* 0x60 - ring 0 @@ -120,7 +118,7 @@ static u8 select_channel(void) { u8 chan_idx = atomic_inc_return(&iproc_priv.next_chan); - return chan_idx % iproc_priv.spu.num_spu; + return chan_idx % iproc_priv.spu.num_chan; } /** @@ -4528,8 +4526,13 @@ static void spu_functions_register(struct device *dev, */ static int spu_mb_init(struct device *dev) { - struct mbox_client *mcl = &iproc_priv.mcl[iproc_priv.spu.num_spu]; - int err; + struct mbox_client *mcl = &iproc_priv.mcl; + int err, i; + + iproc_priv.mbox = devm_kcalloc(dev, iproc_priv.spu.num_chan, + sizeof(struct mbox_chan *), GFP_KERNEL); + if (!iproc_priv.mbox) + return -ENOMEM; mcl->dev = dev; mcl->tx_block = false; @@ -4538,25 +4541,33 @@ static int spu_mb_init(struct device *dev) mcl->rx_callback = spu_rx_callback; mcl->tx_done = NULL; - iproc_priv.mbox[iproc_priv.spu.num_spu] = - mbox_request_channel(mcl, 0); - if (IS_ERR(iproc_priv.mbox[iproc_priv.spu.num_spu])) { - err = (int)PTR_ERR(iproc_priv.mbox[iproc_priv.spu.num_spu]); - dev_err(dev, - "Mbox channel %d request failed with err %d", - iproc_priv.spu.num_spu, err); - iproc_priv.mbox[iproc_priv.spu.num_spu] = NULL; - return err; + for (i = 0; i < iproc_priv.spu.num_chan; i++) { + iproc_priv.mbox[i] = mbox_request_channel(mcl, i); + if (IS_ERR(iproc_priv.mbox[i])) { + err = (int)PTR_ERR(iproc_priv.mbox[i]); + dev_err(dev, + "Mbox channel %d request failed with err %d", + i, err); + iproc_priv.mbox[i] = NULL; + goto free_channels; + } } return 0; +free_channels: + for (i = 0; i < iproc_priv.spu.num_chan; i++) { + if (iproc_priv.mbox[i]) + mbox_free_channel(iproc_priv.mbox[i]); + } + + return err; } static void spu_mb_release(struct platform_device *pdev) { int i; - for (i = 0; i < iproc_priv.spu.num_spu; i++) + for (i = 0; i < iproc_priv.spu.num_chan; i++) mbox_free_channel(iproc_priv.mbox[i]); } @@ -4567,7 +4578,7 @@ static void spu_counters_init(void) atomic_set(&iproc_priv.session_count, 0); atomic_set(&iproc_priv.stream_count, 0); - atomic_set(&iproc_priv.next_chan, (int)iproc_priv.spu.num_spu); + atomic_set(&iproc_priv.next_chan, (int)iproc_priv.spu.num_chan); atomic64_set(&iproc_priv.bytes_in, 0); atomic64_set(&iproc_priv.bytes_out, 0); for (i = 0; i < SPU_OP_NUM; i++) { @@ -4809,47 +4820,38 @@ static int spu_dt_read(struct platform_device *pdev) struct resource *spu_ctrl_regs; const struct of_device_id *match; const struct spu_type_subtype *matched_spu_type; - void __iomem *spu_reg_vbase[MAX_SPUS]; - int err; + struct device_node *dn = pdev->dev.of_node; + int err, i; + + /* Count number of mailbox channels */ + spu->num_chan = of_count_phandle_with_args(dn, "mboxes", "#mbox-cells"); match = of_match_device(of_match_ptr(bcm_spu_dt_ids), dev); + if (!match) { + dev_err(&pdev->dev, "Failed to match device\n"); + return -ENODEV; + } + matched_spu_type = match->data; - if (iproc_priv.spu.num_spu > 1) { - /* If this is 2nd or later SPU, make sure it's same type */ - if ((spu->spu_type != matched_spu_type->type) || - (spu->spu_subtype != matched_spu_type->subtype)) { - err = -EINVAL; - dev_err(&pdev->dev, "Multiple SPU types not allowed"); + spu->spu_type = matched_spu_type->type; + spu->spu_subtype = matched_spu_type->subtype; + + i = 0; + for (i = 0; (i < MAX_SPUS) && ((spu_ctrl_regs = + platform_get_resource(pdev, IORESOURCE_MEM, i)) != NULL); i++) { + + spu->reg_vbase[i] = devm_ioremap_resource(dev, spu_ctrl_regs); + if (IS_ERR(spu->reg_vbase[i])) { + err = PTR_ERR(spu->reg_vbase[i]); + dev_err(&pdev->dev, "Failed to map registers: %d\n", + err); + spu->reg_vbase[i] = NULL; return 
err; } - } else { - /* Record type of first SPU */ - spu->spu_type = matched_spu_type->type; - spu->spu_subtype = matched_spu_type->subtype; } - - /* Get and map SPU registers */ - spu_ctrl_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!spu_ctrl_regs) { - err = -EINVAL; - dev_err(&pdev->dev, "Invalid/missing registers for SPU\n"); - return err; - } - - spu_reg_vbase[iproc_priv.spu.num_spu] = - devm_ioremap_resource(dev, spu_ctrl_regs); - if (IS_ERR(spu_reg_vbase[iproc_priv.spu.num_spu])) { - err = PTR_ERR(spu_reg_vbase[iproc_priv.spu.num_spu]); - dev_err(&pdev->dev, "Failed to map registers: %d\n", - err); - spu_reg_vbase[iproc_priv.spu.num_spu] = NULL; - return err; - } - - dev_dbg(dev, "SPU %d detected.", iproc_priv.spu.num_spu); - - spu->reg_vbase[iproc_priv.spu.num_spu] = spu_reg_vbase; + spu->num_spu = i; + dev_dbg(dev, "Device has %d SPUs", spu->num_spu); return 0; } @@ -4860,8 +4862,8 @@ int bcm_spu_probe(struct platform_device *pdev) struct spu_hw *spu = &iproc_priv.spu; int err = 0; - iproc_priv.pdev[iproc_priv.spu.num_spu] = pdev; - platform_set_drvdata(iproc_priv.pdev[iproc_priv.spu.num_spu], + iproc_priv.pdev = pdev; + platform_set_drvdata(iproc_priv.pdev, &iproc_priv); err = spu_dt_read(pdev); @@ -4872,12 +4874,6 @@ int bcm_spu_probe(struct platform_device *pdev) if (err < 0) goto failure; - iproc_priv.spu.num_spu++; - - /* If already initialized, we've just added another SPU and are done */ - if (iproc_priv.inited) - return 0; - if (spu->spu_type == SPU_TYPE_SPUM) iproc_priv.bcm_hdr_len = 8; else if (spu->spu_type == SPU_TYPE_SPU2) @@ -4893,8 +4889,6 @@ int bcm_spu_probe(struct platform_device *pdev) if (err < 0) goto fail_reg; - iproc_priv.inited = true; - return 0; fail_reg: diff --git a/drivers/crypto/bcm/cipher.h b/drivers/crypto/bcm/cipher.h index 51dca529ce8f..57a55eb2a255 100644 --- a/drivers/crypto/bcm/cipher.h +++ b/drivers/crypto/bcm/cipher.h @@ -427,10 +427,13 @@ struct spu_hw { /* The number of SPUs on this platform */ u32 num_spu; + + /* The number of SPU channels on this platform */ + u32 num_chan; }; struct device_private { - struct platform_device *pdev[MAX_SPUS]; + struct platform_device *pdev; struct spu_hw spu; @@ -470,12 +473,10 @@ struct device_private { /* Number of ICV check failures for AEAD messages */ atomic_t bad_icv; - struct mbox_client mcl[MAX_SPUS]; - /* Array of mailbox channel pointers, one for each channel */ - struct mbox_chan *mbox[MAX_SPUS]; + struct mbox_client mcl; - /* Driver initialized */ - bool inited; + /* Array of mailbox channel pointers, one for each channel */ + struct mbox_chan **mbox; }; extern struct device_private iproc_priv; diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig index e36aeacd7635..1eb852765469 100644 --- a/drivers/crypto/caam/Kconfig +++ b/drivers/crypto/caam/Kconfig @@ -1,6 +1,7 @@ config CRYPTO_DEV_FSL_CAAM tristate "Freescale CAAM-Multicore driver backend" depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE + select SOC_BUS help Enables the driver module for Freescale's Cryptographic Accelerator and Assurance Module (CAAM), also known as the SEC version 4 (SEC4). @@ -141,10 +142,6 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API To compile this as a module, choose M here: the module will be called caamrng. 
-config CRYPTO_DEV_FSL_CAAM_IMX - def_bool SOC_IMX6 || SOC_IMX7D - depends on CRYPTO_DEV_FSL_CAAM - config CRYPTO_DEV_FSL_CAAM_DEBUG bool "Enable debug output in CAAM driver" depends on CRYPTO_DEV_FSL_CAAM diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 0488b7f81dcf..54f3b375a453 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -81,40 +81,6 @@ #define debug(format, arg...) #endif -#ifdef DEBUG -#include - -static void dbg_dump_sg(const char *level, const char *prefix_str, - int prefix_type, int rowsize, int groupsize, - struct scatterlist *sg, size_t tlen, bool ascii) -{ - struct scatterlist *it; - void *it_page; - size_t len; - void *buf; - - for (it = sg; it != NULL && tlen > 0 ; it = sg_next(sg)) { - /* - * make sure the scatterlist's page - * has a valid virtual memory mapping - */ - it_page = kmap_atomic(sg_page(it)); - if (unlikely(!it_page)) { - printk(KERN_ERR "dbg_dump_sg: kmap failed\n"); - return; - } - - buf = it_page + it->offset; - len = min_t(size_t, tlen, it->length); - print_hex_dump(level, prefix_str, prefix_type, rowsize, - groupsize, buf, len, ascii); - tlen -= len; - - kunmap_atomic(it_page); - } -} -#endif - static struct list_head alg_list; struct caam_alg_entry { @@ -898,10 +864,10 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, req->info, edesc->src_nents > 1 ? 100 : ivsize, 1); - dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, req->dst, - edesc->dst_nents > 1 ? 100 : req->nbytes, 1); #endif + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->dst, + edesc->dst_nents > 1 ? 100 : req->nbytes, 1); ablkcipher_unmap(jrdev, edesc, req); @@ -937,10 +903,10 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, req->info, ivsize, 1); - dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, req->dst, - edesc->dst_nents > 1 ? 100 : req->nbytes, 1); #endif + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->dst, + edesc->dst_nents > 1 ? 100 : req->nbytes, 1); ablkcipher_unmap(jrdev, edesc, req); @@ -1107,10 +1073,10 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr, ivsize, 1); pr_err("asked=%d, nbytes%d\n", (int)edesc->src_nents > 1 ? 100 : req->nbytes, req->nbytes); - dbg_dump_sg(KERN_ERR, "src @"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, req->src, - edesc->src_nents > 1 ? 100 : req->nbytes, 1); #endif + caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->src, + edesc->src_nents > 1 ? 100 : req->nbytes, 1); len = desc_len(sh_desc); init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); @@ -1164,10 +1130,10 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr, print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ", DUMP_PREFIX_ADDRESS, 16, 4, req->info, ivsize, 1); - dbg_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ", - DUMP_PREFIX_ADDRESS, 16, 4, req->src, - edesc->src_nents > 1 ? 100 : req->nbytes, 1); #endif + caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->src, + edesc->src_nents > 1 ? 
100 : req->nbytes, 1); len = desc_len(sh_desc); init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); @@ -1449,11 +1415,9 @@ static int aead_decrypt(struct aead_request *req) u32 *desc; int ret = 0; -#ifdef DEBUG - dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, req->src, - req->assoclen + req->cryptlen, 1); -#endif + caam_dump_sg(KERN_ERR, "dec src@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->src, + req->assoclen + req->cryptlen, 1); /* allocate extended descriptor */ edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN, diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c index 6f9c7ec0e339..530c14ee32de 100644 --- a/drivers/crypto/caam/caamalg_desc.c +++ b/drivers/crypto/caam/caamalg_desc.c @@ -599,7 +599,7 @@ void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata, /* skip key loading if they are loaded due to sharing */ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | - JUMP_COND_SHRD | JUMP_COND_SELF); + JUMP_COND_SHRD); if (cdata->key_inline) append_key_as_imm(desc, cdata->key_virt, cdata->keylen, cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); @@ -688,8 +688,7 @@ void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata, /* skip key loading if they are loaded due to sharing */ key_jump_cmd = append_jump(desc, JUMP_JSL | - JUMP_TEST_ALL | JUMP_COND_SHRD | - JUMP_COND_SELF); + JUMP_TEST_ALL | JUMP_COND_SHRD); if (cdata->key_inline) append_key_as_imm(desc, cdata->key_virt, cdata->keylen, cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index 78c4c0485c58..2eefc4a26bc2 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c @@ -12,7 +12,6 @@ #include "intern.h" #include "desc_constr.h" #include "error.h" -#include "sg_sw_sec4.h" #include "sg_sw_qm.h" #include "key_gen.h" #include "qi.h" @@ -399,6 +398,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, * @iv_dma: dma address of iv for checking continuity and link table * @qm_sg_bytes: length of dma mapped h/w link table * @qm_sg_dma: bus physical mapped address of h/w link table + * @assoclen: associated data length, in CAAM endianness * @assoclen_dma: bus physical mapped address of req->assoclen * @drv_req: driver-specific request structure * @sgt: the h/w link table @@ -409,8 +409,12 @@ struct aead_edesc { dma_addr_t iv_dma; int qm_sg_bytes; dma_addr_t qm_sg_dma; + unsigned int assoclen; dma_addr_t assoclen_dma; struct caam_drv_req drv_req; +#define CAAM_QI_MAX_AEAD_SG \ + ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) / \ + sizeof(struct qm_sg_entry)) struct qm_sg_entry sgt[0]; }; @@ -431,6 +435,9 @@ struct ablkcipher_edesc { int qm_sg_bytes; dma_addr_t qm_sg_dma; struct caam_drv_req drv_req; +#define CAAM_QI_MAX_ABLKCIPHER_SG \ + ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \ + sizeof(struct qm_sg_entry)) struct qm_sg_entry sgt[0]; }; @@ -660,6 +667,14 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, */ qm_sg_ents = 1 + !!ivsize + mapped_src_nents + (mapped_dst_nents > 1 ? 
mapped_dst_nents : 0); + if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) { + dev_err(qidev, "Insufficient S/G entries: %d > %lu\n", + qm_sg_ents, CAAM_QI_MAX_AEAD_SG); + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, + iv_dma, ivsize, op_type, 0, 0); + qi_cache_free(edesc); + return ERR_PTR(-ENOMEM); + } sg_table = &edesc->sgt[0]; qm_sg_bytes = qm_sg_ents * sizeof(*sg_table); @@ -670,7 +685,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, edesc->drv_req.cbk = aead_done; edesc->drv_req.drv_ctx = drv_ctx; - edesc->assoclen_dma = dma_map_single(qidev, &req->assoclen, 4, + edesc->assoclen = cpu_to_caam32(req->assoclen); + edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4, DMA_TO_DEVICE); if (dma_mapping_error(qidev, edesc->assoclen_dma)) { dev_err(qidev, "unable to map assoclen\n"); @@ -776,9 +792,9 @@ static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status) struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher); struct device *qidev = caam_ctx->qidev; -#ifdef DEBUG int ivsize = crypto_ablkcipher_ivsize(ablkcipher); +#ifdef DEBUG dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status); #endif @@ -791,14 +807,21 @@ static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status) print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, req->info, edesc->src_nents > 1 ? 100 : ivsize, 1); - dbg_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, req->dst, - edesc->dst_nents > 1 ? 100 : req->nbytes, 1); + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->dst, + edesc->dst_nents > 1 ? 100 : req->nbytes, 1); #endif ablkcipher_unmap(qidev, edesc, req); qi_cache_free(edesc); + /* + * The crypto API expects us to set the IV (req->info) to the last + * ciphertext block. This is used e.g. by the CTS mode. + */ + scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize, + ivsize, 0); + ablkcipher_request_complete(req, status); } @@ -880,6 +903,15 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request } dst_sg_idx = qm_sg_ents; + qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0; + if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) { + dev_err(qidev, "Insufficient S/G entries: %d > %lu\n", + qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG); + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, + iv_dma, ivsize, op_type, 0, 0); + return ERR_PTR(-ENOMEM); + } + /* allocate space for base edesc and link tables */ edesc = qi_cache_alloc(GFP_DMA | flags); if (unlikely(!edesc)) { @@ -892,7 +924,6 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request edesc->src_nents = src_nents; edesc->dst_nents = dst_nents; edesc->iv_dma = iv_dma; - qm_sg_ents += mapped_dst_nents > 1 ? 
mapped_dst_nents : 0; sg_table = &edesc->sgt[0]; edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table); edesc->drv_req.app_ctx = req; @@ -1026,6 +1057,14 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( qm_sg_ents += 1 + mapped_dst_nents; } + if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) { + dev_err(qidev, "Insufficient S/G entries: %d > %lu\n", + qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG); + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, + iv_dma, ivsize, GIVENCRYPT, 0, 0); + return ERR_PTR(-ENOMEM); + } + /* allocate space for base edesc and link tables */ edesc = qi_cache_alloc(GFP_DMA | flags); if (!edesc) { @@ -1968,7 +2007,7 @@ static struct caam_aead_alg driver_aeads[] = { .cra_name = "echainiv(authenc(hmac(sha256)," "cbc(des)))", .cra_driver_name = "echainiv-authenc-" - "hmac-sha256-cbc-desi-" + "hmac-sha256-cbc-des-" "caam-qi", .cra_blocksize = DES_BLOCK_SIZE, }, diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 910ec61cae09..698580b60b2f 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -791,8 +791,8 @@ static int ahash_update_ctx(struct ahash_request *req) to_hash - *buflen, *next_buflen, 0); } else { - (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= - cpu_to_caam32(SEC4_SG_LEN_FIN); + sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - + 1); } desc = edesc->hw_desc; @@ -882,8 +882,7 @@ static int ahash_final_ctx(struct ahash_request *req) if (ret) goto unmap_ctx; - (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= - cpu_to_caam32(SEC4_SG_LEN_FIN); + sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1); edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, sec4_sg_bytes, DMA_TO_DEVICE); diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c index 41398da3edf4..fde07d4ff019 100644 --- a/drivers/crypto/caam/caamrng.c +++ b/drivers/crypto/caam/caamrng.c @@ -285,11 +285,7 @@ static int caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev) if (err) return err; - err = caam_init_buf(ctx, 1); - if (err) - return err; - - return 0; + return caam_init_buf(ctx, 1); } static struct hwrng caam_rng = { diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index dd353e342c12..027e121c6f70 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -7,6 +7,7 @@ #include #include #include +#include #include "compat.h" #include "regs.h" @@ -17,6 +18,10 @@ bool caam_little_end; EXPORT_SYMBOL(caam_little_end); +bool caam_dpaa2; +EXPORT_SYMBOL(caam_dpaa2); +bool caam_imx; +EXPORT_SYMBOL(caam_imx); #ifdef CONFIG_CAAM_QI #include "qi.h" @@ -26,19 +31,11 @@ EXPORT_SYMBOL(caam_little_end); * i.MX targets tend to have clock control subsystems that can * enable/disable clocking to our device. */ -#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX static inline struct clk *caam_drv_identify_clk(struct device *dev, char *clk_name) { - return devm_clk_get(dev, clk_name); + return caam_imx ? devm_clk_get(dev, clk_name) : NULL; } -#else -static inline struct clk *caam_drv_identify_clk(struct device *dev, - char *clk_name) -{ - return NULL; -} -#endif /* * Descriptor to instantiate RNG State Handle 0 in normal mode and @@ -319,8 +316,11 @@ static int caam_remove(struct platform_device *pdev) caam_qi_shutdown(ctrlpriv->qidev); #endif - /* De-initialize RNG state handles initialized by this driver. */ - if (ctrlpriv->rng4_sh_init) + /* + * De-initialize RNG state handles initialized by this driver. + * In case of DPAA 2.x, RNG is managed by MC firmware. 
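The ctrl.c hunk above drops the CONFIG_CRYPTO_DEV_FSL_CAAM_IMX compile-time switch in favour of a runtime caam_imx flag (set a little further down in caam_probe() via soc_device_match()), so one kernel image serves both i.MX and non-i.MX parts. Below is a minimal sketch of that gating pattern; caam_drv_identify_clk() and caam_imx mirror the patch, while caam_enable_ipg_clk() is a hypothetical call site added purely for illustration.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static bool caam_imx;	/* set once at probe time, e.g. from soc_device_match() */

/* Non-i.MX SoCs expose no gateable CAAM clocks, so report "no clock". */
static inline struct clk *caam_drv_identify_clk(struct device *dev,
						char *clk_name)
{
	return caam_imx ? devm_clk_get(dev, clk_name) : NULL;
}

/* Hypothetical caller: enable the "ipg" clock only where one exists. */
static int caam_enable_ipg_clk(struct device *dev)
{
	struct clk *clk = caam_drv_identify_clk(dev, "ipg");

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* clk_prepare_enable() treats a NULL clock as a no-op and returns 0. */
	return clk_prepare_enable(clk);
}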
+ */ + if (!caam_dpaa2 && ctrlpriv->rng4_sh_init) deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init); /* Shut down debug views */ @@ -425,6 +425,10 @@ static int caam_probe(struct platform_device *pdev) { int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN; u64 caam_id; + static const struct soc_device_attribute imx_soc[] = { + {.family = "Freescale i.MX"}, + {}, + }; struct device *dev; struct device_node *nprop, *np; struct caam_ctrl __iomem *ctrl; @@ -444,9 +448,10 @@ static int caam_probe(struct platform_device *pdev) dev = &pdev->dev; dev_set_drvdata(dev, ctrlpriv); - ctrlpriv->pdev = pdev; nprop = pdev->dev.of_node; + caam_imx = (bool)soc_device_match(imx_soc); + /* Enable clocking */ clk = caam_drv_identify_clk(&pdev->dev, "ipg"); if (IS_ERR(clk)) { @@ -553,12 +558,17 @@ static int caam_probe(struct platform_device *pdev) /* * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel, - * long pointers in master configuration register + * long pointers in master configuration register. + * In case of DPAA 2.x, Management Complex firmware performs + * the configuration. */ - clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR, - MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF | - MCFGR_WDENABLE | MCFGR_LARGE_BURST | - (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0)); + caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2); + if (!caam_dpaa2) + clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR, + MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF | + MCFGR_WDENABLE | MCFGR_LARGE_BURST | + (sizeof(dma_addr_t) == sizeof(u64) ? + MCFGR_LONG_PTR : 0)); /* * Read the Compile Time paramters and SCFGR to determine @@ -587,7 +597,9 @@ static int caam_probe(struct platform_device *pdev) JRSTART_JR3_START); if (sizeof(dma_addr_t) == sizeof(u64)) { - if (of_device_is_compatible(nprop, "fsl,sec-v5.0")) + if (caam_dpaa2) + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49)); + else if (of_device_is_compatible(nprop, "fsl,sec-v5.0")) ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40)); else ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36)); @@ -630,11 +642,9 @@ static int caam_probe(struct platform_device *pdev) ring++; } - /* Check to see if QI present. If so, enable */ - ctrlpriv->qi_present = - !!(rd_reg32(&ctrl->perfmon.comp_parms_ms) & - CTPR_MS_QI_MASK); - if (ctrlpriv->qi_present) { + /* Check to see if (DPAA 1.x) QI present. If so, enable */ + ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK); + if (ctrlpriv->qi_present && !caam_dpaa2) { ctrlpriv->qi = (struct caam_queue_if __iomem __force *) ((__force uint8_t *)ctrl + BLOCK_OFFSET * QI_BLOCK_NUMBER @@ -662,8 +672,10 @@ static int caam_probe(struct platform_device *pdev) /* * If SEC has RNG version >= 4 and RNG state handle has not been * already instantiated, do RNG instantiation + * In case of DPAA 2.x, RNG is managed by MC firmware. */ - if ((cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) { + if (!caam_dpaa2 && + (cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) { ctrlpriv->rng4_sh_init = rd_reg32(&ctrl->r4tst[0].rdsta); /* @@ -731,63 +743,43 @@ static int caam_probe(struct platform_device *pdev) /* Report "alive" for developer to see */ dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id, caam_get_era()); - dev_info(dev, "job rings = %d, qi = %d\n", - ctrlpriv->total_jobrs, ctrlpriv->qi_present); + dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n", + ctrlpriv->total_jobrs, ctrlpriv->qi_present, + caam_dpaa2 ? 
"yes" : "no"); #ifdef CONFIG_DEBUG_FS - - ctrlpriv->ctl_rq_dequeued = - debugfs_create_file("rq_dequeued", - S_IRUSR | S_IRGRP | S_IROTH, - ctrlpriv->ctl, &perfmon->req_dequeued, - &caam_fops_u64_ro); - ctrlpriv->ctl_ob_enc_req = - debugfs_create_file("ob_rq_encrypted", - S_IRUSR | S_IRGRP | S_IROTH, - ctrlpriv->ctl, &perfmon->ob_enc_req, - &caam_fops_u64_ro); - ctrlpriv->ctl_ib_dec_req = - debugfs_create_file("ib_rq_decrypted", - S_IRUSR | S_IRGRP | S_IROTH, - ctrlpriv->ctl, &perfmon->ib_dec_req, - &caam_fops_u64_ro); - ctrlpriv->ctl_ob_enc_bytes = - debugfs_create_file("ob_bytes_encrypted", - S_IRUSR | S_IRGRP | S_IROTH, - ctrlpriv->ctl, &perfmon->ob_enc_bytes, - &caam_fops_u64_ro); - ctrlpriv->ctl_ob_prot_bytes = - debugfs_create_file("ob_bytes_protected", - S_IRUSR | S_IRGRP | S_IROTH, - ctrlpriv->ctl, &perfmon->ob_prot_bytes, - &caam_fops_u64_ro); - ctrlpriv->ctl_ib_dec_bytes = - debugfs_create_file("ib_bytes_decrypted", - S_IRUSR | S_IRGRP | S_IROTH, - ctrlpriv->ctl, &perfmon->ib_dec_bytes, - &caam_fops_u64_ro); - ctrlpriv->ctl_ib_valid_bytes = - debugfs_create_file("ib_bytes_validated", - S_IRUSR | S_IRGRP | S_IROTH, - ctrlpriv->ctl, &perfmon->ib_valid_bytes, - &caam_fops_u64_ro); + debugfs_create_file("rq_dequeued", S_IRUSR | S_IRGRP | S_IROTH, + ctrlpriv->ctl, &perfmon->req_dequeued, + &caam_fops_u64_ro); + debugfs_create_file("ob_rq_encrypted", S_IRUSR | S_IRGRP | S_IROTH, + ctrlpriv->ctl, &perfmon->ob_enc_req, + &caam_fops_u64_ro); + debugfs_create_file("ib_rq_decrypted", S_IRUSR | S_IRGRP | S_IROTH, + ctrlpriv->ctl, &perfmon->ib_dec_req, + &caam_fops_u64_ro); + debugfs_create_file("ob_bytes_encrypted", S_IRUSR | S_IRGRP | S_IROTH, + ctrlpriv->ctl, &perfmon->ob_enc_bytes, + &caam_fops_u64_ro); + debugfs_create_file("ob_bytes_protected", S_IRUSR | S_IRGRP | S_IROTH, + ctrlpriv->ctl, &perfmon->ob_prot_bytes, + &caam_fops_u64_ro); + debugfs_create_file("ib_bytes_decrypted", S_IRUSR | S_IRGRP | S_IROTH, + ctrlpriv->ctl, &perfmon->ib_dec_bytes, + &caam_fops_u64_ro); + debugfs_create_file("ib_bytes_validated", S_IRUSR | S_IRGRP | S_IROTH, + ctrlpriv->ctl, &perfmon->ib_valid_bytes, + &caam_fops_u64_ro); /* Controller level - global status values */ - ctrlpriv->ctl_faultaddr = - debugfs_create_file("fault_addr", - S_IRUSR | S_IRGRP | S_IROTH, - ctrlpriv->ctl, &perfmon->faultaddr, - &caam_fops_u32_ro); - ctrlpriv->ctl_faultdetail = - debugfs_create_file("fault_detail", - S_IRUSR | S_IRGRP | S_IROTH, - ctrlpriv->ctl, &perfmon->faultdetail, - &caam_fops_u32_ro); - ctrlpriv->ctl_faultstatus = - debugfs_create_file("fault_status", - S_IRUSR | S_IRGRP | S_IROTH, - ctrlpriv->ctl, &perfmon->status, - &caam_fops_u32_ro); + debugfs_create_file("fault_addr", S_IRUSR | S_IRGRP | S_IROTH, + ctrlpriv->ctl, &perfmon->faultaddr, + &caam_fops_u32_ro); + debugfs_create_file("fault_detail", S_IRUSR | S_IRGRP | S_IROTH, + ctrlpriv->ctl, &perfmon->faultdetail, + &caam_fops_u32_ro); + debugfs_create_file("fault_status", S_IRUSR | S_IRGRP | S_IROTH, + ctrlpriv->ctl, &perfmon->status, + &caam_fops_u32_ro); /* Internal covering keys (useful in non-secure mode only) */ ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0]; diff --git a/drivers/crypto/caam/ctrl.h b/drivers/crypto/caam/ctrl.h index cac5402a46eb..7e7bf68c9ef5 100644 --- a/drivers/crypto/caam/ctrl.h +++ b/drivers/crypto/caam/ctrl.h @@ -10,4 +10,6 @@ /* Prototypes for backend-level services exposed to APIs */ int caam_get_era(void); +extern bool caam_dpaa2; + #endif /* CTRL_H */ diff --git a/drivers/crypto/caam/error.c 
b/drivers/crypto/caam/error.c index 6f44ccb55c63..3d639f3b45aa 100644 --- a/drivers/crypto/caam/error.c +++ b/drivers/crypto/caam/error.c @@ -9,6 +9,46 @@ #include "desc.h" #include "error.h" +#ifdef DEBUG +#include + +void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type, + int rowsize, int groupsize, struct scatterlist *sg, + size_t tlen, bool ascii) +{ + struct scatterlist *it; + void *it_page; + size_t len; + void *buf; + + for (it = sg; it && tlen > 0 ; it = sg_next(sg)) { + /* + * make sure the scatterlist's page + * has a valid virtual memory mapping + */ + it_page = kmap_atomic(sg_page(it)); + if (unlikely(!it_page)) { + pr_err("caam_dump_sg: kmap failed\n"); + return; + } + + buf = it_page + it->offset; + len = min_t(size_t, tlen, it->length); + print_hex_dump(level, prefix_str, prefix_type, rowsize, + groupsize, buf, len, ascii); + tlen -= len; + + kunmap_atomic(it_page); + } +} +#else +void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type, + int rowsize, int groupsize, struct scatterlist *sg, + size_t tlen, bool ascii) +{} +#endif /* DEBUG */ +EXPORT_SYMBOL(caam_dump_sg); + static const struct { u8 value; const char *error_text; diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h index b6350b0d9153..250e1a21c473 100644 --- a/drivers/crypto/caam/error.h +++ b/drivers/crypto/caam/error.h @@ -8,4 +8,8 @@ #define CAAM_ERROR_H #define CAAM_ERROR_STR_MAX 302 void caam_jr_strstatus(struct device *jrdev, u32 status); + +void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type, + int rowsize, int groupsize, struct scatterlist *sg, + size_t tlen, bool ascii); #endif /* CAAM_ERROR_H */ diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h index 85b6c5835b8f..a52361258d3a 100644 --- a/drivers/crypto/caam/intern.h +++ b/drivers/crypto/caam/intern.h @@ -64,12 +64,9 @@ struct caam_drv_private_jr { * Driver-private storage for a single CAAM block instance */ struct caam_drv_private { - - struct device *dev; #ifdef CONFIG_CAAM_QI struct device *qidev; #endif - struct platform_device *pdev; /* Physical-presence section */ struct caam_ctrl __iomem *ctrl; /* controller region */ @@ -105,16 +102,8 @@ struct caam_drv_private { #ifdef CONFIG_DEBUG_FS struct dentry *dfs_root; struct dentry *ctl; /* controller dir */ - struct dentry *ctl_rq_dequeued, *ctl_ob_enc_req, *ctl_ib_dec_req; - struct dentry *ctl_ob_enc_bytes, *ctl_ob_prot_bytes; - struct dentry *ctl_ib_dec_bytes, *ctl_ib_valid_bytes; - struct dentry *ctl_faultaddr, *ctl_faultdetail, *ctl_faultstatus; - struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap; struct dentry *ctl_kek, *ctl_tkek, *ctl_tdsk; -#ifdef CONFIG_CAAM_QI - struct dentry *qi_congested; -#endif #endif }; diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index 1ccfb317d468..d258953ff488 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c @@ -9,6 +9,7 @@ #include #include "compat.h" +#include "ctrl.h" #include "regs.h" #include "jr.h" #include "desc.h" @@ -499,7 +500,11 @@ static int caam_jr_probe(struct platform_device *pdev) jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl; if (sizeof(dma_addr_t) == sizeof(u64)) { - if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring")) + if (caam_dpaa2) + error = dma_set_mask_and_coherent(jrdev, + DMA_BIT_MASK(49)); + else if (of_device_is_compatible(nprop, + "fsl,sec-v5.0-job-ring")) error = dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(40)); else diff --git 
a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c index 1990ed460c46..e4cf00014233 100644 --- a/drivers/crypto/caam/qi.c +++ b/drivers/crypto/caam/qi.c @@ -24,9 +24,6 @@ */ #define MAX_RSP_FQ_BACKLOG_PER_CPU 256 -/* Length of a single buffer in the QI driver memory cache */ -#define CAAM_QI_MEMCACHE_SIZE 512 - #define CAAM_QI_ENQUEUE_RETRIES 10000 #define CAAM_NAPI_WEIGHT 63 @@ -55,6 +52,7 @@ struct caam_qi_pcpu_priv { } ____cacheline_aligned; static DEFINE_PER_CPU(struct caam_qi_pcpu_priv, pcpu_qipriv); +static DEFINE_PER_CPU(int, last_cpu); /* * caam_qi_priv - CAAM QI backend private params @@ -203,8 +201,8 @@ static struct qman_fq *create_caam_req_fq(struct device *qidev, goto init_req_fq_fail; } - dev_info(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid, - smp_processor_id()); + dev_dbg(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid, + smp_processor_id()); return req_fq; init_req_fq_fail: @@ -277,6 +275,7 @@ static int kill_fq(struct device *qidev, struct qman_fq *fq) dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid); qman_destroy_fq(fq); + kfree(fq); return ret; } @@ -342,8 +341,7 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc) drv_ctx->req_fq = old_fq; if (kill_fq(qidev, new_fq)) - dev_warn(qidev, "New CAAM FQ: %u kill failed\n", - new_fq->fqid); + dev_warn(qidev, "New CAAM FQ kill failed\n"); return ret; } @@ -373,10 +371,9 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc) drv_ctx->req_fq = old_fq; if (kill_fq(qidev, new_fq)) - dev_warn(qidev, "New CAAM FQ: %u kill failed\n", - new_fq->fqid); + dev_warn(qidev, "New CAAM FQ kill failed\n"); } else if (kill_fq(qidev, old_fq)) { - dev_warn(qidev, "Old CAAM FQ: %u kill failed\n", old_fq->fqid); + dev_warn(qidev, "Old CAAM FQ kill failed\n"); } return 0; @@ -392,7 +389,6 @@ struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev, dma_addr_t hwdesc; struct caam_drv_ctx *drv_ctx; const cpumask_t *cpus = qman_affine_cpus(); - static DEFINE_PER_CPU(int, last_cpu); num_words = desc_len(sh_desc); if (num_words > MAX_SDLEN) { @@ -511,7 +507,6 @@ int caam_qi_shutdown(struct device *qidev) if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i))) dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i); - kfree(per_cpu(pcpu_qipriv.rsp_fq, i)); } /* @@ -646,7 +641,7 @@ static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu) per_cpu(pcpu_qipriv.rsp_fq, cpu) = fq; - dev_info(qidev, "Allocated response FQ %u for CPU %u", fq->fqid, cpu); + dev_dbg(qidev, "Allocated response FQ %u for CPU %u", fq->fqid, cpu); return 0; } @@ -679,7 +674,7 @@ static int init_cgr(struct device *qidev) return ret; } - dev_info(qidev, "Congestion threshold set to %llu\n", val); + dev_dbg(qidev, "Congestion threshold set to %llu\n", val); return 0; } @@ -737,6 +732,7 @@ int caam_qi_init(struct platform_device *caam_pdev) qi_pdev = platform_device_register_full(&qi_pdev_info); if (IS_ERR(qi_pdev)) return PTR_ERR(qi_pdev); + set_dma_ops(&qi_pdev->dev, get_dma_ops(ctrldev)); ctrlpriv = dev_get_drvdata(ctrldev); qidev = &qi_pdev->dev; @@ -795,10 +791,8 @@ int caam_qi_init(struct platform_device *caam_pdev) /* Done with the CGRs; restore the cpus allowed mask */ set_cpus_allowed_ptr(current, &old_cpumask); #ifdef CONFIG_DEBUG_FS - ctrlpriv->qi_congested = debugfs_create_file("qi_congested", 0444, - ctrlpriv->ctl, - ×_congested, - &caam_fops_u64_ro); + debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl, + ×_congested, &caam_fops_u64_ro); #endif dev_info(qidev, "Linux CAAM Queue I/F driver 
initialised\n"); return 0; diff --git a/drivers/crypto/caam/qi.h b/drivers/crypto/caam/qi.h index 33b0433f5f22..ecb21f207637 100644 --- a/drivers/crypto/caam/qi.h +++ b/drivers/crypto/caam/qi.h @@ -39,6 +39,9 @@ */ #define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ) +/* Length of a single buffer in the QI driver memory cache */ +#define CAAM_QI_MEMCACHE_SIZE 768 + extern bool caam_congested __read_mostly; /* diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index 84d2f838a063..17cfd23a38fa 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h @@ -67,6 +67,7 @@ */ extern bool caam_little_end; +extern bool caam_imx; #define caam_to_cpu(len) \ static inline u##len caam##len ## _to_cpu(u##len val) \ @@ -154,13 +155,10 @@ static inline u64 rd_reg64(void __iomem *reg) #else /* CONFIG_64BIT */ static inline void wr_reg64(void __iomem *reg, u64 data) { -#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX - if (caam_little_end) { + if (!caam_imx && caam_little_end) { wr_reg32((u32 __iomem *)(reg) + 1, data >> 32); wr_reg32((u32 __iomem *)(reg), data); - } else -#endif - { + } else { wr_reg32((u32 __iomem *)(reg), data >> 32); wr_reg32((u32 __iomem *)(reg) + 1, data); } @@ -168,41 +166,40 @@ static inline void wr_reg64(void __iomem *reg, u64 data) static inline u64 rd_reg64(void __iomem *reg) { -#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX - if (caam_little_end) + if (!caam_imx && caam_little_end) return ((u64)rd_reg32((u32 __iomem *)(reg) + 1) << 32 | (u64)rd_reg32((u32 __iomem *)(reg))); - else -#endif - return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 | - (u64)rd_reg32((u32 __iomem *)(reg) + 1)); + + return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 | + (u64)rd_reg32((u32 __iomem *)(reg) + 1)); } #endif /* CONFIG_64BIT */ +static inline u64 cpu_to_caam_dma64(dma_addr_t value) +{ + if (caam_imx) + return (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | + (u64)cpu_to_caam32(upper_32_bits(value))); + + return cpu_to_caam64(value); +} + +static inline u64 caam_dma64_to_cpu(u64 value) +{ + if (caam_imx) + return (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) | + (u64)caam32_to_cpu(upper_32_bits(value))); + + return caam64_to_cpu(value); +} + #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT -#ifdef CONFIG_SOC_IMX7D -#define cpu_to_caam_dma(value) \ - (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \ - (u64)cpu_to_caam32(upper_32_bits(value))) -#define caam_dma_to_cpu(value) \ - (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) | \ - (u64)caam32_to_cpu(upper_32_bits(value))) -#else -#define cpu_to_caam_dma(value) cpu_to_caam64(value) -#define caam_dma_to_cpu(value) caam64_to_cpu(value) -#endif /* CONFIG_SOC_IMX7D */ +#define cpu_to_caam_dma(value) cpu_to_caam_dma64(value) +#define caam_dma_to_cpu(value) caam_dma64_to_cpu(value) #else #define cpu_to_caam_dma(value) cpu_to_caam32(value) #define caam_dma_to_cpu(value) caam32_to_cpu(value) -#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */ - -#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX -#define cpu_to_caam_dma64(value) \ - (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \ - (u64)cpu_to_caam32(upper_32_bits(value))) -#else -#define cpu_to_caam_dma64(value) cpu_to_caam64(value) -#endif +#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */ /* * jr_outentry @@ -293,6 +290,7 @@ struct caam_perfmon { u32 cha_rev_ls; /* CRNR - CHA Rev No. 
Least significant half*/ #define CTPR_MS_QI_SHIFT 25 #define CTPR_MS_QI_MASK (0x1ull << CTPR_MS_QI_SHIFT) +#define CTPR_MS_DPAA2 BIT(13) #define CTPR_MS_VIRT_EN_INCL 0x00000001 #define CTPR_MS_VIRT_EN_POR 0x00000002 #define CTPR_MS_PG_SZ_MASK 0x10 diff --git a/drivers/crypto/caam/sg_sw_qm2.h b/drivers/crypto/caam/sg_sw_qm2.h new file mode 100644 index 000000000000..31b440757146 --- /dev/null +++ b/drivers/crypto/caam/sg_sw_qm2.h @@ -0,0 +1,81 @@ +/* + * Copyright 2015-2016 Freescale Semiconductor, Inc. + * Copyright 2017 NXP + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the names of the above-listed copyright holders nor the + * names of any contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
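The regs.h hunk above makes the i.MX quirk a runtime decision as well: cpu_to_caam_dma64() hands the CAAM a 64-bit bus address with its 32-bit halves swapped on i.MX, each half still going through cpu_to_caam32(). The userspace model below shows only the word swap; the per-word byte swap is deliberately omitted, and the sample address is arbitrary.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

/* i.MX ordering: the low 32 bits land in the upper half of the value
 * written to the descriptor, the high 32 bits in the lower half. */
static uint64_t imx_dma_order(uint64_t dma)
{
	return ((uint64_t)lower_32_bits(dma) << 32) | upper_32_bits(dma);
}

int main(void)
{
	uint64_t dma = 0x0000000112345678ULL;	/* arbitrary example address */

	printf("cpu order: 0x%016" PRIx64 "\n", dma);
	printf("imx order: 0x%016" PRIx64 "\n", imx_dma_order(dma));
	return 0;
}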
+ */ + +#ifndef _SG_SW_QM2_H_ +#define _SG_SW_QM2_H_ + +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h" + +static inline void dma_to_qm_sg_one(struct dpaa2_sg_entry *qm_sg_ptr, + dma_addr_t dma, u32 len, u16 offset) +{ + dpaa2_sg_set_addr(qm_sg_ptr, dma); + dpaa2_sg_set_format(qm_sg_ptr, dpaa2_sg_single); + dpaa2_sg_set_final(qm_sg_ptr, false); + dpaa2_sg_set_len(qm_sg_ptr, len); + dpaa2_sg_set_bpid(qm_sg_ptr, 0); + dpaa2_sg_set_offset(qm_sg_ptr, offset); +} + +/* + * convert scatterlist to h/w link table format + * but does not have final bit; instead, returns last entry + */ +static inline struct dpaa2_sg_entry * +sg_to_qm_sg(struct scatterlist *sg, int sg_count, + struct dpaa2_sg_entry *qm_sg_ptr, u16 offset) +{ + while (sg_count && sg) { + dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg), + sg_dma_len(sg), offset); + qm_sg_ptr++; + sg = sg_next(sg); + sg_count--; + } + return qm_sg_ptr - 1; +} + +/* + * convert scatterlist to h/w link table format + * scatterlist must have been previously dma mapped + */ +static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count, + struct dpaa2_sg_entry *qm_sg_ptr, + u16 offset) +{ + qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset); + dpaa2_sg_set_final(qm_sg_ptr, true); +} + +#endif /* _SG_SW_QM2_H_ */ diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h index c6adad09c972..936b1b630058 100644 --- a/drivers/crypto/caam/sg_sw_sec4.h +++ b/drivers/crypto/caam/sg_sw_sec4.h @@ -5,7 +5,13 @@ * */ +#ifndef _SG_SW_SEC4_H_ +#define _SG_SW_SEC4_H_ + +#include "ctrl.h" #include "regs.h" +#include "sg_sw_qm2.h" +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h" struct sec4_sg_entry { u64 ptr; @@ -19,9 +25,15 @@ struct sec4_sg_entry { static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr, dma_addr_t dma, u32 len, u16 offset) { - sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma); - sec4_sg_ptr->len = cpu_to_caam32(len); - sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset & SEC4_SG_OFFSET_MASK); + if (caam_dpaa2) { + dma_to_qm_sg_one((struct dpaa2_sg_entry *)sec4_sg_ptr, dma, len, + offset); + } else { + sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma); + sec4_sg_ptr->len = cpu_to_caam32(len); + sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset & + SEC4_SG_OFFSET_MASK); + } #ifdef DEBUG print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ", DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr, @@ -47,6 +59,14 @@ sg_to_sec4_sg(struct scatterlist *sg, int sg_count, return sec4_sg_ptr - 1; } +static inline void sg_to_sec4_set_last(struct sec4_sg_entry *sec4_sg_ptr) +{ + if (caam_dpaa2) + dpaa2_sg_set_final((struct dpaa2_sg_entry *)sec4_sg_ptr, true); + else + sec4_sg_ptr->len |= cpu_to_caam32(SEC4_SG_LEN_FIN); +} + /* * convert scatterlist to h/w link table format * scatterlist must have been previously dma mapped @@ -56,20 +76,7 @@ static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count, u16 offset) { sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset); - sec4_sg_ptr->len |= cpu_to_caam32(SEC4_SG_LEN_FIN); + sg_to_sec4_set_last(sec4_sg_ptr); } -static inline struct sec4_sg_entry *sg_to_sec4_sg_len( - struct scatterlist *sg, unsigned int total, - struct sec4_sg_entry *sec4_sg_ptr) -{ - do { - unsigned int len = min(sg_dma_len(sg), total); - - dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), len, 0); - sec4_sg_ptr++; - sg = sg_next(sg); - total -= len; - } while (total); - return sec4_sg_ptr - 1; -} +#endif /* _SG_SW_SEC4_H_ */ diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c 
b/drivers/crypto/cavium/cpt/cptpf_main.c index 4119c40e7c4b..34a6d8bf229e 100644 --- a/drivers/crypto/cavium/cpt/cptpf_main.c +++ b/drivers/crypto/cavium/cpt/cptpf_main.c @@ -268,8 +268,10 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae) mcode = &cpt->mcode[cpt->next_mc_idx]; memcpy(mcode->version, (u8 *)fw_entry->data, CPT_UCODE_VERSION_SZ); mcode->code_size = ntohl(ucode->code_length) * 2; - if (!mcode->code_size) - return -EINVAL; + if (!mcode->code_size) { + ret = -EINVAL; + goto fw_release; + } mcode->is_ae = is_ae; mcode->core_mask = 0ULL; @@ -280,7 +282,8 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae) &mcode->phys_base, GFP_KERNEL); if (!mcode->code) { dev_err(dev, "Unable to allocate space for microcode"); - return -ENOMEM; + ret = -ENOMEM; + goto fw_release; } memcpy((void *)mcode->code, (void *)(fw_entry->data + sizeof(*ucode)), @@ -302,12 +305,14 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae) ret = do_cpt_init(cpt, mcode); if (ret) { dev_err(dev, "do_cpt_init failed with ret: %d\n", ret); - return ret; + goto fw_release; } dev_info(dev, "Microcode Loaded %s\n", mcode->version); mcode->is_mc_valid = 1; cpt->next_mc_idx++; + +fw_release: release_firmware(fw_entry); return ret; diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c index 9ccefb9b7232..fee7cb2ce747 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_main.c +++ b/drivers/crypto/cavium/nitrox/nitrox_main.c @@ -513,8 +513,10 @@ static int nitrox_probe(struct pci_dev *pdev, pci_set_master(pdev); ndev = kzalloc(sizeof(*ndev), GFP_KERNEL); - if (!ndev) + if (!ndev) { + err = -ENOMEM; goto ndev_fail; + } pci_set_drvdata(pdev, ndev); ndev->pdev = pdev; diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index 2238f77aa248..6d626606b9c5 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig @@ -1,25 +1,33 @@ config CRYPTO_DEV_CCP_DD - tristate "Cryptographic Coprocessor device driver" - depends on CRYPTO_DEV_CCP + tristate "Secure Processor device driver" default m + help + Provides AMD Secure Processor device driver. + If you choose 'M' here, this module will be called ccp. + +config CRYPTO_DEV_SP_CCP + bool "Cryptographic Coprocessor device" + default y + depends on CRYPTO_DEV_CCP_DD select HW_RANDOM select DMA_ENGINE select DMADEVICES select CRYPTO_SHA1 select CRYPTO_SHA256 help - Provides the interface to use the AMD Cryptographic Coprocessor - which can be used to offload encryption operations such as SHA, - AES and more. If you choose 'M' here, this module will be called - ccp. + Provides the support for AMD Cryptographic Coprocessor (CCP) device + which can be used to offload encryption operations such as SHA, AES + and more. config CRYPTO_DEV_CCP_CRYPTO tristate "Encryption and hashing offload support" - depends on CRYPTO_DEV_CCP_DD default m + depends on CRYPTO_DEV_CCP_DD + depends on CRYPTO_DEV_SP_CCP select CRYPTO_HASH select CRYPTO_BLKCIPHER select CRYPTO_AUTHENC + select CRYPTO_RSA help Support for using the cryptographic API with the AMD Cryptographic Coprocessor. This module supports offload of SHA and AES algorithms. 
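The cptpf_main.c change above reworks cpt_ucode_load_fw() so that every failure after a successful request_firmware() funnels through the common fw_release label, guaranteeing release_firmware() runs on success and error alike. A minimal sketch of that single-exit shape follows; load_ucode() and its size check are hypothetical and stand in for the driver's real validation and programming steps.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firmware.h>

static int load_ucode(struct device *dev, const char *name)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, name, dev);
	if (ret)
		return ret;		/* nothing acquired yet, plain return is fine */

	if (fw->size < 16) {		/* illustrative sanity check only */
		ret = -EINVAL;
		goto out_release;
	}

	/* ... parse fw->data and program the device here ... */
	ret = 0;

out_release:
	release_firmware(fw);		/* runs on every path past request_firmware() */
	return ret;
}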
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 59493fd3a751..57f8debfcfb3 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -1,12 +1,12 @@ obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o -ccp-objs := ccp-dev.o \ +ccp-objs := sp-dev.o sp-platform.o +ccp-$(CONFIG_CRYPTO_DEV_SP_CCP) += ccp-dev.o \ ccp-ops.o \ ccp-dev-v3.o \ ccp-dev-v5.o \ - ccp-platform.o \ ccp-dmaengine.o \ ccp-debugfs.o -ccp-$(CONFIG_PCI) += ccp-pci.o +ccp-$(CONFIG_PCI) += sp-pci.o obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o ccp-crypto-objs := ccp-crypto-main.o \ @@ -15,4 +15,5 @@ ccp-crypto-objs := ccp-crypto-main.o \ ccp-crypto-aes-xts.o \ ccp-crypto-aes-galois.o \ ccp-crypto-des3.o \ + ccp-crypto-rsa.o \ ccp-crypto-sha.o diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c index 38ee6f348ea9..52313524a4dd 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c +++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c @@ -1,7 +1,7 @@ /* * AMD Cryptographic Coprocessor (CCP) AES GCM crypto API support * - * Copyright (C) 2016 Advanced Micro Devices, Inc. + * Copyright (C) 2016,2017 Advanced Micro Devices, Inc. * * Author: Gary R Hook * diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c index 58a4244b4752..94b5bcf5b628 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c +++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c @@ -1,8 +1,9 @@ /* * AMD Cryptographic Coprocessor (CCP) AES XTS crypto API support * - * Copyright (C) 2013 Advanced Micro Devices, Inc. + * Copyright (C) 2013,2017 Advanced Micro Devices, Inc. * + * Author: Gary R Hook * Author: Tom Lendacky * * This program is free software; you can redistribute it and/or modify @@ -15,6 +16,7 @@ #include #include #include +#include #include #include @@ -37,46 +39,26 @@ struct ccp_unit_size_map { u32 value; }; -static struct ccp_unit_size_map unit_size_map[] = { +static struct ccp_unit_size_map xts_unit_sizes[] = { { - .size = 4096, - .value = CCP_XTS_AES_UNIT_SIZE_4096, - }, - { - .size = 2048, - .value = CCP_XTS_AES_UNIT_SIZE_2048, - }, - { - .size = 1024, - .value = CCP_XTS_AES_UNIT_SIZE_1024, - }, - { - .size = 512, - .value = CCP_XTS_AES_UNIT_SIZE_512, - }, - { - .size = 256, - .value = CCP_XTS_AES_UNIT_SIZE__LAST, - }, - { - .size = 128, - .value = CCP_XTS_AES_UNIT_SIZE__LAST, - }, - { - .size = 64, - .value = CCP_XTS_AES_UNIT_SIZE__LAST, - }, - { - .size = 32, - .value = CCP_XTS_AES_UNIT_SIZE__LAST, - }, - { - .size = 16, + .size = 16, .value = CCP_XTS_AES_UNIT_SIZE_16, }, { - .size = 1, - .value = CCP_XTS_AES_UNIT_SIZE__LAST, + .size = 512, + .value = CCP_XTS_AES_UNIT_SIZE_512, + }, + { + .size = 1024, + .value = CCP_XTS_AES_UNIT_SIZE_1024, + }, + { + .size = 2048, + .value = CCP_XTS_AES_UNIT_SIZE_2048, + }, + { + .size = 4096, + .value = CCP_XTS_AES_UNIT_SIZE_4096, }, }; @@ -96,15 +78,26 @@ static int ccp_aes_xts_complete(struct crypto_async_request *async_req, int ret) static int ccp_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key, unsigned int key_len) { - struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm)); + struct crypto_tfm *xfm = crypto_ablkcipher_tfm(tfm); + struct ccp_ctx *ctx = crypto_tfm_ctx(xfm); + unsigned int ccpversion = ccp_version(); + int ret; - /* Only support 128-bit AES key with a 128-bit Tweak key, - * otherwise use the fallback + ret = xts_check_key(xfm, key, key_len); + if (ret) + return ret; + + /* Version 3 devices support 128-bit keys; version 5 devices can + * accommodate 
128- and 256-bit keys. */ switch (key_len) { case AES_KEYSIZE_128 * 2: memcpy(ctx->u.aes.key, key, key_len); break; + case AES_KEYSIZE_256 * 2: + if (ccpversion > CCP_VERSION(3, 0)) + memcpy(ctx->u.aes.key, key, key_len); + break; } ctx->u.aes.key_len = key_len / 2; sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len); @@ -117,6 +110,8 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req, { struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm); struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); + unsigned int ccpversion = ccp_version(); + unsigned int fallback = 0; unsigned int unit; u32 unit_size; int ret; @@ -130,18 +125,32 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req, if (!req->info) return -EINVAL; + /* Check conditions under which the CCP can fulfill a request. The + * device can handle input plaintext of a length that is a multiple + * of the unit_size, bug the crypto implementation only supports + * the unit_size being equal to the input length. This limits the + * number of scenarios we can handle. + */ unit_size = CCP_XTS_AES_UNIT_SIZE__LAST; - if (req->nbytes <= unit_size_map[0].size) { - for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) { - if (!(req->nbytes & (unit_size_map[unit].size - 1))) { - unit_size = unit_size_map[unit].value; - break; - } + for (unit = 0; unit < ARRAY_SIZE(xts_unit_sizes); unit++) { + if (req->nbytes == xts_unit_sizes[unit].size) { + unit_size = unit; + break; } } - - if ((unit_size == CCP_XTS_AES_UNIT_SIZE__LAST) || - (ctx->u.aes.key_len != AES_KEYSIZE_128)) { + /* The CCP has restrictions on block sizes. Also, a version 3 device + * only supports AES-128 operations; version 5 CCPs support both + * AES-128 and -256 operations. + */ + if (unit_size == CCP_XTS_AES_UNIT_SIZE__LAST) + fallback = 1; + if ((ccpversion < CCP_VERSION(5, 0)) && + (ctx->u.aes.key_len != AES_KEYSIZE_128)) + fallback = 1; + if ((ctx->u.aes.key_len != AES_KEYSIZE_128) && + (ctx->u.aes.key_len != AES_KEYSIZE_256)) + fallback = 1; + if (fallback) { SKCIPHER_REQUEST_ON_STACK(subreq, ctx->u.aes.tfm_skcipher); /* Use the fallback to process the request for any @@ -164,6 +173,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req, memset(&rctx->cmd, 0, sizeof(rctx->cmd)); INIT_LIST_HEAD(&rctx->cmd.entry); rctx->cmd.engine = CCP_ENGINE_XTS_AES_128; + rctx->cmd.u.xts.type = CCP_AES_TYPE_128; rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT : CCP_AES_ACTION_DECRYPT; rctx->cmd.u.xts.unit_size = unit_size; diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c index 5af7347ae03c..ae87b741f9d5 100644 --- a/drivers/crypto/ccp/ccp-crypto-des3.c +++ b/drivers/crypto/ccp/ccp-crypto-des3.c @@ -1,7 +1,7 @@ /* * AMD Cryptographic Coprocessor (CCP) DES3 crypto API support * - * Copyright (C) 2016 Advanced Micro Devices, Inc. + * Copyright (C) 2016,2017 Advanced Micro Devices, Inc. * * Author: Gary R Hook * diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c index 8dccbddabef1..35a9de7fd475 100644 --- a/drivers/crypto/ccp/ccp-crypto-main.c +++ b/drivers/crypto/ccp/ccp-crypto-main.c @@ -1,7 +1,7 @@ /* * AMD Cryptographic Coprocessor (CCP) crypto API support * - * Copyright (C) 2013 Advanced Micro Devices, Inc. + * Copyright (C) 2013,2017 Advanced Micro Devices, Inc. 
* * Author: Tom Lendacky * @@ -17,6 +17,7 @@ #include #include #include +#include #include "ccp-crypto.h" @@ -37,10 +38,15 @@ static unsigned int des3_disable; module_param(des3_disable, uint, 0444); MODULE_PARM_DESC(des3_disable, "Disable use of 3DES - any non-zero value"); +static unsigned int rsa_disable; +module_param(rsa_disable, uint, 0444); +MODULE_PARM_DESC(rsa_disable, "Disable use of RSA - any non-zero value"); + /* List heads for the supported algorithms */ static LIST_HEAD(hash_algs); static LIST_HEAD(cipher_algs); static LIST_HEAD(aead_algs); +static LIST_HEAD(akcipher_algs); /* For any tfm, requests for that tfm must be returned on the order * received. With multiple queues available, the CCP can process more @@ -358,6 +364,12 @@ static int ccp_register_algs(void) return ret; } + if (!rsa_disable) { + ret = ccp_register_rsa_algs(&akcipher_algs); + if (ret) + return ret; + } + return 0; } @@ -366,6 +378,7 @@ static void ccp_unregister_algs(void) struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp; struct ccp_crypto_ablkcipher_alg *ablk_alg, *ablk_tmp; struct ccp_crypto_aead *aead_alg, *aead_tmp; + struct ccp_crypto_akcipher_alg *akc_alg, *akc_tmp; list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) { crypto_unregister_ahash(&ahash_alg->alg); @@ -384,6 +397,12 @@ static void ccp_unregister_algs(void) list_del(&aead_alg->entry); kfree(aead_alg); } + + list_for_each_entry_safe(akc_alg, akc_tmp, &akcipher_algs, entry) { + crypto_unregister_akcipher(&akc_alg->alg); + list_del(&akc_alg->entry); + kfree(akc_alg); + } } static int ccp_crypto_init(void) diff --git a/drivers/crypto/ccp/ccp-crypto-rsa.c b/drivers/crypto/ccp/ccp-crypto-rsa.c new file mode 100644 index 000000000000..e6db8672d89c --- /dev/null +++ b/drivers/crypto/ccp/ccp-crypto-rsa.c @@ -0,0 +1,299 @@ +/* + * AMD Cryptographic Coprocessor (CCP) RSA crypto API support + * + * Copyright (C) 2017 Advanced Micro Devices, Inc. + * + * Author: Gary R Hook + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
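The ccp-crypto-aes-xts.c rework shown above accepts a request only when its length exactly matches one of the supported XTS unit sizes and, for AES-256-XTS, only on a version 5 CCP; everything else is pushed to the xts(aes) software fallback. The userspace model below restates those dispatch rules in isolation: key lengths are the full two-half XTS keys, the version check is reduced to a major number, and none of the driver's request plumbing is reproduced.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define AES_KEYSIZE_128 16
#define AES_KEYSIZE_256 32

static const unsigned int xts_unit_sizes[] = { 16, 512, 1024, 2048, 4096 };

/* true: the CCP can take the request; false: fall back to xts(aes). */
static bool ccp_can_handle(unsigned int nbytes, unsigned int key_len,
			   unsigned int ccp_major)
{
	bool unit_ok = false;
	size_t i;

	for (i = 0; i < sizeof(xts_unit_sizes) / sizeof(xts_unit_sizes[0]); i++) {
		if (nbytes == xts_unit_sizes[i]) {
			unit_ok = true;
			break;
		}
	}

	if (!unit_ok)
		return false;
	if (key_len == 2 * AES_KEYSIZE_128)	/* two half keys */
		return true;
	if (key_len == 2 * AES_KEYSIZE_256)
		return ccp_major >= 5;		/* AES-256-XTS needs a v5 CCP */
	return false;
}

int main(void)
{
	printf("512B, AES-128-XTS, v3 CCP: %d\n", ccp_can_handle(512, 32, 3));
	printf("512B, AES-256-XTS, v3 CCP: %d\n", ccp_can_handle(512, 64, 3));
	printf("300B, AES-128-XTS, v5 CCP: %d\n", ccp_can_handle(300, 32, 5));
	return 0;
}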
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ccp-crypto.h" + +static inline struct akcipher_request *akcipher_request_cast( + struct crypto_async_request *req) +{ + return container_of(req, struct akcipher_request, base); +} + +static inline int ccp_copy_and_save_keypart(u8 **kpbuf, unsigned int *kplen, + const u8 *buf, size_t sz) +{ + int nskip; + + for (nskip = 0; nskip < sz; nskip++) + if (buf[nskip]) + break; + *kplen = sz - nskip; + *kpbuf = kzalloc(*kplen, GFP_KERNEL); + if (!*kpbuf) + return -ENOMEM; + memcpy(*kpbuf, buf + nskip, *kplen); + + return 0; +} + +static int ccp_rsa_complete(struct crypto_async_request *async_req, int ret) +{ + struct akcipher_request *req = akcipher_request_cast(async_req); + struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx(req); + + if (ret) + return ret; + + req->dst_len = rctx->cmd.u.rsa.key_size >> 3; + + return 0; +} + +static unsigned int ccp_rsa_maxsize(struct crypto_akcipher *tfm) +{ + if (ccp_version() > CCP_VERSION(3, 0)) + return CCP5_RSA_MAXMOD; + else + return CCP_RSA_MAXMOD; +} + +static int ccp_rsa_crypt(struct akcipher_request *req, bool encrypt) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx(req); + int ret = 0; + + memset(&rctx->cmd, 0, sizeof(rctx->cmd)); + INIT_LIST_HEAD(&rctx->cmd.entry); + rctx->cmd.engine = CCP_ENGINE_RSA; + + rctx->cmd.u.rsa.key_size = ctx->u.rsa.key_len; /* in bits */ + if (encrypt) { + rctx->cmd.u.rsa.exp = &ctx->u.rsa.e_sg; + rctx->cmd.u.rsa.exp_len = ctx->u.rsa.e_len; + } else { + rctx->cmd.u.rsa.exp = &ctx->u.rsa.d_sg; + rctx->cmd.u.rsa.exp_len = ctx->u.rsa.d_len; + } + rctx->cmd.u.rsa.mod = &ctx->u.rsa.n_sg; + rctx->cmd.u.rsa.mod_len = ctx->u.rsa.n_len; + rctx->cmd.u.rsa.src = req->src; + rctx->cmd.u.rsa.src_len = req->src_len; + rctx->cmd.u.rsa.dst = req->dst; + + ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd); + + return ret; +} + +static int ccp_rsa_encrypt(struct akcipher_request *req) +{ + return ccp_rsa_crypt(req, true); +} + +static int ccp_rsa_decrypt(struct akcipher_request *req) +{ + return ccp_rsa_crypt(req, false); +} + +static int ccp_check_key_length(unsigned int len) +{ + /* In bits */ + if (len < 8 || len > 4096) + return -EINVAL; + return 0; +} + +static void ccp_rsa_free_key_bufs(struct ccp_ctx *ctx) +{ + /* Clean up old key data */ + kzfree(ctx->u.rsa.e_buf); + ctx->u.rsa.e_buf = NULL; + ctx->u.rsa.e_len = 0; + kzfree(ctx->u.rsa.n_buf); + ctx->u.rsa.n_buf = NULL; + ctx->u.rsa.n_len = 0; + kzfree(ctx->u.rsa.d_buf); + ctx->u.rsa.d_buf = NULL; + ctx->u.rsa.d_len = 0; +} + +static int ccp_rsa_setkey(struct crypto_akcipher *tfm, const void *key, + unsigned int keylen, bool private) +{ + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct rsa_key raw_key; + int ret; + + ccp_rsa_free_key_bufs(ctx); + memset(&raw_key, 0, sizeof(raw_key)); + + /* Code borrowed from crypto/rsa.c */ + if (private) + ret = rsa_parse_priv_key(&raw_key, key, keylen); + else + ret = rsa_parse_pub_key(&raw_key, key, keylen); + if (ret) + goto n_key; + + ret = ccp_copy_and_save_keypart(&ctx->u.rsa.n_buf, &ctx->u.rsa.n_len, + raw_key.n, raw_key.n_sz); + if (ret) + goto key_err; + sg_init_one(&ctx->u.rsa.n_sg, ctx->u.rsa.n_buf, ctx->u.rsa.n_len); + + ctx->u.rsa.key_len = ctx->u.rsa.n_len << 3; /* convert to bits */ + if (ccp_check_key_length(ctx->u.rsa.key_len)) { + ret = -EINVAL; + goto key_err; + } + + ret = 
ccp_copy_and_save_keypart(&ctx->u.rsa.e_buf, &ctx->u.rsa.e_len, + raw_key.e, raw_key.e_sz); + if (ret) + goto key_err; + sg_init_one(&ctx->u.rsa.e_sg, ctx->u.rsa.e_buf, ctx->u.rsa.e_len); + + if (private) { + ret = ccp_copy_and_save_keypart(&ctx->u.rsa.d_buf, + &ctx->u.rsa.d_len, + raw_key.d, raw_key.d_sz); + if (ret) + goto key_err; + sg_init_one(&ctx->u.rsa.d_sg, + ctx->u.rsa.d_buf, ctx->u.rsa.d_len); + } + + return 0; + +key_err: + ccp_rsa_free_key_bufs(ctx); + +n_key: + return ret; +} + +static int ccp_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key, + unsigned int keylen) +{ + return ccp_rsa_setkey(tfm, key, keylen, true); +} + +static int ccp_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key, + unsigned int keylen) +{ + return ccp_rsa_setkey(tfm, key, keylen, false); +} + +static int ccp_rsa_init_tfm(struct crypto_akcipher *tfm) +{ + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + + akcipher_set_reqsize(tfm, sizeof(struct ccp_rsa_req_ctx)); + ctx->complete = ccp_rsa_complete; + + return 0; +} + +static void ccp_rsa_exit_tfm(struct crypto_akcipher *tfm) +{ + struct ccp_ctx *ctx = crypto_tfm_ctx(&tfm->base); + + ccp_rsa_free_key_bufs(ctx); +} + +static struct akcipher_alg ccp_rsa_defaults = { + .encrypt = ccp_rsa_encrypt, + .decrypt = ccp_rsa_decrypt, + .sign = ccp_rsa_decrypt, + .verify = ccp_rsa_encrypt, + .set_pub_key = ccp_rsa_setpubkey, + .set_priv_key = ccp_rsa_setprivkey, + .max_size = ccp_rsa_maxsize, + .init = ccp_rsa_init_tfm, + .exit = ccp_rsa_exit_tfm, + .base = { + .cra_name = "rsa", + .cra_driver_name = "rsa-ccp", + .cra_priority = CCP_CRA_PRIORITY, + .cra_module = THIS_MODULE, + .cra_ctxsize = 2 * sizeof(struct ccp_ctx), + }, +}; + +struct ccp_rsa_def { + unsigned int version; + const char *name; + const char *driver_name; + unsigned int reqsize; + struct akcipher_alg *alg_defaults; +}; + +static struct ccp_rsa_def rsa_algs[] = { + { + .version = CCP_VERSION(3, 0), + .name = "rsa", + .driver_name = "rsa-ccp", + .reqsize = sizeof(struct ccp_rsa_req_ctx), + .alg_defaults = &ccp_rsa_defaults, + } +}; + +int ccp_register_rsa_alg(struct list_head *head, const struct ccp_rsa_def *def) +{ + struct ccp_crypto_akcipher_alg *ccp_alg; + struct akcipher_alg *alg; + int ret; + + ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL); + if (!ccp_alg) + return -ENOMEM; + + INIT_LIST_HEAD(&ccp_alg->entry); + + alg = &ccp_alg->alg; + *alg = *def->alg_defaults; + snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); + snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + def->driver_name); + ret = crypto_register_akcipher(alg); + if (ret) { + pr_err("%s akcipher algorithm registration error (%d)\n", + alg->base.cra_name, ret); + kfree(ccp_alg); + return ret; + } + + list_add(&ccp_alg->entry, head); + + return 0; +} + +int ccp_register_rsa_algs(struct list_head *head) +{ + int i, ret; + unsigned int ccpversion = ccp_version(); + + /* Register the RSA algorithm in standard mode + * This works for CCP v3 and later + */ + for (i = 0; i < ARRAY_SIZE(rsa_algs); i++) { + if (rsa_algs[i].version > ccpversion) + continue; + ret = ccp_register_rsa_alg(head, &rsa_algs[i]); + if (ret) + return ret; + } + + return 0; +} diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c index ce97b3868f4a..8b9b16d433f7 100644 --- a/drivers/crypto/ccp/ccp-crypto-sha.c +++ b/drivers/crypto/ccp/ccp-crypto-sha.c @@ -1,7 +1,7 @@ /* * AMD Cryptographic Coprocessor (CCP) SHA crypto API support * - * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. 
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc. * * Author: Tom Lendacky * Author: Gary R Hook diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h index dd5bf15f06e5..b9fd090c46c2 100644 --- a/drivers/crypto/ccp/ccp-crypto.h +++ b/drivers/crypto/ccp/ccp-crypto.h @@ -1,7 +1,7 @@ /* * AMD Cryptographic Coprocessor (CCP) crypto API support * - * Copyright (C) 2013 Advanced Micro Devices, Inc. + * Copyright (C) 2013,2017 Advanced Micro Devices, Inc. * * Author: Tom Lendacky * @@ -24,6 +24,8 @@ #include #include #include +#include +#include #define CCP_LOG_LEVEL KERN_INFO @@ -58,6 +60,12 @@ struct ccp_crypto_ahash_alg { struct ahash_alg alg; }; +struct ccp_crypto_akcipher_alg { + struct list_head entry; + + struct akcipher_alg alg; +}; + static inline struct ccp_crypto_ablkcipher_alg * ccp_crypto_ablkcipher_alg(struct crypto_tfm *tfm) { @@ -91,7 +99,7 @@ struct ccp_aes_ctx { struct scatterlist key_sg; unsigned int key_len; - u8 key[AES_MAX_KEY_SIZE]; + u8 key[AES_MAX_KEY_SIZE * 2]; u8 nonce[CTR_RFC3686_NONCE_SIZE]; @@ -227,12 +235,35 @@ struct ccp_sha_exp_ctx { u8 buf[MAX_SHA_BLOCK_SIZE]; }; +/***** RSA related defines *****/ + +struct ccp_rsa_ctx { + unsigned int key_len; /* in bits */ + struct scatterlist e_sg; + u8 *e_buf; + unsigned int e_len; + struct scatterlist n_sg; + u8 *n_buf; + unsigned int n_len; + struct scatterlist d_sg; + u8 *d_buf; + unsigned int d_len; +}; + +struct ccp_rsa_req_ctx { + struct ccp_cmd cmd; +}; + +#define CCP_RSA_MAXMOD (4 * 1024 / 8) +#define CCP5_RSA_MAXMOD (16 * 1024 / 8) + /***** Common Context Structure *****/ struct ccp_ctx { int (*complete)(struct crypto_async_request *req, int ret); union { struct ccp_aes_ctx aes; + struct ccp_rsa_ctx rsa; struct ccp_sha_ctx sha; struct ccp_des3_ctx des3; } u; @@ -249,5 +280,6 @@ int ccp_register_aes_xts_algs(struct list_head *head); int ccp_register_aes_aeads(struct list_head *head); int ccp_register_sha_algs(struct list_head *head); int ccp_register_des3_algs(struct list_head *head); +int ccp_register_rsa_algs(struct list_head *head); #endif diff --git a/drivers/crypto/ccp/ccp-debugfs.c b/drivers/crypto/ccp/ccp-debugfs.c index 3cd6c83754e0..59d4ca4e72d8 100644 --- a/drivers/crypto/ccp/ccp-debugfs.c +++ b/drivers/crypto/ccp/ccp-debugfs.c @@ -305,19 +305,19 @@ void ccp5_debugfs_setup(struct ccp_device *ccp) ccp->debugfs_instance = debugfs_create_dir(ccp->name, ccp_debugfs_dir); if (!ccp->debugfs_instance) - return; + goto err; debugfs_info = debugfs_create_file("info", 0400, ccp->debugfs_instance, ccp, &ccp_debugfs_info_ops); if (!debugfs_info) - return; + goto err; debugfs_stats = debugfs_create_file("stats", 0600, ccp->debugfs_instance, ccp, &ccp_debugfs_stats_ops); if (!debugfs_stats) - return; + goto err; for (i = 0; i < ccp->cmd_q_count; i++) { cmd_q = &ccp->cmd_q[i]; @@ -327,15 +327,20 @@ void ccp5_debugfs_setup(struct ccp_device *ccp) debugfs_q_instance = debugfs_create_dir(name, ccp->debugfs_instance); if (!debugfs_q_instance) - return; + goto err; debugfs_q_stats = debugfs_create_file("stats", 0600, debugfs_q_instance, cmd_q, &ccp_debugfs_queue_ops); if (!debugfs_q_stats) - return; + goto err; } + + return; + +err: + debugfs_remove_recursive(ccp->debugfs_instance); } void ccp5_debugfs_destroy(void) diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c index 367c2e30656f..240bebbcb8ac 100644 --- a/drivers/crypto/ccp/ccp-dev-v3.c +++ b/drivers/crypto/ccp/ccp-dev-v3.c @@ -1,7 +1,7 @@ /* * AMD Cryptographic Coprocessor (CCP) driver * - * Copyright (C) 
2013,2016 Advanced Micro Devices, Inc. + * Copyright (C) 2013,2017 Advanced Micro Devices, Inc. * * Author: Tom Lendacky * Author: Gary R Hook @@ -359,8 +359,7 @@ static void ccp_irq_bh(unsigned long data) static irqreturn_t ccp_irq_handler(int irq, void *data) { - struct device *dev = data; - struct ccp_device *ccp = dev_get_drvdata(dev); + struct ccp_device *ccp = (struct ccp_device *)data; ccp_disable_queue_interrupts(ccp); if (ccp->use_tasklet) @@ -454,7 +453,7 @@ static int ccp_init(struct ccp_device *ccp) iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG); /* Request an irq */ - ret = ccp->get_irq(ccp); + ret = sp_request_ccp_irq(ccp->sp, ccp_irq_handler, ccp->name, ccp); if (ret) { dev_err(dev, "unable to allocate an IRQ\n"); goto e_pool; @@ -511,7 +510,7 @@ static int ccp_init(struct ccp_device *ccp) if (ccp->cmd_q[i].kthread) kthread_stop(ccp->cmd_q[i].kthread); - ccp->free_irq(ccp); + sp_free_ccp_irq(ccp->sp, ccp); e_pool: for (i = 0; i < ccp->cmd_q_count; i++) @@ -550,7 +549,7 @@ static void ccp_destroy(struct ccp_device *ccp) if (ccp->cmd_q[i].kthread) kthread_stop(ccp->cmd_q[i].kthread); - ccp->free_irq(ccp); + sp_free_ccp_irq(ccp->sp, ccp); for (i = 0; i < ccp->cmd_q_count; i++) dma_pool_destroy(ccp->cmd_q[i].dma_pool); @@ -586,10 +585,17 @@ static const struct ccp_actions ccp3_actions = { .irqhandler = ccp_irq_handler, }; +const struct ccp_vdata ccpv3_platform = { + .version = CCP_VERSION(3, 0), + .setup = NULL, + .perform = &ccp3_actions, + .offset = 0, +}; + const struct ccp_vdata ccpv3 = { .version = CCP_VERSION(3, 0), .setup = NULL, .perform = &ccp3_actions, - .bar = 2, .offset = 0x20000, + .rsamax = CCP_RSA_MAX_WIDTH, }; diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index b10d2d2075cb..65604fc65e8f 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -1,7 +1,7 @@ /* * AMD Cryptographic Coprocessor (CCP) driver * - * Copyright (C) 2016 Advanced Micro Devices, Inc. + * Copyright (C) 2016,2017 Advanced Micro Devices, Inc. 
* * Author: Gary R Hook * @@ -145,6 +145,7 @@ union ccp_function { #define CCP_AES_MODE(p) ((p)->aes.mode) #define CCP_AES_TYPE(p) ((p)->aes.type) #define CCP_XTS_SIZE(p) ((p)->aes_xts.size) +#define CCP_XTS_TYPE(p) ((p)->aes_xts.type) #define CCP_XTS_ENCRYPT(p) ((p)->aes_xts.encrypt) #define CCP_DES3_SIZE(p) ((p)->des3.size) #define CCP_DES3_ENCRYPT(p) ((p)->des3.encrypt) @@ -344,6 +345,7 @@ static int ccp5_perform_xts_aes(struct ccp_op *op) CCP5_CMD_PROT(&desc) = 0; function.raw = 0; + CCP_XTS_TYPE(&function) = op->u.xts.type; CCP_XTS_ENCRYPT(&function) = op->u.xts.action; CCP_XTS_SIZE(&function) = op->u.xts.unit_size; CCP5_CMD_FUNCTION(&desc) = function.raw; @@ -469,7 +471,7 @@ static int ccp5_perform_rsa(struct ccp_op *op) CCP5_CMD_PROT(&desc) = 0; function.raw = 0; - CCP_RSA_SIZE(&function) = op->u.rsa.mod_size >> 3; + CCP_RSA_SIZE(&function) = (op->u.rsa.mod_size + 7) >> 3; CCP5_CMD_FUNCTION(&desc) = function.raw; CCP5_CMD_LEN(&desc) = op->u.rsa.input_len; @@ -484,10 +486,10 @@ static int ccp5_perform_rsa(struct ccp_op *op) CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; - /* Exponent is in LSB memory */ - CCP5_CMD_KEY_LO(&desc) = op->sb_key * LSB_ITEM_SIZE; - CCP5_CMD_KEY_HI(&desc) = 0; - CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB; + /* Key (Exponent) is in external memory */ + CCP5_CMD_KEY_LO(&desc) = ccp_addr_lo(&op->exp.u.dma); + CCP5_CMD_KEY_HI(&desc) = ccp_addr_hi(&op->exp.u.dma); + CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SYSTEM; return ccp5_do_cmd(&desc, op->cmd_q); } @@ -769,8 +771,7 @@ static void ccp5_irq_bh(unsigned long data) static irqreturn_t ccp5_irq_handler(int irq, void *data) { - struct device *dev = data; - struct ccp_device *ccp = dev_get_drvdata(dev); + struct ccp_device *ccp = (struct ccp_device *)data; ccp5_disable_queue_interrupts(ccp); ccp->total_interrupts++; @@ -881,7 +882,7 @@ static int ccp5_init(struct ccp_device *ccp) dev_dbg(dev, "Requesting an IRQ...\n"); /* Request an irq */ - ret = ccp->get_irq(ccp); + ret = sp_request_ccp_irq(ccp->sp, ccp5_irq_handler, ccp->name, ccp); if (ret) { dev_err(dev, "unable to allocate an IRQ\n"); goto e_pool; @@ -987,7 +988,7 @@ static int ccp5_init(struct ccp_device *ccp) kthread_stop(ccp->cmd_q[i].kthread); e_irq: - ccp->free_irq(ccp); + sp_free_ccp_irq(ccp->sp, ccp); e_pool: for (i = 0; i < ccp->cmd_q_count; i++) @@ -1037,7 +1038,7 @@ static void ccp5_destroy(struct ccp_device *ccp) if (ccp->cmd_q[i].kthread) kthread_stop(ccp->cmd_q[i].kthread); - ccp->free_irq(ccp); + sp_free_ccp_irq(ccp->sp, ccp); for (i = 0; i < ccp->cmd_q_count; i++) { cmd_q = &ccp->cmd_q[i]; @@ -1106,15 +1107,14 @@ static const struct ccp_actions ccp5_actions = { .init = ccp5_init, .destroy = ccp5_destroy, .get_free_slots = ccp5_get_free_slots, - .irqhandler = ccp5_irq_handler, }; const struct ccp_vdata ccpv5a = { .version = CCP_VERSION(5, 0), .setup = ccp5_config, .perform = &ccp5_actions, - .bar = 2, .offset = 0x0, + .rsamax = CCP5_RSA_MAX_WIDTH, }; const struct ccp_vdata ccpv5b = { @@ -1122,6 +1122,6 @@ const struct ccp_vdata ccpv5b = { .dma_chan_attr = DMA_PRIVATE, .setup = ccp5other_config, .perform = &ccp5_actions, - .bar = 2, .offset = 0x0, + .rsamax = CCP5_RSA_MAX_WIDTH, }; diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c index 2506b5025700..4e029b176641 100644 --- a/drivers/crypto/ccp/ccp-dev.c +++ b/drivers/crypto/ccp/ccp-dev.c @@ -1,7 +1,7 @@ /* * AMD Cryptographic Coprocessor (CCP) driver * - * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. 
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc. * * Author: Tom Lendacky * Author: Gary R Hook @@ -11,7 +11,6 @@ * published by the Free Software Foundation. */ -#include #include #include #include @@ -30,12 +29,6 @@ #include "ccp-dev.h" -MODULE_AUTHOR("Tom Lendacky "); -MODULE_AUTHOR("Gary R Hook "); -MODULE_LICENSE("GPL"); -MODULE_VERSION("1.1.0"); -MODULE_DESCRIPTION("AMD Cryptographic Coprocessor driver"); - struct ccp_tasklet_data { struct completion completion; struct ccp_cmd *cmd; @@ -111,13 +104,6 @@ static LIST_HEAD(ccp_units); static DEFINE_SPINLOCK(ccp_rr_lock); static struct ccp_device *ccp_rr; -/* Ever-increasing value to produce unique unit numbers */ -static atomic_t ccp_unit_ordinal; -static unsigned int ccp_increment_unit_ordinal(void) -{ - return atomic_inc_return(&ccp_unit_ordinal); -} - /** * ccp_add_device - add a CCP device to the list * @@ -415,6 +401,7 @@ static void ccp_do_cmd_complete(unsigned long data) struct ccp_cmd *cmd = tdata->cmd; cmd->callback(cmd->data, cmd->ret); + complete(&tdata->completion); } @@ -464,14 +451,17 @@ int ccp_cmd_queue_thread(void *data) * * @dev: device struct of the CCP */ -struct ccp_device *ccp_alloc_struct(struct device *dev) +struct ccp_device *ccp_alloc_struct(struct sp_device *sp) { + struct device *dev = sp->dev; struct ccp_device *ccp; ccp = devm_kzalloc(dev, sizeof(*ccp), GFP_KERNEL); if (!ccp) return NULL; ccp->dev = dev; + ccp->sp = sp; + ccp->axcache = sp->axcache; INIT_LIST_HEAD(&ccp->cmd); INIT_LIST_HEAD(&ccp->backlog); @@ -486,9 +476,8 @@ struct ccp_device *ccp_alloc_struct(struct device *dev) init_waitqueue_head(&ccp->sb_queue); init_waitqueue_head(&ccp->suspend_queue); - ccp->ord = ccp_increment_unit_ordinal(); - snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", ccp->ord); - snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", ccp->ord); + snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", sp->ord); + snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", sp->ord); return ccp; } @@ -538,55 +527,100 @@ bool ccp_queues_suspended(struct ccp_device *ccp) return ccp->cmd_q_count == suspended; } -#endif -static int __init ccp_mod_init(void) +int ccp_dev_suspend(struct sp_device *sp, pm_message_t state) { -#ifdef CONFIG_X86 - int ret; + struct ccp_device *ccp = sp->ccp_data; + unsigned long flags; + unsigned int i; - ret = ccp_pci_init(); - if (ret) - return ret; + spin_lock_irqsave(&ccp->cmd_lock, flags); - /* Don't leave the driver loaded if init failed */ - if (ccp_present() != 0) { - ccp_pci_exit(); - return -ENODEV; - } + ccp->suspending = 1; + + /* Wake all the queue kthreads to prepare for suspend */ + for (i = 0; i < ccp->cmd_q_count; i++) + wake_up_process(ccp->cmd_q[i].kthread); + + spin_unlock_irqrestore(&ccp->cmd_lock, flags); + + /* Wait for all queue kthreads to say they're done */ + while (!ccp_queues_suspended(ccp)) + wait_event_interruptible(ccp->suspend_queue, + ccp_queues_suspended(ccp)); return 0; -#endif +} -#ifdef CONFIG_ARM64 - int ret; +int ccp_dev_resume(struct sp_device *sp) +{ + struct ccp_device *ccp = sp->ccp_data; + unsigned long flags; + unsigned int i; - ret = ccp_platform_init(); - if (ret) - return ret; + spin_lock_irqsave(&ccp->cmd_lock, flags); - /* Don't leave the driver loaded if init failed */ - if (ccp_present() != 0) { - ccp_platform_exit(); - return -ENODEV; + ccp->suspending = 0; + + /* Wake up all the kthreads */ + for (i = 0; i < ccp->cmd_q_count; i++) { + ccp->cmd_q[i].suspended = 0; + wake_up_process(ccp->cmd_q[i].kthread); } + spin_unlock_irqrestore(&ccp->cmd_lock, flags); 
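The ccp_dev_suspend()/ccp_dev_resume() bodies added here are the same queue-kthread quiesce/wake logic that previously lived in the PCI and platform front ends; under the new layering they are reached through the bus driver's PM callback and the sp core, which only forwards to sub-devices that are present. Condensed from sp-pci.c and sp-dev.c later in this patch:

    /* PCI PM entry point (sp-pci.c) */
    static int sp_pci_suspend(struct pci_dev *pdev, pm_message_t state)
    {
            struct sp_device *sp = dev_get_drvdata(&pdev->dev);

            return sp_suspend(sp, state); /* calls ccp_dev_suspend() only if a CCP vdata is attached */
    }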
+ return 0; +} #endif - return -ENODEV; -} - -static void __exit ccp_mod_exit(void) +int ccp_dev_init(struct sp_device *sp) { -#ifdef CONFIG_X86 - ccp_pci_exit(); -#endif + struct device *dev = sp->dev; + struct ccp_device *ccp; + int ret; -#ifdef CONFIG_ARM64 - ccp_platform_exit(); -#endif + ret = -ENOMEM; + ccp = ccp_alloc_struct(sp); + if (!ccp) + goto e_err; + sp->ccp_data = ccp; + + ccp->vdata = (struct ccp_vdata *)sp->dev_vdata->ccp_vdata; + if (!ccp->vdata || !ccp->vdata->version) { + ret = -ENODEV; + dev_err(dev, "missing driver data\n"); + goto e_err; + } + + ccp->use_tasklet = sp->use_tasklet; + + ccp->io_regs = sp->io_map + ccp->vdata->offset; + if (ccp->vdata->setup) + ccp->vdata->setup(ccp); + + ret = ccp->vdata->perform->init(ccp); + if (ret) + goto e_err; + + dev_notice(dev, "ccp enabled\n"); + + return 0; + +e_err: + sp->ccp_data = NULL; + + dev_notice(dev, "ccp initialization failed\n"); + + return ret; } -module_init(ccp_mod_init); -module_exit(ccp_mod_exit); +void ccp_dev_destroy(struct sp_device *sp) +{ + struct ccp_device *ccp = sp->ccp_data; + + if (!ccp) + return; + + ccp->vdata->perform->destroy(ccp); +} diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index a70154ac7405..6810b65c1939 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -1,7 +1,7 @@ /* * AMD Cryptographic Coprocessor (CCP) driver * - * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. + * Copyright (C) 2013,2017 Advanced Micro Devices, Inc. * * Author: Tom Lendacky * Author: Gary R Hook @@ -27,6 +27,8 @@ #include #include +#include "sp-dev.h" + #define MAX_CCP_NAME_LEN 16 #define MAX_DMAPOOL_NAME_LEN 32 @@ -192,6 +194,7 @@ #define CCP_AES_CTX_SB_COUNT 1 #define CCP_XTS_AES_KEY_SB_COUNT 1 +#define CCP5_XTS_AES_KEY_SB_COUNT 2 #define CCP_XTS_AES_CTX_SB_COUNT 1 #define CCP_DES3_KEY_SB_COUNT 1 @@ -200,6 +203,7 @@ #define CCP_SHA_SB_COUNT 1 #define CCP_RSA_MAX_WIDTH 4096 +#define CCP5_RSA_MAX_WIDTH 16384 #define CCP_PASSTHRU_BLOCKSIZE 256 #define CCP_PASSTHRU_MASKSIZE 32 @@ -344,12 +348,11 @@ struct ccp_device { char rngname[MAX_CCP_NAME_LEN]; struct device *dev; + struct sp_device *sp; /* Bus specific device information */ void *dev_specific; - int (*get_irq)(struct ccp_device *ccp); - void (*free_irq)(struct ccp_device *ccp); unsigned int qim; unsigned int irq; bool use_tasklet; @@ -362,7 +365,6 @@ struct ccp_device { * them. */ struct mutex req_mutex ____cacheline_aligned; - void __iomem *io_map; void __iomem *io_regs; /* Master lists that all cmds are queued on. 
Because there can be @@ -497,6 +499,7 @@ struct ccp_aes_op { }; struct ccp_xts_aes_op { + enum ccp_aes_type type; enum ccp_aes_action action; enum ccp_xts_aes_unit_size unit_size; }; @@ -626,18 +629,12 @@ struct ccp5_desc { struct dword7 dw7; }; -int ccp_pci_init(void); -void ccp_pci_exit(void); - -int ccp_platform_init(void); -void ccp_platform_exit(void); - void ccp_add_device(struct ccp_device *ccp); void ccp_del_device(struct ccp_device *ccp); extern void ccp_log_error(struct ccp_device *, int); -struct ccp_device *ccp_alloc_struct(struct device *dev); +struct ccp_device *ccp_alloc_struct(struct sp_device *sp); bool ccp_queues_suspended(struct ccp_device *ccp); int ccp_cmd_queue_thread(void *data); int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait); @@ -669,16 +666,7 @@ struct ccp_actions { irqreturn_t (*irqhandler)(int, void *); }; -/* Structure to hold CCP version-specific values */ -struct ccp_vdata { - const unsigned int version; - const unsigned int dma_chan_attr; - void (*setup)(struct ccp_device *); - const struct ccp_actions *perform; - const unsigned int bar; - const unsigned int offset; -}; - +extern const struct ccp_vdata ccpv3_platform; extern const struct ccp_vdata ccpv3; extern const struct ccp_vdata ccpv5a; extern const struct ccp_vdata ccpv5b; diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c index e00be01fbf5a..d608043c0280 100644 --- a/drivers/crypto/ccp/ccp-dmaengine.c +++ b/drivers/crypto/ccp/ccp-dmaengine.c @@ -1,7 +1,7 @@ /* * AMD Cryptographic Coprocessor (CCP) driver * - * Copyright (C) 2016 Advanced Micro Devices, Inc. + * Copyright (C) 2016,2017 Advanced Micro Devices, Inc. * * Author: Gary R Hook * @@ -502,27 +502,6 @@ static struct dma_async_tx_descriptor *ccp_prep_dma_memcpy( return &desc->tx_desc; } -static struct dma_async_tx_descriptor *ccp_prep_dma_sg( - struct dma_chan *dma_chan, struct scatterlist *dst_sg, - unsigned int dst_nents, struct scatterlist *src_sg, - unsigned int src_nents, unsigned long flags) -{ - struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan, - dma_chan); - struct ccp_dma_desc *desc; - - dev_dbg(chan->ccp->dev, - "%s - src=%p, src_nents=%u dst=%p, dst_nents=%u, flags=%#lx\n", - __func__, src_sg, src_nents, dst_sg, dst_nents, flags); - - desc = ccp_create_desc(dma_chan, dst_sg, dst_nents, src_sg, src_nents, - flags); - if (!desc) - return NULL; - - return &desc->tx_desc; -} - static struct dma_async_tx_descriptor *ccp_prep_dma_interrupt( struct dma_chan *dma_chan, unsigned long flags) { @@ -704,7 +683,6 @@ int ccp_dmaengine_register(struct ccp_device *ccp) dma_dev->directions = DMA_MEM_TO_MEM; dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); - dma_cap_set(DMA_SG, dma_dev->cap_mask); dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask); /* The DMA channels for this device can be set to public or private, @@ -740,7 +718,6 @@ int ccp_dmaengine_register(struct ccp_device *ccp) dma_dev->device_free_chan_resources = ccp_free_chan_resources; dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy; - dma_dev->device_prep_dma_sg = ccp_prep_dma_sg; dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt; dma_dev->device_issue_pending = ccp_issue_pending; dma_dev->device_tx_status = ccp_tx_status; diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index c0dfdacbdff5..406b95329b3d 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -1,7 +1,7 @@ /* * AMD 
Cryptographic Coprocessor (CCP) driver * - * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. + * Copyright (C) 2013,2017 Advanced Micro Devices, Inc. * * Author: Tom Lendacky * Author: Gary R Hook @@ -168,7 +168,7 @@ static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa, wa->dma.address = dma_map_single(wa->dev, wa->address, len, dir); - if (!wa->dma.address) + if (dma_mapping_error(wa->dev, wa->dma.address)) return -ENOMEM; wa->dma.length = len; @@ -1038,6 +1038,8 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_op op; unsigned int unit_size, dm_offset; bool in_place = false; + unsigned int sb_count; + enum ccp_aes_type aestype; int ret; switch (xts->unit_size) { @@ -1061,7 +1063,11 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q, return -EINVAL; } - if (xts->key_len != AES_KEYSIZE_128) + if (xts->key_len == AES_KEYSIZE_128) + aestype = CCP_AES_TYPE_128; + else if (xts->key_len == AES_KEYSIZE_256) + aestype = CCP_AES_TYPE_256; + else return -EINVAL; if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1))) @@ -1083,23 +1089,44 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q, op.sb_key = cmd_q->sb_key; op.sb_ctx = cmd_q->sb_ctx; op.init = 1; + op.u.xts.type = aestype; op.u.xts.action = xts->action; op.u.xts.unit_size = xts->unit_size; - /* All supported key sizes fit in a single (32-byte) SB entry - * and must be in little endian format. Use the 256-bit byte - * swap passthru option to convert from big endian to little - * endian. + /* A version 3 device only supports 128-bit keys, which fits into a + * single SB entry. A version 5 device uses a 512-bit vector, so two + * SB entries. */ + if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) + sb_count = CCP_XTS_AES_KEY_SB_COUNT; + else + sb_count = CCP5_XTS_AES_KEY_SB_COUNT; ret = ccp_init_dm_workarea(&key, cmd_q, - CCP_XTS_AES_KEY_SB_COUNT * CCP_SB_BYTES, + sb_count * CCP_SB_BYTES, DMA_TO_DEVICE); if (ret) return ret; - dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128; - ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len); - ccp_set_dm_area(&key, 0, xts->key, dm_offset, xts->key_len); + if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) { + /* All supported key sizes must be in little endian format. + * Use the 256-bit byte swap passthru option to convert from + * big endian to little endian. + */ + dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128; + ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len); + ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len); + } else { + /* Version 5 CCPs use a 512-bit space for the key: each portion + * occupies 256 bits, or one entire slot, and is zero-padded. 
+ */ + unsigned int pad; + + dm_offset = CCP_SB_BYTES; + pad = dm_offset - xts->key_len; + ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len); + ccp_set_dm_area(&key, dm_offset + pad, xts->key, xts->key_len, + xts->key_len); + } ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { @@ -1731,42 +1758,53 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_rsa_engine *rsa = &cmd->u.rsa; - struct ccp_dm_workarea exp, src; - struct ccp_data dst; + struct ccp_dm_workarea exp, src, dst; struct ccp_op op; unsigned int sb_count, i_len, o_len; int ret; - if (rsa->key_size > CCP_RSA_MAX_WIDTH) + /* Check against the maximum allowable size, in bits */ + if (rsa->key_size > cmd_q->ccp->vdata->rsamax) return -EINVAL; if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst) return -EINVAL; + memset(&op, 0, sizeof(op)); + op.cmd_q = cmd_q; + op.jobid = CCP_NEW_JOBID(cmd_q->ccp); + /* The RSA modulus must precede the message being acted upon, so * it must be copied to a DMA area where the message and the * modulus can be concatenated. Therefore the input buffer * length required is twice the output buffer length (which - * must be a multiple of 256-bits). + * must be a multiple of 256-bits). Compute o_len, i_len in bytes. + * Buffer sizes must be a multiple of 32 bytes; rounding up may be + * required. */ - o_len = ((rsa->key_size + 255) / 256) * 32; + o_len = 32 * ((rsa->key_size + 255) / 256); i_len = o_len * 2; - sb_count = o_len / CCP_SB_BYTES; + sb_count = 0; + if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) { + /* sb_count is the number of storage block slots required + * for the modulus. + */ + sb_count = o_len / CCP_SB_BYTES; + op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q, + sb_count); + if (!op.sb_key) + return -EIO; + } else { + /* A version 5 device allows a modulus size that will not fit + * in the LSB, so the command will transfer it from memory. + * Set the sb key to the default, even though it's not used. + */ + op.sb_key = cmd_q->sb_key; + } - memset(&op, 0, sizeof(op)); - op.cmd_q = cmd_q; - op.jobid = ccp_gen_jobid(cmd_q->ccp); - op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q, sb_count); - - if (!op.sb_key) - return -EIO; - - /* The RSA exponent may span multiple (32-byte) SB entries and must - * be in little endian format. Reverse copy each 32-byte chunk - * of the exponent (En chunk to E0 chunk, E(n-1) chunk to E1 chunk) - * and each byte within that chunk and do not perform any byte swap - * operations on the passthru operation. + /* The RSA exponent must be in little endian format. Reverse its + * byte order. */ ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE); if (ret) @@ -1775,11 +1813,22 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) ret = ccp_reverse_set_dm_area(&exp, 0, rsa->exp, 0, rsa->exp_len); if (ret) goto e_exp; - ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key, - CCP_PASSTHRU_BYTESWAP_NOOP); - if (ret) { - cmd->engine_error = cmd_q->cmd_error; - goto e_exp; + + if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) { + /* Copy the exponent to the local storage block, using + * as many 32-byte blocks as were allocated above. It's + * already little endian, so no further change is required. 
+ */ + ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key, + CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_exp; + } + } else { + /* The exponent can be retrieved from memory via DMA. */ + op.exp.u.dma.address = exp.dma.address; + op.exp.u.dma.offset = 0; } /* Concatenate the modulus and the message. Both the modulus and @@ -1798,8 +1847,7 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) goto e_src; /* Prepare the output area for the operation */ - ret = ccp_init_data(&dst, cmd_q, rsa->dst, rsa->mod_len, - o_len, DMA_FROM_DEVICE); + ret = ccp_init_dm_workarea(&dst, cmd_q, o_len, DMA_FROM_DEVICE); if (ret) goto e_src; @@ -1807,7 +1855,7 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) op.src.u.dma.address = src.dma.address; op.src.u.dma.offset = 0; op.src.u.dma.length = i_len; - op.dst.u.dma.address = dst.dm_wa.dma.address; + op.dst.u.dma.address = dst.dma.address; op.dst.u.dma.offset = 0; op.dst.u.dma.length = o_len; @@ -1820,10 +1868,10 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) goto e_dst; } - ccp_reverse_get_dm_area(&dst.dm_wa, 0, rsa->dst, 0, rsa->mod_len); + ccp_reverse_get_dm_area(&dst, 0, rsa->dst, 0, rsa->mod_len); e_dst: - ccp_free_data(&dst, cmd_q); + ccp_dm_free(&dst); e_src: ccp_dm_free(&src); @@ -1832,7 +1880,8 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) ccp_dm_free(&exp); e_sb: - cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count); + if (sb_count) + cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count); return ret; } @@ -1992,7 +2041,7 @@ static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q, memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; - op.jobid = ccp_gen_jobid(cmd_q->ccp); + op.jobid = CCP_NEW_JOBID(cmd_q->ccp); if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { /* Load the mask */ diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c deleted file mode 100644 index e880d4cf4ada..000000000000 --- a/drivers/crypto/ccp/ccp-pci.c +++ /dev/null @@ -1,356 +0,0 @@ -/* - * AMD Cryptographic Coprocessor (CCP) driver - * - * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. - * - * Author: Tom Lendacky - * Author: Gary R Hook - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
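In the ccp_run_rsa_cmd() hunk above, rsa->key_size and mod_size are bit counts: o_len rounds the key size up to whole 256-bit (32-byte) chunks, i_len doubles that so the modulus and message can be concatenated, and the v5 descriptor rounds mod_size up to bytes with (mod_size + 7) >> 3. A worked example with an illustrative 2048-bit key (values chosen only to show the arithmetic):

    unsigned int key_size = 2048;                        /* bits */
    unsigned int o_len = 32 * ((key_size + 255) / 256);  /* 8 chunks -> 256 bytes */
    unsigned int i_len = o_len * 2;                      /* 512 bytes: modulus + message */
    unsigned int mod_bytes = (key_size + 7) >> 3;        /* the (bits + 7) >> 3 rounding used for mod_size */

The sb_key slots are only allocated on the v3 path; a v5 device DMAs the exponent from the workarea instead, which is why the sbfree() call at the end is now conditional on sb_count.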
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "ccp-dev.h" - -#define MSIX_VECTORS 2 - -struct ccp_msix { - u32 vector; - char name[16]; -}; - -struct ccp_pci { - int msix_count; - struct ccp_msix msix[MSIX_VECTORS]; -}; - -static int ccp_get_msix_irqs(struct ccp_device *ccp) -{ - struct ccp_pci *ccp_pci = ccp->dev_specific; - struct device *dev = ccp->dev; - struct pci_dev *pdev = to_pci_dev(dev); - struct msix_entry msix_entry[MSIX_VECTORS]; - unsigned int name_len = sizeof(ccp_pci->msix[0].name) - 1; - int v, ret; - - for (v = 0; v < ARRAY_SIZE(msix_entry); v++) - msix_entry[v].entry = v; - - ret = pci_enable_msix_range(pdev, msix_entry, 1, v); - if (ret < 0) - return ret; - - ccp_pci->msix_count = ret; - for (v = 0; v < ccp_pci->msix_count; v++) { - /* Set the interrupt names and request the irqs */ - snprintf(ccp_pci->msix[v].name, name_len, "%s-%u", - ccp->name, v); - ccp_pci->msix[v].vector = msix_entry[v].vector; - ret = request_irq(ccp_pci->msix[v].vector, - ccp->vdata->perform->irqhandler, - 0, ccp_pci->msix[v].name, dev); - if (ret) { - dev_notice(dev, "unable to allocate MSI-X IRQ (%d)\n", - ret); - goto e_irq; - } - } - ccp->use_tasklet = true; - - return 0; - -e_irq: - while (v--) - free_irq(ccp_pci->msix[v].vector, dev); - - pci_disable_msix(pdev); - - ccp_pci->msix_count = 0; - - return ret; -} - -static int ccp_get_msi_irq(struct ccp_device *ccp) -{ - struct device *dev = ccp->dev; - struct pci_dev *pdev = to_pci_dev(dev); - int ret; - - ret = pci_enable_msi(pdev); - if (ret) - return ret; - - ccp->irq = pdev->irq; - ret = request_irq(ccp->irq, ccp->vdata->perform->irqhandler, 0, - ccp->name, dev); - if (ret) { - dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret); - goto e_msi; - } - ccp->use_tasklet = true; - - return 0; - -e_msi: - pci_disable_msi(pdev); - - return ret; -} - -static int ccp_get_irqs(struct ccp_device *ccp) -{ - struct device *dev = ccp->dev; - int ret; - - ret = ccp_get_msix_irqs(ccp); - if (!ret) - return 0; - - /* Couldn't get MSI-X vectors, try MSI */ - dev_notice(dev, "could not enable MSI-X (%d), trying MSI\n", ret); - ret = ccp_get_msi_irq(ccp); - if (!ret) - return 0; - - /* Couldn't get MSI interrupt */ - dev_notice(dev, "could not enable MSI (%d)\n", ret); - - return ret; -} - -static void ccp_free_irqs(struct ccp_device *ccp) -{ - struct ccp_pci *ccp_pci = ccp->dev_specific; - struct device *dev = ccp->dev; - struct pci_dev *pdev = to_pci_dev(dev); - - if (ccp_pci->msix_count) { - while (ccp_pci->msix_count--) - free_irq(ccp_pci->msix[ccp_pci->msix_count].vector, - dev); - pci_disable_msix(pdev); - } else if (ccp->irq) { - free_irq(ccp->irq, dev); - pci_disable_msi(pdev); - } - ccp->irq = 0; -} - -static int ccp_find_mmio_area(struct ccp_device *ccp) -{ - struct device *dev = ccp->dev; - struct pci_dev *pdev = to_pci_dev(dev); - resource_size_t io_len; - unsigned long io_flags; - - io_flags = pci_resource_flags(pdev, ccp->vdata->bar); - io_len = pci_resource_len(pdev, ccp->vdata->bar); - if ((io_flags & IORESOURCE_MEM) && - (io_len >= (ccp->vdata->offset + 0x800))) - return ccp->vdata->bar; - - return -EIO; -} - -static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) -{ - struct ccp_device *ccp; - struct ccp_pci *ccp_pci; - struct device *dev = &pdev->dev; - unsigned int bar; - int ret; - - ret = -ENOMEM; - ccp = ccp_alloc_struct(dev); - if (!ccp) - goto e_err; - - ccp_pci = devm_kzalloc(dev, sizeof(*ccp_pci), GFP_KERNEL); 
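From here the whole of ccp-pci.c is removed; its MSI-X/MSI bring-up reappears in sp-pci.c below, with one behavioural difference worth noting: the PCI code now only records the vectors, and request_irq() is deferred to sp_request_ccp_irq()/sp_request_psp_irq(). The vector mapping in the new sp_get_msix_irqs(), quoted from later in this patch:

    sp->psp_irq = sp_pci->msix_entry[0].vector;
    sp->ccp_irq = (sp_pci->msix_count > 1) ? sp_pci->msix_entry[1].vector
                                           : sp_pci->msix_entry[0].vector;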
- if (!ccp_pci) - goto e_err; - - ccp->dev_specific = ccp_pci; - ccp->vdata = (struct ccp_vdata *)id->driver_data; - if (!ccp->vdata || !ccp->vdata->version) { - ret = -ENODEV; - dev_err(dev, "missing driver data\n"); - goto e_err; - } - ccp->get_irq = ccp_get_irqs; - ccp->free_irq = ccp_free_irqs; - - ret = pci_request_regions(pdev, "ccp"); - if (ret) { - dev_err(dev, "pci_request_regions failed (%d)\n", ret); - goto e_err; - } - - ret = pci_enable_device(pdev); - if (ret) { - dev_err(dev, "pci_enable_device failed (%d)\n", ret); - goto e_regions; - } - - pci_set_master(pdev); - - ret = ccp_find_mmio_area(ccp); - if (ret < 0) - goto e_device; - bar = ret; - - ret = -EIO; - ccp->io_map = pci_iomap(pdev, bar, 0); - if (!ccp->io_map) { - dev_err(dev, "pci_iomap failed\n"); - goto e_device; - } - ccp->io_regs = ccp->io_map + ccp->vdata->offset; - - ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); - if (ret) { - ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); - if (ret) { - dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", - ret); - goto e_iomap; - } - } - - dev_set_drvdata(dev, ccp); - - if (ccp->vdata->setup) - ccp->vdata->setup(ccp); - - ret = ccp->vdata->perform->init(ccp); - if (ret) - goto e_iomap; - - dev_notice(dev, "enabled\n"); - - return 0; - -e_iomap: - pci_iounmap(pdev, ccp->io_map); - -e_device: - pci_disable_device(pdev); - -e_regions: - pci_release_regions(pdev); - -e_err: - dev_notice(dev, "initialization failed\n"); - return ret; -} - -static void ccp_pci_remove(struct pci_dev *pdev) -{ - struct device *dev = &pdev->dev; - struct ccp_device *ccp = dev_get_drvdata(dev); - - if (!ccp) - return; - - ccp->vdata->perform->destroy(ccp); - - pci_iounmap(pdev, ccp->io_map); - - pci_disable_device(pdev); - - pci_release_regions(pdev); - - dev_notice(dev, "disabled\n"); -} - -#ifdef CONFIG_PM -static int ccp_pci_suspend(struct pci_dev *pdev, pm_message_t state) -{ - struct device *dev = &pdev->dev; - struct ccp_device *ccp = dev_get_drvdata(dev); - unsigned long flags; - unsigned int i; - - spin_lock_irqsave(&ccp->cmd_lock, flags); - - ccp->suspending = 1; - - /* Wake all the queue kthreads to prepare for suspend */ - for (i = 0; i < ccp->cmd_q_count; i++) - wake_up_process(ccp->cmd_q[i].kthread); - - spin_unlock_irqrestore(&ccp->cmd_lock, flags); - - /* Wait for all queue kthreads to say they're done */ - while (!ccp_queues_suspended(ccp)) - wait_event_interruptible(ccp->suspend_queue, - ccp_queues_suspended(ccp)); - - return 0; -} - -static int ccp_pci_resume(struct pci_dev *pdev) -{ - struct device *dev = &pdev->dev; - struct ccp_device *ccp = dev_get_drvdata(dev); - unsigned long flags; - unsigned int i; - - spin_lock_irqsave(&ccp->cmd_lock, flags); - - ccp->suspending = 0; - - /* Wake up all the kthreads */ - for (i = 0; i < ccp->cmd_q_count; i++) { - ccp->cmd_q[i].suspended = 0; - wake_up_process(ccp->cmd_q[i].kthread); - } - - spin_unlock_irqrestore(&ccp->cmd_lock, flags); - - return 0; -} -#endif - -static const struct pci_device_id ccp_pci_table[] = { - { PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&ccpv3 }, - { PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&ccpv5a }, - { PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&ccpv5b }, - /* Last entry must be zero */ - { 0, } -}; -MODULE_DEVICE_TABLE(pci, ccp_pci_table); - -static struct pci_driver ccp_pci_driver = { - .name = "ccp", - .id_table = ccp_pci_table, - .probe = ccp_pci_probe, - .remove = ccp_pci_remove, -#ifdef CONFIG_PM - .suspend = ccp_pci_suspend, - .resume = ccp_pci_resume, -#endif -}; - -int 
ccp_pci_init(void) -{ - return pci_register_driver(&ccp_pci_driver); -} - -void ccp_pci_exit(void) -{ - pci_unregister_driver(&ccp_pci_driver); -} diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c deleted file mode 100644 index e26969e601ad..000000000000 --- a/drivers/crypto/ccp/ccp-platform.c +++ /dev/null @@ -1,293 +0,0 @@ -/* - * AMD Cryptographic Coprocessor (CCP) driver - * - * Copyright (C) 2014,2016 Advanced Micro Devices, Inc. - * - * Author: Tom Lendacky - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "ccp-dev.h" - -struct ccp_platform { - int coherent; -}; - -static const struct acpi_device_id ccp_acpi_match[]; -static const struct of_device_id ccp_of_match[]; - -static struct ccp_vdata *ccp_get_of_version(struct platform_device *pdev) -{ -#ifdef CONFIG_OF - const struct of_device_id *match; - - match = of_match_node(ccp_of_match, pdev->dev.of_node); - if (match && match->data) - return (struct ccp_vdata *)match->data; -#endif - return NULL; -} - -static struct ccp_vdata *ccp_get_acpi_version(struct platform_device *pdev) -{ -#ifdef CONFIG_ACPI - const struct acpi_device_id *match; - - match = acpi_match_device(ccp_acpi_match, &pdev->dev); - if (match && match->driver_data) - return (struct ccp_vdata *)match->driver_data; -#endif - return NULL; -} - -static int ccp_get_irq(struct ccp_device *ccp) -{ - struct device *dev = ccp->dev; - struct platform_device *pdev = to_platform_device(dev); - int ret; - - ret = platform_get_irq(pdev, 0); - if (ret < 0) - return ret; - - ccp->irq = ret; - ret = request_irq(ccp->irq, ccp->vdata->perform->irqhandler, 0, - ccp->name, dev); - if (ret) { - dev_notice(dev, "unable to allocate IRQ (%d)\n", ret); - return ret; - } - - return 0; -} - -static int ccp_get_irqs(struct ccp_device *ccp) -{ - struct device *dev = ccp->dev; - int ret; - - ret = ccp_get_irq(ccp); - if (!ret) - return 0; - - /* Couldn't get an interrupt */ - dev_notice(dev, "could not enable interrupts (%d)\n", ret); - - return ret; -} - -static void ccp_free_irqs(struct ccp_device *ccp) -{ - struct device *dev = ccp->dev; - - free_irq(ccp->irq, dev); -} - -static struct resource *ccp_find_mmio_area(struct ccp_device *ccp) -{ - struct device *dev = ccp->dev; - struct platform_device *pdev = to_platform_device(dev); - struct resource *ior; - - ior = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (ior && (resource_size(ior) >= 0x800)) - return ior; - - return NULL; -} - -static int ccp_platform_probe(struct platform_device *pdev) -{ - struct ccp_device *ccp; - struct ccp_platform *ccp_platform; - struct device *dev = &pdev->dev; - enum dev_dma_attr attr; - struct resource *ior; - int ret; - - ret = -ENOMEM; - ccp = ccp_alloc_struct(dev); - if (!ccp) - goto e_err; - - ccp_platform = devm_kzalloc(dev, sizeof(*ccp_platform), GFP_KERNEL); - if (!ccp_platform) - goto e_err; - - ccp->dev_specific = ccp_platform; - ccp->vdata = pdev->dev.of_node ? 
ccp_get_of_version(pdev) - : ccp_get_acpi_version(pdev); - if (!ccp->vdata || !ccp->vdata->version) { - ret = -ENODEV; - dev_err(dev, "missing driver data\n"); - goto e_err; - } - ccp->get_irq = ccp_get_irqs; - ccp->free_irq = ccp_free_irqs; - - ior = ccp_find_mmio_area(ccp); - ccp->io_map = devm_ioremap_resource(dev, ior); - if (IS_ERR(ccp->io_map)) { - ret = PTR_ERR(ccp->io_map); - goto e_err; - } - ccp->io_regs = ccp->io_map; - - attr = device_get_dma_attr(dev); - if (attr == DEV_DMA_NOT_SUPPORTED) { - dev_err(dev, "DMA is not supported"); - goto e_err; - } - - ccp_platform->coherent = (attr == DEV_DMA_COHERENT); - if (ccp_platform->coherent) - ccp->axcache = CACHE_WB_NO_ALLOC; - else - ccp->axcache = CACHE_NONE; - - ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); - if (ret) { - dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret); - goto e_err; - } - - dev_set_drvdata(dev, ccp); - - ret = ccp->vdata->perform->init(ccp); - if (ret) - goto e_err; - - dev_notice(dev, "enabled\n"); - - return 0; - -e_err: - dev_notice(dev, "initialization failed\n"); - return ret; -} - -static int ccp_platform_remove(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct ccp_device *ccp = dev_get_drvdata(dev); - - ccp->vdata->perform->destroy(ccp); - - dev_notice(dev, "disabled\n"); - - return 0; -} - -#ifdef CONFIG_PM -static int ccp_platform_suspend(struct platform_device *pdev, - pm_message_t state) -{ - struct device *dev = &pdev->dev; - struct ccp_device *ccp = dev_get_drvdata(dev); - unsigned long flags; - unsigned int i; - - spin_lock_irqsave(&ccp->cmd_lock, flags); - - ccp->suspending = 1; - - /* Wake all the queue kthreads to prepare for suspend */ - for (i = 0; i < ccp->cmd_q_count; i++) - wake_up_process(ccp->cmd_q[i].kthread); - - spin_unlock_irqrestore(&ccp->cmd_lock, flags); - - /* Wait for all queue kthreads to say they're done */ - while (!ccp_queues_suspended(ccp)) - wait_event_interruptible(ccp->suspend_queue, - ccp_queues_suspended(ccp)); - - return 0; -} - -static int ccp_platform_resume(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct ccp_device *ccp = dev_get_drvdata(dev); - unsigned long flags; - unsigned int i; - - spin_lock_irqsave(&ccp->cmd_lock, flags); - - ccp->suspending = 0; - - /* Wake up all the kthreads */ - for (i = 0; i < ccp->cmd_q_count; i++) { - ccp->cmd_q[i].suspended = 0; - wake_up_process(ccp->cmd_q[i].kthread); - } - - spin_unlock_irqrestore(&ccp->cmd_lock, flags); - - return 0; -} -#endif - -#ifdef CONFIG_ACPI -static const struct acpi_device_id ccp_acpi_match[] = { - { "AMDI0C00", (kernel_ulong_t)&ccpv3 }, - { }, -}; -MODULE_DEVICE_TABLE(acpi, ccp_acpi_match); -#endif - -#ifdef CONFIG_OF -static const struct of_device_id ccp_of_match[] = { - { .compatible = "amd,ccp-seattle-v1a", - .data = (const void *)&ccpv3 }, - { }, -}; -MODULE_DEVICE_TABLE(of, ccp_of_match); -#endif - -static struct platform_driver ccp_platform_driver = { - .driver = { - .name = "ccp", -#ifdef CONFIG_ACPI - .acpi_match_table = ccp_acpi_match, -#endif -#ifdef CONFIG_OF - .of_match_table = ccp_of_match, -#endif - }, - .probe = ccp_platform_probe, - .remove = ccp_platform_remove, -#ifdef CONFIG_PM - .suspend = ccp_platform_suspend, - .resume = ccp_platform_resume, -#endif -}; - -int ccp_platform_init(void) -{ - return platform_driver_register(&ccp_platform_driver); -} - -void ccp_platform_exit(void) -{ - platform_driver_unregister(&ccp_platform_driver); -} diff --git a/drivers/crypto/ccp/sp-dev.c b/drivers/crypto/ccp/sp-dev.c new 
file mode 100644 index 000000000000..bef387c8abfd --- /dev/null +++ b/drivers/crypto/ccp/sp-dev.c @@ -0,0 +1,277 @@ +/* + * AMD Secure Processor driver + * + * Copyright (C) 2017 Advanced Micro Devices, Inc. + * + * Author: Tom Lendacky + * Author: Gary R Hook + * Author: Brijesh Singh + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ccp-dev.h" +#include "sp-dev.h" + +MODULE_AUTHOR("Tom Lendacky "); +MODULE_AUTHOR("Gary R Hook "); +MODULE_LICENSE("GPL"); +MODULE_VERSION("1.1.0"); +MODULE_DESCRIPTION("AMD Secure Processor driver"); + +/* List of SPs, SP count, read-write access lock, and access functions + * + * Lock structure: get sp_unit_lock for reading whenever we need to + * examine the SP list. + */ +static DEFINE_RWLOCK(sp_unit_lock); +static LIST_HEAD(sp_units); + +/* Ever-increasing value to produce unique unit numbers */ +static atomic_t sp_ordinal; + +static void sp_add_device(struct sp_device *sp) +{ + unsigned long flags; + + write_lock_irqsave(&sp_unit_lock, flags); + + list_add_tail(&sp->entry, &sp_units); + + write_unlock_irqrestore(&sp_unit_lock, flags); +} + +static void sp_del_device(struct sp_device *sp) +{ + unsigned long flags; + + write_lock_irqsave(&sp_unit_lock, flags); + + list_del(&sp->entry); + + write_unlock_irqrestore(&sp_unit_lock, flags); +} + +static irqreturn_t sp_irq_handler(int irq, void *data) +{ + struct sp_device *sp = data; + + if (sp->ccp_irq_handler) + sp->ccp_irq_handler(irq, sp->ccp_irq_data); + + if (sp->psp_irq_handler) + sp->psp_irq_handler(irq, sp->psp_irq_data); + + return IRQ_HANDLED; +} + +int sp_request_ccp_irq(struct sp_device *sp, irq_handler_t handler, + const char *name, void *data) +{ + int ret; + + if ((sp->psp_irq == sp->ccp_irq) && sp->dev_vdata->psp_vdata) { + /* Need a common routine to manage all interrupts */ + sp->ccp_irq_data = data; + sp->ccp_irq_handler = handler; + + if (!sp->irq_registered) { + ret = request_irq(sp->ccp_irq, sp_irq_handler, 0, + sp->name, sp); + if (ret) + return ret; + + sp->irq_registered = true; + } + } else { + /* Each sub-device can manage it's own interrupt */ + ret = request_irq(sp->ccp_irq, handler, 0, name, data); + if (ret) + return ret; + } + + return 0; +} + +int sp_request_psp_irq(struct sp_device *sp, irq_handler_t handler, + const char *name, void *data) +{ + int ret; + + if ((sp->psp_irq == sp->ccp_irq) && sp->dev_vdata->ccp_vdata) { + /* Need a common routine to manage all interrupts */ + sp->psp_irq_data = data; + sp->psp_irq_handler = handler; + + if (!sp->irq_registered) { + ret = request_irq(sp->psp_irq, sp_irq_handler, 0, + sp->name, sp); + if (ret) + return ret; + + sp->irq_registered = true; + } + } else { + /* Each sub-device can manage it's own interrupt */ + ret = request_irq(sp->psp_irq, handler, 0, name, data); + if (ret) + return ret; + } + + return 0; +} + +void sp_free_ccp_irq(struct sp_device *sp, void *data) +{ + if ((sp->psp_irq == sp->ccp_irq) && sp->dev_vdata->psp_vdata) { + /* Using common routine to manage all interrupts */ + if (!sp->psp_irq_handler) { + /* Nothing else using it, so free it */ + free_irq(sp->ccp_irq, sp); + + sp->irq_registered = false; + } + + sp->ccp_irq_handler = NULL; + sp->ccp_irq_data = NULL; + } else { + /* Each sub-device can manage it's own interrupt */ + free_irq(sp->ccp_irq, data); + } +} 
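The sp_request_ccp_irq()/sp_request_psp_irq() helpers above, together with the matching sp_free_*_irq() helpers, handle the case where both sub-devices were handed the same vector: the first requester installs the common sp_irq_handler(), which fans out to whichever sub-device handlers are registered, and the line is released only when the last user frees it. A sub-device just supplies its own handler and data; the CCP v5 code earlier in this patch does exactly that:

    /* in ccp5_init() */
    ret = sp_request_ccp_irq(ccp->sp, ccp5_irq_handler, ccp->name, ccp);
    if (ret) {
            dev_err(dev, "unable to allocate an IRQ\n");
            goto e_pool;
    }

    /* in ccp5_destroy() (and on the init error path) */
    sp_free_ccp_irq(ccp->sp, ccp);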
+ +void sp_free_psp_irq(struct sp_device *sp, void *data) +{ + if ((sp->psp_irq == sp->ccp_irq) && sp->dev_vdata->ccp_vdata) { + /* Using common routine to manage all interrupts */ + if (!sp->ccp_irq_handler) { + /* Nothing else using it, so free it */ + free_irq(sp->psp_irq, sp); + + sp->irq_registered = false; + } + + sp->psp_irq_handler = NULL; + sp->psp_irq_data = NULL; + } else { + /* Each sub-device can manage it's own interrupt */ + free_irq(sp->psp_irq, data); + } +} + +/** + * sp_alloc_struct - allocate and initialize the sp_device struct + * + * @dev: device struct of the SP + */ +struct sp_device *sp_alloc_struct(struct device *dev) +{ + struct sp_device *sp; + + sp = devm_kzalloc(dev, sizeof(*sp), GFP_KERNEL); + if (!sp) + return NULL; + + sp->dev = dev; + sp->ord = atomic_inc_return(&sp_ordinal); + snprintf(sp->name, SP_MAX_NAME_LEN, "sp-%u", sp->ord); + + return sp; +} + +int sp_init(struct sp_device *sp) +{ + sp_add_device(sp); + + if (sp->dev_vdata->ccp_vdata) + ccp_dev_init(sp); + + return 0; +} + +void sp_destroy(struct sp_device *sp) +{ + if (sp->dev_vdata->ccp_vdata) + ccp_dev_destroy(sp); + + sp_del_device(sp); +} + +#ifdef CONFIG_PM +int sp_suspend(struct sp_device *sp, pm_message_t state) +{ + int ret; + + if (sp->dev_vdata->ccp_vdata) { + ret = ccp_dev_suspend(sp, state); + if (ret) + return ret; + } + + return 0; +} + +int sp_resume(struct sp_device *sp) +{ + int ret; + + if (sp->dev_vdata->ccp_vdata) { + ret = ccp_dev_resume(sp); + if (ret) + return ret; + } + + return 0; +} +#endif + +static int __init sp_mod_init(void) +{ +#ifdef CONFIG_X86 + int ret; + + ret = sp_pci_init(); + if (ret) + return ret; + + return 0; +#endif + +#ifdef CONFIG_ARM64 + int ret; + + ret = sp_platform_init(); + if (ret) + return ret; + + return 0; +#endif + + return -ENODEV; +} + +static void __exit sp_mod_exit(void) +{ +#ifdef CONFIG_X86 + sp_pci_exit(); +#endif + +#ifdef CONFIG_ARM64 + sp_platform_exit(); +#endif +} + +module_init(sp_mod_init); +module_exit(sp_mod_exit); diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h new file mode 100644 index 000000000000..5ab486ade1ad --- /dev/null +++ b/drivers/crypto/ccp/sp-dev.h @@ -0,0 +1,133 @@ +/* + * AMD Secure Processor driver + * + * Copyright (C) 2017 Advanced Micro Devices, Inc. + * + * Author: Tom Lendacky + * Author: Gary R Hook + * Author: Brijesh Singh + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __SP_DEV_H__ +#define __SP_DEV_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define SP_MAX_NAME_LEN 32 + +#define CACHE_NONE 0x00 +#define CACHE_WB_NO_ALLOC 0xb7 + +/* Structure to hold CCP device data */ +struct ccp_device; +struct ccp_vdata { + const unsigned int version; + const unsigned int dma_chan_attr; + void (*setup)(struct ccp_device *); + const struct ccp_actions *perform; + const unsigned int offset; + const unsigned int rsamax; +}; +/* Structure to hold SP device data */ +struct sp_dev_vdata { + const unsigned int bar; + + const struct ccp_vdata *ccp_vdata; + void *psp_vdata; +}; + +struct sp_device { + struct list_head entry; + + struct device *dev; + + struct sp_dev_vdata *dev_vdata; + unsigned int ord; + char name[SP_MAX_NAME_LEN]; + + /* Bus specific device information */ + void *dev_specific; + + /* I/O area used for device communication. 
*/ + void __iomem *io_map; + + /* DMA caching attribute support */ + unsigned int axcache; + + bool irq_registered; + bool use_tasklet; + + unsigned int ccp_irq; + irq_handler_t ccp_irq_handler; + void *ccp_irq_data; + + unsigned int psp_irq; + irq_handler_t psp_irq_handler; + void *psp_irq_data; + + void *ccp_data; + void *psp_data; +}; + +int sp_pci_init(void); +void sp_pci_exit(void); + +int sp_platform_init(void); +void sp_platform_exit(void); + +struct sp_device *sp_alloc_struct(struct device *dev); + +int sp_init(struct sp_device *sp); +void sp_destroy(struct sp_device *sp); +struct sp_device *sp_get_master(void); + +int sp_suspend(struct sp_device *sp, pm_message_t state); +int sp_resume(struct sp_device *sp); +int sp_request_ccp_irq(struct sp_device *sp, irq_handler_t handler, + const char *name, void *data); +void sp_free_ccp_irq(struct sp_device *sp, void *data); +int sp_request_psp_irq(struct sp_device *sp, irq_handler_t handler, + const char *name, void *data); +void sp_free_psp_irq(struct sp_device *sp, void *data); + +#ifdef CONFIG_CRYPTO_DEV_SP_CCP + +int ccp_dev_init(struct sp_device *sp); +void ccp_dev_destroy(struct sp_device *sp); + +int ccp_dev_suspend(struct sp_device *sp, pm_message_t state); +int ccp_dev_resume(struct sp_device *sp); + +#else /* !CONFIG_CRYPTO_DEV_SP_CCP */ + +static inline int ccp_dev_init(struct sp_device *sp) +{ + return 0; +} +static inline void ccp_dev_destroy(struct sp_device *sp) { } + +static inline int ccp_dev_suspend(struct sp_device *sp, pm_message_t state) +{ + return 0; +} +static inline int ccp_dev_resume(struct sp_device *sp) +{ + return 0; +} +#endif /* CONFIG_CRYPTO_DEV_SP_CCP */ + +#endif diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c new file mode 100644 index 000000000000..9859aa683a28 --- /dev/null +++ b/drivers/crypto/ccp/sp-pci.c @@ -0,0 +1,276 @@ +/* + * AMD Secure Processor device driver + * + * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. + * + * Author: Tom Lendacky + * Author: Gary R Hook + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ccp-dev.h" + +#define MSIX_VECTORS 2 + +struct sp_pci { + int msix_count; + struct msix_entry msix_entry[MSIX_VECTORS]; +}; + +static int sp_get_msix_irqs(struct sp_device *sp) +{ + struct sp_pci *sp_pci = sp->dev_specific; + struct device *dev = sp->dev; + struct pci_dev *pdev = to_pci_dev(dev); + int v, ret; + + for (v = 0; v < ARRAY_SIZE(sp_pci->msix_entry); v++) + sp_pci->msix_entry[v].entry = v; + + ret = pci_enable_msix_range(pdev, sp_pci->msix_entry, 1, v); + if (ret < 0) + return ret; + + sp_pci->msix_count = ret; + sp->use_tasklet = true; + + sp->psp_irq = sp_pci->msix_entry[0].vector; + sp->ccp_irq = (sp_pci->msix_count > 1) ? 
sp_pci->msix_entry[1].vector + : sp_pci->msix_entry[0].vector; + return 0; +} + +static int sp_get_msi_irq(struct sp_device *sp) +{ + struct device *dev = sp->dev; + struct pci_dev *pdev = to_pci_dev(dev); + int ret; + + ret = pci_enable_msi(pdev); + if (ret) + return ret; + + sp->ccp_irq = pdev->irq; + sp->psp_irq = pdev->irq; + + return 0; +} + +static int sp_get_irqs(struct sp_device *sp) +{ + struct device *dev = sp->dev; + int ret; + + ret = sp_get_msix_irqs(sp); + if (!ret) + return 0; + + /* Couldn't get MSI-X vectors, try MSI */ + dev_notice(dev, "could not enable MSI-X (%d), trying MSI\n", ret); + ret = sp_get_msi_irq(sp); + if (!ret) + return 0; + + /* Couldn't get MSI interrupt */ + dev_notice(dev, "could not enable MSI (%d)\n", ret); + + return ret; +} + +static void sp_free_irqs(struct sp_device *sp) +{ + struct sp_pci *sp_pci = sp->dev_specific; + struct device *dev = sp->dev; + struct pci_dev *pdev = to_pci_dev(dev); + + if (sp_pci->msix_count) + pci_disable_msix(pdev); + else if (sp->psp_irq) + pci_disable_msi(pdev); + + sp->ccp_irq = 0; + sp->psp_irq = 0; +} + +static int sp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct sp_device *sp; + struct sp_pci *sp_pci; + struct device *dev = &pdev->dev; + void __iomem * const *iomap_table; + int bar_mask; + int ret; + + ret = -ENOMEM; + sp = sp_alloc_struct(dev); + if (!sp) + goto e_err; + + sp_pci = devm_kzalloc(dev, sizeof(*sp_pci), GFP_KERNEL); + if (!sp_pci) + goto e_err; + + sp->dev_specific = sp_pci; + sp->dev_vdata = (struct sp_dev_vdata *)id->driver_data; + if (!sp->dev_vdata) { + ret = -ENODEV; + dev_err(dev, "missing driver data\n"); + goto e_err; + } + + ret = pcim_enable_device(pdev); + if (ret) { + dev_err(dev, "pcim_enable_device failed (%d)\n", ret); + goto e_err; + } + + bar_mask = pci_select_bars(pdev, IORESOURCE_MEM); + ret = pcim_iomap_regions(pdev, bar_mask, "ccp"); + if (ret) { + dev_err(dev, "pcim_iomap_regions failed (%d)\n", ret); + goto e_err; + } + + iomap_table = pcim_iomap_table(pdev); + if (!iomap_table) { + dev_err(dev, "pcim_iomap_table failed\n"); + ret = -ENOMEM; + goto e_err; + } + + sp->io_map = iomap_table[sp->dev_vdata->bar]; + if (!sp->io_map) { + dev_err(dev, "ioremap failed\n"); + ret = -ENOMEM; + goto e_err; + } + + ret = sp_get_irqs(sp); + if (ret) + goto e_err; + + pci_set_master(pdev); + + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); + if (ret) { + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + if (ret) { + dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", + ret); + goto e_err; + } + } + + dev_set_drvdata(dev, sp); + + ret = sp_init(sp); + if (ret) + goto e_err; + + dev_notice(dev, "enabled\n"); + + return 0; + +e_err: + dev_notice(dev, "initialization failed\n"); + return ret; +} + +static void sp_pci_remove(struct pci_dev *pdev) +{ + struct device *dev = &pdev->dev; + struct sp_device *sp = dev_get_drvdata(dev); + + if (!sp) + return; + + sp_destroy(sp); + + sp_free_irqs(sp); + + dev_notice(dev, "disabled\n"); +} + +#ifdef CONFIG_PM +static int sp_pci_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct device *dev = &pdev->dev; + struct sp_device *sp = dev_get_drvdata(dev); + + return sp_suspend(sp, state); +} + +static int sp_pci_resume(struct pci_dev *pdev) +{ + struct device *dev = &pdev->dev; + struct sp_device *sp = dev_get_drvdata(dev); + + return sp_resume(sp); +} +#endif + +static const struct sp_dev_vdata dev_vdata[] = { + { + .bar = 2, +#ifdef CONFIG_CRYPTO_DEV_SP_CCP + .ccp_vdata = &ccpv3, +#endif + }, + { + 
.bar = 2, +#ifdef CONFIG_CRYPTO_DEV_SP_CCP + .ccp_vdata = &ccpv5a, +#endif + }, + { + .bar = 2, +#ifdef CONFIG_CRYPTO_DEV_SP_CCP + .ccp_vdata = &ccpv5b, +#endif + }, +}; +static const struct pci_device_id sp_pci_table[] = { + { PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&dev_vdata[0] }, + { PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&dev_vdata[1] }, + { PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&dev_vdata[2] }, + /* Last entry must be zero */ + { 0, } +}; +MODULE_DEVICE_TABLE(pci, sp_pci_table); + +static struct pci_driver sp_pci_driver = { + .name = "ccp", + .id_table = sp_pci_table, + .probe = sp_pci_probe, + .remove = sp_pci_remove, +#ifdef CONFIG_PM + .suspend = sp_pci_suspend, + .resume = sp_pci_resume, +#endif +}; + +int sp_pci_init(void) +{ + return pci_register_driver(&sp_pci_driver); +} + +void sp_pci_exit(void) +{ + pci_unregister_driver(&sp_pci_driver); +} diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c new file mode 100644 index 000000000000..71734f254fd1 --- /dev/null +++ b/drivers/crypto/ccp/sp-platform.c @@ -0,0 +1,256 @@ +/* + * AMD Secure Processor device driver + * + * Copyright (C) 2014,2016 Advanced Micro Devices, Inc. + * + * Author: Tom Lendacky + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ccp-dev.h" + +struct sp_platform { + int coherent; + unsigned int irq_count; +}; + +static const struct acpi_device_id sp_acpi_match[]; +static const struct of_device_id sp_of_match[]; + +static struct sp_dev_vdata *sp_get_of_version(struct platform_device *pdev) +{ +#ifdef CONFIG_OF + const struct of_device_id *match; + + match = of_match_node(sp_of_match, pdev->dev.of_node); + if (match && match->data) + return (struct sp_dev_vdata *)match->data; +#endif + return NULL; +} + +static struct sp_dev_vdata *sp_get_acpi_version(struct platform_device *pdev) +{ +#ifdef CONFIG_ACPI + const struct acpi_device_id *match; + + match = acpi_match_device(sp_acpi_match, &pdev->dev); + if (match && match->driver_data) + return (struct sp_dev_vdata *)match->driver_data; +#endif + return NULL; +} + +static int sp_get_irqs(struct sp_device *sp) +{ + struct sp_platform *sp_platform = sp->dev_specific; + struct device *dev = sp->dev; + struct platform_device *pdev = to_platform_device(dev); + unsigned int i, count; + int ret; + + for (i = 0, count = 0; i < pdev->num_resources; i++) { + struct resource *res = &pdev->resource[i]; + + if (resource_type(res) == IORESOURCE_IRQ) + count++; + } + + sp_platform->irq_count = count; + + ret = platform_get_irq(pdev, 0); + if (ret < 0) { + dev_notice(dev, "unable to get IRQ (%d)\n", ret); + return ret; + } + + sp->psp_irq = ret; + if (count == 1) { + sp->ccp_irq = ret; + } else { + ret = platform_get_irq(pdev, 1); + if (ret < 0) { + dev_notice(dev, "unable to get IRQ (%d)\n", ret); + return ret; + } + + sp->ccp_irq = ret; + } + + return 0; +} + +static int sp_platform_probe(struct platform_device *pdev) +{ + struct sp_device *sp; + struct sp_platform *sp_platform; + struct device *dev = &pdev->dev; + enum dev_dma_attr attr; + struct resource *ior; + int ret; + + ret = -ENOMEM; + sp = sp_alloc_struct(dev); + if (!sp) + goto e_err; + + sp_platform = devm_kzalloc(dev, sizeof(*sp_platform), GFP_KERNEL); + if (!sp_platform) 
+ goto e_err; + + sp->dev_specific = sp_platform; + sp->dev_vdata = pdev->dev.of_node ? sp_get_of_version(pdev) + : sp_get_acpi_version(pdev); + if (!sp->dev_vdata) { + ret = -ENODEV; + dev_err(dev, "missing driver data\n"); + goto e_err; + } + + ior = platform_get_resource(pdev, IORESOURCE_MEM, 0); + sp->io_map = devm_ioremap_resource(dev, ior); + if (IS_ERR(sp->io_map)) { + ret = PTR_ERR(sp->io_map); + goto e_err; + } + + attr = device_get_dma_attr(dev); + if (attr == DEV_DMA_NOT_SUPPORTED) { + dev_err(dev, "DMA is not supported"); + goto e_err; + } + + sp_platform->coherent = (attr == DEV_DMA_COHERENT); + if (sp_platform->coherent) + sp->axcache = CACHE_WB_NO_ALLOC; + else + sp->axcache = CACHE_NONE; + + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); + if (ret) { + dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret); + goto e_err; + } + + ret = sp_get_irqs(sp); + if (ret) + goto e_err; + + dev_set_drvdata(dev, sp); + + ret = sp_init(sp); + if (ret) + goto e_err; + + dev_notice(dev, "enabled\n"); + + return 0; + +e_err: + dev_notice(dev, "initialization failed\n"); + return ret; +} + +static int sp_platform_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct sp_device *sp = dev_get_drvdata(dev); + + sp_destroy(sp); + + dev_notice(dev, "disabled\n"); + + return 0; +} + +#ifdef CONFIG_PM +static int sp_platform_suspend(struct platform_device *pdev, + pm_message_t state) +{ + struct device *dev = &pdev->dev; + struct sp_device *sp = dev_get_drvdata(dev); + + return sp_suspend(sp, state); +} + +static int sp_platform_resume(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct sp_device *sp = dev_get_drvdata(dev); + + return sp_resume(sp); +} +#endif + +static const struct sp_dev_vdata dev_vdata[] = { + { + .bar = 0, +#ifdef CONFIG_CRYPTO_DEV_SP_CCP + .ccp_vdata = &ccpv3_platform, +#endif + }, +}; + +#ifdef CONFIG_ACPI +static const struct acpi_device_id sp_acpi_match[] = { + { "AMDI0C00", (kernel_ulong_t)&dev_vdata[0] }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, sp_acpi_match); +#endif + +#ifdef CONFIG_OF +static const struct of_device_id sp_of_match[] = { + { .compatible = "amd,ccp-seattle-v1a", + .data = (const void *)&dev_vdata[0] }, + { }, +}; +MODULE_DEVICE_TABLE(of, sp_of_match); +#endif + +static struct platform_driver sp_platform_driver = { + .driver = { + .name = "ccp", +#ifdef CONFIG_ACPI + .acpi_match_table = sp_acpi_match, +#endif +#ifdef CONFIG_OF + .of_match_table = sp_of_match, +#endif + }, + .probe = sp_platform_probe, + .remove = sp_platform_remove, +#ifdef CONFIG_PM + .suspend = sp_platform_suspend, + .resume = sp_platform_resume, +#endif +}; + +int sp_platform_init(void) +{ + return platform_driver_register(&sp_platform_driver); +} + +void sp_platform_exit(void) +{ + platform_driver_unregister(&sp_platform_driver); +} diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c index fe538e5287a5..eb2a0a73cbed 100644 --- a/drivers/crypto/geode-aes.c +++ b/drivers/crypto/geode-aes.c @@ -1,10 +1,10 @@ /* Copyright (C) 2004-2006, Advanced Micro Devices, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- */ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ #include #include @@ -30,6 +30,7 @@ static inline void _writefield(u32 offset, void *value) { int i; + for (i = 0; i < 4; i++) iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4)); } @@ -39,6 +40,7 @@ static inline void _readfield(u32 offset, void *value) { int i; + for (i = 0; i < 4; i++) ((u32 *) value)[i] = ioread32(_iobase + offset + (i * 4)); } @@ -515,6 +517,7 @@ static void geode_aes_remove(struct pci_dev *dev) static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id) { int ret; + ret = pci_enable_device(dev); if (ret) return ret; @@ -570,7 +573,7 @@ static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id) } static struct pci_device_id geode_aes_tbl[] = { - { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), } , + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), }, { 0, } }; diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c index 0c6a917a9ab8..b87000a0a01c 100644 --- a/drivers/crypto/img-hash.c +++ b/drivers/crypto/img-hash.c @@ -1054,7 +1054,7 @@ static int img_hash_probe(struct platform_device *pdev) static int img_hash_remove(struct platform_device *pdev) { - static struct img_hash_dev *hdev; + struct img_hash_dev *hdev; hdev = platform_get_drvdata(pdev); spin_lock(&img_hash.lock); diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index 1fabd4aee81b..89ba9e85c0f3 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -839,9 +839,10 @@ static int safexcel_probe(struct platform_device *pdev) snprintf(irq_name, 6, "ring%d", i); irq = safexcel_request_ring_irq(pdev, irq_name, safexcel_irq_ring, ring_irq); - - if (irq < 0) + if (irq < 0) { + ret = irq; goto err_clk; + } priv->ring[i].work_data.priv = priv; priv->ring[i].work_data.ring = i; diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index d2207ac5ba19..5438552bc6d7 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -386,7 +386,7 @@ static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm) struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); struct safexcel_crypto_priv *priv = ctx->priv; struct skcipher_request req; - struct safexcel_inv_result result = { 0 }; + struct safexcel_inv_result result = {}; int ring = ctx->base.ring; memset(&req, 0, sizeof(struct skcipher_request)); diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index 3f819399cd95..3980f946874f 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c @@ -419,7 +419,7 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm) struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm); struct safexcel_crypto_priv *priv = ctx->priv; struct ahash_request req; - struct safexcel_inv_result result = { 0 }; + struct safexcel_inv_result result = {}; int ring = ctx->base.ring; memset(&req, 0, sizeof(struct ahash_request)); diff --git a/drivers/crypto/mediatek/mtk-platform.c b/drivers/crypto/mediatek/mtk-platform.c index 000b6500a22d..b182e941b0cd 100644 --- a/drivers/crypto/mediatek/mtk-platform.c +++ 
b/drivers/crypto/mediatek/mtk-platform.c @@ -500,7 +500,7 @@ static int mtk_crypto_probe(struct platform_device *pdev) cryp->irq[i] = platform_get_irq(pdev, i); if (cryp->irq[i] < 0) { dev_err(cryp->dev, "no IRQ:%d resource info\n", i); - return -ENXIO; + return cryp->irq[i]; } } diff --git a/drivers/crypto/mxc-scc.c b/drivers/crypto/mxc-scc.c index ee4be1b0d30b..e01c46387df8 100644 --- a/drivers/crypto/mxc-scc.c +++ b/drivers/crypto/mxc-scc.c @@ -708,8 +708,8 @@ static int mxc_scc_probe(struct platform_device *pdev) for (i = 0; i < 2; i++) { irq = platform_get_irq(pdev, i); if (irq < 0) { - dev_err(dev, "failed to get irq resource\n"); - ret = -EINVAL; + dev_err(dev, "failed to get irq resource: %d\n", irq); + ret = irq; goto err_out; } diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c index 625ee50fd78b..764be3e6933c 100644 --- a/drivers/crypto/mxs-dcp.c +++ b/drivers/crypto/mxs-dcp.c @@ -908,12 +908,16 @@ static int mxs_dcp_probe(struct platform_device *pdev) iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); dcp_vmi_irq = platform_get_irq(pdev, 0); - if (dcp_vmi_irq < 0) + if (dcp_vmi_irq < 0) { + dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_vmi_irq); return dcp_vmi_irq; + } dcp_irq = platform_get_irq(pdev, 1); - if (dcp_irq < 0) + if (dcp_irq < 0) { + dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_irq); return dcp_irq; + } sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL); if (!sdcp) diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index 269451375b63..a9fd8b9e86cd 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c @@ -1730,8 +1730,8 @@ static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc, continue; id = mdesc_get_property(mdesc, tgt, "id", NULL); if (table[*id] != NULL) { - dev_err(&dev->dev, "%s: SPU cpu slot already set.\n", - dev->dev.of_node->full_name); + dev_err(&dev->dev, "%pOF: SPU cpu slot already set.\n", + dev->dev.of_node); return -EINVAL; } cpumask_set_cpu(*id, &p->sharing); @@ -1751,8 +1751,8 @@ static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list, p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL); if (!p) { - dev_err(&dev->dev, "%s: Could not allocate SPU queue.\n", - dev->dev.of_node->full_name); + dev_err(&dev->dev, "%pOF: Could not allocate SPU queue.\n", + dev->dev.of_node); return -ENOMEM; } @@ -1981,41 +1981,39 @@ static void n2_spu_driver_version(void) static int n2_crypto_probe(struct platform_device *dev) { struct mdesc_handle *mdesc; - const char *full_name; struct n2_crypto *np; int err; n2_spu_driver_version(); - full_name = dev->dev.of_node->full_name; - pr_info("Found N2CP at %s\n", full_name); + pr_info("Found N2CP at %pOF\n", dev->dev.of_node); np = alloc_n2cp(); if (!np) { - dev_err(&dev->dev, "%s: Unable to allocate n2cp.\n", - full_name); + dev_err(&dev->dev, "%pOF: Unable to allocate n2cp.\n", + dev->dev.of_node); return -ENOMEM; } err = grab_global_resources(); if (err) { - dev_err(&dev->dev, "%s: Unable to grab " - "global resources.\n", full_name); + dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n", + dev->dev.of_node); goto out_free_n2cp; } mdesc = mdesc_grab(); if (!mdesc) { - dev_err(&dev->dev, "%s: Unable to grab MDESC.\n", - full_name); + dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n", + dev->dev.of_node); err = -ENODEV; goto out_free_global; } err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp"); if (err) { - dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n", - full_name); + dev_err(&dev->dev, "%pOF: Unable to grab IRQ 
props.\n", + dev->dev.of_node); mdesc_release(mdesc); goto out_free_global; } @@ -2026,15 +2024,15 @@ static int n2_crypto_probe(struct platform_device *dev) mdesc_release(mdesc); if (err) { - dev_err(&dev->dev, "%s: CWQ MDESC scan failed.\n", - full_name); + dev_err(&dev->dev, "%pOF: CWQ MDESC scan failed.\n", + dev->dev.of_node); goto out_free_global; } err = n2_register_algs(); if (err) { - dev_err(&dev->dev, "%s: Unable to register algorithms.\n", - full_name); + dev_err(&dev->dev, "%pOF: Unable to register algorithms.\n", + dev->dev.of_node); goto out_free_spu_list; } @@ -2092,42 +2090,40 @@ static void free_ncp(struct n2_mau *mp) static int n2_mau_probe(struct platform_device *dev) { struct mdesc_handle *mdesc; - const char *full_name; struct n2_mau *mp; int err; n2_spu_driver_version(); - full_name = dev->dev.of_node->full_name; - pr_info("Found NCP at %s\n", full_name); + pr_info("Found NCP at %pOF\n", dev->dev.of_node); mp = alloc_ncp(); if (!mp) { - dev_err(&dev->dev, "%s: Unable to allocate ncp.\n", - full_name); + dev_err(&dev->dev, "%pOF: Unable to allocate ncp.\n", + dev->dev.of_node); return -ENOMEM; } err = grab_global_resources(); if (err) { - dev_err(&dev->dev, "%s: Unable to grab " - "global resources.\n", full_name); + dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n", + dev->dev.of_node); goto out_free_ncp; } mdesc = mdesc_grab(); if (!mdesc) { - dev_err(&dev->dev, "%s: Unable to grab MDESC.\n", - full_name); + dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n", + dev->dev.of_node); err = -ENODEV; goto out_free_global; } err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp"); if (err) { - dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n", - full_name); + dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n", + dev->dev.of_node); mdesc_release(mdesc); goto out_free_global; } @@ -2138,8 +2134,8 @@ static int n2_mau_probe(struct platform_device *dev) mdesc_release(mdesc); if (err) { - dev_err(&dev->dev, "%s: MAU MDESC scan failed.\n", - full_name); + dev_err(&dev->dev, "%pOF: MAU MDESC scan failed.\n", + dev->dev.of_node); goto out_free_global; } diff --git a/drivers/crypto/nx/Kconfig b/drivers/crypto/nx/Kconfig index ad7552a6998c..cd5dda9c48f4 100644 --- a/drivers/crypto/nx/Kconfig +++ b/drivers/crypto/nx/Kconfig @@ -38,6 +38,7 @@ config CRYPTO_DEV_NX_COMPRESS_PSERIES config CRYPTO_DEV_NX_COMPRESS_POWERNV tristate "Compression acceleration support on PowerNV platform" depends on PPC_POWERNV + depends on PPC_VAS default y help Support for PowerPC Nest (NX) compression acceleration. This diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c index 1710f80a09ec..874ddf5e9087 100644 --- a/drivers/crypto/nx/nx-842-powernv.c +++ b/drivers/crypto/nx/nx-842-powernv.c @@ -22,6 +22,8 @@ #include #include +#include +#include MODULE_LICENSE("GPL"); MODULE_AUTHOR("Dan Streetman "); @@ -31,6 +33,9 @@ MODULE_ALIAS_CRYPTO("842-nx"); #define WORKMEM_ALIGN (CRB_ALIGN) #define CSB_WAIT_MAX (5000) /* ms */ +#define VAS_RETRIES (10) +/* # of requests allowed per RxFIFO at a time. 
0 for unlimited */ +#define MAX_CREDITS_PER_RXFIFO (1024) struct nx842_workmem { /* Below fields must be properly aligned */ @@ -41,19 +46,34 @@ struct nx842_workmem { ktime_t start; + struct vas_window *txwin; /* Used with VAS function */ char padding[WORKMEM_ALIGN]; /* unused, to allow alignment */ } __packed __aligned(WORKMEM_ALIGN); struct nx842_coproc { unsigned int chip_id; unsigned int ct; - unsigned int ci; + unsigned int ci; /* Coprocessor instance, used with icswx */ + struct { + struct vas_window *rxwin; + int id; + } vas; struct list_head list; }; +/* + * Send the request to NX engine on the chip for the corresponding CPU + * where the process is executing. Use with VAS function. + */ +static DEFINE_PER_CPU(struct nx842_coproc *, coproc_inst); + /* no cpu hotplug on powernv, so this list never changes after init */ static LIST_HEAD(nx842_coprocs); -static unsigned int nx842_ct; +static unsigned int nx842_ct; /* used in icswx function */ + +static int (*nx842_powernv_exec)(const unsigned char *in, + unsigned int inlen, unsigned char *out, + unsigned int *outlenp, void *workmem, int fc); /** * setup_indirect_dde - Setup an indirect DDE @@ -238,6 +258,13 @@ static int wait_for_csb(struct nx842_workmem *wmem, case CSB_CC_TEMPL_OVERFLOW: CSB_ERR(csb, "Compressed data template shows data past end"); return -EINVAL; + case CSB_CC_EXCEED_BYTE_COUNT: /* P9 or later */ + /* + * DDE byte count exceeds the limit specified in Maximum + * byte count register. + */ + CSB_ERR(csb, "DDE byte count exceeds the limit"); + return -EINVAL; /* these should not happen */ case CSB_CC_INVALID_ALIGN: @@ -279,9 +306,17 @@ static int wait_for_csb(struct nx842_workmem *wmem, CSB_ERR(csb, "Too many DDEs in DDL"); return -EINVAL; case CSB_CC_TRANSPORT: + case CSB_CC_INVALID_CRB: /* P9 or later */ /* shouldn't happen, we setup CRB correctly */ CSB_ERR(csb, "Invalid CRB"); return -EINVAL; + case CSB_CC_INVALID_DDE: /* P9 or later */ + /* + * shouldn't happen, setup_direct/indirect_dde creates + * DDE right + */ + CSB_ERR(csb, "Invalid DDE"); + return -EINVAL; case CSB_CC_SEGMENTED_DDL: /* shouldn't happen, setup_ddl creates DDL right */ CSB_ERR(csb, "Segmented DDL error"); @@ -325,6 +360,9 @@ static int wait_for_csb(struct nx842_workmem *wmem, case CSB_CC_HW: CSB_ERR(csb, "Correctable hardware error"); return -EPROTO; + case CSB_CC_HW_EXPIRED_TIMER: /* P9 or later */ + CSB_ERR(csb, "Job did not finish within allowed time"); + return -EPROTO; default: CSB_ERR(csb, "Invalid CC %d", csb->cc); @@ -353,8 +391,42 @@ static int wait_for_csb(struct nx842_workmem *wmem, return 0; } +static int nx842_config_crb(const unsigned char *in, unsigned int inlen, + unsigned char *out, unsigned int outlen, + struct nx842_workmem *wmem) +{ + struct coprocessor_request_block *crb; + struct coprocessor_status_block *csb; + u64 csb_addr; + int ret; + + crb = &wmem->crb; + csb = &crb->csb; + + /* Clear any previous values */ + memset(crb, 0, sizeof(*crb)); + + /* set up DDLs */ + ret = setup_ddl(&crb->source, wmem->ddl_in, + (unsigned char *)in, inlen, true); + if (ret) + return ret; + + ret = setup_ddl(&crb->target, wmem->ddl_out, + out, outlen, false); + if (ret) + return ret; + + /* set up CRB's CSB addr */ + csb_addr = nx842_get_pa(csb) & CRB_CSB_ADDRESS; + csb_addr |= CRB_CSB_AT; /* Addrs are phys */ + crb->csb_addr = cpu_to_be64(csb_addr); + + return 0; +} + /** - * nx842_powernv_function - compress/decompress data using the 842 algorithm + * nx842_exec_icswx - compress/decompress data using the 842 algorithm * * 
(De)compression provided by the NX842 coprocessor on IBM PowerNV systems. * This compresses or decompresses the provided input buffer into the provided @@ -384,7 +456,7 @@ static int wait_for_csb(struct nx842_workmem *wmem, * -ETIMEDOUT hardware did not complete operation in reasonable time * -EINTR operation was aborted */ -static int nx842_powernv_function(const unsigned char *in, unsigned int inlen, +static int nx842_exec_icswx(const unsigned char *in, unsigned int inlen, unsigned char *out, unsigned int *outlenp, void *workmem, int fc) { @@ -392,7 +464,6 @@ static int nx842_powernv_function(const unsigned char *in, unsigned int inlen, struct coprocessor_status_block *csb; struct nx842_workmem *wmem; int ret; - u64 csb_addr; u32 ccw; unsigned int outlen = *outlenp; @@ -406,32 +477,18 @@ static int nx842_powernv_function(const unsigned char *in, unsigned int inlen, return -ENODEV; } + ret = nx842_config_crb(in, inlen, out, outlen, wmem); + if (ret) + return ret; + crb = &wmem->crb; csb = &crb->csb; - /* Clear any previous values */ - memset(crb, 0, sizeof(*crb)); - - /* set up DDLs */ - ret = setup_ddl(&crb->source, wmem->ddl_in, - (unsigned char *)in, inlen, true); - if (ret) - return ret; - ret = setup_ddl(&crb->target, wmem->ddl_out, - out, outlen, false); - if (ret) - return ret; - /* set up CCW */ ccw = 0; - ccw = SET_FIELD(ccw, CCW_CT, nx842_ct); - ccw = SET_FIELD(ccw, CCW_CI_842, 0); /* use 0 for hw auto-selection */ - ccw = SET_FIELD(ccw, CCW_FC_842, fc); - - /* set up CRB's CSB addr */ - csb_addr = nx842_get_pa(csb) & CRB_CSB_ADDRESS; - csb_addr |= CRB_CSB_AT; /* Addrs are phys */ - crb->csb_addr = cpu_to_be64(csb_addr); + ccw = SET_FIELD(CCW_CT, ccw, nx842_ct); + ccw = SET_FIELD(CCW_CI_842, ccw, 0); /* use 0 for hw auto-selection */ + ccw = SET_FIELD(CCW_FC_842, ccw, fc); wmem->start = ktime_get(); @@ -470,6 +527,104 @@ static int nx842_powernv_function(const unsigned char *in, unsigned int inlen, return ret; } +/** + * nx842_exec_vas - compress/decompress data using the 842 algorithm + * + * (De)compression provided by the NX842 coprocessor on IBM PowerNV systems. + * This compresses or decompresses the provided input buffer into the provided + * output buffer. + * + * Upon return from this function @outlen contains the length of the + * output data. If there is an error then @outlen will be 0 and an + * error will be specified by the return code from this function. + * + * The @workmem buffer should only be used by one function call at a time. 
+ * + * @in: input buffer pointer + * @inlen: input buffer size + * @out: output buffer pointer + * @outlenp: output buffer size pointer + * @workmem: working memory buffer pointer, size determined by + * nx842_powernv_driver.workmem_size + * @fc: function code, see CCW Function Codes in nx-842.h + * + * Returns: + * 0 Success, output of length @outlenp stored in the buffer + * at @out + * -ENODEV Hardware unavailable + * -ENOSPC Output buffer is too small + * -EMSGSIZE Input buffer too large + * -EINVAL buffer constraints do not fit nx842_constraints + * -EPROTO hardware error during operation + * -ETIMEDOUT hardware did not complete operation in reasonable time + * -EINTR operation was aborted + */ +static int nx842_exec_vas(const unsigned char *in, unsigned int inlen, + unsigned char *out, unsigned int *outlenp, + void *workmem, int fc) +{ + struct coprocessor_request_block *crb; + struct coprocessor_status_block *csb; + struct nx842_workmem *wmem; + struct vas_window *txwin; + int ret, i = 0; + u32 ccw; + unsigned int outlen = *outlenp; + + wmem = PTR_ALIGN(workmem, WORKMEM_ALIGN); + + *outlenp = 0; + + crb = &wmem->crb; + csb = &crb->csb; + + ret = nx842_config_crb(in, inlen, out, outlen, wmem); + if (ret) + return ret; + + ccw = 0; + ccw = SET_FIELD(CCW_FC_842, ccw, fc); + crb->ccw = cpu_to_be32(ccw); + + txwin = wmem->txwin; + /* shouldn't happen, we don't load without a coproc */ + if (!txwin) { + pr_err_ratelimited("NX-842 coprocessor is not available"); + return -ENODEV; + } + + do { + wmem->start = ktime_get(); + preempt_disable(); + /* + * VAS copy CRB into L2 cache. Refer . + * @crb and @offset. + */ + vas_copy_crb(crb, 0); + + /* + * VAS paste previously copied CRB to NX. + * @txwin, @offset and @last (must be true). + */ + ret = vas_paste_crb(txwin, 0, 1); + preempt_enable(); + /* + * Retry copy/paste function for VAS failures.
+ */ + } while (ret && (i++ < VAS_RETRIES)); + + if (ret) { + pr_err_ratelimited("VAS copy/paste failed\n"); + return ret; + } + + ret = wait_for_csb(wmem, csb); + if (!ret) + *outlenp = be32_to_cpu(csb->count); + + return ret; +} + /** * nx842_powernv_compress - Compress data using the 842 algorithm * @@ -488,13 +643,13 @@ static int nx842_powernv_function(const unsigned char *in, unsigned int inlen, * @workmem: working memory buffer pointer, size determined by * nx842_powernv_driver.workmem_size * - * Returns: see @nx842_powernv_function() + * Returns: see @nx842_powernv_exec() */ static int nx842_powernv_compress(const unsigned char *in, unsigned int inlen, unsigned char *out, unsigned int *outlenp, void *wmem) { - return nx842_powernv_function(in, inlen, out, outlenp, + return nx842_powernv_exec(in, inlen, out, outlenp, wmem, CCW_FC_842_COMP_CRC); } @@ -516,16 +671,219 @@ static int nx842_powernv_compress(const unsigned char *in, unsigned int inlen, * @workmem: working memory buffer pointer, size determined by * nx842_powernv_driver.workmem_size * - * Returns: see @nx842_powernv_function() + * Returns: see @nx842_powernv_exec() */ static int nx842_powernv_decompress(const unsigned char *in, unsigned int inlen, unsigned char *out, unsigned int *outlenp, void *wmem) { - return nx842_powernv_function(in, inlen, out, outlenp, + return nx842_powernv_exec(in, inlen, out, outlenp, wmem, CCW_FC_842_DECOMP_CRC); } +static inline void nx842_add_coprocs_list(struct nx842_coproc *coproc, + int chipid) +{ + coproc->chip_id = chipid; + INIT_LIST_HEAD(&coproc->list); + list_add(&coproc->list, &nx842_coprocs); } + +/* + * Identify the chip ID for each CPU and save the coprocessor address for the + * corresponding NX engine in percpu coproc_inst. + * coproc_inst is used in crypto_init to open a send window on the NX instance + * for the corresponding CPU / chip where the open request is executed. + */ +static void nx842_set_per_cpu_coproc(struct nx842_coproc *coproc) +{ + unsigned int i, chip_id; + + for_each_possible_cpu(i) { + chip_id = cpu_to_chip_id(i); + + if (coproc->chip_id == chip_id) + per_cpu(coproc_inst, i) = coproc; + } +} + + +static struct vas_window *nx842_alloc_txwin(struct nx842_coproc *coproc) +{ + struct vas_window *txwin = NULL; + struct vas_tx_win_attr txattr; + + /* + * Kernel requests will be high priority. So open send + * windows only for high priority RxFIFO entries. + */ + vas_init_tx_win_attr(&txattr, coproc->ct); + txattr.lpid = 0; /* lpid is 0 for kernel requests */ + txattr.pid = 0; /* pid is 0 for kernel requests */ + + /* + * Open a VAS send window which is used to send requests to NX.
+ */ + txwin = vas_tx_win_open(coproc->vas.id, coproc->ct, &txattr); + if (IS_ERR(txwin)) { + pr_err("ibm,nx-842: Cannot open TX window: %ld\n", + PTR_ERR(txwin)); + return NULL; + } + + return txwin; +} + +static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id, + int vasid) +{ + struct vas_window *rxwin = NULL; + struct vas_rx_win_attr rxattr; + struct nx842_coproc *coproc; + u32 lpid, pid, tid, fifo_size; + u64 rx_fifo; + const char *priority; + int ret; + + ret = of_property_read_u64(dn, "rx-fifo-address", &rx_fifo); + if (ret) { + pr_err("Missing rx-fifo-address property\n"); + return ret; + } + + ret = of_property_read_u32(dn, "rx-fifo-size", &fifo_size); + if (ret) { + pr_err("Missing rx-fifo-size property\n"); + return ret; + } + + ret = of_property_read_u32(dn, "lpid", &lpid); + if (ret) { + pr_err("Missing lpid property\n"); + return ret; + } + + ret = of_property_read_u32(dn, "pid", &pid); + if (ret) { + pr_err("Missing pid property\n"); + return ret; + } + + ret = of_property_read_u32(dn, "tid", &tid); + if (ret) { + pr_err("Missing tid property\n"); + return ret; + } + + ret = of_property_read_string(dn, "priority", &priority); + if (ret) { + pr_err("Missing priority property\n"); + return ret; + } + + coproc = kzalloc(sizeof(*coproc), GFP_KERNEL); + if (!coproc) + return -ENOMEM; + + if (!strcmp(priority, "High")) + coproc->ct = VAS_COP_TYPE_842_HIPRI; + else if (!strcmp(priority, "Normal")) + coproc->ct = VAS_COP_TYPE_842; + else { + pr_err("Invalid RxFIFO priority value\n"); + ret = -EINVAL; + goto err_out; + } + + vas_init_rx_win_attr(&rxattr, coproc->ct); + rxattr.rx_fifo = (void *)rx_fifo; + rxattr.rx_fifo_size = fifo_size; + rxattr.lnotify_lpid = lpid; + rxattr.lnotify_pid = pid; + rxattr.lnotify_tid = tid; + rxattr.wcreds_max = MAX_CREDITS_PER_RXFIFO; + + /* + * Open a VAS receive window which is used to configure RxFIFO + * for NX. + */ + rxwin = vas_rx_win_open(vasid, coproc->ct, &rxattr); + if (IS_ERR(rxwin)) { + ret = PTR_ERR(rxwin); + pr_err("setting RxFIFO with VAS failed: %d\n", + ret); + goto err_out; + } + + coproc->vas.rxwin = rxwin; + coproc->vas.id = vasid; + nx842_add_coprocs_list(coproc, chip_id); + + /* + * Kernel requests use only high priority FIFOs. So save coproc + * info in percpu coproc_inst which will be used to open send + * windows for crypto open requests later.
+ */ + if (coproc->ct == VAS_COP_TYPE_842_HIPRI) + nx842_set_per_cpu_coproc(coproc); + + return 0; + +err_out: + kfree(coproc); + return ret; +} + + +static int __init nx842_powernv_probe_vas(struct device_node *pn) +{ + struct device_node *dn; + int chip_id, vasid, ret = 0; + int nx_fifo_found = 0; + + chip_id = of_get_ibm_chip_id(pn); + if (chip_id < 0) { + pr_err("ibm,chip-id missing\n"); + return -EINVAL; + } + + for_each_compatible_node(dn, NULL, "ibm,power9-vas-x") { + if (of_get_ibm_chip_id(dn) == chip_id) + break; + } + + if (!dn) { + pr_err("Missing VAS device node\n"); + return -EINVAL; + } + + if (of_property_read_u32(dn, "ibm,vas-id", &vasid)) { + pr_err("Missing ibm,vas-id device property\n"); + of_node_put(dn); + return -EINVAL; + } + + of_node_put(dn); + + for_each_child_of_node(pn, dn) { + if (of_device_is_compatible(dn, "ibm,p9-nx-842")) { + ret = vas_cfg_coproc_info(dn, chip_id, vasid); + if (ret) { + of_node_put(dn); + return ret; + } + nx_fifo_found++; + } + } + + if (!nx_fifo_found) { + pr_err("NX842 FIFO nodes are missing\n"); + ret = -EINVAL; + } + + return ret; +} + static int __init nx842_powernv_probe(struct device_node *dn) { struct nx842_coproc *coproc; @@ -552,11 +910,9 @@ static int __init nx842_powernv_probe(struct device_node *dn) if (!coproc) return -ENOMEM; - coproc->chip_id = chip_id; coproc->ct = ct; coproc->ci = ci; - INIT_LIST_HEAD(&coproc->list); - list_add(&coproc->list, &nx842_coprocs); + nx842_add_coprocs_list(coproc, chip_id); pr_info("coprocessor found on chip %d, CT %d CI %d\n", chip_id, ct, ci); @@ -569,6 +925,19 @@ static int __init nx842_powernv_probe(struct device_node *dn) return 0; } +static void nx842_delete_coprocs(void) +{ + struct nx842_coproc *coproc, *n; + + list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) { + if (coproc->vas.rxwin) + vas_win_close(coproc->vas.rxwin); + + list_del(&coproc->list); + kfree(coproc); + } +} + static struct nx842_constraints nx842_powernv_constraints = { .alignment = DDE_BUFFER_ALIGN, .multiple = DDE_BUFFER_LAST_MULT, @@ -585,6 +954,46 @@ static struct nx842_driver nx842_powernv_driver = { .decompress = nx842_powernv_decompress, }; +static int nx842_powernv_crypto_init_vas(struct crypto_tfm *tfm) +{ + struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + struct nx842_workmem *wmem; + struct nx842_coproc *coproc; + int ret; + + ret = nx842_crypto_init(tfm, &nx842_powernv_driver); + + if (ret) + return ret; + + wmem = PTR_ALIGN((struct nx842_workmem *)ctx->wmem, WORKMEM_ALIGN); + coproc = per_cpu(coproc_inst, smp_processor_id()); + + ret = -EINVAL; + if (coproc && coproc->vas.rxwin) { + wmem->txwin = nx842_alloc_txwin(coproc); + if (!IS_ERR(wmem->txwin)) + return 0; + + ret = PTR_ERR(wmem->txwin); + } + + return ret; +} + +void nx842_powernv_crypto_exit_vas(struct crypto_tfm *tfm) +{ + struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + struct nx842_workmem *wmem; + + wmem = PTR_ALIGN((struct nx842_workmem *)ctx->wmem, WORKMEM_ALIGN); + + if (wmem && wmem->txwin) + vas_win_close(wmem->txwin); + + nx842_crypto_exit(tfm); +} + static int nx842_powernv_crypto_init(struct crypto_tfm *tfm) { return nx842_crypto_init(tfm, &nx842_powernv_driver); @@ -618,21 +1027,31 @@ static __init int nx842_powernv_init(void) BUILD_BUG_ON(DDE_BUFFER_ALIGN % DDE_BUFFER_SIZE_MULT); BUILD_BUG_ON(DDE_BUFFER_SIZE_MULT % DDE_BUFFER_LAST_MULT); - for_each_compatible_node(dn, NULL, "ibm,power-nx") - nx842_powernv_probe(dn); + for_each_compatible_node(dn, NULL, "ibm,power9-nx") { + ret = nx842_powernv_probe_vas(dn); + if (ret) 
{ + nx842_delete_coprocs(); + return ret; + } + } - if (!nx842_ct) - return -ENODEV; + if (list_empty(&nx842_coprocs)) { + for_each_compatible_node(dn, NULL, "ibm,power-nx") + nx842_powernv_probe(dn); + + if (!nx842_ct) + return -ENODEV; + + nx842_powernv_exec = nx842_exec_icswx; + } else { + nx842_powernv_exec = nx842_exec_vas; + nx842_powernv_alg.cra_init = nx842_powernv_crypto_init_vas; + nx842_powernv_alg.cra_exit = nx842_powernv_crypto_exit_vas; + } ret = crypto_register_alg(&nx842_powernv_alg); if (ret) { - struct nx842_coproc *coproc, *n; - - list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) { - list_del(&coproc->list); - kfree(coproc); - } - + nx842_delete_coprocs(); return ret; } @@ -642,13 +1061,8 @@ module_init(nx842_powernv_init); static void __exit nx842_powernv_exit(void) { - struct nx842_coproc *coproc, *n; - crypto_unregister_alg(&nx842_powernv_alg); - list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) { - list_del(&coproc->list); - kfree(coproc); - } + nx842_delete_coprocs(); } module_exit(nx842_powernv_exit); diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c index d94e25df503b..da3cb8c35ec7 100644 --- a/drivers/crypto/nx/nx-842.c +++ b/drivers/crypto/nx/nx-842.c @@ -116,7 +116,7 @@ int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver) spin_lock_init(&ctx->lock); ctx->driver = driver; - ctx->wmem = kmalloc(driver->workmem_size, GFP_KERNEL); + ctx->wmem = kzalloc(driver->workmem_size, GFP_KERNEL); ctx->sbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER); ctx->dbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER); if (!ctx->wmem || !ctx->sbounce || !ctx->dbounce) { diff --git a/drivers/crypto/nx/nx-842.h b/drivers/crypto/nx/nx-842.h index a4eee3bba937..bb2f31792683 100644 --- a/drivers/crypto/nx/nx-842.h +++ b/drivers/crypto/nx/nx-842.h @@ -76,9 +76,17 @@ #define CSB_CC_DECRYPT_OVERFLOW (64) /* asym crypt codes */ #define CSB_CC_MINV_OVERFLOW (128) +/* + * HW error - Job did not finish in the maximum time allowed. + * Job terminated. + */ +#define CSB_CC_HW_EXPIRED_TIMER (224) /* These are reserved for hypervisor use */ #define CSB_CC_HYP_RESERVE_START (240) #define CSB_CC_HYP_RESERVE_END (253) +#define CSB_CC_HYP_RESERVE_P9_END (251) +/* No valid interrupt server (P9 or later). */ +#define CSB_CC_HYP_RESERVE_NO_INTR_SERVER (252) #define CSB_CC_HYP_NO_HW (254) #define CSB_CC_HYP_HANG_ABORTED (255) @@ -100,11 +108,6 @@ static inline unsigned long nx842_get_pa(void *addr) return page_to_phys(vmalloc_to_page(addr)) + offset_in_page(addr); } -/* Get/Set bit fields */ -#define MASK_LSH(m) (__builtin_ffsl(m) - 1) -#define GET_FIELD(v, m) (((v) & (m)) >> MASK_LSH(m)) -#define SET_FIELD(v, m, val) (((v) & ~(m)) | (((val) << MASK_LSH(m)) & (m))) - /** * This provides the driver's constraints. Different nx842 implementations * may have varying requirements. 
The constraints are: diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 5120a17731d0..c376a3ee7c2c 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -1095,6 +1095,7 @@ static int omap_aes_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(dev, "can't get IRQ resource\n"); + err = irq; goto err_irq; } diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c index 0bcab00e0ff5..d37c9506c36c 100644 --- a/drivers/crypto/omap-des.c +++ b/drivers/crypto/omap-des.c @@ -1023,7 +1023,8 @@ static int omap_des_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) { - dev_err(dev, "can't get IRQ resource\n"); + dev_err(dev, "can't get IRQ resource: %d\n", irq); + err = irq; goto err_irq; } diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 9ad9d399daf1..c40ac30ec002 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -2133,7 +2133,7 @@ static int omap_sham_probe(struct platform_device *pdev) static int omap_sham_remove(struct platform_device *pdev) { - static struct omap_sham_dev *dd; + struct omap_sham_dev *dd; int i, j; dd = platform_get_drvdata(pdev); diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c index d3e25c37dc33..da8a2d3b5e9a 100644 --- a/drivers/crypto/qat/qat_common/adf_aer.c +++ b/drivers/crypto/qat/qat_common/adf_aer.c @@ -208,7 +208,7 @@ static pci_ers_result_t adf_slot_reset(struct pci_dev *pdev) static void adf_resume(struct pci_dev *pdev) { dev_info(&pdev->dev, "Acceleration driver reset completed\n"); - dev_info(&pdev->dev, "Device is up and runnig\n"); + dev_info(&pdev->dev, "Device is up and running\n"); } static const struct pci_error_handlers adf_err_handler = { diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c index d0f80c6241f9..c9d622abd90c 100644 --- a/drivers/crypto/rockchip/rk3288_crypto.c +++ b/drivers/crypto/rockchip/rk3288_crypto.c @@ -169,50 +169,82 @@ static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id) { struct rk_crypto_info *dev = platform_get_drvdata(dev_id); u32 interrupt_status; - int err = 0; spin_lock(&dev->lock); interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS); CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status); + if (interrupt_status & 0x0a) { dev_warn(dev->dev, "DMA Error\n"); - err = -EFAULT; - } else if (interrupt_status & 0x05) { - err = dev->update(dev); + dev->err = -EFAULT; } - if (err) - dev->complete(dev, err); + tasklet_schedule(&dev->done_task); + spin_unlock(&dev->lock); return IRQ_HANDLED; } -static void rk_crypto_tasklet_cb(unsigned long data) +static int rk_crypto_enqueue(struct rk_crypto_info *dev, + struct crypto_async_request *async_req) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&dev->lock, flags); + ret = crypto_enqueue_request(&dev->queue, async_req); + if (dev->busy) { + spin_unlock_irqrestore(&dev->lock, flags); + return ret; + } + dev->busy = true; + spin_unlock_irqrestore(&dev->lock, flags); + tasklet_schedule(&dev->queue_task); + + return ret; +} + +static void rk_crypto_queue_task_cb(unsigned long data) { struct rk_crypto_info *dev = (struct rk_crypto_info *)data; struct crypto_async_request *async_req, *backlog; unsigned long flags; int err = 0; + dev->err = 0; spin_lock_irqsave(&dev->lock, flags); backlog = crypto_get_backlog(&dev->queue); async_req = crypto_dequeue_request(&dev->queue); - spin_unlock_irqrestore(&dev->lock, flags); + if 
(!async_req) { - dev_err(dev->dev, "async_req is NULL !!\n"); + dev->busy = false; + spin_unlock_irqrestore(&dev->lock, flags); return; } + spin_unlock_irqrestore(&dev->lock, flags); + if (backlog) { backlog->complete(backlog, -EINPROGRESS); backlog = NULL; } - if (crypto_tfm_alg_type(async_req->tfm) == CRYPTO_ALG_TYPE_ABLKCIPHER) - dev->ablk_req = ablkcipher_request_cast(async_req); - else - dev->ahash_req = ahash_request_cast(async_req); + dev->async_req = async_req; err = dev->start(dev); if (err) - dev->complete(dev, err); + dev->complete(dev->async_req, err); +} + +static void rk_crypto_done_task_cb(unsigned long data) +{ + struct rk_crypto_info *dev = (struct rk_crypto_info *)data; + + if (dev->err) { + dev->complete(dev->async_req, dev->err); + return; + } + + dev->err = dev->update(dev); + if (dev->err) + dev->complete(dev->async_req, dev->err); } static struct rk_crypto_tmp *rk_cipher_algs[] = { @@ -361,14 +393,18 @@ static int rk_crypto_probe(struct platform_device *pdev) crypto_info->dev = &pdev->dev; platform_set_drvdata(pdev, crypto_info); - tasklet_init(&crypto_info->crypto_tasklet, - rk_crypto_tasklet_cb, (unsigned long)crypto_info); + tasklet_init(&crypto_info->queue_task, + rk_crypto_queue_task_cb, (unsigned long)crypto_info); + tasklet_init(&crypto_info->done_task, + rk_crypto_done_task_cb, (unsigned long)crypto_info); crypto_init_queue(&crypto_info->queue, 50); crypto_info->enable_clk = rk_crypto_enable_clk; crypto_info->disable_clk = rk_crypto_disable_clk; crypto_info->load_data = rk_load_data; crypto_info->unload_data = rk_unload_data; + crypto_info->enqueue = rk_crypto_enqueue; + crypto_info->busy = false; err = rk_crypto_register(crypto_info); if (err) { @@ -380,7 +416,8 @@ static int rk_crypto_probe(struct platform_device *pdev) return 0; err_register_alg: - tasklet_kill(&crypto_info->crypto_tasklet); + tasklet_kill(&crypto_info->queue_task); + tasklet_kill(&crypto_info->done_task); err_crypto: return err; } @@ -390,7 +427,8 @@ static int rk_crypto_remove(struct platform_device *pdev) struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev); rk_crypto_unregister(); - tasklet_kill(&crypto_tmp->crypto_tasklet); + tasklet_kill(&crypto_tmp->done_task); + tasklet_kill(&crypto_tmp->queue_task); return 0; } diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h index d7b71fea320b..ab6a1b4c40f0 100644 --- a/drivers/crypto/rockchip/rk3288_crypto.h +++ b/drivers/crypto/rockchip/rk3288_crypto.h @@ -190,9 +190,10 @@ struct rk_crypto_info { void __iomem *reg; int irq; struct crypto_queue queue; - struct tasklet_struct crypto_tasklet; - struct ablkcipher_request *ablk_req; - struct ahash_request *ahash_req; + struct tasklet_struct queue_task; + struct tasklet_struct done_task; + struct crypto_async_request *async_req; + int err; /* device lock */ spinlock_t lock; @@ -208,18 +209,20 @@ struct rk_crypto_info { size_t nents; unsigned int total; unsigned int count; - u32 mode; dma_addr_t addr_in; dma_addr_t addr_out; + bool busy; int (*start)(struct rk_crypto_info *dev); int (*update)(struct rk_crypto_info *dev); - void (*complete)(struct rk_crypto_info *dev, int err); + void (*complete)(struct crypto_async_request *base, int err); int (*enable_clk)(struct rk_crypto_info *dev); void (*disable_clk)(struct rk_crypto_info *dev); int (*load_data)(struct rk_crypto_info *dev, struct scatterlist *sg_src, struct scatterlist *sg_dst); void (*unload_data)(struct rk_crypto_info *dev); + int (*enqueue)(struct rk_crypto_info *dev, + struct 
crypto_async_request *async_req); }; /* the private variable of hash */ @@ -232,12 +235,14 @@ struct rk_ahash_ctx { /* the privete variable of hash for fallback */ struct rk_ahash_rctx { struct ahash_request fallback_req; + u32 mode; }; /* the private variable of cipher */ struct rk_cipher_ctx { struct rk_crypto_info *dev; unsigned int keylen; + u32 mode; }; enum alg_type { diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c index b5a3afe222e4..639c15c5364b 100644 --- a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c +++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c @@ -15,35 +15,19 @@ #define RK_CRYPTO_DEC BIT(0) -static void rk_crypto_complete(struct rk_crypto_info *dev, int err) +static void rk_crypto_complete(struct crypto_async_request *base, int err) { - if (dev->ablk_req->base.complete) - dev->ablk_req->base.complete(&dev->ablk_req->base, err); + if (base->complete) + base->complete(base, err); } static int rk_handle_req(struct rk_crypto_info *dev, struct ablkcipher_request *req) { - unsigned long flags; - int err; - if (!IS_ALIGNED(req->nbytes, dev->align_size)) return -EINVAL; - - dev->left_bytes = req->nbytes; - dev->total = req->nbytes; - dev->sg_src = req->src; - dev->first = req->src; - dev->nents = sg_nents(req->src); - dev->sg_dst = req->dst; - dev->aligned = 1; - dev->ablk_req = req; - - spin_lock_irqsave(&dev->lock, flags); - err = ablkcipher_enqueue_request(&dev->queue, req); - spin_unlock_irqrestore(&dev->lock, flags); - tasklet_schedule(&dev->crypto_tasklet); - return err; + else + return dev->enqueue(dev, &req->base); } static int rk_aes_setkey(struct crypto_ablkcipher *cipher, @@ -93,7 +77,7 @@ static int rk_aes_ecb_encrypt(struct ablkcipher_request *req) struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); struct rk_crypto_info *dev = ctx->dev; - dev->mode = RK_CRYPTO_AES_ECB_MODE; + ctx->mode = RK_CRYPTO_AES_ECB_MODE; return rk_handle_req(dev, req); } @@ -103,7 +87,7 @@ static int rk_aes_ecb_decrypt(struct ablkcipher_request *req) struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); struct rk_crypto_info *dev = ctx->dev; - dev->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC; + ctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC; return rk_handle_req(dev, req); } @@ -113,7 +97,7 @@ static int rk_aes_cbc_encrypt(struct ablkcipher_request *req) struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); struct rk_crypto_info *dev = ctx->dev; - dev->mode = RK_CRYPTO_AES_CBC_MODE; + ctx->mode = RK_CRYPTO_AES_CBC_MODE; return rk_handle_req(dev, req); } @@ -123,7 +107,7 @@ static int rk_aes_cbc_decrypt(struct ablkcipher_request *req) struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); struct rk_crypto_info *dev = ctx->dev; - dev->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC; + ctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC; return rk_handle_req(dev, req); } @@ -133,7 +117,7 @@ static int rk_des_ecb_encrypt(struct ablkcipher_request *req) struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); struct rk_crypto_info *dev = ctx->dev; - dev->mode = 0; + ctx->mode = 0; return rk_handle_req(dev, req); } @@ -143,7 +127,7 @@ static int rk_des_ecb_decrypt(struct ablkcipher_request *req) struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); struct rk_crypto_info *dev = ctx->dev; - dev->mode = RK_CRYPTO_DEC; + ctx->mode = RK_CRYPTO_DEC; return rk_handle_req(dev, req); } @@ -153,7 +137,7 @@ static int rk_des_cbc_encrypt(struct ablkcipher_request *req) struct rk_cipher_ctx *ctx = 
crypto_ablkcipher_ctx(tfm); struct rk_crypto_info *dev = ctx->dev; - dev->mode = RK_CRYPTO_TDES_CHAINMODE_CBC; + ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC; return rk_handle_req(dev, req); } @@ -163,7 +147,7 @@ static int rk_des_cbc_decrypt(struct ablkcipher_request *req) struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); struct rk_crypto_info *dev = ctx->dev; - dev->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC; + ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC; return rk_handle_req(dev, req); } @@ -173,7 +157,7 @@ static int rk_des3_ede_ecb_encrypt(struct ablkcipher_request *req) struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); struct rk_crypto_info *dev = ctx->dev; - dev->mode = RK_CRYPTO_TDES_SELECT; + ctx->mode = RK_CRYPTO_TDES_SELECT; return rk_handle_req(dev, req); } @@ -183,7 +167,7 @@ static int rk_des3_ede_ecb_decrypt(struct ablkcipher_request *req) struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); struct rk_crypto_info *dev = ctx->dev; - dev->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC; + ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC; return rk_handle_req(dev, req); } @@ -193,7 +177,7 @@ static int rk_des3_ede_cbc_encrypt(struct ablkcipher_request *req) struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); struct rk_crypto_info *dev = ctx->dev; - dev->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC; + ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC; return rk_handle_req(dev, req); } @@ -203,15 +187,16 @@ static int rk_des3_ede_cbc_decrypt(struct ablkcipher_request *req) struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); struct rk_crypto_info *dev = ctx->dev; - dev->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC | + ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC; return rk_handle_req(dev, req); } static void rk_ablk_hw_init(struct rk_crypto_info *dev) { - struct crypto_ablkcipher *cipher = - crypto_ablkcipher_reqtfm(dev->ablk_req); + struct ablkcipher_request *req = + ablkcipher_request_cast(dev->async_req); + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req); struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(cipher); u32 ivsize, block, conf_reg = 0; @@ -220,25 +205,23 @@ static void rk_ablk_hw_init(struct rk_crypto_info *dev) ivsize = crypto_ablkcipher_ivsize(cipher); if (block == DES_BLOCK_SIZE) { - dev->mode |= RK_CRYPTO_TDES_FIFO_MODE | + ctx->mode |= RK_CRYPTO_TDES_FIFO_MODE | RK_CRYPTO_TDES_BYTESWAP_KEY | RK_CRYPTO_TDES_BYTESWAP_IV; - CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, dev->mode); - memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, - dev->ablk_req->info, ivsize); + CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, ctx->mode); + memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, req->info, ivsize); conf_reg = RK_CRYPTO_DESSEL; } else { - dev->mode |= RK_CRYPTO_AES_FIFO_MODE | + ctx->mode |= RK_CRYPTO_AES_FIFO_MODE | RK_CRYPTO_AES_KEY_CHANGE | RK_CRYPTO_AES_BYTESWAP_KEY | RK_CRYPTO_AES_BYTESWAP_IV; if (ctx->keylen == AES_KEYSIZE_192) - dev->mode |= RK_CRYPTO_AES_192BIT_key; + ctx->mode |= RK_CRYPTO_AES_192BIT_key; else if (ctx->keylen == AES_KEYSIZE_256) - dev->mode |= RK_CRYPTO_AES_256BIT_key; - CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, dev->mode); - memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, - dev->ablk_req->info, ivsize); + ctx->mode |= RK_CRYPTO_AES_256BIT_key; + CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, ctx->mode); + memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, req->info, ivsize); } conf_reg |= 
RK_CRYPTO_BYTESWAP_BTFIFO | RK_CRYPTO_BYTESWAP_BRFIFO; @@ -268,8 +251,18 @@ static int rk_set_data_start(struct rk_crypto_info *dev) static int rk_ablk_start(struct rk_crypto_info *dev) { + struct ablkcipher_request *req = + ablkcipher_request_cast(dev->async_req); unsigned long flags; - int err; + int err = 0; + + dev->left_bytes = req->nbytes; + dev->total = req->nbytes; + dev->sg_src = req->src; + dev->first = req->src; + dev->nents = sg_nents(req->src); + dev->sg_dst = req->dst; + dev->aligned = 1; spin_lock_irqsave(&dev->lock, flags); rk_ablk_hw_init(dev); @@ -280,15 +273,16 @@ static int rk_ablk_start(struct rk_crypto_info *dev) static void rk_iv_copyback(struct rk_crypto_info *dev) { - struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(dev->ablk_req); + struct ablkcipher_request *req = + ablkcipher_request_cast(dev->async_req); + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); u32 ivsize = crypto_ablkcipher_ivsize(tfm); if (ivsize == DES_BLOCK_SIZE) - memcpy_fromio(dev->ablk_req->info, - dev->reg + RK_CRYPTO_TDES_IV_0, ivsize); + memcpy_fromio(req->info, dev->reg + RK_CRYPTO_TDES_IV_0, + ivsize); else if (ivsize == AES_BLOCK_SIZE) - memcpy_fromio(dev->ablk_req->info, - dev->reg + RK_CRYPTO_AES_IV_0, ivsize); + memcpy_fromio(req->info, dev->reg + RK_CRYPTO_AES_IV_0, ivsize); } /* return: @@ -298,10 +292,12 @@ static void rk_iv_copyback(struct rk_crypto_info *dev) static int rk_ablk_rx(struct rk_crypto_info *dev) { int err = 0; + struct ablkcipher_request *req = + ablkcipher_request_cast(dev->async_req); dev->unload_data(dev); if (!dev->aligned) { - if (!sg_pcopy_from_buffer(dev->ablk_req->dst, dev->nents, + if (!sg_pcopy_from_buffer(req->dst, dev->nents, dev->addr_vir, dev->count, dev->total - dev->left_bytes - dev->count)) { @@ -324,7 +320,8 @@ static int rk_ablk_rx(struct rk_crypto_info *dev) } else { rk_iv_copyback(dev); /* here show the calculation is over without any err */ - dev->complete(dev, 0); + dev->complete(dev->async_req, 0); + tasklet_schedule(&dev->queue_task); } out_rx: return err; diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c index 718588219f75..821a506b9e17 100644 --- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c +++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c @@ -40,14 +40,16 @@ static int zero_message_process(struct ahash_request *req) return 0; } -static void rk_ahash_crypto_complete(struct rk_crypto_info *dev, int err) +static void rk_ahash_crypto_complete(struct crypto_async_request *base, int err) { - if (dev->ahash_req->base.complete) - dev->ahash_req->base.complete(&dev->ahash_req->base, err); + if (base->complete) + base->complete(base, err); } static void rk_ahash_reg_init(struct rk_crypto_info *dev) { + struct ahash_request *req = ahash_request_cast(dev->async_req); + struct rk_ahash_rctx *rctx = ahash_request_ctx(req); int reg_status = 0; reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL) | @@ -67,7 +69,7 @@ static void rk_ahash_reg_init(struct rk_crypto_info *dev) CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, RK_CRYPTO_HRDMA_ERR_INT | RK_CRYPTO_HRDMA_DONE_INT); - CRYPTO_WRITE(dev, RK_CRYPTO_HASH_CTRL, dev->mode | + CRYPTO_WRITE(dev, RK_CRYPTO_HASH_CTRL, rctx->mode | RK_CRYPTO_HASH_SWAP_DO); CRYPTO_WRITE(dev, RK_CRYPTO_CONF, RK_CRYPTO_BYTESWAP_HRFIFO | @@ -164,64 +166,13 @@ static int rk_ahash_export(struct ahash_request *req, void *out) static int rk_ahash_digest(struct ahash_request *req) { - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct rk_ahash_ctx *tctx = 
crypto_tfm_ctx(req->base.tfm); - struct rk_crypto_info *dev = NULL; - unsigned long flags; - int ret; + struct rk_crypto_info *dev = tctx->dev; if (!req->nbytes) return zero_message_process(req); - - dev = tctx->dev; - dev->total = req->nbytes; - dev->left_bytes = req->nbytes; - dev->aligned = 0; - dev->mode = 0; - dev->align_size = 4; - dev->sg_dst = NULL; - dev->sg_src = req->src; - dev->first = req->src; - dev->nents = sg_nents(req->src); - - switch (crypto_ahash_digestsize(tfm)) { - case SHA1_DIGEST_SIZE: - dev->mode = RK_CRYPTO_HASH_SHA1; - break; - case SHA256_DIGEST_SIZE: - dev->mode = RK_CRYPTO_HASH_SHA256; - break; - case MD5_DIGEST_SIZE: - dev->mode = RK_CRYPTO_HASH_MD5; - break; - default: - return -EINVAL; - } - - rk_ahash_reg_init(dev); - - spin_lock_irqsave(&dev->lock, flags); - ret = crypto_enqueue_request(&dev->queue, &req->base); - spin_unlock_irqrestore(&dev->lock, flags); - - tasklet_schedule(&dev->crypto_tasklet); - - /* - * it will take some time to process date after last dma transmission. - * - * waiting time is relative with the last date len, - * so cannot set a fixed time here. - * 10-50 makes system not call here frequently wasting - * efficiency, and make it response quickly when dma - * complete. - */ - while (!CRYPTO_READ(dev, RK_CRYPTO_HASH_STS)) - usleep_range(10, 50); - - memcpy_fromio(req->result, dev->reg + RK_CRYPTO_HASH_DOUT_0, - crypto_ahash_digestsize(tfm)); - - return 0; + else + return dev->enqueue(dev, &req->base); } static void crypto_ahash_dma_start(struct rk_crypto_info *dev) @@ -244,12 +195,45 @@ static int rk_ahash_set_data_start(struct rk_crypto_info *dev) static int rk_ahash_start(struct rk_crypto_info *dev) { + struct ahash_request *req = ahash_request_cast(dev->async_req); + struct crypto_ahash *tfm; + struct rk_ahash_rctx *rctx; + + dev->total = req->nbytes; + dev->left_bytes = req->nbytes; + dev->aligned = 0; + dev->align_size = 4; + dev->sg_dst = NULL; + dev->sg_src = req->src; + dev->first = req->src; + dev->nents = sg_nents(req->src); + rctx = ahash_request_ctx(req); + rctx->mode = 0; + + tfm = crypto_ahash_reqtfm(req); + switch (crypto_ahash_digestsize(tfm)) { + case SHA1_DIGEST_SIZE: + rctx->mode = RK_CRYPTO_HASH_SHA1; + break; + case SHA256_DIGEST_SIZE: + rctx->mode = RK_CRYPTO_HASH_SHA256; + break; + case MD5_DIGEST_SIZE: + rctx->mode = RK_CRYPTO_HASH_MD5; + break; + default: + return -EINVAL; + } + + rk_ahash_reg_init(dev); return rk_ahash_set_data_start(dev); } static int rk_ahash_crypto_rx(struct rk_crypto_info *dev) { int err = 0; + struct ahash_request *req = ahash_request_cast(dev->async_req); + struct crypto_ahash *tfm; dev->unload_data(dev); if (dev->left_bytes) { @@ -264,7 +248,24 @@ static int rk_ahash_crypto_rx(struct rk_crypto_info *dev) } err = rk_ahash_set_data_start(dev); } else { - dev->complete(dev, 0); + /* + * it will take some time to process data after the last dma + * transmission. + * + * the waiting time is relative to the last data len, + * so a fixed time cannot be set here. + * 10us keeps the system from polling here too frequently and + * wasting efficiency, while still responding quickly when the + * dma completes.
+ */ + while (!CRYPTO_READ(dev, RK_CRYPTO_HASH_STS)) + udelay(10); + + tfm = crypto_ahash_reqtfm(req); + memcpy_fromio(req->result, dev->reg + RK_CRYPTO_HASH_DOUT_0, + crypto_ahash_digestsize(tfm)); + dev->complete(dev->async_req, 0); + tasklet_schedule(&dev->queue_task); } out_rx: diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index 1d9ecd368b5b..08e7bdcaa6e3 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c @@ -202,7 +202,6 @@ struct sahara_dev { struct completion dma_completion; struct sahara_ctx *ctx; - spinlock_t lock; struct crypto_queue queue; unsigned long flags; @@ -543,10 +542,10 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev) unmap_out: dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg, - DMA_TO_DEVICE); + DMA_FROM_DEVICE); unmap_in: dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg, - DMA_FROM_DEVICE); + DMA_TO_DEVICE); return -EINVAL; } @@ -594,9 +593,9 @@ static int sahara_aes_process(struct ablkcipher_request *req) } dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg, - DMA_TO_DEVICE); - dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_FROM_DEVICE); + dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg, + DMA_TO_DEVICE); return 0; } @@ -1376,13 +1375,13 @@ static void sahara_unregister_algs(struct sahara_dev *dev) crypto_unregister_ahash(&sha_v4_algs[i]); } -static struct platform_device_id sahara_platform_ids[] = { +static const struct platform_device_id sahara_platform_ids[] = { { .name = "sahara-imx27" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(platform, sahara_platform_ids); -static struct of_device_id sahara_dt_ids[] = { +static const struct of_device_id sahara_dt_ids[] = { { .compatible = "fsl,imx53-sahara" }, { .compatible = "fsl,imx27-sahara" }, { /* sentinel */ } @@ -1487,7 +1486,6 @@ static int sahara_probe(struct platform_device *pdev) crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH); - spin_lock_init(&dev->lock); mutex_init(&dev->queue_mutex); dev_ptr = dev; diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig index 09b4ec87c212..602332e02729 100644 --- a/drivers/crypto/stm32/Kconfig +++ b/drivers/crypto/stm32/Kconfig @@ -1,7 +1,20 @@ -config CRYPTO_DEV_STM32 - tristate "Support for STM32 crypto accelerators" +config CRC_DEV_STM32 + tristate "Support for STM32 crc accelerators" depends on ARCH_STM32 select CRYPTO_HASH help This enables support for the CRC32 hw accelerator which can be found - on STMicroelectronis STM32 SOC. + on STMicroelectronics STM32 SOC. + +config HASH_DEV_STM32 + tristate "Support for STM32 hash accelerators" + depends on ARCH_STM32 + depends on HAS_DMA + select CRYPTO_HASH + select CRYPTO_MD5 + select CRYPTO_SHA1 + select CRYPTO_SHA256 + select CRYPTO_ENGINE + help + This enables support for the HASH hw accelerator which can be found + on STMicroelectronics STM32 SOC. diff --git a/drivers/crypto/stm32/Makefile b/drivers/crypto/stm32/Makefile index 73b4c6e47f5f..73cd56cad0cc 100644 --- a/drivers/crypto/stm32/Makefile +++ b/drivers/crypto/stm32/Makefile @@ -1,2 +1,2 @@ -obj-$(CONFIG_CRYPTO_DEV_STM32) += stm32_cryp.o -stm32_cryp-objs := stm32_crc32.o +obj-$(CONFIG_CRC_DEV_STM32) += stm32_crc32.o +obj-$(CONFIG_HASH_DEV_STM32) += stm32-hash.o \ No newline at end of file diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c new file mode 100644 index 000000000000..b585ce54a802 --- /dev/null +++ b/drivers/crypto/stm32/stm32-hash.c @@ -0,0 +1,1575 @@ +/* + * This file is part of STM32 Crypto driver for Linux. 
+ * + * Copyright (C) 2017, STMicroelectronics - All Rights Reserved + * Author(s): Lionel DEBIEVE for STMicroelectronics. + * + * License terms: GPL V2.0. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#define HASH_CR 0x00 +#define HASH_DIN 0x04 +#define HASH_STR 0x08 +#define HASH_IMR 0x20 +#define HASH_SR 0x24 +#define HASH_CSR(x) (0x0F8 + ((x) * 0x04)) +#define HASH_HREG(x) (0x310 + ((x) * 0x04)) +#define HASH_HWCFGR 0x3F0 +#define HASH_VER 0x3F4 +#define HASH_ID 0x3F8 + +/* Control Register */ +#define HASH_CR_INIT BIT(2) +#define HASH_CR_DMAE BIT(3) +#define HASH_CR_DATATYPE_POS 4 +#define HASH_CR_MODE BIT(6) +#define HASH_CR_MDMAT BIT(13) +#define HASH_CR_DMAA BIT(14) +#define HASH_CR_LKEY BIT(16) + +#define HASH_CR_ALGO_SHA1 0x0 +#define HASH_CR_ALGO_MD5 0x80 +#define HASH_CR_ALGO_SHA224 0x40000 +#define HASH_CR_ALGO_SHA256 0x40080 + +/* Interrupt */ +#define HASH_DINIE BIT(0) +#define HASH_DCIE BIT(1) + +/* Interrupt Mask */ +#define HASH_MASK_CALC_COMPLETION BIT(0) +#define HASH_MASK_DATA_INPUT BIT(1) + +/* Context swap register */ +#define HASH_CSR_REGISTER_NUMBER 53 + +/* Status Flags */ +#define HASH_SR_DATA_INPUT_READY BIT(0) +#define HASH_SR_OUTPUT_READY BIT(1) +#define HASH_SR_DMA_ACTIVE BIT(2) +#define HASH_SR_BUSY BIT(3) + +/* STR Register */ +#define HASH_STR_NBLW_MASK GENMASK(4, 0) +#define HASH_STR_DCAL BIT(8) + +#define HASH_FLAGS_INIT BIT(0) +#define HASH_FLAGS_OUTPUT_READY BIT(1) +#define HASH_FLAGS_CPU BIT(2) +#define HASH_FLAGS_DMA_READY BIT(3) +#define HASH_FLAGS_DMA_ACTIVE BIT(4) +#define HASH_FLAGS_HMAC_INIT BIT(5) +#define HASH_FLAGS_HMAC_FINAL BIT(6) +#define HASH_FLAGS_HMAC_KEY BIT(7) + +#define HASH_FLAGS_FINAL BIT(15) +#define HASH_FLAGS_FINUP BIT(16) +#define HASH_FLAGS_ALGO_MASK GENMASK(21, 18) +#define HASH_FLAGS_MD5 BIT(18) +#define HASH_FLAGS_SHA1 BIT(19) +#define HASH_FLAGS_SHA224 BIT(20) +#define HASH_FLAGS_SHA256 BIT(21) +#define HASH_FLAGS_ERRORS BIT(22) +#define HASH_FLAGS_HMAC BIT(23) + +#define HASH_OP_UPDATE 1 +#define HASH_OP_FINAL 2 + +enum stm32_hash_data_format { + HASH_DATA_32_BITS = 0x0, + HASH_DATA_16_BITS = 0x1, + HASH_DATA_8_BITS = 0x2, + HASH_DATA_1_BIT = 0x3 +}; + +#define HASH_BUFLEN 256 +#define HASH_LONG_KEY 64 +#define HASH_MAX_KEY_SIZE (SHA256_BLOCK_SIZE * 8) +#define HASH_QUEUE_LENGTH 16 +#define HASH_DMA_THRESHOLD 50 + +struct stm32_hash_ctx { + struct stm32_hash_dev *hdev; + unsigned long flags; + + u8 key[HASH_MAX_KEY_SIZE]; + int keylen; +}; + +struct stm32_hash_request_ctx { + struct stm32_hash_dev *hdev; + unsigned long flags; + unsigned long op; + + u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32)); + size_t digcnt; + size_t bufcnt; + size_t buflen; + + /* DMA */ + struct scatterlist *sg; + unsigned int offset; + unsigned int total; + struct scatterlist sg_key; + + dma_addr_t dma_addr; + size_t dma_ct; + int nents; + 
+ u8 data_type; + + u8 buffer[HASH_BUFLEN] __aligned(sizeof(u32)); + + /* Export Context */ + u32 *hw_context; +}; + +struct stm32_hash_algs_info { + struct ahash_alg *algs_list; + size_t size; +}; + +struct stm32_hash_pdata { + struct stm32_hash_algs_info *algs_info; + size_t algs_info_size; +}; + +struct stm32_hash_dev { + struct list_head list; + struct device *dev; + struct clk *clk; + struct reset_control *rst; + void __iomem *io_base; + phys_addr_t phys_base; + u32 dma_mode; + u32 dma_maxburst; + + spinlock_t lock; /* lock to protect queue */ + + struct ahash_request *req; + struct crypto_engine *engine; + + int err; + unsigned long flags; + + struct dma_chan *dma_lch; + struct completion dma_completion; + + const struct stm32_hash_pdata *pdata; +}; + +struct stm32_hash_drv { + struct list_head dev_list; + spinlock_t lock; /* List protection access */ +}; + +static struct stm32_hash_drv stm32_hash = { + .dev_list = LIST_HEAD_INIT(stm32_hash.dev_list), + .lock = __SPIN_LOCK_UNLOCKED(stm32_hash.lock), +}; + +static void stm32_hash_dma_callback(void *param); + +static inline u32 stm32_hash_read(struct stm32_hash_dev *hdev, u32 offset) +{ + return readl_relaxed(hdev->io_base + offset); +} + +static inline void stm32_hash_write(struct stm32_hash_dev *hdev, + u32 offset, u32 value) +{ + writel_relaxed(value, hdev->io_base + offset); +} + +static inline int stm32_hash_wait_busy(struct stm32_hash_dev *hdev) +{ + u32 status; + + return readl_relaxed_poll_timeout(hdev->io_base + HASH_SR, status, + !(status & HASH_SR_BUSY), 10, 10000); +} + +static void stm32_hash_set_nblw(struct stm32_hash_dev *hdev, int length) +{ + u32 reg; + + reg = stm32_hash_read(hdev, HASH_STR); + reg &= ~(HASH_STR_NBLW_MASK); + reg |= (8U * ((length) % 4U)); + stm32_hash_write(hdev, HASH_STR, reg); +} + +static int stm32_hash_write_key(struct stm32_hash_dev *hdev) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req); + struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm); + u32 reg; + int keylen = ctx->keylen; + void *key = ctx->key; + + if (keylen) { + stm32_hash_set_nblw(hdev, keylen); + + while (keylen > 0) { + stm32_hash_write(hdev, HASH_DIN, *(u32 *)key); + keylen -= 4; + key += 4; + } + + reg = stm32_hash_read(hdev, HASH_STR); + reg |= HASH_STR_DCAL; + stm32_hash_write(hdev, HASH_STR, reg); + + return -EINPROGRESS; + } + + return 0; +} + +static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev) +{ + struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req); + struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm); + + u32 reg = HASH_CR_INIT; + + if (!(hdev->flags & HASH_FLAGS_INIT)) { + switch (rctx->flags & HASH_FLAGS_ALGO_MASK) { + case HASH_FLAGS_MD5: + reg |= HASH_CR_ALGO_MD5; + break; + case HASH_FLAGS_SHA1: + reg |= HASH_CR_ALGO_SHA1; + break; + case HASH_FLAGS_SHA224: + reg |= HASH_CR_ALGO_SHA224; + break; + case HASH_FLAGS_SHA256: + reg |= HASH_CR_ALGO_SHA256; + break; + default: + reg |= HASH_CR_ALGO_MD5; + } + + reg |= (rctx->data_type << HASH_CR_DATATYPE_POS); + + if (rctx->flags & HASH_FLAGS_HMAC) { + hdev->flags |= HASH_FLAGS_HMAC; + reg |= HASH_CR_MODE; + if (ctx->keylen > HASH_LONG_KEY) + reg |= HASH_CR_LKEY; + } + + stm32_hash_write(hdev, HASH_IMR, HASH_DCIE); + + stm32_hash_write(hdev, HASH_CR, reg); + + hdev->flags |= HASH_FLAGS_INIT; + + dev_dbg(hdev->dev, "Write Control %x\n", reg); + } +} + +static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx) +{ + size_t count; + + while ((rctx->bufcnt < 
rctx->buflen) && rctx->total) { + count = min(rctx->sg->length - rctx->offset, rctx->total); + count = min(count, rctx->buflen - rctx->bufcnt); + + if (count <= 0) { + if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) { + rctx->sg = sg_next(rctx->sg); + continue; + } else { + break; + } + } + + scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, rctx->sg, + rctx->offset, count, 0); + + rctx->bufcnt += count; + rctx->offset += count; + rctx->total -= count; + + if (rctx->offset == rctx->sg->length) { + rctx->sg = sg_next(rctx->sg); + if (rctx->sg) + rctx->offset = 0; + else + rctx->total = 0; + } + } +} + +static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev, + const u8 *buf, size_t length, int final) +{ + unsigned int count, len32; + const u32 *buffer = (const u32 *)buf; + u32 reg; + + if (final) + hdev->flags |= HASH_FLAGS_FINAL; + + len32 = DIV_ROUND_UP(length, sizeof(u32)); + + dev_dbg(hdev->dev, "%s: length: %d, final: %x len32 %i\n", + __func__, length, final, len32); + + hdev->flags |= HASH_FLAGS_CPU; + + stm32_hash_write_ctrl(hdev); + + if (stm32_hash_wait_busy(hdev)) + return -ETIMEDOUT; + + if ((hdev->flags & HASH_FLAGS_HMAC) && + (hdev->flags & ~HASH_FLAGS_HMAC_KEY)) { + hdev->flags |= HASH_FLAGS_HMAC_KEY; + stm32_hash_write_key(hdev); + if (stm32_hash_wait_busy(hdev)) + return -ETIMEDOUT; + } + + for (count = 0; count < len32; count++) + stm32_hash_write(hdev, HASH_DIN, buffer[count]); + + if (final) { + stm32_hash_set_nblw(hdev, length); + reg = stm32_hash_read(hdev, HASH_STR); + reg |= HASH_STR_DCAL; + stm32_hash_write(hdev, HASH_STR, reg); + if (hdev->flags & HASH_FLAGS_HMAC) { + if (stm32_hash_wait_busy(hdev)) + return -ETIMEDOUT; + stm32_hash_write_key(hdev); + } + return -EINPROGRESS; + } + + return 0; +} + +static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev) +{ + struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); + int bufcnt, err = 0, final; + + dev_dbg(hdev->dev, "%s flags %lx\n", __func__, rctx->flags); + + final = (rctx->flags & HASH_FLAGS_FINUP); + + while ((rctx->total >= rctx->buflen) || + (rctx->bufcnt + rctx->total >= rctx->buflen)) { + stm32_hash_append_sg(rctx); + bufcnt = rctx->bufcnt; + rctx->bufcnt = 0; + err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt, 0); + } + + stm32_hash_append_sg(rctx); + + if (final) { + bufcnt = rctx->bufcnt; + rctx->bufcnt = 0; + err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt, + (rctx->flags & HASH_FLAGS_FINUP)); + } + + return err; +} + +static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev, + struct scatterlist *sg, int length, int mdma) +{ + struct dma_async_tx_descriptor *in_desc; + dma_cookie_t cookie; + u32 reg; + int err; + + in_desc = dmaengine_prep_slave_sg(hdev->dma_lch, sg, 1, + DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | + DMA_CTRL_ACK); + if (!in_desc) { + dev_err(hdev->dev, "dmaengine_prep_slave error\n"); + return -ENOMEM; + } + + reinit_completion(&hdev->dma_completion); + in_desc->callback = stm32_hash_dma_callback; + in_desc->callback_param = hdev; + + hdev->flags |= HASH_FLAGS_FINAL; + hdev->flags |= HASH_FLAGS_DMA_ACTIVE; + + reg = stm32_hash_read(hdev, HASH_CR); + + if (mdma) + reg |= HASH_CR_MDMAT; + else + reg &= ~HASH_CR_MDMAT; + + reg |= HASH_CR_DMAE; + + stm32_hash_write(hdev, HASH_CR, reg); + + stm32_hash_set_nblw(hdev, length); + + cookie = dmaengine_submit(in_desc); + err = dma_submit_error(cookie); + if (err) + return -ENOMEM; + + dma_async_issue_pending(hdev->dma_lch); + + if (!wait_for_completion_interruptible_timeout(&hdev->dma_completion, + 
msecs_to_jiffies(100))) + err = -ETIMEDOUT; + + if (dma_async_is_tx_complete(hdev->dma_lch, cookie, + NULL, NULL) != DMA_COMPLETE) + err = -ETIMEDOUT; + + if (err) { + dev_err(hdev->dev, "DMA Error %i\n", err); + dmaengine_terminate_all(hdev->dma_lch); + return err; + } + + return -EINPROGRESS; +} + +static void stm32_hash_dma_callback(void *param) +{ + struct stm32_hash_dev *hdev = param; + + complete(&hdev->dma_completion); + + hdev->flags |= HASH_FLAGS_DMA_READY; +} + +static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev) +{ + struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req); + struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm); + int err; + + if (ctx->keylen < HASH_DMA_THRESHOLD || (hdev->dma_mode == 1)) { + err = stm32_hash_write_key(hdev); + if (stm32_hash_wait_busy(hdev)) + return -ETIMEDOUT; + } else { + if (!(hdev->flags & HASH_FLAGS_HMAC_KEY)) + sg_init_one(&rctx->sg_key, ctx->key, + ALIGN(ctx->keylen, sizeof(u32))); + + rctx->dma_ct = dma_map_sg(hdev->dev, &rctx->sg_key, 1, + DMA_TO_DEVICE); + if (rctx->dma_ct == 0) { + dev_err(hdev->dev, "dma_map_sg error\n"); + return -ENOMEM; + } + + err = stm32_hash_xmit_dma(hdev, &rctx->sg_key, ctx->keylen, 0); + + dma_unmap_sg(hdev->dev, &rctx->sg_key, 1, DMA_TO_DEVICE); + } + + return err; +} + +static int stm32_hash_dma_init(struct stm32_hash_dev *hdev) +{ + struct dma_slave_config dma_conf; + int err; + + memset(&dma_conf, 0, sizeof(dma_conf)); + + dma_conf.direction = DMA_MEM_TO_DEV; + dma_conf.dst_addr = hdev->phys_base + HASH_DIN; + dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + dma_conf.src_maxburst = hdev->dma_maxburst; + dma_conf.dst_maxburst = hdev->dma_maxburst; + dma_conf.device_fc = false; + + hdev->dma_lch = dma_request_slave_channel(hdev->dev, "in"); + if (!hdev->dma_lch) { + dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n"); + return -EBUSY; + } + + err = dmaengine_slave_config(hdev->dma_lch, &dma_conf); + if (err) { + dma_release_channel(hdev->dma_lch); + hdev->dma_lch = NULL; + dev_err(hdev->dev, "Couldn't configure DMA slave.\n"); + return err; + } + + init_completion(&hdev->dma_completion); + + return 0; +} + +static int stm32_hash_dma_send(struct stm32_hash_dev *hdev) +{ + struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); + struct scatterlist sg[1], *tsg; + int err = 0, len = 0, reg, ncp; + unsigned int i; + const u32 *buffer = (const u32 *)rctx->buffer; + + rctx->sg = hdev->req->src; + rctx->total = hdev->req->nbytes; + + rctx->nents = sg_nents(rctx->sg); + + if (rctx->nents < 0) + return -EINVAL; + + stm32_hash_write_ctrl(hdev); + + if (hdev->flags & HASH_FLAGS_HMAC) { + err = stm32_hash_hmac_dma_send(hdev); + if (err != -EINPROGRESS) + return err; + } + + for_each_sg(rctx->sg, tsg, rctx->nents, i) { + len = sg->length; + + sg[0] = *tsg; + if (sg_is_last(sg)) { + if (hdev->dma_mode == 1) { + len = (ALIGN(sg->length, 16) - 16); + + ncp = sg_pcopy_to_buffer( + rctx->sg, rctx->nents, + rctx->buffer, sg->length - len, + rctx->total - sg->length + len); + + sg->length = len; + } else { + if (!(IS_ALIGNED(sg->length, sizeof(u32)))) { + len = sg->length; + sg->length = ALIGN(sg->length, + sizeof(u32)); + } + } + } + + rctx->dma_ct = dma_map_sg(hdev->dev, sg, 1, + DMA_TO_DEVICE); + if (rctx->dma_ct == 0) { + dev_err(hdev->dev, "dma_map_sg error\n"); + return -ENOMEM; + } + + err = stm32_hash_xmit_dma(hdev, sg, len, + !sg_is_last(sg)); + + dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE); + + if (err 
== -ENOMEM) + return err; + } + + if (hdev->dma_mode == 1) { + if (stm32_hash_wait_busy(hdev)) + return -ETIMEDOUT; + reg = stm32_hash_read(hdev, HASH_CR); + reg &= ~HASH_CR_DMAE; + reg |= HASH_CR_DMAA; + stm32_hash_write(hdev, HASH_CR, reg); + + for (i = 0; i < DIV_ROUND_UP(ncp, sizeof(u32)); i++) + stm32_hash_write(hdev, HASH_DIN, buffer[i]); + + stm32_hash_set_nblw(hdev, ncp); + reg = stm32_hash_read(hdev, HASH_STR); + reg |= HASH_STR_DCAL; + stm32_hash_write(hdev, HASH_STR, reg); + err = -EINPROGRESS; + } + + if (hdev->flags & HASH_FLAGS_HMAC) { + if (stm32_hash_wait_busy(hdev)) + return -ETIMEDOUT; + err = stm32_hash_hmac_dma_send(hdev); + } + + return err; +} + +static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx) +{ + struct stm32_hash_dev *hdev = NULL, *tmp; + + spin_lock_bh(&stm32_hash.lock); + if (!ctx->hdev) { + list_for_each_entry(tmp, &stm32_hash.dev_list, list) { + hdev = tmp; + break; + } + ctx->hdev = hdev; + } else { + hdev = ctx->hdev; + } + + spin_unlock_bh(&stm32_hash.lock); + + return hdev; +} + +static bool stm32_hash_dma_aligned_data(struct ahash_request *req) +{ + struct scatterlist *sg; + struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); + struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx); + int i; + + if (req->nbytes <= HASH_DMA_THRESHOLD) + return false; + + if (sg_nents(req->src) > 1) { + if (hdev->dma_mode == 1) + return false; + for_each_sg(req->src, sg, sg_nents(req->src), i) { + if ((!IS_ALIGNED(sg->length, sizeof(u32))) && + (!sg_is_last(sg))) + return false; + } + } + + if (req->src->offset % 4) + return false; + + return true; +} + +static int stm32_hash_init(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm); + struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); + struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx); + + rctx->hdev = hdev; + + rctx->flags = HASH_FLAGS_CPU; + + rctx->digcnt = crypto_ahash_digestsize(tfm); + switch (rctx->digcnt) { + case MD5_DIGEST_SIZE: + rctx->flags |= HASH_FLAGS_MD5; + break; + case SHA1_DIGEST_SIZE: + rctx->flags |= HASH_FLAGS_SHA1; + break; + case SHA224_DIGEST_SIZE: + rctx->flags |= HASH_FLAGS_SHA224; + break; + case SHA256_DIGEST_SIZE: + rctx->flags |= HASH_FLAGS_SHA256; + break; + default: + return -EINVAL; + } + + rctx->bufcnt = 0; + rctx->buflen = HASH_BUFLEN; + rctx->total = 0; + rctx->offset = 0; + rctx->data_type = HASH_DATA_8_BITS; + + memset(rctx->buffer, 0, HASH_BUFLEN); + + if (ctx->flags & HASH_FLAGS_HMAC) + rctx->flags |= HASH_FLAGS_HMAC; + + dev_dbg(hdev->dev, "%s Flags %lx\n", __func__, rctx->flags); + + return 0; +} + +static int stm32_hash_update_req(struct stm32_hash_dev *hdev) +{ + return stm32_hash_update_cpu(hdev); +} + +static int stm32_hash_final_req(struct stm32_hash_dev *hdev) +{ + struct ahash_request *req = hdev->req; + struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); + int err; + + if (!(rctx->flags & HASH_FLAGS_CPU)) + err = stm32_hash_dma_send(hdev); + else + err = stm32_hash_xmit_cpu(hdev, rctx->buffer, rctx->bufcnt, 1); + + rctx->bufcnt = 0; + + return err; +} + +static void stm32_hash_copy_hash(struct ahash_request *req) +{ + struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); + u32 *hash = (u32 *)rctx->digest; + unsigned int i, hashsize; + + switch (rctx->flags & HASH_FLAGS_ALGO_MASK) { + case HASH_FLAGS_MD5: + hashsize = MD5_DIGEST_SIZE; + break; + case HASH_FLAGS_SHA1: + hashsize = SHA1_DIGEST_SIZE; + break; 
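+ /*
+  * The algorithm selected here also fixes how many 32-bit words are
+  * read back from the HASH_HRx registers below: hashsize / sizeof(u32),
+  * i.e. 4 for MD5, 5 for SHA1, 7 for SHA224 and 8 for SHA256, each
+  * value passed through be32_to_cpu().
+  */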
+ case HASH_FLAGS_SHA224: + hashsize = SHA224_DIGEST_SIZE; + break; + case HASH_FLAGS_SHA256: + hashsize = SHA256_DIGEST_SIZE; + break; + default: + return; + } + + for (i = 0; i < hashsize / sizeof(u32); i++) + hash[i] = be32_to_cpu(stm32_hash_read(rctx->hdev, + HASH_HREG(i))); +} + +static int stm32_hash_finish(struct ahash_request *req) +{ + struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); + + if (!req->result) + return -EINVAL; + + memcpy(req->result, rctx->digest, rctx->digcnt); + + return 0; +} + +static void stm32_hash_finish_req(struct ahash_request *req, int err) +{ + struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); + struct stm32_hash_dev *hdev = rctx->hdev; + + if (!err && (HASH_FLAGS_FINAL & hdev->flags)) { + stm32_hash_copy_hash(req); + err = stm32_hash_finish(req); + hdev->flags &= ~(HASH_FLAGS_FINAL | HASH_FLAGS_CPU | + HASH_FLAGS_INIT | HASH_FLAGS_DMA_READY | + HASH_FLAGS_OUTPUT_READY | HASH_FLAGS_HMAC | + HASH_FLAGS_HMAC_INIT | HASH_FLAGS_HMAC_FINAL | + HASH_FLAGS_HMAC_KEY); + } else { + rctx->flags |= HASH_FLAGS_ERRORS; + } + + crypto_finalize_hash_request(hdev->engine, req, err); +} + +static int stm32_hash_hw_init(struct stm32_hash_dev *hdev, + struct stm32_hash_request_ctx *rctx) +{ + if (!(HASH_FLAGS_INIT & hdev->flags)) { + stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT); + stm32_hash_write(hdev, HASH_STR, 0); + stm32_hash_write(hdev, HASH_DIN, 0); + stm32_hash_write(hdev, HASH_IMR, 0); + hdev->err = 0; + } + + return 0; +} + +static int stm32_hash_handle_queue(struct stm32_hash_dev *hdev, + struct ahash_request *req) +{ + return crypto_transfer_hash_request_to_engine(hdev->engine, req); +} + +static int stm32_hash_prepare_req(struct crypto_engine *engine, + struct ahash_request *req) +{ + struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); + struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx); + struct stm32_hash_request_ctx *rctx; + + if (!hdev) + return -ENODEV; + + hdev->req = req; + + rctx = ahash_request_ctx(req); + + dev_dbg(hdev->dev, "processing new req, op: %lu, nbytes %d\n", + rctx->op, req->nbytes); + + return stm32_hash_hw_init(hdev, rctx); +} + +static int stm32_hash_one_request(struct crypto_engine *engine, + struct ahash_request *req) +{ + struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); + struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx); + struct stm32_hash_request_ctx *rctx; + int err = 0; + + if (!hdev) + return -ENODEV; + + hdev->req = req; + + rctx = ahash_request_ctx(req); + + if (rctx->op == HASH_OP_UPDATE) + err = stm32_hash_update_req(hdev); + else if (rctx->op == HASH_OP_FINAL) + err = stm32_hash_final_req(hdev); + + if (err != -EINPROGRESS) + /* done task will not finish it, so do it here */ + stm32_hash_finish_req(req, err); + + return 0; +} + +static int stm32_hash_enqueue(struct ahash_request *req, unsigned int op) +{ + struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); + struct stm32_hash_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct stm32_hash_dev *hdev = ctx->hdev; + + rctx->op = op; + + return stm32_hash_handle_queue(hdev, req); +} + +static int stm32_hash_update(struct ahash_request *req) +{ + struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); + int ret; + + if (!req->nbytes || !(rctx->flags & HASH_FLAGS_CPU)) + return 0; + + rctx->total = req->nbytes; + rctx->sg = req->src; + rctx->offset = 0; + + if ((rctx->bufcnt + rctx->total < rctx->buflen)) { + stm32_hash_append_sg(rctx); + return 0; + } + + ret = stm32_hash_enqueue(req, 
HASH_OP_UPDATE); + + if (rctx->flags & HASH_FLAGS_FINUP) + return ret; + + return 0; +} + +static int stm32_hash_final(struct ahash_request *req) +{ + struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); + + rctx->flags |= HASH_FLAGS_FINUP; + + return stm32_hash_enqueue(req, HASH_OP_FINAL); +} + +static int stm32_hash_finup(struct ahash_request *req) +{ + struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); + struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); + struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx); + int err1, err2; + + rctx->flags |= HASH_FLAGS_FINUP; + + if (hdev->dma_lch && stm32_hash_dma_aligned_data(req)) + rctx->flags &= ~HASH_FLAGS_CPU; + + err1 = stm32_hash_update(req); + + if (err1 == -EINPROGRESS || err1 == -EBUSY) + return err1; + + /* + * final() has to be always called to cleanup resources + * even if update() failed, except EINPROGRESS + */ + err2 = stm32_hash_final(req); + + return err1 ?: err2; +} + +static int stm32_hash_digest(struct ahash_request *req) +{ + return stm32_hash_init(req) ?: stm32_hash_finup(req); +} + +static int stm32_hash_export(struct ahash_request *req, void *out) +{ + struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); + struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); + struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx); + u32 *preg; + unsigned int i; + + while (!(stm32_hash_read(hdev, HASH_SR) & HASH_SR_DATA_INPUT_READY)) + cpu_relax(); + + rctx->hw_context = kmalloc(sizeof(u32) * (3 + HASH_CSR_REGISTER_NUMBER), + GFP_KERNEL); + + preg = rctx->hw_context; + + *preg++ = stm32_hash_read(hdev, HASH_IMR); + *preg++ = stm32_hash_read(hdev, HASH_STR); + *preg++ = stm32_hash_read(hdev, HASH_CR); + for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++) + *preg++ = stm32_hash_read(hdev, HASH_CSR(i)); + + memcpy(out, rctx, sizeof(*rctx)); + + return 0; +} + +static int stm32_hash_import(struct ahash_request *req, const void *in) +{ + struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); + struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); + struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx); + const u32 *preg = in; + u32 reg; + unsigned int i; + + memcpy(rctx, in, sizeof(*rctx)); + + preg = rctx->hw_context; + + stm32_hash_write(hdev, HASH_IMR, *preg++); + stm32_hash_write(hdev, HASH_STR, *preg++); + stm32_hash_write(hdev, HASH_CR, *preg); + reg = *preg++ | HASH_CR_INIT; + stm32_hash_write(hdev, HASH_CR, reg); + + for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++) + stm32_hash_write(hdev, HASH_CSR(i), *preg++); + + kfree(rctx->hw_context); + + return 0; +} + +static int stm32_hash_setkey(struct crypto_ahash *tfm, + const u8 *key, unsigned int keylen) +{ + struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm); + + if (keylen <= HASH_MAX_KEY_SIZE) { + memcpy(ctx->key, key, keylen); + ctx->keylen = keylen; + } else { + return -ENOMEM; + } + + return 0; +} + +static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm, + const char *algs_hmac_name) +{ + struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm); + + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + sizeof(struct stm32_hash_request_ctx)); + + ctx->keylen = 0; + + if (algs_hmac_name) + ctx->flags |= HASH_FLAGS_HMAC; + + return 0; +} + +static int stm32_hash_cra_init(struct crypto_tfm *tfm) +{ + return stm32_hash_cra_init_algs(tfm, NULL); +} + +static int stm32_hash_cra_md5_init(struct crypto_tfm *tfm) +{ + return stm32_hash_cra_init_algs(tfm, "md5"); +} + +static int 
stm32_hash_cra_sha1_init(struct crypto_tfm *tfm) +{ + return stm32_hash_cra_init_algs(tfm, "sha1"); +} + +static int stm32_hash_cra_sha224_init(struct crypto_tfm *tfm) +{ + return stm32_hash_cra_init_algs(tfm, "sha224"); +} + +static int stm32_hash_cra_sha256_init(struct crypto_tfm *tfm) +{ + return stm32_hash_cra_init_algs(tfm, "sha256"); +} + +static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id) +{ + struct stm32_hash_dev *hdev = dev_id; + int err; + + if (HASH_FLAGS_CPU & hdev->flags) { + if (HASH_FLAGS_OUTPUT_READY & hdev->flags) { + hdev->flags &= ~HASH_FLAGS_OUTPUT_READY; + goto finish; + } + } else if (HASH_FLAGS_DMA_READY & hdev->flags) { + if (HASH_FLAGS_DMA_ACTIVE & hdev->flags) { + hdev->flags &= ~HASH_FLAGS_DMA_ACTIVE; + goto finish; + } + } + + return IRQ_HANDLED; + +finish: + /*Finish current request */ + stm32_hash_finish_req(hdev->req, err); + + return IRQ_HANDLED; +} + +static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id) +{ + struct stm32_hash_dev *hdev = dev_id; + u32 reg; + + reg = stm32_hash_read(hdev, HASH_SR); + if (reg & HASH_SR_OUTPUT_READY) { + reg &= ~HASH_SR_OUTPUT_READY; + stm32_hash_write(hdev, HASH_SR, reg); + hdev->flags |= HASH_FLAGS_OUTPUT_READY; + return IRQ_WAKE_THREAD; + } + + return IRQ_NONE; +} + +static struct ahash_alg algs_md5_sha1[] = { + { + .init = stm32_hash_init, + .update = stm32_hash_update, + .final = stm32_hash_final, + .finup = stm32_hash_finup, + .digest = stm32_hash_digest, + .export = stm32_hash_export, + .import = stm32_hash_import, + .halg = { + .digestsize = MD5_DIGEST_SIZE, + .statesize = sizeof(struct stm32_hash_request_ctx), + .base = { + .cra_name = "md5", + .cra_driver_name = "stm32-md5", + .cra_priority = 200, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = MD5_HMAC_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct stm32_hash_ctx), + .cra_alignmask = 3, + .cra_init = stm32_hash_cra_init, + .cra_module = THIS_MODULE, + } + } + }, + { + .init = stm32_hash_init, + .update = stm32_hash_update, + .final = stm32_hash_final, + .finup = stm32_hash_finup, + .digest = stm32_hash_digest, + .export = stm32_hash_export, + .import = stm32_hash_import, + .setkey = stm32_hash_setkey, + .halg = { + .digestsize = MD5_DIGEST_SIZE, + .statesize = sizeof(struct stm32_hash_request_ctx), + .base = { + .cra_name = "hmac(md5)", + .cra_driver_name = "stm32-hmac-md5", + .cra_priority = 200, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = MD5_HMAC_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct stm32_hash_ctx), + .cra_alignmask = 3, + .cra_init = stm32_hash_cra_md5_init, + .cra_module = THIS_MODULE, + } + } + }, + { + .init = stm32_hash_init, + .update = stm32_hash_update, + .final = stm32_hash_final, + .finup = stm32_hash_finup, + .digest = stm32_hash_digest, + .export = stm32_hash_export, + .import = stm32_hash_import, + .halg = { + .digestsize = SHA1_DIGEST_SIZE, + .statesize = sizeof(struct stm32_hash_request_ctx), + .base = { + .cra_name = "sha1", + .cra_driver_name = "stm32-sha1", + .cra_priority = 200, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct stm32_hash_ctx), + .cra_alignmask = 3, + .cra_init = stm32_hash_cra_init, + .cra_module = THIS_MODULE, + } + } + }, + { + .init = stm32_hash_init, + .update = stm32_hash_update, + .final = stm32_hash_final, + .finup = stm32_hash_finup, + .digest = 
stm32_hash_digest, + .export = stm32_hash_export, + .import = stm32_hash_import, + .setkey = stm32_hash_setkey, + .halg = { + .digestsize = SHA1_DIGEST_SIZE, + .statesize = sizeof(struct stm32_hash_request_ctx), + .base = { + .cra_name = "hmac(sha1)", + .cra_driver_name = "stm32-hmac-sha1", + .cra_priority = 200, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct stm32_hash_ctx), + .cra_alignmask = 3, + .cra_init = stm32_hash_cra_sha1_init, + .cra_module = THIS_MODULE, + } + } + }, +}; + +static struct ahash_alg algs_sha224_sha256[] = { + { + .init = stm32_hash_init, + .update = stm32_hash_update, + .final = stm32_hash_final, + .finup = stm32_hash_finup, + .digest = stm32_hash_digest, + .export = stm32_hash_export, + .import = stm32_hash_import, + .halg = { + .digestsize = SHA224_DIGEST_SIZE, + .statesize = sizeof(struct stm32_hash_request_ctx), + .base = { + .cra_name = "sha224", + .cra_driver_name = "stm32-sha224", + .cra_priority = 200, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA224_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct stm32_hash_ctx), + .cra_alignmask = 3, + .cra_init = stm32_hash_cra_init, + .cra_module = THIS_MODULE, + } + } + }, + { + .init = stm32_hash_init, + .update = stm32_hash_update, + .final = stm32_hash_final, + .finup = stm32_hash_finup, + .digest = stm32_hash_digest, + .setkey = stm32_hash_setkey, + .export = stm32_hash_export, + .import = stm32_hash_import, + .halg = { + .digestsize = SHA224_DIGEST_SIZE, + .statesize = sizeof(struct stm32_hash_request_ctx), + .base = { + .cra_name = "hmac(sha224)", + .cra_driver_name = "stm32-hmac-sha224", + .cra_priority = 200, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA224_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct stm32_hash_ctx), + .cra_alignmask = 3, + .cra_init = stm32_hash_cra_sha224_init, + .cra_module = THIS_MODULE, + } + } + }, + { + .init = stm32_hash_init, + .update = stm32_hash_update, + .final = stm32_hash_final, + .finup = stm32_hash_finup, + .digest = stm32_hash_digest, + .export = stm32_hash_export, + .import = stm32_hash_import, + .halg = { + .digestsize = SHA256_DIGEST_SIZE, + .statesize = sizeof(struct stm32_hash_request_ctx), + .base = { + .cra_name = "sha256", + .cra_driver_name = "stm32-sha256", + .cra_priority = 200, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct stm32_hash_ctx), + .cra_alignmask = 3, + .cra_init = stm32_hash_cra_init, + .cra_module = THIS_MODULE, + } + } + }, + { + .init = stm32_hash_init, + .update = stm32_hash_update, + .final = stm32_hash_final, + .finup = stm32_hash_finup, + .digest = stm32_hash_digest, + .export = stm32_hash_export, + .import = stm32_hash_import, + .setkey = stm32_hash_setkey, + .halg = { + .digestsize = SHA256_DIGEST_SIZE, + .statesize = sizeof(struct stm32_hash_request_ctx), + .base = { + .cra_name = "hmac(sha256)", + .cra_driver_name = "stm32-hmac-sha256", + .cra_priority = 200, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct stm32_hash_ctx), + .cra_alignmask = 3, + .cra_init = stm32_hash_cra_sha256_init, + .cra_module = THIS_MODULE, + } + } + }, +}; + +static int stm32_hash_register_algs(struct 
stm32_hash_dev *hdev) +{ + unsigned int i, j; + int err; + + for (i = 0; i < hdev->pdata->algs_info_size; i++) { + for (j = 0; j < hdev->pdata->algs_info[i].size; j++) { + err = crypto_register_ahash( + &hdev->pdata->algs_info[i].algs_list[j]); + if (err) + goto err_algs; + } + } + + return 0; +err_algs: + dev_err(hdev->dev, "Algo %d : %d failed\n", i, j); + for (; i--; ) { + for (; j--;) + crypto_unregister_ahash( + &hdev->pdata->algs_info[i].algs_list[j]); + } + + return err; +} + +static int stm32_hash_unregister_algs(struct stm32_hash_dev *hdev) +{ + unsigned int i, j; + + for (i = 0; i < hdev->pdata->algs_info_size; i++) { + for (j = 0; j < hdev->pdata->algs_info[i].size; j++) + crypto_unregister_ahash( + &hdev->pdata->algs_info[i].algs_list[j]); + } + + return 0; +} + +static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f4[] = { + { + .algs_list = algs_md5_sha1, + .size = ARRAY_SIZE(algs_md5_sha1), + }, +}; + +static const struct stm32_hash_pdata stm32_hash_pdata_stm32f4 = { + .algs_info = stm32_hash_algs_info_stm32f4, + .algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32f4), +}; + +static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f7[] = { + { + .algs_list = algs_md5_sha1, + .size = ARRAY_SIZE(algs_md5_sha1), + }, + { + .algs_list = algs_sha224_sha256, + .size = ARRAY_SIZE(algs_sha224_sha256), + }, +}; + +static const struct stm32_hash_pdata stm32_hash_pdata_stm32f7 = { + .algs_info = stm32_hash_algs_info_stm32f7, + .algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32f7), +}; + +static const struct of_device_id stm32_hash_of_match[] = { + { + .compatible = "st,stm32f456-hash", + .data = &stm32_hash_pdata_stm32f4, + }, + { + .compatible = "st,stm32f756-hash", + .data = &stm32_hash_pdata_stm32f7, + }, + {}, +}; + +MODULE_DEVICE_TABLE(of, stm32_hash_of_match); + +static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev, + struct device *dev) +{ + const struct of_device_id *match; + int err; + + match = of_match_device(stm32_hash_of_match, dev); + if (!match) { + dev_err(dev, "no compatible OF match\n"); + return -EINVAL; + } + + err = of_property_read_u32(dev->of_node, "dma-maxburst", + &hdev->dma_maxburst); + + hdev->pdata = match->data; + + return err; +} + +static int stm32_hash_probe(struct platform_device *pdev) +{ + struct stm32_hash_dev *hdev; + struct device *dev = &pdev->dev; + struct resource *res; + int ret, irq; + + hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL); + if (!hdev) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + hdev->io_base = devm_ioremap_resource(dev, res); + if (IS_ERR(hdev->io_base)) + return PTR_ERR(hdev->io_base); + + hdev->phys_base = res->start; + + ret = stm32_hash_get_of_match(hdev, dev); + if (ret) + return ret; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(dev, "Cannot get IRQ resource\n"); + return irq; + } + + ret = devm_request_threaded_irq(dev, irq, stm32_hash_irq_handler, + stm32_hash_irq_thread, IRQF_ONESHOT, + dev_name(dev), hdev); + if (ret) { + dev_err(dev, "Cannot grab IRQ\n"); + return ret; + } + + hdev->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(hdev->clk)) { + dev_err(dev, "failed to get clock for hash (%lu)\n", + PTR_ERR(hdev->clk)); + return PTR_ERR(hdev->clk); + } + + ret = clk_prepare_enable(hdev->clk); + if (ret) { + dev_err(dev, "failed to enable hash clock (%d)\n", ret); + return ret; + } + + hdev->rst = devm_reset_control_get(&pdev->dev, NULL); + if (!IS_ERR(hdev->rst)) { + reset_control_assert(hdev->rst); + udelay(2); + 
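+ /*
+  * The short delay above keeps the optional reset line (obtained from
+  * the DT) asserted briefly before it is released below, so the HASH
+  * block starts from a known state.
+  */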
reset_control_deassert(hdev->rst); + } + + hdev->dev = dev; + + platform_set_drvdata(pdev, hdev); + + ret = stm32_hash_dma_init(hdev); + if (ret) + dev_dbg(dev, "DMA mode not available\n"); + + spin_lock(&stm32_hash.lock); + list_add_tail(&hdev->list, &stm32_hash.dev_list); + spin_unlock(&stm32_hash.lock); + + /* Initialize crypto engine */ + hdev->engine = crypto_engine_alloc_init(dev, 1); + if (!hdev->engine) { + ret = -ENOMEM; + goto err_engine; + } + + hdev->engine->prepare_hash_request = stm32_hash_prepare_req; + hdev->engine->hash_one_request = stm32_hash_one_request; + + ret = crypto_engine_start(hdev->engine); + if (ret) + goto err_engine_start; + + hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR); + + /* Register algos */ + ret = stm32_hash_register_algs(hdev); + if (ret) + goto err_algs; + + dev_info(dev, "Init HASH done HW ver %x DMA mode %u\n", + stm32_hash_read(hdev, HASH_VER), hdev->dma_mode); + + return 0; + +err_algs: +err_engine_start: + crypto_engine_exit(hdev->engine); +err_engine: + spin_lock(&stm32_hash.lock); + list_del(&hdev->list); + spin_unlock(&stm32_hash.lock); + + if (hdev->dma_lch) + dma_release_channel(hdev->dma_lch); + + clk_disable_unprepare(hdev->clk); + + return ret; +} + +static int stm32_hash_remove(struct platform_device *pdev) +{ + static struct stm32_hash_dev *hdev; + + hdev = platform_get_drvdata(pdev); + if (!hdev) + return -ENODEV; + + stm32_hash_unregister_algs(hdev); + + crypto_engine_exit(hdev->engine); + + spin_lock(&stm32_hash.lock); + list_del(&hdev->list); + spin_unlock(&stm32_hash.lock); + + if (hdev->dma_lch) + dma_release_channel(hdev->dma_lch); + + clk_disable_unprepare(hdev->clk); + + return 0; +} + +static struct platform_driver stm32_hash_driver = { + .probe = stm32_hash_probe, + .remove = stm32_hash_remove, + .driver = { + .name = "stm32-hash", + .of_match_table = stm32_hash_of_match, + } +}; + +module_platform_driver(stm32_hash_driver); + +MODULE_DESCRIPTION("STM32 SHA1/224/256 & MD5 (HMAC) hw accelerator driver"); +MODULE_AUTHOR("Lionel Debieve "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/crypto/stm32/stm32_crc32.c b/drivers/crypto/stm32/stm32_crc32.c index ec83b1e6bfe8..090582baecfe 100644 --- a/drivers/crypto/stm32/stm32_crc32.c +++ b/drivers/crypto/stm32/stm32_crc32.c @@ -107,12 +107,12 @@ static int stm32_crc_init(struct shash_desc *desc) spin_unlock_bh(&crc_list.lock); /* Reset, set key, poly and configure in bit reverse mode */ - writel(bitrev32(mctx->key), ctx->crc->regs + CRC_INIT); - writel(bitrev32(mctx->poly), ctx->crc->regs + CRC_POL); - writel(CRC_CR_RESET | CRC_CR_REVERSE, ctx->crc->regs + CRC_CR); + writel_relaxed(bitrev32(mctx->key), ctx->crc->regs + CRC_INIT); + writel_relaxed(bitrev32(mctx->poly), ctx->crc->regs + CRC_POL); + writel_relaxed(CRC_CR_RESET | CRC_CR_REVERSE, ctx->crc->regs + CRC_CR); /* Store partial result */ - ctx->partial = readl(ctx->crc->regs + CRC_DR); + ctx->partial = readl_relaxed(ctx->crc->regs + CRC_DR); ctx->crc->nb_pending_bytes = 0; return 0; @@ -135,7 +135,8 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8, if (crc->nb_pending_bytes == sizeof(u32)) { /* Process completed pending data */ - writel(*(u32 *)crc->pending_data, crc->regs + CRC_DR); + writel_relaxed(*(u32 *)crc->pending_data, + crc->regs + CRC_DR); crc->nb_pending_bytes = 0; } } @@ -143,10 +144,10 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8, d32 = (u32 *)d8; for (i = 0; i < length >> 2; i++) /* Process 32 bits data */ - writel(*(d32++), crc->regs + CRC_DR); + 
writel_relaxed(*(d32++), crc->regs + CRC_DR); /* Store partial result */ - ctx->partial = readl(crc->regs + CRC_DR); + ctx->partial = readl_relaxed(crc->regs + CRC_DR); /* Check for pending data (non 32 bits) */ length &= 3; @@ -295,7 +296,7 @@ static int stm32_crc_remove(struct platform_device *pdev) list_del(&crc->list); spin_unlock(&crc_list.lock); - crypto_unregister_shash(algs); + crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); clk_disable_unprepare(crc->clk); diff --git a/drivers/crypto/sunxi-ss/Makefile b/drivers/crypto/sunxi-ss/Makefile index 8f4c7a273141..ccb893219079 100644 --- a/drivers/crypto/sunxi-ss/Makefile +++ b/drivers/crypto/sunxi-ss/Makefile @@ -1,2 +1,3 @@ obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sun4i-ss.o sun4i-ss-y += sun4i-ss-core.o sun4i-ss-hash.o sun4i-ss-cipher.o +sun4i-ss-$(CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG) += sun4i-ss-prng.o diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c index 02ad8256e900..1547cbe13dc2 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c @@ -213,6 +213,23 @@ static struct sun4i_ss_alg_template ss_algs[] = { } } }, +#ifdef CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG +{ + .type = CRYPTO_ALG_TYPE_RNG, + .alg.rng = { + .base = { + .cra_name = "stdrng", + .cra_driver_name = "sun4i_ss_rng", + .cra_priority = 300, + .cra_ctxsize = 0, + .cra_module = THIS_MODULE, + }, + .generate = sun4i_ss_prng_generate, + .seed = sun4i_ss_prng_seed, + .seedsize = SS_SEED_LEN / BITS_PER_BYTE, + } +}, +#endif }; static int sun4i_ss_probe(struct platform_device *pdev) @@ -355,6 +372,13 @@ static int sun4i_ss_probe(struct platform_device *pdev) goto error_alg; } break; + case CRYPTO_ALG_TYPE_RNG: + err = crypto_register_rng(&ss_algs[i].alg.rng); + if (err) { + dev_err(ss->dev, "Fail to register %s\n", + ss_algs[i].alg.rng.base.cra_name); + } + break; } } platform_set_drvdata(pdev, ss); @@ -369,6 +393,9 @@ static int sun4i_ss_probe(struct platform_device *pdev) case CRYPTO_ALG_TYPE_AHASH: crypto_unregister_ahash(&ss_algs[i].alg.hash); break; + case CRYPTO_ALG_TYPE_RNG: + crypto_unregister_rng(&ss_algs[i].alg.rng); + break; } } if (ss->reset) @@ -393,6 +420,9 @@ static int sun4i_ss_remove(struct platform_device *pdev) case CRYPTO_ALG_TYPE_AHASH: crypto_unregister_ahash(&ss_algs[i].alg.hash); break; + case CRYPTO_ALG_TYPE_RNG: + crypto_unregister_rng(&ss_algs[i].alg.rng); + break; } } diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-prng.c b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c new file mode 100644 index 000000000000..0d01d1624252 --- /dev/null +++ b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c @@ -0,0 +1,56 @@ +#include "sun4i-ss.h" + +int sun4i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed, + unsigned int slen) +{ + struct sun4i_ss_alg_template *algt; + struct rng_alg *alg = crypto_rng_alg(tfm); + + algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng); + memcpy(algt->ss->seed, seed, slen); + + return 0; +} + +int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int dlen) +{ + struct sun4i_ss_alg_template *algt; + struct rng_alg *alg = crypto_rng_alg(tfm); + int i; + u32 v; + u32 *data = (u32 *)dst; + const u32 mode = SS_OP_PRNG | SS_PRNG_CONTINUE | SS_ENABLED; + size_t len; + struct sun4i_ss_ctx *ss; + unsigned int todo = (dlen / 4) * 4; + + algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng); + ss = algt->ss; + + spin_lock(&ss->slock); + + writel(mode, ss->base + SS_CTL); + + while (todo > 0) { + /* write the seed */ 
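+ /*
+  * Each pass of this loop reloads the current seed into the SS_KEY0..
+  * registers, pulls up to SS_DATA_LEN / 8 (20) bytes of PRNG output
+  * from the TX FIFO, then reads the key registers back so the updated
+  * internal state seeds the next pass.
+  *
+  * Illustrative use only (a sketch, not part of this patch): a kernel
+  * consumer would reach this path through the standard rng API, e.g.
+  *   tfm = crypto_alloc_rng("stdrng", 0, 0);
+  *   crypto_rng_reset(tfm, seed, crypto_rng_seedsize(tfm));
+  *   crypto_rng_get_bytes(tfm, buf, len);
+  *   crypto_free_rng(tfm);
+  */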
+ for (i = 0; i < SS_SEED_LEN / BITS_PER_LONG; i++) + writel(ss->seed[i], ss->base + SS_KEY0 + i * 4); + + /* Read the random data */ + len = min_t(size_t, SS_DATA_LEN / BITS_PER_BYTE, todo); + readsl(ss->base + SS_TXFIFO, data, len / 4); + data += len / 4; + todo -= len; + + /* Update the seed */ + for (i = 0; i < SS_SEED_LEN / BITS_PER_LONG; i++) { + v = readl(ss->base + SS_KEY0 + i * 4); + ss->seed[i] = v; + } + } + + writel(0, ss->base + SS_CTL); + spin_unlock(&ss->slock); + return dlen; +} diff --git a/drivers/crypto/sunxi-ss/sun4i-ss.h b/drivers/crypto/sunxi-ss/sun4i-ss.h index a0e1efc1cb2a..f3ac90692ac6 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss.h +++ b/drivers/crypto/sunxi-ss/sun4i-ss.h @@ -32,6 +32,7 @@ #include #include #include +#include #define SS_CTL 0x00 #define SS_KEY0 0x04 @@ -127,6 +128,9 @@ #define SS_RXFIFO_EMP_INT_ENABLE (1 << 2) #define SS_TXFIFO_AVA_INT_ENABLE (1 << 0) +#define SS_SEED_LEN 192 +#define SS_DATA_LEN 160 + struct sun4i_ss_ctx { void __iomem *base; int irq; @@ -136,6 +140,9 @@ struct sun4i_ss_ctx { struct device *dev; struct resource *res; spinlock_t slock; /* control the use of the device */ +#ifdef CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG + u32 seed[SS_SEED_LEN / BITS_PER_LONG]; +#endif }; struct sun4i_ss_alg_template { @@ -144,6 +151,7 @@ struct sun4i_ss_alg_template { union { struct skcipher_alg crypto; struct ahash_alg hash; + struct rng_alg rng; } alg; struct sun4i_ss_ctx *ss; }; @@ -201,3 +209,6 @@ int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen); int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen); +int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int dlen); +int sun4i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed, unsigned int slen); diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 79791c690858..dff88838dce7 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -1756,9 +1756,9 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, req_ctx->swinit = 0; } else { desc->ptr[1] = zero_entry; - /* Indicate next op is not the first. */ - req_ctx->first = 0; } + /* Indicate next op is not the first. 
*/ + req_ctx->first = 0; /* HMAC key */ if (ctx->keylen) @@ -1769,7 +1769,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, sg_count = edesc->src_nents ?: 1; if (is_sec1 && sg_count > 1) - sg_copy_to_buffer(areq->src, sg_count, edesc->buf, length); + sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length); else sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count, DMA_TO_DEVICE); @@ -3057,7 +3057,8 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev, t_alg->algt.alg.hash.final = ahash_final; t_alg->algt.alg.hash.finup = ahash_finup; t_alg->algt.alg.hash.digest = ahash_digest; - t_alg->algt.alg.hash.setkey = ahash_setkey; + if (!strncmp(alg->cra_name, "hmac", 4)) + t_alg->algt.alg.hash.setkey = ahash_setkey; t_alg->algt.alg.hash.import = ahash_import; t_alg->algt.alg.hash.export = ahash_export; diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c index 49defda4e03d..5035b0dc1e40 100644 --- a/drivers/crypto/virtio/virtio_crypto_algs.c +++ b/drivers/crypto/virtio/virtio_crypto_algs.c @@ -27,12 +27,68 @@ #include #include "virtio_crypto_common.h" + +struct virtio_crypto_ablkcipher_ctx { + struct virtio_crypto *vcrypto; + struct crypto_tfm *tfm; + + struct virtio_crypto_sym_session_info enc_sess_info; + struct virtio_crypto_sym_session_info dec_sess_info; +}; + +struct virtio_crypto_sym_request { + struct virtio_crypto_request base; + + /* Cipher or aead */ + uint32_t type; + struct virtio_crypto_ablkcipher_ctx *ablkcipher_ctx; + struct ablkcipher_request *ablkcipher_req; + uint8_t *iv; + /* Encryption? */ + bool encrypt; +}; + /* * The algs_lock protects the below global virtio_crypto_active_devs * and crypto algorithms registion. */ static DEFINE_MUTEX(algs_lock); static unsigned int virtio_crypto_active_devs; +static void virtio_crypto_ablkcipher_finalize_req( + struct virtio_crypto_sym_request *vc_sym_req, + struct ablkcipher_request *req, + int err); + +static void virtio_crypto_dataq_sym_callback + (struct virtio_crypto_request *vc_req, int len) +{ + struct virtio_crypto_sym_request *vc_sym_req = + container_of(vc_req, struct virtio_crypto_sym_request, base); + struct ablkcipher_request *ablk_req; + int error; + + /* Finish the encrypt or decrypt process */ + if (vc_sym_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) { + switch (vc_req->status) { + case VIRTIO_CRYPTO_OK: + error = 0; + break; + case VIRTIO_CRYPTO_INVSESS: + case VIRTIO_CRYPTO_ERR: + error = -EINVAL; + break; + case VIRTIO_CRYPTO_BADMSG: + error = -EBADMSG; + break; + default: + error = -EIO; + break; + } + ablk_req = vc_sym_req->ablkcipher_req; + virtio_crypto_ablkcipher_finalize_req(vc_sym_req, + ablk_req, error); + } +} static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg) { @@ -286,13 +342,14 @@ static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm, } static int -__virtio_crypto_ablkcipher_do_req(struct virtio_crypto_request *vc_req, +__virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req, struct ablkcipher_request *req, struct data_queue *data_vq) { struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct virtio_crypto_ablkcipher_ctx *ctx = vc_sym_req->ablkcipher_ctx; + struct virtio_crypto_request *vc_req = &vc_sym_req->base; unsigned int ivsize = crypto_ablkcipher_ivsize(tfm); - struct virtio_crypto_ablkcipher_ctx *ctx = vc_req->ablkcipher_ctx; struct virtio_crypto *vcrypto = ctx->vcrypto; struct virtio_crypto_op_data_req *req_data; int src_nents, dst_nents; @@ 
-326,9 +383,9 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_request *vc_req, } vc_req->req_data = req_data; - vc_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER; + vc_sym_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER; /* Head of operation */ - if (vc_req->encrypt) { + if (vc_sym_req->encrypt) { req_data->header.session_id = cpu_to_le64(ctx->enc_sess_info.session_id); req_data->header.opcode = @@ -383,7 +440,7 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_request *vc_req, memcpy(iv, req->info, ivsize); sg_init_one(&iv_sg, iv, ivsize); sgs[num_out++] = &iv_sg; - vc_req->iv = iv; + vc_sym_req->iv = iv; /* Source data */ for (i = 0; i < src_nents; i++) @@ -421,15 +478,18 @@ static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req) { struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req); struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm); - struct virtio_crypto_request *vc_req = ablkcipher_request_ctx(req); + struct virtio_crypto_sym_request *vc_sym_req = + ablkcipher_request_ctx(req); + struct virtio_crypto_request *vc_req = &vc_sym_req->base; struct virtio_crypto *vcrypto = ctx->vcrypto; /* Use the first data virtqueue as default */ struct data_queue *data_vq = &vcrypto->data_vq[0]; - vc_req->ablkcipher_ctx = ctx; - vc_req->ablkcipher_req = req; - vc_req->encrypt = true; vc_req->dataq = data_vq; + vc_req->alg_cb = virtio_crypto_dataq_sym_callback; + vc_sym_req->ablkcipher_ctx = ctx; + vc_sym_req->ablkcipher_req = req; + vc_sym_req->encrypt = true; return crypto_transfer_cipher_request_to_engine(data_vq->engine, req); } @@ -438,16 +498,18 @@ static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req) { struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req); struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm); - struct virtio_crypto_request *vc_req = ablkcipher_request_ctx(req); + struct virtio_crypto_sym_request *vc_sym_req = + ablkcipher_request_ctx(req); + struct virtio_crypto_request *vc_req = &vc_sym_req->base; struct virtio_crypto *vcrypto = ctx->vcrypto; /* Use the first data virtqueue as default */ struct data_queue *data_vq = &vcrypto->data_vq[0]; - vc_req->ablkcipher_ctx = ctx; - vc_req->ablkcipher_req = req; - - vc_req->encrypt = false; vc_req->dataq = data_vq; + vc_req->alg_cb = virtio_crypto_dataq_sym_callback; + vc_sym_req->ablkcipher_ctx = ctx; + vc_sym_req->ablkcipher_req = req; + vc_sym_req->encrypt = false; return crypto_transfer_cipher_request_to_engine(data_vq->engine, req); } @@ -456,7 +518,7 @@ static int virtio_crypto_ablkcipher_init(struct crypto_tfm *tfm) { struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm); - tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_request); + tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_sym_request); ctx->tfm = tfm; return 0; @@ -479,11 +541,13 @@ int virtio_crypto_ablkcipher_crypt_req( struct crypto_engine *engine, struct ablkcipher_request *req) { - struct virtio_crypto_request *vc_req = ablkcipher_request_ctx(req); + struct virtio_crypto_sym_request *vc_sym_req = + ablkcipher_request_ctx(req); + struct virtio_crypto_request *vc_req = &vc_sym_req->base; struct data_queue *data_vq = vc_req->dataq; int ret; - ret = __virtio_crypto_ablkcipher_do_req(vc_req, req, data_vq); + ret = __virtio_crypto_ablkcipher_do_req(vc_sym_req, req, data_vq); if (ret < 0) return ret; @@ -492,14 +556,15 @@ int virtio_crypto_ablkcipher_crypt_req( return 0; } -void virtio_crypto_ablkcipher_finalize_req( - struct virtio_crypto_request 
*vc_req, +static void virtio_crypto_ablkcipher_finalize_req( + struct virtio_crypto_sym_request *vc_sym_req, struct ablkcipher_request *req, int err) { - crypto_finalize_cipher_request(vc_req->dataq->engine, req, err); - - virtcrypto_clear_request(vc_req); + crypto_finalize_cipher_request(vc_sym_req->base.dataq->engine, + req, err); + kzfree(vc_sym_req->iv); + virtcrypto_clear_request(&vc_sym_req->base); } static struct crypto_alg virtio_crypto_algs[] = { { diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h index da6d8c0ea407..e976539a05d9 100644 --- a/drivers/crypto/virtio/virtio_crypto_common.h +++ b/drivers/crypto/virtio/virtio_crypto_common.h @@ -83,26 +83,16 @@ struct virtio_crypto_sym_session_info { __u64 session_id; }; -struct virtio_crypto_ablkcipher_ctx { - struct virtio_crypto *vcrypto; - struct crypto_tfm *tfm; - - struct virtio_crypto_sym_session_info enc_sess_info; - struct virtio_crypto_sym_session_info dec_sess_info; -}; +struct virtio_crypto_request; +typedef void (*virtio_crypto_data_callback) + (struct virtio_crypto_request *vc_req, int len); struct virtio_crypto_request { - /* Cipher or aead */ - uint32_t type; uint8_t status; - struct virtio_crypto_ablkcipher_ctx *ablkcipher_ctx; - struct ablkcipher_request *ablkcipher_req; struct virtio_crypto_op_data_req *req_data; struct scatterlist **sgs; - uint8_t *iv; - /* Encryption? */ - bool encrypt; struct data_queue *dataq; + virtio_crypto_data_callback alg_cb; }; int virtcrypto_devmgr_add_dev(struct virtio_crypto *vcrypto_dev); @@ -119,10 +109,6 @@ void virtcrypto_dev_stop(struct virtio_crypto *vcrypto); int virtio_crypto_ablkcipher_crypt_req( struct crypto_engine *engine, struct ablkcipher_request *req); -void virtio_crypto_ablkcipher_finalize_req( - struct virtio_crypto_request *vc_req, - struct ablkcipher_request *req, - int err); void virtcrypto_clear_request(struct virtio_crypto_request *vc_req); diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c index a111cd72797b..ff1410a32c2b 100644 --- a/drivers/crypto/virtio/virtio_crypto_core.c +++ b/drivers/crypto/virtio/virtio_crypto_core.c @@ -29,7 +29,6 @@ void virtcrypto_clear_request(struct virtio_crypto_request *vc_req) { if (vc_req) { - kzfree(vc_req->iv); kzfree(vc_req->req_data); kfree(vc_req->sgs); } @@ -41,40 +40,18 @@ static void virtcrypto_dataq_callback(struct virtqueue *vq) struct virtio_crypto_request *vc_req; unsigned long flags; unsigned int len; - struct ablkcipher_request *ablk_req; - int error; unsigned int qid = vq->index; spin_lock_irqsave(&vcrypto->data_vq[qid].lock, flags); do { virtqueue_disable_cb(vq); while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) { - if (vc_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) { - switch (vc_req->status) { - case VIRTIO_CRYPTO_OK: - error = 0; - break; - case VIRTIO_CRYPTO_INVSESS: - case VIRTIO_CRYPTO_ERR: - error = -EINVAL; - break; - case VIRTIO_CRYPTO_BADMSG: - error = -EBADMSG; - break; - default: - error = -EIO; - break; - } - ablk_req = vc_req->ablkcipher_req; - - spin_unlock_irqrestore( - &vcrypto->data_vq[qid].lock, flags); - /* Finish the encrypt or decrypt process */ - virtio_crypto_ablkcipher_finalize_req(vc_req, - ablk_req, error); - spin_lock_irqsave( - &vcrypto->data_vq[qid].lock, flags); - } + spin_unlock_irqrestore( + &vcrypto->data_vq[qid].lock, flags); + if (vc_req->alg_cb) + vc_req->alg_cb(vc_req, len); + spin_lock_irqsave( + &vcrypto->data_vq[qid].lock, flags); } } while (!virtqueue_enable_cb(vq)); 
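 /*
  * The data-vq lock is dropped around each alg_cb invocation above, just
  * as the old inline completion code dropped it before finalizing the
  * request; the callback (here virtio_crypto_dataq_sym_callback) may
  * finalize the request and trigger follow-up submissions.
  */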
spin_unlock_irqrestore(&vcrypto->data_vq[qid].lock, flags); @@ -270,7 +247,7 @@ static int virtcrypto_update_status(struct virtio_crypto *vcrypto) return -EPERM; } - dev_info(&vcrypto->vdev->dev, "Accelerator is ready\n"); + dev_info(&vcrypto->vdev->dev, "Accelerator device is ready\n"); } else { virtcrypto_dev_stop(vcrypto); dev_info(&vcrypto->vdev->dev, "Accelerator is not ready\n"); diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c index 9c26d9e8dbea..17d84217dd76 100644 --- a/drivers/crypto/vmx/aes_ctr.c +++ b/drivers/crypto/vmx/aes_ctr.c @@ -104,8 +104,7 @@ static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx, pagefault_enable(); preempt_enable(); - crypto_xor(keystream, src, nbytes); - memcpy(dst, keystream, nbytes); + crypto_xor_cpy(dst, keystream, src, nbytes); crypto_inc(ctrblk, AES_BLOCK_SIZE); } diff --git a/drivers/dax/super.c b/drivers/dax/super.c index 938eb4868f7f..557b93703532 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c @@ -46,6 +46,8 @@ void dax_read_unlock(int id) EXPORT_SYMBOL_GPL(dax_read_unlock); #ifdef CONFIG_BLOCK +#include + int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size, pgoff_t *pgoff) { @@ -59,6 +61,16 @@ int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size, } EXPORT_SYMBOL(bdev_dax_pgoff); +#if IS_ENABLED(CONFIG_FS_DAX) +struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev) +{ + if (!blk_queue_dax(bdev->bd_queue)) + return NULL; + return fs_dax_get_by_host(bdev->bd_disk->disk_name); +} +EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev); +#endif + /** * __bdev_dax_supported() - Check if the device supports dax for filesystem * @sb: The superblock of the device @@ -189,8 +201,10 @@ static umode_t dax_visible(struct kobject *kobj, struct attribute *a, int n) if (!dax_dev) return 0; - if (a == &dev_attr_write_cache.attr && !dax_dev->ops->flush) +#ifndef CONFIG_ARCH_HAS_PMEM_API + if (a == &dev_attr_write_cache.attr) return 0; +#endif return a->mode; } @@ -255,18 +269,23 @@ size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, } EXPORT_SYMBOL_GPL(dax_copy_from_iter); -void dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, - size_t size) +#ifdef CONFIG_ARCH_HAS_PMEM_API +void arch_wb_cache_pmem(void *addr, size_t size); +void dax_flush(struct dax_device *dax_dev, void *addr, size_t size) { - if (!dax_alive(dax_dev)) + if (unlikely(!dax_alive(dax_dev))) return; - if (!test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags)) + if (unlikely(!test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags))) return; - if (dax_dev->ops->flush) - dax_dev->ops->flush(dax_dev, pgoff, addr, size); + arch_wb_cache_pmem(addr, size); } +#else +void dax_flush(struct dax_device *dax_dev, void *addr, size_t size) +{ +} +#endif EXPORT_SYMBOL_GPL(dax_flush); void dax_write_cache(struct dax_device *dax_dev, bool wc) diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig index 41254e702f1e..6a172d338f6d 100644 --- a/drivers/devfreq/Kconfig +++ b/drivers/devfreq/Kconfig @@ -1,6 +1,7 @@ menuconfig PM_DEVFREQ bool "Generic Dynamic Voltage and Frequency Scaling (DVFS) support" select SRCU + select PM_OPP help A device may have a list of frequencies and voltages available. 
devfreq, a generic DVFS framework can be registered for a device diff --git a/drivers/devfreq/devfreq-event.c b/drivers/devfreq/devfreq-event.c index 8648b32ebc89..d67242d87744 100644 --- a/drivers/devfreq/devfreq-event.c +++ b/drivers/devfreq/devfreq-event.c @@ -277,8 +277,8 @@ int devfreq_event_get_edev_count(struct device *dev) sizeof(u32)); if (count < 0) { dev_err(dev, - "failed to get the count of devfreq-event in %s node\n", - dev->of_node->full_name); + "failed to get the count of devfreq-event in %pOF node\n", + dev->of_node); return count; } diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index dea04871b50d..a1c4ee818614 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -564,7 +564,7 @@ struct devfreq *devfreq_add_device(struct device *dev, err = device_register(&devfreq->dev); if (err) { mutex_unlock(&devfreq->lock); - goto err_out; + goto err_dev; } devfreq->trans_table = devm_kzalloc(&devfreq->dev, @@ -610,6 +610,9 @@ struct devfreq *devfreq_add_device(struct device *dev, mutex_unlock(&devfreq_list_lock); device_unregister(&devfreq->dev); +err_dev: + if (devfreq) + kfree(devfreq); err_out: return ERR_PTR(err); } diff --git a/drivers/devfreq/governor.h b/drivers/devfreq/governor.h index a4f2fa1091e4..cfc50a61a90d 100644 --- a/drivers/devfreq/governor.h +++ b/drivers/devfreq/governor.h @@ -69,4 +69,8 @@ extern int devfreq_remove_governor(struct devfreq_governor *governor); extern int devfreq_update_status(struct devfreq *devfreq, unsigned long freq); +static inline int devfreq_update_stats(struct devfreq *df) +{ + return df->profile->get_dev_status(df->dev.parent, &df->last_status); +} #endif /* _GOVERNOR_H */ diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index fa8f9c07ce73..fadc4d8783bd 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -56,6 +56,12 @@ config DMA_OF select DMA_ENGINE #devices +config ALTERA_MSGDMA + tristate "Altera / Intel mSGDMA Engine" + select DMA_ENGINE + help + Enable support for Altera / Intel mSGDMA controller. + config AMBA_PL08X bool "ARM PrimeCell PL080 or PL081 support" depends on ARM_AMBA diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index d12ab2985ed1..f08f8de1b567 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -12,6 +12,7 @@ obj-$(CONFIG_DMA_OF) += of-dma.o obj-$(CONFIG_DMATEST) += dmatest.o #devices +obj-$(CONFIG_ALTERA_MSGDMA) += altera-msgdma.o obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/ obj-$(CONFIG_AT_HDMAC) += at_hdmac.o diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c new file mode 100644 index 000000000000..32905d5606ac --- /dev/null +++ b/drivers/dma/altera-msgdma.c @@ -0,0 +1,927 @@ +/* + * DMA driver for Altera mSGDMA IP core + * + * Copyright (C) 2017 Stefan Roese + * + * Based on drivers/dma/xilinx/zynqmp_dma.c, which is: + * Copyright (C) 2016 Xilinx, Inc. All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dmaengine.h" + +#define MSGDMA_MAX_TRANS_LEN U32_MAX +#define MSGDMA_DESC_NUM 1024 + +/** + * struct msgdma_extended_desc - implements an extended descriptor + * @read_addr_lo: data buffer source address low bits + * @write_addr_lo: data buffer destination address low bits + * @len: the number of bytes to transfer per descriptor + * @burst_seq_num: bit 31:24 write burst + * bit 23:16 read burst + * bit 15:00 sequence number + * @stride: bit 31:16 write stride + * bit 15:00 read stride + * @read_addr_hi: data buffer source address high bits + * @write_addr_hi: data buffer destination address high bits + * @control: characteristics of the transfer + */ +struct msgdma_extended_desc { + u32 read_addr_lo; + u32 write_addr_lo; + u32 len; + u32 burst_seq_num; + u32 stride; + u32 read_addr_hi; + u32 write_addr_hi; + u32 control; +}; + +/* mSGDMA descriptor control field bit definitions */ +#define MSGDMA_DESC_CTL_SET_CH(x) ((x) & 0xff) +#define MSGDMA_DESC_CTL_GEN_SOP BIT(8) +#define MSGDMA_DESC_CTL_GEN_EOP BIT(9) +#define MSGDMA_DESC_CTL_PARK_READS BIT(10) +#define MSGDMA_DESC_CTL_PARK_WRITES BIT(11) +#define MSGDMA_DESC_CTL_END_ON_EOP BIT(12) +#define MSGDMA_DESC_CTL_END_ON_LEN BIT(13) +#define MSGDMA_DESC_CTL_TR_COMP_IRQ BIT(14) +#define MSGDMA_DESC_CTL_EARLY_IRQ BIT(15) +#define MSGDMA_DESC_CTL_TR_ERR_IRQ GENMASK(23, 16) +#define MSGDMA_DESC_CTL_EARLY_DONE BIT(24) + +/* + * Writing "1" the "go" bit commits the entire descriptor into the + * descriptor FIFO(s) + */ +#define MSGDMA_DESC_CTL_GO BIT(31) + +/* Tx buffer control flags */ +#define MSGDMA_DESC_CTL_TX_FIRST (MSGDMA_DESC_CTL_GEN_SOP | \ + MSGDMA_DESC_CTL_TR_ERR_IRQ | \ + MSGDMA_DESC_CTL_GO) + +#define MSGDMA_DESC_CTL_TX_MIDDLE (MSGDMA_DESC_CTL_TR_ERR_IRQ | \ + MSGDMA_DESC_CTL_GO) + +#define MSGDMA_DESC_CTL_TX_LAST (MSGDMA_DESC_CTL_GEN_EOP | \ + MSGDMA_DESC_CTL_TR_COMP_IRQ | \ + MSGDMA_DESC_CTL_TR_ERR_IRQ | \ + MSGDMA_DESC_CTL_GO) + +#define MSGDMA_DESC_CTL_TX_SINGLE (MSGDMA_DESC_CTL_GEN_SOP | \ + MSGDMA_DESC_CTL_GEN_EOP | \ + MSGDMA_DESC_CTL_TR_COMP_IRQ | \ + MSGDMA_DESC_CTL_TR_ERR_IRQ | \ + MSGDMA_DESC_CTL_GO) + +#define MSGDMA_DESC_CTL_RX_SINGLE (MSGDMA_DESC_CTL_END_ON_EOP | \ + MSGDMA_DESC_CTL_END_ON_LEN | \ + MSGDMA_DESC_CTL_TR_COMP_IRQ | \ + MSGDMA_DESC_CTL_EARLY_IRQ | \ + MSGDMA_DESC_CTL_TR_ERR_IRQ | \ + MSGDMA_DESC_CTL_GO) + +/* mSGDMA extended descriptor stride definitions */ +#define MSGDMA_DESC_STRIDE_RD 0x00000001 +#define MSGDMA_DESC_STRIDE_WR 0x00010000 +#define MSGDMA_DESC_STRIDE_RW 0x00010001 + +/* mSGDMA dispatcher control and status register map */ +#define MSGDMA_CSR_STATUS 0x00 /* Read / Clear */ +#define MSGDMA_CSR_CONTROL 0x04 /* Read / Write */ +#define MSGDMA_CSR_RW_FILL_LEVEL 0x08 /* 31:16 - write fill level */ + /* 15:00 - read fill level */ +#define MSGDMA_CSR_RESP_FILL_LEVEL 0x0c /* response FIFO fill level */ +#define MSGDMA_CSR_RW_SEQ_NUM 0x10 /* 31:16 - write seq number */ + /* 15:00 - read seq number */ + +/* mSGDMA CSR status register bit definitions */ +#define MSGDMA_CSR_STAT_BUSY BIT(0) +#define MSGDMA_CSR_STAT_DESC_BUF_EMPTY BIT(1) +#define MSGDMA_CSR_STAT_DESC_BUF_FULL BIT(2) +#define MSGDMA_CSR_STAT_RESP_BUF_EMPTY BIT(3) +#define MSGDMA_CSR_STAT_RESP_BUF_FULL BIT(4) +#define MSGDMA_CSR_STAT_STOPPED BIT(5) +#define MSGDMA_CSR_STAT_RESETTING BIT(6) +#define MSGDMA_CSR_STAT_STOPPED_ON_ERR BIT(7) +#define MSGDMA_CSR_STAT_STOPPED_ON_EARLY BIT(8) +#define MSGDMA_CSR_STAT_IRQ 
BIT(9) +#define MSGDMA_CSR_STAT_MASK GENMASK(9, 0) +#define MSGDMA_CSR_STAT_MASK_WITHOUT_IRQ GENMASK(8, 0) + +#define DESC_EMPTY (MSGDMA_CSR_STAT_DESC_BUF_EMPTY | \ + MSGDMA_CSR_STAT_RESP_BUF_EMPTY) + +/* mSGDMA CSR control register bit definitions */ +#define MSGDMA_CSR_CTL_STOP BIT(0) +#define MSGDMA_CSR_CTL_RESET BIT(1) +#define MSGDMA_CSR_CTL_STOP_ON_ERR BIT(2) +#define MSGDMA_CSR_CTL_STOP_ON_EARLY BIT(3) +#define MSGDMA_CSR_CTL_GLOBAL_INTR BIT(4) +#define MSGDMA_CSR_CTL_STOP_DESCS BIT(5) + +/* mSGDMA CSR fill level bits */ +#define MSGDMA_CSR_WR_FILL_LEVEL_GET(v) (((v) & 0xffff0000) >> 16) +#define MSGDMA_CSR_RD_FILL_LEVEL_GET(v) ((v) & 0x0000ffff) +#define MSGDMA_CSR_RESP_FILL_LEVEL_GET(v) ((v) & 0x0000ffff) + +#define MSGDMA_CSR_SEQ_NUM_GET(v) (((v) & 0xffff0000) >> 16) + +/* mSGDMA response register map */ +#define MSGDMA_RESP_BYTES_TRANSFERRED 0x00 +#define MSGDMA_RESP_STATUS 0x04 + +/* mSGDMA response register bit definitions */ +#define MSGDMA_RESP_EARLY_TERM BIT(8) +#define MSGDMA_RESP_ERR_MASK 0xff + +/** + * struct msgdma_sw_desc - implements a sw descriptor + * @async_tx: support for the async_tx api + * @hw_desc: assosiated HW descriptor + * @free_list: node of the free SW descriprots list + */ +struct msgdma_sw_desc { + struct dma_async_tx_descriptor async_tx; + struct msgdma_extended_desc hw_desc; + struct list_head node; + struct list_head tx_list; +}; + +/** + * struct msgdma_device - DMA device structure + */ +struct msgdma_device { + spinlock_t lock; + struct device *dev; + struct tasklet_struct irq_tasklet; + struct list_head pending_list; + struct list_head free_list; + struct list_head active_list; + struct list_head done_list; + u32 desc_free_cnt; + bool idle; + + struct dma_device dmadev; + struct dma_chan dmachan; + dma_addr_t hw_desq; + struct msgdma_sw_desc *sw_desq; + unsigned int npendings; + + struct dma_slave_config slave_cfg; + + int irq; + + /* mSGDMA controller */ + void __iomem *csr; + + /* mSGDMA descriptors */ + void __iomem *desc; + + /* mSGDMA response */ + void __iomem *resp; +}; + +#define to_mdev(chan) container_of(chan, struct msgdma_device, dmachan) +#define tx_to_desc(tx) container_of(tx, struct msgdma_sw_desc, async_tx) + +/** + * msgdma_get_descriptor - Get the sw descriptor from the pool + * @mdev: Pointer to the Altera mSGDMA device structure + * + * Return: The sw descriptor + */ +static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev) +{ + struct msgdma_sw_desc *desc; + + spin_lock_bh(&mdev->lock); + desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node); + list_del(&desc->node); + spin_unlock_bh(&mdev->lock); + + INIT_LIST_HEAD(&desc->tx_list); + + return desc; +} + +/** + * msgdma_free_descriptor - Issue pending transactions + * @mdev: Pointer to the Altera mSGDMA device structure + * @desc: Transaction descriptor pointer + */ +static void msgdma_free_descriptor(struct msgdma_device *mdev, + struct msgdma_sw_desc *desc) +{ + struct msgdma_sw_desc *child, *next; + + mdev->desc_free_cnt++; + list_add_tail(&desc->node, &mdev->free_list); + list_for_each_entry_safe(child, next, &desc->tx_list, node) { + mdev->desc_free_cnt++; + list_move_tail(&child->node, &mdev->free_list); + } +} + +/** + * msgdma_free_desc_list - Free descriptors list + * @mdev: Pointer to the Altera mSGDMA device structure + * @list: List to parse and delete the descriptor + */ +static void msgdma_free_desc_list(struct msgdma_device *mdev, + struct list_head *list) +{ + struct msgdma_sw_desc *desc, *next; + + 
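+ /*
+  * Hand each descriptor on @list, along with any children chained on
+  * its tx_list, back to mdev->free_list; msgdma_free_descriptor()
+  * increments desc_free_cnt for every one so later prep calls can
+  * reserve them again.
+  */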
list_for_each_entry_safe(desc, next, list, node) + msgdma_free_descriptor(mdev, desc); +} + +/** + * msgdma_desc_config - Configure the descriptor + * @desc: Hw descriptor pointer + * @dst: Destination buffer address + * @src: Source buffer address + * @len: Transfer length + */ +static void msgdma_desc_config(struct msgdma_extended_desc *desc, + dma_addr_t dst, dma_addr_t src, size_t len, + u32 stride) +{ + /* Set lower 32bits of src & dst addresses in the descriptor */ + desc->read_addr_lo = lower_32_bits(src); + desc->write_addr_lo = lower_32_bits(dst); + + /* Set upper 32bits of src & dst addresses in the descriptor */ + desc->read_addr_hi = upper_32_bits(src); + desc->write_addr_hi = upper_32_bits(dst); + + desc->len = len; + desc->stride = stride; + desc->burst_seq_num = 0; /* 0 will result in max burst length */ + + /* + * Don't set interrupt on xfer end yet, this will be done later + * for the "last" descriptor + */ + desc->control = MSGDMA_DESC_CTL_TR_ERR_IRQ | MSGDMA_DESC_CTL_GO | + MSGDMA_DESC_CTL_END_ON_LEN; +} + +/** + * msgdma_desc_config_eod - Mark the descriptor as end descriptor + * @desc: Hw descriptor pointer + */ +static void msgdma_desc_config_eod(struct msgdma_extended_desc *desc) +{ + desc->control |= MSGDMA_DESC_CTL_TR_COMP_IRQ; +} + +/** + * msgdma_tx_submit - Submit DMA transaction + * @tx: Async transaction descriptor pointer + * + * Return: cookie value + */ +static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct msgdma_device *mdev = to_mdev(tx->chan); + struct msgdma_sw_desc *new; + dma_cookie_t cookie; + + new = tx_to_desc(tx); + spin_lock_bh(&mdev->lock); + cookie = dma_cookie_assign(tx); + + list_add_tail(&new->node, &mdev->pending_list); + spin_unlock_bh(&mdev->lock); + + return cookie; +} + +/** + * msgdma_prep_memcpy - prepare descriptors for memcpy transaction + * @dchan: DMA channel + * @dma_dst: Destination buffer address + * @dma_src: Source buffer address + * @len: Transfer length + * @flags: transfer ack flags + * + * Return: Async transaction descriptor on success and NULL on failure + */ +static struct dma_async_tx_descriptor * +msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst, + dma_addr_t dma_src, size_t len, ulong flags) +{ + struct msgdma_device *mdev = to_mdev(dchan); + struct msgdma_sw_desc *new, *first = NULL; + struct msgdma_extended_desc *desc; + size_t copy; + u32 desc_cnt; + + desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN); + + spin_lock_bh(&mdev->lock); + if (desc_cnt > mdev->desc_free_cnt) { + spin_unlock_bh(&mdev->lock); + dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev); + return NULL; + } + mdev->desc_free_cnt -= desc_cnt; + spin_unlock_bh(&mdev->lock); + + do { + /* Allocate and populate the descriptor */ + new = msgdma_get_descriptor(mdev); + + copy = min_t(size_t, len, MSGDMA_MAX_TRANS_LEN); + desc = &new->hw_desc; + msgdma_desc_config(desc, dma_dst, dma_src, copy, + MSGDMA_DESC_STRIDE_RW); + len -= copy; + dma_src += copy; + dma_dst += copy; + if (!first) + first = new; + else + list_add_tail(&new->node, &first->tx_list); + } while (len); + + msgdma_desc_config_eod(desc); + async_tx_ack(&first->async_tx); + first->async_tx.flags = flags; + + return &first->async_tx; +} + +/** + * msgdma_prep_slave_sg - prepare descriptors for a slave sg transaction + * + * @dchan: DMA channel + * @sgl: Destination scatter list + * @sg_len: Number of entries in destination scatter list + * @dir: DMA transfer direction + * @flags: transfer ack flags + * @context: transfer context 
(unused) + */ +static struct dma_async_tx_descriptor * +msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, + unsigned int sg_len, enum dma_transfer_direction dir, + unsigned long flags, void *context) + +{ + struct msgdma_device *mdev = to_mdev(dchan); + struct dma_slave_config *cfg = &mdev->slave_cfg; + struct msgdma_sw_desc *new, *first = NULL; + void *desc = NULL; + size_t len, avail; + dma_addr_t dma_dst, dma_src; + u32 desc_cnt = 0, i; + struct scatterlist *sg; + u32 stride; + + for_each_sg(sgl, sg, sg_len, i) + desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN); + + spin_lock_bh(&mdev->lock); + if (desc_cnt > mdev->desc_free_cnt) { + spin_unlock_bh(&mdev->lock); + dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev); + return NULL; + } + mdev->desc_free_cnt -= desc_cnt; + spin_unlock_bh(&mdev->lock); + + avail = sg_dma_len(sgl); + + /* Run until we are out of scatterlist entries */ + while (true) { + /* Allocate and populate the descriptor */ + new = msgdma_get_descriptor(mdev); + + desc = &new->hw_desc; + len = min_t(size_t, avail, MSGDMA_MAX_TRANS_LEN); + + if (dir == DMA_MEM_TO_DEV) { + dma_src = sg_dma_address(sgl) + sg_dma_len(sgl) - avail; + dma_dst = cfg->dst_addr; + stride = MSGDMA_DESC_STRIDE_RD; + } else { + dma_src = cfg->src_addr; + dma_dst = sg_dma_address(sgl) + sg_dma_len(sgl) - avail; + stride = MSGDMA_DESC_STRIDE_WR; + } + msgdma_desc_config(desc, dma_dst, dma_src, len, stride); + avail -= len; + + if (!first) + first = new; + else + list_add_tail(&new->node, &first->tx_list); + + /* Fetch the next scatterlist entry */ + if (avail == 0) { + if (sg_len == 0) + break; + sgl = sg_next(sgl); + if (sgl == NULL) + break; + sg_len--; + avail = sg_dma_len(sgl); + } + } + + msgdma_desc_config_eod(desc); + first->async_tx.flags = flags; + + return &first->async_tx; +} + +static int msgdma_dma_config(struct dma_chan *dchan, + struct dma_slave_config *config) +{ + struct msgdma_device *mdev = to_mdev(dchan); + + memcpy(&mdev->slave_cfg, config, sizeof(*config)); + + return 0; +} + +static void msgdma_reset(struct msgdma_device *mdev) +{ + u32 val; + int ret; + + /* Reset mSGDMA */ + iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS); + iowrite32(MSGDMA_CSR_CTL_RESET, mdev->csr + MSGDMA_CSR_CONTROL); + + ret = readl_poll_timeout(mdev->csr + MSGDMA_CSR_STATUS, val, + (val & MSGDMA_CSR_STAT_RESETTING) == 0, + 1, 10000); + if (ret) + dev_err(mdev->dev, "DMA channel did not reset\n"); + + /* Clear all status bits */ + iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS); + + /* Enable the DMA controller including interrupts */ + iowrite32(MSGDMA_CSR_CTL_STOP_ON_ERR | MSGDMA_CSR_CTL_STOP_ON_EARLY | + MSGDMA_CSR_CTL_GLOBAL_INTR, mdev->csr + MSGDMA_CSR_CONTROL); + + mdev->idle = true; +}; + +static void msgdma_copy_one(struct msgdma_device *mdev, + struct msgdma_sw_desc *desc) +{ + void __iomem *hw_desc = mdev->desc; + + /* + * Check if the DESC FIFO it not full. If its full, we need to wait + * for at least one entry to become free again + */ + while (ioread32(mdev->csr + MSGDMA_CSR_STATUS) & + MSGDMA_CSR_STAT_DESC_BUF_FULL) + mdelay(1); + + /* + * The descriptor needs to get copied into the descriptor FIFO + * of the DMA controller. The descriptor will get flushed to the + * FIFO, once the last word (control word) is written. 
Since we + * are not 100% sure that memcpy() writes all word in the "correct" + * oder (address from low to high) on all architectures, we make + * sure this control word is written last by single coding it and + * adding some write-barriers here. + */ + memcpy((void __force *)hw_desc, &desc->hw_desc, + sizeof(desc->hw_desc) - sizeof(u32)); + + /* Write control word last to flush this descriptor into the FIFO */ + mdev->idle = false; + wmb(); + iowrite32(desc->hw_desc.control, hw_desc + + offsetof(struct msgdma_extended_desc, control)); + wmb(); +} + +/** + * msgdma_copy_desc_to_fifo - copy descriptor(s) into controller FIFO + * @mdev: Pointer to the Altera mSGDMA device structure + * @desc: Transaction descriptor pointer + */ +static void msgdma_copy_desc_to_fifo(struct msgdma_device *mdev, + struct msgdma_sw_desc *desc) +{ + struct msgdma_sw_desc *sdesc, *next; + + msgdma_copy_one(mdev, desc); + + list_for_each_entry_safe(sdesc, next, &desc->tx_list, node) + msgdma_copy_one(mdev, sdesc); +} + +/** + * msgdma_start_transfer - Initiate the new transfer + * @mdev: Pointer to the Altera mSGDMA device structure + */ +static void msgdma_start_transfer(struct msgdma_device *mdev) +{ + struct msgdma_sw_desc *desc; + + if (!mdev->idle) + return; + + desc = list_first_entry_or_null(&mdev->pending_list, + struct msgdma_sw_desc, node); + if (!desc) + return; + + list_splice_tail_init(&mdev->pending_list, &mdev->active_list); + msgdma_copy_desc_to_fifo(mdev, desc); +} + +/** + * msgdma_issue_pending - Issue pending transactions + * @chan: DMA channel pointer + */ +static void msgdma_issue_pending(struct dma_chan *chan) +{ + struct msgdma_device *mdev = to_mdev(chan); + + spin_lock_bh(&mdev->lock); + msgdma_start_transfer(mdev); + spin_unlock_bh(&mdev->lock); +} + +/** + * msgdma_chan_desc_cleanup - Cleanup the completed descriptors + * @mdev: Pointer to the Altera mSGDMA device structure + */ +static void msgdma_chan_desc_cleanup(struct msgdma_device *mdev) +{ + struct msgdma_sw_desc *desc, *next; + + list_for_each_entry_safe(desc, next, &mdev->done_list, node) { + dma_async_tx_callback callback; + void *callback_param; + + list_del(&desc->node); + + callback = desc->async_tx.callback; + callback_param = desc->async_tx.callback_param; + if (callback) { + spin_unlock(&mdev->lock); + callback(callback_param); + spin_lock(&mdev->lock); + } + + /* Run any dependencies, then free the descriptor */ + msgdma_free_descriptor(mdev, desc); + } +} + +/** + * msgdma_complete_descriptor - Mark the active descriptor as complete + * @mdev: Pointer to the Altera mSGDMA device structure + */ +static void msgdma_complete_descriptor(struct msgdma_device *mdev) +{ + struct msgdma_sw_desc *desc; + + desc = list_first_entry_or_null(&mdev->active_list, + struct msgdma_sw_desc, node); + if (!desc) + return; + list_del(&desc->node); + dma_cookie_complete(&desc->async_tx); + list_add_tail(&desc->node, &mdev->done_list); +} + +/** + * msgdma_free_descriptors - Free channel descriptors + * @mdev: Pointer to the Altera mSGDMA device structure + */ +static void msgdma_free_descriptors(struct msgdma_device *mdev) +{ + msgdma_free_desc_list(mdev, &mdev->active_list); + msgdma_free_desc_list(mdev, &mdev->pending_list); + msgdma_free_desc_list(mdev, &mdev->done_list); +} + +/** + * msgdma_free_chan_resources - Free channel resources + * @dchan: DMA channel pointer + */ +static void msgdma_free_chan_resources(struct dma_chan *dchan) +{ + struct msgdma_device *mdev = to_mdev(dchan); + + spin_lock_bh(&mdev->lock); + 
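The comment at the head of msgdma_copy_one() above states the ordering rule the driver relies on: the descriptor body must be visible before the control word, because writing the control word is what commits the descriptor to the FIFO. A simplified user-space sketch of that "body first, control word last" idea follows; it substitutes a C11 release fence for the driver's wmb(), a plain struct for the MMIO FIFO slot, and omits the driver's second barrier after the control-word write.

#include <stdatomic.h>
#include <stdint.h>
#include <string.h>

struct desc {
        uint32_t src_lo, dst_lo, len, burst_seq, stride, src_hi, dst_hi;
        uint32_t control;               /* the "go" word, written last */
};

static void push_desc(volatile struct desc *slot, const struct desc *d)
{
        /* Copy everything except the final control word. */
        memcpy((void *)slot, d, sizeof(*d) - sizeof(d->control));

        /* Make the body visible before the commit word. */
        atomic_thread_fence(memory_order_release);

        /* Writing the control word is what commits the descriptor. */
        slot->control = d->control;
}

int main(void)
{
        static struct desc fifo_slot;   /* stands in for the MMIO slot */
        struct desc d = { .len = 64, .control = 0x80000000u /* GO bit */ };

        push_desc(&fifo_slot, &d);
        return fifo_slot.control == d.control ? 0 : 1;
}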
msgdma_free_descriptors(mdev);
+ spin_unlock_bh(&mdev->lock);
+ kfree(mdev->sw_desq);
+}
+
+/**
+ * msgdma_alloc_chan_resources - Allocate channel resources
+ * @dchan: DMA channel
+ *
+ * Return: Number of descriptors on success and failure value on error
+ */
+static int msgdma_alloc_chan_resources(struct dma_chan *dchan)
+{
+ struct msgdma_device *mdev = to_mdev(dchan);
+ struct msgdma_sw_desc *desc;
+ int i;
+
+ mdev->sw_desq = kcalloc(MSGDMA_DESC_NUM, sizeof(*desc), GFP_NOWAIT);
+ if (!mdev->sw_desq)
+ return -ENOMEM;
+
+ mdev->idle = true;
+ mdev->desc_free_cnt = MSGDMA_DESC_NUM;
+
+ INIT_LIST_HEAD(&mdev->free_list);
+
+ for (i = 0; i < MSGDMA_DESC_NUM; i++) {
+ desc = mdev->sw_desq + i;
+ dma_async_tx_descriptor_init(&desc->async_tx, &mdev->dmachan);
+ desc->async_tx.tx_submit = msgdma_tx_submit;
+ list_add_tail(&desc->node, &mdev->free_list);
+ }
+
+ return MSGDMA_DESC_NUM;
+}
+
+/**
+ * msgdma_tasklet - Schedule completion tasklet
+ * @data: Pointer to the Altera mSGDMA channel structure
+ */
+static void msgdma_tasklet(unsigned long data)
+{
+ struct msgdma_device *mdev = (struct msgdma_device *)data;
+ u32 count;
+ u32 __maybe_unused size;
+ u32 __maybe_unused status;
+
+ spin_lock(&mdev->lock);
+
+ /* Read number of responses that are available */
+ count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
+ dev_dbg(mdev->dev, "%s (%d): response count=%d\n",
+ __func__, __LINE__, count);
+
+ while (count--) {
+ /*
+ * Read both longwords to purge this response from the FIFO.
+ * On Avalon-MM implementations, size and status do not
+ * have any real values, like transferred bytes or error
+ * bits. So we need to just drop these values.
+ */
+ size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED);
+ status = ioread32(mdev->resp + MSGDMA_RESP_STATUS);
+
+ msgdma_complete_descriptor(mdev);
+ msgdma_chan_desc_cleanup(mdev);
+ }
+
+ spin_unlock(&mdev->lock);
+}
+
+/**
+ * msgdma_irq_handler - Altera mSGDMA Interrupt handler
+ * @irq: IRQ number
+ * @data: Pointer to the Altera mSGDMA device structure
+ *
+ * Return: IRQ_HANDLED/IRQ_NONE
+ */
+static irqreturn_t msgdma_irq_handler(int irq, void *data)
+{
+ struct msgdma_device *mdev = data;
+ u32 status;
+
+ status = ioread32(mdev->csr + MSGDMA_CSR_STATUS);
+ if ((status & MSGDMA_CSR_STAT_BUSY) == 0) {
+ /* Start next transfer if the DMA controller is idle */
+ spin_lock(&mdev->lock);
+ mdev->idle = true;
+ msgdma_start_transfer(mdev);
+ spin_unlock(&mdev->lock);
+ }
+
+ tasklet_schedule(&mdev->irq_tasklet);
+
+ /* Clear interrupt in mSGDMA controller */
+ iowrite32(MSGDMA_CSR_STAT_IRQ, mdev->csr + MSGDMA_CSR_STATUS);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * msgdma_dev_remove - Device remove function
+ * @mdev: Pointer to the Altera mSGDMA device structure
+ */
+static void msgdma_dev_remove(struct msgdma_device *mdev)
+{
+ if (!mdev)
+ return;
+
+ devm_free_irq(mdev->dev, mdev->irq, mdev);
+ tasklet_kill(&mdev->irq_tasklet);
+ list_del(&mdev->dmachan.device_node);
+}
+
+static int request_and_map(struct platform_device *pdev, const char *name,
+ struct resource **res, void __iomem **ptr)
+{
+ struct resource *region;
+ struct device *device = &pdev->dev;
+
+ *res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+ if (*res == NULL) {
+ dev_err(device, "resource %s not defined\n", name);
+ return -ENODEV;
+ }
+
+ region = devm_request_mem_region(device, (*res)->start,
+ resource_size(*res), dev_name(device));
+ if (region == NULL) {
+ dev_err(device, "unable to request %s\n", name);
+ return -EBUSY;
+ }
+
+ *ptr =
devm_ioremap_nocache(device, region->start, + resource_size(region)); + if (*ptr == NULL) { + dev_err(device, "ioremap_nocache of %s failed!", name); + return -ENOMEM; + } + + return 0; +} + +/** + * msgdma_probe - Driver probe function + * @pdev: Pointer to the platform_device structure + * + * Return: '0' on success and failure value on error + */ +static int msgdma_probe(struct platform_device *pdev) +{ + struct msgdma_device *mdev; + struct dma_device *dma_dev; + struct resource *dma_res; + int ret; + + mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_NOWAIT); + if (!mdev) + return -ENOMEM; + + mdev->dev = &pdev->dev; + + /* Map CSR space */ + ret = request_and_map(pdev, "csr", &dma_res, &mdev->csr); + if (ret) + return ret; + + /* Map (extended) descriptor space */ + ret = request_and_map(pdev, "desc", &dma_res, &mdev->desc); + if (ret) + return ret; + + /* Map response space */ + ret = request_and_map(pdev, "resp", &dma_res, &mdev->resp); + if (ret) + return ret; + + platform_set_drvdata(pdev, mdev); + + /* Get interrupt nr from platform data */ + mdev->irq = platform_get_irq(pdev, 0); + if (mdev->irq < 0) + return -ENXIO; + + ret = devm_request_irq(&pdev->dev, mdev->irq, msgdma_irq_handler, + 0, dev_name(&pdev->dev), mdev); + if (ret) + return ret; + + tasklet_init(&mdev->irq_tasklet, msgdma_tasklet, (unsigned long)mdev); + + dma_cookie_init(&mdev->dmachan); + + spin_lock_init(&mdev->lock); + + INIT_LIST_HEAD(&mdev->active_list); + INIT_LIST_HEAD(&mdev->pending_list); + INIT_LIST_HEAD(&mdev->done_list); + INIT_LIST_HEAD(&mdev->free_list); + + dma_dev = &mdev->dmadev; + + /* Set DMA capabilities */ + dma_cap_zero(dma_dev->cap_mask); + dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); + dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); + + dma_dev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + dma_dev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM) | + BIT(DMA_MEM_TO_MEM); + dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; + + /* Init DMA link list */ + INIT_LIST_HEAD(&dma_dev->channels); + + /* Set base routines */ + dma_dev->device_tx_status = dma_cookie_status; + dma_dev->device_issue_pending = msgdma_issue_pending; + dma_dev->dev = &pdev->dev; + + dma_dev->copy_align = DMAENGINE_ALIGN_4_BYTES; + dma_dev->device_prep_dma_memcpy = msgdma_prep_memcpy; + dma_dev->device_prep_slave_sg = msgdma_prep_slave_sg; + dma_dev->device_config = msgdma_dma_config; + + dma_dev->device_alloc_chan_resources = msgdma_alloc_chan_resources; + dma_dev->device_free_chan_resources = msgdma_free_chan_resources; + + mdev->dmachan.device = dma_dev; + list_add_tail(&mdev->dmachan.device_node, &dma_dev->channels); + + /* Set DMA mask to 64 bits */ + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (ret) { + dev_warn(&pdev->dev, "unable to set coherent mask to 64"); + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (ret) + goto fail; + } + + msgdma_reset(mdev); + + ret = dma_async_device_register(dma_dev); + if (ret) + goto fail; + + dev_notice(&pdev->dev, "Altera mSGDMA driver probe success\n"); + + return 0; + +fail: + msgdma_dev_remove(mdev); + + return ret; +} + +/** + * msgdma_dma_remove - Driver remove function + * @pdev: Pointer to the platform_device structure + * + * Return: Always '0' + */ +static int msgdma_remove(struct platform_device *pdev) +{ + struct msgdma_device *mdev = platform_get_drvdata(pdev); + + dma_async_device_unregister(&mdev->dmadev); + msgdma_dev_remove(mdev); + 
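msgdma_probe() above follows the usual single-error-label shape: early steps return directly because nothing needs undoing yet, while later steps jump to fail so that everything already set up is torn down again. Reduced to stubbed user-space functions (all names and the failing step are invented for illustration), the control flow is roughly:

#include <stdio.h>

static int map_registers(void)    { return 0; }
static int request_irq_line(void) { return 0; }
static int register_device(void)  { return -1; }   /* pretend this step fails */
static void tear_down(void)       { puts("undoing partial setup"); }

static int probe(void)
{
        int ret;

        ret = map_registers();
        if (ret)
                return ret;             /* nothing to undo yet */

        ret = request_irq_line();
        if (ret)
                return ret;

        ret = register_device();
        if (ret)
                goto fail;              /* undo everything done so far */

        return 0;

fail:
        tear_down();
        return ret;
}

int main(void)
{
        return probe() ? 1 : 0;
}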
+ dev_notice(&pdev->dev, "Altera mSGDMA driver removed\n"); + + return 0; +} + +static struct platform_driver msgdma_driver = { + .driver = { + .name = "altera-msgdma", + }, + .probe = msgdma_probe, + .remove = msgdma_remove, +}; + +module_platform_driver(msgdma_driver); + +MODULE_ALIAS("platform:altera-msgdma"); +MODULE_DESCRIPTION("Altera mSGDMA driver"); +MODULE_AUTHOR("Stefan Roese "); +MODULE_LICENSE("GPL"); diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 13cc95c0474c..b52b0d55247e 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -3033,7 +3033,7 @@ static struct vendor_data vendor_ftdmac020 = { .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK, }; -static struct amba_id pl08x_ids[] = { +static const struct amba_id pl08x_ids[] = { /* Samsung PL080S variant */ { .id = 0x0a141080, diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 1baf3404a365..fbab271b3bf9 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -1202,138 +1202,6 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, return NULL; } -/** - * atc_prep_dma_sg - prepare memory to memory scather-gather operation - * @chan: the channel to prepare operation on - * @dst_sg: destination scatterlist - * @dst_nents: number of destination scatterlist entries - * @src_sg: source scatterlist - * @src_nents: number of source scatterlist entries - * @flags: tx descriptor status flags - */ -static struct dma_async_tx_descriptor * -atc_prep_dma_sg(struct dma_chan *chan, - struct scatterlist *dst_sg, unsigned int dst_nents, - struct scatterlist *src_sg, unsigned int src_nents, - unsigned long flags) -{ - struct at_dma_chan *atchan = to_at_dma_chan(chan); - struct at_desc *desc = NULL; - struct at_desc *first = NULL; - struct at_desc *prev = NULL; - unsigned int src_width; - unsigned int dst_width; - size_t xfer_count; - u32 ctrla; - u32 ctrlb; - size_t dst_len = 0, src_len = 0; - dma_addr_t dst = 0, src = 0; - size_t len = 0, total_len = 0; - - if (unlikely(dst_nents == 0 || src_nents == 0)) - return NULL; - - if (unlikely(dst_sg == NULL || src_sg == NULL)) - return NULL; - - ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN - | ATC_SRC_ADDR_MODE_INCR - | ATC_DST_ADDR_MODE_INCR - | ATC_FC_MEM2MEM; - - /* - * loop until there is either no more source or no more destination - * scatterlist entry - */ - while (true) { - - /* prepare the next transfer */ - if (dst_len == 0) { - - /* no more destination scatterlist entries */ - if (!dst_sg || !dst_nents) - break; - - dst = sg_dma_address(dst_sg); - dst_len = sg_dma_len(dst_sg); - - dst_sg = sg_next(dst_sg); - dst_nents--; - } - - if (src_len == 0) { - - /* no more source scatterlist entries */ - if (!src_sg || !src_nents) - break; - - src = sg_dma_address(src_sg); - src_len = sg_dma_len(src_sg); - - src_sg = sg_next(src_sg); - src_nents--; - } - - len = min_t(size_t, src_len, dst_len); - if (len == 0) - continue; - - /* take care for the alignment */ - src_width = dst_width = atc_get_xfer_width(src, dst, len); - - ctrla = ATC_SRC_WIDTH(src_width) | - ATC_DST_WIDTH(dst_width); - - /* - * The number of transfers to set up refer to the source width - * that depends on the alignment. 
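For context on the at_hdmac code being removed in this hunk: atc_prep_dma_sg() walked the source and destination scatterlists in lockstep, each iteration consuming min(remaining source run, remaining destination run) and refilling whichever side ran out. A stand-alone sketch of that pairing loop, with made-up run lengths and without the driver's additional ATC_BTSIZE_MAX clamping:

#include <stdio.h>
#include <stddef.h>

/* Hypothetical run lengths standing in for the two scatterlists. */
static size_t src_runs[] = { 4096, 1024 };
static size_t dst_runs[] = { 2048, 2048, 1024 };

int main(void)
{
        size_t si = 0, di = 0;
        size_t src_left = src_runs[0], dst_left = dst_runs[0];

        while (1) {
                size_t len = src_left < dst_left ? src_left : dst_left;

                printf("chunk of %zu bytes\n", len);
                src_left -= len;
                dst_left -= len;

                /* Refill whichever side is exhausted, if entries remain. */
                if (!src_left && ++si < sizeof(src_runs) / sizeof(src_runs[0]))
                        src_left = src_runs[si];
                if (!dst_left && ++di < sizeof(dst_runs) / sizeof(dst_runs[0]))
                        dst_left = dst_runs[di];

                if (!src_left || !dst_left)
                        break;          /* one of the lists is used up */
        }
        return 0;
}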
- */ - xfer_count = len >> src_width; - if (xfer_count > ATC_BTSIZE_MAX) { - xfer_count = ATC_BTSIZE_MAX; - len = ATC_BTSIZE_MAX << src_width; - } - - /* create the transfer */ - desc = atc_desc_get(atchan); - if (!desc) - goto err_desc_get; - - desc->lli.saddr = src; - desc->lli.daddr = dst; - desc->lli.ctrla = ctrla | xfer_count; - desc->lli.ctrlb = ctrlb; - - desc->txd.cookie = 0; - desc->len = len; - - atc_desc_chain(&first, &prev, desc); - - /* update the lengths and addresses for the next loop cycle */ - dst_len -= len; - src_len -= len; - dst += len; - src += len; - - total_len += len; - } - - /* First descriptor of the chain embedds additional information */ - first->txd.cookie = -EBUSY; - first->total_len = total_len; - - /* set end-of-link to the last link descriptor of list*/ - set_desc_eol(desc); - - first->txd.flags = flags; /* client is in control of this ack */ - - return &first->txd; - -err_desc_get: - atc_desc_put(atchan, first); - return NULL; -} - /** * atc_dma_cyclic_check_values * Check for too big/unaligned periods and unaligned DMA buffer @@ -1933,14 +1801,12 @@ static int __init at_dma_probe(struct platform_device *pdev) /* setup platform data for each SoC */ dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask); - dma_cap_set(DMA_SG, at91sam9rl_config.cap_mask); dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask); dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask); dma_cap_set(DMA_MEMSET, at91sam9g45_config.cap_mask); dma_cap_set(DMA_MEMSET_SG, at91sam9g45_config.cap_mask); dma_cap_set(DMA_PRIVATE, at91sam9g45_config.cap_mask); dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask); - dma_cap_set(DMA_SG, at91sam9g45_config.cap_mask); /* get DMA parameters from controller type */ plat_dat = at_dma_get_driver_data(pdev); @@ -2078,16 +1944,12 @@ static int __init at_dma_probe(struct platform_device *pdev) atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; } - if (dma_has_cap(DMA_SG, atdma->dma_common.cap_mask)) - atdma->dma_common.device_prep_dma_sg = atc_prep_dma_sg; - dma_writel(atdma, EN, AT_DMA_ENABLE); - dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s%s), %d channels\n", + dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s), %d channels\n", dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "", dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask) ? "set " : "", dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "", - dma_has_cap(DMA_SG, atdma->dma_common.cap_mask) ? 
"sg-cpy " : "", plat_dat->nr_channels); dma_async_device_register(&atdma->dma_common); diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 7d4e0bcda9af..c00e3923d7d8 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -875,7 +875,7 @@ at_xdmac_interleaved_queue_desc(struct dma_chan *chan, dwidth = at_xdmac_align_width(chan, src | dst | chunk->size); if (chunk->size >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) { dev_dbg(chan2dev(chan), - "%s: chunk too big (%d, max size %lu)...\n", + "%s: chunk too big (%zu, max size %lu)...\n", __func__, chunk->size, AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth); return NULL; @@ -956,7 +956,7 @@ at_xdmac_prep_interleaved(struct dma_chan *chan, if ((xt->numf > 1) && (xt->frame_size > 1)) return NULL; - dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n", + dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%zu, frame_size=%zu, flags=0x%lx\n", __func__, &xt->src_start, &xt->dst_start, xt->numf, xt->frame_size, flags); @@ -990,7 +990,7 @@ at_xdmac_prep_interleaved(struct dma_chan *chan, dst_skip = chunk->size + dst_icg; dev_dbg(chan2dev(chan), - "%s: chunk size=%d, src icg=%d, dst icg=%d\n", + "%s: chunk size=%zu, src icg=%zu, dst icg=%zu\n", __func__, chunk->size, src_icg, dst_icg); desc = at_xdmac_interleaved_queue_desc(chan, atchan, @@ -1207,7 +1207,7 @@ at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); struct at_xdmac_desc *desc; - dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n", + dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%zu, pattern=0x%x, flags=0x%lx\n", __func__, &dest, len, value, flags); if (unlikely(!len)) @@ -1883,8 +1883,11 @@ static int atmel_xdmac_resume(struct device *dev) struct at_xdmac_chan *atchan; struct dma_chan *chan, *_chan; int i; + int ret; - clk_prepare_enable(atxdmac->clk); + ret = clk_prepare_enable(atxdmac->clk); + if (ret) + return ret; /* Clear pending interrupts. 
*/ for (i = 0; i < atxdmac->dma.chancnt; i++) { diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c index e41bbc7cb094..6c2c44724637 100644 --- a/drivers/dma/bcm-sba-raid.c +++ b/drivers/dma/bcm-sba-raid.c @@ -36,6 +36,7 @@ */ #include +#include #include #include #include @@ -48,7 +49,8 @@ #include "dmaengine.h" -/* SBA command related defines */ +/* ====== Driver macros and defines ===== */ + #define SBA_TYPE_SHIFT 48 #define SBA_TYPE_MASK GENMASK(1, 0) #define SBA_TYPE_A 0x0 @@ -82,39 +84,40 @@ #define SBA_CMD_WRITE_BUFFER 0xc #define SBA_CMD_GALOIS 0xe +#define SBA_MAX_REQ_PER_MBOX_CHANNEL 8192 + /* Driver helper macros */ #define to_sba_request(tx) \ container_of(tx, struct sba_request, tx) #define to_sba_device(dchan) \ container_of(dchan, struct sba_device, dma_chan) -enum sba_request_state { - SBA_REQUEST_STATE_FREE = 1, - SBA_REQUEST_STATE_ALLOCED = 2, - SBA_REQUEST_STATE_PENDING = 3, - SBA_REQUEST_STATE_ACTIVE = 4, - SBA_REQUEST_STATE_RECEIVED = 5, - SBA_REQUEST_STATE_COMPLETED = 6, - SBA_REQUEST_STATE_ABORTED = 7, +/* ===== Driver data structures ===== */ + +enum sba_request_flags { + SBA_REQUEST_STATE_FREE = 0x001, + SBA_REQUEST_STATE_ALLOCED = 0x002, + SBA_REQUEST_STATE_PENDING = 0x004, + SBA_REQUEST_STATE_ACTIVE = 0x008, + SBA_REQUEST_STATE_ABORTED = 0x010, + SBA_REQUEST_STATE_MASK = 0x0ff, + SBA_REQUEST_FENCE = 0x100, }; struct sba_request { /* Global state */ struct list_head node; struct sba_device *sba; - enum sba_request_state state; - bool fence; + u32 flags; /* Chained requests management */ struct sba_request *first; struct list_head next; - unsigned int next_count; atomic_t next_pending_count; /* BRCM message data */ - void *resp; - dma_addr_t resp_dma; - struct brcm_sba_command *cmds; struct brcm_message msg; struct dma_async_tx_descriptor tx; + /* SBA commands */ + struct brcm_sba_command cmds[0]; }; enum sba_version { @@ -152,19 +155,18 @@ struct sba_device { void *cmds_base; dma_addr_t cmds_dma_base; spinlock_t reqs_lock; - struct sba_request *reqs; bool reqs_fence; struct list_head reqs_alloc_list; struct list_head reqs_pending_list; struct list_head reqs_active_list; - struct list_head reqs_received_list; - struct list_head reqs_completed_list; struct list_head reqs_aborted_list; struct list_head reqs_free_list; - int reqs_free_count; + /* DebugFS directory entries */ + struct dentry *root; + struct dentry *stats; }; -/* ====== SBA command helper routines ===== */ +/* ====== Command helper routines ===== */ static inline u64 __pure sba_cmd_enc(u64 cmd, u32 val, u32 shift, u32 mask) { @@ -196,32 +198,50 @@ static inline u32 __pure sba_cmd_pq_c_mdata(u32 d, u32 b1, u32 b0) ((d & SBA_C_MDATA_DNUM_MASK) << SBA_C_MDATA_DNUM_SHIFT); } -/* ====== Channel resource management routines ===== */ +/* ====== General helper routines ===== */ + +static void sba_peek_mchans(struct sba_device *sba) +{ + int mchan_idx; + + for (mchan_idx = 0; mchan_idx < sba->mchans_count; mchan_idx++) + mbox_client_peek_data(sba->mchans[mchan_idx]); +} static struct sba_request *sba_alloc_request(struct sba_device *sba) { + bool found = false; unsigned long flags; struct sba_request *req = NULL; spin_lock_irqsave(&sba->reqs_lock, flags); + list_for_each_entry(req, &sba->reqs_free_list, node) { + if (async_tx_test_ack(&req->tx)) { + list_move_tail(&req->node, &sba->reqs_alloc_list); + found = true; + break; + } + } + spin_unlock_irqrestore(&sba->reqs_lock, flags); - req = list_first_entry_or_null(&sba->reqs_free_list, - struct sba_request, node); - if (req) { - 
list_move_tail(&req->node, &sba->reqs_alloc_list); - req->state = SBA_REQUEST_STATE_ALLOCED; - req->fence = false; - req->first = req; - INIT_LIST_HEAD(&req->next); - req->next_count = 1; - atomic_set(&req->next_pending_count, 1); - - sba->reqs_free_count--; - - dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan); + if (!found) { + /* + * We have no more free requests so, we peek + * mailbox channels hoping few active requests + * would have completed which will create more + * room for new requests. + */ + sba_peek_mchans(sba); + return NULL; } - spin_unlock_irqrestore(&sba->reqs_lock, flags); + req->flags = SBA_REQUEST_STATE_ALLOCED; + req->first = req; + INIT_LIST_HEAD(&req->next); + atomic_set(&req->next_pending_count, 1); + + dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan); + async_tx_ack(&req->tx); return req; } @@ -231,7 +251,8 @@ static void _sba_pending_request(struct sba_device *sba, struct sba_request *req) { lockdep_assert_held(&sba->reqs_lock); - req->state = SBA_REQUEST_STATE_PENDING; + req->flags &= ~SBA_REQUEST_STATE_MASK; + req->flags |= SBA_REQUEST_STATE_PENDING; list_move_tail(&req->node, &sba->reqs_pending_list); if (list_empty(&sba->reqs_active_list)) sba->reqs_fence = false; @@ -246,9 +267,10 @@ static bool _sba_active_request(struct sba_device *sba, sba->reqs_fence = false; if (sba->reqs_fence) return false; - req->state = SBA_REQUEST_STATE_ACTIVE; + req->flags &= ~SBA_REQUEST_STATE_MASK; + req->flags |= SBA_REQUEST_STATE_ACTIVE; list_move_tail(&req->node, &sba->reqs_active_list); - if (req->fence) + if (req->flags & SBA_REQUEST_FENCE) sba->reqs_fence = true; return true; } @@ -258,7 +280,8 @@ static void _sba_abort_request(struct sba_device *sba, struct sba_request *req) { lockdep_assert_held(&sba->reqs_lock); - req->state = SBA_REQUEST_STATE_ABORTED; + req->flags &= ~SBA_REQUEST_STATE_MASK; + req->flags |= SBA_REQUEST_STATE_ABORTED; list_move_tail(&req->node, &sba->reqs_aborted_list); if (list_empty(&sba->reqs_active_list)) sba->reqs_fence = false; @@ -269,42 +292,11 @@ static void _sba_free_request(struct sba_device *sba, struct sba_request *req) { lockdep_assert_held(&sba->reqs_lock); - req->state = SBA_REQUEST_STATE_FREE; + req->flags &= ~SBA_REQUEST_STATE_MASK; + req->flags |= SBA_REQUEST_STATE_FREE; list_move_tail(&req->node, &sba->reqs_free_list); if (list_empty(&sba->reqs_active_list)) sba->reqs_fence = false; - sba->reqs_free_count++; -} - -static void sba_received_request(struct sba_request *req) -{ - unsigned long flags; - struct sba_device *sba = req->sba; - - spin_lock_irqsave(&sba->reqs_lock, flags); - req->state = SBA_REQUEST_STATE_RECEIVED; - list_move_tail(&req->node, &sba->reqs_received_list); - spin_unlock_irqrestore(&sba->reqs_lock, flags); -} - -static void sba_complete_chained_requests(struct sba_request *req) -{ - unsigned long flags; - struct sba_request *nreq; - struct sba_device *sba = req->sba; - - spin_lock_irqsave(&sba->reqs_lock, flags); - - req->state = SBA_REQUEST_STATE_COMPLETED; - list_move_tail(&req->node, &sba->reqs_completed_list); - list_for_each_entry(nreq, &req->next, next) { - nreq->state = SBA_REQUEST_STATE_COMPLETED; - list_move_tail(&nreq->node, &sba->reqs_completed_list); - } - if (list_empty(&sba->reqs_active_list)) - sba->reqs_fence = false; - - spin_unlock_irqrestore(&sba->reqs_lock, flags); } static void sba_free_chained_requests(struct sba_request *req) @@ -332,8 +324,7 @@ static void sba_chain_request(struct sba_request *first, list_add_tail(&req->next, &first->next); req->first = first; - 
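The reworked request bookkeeping above folds a request's lifecycle into one flags word: the low bits hold exactly one SBA_REQUEST_STATE_* value, selected through SBA_REQUEST_STATE_MASK, while higher bits such as SBA_REQUEST_FENCE survive every state change. A minimal stand-alone illustration of that clear-mask-then-set update, reusing the flag values from the patch but otherwise invented names:

#include <stdio.h>

/* A subset of the values introduced by the patch. */
#define REQ_STATE_FREE     0x001u
#define REQ_STATE_PENDING  0x004u
#define REQ_STATE_MASK     0x0ffu
#define REQ_FENCE          0x100u

static void set_state(unsigned int *flags, unsigned int state)
{
        *flags &= ~REQ_STATE_MASK;      /* drop the old state ... */
        *flags |= state;                /* ... keep modifiers like the fence */
}

int main(void)
{
        unsigned int flags = REQ_STATE_FREE | REQ_FENCE;

        set_state(&flags, REQ_STATE_PENDING);
        printf("flags=0x%03x fence=%s\n", flags,
               (flags & REQ_FENCE) ? "yes" : "no");     /* 0x104, fence=yes */
        return 0;
}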
first->next_count++; - atomic_set(&first->next_pending_count, first->next_count); + atomic_inc(&first->next_pending_count); spin_unlock_irqrestore(&sba->reqs_lock, flags); } @@ -349,14 +340,6 @@ static void sba_cleanup_nonpending_requests(struct sba_device *sba) list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node) _sba_free_request(sba, req); - /* Freeup all received request */ - list_for_each_entry_safe(req, req1, &sba->reqs_received_list, node) - _sba_free_request(sba, req); - - /* Freeup all completed request */ - list_for_each_entry_safe(req, req1, &sba->reqs_completed_list, node) - _sba_free_request(sba, req); - /* Set all active requests as aborted */ list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node) _sba_abort_request(sba, req); @@ -383,6 +366,144 @@ static void sba_cleanup_pending_requests(struct sba_device *sba) spin_unlock_irqrestore(&sba->reqs_lock, flags); } +static int sba_send_mbox_request(struct sba_device *sba, + struct sba_request *req) +{ + int mchans_idx, ret = 0; + + /* Select mailbox channel in round-robin fashion */ + mchans_idx = atomic_inc_return(&sba->mchans_current); + mchans_idx = mchans_idx % sba->mchans_count; + + /* Send message for the request */ + req->msg.error = 0; + ret = mbox_send_message(sba->mchans[mchans_idx], &req->msg); + if (ret < 0) { + dev_err(sba->dev, "send message failed with error %d", ret); + return ret; + } + + /* Check error returned by mailbox controller */ + ret = req->msg.error; + if (ret < 0) { + dev_err(sba->dev, "message error %d", ret); + } + + /* Signal txdone for mailbox channel */ + mbox_client_txdone(sba->mchans[mchans_idx], ret); + + return ret; +} + +/* Note: Must be called with sba->reqs_lock held */ +static void _sba_process_pending_requests(struct sba_device *sba) +{ + int ret; + u32 count; + struct sba_request *req; + + /* + * Process few pending requests + * + * For now, we process ( * 8) + * number of requests at a time. 
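sba_send_mbox_request() above spreads requests over the available mailbox channels by taking an atomic counter modulo the channel count. A user-space sketch of that round-robin pick, with C11 atomics in place of the kernel's atomic_t, a made-up channel count, and a post-increment where the driver uses atomic_inc_return() (the distribution is the same):

#include <stdatomic.h>
#include <stdio.h>

#define NUM_CHANNELS 4u                 /* hypothetical mailbox channel count */

static atomic_uint next_chan;

static unsigned int pick_channel(void)
{
        /* Safe to call from many submitters at once. */
        return atomic_fetch_add(&next_chan, 1) % NUM_CHANNELS;
}

int main(void)
{
        for (int i = 0; i < 8; i++)
                printf("request %d -> channel %u\n", i, pick_channel());
        return 0;
}

The separate budget in _sba_process_pending_requests(), eight requests per mailbox channel per pass, is not modelled here.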
+ */ + count = sba->mchans_count * 8; + while (!list_empty(&sba->reqs_pending_list) && count) { + /* Get the first pending request */ + req = list_first_entry(&sba->reqs_pending_list, + struct sba_request, node); + + /* Try to make request active */ + if (!_sba_active_request(sba, req)) + break; + + /* Send request to mailbox channel */ + ret = sba_send_mbox_request(sba, req); + if (ret < 0) { + _sba_pending_request(sba, req); + break; + } + + count--; + } +} + +static void sba_process_received_request(struct sba_device *sba, + struct sba_request *req) +{ + unsigned long flags; + struct dma_async_tx_descriptor *tx; + struct sba_request *nreq, *first = req->first; + + /* Process only after all chained requests are received */ + if (!atomic_dec_return(&first->next_pending_count)) { + tx = &first->tx; + + WARN_ON(tx->cookie < 0); + if (tx->cookie > 0) { + dma_cookie_complete(tx); + dmaengine_desc_get_callback_invoke(tx, NULL); + dma_descriptor_unmap(tx); + tx->callback = NULL; + tx->callback_result = NULL; + } + + dma_run_dependencies(tx); + + spin_lock_irqsave(&sba->reqs_lock, flags); + + /* Free all requests chained to first request */ + list_for_each_entry(nreq, &first->next, next) + _sba_free_request(sba, nreq); + INIT_LIST_HEAD(&first->next); + + /* Free the first request */ + _sba_free_request(sba, first); + + /* Process pending requests */ + _sba_process_pending_requests(sba); + + spin_unlock_irqrestore(&sba->reqs_lock, flags); + } +} + +static void sba_write_stats_in_seqfile(struct sba_device *sba, + struct seq_file *file) +{ + unsigned long flags; + struct sba_request *req; + u32 free_count = 0, alloced_count = 0; + u32 pending_count = 0, active_count = 0, aborted_count = 0; + + spin_lock_irqsave(&sba->reqs_lock, flags); + + list_for_each_entry(req, &sba->reqs_free_list, node) + if (async_tx_test_ack(&req->tx)) + free_count++; + + list_for_each_entry(req, &sba->reqs_alloc_list, node) + alloced_count++; + + list_for_each_entry(req, &sba->reqs_pending_list, node) + pending_count++; + + list_for_each_entry(req, &sba->reqs_active_list, node) + active_count++; + + list_for_each_entry(req, &sba->reqs_aborted_list, node) + aborted_count++; + + spin_unlock_irqrestore(&sba->reqs_lock, flags); + + seq_printf(file, "maximum requests = %d\n", sba->max_req); + seq_printf(file, "free requests = %d\n", free_count); + seq_printf(file, "alloced requests = %d\n", alloced_count); + seq_printf(file, "pending requests = %d\n", pending_count); + seq_printf(file, "active requests = %d\n", active_count); + seq_printf(file, "aborted requests = %d\n", aborted_count); +} + /* ====== DMAENGINE callbacks ===== */ static void sba_free_chan_resources(struct dma_chan *dchan) @@ -403,58 +524,14 @@ static int sba_device_terminate_all(struct dma_chan *dchan) return 0; } -static int sba_send_mbox_request(struct sba_device *sba, - struct sba_request *req) -{ - int mchans_idx, ret = 0; - - /* Select mailbox channel in round-robin fashion */ - mchans_idx = atomic_inc_return(&sba->mchans_current); - mchans_idx = mchans_idx % sba->mchans_count; - - /* Send message for the request */ - req->msg.error = 0; - ret = mbox_send_message(sba->mchans[mchans_idx], &req->msg); - if (ret < 0) { - dev_err(sba->dev, "send message failed with error %d", ret); - return ret; - } - ret = req->msg.error; - if (ret < 0) { - dev_err(sba->dev, "message error %d", ret); - return ret; - } - - return 0; -} - static void sba_issue_pending(struct dma_chan *dchan) { - int ret; unsigned long flags; - struct sba_request *req, *req1; struct sba_device 
*sba = to_sba_device(dchan); + /* Process pending requests */ spin_lock_irqsave(&sba->reqs_lock, flags); - - /* Process all pending request */ - list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node) { - /* Try to make request active */ - if (!_sba_active_request(sba, req)) - break; - - /* Send request to mailbox channel */ - spin_unlock_irqrestore(&sba->reqs_lock, flags); - ret = sba_send_mbox_request(sba, req); - spin_lock_irqsave(&sba->reqs_lock, flags); - - /* If something went wrong then keep request pending */ - if (ret < 0) { - _sba_pending_request(sba, req); - break; - } - } - + _sba_process_pending_requests(sba); spin_unlock_irqrestore(&sba->reqs_lock, flags); } @@ -486,17 +563,15 @@ static enum dma_status sba_tx_status(struct dma_chan *dchan, dma_cookie_t cookie, struct dma_tx_state *txstate) { - int mchan_idx; enum dma_status ret; struct sba_device *sba = to_sba_device(dchan); - for (mchan_idx = 0; mchan_idx < sba->mchans_count; mchan_idx++) - mbox_client_peek_data(sba->mchans[mchan_idx]); - ret = dma_cookie_status(dchan, cookie, txstate); if (ret == DMA_COMPLETE) return ret; + sba_peek_mchans(sba); + return dma_cookie_status(dchan, cookie, txstate); } @@ -506,6 +581,7 @@ static void sba_fillup_interrupt_msg(struct sba_request *req, { u64 cmd; u32 c_mdata; + dma_addr_t resp_dma = req->tx.phys; struct brcm_sba_command *cmdsp = cmds; /* Type-B command to load dummy data into buf0 */ @@ -521,7 +597,7 @@ static void sba_fillup_interrupt_msg(struct sba_request *req, cmdsp->cmd = cmd; *cmdsp->cmd_dma = cpu_to_le64(cmd); cmdsp->flags = BRCM_SBA_CMD_TYPE_B; - cmdsp->data = req->resp_dma; + cmdsp->data = resp_dma; cmdsp->data_len = req->sba->hw_resp_size; cmdsp++; @@ -542,11 +618,11 @@ static void sba_fillup_interrupt_msg(struct sba_request *req, cmdsp->flags = BRCM_SBA_CMD_TYPE_A; if (req->sba->hw_resp_size) { cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; - cmdsp->resp = req->resp_dma; + cmdsp->resp = resp_dma; cmdsp->resp_len = req->sba->hw_resp_size; } cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; - cmdsp->data = req->resp_dma; + cmdsp->data = resp_dma; cmdsp->data_len = req->sba->hw_resp_size; cmdsp++; @@ -573,7 +649,7 @@ sba_prep_dma_interrupt(struct dma_chan *dchan, unsigned long flags) * Force fence so that no requests are submitted * until DMA callback for this request is invoked. */ - req->fence = true; + req->flags |= SBA_REQUEST_FENCE; /* Fillup request message */ sba_fillup_interrupt_msg(req, req->cmds, &req->msg); @@ -593,6 +669,7 @@ static void sba_fillup_memcpy_msg(struct sba_request *req, { u64 cmd; u32 c_mdata; + dma_addr_t resp_dma = req->tx.phys; struct brcm_sba_command *cmdsp = cmds; /* Type-B command to load data into buf0 */ @@ -629,7 +706,7 @@ static void sba_fillup_memcpy_msg(struct sba_request *req, cmdsp->flags = BRCM_SBA_CMD_TYPE_A; if (req->sba->hw_resp_size) { cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; - cmdsp->resp = req->resp_dma; + cmdsp->resp = resp_dma; cmdsp->resp_len = req->sba->hw_resp_size; } cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; @@ -656,7 +733,8 @@ sba_prep_dma_memcpy_req(struct sba_device *sba, req = sba_alloc_request(sba); if (!req) return NULL; - req->fence = (flags & DMA_PREP_FENCE) ? 
true : false; + if (flags & DMA_PREP_FENCE) + req->flags |= SBA_REQUEST_FENCE; /* Fillup request message */ sba_fillup_memcpy_msg(req, req->cmds, &req->msg, @@ -711,6 +789,7 @@ static void sba_fillup_xor_msg(struct sba_request *req, u64 cmd; u32 c_mdata; unsigned int i; + dma_addr_t resp_dma = req->tx.phys; struct brcm_sba_command *cmdsp = cmds; /* Type-B command to load data into buf0 */ @@ -766,7 +845,7 @@ static void sba_fillup_xor_msg(struct sba_request *req, cmdsp->flags = BRCM_SBA_CMD_TYPE_A; if (req->sba->hw_resp_size) { cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; - cmdsp->resp = req->resp_dma; + cmdsp->resp = resp_dma; cmdsp->resp_len = req->sba->hw_resp_size; } cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; @@ -782,7 +861,7 @@ static void sba_fillup_xor_msg(struct sba_request *req, msg->error = 0; } -struct sba_request * +static struct sba_request * sba_prep_dma_xor_req(struct sba_device *sba, dma_addr_t off, dma_addr_t dst, dma_addr_t *src, u32 src_cnt, size_t len, unsigned long flags) @@ -793,7 +872,8 @@ sba_prep_dma_xor_req(struct sba_device *sba, req = sba_alloc_request(sba); if (!req) return NULL; - req->fence = (flags & DMA_PREP_FENCE) ? true : false; + if (flags & DMA_PREP_FENCE) + req->flags |= SBA_REQUEST_FENCE; /* Fillup request message */ sba_fillup_xor_msg(req, req->cmds, &req->msg, @@ -854,6 +934,7 @@ static void sba_fillup_pq_msg(struct sba_request *req, u64 cmd; u32 c_mdata; unsigned int i; + dma_addr_t resp_dma = req->tx.phys; struct brcm_sba_command *cmdsp = cmds; if (pq_continue) { @@ -947,7 +1028,7 @@ static void sba_fillup_pq_msg(struct sba_request *req, cmdsp->flags = BRCM_SBA_CMD_TYPE_A; if (req->sba->hw_resp_size) { cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; - cmdsp->resp = req->resp_dma; + cmdsp->resp = resp_dma; cmdsp->resp_len = req->sba->hw_resp_size; } cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; @@ -974,7 +1055,7 @@ static void sba_fillup_pq_msg(struct sba_request *req, cmdsp->flags = BRCM_SBA_CMD_TYPE_A; if (req->sba->hw_resp_size) { cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; - cmdsp->resp = req->resp_dma; + cmdsp->resp = resp_dma; cmdsp->resp_len = req->sba->hw_resp_size; } cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; @@ -991,7 +1072,7 @@ static void sba_fillup_pq_msg(struct sba_request *req, msg->error = 0; } -struct sba_request * +static struct sba_request * sba_prep_dma_pq_req(struct sba_device *sba, dma_addr_t off, dma_addr_t *dst_p, dma_addr_t *dst_q, dma_addr_t *src, u32 src_cnt, const u8 *scf, size_t len, unsigned long flags) @@ -1002,7 +1083,8 @@ sba_prep_dma_pq_req(struct sba_device *sba, dma_addr_t off, req = sba_alloc_request(sba); if (!req) return NULL; - req->fence = (flags & DMA_PREP_FENCE) ? 
true : false; + if (flags & DMA_PREP_FENCE) + req->flags |= SBA_REQUEST_FENCE; /* Fillup request messages */ sba_fillup_pq_msg(req, dmaf_continue(flags), @@ -1027,6 +1109,7 @@ static void sba_fillup_pq_single_msg(struct sba_request *req, u64 cmd; u32 c_mdata; u8 pos, dpos = raid6_gflog[scf]; + dma_addr_t resp_dma = req->tx.phys; struct brcm_sba_command *cmdsp = cmds; if (!dst_p) @@ -1105,7 +1188,7 @@ static void sba_fillup_pq_single_msg(struct sba_request *req, cmdsp->flags = BRCM_SBA_CMD_TYPE_A; if (req->sba->hw_resp_size) { cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; - cmdsp->resp = req->resp_dma; + cmdsp->resp = resp_dma; cmdsp->resp_len = req->sba->hw_resp_size; } cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; @@ -1226,7 +1309,7 @@ static void sba_fillup_pq_single_msg(struct sba_request *req, cmdsp->flags = BRCM_SBA_CMD_TYPE_A; if (req->sba->hw_resp_size) { cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; - cmdsp->resp = req->resp_dma; + cmdsp->resp = resp_dma; cmdsp->resp_len = req->sba->hw_resp_size; } cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; @@ -1243,7 +1326,7 @@ static void sba_fillup_pq_single_msg(struct sba_request *req, msg->error = 0; } -struct sba_request * +static struct sba_request * sba_prep_dma_pq_single_req(struct sba_device *sba, dma_addr_t off, dma_addr_t *dst_p, dma_addr_t *dst_q, dma_addr_t src, u8 scf, size_t len, @@ -1255,7 +1338,8 @@ sba_prep_dma_pq_single_req(struct sba_device *sba, dma_addr_t off, req = sba_alloc_request(sba); if (!req) return NULL; - req->fence = (flags & DMA_PREP_FENCE) ? true : false; + if (flags & DMA_PREP_FENCE) + req->flags |= SBA_REQUEST_FENCE; /* Fillup request messages */ sba_fillup_pq_single_msg(req, dmaf_continue(flags), @@ -1370,40 +1454,10 @@ sba_prep_dma_pq(struct dma_chan *dchan, dma_addr_t *dst, dma_addr_t *src, /* ====== Mailbox callbacks ===== */ -static void sba_dma_tx_actions(struct sba_request *req) -{ - struct dma_async_tx_descriptor *tx = &req->tx; - - WARN_ON(tx->cookie < 0); - - if (tx->cookie > 0) { - dma_cookie_complete(tx); - - /* - * Call the callback (must not sleep or submit new - * operations to this channel) - */ - if (tx->callback) - tx->callback(tx->callback_param); - - dma_descriptor_unmap(tx); - } - - /* Run dependent operations */ - dma_run_dependencies(tx); - - /* If waiting for 'ack' then move to completed list */ - if (!async_tx_test_ack(&req->tx)) - sba_complete_chained_requests(req); - else - sba_free_chained_requests(req); -} - static void sba_receive_message(struct mbox_client *cl, void *msg) { - unsigned long flags; struct brcm_message *m = msg; - struct sba_request *req = m->ctx, *req1; + struct sba_request *req = m->ctx; struct sba_device *sba = req->sba; /* Error count if message has error */ @@ -1411,52 +1465,37 @@ static void sba_receive_message(struct mbox_client *cl, void *msg) dev_err(sba->dev, "%s got message with error %d", dma_chan_name(&sba->dma_chan), m->error); - /* Mark request as received */ - sba_received_request(req); + /* Process received request */ + sba_process_received_request(sba, req); +} - /* Wait for all chained requests to be completed */ - if (atomic_dec_return(&req->first->next_pending_count)) - goto done; +/* ====== Debugfs callbacks ====== */ - /* Point to first request */ - req = req->first; +static int sba_debugfs_stats_show(struct seq_file *file, void *offset) +{ + struct platform_device *pdev = to_platform_device(file->private); + struct sba_device *sba = platform_get_drvdata(pdev); - /* Update request */ - if (req->state == SBA_REQUEST_STATE_RECEIVED) - sba_dma_tx_actions(req); - else - 
sba_free_chained_requests(req); + /* Write stats in file */ + sba_write_stats_in_seqfile(sba, file); - spin_lock_irqsave(&sba->reqs_lock, flags); - - /* Re-check all completed request waiting for 'ack' */ - list_for_each_entry_safe(req, req1, &sba->reqs_completed_list, node) { - spin_unlock_irqrestore(&sba->reqs_lock, flags); - sba_dma_tx_actions(req); - spin_lock_irqsave(&sba->reqs_lock, flags); - } - - spin_unlock_irqrestore(&sba->reqs_lock, flags); - -done: - /* Try to submit pending request */ - sba_issue_pending(&sba->dma_chan); + return 0; } /* ====== Platform driver routines ===== */ static int sba_prealloc_channel_resources(struct sba_device *sba) { - int i, j, p, ret = 0; + int i, j, ret = 0; struct sba_request *req = NULL; - sba->resp_base = dma_alloc_coherent(sba->dma_dev.dev, + sba->resp_base = dma_alloc_coherent(sba->mbox_dev, sba->max_resp_pool_size, &sba->resp_dma_base, GFP_KERNEL); if (!sba->resp_base) return -ENOMEM; - sba->cmds_base = dma_alloc_coherent(sba->dma_dev.dev, + sba->cmds_base = dma_alloc_coherent(sba->mbox_dev, sba->max_cmds_pool_size, &sba->cmds_dma_base, GFP_KERNEL); if (!sba->cmds_base) { @@ -1469,36 +1508,23 @@ static int sba_prealloc_channel_resources(struct sba_device *sba) INIT_LIST_HEAD(&sba->reqs_alloc_list); INIT_LIST_HEAD(&sba->reqs_pending_list); INIT_LIST_HEAD(&sba->reqs_active_list); - INIT_LIST_HEAD(&sba->reqs_received_list); - INIT_LIST_HEAD(&sba->reqs_completed_list); INIT_LIST_HEAD(&sba->reqs_aborted_list); INIT_LIST_HEAD(&sba->reqs_free_list); - sba->reqs = devm_kcalloc(sba->dev, sba->max_req, - sizeof(*req), GFP_KERNEL); - if (!sba->reqs) { - ret = -ENOMEM; - goto fail_free_cmds_pool; - } - - for (i = 0, p = 0; i < sba->max_req; i++) { - req = &sba->reqs[i]; - INIT_LIST_HEAD(&req->node); - req->sba = sba; - req->state = SBA_REQUEST_STATE_FREE; - INIT_LIST_HEAD(&req->next); - req->next_count = 1; - atomic_set(&req->next_pending_count, 0); - req->fence = false; - req->resp = sba->resp_base + p; - req->resp_dma = sba->resp_dma_base + p; - p += sba->hw_resp_size; - req->cmds = devm_kcalloc(sba->dev, sba->max_cmd_per_req, - sizeof(*req->cmds), GFP_KERNEL); - if (!req->cmds) { + for (i = 0; i < sba->max_req; i++) { + req = devm_kzalloc(sba->dev, + sizeof(*req) + + sba->max_cmd_per_req * sizeof(req->cmds[0]), + GFP_KERNEL); + if (!req) { ret = -ENOMEM; goto fail_free_cmds_pool; } + INIT_LIST_HEAD(&req->node); + req->sba = sba; + req->flags = SBA_REQUEST_STATE_FREE; + INIT_LIST_HEAD(&req->next); + atomic_set(&req->next_pending_count, 0); for (j = 0; j < sba->max_cmd_per_req; j++) { req->cmds[j].cmd = 0; req->cmds[j].cmd_dma = sba->cmds_base + @@ -1509,21 +1535,20 @@ static int sba_prealloc_channel_resources(struct sba_device *sba) } memset(&req->msg, 0, sizeof(req->msg)); dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan); + async_tx_ack(&req->tx); req->tx.tx_submit = sba_tx_submit; - req->tx.phys = req->resp_dma; + req->tx.phys = sba->resp_dma_base + i * sba->hw_resp_size; list_add_tail(&req->node, &sba->reqs_free_list); } - sba->reqs_free_count = sba->max_req; - return 0; fail_free_cmds_pool: - dma_free_coherent(sba->dma_dev.dev, + dma_free_coherent(sba->mbox_dev, sba->max_cmds_pool_size, sba->cmds_base, sba->cmds_dma_base); fail_free_resp_pool: - dma_free_coherent(sba->dma_dev.dev, + dma_free_coherent(sba->mbox_dev, sba->max_resp_pool_size, sba->resp_base, sba->resp_dma_base); return ret; @@ -1532,9 +1557,9 @@ static int sba_prealloc_channel_resources(struct sba_device *sba) static void sba_freeup_channel_resources(struct sba_device *sba) { 
dmaengine_terminate_all(&sba->dma_chan); - dma_free_coherent(sba->dma_dev.dev, sba->max_cmds_pool_size, + dma_free_coherent(sba->mbox_dev, sba->max_cmds_pool_size, sba->cmds_base, sba->cmds_dma_base); - dma_free_coherent(sba->dma_dev.dev, sba->max_resp_pool_size, + dma_free_coherent(sba->mbox_dev, sba->max_resp_pool_size, sba->resp_base, sba->resp_dma_base); sba->resp_base = NULL; sba->resp_dma_base = 0; @@ -1625,6 +1650,13 @@ static int sba_probe(struct platform_device *pdev) sba->dev = &pdev->dev; platform_set_drvdata(pdev, sba); + /* Number of channels equals number of mailbox channels */ + ret = of_count_phandle_with_args(pdev->dev.of_node, + "mboxes", "#mbox-cells"); + if (ret <= 0) + return -ENODEV; + mchans_count = ret; + /* Determine SBA version from DT compatible string */ if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba")) sba->ver = SBA_VER_1; @@ -1637,14 +1669,12 @@ static int sba_probe(struct platform_device *pdev) /* Derived Configuration parameters */ switch (sba->ver) { case SBA_VER_1: - sba->max_req = 1024; sba->hw_buf_size = 4096; sba->hw_resp_size = 8; sba->max_pq_coefs = 6; sba->max_pq_srcs = 6; break; case SBA_VER_2: - sba->max_req = 1024; sba->hw_buf_size = 4096; sba->hw_resp_size = 8; sba->max_pq_coefs = 30; @@ -1658,6 +1688,7 @@ static int sba_probe(struct platform_device *pdev) default: return -EINVAL; } + sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL * mchans_count; sba->max_cmd_per_req = sba->max_pq_srcs + 3; sba->max_xor_srcs = sba->max_cmd_per_req - 1; sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size; @@ -1668,25 +1699,17 @@ static int sba_probe(struct platform_device *pdev) sba->client.dev = &pdev->dev; sba->client.rx_callback = sba_receive_message; sba->client.tx_block = false; - sba->client.knows_txdone = false; + sba->client.knows_txdone = true; sba->client.tx_tout = 0; - /* Number of channels equals number of mailbox channels */ - ret = of_count_phandle_with_args(pdev->dev.of_node, - "mboxes", "#mbox-cells"); - if (ret <= 0) - return -ENODEV; - mchans_count = ret; - sba->mchans_count = 0; - atomic_set(&sba->mchans_current, 0); - /* Allocate mailbox channel array */ - sba->mchans = devm_kcalloc(&pdev->dev, sba->mchans_count, + sba->mchans = devm_kcalloc(&pdev->dev, mchans_count, sizeof(*sba->mchans), GFP_KERNEL); if (!sba->mchans) return -ENOMEM; /* Request mailbox channels */ + sba->mchans_count = 0; for (i = 0; i < mchans_count; i++) { sba->mchans[i] = mbox_request_channel(&sba->client, i); if (IS_ERR(sba->mchans[i])) { @@ -1695,6 +1718,7 @@ static int sba_probe(struct platform_device *pdev) } sba->mchans_count++; } + atomic_set(&sba->mchans_current, 0); /* Find-out underlying mailbox device */ ret = of_parse_phandle_with_args(pdev->dev.of_node, @@ -1723,15 +1747,34 @@ static int sba_probe(struct platform_device *pdev) } } - /* Register DMA device with linux async framework */ - ret = sba_async_register(sba); - if (ret) - goto fail_free_mchans; - /* Prealloc channel resource */ ret = sba_prealloc_channel_resources(sba); if (ret) - goto fail_async_dev_unreg; + goto fail_free_mchans; + + /* Check availability of debugfs */ + if (!debugfs_initialized()) + goto skip_debugfs; + + /* Create debugfs root entry */ + sba->root = debugfs_create_dir(dev_name(sba->dev), NULL); + if (IS_ERR_OR_NULL(sba->root)) { + dev_err(sba->dev, "failed to create debugfs root entry\n"); + sba->root = NULL; + goto skip_debugfs; + } + + /* Create debugfs stats entry */ + sba->stats = debugfs_create_devm_seqfile(sba->dev, "stats", sba->root, + 
sba_debugfs_stats_show); + if (IS_ERR_OR_NULL(sba->stats)) + dev_err(sba->dev, "failed to create debugfs stats file\n"); +skip_debugfs: + + /* Register DMA device with Linux async framework */ + ret = sba_async_register(sba); + if (ret) + goto fail_free_resources; /* Print device info */ dev_info(sba->dev, "%s using SBAv%d and %d mailbox channels", @@ -1740,8 +1783,9 @@ static int sba_probe(struct platform_device *pdev) return 0; -fail_async_dev_unreg: - dma_async_device_unregister(&sba->dma_dev); +fail_free_resources: + debugfs_remove_recursive(sba->root); + sba_freeup_channel_resources(sba); fail_free_mchans: for (i = 0; i < sba->mchans_count; i++) mbox_free_channel(sba->mchans[i]); @@ -1753,10 +1797,12 @@ static int sba_remove(struct platform_device *pdev) int i; struct sba_device *sba = platform_get_drvdata(pdev); - sba_freeup_channel_resources(sba); - dma_async_device_unregister(&sba->dma_dev); + debugfs_remove_recursive(sba->root); + + sba_freeup_channel_resources(sba); + for (i = 0; i < sba->mchans_count; i++) mbox_free_channel(sba->mchans[i]); diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index d9118ec23025..b451354735d3 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -923,30 +923,85 @@ int dma_async_device_register(struct dma_device *device) return -ENODEV; /* validate device routines */ - BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) && - !device->device_prep_dma_memcpy); - BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) && - !device->device_prep_dma_xor); - BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) && - !device->device_prep_dma_xor_val); - BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) && - !device->device_prep_dma_pq); - BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) && - !device->device_prep_dma_pq_val); - BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) && - !device->device_prep_dma_memset); - BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) && - !device->device_prep_dma_interrupt); - BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) && - !device->device_prep_dma_sg); - BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) && - !device->device_prep_dma_cyclic); - BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && - !device->device_prep_interleaved_dma); + if (!device->dev) { + pr_err("DMAdevice must have dev\n"); + return -EIO; + } - BUG_ON(!device->device_tx_status); - BUG_ON(!device->device_issue_pending); - BUG_ON(!device->dev); + if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) { + dev_err(device->dev, + "Device claims capability %s, but op is not defined\n", + "DMA_MEMCPY"); + return -EIO; + } + + if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) { + dev_err(device->dev, + "Device claims capability %s, but op is not defined\n", + "DMA_XOR"); + return -EIO; + } + + if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) { + dev_err(device->dev, + "Device claims capability %s, but op is not defined\n", + "DMA_XOR_VAL"); + return -EIO; + } + + if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) { + dev_err(device->dev, + "Device claims capability %s, but op is not defined\n", + "DMA_PQ"); + return -EIO; + } + + if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) { + dev_err(device->dev, + "Device claims capability %s, but op is not defined\n", + "DMA_PQ_VAL"); + return -EIO; + } + + if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) { + dev_err(device->dev, + 
"Device claims capability %s, but op is not defined\n", + "DMA_MEMSET"); + return -EIO; + } + + if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) { + dev_err(device->dev, + "Device claims capability %s, but op is not defined\n", + "DMA_INTERRUPT"); + return -EIO; + } + + if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) { + dev_err(device->dev, + "Device claims capability %s, but op is not defined\n", + "DMA_CYCLIC"); + return -EIO; + } + + if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) { + dev_err(device->dev, + "Device claims capability %s, but op is not defined\n", + "DMA_INTERLEAVE"); + return -EIO; + } + + + if (!device->device_tx_status) { + dev_err(device->dev, "Device tx_status is not defined\n"); + return -EIO; + } + + + if (!device->device_issue_pending) { + dev_err(device->dev, "Device issue_pending is not defined\n"); + return -EIO; + } /* note: this only matters in the * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index a07ef3d6b3ec..34ff53290b03 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -52,15 +52,10 @@ module_param(iterations, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(iterations, "Iterations before stopping test (default: infinite)"); -static unsigned int sg_buffers = 1; -module_param(sg_buffers, uint, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(sg_buffers, - "Number of scatter gather buffers (default: 1)"); - static unsigned int dmatest; module_param(dmatest, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(dmatest, - "dmatest 0-memcpy 1-slave_sg (default: 0)"); + "dmatest 0-memcpy 1-memset (default: 0)"); static unsigned int xor_sources = 3; module_param(xor_sources, uint, S_IRUGO | S_IWUSR); @@ -158,6 +153,7 @@ MODULE_PARM_DESC(run, "Run the test (default: false)"); #define PATTERN_COPY 0x40 #define PATTERN_OVERWRITE 0x20 #define PATTERN_COUNT_MASK 0x1f +#define PATTERN_MEMSET_IDX 0x01 struct dmatest_thread { struct list_head node; @@ -239,46 +235,62 @@ static unsigned long dmatest_random(void) return buf; } +static inline u8 gen_inv_idx(u8 index, bool is_memset) +{ + u8 val = is_memset ? 
PATTERN_MEMSET_IDX : index; + + return ~val & PATTERN_COUNT_MASK; +} + +static inline u8 gen_src_value(u8 index, bool is_memset) +{ + return PATTERN_SRC | gen_inv_idx(index, is_memset); +} + +static inline u8 gen_dst_value(u8 index, bool is_memset) +{ + return PATTERN_DST | gen_inv_idx(index, is_memset); +} + static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len, - unsigned int buf_size) + unsigned int buf_size, bool is_memset) { unsigned int i; u8 *buf; for (; (buf = *bufs); bufs++) { for (i = 0; i < start; i++) - buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); + buf[i] = gen_src_value(i, is_memset); for ( ; i < start + len; i++) - buf[i] = PATTERN_SRC | PATTERN_COPY - | (~i & PATTERN_COUNT_MASK); + buf[i] = gen_src_value(i, is_memset) | PATTERN_COPY; for ( ; i < buf_size; i++) - buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); + buf[i] = gen_src_value(i, is_memset); buf++; } } static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len, - unsigned int buf_size) + unsigned int buf_size, bool is_memset) { unsigned int i; u8 *buf; for (; (buf = *bufs); bufs++) { for (i = 0; i < start; i++) - buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK); + buf[i] = gen_dst_value(i, is_memset); for ( ; i < start + len; i++) - buf[i] = PATTERN_DST | PATTERN_OVERWRITE - | (~i & PATTERN_COUNT_MASK); + buf[i] = gen_dst_value(i, is_memset) | + PATTERN_OVERWRITE; for ( ; i < buf_size; i++) - buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK); + buf[i] = gen_dst_value(i, is_memset); } } static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index, - unsigned int counter, bool is_srcbuf) + unsigned int counter, bool is_srcbuf, bool is_memset) { u8 diff = actual ^ pattern; - u8 expected = pattern | (~counter & PATTERN_COUNT_MASK); + u8 expected = pattern | gen_inv_idx(counter, is_memset); const char *thread_name = current->comm; if (is_srcbuf) @@ -298,7 +310,7 @@ static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index, static unsigned int dmatest_verify(u8 **bufs, unsigned int start, unsigned int end, unsigned int counter, u8 pattern, - bool is_srcbuf) + bool is_srcbuf, bool is_memset) { unsigned int i; unsigned int error_count = 0; @@ -311,11 +323,12 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start, counter = counter_orig; for (i = start; i < end; i++) { actual = buf[i]; - expected = pattern | (~counter & PATTERN_COUNT_MASK); + expected = pattern | gen_inv_idx(counter, is_memset); if (actual != expected) { if (error_count < MAX_ERROR_COUNT) dmatest_mismatch(actual, pattern, i, - counter, is_srcbuf); + counter, is_srcbuf, + is_memset); error_count++; } counter++; @@ -435,6 +448,7 @@ static int dmatest_func(void *data) s64 runtime = 0; unsigned long long total_len = 0; u8 align = 0; + bool is_memset = false; set_freezable(); @@ -448,9 +462,10 @@ static int dmatest_func(void *data) if (thread->type == DMA_MEMCPY) { align = dev->copy_align; src_cnt = dst_cnt = 1; - } else if (thread->type == DMA_SG) { - align = dev->copy_align; - src_cnt = dst_cnt = sg_buffers; + } else if (thread->type == DMA_MEMSET) { + align = dev->fill_align; + src_cnt = dst_cnt = 1; + is_memset = true; } else if (thread->type == DMA_XOR) { /* force odd to ensure dst = src */ src_cnt = min_odd(params->xor_sources | 1, dev->max_xor); @@ -530,8 +545,6 @@ static int dmatest_func(void *data) dma_addr_t srcs[src_cnt]; dma_addr_t *dsts; unsigned int src_off, dst_off, len; - struct scatterlist tx_sg[src_cnt]; - struct scatterlist rx_sg[src_cnt]; total_tests++; @@ 
-571,9 +584,9 @@ static int dmatest_func(void *data) dst_off = (dst_off >> align) << align; dmatest_init_srcs(thread->srcs, src_off, len, - params->buf_size); + params->buf_size, is_memset); dmatest_init_dsts(thread->dsts, dst_off, len, - params->buf_size); + params->buf_size, is_memset); diff = ktime_sub(ktime_get(), start); filltime = ktime_add(filltime, diff); @@ -627,22 +640,15 @@ static int dmatest_func(void *data) um->bidi_cnt++; } - sg_init_table(tx_sg, src_cnt); - sg_init_table(rx_sg, src_cnt); - for (i = 0; i < src_cnt; i++) { - sg_dma_address(&rx_sg[i]) = srcs[i]; - sg_dma_address(&tx_sg[i]) = dsts[i] + dst_off; - sg_dma_len(&tx_sg[i]) = len; - sg_dma_len(&rx_sg[i]) = len; - } - if (thread->type == DMA_MEMCPY) tx = dev->device_prep_dma_memcpy(chan, dsts[0] + dst_off, srcs[0], len, flags); - else if (thread->type == DMA_SG) - tx = dev->device_prep_dma_sg(chan, tx_sg, src_cnt, - rx_sg, src_cnt, flags); + else if (thread->type == DMA_MEMSET) + tx = dev->device_prep_dma_memset(chan, + dsts[0] + dst_off, + *(thread->srcs[0] + src_off), + len, flags); else if (thread->type == DMA_XOR) tx = dev->device_prep_dma_xor(chan, dsts[0] + dst_off, @@ -722,23 +728,25 @@ static int dmatest_func(void *data) start = ktime_get(); pr_debug("%s: verifying source buffer...\n", current->comm); error_count = dmatest_verify(thread->srcs, 0, src_off, - 0, PATTERN_SRC, true); + 0, PATTERN_SRC, true, is_memset); error_count += dmatest_verify(thread->srcs, src_off, src_off + len, src_off, - PATTERN_SRC | PATTERN_COPY, true); + PATTERN_SRC | PATTERN_COPY, true, is_memset); error_count += dmatest_verify(thread->srcs, src_off + len, params->buf_size, src_off + len, - PATTERN_SRC, true); + PATTERN_SRC, true, is_memset); pr_debug("%s: verifying dest buffer...\n", current->comm); error_count += dmatest_verify(thread->dsts, 0, dst_off, - 0, PATTERN_DST, false); + 0, PATTERN_DST, false, is_memset); + error_count += dmatest_verify(thread->dsts, dst_off, dst_off + len, src_off, - PATTERN_SRC | PATTERN_COPY, false); + PATTERN_SRC | PATTERN_COPY, false, is_memset); + error_count += dmatest_verify(thread->dsts, dst_off + len, params->buf_size, dst_off + len, - PATTERN_DST, false); + PATTERN_DST, false, is_memset); diff = ktime_sub(ktime_get(), start); comparetime = ktime_add(comparetime, diff); @@ -821,8 +829,8 @@ static int dmatest_add_threads(struct dmatest_info *info, if (type == DMA_MEMCPY) op = "copy"; - else if (type == DMA_SG) - op = "sg"; + else if (type == DMA_MEMSET) + op = "set"; else if (type == DMA_XOR) op = "xor"; else if (type == DMA_PQ) @@ -883,9 +891,9 @@ static int dmatest_add_channel(struct dmatest_info *info, } } - if (dma_has_cap(DMA_SG, dma_dev->cap_mask)) { + if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) { if (dmatest == 1) { - cnt = dmatest_add_threads(info, dtc, DMA_SG); + cnt = dmatest_add_threads(info, dtc, DMA_MEMSET); thread_count += cnt > 0 ? 
cnt : 0; } } @@ -961,8 +969,8 @@ static void run_threaded_test(struct dmatest_info *info) params->noverify = noverify; request_channels(info, DMA_MEMCPY); + request_channels(info, DMA_MEMSET); request_channels(info, DMA_XOR); - request_channels(info, DMA_SG); request_channels(info, DMA_PQ); } diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 3b8b752ede2d..3eaece888e75 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -825,122 +825,6 @@ fsl_dma_prep_memcpy(struct dma_chan *dchan, return NULL; } -static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan, - struct scatterlist *dst_sg, unsigned int dst_nents, - struct scatterlist *src_sg, unsigned int src_nents, - unsigned long flags) -{ - struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL; - struct fsldma_chan *chan = to_fsl_chan(dchan); - size_t dst_avail, src_avail; - dma_addr_t dst, src; - size_t len; - - /* basic sanity checks */ - if (dst_nents == 0 || src_nents == 0) - return NULL; - - if (dst_sg == NULL || src_sg == NULL) - return NULL; - - /* - * TODO: should we check that both scatterlists have the same - * TODO: number of bytes in total? Is that really an error? - */ - - /* get prepared for the loop */ - dst_avail = sg_dma_len(dst_sg); - src_avail = sg_dma_len(src_sg); - - /* run until we are out of scatterlist entries */ - while (true) { - - /* create the largest transaction possible */ - len = min_t(size_t, src_avail, dst_avail); - len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT); - if (len == 0) - goto fetch; - - dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail; - src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail; - - /* allocate and populate the descriptor */ - new = fsl_dma_alloc_descriptor(chan); - if (!new) { - chan_err(chan, "%s\n", msg_ld_oom); - goto fail; - } - - set_desc_cnt(chan, &new->hw, len); - set_desc_src(chan, &new->hw, src); - set_desc_dst(chan, &new->hw, dst); - - if (!first) - first = new; - else - set_desc_next(chan, &prev->hw, new->async_tx.phys); - - new->async_tx.cookie = 0; - async_tx_ack(&new->async_tx); - prev = new; - - /* Insert the link descriptor to the LD ring */ - list_add_tail(&new->node, &first->tx_list); - - /* update metadata */ - dst_avail -= len; - src_avail -= len; - -fetch: - /* fetch the next dst scatterlist entry */ - if (dst_avail == 0) { - - /* no more entries: we're done */ - if (dst_nents == 0) - break; - - /* fetch the next entry: if there are no more: done */ - dst_sg = sg_next(dst_sg); - if (dst_sg == NULL) - break; - - dst_nents--; - dst_avail = sg_dma_len(dst_sg); - } - - /* fetch the next src scatterlist entry */ - if (src_avail == 0) { - - /* no more entries: we're done */ - if (src_nents == 0) - break; - - /* fetch the next entry: if there are no more: done */ - src_sg = sg_next(src_sg); - if (src_sg == NULL) - break; - - src_nents--; - src_avail = sg_dma_len(src_sg); - } - } - - new->async_tx.flags = flags; /* client is in control of this ack */ - new->async_tx.cookie = -EBUSY; - - /* Set End-of-link to the last link descriptor of new list */ - set_ld_eol(chan, new); - - return &first->async_tx; - -fail: - if (!first) - return NULL; - - fsldma_free_desc_list_reverse(chan, &first->tx_list); - return NULL; -} - static int fsl_dma_device_terminate_all(struct dma_chan *dchan) { struct fsldma_chan *chan; @@ -1357,12 +1241,10 @@ static int fsldma_of_probe(struct platform_device *op) fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0); dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask); - dma_cap_set(DMA_SG, 
fdev->common.cap_mask); dma_cap_set(DMA_SLAVE, fdev->common.cap_mask); fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources; fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources; fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy; - fdev->common.device_prep_dma_sg = fsl_dma_prep_sg; fdev->common.device_tx_status = fsl_tx_status; fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; fdev->common.device_config = fsl_dma_device_config; diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index a371b07a0981..f70cc74032ea 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -644,9 +644,13 @@ static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete) mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); } - /* 5 microsecond delay per pending descriptor */ - writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK), - ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET); + /* microsecond delay by sysfs variable per pending descriptor */ + if (ioat_chan->intr_coalesce != ioat_chan->prev_intr_coalesce) { + writew(min((ioat_chan->intr_coalesce * (active - i)), + IOAT_INTRDELAY_MASK), + ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET); + ioat_chan->prev_intr_coalesce = ioat_chan->intr_coalesce; + } } static void ioat_cleanup(struct ioatdma_chan *ioat_chan) diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index a9bc1a15b0d1..56200eefcf5e 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -142,11 +142,14 @@ struct ioatdma_chan { spinlock_t prep_lock; struct ioat_descs descs[2]; int desc_chunks; + int intr_coalesce; + int prev_intr_coalesce; }; struct ioat_sysfs_entry { struct attribute attr; ssize_t (*show)(struct dma_chan *, char *); + ssize_t (*store)(struct dma_chan *, const char *, size_t); }; /** diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c index ed8ed1192775..93e006c3441d 100644 --- a/drivers/dma/ioat/init.c +++ b/drivers/dma/ioat/init.c @@ -39,7 +39,7 @@ MODULE_VERSION(IOAT_DMA_VERSION); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Intel Corporation"); -static struct pci_device_id ioat_pci_tbl[] = { +static const struct pci_device_id ioat_pci_tbl[] = { /* I/OAT v3 platforms */ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) }, diff --git a/drivers/dma/ioat/sysfs.c b/drivers/dma/ioat/sysfs.c index cb4a857ee21b..3ac677f29e8f 100644 --- a/drivers/dma/ioat/sysfs.c +++ b/drivers/dma/ioat/sysfs.c @@ -64,8 +64,24 @@ ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page) return entry->show(&ioat_chan->dma_chan, page); } +static ssize_t +ioat_attr_store(struct kobject *kobj, struct attribute *attr, +const char *page, size_t count) +{ + struct ioat_sysfs_entry *entry; + struct ioatdma_chan *ioat_chan; + + entry = container_of(attr, struct ioat_sysfs_entry, attr); + ioat_chan = container_of(kobj, struct ioatdma_chan, kobj); + + if (!entry->store) + return -EIO; + return entry->store(&ioat_chan->dma_chan, page, count); +} + const struct sysfs_ops ioat_sysfs_ops = { .show = ioat_attr_show, + .store = ioat_attr_store, }; void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type) @@ -121,11 +137,37 @@ static ssize_t ring_active_show(struct dma_chan *c, char *page) } static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active); +static ssize_t intr_coalesce_show(struct dma_chan *c, char *page) +{ + struct ioatdma_chan *ioat_chan = to_ioat_chan(c); + + 
return sprintf(page, "%d\n", ioat_chan->intr_coalesce); +} + +static ssize_t intr_coalesce_store(struct dma_chan *c, const char *page, +size_t count) +{ + int intr_coalesce = 0; + struct ioatdma_chan *ioat_chan = to_ioat_chan(c); + + if (sscanf(page, "%du", &intr_coalesce) != -1) { + if ((intr_coalesce < 0) || + (intr_coalesce > IOAT_INTRDELAY_MASK)) + return -EINVAL; + ioat_chan->intr_coalesce = intr_coalesce; + } + + return count; +} + +static struct ioat_sysfs_entry intr_coalesce_attr = __ATTR_RW(intr_coalesce); + static struct attribute *ioat_attrs[] = { &ring_size_attr.attr, &ring_active_attr.attr, &ioat_cap_attr.attr, &ioat_version_attr.attr, + &intr_coalesce_attr.attr, NULL, }; diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index 01e25c68dd5a..01d2a750a621 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c @@ -223,7 +223,6 @@ static irqreturn_t k3_dma_int_handler(int irq, void *dev_id) if (c && (tc1 & BIT(i))) { spin_lock_irqsave(&c->vc.lock, flags); vchan_cookie_complete(&p->ds_run->vd); - WARN_ON_ONCE(p->ds_done); p->ds_done = p->ds_run; p->ds_run = NULL; spin_unlock_irqrestore(&c->vc.lock, flags); @@ -274,13 +273,14 @@ static int k3_dma_start_txd(struct k3_dma_chan *c) */ list_del(&ds->vd.node); - WARN_ON_ONCE(c->phy->ds_run); - WARN_ON_ONCE(c->phy->ds_done); c->phy->ds_run = ds; + c->phy->ds_done = NULL; /* start dma */ k3_dma_set_desc(c->phy, &ds->desc_hw[0]); return 0; } + c->phy->ds_run = NULL; + c->phy->ds_done = NULL; return -EAGAIN; } @@ -722,11 +722,7 @@ static int k3_dma_terminate_all(struct dma_chan *chan) k3_dma_free_desc(&p->ds_run->vd); p->ds_run = NULL; } - if (p->ds_done) { - k3_dma_free_desc(&p->ds_done->vd); - p->ds_done = NULL; - } - + p->ds_done = NULL; } spin_unlock_irqrestore(&c->vc.lock, flags); vchan_dma_desc_free_list(&c->vc, &head); diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 25bc5b103aa2..1993889003fd 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -68,36 +68,6 @@ static void mv_desc_init(struct mv_xor_desc_slot *desc, hw_desc->byte_count = byte_count; } -/* Populate the descriptor */ -static void mv_xor_config_sg_ll_desc(struct mv_xor_desc_slot *desc, - dma_addr_t dma_src, dma_addr_t dma_dst, - u32 len, struct mv_xor_desc_slot *prev) -{ - struct mv_xor_desc *hw_desc = desc->hw_desc; - - hw_desc->status = XOR_DESC_DMA_OWNED; - hw_desc->phy_next_desc = 0; - /* Configure for XOR with only one src address -> MEMCPY */ - hw_desc->desc_command = XOR_DESC_OPERATION_XOR | (0x1 << 0); - hw_desc->phy_dest_addr = dma_dst; - hw_desc->phy_src_addr[0] = dma_src; - hw_desc->byte_count = len; - - if (prev) { - struct mv_xor_desc *hw_prev = prev->hw_desc; - - hw_prev->phy_next_desc = desc->async_tx.phys; - } -} - -static void mv_xor_desc_config_eod(struct mv_xor_desc_slot *desc) -{ - struct mv_xor_desc *hw_desc = desc->hw_desc; - - /* Enable end-of-descriptor interrupt */ - hw_desc->desc_command |= XOR_DESC_EOD_INT_EN; -} - static void mv_desc_set_mode(struct mv_xor_desc_slot *desc) { struct mv_xor_desc *hw_desc = desc->hw_desc; @@ -662,132 +632,6 @@ mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags) return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags); } -/** - * mv_xor_prep_dma_sg - prepare descriptors for a memory sg transaction - * @chan: DMA channel - * @dst_sg: Destination scatter list - * @dst_sg_len: Number of entries in destination scatter list - * @src_sg: Source scatter list - * @src_sg_len: Number of entries in source scatter list - * @flags: transfer ack flags - * - * Return: Async 
transaction descriptor on success and NULL on failure - */ -static struct dma_async_tx_descriptor * -mv_xor_prep_dma_sg(struct dma_chan *chan, struct scatterlist *dst_sg, - unsigned int dst_sg_len, struct scatterlist *src_sg, - unsigned int src_sg_len, unsigned long flags) -{ - struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); - struct mv_xor_desc_slot *new; - struct mv_xor_desc_slot *first = NULL; - struct mv_xor_desc_slot *prev = NULL; - size_t len, dst_avail, src_avail; - dma_addr_t dma_dst, dma_src; - int desc_cnt = 0; - int ret; - - dev_dbg(mv_chan_to_devp(mv_chan), - "%s dst_sg_len: %d src_sg_len: %d flags: %ld\n", - __func__, dst_sg_len, src_sg_len, flags); - - dst_avail = sg_dma_len(dst_sg); - src_avail = sg_dma_len(src_sg); - - /* Run until we are out of scatterlist entries */ - while (true) { - /* Allocate and populate the descriptor */ - desc_cnt++; - new = mv_chan_alloc_slot(mv_chan); - if (!new) { - dev_err(mv_chan_to_devp(mv_chan), - "Out of descriptors (desc_cnt=%d)!\n", - desc_cnt); - goto err; - } - - len = min_t(size_t, src_avail, dst_avail); - len = min_t(size_t, len, MV_XOR_MAX_BYTE_COUNT); - if (len == 0) - goto fetch; - - if (len < MV_XOR_MIN_BYTE_COUNT) { - dev_err(mv_chan_to_devp(mv_chan), - "Transfer size of %zu too small!\n", len); - goto err; - } - - dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - - dst_avail; - dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - - src_avail; - - /* Check if a new window needs to get added for 'dst' */ - ret = mv_xor_add_io_win(mv_chan, dma_dst); - if (ret) - goto err; - - /* Check if a new window needs to get added for 'src' */ - ret = mv_xor_add_io_win(mv_chan, dma_src); - if (ret) - goto err; - - /* Populate the descriptor */ - mv_xor_config_sg_ll_desc(new, dma_src, dma_dst, len, prev); - prev = new; - dst_avail -= len; - src_avail -= len; - - if (!first) - first = new; - else - list_move_tail(&new->node, &first->sg_tx_list); - -fetch: - /* Fetch the next dst scatterlist entry */ - if (dst_avail == 0) { - if (dst_sg_len == 0) - break; - - /* Fetch the next entry: if there are no more: done */ - dst_sg = sg_next(dst_sg); - if (dst_sg == NULL) - break; - - dst_sg_len--; - dst_avail = sg_dma_len(dst_sg); - } - - /* Fetch the next src scatterlist entry */ - if (src_avail == 0) { - if (src_sg_len == 0) - break; - - /* Fetch the next entry: if there are no more: done */ - src_sg = sg_next(src_sg); - if (src_sg == NULL) - break; - - src_sg_len--; - src_avail = sg_dma_len(src_sg); - } - } - - /* Set the EOD flag in the last descriptor */ - mv_xor_desc_config_eod(new); - first->async_tx.flags = flags; - - return &first->async_tx; - -err: - /* Cleanup: Move all descriptors back into the free list */ - spin_lock_bh(&mv_chan->lock); - mv_desc_clean_slot(first, mv_chan); - spin_unlock_bh(&mv_chan->lock); - - return NULL; -} - static void mv_xor_free_chan_resources(struct dma_chan *chan) { struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); @@ -1254,8 +1098,6 @@ mv_xor_channel_add(struct mv_xor_device *xordev, dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt; if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy; - if (dma_has_cap(DMA_SG, dma_dev->cap_mask)) - dma_dev->device_prep_dma_sg = mv_xor_prep_dma_sg; if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { dma_dev->max_xor = 8; dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor; @@ -1305,11 +1147,10 @@ mv_xor_channel_add(struct mv_xor_device *xordev, goto err_free_irq; } - dev_info(&pdev->dev, "Marvell XOR (%s): ( 
%s%s%s%s)\n", + dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n", mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode", dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", - dma_has_cap(DMA_SG, dma_dev->cap_mask) ? "sg " : "", dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); dma_async_device_register(dma_dev); @@ -1552,7 +1393,6 @@ static int mv_xor_probe(struct platform_device *pdev) dma_cap_zero(cap_mask); dma_cap_set(DMA_MEMCPY, cap_mask); - dma_cap_set(DMA_SG, cap_mask); dma_cap_set(DMA_XOR, cap_mask); dma_cap_set(DMA_INTERRUPT, cap_mask); diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c index 3f45b9bdf201..d3f918a9ee76 100644 --- a/drivers/dma/nbpfaxi.c +++ b/drivers/dma/nbpfaxi.c @@ -1005,21 +1005,6 @@ static struct dma_async_tx_descriptor *nbpf_prep_memcpy( DMA_MEM_TO_MEM, flags); } -static struct dma_async_tx_descriptor *nbpf_prep_memcpy_sg( - struct dma_chan *dchan, - struct scatterlist *dst_sg, unsigned int dst_nents, - struct scatterlist *src_sg, unsigned int src_nents, - unsigned long flags) -{ - struct nbpf_channel *chan = nbpf_to_chan(dchan); - - if (dst_nents != src_nents) - return NULL; - - return nbpf_prep_sg(chan, src_sg, dst_sg, src_nents, - DMA_MEM_TO_MEM, flags); -} - static struct dma_async_tx_descriptor *nbpf_prep_slave_sg( struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction direction, unsigned long flags, void *context) @@ -1417,13 +1402,11 @@ static int nbpf_probe(struct platform_device *pdev) dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask); - dma_cap_set(DMA_SG, dma_dev->cap_mask); /* Common and MEMCPY operations */ dma_dev->device_alloc_chan_resources = nbpf_alloc_chan_resources; dma_dev->device_free_chan_resources = nbpf_free_chan_resources; - dma_dev->device_prep_dma_sg = nbpf_prep_memcpy_sg; dma_dev->device_prep_dma_memcpy = nbpf_prep_memcpy; dma_dev->device_tx_status = nbpf_tx_status; dma_dev->device_issue_pending = nbpf_issue_pending; diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c index faae0bfe1109..91fd395c90c4 100644 --- a/drivers/dma/of-dma.c +++ b/drivers/dma/of-dma.c @@ -38,8 +38,8 @@ static struct of_dma *of_dma_find_controller(struct of_phandle_args *dma_spec) if (ofdma->of_node == dma_spec->np) return ofdma; - pr_debug("%s: can't find DMA controller %s\n", __func__, - dma_spec->np->full_name); + pr_debug("%s: can't find DMA controller %pOF\n", __func__, + dma_spec->np); return NULL; } @@ -255,8 +255,8 @@ struct dma_chan *of_dma_request_slave_channel(struct device_node *np, count = of_property_count_strings(np, "dma-names"); if (count < 0) { - pr_err("%s: dma-names property of node '%s' missing or empty\n", - __func__, np->full_name); + pr_err("%s: dma-names property of node '%pOF' missing or empty\n", + __func__, np); return ERR_PTR(-ENODEV); } diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index b19ee04567b5..f122c2a7b9f0 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -3023,7 +3023,7 @@ static int pl330_remove(struct amba_device *adev) return 0; } -static struct amba_id pl330_ids[] = { +static const struct amba_id pl330_ids[] = { { .id = 0x00041330, .mask = 0x000fffff, diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index b1535b1fe95c..4cf0d4d0cecf 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c @@ -4040,9 +4040,9 @@ static int ppc440spe_adma_probe(struct 
platform_device *ofdev) /* it is DMA0 or DMA1 */ idx = of_get_property(np, "cell-index", &len); if (!idx || (len != sizeof(u32))) { - dev_err(&ofdev->dev, "Device node %s has missing " + dev_err(&ofdev->dev, "Device node %pOF has missing " "or invalid cell-index property\n", - np->full_name); + np); return -EINVAL; } id = *idx; @@ -4307,7 +4307,7 @@ static int ppc440spe_adma_remove(struct platform_device *ofdev) * "poly" allows setting/checking used polynomial (for PPC440SPe only). */ -static ssize_t show_ppc440spe_devices(struct device_driver *dev, char *buf) +static ssize_t devices_show(struct device_driver *dev, char *buf) { ssize_t size = 0; int i; @@ -4321,16 +4321,17 @@ static ssize_t show_ppc440spe_devices(struct device_driver *dev, char *buf) } return size; } +static DRIVER_ATTR_RO(devices); -static ssize_t show_ppc440spe_r6enable(struct device_driver *dev, char *buf) +static ssize_t enable_show(struct device_driver *dev, char *buf) { return snprintf(buf, PAGE_SIZE, "PPC440SP(e) RAID-6 capabilities are %sABLED.\n", ppc440spe_r6_enabled ? "EN" : "DIS"); } -static ssize_t store_ppc440spe_r6enable(struct device_driver *dev, - const char *buf, size_t count) +static ssize_t enable_store(struct device_driver *dev, const char *buf, + size_t count) { unsigned long val; @@ -4357,8 +4358,9 @@ static ssize_t store_ppc440spe_r6enable(struct device_driver *dev, } return count; } +static DRIVER_ATTR_RW(enable); -static ssize_t show_ppc440spe_r6poly(struct device_driver *dev, char *buf) +static ssize_t poly_show(struct device_driver *dev, char *buf) { ssize_t size = 0; u32 reg; @@ -4377,8 +4379,8 @@ static ssize_t show_ppc440spe_r6poly(struct device_driver *dev, char *buf) return size; } -static ssize_t store_ppc440spe_r6poly(struct device_driver *dev, - const char *buf, size_t count) +static ssize_t poly_store(struct device_driver *dev, const char *buf, + size_t count) { unsigned long reg, val; @@ -4404,12 +4406,7 @@ static ssize_t store_ppc440spe_r6poly(struct device_driver *dev, return count; } - -static DRIVER_ATTR(devices, S_IRUGO, show_ppc440spe_devices, NULL); -static DRIVER_ATTR(enable, S_IRUGO | S_IWUSR, show_ppc440spe_r6enable, - store_ppc440spe_r6enable); -static DRIVER_ATTR(poly, S_IRUGO | S_IWUSR, show_ppc440spe_r6poly, - store_ppc440spe_r6poly); +static DRIVER_ATTR_RW(poly); /* * Common initialisation for RAID engines; allocate memory for @@ -4448,8 +4445,7 @@ static int ppc440spe_configure_raid_devices(void) dcr_base = dcr_resource_start(np, 0); dcr_len = dcr_resource_len(np, 0); if (!dcr_base && !dcr_len) { - pr_err("%s: can't get DCR registers base/len!\n", - np->full_name); + pr_err("%pOF: can't get DCR registers base/len!\n", np); of_node_put(np); iounmap(i2o_reg); return -ENODEV; @@ -4457,7 +4453,7 @@ static int ppc440spe_configure_raid_devices(void) i2o_dcr_host = dcr_map(np, dcr_base, dcr_len); if (!DCR_MAP_OK(i2o_dcr_host)) { - pr_err("%s: failed to map DCRs!\n", np->full_name); + pr_err("%pOF: failed to map DCRs!\n", np); of_node_put(np); iounmap(i2o_reg); return -ENODEV; @@ -4518,15 +4514,14 @@ static int ppc440spe_configure_raid_devices(void) dcr_base = dcr_resource_start(np, 0); dcr_len = dcr_resource_len(np, 0); if (!dcr_base && !dcr_len) { - pr_err("%s: can't get DCR registers base/len!\n", - np->full_name); + pr_err("%pOF: can't get DCR registers base/len!\n", np); ret = -ENODEV; goto out_mq; } ppc440spe_mq_dcr_host = dcr_map(np, dcr_base, dcr_len); if (!DCR_MAP_OK(ppc440spe_mq_dcr_host)) { - pr_err("%s: failed to map DCRs!\n", np->full_name); + pr_err("%pOF: failed
to map DCRs!\n", np); ret = -ENODEV; goto out_mq; } diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c index 03c4eb3fd314..6d89fb6a6a92 100644 --- a/drivers/dma/qcom/bam_dma.c +++ b/drivers/dma/qcom/bam_dma.c @@ -65,6 +65,7 @@ struct bam_desc_hw { #define DESC_FLAG_EOT BIT(14) #define DESC_FLAG_EOB BIT(13) #define DESC_FLAG_NWD BIT(12) +#define DESC_FLAG_CMD BIT(11) struct bam_async_desc { struct virt_dma_desc vd; @@ -645,6 +646,9 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan, unsigned int curr_offset = 0; do { + if (flags & DMA_PREP_CMD) + desc->flags |= cpu_to_le16(DESC_FLAG_CMD); + desc->addr = cpu_to_le32(sg_dma_address(sg) + curr_offset); @@ -960,7 +964,7 @@ static void bam_start_dma(struct bam_chan *bchan) /* set any special flags on the last descriptor */ if (async_desc->num_desc == async_desc->xfer_len) - desc[async_desc->xfer_len - 1].flags = + desc[async_desc->xfer_len - 1].flags |= cpu_to_le16(async_desc->flags); else desc[async_desc->xfer_len - 1].flags |= diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c index 34fb6afd229b..e3669850aef4 100644 --- a/drivers/dma/qcom/hidma.c +++ b/drivers/dma/qcom/hidma.c @@ -411,7 +411,40 @@ hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src, return NULL; hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch, - src, dest, len, flags); + src, dest, len, flags, + HIDMA_TRE_MEMCPY); + + /* Place descriptor in prepared list */ + spin_lock_irqsave(&mchan->lock, irqflags); + list_add_tail(&mdesc->node, &mchan->prepared); + spin_unlock_irqrestore(&mchan->lock, irqflags); + + return &mdesc->desc; +} + +static struct dma_async_tx_descriptor * +hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value, + size_t len, unsigned long flags) +{ + struct hidma_chan *mchan = to_hidma_chan(dmach); + struct hidma_desc *mdesc = NULL; + struct hidma_dev *mdma = mchan->dmadev; + unsigned long irqflags; + + /* Get free descriptor */ + spin_lock_irqsave(&mchan->lock, irqflags); + if (!list_empty(&mchan->free)) { + mdesc = list_first_entry(&mchan->free, struct hidma_desc, node); + list_del(&mdesc->node); + } + spin_unlock_irqrestore(&mchan->lock, irqflags); + + if (!mdesc) + return NULL; + + hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch, + value, dest, len, flags, + HIDMA_TRE_MEMSET); /* Place descriptor in prepared list */ spin_lock_irqsave(&mchan->lock, irqflags); @@ -776,6 +809,7 @@ static int hidma_probe(struct platform_device *pdev) pm_runtime_get_sync(dmadev->ddev.dev); dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask); + dma_cap_set(DMA_MEMSET, dmadev->ddev.cap_mask); if (WARN_ON(!pdev->dev.dma_mask)) { rc = -ENXIO; goto dmafree; @@ -786,6 +820,7 @@ static int hidma_probe(struct platform_device *pdev) dmadev->dev_trca = trca; dmadev->trca_resource = trca_resource; dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy; + dmadev->ddev.device_prep_dma_memset = hidma_prep_dma_memset; dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources; dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources; dmadev->ddev.device_tx_status = hidma_tx_status; diff --git a/drivers/dma/qcom/hidma.h b/drivers/dma/qcom/hidma.h index 41e0aa283828..5f9966e82c0b 100644 --- a/drivers/dma/qcom/hidma.h +++ b/drivers/dma/qcom/hidma.h @@ -28,6 +28,11 @@ #define HIDMA_TRE_DEST_LOW_IDX 4 #define HIDMA_TRE_DEST_HI_IDX 5 +enum tre_type { + HIDMA_TRE_MEMCPY = 3, + HIDMA_TRE_MEMSET = 4, +}; + struct hidma_tre { atomic_t allocated; /* if this channel 
is allocated */ bool queued; /* flag whether this is pending */ @@ -150,7 +155,7 @@ void hidma_ll_start(struct hidma_lldev *llhndl); int hidma_ll_disable(struct hidma_lldev *lldev); int hidma_ll_enable(struct hidma_lldev *llhndl); void hidma_ll_set_transfer_params(struct hidma_lldev *llhndl, u32 tre_ch, - dma_addr_t src, dma_addr_t dest, u32 len, u32 flags); + dma_addr_t src, dma_addr_t dest, u32 len, u32 flags, u32 txntype); void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi); int hidma_ll_setup(struct hidma_lldev *lldev); struct hidma_lldev *hidma_ll_init(struct device *dev, u32 max_channels, diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c index 1530a661518d..4999e266b2de 100644 --- a/drivers/dma/qcom/hidma_ll.c +++ b/drivers/dma/qcom/hidma_ll.c @@ -105,10 +105,6 @@ enum ch_state { HIDMA_CH_STOPPED = 4, }; -enum tre_type { - HIDMA_TRE_MEMCPY = 3, -}; - enum err_code { HIDMA_EVRE_STATUS_COMPLETE = 1, HIDMA_EVRE_STATUS_ERROR = 4, @@ -174,8 +170,7 @@ int hidma_ll_request(struct hidma_lldev *lldev, u32 sig, const char *dev_name, tre->err_info = 0; tre->lldev = lldev; tre_local = &tre->tre_local[0]; - tre_local[HIDMA_TRE_CFG_IDX] = HIDMA_TRE_MEMCPY; - tre_local[HIDMA_TRE_CFG_IDX] |= (lldev->chidx & 0xFF) << 8; + tre_local[HIDMA_TRE_CFG_IDX] = (lldev->chidx & 0xFF) << 8; tre_local[HIDMA_TRE_CFG_IDX] |= BIT(16); /* set IEOB */ *tre_ch = i; if (callback) @@ -607,7 +602,7 @@ int hidma_ll_disable(struct hidma_lldev *lldev) void hidma_ll_set_transfer_params(struct hidma_lldev *lldev, u32 tre_ch, dma_addr_t src, dma_addr_t dest, u32 len, - u32 flags) + u32 flags, u32 txntype) { struct hidma_tre *tre; u32 *tre_local; @@ -626,6 +621,8 @@ void hidma_ll_set_transfer_params(struct hidma_lldev *lldev, u32 tre_ch, } tre_local = &tre->tre_local[0]; + tre_local[HIDMA_TRE_CFG_IDX] &= ~GENMASK(7, 0); + tre_local[HIDMA_TRE_CFG_IDX] |= txntype; tre_local[HIDMA_TRE_LEN_IDX] = len; tre_local[HIDMA_TRE_SRC_LOW_IDX] = lower_32_bits(src); tre_local[HIDMA_TRE_SRC_HI_IDX] = upper_32_bits(src); diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c index 5a0991bc4787..7335e2eb9b72 100644 --- a/drivers/dma/qcom/hidma_mgmt.c +++ b/drivers/dma/qcom/hidma_mgmt.c @@ -28,7 +28,7 @@ #include "hidma_mgmt.h" -#define HIDMA_QOS_N_OFFSET 0x300 +#define HIDMA_QOS_N_OFFSET 0x700 #define HIDMA_CFG_OFFSET 0x400 #define HIDMA_MAX_BUS_REQ_LEN_OFFSET 0x41C #define HIDMA_MAX_XACTIONS_OFFSET 0x420 @@ -227,7 +227,8 @@ static int hidma_mgmt_probe(struct platform_device *pdev) goto out; } - if (max_write_request) { + if (max_write_request && + (max_write_request != mgmtdev->max_write_request)) { dev_info(&pdev->dev, "overriding max-write-burst-bytes: %d\n", max_write_request); mgmtdev->max_write_request = max_write_request; @@ -240,7 +241,8 @@ static int hidma_mgmt_probe(struct platform_device *pdev) dev_err(&pdev->dev, "max-read-burst-bytes missing\n"); goto out; } - if (max_read_request) { + if (max_read_request && + (max_read_request != mgmtdev->max_read_request)) { dev_info(&pdev->dev, "overriding max-read-burst-bytes: %d\n", max_read_request); mgmtdev->max_read_request = max_read_request; @@ -253,7 +255,8 @@ static int hidma_mgmt_probe(struct platform_device *pdev) dev_err(&pdev->dev, "max-write-transactions missing\n"); goto out; } - if (max_wr_xactions) { + if (max_wr_xactions && + (max_wr_xactions != mgmtdev->max_wr_xactions)) { dev_info(&pdev->dev, "overriding max-write-transactions: %d\n", max_wr_xactions); mgmtdev->max_wr_xactions = max_wr_xactions; @@ -266,7 +269,8 @@ static int 
hidma_mgmt_probe(struct platform_device *pdev) dev_err(&pdev->dev, "max-read-transactions missing\n"); goto out; } - if (max_rd_xactions) { + if (max_rd_xactions && + (max_rd_xactions != mgmtdev->max_rd_xactions)) { dev_info(&pdev->dev, "overriding max-read-transactions: %d\n", max_rd_xactions); mgmtdev->max_rd_xactions = max_rd_xactions; @@ -354,7 +358,7 @@ static int __init hidma_mgmt_of_populate_channels(struct device_node *np) struct platform_device_info pdevinfo; struct of_phandle_args out_irq; struct device_node *child; - struct resource *res; + struct resource *res = NULL; const __be32 *cell; int ret = 0, size, i, num; u64 addr, addr_size; diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index ffcadca53243..2b2c7db3e480 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -1690,6 +1690,15 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac, if (!irqname) return -ENOMEM; + /* + * Initialize the DMA engine channel and add it to the DMA engine + * channels list. + */ + chan->device = &dmac->engine; + dma_cookie_init(chan); + + list_add_tail(&chan->device_node, &dmac->engine.channels); + ret = devm_request_threaded_irq(dmac->dev, rchan->irq, rcar_dmac_isr_channel, rcar_dmac_isr_channel_thread, 0, @@ -1700,15 +1709,6 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac, return ret; } - /* - * Initialize the DMA engine channel and add it to the DMA engine - * channels list. - */ - chan->device = &dmac->engine; - dma_cookie_init(chan); - - list_add_tail(&chan->device_node, &dmac->engine.channels); - return 0; } @@ -1794,14 +1794,6 @@ static int rcar_dmac_probe(struct platform_device *pdev) if (!irqname) return -ENOMEM; - ret = devm_request_irq(&pdev->dev, irq, rcar_dmac_isr_error, 0, - irqname, dmac); - if (ret) { - dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n", - irq, ret); - return ret; - } - /* Enable runtime PM and initialize the device. */ pm_runtime_enable(&pdev->dev); ret = pm_runtime_get_sync(&pdev->dev); @@ -1818,8 +1810,32 @@ static int rcar_dmac_probe(struct platform_device *pdev) goto error; } - /* Initialize the channels. 
*/ - INIT_LIST_HEAD(&dmac->engine.channels); + /* Initialize engine */ + engine = &dmac->engine; + + dma_cap_set(DMA_MEMCPY, engine->cap_mask); + dma_cap_set(DMA_SLAVE, engine->cap_mask); + + engine->dev = &pdev->dev; + engine->copy_align = ilog2(RCAR_DMAC_MEMCPY_XFER_SIZE); + + engine->src_addr_widths = widths; + engine->dst_addr_widths = widths; + engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM); + engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; + + engine->device_alloc_chan_resources = rcar_dmac_alloc_chan_resources; + engine->device_free_chan_resources = rcar_dmac_free_chan_resources; + engine->device_prep_dma_memcpy = rcar_dmac_prep_dma_memcpy; + engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg; + engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic; + engine->device_config = rcar_dmac_device_config; + engine->device_terminate_all = rcar_dmac_chan_terminate_all; + engine->device_tx_status = rcar_dmac_tx_status; + engine->device_issue_pending = rcar_dmac_issue_pending; + engine->device_synchronize = rcar_dmac_device_synchronize; + + INIT_LIST_HEAD(&engine->channels); for (i = 0; i < dmac->n_channels; ++i) { ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], @@ -1828,6 +1844,14 @@ static int rcar_dmac_probe(struct platform_device *pdev) goto error; } + ret = devm_request_irq(&pdev->dev, irq, rcar_dmac_isr_error, 0, + irqname, dmac); + if (ret) { + dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n", + irq, ret); + return ret; + } + /* Register the DMAC as a DMA provider for DT. */ ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate, NULL); @@ -1839,29 +1863,6 @@ static int rcar_dmac_probe(struct platform_device *pdev) * * Default transfer size of 32 bytes requires 32-byte alignment. */ - engine = &dmac->engine; - dma_cap_set(DMA_MEMCPY, engine->cap_mask); - dma_cap_set(DMA_SLAVE, engine->cap_mask); - - engine->dev = &pdev->dev; - engine->copy_align = ilog2(RCAR_DMAC_MEMCPY_XFER_SIZE); - - engine->src_addr_widths = widths; - engine->dst_addr_widths = widths; - engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM); - engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; - - engine->device_alloc_chan_resources = rcar_dmac_alloc_chan_resources; - engine->device_free_chan_resources = rcar_dmac_free_chan_resources; - engine->device_prep_dma_memcpy = rcar_dmac_prep_dma_memcpy; - engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg; - engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic; - engine->device_config = rcar_dmac_device_config; - engine->device_terminate_all = rcar_dmac_chan_terminate_all; - engine->device_tx_status = rcar_dmac_tx_status; - engine->device_issue_pending = rcar_dmac_issue_pending; - engine->device_synchronize = rcar_dmac_device_synchronize; - ret = dma_async_device_register(engine); if (ret < 0) goto error; diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index c3052fbfd092..c2b089af0420 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -79,7 +79,7 @@ static int dma40_memcpy_channels[] = { }; /* Default configuration for physcial memcpy */ -static struct stedma40_chan_cfg dma40_memcpy_conf_phy = { +static const struct stedma40_chan_cfg dma40_memcpy_conf_phy = { .mode = STEDMA40_MODE_PHYSICAL, .dir = DMA_MEM_TO_MEM, @@ -93,7 +93,7 @@ static struct stedma40_chan_cfg dma40_memcpy_conf_phy = { }; /* Default configuration for logical memcpy */ -static struct stedma40_chan_cfg dma40_memcpy_conf_log = { +static const struct stedma40_chan_cfg 
dma40_memcpy_conf_log = { .mode = STEDMA40_MODE_LOGICAL, .dir = DMA_MEM_TO_MEM, @@ -2484,19 +2484,6 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, DMA_MEM_TO_MEM, dma_flags); } -static struct dma_async_tx_descriptor * -d40_prep_memcpy_sg(struct dma_chan *chan, - struct scatterlist *dst_sg, unsigned int dst_nents, - struct scatterlist *src_sg, unsigned int src_nents, - unsigned long dma_flags) -{ - if (dst_nents != src_nents) - return NULL; - - return d40_prep_sg(chan, src_sg, dst_sg, src_nents, - DMA_MEM_TO_MEM, dma_flags); -} - static struct dma_async_tx_descriptor * d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction direction, @@ -2821,9 +2808,6 @@ static void d40_ops_init(struct d40_base *base, struct dma_device *dev) dev->copy_align = DMAENGINE_ALIGN_4_BYTES; } - if (dma_has_cap(DMA_SG, dev->cap_mask)) - dev->device_prep_dma_sg = d40_prep_memcpy_sg; - if (dma_has_cap(DMA_CYCLIC, dev->cap_mask)) dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic; @@ -2865,7 +2849,6 @@ static int __init d40_dmaengine_init(struct d40_base *base, dma_cap_zero(base->dma_memcpy.cap_mask); dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); - dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask); d40_ops_init(base, &base->dma_memcpy); @@ -2883,7 +2866,6 @@ static int __init d40_dmaengine_init(struct d40_base *base, dma_cap_zero(base->dma_both.cap_mask); dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask); dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask); - dma_cap_set(DMA_SG, base->dma_both.cap_mask); dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask); d40_ops_init(base, &base->dma_both); diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index a2358780ab2c..bcd496edc70f 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c @@ -101,6 +101,17 @@ struct sun6i_dma_config { u32 nr_max_channels; u32 nr_max_requests; u32 nr_max_vchans; + /* + * In the datasheets/user manuals of newer Allwinner SoCs, a special + * bit (bit 2 at register 0x20) is present. + * It's named "DMA MCLK interface circuit auto gating bit" in the + * documents, and the footnote of this register says that this bit + * should be set up when initializing the DMA controller. + * Allwinner A23/A33 user manuals do not have this bit documented, + * however these SoCs really have and need this bit, as seen in the + * BSP kernel source code. + */ + bool gate_needed; }; /* @@ -1009,6 +1020,7 @@ static struct sun6i_dma_config sun8i_a23_dma_cfg = { .nr_max_channels = 8, .nr_max_requests = 24, .nr_max_vchans = 37, + .gate_needed = true, }; static struct sun6i_dma_config sun8i_a83t_dma_cfg = { @@ -1028,11 +1040,24 @@ static struct sun6i_dma_config sun8i_h3_dma_cfg = { .nr_max_vchans = 34, }; +/* + * The V3s have only 8 physical channels, a maximum DRQ port id of 23, + * and a total of 24 usable source and destination endpoints. 
+ */ + +static struct sun6i_dma_config sun8i_v3s_dma_cfg = { + .nr_max_channels = 8, + .nr_max_requests = 23, + .nr_max_vchans = 24, + .gate_needed = true, +}; + static const struct of_device_id sun6i_dma_match[] = { { .compatible = "allwinner,sun6i-a31-dma", .data = &sun6i_a31_dma_cfg }, { .compatible = "allwinner,sun8i-a23-dma", .data = &sun8i_a23_dma_cfg }, { .compatible = "allwinner,sun8i-a83t-dma", .data = &sun8i_a83t_dma_cfg }, { .compatible = "allwinner,sun8i-h3-dma", .data = &sun8i_h3_dma_cfg }, + { .compatible = "allwinner,sun8i-v3s-dma", .data = &sun8i_v3s_dma_cfg }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, sun6i_dma_match); @@ -1174,13 +1199,7 @@ static int sun6i_dma_probe(struct platform_device *pdev) goto err_dma_unregister; } - /* - * sun8i variant requires us to toggle a dma gating register, - * as seen in Allwinner's SDK. This register is not documented - * in the A23 user manual. - */ - if (of_device_is_compatible(pdev->dev.of_node, - "allwinner,sun8i-a23-dma")) + if (sdc->cfg->gate_needed) writel(SUN8I_DMA_GATE_ENABLE, sdc->base + SUN8I_DMA_GATE); return 0; diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c index 2403475a37cf..2f65a8fde21d 100644 --- a/drivers/dma/ti-dma-crossbar.c +++ b/drivers/dma/ti-dma-crossbar.c @@ -308,7 +308,7 @@ static const struct of_device_id ti_dra7_master_match[] = { static inline void ti_dra7_xbar_reserve(int offset, int len, unsigned long *p) { for (; len > 0; len--) - clear_bit(offset + (len - 1), p); + set_bit(offset + (len - 1), p); } static int ti_dra7_xbar_probe(struct platform_device *pdev) diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c index 8b693b712d0f..1d5988849aa6 100644 --- a/drivers/dma/xgene-dma.c +++ b/drivers/dma/xgene-dma.c @@ -391,11 +391,6 @@ static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len, *paddr += nbytes; } -static void xgene_dma_invalidate_buffer(__le64 *ext8) -{ - *ext8 |= cpu_to_le64(XGENE_DMA_INVALID_LEN_CODE); -} - static __le64 *xgene_dma_lookup_ext8(struct xgene_dma_desc_hw *desc, int idx) { switch (idx) { @@ -425,48 +420,6 @@ static void xgene_dma_init_desc(struct xgene_dma_desc_hw *desc, XGENE_DMA_DESC_HOENQ_NUM_POS); } -static void xgene_dma_prep_cpy_desc(struct xgene_dma_chan *chan, - struct xgene_dma_desc_sw *desc_sw, - dma_addr_t dst, dma_addr_t src, - size_t len) -{ - struct xgene_dma_desc_hw *desc1, *desc2; - int i; - - /* Get 1st descriptor */ - desc1 = &desc_sw->desc1; - xgene_dma_init_desc(desc1, chan->tx_ring.dst_ring_num); - - /* Set destination address */ - desc1->m2 |= cpu_to_le64(XGENE_DMA_DESC_DR_BIT); - desc1->m3 |= cpu_to_le64(dst); - - /* Set 1st source address */ - xgene_dma_set_src_buffer(&desc1->m1, &len, &src); - - if (!len) - return; - - /* - * We need to split this source buffer, - * and need to use 2nd descriptor - */ - desc2 = &desc_sw->desc2; - desc1->m0 |= cpu_to_le64(XGENE_DMA_DESC_NV_BIT); - - /* Set 2nd to 5th source address */ - for (i = 0; i < 4 && len; i++) - xgene_dma_set_src_buffer(xgene_dma_lookup_ext8(desc2, i), - &len, &src); - - /* Invalidate unused source address field */ - for (; i < 4; i++) - xgene_dma_invalidate_buffer(xgene_dma_lookup_ext8(desc2, i)); - - /* Updated flag that we have prepared 64B descriptor */ - desc_sw->flags |= XGENE_DMA_FLAG_64B_DESC; -} - static void xgene_dma_prep_xor_desc(struct xgene_dma_chan *chan, struct xgene_dma_desc_sw *desc_sw, dma_addr_t *dst, dma_addr_t *src, @@ -891,114 +844,6 @@ static void xgene_dma_free_chan_resources(struct dma_chan *dchan) chan->desc_pool = NULL; } 
-static struct dma_async_tx_descriptor *xgene_dma_prep_sg( - struct dma_chan *dchan, struct scatterlist *dst_sg, - u32 dst_nents, struct scatterlist *src_sg, - u32 src_nents, unsigned long flags) -{ - struct xgene_dma_desc_sw *first = NULL, *new = NULL; - struct xgene_dma_chan *chan; - size_t dst_avail, src_avail; - dma_addr_t dst, src; - size_t len; - - if (unlikely(!dchan)) - return NULL; - - if (unlikely(!dst_nents || !src_nents)) - return NULL; - - if (unlikely(!dst_sg || !src_sg)) - return NULL; - - chan = to_dma_chan(dchan); - - /* Get prepared for the loop */ - dst_avail = sg_dma_len(dst_sg); - src_avail = sg_dma_len(src_sg); - dst_nents--; - src_nents--; - - /* Run until we are out of scatterlist entries */ - while (true) { - /* Create the largest transaction possible */ - len = min_t(size_t, src_avail, dst_avail); - len = min_t(size_t, len, XGENE_DMA_MAX_64B_DESC_BYTE_CNT); - if (len == 0) - goto fetch; - - dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail; - src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail; - - /* Allocate the link descriptor from DMA pool */ - new = xgene_dma_alloc_descriptor(chan); - if (!new) - goto fail; - - /* Prepare DMA descriptor */ - xgene_dma_prep_cpy_desc(chan, new, dst, src, len); - - if (!first) - first = new; - - new->tx.cookie = 0; - async_tx_ack(&new->tx); - - /* update metadata */ - dst_avail -= len; - src_avail -= len; - - /* Insert the link descriptor to the LD ring */ - list_add_tail(&new->node, &first->tx_list); - -fetch: - /* fetch the next dst scatterlist entry */ - if (dst_avail == 0) { - /* no more entries: we're done */ - if (dst_nents == 0) - break; - - /* fetch the next entry: if there are no more: done */ - dst_sg = sg_next(dst_sg); - if (!dst_sg) - break; - - dst_nents--; - dst_avail = sg_dma_len(dst_sg); - } - - /* fetch the next src scatterlist entry */ - if (src_avail == 0) { - /* no more entries: we're done */ - if (src_nents == 0) - break; - - /* fetch the next entry: if there are no more: done */ - src_sg = sg_next(src_sg); - if (!src_sg) - break; - - src_nents--; - src_avail = sg_dma_len(src_sg); - } - } - - if (!new) - return NULL; - - new->tx.flags = flags; /* client is in control of this ack */ - new->tx.cookie = -EBUSY; - list_splice(&first->tx_list, &new->tx_list); - - return &new->tx; -fail: - if (!first) - return NULL; - - xgene_dma_free_desc_list(chan, &first->tx_list); - return NULL; -} - static struct dma_async_tx_descriptor *xgene_dma_prep_xor( struct dma_chan *dchan, dma_addr_t dst, dma_addr_t *src, u32 src_cnt, size_t len, unsigned long flags) @@ -1653,7 +1498,6 @@ static void xgene_dma_set_caps(struct xgene_dma_chan *chan, dma_cap_zero(dma_dev->cap_mask); /* Set DMA device capability */ - dma_cap_set(DMA_SG, dma_dev->cap_mask); /* Basically here, the X-Gene SoC DMA engine channel 0 supports XOR * and channel 1 supports XOR, PQ both. 
First thing here is we have @@ -1679,7 +1523,6 @@ static void xgene_dma_set_caps(struct xgene_dma_chan *chan, dma_dev->device_free_chan_resources = xgene_dma_free_chan_resources; dma_dev->device_issue_pending = xgene_dma_issue_pending; dma_dev->device_tx_status = xgene_dma_tx_status; - dma_dev->device_prep_dma_sg = xgene_dma_prep_sg; if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { dma_dev->device_prep_dma_xor = xgene_dma_prep_xor; @@ -1731,8 +1574,7 @@ static int xgene_dma_async_register(struct xgene_dma *pdma, int id) /* DMA capability info */ dev_info(pdma->dev, - "%s: CAPABILITY ( %s%s%s)\n", dma_chan_name(&chan->dma_chan), - dma_has_cap(DMA_SG, dma_dev->cap_mask) ? "SGCPY " : "", + "%s: CAPABILITY ( %s%s)\n", dma_chan_name(&chan->dma_chan), dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "XOR " : "", dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "PQ " : ""); diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index 8cf87b1a284b..8722bcba489d 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -2124,7 +2124,7 @@ static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk, *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); if (IS_ERR(*axi_clk)) { err = PTR_ERR(*axi_clk); - dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err); + dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err); return err; } @@ -2142,25 +2142,25 @@ static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk, err = clk_prepare_enable(*axi_clk); if (err) { - dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err); + dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err); return err; } err = clk_prepare_enable(*tx_clk); if (err) { - dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err); + dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err); goto err_disable_axiclk; } err = clk_prepare_enable(*rx_clk); if (err) { - dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err); + dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err); goto err_disable_txclk; } err = clk_prepare_enable(*sg_clk); if (err) { - dev_err(&pdev->dev, "failed to enable sg_clk (%u)\n", err); + dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err); goto err_disable_rxclk; } @@ -2189,26 +2189,26 @@ static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk, *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); if (IS_ERR(*axi_clk)) { err = PTR_ERR(*axi_clk); - dev_err(&pdev->dev, "failed to get axi_clk (%u)\n", err); + dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", err); return err; } *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk"); if (IS_ERR(*dev_clk)) { err = PTR_ERR(*dev_clk); - dev_err(&pdev->dev, "failed to get dev_clk (%u)\n", err); + dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", err); return err; } err = clk_prepare_enable(*axi_clk); if (err) { - dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err); + dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err); return err; } err = clk_prepare_enable(*dev_clk); if (err) { - dev_err(&pdev->dev, "failed to enable dev_clk (%u)\n", err); + dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err); goto err_disable_axiclk; } @@ -2229,7 +2229,7 @@ static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk, *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); if (IS_ERR(*axi_clk)) { err = PTR_ERR(*axi_clk); - dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err); + dev_err(&pdev->dev, "failed to get 
axi_aclk (%d)\n", err); return err; } @@ -2251,31 +2251,31 @@ static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk, err = clk_prepare_enable(*axi_clk); if (err) { - dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err); + dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err); return err; } err = clk_prepare_enable(*tx_clk); if (err) { - dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err); + dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err); goto err_disable_axiclk; } err = clk_prepare_enable(*txs_clk); if (err) { - dev_err(&pdev->dev, "failed to enable txs_clk (%u)\n", err); + dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err); goto err_disable_txclk; } err = clk_prepare_enable(*rx_clk); if (err) { - dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err); + dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err); goto err_disable_txsclk; } err = clk_prepare_enable(*rxs_clk); if (err) { - dev_err(&pdev->dev, "failed to enable rxs_clk (%u)\n", err); + dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err); goto err_disable_rxclk; } diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c index 47f64192d2fd..1ee1241ca797 100644 --- a/drivers/dma/xilinx/zynqmp_dma.c +++ b/drivers/dma/xilinx/zynqmp_dma.c @@ -829,98 +829,6 @@ static struct dma_async_tx_descriptor *zynqmp_dma_prep_memcpy( return &first->async_tx; } -/** - * zynqmp_dma_prep_slave_sg - prepare descriptors for a memory sg transaction - * @dchan: DMA channel - * @dst_sg: Destination scatter list - * @dst_sg_len: Number of entries in destination scatter list - * @src_sg: Source scatter list - * @src_sg_len: Number of entries in source scatter list - * @flags: transfer ack flags - * - * Return: Async transaction descriptor on success and NULL on failure - */ -static struct dma_async_tx_descriptor *zynqmp_dma_prep_sg( - struct dma_chan *dchan, struct scatterlist *dst_sg, - unsigned int dst_sg_len, struct scatterlist *src_sg, - unsigned int src_sg_len, unsigned long flags) -{ - struct zynqmp_dma_desc_sw *new, *first = NULL; - struct zynqmp_dma_chan *chan = to_chan(dchan); - void *desc = NULL, *prev = NULL; - size_t len, dst_avail, src_avail; - dma_addr_t dma_dst, dma_src; - u32 desc_cnt = 0, i; - struct scatterlist *sg; - - for_each_sg(src_sg, sg, src_sg_len, i) - desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), - ZYNQMP_DMA_MAX_TRANS_LEN); - - spin_lock_bh(&chan->lock); - if (desc_cnt > chan->desc_free_cnt) { - spin_unlock_bh(&chan->lock); - dev_dbg(chan->dev, "chan %p descs are not available\n", chan); - return NULL; - } - chan->desc_free_cnt = chan->desc_free_cnt - desc_cnt; - spin_unlock_bh(&chan->lock); - - dst_avail = sg_dma_len(dst_sg); - src_avail = sg_dma_len(src_sg); - - /* Run until we are out of scatterlist entries */ - while (true) { - /* Allocate and populate the descriptor */ - new = zynqmp_dma_get_descriptor(chan); - desc = (struct zynqmp_dma_desc_ll *)new->src_v; - len = min_t(size_t, src_avail, dst_avail); - len = min_t(size_t, len, ZYNQMP_DMA_MAX_TRANS_LEN); - if (len == 0) - goto fetch; - dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - - dst_avail; - dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - - src_avail; - - zynqmp_dma_config_sg_ll_desc(chan, desc, dma_src, dma_dst, - len, prev); - prev = desc; - dst_avail -= len; - src_avail -= len; - - if (!first) - first = new; - else - list_add_tail(&new->node, &first->tx_list); -fetch: - /* Fetch the next dst scatterlist entry */ - if (dst_avail == 0) { - if (dst_sg_len 
== 0) - break; - dst_sg = sg_next(dst_sg); - if (dst_sg == NULL) - break; - dst_sg_len--; - dst_avail = sg_dma_len(dst_sg); - } - /* Fetch the next src scatterlist entry */ - if (src_avail == 0) { - if (src_sg_len == 0) - break; - src_sg = sg_next(src_sg); - if (src_sg == NULL) - break; - src_sg_len--; - src_avail = sg_dma_len(src_sg); - } - } - - zynqmp_dma_desc_config_eod(chan, desc); - first->async_tx.flags = flags; - return &first->async_tx; -} - /** * zynqmp_dma_chan_remove - Channel remove function * @chan: ZynqMP DMA channel pointer @@ -1064,11 +972,9 @@ static int zynqmp_dma_probe(struct platform_device *pdev) INIT_LIST_HEAD(&zdev->common.channels); dma_set_mask(&pdev->dev, DMA_BIT_MASK(44)); - dma_cap_set(DMA_SG, zdev->common.cap_mask); dma_cap_set(DMA_MEMCPY, zdev->common.cap_mask); p = &zdev->common; - p->device_prep_dma_sg = zynqmp_dma_prep_sg; p->device_prep_dma_memcpy = zynqmp_dma_prep_memcpy; p->device_terminate_all = zynqmp_dma_device_terminate_all; p->device_issue_pending = zynqmp_dma_issue_pending; diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c index 8043e51de897..7da9f1b83ebe 100644 --- a/drivers/firmware/arm_scpi.c +++ b/drivers/firmware/arm_scpi.c @@ -357,7 +357,7 @@ struct sensor_value { } __packed; struct dev_pstate_set { - u16 dev_id; + __le16 dev_id; u8 pstate; } __packed; @@ -965,7 +965,7 @@ static int scpi_probe(struct platform_device *pdev) count = of_count_phandle_with_args(np, "mboxes", "#mbox-cells"); if (count < 0) { - dev_err(dev, "no mboxes property in '%s'\n", np->full_name); + dev_err(dev, "no mboxes property in '%pOF'\n", np); return -ENODEV; } diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c index 2fe1a130189f..c16600f30611 100644 --- a/drivers/firmware/dcdbas.c +++ b/drivers/firmware/dcdbas.c @@ -534,7 +534,7 @@ static struct attribute *dcdbas_dev_attrs[] = { NULL }; -static struct attribute_group dcdbas_attr_group = { +static const struct attribute_group dcdbas_attr_group = { .attrs = dcdbas_dev_attrs, .bin_attrs = dcdbas_bin_attrs, }; diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index 394db40ed374..2b4c39fdfa91 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig @@ -151,6 +151,16 @@ config APPLE_PROPERTIES If unsure, say Y if you have a Mac. Otherwise N. +config RESET_ATTACK_MITIGATION + bool "Reset memory attack mitigation" + depends on EFI_STUB + help + Request that the firmware clear the contents of RAM after a reboot + using the TCG Platform Reset Attack Mitigation specification. This + protects against an attacker forcibly rebooting the system while it + still contains secrets in RAM, booting another OS and extracting the + secrets. 
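The option above only asks the firmware for the wipe; the request itself is carried in the TCG MemoryOverwriteRequestControl EFI variable, which the EFI stub sets in the new libstub tpm.c further down in this series. Once userspace has scrubbed its own secrets it can withdraw the request so the next reboot is not delayed by a full RAM clear. A minimal userspace sketch of that step, under the assumption that efivarfs is mounted at /sys/firmware/efi/efivars (the GUID matches MEMORY_ONLY_RESET_CONTROL_GUID used by the stub; the helper name clear_mor_request is invented for illustration):

/* Illustrative only: withdraw the MemoryOverwriteRequestControl request
 * from userspace after all secrets have been wiped from RAM.  Assumes
 * efivarfs at the usual mount point; the variable file may carry the
 * immutable attribute and need "chattr -i" before the write succeeds.
 */
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

#define MOR_VAR "/sys/firmware/efi/efivars/" \
	"MemoryOverwriteRequestControl-e20939be-32d4-41be-a150-897f85d49829"

int clear_mor_request(void)
{
	/* efivarfs writes start with a 4-byte attribute word:
	 * NON_VOLATILE | BOOTSERVICE_ACCESS | RUNTIME_ACCESS = 0x7,
	 * the same attributes the stub uses when setting the variable.
	 * The single data byte of 0 withdraws the overwrite request. */
	uint8_t buf[5] = { 0x07, 0x00, 0x00, 0x00, 0x00 };
	int fd = open(MOR_VAR, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, buf, sizeof(buf));
	close(fd);
	return n == (ssize_t)sizeof(buf) ? 0 : -1;
}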
+ endmenu config UEFI_CPER diff --git a/drivers/firmware/efi/apple-properties.c b/drivers/firmware/efi/apple-properties.c index c473f4c5ca34..9f6bcf173b0e 100644 --- a/drivers/firmware/efi/apple-properties.c +++ b/drivers/firmware/efi/apple-properties.c @@ -18,8 +18,8 @@ #define pr_fmt(fmt) "apple-properties: " fmt #include -#include #include +#include #include #include #include @@ -191,8 +191,7 @@ static int __init map_properties(void) u64 pa_data; int ret; - if (!dmi_match(DMI_SYS_VENDOR, "Apple Inc.") && - !dmi_match(DMI_SYS_VENDOR, "Apple Computer, Inc.")) + if (!x86_apple_machine) return 0; pa_data = boot_params.hdr.setup_data; diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c index 1027d7b44358..80d1a885def5 100644 --- a/drivers/firmware/efi/arm-init.c +++ b/drivers/firmware/efi/arm-init.c @@ -145,6 +145,9 @@ static int __init uefi_init(void) sizeof(efi_config_table_t), arch_tables); + if (!retval) + efi.config_table = (unsigned long)efi.systab->tables; + early_memunmap(config_tables, table_size); out: early_memunmap(efi.systab, sizeof(efi_system_table_t)); @@ -159,6 +162,7 @@ static __init int is_usable_memory(efi_memory_desc_t *md) switch (md->type) { case EFI_LOADER_CODE: case EFI_LOADER_DATA: + case EFI_ACPI_RECLAIM_MEMORY: case EFI_BOOT_SERVICES_CODE: case EFI_BOOT_SERVICES_DATA: case EFI_CONVENTIONAL_MEMORY: @@ -211,6 +215,10 @@ static __init void reserve_regions(void) if (!is_usable_memory(md)) memblock_mark_nomap(paddr, size); + + /* keep ACPI reclaim memory intact for kexec etc. */ + if (md->type == EFI_ACPI_RECLAIM_MEMORY) + memblock_reserve(paddr, size); } } } diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index 48a8f69da42a..d2fcafcea07e 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -534,7 +534,7 @@ static void cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata, int sec_no) { - uuid_le *sec_type = (uuid_le *)gdata->section_type; + guid_t *sec_type = (guid_t *)gdata->section_type; __u16 severity; char newpfx[64]; @@ -545,12 +545,12 @@ cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata printk("%s""Error %d, type: %s\n", pfx, sec_no, cper_severity_str(severity)); if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID) - printk("%s""fru_id: %pUl\n", pfx, (uuid_le *)gdata->fru_id); + printk("%s""fru_id: %pUl\n", pfx, gdata->fru_id); if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT) printk("%s""fru_text: %.20s\n", pfx, gdata->fru_text); snprintf(newpfx, sizeof(newpfx), "%s%s", pfx, INDENT_SP); - if (!uuid_le_cmp(*sec_type, CPER_SEC_PROC_GENERIC)) { + if (guid_equal(sec_type, &CPER_SEC_PROC_GENERIC)) { struct cper_sec_proc_generic *proc_err = acpi_hest_get_payload(gdata); printk("%s""section_type: general processor error\n", newpfx); @@ -558,7 +558,7 @@ cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata cper_print_proc_generic(newpfx, proc_err); else goto err_section_too_small; - } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) { + } else if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) { struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata); printk("%s""section_type: memory error\n", newpfx); @@ -568,7 +568,7 @@ cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata gdata->error_data_length); else goto err_section_too_small; - } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PCIE)) { + } else if (guid_equal(sec_type, &CPER_SEC_PCIE)) { struct cper_sec_pcie *pcie = 
acpi_hest_get_payload(gdata); printk("%s""section_type: PCIe error\n", newpfx); @@ -606,7 +606,6 @@ void cper_estatus_print(const char *pfx, const struct acpi_hest_generic_status *estatus) { struct acpi_hest_generic_data *gdata; - unsigned int data_len; int sec_no = 0; char newpfx[64]; __u16 severity; @@ -617,14 +616,10 @@ void cper_estatus_print(const char *pfx, "It has been corrected by h/w " "and requires no further action"); printk("%s""event severity: %s\n", pfx, cper_severity_str(severity)); - data_len = estatus->data_length; - gdata = (struct acpi_hest_generic_data *)(estatus + 1); snprintf(newpfx, sizeof(newpfx), "%s%s", pfx, INDENT_SP); - while (data_len >= acpi_hest_get_size(gdata)) { + apei_estatus_for_each_section(estatus, gdata) { cper_estatus_print_section(newpfx, gdata, sec_no); - data_len -= acpi_hest_get_record_size(gdata); - gdata = acpi_hest_get_next(gdata); sec_no++; } } @@ -653,15 +648,12 @@ int cper_estatus_check(const struct acpi_hest_generic_status *estatus) if (rc) return rc; data_len = estatus->data_length; - gdata = (struct acpi_hest_generic_data *)(estatus + 1); - while (data_len >= acpi_hest_get_size(gdata)) { + apei_estatus_for_each_section(estatus, gdata) { gedata_len = acpi_hest_get_error_length(gdata); if (gedata_len > data_len - acpi_hest_get_size(gdata)) return -EINVAL; - data_len -= acpi_hest_get_record_size(gdata); - gdata = acpi_hest_get_next(gdata); } if (data_len) return -EINVAL; diff --git a/drivers/firmware/efi/efi-bgrt.c b/drivers/firmware/efi/efi-bgrt.c index b58233e4ed71..50793fda7819 100644 --- a/drivers/firmware/efi/efi-bgrt.c +++ b/drivers/firmware/efi/efi-bgrt.c @@ -27,26 +27,6 @@ struct bmp_header { u32 size; } __packed; -static bool efi_bgrt_addr_valid(u64 addr) -{ - efi_memory_desc_t *md; - - for_each_efi_memory_desc(md) { - u64 size; - u64 end; - - if (md->type != EFI_BOOT_SERVICES_DATA) - continue; - - size = md->num_pages << EFI_PAGE_SHIFT; - end = md->phys_addr + size; - if (addr >= md->phys_addr && addr < end) - return true; - } - - return false; -} - void __init efi_bgrt_init(struct acpi_table_header *table) { void *image; @@ -85,7 +65,7 @@ void __init efi_bgrt_init(struct acpi_table_header *table) goto out; } - if (!efi_bgrt_addr_valid(bgrt->image_address)) { + if (efi_mem_type(bgrt->image_address) != EFI_BOOT_SERVICES_DATA) { pr_notice("Ignoring BGRT: invalid image address\n"); goto out; } diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 69d4d130e055..f70febf680c3 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -198,7 +198,7 @@ static umode_t efi_attr_is_visible(struct kobject *kobj, return attr->mode; } -static struct attribute_group efi_subsys_attr_group = { +static const struct attribute_group efi_subsys_attr_group = { .attrs = efi_subsys_attrs, .is_visible = efi_attr_is_visible, }; @@ -541,6 +541,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz, if (seed != NULL) { add_device_randomness(seed->bits, seed->size); early_memunmap(seed, sizeof(*seed) + size); + pr_notice("seeding entropy pool\n"); } else { pr_err("Could not map UEFI random seed!\n"); } @@ -809,6 +810,11 @@ char * __init efi_md_typeattr_format(char *buf, size_t size, return buf; } +/* + * IA64 has a funky EFI memory map that doesn't work the same way as + * other architectures. 
+ */ +#ifndef CONFIG_IA64 /* * efi_mem_attributes - lookup memmap attributes for physical address * @phys_addr: the physical address to lookup @@ -816,13 +822,8 @@ char * __init efi_md_typeattr_format(char *buf, size_t size, * Search in the EFI memory map for the region covering * @phys_addr. Returns the EFI memory attributes if the region * was found in the memory map, 0 otherwise. - * - * Despite being marked __weak, most architectures should *not* - * override this function. It is __weak solely for the benefit - * of ia64 which has a funky EFI memory map that doesn't work - * the same way as other architectures. */ -u64 __weak efi_mem_attributes(unsigned long phys_addr) +u64 efi_mem_attributes(unsigned long phys_addr) { efi_memory_desc_t *md; @@ -838,6 +839,31 @@ u64 __weak efi_mem_attributes(unsigned long phys_addr) return 0; } +/* + * efi_mem_type - lookup memmap type for physical address + * @phys_addr: the physical address to lookup + * + * Search in the EFI memory map for the region covering @phys_addr. + * Returns the EFI memory type if the region was found in the memory + * map, EFI_RESERVED_TYPE (zero) otherwise. + */ +int efi_mem_type(unsigned long phys_addr) +{ + const efi_memory_desc_t *md; + + if (!efi_enabled(EFI_MEMMAP)) + return -ENOTSUPP; + + for_each_efi_memory_desc(md) { + if ((md->phys_addr <= phys_addr) && + (phys_addr < (md->phys_addr + + (md->num_pages << EFI_PAGE_SHIFT)))) + return md->type; + } + return -EINVAL; +} +#endif + int efi_status_to_err(efi_status_t status) { int err; @@ -900,7 +926,7 @@ static int update_efi_random_seed(struct notifier_block *nb, seed = memremap(efi.rng_seed, sizeof(*seed), MEMREMAP_WB); if (seed != NULL) { - size = min(seed->size, 32U); + size = min(seed->size, EFI_RANDOM_SEED_SIZE); memunmap(seed); } else { pr_err("Could not map UEFI random seed!\n"); diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c index 8554d7aec31c..bd7ed3c1148a 100644 --- a/drivers/firmware/efi/esrt.c +++ b/drivers/firmware/efi/esrt.c @@ -230,7 +230,7 @@ static umode_t esrt_attr_is_visible(struct kobject *kobj, return attr->mode; } -static struct attribute_group esrt_attr_group = { +static const struct attribute_group esrt_attr_group = { .attrs = esrt_attrs, .is_visible = esrt_attr_is_visible, }; diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index 37e24f525162..dedf9bde44db 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -10,7 +10,7 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -O2 \ -fPIC -fno-strict-aliasing -mno-red-zone \ -mno-mmx -mno-sse -cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS)) +cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS)) -fpie cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \ -fno-builtin -fpic -mno-single-pic-base @@ -30,6 +30,7 @@ OBJECT_FILES_NON_STANDARD := y KCOV_INSTRUMENT := n lib-y := efi-stub-helper.o gop.o secureboot.o +lib-$(CONFIG_RESET_ATTACK_MITIGATION) += tpm.o # include the stub's generic dependencies from lib/ when building for ARM/arm64 arm-deps := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c sort.c diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c index 8181ac179d14..1cb2d1c070c3 100644 --- a/drivers/firmware/efi/libstub/arm-stub.c +++ b/drivers/firmware/efi/libstub/arm-stub.c @@ -192,6 +192,9 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table, goto fail_free_cmdline; } + /* Ask the firmware to clear 
memory on unclean shutdown */ + efi_enable_reset_attack_mitigation(sys_table); + secure_boot = efi_get_secureboot(sys_table); /* diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c index af6ae95a5e34..b9bd827caa22 100644 --- a/drivers/firmware/efi/libstub/arm64-stub.c +++ b/drivers/firmware/efi/libstub/arm64-stub.c @@ -9,10 +9,18 @@ * published by the Free Software Foundation. * */ + +/* + * To prevent the compiler from emitting GOT-indirected (and thus absolute) + * references to the section markers, override their visibility as 'hidden' + */ +#pragma GCC visibility push(hidden) +#include +#pragma GCC visibility pop + #include #include #include -#include #include #include "efistub.h" diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c index 7e72954d5860..e0e603a89aa9 100644 --- a/drivers/firmware/efi/libstub/random.c +++ b/drivers/firmware/efi/libstub/random.c @@ -145,8 +145,6 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg, return status; } -#define RANDOM_SEED_SIZE 32 - efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg) { efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID; @@ -162,25 +160,25 @@ efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg) return status; status = efi_call_early(allocate_pool, EFI_RUNTIME_SERVICES_DATA, - sizeof(*seed) + RANDOM_SEED_SIZE, + sizeof(*seed) + EFI_RANDOM_SEED_SIZE, (void **)&seed); if (status != EFI_SUCCESS) return status; - status = rng->get_rng(rng, &rng_algo_raw, RANDOM_SEED_SIZE, + status = rng->get_rng(rng, &rng_algo_raw, EFI_RANDOM_SEED_SIZE, seed->bits); if (status == EFI_UNSUPPORTED) /* * Use whatever algorithm we have available if the raw algorithm * is not implemented. */ - status = rng->get_rng(rng, NULL, RANDOM_SEED_SIZE, + status = rng->get_rng(rng, NULL, EFI_RANDOM_SEED_SIZE, seed->bits); if (status != EFI_SUCCESS) goto err_freepool; - seed->size = RANDOM_SEED_SIZE; + seed->size = EFI_RANDOM_SEED_SIZE; status = efi_call_early(install_configuration_table, &rng_table_guid, seed); if (status != EFI_SUCCESS) diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c new file mode 100644 index 000000000000..6224cdbc9669 --- /dev/null +++ b/drivers/firmware/efi/libstub/tpm.c @@ -0,0 +1,58 @@ +/* + * TPM handling. + * + * Copyright (C) 2016 CoreOS, Inc + * Copyright (C) 2017 Google, Inc. + * Matthew Garrett + * + * This file is part of the Linux kernel, and is made available under the + * terms of the GNU General Public License version 2. + */ +#include +#include + +#include "efistub.h" + +static const efi_char16_t efi_MemoryOverWriteRequest_name[] = { + 'M', 'e', 'm', 'o', 'r', 'y', 'O', 'v', 'e', 'r', 'w', 'r', 'i', 't', + 'e', 'R', 'e', 'q', 'u', 'e', 's', 't', 'C', 'o', 'n', 't', 'r', 'o', + 'l', 0 +}; + +#define MEMORY_ONLY_RESET_CONTROL_GUID \ + EFI_GUID(0xe20939be, 0x32d4, 0x41be, 0xa1, 0x50, 0x89, 0x7f, 0x85, 0xd4, 0x98, 0x29) + +#define get_efi_var(name, vendor, ...) \ + efi_call_runtime(get_variable, \ + (efi_char16_t *)(name), (efi_guid_t *)(vendor), \ + __VA_ARGS__) + +#define set_efi_var(name, vendor, ...) \ + efi_call_runtime(set_variable, \ + (efi_char16_t *)(name), (efi_guid_t *)(vendor), \ + __VA_ARGS__) + +/* + * Enable reboot attack mitigation. This requests that the firmware clear the + * RAM on next reboot before proceeding with boot, ensuring that any secrets + * are cleared. 
If userland has ensured that all secrets have been removed + * from RAM before reboot it can simply reset this variable. + */ +void efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg) +{ + u8 val = 1; + efi_guid_t var_guid = MEMORY_ONLY_RESET_CONTROL_GUID; + efi_status_t status; + unsigned long datasize = 0; + + status = get_efi_var(efi_MemoryOverWriteRequest_name, &var_guid, + NULL, &datasize, NULL); + + if (status == EFI_NOT_FOUND) + return; + + set_efi_var(efi_MemoryOverWriteRequest_name, &var_guid, + EFI_VARIABLE_NON_VOLATILE | + EFI_VARIABLE_BOOTSERVICE_ACCESS | + EFI_VARIABLE_RUNTIME_ACCESS, sizeof(val), &val); +} diff --git a/drivers/firmware/efi/reboot.c b/drivers/firmware/efi/reboot.c index 62ead9b9d871..22874544d301 100644 --- a/drivers/firmware/efi/reboot.c +++ b/drivers/firmware/efi/reboot.c @@ -5,6 +5,8 @@ #include #include +static void (*orig_pm_power_off)(void); + int efi_reboot_quirk_mode = -1; void efi_reboot(enum reboot_mode reboot_mode, const char *__unused) @@ -51,6 +53,12 @@ bool __weak efi_poweroff_required(void) static void efi_power_off(void) { efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL); + /* + * The above call should not return, if it does fall back to + * the original power off method (typically ACPI poweroff). + */ + if (orig_pm_power_off) + orig_pm_power_off(); } static int __init efi_shutdown_init(void) @@ -58,8 +66,10 @@ static int __init efi_shutdown_init(void) if (!efi_enabled(EFI_RUNTIME_SERVICES)) return -ENODEV; - if (efi_poweroff_required()) + if (efi_poweroff_required()) { + orig_pm_power_off = pm_power_off; pm_power_off = efi_power_off; + } return 0; } diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c index c46387160976..c8f169bf2e27 100644 --- a/drivers/firmware/google/gsmi.c +++ b/drivers/firmware/google/gsmi.c @@ -709,7 +709,7 @@ static u32 __init hash_oem_table_id(char s[8]) return local_hash_64(input, 32); } -static struct dmi_system_id gsmi_dmi_table[] __initdata = { +static const struct dmi_system_id gsmi_dmi_table[] __initconst = { { .ident = "Google Board", .matches = { diff --git a/drivers/firmware/google/memconsole-x86-legacy.c b/drivers/firmware/google/memconsole-x86-legacy.c index 8c1bf6dbdaa6..19bcbd10855b 100644 --- a/drivers/firmware/google/memconsole-x86-legacy.c +++ b/drivers/firmware/google/memconsole-x86-legacy.c @@ -126,7 +126,7 @@ static bool memconsole_ebda_init(void) return false; } -static struct dmi_system_id memconsole_dmi_table[] __initdata = { +static const struct dmi_system_id memconsole_dmi_table[] __initconst = { { .ident = "Google Board", .matches = { diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c index 493a56a4cfc4..d687ca3d5049 100644 --- a/drivers/firmware/psci.c +++ b/drivers/firmware/psci.c @@ -280,8 +280,8 @@ static int psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu) "arm,psci-suspend-param", &state); if (ret) { - pr_warn(" * %s missing arm,psci-suspend-param property\n", - state_node->full_name); + pr_warn(" * %pOF missing arm,psci-suspend-param property\n", + state_node); of_node_put(state_node); goto free_mem; } diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c index b25179517cc5..73ca55b7b7ec 100644 --- a/drivers/firmware/tegra/bpmp.c +++ b/drivers/firmware/tegra/bpmp.c @@ -806,6 +806,8 @@ static int tegra_bpmp_probe(struct platform_device *pdev) dev_info(&pdev->dev, "firmware: %s\n", tag); + platform_set_drvdata(pdev, bpmp); + err = of_platform_default_populate(pdev->dev.of_node, NULL, 
&pdev->dev); if (err < 0) goto free_mrq; @@ -822,8 +824,6 @@ static int tegra_bpmp_probe(struct platform_device *pdev) if (err < 0) goto free_mrq; - platform_set_drvdata(pdev, bpmp); - return 0; free_mrq: diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c index 08629ee69d11..00e73d28077c 100644 --- a/drivers/fpga/altera-cvp.c +++ b/drivers/fpga/altera-cvp.c @@ -361,12 +361,12 @@ static const struct fpga_manager_ops altera_cvp_ops = { .write_complete = altera_cvp_write_complete, }; -static ssize_t show_chkcfg(struct device_driver *dev, char *buf) +static ssize_t chkcfg_show(struct device_driver *dev, char *buf) { return snprintf(buf, 3, "%d\n", altera_cvp_chkcfg); } -static ssize_t store_chkcfg(struct device_driver *drv, const char *buf, +static ssize_t chkcfg_store(struct device_driver *drv, const char *buf, size_t count) { int ret; @@ -378,7 +378,7 @@ static ssize_t store_chkcfg(struct device_driver *drv, const char *buf, return count; } -static DRIVER_ATTR(chkcfg, 0600, show_chkcfg, store_chkcfg); +static DRIVER_ATTR_RW(chkcfg); static int altera_cvp_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id); diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 3388d54ba114..3f80f167ed56 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -453,7 +453,8 @@ config GPIO_TS4800 config GPIO_THUNDERX tristate "Cavium ThunderX/OCTEON-TX GPIO" depends on ARCH_THUNDER || (64BIT && COMPILE_TEST) - depends on PCI_MSI && IRQ_DOMAIN_HIERARCHY + depends on PCI_MSI + select IRQ_DOMAIN_HIERARCHY select IRQ_FASTEOI_HIERARCHY_HANDLERS help Say yes here to support the on-chip GPIO lines on the ThunderX diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index dbf869fb63ce..3233b72b6828 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c @@ -518,7 +518,13 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type) if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) irq_set_handler_locked(d, handle_level_irq); else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) - irq_set_handler_locked(d, handle_edge_irq); + /* + * Edge IRQs are already cleared/acked in irq_handler and + * not need to be masked, as result handle_edge_irq() + * logic is excessed here and may cause lose of interrupts. + * So just use handle_simple_irq. 
+ */ + irq_set_handler_locked(d, handle_simple_irq); return 0; @@ -678,7 +684,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset) static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank) { void __iomem *isr_reg = NULL; - u32 isr; + u32 enabled, isr, level_mask; unsigned int bit; struct gpio_bank *bank = gpiobank; unsigned long wa_lock_flags; @@ -691,23 +697,21 @@ static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank) pm_runtime_get_sync(bank->chip.parent); while (1) { - u32 isr_saved, level_mask = 0; - u32 enabled; - raw_spin_lock_irqsave(&bank->lock, lock_flags); enabled = omap_get_gpio_irqbank_mask(bank); - isr_saved = isr = readl_relaxed(isr_reg) & enabled; + isr = readl_relaxed(isr_reg) & enabled; if (bank->level_mask) level_mask = bank->level_mask & enabled; + else + level_mask = 0; /* clear edge sensitive interrupts before handler(s) are called so that we don't miss any interrupt occurred while executing them */ - omap_disable_gpio_irqbank(bank, isr_saved & ~level_mask); - omap_clear_gpio_irqbank(bank, isr_saved & ~level_mask); - omap_enable_gpio_irqbank(bank, isr_saved & ~level_mask); + if (isr & ~level_mask) + omap_clear_gpio_irqbank(bank, isr & ~level_mask); raw_spin_unlock_irqrestore(&bank->lock, lock_flags); @@ -1010,7 +1014,7 @@ static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value) /*---------------------------------------------------------------------*/ -static void __init omap_gpio_show_rev(struct gpio_bank *bank) +static void omap_gpio_show_rev(struct gpio_bank *bank) { static bool called; u32 rev; diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c index 24f388ed46d4..9b511df5450e 100644 --- a/drivers/gpio/gpio-twl4030.c +++ b/drivers/gpio/gpio-twl4030.c @@ -35,7 +35,7 @@ #include #include -#include +#include /* * The GPIO "subchip" supports 18 GPIOs which can be configured as diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index 4d2113530735..eb4528c87c0b 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -203,7 +203,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, if (pin <= 255) { char ev_name[5]; - sprintf(ev_name, "_%c%02X", + sprintf(ev_name, "_%c%02hhX", agpio->triggering == ACPI_EDGE_SENSITIVE ? 
'E' : 'L', pin); if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle))) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 12e71bbfd222..103635ab784c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -76,7 +76,7 @@ extern int amdgpu_modeset; extern int amdgpu_vram_limit; extern int amdgpu_vis_vram_limit; -extern unsigned amdgpu_gart_size; +extern int amdgpu_gart_size; extern int amdgpu_gtt_size; extern int amdgpu_moverate; extern int amdgpu_benchmarking; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c index fb6e5dbd5a03..309f2419c6d8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c @@ -155,7 +155,6 @@ static const struct kfd2kgd_calls kfd2kgd = { struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void) { return (struct kfd2kgd_calls *)&kfd2kgd; - return (struct kfd2kgd_calls *)&kfd2kgd; } static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 269b835571eb..60d8bedb694d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1079,6 +1079,9 @@ static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p, GFP_KERNEL); p->num_post_dep_syncobjs = 0; + if (!p->post_dep_syncobjs) + return -ENOMEM; + for (i = 0; i < num_deps; ++i) { p->post_dep_syncobjs[i] = drm_syncobj_find(p->filp, deps[i].handle); if (!p->post_dep_syncobjs[i]) @@ -1150,7 +1153,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence); job->uf_sequence = cs->out.handle; amdgpu_job_free_resources(job); - amdgpu_cs_parser_fini(p, 0, true); trace_amdgpu_cs_ioctl(job); amd_sched_entity_push_job(&job->base); @@ -1208,10 +1210,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) goto out; r = amdgpu_cs_submit(&parser, cs); - if (r) - goto out; - return 0; out: amdgpu_cs_parser_fini(&parser, r, reserved_buffers); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 1a459ac63df4..e630d918fefc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1062,11 +1062,11 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev) amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs); } - if (amdgpu_gart_size < 32) { + if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) { /* gart size must be greater or equal to 32M */ dev_warn(adev->dev, "gart size (%d) too small\n", amdgpu_gart_size); - amdgpu_gart_size = 32; + amdgpu_gart_size = -1; } if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) { @@ -2622,12 +2622,6 @@ static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev, goto err; } - r = amdgpu_ttm_bind(&bo->shadow->tbo, &bo->shadow->tbo.mem); - if (r) { - DRM_ERROR("%p bind failed\n", bo->shadow); - goto err; - } - r = amdgpu_bo_restore_from_shadow(adev, ring, bo, NULL, fence, true); if (r) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index e39ec981b11c..0f16986ec5bc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -76,7 +76,7 @@ int amdgpu_vram_limit = 0; int amdgpu_vis_vram_limit = 0; -unsigned 
amdgpu_gart_size = 256; +int amdgpu_gart_size = -1; /* auto */ int amdgpu_gtt_size = -1; /* auto */ int amdgpu_moverate = -1; /* auto */ int amdgpu_benchmarking = 0; @@ -128,7 +128,7 @@ module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); MODULE_PARM_DESC(vis_vramlimit, "Restrict visible VRAM for testing, in megabytes"); module_param_named(vis_vramlimit, amdgpu_vis_vram_limit, int, 0444); -MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc.)"); +MODULE_PARM_DESC(gartsize, "Size of GART to setup in megabytes (32, 64, etc., -1=auto)"); module_param_named(gartsize, amdgpu_gart_size, uint, 0600); MODULE_PARM_DESC(gttsize, "Size of the GTT domain in megabytes (-1 = auto)"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index 94c1e2e8e34c..f4370081f6e6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -56,18 +56,6 @@ * Common GART table functions. */ -/** - * amdgpu_gart_set_defaults - set the default gart_size - * - * @adev: amdgpu_device pointer - * - * Set the default gart_size based on parameters and available VRAM. - */ -void amdgpu_gart_set_defaults(struct amdgpu_device *adev) -{ - adev->mc.gart_size = (uint64_t)amdgpu_gart_size << 20; -} - /** * amdgpu_gart_table_ram_alloc - allocate system ram for gart page table * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h index d4cce6936200..afbe803b1a13 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h @@ -56,7 +56,6 @@ struct amdgpu_gart { const struct amdgpu_gart_funcs *gart_funcs; }; -void amdgpu_gart_set_defaults(struct amdgpu_device *adev); int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev); void amdgpu_gart_table_ram_free(struct amdgpu_device *adev); int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index 9e05e257729f..0d15eb7d31d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c @@ -108,10 +108,10 @@ bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem) * * Allocate the address space for a node. 
*/ -int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, - struct ttm_buffer_object *tbo, - const struct ttm_place *place, - struct ttm_mem_reg *mem) +static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, + struct ttm_buffer_object *tbo, + const struct ttm_place *place, + struct ttm_mem_reg *mem) { struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev); struct amdgpu_gtt_mgr *mgr = man->priv; @@ -143,12 +143,8 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, fpfn, lpfn, mode); spin_unlock(&mgr->lock); - if (!r) { + if (!r) mem->start = node->start; - if (&tbo->mem == mem) - tbo->offset = (tbo->mem.start << PAGE_SHIFT) + - tbo->bdev->man[tbo->mem.mem_type].gpu_offset; - } return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 4bdd851f56d0..538e5f27d120 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -221,8 +221,9 @@ int amdgpu_irq_init(struct amdgpu_device *adev) spin_lock_init(&adev->irq.lock); - /* Disable vblank irqs aggressively for power-saving */ - adev->ddev->vblank_disable_immediate = true; + if (!adev->enable_virtual_display) + /* Disable vblank irqs aggressively for power-saving */ + adev->ddev->vblank_disable_immediate = true; r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc); if (r) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index e1cde6b80027..3b0f2ec6eec7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c @@ -51,7 +51,7 @@ struct amdgpu_mn { /* objects protected by lock */ struct mutex lock; - struct rb_root objects; + struct rb_root_cached objects; }; struct amdgpu_mn_node { @@ -76,8 +76,8 @@ static void amdgpu_mn_destroy(struct work_struct *work) mutex_lock(&adev->mn_lock); mutex_lock(&rmn->lock); hash_del(&rmn->node); - rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects, - it.rb) { + rbtree_postorder_for_each_entry_safe(node, next_node, + &rmn->objects.rb_root, it.rb) { list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) { bo->mn = NULL; list_del_init(&bo->mn_list); @@ -221,7 +221,7 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev) rmn->mm = mm; rmn->mn.ops = &amdgpu_mn_ops; mutex_init(&rmn->lock); - rmn->objects = RB_ROOT; + rmn->objects = RB_ROOT_CACHED; r = __mmu_notifier_register(&rmn->mn, mm); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index e7e899190bef..9e495da0bb03 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -91,7 +91,10 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev, if (domain & AMDGPU_GEM_DOMAIN_GTT) { places[c].fpfn = 0; - places[c].lpfn = 0; + if (flags & AMDGPU_GEM_CREATE_SHADOW) + places[c].lpfn = adev->mc.gart_size >> PAGE_SHIFT; + else + places[c].lpfn = 0; places[c].flags = TTM_PL_FLAG_TT; if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) places[c].flags |= TTM_PL_FLAG_WC | @@ -446,17 +449,16 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev, if (bo->shadow) return 0; - bo->flags |= AMDGPU_GEM_CREATE_SHADOW; - memset(&placements, 0, - (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place)); - - amdgpu_ttm_placement_init(adev, &placement, - placements, AMDGPU_GEM_DOMAIN_GTT, - AMDGPU_GEM_CREATE_CPU_GTT_USWC); + memset(&placements, 0, sizeof(placements)); + amdgpu_ttm_placement_init(adev, &placement, placements, + 
AMDGPU_GEM_DOMAIN_GTT, + AMDGPU_GEM_CREATE_CPU_GTT_USWC | + AMDGPU_GEM_CREATE_SHADOW); r = amdgpu_bo_create_restricted(adev, size, byte_align, true, AMDGPU_GEM_DOMAIN_GTT, - AMDGPU_GEM_CREATE_CPU_GTT_USWC, + AMDGPU_GEM_CREATE_CPU_GTT_USWC | + AMDGPU_GEM_CREATE_SHADOW, NULL, &placement, bo->tbo.resv, 0, @@ -484,30 +486,28 @@ int amdgpu_bo_create(struct amdgpu_device *adev, { struct ttm_placement placement = {0}; struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1]; + uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW; int r; - memset(&placements, 0, - (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place)); + memset(&placements, 0, sizeof(placements)); + amdgpu_ttm_placement_init(adev, &placement, placements, + domain, parent_flags); - amdgpu_ttm_placement_init(adev, &placement, - placements, domain, flags); - - r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel, - domain, flags, sg, &placement, - resv, init_value, bo_ptr); + r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel, domain, + parent_flags, sg, &placement, resv, + init_value, bo_ptr); if (r) return r; - if (amdgpu_need_backup(adev) && (flags & AMDGPU_GEM_CREATE_SHADOW)) { - if (!resv) { - r = ww_mutex_lock(&(*bo_ptr)->tbo.resv->lock, NULL); - WARN_ON(r != 0); - } + if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_need_backup(adev)) { + if (!resv) + WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv, + NULL)); r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr)); if (!resv) - ww_mutex_unlock(&(*bo_ptr)->tbo.resv->lock); + reservation_object_unlock((*bo_ptr)->tbo.resv); if (r) amdgpu_bo_unref(bo_ptr); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 6c5646b48d1a..5ce65280b396 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -170,6 +170,16 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned irq_type) { int r; + int sched_hw_submission = amdgpu_sched_hw_submission; + + /* Set the hw submission limit higher for KIQ because + * it's used for a number of gfx/compute tasks by both + * KFD and KGD which may have outstanding fences and + * it doesn't really use the gpu scheduler anyway; + * KIQ tasks get submitted directly to the ring. + */ + if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) + sched_hw_submission = max(sched_hw_submission, 256); if (ring->adev == NULL) { if (adev->num_rings >= AMDGPU_MAX_RINGS) @@ -178,8 +188,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, ring->adev = adev; ring->idx = adev->num_rings++; adev->rings[ring->idx] = ring; - r = amdgpu_fence_driver_init_ring(ring, - amdgpu_sched_hw_submission); + r = amdgpu_fence_driver_init_ring(ring, sched_hw_submission); if (r) return r; } @@ -218,8 +227,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, return r; } - ring->ring_size = roundup_pow_of_two(max_dw * 4 * - amdgpu_sched_hw_submission); + ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission); ring->buf_mask = (ring->ring_size / 4) - 1; ring->ptr_mask = ring->funcs->support_64bit_ptrs ? 
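The amdgpu_ring.c hunk above raises the hardware submission count to at least 256 for the KIQ ring and then derives ring_size and buf_mask from it. The byte size is rounded up to a power of two so ring positions can wrap with a cheap AND mask instead of a modulo. A standalone sketch of that arithmetic (max_dw = 64 is an invented example value, not a driver default, and roundup_pow_of_two is reimplemented locally rather than taken from the kernel):

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the kernel's roundup_pow_of_two(). */
static uint64_t roundup_pow_of_two_u64(uint64_t x)
{
	uint64_t p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int max_dw = 64;                 /* example value, assumption */
	unsigned int sched_hw_submission = 256;   /* raised KIQ value from the hunk */
	uint64_t ring_size = roundup_pow_of_two_u64((uint64_t)max_dw * 4 *
						    sched_hw_submission);
	uint64_t buf_mask = ring_size / 4 - 1;    /* mask over dword indices */
	uint64_t wptr = buf_mask + 5;             /* an index past the ring end */

	/* Wrapping with the mask is what makes the power-of-two size matter. */
	printf("ring_size=%llu bytes buf_mask=0x%llx wrapped wptr=%llu\n",
	       (unsigned long long)ring_size, (unsigned long long)buf_mask,
	       (unsigned long long)(wptr & buf_mask));
	return 0;
}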
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 8b2c294f6f79..7ef6c28a34d9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -761,35 +761,11 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm) sg_free_table(ttm->sg); } -static int amdgpu_ttm_do_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem) -{ - struct amdgpu_ttm_tt *gtt = (void *)ttm; - uint64_t flags; - int r; - - spin_lock(&gtt->adev->gtt_list_lock); - flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, mem); - gtt->offset = (u64)mem->start << PAGE_SHIFT; - r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages, - ttm->pages, gtt->ttm.dma_address, flags); - - if (r) { - DRM_ERROR("failed to bind %lu pages at 0x%08llX\n", - ttm->num_pages, gtt->offset); - goto error_gart_bind; - } - - list_add_tail(&gtt->list, &gtt->adev->gtt_list); -error_gart_bind: - spin_unlock(&gtt->adev->gtt_list_lock); - return r; - -} - static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) { struct amdgpu_ttm_tt *gtt = (void*)ttm; + uint64_t flags; int r = 0; if (gtt->userptr) { @@ -809,9 +785,24 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, bo_mem->mem_type == AMDGPU_PL_OA) return -EINVAL; - if (amdgpu_gtt_mgr_is_allocated(bo_mem)) - r = amdgpu_ttm_do_bind(ttm, bo_mem); + if (!amdgpu_gtt_mgr_is_allocated(bo_mem)) + return 0; + spin_lock(&gtt->adev->gtt_list_lock); + flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem); + gtt->offset = (u64)bo_mem->start << PAGE_SHIFT; + r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages, + ttm->pages, gtt->ttm.dma_address, flags); + + if (r) { + DRM_ERROR("failed to bind %lu pages at 0x%08llX\n", + ttm->num_pages, gtt->offset); + goto error_gart_bind; + } + + list_add_tail(&gtt->list, &gtt->adev->gtt_list); +error_gart_bind: + spin_unlock(&gtt->adev->gtt_list_lock); return r; } @@ -824,20 +815,39 @@ bool amdgpu_ttm_is_bound(struct ttm_tt *ttm) int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem) { + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); struct ttm_tt *ttm = bo->ttm; + struct ttm_mem_reg tmp; + + struct ttm_placement placement; + struct ttm_place placements; int r; if (!ttm || amdgpu_ttm_is_bound(ttm)) return 0; - r = amdgpu_gtt_mgr_alloc(&bo->bdev->man[TTM_PL_TT], bo, - NULL, bo_mem); - if (r) { - DRM_ERROR("Failed to allocate GTT address space (%d)\n", r); - return r; - } + tmp = bo->mem; + tmp.mm_node = NULL; + placement.num_placement = 1; + placement.placement = &placements; + placement.num_busy_placement = 1; + placement.busy_placement = &placements; + placements.fpfn = 0; + placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT; + placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; - return amdgpu_ttm_do_bind(ttm, bo_mem); + r = ttm_bo_mem_space(bo, &placement, &tmp, true, false); + if (unlikely(r)) + return r; + + r = ttm_bo_move_ttm(bo, true, false, &tmp); + if (unlikely(r)) + ttm_bo_mem_put(bo, &tmp); + else + bo->offset = (bo->mem.start << PAGE_SHIFT) + + bo->bdev->man[bo->mem.mem_type].gpu_offset; + + return r; } int amdgpu_ttm_recover_gart(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index f22a4758719d..43093bffa2cf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -62,10 +62,6 @@ extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func; extern const struct ttm_mem_type_manager_func
amdgpu_vram_mgr_func; bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem); -int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, - struct ttm_buffer_object *tbo, - const struct ttm_place *place, - struct ttm_mem_reg *mem); uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man); uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 6b1343e5541d..bd20ff018512 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -165,14 +165,6 @@ static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent, unsigned i; int r; - if (parent->bo->shadow) { - struct amdgpu_bo *shadow = parent->bo->shadow; - - r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem); - if (r) - return r; - } - if (use_cpu_for_update) { r = amdgpu_bo_kmap(parent->bo, NULL); if (r) @@ -1277,7 +1269,7 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p, /* In the case of a mixed PT the PDE must point to it*/ if (p->adev->asic_type < CHIP_VEGA10 || nptes != AMDGPU_VM_PTE_COUNT(p->adev) || - p->func == amdgpu_vm_do_copy_ptes || + p->src || !(flags & AMDGPU_PTE_VALID)) { dst = amdgpu_bo_gpu_offset(entry->bo); @@ -1294,9 +1286,23 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p, entry->addr = (dst | flags); if (use_cpu_update) { + /* In case a huge page is replaced with a system + * memory mapping, p->pages_addr != NULL and + * amdgpu_vm_cpu_set_ptes would try to translate dst + * through amdgpu_vm_map_gart. But dst is already a + * GPU address (of the page table). Disable + * amdgpu_vm_map_gart temporarily. + */ + dma_addr_t *tmp; + + tmp = p->pages_addr; + p->pages_addr = NULL; + pd_addr = (unsigned long)amdgpu_bo_kptr(parent->bo); pde = pd_addr + (entry - parent->entries) * 8; amdgpu_vm_cpu_set_ptes(p, pde, dst, 1, 0, flags); + + p->pages_addr = tmp; } else { if (parent->bo->shadow) { pd_addr = amdgpu_bo_gpu_offset(parent->bo->shadow); @@ -1610,7 +1616,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, * * @adev: amdgpu_device pointer * @exclusive: fence we need to sync to - * @gtt_flags: flags as they are used for GTT * @pages_addr: DMA addresses to use for mapping * @vm: requested vm * @mapping: mapped range and flags to use for the update @@ -1624,7 +1629,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, */ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, struct dma_fence *exclusive, - uint64_t gtt_flags, dma_addr_t *pages_addr, struct amdgpu_vm *vm, struct amdgpu_bo_va_mapping *mapping, @@ -1679,11 +1683,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, } if (pages_addr) { - if (flags == gtt_flags) - src = adev->gart.table_addr + - (addr >> AMDGPU_GPU_PAGE_SHIFT) * 8; - else - max_entries = min(max_entries, 16ull * 1024ull); + max_entries = min(max_entries, 16ull * 1024ull); addr = 0; } else if (flags & AMDGPU_PTE_VALID) { addr += adev->vm_manager.vram_base_offset; @@ -1728,10 +1728,10 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_vm *vm = bo_va->base.vm; struct amdgpu_bo_va_mapping *mapping; dma_addr_t *pages_addr = NULL; - uint64_t gtt_flags, flags; struct ttm_mem_reg *mem; struct drm_mm_node *nodes; struct dma_fence *exclusive; + uint64_t flags; int r; if (clear || !bo_va->base.bo) { @@ -1751,15 +1751,10 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, exclusive = 
reservation_object_get_excl(bo->tbo.resv); } - if (bo) { + if (bo) flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem); - gtt_flags = (amdgpu_ttm_is_bound(bo->tbo.ttm) && - adev == amdgpu_ttm_adev(bo->tbo.bdev)) ? - flags : 0; - } else { + else flags = 0x0; - gtt_flags = ~0x0; - } spin_lock(&vm->status_lock); if (!list_empty(&bo_va->base.vm_status)) @@ -1767,8 +1762,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, spin_unlock(&vm->status_lock); list_for_each_entry(mapping, &bo_va->invalids, list) { - r = amdgpu_vm_bo_split_mapping(adev, exclusive, - gtt_flags, pages_addr, vm, + r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm, mapping, flags, nodes, &bo_va->last_pt_update); if (r) @@ -2475,7 +2469,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, u64 flags; uint64_t init_pde_value = 0; - vm->va = RB_ROOT; + vm->va = RB_ROOT_CACHED; vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter); for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) vm->reserved_vmid[i] = NULL; @@ -2596,10 +2590,11 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) amd_sched_entity_fini(vm->entity.sched, &vm->entity); - if (!RB_EMPTY_ROOT(&vm->va)) { + if (!RB_EMPTY_ROOT(&vm->va.rb_root)) { dev_err(adev->dev, "still active bo inside vm\n"); } - rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, rb) { + rbtree_postorder_for_each_entry_safe(mapping, tmp, + &vm->va.rb_root, rb) { list_del(&mapping->list); amdgpu_vm_it_remove(mapping, &vm->va); kfree(mapping); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index ba6691b58ee7..6716355403ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -118,7 +118,7 @@ struct amdgpu_vm_pt { struct amdgpu_vm { /* tree of virtual addresses mapped */ - struct rb_root va; + struct rb_root_cached va; /* protecting invalidated */ spinlock_t status_lock; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index d228f5a99044..dbbe986f90f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -636,7 +636,194 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev) NUM_BANKS(ADDR_SURF_2_BANK); for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) WREG32(mmGB_TILE_MODE0 + reg_offset, tilemode[reg_offset]); - } else if (adev->asic_type == CHIP_OLAND || adev->asic_type == CHIP_HAINAN) { + } else if (adev->asic_type == CHIP_OLAND) { + tilemode[0] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4); + tilemode[1] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4); + tilemode[2] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + 
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4); + tilemode[3] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4); + tilemode[4] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[5] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(split_equal_to_row_size) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[6] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(split_equal_to_row_size) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[7] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(split_equal_to_row_size) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4); + tilemode[8] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[9] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[10] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4); + tilemode[11] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[12] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[13] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + 
PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[14] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[15] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[16] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[17] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(split_equal_to_row_size) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[21] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[22] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4); + tilemode[23] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[24] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[25] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) | + NUM_BANKS(ADDR_SURF_8_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1); + for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) + WREG32(mmGB_TILE_MODE0 + reg_offset, tilemode[reg_offset]); + } else if (adev->asic_type == CHIP_HAINAN) { tilemode[0] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | 
ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P2) | diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 832e592fcd07..fc260c13b1da 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -4579,9 +4579,9 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring) mqd->compute_misc_reserved = 0x00000003; if (!(adev->flags & AMD_IS_APU)) { mqd->dynamic_cu_mask_addr_lo = lower_32_bits(ring->mqd_gpu_addr - + offsetof(struct vi_mqd_allocation, dyamic_cu_mask)); + + offsetof(struct vi_mqd_allocation, dynamic_cu_mask)); mqd->dynamic_cu_mask_addr_hi = upper_32_bits(ring->mqd_gpu_addr - + offsetof(struct vi_mqd_allocation, dyamic_cu_mask)); + + offsetof(struct vi_mqd_allocation, dynamic_cu_mask)); } eop_base_addr = ring->eop_gpu_addr >> 8; mqd->cp_hqd_eop_base_addr_lo = eop_base_addr; @@ -4768,8 +4768,8 @@ static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring) mutex_unlock(&adev->srbm_mutex); } else { memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation)); - ((struct vi_mqd_allocation *)mqd)->dyamic_cu_mask = 0xFFFFFFFF; - ((struct vi_mqd_allocation *)mqd)->dyamic_rb_mask = 0xFFFFFFFF; + ((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; + ((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; mutex_lock(&adev->srbm_mutex); vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0); gfx_v8_0_mqd_init(ring); @@ -4792,8 +4792,8 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring) if (!adev->gfx.in_reset && !adev->gfx.in_suspend) { memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation)); - ((struct vi_mqd_allocation *)mqd)->dyamic_cu_mask = 0xFFFFFFFF; - ((struct vi_mqd_allocation *)mqd)->dyamic_rb_mask = 0xFFFFFFFF; + ((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; + ((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; mutex_lock(&adev->srbm_mutex); vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0); gfx_v8_0_mqd_init(ring); diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index 4f2788b61a08..6c8040e616c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -124,7 +124,7 @@ static void gfxhub_v1_0_init_tlb_regs(struct amdgpu_device *adev) static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev) { - uint32_t tmp, field; + uint32_t tmp; /* Setup L2 cache */ tmp = RREG32_SOC15(GC, 0, mmVM_L2_CNTL); @@ -143,9 +143,8 @@ static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); WREG32_SOC15(GC, 0, mmVM_L2_CNTL2, tmp); - field = adev->vm_manager.fragment_size; tmp = mmVM_L2_CNTL3_DEFAULT; - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 6); WREG32_SOC15(GC, 0, mmVM_L2_CNTL3, tmp); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 12b0c4cd7a5a..5be9c83dfcf7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -332,7 +332,24 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev) adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; adev->mc.visible_vram_size = adev->mc.aper_size; - amdgpu_gart_set_defaults(adev); + /* set the gart size */ + if (amdgpu_gart_size == -1) { + switch (adev->asic_type) { + case 
CHIP_HAINAN: /* no MM engines */ + default: + adev->mc.gart_size = 256ULL << 20; + break; + case CHIP_VERDE: /* UVD, VCE do not support GPUVM */ + case CHIP_TAHITI: /* UVD, VCE do not support GPUVM */ + case CHIP_PITCAIRN: /* UVD, VCE do not support GPUVM */ + case CHIP_OLAND: /* UVD, VCE do not support GPUVM */ + adev->mc.gart_size = 1024ULL << 20; + break; + } + } else { + adev->mc.gart_size = (u64)amdgpu_gart_size << 20; + } + gmc_v6_0_vram_gtt_location(adev, &adev->mc); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index e42c1ad3af5e..eace9e7182c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -386,7 +386,27 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev) if (adev->mc.visible_vram_size > adev->mc.real_vram_size) adev->mc.visible_vram_size = adev->mc.real_vram_size; - amdgpu_gart_set_defaults(adev); + /* set the gart size */ + if (amdgpu_gart_size == -1) { + switch (adev->asic_type) { + case CHIP_TOPAZ: /* no MM engines */ + default: + adev->mc.gart_size = 256ULL << 20; + break; +#ifdef CONFIG_DRM_AMDGPU_CIK + case CHIP_BONAIRE: /* UVD, VCE do not support GPUVM */ + case CHIP_HAWAII: /* UVD, VCE do not support GPUVM */ + case CHIP_KAVERI: /* UVD, VCE do not support GPUVM */ + case CHIP_KABINI: /* UVD, VCE do not support GPUVM */ + case CHIP_MULLINS: /* UVD, VCE do not support GPUVM */ + adev->mc.gart_size = 1024ULL << 20; + break; +#endif + } + } else { + adev->mc.gart_size = (u64)amdgpu_gart_size << 20; + } + gmc_v7_0_vram_gtt_location(adev, &adev->mc); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 7ca2dae8237a..3b3326daf32b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -562,7 +562,26 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev) if (adev->mc.visible_vram_size > adev->mc.real_vram_size) adev->mc.visible_vram_size = adev->mc.real_vram_size; - amdgpu_gart_set_defaults(adev); + /* set the gart size */ + if (amdgpu_gart_size == -1) { + switch (adev->asic_type) { + case CHIP_POLARIS11: /* all engines support GPUVM */ + case CHIP_POLARIS10: /* all engines support GPUVM */ + case CHIP_POLARIS12: /* all engines support GPUVM */ + default: + adev->mc.gart_size = 256ULL << 20; + break; + case CHIP_TONGA: /* UVD, VCE do not support GPUVM */ + case CHIP_FIJI: /* UVD, VCE do not support GPUVM */ + case CHIP_CARRIZO: /* UVD, VCE do not support GPUVM, DCE SG support */ + case CHIP_STONEY: /* UVD does not support GPUVM, DCE SG support */ + adev->mc.gart_size = 1024ULL << 20; + break; + } + } else { + adev->mc.gart_size = (u64)amdgpu_gart_size << 20; + } + gmc_v8_0_vram_gtt_location(adev, &adev->mc); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 2769c2b3b56e..d04d0b123212 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -499,7 +499,21 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) if (adev->mc.visible_vram_size > adev->mc.real_vram_size) adev->mc.visible_vram_size = adev->mc.real_vram_size; - amdgpu_gart_set_defaults(adev); + /* set the gart size */ + if (amdgpu_gart_size == -1) { + switch (adev->asic_type) { + case CHIP_VEGA10: /* all engines support GPUVM */ + default: + adev->mc.gart_size = 256ULL << 20; + break; + case CHIP_RAVEN: /* DCE SG support */ + adev->mc.gart_size = 1024ULL << 20; + break; + } + } else { + adev->mc.gart_size = 
(u64)amdgpu_gart_size << 20; + } + gmc_v9_0_vram_gtt_location(adev, &adev->mc); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 4395a4f12149..74cb647da30e 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -138,7 +138,7 @@ static void mmhub_v1_0_init_tlb_regs(struct amdgpu_device *adev) static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev) { - uint32_t tmp, field; + uint32_t tmp; /* Setup L2 cache */ tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL); @@ -157,9 +157,8 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp); - field = adev->vm_manager.fragment_size; tmp = mmVM_L2_CNTL3_DEFAULT; - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 6); WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index e4a8c2e52cb2..660b3fbade41 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -892,6 +892,8 @@ static int kfd_ioctl_get_tile_config(struct file *filep, int err = 0; dev = kfd_device_by_id(args->gpu_id); + if (!dev) + return -EINVAL; dev->kfd2kgd->get_tile_config(dev->kgd, &config); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index 5979158c3f7b..944abfad39c1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -292,7 +292,10 @@ static int create_signal_event(struct file *devkfd, struct kfd_event *ev) { if (p->signal_event_count == KFD_SIGNAL_EVENT_LIMIT) { - pr_warn("Signal event wasn't created because limit was reached\n"); + if (!p->signal_event_limit_reached) { + pr_warn("Signal event wasn't created because limit was reached\n"); + p->signal_event_limit_reached = true; + } return -ENOMEM; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c index 681b639f5133..ed71ad40e8f7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c @@ -183,8 +183,8 @@ static void uninitialize(struct kernel_queue *kq) { if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ) kq->mqd->destroy_mqd(kq->mqd, - NULL, - false, + kq->queue->mqd, + KFD_PREEMPT_TYPE_WAVEFRONT_RESET, QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS, kq->queue->pipe, kq->queue->queue); @@ -210,6 +210,11 @@ static int acquire_packet_buffer(struct kernel_queue *kq, uint32_t wptr, rptr; unsigned int *queue_address; + /* When rptr == wptr, the buffer is empty. + * When rptr == wptr + 1, the buffer is full. + * It is always rptr that advances to the position of wptr, rather than + * the opposite. So we can only use up to queue_size_dwords - 1 dwords. 
+ */ rptr = *kq->rptr_kernel; wptr = *kq->wptr_kernel; queue_address = (unsigned int *)kq->pq_kernel_addr; @@ -219,11 +224,10 @@ static int acquire_packet_buffer(struct kernel_queue *kq, pr_debug("wptr: %d\n", wptr); pr_debug("queue_address 0x%p\n", queue_address); - available_size = (rptr - 1 - wptr + queue_size_dwords) % + available_size = (rptr + queue_size_dwords - 1 - wptr) % queue_size_dwords; - if (packet_size_in_dwords >= queue_size_dwords || - packet_size_in_dwords >= available_size) { + if (packet_size_in_dwords > available_size) { /* * make sure calling functions know * acquire_packet_buffer() failed @@ -233,6 +237,14 @@ static int acquire_packet_buffer(struct kernel_queue *kq, } if (wptr + packet_size_in_dwords >= queue_size_dwords) { + /* make sure after rolling back to position 0, there is + * still enough space. + */ + if (packet_size_in_dwords >= rptr) { + *buffer_ptr = NULL; + return -ENOMEM; + } + /* fill nops, roll back and start at position 0 */ while (wptr > 0) { queue_address[wptr] = kq->nop_packet; wptr = (wptr + 1) % queue_size_dwords; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index b397ec726400..b87e96cee5fa 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -521,6 +521,7 @@ struct kfd_process { struct list_head signal_event_pages; u32 next_nonsignal_event_id; size_t signal_event_count; + bool signal_event_limit_reached; }; /** diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 1cae95e2b13a..03bec765b03d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -143,7 +143,6 @@ int pqm_create_queue(struct process_queue_manager *pqm, int num_queues = 0; struct queue *cur; - memset(&q_properties, 0, sizeof(struct queue_properties)); memcpy(&q_properties, properties, sizeof(struct queue_properties)); q = NULL; kq = NULL; diff --git a/drivers/gpu/drm/amd/include/vi_structs.h b/drivers/gpu/drm/amd/include/vi_structs.h index ca93b5160ba6..3e606a761d0e 100644 --- a/drivers/gpu/drm/amd/include/vi_structs.h +++ b/drivers/gpu/drm/amd/include/vi_structs.h @@ -419,8 +419,8 @@ struct vi_mqd_allocation { struct vi_mqd mqd; uint32_t wptr_poll_mem; uint32_t rptr_report_mem; - uint32_t dyamic_cu_mask; - uint32_t dyamic_rb_mask; + uint32_t dynamic_cu_mask; + uint32_t dynamic_rb_mask; }; struct cz_mqd { diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c index edc5fb6412d9..2c3e6baf2524 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c @@ -317,8 +317,8 @@ static int rv_tf_set_num_active_display(struct pp_hwmgr *hwmgr, void *input, } static const struct phm_master_table_item rv_set_power_state_list[] = { - { NULL, rv_tf_set_clock_limit }, - { NULL, rv_tf_set_num_active_display }, + { .tableFunction = rv_tf_set_clock_limit }, + { .tableFunction = rv_tf_set_num_active_display }, { } }; @@ -391,7 +391,7 @@ static int rv_tf_disable_gfx_off(struct pp_hwmgr *hwmgr, } static const struct phm_master_table_item rv_disable_dpm_list[] = { - {NULL, rv_tf_disable_gfx_off}, + { .tableFunction = rv_tf_disable_gfx_off }, { }, }; @@ -416,7 +416,7 @@ static int rv_tf_enable_gfx_off(struct pp_hwmgr *hwmgr, } static const struct phm_master_table_item rv_enable_dpm_list[] = { - {NULL, rv_tf_enable_gfx_off}, + { .tableFunction = 
rv_tf_enable_gfx_off }, { }, }; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index 9d71a259d97d..f8f02e70b8bc 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -1558,7 +1558,8 @@ static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr) */ static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr, - uint32_t gfx_clock, PllSetting_t *current_gfxclk_level) + uint32_t gfx_clock, PllSetting_t *current_gfxclk_level, + uint32_t *acg_freq) { struct phm_ppt_v2_information *table_info = (struct phm_ppt_v2_information *)(hwmgr->pptable); @@ -1609,6 +1610,8 @@ static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr, cpu_to_le16(dividers.usPll_ss_slew_frac); current_gfxclk_level->Did = (uint8_t)(dividers.ulDid); + *acg_freq = gfx_clock / 100; /* 100 Khz to Mhz conversion */ + return 0; } @@ -1689,7 +1692,8 @@ static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) for (i = 0; i < dpm_table->count; i++) { result = vega10_populate_single_gfx_level(hwmgr, dpm_table->dpm_levels[i].value, - &(pp_table->GfxclkLevel[i])); + &(pp_table->GfxclkLevel[i]), + &(pp_table->AcgFreqTable[i])); if (result) return result; } @@ -1698,7 +1702,8 @@ static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) while (i < NUM_GFXCLK_DPM_LEVELS) { result = vega10_populate_single_gfx_level(hwmgr, dpm_table->dpm_levels[j].value, - &(pp_table->GfxclkLevel[i])); + &(pp_table->GfxclkLevel[i]), + &(pp_table->AcgFreqTable[i])); if (result) return result; i++; diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h index f6d6c61f796a..2818c98ff5ca 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h @@ -315,10 +315,12 @@ typedef struct { uint8_t AcgEnable[NUM_GFXCLK_DPM_LEVELS]; GbVdroopTable_t AcgBtcGbVdroopTable; QuadraticInt_t AcgAvfsGb; - uint32_t Reserved[4]; + + /* ACG Frequency Table, in Mhz */ + uint32_t AcgFreqTable[NUM_GFXCLK_DPM_LEVELS]; /* Padding - ignore */ - uint32_t MmHubPadding[7]; /* SMU internal use */ + uint32_t MmHubPadding[3]; /* SMU internal use */ } PPTable_t; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c index 76347ff6d655..c49a6f22002f 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c @@ -380,7 +380,8 @@ static int smu7_populate_single_firmware_entry(struct pp_smumgr *smumgr, entry->num_register_entries = 0; } - if (fw_type == UCODE_ID_RLC_G) + if ((fw_type == UCODE_ID_RLC_G) + || (fw_type == UCODE_ID_CP_MEC)) entry->flags = 1; else entry->flags = 0; diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 38cea6fb25a8..97c94f9683fa 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -205,17 +205,32 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, struct amd_sched_entity *entity) { struct amd_sched_rq *rq = entity->rq; + int r; if (!amd_sched_entity_is_initialized(sched, entity)) return; - /** * The client will not queue more IBs during this fini, consume existing - * queued IBs + * queued IBs or discard them on SIGKILL */ - wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity)); - + if ((current->flags & 
PF_SIGNALED) && current->exit_code == SIGKILL) + r = -ERESTARTSYS; + else + r = wait_event_killable(sched->job_scheduled, + amd_sched_entity_is_idle(entity)); amd_sched_rq_remove_entity(rq, entity); + if (r) { + struct amd_sched_job *job; + + /* Park the kernel for a moment to make sure it isn't processing + * our enity. + */ + kthread_park(sched->thread); + kthread_unpark(sched->thread); + while (kfifo_out(&entity->job_queue, &job, sizeof(job))) + sched->ops->free_job(job); + + } kfifo_free(&entity->job_queue); } diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c index db6aeec50b82..2e5e089dd912 100644 --- a/drivers/gpu/drm/drm_blend.c +++ b/drivers/gpu/drm/drm_blend.c @@ -319,7 +319,7 @@ static int drm_atomic_helper_crtc_normalize_zpos(struct drm_crtc *crtc, DRM_DEBUG_ATOMIC("[CRTC:%d:%s] calculating normalized zpos values\n", crtc->base.id, crtc->name); - states = kmalloc_array(total_planes, sizeof(*states), GFP_TEMPORARY); + states = kmalloc_array(total_planes, sizeof(*states), GFP_KERNEL); if (!states) return -ENOMEM; diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c index 80e62f669321..0ef9011a1856 100644 --- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c +++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c @@ -111,7 +111,7 @@ ssize_t drm_dp_dual_mode_write(struct i2c_adapter *adapter, void *data; int ret; - data = kmalloc(msg.len, GFP_TEMPORARY); + data = kmalloc(msg.len, GFP_KERNEL); if (!data) return -ENOMEM; diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index f794089d30ac..61a1c8ea74bc 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -169,7 +169,7 @@ INTERVAL_TREE_DEFINE(struct drm_mm_node, rb, struct drm_mm_node * __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last) { - return drm_mm_interval_tree_iter_first((struct rb_root *)&mm->interval_tree, + return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree, start, last) ?: (struct drm_mm_node *)&mm->head_node; } EXPORT_SYMBOL(__drm_mm_interval_first); @@ -180,6 +180,7 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node, struct drm_mm *mm = hole_node->mm; struct rb_node **link, *rb; struct drm_mm_node *parent; + bool leftmost = true; node->__subtree_last = LAST(node); @@ -196,9 +197,10 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node, rb = &hole_node->rb; link = &hole_node->rb.rb_right; + leftmost = false; } else { rb = NULL; - link = &mm->interval_tree.rb_node; + link = &mm->interval_tree.rb_root.rb_node; } while (*link) { @@ -208,14 +210,15 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node, parent->__subtree_last = node->__subtree_last; if (node->start < parent->start) link = &parent->rb.rb_left; - else + else { link = &parent->rb.rb_right; + leftmost = true; + } } rb_link_node(&node->rb, rb, link); - rb_insert_augmented(&node->rb, - &mm->interval_tree, - &drm_mm_interval_tree_augment); + rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost, + &drm_mm_interval_tree_augment); } #define RB_INSERT(root, member, expr) do { \ @@ -577,7 +580,7 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new) *new = *old; list_replace(&old->node_list, &new->node_list); - rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree); + rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree.rb_root); if (drm_mm_hole_follows(old)) { list_replace(&old->hole_stack, &new->hole_stack); @@ -863,7 
+866,7 @@ void drm_mm_init(struct drm_mm *mm, u64 start, u64 size) mm->color_adjust = NULL; INIT_LIST_HEAD(&mm->hole_stack); - mm->interval_tree = RB_ROOT; + mm->interval_tree = RB_ROOT_CACHED; mm->holes_size = RB_ROOT; mm->holes_addr = RB_ROOT; diff --git a/drivers/gpu/drm/drm_scdc_helper.c b/drivers/gpu/drm/drm_scdc_helper.c index 7d1b0f011d33..935653eb3616 100644 --- a/drivers/gpu/drm/drm_scdc_helper.c +++ b/drivers/gpu/drm/drm_scdc_helper.c @@ -102,7 +102,7 @@ ssize_t drm_scdc_write(struct i2c_adapter *adapter, u8 offset, void *data; int err; - data = kmalloc(1 + size, GFP_TEMPORARY); + data = kmalloc(1 + size, GFP_KERNEL); if (!data) return -ENOMEM; diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c index d9100b565198..28f1226576f8 100644 --- a/drivers/gpu/drm/drm_vma_manager.c +++ b/drivers/gpu/drm/drm_vma_manager.c @@ -147,7 +147,7 @@ struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_m struct rb_node *iter; unsigned long offset; - iter = mgr->vm_addr_space_mm.interval_tree.rb_node; + iter = mgr->vm_addr_space_mm.interval_tree.rb_root.rb_node; best = NULL; while (likely(iter)) { diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index 5a634594a6ce..57881167ccd2 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -551,12 +551,15 @@ static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = { void etnaviv_gem_free_object(struct drm_gem_object *obj) { struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); + struct etnaviv_drm_private *priv = obj->dev->dev_private; struct etnaviv_vram_mapping *mapping, *tmp; /* object should not be active */ WARN_ON(is_active(etnaviv_obj)); + mutex_lock(&priv->gem_lock); list_del(&etnaviv_obj->gem_node); + mutex_unlock(&priv->gem_lock); list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list, obj_node) { diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index a7ff2e4c00d2..46dfe0737f43 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -37,7 +37,7 @@ static struct etnaviv_gem_submit *submit_create(struct drm_device *dev, struct etnaviv_gem_submit *submit; size_t sz = size_vstruct(nr, sizeof(submit->bos[0]), sizeof(*submit)); - submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); + submit = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); if (submit) { submit->dev = dev; submit->gpu = gpu; @@ -445,8 +445,10 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data, cmdbuf->user_size = ALIGN(args->stream_size, 8); ret = etnaviv_gpu_submit(gpu, submit, cmdbuf); - if (ret == 0) - cmdbuf = NULL; + if (ret) + goto out; + + cmdbuf = NULL; if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) { /* diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index 730b8d9db187..6be5b53c3b27 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index b1f7299600f0..e651a58c18cf 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -168,23 +168,19 @@ static struct drm_driver exynos_drm_driver = { static int exynos_drm_suspend(struct device *dev) { 
struct drm_device *drm_dev = dev_get_drvdata(dev); - struct drm_connector *connector; - struct drm_connector_list_iter conn_iter; + struct exynos_drm_private *private = drm_dev->dev_private; if (pm_runtime_suspended(dev) || !drm_dev) return 0; - drm_connector_list_iter_begin(drm_dev, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - int old_dpms = connector->dpms; - - if (connector->funcs->dpms) - connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF); - - /* Set the old mode back to the connector for resume */ - connector->dpms = old_dpms; + drm_kms_helper_poll_disable(drm_dev); + exynos_drm_fbdev_suspend(drm_dev); + private->suspend_state = drm_atomic_helper_suspend(drm_dev); + if (IS_ERR(private->suspend_state)) { + exynos_drm_fbdev_resume(drm_dev); + drm_kms_helper_poll_enable(drm_dev); + return PTR_ERR(private->suspend_state); } - drm_connector_list_iter_end(&conn_iter); return 0; } @@ -192,22 +188,14 @@ static int exynos_drm_suspend(struct device *dev) static int exynos_drm_resume(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); - struct drm_connector *connector; - struct drm_connector_list_iter conn_iter; + struct exynos_drm_private *private = drm_dev->dev_private; if (pm_runtime_suspended(dev) || !drm_dev) return 0; - drm_connector_list_iter_begin(drm_dev, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - if (connector->funcs->dpms) { - int dpms = connector->dpms; - - connector->dpms = DRM_MODE_DPMS_OFF; - connector->funcs->dpms(connector, dpms); - } - } - drm_connector_list_iter_end(&conn_iter); + drm_atomic_helper_resume(drm_dev, private->suspend_state); + exynos_drm_fbdev_resume(drm_dev); + drm_kms_helper_poll_enable(drm_dev); return 0; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index cf131c2aa23e..f8bae4cb4823 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h @@ -202,6 +202,7 @@ struct drm_exynos_file_private { */ struct exynos_drm_private { struct drm_fb_helper *fb_helper; + struct drm_atomic_state *suspend_state; struct device *dma_dev; void *mapping; diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index c3a068409b48..dfb66ecf417b 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c @@ -18,6 +18,8 @@ #include #include +#include + #include "exynos_drm_drv.h" #include "exynos_drm_fb.h" #include "exynos_drm_fbdev.h" @@ -285,3 +287,21 @@ void exynos_drm_output_poll_changed(struct drm_device *dev) drm_fb_helper_hotplug_event(fb_helper); } + +void exynos_drm_fbdev_suspend(struct drm_device *dev) +{ + struct exynos_drm_private *private = dev->dev_private; + + console_lock(); + drm_fb_helper_set_suspend(private->fb_helper, 1); + console_unlock(); +} + +void exynos_drm_fbdev_resume(struct drm_device *dev) +{ + struct exynos_drm_private *private = dev->dev_private; + + console_lock(); + drm_fb_helper_set_suspend(private->fb_helper, 0); + console_unlock(); +} diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h index 330eef87f718..645d1bb7f665 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h @@ -21,6 +21,8 @@ int exynos_drm_fbdev_init(struct drm_device *dev); void exynos_drm_fbdev_fini(struct drm_device *dev); void exynos_drm_fbdev_restore_mode(struct drm_device *dev); void exynos_drm_output_poll_changed(struct drm_device 
*dev); +void exynos_drm_fbdev_suspend(struct drm_device *drm); +void exynos_drm_fbdev_resume(struct drm_device *drm); #else @@ -39,6 +41,14 @@ static inline void exynos_drm_fbdev_restore_mode(struct drm_device *dev) #define exynos_drm_output_poll_changed (NULL) +static inline void exynos_drm_fbdev_suspend(struct drm_device *drm) +{ +} + +static inline void exynos_drm_fbdev_resume(struct drm_device *drm) +{ +} + #endif #endif diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 214fa5e51963..0109ff40b1db 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -944,22 +944,27 @@ static bool hdmi_mode_fixup(struct drm_encoder *encoder, struct drm_device *dev = encoder->dev; struct drm_connector *connector; struct drm_display_mode *m; + struct drm_connector_list_iter conn_iter; int mode_ok; drm_mode_set_crtcinfo(adjusted_mode, 0); - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { if (connector->encoder == encoder) break; } + if (connector) + drm_connector_get(connector); + drm_connector_list_iter_end(&conn_iter); - if (connector->encoder != encoder) + if (!connector) return true; mode_ok = hdmi_mode_valid(connector, adjusted_mode); if (mode_ok == MODE_OK) - return true; + goto cleanup; /* * Find the most suitable mode and copy it to adjusted_mode. @@ -979,6 +984,9 @@ static bool hdmi_mode_fixup(struct drm_encoder *encoder, } } +cleanup: + drm_connector_put(connector); + return true; } diff --git a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c index 771ff66711af..37c997e24b9e 100644 --- a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c +++ b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c @@ -26,7 +26,7 @@ #include "mdfld_output.h" #include "mdfld_dsi_pkg_sender.h" #include "tc35876x-dsi-lvds.h" -#include +#include #include #include #include diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c index 40af17ec6312..ff3154fe6588 100644 --- a/drivers/gpu/drm/i915/gvt/cfg_space.c +++ b/drivers/gpu/drm/i915/gvt/cfg_space.c @@ -197,78 +197,65 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu, static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { - unsigned int bar_index = - (rounddown(offset, 8) % PCI_BASE_ADDRESS_0) / 8; u32 new = *(u32 *)(p_data); bool lo = IS_ALIGNED(offset, 8); u64 size; int ret = 0; bool mmio_enabled = vgpu_cfg_space(vgpu)[PCI_COMMAND] & PCI_COMMAND_MEMORY; + struct intel_vgpu_pci_bar *bars = vgpu->cfg_space.bar; - if (WARN_ON(bar_index >= INTEL_GVT_PCI_BAR_MAX)) - return -EINVAL; - + /* + * Power-up software can determine how much address + * space the device requires by writing a value of + * all 1's to the register and then reading the value + * back. The device will return 0's in all don't-care + * address bits. + */ if (new == 0xffffffff) { - /* - * Power-up software can determine how much address - * space the device requires by writing a value of - * all 1's to the register and then reading the value - * back. The device will return 0's in all don't-care - * address bits. 
- */ - size = vgpu->cfg_space.bar[bar_index].size; - if (lo) { - new = rounddown(new, size); - } else { - u32 val = vgpu_cfg_space(vgpu)[rounddown(offset, 8)]; - /* for 32bit mode bar it returns all-0 in upper 32 - * bit, for 64bit mode bar it will calculate the - * size with lower 32bit and return the corresponding - * value + switch (offset) { + case PCI_BASE_ADDRESS_0: + case PCI_BASE_ADDRESS_1: + size = ~(bars[INTEL_GVT_PCI_BAR_GTTMMIO].size -1); + intel_vgpu_write_pci_bar(vgpu, offset, + size >> (lo ? 0 : 32), lo); + /* + * Untrap the BAR, since guest hasn't configured a + * valid GPA */ - if (val & PCI_BASE_ADDRESS_MEM_TYPE_64) - new &= (~(size-1)) >> 32; - else - new = 0; - } - /* - * Unmapp & untrap the BAR, since guest hasn't configured a - * valid GPA - */ - switch (bar_index) { - case INTEL_GVT_PCI_BAR_GTTMMIO: ret = trap_gttmmio(vgpu, false); break; - case INTEL_GVT_PCI_BAR_APERTURE: + case PCI_BASE_ADDRESS_2: + case PCI_BASE_ADDRESS_3: + size = ~(bars[INTEL_GVT_PCI_BAR_APERTURE].size -1); + intel_vgpu_write_pci_bar(vgpu, offset, + size >> (lo ? 0 : 32), lo); ret = map_aperture(vgpu, false); break; + default: + /* Unimplemented BARs */ + intel_vgpu_write_pci_bar(vgpu, offset, 0x0, false); } - intel_vgpu_write_pci_bar(vgpu, offset, new, lo); } else { - /* - * Unmapp & untrap the old BAR first, since guest has - * re-configured the BAR - */ - switch (bar_index) { - case INTEL_GVT_PCI_BAR_GTTMMIO: - ret = trap_gttmmio(vgpu, false); + switch (offset) { + case PCI_BASE_ADDRESS_0: + case PCI_BASE_ADDRESS_1: + /* + * Untrap the old BAR first, since guest has + * re-configured the BAR + */ + trap_gttmmio(vgpu, false); + intel_vgpu_write_pci_bar(vgpu, offset, new, lo); + ret = trap_gttmmio(vgpu, mmio_enabled); break; - case INTEL_GVT_PCI_BAR_APERTURE: - ret = map_aperture(vgpu, false); + case PCI_BASE_ADDRESS_2: + case PCI_BASE_ADDRESS_3: + map_aperture(vgpu, false); + intel_vgpu_write_pci_bar(vgpu, offset, new, lo); + ret = map_aperture(vgpu, mmio_enabled); break; - } - intel_vgpu_write_pci_bar(vgpu, offset, new, lo); - /* Track the new BAR */ - if (mmio_enabled) { - switch (bar_index) { - case INTEL_GVT_PCI_BAR_GTTMMIO: - ret = trap_gttmmio(vgpu, true); - break; - case INTEL_GVT_PCI_BAR_APERTURE: - ret = map_aperture(vgpu, true); - break; - } + default: + intel_vgpu_write_pci_bar(vgpu, offset, new, lo); } } return ret; @@ -299,10 +286,7 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset, } switch (rounddown(offset, 4)) { - case PCI_BASE_ADDRESS_0: - case PCI_BASE_ADDRESS_1: - case PCI_BASE_ADDRESS_2: - case PCI_BASE_ADDRESS_3: + case PCI_BASE_ADDRESS_0 ... 
PCI_BASE_ADDRESS_5: if (WARN_ON(!IS_ALIGNED(offset, 4))) return -EINVAL; return emulate_pci_bar_write(vgpu, offset, p_data, bytes); @@ -344,7 +328,6 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, struct intel_gvt *gvt = vgpu->gvt; const struct intel_gvt_device_info *info = &gvt->device_info; u16 *gmch_ctl; - int i; memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space, info->cfg_space_size); @@ -371,13 +354,13 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, */ memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4); memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4); + memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_4, 0, 8); memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4); - for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) { - vgpu->cfg_space.bar[i].size = pci_resource_len( - gvt->dev_priv->drm.pdev, i * 2); - vgpu->cfg_space.bar[i].tracked = false; - } + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size = + pci_resource_len(gvt->dev_priv->drm.pdev, 0); + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size = + pci_resource_len(gvt->dev_priv->drm.pdev, 2); } /** diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 3502a59166ff..2294466dd415 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -2659,7 +2659,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_D(HSW_PWR_WELL_CTL_BIOS(SKL_DISP_PW_MISC_IO), D_SKL_PLUS); MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(SKL_DISP_PW_MISC_IO), D_SKL_PLUS, NULL, skl_power_well_ctl_write); - MMIO_DH(GEN6_PCODE_MAILBOX, D_SKL_PLUS, NULL, mailbox_write); MMIO_D(0xa210, D_SKL_PLUS); MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS); diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index f0cb22cc0dd6..8ba932b22f7c 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -1073,7 +1073,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj, goto unpin_src; } - dst = i915_gem_object_pin_map(dst_obj, I915_MAP_WB); + dst = i915_gem_object_pin_map(dst_obj, I915_MAP_FORCE_WB); if (IS_ERR(dst)) goto unpin_dst; diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index a36216bd2a84..e4d4b6b41e26 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -4308,10 +4308,10 @@ i915_drop_caches_set(void *data, u64 val) fs_reclaim_acquire(GFP_KERNEL); if (val & DROP_BOUND) - i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND); + i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_BOUND); if (val & DROP_UNBOUND) - i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND); + i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_UNBOUND); if (val & DROP_SHRINK_ALL) i915_gem_shrink_all(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 43100229613c..9f45cfeae775 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1891,9 +1891,15 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags) /* * Everything depends on having the GTT running, so we need to start - * there. Fortunately we don't need to do this unless we reset the - * chip at a PCI level. - * + * there. + */ + ret = i915_ggtt_enable_hw(i915); + if (ret) { + DRM_ERROR("Failed to re-enable GGTT following reset %d\n", ret); + goto error; + } + + /* * Next we need to restore the context, but we don't use those * yet either... 
* diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 60267e375e88..18d9da53282b 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -3479,6 +3479,9 @@ void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj); enum i915_map_type { I915_MAP_WB = 0, I915_MAP_WC, +#define I915_MAP_OVERRIDE BIT(31) + I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE, + I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE, }; /** @@ -3742,6 +3745,7 @@ i915_gem_object_create_internal(struct drm_i915_private *dev_priv, /* i915_gem_shrinker.c */ unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv, unsigned long target, + unsigned long *nr_scanned, unsigned flags); #define I915_SHRINK_PURGEABLE 0x1 #define I915_SHRINK_UNBOUND 0x2 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index b9e8e0d6e97b..19404c96eeb1 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -695,12 +695,11 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains) switch (obj->base.write_domain) { case I915_GEM_DOMAIN_GTT: if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv)) { - if (intel_runtime_pm_get_if_in_use(dev_priv)) { - spin_lock_irq(&dev_priv->uncore.lock); - POSTING_READ_FW(RING_ACTHD(dev_priv->engine[RCS]->mmio_base)); - spin_unlock_irq(&dev_priv->uncore.lock); - intel_runtime_pm_put(dev_priv); - } + intel_runtime_pm_get(dev_priv); + spin_lock_irq(&dev_priv->uncore.lock); + POSTING_READ_FW(RING_ACTHD(dev_priv->engine[RCS]->mmio_base)); + spin_unlock_irq(&dev_priv->uncore.lock); + intel_runtime_pm_put(dev_priv); } intel_fb_obj_flush(obj, @@ -2213,7 +2212,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj, static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj) { struct radix_tree_iter iter; - void **slot; + void __rcu **slot; radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0) radix_tree_delete(&obj->mm.get_page.radix, iter.index); @@ -2354,7 +2353,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) goto err_sg; } - i915_gem_shrink(dev_priv, 2 * page_count, *s++); + i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++); cond_resched(); /* We've tried hard to allocate the memory by reaping @@ -2541,7 +2540,7 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj, if (n_pages > ARRAY_SIZE(stack_pages)) { /* Too big for stack -- allocate temporary array instead */ - pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_TEMPORARY); + pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL); if (!pages) return NULL; } @@ -2553,6 +2552,9 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj, GEM_BUG_ON(i != n_pages); switch (type) { + default: + MISSING_CASE(type); + /* fallthrough to use PAGE_KERNEL anyway */ case I915_MAP_WB: pgprot = PAGE_KERNEL; break; @@ -2583,7 +2585,9 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, if (ret) return ERR_PTR(ret); - pinned = true; + pinned = !(type & I915_MAP_OVERRIDE); + type &= ~I915_MAP_OVERRIDE; + if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) { if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) { ret = ____i915_gem_object_get_pages(obj); @@ -3258,7 +3262,13 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file) vma = radix_tree_delete(&ctx->handles_vma, lut->handle); - if (!i915_vma_is_ggtt(vma)) + GEM_BUG_ON(vma->obj != obj); + + /* We allow the process to have multiple 
handles to the same + * vma, in the same fd namespace, by virtue of flink/open. + */ + GEM_BUG_ON(!vma->open_count); + if (!--vma->open_count && !i915_vma_is_ggtt(vma)) i915_vma_close(vma); list_del(&lut->obj_link); @@ -5015,7 +5025,7 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv) * the objects as well, see i915_gem_freeze() */ - i915_gem_shrink(dev_priv, -1UL, I915_SHRINK_UNBOUND); + i915_gem_shrink(dev_priv, -1UL, NULL, I915_SHRINK_UNBOUND); i915_gem_drain_freed_objects(dev_priv); mutex_lock(&dev_priv->drm.struct_mutex); diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 4c2016237d61..92437f455b43 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -285,7 +285,7 @@ static int eb_create(struct i915_execbuffer *eb) * direct lookup. */ do { - unsigned int flags; + gfp_t flags; /* While we can still reduce the allocation size, don't * raise a warning and allow the allocation to fail. @@ -293,7 +293,7 @@ static int eb_create(struct i915_execbuffer *eb) * as possible to perform the allocation and warn * if it fails. */ - flags = GFP_TEMPORARY; + flags = GFP_KERNEL; if (size > 1) flags |= __GFP_NORETRY | __GFP_NOWARN; @@ -720,6 +720,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb) goto err_obj; } + vma->open_count++; list_add(&lut->obj_link, &obj->lut_list); list_add(&lut->ctx_link, &eb->ctx->handles_list); lut->ctx = eb->ctx; @@ -1070,7 +1071,9 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, return PTR_ERR(obj); cmd = i915_gem_object_pin_map(obj, - cache->has_llc ? I915_MAP_WB : I915_MAP_WC); + cache->has_llc ? + I915_MAP_FORCE_WB : + I915_MAP_FORCE_WC); i915_gem_object_unpin_pages(obj); if (IS_ERR(cmd)) return PTR_ERR(cmd); @@ -1512,7 +1515,7 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb) urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr); size = nreloc * sizeof(*relocs); - relocs = kvmalloc_array(size, 1, GFP_TEMPORARY); + relocs = kvmalloc_array(size, 1, GFP_KERNEL); if (!relocs) { kvfree(relocs); err = -ENOMEM; @@ -1526,7 +1529,7 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb) min_t(u64, BIT_ULL(31), size - copied); if (__copy_from_user((char *)relocs + copied, - (char *)urelocs + copied, + (char __user *)urelocs + copied, len)) { kvfree(relocs); err = -EFAULT; @@ -2074,7 +2077,7 @@ get_fence_array(struct drm_i915_gem_execbuffer2 *args, return ERR_PTR(-EFAULT); fences = kvmalloc_array(args->num_cliprects, sizeof(*fences), - __GFP_NOWARN | GFP_TEMPORARY); + __GFP_NOWARN | GFP_KERNEL); if (!fences) return ERR_PTR(-ENOMEM); @@ -2460,9 +2463,9 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, /* Copy in the exec list from userland */ exec_list = kvmalloc_array(args->buffer_count, sizeof(*exec_list), - __GFP_NOWARN | GFP_TEMPORARY); + __GFP_NOWARN | GFP_KERNEL); exec2_list = kvmalloc_array(args->buffer_count + 1, sz, - __GFP_NOWARN | GFP_TEMPORARY); + __GFP_NOWARN | GFP_KERNEL); if (exec_list == NULL || exec2_list == NULL) { DRM_DEBUG("Failed to allocate exec list for %d buffers\n", args->buffer_count); @@ -2540,7 +2543,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, /* Allocate an extra slot for use by the command parser */ exec2_list = kvmalloc_array(args->buffer_count + 1, sz, - __GFP_NOWARN | GFP_TEMPORARY); + __GFP_NOWARN | GFP_KERNEL); if (exec2_list == NULL) { DRM_DEBUG("Failed to allocate exec list for %d buffers\n", args->buffer_count); diff --git 
a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index d60f38adc4c4..e2410eb5d96e 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2062,7 +2062,7 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj, */ GEM_BUG_ON(obj->mm.pages == pages); } while (i915_gem_shrink(to_i915(obj->base.dev), - obj->base.size >> PAGE_SHIFT, + obj->base.size >> PAGE_SHIFT, NULL, I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_ACTIVE)); @@ -2754,10 +2754,10 @@ static void cnl_setup_private_ppat(struct drm_i915_private *dev_priv) I915_WRITE(GEN10_PAT_INDEX(1), GEN8_PPAT_WC | GEN8_PPAT_LLCELLC); I915_WRITE(GEN10_PAT_INDEX(2), GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); I915_WRITE(GEN10_PAT_INDEX(3), GEN8_PPAT_UC); - I915_WRITE(GEN10_PAT_INDEX(4), GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)); - I915_WRITE(GEN10_PAT_INDEX(5), GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)); - I915_WRITE(GEN10_PAT_INDEX(6), GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)); - I915_WRITE(GEN10_PAT_INDEX(7), GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); + I915_WRITE(GEN10_PAT_INDEX(4), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)); + I915_WRITE(GEN10_PAT_INDEX(5), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)); + I915_WRITE(GEN10_PAT_INDEX(6), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)); + I915_WRITE(GEN10_PAT_INDEX(7), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); } /* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability @@ -3231,7 +3231,7 @@ intel_rotate_pages(struct intel_rotation_info *rot_info, /* Allocate a temporary list of source pages for random access. */ page_addr_list = kvmalloc_array(n_pages, sizeof(dma_addr_t), - GFP_TEMPORARY); + GFP_KERNEL); if (!page_addr_list) return ERR_PTR(ret); diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c index 77fb39808131..74002b2d1b6f 100644 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c @@ -136,6 +136,7 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj) * i915_gem_shrink - Shrink buffer object caches * @dev_priv: i915 device * @target: amount of memory to make available, in pages + * @nr_scanned: optional output for number of pages scanned (incremental) * @flags: control flags for selecting cache types * * This function is the main interface to the shrinker. 
It will try to release @@ -158,7 +159,9 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj) */ unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv, - unsigned long target, unsigned flags) + unsigned long target, + unsigned long *nr_scanned, + unsigned flags) { const struct { struct list_head *list; @@ -169,6 +172,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, { NULL, 0 }, }, *phase; unsigned long count = 0; + unsigned long scanned = 0; bool unlock; if (!shrinker_lock(dev_priv, &unlock)) @@ -249,6 +253,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, count += obj->base.size >> PAGE_SHIFT; } mutex_unlock(&obj->mm.lock); + scanned += obj->base.size >> PAGE_SHIFT; } } list_splice_tail(&still_in_list, phase->list); @@ -261,6 +266,8 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, shrinker_unlock(dev_priv, unlock); + if (nr_scanned) + *nr_scanned += scanned; return count; } @@ -283,7 +290,7 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv) unsigned long freed; intel_runtime_pm_get(dev_priv); - freed = i915_gem_shrink(dev_priv, -1UL, + freed = i915_gem_shrink(dev_priv, -1UL, NULL, I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_ACTIVE); @@ -329,23 +336,28 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) unsigned long freed; bool unlock; + sc->nr_scanned = 0; + if (!shrinker_lock(dev_priv, &unlock)) return SHRINK_STOP; freed = i915_gem_shrink(dev_priv, sc->nr_to_scan, + &sc->nr_scanned, I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE); if (freed < sc->nr_to_scan) freed += i915_gem_shrink(dev_priv, - sc->nr_to_scan - freed, + sc->nr_to_scan - sc->nr_scanned, + &sc->nr_scanned, I915_SHRINK_BOUND | I915_SHRINK_UNBOUND); if (freed < sc->nr_to_scan && current_is_kswapd()) { intel_runtime_pm_get(dev_priv); freed += i915_gem_shrink(dev_priv, - sc->nr_to_scan - freed, + sc->nr_to_scan - sc->nr_scanned, + &sc->nr_scanned, I915_SHRINK_ACTIVE | I915_SHRINK_BOUND | I915_SHRINK_UNBOUND); @@ -354,7 +366,7 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) shrinker_unlock(dev_priv, unlock); - return freed; + return sc->nr_scanned ? 
freed : SHRINK_STOP; } static bool @@ -453,7 +465,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr goto out; intel_runtime_pm_get(dev_priv); - freed_pages += i915_gem_shrink(dev_priv, -1UL, + freed_pages += i915_gem_shrink(dev_priv, -1UL, NULL, I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_ACTIVE | diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index f152a38d7079..709efe2357ea 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c @@ -49,7 +49,7 @@ struct i915_mmu_notifier { spinlock_t lock; struct hlist_node node; struct mmu_notifier mn; - struct rb_root objects; + struct rb_root_cached objects; struct workqueue_struct *wq; }; @@ -123,7 +123,7 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, struct interval_tree_node *it; LIST_HEAD(cancelled); - if (RB_EMPTY_ROOT(&mn->objects)) + if (RB_EMPTY_ROOT(&mn->objects.rb_root)) return; /* interval ranges are inclusive, but invalidate range is exclusive */ @@ -172,7 +172,7 @@ i915_mmu_notifier_create(struct mm_struct *mm) spin_lock_init(&mn->lock); mn->mn.ops = &i915_gem_userptr_notifier; - mn->objects = RB_ROOT; + mn->objects = RB_ROOT_CACHED; mn->wq = alloc_workqueue("i915-userptr-release", WQ_UNBOUND, 0); if (mn->wq == NULL) { kfree(mn); @@ -507,7 +507,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work) ret = -ENOMEM; pinned = 0; - pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_TEMPORARY); + pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); if (pvec != NULL) { struct mm_struct *mm = obj->userptr.mm->mm; unsigned int flags = 0; @@ -643,7 +643,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) if (mm == current->mm) { pvec = kvmalloc_array(num_pages, sizeof(struct page *), - GFP_TEMPORARY | + GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); if (pvec) /* defer to worker if malloc fails */ diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index ed5a1eb839ad..0c779671fe2d 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -787,16 +787,16 @@ int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf, */ ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE; ebuf->buf = kmalloc(ebuf->size, - GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN); + GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); if (ebuf->buf == NULL) { ebuf->size = PAGE_SIZE; - ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY); + ebuf->buf = kmalloc(ebuf->size, GFP_KERNEL); } if (ebuf->buf == NULL) { ebuf->size = 128; - ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY); + ebuf->buf = kmalloc(ebuf->size, GFP_KERNEL); } if (ebuf->buf == NULL) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index e21ce9c18b6e..b63893eeca73 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -839,7 +839,6 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, pipe); int position; int vbl_start, vbl_end, hsync_start, htotal, vtotal; - bool in_vbl = true; unsigned long irqflags; if (WARN_ON(!mode->crtc_clock)) { @@ -922,8 +921,6 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); - in_vbl = position >= vbl_start && position < vbl_end; - /* * While in vblank, position will be negative * counting up towards 0 at vbl_end. 
And outside diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index b24a83d43559..6fd5c57e21f6 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -38,7 +38,7 @@ TRACE_EVENT(intel_cpu_fifo_underrun, ); TRACE_EVENT(intel_pch_fifo_underrun, - TP_PROTO(struct drm_i915_private *dev_priv, enum transcoder pch_transcoder), + TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pch_transcoder), TP_ARGS(dev_priv, pch_transcoder), TP_STRUCT__entry( @@ -48,7 +48,7 @@ TRACE_EVENT(intel_pch_fifo_underrun, ), TP_fast_assign( - enum pipe pipe = (enum pipe)pch_transcoder; + enum pipe pipe = pch_transcoder; __entry->pipe = pipe; __entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe); __entry->scanline = intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe)); diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index 1fd61e88cfd0..e811067c7724 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h @@ -59,6 +59,12 @@ struct i915_vma { u32 fence_size; u32 fence_alignment; + /** + * Count of the number of times this vma has been opened by different + * handles (but same file) for execbuf, i.e. the number of aliases + * that exist in the ctx->handle_vmas LUT for this vma. + */ + unsigned int open_count; unsigned int flags; /** * How many users have pinned this object in GTT space. The following diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index d805b6e6fe71..27743be5b768 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c @@ -606,11 +606,6 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder, connector->encoder->base.id, connector->encoder->name); - /* ELD Conn_Type */ - connector->eld[5] &= ~(3 << 2); - if (intel_crtc_has_dp_encoder(crtc_state)) - connector->eld[5] |= (1 << 2); - connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2; if (dev_priv->display.audio_codec_enable) diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 183e87e8ea31..00c6aee0a9a1 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -1163,6 +1163,13 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, is_hdmi = is_dvi && (child->common.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0; is_edp = is_dp && (child->common.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR); + if (port == PORT_A && is_dvi) { + DRM_DEBUG_KMS("VBT claims port A supports DVI%s, ignoring\n", + is_hdmi ? 
"/HDMI" : ""); + is_dvi = false; + is_hdmi = false; + } + info->supports_dvi = is_dvi; info->supports_hdmi = is_hdmi; info->supports_dp = is_dp; diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c index 8e4e829682b9..ff9ecd211abb 100644 --- a/drivers/gpu/drm/i915/intel_color.c +++ b/drivers/gpu/drm/i915/intel_color.c @@ -107,7 +107,7 @@ static void ctm_mult_by_limited(uint64_t *result, int64_t *input) } } -void i9xx_load_ycbcr_conversion_matrix(struct intel_crtc *intel_crtc) +static void i9xx_load_ycbcr_conversion_matrix(struct intel_crtc *intel_crtc) { int pipe = intel_crtc->pipe; struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index 965988f79a55..92c1f8e166dc 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c @@ -216,7 +216,7 @@ static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv) mask = DC_STATE_DEBUG_MASK_MEMORY_UP; - if (IS_BROXTON(dev_priv)) + if (IS_GEN9_LP(dev_priv)) mask |= DC_STATE_DEBUG_MASK_CORES; /* The below bit doesn't need to be cleared ever afterwards */ diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 4b4fd1f8110b..476681d5940c 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -1655,7 +1655,8 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder, out: if (ret && IS_GEN9_LP(dev_priv)) { tmp = I915_READ(BXT_PHY_CTL(port)); - if ((tmp & (BXT_PHY_LANE_POWERDOWN_ACK | + if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK | + BXT_PHY_LANE_POWERDOWN_ACK | BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED) DRM_ERROR("Port %c enabled but PHY powered down? " "(PHY_CTL %08x)\n", port_name(port), tmp); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 0e93ec201fe3..64f7b51ed97c 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2288,17 +2288,13 @@ void intel_add_fb_offsets(int *x, int *y, } } -/* - * Input tile dimensions and pitch must already be - * rotated to match x and y, and in pixel units. - */ -static u32 _intel_adjust_tile_offset(int *x, int *y, - unsigned int tile_width, - unsigned int tile_height, - unsigned int tile_size, - unsigned int pitch_tiles, - u32 old_offset, - u32 new_offset) +static u32 __intel_adjust_tile_offset(int *x, int *y, + unsigned int tile_width, + unsigned int tile_height, + unsigned int tile_size, + unsigned int pitch_tiles, + u32 old_offset, + u32 new_offset) { unsigned int pitch_pixels = pitch_tiles * tile_width; unsigned int tiles; @@ -2319,18 +2315,13 @@ static u32 _intel_adjust_tile_offset(int *x, int *y, return new_offset; } -/* - * Adjust the tile offset by moving the difference into - * the x/y offsets. 
- */ -static u32 intel_adjust_tile_offset(int *x, int *y, - const struct intel_plane_state *state, int plane, - u32 old_offset, u32 new_offset) +static u32 _intel_adjust_tile_offset(int *x, int *y, + const struct drm_framebuffer *fb, int plane, + unsigned int rotation, + u32 old_offset, u32 new_offset) { - const struct drm_i915_private *dev_priv = to_i915(state->base.plane->dev); - const struct drm_framebuffer *fb = state->base.fb; + const struct drm_i915_private *dev_priv = to_i915(fb->dev); unsigned int cpp = fb->format->cpp[plane]; - unsigned int rotation = state->base.rotation; unsigned int pitch = intel_fb_pitch(fb, plane, rotation); WARN_ON(new_offset > old_offset); @@ -2349,9 +2340,9 @@ static u32 intel_adjust_tile_offset(int *x, int *y, pitch_tiles = pitch / (tile_width * cpp); } - _intel_adjust_tile_offset(x, y, tile_width, tile_height, - tile_size, pitch_tiles, - old_offset, new_offset); + __intel_adjust_tile_offset(x, y, tile_width, tile_height, + tile_size, pitch_tiles, + old_offset, new_offset); } else { old_offset += *y * pitch + *x * cpp; @@ -2362,6 +2353,19 @@ static u32 intel_adjust_tile_offset(int *x, int *y, return new_offset; } +/* + * Adjust the tile offset by moving the difference into + * the x/y offsets. + */ +static u32 intel_adjust_tile_offset(int *x, int *y, + const struct intel_plane_state *state, int plane, + u32 old_offset, u32 new_offset) +{ + return _intel_adjust_tile_offset(x, y, state->base.fb, plane, + state->base.rotation, + old_offset, new_offset); +} + /* * Computes the linear offset to the base tile and adjusts * x, y. bytes per pixel is assumed to be a power-of-two. @@ -2413,9 +2417,9 @@ static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv, offset = (tile_rows * pitch_tiles + tiles) * tile_size; offset_aligned = offset & ~alignment; - _intel_adjust_tile_offset(x, y, tile_width, tile_height, - tile_size, pitch_tiles, - offset, offset_aligned); + __intel_adjust_tile_offset(x, y, tile_width, tile_height, + tile_size, pitch_tiles, + offset, offset_aligned); } else { offset = *y * pitch + *x * cpp; offset_aligned = offset & ~alignment; @@ -2447,16 +2451,24 @@ u32 intel_compute_tile_offset(int *x, int *y, rotation, alignment); } -/* Convert the fb->offset[] linear offset into x/y offsets */ -static void intel_fb_offset_to_xy(int *x, int *y, - const struct drm_framebuffer *fb, int plane) +/* Convert the fb->offset[] into x/y offsets */ +static int intel_fb_offset_to_xy(int *x, int *y, + const struct drm_framebuffer *fb, int plane) { - unsigned int cpp = fb->format->cpp[plane]; - unsigned int pitch = fb->pitches[plane]; - u32 linear_offset = fb->offsets[plane]; + struct drm_i915_private *dev_priv = to_i915(fb->dev); - *y = linear_offset / pitch; - *x = linear_offset % pitch / cpp; + if (fb->modifier != DRM_FORMAT_MOD_LINEAR && + fb->offsets[plane] % intel_tile_size(dev_priv)) + return -EINVAL; + + *x = 0; + *y = 0; + + _intel_adjust_tile_offset(x, y, + fb, plane, DRM_MODE_ROTATE_0, + fb->offsets[plane], 0); + + return 0; } static unsigned int intel_fb_modifier_to_tiling(uint64_t fb_modifier) @@ -2523,12 +2535,18 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv, unsigned int cpp, size; u32 offset; int x, y; + int ret; cpp = fb->format->cpp[i]; width = drm_framebuffer_plane_width(fb->width, fb, i); height = drm_framebuffer_plane_height(fb->height, fb, i); - intel_fb_offset_to_xy(&x, &y, fb, i); + ret = intel_fb_offset_to_xy(&x, &y, fb, i); + if (ret) { + DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n", + i, fb->offsets[i]); + 
return ret; + } if ((fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) && i == 1) { @@ -2539,11 +2557,13 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv, int ccs_x, ccs_y; intel_tile_dims(fb, i, &tile_width, &tile_height); + tile_width *= hsub; + tile_height *= vsub; - ccs_x = (x * hsub) % (tile_width * hsub); - ccs_y = (y * vsub) % (tile_height * vsub); - main_x = intel_fb->normal[0].x % (tile_width * hsub); - main_y = intel_fb->normal[0].y % (tile_height * vsub); + ccs_x = (x * hsub) % tile_width; + ccs_y = (y * vsub) % tile_height; + main_x = intel_fb->normal[0].x % tile_width; + main_y = intel_fb->normal[0].y % tile_height; /* * CCS doesn't have its own x/y offset register, so the intra CCS tile @@ -2569,7 +2589,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv, * fb layout agrees with the fence layout. We already check that the * fb stride matches the fence stride elsewhere. */ - if (i915_gem_object_is_tiled(intel_fb->obj) && + if (i == 0 && i915_gem_object_is_tiled(intel_fb->obj) && (x + width) * cpp > fb->pitches[i]) { DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n", i, fb->offsets[i]); @@ -2632,10 +2652,10 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv, * We only keep the x/y offsets, so push all of the * gtt offset into the x/y offsets. */ - _intel_adjust_tile_offset(&x, &y, - tile_width, tile_height, - tile_size, pitch_tiles, - gtt_offset_rotated * tile_size, 0); + __intel_adjust_tile_offset(&x, &y, + tile_width, tile_height, + tile_size, pitch_tiles, + gtt_offset_rotated * tile_size, 0); gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height; @@ -12339,7 +12359,6 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) struct drm_crtc_state *old_crtc_state, *new_crtc_state; struct drm_crtc *crtc; struct intel_crtc_state *intel_cstate; - bool hw_check = intel_state->modeset; u64 put_domains[I915_MAX_PIPES] = {}; unsigned crtc_vblank_mask = 0; int i; @@ -12356,7 +12375,6 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) if (needs_modeset(new_crtc_state) || to_intel_crtc_state(new_crtc_state)->update_pipe) { - hw_check = true; put_domains[to_intel_crtc(crtc)->pipe] = modeset_get_crtc_power_domains(crtc, @@ -14010,7 +14028,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, if (mode_cmd->handles[i] != mode_cmd->handles[0]) { DRM_DEBUG_KMS("bad plane %d handle\n", i); - return -EINVAL; + goto err; } stride_alignment = intel_fb_stride_alignment(fb, i); diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 4fd4853b2250..64134947c0aa 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -5273,7 +5273,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev, * seems sufficient to avoid this problem. 
*/ if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) { - vbt.t11_t12 = max_t(u16, vbt.t11_t12, 800 * 10); + vbt.t11_t12 = max_t(u16, vbt.t11_t12, 900 * 10); DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n", vbt.t11_t12); } diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c index 09b670929786..de38d014ed39 100644 --- a/drivers/gpu/drm/i915/intel_dpio_phy.c +++ b/drivers/gpu/drm/i915/intel_dpio_phy.c @@ -208,12 +208,6 @@ static const struct bxt_ddi_phy_info glk_ddi_phy_info[] = { }, }; -static u32 bxt_phy_port_mask(const struct bxt_ddi_phy_info *phy_info) -{ - return (phy_info->dual_channel * BIT(phy_info->channel[DPIO_CH1].port)) | - BIT(phy_info->channel[DPIO_CH0].port); -} - static const struct bxt_ddi_phy_info * bxt_get_phy_list(struct drm_i915_private *dev_priv, int *count) { @@ -313,7 +307,6 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv, enum dpio_phy phy) { const struct bxt_ddi_phy_info *phy_info; - enum port port; phy_info = bxt_get_phy_info(dev_priv, phy); @@ -335,19 +328,6 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv, return false; } - for_each_port_masked(port, bxt_phy_port_mask(phy_info)) { - u32 tmp = I915_READ(BXT_PHY_CTL(port)); - - if (tmp & BXT_PHY_CMNLANE_POWERDOWN_ACK) { - DRM_DEBUG_DRIVER("DDI PHY %d powered, but common lane " - "for port %c powered down " - "(PHY_CTL %08x)\n", - phy, port_name(port), tmp); - - return false; - } - } - return true; } diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index f0c11aec5ea5..7442891762be 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c @@ -892,8 +892,6 @@ static void intel_dsi_disable(struct intel_encoder *encoder, struct intel_crtc_state *old_crtc_state, struct drm_connector_state *old_conn_state) { - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); enum port port; @@ -902,15 +900,6 @@ static void intel_dsi_disable(struct intel_encoder *encoder, intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF); intel_panel_disable_backlight(old_conn_state); - /* - * Disable Device ready before the port shutdown in order - * to avoid split screen - */ - if (IS_BROXTON(dev_priv)) { - for_each_dsi_port(port, intel_dsi->ports) - I915_WRITE(MIPI_DEVICE_READY(port), 0); - } - /* * According to the spec we should send SHUTDOWN before * MIPI_SEQ_DISPLAY_OFF only for v3+ VBTs, but field testing diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index 3fca9fa39a8e..8c8ead2276e0 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c @@ -406,9 +406,7 @@ static void intel_fbc_work_fn(struct work_struct *__work) struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[crtc->pipe]; if (drm_crtc_vblank_get(&crtc->base)) { - DRM_ERROR("vblank not available for FBC on pipe %c\n", - pipe_name(crtc->pipe)); - + /* CRTC is now off, leave FBC deactivated */ mutex_lock(&fbc->lock); work->scheduled = false; mutex_unlock(&fbc->lock); diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c index 5a7cca32c0fa..04689600e337 100644 --- a/drivers/gpu/drm/i915/intel_fifo_underrun.c +++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c @@ -187,11 +187,11 @@ static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev, } static void ibx_set_fifo_underrun_reporting(struct drm_device 
*dev, - enum transcoder pch_transcoder, + enum pipe pch_transcoder, bool enable) { struct drm_i915_private *dev_priv = to_i915(dev); - uint32_t bit = (pch_transcoder == TRANSCODER_A) ? + uint32_t bit = (pch_transcoder == PIPE_A) ? SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER; if (enable) @@ -203,7 +203,7 @@ static void ibx_set_fifo_underrun_reporting(struct drm_device *dev, static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum transcoder pch_transcoder = (enum transcoder) crtc->pipe; + enum pipe pch_transcoder = crtc->pipe; uint32_t serr_int = I915_READ(SERR_INT); lockdep_assert_held(&dev_priv->irq_lock); @@ -215,12 +215,12 @@ static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc) POSTING_READ(SERR_INT); trace_intel_pch_fifo_underrun(dev_priv, pch_transcoder); - DRM_ERROR("pch fifo underrun on pch transcoder %s\n", - transcoder_name(pch_transcoder)); + DRM_ERROR("pch fifo underrun on pch transcoder %c\n", + pipe_name(pch_transcoder)); } static void cpt_set_fifo_underrun_reporting(struct drm_device *dev, - enum transcoder pch_transcoder, + enum pipe pch_transcoder, bool enable, bool old) { struct drm_i915_private *dev_priv = to_i915(dev); @@ -238,8 +238,8 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev, if (old && I915_READ(SERR_INT) & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) { - DRM_ERROR("uncleared pch fifo underrun on pch transcoder %s\n", - transcoder_name(pch_transcoder)); + DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n", + pipe_name(pch_transcoder)); } } } @@ -395,8 +395,8 @@ void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, if (intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, false)) { trace_intel_pch_fifo_underrun(dev_priv, pch_transcoder); - DRM_ERROR("PCH transcoder %s FIFO underrun\n", - transcoder_name(pch_transcoder)); + DRM_ERROR("PCH transcoder %c FIFO underrun\n", + pipe_name(pch_transcoder)); } } diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 6698826954e1..eb5827110d8f 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -649,7 +649,7 @@ static void gmbus_unlock_bus(struct i2c_adapter *adapter, mutex_unlock(&dev_priv->gmbus_mutex); } -const struct i2c_lock_operations gmbus_lock_ops = { +static const struct i2c_lock_operations gmbus_lock_ops = { .lock_bus = gmbus_lock_bus, .trylock_bus = gmbus_trylock_bus, .unlock_bus = gmbus_unlock_bus, diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c index 951e834dd274..28a778b785ac 100644 --- a/drivers/gpu/drm/i915/intel_modes.c +++ b/drivers/gpu/drm/i915/intel_modes.c @@ -30,6 +30,21 @@ #include "intel_drv.h" #include "i915_drv.h" +static void intel_connector_update_eld_conn_type(struct drm_connector *connector) +{ + u8 conn_type; + + if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || + connector->connector_type == DRM_MODE_CONNECTOR_eDP) { + conn_type = DRM_ELD_CONN_TYPE_DP; + } else { + conn_type = DRM_ELD_CONN_TYPE_HDMI; + } + + connector->eld[DRM_ELD_SAD_COUNT_CONN_TYPE] &= ~DRM_ELD_CONN_TYPE_MASK; + connector->eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= conn_type; +} + /** * intel_connector_update_modes - update connector from edid * @connector: DRM connector device to use @@ -44,6 +59,8 @@ int intel_connector_update_modes(struct drm_connector *connector, ret = drm_add_edid_modes(connector, edid); drm_edid_to_eld(connector, edid); + 
intel_connector_update_eld_conn_type(connector); + return ret; } diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index a17b1de7d7e0..3b1c5d783ee7 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -1699,6 +1699,8 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused) if (!panel->backlight.max) return -ENODEV; + panel->backlight.min = get_backlight_min_vbt(connector); + val = bxt_get_backlight(connector); val = intel_panel_compute_brightness(connector, val); panel->backlight.level = clamp(val, panel->backlight.min, @@ -1735,6 +1737,8 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused) if (!panel->backlight.max) return -ENODEV; + panel->backlight.min = get_backlight_min_vbt(connector); + val = bxt_get_backlight(connector); val = intel_panel_compute_brightness(connector, val); panel->backlight.level = clamp(val, panel->backlight.min, diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index b66d8e136aa3..b3a087cb0860 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -2782,6 +2782,9 @@ static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume /* 6. Enable DBUF */ gen9_dbuf_enable(dev_priv); + + if (resume && dev_priv->csr.dmc_payload) + intel_csr_load_program(dev_priv); } #undef CNL_PROCMON_IDX diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 3dc38c2ef4c3..29a3b0f5bec7 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -2980,7 +2980,7 @@ static void proxy_unlock_bus(struct i2c_adapter *adapter, sdvo->i2c->lock_ops->unlock_bus(sdvo->i2c, flags); } -const struct i2c_lock_operations proxy_lock_ops = { +static const struct i2c_lock_operations proxy_lock_ops = { .lock_bus = proxy_lock_bus, .trylock_bus = proxy_trylock_bus, .unlock_bus = proxy_unlock_bus, diff --git a/drivers/gpu/drm/i915/selftests/i915_random.c b/drivers/gpu/drm/i915/selftests/i915_random.c index d044bf9a6feb..222c511bea49 100644 --- a/drivers/gpu/drm/i915/selftests/i915_random.c +++ b/drivers/gpu/drm/i915/selftests/i915_random.c @@ -62,7 +62,7 @@ unsigned int *i915_random_order(unsigned int count, struct rnd_state *state) { unsigned int *order, i; - order = kmalloc_array(count, sizeof(*order), GFP_TEMPORARY); + order = kmalloc_array(count, sizeof(*order), GFP_KERNEL); if (!order) return order; diff --git a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c index 7276194c04f7..828904b7d468 100644 --- a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c @@ -117,12 +117,12 @@ static int igt_random_insert_remove(void *arg) mock_engine_reset(engine); - waiters = kvmalloc_array(count, sizeof(*waiters), GFP_TEMPORARY); + waiters = kvmalloc_array(count, sizeof(*waiters), GFP_KERNEL); if (!waiters) goto out_engines; bitmap = kcalloc(DIV_ROUND_UP(count, BITS_PER_LONG), sizeof(*bitmap), - GFP_TEMPORARY); + GFP_KERNEL); if (!bitmap) goto out_waiters; @@ -187,12 +187,12 @@ static int igt_insert_complete(void *arg) mock_engine_reset(engine); - waiters = kvmalloc_array(count, sizeof(*waiters), GFP_TEMPORARY); + waiters = kvmalloc_array(count, sizeof(*waiters), GFP_KERNEL); if (!waiters) goto out_engines; bitmap = kcalloc(DIV_ROUND_UP(count, BITS_PER_LONG), sizeof(*bitmap), - GFP_TEMPORARY); + GFP_KERNEL); if (!bitmap) goto 
out_waiters; @@ -368,7 +368,7 @@ static int igt_wakeup(void *arg) mock_engine_reset(engine); - waiters = kvmalloc_array(count, sizeof(*waiters), GFP_TEMPORARY); + waiters = kvmalloc_array(count, sizeof(*waiters), GFP_KERNEL); if (!waiters) goto out_engines; diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c index 2d0fef2cfca6..3cac22eb47ce 100644 --- a/drivers/gpu/drm/i915/selftests/intel_uncore.c +++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c @@ -127,7 +127,7 @@ static int intel_uncore_check_forcewake_domains(struct drm_i915_private *dev_pri return 0; valid = kzalloc(BITS_TO_LONGS(FW_RANGE) * sizeof(*valid), - GFP_TEMPORARY); + GFP_KERNEL); if (!valid) return -ENOMEM; diff --git a/drivers/gpu/drm/lib/drm_random.c b/drivers/gpu/drm/lib/drm_random.c index 7b12a68c3b54..a78c4b483e8d 100644 --- a/drivers/gpu/drm/lib/drm_random.c +++ b/drivers/gpu/drm/lib/drm_random.c @@ -28,7 +28,7 @@ unsigned int *drm_random_order(unsigned int count, struct rnd_state *state) { unsigned int *order, i; - order = kmalloc_array(count, sizeof(*order), GFP_TEMPORARY); + order = kmalloc_array(count, sizeof(*order), GFP_KERNEL); if (!order) return order; diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 8a75c0bd8a78..5d0a75d4b249 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -40,7 +40,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, if (sz > SIZE_MAX) return NULL; - submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); + submit = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); if (!submit) return NULL; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c index 4a57defc99b3..1399d923d446 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c @@ -171,7 +171,7 @@ nvkm_gpio_fini(struct nvkm_subdev *subdev, bool suspend) return 0; } -static struct dmi_system_id gpio_reset_ids[] = { +static const struct dmi_system_id gpio_reset_ids[] = { { .ident = "Apple Macbook 10,1", .matches = { diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 14c5613b4388..afbf50d0c08f 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -509,23 +509,25 @@ static void qxl_primary_atomic_update(struct drm_plane *plane, .y2 = qfb->base.height }; - if (!old_state->fb) { - qxl_io_log(qdev, - "create primary fb: %dx%d,%d,%d\n", - bo->surf.width, bo->surf.height, - bo->surf.stride, bo->surf.format); - - qxl_io_create_primary(qdev, 0, bo); - bo->is_primary = true; - return; - - } else { + if (old_state->fb) { qfb_old = to_qxl_framebuffer(old_state->fb); bo_old = gem_to_qxl_bo(qfb_old->obj); + } else { + bo_old = NULL; + } + + if (bo == bo_old) + return; + + if (bo_old && bo_old->is_primary) { + qxl_io_destroy_primary(qdev); bo_old->is_primary = false; } - bo->is_primary = true; + if (!bo->is_primary) { + qxl_io_create_primary(qdev, 0, bo); + bo->is_primary = true; + } qxl_draw_dirty_fb(qdev, qfb, bo, 0, 0, &norect, 1, 1); } @@ -534,13 +536,15 @@ static void qxl_primary_atomic_disable(struct drm_plane *plane, { struct qxl_device *qdev = plane->dev->dev_private; - if (old_state->fb) - { struct qxl_framebuffer *qfb = + if (old_state->fb) { + struct qxl_framebuffer *qfb = to_qxl_framebuffer(old_state->fb); struct qxl_bo *bo = gem_to_qxl_bo(qfb->obj); - 
qxl_io_destroy_primary(qdev); - bo->is_primary = false; + if (bo->is_primary) { + qxl_io_destroy_primary(qdev); + bo->is_primary = false; + } } } @@ -698,14 +702,15 @@ static void qxl_plane_cleanup_fb(struct drm_plane *plane, struct drm_gem_object *obj; struct qxl_bo *user_bo; - if (!plane->state->fb) { - /* we never executed prepare_fb, so there's nothing to + if (!old_state->fb) { + /* + * we never executed prepare_fb, so there's nothing to * unpin. */ return; } - obj = to_qxl_framebuffer(plane->state->fb)->obj; + obj = to_qxl_framebuffer(old_state->fb)->obj; user_bo = gem_to_qxl_bo(obj); qxl_bo_unpin(user_bo); } diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index ec63bc5e9de7..8cbaeec090c9 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -924,7 +924,7 @@ struct radeon_vm_id { struct radeon_vm { struct mutex mutex; - struct rb_root va; + struct rb_root_cached va; /* protecting invalidated and freed */ spinlock_t status_lock; diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 997131d58c7f..ffc10cadcf34 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1663,7 +1663,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, radeon_agp_suspend(rdev); pci_save_state(dev->pdev); - if (freeze && rdev->family >= CHIP_CEDAR) { + if (freeze && rdev->family >= CHIP_CEDAR && !(rdev->flags & RADEON_IS_IGP)) { rdev->asic->asic_reset(rdev, true); pci_restore_state(dev->pdev); } else if (suspend) { diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c index 896f2cf51e4e..1d62288b7ee3 100644 --- a/drivers/gpu/drm/radeon/radeon_mn.c +++ b/drivers/gpu/drm/radeon/radeon_mn.c @@ -50,7 +50,7 @@ struct radeon_mn { /* objects protected by lock */ struct mutex lock; - struct rb_root objects; + struct rb_root_cached objects; }; struct radeon_mn_node { @@ -75,8 +75,8 @@ static void radeon_mn_destroy(struct work_struct *work) mutex_lock(&rdev->mn_lock); mutex_lock(&rmn->lock); hash_del(&rmn->node); - rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects, - it.rb) { + rbtree_postorder_for_each_entry_safe(node, next_node, + &rmn->objects.rb_root, it.rb) { interval_tree_remove(&node->it, &rmn->objects); list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) { @@ -205,7 +205,7 @@ static struct radeon_mn *radeon_mn_get(struct radeon_device *rdev) rmn->mm = mm; rmn->mn.ops = &radeon_mn_ops; mutex_init(&rmn->lock); - rmn->objects = RB_ROOT; + rmn->objects = RB_ROOT_CACHED; r = __mmu_notifier_register(&rmn->mn, mm); if (r) diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index 5e82b408d522..e5c0e635e371 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -1185,7 +1185,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm) vm->ids[i].last_id_use = NULL; } mutex_init(&vm->mutex); - vm->va = RB_ROOT; + vm->va = RB_ROOT_CACHED; spin_lock_init(&vm->status_lock); INIT_LIST_HEAD(&vm->invalidated); INIT_LIST_HEAD(&vm->freed); @@ -1232,10 +1232,11 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm) struct radeon_bo_va *bo_va, *tmp; int i, r; - if (!RB_EMPTY_ROOT(&vm->va)) { + if (!RB_EMPTY_ROOT(&vm->va.rb_root)) { dev_err(rdev->dev, "still active bo inside vm\n"); } - rbtree_postorder_for_each_entry_safe(bo_va, tmp, &vm->va, it.rb) { + rbtree_postorder_for_each_entry_safe(bo_va, tmp, + 
&vm->va.rb_root, it.rb) { interval_tree_remove(&bo_va->it, &vm->va); r = radeon_bo_reserve(bo_va->bo, false); if (!r) { diff --git a/drivers/gpu/drm/selftests/test-drm_mm.c b/drivers/gpu/drm/selftests/test-drm_mm.c index dfdd858eda0a..86eb4c185a28 100644 --- a/drivers/gpu/drm/selftests/test-drm_mm.c +++ b/drivers/gpu/drm/selftests/test-drm_mm.c @@ -1627,7 +1627,7 @@ static int igt_topdown(void *ignored) goto err; bitmap = kzalloc(count / BITS_PER_LONG * sizeof(unsigned long), - GFP_TEMPORARY); + GFP_KERNEL); if (!bitmap) goto err_nodes; @@ -1741,7 +1741,7 @@ static int igt_bottomup(void *ignored) goto err; bitmap = kzalloc(count / BITS_PER_LONG * sizeof(unsigned long), - GFP_TEMPORARY); + GFP_KERNEL); if (!bitmap) goto err_nodes; diff --git a/drivers/gpu/drm/sun4i/Kconfig b/drivers/gpu/drm/sun4i/Kconfig index 06f05302ee75..882d85db9053 100644 --- a/drivers/gpu/drm/sun4i/Kconfig +++ b/drivers/gpu/drm/sun4i/Kconfig @@ -26,7 +26,7 @@ config DRM_SUN4I_HDMI_CEC bool "Allwinner A10 HDMI CEC Support" depends on DRM_SUN4I_HDMI select CEC_CORE - depends on CEC_PIN + select CEC_PIN help Choose this option if you have an Allwinner SoC with an HDMI controller and want to use CEC. diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi.h b/drivers/gpu/drm/sun4i/sun4i_hdmi.h index 1457750988da..a1f8cba251a2 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi.h +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi.h @@ -15,7 +15,7 @@ #include #include -#include +#include #define SUN4I_HDMI_CTRL_REG 0x004 #define SUN4I_HDMI_CTRL_ENABLE BIT(31) diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c index 9ea6cd5a1370..3cf1a6932fac 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c @@ -302,26 +302,29 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master, hdmi->mod_clk = devm_clk_get(dev, "mod"); if (IS_ERR(hdmi->mod_clk)) { dev_err(dev, "Couldn't get the HDMI mod clock\n"); - return PTR_ERR(hdmi->mod_clk); + ret = PTR_ERR(hdmi->mod_clk); + goto err_disable_bus_clk; } clk_prepare_enable(hdmi->mod_clk); hdmi->pll0_clk = devm_clk_get(dev, "pll-0"); if (IS_ERR(hdmi->pll0_clk)) { dev_err(dev, "Couldn't get the HDMI PLL 0 clock\n"); - return PTR_ERR(hdmi->pll0_clk); + ret = PTR_ERR(hdmi->pll0_clk); + goto err_disable_mod_clk; } hdmi->pll1_clk = devm_clk_get(dev, "pll-1"); if (IS_ERR(hdmi->pll1_clk)) { dev_err(dev, "Couldn't get the HDMI PLL 1 clock\n"); - return PTR_ERR(hdmi->pll1_clk); + ret = PTR_ERR(hdmi->pll1_clk); + goto err_disable_mod_clk; } ret = sun4i_tmds_create(hdmi); if (ret) { dev_err(dev, "Couldn't create the TMDS clock\n"); - return ret; + goto err_disable_mod_clk; } writel(SUN4I_HDMI_CTRL_ENABLE, hdmi->base + SUN4I_HDMI_CTRL_REG); @@ -362,7 +365,7 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master, ret = sun4i_hdmi_i2c_create(dev, hdmi); if (ret) { dev_err(dev, "Couldn't create the HDMI I2C adapter\n"); - return ret; + goto err_disable_mod_clk; } drm_encoder_helper_add(&hdmi->encoder, @@ -422,6 +425,10 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master, drm_encoder_cleanup(&hdmi->encoder); err_del_i2c_adapter: i2c_del_adapter(hdmi->i2c); +err_disable_mod_clk: + clk_disable_unprepare(hdmi->mod_clk); +err_disable_bus_clk: + clk_disable_unprepare(hdmi->bus_clk); return ret; } @@ -434,6 +441,8 @@ static void sun4i_hdmi_unbind(struct device *dev, struct device *master, drm_connector_cleanup(&hdmi->connector); drm_encoder_cleanup(&hdmi->encoder); i2c_del_adapter(hdmi->i2c); + 
clk_disable_unprepare(hdmi->mod_clk); + clk_disable_unprepare(hdmi->bus_clk); } static const struct component_ops sun4i_hdmi_ops = { diff --git a/drivers/gpu/drm/tegra/trace.h b/drivers/gpu/drm/tegra/trace.h index e9b7cdad5c4c..5a1ab4046e92 100644 --- a/drivers/gpu/drm/tegra/trace.h +++ b/drivers/gpu/drm/tegra/trace.h @@ -63,6 +63,6 @@ DEFINE_EVENT(register_access, sor_readl, /* This part must be outside protection */ #undef TRACE_INCLUDE_PATH -#define TRACE_INCLUDE_PATH . +#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/tegra #define TRACE_INCLUDE_FILE trace #include diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index cba11f13d994..180ce6296416 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -109,8 +109,8 @@ static ssize_t ttm_bo_global_show(struct kobject *kobj, struct ttm_bo_global *glob = container_of(kobj, struct ttm_bo_global, kobj); - return snprintf(buffer, PAGE_SIZE, "%lu\n", - (unsigned long) atomic_read(&glob->bo_count)); + return snprintf(buffer, PAGE_SIZE, "%d\n", + atomic_read(&glob->bo_count)); } static struct attribute *ttm_bo_global_attrs[] = { diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index d0459b392e5e..c934ad5b3903 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -469,6 +469,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, * TODO: Explicit member copy would probably be better here. */ + atomic_inc(&bo->glob->bo_count); INIT_LIST_HEAD(&fbo->ddestroy); INIT_LIST_HEAD(&fbo->lru); INIT_LIST_HEAD(&fbo->swap); diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c index ed9c0ea5b026..b1eeb4839bfc 100644 --- a/drivers/hid/hid-alps.c +++ b/drivers/hid/hid-alps.c @@ -52,8 +52,30 @@ #define ADDRESS_U1_PAD_BTN 0x00800052 #define ADDRESS_U1_SP_BTN 0x0080009F +#define T4_INPUT_REPORT_LEN sizeof(struct t4_input_report) +#define T4_FEATURE_REPORT_LEN T4_INPUT_REPORT_LEN +#define T4_FEATURE_REPORT_ID 7 +#define T4_CMD_REGISTER_READ 0x08 +#define T4_CMD_REGISTER_WRITE 0x07 + +#define T4_ADDRESS_BASE 0xC2C0 +#define PRM_SYS_CONFIG_1 (T4_ADDRESS_BASE + 0x0002) +#define T4_PRM_FEED_CONFIG_1 (T4_ADDRESS_BASE + 0x0004) +#define T4_PRM_FEED_CONFIG_4 (T4_ADDRESS_BASE + 0x001A) +#define T4_PRM_ID_CONFIG_3 (T4_ADDRESS_BASE + 0x00B0) + + +#define T4_FEEDCFG4_ADVANCED_ABS_ENABLE 0x01 +#define T4_I2C_ABS 0x78 + +#define T4_COUNT_PER_ELECTRODE 256 #define MAX_TOUCHES 5 +enum dev_num { + U1, + T4, + UNKNOWN, +}; /** * struct u1_data * @@ -61,43 +83,173 @@ * @input2: pointer to the kernel input2 device * @hdev: pointer to the struct hid_device * - * @dev_ctrl: device control parameter * @dev_type: device type - * @sen_line_num_x: number of sensor line of X - * @sen_line_num_y: number of sensor line of Y - * @pitch_x: sensor pitch of X - * @pitch_y: sensor pitch of Y - * @resolution: resolution - * @btn_info: button information + * @max_fingers: total number of fingers + * @has_sp: boolean of sp existense + * @sp_btn_info: button information * @x_active_len_mm: active area length of X (mm) * @y_active_len_mm: active area length of Y (mm) * @x_max: maximum x coordinate value * @y_max: maximum y coordinate value + * @x_min: minimum x coordinate value + * @y_min: minimum y coordinate value * @btn_cnt: number of buttons * @sp_btn_cnt: number of stick buttons */ -struct u1_dev { +struct alps_dev { struct input_dev *input; struct input_dev *input2; struct hid_device *hdev; - u8 dev_ctrl; - u8 dev_type; - u8 sen_line_num_x; - u8 
sen_line_num_y; - u8 pitch_x; - u8 pitch_y; - u8 resolution; - u8 btn_info; + enum dev_num dev_type; + u8 max_fingers; + u8 has_sp; u8 sp_btn_info; u32 x_active_len_mm; u32 y_active_len_mm; u32 x_max; u32 y_max; + u32 x_min; + u32 y_min; u32 btn_cnt; u32 sp_btn_cnt; }; +struct t4_contact_data { + u8 palm; + u8 x_lo; + u8 x_hi; + u8 y_lo; + u8 y_hi; +}; + +struct t4_input_report { + u8 reportID; + u8 numContacts; + struct t4_contact_data contact[5]; + u8 button; + u8 track[5]; + u8 zx[5], zy[5]; + u8 palmTime[5]; + u8 kilroy; + u16 timeStamp; +}; + +static u16 t4_calc_check_sum(u8 *buffer, + unsigned long offset, unsigned long length) +{ + u16 sum1 = 0xFF, sum2 = 0xFF; + unsigned long i = 0; + + if (offset + length >= 50) + return 0; + + while (length > 0) { + u32 tlen = length > 20 ? 20 : length; + + length -= tlen; + + do { + sum1 += buffer[offset + i]; + sum2 += sum1; + i++; + } while (--tlen > 0); + + sum1 = (sum1 & 0xFF) + (sum1 >> 8); + sum2 = (sum2 & 0xFF) + (sum2 >> 8); + } + + sum1 = (sum1 & 0xFF) + (sum1 >> 8); + sum2 = (sum2 & 0xFF) + (sum2 >> 8); + + return(sum2 << 8 | sum1); +} + +static int t4_read_write_register(struct hid_device *hdev, u32 address, + u8 *read_val, u8 write_val, bool read_flag) +{ + int ret; + u16 check_sum; + u8 *input; + u8 *readbuf; + + input = kzalloc(T4_FEATURE_REPORT_LEN, GFP_KERNEL); + if (!input) + return -ENOMEM; + + input[0] = T4_FEATURE_REPORT_ID; + if (read_flag) { + input[1] = T4_CMD_REGISTER_READ; + input[8] = 0x00; + } else { + input[1] = T4_CMD_REGISTER_WRITE; + input[8] = write_val; + } + put_unaligned_le32(address, input + 2); + input[6] = 1; + input[7] = 0; + + /* Calculate the checksum */ + check_sum = t4_calc_check_sum(input, 1, 8); + input[9] = (u8)check_sum; + input[10] = (u8)(check_sum >> 8); + input[11] = 0; + + ret = hid_hw_raw_request(hdev, T4_FEATURE_REPORT_ID, input, + T4_FEATURE_REPORT_LEN, + HID_FEATURE_REPORT, HID_REQ_SET_REPORT); + + if (ret < 0) { + dev_err(&hdev->dev, "failed to read command (%d)\n", ret); + goto exit; + } + + readbuf = kzalloc(T4_FEATURE_REPORT_LEN, GFP_KERNEL); + if (read_flag) { + if (!readbuf) { + ret = -ENOMEM; + goto exit; + } + + ret = hid_hw_raw_request(hdev, T4_FEATURE_REPORT_ID, readbuf, + T4_FEATURE_REPORT_LEN, + HID_FEATURE_REPORT, HID_REQ_GET_REPORT); + if (ret < 0) { + dev_err(&hdev->dev, "failed read register (%d)\n", ret); + goto exit_readbuf; + } + + if (*(u32 *)&readbuf[6] != address) { + dev_err(&hdev->dev, "read register address error (%x,%x)\n", + *(u32 *)&readbuf[6], address); + goto exit_readbuf; + } + + if (*(u16 *)&readbuf[10] != 1) { + dev_err(&hdev->dev, "read register size error (%x)\n", + *(u16 *)&readbuf[10]); + goto exit_readbuf; + } + + check_sum = t4_calc_check_sum(readbuf, 6, 7); + if (*(u16 *)&readbuf[13] != check_sum) { + dev_err(&hdev->dev, "read register checksum error (%x,%x)\n", + *(u16 *)&readbuf[13], check_sum); + goto exit_readbuf; + } + + *read_val = readbuf[12]; + } + + ret = 0; + +exit_readbuf: + kfree(readbuf); +exit: + kfree(input); + return ret; +} + static int u1_read_write_register(struct hid_device *hdev, u32 address, u8 *read_val, u8 write_val, bool read_flag) { @@ -165,21 +317,60 @@ static int u1_read_write_register(struct hid_device *hdev, u32 address, return ret; } -static int alps_raw_event(struct hid_device *hdev, - struct hid_report *report, u8 *data, int size) +static int t4_raw_event(struct alps_dev *hdata, u8 *data, int size) +{ + unsigned int x, y, z; + int i; + struct t4_input_report *p_report = (struct t4_input_report *)data; + + if (!data) + 
return 0; + for (i = 0; i < hdata->max_fingers; i++) { + x = p_report->contact[i].x_hi << 8 | p_report->contact[i].x_lo; + y = p_report->contact[i].y_hi << 8 | p_report->contact[i].y_lo; + y = hdata->y_max - y + hdata->y_min; + z = (p_report->contact[i].palm < 0x80 && + p_report->contact[i].palm > 0) * 62; + if (x == 0xffff) { + x = 0; + y = 0; + z = 0; + } + input_mt_slot(hdata->input, i); + + input_mt_report_slot_state(hdata->input, + MT_TOOL_FINGER, z != 0); + + if (!z) + continue; + + input_report_abs(hdata->input, ABS_MT_POSITION_X, x); + input_report_abs(hdata->input, ABS_MT_POSITION_Y, y); + input_report_abs(hdata->input, ABS_MT_PRESSURE, z); + } + input_mt_sync_frame(hdata->input); + + input_report_key(hdata->input, BTN_LEFT, p_report->button); + + input_sync(hdata->input); + return 1; +} + +static int u1_raw_event(struct alps_dev *hdata, u8 *data, int size) { unsigned int x, y, z; int i; short sp_x, sp_y; - struct u1_dev *hdata = hid_get_drvdata(hdev); + if (!data) + return 0; switch (data[0]) { case U1_MOUSE_REPORT_ID: break; case U1_FEATURE_REPORT_ID: break; case U1_ABSOLUTE_REPORT_ID: - for (i = 0; i < MAX_TOUCHES; i++) { + for (i = 0; i < hdata->max_fingers; i++) { u8 *contact = &data[i * 5]; x = get_unaligned_le16(contact + 3); @@ -241,25 +432,218 @@ static int alps_raw_event(struct hid_device *hdev, return 0; } -#ifdef CONFIG_PM -static int alps_post_reset(struct hid_device *hdev) +static int alps_raw_event(struct hid_device *hdev, + struct hid_report *report, u8 *data, int size) { - return u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1, - NULL, U1_TP_ABS_MODE | U1_SP_ABS_MODE, false); + int ret = 0; + struct alps_dev *hdata = hid_get_drvdata(hdev); + + switch (hdev->product) { + case HID_PRODUCT_ID_T4_BTNLESS: + ret = t4_raw_event(hdata, data, size); + break; + default: + ret = u1_raw_event(hdata, data, size); + break; + } + return ret; } -static int alps_post_resume(struct hid_device *hdev) +static int __maybe_unused alps_post_reset(struct hid_device *hdev) { - return u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1, - NULL, U1_TP_ABS_MODE | U1_SP_ABS_MODE, false); + int ret = -1; + struct alps_dev *data = hid_get_drvdata(hdev); + + switch (data->dev_type) { + case T4: + ret = t4_read_write_register(hdev, T4_PRM_FEED_CONFIG_1, + NULL, T4_I2C_ABS, false); + ret = t4_read_write_register(hdev, T4_PRM_FEED_CONFIG_4, + NULL, T4_FEEDCFG4_ADVANCED_ABS_ENABLE, false); + break; + case U1: + ret = u1_read_write_register(hdev, + ADDRESS_U1_DEV_CTRL_1, NULL, + U1_TP_ABS_MODE | U1_SP_ABS_MODE, false); + break; + default: + break; + } + return ret; +} + +static int __maybe_unused alps_post_resume(struct hid_device *hdev) +{ + return alps_post_reset(hdev); +} + +static int u1_init(struct hid_device *hdev, struct alps_dev *pri_data) +{ + int ret; + u8 tmp, dev_ctrl, sen_line_num_x, sen_line_num_y; + u8 pitch_x, pitch_y, resolution; + + /* Device initialization */ + ret = u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1, + &dev_ctrl, 0, true); + if (ret < 0) { + dev_err(&hdev->dev, "failed U1_DEV_CTRL_1 (%d)\n", ret); + goto exit; + } + + dev_ctrl &= ~U1_DISABLE_DEV; + dev_ctrl |= U1_TP_ABS_MODE; + ret = u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1, + NULL, dev_ctrl, false); + if (ret < 0) { + dev_err(&hdev->dev, "failed to change TP mode (%d)\n", ret); + goto exit; + } + + ret = u1_read_write_register(hdev, ADDRESS_U1_NUM_SENS_X, + &sen_line_num_x, 0, true); + if (ret < 0) { + dev_err(&hdev->dev, "failed U1_NUM_SENS_X (%d)\n", ret); + goto exit; + } + + ret = 
u1_read_write_register(hdev, ADDRESS_U1_NUM_SENS_Y, + &sen_line_num_y, 0, true); + if (ret < 0) { + dev_err(&hdev->dev, "failed U1_NUM_SENS_Y (%d)\n", ret); + goto exit; + } + + ret = u1_read_write_register(hdev, ADDRESS_U1_PITCH_SENS_X, + &pitch_x, 0, true); + if (ret < 0) { + dev_err(&hdev->dev, "failed U1_PITCH_SENS_X (%d)\n", ret); + goto exit; + } + + ret = u1_read_write_register(hdev, ADDRESS_U1_PITCH_SENS_Y, + &pitch_y, 0, true); + if (ret < 0) { + dev_err(&hdev->dev, "failed U1_PITCH_SENS_Y (%d)\n", ret); + goto exit; + } + + ret = u1_read_write_register(hdev, ADDRESS_U1_RESO_DWN_ABS, + &resolution, 0, true); + if (ret < 0) { + dev_err(&hdev->dev, "failed U1_RESO_DWN_ABS (%d)\n", ret); + goto exit; + } + pri_data->x_active_len_mm = + (pitch_x * (sen_line_num_x - 1)) / 10; + pri_data->y_active_len_mm = + (pitch_y * (sen_line_num_y - 1)) / 10; + + pri_data->x_max = + (resolution << 2) * (sen_line_num_x - 1); + pri_data->x_min = 1; + pri_data->y_max = + (resolution << 2) * (sen_line_num_y - 1); + pri_data->y_min = 1; + + ret = u1_read_write_register(hdev, ADDRESS_U1_PAD_BTN, + &tmp, 0, true); + if (ret < 0) { + dev_err(&hdev->dev, "failed U1_PAD_BTN (%d)\n", ret); + goto exit; + } + if ((tmp & 0x0F) == (tmp & 0xF0) >> 4) { + pri_data->btn_cnt = (tmp & 0x0F); + } else { + /* Button pad */ + pri_data->btn_cnt = 1; + } + + pri_data->has_sp = 0; + /* Check StickPointer device */ + ret = u1_read_write_register(hdev, ADDRESS_U1_DEVICE_TYP, + &tmp, 0, true); + if (ret < 0) { + dev_err(&hdev->dev, "failed U1_DEVICE_TYP (%d)\n", ret); + goto exit; + } + if (tmp & U1_DEVTYPE_SP_SUPPORT) { + dev_ctrl |= U1_SP_ABS_MODE; + ret = u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1, + NULL, dev_ctrl, false); + if (ret < 0) { + dev_err(&hdev->dev, "failed SP mode (%d)\n", ret); + goto exit; + } + + ret = u1_read_write_register(hdev, ADDRESS_U1_SP_BTN, + &pri_data->sp_btn_info, 0, true); + if (ret < 0) { + dev_err(&hdev->dev, "failed U1_SP_BTN (%d)\n", ret); + goto exit; + } + pri_data->has_sp = 1; + } + pri_data->max_fingers = 5; +exit: + return ret; +} + +static int T4_init(struct hid_device *hdev, struct alps_dev *pri_data) +{ + int ret; + u8 tmp, sen_line_num_x, sen_line_num_y; + + ret = t4_read_write_register(hdev, T4_PRM_ID_CONFIG_3, &tmp, 0, true); + if (ret < 0) { + dev_err(&hdev->dev, "failed T4_PRM_ID_CONFIG_3 (%d)\n", ret); + goto exit; + } + sen_line_num_x = 16 + ((tmp & 0x0F) | (tmp & 0x08 ? 0xF0 : 0)); + sen_line_num_y = 12 + (((tmp & 0xF0) >> 4) | (tmp & 0x80 ? 
0xF0 : 0)); + + pri_data->x_max = sen_line_num_x * T4_COUNT_PER_ELECTRODE; + pri_data->x_min = T4_COUNT_PER_ELECTRODE; + pri_data->y_max = sen_line_num_y * T4_COUNT_PER_ELECTRODE; + pri_data->y_min = T4_COUNT_PER_ELECTRODE; + pri_data->x_active_len_mm = pri_data->y_active_len_mm = 0; + pri_data->btn_cnt = 1; + + ret = t4_read_write_register(hdev, PRM_SYS_CONFIG_1, &tmp, 0, true); + if (ret < 0) { + dev_err(&hdev->dev, "failed PRM_SYS_CONFIG_1 (%d)\n", ret); + goto exit; + } + tmp |= 0x02; + ret = t4_read_write_register(hdev, PRM_SYS_CONFIG_1, NULL, tmp, false); + if (ret < 0) { + dev_err(&hdev->dev, "failed PRM_SYS_CONFIG_1 (%d)\n", ret); + goto exit; + } + + ret = t4_read_write_register(hdev, T4_PRM_FEED_CONFIG_1, + NULL, T4_I2C_ABS, false); + if (ret < 0) { + dev_err(&hdev->dev, "failed T4_PRM_FEED_CONFIG_1 (%d)\n", ret); + goto exit; + } + + ret = t4_read_write_register(hdev, T4_PRM_FEED_CONFIG_4, NULL, + T4_FEEDCFG4_ADVANCED_ABS_ENABLE, false); + if (ret < 0) { + dev_err(&hdev->dev, "failed T4_PRM_FEED_CONFIG_4 (%d)\n", ret); + goto exit; + } + pri_data->max_fingers = 5; + pri_data->has_sp = 0; +exit: + return ret; } -#endif /* CONFIG_PM */ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi) { - struct u1_dev *data = hid_get_drvdata(hdev); + struct alps_dev *data = hid_get_drvdata(hdev); struct input_dev *input = hi->input, *input2; - struct u1_dev devInfo; int ret; int res_x, res_y, i; @@ -272,91 +656,29 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi) /* Allow incoming hid reports */ hid_device_io_start(hdev); - - /* Device initialization */ - ret = u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1, - &devInfo.dev_ctrl, 0, true); - if (ret < 0) { - dev_err(&hdev->dev, "failed U1_DEV_CTRL_1 (%d)\n", ret); - goto exit; + switch (data->dev_type) { + case T4: + ret = T4_init(hdev, data); + break; + case U1: + ret = u1_init(hdev, data); + break; + default: + break; } - devInfo.dev_ctrl &= ~U1_DISABLE_DEV; - devInfo.dev_ctrl |= U1_TP_ABS_MODE; - ret = u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1, - NULL, devInfo.dev_ctrl, false); - if (ret < 0) { - dev_err(&hdev->dev, "failed to change TP mode (%d)\n", ret); + if (ret) goto exit; - } - - ret = u1_read_write_register(hdev, ADDRESS_U1_NUM_SENS_X, - &devInfo.sen_line_num_x, 0, true); - if (ret < 0) { - dev_err(&hdev->dev, "failed U1_NUM_SENS_X (%d)\n", ret); - goto exit; - } - - ret = u1_read_write_register(hdev, ADDRESS_U1_NUM_SENS_Y, - &devInfo.sen_line_num_y, 0, true); - if (ret < 0) { - dev_err(&hdev->dev, "failed U1_NUM_SENS_Y (%d)\n", ret); - goto exit; - } - - ret = u1_read_write_register(hdev, ADDRESS_U1_PITCH_SENS_X, - &devInfo.pitch_x, 0, true); - if (ret < 0) { - dev_err(&hdev->dev, "failed U1_PITCH_SENS_X (%d)\n", ret); - goto exit; - } - - ret = u1_read_write_register(hdev, ADDRESS_U1_PITCH_SENS_Y, - &devInfo.pitch_y, 0, true); - if (ret < 0) { - dev_err(&hdev->dev, "failed U1_PITCH_SENS_Y (%d)\n", ret); - goto exit; - } - - ret = u1_read_write_register(hdev, ADDRESS_U1_RESO_DWN_ABS, - &devInfo.resolution, 0, true); - if (ret < 0) { - dev_err(&hdev->dev, "failed U1_RESO_DWN_ABS (%d)\n", ret); - goto exit; - } - - ret = u1_read_write_register(hdev, ADDRESS_U1_PAD_BTN, - &devInfo.btn_info, 0, true); - if (ret < 0) { - dev_err(&hdev->dev, "failed U1_PAD_BTN (%d)\n", ret); - goto exit; - } - - /* Check StickPointer device */ - ret = u1_read_write_register(hdev, ADDRESS_U1_DEVICE_TYP, - &devInfo.dev_type, 0, true); - if (ret < 0) { - dev_err(&hdev->dev, "failed 
U1_DEVICE_TYP (%d)\n", ret); - goto exit; - } - - devInfo.x_active_len_mm = - (devInfo.pitch_x * (devInfo.sen_line_num_x - 1)) / 10; - devInfo.y_active_len_mm = - (devInfo.pitch_y * (devInfo.sen_line_num_y - 1)) / 10; - - devInfo.x_max = - (devInfo.resolution << 2) * (devInfo.sen_line_num_x - 1); - devInfo.y_max = - (devInfo.resolution << 2) * (devInfo.sen_line_num_y - 1); __set_bit(EV_ABS, input->evbit); - input_set_abs_params(input, ABS_MT_POSITION_X, 1, devInfo.x_max, 0, 0); - input_set_abs_params(input, ABS_MT_POSITION_Y, 1, devInfo.y_max, 0, 0); + input_set_abs_params(input, ABS_MT_POSITION_X, + data->x_min, data->x_max, 0, 0); + input_set_abs_params(input, ABS_MT_POSITION_Y, + data->y_min, data->y_max, 0, 0); - if (devInfo.x_active_len_mm && devInfo.y_active_len_mm) { - res_x = (devInfo.x_max - 1) / devInfo.x_active_len_mm; - res_y = (devInfo.y_max - 1) / devInfo.y_active_len_mm; + if (data->x_active_len_mm && data->y_active_len_mm) { + res_x = (data->x_max - 1) / data->x_active_len_mm; + res_y = (data->y_max - 1) / data->y_active_len_mm; input_abs_set_res(input, ABS_MT_POSITION_X, res_x); input_abs_set_res(input, ABS_MT_POSITION_Y, res_y); @@ -364,49 +686,25 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi) input_set_abs_params(input, ABS_MT_PRESSURE, 0, 64, 0, 0); - input_mt_init_slots(input, MAX_TOUCHES, INPUT_MT_POINTER); + input_mt_init_slots(input, data->max_fingers, INPUT_MT_POINTER); __set_bit(EV_KEY, input->evbit); - if ((devInfo.btn_info & 0x0F) == (devInfo.btn_info & 0xF0) >> 4) { - devInfo.btn_cnt = (devInfo.btn_info & 0x0F); - } else { - /* Button pad */ - devInfo.btn_cnt = 1; - __set_bit(INPUT_PROP_BUTTONPAD, input->propbit); - } - for (i = 0; i < devInfo.btn_cnt; i++) + if (data->btn_cnt == 1) + __set_bit(INPUT_PROP_BUTTONPAD, input->propbit); + + for (i = 0; i < data->btn_cnt; i++) __set_bit(BTN_LEFT + i, input->keybit); - /* Stick device initialization */ - if (devInfo.dev_type & U1_DEVTYPE_SP_SUPPORT) { - + if (data->has_sp) { input2 = input_allocate_device(); if (!input2) { - ret = -ENOMEM; + input_free_device(input2); goto exit; } data->input2 = input2; - - devInfo.dev_ctrl |= U1_SP_ABS_MODE; - ret = u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1, - NULL, devInfo.dev_ctrl, false); - if (ret < 0) { - dev_err(&hdev->dev, "failed SP mode (%d)\n", ret); - input_free_device(input2); - goto exit; - } - - ret = u1_read_write_register(hdev, ADDRESS_U1_SP_BTN, - &devInfo.sp_btn_info, 0, true); - if (ret < 0) { - dev_err(&hdev->dev, "failed U1_SP_BTN (%d)\n", ret); - input_free_device(input2); - goto exit; - } - input2->phys = input->phys; input2->name = "DualPoint Stick"; input2->id.bustype = BUS_I2C; @@ -416,8 +714,8 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi) input2->dev.parent = input->dev.parent; __set_bit(EV_KEY, input2->evbit); - devInfo.sp_btn_cnt = (devInfo.sp_btn_info & 0x0F); - for (i = 0; i < devInfo.sp_btn_cnt; i++) + data->sp_btn_cnt = (data->sp_btn_info & 0x0F); + for (i = 0; i < data->sp_btn_cnt; i++) __set_bit(BTN_LEFT + i, input2->keybit); __set_bit(EV_REL, input2->evbit); @@ -426,8 +724,7 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi) __set_bit(INPUT_PROP_POINTER, input2->propbit); __set_bit(INPUT_PROP_POINTING_STICK, input2->propbit); - ret = input_register_device(data->input2); - if (ret) { + if (input_register_device(data->input2)) { input_free_device(input2); goto exit; } @@ -448,10 +745,9 @@ static int alps_input_mapping(struct hid_device 
*hdev, static int alps_probe(struct hid_device *hdev, const struct hid_device_id *id) { - struct u1_dev *data = NULL; + struct alps_dev *data = NULL; int ret; - - data = devm_kzalloc(&hdev->dev, sizeof(struct u1_dev), GFP_KERNEL); + data = devm_kzalloc(&hdev->dev, sizeof(struct alps_dev), GFP_KERNEL); if (!data) return -ENOMEM; @@ -466,6 +762,18 @@ static int alps_probe(struct hid_device *hdev, const struct hid_device_id *id) return ret; } + switch (hdev->product) { + case HID_DEVICE_ID_ALPS_T4_BTNLESS: + data->dev_type = T4; + break; + case HID_DEVICE_ID_ALPS_U1_DUAL: + case HID_DEVICE_ID_ALPS_U1: + data->dev_type = U1; + break; + default: + data->dev_type = UNKNOWN; + } + ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); if (ret) { hid_err(hdev, "hw start failed\n"); @@ -483,6 +791,10 @@ static void alps_remove(struct hid_device *hdev) static const struct hid_device_id alps_id[] = { { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) }, + { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, + USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1) }, + { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, + USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_T4_BTNLESS) }, { } }; MODULE_DEVICE_TABLE(hid, alps_id); diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index e7a45887afc4..1961aa689f32 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1889,6 +1889,9 @@ static const struct hid_device_id hid_have_special_driver[] = { #endif #if IS_ENABLED(CONFIG_HID_ALPS) { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) }, + { HID_I2C_DEVICE(USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) }, + { HID_I2C_DEVICE(USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1) }, + { HID_I2C_DEVICE(USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_T4_BTNLESS) }, #endif #if IS_ENABLED(CONFIG_HID_APPLE) { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) }, diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index c24ebc413223..06697af9e6c1 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -77,6 +77,9 @@ #define HID_DEVICE_ID_ALPS_U1_DUAL 0x120B #define HID_DEVICE_ID_ALPS_U1_DUAL_PTP 0x121F #define HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP 0x1220 +#define HID_DEVICE_ID_ALPS_U1 0x1215 +#define HID_DEVICE_ID_ALPS_T4_BTNLESS 0x120C + #define USB_VENDOR_ID_AMI 0x046b #define USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE 0xff10 diff --git a/drivers/hid/hid-picolcd_cir.c b/drivers/hid/hid-picolcd_cir.c index 8ffbb6f65a65..32747b7f917e 100644 --- a/drivers/hid/hid-picolcd_cir.c +++ b/drivers/hid/hid-picolcd_cir.c @@ -113,10 +113,10 @@ int picolcd_init_cir(struct picolcd_data *data, struct hid_report *report) return -ENOMEM; rdev->priv = data; - rdev->allowed_protocols = RC_BIT_ALL_IR_DECODER; + rdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER; rdev->open = picolcd_cir_open; rdev->close = picolcd_cir_close; - rdev->input_name = data->hdev->name; + rdev->device_name = data->hdev->name; rdev->input_phys = data->hdev->phys; rdev->input_id.bustype = data->hdev->bus; rdev->input_id.vendor = data->hdev->vendor; diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig index c29cd5387a35..50b89ea0e60f 100644 --- a/drivers/hv/Kconfig +++ b/drivers/hv/Kconfig @@ -3,6 +3,7 @@ menu "Microsoft Hyper-V guest support" config HYPERV tristate "Microsoft Hyper-V client drivers" depends on X86 && ACPI && PCI && X86_LOCAL_APIC && HYPERVISOR_GUEST + select PARAVIRT help Select this option to run Linux as a Hyper-V client operating system. 
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index 63ac1c6a825f..efd5db743319 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c @@ -661,9 +661,23 @@ void vmbus_close(struct vmbus_channel *channel) } EXPORT_SYMBOL_GPL(vmbus_close); -int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer, - u32 bufferlen, u64 requestid, - enum vmbus_packet_type type, u32 flags) +/** + * vmbus_sendpacket() - Send the specified buffer on the given channel + * @channel: Pointer to vmbus_channel structure. + * @buffer: Pointer to the buffer you want to receive the data into. + * @bufferlen: Maximum size of what the the buffer will hold + * @requestid: Identifier of the request + * @type: Type of packet that is being send e.g. negotiate, time + * packet etc. + * + * Sends data in @buffer directly to hyper-v via the vmbus + * This will send the data unparsed to hyper-v. + * + * Mainly used by Hyper-V drivers. + */ +int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer, + u32 bufferlen, u64 requestid, + enum vmbus_packet_type type, u32 flags) { struct vmpacket_descriptor desc; u32 packetlen = sizeof(struct vmpacket_descriptor) + bufferlen; @@ -690,42 +704,19 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer, return hv_ringbuffer_write(channel, bufferlist, num_vecs); } -EXPORT_SYMBOL(vmbus_sendpacket_ctl); - -/** - * vmbus_sendpacket() - Send the specified buffer on the given channel - * @channel: Pointer to vmbus_channel structure. - * @buffer: Pointer to the buffer you want to receive the data into. - * @bufferlen: Maximum size of what the the buffer will hold - * @requestid: Identifier of the request - * @type: Type of packet that is being send e.g. negotiate, time - * packet etc. - * - * Sends data in @buffer directly to hyper-v via the vmbus - * This will send the data unparsed to hyper-v. - * - * Mainly used by Hyper-V drivers. - */ -int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer, - u32 bufferlen, u64 requestid, - enum vmbus_packet_type type, u32 flags) -{ - return vmbus_sendpacket_ctl(channel, buffer, bufferlen, requestid, - type, flags); -} EXPORT_SYMBOL(vmbus_sendpacket); /* - * vmbus_sendpacket_pagebuffer_ctl - Send a range of single-page buffer + * vmbus_sendpacket_pagebuffer - Send a range of single-page buffer * packets using a GPADL Direct packet type. This interface allows you * to control notifying the host. This will be useful for sending * batched data. Also the sender can control the send flags * explicitly. 
*/ -int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel, - struct hv_page_buffer pagebuffers[], - u32 pagecount, void *buffer, u32 bufferlen, - u64 requestid, u32 flags) +int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel, + struct hv_page_buffer pagebuffers[], + u32 pagecount, void *buffer, u32 bufferlen, + u64 requestid) { int i; struct vmbus_channel_packet_page_buffer desc; @@ -750,7 +741,7 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel, /* Setup the descriptor */ desc.type = VM_PKT_DATA_USING_GPA_DIRECT; - desc.flags = flags; + desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED; desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */ desc.length8 = (u16)(packetlen_aligned >> 3); desc.transactionid = requestid; @@ -771,24 +762,6 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel, return hv_ringbuffer_write(channel, bufferlist, 3); } -EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer_ctl); - -/* - * vmbus_sendpacket_pagebuffer - Send a range of single-page buffer - * packets using a GPADL Direct packet type. - */ -int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel, - struct hv_page_buffer pagebuffers[], - u32 pagecount, void *buffer, u32 bufferlen, - u64 requestid) -{ - u32 flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED; - - return vmbus_sendpacket_pagebuffer_ctl(channel, pagebuffers, pagecount, - buffer, bufferlen, - requestid, flags); - -} EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer); /* @@ -828,62 +801,6 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel, } EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc); -/* - * vmbus_sendpacket_multipagebuffer - Send a multi-page buffer packet - * using a GPADL Direct packet type. - */ -int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel, - struct hv_multipage_buffer *multi_pagebuffer, - void *buffer, u32 bufferlen, u64 requestid) -{ - struct vmbus_channel_packet_multipage_buffer desc; - u32 descsize; - u32 packetlen; - u32 packetlen_aligned; - struct kvec bufferlist[3]; - u64 aligned_data = 0; - u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset, - multi_pagebuffer->len); - - if (pfncount > MAX_MULTIPAGE_BUFFER_COUNT) - return -EINVAL; - - /* - * Adjust the size down since vmbus_channel_packet_multipage_buffer is - * the largest size we support - */ - descsize = sizeof(struct vmbus_channel_packet_multipage_buffer) - - ((MAX_MULTIPAGE_BUFFER_COUNT - pfncount) * - sizeof(u64)); - packetlen = descsize + bufferlen; - packetlen_aligned = ALIGN(packetlen, sizeof(u64)); - - - /* Setup the descriptor */ - desc.type = VM_PKT_DATA_USING_GPA_DIRECT; - desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED; - desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */ - desc.length8 = (u16)(packetlen_aligned >> 3); - desc.transactionid = requestid; - desc.rangecount = 1; - - desc.range.len = multi_pagebuffer->len; - desc.range.offset = multi_pagebuffer->offset; - - memcpy(desc.range.pfn_array, multi_pagebuffer->pfn_array, - pfncount * sizeof(u64)); - - bufferlist[0].iov_base = &desc; - bufferlist[0].iov_len = descsize; - bufferlist[1].iov_base = buffer; - bufferlist[1].iov_len = bufferlen; - bufferlist[2].iov_base = &aligned_data; - bufferlist[2].iov_len = (packetlen_aligned - packetlen); - - return hv_ringbuffer_write(channel, bufferlist, 3); -} -EXPORT_SYMBOL_GPL(vmbus_sendpacket_multipagebuffer); - /** * vmbus_recvpacket() - Retrieve the user packet on the specified channel * @channel: Pointer to vmbus_channel structure. 
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 968af173c4c1..bcbb031f7263 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -603,7 +603,7 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type) */ channel->numa_node = 0; channel->target_cpu = 0; - channel->target_vp = hv_context.vp_index[0]; + channel->target_vp = hv_cpu_number_to_vp_number(0); return; } @@ -687,7 +687,7 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type) } channel->target_cpu = cur_cpu; - channel->target_vp = hv_context.vp_index[cur_cpu]; + channel->target_vp = hv_cpu_number_to_vp_number(cur_cpu); } static void vmbus_wait_for_unload(void) @@ -809,21 +809,12 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr) /* * Setup state for signalling the host. */ - newchannel->sig_event = (struct hv_input_signal_event *) - (ALIGN((unsigned long) - &newchannel->sig_buf, - HV_HYPERCALL_PARAM_ALIGN)); - - newchannel->sig_event->connectionid.asu32 = 0; - newchannel->sig_event->connectionid.u.id = VMBUS_EVENT_CONNECTION_ID; - newchannel->sig_event->flag_number = 0; - newchannel->sig_event->rsvdz = 0; + newchannel->sig_event = VMBUS_EVENT_CONNECTION_ID; if (vmbus_proto_version != VERSION_WS2008) { newchannel->is_dedicated_interrupt = (offer->is_dedicated_interrupt != 0); - newchannel->sig_event->connectionid.u.id = - offer->connection_id; + newchannel->sig_event = offer->connection_id; } memcpy(&newchannel->offermsg, offer, @@ -945,14 +936,10 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) void vmbus_hvsock_device_unregister(struct vmbus_channel *channel) { - mutex_lock(&vmbus_connection.channel_mutex); - BUG_ON(!is_hvsock_channel(channel)); channel->rescind = true; vmbus_device_unregister(channel->device_obj); - - mutex_unlock(&vmbus_connection.channel_mutex); } EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister); @@ -1251,8 +1238,7 @@ struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary) return outgoing_channel; } - cur_cpu = hv_context.vp_index[get_cpu()]; - put_cpu(); + cur_cpu = hv_cpu_number_to_vp_number(smp_processor_id()); list_for_each_safe(cur, tmp, &primary->sc_list) { cur_channel = list_entry(cur, struct vmbus_channel, sc_list); if (cur_channel->state != CHANNEL_OPENED_STATE) diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c index 59c11ff90d12..f41901f80b64 100644 --- a/drivers/hv/connection.c +++ b/drivers/hv/connection.c @@ -32,6 +32,8 @@ #include #include #include +#include + #include "hyperv_vmbus.h" @@ -94,7 +96,8 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, * the CPU attempting to connect may not be CPU 0. 
*/ if (version >= VERSION_WIN8_1) { - msg->target_vcpu = hv_context.vp_index[smp_processor_id()]; + msg->target_vcpu = + hv_cpu_number_to_vp_number(smp_processor_id()); vmbus_connection.connect_cpu = smp_processor_id(); } else { msg->target_vcpu = 0; @@ -406,6 +409,6 @@ void vmbus_set_event(struct vmbus_channel *channel) if (!channel->is_dedicated_interrupt) vmbus_send_interrupt(child_relid); - hv_do_hypercall(HVCALL_SIGNAL_EVENT, channel->sig_event, NULL); + hv_do_fast_hypercall8(HVCALL_SIGNAL_EVENT, channel->sig_event); } EXPORT_SYMBOL_GPL(vmbus_set_event); diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c index 2ea12207caa0..8267439dd1ee 100644 --- a/drivers/hv/hv.c +++ b/drivers/hv/hv.c @@ -234,7 +234,6 @@ int hv_synic_init(unsigned int cpu) union hv_synic_siefp siefp; union hv_synic_sint shared_sint; union hv_synic_scontrol sctrl; - u64 vp_index; /* Setup the Synic's message page */ hv_get_simp(simp.as_uint64); @@ -275,14 +274,6 @@ int hv_synic_init(unsigned int cpu) hv_context.synic_initialized = true; - /* - * Setup the mapping between Hyper-V's notion - * of cpuid and Linux' notion of cpuid. - * This array will be indexed using Linux cpuid. - */ - hv_get_vp_index(vp_index); - hv_context.vp_index[cpu] = (u32)vp_index; - /* * Register the per-cpu clockevent source. */ diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c index daa75bd41f86..2364281d8593 100644 --- a/drivers/hv/hv_fcopy.c +++ b/drivers/hv/hv_fcopy.c @@ -170,6 +170,10 @@ static void fcopy_send_data(struct work_struct *dummy) out_src = smsg_out; break; + case WRITE_TO_FILE: + out_src = fcopy_transaction.fcopy_msg; + out_len = sizeof(struct hv_do_fcopy); + break; default: out_src = fcopy_transaction.fcopy_msg; out_len = fcopy_transaction.recv_len; diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h index 1b6a5e0dfa75..49569f8fe038 100644 --- a/drivers/hv/hyperv_vmbus.h +++ b/drivers/hv/hyperv_vmbus.h @@ -228,17 +228,6 @@ struct hv_context { struct hv_per_cpu_context __percpu *cpu_context; - /* - * Hypervisor's notion of virtual processor ID is different from - * Linux' notion of CPU ID. This information can only be retrieved - * in the context of the calling CPU. Setup a map for easy access - * to this information: - * - * vp_index[a] is the Hyper-V's processor ID corresponding to - * Linux cpuid 'a'. - */ - u32 vp_index[NR_CPUS]; - /* * To manage allocations in a NUMA node. * Array indexed by numa node ID. diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 43160a2eafe0..a9d49f6f6501 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -1454,23 +1454,6 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size) } EXPORT_SYMBOL_GPL(vmbus_free_mmio); -/** - * vmbus_cpu_number_to_vp_number() - Map CPU to VP. - * @cpu_number: CPU number in Linux terms - * - * This function returns the mapping between the Linux processor - * number and the hypervisor's virtual processor number, useful - * in making hypercalls and such that talk about specific - * processors. 
- * - * Return: Virtual processor number in Hyper-V terms - */ -int vmbus_cpu_number_to_vp_number(int cpu_number) -{ - return hv_context.vp_index[cpu_number]; -} -EXPORT_SYMBOL_GPL(vmbus_cpu_number_to_vp_number); - static int vmbus_acpi_add(struct acpi_device *device) { acpi_status result; diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c index 579bdf93be43..14a94d90c028 100644 --- a/drivers/hwmon/acpi_power_meter.c +++ b/drivers/hwmon/acpi_power_meter.c @@ -973,7 +973,7 @@ static int __init enable_cap_knobs(const struct dmi_system_id *d) return 0; } -static struct dmi_system_id __initdata pm_dmi_table[] = { +static const struct dmi_system_id pm_dmi_table[] __initconst = { { enable_cap_knobs, "IBM Active Energy Manager", { diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c index 76c34f4fde13..5c677ba44014 100644 --- a/drivers/hwmon/applesmc.c +++ b/drivers/hwmon/applesmc.c @@ -1247,7 +1247,7 @@ static int applesmc_dmi_match(const struct dmi_system_id *id) * Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1". * So we need to put "Apple MacBook Pro" before "Apple MacBook". */ -static __initdata struct dmi_system_id applesmc_whitelist[] = { +static const struct dmi_system_id applesmc_whitelist[] __initconst = { { applesmc_dmi_match, "Apple MacBook Air", { DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir") }, diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c index 3189246302a6..c7c9e95e58a8 100644 --- a/drivers/hwmon/dell-smm-hwmon.c +++ b/drivers/hwmon/dell-smm-hwmon.c @@ -890,7 +890,7 @@ static const struct i8k_config_data i8k_config_data[] = { }, }; -static struct dmi_system_id i8k_dmi_table[] __initdata = { +static const struct dmi_system_id i8k_dmi_table[] __initconst = { { .ident = "Dell Inspiron", .matches = { @@ -1013,7 +1013,7 @@ MODULE_DEVICE_TABLE(dmi, i8k_dmi_table); * of affected Dell machines for which we disallow I8K_SMM_GET_FAN_TYPE call. 
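A note on the hwmon DMI-table changes above (acpi_power_meter, applesmc, dell-smm-hwmon): the tables become const and move from __initdata to __initconst, the annotation intended for read-only init-time data, so the match data still gets discarded after boot but now lives in a read-only section. The shape of such a table, as a kernel-C sketch in which the table name, ident, match strings and init function are made up for illustration and nothing is quoted from the patch:

#include <linux/dmi.h>
#include <linux/errno.h>
#include <linux/init.h>

/* Hypothetical whitelist; module_init() wiring is omitted. */
static const struct dmi_system_id example_whitelist[] __initconst = {
	{
		.ident = "Example Vendor Laptop",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Example Laptop"),
		},
	},
	{ }	/* terminating all-zero entry */
};

static int __init example_probe_dmi(void)
{
	/* dmi_check_system() returns the number of matching entries. */
	if (!dmi_check_system(example_whitelist))
		return -ENODEV;
	return 0;
}

Because both the table and the function carry init annotations, everything here is reclaimed once boot-time probing is done.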
* See bug: https://bugzilla.kernel.org/show_bug.cgi?id=100121 */ -static struct dmi_system_id i8k_blacklist_fan_type_dmi_table[] __initdata = { +static const struct dmi_system_id i8k_blacklist_fan_type_dmi_table[] __initconst = { { .ident = "Dell Studio XPS 8000", .matches = { diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c index 9c0dbb8191ad..e1be61095532 100644 --- a/drivers/hwmon/xgene-hwmon.c +++ b/drivers/hwmon/xgene-hwmon.c @@ -630,7 +630,7 @@ static int xgene_hwmon_probe(struct platform_device *pdev) sizeof(struct slimpro_resp_msg) * ASYNC_MSG_FIFO_SIZE, GFP_KERNEL); if (rc) - goto out_mbox_free; + return -ENOMEM; INIT_WORK(&ctx->workq, xgene_hwmon_evt_work); @@ -646,7 +646,8 @@ static int xgene_hwmon_probe(struct platform_device *pdev) if (IS_ERR(ctx->mbox_chan)) { dev_err(&pdev->dev, "SLIMpro mailbox channel request failed\n"); - return -ENODEV; + rc = -ENODEV; + goto out_mbox_free; } } else { struct acpi_pcct_hw_reduced *cppc_ss; @@ -654,7 +655,8 @@ static int xgene_hwmon_probe(struct platform_device *pdev) if (device_property_read_u32(&pdev->dev, "pcc-channel", &ctx->mbox_idx)) { dev_err(&pdev->dev, "no pcc-channel property\n"); - return -ENODEV; + rc = -ENODEV; + goto out_mbox_free; } cl->rx_callback = xgene_hwmon_pcc_rx_cb; @@ -662,7 +664,8 @@ static int xgene_hwmon_probe(struct platform_device *pdev) if (IS_ERR(ctx->mbox_chan)) { dev_err(&pdev->dev, "PPC channel request failed\n"); - return -ENODEV; + rc = -ENODEV; + goto out_mbox_free; } /* @@ -675,13 +678,13 @@ static int xgene_hwmon_probe(struct platform_device *pdev) if (!cppc_ss) { dev_err(&pdev->dev, "PPC subspace not found\n"); rc = -ENODEV; - goto out_mbox_free; + goto out; } if (!ctx->mbox_chan->mbox->txdone_irq) { dev_err(&pdev->dev, "PCC IRQ not supported\n"); rc = -ENODEV; - goto out_mbox_free; + goto out; } /* @@ -696,14 +699,14 @@ static int xgene_hwmon_probe(struct platform_device *pdev) } else { dev_err(&pdev->dev, "Failed to get PCC comm region\n"); rc = -ENODEV; - goto out_mbox_free; + goto out; } if (!ctx->pcc_comm_addr) { dev_err(&pdev->dev, "Failed to ioremap PCC comm region\n"); rc = -ENOMEM; - goto out_mbox_free; + goto out; } /* diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c index bc9cebc30526..c2a2ce8ee541 100644 --- a/drivers/hwtracing/intel_th/pci.c +++ b/drivers/hwtracing/intel_th/pci.c @@ -143,6 +143,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x19e1), .driver_data = (kernel_ulong_t)0, }, + { + /* Lewisburg PCH */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa1a6), + .driver_data = (kernel_ulong_t)0, + }, { /* Gemini Lake */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e), @@ -158,6 +163,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x9da6), .driver_data = (kernel_ulong_t)&intel_th_2x, }, + { + /* Cedar Fork PCH */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x18e1), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, { 0 }, }; diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c index 9414900575d8..f129869e05a9 100644 --- a/drivers/hwtracing/stm/core.c +++ b/drivers/hwtracing/stm/core.c @@ -1119,7 +1119,7 @@ void stm_source_unregister_device(struct stm_source_data *data) stm_source_link_drop(src); - device_destroy(&stm_source_class, src->dev.devt); + device_unregister(&src->dev); } EXPORT_SYMBOL_GPL(stm_source_unregister_device); diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 65fa29591d21..45a3f3ca29b3 100644 
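The xgene-hwmon hunk above only reshuffles error paths: each failure point now either returns directly or jumps to the cleanup label that matches what has actually been acquired by that point, rather than always running the full unwind. The underlying goto idiom, as a standalone C sketch whose "resources" are plain allocations and have nothing to do with the driver:

#include <stdio.h>
#include <stdlib.h>

/*
 * Acquire two resources in order and release, on any failure, exactly
 * the ones obtained so far, in reverse order.  fail_step selects where
 * the simulated failure happens.
 */
static int do_transaction(int fail_step)
{
	int ret = 0;
	int *a, *b;

	a = malloc(sizeof(*a));
	if (!a)
		return -1;		/* nothing to unwind yet */

	if (fail_step == 1) {
		ret = -2;
		goto out_free_a;	/* only 'a' exists */
	}

	b = malloc(sizeof(*b));
	if (!b) {
		ret = -1;
		goto out_free_a;
	}

	if (fail_step == 2) {
		ret = -3;
		goto out_free_b;	/* both exist, unwind both */
	}

	/* success path would use 'a' and 'b' here */

out_free_b:
	free(b);
out_free_a:
	free(a);
	return ret;
}

int main(void)
{
	printf("success: %d\n", do_transaction(0));
	printf("early failure: %d\n", do_transaction(1));
	printf("late failure: %d\n", do_transaction(2));
	return 0;
}

The label a failure jumps to encodes how far setup got; jumping to a label that frees something never acquired, or returning without unwinding at all, is exactly the class of bug the hunk fixes.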
--- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -131,6 +131,7 @@ config I2C_I801 Gemini Lake (SOC) Cannon Lake-H (PCH) Cannon Lake-LP (PCH) + Cedar Fork (PCH) This driver can also be built as a module. If so, the module will be called i2c-i801. @@ -189,6 +190,14 @@ config I2C_PIIX4 This driver can also be built as a module. If so, the module will be called i2c-piix4. +config I2C_CHT_WC + tristate "Intel Cherry Trail Whiskey Cove PMIC smbus controller" + depends on INTEL_SOC_PMIC_CHTWC + help + If you say yes to this option, support will be included for the + SMBus controller found in the Intel Cherry Trail Whiskey Cove PMIC + found on some Intel Cherry Trail systems. + config I2C_NFORCE2 tristate "Nvidia nForce2, nForce3 and nForce4" depends on PCI @@ -328,6 +337,16 @@ config I2C_POWERMAC comment "I2C system bus drivers (mostly embedded / system-on-chip)" +config I2C_ALTERA + tristate "Altera Soft IP I2C" + depends on (ARCH_SOCFPGA || NIOS2) && OF + help + If you say yes to this option, support will be included for the + Altera Soft IP I2C interfaces on SoCFPGA and Nios2 architectures. + + This driver can also be built as a module. If so, the module + will be called i2c-altera. + config I2C_ASPEED tristate "Aspeed I2C Controller" depends on ARCH_ASPEED || COMPILE_TEST @@ -900,6 +919,13 @@ config I2C_SIRF This driver can also be built as a module. If so, the module will be called i2c-sirf. +config I2C_SPRD + bool "Spreadtrum I2C interface" + depends on I2C=y && ARCH_SPRD + help + If you say yes to this option, support will be included for the + Spreadtrum I2C interface. + config I2C_ST tristate "STMicroelectronics SSC I2C support" depends on ARCH_STI @@ -920,6 +946,16 @@ config I2C_STM32F4 This driver can also be built as module. If so, the module will be called i2c-stm32f4. +config I2C_STM32F7 + tristate "STMicroelectronics STM32F7 I2C support" + depends on ARCH_STM32 || COMPILE_TEST + help + Enable this option to add support for STM32 I2C controller embedded + in STM32F7 SoCs. + + This driver can also be built as module. If so, the module + will be called i2c-stm32f7. 
+ config I2C_STU300 tristate "ST Microelectronics DDC I2C interface" depends on MACH_U300 diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index 1b2fc815a4d8..47f3ac9a695a 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -12,6 +12,7 @@ obj-$(CONFIG_I2C_ALI15X3) += i2c-ali15x3.o obj-$(CONFIG_I2C_AMD756) += i2c-amd756.o obj-$(CONFIG_I2C_AMD756_S4882) += i2c-amd756-s4882.o obj-$(CONFIG_I2C_AMD8111) += i2c-amd8111.o +obj-$(CONFIG_I2C_CHT_WC) += i2c-cht-wc.o obj-$(CONFIG_I2C_I801) += i2c-i801.o obj-$(CONFIG_I2C_ISCH) += i2c-isch.o obj-$(CONFIG_I2C_ISMT) += i2c-ismt.o @@ -29,6 +30,7 @@ obj-$(CONFIG_I2C_HYDRA) += i2c-hydra.o obj-$(CONFIG_I2C_POWERMAC) += i2c-powermac.o # Embedded system I2C/SMBus host controller drivers +obj-$(CONFIG_I2C_ALTERA) += i2c-altera.o obj-$(CONFIG_I2C_ASPEED) += i2c-aspeed.o obj-$(CONFIG_I2C_AT91) += i2c-at91.o obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o @@ -89,8 +91,10 @@ obj-$(CONFIG_I2C_SH7760) += i2c-sh7760.o obj-$(CONFIG_I2C_SH_MOBILE) += i2c-sh_mobile.o obj-$(CONFIG_I2C_SIMTEC) += i2c-simtec.o obj-$(CONFIG_I2C_SIRF) += i2c-sirf.o +obj-$(CONFIG_I2C_SPRD) += i2c-sprd.o obj-$(CONFIG_I2C_ST) += i2c-st.o obj-$(CONFIG_I2C_STM32F4) += i2c-stm32f4.o +obj-$(CONFIG_I2C_STM32F7) += i2c-stm32f7.o obj-$(CONFIG_I2C_STU300) += i2c-stu300.o obj-$(CONFIG_I2C_SUN6I_P2WI) += i2c-sun6i-p2wi.o obj-$(CONFIG_I2C_TEGRA) += i2c-tegra.o diff --git a/drivers/i2c/busses/i2c-altera.c b/drivers/i2c/busses/i2c-altera.c new file mode 100644 index 000000000000..f5e1941e65b5 --- /dev/null +++ b/drivers/i2c/busses/i2c-altera.c @@ -0,0 +1,511 @@ +/* + * Copyright Intel Corporation (C) 2017. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + * + * Based on the i2c-axxia.c driver. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define ALTR_I2C_TFR_CMD 0x00 /* Transfer Command register */ +#define ALTR_I2C_TFR_CMD_STA BIT(9) /* send START before byte */ +#define ALTR_I2C_TFR_CMD_STO BIT(8) /* send STOP after byte */ +#define ALTR_I2C_TFR_CMD_RW_D BIT(0) /* Direction of transfer */ +#define ALTR_I2C_RX_DATA 0x04 /* RX data FIFO register */ +#define ALTR_I2C_CTRL 0x08 /* Control register */ +#define ALTR_I2C_CTRL_RXT_SHFT 4 /* RX FIFO Threshold */ +#define ALTR_I2C_CTRL_TCT_SHFT 2 /* TFER CMD FIFO Threshold */ +#define ALTR_I2C_CTRL_BSPEED BIT(1) /* Bus Speed (1=Fast) */ +#define ALTR_I2C_CTRL_EN BIT(0) /* Enable Core (1=Enable) */ +#define ALTR_I2C_ISER 0x0C /* Interrupt Status Enable register */ +#define ALTR_I2C_ISER_RXOF_EN BIT(4) /* Enable RX OVERFLOW IRQ */ +#define ALTR_I2C_ISER_ARB_EN BIT(3) /* Enable ARB LOST IRQ */ +#define ALTR_I2C_ISER_NACK_EN BIT(2) /* Enable NACK DET IRQ */ +#define ALTR_I2C_ISER_RXRDY_EN BIT(1) /* Enable RX Ready IRQ */ +#define ALTR_I2C_ISER_TXRDY_EN BIT(0) /* Enable TX Ready IRQ */ +#define ALTR_I2C_ISR 0x10 /* Interrupt Status register */ +#define ALTR_I2C_ISR_RXOF BIT(4) /* RX OVERFLOW IRQ */ +#define ALTR_I2C_ISR_ARB BIT(3) /* ARB LOST IRQ */ +#define ALTR_I2C_ISR_NACK BIT(2) /* NACK DET IRQ */ +#define ALTR_I2C_ISR_RXRDY BIT(1) /* RX Ready IRQ */ +#define ALTR_I2C_ISR_TXRDY BIT(0) /* TX Ready IRQ */ +#define ALTR_I2C_STATUS 0x14 /* Status register */ +#define ALTR_I2C_STAT_CORE BIT(0) /* Core Status (0=idle) */ +#define ALTR_I2C_TC_FIFO_LVL 0x18 /* Transfer FIFO LVL register */ +#define ALTR_I2C_RX_FIFO_LVL 0x1C /* Receive FIFO LVL register */ +#define ALTR_I2C_SCL_LOW 0x20 /* SCL low count register */ +#define ALTR_I2C_SCL_HIGH 0x24 /* SCL high count register */ +#define ALTR_I2C_SDA_HOLD 0x28 /* SDA hold count register */ + +#define ALTR_I2C_ALL_IRQ (ALTR_I2C_ISR_RXOF | ALTR_I2C_ISR_ARB | \ + ALTR_I2C_ISR_NACK | ALTR_I2C_ISR_RXRDY | \ + ALTR_I2C_ISR_TXRDY) + +#define ALTR_I2C_THRESHOLD 0 /* IRQ Threshold at 1 element */ +#define ALTR_I2C_DFLT_FIFO_SZ 4 +#define ALTR_I2C_TIMEOUT 100000 /* 100ms */ +#define ALTR_I2C_XFER_TIMEOUT (msecs_to_jiffies(250)) + +/** + * altr_i2c_dev - I2C device context + * @base: pointer to register struct + * @msg: pointer to current message + * @msg_len: number of bytes transferred in msg + * @msg_err: error code for completed message + * @msg_complete: xfer completion object + * @dev: device reference + * @adapter: core i2c abstraction + * @i2c_clk: clock reference for i2c input clock + * @bus_clk_rate: current i2c bus clock rate + * @buf: ptr to msg buffer for easier use. + * @fifo_size: size of the FIFO passed in. + * @isr_mask: cached copy of local ISR enables. + * @isr_status: cached copy of local ISR status. + * @lock: spinlock for IRQ synchronization. 
+ */ +struct altr_i2c_dev { + void __iomem *base; + struct i2c_msg *msg; + size_t msg_len; + int msg_err; + struct completion msg_complete; + struct device *dev; + struct i2c_adapter adapter; + struct clk *i2c_clk; + u32 bus_clk_rate; + u8 *buf; + u32 fifo_size; + u32 isr_mask; + u32 isr_status; + spinlock_t lock; /* IRQ synchronization */ +}; + +static void +altr_i2c_int_enable(struct altr_i2c_dev *idev, u32 mask, bool enable) +{ + unsigned long flags; + u32 int_en; + + spin_lock_irqsave(&idev->lock, flags); + + int_en = readl(idev->base + ALTR_I2C_ISER); + if (enable) + idev->isr_mask = int_en | mask; + else + idev->isr_mask = int_en & ~mask; + + writel(idev->isr_mask, idev->base + ALTR_I2C_ISER); + + spin_unlock_irqrestore(&idev->lock, flags); +} + +static void altr_i2c_int_clear(struct altr_i2c_dev *idev, u32 mask) +{ + u32 int_en = readl(idev->base + ALTR_I2C_ISR); + + writel(int_en | mask, idev->base + ALTR_I2C_ISR); +} + +static void altr_i2c_core_disable(struct altr_i2c_dev *idev) +{ + u32 tmp = readl(idev->base + ALTR_I2C_CTRL); + + writel(tmp & ~ALTR_I2C_CTRL_EN, idev->base + ALTR_I2C_CTRL); +} + +static void altr_i2c_core_enable(struct altr_i2c_dev *idev) +{ + u32 tmp = readl(idev->base + ALTR_I2C_CTRL); + + writel(tmp | ALTR_I2C_CTRL_EN, idev->base + ALTR_I2C_CTRL); +} + +static void altr_i2c_reset(struct altr_i2c_dev *idev) +{ + altr_i2c_core_disable(idev); + altr_i2c_core_enable(idev); +} + +static inline void altr_i2c_stop(struct altr_i2c_dev *idev) +{ + writel(ALTR_I2C_TFR_CMD_STO, idev->base + ALTR_I2C_TFR_CMD); +} + +static void altr_i2c_init(struct altr_i2c_dev *idev) +{ + u32 divisor = clk_get_rate(idev->i2c_clk) / idev->bus_clk_rate; + u32 clk_mhz = clk_get_rate(idev->i2c_clk) / 1000000; + u32 tmp = (ALTR_I2C_THRESHOLD << ALTR_I2C_CTRL_RXT_SHFT) | + (ALTR_I2C_THRESHOLD << ALTR_I2C_CTRL_TCT_SHFT); + u32 t_high, t_low; + + if (idev->bus_clk_rate <= 100000) { + tmp &= ~ALTR_I2C_CTRL_BSPEED; + /* Standard mode SCL 50/50 */ + t_high = divisor * 1 / 2; + t_low = divisor * 1 / 2; + } else { + tmp |= ALTR_I2C_CTRL_BSPEED; + /* Fast mode SCL 33/66 */ + t_high = divisor * 1 / 3; + t_low = divisor * 2 / 3; + } + writel(tmp, idev->base + ALTR_I2C_CTRL); + + dev_dbg(idev->dev, "rate=%uHz per_clk=%uMHz -> ratio=1:%u\n", + idev->bus_clk_rate, clk_mhz, divisor); + + /* Reset controller */ + altr_i2c_reset(idev); + + /* SCL High Time */ + writel(t_high, idev->base + ALTR_I2C_SCL_HIGH); + /* SCL Low Time */ + writel(t_low, idev->base + ALTR_I2C_SCL_LOW); + /* SDA Hold Time, 300ns */ + writel(div_u64(300 * clk_mhz, 1000), idev->base + ALTR_I2C_SDA_HOLD); + + /* Mask all master interrupt bits */ + altr_i2c_int_enable(idev, ALTR_I2C_ALL_IRQ, false); +} + +/** + * altr_i2c_transfer - On the last byte to be transmitted, send + * a Stop bit on the last byte. + */ +static void altr_i2c_transfer(struct altr_i2c_dev *idev, u32 data) +{ + /* On the last byte to be transmitted, send STOP */ + if (idev->msg_len == 1) + data |= ALTR_I2C_TFR_CMD_STO; + if (idev->msg_len > 0) + writel(data, idev->base + ALTR_I2C_TFR_CMD); +} + +/** + * altr_i2c_empty_rx_fifo - Fetch data from RX FIFO until end of + * transfer. Send a Stop bit on the last byte. 
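altr_i2c_init() above derives all of its timing registers from the input-clock/bus-clock ratio: a 50/50 SCL duty cycle in standard mode, 33/66 in fast mode, and a 300 ns SDA hold expressed in input-clock cycles. The same arithmetic as a standalone sketch; the 100 MHz input clock below is an assumed example value, not something the patch fixes:

#include <stdio.h>

/*
 * Recompute the values altr_i2c_init() would write to ALTR_I2C_SCL_HIGH,
 * ALTR_I2C_SCL_LOW and ALTR_I2C_SDA_HOLD for a given input clock and bus
 * rate.
 */
static void show_timing(unsigned int clk_hz, unsigned int bus_hz)
{
	unsigned int divisor = clk_hz / bus_hz;
	unsigned int clk_mhz = clk_hz / 1000000;
	unsigned int t_high, t_low;

	if (bus_hz <= 100000) {		/* standard mode: 50/50 duty */
		t_high = divisor / 2;
		t_low = divisor / 2;
	} else {			/* fast mode: 33/66 duty */
		t_high = divisor / 3;
		t_low = divisor * 2 / 3;
	}

	printf("clk=%uHz bus=%uHz -> divisor=%u high=%u low=%u sda_hold=%u\n",
	       clk_hz, bus_hz, divisor, t_high, t_low,
	       300 * clk_mhz / 1000);	/* 300 ns in input-clock cycles */
}

int main(void)
{
	show_timing(100000000, 100000);	/* 100 MHz reference, 100 kHz bus */
	show_timing(100000000, 400000);	/* 100 MHz reference, 400 kHz bus */
	return 0;
}

For the 400 kHz case this prints divisor 250, high 83, low 166 and an SDA hold of 30 cycles, which matches what the driver code above would program.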
+ */ +static void altr_i2c_empty_rx_fifo(struct altr_i2c_dev *idev) +{ + size_t rx_fifo_avail = readl(idev->base + ALTR_I2C_RX_FIFO_LVL); + int bytes_to_transfer = min(rx_fifo_avail, idev->msg_len); + + while (bytes_to_transfer-- > 0) { + *idev->buf++ = readl(idev->base + ALTR_I2C_RX_DATA); + idev->msg_len--; + altr_i2c_transfer(idev, 0); + } +} + +/** + * altr_i2c_fill_tx_fifo - Fill TX FIFO from current message buffer. + * @return: Number of bytes left to transfer. + */ +static int altr_i2c_fill_tx_fifo(struct altr_i2c_dev *idev) +{ + size_t tx_fifo_avail = idev->fifo_size - readl(idev->base + + ALTR_I2C_TC_FIFO_LVL); + int bytes_to_transfer = min(tx_fifo_avail, idev->msg_len); + int ret = idev->msg_len - bytes_to_transfer; + + while (bytes_to_transfer-- > 0) { + altr_i2c_transfer(idev, *idev->buf++); + idev->msg_len--; + } + + return ret; +} + +static irqreturn_t altr_i2c_isr_quick(int irq, void *_dev) +{ + struct altr_i2c_dev *idev = _dev; + irqreturn_t ret = IRQ_HANDLED; + + /* Read IRQ status but only interested in Enabled IRQs. */ + idev->isr_status = readl(idev->base + ALTR_I2C_ISR) & idev->isr_mask; + if (idev->isr_status) + ret = IRQ_WAKE_THREAD; + + return ret; +} + +static irqreturn_t altr_i2c_isr(int irq, void *_dev) +{ + int ret; + bool read, finish = false; + struct altr_i2c_dev *idev = _dev; + u32 status = idev->isr_status; + + if (!idev->msg) { + dev_warn(idev->dev, "unexpected interrupt\n"); + altr_i2c_int_clear(idev, ALTR_I2C_ALL_IRQ); + return IRQ_HANDLED; + } + read = (idev->msg->flags & I2C_M_RD) != 0; + + /* handle Lost Arbitration */ + if (unlikely(status & ALTR_I2C_ISR_ARB)) { + altr_i2c_int_clear(idev, ALTR_I2C_ISR_ARB); + idev->msg_err = -EAGAIN; + finish = true; + } else if (unlikely(status & ALTR_I2C_ISR_NACK)) { + dev_dbg(idev->dev, "Could not get ACK\n"); + idev->msg_err = -ENXIO; + altr_i2c_int_clear(idev, ALTR_I2C_ISR_NACK); + altr_i2c_stop(idev); + finish = true; + } else if (read && unlikely(status & ALTR_I2C_ISR_RXOF)) { + /* handle RX FIFO Overflow */ + altr_i2c_empty_rx_fifo(idev); + altr_i2c_int_clear(idev, ALTR_I2C_ISR_RXRDY); + altr_i2c_stop(idev); + dev_err(idev->dev, "RX FIFO Overflow\n"); + finish = true; + } else if (read && (status & ALTR_I2C_ISR_RXRDY)) { + /* RX FIFO needs service? */ + altr_i2c_empty_rx_fifo(idev); + altr_i2c_int_clear(idev, ALTR_I2C_ISR_RXRDY); + if (!idev->msg_len) + finish = true; + } else if (!read && (status & ALTR_I2C_ISR_TXRDY)) { + /* TX FIFO needs service? 
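altr_i2c_fill_tx_fifo() and altr_i2c_empty_rx_fifo() above both move min(FIFO space or fill level, remaining message bytes) per interrupt, and altr_i2c_transfer() ORs the STOP command bit into the very last byte. A toy model of the TX side only, self-contained and assuming the FIFO has fully drained between refills (a simplification for illustration, not a hardware guarantee):

#include <stdio.h>

#define FIFO_SIZE 4	/* same depth as ALTR_I2C_DFLT_FIFO_SZ */

/* Push at most "space left" bytes; tag the message's final byte with STOP. */
static size_t fill_tx_fifo(const unsigned char *buf, size_t *pos, size_t len,
			   size_t fifo_level)
{
	size_t space = FIFO_SIZE - fifo_level;
	size_t remaining = len - *pos;
	size_t chunk = space < remaining ? space : remaining;
	size_t i;

	for (i = 0; i < chunk; i++) {
		int last = (*pos + 1 == len);

		printf("  write 0x%02x%s\n", buf[*pos], last ? " +STOP" : "");
		(*pos)++;
	}
	return len - *pos;	/* bytes still to send, as the driver returns */
}

int main(void)
{
	const unsigned char msg[] = { 0x10, 0x20, 0x30, 0x40, 0x50, 0x60 };
	size_t pos = 0;
	int round = 0;

	while (pos < sizeof(msg)) {
		printf("TX-ready round %d:\n", ++round);
		fill_tx_fifo(msg, &pos, sizeof(msg), 0);
	}
	return 0;
}

A six-byte message through the four-entry FIFO takes two refill rounds here, and only the final write carries the STOP marker, mirroring the msg_len == 1 test in altr_i2c_transfer().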
*/ + altr_i2c_int_clear(idev, ALTR_I2C_ISR_TXRDY); + if (idev->msg_len > 0) + altr_i2c_fill_tx_fifo(idev); + else + finish = true; + } else { + dev_warn(idev->dev, "Unexpected interrupt: 0x%x\n", status); + altr_i2c_int_clear(idev, ALTR_I2C_ALL_IRQ); + } + + if (finish) { + /* Wait for the Core to finish */ + ret = readl_poll_timeout_atomic(idev->base + ALTR_I2C_STATUS, + status, + !(status & ALTR_I2C_STAT_CORE), + 1, ALTR_I2C_TIMEOUT); + if (ret) + dev_err(idev->dev, "message timeout\n"); + altr_i2c_int_enable(idev, ALTR_I2C_ALL_IRQ, false); + altr_i2c_int_clear(idev, ALTR_I2C_ALL_IRQ); + complete(&idev->msg_complete); + dev_dbg(idev->dev, "Message Complete\n"); + } + + return IRQ_HANDLED; +} + +static int altr_i2c_xfer_msg(struct altr_i2c_dev *idev, struct i2c_msg *msg) +{ + u32 imask = ALTR_I2C_ISR_RXOF | ALTR_I2C_ISR_ARB | ALTR_I2C_ISR_NACK; + unsigned long time_left; + u32 value; + u8 addr = i2c_8bit_addr_from_msg(msg); + + idev->msg = msg; + idev->msg_len = msg->len; + idev->buf = msg->buf; + idev->msg_err = 0; + reinit_completion(&idev->msg_complete); + altr_i2c_core_enable(idev); + + /* Make sure RX FIFO is empty */ + do { + readl(idev->base + ALTR_I2C_RX_DATA); + } while (readl(idev->base + ALTR_I2C_RX_FIFO_LVL)); + + writel(ALTR_I2C_TFR_CMD_STA | addr, idev->base + ALTR_I2C_TFR_CMD); + + if ((msg->flags & I2C_M_RD) != 0) { + imask |= ALTR_I2C_ISER_RXOF_EN | ALTR_I2C_ISER_RXRDY_EN; + altr_i2c_int_enable(idev, imask, true); + /* write the first byte to start the RX */ + altr_i2c_transfer(idev, 0); + } else { + imask |= ALTR_I2C_ISR_TXRDY; + altr_i2c_int_enable(idev, imask, true); + altr_i2c_fill_tx_fifo(idev); + } + + time_left = wait_for_completion_timeout(&idev->msg_complete, + ALTR_I2C_XFER_TIMEOUT); + altr_i2c_int_enable(idev, imask, false); + + value = readl(idev->base + ALTR_I2C_STATUS) & ALTR_I2C_STAT_CORE; + if (value) + dev_err(idev->dev, "Core Status not IDLE...\n"); + + if (time_left == 0) { + idev->msg_err = -ETIMEDOUT; + dev_dbg(idev->dev, "Transaction timed out.\n"); + } + + altr_i2c_core_disable(idev); + + return idev->msg_err; +} + +static int +altr_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) +{ + struct altr_i2c_dev *idev = i2c_get_adapdata(adap); + int i, ret; + + for (i = 0; i < num; i++) { + ret = altr_i2c_xfer_msg(idev, msgs++); + if (ret) + return ret; + } + return num; +} + +static u32 altr_i2c_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static const struct i2c_algorithm altr_i2c_algo = { + .master_xfer = altr_i2c_xfer, + .functionality = altr_i2c_func, +}; + +static int altr_i2c_probe(struct platform_device *pdev) +{ + struct altr_i2c_dev *idev = NULL; + struct resource *res; + int irq, ret; + u32 val; + + idev = devm_kzalloc(&pdev->dev, sizeof(*idev), GFP_KERNEL); + if (!idev) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + idev->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(idev->base)) + return PTR_ERR(idev->base); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "missing interrupt resource\n"); + return irq; + } + + idev->i2c_clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(idev->i2c_clk)) { + dev_err(&pdev->dev, "missing clock\n"); + return PTR_ERR(idev->i2c_clk); + } + + idev->dev = &pdev->dev; + init_completion(&idev->msg_complete); + spin_lock_init(&idev->lock); + + val = device_property_read_u32(idev->dev, "fifo-size", + &idev->fifo_size); + if (val) { + dev_err(&pdev->dev, "FIFO size set to default of %d\n", + 
ALTR_I2C_DFLT_FIFO_SZ); + idev->fifo_size = ALTR_I2C_DFLT_FIFO_SZ; + } + + val = device_property_read_u32(idev->dev, "clock-frequency", + &idev->bus_clk_rate); + if (val) { + dev_err(&pdev->dev, "Default to 100kHz\n"); + idev->bus_clk_rate = 100000; /* default clock rate */ + } + + if (idev->bus_clk_rate > 400000) { + dev_err(&pdev->dev, "invalid clock-frequency %d\n", + idev->bus_clk_rate); + return -EINVAL; + } + + ret = devm_request_threaded_irq(&pdev->dev, irq, altr_i2c_isr_quick, + altr_i2c_isr, IRQF_ONESHOT, + pdev->name, idev); + if (ret) { + dev_err(&pdev->dev, "failed to claim IRQ %d\n", irq); + return ret; + } + + ret = clk_prepare_enable(idev->i2c_clk); + if (ret) { + dev_err(&pdev->dev, "failed to enable clock\n"); + return ret; + } + + altr_i2c_init(idev); + + i2c_set_adapdata(&idev->adapter, idev); + strlcpy(idev->adapter.name, pdev->name, sizeof(idev->adapter.name)); + idev->adapter.owner = THIS_MODULE; + idev->adapter.algo = &altr_i2c_algo; + idev->adapter.dev.parent = &pdev->dev; + idev->adapter.dev.of_node = pdev->dev.of_node; + + platform_set_drvdata(pdev, idev); + + ret = i2c_add_adapter(&idev->adapter); + if (ret) { + clk_disable_unprepare(idev->i2c_clk); + return ret; + } + dev_info(&pdev->dev, "Altera SoftIP I2C Probe Complete\n"); + + return 0; +} + +static int altr_i2c_remove(struct platform_device *pdev) +{ + struct altr_i2c_dev *idev = platform_get_drvdata(pdev); + + clk_disable_unprepare(idev->i2c_clk); + i2c_del_adapter(&idev->adapter); + + return 0; +} + +/* Match table for of_platform binding */ +static const struct of_device_id altr_i2c_of_match[] = { + { .compatible = "altr,softip-i2c-v1.0" }, + {}, +}; +MODULE_DEVICE_TABLE(of, altr_i2c_of_match); + +static struct platform_driver altr_i2c_driver = { + .probe = altr_i2c_probe, + .remove = altr_i2c_remove, + .driver = { + .name = "altera-i2c", + .of_match_table = altr_i2c_of_match, + }, +}; + +module_platform_driver(altr_i2c_driver); + +MODULE_DESCRIPTION("Altera Soft IP I2C bus driver"); +MODULE_AUTHOR("Thor Thayer "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c index 6fdf9231c23c..284f8670dbeb 100644 --- a/drivers/i2c/busses/i2c-aspeed.c +++ b/drivers/i2c/busses/i2c-aspeed.c @@ -53,6 +53,9 @@ #define ASPEED_I2CD_MASTER_EN BIT(0) /* 0x04 : I2CD Clock and AC Timing Control Register #1 */ +#define ASPEED_I2CD_TIME_TBUF_MASK GENMASK(31, 28) +#define ASPEED_I2CD_TIME_THDSTA_MASK GENMASK(27, 24) +#define ASPEED_I2CD_TIME_TACST_MASK GENMASK(23, 20) #define ASPEED_I2CD_TIME_SCL_HIGH_SHIFT 16 #define ASPEED_I2CD_TIME_SCL_HIGH_MASK GENMASK(19, 16) #define ASPEED_I2CD_TIME_SCL_LOW_SHIFT 12 @@ -132,6 +135,7 @@ struct aspeed_i2c_bus { /* Synchronizes I/O mem access to base. */ spinlock_t lock; struct completion cmd_complete; + u32 (*get_clk_reg_val)(u32 divisor); unsigned long parent_clk_frequency; u32 bus_frequency; /* Transaction state. 
*/ @@ -675,7 +679,7 @@ static const struct i2c_algorithm aspeed_i2c_algo = { #endif /* CONFIG_I2C_SLAVE */ }; -static u32 aspeed_i2c_get_clk_reg_val(u32 divisor) +static u32 aspeed_i2c_get_clk_reg_val(u32 clk_high_low_max, u32 divisor) { u32 base_clk, clk_high, clk_low, tmp; @@ -695,16 +699,22 @@ static u32 aspeed_i2c_get_clk_reg_val(u32 divisor) * Thus, * SCL_freq = APB_freq / * ((1 << base_clk) * (clk_high + 1 + clk_low + 1)) - * The documentation recommends clk_high >= 8 and clk_low >= 7 when - * possible; this last constraint gives us the following solution: + * The documentation recommends clk_high >= clk_high_max / 2 and + * clk_low >= clk_low_max / 2 - 1 when possible; this last constraint + * gives us the following solution: */ - base_clk = divisor > 33 ? ilog2((divisor - 1) / 32) + 1 : 0; - tmp = divisor / (1 << base_clk); - clk_high = tmp / 2 + tmp % 2; - clk_low = tmp - clk_high; + base_clk = divisor > clk_high_low_max ? + ilog2((divisor - 1) / clk_high_low_max) + 1 : 0; + tmp = (divisor + (1 << base_clk) - 1) >> base_clk; + clk_low = tmp / 2; + clk_high = tmp - clk_low; + + if (clk_high) + clk_high--; + + if (clk_low) + clk_low--; - clk_high -= 1; - clk_low -= 1; return ((clk_high << ASPEED_I2CD_TIME_SCL_HIGH_SHIFT) & ASPEED_I2CD_TIME_SCL_HIGH_MASK) @@ -713,13 +723,35 @@ static u32 aspeed_i2c_get_clk_reg_val(u32 divisor) | (base_clk & ASPEED_I2CD_TIME_BASE_DIVISOR_MASK); } +static u32 aspeed_i2c_24xx_get_clk_reg_val(u32 divisor) +{ + /* + * clk_high and clk_low are each 3 bits wide, so each can hold a max + * value of 8 giving a clk_high_low_max of 16. + */ + return aspeed_i2c_get_clk_reg_val(16, divisor); +} + +static u32 aspeed_i2c_25xx_get_clk_reg_val(u32 divisor) +{ + /* + * clk_high and clk_low are each 4 bits wide, so each can hold a max + * value of 16 giving a clk_high_low_max of 32. + */ + return aspeed_i2c_get_clk_reg_val(32, divisor); +} + /* precondition: bus.lock has been acquired. 
*/ static int aspeed_i2c_init_clk(struct aspeed_i2c_bus *bus) { u32 divisor, clk_reg_val; - divisor = bus->parent_clk_frequency / bus->bus_frequency; - clk_reg_val = aspeed_i2c_get_clk_reg_val(divisor); + divisor = DIV_ROUND_UP(bus->parent_clk_frequency, bus->bus_frequency); + clk_reg_val = readl(bus->base + ASPEED_I2C_AC_TIMING_REG1); + clk_reg_val &= (ASPEED_I2CD_TIME_TBUF_MASK | + ASPEED_I2CD_TIME_THDSTA_MASK | + ASPEED_I2CD_TIME_TACST_MASK); + clk_reg_val |= bus->get_clk_reg_val(divisor); writel(clk_reg_val, bus->base + ASPEED_I2C_AC_TIMING_REG1); writel(ASPEED_NO_TIMEOUT_CTRL, bus->base + ASPEED_I2C_AC_TIMING_REG2); @@ -778,8 +810,22 @@ static int aspeed_i2c_reset(struct aspeed_i2c_bus *bus) return ret; } +static const struct of_device_id aspeed_i2c_bus_of_table[] = { + { + .compatible = "aspeed,ast2400-i2c-bus", + .data = aspeed_i2c_24xx_get_clk_reg_val, + }, + { + .compatible = "aspeed,ast2500-i2c-bus", + .data = aspeed_i2c_25xx_get_clk_reg_val, + }, + { }, +}; +MODULE_DEVICE_TABLE(of, aspeed_i2c_bus_of_table); + static int aspeed_i2c_probe_bus(struct platform_device *pdev) { + const struct of_device_id *match; struct aspeed_i2c_bus *bus; struct clk *parent_clk; struct resource *res; @@ -809,6 +855,12 @@ static int aspeed_i2c_probe_bus(struct platform_device *pdev) bus->bus_frequency = 100000; } + match = of_match_node(aspeed_i2c_bus_of_table, pdev->dev.of_node); + if (!match) + bus->get_clk_reg_val = aspeed_i2c_24xx_get_clk_reg_val; + else + bus->get_clk_reg_val = match->data; + /* Initialize the I2C adapter */ spin_lock_init(&bus->lock); init_completion(&bus->cmd_complete); @@ -870,13 +922,6 @@ static int aspeed_i2c_remove_bus(struct platform_device *pdev) return 0; } -static const struct of_device_id aspeed_i2c_bus_of_table[] = { - { .compatible = "aspeed,ast2400-i2c-bus", }, - { .compatible = "aspeed,ast2500-i2c-bus", }, - { }, -}; -MODULE_DEVICE_TABLE(of, aspeed_i2c_bus_of_table); - static struct platform_driver aspeed_i2c_bus_driver = { .probe = aspeed_i2c_probe_bus, .remove = aspeed_i2c_remove_bus, diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c index 38dd61d621df..bfd1fdff64a9 100644 --- a/drivers/i2c/busses/i2c-at91.c +++ b/drivers/i2c/busses/i2c-at91.c @@ -809,7 +809,7 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num) * The hardware can handle at most two messages concatenated by a * repeated start via it's internal address feature. 
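The aspeed hunks above replace the fixed "clk_high >= 8, clk_low >= 7" assumption with a per-SoC clk_high_low_max (16 for the AST2400's 3-bit fields, 32 for the AST2500's 4-bit fields), round the divisor up, and program each count minus one. The same math as a standalone recalculation; the 48 MHz APB clock is only an example figure, not something stated in the patch:

#include <stdio.h>

/* Integer floor(log2(x)) for x >= 1, standing in for the kernel's ilog2(). */
static unsigned int ilog2u(unsigned int x)
{
	unsigned int r = 0;

	while (x >>= 1)
		r++;
	return r;
}

/* Mirror of the reworked aspeed_i2c_get_clk_reg_val() arithmetic. */
static void show(unsigned int parent_hz, unsigned int bus_hz,
		 unsigned int clk_high_low_max)
{
	unsigned int divisor = (parent_hz + bus_hz - 1) / bus_hz;
	unsigned int base_clk, tmp, clk_high, clk_low;

	base_clk = divisor > clk_high_low_max ?
		   ilog2u((divisor - 1) / clk_high_low_max) + 1 : 0;
	tmp = (divisor + (1u << base_clk) - 1) >> base_clk;
	clk_low = tmp / 2;
	clk_high = tmp - clk_low;
	if (clk_high)
		clk_high--;
	if (clk_low)
		clk_low--;

	/* SCL = parent / ((1 << base_clk) * (clk_high + 1 + clk_low + 1)) */
	printf("parent=%u target=%u max=%u -> base_clk=%u high=%u low=%u actual=%u\n",
	       parent_hz, bus_hz, clk_high_low_max, base_clk, clk_high, clk_low,
	       parent_hz / ((1u << base_clk) * (clk_high + 1 + clk_low + 1)));
}

int main(void)
{
	show(48000000, 100000, 16);	/* AST2400-style field widths */
	show(48000000, 100000, 32);	/* AST2500-style field widths */
	show(48000000, 400000, 32);
	return 0;
}

All three examples land exactly on the requested SCL rate because 48 MHz divides evenly by 100 kHz and 400 kHz; with less convenient clocks the rounded-up divisor keeps the effective rate from overshooting the target.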
*/ -static struct i2c_adapter_quirks at91_twi_quirks = { +static const struct i2c_adapter_quirks at91_twi_quirks = { .flags = I2C_AQ_COMB | I2C_AQ_COMB_WRITE_FIRST | I2C_AQ_COMB_SAME_ADDR, .max_comb_1st_msg_len = 3, }; diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c index 318df559adc5..4c8c3bc4669c 100644 --- a/drivers/i2c/busses/i2c-bcm-iproc.c +++ b/drivers/i2c/busses/i2c-bcm-iproc.c @@ -510,8 +510,7 @@ static int bcm_iproc_i2c_remove(struct platform_device *pdev) static int bcm_iproc_i2c_suspend(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct bcm_iproc_i2c_dev *iproc_i2c = platform_get_drvdata(pdev); + struct bcm_iproc_i2c_dev *iproc_i2c = dev_get_drvdata(dev); /* make sure there's no pending interrupt when we go into suspend */ writel(0, iproc_i2c->base + IE_OFFSET); @@ -526,8 +525,7 @@ static int bcm_iproc_i2c_suspend(struct device *dev) static int bcm_iproc_i2c_resume(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct bcm_iproc_i2c_dev *iproc_i2c = platform_get_drvdata(pdev); + struct bcm_iproc_i2c_dev *iproc_i2c = dev_get_drvdata(dev); int ret; u32 val; diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c index 9fe942b8c610..ff3343186a82 100644 --- a/drivers/i2c/busses/i2c-bfin-twi.c +++ b/drivers/i2c/busses/i2c-bfin-twi.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c index 75d80161931f..b13605718291 100644 --- a/drivers/i2c/busses/i2c-cadence.c +++ b/drivers/i2c/busses/i2c-cadence.c @@ -826,8 +826,7 @@ static int cdns_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long */ static int __maybe_unused cdns_i2c_runtime_suspend(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct cdns_i2c *xi2c = platform_get_drvdata(pdev); + struct cdns_i2c *xi2c = dev_get_drvdata(dev); clk_disable(xi2c->clk); @@ -844,8 +843,7 @@ static int __maybe_unused cdns_i2c_runtime_suspend(struct device *dev) */ static int __maybe_unused cdns_i2c_runtime_resume(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct cdns_i2c *xi2c = platform_get_drvdata(pdev); + struct cdns_i2c *xi2c = dev_get_drvdata(dev); int ret; ret = clk_enable(xi2c->clk); diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c new file mode 100644 index 000000000000..190bbbc7bfee --- /dev/null +++ b/drivers/i2c/busses/i2c-cht-wc.c @@ -0,0 +1,363 @@ +/* + * Intel CHT Whiskey Cove PMIC I2C Master driver + * Copyright (C) 2017 Hans de Goede + * + * Based on various non upstream patches to support the CHT Whiskey Cove PMIC: + * Copyright (C) 2011 - 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CHT_WC_I2C_CTRL 0x5e24 +#define CHT_WC_I2C_CTRL_WR BIT(0) +#define CHT_WC_I2C_CTRL_RD BIT(1) +#define CHT_WC_I2C_CLIENT_ADDR 0x5e25 +#define CHT_WC_I2C_REG_OFFSET 0x5e26 +#define CHT_WC_I2C_WRDATA 0x5e27 +#define CHT_WC_I2C_RDDATA 0x5e28 + +#define CHT_WC_EXTCHGRIRQ 0x6e0a +#define CHT_WC_EXTCHGRIRQ_CLIENT_IRQ BIT(0) +#define CHT_WC_EXTCHGRIRQ_WRITE_IRQ BIT(1) +#define CHT_WC_EXTCHGRIRQ_READ_IRQ BIT(2) +#define CHT_WC_EXTCHGRIRQ_NACK_IRQ BIT(3) +#define CHT_WC_EXTCHGRIRQ_ADAP_IRQMASK ((u8)GENMASK(3, 1)) +#define CHT_WC_EXTCHGRIRQ_MSK 0x6e17 + +struct cht_wc_i2c_adap { + struct i2c_adapter adapter; + wait_queue_head_t wait; + struct irq_chip irqchip; + struct mutex adap_lock; + struct mutex irqchip_lock; + struct regmap *regmap; + struct irq_domain *irq_domain; + struct i2c_client *client; + int client_irq; + u8 irq_mask; + u8 old_irq_mask; + int read_data; + bool io_error; + bool done; +}; + +static irqreturn_t cht_wc_i2c_adap_thread_handler(int id, void *data) +{ + struct cht_wc_i2c_adap *adap = data; + int ret, reg; + + mutex_lock(&adap->adap_lock); + + /* Read IRQs */ + ret = regmap_read(adap->regmap, CHT_WC_EXTCHGRIRQ, ®); + if (ret) { + dev_err(&adap->adapter.dev, "Error reading extchgrirq reg\n"); + mutex_unlock(&adap->adap_lock); + return IRQ_NONE; + } + + reg &= ~adap->irq_mask; + + /* Reads must be acked after reading the received data. */ + ret = regmap_read(adap->regmap, CHT_WC_I2C_RDDATA, &adap->read_data); + if (ret) + adap->io_error = true; + + /* + * Immediately ack IRQs, so that if new IRQs arrives while we're + * handling the previous ones our irq will re-trigger when we're done. + */ + ret = regmap_write(adap->regmap, CHT_WC_EXTCHGRIRQ, reg); + if (ret) + dev_err(&adap->adapter.dev, "Error writing extchgrirq reg\n"); + + if (reg & CHT_WC_EXTCHGRIRQ_ADAP_IRQMASK) { + adap->io_error |= !!(reg & CHT_WC_EXTCHGRIRQ_NACK_IRQ); + adap->done = true; + } + + mutex_unlock(&adap->adap_lock); + + if (reg & CHT_WC_EXTCHGRIRQ_ADAP_IRQMASK) + wake_up(&adap->wait); + + /* + * Do NOT use handle_nested_irq here, the client irq handler will + * likely want to do i2c transfers and the i2c controller uses this + * interrupt handler as well, so running the client irq handler from + * this thread will cause things to lock up. + */ + if (reg & CHT_WC_EXTCHGRIRQ_CLIENT_IRQ) { + /* + * generic_handle_irq expects local IRQs to be disabled + * as normally it is called from interrupt context. 
+ */ + local_irq_disable(); + generic_handle_irq(adap->client_irq); + local_irq_enable(); + } + + return IRQ_HANDLED; +} + +static u32 cht_wc_i2c_adap_master_func(struct i2c_adapter *adap) +{ + /* This i2c adapter only supports SMBUS byte transfers */ + return I2C_FUNC_SMBUS_BYTE_DATA; +} + +static int cht_wc_i2c_adap_smbus_xfer(struct i2c_adapter *_adap, u16 addr, + unsigned short flags, char read_write, + u8 command, int size, + union i2c_smbus_data *data) +{ + struct cht_wc_i2c_adap *adap = i2c_get_adapdata(_adap); + int ret; + + mutex_lock(&adap->adap_lock); + adap->io_error = false; + adap->done = false; + mutex_unlock(&adap->adap_lock); + + ret = regmap_write(adap->regmap, CHT_WC_I2C_CLIENT_ADDR, addr); + if (ret) + return ret; + + if (read_write == I2C_SMBUS_WRITE) { + ret = regmap_write(adap->regmap, CHT_WC_I2C_WRDATA, data->byte); + if (ret) + return ret; + } + + ret = regmap_write(adap->regmap, CHT_WC_I2C_REG_OFFSET, command); + if (ret) + return ret; + + ret = regmap_write(adap->regmap, CHT_WC_I2C_CTRL, + (read_write == I2C_SMBUS_WRITE) ? + CHT_WC_I2C_CTRL_WR : CHT_WC_I2C_CTRL_RD); + if (ret) + return ret; + + ret = wait_event_timeout(adap->wait, adap->done, msecs_to_jiffies(30)); + if (ret == 0) { + /* + * The CHT GPIO controller serializes all IRQs, sometimes + * causing significant delays, check status manually. + */ + cht_wc_i2c_adap_thread_handler(0, adap); + if (!adap->done) + return -ETIMEDOUT; + } + + ret = 0; + mutex_lock(&adap->adap_lock); + if (adap->io_error) + ret = -EIO; + else if (read_write == I2C_SMBUS_READ) + data->byte = adap->read_data; + mutex_unlock(&adap->adap_lock); + + return ret; +} + +static const struct i2c_algorithm cht_wc_i2c_adap_algo = { + .functionality = cht_wc_i2c_adap_master_func, + .smbus_xfer = cht_wc_i2c_adap_smbus_xfer, +}; + +/**** irqchip for the client connected to the extchgr i2c adapter ****/ +static void cht_wc_i2c_irq_lock(struct irq_data *data) +{ + struct cht_wc_i2c_adap *adap = irq_data_get_irq_chip_data(data); + + mutex_lock(&adap->irqchip_lock); +} + +static void cht_wc_i2c_irq_sync_unlock(struct irq_data *data) +{ + struct cht_wc_i2c_adap *adap = irq_data_get_irq_chip_data(data); + int ret; + + if (adap->irq_mask != adap->old_irq_mask) { + ret = regmap_write(adap->regmap, CHT_WC_EXTCHGRIRQ_MSK, + adap->irq_mask); + if (ret == 0) + adap->old_irq_mask = adap->irq_mask; + else + dev_err(&adap->adapter.dev, "Error writing EXTCHGRIRQ_MSK\n"); + } + + mutex_unlock(&adap->irqchip_lock); +} + +static void cht_wc_i2c_irq_enable(struct irq_data *data) +{ + struct cht_wc_i2c_adap *adap = irq_data_get_irq_chip_data(data); + + adap->irq_mask &= ~CHT_WC_EXTCHGRIRQ_CLIENT_IRQ; +} + +static void cht_wc_i2c_irq_disable(struct irq_data *data) +{ + struct cht_wc_i2c_adap *adap = irq_data_get_irq_chip_data(data); + + adap->irq_mask |= CHT_WC_EXTCHGRIRQ_CLIENT_IRQ; +} + +static const struct irq_chip cht_wc_i2c_irq_chip = { + .irq_bus_lock = cht_wc_i2c_irq_lock, + .irq_bus_sync_unlock = cht_wc_i2c_irq_sync_unlock, + .irq_disable = cht_wc_i2c_irq_disable, + .irq_enable = cht_wc_i2c_irq_enable, + .name = "cht_wc_ext_chrg_irq_chip", +}; + +static const struct property_entry bq24190_props[] = { + PROPERTY_ENTRY_STRING("extcon-name", "cht_wcove_pwrsrc"), + PROPERTY_ENTRY_BOOL("omit-battery-class"), + PROPERTY_ENTRY_BOOL("disable-reset"), + { } +}; + +static int cht_wc_i2c_adap_i2c_probe(struct platform_device *pdev) +{ + struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent); + struct cht_wc_i2c_adap *adap; + struct i2c_board_info board_info = 
{ + .type = "bq24190", + .addr = 0x6b, + .properties = bq24190_props, + }; + int ret, reg, irq; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "Error missing irq resource\n"); + return -EINVAL; + } + + adap = devm_kzalloc(&pdev->dev, sizeof(*adap), GFP_KERNEL); + if (!adap) + return -ENOMEM; + + init_waitqueue_head(&adap->wait); + mutex_init(&adap->adap_lock); + mutex_init(&adap->irqchip_lock); + adap->irqchip = cht_wc_i2c_irq_chip; + adap->regmap = pmic->regmap; + adap->adapter.owner = THIS_MODULE; + adap->adapter.class = I2C_CLASS_HWMON; + adap->adapter.algo = &cht_wc_i2c_adap_algo; + strlcpy(adap->adapter.name, "PMIC I2C Adapter", + sizeof(adap->adapter.name)); + adap->adapter.dev.parent = &pdev->dev; + + /* Clear and activate i2c-adapter interrupts, disable client IRQ */ + adap->old_irq_mask = adap->irq_mask = ~CHT_WC_EXTCHGRIRQ_ADAP_IRQMASK; + + ret = regmap_read(adap->regmap, CHT_WC_I2C_RDDATA, ®); + if (ret) + return ret; + + ret = regmap_write(adap->regmap, CHT_WC_EXTCHGRIRQ, ~adap->irq_mask); + if (ret) + return ret; + + ret = regmap_write(adap->regmap, CHT_WC_EXTCHGRIRQ_MSK, adap->irq_mask); + if (ret) + return ret; + + /* Alloc and register client IRQ */ + adap->irq_domain = irq_domain_add_linear(pdev->dev.of_node, 1, + &irq_domain_simple_ops, NULL); + if (!adap->irq_domain) + return -ENOMEM; + + adap->client_irq = irq_create_mapping(adap->irq_domain, 0); + if (!adap->client_irq) { + ret = -ENOMEM; + goto remove_irq_domain; + } + + irq_set_chip_data(adap->client_irq, adap); + irq_set_chip_and_handler(adap->client_irq, &adap->irqchip, + handle_simple_irq); + + ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, + cht_wc_i2c_adap_thread_handler, + IRQF_ONESHOT, "PMIC I2C Adapter", adap); + if (ret) + goto remove_irq_domain; + + i2c_set_adapdata(&adap->adapter, adap); + ret = i2c_add_adapter(&adap->adapter); + if (ret) + goto remove_irq_domain; + + board_info.irq = adap->client_irq; + adap->client = i2c_new_device(&adap->adapter, &board_info); + if (!adap->client) { + ret = -ENOMEM; + goto del_adapter; + } + + platform_set_drvdata(pdev, adap); + return 0; + +del_adapter: + i2c_del_adapter(&adap->adapter); +remove_irq_domain: + irq_domain_remove(adap->irq_domain); + return ret; +} + +static int cht_wc_i2c_adap_i2c_remove(struct platform_device *pdev) +{ + struct cht_wc_i2c_adap *adap = platform_get_drvdata(pdev); + + i2c_unregister_device(adap->client); + i2c_del_adapter(&adap->adapter); + irq_domain_remove(adap->irq_domain); + + return 0; +} + +static struct platform_device_id cht_wc_i2c_adap_id_table[] = { + { .name = "cht_wcove_ext_chgr" }, + {}, +}; +MODULE_DEVICE_TABLE(platform, cht_wc_i2c_adap_id_table); + +static struct platform_driver cht_wc_i2c_adap_driver = { + .probe = cht_wc_i2c_adap_i2c_probe, + .remove = cht_wc_i2c_adap_i2c_remove, + .driver = { + .name = "cht_wcove_ext_chgr", + }, + .id_table = cht_wc_i2c_adap_id_table, +}; +module_platform_driver(cht_wc_i2c_adap_driver); + +MODULE_DESCRIPTION("Intel CHT Whiskey Cove PMIC I2C Master driver"); +MODULE_AUTHOR("Hans de Goede "); +MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c index d89bde2c5da2..8a8ca945561b 100644 --- a/drivers/i2c/busses/i2c-cpm.c +++ b/drivers/i2c/busses/i2c-cpm.c @@ -413,7 +413,7 @@ static const struct i2c_algorithm cpm_i2c_algo = { }; /* CPM_MAX_READ is also limiting writes according to the code! 
*/ -static struct i2c_adapter_quirks cpm_i2c_quirks = { +static const struct i2c_adapter_quirks cpm_i2c_quirks = { .max_num_msgs = CPM_MAXBD, .max_read_len = CPM_MAX_READ, .max_write_len = CPM_MAX_READ, diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c index 9e7ef5cf5d49..b8c43535f16c 100644 --- a/drivers/i2c/busses/i2c-davinci.c +++ b/drivers/i2c/busses/i2c-davinci.c @@ -733,7 +733,7 @@ static inline void i2c_davinci_cpufreq_deregister(struct davinci_i2c_dev *dev) } #endif -static struct i2c_algorithm i2c_davinci_algo = { +static const struct i2c_algorithm i2c_davinci_algo = { .master_xfer = i2c_davinci_xfer, .functionality = i2c_davinci_func, }; @@ -801,7 +801,7 @@ static int davinci_i2c_probe(struct platform_device *pdev) dev->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(dev->clk)) - return -ENODEV; + return PTR_ERR(dev->clk); clk_prepare_enable(dev->clk); mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -876,8 +876,7 @@ static int davinci_i2c_remove(struct platform_device *pdev) #ifdef CONFIG_PM static int davinci_i2c_suspend(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct davinci_i2c_dev *i2c_dev = platform_get_drvdata(pdev); + struct davinci_i2c_dev *i2c_dev = dev_get_drvdata(dev); /* put I2C into reset */ davinci_i2c_reset_ctrl(i2c_dev, 0); @@ -888,8 +887,7 @@ static int davinci_i2c_suspend(struct device *dev) static int davinci_i2c_resume(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct davinci_i2c_dev *i2c_dev = platform_get_drvdata(pdev); + struct davinci_i2c_dev *i2c_dev = dev_get_drvdata(dev); clk_prepare_enable(i2c_dev->clk); /* take I2C out of reset */ diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 2b98a173136f..0e65b97842b4 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -439,8 +439,7 @@ static void dw_i2c_plat_complete(struct device *dev) #ifdef CONFIG_PM static int dw_i2c_plat_runtime_suspend(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev); + struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); i_dev->disable(i_dev); i2c_dw_plat_prepare_clk(i_dev, false); @@ -450,8 +449,7 @@ static int dw_i2c_plat_runtime_suspend(struct device *dev) static int dw_i2c_plat_resume(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev); + struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); i2c_dw_plat_prepare_clk(i_dev, true); i_dev->init(i_dev); diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c index 78d8fb73927d..ea9578ab19a1 100644 --- a/drivers/i2c/busses/i2c-designware-slave.c +++ b/drivers/i2c/busses/i2c-designware-slave.c @@ -346,7 +346,7 @@ static irqreturn_t i2c_dw_isr_slave(int this_irq, void *dev_id) return IRQ_RETVAL(ret); } -static struct i2c_algorithm i2c_dw_algo = { +static const struct i2c_algorithm i2c_dw_algo = { .functionality = i2c_dw_func, .reg_slave = i2c_dw_reg_slave, .unreg_slave = i2c_dw_unreg_slave, diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c index 23ed4d67ecad..3855e0b11877 100644 --- a/drivers/i2c/busses/i2c-exynos5.c +++ b/drivers/i2c/busses/i2c-exynos5.c @@ -803,8 +803,7 @@ static int exynos5_i2c_remove(struct platform_device *pdev) #ifdef CONFIG_PM_SLEEP static int 
exynos5_i2c_suspend_noirq(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct exynos5_i2c *i2c = platform_get_drvdata(pdev); + struct exynos5_i2c *i2c = dev_get_drvdata(dev); i2c->suspended = 1; @@ -815,8 +814,7 @@ static int exynos5_i2c_suspend_noirq(struct device *dev) static int exynos5_i2c_resume_noirq(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct exynos5_i2c *i2c = platform_get_drvdata(pdev); + struct exynos5_i2c *i2c = dev_get_drvdata(dev); int ret = 0; ret = clk_prepare_enable(i2c->clk); diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c index 34cfc0ebdcb9..0ef8fcc6ac3a 100644 --- a/drivers/i2c/busses/i2c-gpio.c +++ b/drivers/i2c/busses/i2c-gpio.c @@ -98,8 +98,8 @@ static int of_i2c_gpio_get_pins(struct device_node *np, return -EPROBE_DEFER; if (!gpio_is_valid(*sda_pin) || !gpio_is_valid(*scl_pin)) { - pr_err("%s: invalid GPIO pins, sda=%d/scl=%d\n", - np->full_name, *sda_pin, *scl_pin); + pr_err("%pOF: invalid GPIO pins, sda=%d/scl=%d\n", + np, *sda_pin, *scl_pin); return -ENODEV; } diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c index ae7f3180f7e8..bb68957d3da5 100644 --- a/drivers/i2c/busses/i2c-hix5hd2.c +++ b/drivers/i2c/busses/i2c-hix5hd2.c @@ -505,8 +505,7 @@ static int hix5hd2_i2c_remove(struct platform_device *pdev) #ifdef CONFIG_PM static int hix5hd2_i2c_runtime_suspend(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct hix5hd2_i2c_priv *priv = platform_get_drvdata(pdev); + struct hix5hd2_i2c_priv *priv = dev_get_drvdata(dev); clk_disable_unprepare(priv->clk); @@ -515,8 +514,7 @@ static int hix5hd2_i2c_runtime_suspend(struct device *dev) static int hix5hd2_i2c_runtime_resume(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct hix5hd2_i2c_priv *priv = platform_get_drvdata(pdev); + struct hix5hd2_i2c_priv *priv = dev_get_drvdata(dev); clk_prepare_enable(priv->clk); hix5hd2_i2c_init(priv); diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index c9536e17d6ff..9e12a53ef7b8 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -68,6 +68,7 @@ * Gemini Lake (SOC) 0x31d4 32 hard yes yes yes * Cannon Lake-H (PCH) 0xa323 32 hard yes yes yes * Cannon Lake-LP (PCH) 0x9da3 32 hard yes yes yes + * Cedar Fork (PCH) 0x18df 32 hard yes yes yes * * Features supported by this driver: * Software PEC no @@ -204,6 +205,7 @@ /* Older devices have their ID defined in */ #define PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS 0x0f12 +#define PCI_DEVICE_ID_INTEL_CDF_SMBUS 0x18df #define PCI_DEVICE_ID_INTEL_DNV_SMBUS 0x19df #define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22 #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22 @@ -1025,6 +1027,7 @@ static const struct pci_device_id i801_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BRASWELL_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CDF_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROXTON_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS) }, @@ -1332,6 +1335,7 @@ static void i801_add_tco(struct i801_priv *priv) u32 tco_base, tco_ctl; u32 base_addr, ctrl_val; u64 base64_addr; + u8 hidden; if 
(!(priv->features & FEATURE_TCO)) return; @@ -1376,8 +1380,10 @@ static void i801_add_tco(struct i801_priv *priv) devfn = PCI_DEVFN(PCI_SLOT(pci_dev->devfn), 1); - /* Unhide the P2SB device */ - pci_bus_write_config_byte(pci_dev->bus, devfn, 0xe1, 0x0); + /* Unhide the P2SB device, if it is hidden */ + pci_bus_read_config_byte(pci_dev->bus, devfn, 0xe1, &hidden); + if (hidden) + pci_bus_write_config_byte(pci_dev->bus, devfn, 0xe1, 0x0); pci_bus_read_config_dword(pci_dev->bus, devfn, SBREG_BAR, &base_addr); base64_addr = base_addr & 0xfffffff0; @@ -1385,8 +1391,9 @@ static void i801_add_tco(struct i801_priv *priv) pci_bus_read_config_dword(pci_dev->bus, devfn, SBREG_BAR + 0x4, &base_addr); base64_addr |= (u64)base_addr << 32; - /* Hide the P2SB device */ - pci_bus_write_config_byte(pci_dev->bus, devfn, 0xe1, 0x1); + /* Hide the P2SB device, if it was hidden before */ + if (hidden) + pci_bus_write_config_byte(pci_dev->bus, devfn, 0xe1, hidden); spin_unlock(&p2sb_spinlock); res = &tco_res[ICH_RES_MEM_OFF]; @@ -1509,6 +1516,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) case PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS: case PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS: case PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS: + case PCI_DEVICE_ID_INTEL_CDF_SMBUS: case PCI_DEVICE_ID_INTEL_DNV_SMBUS: case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS: priv->features |= FEATURE_I2C_BLOCK_READ; diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c index 84fb35f6837f..eb1d91b986fd 100644 --- a/drivers/i2c/busses/i2c-img-scb.c +++ b/drivers/i2c/busses/i2c-img-scb.c @@ -1459,6 +1459,6 @@ static struct platform_driver img_scb_i2c_driver = { }; module_platform_driver(img_scb_i2c_driver); -MODULE_AUTHOR("James Hogan "); +MODULE_AUTHOR("James Hogan "); MODULE_DESCRIPTION("IMG host I2C driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/i2c/busses/i2c-kempld.c b/drivers/i2c/busses/i2c-kempld.c index 25993d2e64bf..e879190b5d1d 100644 --- a/drivers/i2c/busses/i2c-kempld.c +++ b/drivers/i2c/busses/i2c-kempld.c @@ -289,7 +289,7 @@ static const struct i2c_algorithm kempld_i2c_algorithm = { .functionality = kempld_i2c_func, }; -static struct i2c_adapter kempld_i2c_adapter = { +static const struct i2c_adapter kempld_i2c_adapter = { .owner = THIS_MODULE, .name = "i2c-kempld", .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, diff --git a/drivers/i2c/busses/i2c-lpc2k.c b/drivers/i2c/busses/i2c-lpc2k.c index 9b1fef455a89..59167c018ae7 100644 --- a/drivers/i2c/busses/i2c-lpc2k.c +++ b/drivers/i2c/busses/i2c-lpc2k.c @@ -457,8 +457,7 @@ static int i2c_lpc2k_remove(struct platform_device *dev) #ifdef CONFIG_PM static int i2c_lpc2k_suspend(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct lpc2k_i2c *i2c = platform_get_drvdata(pdev); + struct lpc2k_i2c *i2c = dev_get_drvdata(dev); clk_disable(i2c->clk); @@ -467,8 +466,7 @@ static int i2c_lpc2k_suspend(struct device *dev) static int i2c_lpc2k_resume(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct lpc2k_i2c *i2c = platform_get_drvdata(pdev); + struct lpc2k_i2c *i2c = dev_get_drvdata(dev); clk_enable(i2c->clk); i2c_lpc2k_reset(i2c); diff --git a/drivers/i2c/busses/i2c-mlxcpld.c b/drivers/i2c/busses/i2c-mlxcpld.c index d271e6a0954c..4c28fa28ce76 100644 --- a/drivers/i2c/busses/i2c-mlxcpld.c +++ b/drivers/i2c/busses/i2c-mlxcpld.c @@ -433,7 +433,7 @@ static const struct i2c_algorithm mlxcpld_i2c_algo = { .functionality = mlxcpld_i2c_func }; -static struct 
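The i801_add_tco() change above stops assuming the P2SB device is hidden: the hide byte at offset 0xe1 is read first, the device is unhidden only if it actually was hidden, and afterwards the originally read value is written back instead of unconditionally hiding it again. A toy standalone model of that save/conditional-restore pattern; only the 0xe1 offset comes from the patch, the rest is simulated:

#include <stdio.h>

static unsigned char cfg_0xe1 = 0x01;	/* pretend the device starts hidden */

/* Read the current hide state, unhide if needed, then restore what was read. */
static void access_behind_p2sb(void)
{
	unsigned char hidden = cfg_0xe1;	/* pci_bus_read_config_byte() */

	if (hidden)
		cfg_0xe1 = 0x00;		/* unhide only when necessary */

	printf("  reading SBREG_BAR with 0xe1=0x%02x\n", cfg_0xe1);

	if (hidden)
		cfg_0xe1 = hidden;		/* put back the saved value */
}

int main(void)
{
	access_behind_p2sb();
	printf("firmware hid it: 0xe1=0x%02x afterwards\n", cfg_0xe1);

	cfg_0xe1 = 0x00;			/* now pretend it was left visible */
	access_behind_p2sb();
	printf("firmware left it visible: 0xe1=0x%02x afterwards\n", cfg_0xe1);
	return 0;
}

The second case is the one the old code got wrong: a device that firmware left visible ended up hidden after the driver ran.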
i2c_adapter_quirks mlxcpld_i2c_quirks = { +static const struct i2c_adapter_quirks mlxcpld_i2c_quirks = { .flags = I2C_AQ_COMB_WRITE_THEN_READ, .max_read_len = MLXCPLD_I2C_DATA_REG_SZ - MLXCPLD_I2C_MAX_ADDR_LEN, .max_write_len = MLXCPLD_I2C_DATA_REG_SZ, diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c index 45d61714c81b..09d288ce0ddb 100644 --- a/drivers/i2c/busses/i2c-mt65xx.c +++ b/drivers/i2c/busses/i2c-mt65xx.c @@ -50,7 +50,6 @@ #define I2C_FS_START_CON 0x1800 #define I2C_TIME_CLR_VALUE 0x0000 #define I2C_TIME_DEFAULT_VALUE 0x0003 -#define I2C_FS_TIME_INIT_VALUE 0x1303 #define I2C_WRRD_TRANAC_VALUE 0x0002 #define I2C_RD_TRANAC_VALUE 0x0001 @@ -154,6 +153,7 @@ struct mtk_i2c { bool use_push_pull; /* IO config push-pull mode */ u16 irq_stat; /* interrupt status */ + unsigned int clk_src_div; unsigned int speed_hz; /* The speed in transfer */ enum mtk_trans_op op; u16 timing_reg; @@ -172,6 +172,10 @@ static const struct i2c_adapter_quirks mt6577_i2c_quirks = { .max_comb_2nd_msg_len = 31, }; +static const struct i2c_adapter_quirks mt7622_i2c_quirks = { + .max_num_msgs = 255, +}; + static const struct mtk_i2c_compatible mt6577_compat = { .quirks = &mt6577_i2c_quirks, .pmic_i2c = 0, @@ -190,6 +194,15 @@ static const struct mtk_i2c_compatible mt6589_compat = { .support_33bits = 0, }; +static const struct mtk_i2c_compatible mt7622_compat = { + .quirks = &mt7622_i2c_quirks, + .pmic_i2c = 0, + .dcm = 1, + .auto_restart = 1, + .aux_len_reg = 1, + .support_33bits = 0, +}; + static const struct mtk_i2c_compatible mt8173_compat = { .pmic_i2c = 0, .dcm = 1, @@ -201,6 +214,7 @@ static const struct mtk_i2c_compatible mt8173_compat = { static const struct of_device_id mtk_i2c_of_match[] = { { .compatible = "mediatek,mt6577-i2c", .data = &mt6577_compat }, { .compatible = "mediatek,mt6589-i2c", .data = &mt6589_compat }, + { .compatible = "mediatek,mt7622-i2c", .data = &mt7622_compat }, { .compatible = "mediatek,mt8173-i2c", .data = &mt8173_compat }, {} }; @@ -285,23 +299,20 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c) * less than or equal to i2c->speed_hz. 
The calculation try to get * sample_cnt and step_cn */ -static int mtk_i2c_set_speed(struct mtk_i2c *i2c, unsigned int parent_clk, - unsigned int clock_div) +static int mtk_i2c_calculate_speed(struct mtk_i2c *i2c, unsigned int clk_src, + unsigned int target_speed, + unsigned int *timing_step_cnt, + unsigned int *timing_sample_cnt) { - unsigned int clk_src; unsigned int step_cnt; unsigned int sample_cnt; unsigned int max_step_cnt; - unsigned int target_speed; unsigned int base_sample_cnt = MAX_SAMPLE_CNT_DIV; unsigned int base_step_cnt; unsigned int opt_div; unsigned int best_mul; unsigned int cnt_mul; - clk_src = parent_clk / clock_div; - target_speed = i2c->speed_hz; - if (target_speed > MAX_HS_MODE_SPEED) target_speed = MAX_HS_MODE_SPEED; @@ -347,16 +358,48 @@ static int mtk_i2c_set_speed(struct mtk_i2c *i2c, unsigned int parent_clk, return -EINVAL; } - step_cnt--; - sample_cnt--; + *timing_step_cnt = step_cnt - 1; + *timing_sample_cnt = sample_cnt - 1; + + return 0; +} + +static int mtk_i2c_set_speed(struct mtk_i2c *i2c, unsigned int parent_clk) +{ + unsigned int clk_src; + unsigned int step_cnt; + unsigned int sample_cnt; + unsigned int target_speed; + int ret; + + clk_src = parent_clk / i2c->clk_src_div; + target_speed = i2c->speed_hz; if (target_speed > MAX_FS_MODE_SPEED) { + /* Set master code speed register */ + ret = mtk_i2c_calculate_speed(i2c, clk_src, MAX_FS_MODE_SPEED, + &step_cnt, &sample_cnt); + if (ret < 0) + return ret; + + i2c->timing_reg = (sample_cnt << 8) | step_cnt; + /* Set the high speed mode register */ - i2c->timing_reg = I2C_FS_TIME_INIT_VALUE; + ret = mtk_i2c_calculate_speed(i2c, clk_src, target_speed, + &step_cnt, &sample_cnt); + if (ret < 0) + return ret; + i2c->high_speed_reg = I2C_TIME_DEFAULT_VALUE | (sample_cnt << 12) | (step_cnt << 8); } else { - i2c->timing_reg = (sample_cnt << 8) | (step_cnt << 0); + ret = mtk_i2c_calculate_speed(i2c, clk_src, target_speed, + &step_cnt, &sample_cnt); + if (ret < 0) + return ret; + + i2c->timing_reg = (sample_cnt << 8) | step_cnt; + /* Disable the high speed transaction */ i2c->high_speed_reg = I2C_TIME_CLR_VALUE; } @@ -647,8 +690,7 @@ static const struct i2c_algorithm mtk_i2c_algorithm = { .functionality = mtk_i2c_functionality, }; -static int mtk_i2c_parse_dt(struct device_node *np, struct mtk_i2c *i2c, - unsigned int *clk_src_div) +static int mtk_i2c_parse_dt(struct device_node *np, struct mtk_i2c *i2c) { int ret; @@ -656,11 +698,11 @@ static int mtk_i2c_parse_dt(struct device_node *np, struct mtk_i2c *i2c, if (ret < 0) i2c->speed_hz = I2C_DEFAULT_SPEED; - ret = of_property_read_u32(np, "clock-div", clk_src_div); + ret = of_property_read_u32(np, "clock-div", &i2c->clk_src_div); if (ret < 0) return ret; - if (*clk_src_div == 0) + if (i2c->clk_src_div == 0) return -EINVAL; i2c->have_pmic = of_property_read_bool(np, "mediatek,have-pmic"); @@ -676,7 +718,6 @@ static int mtk_i2c_probe(struct platform_device *pdev) int ret = 0; struct mtk_i2c *i2c; struct clk *clk; - unsigned int clk_src_div; struct resource *res; int irq; @@ -684,7 +725,7 @@ static int mtk_i2c_probe(struct platform_device *pdev) if (!i2c) return -ENOMEM; - ret = mtk_i2c_parse_dt(pdev->dev.of_node, i2c, &clk_src_div); + ret = mtk_i2c_parse_dt(pdev->dev.of_node, i2c); if (ret) return -EINVAL; @@ -745,7 +786,7 @@ static int mtk_i2c_probe(struct platform_device *pdev) strlcpy(i2c->adap.name, I2C_DRV_NAME, sizeof(i2c->adap.name)); - ret = mtk_i2c_set_speed(i2c, clk_get_rate(clk), clk_src_div); + ret = mtk_i2c_set_speed(i2c, clk_get_rate(clk)); if (ret) { 
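The mt65xx hunk above splits the timing code into mtk_i2c_calculate_speed() plus a wrapper so that, when the target is above MAX_FS_MODE_SPEED, the master-code phase gets a sample/step pair computed at MAX_FS_MODE_SPEED instead of the removed hard-coded I2C_FS_TIME_INIT_VALUE. As a rough illustration of what such a search has to produce, here is a brute-force standalone sketch; the SCL model clk_src / (2 * sample_cnt * step_cnt) and the 8/64 field limits are assumptions made for this sketch, not values quoted from the driver:

#include <stdio.h>

/* Pick the fastest (sample_cnt, step_cnt) pair that does not overshoot. */
static int pick_timing(unsigned int clk_src, unsigned int target,
		       unsigned int *best_sample, unsigned int *best_step)
{
	unsigned int sample, step, best = 0;

	for (sample = 1; sample <= 8; sample++) {
		for (step = 1; step <= 64; step++) {
			unsigned int rate = clk_src / (2 * sample * step);

			if (rate <= target && rate > best) {
				best = rate;
				*best_sample = sample;
				*best_step = step;
			}
		}
	}
	return best ? 0 : -1;
}

int main(void)
{
	unsigned int clk_src = 26000000 / 4;	/* example source clock only */
	unsigned int sample, step;

	if (!pick_timing(clk_src, 400000, &sample, &step))
		printf("400 kHz: sample_cnt=%u step_cnt=%u actual=%u Hz\n",
		       sample, step, clk_src / (2 * sample * step));
	if (!pick_timing(clk_src, 100000, &sample, &step))
		printf("100 kHz: sample_cnt=%u step_cnt=%u actual=%u Hz\n",
		       sample, step, clk_src / (2 * sample * step));
	return 0;
}

The in-tree code arrives at its pair without exhaustive search, but the constraint is the same one checked here: the resulting rate must stay at or below the requested speed.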
dev_err(&pdev->dev, "Failed to set the speed.\n"); return -EINVAL; diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c index 5c4db65c5019..a832c45276a4 100644 --- a/drivers/i2c/busses/i2c-mv64xxx.c +++ b/drivers/i2c/busses/i2c-mv64xxx.c @@ -820,7 +820,7 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data, goto out; } - drv_data->rstc = devm_reset_control_get_optional(dev, NULL); + drv_data->rstc = devm_reset_control_get_optional_exclusive(dev, NULL); if (IS_ERR(drv_data->rstc)) { rc = PTR_ERR(drv_data->rstc); goto out; @@ -975,8 +975,7 @@ mv64xxx_i2c_remove(struct platform_device *dev) #ifdef CONFIG_PM static int mv64xxx_i2c_resume(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct mv64xxx_i2c_data *drv_data = platform_get_drvdata(pdev); + struct mv64xxx_i2c_data *drv_data = dev_get_drvdata(dev); mv64xxx_i2c_hw_init(drv_data); diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c index da6609d62848..49c7c0c91486 100644 --- a/drivers/i2c/busses/i2c-nomadik.c +++ b/drivers/i2c/busses/i2c-nomadik.c @@ -1088,7 +1088,7 @@ static struct i2c_vendor_data vendor_db8500 = { .fifodepth = 32, /* Guessed from TFTR/RFTR = 15 */ }; -static struct amba_id nmk_i2c_ids[] = { +static const struct amba_id nmk_i2c_ids[] = { { .id = 0x00180024, .mask = 0x00ffffff, diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c index 34f1889a4073..8c42ca7107b2 100644 --- a/drivers/i2c/busses/i2c-ocores.c +++ b/drivers/i2c/busses/i2c-ocores.c @@ -276,7 +276,7 @@ static const struct i2c_algorithm ocores_algorithm = { .functionality = ocores_func, }; -static struct i2c_adapter ocores_adapter = { +static const struct i2c_adapter ocores_adapter = { .owner = THIS_MODULE, .name = "i2c-ocores", .class = I2C_CLASS_DEPRECATED, diff --git a/drivers/i2c/busses/i2c-octeon-platdrv.c b/drivers/i2c/busses/i2c-octeon-platdrv.c index 917524ce6890..64bda83e65ac 100644 --- a/drivers/i2c/busses/i2c-octeon-platdrv.c +++ b/drivers/i2c/busses/i2c-octeon-platdrv.c @@ -126,7 +126,7 @@ static const struct i2c_algorithm octeon_i2c_algo = { .functionality = octeon_i2c_functionality, }; -static struct i2c_adapter octeon_i2c_ops = { +static const struct i2c_adapter octeon_i2c_ops = { .owner = THIS_MODULE, .name = "OCTEON adapter", .algo = &octeon_i2c_algo, diff --git a/drivers/i2c/busses/i2c-opal.c b/drivers/i2c/busses/i2c-opal.c index 11e2a1fc10e9..0aabb7eca0c5 100644 --- a/drivers/i2c/busses/i2c-opal.c +++ b/drivers/i2c/busses/i2c-opal.c @@ -204,7 +204,7 @@ static const struct i2c_algorithm i2c_opal_algo = { * For two messages, we basically support simple smbus transactions of a * write-then-anything. 
*/ -static struct i2c_adapter_quirks i2c_opal_quirks = { +static const struct i2c_adapter_quirks i2c_opal_quirks = { .flags = I2C_AQ_COMB | I2C_AQ_COMB_WRITE_FIRST | I2C_AQ_COMB_SAME_ADDR, .max_comb_1st_msg_len = 4, }; diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c index 217c78711d65..2aa0e83174c5 100644 --- a/drivers/i2c/busses/i2c-pmcmsp.c +++ b/drivers/i2c/busses/i2c-pmcmsp.c @@ -577,7 +577,7 @@ static u32 pmcmsptwi_i2c_func(struct i2c_adapter *adapter) I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_PROC_CALL; } -static struct i2c_adapter_quirks pmcmsptwi_i2c_quirks = { +static const struct i2c_adapter_quirks pmcmsptwi_i2c_quirks = { .flags = I2C_AQ_COMB_WRITE_THEN_READ, .max_write_len = MSP_MAX_BYTES_PER_RW, .max_read_len = MSP_MAX_BYTES_PER_RW, @@ -587,7 +587,7 @@ static struct i2c_adapter_quirks pmcmsptwi_i2c_quirks = { /* -- Initialization -- */ -static struct i2c_algorithm pmcmsptwi_algo = { +static const struct i2c_algorithm pmcmsptwi_algo = { .master_xfer = pmcmsptwi_master_xfer, .functionality = pmcmsptwi_i2c_func, }; diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c index fd5f9d2bf6d9..42d6b3a226f8 100644 --- a/drivers/i2c/busses/i2c-pnx.c +++ b/drivers/i2c/busses/i2c-pnx.c @@ -590,7 +590,7 @@ static u32 i2c_pnx_func(struct i2c_adapter *adapter) return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } -static struct i2c_algorithm pnx_algorithm = { +static const struct i2c_algorithm pnx_algorithm = { .master_xfer = i2c_pnx_xfer, .functionality = i2c_pnx_func, }; diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c index b0d9dee14a7e..f2a2067525ef 100644 --- a/drivers/i2c/busses/i2c-powermac.c +++ b/drivers/i2c/busses/i2c-powermac.c @@ -197,7 +197,7 @@ static const struct i2c_algorithm i2c_powermac_algorithm = { .functionality = i2c_powermac_func, }; -static struct i2c_adapter_quirks i2c_powermac_quirks = { +static const struct i2c_adapter_quirks i2c_powermac_quirks = { .max_num_msgs = 1, }; @@ -234,7 +234,7 @@ static u32 i2c_powermac_get_addr(struct i2c_adapter *adap, else if (!strcmp(node->name, "deq")) return 0x34; - dev_warn(&adap->dev, "No i2c address for %s\n", node->full_name); + dev_warn(&adap->dev, "No i2c address for %pOF\n", node); return 0xffffffff; } @@ -315,8 +315,7 @@ static bool i2c_powermac_get_type(struct i2c_adapter *adap, } } - dev_err(&adap->dev, "i2c-powermac: modalias failure" - " on %s\n", node->full_name); + dev_err(&adap->dev, "i2c-powermac: modalias failure on %pOF\n", node); return false; } @@ -348,8 +347,7 @@ static void i2c_powermac_register_devices(struct i2c_adapter *adap, if (!pmac_i2c_match_adapter(node, adap)) continue; - dev_dbg(&adap->dev, "i2c-powermac: register %s\n", - node->full_name); + dev_dbg(&adap->dev, "i2c-powermac: register %pOF\n", node); /* * Keep track of some device existence to handle @@ -372,7 +370,7 @@ static void i2c_powermac_register_devices(struct i2c_adapter *adap, newdev = i2c_new_device(adap, &info); if (!newdev) { dev_err(&adap->dev, "i2c-powermac: Failure to register" - " %s\n", node->full_name); + " %pOF\n", node); of_node_put(node); /* We do not dispose of the interrupt mapping on * purpose. 
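The i2c-powermac hunks above (and several later ones in this series) replace "%s" plus node->full_name with the %pOF printf extension, which prints the full device-tree path of a struct device_node directly. A minimal sketch of the pattern; the helper name and example path are hypothetical, not part of the patch:

#include <linux/i2c.h>
#include <linux/of.h>

static void report_missing_addr(struct i2c_adapter *adap, struct device_node *node)
{
	/* %pOF expands to the node's full path, e.g. /soc/i2c@f0004000/codec@34 */
	dev_warn(&adap->dev, "No i2c address for %pOF\n", node);
}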
It's not necessary (interrupt cannot be diff --git a/drivers/i2c/busses/i2c-puv3.c b/drivers/i2c/busses/i2c-puv3.c index 0c8b1571886d..287088b8c4c8 100644 --- a/drivers/i2c/busses/i2c-puv3.c +++ b/drivers/i2c/busses/i2c-puv3.c @@ -175,7 +175,7 @@ static u32 puv3_i2c_func(struct i2c_adapter *adapter) return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } -static struct i2c_algorithm puv3_i2c_algorithm = { +static const struct i2c_algorithm puv3_i2c_algorithm = { .master_xfer = puv3_i2c_xfer, .functionality = puv3_i2c_func, }; diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c index 6cf333ecc8b8..600d264e080c 100644 --- a/drivers/i2c/busses/i2c-pxa.c +++ b/drivers/i2c/busses/i2c-pxa.c @@ -1346,8 +1346,7 @@ static int i2c_pxa_remove(struct platform_device *dev) #ifdef CONFIG_PM static int i2c_pxa_suspend_noirq(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct pxa_i2c *i2c = platform_get_drvdata(pdev); + struct pxa_i2c *i2c = dev_get_drvdata(dev); clk_disable(i2c->clk); @@ -1356,8 +1355,7 @@ static int i2c_pxa_suspend_noirq(struct device *dev) static int i2c_pxa_resume_noirq(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct pxa_i2c *i2c = platform_get_drvdata(pdev); + struct pxa_i2c *i2c = dev_get_drvdata(dev); clk_enable(i2c->clk); i2c_pxa_reset(i2c); diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c index 1902d8ac9753..08f8e0107642 100644 --- a/drivers/i2c/busses/i2c-qup.c +++ b/drivers/i2c/busses/i2c-qup.c @@ -1396,7 +1396,7 @@ static const struct i2c_algorithm qup_i2c_algo_v2 = { * the end of the read, the length of the read is specified as one byte * which limits the possible read to 256 (QUP_READ_LIMIT) bytes. */ -static struct i2c_adapter_quirks qup_i2c_quirks = { +static const struct i2c_adapter_quirks qup_i2c_quirks = { .max_read_len = QUP_READ_LIMIT, }; diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index 93c1a54981df..15d764afec3b 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c @@ -625,9 +625,8 @@ static struct dma_chan *rcar_i2c_request_dma_chan(struct device *dev, chan = dma_request_chan(dev, chan_name); if (IS_ERR(chan)) { - ret = PTR_ERR(chan); - dev_dbg(dev, "request_channel failed for %s (%d)\n", - chan_name, ret); + dev_dbg(dev, "request_channel failed for %s (%ld)\n", + chan_name, PTR_ERR(chan)); return chan; } diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c index df220666d627..fe234578380a 100644 --- a/drivers/i2c/busses/i2c-rk3x.c +++ b/drivers/i2c/busses/i2c-rk3x.c @@ -1131,6 +1131,11 @@ static const struct i2c_algorithm rk3x_i2c_algorithm = { .functionality = rk3x_i2c_func, }; +static const struct rk3x_i2c_soc_data rv1108_soc_data = { + .grf_offset = -1, + .calc_timings = rk3x_i2c_v1_calc_timings, +}; + static const struct rk3x_i2c_soc_data rk3066_soc_data = { .grf_offset = 0x154, .calc_timings = rk3x_i2c_v0_calc_timings, @@ -1157,6 +1162,10 @@ static const struct rk3x_i2c_soc_data rk3399_soc_data = { }; static const struct of_device_id rk3x_i2c_match[] = { + { + .compatible = "rockchip,rv1108-i2c", + .data = (void *)&rv1108_soc_data + }, { .compatible = "rockchip,rk3066-i2c", .data = (void *)&rk3066_soc_data diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index 499af26e736e..5d97510ee48b 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c @@ -1246,8 +1246,7 @@ static int s3c24xx_i2c_remove(struct 
platform_device *pdev) #ifdef CONFIG_PM_SLEEP static int s3c24xx_i2c_suspend_noirq(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev); + struct s3c24xx_i2c *i2c = dev_get_drvdata(dev); i2c->suspended = 1; @@ -1259,8 +1258,7 @@ static int s3c24xx_i2c_suspend_noirq(struct device *dev) static int s3c24xx_i2c_resume_noirq(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev); + struct s3c24xx_i2c *i2c = dev_get_drvdata(dev); int ret; if (!IS_ERR(i2c->sysreg)) diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c index 2e097d97d258..6f2aaeb7c4fa 100644 --- a/drivers/i2c/busses/i2c-sh_mobile.c +++ b/drivers/i2c/busses/i2c-sh_mobile.c @@ -561,8 +561,8 @@ static struct dma_chan *sh_mobile_i2c_request_dma_chan(struct device *dev, chan = dma_request_slave_channel_reason(dev, chan_name); if (IS_ERR(chan)) { - ret = PTR_ERR(chan); - dev_dbg(dev, "request_channel failed for %s (%d)\n", chan_name, ret); + dev_dbg(dev, "request_channel failed for %s (%ld)\n", chan_name, + PTR_ERR(chan)); return chan; } diff --git a/drivers/i2c/busses/i2c-sirf.c b/drivers/i2c/busses/i2c-sirf.c index 95e81d0f72b4..2fd8b6d00391 100644 --- a/drivers/i2c/busses/i2c-sirf.c +++ b/drivers/i2c/busses/i2c-sirf.c @@ -421,8 +421,7 @@ static int i2c_sirfsoc_remove(struct platform_device *pdev) #ifdef CONFIG_PM static int i2c_sirfsoc_suspend(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct i2c_adapter *adapter = platform_get_drvdata(pdev); + struct i2c_adapter *adapter = dev_get_drvdata(dev); struct sirfsoc_i2c *siic = adapter->algo_data; clk_enable(siic->clk); @@ -434,8 +433,7 @@ static int i2c_sirfsoc_suspend(struct device *dev) static int i2c_sirfsoc_resume(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct i2c_adapter *adapter = platform_get_drvdata(pdev); + struct i2c_adapter *adapter = dev_get_drvdata(dev); struct sirfsoc_i2c *siic = adapter->algo_data; clk_enable(siic->clk); diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c new file mode 100644 index 000000000000..25fcc3c1e32b --- /dev/null +++ b/drivers/i2c/busses/i2c-sprd.c @@ -0,0 +1,647 @@ +/* + * Copyright (C) 2017 Spreadtrum Communications Inc. 
+ * + * SPDX-License-Identifier: (GPL-2.0+ OR MIT) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define I2C_CTL 0x00 +#define I2C_ADDR_CFG 0x04 +#define I2C_COUNT 0x08 +#define I2C_RX 0x0c +#define I2C_TX 0x10 +#define I2C_STATUS 0x14 +#define I2C_HSMODE_CFG 0x18 +#define I2C_VERSION 0x1c +#define ADDR_DVD0 0x20 +#define ADDR_DVD1 0x24 +#define ADDR_STA0_DVD 0x28 +#define ADDR_RST 0x2c + +/* I2C_CTL */ +#define STP_EN BIT(20) +#define FIFO_AF_LVL_MASK GENMASK(19, 16) +#define FIFO_AF_LVL 16 +#define FIFO_AE_LVL_MASK GENMASK(15, 12) +#define FIFO_AE_LVL 12 +#define I2C_DMA_EN BIT(11) +#define FULL_INTEN BIT(10) +#define EMPTY_INTEN BIT(9) +#define I2C_DVD_OPT BIT(8) +#define I2C_OUT_OPT BIT(7) +#define I2C_TRIM_OPT BIT(6) +#define I2C_HS_MODE BIT(4) +#define I2C_MODE BIT(3) +#define I2C_EN BIT(2) +#define I2C_INT_EN BIT(1) +#define I2C_START BIT(0) + +/* I2C_STATUS */ +#define SDA_IN BIT(21) +#define SCL_IN BIT(20) +#define FIFO_FULL BIT(4) +#define FIFO_EMPTY BIT(3) +#define I2C_INT BIT(2) +#define I2C_RX_ACK BIT(1) +#define I2C_BUSY BIT(0) + +/* ADDR_RST */ +#define I2C_RST BIT(0) + +#define I2C_FIFO_DEEP 12 +#define I2C_FIFO_FULL_THLD 15 +#define I2C_FIFO_EMPTY_THLD 4 +#define I2C_DATA_STEP 8 +#define I2C_ADDR_DVD0_CALC(high, low) \ + ((((high) & GENMASK(15, 0)) << 16) | ((low) & GENMASK(15, 0))) +#define I2C_ADDR_DVD1_CALC(high, low) \ + (((high) & GENMASK(31, 16)) | (((low) & GENMASK(31, 16)) >> 16)) + +/* timeout (ms) for pm runtime autosuspend */ +#define SPRD_I2C_PM_TIMEOUT 1000 + +/* SPRD i2c data structure */ +struct sprd_i2c { + struct i2c_adapter adap; + struct device *dev; + void __iomem *base; + struct i2c_msg *msg; + struct clk *clk; + u32 src_clk; + u32 bus_freq; + struct completion complete; + u8 *buf; + u32 count; + int irq; + int err; +}; + +static void sprd_i2c_set_count(struct sprd_i2c *i2c_dev, u32 count) +{ + writel(count, i2c_dev->base + I2C_COUNT); +} + +static void sprd_i2c_send_stop(struct sprd_i2c *i2c_dev, int stop) +{ + u32 tmp = readl(i2c_dev->base + I2C_CTL); + + if (stop) + writel(tmp & ~STP_EN, i2c_dev->base + I2C_CTL); + else + writel(tmp | STP_EN, i2c_dev->base + I2C_CTL); +} + +static void sprd_i2c_clear_start(struct sprd_i2c *i2c_dev) +{ + u32 tmp = readl(i2c_dev->base + I2C_CTL); + + writel(tmp & ~I2C_START, i2c_dev->base + I2C_CTL); +} + +static void sprd_i2c_clear_ack(struct sprd_i2c *i2c_dev) +{ + u32 tmp = readl(i2c_dev->base + I2C_STATUS); + + writel(tmp & ~I2C_RX_ACK, i2c_dev->base + I2C_STATUS); +} + +static void sprd_i2c_clear_irq(struct sprd_i2c *i2c_dev) +{ + u32 tmp = readl(i2c_dev->base + I2C_STATUS); + + writel(tmp & ~I2C_INT, i2c_dev->base + I2C_STATUS); +} + +static void sprd_i2c_reset_fifo(struct sprd_i2c *i2c_dev) +{ + writel(I2C_RST, i2c_dev->base + ADDR_RST); +} + +static void sprd_i2c_set_devaddr(struct sprd_i2c *i2c_dev, struct i2c_msg *m) +{ + writel(m->addr << 1, i2c_dev->base + I2C_ADDR_CFG); +} + +static void sprd_i2c_write_bytes(struct sprd_i2c *i2c_dev, u8 *buf, u32 len) +{ + u32 i; + + for (i = 0; i < len; i++) + writeb(buf[i], i2c_dev->base + I2C_TX); +} + +static void sprd_i2c_read_bytes(struct sprd_i2c *i2c_dev, u8 *buf, u32 len) +{ + u32 i; + + for (i = 0; i < len; i++) + buf[i] = readb(i2c_dev->base + I2C_RX); +} + +static void sprd_i2c_set_full_thld(struct sprd_i2c *i2c_dev, u32 full_thld) +{ + u32 tmp = readl(i2c_dev->base + I2C_CTL); + + tmp &= ~FIFO_AF_LVL_MASK; + tmp |= full_thld << FIFO_AF_LVL; + writel(tmp, i2c_dev->base + 
I2C_CTL); +}; + +static void sprd_i2c_set_empty_thld(struct sprd_i2c *i2c_dev, u32 empty_thld) +{ + u32 tmp = readl(i2c_dev->base + I2C_CTL); + + tmp &= ~FIFO_AE_LVL_MASK; + tmp |= empty_thld << FIFO_AE_LVL; + writel(tmp, i2c_dev->base + I2C_CTL); +}; + +static void sprd_i2c_set_fifo_full_int(struct sprd_i2c *i2c_dev, int enable) +{ + u32 tmp = readl(i2c_dev->base + I2C_CTL); + + if (enable) + tmp |= FULL_INTEN; + else + tmp &= ~FULL_INTEN; + + writel(tmp, i2c_dev->base + I2C_CTL); +}; + +static void sprd_i2c_set_fifo_empty_int(struct sprd_i2c *i2c_dev, int enable) +{ + u32 tmp = readl(i2c_dev->base + I2C_CTL); + + if (enable) + tmp |= EMPTY_INTEN; + else + tmp &= ~EMPTY_INTEN; + + writel(tmp, i2c_dev->base + I2C_CTL); +}; + +static void sprd_i2c_opt_start(struct sprd_i2c *i2c_dev) +{ + u32 tmp = readl(i2c_dev->base + I2C_CTL); + + writel(tmp | I2C_START, i2c_dev->base + I2C_CTL); +} + +static void sprd_i2c_opt_mode(struct sprd_i2c *i2c_dev, int rw) +{ + u32 cmd = readl(i2c_dev->base + I2C_CTL) & ~I2C_MODE; + + writel(cmd | rw << 3, i2c_dev->base + I2C_CTL); +} + +static void sprd_i2c_data_transfer(struct sprd_i2c *i2c_dev) +{ + u32 i2c_count = i2c_dev->count; + u32 need_tran = i2c_count <= I2C_FIFO_DEEP ? i2c_count : I2C_FIFO_DEEP; + struct i2c_msg *msg = i2c_dev->msg; + + if (msg->flags & I2C_M_RD) { + sprd_i2c_read_bytes(i2c_dev, i2c_dev->buf, I2C_FIFO_FULL_THLD); + i2c_dev->count -= I2C_FIFO_FULL_THLD; + i2c_dev->buf += I2C_FIFO_FULL_THLD; + + /* + * If the read data count is larger than the rx fifo full threshold, + * we should enable the rx fifo full interrupt to read data + * again. + */ + if (i2c_dev->count >= I2C_FIFO_FULL_THLD) + sprd_i2c_set_fifo_full_int(i2c_dev, 1); + } else { + sprd_i2c_write_bytes(i2c_dev, i2c_dev->buf, need_tran); + i2c_dev->buf += need_tran; + i2c_dev->count -= need_tran; + + /* + * If the write data count is larger than the tx fifo depth, which + * means we cannot write all the data at once, then we should + * enable the tx fifo empty interrupt to write again. + */ + if (i2c_count > I2C_FIFO_DEEP) + sprd_i2c_set_fifo_empty_int(i2c_dev, 1); + } +} + +static int sprd_i2c_handle_msg(struct i2c_adapter *i2c_adap, + struct i2c_msg *msg, bool is_last_msg) +{ + struct sprd_i2c *i2c_dev = i2c_adap->algo_data; + + i2c_dev->msg = msg; + i2c_dev->buf = msg->buf; + i2c_dev->count = msg->len; + + reinit_completion(&i2c_dev->complete); + sprd_i2c_reset_fifo(i2c_dev); + sprd_i2c_set_devaddr(i2c_dev, msg); + sprd_i2c_set_count(i2c_dev, msg->len); + + if (msg->flags & I2C_M_RD) { + sprd_i2c_opt_mode(i2c_dev, 1); + sprd_i2c_send_stop(i2c_dev, 1); + } else { + sprd_i2c_opt_mode(i2c_dev, 0); + sprd_i2c_send_stop(i2c_dev, !!is_last_msg); + } + + /* + * We should enable the rx fifo full interrupt to read out data once + * the rx fifo fills up. + */ + if (msg->flags & I2C_M_RD) + sprd_i2c_set_fifo_full_int(i2c_dev, 1); + else + sprd_i2c_data_transfer(i2c_dev); + + sprd_i2c_opt_start(i2c_dev); + + wait_for_completion(&i2c_dev->complete); + + return i2c_dev->err; +} + +static int sprd_i2c_master_xfer(struct i2c_adapter *i2c_adap, + struct i2c_msg *msgs, int num) +{ + struct sprd_i2c *i2c_dev = i2c_adap->algo_data; + int im, ret; + + ret = pm_runtime_get_sync(i2c_dev->dev); + if (ret < 0) + return ret; + + for (im = 0; im < num - 1; im++) { + ret = sprd_i2c_handle_msg(i2c_adap, &msgs[im], 0); + if (ret) + goto err_msg; + } + + ret = sprd_i2c_handle_msg(i2c_adap, &msgs[im++], 1); + +err_msg: + pm_runtime_mark_last_busy(i2c_dev->dev); + pm_runtime_put_autosuspend(i2c_dev->dev); + + return ret < 0 ?
ret : im; +} + +static u32 sprd_i2c_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static const struct i2c_algorithm sprd_i2c_algo = { + .master_xfer = sprd_i2c_master_xfer, + .functionality = sprd_i2c_func, +}; + +static void sprd_i2c_set_clk(struct sprd_i2c *i2c_dev, u32 freq) +{ + u32 apb_clk = i2c_dev->src_clk; + /* + * From I2C databook, the prescale calculation formula: + * prescale = freq_i2c / (4 * freq_scl) - 1; + */ + u32 i2c_dvd = apb_clk / (4 * freq) - 1; + /* + * From I2C databook, the high period of SCL clock is recommended as + * 40% (2/5), and the low period of SCL clock is recommended as 60% + * (3/5), then the formula should be: + * high = (prescale * 2 * 2) / 5 + * low = (prescale * 2 * 3) / 5 + */ + u32 high = ((i2c_dvd << 1) * 2) / 5; + u32 low = ((i2c_dvd << 1) * 3) / 5; + u32 div0 = I2C_ADDR_DVD0_CALC(high, low); + u32 div1 = I2C_ADDR_DVD1_CALC(high, low); + + writel(div0, i2c_dev->base + ADDR_DVD0); + writel(div1, i2c_dev->base + ADDR_DVD1); + + /* Start hold timing = hold time(us) * source clock */ + if (freq == 400000) + writel((6 * apb_clk) / 10000000, i2c_dev->base + ADDR_STA0_DVD); + else if (freq == 100000) + writel((4 * apb_clk) / 1000000, i2c_dev->base + ADDR_STA0_DVD); +} + +static void sprd_i2c_enable(struct sprd_i2c *i2c_dev) +{ + u32 tmp = I2C_DVD_OPT; + + writel(tmp, i2c_dev->base + I2C_CTL); + + sprd_i2c_set_full_thld(i2c_dev, I2C_FIFO_FULL_THLD); + sprd_i2c_set_empty_thld(i2c_dev, I2C_FIFO_EMPTY_THLD); + + sprd_i2c_set_clk(i2c_dev, i2c_dev->bus_freq); + sprd_i2c_reset_fifo(i2c_dev); + sprd_i2c_clear_irq(i2c_dev); + + tmp = readl(i2c_dev->base + I2C_CTL); + writel(tmp | I2C_EN | I2C_INT_EN, i2c_dev->base + I2C_CTL); +} + +static irqreturn_t sprd_i2c_isr_thread(int irq, void *dev_id) +{ + struct sprd_i2c *i2c_dev = dev_id; + struct i2c_msg *msg = i2c_dev->msg; + bool ack = !(readl(i2c_dev->base + I2C_STATUS) & I2C_RX_ACK); + u32 i2c_count = readl(i2c_dev->base + I2C_COUNT); + u32 i2c_tran; + + if (msg->flags & I2C_M_RD) + i2c_tran = i2c_dev->count >= I2C_FIFO_FULL_THLD; + else + i2c_tran = i2c_count; + + /* + * If we got one ACK from the slave when writing data, and we did not + * finish this transmission (i2c_tran is not zero), then we should + * continue to write data. + * + * For reading data, ack is always true; if i2c_tran is not 0, it + * means we still need to continue to read data from the slave. + */ + if (i2c_tran && ack) { + sprd_i2c_data_transfer(i2c_dev); + return IRQ_HANDLED; + } + + i2c_dev->err = 0; + + /* + * If we did not get one ACK from the slave when writing data, we should + * return -EIO to notify users. + */ + if (!ack) + i2c_dev->err = -EIO; + else if (msg->flags & I2C_M_RD && i2c_dev->count) + sprd_i2c_read_bytes(i2c_dev, i2c_dev->buf, i2c_dev->count); + + /* Transmission is done, so clear the ack and start operation */ + sprd_i2c_clear_ack(i2c_dev); + sprd_i2c_clear_start(i2c_dev); + complete(&i2c_dev->complete); + + return IRQ_HANDLED; +} + +static irqreturn_t sprd_i2c_isr(int irq, void *dev_id) +{ + struct sprd_i2c *i2c_dev = dev_id; + struct i2c_msg *msg = i2c_dev->msg; + u32 i2c_count = readl(i2c_dev->base + I2C_COUNT); + bool ack = !(readl(i2c_dev->base + I2C_STATUS) & I2C_RX_ACK); + u32 i2c_tran; + + if (msg->flags & I2C_M_RD) + i2c_tran = i2c_dev->count >= I2C_FIFO_FULL_THLD; + else + i2c_tran = i2c_count; + + /* + * If we did not get one ACK from the slave when writing data, then we + * should finish this transmission since we got some errors.
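A worked example of the divider math in sprd_i2c_set_clk() above, assuming a 26 MHz source clock and a 400 kHz bus (numbers are illustrative, not taken from the patch): i2c_dvd = 26000000 / (4 * 400000) - 1 = 15, high = ((15 << 1) * 2) / 5 = 12, low = ((15 << 1) * 3) / 5 = 18, so ADDR_DVD0 receives (12 << 16) | 18 = 0x000c0012, ADDR_DVD1 receives 0, and the 400 kHz start-hold value is (6 * 26000000) / 10000000 = 15. The same arithmetic as a stand-alone sketch:

#include <stdio.h>

int main(void)
{
	unsigned int apb_clk = 26000000, freq = 400000;	/* assumed clocks */
	unsigned int i2c_dvd = apb_clk / (4 * freq) - 1;	/* prescale, as in the driver */
	unsigned int high = ((i2c_dvd << 1) * 2) / 5;	/* ~40% of the SCL period */
	unsigned int low = ((i2c_dvd << 1) * 3) / 5;	/* ~60% of the SCL period */

	printf("dvd=%u high=%u low=%u dvd0=0x%08x sta0=%u\n", i2c_dvd, high, low,
	       (high << 16) | low, (6 * apb_clk) / 10000000);
	return 0;
}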
+ * + * When writing data, if i2c_tran == 0, which means we have written + * all the data, then we can finish this transmission. + * + * When reading data, if count < rx fifo full threshold, which + * means we can read all the data at once, then we can finish this + * transmission too. + */ + if (!i2c_tran || !ack) { + sprd_i2c_clear_start(i2c_dev); + sprd_i2c_clear_irq(i2c_dev); + } + + sprd_i2c_set_fifo_empty_int(i2c_dev, 0); + sprd_i2c_set_fifo_full_int(i2c_dev, 0); + + return IRQ_WAKE_THREAD; +} + +static int sprd_i2c_clk_init(struct sprd_i2c *i2c_dev) +{ + struct clk *clk_i2c, *clk_parent; + + clk_i2c = devm_clk_get(i2c_dev->dev, "i2c"); + if (IS_ERR(clk_i2c)) { + dev_warn(i2c_dev->dev, "i2c%d can't get the i2c clock\n", + i2c_dev->adap.nr); + clk_i2c = NULL; + } + + clk_parent = devm_clk_get(i2c_dev->dev, "source"); + if (IS_ERR(clk_parent)) { + dev_warn(i2c_dev->dev, "i2c%d can't get the source clock\n", + i2c_dev->adap.nr); + clk_parent = NULL; + } + + if (clk_set_parent(clk_i2c, clk_parent)) + i2c_dev->src_clk = clk_get_rate(clk_i2c); + else + i2c_dev->src_clk = 26000000; + + dev_dbg(i2c_dev->dev, "i2c%d set source clock is %d\n", + i2c_dev->adap.nr, i2c_dev->src_clk); + + i2c_dev->clk = devm_clk_get(i2c_dev->dev, "enable"); + if (IS_ERR(i2c_dev->clk)) { + dev_warn(i2c_dev->dev, "i2c%d can't get the enable clock\n", + i2c_dev->adap.nr); + i2c_dev->clk = NULL; + } + + return 0; +} + +static int sprd_i2c_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct sprd_i2c *i2c_dev; + struct resource *res; + u32 prop; + int ret; + + pdev->id = of_alias_get_id(dev->of_node, "i2c"); + + i2c_dev = devm_kzalloc(dev, sizeof(struct sprd_i2c), GFP_KERNEL); + if (!i2c_dev) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + i2c_dev->base = devm_ioremap_resource(dev, res); + if (IS_ERR(i2c_dev->base)) + return PTR_ERR(i2c_dev->base); + + i2c_dev->irq = platform_get_irq(pdev, 0); + if (i2c_dev->irq < 0) { + dev_err(&pdev->dev, "failed to get irq resource\n"); + return i2c_dev->irq; + } + + i2c_set_adapdata(&i2c_dev->adap, i2c_dev); + init_completion(&i2c_dev->complete); + snprintf(i2c_dev->adap.name, sizeof(i2c_dev->adap.name), + "%s", "sprd-i2c"); + + i2c_dev->bus_freq = 100000; + i2c_dev->adap.owner = THIS_MODULE; + i2c_dev->dev = dev; + i2c_dev->adap.retries = 3; + i2c_dev->adap.algo = &sprd_i2c_algo; + i2c_dev->adap.algo_data = i2c_dev; + i2c_dev->adap.dev.parent = dev; + i2c_dev->adap.nr = pdev->id; + i2c_dev->adap.dev.of_node = dev->of_node; + + if (!of_property_read_u32(dev->of_node, "clock-frequency", &prop)) + i2c_dev->bus_freq = prop; + + /* We only support 100k and 400k now, otherwise we will return an error.
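The two handlers above follow the kernel's hard-IRQ/threaded-IRQ split: sprd_i2c_isr only decides whether the transfer is over, clears the start/interrupt status when it is, masks the FIFO interrupts and returns IRQ_WAKE_THREAD, while the actual FIFO draining and the completion happen in sprd_i2c_isr_thread. A minimal sketch of that registration pattern with purely illustrative names (the real call appears in the probe function below):

#include <linux/interrupt.h>

static irqreturn_t demo_hw_handler(int irq, void *data)
{
	/* hard-IRQ context: acknowledge/mask only, defer the heavy work */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t demo_thread_handler(int irq, void *data)
{
	/* runs in a kernel thread: safe to spend time moving FIFO data here */
	return IRQ_HANDLED;
}

/* devm_request_threaded_irq(dev, irq, demo_hw_handler, demo_thread_handler,
 *			     IRQF_ONESHOT, dev_name(dev), priv);
 */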
*/ + if (i2c_dev->bus_freq != 100000 && i2c_dev->bus_freq != 400000) + return -EINVAL; + + sprd_i2c_clk_init(i2c_dev); + platform_set_drvdata(pdev, i2c_dev); + + ret = clk_prepare_enable(i2c_dev->clk); + if (ret) + return ret; + + sprd_i2c_enable(i2c_dev); + + pm_runtime_set_autosuspend_delay(i2c_dev->dev, SPRD_I2C_PM_TIMEOUT); + pm_runtime_use_autosuspend(i2c_dev->dev); + pm_runtime_set_active(i2c_dev->dev); + pm_runtime_enable(i2c_dev->dev); + + ret = pm_runtime_get_sync(i2c_dev->dev); + if (ret < 0) + goto err_rpm_put; + + ret = devm_request_threaded_irq(dev, i2c_dev->irq, + sprd_i2c_isr, sprd_i2c_isr_thread, + IRQF_NO_SUSPEND | IRQF_ONESHOT, + pdev->name, i2c_dev); + if (ret) { + dev_err(&pdev->dev, "failed to request irq %d\n", i2c_dev->irq); + goto err_rpm_put; + } + + ret = i2c_add_numbered_adapter(&i2c_dev->adap); + if (ret) { + dev_err(&pdev->dev, "add adapter failed\n"); + goto err_rpm_put; + } + + pm_runtime_mark_last_busy(i2c_dev->dev); + pm_runtime_put_autosuspend(i2c_dev->dev); + return 0; + +err_rpm_put: + pm_runtime_put_noidle(i2c_dev->dev); + pm_runtime_disable(i2c_dev->dev); + clk_disable_unprepare(i2c_dev->clk); + return ret; +} + +static int sprd_i2c_remove(struct platform_device *pdev) +{ + struct sprd_i2c *i2c_dev = platform_get_drvdata(pdev); + int ret; + + ret = pm_runtime_get_sync(i2c_dev->dev); + if (ret < 0) + return ret; + + i2c_del_adapter(&i2c_dev->adap); + clk_disable_unprepare(i2c_dev->clk); + + pm_runtime_put_noidle(i2c_dev->dev); + pm_runtime_disable(i2c_dev->dev); + + return 0; +} + +static int __maybe_unused sprd_i2c_suspend_noirq(struct device *pdev) +{ + return pm_runtime_force_suspend(pdev); +} + +static int __maybe_unused sprd_i2c_resume_noirq(struct device *pdev) +{ + return pm_runtime_force_resume(pdev); +} + +static int __maybe_unused sprd_i2c_runtime_suspend(struct device *pdev) +{ + struct sprd_i2c *i2c_dev = dev_get_drvdata(pdev); + + clk_disable_unprepare(i2c_dev->clk); + + return 0; +} + +static int __maybe_unused sprd_i2c_runtime_resume(struct device *pdev) +{ + struct sprd_i2c *i2c_dev = dev_get_drvdata(pdev); + int ret; + + ret = clk_prepare_enable(i2c_dev->clk); + if (ret) + return ret; + + sprd_i2c_enable(i2c_dev); + + return 0; +} + +static const struct dev_pm_ops sprd_i2c_pm_ops = { + SET_RUNTIME_PM_OPS(sprd_i2c_runtime_suspend, + sprd_i2c_runtime_resume, NULL) + + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sprd_i2c_suspend_noirq, + sprd_i2c_resume_noirq) +}; + +static const struct of_device_id sprd_i2c_of_match[] = { + { .compatible = "sprd,sc9860-i2c", }, + {}, +}; + +static struct platform_driver sprd_i2c_driver = { + .probe = sprd_i2c_probe, + .remove = sprd_i2c_remove, + .driver = { + .name = "sprd-i2c", + .of_match_table = sprd_i2c_of_match, + .pm = &sprd_i2c_pm_ops, + }, +}; + +static int sprd_i2c_init(void) +{ + return platform_driver_register(&sprd_i2c_driver); +} +arch_initcall_sync(sprd_i2c_init); diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c index 1eb9fa82dcfd..9e62f893958a 100644 --- a/drivers/i2c/busses/i2c-st.c +++ b/drivers/i2c/busses/i2c-st.c @@ -745,8 +745,7 @@ static int st_i2c_xfer(struct i2c_adapter *i2c_adap, #ifdef CONFIG_PM_SLEEP static int st_i2c_suspend(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct st_i2c_dev *i2c_dev = platform_get_drvdata(pdev); + struct st_i2c_dev *i2c_dev = dev_get_drvdata(dev); if (i2c_dev->busy) return -EBUSY; diff --git a/drivers/i2c/busses/i2c-stm32.h b/drivers/i2c/busses/i2c-stm32.h new file mode 100644 index 
000000000000..dab51761f8c5 --- /dev/null +++ b/drivers/i2c/busses/i2c-stm32.h @@ -0,0 +1,20 @@ +/* + * i2c-stm32.h + * + * Copyright (C) M'boumba Cedric Madianga 2017 + * Author: M'boumba Cedric Madianga + * + * License terms: GNU General Public License (GPL), version 2 + */ + +#ifndef _I2C_STM32_H +#define _I2C_STM32_H + +enum stm32_i2c_speed { + STM32_I2C_SPEED_STANDARD, /* 100 kHz */ + STM32_I2C_SPEED_FAST, /* 400 kHz */ + STM32_I2C_SPEED_FAST_PLUS, /* 1 MHz */ + STM32_I2C_SPEED_END, +}; + +#endif /* _I2C_STM32_H */ diff --git a/drivers/i2c/busses/i2c-stm32f4.c b/drivers/i2c/busses/i2c-stm32f4.c index f9dd7e86b861..4ec108496f15 100644 --- a/drivers/i2c/busses/i2c-stm32f4.c +++ b/drivers/i2c/busses/i2c-stm32f4.c @@ -27,6 +27,8 @@ #include #include +#include "i2c-stm32.h" + /* STM32F4 I2C offset registers */ #define STM32F4_I2C_CR1 0x00 #define STM32F4_I2C_CR2 0x04 @@ -90,12 +92,6 @@ #define STM32F4_I2C_MAX_FREQ 46U #define HZ_TO_MHZ 1000000 -enum stm32f4_i2c_speed { - STM32F4_I2C_SPEED_STANDARD, /* 100 kHz */ - STM32F4_I2C_SPEED_FAST, /* 400 kHz */ - STM32F4_I2C_SPEED_END, -}; - /** * struct stm32f4_i2c_msg - client specific data * @addr: 8-bit slave addr, including r/w bit @@ -159,7 +155,7 @@ static int stm32f4_i2c_set_periph_clk_freq(struct stm32f4_i2c_dev *i2c_dev) i2c_dev->parent_rate = clk_get_rate(i2c_dev->clk); freq = DIV_ROUND_UP(i2c_dev->parent_rate, HZ_TO_MHZ); - if (i2c_dev->speed == STM32F4_I2C_SPEED_STANDARD) { + if (i2c_dev->speed == STM32_I2C_SPEED_STANDARD) { /* * To reach 100 kHz, the parent clk frequency should be between * a minimum value of 2 MHz and a maximum value of 46 MHz due @@ -216,7 +212,7 @@ static void stm32f4_i2c_set_rise_time(struct stm32f4_i2c_dev *i2c_dev) * is not higher than 46 MHz . As a result trise is at most 4 bits wide * and so fits into the TRISE bits [5:0]. 
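A worked example of the rise-time (TRISE) computation that the comment above describes and the code just below applies, assuming a 45 MHz I2C peripheral clock (an assumption for illustration only): freq = DIV_ROUND_UP(45000000, 1000000) = 45, so standard mode programs trise = 45 + 1 = 46 and fast mode programs trise = 45 * 3 / 10 + 1 = 14; both values fit in the TRISE bits [5:0]. The same arithmetic as a stand-alone sketch:

#include <stdio.h>

int main(void)
{
	unsigned int freq = 45;	/* assumed parent clock, in MHz */
	unsigned int trise_std = freq + 1;	/* standard mode: 1000 ns max rise time */
	unsigned int trise_fast = freq * 3 / 10 + 1;	/* fast mode: 300 ns max rise time */

	printf("TRISE standard=%u fast=%u\n", trise_std, trise_fast);	/* 46 and 14 */
	return 0;
}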
*/ - if (i2c_dev->speed == STM32F4_I2C_SPEED_STANDARD) + if (i2c_dev->speed == STM32_I2C_SPEED_STANDARD) trise = freq + 1; else trise = freq * 3 / 10 + 1; @@ -230,7 +226,7 @@ static void stm32f4_i2c_set_speed_mode(struct stm32f4_i2c_dev *i2c_dev) u32 val; u32 ccr = 0; - if (i2c_dev->speed == STM32F4_I2C_SPEED_STANDARD) { + if (i2c_dev->speed == STM32_I2C_SPEED_STANDARD) { /* * In standard mode: * t_scl_high = t_scl_low = CCR * I2C parent clk period @@ -751,7 +747,7 @@ static u32 stm32f4_i2c_func(struct i2c_adapter *adap) return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } -static struct i2c_algorithm stm32f4_i2c_algo = { +static const struct i2c_algorithm stm32f4_i2c_algo = { .master_xfer = stm32f4_i2c_xfer, .functionality = stm32f4_i2c_func, }; @@ -798,7 +794,7 @@ static int stm32f4_i2c_probe(struct platform_device *pdev) return ret; } - rst = devm_reset_control_get(&pdev->dev, NULL); + rst = devm_reset_control_get_exclusive(&pdev->dev, NULL); if (IS_ERR(rst)) { dev_err(&pdev->dev, "Error: Missing controller reset\n"); ret = PTR_ERR(rst); @@ -808,10 +804,10 @@ static int stm32f4_i2c_probe(struct platform_device *pdev) udelay(2); reset_control_deassert(rst); - i2c_dev->speed = STM32F4_I2C_SPEED_STANDARD; + i2c_dev->speed = STM32_I2C_SPEED_STANDARD; ret = of_property_read_u32(np, "clock-frequency", &clk_rate); if (!ret && clk_rate >= 400000) - i2c_dev->speed = STM32F4_I2C_SPEED_FAST; + i2c_dev->speed = STM32_I2C_SPEED_FAST; i2c_dev->dev = &pdev->dev; diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c new file mode 100644 index 000000000000..d4a6e9c2e9aa --- /dev/null +++ b/drivers/i2c/busses/i2c-stm32f7.c @@ -0,0 +1,969 @@ +/* + * Driver for STMicroelectronics STM32F7 I2C controller + * + * This I2C controller is described in the STM32F75xxx and STM32F74xxx Soc + * reference manual. 
+ * Please see below a link to the documentation: + * http://www.st.com/resource/en/reference_manual/dm00124865.pdf + * + * Copyright (C) M'boumba Cedric Madianga 2017 + * Author: M'boumba Cedric Madianga + * + * This driver is based on i2c-stm32f4.c + * + * License terms: GNU General Public License (GPL), version 2 + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "i2c-stm32.h" + +/* STM32F7 I2C registers */ +#define STM32F7_I2C_CR1 0x00 +#define STM32F7_I2C_CR2 0x04 +#define STM32F7_I2C_TIMINGR 0x10 +#define STM32F7_I2C_ISR 0x18 +#define STM32F7_I2C_ICR 0x1C +#define STM32F7_I2C_RXDR 0x24 +#define STM32F7_I2C_TXDR 0x28 + +/* STM32F7 I2C control 1 */ +#define STM32F7_I2C_CR1_ANFOFF BIT(12) +#define STM32F7_I2C_CR1_ERRIE BIT(7) +#define STM32F7_I2C_CR1_TCIE BIT(6) +#define STM32F7_I2C_CR1_STOPIE BIT(5) +#define STM32F7_I2C_CR1_NACKIE BIT(4) +#define STM32F7_I2C_CR1_ADDRIE BIT(3) +#define STM32F7_I2C_CR1_RXIE BIT(2) +#define STM32F7_I2C_CR1_TXIE BIT(1) +#define STM32F7_I2C_CR1_PE BIT(0) +#define STM32F7_I2C_ALL_IRQ_MASK (STM32F7_I2C_CR1_ERRIE \ + | STM32F7_I2C_CR1_TCIE \ + | STM32F7_I2C_CR1_STOPIE \ + | STM32F7_I2C_CR1_NACKIE \ + | STM32F7_I2C_CR1_RXIE \ + | STM32F7_I2C_CR1_TXIE) + +/* STM32F7 I2C control 2 */ +#define STM32F7_I2C_CR2_RELOAD BIT(24) +#define STM32F7_I2C_CR2_NBYTES_MASK GENMASK(23, 16) +#define STM32F7_I2C_CR2_NBYTES(n) (((n) & 0xff) << 16) +#define STM32F7_I2C_CR2_NACK BIT(15) +#define STM32F7_I2C_CR2_STOP BIT(14) +#define STM32F7_I2C_CR2_START BIT(13) +#define STM32F7_I2C_CR2_RD_WRN BIT(10) +#define STM32F7_I2C_CR2_SADD7_MASK GENMASK(7, 1) +#define STM32F7_I2C_CR2_SADD7(n) (((n) & 0x7f) << 1) + +/* STM32F7 I2C Interrupt Status */ +#define STM32F7_I2C_ISR_BUSY BIT(15) +#define STM32F7_I2C_ISR_ARLO BIT(9) +#define STM32F7_I2C_ISR_BERR BIT(8) +#define STM32F7_I2C_ISR_TCR BIT(7) +#define STM32F7_I2C_ISR_TC BIT(6) +#define STM32F7_I2C_ISR_STOPF BIT(5) +#define STM32F7_I2C_ISR_NACKF BIT(4) +#define STM32F7_I2C_ISR_RXNE BIT(2) +#define STM32F7_I2C_ISR_TXIS BIT(1) + +/* STM32F7 I2C Interrupt Clear */ +#define STM32F7_I2C_ICR_ARLOCF BIT(9) +#define STM32F7_I2C_ICR_BERRCF BIT(8) +#define STM32F7_I2C_ICR_STOPCF BIT(5) +#define STM32F7_I2C_ICR_NACKCF BIT(4) + +/* STM32F7 I2C Timing */ +#define STM32F7_I2C_TIMINGR_PRESC(n) (((n) & 0xf) << 28) +#define STM32F7_I2C_TIMINGR_SCLDEL(n) (((n) & 0xf) << 20) +#define STM32F7_I2C_TIMINGR_SDADEL(n) (((n) & 0xf) << 16) +#define STM32F7_I2C_TIMINGR_SCLH(n) (((n) & 0xff) << 8) +#define STM32F7_I2C_TIMINGR_SCLL(n) ((n) & 0xff) + +#define STM32F7_I2C_MAX_LEN 0xff + +#define STM32F7_I2C_DNF_DEFAULT 0 +#define STM32F7_I2C_DNF_MAX 16 + +#define STM32F7_I2C_ANALOG_FILTER_ENABLE 1 +#define STM32F7_I2C_ANALOG_FILTER_DELAY_MIN 50 /* ns */ +#define STM32F7_I2C_ANALOG_FILTER_DELAY_MAX 260 /* ns */ + +#define STM32F7_I2C_RISE_TIME_DEFAULT 25 /* ns */ +#define STM32F7_I2C_FALL_TIME_DEFAULT 10 /* ns */ + +#define STM32F7_PRESC_MAX BIT(4) +#define STM32F7_SCLDEL_MAX BIT(4) +#define STM32F7_SDADEL_MAX BIT(4) +#define STM32F7_SCLH_MAX BIT(8) +#define STM32F7_SCLL_MAX BIT(8) + +/** + * struct stm32f7_i2c_spec - private i2c specification timing + * @rate: I2C bus speed (Hz) + * @rate_min: 80% of I2C bus speed (Hz) + * @rate_max: 100% of I2C bus speed (Hz) + * @fall_max: Max fall time of both SDA and SCL signals (ns) + * @rise_max: Max rise time of both SDA and SCL signals (ns) + * @hddat_min: Min data hold time (ns) + * @vddat_max: Max data valid time 
(ns) + * @sudat_min: Min data setup time (ns) + * @l_min: Min low period of the SCL clock (ns) + * @h_min: Min high period of the SCL clock (ns) + */ +struct stm32f7_i2c_spec { + u32 rate; + u32 rate_min; + u32 rate_max; + u32 fall_max; + u32 rise_max; + u32 hddat_min; + u32 vddat_max; + u32 sudat_min; + u32 l_min; + u32 h_min; +}; + +/** + * struct stm32f7_i2c_setup - private I2C timing setup parameters + * @speed: I2C speed mode (standard, Fast Plus) + * @speed_freq: I2C speed frequency (Hz) + * @clock_src: I2C clock source frequency (Hz) + * @rise_time: Rise time (ns) + * @fall_time: Fall time (ns) + * @dnf: Digital filter coefficient (0-16) + * @analog_filter: Analog filter delay (On/Off) + */ +struct stm32f7_i2c_setup { + enum stm32_i2c_speed speed; + u32 speed_freq; + u32 clock_src; + u32 rise_time; + u32 fall_time; + u8 dnf; + bool analog_filter; +}; + +/** + * struct stm32f7_i2c_timings - private I2C output parameters + * @presc: Prescaler value + * @scldel: Data setup time + * @sdadel: Data hold time + * @sclh: SCL high period (master mode) + * @scll: SCL low period (master mode) + */ +struct stm32f7_i2c_timings { + struct list_head node; + u8 presc; + u8 scldel; + u8 sdadel; + u8 sclh; + u8 scll; +}; + +/** + * struct stm32f7_i2c_msg - client specific data + * @addr: 8-bit slave addr, including r/w bit + * @count: number of bytes to be transferred + * @buf: data buffer + * @result: result of the transfer + * @stop: last I2C msg to be sent, i.e. STOP to be generated + */ +struct stm32f7_i2c_msg { + u8 addr; + u32 count; + u8 *buf; + int result; + bool stop; +}; + +/** + * struct stm32f7_i2c_dev - private data of the controller + * @adap: I2C adapter for this controller + * @dev: device for this controller + * @base: virtual memory area + * @complete: completion of I2C message + * @clk: hw i2c clock + * @speed: I2C clock frequency of the controller. Standard, Fast or Fast+ + * @msg: Pointer to data to be written + * @msg_num: number of I2C messages to be executed + * @msg_id: message identifier + * @f7_msg: customized i2c msg for driver usage + * @setup: I2C timing input setup + * @timing: I2C computed timings + */ +struct stm32f7_i2c_dev { + struct i2c_adapter adap; + struct device *dev; + void __iomem *base; + struct completion complete; + struct clk *clk; + int speed; + struct i2c_msg *msg; + unsigned int msg_num; + unsigned int msg_id; + struct stm32f7_i2c_msg f7_msg; + struct stm32f7_i2c_setup setup; + struct stm32f7_i2c_timings timing; +}; + +/** + * All these values are coming from I2C Specification, Version 6.0, 4th of + * April 2014. + * + * Table 10.
Characteristics of the SDA and SCL bus lines for Standard, Fast, + * and Fast-mode Plus I2C-bus devices + */ +static struct stm32f7_i2c_spec i2c_specs[] = { + [STM32_I2C_SPEED_STANDARD] = { + .rate = 100000, + .rate_min = 80000, + .rate_max = 100000, + .fall_max = 300, + .rise_max = 1000, + .hddat_min = 0, + .vddat_max = 3450, + .sudat_min = 250, + .l_min = 4700, + .h_min = 4000, + }, + [STM32_I2C_SPEED_FAST] = { + .rate = 400000, + .rate_min = 320000, + .rate_max = 400000, + .fall_max = 300, + .rise_max = 300, + .hddat_min = 0, + .vddat_max = 900, + .sudat_min = 100, + .l_min = 1300, + .h_min = 600, + }, + [STM32_I2C_SPEED_FAST_PLUS] = { + .rate = 1000000, + .rate_min = 800000, + .rate_max = 1000000, + .fall_max = 100, + .rise_max = 120, + .hddat_min = 0, + .vddat_max = 450, + .sudat_min = 50, + .l_min = 500, + .h_min = 260, + }, +}; + +static const struct stm32f7_i2c_setup stm32f7_setup = { + .rise_time = STM32F7_I2C_RISE_TIME_DEFAULT, + .fall_time = STM32F7_I2C_FALL_TIME_DEFAULT, + .dnf = STM32F7_I2C_DNF_DEFAULT, + .analog_filter = STM32F7_I2C_ANALOG_FILTER_ENABLE, +}; + +static inline void stm32f7_i2c_set_bits(void __iomem *reg, u32 mask) +{ + writel_relaxed(readl_relaxed(reg) | mask, reg); +} + +static inline void stm32f7_i2c_clr_bits(void __iomem *reg, u32 mask) +{ + writel_relaxed(readl_relaxed(reg) & ~mask, reg); +} + +static int stm32f7_i2c_compute_timing(struct stm32f7_i2c_dev *i2c_dev, + struct stm32f7_i2c_setup *setup, + struct stm32f7_i2c_timings *output) +{ + u32 p_prev = STM32F7_PRESC_MAX; + u32 i2cclk = DIV_ROUND_CLOSEST(NSEC_PER_SEC, + setup->clock_src); + u32 i2cbus = DIV_ROUND_CLOSEST(NSEC_PER_SEC, + setup->speed_freq); + u32 clk_error_prev = i2cbus; + u32 tsync; + u32 af_delay_min, af_delay_max; + u32 dnf_delay; + u32 clk_min, clk_max; + int sdadel_min, sdadel_max; + int scldel_min; + struct stm32f7_i2c_timings *v, *_v, *s; + struct list_head solutions; + u16 p, l, a, h; + int ret = 0; + + if (setup->speed >= STM32_I2C_SPEED_END) { + dev_err(i2c_dev->dev, "speed out of bound {%d/%d}\n", + setup->speed, STM32_I2C_SPEED_END - 1); + return -EINVAL; + } + + if ((setup->rise_time > i2c_specs[setup->speed].rise_max) || + (setup->fall_time > i2c_specs[setup->speed].fall_max)) { + dev_err(i2c_dev->dev, + "timings out of bound Rise{%d>%d}/Fall{%d>%d}\n", + setup->rise_time, i2c_specs[setup->speed].rise_max, + setup->fall_time, i2c_specs[setup->speed].fall_max); + return -EINVAL; + } + + if (setup->dnf > STM32F7_I2C_DNF_MAX) { + dev_err(i2c_dev->dev, + "DNF out of bound %d/%d\n", + setup->dnf, STM32F7_I2C_DNF_MAX); + return -EINVAL; + } + + if (setup->speed_freq > i2c_specs[setup->speed].rate) { + dev_err(i2c_dev->dev, "ERROR: Freq {%d/%d}\n", + setup->speed_freq, i2c_specs[setup->speed].rate); + return -EINVAL; + } + + /* Analog and Digital Filters */ + af_delay_min = + (setup->analog_filter ? + STM32F7_I2C_ANALOG_FILTER_DELAY_MIN : 0); + af_delay_max = + (setup->analog_filter ? 
+ STM32F7_I2C_ANALOG_FILTER_DELAY_MAX : 0); + dnf_delay = setup->dnf * i2cclk; + + sdadel_min = setup->fall_time - i2c_specs[setup->speed].hddat_min - + af_delay_min - (setup->dnf + 3) * i2cclk; + + sdadel_max = i2c_specs[setup->speed].vddat_max - setup->rise_time - + af_delay_max - (setup->dnf + 4) * i2cclk; + + scldel_min = setup->rise_time + i2c_specs[setup->speed].sudat_min; + + if (sdadel_min < 0) + sdadel_min = 0; + if (sdadel_max < 0) + sdadel_max = 0; + + dev_dbg(i2c_dev->dev, "SDADEL(min/max): %i/%i, SCLDEL(Min): %i\n", + sdadel_min, sdadel_max, scldel_min); + + INIT_LIST_HEAD(&solutions); + /* Compute possible values for PRESC, SCLDEL and SDADEL */ + for (p = 0; p < STM32F7_PRESC_MAX; p++) { + for (l = 0; l < STM32F7_SCLDEL_MAX; l++) { + u32 scldel = (l + 1) * (p + 1) * i2cclk; + + if (scldel < scldel_min) + continue; + + for (a = 0; a < STM32F7_SDADEL_MAX; a++) { + u32 sdadel = (a * (p + 1) + 1) * i2cclk; + + if (((sdadel >= sdadel_min) && + (sdadel <= sdadel_max)) && + (p != p_prev)) { + v = kmalloc(sizeof(*v), GFP_KERNEL); + if (!v) { + ret = -ENOMEM; + goto exit; + } + + v->presc = p; + v->scldel = l; + v->sdadel = a; + p_prev = p; + + list_add_tail(&v->node, + &solutions); + } + } + } + } + + if (list_empty(&solutions)) { + dev_err(i2c_dev->dev, "no Prescaler solution\n"); + ret = -EPERM; + goto exit; + } + + tsync = af_delay_min + dnf_delay + (2 * i2cclk); + s = NULL; + clk_max = NSEC_PER_SEC / i2c_specs[setup->speed].rate_min; + clk_min = NSEC_PER_SEC / i2c_specs[setup->speed].rate_max; + + /* + * Among Prescaler possibilities discovered above figures out SCL Low + * and High Period. Provided: + * - SCL Low Period has to be higher than SCL Clock Low Period + * defined by I2C Specification. I2C Clock has to be lower than + * (SCL Low Period - Analog/Digital filters) / 4. 
+ * - SCL High Period has to be lower than SCL Clock High Period + * defined by I2C Specification + * - I2C Clock has to be lower than SCL High Period + */ + list_for_each_entry(v, &solutions, node) { + u32 prescaler = (v->presc + 1) * i2cclk; + + for (l = 0; l < STM32F7_SCLL_MAX; l++) { + u32 tscl_l = (l + 1) * prescaler + tsync; + + if ((tscl_l < i2c_specs[setup->speed].l_min) || + (i2cclk >= + ((tscl_l - af_delay_min - dnf_delay) / 4))) { + continue; + } + + for (h = 0; h < STM32F7_SCLH_MAX; h++) { + u32 tscl_h = (h + 1) * prescaler + tsync; + u32 tscl = tscl_l + tscl_h + + setup->rise_time + setup->fall_time; + + if ((tscl >= clk_min) && (tscl <= clk_max) && + (tscl_h >= i2c_specs[setup->speed].h_min) && + (i2cclk < tscl_h)) { + int clk_error = tscl - i2cbus; + + if (clk_error < 0) + clk_error = -clk_error; + + if (clk_error < clk_error_prev) { + clk_error_prev = clk_error; + v->scll = l; + v->sclh = h; + s = v; + } + } + } + } + } + + if (!s) { + dev_err(i2c_dev->dev, "no solution at all\n"); + ret = -EPERM; + goto exit; + } + + output->presc = s->presc; + output->scldel = s->scldel; + output->sdadel = s->sdadel; + output->scll = s->scll; + output->sclh = s->sclh; + + dev_dbg(i2c_dev->dev, + "Presc: %i, scldel: %i, sdadel: %i, scll: %i, sclh: %i\n", + output->presc, + output->scldel, output->sdadel, + output->scll, output->sclh); + +exit: + /* Release list and memory */ + list_for_each_entry_safe(v, _v, &solutions, node) { + list_del(&v->node); + kfree(v); + } + + return ret; +} + +static int stm32f7_i2c_setup_timing(struct stm32f7_i2c_dev *i2c_dev, + struct stm32f7_i2c_setup *setup) +{ + int ret = 0; + + setup->speed = i2c_dev->speed; + setup->speed_freq = i2c_specs[setup->speed].rate; + setup->clock_src = clk_get_rate(i2c_dev->clk); + + if (!setup->clock_src) { + dev_err(i2c_dev->dev, "clock rate is 0\n"); + return -EINVAL; + } + + do { + ret = stm32f7_i2c_compute_timing(i2c_dev, setup, + &i2c_dev->timing); + if (ret) { + dev_err(i2c_dev->dev, + "failed to compute I2C timings.\n"); + if (i2c_dev->speed > STM32_I2C_SPEED_STANDARD) { + i2c_dev->speed--; + setup->speed = i2c_dev->speed; + setup->speed_freq = + i2c_specs[setup->speed].rate; + dev_warn(i2c_dev->dev, + "downgrade I2C Speed Freq to (%i)\n", + i2c_specs[setup->speed].rate); + } else { + break; + } + } + } while (ret); + + if (ret) { + dev_err(i2c_dev->dev, "Impossible to compute I2C timings.\n"); + return ret; + } + + dev_dbg(i2c_dev->dev, "I2C Speed(%i), Freq(%i), Clk Source(%i)\n", + setup->speed, setup->speed_freq, setup->clock_src); + dev_dbg(i2c_dev->dev, "I2C Rise(%i) and Fall(%i) Time\n", + setup->rise_time, setup->fall_time); + dev_dbg(i2c_dev->dev, "I2C Analog Filter(%s), DNF(%i)\n", + (setup->analog_filter ? 
"On" : "Off"), setup->dnf); + + return 0; +} + +static void stm32f7_i2c_hw_config(struct stm32f7_i2c_dev *i2c_dev) +{ + struct stm32f7_i2c_timings *t = &i2c_dev->timing; + u32 timing = 0; + + /* Timing settings */ + timing |= STM32F7_I2C_TIMINGR_PRESC(t->presc); + timing |= STM32F7_I2C_TIMINGR_SCLDEL(t->scldel); + timing |= STM32F7_I2C_TIMINGR_SDADEL(t->sdadel); + timing |= STM32F7_I2C_TIMINGR_SCLH(t->sclh); + timing |= STM32F7_I2C_TIMINGR_SCLL(t->scll); + writel_relaxed(timing, i2c_dev->base + STM32F7_I2C_TIMINGR); + + /* Enable I2C */ + if (i2c_dev->setup.analog_filter) + stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1, + STM32F7_I2C_CR1_ANFOFF); + else + stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1, + STM32F7_I2C_CR1_ANFOFF); + stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1, + STM32F7_I2C_CR1_PE); +} + +static void stm32f7_i2c_write_tx_data(struct stm32f7_i2c_dev *i2c_dev) +{ + struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg; + void __iomem *base = i2c_dev->base; + + if (f7_msg->count) { + writeb_relaxed(*f7_msg->buf++, base + STM32F7_I2C_TXDR); + f7_msg->count--; + } +} + +static void stm32f7_i2c_read_rx_data(struct stm32f7_i2c_dev *i2c_dev) +{ + struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg; + void __iomem *base = i2c_dev->base; + + if (f7_msg->count) { + *f7_msg->buf++ = readb_relaxed(base + STM32F7_I2C_RXDR); + f7_msg->count--; + } +} + +static void stm32f7_i2c_reload(struct stm32f7_i2c_dev *i2c_dev) +{ + struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg; + u32 cr2; + + cr2 = readl_relaxed(i2c_dev->base + STM32F7_I2C_CR2); + + cr2 &= ~STM32F7_I2C_CR2_NBYTES_MASK; + if (f7_msg->count > STM32F7_I2C_MAX_LEN) { + cr2 |= STM32F7_I2C_CR2_NBYTES(STM32F7_I2C_MAX_LEN); + } else { + cr2 &= ~STM32F7_I2C_CR2_RELOAD; + cr2 |= STM32F7_I2C_CR2_NBYTES(f7_msg->count); + } + + writel_relaxed(cr2, i2c_dev->base + STM32F7_I2C_CR2); +} + +static int stm32f7_i2c_wait_free_bus(struct stm32f7_i2c_dev *i2c_dev) +{ + u32 status; + int ret; + + ret = readl_relaxed_poll_timeout(i2c_dev->base + STM32F7_I2C_ISR, + status, + !(status & STM32F7_I2C_ISR_BUSY), + 10, 1000); + if (ret) { + dev_dbg(i2c_dev->dev, "bus busy\n"); + ret = -EBUSY; + } + + return ret; +} + +static void stm32f7_i2c_xfer_msg(struct stm32f7_i2c_dev *i2c_dev, + struct i2c_msg *msg) +{ + struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg; + void __iomem *base = i2c_dev->base; + u32 cr1, cr2; + + f7_msg->addr = msg->addr; + f7_msg->buf = msg->buf; + f7_msg->count = msg->len; + f7_msg->result = 0; + f7_msg->stop = (i2c_dev->msg_id >= i2c_dev->msg_num - 1); + + reinit_completion(&i2c_dev->complete); + + cr1 = readl_relaxed(base + STM32F7_I2C_CR1); + cr2 = readl_relaxed(base + STM32F7_I2C_CR2); + + /* Set transfer direction */ + cr2 &= ~STM32F7_I2C_CR2_RD_WRN; + if (msg->flags & I2C_M_RD) + cr2 |= STM32F7_I2C_CR2_RD_WRN; + + /* Set slave address */ + cr2 &= ~STM32F7_I2C_CR2_SADD7_MASK; + cr2 |= STM32F7_I2C_CR2_SADD7(f7_msg->addr); + + /* Set nb bytes to transfer and reload if needed */ + cr2 &= ~(STM32F7_I2C_CR2_NBYTES_MASK | STM32F7_I2C_CR2_RELOAD); + if (f7_msg->count > STM32F7_I2C_MAX_LEN) { + cr2 |= STM32F7_I2C_CR2_NBYTES(STM32F7_I2C_MAX_LEN); + cr2 |= STM32F7_I2C_CR2_RELOAD; + } else { + cr2 |= STM32F7_I2C_CR2_NBYTES(f7_msg->count); + } + + /* Enable NACK, STOP, error and transfer complete interrupts */ + cr1 |= STM32F7_I2C_CR1_ERRIE | STM32F7_I2C_CR1_TCIE | + STM32F7_I2C_CR1_STOPIE | STM32F7_I2C_CR1_NACKIE; + + /* Clear TX/RX interrupt */ + cr1 &= ~(STM32F7_I2C_CR1_RXIE | STM32F7_I2C_CR1_TXIE); + + /* Enable RX/TX 
interrupt according to msg direction */ + if (msg->flags & I2C_M_RD) + cr1 |= STM32F7_I2C_CR1_RXIE; + else + cr1 |= STM32F7_I2C_CR1_TXIE; + + /* Configure Start/Repeated Start */ + cr2 |= STM32F7_I2C_CR2_START; + + /* Write configurations registers */ + writel_relaxed(cr1, base + STM32F7_I2C_CR1); + writel_relaxed(cr2, base + STM32F7_I2C_CR2); +} + +static void stm32f7_i2c_disable_irq(struct stm32f7_i2c_dev *i2c_dev, u32 mask) +{ + stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1, mask); +} + +static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data) +{ + struct stm32f7_i2c_dev *i2c_dev = data; + struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg; + void __iomem *base = i2c_dev->base; + u32 status, mask; + + status = readl_relaxed(i2c_dev->base + STM32F7_I2C_ISR); + + /* Tx empty */ + if (status & STM32F7_I2C_ISR_TXIS) + stm32f7_i2c_write_tx_data(i2c_dev); + + /* RX not empty */ + if (status & STM32F7_I2C_ISR_RXNE) + stm32f7_i2c_read_rx_data(i2c_dev); + + /* NACK received */ + if (status & STM32F7_I2C_ISR_NACKF) { + dev_dbg(i2c_dev->dev, "<%s>: Receive NACK\n", __func__); + writel_relaxed(STM32F7_I2C_ICR_NACKCF, base + STM32F7_I2C_ICR); + f7_msg->result = -ENXIO; + } + + /* STOP detection flag */ + if (status & STM32F7_I2C_ISR_STOPF) { + /* Disable interrupts */ + stm32f7_i2c_disable_irq(i2c_dev, STM32F7_I2C_ALL_IRQ_MASK); + + /* Clear STOP flag */ + writel_relaxed(STM32F7_I2C_ICR_STOPCF, base + STM32F7_I2C_ICR); + + complete(&i2c_dev->complete); + } + + /* Transfer complete */ + if (status & STM32F7_I2C_ISR_TC) { + if (f7_msg->stop) { + mask = STM32F7_I2C_CR2_STOP; + stm32f7_i2c_set_bits(base + STM32F7_I2C_CR2, mask); + } else { + i2c_dev->msg_id++; + i2c_dev->msg++; + stm32f7_i2c_xfer_msg(i2c_dev, i2c_dev->msg); + } + } + + /* + * Transfer Complete Reload: 255 data bytes have been transferred + * We have to prepare the I2C controller to transfer the remaining + * data. 
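The TCR branch above, together with stm32f7_i2c_reload() earlier in this file, implements the controller's 255-byte chunking: NBYTES is an 8-bit field, so messages longer than STM32F7_I2C_MAX_LEN are started with RELOAD set, and NBYTES is re-armed on every TCR event until the final chunk clears RELOAD. A stand-alone sketch of how a hypothetical 600-byte message would be split (illustrative arithmetic only, not driver code):

#include <stdio.h>

int main(void)
{
	unsigned int remaining = 600, max_len = 255;	/* assumed message length and NBYTES limit */

	while (remaining) {
		unsigned int nbytes = remaining > max_len ? max_len : remaining;
		int reload = remaining > max_len;	/* RELOAD stays set until the last chunk */

		printf("NBYTES=%u RELOAD=%d\n", nbytes, reload);	/* 255/1, 255/1, 90/0 */
		remaining -= nbytes;
	}
	return 0;
}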
+ */ + if (status & STM32F7_I2C_ISR_TCR) + stm32f7_i2c_reload(i2c_dev); + + return IRQ_HANDLED; +} + +static irqreturn_t stm32f7_i2c_isr_error(int irq, void *data) +{ + struct stm32f7_i2c_dev *i2c_dev = data; + struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg; + void __iomem *base = i2c_dev->base; + struct device *dev = i2c_dev->dev; + u32 status; + + status = readl_relaxed(i2c_dev->base + STM32F7_I2C_ISR); + + /* Bus error */ + if (status & STM32F7_I2C_ISR_BERR) { + dev_err(dev, "<%s>: Bus error\n", __func__); + writel_relaxed(STM32F7_I2C_ICR_BERRCF, base + STM32F7_I2C_ICR); + f7_msg->result = -EIO; + } + + /* Arbitration loss */ + if (status & STM32F7_I2C_ISR_ARLO) { + dev_dbg(dev, "<%s>: Arbitration loss\n", __func__); + writel_relaxed(STM32F7_I2C_ICR_ARLOCF, base + STM32F7_I2C_ICR); + f7_msg->result = -EAGAIN; + } + + stm32f7_i2c_disable_irq(i2c_dev, STM32F7_I2C_ALL_IRQ_MASK); + + complete(&i2c_dev->complete); + + return IRQ_HANDLED; +} + +static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap, + struct i2c_msg msgs[], int num) +{ + struct stm32f7_i2c_dev *i2c_dev = i2c_get_adapdata(i2c_adap); + struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg; + unsigned long time_left; + int ret; + + i2c_dev->msg = msgs; + i2c_dev->msg_num = num; + i2c_dev->msg_id = 0; + + ret = clk_enable(i2c_dev->clk); + if (ret) { + dev_err(i2c_dev->dev, "Failed to enable clock\n"); + return ret; + } + + ret = stm32f7_i2c_wait_free_bus(i2c_dev); + if (ret) + goto clk_free; + + stm32f7_i2c_xfer_msg(i2c_dev, msgs); + + time_left = wait_for_completion_timeout(&i2c_dev->complete, + i2c_dev->adap.timeout); + ret = f7_msg->result; + + if (!time_left) { + dev_dbg(i2c_dev->dev, "Access to slave 0x%x timed out\n", + i2c_dev->msg->addr); + ret = -ETIMEDOUT; + } + +clk_free: + clk_disable(i2c_dev->clk); + + return (ret < 0) ? 
ret : num; +} + +static u32 stm32f7_i2c_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static struct i2c_algorithm stm32f7_i2c_algo = { + .master_xfer = stm32f7_i2c_xfer, + .functionality = stm32f7_i2c_func, +}; + +static int stm32f7_i2c_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct stm32f7_i2c_dev *i2c_dev; + const struct stm32f7_i2c_setup *setup; + struct resource *res; + u32 irq_error, irq_event, clk_rate, rise_time, fall_time; + struct i2c_adapter *adap; + struct reset_control *rst; + int ret; + + i2c_dev = devm_kzalloc(&pdev->dev, sizeof(*i2c_dev), GFP_KERNEL); + if (!i2c_dev) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + i2c_dev->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(i2c_dev->base)) + return PTR_ERR(i2c_dev->base); + + irq_event = irq_of_parse_and_map(np, 0); + if (!irq_event) { + dev_err(&pdev->dev, "IRQ event missing or invalid\n"); + return -EINVAL; + } + + irq_error = irq_of_parse_and_map(np, 1); + if (!irq_error) { + dev_err(&pdev->dev, "IRQ error missing or invalid\n"); + return -EINVAL; + } + + i2c_dev->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(i2c_dev->clk)) { + dev_err(&pdev->dev, "Error: Missing controller clock\n"); + return PTR_ERR(i2c_dev->clk); + } + ret = clk_prepare_enable(i2c_dev->clk); + if (ret) { + dev_err(&pdev->dev, "Failed to prepare_enable clock\n"); + return ret; + } + + i2c_dev->speed = STM32_I2C_SPEED_STANDARD; + ret = device_property_read_u32(&pdev->dev, "clock-frequency", + &clk_rate); + if (!ret && clk_rate >= 1000000) + i2c_dev->speed = STM32_I2C_SPEED_FAST_PLUS; + else if (!ret && clk_rate >= 400000) + i2c_dev->speed = STM32_I2C_SPEED_FAST; + else if (!ret && clk_rate >= 100000) + i2c_dev->speed = STM32_I2C_SPEED_STANDARD; + + rst = devm_reset_control_get(&pdev->dev, NULL); + if (IS_ERR(rst)) { + dev_err(&pdev->dev, "Error: Missing controller reset\n"); + ret = PTR_ERR(rst); + goto clk_free; + } + reset_control_assert(rst); + udelay(2); + reset_control_deassert(rst); + + i2c_dev->dev = &pdev->dev; + + ret = devm_request_irq(&pdev->dev, irq_event, stm32f7_i2c_isr_event, 0, + pdev->name, i2c_dev); + if (ret) { + dev_err(&pdev->dev, "Failed to request irq event %i\n", + irq_event); + goto clk_free; + } + + ret = devm_request_irq(&pdev->dev, irq_error, stm32f7_i2c_isr_error, 0, + pdev->name, i2c_dev); + if (ret) { + dev_err(&pdev->dev, "Failed to request irq error %i\n", + irq_error); + goto clk_free; + } + + setup = of_device_get_match_data(&pdev->dev); + i2c_dev->setup = *setup; + + ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-rising-time-ns", + &rise_time); + if (!ret) + i2c_dev->setup.rise_time = rise_time; + + ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-falling-time-ns", + &fall_time); + if (!ret) + i2c_dev->setup.fall_time = fall_time; + + ret = stm32f7_i2c_setup_timing(i2c_dev, &i2c_dev->setup); + if (ret) + goto clk_free; + + stm32f7_i2c_hw_config(i2c_dev); + + adap = &i2c_dev->adap; + i2c_set_adapdata(adap, i2c_dev); + snprintf(adap->name, sizeof(adap->name), "STM32F7 I2C(%pa)", + &res->start); + adap->owner = THIS_MODULE; + adap->timeout = 2 * HZ; + adap->retries = 3; + adap->algo = &stm32f7_i2c_algo; + adap->dev.parent = &pdev->dev; + adap->dev.of_node = pdev->dev.of_node; + + init_completion(&i2c_dev->complete); + + ret = i2c_add_adapter(adap); + if (ret) + goto clk_free; + + platform_set_drvdata(pdev, i2c_dev); + + clk_disable(i2c_dev->clk); + + dev_info(i2c_dev->dev, "STM32F7 
I2C-%d bus adapter\n", adap->nr); + + return 0; + +clk_free: + clk_disable_unprepare(i2c_dev->clk); + + return ret; +} + +static int stm32f7_i2c_remove(struct platform_device *pdev) +{ + struct stm32f7_i2c_dev *i2c_dev = platform_get_drvdata(pdev); + + i2c_del_adapter(&i2c_dev->adap); + + clk_unprepare(i2c_dev->clk); + + return 0; +} + +static const struct of_device_id stm32f7_i2c_match[] = { + { .compatible = "st,stm32f7-i2c", .data = &stm32f7_setup}, + {}, +}; +MODULE_DEVICE_TABLE(of, stm32f7_i2c_match); + +static struct platform_driver stm32f7_i2c_driver = { + .driver = { + .name = "stm32f7-i2c", + .of_match_table = stm32f7_i2c_match, + }, + .probe = stm32f7_i2c_probe, + .remove = stm32f7_i2c_remove, +}; + +module_platform_driver(stm32f7_i2c_driver); + +MODULE_AUTHOR("M'boumba Cedric Madianga "); +MODULE_DESCRIPTION("STMicroelectronics STM32F7 I2C driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/i2c/busses/i2c-sun6i-p2wi.c b/drivers/i2c/busses/i2c-sun6i-p2wi.c index 7668e2e9b8fd..7c07ce116e38 100644 --- a/drivers/i2c/busses/i2c-sun6i-p2wi.c +++ b/drivers/i2c/busses/i2c-sun6i-p2wi.c @@ -223,8 +223,8 @@ static int p2wi_probe(struct platform_device *pdev) if (childnp) { ret = of_property_read_u32(childnp, "reg", &slave_addr); if (ret) { - dev_err(dev, "invalid slave address on node %s\n", - childnp->full_name); + dev_err(dev, "invalid slave address on node %pOF\n", + childnp); return -EINVAL; } @@ -258,7 +258,7 @@ static int p2wi_probe(struct platform_device *pdev) parent_clk_freq = clk_get_rate(p2wi->clk); - p2wi->rstc = devm_reset_control_get(dev, NULL); + p2wi->rstc = devm_reset_control_get_exclusive(dev, NULL); if (IS_ERR(p2wi->rstc)) { ret = PTR_ERR(p2wi->rstc); dev_err(dev, "failed to retrieve reset controller: %d\n", ret); diff --git a/drivers/i2c/busses/i2c-taos-evm.c b/drivers/i2c/busses/i2c-taos-evm.c index 210ca82f8aa0..addd90a8cb59 100644 --- a/drivers/i2c/busses/i2c-taos-evm.c +++ b/drivers/i2c/busses/i2c-taos-evm.c @@ -291,7 +291,7 @@ static void taos_disconnect(struct serio *serio) dev_info(&serio->dev, "Disconnected from TAOS EVM\n"); } -static struct serio_device_id taos_serio_ids[] = { +static const struct serio_device_id taos_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_TAOSEVM, diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 4af9bbae20df..60292d243e24 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -793,7 +793,7 @@ static const struct i2c_algorithm tegra_i2c_algo = { }; /* payload size is only 12 bit */ -static struct i2c_adapter_quirks tegra_i2c_quirks = { +static const struct i2c_adapter_quirks tegra_i2c_quirks = { .max_read_len = 4096, .max_write_len = 4096, }; @@ -911,7 +911,7 @@ static int tegra_i2c_probe(struct platform_device *pdev) i2c_dev->cont_id = pdev->id; i2c_dev->dev = &pdev->dev; - i2c_dev->rst = devm_reset_control_get(&pdev->dev, "i2c"); + i2c_dev->rst = devm_reset_control_get_exclusive(&pdev->dev, "i2c"); if (IS_ERR(i2c_dev->rst)) { dev_err(&pdev->dev, "missing controller reset\n"); return PTR_ERR(i2c_dev->rst); diff --git a/drivers/i2c/busses/i2c-thunderx-pcidrv.c b/drivers/i2c/busses/i2c-thunderx-pcidrv.c index ea35a895b568..df0976f4432a 100644 --- a/drivers/i2c/busses/i2c-thunderx-pcidrv.c +++ b/drivers/i2c/busses/i2c-thunderx-pcidrv.c @@ -75,7 +75,7 @@ static const struct i2c_algorithm thunderx_i2c_algo = { .functionality = thunderx_i2c_functionality, }; -static struct i2c_adapter thunderx_i2c_ops = { +static const struct i2c_adapter thunderx_i2c_ops = { .owner 
= THIS_MODULE, .name = "ThunderX adapter", .algo = &thunderx_i2c_algo, diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c index beee31892295..9918bdd81619 100644 --- a/drivers/i2c/busses/i2c-uniphier-f.c +++ b/drivers/i2c/busses/i2c-uniphier-f.c @@ -97,6 +97,7 @@ struct uniphier_fi2c_priv { int error; unsigned int flags; unsigned int busy_cnt; + unsigned int clk_cycle; }; static void uniphier_fi2c_fill_txfifo(struct uniphier_fi2c_priv *priv, @@ -461,9 +462,9 @@ static struct i2c_bus_recovery_info uniphier_fi2c_bus_recovery_info = { .unprepare_recovery = uniphier_fi2c_unprepare_recovery, }; -static void uniphier_fi2c_hw_init(struct uniphier_fi2c_priv *priv, - u32 bus_speed, unsigned long clk_rate) +static void uniphier_fi2c_hw_init(struct uniphier_fi2c_priv *priv) { + unsigned int cyc = priv->clk_cycle; u32 tmp; tmp = readl(priv->membase + UNIPHIER_FI2C_CR); @@ -472,12 +473,10 @@ static void uniphier_fi2c_hw_init(struct uniphier_fi2c_priv *priv, uniphier_fi2c_reset(priv); - tmp = clk_rate / bus_speed; - - writel(tmp, priv->membase + UNIPHIER_FI2C_CYC); - writel(tmp / 2, priv->membase + UNIPHIER_FI2C_LCTL); - writel(tmp / 2, priv->membase + UNIPHIER_FI2C_SSUT); - writel(tmp / 16, priv->membase + UNIPHIER_FI2C_DSUT); + writel(cyc, priv->membase + UNIPHIER_FI2C_CYC); + writel(cyc / 2, priv->membase + UNIPHIER_FI2C_LCTL); + writel(cyc / 2, priv->membase + UNIPHIER_FI2C_SSUT); + writel(cyc / 16, priv->membase + UNIPHIER_FI2C_DSUT); uniphier_fi2c_prepare_operation(priv); } @@ -531,6 +530,7 @@ static int uniphier_fi2c_probe(struct platform_device *pdev) goto disable_clk; } + priv->clk_cycle = clk_rate / bus_speed; init_completion(&priv->comp); priv->adap.owner = THIS_MODULE; priv->adap.algo = &uniphier_fi2c_algo; @@ -541,7 +541,7 @@ static int uniphier_fi2c_probe(struct platform_device *pdev) i2c_set_adapdata(&priv->adap, priv); platform_set_drvdata(pdev, priv); - uniphier_fi2c_hw_init(priv, bus_speed, clk_rate); + uniphier_fi2c_hw_init(priv); ret = devm_request_irq(dev, irq, uniphier_fi2c_interrupt, 0, pdev->name, priv); @@ -568,6 +568,33 @@ static int uniphier_fi2c_remove(struct platform_device *pdev) return 0; } +static int __maybe_unused uniphier_fi2c_suspend(struct device *dev) +{ + struct uniphier_fi2c_priv *priv = dev_get_drvdata(dev); + + clk_disable_unprepare(priv->clk); + + return 0; +} + +static int __maybe_unused uniphier_fi2c_resume(struct device *dev) +{ + struct uniphier_fi2c_priv *priv = dev_get_drvdata(dev); + int ret; + + ret = clk_prepare_enable(priv->clk); + if (ret) + return ret; + + uniphier_fi2c_hw_init(priv); + + return 0; +} + +static const struct dev_pm_ops uniphier_fi2c_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(uniphier_fi2c_suspend, uniphier_fi2c_resume) +}; + static const struct of_device_id uniphier_fi2c_match[] = { { .compatible = "socionext,uniphier-fi2c" }, { /* sentinel */ } @@ -580,6 +607,7 @@ static struct platform_driver uniphier_fi2c_drv = { .driver = { .name = "uniphier-fi2c", .of_match_table = uniphier_fi2c_match, + .pm = &uniphier_fi2c_pm_ops, }, }; module_platform_driver(uniphier_fi2c_drv); diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c index 777c0fe93653..bb181b088291 100644 --- a/drivers/i2c/busses/i2c-uniphier.c +++ b/drivers/i2c/busses/i2c-uniphier.c @@ -53,6 +53,7 @@ struct uniphier_i2c_priv { void __iomem *membase; struct clk *clk; unsigned int busy_cnt; + unsigned int clk_cycle; }; static irqreturn_t uniphier_i2c_interrupt(int irq, void *dev_id) @@ -316,13 +317,13 @@ static struct 
i2c_bus_recovery_info uniphier_i2c_bus_recovery_info = { .unprepare_recovery = uniphier_i2c_unprepare_recovery, }; -static void uniphier_i2c_hw_init(struct uniphier_i2c_priv *priv, - u32 bus_speed, unsigned long clk_rate) +static void uniphier_i2c_hw_init(struct uniphier_i2c_priv *priv) { + unsigned int cyc = priv->clk_cycle; + uniphier_i2c_reset(priv, true); - writel((clk_rate / bus_speed / 2 << 16) | (clk_rate / bus_speed), - priv->membase + UNIPHIER_I2C_CLK); + writel((cyc / 2 << 16) | cyc, priv->membase + UNIPHIER_I2C_CLK); uniphier_i2c_reset(priv, false); } @@ -376,6 +377,7 @@ static int uniphier_i2c_probe(struct platform_device *pdev) goto disable_clk; } + priv->clk_cycle = clk_rate / bus_speed; init_completion(&priv->comp); priv->adap.owner = THIS_MODULE; priv->adap.algo = &uniphier_i2c_algo; @@ -386,7 +388,7 @@ static int uniphier_i2c_probe(struct platform_device *pdev) i2c_set_adapdata(&priv->adap, priv); platform_set_drvdata(pdev, priv); - uniphier_i2c_hw_init(priv, bus_speed, clk_rate); + uniphier_i2c_hw_init(priv); ret = devm_request_irq(dev, irq, uniphier_i2c_interrupt, 0, pdev->name, priv); @@ -413,6 +415,33 @@ static int uniphier_i2c_remove(struct platform_device *pdev) return 0; } +static int __maybe_unused uniphier_i2c_suspend(struct device *dev) +{ + struct uniphier_i2c_priv *priv = dev_get_drvdata(dev); + + clk_disable_unprepare(priv->clk); + + return 0; +} + +static int __maybe_unused uniphier_i2c_resume(struct device *dev) +{ + struct uniphier_i2c_priv *priv = dev_get_drvdata(dev); + int ret; + + ret = clk_prepare_enable(priv->clk); + if (ret) + return ret; + + uniphier_i2c_hw_init(priv); + + return 0; +} + +static const struct dev_pm_ops uniphier_i2c_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(uniphier_i2c_suspend, uniphier_i2c_resume) +}; + static const struct of_device_id uniphier_i2c_match[] = { { .compatible = "socionext,uniphier-i2c" }, { /* sentinel */ } @@ -425,6 +454,7 @@ static struct platform_driver uniphier_i2c_drv = { .driver = { .name = "uniphier-i2c", .of_match_table = uniphier_i2c_match, + .pm = &uniphier_i2c_pm_ops, }, }; module_platform_driver(uniphier_i2c_drv); diff --git a/drivers/i2c/busses/i2c-versatile.c b/drivers/i2c/busses/i2c-versatile.c index c73d2d22009e..f1ab2a637ec0 100644 --- a/drivers/i2c/busses/i2c-versatile.c +++ b/drivers/i2c/busses/i2c-versatile.c @@ -55,7 +55,7 @@ static int i2c_versatile_getscl(void *data) return !!(readl(i2c->base + I2C_CONTROL) & SCL); } -static struct i2c_algo_bit_data i2c_versatile_algo = { +static const struct i2c_algo_bit_data i2c_versatile_algo = { .setsda = i2c_versatile_setsda, .setscl = i2c_versatile_setscl, .getsda = i2c_versatile_getsda, diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c index 66bce3b311a1..ae6ed254e01d 100644 --- a/drivers/i2c/busses/i2c-xiic.c +++ b/drivers/i2c/busses/i2c-xiic.c @@ -721,7 +721,7 @@ static const struct i2c_algorithm xiic_algorithm = { .functionality = xiic_func, }; -static struct i2c_adapter xiic_adapter = { +static const struct i2c_adapter xiic_adapter = { .owner = THIS_MODULE, .name = DRIVER_NAME, .class = I2C_CLASS_DEPRECATED, @@ -853,8 +853,7 @@ MODULE_DEVICE_TABLE(of, xiic_of_match); static int __maybe_unused cdns_i2c_runtime_suspend(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct xiic_i2c *i2c = platform_get_drvdata(pdev); + struct xiic_i2c *i2c = dev_get_drvdata(dev); clk_disable(i2c->clk); @@ -863,8 +862,7 @@ static int __maybe_unused cdns_i2c_runtime_suspend(struct device *dev) static int __maybe_unused 
cdns_i2c_runtime_resume(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct xiic_i2c *i2c = platform_get_drvdata(pdev); + struct xiic_i2c *i2c = dev_get_drvdata(dev); int ret; ret = clk_enable(i2c->clk); diff --git a/drivers/i2c/i2c-core-of.c b/drivers/i2c/i2c-core-of.c index ccf82fdbcd8e..8d474bb1dc15 100644 --- a/drivers/i2c/i2c-core-of.c +++ b/drivers/i2c/i2c-core-of.c @@ -32,18 +32,17 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap, u32 addr; int len; - dev_dbg(&adap->dev, "of_i2c: register %s\n", node->full_name); + dev_dbg(&adap->dev, "of_i2c: register %pOF\n", node); if (of_modalias_node(node, info.type, sizeof(info.type)) < 0) { - dev_err(&adap->dev, "of_i2c: modalias failure on %s\n", - node->full_name); + dev_err(&adap->dev, "of_i2c: modalias failure on %pOF\n", + node); return ERR_PTR(-EINVAL); } addr_be = of_get_property(node, "reg", &len); if (!addr_be || (len < sizeof(*addr_be))) { - dev_err(&adap->dev, "of_i2c: invalid reg on %s\n", - node->full_name); + dev_err(&adap->dev, "of_i2c: invalid reg on %pOF\n", node); return ERR_PTR(-EINVAL); } @@ -59,8 +58,8 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap, } if (i2c_check_addr_validity(addr, info.flags)) { - dev_err(&adap->dev, "of_i2c: invalid addr=%x on %s\n", - addr, node->full_name); + dev_err(&adap->dev, "of_i2c: invalid addr=%x on %pOF\n", + addr, node); return ERR_PTR(-EINVAL); } @@ -76,8 +75,7 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap, result = i2c_new_device(adap, &info); if (result == NULL) { - dev_err(&adap->dev, "of_i2c: Failure registering %s\n", - node->full_name); + dev_err(&adap->dev, "of_i2c: Failure registering %pOF\n", node); of_node_put(node); return ERR_PTR(-EINVAL); } @@ -106,8 +104,8 @@ void of_i2c_register_devices(struct i2c_adapter *adap) client = of_i2c_register_device(adap, node); if (IS_ERR(client)) { dev_warn(&adap->dev, - "Failed to create I2C device for %s\n", - node->full_name); + "Failed to create I2C device for %pOF\n", + node); of_node_clear_flag(node, OF_POPULATED); } } @@ -243,8 +241,8 @@ static int of_i2c_notify(struct notifier_block *nb, unsigned long action, put_device(&adap->dev); if (IS_ERR(client)) { - dev_err(&adap->dev, "failed to create client for '%s'\n", - rd->dn->full_name); + dev_err(&adap->dev, "failed to create client for '%pOF'\n", + rd->dn); of_node_clear_flag(rd->dn, OF_POPULATED); return notifier_from_errno(PTR_ERR(client)); } diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig index 17121329bb79..0f5c8fc36625 100644 --- a/drivers/i2c/muxes/Kconfig +++ b/drivers/i2c/muxes/Kconfig @@ -8,7 +8,7 @@ menu "Multiplexer I2C Chip support" config I2C_ARB_GPIO_CHALLENGE tristate "GPIO-based I2C arbitration" depends on GPIOLIB || COMPILE_TEST - depends on OF + depends on OF || COMPILE_TEST help If you say yes to this option, support will be included for an I2C multimaster arbitration scheme using GPIOs and a challenge & @@ -76,6 +76,7 @@ config I2C_MUX_PCA954x config I2C_MUX_PINCTRL tristate "pinctrl-based I2C multiplexer" depends on PINCTRL + depends on OF || COMPILE_TEST help If you say yes to this option, support will be included for an I2C multiplexer that uses the pinctrl subsystem, i.e. pin multiplexing. 
diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c index 3e6fe1760d82..33ce032cb701 100644 --- a/drivers/i2c/muxes/i2c-demux-pinctrl.c +++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c @@ -167,8 +167,8 @@ static ssize_t available_masters_show(struct device *dev, int count = 0, i; for (i = 0; i < priv->num_chan && count < PAGE_SIZE; i++) - count += scnprintf(buf + count, PAGE_SIZE - count, "%d:%s%c", - i, priv->chan[i].parent_np->full_name, + count += scnprintf(buf + count, PAGE_SIZE - count, "%d:%pOF%c", + i, priv->chan[i].parent_np, i == priv->num_chan - 1 ? '\n' : ' '); return count; diff --git a/drivers/i2c/muxes/i2c-mux-mlxcpld.c b/drivers/i2c/muxes/i2c-mux-mlxcpld.c index e53f2abd1350..12ad8d65faf6 100644 --- a/drivers/i2c/muxes/i2c-mux-mlxcpld.c +++ b/drivers/i2c/muxes/i2c-mux-mlxcpld.c @@ -38,9 +38,9 @@ #include #include #include +#include #include #include -#include #define CPLD_MUX_MAX_NCHANS 8 diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c index 9e318c9516c7..6a39adaf433f 100644 --- a/drivers/i2c/muxes/i2c-mux-pca9541.c +++ b/drivers/i2c/muxes/i2c-mux-pca9541.c @@ -16,15 +16,14 @@ * warranty of any kind, whether express or implied. */ -#include -#include #include -#include #include #include #include - -#include +#include +#include +#include +#include /* * The PCA9541 is a bus master selector. It supports two I2C masters connected diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c index f1751c290af6..7b992db38021 100644 --- a/drivers/i2c/muxes/i2c-mux-pca954x.c +++ b/drivers/i2c/muxes/i2c-mux-pca954x.c @@ -39,13 +39,13 @@ #include #include #include -#include #include #include #include #include #include #include +#include #include #include #include diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c index 7c0c264b07bc..cc6818aabab5 100644 --- a/drivers/i2c/muxes/i2c-mux-pinctrl.c +++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c @@ -20,17 +20,14 @@ #include #include #include -#include #include #include #include #include "../../pinctrl/core.h" struct i2c_mux_pinctrl { - struct i2c_mux_pinctrl_platform_data *pdata; struct pinctrl *pinctrl; struct pinctrl_state **states; - struct pinctrl_state *state_idle; }; static int i2c_mux_pinctrl_select(struct i2c_mux_core *muxc, u32 chan) @@ -42,85 +39,9 @@ static int i2c_mux_pinctrl_select(struct i2c_mux_core *muxc, u32 chan) static int i2c_mux_pinctrl_deselect(struct i2c_mux_core *muxc, u32 chan) { - struct i2c_mux_pinctrl *mux = i2c_mux_priv(muxc); - - return pinctrl_select_state(mux->pinctrl, mux->state_idle); + return i2c_mux_pinctrl_select(muxc, muxc->num_adapters); } -#ifdef CONFIG_OF -static int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux, - struct platform_device *pdev) -{ - struct device_node *np = pdev->dev.of_node; - int num_names, i, ret; - struct device_node *adapter_np; - struct i2c_adapter *adapter; - - if (!np) - return 0; - - mux->pdata = devm_kzalloc(&pdev->dev, sizeof(*mux->pdata), GFP_KERNEL); - if (!mux->pdata) - return -ENOMEM; - - num_names = of_property_count_strings(np, "pinctrl-names"); - if (num_names < 0) { - dev_err(&pdev->dev, "Cannot parse pinctrl-names: %d\n", - num_names); - return num_names; - } - - mux->pdata->pinctrl_states = devm_kzalloc(&pdev->dev, - sizeof(*mux->pdata->pinctrl_states) * num_names, - GFP_KERNEL); - if (!mux->pdata->pinctrl_states) - return -ENOMEM; - - for (i = 0; i < num_names; i++) { - ret = of_property_read_string_index(np, "pinctrl-names", i, 
- &mux->pdata->pinctrl_states[mux->pdata->bus_count]); - if (ret < 0) { - dev_err(&pdev->dev, "Cannot parse pinctrl-names: %d\n", - ret); - return ret; - } - if (!strcmp(mux->pdata->pinctrl_states[mux->pdata->bus_count], - "idle")) { - if (i != num_names - 1) { - dev_err(&pdev->dev, - "idle state must be last\n"); - return -EINVAL; - } - mux->pdata->pinctrl_state_idle = "idle"; - } else { - mux->pdata->bus_count++; - } - } - - adapter_np = of_parse_phandle(np, "i2c-parent", 0); - if (!adapter_np) { - dev_err(&pdev->dev, "Cannot parse i2c-parent\n"); - return -ENODEV; - } - adapter = of_find_i2c_adapter_by_node(adapter_np); - of_node_put(adapter_np); - if (!adapter) { - dev_err(&pdev->dev, "Cannot find parent bus\n"); - return -EPROBE_DEFER; - } - mux->pdata->parent_bus_num = i2c_adapter_id(adapter); - put_device(&adapter->dev); - - return 0; -} -#else -static inline int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux, - struct platform_device *pdev) -{ - return 0; -} -#endif - static struct i2c_adapter *i2c_mux_pinctrl_root_adapter( struct pinctrl_state *state) { @@ -141,110 +62,108 @@ static struct i2c_adapter *i2c_mux_pinctrl_root_adapter( return root; } +static struct i2c_adapter *i2c_mux_pinctrl_parent_adapter(struct device *dev) +{ + struct device_node *np = dev->of_node; + struct device_node *parent_np; + struct i2c_adapter *parent; + + parent_np = of_parse_phandle(np, "i2c-parent", 0); + if (!parent_np) { + dev_err(dev, "Cannot parse i2c-parent\n"); + return ERR_PTR(-ENODEV); + } + parent = of_find_i2c_adapter_by_node(parent_np); + of_node_put(parent_np); + if (!parent) + return ERR_PTR(-EPROBE_DEFER); + + return parent; +} + static int i2c_mux_pinctrl_probe(struct platform_device *pdev) { + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; struct i2c_mux_core *muxc; struct i2c_mux_pinctrl *mux; + struct i2c_adapter *parent; struct i2c_adapter *root; - int i, ret; + int num_names, i, ret; + const char *name; - mux = devm_kzalloc(&pdev->dev, sizeof(*mux), GFP_KERNEL); - if (!mux) { - ret = -ENOMEM; - goto err; + num_names = of_property_count_strings(np, "pinctrl-names"); + if (num_names < 0) { + dev_err(dev, "Cannot parse pinctrl-names: %d\n", + num_names); + return num_names; } - mux->pdata = dev_get_platdata(&pdev->dev); - if (!mux->pdata) { - ret = i2c_mux_pinctrl_parse_dt(mux, pdev); - if (ret < 0) - goto err; - } - if (!mux->pdata) { - dev_err(&pdev->dev, "Missing platform data\n"); - ret = -ENODEV; - goto err; - } + parent = i2c_mux_pinctrl_parent_adapter(dev); + if (IS_ERR(parent)) + return PTR_ERR(parent); - mux->states = devm_kzalloc(&pdev->dev, - sizeof(*mux->states) * mux->pdata->bus_count, - GFP_KERNEL); - if (!mux->states) { - dev_err(&pdev->dev, "Cannot allocate states\n"); - ret = -ENOMEM; - goto err; - } - - muxc = i2c_mux_alloc(NULL, &pdev->dev, mux->pdata->bus_count, 0, 0, - i2c_mux_pinctrl_select, NULL); + muxc = i2c_mux_alloc(parent, dev, num_names, + sizeof(*mux) + num_names * sizeof(*mux->states), + 0, i2c_mux_pinctrl_select, NULL); if (!muxc) { ret = -ENOMEM; - goto err; + goto err_put_parent; } - muxc->priv = mux; + mux = i2c_mux_priv(muxc); + mux->states = (struct pinctrl_state **)(mux + 1); platform_set_drvdata(pdev, muxc); - mux->pinctrl = devm_pinctrl_get(&pdev->dev); + mux->pinctrl = devm_pinctrl_get(dev); if (IS_ERR(mux->pinctrl)) { ret = PTR_ERR(mux->pinctrl); - dev_err(&pdev->dev, "Cannot get pinctrl: %d\n", ret); - goto err; + dev_err(dev, "Cannot get pinctrl: %d\n", ret); + goto err_put_parent; } - for (i = 0; i < 
mux->pdata->bus_count; i++) { - mux->states[i] = pinctrl_lookup_state(mux->pinctrl, - mux->pdata->pinctrl_states[i]); + + for (i = 0; i < num_names; i++) { + ret = of_property_read_string_index(np, "pinctrl-names", i, + &name); + if (ret < 0) { + dev_err(dev, "Cannot parse pinctrl-names: %d\n", ret); + goto err_put_parent; + } + + mux->states[i] = pinctrl_lookup_state(mux->pinctrl, name); if (IS_ERR(mux->states[i])) { ret = PTR_ERR(mux->states[i]); - dev_err(&pdev->dev, - "Cannot look up pinctrl state %s: %d\n", - mux->pdata->pinctrl_states[i], ret); - goto err; - } - } - if (mux->pdata->pinctrl_state_idle) { - mux->state_idle = pinctrl_lookup_state(mux->pinctrl, - mux->pdata->pinctrl_state_idle); - if (IS_ERR(mux->state_idle)) { - ret = PTR_ERR(mux->state_idle); - dev_err(&pdev->dev, - "Cannot look up pinctrl state %s: %d\n", - mux->pdata->pinctrl_state_idle, ret); - goto err; + dev_err(dev, "Cannot look up pinctrl state %s: %d\n", + name, ret); + goto err_put_parent; } + if (strcmp(name, "idle")) + continue; + + if (i != num_names - 1) { + dev_err(dev, "idle state must be last\n"); + ret = -EINVAL; + goto err_put_parent; + } muxc->deselect = i2c_mux_pinctrl_deselect; } - muxc->parent = i2c_get_adapter(mux->pdata->parent_bus_num); - if (!muxc->parent) { - dev_err(&pdev->dev, "Parent adapter (%d) not found\n", - mux->pdata->parent_bus_num); - ret = -EPROBE_DEFER; - goto err; - } - root = i2c_root_adapter(&muxc->parent->dev); muxc->mux_locked = true; - for (i = 0; i < mux->pdata->bus_count; i++) { + for (i = 0; i < num_names; i++) { if (root != i2c_mux_pinctrl_root_adapter(mux->states[i])) { muxc->mux_locked = false; break; } } - if (muxc->mux_locked && mux->pdata->pinctrl_state_idle && - root != i2c_mux_pinctrl_root_adapter(mux->state_idle)) - muxc->mux_locked = false; - if (muxc->mux_locked) - dev_info(&pdev->dev, "mux-locked i2c mux\n"); + dev_info(dev, "mux-locked i2c mux\n"); - for (i = 0; i < mux->pdata->bus_count; i++) { - u32 bus = mux->pdata->base_bus_num ? - (mux->pdata->base_bus_num + i) : 0; - - ret = i2c_mux_add_adapter(muxc, bus, i, 0); + /* Do not add any adapter for the idle state (if it's there at all). 
*/ + for (i = 0; i < num_names - !!muxc->deselect; i++) { + ret = i2c_mux_add_adapter(muxc, 0, i, 0); if (ret) goto err_del_adapter; } @@ -253,8 +172,9 @@ static int i2c_mux_pinctrl_probe(struct platform_device *pdev) err_del_adapter: i2c_mux_del_adapters(muxc); - i2c_put_adapter(muxc->parent); -err: +err_put_parent: + i2c_put_adapter(parent); + return ret; } @@ -264,16 +184,15 @@ static int i2c_mux_pinctrl_remove(struct platform_device *pdev) i2c_mux_del_adapters(muxc); i2c_put_adapter(muxc->parent); + return 0; } -#ifdef CONFIG_OF static const struct of_device_id i2c_mux_pinctrl_of_match[] = { { .compatible = "i2c-mux-pinctrl", }, {}, }; MODULE_DEVICE_TABLE(of, i2c_mux_pinctrl_of_match); -#endif static struct platform_driver i2c_mux_pinctrl_driver = { .driver = { diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c index 627b1f62a749..3ddd88219906 100644 --- a/drivers/ide/ide-floppy.c +++ b/drivers/ide/ide-floppy.c @@ -72,7 +72,7 @@ static int ide_floppy_callback(ide_drive_t *drive, int dsc) drive->failed_pc = NULL; if (pc->c[0] == GPCMD_READ_10 || pc->c[0] == GPCMD_WRITE_10 || - (req_op(rq) == REQ_OP_SCSI_IN || req_op(rq) == REQ_OP_SCSI_OUT)) + blk_rq_is_scsi(rq)) uptodate = 1; /* FIXME */ else if (pc->c[0] == GPCMD_REQUEST_SENSE) { diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index 01b2adfd8226..eaf39e5db08b 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c @@ -1451,6 +1451,7 @@ int ide_host_register(struct ide_host *host, const struct ide_port_info *d, if (hwif_init(hwif) == 0) { printk(KERN_INFO "%s: failed to initialize IDE " "interface\n", hwif->name); + device_unregister(hwif->portdev); device_unregister(&hwif->gendev); ide_disable_port(hwif); continue; diff --git a/drivers/ide/ide-scan-pci.c b/drivers/ide/ide-scan-pci.c index 86aa88aeb3a6..acf874800ca4 100644 --- a/drivers/ide/ide-scan-pci.c +++ b/drivers/ide/ide-scan-pci.c @@ -56,6 +56,7 @@ static int __init ide_scan_pcidev(struct pci_dev *dev) { struct list_head *l; struct pci_driver *d; + int ret; list_for_each(l, &ide_pci_drivers) { d = list_entry(l, struct pci_driver, node); @@ -63,10 +64,14 @@ static int __init ide_scan_pcidev(struct pci_dev *dev) const struct pci_device_id *id = pci_match_id(d->id_table, dev); - if (id != NULL && d->probe(dev, id) >= 0) { - dev->driver = d; - pci_dev_get(dev); - return 1; + if (id != NULL) { + pci_assign_irq(dev); + ret = d->probe(dev, id); + if (ret >= 0) { + dev->driver = d; + pci_dev_get(dev); + return 1; + } } } } diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c index 0c5d3a99468e..c5b902b86b44 100644 --- a/drivers/ide/pmac.c +++ b/drivers/ide/pmac.c @@ -1145,8 +1145,8 @@ static int pmac_ide_macio_attach(struct macio_dev *mdev, return -ENOMEM; if (macio_resource_count(mdev) == 0) { - printk(KERN_WARNING "ide-pmac: no address for %s\n", - mdev->ofdev.dev.of_node->full_name); + printk(KERN_WARNING "ide-pmac: no address for %pOF\n", + mdev->ofdev.dev.of_node); rc = -ENXIO; goto out_free_pmif; } @@ -1154,7 +1154,7 @@ static int pmac_ide_macio_attach(struct macio_dev *mdev, /* Request memory resource for IO ports */ if (macio_request_resource(mdev, 0, "ide-pmac (ports)")) { printk(KERN_ERR "ide-pmac: can't request MMIO resource for " - "%s!\n", mdev->ofdev.dev.of_node->full_name); + "%pOF!\n", mdev->ofdev.dev.of_node); rc = -EBUSY; goto out_free_pmif; } @@ -1165,8 +1165,8 @@ static int pmac_ide_macio_attach(struct macio_dev *mdev, * where that happens though... 
*/ if (macio_irq_count(mdev) == 0) { - printk(KERN_WARNING "ide-pmac: no intrs for device %s, using " - "13\n", mdev->ofdev.dev.of_node->full_name); + printk(KERN_WARNING "ide-pmac: no intrs for device %pOF, using " + "13\n", mdev->ofdev.dev.of_node); irq = irq_create_mapping(NULL, 13); } else irq = macio_irq(mdev, 0); @@ -1183,8 +1183,8 @@ static int pmac_ide_macio_attach(struct macio_dev *mdev, if (macio_resource_count(mdev) >= 2) { if (macio_request_resource(mdev, 1, "ide-pmac (dma)")) printk(KERN_WARNING "ide-pmac: can't request DMA " - "resource for %s!\n", - mdev->ofdev.dev.of_node->full_name); + "resource for %pOF!\n", + mdev->ofdev.dev.of_node); else pmif->dma_regs = ioremap(macio_resource_start(mdev, 1), 0x1000); } else @@ -1274,7 +1274,7 @@ static int pmac_ide_pci_attach(struct pci_dev *pdev, if (pci_enable_device(pdev)) { printk(KERN_WARNING "ide-pmac: Can't enable PCI device for " - "%s\n", np->full_name); + "%pOF\n", np); rc = -ENXIO; goto out_free_pmif; } @@ -1282,7 +1282,7 @@ static int pmac_ide_pci_attach(struct pci_dev *pdev, if (pci_request_regions(pdev, "Kauai ATA")) { printk(KERN_ERR "ide-pmac: Cannot obtain PCI resources for " - "%s\n", np->full_name); + "%pOF\n", np); rc = -ENXIO; goto out_free_pmif; } diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c index 112d2fe1bcdb..fdc8e813170c 100644 --- a/drivers/ide/setup-pci.c +++ b/drivers/ide/setup-pci.c @@ -179,6 +179,7 @@ EXPORT_SYMBOL_GPL(ide_setup_pci_noise); /** * ide_pci_enable - do PCI enables * @dev: PCI device + * @bars: PCI BARs mask * @d: IDE port info * * Enable the IDE PCI device. We attempt to enable the device in full @@ -189,9 +190,10 @@ EXPORT_SYMBOL_GPL(ide_setup_pci_noise); * Returns zero on success or an error code */ -static int ide_pci_enable(struct pci_dev *dev, const struct ide_port_info *d) +static int ide_pci_enable(struct pci_dev *dev, int bars, + const struct ide_port_info *d) { - int ret, bars; + int ret; if (pci_enable_device(dev)) { ret = pci_enable_device_io(dev); @@ -216,18 +218,6 @@ static int ide_pci_enable(struct pci_dev *dev, const struct ide_port_info *d) goto out; } - if (d->host_flags & IDE_HFLAG_SINGLE) - bars = (1 << 2) - 1; - else - bars = (1 << 4) - 1; - - if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) { - if (d->host_flags & IDE_HFLAG_CS5520) - bars |= (1 << 2); - else - bars |= (1 << 4); - } - ret = pci_request_selected_regions(dev, bars, d->name); if (ret < 0) printk(KERN_ERR "%s %s: can't reserve resources\n", @@ -403,6 +393,7 @@ int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d) /** * ide_setup_pci_controller - set up IDE PCI * @dev: PCI device + * @bars: PCI BARs mask * @d: IDE port info * @noisy: verbose flag * @@ -411,7 +402,7 @@ int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d) * and enables it if need be */ -static int ide_setup_pci_controller(struct pci_dev *dev, +static int ide_setup_pci_controller(struct pci_dev *dev, int bars, const struct ide_port_info *d, int noisy) { int ret; @@ -420,7 +411,7 @@ static int ide_setup_pci_controller(struct pci_dev *dev, if (noisy) ide_setup_pci_noise(dev, d); - ret = ide_pci_enable(dev, d); + ret = ide_pci_enable(dev, bars, d); if (ret < 0) goto out; @@ -428,16 +419,20 @@ static int ide_setup_pci_controller(struct pci_dev *dev, if (ret < 0) { printk(KERN_ERR "%s %s: error accessing PCI regs\n", d->name, pci_name(dev)); - goto out; + goto out_free_bars; } if (!(pcicmd & PCI_COMMAND_IO)) { /* is device disabled? 
*/ ret = ide_pci_configure(dev, d); if (ret < 0) - goto out; + goto out_free_bars; printk(KERN_INFO "%s %s: device enabled (Linux)\n", d->name, pci_name(dev)); } + goto out; + +out_free_bars: + pci_release_selected_regions(dev, bars); out: return ret; } @@ -540,13 +535,28 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2, { struct pci_dev *pdev[] = { dev1, dev2 }; struct ide_host *host; - int ret, i, n_ports = dev2 ? 4 : 2; + int ret, i, n_ports = dev2 ? 4 : 2, bars; struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL }; + if (d->host_flags & IDE_HFLAG_SINGLE) + bars = (1 << 2) - 1; + else + bars = (1 << 4) - 1; + + if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) { + if (d->host_flags & IDE_HFLAG_CS5520) + bars |= (1 << 2); + else + bars |= (1 << 4); + } + for (i = 0; i < n_ports / 2; i++) { - ret = ide_setup_pci_controller(pdev[i], d, !i); - if (ret < 0) + ret = ide_setup_pci_controller(pdev[i], bars, d, !i); + if (ret < 0) { + if (i == 1) + pci_release_selected_regions(pdev[0], bars); goto out; + } ide_pci_setup_ports(pdev[i], d, &hw[i*2], &hws[i*2]); } @@ -554,7 +564,7 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2, host = ide_host_alloc(d, hws, n_ports); if (host == NULL) { ret = -ENOMEM; - goto out; + goto out_free_bars; } host->dev[0] = &dev1->dev; @@ -576,7 +586,7 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2, * do_ide_setup_pci_device() on the first device! */ if (ret < 0) - goto out; + goto out_free_bars; /* fixup IRQ */ if (ide_pci_is_in_compatibility_mode(pdev[i])) { @@ -589,6 +599,13 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2, ret = ide_host_register(host, d, hws); if (ret) ide_host_free(host); + else + goto out; + +out_free_bars: + i = n_ports / 2; + while (i--) + pci_release_selected_regions(pdev[i], bars); out: return ret; } diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index e87ffb3c31a9..5dc7ea4b6bc4 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -97,7 +97,7 @@ static const struct idle_cpu *icpu; static struct cpuidle_device __percpu *intel_idle_cpuidle_devices; static int intel_idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index); -static void intel_idle_freeze(struct cpuidle_device *dev, +static void intel_idle_s2idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index); static struct cpuidle_state *cpuidle_state_table; @@ -132,7 +132,7 @@ static struct cpuidle_state nehalem_cstates[] = { .exit_latency = 3, .target_residency = 6, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", .desc = "MWAIT 0x01", @@ -140,7 +140,7 @@ static struct cpuidle_state nehalem_cstates[] = { .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C3", .desc = "MWAIT 0x10", @@ -148,7 +148,7 @@ static struct cpuidle_state nehalem_cstates[] = { .exit_latency = 20, .target_residency = 80, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", .desc = "MWAIT 0x20", @@ -156,7 +156,7 @@ static struct cpuidle_state nehalem_cstates[] = { .exit_latency = 200, .target_residency = 800, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } }; @@ -169,7 +169,7 @@ static struct cpuidle_state snb_cstates[] = { .exit_latency = 2, .target_residency = 2, 
.enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", .desc = "MWAIT 0x01", @@ -177,7 +177,7 @@ static struct cpuidle_state snb_cstates[] = { .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C3", .desc = "MWAIT 0x10", @@ -185,7 +185,7 @@ static struct cpuidle_state snb_cstates[] = { .exit_latency = 80, .target_residency = 211, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", .desc = "MWAIT 0x20", @@ -193,7 +193,7 @@ static struct cpuidle_state snb_cstates[] = { .exit_latency = 104, .target_residency = 345, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C7", .desc = "MWAIT 0x30", @@ -201,7 +201,7 @@ static struct cpuidle_state snb_cstates[] = { .exit_latency = 109, .target_residency = 345, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } }; @@ -214,7 +214,7 @@ static struct cpuidle_state byt_cstates[] = { .exit_latency = 1, .target_residency = 1, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C6N", .desc = "MWAIT 0x58", @@ -222,7 +222,7 @@ static struct cpuidle_state byt_cstates[] = { .exit_latency = 300, .target_residency = 275, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C6S", .desc = "MWAIT 0x52", @@ -230,7 +230,7 @@ static struct cpuidle_state byt_cstates[] = { .exit_latency = 500, .target_residency = 560, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C7", .desc = "MWAIT 0x60", @@ -238,7 +238,7 @@ static struct cpuidle_state byt_cstates[] = { .exit_latency = 1200, .target_residency = 4000, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C7S", .desc = "MWAIT 0x64", @@ -246,7 +246,7 @@ static struct cpuidle_state byt_cstates[] = { .exit_latency = 10000, .target_residency = 20000, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } }; @@ -259,7 +259,7 @@ static struct cpuidle_state cht_cstates[] = { .exit_latency = 1, .target_residency = 1, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C6N", .desc = "MWAIT 0x58", @@ -267,7 +267,7 @@ static struct cpuidle_state cht_cstates[] = { .exit_latency = 80, .target_residency = 275, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C6S", .desc = "MWAIT 0x52", @@ -275,7 +275,7 @@ static struct cpuidle_state cht_cstates[] = { .exit_latency = 200, .target_residency = 560, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C7", .desc = "MWAIT 0x60", @@ -283,7 +283,7 @@ static struct cpuidle_state cht_cstates[] = { .exit_latency = 1200, .target_residency = 4000, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C7S", .desc = "MWAIT 0x64", @@ -291,7 +291,7 @@ static struct cpuidle_state cht_cstates[] = { .exit_latency = 10000, .target_residency = 20000, .enter = &intel_idle, - .enter_freeze = 
intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } }; @@ -304,7 +304,7 @@ static struct cpuidle_state ivb_cstates[] = { .exit_latency = 1, .target_residency = 1, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", .desc = "MWAIT 0x01", @@ -312,7 +312,7 @@ static struct cpuidle_state ivb_cstates[] = { .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C3", .desc = "MWAIT 0x10", @@ -320,7 +320,7 @@ static struct cpuidle_state ivb_cstates[] = { .exit_latency = 59, .target_residency = 156, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", .desc = "MWAIT 0x20", @@ -328,7 +328,7 @@ static struct cpuidle_state ivb_cstates[] = { .exit_latency = 80, .target_residency = 300, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C7", .desc = "MWAIT 0x30", @@ -336,7 +336,7 @@ static struct cpuidle_state ivb_cstates[] = { .exit_latency = 87, .target_residency = 300, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } }; @@ -349,7 +349,7 @@ static struct cpuidle_state ivt_cstates[] = { .exit_latency = 1, .target_residency = 1, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", .desc = "MWAIT 0x01", @@ -357,7 +357,7 @@ static struct cpuidle_state ivt_cstates[] = { .exit_latency = 10, .target_residency = 80, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C3", .desc = "MWAIT 0x10", @@ -365,7 +365,7 @@ static struct cpuidle_state ivt_cstates[] = { .exit_latency = 59, .target_residency = 156, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", .desc = "MWAIT 0x20", @@ -373,7 +373,7 @@ static struct cpuidle_state ivt_cstates[] = { .exit_latency = 82, .target_residency = 300, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } }; @@ -386,7 +386,7 @@ static struct cpuidle_state ivt_cstates_4s[] = { .exit_latency = 1, .target_residency = 1, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", .desc = "MWAIT 0x01", @@ -394,7 +394,7 @@ static struct cpuidle_state ivt_cstates_4s[] = { .exit_latency = 10, .target_residency = 250, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C3", .desc = "MWAIT 0x10", @@ -402,7 +402,7 @@ static struct cpuidle_state ivt_cstates_4s[] = { .exit_latency = 59, .target_residency = 300, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", .desc = "MWAIT 0x20", @@ -410,7 +410,7 @@ static struct cpuidle_state ivt_cstates_4s[] = { .exit_latency = 84, .target_residency = 400, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } }; @@ -423,7 +423,7 @@ static struct cpuidle_state ivt_cstates_8s[] = { .exit_latency = 1, .target_residency = 1, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", .desc = "MWAIT 0x01", @@ 
-431,7 +431,7 @@ static struct cpuidle_state ivt_cstates_8s[] = { .exit_latency = 10, .target_residency = 500, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C3", .desc = "MWAIT 0x10", @@ -439,7 +439,7 @@ static struct cpuidle_state ivt_cstates_8s[] = { .exit_latency = 59, .target_residency = 600, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", .desc = "MWAIT 0x20", @@ -447,7 +447,7 @@ static struct cpuidle_state ivt_cstates_8s[] = { .exit_latency = 88, .target_residency = 700, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } }; @@ -460,7 +460,7 @@ static struct cpuidle_state hsw_cstates[] = { .exit_latency = 2, .target_residency = 2, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", .desc = "MWAIT 0x01", @@ -468,7 +468,7 @@ static struct cpuidle_state hsw_cstates[] = { .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C3", .desc = "MWAIT 0x10", @@ -476,7 +476,7 @@ static struct cpuidle_state hsw_cstates[] = { .exit_latency = 33, .target_residency = 100, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", .desc = "MWAIT 0x20", @@ -484,7 +484,7 @@ static struct cpuidle_state hsw_cstates[] = { .exit_latency = 133, .target_residency = 400, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C7s", .desc = "MWAIT 0x32", @@ -492,7 +492,7 @@ static struct cpuidle_state hsw_cstates[] = { .exit_latency = 166, .target_residency = 500, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C8", .desc = "MWAIT 0x40", @@ -500,7 +500,7 @@ static struct cpuidle_state hsw_cstates[] = { .exit_latency = 300, .target_residency = 900, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C9", .desc = "MWAIT 0x50", @@ -508,7 +508,7 @@ static struct cpuidle_state hsw_cstates[] = { .exit_latency = 600, .target_residency = 1800, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C10", .desc = "MWAIT 0x60", @@ -516,7 +516,7 @@ static struct cpuidle_state hsw_cstates[] = { .exit_latency = 2600, .target_residency = 7700, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } }; @@ -528,7 +528,7 @@ static struct cpuidle_state bdw_cstates[] = { .exit_latency = 2, .target_residency = 2, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", .desc = "MWAIT 0x01", @@ -536,7 +536,7 @@ static struct cpuidle_state bdw_cstates[] = { .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C3", .desc = "MWAIT 0x10", @@ -544,7 +544,7 @@ static struct cpuidle_state bdw_cstates[] = { .exit_latency = 40, .target_residency = 100, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", .desc = "MWAIT 0x20", @@ -552,7 +552,7 @@ static struct cpuidle_state bdw_cstates[] = 
{ .exit_latency = 133, .target_residency = 400, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C7s", .desc = "MWAIT 0x32", @@ -560,7 +560,7 @@ static struct cpuidle_state bdw_cstates[] = { .exit_latency = 166, .target_residency = 500, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C8", .desc = "MWAIT 0x40", @@ -568,7 +568,7 @@ static struct cpuidle_state bdw_cstates[] = { .exit_latency = 300, .target_residency = 900, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C9", .desc = "MWAIT 0x50", @@ -576,7 +576,7 @@ static struct cpuidle_state bdw_cstates[] = { .exit_latency = 600, .target_residency = 1800, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C10", .desc = "MWAIT 0x60", @@ -584,7 +584,7 @@ static struct cpuidle_state bdw_cstates[] = { .exit_latency = 2600, .target_residency = 7700, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } }; @@ -597,7 +597,7 @@ static struct cpuidle_state skl_cstates[] = { .exit_latency = 2, .target_residency = 2, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", .desc = "MWAIT 0x01", @@ -605,7 +605,7 @@ static struct cpuidle_state skl_cstates[] = { .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C3", .desc = "MWAIT 0x10", @@ -613,7 +613,7 @@ static struct cpuidle_state skl_cstates[] = { .exit_latency = 70, .target_residency = 100, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", .desc = "MWAIT 0x20", @@ -621,7 +621,7 @@ static struct cpuidle_state skl_cstates[] = { .exit_latency = 85, .target_residency = 200, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C7s", .desc = "MWAIT 0x33", @@ -629,7 +629,7 @@ static struct cpuidle_state skl_cstates[] = { .exit_latency = 124, .target_residency = 800, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C8", .desc = "MWAIT 0x40", @@ -637,7 +637,7 @@ static struct cpuidle_state skl_cstates[] = { .exit_latency = 200, .target_residency = 800, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C9", .desc = "MWAIT 0x50", @@ -645,7 +645,7 @@ static struct cpuidle_state skl_cstates[] = { .exit_latency = 480, .target_residency = 5000, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C10", .desc = "MWAIT 0x60", @@ -653,7 +653,7 @@ static struct cpuidle_state skl_cstates[] = { .exit_latency = 890, .target_residency = 5000, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } }; @@ -666,7 +666,7 @@ static struct cpuidle_state skx_cstates[] = { .exit_latency = 2, .target_residency = 2, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", .desc = "MWAIT 0x01", @@ -674,7 +674,7 @@ static struct cpuidle_state skx_cstates[] = { .exit_latency = 10, .target_residency = 20, .enter = 
&intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", .desc = "MWAIT 0x20", @@ -682,7 +682,7 @@ static struct cpuidle_state skx_cstates[] = { .exit_latency = 133, .target_residency = 600, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } }; @@ -695,7 +695,7 @@ static struct cpuidle_state atom_cstates[] = { .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C2", .desc = "MWAIT 0x10", @@ -703,7 +703,7 @@ static struct cpuidle_state atom_cstates[] = { .exit_latency = 20, .target_residency = 80, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C4", .desc = "MWAIT 0x30", @@ -711,7 +711,7 @@ static struct cpuidle_state atom_cstates[] = { .exit_latency = 100, .target_residency = 400, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", .desc = "MWAIT 0x52", @@ -719,7 +719,7 @@ static struct cpuidle_state atom_cstates[] = { .exit_latency = 140, .target_residency = 560, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } }; @@ -731,7 +731,7 @@ static struct cpuidle_state tangier_cstates[] = { .exit_latency = 1, .target_residency = 4, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C4", .desc = "MWAIT 0x30", @@ -739,7 +739,7 @@ static struct cpuidle_state tangier_cstates[] = { .exit_latency = 100, .target_residency = 400, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", .desc = "MWAIT 0x52", @@ -747,7 +747,7 @@ static struct cpuidle_state tangier_cstates[] = { .exit_latency = 140, .target_residency = 560, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C7", .desc = "MWAIT 0x60", @@ -755,7 +755,7 @@ static struct cpuidle_state tangier_cstates[] = { .exit_latency = 1200, .target_residency = 4000, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C9", .desc = "MWAIT 0x64", @@ -763,7 +763,7 @@ static struct cpuidle_state tangier_cstates[] = { .exit_latency = 10000, .target_residency = 20000, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } }; @@ -775,7 +775,7 @@ static struct cpuidle_state avn_cstates[] = { .exit_latency = 2, .target_residency = 2, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", .desc = "MWAIT 0x51", @@ -783,7 +783,7 @@ static struct cpuidle_state avn_cstates[] = { .exit_latency = 15, .target_residency = 45, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } }; @@ -795,7 +795,7 @@ static struct cpuidle_state knl_cstates[] = { .exit_latency = 1, .target_residency = 2, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze }, + .enter_s2idle = intel_idle_s2idle }, { .name = "C6", .desc = "MWAIT 0x10", @@ -803,7 +803,7 @@ static struct cpuidle_state knl_cstates[] = { .exit_latency = 120, .target_residency = 500, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze }, + .enter_s2idle = 
intel_idle_s2idle }, { .enter = NULL } }; @@ -816,7 +816,7 @@ static struct cpuidle_state bxt_cstates[] = { .exit_latency = 2, .target_residency = 2, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", .desc = "MWAIT 0x01", @@ -824,7 +824,7 @@ static struct cpuidle_state bxt_cstates[] = { .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", .desc = "MWAIT 0x20", @@ -832,7 +832,7 @@ static struct cpuidle_state bxt_cstates[] = { .exit_latency = 133, .target_residency = 133, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C7s", .desc = "MWAIT 0x31", @@ -840,7 +840,7 @@ static struct cpuidle_state bxt_cstates[] = { .exit_latency = 155, .target_residency = 155, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C8", .desc = "MWAIT 0x40", @@ -848,7 +848,7 @@ static struct cpuidle_state bxt_cstates[] = { .exit_latency = 1000, .target_residency = 1000, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C9", .desc = "MWAIT 0x50", @@ -856,7 +856,7 @@ static struct cpuidle_state bxt_cstates[] = { .exit_latency = 2000, .target_residency = 2000, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C10", .desc = "MWAIT 0x60", @@ -864,7 +864,7 @@ static struct cpuidle_state bxt_cstates[] = { .exit_latency = 10000, .target_residency = 10000, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } }; @@ -877,7 +877,7 @@ static struct cpuidle_state dnv_cstates[] = { .exit_latency = 2, .target_residency = 2, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", .desc = "MWAIT 0x01", @@ -885,7 +885,7 @@ static struct cpuidle_state dnv_cstates[] = { .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", .desc = "MWAIT 0x20", @@ -893,7 +893,7 @@ static struct cpuidle_state dnv_cstates[] = { .exit_latency = 50, .target_residency = 500, .enter = &intel_idle, - .enter_freeze = intel_idle_freeze, }, + .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } }; @@ -935,12 +935,12 @@ static __cpuidle int intel_idle(struct cpuidle_device *dev, } /** - * intel_idle_freeze - simplified "enter" callback routine for suspend-to-idle + * intel_idle_s2idle - simplified "enter" callback routine for suspend-to-idle * @dev: cpuidle_device * @drv: cpuidle driver * @index: state index */ -static void intel_idle_freeze(struct cpuidle_device *dev, +static void intel_idle_s2idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { unsigned long ecx = 1; /* break on interrupt flag */ @@ -1330,13 +1330,14 @@ static void __init intel_idle_cpuidle_driver_init(void) intel_idle_state_table_update(); + cpuidle_poll_state_init(drv); drv->state_count = 1; for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) { int num_substates, mwait_hint, mwait_cstate; if ((cpuidle_state_table[cstate].enter == NULL) && - (cpuidle_state_table[cstate].enter_freeze == NULL)) + (cpuidle_state_table[cstate].enter_s2idle == NULL)) break; if (cstate + 1 > max_cstate) { diff --git 
a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c index e6706a09e100..47c3d7f32900 100644 --- a/drivers/iio/adc/ad7793.c +++ b/drivers/iio/adc/ad7793.c @@ -257,7 +257,7 @@ static int ad7793_setup(struct iio_dev *indio_dev, unsigned int vref_mv) { struct ad7793_state *st = iio_priv(indio_dev); - int i, ret = -1; + int i, ret; unsigned long long scale_uv; u32 id; @@ -266,7 +266,7 @@ static int ad7793_setup(struct iio_dev *indio_dev, return ret; /* reset the serial interface */ - ret = spi_write(st->sd.spi, (u8 *)&ret, sizeof(ret)); + ret = ad_sd_reset(&st->sd, 32); if (ret < 0) goto out; usleep_range(500, 2000); /* Wait for at least 500us */ diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c index d10bd0c97233..22c4c17cd996 100644 --- a/drivers/iio/adc/ad_sigma_delta.c +++ b/drivers/iio/adc/ad_sigma_delta.c @@ -177,6 +177,34 @@ int ad_sd_read_reg(struct ad_sigma_delta *sigma_delta, } EXPORT_SYMBOL_GPL(ad_sd_read_reg); +/** + * ad_sd_reset() - Reset the serial interface + * + * @sigma_delta: The sigma delta device + * @reset_length: Number of SCLKs with DIN = 1 + * + * Returns 0 on success, an error code otherwise. + **/ +int ad_sd_reset(struct ad_sigma_delta *sigma_delta, + unsigned int reset_length) +{ + uint8_t *buf; + unsigned int size; + int ret; + + size = DIV_ROUND_UP(reset_length, 8); + buf = kcalloc(size, sizeof(*buf), GFP_KERNEL); + if (!buf) + return -ENOMEM; + + memset(buf, 0xff, size); + ret = spi_write(sigma_delta->spi, buf, size); + kfree(buf); + + return ret; +} +EXPORT_SYMBOL_GPL(ad_sd_reset); + static int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta, unsigned int mode, unsigned int channel) { diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c index 634717ae12f3..071dd23a33d9 100644 --- a/drivers/iio/adc/mcp320x.c +++ b/drivers/iio/adc/mcp320x.c @@ -17,6 +17,8 @@ * MCP3204 * MCP3208 * ------------ + * 13 bit converter + * MCP3301 * * Datasheet can be found here: * http://ww1.microchip.com/downloads/en/DeviceDoc/21293C.pdf mcp3001 @@ -96,7 +98,7 @@ static int mcp320x_channel_to_tx_data(int device_index, } static int mcp320x_adc_conversion(struct mcp320x *adc, u8 channel, - bool differential, int device_index) + bool differential, int device_index, int *val) { int ret; @@ -117,19 +119,25 @@ static int mcp320x_adc_conversion(struct mcp320x *adc, u8 channel, switch (device_index) { case mcp3001: - return (adc->rx_buf[0] << 5 | adc->rx_buf[1] >> 3); + *val = (adc->rx_buf[0] << 5 | adc->rx_buf[1] >> 3); + return 0; case mcp3002: case mcp3004: case mcp3008: - return (adc->rx_buf[0] << 2 | adc->rx_buf[1] >> 6); + *val = (adc->rx_buf[0] << 2 | adc->rx_buf[1] >> 6); + return 0; case mcp3201: - return (adc->rx_buf[0] << 7 | adc->rx_buf[1] >> 1); + *val = (adc->rx_buf[0] << 7 | adc->rx_buf[1] >> 1); + return 0; case mcp3202: case mcp3204: case mcp3208: - return (adc->rx_buf[0] << 4 | adc->rx_buf[1] >> 4); + *val = (adc->rx_buf[0] << 4 | adc->rx_buf[1] >> 4); + return 0; case mcp3301: - return sign_extend32((adc->rx_buf[0] & 0x1f) << 8 | adc->rx_buf[1], 12); + *val = sign_extend32((adc->rx_buf[0] & 0x1f) << 8 + | adc->rx_buf[1], 12); + return 0; default: return -EINVAL; } @@ -150,12 +158,10 @@ static int mcp320x_read_raw(struct iio_dev *indio_dev, switch (mask) { case IIO_CHAN_INFO_RAW: ret = mcp320x_adc_conversion(adc, channel->address, - channel->differential, device_index); - + channel->differential, device_index, val); if (ret < 0) goto out; - *val = ret; ret = IIO_VAL_INT; break; @@ -312,6 +318,7 @@ static int 
mcp320x_probe(struct spi_device *spi) indio_dev->name = spi_get_device_id(spi)->name; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->info = &mcp320x_info; + spi_set_drvdata(spi, indio_dev); chip_info = &mcp320x_chip_infos[spi_get_device_id(spi)->driver_data]; indio_dev->channels = chip_info->channels; diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c index 6bc602891f2f..4df32cf1650e 100644 --- a/drivers/iio/adc/stm32-adc.c +++ b/drivers/iio/adc/stm32-adc.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -185,6 +186,11 @@ enum stm32_adc_extsel { STM32_EXT13, STM32_EXT14, STM32_EXT15, + STM32_EXT16, + STM32_EXT17, + STM32_EXT18, + STM32_EXT19, + STM32_EXT20, }; /** @@ -526,6 +532,9 @@ static struct stm32_adc_trig_info stm32h7_adc_trigs[] = { { TIM4_TRGO, STM32_EXT12 }, { TIM6_TRGO, STM32_EXT13 }, { TIM3_CH4, STM32_EXT15 }, + { LPTIM1_OUT, STM32_EXT18 }, + { LPTIM2_OUT, STM32_EXT19 }, + { LPTIM3_OUT, STM32_EXT20 }, {}, }; @@ -1082,7 +1091,8 @@ static int stm32_adc_get_trig_extsel(struct iio_dev *indio_dev, * Checking both stm32 timer trigger type and trig name * should be safe against arbitrary trigger names. */ - if (is_stm32_timer_trigger(trig) && + if ((is_stm32_timer_trigger(trig) || + is_stm32_lptim_trigger(trig)) && !strcmp(adc->cfg->trigs[i].name, trig->name)) { return adc->cfg->trigs[i].extsel; } @@ -1656,7 +1666,7 @@ static int stm32_adc_chan_of_init(struct iio_dev *indio_dev) num_channels = of_property_count_u32_elems(node, "st,adc-channels"); if (num_channels < 0 || - num_channels >= adc_info->max_channels) { + num_channels > adc_info->max_channels) { dev_err(&indio_dev->dev, "Bad st,adc-channels?\n"); return num_channels < 0 ? num_channels : -EINVAL; } @@ -1764,7 +1774,7 @@ static int stm32_adc_probe(struct platform_device *pdev) indio_dev->dev.parent = &pdev->dev; indio_dev->dev.of_node = pdev->dev.of_node; indio_dev->info = &stm32_adc_iio_info; - indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->modes = INDIO_DIRECT_MODE | INDIO_HARDWARE_TRIGGERED; platform_set_drvdata(pdev, adc); diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c index d1210024f6bc..e0dc20488335 100644 --- a/drivers/iio/adc/ti-ads1015.c +++ b/drivers/iio/adc/ti-ads1015.c @@ -52,7 +52,7 @@ #define ADS1015_CFG_COMP_QUE_MASK GENMASK(1, 0) #define ADS1015_CFG_COMP_LAT_MASK BIT(2) -#define ADS1015_CFG_COMP_POL_MASK BIT(2) +#define ADS1015_CFG_COMP_POL_MASK BIT(3) #define ADS1015_CFG_COMP_MODE_MASK BIT(4) #define ADS1015_CFG_DR_MASK GENMASK(7, 5) #define ADS1015_CFG_MOD_MASK BIT(8) @@ -1017,10 +1017,12 @@ static int ads1015_probe(struct i2c_client *client, switch (irq_trig) { case IRQF_TRIGGER_LOW: - cfg_comp |= ADS1015_CFG_COMP_POL_LOW; + cfg_comp |= ADS1015_CFG_COMP_POL_LOW << + ADS1015_CFG_COMP_POL_SHIFT; break; case IRQF_TRIGGER_HIGH: - cfg_comp |= ADS1015_CFG_COMP_POL_HIGH; + cfg_comp |= ADS1015_CFG_COMP_POL_HIGH << + ADS1015_CFG_COMP_POL_SHIFT; break; default: return -EINVAL; diff --git a/drivers/iio/adc/twl4030-madc.c b/drivers/iio/adc/twl4030-madc.c index bd3d37fc2144..e3cfb91bffc6 100644 --- a/drivers/iio/adc/twl4030-madc.c +++ b/drivers/iio/adc/twl4030-madc.c @@ -35,7 +35,7 @@ #include #include #include -#include +#include #include #include #include @@ -887,21 +887,27 @@ static int twl4030_madc_probe(struct platform_device *pdev) /* Enable 3v1 bias regulator for MADC[3:6] */ madc->usb3v1 = devm_regulator_get(madc->dev, "vusb3v1"); - if (IS_ERR(madc->usb3v1)) - return -ENODEV; + if (IS_ERR(madc->usb3v1)) { + ret = -ENODEV; + 
goto err_i2c; + } ret = regulator_enable(madc->usb3v1); - if (ret) + if (ret) { dev_err(madc->dev, "could not enable 3v1 bias regulator\n"); + goto err_i2c; + } ret = iio_device_register(iio_dev); if (ret) { dev_err(&pdev->dev, "could not register iio device\n"); - goto err_i2c; + goto err_usb3v1; } return 0; +err_usb3v1: + regulator_disable(madc->usb3v1); err_i2c: twl4030_madc_set_current_generator(madc, 0, 0); err_current_generator: diff --git a/drivers/iio/adc/twl6030-gpadc.c b/drivers/iio/adc/twl6030-gpadc.c index becbb0aef232..bc0e60b9da45 100644 --- a/drivers/iio/adc/twl6030-gpadc.c +++ b/drivers/iio/adc/twl6030-gpadc.c @@ -33,7 +33,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c index d99bb1460fe2..02e833b14db0 100644 --- a/drivers/iio/common/st_sensors/st_sensors_core.c +++ b/drivers/iio/common/st_sensors/st_sensors_core.c @@ -463,8 +463,17 @@ int st_sensors_set_dataready_irq(struct iio_dev *indio_dev, bool enable) u8 drdy_mask; struct st_sensor_data *sdata = iio_priv(indio_dev); - if (!sdata->sensor_settings->drdy_irq.addr) + if (!sdata->sensor_settings->drdy_irq.addr) { + /* + * there are some devices (e.g. LIS3MDL) where drdy line is + * routed to a given pin and it is not possible to select a + * different one. Take into account irq status register + * to understand if irq trigger can be properly supported + */ + if (sdata->sensor_settings->drdy_irq.addr_stat_drdy) + sdata->hw_irq_trigger = enable; return 0; + } /* Enable/Disable the interrupt generator 1. */ if (sdata->sensor_settings->drdy_irq.ig1.en_addr > 0) { diff --git a/drivers/iio/counter/Kconfig b/drivers/iio/counter/Kconfig index b37e5fc03149..474e1ac4e7c0 100644 --- a/drivers/iio/counter/Kconfig +++ b/drivers/iio/counter/Kconfig @@ -21,4 +21,13 @@ config 104_QUAD_8 The base port addresses for the devices may be configured via the base array module parameter. +config STM32_LPTIMER_CNT + tristate "STM32 LP Timer encoder counter driver" + depends on MFD_STM32_LPTIMER || COMPILE_TEST + help + Select this option to enable STM32 Low-Power Timer quadrature encoder + and counter driver. + + To compile this driver as a module, choose M here: the + module will be called stm32-lptimer-cnt. endmenu diff --git a/drivers/iio/counter/Makefile b/drivers/iio/counter/Makefile index 007e88411648..1b9a896eb488 100644 --- a/drivers/iio/counter/Makefile +++ b/drivers/iio/counter/Makefile @@ -5,3 +5,4 @@ # When adding new entries keep the list in alphabetical order obj-$(CONFIG_104_QUAD_8) += 104-quad-8.o +obj-$(CONFIG_STM32_LPTIMER_CNT) += stm32-lptimer-cnt.o diff --git a/drivers/iio/counter/stm32-lptimer-cnt.c b/drivers/iio/counter/stm32-lptimer-cnt.c new file mode 100644 index 000000000000..1c5909bb1605 --- /dev/null +++ b/drivers/iio/counter/stm32-lptimer-cnt.c @@ -0,0 +1,383 @@ +/* + * STM32 Low-Power Timer Encoder and Counter driver + * + * Copyright (C) STMicroelectronics 2017 + * + * Author: Fabrice Gasnier + * + * Inspired by 104-quad-8 and stm32-timer-trigger drivers. 
+ * + * License terms: GNU General Public License (GPL), version 2 + */ + +#include +#include +#include +#include +#include + +struct stm32_lptim_cnt { + struct device *dev; + struct regmap *regmap; + struct clk *clk; + u32 preset; + u32 polarity; + u32 quadrature_mode; +}; + +static int stm32_lptim_is_enabled(struct stm32_lptim_cnt *priv) +{ + u32 val; + int ret; + + ret = regmap_read(priv->regmap, STM32_LPTIM_CR, &val); + if (ret) + return ret; + + return FIELD_GET(STM32_LPTIM_ENABLE, val); +} + +static int stm32_lptim_set_enable_state(struct stm32_lptim_cnt *priv, + int enable) +{ + int ret; + u32 val; + + val = FIELD_PREP(STM32_LPTIM_ENABLE, enable); + ret = regmap_write(priv->regmap, STM32_LPTIM_CR, val); + if (ret) + return ret; + + if (!enable) { + clk_disable(priv->clk); + return 0; + } + + /* LP timer must be enabled before writing CMP & ARR */ + ret = regmap_write(priv->regmap, STM32_LPTIM_ARR, priv->preset); + if (ret) + return ret; + + ret = regmap_write(priv->regmap, STM32_LPTIM_CMP, 0); + if (ret) + return ret; + + /* ensure CMP & ARR registers are properly written */ + ret = regmap_read_poll_timeout(priv->regmap, STM32_LPTIM_ISR, val, + (val & STM32_LPTIM_CMPOK_ARROK), + 100, 1000); + if (ret) + return ret; + + ret = regmap_write(priv->regmap, STM32_LPTIM_ICR, + STM32_LPTIM_CMPOKCF_ARROKCF); + if (ret) + return ret; + + ret = clk_enable(priv->clk); + if (ret) { + regmap_write(priv->regmap, STM32_LPTIM_CR, 0); + return ret; + } + + /* Start LP timer in continuous mode */ + return regmap_update_bits(priv->regmap, STM32_LPTIM_CR, + STM32_LPTIM_CNTSTRT, STM32_LPTIM_CNTSTRT); +} + +static int stm32_lptim_setup(struct stm32_lptim_cnt *priv, int enable) +{ + u32 mask = STM32_LPTIM_ENC | STM32_LPTIM_COUNTMODE | + STM32_LPTIM_CKPOL | STM32_LPTIM_PRESC; + u32 val; + + /* Setup LP timer encoder/counter and polarity, without prescaler */ + if (priv->quadrature_mode) + val = enable ? STM32_LPTIM_ENC : 0; + else + val = enable ? STM32_LPTIM_COUNTMODE : 0; + val |= FIELD_PREP(STM32_LPTIM_CKPOL, enable ? 
priv->polarity : 0); + + return regmap_update_bits(priv->regmap, STM32_LPTIM_CFGR, mask, val); +} + +static int stm32_lptim_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int val, int val2, long mask) +{ + struct stm32_lptim_cnt *priv = iio_priv(indio_dev); + int ret; + + switch (mask) { + case IIO_CHAN_INFO_ENABLE: + if (val < 0 || val > 1) + return -EINVAL; + + /* Check nobody uses the timer, or already disabled/enabled */ + ret = stm32_lptim_is_enabled(priv); + if ((ret < 0) || (!ret && !val)) + return ret; + if (val && ret) + return -EBUSY; + + ret = stm32_lptim_setup(priv, val); + if (ret) + return ret; + return stm32_lptim_set_enable_state(priv, val); + + default: + return -EINVAL; + } +} + +static int stm32_lptim_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, long mask) +{ + struct stm32_lptim_cnt *priv = iio_priv(indio_dev); + u32 dat; + int ret; + + switch (mask) { + case IIO_CHAN_INFO_RAW: + ret = regmap_read(priv->regmap, STM32_LPTIM_CNT, &dat); + if (ret) + return ret; + *val = dat; + return IIO_VAL_INT; + + case IIO_CHAN_INFO_ENABLE: + ret = stm32_lptim_is_enabled(priv); + if (ret < 0) + return ret; + *val = ret; + return IIO_VAL_INT; + + case IIO_CHAN_INFO_SCALE: + /* Non-quadrature mode: scale = 1 */ + *val = 1; + *val2 = 0; + if (priv->quadrature_mode) { + /* + * Quadrature encoder mode: + * - both edges, quarter cycle, scale is 0.25 + * - either rising/falling edge scale is 0.5 + */ + if (priv->polarity > 1) + *val2 = 2; + else + *val2 = 1; + } + return IIO_VAL_FRACTIONAL_LOG2; + + default: + return -EINVAL; + } +} + +static const struct iio_info stm32_lptim_cnt_iio_info = { + .read_raw = stm32_lptim_read_raw, + .write_raw = stm32_lptim_write_raw, + .driver_module = THIS_MODULE, +}; + +static const char *const stm32_lptim_quadrature_modes[] = { + "non-quadrature", + "quadrature", +}; + +static int stm32_lptim_get_quadrature_mode(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan) +{ + struct stm32_lptim_cnt *priv = iio_priv(indio_dev); + + return priv->quadrature_mode; +} + +static int stm32_lptim_set_quadrature_mode(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + unsigned int type) +{ + struct stm32_lptim_cnt *priv = iio_priv(indio_dev); + + if (stm32_lptim_is_enabled(priv)) + return -EBUSY; + + priv->quadrature_mode = type; + + return 0; +} + +static const struct iio_enum stm32_lptim_quadrature_mode_en = { + .items = stm32_lptim_quadrature_modes, + .num_items = ARRAY_SIZE(stm32_lptim_quadrature_modes), + .get = stm32_lptim_get_quadrature_mode, + .set = stm32_lptim_set_quadrature_mode, +}; + +static const char * const stm32_lptim_cnt_polarity[] = { + "rising-edge", "falling-edge", "both-edges", +}; + +static int stm32_lptim_cnt_get_polarity(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan) +{ + struct stm32_lptim_cnt *priv = iio_priv(indio_dev); + + return priv->polarity; +} + +static int stm32_lptim_cnt_set_polarity(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + unsigned int type) +{ + struct stm32_lptim_cnt *priv = iio_priv(indio_dev); + + if (stm32_lptim_is_enabled(priv)) + return -EBUSY; + + priv->polarity = type; + + return 0; +} + +static const struct iio_enum stm32_lptim_cnt_polarity_en = { + .items = stm32_lptim_cnt_polarity, + .num_items = ARRAY_SIZE(stm32_lptim_cnt_polarity), + .get = stm32_lptim_cnt_get_polarity, + .set = stm32_lptim_cnt_set_polarity, +}; + +static ssize_t stm32_lptim_cnt_get_preset(struct iio_dev *indio_dev, + 
uintptr_t private, + const struct iio_chan_spec *chan, + char *buf) +{ + struct stm32_lptim_cnt *priv = iio_priv(indio_dev); + + return snprintf(buf, PAGE_SIZE, "%u\n", priv->preset); +} + +static ssize_t stm32_lptim_cnt_set_preset(struct iio_dev *indio_dev, + uintptr_t private, + const struct iio_chan_spec *chan, + const char *buf, size_t len) +{ + struct stm32_lptim_cnt *priv = iio_priv(indio_dev); + int ret; + + if (stm32_lptim_is_enabled(priv)) + return -EBUSY; + + ret = kstrtouint(buf, 0, &priv->preset); + if (ret) + return ret; + + if (priv->preset > STM32_LPTIM_MAX_ARR) + return -EINVAL; + + return len; +} + +/* LP timer with encoder */ +static const struct iio_chan_spec_ext_info stm32_lptim_enc_ext_info[] = { + { + .name = "preset", + .shared = IIO_SEPARATE, + .read = stm32_lptim_cnt_get_preset, + .write = stm32_lptim_cnt_set_preset, + }, + IIO_ENUM("polarity", IIO_SEPARATE, &stm32_lptim_cnt_polarity_en), + IIO_ENUM_AVAILABLE("polarity", &stm32_lptim_cnt_polarity_en), + IIO_ENUM("quadrature_mode", IIO_SEPARATE, + &stm32_lptim_quadrature_mode_en), + IIO_ENUM_AVAILABLE("quadrature_mode", &stm32_lptim_quadrature_mode_en), + {} +}; + +static const struct iio_chan_spec stm32_lptim_enc_channels = { + .type = IIO_COUNT, + .channel = 0, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_ENABLE) | + BIT(IIO_CHAN_INFO_SCALE), + .ext_info = stm32_lptim_enc_ext_info, + .indexed = 1, +}; + +/* LP timer without encoder (counter only) */ +static const struct iio_chan_spec_ext_info stm32_lptim_cnt_ext_info[] = { + { + .name = "preset", + .shared = IIO_SEPARATE, + .read = stm32_lptim_cnt_get_preset, + .write = stm32_lptim_cnt_set_preset, + }, + IIO_ENUM("polarity", IIO_SEPARATE, &stm32_lptim_cnt_polarity_en), + IIO_ENUM_AVAILABLE("polarity", &stm32_lptim_cnt_polarity_en), + {} +}; + +static const struct iio_chan_spec stm32_lptim_cnt_channels = { + .type = IIO_COUNT, + .channel = 0, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_ENABLE) | + BIT(IIO_CHAN_INFO_SCALE), + .ext_info = stm32_lptim_cnt_ext_info, + .indexed = 1, +}; + +static int stm32_lptim_cnt_probe(struct platform_device *pdev) +{ + struct stm32_lptimer *ddata = dev_get_drvdata(pdev->dev.parent); + struct stm32_lptim_cnt *priv; + struct iio_dev *indio_dev; + + if (IS_ERR_OR_NULL(ddata)) + return -EINVAL; + + indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*priv)); + if (!indio_dev) + return -ENOMEM; + + priv = iio_priv(indio_dev); + priv->dev = &pdev->dev; + priv->regmap = ddata->regmap; + priv->clk = ddata->clk; + priv->preset = STM32_LPTIM_MAX_ARR; + + indio_dev->name = dev_name(&pdev->dev); + indio_dev->dev.parent = &pdev->dev; + indio_dev->dev.of_node = pdev->dev.of_node; + indio_dev->info = &stm32_lptim_cnt_iio_info; + if (ddata->has_encoder) + indio_dev->channels = &stm32_lptim_enc_channels; + else + indio_dev->channels = &stm32_lptim_cnt_channels; + indio_dev->num_channels = 1; + + platform_set_drvdata(pdev, priv); + + return devm_iio_device_register(&pdev->dev, indio_dev); +} + +static const struct of_device_id stm32_lptim_cnt_of_match[] = { + { .compatible = "st,stm32-lptimer-counter", }, + {}, +}; +MODULE_DEVICE_TABLE(of, stm32_lptim_cnt_of_match); + +static struct platform_driver stm32_lptim_cnt_driver = { + .probe = stm32_lptim_cnt_probe, + .driver = { + .name = "stm32-lptimer-counter", + .of_match_table = stm32_lptim_cnt_of_match, + }, +}; +module_platform_driver(stm32_lptim_cnt_driver); + +MODULE_AUTHOR("Fabrice Gasnier "); +MODULE_ALIAS("platform:stm32-lptimer-counter"); 
+MODULE_DESCRIPTION("STMicroelectronics STM32 LPTIM counter driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index 17ec4cee51dc..a47428b4d31b 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -310,8 +310,10 @@ static ssize_t iio_debugfs_read_reg(struct file *file, char __user *userbuf, ret = indio_dev->info->debugfs_reg_access(indio_dev, indio_dev->cached_reg_addr, 0, &val); - if (ret) + if (ret) { dev_err(indio_dev->dev.parent, "%s: read failed\n", __func__); + return ret; + } len = snprintf(buf, sizeof(buf), "0x%X\n", val); diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c index e68368b5b2a3..08aafba4481c 100644 --- a/drivers/iio/magnetometer/st_magn_core.c +++ b/drivers/iio/magnetometer/st_magn_core.c @@ -315,6 +315,10 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = { }, }, }, + .drdy_irq = { + /* drdy line is routed drdy pin */ + .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + }, .multi_read_bit = true, .bootime = 2, }, diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c index 0d2ea3ee371b..8f26428804a2 100644 --- a/drivers/iio/pressure/bmp280-core.c +++ b/drivers/iio/pressure/bmp280-core.c @@ -573,7 +573,7 @@ static int bmp280_chip_config(struct bmp280_data *data) u8 osrs = BMP280_OSRS_TEMP_X(data->oversampling_temp + 1) | BMP280_OSRS_PRESS_X(data->oversampling_press + 1); - ret = regmap_update_bits(data->regmap, BMP280_REG_CTRL_MEAS, + ret = regmap_write_bits(data->regmap, BMP280_REG_CTRL_MEAS, BMP280_OSRS_TEMP_MASK | BMP280_OSRS_PRESS_MASK | BMP280_MODE_MASK, diff --git a/drivers/iio/trigger/Kconfig b/drivers/iio/trigger/Kconfig index e4d4e63434db..a633d2c8e805 100644 --- a/drivers/iio/trigger/Kconfig +++ b/drivers/iio/trigger/Kconfig @@ -24,6 +24,17 @@ config IIO_INTERRUPT_TRIGGER To compile this driver as a module, choose M here: the module will be called iio-trig-interrupt. +config IIO_STM32_LPTIMER_TRIGGER + tristate "STM32 Low-Power Timer Trigger" + depends on MFD_STM32_LPTIMER || COMPILE_TEST + help + Select this option to enable STM32 Low-Power Timer Trigger. + This can be used as trigger source for STM32 internal ADC + and/or DAC. + + To compile this driver as a module, choose M here: the + module will be called stm32-lptimer-trigger. + config IIO_STM32_TIMER_TRIGGER tristate "STM32 Timer Trigger" depends on (ARCH_STM32 && OF && MFD_STM32_TIMERS) || COMPILE_TEST diff --git a/drivers/iio/trigger/Makefile b/drivers/iio/trigger/Makefile index 5c4ecd380653..0a72a2a76cb2 100644 --- a/drivers/iio/trigger/Makefile +++ b/drivers/iio/trigger/Makefile @@ -6,6 +6,7 @@ obj-$(CONFIG_IIO_HRTIMER_TRIGGER) += iio-trig-hrtimer.o obj-$(CONFIG_IIO_INTERRUPT_TRIGGER) += iio-trig-interrupt.o +obj-$(CONFIG_IIO_STM32_LPTIMER_TRIGGER) += stm32-lptimer-trigger.o obj-$(CONFIG_IIO_STM32_TIMER_TRIGGER) += stm32-timer-trigger.o obj-$(CONFIG_IIO_SYSFS_TRIGGER) += iio-trig-sysfs.o obj-$(CONFIG_IIO_TIGHTLOOP_TRIGGER) += iio-trig-loop.o diff --git a/drivers/iio/trigger/stm32-lptimer-trigger.c b/drivers/iio/trigger/stm32-lptimer-trigger.c new file mode 100644 index 000000000000..241eae6a4306 --- /dev/null +++ b/drivers/iio/trigger/stm32-lptimer-trigger.c @@ -0,0 +1,118 @@ +/* + * STM32 Low-Power Timer Trigger driver + * + * Copyright (C) STMicroelectronics 2017 + * + * Author: Fabrice Gasnier . 
+ * + * License terms: GNU General Public License (GPL), version 2 + * + * Inspired by Benjamin Gaignard's stm32-timer-trigger driver + */ + +#include +#include +#include +#include + +/* List Low-Power Timer triggers */ +static const char * const stm32_lptim_triggers[] = { + LPTIM1_OUT, + LPTIM2_OUT, + LPTIM3_OUT, +}; + +struct stm32_lptim_trigger { + struct device *dev; + const char *trg; +}; + +static int stm32_lptim_validate_device(struct iio_trigger *trig, + struct iio_dev *indio_dev) +{ + if (indio_dev->modes & INDIO_HARDWARE_TRIGGERED) + return 0; + + return -EINVAL; +} + +static const struct iio_trigger_ops stm32_lptim_trigger_ops = { + .owner = THIS_MODULE, + .validate_device = stm32_lptim_validate_device, +}; + +/** + * is_stm32_lptim_trigger + * @trig: trigger to be checked + * + * return true if the trigger is a valid STM32 IIO Low-Power Timer Trigger + * either return false + */ +bool is_stm32_lptim_trigger(struct iio_trigger *trig) +{ + return (trig->ops == &stm32_lptim_trigger_ops); +} +EXPORT_SYMBOL(is_stm32_lptim_trigger); + +static int stm32_lptim_setup_trig(struct stm32_lptim_trigger *priv) +{ + struct iio_trigger *trig; + + trig = devm_iio_trigger_alloc(priv->dev, "%s", priv->trg); + if (!trig) + return -ENOMEM; + + trig->dev.parent = priv->dev->parent; + trig->ops = &stm32_lptim_trigger_ops; + iio_trigger_set_drvdata(trig, priv); + + return devm_iio_trigger_register(priv->dev, trig); +} + +static int stm32_lptim_trigger_probe(struct platform_device *pdev) +{ + struct stm32_lptim_trigger *priv; + u32 index; + int ret; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + if (of_property_read_u32(pdev->dev.of_node, "reg", &index)) + return -EINVAL; + + if (index >= ARRAY_SIZE(stm32_lptim_triggers)) + return -EINVAL; + + priv->dev = &pdev->dev; + priv->trg = stm32_lptim_triggers[index]; + + ret = stm32_lptim_setup_trig(priv); + if (ret) + return ret; + + platform_set_drvdata(pdev, priv); + + return 0; +} + +static const struct of_device_id stm32_lptim_trig_of_match[] = { + { .compatible = "st,stm32-lptimer-trigger", }, + {}, +}; +MODULE_DEVICE_TABLE(of, stm32_lptim_trig_of_match); + +static struct platform_driver stm32_lptim_trigger_driver = { + .probe = stm32_lptim_trigger_probe, + .driver = { + .name = "stm32-lptimer-trigger", + .of_match_table = stm32_lptim_trig_of_match, + }, +}; +module_platform_driver(stm32_lptim_trigger_driver); + +MODULE_AUTHOR("Fabrice Gasnier "); +MODULE_ALIAS("platform:stm32-lptimer-trigger"); +MODULE_DESCRIPTION("STMicroelectronics STM32 LPTIM trigger driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/iio/trigger/stm32-timer-trigger.c b/drivers/iio/trigger/stm32-timer-trigger.c index 9b9053494daf..eb212f8c8879 100644 --- a/drivers/iio/trigger/stm32-timer-trigger.c +++ b/drivers/iio/trigger/stm32-timer-trigger.c @@ -174,6 +174,7 @@ static void stm32_timer_stop(struct stm32_timer_trigger *priv) clk_disable(priv->clk); /* Stop timer */ + regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0); regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0); regmap_write(priv->regmap, TIM_PSC, 0); regmap_write(priv->regmap, TIM_ARR, 0); @@ -715,8 +716,9 @@ static ssize_t stm32_count_set_preset(struct iio_dev *indio_dev, if (ret) return ret; + /* TIMx_ARR register shouldn't be buffered (ARPE=0) */ + regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0); regmap_write(priv->regmap, TIM_ARR, preset); - regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, TIM_CR1_ARPE); return len; } diff --git 
a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c index 30825bb9b8e9..8861c052155a 100644 --- a/drivers/infiniband/core/iwpm_msg.c +++ b/drivers/infiniband/core/iwpm_msg.c @@ -100,6 +100,8 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client) if (ret) goto pid_query_error; + nlmsg_end(skb, nlh); + pr_debug("%s: Multicasting a nlmsg (dev = %s ifname = %s iwpm = %s)\n", __func__, pm_msg->dev_name, pm_msg->if_name, iwpm_ulib_name); @@ -170,6 +172,8 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client) &pm_msg->loc_addr, IWPM_NLA_MANAGE_ADDR); if (ret) goto add_mapping_error; + + nlmsg_end(skb, nlh); nlmsg_request->req_buffer = pm_msg; ret = rdma_nl_unicast_wait(skb, iwpm_user_pid); @@ -246,6 +250,8 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client) &pm_msg->rem_addr, IWPM_NLA_QUERY_REMOTE_ADDR); if (ret) goto query_mapping_error; + + nlmsg_end(skb, nlh); nlmsg_request->req_buffer = pm_msg; ret = rdma_nl_unicast_wait(skb, iwpm_user_pid); @@ -308,6 +314,8 @@ int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client) if (ret) goto remove_mapping_error; + nlmsg_end(skb, nlh); + ret = rdma_nl_unicast_wait(skb, iwpm_user_pid); if (ret) { skb = NULL; /* skb is freed in the netlink send-op handling */ diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c index c81c55942626..3c4faadb8cdd 100644 --- a/drivers/infiniband/core/iwpm_util.c +++ b/drivers/infiniband/core/iwpm_util.c @@ -597,6 +597,9 @@ static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid) &mapping_num, IWPM_NLA_MAPINFO_SEND_NUM); if (ret) goto mapinfo_num_error; + + nlmsg_end(skb, nlh); + ret = rdma_nl_unicast(skb, iwpm_pid); if (ret) { skb = NULL; @@ -678,6 +681,8 @@ int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid) if (ret) goto send_mapping_info_unlock; + nlmsg_end(skb, nlh); + iwpm_print_sockaddr(&map_info->local_sockaddr, "send_mapping_info: Local sockaddr:"); iwpm_print_sockaddr(&map_info->mapped_sockaddr, diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c index e685148dd3e6..b12e58787c3d 100644 --- a/drivers/infiniband/core/netlink.c +++ b/drivers/infiniband/core/netlink.c @@ -57,12 +57,12 @@ EXPORT_SYMBOL(rdma_nl_chk_listeners); static bool is_nl_msg_valid(unsigned int type, unsigned int op) { - static const unsigned int max_num_ops[RDMA_NL_NUM_CLIENTS - 1] = { - RDMA_NL_RDMA_CM_NUM_OPS, - RDMA_NL_IWPM_NUM_OPS, - 0, - RDMA_NL_LS_NUM_OPS, - RDMA_NLDEV_NUM_OPS }; + static const unsigned int max_num_ops[RDMA_NL_NUM_CLIENTS] = { + [RDMA_NL_RDMA_CM] = RDMA_NL_RDMA_CM_NUM_OPS, + [RDMA_NL_IWCM] = RDMA_NL_IWPM_NUM_OPS, + [RDMA_NL_LS] = RDMA_NL_LS_NUM_OPS, + [RDMA_NL_NLDEV] = RDMA_NLDEV_NUM_OPS, + }; /* * This BUILD_BUG_ON is intended to catch addition of new @@ -70,10 +70,10 @@ static bool is_nl_msg_valid(unsigned int type, unsigned int op) */ BUILD_BUG_ON(RDMA_NL_NUM_CLIENTS != 6); - if (type > RDMA_NL_NUM_CLIENTS - 1) + if (type >= RDMA_NL_NUM_CLIENTS) return false; - return (op < max_num_ops[type - 1]) ? true : false; + return (op < max_num_ops[type]) ? 
true : false; } static bool is_nl_valid(unsigned int type, unsigned int op) diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c index dbfd854c32c9..6ca607e8e293 100644 --- a/drivers/infiniband/core/rw.c +++ b/drivers/infiniband/core/rw.c @@ -643,6 +643,30 @@ void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp, } EXPORT_SYMBOL(rdma_rw_ctx_destroy_signature); +/** + * rdma_rw_mr_factor - return number of MRs required for a payload + * @device: device handling the connection + * @port_num: port num to which the connection is bound + * @maxpages: maximum payload pages per rdma_rw_ctx + * + * Returns the number of MRs the device requires to move @maxpayload + * bytes. The returned value is used during transport creation to + * compute max_rdma_ctxts and the size of the transport's Send and + * Send Completion Queues. + */ +unsigned int rdma_rw_mr_factor(struct ib_device *device, u8 port_num, + unsigned int maxpages) +{ + unsigned int mr_pages; + + if (rdma_rw_can_use_mr(device, port_num)) + mr_pages = rdma_rw_fr_page_list_len(device); + else + mr_pages = device->attrs.max_sge_rd; + return DIV_ROUND_UP(maxpages, mr_pages); +} +EXPORT_SYMBOL(rdma_rw_mr_factor); + void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr) { u32 factor; diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c index 70ad19c4c73e..88bdafb297f5 100644 --- a/drivers/infiniband/core/security.c +++ b/drivers/infiniband/core/security.c @@ -432,8 +432,10 @@ int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev) atomic_set(&qp->qp_sec->error_list_count, 0); init_completion(&qp->qp_sec->error_complete); ret = security_ib_alloc_security(&qp->qp_sec->security); - if (ret) + if (ret) { kfree(qp->qp_sec); + qp->qp_sec = NULL; + } return ret; } diff --git a/drivers/infiniband/core/umem_rbtree.c b/drivers/infiniband/core/umem_rbtree.c index d176597b4d78..fc801920e341 100644 --- a/drivers/infiniband/core/umem_rbtree.c +++ b/drivers/infiniband/core/umem_rbtree.c @@ -72,7 +72,7 @@ INTERVAL_TREE_DEFINE(struct umem_odp_node, rb, u64, __subtree_last, /* @last is not a part of the interval. See comment for function * node_last. 
*/ -int rbt_ib_umem_for_each_in_range(struct rb_root *root, +int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root, u64 start, u64 last, umem_call_back cb, void *cookie) @@ -95,7 +95,7 @@ int rbt_ib_umem_for_each_in_range(struct rb_root *root, } EXPORT_SYMBOL(rbt_ib_umem_for_each_in_range); -struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root *root, +struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root_cached *root, u64 addr, u64 length) { struct umem_odp_node *node; diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index e0cb99860934..52a2cf2d83aa 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -118,7 +118,7 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file, ucontext->closing = 0; #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING - ucontext->umem_tree = RB_ROOT; + ucontext->umem_tree = RB_ROOT_CACHED; init_rwsem(&ucontext->umem_rwsem); ucontext->odp_mrs_count = 0; INIT_LIST_HEAD(&ucontext->no_private_counters); @@ -3869,15 +3869,15 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file, resp.raw_packet_caps = attr.raw_packet_caps; resp.response_length += sizeof(resp.raw_packet_caps); - if (ucore->outlen < resp.response_length + sizeof(resp.xrq_caps)) + if (ucore->outlen < resp.response_length + sizeof(resp.tm_caps)) goto end; - resp.xrq_caps.max_rndv_hdr_size = attr.xrq_caps.max_rndv_hdr_size; - resp.xrq_caps.max_num_tags = attr.xrq_caps.max_num_tags; - resp.xrq_caps.max_ops = attr.xrq_caps.max_ops; - resp.xrq_caps.max_sge = attr.xrq_caps.max_sge; - resp.xrq_caps.flags = attr.xrq_caps.flags; - resp.response_length += sizeof(resp.xrq_caps); + resp.tm_caps.max_rndv_hdr_size = attr.tm_caps.max_rndv_hdr_size; + resp.tm_caps.max_num_tags = attr.tm_caps.max_num_tags; + resp.tm_caps.max_ops = attr.tm_caps.max_ops; + resp.tm_caps.max_sge = attr.tm_caps.max_sge; + resp.tm_caps.flags = attr.tm_caps.flags; + resp.response_length += sizeof(resp.tm_caps); end: err = ib_copy_to_udata(ucore, &resp, resp.response_length); return err; diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index ee9e27dc799b..de57d6c11a25 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -1646,7 +1646,7 @@ static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid) */ if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) { if (attr.qp_state >= IB_QPS_INIT) { - if (qp->device->get_link_layer(qp->device, attr.port_num) != + if (rdma_port_get_link_layer(qp->device, attr.port_num) != IB_LINK_LAYER_INFINIBAND) return true; goto lid_check; @@ -1655,7 +1655,7 @@ static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid) /* Can't get a quick answer, iterate over all ports */ for (port = 0; port < qp->device->phys_port_cnt; port++) - if (qp->device->get_link_layer(qp->device, port) != + if (rdma_port_get_link_layer(qp->device, port) != IB_LINK_LAYER_INFINIBAND) num_eth_ports++; diff --git a/drivers/infiniband/hw/bnxt_re/Kconfig b/drivers/infiniband/hw/bnxt_re/Kconfig index 19982a4a9bba..18f5ed082f41 100644 --- a/drivers/infiniband/hw/bnxt_re/Kconfig +++ b/drivers/infiniband/hw/bnxt_re/Kconfig @@ -1,6 +1,7 @@ config INFINIBAND_BNXT_RE tristate "Broadcom Netxtreme HCA support" depends on ETHERNET && NETDEVICES && PCI && INET && DCB + depends on MAY_USE_DEVLINK select NET_VENDOR_BROADCOM select BNXT ---help--- diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h index b3ad37fec578..ecbac91b2e14 100644 --- 
a/drivers/infiniband/hw/bnxt_re/bnxt_re.h +++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h @@ -93,11 +93,13 @@ struct bnxt_re_dev { struct ib_device ibdev; struct list_head list; unsigned long flags; -#define BNXT_RE_FLAG_NETDEV_REGISTERED 0 -#define BNXT_RE_FLAG_IBDEV_REGISTERED 1 -#define BNXT_RE_FLAG_GOT_MSIX 2 -#define BNXT_RE_FLAG_RCFW_CHANNEL_EN 8 -#define BNXT_RE_FLAG_QOS_WORK_REG 16 +#define BNXT_RE_FLAG_NETDEV_REGISTERED 0 +#define BNXT_RE_FLAG_IBDEV_REGISTERED 1 +#define BNXT_RE_FLAG_GOT_MSIX 2 +#define BNXT_RE_FLAG_HAVE_L2_REF 3 +#define BNXT_RE_FLAG_RCFW_CHANNEL_EN 4 +#define BNXT_RE_FLAG_QOS_WORK_REG 5 +#define BNXT_RE_FLAG_TASK_IN_PROG 6 struct net_device *netdev; unsigned int version, major, minor; struct bnxt_en_dev *en_dev; @@ -108,6 +110,8 @@ struct bnxt_re_dev { struct delayed_work worker; u8 cur_prio_map; + u8 active_speed; + u8 active_width; /* FP Notification Queue (CQ & SRQ) */ struct tasklet_struct nq_task; diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 01eee15bbd65..0d89621d9fe8 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -259,14 +259,9 @@ int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num, port_attr->sm_sl = 0; port_attr->subnet_timeout = 0; port_attr->init_type_reply = 0; - /* call the underlying netdev's ethtool hooks to query speed settings - * for which we acquire rtnl_lock _only_ if it's registered with - * IB stack to avoid race in the NETDEV_UNREG path - */ - if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) - if (ib_get_eth_speed(ibdev, port_num, &port_attr->active_speed, - &port_attr->active_width)) - return -EINVAL; + port_attr->active_speed = rdev->active_speed; + port_attr->active_width = rdev->active_width; + return 0; } @@ -319,6 +314,7 @@ int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num, struct bnxt_re_gid_ctx *ctx, **ctx_tbl; struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl; + struct bnxt_qplib_gid *gid_to_del; /* Delete the entry from the hardware */ ctx = *context; @@ -328,11 +324,25 @@ int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num, if (sgid_tbl && sgid_tbl->active) { if (ctx->idx >= sgid_tbl->max) return -EINVAL; + gid_to_del = &sgid_tbl->tbl[ctx->idx]; + /* DEL_GID is called in WQ context(netdevice_event_work_handler) + * or via the ib_unregister_device path. In the former case QP1 + * may not be destroyed yet, in which case just return as FW + * needs that entry to be present and will fail it's deletion. 
+ * We could get invoked again after QP1 is destroyed OR get an + * ADD_GID call with a different GID value for the same index + * where we issue MODIFY_GID cmd to update the GID entry -- TBD + */ + if (ctx->idx == 0 && + rdma_link_local_addr((struct in6_addr *)gid_to_del) && + ctx->refcnt == 1 && rdev->qp1_sqp) { + dev_dbg(rdev_to_dev(rdev), + "Trying to delete GID0 while QP1 is alive\n"); + return -EFAULT; + } ctx->refcnt--; if (!ctx->refcnt) { - rc = bnxt_qplib_del_sgid(sgid_tbl, - &sgid_tbl->tbl[ctx->idx], - true); + rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true); if (rc) { dev_err(rdev_to_dev(rdev), "Failed to remove GID: %#x", rc); @@ -816,6 +826,8 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp) kfree(rdev->sqp_ah); kfree(rdev->qp1_sqp); + rdev->qp1_sqp = NULL; + rdev->sqp_ah = NULL; } if (!IS_ERR_OR_NULL(qp->rumem)) @@ -1436,11 +1448,14 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU; qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu); + qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu); } else if (qp_attr->qp_state == IB_QPS_RTR) { qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU; qp->qplib_qp.path_mtu = __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu)); + qp->qplib_qp.mtu = + ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu)); } if (qp_attr_mask & IB_QP_TIMEOUT) { @@ -1551,43 +1566,46 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, { struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); struct bnxt_re_dev *rdev = qp->rdev; - struct bnxt_qplib_qp qplib_qp; + struct bnxt_qplib_qp *qplib_qp; int rc; - memset(&qplib_qp, 0, sizeof(struct bnxt_qplib_qp)); - qplib_qp.id = qp->qplib_qp.id; - qplib_qp.ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index; + qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL); + if (!qplib_qp) + return -ENOMEM; - rc = bnxt_qplib_query_qp(&rdev->qplib_res, &qplib_qp); + qplib_qp->id = qp->qplib_qp.id; + qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index; + + rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp); if (rc) { dev_err(rdev_to_dev(rdev), "Failed to query HW QP"); - return rc; + goto out; } - qp_attr->qp_state = __to_ib_qp_state(qplib_qp.state); - qp_attr->en_sqd_async_notify = qplib_qp.en_sqd_async_notify ? 1 : 0; - qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp.access); - qp_attr->pkey_index = qplib_qp.pkey_index; - qp_attr->qkey = qplib_qp.qkey; + qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state); + qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 
1 : 0; + qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access); + qp_attr->pkey_index = qplib_qp->pkey_index; + qp_attr->qkey = qplib_qp->qkey; qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; - rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp.ah.flow_label, - qplib_qp.ah.host_sgid_index, - qplib_qp.ah.hop_limit, - qplib_qp.ah.traffic_class); - rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp.ah.dgid.data); - rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp.ah.sl); - ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp.ah.dmac); - qp_attr->path_mtu = __to_ib_mtu(qplib_qp.path_mtu); - qp_attr->timeout = qplib_qp.timeout; - qp_attr->retry_cnt = qplib_qp.retry_cnt; - qp_attr->rnr_retry = qplib_qp.rnr_retry; - qp_attr->min_rnr_timer = qplib_qp.min_rnr_timer; - qp_attr->rq_psn = qplib_qp.rq.psn; - qp_attr->max_rd_atomic = qplib_qp.max_rd_atomic; - qp_attr->sq_psn = qplib_qp.sq.psn; - qp_attr->max_dest_rd_atomic = qplib_qp.max_dest_rd_atomic; - qp_init_attr->sq_sig_type = qplib_qp.sig_type ? IB_SIGNAL_ALL_WR : - IB_SIGNAL_REQ_WR; - qp_attr->dest_qp_num = qplib_qp.dest_qpn; + rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label, + qplib_qp->ah.host_sgid_index, + qplib_qp->ah.hop_limit, + qplib_qp->ah.traffic_class); + rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data); + rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl); + ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac); + qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu); + qp_attr->timeout = qplib_qp->timeout; + qp_attr->retry_cnt = qplib_qp->retry_cnt; + qp_attr->rnr_retry = qplib_qp->rnr_retry; + qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer; + qp_attr->rq_psn = qplib_qp->rq.psn; + qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic; + qp_attr->sq_psn = qplib_qp->sq.psn; + qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic; + qp_init_attr->sq_sig_type = qplib_qp->sig_type ? 
IB_SIGNAL_ALL_WR : + IB_SIGNAL_REQ_WR; + qp_attr->dest_qp_num = qplib_qp->dest_qpn; qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe; qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge; @@ -1596,7 +1614,9 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data; qp_init_attr->cap = qp_attr->cap; - return 0; +out: + kfree(qplib_qp); + return rc; } /* Routine for sending QP1 packets for RoCE V1 an V2 @@ -1908,6 +1928,7 @@ static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr, switch (wr->opcode) { case IB_WR_ATOMIC_CMP_AND_SWP: wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP; + wqe->atomic.cmp_data = atomic_wr(wr)->compare_add; wqe->atomic.swap_data = atomic_wr(wr)->swap; break; case IB_WR_ATOMIC_FETCH_AND_ADD: @@ -3062,7 +3083,7 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr) return rc; } - if (mr->npages && mr->pages) { + if (mr->pages) { rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res, &mr->qplib_frpl); kfree(mr->pages); diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 82d1cbc27aee..e7450ea92aa9 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -1161,6 +1161,8 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) } } set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags); + ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed, + &rdev->active_width); bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_PORT_ACTIVE); bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_GID_CHANGE); @@ -1255,10 +1257,14 @@ static void bnxt_re_task(struct work_struct *work) else if (netif_carrier_ok(rdev->netdev)) bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_PORT_ACTIVE); + ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed, + &rdev->active_width); break; default: break; } + smp_mb__before_atomic(); + clear_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags); kfree(re_work); } @@ -1317,6 +1323,11 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier, break; case NETDEV_UNREGISTER: + /* netdev notifier will call NETDEV_UNREGISTER again later since + * we are still holding the reference to the netdev + */ + if (test_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags)) + goto exit; bnxt_re_ib_unreg(rdev, false); bnxt_re_remove_one(rdev); bnxt_re_dev_unreg(rdev); @@ -1335,6 +1346,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier, re_work->vlan_dev = (real_dev == netdev ? 
NULL : netdev); INIT_WORK(&re_work->work, bnxt_re_task); + set_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags); queue_work(bnxt_re_wq, &re_work->work); } } @@ -1375,6 +1387,22 @@ static int __init bnxt_re_mod_init(void) static void __exit bnxt_re_mod_exit(void) { + struct bnxt_re_dev *rdev; + LIST_HEAD(to_be_deleted); + + mutex_lock(&bnxt_re_dev_lock); + /* Free all adapter allocated resources */ + if (!list_empty(&bnxt_re_dev_list)) + list_splice_init(&bnxt_re_dev_list, &to_be_deleted); + mutex_unlock(&bnxt_re_dev_lock); + + list_for_each_entry(rdev, &to_be_deleted, list) { + dev_info(rdev_to_dev(rdev), "Unregistering Device"); + bnxt_re_dev_stop(rdev); + bnxt_re_ib_unreg(rdev, true); + bnxt_re_remove_one(rdev); + bnxt_re_dev_unreg(rdev); + } unregister_netdevice_notifier(&bnxt_re_netdev_notifier); if (bnxt_re_wq) destroy_workqueue(bnxt_re_wq); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index 391bb7006e8f..2bdb1562bd21 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c @@ -107,6 +107,9 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req, return -EINVAL; } + if (test_bit(FIRMWARE_TIMED_OUT, &rcfw->flags)) + return -ETIMEDOUT; + /* Cmdq are in 16-byte units, each request can consume 1 or more * cmdqe */ @@ -226,6 +229,7 @@ int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, /* timed out */ dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x timedout (%d)msec", cookie, opcode, RCFW_CMD_WAIT_TIME_MS); + set_bit(FIRMWARE_TIMED_OUT, &rcfw->flags); return rc; } diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h index 0ed312f17c8d..85b16da287f9 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h @@ -162,8 +162,9 @@ struct bnxt_qplib_rcfw { unsigned long *cmdq_bitmap; u32 bmap_size; unsigned long flags; -#define FIRMWARE_INITIALIZED_FLAG 1 +#define FIRMWARE_INITIALIZED_FLAG BIT(0) #define FIRMWARE_FIRST_FLAG BIT(31) +#define FIRMWARE_TIMED_OUT BIT(3) wait_queue_head_t waitq; int (*aeq_handler)(struct bnxt_qplib_rcfw *, struct creq_func_event *); diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index ceaa2fa54d32..daf7a56e5d7e 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -2333,9 +2333,14 @@ static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb) unsigned int stid = GET_TID(rpl); struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid); + if (!ep) { + pr_debug("%s stid %d lookup failure!\n", __func__, stid); + goto out; + } pr_debug("%s ep %p\n", __func__, ep); c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); c4iw_put_ep(&ep->com); +out: return 0; } @@ -2594,9 +2599,9 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) c4iw_put_ep(&child_ep->com); reject: reject_cr(dev, hwtid, skb); +out: if (parent_ep) c4iw_put_ep(&parent_ep->com); -out: return 0; } @@ -3457,7 +3462,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) cm_id->provider_data = ep; goto out; } - + remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid); cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, ep->com.local_addr.ss_family); fail2: diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index b2ed4b9cda6e..0be42787759f 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -1066,6 
+1066,8 @@ static int read_idle_sma(struct hfi1_devdata *dd, u64 *data); static int thermal_init(struct hfi1_devdata *dd); static void update_statusp(struct hfi1_pportdata *ppd, u32 state); +static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd, + int msecs); static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state, int msecs); static void log_state_transition(struct hfi1_pportdata *ppd, u32 state); @@ -8238,6 +8240,7 @@ static irqreturn_t general_interrupt(int irq, void *data) u64 regs[CCE_NUM_INT_CSRS]; u32 bit; int i; + irqreturn_t handled = IRQ_NONE; this_cpu_inc(*dd->int_counter); @@ -8258,9 +8261,10 @@ static irqreturn_t general_interrupt(int irq, void *data) for_each_set_bit(bit, (unsigned long *)®s[0], CCE_NUM_INT_CSRS * 64) { is_interrupt(dd, bit); + handled = IRQ_HANDLED; } - return IRQ_HANDLED; + return handled; } static irqreturn_t sdma_interrupt(int irq, void *data) @@ -9413,7 +9417,7 @@ static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable) write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask); } -void reset_qsfp(struct hfi1_pportdata *ppd) +int reset_qsfp(struct hfi1_pportdata *ppd) { struct hfi1_devdata *dd = ppd->dd; u64 mask, qsfp_mask; @@ -9443,6 +9447,13 @@ void reset_qsfp(struct hfi1_pportdata *ppd) * for alarms and warnings */ set_qsfp_int_n(ppd, 1); + + /* + * After the reset, AOC transmitters are enabled by default. They need + * to be turned off to complete the QSFP setup before they can be + * enabled again. + */ + return set_qsfp_tx(ppd, 0); } static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd, @@ -10305,6 +10316,7 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason) { struct hfi1_devdata *dd = ppd->dd; u32 previous_state; + int offline_state_ret; int ret; update_lcb_cache(dd); @@ -10326,28 +10338,11 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason) ppd->offline_disabled_reason = HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT); - /* - * Wait for offline transition. It can take a while for - * the link to go down. - */ - ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 10000); - if (ret < 0) - return ret; - - /* - * Now in charge of LCB - must be after the physical state is - * offline.quiet and before host_link_state is changed. - */ - set_host_lcb_access(dd); - write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */ - - /* make sure the logical state is also down */ - ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000); - if (ret) - force_logical_link_state_down(ppd); - - ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */ + offline_state_ret = wait_phys_link_offline_substates(ppd, 10000); + if (offline_state_ret < 0) + return offline_state_ret; + /* Disabling AOC transmitters */ if (ppd->port_type == PORT_TYPE_QSFP && ppd->qsfp_info.limiting_active && qsfp_mod_present(ppd)) { @@ -10364,6 +10359,30 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason) } } + /* + * Wait for the offline.Quiet transition if it hasn't happened yet. It + * can take a while for the link to go down. + */ + if (offline_state_ret != PLS_OFFLINE_QUIET) { + ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 30000); + if (ret < 0) + return ret; + } + + /* + * Now in charge of LCB - must be after the physical state is + * offline.quiet and before host_link_state is changed. 
+ */ + set_host_lcb_access(dd); + write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */ + + /* make sure the logical state is also down */ + ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000); + if (ret) + force_logical_link_state_down(ppd); + + ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */ + /* * The LNI has a mandatory wait time after the physical state * moves to Offline.Quiet. The wait time may be different @@ -10396,6 +10415,9 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason) & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) { /* went down while attempting link up */ check_lni_states(ppd); + + /* The QSFP doesn't need to be reset on LNI failure */ + ppd->qsfp_info.reset_needed = 0; } /* the active link width (downgrade) is 0 on link down */ @@ -12804,6 +12826,39 @@ static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state, return 0; } +/* + * wait_phys_link_offline_quiet_substates - wait for any offline substate + * @ppd: port device + * @msecs: the number of milliseconds to wait + * + * Wait up to msecs milliseconds for any offline physical link + * state change to occur. + * Returns 0 if at least one state is reached, otherwise -ETIMEDOUT. + */ +static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd, + int msecs) +{ + u32 read_state; + unsigned long timeout; + + timeout = jiffies + msecs_to_jiffies(msecs); + while (1) { + read_state = read_physical_state(ppd->dd); + if ((read_state & 0xF0) == PLS_OFFLINE) + break; + if (time_after(jiffies, timeout)) { + dd_dev_err(ppd->dd, + "timeout waiting for phy link offline.quiet substates. Read state 0x%x, %dms\n", + read_state, msecs); + return -ETIMEDOUT; + } + usleep_range(1950, 2050); /* sleep 2ms-ish */ + } + + log_state_transition(ppd, read_state); + return read_state; +} + #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \ (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK) diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h index b8345a60a0fb..50b8645d0b87 100644 --- a/drivers/infiniband/hw/hfi1/chip.h +++ b/drivers/infiniband/hw/hfi1/chip.h @@ -204,6 +204,7 @@ #define PLS_OFFLINE_READY_TO_QUIET_LT 0x92 #define PLS_OFFLINE_REPORT_FAILURE 0x93 #define PLS_OFFLINE_READY_TO_QUIET_BCC 0x94 +#define PLS_OFFLINE_QUIET_DURATION 0x95 #define PLS_POLLING 0x20 #define PLS_POLLING_QUIET 0x20 #define PLS_POLLING_ACTIVE 0x21 @@ -722,7 +723,7 @@ void handle_link_downgrade(struct work_struct *work); void handle_link_bounce(struct work_struct *work); void handle_start_link(struct work_struct *work); void handle_sma_message(struct work_struct *work); -void reset_qsfp(struct hfi1_pportdata *ppd); +int reset_qsfp(struct hfi1_pportdata *ppd); void qsfp_event(struct work_struct *work); void start_freeze_handling(struct hfi1_pportdata *ppd, int flags); int send_idle_sma(struct hfi1_devdata *dd, u64 message); diff --git a/drivers/infiniband/hw/hfi1/eprom.c b/drivers/infiniband/hw/hfi1/eprom.c index d46b17107901..1613af1c58d9 100644 --- a/drivers/infiniband/hw/hfi1/eprom.c +++ b/drivers/infiniband/hw/hfi1/eprom.c @@ -204,7 +204,10 @@ int eprom_init(struct hfi1_devdata *dd) return ret; } -/* magic character sequence that trails an image */ +/* magic character sequence that begins an image */ +#define IMAGE_START_MAGIC "APO=" + +/* magic character sequence that might trail an image */ #define IMAGE_TRAIL_MAGIC "egamiAPO" /* EPROM file types */ @@ -250,6 +253,7 @@ static int read_partition_platform_config(struct hfi1_devdata *dd, void 
**data, { void *buffer; void *p; + u32 length; int ret; buffer = kmalloc(P1_SIZE, GFP_KERNEL); @@ -262,15 +266,21 @@ static int read_partition_platform_config(struct hfi1_devdata *dd, void **data, return ret; } - /* scan for image magic that may trail the actual data */ - p = strnstr(buffer, IMAGE_TRAIL_MAGIC, P1_SIZE); - if (!p) { + /* config partition is valid only if it starts with IMAGE_START_MAGIC */ + if (memcmp(buffer, IMAGE_START_MAGIC, strlen(IMAGE_START_MAGIC))) { kfree(buffer); return -ENOENT; } + /* scan for image magic that may trail the actual data */ + p = strnstr(buffer, IMAGE_TRAIL_MAGIC, P1_SIZE); + if (p) + length = p - buffer; + else + length = P1_SIZE; + *data = buffer; - *size = p - buffer; + *size = length; return 0; } diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index 2bc89260235a..d9a1e9893136 100644 --- a/drivers/infiniband/hw/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c @@ -930,15 +930,8 @@ static int assign_ctxt(struct hfi1_filedata *fd, struct hfi1_user_info *uinfo) switch (ret) { case 0: ret = setup_base_ctxt(fd, uctxt); - if (uctxt->subctxt_cnt) { - /* - * Base context is done (successfully or not), notify - * anybody using a sub-context that is waiting for - * this completion. - */ - clear_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags); - wake_up(&uctxt->wait); - } + if (ret) + deallocate_ctxt(uctxt); break; case 1: ret = complete_subctxt(fd); @@ -1305,25 +1298,25 @@ static int setup_base_ctxt(struct hfi1_filedata *fd, /* Now allocate the RcvHdr queue and eager buffers. */ ret = hfi1_create_rcvhdrq(dd, uctxt); if (ret) - return ret; + goto done; ret = hfi1_setup_eagerbufs(uctxt); if (ret) - goto setup_failed; + goto done; /* If sub-contexts are enabled, do the appropriate setup */ if (uctxt->subctxt_cnt) ret = setup_subctxt(uctxt); if (ret) - goto setup_failed; + goto done; ret = hfi1_alloc_ctxt_rcv_groups(uctxt); if (ret) - goto setup_failed; + goto done; ret = init_user_ctxt(fd, uctxt); if (ret) - goto setup_failed; + goto done; user_init(uctxt); @@ -1331,12 +1324,22 @@ static int setup_base_ctxt(struct hfi1_filedata *fd, fd->uctxt = uctxt; hfi1_rcd_get(uctxt); - return 0; +done: + if (uctxt->subctxt_cnt) { + /* + * On error, set the failed bit so sub-contexts will clean up + * correctly. + */ + if (ret) + set_bit(HFI1_CTXT_BASE_FAILED, &uctxt->event_flags); -setup_failed: - /* Set the failed bit so sub-context init can do the right thing */ - set_bit(HFI1_CTXT_BASE_FAILED, &uctxt->event_flags); - deallocate_ctxt(uctxt); + /* + * Base context is done (successfully or not), notify anybody + * using a sub-context that is waiting for this completion. 
+ */ + clear_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags); + wake_up(&uctxt->wait); + } return ret; } diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c index 2f0d285dc278..175002c046ed 100644 --- a/drivers/infiniband/hw/hfi1/mmu_rb.c +++ b/drivers/infiniband/hw/hfi1/mmu_rb.c @@ -54,7 +54,7 @@ struct mmu_rb_handler { struct mmu_notifier mn; - struct rb_root root; + struct rb_root_cached root; void *ops_arg; spinlock_t lock; /* protect the RB tree */ struct mmu_rb_ops *ops; @@ -108,7 +108,7 @@ int hfi1_mmu_rb_register(void *ops_arg, struct mm_struct *mm, if (!handlr) return -ENOMEM; - handlr->root = RB_ROOT; + handlr->root = RB_ROOT_CACHED; handlr->ops = ops; handlr->ops_arg = ops_arg; INIT_HLIST_NODE(&handlr->mn.hlist); @@ -149,9 +149,9 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler) INIT_LIST_HEAD(&del_list); spin_lock_irqsave(&handler->lock, flags); - while ((node = rb_first(&handler->root))) { + while ((node = rb_first_cached(&handler->root))) { rbnode = rb_entry(node, struct mmu_rb_node, node); - rb_erase(node, &handler->root); + rb_erase_cached(node, &handler->root); /* move from LRU list to delete list */ list_move(&rbnode->list, &del_list); } @@ -300,7 +300,7 @@ static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn, { struct mmu_rb_handler *handler = container_of(mn, struct mmu_rb_handler, mn); - struct rb_root *root = &handler->root; + struct rb_root_cached *root = &handler->root; struct mmu_rb_node *node, *ptr = NULL; unsigned long flags; bool added = false; diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c index 82447b7cdda1..09e50fd2a08f 100644 --- a/drivers/infiniband/hw/hfi1/pcie.c +++ b/drivers/infiniband/hw/hfi1/pcie.c @@ -68,7 +68,7 @@ /* * Code to adjust PCIe capabilities. */ -static int tune_pcie_caps(struct hfi1_devdata *); +static void tune_pcie_caps(struct hfi1_devdata *); /* * Do all the common PCIe setup and initialization. 
@@ -351,7 +351,7 @@ int pcie_speeds(struct hfi1_devdata *dd) */ int request_msix(struct hfi1_devdata *dd, u32 msireq) { - int nvec, ret; + int nvec; nvec = pci_alloc_irq_vectors(dd->pcidev, 1, msireq, PCI_IRQ_MSIX | PCI_IRQ_LEGACY); @@ -360,12 +360,7 @@ int request_msix(struct hfi1_devdata *dd, u32 msireq) return nvec; } - ret = tune_pcie_caps(dd); - if (ret) { - dd_dev_err(dd, "tune_pcie_caps() failed: %d\n", ret); - pci_free_irq_vectors(dd->pcidev); - return ret; - } + tune_pcie_caps(dd); /* check for legacy IRQ */ if (nvec == 1 && !dd->pcidev->msix_enabled) @@ -502,7 +497,7 @@ uint aspm_mode = ASPM_MODE_DISABLED; module_param_named(aspm, aspm_mode, uint, S_IRUGO); MODULE_PARM_DESC(aspm, "PCIe ASPM: 0: disable, 1: enable, 2: dynamic"); -static int tune_pcie_caps(struct hfi1_devdata *dd) +static void tune_pcie_caps(struct hfi1_devdata *dd) { struct pci_dev *parent; u16 rc_mpss, rc_mps, ep_mpss, ep_mps; @@ -513,22 +508,14 @@ static int tune_pcie_caps(struct hfi1_devdata *dd) * Turn on extended tags in DevCtl in case the BIOS has turned it off * to improve WFR SDMA bandwidth */ - ret = pcie_capability_read_word(dd->pcidev, - PCI_EXP_DEVCTL, &ectl); - if (ret) { - dd_dev_err(dd, "Unable to read from PCI config\n"); - return ret; - } - - if (!(ectl & PCI_EXP_DEVCTL_EXT_TAG)) { + ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &ectl); + if ((!ret) && !(ectl & PCI_EXP_DEVCTL_EXT_TAG)) { dd_dev_info(dd, "Enabling PCIe extended tags\n"); ectl |= PCI_EXP_DEVCTL_EXT_TAG; ret = pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL, ectl); - if (ret) { - dd_dev_err(dd, "Unable to write to PCI config\n"); - return ret; - } + if (ret) + dd_dev_info(dd, "Unable to write to PCI config\n"); } /* Find out supported and configured values for parent (root) */ parent = dd->pcidev->bus->self; @@ -536,15 +523,22 @@ static int tune_pcie_caps(struct hfi1_devdata *dd) * The driver cannot perform the tuning if it does not have * access to the upstream component. */ - if (!parent) - return -EINVAL; + if (!parent) { + dd_dev_info(dd, "Parent not found\n"); + return; + } if (!pci_is_root_bus(parent->bus)) { dd_dev_info(dd, "Parent not root\n"); - return -EINVAL; + return; + } + if (!pci_is_pcie(parent)) { + dd_dev_info(dd, "Parent is not PCI Express capable\n"); + return; + } + if (!pci_is_pcie(dd->pcidev)) { + dd_dev_info(dd, "PCI device is not PCI Express capable\n"); + return; } - - if (!pci_is_pcie(parent) || !pci_is_pcie(dd->pcidev)) - return -EINVAL; rc_mpss = parent->pcie_mpss; rc_mps = ffs(pcie_get_mps(parent)) - 8; /* Find out supported and configured values for endpoint (us) */ @@ -590,8 +584,6 @@ static int tune_pcie_caps(struct hfi1_devdata *dd) ep_mrrs = max_mrrs; pcie_set_readrq(dd->pcidev, ep_mrrs); } - - return 0; } /* End of PCIe capability tuning */ diff --git a/drivers/infiniband/hw/hfi1/platform.c b/drivers/infiniband/hw/hfi1/platform.c index a8af96d2b1b0..d486355880cb 100644 --- a/drivers/infiniband/hw/hfi1/platform.c +++ b/drivers/infiniband/hw/hfi1/platform.c @@ -790,7 +790,9 @@ static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset, * reuse of stale settings established in our previous pass through. 
*/ if (ppd->qsfp_info.reset_needed) { - reset_qsfp(ppd); + ret = reset_qsfp(ppd); + if (ret) + return ret; refresh_qsfp_cache(ppd, &ppd->qsfp_info); } else { ppd->qsfp_info.reset_needed = 1; diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h index 9b1566468744..a65e4cbdce2f 100644 --- a/drivers/infiniband/hw/i40iw/i40iw.h +++ b/drivers/infiniband/hw/i40iw/i40iw.h @@ -201,7 +201,6 @@ enum init_completion_state { CEQ_CREATED, ILQ_CREATED, IEQ_CREATED, - INET_NOTIFIER, IP_ADDR_REGISTERED, RDMA_DEV_REGISTERED }; diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c index 14f36ba4e5be..5230dd3c938c 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_cm.c +++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c @@ -1504,23 +1504,40 @@ static void i40iw_add_hte_node(struct i40iw_cm_core *cm_core, } /** - * listen_port_in_use - determine if port is in use - * @port: Listen port number + * i40iw_port_in_use - determine if port is in use + * @port: port number + * @active_side: flag for listener side vs active side */ -static bool i40iw_listen_port_in_use(struct i40iw_cm_core *cm_core, u16 port) +static bool i40iw_port_in_use(struct i40iw_cm_core *cm_core, u16 port, bool active_side) { struct i40iw_cm_listener *listen_node; + struct i40iw_cm_node *cm_node; unsigned long flags; bool ret = false; - spin_lock_irqsave(&cm_core->listen_list_lock, flags); - list_for_each_entry(listen_node, &cm_core->listen_nodes, list) { - if (listen_node->loc_port == port) { - ret = true; - break; + if (active_side) { + /* search connected node list */ + spin_lock_irqsave(&cm_core->ht_lock, flags); + list_for_each_entry(cm_node, &cm_core->connected_nodes, list) { + if (cm_node->loc_port == port) { + ret = true; + break; + } } + if (!ret) + clear_bit(port, cm_core->active_side_ports); + spin_unlock_irqrestore(&cm_core->ht_lock, flags); + } else { + spin_lock_irqsave(&cm_core->listen_list_lock, flags); + list_for_each_entry(listen_node, &cm_core->listen_nodes, list) { + if (listen_node->loc_port == port) { + ret = true; + break; + } + } + spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); } - spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); + return ret; } @@ -1868,7 +1885,7 @@ static int i40iw_dec_refcnt_listen(struct i40iw_cm_core *cm_core, spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); if (listener->iwdev) { - if (apbvt_del && !i40iw_listen_port_in_use(cm_core, listener->loc_port)) + if (apbvt_del && !i40iw_port_in_use(cm_core, listener->loc_port, false)) i40iw_manage_apbvt(listener->iwdev, listener->loc_port, I40IW_MANAGE_APBVT_DEL); @@ -2247,21 +2264,21 @@ static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *cm_node) if (cm_node->listener) { i40iw_dec_refcnt_listen(cm_core, cm_node->listener, 0, true); } else { - if (!i40iw_listen_port_in_use(cm_core, cm_node->loc_port) && - cm_node->apbvt_set) { + if (!i40iw_port_in_use(cm_core, cm_node->loc_port, true) && cm_node->apbvt_set) { i40iw_manage_apbvt(cm_node->iwdev, cm_node->loc_port, I40IW_MANAGE_APBVT_DEL); - i40iw_get_addr_info(cm_node, &nfo); - if (cm_node->qhash_set) { - i40iw_manage_qhash(cm_node->iwdev, - &nfo, - I40IW_QHASH_TYPE_TCP_ESTABLISHED, - I40IW_QHASH_MANAGE_TYPE_DELETE, - NULL, - false); - cm_node->qhash_set = 0; - } + cm_node->apbvt_set = 0; + } + i40iw_get_addr_info(cm_node, &nfo); + if (cm_node->qhash_set) { + i40iw_manage_qhash(cm_node->iwdev, + &nfo, + I40IW_QHASH_TYPE_TCP_ESTABLISHED, + I40IW_QHASH_MANAGE_TYPE_DELETE, + NULL, + false); + cm_node->qhash_set = 
0; } } @@ -3255,7 +3272,8 @@ static void i40iw_init_tcp_ctx(struct i40iw_cm_node *cm_node, tcp_info->snd_mss = cpu_to_le32(((u32)cm_node->tcp_cntxt.mss)); if (cm_node->vlan_id < VLAN_TAG_PRESENT) { tcp_info->insert_vlan_tag = true; - tcp_info->vlan_tag = cpu_to_le16(cm_node->vlan_id); + tcp_info->vlan_tag = cpu_to_le16(((u16)cm_node->user_pri << I40IW_VLAN_PRIO_SHIFT) | + cm_node->vlan_id); } if (cm_node->ipv4) { tcp_info->src_port = cpu_to_le16(cm_node->loc_port); @@ -3737,10 +3755,8 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) struct sockaddr_in *raddr; struct sockaddr_in6 *laddr6; struct sockaddr_in6 *raddr6; - bool qhash_set = false; - int apbvt_set = 0; - int err = 0; - enum i40iw_status_code status; + int ret = 0; + unsigned long flags; ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn); if (!ibqp) @@ -3789,32 +3805,6 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) cm_info.user_pri = rt_tos2priority(cm_id->tos); i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "%s TOS:[%d] UP:[%d]\n", __func__, cm_id->tos, cm_info.user_pri); - if ((cm_info.ipv4 && (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr)) || - (!cm_info.ipv4 && memcmp(laddr6->sin6_addr.in6_u.u6_addr32, - raddr6->sin6_addr.in6_u.u6_addr32, - sizeof(laddr6->sin6_addr.in6_u.u6_addr32)))) { - status = i40iw_manage_qhash(iwdev, - &cm_info, - I40IW_QHASH_TYPE_TCP_ESTABLISHED, - I40IW_QHASH_MANAGE_TYPE_ADD, - NULL, - true); - if (status) - return -EINVAL; - qhash_set = true; - } - status = i40iw_manage_apbvt(iwdev, cm_info.loc_port, I40IW_MANAGE_APBVT_ADD); - if (status) { - i40iw_manage_qhash(iwdev, - &cm_info, - I40IW_QHASH_TYPE_TCP_ESTABLISHED, - I40IW_QHASH_MANAGE_TYPE_DELETE, - NULL, - false); - return -EINVAL; - } - - apbvt_set = 1; cm_id->add_ref(cm_id); cm_node = i40iw_create_cm_node(&iwdev->cm_core, iwdev, conn_param->private_data_len, @@ -3822,17 +3812,40 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) &cm_info); if (IS_ERR(cm_node)) { - err = PTR_ERR(cm_node); - goto err_out; + ret = PTR_ERR(cm_node); + cm_id->rem_ref(cm_id); + return ret; } + if ((cm_info.ipv4 && (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr)) || + (!cm_info.ipv4 && memcmp(laddr6->sin6_addr.in6_u.u6_addr32, + raddr6->sin6_addr.in6_u.u6_addr32, + sizeof(laddr6->sin6_addr.in6_u.u6_addr32)))) { + if (i40iw_manage_qhash(iwdev, &cm_info, I40IW_QHASH_TYPE_TCP_ESTABLISHED, + I40IW_QHASH_MANAGE_TYPE_ADD, NULL, true)) { + ret = -EINVAL; + goto err; + } + cm_node->qhash_set = true; + } + + spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags); + if (!test_and_set_bit(cm_info.loc_port, iwdev->cm_core.active_side_ports)) { + spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags); + if (i40iw_manage_apbvt(iwdev, cm_info.loc_port, I40IW_MANAGE_APBVT_ADD)) { + ret = -EINVAL; + goto err; + } + } else { + spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags); + } + + cm_node->apbvt_set = true; i40iw_record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord); if (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO && !cm_node->ord_size) cm_node->ord_size = 1; - cm_node->apbvt_set = apbvt_set; - cm_node->qhash_set = qhash_set; iwqp->cm_node = cm_node; cm_node->iwqp = iwqp; iwqp->cm_id = cm_id; @@ -3840,11 +3853,9 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) if (cm_node->state != I40IW_CM_STATE_OFFLOADED) { cm_node->state = I40IW_CM_STATE_SYN_SENT; - err = i40iw_send_syn(cm_node, 0); - if (err) { - i40iw_rem_ref_cm_node(cm_node); - 
goto err_out; - } + ret = i40iw_send_syn(cm_node, 0); + if (ret) + goto err; } i40iw_debug(cm_node->dev, @@ -3853,9 +3864,10 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) cm_node->rem_port, cm_node, cm_node->cm_id); + return 0; -err_out: +err: if (cm_info.ipv4) i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM, @@ -3867,22 +3879,10 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) "Api - connect() FAILED: dest addr=%pI6", cm_info.rem_addr); - if (qhash_set) - i40iw_manage_qhash(iwdev, - &cm_info, - I40IW_QHASH_TYPE_TCP_ESTABLISHED, - I40IW_QHASH_MANAGE_TYPE_DELETE, - NULL, - false); - - if (apbvt_set && !i40iw_listen_port_in_use(&iwdev->cm_core, - cm_info.loc_port)) - i40iw_manage_apbvt(iwdev, - cm_info.loc_port, - I40IW_MANAGE_APBVT_DEL); + i40iw_rem_ref_cm_node(cm_node); cm_id->rem_ref(cm_id); iwdev->cm_core.stats_connect_errs++; - return err; + return ret; } /** diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.h b/drivers/infiniband/hw/i40iw/i40iw_cm.h index 2e52e38ffcf3..45abef76295b 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_cm.h +++ b/drivers/infiniband/hw/i40iw/i40iw_cm.h @@ -71,6 +71,9 @@ #define I40IW_HW_IRD_SETTING_32 32 #define I40IW_HW_IRD_SETTING_64 64 +#define MAX_PORTS 65536 +#define I40IW_VLAN_PRIO_SHIFT 13 + enum ietf_mpa_flags { IETF_MPA_FLAGS_MARKERS = 0x80, /* receive Markers */ IETF_MPA_FLAGS_CRC = 0x40, /* receive Markers */ @@ -411,6 +414,8 @@ struct i40iw_cm_core { spinlock_t ht_lock; /* manage hash table */ spinlock_t listen_list_lock; /* listen list */ + unsigned long active_side_ports[BITS_TO_LONGS(MAX_PORTS)]; + u64 stats_nodes_created; u64 stats_nodes_destroyed; u64 stats_listen_created; diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c index d1f5345f04f0..42ca5346777d 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c +++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c @@ -48,7 +48,7 @@ * @wqe: cqp wqe for header * @header: header for the cqp wqe */ -static inline void i40iw_insert_wqe_hdr(u64 *wqe, u64 header) +void i40iw_insert_wqe_hdr(u64 *wqe, u64 header) { wmb(); /* make sure WQE is populated before polarity is set */ set_64bit_val(wqe, 24, header); diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c index cc742c3132c6..27590ae21881 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_main.c +++ b/drivers/infiniband/hw/i40iw/i40iw_main.c @@ -99,8 +99,6 @@ static struct notifier_block i40iw_net_notifier = { .notifier_call = i40iw_net_event }; -static atomic_t i40iw_notifiers_registered; - /** * i40iw_find_i40e_handler - find a handler given a client info * @ldev: pointer to a client info @@ -1376,11 +1374,20 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev, */ static void i40iw_register_notifiers(void) { - if (atomic_inc_return(&i40iw_notifiers_registered) == 1) { - register_inetaddr_notifier(&i40iw_inetaddr_notifier); - register_inet6addr_notifier(&i40iw_inetaddr6_notifier); - register_netevent_notifier(&i40iw_net_notifier); - } + register_inetaddr_notifier(&i40iw_inetaddr_notifier); + register_inet6addr_notifier(&i40iw_inetaddr6_notifier); + register_netevent_notifier(&i40iw_net_notifier); +} + +/** + * i40iw_unregister_notifiers - unregister tcp ip notifiers + */ + +static void i40iw_unregister_notifiers(void) +{ + unregister_netevent_notifier(&i40iw_net_notifier); + unregister_inetaddr_notifier(&i40iw_inetaddr_notifier); + 
unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier); } /** @@ -1400,6 +1407,11 @@ static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev, u32 i; u32 size; + if (!ldev->msix_count) { + i40iw_pr_err("No MSI-X vectors\n"); + return I40IW_ERR_CONFIG; + } + iwdev->msix_count = ldev->msix_count; size = sizeof(struct i40iw_msix_vector) * iwdev->msix_count; @@ -1462,12 +1474,6 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev) if (!iwdev->reset) i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx); /* fallthrough */ - case INET_NOTIFIER: - if (!atomic_dec_return(&i40iw_notifiers_registered)) { - unregister_netevent_notifier(&i40iw_net_notifier); - unregister_inetaddr_notifier(&i40iw_inetaddr_notifier); - unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier); - } /* fallthrough */ case PBLE_CHUNK_MEM: i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc); @@ -1550,7 +1556,7 @@ static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl, status = i40iw_save_msix_info(iwdev, ldev); if (status) - goto exit; + return status; iwdev->hw.dev_context = (void *)ldev->pcidev; iwdev->hw.hw_addr = ldev->hw_addr; status = i40iw_allocate_dma_mem(&iwdev->hw, @@ -1667,8 +1673,6 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client) break; iwdev->init_state = PBLE_CHUNK_MEM; iwdev->virtchnl_wq = alloc_ordered_workqueue("iwvch", WQ_MEM_RECLAIM); - i40iw_register_notifiers(); - iwdev->init_state = INET_NOTIFIER; status = i40iw_add_mac_ip(iwdev); if (status) break; @@ -2018,6 +2022,8 @@ static int __init i40iw_init_module(void) i40iw_client.type = I40E_CLIENT_IWARP; spin_lock_init(&i40iw_handler_lock); ret = i40e_register_client(&i40iw_client); + i40iw_register_notifiers(); + return ret; } @@ -2029,6 +2035,7 @@ static int __init i40iw_init_module(void) */ static void __exit i40iw_exit_module(void) { + i40iw_unregister_notifiers(); i40e_unregister_client(&i40iw_client); } diff --git a/drivers/infiniband/hw/i40iw/i40iw_p.h b/drivers/infiniband/hw/i40iw/i40iw_p.h index e217a1259f57..5498ad01c280 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_p.h +++ b/drivers/infiniband/hw/i40iw/i40iw_p.h @@ -59,6 +59,8 @@ enum i40iw_status_code i40iw_sc_mr_fast_register(struct i40iw_sc_qp *qp, struct i40iw_fast_reg_stag_info *info, bool post_sq); +void i40iw_insert_wqe_hdr(u64 *wqe, u64 header); + /* HMC/FPM functions */ enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev, u8 hmc_fn_id); diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c index c2cab20c4bc5..59f70676f0e0 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_puda.c +++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c @@ -123,12 +123,11 @@ static void i40iw_puda_post_recvbuf(struct i40iw_puda_rsrc *rsrc, u32 wqe_idx, get_64bit_val(wqe, 24, &offset24); offset24 = (offset24) ? 
0 : LS_64(1, I40IWQPSQ_VALID); - set_64bit_val(wqe, 24, offset24); set_64bit_val(wqe, 0, buf->mem.pa); set_64bit_val(wqe, 8, LS_64(buf->mem.size, I40IWQPSQ_FRAG_LEN)); - set_64bit_val(wqe, 24, offset24); + i40iw_insert_wqe_hdr(wqe, offset24); } /** @@ -409,9 +408,7 @@ enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp, set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN)); set_64bit_val(wqe, 16, header[0]); - /* Ensure all data is written before writing valid bit */ - wmb(); - set_64bit_val(wqe, 24, header[1]); + i40iw_insert_wqe_hdr(wqe, header[1]); i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32); i40iw_qp_post_wr(&qp->qp_uk); @@ -539,7 +536,7 @@ static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_sc_dev *dev, struct LS_64(2, I40IW_CQPSQ_QP_NEXTIWSTATE) | LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); - set_64bit_val(wqe, 24, header); + i40iw_insert_wqe_hdr(wqe, header); i40iw_debug_buf(cqp->dev, I40IW_DEBUG_PUDA, "PUDA CQE", wqe, 32); i40iw_sc_cqp_post_sq(cqp); @@ -655,7 +652,7 @@ static enum i40iw_status_code i40iw_puda_cq_wqe(struct i40iw_sc_dev *dev, struct LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) | LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) | LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); - set_64bit_val(wqe, 24, header); + i40iw_insert_wqe_hdr(wqe, header); i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQE", wqe, I40IW_CQP_WQE_SIZE * 8); diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c index 62f1f45b8737..e52dbbb4165e 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_utils.c +++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c @@ -160,7 +160,7 @@ int i40iw_inetaddr_event(struct notifier_block *notifier, return NOTIFY_DONE; iwdev = &hdl->device; - if (iwdev->init_state < INET_NOTIFIER) + if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing) return NOTIFY_DONE; netdev = iwdev->ldev->netdev; @@ -217,7 +217,7 @@ int i40iw_inet6addr_event(struct notifier_block *notifier, return NOTIFY_DONE; iwdev = &hdl->device; - if (iwdev->init_state < INET_NOTIFIER) + if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing) return NOTIFY_DONE; netdev = iwdev->ldev->netdev; @@ -266,7 +266,7 @@ int i40iw_net_event(struct notifier_block *notifier, unsigned long event, void * if (!iwhdl) return NOTIFY_DONE; iwdev = &iwhdl->device; - if (iwdev->init_state < INET_NOTIFIER) + if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing) return NOTIFY_DONE; p = (__be32 *)neigh->primary_key; i40iw_copy_ip_ntohl(local_ipaddr, p); diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index 1aa411034a27..62be0a41ad0b 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -826,12 +826,14 @@ static int i40iw_query_qp(struct ib_qp *ibqp, attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE; attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT; attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT; + attr->port_num = 1; init_attr->event_handler = iwqp->ibqp.event_handler; init_attr->qp_context = iwqp->ibqp.qp_context; init_attr->send_cq = iwqp->ibqp.send_cq; init_attr->recv_cq = iwqp->ibqp.recv_cq; init_attr->srq = iwqp->ibqp.srq; init_attr->cap = attr->cap; + init_attr->port_num = 1; return 0; } @@ -1027,7 +1029,19 @@ int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED; iwqp->last_aeq = I40IW_AE_RESET_SENT; spin_unlock_irqrestore(&iwqp->lock, flags); + i40iw_cm_disconn(iwqp); } + } 
else { + spin_lock_irqsave(&iwqp->lock, flags); + if (iwqp->cm_id) { + if (atomic_inc_return(&iwqp->close_timer_started) == 1) { + iwqp->cm_id->add_ref(iwqp->cm_id); + i40iw_schedule_cm_timer(iwqp->cm_node, + (struct i40iw_puda_buf *)iwqp, + I40IW_TIMER_TYPE_CLOSE, 1, 0); + } + } + spin_unlock_irqrestore(&iwqp->lock, flags); } } return 0; diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 2747abde2ea8..b6b33d99b0b4 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -150,8 +150,8 @@ static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) /* VF or PF -- proxy SQP */ if (mlx4_is_mfunc(dev->dev)) { for (i = 0; i < dev->dev->caps.num_ports; i++) { - if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i] || - qp->mqp.qpn == dev->dev->caps.qp1_proxy[i]) { + if (qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp0_proxy || + qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp1_proxy) { proxy_sqp = 1; break; } @@ -178,7 +178,7 @@ static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) /* VF or PF -- proxy QP0 */ if (mlx4_is_mfunc(dev->dev)) { for (i = 0; i < dev->dev->caps.num_ports; i++) { - if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i]) { + if (qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp0_proxy) { proxy_qp0 = 1; break; } @@ -632,8 +632,8 @@ static int qp0_enabled_vf(struct mlx4_dev *dev, int qpn) { int i; for (i = 0; i < dev->caps.num_ports; i++) { - if (qpn == dev->caps.qp0_proxy[i]) - return !!dev->caps.qp0_qkey[i]; + if (qpn == dev->caps.spec_qps[i].qp0_proxy) + return !!dev->caps.spec_qps[i].qp0_qkey; } return 0; } @@ -1521,9 +1521,9 @@ static u32 get_sqp_num(struct mlx4_ib_dev *dev, struct ib_qp_init_attr *attr) } /* PF or VF -- creating proxies */ if (attr->qp_type == IB_QPT_SMI) - return dev->dev->caps.qp0_proxy[attr->port_num - 1]; + return dev->dev->caps.spec_qps[attr->port_num - 1].qp0_proxy; else - return dev->dev->caps.qp1_proxy[attr->port_num - 1]; + return dev->dev->caps.spec_qps[attr->port_num - 1].qp1_proxy; } static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd, @@ -2880,9 +2880,9 @@ static int vf_get_qp0_qkey(struct mlx4_dev *dev, int qpn, u32 *qkey) { int i; for (i = 0; i < dev->caps.num_ports; i++) { - if (qpn == dev->caps.qp0_proxy[i] || - qpn == dev->caps.qp0_tunnel[i]) { - *qkey = dev->caps.qp0_qkey[i]; + if (qpn == dev->caps.spec_qps[i].qp0_proxy || + qpn == dev->caps.spec_qps[i].qp0_tunnel) { + *qkey = dev->caps.spec_qps[i].qp0_qkey; return 0; } } @@ -2943,7 +2943,7 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp, sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn); else sqp->ud_header.bth.destination_qpn = - cpu_to_be32(mdev->dev->caps.qp0_tunnel[sqp->qp.port - 1]); + cpu_to_be32(mdev->dev->caps.spec_qps[sqp->qp.port - 1].qp0_tunnel); sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); if (mlx4_is_master(mdev->dev)) { @@ -3403,9 +3403,9 @@ static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev, memcpy(dseg->av, &sqp_av, sizeof (struct mlx4_av)); if (qpt == MLX4_IB_QPT_PROXY_GSI) - dseg->dqpn = cpu_to_be32(dev->dev->caps.qp1_tunnel[port - 1]); + dseg->dqpn = cpu_to_be32(dev->dev->caps.spec_qps[port - 1].qp1_tunnel); else - dseg->dqpn = cpu_to_be32(dev->dev->caps.qp0_tunnel[port - 1]); + dseg->dqpn = cpu_to_be32(dev->dev->caps.spec_qps[port - 1].qp0_tunnel); /* Use QKEY from the QP context, which is set by master */ dseg->qkey = cpu_to_be32(IB_QP_SET_QKEY); } diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c index 
0ba5ba7540c8..e219093d2764 100644 --- a/drivers/infiniband/hw/mlx4/sysfs.c +++ b/drivers/infiniband/hw/mlx4/sysfs.c @@ -221,7 +221,7 @@ void del_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num, static int add_port_entries(struct mlx4_ib_dev *device, int port_num) { int i; - char buff[10]; + char buff[11]; struct mlx4_ib_iov_port *port = NULL; int ret = 0 ; struct ib_port_attr attr; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index ab3c562d5ba7..552f7bd4ecc3 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -778,13 +778,13 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, } if (MLX5_CAP_GEN(mdev, tag_matching)) { - props->xrq_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE; - props->xrq_caps.max_num_tags = + props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE; + props->tm_caps.max_num_tags = (1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1; - props->xrq_caps.flags = IB_TM_CAP_RC; - props->xrq_caps.max_ops = + props->tm_caps.flags = IB_TM_CAP_RC; + props->tm_caps.max_ops = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz); - props->xrq_caps.max_sge = MLX5_TM_MAX_SGE; + props->tm_caps.max_sge = MLX5_TM_MAX_SGE; } if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) { @@ -3837,11 +3837,13 @@ static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev) if (!dbg) return -ENOMEM; + dev->delay_drop.dbg = dbg; + dbg->dir_debugfs = debugfs_create_dir("delay_drop", dev->mdev->priv.dbg_root); if (!dbg->dir_debugfs) - return -ENOMEM; + goto out_debugfs; dbg->events_cnt_debugfs = debugfs_create_atomic_t("num_timeout_events", 0400, @@ -3865,8 +3867,6 @@ static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev) if (!dbg->timeout_debugfs) goto out_debugfs; - dev->delay_drop.dbg = dbg; - return 0; out_debugfs: @@ -4174,9 +4174,9 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) err_uar_page: mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar); -err_cnt: - mlx5_ib_cleanup_cong_debugfs(dev); err_cong: + mlx5_ib_cleanup_cong_debugfs(dev); +err_cnt: if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) mlx5_ib_dealloc_counters(dev); diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c index 914f212e7ef6..f3dbd75a0a96 100644 --- a/drivers/infiniband/hw/mlx5/mem.c +++ b/drivers/infiniband/hw/mlx5/mem.c @@ -50,13 +50,9 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, { unsigned long tmp; unsigned long m; - int i, k; - u64 base = 0; - int p = 0; - int skip; - int mask; - u64 len; - u64 pfn; + u64 base = ~0, p = 0; + u64 len, pfn; + int i = 0; struct scatterlist *sg; int entry; unsigned long page_shift = umem->page_shift; @@ -76,33 +72,24 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, m = find_first_bit(&tmp, BITS_PER_LONG); if (max_page_shift) m = min_t(unsigned long, max_page_shift - page_shift, m); - skip = 1 << m; - mask = skip - 1; - i = 0; + for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { len = sg_dma_len(sg) >> page_shift; pfn = sg_dma_address(sg) >> page_shift; - for (k = 0; k < len; k++) { - if (!(i & mask)) { - tmp = (unsigned long)pfn; - m = min_t(unsigned long, m, find_first_bit(&tmp, BITS_PER_LONG)); - skip = 1 << m; - mask = skip - 1; - base = pfn; - p = 0; - } else { - if (base + p != pfn) { - tmp = (unsigned long)p; - m = find_first_bit(&tmp, BITS_PER_LONG); - skip = 1 << m; - mask = skip - 1; - base = pfn; - p = 0; - } - } - p++; - i++; + if (base + p != pfn) { + /* If either the offset or the new + * base are unaligned update m + */ + 
tmp = (unsigned long)(pfn | p); + if (!IS_ALIGNED(tmp, 1 << m)) + m = find_first_bit(&tmp, BITS_PER_LONG); + + base = pfn; + p = 0; } + + p += len; + i += len; } if (i) { diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 0e2789d9bb4d..37bbc543847a 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -47,7 +47,8 @@ enum { #define MLX5_UMR_ALIGN 2048 -static int clean_mr(struct mlx5_ib_mr *mr); +static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); +static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); static int mr_cache_max_order(struct mlx5_ib_dev *dev); static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); @@ -1270,8 +1271,9 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift, update_xlt_flags); + if (err) { - mlx5_ib_dereg_mr(&mr->ibmr); + dereg_mr(dev, mr); return ERR_PTR(err); } } @@ -1356,7 +1358,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, err = mr_umem_get(pd, addr, len, access_flags, &mr->umem, &npages, &page_shift, &ncont, &order); if (err < 0) { - clean_mr(mr); + clean_mr(dev, mr); return err; } } @@ -1410,7 +1412,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, if (err) { mlx5_ib_warn(dev, "Failed to rereg UMR\n"); ib_umem_release(mr->umem); - clean_mr(mr); + clean_mr(dev, mr); return err; } } @@ -1469,9 +1471,8 @@ mlx5_free_priv_descs(struct mlx5_ib_mr *mr) } } -static int clean_mr(struct mlx5_ib_mr *mr) +static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) { - struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); int allocated_from_cache = mr->allocated_from_cache; int err; @@ -1507,10 +1508,8 @@ static int clean_mr(struct mlx5_ib_mr *mr) return 0; } -int mlx5_ib_dereg_mr(struct ib_mr *ibmr) +static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) { - struct mlx5_ib_dev *dev = to_mdev(ibmr->device); - struct mlx5_ib_mr *mr = to_mmr(ibmr); int npages = mr->npages; struct ib_umem *umem = mr->umem; @@ -1539,7 +1538,7 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr) } #endif - clean_mr(mr); + clean_mr(dev, mr); if (umem) { ib_umem_release(umem); @@ -1549,6 +1548,14 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr) return 0; } +int mlx5_ib_dereg_mr(struct ib_mr *ibmr) +{ + struct mlx5_ib_dev *dev = to_mdev(ibmr->device); + struct mlx5_ib_mr *mr = to_mmr(ibmr); + + return dereg_mr(dev, mr); +} + struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, u32 max_num_sg) diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index f0dc5f4aa177..442b9bdc0f03 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -3232,7 +3232,7 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr, mr->ibmr.iova); set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_FMR_WQE_LENGTH_LOW_IDX, - mr->ibmr.length); + lower_32_bits(mr->ibmr.length)); set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_FMR_WQE_LENGTH_HIGH_IDX, 0); set_wqe_32bit_value(wqe->wqe_words, @@ -3274,7 +3274,7 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr, mr->npages * 8); nes_debug(NES_DBG_IW_TX, "SQ_REG_MR: iova_start: %llx, " - "length: %d, rkey: %0x, pgl_paddr: %llx, " + "length: %lld, rkey: %0x, pgl_paddr: %llx, " "page_list_len: %u, wqe_misc: %x\n", (unsigned long long) mr->ibmr.iova, mr->ibmr.length, diff --git 
a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c index dcb5942f9fb5..65b166cc7437 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c @@ -252,7 +252,10 @@ static int ocrdma_get_mbx_errno(u32 status) case OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES: err_num = -EAGAIN; break; + default: + err_num = -EFAULT; } + break; default: err_num = -EFAULT; } diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h index b2bb42e2805d..254083b524bd 100644 --- a/drivers/infiniband/hw/qedr/qedr.h +++ b/drivers/infiniband/hw/qedr/qedr.h @@ -387,7 +387,7 @@ struct qedr_qp { u8 wqe_size; u8 smac[ETH_ALEN]; - u16 vlan_id; + u16 vlan; int rc; } *rqe_wr_id; diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c index 4689e802b332..ad8965397cf7 100644 --- a/drivers/infiniband/hw/qedr/qedr_cm.c +++ b/drivers/infiniband/hw/qedr/qedr_cm.c @@ -105,7 +105,7 @@ void qedr_ll2_complete_rx_packet(void *cxt, qp->rqe_wr_id[qp->rq.gsi_cons].rc = data->u.data_length_error ? -EINVAL : 0; - qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = data->vlan; + qp->rqe_wr_id[qp->rq.gsi_cons].vlan = data->vlan; /* note: length stands for data length i.e. GRH is excluded */ qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length = data->length.data_length; @@ -694,6 +694,7 @@ int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) struct qedr_cq *cq = get_qedr_cq(ibcq); struct qedr_qp *qp = dev->gsi_qp; unsigned long flags; + u16 vlan_id; int i = 0; spin_lock_irqsave(&cq->cq_lock, flags); @@ -712,9 +713,14 @@ int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK; ether_addr_copy(wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac); wc[i].wc_flags |= IB_WC_WITH_SMAC; - if (qp->rqe_wr_id[qp->rq.cons].vlan_id) { + + vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan & + VLAN_VID_MASK; + if (vlan_id) { wc[i].wc_flags |= IB_WC_WITH_VLAN; - wc[i].vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan_id; + wc[i].vlan_id = vlan_id; + wc[i].sl = (qp->rqe_wr_id[qp->rq.cons].vlan & + VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; } qedr_inc_sw_cons(&qp->rq); diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c index c49db7c33979..4381c0a9a873 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom.c +++ b/drivers/infiniband/hw/usnic/usnic_uiom.c @@ -227,7 +227,7 @@ static void __usnic_uiom_reg_release(struct usnic_uiom_pd *pd, vpn_last = vpn_start + npages - 1; spin_lock(&pd->lock); - usnic_uiom_remove_interval(&pd->rb_root, vpn_start, + usnic_uiom_remove_interval(&pd->root, vpn_start, vpn_last, &rm_intervals); usnic_uiom_unmap_sorted_intervals(&rm_intervals, pd); @@ -379,7 +379,7 @@ struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd, err = usnic_uiom_get_intervals_diff(vpn_start, vpn_last, (writable) ? IOMMU_WRITE : 0, IOMMU_WRITE, - &pd->rb_root, + &pd->root, &sorted_diff_intervals); if (err) { usnic_err("Failed disjoint interval vpn [0x%lx,0x%lx] err %d\n", @@ -395,7 +395,7 @@ struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd, } - err = usnic_uiom_insert_interval(&pd->rb_root, vpn_start, vpn_last, + err = usnic_uiom_insert_interval(&pd->root, vpn_start, vpn_last, (writable) ? 
IOMMU_WRITE : 0); if (err) { usnic_err("Failed insert interval vpn [0x%lx,0x%lx] err %d\n", diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.h b/drivers/infiniband/hw/usnic/usnic_uiom.h index 45ca7c1613a7..431efe4143f4 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom.h +++ b/drivers/infiniband/hw/usnic/usnic_uiom.h @@ -55,7 +55,7 @@ struct usnic_uiom_dev { struct usnic_uiom_pd { struct iommu_domain *domain; spinlock_t lock; - struct rb_root rb_root; + struct rb_root_cached root; struct list_head devs; int dev_cnt; }; diff --git a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c index 42b4b4c4e452..d399523206c7 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c +++ b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c @@ -100,9 +100,9 @@ static int interval_cmp(void *priv, struct list_head *a, struct list_head *b) } static void -find_intervals_intersection_sorted(struct rb_root *root, unsigned long start, - unsigned long last, - struct list_head *list) +find_intervals_intersection_sorted(struct rb_root_cached *root, + unsigned long start, unsigned long last, + struct list_head *list) { struct usnic_uiom_interval_node *node; @@ -118,7 +118,7 @@ find_intervals_intersection_sorted(struct rb_root *root, unsigned long start, int usnic_uiom_get_intervals_diff(unsigned long start, unsigned long last, int flags, int flag_mask, - struct rb_root *root, + struct rb_root_cached *root, struct list_head *diff_set) { struct usnic_uiom_interval_node *interval, *tmp; @@ -175,7 +175,7 @@ void usnic_uiom_put_interval_set(struct list_head *intervals) kfree(interval); } -int usnic_uiom_insert_interval(struct rb_root *root, unsigned long start, +int usnic_uiom_insert_interval(struct rb_root_cached *root, unsigned long start, unsigned long last, int flags) { struct usnic_uiom_interval_node *interval, *tmp; @@ -246,8 +246,9 @@ int usnic_uiom_insert_interval(struct rb_root *root, unsigned long start, return err; } -void usnic_uiom_remove_interval(struct rb_root *root, unsigned long start, - unsigned long last, struct list_head *removed) +void usnic_uiom_remove_interval(struct rb_root_cached *root, + unsigned long start, unsigned long last, + struct list_head *removed) { struct usnic_uiom_interval_node *interval; diff --git a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h index c0b0b876ab90..1d7fc3226bca 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h +++ b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h @@ -48,12 +48,12 @@ struct usnic_uiom_interval_node { extern void usnic_uiom_interval_tree_insert(struct usnic_uiom_interval_node *node, - struct rb_root *root); + struct rb_root_cached *root); extern void usnic_uiom_interval_tree_remove(struct usnic_uiom_interval_node *node, - struct rb_root *root); + struct rb_root_cached *root); extern struct usnic_uiom_interval_node * -usnic_uiom_interval_tree_iter_first(struct rb_root *root, +usnic_uiom_interval_tree_iter_first(struct rb_root_cached *root, unsigned long start, unsigned long last); extern struct usnic_uiom_interval_node * @@ -63,7 +63,7 @@ usnic_uiom_interval_tree_iter_next(struct usnic_uiom_interval_node *node, * Inserts {start...last} into {root}. 
If there are overlaps, * nodes will be broken up and merged */ -int usnic_uiom_insert_interval(struct rb_root *root, +int usnic_uiom_insert_interval(struct rb_root_cached *root, unsigned long start, unsigned long last, int flags); /* @@ -71,7 +71,7 @@ int usnic_uiom_insert_interval(struct rb_root *root, * 'removed.' The caller is responsibile for freeing memory of nodes in * 'removed.' */ -void usnic_uiom_remove_interval(struct rb_root *root, +void usnic_uiom_remove_interval(struct rb_root_cached *root, unsigned long start, unsigned long last, struct list_head *removed); /* @@ -81,7 +81,7 @@ void usnic_uiom_remove_interval(struct rb_root *root, int usnic_uiom_get_intervals_diff(unsigned long start, unsigned long last, int flags, int flag_mask, - struct rb_root *root, + struct rb_root_cached *root, struct list_head *diff_set); /* Call this to free diff_set returned by usnic_uiom_get_intervals_diff */ void usnic_uiom_put_interval_set(struct list_head *intervals); diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h index 663a0c301c43..984aa3484928 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h @@ -416,9 +416,34 @@ static inline enum ib_wc_status pvrdma_wc_status_to_ib( return (enum ib_wc_status)status; } -static inline int pvrdma_wc_opcode_to_ib(int opcode) +static inline int pvrdma_wc_opcode_to_ib(unsigned int opcode) { - return opcode; + switch (opcode) { + case PVRDMA_WC_SEND: + return IB_WC_SEND; + case PVRDMA_WC_RDMA_WRITE: + return IB_WC_RDMA_WRITE; + case PVRDMA_WC_RDMA_READ: + return IB_WC_RDMA_READ; + case PVRDMA_WC_COMP_SWAP: + return IB_WC_COMP_SWAP; + case PVRDMA_WC_FETCH_ADD: + return IB_WC_FETCH_ADD; + case PVRDMA_WC_LOCAL_INV: + return IB_WC_LOCAL_INV; + case PVRDMA_WC_FAST_REG_MR: + return IB_WC_REG_MR; + case PVRDMA_WC_MASKED_COMP_SWAP: + return IB_WC_MASKED_COMP_SWAP; + case PVRDMA_WC_MASKED_FETCH_ADD: + return IB_WC_MASKED_FETCH_ADD; + case PVRDMA_WC_RECV: + return IB_WC_RECV; + case PVRDMA_WC_RECV_RDMA_WITH_IMM: + return IB_WC_RECV_RDMA_WITH_IMM; + default: + return IB_WC_SEND; + } } static inline int pvrdma_wc_flags_to_ib(int flags) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 14b62f7472b4..7774654c2ccb 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -823,12 +823,18 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) wc->status != IB_WC_WR_FLUSH_ERR) { struct ipoib_neigh *neigh; - if (wc->status != IB_WC_RNR_RETRY_EXC_ERR) - ipoib_warn(priv, "failed cm send event (status=%d, wrid=%d vend_err %x)\n", - wc->status, wr_id, wc->vendor_err); + /* IB_WC[_RNR]_RETRY_EXC_ERR error is part of the life cycle, + * so don't make waves. 
+ */ + if (wc->status == IB_WC_RNR_RETRY_EXC_ERR || + wc->status == IB_WC_RETRY_EXC_ERR) + ipoib_dbg(priv, + "%s: failed cm send event (status=%d, wrid=%d vend_err 0x%x)\n", + __func__, wc->status, wr_id, wc->vendor_err); else - ipoib_dbg(priv, "failed cm send event (status=%d, wrid=%d vend_err %x)\n", - wc->status, wr_id, wc->vendor_err); + ipoib_warn(priv, + "%s: failed cm send event (status=%d, wrid=%d vend_err 0x%x)\n", + __func__, wc->status, wr_id, wc->vendor_err); spin_lock_irqsave(&priv->lock, flags); neigh = tx->neigh; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 2e075377242e..6cd61638b441 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -1000,19 +1000,6 @@ static inline int update_parent_pkey(struct ipoib_dev_priv *priv) */ priv->dev->broadcast[8] = priv->pkey >> 8; priv->dev->broadcast[9] = priv->pkey & 0xff; - - /* - * Update the broadcast address in the priv->broadcast object, - * in case it already exists, otherwise no one will do that. - */ - if (priv->broadcast) { - spin_lock_irq(&priv->lock); - memcpy(priv->broadcast->mcmember.mgid.raw, - priv->dev->broadcast + 4, - sizeof(union ib_gid)); - spin_unlock_irq(&priv->lock); - } - return 0; } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index bac95b509a9b..dcc77014018d 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -2180,6 +2180,7 @@ static struct net_device *ipoib_add_port(const char *format, { struct ipoib_dev_priv *priv; struct ib_port_attr attr; + struct rdma_netdev *rn; int result = -ENOMEM; priv = ipoib_intf_alloc(hca, port, format); @@ -2279,7 +2280,8 @@ static struct net_device *ipoib_add_port(const char *format, ipoib_dev_cleanup(priv->dev); device_init_failed: - free_netdev(priv->dev); + rn = netdev_priv(priv->dev); + rn->free_rdma_netdev(priv->dev); kfree(priv); alloc_mem_failed: @@ -2328,7 +2330,7 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data) return; list_for_each_entry_safe(priv, tmp, dev_list, list) { - struct rdma_netdev *rn = netdev_priv(priv->dev); + struct rdma_netdev *parent_rn = netdev_priv(priv->dev); ib_unregister_event_handler(&priv->event_handler); flush_workqueue(ipoib_workqueue); @@ -2350,10 +2352,15 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data) unregister_netdev(priv->dev); mutex_unlock(&priv->sysfs_mutex); - rn->free_rdma_netdev(priv->dev); + parent_rn->free_rdma_netdev(priv->dev); - list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) + list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) { + struct rdma_netdev *child_rn; + + child_rn = netdev_priv(cpriv->dev); + child_rn->free_rdma_netdev(cpriv->dev); kfree(cpriv); + } kfree(priv); } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c index 9927cd6b7082..55a9b71ed05a 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c @@ -141,14 +141,17 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey) return restart_syscall(); } - priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name); - if (!priv) { + if (!down_write_trylock(&ppriv->vlan_rwsem)) { rtnl_unlock(); mutex_unlock(&ppriv->sysfs_mutex); - return -ENOMEM; + return restart_syscall(); } - down_write(&ppriv->vlan_rwsem); + priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name); + 
if (!priv) { + result = -ENOMEM; + goto out; + } /* * First ensure this isn't a duplicate. We check the parent device and @@ -175,8 +178,11 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey) rtnl_unlock(); mutex_unlock(&ppriv->sysfs_mutex); - if (result) { - free_netdev(priv->dev); + if (result && priv) { + struct rdma_netdev *rn; + + rn = netdev_priv(priv->dev); + rn->free_rdma_netdev(priv->dev); kfree(priv); } @@ -204,7 +210,12 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey) return restart_syscall(); } - down_write(&ppriv->vlan_rwsem); + if (!down_write_trylock(&ppriv->vlan_rwsem)) { + rtnl_unlock(); + mutex_unlock(&ppriv->sysfs_mutex); + return restart_syscall(); + } + list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) { if (priv->pkey == pkey && priv->child_type == IPOIB_LEGACY_CHILD) { @@ -224,7 +235,10 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey) mutex_unlock(&ppriv->sysfs_mutex); if (dev) { - free_netdev(dev); + struct rdma_netdev *rn; + + rn = netdev_priv(dev); + rn->free_rdma_netdev(priv->dev); kfree(priv); return 0; } diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index 9c3e9ab53a41..322209d5ff58 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c @@ -154,7 +154,7 @@ static void iser_dump_page_vec(struct iser_page_vec *page_vec) { int i; - iser_err("page vec npages %d data length %d\n", + iser_err("page vec npages %d data length %lld\n", page_vec->npages, page_vec->fake_mr.length); for (i = 0; i < page_vec->npages; i++) iser_err("vec[%d]: %llx\n", i, page_vec->pages[i]); diff --git a/drivers/input/ff-core.c b/drivers/input/ff-core.c index 8f2042432c85..66a46c84e28f 100644 --- a/drivers/input/ff-core.c +++ b/drivers/input/ff-core.c @@ -237,9 +237,15 @@ int input_ff_erase(struct input_dev *dev, int effect_id, struct file *file) EXPORT_SYMBOL_GPL(input_ff_erase); /* - * flush_effects - erase all effects owned by a file handle + * input_ff_flush - erase all effects owned by a file handle + * @dev: input device to erase effect from + * @file: purported owner of the effects + * + * This function erases all force-feedback effects associated with + * the given owner from specified device. Note that @file may be %NULL, + * in which case all effects will be erased. 
*/ -static int flush_effects(struct input_dev *dev, struct file *file) +int input_ff_flush(struct input_dev *dev, struct file *file) { struct ff_device *ff = dev->ff; int i; @@ -255,6 +261,7 @@ static int flush_effects(struct input_dev *dev, struct file *file) return 0; } +EXPORT_SYMBOL_GPL(input_ff_flush); /** * input_ff_event() - generic handler for force-feedback events @@ -343,7 +350,7 @@ int input_ff_create(struct input_dev *dev, unsigned int max_effects) mutex_init(&ff->mutex); dev->ff = ff; - dev->flush = flush_effects; + dev->flush = input_ff_flush; dev->event = input_ff_event; __set_bit(EV_FF, dev->evbit); diff --git a/drivers/input/input.c b/drivers/input/input.c index 7e6842bd525c..d268fdc23c64 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -1398,7 +1398,7 @@ static struct attribute *input_dev_attrs[] = { NULL }; -static struct attribute_group input_dev_attr_group = { +static const struct attribute_group input_dev_attr_group = { .attrs = input_dev_attrs, }; @@ -1425,7 +1425,7 @@ static struct attribute *input_dev_id_attrs[] = { NULL }; -static struct attribute_group input_dev_id_attr_group = { +static const struct attribute_group input_dev_id_attr_group = { .name = "id", .attrs = input_dev_id_attrs, }; @@ -1495,7 +1495,7 @@ static struct attribute *input_dev_caps_attrs[] = { NULL }; -static struct attribute_group input_dev_caps_attr_group = { +static const struct attribute_group input_dev_caps_attr_group = { .name = "capabilities", .attrs = input_dev_caps_attrs, }; diff --git a/drivers/input/joystick/adi.c b/drivers/input/joystick/adi.c index d09cefa37931..15a71acb6997 100644 --- a/drivers/input/joystick/adi.c +++ b/drivers/input/joystick/adi.c @@ -313,7 +313,7 @@ static void adi_close(struct input_dev *dev) static void adi_init_digital(struct gameport *gameport) { - int seq[] = { 4, -2, -3, 10, -6, -11, -7, -9, 11, 0 }; + static const int seq[] = { 4, -2, -3, 10, -6, -11, -7, -9, 11, 0 }; int i; for (i = 0; seq[i]; i++) { diff --git a/drivers/input/joystick/iforce/iforce-serio.c b/drivers/input/joystick/iforce/iforce-serio.c index 46d5041d2d9d..154e827b559b 100644 --- a/drivers/input/joystick/iforce/iforce-serio.c +++ b/drivers/input/joystick/iforce/iforce-serio.c @@ -164,7 +164,7 @@ static void iforce_serio_disconnect(struct serio *serio) kfree(iforce); } -static struct serio_device_id iforce_serio_ids[] = { +static const struct serio_device_id iforce_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_IFORCE, diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c index db64adfbe1af..e8724f1a4a25 100644 --- a/drivers/input/joystick/iforce/iforce-usb.c +++ b/drivers/input/joystick/iforce/iforce-usb.c @@ -209,7 +209,7 @@ static void iforce_usb_disconnect(struct usb_interface *intf) kfree(iforce); } -static struct usb_device_id iforce_usb_ids [] = { +static const struct usb_device_id iforce_usb_ids[] = { { USB_DEVICE(0x044f, 0xa01c) }, /* Thrustmaster Motor Sport GT */ { USB_DEVICE(0x046d, 0xc281) }, /* Logitech WingMan Force */ { USB_DEVICE(0x046d, 0xc291) }, /* Logitech WingMan Formula Force */ diff --git a/drivers/input/joystick/magellan.c b/drivers/input/joystick/magellan.c index c5358ba1f571..a9d0e3edca94 100644 --- a/drivers/input/joystick/magellan.c +++ b/drivers/input/joystick/magellan.c @@ -198,7 +198,7 @@ static int magellan_connect(struct serio *serio, struct serio_driver *drv) * The serio driver structure. 
*/ -static struct serio_device_id magellan_serio_ids[] = { +static const struct serio_device_id magellan_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_MAGELLAN, diff --git a/drivers/input/joystick/spaceball.c b/drivers/input/joystick/spaceball.c index f4445a4e8d6a..e9712a1b7cad 100644 --- a/drivers/input/joystick/spaceball.c +++ b/drivers/input/joystick/spaceball.c @@ -272,7 +272,7 @@ static int spaceball_connect(struct serio *serio, struct serio_driver *drv) * The serio driver structure. */ -static struct serio_device_id spaceball_serio_ids[] = { +static const struct serio_device_id spaceball_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_SPACEBALL, diff --git a/drivers/input/joystick/spaceorb.c b/drivers/input/joystick/spaceorb.c index f2667820e8c5..05da0ed514e2 100644 --- a/drivers/input/joystick/spaceorb.c +++ b/drivers/input/joystick/spaceorb.c @@ -213,7 +213,7 @@ static int spaceorb_connect(struct serio *serio, struct serio_driver *drv) * The serio driver structure. */ -static struct serio_device_id spaceorb_serio_ids[] = { +static const struct serio_device_id spaceorb_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_SPACEORB, diff --git a/drivers/input/joystick/stinger.c b/drivers/input/joystick/stinger.c index 099c6d7b5e08..cb10e7b097ae 100644 --- a/drivers/input/joystick/stinger.c +++ b/drivers/input/joystick/stinger.c @@ -184,7 +184,7 @@ static int stinger_connect(struct serio *serio, struct serio_driver *drv) * The serio driver structure. */ -static struct serio_device_id stinger_serio_ids[] = { +static const struct serio_device_id stinger_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_STINGER, diff --git a/drivers/input/joystick/twidjoy.c b/drivers/input/joystick/twidjoy.c index 7f7e5ab3f9e3..e60cb004cb8c 100644 --- a/drivers/input/joystick/twidjoy.c +++ b/drivers/input/joystick/twidjoy.c @@ -233,7 +233,7 @@ static int twidjoy_connect(struct serio *serio, struct serio_driver *drv) * The serio driver structure. */ -static struct serio_device_id twidjoy_serio_ids[] = { +static const struct serio_device_id twidjoy_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_TWIDJOY, diff --git a/drivers/input/joystick/warrior.c b/drivers/input/joystick/warrior.c index e13a9144a25d..ef5391ba4470 100644 --- a/drivers/input/joystick/warrior.c +++ b/drivers/input/joystick/warrior.c @@ -193,7 +193,7 @@ static int warrior_connect(struct serio *serio, struct serio_driver *drv) * The serio driver structure. 
*/ -static struct serio_device_id warrior_serio_ids[] = { +static const struct serio_device_id warrior_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_WARRIOR, diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index ca0e19ae7a90..d86e59515b9c 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -408,7 +408,7 @@ static const signed short xpad_abs_triggers[] = { #define XPAD_XBOXONE_VENDOR(vend) \ { XPAD_XBOXONE_VENDOR_PROTOCOL(vend, 208) } -static struct usb_device_id xpad_table[] = { +static const struct usb_device_id xpad_table[] = { { USB_INTERFACE_INFO('X', 'B', 0) }, /* X-Box USB-IF not approved class */ XPAD_XBOX360_VENDOR(0x044f), /* Thrustmaster X-Box 360 controllers */ XPAD_XBOX360_VENDOR(0x045e), /* Microsoft X-Box 360 controllers */ @@ -1764,10 +1764,12 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id struct usb_endpoint_descriptor *ep = &intf->cur_altsetting->endpoint[i].desc; - if (usb_endpoint_dir_in(ep)) - ep_irq_in = ep; - else - ep_irq_out = ep; + if (usb_endpoint_xfer_int(ep)) { + if (usb_endpoint_dir_in(ep)) + ep_irq_in = ep; + else + ep_irq_out = ep; + } } if (!ep_irq_in || !ep_irq_out) { diff --git a/drivers/input/joystick/zhenhua.c b/drivers/input/joystick/zhenhua.c index 4a8258bf13fd..5c6d5de743f1 100644 --- a/drivers/input/joystick/zhenhua.c +++ b/drivers/input/joystick/zhenhua.c @@ -192,7 +192,7 @@ static int zhenhua_connect(struct serio *serio, struct serio_driver *drv) * The serio driver structure. */ -static struct serio_device_id zhenhua_serio_ids[] = { +static const struct serio_device_id zhenhua_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_ZHENHUA, diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c index ec876b5b1382..7e75835e220f 100644 --- a/drivers/input/keyboard/atkbd.c +++ b/drivers/input/keyboard/atkbd.c @@ -1270,7 +1270,7 @@ static int atkbd_reconnect(struct serio *serio) return retval; } -static struct serio_device_id atkbd_serio_ids[] = { +static const struct serio_device_id atkbd_serio_ids[] = { { .type = SERIO_8042, .proto = SERIO_ANY, diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c index 0b10d4b356db..e9f0ebf3267a 100644 --- a/drivers/input/keyboard/gpio_keys.c +++ b/drivers/input/keyboard/gpio_keys.c @@ -353,7 +353,7 @@ static struct attribute *gpio_keys_attrs[] = { NULL, }; -static struct attribute_group gpio_keys_attr_group = { +static const struct attribute_group gpio_keys_attr_group = { .attrs = gpio_keys_attrs, }; diff --git a/drivers/input/keyboard/hil_kbd.c b/drivers/input/keyboard/hil_kbd.c index 5b152f25a8e1..bb29a7c9a1c0 100644 --- a/drivers/input/keyboard/hil_kbd.c +++ b/drivers/input/keyboard/hil_kbd.c @@ -559,7 +559,7 @@ static int hil_dev_connect(struct serio *serio, struct serio_driver *drv) return error; } -static struct serio_device_id hil_dev_ids[] = { +static const struct serio_device_id hil_dev_ids[] = { { .type = SERIO_HIL_MLC, .proto = SERIO_HIL, diff --git a/drivers/input/keyboard/lkkbd.c b/drivers/input/keyboard/lkkbd.c index 9fcd9f1d5dc8..471d53815c6d 100644 --- a/drivers/input/keyboard/lkkbd.c +++ b/drivers/input/keyboard/lkkbd.c @@ -707,7 +707,7 @@ static void lkkbd_disconnect(struct serio *serio) kfree(lk); } -static struct serio_device_id lkkbd_serio_ids[] = { +static const struct serio_device_id lkkbd_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_LKKBD, diff --git a/drivers/input/keyboard/newtonkbd.c b/drivers/input/keyboard/newtonkbd.c index 
20f044377990..fb9b8e23ab93 100644 --- a/drivers/input/keyboard/newtonkbd.c +++ b/drivers/input/keyboard/newtonkbd.c @@ -142,7 +142,7 @@ static void nkbd_disconnect(struct serio *serio) kfree(nkbd); } -static struct serio_device_id nkbd_serio_ids[] = { +static const struct serio_device_id nkbd_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_NEWTON, diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c index 3841fa30db33..d0bdaeadf86d 100644 --- a/drivers/input/keyboard/pxa27x_keypad.c +++ b/drivers/input/keyboard/pxa27x_keypad.c @@ -644,9 +644,12 @@ static void pxa27x_keypad_config(struct pxa27x_keypad *keypad) static int pxa27x_keypad_open(struct input_dev *dev) { struct pxa27x_keypad *keypad = input_get_drvdata(dev); - + int ret; /* Enable unit clock */ - clk_prepare_enable(keypad->clk); + ret = clk_prepare_enable(keypad->clk); + if (ret) + return ret; + pxa27x_keypad_config(keypad); return 0; @@ -683,6 +686,7 @@ static int pxa27x_keypad_resume(struct device *dev) struct platform_device *pdev = to_platform_device(dev); struct pxa27x_keypad *keypad = platform_get_drvdata(pdev); struct input_dev *input_dev = keypad->input_dev; + int ret = 0; /* * If the keypad is used as wake up source, the clock is not turned @@ -695,14 +699,15 @@ static int pxa27x_keypad_resume(struct device *dev) if (input_dev->users) { /* Enable unit clock */ - clk_prepare_enable(keypad->clk); - pxa27x_keypad_config(keypad); + ret = clk_prepare_enable(keypad->clk); + if (!ret) + pxa27x_keypad_config(keypad); } mutex_unlock(&input_dev->mutex); } - return 0; + return ret; } #endif diff --git a/drivers/input/keyboard/stowaway.c b/drivers/input/keyboard/stowaway.c index a6e0d565e306..8b6de9a692dc 100644 --- a/drivers/input/keyboard/stowaway.c +++ b/drivers/input/keyboard/stowaway.c @@ -146,7 +146,7 @@ static void skbd_disconnect(struct serio *serio) kfree(skbd); } -static struct serio_device_id skbd_serio_ids[] = { +static const struct serio_device_id skbd_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_STOWAWAY, diff --git a/drivers/input/keyboard/sunkbd.c b/drivers/input/keyboard/sunkbd.c index dc6bb9d5b4f0..c95707ea2656 100644 --- a/drivers/input/keyboard/sunkbd.c +++ b/drivers/input/keyboard/sunkbd.c @@ -339,7 +339,7 @@ static void sunkbd_disconnect(struct serio *serio) kfree(sunkbd); } -static struct serio_device_id sunkbd_serio_ids[] = { +static const struct serio_device_id sunkbd_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_SUNKBD, diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c index 0c07e1023a46..edc1385ca00b 100644 --- a/drivers/input/keyboard/tegra-kbc.c +++ b/drivers/input/keyboard/tegra-kbc.c @@ -370,8 +370,11 @@ static int tegra_kbc_start(struct tegra_kbc *kbc) { unsigned int debounce_cnt; u32 val = 0; + int ret; - clk_prepare_enable(kbc->clk); + ret = clk_prepare_enable(kbc->clk); + if (ret) + return ret; /* Reset the KBC controller to clear all previous status.*/ reset_control_assert(kbc->rst); diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c index 39e72b3219d8..f9f98ef1d98e 100644 --- a/drivers/input/keyboard/twl4030_keypad.c +++ b/drivers/input/keyboard/twl4030_keypad.c @@ -30,7 +30,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/input/keyboard/xtkbd.c b/drivers/input/keyboard/xtkbd.c index 7c2325bd7408..8f64b9ded8d0 100644 --- a/drivers/input/keyboard/xtkbd.c +++ b/drivers/input/keyboard/xtkbd.c @@ -145,7 +145,7 @@ static 
void xtkbd_disconnect(struct serio *serio) kfree(xtkbd); } -static struct serio_device_id xtkbd_serio_ids[] = { +static const struct serio_device_id xtkbd_serio_ids[] = { { .type = SERIO_XT, .proto = SERIO_ANY, diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index 3872488c3fd7..9f082a388388 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig @@ -581,6 +581,29 @@ config INPUT_PWM_BEEPER To compile this driver as a module, choose M here: the module will be called pwm-beeper. +config INPUT_PWM_VIBRA + tristate "PWM vibrator support" + depends on PWM + select INPUT_FF_MEMLESS + help + Say Y here to get support for PWM based vibrator devices. + + If unsure, say N. + + To compile this driver as a module, choose M here: the module will be + called pwm-vibra. + +config INPUT_RK805_PWRKEY + tristate "Rockchip RK805 PMIC power key support" + depends on MFD_RK808 + help + Select this option to enable power key driver for RK805. + + If unsure, say N. + + To compile this driver as a module, choose M here: the module will be + called rk805_pwrkey. + config INPUT_GPIO_ROTARY_ENCODER tristate "Rotary encoders connected to GPIO pins" depends on GPIOLIB || COMPILE_TEST diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile index b923a9828c88..03fd4262ada9 100644 --- a/drivers/input/misc/Makefile +++ b/drivers/input/misc/Makefile @@ -59,11 +59,13 @@ obj-$(CONFIG_INPUT_PM8XXX_VIBRATOR) += pm8xxx-vibrator.o obj-$(CONFIG_INPUT_PMIC8XXX_PWRKEY) += pmic8xxx-pwrkey.o obj-$(CONFIG_INPUT_POWERMATE) += powermate.o obj-$(CONFIG_INPUT_PWM_BEEPER) += pwm-beeper.o +obj-$(CONFIG_INPUT_PWM_VIBRA) += pwm-vibra.o obj-$(CONFIG_INPUT_RB532_BUTTON) += rb532_button.o obj-$(CONFIG_INPUT_REGULATOR_HAPTIC) += regulator-haptic.o obj-$(CONFIG_INPUT_RETU_PWRBUTTON) += retu-pwrbutton.o obj-$(CONFIG_INPUT_AXP20X_PEK) += axp20x-pek.o obj-$(CONFIG_INPUT_GPIO_ROTARY_ENCODER) += rotary_encoder.o +obj-$(CONFIG_INPUT_RK805_PWRKEY) += rk805-pwrkey.o obj-$(CONFIG_INPUT_SGI_BTNS) += sgi_btns.o obj-$(CONFIG_INPUT_SIRFSOC_ONKEY) += sirfsoc-onkey.o obj-$(CONFIG_INPUT_SOC_BUTTON_ARRAY) += soc_button_array.o diff --git a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c index 1c5914cae853..ebf4448b31b9 100644 --- a/drivers/input/misc/ati_remote2.c +++ b/drivers/input/misc/ati_remote2.c @@ -110,7 +110,7 @@ static const struct kernel_param_ops param_ops_mode_mask = { module_param(mode_mask, mode_mask, 0644); MODULE_PARM_DESC(mode_mask, "Bitmask of modes to accept <4:PC><3:AUX4><2:AUX3><1:AUX2><0:AUX1>"); -static struct usb_device_id ati_remote2_id_table[] = { +static const struct usb_device_id ati_remote2_id_table[] = { { USB_DEVICE(0x0471, 0x0602) }, /* ATI Remote Wonder II */ { } }; diff --git a/drivers/input/misc/axp20x-pek.c b/drivers/input/misc/axp20x-pek.c index cfeb0e943de6..6cee5adc3b5c 100644 --- a/drivers/input/misc/axp20x-pek.c +++ b/drivers/input/misc/axp20x-pek.c @@ -29,9 +29,17 @@ #define AXP20X_PEK_STARTUP_MASK (0xc0) #define AXP20X_PEK_SHUTDOWN_MASK (0x03) +struct axp20x_info { + const struct axp20x_time *startup_time; + unsigned int startup_mask; + const struct axp20x_time *shutdown_time; + unsigned int shutdown_mask; +}; + struct axp20x_pek { struct axp20x_dev *axp20x; struct input_dev *input; + struct axp20x_info *info; int irq_dbr; int irq_dbf; }; @@ -48,6 +56,13 @@ static const struct axp20x_time startup_time[] = { { .time = 2000, .idx = 3 }, }; +static const struct axp20x_time axp221_startup_time[] = { + { .time = 128, .idx = 0 }, + { .time = 1000, .idx = 
1 }, + { .time = 2000, .idx = 2 }, + { .time = 3000, .idx = 3 }, +}; + static const struct axp20x_time shutdown_time[] = { { .time = 4000, .idx = 0 }, { .time = 6000, .idx = 1 }, @@ -55,31 +70,25 @@ static const struct axp20x_time shutdown_time[] = { { .time = 10000, .idx = 3 }, }; -struct axp20x_pek_ext_attr { - const struct axp20x_time *p_time; - unsigned int mask; +static const struct axp20x_info axp20x_info = { + .startup_time = startup_time, + .startup_mask = AXP20X_PEK_STARTUP_MASK, + .shutdown_time = shutdown_time, + .shutdown_mask = AXP20X_PEK_SHUTDOWN_MASK, }; -static struct axp20x_pek_ext_attr axp20x_pek_startup_ext_attr = { - .p_time = startup_time, - .mask = AXP20X_PEK_STARTUP_MASK, +static const struct axp20x_info axp221_info = { + .startup_time = axp221_startup_time, + .startup_mask = AXP20X_PEK_STARTUP_MASK, + .shutdown_time = shutdown_time, + .shutdown_mask = AXP20X_PEK_SHUTDOWN_MASK, }; -static struct axp20x_pek_ext_attr axp20x_pek_shutdown_ext_attr = { - .p_time = shutdown_time, - .mask = AXP20X_PEK_SHUTDOWN_MASK, -}; - -static struct axp20x_pek_ext_attr *get_axp_ext_attr(struct device_attribute *attr) -{ - return container_of(attr, struct dev_ext_attribute, attr)->var; -} - -static ssize_t axp20x_show_ext_attr(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t axp20x_show_attr(struct device *dev, + const struct axp20x_time *time, + unsigned int mask, char *buf) { struct axp20x_pek *axp20x_pek = dev_get_drvdata(dev); - struct axp20x_pek_ext_attr *axp20x_ea = get_axp_ext_attr(attr); unsigned int val; int ret, i; @@ -87,22 +96,42 @@ static ssize_t axp20x_show_ext_attr(struct device *dev, if (ret != 0) return ret; - val &= axp20x_ea->mask; - val >>= ffs(axp20x_ea->mask) - 1; + val &= mask; + val >>= ffs(mask) - 1; for (i = 0; i < 4; i++) - if (val == axp20x_ea->p_time[i].idx) - val = axp20x_ea->p_time[i].time; + if (val == time[i].idx) + val = time[i].time; return sprintf(buf, "%u\n", val); } -static ssize_t axp20x_store_ext_attr(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t axp20x_show_attr_startup(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct axp20x_pek *axp20x_pek = dev_get_drvdata(dev); + + return axp20x_show_attr(dev, axp20x_pek->info->startup_time, + axp20x_pek->info->startup_mask, buf); +} + +static ssize_t axp20x_show_attr_shutdown(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct axp20x_pek *axp20x_pek = dev_get_drvdata(dev); + + return axp20x_show_attr(dev, axp20x_pek->info->shutdown_time, + axp20x_pek->info->shutdown_mask, buf); +} + +static ssize_t axp20x_store_attr(struct device *dev, + const struct axp20x_time *time, + unsigned int mask, const char *buf, + size_t count) { struct axp20x_pek *axp20x_pek = dev_get_drvdata(dev); - struct axp20x_pek_ext_attr *axp20x_ea = get_axp_ext_attr(attr); char val_str[20]; size_t len; int ret, i; @@ -123,39 +152,52 @@ static ssize_t axp20x_store_ext_attr(struct device *dev, for (i = 3; i >= 0; i--) { unsigned int err; - err = abs(axp20x_ea->p_time[i].time - val); + err = abs(time[i].time - val); if (err < best_err) { best_err = err; - idx = axp20x_ea->p_time[i].idx; + idx = time[i].idx; } if (!err) break; } - idx <<= ffs(axp20x_ea->mask) - 1; - ret = regmap_update_bits(axp20x_pek->axp20x->regmap, - AXP20X_PEK_KEY, - axp20x_ea->mask, idx); + idx <<= ffs(mask) - 1; + ret = regmap_update_bits(axp20x_pek->axp20x->regmap, AXP20X_PEK_KEY, + mask, idx); if (ret != 0) return -EINVAL; return 
count; } -static struct dev_ext_attribute axp20x_dev_attr_startup = { - .attr = __ATTR(startup, 0644, axp20x_show_ext_attr, axp20x_store_ext_attr), - .var = &axp20x_pek_startup_ext_attr, -}; +static ssize_t axp20x_store_attr_startup(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct axp20x_pek *axp20x_pek = dev_get_drvdata(dev); -static struct dev_ext_attribute axp20x_dev_attr_shutdown = { - .attr = __ATTR(shutdown, 0644, axp20x_show_ext_attr, axp20x_store_ext_attr), - .var = &axp20x_pek_shutdown_ext_attr, -}; + return axp20x_store_attr(dev, axp20x_pek->info->startup_time, + axp20x_pek->info->startup_mask, buf, count); +} + +static ssize_t axp20x_store_attr_shutdown(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct axp20x_pek *axp20x_pek = dev_get_drvdata(dev); + + return axp20x_store_attr(dev, axp20x_pek->info->shutdown_time, + axp20x_pek->info->shutdown_mask, buf, count); +} + +DEVICE_ATTR(startup, 0644, axp20x_show_attr_startup, axp20x_store_attr_startup); +DEVICE_ATTR(shutdown, 0644, axp20x_show_attr_shutdown, + axp20x_store_attr_shutdown); static struct attribute *axp20x_attributes[] = { - &axp20x_dev_attr_startup.attr.attr, - &axp20x_dev_attr_shutdown.attr.attr, + &dev_attr_startup.attr, + &dev_attr_shutdown.attr, NULL, }; @@ -291,8 +333,14 @@ static bool axp20x_pek_should_register_input(struct axp20x_pek *axp20x_pek, static int axp20x_pek_probe(struct platform_device *pdev) { struct axp20x_pek *axp20x_pek; + const struct platform_device_id *match = platform_get_device_id(pdev); int error; + if (!match) { + dev_err(&pdev->dev, "Failed to get platform_device_id\n"); + return -EINVAL; + } + axp20x_pek = devm_kzalloc(&pdev->dev, sizeof(struct axp20x_pek), GFP_KERNEL); if (!axp20x_pek) @@ -306,6 +354,8 @@ static int axp20x_pek_probe(struct platform_device *pdev) return error; } + axp20x_pek->info = (struct axp20x_info *)match->driver_data; + error = devm_device_add_group(&pdev->dev, &axp20x_attribute_group); if (error) { dev_err(&pdev->dev, "Failed to create sysfs attributes: %d\n", @@ -342,8 +392,21 @@ static const struct dev_pm_ops axp20x_pek_pm_ops = { #endif }; +static const struct platform_device_id axp_pek_id_match[] = { + { + .name = "axp20x-pek", + .driver_data = (kernel_ulong_t)&axp20x_info, + }, + { + .name = "axp221-pek", + .driver_data = (kernel_ulong_t)&axp221_info, + }, + { /* sentinel */ } +}; + static struct platform_driver axp20x_pek_driver = { .probe = axp20x_pek_probe, + .id_table = axp_pek_id_match, .driver = { .name = "axp20x-pek", .pm = &axp20x_pek_pm_ops, diff --git a/drivers/input/misc/dm355evm_keys.c b/drivers/input/misc/dm355evm_keys.c index bab256ef32b9..c803db64a376 100644 --- a/drivers/input/misc/dm355evm_keys.c +++ b/drivers/input/misc/dm355evm_keys.c @@ -15,7 +15,7 @@ #include #include -#include +#include #include diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c index f4e8fbec6a94..6bf82ea8c918 100644 --- a/drivers/input/misc/ims-pcu.c +++ b/drivers/input/misc/ims-pcu.c @@ -1261,7 +1261,7 @@ static umode_t ims_pcu_is_attr_visible(struct kobject *kobj, return mode; } -static struct attribute_group ims_pcu_attr_group = { +static const struct attribute_group ims_pcu_attr_group = { .is_visible = ims_pcu_is_attr_visible, .attrs = ims_pcu_attrs, }; @@ -1480,7 +1480,7 @@ static struct attribute *ims_pcu_ofn_attrs[] = { NULL }; -static struct attribute_group ims_pcu_ofn_attr_group = { +static const struct attribute_group ims_pcu_ofn_attr_group = { 
.name = "ofn", .attrs = ims_pcu_ofn_attrs, }; diff --git a/drivers/input/misc/keyspan_remote.c b/drivers/input/misc/keyspan_remote.c index a3fe4a990cc9..77c47d6325fe 100644 --- a/drivers/input/misc/keyspan_remote.c +++ b/drivers/input/misc/keyspan_remote.c @@ -85,7 +85,7 @@ static const unsigned short keyspan_key_table[] = { }; /* table of devices that work with this driver */ -static struct usb_device_id keyspan_table[] = { +static const struct usb_device_id keyspan_table[] = { { USB_DEVICE(USB_KEYSPAN_VENDOR_ID, USB_KEYSPAN_PRODUCT_UIA11) }, { } /* Terminating entry */ }; diff --git a/drivers/input/misc/pcspkr.c b/drivers/input/misc/pcspkr.c index 72b1fc3ab910..56ddba21de84 100644 --- a/drivers/input/misc/pcspkr.c +++ b/drivers/input/misc/pcspkr.c @@ -18,25 +18,30 @@ #include #include #include -#include +#include MODULE_AUTHOR("Vojtech Pavlik "); MODULE_DESCRIPTION("PC Speaker beeper driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:pcspkr"); -static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int code, int value) +static int pcspkr_event(struct input_dev *dev, unsigned int type, + unsigned int code, int value) { unsigned int count = 0; unsigned long flags; if (type != EV_SND) - return -1; + return -EINVAL; switch (code) { - case SND_BELL: if (value) value = 1000; - case SND_TONE: break; - default: return -1; + case SND_BELL: + if (value) + value = 1000; + case SND_TONE: + break; + default: + return -EINVAL; } if (value > 20 && value < 32767) diff --git a/drivers/input/misc/powermate.c b/drivers/input/misc/powermate.c index 84909a12ff36..5c8c79623c87 100644 --- a/drivers/input/misc/powermate.c +++ b/drivers/input/misc/powermate.c @@ -432,7 +432,7 @@ static void powermate_disconnect(struct usb_interface *intf) } } -static struct usb_device_id powermate_devices [] = { +static const struct usb_device_id powermate_devices[] = { { USB_DEVICE(POWERMATE_VENDOR, POWERMATE_PRODUCT_NEW) }, { USB_DEVICE(POWERMATE_VENDOR, POWERMATE_PRODUCT_OLD) }, { USB_DEVICE(CONTOUR_VENDOR, CONTOUR_JOG) }, diff --git a/drivers/input/misc/pwm-vibra.c b/drivers/input/misc/pwm-vibra.c new file mode 100644 index 000000000000..55da191ae550 --- /dev/null +++ b/drivers/input/misc/pwm-vibra.c @@ -0,0 +1,267 @@ +/* + * PWM vibrator driver + * + * Copyright (C) 2017 Collabora Ltd. + * + * Based on previous work from: + * Copyright (C) 2012 Dmitry Torokhov + * + * Based on PWM beeper driver: + * Copyright (C) 2010, Lars-Peter Clausen + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct pwm_vibrator { + struct input_dev *input; + struct pwm_device *pwm; + struct pwm_device *pwm_dir; + struct regulator *vcc; + + struct work_struct play_work; + u16 level; + u32 direction_duty_cycle; +}; + +static int pwm_vibrator_start(struct pwm_vibrator *vibrator) +{ + struct device *pdev = vibrator->input->dev.parent; + struct pwm_state state; + int err; + + err = regulator_enable(vibrator->vcc); + if (err) { + dev_err(pdev, "failed to enable regulator: %d", err); + return err; + } + + pwm_get_state(vibrator->pwm, &state); + pwm_set_relative_duty_cycle(&state, vibrator->level, 0xffff); + state.enabled = true; + + err = pwm_apply_state(vibrator->pwm, &state); + if (err) { + dev_err(pdev, "failed to apply pwm state: %d", err); + return err; + } + + if (vibrator->pwm_dir) { + pwm_get_state(vibrator->pwm_dir, &state); + state.duty_cycle = vibrator->direction_duty_cycle; + state.enabled = true; + + err = pwm_apply_state(vibrator->pwm_dir, &state); + if (err) { + dev_err(pdev, "failed to apply dir-pwm state: %d", err); + pwm_disable(vibrator->pwm); + return err; + } + } + + return 0; +} + +static void pwm_vibrator_stop(struct pwm_vibrator *vibrator) +{ + regulator_disable(vibrator->vcc); + + if (vibrator->pwm_dir) + pwm_disable(vibrator->pwm_dir); + pwm_disable(vibrator->pwm); +} + +static void pwm_vibrator_play_work(struct work_struct *work) +{ + struct pwm_vibrator *vibrator = container_of(work, + struct pwm_vibrator, play_work); + + if (vibrator->level) + pwm_vibrator_start(vibrator); + else + pwm_vibrator_stop(vibrator); +} + +static int pwm_vibrator_play_effect(struct input_dev *dev, void *data, + struct ff_effect *effect) +{ + struct pwm_vibrator *vibrator = input_get_drvdata(dev); + + vibrator->level = effect->u.rumble.strong_magnitude; + if (!vibrator->level) + vibrator->level = effect->u.rumble.weak_magnitude; + + schedule_work(&vibrator->play_work); + + return 0; +} + +static void pwm_vibrator_close(struct input_dev *input) +{ + struct pwm_vibrator *vibrator = input_get_drvdata(input); + + cancel_work_sync(&vibrator->play_work); + pwm_vibrator_stop(vibrator); +} + +static int pwm_vibrator_probe(struct platform_device *pdev) +{ + struct pwm_vibrator *vibrator; + struct pwm_state state; + int err; + + vibrator = devm_kzalloc(&pdev->dev, sizeof(*vibrator), GFP_KERNEL); + if (!vibrator) + return -ENOMEM; + + vibrator->input = devm_input_allocate_device(&pdev->dev); + if (!vibrator->input) + return -ENOMEM; + + vibrator->vcc = devm_regulator_get(&pdev->dev, "vcc"); + err = PTR_ERR_OR_ZERO(vibrator->vcc); + if (err) { + if (err != -EPROBE_DEFER) + dev_err(&pdev->dev, "Failed to request regulator: %d", + err); + return err; + } + + vibrator->pwm = devm_pwm_get(&pdev->dev, "enable"); + err = PTR_ERR_OR_ZERO(vibrator->pwm); + if (err) { + if (err != -EPROBE_DEFER) + dev_err(&pdev->dev, "Failed to request main pwm: %d", + err); + return err; + } + + INIT_WORK(&vibrator->play_work, pwm_vibrator_play_work); + + /* Sync up PWM state and ensure it is off. */ + pwm_init_state(vibrator->pwm, &state); + state.enabled = false; + err = pwm_apply_state(vibrator->pwm, &state); + if (err) { + dev_err(&pdev->dev, "failed to apply initial PWM state: %d", + err); + return err; + } + + vibrator->pwm_dir = devm_pwm_get(&pdev->dev, "direction"); + err = PTR_ERR_OR_ZERO(vibrator->pwm_dir); + switch (err) { + case 0: + /* Sync up PWM state and ensure it is off. 
*/ + pwm_init_state(vibrator->pwm_dir, &state); + state.enabled = false; + err = pwm_apply_state(vibrator->pwm_dir, &state); + if (err) { + dev_err(&pdev->dev, "failed to apply initial PWM state: %d", + err); + return err; + } + + vibrator->direction_duty_cycle = + pwm_get_period(vibrator->pwm_dir) / 2; + device_property_read_u32(&pdev->dev, "direction-duty-cycle-ns", + &vibrator->direction_duty_cycle); + break; + + case -ENODATA: + /* Direction PWM is optional */ + vibrator->pwm_dir = NULL; + break; + + default: + dev_err(&pdev->dev, "Failed to request direction pwm: %d", err); + /* Fall through */ + + case -EPROBE_DEFER: + return err; + } + + vibrator->input->name = "pwm-vibrator"; + vibrator->input->id.bustype = BUS_HOST; + vibrator->input->dev.parent = &pdev->dev; + vibrator->input->close = pwm_vibrator_close; + + input_set_drvdata(vibrator->input, vibrator); + input_set_capability(vibrator->input, EV_FF, FF_RUMBLE); + + err = input_ff_create_memless(vibrator->input, NULL, + pwm_vibrator_play_effect); + if (err) { + dev_err(&pdev->dev, "Couldn't create FF dev: %d", err); + return err; + } + + err = input_register_device(vibrator->input); + if (err) { + dev_err(&pdev->dev, "Couldn't register input dev: %d", err); + return err; + } + + platform_set_drvdata(pdev, vibrator); + + return 0; +} + +static int __maybe_unused pwm_vibrator_suspend(struct device *dev) +{ + struct pwm_vibrator *vibrator = dev_get_drvdata(dev); + + cancel_work_sync(&vibrator->play_work); + if (vibrator->level) + pwm_vibrator_stop(vibrator); + + return 0; +} + +static int __maybe_unused pwm_vibrator_resume(struct device *dev) +{ + struct pwm_vibrator *vibrator = dev_get_drvdata(dev); + + if (vibrator->level) + pwm_vibrator_start(vibrator); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(pwm_vibrator_pm_ops, + pwm_vibrator_suspend, pwm_vibrator_resume); + +#ifdef CONFIG_OF +static const struct of_device_id pwm_vibra_dt_match_table[] = { + { .compatible = "pwm-vibrator" }, + {}, +}; +MODULE_DEVICE_TABLE(of, pwm_vibra_dt_match_table); +#endif + +static struct platform_driver pwm_vibrator_driver = { + .probe = pwm_vibrator_probe, + .driver = { + .name = "pwm-vibrator", + .pm = &pwm_vibrator_pm_ops, + .of_match_table = of_match_ptr(pwm_vibra_dt_match_table), + }, +}; +module_platform_driver(pwm_vibrator_driver); + +MODULE_AUTHOR("Sebastian Reichel "); +MODULE_DESCRIPTION("PWM vibrator driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:pwm-vibrator"); diff --git a/drivers/input/misc/rk805-pwrkey.c b/drivers/input/misc/rk805-pwrkey.c new file mode 100644 index 000000000000..921003963a53 --- /dev/null +++ b/drivers/input/misc/rk805-pwrkey.c @@ -0,0 +1,111 @@ +/* + * Rockchip RK805 PMIC Power Key driver + * + * Copyright (c) 2017, Fuzhou Rockchip Electronics Co., Ltd + * + * Author: Joseph Chen + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +static irqreturn_t pwrkey_fall_irq(int irq, void *_pwr) +{ + struct input_dev *pwr = _pwr; + + input_report_key(pwr, KEY_POWER, 1); + input_sync(pwr); + + return IRQ_HANDLED; +} + +static irqreturn_t pwrkey_rise_irq(int irq, void *_pwr) +{ + struct input_dev *pwr = _pwr; + + input_report_key(pwr, KEY_POWER, 0); + input_sync(pwr); + + return IRQ_HANDLED; +} + +static int rk805_pwrkey_probe(struct platform_device *pdev) +{ + struct input_dev *pwr; + int fall_irq, rise_irq; + int err; + + pwr = devm_input_allocate_device(&pdev->dev); + if (!pwr) { + dev_err(&pdev->dev, "Can't allocate power button\n"); + return -ENOMEM; + } + + pwr->name = "rk805 pwrkey"; + pwr->phys = "rk805_pwrkey/input0"; + pwr->id.bustype = BUS_HOST; + input_set_capability(pwr, EV_KEY, KEY_POWER); + + fall_irq = platform_get_irq(pdev, 0); + if (fall_irq < 0) { + dev_err(&pdev->dev, "Can't get fall irq: %d\n", fall_irq); + return fall_irq; + } + + rise_irq = platform_get_irq(pdev, 1); + if (rise_irq < 0) { + dev_err(&pdev->dev, "Can't get rise irq: %d\n", rise_irq); + return rise_irq; + } + + err = devm_request_any_context_irq(&pwr->dev, fall_irq, + pwrkey_fall_irq, + IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + "rk805_pwrkey_fall", pwr); + if (err < 0) { + dev_err(&pdev->dev, "Can't register fall irq: %d\n", err); + return err; + } + + err = devm_request_any_context_irq(&pwr->dev, rise_irq, + pwrkey_rise_irq, + IRQF_TRIGGER_RISING | IRQF_ONESHOT, + "rk805_pwrkey_rise", pwr); + if (err < 0) { + dev_err(&pdev->dev, "Can't register rise irq: %d\n", err); + return err; + } + + err = input_register_device(pwr); + if (err) { + dev_err(&pdev->dev, "Can't register power button: %d\n", err); + return err; + } + + platform_set_drvdata(pdev, pwr); + device_init_wakeup(&pdev->dev, true); + + return 0; +} + +static struct platform_driver rk805_pwrkey_driver = { + .probe = rk805_pwrkey_probe, + .driver = { + .name = "rk805-pwrkey", + }, +}; +module_platform_driver(rk805_pwrkey_driver); + +MODULE_AUTHOR("Joseph Chen "); +MODULE_DESCRIPTION("RK805 PMIC Power Key driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/input/misc/twl4030-pwrbutton.c b/drivers/input/misc/twl4030-pwrbutton.c index 1c13005b228f..b307cca17022 100644 --- a/drivers/input/misc/twl4030-pwrbutton.c +++ b/drivers/input/misc/twl4030-pwrbutton.c @@ -27,7 +27,7 @@ #include #include #include -#include +#include #define PWR_PWRON_IRQ (1 << 0) diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c index caa5a62c42fb..6c51d404874b 100644 --- a/drivers/input/misc/twl4030-vibra.c +++ b/drivers/input/misc/twl4030-vibra.c @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index 022be0e22eba..443151de90c6 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c @@ -98,14 +98,15 @@ static int uinput_request_reserve_slot(struct uinput_device *udev, uinput_request_alloc_id(udev, request)); } -static void uinput_request_done(struct uinput_device *udev, - struct uinput_request *request) +static void uinput_request_release_slot(struct uinput_device *udev, + unsigned int id) { /* Mark slot as available */ - udev->requests[request->id] = NULL; - wake_up(&udev->requests_waitq); + spin_lock(&udev->requests_lock); + udev->requests[id] = NULL; + spin_unlock(&udev->requests_lock); - complete(&request->done); + wake_up(&udev->requests_waitq); } static int 
uinput_request_send(struct uinput_device *udev, @@ -138,20 +139,22 @@ static int uinput_request_send(struct uinput_device *udev, static int uinput_request_submit(struct uinput_device *udev, struct uinput_request *request) { - int error; + int retval; - error = uinput_request_reserve_slot(udev, request); - if (error) - return error; + retval = uinput_request_reserve_slot(udev, request); + if (retval) + return retval; - error = uinput_request_send(udev, request); - if (error) { - uinput_request_done(udev, request); - return error; - } + retval = uinput_request_send(udev, request); + if (retval) + goto out; wait_for_completion(&request->done); - return request->retval; + retval = request->retval; + + out: + uinput_request_release_slot(udev, request->id); + return retval; } /* @@ -169,7 +172,7 @@ static void uinput_flush_requests(struct uinput_device *udev) request = udev->requests[i]; if (request) { request->retval = -ENODEV; - uinput_request_done(udev, request); + complete(&request->done); } } @@ -230,6 +233,18 @@ static int uinput_dev_erase_effect(struct input_dev *dev, int effect_id) return uinput_request_submit(udev, &request); } +static int uinput_dev_flush(struct input_dev *dev, struct file *file) +{ + /* + * If we are called with file == NULL that means we are tearing + * down the device, and therefore we can not handle FF erase + * requests: either we are handling UI_DEV_DESTROY (and holding + * the udev->mutex), or the file descriptor is closed and there is + * nobody on the other side anymore. + */ + return file ? input_ff_flush(dev, file) : 0; +} + static void uinput_destroy_device(struct uinput_device *udev) { const char *name, *phys; @@ -297,6 +312,12 @@ static int uinput_create_device(struct uinput_device *udev) dev->ff->playback = uinput_dev_playback; dev->ff->set_gain = uinput_dev_set_gain; dev->ff->set_autocenter = uinput_dev_set_autocenter; + /* + * The standard input_ff_flush() implementation does + * not quite work for uinput as we can't reasonably + * handle FF requests during device teardown. 
+ */ + dev->flush = uinput_dev_flush; } error = input_register_device(udev->dev); @@ -939,7 +960,7 @@ static long uinput_ioctl_handler(struct file *file, unsigned int cmd, } req->retval = ff_up.retval; - uinput_request_done(udev, req); + complete(&req->done); goto out; case UI_END_FF_ERASE: @@ -955,7 +976,7 @@ static long uinput_ioctl_handler(struct file *file, unsigned int cmd, } req->retval = ff_erase.retval; - uinput_request_done(udev, req); + complete(&req->done); goto out; } diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c index fa130e7b734c..6bf56bb5f8d9 100644 --- a/drivers/input/misc/xen-kbdfront.c +++ b/drivers/input/misc/xen-kbdfront.c @@ -84,17 +84,20 @@ static void xenkbd_handle_key_event(struct xenkbd_info *info, struct xenkbd_key *key) { struct input_dev *dev; + int value = key->pressed; if (test_bit(key->keycode, info->ptr->keybit)) { dev = info->ptr; } else if (test_bit(key->keycode, info->kbd->keybit)) { dev = info->kbd; + if (key->pressed && test_bit(key->keycode, info->kbd->key)) + value = 2; /* Mark as autorepeat */ } else { pr_warn("unhandled keycode 0x%x\n", key->keycode); return; } - input_report_key(dev, key->keycode, key->pressed); + input_event(dev, EV_KEY, key->keycode, value); input_sync(dev); } diff --git a/drivers/input/misc/yealink.c b/drivers/input/misc/yealink.c index 6e7ff9561d92..a1e0ff59d2f2 100644 --- a/drivers/input/misc/yealink.c +++ b/drivers/input/misc/yealink.c @@ -798,7 +798,7 @@ static struct attribute *yld_attributes[] = { NULL }; -static struct attribute_group yld_attr_group = { +static const struct attribute_group yld_attr_group = { .attrs = yld_attributes }; diff --git a/drivers/input/mouse/appletouch.c b/drivers/input/mouse/appletouch.c index ef234c9b2f2f..81a695d0b4e0 100644 --- a/drivers/input/mouse/appletouch.c +++ b/drivers/input/mouse/appletouch.c @@ -125,7 +125,7 @@ static const struct atp_info geyser4_info = { * According to Info.plist Geyser IV is the same as Geyser III.) 
*/ -static struct usb_device_id atp_table[] = { +static const struct usb_device_id atp_table[] = { /* PowerBooks Feb 2005, iBooks G4 */ ATP_DEVICE(0x020e, fountain_info), /* FOUNTAIN ANSI */ ATP_DEVICE(0x020f, fountain_info), /* FOUNTAIN ISO */ diff --git a/drivers/input/mouse/byd.c b/drivers/input/mouse/byd.c index b27aa637f877..b64b81599f7e 100644 --- a/drivers/input/mouse/byd.c +++ b/drivers/input/mouse/byd.c @@ -344,7 +344,7 @@ static int byd_reset_touchpad(struct psmouse *psmouse) u8 param[4]; size_t i; - const struct { + static const struct { u16 command; u8 arg; } seq[] = { diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h index 61c202436250..599544c1a91c 100644 --- a/drivers/input/mouse/elan_i2c.h +++ b/drivers/input/mouse/elan_i2c.h @@ -58,7 +58,7 @@ struct elan_transport_ops { int (*get_version)(struct i2c_client *client, bool iap, u8 *version); int (*get_sm_version)(struct i2c_client *client, - u16 *ic_type, u8 *version); + u16 *ic_type, u8 *version, u8 *clickpad); int (*get_checksum)(struct i2c_client *client, bool iap, u16 *csum); int (*get_product_id)(struct i2c_client *client, u16 *id); diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index cfbc8ba4c96c..0e761d079dc4 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -95,6 +95,7 @@ struct elan_tp_data { u8 min_baseline; u8 max_baseline; bool baseline_ready; + u8 clickpad; }; static int elan_get_fwinfo(u16 ic_type, u16 *validpage_count, @@ -213,7 +214,7 @@ static int elan_query_product(struct elan_tp_data *data) return error; error = data->ops->get_sm_version(data->client, &data->ic_type, - &data->sm_version); + &data->sm_version, &data->clickpad); if (error) return error; @@ -923,6 +924,7 @@ static void elan_report_absolute(struct elan_tp_data *data, u8 *packet) } input_report_key(input, BTN_LEFT, tp_info & 0x01); + input_report_key(input, BTN_RIGHT, tp_info & 0x02); input_report_abs(input, ABS_DISTANCE, hover_event != 0); input_mt_report_pointer_emulation(input, true); input_sync(input); @@ -991,7 +993,10 @@ static int elan_setup_input_device(struct elan_tp_data *data) __set_bit(EV_ABS, input->evbit); __set_bit(INPUT_PROP_POINTER, input->propbit); - __set_bit(INPUT_PROP_BUTTONPAD, input->propbit); + if (data->clickpad) + __set_bit(INPUT_PROP_BUTTONPAD, input->propbit); + else + __set_bit(BTN_RIGHT, input->keybit); __set_bit(BTN_LEFT, input->keybit); /* Set up ST parameters */ diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c index 80172f25974d..e19eb60b3d2f 100644 --- a/drivers/input/mouse/elan_i2c_i2c.c +++ b/drivers/input/mouse/elan_i2c_i2c.c @@ -288,7 +288,8 @@ static int elan_i2c_get_version(struct i2c_client *client, } static int elan_i2c_get_sm_version(struct i2c_client *client, - u16 *ic_type, u8 *version) + u16 *ic_type, u8 *version, + u8 *clickpad) { int error; u8 pattern_ver; @@ -317,6 +318,7 @@ static int elan_i2c_get_sm_version(struct i2c_client *client, return error; } *version = val[1]; + *clickpad = val[0] & 0x10; } else { error = elan_i2c_read_cmd(client, ETP_I2C_OSM_VERSION_CMD, val); if (error) { @@ -326,6 +328,15 @@ static int elan_i2c_get_sm_version(struct i2c_client *client, } *version = val[0]; *ic_type = val[1]; + + error = elan_i2c_read_cmd(client, ETP_I2C_NSM_VERSION_CMD, + val); + if (error) { + dev_err(&client->dev, "failed to get SM version: %d\n", + error); + return error; + } + *clickpad = val[0] & 0x10; } return 0; @@ -587,7 +598,7 @@ static int 
elan_i2c_write_fw_block(struct i2c_client *client, } /* Wait for F/W to update one page ROM data. */ - msleep(20); + msleep(35); error = elan_i2c_read_cmd(client, ETP_I2C_IAP_CTRL_CMD, val); if (error) { diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c index df7a57ca7331..29f99529b187 100644 --- a/drivers/input/mouse/elan_i2c_smbus.c +++ b/drivers/input/mouse/elan_i2c_smbus.c @@ -166,7 +166,8 @@ static int elan_smbus_get_version(struct i2c_client *client, } static int elan_smbus_get_sm_version(struct i2c_client *client, - u16 *ic_type, u8 *version) + u16 *ic_type, u8 *version, + u8 *clickpad) { int error; u8 val[3]; @@ -180,6 +181,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client, *version = val[0]; *ic_type = val[1]; + *clickpad = val[0] & 0x10; return 0; } diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 791993215ea3..b84cd978fce2 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -700,7 +700,9 @@ static int elantech_debounce_check_v2(struct psmouse *psmouse) * When we encounter packet that matches this exactly, it means the * hardware is in debounce status. Just ignore the whole packet. */ - const u8 debounce_packet[] = { 0x84, 0xff, 0xff, 0x02, 0xff, 0xff }; + static const u8 debounce_packet[] = { + 0x84, 0xff, 0xff, 0x02, 0xff, 0xff + }; unsigned char *packet = psmouse->packet; return !memcmp(packet, debounce_packet, sizeof(debounce_packet)); @@ -741,7 +743,9 @@ static int elantech_packet_check_v2(struct psmouse *psmouse) static int elantech_packet_check_v3(struct psmouse *psmouse) { struct elantech_data *etd = psmouse->private; - const u8 debounce_packet[] = { 0xc4, 0xff, 0xff, 0x02, 0xff, 0xff }; + static const u8 debounce_packet[] = { + 0xc4, 0xff, 0xff, 0x02, 0xff, 0xff + }; unsigned char *packet = psmouse->packet; /* @@ -1377,7 +1381,7 @@ static struct attribute *elantech_attrs[] = { NULL }; -static struct attribute_group elantech_attr_group = { +static const struct attribute_group elantech_attr_group = { .attrs = elantech_attrs, }; diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c index f73b47b8c578..6a5649e52eed 100644 --- a/drivers/input/mouse/psmouse-base.c +++ b/drivers/input/mouse/psmouse-base.c @@ -101,7 +101,7 @@ static struct attribute *psmouse_attributes[] = { NULL }; -static struct attribute_group psmouse_attribute_group = { +static const struct attribute_group psmouse_attribute_group = { .attrs = psmouse_attributes, }; diff --git a/drivers/input/mouse/synaptics_usb.c b/drivers/input/mouse/synaptics_usb.c index 6bcc0189c1c9..cb7d15d826d0 100644 --- a/drivers/input/mouse/synaptics_usb.c +++ b/drivers/input/mouse/synaptics_usb.c @@ -525,7 +525,7 @@ static int synusb_reset_resume(struct usb_interface *intf) return synusb_resume(intf); } -static struct usb_device_id synusb_idtable[] = { +static const struct usb_device_id synusb_idtable[] = { { USB_DEVICE_SYNAPTICS(TP, SYNUSB_TOUCHPAD) }, { USB_DEVICE_SYNAPTICS(INT_TP, SYNUSB_TOUCHPAD) }, { USB_DEVICE_SYNAPTICS(CPAD, diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c index 0e0ff84088fd..2d7f691ec71c 100644 --- a/drivers/input/mousedev.c +++ b/drivers/input/mousedev.c @@ -15,6 +15,7 @@ #define MOUSEDEV_MINORS 31 #define MOUSEDEV_MIX 63 +#include #include #include #include @@ -103,7 +104,7 @@ struct mousedev_client { spinlock_t packet_lock; int pos_x, pos_y; - signed char ps2[6]; + u8 ps2[6]; unsigned char ready, buffer, bufsiz; unsigned char imexseq, 
impsseq; enum mousedev_emul mode; @@ -291,11 +292,10 @@ static void mousedev_notify_readers(struct mousedev *mousedev, } client->pos_x += packet->dx; - client->pos_x = client->pos_x < 0 ? - 0 : (client->pos_x >= xres ? xres : client->pos_x); + client->pos_x = clamp_val(client->pos_x, 0, xres); + client->pos_y += packet->dy; - client->pos_y = client->pos_y < 0 ? - 0 : (client->pos_y >= yres ? yres : client->pos_y); + client->pos_y = clamp_val(client->pos_y, 0, yres); p->dx += packet->dx; p->dy += packet->dy; @@ -571,44 +571,50 @@ static int mousedev_open(struct inode *inode, struct file *file) return error; } -static inline int mousedev_limit_delta(int delta, int limit) -{ - return delta > limit ? limit : (delta < -limit ? -limit : delta); -} - -static void mousedev_packet(struct mousedev_client *client, - signed char *ps2_data) +static void mousedev_packet(struct mousedev_client *client, u8 *ps2_data) { struct mousedev_motion *p = &client->packets[client->tail]; + s8 dx, dy, dz; - ps2_data[0] = 0x08 | - ((p->dx < 0) << 4) | ((p->dy < 0) << 5) | (p->buttons & 0x07); - ps2_data[1] = mousedev_limit_delta(p->dx, 127); - ps2_data[2] = mousedev_limit_delta(p->dy, 127); - p->dx -= ps2_data[1]; - p->dy -= ps2_data[2]; + dx = clamp_val(p->dx, -127, 127); + p->dx -= dx; + + dy = clamp_val(p->dy, -127, 127); + p->dy -= dy; + + ps2_data[0] = BIT(3); + ps2_data[0] |= ((dx & BIT(7)) >> 3) | ((dy & BIT(7)) >> 2); + ps2_data[0] |= p->buttons & 0x07; + ps2_data[1] = dx; + ps2_data[2] = dy; switch (client->mode) { case MOUSEDEV_EMUL_EXPS: - ps2_data[3] = mousedev_limit_delta(p->dz, 7); - p->dz -= ps2_data[3]; - ps2_data[3] = (ps2_data[3] & 0x0f) | ((p->buttons & 0x18) << 1); + dz = clamp_val(p->dz, -7, 7); + p->dz -= dz; + + ps2_data[3] = (dz & 0x0f) | ((p->buttons & 0x18) << 1); client->bufsiz = 4; break; case MOUSEDEV_EMUL_IMPS: - ps2_data[0] |= - ((p->buttons & 0x10) >> 3) | ((p->buttons & 0x08) >> 1); - ps2_data[3] = mousedev_limit_delta(p->dz, 127); - p->dz -= ps2_data[3]; + dz = clamp_val(p->dz, -127, 127); + p->dz -= dz; + + ps2_data[0] |= ((p->buttons & 0x10) >> 3) | + ((p->buttons & 0x08) >> 1); + ps2_data[3] = dz; + client->bufsiz = 4; break; case MOUSEDEV_EMUL_PS2: default: - ps2_data[0] |= - ((p->buttons & 0x10) >> 3) | ((p->buttons & 0x08) >> 1); p->dz = 0; + + ps2_data[0] |= ((p->buttons & 0x10) >> 3) | + ((p->buttons & 0x08) >> 1); + client->bufsiz = 3; break; } @@ -714,7 +720,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer, { struct mousedev_client *client = file->private_data; struct mousedev *mousedev = client->mousedev; - signed char data[sizeof(client->ps2)]; + u8 data[sizeof(client->ps2)]; int retval = 0; if (!client->ready && !client->buffer && mousedev->exist && diff --git a/drivers/input/rmi4/rmi_f01.c b/drivers/input/rmi4/rmi_f01.c index 6dca3c0fbb4a..ae966e333a2f 100644 --- a/drivers/input/rmi4/rmi_f01.c +++ b/drivers/input/rmi4/rmi_f01.c @@ -334,7 +334,7 @@ static struct attribute *rmi_f01_attrs[] = { NULL }; -static struct attribute_group rmi_f01_attr_group = { +static const struct attribute_group rmi_f01_attr_group = { .attrs = rmi_f01_attrs, }; diff --git a/drivers/input/rmi4/rmi_f34.c b/drivers/input/rmi4/rmi_f34.c index b8ee78e0d61f..4cfe9703a8e7 100644 --- a/drivers/input/rmi4/rmi_f34.c +++ b/drivers/input/rmi4/rmi_f34.c @@ -516,7 +516,7 @@ static struct attribute *rmi_firmware_attrs[] = { NULL }; -static struct attribute_group rmi_firmware_attr_group = { +static const struct attribute_group rmi_firmware_attr_group = { .attrs = rmi_firmware_attrs, }; 
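Note on the mousedev change above: mousedev_packet() now clamps the accumulated deltas with clamp_val() and derives the X/Y sign bits of the PS/2 status byte directly from bit 7 of the clamped values, instead of going through the old mousedev_limit_delta() helper. Below is a minimal standalone sketch of that bare three-byte packet encoding; it assumes plain C outside the kernel, and the helper names (clamp_int, ps2_build_packet) are illustrative only, not part of the patch.

/*
 * Illustrative sketch (not from the patch): building a bare PS/2 mouse
 * packet from accumulated deltas, mirroring the mousedev_packet() logic.
 */
#include <stdint.h>

static int clamp_int(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

static void ps2_build_packet(int dx, int dy, unsigned int buttons, uint8_t out[3])
{
	int8_t cdx = clamp_int(dx, -127, 127);
	int8_t cdy = clamp_int(dy, -127, 127);

	out[0] = 0x08;			/* bit 3 is always set in the status byte */
	out[0] |= (cdx < 0) << 4;	/* bit 4: X sign */
	out[0] |= (cdy < 0) << 5;	/* bit 5: Y sign */
	out[0] |= buttons & 0x07;	/* bits 0-2: left/right/middle */
	out[1] = (uint8_t)cdx;		/* clamped X delta */
	out[2] = (uint8_t)cdy;		/* clamped Y delta */
}

Bytes 1 and 2 carry the clamped deltas themselves; the IMPS and EXPS emulation modes in the switch above append a fourth byte for the wheel (and, for EXPS, the extra buttons), with any remainder of the delta kept in the accumulator for the next packet.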
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig index c3d05b4d3118..21488c048fa3 100644 --- a/drivers/input/serio/Kconfig +++ b/drivers/input/serio/Kconfig @@ -292,6 +292,17 @@ config SERIO_SUN4I_PS2 To compile this driver as a module, choose M here: the module will be called sun4i-ps2. +config SERIO_GPIO_PS2 + tristate "GPIO PS/2 bit banging driver" + depends on GPIOLIB + help + Say Y here if you want PS/2 bit banging support via GPIO. + + To compile this driver as a module, choose M here: the + module will be called ps2-gpio. + + If you are unsure, say N. + config USERIO tristate "User space serio port driver support" help diff --git a/drivers/input/serio/Makefile b/drivers/input/serio/Makefile index 2374ef9b33d7..767bd9b6e1ed 100644 --- a/drivers/input/serio/Makefile +++ b/drivers/input/serio/Makefile @@ -30,4 +30,5 @@ obj-$(CONFIG_SERIO_APBPS2) += apbps2.o obj-$(CONFIG_SERIO_OLPC_APSP) += olpc_apsp.o obj-$(CONFIG_HYPERV_KEYBOARD) += hyperv-keyboard.o obj-$(CONFIG_SERIO_SUN4I_PS2) += sun4i-ps2.o +obj-$(CONFIG_SERIO_GPIO_PS2) += ps2-gpio.o obj-$(CONFIG_USERIO) += userio.o diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c index c6606cacb6a7..ff3875cf3da1 100644 --- a/drivers/input/serio/ambakmi.c +++ b/drivers/input/serio/ambakmi.c @@ -187,7 +187,7 @@ static int __maybe_unused amba_kmi_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(amba_kmi_dev_pm_ops, NULL, amba_kmi_resume); -static struct amba_id amba_kmi_idtable[] = { +static const struct amba_id amba_kmi_idtable[] = { { .id = 0x00041050, .mask = 0x000fffff, diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index f932a83b4990..6cbbdc6e9687 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -839,6 +839,13 @@ static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "P34"), }, }, + { + /* Gigabyte P57 - Elantech touchpad */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "P57"), + }, + }, { /* Schenker XMG C504 - Elantech touchpad */ .matches = { @@ -927,7 +934,7 @@ static int i8042_pnp_aux_probe(struct pnp_dev *dev, const struct pnp_device_id * return 0; } -static struct pnp_device_id pnp_kbd_devids[] = { +static const struct pnp_device_id pnp_kbd_devids[] = { { .id = "PNP0300", .driver_data = 0 }, { .id = "PNP0301", .driver_data = 0 }, { .id = "PNP0302", .driver_data = 0 }, @@ -957,7 +964,7 @@ static struct pnp_driver i8042_pnp_kbd_driver = { }, }; -static struct pnp_device_id pnp_aux_devids[] = { +static const struct pnp_device_id pnp_aux_devids[] = { { .id = "AUI0200", .driver_data = 0 }, { .id = "FJC6000", .driver_data = 0 }, { .id = "FJC6001", .driver_data = 0 }, diff --git a/drivers/input/serio/ps2-gpio.c b/drivers/input/serio/ps2-gpio.c new file mode 100644 index 000000000000..b50e3817f3c4 --- /dev/null +++ b/drivers/input/serio/ps2-gpio.c @@ -0,0 +1,453 @@ +/* + * GPIO based serio bus driver for bit banging the PS/2 protocol + * + * Author: Danilo Krummrich + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRIVER_NAME "ps2-gpio" + +#define PS2_MODE_RX 0 +#define PS2_MODE_TX 1 + +#define PS2_START_BIT 0 +#define PS2_DATA_BIT0 1 +#define PS2_DATA_BIT1 2 +#define PS2_DATA_BIT2 3 +#define PS2_DATA_BIT3 4 +#define PS2_DATA_BIT4 5 +#define PS2_DATA_BIT5 6 +#define PS2_DATA_BIT6 7 +#define PS2_DATA_BIT7 8 +#define PS2_PARITY_BIT 9 +#define PS2_STOP_BIT 10 +#define PS2_TX_TIMEOUT 11 +#define PS2_ACK_BIT 12 + +#define PS2_DEV_RET_ACK 0xfa +#define PS2_DEV_RET_NACK 0xfe + +#define PS2_CMD_RESEND 0xfe + +struct ps2_gpio_data { + struct device *dev; + struct serio *serio; + unsigned char mode; + struct gpio_desc *gpio_clk; + struct gpio_desc *gpio_data; + bool write_enable; + int irq; + unsigned char rx_cnt; + unsigned char rx_byte; + unsigned char tx_cnt; + unsigned char tx_byte; + struct completion tx_done; + struct mutex tx_mutex; + struct delayed_work tx_work; +}; + +static int ps2_gpio_open(struct serio *serio) +{ + struct ps2_gpio_data *drvdata = serio->port_data; + + enable_irq(drvdata->irq); + return 0; +} + +static void ps2_gpio_close(struct serio *serio) +{ + struct ps2_gpio_data *drvdata = serio->port_data; + + disable_irq(drvdata->irq); +} + +static int __ps2_gpio_write(struct serio *serio, unsigned char val) +{ + struct ps2_gpio_data *drvdata = serio->port_data; + + disable_irq_nosync(drvdata->irq); + gpiod_direction_output(drvdata->gpio_clk, 0); + + drvdata->mode = PS2_MODE_TX; + drvdata->tx_byte = val; + + schedule_delayed_work(&drvdata->tx_work, usecs_to_jiffies(200)); + + return 0; +} + +static int ps2_gpio_write(struct serio *serio, unsigned char val) +{ + struct ps2_gpio_data *drvdata = serio->port_data; + int ret = 0; + + if (in_task()) { + mutex_lock(&drvdata->tx_mutex); + __ps2_gpio_write(serio, val); + if (!wait_for_completion_timeout(&drvdata->tx_done, + msecs_to_jiffies(10000))) + ret = SERIO_TIMEOUT; + mutex_unlock(&drvdata->tx_mutex); + } else { + __ps2_gpio_write(serio, val); + } + + return ret; +} + +static void ps2_gpio_tx_work_fn(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct ps2_gpio_data *drvdata = container_of(dwork, + struct ps2_gpio_data, + tx_work); + + enable_irq(drvdata->irq); + gpiod_direction_output(drvdata->gpio_data, 0); + gpiod_direction_input(drvdata->gpio_clk); +} + +static irqreturn_t ps2_gpio_irq_rx(struct ps2_gpio_data *drvdata) +{ + unsigned char byte, cnt; + int data; + int rxflags = 0; + static unsigned long old_jiffies; + + byte = drvdata->rx_byte; + cnt = drvdata->rx_cnt; + + if (old_jiffies == 0) + old_jiffies = jiffies; + + if ((jiffies - old_jiffies) > usecs_to_jiffies(100)) { + dev_err(drvdata->dev, + "RX: timeout, probably we missed an interrupt\n"); + goto err; + } + old_jiffies = jiffies; + + data = gpiod_get_value(drvdata->gpio_data); + if (unlikely(data < 0)) { + dev_err(drvdata->dev, "RX: failed to get data gpio val: %d\n", + data); + goto err; + } + + switch (cnt) { + case PS2_START_BIT: + /* start bit should be low */ + if (unlikely(data)) { + dev_err(drvdata->dev, "RX: start bit should be low\n"); + goto err; + } + break; + case PS2_DATA_BIT0: + case PS2_DATA_BIT1: + case PS2_DATA_BIT2: + case PS2_DATA_BIT3: + case PS2_DATA_BIT4: + case PS2_DATA_BIT5: + case PS2_DATA_BIT6: + case PS2_DATA_BIT7: + /* processing data bits */ + if (data) + byte |= (data << (cnt - 1)); + break; + case PS2_PARITY_BIT: + /* check odd parity */ + if 
(!((hweight8(byte) & 1) ^ data)) { + rxflags |= SERIO_PARITY; + dev_warn(drvdata->dev, "RX: parity error\n"); + if (!drvdata->write_enable) + goto err; + } + + /* Do not send spurious ACK's and NACK's when write fn is + * not provided. + */ + if (!drvdata->write_enable) { + if (byte == PS2_DEV_RET_NACK) + goto err; + else if (byte == PS2_DEV_RET_ACK) + break; + } + + /* Let's send the data without waiting for the stop bit to be + * sent. It may happen that we miss the stop bit. When this + * happens we have no way to recover from this, certainly + * missing the parity bit would be recognized when processing + * the stop bit. When missing both, data is lost. + */ + serio_interrupt(drvdata->serio, byte, rxflags); + dev_dbg(drvdata->dev, "RX: sending byte 0x%x\n", byte); + break; + case PS2_STOP_BIT: + /* stop bit should be high */ + if (unlikely(!data)) { + dev_err(drvdata->dev, "RX: stop bit should be high\n"); + goto err; + } + cnt = byte = 0; + old_jiffies = 0; + goto end; /* success */ + default: + dev_err(drvdata->dev, "RX: got out of sync with the device\n"); + goto err; + } + + cnt++; + goto end; /* success */ + +err: + cnt = byte = 0; + old_jiffies = 0; + __ps2_gpio_write(drvdata->serio, PS2_CMD_RESEND); +end: + drvdata->rx_cnt = cnt; + drvdata->rx_byte = byte; + return IRQ_HANDLED; +} + +static irqreturn_t ps2_gpio_irq_tx(struct ps2_gpio_data *drvdata) +{ + unsigned char byte, cnt; + int data; + static unsigned long old_jiffies; + + cnt = drvdata->tx_cnt; + byte = drvdata->tx_byte; + + if (old_jiffies == 0) + old_jiffies = jiffies; + + if ((jiffies - old_jiffies) > usecs_to_jiffies(100)) { + dev_err(drvdata->dev, + "TX: timeout, probably we missed an interrupt\n"); + goto err; + } + old_jiffies = jiffies; + + switch (cnt) { + case PS2_START_BIT: + /* should never happen */ + dev_err(drvdata->dev, + "TX: start bit should have been sent already\n"); + goto err; + case PS2_DATA_BIT0: + case PS2_DATA_BIT1: + case PS2_DATA_BIT2: + case PS2_DATA_BIT3: + case PS2_DATA_BIT4: + case PS2_DATA_BIT5: + case PS2_DATA_BIT6: + case PS2_DATA_BIT7: + data = byte & BIT(cnt - 1); + gpiod_set_value(drvdata->gpio_data, data); + break; + case PS2_PARITY_BIT: + /* do odd parity */ + data = !(hweight8(byte) & 1); + gpiod_set_value(drvdata->gpio_data, data); + break; + case PS2_STOP_BIT: + /* release data line to generate stop bit */ + gpiod_direction_input(drvdata->gpio_data); + break; + case PS2_TX_TIMEOUT: + /* Devices generate one extra clock pulse before sending the + * acknowledgment. + */ + break; + case PS2_ACK_BIT: + gpiod_direction_input(drvdata->gpio_data); + data = gpiod_get_value(drvdata->gpio_data); + if (data) { + dev_warn(drvdata->dev, "TX: received NACK, retry\n"); + goto err; + } + + drvdata->mode = PS2_MODE_RX; + complete(&drvdata->tx_done); + + cnt = 1; + old_jiffies = 0; + goto end; /* success */ + default: + /* Probably we missed the stop bit. Therefore we release data + * line and try again. + */ + gpiod_direction_input(drvdata->gpio_data); + dev_err(drvdata->dev, "TX: got out of sync with the device\n"); + goto err; + } + + cnt++; + goto end; /* success */ + +err: + cnt = 1; + old_jiffies = 0; + gpiod_direction_input(drvdata->gpio_data); + __ps2_gpio_write(drvdata->serio, drvdata->tx_byte); +end: + drvdata->tx_cnt = cnt; + return IRQ_HANDLED; +} + +static irqreturn_t ps2_gpio_irq(int irq, void *dev_id) +{ + struct ps2_gpio_data *drvdata = dev_id; + + return drvdata->mode ? 
ps2_gpio_irq_tx(drvdata) : + ps2_gpio_irq_rx(drvdata); +} + +static int ps2_gpio_get_props(struct device *dev, + struct ps2_gpio_data *drvdata) +{ + drvdata->gpio_data = devm_gpiod_get(dev, "data", GPIOD_IN); + if (IS_ERR(drvdata->gpio_data)) { + dev_err(dev, "failed to request data gpio: %ld", + PTR_ERR(drvdata->gpio_data)); + return PTR_ERR(drvdata->gpio_data); + } + + drvdata->gpio_clk = devm_gpiod_get(dev, "clk", GPIOD_IN); + if (IS_ERR(drvdata->gpio_clk)) { + dev_err(dev, "failed to request clock gpio: %ld", + PTR_ERR(drvdata->gpio_clk)); + return PTR_ERR(drvdata->gpio_clk); + } + + drvdata->write_enable = device_property_read_bool(dev, + "write-enable"); + + return 0; +} + +static int ps2_gpio_probe(struct platform_device *pdev) +{ + struct ps2_gpio_data *drvdata; + struct serio *serio; + struct device *dev = &pdev->dev; + int error; + + drvdata = devm_kzalloc(dev, sizeof(struct ps2_gpio_data), GFP_KERNEL); + serio = kzalloc(sizeof(struct serio), GFP_KERNEL); + if (!drvdata || !serio) { + error = -ENOMEM; + goto err_free_serio; + } + + error = ps2_gpio_get_props(dev, drvdata); + if (error) + goto err_free_serio; + + if (gpiod_cansleep(drvdata->gpio_data) || + gpiod_cansleep(drvdata->gpio_clk)) { + dev_err(dev, "GPIO data or clk are connected via slow bus\n"); + error = -EINVAL; + } + + drvdata->irq = platform_get_irq(pdev, 0); + if (drvdata->irq < 0) { + dev_err(dev, "failed to get irq from platform resource: %d\n", + drvdata->irq); + error = drvdata->irq; + goto err_free_serio; + } + + error = devm_request_irq(dev, drvdata->irq, ps2_gpio_irq, + IRQF_NO_THREAD, DRIVER_NAME, drvdata); + if (error) { + dev_err(dev, "failed to request irq %d: %d\n", + drvdata->irq, error); + goto err_free_serio; + } + + /* Keep irq disabled until serio->open is called. */ + disable_irq(drvdata->irq); + + serio->id.type = SERIO_8042; + serio->open = ps2_gpio_open; + serio->close = ps2_gpio_close; + /* Write can be enabled in platform/dt data, but possibly it will not + * work because of the tough timings. + */ + serio->write = drvdata->write_enable ? ps2_gpio_write : NULL; + serio->port_data = drvdata; + serio->dev.parent = dev; + strlcpy(serio->name, dev_name(dev), sizeof(serio->name)); + strlcpy(serio->phys, dev_name(dev), sizeof(serio->phys)); + + drvdata->serio = serio; + drvdata->dev = dev; + drvdata->mode = PS2_MODE_RX; + + /* Tx count always starts at 1, as the start bit is sent implicitly by + * host-to-device communication initialization. 
+ */ + drvdata->tx_cnt = 1; + + INIT_DELAYED_WORK(&drvdata->tx_work, ps2_gpio_tx_work_fn); + init_completion(&drvdata->tx_done); + mutex_init(&drvdata->tx_mutex); + + serio_register_port(serio); + platform_set_drvdata(pdev, drvdata); + + return 0; /* success */ + +err_free_serio: + kfree(serio); + return error; +} + +static int ps2_gpio_remove(struct platform_device *pdev) +{ + struct ps2_gpio_data *drvdata = platform_get_drvdata(pdev); + + serio_unregister_port(drvdata->serio); + return 0; +} + +#if defined(CONFIG_OF) +static const struct of_device_id ps2_gpio_match[] = { + { .compatible = "ps2-gpio", }, + { }, +}; +MODULE_DEVICE_TABLE(of, ps2_gpio_match); +#endif + +static struct platform_driver ps2_gpio_driver = { + .probe = ps2_gpio_probe, + .remove = ps2_gpio_remove, + .driver = { + .name = DRIVER_NAME, + .of_match_table = of_match_ptr(ps2_gpio_match), + }, +}; +module_platform_driver(ps2_gpio_driver); + +MODULE_AUTHOR("Danilo Krummrich "); +MODULE_DESCRIPTION("GPIO PS2 driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c index 30d6230d48f7..24a90c8db5b3 100644 --- a/drivers/input/serio/serio.c +++ b/drivers/input/serio/serio.c @@ -469,7 +469,7 @@ static struct attribute *serio_device_id_attrs[] = { NULL }; -static struct attribute_group serio_id_attr_group = { +static const struct attribute_group serio_id_attr_group = { .name = "id", .attrs = serio_device_id_attrs, }; @@ -489,7 +489,7 @@ static struct attribute *serio_device_attrs[] = { NULL }; -static struct attribute_group serio_device_attr_group = { +static const struct attribute_group serio_device_attr_group = { .attrs = serio_device_attrs, }; diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c index 71ef5d65a0c6..516f9fe77a17 100644 --- a/drivers/input/serio/serio_raw.c +++ b/drivers/input/serio/serio_raw.c @@ -410,7 +410,7 @@ static void serio_raw_disconnect(struct serio *serio) serio_set_drvdata(serio, NULL); } -static struct serio_device_id serio_raw_serio_ids[] = { +static const struct serio_device_id serio_raw_serio_ids[] = { { .type = SERIO_8042, .proto = SERIO_ANY, diff --git a/drivers/input/serio/xilinx_ps2.c b/drivers/input/serio/xilinx_ps2.c index 14c40892ed82..07de1b49293c 100644 --- a/drivers/input/serio/xilinx_ps2.c +++ b/drivers/input/serio/xilinx_ps2.c @@ -45,8 +45,10 @@ #define XPS2_STATUS_RX_FULL 0x00000001 /* Receive Full */ #define XPS2_STATUS_TX_FULL 0x00000002 /* Transmit Full */ -/* Bit definitions for ISR/IER registers. Both the registers have the same bit - * definitions and are only defined once. */ +/* + * Bit definitions for ISR/IER registers. Both the registers have the same bit + * definitions and are only defined once. + */ #define XPS2_IPIXR_WDT_TOUT 0x00000001 /* Watchdog Timeout Interrupt */ #define XPS2_IPIXR_TX_NOACK 0x00000002 /* Transmit No ACK Interrupt */ #define XPS2_IPIXR_TX_ACK 0x00000004 /* Transmit ACK (Data) Interrupt */ @@ -292,8 +294,10 @@ static int xps2_of_probe(struct platform_device *ofdev) /* Disable all the interrupts, just in case */ out_be32(drvdata->base_address + XPS2_IPIER_OFFSET, 0); - /* Reset the PS2 device and abort any current transaction, to make sure - * we have the PS2 in a good state */ + /* + * Reset the PS2 device and abort any current transaction, + * to make sure we have the PS2 in a good state. 
+ */ out_be32(drvdata->base_address + XPS2_SRST_OFFSET, XPS2_SRST_RESET); dev_info(dev, "Xilinx PS2 at 0x%08llX mapped to 0x%p, irq=%d\n", diff --git a/drivers/input/tablet/acecad.c b/drivers/input/tablet/acecad.c index e86e377a90f5..aebb3f9090cd 100644 --- a/drivers/input/tablet/acecad.c +++ b/drivers/input/tablet/acecad.c @@ -260,7 +260,7 @@ static void usb_acecad_disconnect(struct usb_interface *intf) kfree(acecad); } -static struct usb_device_id usb_acecad_id_table [] = { +static const struct usb_device_id usb_acecad_id_table[] = { { USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_FLAIR), .driver_info = 0 }, { USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_302), .driver_info = 1 }, { } diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c index d67547bded3e..0b55e1f375b3 100644 --- a/drivers/input/tablet/aiptek.c +++ b/drivers/input/tablet/aiptek.c @@ -1676,7 +1676,7 @@ static struct attribute *aiptek_attributes[] = { NULL }; -static struct attribute_group aiptek_attribute_group = { +static const struct attribute_group aiptek_attribute_group = { .attrs = aiptek_attributes, }; diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c index 4d9d64908b59..a41c3ff7c9af 100644 --- a/drivers/input/tablet/kbtab.c +++ b/drivers/input/tablet/kbtab.c @@ -88,7 +88,7 @@ static void kbtab_irq(struct urb *urb) __func__, retval); } -static struct usb_device_id kbtab_ids[] = { +static const struct usb_device_id kbtab_ids[] = { { USB_DEVICE(USB_VENDOR_ID_KBGEAR, 0x1001), .driver_info = 0 }, { } }; diff --git a/drivers/input/tablet/wacom_serial4.c b/drivers/input/tablet/wacom_serial4.c index 20ab802461e7..38bfaca48eab 100644 --- a/drivers/input/tablet/wacom_serial4.c +++ b/drivers/input/tablet/wacom_serial4.c @@ -594,7 +594,7 @@ static int wacom_connect(struct serio *serio, struct serio_driver *drv) return err; } -static struct serio_device_id wacom_serio_ids[] = { +static const struct serio_device_id wacom_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_WACOM_IV, diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index 735a0be1ad95..a2f45aefce08 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c @@ -499,7 +499,7 @@ static struct attribute *ads7846_attributes[] = { NULL, }; -static struct attribute_group ads7846_attr_group = { +static const struct attribute_group ads7846_attr_group = { .attrs = ads7846_attributes, .is_visible = ads7846_is_visible, }; @@ -599,7 +599,7 @@ static struct attribute *ads784x_attributes[] = { NULL, }; -static struct attribute_group ads784x_attr_group = { +static const struct attribute_group ads784x_attr_group = { .attrs = ads784x_attributes, }; diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c index dd042a9b0aaa..7659bc48f1db 100644 --- a/drivers/input/touchscreen/atmel_mxt_ts.c +++ b/drivers/input/touchscreen/atmel_mxt_ts.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -300,6 +301,7 @@ struct mxt_data { u8 multitouch; struct t7_config t7_cfg; struct mxt_dbg dbg; + struct gpio_desc *reset_gpio; /* Cached parameters from object table */ u16 T5_address; @@ -3117,11 +3119,9 @@ static int mxt_probe(struct i2c_client *client, const struct i2c_device_id *id) if (IS_ERR(pdata)) return PTR_ERR(pdata); - data = kzalloc(sizeof(struct mxt_data), GFP_KERNEL); - if (!data) { - dev_err(&client->dev, "Failed to allocate memory\n"); + data = devm_kzalloc(&client->dev, sizeof(struct mxt_data), 
GFP_KERNEL); + if (!data) return -ENOMEM; - } snprintf(data->phys, sizeof(data->phys), "i2c-%u-%04x/input0", client->adapter->nr, client->addr); @@ -3135,19 +3135,40 @@ static int mxt_probe(struct i2c_client *client, const struct i2c_device_id *id) init_completion(&data->reset_completion); init_completion(&data->crc_completion); - error = request_threaded_irq(client->irq, NULL, mxt_interrupt, - pdata->irqflags | IRQF_ONESHOT, - client->name, data); + data->reset_gpio = devm_gpiod_get_optional(&client->dev, + "reset", GPIOD_OUT_LOW); + if (IS_ERR(data->reset_gpio)) { + error = PTR_ERR(data->reset_gpio); + dev_err(&client->dev, "Failed to get reset gpio: %d\n", error); + return error; + } + + error = devm_request_threaded_irq(&client->dev, client->irq, + NULL, mxt_interrupt, + pdata->irqflags | IRQF_ONESHOT, + client->name, data); if (error) { dev_err(&client->dev, "Failed to register interrupt\n"); - goto err_free_mem; + return error; + } + + if (data->reset_gpio) { + data->in_bootloader = true; + msleep(MXT_RESET_TIME); + reinit_completion(&data->bl_completion); + gpiod_set_value(data->reset_gpio, 1); + error = mxt_wait_for_completion(data, &data->bl_completion, + MXT_RESET_TIMEOUT); + if (error) + return error; + data->in_bootloader = false; } disable_irq(client->irq); error = mxt_initialize(data); if (error) - goto err_free_irq; + return error; error = sysfs_create_group(&client->dev.kobj, &mxt_attr_group); if (error) { @@ -3161,10 +3182,6 @@ static int mxt_probe(struct i2c_client *client, const struct i2c_device_id *id) err_free_object: mxt_free_input_device(data); mxt_free_object_table(data); -err_free_irq: - free_irq(client->irq, data); -err_free_mem: - kfree(data); return error; } @@ -3172,11 +3189,10 @@ static int mxt_remove(struct i2c_client *client) { struct mxt_data *data = i2c_get_clientdata(client); + disable_irq(data->irq); sysfs_remove_group(&client->dev.kobj, &mxt_attr_group); - free_irq(data->irq, data); mxt_free_input_device(data); mxt_free_object_table(data); - kfree(data); return 0; } diff --git a/drivers/input/touchscreen/dynapro.c b/drivers/input/touchscreen/dynapro.c index 86237a910876..5b1b66fffbe3 100644 --- a/drivers/input/touchscreen/dynapro.c +++ b/drivers/input/touchscreen/dynapro.c @@ -164,7 +164,7 @@ static int dynapro_connect(struct serio *serio, struct serio_driver *drv) * The serio driver structure. 
*/ -static struct serio_device_id dynapro_serio_ids[] = { +static const struct serio_device_id dynapro_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_DYNAPRO, diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c index f872817e81e4..5bf63f76ddda 100644 --- a/drivers/input/touchscreen/edt-ft5x06.c +++ b/drivers/input/touchscreen/edt-ft5x06.c @@ -593,7 +593,7 @@ static int edt_ft5x06_work_mode(struct edt_ft5x06_ts_data *tsdata) tsdata->gain); edt_ft5x06_register_write(tsdata, reg_addr->reg_offset, tsdata->offset); - if (reg_addr->reg_report_rate) + if (reg_addr->reg_report_rate != NO_REGISTER) edt_ft5x06_register_write(tsdata, reg_addr->reg_report_rate, tsdata->report_rate); @@ -874,6 +874,7 @@ edt_ft5x06_ts_set_regs(struct edt_ft5x06_ts_data *tsdata) case M09: reg_addr->reg_threshold = M09_REGISTER_THRESHOLD; + reg_addr->reg_report_rate = NO_REGISTER; reg_addr->reg_gain = M09_REGISTER_GAIN; reg_addr->reg_offset = M09_REGISTER_OFFSET; reg_addr->reg_num_x = M09_REGISTER_NUM_X; diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c index 872750eeca93..0f4cda7282a2 100644 --- a/drivers/input/touchscreen/elants_i2c.c +++ b/drivers/input/touchscreen/elants_i2c.c @@ -1066,7 +1066,7 @@ static struct attribute *elants_attributes[] = { NULL }; -static struct attribute_group elants_attribute_group = { +static const struct attribute_group elants_attribute_group = { .attrs = elants_attributes, }; diff --git a/drivers/input/touchscreen/elo.c b/drivers/input/touchscreen/elo.c index 8051a4b704ea..83433e8efff7 100644 --- a/drivers/input/touchscreen/elo.c +++ b/drivers/input/touchscreen/elo.c @@ -381,7 +381,7 @@ static int elo_connect(struct serio *serio, struct serio_driver *drv) * The serio driver structure. */ -static struct serio_device_id elo_serio_ids[] = { +static const struct serio_device_id elo_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_ELO, diff --git a/drivers/input/touchscreen/fujitsu_ts.c b/drivers/input/touchscreen/fujitsu_ts.c index d0e46a7e183b..a0fbb454499d 100644 --- a/drivers/input/touchscreen/fujitsu_ts.c +++ b/drivers/input/touchscreen/fujitsu_ts.c @@ -151,7 +151,7 @@ static int fujitsu_connect(struct serio *serio, struct serio_driver *drv) /* * The serio driver structure. */ -static struct serio_device_id fujitsu_serio_ids[] = { +static const struct serio_device_id fujitsu_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_FUJITSU, diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c index 240b16f3ee97..32d2762448aa 100644 --- a/drivers/input/touchscreen/goodix.c +++ b/drivers/input/touchscreen/goodix.c @@ -267,6 +267,12 @@ static void goodix_process_events(struct goodix_ts_data *ts) if (touch_num < 0) return; + /* + * Bit 4 of the first byte reports the status of the capacitive + * Windows/Home button. 
+ */ + input_report_key(ts->input_dev, KEY_LEFTMETA, point_data[0] & BIT(4)); + for (i = 0; i < touch_num; i++) goodix_ts_report_touch(ts, &point_data[1 + GOODIX_CONTACT_SIZE * i]); @@ -612,6 +618,9 @@ static int goodix_request_input_dev(struct goodix_ts_data *ts) ts->input_dev->id.product = ts->id; ts->input_dev->id.version = ts->version; + /* Capacitive Windows/Home button on some devices */ + input_set_capability(ts->input_dev, EV_KEY, KEY_LEFTMETA); + error = input_register_device(ts->input_dev); if (error) { dev_err(&ts->client->dev, diff --git a/drivers/input/touchscreen/gunze.c b/drivers/input/touchscreen/gunze.c index e2ee62615273..481586909d28 100644 --- a/drivers/input/touchscreen/gunze.c +++ b/drivers/input/touchscreen/gunze.c @@ -162,7 +162,7 @@ static int gunze_connect(struct serio *serio, struct serio_driver *drv) * The serio driver structure. */ -static struct serio_device_id gunze_serio_ids[] = { +static const struct serio_device_id gunze_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_GUNZE, diff --git a/drivers/input/touchscreen/hampshire.c b/drivers/input/touchscreen/hampshire.c index ecb1e0e01328..eb052d559e54 100644 --- a/drivers/input/touchscreen/hampshire.c +++ b/drivers/input/touchscreen/hampshire.c @@ -163,7 +163,7 @@ static int hampshire_connect(struct serio *serio, struct serio_driver *drv) * The serio driver structure. */ -static struct serio_device_id hampshire_serio_ids[] = { +static const struct serio_device_id hampshire_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_HAMPSHIRE, diff --git a/drivers/input/touchscreen/htcpen.c b/drivers/input/touchscreen/htcpen.c index 92e2243fb77d..8fd909285877 100644 --- a/drivers/input/touchscreen/htcpen.c +++ b/drivers/input/touchscreen/htcpen.c @@ -219,7 +219,7 @@ static struct isa_driver htcpen_isa_driver = { } }; -static struct dmi_system_id htcshift_dmi_table[] __initdata = { +static const struct dmi_system_id htcshift_dmi_table[] __initconst = { { .ident = "Shift", .matches = { diff --git a/drivers/input/touchscreen/inexio.c b/drivers/input/touchscreen/inexio.c index adb80b65a259..b9bc56233ccc 100644 --- a/drivers/input/touchscreen/inexio.c +++ b/drivers/input/touchscreen/inexio.c @@ -165,7 +165,7 @@ static int inexio_connect(struct serio *serio, struct serio_driver *drv) * The serio driver structure. */ -static struct serio_device_id inexio_serio_ids[] = { +static const struct serio_device_id inexio_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_INEXIO, diff --git a/drivers/input/touchscreen/mtouch.c b/drivers/input/touchscreen/mtouch.c index 9b5552a26169..a3707fad4d1c 100644 --- a/drivers/input/touchscreen/mtouch.c +++ b/drivers/input/touchscreen/mtouch.c @@ -178,7 +178,7 @@ static int mtouch_connect(struct serio *serio, struct serio_driver *drv) * The serio driver structure. 
*/ -static struct serio_device_id mtouch_serio_ids[] = { +static const struct serio_device_id mtouch_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_MICROTOUCH, diff --git a/drivers/input/touchscreen/mxs-lradc-ts.c b/drivers/input/touchscreen/mxs-lradc-ts.c index 58c016cd6809..3707e927f770 100644 --- a/drivers/input/touchscreen/mxs-lradc-ts.c +++ b/drivers/input/touchscreen/mxs-lradc-ts.c @@ -30,7 +30,7 @@ #include #include -const char *mxs_lradc_ts_irq_names[] = { +static const char * const mxs_lradc_ts_irq_names[] = { "mxs-lradc-touchscreen", "mxs-lradc-channel6", "mxs-lradc-channel7", @@ -630,9 +630,11 @@ static int mxs_lradc_ts_probe(struct platform_device *pdev) spin_lock_init(&ts->lock); iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!iores) + return -EINVAL; ts->base = devm_ioremap(dev, iores->start, resource_size(iores)); - if (IS_ERR(ts->base)) - return PTR_ERR(ts->base); + if (!ts->base) + return -ENOMEM; ret = of_property_read_u32(node, "fsl,lradc-touchscreen-wires", &ts_wires); diff --git a/drivers/input/touchscreen/penmount.c b/drivers/input/touchscreen/penmount.c index 417d87379265..6e6d7fd98cd2 100644 --- a/drivers/input/touchscreen/penmount.c +++ b/drivers/input/touchscreen/penmount.c @@ -293,7 +293,7 @@ static int pm_connect(struct serio *serio, struct serio_driver *drv) * The serio driver structure. */ -static struct serio_device_id pm_serio_ids[] = { +static const struct serio_device_id pm_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_PENMOUNT, diff --git a/drivers/input/touchscreen/raydium_i2c_ts.c b/drivers/input/touchscreen/raydium_i2c_ts.c index 1252e49ccfa1..4f1d3fd5d412 100644 --- a/drivers/input/touchscreen/raydium_i2c_ts.c +++ b/drivers/input/touchscreen/raydium_i2c_ts.c @@ -939,7 +939,7 @@ static struct attribute *raydium_i2c_attributes[] = { NULL }; -static struct attribute_group raydium_i2c_attribute_group = { +static const struct attribute_group raydium_i2c_attribute_group = { .attrs = raydium_i2c_attributes, }; diff --git a/drivers/input/touchscreen/sun4i-ts.c b/drivers/input/touchscreen/sun4i-ts.c index d07dd29d4848..d2e14d9e5975 100644 --- a/drivers/input/touchscreen/sun4i-ts.c +++ b/drivers/input/touchscreen/sun4i-ts.c @@ -206,7 +206,7 @@ static int sun4i_get_tz_temp(void *data, int *temp) return sun4i_get_temp(data, temp); } -static struct thermal_zone_of_device_ops sun4i_ts_tz_ops = { +static const struct thermal_zone_of_device_ops sun4i_ts_tz_ops = { .get_temp = sun4i_get_tz_temp, }; diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c index 128e5bd74720..f16f8358c70a 100644 --- a/drivers/input/touchscreen/sur40.c +++ b/drivers/input/touchscreen/sur40.c @@ -59,7 +59,7 @@ struct sur40_blob { __le16 blob_id; u8 action; /* 0x02 = enter/exit, 0x03 = update (?) */ - u8 unknown; /* always 0x01 or 0x02 (no idea what this is?) */ + u8 type; /* bitmask (0x01 blob, 0x02 touch, 0x04 tag) */ __le16 bb_pos_x; /* upper left corner of bounding box */ __le16 bb_pos_y; @@ -133,12 +133,19 @@ struct sur40_image_header { /* control commands */ #define SUR40_GET_VERSION 0xb0 /* 12 bytes string */ -#define SUR40_UNKNOWN1 0xb3 /* 5 bytes */ -#define SUR40_UNKNOWN2 0xc1 /* 24 bytes */ +#define SUR40_ACCEL_CAPS 0xb3 /* 5 bytes */ +#define SUR40_SENSOR_CAPS 0xc1 /* 24 bytes */ + +#define SUR40_POKE 0xc5 /* poke register byte */ +#define SUR40_PEEK 0xc4 /* 48 bytes registers */ #define SUR40_GET_STATE 0xc5 /* 4 bytes state (?) 
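The mxs-lradc-ts hunk above fixes two probe-path issues at once: platform_get_resource() was never checked for NULL, and devm_ioremap() was checked with IS_ERR() although it reports failure by returning NULL rather than an ERR_PTR value (unlike devm_ioremap_resource()). A sketch of the corrected idiom, with an illustrative probe function:

#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *iores;
	void __iomem *base;

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!iores)
		return -EINVAL;

	base = devm_ioremap(&pdev->dev, iores->start, resource_size(iores));
	if (!base)			/* NULL on failure, not ERR_PTR() */
		return -ENOMEM;

	return 0;
}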
*/ #define SUR40_GET_SENSORS 0xb1 /* 8 bytes sensors */ +#define SUR40_BLOB 0x01 +#define SUR40_TOUCH 0x02 +#define SUR40_TAG 0x04 + static const struct v4l2_pix_format sur40_pix_format[] = { { .pixelformat = V4L2_TCH_FMT_TU08, @@ -238,11 +245,11 @@ static int sur40_init(struct sur40_state *dev) if (result < 0) goto error; - result = sur40_command(dev, SUR40_UNKNOWN2, 0x00, buffer, 24); + result = sur40_command(dev, SUR40_SENSOR_CAPS, 0x00, buffer, 24); if (result < 0) goto error; - result = sur40_command(dev, SUR40_UNKNOWN1, 0x00, buffer, 5); + result = sur40_command(dev, SUR40_ACCEL_CAPS, 0x00, buffer, 5); if (result < 0) goto error; @@ -289,20 +296,24 @@ static void sur40_close(struct input_polled_dev *polldev) static void sur40_report_blob(struct sur40_blob *blob, struct input_dev *input) { int wide, major, minor; + int bb_size_x, bb_size_y, pos_x, pos_y, ctr_x, ctr_y, slotnum; - int bb_size_x = le16_to_cpu(blob->bb_size_x); - int bb_size_y = le16_to_cpu(blob->bb_size_y); + if (blob->type != SUR40_TOUCH) + return; - int pos_x = le16_to_cpu(blob->pos_x); - int pos_y = le16_to_cpu(blob->pos_y); - - int ctr_x = le16_to_cpu(blob->ctr_x); - int ctr_y = le16_to_cpu(blob->ctr_y); - - int slotnum = input_mt_get_slot_by_key(input, blob->blob_id); + slotnum = input_mt_get_slot_by_key(input, blob->blob_id); if (slotnum < 0 || slotnum >= MAX_CONTACTS) return; + bb_size_x = le16_to_cpu(blob->bb_size_x); + bb_size_y = le16_to_cpu(blob->bb_size_y); + + pos_x = le16_to_cpu(blob->pos_x); + pos_y = le16_to_cpu(blob->pos_y); + + ctr_x = le16_to_cpu(blob->ctr_x); + ctr_y = le16_to_cpu(blob->ctr_y); + input_mt_slot(input, slotnum); input_mt_report_slot_state(input, MT_TOOL_FINGER, 1); wide = (bb_size_x > bb_size_y); @@ -367,10 +378,13 @@ static void sur40_poll(struct input_polled_dev *polldev) /* * Sanity check. when video data is also being retrieved, the * packet ID will usually increase in the middle of a series - * instead of at the end. - */ + * instead of at the end. However, the data is still consistent, + * so the packet ID is probably just valid for the first packet + * in a series. + if (packet_id != le32_to_cpu(header->packet_id)) dev_dbg(sur40->dev, "packet ID mismatch\n"); + */ packet_blobs = result / sizeof(struct sur40_blob); dev_dbg(sur40->dev, "received %d blobs\n", packet_blobs); diff --git a/drivers/input/touchscreen/surface3_spi.c b/drivers/input/touchscreen/surface3_spi.c index e12fb9b63f31..5db0f1c4ef38 100644 --- a/drivers/input/touchscreen/surface3_spi.c +++ b/drivers/input/touchscreen/surface3_spi.c @@ -173,7 +173,7 @@ static void surface3_spi_process_pen(struct surface3_ts_data *ts_data, u8 *data) static void surface3_spi_process(struct surface3_ts_data *ts_data) { - const char header[] = { + static const char header[] = { 0xff, 0xff, 0xff, 0xff, 0xa5, 0x5a, 0xe7, 0x7e, 0x01 }; u8 *data = ts_data->rd_buf; diff --git a/drivers/input/touchscreen/touchit213.c b/drivers/input/touchscreen/touchit213.c index c27cf8f3d1ca..98a16698be8e 100644 --- a/drivers/input/touchscreen/touchit213.c +++ b/drivers/input/touchscreen/touchit213.c @@ -192,7 +192,7 @@ static int touchit213_connect(struct serio *serio, struct serio_driver *drv) * The serio driver structure. 
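The surface3_spi hunk above marks the header lookup table static const, so the nine-byte initialiser lives once in read-only data instead of being rebuilt on the stack on every call. A standalone illustration of the same idea (using the byte sequence shown in the hunk; the helper itself is illustrative):

#include <string.h>

static int frame_has_valid_header(const unsigned char *buf)
{
	static const unsigned char header[] = {
		0xff, 0xff, 0xff, 0xff, 0xa5, 0x5a, 0xe7, 0x7e, 0x01
	};

	return memcmp(buf, header, sizeof(header)) == 0;
}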
*/ -static struct serio_device_id touchit213_serio_ids[] = { +static const struct serio_device_id touchit213_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_TOUCHIT213, diff --git a/drivers/input/touchscreen/touchright.c b/drivers/input/touchscreen/touchright.c index 4000e5205407..45c325c33f21 100644 --- a/drivers/input/touchscreen/touchright.c +++ b/drivers/input/touchscreen/touchright.c @@ -152,7 +152,7 @@ static int tr_connect(struct serio *serio, struct serio_driver *drv) * The serio driver structure. */ -static struct serio_device_id tr_serio_ids[] = { +static const struct serio_device_id tr_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_TOUCHRIGHT, diff --git a/drivers/input/touchscreen/touchwin.c b/drivers/input/touchscreen/touchwin.c index ba90f447df75..2ba6b4ca28cb 100644 --- a/drivers/input/touchscreen/touchwin.c +++ b/drivers/input/touchscreen/touchwin.c @@ -159,7 +159,7 @@ static int tw_connect(struct serio *serio, struct serio_driver *drv) * The serio driver structure. */ -static struct serio_device_id tw_serio_ids[] = { +static const struct serio_device_id tw_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_TOUCHWIN, diff --git a/drivers/input/touchscreen/tsc40.c b/drivers/input/touchscreen/tsc40.c index 29687872cb94..d4ae4ba84c1f 100644 --- a/drivers/input/touchscreen/tsc40.c +++ b/drivers/input/touchscreen/tsc40.c @@ -141,7 +141,7 @@ static void tsc_disconnect(struct serio *serio) serio_set_drvdata(serio, NULL); } -static struct serio_device_id tsc_serio_ids[] = { +static const struct serio_device_id tsc_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_TSC40, diff --git a/drivers/input/touchscreen/ucb1400_ts.c b/drivers/input/touchscreen/ucb1400_ts.c index c1e23cfc6155..1a86cbd9326f 100644 --- a/drivers/input/touchscreen/ucb1400_ts.c +++ b/drivers/input/touchscreen/ucb1400_ts.c @@ -414,7 +414,7 @@ static int __maybe_unused ucb1400_ts_suspend(struct device *dev) mutex_lock(&idev->mutex); if (idev->users) - ucb1400_ts_start(ucb); + ucb1400_ts_stop(ucb); mutex_unlock(&idev->mutex); return 0; @@ -428,7 +428,7 @@ static int __maybe_unused ucb1400_ts_resume(struct device *dev) mutex_lock(&idev->mutex); if (idev->users) - ucb1400_ts_stop(ucb); + ucb1400_ts_start(ucb); mutex_unlock(&idev->mutex); return 0; diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c index 85e95725d0df..3715d1eace92 100644 --- a/drivers/input/touchscreen/wacom_w8001.c +++ b/drivers/input/touchscreen/wacom_w8001.c @@ -681,7 +681,7 @@ static int w8001_connect(struct serio *serio, struct serio_driver *drv) return err; } -static struct serio_device_id w8001_serio_ids[] = { +static const struct serio_device_id w8001_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_W8001, diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index f73ff28f77e2..f3a21343e636 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -76,6 +76,8 @@ config IOMMU_DMA config FSL_PAMU bool "Freescale IOMMU support" + depends on PCI + depends on PHYS_64BIT depends on PPC_E500MC || (COMPILE_TEST && PPC) select IOMMU_API select GENERIC_ALLOCATOR @@ -253,6 +255,7 @@ config TEGRA_IOMMU_SMMU config EXYNOS_IOMMU bool "Exynos IOMMU Support" depends on ARCH_EXYNOS && MMU + depends on !CPU_BIG_ENDIAN # revisit driver if we can enable big-endian ptes select IOMMU_API select ARM_DMA_USE_IOMMU help @@ -275,7 +278,7 @@ config EXYNOS_IOMMU_DEBUG config IPMMU_VMSA bool "Renesas VMSA-compatible IPMMU" depends on ARM || IOMMU_DMA - depends on ARCH_RENESAS || COMPILE_TEST + 
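The ucb1400_ts hunk above swaps the two calls so that suspend stops the touchscreen and resume restarts it (they were previously inverted). A sketch of the corrected pairing; example_ts, example_stop() and example_start() are illustrative stand-ins for the driver's own types and helpers, and the idev->users test keeps the hardware untouched while nobody holds the input device open:

static int __maybe_unused example_ts_suspend(struct device *dev)
{
	struct example_ts *ts = dev_get_drvdata(dev);
	struct input_dev *idev = ts->input;

	mutex_lock(&idev->mutex);
	if (idev->users)
		example_stop(ts);	/* quiesce on suspend ... */
	mutex_unlock(&idev->mutex);

	return 0;
}

static int __maybe_unused example_ts_resume(struct device *dev)
{
	struct example_ts *ts = dev_get_drvdata(dev);
	struct input_dev *idev = ts->input;

	mutex_lock(&idev->mutex);
	if (idev->users)
		example_start(ts);	/* ... restart on resume */
	mutex_unlock(&idev->mutex);

	return 0;
}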
depends on ARCH_RENESAS || (COMPILE_TEST && !GENERIC_ATOMIC64) select IOMMU_API select IOMMU_IO_PGTABLE_LPAE select ARM_DMA_USE_IOMMU @@ -367,4 +370,15 @@ config MTK_IOMMU_V1 if unsure, say N here. +config QCOM_IOMMU + # Note: iommu drivers cannot (yet?) be built as modules + bool "Qualcomm IOMMU Support" + depends on ARCH_QCOM || (COMPILE_TEST && !GENERIC_ATOMIC64) + depends on HAS_DMA + select IOMMU_API + select IOMMU_IO_PGTABLE_LPAE + select ARM_DMA_USE_IOMMU + help + Support for IOMMU on certain Qualcomm SoCs. + endif # IOMMU_SUPPORT diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index 195f7b997d8e..b910aea813a1 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -27,3 +27,4 @@ obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o obj-$(CONFIG_S390_IOMMU) += s390-iommu.o +obj-$(CONFIG_QCOM_IOMMU) += qcom_iommu.o diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 4ad7e5e31943..51f8215877f5 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -102,29 +102,6 @@ int amd_iommu_max_glx_val = -1; static const struct dma_map_ops amd_iommu_dma_ops; -/* - * This struct contains device specific data for the IOMMU - */ -struct iommu_dev_data { - struct list_head list; /* For domain->dev_list */ - struct list_head dev_data_list; /* For global dev_data_list */ - struct protection_domain *domain; /* Domain the device is bound to */ - u16 devid; /* PCI Device ID */ - u16 alias; /* Alias Device ID */ - bool iommu_v2; /* Device can make use of IOMMUv2 */ - bool passthrough; /* Device is identity mapped */ - struct { - bool enabled; - int qdep; - } ats; /* ATS state */ - bool pri_tlp; /* PASID TLB required for - PPR completions */ - u32 errata; /* Bitmap for errata to apply */ - bool use_vapic; /* Enable device to use vapic mode */ - - struct ratelimit_state rs; /* Ratelimit IOPF messages */ -}; - /* * general struct to manage commands send to an IOMMU */ @@ -137,20 +114,7 @@ struct kmem_cache *amd_iommu_irq_cache; static void update_domain(struct protection_domain *domain); static int protection_domain_init(struct protection_domain *domain); static void detach_device(struct device *dev); - -#define FLUSH_QUEUE_SIZE 256 - -struct flush_queue_entry { - unsigned long iova_pfn; - unsigned long pages; - u64 counter; /* Flush counter when this entry was added to the queue */ -}; - -struct flush_queue { - struct flush_queue_entry *entries; - unsigned head, tail; - spinlock_t lock; -}; +static void iova_domain_flush_tlb(struct iova_domain *iovad); /* * Data container for a dma_ops specific protection domain @@ -161,36 +125,6 @@ struct dma_ops_domain { /* IOVA RB-Tree */ struct iova_domain iovad; - - struct flush_queue __percpu *flush_queue; - - /* - * We need two counter here to be race-free wrt. IOTLB flushing and - * adding entries to the flush queue. - * - * The flush_start_cnt is incremented _before_ the IOTLB flush starts. - * New entries added to the flush ring-buffer get their 'counter' value - * from here. This way we can make sure that entries added to the queue - * (or other per-cpu queues of the same domain) while the TLB is about - * to be flushed are not considered to be flushed already. - */ - atomic64_t flush_start_cnt; - - /* - * The flush_finish_cnt is incremented when an IOTLB flush is complete. - * This value is always smaller than flush_start_cnt. 
The queue_add - * function frees all IOVAs that have a counter value smaller than - * flush_finish_cnt. This makes sure that we only free IOVAs that are - * flushed out of the IOTLB of the domain. - */ - atomic64_t flush_finish_cnt; - - /* - * Timer to make sure we don't keep IOVAs around unflushed - * for too long - */ - struct timer_list flush_timer; - atomic_t flush_timer_on; }; static struct iova_domain reserved_iova_ranges; @@ -371,19 +305,25 @@ static u16 get_alias(struct device *dev) static struct iommu_dev_data *find_dev_data(u16 devid) { struct iommu_dev_data *dev_data; + struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; dev_data = search_dev_data(devid); - if (dev_data == NULL) + if (dev_data == NULL) { dev_data = alloc_dev_data(devid); + if (translation_pre_enabled(iommu)) + dev_data->defer_attach = true; + } + return dev_data; } -static struct iommu_dev_data *get_dev_data(struct device *dev) +struct iommu_dev_data *get_dev_data(struct device *dev) { return dev->archdata.iommu; } +EXPORT_SYMBOL(get_dev_data); /* * Find or create an IOMMU group for a acpihid device. @@ -1167,7 +1107,7 @@ static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid) return iommu_queue_command(iommu, &cmd); } -static void iommu_flush_dte_all(struct amd_iommu *iommu) +static void amd_iommu_flush_dte_all(struct amd_iommu *iommu) { u32 devid; @@ -1181,7 +1121,7 @@ static void iommu_flush_dte_all(struct amd_iommu *iommu) * This function uses heavy locking and may disable irqs for some time. But * this is no issue because it is only called during resume. */ -static void iommu_flush_tlb_all(struct amd_iommu *iommu) +static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu) { u32 dom_id; @@ -1195,7 +1135,7 @@ static void iommu_flush_tlb_all(struct amd_iommu *iommu) iommu_completion_wait(iommu); } -static void iommu_flush_all(struct amd_iommu *iommu) +static void amd_iommu_flush_all(struct amd_iommu *iommu) { struct iommu_cmd cmd; @@ -1214,7 +1154,7 @@ static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid) iommu_queue_command(iommu, &cmd); } -static void iommu_flush_irt_all(struct amd_iommu *iommu) +static void amd_iommu_flush_irt_all(struct amd_iommu *iommu) { u32 devid; @@ -1227,11 +1167,11 @@ static void iommu_flush_irt_all(struct amd_iommu *iommu) void iommu_flush_all_caches(struct amd_iommu *iommu) { if (iommu_feature(iommu, FEATURE_IA)) { - iommu_flush_all(iommu); + amd_iommu_flush_all(iommu); } else { - iommu_flush_dte_all(iommu); - iommu_flush_irt_all(iommu); - iommu_flush_tlb_all(iommu); + amd_iommu_flush_dte_all(iommu); + amd_iommu_flush_irt_all(iommu); + amd_iommu_flush_tlb_all(iommu); } } @@ -1539,9 +1479,9 @@ static int iommu_map_page(struct protection_domain *dom, if (count > 1) { __pte = PAGE_SIZE_PTE(__sme_set(phys_addr), page_size); - __pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_P | IOMMU_PTE_FC; + __pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC; } else - __pte = __sme_set(phys_addr) | IOMMU_PTE_P | IOMMU_PTE_FC; + __pte = __sme_set(phys_addr) | IOMMU_PTE_PR | IOMMU_PTE_FC; if (prot & IOMMU_PROT_IR) __pte |= IOMMU_PTE_IR; @@ -1790,178 +1730,19 @@ static void free_gcr3_table(struct protection_domain *domain) free_page((unsigned long)domain->gcr3_tbl); } -static void dma_ops_domain_free_flush_queue(struct dma_ops_domain *dom) -{ - int cpu; - - for_each_possible_cpu(cpu) { - struct flush_queue *queue; - - queue = per_cpu_ptr(dom->flush_queue, cpu); - kfree(queue->entries); - } - - free_percpu(dom->flush_queue); - - dom->flush_queue = NULL; -} - -static int 
dma_ops_domain_alloc_flush_queue(struct dma_ops_domain *dom) -{ - int cpu; - - atomic64_set(&dom->flush_start_cnt, 0); - atomic64_set(&dom->flush_finish_cnt, 0); - - dom->flush_queue = alloc_percpu(struct flush_queue); - if (!dom->flush_queue) - return -ENOMEM; - - /* First make sure everything is cleared */ - for_each_possible_cpu(cpu) { - struct flush_queue *queue; - - queue = per_cpu_ptr(dom->flush_queue, cpu); - queue->head = 0; - queue->tail = 0; - queue->entries = NULL; - } - - /* Now start doing the allocation */ - for_each_possible_cpu(cpu) { - struct flush_queue *queue; - - queue = per_cpu_ptr(dom->flush_queue, cpu); - queue->entries = kzalloc(FLUSH_QUEUE_SIZE * sizeof(*queue->entries), - GFP_KERNEL); - if (!queue->entries) { - dma_ops_domain_free_flush_queue(dom); - return -ENOMEM; - } - - spin_lock_init(&queue->lock); - } - - return 0; -} - static void dma_ops_domain_flush_tlb(struct dma_ops_domain *dom) { - atomic64_inc(&dom->flush_start_cnt); domain_flush_tlb(&dom->domain); domain_flush_complete(&dom->domain); - atomic64_inc(&dom->flush_finish_cnt); } -static inline bool queue_ring_full(struct flush_queue *queue) +static void iova_domain_flush_tlb(struct iova_domain *iovad) { - assert_spin_locked(&queue->lock); + struct dma_ops_domain *dom; - return (((queue->tail + 1) % FLUSH_QUEUE_SIZE) == queue->head); -} - -#define queue_ring_for_each(i, q) \ - for (i = (q)->head; i != (q)->tail; i = (i + 1) % FLUSH_QUEUE_SIZE) - -static inline unsigned queue_ring_add(struct flush_queue *queue) -{ - unsigned idx = queue->tail; - - assert_spin_locked(&queue->lock); - queue->tail = (idx + 1) % FLUSH_QUEUE_SIZE; - - return idx; -} - -static inline void queue_ring_remove_head(struct flush_queue *queue) -{ - assert_spin_locked(&queue->lock); - queue->head = (queue->head + 1) % FLUSH_QUEUE_SIZE; -} - -static void queue_ring_free_flushed(struct dma_ops_domain *dom, - struct flush_queue *queue) -{ - u64 counter = atomic64_read(&dom->flush_finish_cnt); - int idx; - - queue_ring_for_each(idx, queue) { - /* - * This assumes that counter values in the ring-buffer are - * monotonously rising. - */ - if (queue->entries[idx].counter >= counter) - break; - - free_iova_fast(&dom->iovad, - queue->entries[idx].iova_pfn, - queue->entries[idx].pages); - - queue_ring_remove_head(queue); - } -} - -static void queue_add(struct dma_ops_domain *dom, - unsigned long address, unsigned long pages) -{ - struct flush_queue *queue; - unsigned long flags; - int idx; - - pages = __roundup_pow_of_two(pages); - address >>= PAGE_SHIFT; - - queue = get_cpu_ptr(dom->flush_queue); - spin_lock_irqsave(&queue->lock, flags); - - /* - * First remove the enries from the ring-buffer that are already - * flushed to make the below queue_ring_full() check less likely - */ - queue_ring_free_flushed(dom, queue); - - /* - * When ring-queue is full, flush the entries from the IOTLB so - * that we can free all entries with queue_ring_free_flushed() - * below. 
- */ - if (queue_ring_full(queue)) { - dma_ops_domain_flush_tlb(dom); - queue_ring_free_flushed(dom, queue); - } - - idx = queue_ring_add(queue); - - queue->entries[idx].iova_pfn = address; - queue->entries[idx].pages = pages; - queue->entries[idx].counter = atomic64_read(&dom->flush_start_cnt); - - spin_unlock_irqrestore(&queue->lock, flags); - - if (atomic_cmpxchg(&dom->flush_timer_on, 0, 1) == 0) - mod_timer(&dom->flush_timer, jiffies + msecs_to_jiffies(10)); - - put_cpu_ptr(dom->flush_queue); -} - -static void queue_flush_timeout(unsigned long data) -{ - struct dma_ops_domain *dom = (struct dma_ops_domain *)data; - int cpu; - - atomic_set(&dom->flush_timer_on, 0); + dom = container_of(iovad, struct dma_ops_domain, iovad); dma_ops_domain_flush_tlb(dom); - - for_each_possible_cpu(cpu) { - struct flush_queue *queue; - unsigned long flags; - - queue = per_cpu_ptr(dom->flush_queue, cpu); - spin_lock_irqsave(&queue->lock, flags); - queue_ring_free_flushed(dom, queue); - spin_unlock_irqrestore(&queue->lock, flags); - } } /* @@ -1975,11 +1756,6 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom) del_domain_from_list(&dom->domain); - if (timer_pending(&dom->flush_timer)) - del_timer(&dom->flush_timer); - - dma_ops_domain_free_flush_queue(dom); - put_iova_domain(&dom->iovad); free_pagetable(&dom->domain); @@ -2015,16 +1791,11 @@ static struct dma_ops_domain *dma_ops_domain_alloc(void) init_iova_domain(&dma_dom->iovad, PAGE_SIZE, IOVA_START_PFN, DMA_32BIT_PFN); - /* Initialize reserved ranges */ - copy_reserved_iova(&reserved_iova_ranges, &dma_dom->iovad); - - if (dma_ops_domain_alloc_flush_queue(dma_dom)) + if (init_iova_flush_queue(&dma_dom->iovad, iova_domain_flush_tlb, NULL)) goto free_dma_dom; - setup_timer(&dma_dom->flush_timer, queue_flush_timeout, - (unsigned long)dma_dom); - - atomic_set(&dma_dom->flush_timer_on, 0); + /* Initialize reserved ranges */ + copy_reserved_iova(&reserved_iova_ranges, &dma_dom->iovad); add_domain_to_list(&dma_dom->domain); @@ -2055,7 +1826,7 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats) pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK) << DEV_ENTRY_MODE_SHIFT; - pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV; + pte_root |= DTE_FLAG_IR | DTE_FLAG_IW | DTE_FLAG_V | DTE_FLAG_TV; flags = amd_iommu_dev_table[devid].data[1]; @@ -2088,8 +1859,7 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats) flags |= tmp; } - - flags &= ~(DTE_FLAG_SA | 0xffffULL); + flags &= ~DEV_DOMID_MASK; flags |= domain->id; amd_iommu_dev_table[devid].data[1] = flags; @@ -2099,7 +1869,7 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats) static void clear_dte_entry(u16 devid) { /* remove entry from the device table seen by the hardware */ - amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV; + amd_iommu_dev_table[devid].data[0] = DTE_FLAG_V | DTE_FLAG_TV; amd_iommu_dev_table[devid].data[1] &= DTE_FLAG_MASK; amd_iommu_apply_erratum_63(devid); @@ -2480,11 +2250,21 @@ static struct iommu_group *amd_iommu_device_group(struct device *dev) static struct protection_domain *get_domain(struct device *dev) { struct protection_domain *domain; + struct iommu_domain *io_domain; if (!check_device(dev)) return ERR_PTR(-EINVAL); domain = get_dev_data(dev)->domain; + if (domain == NULL && get_dev_data(dev)->defer_attach) { + get_dev_data(dev)->defer_attach = false; + io_domain = iommu_get_domain_for_dev(dev); + domain = to_pdomain(io_domain); + attach_device(dev, 
domain); + } + if (domain == NULL) + return ERR_PTR(-EBUSY); + if (!dma_ops_domain(domain)) return ERR_PTR(-EBUSY); @@ -2530,6 +2310,7 @@ static int dir2prot(enum dma_data_direction direction) else return 0; } + /* * This function contains common code for mapping of a physically * contiguous memory region into DMA address space. It is used by all @@ -2621,7 +2402,8 @@ static void __unmap_single(struct dma_ops_domain *dma_dom, domain_flush_tlb(&dma_dom->domain); domain_flush_complete(&dma_dom->domain); } else { - queue_add(dma_dom, dma_addr, pages); + pages = __roundup_pow_of_two(pages); + queue_iova(&dma_dom->iovad, dma_addr >> PAGE_SHIFT, pages, 0); } } @@ -3375,6 +3157,13 @@ static void amd_iommu_apply_resv_region(struct device *dev, WARN_ON_ONCE(reserve_iova(&dma_dom->iovad, start, end) == NULL); } +static bool amd_iommu_is_attach_deferred(struct iommu_domain *domain, + struct device *dev) +{ + struct iommu_dev_data *dev_data = dev->archdata.iommu; + return dev_data->defer_attach; +} + const struct iommu_ops amd_iommu_ops = { .capable = amd_iommu_capable, .domain_alloc = amd_iommu_domain_alloc, @@ -3391,6 +3180,7 @@ const struct iommu_ops amd_iommu_ops = { .get_resv_regions = amd_iommu_get_resv_regions, .put_resv_regions = amd_iommu_put_resv_regions, .apply_resv_region = amd_iommu_apply_resv_region, + .is_attach_deferred = amd_iommu_is_attach_deferred, .pgsize_bitmap = AMD_IOMMU_PGSIZES, }; @@ -3779,11 +3569,6 @@ EXPORT_SYMBOL(amd_iommu_device_info); static struct irq_chip amd_ir_chip; -#define DTE_IRQ_PHYS_ADDR_MASK (((1ULL << 45)-1) << 6) -#define DTE_IRQ_REMAP_INTCTL (2ULL << 60) -#define DTE_IRQ_TABLE_LEN (8ULL << 1) -#define DTE_IRQ_REMAP_ENABLE 1ULL - static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table) { u64 dte; diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 2292a6cece76..6fe2d0346073 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -29,7 +29,6 @@ #include #include #include -#include #include #include #include @@ -39,6 +38,7 @@ #include #include +#include #include "amd_iommu_proto.h" #include "amd_iommu_types.h" #include "irq_remapping.h" @@ -197,6 +197,11 @@ spinlock_t amd_iommu_pd_lock; * page table root pointer. */ struct dev_table_entry *amd_iommu_dev_table; +/* + * Pointer to a device table which the content of old device table + * will be copied to. It's only be used in kdump kernel. + */ +static struct dev_table_entry *old_dev_tbl_cpy; /* * The alias table is a driver specific data structure which contains the @@ -210,6 +215,7 @@ u16 *amd_iommu_alias_table; * for a specific device. It is also indexed by the PCI device id. 
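The amd_iommu changes above drop the driver's private per-CPU flush ring, counters and timer in favour of the generic IOVA flush queue: the driver only supplies a flush callback at domain setup and pushes freed ranges with queue_iova(). A condensed sketch of that wiring; the signatures follow the init_iova_flush_queue()/queue_iova() calls visible in this diff, and dma_ops_domain plus the flush helpers are the driver's own:

static void example_flush_cb(struct iova_domain *iovad)
{
	struct dma_ops_domain *dom =
		container_of(iovad, struct dma_ops_domain, iovad);

	domain_flush_tlb(&dom->domain);		/* invalidate the whole domain */
	domain_flush_complete(&dom->domain);	/* wait for the IOMMU to finish */
}

static int example_domain_setup(struct dma_ops_domain *dom)
{
	/* third argument: optional per-entry destructor, unused here */
	return init_iova_flush_queue(&dom->iovad, example_flush_cb, NULL);
}

static void example_unmap_range(struct dma_ops_domain *dom,
				dma_addr_t dma_addr, unsigned long pages)
{
	/* defer the IOTLB flush; the IOVA layer calls example_flush_cb later */
	queue_iova(&dom->iovad, dma_addr >> PAGE_SHIFT,
		   __roundup_pow_of_two(pages), 0);
}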
*/ struct amd_iommu **amd_iommu_rlookup_table; +EXPORT_SYMBOL(amd_iommu_rlookup_table); /* * This table is used to find the irq remapping table for a given device id @@ -259,6 +265,28 @@ static int amd_iommu_enable_interrupts(void); static int __init iommu_go_to_state(enum iommu_init_state state); static void init_device_table_dma(void); +static bool amd_iommu_pre_enabled = true; + +bool translation_pre_enabled(struct amd_iommu *iommu) +{ + return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED); +} +EXPORT_SYMBOL(translation_pre_enabled); + +static void clear_translation_pre_enabled(struct amd_iommu *iommu) +{ + iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED; +} + +static void init_translation_status(struct amd_iommu *iommu) +{ + u32 ctrl; + + ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET); + if (ctrl & (1<<CONTROL_IOMMU_EN)) + iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED; +} + static inline void update_last_devid(u16 devid) { if (devid > amd_iommu_last_bdf) @@ -616,6 +644,14 @@ static void iommu_enable_command_buffer(struct amd_iommu *iommu) amd_iommu_reset_cmd_buffer(iommu); } +/* + * This function disables the command buffer + */ +static void iommu_disable_command_buffer(struct amd_iommu *iommu) +{ + iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); +} + static void __init free_command_buffer(struct amd_iommu *iommu) { free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE)); @@ -648,6 +684,14 @@ static void iommu_enable_event_buffer(struct amd_iommu *iommu) iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN); } +/* + * This function disables the event log buffer + */ +static void iommu_disable_event_buffer(struct amd_iommu *iommu) +{ + iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN); +} + static void __init free_event_buffer(struct amd_iommu *iommu) { free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE)); @@ -809,6 +853,96 @@ static int get_dev_entry_bit(u16 devid, u8 bit) } +static bool copy_device_table(void) +{ + u64 int_ctl, int_tab_len, entry = 0, last_entry = 0; + struct dev_table_entry *old_devtb = NULL; + u32 lo, hi, devid, old_devtb_size; + phys_addr_t old_devtb_phys; + struct amd_iommu *iommu; + u16 dom_id, dte_v, irq_v; + gfp_t gfp_flag; + u64 tmp; + + if (!amd_iommu_pre_enabled) + return false; + + pr_warn("Translation is already enabled - trying to copy translation structures\n"); + for_each_iommu(iommu) { + /* All IOMMUs should use the same device table with the same size */ + lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET); + hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4); + entry = (((u64) hi) << 32) + lo; + if (last_entry && last_entry != entry) { + pr_err("IOMMU:%d should use the same dev table as others!\n", + iommu->index); + return false; + } + last_entry = entry; + + old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12; + if (old_devtb_size != dev_table_size) { + pr_err("The device table size of IOMMU:%d is not expected!\n", + iommu->index); + return false; + } + } + + old_devtb_phys = entry & PAGE_MASK; + if (old_devtb_phys >= 0x100000000ULL) { + pr_err("The address of old device table is above 4G, not trustworthy!\n"); + return false; + } + old_devtb = memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB); + if (!old_devtb) + return false; + + gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32; + old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag, + get_order(dev_table_size)); + if (old_dev_tbl_cpy == NULL) { + pr_err("Failed to allocate memory for copying old device table!\n"); + return false; + } + + for (devid = 0; devid <= 
amd_iommu_last_bdf; ++devid) { + old_dev_tbl_cpy[devid] = old_devtb[devid]; + dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK; + dte_v = old_devtb[devid].data[0] & DTE_FLAG_V; + + if (dte_v && dom_id) { + old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0]; + old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1]; + __set_bit(dom_id, amd_iommu_pd_alloc_bitmap); + /* If gcr3 table existed, mask it out */ + if (old_devtb[devid].data[0] & DTE_FLAG_GV) { + tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B; + tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C; + old_dev_tbl_cpy[devid].data[1] &= ~tmp; + tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A; + tmp |= DTE_FLAG_GV; + old_dev_tbl_cpy[devid].data[0] &= ~tmp; + } + } + + irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE; + int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK; + int_tab_len = old_devtb[devid].data[2] & DTE_IRQ_TABLE_LEN_MASK; + if (irq_v && (int_ctl || int_tab_len)) { + if ((int_ctl != DTE_IRQ_REMAP_INTCTL) || + (int_tab_len != DTE_IRQ_TABLE_LEN)) { + pr_err("Wrong old irq remapping flag: %#x\n", devid); + return false; + } + + old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2]; + } + } + memunmap(old_devtb); + + return true; +} + void amd_iommu_apply_erratum_63(u16 devid) { int sysmgt; @@ -1400,6 +1534,16 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) iommu->int_enabled = false; + init_translation_status(iommu); + if (translation_pre_enabled(iommu) && !is_kdump_kernel()) { + iommu_disable(iommu); + clear_translation_pre_enabled(iommu); + pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n", + iommu->index); + } + if (amd_iommu_pre_enabled) + amd_iommu_pre_enabled = translation_pre_enabled(iommu); + ret = init_iommu_from_acpi(iommu, h); if (ret) return ret; @@ -1893,8 +2037,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table) } /* - * Init the device table to not allow DMA access for devices and - * suppress all page faults + * Init the device table to not allow DMA access for devices */ static void init_device_table_dma(void) { @@ -1903,14 +2046,6 @@ static void init_device_table_dma(void) for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) { set_dev_entry_bit(devid, DEV_ENTRY_VALID); set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION); - /* - * In kdump kernels in-flight DMA from the old kernel might - * cause IO_PAGE_FAULTs. There are no reports that a kdump - * actually failed because of that, so just disable fault - * reporting in the hardware to get rid of the messages - */ - if (is_kdump_kernel()) - set_dev_entry_bit(devid, DEV_ENTRY_NO_PAGE_FAULT); } } @@ -2023,24 +2158,62 @@ static void iommu_enable_ga(struct amd_iommu *iommu) #endif } +static void early_enable_iommu(struct amd_iommu *iommu) +{ + iommu_disable(iommu); + iommu_init_flags(iommu); + iommu_set_device_table(iommu); + iommu_enable_command_buffer(iommu); + iommu_enable_event_buffer(iommu); + iommu_set_exclusion_range(iommu); + iommu_enable_ga(iommu); + iommu_enable(iommu); + iommu_flush_all_caches(iommu); +} + /* * This function finally enables all IOMMUs found in the system after - * they have been initialized + * they have been initialized. + * + * Or if in kdump kernel and IOMMUs are all pre-enabled, try to copy + * the old content of device table entries. Not this case or copy failed, + * just continue as normal kernel does. 
*/ static void early_enable_iommus(void) { struct amd_iommu *iommu; - for_each_iommu(iommu) { - iommu_disable(iommu); - iommu_init_flags(iommu); - iommu_set_device_table(iommu); - iommu_enable_command_buffer(iommu); - iommu_enable_event_buffer(iommu); - iommu_set_exclusion_range(iommu); - iommu_enable_ga(iommu); - iommu_enable(iommu); - iommu_flush_all_caches(iommu); + + if (!copy_device_table()) { + /* + * If come here because of failure in copying device table from old + * kernel with all IOMMUs enabled, print error message and try to + * free allocated old_dev_tbl_cpy. + */ + if (amd_iommu_pre_enabled) + pr_err("Failed to copy DEV table from previous kernel.\n"); + if (old_dev_tbl_cpy != NULL) + free_pages((unsigned long)old_dev_tbl_cpy, + get_order(dev_table_size)); + + for_each_iommu(iommu) { + clear_translation_pre_enabled(iommu); + early_enable_iommu(iommu); + } + } else { + pr_info("Copied DEV table from previous kernel.\n"); + free_pages((unsigned long)amd_iommu_dev_table, + get_order(dev_table_size)); + amd_iommu_dev_table = old_dev_tbl_cpy; + for_each_iommu(iommu) { + iommu_disable_command_buffer(iommu); + iommu_disable_event_buffer(iommu); + iommu_enable_command_buffer(iommu); + iommu_enable_event_buffer(iommu); + iommu_enable_ga(iommu); + iommu_set_device_table(iommu); + iommu_flush_all_caches(iommu); + } } #ifdef CONFIG_IRQ_REMAP @@ -2276,7 +2449,8 @@ static int __init early_amd_iommu_init(void) /* Device table - directly used by all IOMMUs */ ret = -ENOMEM; - amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, + amd_iommu_dev_table = (void *)__get_free_pages( + GFP_KERNEL | __GFP_ZERO | GFP_DMA32, get_order(dev_table_size)); if (amd_iommu_dev_table == NULL) goto out; @@ -2326,7 +2500,8 @@ static int __init early_amd_iommu_init(void) goto out; /* Disable any previously enabled IOMMUs */ - disable_iommus(); + if (!is_kdump_kernel() || amd_iommu_disabled) + disable_iommus(); if (amd_iommu_irq_remap) amd_iommu_irq_remap = check_ioapic_information(); diff --git a/drivers/iommu/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h index 3f12fb2338ea..640c286a0ab9 100644 --- a/drivers/iommu/amd_iommu_proto.h +++ b/drivers/iommu/amd_iommu_proto.h @@ -97,4 +97,6 @@ static inline void *iommu_phys_to_virt(unsigned long paddr) return phys_to_virt(__sme_clr(paddr)); } +extern bool translation_pre_enabled(struct amd_iommu *iommu); +extern struct iommu_dev_data *get_dev_data(struct device *dev); #endif /* _ASM_X86_AMD_IOMMU_PROTO_H */ diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index 8e3a85759242..f6b24c7d8b70 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h @@ -250,6 +250,14 @@ #define GA_GUEST_NR 0x1 +/* Bit value definition for dte irq remapping fields*/ +#define DTE_IRQ_PHYS_ADDR_MASK (((1ULL << 45)-1) << 6) +#define DTE_IRQ_REMAP_INTCTL_MASK (0x3ULL << 60) +#define DTE_IRQ_TABLE_LEN_MASK (0xfULL << 1) +#define DTE_IRQ_REMAP_INTCTL (2ULL << 60) +#define DTE_IRQ_TABLE_LEN (8ULL << 1) +#define DTE_IRQ_REMAP_ENABLE 1ULL + #define PAGE_MODE_NONE 0x00 #define PAGE_MODE_1_LEVEL 0x01 #define PAGE_MODE_2_LEVEL 0x02 @@ -265,7 +273,7 @@ #define PM_LEVEL_INDEX(x, a) (((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL) #define PM_LEVEL_ENC(x) (((x) << 9) & 0xe00ULL) #define PM_LEVEL_PDE(x, a) ((a) | PM_LEVEL_ENC((x)) | \ - IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW) + IOMMU_PTE_PR | IOMMU_PTE_IR | IOMMU_PTE_IW) #define PM_PTE_LEVEL(pte) (((pte) >> 9) & 0x7ULL) #define PM_MAP_4k 0 @@ -314,19 +322,29 @@ #define 
PTE_LEVEL_PAGE_SIZE(level) \ (1ULL << (12 + (9 * (level)))) -#define IOMMU_PTE_P (1ULL << 0) -#define IOMMU_PTE_TV (1ULL << 1) +/* + * Bit value definition for I/O PTE fields + */ +#define IOMMU_PTE_PR (1ULL << 0) #define IOMMU_PTE_U (1ULL << 59) #define IOMMU_PTE_FC (1ULL << 60) #define IOMMU_PTE_IR (1ULL << 61) #define IOMMU_PTE_IW (1ULL << 62) +/* + * Bit value definition for DTE fields + */ +#define DTE_FLAG_V (1ULL << 0) +#define DTE_FLAG_TV (1ULL << 1) +#define DTE_FLAG_IR (1ULL << 61) +#define DTE_FLAG_IW (1ULL << 62) + #define DTE_FLAG_IOTLB (1ULL << 32) -#define DTE_FLAG_SA (1ULL << 34) #define DTE_FLAG_GV (1ULL << 55) #define DTE_FLAG_MASK (0x3ffULL << 32) #define DTE_GLX_SHIFT (56) #define DTE_GLX_MASK (3) +#define DEV_DOMID_MASK 0xffffULL #define DTE_GCR3_VAL_A(x) (((x) >> 12) & 0x00007ULL) #define DTE_GCR3_VAL_B(x) (((x) >> 15) & 0x0ffffULL) @@ -343,7 +361,7 @@ #define GCR3_VALID 0x01ULL #define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL) -#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P) +#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_PR) #define IOMMU_PTE_PAGE(pte) (iommu_phys_to_virt((pte) & IOMMU_PAGE_MASK)) #define IOMMU_PTE_MODE(pte) (((pte) >> 9) & 0x07) @@ -435,6 +453,8 @@ struct iommu_domain; struct irq_domain; struct amd_irte_ops; +#define AMD_IOMMU_FLAG_TRANS_PRE_ENABLED (1 << 0) + /* * This structure contains generic data for IOMMU protection domains * independent of their use. @@ -569,6 +589,7 @@ struct amd_iommu { struct amd_irte_ops *irte_ops; #endif + u32 flags; volatile u64 __aligned(8) cmd_sem; }; @@ -599,6 +620,30 @@ struct devid_map { bool cmd_line; }; +/* + * This struct contains device specific data for the IOMMU + */ +struct iommu_dev_data { + struct list_head list; /* For domain->dev_list */ + struct list_head dev_data_list; /* For global dev_data_list */ + struct protection_domain *domain; /* Domain the device is bound to */ + u16 devid; /* PCI Device ID */ + u16 alias; /* Alias Device ID */ + bool iommu_v2; /* Device can make use of IOMMUv2 */ + bool passthrough; /* Device is identity mapped */ + struct { + bool enabled; + int qdep; + } ats; /* ATS state */ + bool pri_tlp; /* PASID TLB required for + PPR completions */ + u32 errata; /* Bitmap for errata to apply */ + bool use_vapic; /* Enable device to use vapic mode */ + bool defer_attach; + + struct ratelimit_state rs; /* Ratelimit IOPF messages */ +}; + /* Map HPET and IOAPIC ids to the devid used by the IOMMU */ extern struct list_head ioapic_map; extern struct list_head hpet_map; diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c index dccf5b76eff2..7d94e1d39e5e 100644 --- a/drivers/iommu/amd_iommu_v2.c +++ b/drivers/iommu/amd_iommu_v2.c @@ -554,14 +554,30 @@ static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data) unsigned long flags; struct fault *fault; bool finish; - u16 tag; + u16 tag, devid; int ret; + struct iommu_dev_data *dev_data; + struct pci_dev *pdev = NULL; iommu_fault = data; tag = iommu_fault->tag & 0x1ff; finish = (iommu_fault->tag >> 9) & 1; + devid = iommu_fault->device_id; + pdev = pci_get_bus_and_slot(PCI_BUS_NUM(devid), devid & 0xff); + if (!pdev) + return -ENODEV; + dev_data = get_dev_data(&pdev->dev); + + /* In kdump kernel pci dev is not initialized yet -> send INVALID */ ret = NOTIFY_DONE; + if (translation_pre_enabled(amd_iommu_rlookup_table[devid]) + && dev_data->defer_attach) { + amd_iommu_complete_ppr(pdev, iommu_fault->pasid, + PPR_INVALID, tag); + goto out; + } + dev_state = 
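The ppr_notifier change above looks up the struct pci_dev behind the 16-bit IOMMU device ID so it can consult the per-device data. A sketch of the decoding it relies on: the devid is the PCI bus/devfn pair, bus in the high byte and devfn in the low byte, and pci_get_bus_and_slot() takes a reference on the returned device that the caller must drop with pci_dev_put() when done. The helper name is illustrative:

static struct pci_dev *example_devid_to_pdev(u16 devid)
{
	u8 bus   = PCI_BUS_NUM(devid);	/* (devid >> 8) & 0xff */
	u8 devfn = devid & 0xff;	/* PCI_SLOT()/PCI_FUNC() split this further */

	return pci_get_bus_and_slot(bus, devfn);
}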
get_device_state(iommu_fault->device_id); if (dev_state == NULL) goto out; diff --git a/drivers/iommu/arm-smmu-regs.h b/drivers/iommu/arm-smmu-regs.h new file mode 100644 index 000000000000..a1226e4ab5f8 --- /dev/null +++ b/drivers/iommu/arm-smmu-regs.h @@ -0,0 +1,220 @@ +/* + * IOMMU API for ARM architected SMMU implementations. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) 2013 ARM Limited + * + * Author: Will Deacon + */ + +#ifndef _ARM_SMMU_REGS_H +#define _ARM_SMMU_REGS_H + +/* Configuration registers */ +#define ARM_SMMU_GR0_sCR0 0x0 +#define sCR0_CLIENTPD (1 << 0) +#define sCR0_GFRE (1 << 1) +#define sCR0_GFIE (1 << 2) +#define sCR0_EXIDENABLE (1 << 3) +#define sCR0_GCFGFRE (1 << 4) +#define sCR0_GCFGFIE (1 << 5) +#define sCR0_USFCFG (1 << 10) +#define sCR0_VMIDPNE (1 << 11) +#define sCR0_PTM (1 << 12) +#define sCR0_FB (1 << 13) +#define sCR0_VMID16EN (1 << 31) +#define sCR0_BSU_SHIFT 14 +#define sCR0_BSU_MASK 0x3 + +/* Auxiliary Configuration register */ +#define ARM_SMMU_GR0_sACR 0x10 + +/* Identification registers */ +#define ARM_SMMU_GR0_ID0 0x20 +#define ARM_SMMU_GR0_ID1 0x24 +#define ARM_SMMU_GR0_ID2 0x28 +#define ARM_SMMU_GR0_ID3 0x2c +#define ARM_SMMU_GR0_ID4 0x30 +#define ARM_SMMU_GR0_ID5 0x34 +#define ARM_SMMU_GR0_ID6 0x38 +#define ARM_SMMU_GR0_ID7 0x3c +#define ARM_SMMU_GR0_sGFSR 0x48 +#define ARM_SMMU_GR0_sGFSYNR0 0x50 +#define ARM_SMMU_GR0_sGFSYNR1 0x54 +#define ARM_SMMU_GR0_sGFSYNR2 0x58 + +#define ID0_S1TS (1 << 30) +#define ID0_S2TS (1 << 29) +#define ID0_NTS (1 << 28) +#define ID0_SMS (1 << 27) +#define ID0_ATOSNS (1 << 26) +#define ID0_PTFS_NO_AARCH32 (1 << 25) +#define ID0_PTFS_NO_AARCH32S (1 << 24) +#define ID0_CTTW (1 << 14) +#define ID0_NUMIRPT_SHIFT 16 +#define ID0_NUMIRPT_MASK 0xff +#define ID0_NUMSIDB_SHIFT 9 +#define ID0_NUMSIDB_MASK 0xf +#define ID0_EXIDS (1 << 8) +#define ID0_NUMSMRG_SHIFT 0 +#define ID0_NUMSMRG_MASK 0xff + +#define ID1_PAGESIZE (1 << 31) +#define ID1_NUMPAGENDXB_SHIFT 28 +#define ID1_NUMPAGENDXB_MASK 7 +#define ID1_NUMS2CB_SHIFT 16 +#define ID1_NUMS2CB_MASK 0xff +#define ID1_NUMCB_SHIFT 0 +#define ID1_NUMCB_MASK 0xff + +#define ID2_OAS_SHIFT 4 +#define ID2_OAS_MASK 0xf +#define ID2_IAS_SHIFT 0 +#define ID2_IAS_MASK 0xf +#define ID2_UBS_SHIFT 8 +#define ID2_UBS_MASK 0xf +#define ID2_PTFS_4K (1 << 12) +#define ID2_PTFS_16K (1 << 13) +#define ID2_PTFS_64K (1 << 14) +#define ID2_VMID16 (1 << 15) + +#define ID7_MAJOR_SHIFT 4 +#define ID7_MAJOR_MASK 0xf + +/* Global TLB invalidation */ +#define ARM_SMMU_GR0_TLBIVMID 0x64 +#define ARM_SMMU_GR0_TLBIALLNSNH 0x68 +#define ARM_SMMU_GR0_TLBIALLH 0x6c +#define ARM_SMMU_GR0_sTLBGSYNC 0x70 +#define ARM_SMMU_GR0_sTLBGSTATUS 0x74 +#define sTLBGSTATUS_GSACTIVE (1 << 0) + +/* Stream mapping registers */ +#define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2)) +#define SMR_VALID (1 << 31) +#define SMR_MASK_SHIFT 16 +#define SMR_ID_SHIFT 0 + +#define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 
2)) +#define S2CR_CBNDX_SHIFT 0 +#define S2CR_CBNDX_MASK 0xff +#define S2CR_EXIDVALID (1 << 10) +#define S2CR_TYPE_SHIFT 16 +#define S2CR_TYPE_MASK 0x3 +enum arm_smmu_s2cr_type { + S2CR_TYPE_TRANS, + S2CR_TYPE_BYPASS, + S2CR_TYPE_FAULT, +}; + +#define S2CR_PRIVCFG_SHIFT 24 +#define S2CR_PRIVCFG_MASK 0x3 +enum arm_smmu_s2cr_privcfg { + S2CR_PRIVCFG_DEFAULT, + S2CR_PRIVCFG_DIPAN, + S2CR_PRIVCFG_UNPRIV, + S2CR_PRIVCFG_PRIV, +}; + +/* Context bank attribute registers */ +#define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2)) +#define CBAR_VMID_SHIFT 0 +#define CBAR_VMID_MASK 0xff +#define CBAR_S1_BPSHCFG_SHIFT 8 +#define CBAR_S1_BPSHCFG_MASK 3 +#define CBAR_S1_BPSHCFG_NSH 3 +#define CBAR_S1_MEMATTR_SHIFT 12 +#define CBAR_S1_MEMATTR_MASK 0xf +#define CBAR_S1_MEMATTR_WB 0xf +#define CBAR_TYPE_SHIFT 16 +#define CBAR_TYPE_MASK 0x3 +#define CBAR_TYPE_S2_TRANS (0 << CBAR_TYPE_SHIFT) +#define CBAR_TYPE_S1_TRANS_S2_BYPASS (1 << CBAR_TYPE_SHIFT) +#define CBAR_TYPE_S1_TRANS_S2_FAULT (2 << CBAR_TYPE_SHIFT) +#define CBAR_TYPE_S1_TRANS_S2_TRANS (3 << CBAR_TYPE_SHIFT) +#define CBAR_IRPTNDX_SHIFT 24 +#define CBAR_IRPTNDX_MASK 0xff + +#define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2)) +#define CBA2R_RW64_32BIT (0 << 0) +#define CBA2R_RW64_64BIT (1 << 0) +#define CBA2R_VMID_SHIFT 16 +#define CBA2R_VMID_MASK 0xffff + +#define ARM_SMMU_CB_SCTLR 0x0 +#define ARM_SMMU_CB_ACTLR 0x4 +#define ARM_SMMU_CB_RESUME 0x8 +#define ARM_SMMU_CB_TTBCR2 0x10 +#define ARM_SMMU_CB_TTBR0 0x20 +#define ARM_SMMU_CB_TTBR1 0x28 +#define ARM_SMMU_CB_TTBCR 0x30 +#define ARM_SMMU_CB_CONTEXTIDR 0x34 +#define ARM_SMMU_CB_S1_MAIR0 0x38 +#define ARM_SMMU_CB_S1_MAIR1 0x3c +#define ARM_SMMU_CB_PAR 0x50 +#define ARM_SMMU_CB_FSR 0x58 +#define ARM_SMMU_CB_FAR 0x60 +#define ARM_SMMU_CB_FSYNR0 0x68 +#define ARM_SMMU_CB_S1_TLBIVA 0x600 +#define ARM_SMMU_CB_S1_TLBIASID 0x610 +#define ARM_SMMU_CB_S1_TLBIVAL 0x620 +#define ARM_SMMU_CB_S2_TLBIIPAS2 0x630 +#define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638 +#define ARM_SMMU_CB_TLBSYNC 0x7f0 +#define ARM_SMMU_CB_TLBSTATUS 0x7f4 +#define ARM_SMMU_CB_ATS1PR 0x800 +#define ARM_SMMU_CB_ATSR 0x8f0 + +#define SCTLR_S1_ASIDPNE (1 << 12) +#define SCTLR_CFCFG (1 << 7) +#define SCTLR_CFIE (1 << 6) +#define SCTLR_CFRE (1 << 5) +#define SCTLR_E (1 << 4) +#define SCTLR_AFE (1 << 2) +#define SCTLR_TRE (1 << 1) +#define SCTLR_M (1 << 0) + +#define CB_PAR_F (1 << 0) + +#define ATSR_ACTIVE (1 << 0) + +#define RESUME_RETRY (0 << 0) +#define RESUME_TERMINATE (1 << 0) + +#define TTBCR2_SEP_SHIFT 15 +#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT) +#define TTBCR2_AS (1 << 4) + +#define TTBRn_ASID_SHIFT 48 + +#define FSR_MULTI (1 << 31) +#define FSR_SS (1 << 30) +#define FSR_UUT (1 << 8) +#define FSR_ASF (1 << 7) +#define FSR_TLBLKF (1 << 6) +#define FSR_TLBMCF (1 << 5) +#define FSR_EF (1 << 4) +#define FSR_PF (1 << 3) +#define FSR_AFF (1 << 2) +#define FSR_TF (1 << 1) + +#define FSR_IGN (FSR_AFF | FSR_ASF | \ + FSR_TLBMCF | FSR_TLBLKF) +#define FSR_FAULT (FSR_MULTI | FSR_SS | FSR_UUT | \ + FSR_EF | FSR_PF | FSR_TF | FSR_IGN) + +#define FSYNR0_WNR (1 << 4) + +#endif /* _ARM_SMMU_REGS_H */ diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 568c400eeaed..e67ba6c40faf 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -2852,9 +2852,15 @@ static int arm_smmu_device_remove(struct platform_device *pdev) struct arm_smmu_device *smmu = platform_get_drvdata(pdev); arm_smmu_device_disable(smmu); + return 0; } +static void arm_smmu_device_shutdown(struct platform_device *pdev) +{ + 
arm_smmu_device_remove(pdev); +} + static const struct of_device_id arm_smmu_of_match[] = { { .compatible = "arm,smmu-v3", }, { }, @@ -2868,6 +2874,7 @@ static struct platform_driver arm_smmu_driver = { }, .probe = arm_smmu_device_probe, .remove = arm_smmu_device_remove, + .shutdown = arm_smmu_device_shutdown, }; module_platform_driver(arm_smmu_driver); diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 2d80fa8a0634..3bdb799d3b4b 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -54,6 +54,15 @@ #include #include "io-pgtable.h" +#include "arm-smmu-regs.h" + +#define ARM_MMU500_ACTLR_CPRE (1 << 1) + +#define ARM_MMU500_ACR_CACHE_LOCK (1 << 26) +#define ARM_MMU500_ACR_SMTNMB_TLBEN (1 << 8) + +#define TLB_LOOP_TIMEOUT 1000000 /* 1s! */ +#define TLB_SPIN_COUNT 10 /* Maximum number of context banks per SMMU */ #define ARM_SMMU_MAX_CBS 128 @@ -83,211 +92,9 @@ #define smmu_write_atomic_lq writel_relaxed #endif -/* Configuration registers */ -#define ARM_SMMU_GR0_sCR0 0x0 -#define sCR0_CLIENTPD (1 << 0) -#define sCR0_GFRE (1 << 1) -#define sCR0_GFIE (1 << 2) -#define sCR0_EXIDENABLE (1 << 3) -#define sCR0_GCFGFRE (1 << 4) -#define sCR0_GCFGFIE (1 << 5) -#define sCR0_USFCFG (1 << 10) -#define sCR0_VMIDPNE (1 << 11) -#define sCR0_PTM (1 << 12) -#define sCR0_FB (1 << 13) -#define sCR0_VMID16EN (1 << 31) -#define sCR0_BSU_SHIFT 14 -#define sCR0_BSU_MASK 0x3 - -/* Auxiliary Configuration register */ -#define ARM_SMMU_GR0_sACR 0x10 - -/* Identification registers */ -#define ARM_SMMU_GR0_ID0 0x20 -#define ARM_SMMU_GR0_ID1 0x24 -#define ARM_SMMU_GR0_ID2 0x28 -#define ARM_SMMU_GR0_ID3 0x2c -#define ARM_SMMU_GR0_ID4 0x30 -#define ARM_SMMU_GR0_ID5 0x34 -#define ARM_SMMU_GR0_ID6 0x38 -#define ARM_SMMU_GR0_ID7 0x3c -#define ARM_SMMU_GR0_sGFSR 0x48 -#define ARM_SMMU_GR0_sGFSYNR0 0x50 -#define ARM_SMMU_GR0_sGFSYNR1 0x54 -#define ARM_SMMU_GR0_sGFSYNR2 0x58 - -#define ID0_S1TS (1 << 30) -#define ID0_S2TS (1 << 29) -#define ID0_NTS (1 << 28) -#define ID0_SMS (1 << 27) -#define ID0_ATOSNS (1 << 26) -#define ID0_PTFS_NO_AARCH32 (1 << 25) -#define ID0_PTFS_NO_AARCH32S (1 << 24) -#define ID0_CTTW (1 << 14) -#define ID0_NUMIRPT_SHIFT 16 -#define ID0_NUMIRPT_MASK 0xff -#define ID0_NUMSIDB_SHIFT 9 -#define ID0_NUMSIDB_MASK 0xf -#define ID0_EXIDS (1 << 8) -#define ID0_NUMSMRG_SHIFT 0 -#define ID0_NUMSMRG_MASK 0xff - -#define ID1_PAGESIZE (1 << 31) -#define ID1_NUMPAGENDXB_SHIFT 28 -#define ID1_NUMPAGENDXB_MASK 7 -#define ID1_NUMS2CB_SHIFT 16 -#define ID1_NUMS2CB_MASK 0xff -#define ID1_NUMCB_SHIFT 0 -#define ID1_NUMCB_MASK 0xff - -#define ID2_OAS_SHIFT 4 -#define ID2_OAS_MASK 0xf -#define ID2_IAS_SHIFT 0 -#define ID2_IAS_MASK 0xf -#define ID2_UBS_SHIFT 8 -#define ID2_UBS_MASK 0xf -#define ID2_PTFS_4K (1 << 12) -#define ID2_PTFS_16K (1 << 13) -#define ID2_PTFS_64K (1 << 14) -#define ID2_VMID16 (1 << 15) - -#define ID7_MAJOR_SHIFT 4 -#define ID7_MAJOR_MASK 0xf - -/* Global TLB invalidation */ -#define ARM_SMMU_GR0_TLBIVMID 0x64 -#define ARM_SMMU_GR0_TLBIALLNSNH 0x68 -#define ARM_SMMU_GR0_TLBIALLH 0x6c -#define ARM_SMMU_GR0_sTLBGSYNC 0x70 -#define ARM_SMMU_GR0_sTLBGSTATUS 0x74 -#define sTLBGSTATUS_GSACTIVE (1 << 0) -#define TLB_LOOP_TIMEOUT 1000000 /* 1s! 
*/ -#define TLB_SPIN_COUNT 10 - -/* Stream mapping registers */ -#define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2)) -#define SMR_VALID (1 << 31) -#define SMR_MASK_SHIFT 16 -#define SMR_ID_SHIFT 0 - -#define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2)) -#define S2CR_CBNDX_SHIFT 0 -#define S2CR_CBNDX_MASK 0xff -#define S2CR_EXIDVALID (1 << 10) -#define S2CR_TYPE_SHIFT 16 -#define S2CR_TYPE_MASK 0x3 -enum arm_smmu_s2cr_type { - S2CR_TYPE_TRANS, - S2CR_TYPE_BYPASS, - S2CR_TYPE_FAULT, -}; - -#define S2CR_PRIVCFG_SHIFT 24 -#define S2CR_PRIVCFG_MASK 0x3 -enum arm_smmu_s2cr_privcfg { - S2CR_PRIVCFG_DEFAULT, - S2CR_PRIVCFG_DIPAN, - S2CR_PRIVCFG_UNPRIV, - S2CR_PRIVCFG_PRIV, -}; - -/* Context bank attribute registers */ -#define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2)) -#define CBAR_VMID_SHIFT 0 -#define CBAR_VMID_MASK 0xff -#define CBAR_S1_BPSHCFG_SHIFT 8 -#define CBAR_S1_BPSHCFG_MASK 3 -#define CBAR_S1_BPSHCFG_NSH 3 -#define CBAR_S1_MEMATTR_SHIFT 12 -#define CBAR_S1_MEMATTR_MASK 0xf -#define CBAR_S1_MEMATTR_WB 0xf -#define CBAR_TYPE_SHIFT 16 -#define CBAR_TYPE_MASK 0x3 -#define CBAR_TYPE_S2_TRANS (0 << CBAR_TYPE_SHIFT) -#define CBAR_TYPE_S1_TRANS_S2_BYPASS (1 << CBAR_TYPE_SHIFT) -#define CBAR_TYPE_S1_TRANS_S2_FAULT (2 << CBAR_TYPE_SHIFT) -#define CBAR_TYPE_S1_TRANS_S2_TRANS (3 << CBAR_TYPE_SHIFT) -#define CBAR_IRPTNDX_SHIFT 24 -#define CBAR_IRPTNDX_MASK 0xff - -#define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2)) -#define CBA2R_RW64_32BIT (0 << 0) -#define CBA2R_RW64_64BIT (1 << 0) -#define CBA2R_VMID_SHIFT 16 -#define CBA2R_VMID_MASK 0xffff - /* Translation context bank */ #define ARM_SMMU_CB(smmu, n) ((smmu)->cb_base + ((n) << (smmu)->pgshift)) -#define ARM_SMMU_CB_SCTLR 0x0 -#define ARM_SMMU_CB_ACTLR 0x4 -#define ARM_SMMU_CB_RESUME 0x8 -#define ARM_SMMU_CB_TTBCR2 0x10 -#define ARM_SMMU_CB_TTBR0 0x20 -#define ARM_SMMU_CB_TTBR1 0x28 -#define ARM_SMMU_CB_TTBCR 0x30 -#define ARM_SMMU_CB_CONTEXTIDR 0x34 -#define ARM_SMMU_CB_S1_MAIR0 0x38 -#define ARM_SMMU_CB_S1_MAIR1 0x3c -#define ARM_SMMU_CB_PAR 0x50 -#define ARM_SMMU_CB_FSR 0x58 -#define ARM_SMMU_CB_FAR 0x60 -#define ARM_SMMU_CB_FSYNR0 0x68 -#define ARM_SMMU_CB_S1_TLBIVA 0x600 -#define ARM_SMMU_CB_S1_TLBIASID 0x610 -#define ARM_SMMU_CB_S1_TLBIVAL 0x620 -#define ARM_SMMU_CB_S2_TLBIIPAS2 0x630 -#define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638 -#define ARM_SMMU_CB_TLBSYNC 0x7f0 -#define ARM_SMMU_CB_TLBSTATUS 0x7f4 -#define ARM_SMMU_CB_ATS1PR 0x800 -#define ARM_SMMU_CB_ATSR 0x8f0 - -#define SCTLR_S1_ASIDPNE (1 << 12) -#define SCTLR_CFCFG (1 << 7) -#define SCTLR_CFIE (1 << 6) -#define SCTLR_CFRE (1 << 5) -#define SCTLR_E (1 << 4) -#define SCTLR_AFE (1 << 2) -#define SCTLR_TRE (1 << 1) -#define SCTLR_M (1 << 0) - -#define ARM_MMU500_ACTLR_CPRE (1 << 1) - -#define ARM_MMU500_ACR_CACHE_LOCK (1 << 26) -#define ARM_MMU500_ACR_SMTNMB_TLBEN (1 << 8) - -#define CB_PAR_F (1 << 0) - -#define ATSR_ACTIVE (1 << 0) - -#define RESUME_RETRY (0 << 0) -#define RESUME_TERMINATE (1 << 0) - -#define TTBCR2_SEP_SHIFT 15 -#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT) -#define TTBCR2_AS (1 << 4) - -#define TTBRn_ASID_SHIFT 48 - -#define FSR_MULTI (1 << 31) -#define FSR_SS (1 << 30) -#define FSR_UUT (1 << 8) -#define FSR_ASF (1 << 7) -#define FSR_TLBLKF (1 << 6) -#define FSR_TLBMCF (1 << 5) -#define FSR_EF (1 << 4) -#define FSR_PF (1 << 3) -#define FSR_AFF (1 << 2) -#define FSR_TF (1 << 1) - -#define FSR_IGN (FSR_AFF | FSR_ASF | \ - FSR_TLBMCF | FSR_TLBLKF) -#define FSR_FAULT (FSR_MULTI | FSR_SS | FSR_UUT | \ - FSR_EF | FSR_PF | FSR_TF | FSR_IGN) - -#define FSYNR0_WNR (1 << 4) - 
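The block above removes the register definitions from arm-smmu.c now that they live in the shared arm-smmu-regs.h header. A hypothetical consumer of that header (illustrative only, not taken from the qcom_iommu driver added in Kconfig/Makefile above): any driver programming an ARM SMMU context bank can reuse the same offsets and field encodings instead of re-defining them.

#include <linux/io.h>
#include "arm-smmu-regs.h"

static void example_enable_context_bank(void __iomem *cb_base)
{
	u32 reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;

	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}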
#define MSI_IOVA_BASE 0x8000000 #define MSI_IOVA_LENGTH 0x100000 @@ -338,6 +145,13 @@ struct arm_smmu_smr { bool valid; }; +struct arm_smmu_cb { + u64 ttbr[2]; + u32 tcr[2]; + u32 mair[2]; + struct arm_smmu_cfg *cfg; +}; + struct arm_smmu_master_cfg { struct arm_smmu_device *smmu; s16 smendx[]; @@ -380,6 +194,7 @@ struct arm_smmu_device { u32 num_context_banks; u32 num_s2_context_banks; DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS); + struct arm_smmu_cb *cbs; atomic_t irptndx; u32 num_mapping_groups; @@ -776,17 +591,74 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev) static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain, struct io_pgtable_cfg *pgtbl_cfg) { - u32 reg, reg2; - u64 reg64; - bool stage1; struct arm_smmu_cfg *cfg = &smmu_domain->cfg; - struct arm_smmu_device *smmu = smmu_domain->smmu; + struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx]; + bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; + + cb->cfg = cfg; + + /* TTBCR */ + if (stage1) { + if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { + cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr; + } else { + cb->tcr[0] = pgtbl_cfg->arm_lpae_s1_cfg.tcr; + cb->tcr[1] = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32; + cb->tcr[1] |= TTBCR2_SEP_UPSTREAM; + if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) + cb->tcr[1] |= TTBCR2_AS; + } + } else { + cb->tcr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vtcr; + } + + /* TTBRs */ + if (stage1) { + if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { + cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr[0]; + cb->ttbr[1] = pgtbl_cfg->arm_v7s_cfg.ttbr[1]; + } else { + cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0]; + cb->ttbr[0] |= (u64)cfg->asid << TTBRn_ASID_SHIFT; + cb->ttbr[1] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1]; + cb->ttbr[1] |= (u64)cfg->asid << TTBRn_ASID_SHIFT; + } + } else { + cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr; + } + + /* MAIRs (stage-1 only) */ + if (stage1) { + if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { + cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr; + cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr; + } else { + cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair[0]; + cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair[1]; + } + } +} + +static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx) +{ + u32 reg; + bool stage1; + struct arm_smmu_cb *cb = &smmu->cbs[idx]; + struct arm_smmu_cfg *cfg = cb->cfg; void __iomem *cb_base, *gr1_base; + cb_base = ARM_SMMU_CB(smmu, idx); + + /* Unassigned context banks only need disabling */ + if (!cfg) { + writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR); + return; + } + gr1_base = ARM_SMMU_GR1(smmu); stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; - cb_base = ARM_SMMU_CB(smmu, cfg->cbndx); + /* CBA2R */ if (smmu->version > ARM_SMMU_V1) { if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) reg = CBA2R_RW64_64BIT; @@ -796,7 +668,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain, if (smmu->features & ARM_SMMU_FEAT_VMID16) reg |= cfg->vmid << CBA2R_VMID_SHIFT; - writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx)); + writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(idx)); } /* CBAR */ @@ -815,72 +687,41 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain, /* 8-bit VMIDs live in CBAR */ reg |= cfg->vmid << CBAR_VMID_SHIFT; } - writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx)); + writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(idx)); /* * TTBCR * We must write this before the TTBRs, since it determines the * access behaviour of some fields (in particular, ASID[15:8]). 
*/ - if (stage1) { - if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { - reg = pgtbl_cfg->arm_v7s_cfg.tcr; - reg2 = 0; - } else { - reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr; - reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32; - reg2 |= TTBCR2_SEP_UPSTREAM; - if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) - reg2 |= TTBCR2_AS; - } - if (smmu->version > ARM_SMMU_V1) - writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2); - } else { - reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr; - } - writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR); + if (stage1 && smmu->version > ARM_SMMU_V1) + writel_relaxed(cb->tcr[1], cb_base + ARM_SMMU_CB_TTBCR2); + writel_relaxed(cb->tcr[0], cb_base + ARM_SMMU_CB_TTBCR); /* TTBRs */ - if (stage1) { - if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { - reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0]; - writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0); - reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1]; - writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1); - writel_relaxed(cfg->asid, cb_base + ARM_SMMU_CB_CONTEXTIDR); - } else { - reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0]; - reg64 |= (u64)cfg->asid << TTBRn_ASID_SHIFT; - writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0); - reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1]; - reg64 |= (u64)cfg->asid << TTBRn_ASID_SHIFT; - writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1); - } + if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { + writel_relaxed(cfg->asid, cb_base + ARM_SMMU_CB_CONTEXTIDR); + writel_relaxed(cb->ttbr[0], cb_base + ARM_SMMU_CB_TTBR0); + writel_relaxed(cb->ttbr[1], cb_base + ARM_SMMU_CB_TTBR1); } else { - reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr; - writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0); + writeq_relaxed(cb->ttbr[0], cb_base + ARM_SMMU_CB_TTBR0); + if (stage1) + writeq_relaxed(cb->ttbr[1], cb_base + ARM_SMMU_CB_TTBR1); } /* MAIRs (stage-1 only) */ if (stage1) { - if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { - reg = pgtbl_cfg->arm_v7s_cfg.prrr; - reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr; - } else { - reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0]; - reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1]; - } - writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0); - writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1); + writel_relaxed(cb->mair[0], cb_base + ARM_SMMU_CB_S1_MAIR0); + writel_relaxed(cb->mair[1], cb_base + ARM_SMMU_CB_S1_MAIR1); } /* SCTLR */ reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M; if (stage1) reg |= SCTLR_S1_ASIDPNE; -#ifdef __BIG_ENDIAN - reg |= SCTLR_E; -#endif + if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) + reg |= SCTLR_E; + writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR); } @@ -1043,6 +884,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, /* Initialise the context bank with our page table cfg */ arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg); + arm_smmu_write_context_bank(smmu, cfg->cbndx); /* * Request context fault interrupt. Do this last to avoid the @@ -1075,7 +917,6 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain) struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); struct arm_smmu_device *smmu = smmu_domain->smmu; struct arm_smmu_cfg *cfg = &smmu_domain->cfg; - void __iomem *cb_base; int irq; if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY) @@ -1085,8 +926,8 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain) * Disable the context bank and free the page tables before freeing * it. 
*/ - cb_base = ARM_SMMU_CB(smmu, cfg->cbndx); - writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR); + smmu->cbs[cfg->cbndx].cfg = NULL; + arm_smmu_write_context_bank(smmu, cfg->cbndx); if (cfg->irptndx != INVALID_IRPTNDX) { irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; @@ -1736,7 +1577,6 @@ static struct iommu_ops arm_smmu_ops = { static void arm_smmu_device_reset(struct arm_smmu_device *smmu) { void __iomem *gr0_base = ARM_SMMU_GR0(smmu); - void __iomem *cb_base; int i; u32 reg, major; @@ -1772,8 +1612,9 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu) /* Make sure all context banks are disabled and clear CB_FSR */ for (i = 0; i < smmu->num_context_banks; ++i) { - cb_base = ARM_SMMU_CB(smmu, i); - writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR); + void __iomem *cb_base = ARM_SMMU_CB(smmu, i); + + arm_smmu_write_context_bank(smmu, i); writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR); /* * Disable MMU-500's not-particularly-beneficial next-page @@ -1979,6 +1820,10 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) smmu->cavium_id_base -= smmu->num_context_banks; dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n"); } + smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks, + sizeof(*smmu->cbs), GFP_KERNEL); + if (!smmu->cbs) + return -ENOMEM; /* ID2 */ id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2); @@ -2336,13 +2181,30 @@ static int arm_smmu_device_remove(struct platform_device *pdev) return 0; } +static void arm_smmu_device_shutdown(struct platform_device *pdev) +{ + arm_smmu_device_remove(pdev); +} + +static int __maybe_unused arm_smmu_pm_resume(struct device *dev) +{ + struct arm_smmu_device *smmu = dev_get_drvdata(dev); + + arm_smmu_device_reset(smmu); + return 0; +} + +static SIMPLE_DEV_PM_OPS(arm_smmu_pm_ops, NULL, arm_smmu_pm_resume); + static struct platform_driver arm_smmu_driver = { .driver = { .name = "arm-smmu", .of_match_table = of_match_ptr(arm_smmu_of_match), + .pm = &arm_smmu_pm_ops, }, .probe = arm_smmu_device_probe, .remove = arm_smmu_device_remove, + .shutdown = arm_smmu_device_shutdown, }; module_platform_driver(arm_smmu_driver); diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index c8b0329c85d2..57c920c1372d 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c @@ -497,7 +497,7 @@ static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg) #define dmar_parse_one_rhsa dmar_res_noop #endif -static void __init +static void dmar_table_print_dmar_entry(struct acpi_dmar_header *header) { struct acpi_dmar_hardware_unit *drhd; @@ -1343,7 +1343,7 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep, if (mask) { BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1)); - addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1; + addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1; desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE; } else desc.high = QI_DEV_IOTLB_ADDR(addr); diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index 2395478dde75..f596fcc32898 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -54,10 +54,6 @@ typedef u32 sysmmu_pte_t; #define lv2ent_small(pent) ((*(pent) & 2) == 2) #define lv2ent_large(pent) ((*(pent) & 3) == 1) -#ifdef CONFIG_BIG_ENDIAN -#warning "revisit driver if we can enable big-endian ptes" -#endif - /* * v1.x - v3.x SYSMMU supports 32bit physical and 32bit virtual address spaces * v5.0 introduced support for 36bit physical address space by shifting @@ -569,7 +565,7 @@ static void 
sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data, spin_unlock_irqrestore(&data->lock, flags); } -static struct iommu_ops exynos_iommu_ops; +static const struct iommu_ops exynos_iommu_ops; static int __init exynos_sysmmu_probe(struct platform_device *pdev) { @@ -659,6 +655,13 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev) } } + /* + * use the first registered sysmmu device for performing + * dma mapping operations on iommu page tables (cpu cache flush) + */ + if (!dma_dev) + dma_dev = &pdev->dev; + pm_runtime_enable(dev); return 0; @@ -1323,7 +1326,7 @@ static int exynos_iommu_of_xlate(struct device *dev, return 0; } -static struct iommu_ops exynos_iommu_ops = { +static const struct iommu_ops exynos_iommu_ops = { .domain_alloc = exynos_iommu_domain_alloc, .domain_free = exynos_iommu_domain_free, .attach_dev = exynos_iommu_attach_device, @@ -1339,8 +1342,6 @@ static struct iommu_ops exynos_iommu_ops = { .of_xlate = exynos_iommu_of_xlate, }; -static bool init_done; - static int __init exynos_iommu_init(void) { int ret; @@ -1373,8 +1374,6 @@ static int __init exynos_iommu_init(void) goto err_set_iommu; } - init_done = true; - return 0; err_set_iommu: kmem_cache_free(lv2table_kmem_cache, zero_lv2_table); @@ -1384,27 +1383,6 @@ static int __init exynos_iommu_init(void) kmem_cache_destroy(lv2table_kmem_cache); return ret; } +core_initcall(exynos_iommu_init); -static int __init exynos_iommu_of_setup(struct device_node *np) -{ - struct platform_device *pdev; - - if (!init_done) - exynos_iommu_init(); - - pdev = of_platform_device_create(np, NULL, platform_bus_type.dev_root); - if (!pdev) - return -ENODEV; - - /* - * use the first registered sysmmu device for performing - * dma mapping operations on iommu page tables (cpu cache flush) - */ - if (!dma_dev) - dma_dev = &pdev->dev; - - return 0; -} - -IOMMU_OF_DECLARE(exynos_iommu_of, "samsung,exynos-sysmmu", - exynos_iommu_of_setup); +IOMMU_OF_DECLARE(exynos_iommu_of, "samsung,exynos-sysmmu", NULL); diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c index a34355fca37a..8540625796a1 100644 --- a/drivers/iommu/fsl_pamu.c +++ b/drivers/iommu/fsl_pamu.c @@ -42,6 +42,8 @@ struct pamu_isr_data { static struct paace *ppaact; static struct paace *spaact; +static bool probed; /* Has PAMU been probed? */ + /* * Table for matching compatible strings, for device tree * guts node, for QorIQ SOCs. @@ -530,8 +532,8 @@ u32 get_stash_id(u32 stash_dest_hint, u32 vcpu) if (node) { prop = of_get_property(node, "cache-stash-id", NULL); if (!prop) { - pr_debug("missing cache-stash-id at %s\n", - node->full_name); + pr_debug("missing cache-stash-id at %pOF\n", + node); of_node_put(node); return ~(u32)0; } @@ -557,8 +559,8 @@ u32 get_stash_id(u32 stash_dest_hint, u32 vcpu) if (stash_dest_hint == cache_level) { prop = of_get_property(node, "cache-stash-id", NULL); if (!prop) { - pr_debug("missing cache-stash-id at %s\n", - node->full_name); + pr_debug("missing cache-stash-id at %pOF\n", + node); of_node_put(node); return ~(u32)0; } @@ -568,8 +570,7 @@ u32 get_stash_id(u32 stash_dest_hint, u32 vcpu) prop = of_get_property(node, "next-level-cache", NULL); if (!prop) { - pr_debug("can't find next-level-cache at %s\n", - node->full_name); + pr_debug("can't find next-level-cache at %pOF\n", node); of_node_put(node); return ~(u32)0; /* can't traverse any further */ } @@ -1033,6 +1034,9 @@ static int fsl_pamu_probe(struct platform_device *pdev) * NOTE : All PAMUs share the same LIODN tables. 
*/ + if (WARN_ON(probed)) + return -EBUSY; + pamu_regs = of_iomap(dev->of_node, 0); if (!pamu_regs) { dev_err(dev, "ioremap of PAMU node failed\n"); @@ -1063,8 +1067,7 @@ static int fsl_pamu_probe(struct platform_device *pdev) guts_node = of_find_matching_node(NULL, guts_device_ids); if (!guts_node) { - dev_err(dev, "could not find GUTS node %s\n", - dev->of_node->full_name); + dev_err(dev, "could not find GUTS node %pOF\n", dev->of_node); ret = -ENODEV; goto error; } @@ -1172,6 +1175,8 @@ static int fsl_pamu_probe(struct platform_device *pdev) setup_liodns(); + probed = true; + return 0; error_genpool: @@ -1246,8 +1251,7 @@ static __init int fsl_pamu_init(void) pdev = platform_device_alloc("fsl-of-pamu", 0); if (!pdev) { - pr_err("could not allocate device %s\n", - np->full_name); + pr_err("could not allocate device %pOF\n", np); ret = -ENOMEM; goto error_device_alloc; } @@ -1259,8 +1263,7 @@ static __init int fsl_pamu_init(void) ret = platform_device_add(pdev); if (ret) { - pr_err("could not add device %s (err=%i)\n", - np->full_name, ret); + pr_err("could not add device %pOF (err=%i)\n", np, ret); goto error_device_add; } diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c index da0e1e30ef37..f089136e9c3f 100644 --- a/drivers/iommu/fsl_pamu_domain.c +++ b/drivers/iommu/fsl_pamu_domain.c @@ -33,6 +33,8 @@ static struct kmem_cache *fsl_pamu_domain_cache; static struct kmem_cache *iommu_devinfo_cache; static DEFINE_SPINLOCK(device_domain_lock); +struct iommu_device pamu_iommu; /* IOMMU core code handle */ + static struct fsl_dma_domain *to_fsl_dma_domain(struct iommu_domain *dom) { return container_of(dom, struct fsl_dma_domain, iommu_domain); @@ -619,8 +621,8 @@ static int handle_attach_device(struct fsl_dma_domain *dma_domain, for (i = 0; i < num; i++) { /* Ensure that LIODN value is valid */ if (liodn[i] >= PAACE_NUMBER_ENTRIES) { - pr_debug("Invalid liodn %d, attach device failed for %s\n", - liodn[i], dev->of_node->full_name); + pr_debug("Invalid liodn %d, attach device failed for %pOF\n", + liodn[i], dev->of_node); ret = -EINVAL; break; } @@ -684,8 +686,7 @@ static int fsl_pamu_attach_device(struct iommu_domain *domain, liodn_cnt = len / sizeof(u32); ret = handle_attach_device(dma_domain, dev, liodn, liodn_cnt); } else { - pr_debug("missing fsl,liodn property at %s\n", - dev->of_node->full_name); + pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node); ret = -EINVAL; } @@ -720,8 +721,7 @@ static void fsl_pamu_detach_device(struct iommu_domain *domain, if (prop) detach_device(dev, dma_domain); else - pr_debug("missing fsl,liodn property at %s\n", - dev->of_node->full_name); + pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node); } static int configure_domain_geometry(struct iommu_domain *domain, void *data) @@ -983,11 +983,14 @@ static int fsl_pamu_add_device(struct device *dev) iommu_group_put(group); + iommu_device_link(&pamu_iommu, dev); + return 0; } static void fsl_pamu_remove_device(struct device *dev) { + iommu_device_unlink(&pamu_iommu, dev); iommu_group_remove_device(dev); } @@ -1073,6 +1076,19 @@ int __init pamu_domain_init(void) if (ret) return ret; + ret = iommu_device_sysfs_add(&pamu_iommu, NULL, NULL, "iommu0"); + if (ret) + return ret; + + iommu_device_set_ops(&pamu_iommu, &fsl_pamu_ops); + + ret = iommu_device_register(&pamu_iommu); + if (ret) { + iommu_device_sysfs_remove(&pamu_iommu); + pr_err("Can't register iommu device\n"); + return ret; + } + bus_set_iommu(&platform_bus_type, &fsl_pamu_ops); 
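[Editor's note] Two small robustness changes sit in these fsl_pamu hunks: fsl_pamu_probe() now refuses to run twice via the new probed flag, and pamu_domain_init() removes its sysfs entry again if iommu_device_register() fails. The user-space sketch below folds both into one hypothetical probe function purely to show the control flow; none of the helper names are real, and error codes are simplified.

#include <stdio.h>
#include <stdbool.h>

static bool probed;		/* mirrors the new "Has PAMU been probed?" flag */
static bool sysfs_added;

static int sysfs_add(void)	 { sysfs_added = true; return 0; }
static void sysfs_remove(void)	 { sysfs_added = false; }
static int register_iommu(void)	 { return -1; /* simulate registration failing */ }

static int pamu_probe(void)
{
	if (probed)		/* the driver WARNs and returns -EBUSY here */
		return -1;

	if (sysfs_add())
		return -1;

	if (register_iommu()) {
		sysfs_remove();	/* unwind the earlier step on failure */
		fprintf(stderr, "Can't register iommu device\n");
		return -1;
	}

	probed = true;		/* only set once everything succeeded */
	return 0;
}

int main(void)
{
	printf("probe: %d (sysfs_added=%d, probed=%d)\n",
	       pamu_probe(), sysfs_added, probed);
	return 0;
}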
bus_set_iommu(&pci_bus_type, &fsl_pamu_ops); diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 3e8636f1220e..6784a05dd6b2 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -458,31 +458,6 @@ static LIST_HEAD(dmar_rmrr_units); #define for_each_rmrr_units(rmrr) \ list_for_each_entry(rmrr, &dmar_rmrr_units, list) -static void flush_unmaps_timeout(unsigned long data); - -struct deferred_flush_entry { - unsigned long iova_pfn; - unsigned long nrpages; - struct dmar_domain *domain; - struct page *freelist; -}; - -#define HIGH_WATER_MARK 250 -struct deferred_flush_table { - int next; - struct deferred_flush_entry entries[HIGH_WATER_MARK]; -}; - -struct deferred_flush_data { - spinlock_t lock; - int timer_on; - struct timer_list timer; - long size; - struct deferred_flush_table *tables; -}; - -static DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush); - /* bitmap for indexing intel_iommus */ static int g_num_of_iommus; @@ -901,6 +876,13 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf struct pci_dev *pf_pdev; pdev = to_pci_dev(dev); + +#ifdef CONFIG_X86 + /* VMD child devices currently cannot be handled individually */ + if (is_vmd(pdev->bus)) + return NULL; +#endif + /* VFs aren't listed in scope tables; we need to look up * the PF instead to find the IOMMU. */ pf_pdev = pci_physfn(pdev); @@ -974,20 +956,6 @@ static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn) return ret; } -static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn) -{ - struct context_entry *context; - unsigned long flags; - - spin_lock_irqsave(&iommu->lock, flags); - context = iommu_context_addr(iommu, bus, devfn, 0); - if (context) { - context_clear_entry(context); - __iommu_flush_cache(iommu, context, sizeof(*context)); - } - spin_unlock_irqrestore(&iommu->lock, flags); -} - static void free_context_table(struct intel_iommu *iommu) { int i; @@ -1137,8 +1105,9 @@ static void dma_pte_clear_range(struct dmar_domain *domain, } static void dma_pte_free_level(struct dmar_domain *domain, int level, - struct dma_pte *pte, unsigned long pfn, - unsigned long start_pfn, unsigned long last_pfn) + int retain_level, struct dma_pte *pte, + unsigned long pfn, unsigned long start_pfn, + unsigned long last_pfn) { pfn = max(start_pfn, pfn); pte = &pte[pfn_level_offset(pfn, level)]; @@ -1153,12 +1122,17 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level, level_pfn = pfn & level_mask(level); level_pte = phys_to_virt(dma_pte_addr(pte)); - if (level > 2) - dma_pte_free_level(domain, level - 1, level_pte, - level_pfn, start_pfn, last_pfn); + if (level > 2) { + dma_pte_free_level(domain, level - 1, retain_level, + level_pte, level_pfn, start_pfn, + last_pfn); + } - /* If range covers entire pagetable, free it */ - if (!(start_pfn > level_pfn || + /* + * Free the page table if we're below the level we want to + * retain and the range covers the entire table. + */ + if (level < retain_level && !(start_pfn > level_pfn || last_pfn < level_pfn + level_size(level) - 1)) { dma_clear_pte(pte); domain_flush_cache(domain, pte, sizeof(*pte)); @@ -1169,10 +1143,14 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level, } while (!first_pte_in_page(++pte) && pfn <= last_pfn); } -/* clear last level (leaf) ptes and free page table pages. */ +/* + * clear last level (leaf) ptes and free page table pages below the + * level we wish to keep intact. 
+ */ static void dma_pte_free_pagetable(struct dmar_domain *domain, unsigned long start_pfn, - unsigned long last_pfn) + unsigned long last_pfn, + int retain_level) { BUG_ON(!domain_pfn_supported(domain, start_pfn)); BUG_ON(!domain_pfn_supported(domain, last_pfn)); @@ -1181,7 +1159,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain, dma_pte_clear_range(domain, start_pfn, last_pfn); /* We don't need lock here; nobody else touches the iova range */ - dma_pte_free_level(domain, agaw_to_level(domain->agaw), + dma_pte_free_level(domain, agaw_to_level(domain->agaw), retain_level, domain->pgd, 0, start_pfn, last_pfn); /* free pgd */ @@ -1309,6 +1287,13 @@ static void dma_free_pagelist(struct page *freelist) } } +static void iova_entry_free(unsigned long data) +{ + struct page *freelist = (struct page *)data; + + dma_free_pagelist(freelist); +} + /* iommu handling */ static int iommu_alloc_root_entry(struct intel_iommu *iommu) { @@ -1622,6 +1607,25 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, addr, mask); } +static void iommu_flush_iova(struct iova_domain *iovad) +{ + struct dmar_domain *domain; + int idx; + + domain = container_of(iovad, struct dmar_domain, iovad); + + for_each_domain_iommu(idx, domain) { + struct intel_iommu *iommu = g_iommus[idx]; + u16 did = domain->iommu_did[iommu->seq_id]; + + iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); + + if (!cap_caching_mode(iommu->cap)) + iommu_flush_dev_iotlb(get_iommu_domain(iommu, did), + 0, MAX_AGAW_PFN_WIDTH); + } +} + static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) { u32 pmen; @@ -1932,9 +1936,16 @@ static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu, { int adjust_width, agaw; unsigned long sagaw; + int err; init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN, DMA_32BIT_PFN); + + err = init_iova_flush_queue(&domain->iovad, + iommu_flush_iova, iova_entry_free); + if (err) + return err; + domain_reserve_special_ranges(domain); /* calculate AGAW */ @@ -1986,14 +1997,6 @@ static void domain_exit(struct dmar_domain *domain) if (!domain) return; - /* Flush any lazy unmaps that may reference this domain */ - if (!intel_iommu_strict) { - int cpu; - - for_each_possible_cpu(cpu) - flush_unmaps_timeout(cpu); - } - /* Remove associated devices and clear attached or cached domains */ rcu_read_lock(); domain_remove_dev_info(domain); @@ -2277,8 +2280,11 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, /* * Ensure that old small page tables are * removed to make room for superpage(s). + * We're adding new large pages, so make sure + * we don't remove their parent tables. 
*/ - dma_pte_free_pagetable(domain, iov_pfn, end_pfn); + dma_pte_free_pagetable(domain, iov_pfn, end_pfn, + largepage_lvl + 1); } else { pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE; } @@ -2351,13 +2357,33 @@ static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long i static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn) { + unsigned long flags; + struct context_entry *context; + u16 did_old; + if (!iommu) return; - clear_context_table(iommu, bus, devfn); - iommu->flush.flush_context(iommu, 0, 0, 0, - DMA_CCMD_GLOBAL_INVL); - iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); + spin_lock_irqsave(&iommu->lock, flags); + context = iommu_context_addr(iommu, bus, devfn, 0); + if (!context) { + spin_unlock_irqrestore(&iommu->lock, flags); + return; + } + did_old = context_domain_id(context); + context_clear_entry(context); + __iommu_flush_cache(iommu, context, sizeof(*context)); + spin_unlock_irqrestore(&iommu->lock, flags); + iommu->flush.flush_context(iommu, + did_old, + (((u16)bus) << 8) | devfn, + DMA_CCMD_MASK_NOBIT, + DMA_CCMD_DEVICE_INVL); + iommu->flush.flush_iotlb(iommu, + did_old, + 0, + 0, + DMA_TLB_DSI_FLUSH); } static inline void unlink_domain_info(struct device_domain_info *info) @@ -3206,7 +3232,7 @@ static int __init init_dmars(void) bool copied_tables = false; struct device *dev; struct intel_iommu *iommu; - int i, ret, cpu; + int i, ret; /* * for each drhd @@ -3239,22 +3265,6 @@ static int __init init_dmars(void) goto error; } - for_each_possible_cpu(cpu) { - struct deferred_flush_data *dfd = per_cpu_ptr(&deferred_flush, - cpu); - - dfd->tables = kzalloc(g_num_of_iommus * - sizeof(struct deferred_flush_table), - GFP_KERNEL); - if (!dfd->tables) { - ret = -ENOMEM; - goto free_g_iommus; - } - - spin_lock_init(&dfd->lock); - setup_timer(&dfd->timer, flush_unmaps_timeout, cpu); - } - for_each_active_iommu(iommu, drhd) { g_iommus[iommu->seq_id] = iommu; @@ -3437,10 +3447,9 @@ static int __init init_dmars(void) disable_dmar_iommu(iommu); free_dmar_iommu(iommu); } -free_g_iommus: - for_each_possible_cpu(cpu) - kfree(per_cpu_ptr(&deferred_flush, cpu)->tables); + kfree(g_iommus); + error: return ret; } @@ -3645,110 +3654,6 @@ static dma_addr_t intel_map_page(struct device *dev, struct page *page, dir, *dev->dma_mask); } -static void flush_unmaps(struct deferred_flush_data *flush_data) -{ - int i, j; - - flush_data->timer_on = 0; - - /* just flush them all */ - for (i = 0; i < g_num_of_iommus; i++) { - struct intel_iommu *iommu = g_iommus[i]; - struct deferred_flush_table *flush_table = - &flush_data->tables[i]; - if (!iommu) - continue; - - if (!flush_table->next) - continue; - - /* In caching mode, global flushes turn emulation expensive */ - if (!cap_caching_mode(iommu->cap)) - iommu->flush.flush_iotlb(iommu, 0, 0, 0, - DMA_TLB_GLOBAL_FLUSH); - for (j = 0; j < flush_table->next; j++) { - unsigned long mask; - struct deferred_flush_entry *entry = - &flush_table->entries[j]; - unsigned long iova_pfn = entry->iova_pfn; - unsigned long nrpages = entry->nrpages; - struct dmar_domain *domain = entry->domain; - struct page *freelist = entry->freelist; - - /* On real hardware multiple invalidations are expensive */ - if (cap_caching_mode(iommu->cap)) - iommu_flush_iotlb_psi(iommu, domain, - mm_to_dma_pfn(iova_pfn), - nrpages, !freelist, 0); - else { - mask = ilog2(nrpages); - iommu_flush_dev_iotlb(domain, - (uint64_t)iova_pfn << PAGE_SHIFT, mask); - } - free_iova_fast(&domain->iovad, iova_pfn, nrpages); - if (freelist) - 
dma_free_pagelist(freelist); - } - flush_table->next = 0; - } - - flush_data->size = 0; -} - -static void flush_unmaps_timeout(unsigned long cpuid) -{ - struct deferred_flush_data *flush_data = per_cpu_ptr(&deferred_flush, cpuid); - unsigned long flags; - - spin_lock_irqsave(&flush_data->lock, flags); - flush_unmaps(flush_data); - spin_unlock_irqrestore(&flush_data->lock, flags); -} - -static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn, - unsigned long nrpages, struct page *freelist) -{ - unsigned long flags; - int entry_id, iommu_id; - struct intel_iommu *iommu; - struct deferred_flush_entry *entry; - struct deferred_flush_data *flush_data; - - flush_data = raw_cpu_ptr(&deferred_flush); - - /* Flush all CPUs' entries to avoid deferring too much. If - * this becomes a bottleneck, can just flush us, and rely on - * flush timer for the rest. - */ - if (flush_data->size == HIGH_WATER_MARK) { - int cpu; - - for_each_online_cpu(cpu) - flush_unmaps_timeout(cpu); - } - - spin_lock_irqsave(&flush_data->lock, flags); - - iommu = domain_get_iommu(dom); - iommu_id = iommu->seq_id; - - entry_id = flush_data->tables[iommu_id].next; - ++(flush_data->tables[iommu_id].next); - - entry = &flush_data->tables[iommu_id].entries[entry_id]; - entry->domain = dom; - entry->iova_pfn = iova_pfn; - entry->nrpages = nrpages; - entry->freelist = freelist; - - if (!flush_data->timer_on) { - mod_timer(&flush_data->timer, jiffies + msecs_to_jiffies(10)); - flush_data->timer_on = 1; - } - flush_data->size++; - spin_unlock_irqrestore(&flush_data->lock, flags); -} - static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size) { struct dmar_domain *domain; @@ -3784,7 +3689,8 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size) free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages)); dma_free_pagelist(freelist); } else { - add_unmap(domain, iova_pfn, nrpages, freelist); + queue_iova(&domain->iovad, iova_pfn, nrpages, + (unsigned long)freelist); /* * queue up the release of the unmap to save the 1/6th of the * cpu used up by the iotlb flush operation... 
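[Editor's note] The code deleted above is intel-iommu's private deferred-unmap machinery: per-CPU tables of up to HIGH_WATER_MARK (250) pending entries, drained in bulk from a 10 ms timer or when a table fills, so that one IOTLB invalidation covers many unmaps. The intel_unmap() hunk hands this job to the generic queue_iova() path instead, but the batching trade-off is the same; the stand-alone sketch below models it with invented names.

#include <stdio.h>

#define HIGH_WATER_MARK 250	/* same threshold the removed code used */

struct pending { unsigned long iova_pfn, nrpages; };

static struct pending batch[HIGH_WATER_MARK];
static int batch_size;
static int flushes;

static void flush_batch(void)
{
	/*
	 * One (expensive) IOTLB flush covers every entry queued so far;
	 * afterwards all the batched IOVAs could safely be reused.
	 */
	flushes++;
	batch_size = 0;
}

static void queue_unmap(unsigned long iova_pfn, unsigned long nrpages)
{
	if (batch_size == HIGH_WATER_MARK)
		flush_batch();
	batch[batch_size++] = (struct pending){ iova_pfn, nrpages };
}

int main(void)
{
	for (unsigned long i = 0; i < 1000; i++)
		queue_unmap(i, 1);
	flush_batch();		/* the real code also drains on a timer */
	printf("1000 unmaps -> %d flushes\n", flushes);
	return 0;
}

The point of the batching is visible in the output: a thousand unmaps cost only a handful of invalidations instead of one each.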
@@ -3938,7 +3844,8 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot); if (unlikely(ret)) { dma_pte_free_pagetable(domain, start_vpfn, - start_vpfn + size - 1); + start_vpfn + size - 1, + agaw_to_level(domain->agaw) + 1); free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size)); return 0; } @@ -4721,7 +4628,6 @@ static void free_all_cpu_cached_iovas(unsigned int cpu) static int intel_iommu_cpu_dead(unsigned int cpu) { free_all_cpu_cached_iovas(cpu); - flush_unmaps_timeout(cpu); return 0; } @@ -5343,7 +5249,8 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd sdev->sid = PCI_DEVID(info->bus, info->devfn); if (!(ctx_lo & CONTEXT_PASIDE)) { - context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table); + if (iommu->pasid_state_table) + context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table); context[1].lo = (u64)virt_to_phys(iommu->pasid_table) | intel_iommu_get_pts(iommu); diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c index f620dccec8ee..f6697e55c2d4 100644 --- a/drivers/iommu/intel-svm.c +++ b/drivers/iommu/intel-svm.c @@ -24,6 +24,7 @@ #include #include #include +#include static irqreturn_t prq_event_thread(int irq, void *d); @@ -546,6 +547,14 @@ static bool access_error(struct vm_area_struct *vma, struct page_req_dsc *req) return (requested & ~vma->vm_flags) != 0; } +static bool is_canonical_address(u64 addr) +{ + int shift = 64 - (__VIRTUAL_MASK_SHIFT + 1); + long saddr = (long) addr; + + return (((saddr << shift) >> shift) == saddr); +} + static irqreturn_t prq_event_thread(int irq, void *d) { struct intel_iommu *iommu = d; @@ -603,6 +612,11 @@ static irqreturn_t prq_event_thread(int irq, void *d) /* If the mm is already defunct, don't handle faults. 
*/ if (!mmget_not_zero(svm->mm)) goto bad_req; + + /* If address is not canonical, return invalid response */ + if (!is_canonical_address(address)) + goto bad_req; + down_read(&svm->mm->mmap_sem); vma = find_extend_vma(svm->mm, address); if (!vma || address < vma->vm_start) diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index d665d0dc16e8..6961fc393f0b 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -245,7 +245,7 @@ static void __arm_v7s_free_table(void *table, int lvl, static void __arm_v7s_pte_sync(arm_v7s_iopte *ptep, int num_entries, struct io_pgtable_cfg *cfg) { - if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) + if (cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) return; dma_sync_single_for_device(cfg->iommu_dev, __arm_v7s_dma_addr(ptep), diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 3f6ea160afed..3de5c0bcb5cc 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -527,6 +527,8 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group, } + iommu_flush_tlb_all(domain); + out: iommu_put_resv_regions(dev, &mappings); @@ -1005,11 +1007,10 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev) if (group) return group; - group = ERR_PTR(-EINVAL); - - if (ops && ops->device_group) - group = ops->device_group(dev); + if (!ops) + return ERR_PTR(-EINVAL); + group = ops->device_group(dev); if (WARN_ON_ONCE(group == NULL)) return ERR_PTR(-EINVAL); @@ -1283,6 +1284,10 @@ static int __iommu_attach_device(struct iommu_domain *domain, struct device *dev) { int ret; + if ((domain->ops->is_attach_deferred != NULL) && + domain->ops->is_attach_deferred(domain, dev)) + return 0; + if (unlikely(domain->ops->attach_dev == NULL)) return -ENODEV; @@ -1298,12 +1303,8 @@ int iommu_attach_device(struct iommu_domain *domain, struct device *dev) int ret; group = iommu_group_get(dev); - /* FIXME: Remove this when groups a mandatory for iommu drivers */ - if (group == NULL) - return __iommu_attach_device(domain, dev); - /* - * We have a group - lock it to make sure the device-count doesn't + * Lock the group to make sure the device-count doesn't * change while we are attaching */ mutex_lock(&group->mutex); @@ -1324,6 +1325,10 @@ EXPORT_SYMBOL_GPL(iommu_attach_device); static void __iommu_detach_device(struct iommu_domain *domain, struct device *dev) { + if ((domain->ops->is_attach_deferred != NULL) && + domain->ops->is_attach_deferred(domain, dev)) + return; + if (unlikely(domain->ops->detach_dev == NULL)) return; @@ -1336,9 +1341,6 @@ void iommu_detach_device(struct iommu_domain *domain, struct device *dev) struct iommu_group *group; group = iommu_group_get(dev); - /* FIXME: Remove this when groups a mandatory for iommu drivers */ - if (group == NULL) - return __iommu_detach_device(domain, dev); mutex_lock(&group->mutex); if (iommu_group_device_count(group) != 1) { @@ -1360,8 +1362,7 @@ struct iommu_domain *iommu_get_domain_for_dev(struct device *dev) struct iommu_group *group; group = iommu_group_get(dev); - /* FIXME: Remove this when groups a mandatory for iommu drivers */ - if (group == NULL) + if (!group) return NULL; domain = group->domain; @@ -1556,13 +1557,16 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova, } EXPORT_SYMBOL_GPL(iommu_map); -size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size) +static size_t __iommu_unmap(struct iommu_domain *domain, + unsigned long iova, size_t size, + bool sync) { + const struct iommu_ops *ops = 
domain->ops; size_t unmapped_page, unmapped = 0; - unsigned int min_pagesz; unsigned long orig_iova = iova; + unsigned int min_pagesz; - if (unlikely(domain->ops->unmap == NULL || + if (unlikely(ops->unmap == NULL || domain->pgsize_bitmap == 0UL)) return -ENODEV; @@ -1592,10 +1596,13 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size) while (unmapped < size) { size_t pgsize = iommu_pgsize(domain, iova, size - unmapped); - unmapped_page = domain->ops->unmap(domain, iova, pgsize); + unmapped_page = ops->unmap(domain, iova, pgsize); if (!unmapped_page) break; + if (sync && ops->iotlb_range_add) + ops->iotlb_range_add(domain, iova, pgsize); + pr_debug("unmapped: iova 0x%lx size 0x%zx\n", iova, unmapped_page); @@ -1603,11 +1610,27 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size) unmapped += unmapped_page; } + if (sync && ops->iotlb_sync) + ops->iotlb_sync(domain); + trace_unmap(orig_iova, size, unmapped); return unmapped; } + +size_t iommu_unmap(struct iommu_domain *domain, + unsigned long iova, size_t size) +{ + return __iommu_unmap(domain, iova, size, true); +} EXPORT_SYMBOL_GPL(iommu_unmap); +size_t iommu_unmap_fast(struct iommu_domain *domain, + unsigned long iova, size_t size) +{ + return __iommu_unmap(domain, iova, size, false); +} +EXPORT_SYMBOL_GPL(iommu_unmap_fast); + size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova, struct scatterlist *sg, unsigned int nents, int prot) { diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index 246f14c83944..33edfa794ae9 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -32,6 +32,8 @@ static unsigned long iova_rcache_get(struct iova_domain *iovad, unsigned long limit_pfn); static void init_iova_rcaches(struct iova_domain *iovad); static void free_iova_rcaches(struct iova_domain *iovad); +static void fq_destroy_all_entries(struct iova_domain *iovad); +static void fq_flush_timeout(unsigned long data); void init_iova_domain(struct iova_domain *iovad, unsigned long granule, @@ -50,10 +52,61 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule, iovad->granule = granule; iovad->start_pfn = start_pfn; iovad->dma_32bit_pfn = pfn_32bit + 1; + iovad->flush_cb = NULL; + iovad->fq = NULL; init_iova_rcaches(iovad); } EXPORT_SYMBOL_GPL(init_iova_domain); +static void free_iova_flush_queue(struct iova_domain *iovad) +{ + if (!iovad->fq) + return; + + if (timer_pending(&iovad->fq_timer)) + del_timer(&iovad->fq_timer); + + fq_destroy_all_entries(iovad); + + free_percpu(iovad->fq); + + iovad->fq = NULL; + iovad->flush_cb = NULL; + iovad->entry_dtor = NULL; +} + +int init_iova_flush_queue(struct iova_domain *iovad, + iova_flush_cb flush_cb, iova_entry_dtor entry_dtor) +{ + int cpu; + + atomic64_set(&iovad->fq_flush_start_cnt, 0); + atomic64_set(&iovad->fq_flush_finish_cnt, 0); + + iovad->fq = alloc_percpu(struct iova_fq); + if (!iovad->fq) + return -ENOMEM; + + iovad->flush_cb = flush_cb; + iovad->entry_dtor = entry_dtor; + + for_each_possible_cpu(cpu) { + struct iova_fq *fq; + + fq = per_cpu_ptr(iovad->fq, cpu); + fq->head = 0; + fq->tail = 0; + + spin_lock_init(&fq->lock); + } + + setup_timer(&iovad->fq_timer, fq_flush_timeout, (unsigned long)iovad); + atomic_set(&iovad->fq_timer_on, 0); + + return 0; +} +EXPORT_SYMBOL_GPL(init_iova_flush_queue); + static struct rb_node * __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn) { @@ -423,6 +476,135 @@ free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long 
size) } EXPORT_SYMBOL_GPL(free_iova_fast); +#define fq_ring_for_each(i, fq) \ + for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE) + +static inline bool fq_full(struct iova_fq *fq) +{ + assert_spin_locked(&fq->lock); + return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head); +} + +static inline unsigned fq_ring_add(struct iova_fq *fq) +{ + unsigned idx = fq->tail; + + assert_spin_locked(&fq->lock); + + fq->tail = (idx + 1) % IOVA_FQ_SIZE; + + return idx; +} + +static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq) +{ + u64 counter = atomic64_read(&iovad->fq_flush_finish_cnt); + unsigned idx; + + assert_spin_locked(&fq->lock); + + fq_ring_for_each(idx, fq) { + + if (fq->entries[idx].counter >= counter) + break; + + if (iovad->entry_dtor) + iovad->entry_dtor(fq->entries[idx].data); + + free_iova_fast(iovad, + fq->entries[idx].iova_pfn, + fq->entries[idx].pages); + + fq->head = (fq->head + 1) % IOVA_FQ_SIZE; + } +} + +static void iova_domain_flush(struct iova_domain *iovad) +{ + atomic64_inc(&iovad->fq_flush_start_cnt); + iovad->flush_cb(iovad); + atomic64_inc(&iovad->fq_flush_finish_cnt); +} + +static void fq_destroy_all_entries(struct iova_domain *iovad) +{ + int cpu; + + /* + * This code runs when the iova_domain is being detroyed, so don't + * bother to free iovas, just call the entry_dtor on all remaining + * entries. + */ + if (!iovad->entry_dtor) + return; + + for_each_possible_cpu(cpu) { + struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu); + int idx; + + fq_ring_for_each(idx, fq) + iovad->entry_dtor(fq->entries[idx].data); + } +} + +static void fq_flush_timeout(unsigned long data) +{ + struct iova_domain *iovad = (struct iova_domain *)data; + int cpu; + + atomic_set(&iovad->fq_timer_on, 0); + iova_domain_flush(iovad); + + for_each_possible_cpu(cpu) { + unsigned long flags; + struct iova_fq *fq; + + fq = per_cpu_ptr(iovad->fq, cpu); + spin_lock_irqsave(&fq->lock, flags); + fq_ring_free(iovad, fq); + spin_unlock_irqrestore(&fq->lock, flags); + } +} + +void queue_iova(struct iova_domain *iovad, + unsigned long pfn, unsigned long pages, + unsigned long data) +{ + struct iova_fq *fq = get_cpu_ptr(iovad->fq); + unsigned long flags; + unsigned idx; + + spin_lock_irqsave(&fq->lock, flags); + + /* + * First remove all entries from the flush queue that have already been + * flushed out on another CPU. This makes the fq_full() check below less + * likely to be true. + */ + fq_ring_free(iovad, fq); + + if (fq_full(fq)) { + iova_domain_flush(iovad); + fq_ring_free(iovad, fq); + } + + idx = fq_ring_add(fq); + + fq->entries[idx].iova_pfn = pfn; + fq->entries[idx].pages = pages; + fq->entries[idx].data = data; + fq->entries[idx].counter = atomic64_read(&iovad->fq_flush_start_cnt); + + spin_unlock_irqrestore(&fq->lock, flags); + + if (atomic_cmpxchg(&iovad->fq_timer_on, 0, 1) == 0) + mod_timer(&iovad->fq_timer, + jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT)); + + put_cpu_ptr(iovad->fq); +} +EXPORT_SYMBOL_GPL(queue_iova); + /** * put_iova_domain - destroys the iova doamin * @iovad: - iova domain in question. 
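[Editor's note] The new flush-queue code above defers freeing an IOVA range until a TLB flush that started after the range was queued has completed: queue_iova() stamps each ring entry with fq_flush_start_cnt, and fq_ring_free() only releases entries whose stamp is below fq_flush_finish_cnt. The sketch below is a single-threaded user-space model of that counter check (no per-CPU rings, locks or timer); all names are hypothetical.

#include <stdio.h>

#define FQ_SIZE 8	/* the kernel ring (IOVA_FQ_SIZE) is larger */

struct fq_entry { unsigned long pfn; unsigned long long counter; };

static struct fq_entry ring[FQ_SIZE];
static unsigned head, tail;
static unsigned long long flush_start_cnt, flush_finish_cnt;

static void domain_flush(void)		/* stands in for iova_domain_flush() */
{
	flush_start_cnt++;
	/* ...the hardware IOTLB invalidation would happen here... */
	flush_finish_cnt++;
}

static void ring_free(void)		/* stands in for fq_ring_free() */
{
	while (head != tail) {
		/* Only entries queued before the last completed flush are safe. */
		if (ring[head].counter >= flush_finish_cnt)
			break;
		printf("freeing pfn %lu\n", ring[head].pfn);
		head = (head + 1) % FQ_SIZE;
	}
}

static void queue_pfn(unsigned long pfn)	/* stands in for queue_iova() */
{
	ring_free();
	if ((tail + 1) % FQ_SIZE == head) {	/* ring full: flush now */
		domain_flush();
		ring_free();
	}
	ring[tail] = (struct fq_entry){ pfn, flush_start_cnt };
	tail = (tail + 1) % FQ_SIZE;
}

int main(void)
{
	for (unsigned long pfn = 0; pfn < 20; pfn++)
		queue_pfn(pfn);
	domain_flush();		/* the kernel arms fq_flush_timeout() for this */
	ring_free();
	return 0;
}

Deferring the actual invalidation to the fq_flush_timeout() timer keeps the common unmap path cheap while bounding how long stale TLB entries can linger.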
@@ -433,6 +615,7 @@ void put_iova_domain(struct iova_domain *iovad) struct rb_node *node; unsigned long flags; + free_iova_flush_queue(iovad); free_iova_rcaches(iovad); spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); node = rb_first(&iovad->rbroot); diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 2a38aa15be17..195d6e93ac71 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -35,7 +36,7 @@ struct ipmmu_vmsa_device { struct device *dev; void __iomem *base; - struct list_head list; + struct iommu_device iommu; unsigned int num_utlbs; spinlock_t lock; /* Protects ctx and domains[] */ @@ -58,36 +59,18 @@ struct ipmmu_vmsa_domain { struct ipmmu_vmsa_iommu_priv { struct ipmmu_vmsa_device *mmu; - unsigned int *utlbs; - unsigned int num_utlbs; struct device *dev; struct list_head list; }; -static DEFINE_SPINLOCK(ipmmu_devices_lock); -static LIST_HEAD(ipmmu_devices); - static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom) { return container_of(dom, struct ipmmu_vmsa_domain, io_domain); } - static struct ipmmu_vmsa_iommu_priv *to_priv(struct device *dev) { -#if defined(CONFIG_ARM) - return dev->archdata.iommu; -#else - return dev->iommu_fwspec->iommu_priv; -#endif -} -static void set_priv(struct device *dev, struct ipmmu_vmsa_iommu_priv *p) -{ -#if defined(CONFIG_ARM) - dev->archdata.iommu = p; -#else - dev->iommu_fwspec->iommu_priv = p; -#endif + return dev->iommu_fwspec ? dev->iommu_fwspec->iommu_priv : NULL; } #define TLB_LOOP_TIMEOUT 100 /* 100us */ @@ -312,7 +295,7 @@ static void ipmmu_tlb_add_flush(unsigned long iova, size_t size, /* The hardware doesn't support selective TLB flush. */ } -static struct iommu_gather_ops ipmmu_gather_ops = { +static const struct iommu_gather_ops ipmmu_gather_ops = { .tlb_flush_all = ipmmu_tlb_flush_all, .tlb_add_flush = ipmmu_tlb_add_flush, .tlb_sync = ipmmu_tlb_flush_all, @@ -341,6 +324,19 @@ static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu, return ret; } +static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu, + unsigned int context_id) +{ + unsigned long flags; + + spin_lock_irqsave(&mmu->lock, flags); + + clear_bit(context_id, mmu->ctx); + mmu->domains[context_id] = NULL; + + spin_unlock_irqrestore(&mmu->lock, flags); +} + static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) { u64 ttbr; @@ -370,22 +366,22 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) */ domain->cfg.iommu_dev = domain->mmu->dev; - domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg, - domain); - if (!domain->iop) - return -EINVAL; - /* * Find an unused context. 
*/ ret = ipmmu_domain_allocate_context(domain->mmu, domain); - if (ret == IPMMU_CTX_MAX) { - free_io_pgtable_ops(domain->iop); + if (ret == IPMMU_CTX_MAX) return -EBUSY; - } domain->context_id = ret; + domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg, + domain); + if (!domain->iop) { + ipmmu_domain_free_context(domain->mmu, domain->context_id); + return -EINVAL; + } + /* TTBR0 */ ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0]; ipmmu_ctx_write(domain, IMTTLBR0, ttbr); @@ -426,19 +422,6 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) return 0; } -static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu, - unsigned int context_id) -{ - unsigned long flags; - - spin_lock_irqsave(&mmu->lock, flags); - - clear_bit(context_id, mmu->ctx); - mmu->domains[context_id] = NULL; - - spin_unlock_irqrestore(&mmu->lock, flags); -} - static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain) { /* @@ -562,13 +545,14 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain, struct device *dev) { struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev); + struct iommu_fwspec *fwspec = dev->iommu_fwspec; struct ipmmu_vmsa_device *mmu = priv->mmu; struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); unsigned long flags; unsigned int i; int ret = 0; - if (!mmu) { + if (!priv || !priv->mmu) { dev_err(dev, "Cannot attach to IPMMU\n"); return -ENXIO; } @@ -595,8 +579,8 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain, if (ret < 0) return ret; - for (i = 0; i < priv->num_utlbs; ++i) - ipmmu_utlb_enable(domain, priv->utlbs[i]); + for (i = 0; i < fwspec->num_ids; ++i) + ipmmu_utlb_enable(domain, fwspec->ids[i]); return 0; } @@ -604,12 +588,12 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain, static void ipmmu_detach_device(struct iommu_domain *io_domain, struct device *dev) { - struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev); + struct iommu_fwspec *fwspec = dev->iommu_fwspec; struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); unsigned int i; - for (i = 0; i < priv->num_utlbs; ++i) - ipmmu_utlb_disable(domain, priv->utlbs[i]); + for (i = 0; i < fwspec->num_ids; ++i) + ipmmu_utlb_disable(domain, fwspec->ids[i]); /* * TODO: Optimize by disabling the context when no device is attached. 
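[Editor's note] This hunk swaps the order of the two allocations in ipmmu_domain_init_context(): the context slot is now taken from the bitmap first and handed back via ipmmu_domain_free_context() if alloc_io_pgtable_ops() fails, rather than the page table being allocated first. A minimal user-space model of that allocate-then-roll-back pattern follows (an array stands in for the bitmap, and all names and the 8-context limit are invented).

#include <stdio.h>
#include <stdbool.h>

#define CTX_MAX 8

static bool ctx_used[CTX_MAX];

static int allocate_context(void)
{
	for (int i = 0; i < CTX_MAX; i++) {
		if (!ctx_used[i]) {
			ctx_used[i] = true;
			return i;
		}
	}
	return -1;			/* no free context: -EBUSY in the driver */
}

static void free_context(int id)
{
	ctx_used[id] = false;
}

static bool alloc_pgtable(void)
{
	return false;			/* simulate the page-table allocation failing */
}

static int domain_init_context(void)
{
	int id = allocate_context();

	if (id < 0)
		return -1;

	if (!alloc_pgtable()) {
		free_context(id);	/* roll back so the slot isn't leaked */
		return -1;
	}
	return id;
}

int main(void)
{
	printf("init: %d, ctx0 free again: %s\n",
	       domain_init_context(), ctx_used[0] ? "no" : "yes");
	return 0;
}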
@@ -645,92 +629,36 @@ static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain, return domain->iop->iova_to_phys(domain->iop, iova); } -static int ipmmu_find_utlbs(struct ipmmu_vmsa_device *mmu, struct device *dev, - unsigned int *utlbs, unsigned int num_utlbs) +static int ipmmu_init_platform_device(struct device *dev, + struct of_phandle_args *args) { - unsigned int i; + struct platform_device *ipmmu_pdev; + struct ipmmu_vmsa_iommu_priv *priv; - for (i = 0; i < num_utlbs; ++i) { - struct of_phandle_args args; - int ret; + ipmmu_pdev = of_find_device_by_node(args->np); + if (!ipmmu_pdev) + return -ENODEV; - ret = of_parse_phandle_with_args(dev->of_node, "iommus", - "#iommu-cells", i, &args); - if (ret < 0) - return ret; - - of_node_put(args.np); - - if (args.np != mmu->dev->of_node || args.args_count != 1) - return -EINVAL; - - utlbs[i] = args.args[0]; - } + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + priv->mmu = platform_get_drvdata(ipmmu_pdev); + priv->dev = dev; + dev->iommu_fwspec->iommu_priv = priv; return 0; } -static int ipmmu_init_platform_device(struct device *dev) +static int ipmmu_of_xlate(struct device *dev, + struct of_phandle_args *spec) { - struct ipmmu_vmsa_iommu_priv *priv; - struct ipmmu_vmsa_device *mmu; - unsigned int *utlbs; - unsigned int i; - int num_utlbs; - int ret = -ENODEV; + iommu_fwspec_add_ids(dev, spec->args, 1); - /* Find the master corresponding to the device. */ + /* Initialize once - xlate() will call multiple times */ + if (to_priv(dev)) + return 0; - num_utlbs = of_count_phandle_with_args(dev->of_node, "iommus", - "#iommu-cells"); - if (num_utlbs < 0) - return -ENODEV; - - utlbs = kcalloc(num_utlbs, sizeof(*utlbs), GFP_KERNEL); - if (!utlbs) - return -ENOMEM; - - spin_lock(&ipmmu_devices_lock); - - list_for_each_entry(mmu, &ipmmu_devices, list) { - ret = ipmmu_find_utlbs(mmu, dev, utlbs, num_utlbs); - if (!ret) { - /* - * TODO Take a reference to the MMU to protect - * against device removal. - */ - break; - } - } - - spin_unlock(&ipmmu_devices_lock); - - if (ret < 0) - goto error; - - for (i = 0; i < num_utlbs; ++i) { - if (utlbs[i] >= mmu->num_utlbs) { - ret = -EINVAL; - goto error; - } - } - - priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) { - ret = -ENOMEM; - goto error; - } - - priv->mmu = mmu; - priv->utlbs = utlbs; - priv->num_utlbs = num_utlbs; - priv->dev = dev; - set_priv(dev, priv); - return 0; - -error: - kfree(utlbs); - return ret; + return ipmmu_init_platform_device(dev, spec); } #if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA) @@ -749,11 +677,11 @@ static int ipmmu_add_device(struct device *dev) struct iommu_group *group; int ret; - if (to_priv(dev)) { - dev_warn(dev, "IOMMU driver already assigned to device %s\n", - dev_name(dev)); - return -EINVAL; - } + /* + * Only let through devices that have been verified in xlate() + */ + if (!to_priv(dev)) + return -ENODEV; /* Create a device group and add the device to it. */ group = iommu_group_alloc(); @@ -772,10 +700,6 @@ static int ipmmu_add_device(struct device *dev) goto error; } - ret = ipmmu_init_platform_device(dev); - if (ret < 0) - goto error; - /* * Create the ARM mapping, used by the ARM DMA mapping core to allocate * VAs. This will allocate a corresponding IOMMU domain. 
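[Editor's note] With the hunks above, ipmmu_of_xlate() records each master's micro-TLB IDs through iommu_fwspec_add_ids() and sets up the per-device private data only on the first call, while ipmmu_add_device() now rejects any device that never went through xlate; attach then simply walks the recorded IDs. The sketch below models that flow in plain C; struct fake_dev and its fields are invented stand-ins for the fwspec bookkeeping.

#include <stdio.h>
#include <stdlib.h>

struct fake_dev {
	void *priv;		/* stands in for iommu_fwspec->iommu_priv */
	unsigned ids[8];	/* stands in for iommu_fwspec->ids[] */
	unsigned num_ids;
};

/* Called once per "iommus" phandle entry; may run several times per device. */
static int of_xlate(struct fake_dev *dev, unsigned utlb_id)
{
	dev->ids[dev->num_ids++] = utlb_id;

	if (dev->priv)			/* initialise the private data only once */
		return 0;

	dev->priv = malloc(1);
	return dev->priv ? 0 : -1;
}

/* Only devices that went through of_xlate() are accepted. */
static int add_device(struct fake_dev *dev)
{
	return dev->priv ? 0 : -19;	/* -ENODEV */
}

/* Attach enables one micro-TLB per recorded ID. */
static void attach_device(struct fake_dev *dev)
{
	for (unsigned i = 0; i < dev->num_ids; i++)
		printf("enable uTLB %u\n", dev->ids[i]);
}

int main(void)
{
	struct fake_dev master = { 0 }, stray = { 0 };

	of_xlate(&master, 3);
	of_xlate(&master, 7);

	printf("add master: %d, add stray: %d\n",
	       add_device(&master), add_device(&stray));
	attach_device(&master);
	free(master.priv);
	return 0;
}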
@@ -816,24 +740,13 @@ static int ipmmu_add_device(struct device *dev) if (!IS_ERR_OR_NULL(group)) iommu_group_remove_device(dev); - kfree(to_priv(dev)->utlbs); - kfree(to_priv(dev)); - set_priv(dev, NULL); - return ret; } static void ipmmu_remove_device(struct device *dev) { - struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev); - arm_iommu_detach_device(dev); iommu_group_remove_device(dev); - - kfree(priv->utlbs); - kfree(priv); - - set_priv(dev, NULL); } static const struct iommu_ops ipmmu_ops = { @@ -848,6 +761,7 @@ static const struct iommu_ops ipmmu_ops = { .add_device = ipmmu_add_device, .remove_device = ipmmu_remove_device, .pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K, + .of_xlate = ipmmu_of_xlate, }; #endif /* !CONFIG_ARM && CONFIG_IOMMU_DMA */ @@ -890,14 +804,12 @@ static void ipmmu_domain_free_dma(struct iommu_domain *io_domain) static int ipmmu_add_device_dma(struct device *dev) { - struct iommu_fwspec *fwspec = dev->iommu_fwspec; struct iommu_group *group; /* * Only let through devices that have been verified in xlate() - * We may get called with dev->iommu_fwspec set to NULL. */ - if (!fwspec || !fwspec->iommu_priv) + if (!to_priv(dev)) return -ENODEV; group = iommu_group_get_for_dev(dev); @@ -957,19 +869,6 @@ static struct iommu_group *ipmmu_find_group_dma(struct device *dev) return group; } -static int ipmmu_of_xlate_dma(struct device *dev, - struct of_phandle_args *spec) -{ - /* If the IPMMU device is disabled in DT then return error - * to make sure the of_iommu code does not install ops - * even though the iommu device is disabled - */ - if (!of_device_is_available(spec->np)) - return -ENODEV; - - return ipmmu_init_platform_device(dev); -} - static const struct iommu_ops ipmmu_ops = { .domain_alloc = ipmmu_domain_alloc_dma, .domain_free = ipmmu_domain_free_dma, @@ -983,7 +882,7 @@ static const struct iommu_ops ipmmu_ops = { .remove_device = ipmmu_remove_device_dma, .device_group = ipmmu_find_group_dma, .pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K, - .of_xlate = ipmmu_of_xlate_dma, + .of_xlate = ipmmu_of_xlate, }; #endif /* CONFIG_IOMMU_DMA */ @@ -1054,16 +953,24 @@ static int ipmmu_probe(struct platform_device *pdev) ipmmu_device_reset(mmu); + ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL, + dev_name(&pdev->dev)); + if (ret) + return ret; + + iommu_device_set_ops(&mmu->iommu, &ipmmu_ops); + iommu_device_set_fwnode(&mmu->iommu, &pdev->dev.of_node->fwnode); + + ret = iommu_device_register(&mmu->iommu); + if (ret) + return ret; + /* * We can't create the ARM mapping here as it requires the bus to have * an IOMMU, which only happens when bus_set_iommu() is called in * ipmmu_init() after the probe function returns. 
*/ - spin_lock(&ipmmu_devices_lock); - list_add(&mmu->list, &ipmmu_devices); - spin_unlock(&ipmmu_devices_lock); - platform_set_drvdata(pdev, mmu); return 0; @@ -1073,9 +980,8 @@ static int ipmmu_remove(struct platform_device *pdev) { struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev); - spin_lock(&ipmmu_devices_lock); - list_del(&mmu->list); - spin_unlock(&ipmmu_devices_lock); + iommu_device_sysfs_remove(&mmu->iommu); + iommu_device_unregister(&mmu->iommu); #if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA) arm_iommu_release_mapping(mmu->mapping); diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index d0448353d501..04f4d51ffacb 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -393,6 +393,7 @@ static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev) static int msm_iommu_add_device(struct device *dev) { struct msm_iommu_dev *iommu; + struct iommu_group *group; unsigned long flags; int ret = 0; @@ -406,7 +407,16 @@ static int msm_iommu_add_device(struct device *dev) spin_unlock_irqrestore(&msm_iommu_lock, flags); - return ret; + if (ret) + return ret; + + group = iommu_group_get_for_dev(dev); + if (IS_ERR(group)) + return PTR_ERR(group); + + iommu_group_put(group); + + return 0; } static void msm_iommu_remove_device(struct device *dev) @@ -421,6 +431,8 @@ static void msm_iommu_remove_device(struct device *dev) iommu_device_unlink(&iommu->iommu, dev); spin_unlock_irqrestore(&msm_iommu_lock, flags); + + iommu_group_remove_device(dev); } static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) @@ -700,6 +712,7 @@ static struct iommu_ops msm_iommu_ops = { .iova_to_phys = msm_iommu_iova_to_phys, .add_device = msm_iommu_add_device, .remove_device = msm_iommu_remove_device, + .device_group = generic_device_group, .pgsize_bitmap = MSM_IOMMU_PGSIZES, .of_xlate = qcom_iommu_of_xlate, }; diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 91c6d367ab35..16d33ac19db0 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -31,7 +31,6 @@ #include #include #include -#include #include #include "mtk_iommu.h" @@ -54,10 +53,16 @@ #define REG_MMU_CTRL_REG 0x110 #define F_MMU_PREFETCH_RT_REPLACE_MOD BIT(4) -#define F_MMU_TF_PROTECT_SEL(prot) (((prot) & 0x3) << 5) +#define F_MMU_TF_PROTECT_SEL_SHIFT(data) \ + ((data)->m4u_plat == M4U_MT2712 ? 4 : 5) +/* It's named by F_MMU_TF_PROT_SEL in mt2712. */ +#define F_MMU_TF_PROTECT_SEL(prot, data) \ + (((prot) & 0x3) << F_MMU_TF_PROTECT_SEL_SHIFT(data)) #define REG_MMU_IVRP_PADDR 0x114 #define F_MMU_IVRP_PA_SET(pa, ext) (((pa) >> 1) | ((!!(ext)) << 31)) +#define REG_MMU_VLD_PA_RNG 0x118 +#define F_MMU_VLD_PA_RNG(EA, SA) (((EA) << 8) | (SA)) #define REG_MMU_INT_CONTROL0 0x120 #define F_L2_MULIT_HIT_EN BIT(0) @@ -82,7 +87,6 @@ #define REG_MMU_FAULT_ST1 0x134 #define REG_MMU_FAULT_VA 0x13c -#define F_MMU_FAULT_VA_MSK 0xfffff000 #define F_MMU_FAULT_VA_WRITE_BIT BIT(1) #define F_MMU_FAULT_VA_LAYER_BIT BIT(0) @@ -93,6 +97,13 @@ #define MTK_PROTECT_PA_ALIGN 128 +/* + * Get the local arbiter ID and the portid within the larb arbiter + * from mtk_m4u_id which is defined by MTK_M4U_ID. 
+ */ +#define MTK_M4U_TO_LARB(id) (((id) >> 5) & 0xf) +#define MTK_M4U_TO_PORT(id) ((id) & 0x1f) + struct mtk_iommu_domain { spinlock_t pgtlock; /* lock for page table */ @@ -104,6 +115,27 @@ struct mtk_iommu_domain { static struct iommu_ops mtk_iommu_ops; +static LIST_HEAD(m4ulist); /* List all the M4U HWs */ + +#define for_each_m4u(data) list_for_each_entry(data, &m4ulist, list) + +/* + * There may be 1 or 2 M4U HWs, But we always expect they are in the same domain + * for the performance. + * + * Here always return the mtk_iommu_data of the first probed M4U where the + * iommu domain information is recorded. + */ +static struct mtk_iommu_data *mtk_iommu_get_m4u_data(void) +{ + struct mtk_iommu_data *data; + + for_each_m4u(data) + return data; + + return NULL; +} + static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom) { return container_of(dom, struct mtk_iommu_domain, domain); @@ -113,9 +145,12 @@ static void mtk_iommu_tlb_flush_all(void *cookie) { struct mtk_iommu_data *data = cookie; - writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, data->base + REG_MMU_INV_SEL); - writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE); - wmb(); /* Make sure the tlb flush all done */ + for_each_m4u(data) { + writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, + data->base + REG_MMU_INV_SEL); + writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE); + wmb(); /* Make sure the tlb flush all done */ + } } static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size, @@ -124,12 +159,17 @@ static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size, { struct mtk_iommu_data *data = cookie; - writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, data->base + REG_MMU_INV_SEL); + for_each_m4u(data) { + writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, + data->base + REG_MMU_INV_SEL); - writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A); - writel_relaxed(iova + size - 1, data->base + REG_MMU_INVLD_END_A); - writel_relaxed(F_MMU_INV_RANGE, data->base + REG_MMU_INVALIDATE); - data->tlb_flush_active = true; + writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A); + writel_relaxed(iova + size - 1, + data->base + REG_MMU_INVLD_END_A); + writel_relaxed(F_MMU_INV_RANGE, + data->base + REG_MMU_INVALIDATE); + data->tlb_flush_active = true; + } } static void mtk_iommu_tlb_sync(void *cookie) @@ -138,20 +178,22 @@ static void mtk_iommu_tlb_sync(void *cookie) int ret; u32 tmp; - /* Avoid timing out if there's nothing to wait for */ - if (!data->tlb_flush_active) - return; + for_each_m4u(data) { + /* Avoid timing out if there's nothing to wait for */ + if (!data->tlb_flush_active) + return; - ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE, tmp, - tmp != 0, 10, 100000); - if (ret) { - dev_warn(data->dev, - "Partial TLB flush timed out, falling back to full flush\n"); - mtk_iommu_tlb_flush_all(cookie); + ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE, + tmp, tmp != 0, 10, 100000); + if (ret) { + dev_warn(data->dev, + "Partial TLB flush timed out, falling back to full flush\n"); + mtk_iommu_tlb_flush_all(cookie); + } + /* Clear the CPE status */ + writel_relaxed(0, data->base + REG_MMU_CPE_DONE); + data->tlb_flush_active = false; } - /* Clear the CPE status */ - writel_relaxed(0, data->base + REG_MMU_CPE_DONE); - data->tlb_flush_active = false; } static const struct iommu_gather_ops mtk_iommu_gather_ops = { @@ -173,7 +215,6 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id) fault_iova = readl_relaxed(data->base + REG_MMU_FAULT_VA); layer = fault_iova & 
F_MMU_FAULT_VA_LAYER_BIT; write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT; - fault_iova &= F_MMU_FAULT_VA_MSK; fault_pa = readl_relaxed(data->base + REG_MMU_INVLD_PA); regval = readl_relaxed(data->base + REG_MMU_INT_ID); fault_larb = F_MMU0_INT_ID_LARB_ID(regval); @@ -221,9 +262,9 @@ static void mtk_iommu_config(struct mtk_iommu_data *data, } } -static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data) +static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom) { - struct mtk_iommu_domain *dom = data->m4u_dom; + struct mtk_iommu_data *data = mtk_iommu_get_m4u_data(); spin_lock_init(&dom->pgtlock); @@ -249,9 +290,6 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data) /* Update our support page sizes bitmap */ dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap; - - writel(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0], - data->base + REG_MMU_PT_BASE_ADDR); return 0; } @@ -266,20 +304,30 @@ static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type) if (!dom) return NULL; - if (iommu_get_dma_cookie(&dom->domain)) { - kfree(dom); - return NULL; - } + if (iommu_get_dma_cookie(&dom->domain)) + goto free_dom; + + if (mtk_iommu_domain_finalise(dom)) + goto put_dma_cookie; dom->domain.geometry.aperture_start = 0; dom->domain.geometry.aperture_end = DMA_BIT_MASK(32); dom->domain.geometry.force_aperture = true; return &dom->domain; + +put_dma_cookie: + iommu_put_dma_cookie(&dom->domain); +free_dom: + kfree(dom); + return NULL; } static void mtk_iommu_domain_free(struct iommu_domain *domain) { + struct mtk_iommu_domain *dom = to_mtk_domain(domain); + + free_io_pgtable_ops(dom->iop); iommu_put_dma_cookie(domain); kfree(to_mtk_domain(domain)); } @@ -289,22 +337,15 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain, { struct mtk_iommu_domain *dom = to_mtk_domain(domain); struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv; - int ret; if (!data) return -ENODEV; + /* Update the pgtable base address register of the M4U HW */ if (!data->m4u_dom) { data->m4u_dom = dom; - ret = mtk_iommu_domain_finalise(data); - if (ret) { - data->m4u_dom = NULL; - return ret; - } - } else if (data->m4u_dom != dom) { - /* All the client devices should be in the same m4u domain */ - dev_err(dev, "try to attach into the error iommu domain\n"); - return -EPERM; + writel(dom->cfg.arm_v7s_cfg.ttbr[0], + data->base + REG_MMU_PT_BASE_ADDR); } mtk_iommu_config(data, dev, true); @@ -330,7 +371,8 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova, int ret; spin_lock_irqsave(&dom->pgtlock, flags); - ret = dom->iop->map(dom->iop, iova, paddr, size, prot); + ret = dom->iop->map(dom->iop, iova, paddr & DMA_BIT_MASK(32), + size, prot); spin_unlock_irqrestore(&dom->pgtlock, flags); return ret; @@ -354,6 +396,7 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) { struct mtk_iommu_domain *dom = to_mtk_domain(domain); + struct mtk_iommu_data *data = mtk_iommu_get_m4u_data(); unsigned long flags; phys_addr_t pa; @@ -361,6 +404,9 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain, pa = dom->iop->iova_to_phys(dom->iop, iova); spin_unlock_irqrestore(&dom->pgtlock, flags); + if (data->enable_4GB) + pa |= BIT_ULL(32); + return pa; } @@ -399,7 +445,7 @@ static void mtk_iommu_remove_device(struct device *dev) static struct iommu_group *mtk_iommu_device_group(struct device *dev) { - struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv; + struct mtk_iommu_data *data = mtk_iommu_get_m4u_data(); if (!data) return 
ERR_PTR(-ENODEV); @@ -464,8 +510,9 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data) return ret; } - regval = F_MMU_PREFETCH_RT_REPLACE_MOD | - F_MMU_TF_PROTECT_SEL(2); + regval = F_MMU_TF_PROTECT_SEL(2, data); + if (data->m4u_plat == M4U_MT8173) + regval |= F_MMU_PREFETCH_RT_REPLACE_MOD; writel_relaxed(regval, data->base + REG_MMU_CTRL_REG); regval = F_L2_MULIT_HIT_EN | @@ -487,9 +534,19 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data) writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base, data->enable_4GB), data->base + REG_MMU_IVRP_PADDR); - + if (data->enable_4GB && data->m4u_plat != M4U_MT8173) { + /* + * If 4GB mode is enabled, the validate PA range is from + * 0x1_0000_0000 to 0x1_ffff_ffff. here record bit[32:30]. + */ + regval = F_MMU_VLD_PA_RNG(7, 4); + writel_relaxed(regval, data->base + REG_MMU_VLD_PA_RNG); + } writel_relaxed(0, data->base + REG_MMU_DCM_DIS); - writel_relaxed(0, data->base + REG_MMU_STANDARD_AXI_MODE); + + /* It's MISC control register whose default value is ok except mt8173.*/ + if (data->m4u_plat == M4U_MT8173) + writel_relaxed(0, data->base + REG_MMU_STANDARD_AXI_MODE); if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0, dev_name(data->dev), (void *)data)) { @@ -521,6 +578,7 @@ static int mtk_iommu_probe(struct platform_device *pdev) if (!data) return -ENOMEM; data->dev = dev; + data->m4u_plat = (enum mtk_iommu_plat)of_device_get_match_data(dev); /* Protect memory. HW will access here while translation fault.*/ protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL); @@ -529,7 +587,7 @@ static int mtk_iommu_probe(struct platform_device *pdev) data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN); /* Whether the current dram is over 4GB */ - data->enable_4GB = !!(max_pfn > (0xffffffffUL >> PAGE_SHIFT)); + data->enable_4GB = !!(max_pfn > (BIT_ULL(32) >> PAGE_SHIFT)); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); data->base = devm_ioremap_resource(dev, res); @@ -554,6 +612,7 @@ static int mtk_iommu_probe(struct platform_device *pdev) for (i = 0; i < larb_nr; i++) { struct device_node *larbnode; struct platform_device *plarbdev; + u32 id; larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i); if (!larbnode) @@ -562,17 +621,14 @@ static int mtk_iommu_probe(struct platform_device *pdev) if (!of_device_is_available(larbnode)) continue; + ret = of_property_read_u32(larbnode, "mediatek,larb-id", &id); + if (ret)/* The id is consecutive if there is no this property */ + id = i; + plarbdev = of_find_device_by_node(larbnode); - if (!plarbdev) { - plarbdev = of_platform_device_create( - larbnode, NULL, - platform_bus_type.dev_root); - if (!plarbdev) { - of_node_put(larbnode); - return -EPROBE_DEFER; - } - } - data->smi_imu.larb_imu[i].dev = &plarbdev->dev; + if (!plarbdev) + return -EPROBE_DEFER; + data->smi_imu.larb_imu[id].dev = &plarbdev->dev; component_match_add_release(dev, &match, release_of, compare_of, larbnode); @@ -596,6 +652,8 @@ static int mtk_iommu_probe(struct platform_device *pdev) if (ret) return ret; + list_add_tail(&data->list, &m4ulist); + if (!iommu_present(&platform_bus_type)) bus_set_iommu(&platform_bus_type, &mtk_iommu_ops); @@ -612,7 +670,6 @@ static int mtk_iommu_remove(struct platform_device *pdev) if (iommu_present(&platform_bus_type)) bus_set_iommu(&platform_bus_type, NULL); - free_io_pgtable_ops(data->m4u_dom->iop); clk_disable_unprepare(data->bclk); devm_free_irq(&pdev->dev, data->irq, data); component_master_del(&pdev->dev, &mtk_iommu_com_ops); @@ -631,6 
+688,7 @@ static int __maybe_unused mtk_iommu_suspend(struct device *dev) reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG); reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0); reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL); + clk_disable_unprepare(data->bclk); return 0; } @@ -639,9 +697,13 @@ static int __maybe_unused mtk_iommu_resume(struct device *dev) struct mtk_iommu_data *data = dev_get_drvdata(dev); struct mtk_iommu_suspend_reg *reg = &data->reg; void __iomem *base = data->base; + int ret; - writel_relaxed(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0], - base + REG_MMU_PT_BASE_ADDR); + ret = clk_prepare_enable(data->bclk); + if (ret) { + dev_err(data->dev, "Failed to enable clk(%d) in resume\n", ret); + return ret; + } writel_relaxed(reg->standard_axi_mode, base + REG_MMU_STANDARD_AXI_MODE); writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS); @@ -650,15 +712,19 @@ static int __maybe_unused mtk_iommu_resume(struct device *dev) writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL); writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base, data->enable_4GB), base + REG_MMU_IVRP_PADDR); + if (data->m4u_dom) + writel(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0], + base + REG_MMU_PT_BASE_ADDR); return 0; } -const struct dev_pm_ops mtk_iommu_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume) +static const struct dev_pm_ops mtk_iommu_pm_ops = { + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume) }; static const struct of_device_id mtk_iommu_of_ids[] = { - { .compatible = "mediatek,mt8173-m4u", }, + { .compatible = "mediatek,mt2712-m4u", .data = (void *)M4U_MT2712}, + { .compatible = "mediatek,mt8173-m4u", .data = (void *)M4U_MT8173}, {} }; @@ -667,27 +733,20 @@ static struct platform_driver mtk_iommu_driver = { .remove = mtk_iommu_remove, .driver = { .name = "mtk-iommu", - .of_match_table = mtk_iommu_of_ids, + .of_match_table = of_match_ptr(mtk_iommu_of_ids), .pm = &mtk_iommu_pm_ops, } }; -static int mtk_iommu_init_fn(struct device_node *np) +static int __init mtk_iommu_init(void) { int ret; - struct platform_device *pdev; - - pdev = of_platform_device_create(np, NULL, platform_bus_type.dev_root); - if (!pdev) - return -ENOMEM; ret = platform_driver_register(&mtk_iommu_driver); - if (ret) { - pr_err("%s: Failed to register driver\n", __func__); - return ret; - } + if (ret != 0) + pr_err("Failed to register MTK IOMMU driver\n"); - return 0; + return ret; } -IOMMU_OF_DECLARE(mtkm4u, "mediatek,mt8173-m4u", mtk_iommu_init_fn); +subsys_initcall(mtk_iommu_init) diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h index c06cc91b5d9a..b4451a1c7c2f 100644 --- a/drivers/iommu/mtk_iommu.h +++ b/drivers/iommu/mtk_iommu.h @@ -34,6 +34,12 @@ struct mtk_iommu_suspend_reg { u32 int_main_control; }; +enum mtk_iommu_plat { + M4U_MT2701, + M4U_MT2712, + M4U_MT8173, +}; + struct mtk_iommu_domain; struct mtk_iommu_data { @@ -50,6 +56,9 @@ struct mtk_iommu_data { bool tlb_flush_active; struct iommu_device iommu; + enum mtk_iommu_plat m4u_plat; + + struct list_head list; }; static inline int compare_of(struct device *dev, void *data) diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c index 8cb60829a7a1..50947ebb6d17 100644 --- a/drivers/iommu/of_iommu.c +++ b/drivers/iommu/of_iommu.c @@ -25,6 +25,8 @@ #include #include +#define NO_IOMMU 1 + static const struct of_device_id __iommu_of_table_sentinel __used __section(__iommu_of_table_end); @@ -109,8 +111,8 @@ static bool of_iommu_driver_present(struct 
device_node *np) return of_match_node(&__iommu_of_table, np); } -static const struct iommu_ops -*of_iommu_xlate(struct device *dev, struct of_phandle_args *iommu_spec) +static int of_iommu_xlate(struct device *dev, + struct of_phandle_args *iommu_spec) { const struct iommu_ops *ops; struct fwnode_handle *fwnode = &iommu_spec->np->fwnode; @@ -120,95 +122,50 @@ static const struct iommu_ops if ((ops && !ops->of_xlate) || !of_device_is_available(iommu_spec->np) || (!ops && !of_iommu_driver_present(iommu_spec->np))) - return NULL; + return NO_IOMMU; err = iommu_fwspec_init(dev, &iommu_spec->np->fwnode, ops); if (err) - return ERR_PTR(err); + return err; /* * The otherwise-empty fwspec handily serves to indicate the specific * IOMMU device we're waiting for, which will be useful if we ever get * a proper probe-ordering dependency mechanism in future. */ if (!ops) - return ERR_PTR(-EPROBE_DEFER); + return -EPROBE_DEFER; - err = ops->of_xlate(dev, iommu_spec); - if (err) - return ERR_PTR(err); - - return ops; + return ops->of_xlate(dev, iommu_spec); } -static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data) -{ - struct of_phandle_args *iommu_spec = data; +struct of_pci_iommu_alias_info { + struct device *dev; + struct device_node *np; +}; - iommu_spec->args[0] = alias; - return iommu_spec->np == pdev->bus->dev.of_node; -} - -static const struct iommu_ops -*of_pci_iommu_init(struct pci_dev *pdev, struct device_node *bridge_np) +static int of_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data) { - const struct iommu_ops *ops; - struct of_phandle_args iommu_spec; + struct of_pci_iommu_alias_info *info = data; + struct of_phandle_args iommu_spec = { .args_count = 1 }; int err; - /* - * Start by tracing the RID alias down the PCI topology as - * far as the host bridge whose OF node we have... - * (we're not even attempting to handle multi-alias devices yet) - */ - iommu_spec.args_count = 1; - iommu_spec.np = bridge_np; - pci_for_each_dma_alias(pdev, __get_pci_rid, &iommu_spec); - /* - * ...then find out what that becomes once it escapes the PCI - * bus into the system beyond, and which IOMMU it ends up at. - */ - iommu_spec.np = NULL; - err = of_pci_map_rid(bridge_np, iommu_spec.args[0], "iommu-map", + err = of_pci_map_rid(info->np, alias, "iommu-map", "iommu-map-mask", &iommu_spec.np, iommu_spec.args); if (err) - return err == -ENODEV ? NULL : ERR_PTR(err); - - ops = of_iommu_xlate(&pdev->dev, &iommu_spec); + return err == -ENODEV ? NO_IOMMU : err; + err = of_iommu_xlate(info->dev, &iommu_spec); of_node_put(iommu_spec.np); - return ops; -} - -static const struct iommu_ops -*of_platform_iommu_init(struct device *dev, struct device_node *np) -{ - struct of_phandle_args iommu_spec; - const struct iommu_ops *ops = NULL; - int idx = 0; - - /* - * We don't currently walk up the tree looking for a parent IOMMU. 
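For the PCI path above, of_pci_iommu_init() is now simply the callback handed to pci_for_each_dma_alias(), with its context carried in the small of_pci_iommu_alias_info struct instead of pre-walking the bus topology by hand. The stand-alone C sketch below only illustrates that callback-plus-context iteration pattern; the walker, the alias values and the names are invented for the example and are not the kernel implementation.

#include <stdio.h>

struct alias_info {                     /* mirrors of_pci_iommu_alias_info: device + root node */
        const char *dev_name;
        const char *root;
};

/* callback shape mirrors of_pci_iommu_init(pdev, alias, data) */
typedef int (*alias_fn)(unsigned short alias, void *data);

/* hypothetical stand-in for pci_for_each_dma_alias(): stops on a non-zero return */
static int for_each_alias(const unsigned short *aliases, int n, alias_fn fn, void *data)
{
        int i, ret = 0;

        for (i = 0; i < n; i++) {
                ret = fn(aliases[i], data);
                if (ret)
                        break;
        }
        return ret;
}

static int map_alias(unsigned short alias, void *data)
{
        struct alias_info *info = data;

        /* the real callback maps the alias via of_pci_map_rid() and calls of_iommu_xlate() */
        printf("%s: RID alias 0x%04x handled under %s\n",
               info->dev_name, (unsigned int)alias, info->root);
        return 0;       /* 0 keeps iterating, matching the kernel helper's convention */
}

int main(void)
{
        unsigned short aliases[] = { 0x0008, 0x0100 };  /* invented requester IDs */
        struct alias_info info = { "0000:01:00.0", "pcie-host" };

        return for_each_alias(aliases, 2, map_alias, &info);
}

The real callback additionally translates each alias through of_pci_map_rid() and hands the result to of_iommu_xlate(), as the hunk above shows.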
- * See the `Notes:' section of - * Documentation/devicetree/bindings/iommu/iommu.txt - */ - while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", - idx, &iommu_spec)) { - ops = of_iommu_xlate(dev, &iommu_spec); - of_node_put(iommu_spec.np); - idx++; - if (IS_ERR_OR_NULL(ops)) - break; - } - - return ops; + return err; } const struct iommu_ops *of_iommu_configure(struct device *dev, struct device_node *master_np) { - const struct iommu_ops *ops; + const struct iommu_ops *ops = NULL; struct iommu_fwspec *fwspec = dev->iommu_fwspec; + int err = NO_IOMMU; if (!master_np) return NULL; @@ -221,25 +178,54 @@ const struct iommu_ops *of_iommu_configure(struct device *dev, iommu_fwspec_free(dev); } - if (dev_is_pci(dev)) - ops = of_pci_iommu_init(to_pci_dev(dev), master_np); - else - ops = of_platform_iommu_init(dev, master_np); + /* + * We don't currently walk up the tree looking for a parent IOMMU. + * See the `Notes:' section of + * Documentation/devicetree/bindings/iommu/iommu.txt + */ + if (dev_is_pci(dev)) { + struct of_pci_iommu_alias_info info = { + .dev = dev, + .np = master_np, + }; + + err = pci_for_each_dma_alias(to_pci_dev(dev), + of_pci_iommu_init, &info); + } else { + struct of_phandle_args iommu_spec; + int idx = 0; + + while (!of_parse_phandle_with_args(master_np, "iommus", + "#iommu-cells", + idx, &iommu_spec)) { + err = of_iommu_xlate(dev, &iommu_spec); + of_node_put(iommu_spec.np); + idx++; + if (err) + break; + } + } + + /* + * Two success conditions can be represented by non-negative err here: + * >0 : there is no IOMMU, or one was unavailable for non-fatal reasons + * 0 : we found an IOMMU, and dev->fwspec is initialised appropriately + * <0 : any actual error + */ + if (!err) + ops = dev->iommu_fwspec->ops; /* * If we have reason to believe the IOMMU driver missed the initial * add_device callback for dev, replay it to get things in order. */ - if (!IS_ERR_OR_NULL(ops) && ops->add_device && - dev->bus && !dev->iommu_group) { - int err = ops->add_device(dev); - - if (err) - ops = ERR_PTR(err); - } + if (ops && ops->add_device && dev->bus && !dev->iommu_group) + err = ops->add_device(dev); /* Ignore all other errors apart from EPROBE_DEFER */ - if (IS_ERR(ops) && (PTR_ERR(ops) != -EPROBE_DEFER)) { - dev_dbg(dev, "Adding to IOMMU failed: %ld\n", PTR_ERR(ops)); + if (err == -EPROBE_DEFER) { + ops = ERR_PTR(err); + } else if (err < 0) { + dev_dbg(dev, "Adding to IOMMU failed: %d\n", err); ops = NULL; } @@ -255,8 +241,7 @@ static int __init of_iommu_init(void) const of_iommu_init_fn init_fn = match->data; if (init_fn && init_fn(np)) - pr_err("Failed to initialise IOMMU %s\n", - of_node_full_name(np)); + pr_err("Failed to initialise IOMMU %pOF\n", np); } return 0; diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index 641e035cf866..bd67e1b2c64e 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -11,6 +11,7 @@ * published by the Free Software Foundation. 
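The reworked of_iommu_configure() above collapses the old ops/ERR_PTR juggling into one integer: a positive NO_IOMMU means the device simply has no usable IOMMU, zero means an IOMMU was found and dev->iommu_fwspec was initialised, and a negative value is a real error of which only -EPROBE_DEFER is passed on. A small user-space sketch of that classification (EPROBE_DEFER is a kernel-internal errno value, so it is defined locally; the helper name is invented):

#include <errno.h>
#include <stdio.h>

#define NO_IOMMU        1       /* same convention as the patch: >0 no IOMMU, 0 found, <0 error */
#define EPROBE_DEFER    517     /* kernel-internal errno value, defined here only for the sketch */

/* hypothetical helper mirroring the decision at the tail of of_iommu_configure() */
static const char *classify(int err)
{
        if (err == 0)
                return "IOMMU found: use dev->iommu_fwspec->ops";
        if (err == -EPROBE_DEFER)
                return "defer: return ERR_PTR(-EPROBE_DEFER) so the device is re-probed";
        if (err < 0)
                return "real error: log it and continue without an IOMMU";
        return "NO_IOMMU: device has no (usable) IOMMU";
}

int main(void)
{
        int samples[] = { 0, NO_IOMMU, -EPROBE_DEFER, -ENODEV };
        int i;

        for (i = 0; i < 4; i++)
                printf("err=%4d -> %s\n", samples[i], classify(samples[i]));
        return 0;
}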
*/ +#include #include #include #include @@ -29,8 +30,6 @@ #include #include -#include - #include #include "omap-iopgtable.h" @@ -454,36 +453,35 @@ static void flush_iotlb_all(struct omap_iommu *obj) /* * H/W pagetable operations */ -static void flush_iopgd_range(u32 *first, u32 *last) +static void flush_iopte_range(struct device *dev, dma_addr_t dma, + unsigned long offset, int num_entries) { - /* FIXME: L2 cache should be taken care of if it exists */ - do { - asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pgd" - : : "r" (first)); - first += L1_CACHE_BYTES / sizeof(*first); - } while (first <= last); + size_t size = num_entries * sizeof(u32); + + dma_sync_single_range_for_device(dev, dma, offset, size, DMA_TO_DEVICE); } -static void flush_iopte_range(u32 *first, u32 *last) +static void iopte_free(struct omap_iommu *obj, u32 *iopte, bool dma_valid) { - /* FIXME: L2 cache should be taken care of if it exists */ - do { - asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pte" - : : "r" (first)); - first += L1_CACHE_BYTES / sizeof(*first); - } while (first <= last); -} + dma_addr_t pt_dma; -static void iopte_free(u32 *iopte) -{ /* Note: freed iopte's must be clean ready for re-use */ - if (iopte) + if (iopte) { + if (dma_valid) { + pt_dma = virt_to_phys(iopte); + dma_unmap_single(obj->dev, pt_dma, IOPTE_TABLE_SIZE, + DMA_TO_DEVICE); + } + kmem_cache_free(iopte_cachep, iopte); + } } -static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da) +static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, + dma_addr_t *pt_dma, u32 da) { u32 *iopte; + unsigned long offset = iopgd_index(da) * sizeof(da); /* a table has already existed */ if (*iopgd) @@ -500,18 +498,38 @@ static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da) if (!iopte) return ERR_PTR(-ENOMEM); - *iopgd = virt_to_phys(iopte) | IOPGD_TABLE; - flush_iopgd_range(iopgd, iopgd); + *pt_dma = dma_map_single(obj->dev, iopte, IOPTE_TABLE_SIZE, + DMA_TO_DEVICE); + if (dma_mapping_error(obj->dev, *pt_dma)) { + dev_err(obj->dev, "DMA map error for L2 table\n"); + iopte_free(obj, iopte, false); + return ERR_PTR(-ENOMEM); + } + /* + * we rely on dma address and the physical address to be + * the same for mapping the L2 table + */ + if (WARN_ON(*pt_dma != virt_to_phys(iopte))) { + dev_err(obj->dev, "DMA translation error for L2 table\n"); + dma_unmap_single(obj->dev, *pt_dma, IOPTE_TABLE_SIZE, + DMA_TO_DEVICE); + iopte_free(obj, iopte, false); + return ERR_PTR(-ENOMEM); + } + + *iopgd = virt_to_phys(iopte) | IOPGD_TABLE; + + flush_iopte_range(obj->dev, obj->pd_dma, offset, 1); dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte); } else { /* We raced, free the reduniovant table */ - iopte_free(iopte); + iopte_free(obj, iopte, false); } pte_ready: iopte = iopte_offset(iopgd, da); - + *pt_dma = virt_to_phys(iopte); dev_vdbg(obj->dev, "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n", __func__, da, iopgd, *iopgd, iopte, *iopte); @@ -522,6 +540,7 @@ static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da) static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) { u32 *iopgd = iopgd_offset(obj, da); + unsigned long offset = iopgd_index(da) * sizeof(da); if ((da | pa) & ~IOSECTION_MASK) { dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n", @@ -530,13 +549,14 @@ static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) } *iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION; - flush_iopgd_range(iopgd, iopgd); + flush_iopte_range(obj->dev, obj->pd_dma, offset, 1); return 0; } 
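The flush helpers above no longer walk cache lines with inline assembly; instead each page-table update computes the byte offset of the first descriptor it touched plus an entry count, and hands that range to dma_sync_single_range_for_device(). A stand-alone sketch of the offset arithmetic follows; the 20-bit shift stands for a 1 MiB first-level section and is illustrative, not taken from the OMAP headers.

#include <stdint.h>
#include <stdio.h>

#define PGD_SHIFT       20                      /* 1 MiB per first-level entry (illustrative) */
#define PGD_INDEX(da)   ((da) >> PGD_SHIFT)     /* analogous to iopgd_index(da) */

/* compute the byte range that must be synced for 'n' consecutive 32-bit entries */
static void sync_range(uint32_t da, int n)
{
        unsigned long offset = PGD_INDEX(da) * sizeof(uint32_t);
        size_t size = n * sizeof(uint32_t);

        /* the driver passes (dma_handle, offset, size) to dma_sync_single_range_for_device() */
        printf("da 0x%08x -> sync offset %lu, %zu bytes (%d entries)\n",
               (unsigned int)da, offset, size, n);
}

int main(void)
{
        sync_range(0x00100000u, 1);     /* single section entry, as in iopgd_alloc_section() */
        sync_range(0x01000000u, 16);    /* 16 entries, as in iopgd_alloc_super() */
        return 0;
}

The 16-entry supersection and large-page cases in the following hunks use exactly the same call with an entry count of 16.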
static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) { u32 *iopgd = iopgd_offset(obj, da); + unsigned long offset = iopgd_index(da) * sizeof(da); int i; if ((da | pa) & ~IOSUPER_MASK) { @@ -547,20 +567,22 @@ static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) for (i = 0; i < 16; i++) *(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER; - flush_iopgd_range(iopgd, iopgd + 15); + flush_iopte_range(obj->dev, obj->pd_dma, offset, 16); return 0; } static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) { u32 *iopgd = iopgd_offset(obj, da); - u32 *iopte = iopte_alloc(obj, iopgd, da); + dma_addr_t pt_dma; + u32 *iopte = iopte_alloc(obj, iopgd, &pt_dma, da); + unsigned long offset = iopte_index(da) * sizeof(da); if (IS_ERR(iopte)) return PTR_ERR(iopte); *iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL; - flush_iopte_range(iopte, iopte); + flush_iopte_range(obj->dev, pt_dma, offset, 1); dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n", __func__, da, pa, iopte, *iopte); @@ -571,7 +593,9 @@ static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) { u32 *iopgd = iopgd_offset(obj, da); - u32 *iopte = iopte_alloc(obj, iopgd, da); + dma_addr_t pt_dma; + u32 *iopte = iopte_alloc(obj, iopgd, &pt_dma, da); + unsigned long offset = iopte_index(da) * sizeof(da); int i; if ((da | pa) & ~IOLARGE_MASK) { @@ -585,7 +609,7 @@ static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) for (i = 0; i < 16; i++) *(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE; - flush_iopte_range(iopte, iopte + 15); + flush_iopte_range(obj->dev, pt_dma, offset, 16); return 0; } @@ -674,6 +698,9 @@ static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da) size_t bytes; u32 *iopgd = iopgd_offset(obj, da); int nent = 1; + dma_addr_t pt_dma; + unsigned long pd_offset = iopgd_index(da) * sizeof(da); + unsigned long pt_offset = iopte_index(da) * sizeof(da); if (!*iopgd) return 0; @@ -690,7 +717,8 @@ static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da) } bytes *= nent; memset(iopte, 0, nent * sizeof(*iopte)); - flush_iopte_range(iopte, iopte + (nent - 1) * sizeof(*iopte)); + pt_dma = virt_to_phys(iopte); + flush_iopte_range(obj->dev, pt_dma, pt_offset, nent); /* * do table walk to check if this table is necessary or not @@ -700,7 +728,7 @@ static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da) if (iopte[i]) goto out; - iopte_free(iopte); + iopte_free(obj, iopte, true); nent = 1; /* for the next L1 entry */ } else { bytes = IOPGD_SIZE; @@ -712,7 +740,7 @@ static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da) bytes *= nent; } memset(iopgd, 0, nent * sizeof(*iopgd)); - flush_iopgd_range(iopgd, iopgd + (nent - 1) * sizeof(*iopgd)); + flush_iopte_range(obj->dev, obj->pd_dma, pd_offset, nent); out: return bytes; } @@ -738,6 +766,7 @@ static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da) static void iopgtable_clear_entry_all(struct omap_iommu *obj) { + unsigned long offset; int i; spin_lock(&obj->page_table_lock); @@ -748,15 +777,16 @@ static void iopgtable_clear_entry_all(struct omap_iommu *obj) da = i << IOPGD_SHIFT; iopgd = iopgd_offset(obj, da); + offset = iopgd_index(da) * sizeof(da); if (!*iopgd) continue; if (iopgd_is_table(*iopgd)) - iopte_free(iopte_offset(iopgd, 0)); + iopte_free(obj, iopte_offset(iopgd, 0), true); *iopgd = 0; - 
flush_iopgd_range(iopgd, iopgd); + flush_iopte_range(obj->dev, obj->pd_dma, offset, 1); } flush_iotlb_all(obj); @@ -786,7 +816,7 @@ static irqreturn_t iommu_fault_handler(int irq, void *data) if (!report_iommu_fault(domain, obj->dev, da, 0)) return IRQ_HANDLED; - iommu_disable(obj); + iommu_write_reg(obj, 0, MMU_IRQENABLE); iopgd = iopgd_offset(obj, da); @@ -815,10 +845,18 @@ static int omap_iommu_attach(struct omap_iommu *obj, u32 *iopgd) spin_lock(&obj->iommu_lock); + obj->pd_dma = dma_map_single(obj->dev, iopgd, IOPGD_TABLE_SIZE, + DMA_TO_DEVICE); + if (dma_mapping_error(obj->dev, obj->pd_dma)) { + dev_err(obj->dev, "DMA map error for L1 table\n"); + err = -ENOMEM; + goto out_err; + } + obj->iopgd = iopgd; err = iommu_enable(obj); if (err) - goto err_enable; + goto out_err; flush_iotlb_all(obj); spin_unlock(&obj->iommu_lock); @@ -827,7 +865,7 @@ static int omap_iommu_attach(struct omap_iommu *obj, u32 *iopgd) return 0; -err_enable: +out_err: spin_unlock(&obj->iommu_lock); return err; @@ -844,7 +882,10 @@ static void omap_iommu_detach(struct omap_iommu *obj) spin_lock(&obj->iommu_lock); + dma_unmap_single(obj->dev, obj->pd_dma, IOPGD_TABLE_SIZE, + DMA_TO_DEVICE); iommu_disable(obj); + obj->pd_dma = 0; obj->iopgd = NULL; spin_unlock(&obj->iommu_lock); @@ -1008,11 +1049,6 @@ static struct platform_driver omap_iommu_driver = { }, }; -static void iopte_cachep_ctor(void *iopte) -{ - clean_dcache_area(iopte, IOPTE_TABLE_SIZE); -} - static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz) { memset(e, 0, sizeof(*e)); @@ -1159,7 +1195,6 @@ static struct iommu_domain *omap_iommu_domain_alloc(unsigned type) if (WARN_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE))) goto fail_align; - clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE); spin_lock_init(&omap_domain->lock); omap_domain->domain.geometry.aperture_start = 0; @@ -1347,7 +1382,7 @@ static int __init omap_iommu_init(void) of_node_put(np); p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags, - iopte_cachep_ctor); + NULL); if (!p) return -ENOMEM; iopte_cachep = p; diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h index 6e70515e6038..a675af29a6ec 100644 --- a/drivers/iommu/omap-iommu.h +++ b/drivers/iommu/omap-iommu.h @@ -61,6 +61,7 @@ struct omap_iommu { */ u32 *iopgd; spinlock_t page_table_lock; /* protect iopgd */ + dma_addr_t pd_dma; int nr_tlb_entries; diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c new file mode 100644 index 000000000000..c8a587d034b0 --- /dev/null +++ b/drivers/iommu/qcom_iommu.c @@ -0,0 +1,930 @@ +/* + * IOMMU API for QCOM secure IOMMUs. Somewhat based on arm-smmu.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
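Taken as a whole, the omap-iommu changes move page-table coherency onto the streaming DMA API: the L1 table is mapped once in omap_iommu_attach(), each update syncs only the descriptors it wrote, and omap_iommu_detach() unmaps the table again, which is also why the iopte cache constructor could go. A rough stand-alone sketch of that lifecycle is below; the dma_map/dma_sync/dma_unmap helpers are trivial stand-ins for the kernel's dma_map_single() family, and the table size is only an assumed example.

#include <stdint.h>
#include <stdio.h>

#define PGD_ENTRIES     4096                    /* assumed: 4096 four-byte L1 entries */

static uint32_t pgd[PGD_ENTRIES];               /* stands in for obj->iopgd */
static uintptr_t pgd_dma;                       /* stands in for obj->pd_dma */

/* trivial stand-ins for the kernel DMA mapping/sync/unmap calls */
static uintptr_t dma_map(void *cpu, size_t len)
{
        printf("map   %zu bytes\n", len);
        return (uintptr_t)cpu;
}

static void dma_sync(uintptr_t dma, unsigned long off, size_t len)
{
        printf("sync  %zu bytes at offset %lu of %#lx\n", len, off, (unsigned long)dma);
}

static void dma_unmap(uintptr_t dma, size_t len)
{
        printf("unmap %zu bytes at %#lx\n", len, (unsigned long)dma);
}

int main(void)
{
        /* attach: map the whole L1 table once, as omap_iommu_attach() now does */
        pgd_dma = dma_map(pgd, sizeof(pgd));

        /* update one entry, then sync only that descriptor, as in iopgd_alloc_section() */
        pgd[256] = 0x12345602;
        dma_sync(pgd_dma, 256 * sizeof(uint32_t), sizeof(uint32_t));

        /* detach: unmap again, as omap_iommu_detach() now does */
        dma_unmap(pgd_dma, sizeof(pgd));
        return 0;
}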
+ * + * Copyright (C) 2013 ARM Limited + * Copyright (C) 2017 Red Hat + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "io-pgtable.h" +#include "arm-smmu-regs.h" + +#define SMMU_INTR_SEL_NS 0x2000 + +struct qcom_iommu_ctx; + +struct qcom_iommu_dev { + /* IOMMU core code handle */ + struct iommu_device iommu; + struct device *dev; + struct clk *iface_clk; + struct clk *bus_clk; + void __iomem *local_base; + u32 sec_id; + u8 num_ctxs; + struct qcom_iommu_ctx *ctxs[0]; /* indexed by asid-1 */ +}; + +struct qcom_iommu_ctx { + struct device *dev; + void __iomem *base; + bool secure_init; + u8 asid; /* asid and ctx bank # are 1:1 */ +}; + +struct qcom_iommu_domain { + struct io_pgtable_ops *pgtbl_ops; + spinlock_t pgtbl_lock; + struct mutex init_mutex; /* Protects iommu pointer */ + struct iommu_domain domain; + struct qcom_iommu_dev *iommu; +}; + +static struct qcom_iommu_domain *to_qcom_iommu_domain(struct iommu_domain *dom) +{ + return container_of(dom, struct qcom_iommu_domain, domain); +} + +static const struct iommu_ops qcom_iommu_ops; + +static struct qcom_iommu_dev * to_iommu(struct iommu_fwspec *fwspec) +{ + if (!fwspec || fwspec->ops != &qcom_iommu_ops) + return NULL; + return fwspec->iommu_priv; +} + +static struct qcom_iommu_ctx * to_ctx(struct iommu_fwspec *fwspec, unsigned asid) +{ + struct qcom_iommu_dev *qcom_iommu = to_iommu(fwspec); + if (!qcom_iommu) + return NULL; + return qcom_iommu->ctxs[asid - 1]; +} + +static inline void +iommu_writel(struct qcom_iommu_ctx *ctx, unsigned reg, u32 val) +{ + writel_relaxed(val, ctx->base + reg); +} + +static inline void +iommu_writeq(struct qcom_iommu_ctx *ctx, unsigned reg, u64 val) +{ + writeq_relaxed(val, ctx->base + reg); +} + +static inline u32 +iommu_readl(struct qcom_iommu_ctx *ctx, unsigned reg) +{ + return readl_relaxed(ctx->base + reg); +} + +static inline u64 +iommu_readq(struct qcom_iommu_ctx *ctx, unsigned reg) +{ + return readq_relaxed(ctx->base + reg); +} + +static void qcom_iommu_tlb_sync(void *cookie) +{ + struct iommu_fwspec *fwspec = cookie; + unsigned i; + + for (i = 0; i < fwspec->num_ids; i++) { + struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]); + unsigned int val, ret; + + iommu_writel(ctx, ARM_SMMU_CB_TLBSYNC, 0); + + ret = readl_poll_timeout(ctx->base + ARM_SMMU_CB_TLBSTATUS, val, + (val & 0x1) == 0, 0, 5000000); + if (ret) + dev_err(ctx->dev, "timeout waiting for TLB SYNC\n"); + } +} + +static void qcom_iommu_tlb_inv_context(void *cookie) +{ + struct iommu_fwspec *fwspec = cookie; + unsigned i; + + for (i = 0; i < fwspec->num_ids; i++) { + struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]); + iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid); + } + + qcom_iommu_tlb_sync(cookie); +} + +static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size, + size_t granule, bool leaf, void *cookie) +{ + struct iommu_fwspec *fwspec = cookie; + unsigned i, reg; + + reg = leaf ? 
ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA; + + for (i = 0; i < fwspec->num_ids; i++) { + struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]); + size_t s = size; + + iova &= ~12UL; + iova |= ctx->asid; + do { + iommu_writel(ctx, reg, iova); + iova += granule; + } while (s -= granule); + } +} + +static const struct iommu_gather_ops qcom_gather_ops = { + .tlb_flush_all = qcom_iommu_tlb_inv_context, + .tlb_add_flush = qcom_iommu_tlb_inv_range_nosync, + .tlb_sync = qcom_iommu_tlb_sync, +}; + +static irqreturn_t qcom_iommu_fault(int irq, void *dev) +{ + struct qcom_iommu_ctx *ctx = dev; + u32 fsr, fsynr; + u64 iova; + + fsr = iommu_readl(ctx, ARM_SMMU_CB_FSR); + + if (!(fsr & FSR_FAULT)) + return IRQ_NONE; + + fsynr = iommu_readl(ctx, ARM_SMMU_CB_FSYNR0); + iova = iommu_readq(ctx, ARM_SMMU_CB_FAR); + + dev_err_ratelimited(ctx->dev, + "Unhandled context fault: fsr=0x%x, " + "iova=0x%016llx, fsynr=0x%x, cb=%d\n", + fsr, iova, fsynr, ctx->asid); + + iommu_writel(ctx, ARM_SMMU_CB_FSR, fsr); + + return IRQ_HANDLED; +} + +static int qcom_iommu_init_domain(struct iommu_domain *domain, + struct qcom_iommu_dev *qcom_iommu, + struct iommu_fwspec *fwspec) +{ + struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain); + struct io_pgtable_ops *pgtbl_ops; + struct io_pgtable_cfg pgtbl_cfg; + int i, ret = 0; + u32 reg; + + mutex_lock(&qcom_domain->init_mutex); + if (qcom_domain->iommu) + goto out_unlock; + + pgtbl_cfg = (struct io_pgtable_cfg) { + .pgsize_bitmap = qcom_iommu_ops.pgsize_bitmap, + .ias = 32, + .oas = 40, + .tlb = &qcom_gather_ops, + .iommu_dev = qcom_iommu->dev, + }; + + qcom_domain->iommu = qcom_iommu; + pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, fwspec); + if (!pgtbl_ops) { + dev_err(qcom_iommu->dev, "failed to allocate pagetable ops\n"); + ret = -ENOMEM; + goto out_clear_iommu; + } + + /* Update the domain's page sizes to reflect the page table format */ + domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; + domain->geometry.aperture_end = (1ULL << pgtbl_cfg.ias) - 1; + domain->geometry.force_aperture = true; + + for (i = 0; i < fwspec->num_ids; i++) { + struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]); + + if (!ctx->secure_init) { + ret = qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, ctx->asid); + if (ret) { + dev_err(qcom_iommu->dev, "secure init failed: %d\n", ret); + goto out_clear_iommu; + } + ctx->secure_init = true; + } + + /* TTBRs */ + iommu_writeq(ctx, ARM_SMMU_CB_TTBR0, + pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0] | + ((u64)ctx->asid << TTBRn_ASID_SHIFT)); + iommu_writeq(ctx, ARM_SMMU_CB_TTBR1, + pgtbl_cfg.arm_lpae_s1_cfg.ttbr[1] | + ((u64)ctx->asid << TTBRn_ASID_SHIFT)); + + /* TTBCR */ + iommu_writel(ctx, ARM_SMMU_CB_TTBCR2, + (pgtbl_cfg.arm_lpae_s1_cfg.tcr >> 32) | + TTBCR2_SEP_UPSTREAM); + iommu_writel(ctx, ARM_SMMU_CB_TTBCR, + pgtbl_cfg.arm_lpae_s1_cfg.tcr); + + /* MAIRs (stage-1 only) */ + iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR0, + pgtbl_cfg.arm_lpae_s1_cfg.mair[0]); + iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR1, + pgtbl_cfg.arm_lpae_s1_cfg.mair[1]); + + /* SCTLR */ + reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | + SCTLR_M | SCTLR_S1_ASIDPNE; + + if (IS_ENABLED(CONFIG_BIG_ENDIAN)) + reg |= SCTLR_E; + + iommu_writel(ctx, ARM_SMMU_CB_SCTLR, reg); + } + + mutex_unlock(&qcom_domain->init_mutex); + + /* Publish page table ops for map/unmap */ + qcom_domain->pgtbl_ops = pgtbl_ops; + + return 0; + +out_clear_iommu: + qcom_domain->iommu = NULL; +out_unlock: + mutex_unlock(&qcom_domain->init_mutex); + return ret; +} + +static struct 
iommu_domain *qcom_iommu_domain_alloc(unsigned type) +{ + struct qcom_iommu_domain *qcom_domain; + + if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA) + return NULL; + /* + * Allocate the domain and initialise some of its data structures. + * We can't really do anything meaningful until we've added a + * master. + */ + qcom_domain = kzalloc(sizeof(*qcom_domain), GFP_KERNEL); + if (!qcom_domain) + return NULL; + + if (type == IOMMU_DOMAIN_DMA && + iommu_get_dma_cookie(&qcom_domain->domain)) { + kfree(qcom_domain); + return NULL; + } + + mutex_init(&qcom_domain->init_mutex); + spin_lock_init(&qcom_domain->pgtbl_lock); + + return &qcom_domain->domain; +} + +static void qcom_iommu_domain_free(struct iommu_domain *domain) +{ + struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain); + + if (WARN_ON(qcom_domain->iommu)) /* forgot to detach? */ + return; + + iommu_put_dma_cookie(domain); + + /* NOTE: unmap can be called after client device is powered off, + * for example, with GPUs or anything involving dma-buf. So we + * cannot rely on the device_link. Make sure the IOMMU is on to + * avoid unclocked accesses in the TLB inv path: + */ + pm_runtime_get_sync(qcom_domain->iommu->dev); + + free_io_pgtable_ops(qcom_domain->pgtbl_ops); + + pm_runtime_put_sync(qcom_domain->iommu->dev); + + kfree(qcom_domain); +} + +static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) +{ + struct qcom_iommu_dev *qcom_iommu = to_iommu(dev->iommu_fwspec); + struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain); + int ret; + + if (!qcom_iommu) { + dev_err(dev, "cannot attach to IOMMU, is it on the same bus?\n"); + return -ENXIO; + } + + /* Ensure that the domain is finalized */ + pm_runtime_get_sync(qcom_iommu->dev); + ret = qcom_iommu_init_domain(domain, qcom_iommu, dev->iommu_fwspec); + pm_runtime_put_sync(qcom_iommu->dev); + if (ret < 0) + return ret; + + /* + * Sanity check the domain. We don't support domains across + * different IOMMUs. 
+ */ + if (qcom_domain->iommu != qcom_iommu) { + dev_err(dev, "cannot attach to IOMMU %s while already " + "attached to domain on IOMMU %s\n", + dev_name(qcom_domain->iommu->dev), + dev_name(qcom_iommu->dev)); + return -EINVAL; + } + + return 0; +} + +static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *dev) +{ + struct iommu_fwspec *fwspec = dev->iommu_fwspec; + struct qcom_iommu_dev *qcom_iommu = to_iommu(fwspec); + struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain); + unsigned i; + + if (!qcom_domain->iommu) + return; + + pm_runtime_get_sync(qcom_iommu->dev); + for (i = 0; i < fwspec->num_ids; i++) { + struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]); + + /* Disable the context bank: */ + iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0); + } + pm_runtime_put_sync(qcom_iommu->dev); + + qcom_domain->iommu = NULL; +} + +static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot) +{ + int ret; + unsigned long flags; + struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain); + struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops; + + if (!ops) + return -ENODEV; + + spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags); + ret = ops->map(ops, iova, paddr, size, prot); + spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags); + return ret; +} + +static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova, + size_t size) +{ + size_t ret; + unsigned long flags; + struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain); + struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops; + + if (!ops) + return 0; + + /* NOTE: unmap can be called after client device is powered off, + * for example, with GPUs or anything involving dma-buf. So we + * cannot rely on the device_link. Make sure the IOMMU is on to + * avoid unclocked accesses in the TLB inv path: + */ + pm_runtime_get_sync(qcom_domain->iommu->dev); + spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags); + ret = ops->unmap(ops, iova, size); + spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags); + pm_runtime_put_sync(qcom_domain->iommu->dev); + + return ret; +} + +static phys_addr_t qcom_iommu_iova_to_phys(struct iommu_domain *domain, + dma_addr_t iova) +{ + phys_addr_t ret; + unsigned long flags; + struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain); + struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops; + + if (!ops) + return 0; + + spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags); + ret = ops->iova_to_phys(ops, iova); + spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags); + + return ret; +} + +static bool qcom_iommu_capable(enum iommu_cap cap) +{ + switch (cap) { + case IOMMU_CAP_CACHE_COHERENCY: + /* + * Return true here as the SMMU can always send out coherent + * requests. + */ + return true; + case IOMMU_CAP_NOEXEC: + return true; + default: + return false; + } +} + +static int qcom_iommu_add_device(struct device *dev) +{ + struct qcom_iommu_dev *qcom_iommu = to_iommu(dev->iommu_fwspec); + struct iommu_group *group; + struct device_link *link; + + if (!qcom_iommu) + return -ENODEV; + + /* + * Establish the link between iommu and master, so that the + * iommu gets runtime enabled/disabled as per the master's + * needs. 
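Power management in this driver is visible twice in the hunks above: a device_link ties the IOMMU's runtime-PM state to its masters, and paths that can run after a master is already off (domain free, unmap) explicitly bracket their register and TLB work with pm_runtime_get_sync()/pm_runtime_put_sync(). A minimal sketch of that bracketing idea, with a toy refcount standing in for the runtime-PM core:

#include <stdio.h>

static int power_refcount;

/* hypothetical stand-ins for pm_runtime_get_sync()/pm_runtime_put_sync() */
static void power_get(void)
{
        if (power_refcount++ == 0)
                puts("iommu clocks on");
}

static void power_put(void)
{
        if (--power_refcount == 0)
                puts("iommu clocks off");
}

/* every hardware access is wrapped so the IOMMU is guaranteed to be powered/clocked */
static size_t do_unmap(unsigned long iova, size_t size)
{
        size_t unmapped;

        power_get();
        unmapped = size;        /* real code: ops->unmap() plus TLB invalidation */
        printf("unmapped %zu bytes at 0x%lx\n", unmapped, iova);
        power_put();

        return unmapped;
}

int main(void)
{
        do_unmap(0x10000000UL, 4096);
        return 0;
}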
+ */ + link = device_link_add(dev, qcom_iommu->dev, DL_FLAG_PM_RUNTIME); + if (!link) { + dev_err(qcom_iommu->dev, "Unable to create device link between %s and %s\n", + dev_name(qcom_iommu->dev), dev_name(dev)); + return -ENODEV; + } + + group = iommu_group_get_for_dev(dev); + if (IS_ERR_OR_NULL(group)) + return PTR_ERR_OR_ZERO(group); + + iommu_group_put(group); + iommu_device_link(&qcom_iommu->iommu, dev); + + return 0; +} + +static void qcom_iommu_remove_device(struct device *dev) +{ + struct qcom_iommu_dev *qcom_iommu = to_iommu(dev->iommu_fwspec); + + if (!qcom_iommu) + return; + + iommu_device_unlink(&qcom_iommu->iommu, dev); + iommu_group_remove_device(dev); + iommu_fwspec_free(dev); +} + +static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args) +{ + struct qcom_iommu_dev *qcom_iommu; + struct platform_device *iommu_pdev; + unsigned asid = args->args[0]; + + if (args->args_count != 1) { + dev_err(dev, "incorrect number of iommu params found for %s " + "(found %d, expected 1)\n", + args->np->full_name, args->args_count); + return -EINVAL; + } + + iommu_pdev = of_find_device_by_node(args->np); + if (WARN_ON(!iommu_pdev)) + return -EINVAL; + + qcom_iommu = platform_get_drvdata(iommu_pdev); + + /* make sure the asid specified in dt is valid, so we don't have + * to sanity check this elsewhere, since 'asid - 1' is used to + * index into qcom_iommu->ctxs: + */ + if (WARN_ON(asid < 1) || + WARN_ON(asid > qcom_iommu->num_ctxs)) + return -EINVAL; + + if (!dev->iommu_fwspec->iommu_priv) { + dev->iommu_fwspec->iommu_priv = qcom_iommu; + } else { + /* make sure devices iommus dt node isn't referring to + * multiple different iommu devices. Multiple context + * banks are ok, but multiple devices are not: + */ + if (WARN_ON(qcom_iommu != dev->iommu_fwspec->iommu_priv)) + return -EINVAL; + } + + return iommu_fwspec_add_ids(dev, &asid, 1); +} + +static const struct iommu_ops qcom_iommu_ops = { + .capable = qcom_iommu_capable, + .domain_alloc = qcom_iommu_domain_alloc, + .domain_free = qcom_iommu_domain_free, + .attach_dev = qcom_iommu_attach_dev, + .detach_dev = qcom_iommu_detach_dev, + .map = qcom_iommu_map, + .unmap = qcom_iommu_unmap, + .map_sg = default_iommu_map_sg, + .iova_to_phys = qcom_iommu_iova_to_phys, + .add_device = qcom_iommu_add_device, + .remove_device = qcom_iommu_remove_device, + .device_group = generic_device_group, + .of_xlate = qcom_iommu_of_xlate, + .pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M, +}; + +static int qcom_iommu_enable_clocks(struct qcom_iommu_dev *qcom_iommu) +{ + int ret; + + ret = clk_prepare_enable(qcom_iommu->iface_clk); + if (ret) { + dev_err(qcom_iommu->dev, "Couldn't enable iface_clk\n"); + return ret; + } + + ret = clk_prepare_enable(qcom_iommu->bus_clk); + if (ret) { + dev_err(qcom_iommu->dev, "Couldn't enable bus_clk\n"); + clk_disable_unprepare(qcom_iommu->iface_clk); + return ret; + } + + return 0; +} + +static void qcom_iommu_disable_clocks(struct qcom_iommu_dev *qcom_iommu) +{ + clk_disable_unprepare(qcom_iommu->bus_clk); + clk_disable_unprepare(qcom_iommu->iface_clk); +} + +static int qcom_iommu_sec_ptbl_init(struct device *dev) +{ + size_t psize = 0; + unsigned int spare = 0; + void *cpu_addr; + dma_addr_t paddr; + unsigned long attrs; + static bool allocated = false; + int ret; + + if (allocated) + return 0; + + ret = qcom_scm_iommu_secure_ptbl_size(spare, &psize); + if (ret) { + dev_err(dev, "failed to get iommu secure pgtable size (%d)\n", + ret); + return ret; + } + + dev_info(dev, "iommu sec: pgtable size: %zu\n", 
psize); + + attrs = DMA_ATTR_NO_KERNEL_MAPPING; + + cpu_addr = dma_alloc_attrs(dev, psize, &paddr, GFP_KERNEL, attrs); + if (!cpu_addr) { + dev_err(dev, "failed to allocate %zu bytes for pgtable\n", + psize); + return -ENOMEM; + } + + ret = qcom_scm_iommu_secure_ptbl_init(paddr, psize, spare); + if (ret) { + dev_err(dev, "failed to init iommu pgtable (%d)\n", ret); + goto free_mem; + } + + allocated = true; + return 0; + +free_mem: + dma_free_attrs(dev, psize, cpu_addr, paddr, attrs); + return ret; +} + +static int get_asid(const struct device_node *np) +{ + u32 reg; + + /* read the "reg" property directly to get the relative address + * of the context bank, and calculate the asid from that: + */ + if (of_property_read_u32_index(np, "reg", 0, ®)) + return -ENODEV; + + return reg / 0x1000; /* context banks are 0x1000 apart */ +} + +static int qcom_iommu_ctx_probe(struct platform_device *pdev) +{ + struct qcom_iommu_ctx *ctx; + struct device *dev = &pdev->dev; + struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev->parent); + struct resource *res; + int ret, irq; + + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->dev = dev; + platform_set_drvdata(pdev, ctx); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ctx->base = devm_ioremap_resource(dev, res); + if (IS_ERR(ctx->base)) + return PTR_ERR(ctx->base); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(dev, "failed to get irq\n"); + return -ENODEV; + } + + /* clear IRQs before registering fault handler, just in case the + * boot-loader left us a surprise: + */ + iommu_writel(ctx, ARM_SMMU_CB_FSR, iommu_readl(ctx, ARM_SMMU_CB_FSR)); + + ret = devm_request_irq(dev, irq, + qcom_iommu_fault, + IRQF_SHARED, + "qcom-iommu-fault", + ctx); + if (ret) { + dev_err(dev, "failed to request IRQ %u\n", irq); + return ret; + } + + ret = get_asid(dev->of_node); + if (ret < 0) { + dev_err(dev, "missing reg property\n"); + return ret; + } + + ctx->asid = ret; + + dev_dbg(dev, "found asid %u\n", ctx->asid); + + qcom_iommu->ctxs[ctx->asid - 1] = ctx; + + return 0; +} + +static int qcom_iommu_ctx_remove(struct platform_device *pdev) +{ + struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(pdev->dev.parent); + struct qcom_iommu_ctx *ctx = platform_get_drvdata(pdev); + + platform_set_drvdata(pdev, NULL); + + qcom_iommu->ctxs[ctx->asid - 1] = NULL; + + return 0; +} + +static const struct of_device_id ctx_of_match[] = { + { .compatible = "qcom,msm-iommu-v1-ns" }, + { .compatible = "qcom,msm-iommu-v1-sec" }, + { /* sentinel */ } +}; + +static struct platform_driver qcom_iommu_ctx_driver = { + .driver = { + .name = "qcom-iommu-ctx", + .of_match_table = of_match_ptr(ctx_of_match), + }, + .probe = qcom_iommu_ctx_probe, + .remove = qcom_iommu_ctx_remove, +}; + +static bool qcom_iommu_has_secure_context(struct qcom_iommu_dev *qcom_iommu) +{ + struct device_node *child; + + for_each_child_of_node(qcom_iommu->dev->of_node, child) + if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec")) + return true; + + return false; +} + +static int qcom_iommu_device_probe(struct platform_device *pdev) +{ + struct device_node *child; + struct qcom_iommu_dev *qcom_iommu; + struct device *dev = &pdev->dev; + struct resource *res; + int ret, sz, max_asid = 0; + + /* find the max asid (which is 1:1 to ctx bank idx), so we know how + * many child ctx devices we have: + */ + for_each_child_of_node(dev->of_node, child) + max_asid = max(max_asid, get_asid(child)); + + sz = sizeof(*qcom_iommu) + (max_asid * 
sizeof(qcom_iommu->ctxs[0])); + + qcom_iommu = devm_kzalloc(dev, sz, GFP_KERNEL); + if (!qcom_iommu) + return -ENOMEM; + qcom_iommu->num_ctxs = max_asid; + qcom_iommu->dev = dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res) + qcom_iommu->local_base = devm_ioremap_resource(dev, res); + + qcom_iommu->iface_clk = devm_clk_get(dev, "iface"); + if (IS_ERR(qcom_iommu->iface_clk)) { + dev_err(dev, "failed to get iface clock\n"); + return PTR_ERR(qcom_iommu->iface_clk); + } + + qcom_iommu->bus_clk = devm_clk_get(dev, "bus"); + if (IS_ERR(qcom_iommu->bus_clk)) { + dev_err(dev, "failed to get bus clock\n"); + return PTR_ERR(qcom_iommu->bus_clk); + } + + if (of_property_read_u32(dev->of_node, "qcom,iommu-secure-id", + &qcom_iommu->sec_id)) { + dev_err(dev, "missing qcom,iommu-secure-id property\n"); + return -ENODEV; + } + + if (qcom_iommu_has_secure_context(qcom_iommu)) { + ret = qcom_iommu_sec_ptbl_init(dev); + if (ret) { + dev_err(dev, "cannot init secure pg table(%d)\n", ret); + return ret; + } + } + + platform_set_drvdata(pdev, qcom_iommu); + + pm_runtime_enable(dev); + + /* register context bank devices, which are child nodes: */ + ret = devm_of_platform_populate(dev); + if (ret) { + dev_err(dev, "Failed to populate iommu contexts\n"); + return ret; + } + + ret = iommu_device_sysfs_add(&qcom_iommu->iommu, dev, NULL, + dev_name(dev)); + if (ret) { + dev_err(dev, "Failed to register iommu in sysfs\n"); + return ret; + } + + iommu_device_set_ops(&qcom_iommu->iommu, &qcom_iommu_ops); + iommu_device_set_fwnode(&qcom_iommu->iommu, dev->fwnode); + + ret = iommu_device_register(&qcom_iommu->iommu); + if (ret) { + dev_err(dev, "Failed to register iommu\n"); + return ret; + } + + bus_set_iommu(&platform_bus_type, &qcom_iommu_ops); + + if (qcom_iommu->local_base) { + pm_runtime_get_sync(dev); + writel_relaxed(0xffffffff, qcom_iommu->local_base + SMMU_INTR_SEL_NS); + pm_runtime_put_sync(dev); + } + + return 0; +} + +static int qcom_iommu_device_remove(struct platform_device *pdev) +{ + struct qcom_iommu_dev *qcom_iommu = platform_get_drvdata(pdev); + + bus_set_iommu(&platform_bus_type, NULL); + + pm_runtime_force_suspend(&pdev->dev); + platform_set_drvdata(pdev, NULL); + iommu_device_sysfs_remove(&qcom_iommu->iommu); + iommu_device_unregister(&qcom_iommu->iommu); + + return 0; +} + +static int __maybe_unused qcom_iommu_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct qcom_iommu_dev *qcom_iommu = platform_get_drvdata(pdev); + + return qcom_iommu_enable_clocks(qcom_iommu); +} + +static int __maybe_unused qcom_iommu_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct qcom_iommu_dev *qcom_iommu = platform_get_drvdata(pdev); + + qcom_iommu_disable_clocks(qcom_iommu); + + return 0; +} + +static const struct dev_pm_ops qcom_iommu_pm_ops = { + SET_RUNTIME_PM_OPS(qcom_iommu_suspend, qcom_iommu_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) +}; + +static const struct of_device_id qcom_iommu_of_match[] = { + { .compatible = "qcom,msm-iommu-v1" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, qcom_iommu_of_match); + +static struct platform_driver qcom_iommu_driver = { + .driver = { + .name = "qcom-iommu", + .of_match_table = of_match_ptr(qcom_iommu_of_match), + .pm = &qcom_iommu_pm_ops, + }, + .probe = qcom_iommu_device_probe, + .remove = qcom_iommu_device_remove, +}; + +static int __init qcom_iommu_init(void) +{ + int ret; + + ret = 
platform_driver_register(&qcom_iommu_ctx_driver); + if (ret) + return ret; + + ret = platform_driver_register(&qcom_iommu_driver); + if (ret) + platform_driver_unregister(&qcom_iommu_ctx_driver); + + return ret; +} + +static void __exit qcom_iommu_exit(void) +{ + platform_driver_unregister(&qcom_iommu_driver); + platform_driver_unregister(&qcom_iommu_ctx_driver); +} + +module_init(qcom_iommu_init); +module_exit(qcom_iommu_exit); + +IOMMU_OF_DECLARE(qcom_iommu_dev, "qcom,msm-iommu-v1", NULL); + +MODULE_DESCRIPTION("IOMMU API for QCOM IOMMU v1 implementations"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 4ba48a26b389..9d991c2d8767 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -90,7 +90,9 @@ struct rk_iommu { struct device *dev; void __iomem **bases; int num_mmu; - int irq; + int *irq; + int num_irq; + bool reset_disabled; struct iommu_device iommu; struct list_head node; /* entry in rk_iommu_domain.iommus */ struct iommu_domain *domain; /* domain to which iommu is attached */ @@ -414,6 +416,9 @@ static int rk_iommu_force_reset(struct rk_iommu *iommu) int ret, i; u32 dte_addr; + if (iommu->reset_disabled) + return 0; + /* * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY * and verifying that upper 5 nybbles are read back. @@ -825,10 +830,12 @@ static int rk_iommu_attach_device(struct iommu_domain *domain, iommu->domain = domain; - ret = devm_request_irq(iommu->dev, iommu->irq, rk_iommu_irq, - IRQF_SHARED, dev_name(dev), iommu); - if (ret) - return ret; + for (i = 0; i < iommu->num_irq; i++) { + ret = devm_request_irq(iommu->dev, iommu->irq[i], rk_iommu_irq, + IRQF_SHARED, dev_name(dev), iommu); + if (ret) + return ret; + } for (i = 0; i < iommu->num_mmu; i++) { rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, @@ -878,7 +885,8 @@ static void rk_iommu_detach_device(struct iommu_domain *domain, } rk_iommu_disable_stall(iommu); - devm_free_irq(iommu->dev, iommu->irq, iommu); + for (i = 0; i < iommu->num_irq; i++) + devm_free_irq(iommu->dev, iommu->irq[i], iommu); iommu->domain = NULL; @@ -1008,20 +1016,20 @@ static int rk_iommu_group_set_iommudata(struct iommu_group *group, ret = of_parse_phandle_with_args(np, "iommus", "#iommu-cells", 0, &args); if (ret) { - dev_err(dev, "of_parse_phandle_with_args(%s) => %d\n", - np->full_name, ret); + dev_err(dev, "of_parse_phandle_with_args(%pOF) => %d\n", + np, ret); return ret; } if (args.args_count != 0) { - dev_err(dev, "incorrect number of iommu params found for %s (found %d, expected 0)\n", - args.np->full_name, args.args_count); + dev_err(dev, "incorrect number of iommu params found for %pOF (found %d, expected 0)\n", + args.np, args.args_count); return -EINVAL; } pd = of_find_device_by_node(args.np); of_node_put(args.np); if (!pd) { - dev_err(dev, "iommu %s not found\n", args.np->full_name); + dev_err(dev, "iommu %pOF not found\n", args.np); return -EPROBE_DEFER; } @@ -1157,12 +1165,28 @@ static int rk_iommu_probe(struct platform_device *pdev) if (iommu->num_mmu == 0) return PTR_ERR(iommu->bases[0]); - iommu->irq = platform_get_irq(pdev, 0); - if (iommu->irq < 0) { - dev_err(dev, "Failed to get IRQ, %d\n", iommu->irq); + iommu->num_irq = platform_irq_count(pdev); + if (iommu->num_irq < 0) + return iommu->num_irq; + if (iommu->num_irq == 0) return -ENXIO; + + iommu->irq = devm_kcalloc(dev, iommu->num_irq, sizeof(*iommu->irq), + GFP_KERNEL); + if (!iommu->irq) + return -ENOMEM; + + for (i = 0; i < iommu->num_irq; i++) { + 
iommu->irq[i] = platform_get_irq(pdev, i); + if (iommu->irq[i] < 0) { + dev_err(dev, "Failed to get IRQ, %d\n", iommu->irq[i]); + return -ENXIO; + } } + iommu->reset_disabled = device_property_read_bool(dev, + "rockchip,disable-mmu-reset"); + err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev)); if (err) return err; diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c index 8788640756a7..0e2f31f9032b 100644 --- a/drivers/iommu/s390-iommu.c +++ b/drivers/iommu/s390-iommu.c @@ -18,6 +18,8 @@ */ #define S390_IOMMU_PGSIZES (~0xFFFUL) +static const struct iommu_ops s390_iommu_ops; + struct s390_domain { struct iommu_domain domain; struct list_head devices; @@ -166,11 +168,13 @@ static void s390_iommu_detach_device(struct iommu_domain *domain, static int s390_iommu_add_device(struct device *dev) { struct iommu_group *group = iommu_group_get_for_dev(dev); + struct zpci_dev *zdev = to_pci_dev(dev)->sysdata; if (IS_ERR(group)) return PTR_ERR(group); iommu_group_put(group); + iommu_device_link(&zdev->iommu_dev, dev); return 0; } @@ -197,6 +201,7 @@ static void s390_iommu_remove_device(struct device *dev) s390_iommu_detach_device(domain, dev); } + iommu_device_unlink(&zdev->iommu_dev, dev); iommu_group_remove_device(dev); } @@ -327,7 +332,37 @@ static size_t s390_iommu_unmap(struct iommu_domain *domain, return size; } -static struct iommu_ops s390_iommu_ops = { +int zpci_init_iommu(struct zpci_dev *zdev) +{ + int rc = 0; + + rc = iommu_device_sysfs_add(&zdev->iommu_dev, NULL, NULL, + "s390-iommu.%08x", zdev->fid); + if (rc) + goto out_err; + + iommu_device_set_ops(&zdev->iommu_dev, &s390_iommu_ops); + + rc = iommu_device_register(&zdev->iommu_dev); + if (rc) + goto out_sysfs; + + return 0; + +out_sysfs: + iommu_device_sysfs_remove(&zdev->iommu_dev); + +out_err: + return rc; +} + +void zpci_destroy_iommu(struct zpci_dev *zdev) +{ + iommu_device_unregister(&zdev->iommu_dev); + iommu_device_sysfs_remove(&zdev->iommu_dev); +} + +static const struct iommu_ops s390_iommu_ops = { .capable = s390_iommu_capable, .domain_alloc = s390_domain_alloc, .domain_free = s390_domain_free, diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c index 37e708fdbb5a..b62f790ad1ba 100644 --- a/drivers/iommu/tegra-gart.c +++ b/drivers/iommu/tegra-gart.c @@ -61,6 +61,8 @@ struct gart_device { struct list_head client; spinlock_t client_lock; /* for client list */ struct device *dev; + + struct iommu_device iommu; /* IOMMU Core handle */ }; struct gart_domain { @@ -334,12 +336,35 @@ static bool gart_iommu_capable(enum iommu_cap cap) return false; } +static int gart_iommu_add_device(struct device *dev) +{ + struct iommu_group *group = iommu_group_get_for_dev(dev); + + if (IS_ERR(group)) + return PTR_ERR(group); + + iommu_group_put(group); + + iommu_device_link(&gart_handle->iommu, dev); + + return 0; +} + +static void gart_iommu_remove_device(struct device *dev) +{ + iommu_group_remove_device(dev); + iommu_device_unlink(&gart_handle->iommu, dev); +} + static const struct iommu_ops gart_iommu_ops = { .capable = gart_iommu_capable, .domain_alloc = gart_iommu_domain_alloc, .domain_free = gart_iommu_domain_free, .attach_dev = gart_iommu_attach_dev, .detach_dev = gart_iommu_detach_dev, + .add_device = gart_iommu_add_device, + .remove_device = gart_iommu_remove_device, + .device_group = generic_device_group, .map = gart_iommu_map, .map_sg = default_iommu_map_sg, .unmap = gart_iommu_unmap, @@ -378,6 +403,7 @@ static int tegra_gart_probe(struct platform_device *pdev) struct resource *res, 
*res_remap; void __iomem *gart_regs; struct device *dev = &pdev->dev; + int ret; if (gart_handle) return -EIO; @@ -404,6 +430,22 @@ static int tegra_gart_probe(struct platform_device *pdev) return -ENXIO; } + ret = iommu_device_sysfs_add(&gart->iommu, &pdev->dev, NULL, + dev_name(&pdev->dev)); + if (ret) { + dev_err(dev, "Failed to register IOMMU in sysfs\n"); + return ret; + } + + iommu_device_set_ops(&gart->iommu, &gart_iommu_ops); + + ret = iommu_device_register(&gart->iommu); + if (ret) { + dev_err(dev, "Failed to register IOMMU\n"); + iommu_device_sysfs_remove(&gart->iommu); + return ret; + } + gart->dev = &pdev->dev; spin_lock_init(&gart->pte_lock); spin_lock_init(&gart->client_lock); @@ -430,6 +472,9 @@ static int tegra_gart_remove(struct platform_device *pdev) { struct gart_device *gart = platform_get_drvdata(pdev); + iommu_device_unregister(&gart->iommu); + iommu_device_sysfs_remove(&gart->iommu); + writel(0, gart->regs + GART_CONFIG); if (gart->savedata) vfree(gart->savedata); diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index eeb19f560a05..3b6449e2cbf1 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -36,6 +36,8 @@ struct tegra_smmu { struct list_head list; struct dentry *debugfs; + + struct iommu_device iommu; /* IOMMU Core code handle */ }; struct tegra_smmu_as { @@ -704,6 +706,7 @@ static struct tegra_smmu *tegra_smmu_find(struct device_node *np) static int tegra_smmu_add_device(struct device *dev) { struct device_node *np = dev->of_node; + struct iommu_group *group; struct of_phandle_args args; unsigned int index = 0; @@ -719,18 +722,33 @@ static int tegra_smmu_add_device(struct device *dev) * first match. */ dev->archdata.iommu = smmu; + + iommu_device_link(&smmu->iommu, dev); + break; } index++; } + group = iommu_group_get_for_dev(dev); + if (IS_ERR(group)) + return PTR_ERR(group); + + iommu_group_put(group); + return 0; } static void tegra_smmu_remove_device(struct device *dev) { + struct tegra_smmu *smmu = dev->archdata.iommu; + + if (smmu) + iommu_device_unlink(&smmu->iommu, dev); + dev->archdata.iommu = NULL; + iommu_group_remove_device(dev); } static const struct iommu_ops tegra_smmu_ops = { @@ -741,6 +759,7 @@ static const struct iommu_ops tegra_smmu_ops = { .detach_dev = tegra_smmu_detach_dev, .add_device = tegra_smmu_add_device, .remove_device = tegra_smmu_remove_device, + .device_group = generic_device_group, .map = tegra_smmu_map, .unmap = tegra_smmu_unmap, .map_sg = default_iommu_map_sg, @@ -930,10 +949,25 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev, tegra_smmu_ahb_enable(); - err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops); - if (err < 0) + err = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, dev_name(dev)); + if (err) return ERR_PTR(err); + iommu_device_set_ops(&smmu->iommu, &tegra_smmu_ops); + + err = iommu_device_register(&smmu->iommu); + if (err) { + iommu_device_sysfs_remove(&smmu->iommu); + return ERR_PTR(err); + } + + err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops); + if (err < 0) { + iommu_device_unregister(&smmu->iommu); + iommu_device_sysfs_remove(&smmu->iommu); + return ERR_PTR(err); + } + if (IS_ENABLED(CONFIG_DEBUG_FS)) tegra_smmu_debugfs_init(smmu); @@ -942,6 +976,9 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev, void tegra_smmu_remove(struct tegra_smmu *smmu) { + iommu_device_unregister(&smmu->iommu); + iommu_device_sysfs_remove(&smmu->iommu); + if (IS_ENABLED(CONFIG_DEBUG_FS)) tegra_smmu_debugfs_exit(smmu); } diff --git a/drivers/irqchip/irq-gic-v3.c 
b/drivers/irqchip/irq-gic-v3.c index 519149ec9053..b5df99c6f680 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -1042,7 +1042,7 @@ static int get_cpu_number(struct device_node *dn) { const __be32 *cell; u64 hwid; - int i; + int cpu; cell = of_get_property(dn, "reg", NULL); if (!cell) @@ -1056,9 +1056,9 @@ static int get_cpu_number(struct device_node *dn) if (hwid & ~MPIDR_HWID_BITMASK) return -1; - for (i = 0; i < num_possible_cpus(); i++) - if (cpu_logical_map(i) == hwid) - return i; + for_each_possible_cpu(cpu) + if (cpu_logical_map(cpu) == hwid) + return cpu; return -1; } diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c index 2370e6d9e603..cd0bcc3b7e33 100644 --- a/drivers/irqchip/irq-gic-v4.c +++ b/drivers/irqchip/irq-gic-v4.c @@ -173,7 +173,9 @@ int its_map_vlpi(int irq, struct its_vlpi_map *map) { struct its_cmd_info info = { .cmd_type = MAP_VLPI, - .map = map, + { + .map = map, + }, }; /* @@ -189,7 +191,9 @@ int its_get_vlpi(int irq, struct its_vlpi_map *map) { struct its_cmd_info info = { .cmd_type = GET_VLPI, - .map = map, + { + .map = map, + }, }; return irq_set_vcpu_affinity(irq, &info); @@ -205,7 +209,9 @@ int its_prop_update_vlpi(int irq, u8 config, bool inv) { struct its_cmd_info info = { .cmd_type = inv ? PROP_UPDATE_AND_INV_VLPI : PROP_UPDATE_VLPI, - .config = config, + { + .config = config, + }, }; return irq_set_vcpu_affinity(irq, &info); diff --git a/drivers/irqchip/irq-mips-cpu.c b/drivers/irqchip/irq-mips-cpu.c index 14461cbfab2f..66f97fde13d8 100644 --- a/drivers/irqchip/irq-mips-cpu.c +++ b/drivers/irqchip/irq-mips-cpu.c @@ -101,7 +101,7 @@ static void mips_mt_send_ipi(struct irq_data *d, unsigned int cpu) local_irq_save(flags); /* We can only send IPIs to VPEs within the local core */ - WARN_ON(cpu_data[cpu].core != current_cpu_data.core); + WARN_ON(!cpus_are_siblings(smp_processor_id(), cpu)); vpflags = dvpe(); settc(cpu_vpe_id(&cpu_data[cpu])); diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index b3a60da088db..c90976d7e53c 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -12,27 +12,38 @@ #include #include #include -#include #include +#include #include #include -#include +#include #include #include #include -unsigned int gic_present; +#define GIC_MAX_INTRS 256 +#define GIC_MAX_LONGS BITS_TO_LONGS(GIC_MAX_INTRS) -struct gic_pcpu_mask { - DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS); -}; +/* Add 2 to convert GIC CPU pin to core interrupt */ +#define GIC_CPU_PIN_OFFSET 2 -static unsigned long __gic_base_addr; +/* Mapped interrupt to pin X, then GIC will generate the vector (X+1). */ +#define GIC_PIN_TO_VEC_OFFSET 1 + +/* Convert between local/shared IRQ number and GIC HW IRQ number. 
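The comment immediately above introduces the split hwirq space used by the rewritten driver: local interrupts sit at the bottom of the range and shared interrupts are offset above them, with the conversion macros defined just after this. A small stand-alone illustration of that numbering (the local-interrupt count used here is an assumed example value, not the driver's constant):

#include <stdio.h>

#define NUM_LOCAL_INTRS         7       /* assumed local-interrupt count, for the example only */

#define LOCAL_HWIRQ_BASE        0
#define LOCAL_TO_HWIRQ(x)       (LOCAL_HWIRQ_BASE + (x))
#define HWIRQ_TO_LOCAL(x)       ((x) - LOCAL_HWIRQ_BASE)
#define SHARED_HWIRQ_BASE       NUM_LOCAL_INTRS
#define SHARED_TO_HWIRQ(x)      (SHARED_HWIRQ_BASE + (x))
#define HWIRQ_TO_SHARED(x)      ((x) - SHARED_HWIRQ_BASE)

int main(void)
{
        int local_timer = 1;    /* some local interrupt */
        int shared_uart = 24;   /* some shared interrupt */

        /* round-trip both kinds of interrupt through the hwirq space */
        printf("local  %2d -> hwirq %2d -> local  %2d\n", local_timer,
               LOCAL_TO_HWIRQ(local_timer), HWIRQ_TO_LOCAL(LOCAL_TO_HWIRQ(local_timer)));
        printf("shared %2d -> hwirq %2d -> shared %2d\n", shared_uart,
               SHARED_TO_HWIRQ(shared_uart), HWIRQ_TO_SHARED(SHARED_TO_HWIRQ(shared_uart)));
        return 0;
}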
*/ +#define GIC_LOCAL_HWIRQ_BASE 0 +#define GIC_LOCAL_TO_HWIRQ(x) (GIC_LOCAL_HWIRQ_BASE + (x)) +#define GIC_HWIRQ_TO_LOCAL(x) ((x) - GIC_LOCAL_HWIRQ_BASE) +#define GIC_SHARED_HWIRQ_BASE GIC_NUM_LOCAL_INTRS +#define GIC_SHARED_TO_HWIRQ(x) (GIC_SHARED_HWIRQ_BASE + (x)) +#define GIC_HWIRQ_TO_SHARED(x) ((x) - GIC_SHARED_HWIRQ_BASE) + +void __iomem *mips_gic_base; + +DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks); -static void __iomem *gic_base; -static struct gic_pcpu_mask pcpu_masks[NR_CPUS]; static DEFINE_SPINLOCK(gic_lock); static struct irq_domain *gic_irq_domain; static struct irq_domain *gic_ipi_domain; @@ -44,202 +55,13 @@ static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller; DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS); DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS); -static void __gic_irq_dispatch(void); - -static inline u32 gic_read32(unsigned int reg) +static void gic_clear_pcpu_masks(unsigned int intr) { - return __raw_readl(gic_base + reg); -} + unsigned int i; -static inline u64 gic_read64(unsigned int reg) -{ - return __raw_readq(gic_base + reg); -} - -static inline unsigned long gic_read(unsigned int reg) -{ - if (!mips_cm_is64) - return gic_read32(reg); - else - return gic_read64(reg); -} - -static inline void gic_write32(unsigned int reg, u32 val) -{ - return __raw_writel(val, gic_base + reg); -} - -static inline void gic_write64(unsigned int reg, u64 val) -{ - return __raw_writeq(val, gic_base + reg); -} - -static inline void gic_write(unsigned int reg, unsigned long val) -{ - if (!mips_cm_is64) - return gic_write32(reg, (u32)val); - else - return gic_write64(reg, (u64)val); -} - -static inline void gic_update_bits(unsigned int reg, unsigned long mask, - unsigned long val) -{ - unsigned long regval; - - regval = gic_read(reg); - regval &= ~mask; - regval |= val; - gic_write(reg, regval); -} - -static inline void gic_reset_mask(unsigned int intr) -{ - gic_write(GIC_REG(SHARED, GIC_SH_RMASK) + GIC_INTR_OFS(intr), - 1ul << GIC_INTR_BIT(intr)); -} - -static inline void gic_set_mask(unsigned int intr) -{ - gic_write(GIC_REG(SHARED, GIC_SH_SMASK) + GIC_INTR_OFS(intr), - 1ul << GIC_INTR_BIT(intr)); -} - -static inline void gic_set_polarity(unsigned int intr, unsigned int pol) -{ - gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_POLARITY) + - GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr), - (unsigned long)pol << GIC_INTR_BIT(intr)); -} - -static inline void gic_set_trigger(unsigned int intr, unsigned int trig) -{ - gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_TRIGGER) + - GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr), - (unsigned long)trig << GIC_INTR_BIT(intr)); -} - -static inline void gic_set_dual_edge(unsigned int intr, unsigned int dual) -{ - gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_DUAL) + GIC_INTR_OFS(intr), - 1ul << GIC_INTR_BIT(intr), - (unsigned long)dual << GIC_INTR_BIT(intr)); -} - -static inline void gic_map_to_pin(unsigned int intr, unsigned int pin) -{ - gic_write32(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) + - GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin); -} - -static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe) -{ - gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_VPE_BASE) + - GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe), - GIC_SH_MAP_TO_VPE_REG_BIT(vpe)); -} - -#ifdef CONFIG_CLKSRC_MIPS_GIC -u64 notrace gic_read_count(void) -{ - unsigned int hi, hi2, lo; - - if (mips_cm_is64) - return (u64)gic_read(GIC_REG(SHARED, GIC_SH_COUNTER)); - - do { - hi = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32)); - lo = 
gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_31_00)); - hi2 = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32)); - } while (hi2 != hi); - - return (((u64) hi) << 32) + lo; -} - -unsigned int gic_get_count_width(void) -{ - unsigned int bits, config; - - config = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG)); - bits = 32 + 4 * ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >> - GIC_SH_CONFIG_COUNTBITS_SHF); - - return bits; -} - -void notrace gic_write_compare(u64 cnt) -{ - if (mips_cm_is64) { - gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE), cnt); - } else { - gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI), - (int)(cnt >> 32)); - gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO), - (int)(cnt & 0xffffffff)); - } -} - -void notrace gic_write_cpu_compare(u64 cnt, int cpu) -{ - unsigned long flags; - - local_irq_save(flags); - - gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), mips_cm_vp_id(cpu)); - - if (mips_cm_is64) { - gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE), cnt); - } else { - gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI), - (int)(cnt >> 32)); - gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO), - (int)(cnt & 0xffffffff)); - } - - local_irq_restore(flags); -} - -u64 gic_read_compare(void) -{ - unsigned int hi, lo; - - if (mips_cm_is64) - return (u64)gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE)); - - hi = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI)); - lo = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO)); - - return (((u64) hi) << 32) + lo; -} - -void gic_start_count(void) -{ - u32 gicconfig; - - /* Start the counter */ - gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG)); - gicconfig &= ~(1 << GIC_SH_CONFIG_COUNTSTOP_SHF); - gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig); -} - -void gic_stop_count(void) -{ - u32 gicconfig; - - /* Stop the counter */ - gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG)); - gicconfig |= 1 << GIC_SH_CONFIG_COUNTSTOP_SHF; - gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig); -} - -#endif - -unsigned gic_read_local_vp_id(void) -{ - unsigned long ident; - - ident = gic_read(GIC_REG(VPE_LOCAL, GIC_VP_IDENT)); - return ident & GIC_VP_IDENT_VCNUM_MSK; + /* Clear the interrupt's bit in all pcpu_masks */ + for_each_possible_cpu(i) + clear_bit(intr, per_cpu_ptr(pcpu_masks, i)); } static bool gic_local_irq_is_routable(int intr) @@ -250,17 +72,17 @@ static bool gic_local_irq_is_routable(int intr) if (cpu_has_veic) return true; - vpe_ctl = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_CTL)); + vpe_ctl = read_gic_vl_ctl(); switch (intr) { case GIC_LOCAL_INT_TIMER: - return vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK; + return vpe_ctl & GIC_VX_CTL_TIMER_ROUTABLE; case GIC_LOCAL_INT_PERFCTR: - return vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK; + return vpe_ctl & GIC_VX_CTL_PERFCNT_ROUTABLE; case GIC_LOCAL_INT_FDC: - return vpe_ctl & GIC_VPE_CTL_FDC_RTBL_MSK; + return vpe_ctl & GIC_VX_CTL_FDC_ROUTABLE; case GIC_LOCAL_INT_SWINT0: case GIC_LOCAL_INT_SWINT1: - return vpe_ctl & GIC_VPE_CTL_SWINT_RTBL_MSK; + return vpe_ctl & GIC_VX_CTL_SWINT_ROUTABLE; default: return true; } @@ -272,15 +94,14 @@ static void gic_bind_eic_interrupt(int irq, int set) irq -= GIC_PIN_TO_VEC_OFFSET; /* Set irq to use shadow set */ - gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_EIC_SHADOW_SET_BASE) + - GIC_VPE_EIC_SS(irq), set); + write_gic_vl_eic_shadow_set(irq, set); } static void gic_send_ipi(struct irq_data *d, unsigned int cpu) { irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d)); - gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(hwirq)); + write_gic_wedge(GIC_WEDGE_RW | hwirq); } 
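The irq-mips-gic.c hunks above drop the old NR_CPUS-sized gic_pcpu_mask array and the hand-rolled gic_read()/gic_write() accessors in favour of per-CPU bitmaps (pcpu_masks) and the mips-gic register helpers. A shared interrupt is dispatched on a CPU only if its bit is set in that CPU's mask, so routing becomes "clear the bit everywhere, then set it for exactly one CPU" (gic_clear_pcpu_masks() followed by a set_bit() in the unmask and set_affinity paths). Below is a minimal userspace sketch of that discipline; the bit helpers and function names are illustrative stand-ins for the kernel's set_bit()/clear_bit() and per-CPU storage, not driver code.

#include <stdio.h>

#define NR_CPUS		4
#define NR_SHARED	256			/* GIC_MAX_INTRS in the driver */
#define BITS_PER_LONG	(8 * (int)sizeof(unsigned long))
#define MASK_LONGS	((NR_SHARED + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* One routing mask per CPU; the driver keeps these in per-CPU storage
 * (DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks)). */
static unsigned long pcpu_masks[NR_CPUS][MASK_LONGS];

static void set_bit_in(unsigned long *mask, unsigned int bit)
{
	mask[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
}

static void clear_bit_in(unsigned long *mask, unsigned int bit)
{
	mask[bit / BITS_PER_LONG] &= ~(1UL << (bit % BITS_PER_LONG));
}

static int test_bit_in(const unsigned long *mask, unsigned int bit)
{
	return (mask[bit / BITS_PER_LONG] >> (bit % BITS_PER_LONG)) & 1;
}

/* Mirrors gic_clear_pcpu_masks(): forget the interrupt on every CPU. */
static void clear_pcpu_masks(unsigned int intr)
{
	for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
		clear_bit_in(pcpu_masks[cpu], intr);
}

/* An interrupt is owned by at most one CPU, so always clear everywhere
 * before setting a single bit, as gic_set_affinity() does. */
static void route_to_cpu(unsigned int intr, unsigned int cpu)
{
	clear_pcpu_masks(intr);
	set_bit_in(pcpu_masks[cpu], intr);
}

int main(void)
{
	route_to_cpu(42, 0);
	route_to_cpu(42, 3);	/* re-route: the bit moves, it is never duplicated */

	for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%u owns irq 42: %d\n", cpu,
		       test_bit_in(pcpu_masks[cpu], 42));
	return 0;
}

In gic_handle_shared_int() the same per-CPU mask is ANDed against the pending bits, so only the CPU that currently owns an interrupt ever dispatches it.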
int gic_get_c0_compare_int(void) @@ -316,47 +137,22 @@ int gic_get_c0_fdc_int(void) GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC)); } -int gic_get_usm_range(struct resource *gic_usm_res) -{ - if (!gic_present) - return -1; - - gic_usm_res->start = __gic_base_addr + USM_VISIBLE_SECTION_OFS; - gic_usm_res->end = gic_usm_res->start + (USM_VISIBLE_SECTION_SIZE - 1); - - return 0; -} - static void gic_handle_shared_int(bool chained) { - unsigned int i, intr, virq, gic_reg_step = mips_cm_is64 ? 8 : 4; + unsigned int intr, virq; unsigned long *pcpu_mask; - unsigned long pending_reg, intrmask_reg; DECLARE_BITMAP(pending, GIC_MAX_INTRS); - DECLARE_BITMAP(intrmask, GIC_MAX_INTRS); /* Get per-cpu bitmaps */ - pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask; + pcpu_mask = this_cpu_ptr(pcpu_masks); - pending_reg = GIC_REG(SHARED, GIC_SH_PEND); - intrmask_reg = GIC_REG(SHARED, GIC_SH_MASK); + if (mips_cm_is64) + __ioread64_copy(pending, addr_gic_pend(), + DIV_ROUND_UP(gic_shared_intrs, 64)); + else + __ioread32_copy(pending, addr_gic_pend(), + DIV_ROUND_UP(gic_shared_intrs, 32)); - for (i = 0; i < BITS_TO_LONGS(gic_shared_intrs); i++) { - pending[i] = gic_read(pending_reg); - intrmask[i] = gic_read(intrmask_reg); - pending_reg += gic_reg_step; - intrmask_reg += gic_reg_step; - - if (!IS_ENABLED(CONFIG_64BIT) || mips_cm_is64) - continue; - - pending[i] |= (u64)gic_read(pending_reg) << 32; - intrmask[i] |= (u64)gic_read(intrmask_reg) << 32; - pending_reg += gic_reg_step; - intrmask_reg += gic_reg_step; - } - - bitmap_and(pending, pending, intrmask, gic_shared_intrs); bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs); for_each_set_bit(intr, pending, gic_shared_intrs) { @@ -371,19 +167,29 @@ static void gic_handle_shared_int(bool chained) static void gic_mask_irq(struct irq_data *d) { - gic_reset_mask(GIC_HWIRQ_TO_SHARED(d->hwirq)); + unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq); + + write_gic_rmask(intr); + gic_clear_pcpu_masks(intr); } static void gic_unmask_irq(struct irq_data *d) { - gic_set_mask(GIC_HWIRQ_TO_SHARED(d->hwirq)); + unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq); + unsigned int cpu; + + write_gic_smask(intr); + + gic_clear_pcpu_masks(intr); + cpu = cpumask_first(irq_data_get_effective_affinity_mask(d)); + set_bit(intr, per_cpu_ptr(pcpu_masks, cpu)); } static void gic_ack_irq(struct irq_data *d) { unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq); - gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_CLR(irq)); + write_gic_wedge(irq); } static int gic_set_type(struct irq_data *d, unsigned int type) @@ -395,34 +201,34 @@ static int gic_set_type(struct irq_data *d, unsigned int type) spin_lock_irqsave(&gic_lock, flags); switch (type & IRQ_TYPE_SENSE_MASK) { case IRQ_TYPE_EDGE_FALLING: - gic_set_polarity(irq, GIC_POL_NEG); - gic_set_trigger(irq, GIC_TRIG_EDGE); - gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE); + change_gic_pol(irq, GIC_POL_FALLING_EDGE); + change_gic_trig(irq, GIC_TRIG_EDGE); + change_gic_dual(irq, GIC_DUAL_SINGLE); is_edge = true; break; case IRQ_TYPE_EDGE_RISING: - gic_set_polarity(irq, GIC_POL_POS); - gic_set_trigger(irq, GIC_TRIG_EDGE); - gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE); + change_gic_pol(irq, GIC_POL_RISING_EDGE); + change_gic_trig(irq, GIC_TRIG_EDGE); + change_gic_dual(irq, GIC_DUAL_SINGLE); is_edge = true; break; case IRQ_TYPE_EDGE_BOTH: /* polarity is irrelevant in this case */ - gic_set_trigger(irq, GIC_TRIG_EDGE); - gic_set_dual_edge(irq, GIC_TRIG_DUAL_ENABLE); + change_gic_trig(irq, GIC_TRIG_EDGE); + change_gic_dual(irq, GIC_DUAL_DUAL); 
is_edge = true; break; case IRQ_TYPE_LEVEL_LOW: - gic_set_polarity(irq, GIC_POL_NEG); - gic_set_trigger(irq, GIC_TRIG_LEVEL); - gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE); + change_gic_pol(irq, GIC_POL_ACTIVE_LOW); + change_gic_trig(irq, GIC_TRIG_LEVEL); + change_gic_dual(irq, GIC_DUAL_SINGLE); is_edge = false; break; case IRQ_TYPE_LEVEL_HIGH: default: - gic_set_polarity(irq, GIC_POL_POS); - gic_set_trigger(irq, GIC_TRIG_LEVEL); - gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE); + change_gic_pol(irq, GIC_POL_ACTIVE_HIGH); + change_gic_trig(irq, GIC_TRIG_LEVEL); + change_gic_dual(irq, GIC_DUAL_SINGLE); is_edge = false; break; } @@ -443,32 +249,28 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool force) { unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq); - cpumask_t tmp = CPU_MASK_NONE; - unsigned long flags; - int i, cpu; + unsigned long flags; + unsigned int cpu; - cpumask_and(&tmp, cpumask, cpu_online_mask); - if (cpumask_empty(&tmp)) + cpu = cpumask_first_and(cpumask, cpu_online_mask); + if (cpu >= NR_CPUS) return -EINVAL; - cpu = cpumask_first(&tmp); - /* Assumption : cpumask refers to a single CPU */ spin_lock_irqsave(&gic_lock, flags); /* Re-route this IRQ */ - gic_map_to_vpe(irq, mips_cm_vp_id(cpu)); + write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu))); /* Update the pcpu_masks */ - for (i = 0; i < min(gic_vpes, NR_CPUS); i++) - clear_bit(irq, pcpu_masks[i].pcpu_mask); - set_bit(irq, pcpu_masks[cpu].pcpu_mask); + gic_clear_pcpu_masks(irq); + if (read_gic_mask(irq)) + set_bit(irq, per_cpu_ptr(pcpu_masks, cpu)); - cpumask_copy(irq_data_get_affinity_mask(d), cpumask); irq_data_update_effective_affinity(d, cpumask_of(cpu)); spin_unlock_irqrestore(&gic_lock, flags); - return IRQ_SET_MASK_OK_NOCOPY; + return IRQ_SET_MASK_OK; } #endif @@ -499,8 +301,8 @@ static void gic_handle_local_int(bool chained) unsigned long pending, masked; unsigned int intr, virq; - pending = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_PEND)); - masked = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_MASK)); + pending = read_gic_vl_pend(); + masked = read_gic_vl_mask(); bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS); @@ -518,14 +320,14 @@ static void gic_mask_local_irq(struct irq_data *d) { int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq); - gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr); + write_gic_vl_rmask(BIT(intr)); } static void gic_unmask_local_irq(struct irq_data *d) { int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq); - gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr); + write_gic_vl_smask(BIT(intr)); } static struct irq_chip gic_local_irq_controller = { @@ -542,9 +344,8 @@ static void gic_mask_local_irq_all_vpes(struct irq_data *d) spin_lock_irqsave(&gic_lock, flags); for (i = 0; i < gic_vpes; i++) { - gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), - mips_cm_vp_id(i)); - gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr); + write_gic_vl_other(mips_cm_vp_id(i)); + write_gic_vo_rmask(BIT(intr)); } spin_unlock_irqrestore(&gic_lock, flags); } @@ -557,9 +358,8 @@ static void gic_unmask_local_irq_all_vpes(struct irq_data *d) spin_lock_irqsave(&gic_lock, flags); for (i = 0; i < gic_vpes; i++) { - gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), - mips_cm_vp_id(i)); - gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr); + write_gic_vl_other(mips_cm_vp_id(i)); + write_gic_vo_smask(BIT(intr)); } spin_unlock_irqrestore(&gic_lock, flags); } @@ -582,103 +382,54 @@ static void gic_irq_dispatch(struct irq_desc *desc) gic_handle_shared_int(true); } -static void __init 
gic_basic_init(void) -{ - unsigned int i; - - board_bind_eic_interrupt = &gic_bind_eic_interrupt; - - /* Setup defaults */ - for (i = 0; i < gic_shared_intrs; i++) { - gic_set_polarity(i, GIC_POL_POS); - gic_set_trigger(i, GIC_TRIG_LEVEL); - gic_reset_mask(i); - } - - for (i = 0; i < gic_vpes; i++) { - unsigned int j; - - gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), - mips_cm_vp_id(i)); - for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) { - if (!gic_local_irq_is_routable(j)) - continue; - gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j); - } - } -} - static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw) { int intr = GIC_HWIRQ_TO_LOCAL(hw); - int ret = 0; int i; unsigned long flags; + u32 val; if (!gic_local_irq_is_routable(intr)) return -EPERM; + if (intr > GIC_LOCAL_INT_FDC) { + pr_err("Invalid local IRQ %d\n", intr); + return -EINVAL; + } + + if (intr == GIC_LOCAL_INT_TIMER) { + /* CONFIG_MIPS_CMP workaround (see __gic_init) */ + val = GIC_MAP_PIN_MAP_TO_PIN | timer_cpu_pin; + } else { + val = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin; + } + spin_lock_irqsave(&gic_lock, flags); for (i = 0; i < gic_vpes; i++) { - u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin; - - gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), - mips_cm_vp_id(i)); - - switch (intr) { - case GIC_LOCAL_INT_WD: - gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_WD_MAP), val); - break; - case GIC_LOCAL_INT_COMPARE: - gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP), - val); - break; - case GIC_LOCAL_INT_TIMER: - /* CONFIG_MIPS_CMP workaround (see __gic_init) */ - val = GIC_MAP_TO_PIN_MSK | timer_cpu_pin; - gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP), - val); - break; - case GIC_LOCAL_INT_PERFCTR: - gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP), - val); - break; - case GIC_LOCAL_INT_SWINT0: - gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT0_MAP), - val); - break; - case GIC_LOCAL_INT_SWINT1: - gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT1_MAP), - val); - break; - case GIC_LOCAL_INT_FDC: - gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_FDC_MAP), val); - break; - default: - pr_err("Invalid local IRQ %d\n", intr); - ret = -EINVAL; - break; - } + write_gic_vl_other(mips_cm_vp_id(i)); + write_gic_vo_map(intr, val); } spin_unlock_irqrestore(&gic_lock, flags); - return ret; + return 0; } static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq, - irq_hw_number_t hw, unsigned int vpe) + irq_hw_number_t hw, unsigned int cpu) { int intr = GIC_HWIRQ_TO_SHARED(hw); + struct irq_data *data; unsigned long flags; - int i; + + data = irq_get_irq_data(virq); spin_lock_irqsave(&gic_lock, flags); - gic_map_to_pin(intr, gic_cpu_pin); - gic_map_to_vpe(intr, mips_cm_vp_id(vpe)); - for (i = 0; i < min(gic_vpes, NR_CPUS); i++) - clear_bit(intr, pcpu_masks[i].pcpu_mask); - set_bit(intr, pcpu_masks[vpe].pcpu_mask); + write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin); + write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu))); + gic_clear_pcpu_masks(intr); + set_bit(intr, per_cpu_ptr(pcpu_masks, cpu)); + irq_data_update_effective_affinity(data, cpumask_of(cpu)); spin_unlock_irqrestore(&gic_lock, flags); return 0; @@ -885,34 +636,69 @@ static const struct irq_domain_ops gic_ipi_domain_ops = { .match = gic_ipi_domain_match, }; -static void __init __gic_init(unsigned long gic_base_addr, - unsigned long gic_addrspace_size, - unsigned int cpu_vec, unsigned int irqbase, - struct device_node *node) + +static int __init gic_of_init(struct device_node *node, + struct device_node *parent) { - unsigned int 
gicconfig, cpu; - unsigned int v[2]; + unsigned int cpu_vec, i, j, gicconfig, cpu, v[2]; + unsigned long reserved; + phys_addr_t gic_base; + struct resource res; + size_t gic_len; - __gic_base_addr = gic_base_addr; + /* Find the first available CPU vector. */ + i = 0; + reserved = (C_SW0 | C_SW1) >> __ffs(C_SW0); + while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors", + i++, &cpu_vec)) + reserved |= BIT(cpu_vec); - gic_base = ioremap_nocache(gic_base_addr, gic_addrspace_size); + cpu_vec = find_first_zero_bit(&reserved, hweight_long(ST0_IM)); + if (cpu_vec == hweight_long(ST0_IM)) { + pr_err("No CPU vectors available for GIC\n"); + return -ENODEV; + } - gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG)); - gic_shared_intrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >> - GIC_SH_CONFIG_NUMINTRS_SHF; - gic_shared_intrs = ((gic_shared_intrs + 1) * 8); + if (of_address_to_resource(node, 0, &res)) { + /* + * Probe the CM for the GIC base address if not specified + * in the device-tree. + */ + if (mips_cm_present()) { + gic_base = read_gcr_gic_base() & + ~CM_GCR_GIC_BASE_GICEN; + gic_len = 0x20000; + } else { + pr_err("Failed to get GIC memory range\n"); + return -ENODEV; + } + } else { + gic_base = res.start; + gic_len = resource_size(&res); + } - gic_vpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >> - GIC_SH_CONFIG_NUMVPES_SHF; + if (mips_cm_present()) { + write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN); + /* Ensure GIC region is enabled before trying to access it */ + __sync(); + } + + mips_gic_base = ioremap_nocache(gic_base, gic_len); + + gicconfig = read_gic_config(); + gic_shared_intrs = gicconfig & GIC_CONFIG_NUMINTERRUPTS; + gic_shared_intrs >>= __ffs(GIC_CONFIG_NUMINTERRUPTS); + gic_shared_intrs = (gic_shared_intrs + 1) * 8; + + gic_vpes = gicconfig & GIC_CONFIG_PVPS; + gic_vpes >>= __ffs(GIC_CONFIG_PVPS); gic_vpes = gic_vpes + 1; if (cpu_has_veic) { /* Set EIC mode for all VPEs */ for_each_present_cpu(cpu) { - gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), - mips_cm_vp_id(cpu)); - gic_write(GIC_REG(VPE_OTHER, GIC_VPE_CTL), - GIC_VPE_CTL_EIC_MODE_MSK); + write_gic_vl_other(mips_cm_vp_id(cpu)); + write_gic_vo_ctl(GIC_VX_CTL_EIC); } /* Always use vector 1 in EIC mode */ @@ -937,9 +723,7 @@ static void __init __gic_init(unsigned long gic_base_addr, */ if (IS_ENABLED(CONFIG_MIPS_CMP) && gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) { - timer_cpu_pin = gic_read32(GIC_REG(VPE_LOCAL, - GIC_VPE_TIMER_MAP)) & - GIC_MAP_MSK; + timer_cpu_pin = read_gic_vl_timer_map() & GIC_MAP_PIN_MAP; irq_set_chained_handler(MIPS_CPU_IRQ_BASE + GIC_CPU_PIN_OFFSET + timer_cpu_pin, @@ -950,17 +734,21 @@ static void __init __gic_init(unsigned long gic_base_addr, } gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS + - gic_shared_intrs, irqbase, + gic_shared_intrs, 0, &gic_irq_domain_ops, NULL); - if (!gic_irq_domain) - panic("Failed to add GIC IRQ domain"); + if (!gic_irq_domain) { + pr_err("Failed to add GIC IRQ domain"); + return -ENXIO; + } gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain, IRQ_DOMAIN_FLAG_IPI_PER_CPU, GIC_NUM_LOCAL_INTRS + gic_shared_intrs, node, &gic_ipi_domain_ops, NULL); - if (!gic_ipi_domain) - panic("Failed to add GIC IPI domain"); + if (!gic_ipi_domain) { + pr_err("Failed to add GIC IPI domain"); + return -ENXIO; + } irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI); @@ -975,64 +763,25 @@ static void __init __gic_init(unsigned long gic_base_addr, } bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS); - gic_basic_init(); -} -void 
__init gic_init(unsigned long gic_base_addr, - unsigned long gic_addrspace_size, - unsigned int cpu_vec, unsigned int irqbase) -{ - __gic_init(gic_base_addr, gic_addrspace_size, cpu_vec, irqbase, NULL); -} + board_bind_eic_interrupt = &gic_bind_eic_interrupt; -static int __init gic_of_init(struct device_node *node, - struct device_node *parent) -{ - struct resource res; - unsigned int cpu_vec, i = 0, reserved = 0; - phys_addr_t gic_base; - size_t gic_len; - - /* Find the first available CPU vector. */ - while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors", - i++, &cpu_vec)) - reserved |= BIT(cpu_vec); - for (cpu_vec = 2; cpu_vec < 8; cpu_vec++) { - if (!(reserved & BIT(cpu_vec))) - break; - } - if (cpu_vec == 8) { - pr_err("No CPU vectors available for GIC\n"); - return -ENODEV; + /* Setup defaults */ + for (i = 0; i < gic_shared_intrs; i++) { + change_gic_pol(i, GIC_POL_ACTIVE_HIGH); + change_gic_trig(i, GIC_TRIG_LEVEL); + write_gic_rmask(i); } - if (of_address_to_resource(node, 0, &res)) { - /* - * Probe the CM for the GIC base address if not specified - * in the device-tree. - */ - if (mips_cm_present()) { - gic_base = read_gcr_gic_base() & - ~CM_GCR_GIC_BASE_GICEN_MSK; - gic_len = 0x20000; - } else { - pr_err("Failed to get GIC memory range\n"); - return -ENODEV; + for (i = 0; i < gic_vpes; i++) { + write_gic_vl_other(mips_cm_vp_id(i)); + for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) { + if (!gic_local_irq_is_routable(j)) + continue; + write_gic_vo_rmask(BIT(j)); } - } else { - gic_base = res.start; - gic_len = resource_size(&res); } - if (mips_cm_present()) { - write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK); - /* Ensure GIC region is enabled before trying to access it */ - __sync(); - } - gic_present = true; - - __gic_init(gic_base, gic_len, cpu_vec, 0, node); - return 0; } IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init); diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c index 9ca691d6c13b..46c189ad8d94 100644 --- a/drivers/isdn/capi/kcapi.c +++ b/drivers/isdn/capi/kcapi.c @@ -55,7 +55,7 @@ struct capictr_event { /* ------------------------------------------------------------- */ -static struct capi_version driver_version = {2, 0, 1, 1 << 4}; +static const struct capi_version driver_version = {2, 0, 1, 1 << 4}; static char driver_serial[CAPI_SERIAL_LEN] = "0004711"; static char capi_manufakturer[64] = "AVM Berlin"; diff --git a/drivers/isdn/hardware/eicon/divacapi.h b/drivers/isdn/hardware/eicon/divacapi.h index a315a2914d70..c4868a0d82f4 100644 --- a/drivers/isdn/hardware/eicon/divacapi.h +++ b/drivers/isdn/hardware/eicon/divacapi.h @@ -26,15 +26,7 @@ /*#define DEBUG */ - - - - - - - - - +#include #define IMPLEMENT_DTMF 1 #define IMPLEMENT_LINE_INTERCONNECT2 1 @@ -82,8 +74,6 @@ #define CODEC_PERMANENT 0x02 #define ADV_VOICE 0x03 #define MAX_CIP_TYPES 5 /* kind of CIP types for group optimization */ -#define C_IND_MASK_DWORDS ((MAX_APPL + 32) >> 5) - #define FAX_CONNECT_INFO_BUFFER_SIZE 256 #define NCPI_BUFFER_SIZE 256 @@ -265,8 +255,8 @@ struct _PLCI { word ncci_ring_list; byte inc_dis_ncci_table[MAX_CHANNELS_PER_PLCI]; t_std_internal_command internal_command_queue[MAX_INTERNAL_COMMAND_LEVELS]; - dword c_ind_mask_table[C_IND_MASK_DWORDS]; - dword group_optimization_mask_table[C_IND_MASK_DWORDS]; + DECLARE_BITMAP(c_ind_mask_table, MAX_APPL); + DECLARE_BITMAP(group_optimization_mask_table, MAX_APPL); byte RBuffer[200]; dword msg_in_queue[MSG_IN_QUEUE_SIZE/sizeof(dword)]; API_SAVE saved_msg; diff --git a/drivers/isdn/hardware/eicon/message.c 
b/drivers/isdn/hardware/eicon/message.c index 3b11422b1cce..eadd1ed1e014 100644 --- a/drivers/isdn/hardware/eicon/message.c +++ b/drivers/isdn/hardware/eicon/message.c @@ -23,9 +23,7 @@ * */ - - - +#include #include "platform.h" #include "di_defs.h" @@ -35,19 +33,9 @@ #include "mdm_msg.h" #include "divasync.h" - - #define FILE_ "MESSAGE.C" #define dprintf - - - - - - - - /*------------------------------------------------------------------*/ /* This is options supported for all adapters that are server by */ /* XDI driver. Allo it is not necessary to ask it from every adapter*/ @@ -72,9 +60,6 @@ static dword diva_xdi_extended_features = 0; /*------------------------------------------------------------------*/ static void group_optimization(DIVA_CAPI_ADAPTER *a, PLCI *plci); -static void set_group_ind_mask(PLCI *plci); -static void clear_group_ind_mask_bit(PLCI *plci, word b); -static byte test_group_ind_mask_bit(PLCI *plci, word b); void AutomaticLaw(DIVA_CAPI_ADAPTER *); word CapiRelease(word); word CapiRegister(word); @@ -1086,106 +1071,6 @@ static void plci_remove(PLCI *plci) plci->State = OUTG_DIS_PENDING; } -/*------------------------------------------------------------------*/ -/* Application Group function helpers */ -/*------------------------------------------------------------------*/ - -static void set_group_ind_mask(PLCI *plci) -{ - word i; - - for (i = 0; i < C_IND_MASK_DWORDS; i++) - plci->group_optimization_mask_table[i] = 0xffffffffL; -} - -static void clear_group_ind_mask_bit(PLCI *plci, word b) -{ - plci->group_optimization_mask_table[b >> 5] &= ~(1L << (b & 0x1f)); -} - -static byte test_group_ind_mask_bit(PLCI *plci, word b) -{ - return ((plci->group_optimization_mask_table[b >> 5] & (1L << (b & 0x1f))) != 0); -} - -/*------------------------------------------------------------------*/ -/* c_ind_mask operations for arbitrary MAX_APPL */ -/*------------------------------------------------------------------*/ - -static void clear_c_ind_mask(PLCI *plci) -{ - word i; - - for (i = 0; i < C_IND_MASK_DWORDS; i++) - plci->c_ind_mask_table[i] = 0; -} - -static byte c_ind_mask_empty(PLCI *plci) -{ - word i; - - i = 0; - while ((i < C_IND_MASK_DWORDS) && (plci->c_ind_mask_table[i] == 0)) - i++; - return (i == C_IND_MASK_DWORDS); -} - -static void set_c_ind_mask_bit(PLCI *plci, word b) -{ - plci->c_ind_mask_table[b >> 5] |= (1L << (b & 0x1f)); -} - -static void clear_c_ind_mask_bit(PLCI *plci, word b) -{ - plci->c_ind_mask_table[b >> 5] &= ~(1L << (b & 0x1f)); -} - -static byte test_c_ind_mask_bit(PLCI *plci, word b) -{ - return ((plci->c_ind_mask_table[b >> 5] & (1L << (b & 0x1f))) != 0); -} - -static void dump_c_ind_mask(PLCI *plci) -{ - word i, j, k; - dword d; - char *p; - char buf[40]; - - for (i = 0; i < C_IND_MASK_DWORDS; i += 4) - { - p = buf + 36; - *p = '\0'; - for (j = 0; j < 4; j++) - { - if (i + j < C_IND_MASK_DWORDS) - { - d = plci->c_ind_mask_table[i + j]; - for (k = 0; k < 8; k++) - { - *(--p) = hex_asc_lo(d); - d >>= 4; - } - } - else if (i != 0) - { - for (k = 0; k < 8; k++) - *(--p) = ' '; - } - *(--p) = ' '; - } - dbug(1, dprintf("c_ind_mask =%s", (char *) p)); - } -} - - - - - -#define dump_plcis(a) - - - /*------------------------------------------------------------------*/ /* translation function for each message */ /*------------------------------------------------------------------*/ @@ -1457,13 +1342,13 @@ static byte connect_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a, return 1; } else if (plci->State == INC_CON_PENDING || plci->State == INC_CON_ALERT) 
{ - clear_c_ind_mask_bit(plci, (word)(appl->Id - 1)); - dump_c_ind_mask(plci); + __clear_bit(appl->Id - 1, plci->c_ind_mask_table); + dbug(1, dprintf("c_ind_mask =%*pb", MAX_APPL, plci->c_ind_mask_table)); Reject = GET_WORD(parms[0].info); dbug(1, dprintf("Reject=0x%x", Reject)); if (Reject) { - if (c_ind_mask_empty(plci)) + if (bitmap_empty(plci->c_ind_mask_table, MAX_APPL)) { if ((Reject & 0xff00) == 0x3400) { @@ -1553,11 +1438,8 @@ static byte connect_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a, sig_req(plci, CALL_RES, 0); } - for (i = 0; i < max_appl; i++) { - if (test_c_ind_mask_bit(plci, i)) { - sendf(&application[i], _DISCONNECT_I, Id, 0, "w", _OTHER_APPL_CONNECTED); - } - } + for_each_set_bit(i, plci->c_ind_mask_table, max_appl) + sendf(&application[i], _DISCONNECT_I, Id, 0, "w", _OTHER_APPL_CONNECTED); } } return 1; @@ -1584,13 +1466,10 @@ static byte disconnect_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a, { if (plci->State == INC_CON_PENDING || plci->State == INC_CON_ALERT) { - clear_c_ind_mask_bit(plci, (word)(appl->Id - 1)); + __clear_bit(appl->Id - 1, plci->c_ind_mask_table); plci->appl = appl; - for (i = 0; i < max_appl; i++) - { - if (test_c_ind_mask_bit(plci, i)) - sendf(&application[i], _DISCONNECT_I, Id, 0, "w", 0); - } + for_each_set_bit(i, plci->c_ind_mask_table, max_appl) + sendf(&application[i], _DISCONNECT_I, Id, 0, "w", 0); plci->State = OUTG_DIS_PENDING; } if (plci->Sig.Id && plci->appl) @@ -1634,7 +1513,7 @@ static byte disconnect_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a, { /* clear ind mask bit, just in case of collsion of */ /* DISCONNECT_IND and CONNECT_RES */ - clear_c_ind_mask_bit(plci, (word)(appl->Id - 1)); + __clear_bit(appl->Id - 1, plci->c_ind_mask_table); ncci_free_receive_buffers(plci, 0); if (plci_remove_check(plci)) { @@ -1642,7 +1521,7 @@ static byte disconnect_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a, } if (plci->State == INC_DIS_PENDING || plci->State == SUSPENDING) { - if (c_ind_mask_empty(plci)) { + if (bitmap_empty(plci->c_ind_mask_table, MAX_APPL)) { if (plci->State != SUSPENDING) plci->State = IDLE; dbug(1, dprintf("chs=%d", plci->channels)); if (!plci->channels) { @@ -3351,13 +3230,11 @@ static byte select_b_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a, } plci->State = INC_CON_CONNECTED_ALERT; plci->appl = appl; - clear_c_ind_mask_bit(plci, (word)(appl->Id - 1)); - dump_c_ind_mask(plci); - for (i = 0; i < max_appl; i++) /* disconnect the other appls */ - { /* its quasi a connect */ - if (test_c_ind_mask_bit(plci, i)) - sendf(&application[i], _DISCONNECT_I, Id, 0, "w", _OTHER_APPL_CONNECTED); - } + __clear_bit(appl->Id - 1, plci->c_ind_mask_table); + dbug(1, dprintf("c_ind_mask =%*pb", MAX_APPL, plci->c_ind_mask_table)); + /* disconnect the other appls its quasi a connect */ + for_each_set_bit(i, plci->c_ind_mask_table, max_appl) + sendf(&application[i], _DISCONNECT_I, Id, 0, "w", _OTHER_APPL_CONNECTED); } api_save_msg(msg, "s", &plci->saved_msg); @@ -5692,19 +5569,17 @@ static void sig_ind(PLCI *plci) cip = find_cip(a, parms[4], parms[6]); cip_mask = 1L << cip; dbug(1, dprintf("cip=%d,cip_mask=%lx", cip, cip_mask)); - clear_c_ind_mask(plci); + bitmap_zero(plci->c_ind_mask_table, MAX_APPL); if (!remove_started && !a->adapter_disabled) { - set_c_ind_mask_bit(plci, MAX_APPL); group_optimization(a, plci); - for (i = 0; i < max_appl; i++) { + for_each_set_bit(i, plci->group_optimization_mask_table, max_appl) { if (application[i].Id && (a->CIP_Mask[i] & 1 || a->CIP_Mask[i] & cip_mask) - && CPN_filter_ok(parms[0], a, i) - 
&& test_group_ind_mask_bit(plci, i)) { + && CPN_filter_ok(parms[0], a, i)) { dbug(1, dprintf("storedcip_mask[%d]=0x%lx", i, a->CIP_Mask[i])); - set_c_ind_mask_bit(plci, i); - dump_c_ind_mask(plci); + __set_bit(i, plci->c_ind_mask_table); + dbug(1, dprintf("c_ind_mask =%*pb", MAX_APPL, plci->c_ind_mask_table)); plci->State = INC_CON_PENDING; plci->call_dir = (plci->call_dir & ~(CALL_DIR_OUT | CALL_DIR_ORIGINATE)) | CALL_DIR_IN | CALL_DIR_ANSWER; @@ -5750,10 +5625,9 @@ static void sig_ind(PLCI *plci) SendMultiIE(plci, Id, multi_pi_parms, PI, 0x210, true)); } } - clear_c_ind_mask_bit(plci, MAX_APPL); - dump_c_ind_mask(plci); + dbug(1, dprintf("c_ind_mask =%*pb", MAX_APPL, plci->c_ind_mask_table)); } - if (c_ind_mask_empty(plci)) { + if (bitmap_empty(plci->c_ind_mask_table, MAX_APPL)) { sig_req(plci, HANGUP, 0); send_req(plci); plci->State = IDLE; @@ -5994,13 +5868,13 @@ static void sig_ind(PLCI *plci) break; case RESUME: - clear_c_ind_mask_bit(plci, (word)(plci->appl->Id - 1)); + __clear_bit(plci->appl->Id - 1, plci->c_ind_mask_table); PUT_WORD(&resume_cau[4], GOOD); sendf(plci->appl, _FACILITY_I, Id, 0, "ws", (word)3, resume_cau); break; case SUSPEND: - clear_c_ind_mask(plci); + bitmap_zero(plci->c_ind_mask_table, MAX_APPL); if (plci->NL.Id && !plci->nl_remove_id) { mixer_remove(plci); @@ -6037,15 +5911,12 @@ static void sig_ind(PLCI *plci) if (plci->State == INC_CON_PENDING || plci->State == INC_CON_ALERT) { - for (i = 0; i < max_appl; i++) - { - if (test_c_ind_mask_bit(plci, i)) - sendf(&application[i], _DISCONNECT_I, Id, 0, "w", 0); - } + for_each_set_bit(i, plci->c_ind_mask_table, max_appl) + sendf(&application[i], _DISCONNECT_I, Id, 0, "w", 0); } else { - clear_c_ind_mask(plci); + bitmap_zero(plci->c_ind_mask_table, MAX_APPL); } if (!plci->appl) { @@ -6055,7 +5926,7 @@ static void sig_ind(PLCI *plci) a->listen_active--; } plci->State = INC_DIS_PENDING; - if (c_ind_mask_empty(plci)) + if (bitmap_empty(plci->c_ind_mask_table, MAX_APPL)) { plci->State = IDLE; if (plci->NL.Id && !plci->nl_remove_id) @@ -6341,14 +6212,10 @@ static void SendInfo(PLCI *plci, dword Id, byte **parms, byte iesent) || Info_Number == DSP || Info_Number == UUI) { - for (j = 0; j < max_appl; j++) - { - if (test_c_ind_mask_bit(plci, j)) - { - dbug(1, dprintf("Ovl_Ind")); - iesent = true; - sendf(&application[j], _INFO_I, Id, 0, "wS", Info_Number, Info_Element); - } + for_each_set_bit(j, plci->c_ind_mask_table, max_appl) { + dbug(1, dprintf("Ovl_Ind")); + iesent = true; + sendf(&application[j], _INFO_I, Id, 0, "wS", Info_Number, Info_Element); } } } /* all other signalling states */ @@ -6416,14 +6283,10 @@ static byte SendMultiIE(PLCI *plci, dword Id, byte **parms, byte ie_type, } else if (!plci->appl && Info_Number) { /* overlap receiving broadcast */ - for (j = 0; j < max_appl; j++) - { - if (test_c_ind_mask_bit(plci, j)) - { - iesent = true; - dbug(1, dprintf("Mlt_Ovl_Ind")); - sendf(&application[j] , _INFO_I, Id, 0, "wS", Info_Number, Info_Element); - } + for_each_set_bit(j, plci->c_ind_mask_table, max_appl) { + iesent = true; + dbug(1, dprintf("Mlt_Ovl_Ind")); + sendf(&application[j] , _INFO_I, Id, 0, "wS", Info_Number, Info_Element); } } /* all other signalling states */ else if (Info_Number @@ -7270,7 +7133,6 @@ static word get_plci(DIVA_CAPI_ADAPTER *a) word i, j; PLCI *plci; - dump_plcis(a); for (i = 0; i < a->max_plci && a->plci[i].Id; i++); if (i == a->max_plci) { dbug(1, dprintf("get_plci: out of PLCIs")); @@ -7321,8 +7183,8 @@ static word get_plci(DIVA_CAPI_ADAPTER *a) plci->ncci_ring_list = 0; for (j = 0; 
j < MAX_CHANNELS_PER_PLCI; j++) plci->inc_dis_ncci_table[j] = 0; - clear_c_ind_mask(plci); - set_group_ind_mask(plci); + bitmap_zero(plci->c_ind_mask_table, MAX_APPL); + bitmap_fill(plci->group_optimization_mask_table, MAX_APPL); plci->fax_connect_info_length = 0; plci->nsf_control_bits = 0; plci->ncpi_state = 0x00; @@ -9373,10 +9235,10 @@ word CapiRelease(word Id) if (plci->State == INC_CON_PENDING || plci->State == INC_CON_ALERT) { - if (test_c_ind_mask_bit(plci, (word)(Id - 1))) + if (test_bit(Id - 1, plci->c_ind_mask_table)) { - clear_c_ind_mask_bit(plci, (word)(Id - 1)); - if (c_ind_mask_empty(plci)) + __clear_bit(Id - 1, plci->c_ind_mask_table); + if (bitmap_empty(plci->c_ind_mask_table, MAX_APPL)) { sig_req(plci, HANGUP, 0); send_req(plci); @@ -9384,10 +9246,10 @@ word CapiRelease(word Id) } } } - if (test_c_ind_mask_bit(plci, (word)(Id - 1))) + if (test_bit(Id - 1, plci->c_ind_mask_table)) { - clear_c_ind_mask_bit(plci, (word)(Id - 1)); - if (c_ind_mask_empty(plci)) + __clear_bit(Id - 1, plci->c_ind_mask_table); + if (bitmap_empty(plci->c_ind_mask_table, MAX_APPL)) { if (!plci->appl) { @@ -9452,7 +9314,7 @@ word CapiRelease(word Id) static word plci_remove_check(PLCI *plci) { if (!plci) return true; - if (!plci->NL.Id && c_ind_mask_empty(plci)) + if (!plci->NL.Id && bitmap_empty(plci->c_ind_mask_table, MAX_APPL)) { if (plci->Sig.Id == 0xff) plci->Sig.Id = 0; @@ -14735,7 +14597,8 @@ static void group_optimization(DIVA_CAPI_ADAPTER *a, PLCI *plci) word appl_number_group_type[MAX_APPL]; PLCI *auxplci; - set_group_ind_mask(plci); /* all APPLs within this inc. call are allowed to dial in */ + /* all APPLs within this inc. call are allowed to dial in */ + bitmap_fill(plci->group_optimization_mask_table, MAX_APPL); if (!a->group_optimization_enabled) { @@ -14771,13 +14634,12 @@ static void group_optimization(DIVA_CAPI_ADAPTER *a, PLCI *plci) if (a->plci[k].Id) { auxplci = &a->plci[k]; - if (auxplci->appl == &application[i]) /* application has a busy PLCI */ - { + if (auxplci->appl == &application[i]) { + /* application has a busy PLCI */ busy = true; dbug(1, dprintf("Appl 0x%x is busy", i + 1)); - } - else if (test_c_ind_mask_bit(auxplci, i)) /* application has an incoming call pending */ - { + } else if (test_bit(i, plci->c_ind_mask_table)) { + /* application has an incoming call pending */ busy = true; dbug(1, dprintf("Appl 0x%x has inc. 
call pending", i + 1)); } @@ -14826,7 +14688,8 @@ static void group_optimization(DIVA_CAPI_ADAPTER *a, PLCI *plci) if (appl_number_group_type[i] == appl_number_group_type[j]) { dbug(1, dprintf("Appl 0x%x is member of group 0x%x, no call", j + 1, appl_number_group_type[j])); - clear_group_ind_mask_bit(plci, j); /* disable call on other group members */ + /* disable call on other group members */ + __clear_bit(j, plci->group_optimization_mask_table); appl_number_group_type[j] = 0; /* remove disabled group member from group list */ } } @@ -14834,7 +14697,7 @@ static void group_optimization(DIVA_CAPI_ADAPTER *a, PLCI *plci) } else /* application should not get a call */ { - clear_group_ind_mask_bit(plci, i); + __clear_bit(i, plci->group_optimization_mask_table); } } diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.h b/drivers/isdn/hardware/mISDN/hfcsusb.h index 4157311d569d..5f8f1d9cac11 100644 --- a/drivers/isdn/hardware/mISDN/hfcsusb.h +++ b/drivers/isdn/hardware/mISDN/hfcsusb.h @@ -337,7 +337,7 @@ static const char *HFC_NT_LAYER1_STATES[HFC_MAX_NT_LAYER1_STATE + 1] = { }; /* supported devices */ -static struct usb_device_id hfcsusb_idtab[] = { +static const struct usb_device_id hfcsusb_idtab[] = { { USB_DEVICE(0x0959, 0x2bd0), .driver_info = (unsigned long) &((struct hfcsusb_vdata) diff --git a/drivers/isdn/hisax/hfc_usb.c b/drivers/isdn/hisax/hfc_usb.c index ef4748083efd..e8212185d386 100644 --- a/drivers/isdn/hisax/hfc_usb.c +++ b/drivers/isdn/hisax/hfc_usb.c @@ -65,7 +65,7 @@ typedef struct { } hfcsusb_vdata; /* VID/PID device list */ -static struct usb_device_id hfcusb_idtab[] = { +static const struct usb_device_id hfcusb_idtab[] = { { USB_DEVICE(0x0959, 0x2bd0), .driver_info = (unsigned long) &((hfcsusb_vdata) diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c index 6c44609fd83a..cd2b3c69771a 100644 --- a/drivers/isdn/i4l/isdn_ppp.c +++ b/drivers/isdn/i4l/isdn_ppp.c @@ -825,7 +825,6 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count) isdn_net_local *lp; struct ippp_struct *is; int proto; - unsigned char protobuf[4]; is = file->private_data; @@ -839,24 +838,28 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count) if (!lp) printk(KERN_DEBUG "isdn_ppp_write: lp == NULL\n"); else { - /* - * Don't reset huptimer for - * LCP packets. (Echo requests). - */ - if (copy_from_user(protobuf, buf, 4)) - return -EFAULT; - proto = PPP_PROTOCOL(protobuf); - if (proto != PPP_LCP) - lp->huptimer = 0; + if (lp->isdn_device < 0 || lp->isdn_channel < 0) { + unsigned char protobuf[4]; + /* + * Don't reset huptimer for + * LCP packets. (Echo requests). + */ + if (copy_from_user(protobuf, buf, 4)) + return -EFAULT; + + proto = PPP_PROTOCOL(protobuf); + if (proto != PPP_LCP) + lp->huptimer = 0; - if (lp->isdn_device < 0 || lp->isdn_channel < 0) return 0; + } if ((dev->drv[lp->isdn_device]->flags & DRV_FLAG_RUNNING) && lp->dialstate == 0 && (lp->flags & ISDN_NET_CONNECTED)) { unsigned short hl; struct sk_buff *skb; + unsigned char *cpy_buf; /* * we need to reserve enough space in front of * sk_buff. old call to dev_alloc_skb only reserved @@ -869,11 +872,21 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count) return count; } skb_reserve(skb, hl); - if (copy_from_user(skb_put(skb, count), buf, count)) + cpy_buf = skb_put(skb, count); + if (copy_from_user(cpy_buf, buf, count)) { kfree_skb(skb); return -EFAULT; } + + /* + * Don't reset huptimer for + * LCP packets. (Echo requests). 
+ */ + proto = PPP_PROTOCOL(cpy_buf); + if (proto != PPP_LCP) + lp->huptimer = 0; + if (is->debug & 0x40) { printk(KERN_DEBUG "ppp xmit: len %d\n", (int) skb->len); isdn_ppp_frame_log("xmit", skb->data, skb->len, 32, is->unit, lp->ppp_slot); diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c index 6ffd13466b8c..e97232646ba1 100644 --- a/drivers/isdn/isdnloop/isdnloop.c +++ b/drivers/isdn/isdnloop/isdnloop.c @@ -409,7 +409,7 @@ isdnloop_sendbuf(int channel, struct sk_buff *skb, isdnloop_card *card) return -EINVAL; } if (len) { - if (!(card->flags & (channel) ? ISDNLOOP_FLAGS_B2ACTIVE : ISDNLOOP_FLAGS_B1ACTIVE)) + if (!(card->flags & (channel ? ISDNLOOP_FLAGS_B2ACTIVE : ISDNLOOP_FLAGS_B1ACTIVE))) return 0; if (card->sndcount[channel] > ISDNLOOP_MAX_SQUEUE) return 0; diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index 594b24d410c3..52ea34e337cd 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig @@ -58,6 +58,15 @@ config LEDS_AAT1290 help This option enables support for the LEDs on the AAT1290. +config LEDS_AS3645A + tristate "AS3645A LED flash controller support" + depends on I2C && LEDS_CLASS_FLASH + depends on V4L2_FLASH_LED_CLASS || !V4L2_FLASH_LED_CLASS + help + Enable LED flash class support for AS3645A LED flash + controller. V4L2 flash API is provided as well if + CONFIG_V4L2_FLASH_API is enabled. + config LEDS_BCM6328 tristate "LED Support for Broadcom BCM6328" depends on LEDS_CLASS @@ -377,6 +386,17 @@ config LEDS_PCA955X LED driver chips accessed via the I2C bus. Supported devices include PCA9550, PCA9551, PCA9552, and PCA9553. +config LEDS_PCA955X_GPIO + bool "Enable GPIO support for PCA955X" + depends on LEDS_PCA955X + depends on GPIOLIB + help + Allow unused pins on PCA955X to be used as gpio. + + To use a pin as gpio the pin type should be set to + PCA955X_TYPE_GPIO in the device tree. 
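The isdn_ppp_write() hunk above removes a second read of the user buffer: the PPP protocol word used to be fetched with its own 4-byte copy_from_user() before the full frame was copied into the skb, and is now taken from the single copy (cpy_buf) after that copy has been made. A small userspace sketch of the copy-once-then-decide pattern; read_from_user(), send_frame() and the 1500-byte frame size are invented for the demo, only PPP_LCP and the protocol offset match the PPP header layout used by the driver.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for copy_from_user(): in the kernel the source can change
 * under us at any time, which is why deciding from a second, separate
 * fetch of the same user memory is fragile. */
static void read_from_user(void *dst, const uint8_t *user_buf, size_t len)
{
	memcpy(dst, user_buf, len);
}

#define PPP_LCP 0xc021

/* The protocol sits in bytes 2..3 of the frame, big-endian, which is
 * what PPP_PROTOCOL() extracts in the driver. */
static unsigned int ppp_protocol(const uint8_t *p)
{
	return (p[2] << 8) | p[3];
}

/* Copy the whole frame once, then make every decision (here: whether to
 * reset the hangup timer) from the private copy. */
static void send_frame(const uint8_t *user_buf, size_t len, int *huptimer)
{
	uint8_t frame[1500];

	if (len < 4 || len > sizeof(frame))
		return;

	read_from_user(frame, user_buf, len);	/* single fetch */

	if (ppp_protocol(frame) != PPP_LCP)
		*huptimer = 0;

	/* ... hand "frame" to the transmit path ... */
}

int main(void)
{
	uint8_t buf[] = { 0xff, 0x03, 0x00, 0x21, 0x45 };	/* IPv4-in-PPP frame */
	int huptimer = 10;

	send_frame(buf, sizeof(buf), &huptimer);
	printf("huptimer=%d\n", huptimer);	/* 0: not an LCP echo */
	return 0;
}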
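The one-line isdnloop_sendbuf() change above is a pure precedence fix: '&' binds tighter than the conditional operator, so the old parenthesisation evaluated (card->flags & channel) and then selected a flag constant, instead of testing the per-channel flag against card->flags. A standalone illustration of the two groupings (the flag values are invented for the demo, not taken from the driver):

#include <stdio.h>

#define FLAGS_B1ACTIVE 0x10	/* illustrative values only */
#define FLAGS_B2ACTIVE 0x20

int main(void)
{
	int flags = FLAGS_B2ACTIVE;	/* B2 channel is active */
	int channel = 1;		/* caller asks about channel 1 (B2) */

	/* Old grouping: parses as (flags & channel) ? FLAGS_B2ACTIVE : FLAGS_B1ACTIVE,
	 * so the per-channel flag is never actually tested against flags. */
	int old_way = flags & channel ? FLAGS_B2ACTIVE : FLAGS_B1ACTIVE;

	/* Fixed grouping: pick the per-channel flag first, then test it. */
	int new_way = flags & (channel ? FLAGS_B2ACTIVE : FLAGS_B1ACTIVE);

	printf("old=%#x new=%#x\n", old_way, new_way);	/* old=0x10 new=0x20 */
	return 0;
}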
+ + config LEDS_PCA963X tristate "LED support for PCA963x I2C chip" depends on LEDS_CLASS diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile index 909dae62ba05..7d7b26552923 100644 --- a/drivers/leds/Makefile +++ b/drivers/leds/Makefile @@ -8,6 +8,7 @@ obj-$(CONFIG_LEDS_TRIGGERS) += led-triggers.o # LED Platform Drivers obj-$(CONFIG_LEDS_88PM860X) += leds-88pm860x.o obj-$(CONFIG_LEDS_AAT1290) += leds-aat1290.o +obj-$(CONFIG_LEDS_AS3645A) += leds-as3645a.o obj-$(CONFIG_LEDS_BCM6328) += leds-bcm6328.o obj-$(CONFIG_LEDS_BCM6358) += leds-bcm6358.o obj-$(CONFIG_LEDS_BD2802) += leds-bd2802.o diff --git a/drivers/leds/leds-aat1290.c b/drivers/leds/leds-aat1290.c index a21e19297745..43bd8a43f36c 100644 --- a/drivers/leds/leds-aat1290.c +++ b/drivers/leds/leds-aat1290.c @@ -314,8 +314,10 @@ static void aat1290_led_validate_mm_current(struct aat1290_led *led, static int init_mm_current_scale(struct aat1290_led *led, struct aat1290_led_config_data *cfg) { - int max_mm_current_percent[] = { 20, 22, 25, 28, 32, 36, 40, 45, 50, 56, - 63, 71, 79, 89, 100 }; + static const int max_mm_current_percent[] = { + 20, 22, 25, 28, 32, 36, 40, 45, 50, 56, + 63, 71, 79, 89, 100 + }; int i, max_mm_current = AAT1290_MAX_MM_CURRENT(cfg->max_flash_current); @@ -432,7 +434,7 @@ static void aat1290_init_v4l2_flash_config(struct aat1290_led *led, strlcpy(v4l2_sd_cfg->dev_name, led_cdev->name, sizeof(v4l2_sd_cfg->dev_name)); - s = &v4l2_sd_cfg->torch_intensity; + s = &v4l2_sd_cfg->intensity; s->min = led->mm_current_scale[0]; s->max = led_cfg->max_mm_current; s->step = 1; @@ -504,7 +506,7 @@ static int aat1290_led_probe(struct platform_device *pdev) /* Create V4L2 Flash subdev. */ led->v4l2_flash = v4l2_flash_init(dev, of_fwnode_handle(sub_node), - fled_cdev, NULL, &v4l2_flash_ops, + fled_cdev, &v4l2_flash_ops, &v4l2_sd_cfg); if (IS_ERR(led->v4l2_flash)) { ret = PTR_ERR(led->v4l2_flash); diff --git a/drivers/leds/leds-as3645a.c b/drivers/leds/leds-as3645a.c new file mode 100644 index 000000000000..9a257f969300 --- /dev/null +++ b/drivers/leds/leds-as3645a.c @@ -0,0 +1,786 @@ +/* + * drivers/leds/leds-as3645a.c - AS3645A and LM3555 flash controllers driver + * + * Copyright (C) 2008-2011 Nokia Corporation + * Copyright (c) 2011, 2017 Intel Corporation. + * + * Based on drivers/media/i2c/as3645a.c. + * + * Contact: Sakari Ailus + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define AS_TIMER_US_TO_CODE(t) (((t) / 1000 - 100) / 50) +#define AS_TIMER_CODE_TO_US(c) ((50 * (c) + 100) * 1000) + +/* Register definitions */ + +/* Read-only Design info register: Reset state: xxxx 0001 */ +#define AS_DESIGN_INFO_REG 0x00 +#define AS_DESIGN_INFO_FACTORY(x) (((x) >> 4)) +#define AS_DESIGN_INFO_MODEL(x) ((x) & 0x0f) + +/* Read-only Version control register: Reset state: 0000 0000 + * for first engineering samples + */ +#define AS_VERSION_CONTROL_REG 0x01 +#define AS_VERSION_CONTROL_RFU(x) (((x) >> 4)) +#define AS_VERSION_CONTROL_VERSION(x) ((x) & 0x0f) + +/* Read / Write (Indicator and timer register): Reset state: 0000 1111 */ +#define AS_INDICATOR_AND_TIMER_REG 0x02 +#define AS_INDICATOR_AND_TIMER_TIMEOUT_SHIFT 0 +#define AS_INDICATOR_AND_TIMER_VREF_SHIFT 4 +#define AS_INDICATOR_AND_TIMER_INDICATOR_SHIFT 6 + +/* Read / Write (Current set register): Reset state: 0110 1001 */ +#define AS_CURRENT_SET_REG 0x03 +#define AS_CURRENT_ASSIST_LIGHT_SHIFT 0 +#define AS_CURRENT_LED_DET_ON (1 << 3) +#define AS_CURRENT_FLASH_CURRENT_SHIFT 4 + +/* Read / Write (Control register): Reset state: 1011 0100 */ +#define AS_CONTROL_REG 0x04 +#define AS_CONTROL_MODE_SETTING_SHIFT 0 +#define AS_CONTROL_STROBE_ON (1 << 2) +#define AS_CONTROL_OUT_ON (1 << 3) +#define AS_CONTROL_EXT_TORCH_ON (1 << 4) +#define AS_CONTROL_STROBE_TYPE_EDGE (0 << 5) +#define AS_CONTROL_STROBE_TYPE_LEVEL (1 << 5) +#define AS_CONTROL_COIL_PEAK_SHIFT 6 + +/* Read only (D3 is read / write) (Fault and info): Reset state: 0000 x000 */ +#define AS_FAULT_INFO_REG 0x05 +#define AS_FAULT_INFO_INDUCTOR_PEAK_LIMIT (1 << 1) +#define AS_FAULT_INFO_INDICATOR_LED (1 << 2) +#define AS_FAULT_INFO_LED_AMOUNT (1 << 3) +#define AS_FAULT_INFO_TIMEOUT (1 << 4) +#define AS_FAULT_INFO_OVER_TEMPERATURE (1 << 5) +#define AS_FAULT_INFO_SHORT_CIRCUIT (1 << 6) +#define AS_FAULT_INFO_OVER_VOLTAGE (1 << 7) + +/* Boost register */ +#define AS_BOOST_REG 0x0d +#define AS_BOOST_CURRENT_DISABLE (0 << 0) +#define AS_BOOST_CURRENT_ENABLE (1 << 0) + +/* Password register is used to unlock boost register writing */ +#define AS_PASSWORD_REG 0x0f +#define AS_PASSWORD_UNLOCK_VALUE 0x55 + +#define AS_NAME "as3645a" +#define AS_I2C_ADDR (0x60 >> 1) /* W:0x60, R:0x61 */ + +#define AS_FLASH_TIMEOUT_MIN 100000 /* us */ +#define AS_FLASH_TIMEOUT_MAX 850000 +#define AS_FLASH_TIMEOUT_STEP 50000 + +#define AS_FLASH_INTENSITY_MIN 200000 /* uA */ +#define AS_FLASH_INTENSITY_MAX_1LED 500000 +#define AS_FLASH_INTENSITY_MAX_2LEDS 400000 +#define AS_FLASH_INTENSITY_STEP 20000 + +#define AS_TORCH_INTENSITY_MIN 20000 /* uA */ +#define AS_TORCH_INTENSITY_MAX 160000 +#define AS_TORCH_INTENSITY_STEP 20000 + +#define AS_INDICATOR_INTENSITY_MIN 0 /* uA */ +#define AS_INDICATOR_INTENSITY_MAX 10000 +#define AS_INDICATOR_INTENSITY_STEP 2500 + +#define AS_PEAK_mA_MAX 2000 +#define AS_PEAK_mA_TO_REG(a) \ + ((min_t(u32, AS_PEAK_mA_MAX, a) - 1250) / 250) + +/* LED numbers for Devicetree */ +#define AS_LED_FLASH 0 +#define AS_LED_INDICATOR 1 + +enum as_mode { + AS_MODE_EXT_TORCH = 0 << AS_CONTROL_MODE_SETTING_SHIFT, + AS_MODE_INDICATOR = 1 << AS_CONTROL_MODE_SETTING_SHIFT, + AS_MODE_ASSIST = 2 << AS_CONTROL_MODE_SETTING_SHIFT, + AS_MODE_FLASH = 3 << AS_CONTROL_MODE_SETTING_SHIFT, +}; + +struct as3645a_config { + u32 flash_timeout_us; + u32 flash_max_ua; + u32 assist_max_ua; + u32 indicator_max_ua; + u32 voltage_reference; + u32 peak; +}; + +struct as3645a_names { + char 
flash[32]; + char indicator[32]; +}; + +struct as3645a { + struct i2c_client *client; + + struct mutex mutex; + + struct led_classdev_flash fled; + struct led_classdev iled_cdev; + + struct v4l2_flash *vf; + struct v4l2_flash *vfind; + + struct device_node *flash_node; + struct device_node *indicator_node; + + struct as3645a_config cfg; + + enum as_mode mode; + unsigned int timeout; + unsigned int flash_current; + unsigned int assist_current; + unsigned int indicator_current; + enum v4l2_flash_strobe_source strobe_source; +}; + +#define fled_to_as3645a(__fled) container_of(__fled, struct as3645a, fled) +#define iled_cdev_to_as3645a(__iled_cdev) \ + container_of(__iled_cdev, struct as3645a, iled_cdev) + +/* Return negative errno else zero on success */ +static int as3645a_write(struct as3645a *flash, u8 addr, u8 val) +{ + struct i2c_client *client = flash->client; + int rval; + + rval = i2c_smbus_write_byte_data(client, addr, val); + + dev_dbg(&client->dev, "Write Addr:%02X Val:%02X %s\n", addr, val, + rval < 0 ? "fail" : "ok"); + + return rval; +} + +/* Return negative errno else a data byte received from the device. */ +static int as3645a_read(struct as3645a *flash, u8 addr) +{ + struct i2c_client *client = flash->client; + int rval; + + rval = i2c_smbus_read_byte_data(client, addr); + + dev_dbg(&client->dev, "Read Addr:%02X Val:%02X %s\n", addr, rval, + rval < 0 ? "fail" : "ok"); + + return rval; +} + +/* ----------------------------------------------------------------------------- + * Hardware configuration and trigger + */ + +/** + * as3645a_set_config - Set flash configuration registers + * @flash: The flash + * + * Configure the hardware with flash, assist and indicator currents, as well as + * flash timeout. + * + * Return 0 on success, or a negative error code if an I2C communication error + * occurred. + */ +static int as3645a_set_current(struct as3645a *flash) +{ + u8 val; + + val = (flash->flash_current << AS_CURRENT_FLASH_CURRENT_SHIFT) + | (flash->assist_current << AS_CURRENT_ASSIST_LIGHT_SHIFT) + | AS_CURRENT_LED_DET_ON; + + return as3645a_write(flash, AS_CURRENT_SET_REG, val); +} + +static int as3645a_set_timeout(struct as3645a *flash) +{ + u8 val; + + val = flash->timeout << AS_INDICATOR_AND_TIMER_TIMEOUT_SHIFT; + + val |= (flash->cfg.voltage_reference + << AS_INDICATOR_AND_TIMER_VREF_SHIFT) + | ((flash->indicator_current ? flash->indicator_current - 1 : 0) + << AS_INDICATOR_AND_TIMER_INDICATOR_SHIFT); + + return as3645a_write(flash, AS_INDICATOR_AND_TIMER_REG, val); +} + +/** + * as3645a_set_control - Set flash control register + * @flash: The flash + * @mode: Desired output mode + * @on: Desired output state + * + * Configure the hardware with output mode and state. + * + * Return 0 on success, or a negative error code if an I2C communication error + * occurred. + */ +static int +as3645a_set_control(struct as3645a *flash, enum as_mode mode, bool on) +{ + u8 reg; + + /* Configure output parameters and operation mode. */ + reg = (flash->cfg.peak << AS_CONTROL_COIL_PEAK_SHIFT) + | (on ? 
AS_CONTROL_OUT_ON : 0) + | mode; + + if (mode == AS_MODE_FLASH && + flash->strobe_source == V4L2_FLASH_STROBE_SOURCE_EXTERNAL) + reg |= AS_CONTROL_STROBE_TYPE_LEVEL + | AS_CONTROL_STROBE_ON; + + return as3645a_write(flash, AS_CONTROL_REG, reg); +} + +static int as3645a_get_fault(struct led_classdev_flash *fled, u32 *fault) +{ + struct as3645a *flash = fled_to_as3645a(fled); + int rval; + + /* NOTE: reading register clears fault status */ + rval = as3645a_read(flash, AS_FAULT_INFO_REG); + if (rval < 0) + return rval; + + if (rval & AS_FAULT_INFO_INDUCTOR_PEAK_LIMIT) + *fault |= LED_FAULT_OVER_CURRENT; + + if (rval & AS_FAULT_INFO_INDICATOR_LED) + *fault |= LED_FAULT_INDICATOR; + + dev_dbg(&flash->client->dev, "%u connected LEDs\n", + rval & AS_FAULT_INFO_LED_AMOUNT ? 2 : 1); + + if (rval & AS_FAULT_INFO_TIMEOUT) + *fault |= LED_FAULT_TIMEOUT; + + if (rval & AS_FAULT_INFO_OVER_TEMPERATURE) + *fault |= LED_FAULT_OVER_TEMPERATURE; + + if (rval & AS_FAULT_INFO_SHORT_CIRCUIT) + *fault |= LED_FAULT_OVER_CURRENT; + + if (rval & AS_FAULT_INFO_OVER_VOLTAGE) + *fault |= LED_FAULT_INPUT_VOLTAGE; + + return rval; +} + +static unsigned int __as3645a_current_to_reg(unsigned int min, unsigned int max, + unsigned int step, + unsigned int val) +{ + if (val < min) + val = min; + + if (val > max) + val = max; + + return (val - min) / step; +} + +static unsigned int as3645a_current_to_reg(struct as3645a *flash, bool is_flash, + unsigned int ua) +{ + if (is_flash) + return __as3645a_current_to_reg(AS_TORCH_INTENSITY_MIN, + flash->cfg.assist_max_ua, + AS_TORCH_INTENSITY_STEP, ua); + else + return __as3645a_current_to_reg(AS_FLASH_INTENSITY_MIN, + flash->cfg.flash_max_ua, + AS_FLASH_INTENSITY_STEP, ua); +} + +static int as3645a_set_indicator_brightness(struct led_classdev *iled_cdev, + enum led_brightness brightness) +{ + struct as3645a *flash = iled_cdev_to_as3645a(iled_cdev); + int rval; + + flash->indicator_current = brightness; + + rval = as3645a_set_timeout(flash); + if (rval) + return rval; + + return as3645a_set_control(flash, AS_MODE_INDICATOR, brightness); +} + +static int as3645a_set_assist_brightness(struct led_classdev *fled_cdev, + enum led_brightness brightness) +{ + struct led_classdev_flash *fled = lcdev_to_flcdev(fled_cdev); + struct as3645a *flash = fled_to_as3645a(fled); + int rval; + + if (brightness) { + /* Register value 0 is 20 mA. 
*/ + flash->assist_current = brightness - 1; + + rval = as3645a_set_current(flash); + if (rval) + return rval; + } + + return as3645a_set_control(flash, AS_MODE_ASSIST, brightness); +} + +static int as3645a_set_flash_brightness(struct led_classdev_flash *fled, + u32 brightness_ua) +{ + struct as3645a *flash = fled_to_as3645a(fled); + + flash->flash_current = as3645a_current_to_reg(flash, true, brightness_ua); + + return as3645a_set_current(flash); +} + +static int as3645a_set_flash_timeout(struct led_classdev_flash *fled, + u32 timeout_us) +{ + struct as3645a *flash = fled_to_as3645a(fled); + + flash->timeout = AS_TIMER_US_TO_CODE(timeout_us); + + return as3645a_set_timeout(flash); +} + +static int as3645a_set_strobe(struct led_classdev_flash *fled, bool state) +{ + struct as3645a *flash = fled_to_as3645a(fled); + + return as3645a_set_control(flash, AS_MODE_FLASH, state); +} + +static const struct led_flash_ops as3645a_led_flash_ops = { + .flash_brightness_set = as3645a_set_flash_brightness, + .timeout_set = as3645a_set_flash_timeout, + .strobe_set = as3645a_set_strobe, + .fault_get = as3645a_get_fault, +}; + +static int as3645a_setup(struct as3645a *flash) +{ + struct device *dev = &flash->client->dev; + u32 fault = 0; + int rval; + + /* clear errors */ + rval = as3645a_read(flash, AS_FAULT_INFO_REG); + if (rval < 0) + return rval; + + dev_dbg(dev, "Fault info: %02x\n", rval); + + rval = as3645a_set_current(flash); + if (rval < 0) + return rval; + + rval = as3645a_set_timeout(flash); + if (rval < 0) + return rval; + + rval = as3645a_set_control(flash, AS_MODE_INDICATOR, false); + if (rval < 0) + return rval; + + /* read status */ + rval = as3645a_get_fault(&flash->fled, &fault); + if (rval < 0) + return rval; + + dev_dbg(dev, "AS_INDICATOR_AND_TIMER_REG: %02x\n", + as3645a_read(flash, AS_INDICATOR_AND_TIMER_REG)); + dev_dbg(dev, "AS_CURRENT_SET_REG: %02x\n", + as3645a_read(flash, AS_CURRENT_SET_REG)); + dev_dbg(dev, "AS_CONTROL_REG: %02x\n", + as3645a_read(flash, AS_CONTROL_REG)); + + return rval & ~AS_FAULT_INFO_LED_AMOUNT ? -EIO : 0; +} + +static int as3645a_detect(struct as3645a *flash) +{ + struct device *dev = &flash->client->dev; + int rval, man, model, rfu, version; + const char *vendor; + + rval = as3645a_read(flash, AS_DESIGN_INFO_REG); + if (rval < 0) { + dev_err(dev, "can't read design info reg\n"); + return rval; + } + + man = AS_DESIGN_INFO_FACTORY(rval); + model = AS_DESIGN_INFO_MODEL(rval); + + rval = as3645a_read(flash, AS_VERSION_CONTROL_REG); + if (rval < 0) { + dev_err(dev, "can't read version control reg\n"); + return rval; + } + + rfu = AS_VERSION_CONTROL_RFU(rval); + version = AS_VERSION_CONTROL_VERSION(rval); + + /* Verify the chip model and version. 
*/ + if (model != 0x01 || rfu != 0x00) { + dev_err(dev, "AS3645A not detected " + "(model %d rfu %d)\n", model, rfu); + return -ENODEV; + } + + switch (man) { + case 1: + vendor = "AMS, Austria Micro Systems"; + break; + case 2: + vendor = "ADI, Analog Devices Inc."; + break; + case 3: + vendor = "NSC, National Semiconductor"; + break; + case 4: + vendor = "NXP"; + break; + case 5: + vendor = "TI, Texas Instrument"; + break; + default: + vendor = "Unknown"; + } + + dev_info(dev, "Chip vendor: %s (%d) Version: %d\n", vendor, + man, version); + + rval = as3645a_write(flash, AS_PASSWORD_REG, AS_PASSWORD_UNLOCK_VALUE); + if (rval < 0) + return rval; + + return as3645a_write(flash, AS_BOOST_REG, AS_BOOST_CURRENT_DISABLE); +} + +static int as3645a_parse_node(struct as3645a *flash, + struct as3645a_names *names, + struct device_node *node) +{ + struct as3645a_config *cfg = &flash->cfg; + struct device_node *child; + const char *name; + int rval; + + for_each_child_of_node(node, child) { + u32 id = 0; + + of_property_read_u32(child, "reg", &id); + + switch (id) { + case AS_LED_FLASH: + flash->flash_node = of_node_get(child); + break; + case AS_LED_INDICATOR: + flash->indicator_node = of_node_get(child); + break; + default: + dev_warn(&flash->client->dev, + "unknown LED %u encountered, ignoring\n", id); + break; + } + } + + if (!flash->flash_node) { + dev_err(&flash->client->dev, "can't find flash node\n"); + return -ENODEV; + } + + rval = of_property_read_string(flash->flash_node, "label", &name); + if (!rval) + strlcpy(names->flash, name, sizeof(names->flash)); + else + snprintf(names->flash, sizeof(names->flash), + "%s:flash", node->name); + + rval = of_property_read_u32(flash->flash_node, "flash-timeout-us", + &cfg->flash_timeout_us); + if (rval < 0) { + dev_err(&flash->client->dev, + "can't read flash-timeout-us property for flash\n"); + goto out_err; + } + + rval = of_property_read_u32(flash->flash_node, "flash-max-microamp", + &cfg->flash_max_ua); + if (rval < 0) { + dev_err(&flash->client->dev, + "can't read flash-max-microamp property for flash\n"); + goto out_err; + } + + rval = of_property_read_u32(flash->flash_node, "led-max-microamp", + &cfg->assist_max_ua); + if (rval < 0) { + dev_err(&flash->client->dev, + "can't read led-max-microamp property for flash\n"); + goto out_err; + } + + of_property_read_u32(flash->flash_node, "voltage-reference", + &cfg->voltage_reference); + + of_property_read_u32(flash->flash_node, "ams,input-max-microamp", + &cfg->peak); + cfg->peak = AS_PEAK_mA_TO_REG(cfg->peak); + + if (!flash->indicator_node) { + dev_warn(&flash->client->dev, + "can't find indicator node\n"); + goto out_err; + } + + rval = of_property_read_string(flash->indicator_node, "label", &name); + if (!rval) + strlcpy(names->indicator, name, sizeof(names->indicator)); + else + snprintf(names->indicator, sizeof(names->indicator), + "%s:indicator", node->name); + + rval = of_property_read_u32(flash->indicator_node, "led-max-microamp", + &cfg->indicator_max_ua); + if (rval < 0) { + dev_err(&flash->client->dev, + "can't read led-max-microamp property for indicator\n"); + goto out_err; + } + + return 0; + +out_err: + of_node_put(flash->flash_node); + of_node_put(flash->indicator_node); + + return rval; +} + +static int as3645a_led_class_setup(struct as3645a *flash, + struct as3645a_names *names) +{ + struct led_classdev *fled_cdev = &flash->fled.led_cdev; + struct led_classdev *iled_cdev = &flash->iled_cdev; + struct led_flash_setting *cfg; + int rval; + + iled_cdev->name = names->indicator; + 
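	/*
	 * Illustrative sketch only, not part of this patch: the microamp-to-
	 * register mapping done by as3645a_current_to_reg() above is a plain
	 * clamp-and-divide, and the LED class value is shifted by one because
	 * brightness 0 means "off".  The 20000 uA minimum and step below follow
	 * the "register value 0 is 20 mA" comment; the maximum is an assumed
	 * led-max-microamp value, not taken from real hardware.
	 */
	static unsigned int sketch_ua_to_brightness(unsigned int ua)
	{
		const unsigned int min = 20000;		/* assumed: code 0 == 20 mA */
		const unsigned int step = 20000;	/* assumed step per code */
		const unsigned int max = 160000;	/* assumed DT led-max-microamp */

		if (ua < min)
			ua = min;
		if (ua > max)
			ua = max;

		/* register code, then +1 so that LED_OFF (0) stays distinct */
		return (ua - min) / step + 1;
	}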
iled_cdev->brightness_set_blocking = as3645a_set_indicator_brightness; + iled_cdev->max_brightness = + flash->cfg.indicator_max_ua / AS_INDICATOR_INTENSITY_STEP; + iled_cdev->flags = LED_CORE_SUSPENDRESUME; + + rval = led_classdev_register(&flash->client->dev, iled_cdev); + if (rval < 0) + return rval; + + cfg = &flash->fled.brightness; + cfg->min = AS_FLASH_INTENSITY_MIN; + cfg->max = flash->cfg.flash_max_ua; + cfg->step = AS_FLASH_INTENSITY_STEP; + cfg->val = flash->cfg.flash_max_ua; + + cfg = &flash->fled.timeout; + cfg->min = AS_FLASH_TIMEOUT_MIN; + cfg->max = flash->cfg.flash_timeout_us; + cfg->step = AS_FLASH_TIMEOUT_STEP; + cfg->val = flash->cfg.flash_timeout_us; + + flash->fled.ops = &as3645a_led_flash_ops; + + fled_cdev->name = names->flash; + fled_cdev->brightness_set_blocking = as3645a_set_assist_brightness; + /* Value 0 is off in LED class. */ + fled_cdev->max_brightness = + as3645a_current_to_reg(flash, false, + flash->cfg.assist_max_ua) + 1; + fled_cdev->flags = LED_DEV_CAP_FLASH | LED_CORE_SUSPENDRESUME; + + rval = led_classdev_flash_register(&flash->client->dev, &flash->fled); + if (rval) { + led_classdev_unregister(iled_cdev); + dev_err(&flash->client->dev, + "led_classdev_flash_register() failed, error %d\n", + rval); + } + + return rval; +} + +static int as3645a_v4l2_setup(struct as3645a *flash) +{ + struct led_classdev_flash *fled = &flash->fled; + struct led_classdev *led = &fled->led_cdev; + struct v4l2_flash_config cfg = { + .intensity = { + .min = AS_TORCH_INTENSITY_MIN, + .max = flash->cfg.assist_max_ua, + .step = AS_TORCH_INTENSITY_STEP, + .val = flash->cfg.assist_max_ua, + }, + }; + struct v4l2_flash_config cfgind = { + .intensity = { + .min = AS_INDICATOR_INTENSITY_MIN, + .max = flash->cfg.indicator_max_ua, + .step = AS_INDICATOR_INTENSITY_STEP, + .val = flash->cfg.indicator_max_ua, + }, + }; + + strlcpy(cfg.dev_name, led->name, sizeof(cfg.dev_name)); + strlcpy(cfgind.dev_name, flash->iled_cdev.name, sizeof(cfg.dev_name)); + + flash->vf = v4l2_flash_init( + &flash->client->dev, of_fwnode_handle(flash->flash_node), + &flash->fled, NULL, &cfg); + if (IS_ERR(flash->vf)) + return PTR_ERR(flash->vf); + + flash->vfind = v4l2_flash_indicator_init( + &flash->client->dev, of_fwnode_handle(flash->indicator_node), + &flash->iled_cdev, &cfgind); + if (IS_ERR(flash->vfind)) { + v4l2_flash_release(flash->vf); + return PTR_ERR(flash->vfind); + } + + return 0; +} + +static int as3645a_probe(struct i2c_client *client) +{ + struct as3645a_names names; + struct as3645a *flash; + int rval; + + if (client->dev.of_node == NULL) + return -ENODEV; + + flash = devm_kzalloc(&client->dev, sizeof(*flash), GFP_KERNEL); + if (flash == NULL) + return -ENOMEM; + + flash->client = client; + + rval = as3645a_parse_node(flash, &names, client->dev.of_node); + if (rval < 0) + return rval; + + rval = as3645a_detect(flash); + if (rval < 0) + goto out_put_nodes; + + mutex_init(&flash->mutex); + i2c_set_clientdata(client, flash); + + rval = as3645a_setup(flash); + if (rval) + goto out_mutex_destroy; + + rval = as3645a_led_class_setup(flash, &names); + if (rval) + goto out_mutex_destroy; + + rval = as3645a_v4l2_setup(flash); + if (rval) + goto out_led_classdev_flash_unregister; + + return 0; + +out_led_classdev_flash_unregister: + led_classdev_flash_unregister(&flash->fled); + +out_mutex_destroy: + mutex_destroy(&flash->mutex); + +out_put_nodes: + of_node_put(flash->flash_node); + of_node_put(flash->indicator_node); + + return rval; +} + +static int as3645a_remove(struct i2c_client *client) +{ + struct 
as3645a *flash = i2c_get_clientdata(client); + + as3645a_set_control(flash, AS_MODE_EXT_TORCH, false); + + v4l2_flash_release(flash->vf); + v4l2_flash_release(flash->vfind); + + led_classdev_flash_unregister(&flash->fled); + led_classdev_unregister(&flash->iled_cdev); + + mutex_destroy(&flash->mutex); + + of_node_put(flash->flash_node); + of_node_put(flash->indicator_node); + + return 0; +} + +static const struct i2c_device_id as3645a_id_table[] = { + { AS_NAME, 0 }, + { }, +}; +MODULE_DEVICE_TABLE(i2c, as3645a_id_table); + +static const struct of_device_id as3645a_of_table[] = { + { .compatible = "ams,as3645a" }, + { }, +}; +MODULE_DEVICE_TABLE(of, as3645a_of_table); + +static struct i2c_driver as3645a_i2c_driver = { + .driver = { + .of_match_table = as3645a_of_table, + .name = AS_NAME, + }, + .probe_new = as3645a_probe, + .remove = as3645a_remove, + .id_table = as3645a_id_table, +}; + +module_i2c_driver(as3645a_i2c_driver); + +MODULE_AUTHOR("Laurent Pinchart "); +MODULE_AUTHOR("Sakari Ailus "); +MODULE_DESCRIPTION("LED flash driver for AS3645A, LM3555 and their clones"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/leds/leds-blinkm.c b/drivers/leds/leds-blinkm.c index 617fe975bf6e..d03ed6b4176b 100644 --- a/drivers/leds/leds-blinkm.c +++ b/drivers/leds/leds-blinkm.c @@ -298,7 +298,7 @@ static struct attribute *blinkm_attrs[] = { NULL, }; -static struct attribute_group blinkm_group = { +static const struct attribute_group blinkm_group = { .name = "blinkm", .attrs = blinkm_attrs, }; diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c index 0f9ed1ea0e89..492789f56896 100644 --- a/drivers/leds/leds-clevo-mail.c +++ b/drivers/leds/leds-clevo-mail.c @@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id) * detected as working, but in reality it is not) as low as * possible. 
*/ -static struct dmi_system_id clevo_mail_led_dmi_table[] __initdata = { +static const struct dmi_system_id clevo_mail_led_dmi_table[] __initconst = { { .callback = clevo_mail_led_dmi_callback, .ident = "Clevo D410J", diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c index e753ba93ba1e..764c31301f90 100644 --- a/drivers/leds/leds-gpio.c +++ b/drivers/leds/leds-gpio.c @@ -134,6 +134,8 @@ static int create_gpio_led(const struct gpio_led *template, led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME; if (template->panic_indicator) led_dat->cdev.flags |= LED_PANIC_INDICATOR; + if (template->retain_state_shutdown) + led_dat->cdev.flags |= LED_RETAIN_AT_SHUTDOWN; ret = gpiod_direction_output(led_dat->gpiod, state); if (ret < 0) @@ -205,6 +207,8 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev) if (fwnode_property_present(child, "retain-state-suspended")) led.retain_state_suspended = 1; + if (fwnode_property_present(child, "retain-state-shutdown")) + led.retain_state_shutdown = 1; if (fwnode_property_present(child, "panic-indicator")) led.panic_indicator = 1; @@ -267,7 +271,8 @@ static void gpio_led_shutdown(struct platform_device *pdev) for (i = 0; i < priv->num_leds; i++) { struct gpio_led_data *led = &priv->leds[i]; - gpio_led_set(&led->cdev, LED_OFF); + if (!(led->cdev.flags & LED_RETAIN_AT_SHUTDOWN)) + gpio_led_set(&led->cdev, LED_OFF); } } diff --git a/drivers/leds/leds-is31fl32xx.c b/drivers/leds/leds-is31fl32xx.c index 478844c5cead..31a9d749c8be 100644 --- a/drivers/leds/leds-is31fl32xx.c +++ b/drivers/leds/leds-is31fl32xx.c @@ -348,8 +348,8 @@ static int is31fl32xx_parse_child_dt(const struct device *dev, ret = of_property_read_u32(child, "reg", ®); if (ret || reg < 1 || reg > led_data->priv->cdef->channels) { dev_err(dev, - "Child node %s does not have a valid reg property\n", - child->full_name); + "Child node %pOF does not have a valid reg property\n", + child); return -EINVAL; } led_data->channel = reg; diff --git a/drivers/leds/leds-lm3533.c b/drivers/leds/leds-lm3533.c index 5b529dc013d2..72224b599ffc 100644 --- a/drivers/leds/leds-lm3533.c +++ b/drivers/leds/leds-lm3533.c @@ -626,7 +626,7 @@ static umode_t lm3533_led_attr_is_visible(struct kobject *kobj, return mode; }; -static struct attribute_group lm3533_led_attribute_group = { +static const struct attribute_group lm3533_led_attribute_group = { .is_visible = lm3533_led_attr_is_visible, .attrs = lm3533_led_attributes }; diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c index f53c8cda1bde..55c0517fbe03 100644 --- a/drivers/leds/leds-lp5521.c +++ b/drivers/leds/leds-lp5521.c @@ -134,13 +134,13 @@ static void lp5521_set_led_current(struct lp55xx_led *led, u8 led_current) static void lp5521_load_engine(struct lp55xx_chip *chip) { enum lp55xx_engine_index idx = chip->engine_idx; - u8 mask[] = { + static const u8 mask[] = { [LP55XX_ENGINE_1] = LP5521_MODE_R_M, [LP55XX_ENGINE_2] = LP5521_MODE_G_M, [LP55XX_ENGINE_3] = LP5521_MODE_B_M, }; - u8 val[] = { + static const u8 val[] = { [LP55XX_ENGINE_1] = LP5521_LOAD_R, [LP55XX_ENGINE_2] = LP5521_LOAD_G, [LP55XX_ENGINE_3] = LP5521_LOAD_B, @@ -160,7 +160,7 @@ static void lp5521_stop_all_engines(struct lp55xx_chip *chip) static void lp5521_stop_engine(struct lp55xx_chip *chip) { enum lp55xx_engine_index idx = chip->engine_idx; - u8 mask[] = { + static const u8 mask[] = { [LP55XX_ENGINE_1] = LP5521_MODE_R_M, [LP55XX_ENGINE_2] = LP5521_MODE_G_M, [LP55XX_ENGINE_3] = LP5521_MODE_B_M, @@ -226,7 +226,7 @@ static int 
lp5521_update_program_memory(struct lp55xx_chip *chip, { enum lp55xx_engine_index idx = chip->engine_idx; u8 pattern[LP5521_PROGRAM_LENGTH] = {0}; - u8 addr[] = { + static const u8 addr[] = { [LP55XX_ENGINE_1] = LP5521_REG_R_PROG_MEM, [LP55XX_ENGINE_2] = LP5521_REG_G_PROG_MEM, [LP55XX_ENGINE_3] = LP5521_REG_B_PROG_MEM, diff --git a/drivers/leds/leds-lp5562.c b/drivers/leds/leds-lp5562.c index 90892585bcb5..05ffa34fb6ad 100644 --- a/drivers/leds/leds-lp5562.c +++ b/drivers/leds/leds-lp5562.c @@ -116,7 +116,7 @@ static inline void lp5562_wait_enable_done(void) static void lp5562_set_led_current(struct lp55xx_led *led, u8 led_current) { - u8 addr[] = { + static const u8 addr[] = { LP5562_REG_R_CURRENT, LP5562_REG_G_CURRENT, LP5562_REG_B_CURRENT, @@ -130,13 +130,13 @@ static void lp5562_set_led_current(struct lp55xx_led *led, u8 led_current) static void lp5562_load_engine(struct lp55xx_chip *chip) { enum lp55xx_engine_index idx = chip->engine_idx; - u8 mask[] = { + static const u8 mask[] = { [LP55XX_ENGINE_1] = LP5562_MODE_ENG1_M, [LP55XX_ENGINE_2] = LP5562_MODE_ENG2_M, [LP55XX_ENGINE_3] = LP5562_MODE_ENG3_M, }; - u8 val[] = { + static const u8 val[] = { [LP55XX_ENGINE_1] = LP5562_LOAD_ENG1, [LP55XX_ENGINE_2] = LP5562_LOAD_ENG2, [LP55XX_ENGINE_3] = LP5562_LOAD_ENG3, @@ -211,7 +211,7 @@ static int lp5562_update_firmware(struct lp55xx_chip *chip, { enum lp55xx_engine_index idx = chip->engine_idx; u8 pattern[LP5562_PROGRAM_LENGTH] = {0}; - u8 addr[] = { + static const u8 addr[] = { [LP55XX_ENGINE_1] = LP5562_REG_PROG_MEM_ENG1, [LP55XX_ENGINE_2] = LP5562_REG_PROG_MEM_ENG2, [LP55XX_ENGINE_3] = LP5562_REG_PROG_MEM_ENG3, @@ -314,7 +314,7 @@ static int lp5562_post_init_device(struct lp55xx_chip *chip) static int lp5562_led_brightness(struct lp55xx_led *led) { struct lp55xx_chip *chip = led->chip; - u8 addr[] = { + static const u8 addr[] = { LP5562_REG_R_PWM, LP5562_REG_G_PWM, LP5562_REG_B_PWM, diff --git a/drivers/leds/leds-lp8501.c b/drivers/leds/leds-lp8501.c index 3f9675bd214a..3adb113cf02e 100644 --- a/drivers/leds/leds-lp8501.c +++ b/drivers/leds/leds-lp8501.c @@ -118,19 +118,19 @@ static int lp8501_post_init_device(struct lp55xx_chip *chip) static void lp8501_load_engine(struct lp55xx_chip *chip) { enum lp55xx_engine_index idx = chip->engine_idx; - u8 mask[] = { + static const u8 mask[] = { [LP55XX_ENGINE_1] = LP8501_MODE_ENG1_M, [LP55XX_ENGINE_2] = LP8501_MODE_ENG2_M, [LP55XX_ENGINE_3] = LP8501_MODE_ENG3_M, }; - u8 val[] = { + static const u8 val[] = { [LP55XX_ENGINE_1] = LP8501_LOAD_ENG1, [LP55XX_ENGINE_2] = LP8501_LOAD_ENG2, [LP55XX_ENGINE_3] = LP8501_LOAD_ENG3, }; - u8 page_sel[] = { + static const u8 page_sel[] = { [LP55XX_ENGINE_1] = LP8501_PAGE_ENG1, [LP55XX_ENGINE_2] = LP8501_PAGE_ENG2, [LP55XX_ENGINE_3] = LP8501_PAGE_ENG3, diff --git a/drivers/leds/leds-max77693.c b/drivers/leds/leds-max77693.c index 2d3062d53325..adf0f191f794 100644 --- a/drivers/leds/leds-max77693.c +++ b/drivers/leds/leds-max77693.c @@ -856,7 +856,7 @@ static void max77693_init_v4l2_flash_config(struct max77693_sub_led *sub_led, "%s %d-%04x", sub_led->fled_cdev.led_cdev.name, i2c_adapter_id(i2c->adapter), i2c->addr); - s = &v4l2_sd_cfg->torch_intensity; + s = &v4l2_sd_cfg->intensity; s->min = TORCH_IOUT_MIN; s->max = sub_led->fled_cdev.led_cdev.max_brightness * TORCH_IOUT_STEP; s->step = TORCH_IOUT_STEP; @@ -931,7 +931,7 @@ static int max77693_register_led(struct max77693_sub_led *sub_led, /* Register in the V4L2 subsystem. 
*/ sub_led->v4l2_flash = v4l2_flash_init(dev, of_fwnode_handle(sub_node), - fled_cdev, NULL, &v4l2_flash_ops, + fled_cdev, &v4l2_flash_ops, &v4l2_sd_cfg); if (IS_ERR(sub_led->v4l2_flash)) { ret = PTR_ERR(sub_led->v4l2_flash); diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c index 9a873118ea5f..905729191d3e 100644 --- a/drivers/leds/leds-pca955x.c +++ b/drivers/leds/leds-pca955x.c @@ -41,14 +41,19 @@ */ #include -#include -#include -#include #include -#include +#include #include +#include #include +#include +#include +#include +#include #include +#include + +#include /* LED select registers determine the source that drives LED outputs */ #define PCA955X_LS_LED_ON 0x0 /* Output LOW */ @@ -115,6 +120,9 @@ struct pca955x { struct pca955x_led *leds; struct pca955x_chipdef *chipdef; struct i2c_client *client; +#ifdef CONFIG_LEDS_PCA955X_GPIO + struct gpio_chip gpio; +#endif }; struct pca955x_led { @@ -122,6 +130,13 @@ struct pca955x_led { struct led_classdev led_cdev; int led_num; /* 0 .. 15 potentially */ char name[32]; + u32 type; + const char *default_trigger; +}; + +struct pca955x_platform_data { + struct pca955x_led *leds; + int num_leds; }; /* 8 bits per input register */ @@ -150,13 +165,18 @@ static inline u8 pca955x_ledsel(u8 oldval, int led_num, int state) * Write to frequency prescaler register, used to program the * period of the PWM output. period = (PSCx + 1) / 38 */ -static void pca955x_write_psc(struct i2c_client *client, int n, u8 val) +static int pca955x_write_psc(struct i2c_client *client, int n, u8 val) { struct pca955x *pca955x = i2c_get_clientdata(client); + int ret; - i2c_smbus_write_byte_data(client, + ret = i2c_smbus_write_byte_data(client, pca95xx_num_input_regs(pca955x->chipdef->bits) + 2*n, val); + if (ret < 0) + dev_err(&client->dev, "%s: reg 0x%x, val 0x%x, err %d\n", + __func__, n, val, ret); + return ret; } /* @@ -166,38 +186,56 @@ static void pca955x_write_psc(struct i2c_client *client, int n, u8 val) * * Duty cycle is (256 - PWMx) / 256 */ -static void pca955x_write_pwm(struct i2c_client *client, int n, u8 val) +static int pca955x_write_pwm(struct i2c_client *client, int n, u8 val) { struct pca955x *pca955x = i2c_get_clientdata(client); + int ret; - i2c_smbus_write_byte_data(client, + ret = i2c_smbus_write_byte_data(client, pca95xx_num_input_regs(pca955x->chipdef->bits) + 1 + 2*n, val); + if (ret < 0) + dev_err(&client->dev, "%s: reg 0x%x, val 0x%x, err %d\n", + __func__, n, val, ret); + return ret; } /* * Write to LED selector register, which determines the source that * drives the LED output. */ -static void pca955x_write_ls(struct i2c_client *client, int n, u8 val) +static int pca955x_write_ls(struct i2c_client *client, int n, u8 val) { struct pca955x *pca955x = i2c_get_clientdata(client); + int ret; - i2c_smbus_write_byte_data(client, + ret = i2c_smbus_write_byte_data(client, pca95xx_num_input_regs(pca955x->chipdef->bits) + 4 + n, val); + if (ret < 0) + dev_err(&client->dev, "%s: reg 0x%x, val 0x%x, err %d\n", + __func__, n, val, ret); + return ret; } /* * Read the LED selector register, which determines the source that * drives the LED output. 
*/ -static u8 pca955x_read_ls(struct i2c_client *client, int n) +static int pca955x_read_ls(struct i2c_client *client, int n, u8 *val) { struct pca955x *pca955x = i2c_get_clientdata(client); + int ret; - return (u8) i2c_smbus_read_byte_data(client, + ret = i2c_smbus_read_byte_data(client, pca95xx_num_input_regs(pca955x->chipdef->bits) + 4 + n); + if (ret < 0) { + dev_err(&client->dev, "%s: reg 0x%x, err %d\n", + __func__, n, ret); + return ret; + } + *val = (u8)ret; + return 0; } static int pca955x_led_set(struct led_classdev *led_cdev, @@ -208,6 +246,7 @@ static int pca955x_led_set(struct led_classdev *led_cdev, u8 ls; int chip_ls; /* which LSx to use (0-3 potentially) */ int ls_led; /* which set of bits within LSx to use (0-3) */ + int ret; pca955x_led = container_of(led_cdev, struct pca955x_led, led_cdev); pca955x = pca955x_led->pca955x; @@ -217,7 +256,9 @@ static int pca955x_led_set(struct led_classdev *led_cdev, mutex_lock(&pca955x->lock); - ls = pca955x_read_ls(pca955x->client, chip_ls); + ret = pca955x_read_ls(pca955x->client, chip_ls, &ls); + if (ret) + goto out; switch (value) { case LED_FULL: @@ -237,19 +278,160 @@ static int pca955x_led_set(struct led_classdev *led_cdev, * OFF, HALF, or FULL. But, this is probably better than * just turning off for all other values. */ - pca955x_write_pwm(pca955x->client, 1, - 255 - value); + ret = pca955x_write_pwm(pca955x->client, 1, 255 - value); + if (ret) + goto out; ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_BLINK1); break; } - pca955x_write_ls(pca955x->client, chip_ls, ls); + ret = pca955x_write_ls(pca955x->client, chip_ls, ls); +out: mutex_unlock(&pca955x->lock); - return 0; + return ret; } +#ifdef CONFIG_LEDS_PCA955X_GPIO +/* + * Read the INPUT register, which contains the state of LEDs. + */ +static int pca955x_read_input(struct i2c_client *client, int n, u8 *val) +{ + int ret = i2c_smbus_read_byte_data(client, n); + + if (ret < 0) { + dev_err(&client->dev, "%s: reg 0x%x, err %d\n", + __func__, n, ret); + return ret; + } + *val = (u8)ret; + return 0; + +} + +static int pca955x_gpio_request_pin(struct gpio_chip *gc, unsigned int offset) +{ + struct pca955x *pca955x = gpiochip_get_data(gc); + struct pca955x_led *led = &pca955x->leds[offset]; + + if (led->type == PCA955X_TYPE_GPIO) + return 0; + + return -EBUSY; +} + +static int pca955x_set_value(struct gpio_chip *gc, unsigned int offset, + int val) +{ + struct pca955x *pca955x = gpiochip_get_data(gc); + struct pca955x_led *led = &pca955x->leds[offset]; + + if (val) + return pca955x_led_set(&led->led_cdev, LED_FULL); + else + return pca955x_led_set(&led->led_cdev, LED_OFF); +} + +static void pca955x_gpio_set_value(struct gpio_chip *gc, unsigned int offset, + int val) +{ + pca955x_set_value(gc, offset, val); +} + +static int pca955x_gpio_get_value(struct gpio_chip *gc, unsigned int offset) +{ + struct pca955x *pca955x = gpiochip_get_data(gc); + struct pca955x_led *led = &pca955x->leds[offset]; + u8 reg = 0; + + /* There is nothing we can do about errors */ + pca955x_read_input(pca955x->client, led->led_num / 8, ®); + + return !!(reg & (1 << (led->led_num % 8))); +} + +static int pca955x_gpio_direction_input(struct gpio_chip *gc, + unsigned int offset) +{ + /* To use as input ensure pin is not driven */ + return pca955x_set_value(gc, offset, 0); +} + +static int pca955x_gpio_direction_output(struct gpio_chip *gc, + unsigned int offset, int val) +{ + return pca955x_set_value(gc, offset, val); +} +#endif /* CONFIG_LEDS_PCA955X_GPIO */ + +#if IS_ENABLED(CONFIG_OF) +static struct 
pca955x_platform_data * +pca955x_pdata_of_init(struct i2c_client *client, struct pca955x_chipdef *chip) +{ + struct device_node *np = client->dev.of_node; + struct device_node *child; + struct pca955x_platform_data *pdata; + int count; + + count = of_get_child_count(np); + if (!count || count > chip->bits) + return ERR_PTR(-ENODEV); + + pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return ERR_PTR(-ENOMEM); + + pdata->leds = devm_kzalloc(&client->dev, + sizeof(struct pca955x_led) * chip->bits, + GFP_KERNEL); + if (!pdata->leds) + return ERR_PTR(-ENOMEM); + + for_each_child_of_node(np, child) { + const char *name; + u32 reg; + int res; + + res = of_property_read_u32(child, "reg", ®); + if ((res != 0) || (reg >= chip->bits)) + continue; + + if (of_property_read_string(child, "label", &name)) + name = child->name; + + snprintf(pdata->leds[reg].name, sizeof(pdata->leds[reg].name), + "%s", name); + + pdata->leds[reg].type = PCA955X_TYPE_LED; + of_property_read_u32(child, "type", &pdata->leds[reg].type); + of_property_read_string(child, "linux,default-trigger", + &pdata->leds[reg].default_trigger); + } + + pdata->num_leds = chip->bits; + + return pdata; +} + +static const struct of_device_id of_pca955x_match[] = { + { .compatible = "nxp,pca9550", .data = (void *)pca9550 }, + { .compatible = "nxp,pca9551", .data = (void *)pca9551 }, + { .compatible = "nxp,pca9552", .data = (void *)pca9552 }, + { .compatible = "nxp,pca9553", .data = (void *)pca9553 }, + {}, +}; + +MODULE_DEVICE_TABLE(of, of_pca955x_match); +#else +static struct pca955x_platform_data * +pca955x_pdata_of_init(struct i2c_client *client, struct pca955x_chipdef *chip) +{ + return ERR_PTR(-ENODEV); +} +#endif + static int pca955x_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -257,8 +439,9 @@ static int pca955x_probe(struct i2c_client *client, struct pca955x_led *pca955x_led; struct pca955x_chipdef *chip; struct i2c_adapter *adapter; - struct led_platform_data *pdata; int i, err; + struct pca955x_platform_data *pdata; + int ngpios = 0; if (id) { chip = &pca955x_chipdefs[id->driver_data]; @@ -272,6 +455,11 @@ static int pca955x_probe(struct i2c_client *client, } adapter = to_i2c_adapter(client->dev.parent); pdata = dev_get_platdata(&client->dev); + if (!pdata) { + pdata = pca955x_pdata_of_init(client, chip); + if (IS_ERR(pdata)) + return PTR_ERR(pdata); + } /* Make sure the slave address / chip type combo given is possible */ if ((client->addr & ~((1 << chip->slv_addr_shift) - 1)) != @@ -288,13 +476,11 @@ static int pca955x_probe(struct i2c_client *client, if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -EIO; - if (pdata) { - if (pdata->num_leds != chip->bits) { - dev_err(&client->dev, "board info claims %d LEDs" - " on a %d-bit chip\n", - pdata->num_leds, chip->bits); - return -ENODEV; - } + if (pdata->num_leds != chip->bits) { + dev_err(&client->dev, + "board info claims %d LEDs on a %d-bit chip\n", + pdata->num_leds, chip->bits); + return -ENODEV; } pca955x = devm_kzalloc(&client->dev, sizeof(*pca955x), GFP_KERNEL); @@ -316,60 +502,92 @@ static int pca955x_probe(struct i2c_client *client, pca955x_led = &pca955x->leds[i]; pca955x_led->led_num = i; pca955x_led->pca955x = pca955x; + pca955x_led->type = pdata->leds[i].type; + + switch (pca955x_led->type) { + case PCA955X_TYPE_NONE: + break; + case PCA955X_TYPE_GPIO: + ngpios++; + break; + case PCA955X_TYPE_LED: + /* + * Platform data can specify LED names and + * default triggers + */ + if 
(pdata->leds[i].name[0] == '\0') + snprintf(pdata->leds[i].name, + sizeof(pdata->leds[i].name), "%d", i); + + snprintf(pca955x_led->name, + sizeof(pca955x_led->name), "pca955x:%s", + pdata->leds[i].name); - /* Platform data can specify LED names and default triggers */ - if (pdata) { - if (pdata->leds[i].name) - snprintf(pca955x_led->name, - sizeof(pca955x_led->name), "pca955x:%s", - pdata->leds[i].name); if (pdata->leds[i].default_trigger) pca955x_led->led_cdev.default_trigger = pdata->leds[i].default_trigger; - } else { - snprintf(pca955x_led->name, sizeof(pca955x_led->name), - "pca955x:%d", i); + + pca955x_led->led_cdev.name = pca955x_led->name; + pca955x_led->led_cdev.brightness_set_blocking = + pca955x_led_set; + + err = devm_led_classdev_register(&client->dev, + &pca955x_led->led_cdev); + if (err) + return err; + + /* Turn off LED */ + err = pca955x_led_set(&pca955x_led->led_cdev, LED_OFF); + if (err) + return err; } - - pca955x_led->led_cdev.name = pca955x_led->name; - pca955x_led->led_cdev.brightness_set_blocking = pca955x_led_set; - - err = led_classdev_register(&client->dev, - &pca955x_led->led_cdev); - if (err < 0) - goto exit; } - /* Turn off LEDs */ - for (i = 0; i < pca95xx_num_led_regs(chip->bits); i++) - pca955x_write_ls(client, i, 0x55); - /* PWM0 is used for half brightness or 50% duty cycle */ - pca955x_write_pwm(client, 0, 255-LED_HALF); + err = pca955x_write_pwm(client, 0, 255 - LED_HALF); + if (err) + return err; /* PWM1 is used for variable brightness, default to OFF */ - pca955x_write_pwm(client, 1, 0); + err = pca955x_write_pwm(client, 1, 0); + if (err) + return err; /* Set to fast frequency so we do not see flashing */ - pca955x_write_psc(client, 0, 0); - pca955x_write_psc(client, 1, 0); + err = pca955x_write_psc(client, 0, 0); + if (err) + return err; + err = pca955x_write_psc(client, 1, 0); + if (err) + return err; - return 0; +#ifdef CONFIG_LEDS_PCA955X_GPIO + if (ngpios) { + pca955x->gpio.label = "gpio-pca955x"; + pca955x->gpio.direction_input = pca955x_gpio_direction_input; + pca955x->gpio.direction_output = pca955x_gpio_direction_output; + pca955x->gpio.set = pca955x_gpio_set_value; + pca955x->gpio.get = pca955x_gpio_get_value; + pca955x->gpio.request = pca955x_gpio_request_pin; + pca955x->gpio.can_sleep = 1; + pca955x->gpio.base = -1; + pca955x->gpio.ngpio = ngpios; + pca955x->gpio.parent = &client->dev; + pca955x->gpio.owner = THIS_MODULE; -exit: - while (i--) - led_classdev_unregister(&pca955x->leds[i].led_cdev); - - return err; -} - -static int pca955x_remove(struct i2c_client *client) -{ - struct pca955x *pca955x = i2c_get_clientdata(client); - int i; - - for (i = 0; i < pca955x->chipdef->bits; i++) - led_classdev_unregister(&pca955x->leds[i].led_cdev); + err = devm_gpiochip_add_data(&client->dev, &pca955x->gpio, + pca955x); + if (err) { + /* Use data->gpio.dev as a flag for freeing gpiochip */ + pca955x->gpio.parent = NULL; + dev_warn(&client->dev, "could not add gpiochip\n"); + return err; + } + dev_info(&client->dev, "gpios %i...%i\n", + pca955x->gpio.base, pca955x->gpio.base + + pca955x->gpio.ngpio - 1); + } +#endif return 0; } @@ -378,9 +596,9 @@ static struct i2c_driver pca955x_driver = { .driver = { .name = "leds-pca955x", .acpi_match_table = ACPI_PTR(pca955x_acpi_ids), + .of_match_table = of_match_ptr(of_pca955x_match), }, .probe = pca955x_probe, - .remove = pca955x_remove, .id_table = pca955x_id, }; diff --git a/drivers/leds/leds-powernv.c b/drivers/leds/leds-powernv.c index b2a98c7b521b..b1adbd70ce2e 100644 --- a/drivers/leds/leds-powernv.c 
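For the pca955x hunks above: the register write helpers now return the raw i2c_smbus_write_byte_data() result and log failures, so the probe path can bail out instead of silently continuing. A minimal sketch of that wrap-and-log pattern, assuming a hypothetical register offset (the real offsets depend on pca95xx_num_input_regs()):

#include <linux/device.h>
#include <linux/i2c.h>

#define DEMO_PWM0_REG	0x03	/* hypothetical offset, for illustration only */

/* Duty cycle programmed here is (256 - val) / 256, per the comment above. */
static int demo_write_pwm0(struct i2c_client *client, u8 val)
{
	int ret = i2c_smbus_write_byte_data(client, DEMO_PWM0_REG, val);

	if (ret < 0)
		dev_err(&client->dev, "PWM0 write 0x%02x failed: %d\n",
			val, ret);
	return ret;	/* callers propagate this instead of ignoring it */
}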
+++ b/drivers/leds/leds-powernv.c @@ -224,12 +224,8 @@ static int powernv_led_create(struct device *dev, powernv_led->cdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", powernv_led->loc_code, led_type_desc); - if (!powernv_led->cdev.name) { - dev_err(dev, - "%s: Memory allocation failed for classdev name\n", - __func__); + if (!powernv_led->cdev.name) return -ENOMEM; - } powernv_led->cdev.brightness_set_blocking = powernv_brightness_set; powernv_led->cdev.brightness_get = powernv_brightness_get; diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c index 732eb86bc1a5..a9db8674cd02 100644 --- a/drivers/leds/leds-ss4200.c +++ b/drivers/leds/leds-ss4200.c @@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection"); * detected as working, but in reality it is not) as low as * possible. */ -static struct dmi_system_id nas_led_whitelist[] __initdata = { +static const struct dmi_system_id nas_led_whitelist[] __initconst = { { .callback = ss4200_led_dmi_callback, .ident = "Intel SS4200-E", diff --git a/drivers/leds/leds-tlc591xx.c b/drivers/leds/leds-tlc591xx.c index 304531644938..f5357f6d9e58 100644 --- a/drivers/leds/leds-tlc591xx.c +++ b/drivers/leds/leds-tlc591xx.c @@ -230,12 +230,15 @@ tlc591xx_probe(struct i2c_client *client, for_each_child_of_node(np, child) { err = of_property_read_u32(child, "reg", ®); - if (err) + if (err) { + of_node_put(child); return err; - if (reg < 0 || reg >= tlc591xx->max_leds) - return -EINVAL; - if (priv->leds[reg].active) + } + if (reg < 0 || reg >= tlc591xx->max_leds || + priv->leds[reg].active) { + of_node_put(child); return -EINVAL; + } priv->leds[reg].active = true; priv->leds[reg].ldev.name = of_get_property(child, "label", NULL) ? : child->name; diff --git a/drivers/macintosh/macio_sysfs.c b/drivers/macintosh/macio_sysfs.c index 2445274f7e4b..281f5345661e 100644 --- a/drivers/macintosh/macio_sysfs.c +++ b/drivers/macintosh/macio_sysfs.c @@ -52,7 +52,7 @@ static ssize_t devspec_show(struct device *dev, struct platform_device *ofdev; ofdev = to_platform_device(dev); - return sprintf(buf, "%s\n", ofdev->dev.of_node->full_name); + return sprintf(buf, "%pOF\n", ofdev->dev.of_node); } static DEVICE_ATTR_RO(modalias); static DEVICE_ATTR_RO(devspec); diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c index e199fd6c71ce..910b5b6f96b1 100644 --- a/drivers/macintosh/rack-meter.c +++ b/drivers/macintosh/rack-meter.c @@ -411,16 +411,16 @@ static int rackmeter_probe(struct macio_dev* mdev, #if 0 /* Use that when i2s-a is finally an mdev per-se */ if (macio_resource_count(mdev) < 2 || macio_irq_count(mdev) < 2) { printk(KERN_ERR - "rackmeter: found match but lacks resources: %s" + "rackmeter: found match but lacks resources: %pOF" " (%d resources, %d interrupts)\n", - mdev->ofdev.node->full_name); + mdev->ofdev.dev.of_node); rc = -ENXIO; goto bail_free; } if (macio_request_resources(mdev, "rackmeter")) { printk(KERN_ERR - "rackmeter: failed to request resources: %s\n", - mdev->ofdev.node->full_name); + "rackmeter: failed to request resources: %pOF\n", + mdev->ofdev.dev.of_node); rc = -EBUSY; goto bail_free; } @@ -431,8 +431,8 @@ static int rackmeter_probe(struct macio_dev* mdev, of_address_to_resource(i2s, 0, &ri2s) || of_address_to_resource(i2s, 1, &rdma)) { printk(KERN_ERR - "rackmeter: found match but lacks resources: %s", - mdev->ofdev.dev.of_node->full_name); + "rackmeter: found match but lacks resources: %pOF", + mdev->ofdev.dev.of_node); rc = -ENXIO; goto bail_free; } @@ -579,7 +579,7 @@ static int 
rackmeter_shutdown(struct macio_dev* mdev) return 0; } -static struct of_device_id rackmeter_match[] = { +static const struct of_device_id rackmeter_match[] = { { .name = "i2s" }, { } }; diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c index 1ac66421877a..ea9bdc85a21d 100644 --- a/drivers/macintosh/smu.c +++ b/drivers/macintosh/smu.c @@ -589,14 +589,14 @@ static int smu_late_init(void) if (smu->db_node) { smu->db_irq = irq_of_parse_and_map(smu->db_node, 0); if (!smu->db_irq) - printk(KERN_ERR "smu: failed to map irq for node %s\n", - smu->db_node->full_name); + printk(KERN_ERR "smu: failed to map irq for node %pOF\n", + smu->db_node); } if (smu->msg_node) { smu->msg_irq = irq_of_parse_and_map(smu->msg_node, 0); if (!smu->msg_irq) - printk(KERN_ERR "smu: failed to map irq for node %s\n", - smu->msg_node->full_name); + printk(KERN_ERR "smu: failed to map irq for node %pOF\n", + smu->msg_node); } /* diff --git a/drivers/macintosh/via-cuda.c b/drivers/macintosh/via-cuda.c index c60415958dfe..147da4edd021 100644 --- a/drivers/macintosh/via-cuda.c +++ b/drivers/macintosh/via-cuda.c @@ -297,8 +297,8 @@ static int __init via_cuda_start(void) #else cuda_irq = irq_of_parse_and_map(vias, 0); if (!cuda_irq) { - printk(KERN_ERR "via-cuda: can't map interrupts for %s\n", - vias->full_name); + printk(KERN_ERR "via-cuda: can't map interrupts for %pOF\n", + vias); return -ENODEV; } #endif diff --git a/drivers/macintosh/windfarm_cpufreq_clamp.c b/drivers/macintosh/windfarm_cpufreq_clamp.c index 72d1fdfe02a5..2626990331dc 100644 --- a/drivers/macintosh/windfarm_cpufreq_clamp.c +++ b/drivers/macintosh/windfarm_cpufreq_clamp.c @@ -63,7 +63,7 @@ static s32 clamp_max(struct wf_control *ct) return 1; } -static struct wf_control_ops clamp_ops = { +static const struct wf_control_ops clamp_ops = { .set_value = clamp_set, .get_value = clamp_get, .get_min = clamp_min, diff --git a/drivers/macintosh/windfarm_fcu_controls.c b/drivers/macintosh/windfarm_fcu_controls.c index 0226b796a21c..fab7a21e9577 100644 --- a/drivers/macintosh/windfarm_fcu_controls.c +++ b/drivers/macintosh/windfarm_fcu_controls.c @@ -470,8 +470,8 @@ static void wf_fcu_lookup_fans(struct wf_fcu_priv *pv) id = ((*reg) - 0x30) / 2; if (id > 7) { pr_warning("wf_fcu: Can't parse " - "fan ID in device-tree for %s\n", - np->full_name); + "fan ID in device-tree for %pOF\n", + np); break; } wf_fcu_add_fan(pv, name, type, id); diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c index 590214ba736c..6cdfe714901d 100644 --- a/drivers/macintosh/windfarm_lm75_sensor.c +++ b/drivers/macintosh/windfarm_lm75_sensor.c @@ -82,7 +82,7 @@ static void wf_lm75_release(struct wf_sensor *sr) kfree(lm); } -static struct wf_sensor_ops wf_lm75_ops = { +static const struct wf_sensor_ops wf_lm75_ops = { .get_value = wf_lm75_get, .release = wf_lm75_release, .owner = THIS_MODULE, diff --git a/drivers/macintosh/windfarm_lm87_sensor.c b/drivers/macintosh/windfarm_lm87_sensor.c index c071aab79dd1..35aa571d498a 100644 --- a/drivers/macintosh/windfarm_lm87_sensor.c +++ b/drivers/macintosh/windfarm_lm87_sensor.c @@ -91,7 +91,7 @@ static void wf_lm87_release(struct wf_sensor *sr) kfree(lm); } -static struct wf_sensor_ops wf_lm87_ops = { +static const struct wf_sensor_ops wf_lm87_ops = { .get_value = wf_lm87_get, .release = wf_lm87_release, .owner = THIS_MODULE, @@ -126,8 +126,8 @@ static int wf_lm87_probe(struct i2c_client *client, } } if (!name) { - pr_warning("wf_lm87: Unsupported sensor %s\n", - client->dev.of_node->full_name); 
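	/*
	 * Illustrative sketch, not part of this patch: the recurring conversion
	 * in these macintosh and LED hunks swaps "%s" plus of_node->full_name
	 * for the %pOF printk extension, which renders the full device-tree
	 * path straight from the node pointer.  Hypothetical helper, assuming
	 * <linux/of.h> and <linux/printk.h> are included:
	 */
	static void sketch_report_node(struct device_node *np)
	{
		pr_warn("unsupported sensor %s\n", np->full_name);	/* old style */
		pr_warn("unsupported sensor %pOF\n", np);		/* new style */
	}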
+ pr_warning("wf_lm87: Unsupported sensor %pOF\n", + client->dev.of_node); return -ENODEV; } diff --git a/drivers/macintosh/windfarm_max6690_sensor.c b/drivers/macintosh/windfarm_max6690_sensor.c index 87e439b10318..6ad035e13c08 100644 --- a/drivers/macintosh/windfarm_max6690_sensor.c +++ b/drivers/macintosh/windfarm_max6690_sensor.c @@ -55,7 +55,7 @@ static void wf_max6690_release(struct wf_sensor *sr) kfree(max); } -static struct wf_sensor_ops wf_max6690_ops = { +static const struct wf_sensor_ops wf_max6690_ops = { .get_value = wf_max6690_get, .release = wf_max6690_release, .owner = THIS_MODULE, diff --git a/drivers/macintosh/windfarm_rm31.c b/drivers/macintosh/windfarm_rm31.c index bdfcb8a8bfbb..a0cd9c7f9835 100644 --- a/drivers/macintosh/windfarm_rm31.c +++ b/drivers/macintosh/windfarm_rm31.c @@ -338,7 +338,7 @@ static int cpu_setup_pid(int cpu) } /* Backside/U3 fan */ -static struct wf_pid_param backside_param = { +static const struct wf_pid_param backside_param = { .interval = 1, .history_len = 2, .gd = 0x00500000, @@ -351,7 +351,7 @@ static struct wf_pid_param backside_param = { }; /* DIMMs temperature (clamp the backside fan) */ -static struct wf_pid_param dimms_param = { +static const struct wf_pid_param dimms_param = { .interval = 1, .history_len = 20, .gd = 0, diff --git a/drivers/macintosh/windfarm_smu_controls.c b/drivers/macintosh/windfarm_smu_controls.c index c155a54e8638..d174c7437337 100644 --- a/drivers/macintosh/windfarm_smu_controls.c +++ b/drivers/macintosh/windfarm_smu_controls.c @@ -145,7 +145,7 @@ static s32 smu_fan_max(struct wf_control *ct) return fct->max; } -static struct wf_control_ops smu_fan_ops = { +static const struct wf_control_ops smu_fan_ops = { .set_value = smu_fan_set, .get_value = smu_fan_get, .get_min = smu_fan_min, diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c index ad6223e88340..da7f4fc1a51d 100644 --- a/drivers/macintosh/windfarm_smu_sat.c +++ b/drivers/macintosh/windfarm_smu_sat.c @@ -195,7 +195,7 @@ static void wf_sat_sensor_release(struct wf_sensor *sr) kref_put(&sat->ref, wf_sat_release); } -static struct wf_sensor_ops wf_sat_ops = { +static const struct wf_sensor_ops wf_sat_ops = { .get_value = wf_sat_sensor_get, .release = wf_sat_sensor_release, .owner = THIS_MODULE, @@ -248,7 +248,7 @@ static int wf_sat_probe(struct i2c_client *client, core = loc[5] - '0'; if (chip > 1 || core > 1) { printk(KERN_ERR "wf_sat_create: don't understand " - "location %s for %s\n", loc, child->full_name); + "location %s for %pOF\n", loc, child); continue; } cpu = 2 * chip + core; diff --git a/drivers/macintosh/windfarm_smu_sensors.c b/drivers/macintosh/windfarm_smu_sensors.c index 1cc4e4953d89..172fd267dcf6 100644 --- a/drivers/macintosh/windfarm_smu_sensors.c +++ b/drivers/macintosh/windfarm_smu_sensors.c @@ -172,22 +172,22 @@ static int smu_slotspow_get(struct wf_sensor *sr, s32 *value) } -static struct wf_sensor_ops smu_cputemp_ops = { +static const struct wf_sensor_ops smu_cputemp_ops = { .get_value = smu_cputemp_get, .release = smu_ads_release, .owner = THIS_MODULE, }; -static struct wf_sensor_ops smu_cpuamp_ops = { +static const struct wf_sensor_ops smu_cpuamp_ops = { .get_value = smu_cpuamp_get, .release = smu_ads_release, .owner = THIS_MODULE, }; -static struct wf_sensor_ops smu_cpuvolt_ops = { +static const struct wf_sensor_ops smu_cpuvolt_ops = { .get_value = smu_cpuvolt_get, .release = smu_ads_release, .owner = THIS_MODULE, }; -static struct wf_sensor_ops smu_slotspow_ops = { +static const struct wf_sensor_ops 
smu_slotspow_ops = { .get_value = smu_slotspow_get, .release = smu_ads_release, .owner = THIS_MODULE, @@ -327,7 +327,7 @@ static int smu_cpu_power_get(struct wf_sensor *sr, s32 *value) return 0; } -static struct wf_sensor_ops smu_cpu_power_ops = { +static const struct wf_sensor_ops smu_cpu_power_ops = { .get_value = smu_cpu_power_get, .release = smu_cpu_power_release, .owner = THIS_MODULE, diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c index da67882caa7b..ae6146311934 100644 --- a/drivers/mailbox/bcm-flexrm-mailbox.c +++ b/drivers/mailbox/bcm-flexrm-mailbox.c @@ -17,12 +17,14 @@ #include #include +#include +#include +#include #include #include #include #include #include -#include #include #include #include @@ -95,7 +97,7 @@ /* Register RING_CMPL_START_ADDR fields */ #define CMPL_START_ADDR_VALUE(pa) \ - ((u32)((((u64)(pa)) >> RING_CMPL_ALIGN_ORDER) & 0x03ffffff)) + ((u32)((((u64)(pa)) >> RING_CMPL_ALIGN_ORDER) & 0x07ffffff)) /* Register RING_CONTROL fields */ #define CONTROL_MASK_DISABLE_CONTROL 12 @@ -260,18 +262,21 @@ struct flexrm_ring { void __iomem *regs; bool irq_requested; unsigned int irq; + cpumask_t irq_aff_hint; unsigned int msi_timer_val; unsigned int msi_count_threshold; - struct ida requests_ida; struct brcm_message *requests[RING_MAX_REQ_COUNT]; void *bd_base; dma_addr_t bd_dma_base; u32 bd_write_offset; void *cmpl_base; dma_addr_t cmpl_dma_base; + /* Atomic stats */ + atomic_t msg_send_count; + atomic_t msg_cmpl_count; /* Protected members */ spinlock_t lock; - struct brcm_message *last_pending_msg; + DECLARE_BITMAP(requests_bmap, RING_MAX_REQ_COUNT); u32 cmpl_read_offset; }; @@ -282,6 +287,9 @@ struct flexrm_mbox { struct flexrm_ring *rings; struct dma_pool *bd_pool; struct dma_pool *cmpl_pool; + struct dentry *root; + struct dentry *config; + struct dentry *stats; struct mbox_controller controller; }; @@ -912,6 +920,62 @@ static void *flexrm_write_descs(struct brcm_message *msg, u32 nhcnt, /* ====== FlexRM driver helper routines ===== */ +static void flexrm_write_config_in_seqfile(struct flexrm_mbox *mbox, + struct seq_file *file) +{ + int i; + const char *state; + struct flexrm_ring *ring; + + seq_printf(file, "%-5s %-9s %-18s %-10s %-18s %-10s\n", + "Ring#", "State", "BD_Addr", "BD_Size", + "Cmpl_Addr", "Cmpl_Size"); + + for (i = 0; i < mbox->num_rings; i++) { + ring = &mbox->rings[i]; + if (readl(ring->regs + RING_CONTROL) & + BIT(CONTROL_ACTIVE_SHIFT)) + state = "active"; + else + state = "inactive"; + seq_printf(file, + "%-5d %-9s 0x%016llx 0x%08x 0x%016llx 0x%08x\n", + ring->num, state, + (unsigned long long)ring->bd_dma_base, + (u32)RING_BD_SIZE, + (unsigned long long)ring->cmpl_dma_base, + (u32)RING_CMPL_SIZE); + } +} + +static void flexrm_write_stats_in_seqfile(struct flexrm_mbox *mbox, + struct seq_file *file) +{ + int i; + u32 val, bd_read_offset; + struct flexrm_ring *ring; + + seq_printf(file, "%-5s %-10s %-10s %-10s %-11s %-11s\n", + "Ring#", "BD_Read", "BD_Write", + "Cmpl_Read", "Submitted", "Completed"); + + for (i = 0; i < mbox->num_rings; i++) { + ring = &mbox->rings[i]; + bd_read_offset = readl_relaxed(ring->regs + RING_BD_READ_PTR); + val = readl_relaxed(ring->regs + RING_BD_START_ADDR); + bd_read_offset *= RING_DESC_SIZE; + bd_read_offset += (u32)(BD_START_ADDR_DECODE(val) - + ring->bd_dma_base); + seq_printf(file, "%-5d 0x%08x 0x%08x 0x%08x %-11d %-11d\n", + ring->num, + (u32)bd_read_offset, + (u32)ring->bd_write_offset, + (u32)ring->cmpl_read_offset, + (u32)atomic_read(&ring->msg_send_count), + 
(u32)atomic_read(&ring->msg_cmpl_count)); + } +} + static int flexrm_new_request(struct flexrm_ring *ring, struct brcm_message *batch_msg, struct brcm_message *msg) @@ -929,38 +993,24 @@ static int flexrm_new_request(struct flexrm_ring *ring, msg->error = 0; /* If no requests possible then save data pointer and goto done. */ - reqid = ida_simple_get(&ring->requests_ida, 0, - RING_MAX_REQ_COUNT, GFP_KERNEL); - if (reqid < 0) { - spin_lock_irqsave(&ring->lock, flags); - if (batch_msg) - ring->last_pending_msg = batch_msg; - else - ring->last_pending_msg = msg; - spin_unlock_irqrestore(&ring->lock, flags); - return 0; - } + spin_lock_irqsave(&ring->lock, flags); + reqid = bitmap_find_free_region(ring->requests_bmap, + RING_MAX_REQ_COUNT, 0); + spin_unlock_irqrestore(&ring->lock, flags); + if (reqid < 0) + return -ENOSPC; ring->requests[reqid] = msg; /* Do DMA mappings for the message */ ret = flexrm_dma_map(ring->mbox->dev, msg); if (ret < 0) { ring->requests[reqid] = NULL; - ida_simple_remove(&ring->requests_ida, reqid); + spin_lock_irqsave(&ring->lock, flags); + bitmap_release_region(ring->requests_bmap, reqid, 0); + spin_unlock_irqrestore(&ring->lock, flags); return ret; } - /* If last_pending_msg is already set then goto done with error */ - spin_lock_irqsave(&ring->lock, flags); - if (ring->last_pending_msg) - ret = -ENOSPC; - spin_unlock_irqrestore(&ring->lock, flags); - if (ret < 0) { - dev_warn(ring->mbox->dev, "no space in ring %d\n", ring->num); - exit_cleanup = true; - goto exit; - } - /* Determine current HW BD read offset */ read_offset = readl_relaxed(ring->regs + RING_BD_READ_PTR); val = readl_relaxed(ring->regs + RING_BD_START_ADDR); @@ -987,13 +1037,7 @@ static int flexrm_new_request(struct flexrm_ring *ring, break; } if (count) { - spin_lock_irqsave(&ring->lock, flags); - if (batch_msg) - ring->last_pending_msg = batch_msg; - else - ring->last_pending_msg = msg; - spin_unlock_irqrestore(&ring->lock, flags); - ret = 0; + ret = -ENOSPC; exit_cleanup = true; goto exit; } @@ -1012,6 +1056,9 @@ static int flexrm_new_request(struct flexrm_ring *ring, /* Save ring BD write offset */ ring->bd_write_offset = (unsigned long)(next - ring->bd_base); + /* Increment number of messages sent */ + atomic_inc_return(&ring->msg_send_count); + exit: /* Update error status in message */ msg->error = ret; @@ -1020,7 +1067,9 @@ static int flexrm_new_request(struct flexrm_ring *ring, if (exit_cleanup) { flexrm_dma_unmap(ring->mbox->dev, msg); ring->requests[reqid] = NULL; - ida_simple_remove(&ring->requests_ida, reqid); + spin_lock_irqsave(&ring->lock, flags); + bitmap_release_region(ring->requests_bmap, reqid, 0); + spin_unlock_irqrestore(&ring->lock, flags); } return ret; @@ -1037,12 +1086,6 @@ static int flexrm_process_completions(struct flexrm_ring *ring) spin_lock_irqsave(&ring->lock, flags); - /* Check last_pending_msg */ - if (ring->last_pending_msg) { - msg = ring->last_pending_msg; - ring->last_pending_msg = NULL; - } - /* * Get current completion read and write offset * @@ -1058,10 +1101,6 @@ static int flexrm_process_completions(struct flexrm_ring *ring) spin_unlock_irqrestore(&ring->lock, flags); - /* If last_pending_msg was set then queue it back */ - if (msg) - mbox_send_message(chan, msg); - /* For each completed request notify mailbox clients */ reqid = 0; while (cmpl_read_offset != cmpl_write_offset) { @@ -1095,7 +1134,9 @@ static int flexrm_process_completions(struct flexrm_ring *ring) /* Release reqid for recycling */ ring->requests[reqid] = NULL; - 
ida_simple_remove(&ring->requests_ida, reqid); + spin_lock_irqsave(&ring->lock, flags); + bitmap_release_region(ring->requests_bmap, reqid, 0); + spin_unlock_irqrestore(&ring->lock, flags); /* Unmap DMA mappings */ flexrm_dma_unmap(ring->mbox->dev, msg); @@ -1105,12 +1146,37 @@ static int flexrm_process_completions(struct flexrm_ring *ring) mbox_chan_received_data(chan, msg); /* Increment number of completions processed */ + atomic_inc_return(&ring->msg_cmpl_count); count++; } return count; } +/* ====== FlexRM Debugfs callbacks ====== */ + +static int flexrm_debugfs_conf_show(struct seq_file *file, void *offset) +{ + struct platform_device *pdev = to_platform_device(file->private); + struct flexrm_mbox *mbox = platform_get_drvdata(pdev); + + /* Write config in file */ + flexrm_write_config_in_seqfile(mbox, file); + + return 0; +} + +static int flexrm_debugfs_stats_show(struct seq_file *file, void *offset) +{ + struct platform_device *pdev = to_platform_device(file->private); + struct flexrm_mbox *mbox = platform_get_drvdata(pdev); + + /* Write stats in file */ + flexrm_write_stats_in_seqfile(mbox, file); + + return 0; +} + /* ====== FlexRM interrupt handler ===== */ static irqreturn_t flexrm_irq_event(int irq, void *dev_id) @@ -1217,6 +1283,18 @@ static int flexrm_startup(struct mbox_chan *chan) } ring->irq_requested = true; + /* Set IRQ affinity hint */ + ring->irq_aff_hint = CPU_MASK_NONE; + val = ring->mbox->num_rings; + val = (num_online_cpus() < val) ? val / num_online_cpus() : 1; + cpumask_set_cpu((ring->num / val) % num_online_cpus(), + &ring->irq_aff_hint); + ret = irq_set_affinity_hint(ring->irq, &ring->irq_aff_hint); + if (ret) { + dev_err(ring->mbox->dev, "failed to set IRQ affinity hint\n"); + goto fail_free_irq; + } + /* Disable/inactivate ring */ writel_relaxed(0x0, ring->regs + RING_CONTROL); @@ -1233,9 +1311,6 @@ static int flexrm_startup(struct mbox_chan *chan) val = CMPL_START_ADDR_VALUE(ring->cmpl_dma_base); writel_relaxed(val, ring->regs + RING_CMPL_START_ADDR); - /* Ensure last pending message is cleared */ - ring->last_pending_msg = NULL; - /* Completion read pointer will be same as HW write pointer */ ring->cmpl_read_offset = readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR); @@ -1259,8 +1334,15 @@ static int flexrm_startup(struct mbox_chan *chan) val = BIT(CONTROL_ACTIVE_SHIFT); writel_relaxed(val, ring->regs + RING_CONTROL); + /* Reset stats to zero */ + atomic_set(&ring->msg_send_count, 0); + atomic_set(&ring->msg_cmpl_count, 0); + return 0; +fail_free_irq: + free_irq(ring->irq, ring); + ring->irq_requested = false; fail_free_cmpl_memory: dma_pool_free(ring->mbox->cmpl_pool, ring->cmpl_base, ring->cmpl_dma_base); @@ -1302,7 +1384,6 @@ static void flexrm_shutdown(struct mbox_chan *chan) /* Release reqid for recycling */ ring->requests[reqid] = NULL; - ida_simple_remove(&ring->requests_ida, reqid); /* Unmap DMA mappings */ flexrm_dma_unmap(ring->mbox->dev, msg); @@ -1312,8 +1393,12 @@ static void flexrm_shutdown(struct mbox_chan *chan) mbox_chan_received_data(chan, msg); } + /* Clear requests bitmap */ + bitmap_zero(ring->requests_bmap, RING_MAX_REQ_COUNT); + /* Release IRQ */ if (ring->irq_requested) { + irq_set_affinity_hint(ring->irq, NULL); free_irq(ring->irq, ring); ring->irq_requested = false; } @@ -1333,24 +1418,10 @@ static void flexrm_shutdown(struct mbox_chan *chan) } } -static bool flexrm_last_tx_done(struct mbox_chan *chan) -{ - bool ret; - unsigned long flags; - struct flexrm_ring *ring = chan->con_priv; - - spin_lock_irqsave(&ring->lock, flags); - ret = 
(ring->last_pending_msg) ? false : true; - spin_unlock_irqrestore(&ring->lock, flags); - - return ret; -} - static const struct mbox_chan_ops flexrm_mbox_chan_ops = { .send_data = flexrm_send_data, .startup = flexrm_startup, .shutdown = flexrm_shutdown, - .last_tx_done = flexrm_last_tx_done, .peek_data = flexrm_peek_data, }; @@ -1468,14 +1539,15 @@ static int flexrm_mbox_probe(struct platform_device *pdev) ring->irq_requested = false; ring->msi_timer_val = MSI_TIMER_VAL_MASK; ring->msi_count_threshold = 0x1; - ida_init(&ring->requests_ida); memset(ring->requests, 0, sizeof(ring->requests)); ring->bd_base = NULL; ring->bd_dma_base = 0; ring->cmpl_base = NULL; ring->cmpl_dma_base = 0; + atomic_set(&ring->msg_send_count, 0); + atomic_set(&ring->msg_cmpl_count, 0); spin_lock_init(&ring->lock); - ring->last_pending_msg = NULL; + bitmap_zero(ring->requests_bmap, RING_MAX_REQ_COUNT); ring->cmpl_read_offset = 0; } @@ -1515,10 +1587,39 @@ static int flexrm_mbox_probe(struct platform_device *pdev) ring->irq = desc->irq; } + /* Check availability of debugfs */ + if (!debugfs_initialized()) + goto skip_debugfs; + + /* Create debugfs root entry */ + mbox->root = debugfs_create_dir(dev_name(mbox->dev), NULL); + if (IS_ERR_OR_NULL(mbox->root)) { + ret = PTR_ERR_OR_ZERO(mbox->root); + goto fail_free_msis; + } + + /* Create debugfs config entry */ + mbox->config = debugfs_create_devm_seqfile(mbox->dev, + "config", mbox->root, + flexrm_debugfs_conf_show); + if (IS_ERR_OR_NULL(mbox->config)) { + ret = PTR_ERR_OR_ZERO(mbox->config); + goto fail_free_debugfs_root; + } + + /* Create debugfs stats entry */ + mbox->stats = debugfs_create_devm_seqfile(mbox->dev, + "stats", mbox->root, + flexrm_debugfs_stats_show); + if (IS_ERR_OR_NULL(mbox->stats)) { + ret = PTR_ERR_OR_ZERO(mbox->stats); + goto fail_free_debugfs_root; + } +skip_debugfs: + /* Initialize mailbox controller */ mbox->controller.txdone_irq = false; - mbox->controller.txdone_poll = true; - mbox->controller.txpoll_period = 1; + mbox->controller.txdone_poll = false; mbox->controller.ops = &flexrm_mbox_chan_ops; mbox->controller.dev = dev; mbox->controller.num_chans = mbox->num_rings; @@ -1527,7 +1628,7 @@ static int flexrm_mbox_probe(struct platform_device *pdev) sizeof(*mbox->controller.chans), GFP_KERNEL); if (!mbox->controller.chans) { ret = -ENOMEM; - goto fail_free_msis; + goto fail_free_debugfs_root; } for (index = 0; index < mbox->num_rings; index++) mbox->controller.chans[index].con_priv = &mbox->rings[index]; @@ -1535,13 +1636,15 @@ static int flexrm_mbox_probe(struct platform_device *pdev) /* Register mailbox controller */ ret = mbox_controller_register(&mbox->controller); if (ret) - goto fail_free_msis; + goto fail_free_debugfs_root; dev_info(dev, "registered flexrm mailbox with %d channels\n", mbox->controller.num_chans); return 0; +fail_free_debugfs_root: + debugfs_remove_recursive(mbox->root); fail_free_msis: platform_msi_domain_free_irqs(dev); fail_destroy_cmpl_pool: @@ -1554,23 +1657,18 @@ static int flexrm_mbox_probe(struct platform_device *pdev) static int flexrm_mbox_remove(struct platform_device *pdev) { - int index; struct device *dev = &pdev->dev; - struct flexrm_ring *ring; struct flexrm_mbox *mbox = platform_get_drvdata(pdev); mbox_controller_unregister(&mbox->controller); + debugfs_remove_recursive(mbox->root); + platform_msi_domain_free_irqs(dev); dma_pool_destroy(mbox->cmpl_pool); dma_pool_destroy(mbox->bd_pool); - for (index = 0; index < mbox->num_rings; index++) { - ring = &mbox->rings[index]; - 
ida_destroy(&ring->requests_ida); - } - return 0; } diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c index cbca5e51b975..9b7005e1345e 100644 --- a/drivers/mailbox/pcc.c +++ b/drivers/mailbox/pcc.c @@ -457,10 +457,8 @@ static int __init acpi_pcc_probe(void) /* Search for PCCT */ status = acpi_get_table(ACPI_SIG_PCCT, 0, &pcct_tbl); - if (ACPI_FAILURE(status) || !pcct_tbl) { - pr_warn("PCCT header not found.\n"); + if (ACPI_FAILURE(status) || !pcct_tbl) return -ENODEV; - } count = acpi_table_parse_entries(ACPI_SIG_PCCT, sizeof(struct acpi_table_pcct), diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c index ca4abe1ccd8d..cacbe2dbd5c3 100644 --- a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c @@ -68,6 +68,8 @@ #include #include +#define MAX_OPEN_BUCKETS 128 + /* Bucket heap / gen */ uint8_t bch_inc_gen(struct cache *ca, struct bucket *b) @@ -671,7 +673,7 @@ int bch_open_buckets_alloc(struct cache_set *c) spin_lock_init(&c->data_bucket_lock); - for (i = 0; i < 6; i++) { + for (i = 0; i < MAX_OPEN_BUCKETS; i++) { struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL); if (!b) return -ENOMEM; diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index dee542fff68e..2ed9bd231d84 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -333,6 +333,7 @@ struct cached_dev { /* Limit number of writeback bios in flight */ struct semaphore in_flight; struct task_struct *writeback_thread; + struct workqueue_struct *writeback_write_wq; struct keybuf writeback_keys; diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c index 864e673aec39..1841d0359bac 100644 --- a/drivers/md/bcache/closure.c +++ b/drivers/md/bcache/closure.c @@ -64,27 +64,16 @@ EXPORT_SYMBOL(closure_put); void __closure_wake_up(struct closure_waitlist *wait_list) { struct llist_node *list; - struct closure *cl; + struct closure *cl, *t; struct llist_node *reverse = NULL; list = llist_del_all(&wait_list->list); /* We first reverse the list to preserve FIFO ordering and fairness */ - - while (list) { - struct llist_node *t = list; - list = llist_next(list); - - t->next = reverse; - reverse = t; - } + reverse = llist_reverse_order(list); /* Then do the wakeups */ - - while (reverse) { - cl = container_of(reverse, struct closure, list); - reverse = llist_next(reverse); - + llist_for_each_entry_safe(cl, t, reverse, list) { closure_set_waiting(cl, 0); closure_sub(cl, CLOSURE_WAITING + 1); } diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h index 1ec84ca81146..295b7e43f92c 100644 --- a/drivers/md/bcache/closure.h +++ b/drivers/md/bcache/closure.h @@ -312,8 +312,6 @@ static inline void closure_wake_up(struct closure_waitlist *list) * been dropped with closure_put()), it will resume execution at @fn running out * of @wq (or, if @wq is NULL, @fn will be called by closure_put() directly). * - * NOTE: This macro expands to a return in the calling function! - * * This is because after calling continue_at() you no longer have a ref on @cl, * and whatever @cl owns may be freed out from under you - a running closure fn * has a ref on its own closure which continue_at() drops. @@ -340,8 +338,6 @@ do { \ * Causes @fn to be executed out of @cl, in @wq context (or called directly if * @wq is NULL). * - * NOTE: like continue_at(), this macro expands to a return in the caller! - * * The ref the caller of continue_at_nobarrier() had on @cl is now owned by @fn, * thus it's not safe to touch anything protected by @cl after a * continue_at_nobarrier(). 
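The closure.c hunk above replaces the open-coded list reversal and walk with llist_reverse_order() plus llist_for_each_entry_safe(). A minimal sketch of the same pattern on a toy waiter structure (the struct and wake callback are illustrative, not bcache's):

#include <linux/llist.h>

struct waiter {
	struct llist_node list;
	void (*wake)(struct waiter *w);
};

static void wake_all(struct llist_head *wait_list)
{
	struct llist_node *entries = llist_del_all(wait_list);
	struct waiter *w, *t;

	/* Restore FIFO order; llist_del_all() hands the entries back LIFO. */
	entries = llist_reverse_order(entries);

	/* _safe variant: the callback may free the entry it is handed. */
	llist_for_each_entry_safe(w, t, entries, list)
		w->wake(w);
}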
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c index 35a5a7210e51..61076eda2e6d 100644 --- a/drivers/md/bcache/debug.c +++ b/drivers/md/bcache/debug.c @@ -49,7 +49,7 @@ void bch_btree_verify(struct btree *b) v->keys.ops = b->keys.ops; bio = bch_bbio_alloc(b->c); - bio->bi_bdev = PTR_CACHE(b->c, &b->key, 0)->bdev; + bio_set_dev(bio, PTR_CACHE(b->c, &b->key, 0)->bdev); bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0); bio->bi_iter.bi_size = KEY_SIZE(&v->key) << 9; bio->bi_opf = REQ_OP_READ | REQ_META; diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c index 6a9b85095e7b..7e871bdc0097 100644 --- a/drivers/md/bcache/io.c +++ b/drivers/md/bcache/io.c @@ -34,7 +34,7 @@ void __bch_submit_bbio(struct bio *bio, struct cache_set *c) struct bbio *b = container_of(bio, struct bbio, bio); bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0); - bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev; + bio_set_dev(bio, PTR_CACHE(c, &b->key, 0)->bdev); b->submit_time_us = local_clock_us(); closure_bio_submit(bio, bio->bi_private); diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index 0352d05e495c..7e1d1c3ba33a 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -53,7 +53,7 @@ reread: left = ca->sb.bucket_size - offset; bio_reset(bio); bio->bi_iter.bi_sector = bucket + offset; - bio->bi_bdev = ca->bdev; + bio_set_dev(bio, ca->bdev); bio->bi_iter.bi_size = len << 9; bio->bi_end_io = journal_read_endio; @@ -452,7 +452,7 @@ static void do_journal_discard(struct cache *ca) bio_set_op_attrs(bio, REQ_OP_DISCARD, 0); bio->bi_iter.bi_sector = bucket_to_sector(ca->set, ca->sb.d[ja->discard_idx]); - bio->bi_bdev = ca->bdev; + bio_set_dev(bio, ca->bdev); bio->bi_iter.bi_size = bucket_bytes(ca); bio->bi_end_io = journal_discard_endio; @@ -623,7 +623,7 @@ static void journal_write_unlocked(struct closure *cl) bio_reset(bio); bio->bi_iter.bi_sector = PTR_OFFSET(k, i); - bio->bi_bdev = ca->bdev; + bio_set_dev(bio, ca->bdev); bio->bi_iter.bi_size = sectors << 9; bio->bi_end_io = journal_write_endio; diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 019b3df9f1c6..681b4f12b05a 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -196,12 +196,12 @@ static void bch_data_insert_start(struct closure *cl) struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); struct bio *bio = op->bio, *n; - if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) - wake_up_gc(op->c); - if (op->bypass) return bch_data_invalidate(cl); + if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) + wake_up_gc(op->c); + /* * Journal writes are marked REQ_PREFLUSH; if the original write was a * flush, it'll wait on the journal write. 
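The hunks above (and most of the dm/bcache changes that follow) convert direct bi_bdev assignments into bio_set_dev()/bio_copy_dev() calls; as the dm-bio-record.h and dm-integrity changes further down suggest, the target device is now tracked as a gendisk plus partition number rather than a struct block_device pointer. A small sketch of the remap pattern, assuming a hypothetical driver that owns a backing block_device and a start offset:

    #include <linux/bio.h>

    /* Route a bio to a backing device and shift it by a data offset. */
    static void remap_to_backing(struct bio *bio, struct block_device *bdev,
                                 sector_t data_offset)
    {
            bio_set_dev(bio, bdev);         /* was: bio->bi_bdev = bdev; */
            bio->bi_iter.bi_sector += data_offset;
    }

    /* Point a clone at whatever device its parent bio was aimed at. */
    static void point_clone_at_parent(struct bio *clone, struct bio *parent)
    {
            bio_copy_dev(clone, parent);    /* was: clone->bi_bdev = parent->bi_bdev; */
    }

Using the helpers rather than poking the fields keeps callers correct if the bio's notion of "device" changes again, which is exactly what this series does.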
@@ -400,12 +400,6 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio) if (!congested && !dc->sequential_cutoff) goto rescale; - if (!congested && - mode == CACHE_MODE_WRITEBACK && - op_is_write(bio->bi_opf) && - op_is_sync(bio->bi_opf)) - goto rescale; - spin_lock(&dc->io_lock); hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash) @@ -607,7 +601,8 @@ static void request_endio(struct bio *bio) static void bio_complete(struct search *s) { if (s->orig_bio) { - generic_end_io_acct(bio_data_dir(s->orig_bio), + struct request_queue *q = s->orig_bio->bi_disk->queue; + generic_end_io_acct(q, bio_data_dir(s->orig_bio), &s->d->disk->part0, s->start_time); trace_bcache_request_end(s->d, s->orig_bio); @@ -734,7 +729,7 @@ static void cached_dev_read_done(struct closure *cl) if (s->iop.bio) { bio_reset(s->iop.bio); s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector; - s->iop.bio->bi_bdev = s->cache_miss->bi_bdev; + bio_copy_dev(s->iop.bio, s->cache_miss); s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9; bch_bio_map(s->iop.bio, NULL); @@ -793,7 +788,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s, !(bio->bi_opf & REQ_META) && s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA) reada = min_t(sector_t, dc->readahead >> 9, - bdev_sectors(bio->bi_bdev) - bio_end_sector(bio)); + get_capacity(bio->bi_disk) - bio_end_sector(bio)); s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada); @@ -819,7 +814,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s, goto out_submit; cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector; - cache_bio->bi_bdev = miss->bi_bdev; + bio_copy_dev(cache_bio, miss); cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9; cache_bio->bi_end_io = request_endio; @@ -918,7 +913,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s) struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0, dc->disk.bio_split); - flush->bi_bdev = bio->bi_bdev; + bio_copy_dev(flush, bio); flush->bi_end_io = request_endio; flush->bi_private = cl; flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; @@ -955,13 +950,13 @@ static blk_qc_t cached_dev_make_request(struct request_queue *q, struct bio *bio) { struct search *s; - struct bcache_device *d = bio->bi_bdev->bd_disk->private_data; + struct bcache_device *d = bio->bi_disk->private_data; struct cached_dev *dc = container_of(d, struct cached_dev, disk); int rw = bio_data_dir(bio); - generic_start_io_acct(rw, bio_sectors(bio), &d->disk->part0); + generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0); - bio->bi_bdev = dc->bdev; + bio_set_dev(bio, dc->bdev); bio->bi_iter.bi_sector += dc->sb.data_offset; if (cached_dev_get(dc)) { @@ -1071,10 +1066,10 @@ static blk_qc_t flash_dev_make_request(struct request_queue *q, { struct search *s; struct closure *cl; - struct bcache_device *d = bio->bi_bdev->bd_disk->private_data; + struct bcache_device *d = bio->bi_disk->private_data; int rw = bio_data_dir(bio); - generic_start_io_acct(rw, bio_sectors(bio), &d->disk->part0); + generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0); s = search_alloc(bio, d); cl = &s->cl; diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 8352fad765f6..fc0a31b13ac4 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -257,7 +257,7 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent) closure_init(cl, parent); bio_reset(bio); - bio->bi_bdev = dc->bdev; + bio_set_dev(bio, dc->bdev); bio->bi_end_io = 
write_bdev_super_endio; bio->bi_private = dc; @@ -303,7 +303,7 @@ void bcache_write_super(struct cache_set *c) SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb)); bio_reset(bio); - bio->bi_bdev = ca->bdev; + bio_set_dev(bio, ca->bdev); bio->bi_end_io = write_super_endio; bio->bi_private = ca; @@ -508,7 +508,7 @@ static void prio_io(struct cache *ca, uint64_t bucket, int op, closure_init_stack(cl); bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size; - bio->bi_bdev = ca->bdev; + bio_set_dev(bio, ca->bdev); bio->bi_iter.bi_size = bucket_bytes(ca); bio->bi_end_io = prio_endio; @@ -1026,7 +1026,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c) } if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) { - bch_sectors_dirty_init(dc); + bch_sectors_dirty_init(&dc->disk); atomic_set(&dc->has_dirty, 1); atomic_inc(&dc->count); bch_writeback_queue(dc); @@ -1059,6 +1059,8 @@ static void cached_dev_free(struct closure *cl) cancel_delayed_work_sync(&dc->writeback_rate_update); if (!IS_ERR_OR_NULL(dc->writeback_thread)) kthread_stop(dc->writeback_thread); + if (dc->writeback_write_wq) + destroy_workqueue(dc->writeback_write_wq); mutex_lock(&bch_register_lock); @@ -1228,6 +1230,7 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u) goto err; bcache_device_attach(d, c, u - c->uuids); + bch_sectors_dirty_init(d); bch_flash_dev_request_init(d); add_disk(d->disk); @@ -1374,9 +1377,6 @@ static void cache_set_flush(struct closure *cl) struct btree *b; unsigned i; - if (!c) - closure_return(cl); - bch_cache_accounting_destroy(&c->accounting); kobject_put(&c->internal); @@ -1964,6 +1964,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, else err = "device busy"; mutex_unlock(&bch_register_lock); + if (!IS_ERR(bdev)) + bdput(bdev); if (attr == &ksysfs_register_quiet) goto out; } diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index f90f13616980..104c57cd666c 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -192,7 +192,7 @@ STORE(__cached_dev) { struct cached_dev *dc = container_of(kobj, struct cached_dev, disk.kobj); - unsigned v = size; + ssize_t v = size; struct cache_set *c; struct kobj_uevent_env *env; @@ -227,7 +227,7 @@ STORE(__cached_dev) bch_cached_dev_run(dc); if (attr == &sysfs_cache_mode) { - ssize_t v = bch_read_string_list(buf, bch_cache_modes + 1); + v = bch_read_string_list(buf, bch_cache_modes + 1); if (v < 0) return v; @@ -615,8 +615,21 @@ STORE(__bch_cache_set) bch_cache_accounting_clear(&c->accounting); } - if (attr == &sysfs_trigger_gc) + if (attr == &sysfs_trigger_gc) { + /* + * The garbage collection thread only runs when sectors_to_gc < 0. + * When users write to the sysfs entry trigger_gc, most of the time + * they want to forcibly trigger garbage collection. Here -1 is + * written to c->sectors_to_gc to give gc_should_run() a chance to + * let the gc thread run. "Give a chance" means that before + * gc_should_run() is entered, c->sectors_to_gc may still be set to + * some other positive value, so writing to trigger_gc does not + * always guarantee that the gc thread takes effect.
+ */ + atomic_set(&c->sectors_to_gc, -1); wake_up_gc(c); + } if (attr == &sysfs_prune_cache) { struct shrink_control sc; diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c index 8c3a938f4bf0..176d3c2ef5f5 100644 --- a/drivers/md/bcache/util.c +++ b/drivers/md/bcache/util.c @@ -74,24 +74,44 @@ STRTO_H(strtouint, unsigned int) STRTO_H(strtoll, long long) STRTO_H(strtoull, unsigned long long) +/** + * bch_hprint() - formats @v to human readable string for sysfs. + * + * @v - signed 64 bit integer + * @buf - the (at least 8 byte) buffer to format the result into. + * + * Returns the number of bytes used by format. + */ ssize_t bch_hprint(char *buf, int64_t v) { static const char units[] = "?kMGTPEZY"; - char dec[4] = ""; - int u, t = 0; + int u = 0, t; - for (u = 0; v >= 1024 || v <= -1024; u++) { - t = v & ~(~0 << 10); - v >>= 10; - } + uint64_t q; - if (!u) - return sprintf(buf, "%llu", v); + if (v < 0) + q = -v; + else + q = v; - if (v < 100 && v > -100) - snprintf(dec, sizeof(dec), ".%i", t / 100); + /* For as long as the number is more than 3 digits, but at least + * once, shift right / divide by 1024. Keep the remainder for + * a digit after the decimal point. + */ + do { + u++; - return sprintf(buf, "%lli%s%c", v, dec, units[u]); + t = q & ~(~0 << 10); + q >>= 10; + } while (q >= 1000); + + if (v < 0) + /* '-', up to 3 digits, '.', 1 digit, 1 character, null; + * yields 8 bytes. + */ + return sprintf(buf, "-%llu.%i%c", q, t * 10 / 1024, units[u]); + else + return sprintf(buf, "%llu.%i%c", q, t * 10 / 1024, units[u]); } ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[], diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 42c66e76f05e..e663ca082183 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -21,7 +21,8 @@ static void __update_writeback_rate(struct cached_dev *dc) { struct cache_set *c = dc->disk.c; - uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size; + uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size - + bcache_flash_devs_sectors_dirty(c); uint64_t cache_dirty_target = div_u64(cache_sectors * dc->writeback_percent, 100); @@ -181,12 +182,12 @@ static void write_dirty(struct closure *cl) dirty_init(w); bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0); io->bio.bi_iter.bi_sector = KEY_START(&w->key); - io->bio.bi_bdev = io->dc->bdev; + bio_set_dev(&io->bio, io->dc->bdev); io->bio.bi_end_io = dirty_endio; closure_bio_submit(&io->bio, cl); - continue_at(cl, write_dirty_finish, system_wq); + continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq); } static void read_dirty_endio(struct bio *bio) @@ -206,7 +207,7 @@ static void read_dirty_submit(struct closure *cl) closure_bio_submit(&io->bio, cl); - continue_at(cl, write_dirty, system_wq); + continue_at(cl, write_dirty, io->dc->writeback_write_wq); } static void read_dirty(struct cached_dev *dc) @@ -250,8 +251,7 @@ static void read_dirty(struct cached_dev *dc) dirty_init(w); bio_set_op_attrs(&io->bio, REQ_OP_READ, 0); io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0); - io->bio.bi_bdev = PTR_CACHE(dc->disk.c, - &w->key, 0)->bdev; + bio_set_dev(&io->bio, PTR_CACHE(dc->disk.c, &w->key, 0)->bdev); io->bio.bi_end_io = read_dirty_endio; if (bio_alloc_pages(&io->bio, GFP_KERNEL)) @@ -482,17 +482,17 @@ static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b, return MAP_CONTINUE; } -void bch_sectors_dirty_init(struct cached_dev *dc) +void bch_sectors_dirty_init(struct bcache_device *d) { struct 
sectors_dirty_init op; bch_btree_op_init(&op.op, -1); - op.inode = dc->disk.id; + op.inode = d->id; - bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0), + bch_btree_map_keys(&op.op, d->c, &KEY(op.inode, 0, 0), sectors_dirty_init_fn, 0); - dc->disk.sectors_dirty_last = bcache_dev_sectors_dirty(&dc->disk); + d->sectors_dirty_last = bcache_dev_sectors_dirty(d); } void bch_cached_dev_writeback_init(struct cached_dev *dc) @@ -516,6 +516,11 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc) int bch_cached_dev_writeback_start(struct cached_dev *dc) { + dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq", + WQ_MEM_RECLAIM, 0); + if (!dc->writeback_write_wq) + return -ENOMEM; + dc->writeback_thread = kthread_create(bch_writeback_thread, dc, "bcache_writeback"); if (IS_ERR(dc->writeback_thread)) diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h index 629bd1a502fd..e35421d20d2e 100644 --- a/drivers/md/bcache/writeback.h +++ b/drivers/md/bcache/writeback.h @@ -14,6 +14,25 @@ static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d) return ret; } +static inline uint64_t bcache_flash_devs_sectors_dirty(struct cache_set *c) +{ + uint64_t i, ret = 0; + + mutex_lock(&bch_register_lock); + + for (i = 0; i < c->nr_uuids; i++) { + struct bcache_device *d = c->devices[i]; + + if (!d || !UUID_FLASH_ONLY(&c->uuids[i])) + continue; + ret += bcache_dev_sectors_dirty(d); + } + + mutex_unlock(&bch_register_lock); + + return ret; +} + static inline unsigned offset_to_stripe(struct bcache_device *d, uint64_t offset) { @@ -84,7 +103,7 @@ static inline void bch_writeback_add(struct cached_dev *dc) void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int); -void bch_sectors_dirty_init(struct cached_dev *dc); +void bch_sectors_dirty_init(struct bcache_device *); void bch_cached_dev_writeback_init(struct cached_dev *); int bch_cached_dev_writeback_start(struct cached_dev *); diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 40f3cd7eab0f..d2121637b4ab 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -625,7 +625,7 @@ static int bitmap_read_sb(struct bitmap *bitmap) err = read_sb_page(bitmap->mddev, offset, sb_page, - 0, sizeof(bitmap_super_t)); + 0, PAGE_SIZE); } if (err) return err; @@ -2058,6 +2058,11 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks, long pages; struct bitmap_page *new_bp; + if (bitmap->storage.file && !init) { + pr_info("md: cannot resize file-based bitmap\n"); + return -EINVAL; + } + if (chunksize == 0) { /* If there is enough space, leave the chunk size unchanged, * else increase by factor of two until there is enough space. 
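The bcache writeback changes above move the read_dirty/write_dirty continuations off system_wq and onto a dedicated per-device workqueue allocated with WQ_MEM_RECLAIM, so writeback (which may be needed to free memory) gets its own rescuer thread and cannot be starved by unrelated work. A condensed sketch of that lifecycle, with foo_* as hypothetical names and a plain work item standing in for the closure continuation used by the patch:

    #include <linux/workqueue.h>
    #include <linux/errno.h>

    struct foo_dev {
            struct workqueue_struct *writeback_wq;
            struct work_struct writeback_work;
    };

    static int foo_writeback_start(struct foo_dev *d)
    {
            /*
             * WQ_MEM_RECLAIM gives the queue a rescuer thread, so queued work
             * can still make forward progress when new workers cannot be
             * spawned under memory pressure.
             */
            d->writeback_wq = alloc_workqueue("foo_writeback", WQ_MEM_RECLAIM, 0);
            if (!d->writeback_wq)
                    return -ENOMEM;
            return 0;
    }

    static void foo_writeback_kick(struct foo_dev *d)
    {
            queue_work(d->writeback_wq, &d->writeback_work);
    }

    static void foo_writeback_stop(struct foo_dev *d)
    {
            if (d->writeback_wq)
                    destroy_workqueue(d->writeback_wq);
    }

In the patch itself the work is dispatched via continue_at(cl, write_dirty, dc->writeback_write_wq) rather than queue_work(), but the allocation in bch_cached_dev_writeback_start() and the teardown in cached_dev_free() follow this same start/stop shape.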
@@ -2118,7 +2123,7 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks, if (store.sb_page && bitmap->storage.sb_page) memcpy(page_address(store.sb_page), page_address(bitmap->storage.sb_page), - sizeof(bitmap_super_t)); + PAGE_SIZE); bitmap_file_unmap(&bitmap->storage); bitmap->storage = store; diff --git a/drivers/md/dm-bio-record.h b/drivers/md/dm-bio-record.h index dd3646111561..c82578af56a5 100644 --- a/drivers/md/dm-bio-record.h +++ b/drivers/md/dm-bio-record.h @@ -18,21 +18,24 @@ */ struct dm_bio_details { - struct block_device *bi_bdev; + struct gendisk *bi_disk; + u8 bi_partno; unsigned long bi_flags; struct bvec_iter bi_iter; }; static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio) { - bd->bi_bdev = bio->bi_bdev; + bd->bi_disk = bio->bi_disk; + bd->bi_partno = bio->bi_partno; bd->bi_flags = bio->bi_flags; bd->bi_iter = bio->bi_iter; } static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio) { - bio->bi_bdev = bd->bi_bdev; + bio->bi_disk = bd->bi_disk; + bio->bi_partno = bd->bi_partno; bio->bi_flags = bd->bi_flags; bio->bi_iter = bd->bi_iter; } diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 44f4a8ac95bd..d216a8f7bc22 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -63,6 +63,12 @@ #define DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT (PAGE_SIZE >> 1) #define DM_BUFIO_BLOCK_SIZE_GFP_LIMIT (PAGE_SIZE << (MAX_ORDER - 1)) +/* + * Align buffer writes to this boundary. + * Tests show that SSDs have the highest IOPS when using 4k writes. + */ +#define DM_BUFIO_WRITE_ALIGN 4096 + /* * dm_buffer->list_mode */ @@ -149,6 +155,10 @@ struct dm_buffer { blk_status_t write_error; unsigned long state; unsigned long last_accessed; + unsigned dirty_start; + unsigned dirty_end; + unsigned write_start; + unsigned write_end; struct dm_bufio_client *c; struct list_head write_list; struct bio bio; @@ -560,7 +570,7 @@ static void dmio_complete(unsigned long error, void *context) } static void use_dmio(struct dm_buffer *b, int rw, sector_t sector, - unsigned n_sectors, bio_end_io_t *end_io) + unsigned n_sectors, unsigned offset, bio_end_io_t *end_io) { int r; struct dm_io_request io_req = { @@ -578,10 +588,10 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t sector, if (b->data_mode != DATA_MODE_VMALLOC) { io_req.mem.type = DM_IO_KMEM; - io_req.mem.ptr.addr = b->data; + io_req.mem.ptr.addr = (char *)b->data + offset; } else { io_req.mem.type = DM_IO_VMA; - io_req.mem.ptr.vma = b->data; + io_req.mem.ptr.vma = (char *)b->data + offset; } b->bio.bi_end_io = end_io; @@ -609,14 +619,14 @@ static void inline_endio(struct bio *bio) } static void use_inline_bio(struct dm_buffer *b, int rw, sector_t sector, - unsigned n_sectors, bio_end_io_t *end_io) + unsigned n_sectors, unsigned offset, bio_end_io_t *end_io) { char *ptr; - int len; + unsigned len; bio_init(&b->bio, b->bio_vec, DM_BUFIO_INLINE_VECS); b->bio.bi_iter.bi_sector = sector; - b->bio.bi_bdev = b->c->bdev; + bio_set_dev(&b->bio, b->c->bdev); b->bio.bi_end_io = inline_endio; /* * Use of .bi_private isn't a problem here because @@ -625,29 +635,20 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t sector, b->bio.bi_private = end_io; bio_set_op_attrs(&b->bio, rw, 0); - /* - * We assume that if len >= PAGE_SIZE ptr is page-aligned. - * If len < PAGE_SIZE the buffer doesn't cross page boundary. 
- */ - ptr = b->data; + ptr = (char *)b->data + offset; len = n_sectors << SECTOR_SHIFT; - if (len >= PAGE_SIZE) - BUG_ON((unsigned long)ptr & (PAGE_SIZE - 1)); - else - BUG_ON((unsigned long)ptr & (len - 1)); - do { - if (!bio_add_page(&b->bio, virt_to_page(ptr), - len < PAGE_SIZE ? len : PAGE_SIZE, + unsigned this_step = min((unsigned)(PAGE_SIZE - offset_in_page(ptr)), len); + if (!bio_add_page(&b->bio, virt_to_page(ptr), this_step, offset_in_page(ptr))) { BUG_ON(b->c->block_size <= PAGE_SIZE); - use_dmio(b, rw, sector, n_sectors, end_io); + use_dmio(b, rw, sector, n_sectors, offset, end_io); return; } - len -= PAGE_SIZE; - ptr += PAGE_SIZE; + len -= this_step; + ptr += this_step; } while (len > 0); submit_bio(&b->bio); @@ -657,18 +658,33 @@ static void submit_io(struct dm_buffer *b, int rw, bio_end_io_t *end_io) { unsigned n_sectors; sector_t sector; - - if (rw == WRITE && b->c->write_callback) - b->c->write_callback(b); + unsigned offset, end; sector = (b->block << b->c->sectors_per_block_bits) + b->c->start; - n_sectors = 1 << b->c->sectors_per_block_bits; + + if (rw != WRITE) { + n_sectors = 1 << b->c->sectors_per_block_bits; + offset = 0; + } else { + if (b->c->write_callback) + b->c->write_callback(b); + offset = b->write_start; + end = b->write_end; + offset &= -DM_BUFIO_WRITE_ALIGN; + end += DM_BUFIO_WRITE_ALIGN - 1; + end &= -DM_BUFIO_WRITE_ALIGN; + if (unlikely(end > b->c->block_size)) + end = b->c->block_size; + + sector += offset >> SECTOR_SHIFT; + n_sectors = (end - offset) >> SECTOR_SHIFT; + } if (n_sectors <= ((DM_BUFIO_INLINE_VECS * PAGE_SIZE) >> SECTOR_SHIFT) && b->data_mode != DATA_MODE_VMALLOC) - use_inline_bio(b, rw, sector, n_sectors, end_io); + use_inline_bio(b, rw, sector, n_sectors, offset, end_io); else - use_dmio(b, rw, sector, n_sectors, end_io); + use_dmio(b, rw, sector, n_sectors, offset, end_io); } /*---------------------------------------------------------------- @@ -720,6 +736,9 @@ static void __write_dirty_buffer(struct dm_buffer *b, clear_bit(B_DIRTY, &b->state); wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); + b->write_start = b->dirty_start; + b->write_end = b->dirty_end; + if (!write_list) submit_io(b, WRITE, write_endio); else @@ -1221,19 +1240,37 @@ void dm_bufio_release(struct dm_buffer *b) } EXPORT_SYMBOL_GPL(dm_bufio_release); -void dm_bufio_mark_buffer_dirty(struct dm_buffer *b) +void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b, + unsigned start, unsigned end) { struct dm_bufio_client *c = b->c; + BUG_ON(start >= end); + BUG_ON(end > b->c->block_size); + dm_bufio_lock(c); BUG_ON(test_bit(B_READING, &b->state)); - if (!test_and_set_bit(B_DIRTY, &b->state)) + if (!test_and_set_bit(B_DIRTY, &b->state)) { + b->dirty_start = start; + b->dirty_end = end; __relink_lru(b, LIST_DIRTY); + } else { + if (start < b->dirty_start) + b->dirty_start = start; + if (end > b->dirty_end) + b->dirty_end = end; + } dm_bufio_unlock(c); } +EXPORT_SYMBOL_GPL(dm_bufio_mark_partial_buffer_dirty); + +void dm_bufio_mark_buffer_dirty(struct dm_buffer *b) +{ + dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size); +} EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty); void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c) @@ -1398,6 +1435,8 @@ void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block) wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); set_bit(B_DIRTY, &b->state); + b->dirty_start = 0; + b->dirty_end = c->block_size; __unlink_buffer(b); __link_buffer(b, new_block, LIST_DIRTY); } else { diff --git 
a/drivers/md/dm-bufio.h b/drivers/md/dm-bufio.h index b6d8f53ec15b..be732d3f8611 100644 --- a/drivers/md/dm-bufio.h +++ b/drivers/md/dm-bufio.h @@ -93,6 +93,15 @@ void dm_bufio_release(struct dm_buffer *b); */ void dm_bufio_mark_buffer_dirty(struct dm_buffer *b); +/* + * Mark a part of the buffer dirty. + * + * The specified part of the buffer is scheduled to be written. dm-bufio may + * write the specified part of the buffer or it may write a larger superset. + */ +void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b, + unsigned start, unsigned end); + /* * Initiate writing of dirty buffers, without waiting for completion. */ diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index c5ea03fc7ee1..8785134c9f1f 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -833,7 +833,7 @@ static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b) *--------------------------------------------------------------*/ static void remap_to_origin(struct cache *cache, struct bio *bio) { - bio->bi_bdev = cache->origin_dev->bdev; + bio_set_dev(bio, cache->origin_dev->bdev); } static void remap_to_cache(struct cache *cache, struct bio *bio, @@ -842,7 +842,7 @@ static void remap_to_cache(struct cache *cache, struct bio *bio, sector_t bi_sector = bio->bi_iter.bi_sector; sector_t block = from_cblock(cblock); - bio->bi_bdev = cache->cache_dev->bdev; + bio_set_dev(bio, cache->cache_dev->bdev); if (!block_size_is_power_of_two(cache)) bio->bi_iter.bi_sector = (block * cache->sectors_per_block) + @@ -2306,7 +2306,7 @@ static void init_features(struct cache_features *cf) static int parse_features(struct cache_args *ca, struct dm_arg_set *as, char **error) { - static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {0, 2, "Invalid number of cache feature arguments"}, }; @@ -2348,7 +2348,7 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as, static int parse_policy(struct cache_args *ca, struct dm_arg_set *as, char **error) { - static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {0, 1024, "Invalid number of policy arguments"}, }; diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index 24eddbdf2ab4..203144762f36 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -149,5 +149,6 @@ static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen extern atomic_t dm_global_event_nr; extern wait_queue_head_t dm_global_eventq; +void dm_issue_global_event(void); #endif diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index cdf6b1e12460..96ab46512e1f 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -758,9 +758,8 @@ static int crypt_iv_tcw_whitening(struct crypt_config *cc, int i, r; /* xor whitening with sector number */ - memcpy(buf, tcw->whitening, TCW_WHITENING_SIZE); - crypto_xor(buf, (u8 *)§or, 8); - crypto_xor(&buf[8], (u8 *)§or, 8); + crypto_xor_cpy(buf, tcw->whitening, (u8 *)§or, 8); + crypto_xor_cpy(&buf[8], tcw->whitening + 8, (u8 *)§or, 8); /* calculate crc32 for every 32bit part and xor it */ desc->tfm = tcw->crc32_tfm; @@ -805,10 +804,10 @@ static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv, } /* Calculate IV */ - memcpy(iv, tcw->iv_seed, cc->iv_size); - crypto_xor(iv, (u8 *)§or, 8); + crypto_xor_cpy(iv, tcw->iv_seed, (u8 *)§or, 8); if (cc->iv_size > 8) - crypto_xor(&iv[8], (u8 *)§or, cc->iv_size - 8); + crypto_xor_cpy(&iv[8], tcw->iv_seed + 8, (u8 *)§or, + cc->iv_size - 8); return r; } @@ -933,9 +932,6 @@ static int 
dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio) bip->bip_iter.bi_size = tag_len; bip->bip_iter.bi_sector = io->cc->start + io->sector; - /* We own the metadata, do not let bio_free to release it */ - bip->bip_flags &= ~BIP_BLOCK_INTEGRITY; - ret = bio_integrity_add_page(bio, virt_to_page(io->integrity_metadata), tag_len, offset_in_page(io->integrity_metadata)); if (unlikely(ret != tag_len)) @@ -1547,7 +1543,7 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone) clone->bi_private = io; clone->bi_end_io = crypt_endio; - clone->bi_bdev = cc->dev->bdev; + bio_set_dev(clone, cc->dev->bdev); clone->bi_opf = io->base_bio->bi_opf; } @@ -2470,6 +2466,7 @@ static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key kfree(cipher_api); return ret; } + kfree(cipher_api); return 0; bad_mem: @@ -2533,7 +2530,7 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar { struct crypt_config *cc = ti->private; struct dm_arg_set as; - static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {0, 6, "Invalid number of feature args"}, }; unsigned int opt_params, val; @@ -2588,6 +2585,10 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar ti->error = "Invalid feature value for sector_size"; return -EINVAL; } + if (ti->len & ((cc->sector_size >> SECTOR_SHIFT) - 1)) { + ti->error = "Device size is not multiple of sector_size feature"; + return -EINVAL; + } cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT; } else if (!strcasecmp(opt_string, "iv_large_sectors")) set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags); @@ -2796,7 +2797,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio) */ if (unlikely(bio->bi_opf & REQ_PREFLUSH || bio_op(bio) == REQ_OP_DISCARD)) { - bio->bi_bdev = cc->dev->bdev; + bio_set_dev(bio, cc->dev->bdev); if (bio_sectors(bio)) bio->bi_iter.bi_sector = cc->start + dm_target_offset(ti, bio->bi_iter.bi_sector); diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c index ae3158795d26..2209a9700acd 100644 --- a/drivers/md/dm-delay.c +++ b/drivers/md/dm-delay.c @@ -282,7 +282,7 @@ static int delay_map(struct dm_target *ti, struct bio *bio) struct delay_c *dc = ti->private; if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) { - bio->bi_bdev = dc->dev_write->bdev; + bio_set_dev(bio, dc->dev_write->bdev); if (bio_sectors(bio)) bio->bi_iter.bi_sector = dc->start_write + dm_target_offset(ti, bio->bi_iter.bi_sector); @@ -290,7 +290,7 @@ static int delay_map(struct dm_target *ti, struct bio *bio) return delay_bio(dc, dc->write_delay, bio); } - bio->bi_bdev = dc->dev_read->bdev; + bio_set_dev(bio, dc->dev_read->bdev); bio->bi_iter.bi_sector = dc->start_read + dm_target_offset(ti, bio->bi_iter.bi_sector); diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c index e7ba89f98d8d..ba84b8d62cd0 100644 --- a/drivers/md/dm-era-target.c +++ b/drivers/md/dm-era-target.c @@ -1192,7 +1192,7 @@ static dm_block_t get_block(struct era *era, struct bio *bio) static void remap_to_origin(struct era *era, struct bio *bio) { - bio->bi_bdev = era->origin_dev->bdev; + bio_set_dev(bio, era->origin_dev->bdev); } /*---------------------------------------------------------------- diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index e2c7234931bc..b82cb1ab1eaa 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c @@ -51,7 +51,7 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc, unsigned argc; const char *arg_name; - 
static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {0, 6, "Invalid number of feature args"}, {1, UINT_MAX, "Invalid corrupt bio byte"}, {0, 255, "Invalid corrupt value to write into bio byte (0-255)"}, @@ -178,7 +178,7 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc, */ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv) { - static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {0, UINT_MAX, "Invalid up interval"}, {0, UINT_MAX, "Invalid down interval"}, }; @@ -274,7 +274,7 @@ static void flakey_map_bio(struct dm_target *ti, struct bio *bio) { struct flakey_c *fc = ti->private; - bio->bi_bdev = fc->dev->bdev; + bio_set_dev(bio, fc->dev->bdev); if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET) bio->bi_iter.bi_sector = flakey_map_sector(ti, bio->bi_iter.bi_sector); diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 3acce09bba35..096fe9b66c50 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -225,6 +225,8 @@ struct dm_integrity_c { struct alg_spec internal_hash_alg; struct alg_spec journal_crypt_alg; struct alg_spec journal_mac_alg; + + atomic64_t number_of_mismatches; }; struct dm_integrity_range { @@ -250,7 +252,8 @@ struct dm_integrity_io { struct completion *completion; - struct block_device *orig_bi_bdev; + struct gendisk *orig_bi_disk; + u8 orig_bi_partno; bio_end_io_t *orig_bi_end_io; struct bio_integrity_payload *orig_bi_integrity; struct bvec_iter orig_bi_iter; @@ -297,7 +300,7 @@ static void __DEBUG_bytes(__u8 *bytes, size_t len, const char *msg, ...) /* * DM Integrity profile, protection is performed layer above (dm-crypt) */ -static struct blk_integrity_profile dm_integrity_profile = { +static const struct blk_integrity_profile dm_integrity_profile = { .name = "DM-DIF-EXT-TAG", .generate_fn = NULL, .verify_fn = NULL, @@ -309,6 +312,8 @@ static void dm_integrity_dtr(struct dm_target *ti); static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err) { + if (err == -EILSEQ) + atomic64_inc(&ic->number_of_mismatches); if (!cmpxchg(&ic->failed, 0, err)) DMERR("Error on %s: %d", msg, err); } @@ -769,13 +774,13 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsi unsigned i; io_comp.ic = ic; - io_comp.comp = COMPLETION_INITIALIZER_ONSTACK(io_comp.comp); + init_completion(&io_comp.comp); if (commit_start + commit_sections <= ic->journal_sections) { io_comp.in_flight = (atomic_t)ATOMIC_INIT(1); if (ic->journal_io) { crypt_comp_1.ic = ic; - crypt_comp_1.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_1.comp); + init_completion(&crypt_comp_1.comp); crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0); encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1); wait_for_completion_io(&crypt_comp_1.comp); @@ -791,18 +796,18 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsi to_end = ic->journal_sections - commit_start; if (ic->journal_io) { crypt_comp_1.ic = ic; - crypt_comp_1.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_1.comp); + init_completion(&crypt_comp_1.comp); crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0); encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1); if (try_wait_for_completion(&crypt_comp_1.comp)) { rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp); - crypt_comp_1.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_1.comp); + reinit_completion(&crypt_comp_1.comp); crypt_comp_1.in_flight = 
(atomic_t)ATOMIC_INIT(0); encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1); wait_for_completion_io(&crypt_comp_1.comp); } else { crypt_comp_2.ic = ic; - crypt_comp_2.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_2.comp); + init_completion(&crypt_comp_2.comp); crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0); encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2); wait_for_completion_io(&crypt_comp_1.comp); @@ -1040,7 +1045,7 @@ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, se memcpy(tag, dp, to_copy); } else if (op == TAG_WRITE) { memcpy(dp, tag, to_copy); - dm_bufio_mark_buffer_dirty(b); + dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy); } else { /* e.g.: op == TAG_CMP */ if (unlikely(memcmp(dp, tag, to_copy))) { @@ -1164,7 +1169,8 @@ static void integrity_end_io(struct bio *bio) struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io)); bio->bi_iter = dio->orig_bi_iter; - bio->bi_bdev = dio->orig_bi_bdev; + bio->bi_disk = dio->orig_bi_disk; + bio->bi_partno = dio->orig_bi_partno; if (dio->orig_bi_integrity) { bio->bi_integrity = dio->orig_bi_integrity; bio->bi_opf |= REQ_INTEGRITY; @@ -1273,6 +1279,7 @@ static void integrity_metadata(struct work_struct *w) DMERR("Checksum failed at sector 0x%llx", (unsigned long long)(sector - ((r + ic->tag_size - 1) / ic->tag_size))); r = -EILSEQ; + atomic64_inc(&ic->number_of_mismatches); } if (likely(checksums != checksums_onstack)) kfree(checksums); @@ -1674,15 +1681,16 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map dio->in_flight = (atomic_t)ATOMIC_INIT(2); if (need_sync_io) { - read_comp = COMPLETION_INITIALIZER_ONSTACK(read_comp); + init_completion(&read_comp); dio->completion = &read_comp; } else dio->completion = NULL; dio->orig_bi_iter = bio->bi_iter; - dio->orig_bi_bdev = bio->bi_bdev; - bio->bi_bdev = ic->dev->bdev; + dio->orig_bi_disk = bio->bi_disk; + dio->orig_bi_partno = bio->bi_partno; + bio_set_dev(bio, ic->dev->bdev); dio->orig_bi_integrity = bio_integrity(bio); bio->bi_integrity = NULL; @@ -1697,7 +1705,11 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map if (need_sync_io) { wait_for_completion_io(&read_comp); - integrity_metadata(&dio->work); + if (likely(!bio->bi_status)) + integrity_metadata(&dio->work); + else + dec_in_flight(dio); + } else { INIT_WORK(&dio->work, integrity_metadata); queue_work(ic->metadata_wq, &dio->work); @@ -1831,7 +1843,7 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start, comp.ic = ic; comp.in_flight = (atomic_t)ATOMIC_INIT(1); - comp.comp = COMPLETION_INITIALIZER_ONSTACK(comp.comp); + init_completion(&comp.comp); i = write_start; for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) { @@ -2058,7 +2070,7 @@ static void replay_journal(struct dm_integrity_c *ic) if (ic->journal_io) { struct journal_completion crypt_comp; crypt_comp.ic = ic; - crypt_comp.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp.comp); + init_completion(&crypt_comp.comp); crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0); encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp); wait_for_completion(&crypt_comp.comp); @@ -2230,7 +2242,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type, switch (type) { case STATUSTYPE_INFO: - result[0] = '\0'; + DMEMIT("%llu", (unsigned long long)atomic64_read(&ic->number_of_mismatches)); break; case STATUSTYPE_TABLE: { @@ 
-2631,7 +2643,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error) memset(iv, 0x00, ivsize); skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, iv); - comp.comp = COMPLETION_INITIALIZER_ONSTACK(comp.comp); + init_completion(&comp.comp); comp.in_flight = (atomic_t)ATOMIC_INIT(1); if (do_crypt(true, req, &comp)) wait_for_completion(&comp.comp); @@ -2688,7 +2700,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error) sg_init_one(&sg, crypt_data, crypt_len); skcipher_request_set_crypt(req, &sg, &sg, crypt_len, iv); - comp.comp = COMPLETION_INITIALIZER_ONSTACK(comp.comp); + init_completion(&comp.comp); comp.in_flight = (atomic_t)ATOMIC_INIT(1); if (do_crypt(true, req, &comp)) wait_for_completion(&comp.comp); @@ -2775,7 +2787,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) int r; unsigned extra_args; struct dm_arg_set as; - static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {0, 9, "Invalid number of feature args"}, }; unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec; @@ -2803,6 +2815,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) bio_list_init(&ic->flush_bio_list); init_waitqueue_head(&ic->copy_to_journal_wait); init_completion(&ic->crypto_backoff); + atomic64_set(&ic->number_of_mismatches, 0); r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev); if (r) { @@ -3199,7 +3212,7 @@ static void dm_integrity_dtr(struct dm_target *ti) static struct target_type integrity_target = { .name = "integrity", - .version = {1, 0, 0}, + .version = {1, 1, 0}, .module = THIS_MODULE, .features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY, .ctr = dm_integrity_ctr, diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 25039607f3cb..b4357ed4d541 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -347,7 +347,7 @@ static void do_region(int op, int op_flags, unsigned region, bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios); bio->bi_iter.bi_sector = where->sector + (where->count - remaining); - bio->bi_bdev = where->bdev; + bio_set_dev(bio, where->bdev); bio->bi_end_io = endio; bio_set_op_attrs(bio, op, op_flags); store_io_and_region_in_bio(bio, io, region); diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index e06f0ef7d2ec..e52676fa9832 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -477,9 +477,13 @@ static int remove_all(struct file *filp, struct dm_ioctl *param, size_t param_si * Round up the ptr to an 8-byte boundary. 
*/ #define ALIGN_MASK 7 +static inline size_t align_val(size_t val) +{ + return (val + ALIGN_MASK) & ~ALIGN_MASK; +} static inline void *align_ptr(void *ptr) { - return (void *) (((size_t) (ptr + ALIGN_MASK)) & ~ALIGN_MASK); + return (void *)align_val((size_t)ptr); } /* @@ -505,7 +509,7 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_ struct hash_cell *hc; size_t len, needed = 0; struct gendisk *disk; - struct dm_name_list *nl, *old_nl = NULL; + struct dm_name_list *orig_nl, *nl, *old_nl = NULL; uint32_t *event_nr; down_write(&_hash_lock); @@ -516,17 +520,15 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_ */ for (i = 0; i < NUM_BUCKETS; i++) { list_for_each_entry (hc, _name_buckets + i, name_list) { - needed += sizeof(struct dm_name_list); - needed += strlen(hc->name) + 1; - needed += ALIGN_MASK; - needed += (sizeof(uint32_t) + ALIGN_MASK) & ~ALIGN_MASK; + needed += align_val(offsetof(struct dm_name_list, name) + strlen(hc->name) + 1); + needed += align_val(sizeof(uint32_t)); } } /* * Grab our output buffer. */ - nl = get_result_buffer(param, param_size, &len); + nl = orig_nl = get_result_buffer(param, param_size, &len); if (len < needed) { param->flags |= DM_BUFFER_FULL_FLAG; goto out; @@ -549,11 +551,16 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_ strcpy(nl->name, hc->name); old_nl = nl; - event_nr = align_ptr(((void *) (nl + 1)) + strlen(hc->name) + 1); + event_nr = align_ptr(nl->name + strlen(hc->name) + 1); *event_nr = dm_get_event_nr(hc->md); nl = align_ptr(event_nr + 1); } } + /* + * If mismatch happens, security may be compromised due to buffer + * overflow, so it's better to crash. + */ + BUG_ON((char *)nl - (char *)orig_nl != needed); out: up_write(&_hash_lock); @@ -1621,7 +1628,8 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para * which has a variable size, is not used by the function processing * the ioctl. 
*/ -#define IOCTL_FLAGS_NO_PARAMS 1 +#define IOCTL_FLAGS_NO_PARAMS 1 +#define IOCTL_FLAGS_ISSUE_GLOBAL_EVENT 2 /*----------------------------------------------------------------- * Implementation of open/close/ioctl on the special char @@ -1629,18 +1637,18 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para *---------------------------------------------------------------*/ static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags) { - static struct { + static const struct { int cmd; int flags; ioctl_fn fn; } _ioctls[] = { {DM_VERSION_CMD, 0, NULL}, /* version is dealt with elsewhere */ - {DM_REMOVE_ALL_CMD, IOCTL_FLAGS_NO_PARAMS, remove_all}, + {DM_REMOVE_ALL_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, remove_all}, {DM_LIST_DEVICES_CMD, 0, list_devices}, - {DM_DEV_CREATE_CMD, IOCTL_FLAGS_NO_PARAMS, dev_create}, - {DM_DEV_REMOVE_CMD, IOCTL_FLAGS_NO_PARAMS, dev_remove}, - {DM_DEV_RENAME_CMD, 0, dev_rename}, + {DM_DEV_CREATE_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_create}, + {DM_DEV_REMOVE_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_remove}, + {DM_DEV_RENAME_CMD, IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_rename}, {DM_DEV_SUSPEND_CMD, IOCTL_FLAGS_NO_PARAMS, dev_suspend}, {DM_DEV_STATUS_CMD, IOCTL_FLAGS_NO_PARAMS, dev_status}, {DM_DEV_WAIT_CMD, 0, dev_wait}, @@ -1869,6 +1877,9 @@ static int ctl_ioctl(struct file *file, uint command, struct dm_ioctl __user *us unlikely(ioctl_flags & IOCTL_FLAGS_NO_PARAMS)) DMERR("ioctl %d tried to output some data but has IOCTL_FLAGS_NO_PARAMS set", cmd); + if (!r && ioctl_flags & IOCTL_FLAGS_ISSUE_GLOBAL_EVENT) + dm_issue_global_event(); + /* * Copy the results back to userland. */ diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index 41971a090e34..d5f8eff7c11d 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c @@ -88,7 +88,7 @@ static void linear_map_bio(struct dm_target *ti, struct bio *bio) { struct linear_c *lc = ti->private; - bio->bi_bdev = lc->dev->bdev; + bio_set_dev(bio, lc->dev->bdev); if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET) bio->bi_iter.bi_sector = linear_map_sector(ti, bio->bi_iter.bi_sector); @@ -184,20 +184,6 @@ static size_t linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff, return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i); } -static void linear_dax_flush(struct dm_target *ti, pgoff_t pgoff, void *addr, - size_t size) -{ - struct linear_c *lc = ti->private; - struct block_device *bdev = lc->dev->bdev; - struct dax_device *dax_dev = lc->dev->dax_dev; - sector_t dev_sector, sector = pgoff * PAGE_SECTORS; - - dev_sector = linear_map_sector(ti, sector); - if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(size, PAGE_SIZE), &pgoff)) - return; - dax_flush(dax_dev, pgoff, addr, size); -} - static struct target_type linear_target = { .name = "linear", .version = {1, 4, 0}, @@ -212,7 +198,6 @@ static struct target_type linear_target = { .iterate_devices = linear_iterate_devices, .direct_access = linear_dax_direct_access, .dax_copy_from_iter = linear_dax_copy_from_iter, - .dax_flush = linear_dax_flush, }; int __init dm_linear_init(void) diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c index a1da0eb58a93..8b80a9ce9ea9 100644 --- a/drivers/md/dm-log-writes.c +++ b/drivers/md/dm-log-writes.c @@ -100,6 +100,7 @@ struct log_writes_c { struct dm_dev *logdev; u64 logged_entries; u32 sectorsize; + u32 sectorshift; atomic_t io_blocks; atomic_t pending_blocks; sector_t next_sector; @@ -128,6 
+129,18 @@ struct per_bio_data { struct pending_block *block; }; +static inline sector_t bio_to_dev_sectors(struct log_writes_c *lc, + sector_t sectors) +{ + return sectors >> (lc->sectorshift - SECTOR_SHIFT); +} + +static inline sector_t dev_to_bio_sectors(struct log_writes_c *lc, + sector_t sectors) +{ + return sectors << (lc->sectorshift - SECTOR_SHIFT); +} + static void put_pending_block(struct log_writes_c *lc) { if (atomic_dec_and_test(&lc->pending_blocks)) { @@ -198,7 +211,7 @@ static int write_metadata(struct log_writes_c *lc, void *entry, } bio->bi_iter.bi_size = 0; bio->bi_iter.bi_sector = sector; - bio->bi_bdev = lc->logdev->bdev; + bio_set_dev(bio, lc->logdev->bdev); bio->bi_end_io = log_end_io; bio->bi_private = lc; bio_set_op_attrs(bio, REQ_OP_WRITE, 0); @@ -253,7 +266,7 @@ static int log_one_block(struct log_writes_c *lc, if (!block->vec_cnt) goto out; - sector++; + sector += dev_to_bio_sectors(lc, 1); atomic_inc(&lc->io_blocks); bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt, BIO_MAX_PAGES)); @@ -263,7 +276,7 @@ static int log_one_block(struct log_writes_c *lc, } bio->bi_iter.bi_size = 0; bio->bi_iter.bi_sector = sector; - bio->bi_bdev = lc->logdev->bdev; + bio_set_dev(bio, lc->logdev->bdev); bio->bi_end_io = log_end_io; bio->bi_private = lc; bio_set_op_attrs(bio, REQ_OP_WRITE, 0); @@ -285,7 +298,7 @@ static int log_one_block(struct log_writes_c *lc, } bio->bi_iter.bi_size = 0; bio->bi_iter.bi_sector = sector; - bio->bi_bdev = lc->logdev->bdev; + bio_set_dev(bio, lc->logdev->bdev); bio->bi_end_io = log_end_io; bio->bi_private = lc; bio_set_op_attrs(bio, REQ_OP_WRITE, 0); @@ -354,10 +367,9 @@ static int log_writes_kthread(void *arg) goto next; sector = lc->next_sector; - if (block->flags & LOG_DISCARD_FLAG) - lc->next_sector++; - else - lc->next_sector += block->nr_sectors + 1; + if (!(block->flags & LOG_DISCARD_FLAG)) + lc->next_sector += dev_to_bio_sectors(lc, block->nr_sectors); + lc->next_sector += dev_to_bio_sectors(lc, 1); /* * Apparently the size of the device may not be known @@ -399,7 +411,7 @@ static int log_writes_kthread(void *arg) if (!try_to_freeze()) { set_current_state(TASK_INTERRUPTIBLE); if (!kthread_should_stop() && - !atomic_read(&lc->pending_blocks)) + list_empty(&lc->logging_blocks)) schedule(); __set_current_state(TASK_RUNNING); } @@ -435,7 +447,6 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv) INIT_LIST_HEAD(&lc->unflushed_blocks); INIT_LIST_HEAD(&lc->logging_blocks); init_waitqueue_head(&lc->wait); - lc->sectorsize = 1 << SECTOR_SHIFT; atomic_set(&lc->io_blocks, 0); atomic_set(&lc->pending_blocks, 0); @@ -455,6 +466,8 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad; } + lc->sectorsize = bdev_logical_block_size(lc->dev->bdev); + lc->sectorshift = ilog2(lc->sectorsize); lc->log_kthread = kthread_run(log_writes_kthread, lc, "log-write"); if (IS_ERR(lc->log_kthread)) { ret = PTR_ERR(lc->log_kthread); @@ -464,8 +477,12 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad; } - /* We put the super at sector 0, start logging at sector 1 */ - lc->next_sector = 1; + /* + * next_sector is in 512b sectors to correspond to what bi_sector expects. + * The super starts at sector 0, and the next_sector is the next logical + * one based on the sectorsize of the device. 
+ */ + lc->next_sector = lc->sectorsize >> SECTOR_SHIFT; lc->logging_enabled = true; lc->end_sector = logdev_last_sector(lc); lc->device_supports_discard = true; @@ -539,7 +556,7 @@ static void normal_map_bio(struct dm_target *ti, struct bio *bio) { struct log_writes_c *lc = ti->private; - bio->bi_bdev = lc->dev->bdev; + bio_set_dev(bio, lc->dev->bdev); } static int log_writes_map(struct dm_target *ti, struct bio *bio) @@ -599,8 +616,8 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio) if (discard_bio) block->flags |= LOG_DISCARD_FLAG; - block->sector = bio->bi_iter.bi_sector; - block->nr_sectors = bio_sectors(bio); + block->sector = bio_to_dev_sectors(lc, bio->bi_iter.bi_sector); + block->nr_sectors = bio_to_dev_sectors(lc, bio_sectors(bio)); /* We don't need the data, just submit */ if (discard_bio) { @@ -767,9 +784,12 @@ static void log_writes_io_hints(struct dm_target *ti, struct queue_limits *limit if (!q || !blk_queue_discard(q)) { lc->device_supports_discard = false; - limits->discard_granularity = 1 << SECTOR_SHIFT; + limits->discard_granularity = lc->sectorsize; limits->max_discard_sectors = (UINT_MAX >> SECTOR_SHIFT); } + limits->logical_block_size = bdev_logical_block_size(lc->dev->bdev); + limits->physical_block_size = bdev_physical_block_size(lc->dev->bdev); + limits->io_min = limits->physical_block_size; } static struct target_type log_writes_target = { diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index d24e4b05f5da..11f273d2f018 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -565,7 +565,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m mpio->nr_bytes = nr_bytes; bio->bi_status = 0; - bio->bi_bdev = pgpath->path.dev->bdev; + bio_set_dev(bio, pgpath->path.dev->bdev); bio->bi_opf |= REQ_FAILFAST_TRANSPORT; if (pgpath->pg->ps.type->start_io) @@ -632,6 +632,10 @@ static void process_queued_bios(struct work_struct *work) case DM_MAPIO_REMAPPED: generic_make_request(bio); break; + case 0: + break; + default: + WARN_ONCE(true, "__multipath_map_bio() returned %d\n", r); } } blk_finish_plug(&plug); @@ -698,7 +702,7 @@ static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg, struct path_selector_type *pst; unsigned ps_argc; - static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {0, 1024, "invalid number of path selector args"}, }; @@ -822,7 +826,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps static struct priority_group *parse_priority_group(struct dm_arg_set *as, struct multipath *m) { - static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {1, 1024, "invalid number of paths"}, {0, 1024, "invalid number of selector args"} }; @@ -898,7 +902,7 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m) int ret; struct dm_target *ti = m->ti; - static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {0, 1024, "invalid number of hardware handler args"}, }; @@ -950,7 +954,7 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m) struct dm_target *ti = m->ti; const char *arg_name; - static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {0, 8, "invalid number of feature args"}, {1, 50, "pg_init_retries must be between 1 and 50"}, {0, 60000, "pg_init_delay_msecs must be between 0 and 60000"}, @@ -1019,7 +1023,7 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m) static int multipath_ctr(struct dm_target *ti, unsigned 
argc, char **argv) { /* target arguments */ - static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {0, 1024, "invalid number of priority groups"}, {0, 1024, "invalid initial priority group number"}, }; @@ -1379,6 +1383,7 @@ static void pg_init_done(void *data, int errors) case SCSI_DH_RETRY: /* Wait before retrying. */ delay_retry = 1; + /* fall through */ case SCSI_DH_IMM_RETRY: case SCSI_DH_RES_TEMP_UNAVAIL: if (pg_init_limit_reached(m, pgpath)) diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 5bfe285ea9d1..2245d06d2045 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -3238,7 +3238,7 @@ static int raid_map(struct dm_target *ti, struct bio *bio) if (unlikely(bio_end_sector(bio) > mddev->array_sectors)) return DM_MAPIO_REQUEUE; - mddev->pers->make_request(mddev, bio); + md_handle_request(mddev, bio); return DM_MAPIO_SUBMITTED; } @@ -3297,11 +3297,10 @@ static const char *__raid_dev_status(struct raid_set *rs, struct md_rdev *rdev, static sector_t rs_get_progress(struct raid_set *rs, sector_t resync_max_sectors, bool *array_in_sync) { - sector_t r, recovery_cp, curr_resync_completed; + sector_t r, curr_resync_completed; struct mddev *mddev = &rs->md; curr_resync_completed = mddev->curr_resync_completed ?: mddev->recovery_cp; - recovery_cp = mddev->recovery_cp; *array_in_sync = false; if (rs_is_raid0(rs)) { @@ -3330,9 +3329,11 @@ static sector_t rs_get_progress(struct raid_set *rs, } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) r = curr_resync_completed; else - r = recovery_cp; + r = mddev->recovery_cp; - if (r == MaxSector) { + if ((r == MaxSector) || + (test_bit(MD_RECOVERY_DONE, &mddev->recovery) && + (mddev->curr_resync_completed == resync_max_sectors))) { /* * Sync complete. */ @@ -3892,7 +3893,7 @@ static void raid_resume(struct dm_target *ti) static struct target_type raid_target = { .name = "raid", - .version = {1, 12, 1}, + .version = {1, 13, 0}, .module = THIS_MODULE, .ctr = raid_ctr, .dtr = raid_dtr, diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index a4fbd911d566..c0b82136b2d1 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -145,7 +145,7 @@ static void dispatch_bios(void *context, struct bio_list *bio_list) struct dm_raid1_bio_record { struct mirror *m; - /* if details->bi_bdev == NULL, details were not saved */ + /* if details->bi_disk == NULL, details were not saved */ struct dm_bio_details details; region_t write_region; }; @@ -464,7 +464,7 @@ static sector_t map_sector(struct mirror *m, struct bio *bio) static void map_bio(struct mirror *m, struct bio *bio) { - bio->bi_bdev = m->dev->bdev; + bio_set_dev(bio, m->dev->bdev); bio->bi_iter.bi_sector = map_sector(m, bio); } @@ -1199,7 +1199,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio) struct dm_raid1_bio_record *bio_record = dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record)); - bio_record->details.bi_bdev = NULL; + bio_record->details.bi_disk = NULL; if (rw == WRITE) { /* Save region for mirror_end_io() handler */ @@ -1266,7 +1266,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, goto out; if (unlikely(*error)) { - if (!bio_record->details.bi_bdev) { + if (!bio_record->details.bi_disk) { /* * There wasn't enough memory to record necessary * information for a retry or there was no other @@ -1291,7 +1291,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, bd = &bio_record->details; dm_bio_restore(bd, bio); - bio_record->details.bi_bdev = NULL; + bio_record->details.bi_disk = 
NULL; bio->bi_status = 0; queue_bio(ms, bio, rw); @@ -1301,7 +1301,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, } out: - bio_record->details.bi_bdev = NULL; + bio_record->details.bi_disk = NULL; return DM_ENDIO_DONE; } diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index c6ebc5b1e00e..eadfcfd106ff 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -117,9 +117,9 @@ static void end_clone_bio(struct bio *clone) struct dm_rq_clone_bio_info *info = container_of(clone, struct dm_rq_clone_bio_info, clone); struct dm_rq_target_io *tio = info->tio; - struct bio *bio = info->orig; unsigned int nr_bytes = info->orig->bi_iter.bi_size; blk_status_t error = clone->bi_status; + bool is_last = !clone->bi_next; bio_put(clone); @@ -137,28 +137,23 @@ static void end_clone_bio(struct bio *clone) * when the request is completed. */ tio->error = error; - return; + goto exit; } /* * I/O for the bio successfully completed. * Notice the data completion to the upper layer. */ - - /* - * bios are processed from the head of the list. - * So the completing bio should always be rq->bio. - * If it's not, something wrong is happening. - */ - if (tio->orig->bio != bio) - DMERR("bio completion is going in the middle of the request"); + tio->completed += nr_bytes; /* * Update the original request. * Do not use blk_end_request() here, because it may complete * the original request before the clone, and break the ordering. */ - blk_update_request(tio->orig, BLK_STS_OK, nr_bytes); + if (is_last) + exit: + blk_update_request(tio->orig, BLK_STS_OK, tio->completed); } static struct dm_rq_target_io *tio_from_request(struct request *rq) @@ -237,14 +232,14 @@ static void dm_end_request(struct request *clone, blk_status_t error) /* * Requeue the original request of a clone. */ -static void dm_old_requeue_request(struct request *rq) +static void dm_old_requeue_request(struct request *rq, unsigned long delay_ms) { struct request_queue *q = rq->q; unsigned long flags; spin_lock_irqsave(q->queue_lock, flags); blk_requeue_request(q, rq); - blk_run_queue_async(q); + blk_delay_queue(q, delay_ms); spin_unlock_irqrestore(q->queue_lock, flags); } @@ -270,6 +265,7 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_ struct mapped_device *md = tio->md; struct request *rq = tio->orig; int rw = rq_data_dir(rq); + unsigned long delay_ms = delay_requeue ? 100 : 0; rq_end_stats(md, rq); if (tio->clone) { @@ -278,9 +274,9 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_ } if (!rq->q->mq_ops) - dm_old_requeue_request(rq); + dm_old_requeue_request(rq, delay_ms); else - dm_mq_delay_requeue_request(rq, delay_requeue ? 
100/*ms*/ : 0); + dm_mq_delay_requeue_request(rq, delay_ms); rq_completed(md, rw, false); } @@ -455,6 +451,7 @@ static void init_tio(struct dm_rq_target_io *tio, struct request *rq, tio->clone = NULL; tio->orig = rq; tio->error = 0; + tio->completed = 0; /* * Avoid initializing info for blk-mq; it passes * target-specific data through info.ptr diff --git a/drivers/md/dm-rq.h b/drivers/md/dm-rq.h index 9813922e4fe5..f43c45460aac 100644 --- a/drivers/md/dm-rq.h +++ b/drivers/md/dm-rq.h @@ -29,6 +29,7 @@ struct dm_rq_target_io { struct dm_stats_aux stats_aux; unsigned long duration_jiffies; unsigned n_sectors; + unsigned completed; }; /* diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 1ba41048b438..1113b42e1eda 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -1663,7 +1663,7 @@ __find_pending_exception(struct dm_snapshot *s, static void remap_exception(struct dm_snapshot *s, struct dm_exception *e, struct bio *bio, chunk_t chunk) { - bio->bi_bdev = s->cow->bdev; + bio_set_dev(bio, s->cow->bdev); bio->bi_iter.bi_sector = chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) + (chunk - e->old_chunk)) + @@ -1681,7 +1681,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio) init_tracked_chunk(bio); if (bio->bi_opf & REQ_PREFLUSH) { - bio->bi_bdev = s->cow->bdev; + bio_set_dev(bio, s->cow->bdev); return DM_MAPIO_REMAPPED; } @@ -1769,7 +1769,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio) goto out; } } else { - bio->bi_bdev = s->origin->bdev; + bio_set_dev(bio, s->origin->bdev); track_chunk(s, bio, chunk); } @@ -1802,9 +1802,9 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio) if (bio->bi_opf & REQ_PREFLUSH) { if (!dm_bio_get_target_bio_nr(bio)) - bio->bi_bdev = s->origin->bdev; + bio_set_dev(bio, s->origin->bdev); else - bio->bi_bdev = s->cow->bdev; + bio_set_dev(bio, s->cow->bdev); return DM_MAPIO_REMAPPED; } @@ -1824,7 +1824,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio) chunk >= s->first_merging_chunk && chunk < (s->first_merging_chunk + s->num_merging_chunks)) { - bio->bi_bdev = s->origin->bdev; + bio_set_dev(bio, s->origin->bdev); bio_list_add(&s->bios_queued_during_merge, bio); r = DM_MAPIO_SUBMITTED; goto out_unlock; @@ -1838,7 +1838,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio) } redirect_to_origin: - bio->bi_bdev = s->origin->bdev; + bio_set_dev(bio, s->origin->bdev); if (bio_data_dir(bio) == WRITE) { up_write(&s->lock); @@ -2285,7 +2285,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio) struct dm_origin *o = ti->private; unsigned available_sectors; - bio->bi_bdev = o->dev->bdev; + bio_set_dev(bio, o->dev->bdev); if (unlikely(bio->bi_opf & REQ_PREFLUSH)) return DM_MAPIO_REMAPPED; diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index a0375530b07f..b5e892149c54 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c @@ -270,7 +270,7 @@ static int stripe_map_range(struct stripe_c *sc, struct bio *bio, stripe_map_range_sector(sc, bio_end_sector(bio), target_stripe, &end); if (begin < end) { - bio->bi_bdev = sc->stripe[target_stripe].dev->bdev; + bio_set_dev(bio, sc->stripe[target_stripe].dev->bdev); bio->bi_iter.bi_sector = begin + sc->stripe[target_stripe].physical_start; bio->bi_iter.bi_size = to_bytes(end - begin); @@ -291,7 +291,7 @@ static int stripe_map(struct dm_target *ti, struct bio *bio) if (bio->bi_opf & REQ_PREFLUSH) { target_bio_nr = dm_bio_get_target_bio_nr(bio); BUG_ON(target_bio_nr >= sc->stripes); - 
bio->bi_bdev = sc->stripe[target_bio_nr].dev->bdev; + bio_set_dev(bio, sc->stripe[target_bio_nr].dev->bdev); return DM_MAPIO_REMAPPED; } if (unlikely(bio_op(bio) == REQ_OP_DISCARD) || @@ -306,7 +306,7 @@ static int stripe_map(struct dm_target *ti, struct bio *bio) &stripe, &bio->bi_iter.bi_sector); bio->bi_iter.bi_sector += sc->stripe[stripe].physical_start; - bio->bi_bdev = sc->stripe[stripe].dev->bdev; + bio_set_dev(bio, sc->stripe[stripe].dev->bdev); return DM_MAPIO_REMAPPED; } @@ -351,25 +351,6 @@ static size_t stripe_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff, return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i); } -static void stripe_dax_flush(struct dm_target *ti, pgoff_t pgoff, void *addr, - size_t size) -{ - sector_t dev_sector, sector = pgoff * PAGE_SECTORS; - struct stripe_c *sc = ti->private; - struct dax_device *dax_dev; - struct block_device *bdev; - uint32_t stripe; - - stripe_map_sector(sc, sector, &stripe, &dev_sector); - dev_sector += sc->stripe[stripe].physical_start; - dax_dev = sc->stripe[stripe].dev->dax_dev; - bdev = sc->stripe[stripe].dev->bdev; - - if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(size, PAGE_SIZE), &pgoff)) - return; - dax_flush(dax_dev, pgoff, addr, size); -} - /* * Stripe status: * @@ -430,9 +411,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, return DM_ENDIO_DONE; memset(major_minor, 0, sizeof(major_minor)); - sprintf(major_minor, "%d:%d", - MAJOR(disk_devt(bio->bi_bdev->bd_disk)), - MINOR(disk_devt(bio->bi_bdev->bd_disk))); + sprintf(major_minor, "%d:%d", MAJOR(bio_dev(bio)), MINOR(bio_dev(bio))); /* * Test to see which stripe drive triggered the event @@ -491,7 +470,6 @@ static struct target_type stripe_target = { .io_hints = stripe_io_hints, .direct_access = stripe_dax_direct_access, .dax_copy_from_iter = stripe_dax_copy_from_iter, - .dax_flush = stripe_dax_flush, }; int __init dm_stripe_init(void) diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c index 871c18fe000d..4c8de1ff78ca 100644 --- a/drivers/md/dm-switch.c +++ b/drivers/md/dm-switch.c @@ -251,7 +251,7 @@ static void switch_dtr(struct dm_target *ti) */ static int switch_ctr(struct dm_target *ti, unsigned argc, char **argv) { - static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {1, (KMALLOC_MAX_SIZE - sizeof(struct switch_ctx)) / sizeof(struct switch_path), "Invalid number of paths"}, {1, UINT_MAX, "Invalid region size"}, {0, 0, "Invalid number of optional args"}, @@ -322,7 +322,7 @@ static int switch_map(struct dm_target *ti, struct bio *bio) sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector); unsigned path_nr = switch_get_path_nr(sctx, offset); - bio->bi_bdev = sctx->path_list[path_nr].dmdev->bdev; + bio_set_dev(bio, sctx->path_list[path_nr].dmdev->bdev); bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset; return DM_MAPIO_REMAPPED; diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 28a4071cdf85..ef7b8f201f73 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -806,7 +806,8 @@ int dm_table_add_target(struct dm_table *t, const char *type, /* * Target argument parsing helpers. 
*/ -static int validate_next_arg(struct dm_arg *arg, struct dm_arg_set *arg_set, +static int validate_next_arg(const struct dm_arg *arg, + struct dm_arg_set *arg_set, unsigned *value, char **error, unsigned grouped) { const char *arg_str = dm_shift_arg(arg_set); @@ -824,14 +825,14 @@ static int validate_next_arg(struct dm_arg *arg, struct dm_arg_set *arg_set, return 0; } -int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set, +int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set, unsigned *value, char **error) { return validate_next_arg(arg, arg_set, value, error, 0); } EXPORT_SYMBOL(dm_read_arg); -int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set, +int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set, unsigned *value, char **error) { return validate_next_arg(arg, arg_set, value, error, 1); diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 9dec2f8cc739..1e25705209c2 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -679,7 +679,7 @@ static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block) struct pool *pool = tc->pool; sector_t bi_sector = bio->bi_iter.bi_sector; - bio->bi_bdev = tc->pool_dev->bdev; + bio_set_dev(bio, tc->pool_dev->bdev); if (block_size_is_power_of_two(pool)) bio->bi_iter.bi_sector = (block << pool->sectors_per_block_shift) | @@ -691,7 +691,7 @@ static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block) static void remap_to_origin(struct thin_c *tc, struct bio *bio) { - bio->bi_bdev = tc->origin_dev->bdev; + bio_set_dev(bio, tc->origin_dev->bdev); } static int bio_triggers_commit(struct thin_c *tc, struct bio *bio) @@ -3041,7 +3041,7 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf, unsigned argc; const char *arg_name; - static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {0, 4, "Invalid number of pool feature arguments"}, }; @@ -3313,7 +3313,7 @@ static int pool_map(struct dm_target *ti, struct bio *bio) * As this is a singleton target, ti->begin is always zero. 
*/ spin_lock_irqsave(&pool->lock, flags); - bio->bi_bdev = pt->data_dev->bdev; + bio_set_dev(bio, pt->data_dev->bdev); r = DM_MAPIO_REMAPPED; spin_unlock_irqrestore(&pool->lock, flags); diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index b46705ebf01f..bda3caca23ca 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -637,7 +637,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio) struct dm_verity *v = ti->private; struct dm_verity_io *io; - bio->bi_bdev = v->data_dev->bdev; + bio_set_dev(bio, v->data_dev->bdev); bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector); if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) & @@ -839,7 +839,7 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v) struct dm_target *ti = v->ti; const char *arg_name; - static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {0, DM_VERITY_OPTS_MAX, "Invalid number of feature args"}, }; diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c index a4fa2ada6883..70485de37b66 100644 --- a/drivers/md/dm-zoned-metadata.c +++ b/drivers/md/dm-zoned-metadata.c @@ -409,7 +409,7 @@ static struct dmz_mblock *dmz_fetch_mblock(struct dmz_metadata *zmd, } bio->bi_iter.bi_sector = dmz_blk2sect(block); - bio->bi_bdev = zmd->dev->bdev; + bio_set_dev(bio, zmd->dev->bdev); bio->bi_private = mblk; bio->bi_end_io = dmz_mblock_bio_end_io; bio_set_op_attrs(bio, REQ_OP_READ, REQ_META | REQ_PRIO); @@ -564,7 +564,7 @@ static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk, set_bit(DMZ_META_WRITING, &mblk->state); bio->bi_iter.bi_sector = dmz_blk2sect(block); - bio->bi_bdev = zmd->dev->bdev; + bio_set_dev(bio, zmd->dev->bdev); bio->bi_private = mblk; bio->bi_end_io = dmz_mblock_bio_end_io; bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO); @@ -586,7 +586,7 @@ static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block, return -ENOMEM; bio->bi_iter.bi_sector = dmz_blk2sect(block); - bio->bi_bdev = zmd->dev->bdev; + bio_set_dev(bio, zmd->dev->bdev); bio_set_op_attrs(bio, op, REQ_SYNC | REQ_META | REQ_PRIO); bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0); ret = submit_bio_wait(bio); diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c index b08bbbd4d902..b87c1741da4b 100644 --- a/drivers/md/dm-zoned-target.c +++ b/drivers/md/dm-zoned-target.c @@ -238,7 +238,7 @@ static void dmz_submit_write_bio(struct dmz_target *dmz, struct dm_zone *zone, struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx)); /* Setup and submit the BIO */ - bio->bi_bdev = dmz->dev->bdev; + bio_set_dev(bio, dmz->dev->bdev); bio->bi_iter.bi_sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block); atomic_inc(&bioctx->ref); generic_make_request(bio); @@ -586,7 +586,7 @@ static int dmz_map(struct dm_target *ti, struct bio *bio) (unsigned long long)dmz_chunk_block(dmz->dev, dmz_bio_block(bio)), (unsigned int)dmz_bio_blocks(bio)); - bio->bi_bdev = dev->bdev; + bio_set_dev(bio, dev->bdev); if (!nr_sectors && bio_op(bio) != REQ_OP_WRITE) return DM_MAPIO_REMAPPED; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index d669fddd9290..4be85324f44d 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -52,6 +52,12 @@ static struct workqueue_struct *deferred_remove_workqueue; atomic_t dm_global_event_nr = ATOMIC_INIT(0); DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq); +void dm_issue_global_event(void) +{ + atomic_inc(&dm_global_event_nr); + 
wake_up(&dm_global_eventq); +} + /* * One of these is allocated per bio. */ @@ -510,7 +516,7 @@ static void start_io_acct(struct dm_io *io) io->start_time = jiffies; cpu = part_stat_lock(); - part_round_stats(cpu, &dm_disk(md)->part0); + part_round_stats(md->queue, cpu, &dm_disk(md)->part0); part_stat_unlock(); atomic_set(&dm_disk(md)->part0.in_flight[rw], atomic_inc_return(&md->pending[rw])); @@ -529,7 +535,7 @@ static void end_io_acct(struct dm_io *io) int pending; int rw = bio_data_dir(bio); - generic_end_io_acct(rw, &dm_disk(md)->part0, io->start_time); + generic_end_io_acct(md->queue, rw, &dm_disk(md)->part0, io->start_time); if (unlikely(dm_stats_used(&md->stats))) dm_stats_account_io(&md->stats, bio_data_dir(bio), @@ -841,10 +847,10 @@ static void clone_endio(struct bio *bio) if (unlikely(error == BLK_STS_TARGET)) { if (bio_op(bio) == REQ_OP_WRITE_SAME && - !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors) + !bio->bi_disk->queue->limits.max_write_same_sectors) disable_write_same(md); if (bio_op(bio) == REQ_OP_WRITE_ZEROES && - !bdev_get_queue(bio->bi_bdev)->limits.max_write_zeroes_sectors) + !bio->bi_disk->queue->limits.max_write_zeroes_sectors) disable_write_zeroes(md); } @@ -987,24 +993,6 @@ static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, return ret; } -static void dm_dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, - size_t size) -{ - struct mapped_device *md = dax_get_private(dax_dev); - sector_t sector = pgoff * PAGE_SECTORS; - struct dm_target *ti; - int srcu_idx; - - ti = dm_dax_get_live_target(md, sector, &srcu_idx); - - if (!ti) - goto out; - if (ti->type->dax_flush) - ti->type->dax_flush(ti, pgoff, addr, size); - out: - dm_put_live_table(md, srcu_idx); -} - /* * A target may call dm_accept_partial_bio only from the map routine. It is * allowed for all bio types except REQ_PREFLUSH. 
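The hunks above and below all follow the same 4.14 block-layer conversion: struct bio no longer carries a bi_bdev pointer, so drivers record the destination with bio_set_dev() and read the queue or dev_t back through bi_disk. A rough, hedged sketch of the before/after idiom on a hypothetical device-mapper target (example_map and example_ctx are illustrative names, not part of this patch):

/*
 * Sketch only: the bi_bdev -> bi_disk/bi_partno conversion pattern,
 * assuming the 4.14-era bio_set_dev()/bio_dev() helpers.
 */
#include <linux/bio.h>
#include <linux/device-mapper.h>

struct example_ctx {			/* hypothetical per-target state */
	struct dm_dev *dev;
};

static int example_map(struct dm_target *ti, struct bio *bio)
{
	struct example_ctx *ctx = ti->private;

	/* was: bio->bi_bdev = ctx->dev->bdev; */
	bio_set_dev(bio, ctx->dev->bdev);	/* sets bi_disk and bi_partno */
	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);

	/* was: bdev_get_queue(bio->bi_bdev)  -> now: bio->bi_disk->queue */
	/* was: bio->bi_bdev->bd_dev (dev_t)  -> now: bio_dev(bio)        */
	return DM_MAPIO_REMAPPED;
}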
@@ -1205,8 +1193,8 @@ static void __map_bio(struct dm_target_io *tio) break; case DM_MAPIO_REMAPPED: /* the bio has been remapped so dispatch it */ - trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone, - tio->io->bio->bi_bdev->bd_dev, sector); + trace_block_bio_remap(clone->bi_disk->queue, clone, + bio_dev(tio->io->bio), sector); generic_make_request(clone); break; case DM_MAPIO_KILL: @@ -1532,7 +1520,7 @@ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio) map = dm_get_live_table(md, &srcu_idx); - generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0); + generic_start_io_acct(q, rw, bio_sectors(bio), &dm_disk(md)->part0); /* if we're suspended, we have to queue this io for later */ if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { @@ -1786,7 +1774,7 @@ static struct mapped_device *alloc_dev(int minor) goto bad; bio_init(&md->flush_bio, NULL, 0); - md->flush_bio.bi_bdev = md->bdev; + bio_set_dev(&md->flush_bio, md->bdev); md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; dm_stats_init(&md->stats); @@ -1883,9 +1871,8 @@ static void event_callback(void *context) dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); atomic_inc(&md->event_nr); - atomic_inc(&dm_global_event_nr); wake_up(&md->eventq); - wake_up(&dm_global_eventq); + dm_issue_global_event(); } /* @@ -2301,6 +2288,7 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) } map = __bind(md, table, &limits); + dm_issue_global_event(); out: mutex_unlock(&md->suspend_lock); @@ -2992,7 +2980,6 @@ static const struct block_device_operations dm_blk_dops = { static const struct dax_operations dm_dax_ops = { .direct_access = dm_dax_direct_access, .copy_from_iter = dm_dax_copy_from_iter, - .flush = dm_dax_flush, }; /* diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c index 06a64d5d8c6c..38264b38420f 100644 --- a/drivers/md/faulty.c +++ b/drivers/md/faulty.c @@ -216,12 +216,12 @@ static bool faulty_make_request(struct mddev *mddev, struct bio *bio) if (failit) { struct bio *b = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set); - b->bi_bdev = conf->rdev->bdev; + bio_set_dev(b, conf->rdev->bdev); b->bi_private = bio; b->bi_end_io = faulty_fail; bio = b; } else - bio->bi_bdev = conf->rdev->bdev; + bio_set_dev(bio, conf->rdev->bdev); generic_make_request(bio); return true; diff --git a/drivers/md/linear.c b/drivers/md/linear.c index 5f1eb9189542..c464fb48039a 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c @@ -275,17 +275,17 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio) bio = split; } - bio->bi_bdev = tmp_dev->rdev->bdev; + bio_set_dev(bio, tmp_dev->rdev->bdev); bio->bi_iter.bi_sector = bio->bi_iter.bi_sector - start_sector + data_offset; if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && - !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) { + !blk_queue_discard(bio->bi_disk->queue))) { /* Just ignore it */ bio_endio(bio); } else { if (mddev->gendisk) - trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), + trace_block_bio_remap(bio->bi_disk->queue, bio, disk_devt(mddev->gendisk), bio_sector); mddev_check_writesame(mddev, bio); diff --git a/drivers/md/md.c b/drivers/md/md.c index b01e458d31e9..0ff1bbf6c90e 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -266,6 +266,37 @@ static DEFINE_SPINLOCK(all_mddevs_lock); * call has finished, the bio has been linked into some internal structure * and so is visible to ->quiesce(), so we don't need the refcount any more. 
*/ +void md_handle_request(struct mddev *mddev, struct bio *bio) +{ +check_suspended: + rcu_read_lock(); + if (mddev->suspended) { + DEFINE_WAIT(__wait); + for (;;) { + prepare_to_wait(&mddev->sb_wait, &__wait, + TASK_UNINTERRUPTIBLE); + if (!mddev->suspended) + break; + rcu_read_unlock(); + schedule(); + rcu_read_lock(); + } + finish_wait(&mddev->sb_wait, &__wait); + } + atomic_inc(&mddev->active_io); + rcu_read_unlock(); + + if (!mddev->pers->make_request(mddev, bio)) { + atomic_dec(&mddev->active_io); + wake_up(&mddev->sb_wait); + goto check_suspended; + } + + if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended) + wake_up(&mddev->sb_wait); +} +EXPORT_SYMBOL(md_handle_request); + static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio) { const int rw = bio_data_dir(bio); @@ -285,23 +316,6 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio) bio_endio(bio); return BLK_QC_T_NONE; } -check_suspended: - rcu_read_lock(); - if (mddev->suspended) { - DEFINE_WAIT(__wait); - for (;;) { - prepare_to_wait(&mddev->sb_wait, &__wait, - TASK_UNINTERRUPTIBLE); - if (!mddev->suspended) - break; - rcu_read_unlock(); - schedule(); - rcu_read_lock(); - } - finish_wait(&mddev->sb_wait, &__wait); - } - atomic_inc(&mddev->active_io); - rcu_read_unlock(); /* * save the sectors now since our bio can @@ -310,20 +324,14 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio) sectors = bio_sectors(bio); /* bio could be mergeable after passing to underlayer */ bio->bi_opf &= ~REQ_NOMERGE; - if (!mddev->pers->make_request(mddev, bio)) { - atomic_dec(&mddev->active_io); - wake_up(&mddev->sb_wait); - goto check_suspended; - } + + md_handle_request(mddev, bio); cpu = part_stat_lock(); part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors); part_stat_unlock(); - if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended) - wake_up(&mddev->sb_wait); - return BLK_QC_T_NONE; } @@ -422,7 +430,7 @@ static void submit_flushes(struct work_struct *ws) bi = bio_alloc_mddev(GFP_NOIO, 0, mddev); bi->bi_end_io = md_end_flush; bi->bi_private = rdev; - bi->bi_bdev = rdev->bdev; + bio_set_dev(bi, rdev->bdev); bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; atomic_inc(&mddev->flush_pending); submit_bio(bi); @@ -439,16 +447,22 @@ static void md_submit_flush_data(struct work_struct *ws) struct mddev *mddev = container_of(ws, struct mddev, flush_work); struct bio *bio = mddev->flush_bio; + /* + * must reset flush_bio before calling into md_handle_request to avoid a + * deadlock, because other bios passed md_handle_request suspend check + * could wait for this and below md_handle_request could wait for those + * bios because of suspend check + */ + mddev->flush_bio = NULL; + wake_up(&mddev->sb_wait); + if (bio->bi_iter.bi_size == 0) /* an empty barrier - all done */ bio_endio(bio); else { bio->bi_opf &= ~REQ_PREFLUSH; - mddev->pers->make_request(mddev, bio); + md_handle_request(mddev, bio); } - - mddev->flush_bio = NULL; - wake_up(&mddev->sb_wait); } void md_flush_request(struct mddev *mddev, struct bio *bio) @@ -772,7 +786,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev, atomic_inc(&rdev->nr_pending); - bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev; + bio_set_dev(bio, rdev->meta_bdev ? 
rdev->meta_bdev : rdev->bdev); bio->bi_iter.bi_sector = sector; bio_add_page(bio, page, size, 0); bio->bi_private = rdev; @@ -803,8 +817,10 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, struct bio *bio = md_bio_alloc_sync(rdev->mddev); int ret; - bio->bi_bdev = (metadata_op && rdev->meta_bdev) ? - rdev->meta_bdev : rdev->bdev; + if (metadata_op && rdev->meta_bdev) + bio_set_dev(bio, rdev->meta_bdev); + else + bio_set_dev(bio, rdev->bdev); bio_set_op_attrs(bio, op, op_flags); if (metadata_op) bio->bi_iter.bi_sector = sector + rdev->sb_start; @@ -1536,7 +1552,8 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_ } else if (sb->bblog_offset != 0) rdev->badblocks.shift = 0; - if (le32_to_cpu(sb->feature_map) & MD_FEATURE_PPL) { + if ((le32_to_cpu(sb->feature_map) & + (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS))) { rdev->ppl.offset = (__s16)le16_to_cpu(sb->ppl.offset); rdev->ppl.size = le16_to_cpu(sb->ppl.size); rdev->ppl.sector = rdev->sb_start + rdev->ppl.offset; @@ -1655,10 +1672,15 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL) set_bit(MD_HAS_JOURNAL, &mddev->flags); - if (le32_to_cpu(sb->feature_map) & MD_FEATURE_PPL) { + if (le32_to_cpu(sb->feature_map) & + (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS)) { if (le32_to_cpu(sb->feature_map) & (MD_FEATURE_BITMAP_OFFSET | MD_FEATURE_JOURNAL)) return -EINVAL; + if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_PPL) && + (le32_to_cpu(sb->feature_map) & + MD_FEATURE_MULTIPLE_PPLS)) + return -EINVAL; set_bit(MD_HAS_PPL, &mddev->flags); } } else if (mddev->pers == NULL) { @@ -1875,7 +1897,11 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev) sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL); if (test_bit(MD_HAS_PPL, &mddev->flags)) { - sb->feature_map |= cpu_to_le32(MD_FEATURE_PPL); + if (test_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags)) + sb->feature_map |= + cpu_to_le32(MD_FEATURE_MULTIPLE_PPLS); + else + sb->feature_map |= cpu_to_le32(MD_FEATURE_PPL); sb->ppl.offset = cpu_to_le16(rdev->ppl.offset); sb->ppl.size = cpu_to_le16(rdev->ppl.size); } @@ -4283,6 +4309,8 @@ new_dev_store(struct mddev *mddev, const char *buf, size_t len) if (err) export_rdev(rdev); mddev_unlock(mddev); + if (!err) + md_new_event(mddev); return err ? err : len; } @@ -7836,7 +7864,7 @@ static const struct file_operations md_seq_fops = { .open = md_seq_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release_private, + .release = seq_release, .poll = mdstat_poll, }; diff --git a/drivers/md/md.h b/drivers/md/md.h index 09db03455801..d8287d3cd1bf 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -236,6 +236,7 @@ enum mddev_flags { * never cause the array to become failed. 
*/ MD_HAS_PPL, /* The raid array has PPL feature set */ + MD_HAS_MULTIPLE_PPLS, /* The raid array has multiple PPLs feature set */ }; enum mddev_sb_flags { @@ -509,6 +510,11 @@ static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sect atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io); } +static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors) +{ + atomic_add(nr_sectors, &bio->bi_disk->sync_io); +} + struct md_personality { char *name; @@ -686,6 +692,7 @@ extern void md_stop_writes(struct mddev *mddev); extern int md_rdev_init(struct md_rdev *rdev); extern void md_rdev_clear(struct md_rdev *rdev); +extern void md_handle_request(struct mddev *mddev, struct bio *bio); extern void mddev_suspend(struct mddev *mddev); extern void mddev_resume(struct mddev *mddev); extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, @@ -721,14 +728,14 @@ static inline void mddev_clear_unsupported_flags(struct mddev *mddev, static inline void mddev_check_writesame(struct mddev *mddev, struct bio *bio) { if (bio_op(bio) == REQ_OP_WRITE_SAME && - !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors) + !bio->bi_disk->queue->limits.max_write_same_sectors) mddev->queue->limits.max_write_same_sectors = 0; } static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio) { if (bio_op(bio) == REQ_OP_WRITE_ZEROES && - !bdev_get_queue(bio->bi_bdev)->limits.max_write_zeroes_sectors) + !bio->bi_disk->queue->limits.max_write_zeroes_sectors) mddev->queue->limits.max_write_zeroes_sectors = 0; } #endif /* _MD_MD_H */ diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 23a162ba6c56..b68e0666b9b0 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -134,7 +134,7 @@ static bool multipath_make_request(struct mddev *mddev, struct bio * bio) __bio_clone_fast(&mp_bh->bio, bio); mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset; - mp_bh->bio.bi_bdev = multipath->rdev->bdev; + bio_set_dev(&mp_bh->bio, multipath->rdev->bdev); mp_bh->bio.bi_opf |= REQ_FAILFAST_TRANSPORT; mp_bh->bio.bi_end_io = multipath_end_request; mp_bh->bio.bi_private = mp_bh; @@ -345,17 +345,17 @@ static void multipathd(struct md_thread *thread) if ((mp_bh->path = multipath_map (conf))<0) { pr_err("multipath: %s: unrecoverable IO read error for block %llu\n", - bdevname(bio->bi_bdev,b), + bio_devname(bio, b), (unsigned long long)bio->bi_iter.bi_sector); multipath_end_bh_io(mp_bh, BLK_STS_IOERR); } else { pr_err("multipath: %s: redirecting sector %llu to another IO path\n", - bdevname(bio->bi_bdev,b), + bio_devname(bio, b), (unsigned long long)bio->bi_iter.bi_sector); *bio = *(mp_bh->master_bio); bio->bi_iter.bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset; - bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev; + bio_set_dev(bio, conf->multipaths[mp_bh->path].rdev->bdev); bio->bi_opf |= REQ_FAILFAST_TRANSPORT; bio->bi_end_io = multipath_end_request; bio->bi_private = mp_bh; diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 94d9ae9b0fd0..5a00fc118470 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -30,7 +30,8 @@ ((1L << MD_HAS_JOURNAL) | \ (1L << MD_JOURNAL_CLEAN) | \ (1L << MD_FAILFAST_SUPPORTED) |\ - (1L << MD_HAS_PPL)) + (1L << MD_HAS_PPL) | \ + (1L << MD_HAS_MULTIPLE_PPLS)) static int raid0_congested(struct mddev *mddev, int bits) { @@ -539,6 +540,7 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio) !discard_bio) continue; bio_chain(discard_bio, bio); + 
bio_clone_blkcg_association(discard_bio, bio); if (mddev->gendisk) trace_block_bio_remap(bdev_get_queue(rdev->bdev), discard_bio, disk_devt(mddev->gendisk), @@ -588,14 +590,13 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio) zone = find_zone(mddev->private, §or); tmp_dev = map_sector(mddev, zone, sector, §or); - bio->bi_bdev = tmp_dev->bdev; + bio_set_dev(bio, tmp_dev->bdev); bio->bi_iter.bi_sector = sector + zone->dev_start + tmp_dev->data_offset; if (mddev->gendisk) - trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), - bio, disk_devt(mddev->gendisk), - bio_sector); + trace_block_bio_remap(bio->bi_disk->queue, bio, + disk_devt(mddev->gendisk), bio_sector); mddev_check_writesame(mddev, bio); mddev_check_write_zeroes(mddev, bio); generic_make_request(bio); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index f50958ded9f0..f3f3e40dc9d8 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -48,7 +48,8 @@ #define UNSUPPORTED_MDDEV_FLAGS \ ((1L << MD_HAS_JOURNAL) | \ (1L << MD_JOURNAL_CLEAN) | \ - (1L << MD_HAS_PPL)) + (1L << MD_HAS_PPL) | \ + (1L << MD_HAS_MULTIPLE_PPLS)) /* * Number of guaranteed r1bios in case of extreme VM load: @@ -786,13 +787,13 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio) while (bio) { /* submit pending writes */ struct bio *next = bio->bi_next; - struct md_rdev *rdev = (void*)bio->bi_bdev; + struct md_rdev *rdev = (void *)bio->bi_disk; bio->bi_next = NULL; - bio->bi_bdev = rdev->bdev; + bio_set_dev(bio, rdev->bdev); if (test_bit(Faulty, &rdev->flags)) { bio_io_error(bio); } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && - !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) + !blk_queue_discard(bio->bi_disk->queue))) /* Just ignore it */ bio_endio(bio); else @@ -1273,7 +1274,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio, read_bio->bi_iter.bi_sector = r1_bio->sector + mirror->rdev->data_offset; - read_bio->bi_bdev = mirror->rdev->bdev; + bio_set_dev(read_bio, mirror->rdev->bdev); read_bio->bi_end_io = raid1_end_read_request; bio_set_op_attrs(read_bio, op, do_sync); if (test_bit(FailFast, &mirror->rdev->flags) && @@ -1282,9 +1283,8 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio, read_bio->bi_private = r1_bio; if (mddev->gendisk) - trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev), - read_bio, disk_devt(mddev->gendisk), - r1_bio->sector); + trace_block_bio_remap(read_bio->bi_disk->queue, read_bio, + disk_devt(mddev->gendisk), r1_bio->sector); generic_make_request(read_bio); } @@ -1496,7 +1496,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, mbio->bi_iter.bi_sector = (r1_bio->sector + conf->mirrors[i].rdev->data_offset); - mbio->bi_bdev = conf->mirrors[i].rdev->bdev; + bio_set_dev(mbio, conf->mirrors[i].rdev->bdev); mbio->bi_end_io = raid1_end_write_request; mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA)); if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) && @@ -1508,11 +1508,11 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, atomic_inc(&r1_bio->remaining); if (mddev->gendisk) - trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev), + trace_block_bio_remap(mbio->bi_disk->queue, mbio, disk_devt(mddev->gendisk), r1_bio->sector); /* flush_pending_writes() needs access to the rdev so...*/ - mbio->bi_bdev = (void*)conf->mirrors[i].rdev; + mbio->bi_disk = (void *)conf->mirrors[i].rdev; cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug)); if (cb) @@ -1990,8 +1990,7 @@ static int 
fix_sync_read_error(struct r1bio *r1_bio) * Don't fail devices as that won't really help. */ pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n", - mdname(mddev), - bdevname(bio->bi_bdev, b), + mdname(mddev), bio_devname(bio, b), (unsigned long long)r1_bio->sector); for (d = 0; d < conf->raid_disks * 2; d++) { rdev = conf->mirrors[d].rdev; @@ -2082,7 +2081,7 @@ static void process_checks(struct r1bio *r1_bio) b->bi_status = status; b->bi_iter.bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset; - b->bi_bdev = conf->mirrors[i].rdev->bdev; + bio_set_dev(b, conf->mirrors[i].rdev->bdev); b->bi_end_io = end_sync_read; rp->raid_bio = r1_bio; b->bi_private = rp; @@ -2350,7 +2349,7 @@ static int narrow_write_error(struct r1bio *r1_bio, int i) bio_trim(wbio, sector - r1_bio->sector, sectors); wbio->bi_iter.bi_sector += rdev->data_offset; - wbio->bi_bdev = rdev->bdev; + bio_set_dev(wbio, rdev->bdev); if (submit_bio_wait(wbio) < 0) /* failure! */ @@ -2440,7 +2439,6 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio) struct mddev *mddev = conf->mddev; struct bio *bio; struct md_rdev *rdev; - dev_t bio_dev; sector_t bio_sector; clear_bit(R1BIO_ReadError, &r1_bio->state); @@ -2454,7 +2452,6 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio) */ bio = r1_bio->bios[r1_bio->read_disk]; - bio_dev = bio->bi_bdev->bd_dev; bio_sector = conf->mirrors[r1_bio->read_disk].rdev->data_offset + r1_bio->sector; bio_put(bio); r1_bio->bios[r1_bio->read_disk] = NULL; @@ -2564,6 +2561,23 @@ static int init_resync(struct r1conf *conf) return 0; } +static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf) +{ + struct r1bio *r1bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO); + struct resync_pages *rps; + struct bio *bio; + int i; + + for (i = conf->poolinfo->raid_disks; i--; ) { + bio = r1bio->bios[i]; + rps = bio->bi_private; + bio_reset(bio); + bio->bi_private = rps; + } + r1bio->master_bio = NULL; + return r1bio; +} + /* * perform a "sync" on one "block" * @@ -2649,7 +2663,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, bitmap_cond_end_sync(mddev->bitmap, sector_nr, mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high)); - r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO); + r1_bio = raid1_alloc_init_r1buf(conf); raise_barrier(conf, sector_nr); @@ -2727,7 +2741,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, if (bio->bi_end_io) { atomic_inc(&rdev->nr_pending); bio->bi_iter.bi_sector = sector_nr + rdev->data_offset; - bio->bi_bdev = rdev->bdev; + bio_set_dev(bio, rdev->bdev); if (test_bit(FailFast, &rdev->flags)) bio->bi_opf |= MD_FAILFAST; } @@ -2853,7 +2867,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, bio = r1_bio->bios[i]; if (bio->bi_end_io == end_sync_read) { read_targets--; - md_sync_acct(bio->bi_bdev, nr_sectors); + md_sync_acct_bio(bio, nr_sectors); if (read_targets == 1) bio->bi_opf &= ~MD_FAILFAST; generic_make_request(bio); @@ -2862,7 +2876,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, } else { atomic_set(&r1_bio->remaining, 1); bio = r1_bio->bios[r1_bio->read_disk]; - md_sync_acct(bio->bi_bdev, nr_sectors); + md_sync_acct_bio(bio, nr_sectors); if (read_targets == 1) bio->bi_opf &= ~MD_FAILFAST; generic_make_request(bio); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index f55d4cc085f6..374df5796649 100644 --- a/drivers/md/raid10.c +++ 
b/drivers/md/raid10.c @@ -901,13 +901,13 @@ static void flush_pending_writes(struct r10conf *conf) while (bio) { /* submit pending writes */ struct bio *next = bio->bi_next; - struct md_rdev *rdev = (void*)bio->bi_bdev; + struct md_rdev *rdev = (void*)bio->bi_disk; bio->bi_next = NULL; - bio->bi_bdev = rdev->bdev; + bio_set_dev(bio, rdev->bdev); if (test_bit(Faulty, &rdev->flags)) { bio_io_error(bio); } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && - !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) + !blk_queue_discard(bio->bi_disk->queue))) /* Just ignore it */ bio_endio(bio); else @@ -1085,13 +1085,13 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule) while (bio) { /* submit pending writes */ struct bio *next = bio->bi_next; - struct md_rdev *rdev = (void*)bio->bi_bdev; + struct md_rdev *rdev = (void*)bio->bi_disk; bio->bi_next = NULL; - bio->bi_bdev = rdev->bdev; + bio_set_dev(bio, rdev->bdev); if (test_bit(Faulty, &rdev->flags)) { bio_io_error(bio); } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && - !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) + !blk_queue_discard(bio->bi_disk->queue))) /* Just ignore it */ bio_endio(bio); else @@ -1200,7 +1200,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio, read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr + choose_data_offset(r10_bio, rdev); - read_bio->bi_bdev = rdev->bdev; + bio_set_dev(read_bio, rdev->bdev); read_bio->bi_end_io = raid10_end_read_request; bio_set_op_attrs(read_bio, op, do_sync); if (test_bit(FailFast, &rdev->flags) && @@ -1209,7 +1209,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio, read_bio->bi_private = r10_bio; if (mddev->gendisk) - trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev), + trace_block_bio_remap(read_bio->bi_disk->queue, read_bio, disk_devt(mddev->gendisk), r10_bio->sector); generic_make_request(read_bio); @@ -1249,7 +1249,7 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio, mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr + choose_data_offset(r10_bio, rdev)); - mbio->bi_bdev = rdev->bdev; + bio_set_dev(mbio, rdev->bdev); mbio->bi_end_io = raid10_end_write_request; bio_set_op_attrs(mbio, op, do_sync | do_fua); if (!replacement && test_bit(FailFast, @@ -1259,11 +1259,11 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio, mbio->bi_private = r10_bio; if (conf->mddev->gendisk) - trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev), + trace_block_bio_remap(mbio->bi_disk->queue, mbio, disk_devt(conf->mddev->gendisk), r10_bio->sector); /* flush_pending_writes() needs access to the rdev so...*/ - mbio->bi_bdev = (void *)rdev; + mbio->bi_disk = (void *)rdev; atomic_inc(&r10_bio->remaining); @@ -2094,7 +2094,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) if (test_bit(FailFast, &conf->mirrors[d].rdev->flags)) tbio->bi_opf |= MD_FAILFAST; tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset; - tbio->bi_bdev = conf->mirrors[d].rdev->bdev; + bio_set_dev(tbio, conf->mirrors[d].rdev->bdev); generic_make_request(tbio); } @@ -2552,7 +2552,7 @@ static int narrow_write_error(struct r10bio *r10_bio, int i) wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector); wbio->bi_iter.bi_sector = wsector + choose_data_offset(r10_bio, rdev); - wbio->bi_bdev = rdev->bdev; + bio_set_dev(wbio, rdev->bdev); bio_set_op_attrs(wbio, REQ_OP_WRITE, 0); if (submit_bio_wait(wbio) < 0) @@ -2575,7 +2575,6 @@ static void handle_read_error(struct 
mddev *mddev, struct r10bio *r10_bio) struct bio *bio; struct r10conf *conf = mddev->private; struct md_rdev *rdev = r10_bio->devs[slot].rdev; - dev_t bio_dev; sector_t bio_last_sector; /* we got a read error. Maybe the drive is bad. Maybe just @@ -2587,7 +2586,6 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio) * frozen. */ bio = r10_bio->devs[slot].bio; - bio_dev = bio->bi_bdev->bd_dev; bio_last_sector = r10_bio->devs[slot].addr + rdev->data_offset + r10_bio->sectors; bio_put(bio); r10_bio->devs[slot].bio = NULL; @@ -2798,6 +2796,35 @@ static int init_resync(struct r10conf *conf) return 0; } +static struct r10bio *raid10_alloc_init_r10buf(struct r10conf *conf) +{ + struct r10bio *r10bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); + struct rsync_pages *rp; + struct bio *bio; + int nalloc; + int i; + + if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) || + test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery)) + nalloc = conf->copies; /* resync */ + else + nalloc = 2; /* recovery */ + + for (i = 0; i < nalloc; i++) { + bio = r10bio->devs[i].bio; + rp = bio->bi_private; + bio_reset(bio); + bio->bi_private = rp; + bio = r10bio->devs[i].repl_bio; + if (bio) { + rp = bio->bi_private; + bio_reset(bio); + bio->bi_private = rp; + } + } + return r10bio; +} + /* * perform a "sync" on one "block" * @@ -2950,7 +2977,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, /* Again, very different code for resync and recovery. * Both must result in an r10bio with a list of bios that - * have bi_end_io, bi_sector, bi_bdev set, + * have bi_end_io, bi_sector, bi_disk set, * and bi_private set to the r10bio. * For recovery, we may actually create several r10bios * with 2 bios in each, that correspond to the bios in the main one. 
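raid1_alloc_init_r1buf() and raid10_alloc_init_r10buf(), added above, exist because bio_reset() wipes every field up to bi_max_vecs, including bi_private, which for these pooled resync bios points at the preallocated resync page bundle. Stripped of the md specifics, the save/restore they perform reduces to roughly this (reinit_pooled_bio is a hypothetical name; the real helpers walk r1bio->bios[] and r10bio->devs[]):

/*
 * Sketch of the reset-but-keep-bi_private pattern used by the new
 * resync-buffer initializers; assumes the one-argument bio_reset() of
 * this kernel generation.
 */
#include <linux/bio.h>

static void reinit_pooled_bio(struct bio *bio)
{
	void *rp = bio->bi_private;	/* page bundle attached when the pool was built */

	bio_reset(bio);			/* clears bi_end_io, bi_private, flags, iter, ... */
	bio->bi_private = rp;		/* restore so the resync code can still find its pages */
}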
@@ -3027,7 +3054,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, atomic_inc(&mreplace->nr_pending); rcu_read_unlock(); - r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); + r10_bio = raid10_alloc_init_r10buf(conf); r10_bio->state = 0; raise_barrier(conf, rb2 != NULL); atomic_set(&r10_bio->remaining, 0); @@ -3095,7 +3122,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, from_addr = r10_bio->devs[j].addr; bio->bi_iter.bi_sector = from_addr + rdev->data_offset; - bio->bi_bdev = rdev->bdev; + bio_set_dev(bio, rdev->bdev); atomic_inc(&rdev->nr_pending); /* and we write to 'i' (if not in_sync) */ @@ -3117,7 +3144,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, bio_set_op_attrs(bio, REQ_OP_WRITE, 0); bio->bi_iter.bi_sector = to_addr + mrdev->data_offset; - bio->bi_bdev = mrdev->bdev; + bio_set_dev(bio, mrdev->bdev); atomic_inc(&r10_bio->remaining); } else r10_bio->devs[1].bio->bi_end_io = NULL; @@ -3143,7 +3170,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, bio_set_op_attrs(bio, REQ_OP_WRITE, 0); bio->bi_iter.bi_sector = to_addr + mreplace->data_offset; - bio->bi_bdev = mreplace->bdev; + bio_set_dev(bio, mreplace->bdev); atomic_inc(&r10_bio->remaining); break; } @@ -3236,7 +3263,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, } if (sync_blocks < max_sync) max_sync = sync_blocks; - r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); + r10_bio = raid10_alloc_init_r10buf(conf); r10_bio->state = 0; r10_bio->mddev = mddev; @@ -3289,7 +3316,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, if (test_bit(FailFast, &rdev->flags)) bio->bi_opf |= MD_FAILFAST; bio->bi_iter.bi_sector = sector + rdev->data_offset; - bio->bi_bdev = rdev->bdev; + bio_set_dev(bio, rdev->bdev); count++; rdev = rcu_dereference(conf->mirrors[d].replacement); @@ -3311,7 +3338,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, if (test_bit(FailFast, &rdev->flags)) bio->bi_opf |= MD_FAILFAST; bio->bi_iter.bi_sector = sector + rdev->data_offset; - bio->bi_bdev = rdev->bdev; + bio_set_dev(bio, rdev->bdev); count++; rcu_read_unlock(); } @@ -3367,7 +3394,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, r10_bio->sectors = nr_sectors; if (bio->bi_end_io == end_sync_read) { - md_sync_acct(bio->bi_bdev, nr_sectors); + md_sync_acct_bio(bio, nr_sectors); bio->bi_status = 0; generic_make_request(bio); } @@ -4360,7 +4387,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, read_more: /* Now schedule reads for blocks from sector_nr to last */ - r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); + r10_bio = raid10_alloc_init_r10buf(conf); r10_bio->state = 0; raise_barrier(conf, sectors_done != 0); atomic_set(&r10_bio->remaining, 0); @@ -4383,7 +4410,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev); - read_bio->bi_bdev = rdev->bdev; + bio_set_dev(read_bio, rdev->bdev); read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr + rdev->data_offset); read_bio->bi_private = r10_bio; @@ -4417,7 +4444,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, if (!rdev2 || test_bit(Faulty, &rdev2->flags)) continue; - b->bi_bdev = rdev2->bdev; + bio_set_dev(b, rdev2->bdev); b->bi_iter.bi_sector = r10_bio->devs[s/2].addr + rdev2->new_data_offset; 
b->bi_end_io = end_reshape_write; @@ -4449,7 +4476,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, r10_bio->sectors = nr_sectors; /* Now submit the read */ - md_sync_acct(read_bio->bi_bdev, r10_bio->sectors); + md_sync_acct_bio(read_bio, r10_bio->sectors); atomic_inc(&r10_bio->remaining); read_bio->bi_next = NULL; generic_make_request(read_bio); @@ -4511,7 +4538,7 @@ static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio) } atomic_inc(&rdev->nr_pending); rcu_read_unlock(); - md_sync_acct(b->bi_bdev, r10_bio->sectors); + md_sync_acct_bio(b, r10_bio->sectors); atomic_inc(&r10_bio->remaining); b->bi_next = NULL; generic_make_request(b); diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 2dcbafa8e66c..0b7406ac8ce1 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -745,7 +745,7 @@ static struct bio *r5l_bio_alloc(struct r5l_log *log) struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, log->bs); bio_set_op_attrs(bio, REQ_OP_WRITE, 0); - bio->bi_bdev = log->rdev->bdev; + bio_set_dev(bio, log->rdev->bdev); bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start; return bio; @@ -1313,7 +1313,7 @@ void r5l_flush_stripe_to_raid(struct r5l_log *log) if (!do_flush) return; bio_reset(&log->flush_bio); - log->flush_bio.bi_bdev = log->rdev->bdev; + bio_set_dev(&log->flush_bio, log->rdev->bdev); log->flush_bio.bi_end_io = r5l_log_flush_endio; log->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; submit_bio(&log->flush_bio); @@ -1691,7 +1691,7 @@ static int r5l_recovery_fetch_ra_pool(struct r5l_log *log, sector_t offset) { bio_reset(ctx->ra_bio); - ctx->ra_bio->bi_bdev = log->rdev->bdev; + bio_set_dev(ctx->ra_bio, log->rdev->bdev); bio_set_op_attrs(ctx->ra_bio, REQ_OP_READ, 0); ctx->ra_bio->bi_iter.bi_sector = log->rdev->data_offset + offset; @@ -2529,11 +2529,18 @@ static void r5l_write_super(struct r5l_log *log, sector_t cp) static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page) { - struct r5conf *conf = mddev->private; + struct r5conf *conf; int ret; - if (!conf->log) + ret = mddev_lock(mddev); + if (ret) + return ret; + + conf = mddev->private; + if (!conf || !conf->log) { + mddev_unlock(mddev); return 0; + } switch (conf->log->r5c_journal_mode) { case R5C_JOURNAL_MODE_WRITE_THROUGH: @@ -2551,6 +2558,7 @@ static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page) default: ret = 0; } + mddev_unlock(mddev); return ret; } diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c index 44ad5baf3206..cd026c88f7ef 100644 --- a/drivers/md/raid5-ppl.c +++ b/drivers/md/raid5-ppl.c @@ -87,6 +87,8 @@ * The current io_unit accepting new stripes is always at the end of the list. 
*/ +#define PPL_SPACE_SIZE (128 * 1024) + struct ppl_conf { struct mddev *mddev; @@ -122,6 +124,10 @@ struct ppl_log { * always at the end of io_list */ spinlock_t io_list_lock; struct list_head io_list; /* all io_units of this log */ + + sector_t next_io_sector; + unsigned int entry_space; + bool use_multippl; }; #define PPL_IO_INLINE_BVECS 32 @@ -264,13 +270,12 @@ static int ppl_log_stripe(struct ppl_log *log, struct stripe_head *sh) int i; sector_t data_sector = 0; int data_disks = 0; - unsigned int entry_space = (log->rdev->ppl.size << 9) - PPL_HEADER_SIZE; struct r5conf *conf = sh->raid_conf; pr_debug("%s: stripe: %llu\n", __func__, (unsigned long long)sh->sector); /* check if current io_unit is full */ - if (io && (io->pp_size == entry_space || + if (io && (io->pp_size == log->entry_space || io->entries_count == PPL_HDR_MAX_ENTRIES)) { pr_debug("%s: add io_unit blocked by seq: %llu\n", __func__, io->seq); @@ -415,7 +420,7 @@ static void ppl_submit_iounit_bio(struct ppl_io_unit *io, struct bio *bio) pr_debug("%s: seq: %llu size: %u sector: %llu dev: %s\n", __func__, io->seq, bio->bi_iter.bi_size, (unsigned long long)bio->bi_iter.bi_sector, - bdevname(bio->bi_bdev, b)); + bio_devname(bio, b)); submit_bio(bio); } @@ -451,12 +456,25 @@ static void ppl_submit_iounit(struct ppl_io_unit *io) pplhdr->entries_count = cpu_to_le32(io->entries_count); pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PPL_HEADER_SIZE)); + /* Rewind the buffer if current PPL is larger then remaining space */ + if (log->use_multippl && + log->rdev->ppl.sector + log->rdev->ppl.size - log->next_io_sector < + (PPL_HEADER_SIZE + io->pp_size) >> 9) + log->next_io_sector = log->rdev->ppl.sector; + + bio->bi_end_io = ppl_log_endio; bio->bi_opf = REQ_OP_WRITE | REQ_FUA; - bio->bi_bdev = log->rdev->bdev; - bio->bi_iter.bi_sector = log->rdev->ppl.sector; + bio_set_dev(bio, log->rdev->bdev); + bio->bi_iter.bi_sector = log->next_io_sector; bio_add_page(bio, io->header_page, PAGE_SIZE, 0); + pr_debug("%s: log->current_io_sector: %llu\n", __func__, + (unsigned long long)log->next_io_sector); + + if (log->use_multippl) + log->next_io_sector += (PPL_HEADER_SIZE + io->pp_size) >> 9; + list_for_each_entry(sh, &io->stripe_list, log_list) { /* entries for full stripe writes have no partial parity */ if (test_bit(STRIPE_FULL_WRITE, &sh->state)) @@ -468,7 +486,7 @@ static void ppl_submit_iounit(struct ppl_io_unit *io) bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, ppl_conf->bs); bio->bi_opf = prev->bi_opf; - bio->bi_bdev = prev->bi_bdev; + bio_copy_dev(bio, prev); bio->bi_iter.bi_sector = bio_end_sector(prev); bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0); @@ -813,12 +831,14 @@ static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e, return ret; } -static int ppl_recover(struct ppl_log *log, struct ppl_header *pplhdr) +static int ppl_recover(struct ppl_log *log, struct ppl_header *pplhdr, + sector_t offset) { struct ppl_conf *ppl_conf = log->ppl_conf; struct md_rdev *rdev = log->rdev; struct mddev *mddev = rdev->mddev; - sector_t ppl_sector = rdev->ppl.sector + (PPL_HEADER_SIZE >> 9); + sector_t ppl_sector = rdev->ppl.sector + offset + + (PPL_HEADER_SIZE >> 9); struct page *page; int i; int ret = 0; @@ -902,6 +922,9 @@ static int ppl_write_empty_header(struct ppl_log *log) return -ENOMEM; pplhdr = page_address(page); + /* zero out PPL space to avoid collision with old PPLs */ + blkdev_issue_zeroout(rdev->bdev, rdev->ppl.sector, + log->rdev->ppl.size, GFP_NOIO, 0); memset(pplhdr->reserved, 0xff, 
PPL_HDR_RESERVED); pplhdr->signature = cpu_to_le32(log->ppl_conf->signature); pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PAGE_SIZE)); @@ -922,63 +945,110 @@ static int ppl_load_distributed(struct ppl_log *log) struct ppl_conf *ppl_conf = log->ppl_conf; struct md_rdev *rdev = log->rdev; struct mddev *mddev = rdev->mddev; - struct page *page; - struct ppl_header *pplhdr; + struct page *page, *page2, *tmp; + struct ppl_header *pplhdr = NULL, *prev_pplhdr = NULL; u32 crc, crc_stored; u32 signature; - int ret = 0; + int ret = 0, i; + sector_t pplhdr_offset = 0, prev_pplhdr_offset = 0; pr_debug("%s: disk: %d\n", __func__, rdev->raid_disk); - - /* read PPL header */ + /* read PPL headers, find the recent one */ page = alloc_page(GFP_KERNEL); if (!page) return -ENOMEM; - if (!sync_page_io(rdev, rdev->ppl.sector - rdev->data_offset, - PAGE_SIZE, page, REQ_OP_READ, 0, false)) { - md_error(mddev, rdev); - ret = -EIO; - goto out; + page2 = alloc_page(GFP_KERNEL); + if (!page2) { + __free_page(page); + return -ENOMEM; } - pplhdr = page_address(page); - /* check header validity */ - crc_stored = le32_to_cpu(pplhdr->checksum); - pplhdr->checksum = 0; - crc = ~crc32c_le(~0, pplhdr, PAGE_SIZE); + /* searching ppl area for latest ppl */ + while (pplhdr_offset < rdev->ppl.size - (PPL_HEADER_SIZE >> 9)) { + if (!sync_page_io(rdev, + rdev->ppl.sector - rdev->data_offset + + pplhdr_offset, PAGE_SIZE, page, REQ_OP_READ, + 0, false)) { + md_error(mddev, rdev); + ret = -EIO; + /* if not able to read - don't recover any PPL */ + pplhdr = NULL; + break; + } + pplhdr = page_address(page); - if (crc_stored != crc) { - pr_debug("%s: ppl header crc does not match: stored: 0x%x calculated: 0x%x\n", - __func__, crc_stored, crc); + /* check header validity */ + crc_stored = le32_to_cpu(pplhdr->checksum); + pplhdr->checksum = 0; + crc = ~crc32c_le(~0, pplhdr, PAGE_SIZE); + + if (crc_stored != crc) { + pr_debug("%s: ppl header crc does not match: stored: 0x%x calculated: 0x%x (offset: %llu)\n", + __func__, crc_stored, crc, + (unsigned long long)pplhdr_offset); + pplhdr = prev_pplhdr; + pplhdr_offset = prev_pplhdr_offset; + break; + } + + signature = le32_to_cpu(pplhdr->signature); + + if (mddev->external) { + /* + * For external metadata the header signature is set and + * validated in userspace. + */ + ppl_conf->signature = signature; + } else if (ppl_conf->signature != signature) { + pr_debug("%s: ppl header signature does not match: stored: 0x%x configured: 0x%x (offset: %llu)\n", + __func__, signature, ppl_conf->signature, + (unsigned long long)pplhdr_offset); + pplhdr = prev_pplhdr; + pplhdr_offset = prev_pplhdr_offset; + break; + } + + if (prev_pplhdr && le64_to_cpu(prev_pplhdr->generation) > + le64_to_cpu(pplhdr->generation)) { + /* previous was newest */ + pplhdr = prev_pplhdr; + pplhdr_offset = prev_pplhdr_offset; + break; + } + + prev_pplhdr_offset = pplhdr_offset; + prev_pplhdr = pplhdr; + + tmp = page; + page = page2; + page2 = tmp; + + /* calculate next potential ppl offset */ + for (i = 0; i < le32_to_cpu(pplhdr->entries_count); i++) + pplhdr_offset += + le32_to_cpu(pplhdr->entries[i].pp_size) >> 9; + pplhdr_offset += PPL_HEADER_SIZE >> 9; + } + + /* no valid ppl found */ + if (!pplhdr) ppl_conf->mismatch_count++; - goto out; - } - - signature = le32_to_cpu(pplhdr->signature); - - if (mddev->external) { - /* - * For external metadata the header signature is set and - * validated in userspace. 
- */ - ppl_conf->signature = signature; - } else if (ppl_conf->signature != signature) { - pr_debug("%s: ppl header signature does not match: stored: 0x%x configured: 0x%x\n", - __func__, signature, ppl_conf->signature); - ppl_conf->mismatch_count++; - goto out; - } + else + pr_debug("%s: latest PPL found at offset: %llu, with generation: %llu\n", + __func__, (unsigned long long)pplhdr_offset, + le64_to_cpu(pplhdr->generation)); /* attempt to recover from log if we are starting a dirty array */ - if (!mddev->pers && mddev->recovery_cp != MaxSector) - ret = ppl_recover(log, pplhdr); -out: + if (pplhdr && !mddev->pers && mddev->recovery_cp != MaxSector) + ret = ppl_recover(log, pplhdr, pplhdr_offset); + /* write empty header if we are starting the array */ if (!ret && !mddev->pers) ret = ppl_write_empty_header(log); __free_page(page); + __free_page(page2); pr_debug("%s: return: %d mismatch_count: %d recovered_entries: %d\n", __func__, ret, ppl_conf->mismatch_count, @@ -1031,6 +1101,7 @@ static int ppl_load(struct ppl_conf *ppl_conf) static void __ppl_exit_log(struct ppl_conf *ppl_conf) { clear_bit(MD_HAS_PPL, &ppl_conf->mddev->flags); + clear_bit(MD_HAS_MULTIPLE_PPLS, &ppl_conf->mddev->flags); kfree(ppl_conf->child_logs); @@ -1099,6 +1170,22 @@ static int ppl_validate_rdev(struct md_rdev *rdev) return 0; } +static void ppl_init_child_log(struct ppl_log *log, struct md_rdev *rdev) +{ + if ((rdev->ppl.size << 9) >= (PPL_SPACE_SIZE + + PPL_HEADER_SIZE) * 2) { + log->use_multippl = true; + set_bit(MD_HAS_MULTIPLE_PPLS, + &log->ppl_conf->mddev->flags); + log->entry_space = PPL_SPACE_SIZE; + } else { + log->use_multippl = false; + log->entry_space = (log->rdev->ppl.size << 9) - + PPL_HEADER_SIZE; + } + log->next_io_sector = rdev->ppl.sector; +} + int ppl_init_log(struct r5conf *conf) { struct ppl_conf *ppl_conf; @@ -1196,6 +1283,7 @@ int ppl_init_log(struct r5conf *conf) q = bdev_get_queue(rdev->bdev); if (test_bit(QUEUE_FLAG_WC, &q->queue_flags)) need_cache_flush = true; + ppl_init_child_log(log, rdev); } } @@ -1261,6 +1349,7 @@ int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add) if (!ret) { log->rdev = rdev; ret = ppl_write_empty_header(log); + ppl_init_child_log(log, rdev); } } else { log->rdev = NULL; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 0fc2748aaf95..928e24a07133 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -494,7 +494,6 @@ static int grow_buffers(struct stripe_head *sh, gfp_t gfp) return 0; } -static void raid5_build_block(struct stripe_head *sh, int i, int previous); static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous, struct stripe_head *sh); @@ -530,7 +529,7 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) WARN_ON(1); } dev->flags = 0; - raid5_build_block(sh, i, previous); + dev->sector = raid5_compute_blocknr(sh, i, previous); } if (read_seqcount_retry(&conf->gen_lock, seq)) goto retry; @@ -812,6 +811,14 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh spin_unlock(&head->batch_head->batch_lock); goto unlock_out; } + /* + * We must assign batch_head of this stripe within the + * batch_lock, otherwise clear_batch_ready of batch head + * stripe could clear BATCH_READY bit of this stripe and + * this stripe->batch_head doesn't get assigned, which + * could confuse clear_batch_ready for this stripe + */ + sh->batch_head = head->batch_head; /* * at this point, head's BATCH_READY could be cleared, but we @@ -819,8 +826,6 @@ static void 
stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh */ list_add(&sh->batch_list, &head->batch_list); spin_unlock(&head->batch_head->batch_lock); - - sh->batch_head = head->batch_head; } else { head->batch_head = head; sh->batch_head = head->batch_head; @@ -1096,7 +1101,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) set_bit(STRIPE_IO_STARTED, &sh->state); - bi->bi_bdev = rdev->bdev; + bio_set_dev(bi, rdev->bdev); bio_set_op_attrs(bi, op, op_flags); bi->bi_end_io = op_is_write(op) ? raid5_end_write_request @@ -1145,7 +1150,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags); if (conf->mddev->gendisk) - trace_block_bio_remap(bdev_get_queue(bi->bi_bdev), + trace_block_bio_remap(bi->bi_disk->queue, bi, disk_devt(conf->mddev->gendisk), sh->dev[i].sector); if (should_defer && op_is_write(op)) @@ -1160,7 +1165,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) set_bit(STRIPE_IO_STARTED, &sh->state); - rbi->bi_bdev = rrdev->bdev; + bio_set_dev(rbi, rrdev->bdev); bio_set_op_attrs(rbi, op, op_flags); BUG_ON(!op_is_write(op)); rbi->bi_end_io = raid5_end_write_request; @@ -1193,7 +1198,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) if (op == REQ_OP_DISCARD) rbi->bi_vcnt = 0; if (conf->mddev->gendisk) - trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev), + trace_block_bio_remap(rbi->bi_disk->queue, rbi, disk_devt(conf->mddev->gendisk), sh->dev[i].sector); if (should_defer && op_is_write(op)) @@ -2662,14 +2667,6 @@ static void raid5_end_write_request(struct bio *bi) raid5_release_stripe(sh->batch_head); } -static void raid5_build_block(struct stripe_head *sh, int i, int previous) -{ - struct r5dev *dev = &sh->dev[i]; - - dev->flags = 0; - dev->sector = raid5_compute_blocknr(sh, i, previous); -} - static void raid5_error(struct mddev *mddev, struct md_rdev *rdev) { char b[BDEVNAME_SIZE]; @@ -4608,7 +4605,8 @@ static void break_stripe_batch_list(struct stripe_head *head_sh, set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS | (1 << STRIPE_PREREAD_ACTIVE) | - (1 << STRIPE_DEGRADED)), + (1 << STRIPE_DEGRADED) | + (1 << STRIPE_ON_UNPLUG_LIST)), head_sh->state & (1 << STRIPE_INSYNC)); sh->check_state = head_sh->check_state; @@ -5092,10 +5090,12 @@ static int raid5_congested(struct mddev *mddev, int bits) static int in_chunk_boundary(struct mddev *mddev, struct bio *bio) { struct r5conf *conf = mddev->private; - sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev); + sector_t sector = bio->bi_iter.bi_sector; unsigned int chunk_sectors; unsigned int bio_sectors = bio_sectors(bio); + WARN_ON_ONCE(bio->bi_partno); + chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors); return chunk_sectors >= ((sector & (chunk_sectors - 1)) + bio_sectors); @@ -5231,7 +5231,7 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio) atomic_inc(&rdev->nr_pending); rcu_read_unlock(); raid_bio->bi_next = (void*)rdev; - align_bi->bi_bdev = rdev->bdev; + bio_set_dev(align_bi, rdev->bdev); bio_clear_flag(align_bi, BIO_SEG_VALID); if (is_badblock(rdev, align_bi->bi_iter.bi_sector, @@ -5253,7 +5253,7 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio) spin_unlock_irq(&conf->device_lock); if (mddev->gendisk) - trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev), + trace_block_bio_remap(align_bi->bi_disk->queue, align_bi, disk_devt(mddev->gendisk), 
raid_bio->bi_iter.bi_sector); generic_make_request(align_bi); @@ -6235,6 +6235,10 @@ static void raid5_do_work(struct work_struct *work) spin_unlock_irq(&conf->device_lock); + flush_deferred_bios(conf); + + r5l_flush_stripe_to_raid(conf->log); + async_tx_issue_pending_all(); blk_finish_plug(&plug); @@ -6571,14 +6575,17 @@ static ssize_t raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len) { struct r5conf *conf; - unsigned long new; + unsigned int new; int err; struct r5worker_group *new_groups, *old_groups; int group_cnt, worker_cnt_per_group; if (len >= PAGE_SIZE) return -EINVAL; - if (kstrtoul(page, 10, &new)) + if (kstrtouint(page, 10, &new)) + return -EINVAL; + /* 8192 should be big enough */ + if (new > 8192) return -EINVAL; err = mddev_lock(mddev); @@ -7241,6 +7248,7 @@ static int raid5_run(struct mddev *mddev) pr_warn("md/raid:%s: using journal device and PPL not allowed - disabling PPL\n", mdname(mddev)); clear_bit(MD_HAS_PPL, &mddev->flags); + clear_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags); } if (mddev->private == NULL) diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig index 55d9c2b82b7e..edfe99b22d56 100644 --- a/drivers/media/Kconfig +++ b/drivers/media/Kconfig @@ -8,6 +8,11 @@ config CEC_CORE config CEC_NOTIFIER bool +config CEC_PIN + bool + +source "drivers/media/rc/Kconfig" + menuconfig MEDIA_SUPPORT tristate "Multimedia support" depends on HAS_IOMEM @@ -72,20 +77,6 @@ config MEDIA_SDR_SUPPORT Say Y when you have a software defined radio device. -config MEDIA_RC_SUPPORT - bool "Remote Controller support" - depends on INPUT - ---help--- - Enable support for Remote Controllers on Linux. This is - needed in order to support several video capture adapters, - standalone IR receivers/transmitters, and RF receivers. - - Enable this option if you have a video capture board even - if you don't need IR, as otherwise, you may not be able to - compile the driver for your adapter. - - Say Y when you have a TV or an IR device. - config MEDIA_CEC_SUPPORT bool "HDMI CEC support" ---help--- @@ -175,7 +166,6 @@ config TTPCI_EEPROM source "drivers/media/dvb-core/Kconfig" comment "Media drivers" -source "drivers/media/rc/Kconfig" # # V4L platform/mem2mem drivers diff --git a/drivers/media/cec/Makefile b/drivers/media/cec/Makefile index eaf408e64669..3353c1741961 100644 --- a/drivers/media/cec/Makefile +++ b/drivers/media/cec/Makefile @@ -4,4 +4,8 @@ ifeq ($(CONFIG_CEC_NOTIFIER),y) cec-objs += cec-notifier.o endif +ifeq ($(CONFIG_CEC_PIN),y) + cec-objs += cec-pin.o +endif + obj-$(CONFIG_CEC_CORE) += cec.o diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c index d596b601ff42..eed6c397d840 100644 --- a/drivers/media/cec/cec-adap.c +++ b/drivers/media/cec/cec-adap.c @@ -78,42 +78,62 @@ static unsigned int cec_log_addr2dev(const struct cec_adapter *adap, u8 log_addr * Queue a new event for this filehandle. If ts == 0, then set it * to the current time. * - * The two events that are currently defined do not need to keep track - * of intermediate events, so no actual queue of events is needed, - * instead just store the latest state and the total number of lost - * messages. - * - * Should new events be added in the future that require intermediate - * results to be queued as well, then a proper queue data structure is - * required. But until then, just keep it simple. + * We keep a queue of at most max_event events where max_event differs + * per event. 
If the queue becomes full, then drop the oldest event and + * keep track of how many events we've dropped. */ void cec_queue_event_fh(struct cec_fh *fh, const struct cec_event *new_ev, u64 ts) { - struct cec_event *ev = &fh->events[new_ev->event - 1]; + static const u8 max_events[CEC_NUM_EVENTS] = { + 1, 1, 64, 64, + }; + struct cec_event_entry *entry; + unsigned int ev_idx = new_ev->event - 1; + + if (WARN_ON(ev_idx >= ARRAY_SIZE(fh->events))) + return; if (ts == 0) ts = ktime_get_ns(); mutex_lock(&fh->lock); - if (new_ev->event == CEC_EVENT_LOST_MSGS && - fh->pending_events & (1 << new_ev->event)) { - /* - * If there is already a lost_msgs event, then just - * update the lost_msgs count. This effectively - * merges the old and new events into one. - */ - ev->lost_msgs.lost_msgs += new_ev->lost_msgs.lost_msgs; - goto unlock; - } + if (ev_idx < CEC_NUM_CORE_EVENTS) + entry = &fh->core_events[ev_idx]; + else + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (entry) { + if (new_ev->event == CEC_EVENT_LOST_MSGS && + fh->queued_events[ev_idx]) { + entry->ev.lost_msgs.lost_msgs += + new_ev->lost_msgs.lost_msgs; + goto unlock; + } + entry->ev = *new_ev; + entry->ev.ts = ts; - /* - * Intermediate states are not interesting, so just - * overwrite any older event. - */ - *ev = *new_ev; - ev->ts = ts; - fh->pending_events |= 1 << new_ev->event; + if (fh->queued_events[ev_idx] < max_events[ev_idx]) { + /* Add new msg at the end of the queue */ + list_add_tail(&entry->list, &fh->events[ev_idx]); + fh->queued_events[ev_idx]++; + fh->total_queued_events++; + goto unlock; + } + + if (ev_idx >= CEC_NUM_CORE_EVENTS) { + list_add_tail(&entry->list, &fh->events[ev_idx]); + /* drop the oldest event */ + entry = list_first_entry(&fh->events[ev_idx], + struct cec_event_entry, list); + list_del(&entry->list); + kfree(entry); + } + } + /* Mark that events were lost */ + entry = list_first_entry_or_null(&fh->events[ev_idx], + struct cec_event_entry, list); + if (entry) + entry->ev.flags |= CEC_EVENT_FL_DROPPED_EVENTS; unlock: mutex_unlock(&fh->lock); @@ -133,47 +153,71 @@ static void cec_queue_event(struct cec_adapter *adap, mutex_unlock(&adap->devnode.lock); } +/* Notify userspace that the CEC pin changed state at the given time. */ +void cec_queue_pin_cec_event(struct cec_adapter *adap, bool is_high, ktime_t ts) +{ + struct cec_event ev = { + .event = is_high ? CEC_EVENT_PIN_CEC_HIGH : + CEC_EVENT_PIN_CEC_LOW, + }; + struct cec_fh *fh; + + mutex_lock(&adap->devnode.lock); + list_for_each_entry(fh, &adap->devnode.fhs, list) + if (fh->mode_follower == CEC_MODE_MONITOR_PIN) + cec_queue_event_fh(fh, &ev, ktime_to_ns(ts)); + mutex_unlock(&adap->devnode.lock); +} +EXPORT_SYMBOL_GPL(cec_queue_pin_cec_event); + /* - * Queue a new message for this filehandle. If there is no more room - * in the queue, then send the LOST_MSGS event instead. + * Queue a new message for this filehandle. + * + * We keep a queue of at most CEC_MAX_MSG_RX_QUEUE_SZ messages. If the + * queue becomes full, then drop the oldest message and keep track + * of how many messages we've dropped. 
*/ static void cec_queue_msg_fh(struct cec_fh *fh, const struct cec_msg *msg) { - static const struct cec_event ev_lost_msg = { - .ts = 0, + static const struct cec_event ev_lost_msgs = { .event = CEC_EVENT_LOST_MSGS, .flags = 0, { - .lost_msgs.lost_msgs = 1, + .lost_msgs = { 1 }, }, }; struct cec_msg_entry *entry; mutex_lock(&fh->lock); entry = kmalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) - goto lost_msgs; + if (entry) { + entry->msg = *msg; + /* Add new msg at the end of the queue */ + list_add_tail(&entry->list, &fh->msgs); - entry->msg = *msg; - /* Add new msg at the end of the queue */ - list_add_tail(&entry->list, &fh->msgs); + if (fh->queued_msgs < CEC_MAX_MSG_RX_QUEUE_SZ) { + /* All is fine if there is enough room */ + fh->queued_msgs++; + mutex_unlock(&fh->lock); + wake_up_interruptible(&fh->wait); + return; + } + + /* + * if the message queue is full, then drop the oldest one and + * send a lost message event. + */ + entry = list_first_entry(&fh->msgs, struct cec_msg_entry, list); + list_del(&entry->list); + kfree(entry); + } + mutex_unlock(&fh->lock); /* - * if the queue now has more than CEC_MAX_MSG_RX_QUEUE_SZ - * messages, drop the oldest one and send a lost message event. + * We lost a message, either because kmalloc failed or the queue + * was full. */ - if (fh->queued_msgs == CEC_MAX_MSG_RX_QUEUE_SZ) { - list_del(&entry->list); - goto lost_msgs; - } - fh->queued_msgs++; - mutex_unlock(&fh->lock); - wake_up_interruptible(&fh->wait); - return; - -lost_msgs: - mutex_unlock(&fh->lock); - cec_queue_event_fh(fh, &ev_lost_msg, 0); + cec_queue_event_fh(fh, &ev_lost_msgs, ktime_get_ns()); } /* @@ -394,13 +438,17 @@ int cec_thread_func(void *_adap) if (adap->transmitting && timeout) { /* - * If we timeout, then log that. This really shouldn't - * happen and is an indication of a faulty CEC adapter - * driver, or the CEC bus is in some weird state. + * If we timeout, then log that. Normally this does + * not happen and it is an indication of a faulty CEC + * adapter driver, or the CEC bus is in some weird + * state. On rare occasions it can happen if there is + * so much traffic on the bus that the adapter was + * unable to transmit for CEC_XFER_TIMEOUT_MS (2.1s). */ - dprintk(0, "%s: message %*ph timed out!\n", __func__, + dprintk(1, "%s: message %*ph timed out\n", __func__, adap->transmitting->msg.len, adap->transmitting->msg.msg); + adap->tx_timeouts++; /* Just give up on this. */ cec_data_cancel(adap->transmitting); goto unlock; @@ -467,14 +515,19 @@ int cec_thread_func(void *_adap) /* * Called by the CEC adapter if a transmit finished. */ -void cec_transmit_done(struct cec_adapter *adap, u8 status, u8 arb_lost_cnt, - u8 nack_cnt, u8 low_drive_cnt, u8 error_cnt) +void cec_transmit_done_ts(struct cec_adapter *adap, u8 status, + u8 arb_lost_cnt, u8 nack_cnt, u8 low_drive_cnt, + u8 error_cnt, ktime_t ts) { struct cec_data *data; struct cec_msg *msg; - u64 ts = ktime_get_ns(); + unsigned int attempts_made = arb_lost_cnt + nack_cnt + + low_drive_cnt + error_cnt; dprintk(2, "%s: status %02x\n", __func__, status); + if (attempts_made < 1) + attempts_made = 1; + mutex_lock(&adap->lock); data = adap->transmitting; if (!data) { @@ -492,7 +545,7 @@ void cec_transmit_done(struct cec_adapter *adap, u8 status, u8 arb_lost_cnt, /* Drivers must fill in the status! 
*/ WARN_ON(status == 0); - msg->tx_ts = ts; + msg->tx_ts = ktime_to_ns(ts); msg->tx_status |= status; msg->tx_arb_lost_cnt += arb_lost_cnt; msg->tx_nack_cnt += nack_cnt; @@ -507,10 +560,10 @@ void cec_transmit_done(struct cec_adapter *adap, u8 status, u8 arb_lost_cnt, * the hardware didn't signal that it retried itself (by setting * CEC_TX_STATUS_MAX_RETRIES), then we will retry ourselves. */ - if (data->attempts > 1 && + if (data->attempts > attempts_made && !(status & (CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_OK))) { /* Retry this message */ - data->attempts--; + data->attempts -= attempts_made; if (msg->timeout) dprintk(2, "retransmit: %*ph (attempts: %d, wait for 0x%02x)\n", msg->len, msg->msg, data->attempts, msg->reply); @@ -555,25 +608,26 @@ void cec_transmit_done(struct cec_adapter *adap, u8 status, u8 arb_lost_cnt, unlock: mutex_unlock(&adap->lock); } -EXPORT_SYMBOL_GPL(cec_transmit_done); +EXPORT_SYMBOL_GPL(cec_transmit_done_ts); -void cec_transmit_attempt_done(struct cec_adapter *adap, u8 status) +void cec_transmit_attempt_done_ts(struct cec_adapter *adap, + u8 status, ktime_t ts) { switch (status & ~CEC_TX_STATUS_MAX_RETRIES) { case CEC_TX_STATUS_OK: - cec_transmit_done(adap, status, 0, 0, 0, 0); + cec_transmit_done_ts(adap, status, 0, 0, 0, 0, ts); return; case CEC_TX_STATUS_ARB_LOST: - cec_transmit_done(adap, status, 1, 0, 0, 0); + cec_transmit_done_ts(adap, status, 1, 0, 0, 0, ts); return; case CEC_TX_STATUS_NACK: - cec_transmit_done(adap, status, 0, 1, 0, 0); + cec_transmit_done_ts(adap, status, 0, 1, 0, 0, ts); return; case CEC_TX_STATUS_LOW_DRIVE: - cec_transmit_done(adap, status, 0, 0, 1, 0); + cec_transmit_done_ts(adap, status, 0, 0, 1, 0, ts); return; case CEC_TX_STATUS_ERROR: - cec_transmit_done(adap, status, 0, 0, 0, 1); + cec_transmit_done_ts(adap, status, 0, 0, 0, 1, ts); return; default: /* Should never happen */ @@ -581,7 +635,7 @@ void cec_transmit_attempt_done(struct cec_adapter *adap, u8 status) return; } } -EXPORT_SYMBOL_GPL(cec_transmit_attempt_done); +EXPORT_SYMBOL_GPL(cec_transmit_attempt_done_ts); /* * Called when waiting for a reply times out. @@ -630,9 +684,7 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg, msg->tx_nack_cnt = 0; msg->tx_low_drive_cnt = 0; msg->tx_error_cnt = 0; - msg->sequence = ++adap->sequence; - if (!msg->sequence) - msg->sequence = ++adap->sequence; + msg->sequence = 0; if (msg->reply && msg->timeout == 0) { /* Make sure the timeout isn't 0. */ @@ -671,6 +723,9 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg, msg->tx_status = CEC_TX_STATUS_NACK | CEC_TX_STATUS_MAX_RETRIES; msg->tx_nack_cnt = 1; + msg->sequence = ++adap->sequence; + if (!msg->sequence) + msg->sequence = ++adap->sequence; return 0; } } @@ -705,6 +760,10 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg, if (!data) return -ENOMEM; + msg->sequence = ++adap->sequence; + if (!msg->sequence) + msg->sequence = ++adap->sequence; + if (msg->len > 1 && msg->msg[1] == CEC_MSG_CDC_MESSAGE) { msg->msg[2] = adap->phys_addr >> 8; msg->msg[3] = adap->phys_addr & 0xff; @@ -712,7 +771,8 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg, if (msg->timeout) dprintk(2, "%s: %*ph (wait for 0x%02x%s)\n", - __func__, msg->len, msg->msg, msg->reply, !block ? ", nb" : ""); + __func__, msg->len, msg->msg, msg->reply, + !block ? ", nb" : ""); else dprintk(2, "%s: %*ph%s\n", __func__, msg->len, msg->msg, !block ? 
" (nb)" : ""); @@ -909,7 +969,8 @@ static const u8 cec_msg_size[256] = { }; /* Called by the CEC adapter if a message is received */ -void cec_received_msg(struct cec_adapter *adap, struct cec_msg *msg) +void cec_received_msg_ts(struct cec_adapter *adap, + struct cec_msg *msg, ktime_t ts) { struct cec_data *data; u8 msg_init = cec_msg_initiator(msg); @@ -937,7 +998,7 @@ void cec_received_msg(struct cec_adapter *adap, struct cec_msg *msg) cec_has_log_addr(adap, msg_init)) return; - msg->rx_ts = ktime_get_ns(); + msg->rx_ts = ktime_to_ns(ts); msg->rx_status = CEC_RX_STATUS_OK; msg->sequence = msg->reply = msg->timeout = 0; msg->tx_status = 0; @@ -1102,7 +1163,7 @@ void cec_received_msg(struct cec_adapter *adap, struct cec_msg *msg) */ cec_receive_notify(adap, msg, is_reply); } -EXPORT_SYMBOL_GPL(cec_received_msg); +EXPORT_SYMBOL_GPL(cec_received_msg_ts); /* Logical Address Handling */ @@ -1390,7 +1451,9 @@ static void cec_claim_log_addrs(struct cec_adapter *adap, bool block) */ void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block) { - if (phys_addr == adap->phys_addr || adap->devnode.unregistered) + if (phys_addr == adap->phys_addr) + return; + if (phys_addr != CEC_PHYS_ADDR_INVALID && adap->devnode.unregistered) return; dprintk(1, "new physical address %x.%x.%x.%x\n", @@ -1471,8 +1534,13 @@ int __cec_s_log_addrs(struct cec_adapter *adap, return -ENODEV; if (!log_addrs || log_addrs->num_log_addrs == 0) { - adap->log_addrs.num_log_addrs = 0; cec_adap_unconfigure(adap); + adap->log_addrs.num_log_addrs = 0; + for (i = 0; i < CEC_MAX_LOG_ADDRS; i++) + adap->log_addrs.log_addr[i] = CEC_LOG_ADDR_INVALID; + adap->log_addrs.osd_name[0] = '\0'; + adap->log_addrs.vendor_id = CEC_VENDOR_ID_NONE; + adap->log_addrs.cec_version = CEC_OP_CEC_VERSION_2_0; return 0; } @@ -1704,6 +1772,9 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg, int la_idx = cec_log_addr2idx(adap, dest_laddr); bool from_unregistered = init_laddr == 0xf; struct cec_msg tx_cec_msg = { }; +#ifdef CONFIG_MEDIA_CEC_RC + int scancode; +#endif dprintk(2, "%s: %*ph\n", __func__, msg->len, msg->msg); @@ -1792,11 +1863,9 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg, */ case 0x60: if (msg->len == 2) - rc_keydown(adap->rc, RC_TYPE_CEC, - msg->msg[2], 0); + scancode = msg->msg[2]; else - rc_keydown(adap->rc, RC_TYPE_CEC, - msg->msg[2] << 8 | msg->msg[3], 0); + scancode = msg->msg[2] << 8 | msg->msg[3]; break; /* * Other function messages that are not handled. 
@@ -1809,11 +1878,54 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg, */ case 0x56: case 0x57: case 0x67: case 0x68: case 0x69: case 0x6a: + scancode = -1; break; default: - rc_keydown(adap->rc, RC_TYPE_CEC, msg->msg[2], 0); + scancode = msg->msg[2]; break; } + + /* Was repeating, but keypress timed out */ + if (adap->rc_repeating && !adap->rc->keypressed) { + adap->rc_repeating = false; + adap->rc_last_scancode = -1; + } + /* Different keypress from last time, ends repeat mode */ + if (adap->rc_last_scancode != scancode) { + rc_keyup(adap->rc); + adap->rc_repeating = false; + } + /* We can't handle this scancode */ + if (scancode < 0) { + adap->rc_last_scancode = scancode; + break; + } + + /* Send key press */ + rc_keydown(adap->rc, RC_PROTO_CEC, scancode, 0); + + /* When in repeating mode, we're done */ + if (adap->rc_repeating) + break; + + /* + * We are not repeating, but the new scancode is + * the same as the last one, and this second key press is + * within 550 ms (the 'Follower Safety Timeout') from the + * previous key press, so we now enable the repeating mode. + */ + if (adap->rc_last_scancode == scancode && + msg->rx_ts - adap->rc_last_keypress < 550 * NSEC_PER_MSEC) { + adap->rc_repeating = true; + break; + } + /* + * Not in repeating mode, so avoid triggering repeat mode + * by calling keyup. + */ + rc_keyup(adap->rc); + adap->rc_last_scancode = scancode; + adap->rc_last_keypress = msg->rx_ts; #endif break; @@ -1823,6 +1935,8 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg, break; #ifdef CONFIG_MEDIA_CEC_RC rc_keyup(adap->rc); + adap->rc_repeating = false; + adap->rc_last_scancode = -1; #endif break; @@ -1941,6 +2055,11 @@ int cec_adap_status(struct seq_file *file, void *priv) if (adap->monitor_all_cnt) seq_printf(file, "file handles in Monitor All mode: %u\n", adap->monitor_all_cnt); + if (adap->tx_timeouts) { + seq_printf(file, "transmit timeouts: %u\n", + adap->tx_timeouts); + adap->tx_timeouts = 0; + } data = adap->transmitting; if (data) seq_printf(file, "transmitting message: %*ph (reply: %02x, timeout: %ums)\n", diff --git a/drivers/media/cec/cec-api.c b/drivers/media/cec/cec-api.c index f7eb4c54a354..a079f7fe018c 100644 --- a/drivers/media/cec/cec-api.c +++ b/drivers/media/cec/cec-api.c @@ -30,6 +30,7 @@ #include #include +#include #include "cec-priv.h" static inline struct cec_devnode *cec_devnode_data(struct file *filp) @@ -57,7 +58,7 @@ static unsigned int cec_poll(struct file *filp, res |= POLLOUT | POLLWRNORM; if (fh->queued_msgs) res |= POLLIN | POLLRDNORM; - if (fh->pending_events) + if (fh->total_queued_events) res |= POLLPRI; poll_wait(filp, &fh->wait, poll); mutex_unlock(&adap->lock); @@ -289,15 +290,17 @@ static long cec_receive(struct cec_adapter *adap, struct cec_fh *fh, static long cec_dqevent(struct cec_adapter *adap, struct cec_fh *fh, bool block, struct cec_event __user *parg) { - struct cec_event *ev = NULL; + struct cec_event_entry *ev = NULL; u64 ts = ~0ULL; unsigned int i; + unsigned int ev_idx; long err = 0; mutex_lock(&fh->lock); - while (!fh->pending_events && block) { + while (!fh->total_queued_events && block) { mutex_unlock(&fh->lock); - err = wait_event_interruptible(fh->wait, fh->pending_events); + err = wait_event_interruptible(fh->wait, + fh->total_queued_events); if (err) return err; mutex_lock(&fh->lock); @@ -305,23 +308,29 @@ static long cec_dqevent(struct cec_adapter *adap, struct cec_fh *fh, /* Find the oldest event */ for (i = 0; i < CEC_NUM_EVENTS; i++) { - if 
(fh->pending_events & (1 << (i + 1)) && - fh->events[i].ts <= ts) { - ev = &fh->events[i]; - ts = ev->ts; + struct cec_event_entry *entry = + list_first_entry_or_null(&fh->events[i], + struct cec_event_entry, list); + + if (entry && entry->ev.ts <= ts) { + ev = entry; + ev_idx = i; + ts = ev->ev.ts; } } + if (!ev) { err = -EAGAIN; goto unlock; } + list_del(&ev->list); - if (copy_to_user(parg, ev, sizeof(*ev))) { + if (copy_to_user(parg, &ev->ev, sizeof(ev->ev))) err = -EFAULT; - goto unlock; - } - - fh->pending_events &= ~(1 << ev->event); + if (ev_idx >= CEC_NUM_CORE_EVENTS) + kfree(ev); + fh->queued_events[ev_idx]--; + fh->total_queued_events--; unlock: mutex_unlock(&fh->lock); @@ -348,33 +357,50 @@ static long cec_s_mode(struct cec_adapter *adap, struct cec_fh *fh, if (copy_from_user(&mode, parg, sizeof(mode))) return -EFAULT; - if (mode & ~(CEC_MODE_INITIATOR_MSK | CEC_MODE_FOLLOWER_MSK)) + if (mode & ~(CEC_MODE_INITIATOR_MSK | CEC_MODE_FOLLOWER_MSK)) { + dprintk(1, "%s: invalid mode bits set\n", __func__); return -EINVAL; + } mode_initiator = mode & CEC_MODE_INITIATOR_MSK; mode_follower = mode & CEC_MODE_FOLLOWER_MSK; if (mode_initiator > CEC_MODE_EXCL_INITIATOR || - mode_follower > CEC_MODE_MONITOR_ALL) + mode_follower > CEC_MODE_MONITOR_ALL) { + dprintk(1, "%s: unknown mode\n", __func__); return -EINVAL; + } if (mode_follower == CEC_MODE_MONITOR_ALL && - !(adap->capabilities & CEC_CAP_MONITOR_ALL)) + !(adap->capabilities & CEC_CAP_MONITOR_ALL)) { + dprintk(1, "%s: MONITOR_ALL not supported\n", __func__); return -EINVAL; + } + + if (mode_follower == CEC_MODE_MONITOR_PIN && + !(adap->capabilities & CEC_CAP_MONITOR_PIN)) { + dprintk(1, "%s: MONITOR_PIN not supported\n", __func__); + return -EINVAL; + } /* Follower modes should always be able to send CEC messages */ if ((mode_initiator == CEC_MODE_NO_INITIATOR || !(adap->capabilities & CEC_CAP_TRANSMIT)) && mode_follower >= CEC_MODE_FOLLOWER && - mode_follower <= CEC_MODE_EXCL_FOLLOWER_PASSTHRU) + mode_follower <= CEC_MODE_EXCL_FOLLOWER_PASSTHRU) { + dprintk(1, "%s: cannot transmit\n", __func__); return -EINVAL; + } /* Monitor modes require CEC_MODE_NO_INITIATOR */ - if (mode_initiator && mode_follower >= CEC_MODE_MONITOR) + if (mode_initiator && mode_follower >= CEC_MODE_MONITOR_PIN) { + dprintk(1, "%s: monitor modes require NO_INITIATOR\n", + __func__); return -EINVAL; + } /* Monitor modes require CAP_NET_ADMIN */ - if (mode_follower >= CEC_MODE_MONITOR && !capable(CAP_NET_ADMIN)) + if (mode_follower >= CEC_MODE_MONITOR_PIN && !capable(CAP_NET_ADMIN)) return -EPERM; mutex_lock(&adap->lock); @@ -413,8 +439,20 @@ static long cec_s_mode(struct cec_adapter *adap, struct cec_fh *fh, if (fh->mode_follower == CEC_MODE_FOLLOWER) adap->follower_cnt--; + if (fh->mode_follower == CEC_MODE_MONITOR_PIN) + adap->monitor_pin_cnt--; if (mode_follower == CEC_MODE_FOLLOWER) adap->follower_cnt++; + if (mode_follower == CEC_MODE_MONITOR_PIN) { + struct cec_event ev = { + .flags = CEC_EVENT_FL_INITIAL_STATE, + }; + + ev.event = adap->cec_pin_is_high ? 
CEC_EVENT_PIN_CEC_HIGH : + CEC_EVENT_PIN_CEC_LOW; + cec_queue_event_fh(fh, &ev, 0); + adap->monitor_pin_cnt++; + } if (mode_follower == CEC_MODE_EXCL_FOLLOWER || mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) { adap->passthrough = @@ -495,6 +533,7 @@ static int cec_open(struct inode *inode, struct file *filp) .event = CEC_EVENT_STATE_CHANGE, .flags = CEC_EVENT_FL_INITIAL_STATE, }; + unsigned int i; int err; if (!fh) @@ -502,6 +541,8 @@ static int cec_open(struct inode *inode, struct file *filp) INIT_LIST_HEAD(&fh->msgs); INIT_LIST_HEAD(&fh->xfer_list); + for (i = 0; i < CEC_NUM_EVENTS; i++) + INIT_LIST_HEAD(&fh->events[i]); mutex_init(&fh->lock); init_waitqueue_head(&fh->wait); @@ -544,6 +585,7 @@ static int cec_release(struct inode *inode, struct file *filp) struct cec_devnode *devnode = cec_devnode_data(filp); struct cec_adapter *adap = to_cec_adapter(devnode); struct cec_fh *fh = filp->private_data; + unsigned int i; mutex_lock(&adap->lock); if (adap->cec_initiator == fh) @@ -554,6 +596,8 @@ static int cec_release(struct inode *inode, struct file *filp) } if (fh->mode_follower == CEC_MODE_FOLLOWER) adap->follower_cnt--; + if (fh->mode_follower == CEC_MODE_MONITOR_PIN) + adap->monitor_pin_cnt--; if (fh->mode_follower == CEC_MODE_MONITOR_ALL) cec_monitor_all_cnt_dec(adap); mutex_unlock(&adap->lock); @@ -585,6 +629,16 @@ static int cec_release(struct inode *inode, struct file *filp) list_del(&entry->list); kfree(entry); } + for (i = CEC_NUM_CORE_EVENTS; i < CEC_NUM_EVENTS; i++) { + while (!list_empty(&fh->events[i])) { + struct cec_event_entry *entry = + list_first_entry(&fh->events[i], + struct cec_event_entry, list); + + list_del(&entry->list); + kfree(entry); + } + } kfree(fh); cec_put_device(devnode); diff --git a/drivers/media/cec/cec-core.c b/drivers/media/cec/cec-core.c index b516d599d6c4..648136e552d5 100644 --- a/drivers/media/cec/cec-core.c +++ b/drivers/media/cec/cec-core.c @@ -227,6 +227,7 @@ struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops, return ERR_PTR(-ENOMEM); strlcpy(adap->name, name, sizeof(adap->name)); adap->phys_addr = CEC_PHYS_ADDR_INVALID; + adap->cec_pin_is_high = true; adap->log_addrs.cec_version = CEC_OP_CEC_VERSION_2_0; adap->log_addrs.vendor_id = CEC_VENDOR_ID_NONE; adap->capabilities = caps; @@ -263,22 +264,24 @@ struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops, return ERR_PTR(-ENOMEM); } - snprintf(adap->input_name, sizeof(adap->input_name), + snprintf(adap->device_name, sizeof(adap->device_name), "RC for %s", name); snprintf(adap->input_phys, sizeof(adap->input_phys), "%s/input0", name); - adap->rc->input_name = adap->input_name; + adap->rc->device_name = adap->device_name; adap->rc->input_phys = adap->input_phys; adap->rc->input_id.bustype = BUS_CEC; adap->rc->input_id.vendor = 0; adap->rc->input_id.product = 0; adap->rc->input_id.version = 1; adap->rc->driver_name = CEC_NAME; - adap->rc->allowed_protocols = RC_BIT_CEC; + adap->rc->allowed_protocols = RC_PROTO_BIT_CEC; + adap->rc->enabled_protocols = RC_PROTO_BIT_CEC; adap->rc->priv = adap; adap->rc->map_name = RC_MAP_CEC; adap->rc->timeout = MS_TO_NS(100); + adap->rc_last_scancode = -1; #endif return adap; } @@ -310,6 +313,17 @@ int cec_register_adapter(struct cec_adapter *adap, adap->rc = NULL; return res; } + /* + * The REP_DELAY for CEC is really the time between the initial + * 'User Control Pressed' message and the second. 
The first + * keypress is always seen as non-repeating, the second + * (provided it has the same UI Command) will start the 'Press + * and Hold' (aka repeat) behavior. By setting REP_DELAY to the + * same value as REP_PERIOD the expected CEC behavior is + * reproduced. + */ + adap->rc->input_dev->rep[REP_DELAY] = + adap->rc->input_dev->rep[REP_PERIOD]; } #endif @@ -374,6 +388,8 @@ void cec_delete_adapter(struct cec_adapter *adap) kthread_stop(adap->kthread); if (adap->kthread_config) kthread_stop(adap->kthread_config); + if (adap->ops->adap_free) + adap->ops->adap_free(adap); #ifdef CONFIG_MEDIA_CEC_RC rc_free_device(adap->rc); #endif @@ -386,11 +402,8 @@ EXPORT_SYMBOL_GPL(cec_delete_adapter); */ static int __init cec_devnode_init(void) { - int ret; + int ret = alloc_chrdev_region(&cec_dev_t, 0, CEC_NUM_DEVICES, CEC_NAME); - pr_info("Linux cec interface: v0.10\n"); - ret = alloc_chrdev_region(&cec_dev_t, 0, CEC_NUM_DEVICES, - CEC_NAME); if (ret < 0) { pr_warn("cec: unable to allocate major\n"); return ret; diff --git a/drivers/media/cec/cec-pin.c b/drivers/media/cec/cec-pin.c new file mode 100644 index 000000000000..c003b8eac617 --- /dev/null +++ b/drivers/media/cec/cec-pin.c @@ -0,0 +1,802 @@ +/* + * Copyright 2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include + +#include + +/* All timings are in microseconds */ + +/* start bit timings */ +#define CEC_TIM_START_BIT_LOW 3700 +#define CEC_TIM_START_BIT_LOW_MIN 3500 +#define CEC_TIM_START_BIT_LOW_MAX 3900 +#define CEC_TIM_START_BIT_TOTAL 4500 +#define CEC_TIM_START_BIT_TOTAL_MIN 4300 +#define CEC_TIM_START_BIT_TOTAL_MAX 4700 + +/* data bit timings */ +#define CEC_TIM_DATA_BIT_0_LOW 1500 +#define CEC_TIM_DATA_BIT_0_LOW_MIN 1300 +#define CEC_TIM_DATA_BIT_0_LOW_MAX 1700 +#define CEC_TIM_DATA_BIT_1_LOW 600 +#define CEC_TIM_DATA_BIT_1_LOW_MIN 400 +#define CEC_TIM_DATA_BIT_1_LOW_MAX 800 +#define CEC_TIM_DATA_BIT_TOTAL 2400 +#define CEC_TIM_DATA_BIT_TOTAL_MIN 2050 +#define CEC_TIM_DATA_BIT_TOTAL_MAX 2750 +/* earliest safe time to sample the bit state */ +#define CEC_TIM_DATA_BIT_SAMPLE 850 +/* earliest time the bit is back to 1 (T7 + 50) */ +#define CEC_TIM_DATA_BIT_HIGH 1750 + +/* when idle, sample once per millisecond */ +#define CEC_TIM_IDLE_SAMPLE 1000 +/* when processing the start bit, sample twice per millisecond */ +#define CEC_TIM_START_BIT_SAMPLE 500 +/* when polling for a state change, sample once every 50 micoseconds */ +#define CEC_TIM_SAMPLE 50 + +#define CEC_TIM_LOW_DRIVE_ERROR (1.5 * CEC_TIM_DATA_BIT_TOTAL) + +struct cec_state { + const char * const name; + unsigned int usecs; +}; + +static const struct cec_state states[CEC_PIN_STATES] = { + { "Off", 0 }, + { "Idle", CEC_TIM_IDLE_SAMPLE }, + { "Tx Wait", CEC_TIM_SAMPLE }, + { "Tx Wait for High", CEC_TIM_IDLE_SAMPLE }, + { "Tx Start Bit Low", CEC_TIM_START_BIT_LOW }, + { "Tx Start Bit High", CEC_TIM_START_BIT_TOTAL - CEC_TIM_START_BIT_LOW }, + { "Tx Data 0 Low", CEC_TIM_DATA_BIT_0_LOW }, + { "Tx Data 0 High", CEC_TIM_DATA_BIT_TOTAL - CEC_TIM_DATA_BIT_0_LOW }, + { "Tx Data 1 Low", CEC_TIM_DATA_BIT_1_LOW }, + { "Tx Data 1 High", CEC_TIM_DATA_BIT_TOTAL - CEC_TIM_DATA_BIT_1_LOW }, + { "Tx Data 1 Pre Sample", CEC_TIM_DATA_BIT_SAMPLE - CEC_TIM_DATA_BIT_1_LOW }, + { "Tx Data 1 Post Sample", CEC_TIM_DATA_BIT_TOTAL - CEC_TIM_DATA_BIT_SAMPLE }, + { "Rx Start Bit Low", CEC_TIM_SAMPLE }, + { "Rx Start Bit High", CEC_TIM_SAMPLE }, + { "Rx Data Sample", CEC_TIM_DATA_BIT_SAMPLE }, + { "Rx Data Post Sample", CEC_TIM_DATA_BIT_HIGH - CEC_TIM_DATA_BIT_SAMPLE }, + { "Rx Data High", CEC_TIM_SAMPLE }, + { "Rx Ack Low", CEC_TIM_DATA_BIT_0_LOW }, + { "Rx Ack Low Post", CEC_TIM_DATA_BIT_HIGH - CEC_TIM_DATA_BIT_0_LOW }, + { "Rx Ack High Post", CEC_TIM_DATA_BIT_HIGH }, + { "Rx Ack Finish", CEC_TIM_DATA_BIT_TOTAL_MIN - CEC_TIM_DATA_BIT_HIGH }, + { "Rx Low Drive", CEC_TIM_LOW_DRIVE_ERROR }, + { "Rx Irq", 0 }, +}; + +static void cec_pin_update(struct cec_pin *pin, bool v, bool force) +{ + if (!force && v == pin->adap->cec_pin_is_high) + return; + + pin->adap->cec_pin_is_high = v; + if (atomic_read(&pin->work_pin_events) < CEC_NUM_PIN_EVENTS) { + pin->work_pin_is_high[pin->work_pin_events_wr] = v; + pin->work_pin_ts[pin->work_pin_events_wr] = ktime_get(); + pin->work_pin_events_wr = + (pin->work_pin_events_wr + 1) % CEC_NUM_PIN_EVENTS; + atomic_inc(&pin->work_pin_events); + } + wake_up_interruptible(&pin->kthread_waitq); +} + +static bool cec_pin_read(struct cec_pin *pin) +{ + bool v = pin->ops->read(pin->adap); + + cec_pin_update(pin, v, false); + return v; +} + +static void cec_pin_low(struct cec_pin *pin) +{ + pin->ops->low(pin->adap); + cec_pin_update(pin, false, false); +} + +static bool cec_pin_high(struct cec_pin *pin) +{ + pin->ops->high(pin->adap); + return cec_pin_read(pin); +} + +static void cec_pin_to_idle(struct cec_pin 
*pin) +{ + /* + * Reset all status fields, release the bus and + * go to idle state. + */ + pin->rx_bit = pin->tx_bit = 0; + pin->rx_msg.len = 0; + memset(pin->rx_msg.msg, 0, sizeof(pin->rx_msg.msg)); + pin->state = CEC_ST_IDLE; + pin->ts = 0; +} + +/* + * Handle Transmit-related states + * + * Basic state changes when transmitting: + * + * Idle -> Tx Wait (waiting for the end of signal free time) -> + * Tx Start Bit Low -> Tx Start Bit High -> + * + * Regular data bits + EOM: + * Tx Data 0 Low -> Tx Data 0 High -> + * or: + * Tx Data 1 Low -> Tx Data 1 High -> + * + * First 4 data bits or Ack bit: + * Tx Data 0 Low -> Tx Data 0 High -> + * or: + * Tx Data 1 Low -> Tx Data 1 High -> Tx Data 1 Pre Sample -> + * Tx Data 1 Post Sample -> + * + * After the last Ack go to Idle. + * + * If it detects a Low Drive condition then: + * Tx Wait For High -> Idle + * + * If it loses arbitration, then it switches to state Rx Data Post Sample. + */ +static void cec_pin_tx_states(struct cec_pin *pin, ktime_t ts) +{ + bool v; + bool is_ack_bit, ack; + + switch (pin->state) { + case CEC_ST_TX_WAIT_FOR_HIGH: + if (cec_pin_read(pin)) + cec_pin_to_idle(pin); + break; + + case CEC_ST_TX_START_BIT_LOW: + pin->state = CEC_ST_TX_START_BIT_HIGH; + /* Generate start bit */ + cec_pin_high(pin); + break; + + case CEC_ST_TX_DATA_BIT_1_HIGH_POST_SAMPLE: + /* If the read value is 1, then all is OK */ + if (!cec_pin_read(pin)) { + /* + * It's 0, so someone detected an error and pulled the + * line low for 1.5 times the nominal bit period. + */ + pin->tx_msg.len = 0; + pin->work_tx_ts = ts; + pin->work_tx_status = CEC_TX_STATUS_LOW_DRIVE; + pin->state = CEC_ST_TX_WAIT_FOR_HIGH; + wake_up_interruptible(&pin->kthread_waitq); + break; + } + if (pin->tx_nacked) { + cec_pin_to_idle(pin); + pin->tx_msg.len = 0; + pin->work_tx_ts = ts; + pin->work_tx_status = CEC_TX_STATUS_NACK; + wake_up_interruptible(&pin->kthread_waitq); + break; + } + /* fall through */ + case CEC_ST_TX_DATA_BIT_0_HIGH: + case CEC_ST_TX_DATA_BIT_1_HIGH: + pin->tx_bit++; + /* fall through */ + case CEC_ST_TX_START_BIT_HIGH: + if (pin->tx_bit / 10 >= pin->tx_msg.len) { + cec_pin_to_idle(pin); + pin->tx_msg.len = 0; + pin->work_tx_ts = ts; + pin->work_tx_status = CEC_TX_STATUS_OK; + wake_up_interruptible(&pin->kthread_waitq); + break; + } + + switch (pin->tx_bit % 10) { + default: + v = pin->tx_msg.msg[pin->tx_bit / 10] & + (1 << (7 - (pin->tx_bit % 10))); + pin->state = v ? CEC_ST_TX_DATA_BIT_1_LOW : + CEC_ST_TX_DATA_BIT_0_LOW; + break; + case 8: + v = pin->tx_bit / 10 == pin->tx_msg.len - 1; + pin->state = v ? CEC_ST_TX_DATA_BIT_1_LOW : + CEC_ST_TX_DATA_BIT_0_LOW; + break; + case 9: + pin->state = CEC_ST_TX_DATA_BIT_1_LOW; + break; + } + cec_pin_low(pin); + break; + + case CEC_ST_TX_DATA_BIT_0_LOW: + case CEC_ST_TX_DATA_BIT_1_LOW: + v = pin->state == CEC_ST_TX_DATA_BIT_1_LOW; + pin->state = v ? CEC_ST_TX_DATA_BIT_1_HIGH : + CEC_ST_TX_DATA_BIT_0_HIGH; + is_ack_bit = pin->tx_bit % 10 == 9; + if (v && (pin->tx_bit < 4 || is_ack_bit)) + pin->state = CEC_ST_TX_DATA_BIT_1_HIGH_PRE_SAMPLE; + cec_pin_high(pin); + break; + + case CEC_ST_TX_DATA_BIT_1_HIGH_PRE_SAMPLE: + /* Read the CEC value at the sample time */ + v = cec_pin_read(pin); + is_ack_bit = pin->tx_bit % 10 == 9; + /* + * If v == 0 and we're within the first 4 bits + * of the initiator, then someone else started + * transmitting and we lost the arbitration + * (i.e. the logical address of the other + * transmitter has more leading 0 bits in the + * initiator). 
+ */ + if (!v && !is_ack_bit) { + pin->tx_msg.len = 0; + pin->work_tx_ts = ts; + pin->work_tx_status = CEC_TX_STATUS_ARB_LOST; + wake_up_interruptible(&pin->kthread_waitq); + pin->rx_bit = pin->tx_bit; + pin->tx_bit = 0; + memset(pin->rx_msg.msg, 0, sizeof(pin->rx_msg.msg)); + pin->rx_msg.msg[0] = pin->tx_msg.msg[0]; + pin->rx_msg.msg[0] &= ~(1 << (7 - pin->rx_bit)); + pin->rx_msg.len = 0; + pin->state = CEC_ST_RX_DATA_POST_SAMPLE; + pin->rx_bit++; + break; + } + pin->state = CEC_ST_TX_DATA_BIT_1_HIGH_POST_SAMPLE; + if (!is_ack_bit) + break; + /* Was the message ACKed? */ + ack = cec_msg_is_broadcast(&pin->tx_msg) ? v : !v; + if (!ack) { + /* + * Note: the CEC spec is ambiguous regarding + * what action to take when a NACK appears + * before the last byte of the payload was + * transmitted: either stop transmitting + * immediately, or wait until the last byte + * was transmitted. + * + * Most CEC implementations appear to stop + * immediately, and that's what we do here + * as well. + */ + pin->tx_nacked = true; + } + break; + + default: + break; + } +} + +/* + * Handle Receive-related states + * + * Basic state changes when receiving: + * + * Rx Start Bit Low -> Rx Start Bit High -> + * Regular data bits + EOM: + * Rx Data Sample -> Rx Data Post Sample -> Rx Data High -> + * Ack bit 0: + * Rx Ack Low -> Rx Ack Low Post -> Rx Data High -> + * Ack bit 1: + * Rx Ack High Post -> Rx Data High -> + * Ack bit 0 && EOM: + * Rx Ack Low -> Rx Ack Low Post -> Rx Ack Finish -> Idle + */ +static void cec_pin_rx_states(struct cec_pin *pin, ktime_t ts) +{ + s32 delta; + bool v; + bool ack; + bool bcast, for_us; + u8 dest; + + switch (pin->state) { + /* Receive states */ + case CEC_ST_RX_START_BIT_LOW: + v = cec_pin_read(pin); + if (!v) + break; + pin->state = CEC_ST_RX_START_BIT_HIGH; + delta = ktime_us_delta(ts, pin->ts); + pin->ts = ts; + /* Start bit low is too short, go back to idle */ + if (delta < CEC_TIM_START_BIT_LOW_MIN - + CEC_TIM_IDLE_SAMPLE) { + cec_pin_to_idle(pin); + } + break; + + case CEC_ST_RX_START_BIT_HIGH: + v = cec_pin_read(pin); + delta = ktime_us_delta(ts, pin->ts); + if (v && delta > CEC_TIM_START_BIT_TOTAL_MAX - + CEC_TIM_START_BIT_LOW_MIN) { + cec_pin_to_idle(pin); + break; + } + if (v) + break; + pin->state = CEC_ST_RX_DATA_SAMPLE; + pin->ts = ts; + pin->rx_eom = false; + break; + + case CEC_ST_RX_DATA_SAMPLE: + v = cec_pin_read(pin); + pin->state = CEC_ST_RX_DATA_POST_SAMPLE; + switch (pin->rx_bit % 10) { + default: + if (pin->rx_bit / 10 < CEC_MAX_MSG_SIZE) + pin->rx_msg.msg[pin->rx_bit / 10] |= + v << (7 - (pin->rx_bit % 10)); + break; + case 8: + pin->rx_eom = v; + pin->rx_msg.len = pin->rx_bit / 10 + 1; + break; + case 9: + break; + } + pin->rx_bit++; + break; + + case CEC_ST_RX_DATA_POST_SAMPLE: + pin->state = CEC_ST_RX_DATA_HIGH; + break; + + case CEC_ST_RX_DATA_HIGH: + v = cec_pin_read(pin); + delta = ktime_us_delta(ts, pin->ts); + if (v && delta > CEC_TIM_DATA_BIT_TOTAL_MAX) { + cec_pin_to_idle(pin); + break; + } + if (v) + break; + /* + * Go to low drive state when the total bit time is + * too short. + */ + if (delta < CEC_TIM_DATA_BIT_TOTAL_MIN) { + cec_pin_low(pin); + pin->state = CEC_ST_LOW_DRIVE; + break; + } + pin->ts = ts; + if (pin->rx_bit % 10 != 9) { + pin->state = CEC_ST_RX_DATA_SAMPLE; + break; + } + + dest = cec_msg_destination(&pin->rx_msg); + bcast = dest == CEC_LOG_ADDR_BROADCAST; + /* for_us == broadcast or directed to us */ + for_us = bcast || (pin->la_mask & (1 << dest)); + /* ACK bit value */ + ack = bcast ? 
1 : !for_us; + + if (ack) { + /* No need to write to the bus, just wait */ + pin->state = CEC_ST_RX_ACK_HIGH_POST; + break; + } + cec_pin_low(pin); + pin->state = CEC_ST_RX_ACK_LOW; + break; + + case CEC_ST_RX_ACK_LOW: + cec_pin_high(pin); + pin->state = CEC_ST_RX_ACK_LOW_POST; + break; + + case CEC_ST_RX_ACK_LOW_POST: + case CEC_ST_RX_ACK_HIGH_POST: + v = cec_pin_read(pin); + if (v && pin->rx_eom) { + pin->work_rx_msg = pin->rx_msg; + pin->work_rx_msg.rx_ts = ts; + wake_up_interruptible(&pin->kthread_waitq); + pin->ts = ts; + pin->state = CEC_ST_RX_ACK_FINISH; + break; + } + pin->rx_bit++; + pin->state = CEC_ST_RX_DATA_HIGH; + break; + + case CEC_ST_RX_ACK_FINISH: + cec_pin_to_idle(pin); + break; + + default: + break; + } +} + +/* + * Main timer function + * + */ +static enum hrtimer_restart cec_pin_timer(struct hrtimer *timer) +{ + struct cec_pin *pin = container_of(timer, struct cec_pin, timer); + struct cec_adapter *adap = pin->adap; + ktime_t ts; + s32 delta; + + ts = ktime_get(); + if (pin->timer_ts) { + delta = ktime_us_delta(ts, pin->timer_ts); + pin->timer_cnt++; + if (delta > 100 && pin->state != CEC_ST_IDLE) { + /* Keep track of timer overruns */ + pin->timer_sum_overrun += delta; + pin->timer_100ms_overruns++; + if (delta > 300) + pin->timer_300ms_overruns++; + if (delta > pin->timer_max_overrun) + pin->timer_max_overrun = delta; + } + } + if (adap->monitor_pin_cnt) + cec_pin_read(pin); + + if (pin->wait_usecs) { + /* + * If we are monitoring the pin, then we have to + * sample at regular intervals. + */ + if (pin->wait_usecs > 150) { + pin->wait_usecs -= 100; + pin->timer_ts = ktime_add_us(ts, 100); + hrtimer_forward_now(timer, 100000); + return HRTIMER_RESTART; + } + if (pin->wait_usecs > 100) { + pin->wait_usecs /= 2; + pin->timer_ts = ktime_add_us(ts, pin->wait_usecs); + hrtimer_forward_now(timer, pin->wait_usecs * 1000); + return HRTIMER_RESTART; + } + pin->timer_ts = ktime_add_us(ts, pin->wait_usecs); + hrtimer_forward_now(timer, pin->wait_usecs * 1000); + pin->wait_usecs = 0; + return HRTIMER_RESTART; + } + + switch (pin->state) { + /* Transmit states */ + case CEC_ST_TX_WAIT_FOR_HIGH: + case CEC_ST_TX_START_BIT_LOW: + case CEC_ST_TX_DATA_BIT_1_HIGH_POST_SAMPLE: + case CEC_ST_TX_DATA_BIT_0_HIGH: + case CEC_ST_TX_DATA_BIT_1_HIGH: + case CEC_ST_TX_START_BIT_HIGH: + case CEC_ST_TX_DATA_BIT_0_LOW: + case CEC_ST_TX_DATA_BIT_1_LOW: + case CEC_ST_TX_DATA_BIT_1_HIGH_PRE_SAMPLE: + cec_pin_tx_states(pin, ts); + break; + + /* Receive states */ + case CEC_ST_RX_START_BIT_LOW: + case CEC_ST_RX_START_BIT_HIGH: + case CEC_ST_RX_DATA_SAMPLE: + case CEC_ST_RX_DATA_POST_SAMPLE: + case CEC_ST_RX_DATA_HIGH: + case CEC_ST_RX_ACK_LOW: + case CEC_ST_RX_ACK_LOW_POST: + case CEC_ST_RX_ACK_HIGH_POST: + case CEC_ST_RX_ACK_FINISH: + cec_pin_rx_states(pin, ts); + break; + + case CEC_ST_IDLE: + case CEC_ST_TX_WAIT: + if (!cec_pin_high(pin)) { + /* Start bit, switch to receive state */ + pin->ts = ts; + pin->state = CEC_ST_RX_START_BIT_LOW; + break; + } + if (pin->ts == 0) + pin->ts = ts; + if (pin->tx_msg.len) { + /* + * Check if the bus has been free for long enough + * so we can kick off the pending transmit. 
+ */ + delta = ktime_us_delta(ts, pin->ts); + if (delta / CEC_TIM_DATA_BIT_TOTAL > + pin->tx_signal_free_time) { + pin->tx_nacked = false; + pin->state = CEC_ST_TX_START_BIT_LOW; + /* Generate start bit */ + cec_pin_low(pin); + break; + } + if (delta / CEC_TIM_DATA_BIT_TOTAL > + pin->tx_signal_free_time - 1) + pin->state = CEC_ST_TX_WAIT; + break; + } + if (pin->state != CEC_ST_IDLE || pin->ops->enable_irq == NULL || + pin->enable_irq_failed || adap->is_configuring || + adap->is_configured || adap->monitor_all_cnt) + break; + /* Switch to interrupt mode */ + atomic_set(&pin->work_irq_change, CEC_PIN_IRQ_ENABLE); + pin->state = CEC_ST_RX_IRQ; + wake_up_interruptible(&pin->kthread_waitq); + return HRTIMER_NORESTART; + + case CEC_ST_LOW_DRIVE: + cec_pin_to_idle(pin); + break; + + default: + break; + } + if (!adap->monitor_pin_cnt || states[pin->state].usecs <= 150) { + pin->wait_usecs = 0; + pin->timer_ts = ktime_add_us(ts, states[pin->state].usecs); + hrtimer_forward_now(timer, states[pin->state].usecs * 1000); + return HRTIMER_RESTART; + } + pin->wait_usecs = states[pin->state].usecs - 100; + pin->timer_ts = ktime_add_us(ts, 100); + hrtimer_forward_now(timer, 100000); + return HRTIMER_RESTART; +} + +static int cec_pin_thread_func(void *_adap) +{ + struct cec_adapter *adap = _adap; + struct cec_pin *pin = adap->pin; + + for (;;) { + wait_event_interruptible(pin->kthread_waitq, + kthread_should_stop() || + pin->work_rx_msg.len || + pin->work_tx_status || + atomic_read(&pin->work_irq_change) || + atomic_read(&pin->work_pin_events)); + + if (pin->work_rx_msg.len) { + cec_received_msg_ts(adap, &pin->work_rx_msg, + pin->work_rx_msg.rx_ts); + pin->work_rx_msg.len = 0; + } + if (pin->work_tx_status) { + unsigned int tx_status = pin->work_tx_status; + + pin->work_tx_status = 0; + cec_transmit_attempt_done_ts(adap, tx_status, + pin->work_tx_ts); + } + + while (atomic_read(&pin->work_pin_events)) { + unsigned int idx = pin->work_pin_events_rd; + + cec_queue_pin_cec_event(adap, + pin->work_pin_is_high[idx], + pin->work_pin_ts[idx]); + pin->work_pin_events_rd = (idx + 1) % CEC_NUM_PIN_EVENTS; + atomic_dec(&pin->work_pin_events); + } + + switch (atomic_xchg(&pin->work_irq_change, + CEC_PIN_IRQ_UNCHANGED)) { + case CEC_PIN_IRQ_DISABLE: + pin->ops->disable_irq(adap); + cec_pin_high(pin); + cec_pin_to_idle(pin); + hrtimer_start(&pin->timer, 0, HRTIMER_MODE_REL); + break; + case CEC_PIN_IRQ_ENABLE: + pin->enable_irq_failed = !pin->ops->enable_irq(adap); + if (pin->enable_irq_failed) { + cec_pin_to_idle(pin); + hrtimer_start(&pin->timer, 0, HRTIMER_MODE_REL); + } + break; + default: + break; + } + + if (kthread_should_stop()) + break; + } + return 0; +} + +static int cec_pin_adap_enable(struct cec_adapter *adap, bool enable) +{ + struct cec_pin *pin = adap->pin; + + pin->enabled = enable; + if (enable) { + atomic_set(&pin->work_pin_events, 0); + pin->work_pin_events_rd = pin->work_pin_events_wr = 0; + cec_pin_read(pin); + cec_pin_to_idle(pin); + pin->tx_msg.len = 0; + pin->timer_ts = 0; + atomic_set(&pin->work_irq_change, CEC_PIN_IRQ_UNCHANGED); + pin->kthread = kthread_run(cec_pin_thread_func, adap, + "cec-pin"); + if (IS_ERR(pin->kthread)) { + pr_err("cec-pin: kernel_thread() failed\n"); + return PTR_ERR(pin->kthread); + } + hrtimer_start(&pin->timer, 0, HRTIMER_MODE_REL); + } else { + if (pin->ops->disable_irq) + pin->ops->disable_irq(adap); + hrtimer_cancel(&pin->timer); + kthread_stop(pin->kthread); + cec_pin_read(pin); + cec_pin_to_idle(pin); + pin->state = CEC_ST_OFF; + } + return 0; +} + +static int 
cec_pin_adap_log_addr(struct cec_adapter *adap, u8 log_addr) +{ + struct cec_pin *pin = adap->pin; + + if (log_addr == CEC_LOG_ADDR_INVALID) + pin->la_mask = 0; + else + pin->la_mask |= (1 << log_addr); + return 0; +} + +static int cec_pin_adap_transmit(struct cec_adapter *adap, u8 attempts, + u32 signal_free_time, struct cec_msg *msg) +{ + struct cec_pin *pin = adap->pin; + + pin->tx_signal_free_time = signal_free_time; + pin->tx_msg = *msg; + pin->work_tx_status = 0; + pin->tx_bit = 0; + if (pin->state == CEC_ST_RX_IRQ) { + atomic_set(&pin->work_irq_change, CEC_PIN_IRQ_UNCHANGED); + pin->ops->disable_irq(adap); + cec_pin_high(pin); + cec_pin_to_idle(pin); + hrtimer_start(&pin->timer, 0, HRTIMER_MODE_REL); + } + return 0; +} + +static void cec_pin_adap_status(struct cec_adapter *adap, + struct seq_file *file) +{ + struct cec_pin *pin = adap->pin; + + seq_printf(file, "state: %s\n", states[pin->state].name); + seq_printf(file, "tx_bit: %d\n", pin->tx_bit); + seq_printf(file, "rx_bit: %d\n", pin->rx_bit); + seq_printf(file, "cec pin: %d\n", pin->ops->read(adap)); + seq_printf(file, "irq failed: %d\n", pin->enable_irq_failed); + if (pin->timer_100ms_overruns) { + seq_printf(file, "timer overruns > 100ms: %u of %u\n", + pin->timer_100ms_overruns, pin->timer_cnt); + seq_printf(file, "timer overruns > 300ms: %u of %u\n", + pin->timer_300ms_overruns, pin->timer_cnt); + seq_printf(file, "max timer overrun: %u usecs\n", + pin->timer_max_overrun); + seq_printf(file, "avg timer overrun: %u usecs\n", + pin->timer_sum_overrun / pin->timer_100ms_overruns); + } + pin->timer_cnt = 0; + pin->timer_100ms_overruns = 0; + pin->timer_300ms_overruns = 0; + pin->timer_max_overrun = 0; + pin->timer_sum_overrun = 0; + if (pin->ops->status) + pin->ops->status(adap, file); +} + +static int cec_pin_adap_monitor_all_enable(struct cec_adapter *adap, + bool enable) +{ + struct cec_pin *pin = adap->pin; + + pin->monitor_all = enable; + return 0; +} + +static void cec_pin_adap_free(struct cec_adapter *adap) +{ + struct cec_pin *pin = adap->pin; + + if (pin->ops->free) + pin->ops->free(adap); + adap->pin = NULL; + kfree(pin); +} + +void cec_pin_changed(struct cec_adapter *adap, bool value) +{ + struct cec_pin *pin = adap->pin; + + cec_pin_update(pin, value, false); + if (!value && (adap->is_configuring || adap->is_configured || + adap->monitor_all_cnt)) + atomic_set(&pin->work_irq_change, CEC_PIN_IRQ_DISABLE); +} +EXPORT_SYMBOL_GPL(cec_pin_changed); + +static const struct cec_adap_ops cec_pin_adap_ops = { + .adap_enable = cec_pin_adap_enable, + .adap_monitor_all_enable = cec_pin_adap_monitor_all_enable, + .adap_log_addr = cec_pin_adap_log_addr, + .adap_transmit = cec_pin_adap_transmit, + .adap_status = cec_pin_adap_status, + .adap_free = cec_pin_adap_free, +}; + +struct cec_adapter *cec_pin_allocate_adapter(const struct cec_pin_ops *pin_ops, + void *priv, const char *name, u32 caps) +{ + struct cec_adapter *adap; + struct cec_pin *pin = kzalloc(sizeof(*pin), GFP_KERNEL); + + if (pin == NULL) + return ERR_PTR(-ENOMEM); + pin->ops = pin_ops; + hrtimer_init(&pin->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + pin->timer.function = cec_pin_timer; + init_waitqueue_head(&pin->kthread_waitq); + + adap = cec_allocate_adapter(&cec_pin_adap_ops, priv, name, + caps | CEC_CAP_MONITOR_ALL | CEC_CAP_MONITOR_PIN, + CEC_MAX_LOG_ADDRS); + + if (PTR_ERR_OR_ZERO(adap)) { + kfree(pin); + return adap; + } + + adap->pin = pin; + pin->adap = adap; + cec_pin_update(pin, cec_pin_high(pin), true); + return adap; +} 
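/*
 * A minimal, illustrative sketch (not part of this patch) of how a low-level
 * driver could sit on top of the cec-pin framework above: it only has to
 * provide the three mandatory cec_pin_ops callbacks and hand them to
 * cec_pin_allocate_adapter(). Everything named "my_*", the gpiod-based
 * callbacks and the capability mask are assumptions made for the example;
 * only cec_pin_ops, cec_pin_allocate_adapter() and cec_pin_changed() come
 * from this file.
 */
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <media/cec.h>
#include <media/cec-pin.h>

struct my_cec {
	struct gpio_desc *gpio;		/* the CEC line, wired open-drain */
	struct cec_adapter *adap;
};

static bool my_cec_read(struct cec_adapter *adap)
{
	struct my_cec *cec = cec_get_drvdata(adap);

	return gpiod_get_value(cec->gpio);
}

static void my_cec_low(struct cec_adapter *adap)
{
	struct my_cec *cec = cec_get_drvdata(adap);

	/* actively drive the line low */
	gpiod_direction_output(cec->gpio, 0);
}

static void my_cec_high(struct cec_adapter *adap)
{
	struct my_cec *cec = cec_get_drvdata(adap);

	/* release the line and let the pull-up take it high again */
	gpiod_direction_input(cec->gpio);
}

static const struct cec_pin_ops my_cec_pin_ops = {
	.read = my_cec_read,
	.low  = my_cec_low,
	.high = my_cec_high,
	/*
	 * Optional: .enable_irq/.disable_irq let the framework stop the
	 * polling hrtimer while the bus is idle; the driver then reports
	 * pin edges from its own interrupt handler via cec_pin_changed().
	 */
};

/* probe-time wiring; error unwinding is left out for brevity */
static int my_cec_setup(struct my_cec *cec, struct device *dev)
{
	cec->adap = cec_pin_allocate_adapter(&my_cec_pin_ops, cec, "my-cec",
					     CEC_CAP_TRANSMIT |
					     CEC_CAP_LOG_ADDRS |
					     CEC_CAP_PHYS_ADDR);
	if (IS_ERR(cec->adap))
		return PTR_ERR(cec->adap);

	return cec_register_adapter(cec->adap, dev);
}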
+EXPORT_SYMBOL_GPL(cec_pin_allocate_adapter); diff --git a/drivers/media/common/saa7146/saa7146_i2c.c b/drivers/media/common/saa7146/saa7146_i2c.c index 239a2db35068..75897f95e4b4 100644 --- a/drivers/media/common/saa7146/saa7146_i2c.c +++ b/drivers/media/common/saa7146/saa7146_i2c.c @@ -395,7 +395,7 @@ static int saa7146_i2c_xfer(struct i2c_adapter* adapter, struct i2c_msg *msg, in /* i2c-adapter helper functions */ /* exported algorithm data */ -static struct i2c_algorithm saa7146_algo = { +static const struct i2c_algorithm saa7146_algo = { .master_xfer = saa7146_i2c_xfer, .functionality = saa7146_i2c_func, }; diff --git a/drivers/media/common/saa7146/saa7146_vbi.c b/drivers/media/common/saa7146/saa7146_vbi.c index 3553ac4cba5c..d79e4d7ecd9f 100644 --- a/drivers/media/common/saa7146/saa7146_vbi.c +++ b/drivers/media/common/saa7146/saa7146_vbi.c @@ -308,7 +308,7 @@ static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb) saa7146_dma_free(dev,q,buf); } -static struct videobuf_queue_ops vbi_qops = { +static const struct videobuf_queue_ops vbi_qops = { .buf_setup = buffer_setup, .buf_prepare = buffer_prepare, .buf_queue = buffer_queue, diff --git a/drivers/media/common/saa7146/saa7146_video.c b/drivers/media/common/saa7146/saa7146_video.c index b3b29d4f36ed..37b4654dc21c 100644 --- a/drivers/media/common/saa7146/saa7146_video.c +++ b/drivers/media/common/saa7146/saa7146_video.c @@ -1187,7 +1187,7 @@ static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb) release_all_pagetables(dev, buf); } -static struct videobuf_queue_ops video_qops = { +static const struct videobuf_queue_ops video_qops = { .buf_setup = buffer_setup, .buf_prepare = buffer_prepare, .buf_queue = buffer_queue, diff --git a/drivers/media/common/siano/smsir.c b/drivers/media/common/siano/smsir.c index 7c898b06d85c..e77bb0c95e69 100644 --- a/drivers/media/common/siano/smsir.c +++ b/drivers/media/common/siano/smsir.c @@ -73,7 +73,7 @@ int sms_ir_init(struct smscore_device_t *coredev) strlcpy(coredev->ir.phys, coredev->devpath, sizeof(coredev->ir.phys)); strlcat(coredev->ir.phys, "/ir0", sizeof(coredev->ir.phys)); - dev->input_name = coredev->ir.name; + dev->device_name = coredev->ir.name; dev->input_phys = coredev->ir.phys; dev->dev.parent = coredev->device; @@ -86,12 +86,12 @@ int sms_ir_init(struct smscore_device_t *coredev) #endif dev->priv = coredev; - dev->allowed_protocols = RC_BIT_ALL_IR_DECODER; + dev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER; dev->map_name = sms_get_board(board_id)->rc_codes; dev->driver_name = MODULE_NAME; pr_debug("Input device (IR) %s is set for key events\n", - dev->input_name); + dev->device_name); err = rc_register_device(dev); if (err < 0) { diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-colors.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-colors.c index 9bcbd318489b..5b5f95c38fe1 100644 --- a/drivers/media/common/v4l2-tpg/v4l2-tpg-colors.c +++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-colors.c @@ -646,14 +646,14 @@ const struct color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFER_FUNC_S [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_DCI_P3][5] = { 3248, 944, 1094 }, [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_DCI_P3][6] = { 1017, 967, 3168 }, [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 }, - [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 }, - [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][1] = { 3802, 3805, 2602 }, - 
[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][2] = { 0, 3806, 3797 }, - [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][3] = { 1780, 3812, 2592 }, - [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][4] = { 3820, 2215, 3796 }, - [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][5] = { 3824, 2409, 2574 }, - [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][6] = { 2491, 2435, 3795 }, - [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][0] = { 1812, 1812, 1812 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][1] = { 1815, 1818, 910 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][2] = { 0, 1819, 1811 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][3] = { 472, 1825, 904 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][4] = { 1832, 686, 1810 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][5] = { 1835, 794, 893 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][6] = { 843, 809, 1810 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][7] = { 886, 886, 886 }, [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 }, [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_709][1] = { 2953, 2963, 586 }, [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_709][2] = { 0, 2967, 2937 }, @@ -702,14 +702,14 @@ const struct color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFER_FUNC_S [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_DCI_P3][5] = { 3248, 944, 1094 }, [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_DCI_P3][6] = { 1017, 967, 3168 }, [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 }, - [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 }, - [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][1] = { 3802, 3805, 2602 }, - [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][2] = { 0, 3806, 3797 }, - [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][3] = { 1780, 3812, 2592 }, - [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][4] = { 3820, 2215, 3796 }, - [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][5] = { 3824, 2409, 2574 }, - [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][6] = { 2491, 2435, 3795 }, - [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][0] = { 1812, 1812, 1812 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][1] = { 1815, 1818, 910 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][2] = { 0, 1819, 1811 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][3] = { 472, 1825, 904 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][4] = { 1832, 686, 1810 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][5] = { 1835, 794, 893 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][6] = { 843, 809, 1810 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][7] = { 886, 886, 886 }, [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 }, [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_709][1] = { 2939, 2939, 547 }, [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_709][2] = { 547, 2939, 2939 }, @@ -758,14 +758,14 @@ const struct color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFER_FUNC_S [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_DCI_P3][5] = { 3175, 1084, 1084 }, [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_DCI_P3][6] = { 1084, 1084, 3175 }, [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 }, - 
[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 }, - [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][1] = { 3798, 3798, 2563 }, - [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][2] = { 2563, 3798, 3798 }, - [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][3] = { 2563, 3798, 2563 }, - [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][4] = { 3798, 2563, 3798 }, - [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][5] = { 3798, 2563, 2563 }, - [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][6] = { 2563, 2563, 3798 }, - [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][0] = { 1812, 1812, 1812 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][1] = { 1812, 1812, 886 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][2] = { 886, 1812, 1812 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][3] = { 886, 1812, 886 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][4] = { 1812, 886, 1812 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][5] = { 1812, 886, 886 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][6] = { 886, 886, 1812 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][7] = { 886, 886, 886 }, [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 }, [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][1] = { 2892, 3034, 910 }, [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][2] = { 1715, 2916, 2914 }, @@ -814,14 +814,14 @@ const struct color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFER_FUNC_S [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_DCI_P3][5] = { 2765, 1182, 1190 }, [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_DCI_P3][6] = { 1270, 0, 3094 }, [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 }, - [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 }, - [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][1] = { 3784, 3825, 2879 }, - [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][2] = { 3351, 3791, 3790 }, - [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][3] = { 3311, 3819, 2815 }, - [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][4] = { 3659, 1900, 3777 }, - [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][5] = { 3640, 2662, 2669 }, - [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][6] = { 2743, 0, 3769 }, - [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][0] = { 1812, 1812, 1812 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][1] = { 1800, 1836, 1090 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][2] = { 1436, 1806, 1805 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][3] = { 1405, 1830, 1047 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][4] = { 1691, 527, 1793 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][5] = { 1674, 947, 952 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][6] = { 1000, 0, 1786 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][7] = { 886, 886, 886 }, [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 }, [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_709][1] = { 2939, 2939, 464 }, [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_709][2] = { 786, 2939, 2939 }, @@ -870,14 +870,14 @@ const struct color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFER_FUNC_S 
[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_DCI_P3][5] = { 3126, 1084, 1084 }, [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_DCI_P3][6] = { 1084, 1084, 3188 }, [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 }, - [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 }, - [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][1] = { 3798, 3798, 2476 }, - [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][2] = { 2782, 3798, 3798 }, - [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][3] = { 2782, 3798, 2476 }, - [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][4] = { 3780, 2563, 3803 }, - [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][5] = { 3780, 2563, 2563 }, - [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][6] = { 2563, 2563, 3803 }, - [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][0] = { 1812, 1812, 1812 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][1] = { 1812, 1812, 833 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][2] = { 1025, 1812, 1812 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][3] = { 1025, 1812, 833 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][4] = { 1796, 886, 1816 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][5] = { 1796, 886, 886 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][6] = { 886, 886, 1816 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][7] = { 886, 886, 886 }, [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 }, [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_709][1] = { 2939, 2939, 547 }, [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_709][2] = { 547, 2939, 2939 }, @@ -926,14 +926,14 @@ const struct color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFER_FUNC_S [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_DCI_P3][5] = { 3175, 1084, 1084 }, [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_DCI_P3][6] = { 1084, 1084, 3175 }, [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 }, - [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 }, - [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][1] = { 3798, 3798, 2563 }, - [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][2] = { 2563, 3798, 3798 }, - [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][3] = { 2563, 3798, 2563 }, - [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][4] = { 3798, 2563, 3798 }, - [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][5] = { 3798, 2563, 2563 }, - [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][6] = { 2563, 2563, 3798 }, - [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][0] = { 1812, 1812, 1812 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][1] = { 1812, 1812, 886 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][2] = { 886, 1812, 1812 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][3] = { 886, 1812, 886 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][4] = { 1812, 886, 1812 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][5] = { 1812, 886, 886 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][6] = { 886, 886, 1812 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][7] = { 886, 886, 886 }, [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 }, [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][1] = { 2939, 2939, 781 }, 
[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][2] = { 1622, 2939, 2939 }, @@ -982,14 +982,14 @@ const struct color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFER_FUNC_S [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][5] = { 2816, 1084, 1084 }, [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][6] = { 1084, 1084, 3127 }, [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][1] = { 3798, 3798, 2778 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][2] = { 3306, 3798, 3798 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][3] = { 3306, 3798, 2778 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][4] = { 3661, 2563, 3781 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][5] = { 3661, 2563, 2563 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][6] = { 2563, 2563, 3781 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 }, + [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][0] = { 1812, 1812, 1812 }, + [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][1] = { 1812, 1812, 1022 }, + [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][2] = { 1402, 1812, 1812 }, + [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][3] = { 1402, 1812, 1022 }, + [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][4] = { 1692, 886, 1797 }, + [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][5] = { 1692, 886, 886 }, + [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][6] = { 886, 886, 1797 }, + [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][7] = { 886, 886, 886 }, [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 }, [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_709][1] = { 2877, 2923, 1058 }, [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_709][2] = { 1837, 2840, 2916 }, @@ -1038,14 +1038,14 @@ const struct color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFER_FUNC_S [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_DCI_P3][5] = { 2690, 1431, 1182 }, [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_DCI_P3][6] = { 1318, 1153, 3051 }, [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 }, - [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 }, - [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][1] = { 3780, 3793, 2984 }, - [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][2] = { 3406, 3768, 3791 }, - [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][3] = { 3359, 3763, 2939 }, - [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][4] = { 3636, 2916, 3760 }, - [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][5] = { 3609, 2880, 2661 }, - [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][6] = { 2786, 2633, 3753 }, - [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][0] = { 1812, 1812, 1812 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][1] = { 1796, 1808, 1163 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][2] = { 1480, 1786, 1806 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][3] = { 1443, 1781, 1131 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][4] = { 1670, 1116, 1778 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][5] = { 1648, 1091, 947 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][6] = { 1028, 929, 1772 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][7] = { 886, 886, 886 }, 
[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 }, [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_709][1] = { 2936, 2934, 992 }, [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_709][2] = { 1159, 2890, 2916 }, @@ -1094,14 +1094,14 @@ const struct color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFER_FUNC_S [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_DCI_P3][5] = { 3018, 1276, 1184 }, [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_DCI_P3][6] = { 1100, 1107, 3071 }, [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 }, - [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 }, - [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][1] = { 3797, 3796, 2938 }, - [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][2] = { 3049, 3783, 3791 }, - [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][3] = { 3044, 3782, 2887 }, - [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][4] = { 3741, 2765, 3768 }, - [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][5] = { 3740, 2749, 2663 }, - [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][6] = { 2580, 2587, 3760 }, - [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][0] = { 1812, 1812, 1812 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][1] = { 1811, 1810, 1131 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][2] = { 1210, 1799, 1806 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][3] = { 1206, 1798, 1096 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][4] = { 1762, 1014, 1785 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][5] = { 1761, 1004, 948 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][6] = { 896, 901, 1778 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][7] = { 886, 886, 886 }, }; #else @@ -1225,6 +1225,12 @@ static double transfer_rgb_to_smpte2084(double v) const double c2 = 32.0 * 2413.0 / 4096.0; const double c3 = 32.0 * 2392.0 / 4096.0; + /* + * The RGB input maps to the luminance range 0-100 cd/m^2, while + * SMPTE-2084 maps values to the luminance range of 0-10000 cd/m^2. + * Hence the factor 100. + */ + v /= 100.0; v = pow(v, m1); return pow((c1 + c2 * v) / (1 + c3 * v), m2); } diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c index 3dd22da7e17d..a772976cfe26 100644 --- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c +++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c @@ -615,7 +615,7 @@ static void color_to_ycbcr(struct tpg_data *tpg, int r, int g, int b, static const int bt2020_full[3][3] = { { COEFF(0.2627, 255), COEFF(0.6780, 255), COEFF(0.0593, 255) }, { COEFF(-0.1396, 255), COEFF(-0.3604, 255), COEFF(0.5, 255) }, - { COEFF(0.5, 255), COEFF(-0.4698, 255), COEFF(-0.0402, 255) }, + { COEFF(0.5, 255), COEFF(-0.4598, 255), COEFF(-0.0402, 255) }, }; static const int bt2020c[4] = { COEFF(1.0 / 1.9404, 224), COEFF(1.0 / 1.5816, 224), diff --git a/drivers/media/dvb-core/demux.h b/drivers/media/dvb-core/demux.h index f854309ba8a5..c4df6cee48e6 100644 --- a/drivers/media/dvb-core/demux.h +++ b/drivers/media/dvb-core/demux.h @@ -210,7 +210,7 @@ struct dmx_section_feed { * the start of the first undelivered TS packet within a circular buffer. * The @buffer2 buffer parameter is normally NULL, except when the received * TS packets have crossed the last address of the circular buffer and - * ”wrapped” to the beginning of the buffer. In the latter case the @buffer1 + * "wrapped" to the beginning of the buffer. 
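The factor-of-100 change in transfer_rgb_to_smpte2084() above can be checked with a small userspace program. The sketch below is illustrative and not taken from the patch itself: it reuses the constants visible in the hunk (c2, c3) and assumes the standard SMPTE ST 2084 values for m1, m2 and c1, which the file defines just above the hunk. It also confirms that the corrected BT.2020 Cr row (0.5, -0.4598, -0.0402) sums to zero, as every colour-difference row must.

/* build with:  cc -o pq-check pq-check.c -lm */
#include <math.h>
#include <stdio.h>

static double transfer_rgb_to_smpte2084(double v)
{
	const double m1 = (2610.0 / 4096.0) / 4.0;	/* assumed standard ST 2084 value */
	const double m2 = 128.0 * 2523.0 / 4096.0;	/* assumed standard ST 2084 value */
	const double c1 = 3424.0 / 4096.0;		/* assumed standard ST 2084 value */
	const double c2 = 32.0 * 2413.0 / 4096.0;
	const double c3 = 32.0 * 2392.0 / 4096.0;

	v /= 100.0;	/* a 0..1 input spans 0..100 cd/m^2 of the 0..10000 cd/m^2 PQ range */
	v = pow(v, m1);
	return pow((c1 + c2 * v) / (1 + c3 * v), m2);
}

int main(void)
{
	/* 100 cd/m^2 is commonly quoted as a PQ code value of about 0.508 */
	printf("PQ'(100 cd/m^2) = %.3f\n", transfer_rgb_to_smpte2084(1.0));

	/* the Cr row of an RGB -> YCbCr matrix must sum to zero */
	printf("BT.2020 Cr row sum = %.4f\n", 0.5 - 0.4598 - 0.0402);
	return 0;
}

Without the division by 100 the old tables effectively treated nominal white as 10000 cd/m^2, which is why the pre-patch SMPTE 2084 table entries were so much larger than the new ones.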
In the latter case the @buffer1 * parameter would contain an address within the circular buffer, while the * @buffer2 parameter would contain the first address of the circular buffer. * The number of bytes delivered with this function (i.e. @buffer1_length + diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c index 45e91add73ba..18e4230865be 100644 --- a/drivers/media/dvb-core/dmxdev.c +++ b/drivers/media/dvb-core/dmxdev.c @@ -562,7 +562,7 @@ static int dvb_dmxdev_start_feed(struct dmxdev *dmxdev, { ktime_t timeout = 0; struct dmx_pes_filter_params *para = &filter->params.pes; - dmx_output_t otype; + enum dmx_output otype; int ret; int ts_type; enum dmx_ts_pes ts_pes; @@ -787,7 +787,7 @@ static int dvb_dmxdev_filter_free(struct dmxdev *dmxdev, return 0; } -static inline void invert_mode(dmx_filter_t *filter) +static inline void invert_mode(struct dmx_filter *filter) { int i; @@ -1025,26 +1025,6 @@ static int dvb_demux_do_ioctl(struct file *file, dmxdev->demux->get_pes_pids(dmxdev->demux, parg); break; -#if 0 - /* Not used upstream and never documented */ - - case DMX_GET_CAPS: - if (!dmxdev->demux->get_caps) { - ret = -EINVAL; - break; - } - ret = dmxdev->demux->get_caps(dmxdev->demux, parg); - break; - - case DMX_SET_SOURCE: - if (!dmxdev->demux->set_source) { - ret = -EINVAL; - break; - } - ret = dmxdev->demux->set_source(dmxdev->demux, parg); - break; -#endif - case DMX_GET_STC: if (!dmxdev->demux->get_stc) { ret = -EINVAL; diff --git a/drivers/media/dvb-core/dvb-usb-ids.h b/drivers/media/dvb-core/dvb-usb-ids.h index e200aa6f2d2f..5b6041d462bc 100644 --- a/drivers/media/dvb-core/dvb-usb-ids.h +++ b/drivers/media/dvb-core/dvb-usb-ids.h @@ -279,6 +279,7 @@ #define USB_PID_TERRATEC_H7 0x10b4 #define USB_PID_TERRATEC_H7_2 0x10a3 #define USB_PID_TERRATEC_H7_3 0x10a5 +#define USB_PID_TERRATEC_T1 0x10ae #define USB_PID_TERRATEC_T3 0x10a0 #define USB_PID_TERRATEC_T5 0x10a1 #define USB_PID_NOXON_DAB_STICK 0x00b3 diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c index 17970cdd55fa..95b3723282f4 100644 --- a/drivers/media/dvb-core/dvb_ca_en50221.c +++ b/drivers/media/dvb-core/dvb_ca_en50221.c @@ -76,8 +76,6 @@ MODULE_PARM_DESC(cam_debug, "enable verbose debug messages"); #define STATUSREG_WE 2 /* write error */ #define STATUSREG_FR 0x40 /* module free */ #define STATUSREG_DA 0x80 /* data available */ -#define STATUSREG_TXERR (STATUSREG_RE|STATUSREG_WE) /* general transfer error */ - #define DVB_CA_SLOTSTATE_NONE 0 #define DVB_CA_SLOTSTATE_UNINITIALISED 1 @@ -88,10 +86,8 @@ MODULE_PARM_DESC(cam_debug, "enable verbose debug messages"); #define DVB_CA_SLOTSTATE_WAITFR 6 #define DVB_CA_SLOTSTATE_LINKINIT 7 - /* Information on a CA slot */ struct dvb_ca_slot { - /* current state of the CAM */ int slot_state; @@ -157,7 +153,10 @@ struct dvb_ca_private { /* Delay the main thread should use */ unsigned long delay; - /* Slot to start looking for data to read from in the next user-space read operation */ + /* + * Slot to start looking for data to read from in the next user-space + * read operation + */ int next_read_slot; /* mutex serializing ioctls */ @@ -178,7 +177,9 @@ static void dvb_ca_private_free(struct dvb_ca_private *ca) static void dvb_ca_private_release(struct kref *ref) { - struct dvb_ca_private *ca = container_of(ref, struct dvb_ca_private, refcount); + struct dvb_ca_private *ca; + + ca = container_of(ref, struct dvb_ca_private, refcount); dvb_ca_private_free(ca); } @@ -198,7 +199,6 @@ static int 
dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot, u8 *ebuf, int ecount); - /** * Safely find needle in haystack. * @@ -223,25 +223,22 @@ static char *findstr(char *haystack, int hlen, char *needle, int nlen) return NULL; } - - -/* ******************************************************************************** */ +/* ************************************************************************** */ /* EN50221 physical interface functions */ - /** * dvb_ca_en50221_check_camstatus - Check CAM status. */ static int dvb_ca_en50221_check_camstatus(struct dvb_ca_private *ca, int slot) { + struct dvb_ca_slot *sl = &ca->slot_info[slot]; int slot_status; int cam_present_now; int cam_changed; /* IRQ mode */ - if (ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE) { - return (atomic_read(&ca->slot_info[slot].camchange_count) != 0); - } + if (ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE) + return (atomic_read(&sl->camchange_count) != 0); /* poll mode */ slot_status = ca->pub->poll_slot_status(ca->pub, slot, ca->open); @@ -249,29 +246,28 @@ static int dvb_ca_en50221_check_camstatus(struct dvb_ca_private *ca, int slot) cam_present_now = (slot_status & DVB_CA_EN50221_POLL_CAM_PRESENT) ? 1 : 0; cam_changed = (slot_status & DVB_CA_EN50221_POLL_CAM_CHANGED) ? 1 : 0; if (!cam_changed) { - int cam_present_old = (ca->slot_info[slot].slot_state != DVB_CA_SLOTSTATE_NONE); + int cam_present_old = (sl->slot_state != DVB_CA_SLOTSTATE_NONE); + cam_changed = (cam_present_now != cam_present_old); } if (cam_changed) { - if (!cam_present_now) { - ca->slot_info[slot].camchange_type = DVB_CA_EN50221_CAMCHANGE_REMOVED; - } else { - ca->slot_info[slot].camchange_type = DVB_CA_EN50221_CAMCHANGE_INSERTED; - } - atomic_set(&ca->slot_info[slot].camchange_count, 1); + if (!cam_present_now) + sl->camchange_type = DVB_CA_EN50221_CAMCHANGE_REMOVED; + else + sl->camchange_type = DVB_CA_EN50221_CAMCHANGE_INSERTED; + atomic_set(&sl->camchange_count, 1); } else { - if ((ca->slot_info[slot].slot_state == DVB_CA_SLOTSTATE_WAITREADY) && + if ((sl->slot_state == DVB_CA_SLOTSTATE_WAITREADY) && (slot_status & DVB_CA_EN50221_POLL_CAM_READY)) { - // move to validate state if reset is completed - ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_VALIDATE; + /* move to validate state if reset is completed */ + sl->slot_state = DVB_CA_SLOTSTATE_VALIDATE; } } return cam_changed; } - /** * dvb_ca_en50221_wait_if_status - Wait for flags to become set on the STATUS * register on a CAM interface, checking for errors and timeout. @@ -295,8 +291,10 @@ static int dvb_ca_en50221_wait_if_status(struct dvb_ca_private *ca, int slot, start = jiffies; timeout = jiffies + timeout_hz; while (1) { + int res; + /* read the status and check for error */ - int res = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS); + res = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS); if (res < 0) return -EIO; @@ -308,12 +306,11 @@ static int dvb_ca_en50221_wait_if_status(struct dvb_ca_private *ca, int slot, } /* check for timeout */ - if (time_after(jiffies, timeout)) { + if (time_after(jiffies, timeout)) break; - } /* wait for a bit */ - msleep(1); + usleep_range(1000, 1100); } dprintk("%s failed timeout:%lu\n", __func__, jiffies - start); @@ -322,7 +319,6 @@ static int dvb_ca_en50221_wait_if_status(struct dvb_ca_private *ca, int slot, return -ETIMEDOUT; } - /** * dvb_ca_en50221_link_init - Initialise the link layer connection to a CAM. 
* @@ -333,6 +329,7 @@ static int dvb_ca_en50221_wait_if_status(struct dvb_ca_private *ca, int slot, */ static int dvb_ca_en50221_link_init(struct dvb_ca_private *ca, int slot) { + struct dvb_ca_slot *sl = &ca->slot_info[slot]; int ret; int buf_size; u8 buf[2]; @@ -340,40 +337,54 @@ static int dvb_ca_en50221_link_init(struct dvb_ca_private *ca, int slot) dprintk("%s\n", __func__); /* we'll be determining these during this function */ - ca->slot_info[slot].da_irq_supported = 0; + sl->da_irq_supported = 0; - /* set the host link buffer size temporarily. it will be overwritten with the - * real negotiated size later. */ - ca->slot_info[slot].link_buf_size = 2; + /* + * set the host link buffer size temporarily. it will be overwritten + * with the real negotiated size later. + */ + sl->link_buf_size = 2; /* read the buffer size from the CAM */ - if ((ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, IRQEN | CMDREG_SR)) != 0) + ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, + IRQEN | CMDREG_SR); + if (ret) return ret; ret = dvb_ca_en50221_wait_if_status(ca, slot, STATUSREG_DA, HZ); - if (ret != 0) + if (ret) return ret; - if ((ret = dvb_ca_en50221_read_data(ca, slot, buf, 2)) != 2) + ret = dvb_ca_en50221_read_data(ca, slot, buf, 2); + if (ret != 2) return -EIO; - if ((ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, IRQEN)) != 0) + ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, IRQEN); + if (ret) return ret; - /* store it, and choose the minimum of our buffer and the CAM's buffer size */ + /* + * store it, and choose the minimum of our buffer and the CAM's buffer + * size + */ buf_size = (buf[0] << 8) | buf[1]; if (buf_size > HOST_LINK_BUF_SIZE) buf_size = HOST_LINK_BUF_SIZE; - ca->slot_info[slot].link_buf_size = buf_size; + sl->link_buf_size = buf_size; buf[0] = buf_size >> 8; buf[1] = buf_size & 0xff; dprintk("Chosen link buffer size of %i\n", buf_size); /* write the buffer size to the CAM */ - if ((ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, IRQEN | CMDREG_SW)) != 0) + ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, + IRQEN | CMDREG_SW); + if (ret) return ret; - if ((ret = dvb_ca_en50221_wait_if_status(ca, slot, STATUSREG_FR, HZ / 10)) != 0) + ret = dvb_ca_en50221_wait_if_status(ca, slot, STATUSREG_FR, HZ / 10); + if (ret) return ret; - if ((ret = dvb_ca_en50221_write_data(ca, slot, buf, 2)) != 2) + ret = dvb_ca_en50221_write_data(ca, slot, buf, 2); + if (ret != 2) return -EIO; - if ((ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, IRQEN)) != 0) + ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, IRQEN); + if (ret) return ret; /* success */ @@ -393,47 +404,50 @@ static int dvb_ca_en50221_link_init(struct dvb_ca_private *ca, int slot) * @return 0 on success, nonzero on error. 
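In the link_init path above, the CAM first reports the size of its link buffer as two big-endian bytes; the host clamps that to HOST_LINK_BUF_SIZE and writes the agreed size back in the same encoding. A minimal sketch of just that negotiation step, with a stand-in host limit since the real constant is defined elsewhere in the file:

#include <stdint.h>
#include <stdio.h>

#define HOST_LINK_BUF_SIZE_SKETCH 2048	/* stand-in for the driver's HOST_LINK_BUF_SIZE */

/* clamp the CAM's advertised buffer size and re-encode it big-endian */
static int negotiate_link_buf(const uint8_t cam_reply[2], uint8_t agreed[2])
{
	int buf_size = (cam_reply[0] << 8) | cam_reply[1];

	if (buf_size > HOST_LINK_BUF_SIZE_SKETCH)
		buf_size = HOST_LINK_BUF_SIZE_SKETCH;
	agreed[0] = buf_size >> 8;
	agreed[1] = buf_size & 0xff;
	return buf_size;
}

int main(void)
{
	const uint8_t cam_reply[2] = { 0x10, 0x00 };	/* CAM offers 4096 bytes */
	uint8_t agreed[2];
	int size = negotiate_link_buf(cam_reply, agreed);

	printf("agreed link buffer: %d bytes (0x%02x 0x%02x)\n",
	       size, agreed[0], agreed[1]);
	return 0;
}

The driver stores the agreed value in sl->link_buf_size, and every later read or write is checked and fragmented against that limit.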
*/ static int dvb_ca_en50221_read_tuple(struct dvb_ca_private *ca, int slot, - int *address, int *tupleType, - int *tupleLength, u8 *tuple) + int *address, int *tuple_type, + int *tuple_length, u8 *tuple) { int i; - int _tupleType; - int _tupleLength; + int _tuple_type; + int _tuple_length; int _address = *address; /* grab the next tuple length and type */ - if ((_tupleType = ca->pub->read_attribute_mem(ca->pub, slot, _address)) < 0) - return _tupleType; - if (_tupleType == 0xff) { - dprintk("END OF CHAIN TUPLE type:0x%x\n", _tupleType); + _tuple_type = ca->pub->read_attribute_mem(ca->pub, slot, _address); + if (_tuple_type < 0) + return _tuple_type; + if (_tuple_type == 0xff) { + dprintk("END OF CHAIN TUPLE type:0x%x\n", _tuple_type); *address += 2; - *tupleType = _tupleType; - *tupleLength = 0; + *tuple_type = _tuple_type; + *tuple_length = 0; return 0; } - if ((_tupleLength = ca->pub->read_attribute_mem(ca->pub, slot, _address + 2)) < 0) - return _tupleLength; + _tuple_length = ca->pub->read_attribute_mem(ca->pub, slot, + _address + 2); + if (_tuple_length < 0) + return _tuple_length; _address += 4; - dprintk("TUPLE type:0x%x length:%i\n", _tupleType, _tupleLength); + dprintk("TUPLE type:0x%x length:%i\n", _tuple_type, _tuple_length); /* read in the whole tuple */ - for (i = 0; i < _tupleLength; i++) { - tuple[i] = ca->pub->read_attribute_mem(ca->pub, slot, _address + (i * 2)); + for (i = 0; i < _tuple_length; i++) { + tuple[i] = ca->pub->read_attribute_mem(ca->pub, slot, + _address + (i * 2)); dprintk(" 0x%02x: 0x%02x %c\n", i, tuple[i] & 0xff, ((tuple[i] > 31) && (tuple[i] < 127)) ? tuple[i] : '.'); } - _address += (_tupleLength * 2); + _address += (_tuple_length * 2); - // success - *tupleType = _tupleType; - *tupleLength = _tupleLength; + /* success */ + *tuple_type = _tuple_type; + *tuple_length = _tuple_length; *address = _address; return 0; } - /** * dvb_ca_en50221_parse_attributes - Parse attribute memory of a CAM module, * extracting Config register, and checking it is a DVB CAM module. 
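The tuple walk in dvb_ca_en50221_read_tuple() only touches even addresses because PC Card attribute memory carries one CIS byte per 16-bit word; the tuple header is two such bytes (type, then length) followed by the data bytes, again one per word. A self-contained sketch of the same walk over a fake attribute-memory array (the helper name and sample data are invented for illustration):

#include <stdint.h>
#include <stdio.h>

/* fake attribute memory: CIS bytes live at even offsets only */
static const uint8_t attr_mem[] = {
	0x1D, 0x00, 0x02, 0x00, 0xAA, 0x00, 0xBB, 0x00,	/* CISTPL_DEVICE_0A, length 2 */
	0xFF, 0x00,						/* CISTPL_END */
};

static int read_attribute_mem(int address)
{
	if (address < 0 || address >= (int)sizeof(attr_mem))
		return -1;
	return attr_mem[address];
}

int main(void)
{
	int address = 0;

	for (;;) {
		int type = read_attribute_mem(address);
		int len, i;

		if (type < 0)
			break;
		if (type == 0xff) {	/* end-of-chain tuple */
			printf("END OF CHAIN\n");
			break;
		}
		len = read_attribute_mem(address + 2);
		if (len < 0)
			break;
		printf("tuple 0x%02x, length %d:", type, len);
		for (i = 0; i < len; i++)
			printf(" 0x%02x", read_attribute_mem(address + 4 + 2 * i));
		printf("\n");
		address += 4 + 2 * len;	/* two header words plus len data words */
	}
	return 0;
}

dvb_ca_en50221_parse_attributes() then expects a fixed sequence of tuples (0x1D, 0x1C, 0x15, 0x20, 0x1A, ...) and rejects the CAM as invalid if the chain does not match.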
@@ -445,9 +459,10 @@ static int dvb_ca_en50221_read_tuple(struct dvb_ca_private *ca, int slot, */ static int dvb_ca_en50221_parse_attributes(struct dvb_ca_private *ca, int slot) { + struct dvb_ca_slot *sl; int address = 0; - int tupleLength; - int tupleType; + int tuple_length; + int tuple_type; u8 tuple[257]; char *dvb_str; int rasz; @@ -458,70 +473,66 @@ static int dvb_ca_en50221_parse_attributes(struct dvb_ca_private *ca, int slot) u16 manfid = 0; u16 devid = 0; - - // CISTPL_DEVICE_0A - if ((status = - dvb_ca_en50221_read_tuple(ca, slot, &address, &tupleType, &tupleLength, tuple)) < 0) + /* CISTPL_DEVICE_0A */ + status = dvb_ca_en50221_read_tuple(ca, slot, &address, &tuple_type, + &tuple_length, tuple); + if (status < 0) return status; - if (tupleType != 0x1D) + if (tuple_type != 0x1D) return -EINVAL; - - - // CISTPL_DEVICE_0C - if ((status = - dvb_ca_en50221_read_tuple(ca, slot, &address, &tupleType, &tupleLength, tuple)) < 0) + /* CISTPL_DEVICE_0C */ + status = dvb_ca_en50221_read_tuple(ca, slot, &address, &tuple_type, + &tuple_length, tuple); + if (status < 0) return status; - if (tupleType != 0x1C) + if (tuple_type != 0x1C) return -EINVAL; - - - // CISTPL_VERS_1 - if ((status = - dvb_ca_en50221_read_tuple(ca, slot, &address, &tupleType, &tupleLength, tuple)) < 0) + /* CISTPL_VERS_1 */ + status = dvb_ca_en50221_read_tuple(ca, slot, &address, &tuple_type, + &tuple_length, tuple); + if (status < 0) return status; - if (tupleType != 0x15) + if (tuple_type != 0x15) return -EINVAL; - - - // CISTPL_MANFID - if ((status = dvb_ca_en50221_read_tuple(ca, slot, &address, &tupleType, - &tupleLength, tuple)) < 0) + /* CISTPL_MANFID */ + status = dvb_ca_en50221_read_tuple(ca, slot, &address, &tuple_type, + &tuple_length, tuple); + if (status < 0) return status; - if (tupleType != 0x20) + if (tuple_type != 0x20) return -EINVAL; - if (tupleLength != 4) + if (tuple_length != 4) return -EINVAL; manfid = (tuple[1] << 8) | tuple[0]; devid = (tuple[3] << 8) | tuple[2]; - - - // CISTPL_CONFIG - if ((status = dvb_ca_en50221_read_tuple(ca, slot, &address, &tupleType, - &tupleLength, tuple)) < 0) + /* CISTPL_CONFIG */ + status = dvb_ca_en50221_read_tuple(ca, slot, &address, &tuple_type, + &tuple_length, tuple); + if (status < 0) return status; - if (tupleType != 0x1A) + if (tuple_type != 0x1A) return -EINVAL; - if (tupleLength < 3) + if (tuple_length < 3) return -EINVAL; /* extract the configbase */ rasz = tuple[0] & 3; - if (tupleLength < (3 + rasz + 14)) + if (tuple_length < (3 + rasz + 14)) return -EINVAL; - ca->slot_info[slot].config_base = 0; - for (i = 0; i < rasz + 1; i++) { - ca->slot_info[slot].config_base |= (tuple[2 + i] << (8 * i)); - } + sl = &ca->slot_info[slot]; + sl->config_base = 0; + for (i = 0; i < rasz + 1; i++) + sl->config_base |= (tuple[2 + i] << (8 * i)); /* check it contains the correct DVB string */ - dvb_str = findstr((char *)tuple, tupleLength, "DVB_CI_V", 8); - if (dvb_str == NULL) + dvb_str = findstr((char *)tuple, tuple_length, "DVB_CI_V", 8); + if (!dvb_str) return -EINVAL; - if (tupleLength < ((dvb_str - (char *) tuple) + 12)) + if (tuple_length < ((dvb_str - (char *)tuple) + 12)) return -EINVAL; /* is it a version we support? 
*/ @@ -534,12 +545,14 @@ static int dvb_ca_en50221_parse_attributes(struct dvb_ca_private *ca, int slot) /* process the CFTABLE_ENTRY tuples, and any after those */ while ((!end_chain) && (address < 0x1000)) { - if ((status = dvb_ca_en50221_read_tuple(ca, slot, &address, &tupleType, - &tupleLength, tuple)) < 0) + status = dvb_ca_en50221_read_tuple(ca, slot, &address, + &tuple_type, &tuple_length, + tuple); + if (status < 0) return status; - switch (tupleType) { - case 0x1B: // CISTPL_CFTABLE_ENTRY - if (tupleLength < (2 + 11 + 17)) + switch (tuple_type) { + case 0x1B: /* CISTPL_CFTABLE_ENTRY */ + if (tuple_length < (2 + 11 + 17)) break; /* if we've already parsed one, just use it */ @@ -547,26 +560,28 @@ static int dvb_ca_en50221_parse_attributes(struct dvb_ca_private *ca, int slot) break; /* get the config option */ - ca->slot_info[slot].config_option = tuple[0] & 0x3f; + sl->config_option = tuple[0] & 0x3f; /* OK, check it contains the correct strings */ - if ((findstr((char *)tuple, tupleLength, "DVB_HOST", 8) == NULL) || - (findstr((char *)tuple, tupleLength, "DVB_CI_MODULE", 13) == NULL)) + if (!findstr((char *)tuple, tuple_length, + "DVB_HOST", 8) || + !findstr((char *)tuple, tuple_length, + "DVB_CI_MODULE", 13)) break; got_cftableentry = 1; break; - case 0x14: // CISTPL_NO_LINK + case 0x14: /* CISTPL_NO_LINK */ break; - case 0xFF: // CISTPL_END + case 0xFF: /* CISTPL_END */ end_chain = 1; break; - default: /* Unknown tuple type - just skip this tuple and move to the next one */ + default: /* Unknown tuple type - just skip this tuple */ dprintk("dvb_ca: Skipping unknown tuple type:0x%x length:0x%x\n", - tupleType, tupleLength); + tuple_type, tuple_length); break; } } @@ -575,14 +590,12 @@ static int dvb_ca_en50221_parse_attributes(struct dvb_ca_private *ca, int slot) return -EINVAL; dprintk("Valid DVB CAM detected MANID:%x DEVID:%x CONFIGBASE:0x%x CONFIGOPTION:0x%x\n", - manfid, devid, ca->slot_info[slot].config_base, - ca->slot_info[slot].config_option); + manfid, devid, sl->config_base, sl->config_option); - // success! + /* success! */ return 0; } - /** * dvb_ca_en50221_set_configoption - Set CAM's configoption correctly. * @@ -591,26 +604,25 @@ static int dvb_ca_en50221_parse_attributes(struct dvb_ca_private *ca, int slot) */ static int dvb_ca_en50221_set_configoption(struct dvb_ca_private *ca, int slot) { + struct dvb_ca_slot *sl = &ca->slot_info[slot]; int configoption; dprintk("%s\n", __func__); /* set the config option */ - ca->pub->write_attribute_mem(ca->pub, slot, - ca->slot_info[slot].config_base, - ca->slot_info[slot].config_option); + ca->pub->write_attribute_mem(ca->pub, slot, sl->config_base, + sl->config_option); /* check it */ - configoption = ca->pub->read_attribute_mem(ca->pub, slot, ca->slot_info[slot].config_base); + configoption = ca->pub->read_attribute_mem(ca->pub, slot, + sl->config_base); dprintk("Set configoption 0x%x, read configoption 0x%x\n", - ca->slot_info[slot].config_option, configoption & 0x3f); + sl->config_option, configoption & 0x3f); /* fine! */ return 0; - } - /** * dvb_ca_en50221_read_data - This function talks to an EN50221 CAM control * interface. It reads a buffer of data from the CAM. 
The data can either @@ -628,6 +640,7 @@ static int dvb_ca_en50221_set_configoption(struct dvb_ca_private *ca, int slot) static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 *ebuf, int ecount) { + struct dvb_ca_slot *sl = &ca->slot_info[slot]; int bytes_read; int status; u8 buf[HOST_LINK_BUF_SIZE]; @@ -636,16 +649,16 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, dprintk("%s\n", __func__); /* check if we have space for a link buf in the rx_buffer */ - if (ebuf == NULL) { + if (!ebuf) { int buf_free; - if (ca->slot_info[slot].rx_buffer.data == NULL) { + if (!sl->rx_buffer.data) { status = -EIO; goto exit; } - buf_free = dvb_ringbuffer_free(&ca->slot_info[slot].rx_buffer); + buf_free = dvb_ringbuffer_free(&sl->rx_buffer); - if (buf_free < (ca->slot_info[slot].link_buf_size + + if (buf_free < (sl->link_buf_size + DVB_RINGBUFFER_PKTHDRSIZE)) { status = -EAGAIN; goto exit; @@ -653,8 +666,8 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, } if (ca->pub->read_data && - (ca->slot_info[slot].slot_state != DVB_CA_SLOTSTATE_LINKINIT)) { - if (ebuf == NULL) + (sl->slot_state != DVB_CA_SLOTSTATE_LINKINIT)) { + if (!ebuf) status = ca->pub->read_data(ca->pub, slot, buf, sizeof(buf)); else @@ -665,7 +678,6 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, if (status == 0) goto exit; } else { - /* check if there is data available */ status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS); @@ -690,21 +702,19 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, bytes_read |= status; /* check it will fit */ - if (ebuf == NULL) { - if (bytes_read > ca->slot_info[slot].link_buf_size) { + if (!ebuf) { + if (bytes_read > sl->link_buf_size) { pr_err("dvb_ca adapter %d: CAM tried to send a buffer larger than the link buffer size (%i > %i)!\n", ca->dvbdev->adapter->num, bytes_read, - ca->slot_info[slot].link_buf_size); - ca->slot_info[slot].slot_state = - DVB_CA_SLOTSTATE_LINKINIT; + sl->link_buf_size); + sl->slot_state = DVB_CA_SLOTSTATE_LINKINIT; status = -EIO; goto exit; } if (bytes_read < 2) { pr_err("dvb_ca adapter %d: CAM sent a buffer that was less than 2 bytes!\n", ca->dvbdev->adapter->num); - ca->slot_info[slot].slot_state = - DVB_CA_SLOTSTATE_LINKINIT; + sl->slot_state = DVB_CA_SLOTSTATE_LINKINIT; status = -EIO; goto exit; } @@ -735,20 +745,22 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, if (status < 0) goto exit; if (status & STATUSREG_RE) { - ca->slot_info[slot].slot_state = - DVB_CA_SLOTSTATE_LINKINIT; + sl->slot_state = DVB_CA_SLOTSTATE_LINKINIT; status = -EIO; goto exit; } } - /* OK, add it to the receive buffer, or copy into external buffer if supplied */ - if (ebuf == NULL) { - if (ca->slot_info[slot].rx_buffer.data == NULL) { + /* + * OK, add it to the receive buffer, or copy into external buffer if + * supplied + */ + if (!ebuf) { + if (!sl->rx_buffer.data) { status = -EIO; goto exit; } - dvb_ringbuffer_pkt_write(&ca->slot_info[slot].rx_buffer, buf, bytes_read); + dvb_ringbuffer_pkt_write(&sl->rx_buffer, buf, bytes_read); } else { memcpy(ebuf, buf, bytes_read); } @@ -757,16 +769,15 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, buf[0], (buf[1] & 0x80) == 0, bytes_read); /* wake up readers when a last_fragment is received */ - if ((buf[1] & 0x80) == 0x00) { + if ((buf[1] & 0x80) == 0x00) wake_up_interruptible(&ca->wait_queue); - } + status = bytes_read; exit: return status; } - /** * 
dvb_ca_en50221_write_data - This function talks to an EN50221 CAM control * interface. It writes a buffer of data to a CAM. @@ -782,25 +793,28 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot, u8 *buf, int bytes_write) { + struct dvb_ca_slot *sl = &ca->slot_info[slot]; int status; int i; dprintk("%s\n", __func__); - /* sanity check */ - if (bytes_write > ca->slot_info[slot].link_buf_size) + if (bytes_write > sl->link_buf_size) return -EINVAL; if (ca->pub->write_data && - (ca->slot_info[slot].slot_state != DVB_CA_SLOTSTATE_LINKINIT)) + (sl->slot_state != DVB_CA_SLOTSTATE_LINKINIT)) return ca->pub->write_data(ca->pub, slot, buf, bytes_write); - /* it is possible we are dealing with a single buffer implementation, - thus if there is data available for read or if there is even a read - already in progress, we do nothing but awake the kernel thread to - process the data if necessary. */ - if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS)) < 0) + /* + * it is possible we are dealing with a single buffer implementation, + * thus if there is data available for read or if there is even a read + * already in progress, we do nothing but awake the kernel thread to + * process the data if necessary. + */ + status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS); + if (status < 0) goto exitnowrite; if (status & (STATUSREG_DA | STATUSREG_RE)) { if (status & STATUSREG_DA) @@ -811,12 +825,14 @@ static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot, } /* OK, set HC bit */ - if ((status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, - IRQEN | CMDREG_HC)) != 0) + status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, + IRQEN | CMDREG_HC); + if (status) goto exit; /* check if interface is still free */ - if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS)) < 0) + status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS); + if (status < 0) goto exit; if (!(status & STATUSREG_FR)) { /* it wasn't free => try again later */ @@ -848,23 +864,29 @@ static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot, } /* send the amount of data */ - if ((status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_SIZE_HIGH, bytes_write >> 8)) != 0) + status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_SIZE_HIGH, + bytes_write >> 8); + if (status) goto exit; - if ((status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_SIZE_LOW, - bytes_write & 0xff)) != 0) + status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_SIZE_LOW, + bytes_write & 0xff); + if (status) goto exit; /* send the buffer */ for (i = 0; i < bytes_write; i++) { - if ((status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_DATA, buf[i])) != 0) + status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_DATA, + buf[i]); + if (status) goto exit; } /* check for write error (WE should now be 0) */ - if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS)) < 0) + status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS); + if (status < 0) goto exit; if (status & STATUSREG_WE) { - ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_LINKINIT; + sl->slot_state = DVB_CA_SLOTSTATE_LINKINIT; status = -EIO; goto exit; } @@ -880,12 +902,9 @@ static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot, return status; } - - -/* ******************************************************************************** */ +/* 
************************************************************************** */ /* EN50221 higher level functions */ - /** * dvb_ca_en50221_slot_shutdown - A CAM has been removed => shut it down. * @@ -899,8 +918,10 @@ static int dvb_ca_en50221_slot_shutdown(struct dvb_ca_private *ca, int slot) ca->pub->slot_shutdown(ca->pub, slot); ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_NONE; - /* need to wake up all processes to check if they're now - trying to write to a defunct CAM */ + /* + * need to wake up all processes to check if they're now trying to + * write to a defunct CAM + */ wake_up_interruptible(&ca->wait_queue); dprintk("Slot %i shutdown\n", slot); @@ -909,7 +930,6 @@ static int dvb_ca_en50221_slot_shutdown(struct dvb_ca_private *ca, int slot) return 0; } - /** * dvb_ca_en50221_camchange_irq - A CAMCHANGE IRQ has occurred. * @@ -917,9 +937,11 @@ static int dvb_ca_en50221_slot_shutdown(struct dvb_ca_private *ca, int slot) * @slot: Slot concerned. * @change_type: One of the DVB_CA_CAMCHANGE_* values. */ -void dvb_ca_en50221_camchange_irq(struct dvb_ca_en50221 *pubca, int slot, int change_type) +void dvb_ca_en50221_camchange_irq(struct dvb_ca_en50221 *pubca, int slot, + int change_type) { struct dvb_ca_private *ca = pubca->private; + struct dvb_ca_slot *sl = &ca->slot_info[slot]; dprintk("CAMCHANGE IRQ slot:%i change_type:%i\n", slot, change_type); @@ -932,13 +954,12 @@ void dvb_ca_en50221_camchange_irq(struct dvb_ca_en50221 *pubca, int slot, int ch return; } - ca->slot_info[slot].camchange_type = change_type; - atomic_inc(&ca->slot_info[slot].camchange_count); + sl->camchange_type = change_type; + atomic_inc(&sl->camchange_count); dvb_ca_en50221_thread_wakeup(ca); } EXPORT_SYMBOL(dvb_ca_en50221_camchange_irq); - /** * dvb_ca_en50221_camready_irq - A CAMREADY IRQ has occurred. * @@ -948,17 +969,17 @@ EXPORT_SYMBOL(dvb_ca_en50221_camchange_irq); void dvb_ca_en50221_camready_irq(struct dvb_ca_en50221 *pubca, int slot) { struct dvb_ca_private *ca = pubca->private; + struct dvb_ca_slot *sl = &ca->slot_info[slot]; dprintk("CAMREADY IRQ slot:%i\n", slot); - if (ca->slot_info[slot].slot_state == DVB_CA_SLOTSTATE_WAITREADY) { - ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_VALIDATE; + if (sl->slot_state == DVB_CA_SLOTSTATE_WAITREADY) { + sl->slot_state = DVB_CA_SLOTSTATE_VALIDATE; dvb_ca_en50221_thread_wakeup(ca); } } EXPORT_SYMBOL(dvb_ca_en50221_camready_irq); - /** * dvb_ca_en50221_frda_irq - An FR or DA IRQ has occurred. 
* @@ -968,16 +989,17 @@ EXPORT_SYMBOL(dvb_ca_en50221_camready_irq); void dvb_ca_en50221_frda_irq(struct dvb_ca_en50221 *pubca, int slot) { struct dvb_ca_private *ca = pubca->private; + struct dvb_ca_slot *sl = &ca->slot_info[slot]; int flags; dprintk("FR/DA IRQ slot:%i\n", slot); - switch (ca->slot_info[slot].slot_state) { + switch (sl->slot_state) { case DVB_CA_SLOTSTATE_LINKINIT: flags = ca->pub->read_cam_control(pubca, slot, CTRLIF_STATUS); if (flags & STATUSREG_DA) { dprintk("CAM supports DA IRQ\n"); - ca->slot_info[slot].da_irq_supported = 1; + sl->da_irq_supported = 1; } break; @@ -989,8 +1011,7 @@ void dvb_ca_en50221_frda_irq(struct dvb_ca_en50221 *pubca, int slot) } EXPORT_SYMBOL(dvb_ca_en50221_frda_irq); - -/* ******************************************************************************** */ +/* ************************************************************************** */ /* EN50221 thread functions */ /** @@ -1000,7 +1021,6 @@ EXPORT_SYMBOL(dvb_ca_en50221_frda_irq); */ static void dvb_ca_en50221_thread_wakeup(struct dvb_ca_private *ca) { - dprintk("%s\n", __func__); ca->wakeup = 1; @@ -1019,11 +1039,14 @@ static void dvb_ca_en50221_thread_update_delay(struct dvb_ca_private *ca) int curdelay = 100000000; int slot; - /* Beware of too high polling frequency, because one polling + /* + * Beware of too high polling frequency, because one polling * call might take several hundred milliseconds until timeout! */ for (slot = 0; slot < ca->slot_count; slot++) { - switch (ca->slot_info[slot].slot_state) { + struct dvb_ca_slot *sl = &ca->slot_info[slot]; + + switch (sl->slot_state) { default: case DVB_CA_SLOTSTATE_NONE: delay = HZ * 60; /* 60s */ @@ -1049,7 +1072,7 @@ static void dvb_ca_en50221_thread_update_delay(struct dvb_ca_private *ca) if (!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE)) delay = HZ / 10; /* 100ms */ if (ca->open) { - if ((!ca->slot_info[slot].da_irq_supported) || + if ((!sl->da_irq_supported) || (!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_DA))) delay = HZ / 10; /* 100ms */ } @@ -1063,19 +1086,224 @@ static void dvb_ca_en50221_thread_update_delay(struct dvb_ca_private *ca) ca->delay = curdelay; } +/** + * Poll if the CAM is gone. + * + * @ca: CA instance. + * @slot: Slot to process. + * @return: 0 .. no change + * 1 .. CAM state changed + */ +static int dvb_ca_en50221_poll_cam_gone(struct dvb_ca_private *ca, int slot) +{ + int changed = 0; + int status; + + /* + * we need this extra check for annoying interfaces like the + * budget-av + */ + if ((!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE)) && + (ca->pub->poll_slot_status)) { + status = ca->pub->poll_slot_status(ca->pub, slot, 0); + if (!(status & + DVB_CA_EN50221_POLL_CAM_PRESENT)) { + ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_NONE; + dvb_ca_en50221_thread_update_delay(ca); + changed = 1; + } + } + return changed; +} /** - * Kernel thread which monitors CA slots for CAM changes, and performs data transfers. + * Thread state machine for one CA slot to perform the data transfer. + * + * @ca: CA instance. + * @slot: Slot to process. 
+ */ +static void dvb_ca_en50221_thread_state_machine(struct dvb_ca_private *ca, + int slot) +{ + struct dvb_ca_slot *sl = &ca->slot_info[slot]; + int flags; + int pktcount; + void *rxbuf; + + mutex_lock(&sl->slot_lock); + + /* check the cam status + deal with CAMCHANGEs */ + while (dvb_ca_en50221_check_camstatus(ca, slot)) { + /* clear down an old CI slot if necessary */ + if (sl->slot_state != DVB_CA_SLOTSTATE_NONE) + dvb_ca_en50221_slot_shutdown(ca, slot); + + /* if a CAM is NOW present, initialise it */ + if (sl->camchange_type == DVB_CA_EN50221_CAMCHANGE_INSERTED) + sl->slot_state = DVB_CA_SLOTSTATE_UNINITIALISED; + + /* we've handled one CAMCHANGE */ + dvb_ca_en50221_thread_update_delay(ca); + atomic_dec(&sl->camchange_count); + } + + /* CAM state machine */ + switch (sl->slot_state) { + case DVB_CA_SLOTSTATE_NONE: + case DVB_CA_SLOTSTATE_INVALID: + /* no action needed */ + break; + + case DVB_CA_SLOTSTATE_UNINITIALISED: + sl->slot_state = DVB_CA_SLOTSTATE_WAITREADY; + ca->pub->slot_reset(ca->pub, slot); + sl->timeout = jiffies + (INIT_TIMEOUT_SECS * HZ); + break; + + case DVB_CA_SLOTSTATE_WAITREADY: + if (time_after(jiffies, sl->timeout)) { + pr_err("dvb_ca adaptor %d: PC card did not respond :(\n", + ca->dvbdev->adapter->num); + sl->slot_state = DVB_CA_SLOTSTATE_INVALID; + dvb_ca_en50221_thread_update_delay(ca); + break; + } + /* + * no other action needed; will automatically change state when + * ready + */ + break; + + case DVB_CA_SLOTSTATE_VALIDATE: + if (dvb_ca_en50221_parse_attributes(ca, slot) != 0) { + if (dvb_ca_en50221_poll_cam_gone(ca, slot)) + break; + + pr_err("dvb_ca adapter %d: Invalid PC card inserted :(\n", + ca->dvbdev->adapter->num); + sl->slot_state = DVB_CA_SLOTSTATE_INVALID; + dvb_ca_en50221_thread_update_delay(ca); + break; + } + if (dvb_ca_en50221_set_configoption(ca, slot) != 0) { + pr_err("dvb_ca adapter %d: Unable to initialise CAM :(\n", + ca->dvbdev->adapter->num); + sl->slot_state = DVB_CA_SLOTSTATE_INVALID; + dvb_ca_en50221_thread_update_delay(ca); + break; + } + if (ca->pub->write_cam_control(ca->pub, slot, + CTRLIF_COMMAND, + CMDREG_RS) != 0) { + pr_err("dvb_ca adapter %d: Unable to reset CAM IF\n", + ca->dvbdev->adapter->num); + sl->slot_state = DVB_CA_SLOTSTATE_INVALID; + dvb_ca_en50221_thread_update_delay(ca); + break; + } + dprintk("DVB CAM validated successfully\n"); + + sl->timeout = jiffies + (INIT_TIMEOUT_SECS * HZ); + sl->slot_state = DVB_CA_SLOTSTATE_WAITFR; + ca->wakeup = 1; + break; + + case DVB_CA_SLOTSTATE_WAITFR: + if (time_after(jiffies, sl->timeout)) { + pr_err("dvb_ca adapter %d: DVB CAM did not respond :(\n", + ca->dvbdev->adapter->num); + sl->slot_state = DVB_CA_SLOTSTATE_INVALID; + dvb_ca_en50221_thread_update_delay(ca); + break; + } + + flags = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS); + if (flags & STATUSREG_FR) { + sl->slot_state = DVB_CA_SLOTSTATE_LINKINIT; + ca->wakeup = 1; + } + break; + + case DVB_CA_SLOTSTATE_LINKINIT: + if (dvb_ca_en50221_link_init(ca, slot) != 0) { + if (dvb_ca_en50221_poll_cam_gone(ca, slot)) + break; + + pr_err("dvb_ca adapter %d: DVB CAM link initialisation failed :(\n", + ca->dvbdev->adapter->num); + sl->slot_state = DVB_CA_SLOTSTATE_UNINITIALISED; + dvb_ca_en50221_thread_update_delay(ca); + break; + } + + if (!sl->rx_buffer.data) { + rxbuf = vmalloc(RX_BUFFER_SIZE); + if (!rxbuf) { + pr_err("dvb_ca adapter %d: Unable to allocate CAM rx buffer :(\n", + ca->dvbdev->adapter->num); + sl->slot_state = DVB_CA_SLOTSTATE_INVALID; + dvb_ca_en50221_thread_update_delay(ca); + break; + } + 
dvb_ringbuffer_init(&sl->rx_buffer, rxbuf, + RX_BUFFER_SIZE); + } + + ca->pub->slot_ts_enable(ca->pub, slot); + sl->slot_state = DVB_CA_SLOTSTATE_RUNNING; + dvb_ca_en50221_thread_update_delay(ca); + pr_err("dvb_ca adapter %d: DVB CAM detected and initialised successfully\n", + ca->dvbdev->adapter->num); + break; + + case DVB_CA_SLOTSTATE_RUNNING: + if (!ca->open) + break; + + /* poll slots for data */ + pktcount = 0; + while (dvb_ca_en50221_read_data(ca, slot, NULL, 0) > 0) { + if (!ca->open) + break; + + /* + * if a CAMCHANGE occurred at some point, do not do any + * more processing of this slot + */ + if (dvb_ca_en50221_check_camstatus(ca, slot)) { + /* + * we don't want to sleep on the next iteration + * so we can handle the cam change + */ + ca->wakeup = 1; + break; + } + + /* check if we've hit our limit this time */ + if (++pktcount >= MAX_RX_PACKETS_PER_ITERATION) { + /* + * don't sleep; there is likely to be more data + * to read + */ + ca->wakeup = 1; + break; + } + } + break; + } + + mutex_unlock(&sl->slot_lock); +} + +/** + * Kernel thread which monitors CA slots for CAM changes, and performs data + * transfers. */ static int dvb_ca_en50221_thread(void *data) { struct dvb_ca_private *ca = data; int slot; - int flags; - int status; - int pktcount; - void *rxbuf; dprintk("%s\n", __func__); @@ -1094,184 +1322,14 @@ static int dvb_ca_en50221_thread(void *data) ca->wakeup = 0; /* go through all the slots processing them */ - for (slot = 0; slot < ca->slot_count; slot++) { - - mutex_lock(&ca->slot_info[slot].slot_lock); - - // check the cam status + deal with CAMCHANGEs - while (dvb_ca_en50221_check_camstatus(ca, slot)) { - /* clear down an old CI slot if necessary */ - if (ca->slot_info[slot].slot_state != DVB_CA_SLOTSTATE_NONE) - dvb_ca_en50221_slot_shutdown(ca, slot); - - /* if a CAM is NOW present, initialise it */ - if (ca->slot_info[slot].camchange_type == DVB_CA_EN50221_CAMCHANGE_INSERTED) { - ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_UNINITIALISED; - } - - /* we've handled one CAMCHANGE */ - dvb_ca_en50221_thread_update_delay(ca); - atomic_dec(&ca->slot_info[slot].camchange_count); - } - - // CAM state machine - switch (ca->slot_info[slot].slot_state) { - case DVB_CA_SLOTSTATE_NONE: - case DVB_CA_SLOTSTATE_INVALID: - // no action needed - break; - - case DVB_CA_SLOTSTATE_UNINITIALISED: - ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_WAITREADY; - ca->pub->slot_reset(ca->pub, slot); - ca->slot_info[slot].timeout = jiffies + (INIT_TIMEOUT_SECS * HZ); - break; - - case DVB_CA_SLOTSTATE_WAITREADY: - if (time_after(jiffies, ca->slot_info[slot].timeout)) { - pr_err("dvb_ca adaptor %d: PC card did not respond :(\n", - ca->dvbdev->adapter->num); - ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID; - dvb_ca_en50221_thread_update_delay(ca); - break; - } - // no other action needed; will automatically change state when ready - break; - - case DVB_CA_SLOTSTATE_VALIDATE: - if (dvb_ca_en50221_parse_attributes(ca, slot) != 0) { - /* we need this extra check for annoying interfaces like the budget-av */ - if ((!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE)) && - (ca->pub->poll_slot_status)) { - status = ca->pub->poll_slot_status(ca->pub, slot, 0); - if (!(status & DVB_CA_EN50221_POLL_CAM_PRESENT)) { - ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_NONE; - dvb_ca_en50221_thread_update_delay(ca); - break; - } - } - - pr_err("dvb_ca adapter %d: Invalid PC card inserted :(\n", - ca->dvbdev->adapter->num); - ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID; - 
dvb_ca_en50221_thread_update_delay(ca); - break; - } - if (dvb_ca_en50221_set_configoption(ca, slot) != 0) { - pr_err("dvb_ca adapter %d: Unable to initialise CAM :(\n", - ca->dvbdev->adapter->num); - ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID; - dvb_ca_en50221_thread_update_delay(ca); - break; - } - if (ca->pub->write_cam_control(ca->pub, slot, - CTRLIF_COMMAND, CMDREG_RS) != 0) { - pr_err("dvb_ca adapter %d: Unable to reset CAM IF\n", - ca->dvbdev->adapter->num); - ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID; - dvb_ca_en50221_thread_update_delay(ca); - break; - } - dprintk("DVB CAM validated successfully\n"); - - ca->slot_info[slot].timeout = jiffies + (INIT_TIMEOUT_SECS * HZ); - ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_WAITFR; - ca->wakeup = 1; - break; - - case DVB_CA_SLOTSTATE_WAITFR: - if (time_after(jiffies, ca->slot_info[slot].timeout)) { - pr_err("dvb_ca adapter %d: DVB CAM did not respond :(\n", - ca->dvbdev->adapter->num); - ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID; - dvb_ca_en50221_thread_update_delay(ca); - break; - } - - flags = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS); - if (flags & STATUSREG_FR) { - ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_LINKINIT; - ca->wakeup = 1; - } - break; - - case DVB_CA_SLOTSTATE_LINKINIT: - if (dvb_ca_en50221_link_init(ca, slot) != 0) { - /* we need this extra check for annoying interfaces like the budget-av */ - if ((!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE)) && - (ca->pub->poll_slot_status)) { - status = ca->pub->poll_slot_status(ca->pub, slot, 0); - if (!(status & DVB_CA_EN50221_POLL_CAM_PRESENT)) { - ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_NONE; - dvb_ca_en50221_thread_update_delay(ca); - break; - } - } - - pr_err("dvb_ca adapter %d: DVB CAM link initialisation failed :(\n", - ca->dvbdev->adapter->num); - ca->slot_info[slot].slot_state = - DVB_CA_SLOTSTATE_UNINITIALISED; - dvb_ca_en50221_thread_update_delay(ca); - break; - } - - if (ca->slot_info[slot].rx_buffer.data == NULL) { - rxbuf = vmalloc(RX_BUFFER_SIZE); - if (rxbuf == NULL) { - pr_err("dvb_ca adapter %d: Unable to allocate CAM rx buffer :(\n", - ca->dvbdev->adapter->num); - ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID; - dvb_ca_en50221_thread_update_delay(ca); - break; - } - dvb_ringbuffer_init(&ca->slot_info[slot].rx_buffer, rxbuf, RX_BUFFER_SIZE); - } - - ca->pub->slot_ts_enable(ca->pub, slot); - ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_RUNNING; - dvb_ca_en50221_thread_update_delay(ca); - pr_err("dvb_ca adapter %d: DVB CAM detected and initialised successfully\n", - ca->dvbdev->adapter->num); - break; - - case DVB_CA_SLOTSTATE_RUNNING: - if (!ca->open) - break; - - // poll slots for data - pktcount = 0; - while ((status = dvb_ca_en50221_read_data(ca, slot, NULL, 0)) > 0) { - if (!ca->open) - break; - - /* if a CAMCHANGE occurred at some point, do not do any more processing of this slot */ - if (dvb_ca_en50221_check_camstatus(ca, slot)) { - // we dont want to sleep on the next iteration so we can handle the cam change - ca->wakeup = 1; - break; - } - - /* check if we've hit our limit this time */ - if (++pktcount >= MAX_RX_PACKETS_PER_ITERATION) { - // dont sleep; there is likely to be more data to read - ca->wakeup = 1; - break; - } - } - break; - } - - mutex_unlock(&ca->slot_info[slot].slot_lock); - } + for (slot = 0; slot < ca->slot_count; slot++) + dvb_ca_en50221_thread_state_machine(ca, slot); } return 0; } - - -/* 
******************************************************************************** */ +/* ************************************************************************** */ /* EN50221 IO interface functions */ /** @@ -1301,15 +1359,17 @@ static int dvb_ca_en50221_io_do_ioctl(struct file *file, switch (cmd) { case CA_RESET: for (slot = 0; slot < ca->slot_count; slot++) { - mutex_lock(&ca->slot_info[slot].slot_lock); - if (ca->slot_info[slot].slot_state != DVB_CA_SLOTSTATE_NONE) { + struct dvb_ca_slot *sl = &ca->slot_info[slot]; + + mutex_lock(&sl->slot_lock); + if (sl->slot_state != DVB_CA_SLOTSTATE_NONE) { dvb_ca_en50221_slot_shutdown(ca, slot); if (ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE) dvb_ca_en50221_camchange_irq(ca->pub, slot, DVB_CA_EN50221_CAMCHANGE_INSERTED); } - mutex_unlock(&ca->slot_info[slot].slot_lock); + mutex_unlock(&sl->slot_lock); } ca->next_read_slot = 0; dvb_ca_en50221_thread_wakeup(ca); @@ -1327,21 +1387,23 @@ static int dvb_ca_en50221_io_do_ioctl(struct file *file, case CA_GET_SLOT_INFO: { struct ca_slot_info *info = parg; + struct dvb_ca_slot *sl; - if ((info->num > ca->slot_count) || (info->num < 0)) { + slot = info->num; + if ((slot > ca->slot_count) || (slot < 0)) { err = -EINVAL; goto out_unlock; } info->type = CA_CI_LINK; info->flags = 0; - if ((ca->slot_info[info->num].slot_state != DVB_CA_SLOTSTATE_NONE) - && (ca->slot_info[info->num].slot_state != DVB_CA_SLOTSTATE_INVALID)) { + sl = &ca->slot_info[slot]; + if ((sl->slot_state != DVB_CA_SLOTSTATE_NONE) && + (sl->slot_state != DVB_CA_SLOTSTATE_INVALID)) { info->flags = CA_CI_MODULE_PRESENT; } - if (ca->slot_info[info->num].slot_state == DVB_CA_SLOTSTATE_RUNNING) { + if (sl->slot_state == DVB_CA_SLOTSTATE_RUNNING) info->flags |= CA_CI_MODULE_READY; - } break; } @@ -1355,7 +1417,6 @@ static int dvb_ca_en50221_io_do_ioctl(struct file *file, return err; } - /** * Wrapper for ioctl implementation. * @@ -1372,7 +1433,6 @@ static long dvb_ca_en50221_io_ioctl(struct file *file, return dvb_usercopy(file, cmd, arg, dvb_ca_en50221_io_do_ioctl); } - /** * Implementation of write() syscall. * @@ -1389,6 +1449,7 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file, { struct dvb_device *dvbdev = file->private_data; struct dvb_ca_private *ca = dvbdev->priv; + struct dvb_ca_slot *sl; u8 slot, connection_id; int status; u8 fragbuf[HOST_LINK_BUF_SIZE]; @@ -1399,7 +1460,10 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file, dprintk("%s\n", __func__); - /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */ + /* + * Incoming packet has a 2 byte header. 
+ * hdr[0] = slot_id, hdr[1] = connection_id + */ if (count < 2) return -EINVAL; @@ -1410,14 +1474,15 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file, return -EFAULT; buf += 2; count -= 2; + sl = &ca->slot_info[slot]; /* check if the slot is actually running */ - if (ca->slot_info[slot].slot_state != DVB_CA_SLOTSTATE_RUNNING) + if (sl->slot_state != DVB_CA_SLOTSTATE_RUNNING) return -EINVAL; /* fragment the packets & store in the buffer */ while (fragpos < count) { - fraglen = ca->slot_info[slot].link_buf_size - 2; + fraglen = sl->link_buf_size - 2; if (fraglen < 0) break; if (fraglen > HOST_LINK_BUF_SIZE - 2) @@ -1436,15 +1501,19 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file, timeout = jiffies + HZ / 2; written = 0; while (!time_after(jiffies, timeout)) { - /* check the CAM hasn't been removed/reset in the meantime */ - if (ca->slot_info[slot].slot_state != DVB_CA_SLOTSTATE_RUNNING) { + /* + * check the CAM hasn't been removed/reset in the + * meantime + */ + if (sl->slot_state != DVB_CA_SLOTSTATE_RUNNING) { status = -EIO; goto exit; } - mutex_lock(&ca->slot_info[slot].slot_lock); - status = dvb_ca_en50221_write_data(ca, slot, fragbuf, fraglen + 2); - mutex_unlock(&ca->slot_info[slot].slot_lock); + mutex_lock(&sl->slot_lock); + status = dvb_ca_en50221_write_data(ca, slot, fragbuf, + fraglen + 2); + mutex_unlock(&sl->slot_lock); if (status == (fraglen + 2)) { written = 1; break; @@ -1452,7 +1521,7 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file, if (status != -EAGAIN) goto exit; - msleep(1); + usleep_range(1000, 1100); } if (!written) { status = -EIO; @@ -1467,7 +1536,6 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file, return status; } - /** * Condition for waking up in dvb_ca_en50221_io_read_condition */ @@ -1484,25 +1552,28 @@ static int dvb_ca_en50221_io_read_condition(struct dvb_ca_private *ca, slot = ca->next_read_slot; while ((slot_count < ca->slot_count) && (!found)) { - if (ca->slot_info[slot].slot_state != DVB_CA_SLOTSTATE_RUNNING) + struct dvb_ca_slot *sl = &ca->slot_info[slot]; + + if (sl->slot_state != DVB_CA_SLOTSTATE_RUNNING) goto nextslot; - if (ca->slot_info[slot].rx_buffer.data == NULL) { + if (!sl->rx_buffer.data) return 0; - } - idx = dvb_ringbuffer_pkt_next(&ca->slot_info[slot].rx_buffer, -1, &fraglen); + idx = dvb_ringbuffer_pkt_next(&sl->rx_buffer, -1, &fraglen); while (idx != -1) { - dvb_ringbuffer_pkt_read(&ca->slot_info[slot].rx_buffer, idx, 0, hdr, 2); + dvb_ringbuffer_pkt_read(&sl->rx_buffer, idx, 0, hdr, 2); if (connection_id == -1) connection_id = hdr[0]; - if ((hdr[0] == connection_id) && ((hdr[1] & 0x80) == 0)) { + if ((hdr[0] == connection_id) && + ((hdr[1] & 0x80) == 0)) { *_slot = slot; found = 1; break; } - idx = dvb_ringbuffer_pkt_next(&ca->slot_info[slot].rx_buffer, idx, &fraglen); + idx = dvb_ringbuffer_pkt_next(&sl->rx_buffer, idx, + &fraglen); } nextslot: @@ -1514,7 +1585,6 @@ static int dvb_ca_en50221_io_read_condition(struct dvb_ca_private *ca, return found; } - /** * Implementation of read() syscall. * @@ -1530,6 +1600,7 @@ static ssize_t dvb_ca_en50221_io_read(struct file *file, char __user *buf, { struct dvb_device *dvbdev = file->private_data; struct dvb_ca_private *ca = dvbdev->priv; + struct dvb_ca_slot *sl; int status; int result = 0; u8 hdr[2]; @@ -1543,13 +1614,16 @@ static ssize_t dvb_ca_en50221_io_read(struct file *file, char __user *buf, dprintk("%s\n", __func__); - /* Outgoing packet has a 2 byte header. 
hdr[0] = slot_id, hdr[1] = connection_id */ + /* + * Outgoing packet has a 2 byte header. + * hdr[0] = slot_id, hdr[1] = connection_id + */ if (count < 2) return -EINVAL; /* wait for some data */ - if ((status = dvb_ca_en50221_io_read_condition(ca, &result, &slot)) == 0) { - + status = dvb_ca_en50221_io_read_condition(ca, &result, &slot); + if (status == 0) { /* if we're in nonblocking mode, exit immediately */ if (file->f_flags & O_NONBLOCK) return -EWOULDBLOCK; @@ -1565,7 +1639,8 @@ static ssize_t dvb_ca_en50221_io_read(struct file *file, char __user *buf, return status; } - idx = dvb_ringbuffer_pkt_next(&ca->slot_info[slot].rx_buffer, -1, &fraglen); + sl = &ca->slot_info[slot]; + idx = dvb_ringbuffer_pkt_next(&sl->rx_buffer, -1, &fraglen); pktlen = 2; do { if (idx == -1) { @@ -1575,21 +1650,24 @@ static ssize_t dvb_ca_en50221_io_read(struct file *file, char __user *buf, goto exit; } - dvb_ringbuffer_pkt_read(&ca->slot_info[slot].rx_buffer, idx, 0, hdr, 2); + dvb_ringbuffer_pkt_read(&sl->rx_buffer, idx, 0, hdr, 2); if (connection_id == -1) connection_id = hdr[0]; if (hdr[0] == connection_id) { if (pktlen < count) { - if ((pktlen + fraglen - 2) > count) { + if ((pktlen + fraglen - 2) > count) fraglen = count - pktlen; - } else { + else fraglen -= 2; - } - if ((status = dvb_ringbuffer_pkt_read_user(&ca->slot_info[slot].rx_buffer, idx, 2, - buf + pktlen, fraglen)) < 0) { + status = + dvb_ringbuffer_pkt_read_user(&sl->rx_buffer, + idx, 2, + buf + pktlen, + fraglen); + if (status < 0) goto exit; - } + pktlen += fraglen; } @@ -1598,9 +1676,9 @@ static ssize_t dvb_ca_en50221_io_read(struct file *file, char __user *buf, dispose = 1; } - idx2 = dvb_ringbuffer_pkt_next(&ca->slot_info[slot].rx_buffer, idx, &fraglen); + idx2 = dvb_ringbuffer_pkt_next(&sl->rx_buffer, idx, &fraglen); if (dispose) - dvb_ringbuffer_pkt_dispose(&ca->slot_info[slot].rx_buffer, idx); + dvb_ringbuffer_pkt_dispose(&sl->rx_buffer, idx); idx = idx2; dispose = 0; } while (!last_fragment); @@ -1618,7 +1696,6 @@ static ssize_t dvb_ca_en50221_io_read(struct file *file, char __user *buf, return status; } - /** * Implementation of file open syscall. * @@ -1646,12 +1723,16 @@ static int dvb_ca_en50221_io_open(struct inode *inode, struct file *file) } for (i = 0; i < ca->slot_count; i++) { + struct dvb_ca_slot *sl = &ca->slot_info[i]; - if (ca->slot_info[i].slot_state == DVB_CA_SLOTSTATE_RUNNING) { - if (ca->slot_info[i].rx_buffer.data != NULL) { - /* it is safe to call this here without locks because - * ca->open == 0. Data is not read in this case */ - dvb_ringbuffer_flush(&ca->slot_info[i].rx_buffer); + if (sl->slot_state == DVB_CA_SLOTSTATE_RUNNING) { + if (sl->rx_buffer.data) { + /* + * it is safe to call this here without locks + * because ca->open == 0. Data is not read in + * this case + */ + dvb_ringbuffer_flush(&sl->rx_buffer); } } } @@ -1665,7 +1746,6 @@ static int dvb_ca_en50221_io_open(struct inode *inode, struct file *file) return 0; } - /** * Implementation of file close syscall. * @@ -1695,7 +1775,6 @@ static int dvb_ca_en50221_io_release(struct inode *inode, struct file *file) return err; } - /** * Implementation of poll() syscall.
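For orientation, the ioctl, write() and read() hunks above, and the poll() hunk that follows, all serve the same character-device protocol: every packet exchanged with userspace carries the 2-byte slot_id/connection_id header, write() fragments the payload to the CAM's link buffer size, and poll() reports POLLIN once dvb_ca_en50221_io_read_condition() sees data waiting in a slot's rx_buffer. Below is a minimal userspace sketch of that protocol; the device path /dev/dvb/adapter0/ca0 and the idea that a session stack has already established connection_id 1 are illustrative assumptions, not taken from this patch.

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/dvb/ca.h>

int main(void)
{
	struct ca_slot_info info = { .num = 0 };
	struct pollfd pfd;
	unsigned char buf[256];
	ssize_t n;
	int fd = open("/dev/dvb/adapter0/ca0", O_RDWR | O_NONBLOCK); /* assumed path */

	if (fd < 0)
		return 1;

	/* CA_GET_SLOT_INFO: flags gain CA_CI_MODULE_PRESENT/READY as seen above */
	if (ioctl(fd, CA_GET_SLOT_INFO, &info) == 0)
		printf("slot 0 flags: 0x%x\n", info.flags);

	/*
	 * A write() would start with the same 2-byte header the driver
	 * parses above: buf[0] = slot_id, buf[1] = connection_id; the
	 * EN50221 session-layer payload (built by a CAM stack, omitted
	 * here) follows and is fragmented by the driver itself.
	 */

	/* poll() raises POLLIN once a whole packet has been reassembled */
	pfd.fd = fd;
	pfd.events = POLLIN;
	if (poll(&pfd, 1, 1000) > 0 && (pfd.revents & POLLIN)) {
		n = read(fd, buf, sizeof(buf));
		if (n >= 2)
			printf("slot %d, connection %d, %zd payload bytes\n",
			       buf[0], buf[1], n - 2);
	}

	close(fd);
	return 0;
}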
* @@ -1714,9 +1793,8 @@ static unsigned int dvb_ca_en50221_io_poll(struct file *file, poll_table *wait) dprintk("%s\n", __func__); - if (dvb_ca_en50221_io_read_condition(ca, &result, &slot) == 1) { + if (dvb_ca_en50221_io_read_condition(ca, &result, &slot) == 1) mask |= POLLIN; - } /* if there is something, return now */ if (mask) @@ -1725,14 +1803,11 @@ static unsigned int dvb_ca_en50221_io_poll(struct file *file, poll_table *wait) /* wait for something to happen */ poll_wait(file, &ca->wait_queue, wait); - if (dvb_ca_en50221_io_read_condition(ca, &result, &slot) == 1) { + if (dvb_ca_en50221_io_read_condition(ca, &result, &slot) == 1) mask |= POLLIN; - } return mask; } -EXPORT_SYMBOL(dvb_ca_en50221_init); - static const struct file_operations dvb_ca_fops = { .owner = THIS_MODULE, @@ -1756,10 +1831,9 @@ static const struct dvb_device dvbdev_ca = { .fops = &dvb_ca_fops, }; -/* ******************************************************************************** */ +/* ************************************************************************** */ /* Initialisation/shutdown functions */ - /** * Initialise a new DVB CA EN50221 interface device. * @@ -1783,7 +1857,8 @@ int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter, return -EINVAL; /* initialise the system data */ - if ((ca = kzalloc(sizeof(struct dvb_ca_private), GFP_KERNEL)) == NULL) { + ca = kzalloc(sizeof(*ca), GFP_KERNEL); + if (!ca) { ret = -ENOMEM; goto exit; } @@ -1791,7 +1866,9 @@ int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter, ca->pub = pubca; ca->flags = flags; ca->slot_count = slot_count; - if ((ca->slot_info = kcalloc(slot_count, sizeof(struct dvb_ca_slot), GFP_KERNEL)) == NULL) { + ca->slot_info = kcalloc(slot_count, sizeof(struct dvb_ca_slot), + GFP_KERNEL); + if (!ca->slot_info) { ret = -ENOMEM; goto free_ca; } @@ -1802,17 +1879,20 @@ int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter, pubca->private = ca; /* register the DVB device */ - ret = dvb_register_device(dvb_adapter, &ca->dvbdev, &dvbdev_ca, ca, DVB_DEVICE_CA, 0); + ret = dvb_register_device(dvb_adapter, &ca->dvbdev, &dvbdev_ca, ca, + DVB_DEVICE_CA, 0); if (ret) goto free_slot_info; /* now initialise each slot */ for (i = 0; i < slot_count; i++) { - memset(&ca->slot_info[i], 0, sizeof(struct dvb_ca_slot)); - ca->slot_info[i].slot_state = DVB_CA_SLOTSTATE_NONE; - atomic_set(&ca->slot_info[i].camchange_count, 0); - ca->slot_info[i].camchange_type = DVB_CA_EN50221_CAMCHANGE_REMOVED; - mutex_init(&ca->slot_info[i].slot_lock); + struct dvb_ca_slot *sl = &ca->slot_info[i]; + + memset(sl, 0, sizeof(struct dvb_ca_slot)); + sl->slot_state = DVB_CA_SLOTSTATE_NONE; + atomic_set(&sl->camchange_count, 0); + sl->camchange_type = DVB_CA_EN50221_CAMCHANGE_REMOVED; + mutex_init(&sl->slot_lock); } mutex_init(&ca->ioctl_mutex); @@ -1844,9 +1924,7 @@ int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter, pubca->private = NULL; return ret; } -EXPORT_SYMBOL(dvb_ca_en50221_release); - - +EXPORT_SYMBOL(dvb_ca_en50221_init); /** * Release a DVB CA EN50221 interface device. 
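Since these hunks also move the EXPORT_SYMBOL() markers so that dvb_ca_en50221_init() and dvb_ca_en50221_release() sit next to their definitions, it is worth recalling how a bridge driver consumes this API. The sketch below is a hedged illustration built around a hypothetical "mycard" driver: only the init/release entry points and the read_data/write_data/slot_reset/slot_shutdown callbacks are taken from this patch; the structure layout, the empty callback bodies, and the note about the remaining callbacks are assumptions for illustration.

#include <linux/module.h>
#include "dvbdev.h"
#include "dvb_ca_en50221.h"

/* Hypothetical bridge-driver private data, for illustration only */
struct mycard {
	struct dvb_adapter adapter;	/* registered elsewhere in the driver */
	struct dvb_ca_en50221 ca;
};

static int mycard_read_data(struct dvb_ca_en50221 *ca, int slot,
			    u8 *ebuf, int ecount)
{
	/* pull up to ecount link-layer bytes from the CAM; 0 = nothing yet */
	return 0;
}

static int mycard_write_data(struct dvb_ca_en50221 *ca, int slot,
			     u8 *ebuf, int ecount)
{
	/* push ecount link-layer bytes to the CAM */
	return ecount;
}

static int mycard_slot_reset(struct dvb_ca_en50221 *ca, int slot)
{
	return 0;	/* toggle the slot's reset line here */
}

static int mycard_slot_shutdown(struct dvb_ca_en50221 *ca, int slot)
{
	return 0;	/* power the slot down here */
}

static int mycard_ca_register(struct mycard *card)
{
	card->ca.owner = THIS_MODULE;
	card->ca.read_data = mycard_read_data;
	card->ca.write_data = mycard_write_data;
	card->ca.slot_reset = mycard_slot_reset;
	card->ca.slot_shutdown = mycard_slot_shutdown;
	/*
	 * a real driver also fills the attribute-memory, CAM-control,
	 * TS-enable and slot-status callbacks declared in dvb_ca_en50221.h
	 */
	card->ca.data = card;

	/* one slot, no IRQ flags; dvb_ca_en50221_release(&card->ca) undoes this */
	return dvb_ca_en50221_init(&card->adapter, &card->ca, 0, 1);
}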
@@ -1864,10 +1942,11 @@ void dvb_ca_en50221_release(struct dvb_ca_en50221 *pubca) /* shutdown the thread if there was one */ kthread_stop(ca->thread); - for (i = 0; i < ca->slot_count; i++) { + for (i = 0; i < ca->slot_count; i++) dvb_ca_en50221_slot_shutdown(ca, i); - } + dvb_remove_device(ca->dvbdev); dvb_ca_private_put(ca); pubca->private = NULL; } +EXPORT_SYMBOL(dvb_ca_en50221_release); diff --git a/drivers/media/dvb-core/dvb_ca_en50221.h b/drivers/media/dvb-core/dvb_ca_en50221.h index 82617bac0875..367687d2b41a 100644 --- a/drivers/media/dvb-core/dvb_ca_en50221.h +++ b/drivers/media/dvb-core/dvb_ca_en50221.h @@ -69,9 +69,9 @@ struct dvb_ca_en50221 { int slot, u8 address, u8 value); int (*read_data)(struct dvb_ca_en50221 *ca, - int slot, u8 *ebuf, int ecount); + int slot, u8 *ebuf, int ecount); int (*write_data)(struct dvb_ca_en50221 *ca, - int slot, u8 *ebuf, int ecount); + int slot, u8 *ebuf, int ecount); int (*slot_reset)(struct dvb_ca_en50221 *ca, int slot); int (*slot_shutdown)(struct dvb_ca_en50221 *ca, int slot); @@ -128,8 +128,8 @@ void dvb_ca_en50221_frda_irq(struct dvb_ca_en50221 *ca, int slot); * * @return 0 on success, nonzero on failure */ -extern int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter, - struct dvb_ca_en50221 *ca, int flags, +int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter, + struct dvb_ca_en50221 *ca, int flags, int slot_count); /** @@ -137,6 +137,6 @@ extern int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter, * * @ca: The associated dvb_ca instance. */ -extern void dvb_ca_en50221_release(struct dvb_ca_en50221 *ca); +void dvb_ca_en50221_release(struct dvb_ca_en50221 *ca); #endif diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c index e3fff8f64d37..2fcba1616168 100644 --- a/drivers/media/dvb-core/dvb_frontend.c +++ b/drivers/media/dvb-core/dvb_frontend.c @@ -460,7 +460,7 @@ static int dvb_frontend_swzigzag_autotune(struct dvb_frontend *fe, int check_wra static void dvb_frontend_swzigzag(struct dvb_frontend *fe) { - enum fe_status s = 0; + enum fe_status s = FE_NONE; int retval = 0; struct dvb_frontend_private *fepriv = fe->frontend_priv; struct dtv_frontend_properties *c = &fe->dtv_property_cache, tmp; @@ -631,7 +631,7 @@ static int dvb_frontend_thread(void *data) struct dvb_frontend *fe = data; struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct dvb_frontend_private *fepriv = fe->frontend_priv; - enum fe_status s; + enum fe_status s = FE_NONE; enum dvbfe_algo algo; bool re_tune = false; bool semheld = false; @@ -1000,6 +1000,17 @@ static int dvb_frontend_clear_cache(struct dvb_frontend *fe) .buffer = b \ } +struct dtv_cmds_h { + char *name; /* A display name for debugging purposes */ + + __u32 cmd; /* A unique ID */ + + /* Flags */ + __u32 set:1; /* Either a set or get property */ + __u32 buffer:1; /* Does this property use the buffer? */ + __u32 reserved:30; /* Align */ +}; + static struct dtv_cmds_h dtv_cmds[DTV_MAX_COMMAND + 1] = { _DTV_CMD(DTV_TUNE, 1, 0), _DTV_CMD(DTV_CLEAR, 1, 0), diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig index 3a260b82b3e8..2631d0e0a024 100644 --- a/drivers/media/dvb-frontends/Kconfig +++ b/drivers/media/dvb-frontends/Kconfig @@ -28,6 +28,15 @@ config DVB_STV090x DVB-S/S2/DSS Multistandard Professional/Broadcast demodulators. Say Y when you want to support these frontends. 
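Going back to the dvb_frontend.c hunk above: struct dtv_cmds_h is now defined next to the dtv_cmds[] table it describes, but only the tail of the _DTV_CMD() helper (".buffer = b") is visible in the context lines. As a reading aid, the fragment below spells out what a single table entry such as _DTV_CMD(DTV_TUNE, 1, 0) plausibly expands to; the exact initializer list is reconstructed from the struct dtv_cmds_h fields, not quoted from the source, and it assumes the struct definition from the hunk above is in scope.

#include <linux/dvb/frontend.h>	/* DTV_TUNE, DTV_MAX_COMMAND */

/*
 * Plausible expansion of one dtv_cmds[] entry, reconstructed from the
 * struct dtv_cmds_h fields shown above; treat the details as assumed.
 */
static struct dtv_cmds_h dtv_cmds_sketch[DTV_MAX_COMMAND + 1] = {
	[DTV_TUNE] = {
		.name	= "DTV_TUNE",	/* display name for debugging */
		.cmd	= DTV_TUNE,	/* unique property ID */
		.set	= 1,		/* a "set" (write-style) property */
		.buffer	= 0,		/* does not use the auxiliary buffer */
	},
};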
+config DVB_STV0910 + tristate "STV0910 based" + depends on DVB_CORE && I2C + default m if !MEDIA_SUBDRV_AUTOSELECT + help + ST STV0910 DVB-S/S2 demodulator driver. + + Say Y when you want to support these frontends. + config DVB_STV6110x tristate "STV6110/(A) based tuners" depends on DVB_CORE && I2C @@ -35,6 +44,24 @@ config DVB_STV6110x help A Silicon tuner that supports DVB-S and DVB-S2 modes +config DVB_STV6111 + tristate "STV6111 based tuners" + depends on DVB_CORE && I2C + default m if !MEDIA_SUBDRV_AUTOSELECT + help + A Silicon tuner that supports DVB-S and DVB-S2 modes + + Say Y when you want to support these frontends. + +config DVB_MXL5XX + tristate "MaxLinear MxL5xx based tuner-demodulators" + depends on DVB_CORE && I2C + default m if !MEDIA_SUBDRV_AUTOSELECT + help + MaxLinear MxL5xx family of DVB-S/S2 tuners/demodulators. + + Say Y when you want to support these frontends. + config DVB_M88DS3103 tristate "Montage Technology M88DS3103" depends on DVB_CORE && I2C && I2C_MUX diff --git a/drivers/media/dvb-frontends/Makefile b/drivers/media/dvb-frontends/Makefile index 3fccaf34ef52..f45f6a4a4371 100644 --- a/drivers/media/dvb-frontends/Makefile +++ b/drivers/media/dvb-frontends/Makefile @@ -110,6 +110,9 @@ obj-$(CONFIG_DVB_CXD2820R) += cxd2820r.o obj-$(CONFIG_DVB_CXD2841ER) += cxd2841er.o obj-$(CONFIG_DVB_DRXK) += drxk.o obj-$(CONFIG_DVB_TDA18271C2DD) += tda18271c2dd.o +obj-$(CONFIG_DVB_STV0910) += stv0910.o +obj-$(CONFIG_DVB_STV6111) += stv6111.o +obj-$(CONFIG_DVB_MXL5XX) += mxl5xx.o obj-$(CONFIG_DVB_SI2165) += si2165.o obj-$(CONFIG_DVB_A8293) += a8293.o obj-$(CONFIG_DVB_SP2) += sp2.o diff --git a/drivers/media/dvb-frontends/cx24123.c b/drivers/media/dvb-frontends/cx24123.c index 4ae3d922a8e8..1d59d1d3bd82 100644 --- a/drivers/media/dvb-frontends/cx24123.c +++ b/drivers/media/dvb-frontends/cx24123.c @@ -1032,7 +1032,7 @@ static u32 cx24123_tuner_i2c_func(struct i2c_adapter *adapter) return I2C_FUNC_I2C; } -static struct i2c_algorithm cx24123_tuner_i2c_algo = { +static const struct i2c_algorithm cx24123_tuner_i2c_algo = { .master_xfer = cx24123_tuner_i2c_tuner_xfer, .functionality = cx24123_tuner_i2c_func, }; diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c index 12bff778c97f..48ee9bc00c06 100644 --- a/drivers/media/dvb-frontends/cxd2841er.c +++ b/drivers/media/dvb-frontends/cxd2841er.c @@ -487,6 +487,8 @@ static int cxd2841er_sleep_tc_to_shutdown(struct cxd2841er_priv *priv); static int cxd2841er_shutdown_to_sleep_tc(struct cxd2841er_priv *priv); +static int cxd2841er_sleep_tc(struct dvb_frontend *fe); + static int cxd2841er_retune_active(struct cxd2841er_priv *priv, struct dtv_frontend_properties *p) { @@ -2178,42 +2180,42 @@ static int cxd2841er_sleep_tc_to_active_t2_band(struct cxd2841er_priv *priv, u32 iffreq, ifhz; u8 data[MAX_WRITE_REGSIZE]; - const uint8_t nominalRate8bw[3][5] = { + static const uint8_t nominalRate8bw[3][5] = { /* TRCG Nominal Rate [37:0] */ {0x11, 0xF0, 0x00, 0x00, 0x00}, /* 20.5MHz XTal */ {0x15, 0x00, 0x00, 0x00, 0x00}, /* 24MHz XTal */ {0x11, 0xF0, 0x00, 0x00, 0x00} /* 41MHz XTal */ }; - const uint8_t nominalRate7bw[3][5] = { + static const uint8_t nominalRate7bw[3][5] = { /* TRCG Nominal Rate [37:0] */ {0x14, 0x80, 0x00, 0x00, 0x00}, /* 20.5MHz XTal */ {0x18, 0x00, 0x00, 0x00, 0x00}, /* 24MHz XTal */ {0x14, 0x80, 0x00, 0x00, 0x00} /* 41MHz XTal */ }; - const uint8_t nominalRate6bw[3][5] = { + static const uint8_t nominalRate6bw[3][5] = { /* TRCG Nominal Rate [37:0] */ {0x17, 0xEA, 0xAA, 0xAA, 0xAA}, /* 
20.5MHz XTal */ {0x1C, 0x00, 0x00, 0x00, 0x00}, /* 24MHz XTal */ {0x17, 0xEA, 0xAA, 0xAA, 0xAA} /* 41MHz XTal */ }; - const uint8_t nominalRate5bw[3][5] = { + static const uint8_t nominalRate5bw[3][5] = { /* TRCG Nominal Rate [37:0] */ {0x1C, 0xB3, 0x33, 0x33, 0x33}, /* 20.5MHz XTal */ {0x21, 0x99, 0x99, 0x99, 0x99}, /* 24MHz XTal */ {0x1C, 0xB3, 0x33, 0x33, 0x33} /* 41MHz XTal */ }; - const uint8_t nominalRate17bw[3][5] = { + static const uint8_t nominalRate17bw[3][5] = { /* TRCG Nominal Rate [37:0] */ {0x58, 0xE2, 0xAF, 0xE0, 0xBC}, /* 20.5MHz XTal */ {0x68, 0x0F, 0xA2, 0x32, 0xD0}, /* 24MHz XTal */ {0x58, 0xE2, 0xAF, 0xE0, 0xBC} /* 41MHz XTal */ }; - const uint8_t itbCoef8bw[3][14] = { + static const uint8_t itbCoef8bw[3][14] = { {0x26, 0xAF, 0x06, 0xCD, 0x13, 0xBB, 0x28, 0xBA, 0x23, 0xA9, 0x1F, 0xA8, 0x2C, 0xC8}, /* 20.5MHz XTal */ {0x2F, 0xBA, 0x28, 0x9B, 0x28, 0x9D, 0x28, 0xA1, @@ -2222,7 +2224,7 @@ static int cxd2841er_sleep_tc_to_active_t2_band(struct cxd2841er_priv *priv, 0x23, 0xA9, 0x1F, 0xA8, 0x2C, 0xC8} /* 41MHz XTal */ }; - const uint8_t itbCoef7bw[3][14] = { + static const uint8_t itbCoef7bw[3][14] = { {0x2C, 0xBD, 0x02, 0xCF, 0x04, 0xF8, 0x23, 0xA6, 0x29, 0xB0, 0x26, 0xA9, 0x21, 0xA5}, /* 20.5MHz XTal */ {0x30, 0xB1, 0x29, 0x9A, 0x28, 0x9C, 0x28, 0xA0, @@ -2231,7 +2233,7 @@ static int cxd2841er_sleep_tc_to_active_t2_band(struct cxd2841er_priv *priv, 0x29, 0xB0, 0x26, 0xA9, 0x21, 0xA5} /* 41MHz XTal */ }; - const uint8_t itbCoef6bw[3][14] = { + static const uint8_t itbCoef6bw[3][14] = { {0x27, 0xA7, 0x28, 0xB3, 0x02, 0xF0, 0x01, 0xE8, 0x00, 0xCF, 0x00, 0xE6, 0x23, 0xA4}, /* 20.5MHz XTal */ {0x31, 0xA8, 0x29, 0x9B, 0x27, 0x9C, 0x28, 0x9E, @@ -2240,7 +2242,7 @@ static int cxd2841er_sleep_tc_to_active_t2_band(struct cxd2841er_priv *priv, 0x00, 0xCF, 0x00, 0xE6, 0x23, 0xA4} /* 41MHz XTal */ }; - const uint8_t itbCoef5bw[3][14] = { + static const uint8_t itbCoef5bw[3][14] = { {0x27, 0xA7, 0x28, 0xB3, 0x02, 0xF0, 0x01, 0xE8, 0x00, 0xCF, 0x00, 0xE6, 0x23, 0xA4}, /* 20.5MHz XTal */ {0x31, 0xA8, 0x29, 0x9B, 0x27, 0x9C, 0x28, 0x9E, @@ -2249,7 +2251,7 @@ static int cxd2841er_sleep_tc_to_active_t2_band(struct cxd2841er_priv *priv, 0x00, 0xCF, 0x00, 0xE6, 0x23, 0xA4} /* 41MHz XTal */ }; - const uint8_t itbCoef17bw[3][14] = { + static const uint8_t itbCoef17bw[3][14] = { {0x25, 0xA0, 0x36, 0x8D, 0x2E, 0x94, 0x28, 0x9B, 0x32, 0x90, 0x2C, 0x9D, 0x29, 0x99}, /* 20.5MHz XTal */ {0x33, 0x8E, 0x2B, 0x97, 0x2D, 0x95, 0x37, 0x8B, @@ -2423,32 +2425,32 @@ static int cxd2841er_sleep_tc_to_active_t_band( { u8 data[MAX_WRITE_REGSIZE]; u32 iffreq, ifhz; - u8 nominalRate8bw[3][5] = { + static const u8 nominalRate8bw[3][5] = { /* TRCG Nominal Rate [37:0] */ {0x11, 0xF0, 0x00, 0x00, 0x00}, /* 20.5MHz XTal */ {0x15, 0x00, 0x00, 0x00, 0x00}, /* 24MHz XTal */ {0x11, 0xF0, 0x00, 0x00, 0x00} /* 41MHz XTal */ }; - u8 nominalRate7bw[3][5] = { + static const u8 nominalRate7bw[3][5] = { /* TRCG Nominal Rate [37:0] */ {0x14, 0x80, 0x00, 0x00, 0x00}, /* 20.5MHz XTal */ {0x18, 0x00, 0x00, 0x00, 0x00}, /* 24MHz XTal */ {0x14, 0x80, 0x00, 0x00, 0x00} /* 41MHz XTal */ }; - u8 nominalRate6bw[3][5] = { + static const u8 nominalRate6bw[3][5] = { /* TRCG Nominal Rate [37:0] */ {0x17, 0xEA, 0xAA, 0xAA, 0xAA}, /* 20.5MHz XTal */ {0x1C, 0x00, 0x00, 0x00, 0x00}, /* 24MHz XTal */ {0x17, 0xEA, 0xAA, 0xAA, 0xAA} /* 41MHz XTal */ }; - u8 nominalRate5bw[3][5] = { + static const u8 nominalRate5bw[3][5] = { /* TRCG Nominal Rate [37:0] */ {0x1C, 0xB3, 0x33, 0x33, 0x33}, /* 20.5MHz XTal */ {0x21, 0x99, 0x99, 0x99, 0x99}, /* 24MHz XTal 
*/ {0x1C, 0xB3, 0x33, 0x33, 0x33} /* 41MHz XTal */ }; - u8 itbCoef8bw[3][14] = { + static const u8 itbCoef8bw[3][14] = { {0x26, 0xAF, 0x06, 0xCD, 0x13, 0xBB, 0x28, 0xBA, 0x23, 0xA9, 0x1F, 0xA8, 0x2C, 0xC8}, /* 20.5MHz XTal */ {0x2F, 0xBA, 0x28, 0x9B, 0x28, 0x9D, 0x28, 0xA1, 0x29, 0xA5, @@ -2456,7 +2458,7 @@ static int cxd2841er_sleep_tc_to_active_t_band( {0x26, 0xAF, 0x06, 0xCD, 0x13, 0xBB, 0x28, 0xBA, 0x23, 0xA9, 0x1F, 0xA8, 0x2C, 0xC8} /* 41MHz XTal */ }; - u8 itbCoef7bw[3][14] = { + static const u8 itbCoef7bw[3][14] = { {0x2C, 0xBD, 0x02, 0xCF, 0x04, 0xF8, 0x23, 0xA6, 0x29, 0xB0, 0x26, 0xA9, 0x21, 0xA5}, /* 20.5MHz XTal */ {0x30, 0xB1, 0x29, 0x9A, 0x28, 0x9C, 0x28, 0xA0, 0x29, 0xA2, @@ -2464,7 +2466,7 @@ static int cxd2841er_sleep_tc_to_active_t_band( {0x2C, 0xBD, 0x02, 0xCF, 0x04, 0xF8, 0x23, 0xA6, 0x29, 0xB0, 0x26, 0xA9, 0x21, 0xA5} /* 41MHz XTal */ }; - u8 itbCoef6bw[3][14] = { + static const u8 itbCoef6bw[3][14] = { {0x27, 0xA7, 0x28, 0xB3, 0x02, 0xF0, 0x01, 0xE8, 0x00, 0xCF, 0x00, 0xE6, 0x23, 0xA4}, /* 20.5MHz XTal */ {0x31, 0xA8, 0x29, 0x9B, 0x27, 0x9C, 0x28, 0x9E, 0x29, 0xA4, @@ -2472,7 +2474,7 @@ static int cxd2841er_sleep_tc_to_active_t_band( {0x27, 0xA7, 0x28, 0xB3, 0x02, 0xF0, 0x01, 0xE8, 0x00, 0xCF, 0x00, 0xE6, 0x23, 0xA4} /* 41MHz XTal */ }; - u8 itbCoef5bw[3][14] = { + static const u8 itbCoef5bw[3][14] = { {0x27, 0xA7, 0x28, 0xB3, 0x02, 0xF0, 0x01, 0xE8, 0x00, 0xCF, 0x00, 0xE6, 0x23, 0xA4}, /* 20.5MHz XTal */ {0x31, 0xA8, 0x29, 0x9B, 0x27, 0x9C, 0x28, 0x9E, 0x29, 0xA4, @@ -2652,39 +2654,39 @@ static int cxd2841er_sleep_tc_to_active_i_band( u8 data[3]; /* TRCG Nominal Rate */ - u8 nominalRate8bw[3][5] = { + static const u8 nominalRate8bw[3][5] = { {0x00, 0x00, 0x00, 0x00, 0x00}, /* 20.5MHz XTal */ {0x11, 0xB8, 0x00, 0x00, 0x00}, /* 24MHz XTal */ {0x00, 0x00, 0x00, 0x00, 0x00} /* 41MHz XTal */ }; - u8 nominalRate7bw[3][5] = { + static const u8 nominalRate7bw[3][5] = { {0x00, 0x00, 0x00, 0x00, 0x00}, /* 20.5MHz XTal */ {0x14, 0x40, 0x00, 0x00, 0x00}, /* 24MHz XTal */ {0x00, 0x00, 0x00, 0x00, 0x00} /* 41MHz XTal */ }; - u8 nominalRate6bw[3][5] = { + static const u8 nominalRate6bw[3][5] = { {0x14, 0x2E, 0x00, 0x00, 0x00}, /* 20.5MHz XTal */ {0x17, 0xA0, 0x00, 0x00, 0x00}, /* 24MHz XTal */ {0x14, 0x2E, 0x00, 0x00, 0x00} /* 41MHz XTal */ }; - u8 itbCoef8bw[3][14] = { + static const u8 itbCoef8bw[3][14] = { {0x00}, /* 20.5MHz XTal */ {0x2F, 0xBA, 0x28, 0x9B, 0x28, 0x9D, 0x28, 0xA1, 0x29, 0xA5, 0x2A, 0xAC, 0x29, 0xB5}, /* 24MHz Xtal */ {0x0}, /* 41MHz XTal */ }; - u8 itbCoef7bw[3][14] = { + static const u8 itbCoef7bw[3][14] = { {0x00}, /* 20.5MHz XTal */ {0x30, 0xB1, 0x29, 0x9A, 0x28, 0x9C, 0x28, 0xA0, 0x29, 0xA2, 0x2B, 0xA6, 0x2B, 0xAD}, /* 24MHz Xtal */ {0x00}, /* 41MHz XTal */ }; - u8 itbCoef6bw[3][14] = { + static const u8 itbCoef6bw[3][14] = { {0x27, 0xA7, 0x28, 0xB3, 0x02, 0xF0, 0x01, 0xE8, 0x00, 0xCF, 0x00, 0xE6, 0x23, 0xA4}, /* 20.5MHz XTal */ {0x31, 0xA8, 0x29, 0x9B, 0x27, 0x9C, 0x28, 0x9E, 0x29, @@ -3378,6 +3380,14 @@ static int cxd2841er_set_frontend_tc(struct dvb_frontend *fe) if (priv->flags & CXD2841ER_EARLY_TUNE) cxd2841er_tuner_set(fe); + /* deconfigure/put demod to sleep on delsys switch if active */ + if (priv->state == STATE_ACTIVE_TC && + priv->system != p->delivery_system) { + dev_dbg(&priv->i2c->dev, "%s(): old_delsys=%d, new_delsys=%d -> sleep\n", + __func__, priv->system, p->delivery_system); + cxd2841er_sleep_tc(fe); + } + if (p->delivery_system == SYS_DVBT) { priv->system = SYS_DVBT; switch (priv->state) { @@ -3594,6 +3604,7 @@ static int 
cxd2841er_sleep_tc(struct dvb_frontend *fe) struct cxd2841er_priv *priv = fe->demodulator_priv; dev_dbg(&priv->i2c->dev, "%s()\n", __func__); + if (priv->state == STATE_ACTIVE_TC) { switch (priv->system) { case SYS_DVBT: @@ -3619,7 +3630,17 @@ static int cxd2841er_sleep_tc(struct dvb_frontend *fe) __func__, priv->state); return -EINVAL; } - cxd2841er_sleep_tc_to_shutdown(priv); + return 0; +} + +static int cxd2841er_shutdown_tc(struct dvb_frontend *fe) +{ + struct cxd2841er_priv *priv = fe->demodulator_priv; + + dev_dbg(&priv->i2c->dev, "%s()\n", __func__); + + if (!cxd2841er_sleep_tc(fe)) + cxd2841er_sleep_tc_to_shutdown(priv); return 0; } @@ -3968,7 +3989,7 @@ static struct dvb_frontend_ops cxd2841er_t_c_ops = { .symbol_rate_max = 11700000 }, .init = cxd2841er_init_tc, - .sleep = cxd2841er_sleep_tc, + .sleep = cxd2841er_shutdown_tc, .release = cxd2841er_release, .set_frontend = cxd2841er_set_frontend_tc, .get_frontend = cxd2841er_get_frontend, @@ -3978,6 +3999,6 @@ static struct dvb_frontend_ops cxd2841er_t_c_ops = { .get_frontend_algo = cxd2841er_get_algo }; -MODULE_DESCRIPTION("Sony CXD2841ER/CXD2854ER DVB-C/C2/T/T2/S/S2 demodulator driver"); +MODULE_DESCRIPTION("Sony CXD2837/38/41/43/54ER DVB-C/C2/T/T2/S/S2 demodulator driver"); MODULE_AUTHOR("Sergey Kozlov , Abylay Ospan "); MODULE_LICENSE("GPL"); diff --git a/drivers/media/dvb-frontends/dib0090.c b/drivers/media/dvb-frontends/dib0090.c index 33af14df27bd..d9d730dfe0b1 100644 --- a/drivers/media/dvb-frontends/dib0090.c +++ b/drivers/media/dvb-frontends/dib0090.c @@ -2052,7 +2052,7 @@ int dib0090_update_tuning_table_7090(struct dvb_frontend *fe, struct dib0090_state *state = fe->tuner_priv; const struct dib0090_tuning *tune = dib0090_tuning_table_cband_7090e_sensitivity; - const struct dib0090_tuning dib0090_tuning_table_cband_7090e_aci[] = { + static const struct dib0090_tuning dib0090_tuning_table_cband_7090e_aci[] = { { 300000, 0 , 3, 0x8165, 0x2c0, 0x2d12, 0xb84e, EN_CAB }, { 650000, 0 , 4, 0x815B, 0x280, 0x2d12, 0xb84e, EN_CAB }, { 860000, 0 , 5, 0x84EF, 0x280, 0x2d12, 0xb84e, EN_CAB }, @@ -2435,14 +2435,7 @@ static int dib0090_tune(struct dvb_frontend *fe) Den = 1; if (Rest > 0) { - if (state->config->analog_output) - lo6 |= (1 << 2) | 2; - else { - if (state->identity.in_soc) - lo6 |= (1 << 2) | 2; - else - lo6 |= (1 << 2) | 2; - } + lo6 |= (1 << 2) | 2; Den = 255; } dib0090_write_reg(state, 0x15, (u16) FBDiv); diff --git a/drivers/media/dvb-frontends/dib7000p.c b/drivers/media/dvb-frontends/dib7000p.c index 1caa04d8f60f..0fbaabe43682 100644 --- a/drivers/media/dvb-frontends/dib7000p.c +++ b/drivers/media/dvb-frontends/dib7000p.c @@ -2388,7 +2388,7 @@ static u32 dib7000p_i2c_func(struct i2c_adapter *adapter) return I2C_FUNC_I2C; } -static struct i2c_algorithm dib7090_tuner_xfer_algo = { +static const struct i2c_algorithm dib7090_tuner_xfer_algo = { .master_xfer = dib7090_tuner_xfer, .functionality = dib7000p_i2c_func, }; diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c index e501ec964df1..5d9381509b07 100644 --- a/drivers/media/dvb-frontends/dib8000.c +++ b/drivers/media/dvb-frontends/dib8000.c @@ -1880,7 +1880,7 @@ static u32 dib8096p_i2c_func(struct i2c_adapter *adapter) return I2C_FUNC_I2C; } -static struct i2c_algorithm dib8096p_tuner_xfer_algo = { +static const struct i2c_algorithm dib8096p_tuner_xfer_algo = { .master_xfer = dib8096p_tuner_xfer, .functionality = dib8096p_i2c_func, }; @@ -4255,23 +4255,6 @@ static int dib8000_set_slave_frontend(struct dvb_frontend *fe, struct 
dvb_fronte return -ENOMEM; } -static int dib8000_remove_slave_frontend(struct dvb_frontend *fe) -{ - struct dib8000_state *state = fe->demodulator_priv; - u8 index_frontend = 1; - - while ((index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL)) - index_frontend++; - if (index_frontend != 1) { - dprintk("remove slave fe %p (index %i)\n", state->fe[index_frontend-1], index_frontend-1); - state->fe[index_frontend] = NULL; - return 0; - } - - dprintk("no frontend to be removed\n"); - return -ENODEV; -} - static struct dvb_frontend *dib8000_get_slave_frontend(struct dvb_frontend *fe, int slave_index) { struct dib8000_state *state = fe->demodulator_priv; @@ -4506,7 +4489,6 @@ void *dib8000_attach(struct dib8000_ops *ops) ops->get_slave_frontend = dib8000_get_slave_frontend; ops->set_tune_state = dib8000_set_tune_state; ops->pid_filter_ctrl = dib8000_pid_filter_ctrl; - ops->remove_slave_frontend = dib8000_remove_slave_frontend; ops->get_adc_power = dib8000_get_adc_power; ops->update_pll = dib8000_update_pll; ops->tuner_sleep = dib8096p_tuner_sleep; diff --git a/drivers/media/dvb-frontends/dib8000.h b/drivers/media/dvb-frontends/dib8000.h index 2b8b4b1656a2..75cc8e47ec8f 100644 --- a/drivers/media/dvb-frontends/dib8000.h +++ b/drivers/media/dvb-frontends/dib8000.h @@ -53,7 +53,6 @@ struct dib8000_ops { enum frontend_tune_state (*get_tune_state)(struct dvb_frontend *fe); int (*set_tune_state)(struct dvb_frontend *fe, enum frontend_tune_state tune_state); int (*set_slave_frontend)(struct dvb_frontend *fe, struct dvb_frontend *fe_slave); - int (*remove_slave_frontend)(struct dvb_frontend *fe); struct dvb_frontend *(*get_slave_frontend)(struct dvb_frontend *fe, int slave_index); int (*i2c_enumeration)(struct i2c_adapter *host, int no_of_demods, u8 default_addr, u8 first_addr, u8 is_dib8096p); diff --git a/drivers/media/dvb-frontends/dib9000.c b/drivers/media/dvb-frontends/dib9000.c index c95fff4f9582..1b7a4331af05 100644 --- a/drivers/media/dvb-frontends/dib9000.c +++ b/drivers/media/dvb-frontends/dib9000.c @@ -1714,12 +1714,12 @@ static u32 dib9000_i2c_func(struct i2c_adapter *adapter) return I2C_FUNC_I2C; } -static struct i2c_algorithm dib9000_tuner_algo = { +static const struct i2c_algorithm dib9000_tuner_algo = { .master_xfer = dib9000_tuner_xfer, .functionality = dib9000_i2c_func, }; -static struct i2c_algorithm dib9000_component_bus_algo = { +static const struct i2c_algorithm dib9000_component_bus_algo = { .master_xfer = dib9000_fw_component_bus_xfer, .functionality = dib9000_i2c_func, }; @@ -2462,24 +2462,6 @@ int dib9000_set_slave_frontend(struct dvb_frontend *fe, struct dvb_frontend *fe_ } EXPORT_SYMBOL(dib9000_set_slave_frontend); -int dib9000_remove_slave_frontend(struct dvb_frontend *fe) -{ - struct dib9000_state *state = fe->demodulator_priv; - u8 index_frontend = 1; - - while ((index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL)) - index_frontend++; - if (index_frontend != 1) { - dprintk("remove slave fe %p (index %i)\n", state->fe[index_frontend - 1], index_frontend - 1); - state->fe[index_frontend] = NULL; - return 0; - } - - dprintk("no frontend to be removed\n"); - return -ENODEV; -} -EXPORT_SYMBOL(dib9000_remove_slave_frontend); - struct dvb_frontend *dib9000_get_slave_frontend(struct dvb_frontend *fe, int slave_index) { struct dib9000_state *state = fe->demodulator_priv; diff --git a/drivers/media/dvb-frontends/dib9000.h b/drivers/media/dvb-frontends/dib9000.h index b10a70aa7c9f..40883b41e66b 100644 --- 
a/drivers/media/dvb-frontends/dib9000.h +++ b/drivers/media/dvb-frontends/dib9000.h @@ -37,7 +37,6 @@ extern int dib9000_fw_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff); extern int dib9000_fw_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff); extern int dib9000_firmware_post_pll_init(struct dvb_frontend *fe); extern int dib9000_set_slave_frontend(struct dvb_frontend *fe, struct dvb_frontend *fe_slave); -extern int dib9000_remove_slave_frontend(struct dvb_frontend *fe); extern struct dvb_frontend *dib9000_get_slave_frontend(struct dvb_frontend *fe, int slave_index); extern struct i2c_adapter *dib9000_get_component_bus_interface(struct dvb_frontend *fe); extern int dib9000_set_i2c_adapter(struct dvb_frontend *fe, struct i2c_adapter *i2c); @@ -97,12 +96,6 @@ static inline int dib9000_set_slave_frontend(struct dvb_frontend *fe, struct dvb return -ENODEV; } -static inline int dib9000_remove_slave_frontend(struct dvb_frontend *fe) -{ - printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); - return -ENODEV; -} - static inline struct dvb_frontend *dib9000_get_slave_frontend(struct dvb_frontend *fe, int slave_index) { printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.c b/drivers/media/dvb-frontends/drx39xyj/drxj.c index 14040c915dbb..499ccff557bf 100644 --- a/drivers/media/dvb-frontends/drx39xyj/drxj.c +++ b/drivers/media/dvb-frontends/drx39xyj/drxj.c @@ -5489,7 +5489,7 @@ static int set_vsb_leak_n_gain(struct drx_demod_instance *demod) struct i2c_device_addr *dev_addr = NULL; int rc; - const u8 vsb_ffe_leak_gain_ram0[] = { + static const u8 vsb_ffe_leak_gain_ram0[] = { DRXJ_16TO8(0x8), /* FFETRAINLKRATIO1 */ DRXJ_16TO8(0x8), /* FFETRAINLKRATIO2 */ DRXJ_16TO8(0x8), /* FFETRAINLKRATIO3 */ @@ -5620,7 +5620,7 @@ static int set_vsb_leak_n_gain(struct drx_demod_instance *demod) DRXJ_16TO8(0x1010) /* FIRRCA1GAIN8 */ }; - const u8 vsb_ffe_leak_gain_ram1[] = { + static const u8 vsb_ffe_leak_gain_ram1[] = { DRXJ_16TO8(0x1010), /* FIRRCA1GAIN9 */ DRXJ_16TO8(0x0808), /* FIRRCA1GAIN10 */ DRXJ_16TO8(0x0808), /* FIRRCA1GAIN11 */ @@ -5710,7 +5710,7 @@ static int set_vsb(struct drx_demod_instance *demod) struct drxj_data *ext_attr = NULL; u16 cmd_result = 0; u16 cmd_param = 0; - const u8 vsb_taps_re[] = { + static const u8 vsb_taps_re[] = { DRXJ_16TO8(-2), /* re0 */ DRXJ_16TO8(4), /* re1 */ DRXJ_16TO8(1), /* re2 */ @@ -6666,7 +6666,7 @@ static int set_qam16(struct drx_demod_instance *demod) { struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr; int rc; - const u8 qam_dq_qual_fun[] = { + static const u8 qam_dq_qual_fun[] = { DRXJ_16TO8(2), /* fun0 */ DRXJ_16TO8(2), /* fun1 */ DRXJ_16TO8(2), /* fun2 */ @@ -6674,7 +6674,7 @@ static int set_qam16(struct drx_demod_instance *demod) DRXJ_16TO8(3), /* fun4 */ DRXJ_16TO8(3), /* fun5 */ }; - const u8 qam_eq_cma_rad[] = { + static const u8 qam_eq_cma_rad[] = { DRXJ_16TO8(13517), /* RAD0 */ DRXJ_16TO8(13517), /* RAD1 */ DRXJ_16TO8(13517), /* RAD2 */ @@ -6901,7 +6901,7 @@ static int set_qam32(struct drx_demod_instance *demod) { struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr; int rc; - const u8 qam_dq_qual_fun[] = { + static const u8 qam_dq_qual_fun[] = { DRXJ_16TO8(3), /* fun0 */ DRXJ_16TO8(3), /* fun1 */ DRXJ_16TO8(3), /* fun2 */ @@ -6909,7 +6909,7 @@ static int set_qam32(struct drx_demod_instance *demod) DRXJ_16TO8(4), /* fun4 */ DRXJ_16TO8(4), /* fun5 */ }; - const u8 qam_eq_cma_rad[] = { + static const u8 qam_eq_cma_rad[] = { DRXJ_16TO8(6707), /* RAD0 */ 
DRXJ_16TO8(6707), /* RAD1 */ DRXJ_16TO8(6707), /* RAD2 */ @@ -7136,7 +7136,8 @@ static int set_qam64(struct drx_demod_instance *demod) { struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr; int rc; - const u8 qam_dq_qual_fun[] = { /* this is hw reset value. no necessary to re-write */ + static const u8 qam_dq_qual_fun[] = { + /* this is hw reset value. no necessary to re-write */ DRXJ_16TO8(4), /* fun0 */ DRXJ_16TO8(4), /* fun1 */ DRXJ_16TO8(4), /* fun2 */ @@ -7144,7 +7145,7 @@ static int set_qam64(struct drx_demod_instance *demod) DRXJ_16TO8(6), /* fun4 */ DRXJ_16TO8(6), /* fun5 */ }; - const u8 qam_eq_cma_rad[] = { + static const u8 qam_eq_cma_rad[] = { DRXJ_16TO8(13336), /* RAD0 */ DRXJ_16TO8(12618), /* RAD1 */ DRXJ_16TO8(11988), /* RAD2 */ @@ -7371,7 +7372,7 @@ static int set_qam128(struct drx_demod_instance *demod) { struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr; int rc; - const u8 qam_dq_qual_fun[] = { + static const u8 qam_dq_qual_fun[] = { DRXJ_16TO8(6), /* fun0 */ DRXJ_16TO8(6), /* fun1 */ DRXJ_16TO8(6), /* fun2 */ @@ -7379,7 +7380,7 @@ static int set_qam128(struct drx_demod_instance *demod) DRXJ_16TO8(9), /* fun4 */ DRXJ_16TO8(9), /* fun5 */ }; - const u8 qam_eq_cma_rad[] = { + static const u8 qam_eq_cma_rad[] = { DRXJ_16TO8(6164), /* RAD0 */ DRXJ_16TO8(6598), /* RAD1 */ DRXJ_16TO8(6394), /* RAD2 */ @@ -7606,7 +7607,7 @@ static int set_qam256(struct drx_demod_instance *demod) { struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr; int rc; - const u8 qam_dq_qual_fun[] = { + static const u8 qam_dq_qual_fun[] = { DRXJ_16TO8(8), /* fun0 */ DRXJ_16TO8(8), /* fun1 */ DRXJ_16TO8(8), /* fun2 */ @@ -7614,7 +7615,7 @@ static int set_qam256(struct drx_demod_instance *demod) DRXJ_16TO8(12), /* fun4 */ DRXJ_16TO8(12), /* fun5 */ }; - const u8 qam_eq_cma_rad[] = { + static const u8 qam_eq_cma_rad[] = { DRXJ_16TO8(12345), /* RAD0 */ DRXJ_16TO8(12345), /* RAD1 */ DRXJ_16TO8(13626), /* RAD2 */ @@ -7862,7 +7863,7 @@ set_qam(struct drx_demod_instance *demod, /* parameter */ NULL, /* result */ NULL }; - const u8 qam_a_taps[] = { + static const u8 qam_a_taps[] = { DRXJ_16TO8(-1), /* re0 */ DRXJ_16TO8(1), /* re1 */ DRXJ_16TO8(1), /* re2 */ @@ -7892,7 +7893,7 @@ set_qam(struct drx_demod_instance *demod, DRXJ_16TO8(-40), /* re26 */ DRXJ_16TO8(619) /* re27 */ }; - const u8 qam_b64_taps[] = { + static const u8 qam_b64_taps[] = { DRXJ_16TO8(0), /* re0 */ DRXJ_16TO8(-2), /* re1 */ DRXJ_16TO8(1), /* re2 */ @@ -7922,7 +7923,7 @@ set_qam(struct drx_demod_instance *demod, DRXJ_16TO8(-46), /* re26 */ DRXJ_16TO8(614) /* re27 */ }; - const u8 qam_b256_taps[] = { + static const u8 qam_b256_taps[] = { DRXJ_16TO8(-2), /* re0 */ DRXJ_16TO8(4), /* re1 */ DRXJ_16TO8(1), /* re2 */ @@ -7952,7 +7953,7 @@ set_qam(struct drx_demod_instance *demod, DRXJ_16TO8(-32), /* re26 */ DRXJ_16TO8(628) /* re27 */ }; - const u8 qam_c_taps[] = { + static const u8 qam_c_taps[] = { DRXJ_16TO8(-3), /* re0 */ DRXJ_16TO8(3), /* re1 */ DRXJ_16TO8(2), /* re2 */ diff --git a/drivers/media/dvb-frontends/drxd_hard.c b/drivers/media/dvb-frontends/drxd_hard.c index 17638e08835a..7d04400b18dd 100644 --- a/drivers/media/dvb-frontends/drxd_hard.c +++ b/drivers/media/dvb-frontends/drxd_hard.c @@ -638,8 +638,10 @@ static int SetCfgIfAgc(struct drxd_state *state, struct SCfgAgc *cfg) /* == Speed == */ { const u16 maxRur = 8; - const u16 slowIncrDecLUT[] = { 3, 4, 4, 5, 6 }; - const u16 fastIncrDecLUT[] = { 14, 15, 15, 16, + static const u16 slowIncrDecLUT[] = { + 3, 4, 4, 5, 6 }; + const u16 fastIncrDecLUT[] = { + 14, 15, 15, 16, 17, 
18, 18, 19, 20, 21, 22, 23, 24, 26, 27, 28, diff --git a/drivers/media/dvb-frontends/isl6421.c b/drivers/media/dvb-frontends/isl6421.c index 838b42771a05..3f3487887672 100644 --- a/drivers/media/dvb-frontends/isl6421.c +++ b/drivers/media/dvb-frontends/isl6421.c @@ -38,35 +38,101 @@ struct isl6421 { u8 override_and; struct i2c_adapter *i2c; u8 i2c_addr; + bool is_off; }; static int isl6421_set_voltage(struct dvb_frontend *fe, enum fe_sec_voltage voltage) { + int ret; + u8 buf; + bool is_off; struct isl6421 *isl6421 = (struct isl6421 *) fe->sec_priv; - struct i2c_msg msg = { .addr = isl6421->i2c_addr, .flags = 0, - .buf = &isl6421->config, - .len = sizeof(isl6421->config) }; + struct i2c_msg msg[2] = { + { + .addr = isl6421->i2c_addr, + .flags = 0, + .buf = &isl6421->config, + .len = 1, + }, { + .addr = isl6421->i2c_addr, + .flags = I2C_M_RD, + .buf = &buf, + .len = 1, + } + + }; isl6421->config &= ~(ISL6421_VSEL1 | ISL6421_EN1); switch(voltage) { case SEC_VOLTAGE_OFF: + is_off = true; break; case SEC_VOLTAGE_13: + is_off = false; isl6421->config |= ISL6421_EN1; break; case SEC_VOLTAGE_18: + is_off = false; isl6421->config |= (ISL6421_EN1 | ISL6421_VSEL1); break; default: return -EINVAL; } + /* + * If the LNBf was not powered on, disable the dynamic current limit, as, + * according to the datasheet, a highly capacitive load on the output may + * cause a difficult start-up. + */ + if (isl6421->is_off && !is_off) + isl6421->config |= ISL6421_DCL; + isl6421->config |= isl6421->override_or; isl6421->config &= isl6421->override_and; - return (i2c_transfer(isl6421->i2c, &msg, 1) == 1) ? 0 : -EIO; + ret = i2c_transfer(isl6421->i2c, msg, 2); + if (ret < 0) + return ret; + if (ret != 2) + return -EIO; + + /* Store the off status now in case future commands fail */ + isl6421->is_off = is_off; + + /* On overload, the device will try again after 900 ms (typically) */ + if (!is_off && (buf & ISL6421_OLF1)) + msleep(1000); + + /* Re-enable dynamic current limit */ + if ((isl6421->config & ISL6421_DCL) && + !(isl6421->override_or & ISL6421_DCL)) { + isl6421->config &= ~ISL6421_DCL; + + ret = i2c_transfer(isl6421->i2c, msg, 2); + if (ret < 0) + return ret; + if (ret != 2) + return -EIO; + } + + /* Check if overload flag is active. If so, disable power */ + if (!is_off && (buf & ISL6421_OLF1)) { + isl6421->config &= ~(ISL6421_VSEL1 | ISL6421_EN1); + ret = i2c_transfer(isl6421->i2c, msg, 1); + if (ret < 0) + return ret; + if (ret != 1) + return -EIO; + isl6421->is_off = true; + + dev_warn(&isl6421->i2c->dev, + "Overload current detected. 
disabling LNBf power\n"); + return -EINVAL; + } + + return 0; } static int isl6421_enable_high_lnb_voltage(struct dvb_frontend *fe, long arg) @@ -148,6 +214,8 @@ struct dvb_frontend *isl6421_attach(struct dvb_frontend *fe, struct i2c_adapter return NULL; } + isl6421->is_off = true; + /* install release callback */ fe->ops.release_sec = isl6421_release; diff --git a/drivers/media/dvb-frontends/mb86a16.c b/drivers/media/dvb-frontends/mb86a16.c index 9bb122c39c1b..dfe322eccaa1 100644 --- a/drivers/media/dvb-frontends/mb86a16.c +++ b/drivers/media/dvb-frontends/mb86a16.c @@ -415,27 +415,21 @@ static int signal_det(struct mb86a16_state *state, int smrt, unsigned char *SIG) { - - int ret ; - int smrtd ; - int wait_sym ; - - u32 wait_t; - unsigned char S[3] ; - int i ; + int ret; + int smrtd; + unsigned char S[3]; + int i; if (*SIG > 45) { if (CNTM_set(state, 2, 1, 2) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "CNTM set Error"); return -1; } - wait_sym = 40000; } else { if (CNTM_set(state, 3, 1, 2) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "CNTM set Error"); return -1; } - wait_sym = 80000; } for (i = 0; i < 3; i++) { if (i == 0) @@ -447,22 +441,17 @@ static int signal_det(struct mb86a16_state *state, smrt_info_get(state, smrtd); smrt_set(state, smrtd); srst(state); - wait_t = (wait_sym + 99 * smrtd / 100) / smrtd; - if (wait_t == 0) - wait_t = 1; msleep_interruptible(10); if (mb86a16_read(state, 0x37, &(S[i])) != 2) { dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } } - if ((S[1] > S[0] * 112 / 100) && - (S[1] > S[2] * 112 / 100)) { - + if ((S[1] > S[0] * 112 / 100) && (S[1] > S[2] * 112 / 100)) ret = 1; - } else { + else ret = 0; - } + *SIG = S[1]; if (CNTM_set(state, 0, 1, 2) < 0) { diff --git a/drivers/media/dvb-frontends/mn88472.c b/drivers/media/dvb-frontends/mn88472.c index f6938f9607ac..5e8fd63832e9 100644 --- a/drivers/media/dvb-frontends/mn88472.c +++ b/drivers/media/dvb-frontends/mn88472.c @@ -377,7 +377,9 @@ static int mn88472_set_frontend(struct dvb_frontend *fe) ret = regmap_write(dev->regmap[1], 0xf6, 0x05); if (ret) goto err; - ret = regmap_write(dev->regmap[2], 0x32, c->stream_id); + ret = regmap_write(dev->regmap[2], 0x32, + (c->stream_id == NO_STREAM_ID_FILTER) ? 0 : + c->stream_id ); if (ret) goto err; break; diff --git a/drivers/media/dvb-frontends/mn88473.c b/drivers/media/dvb-frontends/mn88473.c index 15874244fd8b..58247432a628 100644 --- a/drivers/media/dvb-frontends/mn88473.c +++ b/drivers/media/dvb-frontends/mn88473.c @@ -225,7 +225,9 @@ static int mn88473_set_frontend(struct dvb_frontend *fe) /* PLP */ if (c->delivery_system == SYS_DVBT2) { - ret = regmap_write(dev->regmap[2], 0x36, c->stream_id); + ret = regmap_write(dev->regmap[2], 0x36, + (c->stream_id == NO_STREAM_ID_FILTER) ? 0 : + c->stream_id ); if (ret) goto err; } diff --git a/drivers/media/dvb-frontends/mxl5xx.c b/drivers/media/dvb-frontends/mxl5xx.c new file mode 100644 index 000000000000..676c96c216c3 --- /dev/null +++ b/drivers/media/dvb-frontends/mxl5xx.c @@ -0,0 +1,1873 @@ +/* + * Driver for the MaxLinear MxL5xx family of tuners/demods + * + * Copyright (C) 2014-2015 Ralph Metzler + * Marcus Metzler + * developed for Digital Devices GmbH + * + * based on code: + * Copyright (c) 2011-2013 MaxLinear, Inc. All rights reserved + * which was released under GPL V2 + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dvb_frontend.h" +#include "mxl5xx.h" +#include "mxl5xx_regs.h" +#include "mxl5xx_defs.h" + +#define BYTE0(v) ((v >> 0) & 0xff) +#define BYTE1(v) ((v >> 8) & 0xff) +#define BYTE2(v) ((v >> 16) & 0xff) +#define BYTE3(v) ((v >> 24) & 0xff) + +LIST_HEAD(mxllist); + +struct mxl_base { + struct list_head mxllist; + struct list_head mxls; + + u8 adr; + struct i2c_adapter *i2c; + + u32 count; + u32 type; + u32 sku_type; + u32 chipversion; + u32 clock; + u32 fwversion; + + u8 *ts_map; + u8 can_clkout; + u8 chan_bond; + u8 demod_num; + u8 tuner_num; + + unsigned long next_tune; + + struct mutex i2c_lock; + struct mutex status_lock; + struct mutex tune_lock; + + u8 buf[MXL_HYDRA_OEM_MAX_CMD_BUFF_LEN]; + + u32 cmd_size; + u8 cmd_data[MAX_CMD_DATA]; +}; + +struct mxl { + struct list_head mxl; + + struct mxl_base *base; + struct dvb_frontend fe; + struct device *i2cdev; + u32 demod; + u32 tuner; + u32 tuner_in_use; + u8 xbar[3]; + + unsigned long tune_time; +}; + +static void convert_endian(u8 flag, u32 size, u8 *d) +{ + u32 i; + + if (!flag) + return; + for (i = 0; i < (size & ~3); i += 4) { + d[i + 0] ^= d[i + 3]; + d[i + 3] ^= d[i + 0]; + d[i + 0] ^= d[i + 3]; + + d[i + 1] ^= d[i + 2]; + d[i + 2] ^= d[i + 1]; + d[i + 1] ^= d[i + 2]; + } + + switch (size & 3) { + case 0: + case 1: + /* do nothing */ + break; + case 2: + d[i + 0] ^= d[i + 1]; + d[i + 1] ^= d[i + 0]; + d[i + 0] ^= d[i + 1]; + break; + + case 3: + d[i + 0] ^= d[i + 2]; + d[i + 2] ^= d[i + 0]; + d[i + 0] ^= d[i + 2]; + break; + } + +} + +static int i2c_write(struct i2c_adapter *adap, u8 adr, + u8 *data, u32 len) +{ + struct i2c_msg msg = {.addr = adr, .flags = 0, + .buf = data, .len = len}; + + return (i2c_transfer(adap, &msg, 1) == 1) ? 0 : -1; +} + +static int i2c_read(struct i2c_adapter *adap, u8 adr, + u8 *data, u32 len) +{ + struct i2c_msg msg = {.addr = adr, .flags = I2C_M_RD, + .buf = data, .len = len}; + + return (i2c_transfer(adap, &msg, 1) == 1) ? 
0 : -1; +} + +static int i2cread(struct mxl *state, u8 *data, int len) +{ + return i2c_read(state->base->i2c, state->base->adr, data, len); +} + +static int i2cwrite(struct mxl *state, u8 *data, int len) +{ + return i2c_write(state->base->i2c, state->base->adr, data, len); +} + +static int read_register_unlocked(struct mxl *state, u32 reg, u32 *val) +{ + int stat; + u8 data[MXL_HYDRA_REG_SIZE_IN_BYTES + MXL_HYDRA_I2C_HDR_SIZE] = { + MXL_HYDRA_PLID_REG_READ, 0x04, + GET_BYTE(reg, 0), GET_BYTE(reg, 1), + GET_BYTE(reg, 2), GET_BYTE(reg, 3), + }; + + stat = i2cwrite(state, data, + MXL_HYDRA_REG_SIZE_IN_BYTES + MXL_HYDRA_I2C_HDR_SIZE); + if (stat) + dev_err(state->i2cdev, "i2c read error 1\n"); + if (!stat) + stat = i2cread(state, (u8 *) val, + MXL_HYDRA_REG_SIZE_IN_BYTES); + le32_to_cpus(val); + if (stat) + dev_err(state->i2cdev, "i2c read error 2\n"); + return stat; +} + +#define DMA_I2C_INTERRUPT_ADDR 0x8000011C +#define DMA_INTR_PROT_WR_CMP 0x08 + +static int send_command(struct mxl *state, u32 size, u8 *buf) +{ + int stat; + u32 val, count = 10; + + mutex_lock(&state->base->i2c_lock); + if (state->base->fwversion > 0x02010109) { + read_register_unlocked(state, DMA_I2C_INTERRUPT_ADDR, &val); + if (DMA_INTR_PROT_WR_CMP & val) + dev_info(state->i2cdev, "%s busy\n", __func__); + while ((DMA_INTR_PROT_WR_CMP & val) && --count) { + mutex_unlock(&state->base->i2c_lock); + usleep_range(1000, 2000); + mutex_lock(&state->base->i2c_lock); + read_register_unlocked(state, DMA_I2C_INTERRUPT_ADDR, + &val); + } + if (!count) { + dev_info(state->i2cdev, "%s busy\n", __func__); + mutex_unlock(&state->base->i2c_lock); + return -EBUSY; + } + } + stat = i2cwrite(state, buf, size); + mutex_unlock(&state->base->i2c_lock); + return stat; +} + +static int write_register(struct mxl *state, u32 reg, u32 val) +{ + int stat; + u8 data[MXL_HYDRA_REG_WRITE_LEN] = { + MXL_HYDRA_PLID_REG_WRITE, 0x08, + BYTE0(reg), BYTE1(reg), BYTE2(reg), BYTE3(reg), + BYTE0(val), BYTE1(val), BYTE2(val), BYTE3(val), + }; + mutex_lock(&state->base->i2c_lock); + stat = i2cwrite(state, data, sizeof(data)); + mutex_unlock(&state->base->i2c_lock); + if (stat) + dev_err(state->i2cdev, "i2c write error\n"); + return stat; +} + +static int write_firmware_block(struct mxl *state, + u32 reg, u32 size, u8 *reg_data_ptr) +{ + int stat; + u8 *buf = state->base->buf; + + mutex_lock(&state->base->i2c_lock); + buf[0] = MXL_HYDRA_PLID_REG_WRITE; + buf[1] = size + 4; + buf[2] = GET_BYTE(reg, 0); + buf[3] = GET_BYTE(reg, 1); + buf[4] = GET_BYTE(reg, 2); + buf[5] = GET_BYTE(reg, 3); + memcpy(&buf[6], reg_data_ptr, size); + stat = i2cwrite(state, buf, + MXL_HYDRA_I2C_HDR_SIZE + + MXL_HYDRA_REG_SIZE_IN_BYTES + size); + mutex_unlock(&state->base->i2c_lock); + if (stat) + dev_err(state->i2cdev, "fw block write failed\n"); + return stat; +} + +static int read_register(struct mxl *state, u32 reg, u32 *val) +{ + int stat; + u8 data[MXL_HYDRA_REG_SIZE_IN_BYTES + MXL_HYDRA_I2C_HDR_SIZE] = { + MXL_HYDRA_PLID_REG_READ, 0x04, + GET_BYTE(reg, 0), GET_BYTE(reg, 1), + GET_BYTE(reg, 2), GET_BYTE(reg, 3), + }; + + mutex_lock(&state->base->i2c_lock); + stat = i2cwrite(state, data, + MXL_HYDRA_REG_SIZE_IN_BYTES + MXL_HYDRA_I2C_HDR_SIZE); + if (stat) + dev_err(state->i2cdev, "i2c read error 1\n"); + if (!stat) + stat = i2cread(state, (u8 *) val, + MXL_HYDRA_REG_SIZE_IN_BYTES); + mutex_unlock(&state->base->i2c_lock); + le32_to_cpus(val); + if (stat) + dev_err(state->i2cdev, "i2c read error 2\n"); + return stat; +} + +static int read_register_block(struct mxl *state, u32 reg, u32 
size, u8 *data) +{ + int stat; + u8 *buf = state->base->buf; + + mutex_lock(&state->base->i2c_lock); + + buf[0] = MXL_HYDRA_PLID_REG_READ; + buf[1] = size + 4; + buf[2] = GET_BYTE(reg, 0); + buf[3] = GET_BYTE(reg, 1); + buf[4] = GET_BYTE(reg, 2); + buf[5] = GET_BYTE(reg, 3); + stat = i2cwrite(state, buf, + MXL_HYDRA_I2C_HDR_SIZE + MXL_HYDRA_REG_SIZE_IN_BYTES); + if (!stat) { + stat = i2cread(state, data, size); + convert_endian(MXL_ENABLE_BIG_ENDIAN, size, data); + } + mutex_unlock(&state->base->i2c_lock); + return stat; +} + +static int read_by_mnemonic(struct mxl *state, + u32 reg, u8 lsbloc, u8 numofbits, u32 *val) +{ + u32 data = 0, mask = 0; + int stat; + + stat = read_register(state, reg, &data); + if (stat) + return stat; + mask = MXL_GET_REG_MASK_32(lsbloc, numofbits); + data &= mask; + data >>= lsbloc; + *val = data; + return 0; +} + + +static int update_by_mnemonic(struct mxl *state, + u32 reg, u8 lsbloc, u8 numofbits, u32 val) +{ + u32 data, mask; + int stat; + + stat = read_register(state, reg, &data); + if (stat) + return stat; + mask = MXL_GET_REG_MASK_32(lsbloc, numofbits); + data = (data & ~mask) | ((val << lsbloc) & mask); + stat = write_register(state, reg, data); + return stat; +} + +static int firmware_is_alive(struct mxl *state) +{ + u32 hb0, hb1; + + if (read_register(state, HYDRA_HEAR_BEAT, &hb0)) + return 0; + msleep(20); + if (read_register(state, HYDRA_HEAR_BEAT, &hb1)) + return 0; + if (hb1 == hb0) + return 0; + return 1; +} + +static int init(struct dvb_frontend *fe) +{ + struct dtv_frontend_properties *p = &fe->dtv_property_cache; + + /* init fe stats */ + p->strength.len = 1; + p->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE; + p->cnr.len = 1; + p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; + p->pre_bit_error.len = 1; + p->pre_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; + p->pre_bit_count.len = 1; + p->pre_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; + p->post_bit_error.len = 1; + p->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; + p->post_bit_count.len = 1; + p->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; + + return 0; +} + +static void release(struct dvb_frontend *fe) +{ + struct mxl *state = fe->demodulator_priv; + + list_del(&state->mxl); + /* Release one frontend, two more shall take its place! 
*/ + state->base->count--; + if (state->base->count == 0) { + list_del(&state->base->mxllist); + kfree(state->base); + } + kfree(state); +} + +static int get_algo(struct dvb_frontend *fe) +{ + return DVBFE_ALGO_HW; +} + +static int cfg_demod_abort_tune(struct mxl *state) +{ + struct MXL_HYDRA_DEMOD_ABORT_TUNE_T abort_tune_cmd; + u8 cmd_size = sizeof(abort_tune_cmd); + u8 cmd_buff[MXL_HYDRA_OEM_MAX_CMD_BUFF_LEN]; + + abort_tune_cmd.demod_id = state->demod; + BUILD_HYDRA_CMD(MXL_HYDRA_ABORT_TUNE_CMD, MXL_CMD_WRITE, + cmd_size, &abort_tune_cmd, cmd_buff); + return send_command(state, cmd_size + MXL_HYDRA_CMD_HEADER_SIZE, + &cmd_buff[0]); +} + +static int send_master_cmd(struct dvb_frontend *fe, + struct dvb_diseqc_master_cmd *cmd) +{ + /*struct mxl *state = fe->demodulator_priv;*/ + + return 0; /*CfgDemodAbortTune(state);*/ +} + +static int set_parameters(struct dvb_frontend *fe) +{ + struct mxl *state = fe->demodulator_priv; + struct dtv_frontend_properties *p = &fe->dtv_property_cache; + struct MXL_HYDRA_DEMOD_PARAM_T demod_chan_cfg; + u8 cmd_size = sizeof(demod_chan_cfg); + u8 cmd_buff[MXL_HYDRA_OEM_MAX_CMD_BUFF_LEN]; + u32 srange = 10; + int stat; + + if (p->frequency < 950000 || p->frequency > 2150000) + return -EINVAL; + if (p->symbol_rate < 1000000 || p->symbol_rate > 45000000) + return -EINVAL; + + /* CfgDemodAbortTune(state); */ + + switch (p->delivery_system) { + case SYS_DSS: + demod_chan_cfg.standard = MXL_HYDRA_DSS; + demod_chan_cfg.roll_off = MXL_HYDRA_ROLLOFF_AUTO; + break; + case SYS_DVBS: + srange = p->symbol_rate / 1000000; + if (srange > 10) + srange = 10; + demod_chan_cfg.standard = MXL_HYDRA_DVBS; + demod_chan_cfg.roll_off = MXL_HYDRA_ROLLOFF_0_35; + demod_chan_cfg.modulation_scheme = MXL_HYDRA_MOD_QPSK; + demod_chan_cfg.pilots = MXL_HYDRA_PILOTS_OFF; + break; + case SYS_DVBS2: + demod_chan_cfg.standard = MXL_HYDRA_DVBS2; + demod_chan_cfg.roll_off = MXL_HYDRA_ROLLOFF_AUTO; + demod_chan_cfg.modulation_scheme = MXL_HYDRA_MOD_AUTO; + demod_chan_cfg.pilots = MXL_HYDRA_PILOTS_AUTO; + /* cfg_scrambler(state); */ + break; + default: + return -EINVAL; + } + demod_chan_cfg.tuner_index = state->tuner; + demod_chan_cfg.demod_index = state->demod; + demod_chan_cfg.frequency_in_hz = p->frequency * 1000; + demod_chan_cfg.symbol_rate_in_hz = p->symbol_rate; + demod_chan_cfg.max_carrier_offset_in_mhz = srange; + demod_chan_cfg.spectrum_inversion = MXL_HYDRA_SPECTRUM_AUTO; + demod_chan_cfg.fec_code_rate = MXL_HYDRA_FEC_AUTO; + + mutex_lock(&state->base->tune_lock); + if (time_after(jiffies + msecs_to_jiffies(200), + state->base->next_tune)) + while (time_before(jiffies, state->base->next_tune)) + usleep_range(10000, 11000); + state->base->next_tune = jiffies + msecs_to_jiffies(100); + state->tuner_in_use = state->tuner; + BUILD_HYDRA_CMD(MXL_HYDRA_DEMOD_SET_PARAM_CMD, MXL_CMD_WRITE, + cmd_size, &demod_chan_cfg, cmd_buff); + stat = send_command(state, cmd_size + MXL_HYDRA_CMD_HEADER_SIZE, + &cmd_buff[0]); + mutex_unlock(&state->base->tune_lock); + return stat; +} + +static int enable_tuner(struct mxl *state, u32 tuner, u32 enable); + +static int sleep(struct dvb_frontend *fe) +{ + struct mxl *state = fe->demodulator_priv; + struct mxl *p; + + cfg_demod_abort_tune(state); + if (state->tuner_in_use != 0xffffffff) { + mutex_lock(&state->base->tune_lock); + state->tuner_in_use = 0xffffffff; + list_for_each_entry(p, &state->base->mxls, mxl) { + if (p->tuner_in_use == state->tuner) + break; + } + if (&p->mxl == &state->base->mxls) + enable_tuner(state, state->tuner, 0); + 
mutex_unlock(&state->base->tune_lock); + } + return 0; +} + +static int read_snr(struct dvb_frontend *fe) +{ + struct mxl *state = fe->demodulator_priv; + int stat; + u32 reg_data = 0; + struct dtv_frontend_properties *p = &fe->dtv_property_cache; + + mutex_lock(&state->base->status_lock); + HYDRA_DEMOD_STATUS_LOCK(state, state->demod); + stat = read_register(state, (HYDRA_DMD_SNR_ADDR_OFFSET + + HYDRA_DMD_STATUS_OFFSET(state->demod)), + ®_data); + HYDRA_DEMOD_STATUS_UNLOCK(state, state->demod); + mutex_unlock(&state->base->status_lock); + + p->cnr.stat[0].scale = FE_SCALE_DECIBEL; + p->cnr.stat[0].svalue = (s16)reg_data * 10; + + return stat; +} + +static int read_ber(struct dvb_frontend *fe) +{ + struct mxl *state = fe->demodulator_priv; + struct dtv_frontend_properties *p = &fe->dtv_property_cache; + u32 reg[8]; + + mutex_lock(&state->base->status_lock); + HYDRA_DEMOD_STATUS_LOCK(state, state->demod); + read_register_block(state, + (HYDRA_DMD_DVBS_1ST_CORR_RS_ERRORS_ADDR_OFFSET + + HYDRA_DMD_STATUS_OFFSET(state->demod)), + (4 * sizeof(u32)), + (u8 *) ®[0]); + HYDRA_DEMOD_STATUS_UNLOCK(state, state->demod); + + switch (p->delivery_system) { + case SYS_DSS: + case SYS_DVBS: + p->pre_bit_error.stat[0].scale = FE_SCALE_COUNTER; + p->pre_bit_error.stat[0].uvalue = reg[2]; + p->pre_bit_count.stat[0].scale = FE_SCALE_COUNTER; + p->pre_bit_count.stat[0].uvalue = reg[3]; + break; + default: + break; + } + + read_register_block(state, + (HYDRA_DMD_DVBS2_CRC_ERRORS_ADDR_OFFSET + + HYDRA_DMD_STATUS_OFFSET(state->demod)), + (7 * sizeof(u32)), + (u8 *) ®[0]); + + switch (p->delivery_system) { + case SYS_DSS: + case SYS_DVBS: + p->post_bit_error.stat[0].scale = FE_SCALE_COUNTER; + p->post_bit_error.stat[0].uvalue = reg[5]; + p->post_bit_count.stat[0].scale = FE_SCALE_COUNTER; + p->post_bit_count.stat[0].uvalue = reg[6]; + break; + case SYS_DVBS2: + p->post_bit_error.stat[0].scale = FE_SCALE_COUNTER; + p->post_bit_error.stat[0].uvalue = reg[1]; + p->post_bit_count.stat[0].scale = FE_SCALE_COUNTER; + p->post_bit_count.stat[0].uvalue = reg[2]; + break; + default: + break; + } + + mutex_unlock(&state->base->status_lock); + + return 0; +} + +static int read_signal_strength(struct dvb_frontend *fe) +{ + struct mxl *state = fe->demodulator_priv; + struct dtv_frontend_properties *p = &fe->dtv_property_cache; + int stat; + u32 reg_data = 0; + + mutex_lock(&state->base->status_lock); + HYDRA_DEMOD_STATUS_LOCK(state, state->demod); + stat = read_register(state, (HYDRA_DMD_STATUS_INPUT_POWER_ADDR + + HYDRA_DMD_STATUS_OFFSET(state->demod)), + ®_data); + HYDRA_DEMOD_STATUS_UNLOCK(state, state->demod); + mutex_unlock(&state->base->status_lock); + + p->strength.stat[0].scale = FE_SCALE_DECIBEL; + p->strength.stat[0].svalue = (s16) reg_data * 10; /* fix scale */ + + return stat; +} + +static int read_status(struct dvb_frontend *fe, enum fe_status *status) +{ + struct mxl *state = fe->demodulator_priv; + struct dtv_frontend_properties *p = &fe->dtv_property_cache; + u32 reg_data = 0; + + mutex_lock(&state->base->status_lock); + HYDRA_DEMOD_STATUS_LOCK(state, state->demod); + read_register(state, (HYDRA_DMD_LOCK_STATUS_ADDR_OFFSET + + HYDRA_DMD_STATUS_OFFSET(state->demod)), + ®_data); + HYDRA_DEMOD_STATUS_UNLOCK(state, state->demod); + mutex_unlock(&state->base->status_lock); + + *status = (reg_data == 1) ? 
0x1f : 0; + + /* signal statistics */ + + /* signal strength is always available */ + read_signal_strength(fe); + + if (*status & FE_HAS_CARRIER) + read_snr(fe); + else + p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; + + if (*status & FE_HAS_SYNC) + read_ber(fe); + else { + p->pre_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; + p->pre_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; + p->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; + p->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; + } + + return 0; +} + +static int tune(struct dvb_frontend *fe, bool re_tune, + unsigned int mode_flags, + unsigned int *delay, enum fe_status *status) +{ + struct mxl *state = fe->demodulator_priv; + int r = 0; + + *delay = HZ / 2; + if (re_tune) { + r = set_parameters(fe); + if (r) + return r; + state->tune_time = jiffies; + return 0; + } + if (*status & FE_HAS_LOCK) + return 0; + + r = read_status(fe, status); + if (r) + return r; + + return 0; +} + +static enum fe_code_rate conv_fec(enum MXL_HYDRA_FEC_E fec) +{ + enum fe_code_rate fec2fec[11] = { + FEC_NONE, FEC_1_2, FEC_3_5, FEC_2_3, + FEC_3_4, FEC_4_5, FEC_5_6, FEC_6_7, + FEC_7_8, FEC_8_9, FEC_9_10 + }; + + if (fec > MXL_HYDRA_FEC_9_10) + return FEC_NONE; + return fec2fec[fec]; +} + +static int get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *p) +{ + struct mxl *state = fe->demodulator_priv; + u32 reg_data[MXL_DEMOD_CHAN_PARAMS_BUFF_SIZE]; + u32 freq; + + mutex_lock(&state->base->status_lock); + HYDRA_DEMOD_STATUS_LOCK(state, state->demod); + read_register_block(state, + (HYDRA_DMD_STANDARD_ADDR_OFFSET + + HYDRA_DMD_STATUS_OFFSET(state->demod)), + (MXL_DEMOD_CHAN_PARAMS_BUFF_SIZE * 4), /* 25 * 4 bytes */ + (u8 *) ®_data[0]); + /* read demod channel parameters */ + read_register_block(state, + (HYDRA_DMD_STATUS_CENTER_FREQ_IN_KHZ_ADDR + + HYDRA_DMD_STATUS_OFFSET(state->demod)), + (4), /* 4 bytes */ + (u8 *) &freq); + HYDRA_DEMOD_STATUS_UNLOCK(state, state->demod); + mutex_unlock(&state->base->status_lock); + + dev_dbg(state->i2cdev, "freq=%u delsys=%u srate=%u\n", + freq * 1000, reg_data[DMD_STANDARD_ADDR], + reg_data[DMD_SYMBOL_RATE_ADDR]); + p->symbol_rate = reg_data[DMD_SYMBOL_RATE_ADDR]; + p->frequency = freq; + /* + * p->delivery_system = + * (MXL_HYDRA_BCAST_STD_E) regData[DMD_STANDARD_ADDR]; + * p->inversion = + * (MXL_HYDRA_SPECTRUM_E) regData[DMD_SPECTRUM_INVERSION_ADDR]; + * freqSearchRangeKHz = + * (regData[DMD_FREQ_SEARCH_RANGE_IN_KHZ_ADDR]); + */ + + p->fec_inner = conv_fec(reg_data[DMD_FEC_CODE_RATE_ADDR]); + switch (p->delivery_system) { + case SYS_DSS: + break; + case SYS_DVBS2: + switch ((enum MXL_HYDRA_PILOTS_E) + reg_data[DMD_DVBS2_PILOT_ON_OFF_ADDR]) { + case MXL_HYDRA_PILOTS_OFF: + p->pilot = PILOT_OFF; + break; + case MXL_HYDRA_PILOTS_ON: + p->pilot = PILOT_ON; + break; + default: + break; + } + case SYS_DVBS: + switch ((enum MXL_HYDRA_MODULATION_E) + reg_data[DMD_MODULATION_SCHEME_ADDR]) { + case MXL_HYDRA_MOD_QPSK: + p->modulation = QPSK; + break; + case MXL_HYDRA_MOD_8PSK: + p->modulation = PSK_8; + break; + default: + break; + } + switch ((enum MXL_HYDRA_ROLLOFF_E) + reg_data[DMD_SPECTRUM_ROLL_OFF_ADDR]) { + case MXL_HYDRA_ROLLOFF_0_20: + p->rolloff = ROLLOFF_20; + break; + case MXL_HYDRA_ROLLOFF_0_35: + p->rolloff = ROLLOFF_35; + break; + case MXL_HYDRA_ROLLOFF_0_25: + p->rolloff = ROLLOFF_25; + break; + default: + break; + } + break; + default: + return -EINVAL; + } + return 0; +} + +static int set_input(struct dvb_frontend *fe, int input) +{ + struct mxl *state = 
fe->demodulator_priv; + + state->tuner = input; + return 0; +} + +static struct dvb_frontend_ops mxl_ops = { + .delsys = { SYS_DVBS, SYS_DVBS2, SYS_DSS }, + .info = { + .name = "MaxLinear MxL5xx DVB-S/S2 tuner-demodulator", + .frequency_min = 300000, + .frequency_max = 2350000, + .frequency_stepsize = 0, + .frequency_tolerance = 0, + .symbol_rate_min = 1000000, + .symbol_rate_max = 45000000, + .caps = FE_CAN_INVERSION_AUTO | + FE_CAN_FEC_AUTO | + FE_CAN_QPSK | + FE_CAN_2G_MODULATION + }, + .init = init, + .release = release, + .get_frontend_algo = get_algo, + .tune = tune, + .read_status = read_status, + .sleep = sleep, + .get_frontend = get_frontend, + .diseqc_send_master_cmd = send_master_cmd, +}; + +static struct mxl_base *match_base(struct i2c_adapter *i2c, u8 adr) +{ + struct mxl_base *p; + + list_for_each_entry(p, &mxllist, mxllist) + if (p->i2c == i2c && p->adr == adr) + return p; + return NULL; +} + +static void cfg_dev_xtal(struct mxl *state, u32 freq, u32 cap, u32 enable) +{ + if (state->base->can_clkout || !enable) + update_by_mnemonic(state, 0x90200054, 23, 1, enable); + + if (freq == 24000000) + write_register(state, HYDRA_CRYSTAL_SETTING, 0); + else + write_register(state, HYDRA_CRYSTAL_SETTING, 1); + + write_register(state, HYDRA_CRYSTAL_CAP, cap); +} + +static u32 get_big_endian(u8 num_of_bits, const u8 buf[]) +{ + u32 ret_value = 0; + + switch (num_of_bits) { + case 24: + ret_value = (((u32) buf[0]) << 16) | + (((u32) buf[1]) << 8) | buf[2]; + break; + case 32: + ret_value = (((u32) buf[0]) << 24) | + (((u32) buf[1]) << 16) | + (((u32) buf[2]) << 8) | buf[3]; + break; + default: + break; + } + + return ret_value; +} + +static int write_fw_segment(struct mxl *state, + u32 mem_addr, u32 total_size, u8 *data_ptr) +{ + int status; + u32 data_count = 0; + u32 size = 0; + u32 orig_size = 0; + u8 *w_buf_ptr = NULL; + u32 block_size = ((MXL_HYDRA_OEM_MAX_BLOCK_WRITE_LENGTH - + (MXL_HYDRA_I2C_HDR_SIZE + + MXL_HYDRA_REG_SIZE_IN_BYTES)) / 4) * 4; + u8 w_msg_buffer[MXL_HYDRA_OEM_MAX_BLOCK_WRITE_LENGTH - + (MXL_HYDRA_I2C_HDR_SIZE + MXL_HYDRA_REG_SIZE_IN_BYTES)]; + + do { + size = orig_size = (((u32)(data_count + block_size)) > total_size) ? 
+ (total_size - data_count) : block_size; + + if (orig_size & 3) + size = (orig_size + 4) & ~3; + w_buf_ptr = &w_msg_buffer[0]; + memset((void *) w_buf_ptr, 0, size); + memcpy((void *) w_buf_ptr, (void *) data_ptr, orig_size); + convert_endian(1, size, w_buf_ptr); + status = write_firmware_block(state, mem_addr, size, w_buf_ptr); + if (status) + return status; + data_count += size; + mem_addr += size; + data_ptr += size; + } while (data_count < total_size); + + return status; +} + +static int do_firmware_download(struct mxl *state, u8 *mbin_buffer_ptr, + u32 mbin_buffer_size) + +{ + int status; + u32 index = 0; + u32 seg_length = 0; + u32 seg_address = 0; + struct MBIN_FILE_T *mbin_ptr = (struct MBIN_FILE_T *)mbin_buffer_ptr; + struct MBIN_SEGMENT_T *segment_ptr; + enum MXL_BOOL_E xcpu_fw_flag = MXL_FALSE; + + if (mbin_ptr->header.id != MBIN_FILE_HEADER_ID) { + dev_err(state->i2cdev, "%s: Invalid file header ID (%c)\n", + __func__, mbin_ptr->header.id); + return -EINVAL; + } + status = write_register(state, FW_DL_SIGN_ADDR, 0); + if (status) + return status; + segment_ptr = (struct MBIN_SEGMENT_T *) (&mbin_ptr->data[0]); + for (index = 0; index < mbin_ptr->header.num_segments; index++) { + if (segment_ptr->header.id != MBIN_SEGMENT_HEADER_ID) { + dev_err(state->i2cdev, "%s: Invalid segment header ID (%c)\n", + __func__, segment_ptr->header.id); + return -EINVAL; + } + seg_length = get_big_endian(24, + &(segment_ptr->header.len24[0])); + seg_address = get_big_endian(32, + &(segment_ptr->header.address[0])); + + if (state->base->type == MXL_HYDRA_DEVICE_568) { + if ((((seg_address & 0x90760000) == 0x90760000) || + ((seg_address & 0x90740000) == 0x90740000)) && + (xcpu_fw_flag == MXL_FALSE)) { + update_by_mnemonic(state, 0x8003003C, 0, 1, 1); + msleep(200); + write_register(state, 0x90720000, 0); + usleep_range(10000, 11000); + xcpu_fw_flag = MXL_TRUE; + } + status = write_fw_segment(state, seg_address, + seg_length, + (u8 *) segment_ptr->data); + } else { + if (((seg_address & 0x90760000) != 0x90760000) && + ((seg_address & 0x90740000) != 0x90740000)) + status = write_fw_segment(state, seg_address, + seg_length, (u8 *) segment_ptr->data); + } + if (status) + return status; + segment_ptr = (struct MBIN_SEGMENT_T *) + &(segment_ptr->data[((seg_length + 3) / 4) * 4]); + } + return status; +} + +static int check_fw(struct mxl *state, u8 *mbin, u32 mbin_len) +{ + struct MBIN_FILE_HEADER_T *fh = (struct MBIN_FILE_HEADER_T *) mbin; + u32 flen = (fh->image_size24[0] << 16) | + (fh->image_size24[1] << 8) | fh->image_size24[2]; + u8 *fw, cs = 0; + u32 i; + + if (fh->id != 'M' || fh->fmt_version != '1' || flen > 0x3FFF0) { + dev_info(state->i2cdev, "Invalid FW Header\n"); + return -1; + } + fw = mbin + sizeof(struct MBIN_FILE_HEADER_T); + for (i = 0; i < flen; i += 1) + cs += fw[i]; + if (cs != fh->image_checksum) { + dev_info(state->i2cdev, "Invalid FW Checksum\n"); + return -1; + } + return 0; +} + +static int firmware_download(struct mxl *state, u8 *mbin, u32 mbin_len) +{ + int status; + u32 reg_data = 0; + struct MXL_HYDRA_SKU_COMMAND_T dev_sku_cfg; + u8 cmd_size = sizeof(struct MXL_HYDRA_SKU_COMMAND_T); + u8 cmd_buff[sizeof(struct MXL_HYDRA_SKU_COMMAND_T) + 6]; + + if (check_fw(state, mbin, mbin_len)) + return -1; + + /* put CPU into reset */ + status = update_by_mnemonic(state, 0x8003003C, 0, 1, 0); + if (status) + return status; + usleep_range(1000, 2000); + + /* Reset TX FIFO's, BBAND, XBAR */ + status = write_register(state, HYDRA_RESET_TRANSPORT_FIFO_REG, + HYDRA_RESET_TRANSPORT_FIFO_DATA); + 
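+ /*
+  * Descriptive note: each reset and clock-gating write in this sequence is
+  * checked individually; the first failed I2C transfer aborts the download,
+  * so the CPU is never released over a partially initialised transport path.
+  */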
if (status) + return status; + status = write_register(state, HYDRA_RESET_BBAND_REG, + HYDRA_RESET_BBAND_DATA); + if (status) + return status; + status = write_register(state, HYDRA_RESET_XBAR_REG, + HYDRA_RESET_XBAR_DATA); + if (status) + return status; + + /* Disable clock to Baseband, Wideband, SerDes, + * Alias ext & Transport modules + */ + status = write_register(state, HYDRA_MODULES_CLK_2_REG, + HYDRA_DISABLE_CLK_2); + if (status) + return status; + /* Clear Software & Host interrupt status - (Clear on read) */ + status = read_register(state, HYDRA_PRCM_ROOT_CLK_REG, ®_data); + if (status) + return status; + status = do_firmware_download(state, mbin, mbin_len); + if (status) + return status; + + if (state->base->type == MXL_HYDRA_DEVICE_568) { + usleep_range(10000, 11000); + + /* bring XCPU out of reset */ + status = write_register(state, 0x90720000, 1); + if (status) + return status; + msleep(500); + + /* Enable XCPU UART message processing in MCPU */ + status = write_register(state, 0x9076B510, 1); + if (status) + return status; + } else { + /* Bring CPU out of reset */ + status = update_by_mnemonic(state, 0x8003003C, 0, 1, 1); + if (status) + return status; + /* Wait until FW boots */ + msleep(150); + } + + /* Initialize XPT XBAR */ + status = write_register(state, XPT_DMD0_BASEADDR, 0x76543210); + if (status) + return status; + + if (!firmware_is_alive(state)) + return -1; + + dev_info(state->i2cdev, "Hydra FW alive. Hail!\n"); + + /* sometimes register values are wrong shortly + * after first heart beats + */ + msleep(50); + + dev_sku_cfg.sku_type = state->base->sku_type; + BUILD_HYDRA_CMD(MXL_HYDRA_DEV_CFG_SKU_CMD, MXL_CMD_WRITE, + cmd_size, &dev_sku_cfg, cmd_buff); + status = send_command(state, cmd_size + MXL_HYDRA_CMD_HEADER_SIZE, + &cmd_buff[0]); + + return status; +} + +static int cfg_ts_pad_mux(struct mxl *state, enum MXL_BOOL_E enable_serial_ts) +{ + int status = 0; + u32 pad_mux_value = 0; + + if (enable_serial_ts == MXL_TRUE) { + pad_mux_value = 0; + if ((state->base->type == MXL_HYDRA_DEVICE_541) || + (state->base->type == MXL_HYDRA_DEVICE_541S)) + pad_mux_value = 2; + } else { + if ((state->base->type == MXL_HYDRA_DEVICE_581) || + (state->base->type == MXL_HYDRA_DEVICE_581S)) + pad_mux_value = 2; + else + pad_mux_value = 3; + } + + switch (state->base->type) { + case MXL_HYDRA_DEVICE_561: + case MXL_HYDRA_DEVICE_581: + case MXL_HYDRA_DEVICE_541: + case MXL_HYDRA_DEVICE_541S: + case MXL_HYDRA_DEVICE_561S: + case MXL_HYDRA_DEVICE_581S: + status |= update_by_mnemonic(state, 0x90000170, 24, 3, + pad_mux_value); + status |= update_by_mnemonic(state, 0x90000170, 28, 3, + pad_mux_value); + status |= update_by_mnemonic(state, 0x90000174, 0, 3, + pad_mux_value); + status |= update_by_mnemonic(state, 0x90000174, 4, 3, + pad_mux_value); + status |= update_by_mnemonic(state, 0x90000174, 8, 3, + pad_mux_value); + status |= update_by_mnemonic(state, 0x90000174, 12, 3, + pad_mux_value); + status |= update_by_mnemonic(state, 0x90000174, 16, 3, + pad_mux_value); + status |= update_by_mnemonic(state, 0x90000174, 20, 3, + pad_mux_value); + status |= update_by_mnemonic(state, 0x90000174, 24, 3, + pad_mux_value); + status |= update_by_mnemonic(state, 0x90000174, 28, 3, + pad_mux_value); + status |= update_by_mnemonic(state, 0x90000178, 0, 3, + pad_mux_value); + status |= update_by_mnemonic(state, 0x90000178, 4, 3, + pad_mux_value); + status |= update_by_mnemonic(state, 0x90000178, 8, 3, + pad_mux_value); + break; + + case MXL_HYDRA_DEVICE_544: + case MXL_HYDRA_DEVICE_542: + status |= 
update_by_mnemonic(state, 0x9000016C, 4, 3, 1); + status |= update_by_mnemonic(state, 0x9000016C, 8, 3, 0); + status |= update_by_mnemonic(state, 0x9000016C, 12, 3, 0); + status |= update_by_mnemonic(state, 0x9000016C, 16, 3, 0); + status |= update_by_mnemonic(state, 0x90000170, 0, 3, 0); + status |= update_by_mnemonic(state, 0x90000178, 12, 3, 1); + status |= update_by_mnemonic(state, 0x90000178, 16, 3, 1); + status |= update_by_mnemonic(state, 0x90000178, 20, 3, 1); + status |= update_by_mnemonic(state, 0x90000178, 24, 3, 1); + status |= update_by_mnemonic(state, 0x9000017C, 0, 3, 1); + status |= update_by_mnemonic(state, 0x9000017C, 4, 3, 1); + if (enable_serial_ts == MXL_ENABLE) { + status |= update_by_mnemonic(state, + 0x90000170, 4, 3, 0); + status |= update_by_mnemonic(state, + 0x90000170, 8, 3, 0); + status |= update_by_mnemonic(state, + 0x90000170, 12, 3, 0); + status |= update_by_mnemonic(state, + 0x90000170, 16, 3, 0); + status |= update_by_mnemonic(state, + 0x90000170, 20, 3, 1); + status |= update_by_mnemonic(state, + 0x90000170, 24, 3, 1); + status |= update_by_mnemonic(state, + 0x90000170, 28, 3, 2); + status |= update_by_mnemonic(state, + 0x90000174, 0, 3, 2); + status |= update_by_mnemonic(state, + 0x90000174, 4, 3, 2); + status |= update_by_mnemonic(state, + 0x90000174, 8, 3, 2); + status |= update_by_mnemonic(state, + 0x90000174, 12, 3, 2); + status |= update_by_mnemonic(state, + 0x90000174, 16, 3, 2); + status |= update_by_mnemonic(state, + 0x90000174, 20, 3, 2); + status |= update_by_mnemonic(state, + 0x90000174, 24, 3, 2); + status |= update_by_mnemonic(state, + 0x90000174, 28, 3, 2); + status |= update_by_mnemonic(state, + 0x90000178, 0, 3, 2); + status |= update_by_mnemonic(state, + 0x90000178, 4, 3, 2); + status |= update_by_mnemonic(state, + 0x90000178, 8, 3, 2); + } else { + status |= update_by_mnemonic(state, + 0x90000170, 4, 3, 3); + status |= update_by_mnemonic(state, + 0x90000170, 8, 3, 3); + status |= update_by_mnemonic(state, + 0x90000170, 12, 3, 3); + status |= update_by_mnemonic(state, + 0x90000170, 16, 3, 3); + status |= update_by_mnemonic(state, + 0x90000170, 20, 3, 3); + status |= update_by_mnemonic(state, + 0x90000170, 24, 3, 3); + status |= update_by_mnemonic(state, + 0x90000170, 28, 3, 3); + status |= update_by_mnemonic(state, + 0x90000174, 0, 3, 3); + status |= update_by_mnemonic(state, + 0x90000174, 4, 3, 3); + status |= update_by_mnemonic(state, + 0x90000174, 8, 3, 3); + status |= update_by_mnemonic(state, + 0x90000174, 12, 3, 3); + status |= update_by_mnemonic(state, + 0x90000174, 16, 3, 3); + status |= update_by_mnemonic(state, + 0x90000174, 20, 3, 1); + status |= update_by_mnemonic(state, + 0x90000174, 24, 3, 1); + status |= update_by_mnemonic(state, + 0x90000174, 28, 3, 1); + status |= update_by_mnemonic(state, + 0x90000178, 0, 3, 1); + status |= update_by_mnemonic(state, + 0x90000178, 4, 3, 1); + status |= update_by_mnemonic(state, + 0x90000178, 8, 3, 1); + } + break; + + case MXL_HYDRA_DEVICE_568: + if (enable_serial_ts == MXL_FALSE) { + status |= update_by_mnemonic(state, + 0x9000016C, 8, 3, 5); + status |= update_by_mnemonic(state, + 0x9000016C, 12, 3, 5); + status |= update_by_mnemonic(state, + 0x9000016C, 16, 3, 5); + status |= update_by_mnemonic(state, + 0x9000016C, 20, 3, 5); + status |= update_by_mnemonic(state, + 0x9000016C, 24, 3, 5); + status |= update_by_mnemonic(state, + 0x9000016C, 28, 3, 5); + status |= update_by_mnemonic(state, + 0x90000170, 0, 3, 5); + status |= update_by_mnemonic(state, + 0x90000170, 4, 3, 5); + status |= 
update_by_mnemonic(state, + 0x90000170, 8, 3, 5); + status |= update_by_mnemonic(state, + 0x90000170, 12, 3, 5); + status |= update_by_mnemonic(state, + 0x90000170, 16, 3, 5); + status |= update_by_mnemonic(state, + 0x90000170, 20, 3, 5); + + status |= update_by_mnemonic(state, + 0x90000170, 24, 3, pad_mux_value); + status |= update_by_mnemonic(state, + 0x90000174, 0, 3, pad_mux_value); + status |= update_by_mnemonic(state, + 0x90000174, 4, 3, pad_mux_value); + status |= update_by_mnemonic(state, + 0x90000174, 8, 3, pad_mux_value); + status |= update_by_mnemonic(state, + 0x90000174, 12, 3, pad_mux_value); + status |= update_by_mnemonic(state, + 0x90000174, 16, 3, pad_mux_value); + status |= update_by_mnemonic(state, + 0x90000174, 20, 3, pad_mux_value); + status |= update_by_mnemonic(state, + 0x90000174, 24, 3, pad_mux_value); + status |= update_by_mnemonic(state, + 0x90000174, 28, 3, pad_mux_value); + status |= update_by_mnemonic(state, + 0x90000178, 0, 3, pad_mux_value); + status |= update_by_mnemonic(state, + 0x90000178, 4, 3, pad_mux_value); + + status |= update_by_mnemonic(state, + 0x90000178, 8, 3, 5); + status |= update_by_mnemonic(state, + 0x90000178, 12, 3, 5); + status |= update_by_mnemonic(state, + 0x90000178, 16, 3, 5); + status |= update_by_mnemonic(state, + 0x90000178, 20, 3, 5); + status |= update_by_mnemonic(state, + 0x90000178, 24, 3, 5); + status |= update_by_mnemonic(state, + 0x90000178, 28, 3, 5); + status |= update_by_mnemonic(state, + 0x9000017C, 0, 3, 5); + status |= update_by_mnemonic(state, + 0x9000017C, 4, 3, 5); + } else { + status |= update_by_mnemonic(state, + 0x90000170, 4, 3, pad_mux_value); + status |= update_by_mnemonic(state, + 0x90000170, 8, 3, pad_mux_value); + status |= update_by_mnemonic(state, + 0x90000170, 12, 3, pad_mux_value); + status |= update_by_mnemonic(state, + 0x90000170, 16, 3, pad_mux_value); + status |= update_by_mnemonic(state, + 0x90000170, 20, 3, pad_mux_value); + status |= update_by_mnemonic(state, + 0x90000170, 24, 3, pad_mux_value); + status |= update_by_mnemonic(state, + 0x90000170, 28, 3, pad_mux_value); + status |= update_by_mnemonic(state, + 0x90000174, 0, 3, pad_mux_value); + status |= update_by_mnemonic(state, + 0x90000174, 4, 3, pad_mux_value); + status |= update_by_mnemonic(state, + 0x90000174, 8, 3, pad_mux_value); + status |= update_by_mnemonic(state, + 0x90000174, 12, 3, pad_mux_value); + } + break; + + + case MXL_HYDRA_DEVICE_584: + default: + status |= update_by_mnemonic(state, + 0x90000170, 4, 3, pad_mux_value); + status |= update_by_mnemonic(state, + 0x90000170, 8, 3, pad_mux_value); + status |= update_by_mnemonic(state, + 0x90000170, 12, 3, pad_mux_value); + status |= update_by_mnemonic(state, + 0x90000170, 16, 3, pad_mux_value); + status |= update_by_mnemonic(state, + 0x90000170, 20, 3, pad_mux_value); + status |= update_by_mnemonic(state, + 0x90000170, 24, 3, pad_mux_value); + status |= update_by_mnemonic(state, + 0x90000170, 28, 3, pad_mux_value); + status |= update_by_mnemonic(state, + 0x90000174, 0, 3, pad_mux_value); + status |= update_by_mnemonic(state, + 0x90000174, 4, 3, pad_mux_value); + status |= update_by_mnemonic(state, + 0x90000174, 8, 3, pad_mux_value); + status |= update_by_mnemonic(state, + 0x90000174, 12, 3, pad_mux_value); + break; + } + return status; +} + +static int set_drive_strength(struct mxl *state, + enum MXL_HYDRA_TS_DRIVE_STRENGTH_E ts_drive_strength) +{ + int stat = 0; + u32 val; + + read_register(state, 0x90000194, &val); + dev_info(state->i2cdev, "DIGIO = %08x\n", val); + 
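+ /*
+  * Descriptive note: the writes below replicate the same 3-bit drive-strength
+  * value into each TS pad field spread across registers 0x90000194..0x900001A0.
+  */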
dev_info(state->i2cdev, "set drive_strength = %u\n", ts_drive_strength); + + + stat |= update_by_mnemonic(state, 0x90000194, 0, 3, ts_drive_strength); + stat |= update_by_mnemonic(state, 0x90000194, 20, 3, ts_drive_strength); + stat |= update_by_mnemonic(state, 0x90000194, 24, 3, ts_drive_strength); + stat |= update_by_mnemonic(state, 0x90000198, 12, 3, ts_drive_strength); + stat |= update_by_mnemonic(state, 0x90000198, 16, 3, ts_drive_strength); + stat |= update_by_mnemonic(state, 0x90000198, 20, 3, ts_drive_strength); + stat |= update_by_mnemonic(state, 0x90000198, 24, 3, ts_drive_strength); + stat |= update_by_mnemonic(state, 0x9000019C, 0, 3, ts_drive_strength); + stat |= update_by_mnemonic(state, 0x9000019C, 4, 3, ts_drive_strength); + stat |= update_by_mnemonic(state, 0x9000019C, 8, 3, ts_drive_strength); + stat |= update_by_mnemonic(state, 0x9000019C, 24, 3, ts_drive_strength); + stat |= update_by_mnemonic(state, 0x9000019C, 28, 3, ts_drive_strength); + stat |= update_by_mnemonic(state, 0x900001A0, 0, 3, ts_drive_strength); + stat |= update_by_mnemonic(state, 0x900001A0, 4, 3, ts_drive_strength); + stat |= update_by_mnemonic(state, 0x900001A0, 20, 3, ts_drive_strength); + stat |= update_by_mnemonic(state, 0x900001A0, 24, 3, ts_drive_strength); + stat |= update_by_mnemonic(state, 0x900001A0, 28, 3, ts_drive_strength); + + return stat; +} + +static int enable_tuner(struct mxl *state, u32 tuner, u32 enable) +{ + int stat = 0; + struct MXL_HYDRA_TUNER_CMD ctrl_tuner_cmd; + u8 cmd_size = sizeof(ctrl_tuner_cmd); + u8 cmd_buff[MXL_HYDRA_OEM_MAX_CMD_BUFF_LEN]; + u32 val, count = 10; + + ctrl_tuner_cmd.tuner_id = tuner; + ctrl_tuner_cmd.enable = enable; + BUILD_HYDRA_CMD(MXL_HYDRA_TUNER_ACTIVATE_CMD, MXL_CMD_WRITE, + cmd_size, &ctrl_tuner_cmd, cmd_buff); + stat = send_command(state, cmd_size + MXL_HYDRA_CMD_HEADER_SIZE, + &cmd_buff[0]); + if (stat) + return stat; + read_register(state, HYDRA_TUNER_ENABLE_COMPLETE, &val); + while (--count && ((val >> tuner) & 1) != enable) { + msleep(20); + read_register(state, HYDRA_TUNER_ENABLE_COMPLETE, &val); + } + if (!count) + return -1; + read_register(state, HYDRA_TUNER_ENABLE_COMPLETE, &val); + dev_dbg(state->i2cdev, "tuner %u ready = %u\n", + tuner, (val >> tuner) & 1); + + return 0; +} + + +static int config_ts(struct mxl *state, enum MXL_HYDRA_DEMOD_ID_E demod_id, + struct MXL_HYDRA_MPEGOUT_PARAM_T *mpeg_out_param_ptr) +{ + int status = 0; + u32 nco_count_min = 0; + u32 clk_type = 0; + + struct MXL_REG_FIELD_T xpt_sync_polarity[MXL_HYDRA_DEMOD_MAX] = { + {0x90700010, 8, 1}, {0x90700010, 9, 1}, + {0x90700010, 10, 1}, {0x90700010, 11, 1}, + {0x90700010, 12, 1}, {0x90700010, 13, 1}, + {0x90700010, 14, 1}, {0x90700010, 15, 1} }; + struct MXL_REG_FIELD_T xpt_clock_polarity[MXL_HYDRA_DEMOD_MAX] = { + {0x90700010, 16, 1}, {0x90700010, 17, 1}, + {0x90700010, 18, 1}, {0x90700010, 19, 1}, + {0x90700010, 20, 1}, {0x90700010, 21, 1}, + {0x90700010, 22, 1}, {0x90700010, 23, 1} }; + struct MXL_REG_FIELD_T xpt_valid_polarity[MXL_HYDRA_DEMOD_MAX] = { + {0x90700014, 0, 1}, {0x90700014, 1, 1}, + {0x90700014, 2, 1}, {0x90700014, 3, 1}, + {0x90700014, 4, 1}, {0x90700014, 5, 1}, + {0x90700014, 6, 1}, {0x90700014, 7, 1} }; + struct MXL_REG_FIELD_T xpt_ts_clock_phase[MXL_HYDRA_DEMOD_MAX] = { + {0x90700018, 0, 3}, {0x90700018, 4, 3}, + {0x90700018, 8, 3}, {0x90700018, 12, 3}, + {0x90700018, 16, 3}, {0x90700018, 20, 3}, + {0x90700018, 24, 3}, {0x90700018, 28, 3} }; + struct MXL_REG_FIELD_T xpt_lsb_first[MXL_HYDRA_DEMOD_MAX] = { + {0x9070000C, 16, 1}, {0x9070000C, 17, 1}, 
+ {0x9070000C, 18, 1}, {0x9070000C, 19, 1}, + {0x9070000C, 20, 1}, {0x9070000C, 21, 1}, + {0x9070000C, 22, 1}, {0x9070000C, 23, 1} }; + struct MXL_REG_FIELD_T xpt_sync_byte[MXL_HYDRA_DEMOD_MAX] = { + {0x90700010, 0, 1}, {0x90700010, 1, 1}, + {0x90700010, 2, 1}, {0x90700010, 3, 1}, + {0x90700010, 4, 1}, {0x90700010, 5, 1}, + {0x90700010, 6, 1}, {0x90700010, 7, 1} }; + struct MXL_REG_FIELD_T xpt_enable_output[MXL_HYDRA_DEMOD_MAX] = { + {0x9070000C, 0, 1}, {0x9070000C, 1, 1}, + {0x9070000C, 2, 1}, {0x9070000C, 3, 1}, + {0x9070000C, 4, 1}, {0x9070000C, 5, 1}, + {0x9070000C, 6, 1}, {0x9070000C, 7, 1} }; + struct MXL_REG_FIELD_T xpt_err_replace_sync[MXL_HYDRA_DEMOD_MAX] = { + {0x9070000C, 24, 1}, {0x9070000C, 25, 1}, + {0x9070000C, 26, 1}, {0x9070000C, 27, 1}, + {0x9070000C, 28, 1}, {0x9070000C, 29, 1}, + {0x9070000C, 30, 1}, {0x9070000C, 31, 1} }; + struct MXL_REG_FIELD_T xpt_err_replace_valid[MXL_HYDRA_DEMOD_MAX] = { + {0x90700014, 8, 1}, {0x90700014, 9, 1}, + {0x90700014, 10, 1}, {0x90700014, 11, 1}, + {0x90700014, 12, 1}, {0x90700014, 13, 1}, + {0x90700014, 14, 1}, {0x90700014, 15, 1} }; + struct MXL_REG_FIELD_T xpt_continuous_clock[MXL_HYDRA_DEMOD_MAX] = { + {0x907001D4, 0, 1}, {0x907001D4, 1, 1}, + {0x907001D4, 2, 1}, {0x907001D4, 3, 1}, + {0x907001D4, 4, 1}, {0x907001D4, 5, 1}, + {0x907001D4, 6, 1}, {0x907001D4, 7, 1} }; + struct MXL_REG_FIELD_T xpt_nco_clock_rate[MXL_HYDRA_DEMOD_MAX] = { + {0x90700044, 16, 80}, {0x90700044, 16, 81}, + {0x90700044, 16, 82}, {0x90700044, 16, 83}, + {0x90700044, 16, 84}, {0x90700044, 16, 85}, + {0x90700044, 16, 86}, {0x90700044, 16, 87} }; + + demod_id = state->base->ts_map[demod_id]; + + if (mpeg_out_param_ptr->enable == MXL_ENABLE) { + if (mpeg_out_param_ptr->mpeg_mode == + MXL_HYDRA_MPEG_MODE_PARALLEL) { + } else { + cfg_ts_pad_mux(state, MXL_TRUE); + update_by_mnemonic(state, + 0x90700010, 27, 1, MXL_FALSE); + } + } + + nco_count_min = + (u32)(MXL_HYDRA_NCO_CLK / mpeg_out_param_ptr->max_mpeg_clk_rate); + + if (state->base->chipversion >= 2) { + status |= update_by_mnemonic(state, + xpt_nco_clock_rate[demod_id].reg_addr, /* Reg Addr */ + xpt_nco_clock_rate[demod_id].lsb_pos, /* LSB pos */ + xpt_nco_clock_rate[demod_id].num_of_bits, /* Num of bits */ + nco_count_min); /* Data */ + } else + update_by_mnemonic(state, 0x90700044, 16, 8, nco_count_min); + + if (mpeg_out_param_ptr->mpeg_clk_type == MXL_HYDRA_MPEG_CLK_CONTINUOUS) + clk_type = 1; + + if (mpeg_out_param_ptr->mpeg_mode < MXL_HYDRA_MPEG_MODE_PARALLEL) { + status |= update_by_mnemonic(state, + xpt_continuous_clock[demod_id].reg_addr, + xpt_continuous_clock[demod_id].lsb_pos, + xpt_continuous_clock[demod_id].num_of_bits, + clk_type); + } else + update_by_mnemonic(state, 0x907001D4, 8, 1, clk_type); + + status |= update_by_mnemonic(state, + xpt_sync_polarity[demod_id].reg_addr, + xpt_sync_polarity[demod_id].lsb_pos, + xpt_sync_polarity[demod_id].num_of_bits, + mpeg_out_param_ptr->mpeg_sync_pol); + + status |= update_by_mnemonic(state, + xpt_valid_polarity[demod_id].reg_addr, + xpt_valid_polarity[demod_id].lsb_pos, + xpt_valid_polarity[demod_id].num_of_bits, + mpeg_out_param_ptr->mpeg_valid_pol); + + status |= update_by_mnemonic(state, + xpt_clock_polarity[demod_id].reg_addr, + xpt_clock_polarity[demod_id].lsb_pos, + xpt_clock_polarity[demod_id].num_of_bits, + mpeg_out_param_ptr->mpeg_clk_pol); + + status |= update_by_mnemonic(state, + xpt_sync_byte[demod_id].reg_addr, + xpt_sync_byte[demod_id].lsb_pos, + xpt_sync_byte[demod_id].num_of_bits, + mpeg_out_param_ptr->mpeg_sync_pulse_width); + + status |= 
update_by_mnemonic(state, + xpt_ts_clock_phase[demod_id].reg_addr, + xpt_ts_clock_phase[demod_id].lsb_pos, + xpt_ts_clock_phase[demod_id].num_of_bits, + mpeg_out_param_ptr->mpeg_clk_phase); + + status |= update_by_mnemonic(state, + xpt_lsb_first[demod_id].reg_addr, + xpt_lsb_first[demod_id].lsb_pos, + xpt_lsb_first[demod_id].num_of_bits, + mpeg_out_param_ptr->lsb_or_msb_first); + + switch (mpeg_out_param_ptr->mpeg_error_indication) { + case MXL_HYDRA_MPEG_ERR_REPLACE_SYNC: + status |= update_by_mnemonic(state, + xpt_err_replace_sync[demod_id].reg_addr, + xpt_err_replace_sync[demod_id].lsb_pos, + xpt_err_replace_sync[demod_id].num_of_bits, + MXL_TRUE); + status |= update_by_mnemonic(state, + xpt_err_replace_valid[demod_id].reg_addr, + xpt_err_replace_valid[demod_id].lsb_pos, + xpt_err_replace_valid[demod_id].num_of_bits, + MXL_FALSE); + break; + + case MXL_HYDRA_MPEG_ERR_REPLACE_VALID: + status |= update_by_mnemonic(state, + xpt_err_replace_sync[demod_id].reg_addr, + xpt_err_replace_sync[demod_id].lsb_pos, + xpt_err_replace_sync[demod_id].num_of_bits, + MXL_FALSE); + + status |= update_by_mnemonic(state, + xpt_err_replace_valid[demod_id].reg_addr, + xpt_err_replace_valid[demod_id].lsb_pos, + xpt_err_replace_valid[demod_id].num_of_bits, + MXL_TRUE); + break; + + case MXL_HYDRA_MPEG_ERR_INDICATION_DISABLED: + default: + status |= update_by_mnemonic(state, + xpt_err_replace_sync[demod_id].reg_addr, + xpt_err_replace_sync[demod_id].lsb_pos, + xpt_err_replace_sync[demod_id].num_of_bits, + MXL_FALSE); + + status |= update_by_mnemonic(state, + xpt_err_replace_valid[demod_id].reg_addr, + xpt_err_replace_valid[demod_id].lsb_pos, + xpt_err_replace_valid[demod_id].num_of_bits, + MXL_FALSE); + + break; + + } + + if (mpeg_out_param_ptr->mpeg_mode != MXL_HYDRA_MPEG_MODE_PARALLEL) { + status |= update_by_mnemonic(state, + xpt_enable_output[demod_id].reg_addr, + xpt_enable_output[demod_id].lsb_pos, + xpt_enable_output[demod_id].num_of_bits, + mpeg_out_param_ptr->enable); + } + return status; +} + +static int config_mux(struct mxl *state) +{ + update_by_mnemonic(state, 0x9070000C, 0, 1, 0); + update_by_mnemonic(state, 0x9070000C, 1, 1, 0); + update_by_mnemonic(state, 0x9070000C, 2, 1, 0); + update_by_mnemonic(state, 0x9070000C, 3, 1, 0); + update_by_mnemonic(state, 0x9070000C, 4, 1, 0); + update_by_mnemonic(state, 0x9070000C, 5, 1, 0); + update_by_mnemonic(state, 0x9070000C, 6, 1, 0); + update_by_mnemonic(state, 0x9070000C, 7, 1, 0); + update_by_mnemonic(state, 0x90700008, 0, 2, 1); + update_by_mnemonic(state, 0x90700008, 2, 2, 1); + return 0; +} + +static int load_fw(struct mxl *state, struct mxl5xx_cfg *cfg) +{ + int stat = 0; + u8 *buf; + + if (cfg->fw) + return firmware_download(state, cfg->fw, cfg->fw_len); + + if (!cfg->fw_read) + return -1; + + buf = vmalloc(0x40000); + if (!buf) + return -ENOMEM; + + cfg->fw_read(cfg->fw_priv, buf, 0x40000); + stat = firmware_download(state, buf, 0x40000); + vfree(buf); + + return stat; +} + +static int validate_sku(struct mxl *state) +{ + u32 pad_mux_bond = 0, prcm_chip_id = 0, prcm_so_cid = 0; + int status; + u32 type = state->base->type; + + status = read_by_mnemonic(state, 0x90000190, 0, 3, &pad_mux_bond); + status |= read_by_mnemonic(state, 0x80030000, 0, 12, &prcm_chip_id); + status |= read_by_mnemonic(state, 0x80030004, 24, 8, &prcm_so_cid); + if (status) + return -1; + + dev_info(state->i2cdev, "padMuxBond=%08x, prcmChipId=%08x, prcmSoCId=%08x\n", + pad_mux_bond, prcm_chip_id, prcm_so_cid); + + if (prcm_chip_id != 0x560) { + switch (pad_mux_bond) { + case 
MXL_HYDRA_SKU_ID_581: + if (type == MXL_HYDRA_DEVICE_581) + return 0; + if (type == MXL_HYDRA_DEVICE_581S) { + state->base->type = MXL_HYDRA_DEVICE_581; + return 0; + } + break; + case MXL_HYDRA_SKU_ID_584: + if (type == MXL_HYDRA_DEVICE_584) + return 0; + break; + case MXL_HYDRA_SKU_ID_544: + if (type == MXL_HYDRA_DEVICE_544) + return 0; + if (type == MXL_HYDRA_DEVICE_542) + return 0; + break; + case MXL_HYDRA_SKU_ID_582: + if (type == MXL_HYDRA_DEVICE_582) + return 0; + break; + default: + return -1; + } + } else { + + } + return -1; +} + +static int get_fwinfo(struct mxl *state) +{ + int status; + u32 val = 0; + + status = read_by_mnemonic(state, 0x90000190, 0, 3, &val); + if (status) + return status; + dev_info(state->i2cdev, "chipID=%08x\n", val); + + status = read_by_mnemonic(state, 0x80030004, 8, 8, &val); + if (status) + return status; + dev_info(state->i2cdev, "chipVer=%08x\n", val); + + status = read_register(state, HYDRA_FIRMWARE_VERSION, &val); + if (status) + return status; + dev_info(state->i2cdev, "FWVer=%08x\n", val); + + state->base->fwversion = val; + return status; +} + + +static u8 ts_map1_to_1[MXL_HYDRA_DEMOD_MAX] = { + MXL_HYDRA_DEMOD_ID_0, + MXL_HYDRA_DEMOD_ID_1, + MXL_HYDRA_DEMOD_ID_2, + MXL_HYDRA_DEMOD_ID_3, + MXL_HYDRA_DEMOD_ID_4, + MXL_HYDRA_DEMOD_ID_5, + MXL_HYDRA_DEMOD_ID_6, + MXL_HYDRA_DEMOD_ID_7, +}; + +static u8 ts_map54x[MXL_HYDRA_DEMOD_MAX] = { + MXL_HYDRA_DEMOD_ID_2, + MXL_HYDRA_DEMOD_ID_3, + MXL_HYDRA_DEMOD_ID_4, + MXL_HYDRA_DEMOD_ID_5, + MXL_HYDRA_DEMOD_MAX, + MXL_HYDRA_DEMOD_MAX, + MXL_HYDRA_DEMOD_MAX, + MXL_HYDRA_DEMOD_MAX, +}; + +static int probe(struct mxl *state, struct mxl5xx_cfg *cfg) +{ + u32 chipver; + int fw, status, j; + struct MXL_HYDRA_MPEGOUT_PARAM_T mpeg_interface_cfg; + + state->base->ts_map = ts_map1_to_1; + + switch (state->base->type) { + case MXL_HYDRA_DEVICE_581: + case MXL_HYDRA_DEVICE_581S: + state->base->can_clkout = 1; + state->base->demod_num = 8; + state->base->tuner_num = 1; + state->base->sku_type = MXL_HYDRA_SKU_TYPE_581; + break; + case MXL_HYDRA_DEVICE_582: + state->base->can_clkout = 1; + state->base->demod_num = 8; + state->base->tuner_num = 3; + state->base->sku_type = MXL_HYDRA_SKU_TYPE_582; + break; + case MXL_HYDRA_DEVICE_585: + state->base->can_clkout = 0; + state->base->demod_num = 8; + state->base->tuner_num = 4; + state->base->sku_type = MXL_HYDRA_SKU_TYPE_585; + break; + case MXL_HYDRA_DEVICE_544: + state->base->can_clkout = 0; + state->base->demod_num = 4; + state->base->tuner_num = 4; + state->base->sku_type = MXL_HYDRA_SKU_TYPE_544; + state->base->ts_map = ts_map54x; + break; + case MXL_HYDRA_DEVICE_541: + case MXL_HYDRA_DEVICE_541S: + state->base->can_clkout = 0; + state->base->demod_num = 4; + state->base->tuner_num = 1; + state->base->sku_type = MXL_HYDRA_SKU_TYPE_541; + state->base->ts_map = ts_map54x; + break; + case MXL_HYDRA_DEVICE_561: + case MXL_HYDRA_DEVICE_561S: + state->base->can_clkout = 0; + state->base->demod_num = 6; + state->base->tuner_num = 1; + state->base->sku_type = MXL_HYDRA_SKU_TYPE_561; + break; + case MXL_HYDRA_DEVICE_568: + state->base->can_clkout = 0; + state->base->demod_num = 8; + state->base->tuner_num = 1; + state->base->chan_bond = 1; + state->base->sku_type = MXL_HYDRA_SKU_TYPE_568; + break; + case MXL_HYDRA_DEVICE_542: + state->base->can_clkout = 1; + state->base->demod_num = 4; + state->base->tuner_num = 3; + state->base->sku_type = MXL_HYDRA_SKU_TYPE_542; + state->base->ts_map = ts_map54x; + break; + case MXL_HYDRA_DEVICE_TEST: + case MXL_HYDRA_DEVICE_584: + default: + 
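+ /* Unknown and test devices fall back to a full 584 setup: 8 demods, 4 tuners. */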
state->base->can_clkout = 0; + state->base->demod_num = 8; + state->base->tuner_num = 4; + state->base->sku_type = MXL_HYDRA_SKU_TYPE_584; + break; + } + + status = validate_sku(state); + if (status) + return status; + + update_by_mnemonic(state, 0x80030014, 9, 1, 1); + update_by_mnemonic(state, 0x8003003C, 12, 1, 1); + status = read_by_mnemonic(state, 0x80030000, 12, 4, &chipver); + if (status) + state->base->chipversion = 0; + else + state->base->chipversion = (chipver == 2) ? 2 : 1; + dev_info(state->i2cdev, "Hydra chip version %u\n", + state->base->chipversion); + + cfg_dev_xtal(state, cfg->clk, cfg->cap, 0); + + fw = firmware_is_alive(state); + if (!fw) { + status = load_fw(state, cfg); + if (status) + return status; + } + get_fwinfo(state); + + config_mux(state); + mpeg_interface_cfg.enable = MXL_ENABLE; + mpeg_interface_cfg.lsb_or_msb_first = MXL_HYDRA_MPEG_SERIAL_MSB_1ST; + /* supports only (0-104&139)MHz */ + if (cfg->ts_clk) + mpeg_interface_cfg.max_mpeg_clk_rate = cfg->ts_clk; + else + mpeg_interface_cfg.max_mpeg_clk_rate = 69; /* 139; */ + mpeg_interface_cfg.mpeg_clk_phase = MXL_HYDRA_MPEG_CLK_PHASE_SHIFT_0_DEG; + mpeg_interface_cfg.mpeg_clk_pol = MXL_HYDRA_MPEG_CLK_IN_PHASE; + /* MXL_HYDRA_MPEG_CLK_GAPPED; */ + mpeg_interface_cfg.mpeg_clk_type = MXL_HYDRA_MPEG_CLK_CONTINUOUS; + mpeg_interface_cfg.mpeg_error_indication = + MXL_HYDRA_MPEG_ERR_INDICATION_DISABLED; + mpeg_interface_cfg.mpeg_mode = MXL_HYDRA_MPEG_MODE_SERIAL_3_WIRE; + mpeg_interface_cfg.mpeg_sync_pol = MXL_HYDRA_MPEG_ACTIVE_HIGH; + mpeg_interface_cfg.mpeg_sync_pulse_width = MXL_HYDRA_MPEG_SYNC_WIDTH_BIT; + mpeg_interface_cfg.mpeg_valid_pol = MXL_HYDRA_MPEG_ACTIVE_HIGH; + + for (j = 0; j < state->base->demod_num; j++) { + status = config_ts(state, (enum MXL_HYDRA_DEMOD_ID_E) j, + &mpeg_interface_cfg); + if (status) + return status; + } + set_drive_strength(state, 1); + return 0; +} + +struct dvb_frontend *mxl5xx_attach(struct i2c_adapter *i2c, + struct mxl5xx_cfg *cfg, u32 demod, u32 tuner, + int (**fn_set_input)(struct dvb_frontend *, int)) +{ + struct mxl *state; + struct mxl_base *base; + + state = kzalloc(sizeof(struct mxl), GFP_KERNEL); + if (!state) + return NULL; + + state->demod = demod; + state->tuner = tuner; + state->tuner_in_use = 0xffffffff; + state->i2cdev = &i2c->dev; + + base = match_base(i2c, cfg->adr); + if (base) { + base->count++; + if (base->count > base->demod_num) + goto fail; + state->base = base; + } else { + base = kzalloc(sizeof(struct mxl_base), GFP_KERNEL); + if (!base) + goto fail; + base->i2c = i2c; + base->adr = cfg->adr; + base->type = cfg->type; + base->count = 1; + mutex_init(&base->i2c_lock); + mutex_init(&base->status_lock); + mutex_init(&base->tune_lock); + INIT_LIST_HEAD(&base->mxls); + + state->base = base; + if (probe(state, cfg) < 0) { + kfree(base); + goto fail; + } + list_add(&base->mxllist, &mxllist); + } + state->fe.ops = mxl_ops; + state->xbar[0] = 4; + state->xbar[1] = demod; + state->xbar[2] = 8; + state->fe.demodulator_priv = state; + *fn_set_input = set_input; + + list_add(&state->mxl, &base->mxls); + return &state->fe; + +fail: + kfree(state); + return NULL; +} +EXPORT_SYMBOL_GPL(mxl5xx_attach); + +MODULE_DESCRIPTION("MaxLinear MxL5xx DVB-S/S2 tuner-demodulator driver"); +MODULE_AUTHOR("Ralph and Marcus Metzler, Metzler Brothers Systementwicklung GbR"); +MODULE_LICENSE("GPL"); diff --git a/drivers/media/dvb-frontends/mxl5xx.h b/drivers/media/dvb-frontends/mxl5xx.h new file mode 100644 index 000000000000..532e08111537 --- /dev/null +++ 
b/drivers/media/dvb-frontends/mxl5xx.h @@ -0,0 +1,41 @@ +#ifndef _MXL5XX_H_ +#define _MXL5XX_H_ + +#include +#include + +#include "dvb_frontend.h" + +struct mxl5xx_cfg { + u8 adr; + u8 type; + u32 cap; + u32 clk; + u32 ts_clk; + + u8 *fw; + u32 fw_len; + + int (*fw_read)(void *priv, u8 *buf, u32 len); + void *fw_priv; +}; + +#if IS_REACHABLE(CONFIG_DVB_MXL5XX) + +extern struct dvb_frontend *mxl5xx_attach(struct i2c_adapter *i2c, + struct mxl5xx_cfg *cfg, u32 demod, u32 tuner, + int (**fn_set_input)(struct dvb_frontend *, int)); + +#else + +static inline struct dvb_frontend *mxl5xx_attach(struct i2c_adapter *i2c, + struct mxl5xx_cfg *cfg, u32 demod, u32 tuner, + int (**fn_set_input)(struct dvb_frontend *, int)) +{ + pr_warn("%s: driver disabled by Kconfig\n", __func__); + return NULL; +} + +#endif /* CONFIG_DVB_MXL5XX */ + +#endif /* _MXL5XX_H_ */ diff --git a/drivers/media/dvb-frontends/mxl5xx_defs.h b/drivers/media/dvb-frontends/mxl5xx_defs.h new file mode 100644 index 000000000000..fd9e61e0188f --- /dev/null +++ b/drivers/media/dvb-frontends/mxl5xx_defs.h @@ -0,0 +1,731 @@ +/* + * Defines for the Maxlinear MX58x family of tuners/demods + * + * Copyright (C) 2014 Digital Devices GmbH + * + * based on code: + * Copyright (c) 2011-2013 MaxLinear, Inc. All rights reserved + * which was released under GPL V2 + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2, as published by the Free Software Foundation. + */ + +enum MXL_BOOL_E { + MXL_DISABLE = 0, + MXL_ENABLE = 1, + + MXL_FALSE = 0, + MXL_TRUE = 1, + + MXL_INVALID = 0, + MXL_VALID = 1, + + MXL_NO = 0, + MXL_YES = 1, + + MXL_OFF = 0, + MXL_ON = 1 +}; + +/* Firmware-Host Command IDs */ +enum MXL_HYDRA_HOST_CMD_ID_E { + /* --Device command IDs-- */ + MXL_HYDRA_DEV_NO_OP_CMD = 0, /* No OP */ + + MXL_HYDRA_DEV_SET_POWER_MODE_CMD = 1, + MXL_HYDRA_DEV_SET_OVERWRITE_DEF_CMD = 2, + + /* Host-used CMD, not used by firmware */ + MXL_HYDRA_DEV_FIRMWARE_DOWNLOAD_CMD = 3, + + /* Additional CONTROL types from DTV */ + MXL_HYDRA_DEV_SET_BROADCAST_PID_STB_ID_CMD = 4, + MXL_HYDRA_DEV_GET_PMM_SLEEP_CMD = 5, + + /* --Tuner command IDs-- */ + MXL_HYDRA_TUNER_TUNE_CMD = 6, + MXL_HYDRA_TUNER_GET_STATUS_CMD = 7, + + /* --Demod command IDs-- */ + MXL_HYDRA_DEMOD_SET_PARAM_CMD = 8, + MXL_HYDRA_DEMOD_GET_STATUS_CMD = 9, + + MXL_HYDRA_DEMOD_RESET_FEC_COUNTER_CMD = 10, + + MXL_HYDRA_DEMOD_SET_PKT_NUM_CMD = 11, + + MXL_HYDRA_DEMOD_SET_IQ_SOURCE_CMD = 12, + MXL_HYDRA_DEMOD_GET_IQ_DATA_CMD = 13, + + MXL_HYDRA_DEMOD_GET_M68HC05_VER_CMD = 14, + + MXL_HYDRA_DEMOD_SET_ERROR_COUNTER_MODE_CMD = 15, + + /* --- ABORT channel tune */ + MXL_HYDRA_ABORT_TUNE_CMD = 16, /* Abort current tune command. 
*/ + + /* --SWM/FSK command IDs-- */ + MXL_HYDRA_FSK_RESET_CMD = 17, + MXL_HYDRA_FSK_MSG_CMD = 18, + MXL_HYDRA_FSK_SET_OP_MODE_CMD = 19, + + /* --DiSeqC command IDs-- */ + MXL_HYDRA_DISEQC_MSG_CMD = 20, + MXL_HYDRA_DISEQC_COPY_MSG_TO_MAILBOX = 21, + MXL_HYDRA_DISEQC_CFG_MSG_CMD = 22, + + /* --- FFT Debug Command IDs-- */ + MXL_HYDRA_REQ_FFT_SPECTRUM_CMD = 23, + + /* -- Demod scramblle code */ + MXL_HYDRA_DEMOD_SCRAMBLE_CODE_CMD = 24, + + /* ---For host to know how many commands in total */ + MXL_HYDRA_LAST_HOST_CMD = 25, + + MXL_HYDRA_DEMOD_INTR_TYPE_CMD = 47, + MXL_HYDRA_DEV_INTR_CLEAR_CMD = 48, + MXL_HYDRA_TUNER_SPECTRUM_REQ_CMD = 53, + MXL_HYDRA_TUNER_ACTIVATE_CMD = 55, + MXL_HYDRA_DEV_CFG_POWER_MODE_CMD = 56, + MXL_HYDRA_DEV_XTAL_CAP_CMD = 57, + MXL_HYDRA_DEV_CFG_SKU_CMD = 58, + MXL_HYDRA_TUNER_SPECTRUM_MIN_GAIN_CMD = 59, + MXL_HYDRA_DISEQC_CONT_TONE_CFG = 60, + MXL_HYDRA_DEV_RF_WAKE_UP_CMD = 61, + MXL_HYDRA_DEMOD_CFG_EQ_CTRL_PARAM_CMD = 62, + MXL_HYDRA_DEMOD_FREQ_OFFSET_SEARCH_RANGE_CMD = 63, + MXL_HYDRA_DEV_REQ_PWR_FROM_ADCRSSI_CMD = 64, + + MXL_XCPU_PID_FLT_CFG_CMD = 65, + MXL_XCPU_SHMEM_TEST_CMD = 66, + MXL_XCPU_ABORT_TUNE_CMD = 67, + MXL_XCPU_CHAN_TUNE_CMD = 68, + MXL_XCPU_FLT_BOND_HDRS_CMD = 69, + + MXL_HYDRA_DEV_BROADCAST_WAKE_UP_CMD = 70, + MXL_HYDRA_FSK_CFG_FSK_FREQ_CMD = 71, + MXL_HYDRA_FSK_POWER_DOWN_CMD = 72, + MXL_XCPU_CLEAR_CB_STATS_CMD = 73, + MXL_XCPU_CHAN_BOND_RESTART_CMD = 74 +}; + +#define MXL_ENABLE_BIG_ENDIAN (0) + +#define MXL_HYDRA_OEM_MAX_BLOCK_WRITE_LENGTH 248 + +#define MXL_HYDRA_OEM_MAX_CMD_BUFF_LEN (248) + +#define MXL_HYDRA_CAP_MIN 10 +#define MXL_HYDRA_CAP_MAX 33 + +#define MXL_HYDRA_PLID_REG_READ 0xFB /* Read register PLID */ +#define MXL_HYDRA_PLID_REG_WRITE 0xFC /* Write register PLID */ + +#define MXL_HYDRA_PLID_CMD_READ 0xFD /* Command Read PLID */ +#define MXL_HYDRA_PLID_CMD_WRITE 0xFE /* Command Write PLID */ + +#define MXL_HYDRA_REG_SIZE_IN_BYTES 4 /* Hydra register size in bytes */ +#define MXL_HYDRA_I2C_HDR_SIZE (2 * sizeof(u8)) /* PLID + LEN(0xFF) */ +#define MXL_HYDRA_CMD_HEADER_SIZE (MXL_HYDRA_REG_SIZE_IN_BYTES + MXL_HYDRA_I2C_HDR_SIZE) + +#define MXL_HYDRA_SKU_ID_581 0 +#define MXL_HYDRA_SKU_ID_584 1 +#define MXL_HYDRA_SKU_ID_585 2 +#define MXL_HYDRA_SKU_ID_544 3 +#define MXL_HYDRA_SKU_ID_561 4 +#define MXL_HYDRA_SKU_ID_582 5 +#define MXL_HYDRA_SKU_ID_568 6 + +/* macro for register write data buffer size + * (PLID + LEN (0xFF) + RegAddr + RegData) + */ +#define MXL_HYDRA_REG_WRITE_LEN (MXL_HYDRA_I2C_HDR_SIZE + (2 * MXL_HYDRA_REG_SIZE_IN_BYTES)) + +/* macro to extract a single byte from 4-byte(32-bit) data */ +#define GET_BYTE(x, n) (((x) >> (8*(n))) & 0xFF) + +#define MAX_CMD_DATA 512 + +#define MXL_GET_REG_MASK_32(lsb_loc, num_of_bits) ((0xFFFFFFFF >> (32 - (num_of_bits))) << (lsb_loc)) + +#define FW_DL_SIGN (0xDEADBEEF) + +#define MBIN_FORMAT_VERSION '1' +#define MBIN_FILE_HEADER_ID 'M' +#define MBIN_SEGMENT_HEADER_ID 'S' +#define MBIN_MAX_FILE_LENGTH (1<<23) + +struct MBIN_FILE_HEADER_T { + u8 id; + u8 fmt_version; + u8 header_len; + u8 num_segments; + u8 entry_address[4]; + u8 image_size24[3]; + u8 image_checksum; + u8 reserved[4]; +}; + +struct MBIN_FILE_T { + struct MBIN_FILE_HEADER_T header; + u8 data[1]; +}; + +struct MBIN_SEGMENT_HEADER_T { + u8 id; + u8 len24[3]; + u8 address[4]; +}; + +struct MBIN_SEGMENT_T { + struct MBIN_SEGMENT_HEADER_T header; + u8 data[1]; +}; + +enum MXL_CMD_TYPE_E { MXL_CMD_WRITE = 0, MXL_CMD_READ }; + +#define BUILD_HYDRA_CMD(cmd_id, req_type, size, data_ptr, cmd_buff) \ + do { \ + cmd_buff[0] = ((req_type 
== MXL_CMD_WRITE) ? MXL_HYDRA_PLID_CMD_WRITE : MXL_HYDRA_PLID_CMD_READ); \ + cmd_buff[1] = (size > 251) ? 0xff : (u8) (size + 4); \ + cmd_buff[2] = size; \ + cmd_buff[3] = cmd_id; \ + cmd_buff[4] = 0x00; \ + cmd_buff[5] = 0x00; \ + convert_endian(MXL_ENABLE_BIG_ENDIAN, size, (u8 *)data_ptr); \ + memcpy((void *)&cmd_buff[6], data_ptr, size); \ + } while (0) + +struct MXL_REG_FIELD_T { + u32 reg_addr; + u8 lsb_pos; + u8 num_of_bits; +}; + +struct MXL_DEV_CMD_DATA_T { + u32 data_size; + u8 data[MAX_CMD_DATA]; +}; + +enum MXL_HYDRA_SKU_TYPE_E { + MXL_HYDRA_SKU_TYPE_MIN = 0x00, + MXL_HYDRA_SKU_TYPE_581 = 0x00, + MXL_HYDRA_SKU_TYPE_584 = 0x01, + MXL_HYDRA_SKU_TYPE_585 = 0x02, + MXL_HYDRA_SKU_TYPE_544 = 0x03, + MXL_HYDRA_SKU_TYPE_561 = 0x04, + MXL_HYDRA_SKU_TYPE_5XX = 0x05, + MXL_HYDRA_SKU_TYPE_5YY = 0x06, + MXL_HYDRA_SKU_TYPE_511 = 0x07, + MXL_HYDRA_SKU_TYPE_561_DE = 0x08, + MXL_HYDRA_SKU_TYPE_582 = 0x09, + MXL_HYDRA_SKU_TYPE_541 = 0x0A, + MXL_HYDRA_SKU_TYPE_568 = 0x0B, + MXL_HYDRA_SKU_TYPE_542 = 0x0C, + MXL_HYDRA_SKU_TYPE_MAX = 0x0D, +}; + +struct MXL_HYDRA_SKU_COMMAND_T { + enum MXL_HYDRA_SKU_TYPE_E sku_type; +}; + +enum MXL_HYDRA_DEMOD_ID_E { + MXL_HYDRA_DEMOD_ID_0 = 0, + MXL_HYDRA_DEMOD_ID_1, + MXL_HYDRA_DEMOD_ID_2, + MXL_HYDRA_DEMOD_ID_3, + MXL_HYDRA_DEMOD_ID_4, + MXL_HYDRA_DEMOD_ID_5, + MXL_HYDRA_DEMOD_ID_6, + MXL_HYDRA_DEMOD_ID_7, + MXL_HYDRA_DEMOD_MAX +}; + +#define MXL_DEMOD_SCRAMBLE_SEQ_LEN 12 + +#define MAX_STEP_SIZE_24_XTAL_102_05_KHZ 195 +#define MAX_STEP_SIZE_24_XTAL_204_10_KHZ 215 +#define MAX_STEP_SIZE_24_XTAL_306_15_KHZ 203 +#define MAX_STEP_SIZE_24_XTAL_408_20_KHZ 177 + +#define MAX_STEP_SIZE_27_XTAL_102_05_KHZ 195 +#define MAX_STEP_SIZE_27_XTAL_204_10_KHZ 215 +#define MAX_STEP_SIZE_27_XTAL_306_15_KHZ 203 +#define MAX_STEP_SIZE_27_XTAL_408_20_KHZ 177 + +#define MXL_HYDRA_SPECTRUM_MIN_FREQ_KHZ 300000 +#define MXL_HYDRA_SPECTRUM_MAX_FREQ_KHZ 2350000 + +enum MXL_DEMOD_CHAN_PARAMS_OFFSET_E { + DMD_STANDARD_ADDR = 0, + DMD_SPECTRUM_INVERSION_ADDR, + DMD_SPECTRUM_ROLL_OFF_ADDR, + DMD_SYMBOL_RATE_ADDR, + DMD_MODULATION_SCHEME_ADDR, + DMD_FEC_CODE_RATE_ADDR, + DMD_SNR_ADDR, + DMD_FREQ_OFFSET_ADDR, + DMD_CTL_FREQ_OFFSET_ADDR, + DMD_STR_FREQ_OFFSET_ADDR, + DMD_FTL_FREQ_OFFSET_ADDR, + DMD_STR_NBC_SYNC_LOCK_ADDR, + DMD_CYCLE_SLIP_COUNT_ADDR, + DMD_DISPLAY_IQ_ADDR, + DMD_DVBS2_CRC_ERRORS_ADDR, + DMD_DVBS2_PER_COUNT_ADDR, + DMD_DVBS2_PER_WINDOW_ADDR, + DMD_DVBS_CORR_RS_ERRORS_ADDR, + DMD_DVBS_UNCORR_RS_ERRORS_ADDR, + DMD_DVBS_BER_COUNT_ADDR, + DMD_DVBS_BER_WINDOW_ADDR, + DMD_TUNER_ID_ADDR, + DMD_DVBS2_PILOT_ON_OFF_ADDR, + DMD_FREQ_SEARCH_RANGE_IN_KHZ_ADDR, + + MXL_DEMOD_CHAN_PARAMS_BUFF_SIZE, +}; + +enum MXL_HYDRA_TUNER_ID_E { + MXL_HYDRA_TUNER_ID_0 = 0, + MXL_HYDRA_TUNER_ID_1, + MXL_HYDRA_TUNER_ID_2, + MXL_HYDRA_TUNER_ID_3, + MXL_HYDRA_TUNER_MAX +}; + +enum MXL_HYDRA_BCAST_STD_E { + MXL_HYDRA_DSS = 0, + MXL_HYDRA_DVBS, + MXL_HYDRA_DVBS2, +}; + +enum MXL_HYDRA_FEC_E { + MXL_HYDRA_FEC_AUTO = 0, + MXL_HYDRA_FEC_1_2, + MXL_HYDRA_FEC_3_5, + MXL_HYDRA_FEC_2_3, + MXL_HYDRA_FEC_3_4, + MXL_HYDRA_FEC_4_5, + MXL_HYDRA_FEC_5_6, + MXL_HYDRA_FEC_6_7, + MXL_HYDRA_FEC_7_8, + MXL_HYDRA_FEC_8_9, + MXL_HYDRA_FEC_9_10, +}; + +enum MXL_HYDRA_MODULATION_E { + MXL_HYDRA_MOD_AUTO = 0, + MXL_HYDRA_MOD_QPSK, + MXL_HYDRA_MOD_8PSK +}; + +enum MXL_HYDRA_SPECTRUM_E { + MXL_HYDRA_SPECTRUM_AUTO = 0, + MXL_HYDRA_SPECTRUM_INVERTED, + MXL_HYDRA_SPECTRUM_NON_INVERTED, +}; + +enum MXL_HYDRA_ROLLOFF_E { + MXL_HYDRA_ROLLOFF_AUTO = 0, + MXL_HYDRA_ROLLOFF_0_20, + MXL_HYDRA_ROLLOFF_0_25, + MXL_HYDRA_ROLLOFF_0_35 +}; + +enum 
MXL_HYDRA_PILOTS_E { + MXL_HYDRA_PILOTS_OFF = 0, + MXL_HYDRA_PILOTS_ON, + MXL_HYDRA_PILOTS_AUTO +}; + +enum MXL_HYDRA_CONSTELLATION_SRC_E { + MXL_HYDRA_FORMATTER = 0, + MXL_HYDRA_LEGACY_FEC, + MXL_HYDRA_FREQ_RECOVERY, + MXL_HYDRA_NBC, + MXL_HYDRA_CTL, + MXL_HYDRA_EQ, +}; + +struct MXL_HYDRA_DEMOD_LOCK_T { + int agc_lock; /* AGC lock info */ + int fec_lock; /* Demod FEC block lock info */ +}; + +struct MXL_HYDRA_DEMOD_STATUS_DVBS_T { + u32 rs_errors; /* RS decoder err counter */ + u32 ber_window; /* Ber Windows */ + u32 ber_count; /* BER count */ + u32 ber_window_iter1; /* Ber Windows - post viterbi */ + u32 ber_count_iter1; /* BER count - post viterbi */ +}; + +struct MXL_HYDRA_DEMOD_STATUS_DSS_T { + u32 rs_errors; /* RS decoder err counter */ + u32 ber_window; /* Ber Windows */ + u32 ber_count; /* BER count */ +}; + +struct MXL_HYDRA_DEMOD_STATUS_DVBS2_T { + u32 crc_errors; /* CRC error counter */ + u32 packet_error_count; /* Number of packet errors */ + u32 total_packets; /* Total packets */ +}; + +struct MXL_HYDRA_DEMOD_STATUS_T { + enum MXL_HYDRA_BCAST_STD_E standard_mask; /* Standard DVB-S, DVB-S2 or DSS */ + + union { + struct MXL_HYDRA_DEMOD_STATUS_DVBS_T demod_status_dvbs; /* DVB-S demod status */ + struct MXL_HYDRA_DEMOD_STATUS_DVBS2_T demod_status_dvbs2; /* DVB-S2 demod status */ + struct MXL_HYDRA_DEMOD_STATUS_DSS_T demod_status_dss; /* DSS demod status */ + } u; +}; + +struct MXL_HYDRA_DEMOD_SIG_OFFSET_INFO_T { + s32 carrier_offset_in_hz; /* CRL offset info */ + s32 symbol_offset_in_symbol; /* SRL offset info */ +}; + +struct MXL_HYDRA_DEMOD_SCRAMBLE_INFO_T { + u8 scramble_sequence[MXL_DEMOD_SCRAMBLE_SEQ_LEN]; /* scramble sequence */ + u32 scramble_code; /* scramble gold code */ +}; + +enum MXL_HYDRA_SPECTRUM_STEP_SIZE_E { + MXL_HYDRA_STEP_SIZE_24_XTAL_102_05KHZ, /* 102.05 KHz for 24 MHz XTAL */ + MXL_HYDRA_STEP_SIZE_24_XTAL_204_10KHZ, /* 204.10 KHz for 24 MHz XTAL */ + MXL_HYDRA_STEP_SIZE_24_XTAL_306_15KHZ, /* 306.15 KHz for 24 MHz XTAL */ + MXL_HYDRA_STEP_SIZE_24_XTAL_408_20KHZ, /* 408.20 KHz for 24 MHz XTAL */ + + MXL_HYDRA_STEP_SIZE_27_XTAL_102_05KHZ, /* 102.05 KHz for 27 MHz XTAL */ + MXL_HYDRA_STEP_SIZE_27_XTAL_204_35KHZ, /* 204.35 KHz for 27 MHz XTAL */ + MXL_HYDRA_STEP_SIZE_27_XTAL_306_52KHZ, /* 306.52 KHz for 27 MHz XTAL */ + MXL_HYDRA_STEP_SIZE_27_XTAL_408_69KHZ, /* 408.69 KHz for 27 MHz XTAL */ +}; + +enum MXL_HYDRA_SPECTRUM_RESOLUTION_E { + MXL_HYDRA_SPECTRUM_RESOLUTION_00_1_DB, /* 0.1 dB */ + MXL_HYDRA_SPECTRUM_RESOLUTION_01_0_DB, /* 1.0 dB */ + MXL_HYDRA_SPECTRUM_RESOLUTION_05_0_DB, /* 5.0 dB */ + MXL_HYDRA_SPECTRUM_RESOLUTION_10_0_DB, /* 10 dB */ +}; + +enum MXL_HYDRA_SPECTRUM_ERROR_CODE_E { + MXL_SPECTRUM_NO_ERROR, + MXL_SPECTRUM_INVALID_PARAMETER, + MXL_SPECTRUM_INVALID_STEP_SIZE, + MXL_SPECTRUM_BW_CANNOT_BE_COVERED, + MXL_SPECTRUM_DEMOD_BUSY, + MXL_SPECTRUM_TUNER_NOT_ENABLED, +}; + +struct MXL_HYDRA_SPECTRUM_REQ_T { + u32 tuner_index; /* TUNER Ctrl: one of MXL58x_TUNER_ID_E */ + u32 demod_index; /* DEMOD Ctrl: one of MXL58x_DEMOD_ID_E */ + enum MXL_HYDRA_SPECTRUM_STEP_SIZE_E step_size_in_khz; + u32 starting_freq_ink_hz; + u32 total_steps; + enum MXL_HYDRA_SPECTRUM_RESOLUTION_E spectrum_division; +}; + +enum MXL_HYDRA_SEARCH_FREQ_OFFSET_TYPE_E { + MXL_HYDRA_SEARCH_MAX_OFFSET = 0, /* DMD searches for max freq offset (i.e. 
5MHz) */ + MXL_HYDRA_SEARCH_BW_PLUS_ROLLOFF, /* DMD searches for BW + ROLLOFF/2 */ +}; + +struct MXL58X_CFG_FREQ_OFF_SEARCH_RANGE_T { + u32 demod_index; + enum MXL_HYDRA_SEARCH_FREQ_OFFSET_TYPE_E search_type; +}; + +/* there are two slices + * slice0 - TS0, TS1, TS2 & TS3 + * slice1 - TS4, TS5, TS6 & TS7 + */ +#define MXL_HYDRA_TS_SLICE_MAX 2 + +#define MAX_FIXED_PID_NUM 32 + +#define MXL_HYDRA_NCO_CLK 418 /* 418 MHz */ + +#define MXL_HYDRA_MAX_TS_CLOCK 139 /* 139 MHz */ + +#define MXL_HYDRA_TS_FIXED_PID_FILT_SIZE 32 + +#define MXL_HYDRA_SHARED_PID_FILT_SIZE_DEFAULT 33 /* Shared PID filter size in 1-1 mux mode */ +#define MXL_HYDRA_SHARED_PID_FILT_SIZE_2_TO_1 66 /* Shared PID filter size in 2-1 mux mode */ +#define MXL_HYDRA_SHARED_PID_FILT_SIZE_4_TO_1 132 /* Shared PID filter size in 4-1 mux mode */ + +enum MXL_HYDRA_PID_BANK_TYPE_E { + MXL_HYDRA_SOFTWARE_PID_BANK = 0, + MXL_HYDRA_HARDWARE_PID_BANK, +}; + +enum MXL_HYDRA_TS_MUX_MODE_E { + MXL_HYDRA_TS_MUX_PID_REMAP = 0, + MXL_HYDRA_TS_MUX_PREFIX_EXTRA_HEADER = 1, +}; + +enum MXL_HYDRA_TS_MUX_TYPE_E { + MXL_HYDRA_TS_MUX_DISABLE = 0, /* No Mux ( 1 TSIF to 1 TSIF) */ + MXL_HYDRA_TS_MUX_2_TO_1, /* Mux 2 TSIF to 1 TSIF */ + MXL_HYDRA_TS_MUX_4_TO_1, /* Mux 4 TSIF to 1 TSIF */ +}; + +enum MXL_HYDRA_TS_GROUP_E { + MXL_HYDRA_TS_GROUP_0_3 = 0, /* TS group 0 to 3 (TS0, TS1, TS2 & TS3) */ + MXL_HYDRA_TS_GROUP_4_7, /* TS group 0 to 3 (TS4, TS5, TS6 & TS7) */ +}; + +enum MXL_HYDRA_TS_PID_FLT_CTRL_E { + MXL_HYDRA_TS_PIDS_ALLOW_ALL = 0, /* Allow all pids */ + MXL_HYDRA_TS_PIDS_DROP_ALL, /* Drop all pids */ + MXL_HYDRA_TS_INVALIDATE_PID_FILTER, /* Delete current PD filter in the device */ +}; + +enum MXL_HYDRA_TS_PID_TYPE_E { + MXL_HYDRA_TS_PID_FIXED = 0, + MXL_HYDRA_TS_PID_REGULAR, +}; + +struct MXL_HYDRA_TS_PID_T { + u16 original_pid; /* pid from TS */ + u16 remapped_pid; /* remapped pid */ + enum MXL_BOOL_E enable; /* enable or disable pid */ + enum MXL_BOOL_E allow_or_drop; /* allow or drop pid */ + enum MXL_BOOL_E enable_pid_remap; /* enable or disable pid remap */ + u8 bond_id; /* Bond ID in A0 always 0 - Only for 568 Sku */ + u8 dest_id; /* Output port ID for the PID - Only for 568 Sku */ +}; + +struct MXL_HYDRA_TS_MUX_PREFIX_HEADER_T { + enum MXL_BOOL_E enable; + u8 num_byte; + u8 header[12]; +}; + +enum MXL_HYDRA_PID_FILTER_BANK_E { + MXL_HYDRA_PID_BANK_A = 0, + MXL_HYDRA_PID_BANK_B, +}; + +enum MXL_HYDRA_MPEG_DATA_FMT_E { + MXL_HYDRA_MPEG_SERIAL_MSB_1ST = 0, + MXL_HYDRA_MPEG_SERIAL_LSB_1ST, + + MXL_HYDRA_MPEG_SYNC_WIDTH_BIT = 0, + MXL_HYDRA_MPEG_SYNC_WIDTH_BYTE +}; + +enum MXL_HYDRA_MPEG_MODE_E { + MXL_HYDRA_MPEG_MODE_SERIAL_4_WIRE = 0, /* MPEG 4 Wire serial mode */ + MXL_HYDRA_MPEG_MODE_SERIAL_3_WIRE, /* MPEG 3 Wire serial mode */ + MXL_HYDRA_MPEG_MODE_SERIAL_2_WIRE, /* MPEG 2 Wire serial mode */ + MXL_HYDRA_MPEG_MODE_PARALLEL /* MPEG parallel mode - valid only for MxL581 */ +}; + +enum MXL_HYDRA_MPEG_CLK_TYPE_E { + MXL_HYDRA_MPEG_CLK_CONTINUOUS = 0, /* Continuous MPEG clock */ + MXL_HYDRA_MPEG_CLK_GAPPED, /* Gapped (gated) MPEG clock */ +}; + +enum MXL_HYDRA_MPEG_CLK_FMT_E { + MXL_HYDRA_MPEG_ACTIVE_LOW = 0, + MXL_HYDRA_MPEG_ACTIVE_HIGH, + + MXL_HYDRA_MPEG_CLK_NEGATIVE = 0, + MXL_HYDRA_MPEG_CLK_POSITIVE, + + MXL_HYDRA_MPEG_CLK_IN_PHASE = 0, + MXL_HYDRA_MPEG_CLK_INVERTED, +}; + +enum MXL_HYDRA_MPEG_CLK_PHASE_E { + MXL_HYDRA_MPEG_CLK_PHASE_SHIFT_0_DEG = 0, + MXL_HYDRA_MPEG_CLK_PHASE_SHIFT_90_DEG, + MXL_HYDRA_MPEG_CLK_PHASE_SHIFT_180_DEG, + MXL_HYDRA_MPEG_CLK_PHASE_SHIFT_270_DEG +}; + +enum MXL_HYDRA_MPEG_ERR_INDICATION_E { + 
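+ /*
+  * Selects how corrupted TS packets are flagged downstream: on the SYNC byte,
+  * on the VALID signal, or not at all (see the xpt_err_replace_* fields
+  * programmed in config_ts()).
+  */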
MXL_HYDRA_MPEG_ERR_REPLACE_SYNC = 0, + MXL_HYDRA_MPEG_ERR_REPLACE_VALID, + MXL_HYDRA_MPEG_ERR_INDICATION_DISABLED +}; + +struct MXL_HYDRA_MPEGOUT_PARAM_T { + int enable; /* Enable or Disable MPEG OUT */ + enum MXL_HYDRA_MPEG_CLK_TYPE_E mpeg_clk_type; /* Continuous or gapped */ + enum MXL_HYDRA_MPEG_CLK_FMT_E mpeg_clk_pol; /* MPEG Clk polarity */ + u8 max_mpeg_clk_rate; /* Max MPEG Clk rate (0 - 104 MHz, 139 MHz) */ + enum MXL_HYDRA_MPEG_CLK_PHASE_E mpeg_clk_phase; /* MPEG Clk phase */ + enum MXL_HYDRA_MPEG_DATA_FMT_E lsb_or_msb_first; /* LSB first or MSB first in TS transmission */ + enum MXL_HYDRA_MPEG_DATA_FMT_E mpeg_sync_pulse_width; /* MPEG SYNC pulse width (1-bit or 1-byte) */ + enum MXL_HYDRA_MPEG_CLK_FMT_E mpeg_valid_pol; /* MPEG VALID polarity */ + enum MXL_HYDRA_MPEG_CLK_FMT_E mpeg_sync_pol; /* MPEG SYNC polarity */ + enum MXL_HYDRA_MPEG_MODE_E mpeg_mode; /* config 4/3/2-wire serial or parallel TS out */ + enum MXL_HYDRA_MPEG_ERR_INDICATION_E mpeg_error_indication; /* Enable or Disable MPEG error indication */ +}; + +enum MXL_HYDRA_EXT_TS_IN_ID_E { + MXL_HYDRA_EXT_TS_IN_0 = 0, + MXL_HYDRA_EXT_TS_IN_1, + MXL_HYDRA_EXT_TS_IN_2, + MXL_HYDRA_EXT_TS_IN_3, + MXL_HYDRA_EXT_TS_IN_MAX +}; + +enum MXL_HYDRA_TS_OUT_ID_E { + MXL_HYDRA_TS_OUT_0 = 0, + MXL_HYDRA_TS_OUT_1, + MXL_HYDRA_TS_OUT_2, + MXL_HYDRA_TS_OUT_3, + MXL_HYDRA_TS_OUT_4, + MXL_HYDRA_TS_OUT_5, + MXL_HYDRA_TS_OUT_6, + MXL_HYDRA_TS_OUT_7, + MXL_HYDRA_TS_OUT_MAX +}; + +enum MXL_HYDRA_TS_DRIVE_STRENGTH_E { + MXL_HYDRA_TS_DRIVE_STRENGTH_1X = 0, + MXL_HYDRA_TS_DRIVE_STRENGTH_2X, + MXL_HYDRA_TS_DRIVE_STRENGTH_3X, + MXL_HYDRA_TS_DRIVE_STRENGTH_4X, + MXL_HYDRA_TS_DRIVE_STRENGTH_5X, + MXL_HYDRA_TS_DRIVE_STRENGTH_6X, + MXL_HYDRA_TS_DRIVE_STRENGTH_7X, + MXL_HYDRA_TS_DRIVE_STRENGTH_8X +}; + +enum MXL_HYDRA_DEVICE_E { + MXL_HYDRA_DEVICE_581 = 0, + MXL_HYDRA_DEVICE_584, + MXL_HYDRA_DEVICE_585, + MXL_HYDRA_DEVICE_544, + MXL_HYDRA_DEVICE_561, + MXL_HYDRA_DEVICE_TEST, + MXL_HYDRA_DEVICE_582, + MXL_HYDRA_DEVICE_541, + MXL_HYDRA_DEVICE_568, + MXL_HYDRA_DEVICE_542, + MXL_HYDRA_DEVICE_541S, + MXL_HYDRA_DEVICE_561S, + MXL_HYDRA_DEVICE_581S, + MXL_HYDRA_DEVICE_MAX +}; + +/* Demod IQ data */ +struct MXL_HYDRA_DEMOD_IQ_SRC_T { + u32 demod_id; + u32 source_of_iq; /* == 0, it means I/Q comes from Formatter + * == 1, Legacy FEC + * == 2, Frequency Recovery + * == 3, NBC + * == 4, CTL + * == 5, EQ + * == 6, FPGA + */ +}; + +struct MXL_HYDRA_DEMOD_ABORT_TUNE_T { + u32 demod_id; +}; + +struct MXL_HYDRA_TUNER_CMD { + u8 tuner_id; + u8 enable; +}; + +/* Demod Para for Channel Tune */ +struct MXL_HYDRA_DEMOD_PARAM_T { + u32 tuner_index; + u32 demod_index; + u32 frequency_in_hz; /* Frequency */ + u32 standard; /* one of MXL_HYDRA_BCAST_STD_E */ + u32 spectrum_inversion; /* Input : Spectrum inversion. */ + u32 roll_off; /* rollOff (alpha) factor */ + u32 symbol_rate_in_hz; /* Symbol rate */ + u32 pilots; /* TRUE = pilots enabled */ + u32 modulation_scheme; /* Input : Modulation Scheme is one of MXL_HYDRA_MODULATION_E */ + u32 fec_code_rate; /* Input : Forward error correction rate. Is one of MXL_HYDRA_FEC_E */ + u32 max_carrier_offset_in_mhz; /* Maximum carrier freq offset in MHz. Same as freqSearchRangeKHz, but in unit of MHz. 
*/ +}; + +struct MXL_HYDRA_DEMOD_SCRAMBLE_CODE_T { + u32 demod_index; + u8 scramble_sequence[12]; /* scramble sequence */ + u32 scramble_code; /* scramble gold code */ +}; + +struct MXL_INTR_CFG_T { + u32 intr_type; + u32 intr_duration_in_nano_secs; + u32 intr_mask; +}; + +struct MXL_HYDRA_POWER_MODE_CMD { + u8 power_mode; /* enumeration values are defined in MXL_HYDRA_PWR_MODE_E (device API.h) */ +}; + +struct MXL_HYDRA_RF_WAKEUP_PARAM_T { + u32 time_interval_in_seconds; /* in seconds */ + u32 tuner_index; + s32 rssi_threshold; +}; + +struct MXL_HYDRA_RF_WAKEUP_CFG_T { + u32 tuner_count; + struct MXL_HYDRA_RF_WAKEUP_PARAM_T params; +}; + +enum MXL_HYDRA_AUX_CTRL_MODE_E { + MXL_HYDRA_AUX_CTRL_MODE_FSK = 0, /* Select FSK controller */ + MXL_HYDRA_AUX_CTRL_MODE_DISEQC, /* Select DiSEqC controller */ +}; + +enum MXL_HYDRA_DISEQC_OPMODE_E { + MXL_HYDRA_DISEQC_ENVELOPE_MODE = 0, + MXL_HYDRA_DISEQC_TONE_MODE, +}; + +enum MXL_HYDRA_DISEQC_VER_E { + MXL_HYDRA_DISEQC_1_X = 0, /* Config DiSEqC 1.x mode */ + MXL_HYDRA_DISEQC_2_X, /* Config DiSEqC 2.x mode */ + MXL_HYDRA_DISEQC_DISABLE /* Disable DiSEqC */ +}; + +enum MXL_HYDRA_DISEQC_CARRIER_FREQ_E { + MXL_HYDRA_DISEQC_CARRIER_FREQ_22KHZ = 0, /* DiSEqC signal frequency of 22 KHz */ + MXL_HYDRA_DISEQC_CARRIER_FREQ_33KHZ, /* DiSEqC signal frequency of 33 KHz */ + MXL_HYDRA_DISEQC_CARRIER_FREQ_44KHZ /* DiSEqC signal frequency of 44 KHz */ +}; + +enum MXL_HYDRA_DISEQC_ID_E { + MXL_HYDRA_DISEQC_ID_0 = 0, + MXL_HYDRA_DISEQC_ID_1, + MXL_HYDRA_DISEQC_ID_2, + MXL_HYDRA_DISEQC_ID_3 +}; + +enum MXL_HYDRA_FSK_OP_MODE_E { + MXL_HYDRA_FSK_CFG_TYPE_39KPBS = 0, /* 39.0kbps */ + MXL_HYDRA_FSK_CFG_TYPE_39_017KPBS, /* 39.017kbps */ + MXL_HYDRA_FSK_CFG_TYPE_115_2KPBS /* 115.2kbps */ +}; + +struct MXL58X_DSQ_OP_MODE_T { + u32 diseqc_id; /* DSQ 0, 1, 2 or 3 */ + u32 op_mode; /* Envelope mode (0) or internal tone mode (1) */ + u32 version; /* 0: 1.0, 1: 1.1, 2: Disable */ + u32 center_freq; /* 0: 22KHz, 1: 33KHz and 2: 44 KHz */ +}; + +struct MXL_HYDRA_DISEQC_CFG_CONT_TONE_T { + u32 diseqc_id; + u32 cont_tone_flag; /* 1: Enable , 0: Disable */ +}; diff --git a/drivers/media/dvb-frontends/mxl5xx_regs.h b/drivers/media/dvb-frontends/mxl5xx_regs.h new file mode 100644 index 000000000000..5001dafe1ba8 --- /dev/null +++ b/drivers/media/dvb-frontends/mxl5xx_regs.h @@ -0,0 +1,367 @@ +/* + * Copyright (c) 2011-2013 MaxLinear, Inc. All rights reserved + * + * License type: GPLv2 + * + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free Software + * Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. + * + * This program may alternatively be licensed under a proprietary license from + * MaxLinear, Inc. 
+ * + */ + +#ifndef __MXL58X_REGISTERS_H__ +#define __MXL58X_REGISTERS_H__ + +#define HYDRA_INTR_STATUS_REG 0x80030008 +#define HYDRA_INTR_MASK_REG 0x8003000C + +#define HYDRA_CRYSTAL_SETTING 0x3FFFC5F0 /* 0 - 24 MHz & 1 - 27 MHz */ +#define HYDRA_CRYSTAL_CAP 0x3FFFEDA4 /* 0 - 24 MHz & 1 - 27 MHz */ + +#define HYDRA_CPU_RESET_REG 0x8003003C +#define HYDRA_CPU_RESET_DATA 0x00000400 + +#define HYDRA_RESET_TRANSPORT_FIFO_REG 0x80030028 +#define HYDRA_RESET_TRANSPORT_FIFO_DATA 0x00000000 + +#define HYDRA_RESET_BBAND_REG 0x80030024 +#define HYDRA_RESET_BBAND_DATA 0x00000000 + +#define HYDRA_RESET_XBAR_REG 0x80030020 +#define HYDRA_RESET_XBAR_DATA 0x00000000 + +#define HYDRA_MODULES_CLK_1_REG 0x80030014 +#define HYDRA_DISABLE_CLK_1 0x00000000 + +#define HYDRA_MODULES_CLK_2_REG 0x8003001C +#define HYDRA_DISABLE_CLK_2 0x0000000B + +#define HYDRA_PRCM_ROOT_CLK_REG 0x80030018 +#define HYDRA_PRCM_ROOT_CLK_DISABLE 0x00000000 + +#define HYDRA_CPU_RESET_CHECK_REG 0x80030008 +#define HYDRA_CPU_RESET_CHECK_OFFSET 0x40000000 /* */ + +#define HYDRA_SKU_ID_REG 0x90000190 + +#define FW_DL_SIGN_ADDR 0x3FFFEAE0 + +/* Register to check if FW is running or not */ +#define HYDRA_HEAR_BEAT 0x3FFFEDDC + +/* Firmware version */ +#define HYDRA_FIRMWARE_VERSION 0x3FFFEDB8 +#define HYDRA_FW_RC_VERSION 0x3FFFCFAC + +/* Firmware patch version */ +#define HYDRA_FIRMWARE_PATCH_VERSION 0x3FFFEDC2 + +/* SOC operating temperature in C */ +#define HYDRA_TEMPARATURE 0x3FFFEDB4 + +/* Demod & Tuner status registers */ +/* Demod 0 status base address */ +#define HYDRA_DEMOD_0_BASE_ADDR 0x3FFFC64C + +/* Tuner 0 status base address */ +#define HYDRA_TUNER_0_BASE_ADDR 0x3FFFCE4C + +#define POWER_FROM_ADCRSSI_READBACK 0x3FFFEB6C + +/* Macros to determine base address of respective demod or tuner */ +#define HYDRA_DMD_STATUS_OFFSET(demodID) ((demodID) * 0x100) +#define HYDRA_TUNER_STATUS_OFFSET(tunerID) ((tunerID) * 0x40) + +/* Demod status address offset from respective demod's base address */ +#define HYDRA_DMD_AGC_DIG_LEVEL_ADDR_OFFSET 0x3FFFC64C +#define HYDRA_DMD_LOCK_STATUS_ADDR_OFFSET 0x3FFFC650 +#define HYDRA_DMD_ACQ_STATUS_ADDR_OFFSET 0x3FFFC654 + +#define HYDRA_DMD_STANDARD_ADDR_OFFSET 0x3FFFC658 +#define HYDRA_DMD_SPECTRUM_INVERSION_ADDR_OFFSET 0x3FFFC65C +#define HYDRA_DMD_SPECTRUM_ROLL_OFF_ADDR_OFFSET 0x3FFFC660 +#define HYDRA_DMD_SYMBOL_RATE_ADDR_OFFSET 0x3FFFC664 +#define HYDRA_DMD_MODULATION_SCHEME_ADDR_OFFSET 0x3FFFC668 +#define HYDRA_DMD_FEC_CODE_RATE_ADDR_OFFSET 0x3FFFC66C + +#define HYDRA_DMD_SNR_ADDR_OFFSET 0x3FFFC670 +#define HYDRA_DMD_FREQ_OFFSET_ADDR_OFFSET 0x3FFFC674 +#define HYDRA_DMD_CTL_FREQ_OFFSET_ADDR_OFFSET 0x3FFFC678 +#define HYDRA_DMD_STR_FREQ_OFFSET_ADDR_OFFSET 0x3FFFC67C +#define HYDRA_DMD_FTL_FREQ_OFFSET_ADDR_OFFSET 0x3FFFC680 +#define HYDRA_DMD_STR_NBC_SYNC_LOCK_ADDR_OFFSET 0x3FFFC684 +#define HYDRA_DMD_CYCLE_SLIP_COUNT_ADDR_OFFSET 0x3FFFC688 + +#define HYDRA_DMD_DISPLAY_I_ADDR_OFFSET 0x3FFFC68C +#define HYDRA_DMD_DISPLAY_Q_ADDR_OFFSET 0x3FFFC68E + +#define HYDRA_DMD_DVBS2_CRC_ERRORS_ADDR_OFFSET 0x3FFFC690 +#define HYDRA_DMD_DVBS2_PER_COUNT_ADDR_OFFSET 0x3FFFC694 +#define HYDRA_DMD_DVBS2_PER_WINDOW_ADDR_OFFSET 0x3FFFC698 + +#define HYDRA_DMD_DVBS_CORR_RS_ERRORS_ADDR_OFFSET 0x3FFFC69C +#define HYDRA_DMD_DVBS_UNCORR_RS_ERRORS_ADDR_OFFSET 0x3FFFC6A0 +#define HYDRA_DMD_DVBS_BER_COUNT_ADDR_OFFSET 0x3FFFC6A4 +#define HYDRA_DMD_DVBS_BER_WINDOW_ADDR_OFFSET 0x3FFFC6A8 + +/* Debug-purpose DVB-S DMD 0 */ +#define HYDRA_DMD_DVBS_1ST_CORR_RS_ERRORS_ADDR_OFFSET 0x3FFFC6C8 /* corrected RS Errors: 1st iteration */ 
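Despite the _OFFSET suffix, the demod status defines above are absolute addresses for demod 0; the same field for demod N sits HYDRA_DMD_STATUS_OFFSET(N) bytes higher (0x100 per demod), and the tuner status block scales likewise in 0x40 steps via HYDRA_TUNER_STATUS_OFFSET (the remaining per-demod debug counters continue directly below). A minimal sketch of that address arithmetic, mirroring how the HYDRA_DEMOD_STATUS_LOCK/UNLOCK macros further down combine the two values; the helper name is illustrative only, not part of the driver, and it assumes the kernel u32 type plus the macros defined in this header:

/* Illustrative only: absolute SNR status address for a given demod. */
static inline u32 hydra_dmd_snr_addr(u32 demod_id)
{
	return HYDRA_DMD_SNR_ADDR_OFFSET + HYDRA_DMD_STATUS_OFFSET(demod_id);
}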
+#define HYDRA_DMD_DVBS_1ST_UNCORR_RS_ERRORS_ADDR_OFFSET 0x3FFFC6CC /* uncorrected RS Errors: 1st iteration */ +#define HYDRA_DMD_DVBS_BER_COUNT_1ST_ADDR_OFFSET 0x3FFFC6D0 +#define HYDRA_DMD_DVBS_BER_WINDOW_1ST_ADDR_OFFSET 0x3FFFC6D4 + +#define HYDRA_DMD_TUNER_ID_ADDR_OFFSET 0x3FFFC6AC +#define HYDRA_DMD_DVBS2_PILOT_ON_OFF_ADDR_OFFSET 0x3FFFC6B0 +#define HYDRA_DMD_FREQ_SEARCH_RANGE_KHZ_ADDR_OFFSET 0x3FFFC6B4 +#define HYDRA_DMD_STATUS_LOCK_ADDR_OFFSET 0x3FFFC6B8 +#define HYDRA_DMD_STATUS_CENTER_FREQ_IN_KHZ_ADDR 0x3FFFC704 +#define HYDRA_DMD_STATUS_INPUT_POWER_ADDR 0x3FFFC708 + +/* DVB-S new scaled_BER_count for a new BER API, see HYDRA-1343 "DVB-S post viterbi information" */ +#define DMD0_STATUS_DVBS_1ST_SCALED_BER_COUNT_ADDR 0x3FFFC710 /* DMD 0: 1st iteration BER count scaled by HYDRA_BER_COUNT_SCALING_FACTOR */ +#define DMD0_STATUS_DVBS_SCALED_BER_COUNT_ADDR 0x3FFFC714 /* DMD 0: 2nd iteration BER count scaled by HYDRA_BER_COUNT_SCALING_FACTOR */ + +#define DMD0_SPECTRUM_MIN_GAIN_STATUS 0x3FFFC73C +#define DMD0_SPECTRUM_MIN_GAIN_WB_SAGC_VALUE 0x3FFFC740 +#define DMD0_SPECTRUM_MIN_GAIN_NB_SAGC_VALUE 0x3FFFC744 + +#define HYDRA_DMD_STATUS_END_ADDR_OFFSET 0x3FFFC748 + +/* Tuner status address offset from respective tuners's base address */ +#define HYDRA_TUNER_DEMOD_ID_ADDR_OFFSET 0x3FFFCE4C +#define HYDRA_TUNER_AGC_LOCK_OFFSET 0x3FFFCE50 +#define HYDRA_TUNER_SPECTRUM_STATUS_OFFSET 0x3FFFCE54 +#define HYDRA_TUNER_SPECTRUM_BIN_SIZE_OFFSET 0x3FFFCE58 +#define HYDRA_TUNER_SPECTRUM_ADDRESS_OFFSET 0x3FFFCE5C +#define HYDRA_TUNER_ENABLE_COMPLETE 0x3FFFEB78 + +#define HYDRA_DEMOD_STATUS_LOCK(devId, demodId) write_register(devId, (HYDRA_DMD_STATUS_LOCK_ADDR_OFFSET + HYDRA_DMD_STATUS_OFFSET(demodId)), MXL_YES) +#define HYDRA_DEMOD_STATUS_UNLOCK(devId, demodId) write_register(devId, (HYDRA_DMD_STATUS_LOCK_ADDR_OFFSET + HYDRA_DMD_STATUS_OFFSET(demodId)), MXL_NO) + +#define HYDRA_VERSION 0x3FFFEDB8 +#define HYDRA_DEMOD0_VERSION 0x3FFFEDBC +#define HYDRA_DEMOD1_VERSION 0x3FFFEDC0 +#define HYDRA_DEMOD2_VERSION 0x3FFFEDC4 +#define HYDRA_DEMOD3_VERSION 0x3FFFEDC8 +#define HYDRA_DEMOD4_VERSION 0x3FFFEDCC +#define HYDRA_DEMOD5_VERSION 0x3FFFEDD0 +#define HYDRA_DEMOD6_VERSION 0x3FFFEDD4 +#define HYDRA_DEMOD7_VERSION 0x3FFFEDD8 +#define HYDRA_HEAR_BEAT 0x3FFFEDDC +#define HYDRA_SKU_MGMT 0x3FFFEBC0 + +#define MXL_HYDRA_FPGA_A_ADDRESS 0x91C00000 +#define MXL_HYDRA_FPGA_B_ADDRESS 0x91D00000 + +/* TS control base address */ +#define HYDRA_TS_CTRL_BASE_ADDR 0x90700000 + +#define MPEG_MUX_MODE_SLICE0_REG (HYDRA_TS_CTRL_BASE_ADDR + 0x08) + +#define MPEG_MUX_MODE_SLICE1_REG (HYDRA_TS_CTRL_BASE_ADDR + 0x08) + +#define PID_BANK_SEL_SLICE0_REG (HYDRA_TS_CTRL_BASE_ADDR + 0x190) +#define PID_BANK_SEL_SLICE1_REG (HYDRA_TS_CTRL_BASE_ADDR + 0x1B0) + +#define MPEG_CLK_GATED_REG (HYDRA_TS_CTRL_BASE_ADDR + 0x20) + +#define MPEG_CLK_ALWAYS_ON_REG (HYDRA_TS_CTRL_BASE_ADDR + 0x1D4) + +#define HYDRA_REGULAR_PID_BANK_A_REG (HYDRA_TS_CTRL_BASE_ADDR + 0x190) + +#define HYDRA_FIXED_PID_BANK_A_REG (HYDRA_TS_CTRL_BASE_ADDR + 0x190) + +#define HYDRA_REGULAR_PID_BANK_B_REG (HYDRA_TS_CTRL_BASE_ADDR + 0x1B0) + +#define HYDRA_FIXED_PID_BANK_B_REG (HYDRA_TS_CTRL_BASE_ADDR + 0x1B0) + +#define FIXED_PID_TBL_REG_ADDRESS_0 (HYDRA_TS_CTRL_BASE_ADDR + 0x9000) +#define FIXED_PID_TBL_REG_ADDRESS_1 (HYDRA_TS_CTRL_BASE_ADDR + 0x9100) +#define FIXED_PID_TBL_REG_ADDRESS_2 (HYDRA_TS_CTRL_BASE_ADDR + 0x9200) +#define FIXED_PID_TBL_REG_ADDRESS_3 (HYDRA_TS_CTRL_BASE_ADDR + 0x9300) + +#define FIXED_PID_TBL_REG_ADDRESS_4 (HYDRA_TS_CTRL_BASE_ADDR + 0xB000) 
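The fixed-PID filter tables sit in two banks of four 0x100-byte blocks: TS outputs 0-3 at HYDRA_TS_CTRL_BASE_ADDR + 0x9000..0x9300, and outputs 4-7 from + 0xB000 onward (the remaining entries continue directly below). A minimal sketch of that layout, derived purely from the constants in this header; the helper name is illustrative and not part of the driver:

/* Illustrative only: fixed-PID table base for TS output n (0-7). */
static inline u32 hydra_fixed_pid_tbl_base(u32 ts_out)
{
	return HYDRA_TS_CTRL_BASE_ADDR +
	       (ts_out < 4 ? 0x9000 : 0xB000) + 0x100 * (ts_out & 3);
}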
+#define FIXED_PID_TBL_REG_ADDRESS_5 (HYDRA_TS_CTRL_BASE_ADDR + 0xB100) +#define FIXED_PID_TBL_REG_ADDRESS_6 (HYDRA_TS_CTRL_BASE_ADDR + 0xB200) +#define FIXED_PID_TBL_REG_ADDRESS_7 (HYDRA_TS_CTRL_BASE_ADDR + 0xB300) + +#define REGULAR_PID_TBL_REG_ADDRESS_0 (HYDRA_TS_CTRL_BASE_ADDR + 0x8000) +#define REGULAR_PID_TBL_REG_ADDRESS_1 (HYDRA_TS_CTRL_BASE_ADDR + 0x8200) +#define REGULAR_PID_TBL_REG_ADDRESS_2 (HYDRA_TS_CTRL_BASE_ADDR + 0x8400) +#define REGULAR_PID_TBL_REG_ADDRESS_3 (HYDRA_TS_CTRL_BASE_ADDR + 0x8600) + +#define REGULAR_PID_TBL_REG_ADDRESS_4 (HYDRA_TS_CTRL_BASE_ADDR + 0xA000) +#define REGULAR_PID_TBL_REG_ADDRESS_5 (HYDRA_TS_CTRL_BASE_ADDR + 0xA200) +#define REGULAR_PID_TBL_REG_ADDRESS_6 (HYDRA_TS_CTRL_BASE_ADDR + 0xA400) +#define REGULAR_PID_TBL_REG_ADDRESS_7 (HYDRA_TS_CTRL_BASE_ADDR + 0xA600) + +/***************************************************************************/ + +#define PAD_MUX_GPIO_00_SYNC_BASEADDR 0x90000188 + + +#define PAD_MUX_UART_RX_C_PINMUX_BASEADDR 0x9000001C + +#define XPT_PACKET_GAP_MIN_BASEADDR 0x90700044 +#define XPT_NCO_COUNT_BASEADDR 0x90700238 + +#define XPT_NCO_COUNT_BASEADDR1 0x9070023C + +/* V2 DigRF status register */ + +#define XPT_PID_BASEADDR 0x90708000 + +#define XPT_PID_REMAP_BASEADDR 0x90708004 + +#define XPT_KNOWN_PID_BASEADDR 0x90709000 + +#define XPT_PID_BASEADDR1 0x9070A000 + +#define XPT_PID_REMAP_BASEADDR1 0x9070A004 + +#define XPT_KNOWN_PID_BASEADDR1 0x9070B000 + +#define XPT_BERT_LOCK_BASEADDR 0x907000B8 + +#define XPT_BERT_BASEADDR 0x907000BC + +#define XPT_BERT_INVERT_BASEADDR 0x907000C0 + +#define XPT_BERT_HEADER_BASEADDR 0x907000C4 + +#define XPT_BERT_BASEADDR1 0x907000C8 + +#define XPT_BERT_BIT_COUNT0_BASEADDR 0x907000CC + +#define XPT_BERT_BIT_COUNT0_BASEADDR1 0x907000D0 + +#define XPT_BERT_BIT_COUNT1_BASEADDR 0x907000D4 + +#define XPT_BERT_BIT_COUNT1_BASEADDR1 0x907000D8 + +#define XPT_BERT_BIT_COUNT2_BASEADDR 0x907000DC + +#define XPT_BERT_BIT_COUNT2_BASEADDR1 0x907000E0 + +#define XPT_BERT_BIT_COUNT3_BASEADDR 0x907000E4 + +#define XPT_BERT_BIT_COUNT3_BASEADDR1 0x907000E8 + +#define XPT_BERT_BIT_COUNT4_BASEADDR 0x907000EC + +#define XPT_BERT_BIT_COUNT4_BASEADDR1 0x907000F0 + +#define XPT_BERT_BIT_COUNT5_BASEADDR 0x907000F4 + +#define XPT_BERT_BIT_COUNT5_BASEADDR1 0x907000F8 + +#define XPT_BERT_BIT_COUNT6_BASEADDR 0x907000FC + +#define XPT_BERT_BIT_COUNT6_BASEADDR1 0x90700100 + +#define XPT_BERT_BIT_COUNT7_BASEADDR 0x90700104 + +#define XPT_BERT_BIT_COUNT7_BASEADDR1 0x90700108 + +#define XPT_BERT_ERR_COUNT0_BASEADDR 0x9070010C + +#define XPT_BERT_ERR_COUNT0_BASEADDR1 0x90700110 + +#define XPT_BERT_ERR_COUNT1_BASEADDR 0x90700114 + +#define XPT_BERT_ERR_COUNT1_BASEADDR1 0x90700118 + +#define XPT_BERT_ERR_COUNT2_BASEADDR 0x9070011C + +#define XPT_BERT_ERR_COUNT2_BASEADDR1 0x90700120 + +#define XPT_BERT_ERR_COUNT3_BASEADDR 0x90700124 + +#define XPT_BERT_ERR_COUNT3_BASEADDR1 0x90700128 + +#define XPT_BERT_ERR_COUNT4_BASEADDR 0x9070012C + +#define XPT_BERT_ERR_COUNT4_BASEADDR1 0x90700130 + +#define XPT_BERT_ERR_COUNT5_BASEADDR 0x90700134 + +#define XPT_BERT_ERR_COUNT5_BASEADDR1 0x90700138 + +#define XPT_BERT_ERR_COUNT6_BASEADDR 0x9070013C + +#define XPT_BERT_ERR_COUNT6_BASEADDR1 0x90700140 + +#define XPT_BERT_ERR_COUNT7_BASEADDR 0x90700144 + +#define XPT_BERT_ERR_COUNT7_BASEADDR1 0x90700148 + +#define XPT_BERT_ERROR_BASEADDR 0x9070014C + +#define XPT_BERT_ANALYZER_BASEADDR 0x90700150 + +#define XPT_BERT_ANALYZER_BASEADDR1 0x90700154 + +#define XPT_BERT_ANALYZER_BASEADDR2 0x90700158 + +#define XPT_BERT_ANALYZER_BASEADDR3 0x9070015C + 
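The BERT counter registers above follow a regular stride: each of the eight bit-count register pairs starts 8 bytes after the previous one, with the ...BASEADDR1 half 4 bytes above its ...BASEADDR partner, and the error-count pairs repeat the same pattern from XPT_BERT_ERR_COUNT0_BASEADDR. A minimal sketch of that layout, derived only from the constants above (the helper names are illustrative, not part of the driver):

/* Illustrative only: BERT bit/error counter pair bases for TS path n (0-7). */
static inline u32 xpt_bert_bit_count_base(u32 n)
{
	return XPT_BERT_BIT_COUNT0_BASEADDR + 8 * n;
}

static inline u32 xpt_bert_err_count_base(u32 n)
{
	return XPT_BERT_ERR_COUNT0_BASEADDR + 8 * n;
}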
+#define XPT_BERT_ANALYZER_BASEADDR4 0x90700160 + +#define XPT_BERT_ANALYZER_BASEADDR5 0x90700164 + +#define XPT_BERT_ANALYZER_BASEADDR6 0x90700168 + +#define XPT_BERT_ANALYZER_BASEADDR7 0x9070016C + +#define XPT_BERT_ANALYZER_BASEADDR8 0x90700170 + +#define XPT_BERT_ANALYZER_BASEADDR9 0x90700174 + +#define XPT_DMD0_BASEADDR 0x9070024C + +/* V2 AGC Gain Freeze & step */ +#define DBG_ENABLE_DISABLE_AGC (0x3FFFCF60) /* 1: DISABLE, 0:ENABLE */ +#define WB_DFE0_DFE_FB_RF1_BASEADDR 0x903004A4 + +#define WB_DFE1_DFE_FB_RF1_BASEADDR 0x904004A4 + +#define WB_DFE2_DFE_FB_RF1_BASEADDR 0x905004A4 + +#define WB_DFE3_DFE_FB_RF1_BASEADDR 0x906004A4 + +#define AFE_REG_D2A_TA_RFFE_LNA_BO_1P8_BASEADDR 0x90200104 + +#define AFE_REG_AFE_REG_SPARE_BASEADDR 0x902000A0 + +#define AFE_REG_AFE_REG_SPARE_BASEADDR1 0x902000B4 + +#define AFE_REG_AFE_REG_SPARE_BASEADDR2 0x902000C4 + +#define AFE_REG_AFE_REG_SPARE_BASEADDR3 0x902000D4 + +#define WB_DFE0_DFE_FB_AGC_BASEADDR 0x90300498 + +#define WB_DFE1_DFE_FB_AGC_BASEADDR 0x90400498 + +#define WB_DFE2_DFE_FB_AGC_BASEADDR 0x90500498 + +#define WB_DFE3_DFE_FB_AGC_BASEADDR 0x90600498 + +#define WDT_WD_INT_BASEADDR 0x8002000C + +#define FSK_TX_FTM_BASEADDR 0x80090000 + +#define FSK_TX_FTM_TX_CNT_BASEADDR 0x80090018 + +#define AFE_REG_D2A_FSK_BIAS_BASEADDR 0x90200040 + +#define DMD_TEI_BASEADDR 0x3FFFEBE0 + +#endif /* __MXL58X_REGISTERS_H__ */ diff --git a/drivers/media/dvb-frontends/s5h1420.c b/drivers/media/dvb-frontends/s5h1420.c index cba9bff05b12..fd427a29c001 100644 --- a/drivers/media/dvb-frontends/s5h1420.c +++ b/drivers/media/dvb-frontends/s5h1420.c @@ -864,7 +864,7 @@ static int s5h1420_tuner_i2c_tuner_xfer(struct i2c_adapter *i2c_adap, struct i2c return i2c_transfer(state->i2c, m, 1 + num) == 1 + num ? num : -EIO; } -static struct i2c_algorithm s5h1420_tuner_i2c_algo = { +static const struct i2c_algorithm s5h1420_tuner_i2c_algo = { .master_xfer = s5h1420_tuner_i2c_tuner_xfer, .functionality = s5h1420_tuner_i2c_func, }; diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c index 8ac0f598978d..f3529df8211d 100644 --- a/drivers/media/dvb-frontends/stv0367.c +++ b/drivers/media/dvb-frontends/stv0367.c @@ -2149,6 +2149,71 @@ static u32 stv0367cab_GetSymbolRate(struct stv0367_state *state, u32 mclk_hz) return regsym; } +static u32 stv0367cab_fsm_status(struct stv0367_state *state) +{ + return stv0367_readbits(state, F367CAB_FSM_STATUS); +} + +static u32 stv0367cab_qamfec_lock(struct stv0367_state *state) +{ + return stv0367_readbits(state, + (state->cab_state->qamfec_status_reg ? 
+ state->cab_state->qamfec_status_reg : + F367CAB_QAMFEC_LOCK)); +} + +static +enum stv0367_cab_signal_type stv0367cab_fsm_signaltype(u32 qam_fsm_status) +{ + enum stv0367_cab_signal_type signaltype = FE_CAB_NOAGC; + + switch (qam_fsm_status) { + case 1: + signaltype = FE_CAB_NOAGC; + break; + case 2: + signaltype = FE_CAB_NOTIMING; + break; + case 3: + signaltype = FE_CAB_TIMINGOK; + break; + case 4: + signaltype = FE_CAB_NOCARRIER; + break; + case 5: + signaltype = FE_CAB_CARRIEROK; + break; + case 7: + signaltype = FE_CAB_NOBLIND; + break; + case 8: + signaltype = FE_CAB_BLINDOK; + break; + case 10: + signaltype = FE_CAB_NODEMOD; + break; + case 11: + signaltype = FE_CAB_DEMODOK; + break; + case 12: + signaltype = FE_CAB_DEMODOK; + break; + case 13: + signaltype = FE_CAB_NODEMOD; + break; + case 14: + signaltype = FE_CAB_NOBLIND; + break; + case 15: + signaltype = FE_CAB_NOSIGNAL; + break; + default: + break; + } + + return signaltype; +} + static int stv0367cab_read_status(struct dvb_frontend *fe, enum fe_status *status) { @@ -2158,22 +2223,26 @@ static int stv0367cab_read_status(struct dvb_frontend *fe, *status = 0; - if (state->cab_state->state > FE_CAB_NOSIGNAL) - *status |= FE_HAS_SIGNAL; + /* update cab_state->state from QAM_FSM_STATUS */ + state->cab_state->state = stv0367cab_fsm_signaltype( + stv0367cab_fsm_status(state)); - if (state->cab_state->state > FE_CAB_NOCARRIER) - *status |= FE_HAS_CARRIER; - - if (state->cab_state->state >= FE_CAB_DEMODOK) - *status |= FE_HAS_VITERBI; - - if (state->cab_state->state >= FE_CAB_DATAOK) - *status |= FE_HAS_SYNC; - - if (stv0367_readbits(state, (state->cab_state->qamfec_status_reg ? - state->cab_state->qamfec_status_reg : F367CAB_QAMFEC_LOCK))) { - *status |= FE_HAS_LOCK; + if (stv0367cab_qamfec_lock(state)) { + *status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI + | FE_HAS_SYNC | FE_HAS_LOCK; dprintk("%s: stv0367 has locked\n", __func__); + } else { + if (state->cab_state->state > FE_CAB_NOSIGNAL) + *status |= FE_HAS_SIGNAL; + + if (state->cab_state->state > FE_CAB_NOCARRIER) + *status |= FE_HAS_CARRIER; + + if (state->cab_state->state >= FE_CAB_DEMODOK) + *status |= FE_HAS_VITERBI; + + if (state->cab_state->state >= FE_CAB_DATAOK) + *status |= FE_HAS_SYNC; } return 0; @@ -2374,7 +2443,7 @@ enum stv0367_cab_signal_type stv0367cab_algo(struct stv0367_state *state, LockTime = 0; stv0367_writereg(state, R367CAB_CTRL_1, 0x00); do { - QAM_Lock = stv0367_readbits(state, F367CAB_FSM_STATUS); + QAM_Lock = stv0367cab_fsm_status(state); if ((LockTime >= (DemodTimeOut - EQLTimeOut)) && (QAM_Lock == 0x04)) /* @@ -2435,10 +2504,7 @@ enum stv0367_cab_signal_type stv0367cab_algo(struct stv0367_state *state, do { usleep_range(5000, 7000); LockTime += 5; - QAMFEC_Lock = stv0367_readbits(state, - (state->cab_state->qamfec_status_reg ? 
- state->cab_state->qamfec_status_reg : - F367CAB_QAMFEC_LOCK)); + QAMFEC_Lock = stv0367cab_qamfec_lock(state); } while (!QAMFEC_Lock && (LockTime < FECTimeOut)); } else QAMFEC_Lock = 0; @@ -2474,52 +2540,8 @@ enum stv0367_cab_signal_type stv0367cab_algo(struct stv0367_state *state, cab_state->locked = 1; /* stv0367_setbits(state, F367CAB_AGC_ACCUMRSTSEL,7);*/ - } else { - switch (QAM_Lock) { - case 1: - signalType = FE_CAB_NOAGC; - break; - case 2: - signalType = FE_CAB_NOTIMING; - break; - case 3: - signalType = FE_CAB_TIMINGOK; - break; - case 4: - signalType = FE_CAB_NOCARRIER; - break; - case 5: - signalType = FE_CAB_CARRIEROK; - break; - case 7: - signalType = FE_CAB_NOBLIND; - break; - case 8: - signalType = FE_CAB_BLINDOK; - break; - case 10: - signalType = FE_CAB_NODEMOD; - break; - case 11: - signalType = FE_CAB_DEMODOK; - break; - case 12: - signalType = FE_CAB_DEMODOK; - break; - case 13: - signalType = FE_CAB_NODEMOD; - break; - case 14: - signalType = FE_CAB_NOBLIND; - break; - case 15: - signalType = FE_CAB_NOSIGNAL; - break; - default: - break; - } - - } + } else + signalType = stv0367cab_fsm_signaltype(QAM_Lock); /* Set the AGC control values to tracking values */ stv0367_writebits(state, F367CAB_AGC_ACCUMRSTSEL, TrackAGCAccum); @@ -3090,7 +3112,7 @@ static int stv0367ddb_read_status(struct dvb_frontend *fe, { struct stv0367_state *state = fe->demodulator_priv; struct dtv_frontend_properties *p = &fe->dtv_property_cache; - int ret; + int ret = 0; switch (state->activedemod) { case demod_ter: @@ -3100,7 +3122,7 @@ static int stv0367ddb_read_status(struct dvb_frontend *fe, ret = stv0367cab_read_status(fe, status); break; default: - return 0; + break; } /* stop and report on *_read_status failure */ @@ -3138,7 +3160,7 @@ static int stv0367ddb_get_frontend(struct dvb_frontend *fe, break; } - return -EINVAL; + return 0; } static int stv0367ddb_sleep(struct dvb_frontend *fe) @@ -3261,7 +3283,7 @@ static const struct dvb_frontend_ops stv0367ddb_ops = { 0x400 |/* FE_CAN_QAM_4 */ FE_CAN_QAM_16 | FE_CAN_QAM_32 | FE_CAN_QAM_64 | FE_CAN_QAM_128 | - FE_CAN_QAM_256 | FE_CAN_QAM_AUTO | + FE_CAN_QAM_256 | /* DVB-T */ FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | diff --git a/drivers/media/dvb-frontends/stv0910.c b/drivers/media/dvb-frontends/stv0910.c new file mode 100644 index 000000000000..8bf855c301f5 --- /dev/null +++ b/drivers/media/dvb-frontends/stv0910.c @@ -0,0 +1,1813 @@ +/* + * Driver for the ST STV0910 DVB-S/S2 demodulator. + * + * Copyright (C) 2014-2015 Ralph Metzler + * Marcus Metzler + * developed for Digital Devices GmbH + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 only, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dvb_math.h" +#include "dvb_frontend.h" +#include "stv0910.h" +#include "stv0910_regs.h" + +#define EXT_CLOCK 30000000 +#define TUNING_DELAY 200 +#define BER_SRC_S 0x20 +#define BER_SRC_S2 0x20 + +static LIST_HEAD(stvlist); + +enum receive_mode { RCVMODE_NONE, RCVMODE_DVBS, RCVMODE_DVBS2, RCVMODE_AUTO }; + +enum dvbs2_fectype { DVBS2_64K, DVBS2_16K }; + +enum dvbs2_mod_cod { + DVBS2_DUMMY_PLF, DVBS2_QPSK_1_4, DVBS2_QPSK_1_3, DVBS2_QPSK_2_5, + DVBS2_QPSK_1_2, DVBS2_QPSK_3_5, DVBS2_QPSK_2_3, DVBS2_QPSK_3_4, + DVBS2_QPSK_4_5, DVBS2_QPSK_5_6, DVBS2_QPSK_8_9, DVBS2_QPSK_9_10, + DVBS2_8PSK_3_5, DVBS2_8PSK_2_3, DVBS2_8PSK_3_4, DVBS2_8PSK_5_6, + DVBS2_8PSK_8_9, DVBS2_8PSK_9_10, DVBS2_16APSK_2_3, DVBS2_16APSK_3_4, + DVBS2_16APSK_4_5, DVBS2_16APSK_5_6, DVBS2_16APSK_8_9, DVBS2_16APSK_9_10, + DVBS2_32APSK_3_4, DVBS2_32APSK_4_5, DVBS2_32APSK_5_6, DVBS2_32APSK_8_9, + DVBS2_32APSK_9_10 +}; + +enum fe_stv0910_mod_cod { + FE_DUMMY_PLF, FE_QPSK_14, FE_QPSK_13, FE_QPSK_25, + FE_QPSK_12, FE_QPSK_35, FE_QPSK_23, FE_QPSK_34, + FE_QPSK_45, FE_QPSK_56, FE_QPSK_89, FE_QPSK_910, + FE_8PSK_35, FE_8PSK_23, FE_8PSK_34, FE_8PSK_56, + FE_8PSK_89, FE_8PSK_910, FE_16APSK_23, FE_16APSK_34, + FE_16APSK_45, FE_16APSK_56, FE_16APSK_89, FE_16APSK_910, + FE_32APSK_34, FE_32APSK_45, FE_32APSK_56, FE_32APSK_89, + FE_32APSK_910 +}; + +enum fe_stv0910_roll_off { FE_SAT_35, FE_SAT_25, FE_SAT_20, FE_SAT_15 }; + +static inline u32 muldiv32(u32 a, u32 b, u32 c) +{ + u64 tmp64; + + tmp64 = (u64)a * (u64)b; + do_div(tmp64, c); + + return (u32)tmp64; +} + +struct stv_base { + struct list_head stvlist; + + u8 adr; + struct i2c_adapter *i2c; + struct mutex i2c_lock; /* shared I2C access protect */ + struct mutex reg_lock; /* shared register write protect */ + int count; + + u32 extclk; + u32 mclk; +}; + +struct stv { + struct stv_base *base; + struct dvb_frontend fe; + int nr; + u16 regoff; + u8 i2crpt; + u8 tscfgh; + u8 tsgeneral; + u8 tsspeed; + u8 single; + unsigned long tune_time; + + s32 search_range; + u32 started; + u32 demod_lock_time; + enum receive_mode receive_mode; + u32 demod_timeout; + u32 fec_timeout; + u32 first_time_lock; + u8 demod_bits; + u32 symbol_rate; + + u8 last_viterbi_rate; + enum fe_code_rate puncture_rate; + enum fe_stv0910_mod_cod mod_cod; + enum dvbs2_fectype fectype; + u32 pilots; + enum fe_stv0910_roll_off feroll_off; + + int is_standard_broadcast; + int is_vcm; + + u32 cur_scrambling_code; + + u32 last_bernumerator; + u32 last_berdenominator; + u8 berscale; + + u8 vth[6]; +}; + +struct sinit_table { + u16 address; + u8 data; +}; + +struct slookup { + s16 value; + u32 reg_value; +}; + +static inline int i2c_write(struct i2c_adapter *adap, u8 adr, + u8 *data, int len) +{ + struct i2c_msg msg = {.addr = adr, .flags = 0, + .buf = data, .len = len}; + + if (i2c_transfer(adap, &msg, 1) != 1) { + dev_warn(&adap->dev, "i2c write error ([%02x] %04x: %02x)\n", + adr, (data[0] << 8) | data[1], + (len > 2 ? 
data[2] : 0)); + return -EREMOTEIO; + } + return 0; +} + +static int i2c_write_reg16(struct i2c_adapter *adap, u8 adr, u16 reg, u8 val) +{ + u8 msg[3] = {reg >> 8, reg & 0xff, val}; + + return i2c_write(adap, adr, msg, 3); +} + +static int write_reg(struct stv *state, u16 reg, u8 val) +{ + return i2c_write_reg16(state->base->i2c, state->base->adr, reg, val); +} + +static inline int i2c_read_regs16(struct i2c_adapter *adapter, u8 adr, + u16 reg, u8 *val, int count) +{ + u8 msg[2] = {reg >> 8, reg & 0xff}; + struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0, + .buf = msg, .len = 2}, + {.addr = adr, .flags = I2C_M_RD, + .buf = val, .len = count } }; + + if (i2c_transfer(adapter, msgs, 2) != 2) { + dev_warn(&adapter->dev, "i2c read error ([%02x] %04x)\n", + adr, reg); + return -EREMOTEIO; + } + return 0; +} + +static int read_reg(struct stv *state, u16 reg, u8 *val) +{ + return i2c_read_regs16(state->base->i2c, state->base->adr, + reg, val, 1); +} + +static int read_regs(struct stv *state, u16 reg, u8 *val, int len) +{ + return i2c_read_regs16(state->base->i2c, state->base->adr, + reg, val, len); +} + +static int write_shared_reg(struct stv *state, u16 reg, u8 mask, u8 val) +{ + int status; + u8 tmp; + + mutex_lock(&state->base->reg_lock); + status = read_reg(state, reg, &tmp); + if (!status) + status = write_reg(state, reg, (tmp & ~mask) | (val & mask)); + mutex_unlock(&state->base->reg_lock); + return status; +} + +static const struct slookup s1_sn_lookup[] = { + { 0, 9242 }, /* C/N= 0dB */ + { 5, 9105 }, /* C/N= 0.5dB */ + { 10, 8950 }, /* C/N= 1.0dB */ + { 15, 8780 }, /* C/N= 1.5dB */ + { 20, 8566 }, /* C/N= 2.0dB */ + { 25, 8366 }, /* C/N= 2.5dB */ + { 30, 8146 }, /* C/N= 3.0dB */ + { 35, 7908 }, /* C/N= 3.5dB */ + { 40, 7666 }, /* C/N= 4.0dB */ + { 45, 7405 }, /* C/N= 4.5dB */ + { 50, 7136 }, /* C/N= 5.0dB */ + { 55, 6861 }, /* C/N= 5.5dB */ + { 60, 6576 }, /* C/N= 6.0dB */ + { 65, 6330 }, /* C/N= 6.5dB */ + { 70, 6048 }, /* C/N= 7.0dB */ + { 75, 5768 }, /* C/N= 7.5dB */ + { 80, 5492 }, /* C/N= 8.0dB */ + { 85, 5224 }, /* C/N= 8.5dB */ + { 90, 4959 }, /* C/N= 9.0dB */ + { 95, 4709 }, /* C/N= 9.5dB */ + { 100, 4467 }, /* C/N=10.0dB */ + { 105, 4236 }, /* C/N=10.5dB */ + { 110, 4013 }, /* C/N=11.0dB */ + { 115, 3800 }, /* C/N=11.5dB */ + { 120, 3598 }, /* C/N=12.0dB */ + { 125, 3406 }, /* C/N=12.5dB */ + { 130, 3225 }, /* C/N=13.0dB */ + { 135, 3052 }, /* C/N=13.5dB */ + { 140, 2889 }, /* C/N=14.0dB */ + { 145, 2733 }, /* C/N=14.5dB */ + { 150, 2587 }, /* C/N=15.0dB */ + { 160, 2318 }, /* C/N=16.0dB */ + { 170, 2077 }, /* C/N=17.0dB */ + { 180, 1862 }, /* C/N=18.0dB */ + { 190, 1670 }, /* C/N=19.0dB */ + { 200, 1499 }, /* C/N=20.0dB */ + { 210, 1347 }, /* C/N=21.0dB */ + { 220, 1213 }, /* C/N=22.0dB */ + { 230, 1095 }, /* C/N=23.0dB */ + { 240, 992 }, /* C/N=24.0dB */ + { 250, 900 }, /* C/N=25.0dB */ + { 260, 826 }, /* C/N=26.0dB */ + { 270, 758 }, /* C/N=27.0dB */ + { 280, 702 }, /* C/N=28.0dB */ + { 290, 653 }, /* C/N=29.0dB */ + { 300, 613 }, /* C/N=30.0dB */ + { 310, 579 }, /* C/N=31.0dB */ + { 320, 550 }, /* C/N=32.0dB */ + { 330, 526 }, /* C/N=33.0dB */ + { 350, 490 }, /* C/N=33.0dB */ + { 400, 445 }, /* C/N=40.0dB */ + { 450, 430 }, /* C/N=45.0dB */ + { 500, 426 }, /* C/N=50.0dB */ + { 510, 425 } /* C/N=51.0dB */ +}; + +static const struct slookup s2_sn_lookup[] = { + { -30, 13950 }, /* C/N=-2.5dB */ + { -25, 13580 }, /* C/N=-2.5dB */ + { -20, 13150 }, /* C/N=-2.0dB */ + { -15, 12760 }, /* C/N=-1.5dB */ + { -10, 12345 }, /* C/N=-1.0dB */ + { -5, 11900 }, /* C/N=-0.5dB */ + { 0, 
11520 }, /* C/N= 0dB */ + { 5, 11080 }, /* C/N= 0.5dB */ + { 10, 10630 }, /* C/N= 1.0dB */ + { 15, 10210 }, /* C/N= 1.5dB */ + { 20, 9790 }, /* C/N= 2.0dB */ + { 25, 9390 }, /* C/N= 2.5dB */ + { 30, 8970 }, /* C/N= 3.0dB */ + { 35, 8575 }, /* C/N= 3.5dB */ + { 40, 8180 }, /* C/N= 4.0dB */ + { 45, 7800 }, /* C/N= 4.5dB */ + { 50, 7430 }, /* C/N= 5.0dB */ + { 55, 7080 }, /* C/N= 5.5dB */ + { 60, 6720 }, /* C/N= 6.0dB */ + { 65, 6320 }, /* C/N= 6.5dB */ + { 70, 6060 }, /* C/N= 7.0dB */ + { 75, 5760 }, /* C/N= 7.5dB */ + { 80, 5480 }, /* C/N= 8.0dB */ + { 85, 5200 }, /* C/N= 8.5dB */ + { 90, 4930 }, /* C/N= 9.0dB */ + { 95, 4680 }, /* C/N= 9.5dB */ + { 100, 4425 }, /* C/N=10.0dB */ + { 105, 4210 }, /* C/N=10.5dB */ + { 110, 3980 }, /* C/N=11.0dB */ + { 115, 3765 }, /* C/N=11.5dB */ + { 120, 3570 }, /* C/N=12.0dB */ + { 125, 3315 }, /* C/N=12.5dB */ + { 130, 3140 }, /* C/N=13.0dB */ + { 135, 2980 }, /* C/N=13.5dB */ + { 140, 2820 }, /* C/N=14.0dB */ + { 145, 2670 }, /* C/N=14.5dB */ + { 150, 2535 }, /* C/N=15.0dB */ + { 160, 2270 }, /* C/N=16.0dB */ + { 170, 2035 }, /* C/N=17.0dB */ + { 180, 1825 }, /* C/N=18.0dB */ + { 190, 1650 }, /* C/N=19.0dB */ + { 200, 1485 }, /* C/N=20.0dB */ + { 210, 1340 }, /* C/N=21.0dB */ + { 220, 1212 }, /* C/N=22.0dB */ + { 230, 1100 }, /* C/N=23.0dB */ + { 240, 1000 }, /* C/N=24.0dB */ + { 250, 910 }, /* C/N=25.0dB */ + { 260, 836 }, /* C/N=26.0dB */ + { 270, 772 }, /* C/N=27.0dB */ + { 280, 718 }, /* C/N=28.0dB */ + { 290, 671 }, /* C/N=29.0dB */ + { 300, 635 }, /* C/N=30.0dB */ + { 310, 602 }, /* C/N=31.0dB */ + { 320, 575 }, /* C/N=32.0dB */ + { 330, 550 }, /* C/N=33.0dB */ + { 350, 517 }, /* C/N=35.0dB */ + { 400, 480 }, /* C/N=40.0dB */ + { 450, 466 }, /* C/N=45.0dB */ + { 500, 464 }, /* C/N=50.0dB */ + { 510, 463 }, /* C/N=51.0dB */ +}; + +static const struct slookup padc_lookup[] = { + { 0, 118000 }, /* PADC= +0dBm */ + { -100, 93600 }, /* PADC= -1dBm */ + { -200, 74500 }, /* PADC= -2dBm */ + { -300, 59100 }, /* PADC= -3dBm */ + { -400, 47000 }, /* PADC= -4dBm */ + { -500, 37300 }, /* PADC= -5dBm */ + { -600, 29650 }, /* PADC= -6dBm */ + { -700, 23520 }, /* PADC= -7dBm */ + { -900, 14850 }, /* PADC= -9dBm */ + { -1100, 9380 }, /* PADC=-11dBm */ + { -1300, 5910 }, /* PADC=-13dBm */ + { -1500, 3730 }, /* PADC=-15dBm */ + { -1700, 2354 }, /* PADC=-17dBm */ + { -1900, 1485 }, /* PADC=-19dBm */ + { -2000, 1179 }, /* PADC=-20dBm */ + { -2100, 1000 }, /* PADC=-21dBm */ +}; + +/********************************************************************* + * Tracking carrier loop carrier QPSK 1/4 to 8PSK 9/10 long Frame + *********************************************************************/ +static const u8 s2car_loop[] = { + /* + * Modcod 2MPon 2MPoff 5MPon 5MPoff 10MPon 10MPoff + * 20MPon 20MPoff 30MPon 30MPoff + */ + + /* FE_QPSK_14 */ + 0x0C, 0x3C, 0x0B, 0x3C, 0x2A, 0x2C, 0x2A, 0x1C, 0x3A, 0x3B, + /* FE_QPSK_13 */ + 0x0C, 0x3C, 0x0B, 0x3C, 0x2A, 0x2C, 0x3A, 0x0C, 0x3A, 0x2B, + /* FE_QPSK_25 */ + 0x1C, 0x3C, 0x1B, 0x3C, 0x3A, 0x1C, 0x3A, 0x3B, 0x3A, 0x2B, + /* FE_QPSK_12 */ + 0x0C, 0x1C, 0x2B, 0x1C, 0x0B, 0x2C, 0x0B, 0x0C, 0x2A, 0x2B, + /* FE_QPSK_35 */ + 0x1C, 0x1C, 0x2B, 0x1C, 0x0B, 0x2C, 0x0B, 0x0C, 0x2A, 0x2B, + /* FE_QPSK_23 */ + 0x2C, 0x2C, 0x2B, 0x1C, 0x0B, 0x2C, 0x0B, 0x0C, 0x2A, 0x2B, + /* FE_QPSK_34 */ + 0x3C, 0x2C, 0x3B, 0x2C, 0x1B, 0x1C, 0x1B, 0x3B, 0x3A, 0x1B, + /* FE_QPSK_45 */ + 0x0D, 0x3C, 0x3B, 0x2C, 0x1B, 0x1C, 0x1B, 0x3B, 0x3A, 0x1B, + /* FE_QPSK_56 */ + 0x1D, 0x3C, 0x0C, 0x2C, 0x2B, 0x1C, 0x1B, 0x3B, 0x0B, 0x1B, + /* FE_QPSK_89 */ + 0x3D, 0x0D, 
0x0C, 0x2C, 0x2B, 0x0C, 0x2B, 0x2B, 0x0B, 0x0B, + /* FE_QPSK_910 */ + 0x1E, 0x0D, 0x1C, 0x2C, 0x3B, 0x0C, 0x2B, 0x2B, 0x1B, 0x0B, + /* FE_8PSK_35 */ + 0x28, 0x09, 0x28, 0x09, 0x28, 0x09, 0x28, 0x08, 0x28, 0x27, + /* FE_8PSK_23 */ + 0x19, 0x29, 0x19, 0x29, 0x19, 0x29, 0x38, 0x19, 0x28, 0x09, + /* FE_8PSK_34 */ + 0x1A, 0x0B, 0x1A, 0x3A, 0x0A, 0x2A, 0x39, 0x2A, 0x39, 0x1A, + /* FE_8PSK_56 */ + 0x2B, 0x2B, 0x1B, 0x1B, 0x0B, 0x1B, 0x1A, 0x0B, 0x1A, 0x1A, + /* FE_8PSK_89 */ + 0x0C, 0x0C, 0x3B, 0x3B, 0x1B, 0x1B, 0x2A, 0x0B, 0x2A, 0x2A, + /* FE_8PSK_910 */ + 0x0C, 0x1C, 0x0C, 0x3B, 0x2B, 0x1B, 0x3A, 0x0B, 0x2A, 0x2A, + + /********************************************************************** + * Tracking carrier loop carrier 16APSK 2/3 to 32APSK 9/10 long Frame + **********************************************************************/ + + /* + * Modcod 2MPon 2MPoff 5MPon 5MPoff 10MPon 10MPoff 20MPon + * 20MPoff 30MPon 30MPoff + */ + + /* FE_16APSK_23 */ + 0x0A, 0x0A, 0x0A, 0x0A, 0x1A, 0x0A, 0x39, 0x0A, 0x29, 0x0A, + /* FE_16APSK_34 */ + 0x0A, 0x0A, 0x0A, 0x0A, 0x0B, 0x0A, 0x2A, 0x0A, 0x1A, 0x0A, + /* FE_16APSK_45 */ + 0x0A, 0x0A, 0x0A, 0x0A, 0x1B, 0x0A, 0x3A, 0x0A, 0x2A, 0x0A, + /* FE_16APSK_56 */ + 0x0A, 0x0A, 0x0A, 0x0A, 0x1B, 0x0A, 0x3A, 0x0A, 0x2A, 0x0A, + /* FE_16APSK_89 */ + 0x0A, 0x0A, 0x0A, 0x0A, 0x2B, 0x0A, 0x0B, 0x0A, 0x3A, 0x0A, + /* FE_16APSK_910 */ + 0x0A, 0x0A, 0x0A, 0x0A, 0x2B, 0x0A, 0x0B, 0x0A, 0x3A, 0x0A, + /* FE_32APSK_34 */ + 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, + /* FE_32APSK_45 */ + 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, + /* FE_32APSK_56 */ + 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, + /* FE_32APSK_89 */ + 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, + /* FE_32APSK_910 */ + 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, +}; + +static u8 get_optim_cloop(struct stv *state, + enum fe_stv0910_mod_cod mod_cod, u32 pilots) +{ + int i = 0; + + if (mod_cod >= FE_32APSK_910) + i = ((int)FE_32APSK_910 - (int)FE_QPSK_14) * 10; + else if (mod_cod >= FE_QPSK_14) + i = ((int)mod_cod - (int)FE_QPSK_14) * 10; + + if (state->symbol_rate <= 3000000) + i += 0; + else if (state->symbol_rate <= 7000000) + i += 2; + else if (state->symbol_rate <= 15000000) + i += 4; + else if (state->symbol_rate <= 25000000) + i += 6; + else + i += 8; + + if (!pilots) + i += 1; + + return s2car_loop[i]; +} + +static int get_cur_symbol_rate(struct stv *state, u32 *p_symbol_rate) +{ + int status = 0; + u8 symb_freq0; + u8 symb_freq1; + u8 symb_freq2; + u8 symb_freq3; + u8 tim_offs0; + u8 tim_offs1; + u8 tim_offs2; + u32 symbol_rate; + s32 timing_offset; + + *p_symbol_rate = 0; + if (!state->started) + return status; + + read_reg(state, RSTV0910_P2_SFR3 + state->regoff, &symb_freq3); + read_reg(state, RSTV0910_P2_SFR2 + state->regoff, &symb_freq2); + read_reg(state, RSTV0910_P2_SFR1 + state->regoff, &symb_freq1); + read_reg(state, RSTV0910_P2_SFR0 + state->regoff, &symb_freq0); + read_reg(state, RSTV0910_P2_TMGREG2 + state->regoff, &tim_offs2); + read_reg(state, RSTV0910_P2_TMGREG1 + state->regoff, &tim_offs1); + read_reg(state, RSTV0910_P2_TMGREG0 + state->regoff, &tim_offs0); + + symbol_rate = ((u32)symb_freq3 << 24) | ((u32)symb_freq2 << 16) | + ((u32)symb_freq1 << 8) | (u32)symb_freq0; + timing_offset = ((u32)tim_offs2 << 16) | ((u32)tim_offs1 << 8) | + (u32)tim_offs0; + + if ((timing_offset & (1 << 23)) != 0) + timing_offset |= 0xFF000000; /* Sign extent */ + + symbol_rate = (u32)(((u64)symbol_rate * state->base->mclk) >> 
32); + timing_offset = (s32)(((s64)symbol_rate * (s64)timing_offset) >> 29); + + *p_symbol_rate = symbol_rate + timing_offset; + + return 0; +} + +static int get_signal_parameters(struct stv *state) +{ + u8 tmp; + + if (!state->started) + return -EINVAL; + + if (state->receive_mode == RCVMODE_DVBS2) { + read_reg(state, RSTV0910_P2_DMDMODCOD + state->regoff, &tmp); + state->mod_cod = (enum fe_stv0910_mod_cod)((tmp & 0x7c) >> 2); + state->pilots = (tmp & 0x01) != 0; + state->fectype = (enum dvbs2_fectype)((tmp & 0x02) >> 1); + + } else if (state->receive_mode == RCVMODE_DVBS) { + read_reg(state, RSTV0910_P2_VITCURPUN + state->regoff, &tmp); + state->puncture_rate = FEC_NONE; + switch (tmp & 0x1F) { + case 0x0d: + state->puncture_rate = FEC_1_2; + break; + case 0x12: + state->puncture_rate = FEC_2_3; + break; + case 0x15: + state->puncture_rate = FEC_3_4; + break; + case 0x18: + state->puncture_rate = FEC_5_6; + break; + case 0x1a: + state->puncture_rate = FEC_7_8; + break; + } + state->is_vcm = 0; + state->is_standard_broadcast = 1; + state->feroll_off = FE_SAT_35; + } + return 0; +} + +static int tracking_optimization(struct stv *state) +{ + u32 symbol_rate = 0; + u8 tmp; + + get_cur_symbol_rate(state, &symbol_rate); + read_reg(state, RSTV0910_P2_DMDCFGMD + state->regoff, &tmp); + tmp &= ~0xC0; + + switch (state->receive_mode) { + case RCVMODE_DVBS: + tmp |= 0x40; + break; + case RCVMODE_DVBS2: + tmp |= 0x80; + break; + default: + tmp |= 0xC0; + break; + } + write_reg(state, RSTV0910_P2_DMDCFGMD + state->regoff, tmp); + + if (state->receive_mode == RCVMODE_DVBS2) { + /* Disable Reed-Solomon */ + write_shared_reg(state, + RSTV0910_TSTTSRS, state->nr ? 0x02 : 0x01, + 0x03); + + if (state->fectype == DVBS2_64K) { + u8 aclc = get_optim_cloop(state, state->mod_cod, + state->pilots); + + if (state->mod_cod <= FE_QPSK_910) { + write_reg(state, RSTV0910_P2_ACLC2S2Q + + state->regoff, aclc); + } else if (state->mod_cod <= FE_8PSK_910) { + write_reg(state, RSTV0910_P2_ACLC2S2Q + + state->regoff, 0x2a); + write_reg(state, RSTV0910_P2_ACLC2S28 + + state->regoff, aclc); + } else if (state->mod_cod <= FE_16APSK_910) { + write_reg(state, RSTV0910_P2_ACLC2S2Q + + state->regoff, 0x2a); + write_reg(state, RSTV0910_P2_ACLC2S216A + + state->regoff, aclc); + } else if (state->mod_cod <= FE_32APSK_910) { + write_reg(state, RSTV0910_P2_ACLC2S2Q + + state->regoff, 0x2a); + write_reg(state, RSTV0910_P2_ACLC2S232A + + state->regoff, aclc); + } + } + } + return 0; +} + +static s32 table_lookup(const struct slookup *table, + int table_size, u32 reg_value) +{ + s32 value; + int imin = 0; + int imax = table_size - 1; + int i; + s32 reg_diff; + + /* Assumes Table[0].RegValue > Table[imax].RegValue */ + if (reg_value >= table[0].reg_value) { + value = table[0].value; + } else if (reg_value <= table[imax].reg_value) { + value = table[imax].value; + } else { + while ((imax - imin) > 1) { + i = (imax + imin) / 2; + if ((table[imin].reg_value >= reg_value) && + (reg_value >= table[i].reg_value)) + imax = i; + else + imin = i; + } + + reg_diff = table[imax].reg_value - table[imin].reg_value; + value = table[imin].value; + if (reg_diff != 0) + value += ((s32)(reg_value - table[imin].reg_value) * + (s32)(table[imax].value + - table[imin].value)) + / (reg_diff); + } + + return value; +} + +static int get_signal_to_noise(struct stv *state, s32 *signal_to_noise) +{ + u8 data0; + u8 data1; + u16 data; + int n_lookup; + const struct slookup *lookup; + + *signal_to_noise = 0; + + if (!state->started) + return -EINVAL; + + if 
(state->receive_mode == RCVMODE_DVBS2) { + read_reg(state, RSTV0910_P2_NNOSPLHT1 + state->regoff, + &data1); + read_reg(state, RSTV0910_P2_NNOSPLHT0 + state->regoff, + &data0); + n_lookup = ARRAY_SIZE(s2_sn_lookup); + lookup = s2_sn_lookup; + } else { + read_reg(state, RSTV0910_P2_NNOSDATAT1 + state->regoff, + &data1); + read_reg(state, RSTV0910_P2_NNOSDATAT0 + state->regoff, + &data0); + n_lookup = ARRAY_SIZE(s1_sn_lookup); + lookup = s1_sn_lookup; + } + data = (((u16)data1) << 8) | (u16)data0; + *signal_to_noise = table_lookup(lookup, n_lookup, data); + return 0; +} + +static int get_bit_error_rate_s(struct stv *state, u32 *bernumerator, + u32 *berdenominator) +{ + u8 regs[3]; + + int status = read_regs(state, + RSTV0910_P2_ERRCNT12 + state->regoff, + regs, 3); + + if (status) + return -EINVAL; + + if ((regs[0] & 0x80) == 0) { + state->last_berdenominator = 1 << ((state->berscale * 2) + + 10 + 3); + state->last_bernumerator = ((u32)(regs[0] & 0x7F) << 16) | + ((u32)regs[1] << 8) | regs[2]; + if (state->last_bernumerator < 256 && state->berscale < 6) { + state->berscale += 1; + status = write_reg(state, RSTV0910_P2_ERRCTRL1 + + state->regoff, + 0x20 | state->berscale); + } else if (state->last_bernumerator > 1024 && + state->berscale > 2) { + state->berscale -= 1; + status = write_reg(state, RSTV0910_P2_ERRCTRL1 + + state->regoff, 0x20 | + state->berscale); + } + } + *bernumerator = state->last_bernumerator; + *berdenominator = state->last_berdenominator; + return 0; +} + +static u32 dvbs2_nbch(enum dvbs2_mod_cod mod_cod, enum dvbs2_fectype fectype) +{ + static const u32 nbch[][2] = { + { 0, 0}, /* DUMMY_PLF */ + {16200, 3240}, /* QPSK_1_4, */ + {21600, 5400}, /* QPSK_1_3, */ + {25920, 6480}, /* QPSK_2_5, */ + {32400, 7200}, /* QPSK_1_2, */ + {38880, 9720}, /* QPSK_3_5, */ + {43200, 10800}, /* QPSK_2_3, */ + {48600, 11880}, /* QPSK_3_4, */ + {51840, 12600}, /* QPSK_4_5, */ + {54000, 13320}, /* QPSK_5_6, */ + {57600, 14400}, /* QPSK_8_9, */ + {58320, 16000}, /* QPSK_9_10, */ + {43200, 9720}, /* 8PSK_3_5, */ + {48600, 10800}, /* 8PSK_2_3, */ + {51840, 11880}, /* 8PSK_3_4, */ + {54000, 13320}, /* 8PSK_5_6, */ + {57600, 14400}, /* 8PSK_8_9, */ + {58320, 16000}, /* 8PSK_9_10, */ + {43200, 10800}, /* 16APSK_2_3, */ + {48600, 11880}, /* 16APSK_3_4, */ + {51840, 12600}, /* 16APSK_4_5, */ + {54000, 13320}, /* 16APSK_5_6, */ + {57600, 14400}, /* 16APSK_8_9, */ + {58320, 16000}, /* 16APSK_9_10 */ + {48600, 11880}, /* 32APSK_3_4, */ + {51840, 12600}, /* 32APSK_4_5, */ + {54000, 13320}, /* 32APSK_5_6, */ + {57600, 14400}, /* 32APSK_8_9, */ + {58320, 16000}, /* 32APSK_9_10 */ + }; + + if (mod_cod >= DVBS2_QPSK_1_4 && + mod_cod <= DVBS2_32APSK_9_10 && fectype <= DVBS2_16K) + return nbch[mod_cod][fectype]; + return 64800; +} + +static int get_bit_error_rate_s2(struct stv *state, u32 *bernumerator, + u32 *berdenominator) +{ + u8 regs[3]; + + int status = read_regs(state, RSTV0910_P2_ERRCNT12 + state->regoff, + regs, 3); + + if (status) + return -EINVAL; + + if ((regs[0] & 0x80) == 0) { + state->last_berdenominator = + dvbs2_nbch((enum dvbs2_mod_cod)state->mod_cod, + state->fectype) << + (state->berscale * 2); + state->last_bernumerator = (((u32)regs[0] & 0x7F) << 16) | + ((u32)regs[1] << 8) | regs[2]; + if (state->last_bernumerator < 256 && state->berscale < 6) { + state->berscale += 1; + write_reg(state, RSTV0910_P2_ERRCTRL1 + state->regoff, + 0x20 | state->berscale); + } else if (state->last_bernumerator > 1024 && + state->berscale > 2) { + state->berscale -= 1; + write_reg(state, RSTV0910_P2_ERRCTRL1 + 
state->regoff, + 0x20 | state->berscale); + } + } + *bernumerator = state->last_bernumerator; + *berdenominator = state->last_berdenominator; + return status; +} + +static int get_bit_error_rate(struct stv *state, u32 *bernumerator, + u32 *berdenominator) +{ + *bernumerator = 0; + *berdenominator = 1; + + switch (state->receive_mode) { + case RCVMODE_DVBS: + return get_bit_error_rate_s(state, + bernumerator, berdenominator); + case RCVMODE_DVBS2: + return get_bit_error_rate_s2(state, + bernumerator, berdenominator); + default: + break; + } + return 0; +} + +static int set_mclock(struct stv *state, u32 master_clock) +{ + u32 idf = 1; + u32 odf = 4; + u32 quartz = state->base->extclk / 1000000; + u32 fphi = master_clock / 1000000; + u32 ndiv = (fphi * odf * idf) / quartz; + u32 cp = 7; + u32 fvco; + + if (ndiv >= 7 && ndiv <= 71) + cp = 7; + else if (ndiv >= 72 && ndiv <= 79) + cp = 8; + else if (ndiv >= 80 && ndiv <= 87) + cp = 9; + else if (ndiv >= 88 && ndiv <= 95) + cp = 10; + else if (ndiv >= 96 && ndiv <= 103) + cp = 11; + else if (ndiv >= 104 && ndiv <= 111) + cp = 12; + else if (ndiv >= 112 && ndiv <= 119) + cp = 13; + else if (ndiv >= 120 && ndiv <= 127) + cp = 14; + else if (ndiv >= 128 && ndiv <= 135) + cp = 15; + else if (ndiv >= 136 && ndiv <= 143) + cp = 16; + else if (ndiv >= 144 && ndiv <= 151) + cp = 17; + else if (ndiv >= 152 && ndiv <= 159) + cp = 18; + else if (ndiv >= 160 && ndiv <= 167) + cp = 19; + else if (ndiv >= 168 && ndiv <= 175) + cp = 20; + else if (ndiv >= 176 && ndiv <= 183) + cp = 21; + else if (ndiv >= 184 && ndiv <= 191) + cp = 22; + else if (ndiv >= 192 && ndiv <= 199) + cp = 23; + else if (ndiv >= 200 && ndiv <= 207) + cp = 24; + else if (ndiv >= 208 && ndiv <= 215) + cp = 25; + else if (ndiv >= 216 && ndiv <= 223) + cp = 26; + else if (ndiv >= 224 && ndiv <= 225) + cp = 27; + + write_reg(state, RSTV0910_NCOARSE, (cp << 3) | idf); + write_reg(state, RSTV0910_NCOARSE2, odf); + write_reg(state, RSTV0910_NCOARSE1, ndiv); + + fvco = (quartz * 2 * ndiv) / idf; + state->base->mclk = fvco / (2 * odf) * 1000000; + + return 0; +} + +static int stop(struct stv *state) +{ + if (state->started) { + u8 tmp; + + write_reg(state, RSTV0910_P2_TSCFGH + state->regoff, + state->tscfgh | 0x01); + read_reg(state, RSTV0910_P2_PDELCTRL1 + state->regoff, &tmp); + tmp &= ~0x01; /* release reset DVBS2 packet delin */ + write_reg(state, RSTV0910_P2_PDELCTRL1 + state->regoff, tmp); + /* Blind optim*/ + write_reg(state, RSTV0910_P2_AGC2O + state->regoff, 0x5B); + /* Stop the demod */ + write_reg(state, RSTV0910_P2_DMDISTATE + state->regoff, 0x5c); + state->started = 0; + } + state->receive_mode = RCVMODE_NONE; + return 0; +} + +static int init_search_param(struct stv *state) +{ + u8 tmp; + + read_reg(state, RSTV0910_P2_PDELCTRL1 + state->regoff, &tmp); + tmp |= 0x20; /* Filter_en (no effect if SIS=non-MIS */ + write_reg(state, RSTV0910_P2_PDELCTRL1 + state->regoff, tmp); + + read_reg(state, RSTV0910_P2_PDELCTRL2 + state->regoff, &tmp); + tmp &= ~0x02; /* frame mode = 0 */ + write_reg(state, RSTV0910_P2_PDELCTRL2 + state->regoff, tmp); + + write_reg(state, RSTV0910_P2_UPLCCST0 + state->regoff, 0xe0); + write_reg(state, RSTV0910_P2_ISIBITENA + state->regoff, 0x00); + + read_reg(state, RSTV0910_P2_TSSTATEM + state->regoff, &tmp); + tmp &= ~0x01; /* nosync = 0, in case next signal is standard TS */ + write_reg(state, RSTV0910_P2_TSSTATEM + state->regoff, tmp); + + read_reg(state, RSTV0910_P2_TSCFGL + state->regoff, &tmp); + tmp &= ~0x04; /* embindvb = 0 */ + write_reg(state, 
RSTV0910_P2_TSCFGL + state->regoff, tmp); + + read_reg(state, RSTV0910_P2_TSINSDELH + state->regoff, &tmp); + tmp &= ~0x80; /* syncbyte = 0 */ + write_reg(state, RSTV0910_P2_TSINSDELH + state->regoff, tmp); + + read_reg(state, RSTV0910_P2_TSINSDELM + state->regoff, &tmp); + tmp &= ~0x08; /* token = 0 */ + write_reg(state, RSTV0910_P2_TSINSDELM + state->regoff, tmp); + + read_reg(state, RSTV0910_P2_TSDLYSET2 + state->regoff, &tmp); + tmp &= ~0x30; /* hysteresis threshold = 0 */ + write_reg(state, RSTV0910_P2_TSDLYSET2 + state->regoff, tmp); + + read_reg(state, RSTV0910_P2_PDELCTRL0 + state->regoff, &tmp); + tmp = (tmp & ~0x30) | 0x10; /* isi obs mode = 1, observe min ISI */ + write_reg(state, RSTV0910_P2_PDELCTRL0 + state->regoff, tmp); + + return 0; +} + +static int enable_puncture_rate(struct stv *state, enum fe_code_rate rate) +{ + switch (rate) { + case FEC_1_2: + return write_reg(state, + RSTV0910_P2_PRVIT + state->regoff, 0x01); + case FEC_2_3: + return write_reg(state, + RSTV0910_P2_PRVIT + state->regoff, 0x02); + case FEC_3_4: + return write_reg(state, + RSTV0910_P2_PRVIT + state->regoff, 0x04); + case FEC_5_6: + return write_reg(state, + RSTV0910_P2_PRVIT + state->regoff, 0x08); + case FEC_7_8: + return write_reg(state, + RSTV0910_P2_PRVIT + state->regoff, 0x20); + case FEC_NONE: + default: + return write_reg(state, + RSTV0910_P2_PRVIT + state->regoff, 0x2f); + } +} + +static int set_vth_default(struct stv *state) +{ + state->vth[0] = 0xd7; + state->vth[1] = 0x85; + state->vth[2] = 0x58; + state->vth[3] = 0x3a; + state->vth[4] = 0x34; + state->vth[5] = 0x28; + write_reg(state, RSTV0910_P2_VTH12 + state->regoff + 0, state->vth[0]); + write_reg(state, RSTV0910_P2_VTH12 + state->regoff + 1, state->vth[1]); + write_reg(state, RSTV0910_P2_VTH12 + state->regoff + 2, state->vth[2]); + write_reg(state, RSTV0910_P2_VTH12 + state->regoff + 3, state->vth[3]); + write_reg(state, RSTV0910_P2_VTH12 + state->regoff + 4, state->vth[4]); + write_reg(state, RSTV0910_P2_VTH12 + state->regoff + 5, state->vth[5]); + return 0; +} + +static int set_vth(struct stv *state) +{ + static const struct slookup vthlookup_table[] = { + {250, 8780}, /* C/N= 1.5dB */ + {100, 7405}, /* C/N= 4.5dB */ + {40, 6330}, /* C/N= 6.5dB */ + {12, 5224}, /* C/N= 8.5dB */ + {5, 4236} /* C/N=10.5dB */ + }; + + int i; + u8 tmp[2]; + int status = read_regs(state, + RSTV0910_P2_NNOSDATAT1 + state->regoff, + tmp, 2); + u16 reg_value = (tmp[0] << 8) | tmp[1]; + s32 vth = table_lookup(vthlookup_table, ARRAY_SIZE(vthlookup_table), + reg_value); + + for (i = 0; i < 6; i += 1) + if (state->vth[i] > vth) + state->vth[i] = vth; + + write_reg(state, RSTV0910_P2_VTH12 + state->regoff + 0, state->vth[0]); + write_reg(state, RSTV0910_P2_VTH12 + state->regoff + 1, state->vth[1]); + write_reg(state, RSTV0910_P2_VTH12 + state->regoff + 2, state->vth[2]); + write_reg(state, RSTV0910_P2_VTH12 + state->regoff + 3, state->vth[3]); + write_reg(state, RSTV0910_P2_VTH12 + state->regoff + 4, state->vth[4]); + write_reg(state, RSTV0910_P2_VTH12 + state->regoff + 5, state->vth[5]); + return status; +} + +static int start(struct stv *state, struct dtv_frontend_properties *p) +{ + s32 freq; + u8 reg_dmdcfgmd; + u16 symb; + u32 scrambling_code = 1; + + if (p->symbol_rate < 100000 || p->symbol_rate > 70000000) + return -EINVAL; + + state->receive_mode = RCVMODE_NONE; + state->demod_lock_time = 0; + + /* Demod Stop */ + if (state->started) + write_reg(state, RSTV0910_P2_DMDISTATE + state->regoff, 0x5C); + + init_search_param(state); + + if (p->stream_id != 
NO_STREAM_ID_FILTER) { + /* + * Backwards compatibility to "crazy" API. + * PRBS X root cannot be 0, so this should always work. + */ + if (p->stream_id & 0xffffff00) + scrambling_code = p->stream_id >> 8; + write_reg(state, RSTV0910_P2_ISIENTRY + state->regoff, + p->stream_id & 0xff); + write_reg(state, RSTV0910_P2_ISIBITENA + state->regoff, + 0xff); + } + + if (scrambling_code != state->cur_scrambling_code) { + write_reg(state, RSTV0910_P2_PLROOT0 + state->regoff, + scrambling_code & 0xff); + write_reg(state, RSTV0910_P2_PLROOT1 + state->regoff, + (scrambling_code >> 8) & 0xff); + write_reg(state, RSTV0910_P2_PLROOT2 + state->regoff, + (scrambling_code >> 16) & 0x0f); + state->cur_scrambling_code = scrambling_code; + } + + if (p->symbol_rate <= 1000000) { /* SR <=1Msps */ + state->demod_timeout = 3000; + state->fec_timeout = 2000; + } else if (p->symbol_rate <= 2000000) { /* 1Msps < SR <=2Msps */ + state->demod_timeout = 2500; + state->fec_timeout = 1300; + } else if (p->symbol_rate <= 5000000) { /* 2Msps< SR <=5Msps */ + state->demod_timeout = 1000; + state->fec_timeout = 650; + } else if (p->symbol_rate <= 10000000) { /* 5Msps< SR <=10Msps */ + state->demod_timeout = 700; + state->fec_timeout = 350; + } else if (p->symbol_rate < 20000000) { /* 10Msps< SR <=20Msps */ + state->demod_timeout = 400; + state->fec_timeout = 200; + } else { /* SR >=20Msps */ + state->demod_timeout = 300; + state->fec_timeout = 200; + } + + /* Set the Init Symbol rate */ + symb = muldiv32(p->symbol_rate, 65536, state->base->mclk); + write_reg(state, RSTV0910_P2_SFRINIT1 + state->regoff, + ((symb >> 8) & 0x7F)); + write_reg(state, RSTV0910_P2_SFRINIT0 + state->regoff, (symb & 0xFF)); + + state->demod_bits |= 0x80; + write_reg(state, RSTV0910_P2_DEMOD + state->regoff, state->demod_bits); + + /* FE_STV0910_SetSearchStandard */ + read_reg(state, RSTV0910_P2_DMDCFGMD + state->regoff, ®_dmdcfgmd); + write_reg(state, RSTV0910_P2_DMDCFGMD + state->regoff, + reg_dmdcfgmd |= 0xC0); + + write_shared_reg(state, + RSTV0910_TSTTSRS, state->nr ? 0x02 : 0x01, 0x00); + + /* Disable DSS */ + write_reg(state, RSTV0910_P2_FECM + state->regoff, 0x00); + write_reg(state, RSTV0910_P2_PRVIT + state->regoff, 0x2F); + + enable_puncture_rate(state, FEC_NONE); + + /* 8PSK 3/5, 8PSK 2/3 Poff tracking optimization WA */ + write_reg(state, RSTV0910_P2_ACLC2S2Q + state->regoff, 0x0B); + write_reg(state, RSTV0910_P2_ACLC2S28 + state->regoff, 0x0A); + write_reg(state, RSTV0910_P2_BCLC2S2Q + state->regoff, 0x84); + write_reg(state, RSTV0910_P2_BCLC2S28 + state->regoff, 0x84); + write_reg(state, RSTV0910_P2_CARHDR + state->regoff, 0x1C); + write_reg(state, RSTV0910_P2_CARFREQ + state->regoff, 0x79); + + write_reg(state, RSTV0910_P2_ACLC2S216A + state->regoff, 0x29); + write_reg(state, RSTV0910_P2_ACLC2S232A + state->regoff, 0x09); + write_reg(state, RSTV0910_P2_BCLC2S216A + state->regoff, 0x84); + write_reg(state, RSTV0910_P2_BCLC2S232A + state->regoff, 0x84); + + /* + * Reset CAR3, bug DVBS2->DVBS1 lock + * Note: The bit is only pulsed -> no lock on shared register needed + */ + write_reg(state, RSTV0910_TSTRES0, state->nr ? 
0x04 : 0x08); + write_reg(state, RSTV0910_TSTRES0, 0); + + set_vth_default(state); + /* Reset demod */ + write_reg(state, RSTV0910_P2_DMDISTATE + state->regoff, 0x1F); + + write_reg(state, RSTV0910_P2_CARCFG + state->regoff, 0x46); + + if (p->symbol_rate <= 5000000) + freq = (state->search_range / 2000) + 80; + else + freq = (state->search_range / 2000) + 1600; + freq = (freq << 16) / (state->base->mclk / 1000); + + write_reg(state, RSTV0910_P2_CFRUP1 + state->regoff, + (freq >> 8) & 0xff); + write_reg(state, RSTV0910_P2_CFRUP0 + state->regoff, (freq & 0xff)); + /* CFR Low Setting */ + freq = -freq; + write_reg(state, RSTV0910_P2_CFRLOW1 + state->regoff, + (freq >> 8) & 0xff); + write_reg(state, RSTV0910_P2_CFRLOW0 + state->regoff, (freq & 0xff)); + + /* init the demod frequency offset to 0 */ + write_reg(state, RSTV0910_P2_CFRINIT1 + state->regoff, 0); + write_reg(state, RSTV0910_P2_CFRINIT0 + state->regoff, 0); + + write_reg(state, RSTV0910_P2_DMDISTATE + state->regoff, 0x1F); + /* Trigger acq */ + write_reg(state, RSTV0910_P2_DMDISTATE + state->regoff, 0x15); + + state->demod_lock_time += TUNING_DELAY; + state->started = 1; + + return 0; +} + +static int init_diseqc(struct stv *state) +{ + u16 offs = state->nr ? 0x40 : 0; /* Address offset */ + u8 freq = ((state->base->mclk + 11000 * 32) / (22000 * 32)); + + /* Disable receiver */ + write_reg(state, RSTV0910_P1_DISRXCFG + offs, 0x00); + write_reg(state, RSTV0910_P1_DISTXCFG + offs, 0xBA); /* Reset = 1 */ + write_reg(state, RSTV0910_P1_DISTXCFG + offs, 0x3A); /* Reset = 0 */ + write_reg(state, RSTV0910_P1_DISTXF22 + offs, freq); + return 0; +} + +static int probe(struct stv *state) +{ + u8 id; + + state->receive_mode = RCVMODE_NONE; + state->started = 0; + + if (read_reg(state, RSTV0910_MID, &id) < 0) + return -ENODEV; + + if (id != 0x51) + return -EINVAL; + + /* Configure the I2C repeater to off */ + write_reg(state, RSTV0910_P1_I2CRPT, 0x24); + /* Configure the I2C repeater to off */ + write_reg(state, RSTV0910_P2_I2CRPT, 0x24); + /* Set the I2C to oversampling ratio */ + write_reg(state, RSTV0910_I2CCFG, 0x88); /* state->i2ccfg */ + + write_reg(state, RSTV0910_OUTCFG, 0x00); /* OUTCFG */ + write_reg(state, RSTV0910_PADCFG, 0x05); /* RFAGC Pads Dev = 05 */ + write_reg(state, RSTV0910_SYNTCTRL, 0x02); /* SYNTCTRL */ + write_reg(state, RSTV0910_TSGENERAL, state->tsgeneral); /* TSGENERAL */ + write_reg(state, RSTV0910_CFGEXT, 0x02); /* CFGEXT */ + + if (state->single) + write_reg(state, RSTV0910_GENCFG, 0x14); /* GENCFG */ + else + write_reg(state, RSTV0910_GENCFG, 0x15); /* GENCFG */ + + write_reg(state, RSTV0910_P1_TNRCFG2, 0x02); /* IQSWAP = 0 */ + write_reg(state, RSTV0910_P2_TNRCFG2, 0x82); /* IQSWAP = 1 */ + + write_reg(state, RSTV0910_P1_CAR3CFG, 0x02); + write_reg(state, RSTV0910_P2_CAR3CFG, 0x02); + write_reg(state, RSTV0910_P1_DMDCFG4, 0x04); + write_reg(state, RSTV0910_P2_DMDCFG4, 0x04); + + write_reg(state, RSTV0910_TSTRES0, 0x80); /* LDPC Reset */ + write_reg(state, RSTV0910_TSTRES0, 0x00); + + write_reg(state, RSTV0910_P1_TSPIDFLT1, 0x00); + write_reg(state, RSTV0910_P2_TSPIDFLT1, 0x00); + + write_reg(state, RSTV0910_P1_TMGCFG2, 0x80); + write_reg(state, RSTV0910_P2_TMGCFG2, 0x80); + + set_mclock(state, 135000000); + + /* TS output */ + write_reg(state, RSTV0910_P1_TSCFGH, state->tscfgh | 0x01); + write_reg(state, RSTV0910_P1_TSCFGH, state->tscfgh); + write_reg(state, RSTV0910_P1_TSCFGM, 0xC0); /* Manual speed */ + write_reg(state, RSTV0910_P1_TSCFGL, 0x20); + + /* Speed = 67.5 MHz */ + write_reg(state, RSTV0910_P1_TSSPEED, 
state->tsspeed); + + write_reg(state, RSTV0910_P2_TSCFGH, state->tscfgh | 0x01); + write_reg(state, RSTV0910_P2_TSCFGH, state->tscfgh); + write_reg(state, RSTV0910_P2_TSCFGM, 0xC0); /* Manual speed */ + write_reg(state, RSTV0910_P2_TSCFGL, 0x20); + + /* Speed = 67.5 MHz */ + write_reg(state, RSTV0910_P2_TSSPEED, state->tsspeed); + + /* Reset stream merger */ + write_reg(state, RSTV0910_P1_TSCFGH, state->tscfgh | 0x01); + write_reg(state, RSTV0910_P2_TSCFGH, state->tscfgh | 0x01); + write_reg(state, RSTV0910_P1_TSCFGH, state->tscfgh); + write_reg(state, RSTV0910_P2_TSCFGH, state->tscfgh); + + write_reg(state, RSTV0910_P1_I2CRPT, state->i2crpt); + write_reg(state, RSTV0910_P2_I2CRPT, state->i2crpt); + + init_diseqc(state); + return 0; +} + +static int gate_ctrl(struct dvb_frontend *fe, int enable) +{ + struct stv *state = fe->demodulator_priv; + u8 i2crpt = state->i2crpt & ~0x86; + + /* + * mutex_lock note: Concurrent I2C gate bus accesses must be + * prevented (STV0910 = dual demod on a single IC with a single I2C + * gate/bus, and two tuners attached), similar to most (if not all) + * other I2C host interfaces/busses. + * + * enable=1 (open I2C gate) will grab the lock + * enable=0 (close I2C gate) releases the lock + */ + + if (enable) { + mutex_lock(&state->base->i2c_lock); + i2crpt |= 0x80; + } else { + i2crpt |= 0x02; + } + + if (write_reg(state, state->nr ? RSTV0910_P2_I2CRPT : + RSTV0910_P1_I2CRPT, i2crpt) < 0) { + /* don't hold the I2C bus lock on failure */ + mutex_unlock(&state->base->i2c_lock); + dev_err(&state->base->i2c->dev, + "%s() write_reg failure (enable=%d)\n", + __func__, enable); + return -EIO; + } + + state->i2crpt = i2crpt; + + if (!enable) + mutex_unlock(&state->base->i2c_lock); + return 0; +} + +static void release(struct dvb_frontend *fe) +{ + struct stv *state = fe->demodulator_priv; + + state->base->count--; + if (state->base->count == 0) { + list_del(&state->base->stvlist); + kfree(state->base); + } + kfree(state); +} + +static int set_parameters(struct dvb_frontend *fe) +{ + int stat = 0; + struct stv *state = fe->demodulator_priv; + u32 iffreq; + struct dtv_frontend_properties *p = &fe->dtv_property_cache; + + stop(state); + if (fe->ops.tuner_ops.set_params) + fe->ops.tuner_ops.set_params(fe); + if (fe->ops.tuner_ops.get_if_frequency) + fe->ops.tuner_ops.get_if_frequency(fe, &iffreq); + state->symbol_rate = p->symbol_rate; + stat = start(state, p); + return stat; +} + +static int manage_matype_info(struct stv *state) +{ + if (!state->started) + return -EINVAL; + if (state->receive_mode == RCVMODE_DVBS2) { + u8 bbheader[2]; + + read_regs(state, RSTV0910_P2_MATSTR1 + state->regoff, + bbheader, 2); + state->feroll_off = + (enum fe_stv0910_roll_off)(bbheader[0] & 0x03); + state->is_vcm = (bbheader[0] & 0x10) == 0; + state->is_standard_broadcast = (bbheader[0] & 0xFC) == 0xF0; + } else if (state->receive_mode == RCVMODE_DVBS) { + state->is_vcm = 0; + state->is_standard_broadcast = 1; + state->feroll_off = FE_SAT_35; + } + return 0; +} + +static int read_snr(struct dvb_frontend *fe) +{ + struct stv *state = fe->demodulator_priv; + struct dtv_frontend_properties *p = &fe->dtv_property_cache; + s32 snrval; + + if (!get_signal_to_noise(state, &snrval)) { + p->cnr.stat[0].scale = FE_SCALE_DECIBEL; + p->cnr.stat[0].uvalue = 100 * snrval; /* fix scale */ + } else { + p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; + } + + return 0; +} + +static int read_ber(struct dvb_frontend *fe) +{ + struct stv *state = fe->demodulator_priv; + struct dtv_frontend_properties *p = 
&fe->dtv_property_cache;
+ u32 n, d;
+
+ get_bit_error_rate(state, &n, &d);
+
+ p->pre_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+ p->pre_bit_error.stat[0].uvalue = n;
+ p->pre_bit_count.stat[0].scale = FE_SCALE_COUNTER;
+ p->pre_bit_count.stat[0].uvalue = d;
+
+ return 0;
+}
+
+static void read_signal_strength(struct dvb_frontend *fe)
+{
+ struct stv *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *p = &state->fe.dtv_property_cache;
+ u8 reg[2];
+ u16 agc;
+ s32 padc, power = 0;
+ int i;
+
+ read_regs(state, RSTV0910_P2_AGCIQIN1 + state->regoff, reg, 2);
+
+ agc = (((u32)reg[0]) << 8) | reg[1];
+
+ for (i = 0; i < 5; i += 1) {
+ read_regs(state, RSTV0910_P2_POWERI + state->regoff, reg, 2);
+ power += (u32)reg[0] * (u32)reg[0] +
+ (u32)reg[1] * (u32)reg[1];
+ usleep_range(3000, 4000);
+ }
+ power /= 5;
+
+ padc = table_lookup(padc_lookup, ARRAY_SIZE(padc_lookup), power) + 352;
+
+ p->strength.stat[0].scale = FE_SCALE_DECIBEL;
+ p->strength.stat[0].svalue = (padc - agc);
+}
+
+static int read_status(struct dvb_frontend *fe, enum fe_status *status)
+{
+ struct stv *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ u8 dmd_state = 0;
+ u8 dstatus = 0;
+ enum receive_mode cur_receive_mode = RCVMODE_NONE;
+ u32 feclock = 0;
+
+ *status = 0;
+
+ read_reg(state, RSTV0910_P2_DMDSTATE + state->regoff, &dmd_state);
+
+ if (dmd_state & 0x40) {
+ read_reg(state, RSTV0910_P2_DSTATUS + state->regoff, &dstatus);
+ if (dstatus & 0x08)
+ cur_receive_mode = (dmd_state & 0x20) ?
+ RCVMODE_DVBS : RCVMODE_DVBS2;
+ }
+ if (cur_receive_mode == RCVMODE_NONE) {
+ set_vth(state);
+
+ /* reset signal statistics */
+ p->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->pre_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->pre_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
+ return 0;
+ }
+
+ *status |= (FE_HAS_SIGNAL
+ | FE_HAS_CARRIER
+ | FE_HAS_VITERBI
+ | FE_HAS_SYNC);
+
+ if (state->receive_mode == RCVMODE_NONE) {
+ state->receive_mode = cur_receive_mode;
+ state->demod_lock_time = jiffies;
+ state->first_time_lock = 1;
+
+ get_signal_parameters(state);
+ tracking_optimization(state);
+
+ write_reg(state, RSTV0910_P2_TSCFGH + state->regoff,
+ state->tscfgh);
+ usleep_range(3000, 4000);
+ write_reg(state, RSTV0910_P2_TSCFGH + state->regoff,
+ state->tscfgh | 0x01);
+ write_reg(state, RSTV0910_P2_TSCFGH + state->regoff,
+ state->tscfgh);
+ }
+ if (dmd_state & 0x40) {
+ if (state->receive_mode == RCVMODE_DVBS2) {
+ u8 pdelstatus;
+
+ read_reg(state,
+ RSTV0910_P2_PDELSTATUS1 + state->regoff,
+ &pdelstatus);
+ feclock = (pdelstatus & 0x02) != 0;
+ } else {
+ u8 vstatus;
+
+ read_reg(state,
+ RSTV0910_P2_VSTATUSVIT + state->regoff,
+ &vstatus);
+ feclock = (vstatus & 0x08) != 0;
+ }
+ }
+
+ if (feclock) {
+ *status |= FE_HAS_LOCK;
+
+ if (state->first_time_lock) {
+ u8 tmp;
+
+ state->first_time_lock = 0;
+
+ manage_matype_info(state);
+
+ if (state->receive_mode == RCVMODE_DVBS2) {
+ /*
+ * FSTV0910_P2_MANUALSX_ROLLOFF,
+ * FSTV0910_P2_MANUALS2_ROLLOFF = 0
+ */
+ state->demod_bits &= ~0x84;
+ write_reg(state,
+ RSTV0910_P2_DEMOD + state->regoff,
+ state->demod_bits);
+ read_reg(state,
+ RSTV0910_P2_PDELCTRL2 + state->regoff,
+ &tmp);
+ /* reset DVBS2 packet delineator error counter */
+ tmp |= 0x40;
+ write_reg(state,
+ RSTV0910_P2_PDELCTRL2 + state->regoff,
+ tmp);
+ /* reset DVBS2 packet delineator error counter */
+ tmp &= ~0x40;
+ write_reg(state,
+ RSTV0910_P2_PDELCTRL2 + state->regoff,
+ tmp);
+
+ state->berscale = 2;
+ state->last_bernumerator = 0;
+ state->last_berdenominator = 1;
+ /* force to PRE BCH Rate */
+ write_reg(state,
+ RSTV0910_P2_ERRCTRL1 + state->regoff,
+ BER_SRC_S2 | state->berscale);
+ } else {
+ state->berscale = 2;
+ state->last_bernumerator = 0;
+ state->last_berdenominator = 1;
+ /* force to PRE RS Rate */
+ write_reg(state,
+ RSTV0910_P2_ERRCTRL1 + state->regoff,
+ BER_SRC_S | state->berscale);
+ }
+ /* Reset the Total packet counter */
+ write_reg(state,
+ RSTV0910_P2_FBERCPT4 + state->regoff, 0x00);
+ /*
+ * Reset the packet error counter 2 (and set it to
+ * infinite error count mode)
+ */
+ write_reg(state,
+ RSTV0910_P2_ERRCTRL2 + state->regoff, 0xc1);
+
+ set_vth_default(state);
+ if (state->receive_mode == RCVMODE_DVBS)
+ enable_puncture_rate(state,
+ state->puncture_rate);
+ }
+ }
+
+ /* read signal statistics */
+
+ /* read signal strength */
+ read_signal_strength(fe);
+
+ /* read carrier/noise on FE_HAS_CARRIER */
+ if (*status & FE_HAS_CARRIER)
+ read_snr(fe);
+ else
+ p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
+ /* read ber */
+ if (*status & FE_HAS_VITERBI) {
+ read_ber(fe);
+ } else {
+ p->pre_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->pre_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ }
+
+ return 0;
+}
+
+static int get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *p)
+{
+ struct stv *state = fe->demodulator_priv;
+ u8 tmp;
+
+ if (state->receive_mode == RCVMODE_DVBS2) {
+ u32 mc;
+ const enum fe_modulation modcod2mod[0x20] = {
+ QPSK, QPSK, QPSK, QPSK,
+ QPSK, QPSK, QPSK, QPSK,
+ QPSK, QPSK, QPSK, QPSK,
+ PSK_8, PSK_8, PSK_8, PSK_8,
+ PSK_8, PSK_8, APSK_16, APSK_16,
+ APSK_16, APSK_16, APSK_16, APSK_16,
+ APSK_32, APSK_32, APSK_32, APSK_32,
+ APSK_32,
+ };
+ const enum fe_code_rate modcod2fec[0x20] = {
+ FEC_NONE, FEC_NONE, FEC_NONE, FEC_2_5,
+ FEC_1_2, FEC_3_5, FEC_2_3, FEC_3_4,
+ FEC_4_5, FEC_5_6, FEC_8_9, FEC_9_10,
+ FEC_3_5, FEC_2_3, FEC_3_4, FEC_5_6,
+ FEC_8_9, FEC_9_10, FEC_2_3, FEC_3_4,
+ FEC_4_5, FEC_5_6, FEC_8_9, FEC_9_10,
+ FEC_3_4, FEC_4_5, FEC_5_6, FEC_8_9,
+ FEC_9_10
+ };
+ read_reg(state, RSTV0910_P2_DMDMODCOD + state->regoff, &tmp);
+ mc = ((tmp & 0x7c) >> 2);
+ p->pilot = (tmp & 0x01) ? PILOT_ON : PILOT_OFF;
+ p->modulation = modcod2mod[mc];
+ p->fec_inner = modcod2fec[mc];
+ } else if (state->receive_mode == RCVMODE_DVBS) {
+ read_reg(state, RSTV0910_P2_VITCURPUN + state->regoff, &tmp);
+ switch (tmp & 0x1F) {
+ case 0x0d:
+ p->fec_inner = FEC_1_2;
+ break;
+ case 0x12:
+ p->fec_inner = FEC_2_3;
+ break;
+ case 0x15:
+ p->fec_inner = FEC_3_4;
+ break;
+ case 0x18:
+ p->fec_inner = FEC_5_6;
+ break;
+ case 0x1a:
+ p->fec_inner = FEC_7_8;
+ break;
+ default:
+ p->fec_inner = FEC_NONE;
+ break;
+ }
+ p->rolloff = ROLLOFF_35;
+ }
+
+ return 0;
+}
+
+static int tune(struct dvb_frontend *fe, bool re_tune,
+ unsigned int mode_flags,
+ unsigned int *delay, enum fe_status *status)
+{
+ struct stv *state = fe->demodulator_priv;
+ int r;
+
+ if (re_tune) {
+ r = set_parameters(fe);
+ if (r)
+ return r;
+ state->tune_time = jiffies;
+ }
+
+ r = read_status(fe, status);
+ if (r)
+ return r;
+
+ if (*status & FE_HAS_LOCK)
+ return 0;
+ *delay = HZ;
+
+ return 0;
+}
+
+static int get_algo(struct dvb_frontend *fe)
+{
+ return DVBFE_ALGO_HW;
+}
+
+static int set_tone(struct dvb_frontend *fe, enum fe_sec_tone_mode tone)
+{
+ struct stv *state = fe->demodulator_priv;
+ u16 offs = state->nr ?
0x40 : 0; + + switch (tone) { + case SEC_TONE_ON: + return write_reg(state, RSTV0910_P1_DISTXCFG + offs, 0x38); + case SEC_TONE_OFF: + return write_reg(state, RSTV0910_P1_DISTXCFG + offs, 0x3a); + default: + break; + } + return -EINVAL; +} + +static int wait_dis(struct stv *state, u8 flag, u8 val) +{ + int i; + u8 stat; + u16 offs = state->nr ? 0x40 : 0; + + for (i = 0; i < 10; i++) { + read_reg(state, RSTV0910_P1_DISTXSTATUS + offs, &stat); + if ((stat & flag) == val) + return 0; + usleep_range(10000, 11000); + } + return -ETIMEDOUT; +} + +static int send_master_cmd(struct dvb_frontend *fe, + struct dvb_diseqc_master_cmd *cmd) +{ + struct stv *state = fe->demodulator_priv; + u16 offs = state->nr ? 0x40 : 0; + int i; + + write_reg(state, RSTV0910_P1_DISTXCFG + offs, 0x3E); + for (i = 0; i < cmd->msg_len; i++) { + wait_dis(state, 0x40, 0x00); + write_reg(state, RSTV0910_P1_DISTXFIFO + offs, cmd->msg[i]); + } + write_reg(state, RSTV0910_P1_DISTXCFG + offs, 0x3A); + wait_dis(state, 0x20, 0x20); + return 0; +} + +static int send_burst(struct dvb_frontend *fe, enum fe_sec_mini_cmd burst) +{ + struct stv *state = fe->demodulator_priv; + u16 offs = state->nr ? 0x40 : 0; + u8 value; + + if (burst == SEC_MINI_A) { + write_reg(state, RSTV0910_P1_DISTXCFG + offs, 0x3F); + value = 0x00; + } else { + write_reg(state, RSTV0910_P1_DISTXCFG + offs, 0x3E); + value = 0xFF; + } + wait_dis(state, 0x40, 0x00); + write_reg(state, RSTV0910_P1_DISTXFIFO + offs, value); + write_reg(state, RSTV0910_P1_DISTXCFG + offs, 0x3A); + wait_dis(state, 0x20, 0x20); + + return 0; +} + +static int sleep(struct dvb_frontend *fe) +{ + struct stv *state = fe->demodulator_priv; + + stop(state); + return 0; +} + +static const struct dvb_frontend_ops stv0910_ops = { + .delsys = { SYS_DVBS, SYS_DVBS2, SYS_DSS }, + .info = { + .name = "ST STV0910", + .frequency_min = 950000, + .frequency_max = 2150000, + .frequency_stepsize = 0, + .frequency_tolerance = 0, + .symbol_rate_min = 100000, + .symbol_rate_max = 70000000, + .caps = FE_CAN_INVERSION_AUTO | + FE_CAN_FEC_AUTO | + FE_CAN_QPSK | + FE_CAN_2G_MODULATION | + FE_CAN_MULTISTREAM + }, + .sleep = sleep, + .release = release, + .i2c_gate_ctrl = gate_ctrl, + .set_frontend = set_parameters, + .get_frontend_algo = get_algo, + .get_frontend = get_frontend, + .tune = tune, + .read_status = read_status, + .set_tone = set_tone, + + .diseqc_send_master_cmd = send_master_cmd, + .diseqc_send_burst = send_burst, +}; + +static struct stv_base *match_base(struct i2c_adapter *i2c, u8 adr) +{ + struct stv_base *p; + + list_for_each_entry(p, &stvlist, stvlist) + if (p->i2c == i2c && p->adr == adr) + return p; + return NULL; +} + +static void stv0910_init_stats(struct stv *state) +{ + struct dtv_frontend_properties *p = &state->fe.dtv_property_cache; + + p->strength.len = 1; + p->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE; + p->cnr.len = 1; + p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; + p->pre_bit_error.len = 1; + p->pre_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; + p->pre_bit_count.len = 1; + p->pre_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; +} + +struct dvb_frontend *stv0910_attach(struct i2c_adapter *i2c, + struct stv0910_cfg *cfg, + int nr) +{ + struct stv *state; + struct stv_base *base; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return NULL; + + state->tscfgh = 0x20 | (cfg->parallel ? 0 : 0x40); + state->tsgeneral = (cfg->parallel == 2) ? 
0x02 : 0x00;
+ state->i2crpt = 0x0A | ((cfg->rptlvl & 0x07) << 4);
+ state->tsspeed = 0x28;
+ state->nr = nr;
+ state->regoff = state->nr ? 0 : 0x200;
+ state->search_range = 16000000;
+ state->demod_bits = 0x10; /* Inversion : Auto with reset to 0 */
+ state->receive_mode = RCVMODE_NONE;
+ state->cur_scrambling_code = (~0U);
+ state->single = cfg->single ? 1 : 0;
+
+ base = match_base(i2c, cfg->adr);
+ if (base) {
+ base->count++;
+ state->base = base;
+ } else {
+ base = kzalloc(sizeof(*base), GFP_KERNEL);
+ if (!base)
+ goto fail;
+ base->i2c = i2c;
+ base->adr = cfg->adr;
+ base->count = 1;
+ base->extclk = cfg->clk ? cfg->clk : 30000000;
+
+ mutex_init(&base->i2c_lock);
+ mutex_init(&base->reg_lock);
+ state->base = base;
+ if (probe(state) < 0) {
+ dev_info(&i2c->dev, "No demod found at adr %02X on %s\n",
+ cfg->adr, dev_name(&i2c->dev));
+ kfree(base);
+ goto fail;
+ }
+ list_add(&base->stvlist, &stvlist);
+ }
+ state->fe.ops = stv0910_ops;
+ state->fe.demodulator_priv = state;
+ state->nr = nr;
+
+ dev_info(&i2c->dev, "%s demod found at adr %02X on %s\n",
+ state->fe.ops.info.name, cfg->adr, dev_name(&i2c->dev));
+
+ stv0910_init_stats(state);
+
+ return &state->fe;
+
+fail:
+ kfree(state);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(stv0910_attach);
+
+MODULE_DESCRIPTION("ST STV0910 multistandard frontend driver");
+MODULE_AUTHOR("Ralph and Marcus Metzler, Manfred Voelkel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-frontends/stv0910.h b/drivers/media/dvb-frontends/stv0910.h
new file mode 100644
index 000000000000..fccd8d9b665f
--- /dev/null
+++ b/drivers/media/dvb-frontends/stv0910.h
@@ -0,0 +1,32 @@
+#ifndef _STV0910_H_
+#define _STV0910_H_
+
+#include <linux/types.h>
+#include <linux/i2c.h>
+
+struct stv0910_cfg {
+ u32 clk;
+ u8 adr;
+ u8 parallel;
+ u8 rptlvl;
+ u8 single;
+};
+
+#if IS_REACHABLE(CONFIG_DVB_STV0910)
+
+struct dvb_frontend *stv0910_attach(struct i2c_adapter *i2c,
+ struct stv0910_cfg *cfg, int nr);
+
+#else
+
+static inline struct dvb_frontend *stv0910_attach(struct i2c_adapter *i2c,
+ struct stv0910_cfg *cfg,
+ int nr)
+{
+ pr_warn("%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+
+#endif /* CONFIG_DVB_STV0910 */
+
+#endif /* _STV0910_H_ */
diff --git a/drivers/media/dvb-frontends/stv0910_regs.h b/drivers/media/dvb-frontends/stv0910_regs.h
new file mode 100644
index 000000000000..32ced4eaf296
--- /dev/null
+++ b/drivers/media/dvb-frontends/stv0910_regs.h
@@ -0,0 +1,4760 @@
+/*
+ * @DVB-S/DVB-S2 STMicroelectronics STV0900 register definitions
+ * Author Manfred Voelkel, August 2013
+ * (c) 2013 Digital Devices GmbH Germany. All rights reserved
+ *
+ * =======================================================================
+ * Registers Declaration (Internal ST, All Applications )
+ * -------------------------
+ * Each register (RSTV0910__XXXXX) is defined by its address (2 bytes).
+ * + * Each field (FSTV0910__XXXXX)is defined as follow: + * [register address -- 2bytes][field sign -- 1byte][field mask -- 1byte] + * ====================================================================== + */ + +/* MID */ +#define RSTV0910_MID 0xf100 +#define FSTV0910_MCHIP_IDENT 0xf10000f0 +#define FSTV0910_MRELEASE 0xf100000f + +/* DID */ +#define RSTV0910_DID 0xf101 +#define FSTV0910_DEVICE_ID 0xf10100ff + +/* DACR1 */ +#define RSTV0910_DACR1 0xf113 +#define FSTV0910_DAC_MODE 0xf11300e0 +#define FSTV0910_DAC_VALUE1 0xf113000f + +/* DACR2 */ +#define RSTV0910_DACR2 0xf114 +#define FSTV0910_DAC_VALUE0 0xf11400ff + +/* PADCFG */ +#define RSTV0910_PADCFG 0xf11a +#define FSTV0910_AGCRF2_OPD 0xf11a0008 +#define FSTV0910_AGCRF2_XOR 0xf11a0004 +#define FSTV0910_AGCRF1_OPD 0xf11a0002 +#define FSTV0910_AGCRF1_XOR 0xf11a0001 + +/* OUTCFG2 */ +#define RSTV0910_OUTCFG2 0xf11b +#define FSTV0910_TS2_ERROR_XOR 0xf11b0080 +#define FSTV0910_TS2_DPN_XOR 0xf11b0040 +#define FSTV0910_TS2_STROUT_XOR 0xf11b0020 +#define FSTV0910_TS2_CLOCKOUT_XOR 0xf11b0010 +#define FSTV0910_TS1_ERROR_XOR 0xf11b0008 +#define FSTV0910_TS1_DPN_XOR 0xf11b0004 +#define FSTV0910_TS1_STROUT_XOR 0xf11b0002 +#define FSTV0910_TS1_CLOCKOUT_XOR 0xf11b0001 + +/* OUTCFG */ +#define RSTV0910_OUTCFG 0xf11c +#define FSTV0910_TS2_OUTSER_HZ 0xf11c0020 +#define FSTV0910_TS1_OUTSER_HZ 0xf11c0010 +#define FSTV0910_TS2_OUTPAR_HZ 0xf11c0008 +#define FSTV0910_TS1_OUTPAR_HZ 0xf11c0004 +#define FSTV0910_TS_SERDATA0 0xf11c0002 + +/* IRQSTATUS3 */ +#define RSTV0910_IRQSTATUS3 0xf120 +#define FSTV0910_SPLL_LOCK 0xf1200020 +#define FSTV0910_SSTREAM_LCK_1 0xf1200010 +#define FSTV0910_SSTREAM_LCK_2 0xf1200008 +#define FSTV0910_SDVBS1_PRF_2 0xf1200002 +#define FSTV0910_SDVBS1_PRF_1 0xf1200001 + +/* IRQSTATUS2 */ +#define RSTV0910_IRQSTATUS2 0xf121 +#define FSTV0910_SSPY_ENDSIM_1 0xf1210080 +#define FSTV0910_SSPY_ENDSIM_2 0xf1210040 +#define FSTV0910_SPKTDEL_ERROR_2 0xf1210010 +#define FSTV0910_SPKTDEL_LOCKB_2 0xf1210008 +#define FSTV0910_SPKTDEL_LOCK_2 0xf1210004 +#define FSTV0910_SPKTDEL_ERROR_1 0xf1210002 +#define FSTV0910_SPKTDEL_LOCKB_1 0xf1210001 + +/* IRQSTATUS1 */ +#define RSTV0910_IRQSTATUS1 0xf122 +#define FSTV0910_SPKTDEL_LOCK_1 0xf1220080 +#define FSTV0910_SFEC_LOCKB_2 0xf1220040 +#define FSTV0910_SFEC_LOCK_2 0xf1220020 +#define FSTV0910_SFEC_LOCKB_1 0xf1220010 +#define FSTV0910_SFEC_LOCK_1 0xf1220008 +#define FSTV0910_SDEMOD_LOCKB_2 0xf1220004 +#define FSTV0910_SDEMOD_LOCK_2 0xf1220002 +#define FSTV0910_SDEMOD_IRQ_2 0xf1220001 + +/* IRQSTATUS0 */ +#define RSTV0910_IRQSTATUS0 0xf123 +#define FSTV0910_SDEMOD_LOCKB_1 0xf1230080 +#define FSTV0910_SDEMOD_LOCK_1 0xf1230040 +#define FSTV0910_SDEMOD_IRQ_1 0xf1230020 +#define FSTV0910_SBCH_ERRFLAG 0xf1230010 +#define FSTV0910_SDISEQC2_IRQ 0xf1230004 +#define FSTV0910_SDISEQC1_IRQ 0xf1230001 + +/* IRQMASK3 */ +#define RSTV0910_IRQMASK3 0xf124 +#define FSTV0910_MPLL_LOCK 0xf1240020 +#define FSTV0910_MSTREAM_LCK_1 0xf1240010 +#define FSTV0910_MSTREAM_LCK_2 0xf1240008 +#define FSTV0910_MDVBS1_PRF_2 0xf1240002 +#define FSTV0910_MDVBS1_PRF_1 0xf1240001 + +/* IRQMASK2 */ +#define RSTV0910_IRQMASK2 0xf125 +#define FSTV0910_MSPY_ENDSIM_1 0xf1250080 +#define FSTV0910_MSPY_ENDSIM_2 0xf1250040 +#define FSTV0910_MPKTDEL_ERROR_2 0xf1250010 +#define FSTV0910_MPKTDEL_LOCKB_2 0xf1250008 +#define FSTV0910_MPKTDEL_LOCK_2 0xf1250004 +#define FSTV0910_MPKTDEL_ERROR_1 0xf1250002 +#define FSTV0910_MPKTDEL_LOCKB_1 0xf1250001 + +/* IRQMASK1 */ +#define RSTV0910_IRQMASK1 0xf126 +#define FSTV0910_MPKTDEL_LOCK_1 0xf1260080 
+#define FSTV0910_MFEC_LOCKB_2 0xf1260040 +#define FSTV0910_MFEC_LOCK_2 0xf1260020 +#define FSTV0910_MFEC_LOCKB_1 0xf1260010 +#define FSTV0910_MFEC_LOCK_1 0xf1260008 +#define FSTV0910_MDEMOD_LOCKB_2 0xf1260004 +#define FSTV0910_MDEMOD_LOCK_2 0xf1260002 +#define FSTV0910_MDEMOD_IRQ_2 0xf1260001 + +/* IRQMASK0 */ +#define RSTV0910_IRQMASK0 0xf127 +#define FSTV0910_MDEMOD_LOCKB_1 0xf1270080 +#define FSTV0910_MDEMOD_LOCK_1 0xf1270040 +#define FSTV0910_MDEMOD_IRQ_1 0xf1270020 +#define FSTV0910_MBCH_ERRFLAG 0xf1270010 +#define FSTV0910_MDISEQC2_IRQ 0xf1270004 +#define FSTV0910_MDISEQC1_IRQ 0xf1270001 + +/* I2CCFG */ +#define RSTV0910_I2CCFG 0xf129 +#define FSTV0910_I2C_FASTMODE 0xf1290008 +#define FSTV0910_I2CADDR_INC 0xf1290003 + +/* P1_I2CRPT */ +#define RSTV0910_P1_I2CRPT 0xf12a +#define FSTV0910_P1_I2CT_ON 0xf12a0080 +#define FSTV0910_P1_ENARPT_LEVEL 0xf12a0070 +#define FSTV0910_P1_SCLT_DELAY 0xf12a0008 +#define FSTV0910_P1_STOP_ENABLE 0xf12a0004 +#define FSTV0910_P1_STOP_SDAT2SDA 0xf12a0002 + +/* P2_I2CRPT */ +#define RSTV0910_P2_I2CRPT 0xf12b +#define FSTV0910_P2_I2CT_ON 0xf12b0080 +#define FSTV0910_P2_ENARPT_LEVEL 0xf12b0070 +#define FSTV0910_P2_SCLT_DELAY 0xf12b0008 +#define FSTV0910_P2_STOP_ENABLE 0xf12b0004 +#define FSTV0910_P2_STOP_SDAT2SDA 0xf12b0002 + +/* GPIO0CFG */ +#define RSTV0910_GPIO0CFG 0xf140 +#define FSTV0910_GPIO0_OPD 0xf1400080 +#define FSTV0910_GPIO0_CONFIG 0xf140007e +#define FSTV0910_GPIO0_XOR 0xf1400001 + +/* GPIO1CFG */ +#define RSTV0910_GPIO1CFG 0xf141 +#define FSTV0910_GPIO1_OPD 0xf1410080 +#define FSTV0910_GPIO1_CONFIG 0xf141007e +#define FSTV0910_GPIO1_XOR 0xf1410001 + +/* GPIO2CFG */ +#define RSTV0910_GPIO2CFG 0xf142 +#define FSTV0910_GPIO2_OPD 0xf1420080 +#define FSTV0910_GPIO2_CONFIG 0xf142007e +#define FSTV0910_GPIO2_XOR 0xf1420001 + +/* GPIO3CFG */ +#define RSTV0910_GPIO3CFG 0xf143 +#define FSTV0910_GPIO3_OPD 0xf1430080 +#define FSTV0910_GPIO3_CONFIG 0xf143007e +#define FSTV0910_GPIO3_XOR 0xf1430001 + +/* GPIO4CFG */ +#define RSTV0910_GPIO4CFG 0xf144 +#define FSTV0910_GPIO4_OPD 0xf1440080 +#define FSTV0910_GPIO4_CONFIG 0xf144007e +#define FSTV0910_GPIO4_XOR 0xf1440001 + +/* GPIO5CFG */ +#define RSTV0910_GPIO5CFG 0xf145 +#define FSTV0910_GPIO5_OPD 0xf1450080 +#define FSTV0910_GPIO5_CONFIG 0xf145007e +#define FSTV0910_GPIO5_XOR 0xf1450001 + +/* GPIO6CFG */ +#define RSTV0910_GPIO6CFG 0xf146 +#define FSTV0910_GPIO6_OPD 0xf1460080 +#define FSTV0910_GPIO6_CONFIG 0xf146007e +#define FSTV0910_GPIO6_XOR 0xf1460001 + +/* GPIO7CFG */ +#define RSTV0910_GPIO7CFG 0xf147 +#define FSTV0910_GPIO7_OPD 0xf1470080 +#define FSTV0910_GPIO7_CONFIG 0xf147007e +#define FSTV0910_GPIO7_XOR 0xf1470001 + +/* GPIO8CFG */ +#define RSTV0910_GPIO8CFG 0xf148 +#define FSTV0910_GPIO8_OPD 0xf1480080 +#define FSTV0910_GPIO8_CONFIG 0xf148007e +#define FSTV0910_GPIO8_XOR 0xf1480001 + +/* GPIO9CFG */ +#define RSTV0910_GPIO9CFG 0xf149 +#define FSTV0910_GPIO9_OPD 0xf1490080 +#define FSTV0910_GPIO9_CONFIG 0xf149007e +#define FSTV0910_GPIO9_XOR 0xf1490001 + +/* GPIO10CFG */ +#define RSTV0910_GPIO10CFG 0xf14a +#define FSTV0910_GPIO10_OPD 0xf14a0080 +#define FSTV0910_GPIO10_CONFIG 0xf14a007e +#define FSTV0910_GPIO10_XOR 0xf14a0001 + +/* GPIO11CFG */ +#define RSTV0910_GPIO11CFG 0xf14b +#define FSTV0910_GPIO11_OPD 0xf14b0080 +#define FSTV0910_GPIO11_CONFIG 0xf14b007e +#define FSTV0910_GPIO11_XOR 0xf14b0001 + +/* GPIO12CFG */ +#define RSTV0910_GPIO12CFG 0xf14c +#define FSTV0910_GPIO12_OPD 0xf14c0080 +#define FSTV0910_GPIO12_CONFIG 0xf14c007e +#define FSTV0910_GPIO12_XOR 0xf14c0001 + +/* GPIO13CFG */ 
+#define RSTV0910_GPIO13CFG 0xf14d +#define FSTV0910_GPIO13_OPD 0xf14d0080 +#define FSTV0910_GPIO13_CONFIG 0xf14d007e +#define FSTV0910_GPIO13_XOR 0xf14d0001 + +/* GPIO14CFG */ +#define RSTV0910_GPIO14CFG 0xf14e +#define FSTV0910_GPIO14_OPD 0xf14e0080 +#define FSTV0910_GPIO14_CONFIG 0xf14e007e +#define FSTV0910_GPIO14_XOR 0xf14e0001 + +/* GPIO15CFG */ +#define RSTV0910_GPIO15CFG 0xf14f +#define FSTV0910_GPIO15_OPD 0xf14f0080 +#define FSTV0910_GPIO15_CONFIG 0xf14f007e +#define FSTV0910_GPIO15_XOR 0xf14f0001 + +/* GPIO16CFG */ +#define RSTV0910_GPIO16CFG 0xf150 +#define FSTV0910_GPIO16_OPD 0xf1500080 +#define FSTV0910_GPIO16_CONFIG 0xf150007e +#define FSTV0910_GPIO16_XOR 0xf1500001 + +/* GPIO17CFG */ +#define RSTV0910_GPIO17CFG 0xf151 +#define FSTV0910_GPIO17_OPD 0xf1510080 +#define FSTV0910_GPIO17_CONFIG 0xf151007e +#define FSTV0910_GPIO17_XOR 0xf1510001 + +/* GPIO18CFG */ +#define RSTV0910_GPIO18CFG 0xf152 +#define FSTV0910_GPIO18_OPD 0xf1520080 +#define FSTV0910_GPIO18_CONFIG 0xf152007e +#define FSTV0910_GPIO18_XOR 0xf1520001 + +/* GPIO19CFG */ +#define RSTV0910_GPIO19CFG 0xf153 +#define FSTV0910_GPIO19_OPD 0xf1530080 +#define FSTV0910_GPIO19_CONFIG 0xf153007e +#define FSTV0910_GPIO19_XOR 0xf1530001 + +/* GPIO20CFG */ +#define RSTV0910_GPIO20CFG 0xf154 +#define FSTV0910_GPIO20_OPD 0xf1540080 +#define FSTV0910_GPIO20_CONFIG 0xf154007e +#define FSTV0910_GPIO20_XOR 0xf1540001 + +/* GPIO21CFG */ +#define RSTV0910_GPIO21CFG 0xf155 +#define FSTV0910_GPIO21_OPD 0xf1550080 +#define FSTV0910_GPIO21_CONFIG 0xf155007e +#define FSTV0910_GPIO21_XOR 0xf1550001 + +/* GPIO22CFG */ +#define RSTV0910_GPIO22CFG 0xf156 +#define FSTV0910_GPIO22_OPD 0xf1560080 +#define FSTV0910_GPIO22_CONFIG 0xf156007e +#define FSTV0910_GPIO22_XOR 0xf1560001 + +/* STRSTATUS1 */ +#define RSTV0910_STRSTATUS1 0xf16a +#define FSTV0910_STRSTATUS_SEL2 0xf16a00f0 +#define FSTV0910_STRSTATUS_SEL1 0xf16a000f + +/* STRSTATUS2 */ +#define RSTV0910_STRSTATUS2 0xf16b +#define FSTV0910_STRSTATUS_SEL4 0xf16b00f0 +#define FSTV0910_STRSTATUS_SEL3 0xf16b000f + +/* STRSTATUS3 */ +#define RSTV0910_STRSTATUS3 0xf16c +#define FSTV0910_STRSTATUS_SEL6 0xf16c00f0 +#define FSTV0910_STRSTATUS_SEL5 0xf16c000f + +/* FSKTFC2 */ +#define RSTV0910_FSKTFC2 0xf170 +#define FSTV0910_FSKT_KMOD 0xf17000fc +#define FSTV0910_FSKT_CAR2 0xf1700003 + +/* FSKTFC1 */ +#define RSTV0910_FSKTFC1 0xf171 +#define FSTV0910_FSKT_CAR1 0xf17100ff + +/* FSKTFC0 */ +#define RSTV0910_FSKTFC0 0xf172 +#define FSTV0910_FSKT_CAR0 0xf17200ff + +/* FSKTDELTAF1 */ +#define RSTV0910_FSKTDELTAF1 0xf173 +#define FSTV0910_FSKT_DELTAF1 0xf173000f + +/* FSKTDELTAF0 */ +#define RSTV0910_FSKTDELTAF0 0xf174 +#define FSTV0910_FSKT_DELTAF0 0xf17400ff + +/* FSKTCTRL */ +#define RSTV0910_FSKTCTRL 0xf175 +#define FSTV0910_FSKT_PINSEL 0xf1750080 +#define FSTV0910_FSKT_EN_SGN 0xf1750040 +#define FSTV0910_FSKT_MOD_SGN 0xf1750020 +#define FSTV0910_FSKT_MOD_EN 0xf175001c +#define FSTV0910_FSKT_DACMODE 0xf1750003 + +/* FSKRFC2 */ +#define RSTV0910_FSKRFC2 0xf176 +#define FSTV0910_FSKR_DETSGN 0xf1760040 +#define FSTV0910_FSKR_OUTSGN 0xf1760020 +#define FSTV0910_FSKR_KAGC 0xf176001c +#define FSTV0910_FSKR_CAR2 0xf1760003 + +/* FSKRFC1 */ +#define RSTV0910_FSKRFC1 0xf177 +#define FSTV0910_FSKR_CAR1 0xf17700ff + +/* FSKRFC0 */ +#define RSTV0910_FSKRFC0 0xf178 +#define FSTV0910_FSKR_CAR0 0xf17800ff + +/* FSKRK1 */ +#define RSTV0910_FSKRK1 0xf179 +#define FSTV0910_FSKR_K1_EXP 0xf17900e0 +#define FSTV0910_FSKR_K1_MANT 0xf179001f + +/* FSKRK2 */ +#define RSTV0910_FSKRK2 0xf17a +#define FSTV0910_FSKR_K2_EXP 
0xf17a00e0 +#define FSTV0910_FSKR_K2_MANT 0xf17a001f + +/* FSKRAGCR */ +#define RSTV0910_FSKRAGCR 0xf17b +#define FSTV0910_FSKR_OUTCTL 0xf17b00c0 +#define FSTV0910_FSKR_AGC_REF 0xf17b003f + +/* FSKRAGC */ +#define RSTV0910_FSKRAGC 0xf17c +#define FSTV0910_FSKR_AGC_ACCU 0xf17c00ff + +/* FSKRALPHA */ +#define RSTV0910_FSKRALPHA 0xf17d +#define FSTV0910_FSKR_ALPHA_EXP 0xf17d001c +#define FSTV0910_FSKR_ALPHA_M 0xf17d0003 + +/* FSKRPLTH1 */ +#define RSTV0910_FSKRPLTH1 0xf17e +#define FSTV0910_FSKR_BETA 0xf17e00f0 +#define FSTV0910_FSKR_PLL_TRESH1 0xf17e000f + +/* FSKRPLTH0 */ +#define RSTV0910_FSKRPLTH0 0xf17f +#define FSTV0910_FSKR_PLL_TRESH0 0xf17f00ff + +/* FSKRDF1 */ +#define RSTV0910_FSKRDF1 0xf180 +#define FSTV0910_FSKR_OUT 0xf1800080 +#define FSTV0910_FSKR_STATE 0xf1800060 +#define FSTV0910_FSKR_DELTAF1 0xf180001f + +/* FSKRDF0 */ +#define RSTV0910_FSKRDF0 0xf181 +#define FSTV0910_FSKR_DELTAF0 0xf18100ff + +/* FSKRSTEPP */ +#define RSTV0910_FSKRSTEPP 0xf182 +#define FSTV0910_FSKR_STEP_PLUS 0xf18200ff + +/* FSKRSTEPM */ +#define RSTV0910_FSKRSTEPM 0xf183 +#define FSTV0910_FSKR_STEP_MINUS 0xf18300ff + +/* FSKRDET1 */ +#define RSTV0910_FSKRDET1 0xf184 +#define FSTV0910_FSKR_DETECT 0xf1840080 +#define FSTV0910_FSKR_CARDET_ACCU1 0xf184000f + +/* FSKRDET0 */ +#define RSTV0910_FSKRDET0 0xf185 +#define FSTV0910_FSKR_CARDET_ACCU0 0xf18500ff + +/* FSKRDTH1 */ +#define RSTV0910_FSKRDTH1 0xf186 +#define FSTV0910_FSKR_CARLOSS_THRESH1 0xf18600f0 +#define FSTV0910_FSKR_CARDET_THRESH1 0xf186000f + +/* FSKRDTH0 */ +#define RSTV0910_FSKRDTH0 0xf187 +#define FSTV0910_FSKR_CARDET_THRESH0 0xf18700ff + +/* FSKRLOSS */ +#define RSTV0910_FSKRLOSS 0xf188 +#define FSTV0910_FSKR_CARLOSS_THRESH0 0xf18800ff + +/* NCOARSE */ +#define RSTV0910_NCOARSE 0xf1b3 +#define FSTV0910_CP 0xf1b300f8 +#define FSTV0910_IDF 0xf1b30007 + +/* NCOARSE1 */ +#define RSTV0910_NCOARSE1 0xf1b4 +#define FSTV0910_N_DIV 0xf1b400ff + +/* NCOARSE2 */ +#define RSTV0910_NCOARSE2 0xf1b5 +#define FSTV0910_ODF 0xf1b5003f + +/* SYNTCTRL */ +#define RSTV0910_SYNTCTRL 0xf1b6 +#define FSTV0910_STANDBY 0xf1b60080 +#define FSTV0910_BYPASSPLLCORE 0xf1b60040 +#define FSTV0910_STOP_PLL 0xf1b60008 +#define FSTV0910_OSCI_E 0xf1b60002 + +/* FILTCTRL */ +#define RSTV0910_FILTCTRL 0xf1b7 +#define FSTV0910_INV_CLKFSK 0xf1b70002 +#define FSTV0910_BYPASS_APPLI 0xf1b70001 + +/* PLLSTAT */ +#define RSTV0910_PLLSTAT 0xf1b8 +#define FSTV0910_PLLLOCK 0xf1b80001 + +/* STOPCLK1 */ +#define RSTV0910_STOPCLK1 0xf1c2 +#define FSTV0910_INV_CLKADCI2 0xf1c20004 +#define FSTV0910_INV_CLKADCI1 0xf1c20001 + +/* STOPCLK2 */ +#define RSTV0910_STOPCLK2 0xf1c3 +#define FSTV0910_STOP_DVBS2FEC2 0xf1c30020 +#define FSTV0910_STOP_DVBS2FEC 0xf1c30010 +#define FSTV0910_STOP_DVBS1FEC2 0xf1c30008 +#define FSTV0910_STOP_DVBS1FEC 0xf1c30004 +#define FSTV0910_STOP_DEMOD2 0xf1c30002 +#define FSTV0910_STOP_DEMOD 0xf1c30001 + +/* PREGCTL */ +#define RSTV0910_PREGCTL 0xf1c8 +#define FSTV0910_REG3V3TO2V5_POFF 0xf1c80080 + +/* TSTTNR0 */ +#define RSTV0910_TSTTNR0 0xf1df +#define FSTV0910_FSK_PON 0xf1df0004 + +/* TSTTNR1 */ +#define RSTV0910_TSTTNR1 0xf1e0 +#define FSTV0910_ADC1_PON 0xf1e00002 + +/* TSTTNR2 */ +#define RSTV0910_TSTTNR2 0xf1e1 +#define FSTV0910_I2C_DISEQC_PON 0xf1e10020 +#define FSTV0910_DISEQC_CLKDIV 0xf1e1000f + +/* TSTTNR3 */ +#define RSTV0910_TSTTNR3 0xf1e2 +#define FSTV0910_ADC2_PON 0xf1e20002 + +/* P2_IQCONST */ +#define RSTV0910_P2_IQCONST 0xf200 +#define FSTV0910_P2_CONSTEL_SELECT 0xf2000060 +#define FSTV0910_P2_IQSYMB_SEL 0xf200001f + +/* P2_NOSCFG */ +#define 
RSTV0910_P2_NOSCFG 0xf201 +#define FSTV0910_P2_DUMMYPL_NOSDATA 0xf2010020 +#define FSTV0910_P2_NOSPLH_BETA 0xf2010018 +#define FSTV0910_P2_NOSDATA_BETA 0xf2010007 + +/* P2_ISYMB */ +#define RSTV0910_P2_ISYMB 0xf202 +#define FSTV0910_P2_I_SYMBOL 0xf20201ff + +/* P2_QSYMB */ +#define RSTV0910_P2_QSYMB 0xf203 +#define FSTV0910_P2_Q_SYMBOL 0xf20301ff + +/* P2_AGC1CFG */ +#define RSTV0910_P2_AGC1CFG 0xf204 +#define FSTV0910_P2_DC_FROZEN 0xf2040080 +#define FSTV0910_P2_DC_CORRECT 0xf2040040 +#define FSTV0910_P2_AMM_FROZEN 0xf2040020 +#define FSTV0910_P2_AMM_CORRECT 0xf2040010 +#define FSTV0910_P2_QUAD_FROZEN 0xf2040008 +#define FSTV0910_P2_QUAD_CORRECT 0xf2040004 + +/* P2_AGC1CN */ +#define RSTV0910_P2_AGC1CN 0xf206 +#define FSTV0910_P2_AGC1_LOCKED 0xf2060080 +#define FSTV0910_P2_AGC1_MINPOWER 0xf2060010 +#define FSTV0910_P2_AGCOUT_FAST 0xf2060008 +#define FSTV0910_P2_AGCIQ_BETA 0xf2060007 + +/* P2_AGC1REF */ +#define RSTV0910_P2_AGC1REF 0xf207 +#define FSTV0910_P2_AGCIQ_REF 0xf20700ff + +/* P2_IDCCOMP */ +#define RSTV0910_P2_IDCCOMP 0xf208 +#define FSTV0910_P2_IAVERAGE_ADJ 0xf20801ff + +/* P2_QDCCOMP */ +#define RSTV0910_P2_QDCCOMP 0xf209 +#define FSTV0910_P2_QAVERAGE_ADJ 0xf20901ff + +/* P2_POWERI */ +#define RSTV0910_P2_POWERI 0xf20a +#define FSTV0910_P2_POWER_I 0xf20a00ff + +/* P2_POWERQ */ +#define RSTV0910_P2_POWERQ 0xf20b +#define FSTV0910_P2_POWER_Q 0xf20b00ff + +/* P2_AGC1AMM */ +#define RSTV0910_P2_AGC1AMM 0xf20c +#define FSTV0910_P2_AMM_VALUE 0xf20c00ff + +/* P2_AGC1QUAD */ +#define RSTV0910_P2_AGC1QUAD 0xf20d +#define FSTV0910_P2_QUAD_VALUE 0xf20d01ff + +/* P2_AGCIQIN1 */ +#define RSTV0910_P2_AGCIQIN1 0xf20e +#define FSTV0910_P2_AGCIQ_VALUE1 0xf20e00ff + +/* P2_AGCIQIN0 */ +#define RSTV0910_P2_AGCIQIN0 0xf20f +#define FSTV0910_P2_AGCIQ_VALUE0 0xf20f00ff + +/* P2_DEMOD */ +#define RSTV0910_P2_DEMOD 0xf210 +#define FSTV0910_P2_MANUALS2_ROLLOFF 0xf2100080 +#define FSTV0910_P2_SPECINV_CONTROL 0xf2100030 +#define FSTV0910_P2_MANUALSX_ROLLOFF 0xf2100004 +#define FSTV0910_P2_ROLLOFF_CONTROL 0xf2100003 + +/* P2_DMDMODCOD */ +#define RSTV0910_P2_DMDMODCOD 0xf211 +#define FSTV0910_P2_MANUAL_MODCOD 0xf2110080 +#define FSTV0910_P2_DEMOD_MODCOD 0xf211007c +#define FSTV0910_P2_DEMOD_TYPE 0xf2110003 + +/* P2_DSTATUS */ +#define RSTV0910_P2_DSTATUS 0xf212 +#define FSTV0910_P2_CAR_LOCK 0xf2120080 +#define FSTV0910_P2_TMGLOCK_QUALITY 0xf2120060 +#define FSTV0910_P2_LOCK_DEFINITIF 0xf2120008 +#define FSTV0910_P2_OVADC_DETECT 0xf2120001 + +/* P2_DSTATUS2 */ +#define RSTV0910_P2_DSTATUS2 0xf213 +#define FSTV0910_P2_DEMOD_DELOCK 0xf2130080 +#define FSTV0910_P2_MODCODRQ_SYNCTAG 0xf2130020 +#define FSTV0910_P2_POLYPH_SATEVENT 0xf2130010 +#define FSTV0910_P2_AGC1_NOSIGNALACK 0xf2130008 +#define FSTV0910_P2_AGC2_OVERFLOW 0xf2130004 +#define FSTV0910_P2_CFR_OVERFLOW 0xf2130002 +#define FSTV0910_P2_GAMMA_OVERUNDER 0xf2130001 + +/* P2_DMDCFGMD */ +#define RSTV0910_P2_DMDCFGMD 0xf214 +#define FSTV0910_P2_DVBS2_ENABLE 0xf2140080 +#define FSTV0910_P2_DVBS1_ENABLE 0xf2140040 +#define FSTV0910_P2_SCAN_ENABLE 0xf2140010 +#define FSTV0910_P2_CFR_AUTOSCAN 0xf2140008 +#define FSTV0910_P2_TUN_RNG 0xf2140003 + +/* P2_DMDCFG2 */ +#define RSTV0910_P2_DMDCFG2 0xf215 +#define FSTV0910_P2_S1S2_SEQUENTIAL 0xf2150040 +#define FSTV0910_P2_INFINITE_RELOCK 0xf2150010 + +/* P2_DMDISTATE */ +#define RSTV0910_P2_DMDISTATE 0xf216 +#define FSTV0910_P2_I2C_NORESETDMODE 0xf2160080 +#define FSTV0910_P2_I2C_DEMOD_MODE 0xf216001f + +/* P2_DMDT0M */ +#define RSTV0910_P2_DMDT0M 0xf217 +#define FSTV0910_P2_DMDT0_MIN 0xf21700ff + +/* P2_DMDSTATE 
*/ +#define RSTV0910_P2_DMDSTATE 0xf21b +#define FSTV0910_P2_HEADER_MODE 0xf21b0060 + +/* P2_DMDFLYW */ +#define RSTV0910_P2_DMDFLYW 0xf21c +#define FSTV0910_P2_I2C_IRQVAL 0xf21c00f0 +#define FSTV0910_P2_FLYWHEEL_CPT 0xf21c000f + +/* P2_DSTATUS3 */ +#define RSTV0910_P2_DSTATUS3 0xf21d +#define FSTV0910_P2_CFR_ZIGZAG 0xf21d0080 +#define FSTV0910_P2_DEMOD_CFGMODE 0xf21d0060 +#define FSTV0910_P2_GAMMA_LOWBAUDRATE 0xf21d0010 + +/* P2_DMDCFG3 */ +#define RSTV0910_P2_DMDCFG3 0xf21e +#define FSTV0910_P2_NOSTOP_FIFOFULL 0xf21e0008 + +/* P2_DMDCFG4 */ +#define RSTV0910_P2_DMDCFG4 0xf21f +#define FSTV0910_P2_DIS_VITLOCK 0xf21f0080 +#define FSTV0910_P2_DIS_CLKENABLE 0xf21f0004 + +/* P2_CORRELMANT */ +#define RSTV0910_P2_CORRELMANT 0xf220 +#define FSTV0910_P2_CORREL_MANT 0xf22000ff + +/* P2_CORRELABS */ +#define RSTV0910_P2_CORRELABS 0xf221 +#define FSTV0910_P2_CORREL_ABS 0xf22100ff + +/* P2_CORRELEXP */ +#define RSTV0910_P2_CORRELEXP 0xf222 +#define FSTV0910_P2_CORREL_ABSEXP 0xf22200f0 +#define FSTV0910_P2_CORREL_EXP 0xf222000f + +/* P2_PLHMODCOD */ +#define RSTV0910_P2_PLHMODCOD 0xf224 +#define FSTV0910_P2_SPECINV_DEMOD 0xf2240080 +#define FSTV0910_P2_PLH_MODCOD 0xf224007c +#define FSTV0910_P2_PLH_TYPE 0xf2240003 + +/* P2_DMDREG */ +#define RSTV0910_P2_DMDREG 0xf225 +#define FSTV0910_P2_DECIM_PLFRAMES 0xf2250001 + +/* P2_AGCNADJ */ +#define RSTV0910_P2_AGCNADJ 0xf226 +#define FSTV0910_P2_RADJOFF_AGC2 0xf2260080 +#define FSTV0910_P2_RADJOFF_AGC1 0xf2260040 +#define FSTV0910_P2_AGC_NADJ 0xf226013f + +/* P2_AGCKS */ +#define RSTV0910_P2_AGCKS 0xf227 +#define FSTV0910_P2_RSADJ_MANUALCFG 0xf2270080 +#define FSTV0910_P2_RSADJ_CCMMODE 0xf2270040 +#define FSTV0910_P2_RADJ_SPSK 0xf227013f + +/* P2_AGCKQ */ +#define RSTV0910_P2_AGCKQ 0xf228 +#define FSTV0910_P2_RADJON_DVBS1 0xf2280040 +#define FSTV0910_P2_RADJ_QPSK 0xf228013f + +/* P2_AGCK8 */ +#define RSTV0910_P2_AGCK8 0xf229 +#define FSTV0910_P2_RADJ_8PSK 0xf229013f + +/* P2_AGCK16 */ +#define RSTV0910_P2_AGCK16 0xf22a +#define FSTV0910_P2_R2ADJOFF_16APSK 0xf22a0040 +#define FSTV0910_P2_R1ADJOFF_16APSK 0xf22a0020 +#define FSTV0910_P2_RADJ_16APSK 0xf22a011f + +/* P2_AGCK32 */ +#define RSTV0910_P2_AGCK32 0xf22b +#define FSTV0910_P2_R3ADJOFF_32APSK 0xf22b0080 +#define FSTV0910_P2_R2ADJOFF_32APSK 0xf22b0040 +#define FSTV0910_P2_R1ADJOFF_32APSK 0xf22b0020 +#define FSTV0910_P2_RADJ_32APSK 0xf22b011f + +/* P2_AGC2O */ +#define RSTV0910_P2_AGC2O 0xf22c +#define FSTV0910_P2_CSTENV_MODE 0xf22c00c0 +#define FSTV0910_P2_AGC2_COEF 0xf22c0007 + +/* P2_AGC2REF */ +#define RSTV0910_P2_AGC2REF 0xf22d +#define FSTV0910_P2_AGC2_REF 0xf22d00ff + +/* P2_AGC1ADJ */ +#define RSTV0910_P2_AGC1ADJ 0xf22e +#define FSTV0910_P2_AGC1_ADJUSTED 0xf22e007f + +/* P2_AGCRSADJ */ +#define RSTV0910_P2_AGCRSADJ 0xf22f +#define FSTV0910_P2_RS_ADJUSTED 0xf22f007f + +/* P2_AGCRQADJ */ +#define RSTV0910_P2_AGCRQADJ 0xf230 +#define FSTV0910_P2_RQ_ADJUSTED 0xf230007f + +/* P2_AGCR8ADJ */ +#define RSTV0910_P2_AGCR8ADJ 0xf231 +#define FSTV0910_P2_R8_ADJUSTED 0xf231007f + +/* P2_AGCR1ADJ */ +#define RSTV0910_P2_AGCR1ADJ 0xf232 +#define FSTV0910_P2_R1_ADJUSTED 0xf232007f + +/* P2_AGCR2ADJ */ +#define RSTV0910_P2_AGCR2ADJ 0xf233 +#define FSTV0910_P2_R2_ADJUSTED 0xf233007f + +/* P2_AGCR3ADJ */ +#define RSTV0910_P2_AGCR3ADJ 0xf234 +#define FSTV0910_P2_R3_ADJUSTED 0xf234007f + +/* P2_AGCREFADJ */ +#define RSTV0910_P2_AGCREFADJ 0xf235 +#define FSTV0910_P2_AGC2REF_ADJUSTED 0xf235007f + +/* P2_AGC2I1 */ +#define RSTV0910_P2_AGC2I1 0xf236 +#define FSTV0910_P2_AGC2_INTEGRATOR1 0xf23600ff + +/* P2_AGC2I0 */ 
+#define RSTV0910_P2_AGC2I0 0xf237 +#define FSTV0910_P2_AGC2_INTEGRATOR0 0xf23700ff + +/* P2_CARCFG */ +#define RSTV0910_P2_CARCFG 0xf238 +#define FSTV0910_P2_ROTAON 0xf2380004 +#define FSTV0910_P2_PH_DET_ALGO 0xf2380003 + +/* P2_ACLC */ +#define RSTV0910_P2_ACLC 0xf239 +#define FSTV0910_P2_CAR_ALPHA_MANT 0xf2390030 +#define FSTV0910_P2_CAR_ALPHA_EXP 0xf239000f + +/* P2_BCLC */ +#define RSTV0910_P2_BCLC 0xf23a +#define FSTV0910_P2_CAR_BETA_MANT 0xf23a0030 +#define FSTV0910_P2_CAR_BETA_EXP 0xf23a000f + +/* P2_ACLCS2 */ +#define RSTV0910_P2_ACLCS2 0xf23b +#define FSTV0910_P2_CARS2_APLHA_MANTISSE 0xf23b0030 +#define FSTV0910_P2_CARS2_ALPHA_EXP 0xf23b000f + +/* P2_BCLCS2 */ +#define RSTV0910_P2_BCLCS2 0xf23c +#define FSTV0910_P2_CARS2_BETA_MANTISSE 0xf23c0030 +#define FSTV0910_P2_CARS2_BETA_EXP 0xf23c000f + +/* P2_CARFREQ */ +#define RSTV0910_P2_CARFREQ 0xf23d +#define FSTV0910_P2_KC_COARSE_EXP 0xf23d00f0 +#define FSTV0910_P2_BETA_FREQ 0xf23d000f + +/* P2_CARHDR */ +#define RSTV0910_P2_CARHDR 0xf23e +#define FSTV0910_P2_K_FREQ_HDR 0xf23e00ff + +/* P2_LDT */ +#define RSTV0910_P2_LDT 0xf23f +#define FSTV0910_P2_CARLOCK_THRES 0xf23f01ff + +/* P2_LDT2 */ +#define RSTV0910_P2_LDT2 0xf240 +#define FSTV0910_P2_CARLOCK_THRES2 0xf24001ff + +/* P2_CFRICFG */ +#define RSTV0910_P2_CFRICFG 0xf241 +#define FSTV0910_P2_NEG_CFRSTEP 0xf2410001 + +/* P2_CFRUP1 */ +#define RSTV0910_P2_CFRUP1 0xf242 +#define FSTV0910_P2_CFR_UP1 0xf24201ff + +/* P2_CFRUP0 */ +#define RSTV0910_P2_CFRUP0 0xf243 +#define FSTV0910_P2_CFR_UP0 0xf24300ff + +/* P2_CFRIBASE1 */ +#define RSTV0910_P2_CFRIBASE1 0xf244 +#define FSTV0910_P2_CFRINIT_BASE1 0xf24400ff + +/* P2_CFRIBASE0 */ +#define RSTV0910_P2_CFRIBASE0 0xf245 +#define FSTV0910_P2_CFRINIT_BASE0 0xf24500ff + +/* P2_CFRLOW1 */ +#define RSTV0910_P2_CFRLOW1 0xf246 +#define FSTV0910_P2_CFR_LOW1 0xf24601ff + +/* P2_CFRLOW0 */ +#define RSTV0910_P2_CFRLOW0 0xf247 +#define FSTV0910_P2_CFR_LOW0 0xf24700ff + +/* P2_CFRINIT1 */ +#define RSTV0910_P2_CFRINIT1 0xf248 +#define FSTV0910_P2_CFR_INIT1 0xf24801ff + +/* P2_CFRINIT0 */ +#define RSTV0910_P2_CFRINIT0 0xf249 +#define FSTV0910_P2_CFR_INIT0 0xf24900ff + +/* P2_CFRINC1 */ +#define RSTV0910_P2_CFRINC1 0xf24a +#define FSTV0910_P2_MANUAL_CFRINC 0xf24a0080 +#define FSTV0910_P2_CFR_INC1 0xf24a003f + +/* P2_CFRINC0 */ +#define RSTV0910_P2_CFRINC0 0xf24b +#define FSTV0910_P2_CFR_INC0 0xf24b00ff + +/* P2_CFR2 */ +#define RSTV0910_P2_CFR2 0xf24c +#define FSTV0910_P2_CAR_FREQ2 0xf24c01ff + +/* P2_CFR1 */ +#define RSTV0910_P2_CFR1 0xf24d +#define FSTV0910_P2_CAR_FREQ1 0xf24d00ff + +/* P2_CFR0 */ +#define RSTV0910_P2_CFR0 0xf24e +#define FSTV0910_P2_CAR_FREQ0 0xf24e00ff + +/* P2_LDI */ +#define RSTV0910_P2_LDI 0xf24f +#define FSTV0910_P2_LOCK_DET_INTEGR 0xf24f01ff + +/* P2_TMGCFG */ +#define RSTV0910_P2_TMGCFG 0xf250 +#define FSTV0910_P2_TMGLOCK_BETA 0xf25000c0 +#define FSTV0910_P2_DO_TIMING_CORR 0xf2500010 +#define FSTV0910_P2_TMG_MINFREQ 0xf2500003 + +/* P2_RTC */ +#define RSTV0910_P2_RTC 0xf251 +#define FSTV0910_P2_TMGALPHA_EXP 0xf25100f0 +#define FSTV0910_P2_TMGBETA_EXP 0xf251000f + +/* P2_RTCS2 */ +#define RSTV0910_P2_RTCS2 0xf252 +#define FSTV0910_P2_TMGALPHAS2_EXP 0xf25200f0 +#define FSTV0910_P2_TMGBETAS2_EXP 0xf252000f + +/* P2_TMGTHRISE */ +#define RSTV0910_P2_TMGTHRISE 0xf253 +#define FSTV0910_P2_TMGLOCK_THRISE 0xf25300ff + +/* P2_TMGTHFALL */ +#define RSTV0910_P2_TMGTHFALL 0xf254 +#define FSTV0910_P2_TMGLOCK_THFALL 0xf25400ff + +/* P2_SFRUPRATIO */ +#define RSTV0910_P2_SFRUPRATIO 0xf255 +#define FSTV0910_P2_SFR_UPRATIO 0xf25500ff + +/* 
P2_SFRLOWRATIO */ +#define RSTV0910_P2_SFRLOWRATIO 0xf256 +#define FSTV0910_P2_SFR_LOWRATIO 0xf25600ff + +/* P2_KTTMG */ +#define RSTV0910_P2_KTTMG 0xf257 +#define FSTV0910_P2_KT_TMG_EXP 0xf25700f0 + +/* P2_KREFTMG */ +#define RSTV0910_P2_KREFTMG 0xf258 +#define FSTV0910_P2_KREF_TMG 0xf25800ff + +/* P2_SFRSTEP */ +#define RSTV0910_P2_SFRSTEP 0xf259 +#define FSTV0910_P2_SFR_SCANSTEP 0xf25900f0 +#define FSTV0910_P2_SFR_CENTERSTEP 0xf259000f + +/* P2_TMGCFG2 */ +#define RSTV0910_P2_TMGCFG2 0xf25a +#define FSTV0910_P2_DIS_AUTOSAMP 0xf25a0008 +#define FSTV0910_P2_SFRRATIO_FINE 0xf25a0001 + +/* P2_KREFTMG2 */ +#define RSTV0910_P2_KREFTMG2 0xf25b +#define FSTV0910_P2_KREF_TMG2 0xf25b00ff + +/* P2_TMGCFG3 */ +#define RSTV0910_P2_TMGCFG3 0xf25d +#define FSTV0910_P2_CONT_TMGCENTER 0xf25d0008 +#define FSTV0910_P2_AUTO_GUP 0xf25d0004 +#define FSTV0910_P2_AUTO_GLOW 0xf25d0002 + +/* P2_SFRINIT1 */ +#define RSTV0910_P2_SFRINIT1 0xf25e +#define FSTV0910_P2_SFR_INIT1 0xf25e00ff + +/* P2_SFRINIT0 */ +#define RSTV0910_P2_SFRINIT0 0xf25f +#define FSTV0910_P2_SFR_INIT0 0xf25f00ff + +/* P2_SFRUP1 */ +#define RSTV0910_P2_SFRUP1 0xf260 +#define FSTV0910_P2_SYMB_FREQ_UP1 0xf26000ff + +/* P2_SFRUP0 */ +#define RSTV0910_P2_SFRUP0 0xf261 +#define FSTV0910_P2_SYMB_FREQ_UP0 0xf26100ff + +/* P2_SFRLOW1 */ +#define RSTV0910_P2_SFRLOW1 0xf262 +#define FSTV0910_P2_SYMB_FREQ_LOW1 0xf26200ff + +/* P2_SFRLOW0 */ +#define RSTV0910_P2_SFRLOW0 0xf263 +#define FSTV0910_P2_SYMB_FREQ_LOW0 0xf26300ff + +/* P2_SFR3 */ +#define RSTV0910_P2_SFR3 0xf264 +#define FSTV0910_P2_SYMB_FREQ3 0xf26400ff + +/* P2_SFR2 */ +#define RSTV0910_P2_SFR2 0xf265 +#define FSTV0910_P2_SYMB_FREQ2 0xf26500ff + +/* P2_SFR1 */ +#define RSTV0910_P2_SFR1 0xf266 +#define FSTV0910_P2_SYMB_FREQ1 0xf26600ff + +/* P2_SFR0 */ +#define RSTV0910_P2_SFR0 0xf267 +#define FSTV0910_P2_SYMB_FREQ0 0xf26700ff + +/* P2_TMGREG2 */ +#define RSTV0910_P2_TMGREG2 0xf268 +#define FSTV0910_P2_TMGREG2 0xf26800ff + +/* P2_TMGREG1 */ +#define RSTV0910_P2_TMGREG1 0xf269 +#define FSTV0910_P2_TMGREG1 0xf26900ff + +/* P2_TMGREG0 */ +#define RSTV0910_P2_TMGREG0 0xf26a +#define FSTV0910_P2_TMGREG0 0xf26a00ff + +/* P2_TMGLOCK1 */ +#define RSTV0910_P2_TMGLOCK1 0xf26b +#define FSTV0910_P2_TMGLOCK_LEVEL1 0xf26b01ff + +/* P2_TMGLOCK0 */ +#define RSTV0910_P2_TMGLOCK0 0xf26c +#define FSTV0910_P2_TMGLOCK_LEVEL0 0xf26c00ff + +/* P2_TMGOBS */ +#define RSTV0910_P2_TMGOBS 0xf26d +#define FSTV0910_P2_ROLLOFF_STATUS 0xf26d00c0 + +/* P2_EQUALCFG */ +#define RSTV0910_P2_EQUALCFG 0xf26f +#define FSTV0910_P2_EQUAL_ON 0xf26f0040 +#define FSTV0910_P2_MU_EQUALDFE 0xf26f0007 + +/* P2_EQUAI1 */ +#define RSTV0910_P2_EQUAI1 0xf270 +#define FSTV0910_P2_EQUA_ACCI1 0xf27001ff + +/* P2_EQUAQ1 */ +#define RSTV0910_P2_EQUAQ1 0xf271 +#define FSTV0910_P2_EQUA_ACCQ1 0xf27101ff + +/* P2_EQUAI2 */ +#define RSTV0910_P2_EQUAI2 0xf272 +#define FSTV0910_P2_EQUA_ACCI2 0xf27201ff + +/* P2_EQUAQ2 */ +#define RSTV0910_P2_EQUAQ2 0xf273 +#define FSTV0910_P2_EQUA_ACCQ2 0xf27301ff + +/* P2_EQUAI3 */ +#define RSTV0910_P2_EQUAI3 0xf274 +#define FSTV0910_P2_EQUA_ACCI3 0xf27401ff + +/* P2_EQUAQ3 */ +#define RSTV0910_P2_EQUAQ3 0xf275 +#define FSTV0910_P2_EQUA_ACCQ3 0xf27501ff + +/* P2_EQUAI4 */ +#define RSTV0910_P2_EQUAI4 0xf276 +#define FSTV0910_P2_EQUA_ACCI4 0xf27601ff + +/* P2_EQUAQ4 */ +#define RSTV0910_P2_EQUAQ4 0xf277 +#define FSTV0910_P2_EQUA_ACCQ4 0xf27701ff + +/* P2_EQUAI5 */ +#define RSTV0910_P2_EQUAI5 0xf278 +#define FSTV0910_P2_EQUA_ACCI5 0xf27801ff + +/* P2_EQUAQ5 */ +#define RSTV0910_P2_EQUAQ5 0xf279 +#define FSTV0910_P2_EQUA_ACCQ5 
0xf27901ff + +/* P2_EQUAI6 */ +#define RSTV0910_P2_EQUAI6 0xf27a +#define FSTV0910_P2_EQUA_ACCI6 0xf27a01ff + +/* P2_EQUAQ6 */ +#define RSTV0910_P2_EQUAQ6 0xf27b +#define FSTV0910_P2_EQUA_ACCQ6 0xf27b01ff + +/* P2_EQUAI7 */ +#define RSTV0910_P2_EQUAI7 0xf27c +#define FSTV0910_P2_EQUA_ACCI7 0xf27c01ff + +/* P2_EQUAQ7 */ +#define RSTV0910_P2_EQUAQ7 0xf27d +#define FSTV0910_P2_EQUA_ACCQ7 0xf27d01ff + +/* P2_EQUAI8 */ +#define RSTV0910_P2_EQUAI8 0xf27e +#define FSTV0910_P2_EQUA_ACCI8 0xf27e01ff + +/* P2_EQUAQ8 */ +#define RSTV0910_P2_EQUAQ8 0xf27f +#define FSTV0910_P2_EQUA_ACCQ8 0xf27f01ff + +/* P2_NNOSDATAT1 */ +#define RSTV0910_P2_NNOSDATAT1 0xf280 +#define FSTV0910_P2_NOSDATAT_NORMED1 0xf28000ff + +/* P2_NNOSDATAT0 */ +#define RSTV0910_P2_NNOSDATAT0 0xf281 +#define FSTV0910_P2_NOSDATAT_NORMED0 0xf28100ff + +/* P2_NNOSDATA1 */ +#define RSTV0910_P2_NNOSDATA1 0xf282 +#define FSTV0910_P2_NOSDATA_NORMED1 0xf28200ff + +/* P2_NNOSDATA0 */ +#define RSTV0910_P2_NNOSDATA0 0xf283 +#define FSTV0910_P2_NOSDATA_NORMED0 0xf28300ff + +/* P2_NNOSPLHT1 */ +#define RSTV0910_P2_NNOSPLHT1 0xf284 +#define FSTV0910_P2_NOSPLHT_NORMED1 0xf28400ff + +/* P2_NNOSPLHT0 */ +#define RSTV0910_P2_NNOSPLHT0 0xf285 +#define FSTV0910_P2_NOSPLHT_NORMED0 0xf28500ff + +/* P2_NNOSPLH1 */ +#define RSTV0910_P2_NNOSPLH1 0xf286 +#define FSTV0910_P2_NOSPLH_NORMED1 0xf28600ff + +/* P2_NNOSPLH0 */ +#define RSTV0910_P2_NNOSPLH0 0xf287 +#define FSTV0910_P2_NOSPLH_NORMED0 0xf28700ff + +/* P2_NOSDATAT1 */ +#define RSTV0910_P2_NOSDATAT1 0xf288 +#define FSTV0910_P2_NOSDATAT_UNNORMED1 0xf28800ff + +/* P2_NOSDATAT0 */ +#define RSTV0910_P2_NOSDATAT0 0xf289 +#define FSTV0910_P2_NOSDATAT_UNNORMED0 0xf28900ff + +/* P2_NNOSFRAME1 */ +#define RSTV0910_P2_NNOSFRAME1 0xf28a +#define FSTV0910_P2_NOSFRAME_NORMED1 0xf28a00ff + +/* P2_NNOSFRAME0 */ +#define RSTV0910_P2_NNOSFRAME0 0xf28b +#define FSTV0910_P2_NOSFRAME_NORMED0 0xf28b00ff + +/* P2_NNOSRAD1 */ +#define RSTV0910_P2_NNOSRAD1 0xf28c +#define FSTV0910_P2_NOSRADIAL_NORMED1 0xf28c00ff + +/* P2_NNOSRAD0 */ +#define RSTV0910_P2_NNOSRAD0 0xf28d +#define FSTV0910_P2_NOSRADIAL_NORMED0 0xf28d00ff + +/* P2_NOSCFGF1 */ +#define RSTV0910_P2_NOSCFGF1 0xf28e +#define FSTV0910_P2_LOWNOISE_MESURE 0xf28e0080 +#define FSTV0910_P2_NOS_DELFRAME 0xf28e0040 +#define FSTV0910_P2_NOSDATA_MODE 0xf28e0030 +#define FSTV0910_P2_FRAMESEL_TYPESEL 0xf28e000c +#define FSTV0910_P2_FRAMESEL_TYPE 0xf28e0003 + +/* P2_NOSCFGF2 */ +#define RSTV0910_P2_NOSCFGF2 0xf28f +#define FSTV0910_P2_DIS_NOSPILOTS 0xf28f0080 +#define FSTV0910_P2_FRAMESEL_MODCODSEL 0xf28f0060 +#define FSTV0910_P2_FRAMESEL_MODCOD 0xf28f001f + +/* P2_CAR2CFG */ +#define RSTV0910_P2_CAR2CFG 0xf290 +#define FSTV0910_P2_ROTA2ON 0xf2900004 +#define FSTV0910_P2_PH_DET_ALGO2 0xf2900003 + +/* P2_CFR2CFR1 */ +#define RSTV0910_P2_CFR2CFR1 0xf291 +#define FSTV0910_P2_EN_S2CAR2CENTER 0xf2910020 +#define FSTV0910_P2_CFR2TOCFR1_BETA 0xf2910007 + +/* P2_CAR3CFG */ +#define RSTV0910_P2_CAR3CFG 0xf292 +#define FSTV0910_P2_CARRIER23_MODE 0xf29200c0 +#define FSTV0910_P2_CAR3INTERM_DVBS1 0xf2920020 +#define FSTV0910_P2_ABAMPLIF_MODE 0xf2920018 +#define FSTV0910_P2_CARRIER3_ALPHA3DL 0xf2920007 + +/* P2_CFR22 */ +#define RSTV0910_P2_CFR22 0xf293 +#define FSTV0910_P2_CAR2_FREQ2 0xf29301ff + +/* P2_CFR21 */ +#define RSTV0910_P2_CFR21 0xf294 +#define FSTV0910_P2_CAR2_FREQ1 0xf29400ff + +/* P2_CFR20 */ +#define RSTV0910_P2_CFR20 0xf295 +#define FSTV0910_P2_CAR2_FREQ0 0xf29500ff + +/* P2_ACLC2S2Q */ +#define RSTV0910_P2_ACLC2S2Q 0xf297 +#define FSTV0910_P2_ENAB_SPSKSYMB 0xf2970080 +#define 
FSTV0910_P2_CAR2S2_Q_ALPH_M 0xf2970030 +#define FSTV0910_P2_CAR2S2_Q_ALPH_E 0xf297000f + +/* P2_ACLC2S28 */ +#define RSTV0910_P2_ACLC2S28 0xf298 +#define FSTV0910_P2_CAR2S2_8_ALPH_M 0xf2980030 +#define FSTV0910_P2_CAR2S2_8_ALPH_E 0xf298000f + +/* P2_ACLC2S216A */ +#define RSTV0910_P2_ACLC2S216A 0xf299 +#define FSTV0910_P2_CAR2S2_16A_ALPH_M 0xf2990030 +#define FSTV0910_P2_CAR2S2_16A_ALPH_E 0xf299000f + +/* P2_ACLC2S232A */ +#define RSTV0910_P2_ACLC2S232A 0xf29a +#define FSTV0910_P2_CAR2S2_32A_ALPH_M 0xf29a0030 +#define FSTV0910_P2_CAR2S2_32A_ALPH_E 0xf29a000f + +/* P2_BCLC2S2Q */ +#define RSTV0910_P2_BCLC2S2Q 0xf29c +#define FSTV0910_P2_CAR2S2_Q_BETA_M 0xf29c0030 +#define FSTV0910_P2_CAR2S2_Q_BETA_E 0xf29c000f + +/* P2_BCLC2S28 */ +#define RSTV0910_P2_BCLC2S28 0xf29d +#define FSTV0910_P2_CAR2S2_8_BETA_M 0xf29d0030 +#define FSTV0910_P2_CAR2S2_8_BETA_E 0xf29d000f + +/* P2_BCLC2S216A */ +#define RSTV0910_P2_BCLC2S216A 0xf29e +#define FSTV0910_P2_DVBS2S216A_NIP 0xf29e0080 +#define FSTV0910_P2_CAR2S2_16A_BETA_M 0xf29e0030 +#define FSTV0910_P2_CAR2S2_16A_BETA_E 0xf29e000f + +/* P2_BCLC2S232A */ +#define RSTV0910_P2_BCLC2S232A 0xf29f +#define FSTV0910_P2_DVBS2S232A_NIP 0xf29f0080 +#define FSTV0910_P2_CAR2S2_32A_BETA_M 0xf29f0030 +#define FSTV0910_P2_CAR2S2_32A_BETA_E 0xf29f000f + +/* P2_PLROOT2 */ +#define RSTV0910_P2_PLROOT2 0xf2ac +#define FSTV0910_P2_PLSCRAMB_MODE 0xf2ac000c +#define FSTV0910_P2_PLSCRAMB_ROOT2 0xf2ac0003 + +/* P2_PLROOT1 */ +#define RSTV0910_P2_PLROOT1 0xf2ad +#define FSTV0910_P2_PLSCRAMB_ROOT1 0xf2ad00ff + +/* P2_PLROOT0 */ +#define RSTV0910_P2_PLROOT0 0xf2ae +#define FSTV0910_P2_PLSCRAMB_ROOT0 0xf2ae00ff + +/* P2_MODCODLST0 */ +#define RSTV0910_P2_MODCODLST0 0xf2b0 +#define FSTV0910_P2_NACCES_MODCODCH 0xf2b00001 + +/* P2_MODCODLST1 */ +#define RSTV0910_P2_MODCODLST1 0xf2b1 +#define FSTV0910_P2_SYMBRATE_FILTER 0xf2b10008 +#define FSTV0910_P2_NRESET_MODCODLST 0xf2b10004 +#define FSTV0910_P2_DIS_32PSK_9_10 0xf2b10003 + +/* P2_MODCODLST2 */ +#define RSTV0910_P2_MODCODLST2 0xf2b2 +#define FSTV0910_P2_DIS_32PSK_8_9 0xf2b200f0 +#define FSTV0910_P2_DIS_32PSK_5_6 0xf2b2000f + +/* P2_MODCODLST3 */ +#define RSTV0910_P2_MODCODLST3 0xf2b3 +#define FSTV0910_P2_DIS_32PSK_4_5 0xf2b300f0 +#define FSTV0910_P2_DIS_32PSK_3_4 0xf2b3000f + +/* P2_MODCODLST4 */ +#define RSTV0910_P2_MODCODLST4 0xf2b4 +#define FSTV0910_P2_DUMMYPL_PILOT 0xf2b40080 +#define FSTV0910_P2_DUMMYPL_NOPILOT 0xf2b40040 +#define FSTV0910_P2_DIS_16PSK_9_10 0xf2b40030 +#define FSTV0910_P2_DIS_16PSK_8_9 0xf2b4000f + +/* P2_MODCODLST5 */ +#define RSTV0910_P2_MODCODLST5 0xf2b5 +#define FSTV0910_P2_DIS_16PSK_5_6 0xf2b500f0 +#define FSTV0910_P2_DIS_16PSK_4_5 0xf2b5000f + +/* P2_MODCODLST6 */ +#define RSTV0910_P2_MODCODLST6 0xf2b6 +#define FSTV0910_P2_DIS_16PSK_3_4 0xf2b600f0 +#define FSTV0910_P2_DIS_16PSK_2_3 0xf2b6000f + +/* P2_MODCODLST7 */ +#define RSTV0910_P2_MODCODLST7 0xf2b7 +#define FSTV0910_P2_MODCOD_NNOSFILTER 0xf2b70080 +#define FSTV0910_P2_DIS_8PSK_9_10 0xf2b70030 +#define FSTV0910_P2_DIS_8PSK_8_9 0xf2b7000f + +/* P2_MODCODLST8 */ +#define RSTV0910_P2_MODCODLST8 0xf2b8 +#define FSTV0910_P2_DIS_8PSK_5_6 0xf2b800f0 +#define FSTV0910_P2_DIS_8PSK_3_4 0xf2b8000f + +/* P2_MODCODLST9 */ +#define RSTV0910_P2_MODCODLST9 0xf2b9 +#define FSTV0910_P2_DIS_8PSK_2_3 0xf2b900f0 +#define FSTV0910_P2_DIS_8PSK_3_5 0xf2b9000f + +/* P2_MODCODLSTA */ +#define RSTV0910_P2_MODCODLSTA 0xf2ba +#define FSTV0910_P2_NOSFILTER_LIMITE 0xf2ba0080 +#define FSTV0910_P2_DIS_QPSK_9_10 0xf2ba0030 +#define FSTV0910_P2_DIS_QPSK_8_9 0xf2ba000f + +/* 
P2_MODCODLSTB */ +#define RSTV0910_P2_MODCODLSTB 0xf2bb +#define FSTV0910_P2_DIS_QPSK_5_6 0xf2bb00f0 +#define FSTV0910_P2_DIS_QPSK_4_5 0xf2bb000f + +/* P2_MODCODLSTC */ +#define RSTV0910_P2_MODCODLSTC 0xf2bc +#define FSTV0910_P2_DIS_QPSK_3_4 0xf2bc00f0 +#define FSTV0910_P2_DIS_QPSK_2_3 0xf2bc000f + +/* P2_MODCODLSTD */ +#define RSTV0910_P2_MODCODLSTD 0xf2bd +#define FSTV0910_P2_DIS_QPSK_3_5 0xf2bd00f0 +#define FSTV0910_P2_DIS_QPSK_1_2 0xf2bd000f + +/* P2_MODCODLSTE */ +#define RSTV0910_P2_MODCODLSTE 0xf2be +#define FSTV0910_P2_DIS_QPSK_2_5 0xf2be00f0 +#define FSTV0910_P2_DIS_QPSK_1_3 0xf2be000f + +/* P2_MODCODLSTF */ +#define RSTV0910_P2_MODCODLSTF 0xf2bf +#define FSTV0910_P2_DIS_QPSK_1_4 0xf2bf00f0 +#define FSTV0910_P2_DEMOD_INVMODLST 0xf2bf0008 +#define FSTV0910_P2_DEMODOUT_ENABLE 0xf2bf0004 +#define FSTV0910_P2_DDEMOD_NSET 0xf2bf0002 +#define FSTV0910_P2_MODCOD_NSTOCK 0xf2bf0001 + +/* P2_GAUSSR0 */ +#define RSTV0910_P2_GAUSSR0 0xf2c0 +#define FSTV0910_P2_EN_CCIMODE 0xf2c00080 +#define FSTV0910_P2_R0_GAUSSIEN 0xf2c0007f + +/* P2_CCIR0 */ +#define RSTV0910_P2_CCIR0 0xf2c1 +#define FSTV0910_P2_CCIDETECT_PLHONLY 0xf2c10080 +#define FSTV0910_P2_R0_CCI 0xf2c1007f + +/* P2_CCIQUANT */ +#define RSTV0910_P2_CCIQUANT 0xf2c2 +#define FSTV0910_P2_CCI_BETA 0xf2c200e0 +#define FSTV0910_P2_CCI_QUANT 0xf2c2001f + +/* P2_CCITHRES */ +#define RSTV0910_P2_CCITHRES 0xf2c3 +#define FSTV0910_P2_CCI_THRESHOLD 0xf2c300ff + +/* P2_CCIACC */ +#define RSTV0910_P2_CCIACC 0xf2c4 +#define FSTV0910_P2_CCI_VALUE 0xf2c400ff + +/* P2_DSTATUS4 */ +#define RSTV0910_P2_DSTATUS4 0xf2c5 +#define FSTV0910_P2_RAINFADE_DETECT 0xf2c50080 +#define FSTV0910_P2_NOTHRES2_FAIL 0xf2c50040 +#define FSTV0910_P2_NOTHRES1_FAIL 0xf2c50020 +#define FSTV0910_P2_DMDPROG_ERROR 0xf2c50004 +#define FSTV0910_P2_CSTENV_DETECT 0xf2c50002 +#define FSTV0910_P2_DETECTION_TRIAX 0xf2c50001 + +/* P2_DMDRESCFG */ +#define RSTV0910_P2_DMDRESCFG 0xf2c6 +#define FSTV0910_P2_DMDRES_RESET 0xf2c60080 +#define FSTV0910_P2_DMDRES_STRALL 0xf2c60008 +#define FSTV0910_P2_DMDRES_NEWONLY 0xf2c60004 +#define FSTV0910_P2_DMDRES_NOSTORE 0xf2c60002 + +/* P2_DMDRESADR */ +#define RSTV0910_P2_DMDRESADR 0xf2c7 +#define FSTV0910_P2_DMDRES_VALIDCFR 0xf2c70040 +#define FSTV0910_P2_DMDRES_MEMFULL 0xf2c70030 +#define FSTV0910_P2_DMDRES_RESNBR 0xf2c7000f + +/* P2_DMDRESDATA7 */ +#define RSTV0910_P2_DMDRESDATA7 0xf2c8 +#define FSTV0910_P2_DMDRES_DATA7 0xf2c800ff + +/* P2_DMDRESDATA6 */ +#define RSTV0910_P2_DMDRESDATA6 0xf2c9 +#define FSTV0910_P2_DMDRES_DATA6 0xf2c900ff + +/* P2_DMDRESDATA5 */ +#define RSTV0910_P2_DMDRESDATA5 0xf2ca +#define FSTV0910_P2_DMDRES_DATA5 0xf2ca00ff + +/* P2_DMDRESDATA4 */ +#define RSTV0910_P2_DMDRESDATA4 0xf2cb +#define FSTV0910_P2_DMDRES_DATA4 0xf2cb00ff + +/* P2_DMDRESDATA3 */ +#define RSTV0910_P2_DMDRESDATA3 0xf2cc +#define FSTV0910_P2_DMDRES_DATA3 0xf2cc00ff + +/* P2_DMDRESDATA2 */ +#define RSTV0910_P2_DMDRESDATA2 0xf2cd +#define FSTV0910_P2_DMDRES_DATA2 0xf2cd00ff + +/* P2_DMDRESDATA1 */ +#define RSTV0910_P2_DMDRESDATA1 0xf2ce +#define FSTV0910_P2_DMDRES_DATA1 0xf2ce00ff + +/* P2_DMDRESDATA0 */ +#define RSTV0910_P2_DMDRESDATA0 0xf2cf +#define FSTV0910_P2_DMDRES_DATA0 0xf2cf00ff + +/* P2_FFEI1 */ +#define RSTV0910_P2_FFEI1 0xf2d0 +#define FSTV0910_P2_FFE_ACCI1 0xf2d001ff + +/* P2_FFEQ1 */ +#define RSTV0910_P2_FFEQ1 0xf2d1 +#define FSTV0910_P2_FFE_ACCQ1 0xf2d101ff + +/* P2_FFEI2 */ +#define RSTV0910_P2_FFEI2 0xf2d2 +#define FSTV0910_P2_FFE_ACCI2 0xf2d201ff + +/* P2_FFEQ2 */ +#define RSTV0910_P2_FFEQ2 0xf2d3 +#define FSTV0910_P2_FFE_ACCQ2 0xf2d301ff + 
+/* P2_FFEI3 */ +#define RSTV0910_P2_FFEI3 0xf2d4 +#define FSTV0910_P2_FFE_ACCI3 0xf2d401ff + +/* P2_FFEQ3 */ +#define RSTV0910_P2_FFEQ3 0xf2d5 +#define FSTV0910_P2_FFE_ACCQ3 0xf2d501ff + +/* P2_FFEI4 */ +#define RSTV0910_P2_FFEI4 0xf2d6 +#define FSTV0910_P2_FFE_ACCI4 0xf2d601ff + +/* P2_FFEQ4 */ +#define RSTV0910_P2_FFEQ4 0xf2d7 +#define FSTV0910_P2_FFE_ACCQ4 0xf2d701ff + +/* P2_FFECFG */ +#define RSTV0910_P2_FFECFG 0xf2d8 +#define FSTV0910_P2_EQUALFFE_ON 0xf2d80040 +#define FSTV0910_P2_EQUAL_USEDSYMB 0xf2d80030 +#define FSTV0910_P2_MU_EQUALFFE 0xf2d80007 + +/* P2_TNRCFG2 */ +#define RSTV0910_P2_TNRCFG2 0xf2e1 +#define FSTV0910_P2_TUN_IQSWAP 0xf2e10080 + +/* P2_SMAPCOEF7 */ +#define RSTV0910_P2_SMAPCOEF7 0xf300 +#define FSTV0910_P2_DIS_QSCALE 0xf3000080 +#define FSTV0910_P2_SMAPCOEF_Q_LLR12 0xf300017f + +/* P2_SMAPCOEF6 */ +#define RSTV0910_P2_SMAPCOEF6 0xf301 +#define FSTV0910_P2_DIS_AGC2SCALE 0xf3010080 +#define FSTV0910_P2_ADJ_8PSKLLR1 0xf3010004 +#define FSTV0910_P2_OLD_8PSKLLR1 0xf3010002 +#define FSTV0910_P2_DIS_AB8PSK 0xf3010001 + +/* P2_SMAPCOEF5 */ +#define RSTV0910_P2_SMAPCOEF5 0xf302 +#define FSTV0910_P2_DIS_8SCALE 0xf3020080 +#define FSTV0910_P2_SMAPCOEF_8P_LLR23 0xf302017f + +/* P2_SMAPCOEF4 */ +#define RSTV0910_P2_SMAPCOEF4 0xf303 +#define FSTV0910_P2_SMAPCOEF_16APSK_LLR12 0xf303017f + +/* P2_SMAPCOEF3 */ +#define RSTV0910_P2_SMAPCOEF3 0xf304 +#define FSTV0910_P2_SMAPCOEF_16APSK_LLR34 0xf304017f + +/* P2_SMAPCOEF2 */ +#define RSTV0910_P2_SMAPCOEF2 0xf305 +#define FSTV0910_P2_SMAPCOEF_32APSK_R2R3 0xf30501f0 +#define FSTV0910_P2_SMAPCOEF_32APSK_LLR2 0xf305010f + +/* P2_SMAPCOEF1 */ +#define RSTV0910_P2_SMAPCOEF1 0xf306 +#define FSTV0910_P2_DIS_16SCALE 0xf3060080 +#define FSTV0910_P2_SMAPCOEF_32_LLR34 0xf306017f + +/* P2_SMAPCOEF0 */ +#define RSTV0910_P2_SMAPCOEF0 0xf307 +#define FSTV0910_P2_DIS_32SCALE 0xf3070080 +#define FSTV0910_P2_SMAPCOEF_32_LLR15 0xf307017f + +/* P2_NOSTHRES1 */ +#define RSTV0910_P2_NOSTHRES1 0xf309 +#define FSTV0910_P2_NOS_THRESHOLD1 0xf30900ff + +/* P2_NOSTHRES2 */ +#define RSTV0910_P2_NOSTHRES2 0xf30a +#define FSTV0910_P2_NOS_THRESHOLD2 0xf30a00ff + +/* P2_NOSDIFF1 */ +#define RSTV0910_P2_NOSDIFF1 0xf30b +#define FSTV0910_P2_NOSTHRES1_DIFF 0xf30b00ff + +/* P2_RAINFADE */ +#define RSTV0910_P2_RAINFADE 0xf30c +#define FSTV0910_P2_NOSTHRES_DATAT 0xf30c0080 +#define FSTV0910_P2_RAINFADE_CNLIMIT 0xf30c0070 +#define FSTV0910_P2_RAINFADE_TIMEOUT 0xf30c0007 + +/* P2_NOSRAMCFG */ +#define RSTV0910_P2_NOSRAMCFG 0xf30d +#define FSTV0910_P2_NOSRAM_ACTIVATION 0xf30d0030 +#define FSTV0910_P2_NOSRAM_CNRONLY 0xf30d0008 +#define FSTV0910_P2_NOSRAM_LGNCNR1 0xf30d0007 + +/* P2_NOSRAMPOS */ +#define RSTV0910_P2_NOSRAMPOS 0xf30e +#define FSTV0910_P2_NOSRAM_LGNCNR0 0xf30e00f0 +#define FSTV0910_P2_NOSRAM_VALIDE 0xf30e0004 +#define FSTV0910_P2_NOSRAM_CNRVAL1 0xf30e0003 + +/* P2_NOSRAMVAL */ +#define RSTV0910_P2_NOSRAMVAL 0xf30f +#define FSTV0910_P2_NOSRAM_CNRVAL0 0xf30f00ff + +/* P2_DMDPLHSTAT */ +#define RSTV0910_P2_DMDPLHSTAT 0xf320 +#define FSTV0910_P2_PLH_STATISTIC 0xf32000ff + +/* P2_LOCKTIME3 */ +#define RSTV0910_P2_LOCKTIME3 0xf322 +#define FSTV0910_P2_DEMOD_LOCKTIME3 0xf32200ff + +/* P2_LOCKTIME2 */ +#define RSTV0910_P2_LOCKTIME2 0xf323 +#define FSTV0910_P2_DEMOD_LOCKTIME2 0xf32300ff + +/* P2_LOCKTIME1 */ +#define RSTV0910_P2_LOCKTIME1 0xf324 +#define FSTV0910_P2_DEMOD_LOCKTIME1 0xf32400ff + +/* P2_LOCKTIME0 */ +#define RSTV0910_P2_LOCKTIME0 0xf325 +#define FSTV0910_P2_DEMOD_LOCKTIME0 0xf32500ff + +/* P2_VITSCALE */ +#define RSTV0910_P2_VITSCALE 0xf332 +#define 
FSTV0910_P2_NVTH_NOSRANGE 0xf3320080 +#define FSTV0910_P2_VERROR_MAXMODE 0xf3320040 +#define FSTV0910_P2_NSLOWSN_LOCKED 0xf3320008 +#define FSTV0910_P2_DIS_RSFLOCK 0xf3320002 + +/* P2_FECM */ +#define RSTV0910_P2_FECM 0xf333 +#define FSTV0910_P2_DSS_DVB 0xf3330080 +#define FSTV0910_P2_DSS_SRCH 0xf3330010 +#define FSTV0910_P2_SYNCVIT 0xf3330002 +#define FSTV0910_P2_IQINV 0xf3330001 + +/* P2_VTH12 */ +#define RSTV0910_P2_VTH12 0xf334 +#define FSTV0910_P2_VTH12 0xf33400ff + +/* P2_VTH23 */ +#define RSTV0910_P2_VTH23 0xf335 +#define FSTV0910_P2_VTH23 0xf33500ff + +/* P2_VTH34 */ +#define RSTV0910_P2_VTH34 0xf336 +#define FSTV0910_P2_VTH34 0xf33600ff + +/* P2_VTH56 */ +#define RSTV0910_P2_VTH56 0xf337 +#define FSTV0910_P2_VTH56 0xf33700ff + +/* P2_VTH67 */ +#define RSTV0910_P2_VTH67 0xf338 +#define FSTV0910_P2_VTH67 0xf33800ff + +/* P2_VTH78 */ +#define RSTV0910_P2_VTH78 0xf339 +#define FSTV0910_P2_VTH78 0xf33900ff + +/* P2_VITCURPUN */ +#define RSTV0910_P2_VITCURPUN 0xf33a +#define FSTV0910_P2_VIT_CURPUN 0xf33a001f + +/* P2_VERROR */ +#define RSTV0910_P2_VERROR 0xf33b +#define FSTV0910_P2_REGERR_VIT 0xf33b00ff + +/* P2_PRVIT */ +#define RSTV0910_P2_PRVIT 0xf33c +#define FSTV0910_P2_DIS_VTHLOCK 0xf33c0040 +#define FSTV0910_P2_E7_8VIT 0xf33c0020 +#define FSTV0910_P2_E6_7VIT 0xf33c0010 +#define FSTV0910_P2_E5_6VIT 0xf33c0008 +#define FSTV0910_P2_E3_4VIT 0xf33c0004 +#define FSTV0910_P2_E2_3VIT 0xf33c0002 +#define FSTV0910_P2_E1_2VIT 0xf33c0001 + +/* P2_VAVSRVIT */ +#define RSTV0910_P2_VAVSRVIT 0xf33d +#define FSTV0910_P2_AMVIT 0xf33d0080 +#define FSTV0910_P2_FROZENVIT 0xf33d0040 +#define FSTV0910_P2_SNVIT 0xf33d0030 +#define FSTV0910_P2_TOVVIT 0xf33d000c +#define FSTV0910_P2_HYPVIT 0xf33d0003 + +/* P2_VSTATUSVIT */ +#define RSTV0910_P2_VSTATUSVIT 0xf33e +#define FSTV0910_P2_PRFVIT 0xf33e0010 +#define FSTV0910_P2_LOCKEDVIT 0xf33e0008 + +/* P2_VTHINUSE */ +#define RSTV0910_P2_VTHINUSE 0xf33f +#define FSTV0910_P2_VIT_INUSE 0xf33f00ff + +/* P2_KDIV12 */ +#define RSTV0910_P2_KDIV12 0xf340 +#define FSTV0910_P2_K_DIVIDER_12 0xf340007f + +/* P2_KDIV23 */ +#define RSTV0910_P2_KDIV23 0xf341 +#define FSTV0910_P2_K_DIVIDER_23 0xf341007f + +/* P2_KDIV34 */ +#define RSTV0910_P2_KDIV34 0xf342 +#define FSTV0910_P2_K_DIVIDER_34 0xf342007f + +/* P2_KDIV56 */ +#define RSTV0910_P2_KDIV56 0xf343 +#define FSTV0910_P2_K_DIVIDER_56 0xf343007f + +/* P2_KDIV67 */ +#define RSTV0910_P2_KDIV67 0xf344 +#define FSTV0910_P2_K_DIVIDER_67 0xf344007f + +/* P2_KDIV78 */ +#define RSTV0910_P2_KDIV78 0xf345 +#define FSTV0910_P2_K_DIVIDER_78 0xf345007f + +/* P2_TSPIDFLT1 */ +#define RSTV0910_P2_TSPIDFLT1 0xf346 +#define FSTV0910_P2_PIDFLT_ADDR 0xf34600ff + +/* P2_TSPIDFLT0 */ +#define RSTV0910_P2_TSPIDFLT0 0xf347 +#define FSTV0910_P2_PIDFLT_DATA 0xf34700ff + +/* P2_PDELCTRL0 */ +#define RSTV0910_P2_PDELCTRL0 0xf34f +#define FSTV0910_P2_ISIOBS_MODE 0xf34f0030 + +/* P2_PDELCTRL1 */ +#define RSTV0910_P2_PDELCTRL1 0xf350 +#define FSTV0910_P2_INV_MISMASK 0xf3500080 +#define FSTV0910_P2_FILTER_EN 0xf3500020 +#define FSTV0910_P2_HYSTEN 0xf3500008 +#define FSTV0910_P2_HYSTSWRST 0xf3500004 +#define FSTV0910_P2_EN_MIS00 0xf3500002 +#define FSTV0910_P2_ALGOSWRST 0xf3500001 + +/* P2_PDELCTRL2 */ +#define RSTV0910_P2_PDELCTRL2 0xf351 +#define FSTV0910_P2_FORCE_CONTINUOUS 0xf3510080 +#define FSTV0910_P2_RESET_UPKO_COUNT 0xf3510040 +#define FSTV0910_P2_USER_PKTDELIN_NB 0xf3510020 +#define FSTV0910_P2_FRAME_MODE 0xf3510002 + +/* P2_HYSTTHRESH */ +#define RSTV0910_P2_HYSTTHRESH 0xf354 +#define FSTV0910_P2_DELIN_LOCKTHRES 0xf35400f0 +#define 
FSTV0910_P2_DELIN_UNLOCKTHRES 0xf354000f + +/* P2_UPLCCST0 */ +#define RSTV0910_P2_UPLCCST0 0xf358 +#define FSTV0910_P2_UPL_CST0 0xf35800f8 +#define FSTV0910_P2_UPL_MODE 0xf3580007 + +/* P2_ISIENTRY */ +#define RSTV0910_P2_ISIENTRY 0xf35e +#define FSTV0910_P2_ISI_ENTRY 0xf35e00ff + +/* P2_ISIBITENA */ +#define RSTV0910_P2_ISIBITENA 0xf35f +#define FSTV0910_P2_ISI_BIT_EN 0xf35f00ff + +/* P2_MATSTR1 */ +#define RSTV0910_P2_MATSTR1 0xf360 +#define FSTV0910_P2_MATYPE_CURRENT1 0xf36000ff + +/* P2_MATSTR0 */ +#define RSTV0910_P2_MATSTR0 0xf361 +#define FSTV0910_P2_MATYPE_CURRENT0 0xf36100ff + +/* P2_UPLSTR1 */ +#define RSTV0910_P2_UPLSTR1 0xf362 +#define FSTV0910_P2_UPL_CURRENT1 0xf36200ff + +/* P2_UPLSTR0 */ +#define RSTV0910_P2_UPLSTR0 0xf363 +#define FSTV0910_P2_UPL_CURRENT0 0xf36300ff + +/* P2_DFLSTR1 */ +#define RSTV0910_P2_DFLSTR1 0xf364 +#define FSTV0910_P2_DFL_CURRENT1 0xf36400ff + +/* P2_DFLSTR0 */ +#define RSTV0910_P2_DFLSTR0 0xf365 +#define FSTV0910_P2_DFL_CURRENT0 0xf36500ff + +/* P2_SYNCSTR */ +#define RSTV0910_P2_SYNCSTR 0xf366 +#define FSTV0910_P2_SYNC_CURRENT 0xf36600ff + +/* P2_SYNCDSTR1 */ +#define RSTV0910_P2_SYNCDSTR1 0xf367 +#define FSTV0910_P2_SYNCD_CURRENT1 0xf36700ff + +/* P2_SYNCDSTR0 */ +#define RSTV0910_P2_SYNCDSTR0 0xf368 +#define FSTV0910_P2_SYNCD_CURRENT0 0xf36800ff + +/* P2_PDELSTATUS1 */ +#define RSTV0910_P2_PDELSTATUS1 0xf369 +#define FSTV0910_P2_PKTDELIN_DELOCK 0xf3690080 +#define FSTV0910_P2_SYNCDUPDFL_BADDFL 0xf3690040 +#define FSTV0910_P2_UNACCEPTED_STREAM 0xf3690010 +#define FSTV0910_P2_BCH_ERROR_FLAG 0xf3690008 +#define FSTV0910_P2_PKTDELIN_LOCK 0xf3690002 +#define FSTV0910_P2_FIRST_LOCK 0xf3690001 + +/* P2_PDELSTATUS2 */ +#define RSTV0910_P2_PDELSTATUS2 0xf36a +#define FSTV0910_P2_FRAME_MODCOD 0xf36a007c +#define FSTV0910_P2_FRAME_TYPE 0xf36a0003 + +/* P2_BBFCRCKO1 */ +#define RSTV0910_P2_BBFCRCKO1 0xf36b +#define FSTV0910_P2_BBHCRC_KOCNT1 0xf36b00ff + +/* P2_BBFCRCKO0 */ +#define RSTV0910_P2_BBFCRCKO0 0xf36c +#define FSTV0910_P2_BBHCRC_KOCNT0 0xf36c00ff + +/* P2_UPCRCKO1 */ +#define RSTV0910_P2_UPCRCKO1 0xf36d +#define FSTV0910_P2_PKTCRC_KOCNT1 0xf36d00ff + +/* P2_UPCRCKO0 */ +#define RSTV0910_P2_UPCRCKO0 0xf36e +#define FSTV0910_P2_PKTCRC_KOCNT0 0xf36e00ff + +/* P2_PDELCTRL3 */ +#define RSTV0910_P2_PDELCTRL3 0xf36f +#define FSTV0910_P2_NOFIFO_BCHERR 0xf36f0020 +#define FSTV0910_P2_PKTDELIN_DELACMERR 0xf36f0010 + +/* P2_TSSTATEM */ +#define RSTV0910_P2_TSSTATEM 0xf370 +#define FSTV0910_P2_TSDIL_ON 0xf3700080 +#define FSTV0910_P2_TSRS_ON 0xf3700020 +#define FSTV0910_P2_TSDESCRAMB_ON 0xf3700010 +#define FSTV0910_P2_TSFRAME_MODE 0xf3700008 +#define FSTV0910_P2_TS_DISABLE 0xf3700004 +#define FSTV0910_P2_TSACM_MODE 0xf3700002 +#define FSTV0910_P2_TSOUT_NOSYNC 0xf3700001 + +/* P2_TSSTATEL */ +#define RSTV0910_P2_TSSTATEL 0xf371 +#define FSTV0910_P2_TSNOSYNCBYTE 0xf3710080 +#define FSTV0910_P2_TSPARITY_ON 0xf3710040 +#define FSTV0910_P2_TSISSYI_ON 0xf3710008 +#define FSTV0910_P2_TSNPD_ON 0xf3710004 +#define FSTV0910_P2_TSCRC8_ON 0xf3710002 +#define FSTV0910_P2_TSDSS_PACKET 0xf3710001 + +/* P2_TSCFGH */ +#define RSTV0910_P2_TSCFGH 0xf372 +#define FSTV0910_P2_TSFIFO_DVBCI 0xf3720080 +#define FSTV0910_P2_TSFIFO_SERIAL 0xf3720040 +#define FSTV0910_P2_TSFIFO_TEIUPDATE 0xf3720020 +#define FSTV0910_P2_TSFIFO_DUTY50 0xf3720010 +#define FSTV0910_P2_TSFIFO_HSGNLOUT 0xf3720008 +#define FSTV0910_P2_TSFIFO_ERRMODE 0xf3720006 +#define FSTV0910_P2_RST_HWARE 0xf3720001 + +/* P2_TSCFGM */ +#define RSTV0910_P2_TSCFGM 0xf373 +#define FSTV0910_P2_TSFIFO_MANSPEED 0xf37300c0 +#define 
FSTV0910_P2_TSFIFO_PERMDATA 0xf3730020 +#define FSTV0910_P2_TSFIFO_NONEWSGNL 0xf3730010 +#define FSTV0910_P2_TSFIFO_INVDATA 0xf3730001 + +/* P2_TSCFGL */ +#define RSTV0910_P2_TSCFGL 0xf374 +#define FSTV0910_P2_TSFIFO_BCLKDEL1CK 0xf37400c0 +#define FSTV0910_P2_BCHERROR_MODE 0xf3740030 +#define FSTV0910_P2_TSFIFO_NSGNL2DATA 0xf3740008 +#define FSTV0910_P2_TSFIFO_EMBINDVB 0xf3740004 +#define FSTV0910_P2_TSFIFO_BITSPEED 0xf3740003 + +/* P2_TSSYNC */ +#define RSTV0910_P2_TSSYNC 0xf375 +#define FSTV0910_P2_TSFIFO_SYNCMODE 0xf3750018 + +/* P2_TSINSDELH */ +#define RSTV0910_P2_TSINSDELH 0xf376 +#define FSTV0910_P2_TSDEL_SYNCBYTE 0xf3760080 +#define FSTV0910_P2_TSDEL_XXHEADER 0xf3760040 +#define FSTV0910_P2_TSDEL_DATAFIELD 0xf3760010 +#define FSTV0910_P2_TSINSDEL_RSPARITY 0xf3760002 +#define FSTV0910_P2_TSINSDEL_CRC8 0xf3760001 + +/* P2_TSINSDELM */ +#define RSTV0910_P2_TSINSDELM 0xf377 +#define FSTV0910_P2_TSINS_EMODCOD 0xf3770010 +#define FSTV0910_P2_TSINS_TOKEN 0xf3770008 +#define FSTV0910_P2_TSINS_XXXERR 0xf3770004 +#define FSTV0910_P2_TSINS_MATYPE 0xf3770002 +#define FSTV0910_P2_TSINS_UPL 0xf3770001 + +/* P2_TSINSDELL */ +#define RSTV0910_P2_TSINSDELL 0xf378 +#define FSTV0910_P2_TSINS_DFL 0xf3780080 +#define FSTV0910_P2_TSINS_SYNCD 0xf3780040 +#define FSTV0910_P2_TSINS_BLOCLEN 0xf3780020 +#define FSTV0910_P2_TSINS_SIGPCOUNT 0xf3780010 +#define FSTV0910_P2_TSINS_FIFO 0xf3780008 +#define FSTV0910_P2_TSINS_REALPACK 0xf3780004 +#define FSTV0910_P2_TSINS_TSCONFIG 0xf3780002 +#define FSTV0910_P2_TSINS_LATENCY 0xf3780001 + +/* P2_TSDIVN */ +#define RSTV0910_P2_TSDIVN 0xf379 +#define FSTV0910_P2_TSFIFO_SPEEDMODE 0xf37900c0 +#define FSTV0910_P2_TSFIFO_RISEOK 0xf3790007 + +/* P2_TSCFG4 */ +#define RSTV0910_P2_TSCFG4 0xf37a +#define FSTV0910_P2_TSFIFO_TSSPEEDMODE 0xf37a00c0 + +/* P2_TSSPEED */ +#define RSTV0910_P2_TSSPEED 0xf380 +#define FSTV0910_P2_TSFIFO_OUTSPEED 0xf38000ff + +/* P2_TSSTATUS */ +#define RSTV0910_P2_TSSTATUS 0xf381 +#define FSTV0910_P2_TSFIFO_LINEOK 0xf3810080 +#define FSTV0910_P2_TSFIFO_ERROR 0xf3810040 +#define FSTV0910_P2_TSFIFO_NOSYNC 0xf3810010 +#define FSTV0910_P2_TSREGUL_ERROR 0xf3810004 +#define FSTV0910_P2_DIL_READY 0xf3810001 + +/* P2_TSSTATUS2 */ +#define RSTV0910_P2_TSSTATUS2 0xf382 +#define FSTV0910_P2_TSFIFO_DEMODSEL 0xf3820080 +#define FSTV0910_P2_TSFIFOSPEED_STORE 0xf3820040 +#define FSTV0910_P2_DILXX_RESET 0xf3820020 +#define FSTV0910_P2_SCRAMBDETECT 0xf3820002 + +/* P2_TSBITRATE1 */ +#define RSTV0910_P2_TSBITRATE1 0xf383 +#define FSTV0910_P2_TSFIFO_BITRATE1 0xf38300ff + +/* P2_TSBITRATE0 */ +#define RSTV0910_P2_TSBITRATE0 0xf384 +#define FSTV0910_P2_TSFIFO_BITRATE0 0xf38400ff + +/* P2_TSPACKLEN1 */ +#define RSTV0910_P2_TSPACKLEN1 0xf385 +#define FSTV0910_P2_TSFIFO_PACKCPT 0xf38500e0 + +/* P2_TSDLY2 */ +#define RSTV0910_P2_TSDLY2 0xf389 +#define FSTV0910_P2_SOFFIFO_LATENCY2 0xf389000f + +/* P2_TSDLY1 */ +#define RSTV0910_P2_TSDLY1 0xf38a +#define FSTV0910_P2_SOFFIFO_LATENCY1 0xf38a00ff + +/* P2_TSDLY0 */ +#define RSTV0910_P2_TSDLY0 0xf38b +#define FSTV0910_P2_SOFFIFO_LATENCY0 0xf38b00ff + +/* P2_TSNPDAV */ +#define RSTV0910_P2_TSNPDAV 0xf38c +#define FSTV0910_P2_TSNPD_AVERAGE 0xf38c00ff + +/* P2_TSBUFSTAT2 */ +#define RSTV0910_P2_TSBUFSTAT2 0xf38d +#define FSTV0910_P2_TSISCR_3BYTES 0xf38d0080 +#define FSTV0910_P2_TSISCR_NEWDATA 0xf38d0040 +#define FSTV0910_P2_TSISCR_BUFSTAT2 0xf38d003f + +/* P2_TSBUFSTAT1 */ +#define RSTV0910_P2_TSBUFSTAT1 0xf38e +#define FSTV0910_P2_TSISCR_BUFSTAT1 0xf38e00ff + +/* P2_TSBUFSTAT0 */ +#define RSTV0910_P2_TSBUFSTAT0 0xf38f +#define 
FSTV0910_P2_TSISCR_BUFSTAT0 0xf38f00ff + +/* P2_TSDEBUGL */ +#define RSTV0910_P2_TSDEBUGL 0xf391 +#define FSTV0910_P2_TSFIFO_ERROR_EVNT 0xf3910004 +#define FSTV0910_P2_TSFIFO_OVERFLOWM 0xf3910001 + +/* P2_TSDLYSET2 */ +#define RSTV0910_P2_TSDLYSET2 0xf392 +#define FSTV0910_P2_SOFFIFO_OFFSET 0xf39200c0 +#define FSTV0910_P2_HYSTERESIS_THRESHOLD 0xf3920030 +#define FSTV0910_P2_SOFFIFO_SYMBOFFS2 0xf392000f + +/* P2_TSDLYSET1 */ +#define RSTV0910_P2_TSDLYSET1 0xf393 +#define FSTV0910_P2_SOFFIFO_SYMBOFFS1 0xf39300ff + +/* P2_TSDLYSET0 */ +#define RSTV0910_P2_TSDLYSET0 0xf394 +#define FSTV0910_P2_SOFFIFO_SYMBOFFS0 0xf39400ff + +/* P2_ERRCTRL1 */ +#define RSTV0910_P2_ERRCTRL1 0xf398 +#define FSTV0910_P2_ERR_SOURCE1 0xf39800f0 +#define FSTV0910_P2_NUM_EVENT1 0xf3980007 + +/* P2_ERRCNT12 */ +#define RSTV0910_P2_ERRCNT12 0xf399 +#define FSTV0910_P2_ERRCNT1_OLDVALUE 0xf3990080 +#define FSTV0910_P2_ERR_CNT12 0xf399007f + +/* P2_ERRCNT11 */ +#define RSTV0910_P2_ERRCNT11 0xf39a +#define FSTV0910_P2_ERR_CNT11 0xf39a00ff + +/* P2_ERRCNT10 */ +#define RSTV0910_P2_ERRCNT10 0xf39b +#define FSTV0910_P2_ERR_CNT10 0xf39b00ff + +/* P2_ERRCTRL2 */ +#define RSTV0910_P2_ERRCTRL2 0xf39c +#define FSTV0910_P2_ERR_SOURCE2 0xf39c00f0 +#define FSTV0910_P2_NUM_EVENT2 0xf39c0007 + +/* P2_ERRCNT22 */ +#define RSTV0910_P2_ERRCNT22 0xf39d +#define FSTV0910_P2_ERRCNT2_OLDVALUE 0xf39d0080 +#define FSTV0910_P2_ERR_CNT22 0xf39d007f + +/* P2_ERRCNT21 */ +#define RSTV0910_P2_ERRCNT21 0xf39e +#define FSTV0910_P2_ERR_CNT21 0xf39e00ff + +/* P2_ERRCNT20 */ +#define RSTV0910_P2_ERRCNT20 0xf39f +#define FSTV0910_P2_ERR_CNT20 0xf39f00ff + +/* P2_FECSPY */ +#define RSTV0910_P2_FECSPY 0xf3a0 +#define FSTV0910_P2_SPY_ENABLE 0xf3a00080 +#define FSTV0910_P2_NO_SYNCBYTE 0xf3a00040 +#define FSTV0910_P2_SERIAL_MODE 0xf3a00020 +#define FSTV0910_P2_UNUSUAL_PACKET 0xf3a00010 +#define FSTV0910_P2_BERMETER_DATAMODE 0xf3a0000c +#define FSTV0910_P2_BERMETER_LMODE 0xf3a00002 +#define FSTV0910_P2_BERMETER_RESET 0xf3a00001 + +/* P2_FSPYCFG */ +#define RSTV0910_P2_FSPYCFG 0xf3a1 +#define FSTV0910_P2_FECSPY_INPUT 0xf3a100c0 +#define FSTV0910_P2_RST_ON_ERROR 0xf3a10020 +#define FSTV0910_P2_ONE_SHOT 0xf3a10010 +#define FSTV0910_P2_I2C_MODE 0xf3a1000c +#define FSTV0910_P2_SPY_HYSTERESIS 0xf3a10003 + +/* P2_FSPYDATA */ +#define RSTV0910_P2_FSPYDATA 0xf3a2 +#define FSTV0910_P2_SPY_STUFFING 0xf3a20080 +#define FSTV0910_P2_SPY_CNULLPKT 0xf3a20020 +#define FSTV0910_P2_SPY_OUTDATA_MODE 0xf3a2001f + +/* P2_FSPYOUT */ +#define RSTV0910_P2_FSPYOUT 0xf3a3 +#define FSTV0910_P2_FSPY_DIRECT 0xf3a30080 +#define FSTV0910_P2_STUFF_MODE 0xf3a30007 + +/* P2_FSTATUS */ +#define RSTV0910_P2_FSTATUS 0xf3a4 +#define FSTV0910_P2_SPY_ENDSIM 0xf3a40080 +#define FSTV0910_P2_VALID_SIM 0xf3a40040 +#define FSTV0910_P2_FOUND_SIGNAL 0xf3a40020 +#define FSTV0910_P2_DSS_SYNCBYTE 0xf3a40010 +#define FSTV0910_P2_RESULT_STATE 0xf3a4000f + +/* P2_FBERCPT4 */ +#define RSTV0910_P2_FBERCPT4 0xf3a8 +#define FSTV0910_P2_FBERMETER_CPT4 0xf3a800ff + +/* P2_FBERCPT3 */ +#define RSTV0910_P2_FBERCPT3 0xf3a9 +#define FSTV0910_P2_FBERMETER_CPT3 0xf3a900ff + +/* P2_FBERCPT2 */ +#define RSTV0910_P2_FBERCPT2 0xf3aa +#define FSTV0910_P2_FBERMETER_CPT2 0xf3aa00ff + +/* P2_FBERCPT1 */ +#define RSTV0910_P2_FBERCPT1 0xf3ab +#define FSTV0910_P2_FBERMETER_CPT1 0xf3ab00ff + +/* P2_FBERCPT0 */ +#define RSTV0910_P2_FBERCPT0 0xf3ac +#define FSTV0910_P2_FBERMETER_CPT0 0xf3ac00ff + +/* P2_FBERERR2 */ +#define RSTV0910_P2_FBERERR2 0xf3ad +#define FSTV0910_P2_FBERMETER_ERR2 0xf3ad00ff + +/* P2_FBERERR1 */ +#define 
RSTV0910_P2_FBERERR1 0xf3ae +#define FSTV0910_P2_FBERMETER_ERR1 0xf3ae00ff + +/* P2_FBERERR0 */ +#define RSTV0910_P2_FBERERR0 0xf3af +#define FSTV0910_P2_FBERMETER_ERR0 0xf3af00ff + +/* P2_FSPYBER */ +#define RSTV0910_P2_FSPYBER 0xf3b2 +#define FSTV0910_P2_FSPYBER_SYNCBYTE 0xf3b20010 +#define FSTV0910_P2_FSPYBER_UNSYNC 0xf3b20008 +#define FSTV0910_P2_FSPYBER_CTIME 0xf3b20007 + +/* P2_SFERROR */ +#define RSTV0910_P2_SFERROR 0xf3c1 +#define FSTV0910_P2_SFEC_REGERR_VIT 0xf3c100ff + +/* P2_SFECSTATUS */ +#define RSTV0910_P2_SFECSTATUS 0xf3c3 +#define FSTV0910_P2_SFEC_ON 0xf3c30080 +#define FSTV0910_P2_SFEC_OFF 0xf3c30040 +#define FSTV0910_P2_LOCKEDSFEC 0xf3c30008 +#define FSTV0910_P2_SFEC_DELOCK 0xf3c30004 +#define FSTV0910_P2_SFEC_DEMODSEL 0xf3c30002 +#define FSTV0910_P2_SFEC_OVFON 0xf3c30001 + +/* P2_SFKDIV12 */ +#define RSTV0910_P2_SFKDIV12 0xf3c4 +#define FSTV0910_P2_SFECKDIV12_MAN 0xf3c40080 + +/* P2_SFKDIV23 */ +#define RSTV0910_P2_SFKDIV23 0xf3c5 +#define FSTV0910_P2_SFECKDIV23_MAN 0xf3c50080 + +/* P2_SFKDIV34 */ +#define RSTV0910_P2_SFKDIV34 0xf3c6 +#define FSTV0910_P2_SFECKDIV34_MAN 0xf3c60080 + +/* P2_SFKDIV56 */ +#define RSTV0910_P2_SFKDIV56 0xf3c7 +#define FSTV0910_P2_SFECKDIV56_MAN 0xf3c70080 + +/* P2_SFKDIV67 */ +#define RSTV0910_P2_SFKDIV67 0xf3c8 +#define FSTV0910_P2_SFECKDIV67_MAN 0xf3c80080 + +/* P2_SFKDIV78 */ +#define RSTV0910_P2_SFKDIV78 0xf3c9 +#define FSTV0910_P2_SFECKDIV78_MAN 0xf3c90080 + +/* P2_SFSTATUS */ +#define RSTV0910_P2_SFSTATUS 0xf3cc +#define FSTV0910_P2_SFEC_LINEOK 0xf3cc0080 +#define FSTV0910_P2_SFEC_ERROR 0xf3cc0040 +#define FSTV0910_P2_SFEC_DATA7 0xf3cc0020 +#define FSTV0910_P2_SFEC_PKTDNBRFAIL 0xf3cc0010 +#define FSTV0910_P2_TSSFEC_DEMODSEL 0xf3cc0008 +#define FSTV0910_P2_SFEC_NOSYNC 0xf3cc0004 +#define FSTV0910_P2_SFEC_UNREGULA 0xf3cc0002 +#define FSTV0910_P2_SFEC_READY 0xf3cc0001 + +/* P2_SFDLYSET2 */ +#define RSTV0910_P2_SFDLYSET2 0xf3d0 +#define FSTV0910_P2_SFEC_DISABLE 0xf3d00002 + +/* P2_SFERRCTRL */ +#define RSTV0910_P2_SFERRCTRL 0xf3d8 +#define FSTV0910_P2_SFEC_ERR_SOURCE 0xf3d800f0 +#define FSTV0910_P2_SFEC_NUM_EVENT 0xf3d80007 + +/* P2_SFERRCNT2 */ +#define RSTV0910_P2_SFERRCNT2 0xf3d9 +#define FSTV0910_P2_SFERRC_OLDVALUE 0xf3d90080 +#define FSTV0910_P2_SFEC_ERR_CNT2 0xf3d9007f + +/* P2_SFERRCNT1 */ +#define RSTV0910_P2_SFERRCNT1 0xf3da +#define FSTV0910_P2_SFEC_ERR_CNT1 0xf3da00ff + +/* P2_SFERRCNT0 */ +#define RSTV0910_P2_SFERRCNT0 0xf3db +#define FSTV0910_P2_SFEC_ERR_CNT0 0xf3db00ff + +/* P1_IQCONST */ +#define RSTV0910_P1_IQCONST 0xf400 +#define FSTV0910_P1_CONSTEL_SELECT 0xf4000060 +#define FSTV0910_P1_IQSYMB_SEL 0xf400001f + +/* P1_NOSCFG */ +#define RSTV0910_P1_NOSCFG 0xf401 +#define FSTV0910_P1_DUMMYPL_NOSDATA 0xf4010020 +#define FSTV0910_P1_NOSPLH_BETA 0xf4010018 +#define FSTV0910_P1_NOSDATA_BETA 0xf4010007 + +/* P1_ISYMB */ +#define RSTV0910_P1_ISYMB 0xf402 +#define FSTV0910_P1_I_SYMBOL 0xf40201ff + +/* P1_QSYMB */ +#define RSTV0910_P1_QSYMB 0xf403 +#define FSTV0910_P1_Q_SYMBOL 0xf40301ff + +/* P1_AGC1CFG */ +#define RSTV0910_P1_AGC1CFG 0xf404 +#define FSTV0910_P1_DC_FROZEN 0xf4040080 +#define FSTV0910_P1_DC_CORRECT 0xf4040040 +#define FSTV0910_P1_AMM_FROZEN 0xf4040020 +#define FSTV0910_P1_AMM_CORRECT 0xf4040010 +#define FSTV0910_P1_QUAD_FROZEN 0xf4040008 +#define FSTV0910_P1_QUAD_CORRECT 0xf4040004 + +/* P1_AGC1CN */ +#define RSTV0910_P1_AGC1CN 0xf406 +#define FSTV0910_P1_AGC1_LOCKED 0xf4060080 +#define FSTV0910_P1_AGC1_MINPOWER 0xf4060010 +#define FSTV0910_P1_AGCOUT_FAST 0xf4060008 +#define FSTV0910_P1_AGCIQ_BETA 0xf4060007 + +/* 
P1_AGC1REF */ +#define RSTV0910_P1_AGC1REF 0xf407 +#define FSTV0910_P1_AGCIQ_REF 0xf40700ff + +/* P1_IDCCOMP */ +#define RSTV0910_P1_IDCCOMP 0xf408 +#define FSTV0910_P1_IAVERAGE_ADJ 0xf40801ff + +/* P1_QDCCOMP */ +#define RSTV0910_P1_QDCCOMP 0xf409 +#define FSTV0910_P1_QAVERAGE_ADJ 0xf40901ff + +/* P1_POWERI */ +#define RSTV0910_P1_POWERI 0xf40a +#define FSTV0910_P1_POWER_I 0xf40a00ff + +/* P1_POWERQ */ +#define RSTV0910_P1_POWERQ 0xf40b +#define FSTV0910_P1_POWER_Q 0xf40b00ff + +/* P1_AGC1AMM */ +#define RSTV0910_P1_AGC1AMM 0xf40c +#define FSTV0910_P1_AMM_VALUE 0xf40c00ff + +/* P1_AGC1QUAD */ +#define RSTV0910_P1_AGC1QUAD 0xf40d +#define FSTV0910_P1_QUAD_VALUE 0xf40d01ff + +/* P1_AGCIQIN1 */ +#define RSTV0910_P1_AGCIQIN1 0xf40e +#define FSTV0910_P1_AGCIQ_VALUE1 0xf40e00ff + +/* P1_AGCIQIN0 */ +#define RSTV0910_P1_AGCIQIN0 0xf40f +#define FSTV0910_P1_AGCIQ_VALUE0 0xf40f00ff + +/* P1_DEMOD */ +#define RSTV0910_P1_DEMOD 0xf410 +#define FSTV0910_P1_MANUALS2_ROLLOFF 0xf4100080 +#define FSTV0910_P1_SPECINV_CONTROL 0xf4100030 +#define FSTV0910_P1_MANUALSX_ROLLOFF 0xf4100004 +#define FSTV0910_P1_ROLLOFF_CONTROL 0xf4100003 + +/* P1_DMDMODCOD */ +#define RSTV0910_P1_DMDMODCOD 0xf411 +#define FSTV0910_P1_MANUAL_MODCOD 0xf4110080 +#define FSTV0910_P1_DEMOD_MODCOD 0xf411007c +#define FSTV0910_P1_DEMOD_TYPE 0xf4110003 + +/* P1_DSTATUS */ +#define RSTV0910_P1_DSTATUS 0xf412 +#define FSTV0910_P1_CAR_LOCK 0xf4120080 +#define FSTV0910_P1_TMGLOCK_QUALITY 0xf4120060 +#define FSTV0910_P1_LOCK_DEFINITIF 0xf4120008 +#define FSTV0910_P1_OVADC_DETECT 0xf4120001 + +/* P1_DSTATUS2 */ +#define RSTV0910_P1_DSTATUS2 0xf413 +#define FSTV0910_P1_DEMOD_DELOCK 0xf4130080 +#define FSTV0910_P1_MODCODRQ_SYNCTAG 0xf4130020 +#define FSTV0910_P1_POLYPH_SATEVENT 0xf4130010 +#define FSTV0910_P1_AGC1_NOSIGNALACK 0xf4130008 +#define FSTV0910_P1_AGC2_OVERFLOW 0xf4130004 +#define FSTV0910_P1_CFR_OVERFLOW 0xf4130002 +#define FSTV0910_P1_GAMMA_OVERUNDER 0xf4130001 + +/* P1_DMDCFGMD */ +#define RSTV0910_P1_DMDCFGMD 0xf414 +#define FSTV0910_P1_DVBS2_ENABLE 0xf4140080 +#define FSTV0910_P1_DVBS1_ENABLE 0xf4140040 +#define FSTV0910_P1_SCAN_ENABLE 0xf4140010 +#define FSTV0910_P1_CFR_AUTOSCAN 0xf4140008 +#define FSTV0910_P1_TUN_RNG 0xf4140003 + +/* P1_DMDCFG2 */ +#define RSTV0910_P1_DMDCFG2 0xf415 +#define FSTV0910_P1_S1S2_SEQUENTIAL 0xf4150040 +#define FSTV0910_P1_INFINITE_RELOCK 0xf4150010 + +/* P1_DMDISTATE */ +#define RSTV0910_P1_DMDISTATE 0xf416 +#define FSTV0910_P1_I2C_NORESETDMODE 0xf4160080 +#define FSTV0910_P1_I2C_DEMOD_MODE 0xf416001f + +/* P1_DMDT0M */ +#define RSTV0910_P1_DMDT0M 0xf417 +#define FSTV0910_P1_DMDT0_MIN 0xf41700ff + +/* P1_DMDSTATE */ +#define RSTV0910_P1_DMDSTATE 0xf41b +#define FSTV0910_P1_HEADER_MODE 0xf41b0060 + +/* P1_DMDFLYW */ +#define RSTV0910_P1_DMDFLYW 0xf41c +#define FSTV0910_P1_I2C_IRQVAL 0xf41c00f0 +#define FSTV0910_P1_FLYWHEEL_CPT 0xf41c000f + +/* P1_DSTATUS3 */ +#define RSTV0910_P1_DSTATUS3 0xf41d +#define FSTV0910_P1_CFR_ZIGZAG 0xf41d0080 +#define FSTV0910_P1_DEMOD_CFGMODE 0xf41d0060 +#define FSTV0910_P1_GAMMA_LOWBAUDRATE 0xf41d0010 + +/* P1_DMDCFG3 */ +#define RSTV0910_P1_DMDCFG3 0xf41e +#define FSTV0910_P1_NOSTOP_FIFOFULL 0xf41e0008 + +/* P1_DMDCFG4 */ +#define RSTV0910_P1_DMDCFG4 0xf41f +#define FSTV0910_P1_DIS_VITLOCK 0xf41f0080 +#define FSTV0910_P1_DIS_CLKENABLE 0xf41f0004 + +/* P1_CORRELMANT */ +#define RSTV0910_P1_CORRELMANT 0xf420 +#define FSTV0910_P1_CORREL_MANT 0xf42000ff + +/* P1_CORRELABS */ +#define RSTV0910_P1_CORRELABS 0xf421 +#define FSTV0910_P1_CORREL_ABS 0xf42100ff + +/* P1_CORRELEXP 
*/ +#define RSTV0910_P1_CORRELEXP 0xf422 +#define FSTV0910_P1_CORREL_ABSEXP 0xf42200f0 +#define FSTV0910_P1_CORREL_EXP 0xf422000f + +/* P1_PLHMODCOD */ +#define RSTV0910_P1_PLHMODCOD 0xf424 +#define FSTV0910_P1_SPECINV_DEMOD 0xf4240080 +#define FSTV0910_P1_PLH_MODCOD 0xf424007c +#define FSTV0910_P1_PLH_TYPE 0xf4240003 + +/* P1_DMDREG */ +#define RSTV0910_P1_DMDREG 0xf425 +#define FSTV0910_P1_DECIM_PLFRAMES 0xf4250001 + +/* P1_AGCNADJ */ +#define RSTV0910_P1_AGCNADJ 0xf426 +#define FSTV0910_P1_RADJOFF_AGC2 0xf4260080 +#define FSTV0910_P1_RADJOFF_AGC1 0xf4260040 +#define FSTV0910_P1_AGC_NADJ 0xf426013f + +/* P1_AGCKS */ +#define RSTV0910_P1_AGCKS 0xf427 +#define FSTV0910_P1_RSADJ_MANUALCFG 0xf4270080 +#define FSTV0910_P1_RSADJ_CCMMODE 0xf4270040 +#define FSTV0910_P1_RADJ_SPSK 0xf427013f + +/* P1_AGCKQ */ +#define RSTV0910_P1_AGCKQ 0xf428 +#define FSTV0910_P1_RADJON_DVBS1 0xf4280040 +#define FSTV0910_P1_RADJ_QPSK 0xf428013f + +/* P1_AGCK8 */ +#define RSTV0910_P1_AGCK8 0xf429 +#define FSTV0910_P1_RADJ_8PSK 0xf429013f + +/* P1_AGCK16 */ +#define RSTV0910_P1_AGCK16 0xf42a +#define FSTV0910_P1_R2ADJOFF_16APSK 0xf42a0040 +#define FSTV0910_P1_R1ADJOFF_16APSK 0xf42a0020 +#define FSTV0910_P1_RADJ_16APSK 0xf42a011f + +/* P1_AGCK32 */ +#define RSTV0910_P1_AGCK32 0xf42b +#define FSTV0910_P1_R3ADJOFF_32APSK 0xf42b0080 +#define FSTV0910_P1_R2ADJOFF_32APSK 0xf42b0040 +#define FSTV0910_P1_R1ADJOFF_32APSK 0xf42b0020 +#define FSTV0910_P1_RADJ_32APSK 0xf42b011f + +/* P1_AGC2O */ +#define RSTV0910_P1_AGC2O 0xf42c +#define FSTV0910_P1_CSTENV_MODE 0xf42c00c0 +#define FSTV0910_P1_AGC2_COEF 0xf42c0007 + +/* P1_AGC2REF */ +#define RSTV0910_P1_AGC2REF 0xf42d +#define FSTV0910_P1_AGC2_REF 0xf42d00ff + +/* P1_AGC1ADJ */ +#define RSTV0910_P1_AGC1ADJ 0xf42e +#define FSTV0910_P1_AGC1_ADJUSTED 0xf42e007f + +/* P1_AGCRSADJ */ +#define RSTV0910_P1_AGCRSADJ 0xf42f +#define FSTV0910_P1_RS_ADJUSTED 0xf42f007f + +/* P1_AGCRQADJ */ +#define RSTV0910_P1_AGCRQADJ 0xf430 +#define FSTV0910_P1_RQ_ADJUSTED 0xf430007f + +/* P1_AGCR8ADJ */ +#define RSTV0910_P1_AGCR8ADJ 0xf431 +#define FSTV0910_P1_R8_ADJUSTED 0xf431007f + +/* P1_AGCR1ADJ */ +#define RSTV0910_P1_AGCR1ADJ 0xf432 +#define FSTV0910_P1_R1_ADJUSTED 0xf432007f + +/* P1_AGCR2ADJ */ +#define RSTV0910_P1_AGCR2ADJ 0xf433 +#define FSTV0910_P1_R2_ADJUSTED 0xf433007f + +/* P1_AGCR3ADJ */ +#define RSTV0910_P1_AGCR3ADJ 0xf434 +#define FSTV0910_P1_R3_ADJUSTED 0xf434007f + +/* P1_AGCREFADJ */ +#define RSTV0910_P1_AGCREFADJ 0xf435 +#define FSTV0910_P1_AGC2REF_ADJUSTED 0xf435007f + +/* P1_AGC2I1 */ +#define RSTV0910_P1_AGC2I1 0xf436 +#define FSTV0910_P1_AGC2_INTEGRATOR1 0xf43600ff + +/* P1_AGC2I0 */ +#define RSTV0910_P1_AGC2I0 0xf437 +#define FSTV0910_P1_AGC2_INTEGRATOR0 0xf43700ff + +/* P1_CARCFG */ +#define RSTV0910_P1_CARCFG 0xf438 +#define FSTV0910_P1_ROTAON 0xf4380004 +#define FSTV0910_P1_PH_DET_ALGO 0xf4380003 + +/* P1_ACLC */ +#define RSTV0910_P1_ACLC 0xf439 +#define FSTV0910_P1_CAR_ALPHA_MANT 0xf4390030 +#define FSTV0910_P1_CAR_ALPHA_EXP 0xf439000f + +/* P1_BCLC */ +#define RSTV0910_P1_BCLC 0xf43a +#define FSTV0910_P1_CAR_BETA_MANT 0xf43a0030 +#define FSTV0910_P1_CAR_BETA_EXP 0xf43a000f + +/* P1_ACLCS2 */ +#define RSTV0910_P1_ACLCS2 0xf43b +#define FSTV0910_P1_CARS2_APLHA_MANTISSE 0xf43b0030 +#define FSTV0910_P1_CARS2_ALPHA_EXP 0xf43b000f + +/* P1_BCLCS2 */ +#define RSTV0910_P1_BCLCS2 0xf43c +#define FSTV0910_P1_CARS2_BETA_MANTISSE 0xf43c0030 +#define FSTV0910_P1_CARS2_BETA_EXP 0xf43c000f + +/* P1_CARFREQ */ +#define RSTV0910_P1_CARFREQ 0xf43d +#define FSTV0910_P1_KC_COARSE_EXP 
0xf43d00f0 +#define FSTV0910_P1_BETA_FREQ 0xf43d000f + +/* P1_CARHDR */ +#define RSTV0910_P1_CARHDR 0xf43e +#define FSTV0910_P1_K_FREQ_HDR 0xf43e00ff + +/* P1_LDT */ +#define RSTV0910_P1_LDT 0xf43f +#define FSTV0910_P1_CARLOCK_THRES 0xf43f01ff + +/* P1_LDT2 */ +#define RSTV0910_P1_LDT2 0xf440 +#define FSTV0910_P1_CARLOCK_THRES2 0xf44001ff + +/* P1_CFRICFG */ +#define RSTV0910_P1_CFRICFG 0xf441 +#define FSTV0910_P1_NEG_CFRSTEP 0xf4410001 + +/* P1_CFRUP1 */ +#define RSTV0910_P1_CFRUP1 0xf442 +#define FSTV0910_P1_CFR_UP1 0xf44201ff + +/* P1_CFRUP0 */ +#define RSTV0910_P1_CFRUP0 0xf443 +#define FSTV0910_P1_CFR_UP0 0xf44300ff + +/* P1_CFRIBASE1 */ +#define RSTV0910_P1_CFRIBASE1 0xf444 +#define FSTV0910_P1_CFRINIT_BASE1 0xf44400ff + +/* P1_CFRIBASE0 */ +#define RSTV0910_P1_CFRIBASE0 0xf445 +#define FSTV0910_P1_CFRINIT_BASE0 0xf44500ff + +/* P1_CFRLOW1 */ +#define RSTV0910_P1_CFRLOW1 0xf446 +#define FSTV0910_P1_CFR_LOW1 0xf44601ff + +/* P1_CFRLOW0 */ +#define RSTV0910_P1_CFRLOW0 0xf447 +#define FSTV0910_P1_CFR_LOW0 0xf44700ff + +/* P1_CFRINIT1 */ +#define RSTV0910_P1_CFRINIT1 0xf448 +#define FSTV0910_P1_CFR_INIT1 0xf44801ff + +/* P1_CFRINIT0 */ +#define RSTV0910_P1_CFRINIT0 0xf449 +#define FSTV0910_P1_CFR_INIT0 0xf44900ff + +/* P1_CFRINC1 */ +#define RSTV0910_P1_CFRINC1 0xf44a +#define FSTV0910_P1_MANUAL_CFRINC 0xf44a0080 +#define FSTV0910_P1_CFR_INC1 0xf44a003f + +/* P1_CFRINC0 */ +#define RSTV0910_P1_CFRINC0 0xf44b +#define FSTV0910_P1_CFR_INC0 0xf44b00ff + +/* P1_CFR2 */ +#define RSTV0910_P1_CFR2 0xf44c +#define FSTV0910_P1_CAR_FREQ2 0xf44c01ff + +/* P1_CFR1 */ +#define RSTV0910_P1_CFR1 0xf44d +#define FSTV0910_P1_CAR_FREQ1 0xf44d00ff + +/* P1_CFR0 */ +#define RSTV0910_P1_CFR0 0xf44e +#define FSTV0910_P1_CAR_FREQ0 0xf44e00ff + +/* P1_LDI */ +#define RSTV0910_P1_LDI 0xf44f +#define FSTV0910_P1_LOCK_DET_INTEGR 0xf44f01ff + +/* P1_TMGCFG */ +#define RSTV0910_P1_TMGCFG 0xf450 +#define FSTV0910_P1_TMGLOCK_BETA 0xf45000c0 +#define FSTV0910_P1_DO_TIMING_CORR 0xf4500010 +#define FSTV0910_P1_TMG_MINFREQ 0xf4500003 + +/* P1_RTC */ +#define RSTV0910_P1_RTC 0xf451 +#define FSTV0910_P1_TMGALPHA_EXP 0xf45100f0 +#define FSTV0910_P1_TMGBETA_EXP 0xf451000f + +/* P1_RTCS2 */ +#define RSTV0910_P1_RTCS2 0xf452 +#define FSTV0910_P1_TMGALPHAS2_EXP 0xf45200f0 +#define FSTV0910_P1_TMGBETAS2_EXP 0xf452000f + +/* P1_TMGTHRISE */ +#define RSTV0910_P1_TMGTHRISE 0xf453 +#define FSTV0910_P1_TMGLOCK_THRISE 0xf45300ff + +/* P1_TMGTHFALL */ +#define RSTV0910_P1_TMGTHFALL 0xf454 +#define FSTV0910_P1_TMGLOCK_THFALL 0xf45400ff + +/* P1_SFRUPRATIO */ +#define RSTV0910_P1_SFRUPRATIO 0xf455 +#define FSTV0910_P1_SFR_UPRATIO 0xf45500ff + +/* P1_SFRLOWRATIO */ +#define RSTV0910_P1_SFRLOWRATIO 0xf456 +#define FSTV0910_P1_SFR_LOWRATIO 0xf45600ff + +/* P1_KTTMG */ +#define RSTV0910_P1_KTTMG 0xf457 +#define FSTV0910_P1_KT_TMG_EXP 0xf45700f0 + +/* P1_KREFTMG */ +#define RSTV0910_P1_KREFTMG 0xf458 +#define FSTV0910_P1_KREF_TMG 0xf45800ff + +/* P1_SFRSTEP */ +#define RSTV0910_P1_SFRSTEP 0xf459 +#define FSTV0910_P1_SFR_SCANSTEP 0xf45900f0 +#define FSTV0910_P1_SFR_CENTERSTEP 0xf459000f + +/* P1_TMGCFG2 */ +#define RSTV0910_P1_TMGCFG2 0xf45a +#define FSTV0910_P1_DIS_AUTOSAMP 0xf45a0008 +#define FSTV0910_P1_SFRRATIO_FINE 0xf45a0001 + +/* P1_KREFTMG2 */ +#define RSTV0910_P1_KREFTMG2 0xf45b +#define FSTV0910_P1_KREF_TMG2 0xf45b00ff + +/* P1_TMGCFG3 */ +#define RSTV0910_P1_TMGCFG3 0xf45d +#define FSTV0910_P1_CONT_TMGCENTER 0xf45d0008 +#define FSTV0910_P1_AUTO_GUP 0xf45d0004 +#define FSTV0910_P1_AUTO_GLOW 0xf45d0002 + +/* P1_SFRINIT1 */ +#define 
RSTV0910_P1_SFRINIT1 0xf45e +#define FSTV0910_P1_SFR_INIT1 0xf45e00ff + +/* P1_SFRINIT0 */ +#define RSTV0910_P1_SFRINIT0 0xf45f +#define FSTV0910_P1_SFR_INIT0 0xf45f00ff + +/* P1_SFRUP1 */ +#define RSTV0910_P1_SFRUP1 0xf460 +#define FSTV0910_P1_SYMB_FREQ_UP1 0xf46000ff + +/* P1_SFRUP0 */ +#define RSTV0910_P1_SFRUP0 0xf461 +#define FSTV0910_P1_SYMB_FREQ_UP0 0xf46100ff + +/* P1_SFRLOW1 */ +#define RSTV0910_P1_SFRLOW1 0xf462 +#define FSTV0910_P1_SYMB_FREQ_LOW1 0xf46200ff + +/* P1_SFRLOW0 */ +#define RSTV0910_P1_SFRLOW0 0xf463 +#define FSTV0910_P1_SYMB_FREQ_LOW0 0xf46300ff + +/* P1_SFR3 */ +#define RSTV0910_P1_SFR3 0xf464 +#define FSTV0910_P1_SYMB_FREQ3 0xf46400ff + +/* P1_SFR2 */ +#define RSTV0910_P1_SFR2 0xf465 +#define FSTV0910_P1_SYMB_FREQ2 0xf46500ff + +/* P1_SFR1 */ +#define RSTV0910_P1_SFR1 0xf466 +#define FSTV0910_P1_SYMB_FREQ1 0xf46600ff + +/* P1_SFR0 */ +#define RSTV0910_P1_SFR0 0xf467 +#define FSTV0910_P1_SYMB_FREQ0 0xf46700ff + +/* P1_TMGREG2 */ +#define RSTV0910_P1_TMGREG2 0xf468 +#define FSTV0910_P1_TMGREG2 0xf46800ff + +/* P1_TMGREG1 */ +#define RSTV0910_P1_TMGREG1 0xf469 +#define FSTV0910_P1_TMGREG1 0xf46900ff + +/* P1_TMGREG0 */ +#define RSTV0910_P1_TMGREG0 0xf46a +#define FSTV0910_P1_TMGREG0 0xf46a00ff + +/* P1_TMGLOCK1 */ +#define RSTV0910_P1_TMGLOCK1 0xf46b +#define FSTV0910_P1_TMGLOCK_LEVEL1 0xf46b01ff + +/* P1_TMGLOCK0 */ +#define RSTV0910_P1_TMGLOCK0 0xf46c +#define FSTV0910_P1_TMGLOCK_LEVEL0 0xf46c00ff + +/* P1_TMGOBS */ +#define RSTV0910_P1_TMGOBS 0xf46d +#define FSTV0910_P1_ROLLOFF_STATUS 0xf46d00c0 + +/* P1_EQUALCFG */ +#define RSTV0910_P1_EQUALCFG 0xf46f +#define FSTV0910_P1_EQUAL_ON 0xf46f0040 +#define FSTV0910_P1_MU_EQUALDFE 0xf46f0007 + +/* P1_EQUAI1 */ +#define RSTV0910_P1_EQUAI1 0xf470 +#define FSTV0910_P1_EQUA_ACCI1 0xf47001ff + +/* P1_EQUAQ1 */ +#define RSTV0910_P1_EQUAQ1 0xf471 +#define FSTV0910_P1_EQUA_ACCQ1 0xf47101ff + +/* P1_EQUAI2 */ +#define RSTV0910_P1_EQUAI2 0xf472 +#define FSTV0910_P1_EQUA_ACCI2 0xf47201ff + +/* P1_EQUAQ2 */ +#define RSTV0910_P1_EQUAQ2 0xf473 +#define FSTV0910_P1_EQUA_ACCQ2 0xf47301ff + +/* P1_EQUAI3 */ +#define RSTV0910_P1_EQUAI3 0xf474 +#define FSTV0910_P1_EQUA_ACCI3 0xf47401ff + +/* P1_EQUAQ3 */ +#define RSTV0910_P1_EQUAQ3 0xf475 +#define FSTV0910_P1_EQUA_ACCQ3 0xf47501ff + +/* P1_EQUAI4 */ +#define RSTV0910_P1_EQUAI4 0xf476 +#define FSTV0910_P1_EQUA_ACCI4 0xf47601ff + +/* P1_EQUAQ4 */ +#define RSTV0910_P1_EQUAQ4 0xf477 +#define FSTV0910_P1_EQUA_ACCQ4 0xf47701ff + +/* P1_EQUAI5 */ +#define RSTV0910_P1_EQUAI5 0xf478 +#define FSTV0910_P1_EQUA_ACCI5 0xf47801ff + +/* P1_EQUAQ5 */ +#define RSTV0910_P1_EQUAQ5 0xf479 +#define FSTV0910_P1_EQUA_ACCQ5 0xf47901ff + +/* P1_EQUAI6 */ +#define RSTV0910_P1_EQUAI6 0xf47a +#define FSTV0910_P1_EQUA_ACCI6 0xf47a01ff + +/* P1_EQUAQ6 */ +#define RSTV0910_P1_EQUAQ6 0xf47b +#define FSTV0910_P1_EQUA_ACCQ6 0xf47b01ff + +/* P1_EQUAI7 */ +#define RSTV0910_P1_EQUAI7 0xf47c +#define FSTV0910_P1_EQUA_ACCI7 0xf47c01ff + +/* P1_EQUAQ7 */ +#define RSTV0910_P1_EQUAQ7 0xf47d +#define FSTV0910_P1_EQUA_ACCQ7 0xf47d01ff + +/* P1_EQUAI8 */ +#define RSTV0910_P1_EQUAI8 0xf47e +#define FSTV0910_P1_EQUA_ACCI8 0xf47e01ff + +/* P1_EQUAQ8 */ +#define RSTV0910_P1_EQUAQ8 0xf47f +#define FSTV0910_P1_EQUA_ACCQ8 0xf47f01ff + +/* P1_NNOSDATAT1 */ +#define RSTV0910_P1_NNOSDATAT1 0xf480 +#define FSTV0910_P1_NOSDATAT_NORMED1 0xf48000ff + +/* P1_NNOSDATAT0 */ +#define RSTV0910_P1_NNOSDATAT0 0xf481 +#define FSTV0910_P1_NOSDATAT_NORMED0 0xf48100ff + +/* P1_NNOSDATA1 */ +#define RSTV0910_P1_NNOSDATA1 0xf482 +#define 
FSTV0910_P1_NOSDATA_NORMED1 0xf48200ff + +/* P1_NNOSDATA0 */ +#define RSTV0910_P1_NNOSDATA0 0xf483 +#define FSTV0910_P1_NOSDATA_NORMED0 0xf48300ff + +/* P1_NNOSPLHT1 */ +#define RSTV0910_P1_NNOSPLHT1 0xf484 +#define FSTV0910_P1_NOSPLHT_NORMED1 0xf48400ff + +/* P1_NNOSPLHT0 */ +#define RSTV0910_P1_NNOSPLHT0 0xf485 +#define FSTV0910_P1_NOSPLHT_NORMED0 0xf48500ff + +/* P1_NNOSPLH1 */ +#define RSTV0910_P1_NNOSPLH1 0xf486 +#define FSTV0910_P1_NOSPLH_NORMED1 0xf48600ff + +/* P1_NNOSPLH0 */ +#define RSTV0910_P1_NNOSPLH0 0xf487 +#define FSTV0910_P1_NOSPLH_NORMED0 0xf48700ff + +/* P1_NOSDATAT1 */ +#define RSTV0910_P1_NOSDATAT1 0xf488 +#define FSTV0910_P1_NOSDATAT_UNNORMED1 0xf48800ff + +/* P1_NOSDATAT0 */ +#define RSTV0910_P1_NOSDATAT0 0xf489 +#define FSTV0910_P1_NOSDATAT_UNNORMED0 0xf48900ff + +/* P1_NNOSFRAME1 */ +#define RSTV0910_P1_NNOSFRAME1 0xf48a +#define FSTV0910_P1_NOSFRAME_NORMED1 0xf48a00ff + +/* P1_NNOSFRAME0 */ +#define RSTV0910_P1_NNOSFRAME0 0xf48b +#define FSTV0910_P1_NOSFRAME_NORMED0 0xf48b00ff + +/* P1_NNOSRAD1 */ +#define RSTV0910_P1_NNOSRAD1 0xf48c +#define FSTV0910_P1_NOSRADIAL_NORMED1 0xf48c00ff + +/* P1_NNOSRAD0 */ +#define RSTV0910_P1_NNOSRAD0 0xf48d +#define FSTV0910_P1_NOSRADIAL_NORMED0 0xf48d00ff + +/* P1_NOSCFGF1 */ +#define RSTV0910_P1_NOSCFGF1 0xf48e +#define FSTV0910_P1_LOWNOISE_MESURE 0xf48e0080 +#define FSTV0910_P1_NOS_DELFRAME 0xf48e0040 +#define FSTV0910_P1_NOSDATA_MODE 0xf48e0030 +#define FSTV0910_P1_FRAMESEL_TYPESEL 0xf48e000c +#define FSTV0910_P1_FRAMESEL_TYPE 0xf48e0003 + +/* P1_NOSCFGF2 */ +#define RSTV0910_P1_NOSCFGF2 0xf48f +#define FSTV0910_P1_DIS_NOSPILOTS 0xf48f0080 +#define FSTV0910_P1_FRAMESEL_MODCODSEL 0xf48f0060 +#define FSTV0910_P1_FRAMESEL_MODCOD 0xf48f001f + +/* P1_CAR2CFG */ +#define RSTV0910_P1_CAR2CFG 0xf490 +#define FSTV0910_P1_ROTA2ON 0xf4900004 +#define FSTV0910_P1_PH_DET_ALGO2 0xf4900003 + +/* P1_CFR2CFR1 */ +#define RSTV0910_P1_CFR2CFR1 0xf491 +#define FSTV0910_P1_EN_S2CAR2CENTER 0xf4910020 +#define FSTV0910_P1_CFR2TOCFR1_BETA 0xf4910007 + +/* P1_CAR3CFG */ +#define RSTV0910_P1_CAR3CFG 0xf492 +#define FSTV0910_P1_CARRIER23_MODE 0xf49200c0 +#define FSTV0910_P1_CAR3INTERM_DVBS1 0xf4920020 +#define FSTV0910_P1_ABAMPLIF_MODE 0xf4920018 +#define FSTV0910_P1_CARRIER3_ALPHA3DL 0xf4920007 + +/* P1_CFR22 */ +#define RSTV0910_P1_CFR22 0xf493 +#define FSTV0910_P1_CAR2_FREQ2 0xf49301ff + +/* P1_CFR21 */ +#define RSTV0910_P1_CFR21 0xf494 +#define FSTV0910_P1_CAR2_FREQ1 0xf49400ff + +/* P1_CFR20 */ +#define RSTV0910_P1_CFR20 0xf495 +#define FSTV0910_P1_CAR2_FREQ0 0xf49500ff + +/* P1_ACLC2S2Q */ +#define RSTV0910_P1_ACLC2S2Q 0xf497 +#define FSTV0910_P1_ENAB_SPSKSYMB 0xf4970080 +#define FSTV0910_P1_CAR2S2_Q_ALPH_M 0xf4970030 +#define FSTV0910_P1_CAR2S2_Q_ALPH_E 0xf497000f + +/* P1_ACLC2S28 */ +#define RSTV0910_P1_ACLC2S28 0xf498 +#define FSTV0910_P1_CAR2S2_8_ALPH_M 0xf4980030 +#define FSTV0910_P1_CAR2S2_8_ALPH_E 0xf498000f + +/* P1_ACLC2S216A */ +#define RSTV0910_P1_ACLC2S216A 0xf499 +#define FSTV0910_P1_CAR2S2_16A_ALPH_M 0xf4990030 +#define FSTV0910_P1_CAR2S2_16A_ALPH_E 0xf499000f + +/* P1_ACLC2S232A */ +#define RSTV0910_P1_ACLC2S232A 0xf49a +#define FSTV0910_P1_CAR2S2_32A_ALPH_M 0xf49a0030 +#define FSTV0910_P1_CAR2S2_32A_ALPH_E 0xf49a000f + +/* P1_BCLC2S2Q */ +#define RSTV0910_P1_BCLC2S2Q 0xf49c +#define FSTV0910_P1_CAR2S2_Q_BETA_M 0xf49c0030 +#define FSTV0910_P1_CAR2S2_Q_BETA_E 0xf49c000f + +/* P1_BCLC2S28 */ +#define RSTV0910_P1_BCLC2S28 0xf49d +#define FSTV0910_P1_CAR2S2_8_BETA_M 0xf49d0030 +#define FSTV0910_P1_CAR2S2_8_BETA_E 0xf49d000f + +/* 
P1_BCLC2S216A */ +#define RSTV0910_P1_BCLC2S216A 0xf49e +#define FSTV0910_P1_DVBS2S216A_NIP 0xf49e0080 +#define FSTV0910_P1_CAR2S2_16A_BETA_M 0xf49e0030 +#define FSTV0910_P1_CAR2S2_16A_BETA_E 0xf49e000f + +/* P1_BCLC2S232A */ +#define RSTV0910_P1_BCLC2S232A 0xf49f +#define FSTV0910_P1_DVBS2S232A_NIP 0xf49f0080 +#define FSTV0910_P1_CAR2S2_32A_BETA_M 0xf49f0030 +#define FSTV0910_P1_CAR2S2_32A_BETA_E 0xf49f000f + +/* P1_PLROOT2 */ +#define RSTV0910_P1_PLROOT2 0xf4ac +#define FSTV0910_P1_PLSCRAMB_MODE 0xf4ac000c +#define FSTV0910_P1_PLSCRAMB_ROOT2 0xf4ac0003 + +/* P1_PLROOT1 */ +#define RSTV0910_P1_PLROOT1 0xf4ad +#define FSTV0910_P1_PLSCRAMB_ROOT1 0xf4ad00ff + +/* P1_PLROOT0 */ +#define RSTV0910_P1_PLROOT0 0xf4ae +#define FSTV0910_P1_PLSCRAMB_ROOT0 0xf4ae00ff + +/* P1_MODCODLST0 */ +#define RSTV0910_P1_MODCODLST0 0xf4b0 +#define FSTV0910_P1_NACCES_MODCODCH 0xf4b00001 + +/* P1_MODCODLST1 */ +#define RSTV0910_P1_MODCODLST1 0xf4b1 +#define FSTV0910_P1_SYMBRATE_FILTER 0xf4b10008 +#define FSTV0910_P1_NRESET_MODCODLST 0xf4b10004 +#define FSTV0910_P1_DIS_32PSK_9_10 0xf4b10003 + +/* P1_MODCODLST2 */ +#define RSTV0910_P1_MODCODLST2 0xf4b2 +#define FSTV0910_P1_DIS_32PSK_8_9 0xf4b200f0 +#define FSTV0910_P1_DIS_32PSK_5_6 0xf4b2000f + +/* P1_MODCODLST3 */ +#define RSTV0910_P1_MODCODLST3 0xf4b3 +#define FSTV0910_P1_DIS_32PSK_4_5 0xf4b300f0 +#define FSTV0910_P1_DIS_32PSK_3_4 0xf4b3000f + +/* P1_MODCODLST4 */ +#define RSTV0910_P1_MODCODLST4 0xf4b4 +#define FSTV0910_P1_DUMMYPL_PILOT 0xf4b40080 +#define FSTV0910_P1_DUMMYPL_NOPILOT 0xf4b40040 +#define FSTV0910_P1_DIS_16PSK_9_10 0xf4b40030 +#define FSTV0910_P1_DIS_16PSK_8_9 0xf4b4000f + +/* P1_MODCODLST5 */ +#define RSTV0910_P1_MODCODLST5 0xf4b5 +#define FSTV0910_P1_DIS_16PSK_5_6 0xf4b500f0 +#define FSTV0910_P1_DIS_16PSK_4_5 0xf4b5000f + +/* P1_MODCODLST6 */ +#define RSTV0910_P1_MODCODLST6 0xf4b6 +#define FSTV0910_P1_DIS_16PSK_3_4 0xf4b600f0 +#define FSTV0910_P1_DIS_16PSK_2_3 0xf4b6000f + +/* P1_MODCODLST7 */ +#define RSTV0910_P1_MODCODLST7 0xf4b7 +#define FSTV0910_P1_MODCOD_NNOSFILTER 0xf4b70080 +#define FSTV0910_P1_DIS_8PSK_9_10 0xf4b70030 +#define FSTV0910_P1_DIS_8PSK_8_9 0xf4b7000f + +/* P1_MODCODLST8 */ +#define RSTV0910_P1_MODCODLST8 0xf4b8 +#define FSTV0910_P1_DIS_8PSK_5_6 0xf4b800f0 +#define FSTV0910_P1_DIS_8PSK_3_4 0xf4b8000f + +/* P1_MODCODLST9 */ +#define RSTV0910_P1_MODCODLST9 0xf4b9 +#define FSTV0910_P1_DIS_8PSK_2_3 0xf4b900f0 +#define FSTV0910_P1_DIS_8PSK_3_5 0xf4b9000f + +/* P1_MODCODLSTA */ +#define RSTV0910_P1_MODCODLSTA 0xf4ba +#define FSTV0910_P1_NOSFILTER_LIMITE 0xf4ba0080 +#define FSTV0910_P1_DIS_QPSK_9_10 0xf4ba0030 +#define FSTV0910_P1_DIS_QPSK_8_9 0xf4ba000f + +/* P1_MODCODLSTB */ +#define RSTV0910_P1_MODCODLSTB 0xf4bb +#define FSTV0910_P1_DIS_QPSK_5_6 0xf4bb00f0 +#define FSTV0910_P1_DIS_QPSK_4_5 0xf4bb000f + +/* P1_MODCODLSTC */ +#define RSTV0910_P1_MODCODLSTC 0xf4bc +#define FSTV0910_P1_DIS_QPSK_3_4 0xf4bc00f0 +#define FSTV0910_P1_DIS_QPSK_2_3 0xf4bc000f + +/* P1_MODCODLSTD */ +#define RSTV0910_P1_MODCODLSTD 0xf4bd +#define FSTV0910_P1_DIS_QPSK_3_5 0xf4bd00f0 +#define FSTV0910_P1_DIS_QPSK_1_2 0xf4bd000f + +/* P1_MODCODLSTE */ +#define RSTV0910_P1_MODCODLSTE 0xf4be +#define FSTV0910_P1_DIS_QPSK_2_5 0xf4be00f0 +#define FSTV0910_P1_DIS_QPSK_1_3 0xf4be000f + +/* P1_MODCODLSTF */ +#define RSTV0910_P1_MODCODLSTF 0xf4bf +#define FSTV0910_P1_DIS_QPSK_1_4 0xf4bf00f0 +#define FSTV0910_P1_DEMOD_INVMODLST 0xf4bf0008 +#define FSTV0910_P1_DEMODOUT_ENABLE 0xf4bf0004 +#define FSTV0910_P1_DDEMOD_NSET 0xf4bf0002 +#define FSTV0910_P1_MODCOD_NSTOCK 
0xf4bf0001 + +/* P1_GAUSSR0 */ +#define RSTV0910_P1_GAUSSR0 0xf4c0 +#define FSTV0910_P1_EN_CCIMODE 0xf4c00080 +#define FSTV0910_P1_R0_GAUSSIEN 0xf4c0007f + +/* P1_CCIR0 */ +#define RSTV0910_P1_CCIR0 0xf4c1 +#define FSTV0910_P1_CCIDETECT_PLHONLY 0xf4c10080 +#define FSTV0910_P1_R0_CCI 0xf4c1007f + +/* P1_CCIQUANT */ +#define RSTV0910_P1_CCIQUANT 0xf4c2 +#define FSTV0910_P1_CCI_BETA 0xf4c200e0 +#define FSTV0910_P1_CCI_QUANT 0xf4c2001f + +/* P1_CCITHRES */ +#define RSTV0910_P1_CCITHRES 0xf4c3 +#define FSTV0910_P1_CCI_THRESHOLD 0xf4c300ff + +/* P1_CCIACC */ +#define RSTV0910_P1_CCIACC 0xf4c4 +#define FSTV0910_P1_CCI_VALUE 0xf4c400ff + +/* P1_DSTATUS4 */ +#define RSTV0910_P1_DSTATUS4 0xf4c5 +#define FSTV0910_P1_RAINFADE_DETECT 0xf4c50080 +#define FSTV0910_P1_NOTHRES2_FAIL 0xf4c50040 +#define FSTV0910_P1_NOTHRES1_FAIL 0xf4c50020 +#define FSTV0910_P1_DMDPROG_ERROR 0xf4c50004 +#define FSTV0910_P1_CSTENV_DETECT 0xf4c50002 +#define FSTV0910_P1_DETECTION_TRIAX 0xf4c50001 + +/* P1_DMDRESCFG */ +#define RSTV0910_P1_DMDRESCFG 0xf4c6 +#define FSTV0910_P1_DMDRES_RESET 0xf4c60080 +#define FSTV0910_P1_DMDRES_STRALL 0xf4c60008 +#define FSTV0910_P1_DMDRES_NEWONLY 0xf4c60004 +#define FSTV0910_P1_DMDRES_NOSTORE 0xf4c60002 + +/* P1_DMDRESADR */ +#define RSTV0910_P1_DMDRESADR 0xf4c7 +#define FSTV0910_P1_DMDRES_VALIDCFR 0xf4c70040 +#define FSTV0910_P1_DMDRES_MEMFULL 0xf4c70030 +#define FSTV0910_P1_DMDRES_RESNBR 0xf4c7000f + +/* P1_DMDRESDATA7 */ +#define RSTV0910_P1_DMDRESDATA7 0xf4c8 +#define FSTV0910_P1_DMDRES_DATA7 0xf4c800ff + +/* P1_DMDRESDATA6 */ +#define RSTV0910_P1_DMDRESDATA6 0xf4c9 +#define FSTV0910_P1_DMDRES_DATA6 0xf4c900ff + +/* P1_DMDRESDATA5 */ +#define RSTV0910_P1_DMDRESDATA5 0xf4ca +#define FSTV0910_P1_DMDRES_DATA5 0xf4ca00ff + +/* P1_DMDRESDATA4 */ +#define RSTV0910_P1_DMDRESDATA4 0xf4cb +#define FSTV0910_P1_DMDRES_DATA4 0xf4cb00ff + +/* P1_DMDRESDATA3 */ +#define RSTV0910_P1_DMDRESDATA3 0xf4cc +#define FSTV0910_P1_DMDRES_DATA3 0xf4cc00ff + +/* P1_DMDRESDATA2 */ +#define RSTV0910_P1_DMDRESDATA2 0xf4cd +#define FSTV0910_P1_DMDRES_DATA2 0xf4cd00ff + +/* P1_DMDRESDATA1 */ +#define RSTV0910_P1_DMDRESDATA1 0xf4ce +#define FSTV0910_P1_DMDRES_DATA1 0xf4ce00ff + +/* P1_DMDRESDATA0 */ +#define RSTV0910_P1_DMDRESDATA0 0xf4cf +#define FSTV0910_P1_DMDRES_DATA0 0xf4cf00ff + +/* P1_FFEI1 */ +#define RSTV0910_P1_FFEI1 0xf4d0 +#define FSTV0910_P1_FFE_ACCI1 0xf4d001ff + +/* P1_FFEQ1 */ +#define RSTV0910_P1_FFEQ1 0xf4d1 +#define FSTV0910_P1_FFE_ACCQ1 0xf4d101ff + +/* P1_FFEI2 */ +#define RSTV0910_P1_FFEI2 0xf4d2 +#define FSTV0910_P1_FFE_ACCI2 0xf4d201ff + +/* P1_FFEQ2 */ +#define RSTV0910_P1_FFEQ2 0xf4d3 +#define FSTV0910_P1_FFE_ACCQ2 0xf4d301ff + +/* P1_FFEI3 */ +#define RSTV0910_P1_FFEI3 0xf4d4 +#define FSTV0910_P1_FFE_ACCI3 0xf4d401ff + +/* P1_FFEQ3 */ +#define RSTV0910_P1_FFEQ3 0xf4d5 +#define FSTV0910_P1_FFE_ACCQ3 0xf4d501ff + +/* P1_FFEI4 */ +#define RSTV0910_P1_FFEI4 0xf4d6 +#define FSTV0910_P1_FFE_ACCI4 0xf4d601ff + +/* P1_FFEQ4 */ +#define RSTV0910_P1_FFEQ4 0xf4d7 +#define FSTV0910_P1_FFE_ACCQ4 0xf4d701ff + +/* P1_FFECFG */ +#define RSTV0910_P1_FFECFG 0xf4d8 +#define FSTV0910_P1_EQUALFFE_ON 0xf4d80040 +#define FSTV0910_P1_EQUAL_USEDSYMB 0xf4d80030 +#define FSTV0910_P1_MU_EQUALFFE 0xf4d80007 + +/* P1_TNRCFG2 */ +#define RSTV0910_P1_TNRCFG2 0xf4e1 +#define FSTV0910_P1_TUN_IQSWAP 0xf4e10080 + +/* P1_SMAPCOEF7 */ +#define RSTV0910_P1_SMAPCOEF7 0xf500 +#define FSTV0910_P1_DIS_QSCALE 0xf5000080 +#define FSTV0910_P1_SMAPCOEF_Q_LLR12 0xf500017f + +/* P1_SMAPCOEF6 */ +#define RSTV0910_P1_SMAPCOEF6 0xf501 +#define 
FSTV0910_P1_DIS_AGC2SCALE 0xf5010080 +#define FSTV0910_P1_ADJ_8PSKLLR1 0xf5010004 +#define FSTV0910_P1_OLD_8PSKLLR1 0xf5010002 +#define FSTV0910_P1_DIS_AB8PSK 0xf5010001 + +/* P1_SMAPCOEF5 */ +#define RSTV0910_P1_SMAPCOEF5 0xf502 +#define FSTV0910_P1_DIS_8SCALE 0xf5020080 +#define FSTV0910_P1_SMAPCOEF_8P_LLR23 0xf502017f + +/* P1_SMAPCOEF4 */ +#define RSTV0910_P1_SMAPCOEF4 0xf503 +#define FSTV0910_P1_SMAPCOEF_16APSK_LLR12 0xf503017f + +/* P1_SMAPCOEF3 */ +#define RSTV0910_P1_SMAPCOEF3 0xf504 +#define FSTV0910_P1_SMAPCOEF_16APSK_LLR34 0xf504017f + +/* P1_SMAPCOEF2 */ +#define RSTV0910_P1_SMAPCOEF2 0xf505 +#define FSTV0910_P1_SMAPCOEF_32APSK_R2R3 0xf50501f0 +#define FSTV0910_P1_SMAPCOEF_32APSK_LLR2 0xf505010f + +/* P1_SMAPCOEF1 */ +#define RSTV0910_P1_SMAPCOEF1 0xf506 +#define FSTV0910_P1_DIS_16SCALE 0xf5060080 +#define FSTV0910_P1_SMAPCOEF_32_LLR34 0xf506017f + +/* P1_SMAPCOEF0 */ +#define RSTV0910_P1_SMAPCOEF0 0xf507 +#define FSTV0910_P1_DIS_32SCALE 0xf5070080 +#define FSTV0910_P1_SMAPCOEF_32_LLR15 0xf507017f + +/* P1_NOSTHRES1 */ +#define RSTV0910_P1_NOSTHRES1 0xf509 +#define FSTV0910_P1_NOS_THRESHOLD1 0xf50900ff + +/* P1_NOSTHRES2 */ +#define RSTV0910_P1_NOSTHRES2 0xf50a +#define FSTV0910_P1_NOS_THRESHOLD2 0xf50a00ff + +/* P1_NOSDIFF1 */ +#define RSTV0910_P1_NOSDIFF1 0xf50b +#define FSTV0910_P1_NOSTHRES1_DIFF 0xf50b00ff + +/* P1_RAINFADE */ +#define RSTV0910_P1_RAINFADE 0xf50c +#define FSTV0910_P1_NOSTHRES_DATAT 0xf50c0080 +#define FSTV0910_P1_RAINFADE_CNLIMIT 0xf50c0070 +#define FSTV0910_P1_RAINFADE_TIMEOUT 0xf50c0007 + +/* P1_NOSRAMCFG */ +#define RSTV0910_P1_NOSRAMCFG 0xf50d +#define FSTV0910_P1_NOSRAM_ACTIVATION 0xf50d0030 +#define FSTV0910_P1_NOSRAM_CNRONLY 0xf50d0008 +#define FSTV0910_P1_NOSRAM_LGNCNR1 0xf50d0007 + +/* P1_NOSRAMPOS */ +#define RSTV0910_P1_NOSRAMPOS 0xf50e +#define FSTV0910_P1_NOSRAM_LGNCNR0 0xf50e00f0 +#define FSTV0910_P1_NOSRAM_VALIDE 0xf50e0004 +#define FSTV0910_P1_NOSRAM_CNRVAL1 0xf50e0003 + +/* P1_NOSRAMVAL */ +#define RSTV0910_P1_NOSRAMVAL 0xf50f +#define FSTV0910_P1_NOSRAM_CNRVAL0 0xf50f00ff + +/* P1_DMDPLHSTAT */ +#define RSTV0910_P1_DMDPLHSTAT 0xf520 +#define FSTV0910_P1_PLH_STATISTIC 0xf52000ff + +/* P1_LOCKTIME3 */ +#define RSTV0910_P1_LOCKTIME3 0xf522 +#define FSTV0910_P1_DEMOD_LOCKTIME3 0xf52200ff + +/* P1_LOCKTIME2 */ +#define RSTV0910_P1_LOCKTIME2 0xf523 +#define FSTV0910_P1_DEMOD_LOCKTIME2 0xf52300ff + +/* P1_LOCKTIME1 */ +#define RSTV0910_P1_LOCKTIME1 0xf524 +#define FSTV0910_P1_DEMOD_LOCKTIME1 0xf52400ff + +/* P1_LOCKTIME0 */ +#define RSTV0910_P1_LOCKTIME0 0xf525 +#define FSTV0910_P1_DEMOD_LOCKTIME0 0xf52500ff + +/* P1_VITSCALE */ +#define RSTV0910_P1_VITSCALE 0xf532 +#define FSTV0910_P1_NVTH_NOSRANGE 0xf5320080 +#define FSTV0910_P1_VERROR_MAXMODE 0xf5320040 +#define FSTV0910_P1_NSLOWSN_LOCKED 0xf5320008 +#define FSTV0910_P1_DIS_RSFLOCK 0xf5320002 + +/* P1_FECM */ +#define RSTV0910_P1_FECM 0xf533 +#define FSTV0910_P1_DSS_DVB 0xf5330080 +#define FSTV0910_P1_DSS_SRCH 0xf5330010 +#define FSTV0910_P1_SYNCVIT 0xf5330002 +#define FSTV0910_P1_IQINV 0xf5330001 + +/* P1_VTH12 */ +#define RSTV0910_P1_VTH12 0xf534 +#define FSTV0910_P1_VTH12 0xf53400ff + +/* P1_VTH23 */ +#define RSTV0910_P1_VTH23 0xf535 +#define FSTV0910_P1_VTH23 0xf53500ff + +/* P1_VTH34 */ +#define RSTV0910_P1_VTH34 0xf536 +#define FSTV0910_P1_VTH34 0xf53600ff + +/* P1_VTH56 */ +#define RSTV0910_P1_VTH56 0xf537 +#define FSTV0910_P1_VTH56 0xf53700ff + +/* P1_VTH67 */ +#define RSTV0910_P1_VTH67 0xf538 +#define FSTV0910_P1_VTH67 0xf53800ff + +/* P1_VTH78 */ +#define RSTV0910_P1_VTH78 0xf539 
+#define FSTV0910_P1_VTH78 0xf53900ff + +/* P1_VITCURPUN */ +#define RSTV0910_P1_VITCURPUN 0xf53a +#define FSTV0910_P1_VIT_CURPUN 0xf53a001f + +/* P1_VERROR */ +#define RSTV0910_P1_VERROR 0xf53b +#define FSTV0910_P1_REGERR_VIT 0xf53b00ff + +/* P1_PRVIT */ +#define RSTV0910_P1_PRVIT 0xf53c +#define FSTV0910_P1_DIS_VTHLOCK 0xf53c0040 +#define FSTV0910_P1_E7_8VIT 0xf53c0020 +#define FSTV0910_P1_E6_7VIT 0xf53c0010 +#define FSTV0910_P1_E5_6VIT 0xf53c0008 +#define FSTV0910_P1_E3_4VIT 0xf53c0004 +#define FSTV0910_P1_E2_3VIT 0xf53c0002 +#define FSTV0910_P1_E1_2VIT 0xf53c0001 + +/* P1_VAVSRVIT */ +#define RSTV0910_P1_VAVSRVIT 0xf53d +#define FSTV0910_P1_AMVIT 0xf53d0080 +#define FSTV0910_P1_FROZENVIT 0xf53d0040 +#define FSTV0910_P1_SNVIT 0xf53d0030 +#define FSTV0910_P1_TOVVIT 0xf53d000c +#define FSTV0910_P1_HYPVIT 0xf53d0003 + +/* P1_VSTATUSVIT */ +#define RSTV0910_P1_VSTATUSVIT 0xf53e +#define FSTV0910_P1_PRFVIT 0xf53e0010 +#define FSTV0910_P1_LOCKEDVIT 0xf53e0008 + +/* P1_VTHINUSE */ +#define RSTV0910_P1_VTHINUSE 0xf53f +#define FSTV0910_P1_VIT_INUSE 0xf53f00ff + +/* P1_KDIV12 */ +#define RSTV0910_P1_KDIV12 0xf540 +#define FSTV0910_P1_K_DIVIDER_12 0xf540007f + +/* P1_KDIV23 */ +#define RSTV0910_P1_KDIV23 0xf541 +#define FSTV0910_P1_K_DIVIDER_23 0xf541007f + +/* P1_KDIV34 */ +#define RSTV0910_P1_KDIV34 0xf542 +#define FSTV0910_P1_K_DIVIDER_34 0xf542007f + +/* P1_KDIV56 */ +#define RSTV0910_P1_KDIV56 0xf543 +#define FSTV0910_P1_K_DIVIDER_56 0xf543007f + +/* P1_KDIV67 */ +#define RSTV0910_P1_KDIV67 0xf544 +#define FSTV0910_P1_K_DIVIDER_67 0xf544007f + +/* P1_KDIV78 */ +#define RSTV0910_P1_KDIV78 0xf545 +#define FSTV0910_P1_K_DIVIDER_78 0xf545007f + +/* P1_TSPIDFLT1 */ +#define RSTV0910_P1_TSPIDFLT1 0xf546 +#define FSTV0910_P1_PIDFLT_ADDR 0xf54600ff + +/* P1_TSPIDFLT0 */ +#define RSTV0910_P1_TSPIDFLT0 0xf547 +#define FSTV0910_P1_PIDFLT_DATA 0xf54700ff + +/* P1_PDELCTRL0 */ +#define RSTV0910_P1_PDELCTRL0 0xf54f +#define FSTV0910_P1_ISIOBS_MODE 0xf54f0030 + +/* P1_PDELCTRL1 */ +#define RSTV0910_P1_PDELCTRL1 0xf550 +#define FSTV0910_P1_INV_MISMASK 0xf5500080 +#define FSTV0910_P1_FILTER_EN 0xf5500020 +#define FSTV0910_P1_HYSTEN 0xf5500008 +#define FSTV0910_P1_HYSTSWRST 0xf5500004 +#define FSTV0910_P1_EN_MIS00 0xf5500002 +#define FSTV0910_P1_ALGOSWRST 0xf5500001 + +/* P1_PDELCTRL2 */ +#define RSTV0910_P1_PDELCTRL2 0xf551 +#define FSTV0910_P1_FORCE_CONTINUOUS 0xf5510080 +#define FSTV0910_P1_RESET_UPKO_COUNT 0xf5510040 +#define FSTV0910_P1_USER_PKTDELIN_NB 0xf5510020 +#define FSTV0910_P1_FRAME_MODE 0xf5510002 + +/* P1_HYSTTHRESH */ +#define RSTV0910_P1_HYSTTHRESH 0xf554 +#define FSTV0910_P1_DELIN_LOCKTHRES 0xf55400f0 +#define FSTV0910_P1_DELIN_UNLOCKTHRES 0xf554000f + +/* P1_UPLCCST0 */ +#define RSTV0910_P1_UPLCCST0 0xf558 +#define FSTV0910_P1_UPL_CST0 0xf55800f8 +#define FSTV0910_P1_UPL_MODE 0xf5580007 + +/* P1_ISIENTRY */ +#define RSTV0910_P1_ISIENTRY 0xf55e +#define FSTV0910_P1_ISI_ENTRY 0xf55e00ff + +/* P1_ISIBITENA */ +#define RSTV0910_P1_ISIBITENA 0xf55f +#define FSTV0910_P1_ISI_BIT_EN 0xf55f00ff + +/* P1_MATSTR1 */ +#define RSTV0910_P1_MATSTR1 0xf560 +#define FSTV0910_P1_MATYPE_CURRENT1 0xf56000ff + +/* P1_MATSTR0 */ +#define RSTV0910_P1_MATSTR0 0xf561 +#define FSTV0910_P1_MATYPE_CURRENT0 0xf56100ff + +/* P1_UPLSTR1 */ +#define RSTV0910_P1_UPLSTR1 0xf562 +#define FSTV0910_P1_UPL_CURRENT1 0xf56200ff + +/* P1_UPLSTR0 */ +#define RSTV0910_P1_UPLSTR0 0xf563 +#define FSTV0910_P1_UPL_CURRENT0 0xf56300ff + +/* P1_DFLSTR1 */ +#define RSTV0910_P1_DFLSTR1 0xf564 +#define FSTV0910_P1_DFL_CURRENT1 0xf56400ff + 
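(Note on the constant encoding used throughout this header, inferred from the listing itself: each RSTV0910_* value is a 16-bit register address, and each FSTV0910_* field constant packs that register address into bits 31:16, a flag byte into bits 15:8 (0x01 appears on fields that seem to hold signed values, such as the EQUA_ACC* accumulators and CAR_FREQ* offsets), and the field's bit mask within the 8-bit register into bits 7:0. The minimal C sketch below, built only from constants copied from this listing, shows how such a constant could decode into a masked read-modify-write; the field_reg/field_mask/field_shift helpers are illustrative assumptions, not part of the patch or of the driver's API.)

#include <stdio.h>
#include <stdint.h>

/* Constants copied verbatim from the listing above. */
#define RSTV0910_P1_TNRCFG2    0xf4e1
#define FSTV0910_P1_TUN_IQSWAP 0xf4e10080

/*
 * Assumed layout of a field constant: 0xAAAASSMM
 *   AAAA = 16-bit register address
 *   SS   = flag byte (0x01 on fields that appear to carry signed values)
 *   MM   = bit mask of the field inside the 8-bit register
 */
static uint16_t field_reg(uint32_t f)  { return (uint16_t)(f >> 16); }
static uint8_t  field_mask(uint32_t f) { return (uint8_t)(f & 0xff); }

static unsigned field_shift(uint32_t f)
{
	unsigned s = 0;
	uint8_t m = field_mask(f);

	while (m && !(m & 1)) {	/* position of the lowest mask bit */
		m >>= 1;
		s++;
	}
	return s;
}

int main(void)
{
	uint32_t f = FSTV0910_P1_TUN_IQSWAP;
	uint8_t reg = 0x00;	/* pretend this was read from register 0xf4e1 */

	/* set the one-bit TUN_IQSWAP field with a masked read-modify-write */
	reg = (uint8_t)((reg & ~field_mask(f)) |
			((1u << field_shift(f)) & field_mask(f)));

	printf("write 0x%02x to register 0x%04x\n", reg, field_reg(f));
	return 0;
}

(Compiled as plain C this prints "write 0x80 to register 0xf4e1", i.e. the mask 0x80 of FSTV0910_P1_TUN_IQSWAP applied to RSTV0910_P1_TNRCFG2; the same decode works for any field constant in this header under the assumed layout.)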
+/* P1_DFLSTR0 */ +#define RSTV0910_P1_DFLSTR0 0xf565 +#define FSTV0910_P1_DFL_CURRENT0 0xf56500ff + +/* P1_SYNCSTR */ +#define RSTV0910_P1_SYNCSTR 0xf566 +#define FSTV0910_P1_SYNC_CURRENT 0xf56600ff + +/* P1_SYNCDSTR1 */ +#define RSTV0910_P1_SYNCDSTR1 0xf567 +#define FSTV0910_P1_SYNCD_CURRENT1 0xf56700ff + +/* P1_SYNCDSTR0 */ +#define RSTV0910_P1_SYNCDSTR0 0xf568 +#define FSTV0910_P1_SYNCD_CURRENT0 0xf56800ff + +/* P1_PDELSTATUS1 */ +#define RSTV0910_P1_PDELSTATUS1 0xf569 +#define FSTV0910_P1_PKTDELIN_DELOCK 0xf5690080 +#define FSTV0910_P1_SYNCDUPDFL_BADDFL 0xf5690040 +#define FSTV0910_P1_UNACCEPTED_STREAM 0xf5690010 +#define FSTV0910_P1_BCH_ERROR_FLAG 0xf5690008 +#define FSTV0910_P1_PKTDELIN_LOCK 0xf5690002 +#define FSTV0910_P1_FIRST_LOCK 0xf5690001 + +/* P1_PDELSTATUS2 */ +#define RSTV0910_P1_PDELSTATUS2 0xf56a +#define FSTV0910_P1_FRAME_MODCOD 0xf56a007c +#define FSTV0910_P1_FRAME_TYPE 0xf56a0003 + +/* P1_BBFCRCKO1 */ +#define RSTV0910_P1_BBFCRCKO1 0xf56b +#define FSTV0910_P1_BBHCRC_KOCNT1 0xf56b00ff + +/* P1_BBFCRCKO0 */ +#define RSTV0910_P1_BBFCRCKO0 0xf56c +#define FSTV0910_P1_BBHCRC_KOCNT0 0xf56c00ff + +/* P1_UPCRCKO1 */ +#define RSTV0910_P1_UPCRCKO1 0xf56d +#define FSTV0910_P1_PKTCRC_KOCNT1 0xf56d00ff + +/* P1_UPCRCKO0 */ +#define RSTV0910_P1_UPCRCKO0 0xf56e +#define FSTV0910_P1_PKTCRC_KOCNT0 0xf56e00ff + +/* P1_PDELCTRL3 */ +#define RSTV0910_P1_PDELCTRL3 0xf56f +#define FSTV0910_P1_NOFIFO_BCHERR 0xf56f0020 +#define FSTV0910_P1_PKTDELIN_DELACMERR 0xf56f0010 + +/* P1_TSSTATEM */ +#define RSTV0910_P1_TSSTATEM 0xf570 +#define FSTV0910_P1_TSDIL_ON 0xf5700080 +#define FSTV0910_P1_TSRS_ON 0xf5700020 +#define FSTV0910_P1_TSDESCRAMB_ON 0xf5700010 +#define FSTV0910_P1_TSFRAME_MODE 0xf5700008 +#define FSTV0910_P1_TS_DISABLE 0xf5700004 +#define FSTV0910_P1_TSACM_MODE 0xf5700002 +#define FSTV0910_P1_TSOUT_NOSYNC 0xf5700001 + +/* P1_TSSTATEL */ +#define RSTV0910_P1_TSSTATEL 0xf571 +#define FSTV0910_P1_TSNOSYNCBYTE 0xf5710080 +#define FSTV0910_P1_TSPARITY_ON 0xf5710040 +#define FSTV0910_P1_TSISSYI_ON 0xf5710008 +#define FSTV0910_P1_TSNPD_ON 0xf5710004 +#define FSTV0910_P1_TSCRC8_ON 0xf5710002 +#define FSTV0910_P1_TSDSS_PACKET 0xf5710001 + +/* P1_TSCFGH */ +#define RSTV0910_P1_TSCFGH 0xf572 +#define FSTV0910_P1_TSFIFO_DVBCI 0xf5720080 +#define FSTV0910_P1_TSFIFO_SERIAL 0xf5720040 +#define FSTV0910_P1_TSFIFO_TEIUPDATE 0xf5720020 +#define FSTV0910_P1_TSFIFO_DUTY50 0xf5720010 +#define FSTV0910_P1_TSFIFO_HSGNLOUT 0xf5720008 +#define FSTV0910_P1_TSFIFO_ERRMODE 0xf5720006 +#define FSTV0910_P1_RST_HWARE 0xf5720001 + +/* P1_TSCFGM */ +#define RSTV0910_P1_TSCFGM 0xf573 +#define FSTV0910_P1_TSFIFO_MANSPEED 0xf57300c0 +#define FSTV0910_P1_TSFIFO_PERMDATA 0xf5730020 +#define FSTV0910_P1_TSFIFO_NONEWSGNL 0xf5730010 +#define FSTV0910_P1_TSFIFO_INVDATA 0xf5730001 + +/* P1_TSCFGL */ +#define RSTV0910_P1_TSCFGL 0xf574 +#define FSTV0910_P1_TSFIFO_BCLKDEL1CK 0xf57400c0 +#define FSTV0910_P1_BCHERROR_MODE 0xf5740030 +#define FSTV0910_P1_TSFIFO_NSGNL2DATA 0xf5740008 +#define FSTV0910_P1_TSFIFO_EMBINDVB 0xf5740004 +#define FSTV0910_P1_TSFIFO_BITSPEED 0xf5740003 + +/* P1_TSSYNC */ +#define RSTV0910_P1_TSSYNC 0xf575 +#define FSTV0910_P1_TSFIFO_SYNCMODE 0xf5750018 + +/* P1_TSINSDELH */ +#define RSTV0910_P1_TSINSDELH 0xf576 +#define FSTV0910_P1_TSDEL_SYNCBYTE 0xf5760080 +#define FSTV0910_P1_TSDEL_XXHEADER 0xf5760040 +#define FSTV0910_P1_TSDEL_DATAFIELD 0xf5760010 +#define FSTV0910_P1_TSINSDEL_RSPARITY 0xf5760002 +#define FSTV0910_P1_TSINSDEL_CRC8 0xf5760001 + +/* P1_TSINSDELM */ +#define RSTV0910_P1_TSINSDELM 0xf577 
+#define FSTV0910_P1_TSINS_EMODCOD 0xf5770010 +#define FSTV0910_P1_TSINS_TOKEN 0xf5770008 +#define FSTV0910_P1_TSINS_XXXERR 0xf5770004 +#define FSTV0910_P1_TSINS_MATYPE 0xf5770002 +#define FSTV0910_P1_TSINS_UPL 0xf5770001 + +/* P1_TSINSDELL */ +#define RSTV0910_P1_TSINSDELL 0xf578 +#define FSTV0910_P1_TSINS_DFL 0xf5780080 +#define FSTV0910_P1_TSINS_SYNCD 0xf5780040 +#define FSTV0910_P1_TSINS_BLOCLEN 0xf5780020 +#define FSTV0910_P1_TSINS_SIGPCOUNT 0xf5780010 +#define FSTV0910_P1_TSINS_FIFO 0xf5780008 +#define FSTV0910_P1_TSINS_REALPACK 0xf5780004 +#define FSTV0910_P1_TSINS_TSCONFIG 0xf5780002 +#define FSTV0910_P1_TSINS_LATENCY 0xf5780001 + +/* P1_TSDIVN */ +#define RSTV0910_P1_TSDIVN 0xf579 +#define FSTV0910_P1_TSFIFO_SPEEDMODE 0xf57900c0 +#define FSTV0910_P1_TSFIFO_RISEOK 0xf5790007 + +/* P1_TSCFG4 */ +#define RSTV0910_P1_TSCFG4 0xf57a +#define FSTV0910_P1_TSFIFO_TSSPEEDMODE 0xf57a00c0 + +/* P1_TSSPEED */ +#define RSTV0910_P1_TSSPEED 0xf580 +#define FSTV0910_P1_TSFIFO_OUTSPEED 0xf58000ff + +/* P1_TSSTATUS */ +#define RSTV0910_P1_TSSTATUS 0xf581 +#define FSTV0910_P1_TSFIFO_LINEOK 0xf5810080 +#define FSTV0910_P1_TSFIFO_ERROR 0xf5810040 +#define FSTV0910_P1_TSFIFO_NOSYNC 0xf5810010 +#define FSTV0910_P1_TSREGUL_ERROR 0xf5810004 +#define FSTV0910_P1_DIL_READY 0xf5810001 + +/* P1_TSSTATUS2 */ +#define RSTV0910_P1_TSSTATUS2 0xf582 +#define FSTV0910_P1_TSFIFO_DEMODSEL 0xf5820080 +#define FSTV0910_P1_TSFIFOSPEED_STORE 0xf5820040 +#define FSTV0910_P1_DILXX_RESET 0xf5820020 +#define FSTV0910_P1_SCRAMBDETECT 0xf5820002 + +/* P1_TSBITRATE1 */ +#define RSTV0910_P1_TSBITRATE1 0xf583 +#define FSTV0910_P1_TSFIFO_BITRATE1 0xf58300ff + +/* P1_TSBITRATE0 */ +#define RSTV0910_P1_TSBITRATE0 0xf584 +#define FSTV0910_P1_TSFIFO_BITRATE0 0xf58400ff + +/* P1_TSPACKLEN1 */ +#define RSTV0910_P1_TSPACKLEN1 0xf585 +#define FSTV0910_P1_TSFIFO_PACKCPT 0xf58500e0 + +/* P1_TSDLY2 */ +#define RSTV0910_P1_TSDLY2 0xf589 +#define FSTV0910_P1_SOFFIFO_LATENCY2 0xf589000f + +/* P1_TSDLY1 */ +#define RSTV0910_P1_TSDLY1 0xf58a +#define FSTV0910_P1_SOFFIFO_LATENCY1 0xf58a00ff + +/* P1_TSDLY0 */ +#define RSTV0910_P1_TSDLY0 0xf58b +#define FSTV0910_P1_SOFFIFO_LATENCY0 0xf58b00ff + +/* P1_TSNPDAV */ +#define RSTV0910_P1_TSNPDAV 0xf58c +#define FSTV0910_P1_TSNPD_AVERAGE 0xf58c00ff + +/* P1_TSBUFSTAT2 */ +#define RSTV0910_P1_TSBUFSTAT2 0xf58d +#define FSTV0910_P1_TSISCR_3BYTES 0xf58d0080 +#define FSTV0910_P1_TSISCR_NEWDATA 0xf58d0040 +#define FSTV0910_P1_TSISCR_BUFSTAT2 0xf58d003f + +/* P1_TSBUFSTAT1 */ +#define RSTV0910_P1_TSBUFSTAT1 0xf58e +#define FSTV0910_P1_TSISCR_BUFSTAT1 0xf58e00ff + +/* P1_TSBUFSTAT0 */ +#define RSTV0910_P1_TSBUFSTAT0 0xf58f +#define FSTV0910_P1_TSISCR_BUFSTAT0 0xf58f00ff + +/* P1_TSDEBUGL */ +#define RSTV0910_P1_TSDEBUGL 0xf591 +#define FSTV0910_P1_TSFIFO_ERROR_EVNT 0xf5910004 +#define FSTV0910_P1_TSFIFO_OVERFLOWM 0xf5910001 + +/* P1_TSDLYSET2 */ +#define RSTV0910_P1_TSDLYSET2 0xf592 +#define FSTV0910_P1_SOFFIFO_OFFSET 0xf59200c0 +#define FSTV0910_P1_HYSTERESIS_THRESHOLD 0xf5920030 +#define FSTV0910_P1_SOFFIFO_SYMBOFFS2 0xf592000f + +/* P1_TSDLYSET1 */ +#define RSTV0910_P1_TSDLYSET1 0xf593 +#define FSTV0910_P1_SOFFIFO_SYMBOFFS1 0xf59300ff + +/* P1_TSDLYSET0 */ +#define RSTV0910_P1_TSDLYSET0 0xf594 +#define FSTV0910_P1_SOFFIFO_SYMBOFFS0 0xf59400ff + +/* P1_ERRCTRL1 */ +#define RSTV0910_P1_ERRCTRL1 0xf598 +#define FSTV0910_P1_ERR_SOURCE1 0xf59800f0 +#define FSTV0910_P1_NUM_EVENT1 0xf5980007 + +/* P1_ERRCNT12 */ +#define RSTV0910_P1_ERRCNT12 0xf599 +#define FSTV0910_P1_ERRCNT1_OLDVALUE 0xf5990080 +#define 
FSTV0910_P1_ERR_CNT12 0xf599007f + +/* P1_ERRCNT11 */ +#define RSTV0910_P1_ERRCNT11 0xf59a +#define FSTV0910_P1_ERR_CNT11 0xf59a00ff + +/* P1_ERRCNT10 */ +#define RSTV0910_P1_ERRCNT10 0xf59b +#define FSTV0910_P1_ERR_CNT10 0xf59b00ff + +/* P1_ERRCTRL2 */ +#define RSTV0910_P1_ERRCTRL2 0xf59c +#define FSTV0910_P1_ERR_SOURCE2 0xf59c00f0 +#define FSTV0910_P1_NUM_EVENT2 0xf59c0007 + +/* P1_ERRCNT22 */ +#define RSTV0910_P1_ERRCNT22 0xf59d +#define FSTV0910_P1_ERRCNT2_OLDVALUE 0xf59d0080 +#define FSTV0910_P1_ERR_CNT22 0xf59d007f + +/* P1_ERRCNT21 */ +#define RSTV0910_P1_ERRCNT21 0xf59e +#define FSTV0910_P1_ERR_CNT21 0xf59e00ff + +/* P1_ERRCNT20 */ +#define RSTV0910_P1_ERRCNT20 0xf59f +#define FSTV0910_P1_ERR_CNT20 0xf59f00ff + +/* P1_FECSPY */ +#define RSTV0910_P1_FECSPY 0xf5a0 +#define FSTV0910_P1_SPY_ENABLE 0xf5a00080 +#define FSTV0910_P1_NO_SYNCBYTE 0xf5a00040 +#define FSTV0910_P1_SERIAL_MODE 0xf5a00020 +#define FSTV0910_P1_UNUSUAL_PACKET 0xf5a00010 +#define FSTV0910_P1_BERMETER_DATAMODE 0xf5a0000c +#define FSTV0910_P1_BERMETER_LMODE 0xf5a00002 +#define FSTV0910_P1_BERMETER_RESET 0xf5a00001 + +/* P1_FSPYCFG */ +#define RSTV0910_P1_FSPYCFG 0xf5a1 +#define FSTV0910_P1_FECSPY_INPUT 0xf5a100c0 +#define FSTV0910_P1_RST_ON_ERROR 0xf5a10020 +#define FSTV0910_P1_ONE_SHOT 0xf5a10010 +#define FSTV0910_P1_I2C_MODE 0xf5a1000c +#define FSTV0910_P1_SPY_HYSTERESIS 0xf5a10003 + +/* P1_FSPYDATA */ +#define RSTV0910_P1_FSPYDATA 0xf5a2 +#define FSTV0910_P1_SPY_STUFFING 0xf5a20080 +#define FSTV0910_P1_SPY_CNULLPKT 0xf5a20020 +#define FSTV0910_P1_SPY_OUTDATA_MODE 0xf5a2001f + +/* P1_FSPYOUT */ +#define RSTV0910_P1_FSPYOUT 0xf5a3 +#define FSTV0910_P1_FSPY_DIRECT 0xf5a30080 +#define FSTV0910_P1_STUFF_MODE 0xf5a30007 + +/* P1_FSTATUS */ +#define RSTV0910_P1_FSTATUS 0xf5a4 +#define FSTV0910_P1_SPY_ENDSIM 0xf5a40080 +#define FSTV0910_P1_VALID_SIM 0xf5a40040 +#define FSTV0910_P1_FOUND_SIGNAL 0xf5a40020 +#define FSTV0910_P1_DSS_SYNCBYTE 0xf5a40010 +#define FSTV0910_P1_RESULT_STATE 0xf5a4000f + +/* P1_FBERCPT4 */ +#define RSTV0910_P1_FBERCPT4 0xf5a8 +#define FSTV0910_P1_FBERMETER_CPT4 0xf5a800ff + +/* P1_FBERCPT3 */ +#define RSTV0910_P1_FBERCPT3 0xf5a9 +#define FSTV0910_P1_FBERMETER_CPT3 0xf5a900ff + +/* P1_FBERCPT2 */ +#define RSTV0910_P1_FBERCPT2 0xf5aa +#define FSTV0910_P1_FBERMETER_CPT2 0xf5aa00ff + +/* P1_FBERCPT1 */ +#define RSTV0910_P1_FBERCPT1 0xf5ab +#define FSTV0910_P1_FBERMETER_CPT1 0xf5ab00ff + +/* P1_FBERCPT0 */ +#define RSTV0910_P1_FBERCPT0 0xf5ac +#define FSTV0910_P1_FBERMETER_CPT0 0xf5ac00ff + +/* P1_FBERERR2 */ +#define RSTV0910_P1_FBERERR2 0xf5ad +#define FSTV0910_P1_FBERMETER_ERR2 0xf5ad00ff + +/* P1_FBERERR1 */ +#define RSTV0910_P1_FBERERR1 0xf5ae +#define FSTV0910_P1_FBERMETER_ERR1 0xf5ae00ff + +/* P1_FBERERR0 */ +#define RSTV0910_P1_FBERERR0 0xf5af +#define FSTV0910_P1_FBERMETER_ERR0 0xf5af00ff + +/* P1_FSPYBER */ +#define RSTV0910_P1_FSPYBER 0xf5b2 +#define FSTV0910_P1_FSPYBER_SYNCBYTE 0xf5b20010 +#define FSTV0910_P1_FSPYBER_UNSYNC 0xf5b20008 +#define FSTV0910_P1_FSPYBER_CTIME 0xf5b20007 + +/* P1_SFERROR */ +#define RSTV0910_P1_SFERROR 0xf5c1 +#define FSTV0910_P1_SFEC_REGERR_VIT 0xf5c100ff + +/* P1_SFECSTATUS */ +#define RSTV0910_P1_SFECSTATUS 0xf5c3 +#define FSTV0910_P1_SFEC_ON 0xf5c30080 +#define FSTV0910_P1_SFEC_OFF 0xf5c30040 +#define FSTV0910_P1_LOCKEDSFEC 0xf5c30008 +#define FSTV0910_P1_SFEC_DELOCK 0xf5c30004 +#define FSTV0910_P1_SFEC_DEMODSEL 0xf5c30002 +#define FSTV0910_P1_SFEC_OVFON 0xf5c30001 + +/* P1_SFKDIV12 */ +#define RSTV0910_P1_SFKDIV12 0xf5c4 +#define FSTV0910_P1_SFECKDIV12_MAN 
0xf5c40080 + +/* P1_SFKDIV23 */ +#define RSTV0910_P1_SFKDIV23 0xf5c5 +#define FSTV0910_P1_SFECKDIV23_MAN 0xf5c50080 + +/* P1_SFKDIV34 */ +#define RSTV0910_P1_SFKDIV34 0xf5c6 +#define FSTV0910_P1_SFECKDIV34_MAN 0xf5c60080 + +/* P1_SFKDIV56 */ +#define RSTV0910_P1_SFKDIV56 0xf5c7 +#define FSTV0910_P1_SFECKDIV56_MAN 0xf5c70080 + +/* P1_SFKDIV67 */ +#define RSTV0910_P1_SFKDIV67 0xf5c8 +#define FSTV0910_P1_SFECKDIV67_MAN 0xf5c80080 + +/* P1_SFKDIV78 */ +#define RSTV0910_P1_SFKDIV78 0xf5c9 +#define FSTV0910_P1_SFECKDIV78_MAN 0xf5c90080 + +/* P1_SFSTATUS */ +#define RSTV0910_P1_SFSTATUS 0xf5cc +#define FSTV0910_P1_SFEC_LINEOK 0xf5cc0080 +#define FSTV0910_P1_SFEC_ERROR 0xf5cc0040 +#define FSTV0910_P1_SFEC_DATA7 0xf5cc0020 +#define FSTV0910_P1_SFEC_PKTDNBRFAIL 0xf5cc0010 +#define FSTV0910_P1_TSSFEC_DEMODSEL 0xf5cc0008 +#define FSTV0910_P1_SFEC_NOSYNC 0xf5cc0004 +#define FSTV0910_P1_SFEC_UNREGULA 0xf5cc0002 +#define FSTV0910_P1_SFEC_READY 0xf5cc0001 + +/* P1_SFDLYSET2 */ +#define RSTV0910_P1_SFDLYSET2 0xf5d0 +#define FSTV0910_P1_SFEC_DISABLE 0xf5d00002 + +/* P1_SFERRCTRL */ +#define RSTV0910_P1_SFERRCTRL 0xf5d8 +#define FSTV0910_P1_SFEC_ERR_SOURCE 0xf5d800f0 +#define FSTV0910_P1_SFEC_NUM_EVENT 0xf5d80007 + +/* P1_SFERRCNT2 */ +#define RSTV0910_P1_SFERRCNT2 0xf5d9 +#define FSTV0910_P1_SFERRC_OLDVALUE 0xf5d90080 +#define FSTV0910_P1_SFEC_ERR_CNT2 0xf5d9007f + +/* P1_SFERRCNT1 */ +#define RSTV0910_P1_SFERRCNT1 0xf5da +#define FSTV0910_P1_SFEC_ERR_CNT1 0xf5da00ff + +/* P1_SFERRCNT0 */ +#define RSTV0910_P1_SFERRCNT0 0xf5db +#define FSTV0910_P1_SFEC_ERR_CNT0 0xf5db00ff + +/* RCCFG2 */ +#define RSTV0910_RCCFG2 0xf600 +#define FSTV0910_TSRCFIFO_DVBCI 0xf6000080 +#define FSTV0910_TSRCFIFO_SERIAL 0xf6000040 +#define FSTV0910_TSRCFIFO_DISABLE 0xf6000020 +#define FSTV0910_TSFIFO_2TORC 0xf6000010 +#define FSTV0910_TSRCFIFO_HSGNLOUT 0xf6000008 +#define FSTV0910_TSRCFIFO_ERRMODE 0xf6000006 + +/* RCCFG1 */ +#define RSTV0910_RCCFG1 0xf601 +#define FSTV0910_TSRCFIFO_MANSPEED 0xf60100c0 +#define FSTV0910_TSRCFIFO_PERMDATA 0xf6010020 +#define FSTV0910_TSRCFIFO_NONEWSGNL 0xf6010010 +#define FSTV0910_TSRCFIFO_INVDATA 0xf6010001 + +/* RCCFG0 */ +#define RSTV0910_RCCFG0 0xf602 +#define FSTV0910_TSRCFIFO_BCLKDEL1CK 0xf60200c0 +#define FSTV0910_TSRCFIFO_DUTY50 0xf6020010 +#define FSTV0910_TSRCFIFO_NSGNL2DATA 0xf6020008 +#define FSTV0910_TSRCFIFO_NPDSGNL 0xf6020004 + +/* RCINSDEL2 */ +#define RSTV0910_RCINSDEL2 0xf603 +#define FSTV0910_TSRCDEL_SYNCBYTE 0xf6030080 +#define FSTV0910_TSRCDEL_XXHEADER 0xf6030040 +#define FSTV0910_TSRCDEL_BBHEADER 0xf6030020 +#define FSTV0910_TSRCDEL_DATAFIELD 0xf6030010 +#define FSTV0910_TSRCINSDEL_ISCR 0xf6030008 +#define FSTV0910_TSRCINSDEL_NPD 0xf6030004 +#define FSTV0910_TSRCINSDEL_RSPARITY 0xf6030002 +#define FSTV0910_TSRCINSDEL_CRC8 0xf6030001 + +/* RCINSDEL1 */ +#define RSTV0910_RCINSDEL1 0xf604 +#define FSTV0910_TSRCINS_BBPADDING 0xf6040080 +#define FSTV0910_TSRCINS_BCHFEC 0xf6040040 +#define FSTV0910_TSRCINS_EMODCOD 0xf6040010 +#define FSTV0910_TSRCINS_TOKEN 0xf6040008 +#define FSTV0910_TSRCINS_XXXERR 0xf6040004 +#define FSTV0910_TSRCINS_MATYPE 0xf6040002 +#define FSTV0910_TSRCINS_UPL 0xf6040001 + +/* RCINSDEL0 */ +#define RSTV0910_RCINSDEL0 0xf605 +#define FSTV0910_TSRCINS_DFL 0xf6050080 +#define FSTV0910_TSRCINS_SYNCD 0xf6050040 +#define FSTV0910_TSRCINS_BLOCLEN 0xf6050020 +#define FSTV0910_TSRCINS_SIGPCOUNT 0xf6050010 +#define FSTV0910_TSRCINS_FIFO 0xf6050008 +#define FSTV0910_TSRCINS_REALPACK 0xf6050004 +#define FSTV0910_TSRCINS_TSCONFIG 0xf6050002 +#define FSTV0910_TSRCINS_LATENCY 
0xf6050001 + +/* RCSTATUS */ +#define RSTV0910_RCSTATUS 0xf606 +#define FSTV0910_TSRCFIFO_LINEOK 0xf6060080 +#define FSTV0910_TSRCFIFO_ERROR 0xf6060040 +#define FSTV0910_TSRCREGUL_ERROR 0xf6060010 +#define FSTV0910_TSRCFIFO_DEMODSEL 0xf6060008 +#define FSTV0910_TSRCFIFOSPEED_STORE 0xf6060004 +#define FSTV0910_TSRCSPEED_IMPOSSIBLE 0xf6060001 + +/* RCSPEED */ +#define RSTV0910_RCSPEED 0xf607 +#define FSTV0910_TSRCFIFO_OUTSPEED 0xf60700ff + +/* TSGENERAL */ +#define RSTV0910_TSGENERAL 0xf630 +#define FSTV0910_TSFIFO_DISTS2PAR 0xf6300040 +#define FSTV0910_MUXSTREAM_OUTMODE 0xf6300008 +#define FSTV0910_TSFIFO_PERMPARAL 0xf6300006 + +/* P1_DISIRQCFG */ +#define RSTV0910_P1_DISIRQCFG 0xf700 +#define FSTV0910_P1_ENRXEND 0xf7000040 +#define FSTV0910_P1_ENRXFIFO8B 0xf7000020 +#define FSTV0910_P1_ENTRFINISH 0xf7000010 +#define FSTV0910_P1_ENTIMEOUT 0xf7000008 +#define FSTV0910_P1_ENTXEND 0xf7000004 +#define FSTV0910_P1_ENTXFIFO64B 0xf7000002 +#define FSTV0910_P1_ENGAPBURST 0xf7000001 + +/* P1_DISIRQSTAT */ +#define RSTV0910_P1_DISIRQSTAT 0xf701 +#define FSTV0910_P1_IRQRXEND 0xf7010040 +#define FSTV0910_P1_IRQRXFIFO8B 0xf7010020 +#define FSTV0910_P1_IRQTRFINISH 0xf7010010 +#define FSTV0910_P1_IRQTIMEOUT 0xf7010008 +#define FSTV0910_P1_IRQTXEND 0xf7010004 +#define FSTV0910_P1_IRQTXFIFO64B 0xf7010002 +#define FSTV0910_P1_IRQGAPBURST 0xf7010001 + +/* P1_DISTXCFG */ +#define RSTV0910_P1_DISTXCFG 0xf702 +#define FSTV0910_P1_DISTX_RESET 0xf7020080 +#define FSTV0910_P1_TIM_OFF 0xf7020040 +#define FSTV0910_P1_TIM_CMD 0xf7020030 +#define FSTV0910_P1_ENVELOP 0xf7020008 +#define FSTV0910_P1_DIS_PRECHARGE 0xf7020004 +#define FSTV0910_P1_DISEQC_MODE 0xf7020003 + +/* P1_DISTXSTATUS */ +#define RSTV0910_P1_DISTXSTATUS 0xf703 +#define FSTV0910_P1_TX_FIFO_FULL 0xf7030040 +#define FSTV0910_P1_TX_IDLE 0xf7030020 +#define FSTV0910_P1_GAP_BURST 0xf7030010 +#define FSTV0910_P1_TX_FIFO64B 0xf7030008 +#define FSTV0910_P1_TX_END 0xf7030004 +#define FSTV0910_P1_TR_TIMEOUT 0xf7030002 +#define FSTV0910_P1_TR_FINISH 0xf7030001 + +/* P1_DISTXBYTES */ +#define RSTV0910_P1_DISTXBYTES 0xf704 +#define FSTV0910_P1_TXFIFO_BYTES 0xf70400ff + +/* P1_DISTXFIFO */ +#define RSTV0910_P1_DISTXFIFO 0xf705 +#define FSTV0910_P1_DISEQC_TX_FIFO 0xf70500ff + +/* P1_DISTXF22 */ +#define RSTV0910_P1_DISTXF22 0xf706 +#define FSTV0910_P1_F22TX 0xf70600ff + +/* P1_DISTIMEOCFG */ +#define RSTV0910_P1_DISTIMEOCFG 0xf708 +#define FSTV0910_P1_RXCHOICE 0xf7080006 +#define FSTV0910_P1_TIMEOUT_OFF 0xf7080001 + +/* P1_DISTIMEOUT */ +#define RSTV0910_P1_DISTIMEOUT 0xf709 +#define FSTV0910_P1_TIMEOUT_COUNT 0xf70900ff + +/* P1_DISRXCFG */ +#define RSTV0910_P1_DISRXCFG 0xf70a +#define FSTV0910_P1_DISRX_RESET 0xf70a0080 +#define FSTV0910_P1_EXTENVELOP 0xf70a0040 +#define FSTV0910_P1_PINSELECT 0xf70a0038 +#define FSTV0910_P1_IGNORE_SHORT22K 0xf70a0004 +#define FSTV0910_P1_SIGNED_RXIN 0xf70a0002 +#define FSTV0910_P1_DISRX_ON 0xf70a0001 + +/* P1_DISRXSTAT1 */ +#define RSTV0910_P1_DISRXSTAT1 0xf70b +#define FSTV0910_P1_RXEND 0xf70b0080 +#define FSTV0910_P1_RXACTIVE 0xf70b0040 +#define FSTV0910_P1_RXDETECT 0xf70b0020 +#define FSTV0910_P1_CONTTONE 0xf70b0010 +#define FSTV0910_P1_8BFIFOREADY 0xf70b0008 +#define FSTV0910_P1_FIFOEMPTY 0xf70b0004 + +/* P1_DISRXSTAT0 */ +#define RSTV0910_P1_DISRXSTAT0 0xf70c +#define FSTV0910_P1_RXFAIL 0xf70c0080 +#define FSTV0910_P1_FIFOPFAIL 0xf70c0040 +#define FSTV0910_P1_RXNONBYTE 0xf70c0020 +#define FSTV0910_P1_FIFOOVF 0xf70c0010 +#define FSTV0910_P1_SHORT22K 0xf70c0008 +#define FSTV0910_P1_RXMSGLOST 0xf70c0004 + +/* P1_DISRXBYTES */ 
+#define RSTV0910_P1_DISRXBYTES 0xf70d +#define FSTV0910_P1_RXFIFO_BYTES 0xf70d001f + +/* P1_DISRXPARITY1 */ +#define RSTV0910_P1_DISRXPARITY1 0xf70e +#define FSTV0910_P1_DISRX_PARITY1 0xf70e00ff + +/* P1_DISRXPARITY0 */ +#define RSTV0910_P1_DISRXPARITY0 0xf70f +#define FSTV0910_P1_DISRX_PARITY0 0xf70f00ff + +/* P1_DISRXFIFO */ +#define RSTV0910_P1_DISRXFIFO 0xf710 +#define FSTV0910_P1_DISEQC_RX_FIFO 0xf71000ff + +/* P1_DISRXDC1 */ +#define RSTV0910_P1_DISRXDC1 0xf711 +#define FSTV0910_P1_DC_VALUE1 0xf7110103 + +/* P1_DISRXDC0 */ +#define RSTV0910_P1_DISRXDC0 0xf712 +#define FSTV0910_P1_DC_VALUE0 0xf71200ff + +/* P1_DISRXF221 */ +#define RSTV0910_P1_DISRXF221 0xf714 +#define FSTV0910_P1_F22RX1 0xf714000f + +/* P1_DISRXF220 */ +#define RSTV0910_P1_DISRXF220 0xf715 +#define FSTV0910_P1_F22RX0 0xf71500ff + +/* P1_DISRXF100 */ +#define RSTV0910_P1_DISRXF100 0xf716 +#define FSTV0910_P1_F100RX 0xf71600ff + +/* P1_DISRXSHORT22K */ +#define RSTV0910_P1_DISRXSHORT22K 0xf71c +#define FSTV0910_P1_SHORT22K_LENGTH 0xf71c001f + +/* P1_ACRPRESC */ +#define RSTV0910_P1_ACRPRESC 0xf71e +#define FSTV0910_P1_ACR_PRESC 0xf71e0007 + +/* P1_ACRDIV */ +#define RSTV0910_P1_ACRDIV 0xf71f +#define FSTV0910_P1_ACR_DIV 0xf71f00ff + +/* P2_DISIRQCFG */ +#define RSTV0910_P2_DISIRQCFG 0xf740 +#define FSTV0910_P2_ENRXEND 0xf7400040 +#define FSTV0910_P2_ENRXFIFO8B 0xf7400020 +#define FSTV0910_P2_ENTRFINISH 0xf7400010 +#define FSTV0910_P2_ENTIMEOUT 0xf7400008 +#define FSTV0910_P2_ENTXEND 0xf7400004 +#define FSTV0910_P2_ENTXFIFO64B 0xf7400002 +#define FSTV0910_P2_ENGAPBURST 0xf7400001 + +/* P2_DISIRQSTAT */ +#define RSTV0910_P2_DISIRQSTAT 0xf741 +#define FSTV0910_P2_IRQRXEND 0xf7410040 +#define FSTV0910_P2_IRQRXFIFO8B 0xf7410020 +#define FSTV0910_P2_IRQTRFINISH 0xf7410010 +#define FSTV0910_P2_IRQTIMEOUT 0xf7410008 +#define FSTV0910_P2_IRQTXEND 0xf7410004 +#define FSTV0910_P2_IRQTXFIFO64B 0xf7410002 +#define FSTV0910_P2_IRQGAPBURST 0xf7410001 + +/* P2_DISTXCFG */ +#define RSTV0910_P2_DISTXCFG 0xf742 +#define FSTV0910_P2_DISTX_RESET 0xf7420080 +#define FSTV0910_P2_TIM_OFF 0xf7420040 +#define FSTV0910_P2_TIM_CMD 0xf7420030 +#define FSTV0910_P2_ENVELOP 0xf7420008 +#define FSTV0910_P2_DIS_PRECHARGE 0xf7420004 +#define FSTV0910_P2_DISEQC_MODE 0xf7420003 + +/* P2_DISTXSTATUS */ +#define RSTV0910_P2_DISTXSTATUS 0xf743 +#define FSTV0910_P2_TX_FIFO_FULL 0xf7430040 +#define FSTV0910_P2_TX_IDLE 0xf7430020 +#define FSTV0910_P2_GAP_BURST 0xf7430010 +#define FSTV0910_P2_TX_FIFO64B 0xf7430008 +#define FSTV0910_P2_TX_END 0xf7430004 +#define FSTV0910_P2_TR_TIMEOUT 0xf7430002 +#define FSTV0910_P2_TR_FINISH 0xf7430001 + +/* P2_DISTXBYTES */ +#define RSTV0910_P2_DISTXBYTES 0xf744 +#define FSTV0910_P2_TXFIFO_BYTES 0xf74400ff + +/* P2_DISTXFIFO */ +#define RSTV0910_P2_DISTXFIFO 0xf745 +#define FSTV0910_P2_DISEQC_TX_FIFO 0xf74500ff + +/* P2_DISTXF22 */ +#define RSTV0910_P2_DISTXF22 0xf746 +#define FSTV0910_P2_F22TX 0xf74600ff + +/* P2_DISTIMEOCFG */ +#define RSTV0910_P2_DISTIMEOCFG 0xf748 +#define FSTV0910_P2_RXCHOICE 0xf7480006 +#define FSTV0910_P2_TIMEOUT_OFF 0xf7480001 + +/* P2_DISTIMEOUT */ +#define RSTV0910_P2_DISTIMEOUT 0xf749 +#define FSTV0910_P2_TIMEOUT_COUNT 0xf74900ff + +/* P2_DISRXCFG */ +#define RSTV0910_P2_DISRXCFG 0xf74a +#define FSTV0910_P2_DISRX_RESET 0xf74a0080 +#define FSTV0910_P2_EXTENVELOP 0xf74a0040 +#define FSTV0910_P2_PINSELECT 0xf74a0038 +#define FSTV0910_P2_IGNORE_SHORT22K 0xf74a0004 +#define FSTV0910_P2_SIGNED_RXIN 0xf74a0002 +#define FSTV0910_P2_DISRX_ON 0xf74a0001 + +/* P2_DISRXSTAT1 */ +#define RSTV0910_P2_DISRXSTAT1 
0xf74b +#define FSTV0910_P2_RXEND 0xf74b0080 +#define FSTV0910_P2_RXACTIVE 0xf74b0040 +#define FSTV0910_P2_RXDETECT 0xf74b0020 +#define FSTV0910_P2_CONTTONE 0xf74b0010 +#define FSTV0910_P2_8BFIFOREADY 0xf74b0008 +#define FSTV0910_P2_FIFOEMPTY 0xf74b0004 + +/* P2_DISRXSTAT0 */ +#define RSTV0910_P2_DISRXSTAT0 0xf74c +#define FSTV0910_P2_RXFAIL 0xf74c0080 +#define FSTV0910_P2_FIFOPFAIL 0xf74c0040 +#define FSTV0910_P2_RXNONBYTE 0xf74c0020 +#define FSTV0910_P2_FIFOOVF 0xf74c0010 +#define FSTV0910_P2_SHORT22K 0xf74c0008 +#define FSTV0910_P2_RXMSGLOST 0xf74c0004 + +/* P2_DISRXBYTES */ +#define RSTV0910_P2_DISRXBYTES 0xf74d +#define FSTV0910_P2_RXFIFO_BYTES 0xf74d001f + +/* P2_DISRXPARITY1 */ +#define RSTV0910_P2_DISRXPARITY1 0xf74e +#define FSTV0910_P2_DISRX_PARITY1 0xf74e00ff + +/* P2_DISRXPARITY0 */ +#define RSTV0910_P2_DISRXPARITY0 0xf74f +#define FSTV0910_P2_DISRX_PARITY0 0xf74f00ff + +/* P2_DISRXFIFO */ +#define RSTV0910_P2_DISRXFIFO 0xf750 +#define FSTV0910_P2_DISEQC_RX_FIFO 0xf75000ff + +/* P2_DISRXDC1 */ +#define RSTV0910_P2_DISRXDC1 0xf751 +#define FSTV0910_P2_DC_VALUE1 0xf7510103 + +/* P2_DISRXDC0 */ +#define RSTV0910_P2_DISRXDC0 0xf752 +#define FSTV0910_P2_DC_VALUE0 0xf75200ff + +/* P2_DISRXF221 */ +#define RSTV0910_P2_DISRXF221 0xf754 +#define FSTV0910_P2_F22RX1 0xf754000f + +/* P2_DISRXF220 */ +#define RSTV0910_P2_DISRXF220 0xf755 +#define FSTV0910_P2_F22RX0 0xf75500ff + +/* P2_DISRXF100 */ +#define RSTV0910_P2_DISRXF100 0xf756 +#define FSTV0910_P2_F100RX 0xf75600ff + +/* P2_DISRXSHORT22K */ +#define RSTV0910_P2_DISRXSHORT22K 0xf75c +#define FSTV0910_P2_SHORT22K_LENGTH 0xf75c001f + +/* P2_ACRPRESC */ +#define RSTV0910_P2_ACRPRESC 0xf75e +#define FSTV0910_P2_ACR_PRESC 0xf75e0007 + +/* P2_ACRDIV */ +#define RSTV0910_P2_ACRDIV 0xf75f +#define FSTV0910_P2_ACR_DIV 0xf75f00ff + +/* P1_NBITER_NF1 */ +#define RSTV0910_P1_NBITER_NF1 0xfa00 +#define FSTV0910_P1_NBITER_NF_QPSK_1_4 0xfa0000ff + +/* P1_NBITER_NF2 */ +#define RSTV0910_P1_NBITER_NF2 0xfa01 +#define FSTV0910_P1_NBITER_NF_QPSK_1_3 0xfa0100ff + +/* P1_NBITER_NF3 */ +#define RSTV0910_P1_NBITER_NF3 0xfa02 +#define FSTV0910_P1_NBITER_NF_QPSK_2_5 0xfa0200ff + +/* P1_NBITER_NF4 */ +#define RSTV0910_P1_NBITER_NF4 0xfa03 +#define FSTV0910_P1_NBITER_NF_QPSK_1_2 0xfa0300ff + +/* P1_NBITER_NF5 */ +#define RSTV0910_P1_NBITER_NF5 0xfa04 +#define FSTV0910_P1_NBITER_NF_QPSK_3_5 0xfa0400ff + +/* P1_NBITER_NF6 */ +#define RSTV0910_P1_NBITER_NF6 0xfa05 +#define FSTV0910_P1_NBITER_NF_QPSK_2_3 0xfa0500ff + +/* P1_NBITER_NF7 */ +#define RSTV0910_P1_NBITER_NF7 0xfa06 +#define FSTV0910_P1_NBITER_NF_QPSK_3_4 0xfa0600ff + +/* P1_NBITER_NF8 */ +#define RSTV0910_P1_NBITER_NF8 0xfa07 +#define FSTV0910_P1_NBITER_NF_QPSK_4_5 0xfa0700ff + +/* P1_NBITER_NF9 */ +#define RSTV0910_P1_NBITER_NF9 0xfa08 +#define FSTV0910_P1_NBITER_NF_QPSK_5_6 0xfa0800ff + +/* P1_NBITER_NF10 */ +#define RSTV0910_P1_NBITER_NF10 0xfa09 +#define FSTV0910_P1_NBITER_NF_QPSK_8_9 0xfa0900ff + +/* P1_NBITER_NF11 */ +#define RSTV0910_P1_NBITER_NF11 0xfa0a +#define FSTV0910_P1_NBITER_NF_QPSK_9_10 0xfa0a00ff + +/* P1_NBITER_NF12 */ +#define RSTV0910_P1_NBITER_NF12 0xfa0b +#define FSTV0910_P1_NBITER_NF_8PSK_3_5 0xfa0b00ff + +/* P1_NBITER_NF13 */ +#define RSTV0910_P1_NBITER_NF13 0xfa0c +#define FSTV0910_P1_NBITER_NF_8PSK_2_3 0xfa0c00ff + +/* P1_NBITER_NF14 */ +#define RSTV0910_P1_NBITER_NF14 0xfa0d +#define FSTV0910_P1_NBITER_NF_8PSK_3_4 0xfa0d00ff + +/* P1_NBITER_NF15 */ +#define RSTV0910_P1_NBITER_NF15 0xfa0e +#define FSTV0910_P1_NBITER_NF_8PSK_5_6 0xfa0e00ff + +/* P1_NBITER_NF16 */ +#define 
RSTV0910_P1_NBITER_NF16 0xfa0f +#define FSTV0910_P1_NBITER_NF_8PSK_8_9 0xfa0f00ff + +/* P1_NBITER_NF17 */ +#define RSTV0910_P1_NBITER_NF17 0xfa10 +#define FSTV0910_P1_NBITER_NF_8PSK_9_10 0xfa1000ff + +/* P1_NBITER_NF18 */ +#define RSTV0910_P1_NBITER_NF18 0xfa11 +#define FSTV0910_P1_NBITER_NF_16APSK_2_3 0xfa1100ff + +/* P1_NBITER_NF19 */ +#define RSTV0910_P1_NBITER_NF19 0xfa12 +#define FSTV0910_P1_NBITER_NF_16APSK_3_4 0xfa1200ff + +/* P1_NBITER_NF20 */ +#define RSTV0910_P1_NBITER_NF20 0xfa13 +#define FSTV0910_P1_NBITER_NF_16APSK_4_5 0xfa1300ff + +/* P1_NBITER_NF21 */ +#define RSTV0910_P1_NBITER_NF21 0xfa14 +#define FSTV0910_P1_NBITER_NF_16APSK_5_6 0xfa1400ff + +/* P1_NBITER_NF22 */ +#define RSTV0910_P1_NBITER_NF22 0xfa15 +#define FSTV0910_P1_NBITER_NF_16APSK_8_9 0xfa1500ff + +/* P1_NBITER_NF23 */ +#define RSTV0910_P1_NBITER_NF23 0xfa16 +#define FSTV0910_P1_NBITER_NF_16APSK_9_10 0xfa1600ff + +/* P1_NBITER_NF24 */ +#define RSTV0910_P1_NBITER_NF24 0xfa17 +#define FSTV0910_P1_NBITER_NF_32APSK_3_4 0xfa1700ff + +/* P1_NBITER_NF25 */ +#define RSTV0910_P1_NBITER_NF25 0xfa18 +#define FSTV0910_P1_NBITER_NF_32APSK_4_5 0xfa1800ff + +/* P1_NBITER_NF26 */ +#define RSTV0910_P1_NBITER_NF26 0xfa19 +#define FSTV0910_P1_NBITER_NF_32APSK_5_6 0xfa1900ff + +/* P1_NBITER_NF27 */ +#define RSTV0910_P1_NBITER_NF27 0xfa1a +#define FSTV0910_P1_NBITER_NF_32APSK_8_9 0xfa1a00ff + +/* P1_NBITER_NF28 */ +#define RSTV0910_P1_NBITER_NF28 0xfa1b +#define FSTV0910_P1_NBITER_NF_32APSK_9_10 0xfa1b00ff + +/* P1_NBITER_SF1 */ +#define RSTV0910_P1_NBITER_SF1 0xfa1c +#define FSTV0910_P1_NBITER_SF_QPSK_1_4 0xfa1c00ff + +/* P1_NBITER_SF2 */ +#define RSTV0910_P1_NBITER_SF2 0xfa1d +#define FSTV0910_P1_NBITER_SF_QPSK_1_3 0xfa1d00ff + +/* P1_NBITER_SF3 */ +#define RSTV0910_P1_NBITER_SF3 0xfa1e +#define FSTV0910_P1_NBITER_SF_QPSK_2_5 0xfa1e00ff + +/* P1_NBITER_SF4 */ +#define RSTV0910_P1_NBITER_SF4 0xfa1f +#define FSTV0910_P1_NBITER_SF_QPSK_1_2 0xfa1f00ff + +/* P1_NBITER_SF5 */ +#define RSTV0910_P1_NBITER_SF5 0xfa20 +#define FSTV0910_P1_NBITER_SF_QPSK_3_5 0xfa2000ff + +/* P1_NBITER_SF6 */ +#define RSTV0910_P1_NBITER_SF6 0xfa21 +#define FSTV0910_P1_NBITER_SF_QPSK_2_3 0xfa2100ff + +/* P1_NBITER_SF7 */ +#define RSTV0910_P1_NBITER_SF7 0xfa22 +#define FSTV0910_P1_NBITER_SF_QPSK_3_4 0xfa2200ff + +/* P1_NBITER_SF8 */ +#define RSTV0910_P1_NBITER_SF8 0xfa23 +#define FSTV0910_P1_NBITER_SF_QPSK_4_5 0xfa2300ff + +/* P1_NBITER_SF9 */ +#define RSTV0910_P1_NBITER_SF9 0xfa24 +#define FSTV0910_P1_NBITER_SF_QPSK_5_6 0xfa2400ff + +/* P1_NBITER_SF10 */ +#define RSTV0910_P1_NBITER_SF10 0xfa25 +#define FSTV0910_P1_NBITER_SF_QPSK_8_9 0xfa2500ff + +/* P1_NBITER_SF12 */ +#define RSTV0910_P1_NBITER_SF12 0xfa26 +#define FSTV0910_P1_NBITER_SF_8PSK_3_5 0xfa2600ff + +/* P1_NBITER_SF13 */ +#define RSTV0910_P1_NBITER_SF13 0xfa27 +#define FSTV0910_P1_NBITER_SF_8PSK_2_3 0xfa2700ff + +/* P1_NBITER_SF14 */ +#define RSTV0910_P1_NBITER_SF14 0xfa28 +#define FSTV0910_P1_NBITER_SF_8PSK_3_4 0xfa2800ff + +/* P1_NBITER_SF15 */ +#define RSTV0910_P1_NBITER_SF15 0xfa29 +#define FSTV0910_P1_NBITER_SF_8PSK_5_6 0xfa2900ff + +/* P1_NBITER_SF16 */ +#define RSTV0910_P1_NBITER_SF16 0xfa2a +#define FSTV0910_P1_NBITER_SF_8PSK_8_9 0xfa2a00ff + +/* P1_NBITER_SF18 */ +#define RSTV0910_P1_NBITER_SF18 0xfa2b +#define FSTV0910_P1_NBITER_SF_16APSK_2_3 0xfa2b00ff + +/* P1_NBITER_SF19 */ +#define RSTV0910_P1_NBITER_SF19 0xfa2c +#define FSTV0910_P1_NBITER_SF_16APSK_3_4 0xfa2c00ff + +/* P1_NBITER_SF20 */ +#define RSTV0910_P1_NBITER_SF20 0xfa2d +#define FSTV0910_P1_NBITER_SF_16APSK_4_5 0xfa2d00ff + +/* 
P1_NBITER_SF21 */ +#define RSTV0910_P1_NBITER_SF21 0xfa2e +#define FSTV0910_P1_NBITER_SF_16APSK_5_6 0xfa2e00ff + +/* P1_NBITER_SF22 */ +#define RSTV0910_P1_NBITER_SF22 0xfa2f +#define FSTV0910_P1_NBITER_SF_16APSK_8_9 0xfa2f00ff + +/* P1_NBITER_SF24 */ +#define RSTV0910_P1_NBITER_SF24 0xfa30 +#define FSTV0910_P1_NBITER_SF_32APSK_3_4 0xfa3000ff + +/* P1_NBITER_SF25 */ +#define RSTV0910_P1_NBITER_SF25 0xfa31 +#define FSTV0910_P1_NBITER_SF_32APSK_4_5 0xfa3100ff + +/* P1_NBITER_SF26 */ +#define RSTV0910_P1_NBITER_SF26 0xfa32 +#define FSTV0910_P1_NBITER_SF_32APSK_5_6 0xfa3200ff + +/* P1_NBITER_SF27 */ +#define RSTV0910_P1_NBITER_SF27 0xfa33 +#define FSTV0910_P1_NBITER_SF_32APSK_8_9 0xfa3300ff + +/* SELSATUR6 */ +#define RSTV0910_SELSATUR6 0xfa34 +#define FSTV0910_SSAT_SF27 0xfa340008 +#define FSTV0910_SSAT_SF26 0xfa340004 +#define FSTV0910_SSAT_SF25 0xfa340002 +#define FSTV0910_SSAT_SF24 0xfa340001 + +/* SELSATUR5 */ +#define RSTV0910_SELSATUR5 0xfa35 +#define FSTV0910_SSAT_SF22 0xfa350080 +#define FSTV0910_SSAT_SF21 0xfa350040 +#define FSTV0910_SSAT_SF20 0xfa350020 +#define FSTV0910_SSAT_SF19 0xfa350010 +#define FSTV0910_SSAT_SF18 0xfa350008 +#define FSTV0910_SSAT_SF16 0xfa350004 +#define FSTV0910_SSAT_SF15 0xfa350002 +#define FSTV0910_SSAT_SF14 0xfa350001 + +/* SELSATUR4 */ +#define RSTV0910_SELSATUR4 0xfa36 +#define FSTV0910_SSAT_SF13 0xfa360080 +#define FSTV0910_SSAT_SF12 0xfa360040 +#define FSTV0910_SSAT_SF10 0xfa360020 +#define FSTV0910_SSAT_SF9 0xfa360010 +#define FSTV0910_SSAT_SF8 0xfa360008 +#define FSTV0910_SSAT_SF7 0xfa360004 +#define FSTV0910_SSAT_SF6 0xfa360002 +#define FSTV0910_SSAT_SF5 0xfa360001 + +/* SELSATUR3 */ +#define RSTV0910_SELSATUR3 0xfa37 +#define FSTV0910_SSAT_SF4 0xfa370080 +#define FSTV0910_SSAT_SF3 0xfa370040 +#define FSTV0910_SSAT_SF2 0xfa370020 +#define FSTV0910_SSAT_SF1 0xfa370010 +#define FSTV0910_SSAT_NF28 0xfa370008 +#define FSTV0910_SSAT_NF27 0xfa370004 +#define FSTV0910_SSAT_NF26 0xfa370002 +#define FSTV0910_SSAT_NF25 0xfa370001 + +/* SELSATUR2 */ +#define RSTV0910_SELSATUR2 0xfa38 +#define FSTV0910_SSAT_NF24 0xfa380080 +#define FSTV0910_SSAT_NF23 0xfa380040 +#define FSTV0910_SSAT_NF22 0xfa380020 +#define FSTV0910_SSAT_NF21 0xfa380010 +#define FSTV0910_SSAT_NF20 0xfa380008 +#define FSTV0910_SSAT_NF19 0xfa380004 +#define FSTV0910_SSAT_NF18 0xfa380002 +#define FSTV0910_SSAT_NF17 0xfa380001 + +/* SELSATUR1 */ +#define RSTV0910_SELSATUR1 0xfa39 +#define FSTV0910_SSAT_NF16 0xfa390080 +#define FSTV0910_SSAT_NF15 0xfa390040 +#define FSTV0910_SSAT_NF14 0xfa390020 +#define FSTV0910_SSAT_NF13 0xfa390010 +#define FSTV0910_SSAT_NF12 0xfa390008 +#define FSTV0910_SSAT_NF11 0xfa390004 +#define FSTV0910_SSAT_NF10 0xfa390002 +#define FSTV0910_SSAT_NF9 0xfa390001 + +/* SELSATUR0 */ +#define RSTV0910_SELSATUR0 0xfa3a +#define FSTV0910_SSAT_NF8 0xfa3a0080 +#define FSTV0910_SSAT_NF7 0xfa3a0040 +#define FSTV0910_SSAT_NF6 0xfa3a0020 +#define FSTV0910_SSAT_NF5 0xfa3a0010 +#define FSTV0910_SSAT_NF4 0xfa3a0008 +#define FSTV0910_SSAT_NF3 0xfa3a0004 +#define FSTV0910_SSAT_NF2 0xfa3a0002 +#define FSTV0910_SSAT_NF1 0xfa3a0001 + +/* GAINLLR_NF1 */ +#define RSTV0910_GAINLLR_NF1 0xfa40 +#define FSTV0910_GAINLLR_NF_QPSK_1_4 0xfa40007f + +/* GAINLLR_NF2 */ +#define RSTV0910_GAINLLR_NF2 0xfa41 +#define FSTV0910_GAINLLR_NF_QPSK_1_3 0xfa41007f + +/* GAINLLR_NF3 */ +#define RSTV0910_GAINLLR_NF3 0xfa42 +#define FSTV0910_GAINLLR_NF_QPSK_2_5 0xfa42007f + +/* GAINLLR_NF4 */ +#define RSTV0910_GAINLLR_NF4 0xfa43 +#define FSTV0910_GAINLLR_NF_QPSK_1_2 0xfa43007f + +/* GAINLLR_NF5 */ +#define 
RSTV0910_GAINLLR_NF5 0xfa44 +#define FSTV0910_GAINLLR_NF_QPSK_3_5 0xfa44007f + +/* GAINLLR_NF6 */ +#define RSTV0910_GAINLLR_NF6 0xfa45 +#define FSTV0910_GAINLLR_NF_QPSK_2_3 0xfa45007f + +/* GAINLLR_NF7 */ +#define RSTV0910_GAINLLR_NF7 0xfa46 +#define FSTV0910_GAINLLR_NF_QPSK_3_4 0xfa46007f + +/* GAINLLR_NF8 */ +#define RSTV0910_GAINLLR_NF8 0xfa47 +#define FSTV0910_GAINLLR_NF_QPSK_4_5 0xfa47007f + +/* GAINLLR_NF9 */ +#define RSTV0910_GAINLLR_NF9 0xfa48 +#define FSTV0910_GAINLLR_NF_QPSK_5_6 0xfa48007f + +/* GAINLLR_NF10 */ +#define RSTV0910_GAINLLR_NF10 0xfa49 +#define FSTV0910_GAINLLR_NF_QPSK_8_9 0xfa49007f + +/* GAINLLR_NF11 */ +#define RSTV0910_GAINLLR_NF11 0xfa4a +#define FSTV0910_GAINLLR_NF_QPSK_9_10 0xfa4a007f + +/* GAINLLR_NF12 */ +#define RSTV0910_GAINLLR_NF12 0xfa4b +#define FSTV0910_GAINLLR_NF_8PSK_3_5 0xfa4b007f + +/* GAINLLR_NF13 */ +#define RSTV0910_GAINLLR_NF13 0xfa4c +#define FSTV0910_GAINLLR_NF_8PSK_2_3 0xfa4c007f + +/* GAINLLR_NF14 */ +#define RSTV0910_GAINLLR_NF14 0xfa4d +#define FSTV0910_GAINLLR_NF_8PSK_3_4 0xfa4d007f + +/* GAINLLR_NF15 */ +#define RSTV0910_GAINLLR_NF15 0xfa4e +#define FSTV0910_GAINLLR_NF_8PSK_5_6 0xfa4e007f + +/* GAINLLR_NF16 */ +#define RSTV0910_GAINLLR_NF16 0xfa4f +#define FSTV0910_GAINLLR_NF_8PSK_8_9 0xfa4f007f + +/* GAINLLR_NF17 */ +#define RSTV0910_GAINLLR_NF17 0xfa50 +#define FSTV0910_GAINLLR_NF_8PSK_9_10 0xfa50007f + +/* GAINLLR_NF18 */ +#define RSTV0910_GAINLLR_NF18 0xfa51 +#define FSTV0910_GAINLLR_NF_16APSK_2_3 0xfa51007f + +/* GAINLLR_NF19 */ +#define RSTV0910_GAINLLR_NF19 0xfa52 +#define FSTV0910_GAINLLR_NF_16APSK_3_4 0xfa52007f + +/* GAINLLR_NF20 */ +#define RSTV0910_GAINLLR_NF20 0xfa53 +#define FSTV0910_GAINLLR_NF_16APSK_4_5 0xfa53007f + +/* GAINLLR_NF21 */ +#define RSTV0910_GAINLLR_NF21 0xfa54 +#define FSTV0910_GAINLLR_NF_16APSK_5_6 0xfa54007f + +/* GAINLLR_NF22 */ +#define RSTV0910_GAINLLR_NF22 0xfa55 +#define FSTV0910_GAINLLR_NF_16APSK_8_9 0xfa55007f + +/* GAINLLR_NF23 */ +#define RSTV0910_GAINLLR_NF23 0xfa56 +#define FSTV0910_GAINLLR_NF_16APSK_9_10 0xfa56007f + +/* GAINLLR_NF24 */ +#define RSTV0910_GAINLLR_NF24 0xfa57 +#define FSTV0910_GAINLLR_NF_32APSK_3_4 0xfa57007f + +/* GAINLLR_NF25 */ +#define RSTV0910_GAINLLR_NF25 0xfa58 +#define FSTV0910_GAINLLR_NF_32APSK_4_5 0xfa58007f + +/* GAINLLR_NF26 */ +#define RSTV0910_GAINLLR_NF26 0xfa59 +#define FSTV0910_GAINLLR_NF_32APSK_5_6 0xfa59007f + +/* GAINLLR_NF27 */ +#define RSTV0910_GAINLLR_NF27 0xfa5a +#define FSTV0910_GAINLLR_NF_32APSK_8_9 0xfa5a007f + +/* GAINLLR_NF28 */ +#define RSTV0910_GAINLLR_NF28 0xfa5b +#define FSTV0910_GAINLLR_NF_32APSK_9_10 0xfa5b007f + +/* GAINLLR_SF1 */ +#define RSTV0910_GAINLLR_SF1 0xfa5c +#define FSTV0910_GAINLLR_SF_QPSK_1_4 0xfa5c007f + +/* GAINLLR_SF2 */ +#define RSTV0910_GAINLLR_SF2 0xfa5d +#define FSTV0910_GAINLLR_SF_QPSK_1_3 0xfa5d007f + +/* GAINLLR_SF3 */ +#define RSTV0910_GAINLLR_SF3 0xfa5e +#define FSTV0910_GAINLLR_SF_QPSK_2_5 0xfa5e007f + +/* GAINLLR_SF4 */ +#define RSTV0910_GAINLLR_SF4 0xfa5f +#define FSTV0910_GAINLLR_SF_QPSK_1_2 0xfa5f007f + +/* GAINLLR_SF5 */ +#define RSTV0910_GAINLLR_SF5 0xfa60 +#define FSTV0910_GAINLLR_SF_QPSK_3_5 0xfa60007f + +/* GAINLLR_SF6 */ +#define RSTV0910_GAINLLR_SF6 0xfa61 +#define FSTV0910_GAINLLR_SF_QPSK_2_3 0xfa61007f + +/* GAINLLR_SF7 */ +#define RSTV0910_GAINLLR_SF7 0xfa62 +#define FSTV0910_GAINLLR_SF_QPSK_3_4 0xfa62007f + +/* GAINLLR_SF8 */ +#define RSTV0910_GAINLLR_SF8 0xfa63 +#define FSTV0910_GAINLLR_SF_QPSK_4_5 0xfa63007f + +/* GAINLLR_SF9 */ +#define RSTV0910_GAINLLR_SF9 0xfa64 +#define FSTV0910_GAINLLR_SF_QPSK_5_6 
0xfa64007f + +/* GAINLLR_SF10 */ +#define RSTV0910_GAINLLR_SF10 0xfa65 +#define FSTV0910_GAINLLR_SF_QPSK_8_9 0xfa65007f + +/* GAINLLR_SF12 */ +#define RSTV0910_GAINLLR_SF12 0xfa66 +#define FSTV0910_GAINLLR_SF_8PSK_3_5 0xfa66007f + +/* GAINLLR_SF13 */ +#define RSTV0910_GAINLLR_SF13 0xfa67 +#define FSTV0910_GAINLLR_SF_8PSK_2_3 0xfa67007f + +/* GAINLLR_SF14 */ +#define RSTV0910_GAINLLR_SF14 0xfa68 +#define FSTV0910_GAINLLR_SF_8PSK_3_4 0xfa68007f + +/* GAINLLR_SF15 */ +#define RSTV0910_GAINLLR_SF15 0xfa69 +#define FSTV0910_GAINLLR_SF_8PSK_5_6 0xfa69007f + +/* GAINLLR_SF16 */ +#define RSTV0910_GAINLLR_SF16 0xfa6a +#define FSTV0910_GAINLLR_SF_8PSK_8_9 0xfa6a007f + +/* GAINLLR_SF18 */ +#define RSTV0910_GAINLLR_SF18 0xfa6b +#define FSTV0910_GAINLLR_SF_16APSK_2_3 0xfa6b007f + +/* GAINLLR_SF19 */ +#define RSTV0910_GAINLLR_SF19 0xfa6c +#define FSTV0910_GAINLLR_SF_16APSK_3_4 0xfa6c007f + +/* GAINLLR_SF20 */ +#define RSTV0910_GAINLLR_SF20 0xfa6d +#define FSTV0910_GAINLLR_SF_16APSK_4_5 0xfa6d007f + +/* GAINLLR_SF21 */ +#define RSTV0910_GAINLLR_SF21 0xfa6e +#define FSTV0910_GAINLLR_SF_16APSK_5_6 0xfa6e007f + +/* GAINLLR_SF22 */ +#define RSTV0910_GAINLLR_SF22 0xfa6f +#define FSTV0910_GAINLLR_SF_16APSK_8_9 0xfa6f007f + +/* GAINLLR_SF24 */ +#define RSTV0910_GAINLLR_SF24 0xfa70 +#define FSTV0910_GAINLLR_SF_32APSK_3_4 0xfa70007f + +/* GAINLLR_SF25 */ +#define RSTV0910_GAINLLR_SF25 0xfa71 +#define FSTV0910_GAINLLR_SF_32APSK_4_5 0xfa71007f + +/* GAINLLR_SF26 */ +#define RSTV0910_GAINLLR_SF26 0xfa72 +#define FSTV0910_GAINLLR_SF_32APSK_5_6 0xfa72007f + +/* GAINLLR_SF27 */ +#define RSTV0910_GAINLLR_SF27 0xfa73 +#define FSTV0910_GAINLLR_SF_32APSK_8_9 0xfa73007f + +/* CFGEXT */ +#define RSTV0910_CFGEXT 0xfa80 +#define FSTV0910_BYPBCH 0xfa800040 +#define FSTV0910_BYPLDPC 0xfa800020 +#define FSTV0910_SHORTMULT 0xfa800004 + +/* GENCFG */ +#define RSTV0910_GENCFG 0xfa86 +#define FSTV0910_BROADCAST 0xfa860010 +#define FSTV0910_CROSSINPUT 0xfa860002 +#define FSTV0910_DDEMOD 0xfa860001 + +/* LDPCERR1 */ +#define RSTV0910_LDPCERR1 0xfa96 +#define FSTV0910_LDPC_ERRORS1 0xfa9600ff + +/* LDPCERR0 */ +#define RSTV0910_LDPCERR0 0xfa97 +#define FSTV0910_LDPC_ERRORS0 0xfa9700ff + +/* BCHERR */ +#define RSTV0910_BCHERR 0xfa98 +#define FSTV0910_ERRORFLAG 0xfa980010 +#define FSTV0910_BCH_ERRORS_COUNTER 0xfa98000f + +/* P1_MAXEXTRAITER */ +#define RSTV0910_P1_MAXEXTRAITER 0xfab1 +#define FSTV0910_P1_MAX_EXTRA_ITER 0xfab100ff + +/* P2_MAXEXTRAITER */ +#define RSTV0910_P2_MAXEXTRAITER 0xfab6 +#define FSTV0910_P2_MAX_EXTRA_ITER 0xfab600ff + +/* P1_STATUSITER */ +#define RSTV0910_P1_STATUSITER 0xfabc +#define FSTV0910_P1_STATUS_ITER 0xfabc00ff + +/* P1_STATUSMAXITER */ +#define RSTV0910_P1_STATUSMAXITER 0xfabd +#define FSTV0910_P1_STATUS_MAX_ITER 0xfabd00ff + +/* P2_STATUSITER */ +#define RSTV0910_P2_STATUSITER 0xfabe +#define FSTV0910_P2_STATUS_ITER 0xfabe00ff + +/* P2_STATUSMAXITER */ +#define RSTV0910_P2_STATUSMAXITER 0xfabf +#define FSTV0910_P2_STATUS_MAX_ITER 0xfabf00ff + +/* P2_NBITER_NF1 */ +#define RSTV0910_P2_NBITER_NF1 0xfac0 +#define FSTV0910_P2_NBITER_NF_QPSK_1_4 0xfac000ff + +/* P2_NBITER_NF2 */ +#define RSTV0910_P2_NBITER_NF2 0xfac1 +#define FSTV0910_P2_NBITER_NF_QPSK_1_3 0xfac100ff + +/* P2_NBITER_NF3 */ +#define RSTV0910_P2_NBITER_NF3 0xfac2 +#define FSTV0910_P2_NBITER_NF_QPSK_2_5 0xfac200ff + +/* P2_NBITER_NF4 */ +#define RSTV0910_P2_NBITER_NF4 0xfac3 +#define FSTV0910_P2_NBITER_NF_QPSK_1_2 0xfac300ff + +/* P2_NBITER_NF5 */ +#define RSTV0910_P2_NBITER_NF5 0xfac4 +#define FSTV0910_P2_NBITER_NF_QPSK_3_5 0xfac400ff + +/* 
P2_NBITER_NF6 */ +#define RSTV0910_P2_NBITER_NF6 0xfac5 +#define FSTV0910_P2_NBITER_NF_QPSK_2_3 0xfac500ff + +/* P2_NBITER_NF7 */ +#define RSTV0910_P2_NBITER_NF7 0xfac6 +#define FSTV0910_P2_NBITER_NF_QPSK_3_4 0xfac600ff + +/* P2_NBITER_NF8 */ +#define RSTV0910_P2_NBITER_NF8 0xfac7 +#define FSTV0910_P2_NBITER_NF_QPSK_4_5 0xfac700ff + +/* P2_NBITER_NF9 */ +#define RSTV0910_P2_NBITER_NF9 0xfac8 +#define FSTV0910_P2_NBITER_NF_QPSK_5_6 0xfac800ff + +/* P2_NBITER_NF10 */ +#define RSTV0910_P2_NBITER_NF10 0xfac9 +#define FSTV0910_P2_NBITER_NF_QPSK_8_9 0xfac900ff + +/* P2_NBITER_NF11 */ +#define RSTV0910_P2_NBITER_NF11 0xfaca +#define FSTV0910_P2_NBITER_NF_QPSK_9_10 0xfaca00ff + +/* P2_NBITER_NF12 */ +#define RSTV0910_P2_NBITER_NF12 0xfacb +#define FSTV0910_P2_NBITER_NF_8PSK_3_5 0xfacb00ff + +/* P2_NBITER_NF13 */ +#define RSTV0910_P2_NBITER_NF13 0xfacc +#define FSTV0910_P2_NBITER_NF_8PSK_2_3 0xfacc00ff + +/* P2_NBITER_NF14 */ +#define RSTV0910_P2_NBITER_NF14 0xfacd +#define FSTV0910_P2_NBITER_NF_8PSK_3_4 0xfacd00ff + +/* P2_NBITER_NF15 */ +#define RSTV0910_P2_NBITER_NF15 0xface +#define FSTV0910_P2_NBITER_NF_8PSK_5_6 0xface00ff + +/* P2_NBITER_NF16 */ +#define RSTV0910_P2_NBITER_NF16 0xfacf +#define FSTV0910_P2_NBITER_NF_8PSK_8_9 0xfacf00ff + +/* P2_NBITER_NF17 */ +#define RSTV0910_P2_NBITER_NF17 0xfad0 +#define FSTV0910_P2_NBITER_NF_8PSK_9_10 0xfad000ff + +/* P2_NBITER_NF18 */ +#define RSTV0910_P2_NBITER_NF18 0xfad1 +#define FSTV0910_P2_NBITER_NF_16APSK_2_3 0xfad100ff + +/* P2_NBITER_NF19 */ +#define RSTV0910_P2_NBITER_NF19 0xfad2 +#define FSTV0910_P2_NBITER_NF_16APSK_3_4 0xfad200ff + +/* P2_NBITER_NF20 */ +#define RSTV0910_P2_NBITER_NF20 0xfad3 +#define FSTV0910_P2_NBITER_NF_16APSK_4_5 0xfad300ff + +/* P2_NBITER_NF21 */ +#define RSTV0910_P2_NBITER_NF21 0xfad4 +#define FSTV0910_P2_NBITER_NF_16APSK_5_6 0xfad400ff + +/* P2_NBITER_NF22 */ +#define RSTV0910_P2_NBITER_NF22 0xfad5 +#define FSTV0910_P2_NBITER_NF_16APSK_8_9 0xfad500ff + +/* P2_NBITER_NF23 */ +#define RSTV0910_P2_NBITER_NF23 0xfad6 +#define FSTV0910_P2_NBITER_NF_16APSK_9_10 0xfad600ff + +/* P2_NBITER_NF24 */ +#define RSTV0910_P2_NBITER_NF24 0xfad7 +#define FSTV0910_P2_NBITER_NF_32APSK_3_4 0xfad700ff + +/* P2_NBITER_NF25 */ +#define RSTV0910_P2_NBITER_NF25 0xfad8 +#define FSTV0910_P2_NBITER_NF_32APSK_4_5 0xfad800ff + +/* P2_NBITER_NF26 */ +#define RSTV0910_P2_NBITER_NF26 0xfad9 +#define FSTV0910_P2_NBITER_NF_32APSK_5_6 0xfad900ff + +/* P2_NBITER_NF27 */ +#define RSTV0910_P2_NBITER_NF27 0xfada +#define FSTV0910_P2_NBITER_NF_32APSK_8_9 0xfada00ff + +/* P2_NBITER_NF28 */ +#define RSTV0910_P2_NBITER_NF28 0xfadb +#define FSTV0910_P2_NBITER_NF_32APSK_9_10 0xfadb00ff + +/* P2_NBITER_SF1 */ +#define RSTV0910_P2_NBITER_SF1 0xfadc +#define FSTV0910_P2_NBITER_SF_QPSK_1_4 0xfadc00ff + +/* P2_NBITER_SF2 */ +#define RSTV0910_P2_NBITER_SF2 0xfadd +#define FSTV0910_P2_NBITER_SF_QPSK_1_3 0xfadd00ff + +/* P2_NBITER_SF3 */ +#define RSTV0910_P2_NBITER_SF3 0xfade +#define FSTV0910_P2_NBITER_SF_QPSK_2_5 0xfade00ff + +/* P2_NBITER_SF4 */ +#define RSTV0910_P2_NBITER_SF4 0xfadf +#define FSTV0910_P2_NBITER_SF_QPSK_1_2 0xfadf00ff + +/* P2_NBITER_SF5 */ +#define RSTV0910_P2_NBITER_SF5 0xfae0 +#define FSTV0910_P2_NBITER_SF_QPSK_3_5 0xfae000ff + +/* P2_NBITER_SF6 */ +#define RSTV0910_P2_NBITER_SF6 0xfae1 +#define FSTV0910_P2_NBITER_SF_QPSK_2_3 0xfae100ff + +/* P2_NBITER_SF7 */ +#define RSTV0910_P2_NBITER_SF7 0xfae2 +#define FSTV0910_P2_NBITER_SF_QPSK_3_4 0xfae200ff + +/* P2_NBITER_SF8 */ +#define RSTV0910_P2_NBITER_SF8 0xfae3 +#define FSTV0910_P2_NBITER_SF_QPSK_4_5 
0xfae300ff + +/* P2_NBITER_SF9 */ +#define RSTV0910_P2_NBITER_SF9 0xfae4 +#define FSTV0910_P2_NBITER_SF_QPSK_5_6 0xfae400ff + +/* P2_NBITER_SF10 */ +#define RSTV0910_P2_NBITER_SF10 0xfae5 +#define FSTV0910_P2_NBITER_SF_QPSK_8_9 0xfae500ff + +/* P2_NBITER_SF12 */ +#define RSTV0910_P2_NBITER_SF12 0xfae6 +#define FSTV0910_P2_NBITER_SF_8PSK_3_5 0xfae600ff + +/* P2_NBITER_SF13 */ +#define RSTV0910_P2_NBITER_SF13 0xfae7 +#define FSTV0910_P2_NBITER_SF_8PSK_2_3 0xfae700ff + +/* P2_NBITER_SF14 */ +#define RSTV0910_P2_NBITER_SF14 0xfae8 +#define FSTV0910_P2_NBITER_SF_8PSK_3_4 0xfae800ff + +/* P2_NBITER_SF15 */ +#define RSTV0910_P2_NBITER_SF15 0xfae9 +#define FSTV0910_P2_NBITER_SF_8PSK_5_6 0xfae900ff + +/* P2_NBITER_SF16 */ +#define RSTV0910_P2_NBITER_SF16 0xfaea +#define FSTV0910_P2_NBITER_SF_8PSK_8_9 0xfaea00ff + +/* P2_NBITER_SF18 */ +#define RSTV0910_P2_NBITER_SF18 0xfaeb +#define FSTV0910_P2_NBITER_SF_16APSK_2_3 0xfaeb00ff + +/* P2_NBITER_SF19 */ +#define RSTV0910_P2_NBITER_SF19 0xfaec +#define FSTV0910_P2_NBITER_SF_16APSK_3_4 0xfaec00ff + +/* P2_NBITER_SF20 */ +#define RSTV0910_P2_NBITER_SF20 0xfaed +#define FSTV0910_P2_NBITER_SF_16APSK_4_5 0xfaed00ff + +/* P2_NBITER_SF21 */ +#define RSTV0910_P2_NBITER_SF21 0xfaee +#define FSTV0910_P2_NBITER_SF_16APSK_5_6 0xfaee00ff + +/* P2_NBITER_SF22 */ +#define RSTV0910_P2_NBITER_SF22 0xfaef +#define FSTV0910_P2_NBITER_SF_16APSK_8_9 0xfaef00ff + +/* P2_NBITER_SF24 */ +#define RSTV0910_P2_NBITER_SF24 0xfaf0 +#define FSTV0910_P2_NBITER_SF_32APSK_3_4 0xfaf000ff + +/* P2_NBITER_SF25 */ +#define RSTV0910_P2_NBITER_SF25 0xfaf1 +#define FSTV0910_P2_NBITER_SF_32APSK_4_5 0xfaf100ff + +/* P2_NBITER_SF26 */ +#define RSTV0910_P2_NBITER_SF26 0xfaf2 +#define FSTV0910_P2_NBITER_SF_32APSK_5_6 0xfaf200ff + +/* P2_NBITER_SF27 */ +#define RSTV0910_P2_NBITER_SF27 0xfaf3 +#define FSTV0910_P2_NBITER_SF_32APSK_8_9 0xfaf300ff + +/* TSTRES0 */ +#define RSTV0910_TSTRES0 0xff11 +#define FSTV0910_FRESFEC 0xff110080 +#define FSTV0910_FRESSYM1 0xff110008 +#define FSTV0910_FRESSYM2 0xff110004 + +/* TSTOUT */ +#define RSTV0910_TSTOUT 0xff12 +#define FSTV0910_TS 0xff12003e +#define FSTV0910_TEST_OUT 0xff120001 + +/* TSTIN */ +#define RSTV0910_TSTIN 0xff13 +#define FSTV0910_TEST_IN 0xff130080 + +/* P2_TSTDMD */ +#define RSTV0910_P2_TSTDMD 0xff20 +#define FSTV0910_P2_CFRINIT_INVZIGZAG 0xff200008 + +/* P2_TCTL1 */ +#define RSTV0910_P2_TCTL1 0xff24 +#define FSTV0910_P2_TST_IQSYMBSEL 0xff24001f + +/* P2_TCTL4 */ +#define RSTV0910_P2_TCTL4 0xff28 +#define FSTV0910_P2_CFR2TOCFR1_DVBS1 0xff2800c0 + +/* P2_TPKTDELIN */ +#define RSTV0910_P2_TPKTDELIN 0xff37 +#define FSTV0910_P2_CFG_RSPARITYON 0xff370080 + +/* P1_TSTDMD */ +#define RSTV0910_P1_TSTDMD 0xff40 +#define FSTV0910_P1_CFRINIT_INVZIGZAG 0xff400008 + +/* P1_TCTL1 */ +#define RSTV0910_P1_TCTL1 0xff44 +#define FSTV0910_P1_TST_IQSYMBSEL 0xff44001f + +/* P1_TCTL4 */ +#define RSTV0910_P1_TCTL4 0xff48 +#define FSTV0910_P1_CFR2TOCFR1_DVBS1 0xff4800c0 + +/* P1_TPKTDELIN */ +#define RSTV0910_P1_TPKTDELIN 0xff57 +#define FSTV0910_P1_CFG_RSPARITYON 0xff570080 + +/* TSTTSRS */ +#define RSTV0910_TSTTSRS 0xff6d +#define FSTV0910_TSTRS_DISRS2 0xff6d0002 +#define FSTV0910_TSTRS_DISRS1 0xff6d0001 + +#define STV0910_NBREGS 975 +#define STV0910_NBFIELDS 1818 diff --git a/drivers/media/dvb-frontends/stv6111.c b/drivers/media/dvb-frontends/stv6111.c new file mode 100644 index 000000000000..e3e90070e293 --- /dev/null +++ b/drivers/media/dvb-frontends/stv6111.c @@ -0,0 +1,681 @@ +/* + * Driver for the ST STV6111 tuner + * + * Copyright (C) 2014 Digital Devices GmbH 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 only, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "stv6111.h" + +#include "dvb_frontend.h" + +struct stv { + struct i2c_adapter *i2c; + u8 adr; + + u8 reg[11]; + u32 ref_freq; + u32 frequency; +}; + +struct slookup { + s16 value; + u16 reg_value; +}; + +static const struct slookup lnagain_nf_lookup[] = { + /* Gain *100dB // Reg */ + { 2572, 0 }, + { 2575, 1 }, + { 2580, 2 }, + { 2588, 3 }, + { 2596, 4 }, + { 2611, 5 }, + { 2633, 6 }, + { 2664, 7 }, + { 2701, 8 }, + { 2753, 9 }, + { 2816, 10 }, + { 2902, 11 }, + { 2995, 12 }, + { 3104, 13 }, + { 3215, 14 }, + { 3337, 15 }, + { 3492, 16 }, + { 3614, 17 }, + { 3731, 18 }, + { 3861, 19 }, + { 3988, 20 }, + { 4124, 21 }, + { 4253, 22 }, + { 4386, 23 }, + { 4505, 24 }, + { 4623, 25 }, + { 4726, 26 }, + { 4821, 27 }, + { 4903, 28 }, + { 4979, 29 }, + { 5045, 30 }, + { 5102, 31 } +}; + +static const struct slookup lnagain_iip3_lookup[] = { + /* Gain *100dB // reg */ + { 1548, 0 }, + { 1552, 1 }, + { 1569, 2 }, + { 1565, 3 }, + { 1577, 4 }, + { 1594, 5 }, + { 1627, 6 }, + { 1656, 7 }, + { 1700, 8 }, + { 1748, 9 }, + { 1805, 10 }, + { 1896, 11 }, + { 1995, 12 }, + { 2113, 13 }, + { 2233, 14 }, + { 2366, 15 }, + { 2543, 16 }, + { 2687, 17 }, + { 2842, 18 }, + { 2999, 19 }, + { 3167, 20 }, + { 3342, 21 }, + { 3507, 22 }, + { 3679, 23 }, + { 3827, 24 }, + { 3970, 25 }, + { 4094, 26 }, + { 4210, 27 }, + { 4308, 28 }, + { 4396, 29 }, + { 4468, 30 }, + { 4535, 31 } +}; + +static const struct slookup gain_rfagc_lookup[] = { + /* Gain *100dB // reg */ + { 4870, 0x3000 }, + { 4850, 0x3C00 }, + { 4800, 0x4500 }, + { 4750, 0x4800 }, + { 4700, 0x4B00 }, + { 4650, 0x4D00 }, + { 4600, 0x4F00 }, + { 4550, 0x5100 }, + { 4500, 0x5200 }, + { 4420, 0x5500 }, + { 4316, 0x5800 }, + { 4200, 0x5B00 }, + { 4119, 0x5D00 }, + { 3999, 0x6000 }, + { 3950, 0x6100 }, + { 3876, 0x6300 }, + { 3755, 0x6600 }, + { 3641, 0x6900 }, + { 3567, 0x6B00 }, + { 3425, 0x6F00 }, + { 3350, 0x7100 }, + { 3236, 0x7400 }, + { 3118, 0x7700 }, + { 3004, 0x7A00 }, + { 2917, 0x7C00 }, + { 2776, 0x7F00 }, + { 2635, 0x8200 }, + { 2516, 0x8500 }, + { 2406, 0x8800 }, + { 2290, 0x8B00 }, + { 2170, 0x8E00 }, + { 2073, 0x9100 }, + { 1949, 0x9400 }, + { 1836, 0x9700 }, + { 1712, 0x9A00 }, + { 1631, 0x9C00 }, + { 1515, 0x9F00 }, + { 1400, 0xA200 }, + { 1323, 0xA400 }, + { 1203, 0xA700 }, + { 1091, 0xAA00 }, + { 1011, 0xAC00 }, + { 904, 0xAF00 }, + { 787, 0xB200 }, + { 685, 0xB500 }, + { 571, 0xB800 }, + { 464, 0xBB00 }, + { 374, 0xBE00 }, + { 275, 0xC200 }, + { 181, 0xC600 }, + { 102, 0xCC00 }, + { 49, 0xD900 } +}; + +/* + * This table is 6 dB too low compared to the others (probably created with + * a different BB_MAG setting) + */ +static const struct slookup gain_channel_agc_nf_lookup[] = { + /* Gain *100dB // reg */ + { 7082, 0x3000 }, + { 7052, 0x4000 }, + { 7007, 0x4600 }, + { 6954, 0x4A00 }, + { 6909, 0x4D00 }, + { 6833, 0x5100 }, + { 6753, 0x5400 }, + { 6659, 0x5700 }, + { 6561, 0x5A00 }, + { 6472, 0x5C00 }, + { 6366, 0x5F00 }, + { 6259, 0x6100 }, + { 6151, 0x6400 }, + { 6026, 0x6700 }, + { 5920, 0x6900 
}, + { 5835, 0x6B00 }, + { 5770, 0x6C00 }, + { 5681, 0x6E00 }, + { 5596, 0x7000 }, + { 5503, 0x7200 }, + { 5429, 0x7300 }, + { 5319, 0x7500 }, + { 5220, 0x7700 }, + { 5111, 0x7900 }, + { 4983, 0x7B00 }, + { 4876, 0x7D00 }, + { 4755, 0x7F00 }, + { 4635, 0x8100 }, + { 4499, 0x8300 }, + { 4405, 0x8500 }, + { 4323, 0x8600 }, + { 4233, 0x8800 }, + { 4156, 0x8A00 }, + { 4038, 0x8C00 }, + { 3935, 0x8E00 }, + { 3823, 0x9000 }, + { 3712, 0x9200 }, + { 3601, 0x9500 }, + { 3511, 0x9700 }, + { 3413, 0x9900 }, + { 3309, 0x9B00 }, + { 3213, 0x9D00 }, + { 3088, 0x9F00 }, + { 2992, 0xA100 }, + { 2878, 0xA400 }, + { 2769, 0xA700 }, + { 2645, 0xAA00 }, + { 2538, 0xAD00 }, + { 2441, 0xB000 }, + { 2350, 0xB600 }, + { 2237, 0xBA00 }, + { 2137, 0xBF00 }, + { 2039, 0xC500 }, + { 1938, 0xDF00 }, + { 1927, 0xFF00 } +}; + +static const struct slookup gain_channel_agc_iip3_lookup[] = { + /* Gain *100dB // reg */ + { 7070, 0x3000 }, + { 7028, 0x4000 }, + { 7019, 0x4600 }, + { 6900, 0x4A00 }, + { 6811, 0x4D00 }, + { 6763, 0x5100 }, + { 6690, 0x5400 }, + { 6644, 0x5700 }, + { 6617, 0x5A00 }, + { 6598, 0x5C00 }, + { 6462, 0x5F00 }, + { 6348, 0x6100 }, + { 6197, 0x6400 }, + { 6154, 0x6700 }, + { 6098, 0x6900 }, + { 5893, 0x6B00 }, + { 5812, 0x6C00 }, + { 5773, 0x6E00 }, + { 5723, 0x7000 }, + { 5661, 0x7200 }, + { 5579, 0x7300 }, + { 5460, 0x7500 }, + { 5308, 0x7700 }, + { 5099, 0x7900 }, + { 4910, 0x7B00 }, + { 4800, 0x7D00 }, + { 4785, 0x7F00 }, + { 4635, 0x8100 }, + { 4466, 0x8300 }, + { 4314, 0x8500 }, + { 4295, 0x8600 }, + { 4144, 0x8800 }, + { 3920, 0x8A00 }, + { 3889, 0x8C00 }, + { 3771, 0x8E00 }, + { 3655, 0x9000 }, + { 3446, 0x9200 }, + { 3298, 0x9500 }, + { 3083, 0x9700 }, + { 3015, 0x9900 }, + { 2833, 0x9B00 }, + { 2746, 0x9D00 }, + { 2632, 0x9F00 }, + { 2598, 0xA100 }, + { 2480, 0xA400 }, + { 2236, 0xA700 }, + { 2171, 0xAA00 }, + { 2060, 0xAD00 }, + { 1999, 0xB000 }, + { 1974, 0xB600 }, + { 1820, 0xBA00 }, + { 1741, 0xBF00 }, + { 1655, 0xC500 }, + { 1444, 0xDF00 }, + { 1325, 0xFF00 }, +}; + +static inline u32 muldiv32(u32 a, u32 b, u32 c) +{ + u64 tmp64; + + tmp64 = (u64)a * (u64)b; + do_div(tmp64, c); + + return (u32)tmp64; +} + +static int i2c_read(struct i2c_adapter *adap, + u8 adr, u8 *msg, int len, u8 *answ, int alen) +{ + struct i2c_msg msgs[2] = { { .addr = adr, .flags = 0, + .buf = msg, .len = len}, + { .addr = adr, .flags = I2C_M_RD, + .buf = answ, .len = alen } }; + if (i2c_transfer(adap, msgs, 2) != 2) { + dev_err(&adap->dev, "i2c read error\n"); + return -EIO; + } + return 0; +} + +static int i2c_write(struct i2c_adapter *adap, u8 adr, u8 *data, int len) +{ + struct i2c_msg msg = {.addr = adr, .flags = 0, + .buf = data, .len = len}; + + if (i2c_transfer(adap, &msg, 1) != 1) { + dev_err(&adap->dev, "i2c write error\n"); + return -EIO; + } + return 0; +} + +static int write_regs(struct stv *state, int reg, int len) +{ + u8 d[12]; + + memcpy(&d[1], &state->reg[reg], len); + d[0] = reg; + return i2c_write(state->i2c, state->adr, d, len + 1); +} + +static int write_reg(struct stv *state, u8 reg, u8 val) +{ + u8 d[2] = {reg, val}; + + return i2c_write(state->i2c, state->adr, d, 2); +} + +static int read_reg(struct stv *state, u8 reg, u8 *val) +{ + return i2c_read(state->i2c, state->adr, ®, 1, val, 1); +} + +static int wait_for_call_done(struct stv *state, u8 mask) +{ + int status = 0; + u32 lock_retry_count = 10; + + while (lock_retry_count > 0) { + u8 regval; + + status = read_reg(state, 9, ®val); + if (status < 0) + return status; + + if ((regval & mask) == 0) + break; + usleep_range(4000, 6000); + 
lock_retry_count -= 1; + + status = -EIO; + } + return status; +} + +static void init_state(struct stv *state) +{ + u32 clkdiv = 0; + u32 agcmode = 0; + u32 agcref = 2; + u32 agcset = 0xffffffff; + u32 bbmode = 0xffffffff; + + state->reg[0] = 0x08; + state->reg[1] = 0x41; + state->reg[2] = 0x8f; + state->reg[3] = 0x00; + state->reg[4] = 0xce; + state->reg[5] = 0x54; + state->reg[6] = 0x55; + state->reg[7] = 0x45; + state->reg[8] = 0x46; + state->reg[9] = 0xbd; + state->reg[10] = 0x11; + + state->ref_freq = 16000; + + if (clkdiv <= 3) + state->reg[0x00] |= (clkdiv & 0x03); + if (agcmode <= 3) { + state->reg[0x03] |= (agcmode << 5); + if (agcmode == 0x01) + state->reg[0x01] |= 0x30; + } + if (bbmode <= 3) + state->reg[0x01] = (state->reg[0x01] & ~0x30) | (bbmode << 4); + if (agcref <= 7) + state->reg[0x03] |= agcref; + if (agcset <= 31) + state->reg[0x02] = (state->reg[0x02] & ~0x1F) | agcset | 0x40; +} + +static int attach_init(struct stv *state) +{ + if (write_regs(state, 0, 11)) + return -ENODEV; + return 0; +} + +static void release(struct dvb_frontend *fe) +{ + kfree(fe->tuner_priv); + fe->tuner_priv = NULL; +} + +static int set_bandwidth(struct dvb_frontend *fe, u32 cutoff_frequency) +{ + struct stv *state = fe->tuner_priv; + u32 index = (cutoff_frequency + 999999) / 1000000; + + if (index < 6) + index = 6; + if (index > 50) + index = 50; + if ((state->reg[0x08] & ~0xFC) == ((index - 6) << 2)) + return 0; + + state->reg[0x08] = (state->reg[0x08] & ~0xFC) | ((index - 6) << 2); + state->reg[0x09] = (state->reg[0x09] & ~0x0C) | 0x08; + if (fe->ops.i2c_gate_ctrl) + fe->ops.i2c_gate_ctrl(fe, 1); + write_regs(state, 0x08, 2); + wait_for_call_done(state, 0x08); + if (fe->ops.i2c_gate_ctrl) + fe->ops.i2c_gate_ctrl(fe, 0); + return 0; +} + +static int set_lof(struct stv *state, u32 local_frequency, u32 cutoff_frequency) +{ + u32 index = (cutoff_frequency + 999999) / 1000000; + u32 frequency = (local_frequency + 500) / 1000; + u32 p = 1, psel = 0, fvco, div, frac; + u8 icp, tmp; + + if (index < 6) + index = 6; + if (index > 50) + index = 50; + + if (frequency <= 1300000) { + p = 4; + psel = 1; + } else { + p = 2; + psel = 0; + } + fvco = frequency * p; + div = fvco / state->ref_freq; + frac = fvco % state->ref_freq; + frac = muldiv32(frac, 0x40000, state->ref_freq); + + icp = 0; + if (fvco < 2700000) + icp = 0; + else if (fvco < 2950000) + icp = 1; + else if (fvco < 3300000) + icp = 2; + else if (fvco < 3700000) + icp = 3; + else if (fvco < 4200000) + icp = 5; + else if (fvco < 4800000) + icp = 6; + else + icp = 7; + + state->reg[0x02] |= 0x80; /* LNA IIP3 Mode */ + + state->reg[0x03] = (state->reg[0x03] & ~0x80) | (psel << 7); + state->reg[0x04] = (div & 0xFF); + state->reg[0x05] = (((div >> 8) & 0x01) | ((frac & 0x7F) << 1)) & 0xff; + state->reg[0x06] = ((frac >> 7) & 0xFF); + state->reg[0x07] = (state->reg[0x07] & ~0x07) | ((frac >> 15) & 0x07); + state->reg[0x07] = (state->reg[0x07] & ~0xE0) | (icp << 5); + + state->reg[0x08] = (state->reg[0x08] & ~0xFC) | ((index - 6) << 2); + /* Start cal vco,CF */ + state->reg[0x09] = (state->reg[0x09] & ~0x0C) | 0x0C; + write_regs(state, 2, 8); + + wait_for_call_done(state, 0x0C); + + usleep_range(10000, 12000); + + read_reg(state, 0x03, &tmp); + if (tmp & 0x10) { + state->reg[0x02] &= ~0x80; /* LNA NF Mode */ + write_regs(state, 2, 1); + } + read_reg(state, 0x08, &tmp); + + state->frequency = frequency; + + return 0; +} + +static int set_params(struct dvb_frontend *fe) +{ + struct stv *state = fe->tuner_priv; + struct dtv_frontend_properties *p = 
&fe->dtv_property_cache; + u32 freq, cutoff; + + if (p->delivery_system != SYS_DVBS && p->delivery_system != SYS_DVBS2) + return -EINVAL; + + freq = p->frequency * 1000; + cutoff = 5000000 + muldiv32(p->symbol_rate, 135, 200); + + if (fe->ops.i2c_gate_ctrl) + fe->ops.i2c_gate_ctrl(fe, 1); + set_lof(state, freq, cutoff); + if (fe->ops.i2c_gate_ctrl) + fe->ops.i2c_gate_ctrl(fe, 0); + return 0; +} + +static s32 table_lookup(const struct slookup *table, + int table_size, u16 reg_value) +{ + s32 gain; + s32 reg_diff; + int imin = 0; + int imax = table_size - 1; + int i; + + /* Assumes Table[0].RegValue < Table[imax].RegValue */ + if (reg_value <= table[0].reg_value) { + gain = table[0].value; + } else if (reg_value >= table[imax].reg_value) { + gain = table[imax].value; + } else { + while ((imax - imin) > 1) { + i = (imax + imin) / 2; + if ((table[imin].reg_value <= reg_value) && + (reg_value <= table[i].reg_value)) + imax = i; + else + imin = i; + } + reg_diff = table[imax].reg_value - table[imin].reg_value; + gain = table[imin].value; + if (reg_diff != 0) + gain += ((s32)(reg_value - table[imin].reg_value) * + (s32)(table[imax].value + - table[imin].value)) / reg_diff; + } + return gain; +} + +static int get_rf_strength(struct dvb_frontend *fe, u16 *st) +{ + struct stv *state = fe->tuner_priv; + u16 rfagc = *st; + s32 gain; + + if ((state->reg[0x03] & 0x60) == 0) { + /* RF Mode, Read AGC ADC */ + u8 reg = 0; + + if (fe->ops.i2c_gate_ctrl) + fe->ops.i2c_gate_ctrl(fe, 1); + write_reg(state, 0x02, state->reg[0x02] | 0x20); + read_reg(state, 2, ®); + if (reg & 0x20) + read_reg(state, 2, ®); + if (fe->ops.i2c_gate_ctrl) + fe->ops.i2c_gate_ctrl(fe, 0); + + if ((state->reg[0x02] & 0x80) == 0) + /* NF */ + gain = table_lookup(lnagain_nf_lookup, + ARRAY_SIZE(lnagain_nf_lookup), + reg & 0x1F); + else + /* IIP3 */ + gain = table_lookup(lnagain_iip3_lookup, + ARRAY_SIZE(lnagain_iip3_lookup), + reg & 0x1F); + + gain += table_lookup(gain_rfagc_lookup, + ARRAY_SIZE(gain_rfagc_lookup), rfagc); + + gain -= 2400; + } else { + /* Channel Mode */ + if ((state->reg[0x02] & 0x80) == 0) { + /* NF */ + gain = table_lookup( + gain_channel_agc_nf_lookup, + ARRAY_SIZE(gain_channel_agc_nf_lookup), rfagc); + + gain += 600; + } else { + /* IIP3 */ + gain = table_lookup( + gain_channel_agc_iip3_lookup, + ARRAY_SIZE(gain_channel_agc_iip3_lookup), + rfagc); + } + } + + if (state->frequency > 0) + /* Tilt correction ( 0.00016 dB/MHz ) */ + gain -= ((((s32)(state->frequency / 1000) - 1550) * 2) / 12); + + /* + (BBGain * 10); */ + gain += (s32)((state->reg[0x01] & 0xC0) >> 6) * 600 - 1300; + + if (gain < 0) + gain = 0; + else if (gain > 10000) + gain = 10000; + + *st = 10000 - gain; + + return 0; +} + +static const struct dvb_tuner_ops tuner_ops = { + .info = { + .name = "ST STV6111", + .frequency_min = 950000, + .frequency_max = 2150000, + .frequency_step = 0 + }, + .set_params = set_params, + .release = release, + .get_rf_strength = get_rf_strength, + .set_bandwidth = set_bandwidth, +}; + +struct dvb_frontend *stv6111_attach(struct dvb_frontend *fe, + struct i2c_adapter *i2c, u8 adr) +{ + struct stv *state; + int stat; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return NULL; + state->adr = adr; + state->i2c = i2c; + memcpy(&fe->ops.tuner_ops, &tuner_ops, sizeof(struct dvb_tuner_ops)); + init_state(state); + + if (fe->ops.i2c_gate_ctrl) + fe->ops.i2c_gate_ctrl(fe, 1); + stat = attach_init(state); + if (fe->ops.i2c_gate_ctrl) + fe->ops.i2c_gate_ctrl(fe, 0); + if (stat < 0) { + kfree(state); + return NULL; + 
} + fe->tuner_priv = state; + return fe; +} +EXPORT_SYMBOL_GPL(stv6111_attach); + +MODULE_DESCRIPTION("ST STV6111 satellite tuner driver"); +MODULE_AUTHOR("Ralph Metzler, Manfred Voelkel"); +MODULE_LICENSE("GPL"); diff --git a/drivers/media/dvb-frontends/stv6111.h b/drivers/media/dvb-frontends/stv6111.h new file mode 100644 index 000000000000..5bc1228dc9bd --- /dev/null +++ b/drivers/media/dvb-frontends/stv6111.h @@ -0,0 +1,21 @@ +#ifndef _STV6111_H_ +#define _STV6111_H_ + +#if IS_REACHABLE(CONFIG_DVB_STV6111) + +struct dvb_frontend *stv6111_attach(struct dvb_frontend *fe, + struct i2c_adapter *i2c, u8 adr); + +#else + +static inline struct dvb_frontend *stv6111_attach(struct dvb_frontend *fe, + struct i2c_adapter *i2c, + u8 adr) +{ + pr_warn("%s: Driver disabled by Kconfig\n", __func__); + return NULL; +} + +#endif /* CONFIG_DVB_STV6111 */ + +#endif /* _STV6111_H_ */ diff --git a/drivers/media/dvb-frontends/zd1301_demod.c b/drivers/media/dvb-frontends/zd1301_demod.c index fcf5f69de0c5..84a2b25a574a 100644 --- a/drivers/media/dvb-frontends/zd1301_demod.c +++ b/drivers/media/dvb-frontends/zd1301_demod.c @@ -445,7 +445,7 @@ static u32 zd1301_demod_i2c_functionality(struct i2c_adapter *adapter) return I2C_FUNC_I2C; } -static struct i2c_algorithm zd1301_demod_i2c_algorithm = { +static const struct i2c_algorithm zd1301_demod_i2c_algorithm = { .master_xfer = zd1301_demod_i2c_master_xfer, .functionality = zd1301_demod_i2c_functionality, }; diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig index 121b3b5394cb..94153895fcd4 100644 --- a/drivers/media/i2c/Kconfig +++ b/drivers/media/i2c/Kconfig @@ -204,6 +204,18 @@ config VIDEO_ADV7183 To compile this driver as a module, choose M here: the module will be called adv7183. +config VIDEO_ADV748X + tristate "Analog Devices ADV748x decoder" + depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API + depends on OF + select REGMAP_I2C + ---help--- + V4L2 subdevice driver for the Analog Devices + ADV7481 and ADV7482 HDMI/Analog video decoders. + + To compile this driver as a module, choose M here: the + module will be called adv748x. + config VIDEO_ADV7604 tristate "Analog Devices ADV7604 decoder" depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API @@ -593,6 +605,30 @@ config VIDEO_OV5647 To compile this driver as a module, choose M here: the module will be called ov5647. +config VIDEO_OV6650 + tristate "OmniVision OV6650 sensor support" + depends on I2C && VIDEO_V4L2 + depends on MEDIA_CAMERA_SUPPORT + ---help--- + This is a Video4Linux2 sensor-level driver for the OmniVision + OV6650 camera. + + To compile this driver as a module, choose M here: the + module will be called ov6650. + +config VIDEO_OV5670 + tristate "OmniVision OV5670 sensor support" + depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API + depends on MEDIA_CAMERA_SUPPORT + depends on MEDIA_CONTROLLER + select V4L2_FWNODE + ---help--- + This is a Video4Linux2 sensor-level driver for the OmniVision + OV5670 camera. + + To compile this driver as a module, choose M here: the + module will be called ov5670. 
+ config VIDEO_OV7640 tristate "OmniVision OV7640 sensor support" depends on I2C && VIDEO_V4L2 diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile index 2c0868fa6034..c843c181dfb9 100644 --- a/drivers/media/i2c/Makefile +++ b/drivers/media/i2c/Makefile @@ -28,6 +28,7 @@ obj-$(CONFIG_VIDEO_ADV7180) += adv7180.o obj-$(CONFIG_VIDEO_ADV7183) += adv7183.o obj-$(CONFIG_VIDEO_ADV7343) += adv7343.o obj-$(CONFIG_VIDEO_ADV7393) += adv7393.o +obj-$(CONFIG_VIDEO_ADV748X) += adv748x/ obj-$(CONFIG_VIDEO_ADV7604) += adv7604.o obj-$(CONFIG_VIDEO_ADV7842) += adv7842.o obj-$(CONFIG_VIDEO_AD9389B) += ad9389b.o @@ -62,6 +63,8 @@ obj-$(CONFIG_VIDEO_OV2640) += ov2640.o obj-$(CONFIG_VIDEO_OV5640) += ov5640.o obj-$(CONFIG_VIDEO_OV5645) += ov5645.o obj-$(CONFIG_VIDEO_OV5647) += ov5647.o +obj-$(CONFIG_VIDEO_OV5670) += ov5670.o +obj-$(CONFIG_VIDEO_OV6650) += ov6650.o obj-$(CONFIG_VIDEO_OV7640) += ov7640.o obj-$(CONFIG_VIDEO_OV7670) += ov7670.o obj-$(CONFIG_VIDEO_OV9650) += ov9650.o diff --git a/drivers/media/i2c/ad9389b.c b/drivers/media/i2c/ad9389b.c index 50f354144ee7..a056d6cdaaaa 100644 --- a/drivers/media/i2c/ad9389b.c +++ b/drivers/media/i2c/ad9389b.c @@ -1208,7 +1208,7 @@ static int ad9389b_remove(struct i2c_client *client) /* ----------------------------------------------------------------------- */ -static struct i2c_device_id ad9389b_id[] = { +static const struct i2c_device_id ad9389b_id[] = { { "ad9389b", 0 }, { "ad9889b", 0 }, { } diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c index 78de7ddf5081..3df28f2f9b38 100644 --- a/drivers/media/i2c/adv7180.c +++ b/drivers/media/i2c/adv7180.c @@ -1402,6 +1402,8 @@ static int adv7180_remove(struct i2c_client *client) static const struct i2c_device_id adv7180_id[] = { { "adv7180", (kernel_ulong_t)&adv7180_info }, + { "adv7180cp", (kernel_ulong_t)&adv7180_info }, + { "adv7180st", (kernel_ulong_t)&adv7180_info }, { "adv7182", (kernel_ulong_t)&adv7182_info }, { "adv7280", (kernel_ulong_t)&adv7280_info }, { "adv7280-m", (kernel_ulong_t)&adv7280_m_info }, diff --git a/drivers/media/i2c/adv748x/Makefile b/drivers/media/i2c/adv748x/Makefile new file mode 100644 index 000000000000..c0711e076f1d --- /dev/null +++ b/drivers/media/i2c/adv748x/Makefile @@ -0,0 +1,7 @@ +adv748x-objs := \ + adv748x-afe.o \ + adv748x-core.o \ + adv748x-csi2.o \ + adv748x-hdmi.o + +obj-$(CONFIG_VIDEO_ADV748X) += adv748x.o diff --git a/drivers/media/i2c/adv748x/adv748x-afe.c b/drivers/media/i2c/adv748x/adv748x-afe.c new file mode 100644 index 000000000000..b33ccfc08708 --- /dev/null +++ b/drivers/media/i2c/adv748x/adv748x-afe.c @@ -0,0 +1,552 @@ +/* + * Driver for Analog Devices ADV748X 8 channel analog front end (AFE) receiver + * with standard definition processor (SDP) + * + * Copyright (C) 2017 Renesas Electronics Corp. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "adv748x.h" + +/* ----------------------------------------------------------------------------- + * SDP + */ + +#define ADV748X_AFE_STD_AD_PAL_BG_NTSC_J_SECAM 0x0 +#define ADV748X_AFE_STD_AD_PAL_BG_NTSC_J_SECAM_PED 0x1 +#define ADV748X_AFE_STD_AD_PAL_N_NTSC_J_SECAM 0x2 +#define ADV748X_AFE_STD_AD_PAL_N_NTSC_M_SECAM 0x3 +#define ADV748X_AFE_STD_NTSC_J 0x4 +#define ADV748X_AFE_STD_NTSC_M 0x5 +#define ADV748X_AFE_STD_PAL60 0x6 +#define ADV748X_AFE_STD_NTSC_443 0x7 +#define ADV748X_AFE_STD_PAL_BG 0x8 +#define ADV748X_AFE_STD_PAL_N 0x9 +#define ADV748X_AFE_STD_PAL_M 0xa +#define ADV748X_AFE_STD_PAL_M_PED 0xb +#define ADV748X_AFE_STD_PAL_COMB_N 0xc +#define ADV748X_AFE_STD_PAL_COMB_N_PED 0xd +#define ADV748X_AFE_STD_PAL_SECAM 0xe +#define ADV748X_AFE_STD_PAL_SECAM_PED 0xf + +static int adv748x_afe_read_ro_map(struct adv748x_state *state, u8 reg) +{ + int ret; + + /* Select SDP Read-Only Main Map */ + ret = sdp_write(state, ADV748X_SDP_MAP_SEL, + ADV748X_SDP_MAP_SEL_RO_MAIN); + if (ret < 0) + return ret; + + return sdp_read(state, reg); +} + +static int adv748x_afe_status(struct adv748x_afe *afe, u32 *signal, + v4l2_std_id *std) +{ + struct adv748x_state *state = adv748x_afe_to_state(afe); + int info; + + /* Read status from reg 0x10 of SDP RO Map */ + info = adv748x_afe_read_ro_map(state, ADV748X_SDP_RO_10); + if (info < 0) + return info; + + if (signal) + *signal = info & ADV748X_SDP_RO_10_IN_LOCK ? + 0 : V4L2_IN_ST_NO_SIGNAL; + + if (!std) + return 0; + + /* Standard not valid if there is no signal */ + if (!(info & ADV748X_SDP_RO_10_IN_LOCK)) { + *std = V4L2_STD_UNKNOWN; + return 0; + } + + switch (info & 0x70) { + case 0x00: + *std = V4L2_STD_NTSC; + break; + case 0x10: + *std = V4L2_STD_NTSC_443; + break; + case 0x20: + *std = V4L2_STD_PAL_M; + break; + case 0x30: + *std = V4L2_STD_PAL_60; + break; + case 0x40: + *std = V4L2_STD_PAL; + break; + case 0x50: + *std = V4L2_STD_SECAM; + break; + case 0x60: + *std = V4L2_STD_PAL_Nc | V4L2_STD_PAL_N; + break; + case 0x70: + *std = V4L2_STD_SECAM; + break; + default: + *std = V4L2_STD_UNKNOWN; + break; + } + + return 0; +} + +static void adv748x_afe_fill_format(struct adv748x_afe *afe, + struct v4l2_mbus_framefmt *fmt) +{ + memset(fmt, 0, sizeof(*fmt)); + + fmt->code = MEDIA_BUS_FMT_UYVY8_2X8; + fmt->colorspace = V4L2_COLORSPACE_SMPTE170M; + fmt->field = V4L2_FIELD_ALTERNATE; + + fmt->width = 720; + fmt->height = afe->curr_norm & V4L2_STD_525_60 ? 
480 : 576; + + /* Field height */ + fmt->height /= 2; +} + +static int adv748x_afe_std(v4l2_std_id std) +{ + if (std == V4L2_STD_PAL_60) + return ADV748X_AFE_STD_PAL60; + if (std == V4L2_STD_NTSC_443) + return ADV748X_AFE_STD_NTSC_443; + if (std == V4L2_STD_PAL_N) + return ADV748X_AFE_STD_PAL_N; + if (std == V4L2_STD_PAL_M) + return ADV748X_AFE_STD_PAL_M; + if (std == V4L2_STD_PAL_Nc) + return ADV748X_AFE_STD_PAL_COMB_N; + if (std & V4L2_STD_NTSC) + return ADV748X_AFE_STD_NTSC_M; + if (std & V4L2_STD_PAL) + return ADV748X_AFE_STD_PAL_BG; + if (std & V4L2_STD_SECAM) + return ADV748X_AFE_STD_PAL_SECAM; + + return -EINVAL; +} + +static void adv748x_afe_set_video_standard(struct adv748x_state *state, + int sdpstd) +{ + sdp_clrset(state, ADV748X_SDP_VID_SEL, ADV748X_SDP_VID_SEL_MASK, + (sdpstd & 0xf) << ADV748X_SDP_VID_SEL_SHIFT); +} + +static int adv748x_afe_s_input(struct adv748x_afe *afe, unsigned int input) +{ + struct adv748x_state *state = adv748x_afe_to_state(afe); + + return sdp_write(state, ADV748X_SDP_INSEL, input); +} + +static int adv748x_afe_g_pixelaspect(struct v4l2_subdev *sd, + struct v4l2_fract *aspect) +{ + struct adv748x_afe *afe = adv748x_sd_to_afe(sd); + + if (afe->curr_norm & V4L2_STD_525_60) { + aspect->numerator = 11; + aspect->denominator = 10; + } else { + aspect->numerator = 54; + aspect->denominator = 59; + } + + return 0; +} + +/* ----------------------------------------------------------------------------- + * v4l2_subdev_video_ops + */ + +static int adv748x_afe_g_std(struct v4l2_subdev *sd, v4l2_std_id *norm) +{ + struct adv748x_afe *afe = adv748x_sd_to_afe(sd); + + *norm = afe->curr_norm; + + return 0; +} + +static int adv748x_afe_s_std(struct v4l2_subdev *sd, v4l2_std_id std) +{ + struct adv748x_afe *afe = adv748x_sd_to_afe(sd); + struct adv748x_state *state = adv748x_afe_to_state(afe); + int afe_std = adv748x_afe_std(std); + + if (afe_std < 0) + return afe_std; + + mutex_lock(&state->mutex); + + adv748x_afe_set_video_standard(state, afe_std); + afe->curr_norm = std; + + mutex_unlock(&state->mutex); + + return 0; +} + +static int adv748x_afe_querystd(struct v4l2_subdev *sd, v4l2_std_id *std) +{ + struct adv748x_afe *afe = adv748x_sd_to_afe(sd); + struct adv748x_state *state = adv748x_afe_to_state(afe); + int ret; + + mutex_lock(&state->mutex); + + if (afe->streaming) { + ret = -EBUSY; + goto unlock; + } + + /* Set auto detect mode */ + adv748x_afe_set_video_standard(state, + ADV748X_AFE_STD_AD_PAL_BG_NTSC_J_SECAM); + + msleep(100); + + /* Read detected standard */ + ret = adv748x_afe_status(afe, NULL, std); + + /* Restore original state */ + adv748x_afe_set_video_standard(state, afe->curr_norm); + +unlock: + mutex_unlock(&state->mutex); + + return ret; +} + +static int adv748x_afe_g_tvnorms(struct v4l2_subdev *sd, v4l2_std_id *norm) +{ + *norm = V4L2_STD_ALL; + + return 0; +} + +static int adv748x_afe_g_input_status(struct v4l2_subdev *sd, u32 *status) +{ + struct adv748x_afe *afe = adv748x_sd_to_afe(sd); + struct adv748x_state *state = adv748x_afe_to_state(afe); + int ret; + + mutex_lock(&state->mutex); + + ret = adv748x_afe_status(afe, status, NULL); + + mutex_unlock(&state->mutex); + return ret; +} + +static int adv748x_afe_s_stream(struct v4l2_subdev *sd, int enable) +{ + struct adv748x_afe *afe = adv748x_sd_to_afe(sd); + struct adv748x_state *state = adv748x_afe_to_state(afe); + int ret, signal = V4L2_IN_ST_NO_SIGNAL; + + mutex_lock(&state->mutex); + + if (enable) { + ret = adv748x_afe_s_input(afe, afe->input); + if (ret) + goto unlock; + } + + ret = 
adv748x_txb_power(state, enable); + if (ret) + goto unlock; + + afe->streaming = enable; + + adv748x_afe_status(afe, &signal, NULL); + if (signal != V4L2_IN_ST_NO_SIGNAL) + adv_dbg(state, "Detected SDP signal\n"); + else + adv_dbg(state, "Couldn't detect SDP video signal\n"); + +unlock: + mutex_unlock(&state->mutex); + + return ret; +} + +static const struct v4l2_subdev_video_ops adv748x_afe_video_ops = { + .g_std = adv748x_afe_g_std, + .s_std = adv748x_afe_s_std, + .querystd = adv748x_afe_querystd, + .g_tvnorms = adv748x_afe_g_tvnorms, + .g_input_status = adv748x_afe_g_input_status, + .s_stream = adv748x_afe_s_stream, + .g_pixelaspect = adv748x_afe_g_pixelaspect, +}; + +/* ----------------------------------------------------------------------------- + * v4l2_subdev_pad_ops + */ + +static int adv748x_afe_propagate_pixelrate(struct adv748x_afe *afe) +{ + struct v4l2_subdev *tx; + unsigned int width, height, fps; + + tx = adv748x_get_remote_sd(&afe->pads[ADV748X_AFE_SOURCE]); + if (!tx) + return -ENOLINK; + + width = 720; + height = afe->curr_norm & V4L2_STD_525_60 ? 480 : 576; + fps = afe->curr_norm & V4L2_STD_525_60 ? 30 : 25; + + return adv748x_csi2_set_pixelrate(tx, width * height * fps); +} + +static int adv748x_afe_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_mbus_code_enum *code) +{ + if (code->index != 0) + return -EINVAL; + + code->code = MEDIA_BUS_FMT_UYVY8_2X8; + + return 0; +} + +static int adv748x_afe_get_format(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *sdformat) +{ + struct adv748x_afe *afe = adv748x_sd_to_afe(sd); + struct v4l2_mbus_framefmt *mbusformat; + + /* It makes no sense to get the format of the analog sink pads */ + if (sdformat->pad != ADV748X_AFE_SOURCE) + return -EINVAL; + + if (sdformat->which == V4L2_SUBDEV_FORMAT_TRY) { + mbusformat = v4l2_subdev_get_try_format(sd, cfg, sdformat->pad); + sdformat->format = *mbusformat; + } else { + adv748x_afe_fill_format(afe, &sdformat->format); + adv748x_afe_propagate_pixelrate(afe); + } + + return 0; +} + +static int adv748x_afe_set_format(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *sdformat) +{ + struct v4l2_mbus_framefmt *mbusformat; + + /* It makes no sense to get the format of the analog sink pads */ + if (sdformat->pad != ADV748X_AFE_SOURCE) + return -EINVAL; + + if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE) + return adv748x_afe_get_format(sd, cfg, sdformat); + + mbusformat = v4l2_subdev_get_try_format(sd, cfg, sdformat->pad); + *mbusformat = sdformat->format; + + return 0; +} + +static const struct v4l2_subdev_pad_ops adv748x_afe_pad_ops = { + .enum_mbus_code = adv748x_afe_enum_mbus_code, + .set_fmt = adv748x_afe_set_format, + .get_fmt = adv748x_afe_get_format, +}; + +/* ----------------------------------------------------------------------------- + * v4l2_subdev_ops + */ + +static const struct v4l2_subdev_ops adv748x_afe_ops = { + .video = &adv748x_afe_video_ops, + .pad = &adv748x_afe_pad_ops, +}; + +/* ----------------------------------------------------------------------------- + * Controls + */ + +static const char * const afe_ctrl_frp_menu[] = { + "Disabled", + "Solid Blue", + "Color Bars", + "Grey Ramp", + "Cb Ramp", + "Cr Ramp", + "Boundary" +}; + +static int adv748x_afe_s_ctrl(struct v4l2_ctrl *ctrl) +{ + struct adv748x_afe *afe = adv748x_ctrl_to_afe(ctrl); + struct adv748x_state *state = adv748x_afe_to_state(afe); + bool enable; + int ret; + + ret = 
sdp_write(state, 0x0e, 0x00); + if (ret < 0) + return ret; + + switch (ctrl->id) { + case V4L2_CID_BRIGHTNESS: + ret = sdp_write(state, ADV748X_SDP_BRI, ctrl->val); + break; + case V4L2_CID_HUE: + /* Hue is inverted according to HSL chart */ + ret = sdp_write(state, ADV748X_SDP_HUE, -ctrl->val); + break; + case V4L2_CID_CONTRAST: + ret = sdp_write(state, ADV748X_SDP_CON, ctrl->val); + break; + case V4L2_CID_SATURATION: + ret = sdp_write(state, ADV748X_SDP_SD_SAT_U, ctrl->val); + if (ret) + break; + ret = sdp_write(state, ADV748X_SDP_SD_SAT_V, ctrl->val); + break; + case V4L2_CID_TEST_PATTERN: + enable = !!ctrl->val; + + /* Enable/Disable Color bar test patterns */ + ret = sdp_clrset(state, ADV748X_SDP_DEF, ADV748X_SDP_DEF_VAL_EN, + enable); + if (ret) + break; + ret = sdp_clrset(state, ADV748X_SDP_FRP, ADV748X_SDP_FRP_MASK, + enable ? ctrl->val - 1 : 0); + break; + default: + return -EINVAL; + } + + return ret; +} + +static const struct v4l2_ctrl_ops adv748x_afe_ctrl_ops = { + .s_ctrl = adv748x_afe_s_ctrl, +}; + +static int adv748x_afe_init_controls(struct adv748x_afe *afe) +{ + struct adv748x_state *state = adv748x_afe_to_state(afe); + + v4l2_ctrl_handler_init(&afe->ctrl_hdl, 5); + + /* Use our mutex for the controls */ + afe->ctrl_hdl.lock = &state->mutex; + + v4l2_ctrl_new_std(&afe->ctrl_hdl, &adv748x_afe_ctrl_ops, + V4L2_CID_BRIGHTNESS, ADV748X_SDP_BRI_MIN, + ADV748X_SDP_BRI_MAX, 1, ADV748X_SDP_BRI_DEF); + v4l2_ctrl_new_std(&afe->ctrl_hdl, &adv748x_afe_ctrl_ops, + V4L2_CID_CONTRAST, ADV748X_SDP_CON_MIN, + ADV748X_SDP_CON_MAX, 1, ADV748X_SDP_CON_DEF); + v4l2_ctrl_new_std(&afe->ctrl_hdl, &adv748x_afe_ctrl_ops, + V4L2_CID_SATURATION, ADV748X_SDP_SAT_MIN, + ADV748X_SDP_SAT_MAX, 1, ADV748X_SDP_SAT_DEF); + v4l2_ctrl_new_std(&afe->ctrl_hdl, &adv748x_afe_ctrl_ops, + V4L2_CID_HUE, ADV748X_SDP_HUE_MIN, + ADV748X_SDP_HUE_MAX, 1, ADV748X_SDP_HUE_DEF); + + v4l2_ctrl_new_std_menu_items(&afe->ctrl_hdl, &adv748x_afe_ctrl_ops, + V4L2_CID_TEST_PATTERN, + ARRAY_SIZE(afe_ctrl_frp_menu) - 1, + 0, 0, afe_ctrl_frp_menu); + + afe->sd.ctrl_handler = &afe->ctrl_hdl; + if (afe->ctrl_hdl.error) { + v4l2_ctrl_handler_free(&afe->ctrl_hdl); + return afe->ctrl_hdl.error; + } + + return v4l2_ctrl_handler_setup(&afe->ctrl_hdl); +} + +int adv748x_afe_init(struct adv748x_afe *afe) +{ + struct adv748x_state *state = adv748x_afe_to_state(afe); + int ret; + unsigned int i; + + afe->input = 0; + afe->streaming = false; + afe->curr_norm = V4L2_STD_NTSC_M; + + adv748x_subdev_init(&afe->sd, state, &adv748x_afe_ops, + MEDIA_ENT_F_ATV_DECODER, "afe"); + + /* Identify the first connector found as a default input if set */ + for (i = ADV748X_PORT_AIN0; i <= ADV748X_PORT_AIN7; i++) { + /* Inputs and ports are 1-indexed to match the data sheet */ + if (state->endpoints[i]) { + afe->input = i; + break; + } + } + + adv748x_afe_s_input(afe, afe->input); + + adv_dbg(state, "AFE Default input set to %d\n", afe->input); + + /* Entity pads and sinks are 0-indexed to match the pads */ + for (i = ADV748X_AFE_SINK_AIN0; i <= ADV748X_AFE_SINK_AIN7; i++) + afe->pads[i].flags = MEDIA_PAD_FL_SINK; + + afe->pads[ADV748X_AFE_SOURCE].flags = MEDIA_PAD_FL_SOURCE; + + ret = media_entity_pads_init(&afe->sd.entity, ADV748X_AFE_NR_PADS, + afe->pads); + if (ret) + return ret; + + ret = adv748x_afe_init_controls(afe); + if (ret) + goto error; + + return 0; + +error: + media_entity_cleanup(&afe->sd.entity); + + return ret; +} + +void adv748x_afe_cleanup(struct adv748x_afe *afe) +{ + v4l2_device_unregister_subdev(&afe->sd); + 
media_entity_cleanup(&afe->sd.entity); + v4l2_ctrl_handler_free(&afe->ctrl_hdl); +} diff --git a/drivers/media/i2c/adv748x/adv748x-core.c b/drivers/media/i2c/adv748x/adv748x-core.c new file mode 100644 index 000000000000..5ee14f2c2747 --- /dev/null +++ b/drivers/media/i2c/adv748x/adv748x-core.c @@ -0,0 +1,833 @@ +/* + * Driver for Analog Devices ADV748X HDMI receiver with AFE + * + * Copyright (C) 2017 Renesas Electronics Corp. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * Authors: + * Koji Matsuoka + * Niklas Söderlund + * Kieran Bingham + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "adv748x.h" + +/* ----------------------------------------------------------------------------- + * Register manipulation + */ + +static const struct regmap_config adv748x_regmap_cnf[] = { + { + .name = "io", + .reg_bits = 8, + .val_bits = 8, + + .max_register = 0xff, + .cache_type = REGCACHE_NONE, + }, + { + .name = "dpll", + .reg_bits = 8, + .val_bits = 8, + + .max_register = 0xff, + .cache_type = REGCACHE_NONE, + }, + { + .name = "cp", + .reg_bits = 8, + .val_bits = 8, + + .max_register = 0xff, + .cache_type = REGCACHE_NONE, + }, + { + .name = "hdmi", + .reg_bits = 8, + .val_bits = 8, + + .max_register = 0xff, + .cache_type = REGCACHE_NONE, + }, + { + .name = "edid", + .reg_bits = 8, + .val_bits = 8, + + .max_register = 0xff, + .cache_type = REGCACHE_NONE, + }, + { + .name = "repeater", + .reg_bits = 8, + .val_bits = 8, + + .max_register = 0xff, + .cache_type = REGCACHE_NONE, + }, + { + .name = "infoframe", + .reg_bits = 8, + .val_bits = 8, + + .max_register = 0xff, + .cache_type = REGCACHE_NONE, + }, + { + .name = "cec", + .reg_bits = 8, + .val_bits = 8, + + .max_register = 0xff, + .cache_type = REGCACHE_NONE, + }, + { + .name = "sdp", + .reg_bits = 8, + .val_bits = 8, + + .max_register = 0xff, + .cache_type = REGCACHE_NONE, + }, + + { + .name = "txb", + .reg_bits = 8, + .val_bits = 8, + + .max_register = 0xff, + .cache_type = REGCACHE_NONE, + }, + { + .name = "txa", + .reg_bits = 8, + .val_bits = 8, + + .max_register = 0xff, + .cache_type = REGCACHE_NONE, + }, +}; + +static int adv748x_configure_regmap(struct adv748x_state *state, int region) +{ + int err; + + if (!state->i2c_clients[region]) + return -ENODEV; + + state->regmap[region] = + devm_regmap_init_i2c(state->i2c_clients[region], + &adv748x_regmap_cnf[region]); + + if (IS_ERR(state->regmap[region])) { + err = PTR_ERR(state->regmap[region]); + adv_err(state, + "Error initializing regmap %d with error %d\n", + region, err); + return -EINVAL; + } + + return 0; +} + +/* Default addresses for the I2C pages */ +static int adv748x_i2c_addresses[ADV748X_PAGE_MAX] = { + ADV748X_I2C_IO, + ADV748X_I2C_DPLL, + ADV748X_I2C_CP, + ADV748X_I2C_HDMI, + ADV748X_I2C_EDID, + ADV748X_I2C_REPEATER, + ADV748X_I2C_INFOFRAME, + ADV748X_I2C_CEC, + ADV748X_I2C_SDP, + ADV748X_I2C_TXB, + ADV748X_I2C_TXA, +}; + +static int adv748x_read_check(struct adv748x_state *state, + int client_page, u8 reg) +{ + struct i2c_client *client = state->i2c_clients[client_page]; + int err; + unsigned int val; + + err = regmap_read(state->regmap[client_page], reg, &val); + + if (err) { + adv_err(state, "error reading %02x, %02x\n", + client->addr, reg); + return err; + } + + 
return val; +} + +int adv748x_read(struct adv748x_state *state, u8 page, u8 reg) +{ + return adv748x_read_check(state, page, reg); +} + +int adv748x_write(struct adv748x_state *state, u8 page, u8 reg, u8 value) +{ + return regmap_write(state->regmap[page], reg, value); +} + +/* adv748x_write_block(): Write raw data with a maximum of I2C_SMBUS_BLOCK_MAX + * size to one or more registers. + * + * A value of zero will be returned on success, a negative errno will + * be returned in error cases. + */ +int adv748x_write_block(struct adv748x_state *state, int client_page, + unsigned int init_reg, const void *val, + size_t val_len) +{ + struct regmap *regmap = state->regmap[client_page]; + + if (val_len > I2C_SMBUS_BLOCK_MAX) + val_len = I2C_SMBUS_BLOCK_MAX; + + return regmap_raw_write(regmap, init_reg, val, val_len); +} + +static struct i2c_client *adv748x_dummy_client(struct adv748x_state *state, + u8 addr, u8 io_reg) +{ + struct i2c_client *client = state->client; + + if (addr) + io_write(state, io_reg, addr << 1); + + return i2c_new_dummy(client->adapter, io_read(state, io_reg) >> 1); +} + +static void adv748x_unregister_clients(struct adv748x_state *state) +{ + unsigned int i; + + for (i = 1; i < ARRAY_SIZE(state->i2c_clients); ++i) { + if (state->i2c_clients[i]) + i2c_unregister_device(state->i2c_clients[i]); + } +} + +static int adv748x_initialise_clients(struct adv748x_state *state) +{ + int i; + int ret; + + for (i = ADV748X_PAGE_DPLL; i < ADV748X_PAGE_MAX; ++i) { + state->i2c_clients[i] = + adv748x_dummy_client(state, adv748x_i2c_addresses[i], + ADV748X_IO_SLAVE_ADDR_BASE + i); + if (state->i2c_clients[i] == NULL) { + adv_err(state, "failed to create i2c client %u\n", i); + return -ENOMEM; + } + + ret = adv748x_configure_regmap(state, i); + if (ret) + return ret; + } + + return 0; +} + +/** + * struct adv748x_reg_value - Register write instruction + * @page: Regmap page identifier + * @reg: I2C register + * @value: value to write to @page at @reg + */ +struct adv748x_reg_value { + u8 page; + u8 reg; + u8 value; +}; + +static int adv748x_write_regs(struct adv748x_state *state, + const struct adv748x_reg_value *regs) +{ + int ret; + + while (regs->page != ADV748X_PAGE_EOR) { + if (regs->page == ADV748X_PAGE_WAIT) { + msleep(regs->value); + } else { + ret = adv748x_write(state, regs->page, regs->reg, + regs->value); + if (ret < 0) { + adv_err(state, + "Error regs page: 0x%02x reg: 0x%02x\n", + regs->page, regs->reg); + return ret; + } + } + regs++; + } + + return 0; +} + +/* ----------------------------------------------------------------------------- + * TXA and TXB + */ + +static const struct adv748x_reg_value adv748x_power_up_txa_4lane[] = { + + {ADV748X_PAGE_TXA, 0x00, 0x84}, /* Enable 4-lane MIPI */ + {ADV748X_PAGE_TXA, 0x00, 0xa4}, /* Set Auto DPHY Timing */ + + {ADV748X_PAGE_TXA, 0x31, 0x82}, /* ADI Required Write */ + {ADV748X_PAGE_TXA, 0x1e, 0x40}, /* ADI Required Write */ + {ADV748X_PAGE_TXA, 0xda, 0x01}, /* i2c_mipi_pll_en - 1'b1 */ + {ADV748X_PAGE_WAIT, 0x00, 0x02},/* delay 2 */ + {ADV748X_PAGE_TXA, 0x00, 0x24 },/* Power-up CSI-TX */ + {ADV748X_PAGE_WAIT, 0x00, 0x01},/* delay 1 */ + {ADV748X_PAGE_TXA, 0xc1, 0x2b}, /* ADI Required Write */ + {ADV748X_PAGE_WAIT, 0x00, 0x01},/* delay 1 */ + {ADV748X_PAGE_TXA, 0x31, 0x80}, /* ADI Required Write */ + + {ADV748X_PAGE_EOR, 0xff, 0xff} /* End of register table */ +}; + +static const struct adv748x_reg_value adv748x_power_down_txa_4lane[] = { + + {ADV748X_PAGE_TXA, 0x31, 0x82}, /* ADI Required Write */ + {ADV748X_PAGE_TXA, 0x1e, 0x00}, 
/* ADI Required Write */ + {ADV748X_PAGE_TXA, 0x00, 0x84}, /* Enable 4-lane MIPI */ + {ADV748X_PAGE_TXA, 0xda, 0x01}, /* i2c_mipi_pll_en - 1'b1 */ + {ADV748X_PAGE_TXA, 0xc1, 0x3b}, /* ADI Required Write */ + + {ADV748X_PAGE_EOR, 0xff, 0xff} /* End of register table */ +}; + +static const struct adv748x_reg_value adv748x_power_up_txb_1lane[] = { + + {ADV748X_PAGE_TXB, 0x00, 0x81}, /* Enable 1-lane MIPI */ + {ADV748X_PAGE_TXB, 0x00, 0xa1}, /* Set Auto DPHY Timing */ + + {ADV748X_PAGE_TXB, 0x31, 0x82}, /* ADI Required Write */ + {ADV748X_PAGE_TXB, 0x1e, 0x40}, /* ADI Required Write */ + {ADV748X_PAGE_TXB, 0xda, 0x01}, /* i2c_mipi_pll_en - 1'b1 */ + {ADV748X_PAGE_WAIT, 0x00, 0x02},/* delay 2 */ + {ADV748X_PAGE_TXB, 0x00, 0x21 },/* Power-up CSI-TX */ + {ADV748X_PAGE_WAIT, 0x00, 0x01},/* delay 1 */ + {ADV748X_PAGE_TXB, 0xc1, 0x2b}, /* ADI Required Write */ + {ADV748X_PAGE_WAIT, 0x00, 0x01},/* delay 1 */ + {ADV748X_PAGE_TXB, 0x31, 0x80}, /* ADI Required Write */ + + {ADV748X_PAGE_EOR, 0xff, 0xff} /* End of register table */ +}; + +static const struct adv748x_reg_value adv748x_power_down_txb_1lane[] = { + + {ADV748X_PAGE_TXB, 0x31, 0x82}, /* ADI Required Write */ + {ADV748X_PAGE_TXB, 0x1e, 0x00}, /* ADI Required Write */ + {ADV748X_PAGE_TXB, 0x00, 0x81}, /* Enable 1-lane MIPI */ + {ADV748X_PAGE_TXB, 0xda, 0x01}, /* i2c_mipi_pll_en - 1'b1 */ + {ADV748X_PAGE_TXB, 0xc1, 0x3b}, /* ADI Required Write */ + + {ADV748X_PAGE_EOR, 0xff, 0xff} /* End of register table */ +}; + +int adv748x_txa_power(struct adv748x_state *state, bool on) +{ + int val; + + val = txa_read(state, ADV748X_CSI_FS_AS_LS); + if (val < 0) + return val; + + /* + * This test against BIT(6) is not documented by the datasheet, but was + * specified in the downstream driver. + * Track with a WARN_ONCE to determine if it is ever set by HW. + */ + WARN_ONCE((on && val & ADV748X_CSI_FS_AS_LS_UNKNOWN), + "Enabling with unknown bit set"); + + if (on) + return adv748x_write_regs(state, adv748x_power_up_txa_4lane); + + return adv748x_write_regs(state, adv748x_power_down_txa_4lane); +} + +int adv748x_txb_power(struct adv748x_state *state, bool on) +{ + int val; + + val = txb_read(state, ADV748X_CSI_FS_AS_LS); + if (val < 0) + return val; + + /* + * This test against BIT(6) is not documented by the datasheet, but was + * specified in the downstream driver. + * Track with a WARN_ONCE to determine if it is ever set by HW. 
+ */ + WARN_ONCE((on && val & ADV748X_CSI_FS_AS_LS_UNKNOWN), + "Enabling with unknown bit set"); + + if (on) + return adv748x_write_regs(state, adv748x_power_up_txb_1lane); + + return adv748x_write_regs(state, adv748x_power_down_txb_1lane); +} + +/* ----------------------------------------------------------------------------- + * Media Operations + */ + +static const struct media_entity_operations adv748x_media_ops = { + .link_validate = v4l2_subdev_link_validate, +}; + +/* ----------------------------------------------------------------------------- + * HW setup + */ + +static const struct adv748x_reg_value adv748x_sw_reset[] = { + + {ADV748X_PAGE_IO, 0xff, 0xff}, /* SW reset */ + {ADV748X_PAGE_WAIT, 0x00, 0x05},/* delay 5 */ + {ADV748X_PAGE_IO, 0x01, 0x76}, /* ADI Required Write */ + {ADV748X_PAGE_IO, 0xf2, 0x01}, /* Enable I2C Read Auto-Increment */ + {ADV748X_PAGE_EOR, 0xff, 0xff} /* End of register table */ +}; + +static const struct adv748x_reg_value adv748x_set_slave_address[] = { + {ADV748X_PAGE_IO, 0xf3, ADV748X_I2C_DPLL << 1}, + {ADV748X_PAGE_IO, 0xf4, ADV748X_I2C_CP << 1}, + {ADV748X_PAGE_IO, 0xf5, ADV748X_I2C_HDMI << 1}, + {ADV748X_PAGE_IO, 0xf6, ADV748X_I2C_EDID << 1}, + {ADV748X_PAGE_IO, 0xf7, ADV748X_I2C_REPEATER << 1}, + {ADV748X_PAGE_IO, 0xf8, ADV748X_I2C_INFOFRAME << 1}, + {ADV748X_PAGE_IO, 0xfa, ADV748X_I2C_CEC << 1}, + {ADV748X_PAGE_IO, 0xfb, ADV748X_I2C_SDP << 1}, + {ADV748X_PAGE_IO, 0xfc, ADV748X_I2C_TXB << 1}, + {ADV748X_PAGE_IO, 0xfd, ADV748X_I2C_TXA << 1}, + {ADV748X_PAGE_EOR, 0xff, 0xff} /* End of register table */ +}; + +/* Supported Formats For Script Below */ +/* - 01-29 HDMI to MIPI TxA CSI 4-Lane - RGB888: */ +static const struct adv748x_reg_value adv748x_init_txa_4lane[] = { + /* Disable chip powerdown & Enable HDMI Rx block */ + {ADV748X_PAGE_IO, 0x00, 0x40}, + + {ADV748X_PAGE_REPEATER, 0x40, 0x83}, /* Enable HDCP 1.1 */ + + {ADV748X_PAGE_HDMI, 0x00, 0x08},/* Foreground Channel = A */ + {ADV748X_PAGE_HDMI, 0x98, 0xff},/* ADI Required Write */ + {ADV748X_PAGE_HDMI, 0x99, 0xa3},/* ADI Required Write */ + {ADV748X_PAGE_HDMI, 0x9a, 0x00},/* ADI Required Write */ + {ADV748X_PAGE_HDMI, 0x9b, 0x0a},/* ADI Required Write */ + {ADV748X_PAGE_HDMI, 0x9d, 0x40},/* ADI Required Write */ + {ADV748X_PAGE_HDMI, 0xcb, 0x09},/* ADI Required Write */ + {ADV748X_PAGE_HDMI, 0x3d, 0x10},/* ADI Required Write */ + {ADV748X_PAGE_HDMI, 0x3e, 0x7b},/* ADI Required Write */ + {ADV748X_PAGE_HDMI, 0x3f, 0x5e},/* ADI Required Write */ + {ADV748X_PAGE_HDMI, 0x4e, 0xfe},/* ADI Required Write */ + {ADV748X_PAGE_HDMI, 0x4f, 0x18},/* ADI Required Write */ + {ADV748X_PAGE_HDMI, 0x57, 0xa3},/* ADI Required Write */ + {ADV748X_PAGE_HDMI, 0x58, 0x04},/* ADI Required Write */ + {ADV748X_PAGE_HDMI, 0x85, 0x10},/* ADI Required Write */ + + {ADV748X_PAGE_HDMI, 0x83, 0x00},/* Enable All Terminations */ + {ADV748X_PAGE_HDMI, 0xa3, 0x01},/* ADI Required Write */ + {ADV748X_PAGE_HDMI, 0xbe, 0x00},/* ADI Required Write */ + + {ADV748X_PAGE_HDMI, 0x6c, 0x01},/* HPA Manual Enable */ + {ADV748X_PAGE_HDMI, 0xf8, 0x01},/* HPA Asserted */ + {ADV748X_PAGE_HDMI, 0x0f, 0x00},/* Audio Mute Speed Set to Fastest */ + /* (Smallest Step Size) */ + + {ADV748X_PAGE_IO, 0x04, 0x02}, /* RGB Out of CP */ + {ADV748X_PAGE_IO, 0x12, 0xf0}, /* CSC Depends on ip Packets, SDR 444 */ + {ADV748X_PAGE_IO, 0x17, 0x80}, /* Luma & Chroma can reach 254d */ + {ADV748X_PAGE_IO, 0x03, 0x86}, /* CP-Insert_AV_Code */ + + {ADV748X_PAGE_CP, 0x7c, 0x00}, /* ADI Required Write */ + + {ADV748X_PAGE_IO, 0x0c, 0xe0}, /* Enable LLC_DLL & Double LLC 
Timing */ + {ADV748X_PAGE_IO, 0x0e, 0xdd}, /* LLC/PIX/SPI PINS TRISTATED AUD */ + /* Outputs Enabled */ + {ADV748X_PAGE_IO, 0x10, 0xa0}, /* Enable 4-lane CSI Tx & Pixel Port */ + + {ADV748X_PAGE_TXA, 0x00, 0x84}, /* Enable 4-lane MIPI */ + {ADV748X_PAGE_TXA, 0x00, 0xa4}, /* Set Auto DPHY Timing */ + {ADV748X_PAGE_TXA, 0xdb, 0x10}, /* ADI Required Write */ + {ADV748X_PAGE_TXA, 0xd6, 0x07}, /* ADI Required Write */ + {ADV748X_PAGE_TXA, 0xc4, 0x0a}, /* ADI Required Write */ + {ADV748X_PAGE_TXA, 0x71, 0x33}, /* ADI Required Write */ + {ADV748X_PAGE_TXA, 0x72, 0x11}, /* ADI Required Write */ + {ADV748X_PAGE_TXA, 0xf0, 0x00}, /* i2c_dphy_pwdn - 1'b0 */ + + {ADV748X_PAGE_TXA, 0x31, 0x82}, /* ADI Required Write */ + {ADV748X_PAGE_TXA, 0x1e, 0x40}, /* ADI Required Write */ + {ADV748X_PAGE_TXA, 0xda, 0x01}, /* i2c_mipi_pll_en - 1'b1 */ + {ADV748X_PAGE_WAIT, 0x00, 0x02},/* delay 2 */ + {ADV748X_PAGE_TXA, 0x00, 0x24 },/* Power-up CSI-TX */ + {ADV748X_PAGE_WAIT, 0x00, 0x01},/* delay 1 */ + {ADV748X_PAGE_TXA, 0xc1, 0x2b}, /* ADI Required Write */ + {ADV748X_PAGE_WAIT, 0x00, 0x01},/* delay 1 */ + {ADV748X_PAGE_TXA, 0x31, 0x80}, /* ADI Required Write */ + + {ADV748X_PAGE_EOR, 0xff, 0xff} /* End of register table */ +}; + +/* 02-01 Analog CVBS to MIPI TX-B CSI 1-Lane - */ +/* Autodetect CVBS Single Ended In Ain 1 - MIPI Out */ +static const struct adv748x_reg_value adv748x_init_txb_1lane[] = { + + {ADV748X_PAGE_IO, 0x00, 0x30}, /* Disable chip powerdown Rx */ + {ADV748X_PAGE_IO, 0xf2, 0x01}, /* Enable I2C Read Auto-Increment */ + + {ADV748X_PAGE_IO, 0x0e, 0xff}, /* LLC/PIX/AUD/SPI PINS TRISTATED */ + + {ADV748X_PAGE_SDP, 0x0f, 0x00}, /* Exit Power Down Mode */ + {ADV748X_PAGE_SDP, 0x52, 0xcd}, /* ADI Required Write */ + + {ADV748X_PAGE_SDP, 0x0e, 0x80}, /* ADI Required Write */ + {ADV748X_PAGE_SDP, 0x9c, 0x00}, /* ADI Required Write */ + {ADV748X_PAGE_SDP, 0x9c, 0xff}, /* ADI Required Write */ + {ADV748X_PAGE_SDP, 0x0e, 0x00}, /* ADI Required Write */ + + /* ADI recommended writes for improved video quality */ + {ADV748X_PAGE_SDP, 0x80, 0x51}, /* ADI Required Write */ + {ADV748X_PAGE_SDP, 0x81, 0x51}, /* ADI Required Write */ + {ADV748X_PAGE_SDP, 0x82, 0x68}, /* ADI Required Write */ + + {ADV748X_PAGE_SDP, 0x03, 0x42}, /* Tri-S Output , PwrDwn 656 pads */ + {ADV748X_PAGE_SDP, 0x04, 0xb5}, /* ITU-R BT.656-4 compatible */ + {ADV748X_PAGE_SDP, 0x13, 0x00}, /* ADI Required Write */ + + {ADV748X_PAGE_SDP, 0x17, 0x41}, /* Select SH1 */ + {ADV748X_PAGE_SDP, 0x31, 0x12}, /* ADI Required Write */ + {ADV748X_PAGE_SDP, 0xe6, 0x4f}, /* V bit end pos manually in NTSC */ + + /* Enable 1-Lane MIPI Tx, */ + /* enable pixel output and route SD through Pixel port */ + {ADV748X_PAGE_IO, 0x10, 0x70}, + + {ADV748X_PAGE_TXB, 0x00, 0x81}, /* Enable 1-lane MIPI */ + {ADV748X_PAGE_TXB, 0x00, 0xa1}, /* Set Auto DPHY Timing */ + {ADV748X_PAGE_TXB, 0xd2, 0x40}, /* ADI Required Write */ + {ADV748X_PAGE_TXB, 0xc4, 0x0a}, /* ADI Required Write */ + {ADV748X_PAGE_TXB, 0x71, 0x33}, /* ADI Required Write */ + {ADV748X_PAGE_TXB, 0x72, 0x11}, /* ADI Required Write */ + {ADV748X_PAGE_TXB, 0xf0, 0x00}, /* i2c_dphy_pwdn - 1'b0 */ + {ADV748X_PAGE_TXB, 0x31, 0x82}, /* ADI Required Write */ + {ADV748X_PAGE_TXB, 0x1e, 0x40}, /* ADI Required Write */ + {ADV748X_PAGE_TXB, 0xda, 0x01}, /* i2c_mipi_pll_en - 1'b1 */ + + {ADV748X_PAGE_WAIT, 0x00, 0x02},/* delay 2 */ + {ADV748X_PAGE_TXB, 0x00, 0x21 },/* Power-up CSI-TX */ + {ADV748X_PAGE_WAIT, 0x00, 0x01},/* delay 1 */ + {ADV748X_PAGE_TXB, 0xc1, 0x2b}, /* ADI Required Write */ + {ADV748X_PAGE_WAIT, 0x00, 
0x01},/* delay 1 */ + {ADV748X_PAGE_TXB, 0x31, 0x80}, /* ADI Required Write */ + + {ADV748X_PAGE_EOR, 0xff, 0xff} /* End of register table */ +}; + +static int adv748x_reset(struct adv748x_state *state) +{ + int ret; + + ret = adv748x_write_regs(state, adv748x_sw_reset); + if (ret < 0) + return ret; + + ret = adv748x_write_regs(state, adv748x_set_slave_address); + if (ret < 0) + return ret; + + /* Init and power down TXA */ + ret = adv748x_write_regs(state, adv748x_init_txa_4lane); + if (ret) + return ret; + + adv748x_txa_power(state, 0); + + /* Init and power down TXB */ + ret = adv748x_write_regs(state, adv748x_init_txb_1lane); + if (ret) + return ret; + + adv748x_txb_power(state, 0); + + /* Disable chip powerdown & Enable HDMI Rx block */ + io_write(state, ADV748X_IO_PD, ADV748X_IO_PD_RX_EN); + + /* Enable 4-lane CSI Tx & Pixel Port */ + io_write(state, ADV748X_IO_10, ADV748X_IO_10_CSI4_EN | + ADV748X_IO_10_CSI1_EN | + ADV748X_IO_10_PIX_OUT_EN); + + /* Use vid_std and v_freq as freerun resolution for CP */ + cp_clrset(state, ADV748X_CP_CLMP_POS, ADV748X_CP_CLMP_POS_DIS_AUTO, + ADV748X_CP_CLMP_POS_DIS_AUTO); + + return 0; +} + +static int adv748x_identify_chip(struct adv748x_state *state) +{ + int msb, lsb; + + lsb = io_read(state, ADV748X_IO_CHIP_REV_ID_1); + msb = io_read(state, ADV748X_IO_CHIP_REV_ID_2); + + if (lsb < 0 || msb < 0) { + adv_err(state, "Failed to read chip revision\n"); + return -EIO; + } + + adv_info(state, "chip found @ 0x%02x revision %02x%02x\n", + state->client->addr << 1, lsb, msb); + + return 0; +} + +/* ----------------------------------------------------------------------------- + * i2c driver + */ + +void adv748x_subdev_init(struct v4l2_subdev *sd, struct adv748x_state *state, + const struct v4l2_subdev_ops *ops, u32 function, + const char *ident) +{ + v4l2_subdev_init(sd, ops); + sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + + /* the owner is the same as the i2c_client's driver owner */ + sd->owner = state->dev->driver->owner; + sd->dev = state->dev; + + v4l2_set_subdevdata(sd, state); + + /* initialize name */ + snprintf(sd->name, sizeof(sd->name), "%s %d-%04x %s", + state->dev->driver->name, + i2c_adapter_id(state->client->adapter), + state->client->addr, ident); + + sd->entity.function = function; + sd->entity.ops = &adv748x_media_ops; +} + +static int adv748x_parse_dt(struct adv748x_state *state) +{ + struct device_node *ep_np = NULL; + struct of_endpoint ep; + bool found = false; + + for_each_endpoint_of_node(state->dev->of_node, ep_np) { + of_graph_parse_endpoint(ep_np, &ep); + adv_info(state, "Endpoint %s on port %d", + of_node_full_name(ep.local_node), + ep.port); + + if (ep.port >= ADV748X_PORT_MAX) { + adv_err(state, "Invalid endpoint %s on port %d", + of_node_full_name(ep.local_node), + ep.port); + + continue; + } + + if (state->endpoints[ep.port]) { + adv_err(state, + "Multiple port endpoints are not supported"); + continue; + } + + of_node_get(ep_np); + state->endpoints[ep.port] = ep_np; + + found = true; + } + + return found ? 
0 : -ENODEV; +} + +static void adv748x_dt_cleanup(struct adv748x_state *state) +{ + unsigned int i; + + for (i = 0; i < ADV748X_PORT_MAX; i++) + of_node_put(state->endpoints[i]); +} + +static int adv748x_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct adv748x_state *state; + int ret; + + /* Check if the adapter supports the needed features */ + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) + return -EIO; + + state = kzalloc(sizeof(struct adv748x_state), GFP_KERNEL); + if (!state) + return -ENOMEM; + + mutex_init(&state->mutex); + + state->dev = &client->dev; + state->client = client; + state->i2c_clients[ADV748X_PAGE_IO] = client; + i2c_set_clientdata(client, state); + + /* Discover and process ports declared by the Device tree endpoints */ + ret = adv748x_parse_dt(state); + if (ret) { + adv_err(state, "Failed to parse device tree"); + goto err_free_mutex; + } + + /* Configure IO Regmap region */ + ret = adv748x_configure_regmap(state, ADV748X_PAGE_IO); + if (ret) { + adv_err(state, "Error configuring IO regmap region"); + goto err_cleanup_dt; + } + + ret = adv748x_identify_chip(state); + if (ret) { + adv_err(state, "Failed to identify chip"); + goto err_cleanup_clients; + } + + /* Configure remaining pages as I2C clients with regmap access */ + ret = adv748x_initialise_clients(state); + if (ret) { + adv_err(state, "Failed to setup client regmap pages"); + goto err_cleanup_clients; + } + + /* SW reset ADV748X to its default values */ + ret = adv748x_reset(state); + if (ret) { + adv_err(state, "Failed to reset hardware"); + goto err_cleanup_clients; + } + + /* Initialise HDMI */ + ret = adv748x_hdmi_init(&state->hdmi); + if (ret) { + adv_err(state, "Failed to probe HDMI"); + goto err_cleanup_clients; + } + + /* Initialise AFE */ + ret = adv748x_afe_init(&state->afe); + if (ret) { + adv_err(state, "Failed to probe AFE"); + goto err_cleanup_hdmi; + } + + /* Initialise TXA */ + ret = adv748x_csi2_init(state, &state->txa); + if (ret) { + adv_err(state, "Failed to probe TXA"); + goto err_cleanup_afe; + } + + /* Initialise TXB */ + ret = adv748x_csi2_init(state, &state->txb); + if (ret) { + adv_err(state, "Failed to probe TXB"); + goto err_cleanup_txa; + } + + return 0; + +err_cleanup_txa: + adv748x_csi2_cleanup(&state->txa); +err_cleanup_afe: + adv748x_afe_cleanup(&state->afe); +err_cleanup_hdmi: + adv748x_hdmi_cleanup(&state->hdmi); +err_cleanup_clients: + adv748x_unregister_clients(state); +err_cleanup_dt: + adv748x_dt_cleanup(state); +err_free_mutex: + mutex_destroy(&state->mutex); + kfree(state); + + return ret; +} + +static int adv748x_remove(struct i2c_client *client) +{ + struct adv748x_state *state = i2c_get_clientdata(client); + + adv748x_afe_cleanup(&state->afe); + adv748x_hdmi_cleanup(&state->hdmi); + + adv748x_csi2_cleanup(&state->txa); + adv748x_csi2_cleanup(&state->txb); + + adv748x_unregister_clients(state); + adv748x_dt_cleanup(state); + mutex_destroy(&state->mutex); + + kfree(state); + + return 0; +} + +static const struct i2c_device_id adv748x_id[] = { + { "adv7481", 0 }, + { "adv7482", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(i2c, adv748x_id); + +static const struct of_device_id adv748x_of_table[] = { + { .compatible = "adi,adv7481", }, + { .compatible = "adi,adv7482", }, + { } +}; +MODULE_DEVICE_TABLE(of, adv748x_of_table); + +static struct i2c_driver adv748x_driver = { + .driver = { + .name = "adv748x", + .of_match_table = adv748x_of_table, + }, + .probe = adv748x_probe, + .remove = adv748x_remove, + .id_table = 
adv748x_id, +}; + +module_i2c_driver(adv748x_driver); + +MODULE_AUTHOR("Kieran Bingham "); +MODULE_DESCRIPTION("ADV748X video decoder"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/i2c/adv748x/adv748x-csi2.c b/drivers/media/i2c/adv748x/adv748x-csi2.c new file mode 100644 index 000000000000..979825d4a419 --- /dev/null +++ b/drivers/media/i2c/adv748x/adv748x-csi2.c @@ -0,0 +1,326 @@ +/* + * Driver for Analog Devices ADV748X CSI-2 Transmitter + * + * Copyright (C) 2017 Renesas Electronics Corp. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include +#include + +#include +#include +#include + +#include "adv748x.h" + +static bool is_txa(struct adv748x_csi2 *tx) +{ + return tx == &tx->state->txa; +} + +static int adv748x_csi2_set_virtual_channel(struct adv748x_csi2 *tx, + unsigned int vc) +{ + return tx_write(tx, ADV748X_CSI_VC_REF, vc << ADV748X_CSI_VC_REF_SHIFT); +} + +/** + * adv748x_csi2_register_link : Register and link internal entities + * + * @tx: CSI2 private entity + * @v4l2_dev: Video registration device + * @src: Source subdevice to establish link + * @src_pad: Pad number of source to link to this @tx + * + * Ensure that the subdevice is registered against the v4l2_device, and link the + * source pad to the sink pad of the CSI2 bus entity. + */ +static int adv748x_csi2_register_link(struct adv748x_csi2 *tx, + struct v4l2_device *v4l2_dev, + struct v4l2_subdev *src, + unsigned int src_pad) +{ + int enabled = MEDIA_LNK_FL_ENABLED; + int ret; + + /* + * Dynamic linking of the AFE is not supported. + * Register the links as immutable. + */ + enabled |= MEDIA_LNK_FL_IMMUTABLE; + + if (!src->v4l2_dev) { + ret = v4l2_device_register_subdev(v4l2_dev, src); + if (ret) + return ret; + } + + return media_create_pad_link(&src->entity, src_pad, + &tx->sd.entity, ADV748X_CSI2_SINK, + enabled); +} + +/* ----------------------------------------------------------------------------- + * v4l2_subdev_internal_ops + * + * We use the internal registered operation to be able to ensure that our + * incremental subdevices (not connected in the forward path) can be registered + * against the resulting video path and media device. + */ + +static int adv748x_csi2_registered(struct v4l2_subdev *sd) +{ + struct adv748x_csi2 *tx = adv748x_sd_to_csi2(sd); + struct adv748x_state *state = tx->state; + + adv_dbg(state, "Registered %s (%s)", is_txa(tx) ? "TXA":"TXB", + sd->name); + + /* + * The adv748x hardware allows the AFE to route through the TXA, however + * this is not currently supported in this driver. + * + * Link HDMI->TXA, and AFE->TXB directly. 
+ */ + if (is_txa(tx)) { + return adv748x_csi2_register_link(tx, sd->v4l2_dev, + &state->hdmi.sd, + ADV748X_HDMI_SOURCE); + } else { + return adv748x_csi2_register_link(tx, sd->v4l2_dev, + &state->afe.sd, + ADV748X_AFE_SOURCE); + } +} + +static const struct v4l2_subdev_internal_ops adv748x_csi2_internal_ops = { + .registered = adv748x_csi2_registered, +}; + +/* ----------------------------------------------------------------------------- + * v4l2_subdev_video_ops + */ + +static int adv748x_csi2_s_stream(struct v4l2_subdev *sd, int enable) +{ + struct adv748x_csi2 *tx = adv748x_sd_to_csi2(sd); + struct v4l2_subdev *src; + + src = adv748x_get_remote_sd(&tx->pads[ADV748X_CSI2_SINK]); + if (!src) + return -EPIPE; + + return v4l2_subdev_call(src, video, s_stream, enable); +} + +static const struct v4l2_subdev_video_ops adv748x_csi2_video_ops = { + .s_stream = adv748x_csi2_s_stream, +}; + +/* ----------------------------------------------------------------------------- + * v4l2_subdev_pad_ops + * + * The CSI2 bus pads are ignorant to the data sizes or formats. + * But we must support setting the pad formats for format propagation. + */ + +static struct v4l2_mbus_framefmt * +adv748x_csi2_get_pad_format(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + unsigned int pad, u32 which) +{ + struct adv748x_csi2 *tx = adv748x_sd_to_csi2(sd); + + if (which == V4L2_SUBDEV_FORMAT_TRY) + return v4l2_subdev_get_try_format(sd, cfg, pad); + + return &tx->format; +} + +static int adv748x_csi2_get_format(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *sdformat) +{ + struct adv748x_csi2 *tx = adv748x_sd_to_csi2(sd); + struct adv748x_state *state = tx->state; + struct v4l2_mbus_framefmt *mbusformat; + + mbusformat = adv748x_csi2_get_pad_format(sd, cfg, sdformat->pad, + sdformat->which); + if (!mbusformat) + return -EINVAL; + + mutex_lock(&state->mutex); + + sdformat->format = *mbusformat; + + mutex_unlock(&state->mutex); + + return 0; +} + +static int adv748x_csi2_set_format(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *sdformat) +{ + struct adv748x_csi2 *tx = adv748x_sd_to_csi2(sd); + struct adv748x_state *state = tx->state; + struct v4l2_mbus_framefmt *mbusformat; + int ret = 0; + + mbusformat = adv748x_csi2_get_pad_format(sd, cfg, sdformat->pad, + sdformat->which); + if (!mbusformat) + return -EINVAL; + + mutex_lock(&state->mutex); + + if (sdformat->pad == ADV748X_CSI2_SOURCE) { + const struct v4l2_mbus_framefmt *sink_fmt; + + sink_fmt = adv748x_csi2_get_pad_format(sd, cfg, + ADV748X_CSI2_SINK, + sdformat->which); + + if (!sink_fmt) { + ret = -EINVAL; + goto unlock; + } + + sdformat->format = *sink_fmt; + } + + *mbusformat = sdformat->format; + +unlock: + mutex_unlock(&state->mutex); + + return ret; +} + +static const struct v4l2_subdev_pad_ops adv748x_csi2_pad_ops = { + .get_fmt = adv748x_csi2_get_format, + .set_fmt = adv748x_csi2_set_format, +}; + +/* ----------------------------------------------------------------------------- + * v4l2_subdev_ops + */ + +static const struct v4l2_subdev_ops adv748x_csi2_ops = { + .video = &adv748x_csi2_video_ops, + .pad = &adv748x_csi2_pad_ops, +}; + +/* ----------------------------------------------------------------------------- + * Subdev module and controls + */ + +int adv748x_csi2_set_pixelrate(struct v4l2_subdev *sd, s64 rate) +{ + struct v4l2_ctrl *ctrl; + + ctrl = v4l2_ctrl_find(sd->ctrl_handler, V4L2_CID_PIXEL_RATE); + if (!ctrl) + return -EINVAL; + + return 
v4l2_ctrl_s_ctrl_int64(ctrl, rate); +} + +static int adv748x_csi2_s_ctrl(struct v4l2_ctrl *ctrl) +{ + switch (ctrl->id) { + case V4L2_CID_PIXEL_RATE: + return 0; + default: + return -EINVAL; + } +} + +static const struct v4l2_ctrl_ops adv748x_csi2_ctrl_ops = { + .s_ctrl = adv748x_csi2_s_ctrl, +}; + +static int adv748x_csi2_init_controls(struct adv748x_csi2 *tx) +{ + + v4l2_ctrl_handler_init(&tx->ctrl_hdl, 1); + + v4l2_ctrl_new_std(&tx->ctrl_hdl, &adv748x_csi2_ctrl_ops, + V4L2_CID_PIXEL_RATE, 1, INT_MAX, 1, 1); + + tx->sd.ctrl_handler = &tx->ctrl_hdl; + if (tx->ctrl_hdl.error) { + v4l2_ctrl_handler_free(&tx->ctrl_hdl); + return tx->ctrl_hdl.error; + } + + return v4l2_ctrl_handler_setup(&tx->ctrl_hdl); +} + +int adv748x_csi2_init(struct adv748x_state *state, struct adv748x_csi2 *tx) +{ + struct device_node *ep; + int ret; + + /* We can not use container_of to get back to the state with two TXs */ + tx->state = state; + tx->page = is_txa(tx) ? ADV748X_PAGE_TXA : ADV748X_PAGE_TXB; + + ep = state->endpoints[is_txa(tx) ? ADV748X_PORT_TXA : ADV748X_PORT_TXB]; + if (!ep) { + adv_err(state, "No endpoint found for %s\n", + is_txa(tx) ? "txa" : "txb"); + return -ENODEV; + } + + /* Initialise the virtual channel */ + adv748x_csi2_set_virtual_channel(tx, 0); + + adv748x_subdev_init(&tx->sd, state, &adv748x_csi2_ops, + MEDIA_ENT_F_UNKNOWN, + is_txa(tx) ? "txa" : "txb"); + + /* Ensure that matching is based upon the endpoint fwnodes */ + tx->sd.fwnode = of_fwnode_handle(ep); + + /* Register internal ops for incremental subdev registration */ + tx->sd.internal_ops = &adv748x_csi2_internal_ops; + + tx->pads[ADV748X_CSI2_SINK].flags = MEDIA_PAD_FL_SINK; + tx->pads[ADV748X_CSI2_SOURCE].flags = MEDIA_PAD_FL_SOURCE; + + ret = media_entity_pads_init(&tx->sd.entity, ADV748X_CSI2_NR_PADS, + tx->pads); + if (ret) + return ret; + + ret = adv748x_csi2_init_controls(tx); + if (ret) + goto err_free_media; + + ret = v4l2_async_register_subdev(&tx->sd); + if (ret) + goto err_free_ctrl; + + return 0; + +err_free_ctrl: + v4l2_ctrl_handler_free(&tx->ctrl_hdl); +err_free_media: + media_entity_cleanup(&tx->sd.entity); + + return ret; +} + +void adv748x_csi2_cleanup(struct adv748x_csi2 *tx) +{ + v4l2_async_unregister_subdev(&tx->sd); + media_entity_cleanup(&tx->sd.entity); + v4l2_ctrl_handler_free(&tx->ctrl_hdl); +} diff --git a/drivers/media/i2c/adv748x/adv748x-hdmi.c b/drivers/media/i2c/adv748x/adv748x-hdmi.c new file mode 100644 index 000000000000..4da4253553fc --- /dev/null +++ b/drivers/media/i2c/adv748x/adv748x-hdmi.c @@ -0,0 +1,768 @@ +/* + * Driver for Analog Devices ADV748X HDMI receiver and Component Processor (CP) + * + * Copyright (C) 2017 Renesas Electronics Corp. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include +#include + +#include +#include +#include +#include + +#include + +#include "adv748x.h" + +/* ----------------------------------------------------------------------------- + * HDMI and CP + */ + +#define ADV748X_HDMI_MIN_WIDTH 640 +#define ADV748X_HDMI_MAX_WIDTH 1920 +#define ADV748X_HDMI_MIN_HEIGHT 480 +#define ADV748X_HDMI_MAX_HEIGHT 1200 + +/* V4L2_DV_BT_CEA_720X480I59_94 - 0.5 MHz */ +#define ADV748X_HDMI_MIN_PIXELCLOCK 13000000 +/* V4L2_DV_BT_DMT_1600X1200P60 */ +#define ADV748X_HDMI_MAX_PIXELCLOCK 162000000 + +static const struct v4l2_dv_timings_cap adv748x_hdmi_timings_cap = { + .type = V4L2_DV_BT_656_1120, + /* keep this initialization for compatibility with GCC < 4.4.6 */ + .reserved = { 0 }, + + V4L2_INIT_BT_TIMINGS(ADV748X_HDMI_MIN_WIDTH, ADV748X_HDMI_MAX_WIDTH, + ADV748X_HDMI_MIN_HEIGHT, ADV748X_HDMI_MAX_HEIGHT, + ADV748X_HDMI_MIN_PIXELCLOCK, + ADV748X_HDMI_MAX_PIXELCLOCK, + V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT, + V4L2_DV_BT_CAP_PROGRESSIVE) +}; + +struct adv748x_hdmi_video_standards { + struct v4l2_dv_timings timings; + u8 vid_std; + u8 v_freq; +}; + +static const struct adv748x_hdmi_video_standards +adv748x_hdmi_video_standards[] = { + { V4L2_DV_BT_CEA_720X480P59_94, 0x4a, 0x00 }, + { V4L2_DV_BT_CEA_720X576P50, 0x4b, 0x00 }, + { V4L2_DV_BT_CEA_1280X720P60, 0x53, 0x00 }, + { V4L2_DV_BT_CEA_1280X720P50, 0x53, 0x01 }, + { V4L2_DV_BT_CEA_1280X720P30, 0x53, 0x02 }, + { V4L2_DV_BT_CEA_1280X720P25, 0x53, 0x03 }, + { V4L2_DV_BT_CEA_1280X720P24, 0x53, 0x04 }, + { V4L2_DV_BT_CEA_1920X1080P60, 0x5e, 0x00 }, + { V4L2_DV_BT_CEA_1920X1080P50, 0x5e, 0x01 }, + { V4L2_DV_BT_CEA_1920X1080P30, 0x5e, 0x02 }, + { V4L2_DV_BT_CEA_1920X1080P25, 0x5e, 0x03 }, + { V4L2_DV_BT_CEA_1920X1080P24, 0x5e, 0x04 }, + /* SVGA */ + { V4L2_DV_BT_DMT_800X600P56, 0x80, 0x00 }, + { V4L2_DV_BT_DMT_800X600P60, 0x81, 0x00 }, + { V4L2_DV_BT_DMT_800X600P72, 0x82, 0x00 }, + { V4L2_DV_BT_DMT_800X600P75, 0x83, 0x00 }, + { V4L2_DV_BT_DMT_800X600P85, 0x84, 0x00 }, + /* SXGA */ + { V4L2_DV_BT_DMT_1280X1024P60, 0x85, 0x00 }, + { V4L2_DV_BT_DMT_1280X1024P75, 0x86, 0x00 }, + /* VGA */ + { V4L2_DV_BT_DMT_640X480P60, 0x88, 0x00 }, + { V4L2_DV_BT_DMT_640X480P72, 0x89, 0x00 }, + { V4L2_DV_BT_DMT_640X480P75, 0x8a, 0x00 }, + { V4L2_DV_BT_DMT_640X480P85, 0x8b, 0x00 }, + /* XGA */ + { V4L2_DV_BT_DMT_1024X768P60, 0x8c, 0x00 }, + { V4L2_DV_BT_DMT_1024X768P70, 0x8d, 0x00 }, + { V4L2_DV_BT_DMT_1024X768P75, 0x8e, 0x00 }, + { V4L2_DV_BT_DMT_1024X768P85, 0x8f, 0x00 }, + /* UXGA */ + { V4L2_DV_BT_DMT_1600X1200P60, 0x96, 0x00 }, +}; + +static void adv748x_hdmi_fill_format(struct adv748x_hdmi *hdmi, + struct v4l2_mbus_framefmt *fmt) +{ + memset(fmt, 0, sizeof(*fmt)); + + fmt->code = MEDIA_BUS_FMT_RGB888_1X24; + fmt->field = hdmi->timings.bt.interlaced ? 
+ V4L2_FIELD_ALTERNATE : V4L2_FIELD_NONE; + + /* TODO: The colorspace depends on the AVI InfoFrame contents */ + fmt->colorspace = V4L2_COLORSPACE_SRGB; + + fmt->width = hdmi->timings.bt.width; + fmt->height = hdmi->timings.bt.height; +} + +static void adv748x_fill_optional_dv_timings(struct v4l2_dv_timings *timings) +{ + v4l2_find_dv_timings_cap(timings, &adv748x_hdmi_timings_cap, + 250000, NULL, NULL); +} + +static bool adv748x_hdmi_has_signal(struct adv748x_state *state) +{ + int val; + + /* Check that VERT_FILTER and DE_REGEN is locked */ + val = hdmi_read(state, ADV748X_HDMI_LW1); + return (val & ADV748X_HDMI_LW1_VERT_FILTER) && + (val & ADV748X_HDMI_LW1_DE_REGEN); +} + +static int adv748x_hdmi_read_pixelclock(struct adv748x_state *state) +{ + int a, b; + + a = hdmi_read(state, ADV748X_HDMI_TMDS_1); + b = hdmi_read(state, ADV748X_HDMI_TMDS_2); + if (a < 0 || b < 0) + return -ENODATA; + + /* + * The high 9 bits store TMDS frequency measurement in MHz + * The low 7 bits of TMDS_2 store the 7-bit TMDS fractional frequency + * measurement in 1/128 MHz + */ + return ((a << 1) | (b >> 7)) * 1000000 + (b & 0x7f) * 1000000 / 128; +} + +/* + * adv748x_hdmi_set_de_timings: Adjust horizontal picture offset through DE + * + * HDMI CP uses a Data Enable synchronisation timing reference + * + * Vary the leading and trailing edge position of the DE signal output by the CP + * core. Values are stored as signed-twos-complement in one-pixel-clock units + * + * The start and end are shifted equally by the 10-bit shift value. + */ +static void adv748x_hdmi_set_de_timings(struct adv748x_state *state, int shift) +{ + u8 high, low; + + /* POS_HIGH stores bits 8 and 9 of both the start and end */ + high = ADV748X_CP_DE_POS_HIGH_SET; + high |= (shift & 0x300) >> 8; + low = shift & 0xff; + + /* The sequence of the writes is important and must be followed */ + cp_write(state, ADV748X_CP_DE_POS_HIGH, high); + cp_write(state, ADV748X_CP_DE_POS_END_LOW, low); + + high |= (shift & 0x300) >> 6; + + cp_write(state, ADV748X_CP_DE_POS_HIGH, high); + cp_write(state, ADV748X_CP_DE_POS_START_LOW, low); +} + +static int adv748x_hdmi_set_video_timings(struct adv748x_state *state, + const struct v4l2_dv_timings *timings) +{ + const struct adv748x_hdmi_video_standards *stds = + adv748x_hdmi_video_standards; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(adv748x_hdmi_video_standards); i++) { + if (!v4l2_match_dv_timings(timings, &stds[i].timings, 250000, + false)) + continue; + } + + if (i >= ARRAY_SIZE(adv748x_hdmi_video_standards)) + return -EINVAL; + + /* + * When setting cp_vid_std to either 720p, 1080i, or 1080p, the video + * will get shifted horizontally to the left in active video mode. 
+ * The de_h_start and de_h_end controls are used to centre the picture + * correctly + */ + switch (stds[i].vid_std) { + case 0x53: /* 720p */ + adv748x_hdmi_set_de_timings(state, -40); + break; + case 0x54: /* 1080i */ + case 0x5e: /* 1080p */ + adv748x_hdmi_set_de_timings(state, -44); + break; + default: + adv748x_hdmi_set_de_timings(state, 0); + break; + } + + io_write(state, ADV748X_IO_VID_STD, stds[i].vid_std); + io_clrset(state, ADV748X_IO_DATAPATH, ADV748X_IO_DATAPATH_VFREQ_M, + stds[i].v_freq << ADV748X_IO_DATAPATH_VFREQ_SHIFT); + + return 0; +} + +/* ----------------------------------------------------------------------------- + * v4l2_subdev_video_ops + */ + +static int adv748x_hdmi_s_dv_timings(struct v4l2_subdev *sd, + struct v4l2_dv_timings *timings) +{ + struct adv748x_hdmi *hdmi = adv748x_sd_to_hdmi(sd); + struct adv748x_state *state = adv748x_hdmi_to_state(hdmi); + int ret; + + if (!timings) + return -EINVAL; + + if (v4l2_match_dv_timings(&hdmi->timings, timings, 0, false)) + return 0; + + if (!v4l2_valid_dv_timings(timings, &adv748x_hdmi_timings_cap, + NULL, NULL)) + return -ERANGE; + + adv748x_fill_optional_dv_timings(timings); + + mutex_lock(&state->mutex); + + ret = adv748x_hdmi_set_video_timings(state, timings); + if (ret) + goto error; + + hdmi->timings = *timings; + + cp_clrset(state, ADV748X_CP_VID_ADJ_2, ADV748X_CP_VID_ADJ_2_INTERLACED, + timings->bt.interlaced ? + ADV748X_CP_VID_ADJ_2_INTERLACED : 0); + + mutex_unlock(&state->mutex); + + return 0; + +error: + mutex_unlock(&state->mutex); + return ret; +} + +static int adv748x_hdmi_g_dv_timings(struct v4l2_subdev *sd, + struct v4l2_dv_timings *timings) +{ + struct adv748x_hdmi *hdmi = adv748x_sd_to_hdmi(sd); + struct adv748x_state *state = adv748x_hdmi_to_state(hdmi); + + mutex_lock(&state->mutex); + + *timings = hdmi->timings; + + mutex_unlock(&state->mutex); + + return 0; +} + +static int adv748x_hdmi_query_dv_timings(struct v4l2_subdev *sd, + struct v4l2_dv_timings *timings) +{ + struct adv748x_hdmi *hdmi = adv748x_sd_to_hdmi(sd); + struct adv748x_state *state = adv748x_hdmi_to_state(hdmi); + struct v4l2_bt_timings *bt = &timings->bt; + int pixelclock; + int polarity; + + if (!timings) + return -EINVAL; + + memset(timings, 0, sizeof(struct v4l2_dv_timings)); + + if (!adv748x_hdmi_has_signal(state)) + return -ENOLINK; + + pixelclock = adv748x_hdmi_read_pixelclock(state); + if (pixelclock < 0) + return -ENODATA; + + timings->type = V4L2_DV_BT_656_1120; + + bt->pixelclock = pixelclock; + bt->interlaced = hdmi_read(state, ADV748X_HDMI_F1H1) & + ADV748X_HDMI_F1H1_INTERLACED ? + V4L2_DV_INTERLACED : V4L2_DV_PROGRESSIVE; + bt->width = hdmi_read16(state, ADV748X_HDMI_LW1, + ADV748X_HDMI_LW1_WIDTH_MASK); + bt->height = hdmi_read16(state, ADV748X_HDMI_F0H1, + ADV748X_HDMI_F0H1_HEIGHT_MASK); + bt->hfrontporch = hdmi_read16(state, ADV748X_HDMI_HFRONT_PORCH, + ADV748X_HDMI_HFRONT_PORCH_MASK); + bt->hsync = hdmi_read16(state, ADV748X_HDMI_HSYNC_WIDTH, + ADV748X_HDMI_HSYNC_WIDTH_MASK); + bt->hbackporch = hdmi_read16(state, ADV748X_HDMI_HBACK_PORCH, + ADV748X_HDMI_HBACK_PORCH_MASK); + bt->vfrontporch = hdmi_read16(state, ADV748X_HDMI_VFRONT_PORCH, + ADV748X_HDMI_VFRONT_PORCH_MASK) / 2; + bt->vsync = hdmi_read16(state, ADV748X_HDMI_VSYNC_WIDTH, + ADV748X_HDMI_VSYNC_WIDTH_MASK) / 2; + bt->vbackporch = hdmi_read16(state, ADV748X_HDMI_VBACK_PORCH, + ADV748X_HDMI_VBACK_PORCH_MASK) / 2; + + polarity = hdmi_read(state, 0x05); + bt->polarities = (polarity & BIT(4) ? V4L2_DV_VSYNC_POS_POL : 0) | + (polarity & BIT(5) ? 
V4L2_DV_HSYNC_POS_POL : 0); + + if (bt->interlaced == V4L2_DV_INTERLACED) { + bt->height += hdmi_read16(state, 0x0b, 0x1fff); + bt->il_vfrontporch = hdmi_read16(state, 0x2c, 0x3fff) / 2; + bt->il_vsync = hdmi_read16(state, 0x30, 0x3fff) / 2; + bt->il_vbackporch = hdmi_read16(state, 0x34, 0x3fff) / 2; + } + + adv748x_fill_optional_dv_timings(timings); + + /* + * No interrupt handling is implemented yet. + * There should be an IRQ when a cable is plugged and the new timings + * should be figured out and stored to state. + */ + hdmi->timings = *timings; + + return 0; +} + +static int adv748x_hdmi_g_input_status(struct v4l2_subdev *sd, u32 *status) +{ + struct adv748x_hdmi *hdmi = adv748x_sd_to_hdmi(sd); + struct adv748x_state *state = adv748x_hdmi_to_state(hdmi); + + mutex_lock(&state->mutex); + + *status = adv748x_hdmi_has_signal(state) ? 0 : V4L2_IN_ST_NO_SIGNAL; + + mutex_unlock(&state->mutex); + + return 0; +} + +static int adv748x_hdmi_s_stream(struct v4l2_subdev *sd, int enable) +{ + struct adv748x_hdmi *hdmi = adv748x_sd_to_hdmi(sd); + struct adv748x_state *state = adv748x_hdmi_to_state(hdmi); + int ret; + + mutex_lock(&state->mutex); + + ret = adv748x_txa_power(state, enable); + if (ret) + goto done; + + if (adv748x_hdmi_has_signal(state)) + adv_dbg(state, "Detected HDMI signal\n"); + else + adv_dbg(state, "Couldn't detect HDMI video signal\n"); + +done: + mutex_unlock(&state->mutex); + return ret; +} + +static int adv748x_hdmi_g_pixelaspect(struct v4l2_subdev *sd, + struct v4l2_fract *aspect) +{ + aspect->numerator = 1; + aspect->denominator = 1; + + return 0; +} + +static const struct v4l2_subdev_video_ops adv748x_video_ops_hdmi = { + .s_dv_timings = adv748x_hdmi_s_dv_timings, + .g_dv_timings = adv748x_hdmi_g_dv_timings, + .query_dv_timings = adv748x_hdmi_query_dv_timings, + .g_input_status = adv748x_hdmi_g_input_status, + .s_stream = adv748x_hdmi_s_stream, + .g_pixelaspect = adv748x_hdmi_g_pixelaspect, +}; + +/* ----------------------------------------------------------------------------- + * v4l2_subdev_pad_ops + */ + +static int adv748x_hdmi_propagate_pixelrate(struct adv748x_hdmi *hdmi) +{ + struct v4l2_subdev *tx; + struct v4l2_dv_timings timings; + struct v4l2_bt_timings *bt = &timings.bt; + unsigned int fps; + + tx = adv748x_get_remote_sd(&hdmi->pads[ADV748X_HDMI_SOURCE]); + if (!tx) + return -ENOLINK; + + adv748x_hdmi_query_dv_timings(&hdmi->sd, &timings); + + fps = DIV_ROUND_CLOSEST_ULL(bt->pixelclock, + V4L2_DV_BT_FRAME_WIDTH(bt) * + V4L2_DV_BT_FRAME_HEIGHT(bt)); + + return adv748x_csi2_set_pixelrate(tx, bt->width * bt->height * fps); +} + +static int adv748x_hdmi_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_mbus_code_enum *code) +{ + if (code->index != 0) + return -EINVAL; + + code->code = MEDIA_BUS_FMT_RGB888_1X24; + + return 0; +} + +static int adv748x_hdmi_get_format(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *sdformat) +{ + struct adv748x_hdmi *hdmi = adv748x_sd_to_hdmi(sd); + struct v4l2_mbus_framefmt *mbusformat; + + if (sdformat->pad != ADV748X_HDMI_SOURCE) + return -EINVAL; + + if (sdformat->which == V4L2_SUBDEV_FORMAT_TRY) { + mbusformat = v4l2_subdev_get_try_format(sd, cfg, sdformat->pad); + sdformat->format = *mbusformat; + } else { + adv748x_hdmi_fill_format(hdmi, &sdformat->format); + adv748x_hdmi_propagate_pixelrate(hdmi); + } + + return 0; +} + +static int adv748x_hdmi_set_format(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct 
v4l2_subdev_format *sdformat) +{ + struct v4l2_mbus_framefmt *mbusformat; + + if (sdformat->pad != ADV748X_HDMI_SOURCE) + return -EINVAL; + + if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE) + return adv748x_hdmi_get_format(sd, cfg, sdformat); + + mbusformat = v4l2_subdev_get_try_format(sd, cfg, sdformat->pad); + *mbusformat = sdformat->format; + + return 0; +} + +static int adv748x_hdmi_get_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid) +{ + struct adv748x_hdmi *hdmi = adv748x_sd_to_hdmi(sd); + + memset(edid->reserved, 0, sizeof(edid->reserved)); + + if (!hdmi->edid.present) + return -ENODATA; + + if (edid->start_block == 0 && edid->blocks == 0) { + edid->blocks = hdmi->edid.blocks; + return 0; + } + + if (edid->start_block >= hdmi->edid.blocks) + return -EINVAL; + + if (edid->start_block + edid->blocks > hdmi->edid.blocks) + edid->blocks = hdmi->edid.blocks - edid->start_block; + + memcpy(edid->edid, hdmi->edid.edid + edid->start_block * 128, + edid->blocks * 128); + + return 0; +} + +static inline int adv748x_hdmi_edid_write_block(struct adv748x_hdmi *hdmi, + unsigned int total_len, const u8 *val) +{ + struct adv748x_state *state = adv748x_hdmi_to_state(hdmi); + int err = 0; + int i = 0; + int len = 0; + + adv_dbg(state, "%s: write EDID block (%d byte)\n", + __func__, total_len); + + while (!err && i < total_len) { + len = (total_len - i) > I2C_SMBUS_BLOCK_MAX ? + I2C_SMBUS_BLOCK_MAX : + (total_len - i); + + err = adv748x_write_block(state, ADV748X_PAGE_EDID, + i, val + i, len); + i += len; + } + + return err; +} + +static int adv748x_hdmi_set_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid) +{ + struct adv748x_hdmi *hdmi = adv748x_sd_to_hdmi(sd); + struct adv748x_state *state = adv748x_hdmi_to_state(hdmi); + int err; + + memset(edid->reserved, 0, sizeof(edid->reserved)); + + if (edid->start_block != 0) + return -EINVAL; + + if (edid->blocks == 0) { + hdmi->edid.blocks = 0; + hdmi->edid.present = 0; + + /* Fall back to a 16:9 aspect ratio */ + hdmi->aspect_ratio.numerator = 16; + hdmi->aspect_ratio.denominator = 9; + + /* Disable the EDID */ + repeater_write(state, ADV748X_REPEATER_EDID_SZ, + edid->blocks << ADV748X_REPEATER_EDID_SZ_SHIFT); + + repeater_write(state, ADV748X_REPEATER_EDID_CTL, 0); + + return 0; + } + + if (edid->blocks > 4) { + edid->blocks = 4; + return -E2BIG; + } + + memcpy(hdmi->edid.edid, edid->edid, 128 * edid->blocks); + hdmi->edid.blocks = edid->blocks; + hdmi->edid.present = true; + + hdmi->aspect_ratio = v4l2_calc_aspect_ratio(edid->edid[0x15], + edid->edid[0x16]); + + err = adv748x_hdmi_edid_write_block(hdmi, 128 * edid->blocks, + hdmi->edid.edid); + if (err < 0) { + v4l2_err(sd, "error %d writing edid pad %d\n", err, edid->pad); + return err; + } + + repeater_write(state, ADV748X_REPEATER_EDID_SZ, + edid->blocks << ADV748X_REPEATER_EDID_SZ_SHIFT); + + repeater_write(state, ADV748X_REPEATER_EDID_CTL, + ADV748X_REPEATER_EDID_CTL_EN); + + return 0; +} + +static bool adv748x_hdmi_check_dv_timings(const struct v4l2_dv_timings *timings, + void *hdl) +{ + const struct adv748x_hdmi_video_standards *stds = + adv748x_hdmi_video_standards; + unsigned int i; + + for (i = 0; stds[i].timings.bt.width; i++) + if (v4l2_match_dv_timings(timings, &stds[i].timings, 0, false)) + return true; + + return false; +} + +static int adv748x_hdmi_enum_dv_timings(struct v4l2_subdev *sd, + struct v4l2_enum_dv_timings *timings) +{ + return v4l2_enum_dv_timings_cap(timings, &adv748x_hdmi_timings_cap, + adv748x_hdmi_check_dv_timings, NULL); +} + +static int 
adv748x_hdmi_dv_timings_cap(struct v4l2_subdev *sd, + struct v4l2_dv_timings_cap *cap) +{ + *cap = adv748x_hdmi_timings_cap; + return 0; +} + +static const struct v4l2_subdev_pad_ops adv748x_pad_ops_hdmi = { + .enum_mbus_code = adv748x_hdmi_enum_mbus_code, + .set_fmt = adv748x_hdmi_set_format, + .get_fmt = adv748x_hdmi_get_format, + .get_edid = adv748x_hdmi_get_edid, + .set_edid = adv748x_hdmi_set_edid, + .dv_timings_cap = adv748x_hdmi_dv_timings_cap, + .enum_dv_timings = adv748x_hdmi_enum_dv_timings, +}; + +/* ----------------------------------------------------------------------------- + * v4l2_subdev_ops + */ + +static const struct v4l2_subdev_ops adv748x_ops_hdmi = { + .video = &adv748x_video_ops_hdmi, + .pad = &adv748x_pad_ops_hdmi, +}; + +/* ----------------------------------------------------------------------------- + * Controls + */ + +static const char * const hdmi_ctrl_patgen_menu[] = { + "Disabled", + "Solid Color", + "Color Bars", + "Ramp Grey", + "Ramp Blue", + "Ramp Red", + "Checkered" +}; + +static int adv748x_hdmi_s_ctrl(struct v4l2_ctrl *ctrl) +{ + struct adv748x_hdmi *hdmi = adv748x_ctrl_to_hdmi(ctrl); + struct adv748x_state *state = adv748x_hdmi_to_state(hdmi); + int ret; + u8 pattern; + + /* Enable video adjustment first */ + ret = cp_clrset(state, ADV748X_CP_VID_ADJ, + ADV748X_CP_VID_ADJ_ENABLE, + ADV748X_CP_VID_ADJ_ENABLE); + if (ret < 0) + return ret; + + switch (ctrl->id) { + case V4L2_CID_BRIGHTNESS: + ret = cp_write(state, ADV748X_CP_BRI, ctrl->val); + break; + case V4L2_CID_HUE: + ret = cp_write(state, ADV748X_CP_HUE, ctrl->val); + break; + case V4L2_CID_CONTRAST: + ret = cp_write(state, ADV748X_CP_CON, ctrl->val); + break; + case V4L2_CID_SATURATION: + ret = cp_write(state, ADV748X_CP_SAT, ctrl->val); + break; + case V4L2_CID_TEST_PATTERN: + pattern = ctrl->val; + + /* Pattern is 0-indexed. 
Ctrl Menu is 1-indexed */ + if (pattern) { + pattern--; + pattern |= ADV748X_CP_PAT_GEN_EN; + } + + ret = cp_write(state, ADV748X_CP_PAT_GEN, pattern); + + break; + default: + return -EINVAL; + } + + return ret; +} + +static const struct v4l2_ctrl_ops adv748x_hdmi_ctrl_ops = { + .s_ctrl = adv748x_hdmi_s_ctrl, +}; + +static int adv748x_hdmi_init_controls(struct adv748x_hdmi *hdmi) +{ + struct adv748x_state *state = adv748x_hdmi_to_state(hdmi); + + v4l2_ctrl_handler_init(&hdmi->ctrl_hdl, 5); + + /* Use our mutex for the controls */ + hdmi->ctrl_hdl.lock = &state->mutex; + + v4l2_ctrl_new_std(&hdmi->ctrl_hdl, &adv748x_hdmi_ctrl_ops, + V4L2_CID_BRIGHTNESS, ADV748X_CP_BRI_MIN, + ADV748X_CP_BRI_MAX, 1, ADV748X_CP_BRI_DEF); + v4l2_ctrl_new_std(&hdmi->ctrl_hdl, &adv748x_hdmi_ctrl_ops, + V4L2_CID_CONTRAST, ADV748X_CP_CON_MIN, + ADV748X_CP_CON_MAX, 1, ADV748X_CP_CON_DEF); + v4l2_ctrl_new_std(&hdmi->ctrl_hdl, &adv748x_hdmi_ctrl_ops, + V4L2_CID_SATURATION, ADV748X_CP_SAT_MIN, + ADV748X_CP_SAT_MAX, 1, ADV748X_CP_SAT_DEF); + v4l2_ctrl_new_std(&hdmi->ctrl_hdl, &adv748x_hdmi_ctrl_ops, + V4L2_CID_HUE, ADV748X_CP_HUE_MIN, + ADV748X_CP_HUE_MAX, 1, ADV748X_CP_HUE_DEF); + + /* + * Todo: V4L2_CID_DV_RX_POWER_PRESENT should also be supported when + * interrupts are handled correctly + */ + + v4l2_ctrl_new_std_menu_items(&hdmi->ctrl_hdl, &adv748x_hdmi_ctrl_ops, + V4L2_CID_TEST_PATTERN, + ARRAY_SIZE(hdmi_ctrl_patgen_menu) - 1, + 0, 0, hdmi_ctrl_patgen_menu); + + hdmi->sd.ctrl_handler = &hdmi->ctrl_hdl; + if (hdmi->ctrl_hdl.error) { + v4l2_ctrl_handler_free(&hdmi->ctrl_hdl); + return hdmi->ctrl_hdl.error; + } + + return v4l2_ctrl_handler_setup(&hdmi->ctrl_hdl); +} + +int adv748x_hdmi_init(struct adv748x_hdmi *hdmi) +{ + struct adv748x_state *state = adv748x_hdmi_to_state(hdmi); + static const struct v4l2_dv_timings cea1280x720 = + V4L2_DV_BT_CEA_1280X720P30; + int ret; + + hdmi->timings = cea1280x720; + + /* Initialise a default 16:9 aspect ratio */ + hdmi->aspect_ratio.numerator = 16; + hdmi->aspect_ratio.denominator = 9; + + adv748x_subdev_init(&hdmi->sd, state, &adv748x_ops_hdmi, + MEDIA_ENT_F_IO_DTV, "hdmi"); + + hdmi->pads[ADV748X_HDMI_SINK].flags = MEDIA_PAD_FL_SINK; + hdmi->pads[ADV748X_HDMI_SOURCE].flags = MEDIA_PAD_FL_SOURCE; + + ret = media_entity_pads_init(&hdmi->sd.entity, + ADV748X_HDMI_NR_PADS, hdmi->pads); + if (ret) + return ret; + + ret = adv748x_hdmi_init_controls(hdmi); + if (ret) + goto err_free_media; + + return 0; + +err_free_media: + media_entity_cleanup(&hdmi->sd.entity); + + return ret; +} + +void adv748x_hdmi_cleanup(struct adv748x_hdmi *hdmi) +{ + v4l2_device_unregister_subdev(&hdmi->sd); + media_entity_cleanup(&hdmi->sd.entity); + v4l2_ctrl_handler_free(&hdmi->ctrl_hdl); +} diff --git a/drivers/media/i2c/adv748x/adv748x.h b/drivers/media/i2c/adv748x/adv748x.h new file mode 100644 index 000000000000..cc4151b5b31e --- /dev/null +++ b/drivers/media/i2c/adv748x/adv748x.h @@ -0,0 +1,425 @@ +/* + * Driver for Analog Devices ADV748X video decoder and HDMI receiver + * + * Copyright (C) 2017 Renesas Electronics Corp. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + * Authors: + * Koji Matsuoka + * Niklas Söderlund + * Kieran Bingham + * + * The ADV748x range of receivers have the following configurations: + * + * Analog HDMI MHL 4-Lane 1-Lane + * In In CSI CSI + * ADV7480 X X X + * ADV7481 X X X X X + * ADV7482 X X X X + */ + +#include + +#ifndef _ADV748X_H_ +#define _ADV748X_H_ + +/* I2C slave addresses */ +#define ADV748X_I2C_IO 0x70 /* IO Map */ +#define ADV748X_I2C_DPLL 0x26 /* DPLL Map */ +#define ADV748X_I2C_CP 0x22 /* CP Map */ +#define ADV748X_I2C_HDMI 0x34 /* HDMI Map */ +#define ADV748X_I2C_EDID 0x36 /* EDID Map */ +#define ADV748X_I2C_REPEATER 0x32 /* HDMI RX Repeater Map */ +#define ADV748X_I2C_INFOFRAME 0x31 /* HDMI RX InfoFrame Map */ +#define ADV748X_I2C_CEC 0x41 /* CEC Map */ +#define ADV748X_I2C_SDP 0x79 /* SDP Map */ +#define ADV748X_I2C_TXB 0x48 /* CSI-TXB Map */ +#define ADV748X_I2C_TXA 0x4a /* CSI-TXA Map */ + +enum adv748x_page { + ADV748X_PAGE_IO, + ADV748X_PAGE_DPLL, + ADV748X_PAGE_CP, + ADV748X_PAGE_HDMI, + ADV748X_PAGE_EDID, + ADV748X_PAGE_REPEATER, + ADV748X_PAGE_INFOFRAME, + ADV748X_PAGE_CEC, + ADV748X_PAGE_SDP, + ADV748X_PAGE_TXB, + ADV748X_PAGE_TXA, + ADV748X_PAGE_MAX, + + /* Fake pages for register sequences */ + ADV748X_PAGE_WAIT, /* Wait x msec */ + ADV748X_PAGE_EOR, /* End Mark */ +}; + +/** + * enum adv748x_ports - Device tree port number definitions + * + * The ADV748X ports define the mapping between subdevices + * and the device tree specification + */ +enum adv748x_ports { + ADV748X_PORT_AIN0 = 0, + ADV748X_PORT_AIN1 = 1, + ADV748X_PORT_AIN2 = 2, + ADV748X_PORT_AIN3 = 3, + ADV748X_PORT_AIN4 = 4, + ADV748X_PORT_AIN5 = 5, + ADV748X_PORT_AIN6 = 6, + ADV748X_PORT_AIN7 = 7, + ADV748X_PORT_HDMI = 8, + ADV748X_PORT_TTL = 9, + ADV748X_PORT_TXA = 10, + ADV748X_PORT_TXB = 11, + ADV748X_PORT_MAX = 12, +}; + +enum adv748x_csi2_pads { + ADV748X_CSI2_SINK, + ADV748X_CSI2_SOURCE, + ADV748X_CSI2_NR_PADS, +}; + +/* CSI2 transmitters can have 2 internal connections, HDMI/AFE */ +#define ADV748X_CSI2_MAX_SUBDEVS 2 + +struct adv748x_csi2 { + struct adv748x_state *state; + struct v4l2_mbus_framefmt format; + unsigned int page; + + struct media_pad pads[ADV748X_CSI2_NR_PADS]; + struct v4l2_ctrl_handler ctrl_hdl; + struct v4l2_subdev sd; +}; + +#define notifier_to_csi2(n) container_of(n, struct adv748x_csi2, notifier) +#define adv748x_sd_to_csi2(sd) container_of(sd, struct adv748x_csi2, sd) + +enum adv748x_hdmi_pads { + ADV748X_HDMI_SINK, + ADV748X_HDMI_SOURCE, + ADV748X_HDMI_NR_PADS, +}; + +struct adv748x_hdmi { + struct media_pad pads[ADV748X_HDMI_NR_PADS]; + struct v4l2_ctrl_handler ctrl_hdl; + struct v4l2_subdev sd; + struct v4l2_mbus_framefmt format; + + struct v4l2_dv_timings timings; + struct v4l2_fract aspect_ratio; + + struct { + u8 edid[512]; + u32 present; + unsigned int blocks; + } edid; +}; + +#define adv748x_ctrl_to_hdmi(ctrl) \ + container_of(ctrl->handler, struct adv748x_hdmi, ctrl_hdl) +#define adv748x_sd_to_hdmi(sd) container_of(sd, struct adv748x_hdmi, sd) + +enum adv748x_afe_pads { + ADV748X_AFE_SINK_AIN0, + ADV748X_AFE_SINK_AIN1, + ADV748X_AFE_SINK_AIN2, + ADV748X_AFE_SINK_AIN3, + ADV748X_AFE_SINK_AIN4, + ADV748X_AFE_SINK_AIN5, + ADV748X_AFE_SINK_AIN6, + ADV748X_AFE_SINK_AIN7, + ADV748X_AFE_SOURCE, + ADV748X_AFE_NR_PADS, +}; + +struct adv748x_afe { + struct media_pad pads[ADV748X_AFE_NR_PADS]; + struct v4l2_ctrl_handler ctrl_hdl; + struct v4l2_subdev sd; + struct v4l2_mbus_framefmt format; + + bool streaming; + v4l2_std_id curr_norm; + unsigned int input; +}; + +#define adv748x_ctrl_to_afe(ctrl) \ + 
container_of(ctrl->handler, struct adv748x_afe, ctrl_hdl) +#define adv748x_sd_to_afe(sd) container_of(sd, struct adv748x_afe, sd) + +/** + * struct adv748x_state - State of ADV748X + * @dev: (OF) device + * @client: I2C client + * @mutex: protect global state + * + * @endpoints: parsed device node endpoints for each port + * + * @i2c_addresses I2C Page addresses + * @i2c_clients I2C clients for the page accesses + * @regmap regmap configuration pages. + * + * @hdmi: state of HDMI receiver context + * @afe: state of AFE receiver context + * @txa: state of TXA transmitter context + * @txb: state of TXB transmitter context + */ +struct adv748x_state { + struct device *dev; + struct i2c_client *client; + struct mutex mutex; + + struct device_node *endpoints[ADV748X_PORT_MAX]; + + struct i2c_client *i2c_clients[ADV748X_PAGE_MAX]; + struct regmap *regmap[ADV748X_PAGE_MAX]; + + struct adv748x_hdmi hdmi; + struct adv748x_afe afe; + struct adv748x_csi2 txa; + struct adv748x_csi2 txb; +}; + +#define adv748x_hdmi_to_state(h) container_of(h, struct adv748x_state, hdmi) +#define adv748x_afe_to_state(a) container_of(a, struct adv748x_state, afe) + +#define adv_err(a, fmt, arg...) dev_err(a->dev, fmt, ##arg) +#define adv_info(a, fmt, arg...) dev_info(a->dev, fmt, ##arg) +#define adv_dbg(a, fmt, arg...) dev_dbg(a->dev, fmt, ##arg) + +/* Register Mappings */ + +/* IO Map */ +#define ADV748X_IO_PD 0x00 /* power down controls */ +#define ADV748X_IO_PD_RX_EN BIT(6) + +#define ADV748X_IO_REG_04 0x04 +#define ADV748X_IO_REG_04_FORCE_FR BIT(0) /* Force CP free-run */ + +#define ADV748X_IO_DATAPATH 0x03 /* datapath cntrl */ +#define ADV748X_IO_DATAPATH_VFREQ_M 0x70 +#define ADV748X_IO_DATAPATH_VFREQ_SHIFT 4 + +#define ADV748X_IO_VID_STD 0x05 + +#define ADV748X_IO_10 0x10 /* io_reg_10 */ +#define ADV748X_IO_10_CSI4_EN BIT(7) +#define ADV748X_IO_10_CSI1_EN BIT(6) +#define ADV748X_IO_10_PIX_OUT_EN BIT(5) + +#define ADV748X_IO_CHIP_REV_ID_1 0xdf +#define ADV748X_IO_CHIP_REV_ID_2 0xe0 + +#define ADV748X_IO_SLAVE_ADDR_BASE 0xf2 + +/* HDMI RX Map */ +#define ADV748X_HDMI_LW1 0x07 /* line width_1 */ +#define ADV748X_HDMI_LW1_VERT_FILTER BIT(7) +#define ADV748X_HDMI_LW1_DE_REGEN BIT(5) +#define ADV748X_HDMI_LW1_WIDTH_MASK 0x1fff + +#define ADV748X_HDMI_F0H1 0x09 /* field0 height_1 */ +#define ADV748X_HDMI_F0H1_HEIGHT_MASK 0x1fff + +#define ADV748X_HDMI_F1H1 0x0b /* field1 height_1 */ +#define ADV748X_HDMI_F1H1_INTERLACED BIT(5) + +#define ADV748X_HDMI_HFRONT_PORCH 0x20 /* hsync_front_porch_1 */ +#define ADV748X_HDMI_HFRONT_PORCH_MASK 0x1fff + +#define ADV748X_HDMI_HSYNC_WIDTH 0x22 /* hsync_pulse_width_1 */ +#define ADV748X_HDMI_HSYNC_WIDTH_MASK 0x1fff + +#define ADV748X_HDMI_HBACK_PORCH 0x24 /* hsync_back_porch_1 */ +#define ADV748X_HDMI_HBACK_PORCH_MASK 0x1fff + +#define ADV748X_HDMI_VFRONT_PORCH 0x2a /* field0_vs_front_porch_1 */ +#define ADV748X_HDMI_VFRONT_PORCH_MASK 0x3fff + +#define ADV748X_HDMI_VSYNC_WIDTH 0x2e /* field0_vs_pulse_width_1 */ +#define ADV748X_HDMI_VSYNC_WIDTH_MASK 0x3fff + +#define ADV748X_HDMI_VBACK_PORCH 0x32 /* field0_vs_back_porch_1 */ +#define ADV748X_HDMI_VBACK_PORCH_MASK 0x3fff + +#define ADV748X_HDMI_TMDS_1 0x51 /* hdmi_reg_51 */ +#define ADV748X_HDMI_TMDS_2 0x52 /* hdmi_reg_52 */ + +/* HDMI RX Repeater Map */ +#define ADV748X_REPEATER_EDID_SZ 0x70 /* primary_edid_size */ +#define ADV748X_REPEATER_EDID_SZ_SHIFT 4 + +#define ADV748X_REPEATER_EDID_CTL 0x74 /* hdcp edid controls */ +#define ADV748X_REPEATER_EDID_CTL_EN BIT(0) /* man_edid_a_enable */ + +/* SDP Main Map */ +#define 
ADV748X_SDP_INSEL 0x00 /* user_map_rw_reg_00 */ + +#define ADV748X_SDP_VID_SEL 0x02 /* user_map_rw_reg_02 */ +#define ADV748X_SDP_VID_SEL_MASK 0xf0 +#define ADV748X_SDP_VID_SEL_SHIFT 4 + +/* Contrast - Unsigned*/ +#define ADV748X_SDP_CON 0x08 /* user_map_rw_reg_08 */ +#define ADV748X_SDP_CON_MIN 0 +#define ADV748X_SDP_CON_DEF 128 +#define ADV748X_SDP_CON_MAX 255 + +/* Brightness - Signed */ +#define ADV748X_SDP_BRI 0x0a /* user_map_rw_reg_0a */ +#define ADV748X_SDP_BRI_MIN -128 +#define ADV748X_SDP_BRI_DEF 0 +#define ADV748X_SDP_BRI_MAX 127 + +/* Hue - Signed, inverted*/ +#define ADV748X_SDP_HUE 0x0b /* user_map_rw_reg_0b */ +#define ADV748X_SDP_HUE_MIN -127 +#define ADV748X_SDP_HUE_DEF 0 +#define ADV748X_SDP_HUE_MAX 128 + +/* Test Patterns / Default Values */ +#define ADV748X_SDP_DEF 0x0c /* user_map_rw_reg_0c */ +#define ADV748X_SDP_DEF_VAL_EN BIT(0) /* Force free run mode */ +#define ADV748X_SDP_DEF_VAL_AUTO_EN BIT(1) /* Free run when no signal */ + +#define ADV748X_SDP_MAP_SEL 0x0e /* user_map_rw_reg_0e */ +#define ADV748X_SDP_MAP_SEL_RO_MAIN 1 + +/* Free run pattern select */ +#define ADV748X_SDP_FRP 0x14 +#define ADV748X_SDP_FRP_MASK GENMASK(3, 1) + +/* Saturation */ +#define ADV748X_SDP_SD_SAT_U 0xe3 /* user_map_rw_reg_e3 */ +#define ADV748X_SDP_SD_SAT_V 0xe4 /* user_map_rw_reg_e4 */ +#define ADV748X_SDP_SAT_MIN 0 +#define ADV748X_SDP_SAT_DEF 128 +#define ADV748X_SDP_SAT_MAX 255 + +/* SDP RO Main Map */ +#define ADV748X_SDP_RO_10 0x10 +#define ADV748X_SDP_RO_10_IN_LOCK BIT(0) + +/* CP Map */ +#define ADV748X_CP_PAT_GEN 0x37 /* int_pat_gen_1 */ +#define ADV748X_CP_PAT_GEN_EN BIT(7) + +/* Contrast Control - Unsigned */ +#define ADV748X_CP_CON 0x3a /* contrast_cntrl */ +#define ADV748X_CP_CON_MIN 0 /* Minimum contrast */ +#define ADV748X_CP_CON_DEF 128 /* Default */ +#define ADV748X_CP_CON_MAX 255 /* Maximum contrast */ + +/* Saturation Control - Unsigned */ +#define ADV748X_CP_SAT 0x3b /* saturation_cntrl */ +#define ADV748X_CP_SAT_MIN 0 /* Minimum saturation */ +#define ADV748X_CP_SAT_DEF 128 /* Default */ +#define ADV748X_CP_SAT_MAX 255 /* Maximum saturation */ + +/* Brightness Control - Signed */ +#define ADV748X_CP_BRI 0x3c /* brightness_cntrl */ +#define ADV748X_CP_BRI_MIN -128 /* Luma is -512d */ +#define ADV748X_CP_BRI_DEF 0 /* Luma is 0 */ +#define ADV748X_CP_BRI_MAX 127 /* Luma is 508d */ + +/* Hue Control */ +#define ADV748X_CP_HUE 0x3d /* hue_cntrl */ +#define ADV748X_CP_HUE_MIN 0 /* -90 degree */ +#define ADV748X_CP_HUE_DEF 0 /* -90 degree */ +#define ADV748X_CP_HUE_MAX 255 /* +90 degree */ + +#define ADV748X_CP_VID_ADJ 0x3e /* vid_adj_0 */ +#define ADV748X_CP_VID_ADJ_ENABLE BIT(7) /* Enable colour controls */ + +#define ADV748X_CP_DE_POS_HIGH 0x8b /* de_pos_adj_6 */ +#define ADV748X_CP_DE_POS_HIGH_SET BIT(6) +#define ADV748X_CP_DE_POS_END_LOW 0x8c /* de_pos_adj_7 */ +#define ADV748X_CP_DE_POS_START_LOW 0x8d /* de_pos_adj_8 */ + +#define ADV748X_CP_VID_ADJ_2 0x91 +#define ADV748X_CP_VID_ADJ_2_INTERLACED BIT(6) +#define ADV748X_CP_VID_ADJ_2_INTERLACED_3D BIT(4) + +#define ADV748X_CP_CLMP_POS 0xc9 /* clmp_pos_cntrl_4 */ +#define ADV748X_CP_CLMP_POS_DIS_AUTO BIT(0) /* dis_auto_param_buff */ + +/* CSI : TXA/TXB Maps */ +#define ADV748X_CSI_VC_REF 0x0d /* csi_tx_top_reg_0d */ +#define ADV748X_CSI_VC_REF_SHIFT 6 + +#define ADV748X_CSI_FS_AS_LS 0x1e /* csi_tx_top_reg_1e */ +#define ADV748X_CSI_FS_AS_LS_UNKNOWN BIT(6) /* Undocumented bit */ + +/* Register handling */ + +int adv748x_read(struct adv748x_state *state, u8 addr, u8 reg); +int adv748x_write(struct adv748x_state *state, 
u8 page, u8 reg, u8 value); +int adv748x_write_block(struct adv748x_state *state, int client_page, + unsigned int init_reg, const void *val, + size_t val_len); + +#define io_read(s, r) adv748x_read(s, ADV748X_PAGE_IO, r) +#define io_write(s, r, v) adv748x_write(s, ADV748X_PAGE_IO, r, v) +#define io_clrset(s, r, m, v) io_write(s, r, (io_read(s, r) & ~m) | v) + +#define hdmi_read(s, r) adv748x_read(s, ADV748X_PAGE_HDMI, r) +#define hdmi_read16(s, r, m) (((hdmi_read(s, r) << 8) | hdmi_read(s, r+1)) & m) +#define hdmi_write(s, r, v) adv748x_write(s, ADV748X_PAGE_HDMI, r, v) + +#define repeater_read(s, r) adv748x_read(s, ADV748X_PAGE_REPEATER, r) +#define repeater_write(s, r, v) adv748x_write(s, ADV748X_PAGE_REPEATER, r, v) + +#define sdp_read(s, r) adv748x_read(s, ADV748X_PAGE_SDP, r) +#define sdp_write(s, r, v) adv748x_write(s, ADV748X_PAGE_SDP, r, v) +#define sdp_clrset(s, r, m, v) sdp_write(s, r, (sdp_read(s, r) & ~m) | v) + +#define cp_read(s, r) adv748x_read(s, ADV748X_PAGE_CP, r) +#define cp_write(s, r, v) adv748x_write(s, ADV748X_PAGE_CP, r, v) +#define cp_clrset(s, r, m, v) cp_write(s, r, (cp_read(s, r) & ~m) | v) + +#define txa_read(s, r) adv748x_read(s, ADV748X_PAGE_TXA, r) +#define txb_read(s, r) adv748x_read(s, ADV748X_PAGE_TXB, r) + +#define tx_read(t, r) adv748x_read(t->state, t->page, r) +#define tx_write(t, r, v) adv748x_write(t->state, t->page, r, v) + +static inline struct v4l2_subdev *adv748x_get_remote_sd(struct media_pad *pad) +{ + pad = media_entity_remote_pad(pad); + if (!pad) + return NULL; + + return media_entity_to_v4l2_subdev(pad->entity); +} + +void adv748x_subdev_init(struct v4l2_subdev *sd, struct adv748x_state *state, + const struct v4l2_subdev_ops *ops, u32 function, + const char *ident); + +int adv748x_register_subdevs(struct adv748x_state *state, + struct v4l2_device *v4l2_dev); + +int adv748x_txa_power(struct adv748x_state *state, bool on); +int adv748x_txb_power(struct adv748x_state *state, bool on); + +int adv748x_afe_init(struct adv748x_afe *afe); +void adv748x_afe_cleanup(struct adv748x_afe *afe); + +int adv748x_csi2_init(struct adv748x_state *state, struct adv748x_csi2 *tx); +void adv748x_csi2_cleanup(struct adv748x_csi2 *tx); +int adv748x_csi2_set_pixelrate(struct v4l2_subdev *sd, s64 rate); + +int adv748x_hdmi_init(struct adv748x_hdmi *hdmi); +void adv748x_hdmi_cleanup(struct adv748x_hdmi *hdmi); + +#endif /* _ADV748X_H_ */ diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c index ccc478605643..2817bafc67bf 100644 --- a/drivers/media/i2c/adv7511.c +++ b/drivers/media/i2c/adv7511.c @@ -1927,8 +1927,7 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id * #if IS_ENABLED(CONFIG_VIDEO_ADV7511_CEC) state->cec_adap = cec_allocate_adapter(&adv7511_cec_adap_ops, - state, dev_name(&client->dev), CEC_CAP_TRANSMIT | - CEC_CAP_LOG_ADDRS | CEC_CAP_PASSTHROUGH | CEC_CAP_RC, + state, dev_name(&client->dev), CEC_CAP_DEFAULTS, ADV7511_MAX_ADDRS); err = PTR_ERR_OR_ZERO(state->cec_adap); if (err) { @@ -1986,7 +1985,7 @@ static int adv7511_remove(struct i2c_client *client) /* ----------------------------------------------------------------------- */ -static struct i2c_device_id adv7511_id[] = { +static const struct i2c_device_id adv7511_id[] = { { "adv7511", 0 }, { } }; diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c index 660bacb8f7d9..f289b8aca1da 100644 --- a/drivers/media/i2c/adv7604.c +++ b/drivers/media/i2c/adv7604.c @@ -618,7 +618,7 @@ static int adv76xx_read_reg(struct v4l2_subdev *sd, unsigned 
int reg) unsigned int val; int err; - if (!(BIT(page) & state->info->page_mask)) + if (page >= ADV76XX_PAGE_MAX || !(BIT(page) & state->info->page_mask)) return -EINVAL; reg &= 0xff; @@ -633,7 +633,7 @@ static int adv76xx_write_reg(struct v4l2_subdev *sd, unsigned int reg, u8 val) struct adv76xx_state *state = to_state(sd); unsigned int page = reg >> 8; - if (!(BIT(page) & state->info->page_mask)) + if (page >= ADV76XX_PAGE_MAX || !(BIT(page) & state->info->page_mask)) return -EINVAL; reg &= 0xff; @@ -3515,8 +3515,7 @@ static int adv76xx_probe(struct i2c_client *client, #if IS_ENABLED(CONFIG_VIDEO_ADV7604_CEC) state->cec_adap = cec_allocate_adapter(&adv76xx_cec_adap_ops, state, dev_name(&client->dev), - CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS | - CEC_CAP_PASSTHROUGH | CEC_CAP_RC, ADV76XX_MAX_ADDRS); + CEC_CAP_DEFAULTS, ADV76XX_MAX_ADDRS); err = PTR_ERR_OR_ZERO(state->cec_adap); if (err) goto err_entity; diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c index 303effda1a2e..65f34e7e146f 100644 --- a/drivers/media/i2c/adv7842.c +++ b/drivers/media/i2c/adv7842.c @@ -3568,8 +3568,7 @@ static int adv7842_probe(struct i2c_client *client, #if IS_ENABLED(CONFIG_VIDEO_ADV7842_CEC) state->cec_adap = cec_allocate_adapter(&adv7842_cec_adap_ops, state, dev_name(&client->dev), - CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS | - CEC_CAP_PASSTHROUGH | CEC_CAP_RC, ADV7842_MAX_ADDRS); + CEC_CAP_DEFAULTS, ADV7842_MAX_ADDRS); err = PTR_ERR_OR_ZERO(state->cec_adap); if (err) goto err_entity; @@ -3608,7 +3607,7 @@ static int adv7842_remove(struct i2c_client *client) /* ----------------------------------------------------------------------- */ -static struct i2c_device_id adv7842_id[] = { +static const struct i2c_device_id adv7842_id[] = { { "adv7842", 0 }, { } }; diff --git a/drivers/media/i2c/dw9714.c b/drivers/media/i2c/dw9714.c index 6a607d7f82de..95af4fc99cd0 100644 --- a/drivers/media/i2c/dw9714.c +++ b/drivers/media/i2c/dw9714.c @@ -11,7 +11,6 @@ * GNU General Public License for more details. 
*/ -#include #include #include #include @@ -147,8 +146,7 @@ static int dw9714_init_controls(struct dw9714_device *dev_vcm) return hdl->error; } -static int dw9714_probe(struct i2c_client *client, - const struct i2c_device_id *devid) +static int dw9714_probe(struct i2c_client *client) { struct dw9714_device *dw9714_dev; int rval; @@ -250,20 +248,18 @@ static int __maybe_unused dw9714_vcm_resume(struct device *dev) return 0; } -#ifdef CONFIG_ACPI -static const struct acpi_device_id dw9714_acpi_match[] = { - {}, -}; -MODULE_DEVICE_TABLE(acpi, dw9714_acpi_match); -#endif - static const struct i2c_device_id dw9714_id_table[] = { - {DW9714_NAME, 0}, - {} + { DW9714_NAME, 0 }, + { { 0 } } }; - MODULE_DEVICE_TABLE(i2c, dw9714_id_table); +static const struct of_device_id dw9714_of_table[] = { + { .compatible = "dongwoon,dw9714" }, + { { 0 } } +}; +MODULE_DEVICE_TABLE(of, dw9714_of_table); + static const struct dev_pm_ops dw9714_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(dw9714_vcm_suspend, dw9714_vcm_resume) SET_RUNTIME_PM_OPS(dw9714_vcm_suspend, dw9714_vcm_resume, NULL) @@ -273,9 +269,9 @@ static struct i2c_driver dw9714_i2c_driver = { .driver = { .name = DW9714_NAME, .pm = &dw9714_pm_ops, - .acpi_match_table = ACPI_PTR(dw9714_acpi_match), + .of_match_table = dw9714_of_table, }, - .probe = dw9714_probe, + .probe_new = dw9714_probe, .remove = dw9714_remove, .id_table = dw9714_id_table, }; diff --git a/drivers/media/i2c/et8ek8/et8ek8_driver.c b/drivers/media/i2c/et8ek8/et8ek8_driver.c index f39f5179dd95..c14f0fd6ded3 100644 --- a/drivers/media/i2c/et8ek8/et8ek8_driver.c +++ b/drivers/media/i2c/et8ek8/et8ek8_driver.c @@ -43,7 +43,7 @@ #define ET8EK8_NAME "et8ek8" #define ET8EK8_PRIV_MEM_SIZE 128 -#define ET8EK8_MAX_MSG 48 +#define ET8EK8_MAX_MSG 8 struct et8ek8_sensor { struct v4l2_subdev subdev; @@ -220,7 +220,8 @@ static void et8ek8_i2c_create_msg(struct i2c_client *client, u16 len, u16 reg, /* * A buffered write method that puts the wanted register write - * commands in a message list and passes the list to the i2c framework + * commands in smaller number of message lists and passes the lists to + * the i2c framework */ static int et8ek8_i2c_buffered_write_regs(struct i2c_client *client, const struct et8ek8_reg *wnext, @@ -231,11 +232,7 @@ static int et8ek8_i2c_buffered_write_regs(struct i2c_client *client, int wcnt = 0; u16 reg, data_length; u32 val; - - if (WARN_ONCE(cnt > ET8EK8_MAX_MSG, - ET8EK8_NAME ": %s: too many messages.\n", __func__)) { - return -EINVAL; - } + int rval; /* Create new write messages for all writes */ while (wcnt < cnt) { @@ -249,10 +246,21 @@ static int et8ek8_i2c_buffered_write_regs(struct i2c_client *client, /* Update write count */ wcnt++; + + if (wcnt < ET8EK8_MAX_MSG) + continue; + + rval = i2c_transfer(client->adapter, msg, wcnt); + if (rval < 0) + return rval; + + cnt -= wcnt; + wcnt = 0; } - /* Now we send everything ... */ - return i2c_transfer(client->adapter, msg, wcnt); + rval = i2c_transfer(client->adapter, msg, wcnt); + + return rval < 0 ? 
rval : 0; } /* diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c index cee7fd9cf08b..a374e2a0ac3d 100644 --- a/drivers/media/i2c/ir-kbd-i2c.c +++ b/drivers/media/i2c/ir-kbd-i2c.c @@ -59,8 +59,8 @@ module_param(debug, int, 0644); /* debug level (0,1,2) */ /* ----------------------------------------------------------------------- */ -static int get_key_haup_common(struct IR_i2c *ir, enum rc_type *protocol, - u32 *scancode, u8 *ptoggle, int size) +static int get_key_haup_common(struct IR_i2c *ir, enum rc_proto *protocol, + u32 *scancode, u8 *ptoggle, int size) { unsigned char buf[6]; int start, range, toggle, dev, code, ircode, vendor; @@ -99,7 +99,7 @@ static int get_key_haup_common(struct IR_i2c *ir, enum rc_type *protocol, dprintk(1, "ir hauppauge (rc5): s%d r%d t%d dev=%d code=%d\n", start, range, toggle, dev, code); - *protocol = RC_TYPE_RC5; + *protocol = RC_PROTO_RC5; *scancode = RC_SCANCODE_RC5(dev, code); *ptoggle = toggle; @@ -111,13 +111,13 @@ static int get_key_haup_common(struct IR_i2c *ir, enum rc_type *protocol, if (vendor == 0x800f) { *ptoggle = (dev & 0x80) != 0; - *protocol = RC_TYPE_RC6_MCE; + *protocol = RC_PROTO_RC6_MCE; dev &= 0x7f; dprintk(1, "ir hauppauge (rc6-mce): t%d vendor=%d dev=%d code=%d\n", *ptoggle, vendor, dev, code); } else { *ptoggle = 0; - *protocol = RC_TYPE_RC6_6A_32; + *protocol = RC_PROTO_RC6_6A_32; dprintk(1, "ir hauppauge (rc6-6a-32): vendor=%d dev=%d code=%d\n", vendor, dev, code); } @@ -130,13 +130,13 @@ static int get_key_haup_common(struct IR_i2c *ir, enum rc_type *protocol, return 0; } -static int get_key_haup(struct IR_i2c *ir, enum rc_type *protocol, +static int get_key_haup(struct IR_i2c *ir, enum rc_proto *protocol, u32 *scancode, u8 *toggle) { return get_key_haup_common(ir, protocol, scancode, toggle, 3); } -static int get_key_haup_xvr(struct IR_i2c *ir, enum rc_type *protocol, +static int get_key_haup_xvr(struct IR_i2c *ir, enum rc_proto *protocol, u32 *scancode, u8 *toggle) { int ret; @@ -155,7 +155,7 @@ static int get_key_haup_xvr(struct IR_i2c *ir, enum rc_type *protocol, return get_key_haup_common(ir, protocol, scancode, toggle, 6); } -static int get_key_pixelview(struct IR_i2c *ir, enum rc_type *protocol, +static int get_key_pixelview(struct IR_i2c *ir, enum rc_proto *protocol, u32 *scancode, u8 *toggle) { unsigned char b; @@ -166,13 +166,13 @@ static int get_key_pixelview(struct IR_i2c *ir, enum rc_type *protocol, return -EIO; } - *protocol = RC_TYPE_OTHER; + *protocol = RC_PROTO_OTHER; *scancode = b; *toggle = 0; return 1; } -static int get_key_fusionhdtv(struct IR_i2c *ir, enum rc_type *protocol, +static int get_key_fusionhdtv(struct IR_i2c *ir, enum rc_proto *protocol, u32 *scancode, u8 *toggle) { unsigned char buf[4]; @@ -191,13 +191,13 @@ static int get_key_fusionhdtv(struct IR_i2c *ir, enum rc_type *protocol, if(buf[0] != 0x1 || buf[1] != 0xfe) return 0; - *protocol = RC_TYPE_UNKNOWN; + *protocol = RC_PROTO_UNKNOWN; *scancode = buf[2]; *toggle = 0; return 1; } -static int get_key_knc1(struct IR_i2c *ir, enum rc_type *protocol, +static int get_key_knc1(struct IR_i2c *ir, enum rc_proto *protocol, u32 *scancode, u8 *toggle) { unsigned char b; @@ -221,13 +221,13 @@ static int get_key_knc1(struct IR_i2c *ir, enum rc_type *protocol, /* keep old data */ return 1; - *protocol = RC_TYPE_UNKNOWN; + *protocol = RC_PROTO_UNKNOWN; *scancode = b; *toggle = 0; return 1; } -static int get_key_avermedia_cardbus(struct IR_i2c *ir, enum rc_type *protocol, +static int get_key_avermedia_cardbus(struct IR_i2c *ir, enum 
rc_proto *protocol, u32 *scancode, u8 *toggle) { unsigned char subaddr, key, keygroup; @@ -262,7 +262,7 @@ static int get_key_avermedia_cardbus(struct IR_i2c *ir, enum rc_type *protocol, } key |= (keygroup & 1) << 6; - *protocol = RC_TYPE_UNKNOWN; + *protocol = RC_PROTO_UNKNOWN; *scancode = key; if (ir->c->addr == 0x41) /* AVerMedia EM78P153 */ *scancode |= keygroup << 8; @@ -274,7 +274,7 @@ static int get_key_avermedia_cardbus(struct IR_i2c *ir, enum rc_type *protocol, static int ir_key_poll(struct IR_i2c *ir) { - enum rc_type protocol; + enum rc_proto protocol; u32 scancode; u8 toggle; int rc; @@ -315,7 +315,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id) { char *ir_codes = NULL; const char *name = NULL; - u64 rc_type = RC_BIT_UNKNOWN; + u64 rc_proto = RC_PROTO_BIT_UNKNOWN; struct IR_i2c *ir; struct rc_dev *rc = NULL; struct i2c_adapter *adap = client->adapter; @@ -334,7 +334,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id) case 0x64: name = "Pixelview"; ir->get_key = get_key_pixelview; - rc_type = RC_BIT_OTHER; + rc_proto = RC_PROTO_BIT_OTHER; ir_codes = RC_MAP_EMPTY; break; case 0x18: @@ -342,38 +342,39 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id) case 0x1a: name = "Hauppauge"; ir->get_key = get_key_haup; - rc_type = RC_BIT_RC5; + rc_proto = RC_PROTO_BIT_RC5; ir_codes = RC_MAP_HAUPPAUGE; break; case 0x30: name = "KNC One"; ir->get_key = get_key_knc1; - rc_type = RC_BIT_OTHER; + rc_proto = RC_PROTO_BIT_OTHER; ir_codes = RC_MAP_EMPTY; break; case 0x6b: name = "FusionHDTV"; ir->get_key = get_key_fusionhdtv; - rc_type = RC_BIT_UNKNOWN; + rc_proto = RC_PROTO_BIT_UNKNOWN; ir_codes = RC_MAP_FUSIONHDTV_MCE; break; case 0x40: name = "AVerMedia Cardbus remote"; ir->get_key = get_key_avermedia_cardbus; - rc_type = RC_BIT_OTHER; + rc_proto = RC_PROTO_BIT_OTHER; ir_codes = RC_MAP_AVERMEDIA_CARDBUS; break; case 0x41: name = "AVerMedia EM78P153"; ir->get_key = get_key_avermedia_cardbus; - rc_type = RC_BIT_OTHER; + rc_proto = RC_PROTO_BIT_OTHER; /* RM-KV remote, seems to be same as RM-K6 */ ir_codes = RC_MAP_AVERMEDIA_M733A_RM_K6; break; case 0x71: name = "Hauppauge/Zilog Z8"; ir->get_key = get_key_haup_xvr; - rc_type = RC_BIT_RC5 | RC_BIT_RC6_MCE | RC_BIT_RC6_6A_32; + rc_proto = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | + RC_PROTO_BIT_RC6_6A_32; ir_codes = RC_MAP_HAUPPAUGE; break; } @@ -388,7 +389,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id) name = init_data->name; if (init_data->type) - rc_type = init_data->type; + rc_proto = init_data->type; if (init_data->polling_interval) ir->polling_interval = init_data->polling_interval; @@ -431,7 +432,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id) ir->rc = rc; /* Make sure we are all setup before going on */ - if (!name || !ir->get_key || !rc_type || !ir_codes) { + if (!name || !ir->get_key || !rc_proto || !ir_codes) { dprintk(1, ": Unsupported device at address 0x%02x\n", addr); err = -ENODEV; @@ -452,14 +453,14 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id) */ rc->input_id.bustype = BUS_I2C; rc->input_phys = ir->phys; - rc->input_name = ir->name; + rc->device_name = ir->name; /* * Initialize the other fields of rc_dev */ rc->map_name = ir->ir_codes; - rc->allowed_protocols = rc_type; - rc->enabled_protocols = rc_type; + rc->allowed_protocols = rc_proto; + rc->enabled_protocols = rc_proto; if (!rc->driver_name) rc->driver_name = MODULE_NAME; 
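
As a rough illustration only (this is not part of the patch above), the ir-kbd-i2c.c hunks rename the old rc_type/RC_BIT_* identifiers to rc_proto/RC_PROTO_BIT_* and switch rc->input_name to rc->device_name. The minimal sketch below shows how those renamed identifiers typically fit together when a driver registers a scancode-based remote with rc-core; the function name and device strings are hypothetical, and the rc_allocate_device()/rc_register_device()/rc_free_device() calls are assumed to behave as in the in-tree rc-core drivers of this kernel version.

#include <linux/input.h>
#include <linux/module.h>
#include <media/rc-core.h>

/*
 * Hypothetical example (not taken from the patch): register a
 * scancode-based remote with rc-core using the renamed
 * RC_PROTO_BIT_* protocol masks and the device_name field that the
 * ir-kbd-i2c.c hunks above switch to.
 */
static int example_ir_register(void)
{
	struct rc_dev *rc;
	int err;

	rc = rc_allocate_device(RC_DRIVER_SCANCODE);
	if (!rc)
		return -ENOMEM;

	rc->device_name = "example i2c remote";	/* formerly rc->input_name */
	rc->input_id.bustype = BUS_I2C;
	rc->map_name = RC_MAP_HAUPPAUGE;
	rc->driver_name = "example-ir";

	/* Protocol bitmasks now use the RC_PROTO_BIT_* spelling */
	rc->allowed_protocols = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE;
	rc->enabled_protocols = RC_PROTO_BIT_RC5;

	err = rc_register_device(rc);
	if (err)
		rc_free_device(rc);

	return err;
}

As the ir-kbd-i2c.c changes above show, existing callers of the old names are converted in the same series, so the rename is effectively tree-wide rather than something individual drivers opt into.
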
diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c index 9ccb5ee55fa9..463534d44756 100644 --- a/drivers/media/i2c/m5mols/m5mols_core.c +++ b/drivers/media/i2c/m5mols/m5mols_core.c @@ -457,7 +457,7 @@ static int m5mols_get_version(struct v4l2_subdev *sd) v4l2_info(sd, "Manufacturer\t[%s]\n", is_manufacturer(info, REG_SAMSUNG_ELECTRO) ? - "Samsung Electro-Machanics" : + "Samsung Electro-Mechanics" : is_manufacturer(info, REG_SAMSUNG_OPTICS) ? "Samsung Fiber-Optics" : is_manufacturer(info, REG_SAMSUNG_TECHWIN) ? diff --git a/drivers/media/i2c/max2175.c b/drivers/media/i2c/max2175.c index a4736a8a7792..bf0e821a2b93 100644 --- a/drivers/media/i2c/max2175.c +++ b/drivers/media/i2c/max2175.c @@ -1319,7 +1319,7 @@ static int max2175_probe(struct i2c_client *client, if (IS_ERR(clk)) { ret = PTR_ERR(clk); dev_err(&client->dev, "cannot get clock %d\n", ret); - return -ENODEV; + return ret; } regmap = devm_regmap_init_i2c(client, &max2175_regmap_config); diff --git a/drivers/media/i2c/mt9m111.c b/drivers/media/i2c/mt9m111.c index 72e71b762827..99b992e46702 100644 --- a/drivers/media/i2c/mt9m111.c +++ b/drivers/media/i2c/mt9m111.c @@ -835,7 +835,7 @@ static const struct v4l2_ctrl_ops mt9m111_ctrl_ops = { .s_ctrl = mt9m111_s_ctrl, }; -static struct v4l2_subdev_core_ops mt9m111_subdev_core_ops = { +static const struct v4l2_subdev_core_ops mt9m111_subdev_core_ops = { .s_power = mt9m111_s_power, #ifdef CONFIG_VIDEO_ADV_DEBUG .g_register = mt9m111_g_register, @@ -865,7 +865,7 @@ static int mt9m111_g_mbus_config(struct v4l2_subdev *sd, return 0; } -static struct v4l2_subdev_video_ops mt9m111_subdev_video_ops = { +static const struct v4l2_subdev_video_ops mt9m111_subdev_video_ops = { .g_mbus_config = mt9m111_g_mbus_config, }; @@ -877,7 +877,7 @@ static const struct v4l2_subdev_pad_ops mt9m111_subdev_pad_ops = { .set_fmt = mt9m111_set_fmt, }; -static struct v4l2_subdev_ops mt9m111_subdev_ops = { +static const struct v4l2_subdev_ops mt9m111_subdev_ops = { .core = &mt9m111_subdev_core_ops, .video = &mt9m111_subdev_video_ops, .pad = &mt9m111_subdev_pad_ops, diff --git a/drivers/media/i2c/mt9t001.c b/drivers/media/i2c/mt9t001.c index 842017fa4aab..9d981d9f5686 100644 --- a/drivers/media/i2c/mt9t001.c +++ b/drivers/media/i2c/mt9t001.c @@ -822,15 +822,15 @@ static int mt9t001_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh) return mt9t001_set_power(subdev, 0); } -static struct v4l2_subdev_core_ops mt9t001_subdev_core_ops = { +static const struct v4l2_subdev_core_ops mt9t001_subdev_core_ops = { .s_power = mt9t001_set_power, }; -static struct v4l2_subdev_video_ops mt9t001_subdev_video_ops = { +static const struct v4l2_subdev_video_ops mt9t001_subdev_video_ops = { .s_stream = mt9t001_s_stream, }; -static struct v4l2_subdev_pad_ops mt9t001_subdev_pad_ops = { +static const struct v4l2_subdev_pad_ops mt9t001_subdev_pad_ops = { .enum_mbus_code = mt9t001_enum_mbus_code, .enum_frame_size = mt9t001_enum_frame_size, .get_fmt = mt9t001_get_format, @@ -839,7 +839,7 @@ static struct v4l2_subdev_pad_ops mt9t001_subdev_pad_ops = { .set_selection = mt9t001_set_selection, }; -static struct v4l2_subdev_ops mt9t001_subdev_ops = { +static const struct v4l2_subdev_ops mt9t001_subdev_ops = { .core = &mt9t001_subdev_core_ops, .video = &mt9t001_subdev_video_ops, .pad = &mt9t001_subdev_pad_ops, diff --git a/drivers/media/i2c/ov13858.c b/drivers/media/i2c/ov13858.c index 86550d8ddfee..af7af0d14c69 100644 --- a/drivers/media/i2c/ov13858.c +++ b/drivers/media/i2c/ov13858.c @@ -57,16 +57,14 
@@ #define OV13858_VTS_30FPS 0x0c8e /* 30 fps */ #define OV13858_VTS_60FPS 0x0648 /* 60 fps */ #define OV13858_VTS_MAX 0x7fff -#define OV13858_VBLANK_MIN 56 /* HBLANK control - read only */ -#define OV13858_PPL_540MHZ 2244 -#define OV13858_PPL_1080MHZ 4488 +#define OV13858_PPL_270MHZ 2244 +#define OV13858_PPL_540MHZ 4488 /* Exposure control */ #define OV13858_REG_EXPOSURE 0x3500 #define OV13858_EXPOSURE_MIN 4 -#define OV13858_EXPOSURE_MAX (OV13858_VTS_MAX - 8) #define OV13858_EXPOSURE_STEP 1 #define OV13858_EXPOSURE_DEFAULT 0x640 @@ -78,13 +76,13 @@ #define OV13858_ANA_GAIN_DEFAULT 0x80 /* Digital gain control */ -#define OV13858_REG_DIGITAL_GAIN 0x350a -#define OV13858_DGTL_GAIN_MASK 0xf3 -#define OV13858_DGTL_GAIN_SHIFT 2 -#define OV13858_DGTL_GAIN_MIN 1 -#define OV13858_DGTL_GAIN_MAX 4 -#define OV13858_DGTL_GAIN_STEP 1 -#define OV13858_DGTL_GAIN_DEFAULT 1 +#define OV13858_REG_B_MWB_GAIN 0x5100 +#define OV13858_REG_G_MWB_GAIN 0x5102 +#define OV13858_REG_R_MWB_GAIN 0x5104 +#define OV13858_DGTL_GAIN_MIN 0 +#define OV13858_DGTL_GAIN_MAX 16384 /* Max = 16 X */ +#define OV13858_DGTL_GAIN_DEFAULT 1024 /* Default gain = 1 X */ +#define OV13858_DGTL_GAIN_STEP 1 /* Each step = 1/1024 */ /* Test Pattern Control */ #define OV13858_REG_TEST_PATTERN 0x4503 @@ -121,7 +119,8 @@ struct ov13858_mode { u32 height; /* V-timing */ - u32 vts; + u32 vts_def; + u32 vts_min; /* Index of Link frequency config to be used */ u32 link_freq_index; @@ -944,31 +943,33 @@ static const char * const ov13858_test_pattern_menu[] = { /* Configurations for supported link frequencies */ #define OV13858_NUM_OF_LINK_FREQS 2 -#define OV13858_LINK_FREQ_1080MBPS 1080000000 -#define OV13858_LINK_FREQ_540MBPS 540000000 +#define OV13858_LINK_FREQ_540MHZ 540000000ULL +#define OV13858_LINK_FREQ_270MHZ 270000000ULL #define OV13858_LINK_FREQ_INDEX_0 0 #define OV13858_LINK_FREQ_INDEX_1 1 /* Menu items for LINK_FREQ V4L2 control */ static const s64 link_freq_menu_items[OV13858_NUM_OF_LINK_FREQS] = { - OV13858_LINK_FREQ_1080MBPS, - OV13858_LINK_FREQ_540MBPS + OV13858_LINK_FREQ_540MHZ, + OV13858_LINK_FREQ_270MHZ }; /* Link frequency configs */ static const struct ov13858_link_freq_config link_freq_configs[OV13858_NUM_OF_LINK_FREQS] = { { - .pixel_rate = 864000000, - .pixels_per_line = OV13858_PPL_1080MHZ, + /* pixel_rate = link_freq * 2 * nr_of_lanes / bits_per_sample */ + .pixel_rate = (OV13858_LINK_FREQ_540MHZ * 2 * 4) / 10, + .pixels_per_line = OV13858_PPL_540MHZ, .reg_list = { .num_of_regs = ARRAY_SIZE(mipi_data_rate_1080mbps), .regs = mipi_data_rate_1080mbps, } }, { - .pixel_rate = 432000000, - .pixels_per_line = OV13858_PPL_540MHZ, + /* pixel_rate = link_freq * 2 * nr_of_lanes / bits_per_sample */ + .pixel_rate = (OV13858_LINK_FREQ_270MHZ * 2 * 4) / 10, + .pixels_per_line = OV13858_PPL_270MHZ, .reg_list = { .num_of_regs = ARRAY_SIZE(mipi_data_rate_540mbps), .regs = mipi_data_rate_540mbps, @@ -981,7 +982,8 @@ static const struct ov13858_mode supported_modes[] = { { .width = 4224, .height = 3136, - .vts = OV13858_VTS_30FPS, + .vts_def = OV13858_VTS_30FPS, + .vts_min = OV13858_VTS_30FPS, .reg_list = { .num_of_regs = ARRAY_SIZE(mode_4224x3136_regs), .regs = mode_4224x3136_regs, @@ -991,7 +993,8 @@ static const struct ov13858_mode supported_modes[] = { { .width = 2112, .height = 1568, - .vts = OV13858_VTS_30FPS, + .vts_def = OV13858_VTS_30FPS, + .vts_min = 1608, .reg_list = { .num_of_regs = ARRAY_SIZE(mode_2112x1568_regs), .regs = mode_2112x1568_regs, @@ -1001,7 +1004,8 @@ static const struct ov13858_mode supported_modes[] = { { .width = 
2112, .height = 1188, - .vts = OV13858_VTS_30FPS, + .vts_def = OV13858_VTS_30FPS, + .vts_min = 1608, .reg_list = { .num_of_regs = ARRAY_SIZE(mode_2112x1188_regs), .regs = mode_2112x1188_regs, @@ -1011,7 +1015,8 @@ static const struct ov13858_mode supported_modes[] = { { .width = 1056, .height = 784, - .vts = OV13858_VTS_30FPS, + .vts_def = OV13858_VTS_30FPS, + .vts_min = 804, .reg_list = { .num_of_regs = ARRAY_SIZE(mode_1056x784_regs), .regs = mode_1056x784_regs, @@ -1161,21 +1166,21 @@ static int ov13858_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) static int ov13858_update_digital_gain(struct ov13858 *ov13858, u32 d_gain) { int ret; - u32 val; - if (d_gain == 3) - return -EINVAL; - - ret = ov13858_read_reg(ov13858, OV13858_REG_DIGITAL_GAIN, - OV13858_REG_VALUE_08BIT, &val); + ret = ov13858_write_reg(ov13858, OV13858_REG_B_MWB_GAIN, + OV13858_REG_VALUE_16BIT, d_gain); if (ret) return ret; - val &= OV13858_DGTL_GAIN_MASK; - val |= (d_gain - 1) << OV13858_DGTL_GAIN_SHIFT; + ret = ov13858_write_reg(ov13858, OV13858_REG_G_MWB_GAIN, + OV13858_REG_VALUE_16BIT, d_gain); + if (ret) + return ret; - return ov13858_write_reg(ov13858, OV13858_REG_DIGITAL_GAIN, - OV13858_REG_VALUE_08BIT, val); + ret = ov13858_write_reg(ov13858, OV13858_REG_R_MWB_GAIN, + OV13858_REG_VALUE_16BIT, d_gain); + + return ret; } static int ov13858_enable_test_pattern(struct ov13858 *ov13858, u32 pattern) @@ -1377,6 +1382,8 @@ ov13858_set_pad_format(struct v4l2_subdev *sd, struct ov13858 *ov13858 = to_ov13858(sd); const struct ov13858_mode *mode; struct v4l2_mbus_framefmt *framefmt; + s32 vblank_def; + s32 vblank_min; s64 h_blank; mutex_lock(&ov13858->mutex); @@ -1397,10 +1404,15 @@ ov13858_set_pad_format(struct v4l2_subdev *sd, ov13858->pixel_rate, link_freq_configs[mode->link_freq_index].pixel_rate); /* Update limits and set FPS to default */ + vblank_def = ov13858->cur_mode->vts_def - + ov13858->cur_mode->height; + vblank_min = ov13858->cur_mode->vts_min - + ov13858->cur_mode->height; __v4l2_ctrl_modify_range( - ov13858->vblank, OV13858_VBLANK_MIN, + ov13858->vblank, vblank_min, OV13858_VTS_MAX - ov13858->cur_mode->height, 1, - ov13858->cur_mode->vts - ov13858->cur_mode->height); + vblank_def); + __v4l2_ctrl_s_ctrl(ov13858->vblank, vblank_def); h_blank = link_freq_configs[mode->link_freq_index].pixels_per_line - ov13858->cur_mode->width; @@ -1602,6 +1614,9 @@ static int ov13858_init_controls(struct ov13858 *ov13858) { struct i2c_client *client = v4l2_get_subdevdata(&ov13858->sd); struct v4l2_ctrl_handler *ctrl_hdlr; + s64 exposure_max; + s64 vblank_def; + s64 vblank_min; int ret; ctrl_hdlr = &ov13858->ctrl_handler; @@ -1625,25 +1640,27 @@ static int ov13858_init_controls(struct ov13858 *ov13858) link_freq_configs[0].pixel_rate, 1, link_freq_configs[0].pixel_rate); + vblank_def = ov13858->cur_mode->vts_def - ov13858->cur_mode->height; + vblank_min = ov13858->cur_mode->vts_min - ov13858->cur_mode->height; ov13858->vblank = v4l2_ctrl_new_std( ctrl_hdlr, &ov13858_ctrl_ops, V4L2_CID_VBLANK, - OV13858_VBLANK_MIN, + vblank_min, OV13858_VTS_MAX - ov13858->cur_mode->height, 1, - ov13858->cur_mode->vts - - ov13858->cur_mode->height); + vblank_def); ov13858->hblank = v4l2_ctrl_new_std( ctrl_hdlr, &ov13858_ctrl_ops, V4L2_CID_HBLANK, - OV13858_PPL_1080MHZ - ov13858->cur_mode->width, - OV13858_PPL_1080MHZ - ov13858->cur_mode->width, + OV13858_PPL_540MHZ - ov13858->cur_mode->width, + OV13858_PPL_540MHZ - ov13858->cur_mode->width, 1, - OV13858_PPL_1080MHZ - ov13858->cur_mode->width); + OV13858_PPL_540MHZ - 
ov13858->cur_mode->width); ov13858->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY; + exposure_max = ov13858->cur_mode->vts_def - 8; ov13858->exposure = v4l2_ctrl_new_std( ctrl_hdlr, &ov13858_ctrl_ops, V4L2_CID_EXPOSURE, OV13858_EXPOSURE_MIN, - OV13858_EXPOSURE_MAX, OV13858_EXPOSURE_STEP, + exposure_max, OV13858_EXPOSURE_STEP, OV13858_EXPOSURE_DEFAULT); v4l2_ctrl_new_std(ctrl_hdlr, &ov13858_ctrl_ops, V4L2_CID_ANALOGUE_GAIN, diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c index 1f5b483cf334..39a2269c0bee 100644 --- a/drivers/media/i2c/ov5640.c +++ b/drivers/media/i2c/ov5640.c @@ -1524,8 +1524,7 @@ static int ov5640_restore_mode(struct ov5640_dev *sensor) static void ov5640_power(struct ov5640_dev *sensor, bool enable) { - if (sensor->pwdn_gpio) - gpiod_set_value(sensor->pwdn_gpio, enable ? 0 : 1); + gpiod_set_value(sensor->pwdn_gpio, enable ? 0 : 1); } static void ov5640_reset(struct ov5640_dev *sensor) diff --git a/drivers/media/i2c/ov5645.c b/drivers/media/i2c/ov5645.c index d1e844f7f03f..d28845f7356f 100644 --- a/drivers/media/i2c/ov5645.c +++ b/drivers/media/i2c/ov5645.c @@ -80,6 +80,8 @@ struct ov5645_mode_info { u32 height; const struct reg_value *data; u32 data_size; + u32 pixel_clock; + u32 link_freq; }; struct ov5645 { @@ -99,6 +101,8 @@ struct ov5645 { const struct ov5645_mode_info *current_mode; struct v4l2_ctrl_handler ctrls; + struct v4l2_ctrl *pixel_clock; + struct v4l2_ctrl *link_freq; /* Cached register values */ u8 aec_pk_manual; @@ -505,24 +509,35 @@ static const struct reg_value ov5645_setting_full[] = { { 0x4202, 0x00 } }; +static const s64 link_freq[] = { + 222880000, + 334320000 +}; + static const struct ov5645_mode_info ov5645_mode_info_data[] = { { .width = 1280, .height = 960, .data = ov5645_setting_sxga, - .data_size = ARRAY_SIZE(ov5645_setting_sxga) + .data_size = ARRAY_SIZE(ov5645_setting_sxga), + .pixel_clock = 111440000, + .link_freq = 0 /* an index in link_freq[] */ }, { .width = 1920, .height = 1080, .data = ov5645_setting_1080p, - .data_size = ARRAY_SIZE(ov5645_setting_1080p) + .data_size = ARRAY_SIZE(ov5645_setting_1080p), + .pixel_clock = 167160000, + .link_freq = 1 /* an index in link_freq[] */ }, { .width = 2592, .height = 1944, .data = ov5645_setting_full, - .data_size = ARRAY_SIZE(ov5645_setting_full) + .data_size = ARRAY_SIZE(ov5645_setting_full), + .pixel_clock = 167160000, + .link_freq = 1 /* an index in link_freq[] */ }, }; @@ -969,6 +984,7 @@ static int ov5645_set_format(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *__format; struct v4l2_rect *__crop; const struct ov5645_mode_info *new_mode; + int ret; __crop = __ov5645_get_pad_crop(ov5645, cfg, format->pad, format->which); @@ -978,8 +994,19 @@ static int ov5645_set_format(struct v4l2_subdev *sd, __crop->width = new_mode->width; __crop->height = new_mode->height; - if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) + if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) { + ret = v4l2_ctrl_s_ctrl_int64(ov5645->pixel_clock, + new_mode->pixel_clock); + if (ret < 0) + return ret; + + ret = v4l2_ctrl_s_ctrl(ov5645->link_freq, + new_mode->link_freq); + if (ret < 0) + return ret; + ov5645->current_mode = new_mode; + } __format = __ov5645_get_pad_format(ov5645, cfg, format->pad, format->which); @@ -1197,7 +1224,7 @@ static int ov5645_probe(struct i2c_client *client, mutex_init(&ov5645->power_lock); - v4l2_ctrl_handler_init(&ov5645->ctrls, 7); + v4l2_ctrl_handler_init(&ov5645->ctrls, 9); v4l2_ctrl_new_std(&ov5645->ctrls, &ov5645_ctrl_ops, V4L2_CID_SATURATION, -4, 4, 1, 0); 
v4l2_ctrl_new_std(&ov5645->ctrls, &ov5645_ctrl_ops, @@ -1215,6 +1242,17 @@ static int ov5645_probe(struct i2c_client *client, V4L2_CID_TEST_PATTERN, ARRAY_SIZE(ov5645_test_pattern_menu) - 1, 0, 0, ov5645_test_pattern_menu); + ov5645->pixel_clock = v4l2_ctrl_new_std(&ov5645->ctrls, + &ov5645_ctrl_ops, + V4L2_CID_PIXEL_RATE, + 1, INT_MAX, 1, 1); + ov5645->link_freq = v4l2_ctrl_new_int_menu(&ov5645->ctrls, + &ov5645_ctrl_ops, + V4L2_CID_LINK_FREQ, + ARRAY_SIZE(link_freq) - 1, + 0, link_freq); + if (ov5645->link_freq) + ov5645->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY; ov5645->sd.ctrl_handler = &ov5645->ctrls; @@ -1229,6 +1267,7 @@ static int ov5645_probe(struct i2c_client *client, ov5645->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; ov5645->pad.flags = MEDIA_PAD_FL_SOURCE; ov5645->sd.dev = &client->dev; + ov5645->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR; ret = media_entity_pads_init(&ov5645->sd.entity, 1, &ov5645->pad); if (ret < 0) { diff --git a/drivers/media/i2c/ov5670.c b/drivers/media/i2c/ov5670.c new file mode 100644 index 000000000000..6f7a1d6d2200 --- /dev/null +++ b/drivers/media/i2c/ov5670.c @@ -0,0 +1,2601 @@ +/* + * Copyright (c) 2017 Intel Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include + +#define OV5670_REG_CHIP_ID 0x300a +#define OV5670_CHIP_ID 0x005670 + +#define OV5670_REG_MODE_SELECT 0x0100 +#define OV5670_MODE_STANDBY 0x00 +#define OV5670_MODE_STREAMING 0x01 + +#define OV5670_REG_SOFTWARE_RST 0x0103 +#define OV5670_SOFTWARE_RST 0x01 + +/* vertical-timings from sensor */ +#define OV5670_REG_VTS 0x380e +#define OV5670_VTS_30FPS 0x0808 /* default for 30 fps */ +#define OV5670_VTS_MAX 0xffff + +/* horizontal-timings from sensor */ +#define OV5670_REG_HTS 0x380c + +/* + * Pixels-per-line(PPL) = Time-per-line * pixel-rate + * In OV5670, Time-per-line = HTS/SCLK. + * HTS is fixed for all resolutions, not recommended to change. 
+ */ +#define OV5670_FIXED_PPL 2724 /* Pixels per line */ + +/* Exposure controls from sensor */ +#define OV5670_REG_EXPOSURE 0x3500 +#define OV5670_EXPOSURE_MIN 4 +#define OV5670_EXPOSURE_STEP 1 + +/* Analog gain controls from sensor */ +#define OV5670_REG_ANALOG_GAIN 0x3508 +#define ANALOG_GAIN_MIN 0 +#define ANALOG_GAIN_MAX 8191 +#define ANALOG_GAIN_STEP 1 +#define ANALOG_GAIN_DEFAULT 128 + +/* Digital gain controls from sensor */ +#define OV5670_REG_R_DGTL_GAIN 0x5032 +#define OV5670_REG_G_DGTL_GAIN 0x5034 +#define OV5670_REG_B_DGTL_GAIN 0x5036 +#define OV5670_DGTL_GAIN_MIN 0 +#define OV5670_DGTL_GAIN_MAX 4095 +#define OV5670_DGTL_GAIN_STEP 1 +#define OV5670_DGTL_GAIN_DEFAULT 1024 + +/* Test Pattern Control */ +#define OV5670_REG_TEST_PATTERN 0x4303 +#define OV5670_TEST_PATTERN_ENABLE BIT(3) +#define OV5670_REG_TEST_PATTERN_CTRL 0x4320 + +#define OV5670_REG_VALUE_08BIT 1 +#define OV5670_REG_VALUE_16BIT 2 +#define OV5670_REG_VALUE_24BIT 3 + +/* Initial number of frames to skip to avoid possible garbage */ +#define OV5670_NUM_OF_SKIP_FRAMES 2 + +struct ov5670_reg { + u16 address; + u8 val; +}; + +struct ov5670_reg_list { + u32 num_of_regs; + const struct ov5670_reg *regs; +}; + +struct ov5670_link_freq_config { + u32 pixel_rate; + const struct ov5670_reg_list reg_list; +}; + +struct ov5670_mode { + /* Frame width in pixels */ + u32 width; + + /* Frame height in pixels */ + u32 height; + + /* Default vertical timining size */ + u32 vts_def; + + /* Min vertical timining size */ + u32 vts_min; + + /* Link frequency needed for this resolution */ + u32 link_freq_index; + + /* Sensor register settings for this resolution */ + const struct ov5670_reg_list reg_list; +}; + +static const struct ov5670_reg mipi_data_rate_840mbps[] = { + {0x0300, 0x04}, + {0x0301, 0x00}, + {0x0302, 0x84}, + {0x0303, 0x00}, + {0x0304, 0x03}, + {0x0305, 0x01}, + {0x0306, 0x01}, + {0x030a, 0x00}, + {0x030b, 0x00}, + {0x030c, 0x00}, + {0x030d, 0x26}, + {0x030e, 0x00}, + {0x030f, 0x06}, + {0x0312, 0x01}, + {0x3031, 0x0a}, +}; + +static const struct ov5670_reg mode_2592x1944_regs[] = { + {0x3000, 0x00}, + {0x3002, 0x21}, + {0x3005, 0xf0}, + {0x3007, 0x00}, + {0x3015, 0x0f}, + {0x3018, 0x32}, + {0x301a, 0xf0}, + {0x301b, 0xf0}, + {0x301c, 0xf0}, + {0x301d, 0xf0}, + {0x301e, 0xf0}, + {0x3030, 0x00}, + {0x3031, 0x0a}, + {0x303c, 0xff}, + {0x303e, 0xff}, + {0x3040, 0xf0}, + {0x3041, 0x00}, + {0x3042, 0xf0}, + {0x3106, 0x11}, + {0x3500, 0x00}, + {0x3501, 0x80}, + {0x3502, 0x00}, + {0x3503, 0x04}, + {0x3504, 0x03}, + {0x3505, 0x83}, + {0x3508, 0x04}, + {0x3509, 0x00}, + {0x350e, 0x04}, + {0x350f, 0x00}, + {0x3510, 0x00}, + {0x3511, 0x02}, + {0x3512, 0x00}, + {0x3601, 0xc8}, + {0x3610, 0x88}, + {0x3612, 0x48}, + {0x3614, 0x5b}, + {0x3615, 0x96}, + {0x3621, 0xd0}, + {0x3622, 0x00}, + {0x3623, 0x00}, + {0x3633, 0x13}, + {0x3634, 0x13}, + {0x3635, 0x13}, + {0x3636, 0x13}, + {0x3645, 0x13}, + {0x3646, 0x82}, + {0x3650, 0x00}, + {0x3652, 0xff}, + {0x3655, 0x20}, + {0x3656, 0xff}, + {0x365a, 0xff}, + {0x365e, 0xff}, + {0x3668, 0x00}, + {0x366a, 0x07}, + {0x366e, 0x10}, + {0x366d, 0x00}, + {0x366f, 0x80}, + {0x3700, 0x28}, + {0x3701, 0x10}, + {0x3702, 0x3a}, + {0x3703, 0x19}, + {0x3704, 0x10}, + {0x3705, 0x00}, + {0x3706, 0x66}, + {0x3707, 0x08}, + {0x3708, 0x34}, + {0x3709, 0x40}, + {0x370a, 0x01}, + {0x370b, 0x1b}, + {0x3714, 0x24}, + {0x371a, 0x3e}, + {0x3733, 0x00}, + {0x3734, 0x00}, + {0x373a, 0x05}, + {0x373b, 0x06}, + {0x373c, 0x0a}, + {0x373f, 0xa0}, + {0x3755, 0x00}, + {0x3758, 0x00}, + {0x375b, 0x0e}, + {0x3766, 0x5f}, + 
{0x3768, 0x00}, + {0x3769, 0x22}, + {0x3773, 0x08}, + {0x3774, 0x1f}, + {0x3776, 0x06}, + {0x37a0, 0x88}, + {0x37a1, 0x5c}, + {0x37a7, 0x88}, + {0x37a8, 0x70}, + {0x37aa, 0x88}, + {0x37ab, 0x48}, + {0x37b3, 0x66}, + {0x37c2, 0x04}, + {0x37c5, 0x00}, + {0x37c8, 0x00}, + {0x3800, 0x00}, + {0x3801, 0x0c}, + {0x3802, 0x00}, + {0x3803, 0x04}, + {0x3804, 0x0a}, + {0x3805, 0x33}, + {0x3806, 0x07}, + {0x3807, 0xa3}, + {0x3808, 0x0a}, + {0x3809, 0x20}, + {0x380a, 0x07}, + {0x380b, 0x98}, + {0x380c, 0x06}, + {0x380d, 0x90}, + {0x380e, 0x08}, + {0x380f, 0x08}, + {0x3811, 0x04}, + {0x3813, 0x02}, + {0x3814, 0x01}, + {0x3815, 0x01}, + {0x3816, 0x00}, + {0x3817, 0x00}, + {0x3818, 0x00}, + {0x3819, 0x00}, + {0x3820, 0x84}, + {0x3821, 0x46}, + {0x3822, 0x48}, + {0x3826, 0x00}, + {0x3827, 0x08}, + {0x382a, 0x01}, + {0x382b, 0x01}, + {0x3830, 0x08}, + {0x3836, 0x02}, + {0x3837, 0x00}, + {0x3838, 0x10}, + {0x3841, 0xff}, + {0x3846, 0x48}, + {0x3861, 0x00}, + {0x3862, 0x04}, + {0x3863, 0x06}, + {0x3a11, 0x01}, + {0x3a12, 0x78}, + {0x3b00, 0x00}, + {0x3b02, 0x00}, + {0x3b03, 0x00}, + {0x3b04, 0x00}, + {0x3b05, 0x00}, + {0x3c00, 0x89}, + {0x3c01, 0xab}, + {0x3c02, 0x01}, + {0x3c03, 0x00}, + {0x3c04, 0x00}, + {0x3c05, 0x03}, + {0x3c06, 0x00}, + {0x3c07, 0x05}, + {0x3c0c, 0x00}, + {0x3c0d, 0x00}, + {0x3c0e, 0x00}, + {0x3c0f, 0x00}, + {0x3c40, 0x00}, + {0x3c41, 0xa3}, + {0x3c43, 0x7d}, + {0x3c45, 0xd7}, + {0x3c47, 0xfc}, + {0x3c50, 0x05}, + {0x3c52, 0xaa}, + {0x3c54, 0x71}, + {0x3c56, 0x80}, + {0x3d85, 0x17}, + {0x3f03, 0x00}, + {0x3f0a, 0x00}, + {0x3f0b, 0x00}, + {0x4001, 0x60}, + {0x4009, 0x0d}, + {0x4020, 0x00}, + {0x4021, 0x00}, + {0x4022, 0x00}, + {0x4023, 0x00}, + {0x4024, 0x00}, + {0x4025, 0x00}, + {0x4026, 0x00}, + {0x4027, 0x00}, + {0x4028, 0x00}, + {0x4029, 0x00}, + {0x402a, 0x00}, + {0x402b, 0x00}, + {0x402c, 0x00}, + {0x402d, 0x00}, + {0x402e, 0x00}, + {0x402f, 0x00}, + {0x4040, 0x00}, + {0x4041, 0x03}, + {0x4042, 0x00}, + {0x4043, 0x7A}, + {0x4044, 0x00}, + {0x4045, 0x7A}, + {0x4046, 0x00}, + {0x4047, 0x7A}, + {0x4048, 0x00}, + {0x4049, 0x7A}, + {0x4307, 0x30}, + {0x4500, 0x58}, + {0x4501, 0x04}, + {0x4502, 0x40}, + {0x4503, 0x10}, + {0x4508, 0xaa}, + {0x4509, 0xaa}, + {0x450a, 0x00}, + {0x450b, 0x00}, + {0x4600, 0x01}, + {0x4601, 0x03}, + {0x4700, 0xa4}, + {0x4800, 0x4c}, + {0x4816, 0x53}, + {0x481f, 0x40}, + {0x4837, 0x13}, + {0x5000, 0x56}, + {0x5001, 0x01}, + {0x5002, 0x28}, + {0x5004, 0x0c}, + {0x5006, 0x0c}, + {0x5007, 0xe0}, + {0x5008, 0x01}, + {0x5009, 0xb0}, + {0x5901, 0x00}, + {0x5a01, 0x00}, + {0x5a03, 0x00}, + {0x5a04, 0x0c}, + {0x5a05, 0xe0}, + {0x5a06, 0x09}, + {0x5a07, 0xb0}, + {0x5a08, 0x06}, + {0x5e00, 0x00}, + {0x3734, 0x40}, + {0x5b00, 0x01}, + {0x5b01, 0x10}, + {0x5b02, 0x01}, + {0x5b03, 0xdb}, + {0x3d8c, 0x71}, + {0x3d8d, 0xea}, + {0x4017, 0x08}, + {0x3618, 0x2a}, + {0x5780, 0x3e}, + {0x5781, 0x0f}, + {0x5782, 0x44}, + {0x5783, 0x02}, + {0x5784, 0x01}, + {0x5785, 0x01}, + {0x5786, 0x00}, + {0x5787, 0x04}, + {0x5788, 0x02}, + {0x5789, 0x0f}, + {0x578a, 0xfd}, + {0x578b, 0xf5}, + {0x578c, 0xf5}, + {0x578d, 0x03}, + {0x578e, 0x08}, + {0x578f, 0x0c}, + {0x5790, 0x08}, + {0x5791, 0x06}, + {0x5792, 0x00}, + {0x5793, 0x52}, + {0x5794, 0xa3}, + {0x3503, 0x00} +}; + +static const struct ov5670_reg mode_1296x972_regs[] = { + {0x3000, 0x00}, + {0x3002, 0x21}, + {0x3005, 0xf0}, + {0x3007, 0x00}, + {0x3015, 0x0f}, + {0x3018, 0x32}, + {0x301a, 0xf0}, + {0x301b, 0xf0}, + {0x301c, 0xf0}, + {0x301d, 0xf0}, + {0x301e, 0xf0}, + {0x3030, 0x00}, + {0x3031, 0x0a}, + {0x303c, 0xff}, + {0x303e, 0xff}, + 
{0x3040, 0xf0}, + {0x3041, 0x00}, + {0x3042, 0xf0}, + {0x3106, 0x11}, + {0x3500, 0x00}, + {0x3501, 0x80}, + {0x3502, 0x00}, + {0x3503, 0x04}, + {0x3504, 0x03}, + {0x3505, 0x83}, + {0x3508, 0x07}, + {0x3509, 0x80}, + {0x350e, 0x04}, + {0x350f, 0x00}, + {0x3510, 0x00}, + {0x3511, 0x02}, + {0x3512, 0x00}, + {0x3601, 0xc8}, + {0x3610, 0x88}, + {0x3612, 0x48}, + {0x3614, 0x5b}, + {0x3615, 0x96}, + {0x3621, 0xd0}, + {0x3622, 0x00}, + {0x3623, 0x00}, + {0x3633, 0x13}, + {0x3634, 0x13}, + {0x3635, 0x13}, + {0x3636, 0x13}, + {0x3645, 0x13}, + {0x3646, 0x82}, + {0x3650, 0x00}, + {0x3652, 0xff}, + {0x3655, 0x20}, + {0x3656, 0xff}, + {0x365a, 0xff}, + {0x365e, 0xff}, + {0x3668, 0x00}, + {0x366a, 0x07}, + {0x366e, 0x08}, + {0x366d, 0x00}, + {0x366f, 0x80}, + {0x3700, 0x28}, + {0x3701, 0x10}, + {0x3702, 0x3a}, + {0x3703, 0x19}, + {0x3704, 0x10}, + {0x3705, 0x00}, + {0x3706, 0x66}, + {0x3707, 0x08}, + {0x3708, 0x34}, + {0x3709, 0x40}, + {0x370a, 0x01}, + {0x370b, 0x1b}, + {0x3714, 0x24}, + {0x371a, 0x3e}, + {0x3733, 0x00}, + {0x3734, 0x00}, + {0x373a, 0x05}, + {0x373b, 0x06}, + {0x373c, 0x0a}, + {0x373f, 0xa0}, + {0x3755, 0x00}, + {0x3758, 0x00}, + {0x375b, 0x0e}, + {0x3766, 0x5f}, + {0x3768, 0x00}, + {0x3769, 0x22}, + {0x3773, 0x08}, + {0x3774, 0x1f}, + {0x3776, 0x06}, + {0x37a0, 0x88}, + {0x37a1, 0x5c}, + {0x37a7, 0x88}, + {0x37a8, 0x70}, + {0x37aa, 0x88}, + {0x37ab, 0x48}, + {0x37b3, 0x66}, + {0x37c2, 0x04}, + {0x37c5, 0x00}, + {0x37c8, 0x00}, + {0x3800, 0x00}, + {0x3801, 0x0c}, + {0x3802, 0x00}, + {0x3803, 0x04}, + {0x3804, 0x0a}, + {0x3805, 0x33}, + {0x3806, 0x07}, + {0x3807, 0xa3}, + {0x3808, 0x05}, + {0x3809, 0x10}, + {0x380a, 0x03}, + {0x380b, 0xcc}, + {0x380c, 0x06}, + {0x380d, 0x90}, + {0x380e, 0x08}, + {0x380f, 0x08}, + {0x3811, 0x04}, + {0x3813, 0x04}, + {0x3814, 0x03}, + {0x3815, 0x01}, + {0x3816, 0x00}, + {0x3817, 0x00}, + {0x3818, 0x00}, + {0x3819, 0x00}, + {0x3820, 0x94}, + {0x3821, 0x47}, + {0x3822, 0x48}, + {0x3826, 0x00}, + {0x3827, 0x08}, + {0x382a, 0x03}, + {0x382b, 0x01}, + {0x3830, 0x08}, + {0x3836, 0x02}, + {0x3837, 0x00}, + {0x3838, 0x10}, + {0x3841, 0xff}, + {0x3846, 0x48}, + {0x3861, 0x00}, + {0x3862, 0x04}, + {0x3863, 0x06}, + {0x3a11, 0x01}, + {0x3a12, 0x78}, + {0x3b00, 0x00}, + {0x3b02, 0x00}, + {0x3b03, 0x00}, + {0x3b04, 0x00}, + {0x3b05, 0x00}, + {0x3c00, 0x89}, + {0x3c01, 0xab}, + {0x3c02, 0x01}, + {0x3c03, 0x00}, + {0x3c04, 0x00}, + {0x3c05, 0x03}, + {0x3c06, 0x00}, + {0x3c07, 0x05}, + {0x3c0c, 0x00}, + {0x3c0d, 0x00}, + {0x3c0e, 0x00}, + {0x3c0f, 0x00}, + {0x3c40, 0x00}, + {0x3c41, 0xa3}, + {0x3c43, 0x7d}, + {0x3c45, 0xd7}, + {0x3c47, 0xfc}, + {0x3c50, 0x05}, + {0x3c52, 0xaa}, + {0x3c54, 0x71}, + {0x3c56, 0x80}, + {0x3d85, 0x17}, + {0x3f03, 0x00}, + {0x3f0a, 0x00}, + {0x3f0b, 0x00}, + {0x4001, 0x60}, + {0x4009, 0x05}, + {0x4020, 0x00}, + {0x4021, 0x00}, + {0x4022, 0x00}, + {0x4023, 0x00}, + {0x4024, 0x00}, + {0x4025, 0x00}, + {0x4026, 0x00}, + {0x4027, 0x00}, + {0x4028, 0x00}, + {0x4029, 0x00}, + {0x402a, 0x00}, + {0x402b, 0x00}, + {0x402c, 0x00}, + {0x402d, 0x00}, + {0x402e, 0x00}, + {0x402f, 0x00}, + {0x4040, 0x00}, + {0x4041, 0x03}, + {0x4042, 0x00}, + {0x4043, 0x7A}, + {0x4044, 0x00}, + {0x4045, 0x7A}, + {0x4046, 0x00}, + {0x4047, 0x7A}, + {0x4048, 0x00}, + {0x4049, 0x7A}, + {0x4307, 0x30}, + {0x4500, 0x58}, + {0x4501, 0x04}, + {0x4502, 0x48}, + {0x4503, 0x10}, + {0x4508, 0x55}, + {0x4509, 0x55}, + {0x450a, 0x00}, + {0x450b, 0x00}, + {0x4600, 0x00}, + {0x4601, 0x81}, + {0x4700, 0xa4}, + {0x4800, 0x4c}, + {0x4816, 0x53}, + {0x481f, 0x40}, + {0x4837, 0x13}, + {0x5000, 
0x56}, + {0x5001, 0x01}, + {0x5002, 0x28}, + {0x5004, 0x0c}, + {0x5006, 0x0c}, + {0x5007, 0xe0}, + {0x5008, 0x01}, + {0x5009, 0xb0}, + {0x5901, 0x00}, + {0x5a01, 0x00}, + {0x5a03, 0x00}, + {0x5a04, 0x0c}, + {0x5a05, 0xe0}, + {0x5a06, 0x09}, + {0x5a07, 0xb0}, + {0x5a08, 0x06}, + {0x5e00, 0x00}, + {0x3734, 0x40}, + {0x5b00, 0x01}, + {0x5b01, 0x10}, + {0x5b02, 0x01}, + {0x5b03, 0xdb}, + {0x3d8c, 0x71}, + {0x3d8d, 0xea}, + {0x4017, 0x10}, + {0x3618, 0x2a}, + {0x5780, 0x3e}, + {0x5781, 0x0f}, + {0x5782, 0x44}, + {0x5783, 0x02}, + {0x5784, 0x01}, + {0x5785, 0x01}, + {0x5786, 0x00}, + {0x5787, 0x04}, + {0x5788, 0x02}, + {0x5789, 0x0f}, + {0x578a, 0xfd}, + {0x578b, 0xf5}, + {0x578c, 0xf5}, + {0x578d, 0x03}, + {0x578e, 0x08}, + {0x578f, 0x0c}, + {0x5790, 0x08}, + {0x5791, 0x04}, + {0x5792, 0x00}, + {0x5793, 0x52}, + {0x5794, 0xa3}, + {0x3503, 0x00} +}; + +static const struct ov5670_reg mode_648x486_regs[] = { + {0x3000, 0x00}, + {0x3002, 0x21}, + {0x3005, 0xf0}, + {0x3007, 0x00}, + {0x3015, 0x0f}, + {0x3018, 0x32}, + {0x301a, 0xf0}, + {0x301b, 0xf0}, + {0x301c, 0xf0}, + {0x301d, 0xf0}, + {0x301e, 0xf0}, + {0x3030, 0x00}, + {0x3031, 0x0a}, + {0x303c, 0xff}, + {0x303e, 0xff}, + {0x3040, 0xf0}, + {0x3041, 0x00}, + {0x3042, 0xf0}, + {0x3106, 0x11}, + {0x3500, 0x00}, + {0x3501, 0x80}, + {0x3502, 0x00}, + {0x3503, 0x04}, + {0x3504, 0x03}, + {0x3505, 0x83}, + {0x3508, 0x04}, + {0x3509, 0x00}, + {0x350e, 0x04}, + {0x350f, 0x00}, + {0x3510, 0x00}, + {0x3511, 0x02}, + {0x3512, 0x00}, + {0x3601, 0xc8}, + {0x3610, 0x88}, + {0x3612, 0x48}, + {0x3614, 0x5b}, + {0x3615, 0x96}, + {0x3621, 0xd0}, + {0x3622, 0x00}, + {0x3623, 0x04}, + {0x3633, 0x13}, + {0x3634, 0x13}, + {0x3635, 0x13}, + {0x3636, 0x13}, + {0x3645, 0x13}, + {0x3646, 0x82}, + {0x3650, 0x00}, + {0x3652, 0xff}, + {0x3655, 0x20}, + {0x3656, 0xff}, + {0x365a, 0xff}, + {0x365e, 0xff}, + {0x3668, 0x00}, + {0x366a, 0x07}, + {0x366e, 0x08}, + {0x366d, 0x00}, + {0x366f, 0x80}, + {0x3700, 0x28}, + {0x3701, 0x10}, + {0x3702, 0x3a}, + {0x3703, 0x19}, + {0x3704, 0x10}, + {0x3705, 0x00}, + {0x3706, 0x66}, + {0x3707, 0x08}, + {0x3708, 0x34}, + {0x3709, 0x40}, + {0x370a, 0x01}, + {0x370b, 0x1b}, + {0x3714, 0x24}, + {0x371a, 0x3e}, + {0x3733, 0x00}, + {0x3734, 0x00}, + {0x373a, 0x05}, + {0x373b, 0x06}, + {0x373c, 0x0a}, + {0x373f, 0xa0}, + {0x3755, 0x00}, + {0x3758, 0x00}, + {0x375b, 0x0e}, + {0x3766, 0x5f}, + {0x3768, 0x00}, + {0x3769, 0x22}, + {0x3773, 0x08}, + {0x3774, 0x1f}, + {0x3776, 0x06}, + {0x37a0, 0x88}, + {0x37a1, 0x5c}, + {0x37a7, 0x88}, + {0x37a8, 0x70}, + {0x37aa, 0x88}, + {0x37ab, 0x48}, + {0x37b3, 0x66}, + {0x37c2, 0x04}, + {0x37c5, 0x00}, + {0x37c8, 0x00}, + {0x3800, 0x00}, + {0x3801, 0x0c}, + {0x3802, 0x00}, + {0x3803, 0x04}, + {0x3804, 0x0a}, + {0x3805, 0x33}, + {0x3806, 0x07}, + {0x3807, 0xa3}, + {0x3808, 0x02}, + {0x3809, 0x88}, + {0x380a, 0x01}, + {0x380b, 0xe6}, + {0x380c, 0x06}, + {0x380d, 0x90}, + {0x380e, 0x08}, + {0x380f, 0x08}, + {0x3811, 0x04}, + {0x3813, 0x02}, + {0x3814, 0x07}, + {0x3815, 0x01}, + {0x3816, 0x00}, + {0x3817, 0x00}, + {0x3818, 0x00}, + {0x3819, 0x00}, + {0x3820, 0x94}, + {0x3821, 0xc6}, + {0x3822, 0x48}, + {0x3826, 0x00}, + {0x3827, 0x08}, + {0x382a, 0x07}, + {0x382b, 0x01}, + {0x3830, 0x08}, + {0x3836, 0x02}, + {0x3837, 0x00}, + {0x3838, 0x10}, + {0x3841, 0xff}, + {0x3846, 0x48}, + {0x3861, 0x00}, + {0x3862, 0x04}, + {0x3863, 0x06}, + {0x3a11, 0x01}, + {0x3a12, 0x78}, + {0x3b00, 0x00}, + {0x3b02, 0x00}, + {0x3b03, 0x00}, + {0x3b04, 0x00}, + {0x3b05, 0x00}, + {0x3c00, 0x89}, + {0x3c01, 0xab}, + {0x3c02, 0x01}, + {0x3c03, 
0x00}, + {0x3c04, 0x00}, + {0x3c05, 0x03}, + {0x3c06, 0x00}, + {0x3c07, 0x05}, + {0x3c0c, 0x00}, + {0x3c0d, 0x00}, + {0x3c0e, 0x00}, + {0x3c0f, 0x00}, + {0x3c40, 0x00}, + {0x3c41, 0xa3}, + {0x3c43, 0x7d}, + {0x3c45, 0xd7}, + {0x3c47, 0xfc}, + {0x3c50, 0x05}, + {0x3c52, 0xaa}, + {0x3c54, 0x71}, + {0x3c56, 0x80}, + {0x3d85, 0x17}, + {0x3f03, 0x00}, + {0x3f0a, 0x00}, + {0x3f0b, 0x00}, + {0x4001, 0x60}, + {0x4009, 0x05}, + {0x4020, 0x00}, + {0x4021, 0x00}, + {0x4022, 0x00}, + {0x4023, 0x00}, + {0x4024, 0x00}, + {0x4025, 0x00}, + {0x4026, 0x00}, + {0x4027, 0x00}, + {0x4028, 0x00}, + {0x4029, 0x00}, + {0x402a, 0x00}, + {0x402b, 0x00}, + {0x402c, 0x00}, + {0x402d, 0x00}, + {0x402e, 0x00}, + {0x402f, 0x00}, + {0x4040, 0x00}, + {0x4041, 0x03}, + {0x4042, 0x00}, + {0x4043, 0x7A}, + {0x4044, 0x00}, + {0x4045, 0x7A}, + {0x4046, 0x00}, + {0x4047, 0x7A}, + {0x4048, 0x00}, + {0x4049, 0x7A}, + {0x4307, 0x30}, + {0x4500, 0x58}, + {0x4501, 0x04}, + {0x4502, 0x40}, + {0x4503, 0x10}, + {0x4508, 0x55}, + {0x4509, 0x55}, + {0x450a, 0x02}, + {0x450b, 0x00}, + {0x4600, 0x00}, + {0x4601, 0x40}, + {0x4700, 0xa4}, + {0x4800, 0x4c}, + {0x4816, 0x53}, + {0x481f, 0x40}, + {0x4837, 0x13}, + {0x5000, 0x56}, + {0x5001, 0x01}, + {0x5002, 0x28}, + {0x5004, 0x0c}, + {0x5006, 0x0c}, + {0x5007, 0xe0}, + {0x5008, 0x01}, + {0x5009, 0xb0}, + {0x5901, 0x00}, + {0x5a01, 0x00}, + {0x5a03, 0x00}, + {0x5a04, 0x0c}, + {0x5a05, 0xe0}, + {0x5a06, 0x09}, + {0x5a07, 0xb0}, + {0x5a08, 0x06}, + {0x5e00, 0x00}, + {0x3734, 0x40}, + {0x5b00, 0x01}, + {0x5b01, 0x10}, + {0x5b02, 0x01}, + {0x5b03, 0xdb}, + {0x3d8c, 0x71}, + {0x3d8d, 0xea}, + {0x4017, 0x10}, + {0x3618, 0x2a}, + {0x5780, 0x3e}, + {0x5781, 0x0f}, + {0x5782, 0x44}, + {0x5783, 0x02}, + {0x5784, 0x01}, + {0x5785, 0x01}, + {0x5786, 0x00}, + {0x5787, 0x04}, + {0x5788, 0x02}, + {0x5789, 0x0f}, + {0x578a, 0xfd}, + {0x578b, 0xf5}, + {0x578c, 0xf5}, + {0x578d, 0x03}, + {0x578e, 0x08}, + {0x578f, 0x0c}, + {0x5790, 0x08}, + {0x5791, 0x06}, + {0x5792, 0x00}, + {0x5793, 0x52}, + {0x5794, 0xa3}, + {0x3503, 0x00} +}; + +static const struct ov5670_reg mode_2560x1440_regs[] = { + {0x3000, 0x00}, + {0x3002, 0x21}, + {0x3005, 0xf0}, + {0x3007, 0x00}, + {0x3015, 0x0f}, + {0x3018, 0x32}, + {0x301a, 0xf0}, + {0x301b, 0xf0}, + {0x301c, 0xf0}, + {0x301d, 0xf0}, + {0x301e, 0xf0}, + {0x3030, 0x00}, + {0x3031, 0x0a}, + {0x303c, 0xff}, + {0x303e, 0xff}, + {0x3040, 0xf0}, + {0x3041, 0x00}, + {0x3042, 0xf0}, + {0x3106, 0x11}, + {0x3500, 0x00}, + {0x3501, 0x80}, + {0x3502, 0x00}, + {0x3503, 0x04}, + {0x3504, 0x03}, + {0x3505, 0x83}, + {0x3508, 0x04}, + {0x3509, 0x00}, + {0x350e, 0x04}, + {0x350f, 0x00}, + {0x3510, 0x00}, + {0x3511, 0x02}, + {0x3512, 0x00}, + {0x3601, 0xc8}, + {0x3610, 0x88}, + {0x3612, 0x48}, + {0x3614, 0x5b}, + {0x3615, 0x96}, + {0x3621, 0xd0}, + {0x3622, 0x00}, + {0x3623, 0x00}, + {0x3633, 0x13}, + {0x3634, 0x13}, + {0x3635, 0x13}, + {0x3636, 0x13}, + {0x3645, 0x13}, + {0x3646, 0x82}, + {0x3650, 0x00}, + {0x3652, 0xff}, + {0x3655, 0x20}, + {0x3656, 0xff}, + {0x365a, 0xff}, + {0x365e, 0xff}, + {0x3668, 0x00}, + {0x366a, 0x07}, + {0x366e, 0x10}, + {0x366d, 0x00}, + {0x366f, 0x80}, + {0x3700, 0x28}, + {0x3701, 0x10}, + {0x3702, 0x3a}, + {0x3703, 0x19}, + {0x3704, 0x10}, + {0x3705, 0x00}, + {0x3706, 0x66}, + {0x3707, 0x08}, + {0x3708, 0x34}, + {0x3709, 0x40}, + {0x370a, 0x01}, + {0x370b, 0x1b}, + {0x3714, 0x24}, + {0x371a, 0x3e}, + {0x3733, 0x00}, + {0x3734, 0x00}, + {0x373a, 0x05}, + {0x373b, 0x06}, + {0x373c, 0x0a}, + {0x373f, 0xa0}, + {0x3755, 0x00}, + {0x3758, 0x00}, + {0x375b, 0x0e}, + {0x3766, 
0x5f}, + {0x3768, 0x00}, + {0x3769, 0x22}, + {0x3773, 0x08}, + {0x3774, 0x1f}, + {0x3776, 0x06}, + {0x37a0, 0x88}, + {0x37a1, 0x5c}, + {0x37a7, 0x88}, + {0x37a8, 0x70}, + {0x37aa, 0x88}, + {0x37ab, 0x48}, + {0x37b3, 0x66}, + {0x37c2, 0x04}, + {0x37c5, 0x00}, + {0x37c8, 0x00}, + {0x3800, 0x00}, + {0x3801, 0x0c}, + {0x3802, 0x00}, + {0x3803, 0x04}, + {0x3804, 0x0a}, + {0x3805, 0x33}, + {0x3806, 0x07}, + {0x3807, 0xa3}, + {0x3808, 0x0a}, + {0x3809, 0x00}, + {0x380a, 0x05}, + {0x380b, 0xa0}, + {0x380c, 0x06}, + {0x380d, 0x90}, + {0x380e, 0x08}, + {0x380f, 0x08}, + {0x3811, 0x04}, + {0x3813, 0x02}, + {0x3814, 0x01}, + {0x3815, 0x01}, + {0x3816, 0x00}, + {0x3817, 0x00}, + {0x3818, 0x00}, + {0x3819, 0x00}, + {0x3820, 0x84}, + {0x3821, 0x46}, + {0x3822, 0x48}, + {0x3826, 0x00}, + {0x3827, 0x08}, + {0x382a, 0x01}, + {0x382b, 0x01}, + {0x3830, 0x08}, + {0x3836, 0x02}, + {0x3837, 0x00}, + {0x3838, 0x10}, + {0x3841, 0xff}, + {0x3846, 0x48}, + {0x3861, 0x00}, + {0x3862, 0x04}, + {0x3863, 0x06}, + {0x3a11, 0x01}, + {0x3a12, 0x78}, + {0x3b00, 0x00}, + {0x3b02, 0x00}, + {0x3b03, 0x00}, + {0x3b04, 0x00}, + {0x3b05, 0x00}, + {0x3c00, 0x89}, + {0x3c01, 0xab}, + {0x3c02, 0x01}, + {0x3c03, 0x00}, + {0x3c04, 0x00}, + {0x3c05, 0x03}, + {0x3c06, 0x00}, + {0x3c07, 0x05}, + {0x3c0c, 0x00}, + {0x3c0d, 0x00}, + {0x3c0e, 0x00}, + {0x3c0f, 0x00}, + {0x3c40, 0x00}, + {0x3c41, 0xa3}, + {0x3c43, 0x7d}, + {0x3c45, 0xd7}, + {0x3c47, 0xfc}, + {0x3c50, 0x05}, + {0x3c52, 0xaa}, + {0x3c54, 0x71}, + {0x3c56, 0x80}, + {0x3d85, 0x17}, + {0x3f03, 0x00}, + {0x3f0a, 0x00}, + {0x3f0b, 0x00}, + {0x4001, 0x60}, + {0x4009, 0x0d}, + {0x4020, 0x00}, + {0x4021, 0x00}, + {0x4022, 0x00}, + {0x4023, 0x00}, + {0x4024, 0x00}, + {0x4025, 0x00}, + {0x4026, 0x00}, + {0x4027, 0x00}, + {0x4028, 0x00}, + {0x4029, 0x00}, + {0x402a, 0x00}, + {0x402b, 0x00}, + {0x402c, 0x00}, + {0x402d, 0x00}, + {0x402e, 0x00}, + {0x402f, 0x00}, + {0x4040, 0x00}, + {0x4041, 0x03}, + {0x4042, 0x00}, + {0x4043, 0x7A}, + {0x4044, 0x00}, + {0x4045, 0x7A}, + {0x4046, 0x00}, + {0x4047, 0x7A}, + {0x4048, 0x00}, + {0x4049, 0x7A}, + {0x4307, 0x30}, + {0x4500, 0x58}, + {0x4501, 0x04}, + {0x4502, 0x40}, + {0x4503, 0x10}, + {0x4508, 0xaa}, + {0x4509, 0xaa}, + {0x450a, 0x00}, + {0x450b, 0x00}, + {0x4600, 0x01}, + {0x4601, 0x00}, + {0x4700, 0xa4}, + {0x4800, 0x4c}, + {0x4816, 0x53}, + {0x481f, 0x40}, + {0x4837, 0x13}, + {0x5000, 0x56}, + {0x5001, 0x01}, + {0x5002, 0x28}, + {0x5004, 0x0c}, + {0x5006, 0x0c}, + {0x5007, 0xe0}, + {0x5008, 0x01}, + {0x5009, 0xb0}, + {0x5901, 0x00}, + {0x5a01, 0x00}, + {0x5a03, 0x00}, + {0x5a04, 0x0c}, + {0x5a05, 0xe0}, + {0x5a06, 0x09}, + {0x5a07, 0xb0}, + {0x5a08, 0x06}, + {0x5e00, 0x00}, + {0x3734, 0x40}, + {0x5b00, 0x01}, + {0x5b01, 0x10}, + {0x5b02, 0x01}, + {0x5b03, 0xdb}, + {0x3d8c, 0x71}, + {0x3d8d, 0xea}, + {0x4017, 0x08}, + {0x3618, 0x2a}, + {0x5780, 0x3e}, + {0x5781, 0x0f}, + {0x5782, 0x44}, + {0x5783, 0x02}, + {0x5784, 0x01}, + {0x5785, 0x01}, + {0x5786, 0x00}, + {0x5787, 0x04}, + {0x5788, 0x02}, + {0x5789, 0x0f}, + {0x578a, 0xfd}, + {0x578b, 0xf5}, + {0x578c, 0xf5}, + {0x578d, 0x03}, + {0x578e, 0x08}, + {0x578f, 0x0c}, + {0x5790, 0x08}, + {0x5791, 0x06}, + {0x5792, 0x00}, + {0x5793, 0x52}, + {0x5794, 0xa3} +}; + +static const struct ov5670_reg mode_1280x720_regs[] = { + {0x3000, 0x00}, + {0x3002, 0x21}, + {0x3005, 0xf0}, + {0x3007, 0x00}, + {0x3015, 0x0f}, + {0x3018, 0x32}, + {0x301a, 0xf0}, + {0x301b, 0xf0}, + {0x301c, 0xf0}, + {0x301d, 0xf0}, + {0x301e, 0xf0}, + {0x3030, 0x00}, + {0x3031, 0x0a}, + {0x303c, 0xff}, + {0x303e, 0xff}, + {0x3040, 
0xf0}, + {0x3041, 0x00}, + {0x3042, 0xf0}, + {0x3106, 0x11}, + {0x3500, 0x00}, + {0x3501, 0x80}, + {0x3502, 0x00}, + {0x3503, 0x04}, + {0x3504, 0x03}, + {0x3505, 0x83}, + {0x3508, 0x04}, + {0x3509, 0x00}, + {0x350e, 0x04}, + {0x350f, 0x00}, + {0x3510, 0x00}, + {0x3511, 0x02}, + {0x3512, 0x00}, + {0x3601, 0xc8}, + {0x3610, 0x88}, + {0x3612, 0x48}, + {0x3614, 0x5b}, + {0x3615, 0x96}, + {0x3621, 0xd0}, + {0x3622, 0x00}, + {0x3623, 0x00}, + {0x3633, 0x13}, + {0x3634, 0x13}, + {0x3635, 0x13}, + {0x3636, 0x13}, + {0x3645, 0x13}, + {0x3646, 0x82}, + {0x3650, 0x00}, + {0x3652, 0xff}, + {0x3655, 0x20}, + {0x3656, 0xff}, + {0x365a, 0xff}, + {0x365e, 0xff}, + {0x3668, 0x00}, + {0x366a, 0x07}, + {0x366e, 0x08}, + {0x366d, 0x00}, + {0x366f, 0x80}, + {0x3700, 0x28}, + {0x3701, 0x10}, + {0x3702, 0x3a}, + {0x3703, 0x19}, + {0x3704, 0x10}, + {0x3705, 0x00}, + {0x3706, 0x66}, + {0x3707, 0x08}, + {0x3708, 0x34}, + {0x3709, 0x40}, + {0x370a, 0x01}, + {0x370b, 0x1b}, + {0x3714, 0x24}, + {0x371a, 0x3e}, + {0x3733, 0x00}, + {0x3734, 0x00}, + {0x373a, 0x05}, + {0x373b, 0x06}, + {0x373c, 0x0a}, + {0x373f, 0xa0}, + {0x3755, 0x00}, + {0x3758, 0x00}, + {0x375b, 0x0e}, + {0x3766, 0x5f}, + {0x3768, 0x00}, + {0x3769, 0x22}, + {0x3773, 0x08}, + {0x3774, 0x1f}, + {0x3776, 0x06}, + {0x37a0, 0x88}, + {0x37a1, 0x5c}, + {0x37a7, 0x88}, + {0x37a8, 0x70}, + {0x37aa, 0x88}, + {0x37ab, 0x48}, + {0x37b3, 0x66}, + {0x37c2, 0x04}, + {0x37c5, 0x00}, + {0x37c8, 0x00}, + {0x3800, 0x00}, + {0x3801, 0x0c}, + {0x3802, 0x00}, + {0x3803, 0x04}, + {0x3804, 0x0a}, + {0x3805, 0x33}, + {0x3806, 0x07}, + {0x3807, 0xa3}, + {0x3808, 0x05}, + {0x3809, 0x00}, + {0x380a, 0x02}, + {0x380b, 0xd0}, + {0x380c, 0x06}, + {0x380d, 0x90}, + {0x380e, 0x08}, + {0x380f, 0x08}, + {0x3811, 0x04}, + {0x3813, 0x02}, + {0x3814, 0x03}, + {0x3815, 0x01}, + {0x3816, 0x00}, + {0x3817, 0x00}, + {0x3818, 0x00}, + {0x3819, 0x00}, + {0x3820, 0x94}, + {0x3821, 0x47}, + {0x3822, 0x48}, + {0x3826, 0x00}, + {0x3827, 0x08}, + {0x382a, 0x03}, + {0x382b, 0x01}, + {0x3830, 0x08}, + {0x3836, 0x02}, + {0x3837, 0x00}, + {0x3838, 0x10}, + {0x3841, 0xff}, + {0x3846, 0x48}, + {0x3861, 0x00}, + {0x3862, 0x04}, + {0x3863, 0x06}, + {0x3a11, 0x01}, + {0x3a12, 0x78}, + {0x3b00, 0x00}, + {0x3b02, 0x00}, + {0x3b03, 0x00}, + {0x3b04, 0x00}, + {0x3b05, 0x00}, + {0x3c00, 0x89}, + {0x3c01, 0xab}, + {0x3c02, 0x01}, + {0x3c03, 0x00}, + {0x3c04, 0x00}, + {0x3c05, 0x03}, + {0x3c06, 0x00}, + {0x3c07, 0x05}, + {0x3c0c, 0x00}, + {0x3c0d, 0x00}, + {0x3c0e, 0x00}, + {0x3c0f, 0x00}, + {0x3c40, 0x00}, + {0x3c41, 0xa3}, + {0x3c43, 0x7d}, + {0x3c45, 0xd7}, + {0x3c47, 0xfc}, + {0x3c50, 0x05}, + {0x3c52, 0xaa}, + {0x3c54, 0x71}, + {0x3c56, 0x80}, + {0x3d85, 0x17}, + {0x3f03, 0x00}, + {0x3f0a, 0x00}, + {0x3f0b, 0x00}, + {0x4001, 0x60}, + {0x4009, 0x05}, + {0x4020, 0x00}, + {0x4021, 0x00}, + {0x4022, 0x00}, + {0x4023, 0x00}, + {0x4024, 0x00}, + {0x4025, 0x00}, + {0x4026, 0x00}, + {0x4027, 0x00}, + {0x4028, 0x00}, + {0x4029, 0x00}, + {0x402a, 0x00}, + {0x402b, 0x00}, + {0x402c, 0x00}, + {0x402d, 0x00}, + {0x402e, 0x00}, + {0x402f, 0x00}, + {0x4040, 0x00}, + {0x4041, 0x03}, + {0x4042, 0x00}, + {0x4043, 0x7A}, + {0x4044, 0x00}, + {0x4045, 0x7A}, + {0x4046, 0x00}, + {0x4047, 0x7A}, + {0x4048, 0x00}, + {0x4049, 0x7A}, + {0x4307, 0x30}, + {0x4500, 0x58}, + {0x4501, 0x04}, + {0x4502, 0x48}, + {0x4503, 0x10}, + {0x4508, 0x55}, + {0x4509, 0x55}, + {0x450a, 0x00}, + {0x450b, 0x00}, + {0x4600, 0x00}, + {0x4601, 0x80}, + {0x4700, 0xa4}, + {0x4800, 0x4c}, + {0x4816, 0x53}, + {0x481f, 0x40}, + {0x4837, 0x13}, + {0x5000, 0x56}, + 
{0x5001, 0x01}, + {0x5002, 0x28}, + {0x5004, 0x0c}, + {0x5006, 0x0c}, + {0x5007, 0xe0}, + {0x5008, 0x01}, + {0x5009, 0xb0}, + {0x5901, 0x00}, + {0x5a01, 0x00}, + {0x5a03, 0x00}, + {0x5a04, 0x0c}, + {0x5a05, 0xe0}, + {0x5a06, 0x09}, + {0x5a07, 0xb0}, + {0x5a08, 0x06}, + {0x5e00, 0x00}, + {0x3734, 0x40}, + {0x5b00, 0x01}, + {0x5b01, 0x10}, + {0x5b02, 0x01}, + {0x5b03, 0xdb}, + {0x3d8c, 0x71}, + {0x3d8d, 0xea}, + {0x4017, 0x10}, + {0x3618, 0x2a}, + {0x5780, 0x3e}, + {0x5781, 0x0f}, + {0x5782, 0x44}, + {0x5783, 0x02}, + {0x5784, 0x01}, + {0x5785, 0x01}, + {0x5786, 0x00}, + {0x5787, 0x04}, + {0x5788, 0x02}, + {0x5789, 0x0f}, + {0x578a, 0xfd}, + {0x578b, 0xf5}, + {0x578c, 0xf5}, + {0x578d, 0x03}, + {0x578e, 0x08}, + {0x578f, 0x0c}, + {0x5790, 0x08}, + {0x5791, 0x06}, + {0x5792, 0x00}, + {0x5793, 0x52}, + {0x5794, 0xa3}, + {0x3503, 0x00} +}; + +static const struct ov5670_reg mode_640x360_regs[] = { + {0x3000, 0x00}, + {0x3002, 0x21}, + {0x3005, 0xf0}, + {0x3007, 0x00}, + {0x3015, 0x0f}, + {0x3018, 0x32}, + {0x301a, 0xf0}, + {0x301b, 0xf0}, + {0x301c, 0xf0}, + {0x301d, 0xf0}, + {0x301e, 0xf0}, + {0x3030, 0x00}, + {0x3031, 0x0a}, + {0x303c, 0xff}, + {0x303e, 0xff}, + {0x3040, 0xf0}, + {0x3041, 0x00}, + {0x3042, 0xf0}, + {0x3106, 0x11}, + {0x3500, 0x00}, + {0x3501, 0x80}, + {0x3502, 0x00}, + {0x3503, 0x04}, + {0x3504, 0x03}, + {0x3505, 0x83}, + {0x3508, 0x04}, + {0x3509, 0x00}, + {0x350e, 0x04}, + {0x350f, 0x00}, + {0x3510, 0x00}, + {0x3511, 0x02}, + {0x3512, 0x00}, + {0x3601, 0xc8}, + {0x3610, 0x88}, + {0x3612, 0x48}, + {0x3614, 0x5b}, + {0x3615, 0x96}, + {0x3621, 0xd0}, + {0x3622, 0x00}, + {0x3623, 0x04}, + {0x3633, 0x13}, + {0x3634, 0x13}, + {0x3635, 0x13}, + {0x3636, 0x13}, + {0x3645, 0x13}, + {0x3646, 0x82}, + {0x3650, 0x00}, + {0x3652, 0xff}, + {0x3655, 0x20}, + {0x3656, 0xff}, + {0x365a, 0xff}, + {0x365e, 0xff}, + {0x3668, 0x00}, + {0x366a, 0x07}, + {0x366e, 0x08}, + {0x366d, 0x00}, + {0x366f, 0x80}, + {0x3700, 0x28}, + {0x3701, 0x10}, + {0x3702, 0x3a}, + {0x3703, 0x19}, + {0x3704, 0x10}, + {0x3705, 0x00}, + {0x3706, 0x66}, + {0x3707, 0x08}, + {0x3708, 0x34}, + {0x3709, 0x40}, + {0x370a, 0x01}, + {0x370b, 0x1b}, + {0x3714, 0x24}, + {0x371a, 0x3e}, + {0x3733, 0x00}, + {0x3734, 0x00}, + {0x373a, 0x05}, + {0x373b, 0x06}, + {0x373c, 0x0a}, + {0x373f, 0xa0}, + {0x3755, 0x00}, + {0x3758, 0x00}, + {0x375b, 0x0e}, + {0x3766, 0x5f}, + {0x3768, 0x00}, + {0x3769, 0x22}, + {0x3773, 0x08}, + {0x3774, 0x1f}, + {0x3776, 0x06}, + {0x37a0, 0x88}, + {0x37a1, 0x5c}, + {0x37a7, 0x88}, + {0x37a8, 0x70}, + {0x37aa, 0x88}, + {0x37ab, 0x48}, + {0x37b3, 0x66}, + {0x37c2, 0x04}, + {0x37c5, 0x00}, + {0x37c8, 0x00}, + {0x3800, 0x00}, + {0x3801, 0x0c}, + {0x3802, 0x00}, + {0x3803, 0x04}, + {0x3804, 0x0a}, + {0x3805, 0x33}, + {0x3806, 0x07}, + {0x3807, 0xa3}, + {0x3808, 0x02}, + {0x3809, 0x80}, + {0x380a, 0x01}, + {0x380b, 0x68}, + {0x380c, 0x06}, + {0x380d, 0x90}, + {0x380e, 0x08}, + {0x380f, 0x08}, + {0x3811, 0x04}, + {0x3813, 0x02}, + {0x3814, 0x07}, + {0x3815, 0x01}, + {0x3816, 0x00}, + {0x3817, 0x00}, + {0x3818, 0x00}, + {0x3819, 0x00}, + {0x3820, 0x94}, + {0x3821, 0xc6}, + {0x3822, 0x48}, + {0x3826, 0x00}, + {0x3827, 0x08}, + {0x382a, 0x07}, + {0x382b, 0x01}, + {0x3830, 0x08}, + {0x3836, 0x02}, + {0x3837, 0x00}, + {0x3838, 0x10}, + {0x3841, 0xff}, + {0x3846, 0x48}, + {0x3861, 0x00}, + {0x3862, 0x04}, + {0x3863, 0x06}, + {0x3a11, 0x01}, + {0x3a12, 0x78}, + {0x3b00, 0x00}, + {0x3b02, 0x00}, + {0x3b03, 0x00}, + {0x3b04, 0x00}, + {0x3b05, 0x00}, + {0x3c00, 0x89}, + {0x3c01, 0xab}, + {0x3c02, 0x01}, + {0x3c03, 0x00}, + 
{0x3c04, 0x00}, + {0x3c05, 0x03}, + {0x3c06, 0x00}, + {0x3c07, 0x05}, + {0x3c0c, 0x00}, + {0x3c0d, 0x00}, + {0x3c0e, 0x00}, + {0x3c0f, 0x00}, + {0x3c40, 0x00}, + {0x3c41, 0xa3}, + {0x3c43, 0x7d}, + {0x3c45, 0xd7}, + {0x3c47, 0xfc}, + {0x3c50, 0x05}, + {0x3c52, 0xaa}, + {0x3c54, 0x71}, + {0x3c56, 0x80}, + {0x3d85, 0x17}, + {0x3f03, 0x00}, + {0x3f0a, 0x00}, + {0x3f0b, 0x00}, + {0x4001, 0x60}, + {0x4009, 0x05}, + {0x4020, 0x00}, + {0x4021, 0x00}, + {0x4022, 0x00}, + {0x4023, 0x00}, + {0x4024, 0x00}, + {0x4025, 0x00}, + {0x4026, 0x00}, + {0x4027, 0x00}, + {0x4028, 0x00}, + {0x4029, 0x00}, + {0x402a, 0x00}, + {0x402b, 0x00}, + {0x402c, 0x00}, + {0x402d, 0x00}, + {0x402e, 0x00}, + {0x402f, 0x00}, + {0x4040, 0x00}, + {0x4041, 0x03}, + {0x4042, 0x00}, + {0x4043, 0x7A}, + {0x4044, 0x00}, + {0x4045, 0x7A}, + {0x4046, 0x00}, + {0x4047, 0x7A}, + {0x4048, 0x00}, + {0x4049, 0x7A}, + {0x4307, 0x30}, + {0x4500, 0x58}, + {0x4501, 0x04}, + {0x4502, 0x40}, + {0x4503, 0x10}, + {0x4508, 0x55}, + {0x4509, 0x55}, + {0x450a, 0x02}, + {0x450b, 0x00}, + {0x4600, 0x00}, + {0x4601, 0x40}, + {0x4700, 0xa4}, + {0x4800, 0x4c}, + {0x4816, 0x53}, + {0x481f, 0x40}, + {0x4837, 0x13}, + {0x5000, 0x56}, + {0x5001, 0x01}, + {0x5002, 0x28}, + {0x5004, 0x0c}, + {0x5006, 0x0c}, + {0x5007, 0xe0}, + {0x5008, 0x01}, + {0x5009, 0xb0}, + {0x5901, 0x00}, + {0x5a01, 0x00}, + {0x5a03, 0x00}, + {0x5a04, 0x0c}, + {0x5a05, 0xe0}, + {0x5a06, 0x09}, + {0x5a07, 0xb0}, + {0x5a08, 0x06}, + {0x5e00, 0x00}, + {0x3734, 0x40}, + {0x5b00, 0x01}, + {0x5b01, 0x10}, + {0x5b02, 0x01}, + {0x5b03, 0xdb}, + {0x3d8c, 0x71}, + {0x3d8d, 0xea}, + {0x4017, 0x10}, + {0x3618, 0x2a}, + {0x5780, 0x3e}, + {0x5781, 0x0f}, + {0x5782, 0x44}, + {0x5783, 0x02}, + {0x5784, 0x01}, + {0x5785, 0x01}, + {0x5786, 0x00}, + {0x5787, 0x04}, + {0x5788, 0x02}, + {0x5789, 0x0f}, + {0x578a, 0xfd}, + {0x578b, 0xf5}, + {0x578c, 0xf5}, + {0x578d, 0x03}, + {0x578e, 0x08}, + {0x578f, 0x0c}, + {0x5790, 0x08}, + {0x5791, 0x06}, + {0x5792, 0x00}, + {0x5793, 0x52}, + {0x5794, 0xa3}, + {0x3503, 0x00} +}; + +static const char * const ov5670_test_pattern_menu[] = { + "Disabled", + "Vertical Color Bar Type 1", +}; + +/* Supported link frequencies */ +#define OV5670_LINK_FREQ_422MHZ 422400000 +#define OV5670_LINK_FREQ_422MHZ_INDEX 0 +static const struct ov5670_link_freq_config link_freq_configs[] = { + { + /* pixel_rate = link_freq * 2 * nr_of_lanes / bits_per_sample */ + .pixel_rate = (OV5670_LINK_FREQ_422MHZ * 2 * 2) / 10, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mipi_data_rate_840mbps), + .regs = mipi_data_rate_840mbps, + } + } +}; + +static const s64 link_freq_menu_items[] = { + OV5670_LINK_FREQ_422MHZ +}; + +/* + * OV5670 sensor supports following resolutions with full FOV: + * 4:3 ==> {2592x1944, 1296x972, 648x486} + * 16:9 ==> {2560x1440, 1280x720, 640x360} + */ +static const struct ov5670_mode supported_modes[] = { + { + .width = 2592, + .height = 1944, + .vts_def = OV5670_VTS_30FPS, + .vts_min = OV5670_VTS_30FPS, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mode_2592x1944_regs), + .regs = mode_2592x1944_regs, + }, + .link_freq_index = OV5670_LINK_FREQ_422MHZ_INDEX, + }, + { + .width = 1296, + .height = 972, + .vts_def = OV5670_VTS_30FPS, + .vts_min = 996, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mode_1296x972_regs), + .regs = mode_1296x972_regs, + }, + .link_freq_index = OV5670_LINK_FREQ_422MHZ_INDEX, + }, + { + .width = 648, + .height = 486, + .vts_def = OV5670_VTS_30FPS, + .vts_min = 516, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mode_648x486_regs), + .regs = mode_648x486_regs, + 
}, + .link_freq_index = OV5670_LINK_FREQ_422MHZ_INDEX, + }, + { + .width = 2560, + .height = 1440, + .vts_def = OV5670_VTS_30FPS, + .vts_min = OV5670_VTS_30FPS, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mode_2560x1440_regs), + .regs = mode_2560x1440_regs, + }, + .link_freq_index = OV5670_LINK_FREQ_422MHZ_INDEX, + }, + { + .width = 1280, + .height = 720, + .vts_def = OV5670_VTS_30FPS, + .vts_min = 1020, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mode_1280x720_regs), + .regs = mode_1280x720_regs, + }, + .link_freq_index = OV5670_LINK_FREQ_422MHZ_INDEX, + }, + { + .width = 640, + .height = 360, + .vts_def = OV5670_VTS_30FPS, + .vts_min = 510, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mode_640x360_regs), + .regs = mode_640x360_regs, + }, + .link_freq_index = OV5670_LINK_FREQ_422MHZ_INDEX, + } +}; + +struct ov5670 { + struct v4l2_subdev sd; + struct media_pad pad; + + struct v4l2_ctrl_handler ctrl_handler; + /* V4L2 Controls */ + struct v4l2_ctrl *link_freq; + struct v4l2_ctrl *pixel_rate; + struct v4l2_ctrl *vblank; + struct v4l2_ctrl *hblank; + struct v4l2_ctrl *exposure; + + /* Current mode */ + const struct ov5670_mode *cur_mode; + + /* To serialize asynchronous callbacks */ + struct mutex mutex; + + /* Streaming on/off */ + bool streaming; +}; + +#define to_ov5670(_sd) container_of(_sd, struct ov5670, sd) + +/* Read registers up to 4 at a time */ +static int ov5670_read_reg(struct ov5670 *ov5670, u16 reg, unsigned int len, + u32 *val) +{ + struct i2c_client *client = v4l2_get_subdevdata(&ov5670->sd); + struct i2c_msg msgs[2]; + u8 *data_be_p; + u32 data_be = 0; + u16 reg_addr_be = cpu_to_be16(reg); + int ret; + + if (len > 4) + return -EINVAL; + + data_be_p = (u8 *)&data_be; + /* Write register address */ + msgs[0].addr = client->addr; + msgs[0].flags = 0; + msgs[0].len = 2; + msgs[0].buf = (u8 *)&reg_addr_be; + + /* Read data from register */ + msgs[1].addr = client->addr; + msgs[1].flags = I2C_M_RD; + msgs[1].len = len; + msgs[1].buf = &data_be_p[4 - len]; + + ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); + if (ret != ARRAY_SIZE(msgs)) + return -EIO; + + *val = be32_to_cpu(data_be); + + return 0; +} + +/* Write registers up to 4 at a time */ +static int ov5670_write_reg(struct ov5670 *ov5670, u16 reg, unsigned int len, + u32 val) +{ + struct i2c_client *client = v4l2_get_subdevdata(&ov5670->sd); + int buf_i; + int val_i; + u8 buf[6]; + u8 *val_p; + + if (len > 4) + return -EINVAL; + + buf[0] = reg >> 8; + buf[1] = reg & 0xff; + + val = cpu_to_be32(val); + val_p = (u8 *)&val; + buf_i = 2; + val_i = 4 - len; + + while (val_i < 4) + buf[buf_i++] = val_p[val_i++]; + + if (i2c_master_send(client, buf, len + 2) != len + 2) + return -EIO; + + return 0; +} + +/* Write a list of registers */ +static int ov5670_write_regs(struct ov5670 *ov5670, + const struct ov5670_reg *regs, unsigned int len) +{ + struct i2c_client *client = v4l2_get_subdevdata(&ov5670->sd); + unsigned int i; + int ret; + + for (i = 0; i < len; i++) { + ret = ov5670_write_reg(ov5670, regs[i].address, 1, regs[i].val); + if (ret) { + dev_err_ratelimited( + &client->dev, + "Failed to write reg 0x%4.4x. 
error = %d\n", + regs[i].address, ret); + + return ret; + } + } + + return 0; +} + +static int ov5670_write_reg_list(struct ov5670 *ov5670, + const struct ov5670_reg_list *r_list) +{ + return ov5670_write_regs(ov5670, r_list->regs, r_list->num_of_regs); +} + +/* Open sub-device */ +static int ov5670_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) +{ + struct ov5670 *ov5670 = to_ov5670(sd); + struct v4l2_mbus_framefmt *try_fmt = + v4l2_subdev_get_try_format(sd, fh->pad, 0); + + mutex_lock(&ov5670->mutex); + + /* Initialize try_fmt */ + try_fmt->width = ov5670->cur_mode->width; + try_fmt->height = ov5670->cur_mode->height; + try_fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10; + try_fmt->field = V4L2_FIELD_NONE; + + /* No crop or compose */ + mutex_unlock(&ov5670->mutex); + + return 0; +} + +static int ov5670_update_digital_gain(struct ov5670 *ov5670, u32 d_gain) +{ + int ret; + + ret = ov5670_write_reg(ov5670, OV5670_REG_R_DGTL_GAIN, + OV5670_REG_VALUE_16BIT, d_gain); + if (ret) + return ret; + + ret = ov5670_write_reg(ov5670, OV5670_REG_G_DGTL_GAIN, + OV5670_REG_VALUE_16BIT, d_gain); + if (ret) + return ret; + + return ov5670_write_reg(ov5670, OV5670_REG_B_DGTL_GAIN, + OV5670_REG_VALUE_16BIT, d_gain); +} + +static int ov5670_enable_test_pattern(struct ov5670 *ov5670, u32 pattern) +{ + u32 val; + int ret; + + /* Set the bayer order that we support */ + ret = ov5670_write_reg(ov5670, OV5670_REG_TEST_PATTERN_CTRL, + OV5670_REG_VALUE_08BIT, 0); + if (ret) + return ret; + + ret = ov5670_read_reg(ov5670, OV5670_REG_TEST_PATTERN, + OV5670_REG_VALUE_08BIT, &val); + if (ret) + return ret; + + if (pattern) + val |= OV5670_TEST_PATTERN_ENABLE; + else + val &= ~OV5670_TEST_PATTERN_ENABLE; + + return ov5670_write_reg(ov5670, OV5670_REG_TEST_PATTERN, + OV5670_REG_VALUE_08BIT, val); +} + +/* Initialize control handlers */ +static int ov5670_set_ctrl(struct v4l2_ctrl *ctrl) +{ + struct ov5670 *ov5670 = container_of(ctrl->handler, + struct ov5670, ctrl_handler); + struct i2c_client *client = v4l2_get_subdevdata(&ov5670->sd); + s64 max; + int ret = 0; + + /* Propagate change of current control to all related controls */ + switch (ctrl->id) { + case V4L2_CID_VBLANK: + /* Update max exposure while meeting expected vblanking */ + max = ov5670->cur_mode->height + ctrl->val - 8; + __v4l2_ctrl_modify_range(ov5670->exposure, + ov5670->exposure->minimum, max, + ov5670->exposure->step, max); + break; + } + + /* V4L2 controls values will be applied only when power is already up */ + if (pm_runtime_get_if_in_use(&client->dev) <= 0) + return 0; + + switch (ctrl->id) { + case V4L2_CID_ANALOGUE_GAIN: + ret = ov5670_write_reg(ov5670, OV5670_REG_ANALOG_GAIN, + OV5670_REG_VALUE_16BIT, ctrl->val); + break; + case V4L2_CID_DIGITAL_GAIN: + ret = ov5670_update_digital_gain(ov5670, ctrl->val); + break; + case V4L2_CID_EXPOSURE: + /* 4 least significant bits of exposure are fractional part */ + ret = ov5670_write_reg(ov5670, OV5670_REG_EXPOSURE, + OV5670_REG_VALUE_24BIT, ctrl->val << 4); + break; + case V4L2_CID_VBLANK: + /* Update VTS that meets expected vertical blanking */ + ret = ov5670_write_reg(ov5670, OV5670_REG_VTS, + OV5670_REG_VALUE_16BIT, + ov5670->cur_mode->height + ctrl->val); + break; + case V4L2_CID_TEST_PATTERN: + ret = ov5670_enable_test_pattern(ov5670, ctrl->val); + break; + default: + dev_info(&client->dev, "%s Unhandled id:0x%x, val:0x%x\n", + __func__, ctrl->id, ctrl->val); + break; + } + + pm_runtime_put(&client->dev); + + return ret; +} + +static const struct v4l2_ctrl_ops ov5670_ctrl_ops = { + .s_ctrl = 
ov5670_set_ctrl, +}; + +/* Initialize control handlers */ +static int ov5670_init_controls(struct ov5670 *ov5670) +{ + struct v4l2_ctrl_handler *ctrl_hdlr; + s64 vblank_max; + s64 vblank_def; + s64 vblank_min; + s64 exposure_max; + int ret; + + ctrl_hdlr = &ov5670->ctrl_handler; + ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8); + if (ret) + return ret; + + ctrl_hdlr->lock = &ov5670->mutex; + ov5670->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr, + &ov5670_ctrl_ops, + V4L2_CID_LINK_FREQ, + 0, 0, link_freq_menu_items); + if (ov5670->link_freq) + ov5670->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY; + + /* By default, V4L2_CID_PIXEL_RATE is read only */ + ov5670->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov5670_ctrl_ops, + V4L2_CID_PIXEL_RATE, 0, + link_freq_configs[0].pixel_rate, + 1, + link_freq_configs[0].pixel_rate); + + vblank_max = OV5670_VTS_MAX - ov5670->cur_mode->height; + vblank_def = ov5670->cur_mode->vts_def - ov5670->cur_mode->height; + vblank_min = ov5670->cur_mode->vts_min - ov5670->cur_mode->height; + ov5670->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov5670_ctrl_ops, + V4L2_CID_VBLANK, vblank_min, + vblank_max, 1, vblank_def); + + ov5670->hblank = v4l2_ctrl_new_std( + ctrl_hdlr, &ov5670_ctrl_ops, V4L2_CID_HBLANK, + OV5670_FIXED_PPL - ov5670->cur_mode->width, + OV5670_FIXED_PPL - ov5670->cur_mode->width, 1, + OV5670_FIXED_PPL - ov5670->cur_mode->width); + if (ov5670->hblank) + ov5670->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY; + + /* Get min, max, step, default from sensor */ + v4l2_ctrl_new_std(ctrl_hdlr, &ov5670_ctrl_ops, V4L2_CID_ANALOGUE_GAIN, + ANALOG_GAIN_MIN, ANALOG_GAIN_MAX, ANALOG_GAIN_STEP, + ANALOG_GAIN_DEFAULT); + + /* Digital gain */ + v4l2_ctrl_new_std(ctrl_hdlr, &ov5670_ctrl_ops, V4L2_CID_DIGITAL_GAIN, + OV5670_DGTL_GAIN_MIN, OV5670_DGTL_GAIN_MAX, + OV5670_DGTL_GAIN_STEP, OV5670_DGTL_GAIN_DEFAULT); + + /* Get min, max, step, default from sensor */ + exposure_max = ov5670->cur_mode->vts_def - 8; + ov5670->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &ov5670_ctrl_ops, + V4L2_CID_EXPOSURE, + OV5670_EXPOSURE_MIN, + exposure_max, OV5670_EXPOSURE_STEP, + exposure_max); + + v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &ov5670_ctrl_ops, + V4L2_CID_TEST_PATTERN, + ARRAY_SIZE(ov5670_test_pattern_menu) - 1, + 0, 0, ov5670_test_pattern_menu); + + if (ctrl_hdlr->error) { + ret = ctrl_hdlr->error; + goto error; + } + + ov5670->sd.ctrl_handler = ctrl_hdlr; + + return 0; + +error: + v4l2_ctrl_handler_free(ctrl_hdlr); + + return ret; +} + +static int ov5670_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_mbus_code_enum *code) +{ + /* Only one bayer order GRBG is supported */ + if (code->index > 0) + return -EINVAL; + + code->code = MEDIA_BUS_FMT_SGRBG10_1X10; + + return 0; +} + +static int ov5670_enum_frame_size(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_frame_size_enum *fse) +{ + if (fse->index >= ARRAY_SIZE(supported_modes)) + return -EINVAL; + + if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10) + return -EINVAL; + + fse->min_width = supported_modes[fse->index].width; + fse->max_width = fse->min_width; + fse->min_height = supported_modes[fse->index].height; + fse->max_height = fse->min_height; + + return 0; +} + +/* Calculate resolution distance */ +static int ov5670_get_reso_dist(const struct ov5670_mode *mode, + struct v4l2_mbus_framefmt *framefmt) +{ + return abs(mode->width - framefmt->width) + + abs(mode->height - framefmt->height); +} + +/* Find the closest supported resolution to the requested resolution 
*/ +static const struct ov5670_mode *ov5670_find_best_fit( + struct ov5670 *ov5670, + struct v4l2_subdev_format *fmt) +{ + struct v4l2_mbus_framefmt *framefmt = &fmt->format; + int dist; + int cur_best_fit = 0; + int cur_best_fit_dist = -1; + int i; + + for (i = 0; i < ARRAY_SIZE(supported_modes); i++) { + dist = ov5670_get_reso_dist(&supported_modes[i], framefmt); + if (cur_best_fit_dist == -1 || dist < cur_best_fit_dist) { + cur_best_fit_dist = dist; + cur_best_fit = i; + } + } + + return &supported_modes[cur_best_fit]; +} + +static void ov5670_update_pad_format(const struct ov5670_mode *mode, + struct v4l2_subdev_format *fmt) +{ + fmt->format.width = mode->width; + fmt->format.height = mode->height; + fmt->format.code = MEDIA_BUS_FMT_SGRBG10_1X10; + fmt->format.field = V4L2_FIELD_NONE; +} + +static int ov5670_do_get_pad_format(struct ov5670 *ov5670, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) + fmt->format = *v4l2_subdev_get_try_format(&ov5670->sd, cfg, + fmt->pad); + else + ov5670_update_pad_format(ov5670->cur_mode, fmt); + + return 0; +} + +static int ov5670_get_pad_format(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct ov5670 *ov5670 = to_ov5670(sd); + int ret; + + mutex_lock(&ov5670->mutex); + ret = ov5670_do_get_pad_format(ov5670, cfg, fmt); + mutex_unlock(&ov5670->mutex); + + return ret; +} + +static int ov5670_set_pad_format(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct ov5670 *ov5670 = to_ov5670(sd); + const struct ov5670_mode *mode; + s32 vblank_def; + s32 h_blank; + + mutex_lock(&ov5670->mutex); + + fmt->format.code = MEDIA_BUS_FMT_SGRBG10_1X10; + + mode = ov5670_find_best_fit(ov5670, fmt); + ov5670_update_pad_format(mode, fmt); + if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { + *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format; + } else { + ov5670->cur_mode = mode; + __v4l2_ctrl_s_ctrl(ov5670->link_freq, mode->link_freq_index); + __v4l2_ctrl_s_ctrl_int64( + ov5670->pixel_rate, + link_freq_configs[mode->link_freq_index].pixel_rate); + /* Update limits and set FPS to default */ + vblank_def = ov5670->cur_mode->vts_def - + ov5670->cur_mode->height; + __v4l2_ctrl_modify_range( + ov5670->vblank, + ov5670->cur_mode->vts_min - ov5670->cur_mode->height, + OV5670_VTS_MAX - ov5670->cur_mode->height, 1, + vblank_def); + __v4l2_ctrl_s_ctrl(ov5670->vblank, vblank_def); + h_blank = OV5670_FIXED_PPL - ov5670->cur_mode->width; + __v4l2_ctrl_modify_range(ov5670->hblank, h_blank, h_blank, 1, + h_blank); + } + + mutex_unlock(&ov5670->mutex); + + return 0; +} + +static int ov5670_get_skip_frames(struct v4l2_subdev *sd, u32 *frames) +{ + *frames = OV5670_NUM_OF_SKIP_FRAMES; + + return 0; +} + +/* Prepare streaming by writing default values and customized values */ +static int ov5670_start_streaming(struct ov5670 *ov5670) +{ + struct i2c_client *client = v4l2_get_subdevdata(&ov5670->sd); + const struct ov5670_reg_list *reg_list; + int link_freq_index; + int ret; + + /* Get out of from software reset */ + ret = ov5670_write_reg(ov5670, OV5670_REG_SOFTWARE_RST, + OV5670_REG_VALUE_08BIT, OV5670_SOFTWARE_RST); + if (ret) { + dev_err(&client->dev, "%s failed to set powerup registers\n", + __func__); + return ret; + } + + /* Setup PLL */ + link_freq_index = ov5670->cur_mode->link_freq_index; + reg_list = &link_freq_configs[link_freq_index].reg_list; + ret = ov5670_write_reg_list(ov5670, reg_list); + if 
(ret) { + dev_err(&client->dev, "%s failed to set plls\n", __func__); + return ret; + } + + /* Apply default values of current mode */ + reg_list = &ov5670->cur_mode->reg_list; + ret = ov5670_write_reg_list(ov5670, reg_list); + if (ret) { + dev_err(&client->dev, "%s failed to set mode\n", __func__); + return ret; + } + + ret = __v4l2_ctrl_handler_setup(ov5670->sd.ctrl_handler); + if (ret) + return ret; + + /* Write stream on list */ + ret = ov5670_write_reg(ov5670, OV5670_REG_MODE_SELECT, + OV5670_REG_VALUE_08BIT, OV5670_MODE_STREAMING); + if (ret) { + dev_err(&client->dev, "%s failed to set stream\n", __func__); + return ret; + } + + ov5670->streaming = true; + + return 0; +} + +static int ov5670_stop_streaming(struct ov5670 *ov5670) +{ + struct i2c_client *client = v4l2_get_subdevdata(&ov5670->sd); + int ret; + + ret = ov5670_write_reg(ov5670, OV5670_REG_MODE_SELECT, + OV5670_REG_VALUE_08BIT, OV5670_MODE_STANDBY); + if (ret) + dev_err(&client->dev, "%s failed to set stream\n", __func__); + + ov5670->streaming = false; + + /* Return success even if it was an error, as there is nothing the + * caller can do about it. + */ + return 0; +} + +static int ov5670_set_stream(struct v4l2_subdev *sd, int enable) +{ + struct ov5670 *ov5670 = to_ov5670(sd); + struct i2c_client *client = v4l2_get_subdevdata(sd); + int ret = 0; + + mutex_lock(&ov5670->mutex); + if (ov5670->streaming == enable) + goto unlock_and_return; + + if (enable) { + ret = pm_runtime_get_sync(&client->dev); + if (ret < 0) { + pm_runtime_put_noidle(&client->dev); + goto unlock_and_return; + } + + ret = ov5670_start_streaming(ov5670); + if (ret) + goto error; + } else { + ret = ov5670_stop_streaming(ov5670); + pm_runtime_put(&client->dev); + } + goto unlock_and_return; + +error: + pm_runtime_put(&client->dev); + +unlock_and_return: + mutex_unlock(&ov5670->mutex); + + return ret; +} + +static int __maybe_unused ov5670_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct ov5670 *ov5670 = to_ov5670(sd); + + if (ov5670->streaming) + ov5670_stop_streaming(ov5670); + + return 0; +} + +static int __maybe_unused ov5670_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct ov5670 *ov5670 = to_ov5670(sd); + int ret; + + if (ov5670->streaming) { + ret = ov5670_start_streaming(ov5670); + if (ret) { + ov5670_stop_streaming(ov5670); + return ret; + } + } + + return 0; +} + +/* Verify chip ID */ +static int ov5670_identify_module(struct ov5670 *ov5670) +{ + struct i2c_client *client = v4l2_get_subdevdata(&ov5670->sd); + int ret; + u32 val; + + ret = ov5670_read_reg(ov5670, OV5670_REG_CHIP_ID, + OV5670_REG_VALUE_24BIT, &val); + if (ret) + return ret; + + if (val != OV5670_CHIP_ID) { + dev_err(&client->dev, "chip id mismatch: %x!=%x\n", + OV5670_CHIP_ID, val); + return -ENXIO; + } + + return 0; +} + +static const struct v4l2_subdev_video_ops ov5670_video_ops = { + .s_stream = ov5670_set_stream, +}; + +static const struct v4l2_subdev_pad_ops ov5670_pad_ops = { + .enum_mbus_code = ov5670_enum_mbus_code, + .get_fmt = ov5670_get_pad_format, + .set_fmt = ov5670_set_pad_format, + .enum_frame_size = ov5670_enum_frame_size, +}; + +static const struct v4l2_subdev_sensor_ops ov5670_sensor_ops = { + .g_skip_frames = ov5670_get_skip_frames, +}; + +static const struct v4l2_subdev_ops ov5670_subdev_ops = { + .video = &ov5670_video_ops, + .pad = &ov5670_pad_ops, + .sensor = 
&ov5670_sensor_ops, +}; + +static const struct media_entity_operations ov5670_subdev_entity_ops = { + .link_validate = v4l2_subdev_link_validate, +}; + +static const struct v4l2_subdev_internal_ops ov5670_internal_ops = { + .open = ov5670_open, +}; + +static int ov5670_probe(struct i2c_client *client) +{ + struct ov5670 *ov5670; + const char *err_msg; + u32 input_clk = 0; + int ret; + + device_property_read_u32(&client->dev, "clock-frequency", &input_clk); + if (input_clk != 19200000) + return -EINVAL; + + ov5670 = devm_kzalloc(&client->dev, sizeof(*ov5670), GFP_KERNEL); + if (!ov5670) { + ret = -ENOMEM; + err_msg = "devm_kzalloc() error"; + goto error_print; + } + + /* Initialize subdev */ + v4l2_i2c_subdev_init(&ov5670->sd, client, &ov5670_subdev_ops); + + /* Check module identity */ + ret = ov5670_identify_module(ov5670); + if (ret) { + err_msg = "ov5670_identify_module() error"; + goto error_print; + } + + mutex_init(&ov5670->mutex); + + /* Set default mode to max resolution */ + ov5670->cur_mode = &supported_modes[0]; + + ret = ov5670_init_controls(ov5670); + if (ret) { + err_msg = "ov5670_init_controls() error"; + goto error_mutex_destroy; + } + + ov5670->sd.internal_ops = &ov5670_internal_ops; + ov5670->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + ov5670->sd.entity.ops = &ov5670_subdev_entity_ops; + ov5670->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR; + + /* Source pad initialization */ + ov5670->pad.flags = MEDIA_PAD_FL_SOURCE; + ret = media_entity_pads_init(&ov5670->sd.entity, 1, &ov5670->pad); + if (ret) { + err_msg = "media_entity_pads_init() error"; + goto error_handler_free; + } + + /* Async register for subdev */ + ret = v4l2_async_register_subdev(&ov5670->sd); + if (ret < 0) { + err_msg = "v4l2_async_register_subdev() error"; + goto error_entity_cleanup; + } + + ov5670->streaming = false; + + /* + * Device is already turned on by i2c-core with ACPI domain PM. + * Enable runtime PM and turn off the device. + */ + pm_runtime_get_noresume(&client->dev); + pm_runtime_set_active(&client->dev); + pm_runtime_enable(&client->dev); + pm_runtime_put(&client->dev); + + return 0; + +error_entity_cleanup: + media_entity_cleanup(&ov5670->sd.entity); + +error_handler_free: + v4l2_ctrl_handler_free(ov5670->sd.ctrl_handler); + +error_mutex_destroy: + mutex_destroy(&ov5670->mutex); + +error_print: + dev_err(&client->dev, "%s: %s %d\n", __func__, err_msg, ret); + + return ret; +} + +static int ov5670_remove(struct i2c_client *client) +{ + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct ov5670 *ov5670 = to_ov5670(sd); + + v4l2_async_unregister_subdev(sd); + media_entity_cleanup(&sd->entity); + v4l2_ctrl_handler_free(sd->ctrl_handler); + mutex_destroy(&ov5670->mutex); + + /* + * Disable runtime PM but keep the device turned on. + * i2c-core with ACPI domain PM will turn off the device. 
+ */ + pm_runtime_get_sync(&client->dev); + pm_runtime_disable(&client->dev); + pm_runtime_set_suspended(&client->dev); + pm_runtime_put_noidle(&client->dev); + + return 0; +} + +static const struct dev_pm_ops ov5670_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(ov5670_suspend, ov5670_resume) +}; + +#ifdef CONFIG_ACPI +static const struct acpi_device_id ov5670_acpi_ids[] = { + {"INT3479"}, + { /* sentinel */ } +}; + +MODULE_DEVICE_TABLE(acpi, ov5670_acpi_ids); +#endif + +static struct i2c_driver ov5670_i2c_driver = { + .driver = { + .name = "ov5670", + .pm = &ov5670_pm_ops, + .acpi_match_table = ACPI_PTR(ov5670_acpi_ids), + }, + .probe_new = ov5670_probe, + .remove = ov5670_remove, +}; + +module_i2c_driver(ov5670_i2c_driver); + +MODULE_AUTHOR("Rapolu, Chiranjeevi "); +MODULE_AUTHOR("Yang, Hyungwoo "); +MODULE_DESCRIPTION("Omnivision ov5670 sensor driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/i2c/soc_camera/ov6650.c b/drivers/media/i2c/ov6650.c similarity index 92% rename from drivers/media/i2c/soc_camera/ov6650.c rename to drivers/media/i2c/ov6650.c index d2be64d54b22..768f2950ea36 100644 --- a/drivers/media/i2c/soc_camera/ov6650.c +++ b/drivers/media/i2c/ov6650.c @@ -1,5 +1,5 @@ /* - * V4L2 SoC Camera driver for OmniVision OV6650 Camera Sensor + * V4L2 subdevice driver for OmniVision OV6650 Camera Sensor * * Copyright (C) 2010 Janusz Krzysztofik * @@ -31,9 +31,9 @@ #include #include -#include #include #include +#include /* Register definitions */ #define REG_GAIN 0x00 /* range 00 - 3F */ @@ -426,10 +426,15 @@ static int ov6650_set_register(struct v4l2_subdev *sd, static int ov6650_s_power(struct v4l2_subdev *sd, int on) { struct i2c_client *client = v4l2_get_subdevdata(sd); - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); struct ov6650 *priv = to_ov6650(client); + int ret = 0; - return soc_camera_set_power(&client->dev, ssdd, priv->clk, on); + if (on) + ret = v4l2_clk_enable(priv->clk); + else + v4l2_clk_disable(priv->clk); + + return ret; } static int ov6650_get_selection(struct v4l2_subdev *sd, @@ -471,14 +476,13 @@ static int ov6650_set_selection(struct v4l2_subdev *sd, sel->target != V4L2_SEL_TGT_CROP) return -EINVAL; - rect.left = ALIGN(rect.left, 2); - rect.width = ALIGN(rect.width, 2); - rect.top = ALIGN(rect.top, 2); - rect.height = ALIGN(rect.height, 2); - soc_camera_limit_side(&rect.left, &rect.width, - DEF_HSTRT << 1, 2, W_CIF); - soc_camera_limit_side(&rect.top, &rect.height, - DEF_VSTRT << 1, 2, H_CIF); + v4l_bound_align_image(&rect.width, 2, W_CIF, 1, + &rect.height, 2, H_CIF, 1, 0); + v4l_bound_align_image(&rect.left, DEF_HSTRT << 1, + (DEF_HSTRT << 1) + W_CIF - (__s32)rect.width, 1, + &rect.top, DEF_VSTRT << 1, + (DEF_VSTRT << 1) + H_CIF - (__s32)rect.height, 1, + 0); ret = ov6650_reg_write(client, REG_HSTRT, rect.left >> 1); if (!ret) { @@ -547,8 +551,6 @@ static u8 to_clkrc(struct v4l2_fract *timeperframe, static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) { struct i2c_client *client = v4l2_get_subdevdata(sd); - struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd); - struct soc_camera_sense *sense = icd->sense; struct ov6650 *priv = to_ov6650(client); bool half_scale = !is_unscaled_ok(mf->width, mf->height, &priv->rect); struct v4l2_subdev_selection sel = { @@ -640,32 +642,10 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) } priv->half_scale = half_scale; - if (sense) { - if (sense->master_clock == 8000000) { - dev_dbg(&client->dev, "8MHz input clock\n"); - clkrc = 
CLKRC_6MHz; - } else if (sense->master_clock == 12000000) { - dev_dbg(&client->dev, "12MHz input clock\n"); - clkrc = CLKRC_12MHz; - } else if (sense->master_clock == 16000000) { - dev_dbg(&client->dev, "16MHz input clock\n"); - clkrc = CLKRC_16MHz; - } else if (sense->master_clock == 24000000) { - dev_dbg(&client->dev, "24MHz input clock\n"); - clkrc = CLKRC_24MHz; - } else { - dev_err(&client->dev, - "unsupported input clock, check platform data\n"); - return -EINVAL; - } - mclk = sense->master_clock; - priv->pclk_limit = sense->pixel_clock_max; - } else { - clkrc = CLKRC_24MHz; - mclk = 24000000; - priv->pclk_limit = 0; - dev_dbg(&client->dev, "using default 24MHz input clock\n"); - } + clkrc = CLKRC_12MHz; + mclk = 12000000; + priv->pclk_limit = 1334000; + dev_dbg(&client->dev, "using 12MHz input clock\n"); clkrc |= to_clkrc(&priv->tpf, priv->pclk_limit, priv->pclk_max); @@ -899,8 +879,6 @@ static const struct v4l2_subdev_core_ops ov6650_core_ops = { static int ov6650_g_mbus_config(struct v4l2_subdev *sd, struct v4l2_mbus_config *cfg) { - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); cfg->flags = V4L2_MBUS_MASTER | V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING | @@ -908,7 +886,6 @@ static int ov6650_g_mbus_config(struct v4l2_subdev *sd, V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_LOW | V4L2_MBUS_DATA_ACTIVE_HIGH; cfg->type = V4L2_MBUS_PARALLEL; - cfg->flags = soc_camera_apply_board_flags(ssdd, cfg); return 0; } @@ -918,25 +895,23 @@ static int ov6650_s_mbus_config(struct v4l2_subdev *sd, const struct v4l2_mbus_config *cfg) { struct i2c_client *client = v4l2_get_subdevdata(sd); - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); - unsigned long flags = soc_camera_apply_board_flags(ssdd, cfg); int ret; - if (flags & V4L2_MBUS_PCLK_SAMPLE_RISING) + if (cfg->flags & V4L2_MBUS_PCLK_SAMPLE_RISING) ret = ov6650_reg_rmw(client, REG_COMJ, COMJ_PCLK_RISING, 0); else ret = ov6650_reg_rmw(client, REG_COMJ, 0, COMJ_PCLK_RISING); if (ret) return ret; - if (flags & V4L2_MBUS_HSYNC_ACTIVE_LOW) + if (cfg->flags & V4L2_MBUS_HSYNC_ACTIVE_LOW) ret = ov6650_reg_rmw(client, REG_COMF, COMF_HREF_LOW, 0); else ret = ov6650_reg_rmw(client, REG_COMF, 0, COMF_HREF_LOW); if (ret) return ret; - if (flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH) + if (cfg->flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH) ret = ov6650_reg_rmw(client, REG_COMJ, COMJ_VSYNC_HIGH, 0); else ret = ov6650_reg_rmw(client, REG_COMJ, 0, COMJ_VSYNC_HIGH); @@ -973,14 +948,8 @@ static int ov6650_probe(struct i2c_client *client, const struct i2c_device_id *did) { struct ov6650 *priv; - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); int ret; - if (!ssdd) { - dev_err(&client->dev, "Missing platform_data for driver\n"); - return -EINVAL; - } - priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL); if (!priv) { dev_err(&client->dev, diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c index 7270c68ed18a..e88549f0e704 100644 --- a/drivers/media/i2c/ov7670.c +++ b/drivers/media/i2c/ov7670.c @@ -1614,8 +1614,10 @@ static int ov7670_probe(struct i2c_client *client, info->clk = devm_clk_get(&client->dev, "xclk"); if (IS_ERR(info->clk)) - return -EPROBE_DEFER; - clk_prepare_enable(info->clk); + return PTR_ERR(info->clk); + ret = clk_prepare_enable(info->clk); + if (ret) + return ret; ret = ov7670_init_gpio(client, info); if (ret) diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c index 
2de2fbb13b85..6ffb460e8589 100644 --- a/drivers/media/i2c/ov9650.c +++ b/drivers/media/i2c/ov9650.c @@ -484,6 +484,7 @@ static int ov965x_set_default_gamma_curve(struct ov965x *ov965x) for (i = 0; i < ARRAY_SIZE(gamma_curve); i++) { int ret = ov965x_write(ov965x->client, addr, gamma_curve[i]); + if (ret < 0) return ret; addr++; @@ -503,6 +504,7 @@ static int ov965x_set_color_matrix(struct ov965x *ov965x) for (i = 0; i < ARRAY_SIZE(mtx); i++) { int ret = ov965x_write(ov965x->client, addr, mtx[i]); + if (ret < 0) return ret; addr++; @@ -611,7 +613,7 @@ static int ov965x_set_banding_filter(struct ov965x *ov965x, int value) } if (value == V4L2_CID_POWER_LINE_FREQUENCY_DISABLED) return 0; - if (WARN_ON(ov965x->fiv == NULL)) + if (WARN_ON(!ov965x->fiv)) return -EINVAL; /* Set minimal exposure time for 50/60 HZ lighting */ if (value == V4L2_CID_POWER_LINE_FREQUENCY_50HZ) @@ -999,44 +1001,47 @@ static int ov965x_initialize_controls(struct ov965x *ov965x) /* Auto/manual white balance */ ctrls->auto_wb = v4l2_ctrl_new_std(hdl, ops, - V4L2_CID_AUTO_WHITE_BALANCE, - 0, 1, 1, 1); + V4L2_CID_AUTO_WHITE_BALANCE, + 0, 1, 1, 1); ctrls->blue_balance = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_BLUE_BALANCE, 0, 0xff, 1, 0x80); ctrls->red_balance = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_RED_BALANCE, - 0, 0xff, 1, 0x80); + 0, 0xff, 1, 0x80); /* Auto/manual exposure */ - ctrls->auto_exp = v4l2_ctrl_new_std_menu(hdl, ops, - V4L2_CID_EXPOSURE_AUTO, - V4L2_EXPOSURE_MANUAL, 0, V4L2_EXPOSURE_AUTO); + ctrls->auto_exp = + v4l2_ctrl_new_std_menu(hdl, ops, + V4L2_CID_EXPOSURE_AUTO, + V4L2_EXPOSURE_MANUAL, 0, + V4L2_EXPOSURE_AUTO); /* Exposure time, in 100 us units. min/max is updated dynamically. */ ctrls->exposure = v4l2_ctrl_new_std(hdl, ops, - V4L2_CID_EXPOSURE_ABSOLUTE, - 2, 1500, 1, 500); + V4L2_CID_EXPOSURE_ABSOLUTE, + 2, 1500, 1, 500); /* Auto/manual gain */ ctrls->auto_gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_AUTOGAIN, - 0, 1, 1, 1); + 0, 1, 1, 1); ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAIN, - 16, 64 * (16 + 15), 1, 64 * 16); + 16, 64 * (16 + 15), 1, 64 * 16); ctrls->saturation = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_SATURATION, - -2, 2, 1, 0); + -2, 2, 1, 0); ctrls->brightness = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_BRIGHTNESS, - -3, 3, 1, 0); + -3, 3, 1, 0); ctrls->sharpness = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_SHARPNESS, - 0, 32, 1, 6); + 0, 32, 1, 6); ctrls->hflip = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HFLIP, 0, 1, 1, 0); ctrls->vflip = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_VFLIP, 0, 1, 1, 0); - ctrls->light_freq = v4l2_ctrl_new_std_menu(hdl, ops, - V4L2_CID_POWER_LINE_FREQUENCY, - V4L2_CID_POWER_LINE_FREQUENCY_60HZ, ~0x7, - V4L2_CID_POWER_LINE_FREQUENCY_50HZ); + ctrls->light_freq = + v4l2_ctrl_new_std_menu(hdl, ops, + V4L2_CID_POWER_LINE_FREQUENCY, + V4L2_CID_POWER_LINE_FREQUENCY_60HZ, ~0x7, + V4L2_CID_POWER_LINE_FREQUENCY_50HZ); v4l2_ctrl_new_std_menu_items(hdl, ops, V4L2_CID_TEST_PATTERN, - ARRAY_SIZE(test_pattern_menu) - 1, 0, 0, - test_pattern_menu); + ARRAY_SIZE(test_pattern_menu) - 1, 0, 0, + test_pattern_menu); if (hdl->error) { ret = hdl->error; v4l2_ctrl_handler_free(hdl); @@ -1121,7 +1126,6 @@ static int __ov965x_set_frame_interval(struct ov965x *ov965x, u64 req_int, err, min_err = ~0ULL; unsigned int i; - if (fi->interval.denominator == 0) return -EINVAL; @@ -1165,7 +1169,8 @@ static int ov965x_s_frame_interval(struct v4l2_subdev *sd, return ret; } -static int ov965x_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg, +static int ov965x_get_fmt(struct v4l2_subdev *sd, + 
struct v4l2_subdev_pad_config *cfg, struct v4l2_subdev_format *fmt) { struct ov965x *ov965x = to_ov965x(sd); @@ -1209,7 +1214,8 @@ static void __ov965x_try_frame_size(struct v4l2_mbus_framefmt *mf, *size = match; } -static int ov965x_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg, +static int ov965x_set_fmt(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, struct v4l2_subdev_format *fmt) { unsigned int index = ARRAY_SIZE(ov965x_formats); @@ -1231,7 +1237,7 @@ static int ov965x_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config mutex_lock(&ov965x->lock); if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { - if (cfg != NULL) { + if (cfg) { mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad); *mf = fmt->format; } @@ -1362,7 +1368,8 @@ static int ov965x_s_stream(struct v4l2_subdev *sd, int on) */ static int ov965x_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { - struct v4l2_mbus_framefmt *mf = v4l2_subdev_get_try_format(sd, fh->pad, 0); + struct v4l2_mbus_framefmt *mf = + v4l2_subdev_get_try_format(sd, fh->pad, 0); ov965x_get_default_format(mf); return 0; @@ -1470,7 +1477,7 @@ static int ov965x_probe(struct i2c_client *client, struct ov965x *ov965x; int ret; - if (pdata == NULL) { + if (!pdata) { dev_err(&client->dev, "platform data not specified\n"); return -EINVAL; } @@ -1498,13 +1505,13 @@ static int ov965x_probe(struct i2c_client *client, ret = ov965x_configure_gpios(ov965x, pdata); if (ret < 0) - return ret; + goto err_mutex; ov965x->pad.flags = MEDIA_PAD_FL_SOURCE; sd->entity.function = MEDIA_ENT_F_CAM_SENSOR; ret = media_entity_pads_init(&sd->entity, 1, &ov965x->pad); if (ret < 0) - return ret; + goto err_mutex; ret = ov965x_initialize_controls(ov965x); if (ret < 0) @@ -1530,16 +1537,20 @@ static int ov965x_probe(struct i2c_client *client, v4l2_ctrl_handler_free(sd->ctrl_handler); err_me: media_entity_cleanup(&sd->entity); +err_mutex: + mutex_destroy(&ov965x->lock); return ret; } static int ov965x_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct ov965x *ov965x = to_ov965x(sd); v4l2_async_unregister_subdev(sd); v4l2_ctrl_handler_free(sd->ctrl_handler); media_entity_cleanup(&sd->entity); + mutex_destroy(&ov965x->lock); return 0; } diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c index f434fb2ee6fc..cdc4f2392ef9 100644 --- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c +++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c @@ -1635,8 +1635,7 @@ static int s5c73m3_get_platform_data(struct s5c73m3 *state) node_ep = of_graph_get_next_endpoint(node, NULL); if (!node_ep) { - dev_warn(dev, "no endpoint defined for node: %s\n", - node->full_name); + dev_warn(dev, "no endpoint defined for node: %pOF\n", node); return 0; } diff --git a/drivers/media/i2c/s5k5baf.c b/drivers/media/i2c/s5k5baf.c index 962051b9939d..ff46d2c96cea 100644 --- a/drivers/media/i2c/s5k5baf.c +++ b/drivers/media/i2c/s5k5baf.c @@ -1374,7 +1374,7 @@ static int s5k5baf_get_selection(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg, struct v4l2_subdev_selection *sel) { - static enum selection_rect rtype; + enum selection_rect rtype; struct s5k5baf *state = to_s5k5baf(sd); rtype = s5k5baf_get_sel_rect(sel->pad, sel->target); @@ -1863,8 +1863,7 @@ static int s5k5baf_parse_device_node(struct s5k5baf *state, struct device *dev) node_ep = of_graph_get_next_endpoint(node, NULL); if (!node_ep) { - dev_err(dev, "no endpoint defined at node %s\n", - node->full_name); + dev_err(dev, "no endpoint 
defined at node %pOF\n", node); return -EINVAL; } @@ -1882,8 +1881,8 @@ static int s5k5baf_parse_device_node(struct s5k5baf *state, struct device *dev) case V4L2_MBUS_PARALLEL: break; default: - dev_err(dev, "unsupported bus in endpoint defined at node %s\n", - node->full_name); + dev_err(dev, "unsupported bus in endpoint defined at node %pOF\n", + node); return -EINVAL; } diff --git a/drivers/media/i2c/saa7127.c b/drivers/media/i2c/saa7127.c index 99c303002e90..01784d441ae6 100644 --- a/drivers/media/i2c/saa7127.c +++ b/drivers/media/i2c/saa7127.c @@ -806,7 +806,7 @@ static int saa7127_remove(struct i2c_client *client) /* ----------------------------------------------------------------------- */ -static struct i2c_device_id saa7127_id[] = { +static const struct i2c_device_id saa7127_id[] = { { "saa7127_auto", 0 }, /* auto-detection */ { "saa7126", SAA7127 }, { "saa7127", SAA7127 }, diff --git a/drivers/media/i2c/saa717x.c b/drivers/media/i2c/saa717x.c index e1f6bc219c64..102467e00fb3 100644 --- a/drivers/media/i2c/saa717x.c +++ b/drivers/media/i2c/saa717x.c @@ -1069,7 +1069,7 @@ static int saa717x_s_std(struct v4l2_subdev *sd, v4l2_std_id std) struct saa717x_state *decoder = to_state(sd); v4l2_dbg(1, debug, sd, "decoder set norm "); - v4l2_dbg(1, debug, sd, "(not yet implementd)\n"); + v4l2_dbg(1, debug, sd, "(not yet implemented)\n"); decoder->radio = 0; decoder->std = std; diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c index e0b0c032c4ac..700f433261d0 100644 --- a/drivers/media/i2c/smiapp/smiapp-core.c +++ b/drivers/media/i2c/smiapp/smiapp-core.c @@ -841,6 +841,8 @@ static int smiapp_get_mbus_formats(struct smiapp_sensor *sensor) &client->dev, compressed_max_bpp - sensor->compressed_min_bpp + 1, sizeof(*sensor->valid_link_freqs), GFP_KERNEL); + if (!sensor->valid_link_freqs) + return -ENOMEM; for (i = 0; i < ARRAY_SIZE(smiapp_csi_data_formats); i++) { const struct smiapp_csi_data_format *f = @@ -2809,13 +2811,19 @@ static struct smiapp_hwconfig *smiapp_get_hwconfig(struct device *dev) switch (bus_cfg->bus_type) { case V4L2_MBUS_CSI2: hwcfg->csi_signalling_mode = SMIAPP_CSI_SIGNALLING_MODE_CSI2; + hwcfg->lanes = bus_cfg->bus.mipi_csi2.num_data_lanes; + break; + case V4L2_MBUS_CCP2: + hwcfg->csi_signalling_mode = (bus_cfg->bus.mipi_csi1.strobe) ? + SMIAPP_CSI_SIGNALLING_MODE_CCP2_DATA_STROBE : + SMIAPP_CSI_SIGNALLING_MODE_CCP2_DATA_CLOCK; + hwcfg->lanes = 1; break; - /* FIXME: add CCP2 support. 
*/ default: + dev_err(dev, "unsupported bus %u\n", bus_cfg->bus_type); goto out_err; } - hwcfg->lanes = bus_cfg->bus.mipi_csi2.num_data_lanes; dev_dbg(dev, "lanes %u\n", hwcfg->lanes); /* NVM size is not mandatory */ @@ -2828,8 +2836,8 @@ static struct smiapp_hwconfig *smiapp_get_hwconfig(struct device *dev) goto out_err; } - dev_dbg(dev, "nvm %d, clk %d, csi %d\n", hwcfg->nvm_size, - hwcfg->ext_clk, hwcfg->csi_signalling_mode); + dev_dbg(dev, "nvm %d, clk %d, mode %d\n", + hwcfg->nvm_size, hwcfg->ext_clk, hwcfg->csi_signalling_mode); if (!bus_cfg->nr_of_link_frequencies) { dev_warn(dev, "no link frequencies defined\n"); diff --git a/drivers/media/i2c/smiapp/smiapp-quirk.c b/drivers/media/i2c/smiapp/smiapp-quirk.c index cb128eae9c54..95c0272bb014 100644 --- a/drivers/media/i2c/smiapp/smiapp-quirk.c +++ b/drivers/media/i2c/smiapp/smiapp-quirk.c @@ -71,7 +71,7 @@ static int jt8ew9_limits(struct smiapp_sensor *sensor) static int jt8ew9_post_poweron(struct smiapp_sensor *sensor) { - const struct smiapp_reg_8 regs[] = { + static const struct smiapp_reg_8 regs[] = { { 0x30a3, 0xd8 }, /* Output port control : LVDS ports only */ { 0x30ae, 0x00 }, /* 0x0307 pll_multiplier maximum value on PLL input 9.6MHz ( 19.2MHz is divided on pre_pll_div) */ { 0x30af, 0xd0 }, /* 0x0307 pll_multiplier maximum value on PLL input 9.6MHz ( 19.2MHz is divided on pre_pll_div) */ @@ -115,7 +115,7 @@ const struct smiapp_quirk smiapp_jt8ew9_quirk = { static int imx125es_post_poweron(struct smiapp_sensor *sensor) { /* Taken from v02. No idea what the other two are. */ - const struct smiapp_reg_8 regs[] = { + static const struct smiapp_reg_8 regs[] = { /* * 0x3302: clk during frame blanking: * 0x00 - HS mode, 0x01 - LP11 @@ -145,7 +145,7 @@ static int jt8ev1_post_poweron(struct smiapp_sensor *sensor) { struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); int rval; - const struct smiapp_reg_8 regs[] = { + static const struct smiapp_reg_8 regs[] = { { 0x3031, 0xcd }, /* For digital binning (EQ_MONI) */ { 0x30a3, 0xd0 }, /* FLASH STROBE enable */ { 0x3237, 0x00 }, /* For control of pulse timing for ADC */ @@ -166,7 +166,7 @@ static int jt8ev1_post_poweron(struct smiapp_sensor *sensor) { 0x33cf, 0xec }, /* For Black sun */ { 0x3328, 0x80 }, /* Ugh. No idea what's this. 
*/ }; - const struct smiapp_reg_8 regs_96[] = { + static const struct smiapp_reg_8 regs_96[] = { { 0x30ae, 0x00 }, /* For control of ADC clock */ { 0x30af, 0xd0 }, { 0x30b0, 0x01 }, diff --git a/drivers/media/i2c/soc_camera/Kconfig b/drivers/media/i2c/soc_camera/Kconfig index 96859f37cb1c..72b369895b37 100644 --- a/drivers/media/i2c/soc_camera/Kconfig +++ b/drivers/media/i2c/soc_camera/Kconfig @@ -47,12 +47,6 @@ config SOC_CAMERA_OV5642 help This is a V4L2 camera driver for the OmniVision OV5642 sensor -config SOC_CAMERA_OV6650 - tristate "ov6650 sensor support" - depends on SOC_CAMERA && I2C - ---help--- - This is a V4L2 SoC camera driver for the OmniVision OV6650 sensor - config SOC_CAMERA_OV772X tristate "ov772x camera support" depends on SOC_CAMERA && I2C diff --git a/drivers/media/i2c/soc_camera/Makefile b/drivers/media/i2c/soc_camera/Makefile index 974bdb721dbe..78532a7fb8e2 100644 --- a/drivers/media/i2c/soc_camera/Makefile +++ b/drivers/media/i2c/soc_camera/Makefile @@ -4,7 +4,6 @@ obj-$(CONFIG_SOC_CAMERA_MT9T031) += mt9t031.o obj-$(CONFIG_SOC_CAMERA_MT9T112) += mt9t112.o obj-$(CONFIG_SOC_CAMERA_MT9V022) += mt9v022.o obj-$(CONFIG_SOC_CAMERA_OV5642) += ov5642.o -obj-$(CONFIG_SOC_CAMERA_OV6650) += ov6650.o obj-$(CONFIG_SOC_CAMERA_OV772X) += ov772x.o obj-$(CONFIG_SOC_CAMERA_OV9640) += ov9640.o obj-$(CONFIG_SOC_CAMERA_OV9740) += ov9740.o diff --git a/drivers/media/i2c/soc_camera/mt9t031.c b/drivers/media/i2c/soc_camera/mt9t031.c index 714fb3555b34..4802d30e47de 100644 --- a/drivers/media/i2c/soc_camera/mt9t031.c +++ b/drivers/media/i2c/soc_camera/mt9t031.c @@ -592,7 +592,7 @@ static const struct dev_pm_ops mt9t031_dev_pm_ops = { .runtime_resume = mt9t031_runtime_resume, }; -static struct device_type mt9t031_dev_type = { +static const struct device_type mt9t031_dev_type = { .name = "MT9T031", .pm = &mt9t031_dev_pm_ops, }; diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c index 5788af238b86..e6f5c363ccab 100644 --- a/drivers/media/i2c/tc358743.c +++ b/drivers/media/i2c/tc358743.c @@ -2013,7 +2013,7 @@ static int tc358743_remove(struct i2c_client *client) return 0; } -static struct i2c_device_id tc358743_id[] = { +static const struct i2c_device_id tc358743_id[] = { {"tc358743", 0}, {} }; diff --git a/drivers/media/i2c/ths8200.c b/drivers/media/i2c/ths8200.c index 42340e364cea..498ad2368cbc 100644 --- a/drivers/media/i2c/ths8200.c +++ b/drivers/media/i2c/ths8200.c @@ -483,7 +483,7 @@ static int ths8200_remove(struct i2c_client *client) return 0; } -static struct i2c_device_id ths8200_id[] = { +static const struct i2c_device_id ths8200_id[] = { { "ths8200", 0 }, {}, }; diff --git a/drivers/media/i2c/vs6624.c b/drivers/media/i2c/vs6624.c index f0741ab338df..560738213c00 100644 --- a/drivers/media/i2c/vs6624.c +++ b/drivers/media/i2c/vs6624.c @@ -58,7 +58,7 @@ static const struct vs6624_format { }, }; -static struct v4l2_mbus_framefmt vs6624_default_fmt = { +static const struct v4l2_mbus_framefmt vs6624_default_fmt = { .width = VGA_WIDTH, .height = VGA_HEIGHT, .code = MEDIA_BUS_FMT_UYVY8_2X8, diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c index 760e3e424e23..e79f72b8b858 100644 --- a/drivers/media/media-device.c +++ b/drivers/media/media-device.c @@ -69,9 +69,9 @@ static int media_device_get_info(struct media_device *dev, strlcpy(info->serial, dev->serial, sizeof(info->serial)); strlcpy(info->bus_info, dev->bus_info, sizeof(info->bus_info)); - info->media_version = MEDIA_API_VERSION; + info->media_version = LINUX_VERSION_CODE; + 
info->driver_version = info->media_version; info->hw_revision = dev->hw_revision; - info->driver_version = dev->driver_version; return 0; } @@ -537,9 +537,9 @@ static DEVICE_ATTR(model, S_IRUGO, show_model, NULL); * Registration/unregistration */ -static void media_device_release(struct media_devnode *mdev) +static void media_device_release(struct media_devnode *devnode) { - dev_dbg(mdev->parent, "Media device released\n"); + dev_dbg(devnode->parent, "Media device released\n"); } /** @@ -591,9 +591,8 @@ int __must_check media_device_register_entity(struct media_device *mdev, &entity->pads[i].graph_obj); /* invoke entity_notify callbacks */ - list_for_each_entry_safe(notify, next, &mdev->entity_notify, list) { - (notify)->notify(entity, notify->notify_data); - } + list_for_each_entry_safe(notify, next, &mdev->entity_notify, list) + notify->notify(entity, notify->notify_data); if (mdev->entity_internal_idx_max >= mdev->pm_count_walk.ent_enum.idx_max) { @@ -834,8 +833,6 @@ void media_device_pci_init(struct media_device *mdev, mdev->hw_revision = (pci_dev->subsystem_vendor << 16) | pci_dev->subsystem_device; - mdev->driver_version = LINUX_VERSION_CODE; - media_device_init(mdev); } EXPORT_SYMBOL_GPL(media_device_pci_init); @@ -863,7 +860,6 @@ void __media_device_usb_init(struct media_device *mdev, strlcpy(mdev->serial, udev->serial, sizeof(mdev->serial)); usb_make_path(udev, mdev->bus_info, sizeof(mdev->bus_info)); mdev->hw_revision = le16_to_cpu(udev->descriptor.bcdDevice); - mdev->driver_version = LINUX_VERSION_CODE; media_device_init(mdev); } diff --git a/drivers/media/media-entity.c b/drivers/media/media-entity.c index dd0f0ead9516..2ace0410d277 100644 --- a/drivers/media/media-entity.c +++ b/drivers/media/media-entity.c @@ -917,7 +917,7 @@ media_entity_find_link(struct media_pad *source, struct media_pad *sink) } EXPORT_SYMBOL_GPL(media_entity_find_link); -struct media_pad *media_entity_remote_pad(struct media_pad *pad) +struct media_pad *media_entity_remote_pad(const struct media_pad *pad) { struct media_link *link; diff --git a/drivers/media/pci/b2c2/flexcop-pci.c b/drivers/media/pci/b2c2/flexcop-pci.c index 6e60decb2198..cc6527e35537 100644 --- a/drivers/media/pci/b2c2/flexcop-pci.c +++ b/drivers/media/pci/b2c2/flexcop-pci.c @@ -415,7 +415,7 @@ static void flexcop_pci_remove(struct pci_dev *pdev) flexcop_device_kfree(fc_pci->fc_dev); } -static struct pci_device_id flexcop_pci_tbl[] = { +static const struct pci_device_id flexcop_pci_tbl[] = { { PCI_DEVICE(0x13d0, 0x2103) }, { }, }; diff --git a/drivers/media/pci/bt8xx/bt878.c b/drivers/media/pci/bt8xx/bt878.c index 8aa726651630..a5f52137d306 100644 --- a/drivers/media/pci/bt8xx/bt878.c +++ b/drivers/media/pci/bt8xx/bt878.c @@ -383,7 +383,7 @@ EXPORT_SYMBOL(bt878_device_control); .driver_data = (unsigned long) name \ } -static struct pci_device_id bt878_pci_tbl[] = { +static const struct pci_device_id bt878_pci_tbl[] = { BROOKTREE_878_DEVICE(0x0071, 0x0101, "Nebula Electronics DigiTV"), BROOKTREE_878_DEVICE(0x1461, 0x0761, "AverMedia AverTV DVB-T 761"), BROOKTREE_878_DEVICE(0x11bd, 0x001c, "Pinnacle PCTV Sat"), diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c index ed319f18ba48..227086a2e99c 100644 --- a/drivers/media/pci/bt8xx/bttv-driver.c +++ b/drivers/media/pci/bt8xx/bttv-driver.c @@ -1702,7 +1702,7 @@ static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb) bttv_dma_free(q,fh->btv,buf); } -static struct videobuf_queue_ops bttv_video_qops = { +static const struct 
videobuf_queue_ops bttv_video_qops = { .buf_setup = buffer_setup, .buf_prepare = buffer_prepare, .buf_queue = buffer_queue, @@ -4388,7 +4388,7 @@ static int bttv_resume(struct pci_dev *pci_dev) } #endif -static struct pci_device_id bttv_pci_tbl[] = { +static const struct pci_device_id bttv_pci_tbl[] = { {PCI_VDEVICE(BROOKTREE, PCI_DEVICE_ID_BT848), 0}, {PCI_VDEVICE(BROOKTREE, PCI_DEVICE_ID_BT849), 0}, {PCI_VDEVICE(BROOKTREE, PCI_DEVICE_ID_BT878), 0}, diff --git a/drivers/media/pci/bt8xx/bttv-i2c.c b/drivers/media/pci/bt8xx/bttv-i2c.c index 274fd036b306..eccd1e3d717a 100644 --- a/drivers/media/pci/bt8xx/bttv-i2c.c +++ b/drivers/media/pci/bt8xx/bttv-i2c.c @@ -97,7 +97,7 @@ static int bttv_bit_getsda(void *data) return state; } -static struct i2c_algo_bit_data bttv_i2c_algo_bit_template = { +static const struct i2c_algo_bit_data bttv_i2c_algo_bit_template = { .setsda = bttv_bit_setsda, .setscl = bttv_bit_setscl, .getsda = bttv_bit_getsda, diff --git a/drivers/media/pci/bt8xx/bttv-input.c b/drivers/media/pci/bt8xx/bttv-input.c index 2fd07a8afcd2..73d655d073d6 100644 --- a/drivers/media/pci/bt8xx/bttv-input.c +++ b/drivers/media/pci/bt8xx/bttv-input.c @@ -69,12 +69,13 @@ static void ir_handle_key(struct bttv *btv) if ((ir->mask_keydown && (gpio & ir->mask_keydown)) || (ir->mask_keyup && !(gpio & ir->mask_keyup))) { - rc_keydown_notimeout(ir->dev, RC_TYPE_UNKNOWN, data, 0); + rc_keydown_notimeout(ir->dev, RC_PROTO_UNKNOWN, data, 0); } else { /* HACK: Probably, ir->mask_keydown is missing for this board */ if (btv->c.type == BTTV_BOARD_WINFAST2000) - rc_keydown_notimeout(ir->dev, RC_TYPE_UNKNOWN, data, 0); + rc_keydown_notimeout(ir->dev, RC_PROTO_UNKNOWN, data, + 0); rc_keyup(ir->dev); } @@ -99,7 +100,7 @@ static void ir_enltv_handle_key(struct bttv *btv) gpio, data, (gpio & ir->mask_keyup) ? " up" : "up/down"); - rc_keydown_notimeout(ir->dev, RC_TYPE_UNKNOWN, data, 0); + rc_keydown_notimeout(ir->dev, RC_PROTO_UNKNOWN, data, 0); if (keyup) rc_keyup(ir->dev); } else { @@ -113,7 +114,8 @@ static void ir_enltv_handle_key(struct bttv *btv) if (keyup) rc_keyup(ir->dev); else - rc_keydown_notimeout(ir->dev, RC_TYPE_UNKNOWN, data, 0); + rc_keydown_notimeout(ir->dev, RC_PROTO_UNKNOWN, data, + 0); } ir->last_gpio = data | keyup; @@ -235,7 +237,7 @@ static void bttv_rc5_timer_end(unsigned long data) } scancode = RC_SCANCODE_RC5(system, command); - rc_keydown(ir->dev, RC_TYPE_RC5, scancode, toggle); + rc_keydown(ir->dev, RC_PROTO_RC5, scancode, toggle); dprintk("scancode %x, toggle %x\n", scancode, toggle); } @@ -327,7 +329,7 @@ static void bttv_ir_stop(struct bttv *btv) * Get_key functions used by I2C remotes */ -static int get_key_pv951(struct IR_i2c *ir, enum rc_type *protocol, +static int get_key_pv951(struct IR_i2c *ir, enum rc_proto *protocol, u32 *scancode, u8 *toggle) { unsigned char b; @@ -355,7 +357,7 @@ static int get_key_pv951(struct IR_i2c *ir, enum rc_type *protocol, * the device is bound to the vendor-provided RC. 
*/ - *protocol = RC_TYPE_UNKNOWN; + *protocol = RC_PROTO_UNKNOWN; *scancode = b; *toggle = 0; return 1; @@ -535,7 +537,7 @@ int bttv_input_init(struct bttv *btv) snprintf(ir->phys, sizeof(ir->phys), "pci-%s/ir0", pci_name(btv->c.pci)); - rc->input_name = ir->name; + rc->device_name = ir->name; rc->input_phys = ir->phys; rc->input_id.bustype = BUS_PCI; rc->input_id.version = 1; diff --git a/drivers/media/pci/bt8xx/dst_ca.c b/drivers/media/pci/bt8xx/dst_ca.c index 90f4263452d3..530b3e9764ce 100644 --- a/drivers/media/pci/bt8xx/dst_ca.c +++ b/drivers/media/pci/bt8xx/dst_ca.c @@ -57,20 +57,6 @@ static unsigned int verbose = 5; module_param(verbose, int, 0644); MODULE_PARM_DESC(verbose, "verbose startup messages, default is 1 (yes)"); -/* Need some more work */ -static int ca_set_slot_descr(void) -{ - /* We could make this more graceful ? */ - return -EOPNOTSUPP; -} - -/* Need some more work */ -static int ca_set_pid(void) -{ - /* We could make this more graceful ? */ - return -EOPNOTSUPP; -} - static void put_command_and_length(u8 *data, int command, int length) { data[0] = (command >> 16) & 0xff; @@ -144,7 +130,7 @@ static int dst_put_ci(struct dst_state *state, u8 *data, int len, u8 *ca_string, } if(dst_ca_comm_err == RETRIES) - return -1; + return -EIO; return 0; } @@ -159,7 +145,7 @@ static int ca_get_app_info(struct dst_state *state) put_checksum(&command[0], command[0]); if ((dst_put_ci(state, command, sizeof(command), state->messages, GET_REPLY)) < 0) { dprintk(verbose, DST_CA_ERROR, 1, " -->dst_put_ci FAILED !"); - return -1; + return -EIO; } dprintk(verbose, DST_CA_INFO, 1, " -->dst_put_ci SUCCESS !"); dprintk(verbose, DST_CA_INFO, 1, " ================================ CI Module Application Info ======================================"); @@ -198,7 +184,7 @@ static int ca_get_ca_info(struct dst_state *state) put_checksum(&slot_command[0], slot_command[0]); if ((dst_put_ci(state, slot_command, sizeof (slot_command), state->messages, GET_REPLY)) < 0) { dprintk(verbose, DST_CA_ERROR, 1, " -->dst_put_ci FAILED !"); - return -1; + return -EIO; } dprintk(verbose, DST_CA_INFO, 1, " -->dst_put_ci SUCCESS !"); @@ -242,7 +228,7 @@ static int ca_get_slot_caps(struct dst_state *state, struct ca_caps *p_ca_caps, put_checksum(&slot_command[0], slot_command[0]); if ((dst_put_ci(state, slot_command, sizeof (slot_command), slot_cap, GET_REPLY)) < 0) { dprintk(verbose, DST_CA_ERROR, 1, " -->dst_put_ci FAILED !"); - return -1; + return -EIO; } dprintk(verbose, DST_CA_NOTICE, 1, " -->dst_put_ci SUCCESS !"); @@ -282,7 +268,7 @@ static int ca_get_slot_info(struct dst_state *state, struct ca_slot_info *p_ca_s put_checksum(&slot_command[0], 7); if ((dst_put_ci(state, slot_command, sizeof (slot_command), slot_info, GET_REPLY)) < 0) { dprintk(verbose, DST_CA_ERROR, 1, " -->dst_put_ci FAILED !"); - return -1; + return -EIO; } dprintk(verbose, DST_CA_INFO, 1, " -->dst_put_ci SUCCESS !"); @@ -354,7 +340,7 @@ static int handle_dst_tag(struct dst_state *state, struct ca_msg *p_ca_message, } else { if (length > 247) { dprintk(verbose, DST_CA_ERROR, 1, " Message too long ! 
*** Bailing Out *** !"); - return -1; + return -EIO; } hw_buffer->msg[0] = (length & 0xff) + 7; hw_buffer->msg[1] = 0x40; @@ -380,7 +366,7 @@ static int write_to_8820(struct dst_state *state, struct ca_msg *hw_buffer, u8 l dprintk(verbose, DST_CA_ERROR, 1, " DST-CI Command failed."); dprintk(verbose, DST_CA_NOTICE, 1, " Resetting DST."); rdc_reset_state(state); - return -1; + return -EIO; } dprintk(verbose, DST_CA_NOTICE, 1, " DST-CI Command success."); @@ -453,7 +439,7 @@ static int dst_check_ca_pmt(struct dst_state *state, struct ca_msg *p_ca_message if (ca_pmt_reply_test) { if ((ca_set_pmt(state, p_ca_message, hw_buffer, 1, GET_REPLY)) < 0) { dprintk(verbose, DST_CA_ERROR, 1, " ca_set_pmt.. failed !"); - return -1; + return -EIO; } /* Process CA PMT Reply */ @@ -464,7 +450,7 @@ static int dst_check_ca_pmt(struct dst_state *state, struct ca_msg *p_ca_message if (!ca_pmt_reply_test) { if ((ca_set_pmt(state, p_ca_message, hw_buffer, 0, NO_REPLY)) < 0) { dprintk(verbose, DST_CA_ERROR, 1, " ca_set_pmt.. failed !"); - return -1; + return -EIO; } dprintk(verbose, DST_CA_NOTICE, 1, " ca_set_pmt.. success !"); /* put a dummy message */ @@ -573,17 +559,18 @@ static long dst_ca_ioctl(struct file *file, unsigned int cmd, unsigned long ioct switch (cmd) { case CA_SEND_MSG: dprintk(verbose, DST_CA_INFO, 1, " Sending message"); - if ((ca_send_message(state, p_ca_message, arg)) < 0) { + result = ca_send_message(state, p_ca_message, arg); + + if (result < 0) { dprintk(verbose, DST_CA_ERROR, 1, " -->CA_SEND_MSG Failed !"); - result = -1; goto free_mem_and_exit; } break; case CA_GET_MSG: dprintk(verbose, DST_CA_INFO, 1, " Getting message"); - if ((ca_get_message(state, p_ca_message, arg)) < 0) { + result = ca_get_message(state, p_ca_message, arg); + if (result < 0) { dprintk(verbose, DST_CA_ERROR, 1, " -->CA_GET_MSG Failed !"); - result = -1; goto free_mem_and_exit; } dprintk(verbose, DST_CA_INFO, 1, " -->CA_GET_MSG Success !"); @@ -595,7 +582,8 @@ static long dst_ca_ioctl(struct file *file, unsigned int cmd, unsigned long ioct break; case CA_GET_SLOT_INFO: dprintk(verbose, DST_CA_INFO, 1, " Getting Slot info"); - if ((ca_get_slot_info(state, p_ca_slot_info, arg)) < 0) { + result = ca_get_slot_info(state, p_ca_slot_info, arg); + if (result < 0) { dprintk(verbose, DST_CA_ERROR, 1, " -->CA_GET_SLOT_INFO Failed !"); result = -1; goto free_mem_and_exit; @@ -604,40 +592,22 @@ static long dst_ca_ioctl(struct file *file, unsigned int cmd, unsigned long ioct break; case CA_GET_CAP: dprintk(verbose, DST_CA_INFO, 1, " Getting Slot capabilities"); - if ((ca_get_slot_caps(state, p_ca_caps, arg)) < 0) { + result = ca_get_slot_caps(state, p_ca_caps, arg); + if (result < 0) { dprintk(verbose, DST_CA_ERROR, 1, " -->CA_GET_CAP Failed !"); - result = -1; goto free_mem_and_exit; } dprintk(verbose, DST_CA_INFO, 1, " -->CA_GET_CAP Success !"); break; case CA_GET_DESCR_INFO: dprintk(verbose, DST_CA_INFO, 1, " Getting descrambler description"); - if ((ca_get_slot_descr(state, p_ca_message, arg)) < 0) { + result = ca_get_slot_descr(state, p_ca_message, arg); + if (result < 0) { dprintk(verbose, DST_CA_ERROR, 1, " -->CA_GET_DESCR_INFO Failed !"); - result = -1; goto free_mem_and_exit; } dprintk(verbose, DST_CA_INFO, 1, " -->CA_GET_DESCR_INFO Success !"); break; - case CA_SET_DESCR: - dprintk(verbose, DST_CA_INFO, 1, " Setting descrambler"); - if ((ca_set_slot_descr()) < 0) { - dprintk(verbose, DST_CA_ERROR, 1, " -->CA_SET_DESCR Failed !"); - result = -1; - goto free_mem_and_exit; - } - dprintk(verbose, DST_CA_INFO, 1, " 
-->CA_SET_DESCR Success !"); - break; - case CA_SET_PID: - dprintk(verbose, DST_CA_INFO, 1, " Setting PID"); - if ((ca_set_pid()) < 0) { - dprintk(verbose, DST_CA_ERROR, 1, " -->CA_SET_PID Failed !"); - result = -1; - goto free_mem_and_exit; - } - dprintk(verbose, DST_CA_INFO, 1, " -->CA_SET_PID Success !"); - break; default: result = -EOPNOTSUPP; } diff --git a/drivers/media/pci/cobalt/cobalt-alsa-pcm.c b/drivers/media/pci/cobalt/cobalt-alsa-pcm.c index 49013c6b8646..b69b258d39b9 100644 --- a/drivers/media/pci/cobalt/cobalt-alsa-pcm.c +++ b/drivers/media/pci/cobalt/cobalt-alsa-pcm.c @@ -43,7 +43,7 @@ MODULE_PARM_DESC(pcm_debug, "enable debug messages for pcm"); pr_info("cobalt-alsa-pcm %s: " fmt, __func__, ##arg); \ } while (0) -static struct snd_pcm_hardware snd_cobalt_hdmi_capture = { +static const struct snd_pcm_hardware snd_cobalt_hdmi_capture = { .info = SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | @@ -64,7 +64,7 @@ static struct snd_pcm_hardware snd_cobalt_hdmi_capture = { .periods_max = 4, }; -static struct snd_pcm_hardware snd_cobalt_playback = { +static const struct snd_pcm_hardware snd_cobalt_playback = { .info = SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c index f8e173f3e9e2..98b6cb9505d1 100644 --- a/drivers/media/pci/cobalt/cobalt-driver.c +++ b/drivers/media/pci/cobalt/cobalt-driver.c @@ -36,7 +36,7 @@ #include "cobalt-omnitek.h" /* add your revision and whatnot here */ -static struct pci_device_id cobalt_pci_tbl[] = { +static const struct pci_device_id cobalt_pci_tbl[] = { {PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_COBALT, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0,} diff --git a/drivers/media/pci/cobalt/cobalt-i2c.c b/drivers/media/pci/cobalt/cobalt-i2c.c index ad16b89b8d0c..1a5c55673ea8 100644 --- a/drivers/media/pci/cobalt/cobalt-i2c.c +++ b/drivers/media/pci/cobalt/cobalt-i2c.c @@ -301,7 +301,7 @@ static u32 cobalt_func(struct i2c_adapter *adap) } /* template for i2c-bit-algo */ -static struct i2c_adapter cobalt_i2c_adap_template = { +static const struct i2c_adapter cobalt_i2c_adap_template = { .name = "cobalt i2c driver", .algo = NULL, /* set by i2c-algo-bit */ .algo_data = NULL, /* filled from template */ diff --git a/drivers/media/pci/cx18/cx18-alsa-mixer.c b/drivers/media/pci/cx18/cx18-alsa-mixer.c index 06b066bc9301..cb04c3d820e2 100644 --- a/drivers/media/pci/cx18/cx18-alsa-mixer.c +++ b/drivers/media/pci/cx18/cx18-alsa-mixer.c @@ -161,7 +161,7 @@ int __init snd_cx18_mixer_create(struct snd_cx18_card *cxsc) strlcpy(sc->mixername, "CX23418 Mixer", sizeof(sc->mixername)); - ret = snd_ctl_add(sc, snd_ctl_new1(snd_cx18_mixer_tv_vol, cxsc)); + ret = snd_ctl_add(sc, snd_ctl_new1(&snd_cx18_mixer_tv_vol, cxsc)); if (ret) { CX18_ALSA_WARN("%s: failed to add %s control, err %d\n", __func__, snd_cx18_mixer_tv_vol.name, ret); diff --git a/drivers/media/pci/cx18/cx18-alsa-pcm.c b/drivers/media/pci/cx18/cx18-alsa-pcm.c index f68ee57a9ae2..aadd76466aec 100644 --- a/drivers/media/pci/cx18/cx18-alsa-pcm.c +++ b/drivers/media/pci/cx18/cx18-alsa-pcm.c @@ -44,7 +44,7 @@ MODULE_PARM_DESC(pcm_debug, "enable debug messages for pcm"); __func__, ##arg); \ } while (0) -static struct snd_pcm_hardware snd_cx18_hw_capture = { +static const struct snd_pcm_hardware snd_cx18_hw_capture = { .info = SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | diff --git a/drivers/media/pci/cx18/cx18-driver.c 
b/drivers/media/pci/cx18/cx18-driver.c index 8bce49cdad46..8654710464cc 100644 --- a/drivers/media/pci/cx18/cx18-driver.c +++ b/drivers/media/pci/cx18/cx18-driver.c @@ -48,7 +48,7 @@ int (*cx18_ext_init)(struct cx18 *); EXPORT_SYMBOL(cx18_ext_init); /* add your revision and whatnot here */ -static struct pci_device_id cx18_pci_tbl[] = { +static const struct pci_device_id cx18_pci_tbl[] = { {PCI_VENDOR_ID_CX, PCI_DEVICE_ID_CX23418, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0,} diff --git a/drivers/media/pci/cx18/cx18-i2c.c b/drivers/media/pci/cx18/cx18-i2c.c index eabdd4c5520a..7f588eeac60f 100644 --- a/drivers/media/pci/cx18/cx18-i2c.c +++ b/drivers/media/pci/cx18/cx18-i2c.c @@ -93,8 +93,8 @@ static int cx18_i2c_new_ir(struct cx18 *cx, struct i2c_adapter *adap, u32 hw, case CX18_HW_Z8F0811_IR_RX_HAUP: init_data->ir_codes = RC_MAP_HAUPPAUGE; init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR; - init_data->type = RC_BIT_RC5 | RC_BIT_RC6_MCE | - RC_BIT_RC6_6A_32; + init_data->type = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | + RC_PROTO_BIT_RC6_6A_32; init_data->name = cx->card_name; info.platform_data = init_data; break; @@ -206,7 +206,7 @@ static int cx18_getsda(void *data) } /* template for i2c-bit-algo */ -static struct i2c_adapter cx18_i2c_adap_template = { +static const struct i2c_adapter cx18_i2c_adap_template = { .name = "cx18 i2c driver", .algo = NULL, /* set by i2c-algo-bit */ .algo_data = NULL, /* filled from template */ @@ -216,7 +216,7 @@ static struct i2c_adapter cx18_i2c_adap_template = { #define CX18_SCL_PERIOD (10) /* usecs. 10 usec is period for a 100 KHz clock */ #define CX18_ALGO_BIT_TIMEOUT (2) /* seconds */ -static struct i2c_algo_bit_data cx18_i2c_algo_template = { +static const struct i2c_algo_bit_data cx18_i2c_algo_template = { .setsda = cx18_setsda, .setscl = cx18_setscl, .getsda = cx18_getsda, diff --git a/drivers/media/pci/cx18/cx18-streams.c b/drivers/media/pci/cx18/cx18-streams.c index 3c45e0071530..8385411af641 100644 --- a/drivers/media/pci/cx18/cx18-streams.c +++ b/drivers/media/pci/cx18/cx18-streams.c @@ -31,7 +31,7 @@ #define CX18_DSP0_INTERRUPT_MASK 0xd0004C -static struct v4l2_file_operations cx18_v4l2_enc_fops = { +static const struct v4l2_file_operations cx18_v4l2_enc_fops = { .owner = THIS_MODULE, .read = cx18_v4l2_read, .open = cx18_v4l2_open, @@ -240,7 +240,7 @@ static void buffer_queue(struct videobuf_queue *q, struct videobuf_buffer *vb) list_add_tail(&buf->vb.queue, &s->vb_capture); } -static struct videobuf_queue_ops cx18_videobuf_qops = { +static const struct videobuf_queue_ops cx18_videobuf_qops = { .buf_setup = buffer_setup, .buf_prepare = buffer_prepare, .buf_queue = buffer_queue, diff --git a/drivers/media/pci/cx23885/cx23885-417.c b/drivers/media/pci/cx23885/cx23885-417.c index 2ff1d1e274be..a71f3c7569ce 100644 --- a/drivers/media/pci/cx23885/cx23885-417.c +++ b/drivers/media/pci/cx23885/cx23885-417.c @@ -1416,7 +1416,7 @@ static int vidioc_log_status(struct file *file, void *priv) return 0; } -static struct v4l2_file_operations mpeg_fops = { +static const struct v4l2_file_operations mpeg_fops = { .owner = THIS_MODULE, .open = v4l2_fh_open, .release = vb2_fop_release, diff --git a/drivers/media/pci/cx23885/cx23885-alsa.c b/drivers/media/pci/cx23885/cx23885-alsa.c index c148f9a4a9ac..d8c3637e492e 100644 --- a/drivers/media/pci/cx23885/cx23885-alsa.c +++ b/drivers/media/pci/cx23885/cx23885-alsa.c @@ -293,7 +293,7 @@ static int dsp_buffer_free(struct cx23885_audio_dev *chip) */ #define DEFAULT_FIFO_SIZE 4096 -static struct snd_pcm_hardware 
snd_cx23885_digital_hw = { +static const struct snd_pcm_hardware snd_cx23885_digital_hw = { .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c index c48fa8e25a70..78a8836d03e4 100644 --- a/drivers/media/pci/cx23885/cx23885-cards.c +++ b/drivers/media/pci/cx23885/cx23885-cards.c @@ -1278,6 +1278,12 @@ static void hauppauge_eeprom(struct cx23885_dev *dev, u8 *eeprom_data) case 85721: /* WinTV-HVR1290 (PCIe, OEM, RCA in, IR, Dual channel ATSC and Basic analog */ + case 121019: + /* WinTV-HVR4400 (PCIe, DVB-S2, DVB-C/T) */ + break; + case 121029: + /* WinTV-HVR5500 (PCIe, DVB-S2, DVB-C/T) */ + break; case 150329: /* WinTV-HVR5525 (PCIe, DVB-S/S2, DVB-T/T2/C) */ break; diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c index 02b5ec549369..8f63df1cb418 100644 --- a/drivers/media/pci/cx23885/cx23885-core.c +++ b/drivers/media/pci/cx23885/cx23885-core.c @@ -2056,7 +2056,7 @@ static void cx23885_finidev(struct pci_dev *pci_dev) kfree(dev); } -static struct pci_device_id cx23885_pci_tbl[] = { +static const struct pci_device_id cx23885_pci_tbl[] = { { /* CX23885 */ .vendor = 0x14f1, diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c index 979b66627f60..e795ddeb7fe2 100644 --- a/drivers/media/pci/cx23885/cx23885-dvb.c +++ b/drivers/media/pci/cx23885/cx23885-dvb.c @@ -2637,6 +2637,11 @@ int cx23885_dvb_unregister(struct cx23885_tsport *port) struct vb2_dvb_frontend *fe0; struct i2c_client *client; + fe0 = vb2_dvb_get_frontend(&port->frontends, 1); + + if (fe0 && fe0->dvb.frontend) + vb2_dvb_unregister_bus(&port->frontends); + /* remove I2C client for CI */ client = port->i2c_client_ci; if (client) { @@ -2665,11 +2670,6 @@ int cx23885_dvb_unregister(struct cx23885_tsport *port) i2c_unregister_device(client); } - fe0 = vb2_dvb_get_frontend(&port->frontends, 1); - - if (fe0 && fe0->dvb.frontend) - vb2_dvb_unregister_bus(&port->frontends); - switch (port->dev->board) { case CX23885_BOARD_NETUP_DUAL_DVBS2_CI: netup_ci_exit(port); diff --git a/drivers/media/pci/cx23885/cx23885-i2c.c b/drivers/media/pci/cx23885/cx23885-i2c.c index 8528032090f2..0f21467ae88e 100644 --- a/drivers/media/pci/cx23885/cx23885-i2c.c +++ b/drivers/media/pci/cx23885/cx23885-i2c.c @@ -264,7 +264,7 @@ static const struct i2c_algorithm cx23885_i2c_algo_template = { /* ----------------------------------------------------------------------- */ -static struct i2c_adapter cx23885_i2c_adap_template = { +static const struct i2c_adapter cx23885_i2c_adap_template = { .name = "cx23885", .owner = THIS_MODULE, .algo = &cx23885_i2c_algo_template, diff --git a/drivers/media/pci/cx23885/cx23885-input.c b/drivers/media/pci/cx23885/cx23885-input.c index 4367cb3162b6..944b70831f12 100644 --- a/drivers/media/pci/cx23885/cx23885-input.c +++ b/drivers/media/pci/cx23885/cx23885-input.c @@ -284,32 +284,32 @@ int cx23885_input_init(struct cx23885_dev *dev) case CX23885_BOARD_HAUPPAUGE_HVR1290: case CX23885_BOARD_HAUPPAUGE_HVR1250: /* Integrated CX2388[58] IR controller */ - allowed_protos = RC_BIT_ALL_IR_DECODER; + allowed_protos = RC_PROTO_BIT_ALL_IR_DECODER; /* The grey Hauppauge RC-5 remote */ rc_map = RC_MAP_HAUPPAUGE; break; case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL: /* Integrated CX23885 IR controller */ - allowed_protos = RC_BIT_ALL_IR_DECODER; + allowed_protos = RC_PROTO_BIT_ALL_IR_DECODER; /* The grey Terratec remote 
with orange buttons */ rc_map = RC_MAP_NEC_TERRATEC_CINERGY_XS; break; case CX23885_BOARD_TEVII_S470: /* Integrated CX23885 IR controller */ - allowed_protos = RC_BIT_ALL_IR_DECODER; + allowed_protos = RC_PROTO_BIT_ALL_IR_DECODER; /* A guess at the remote */ rc_map = RC_MAP_TEVII_NEC; break; case CX23885_BOARD_MYGICA_X8507: /* Integrated CX23885 IR controller */ - allowed_protos = RC_BIT_ALL_IR_DECODER; + allowed_protos = RC_PROTO_BIT_ALL_IR_DECODER; /* A guess at the remote */ rc_map = RC_MAP_TOTAL_MEDIA_IN_HAND_02; break; case CX23885_BOARD_TBS_6980: case CX23885_BOARD_TBS_6981: /* Integrated CX23885 IR controller */ - allowed_protos = RC_BIT_ALL_IR_DECODER; + allowed_protos = RC_PROTO_BIT_ALL_IR_DECODER; /* A guess at the remote */ rc_map = RC_MAP_TBS_NEC; break; @@ -320,12 +320,12 @@ int cx23885_input_init(struct cx23885_dev *dev) case CX23885_BOARD_DVBSKY_S952: case CX23885_BOARD_DVBSKY_T982: /* Integrated CX23885 IR controller */ - allowed_protos = RC_BIT_ALL_IR_DECODER; + allowed_protos = RC_PROTO_BIT_ALL_IR_DECODER; rc_map = RC_MAP_DVBSKY; break; case CX23885_BOARD_TT_CT2_4500_CI: /* Integrated CX23885 IR controller */ - allowed_protos = RC_BIT_ALL_IR_DECODER; + allowed_protos = RC_PROTO_BIT_ALL_IR_DECODER; rc_map = RC_MAP_TT_1500; break; default: @@ -351,7 +351,7 @@ int cx23885_input_init(struct cx23885_dev *dev) } kernel_ir->rc = rc; - rc->input_name = kernel_ir->name; + rc->device_name = kernel_ir->name; rc->input_phys = kernel_ir->phys; rc->input_id.bustype = BUS_PCI; rc->input_id.version = 1; diff --git a/drivers/media/pci/cx25821/cx25821-alsa.c b/drivers/media/pci/cx25821/cx25821-alsa.c index 519b81c0c837..2b34990e86f2 100644 --- a/drivers/media/pci/cx25821/cx25821-alsa.c +++ b/drivers/media/pci/cx25821/cx25821-alsa.c @@ -428,7 +428,7 @@ static int dsp_buffer_free(struct cx25821_audio_dev *chip) * Digital hardware definition */ #define DEFAULT_FIFO_SIZE 384 -static struct snd_pcm_hardware snd_cx25821_digital_hw = { +static const struct snd_pcm_hardware snd_cx25821_digital_hw = { .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID, .formats = SNDRV_PCM_FMTBIT_S16_LE, diff --git a/drivers/media/pci/cx25821/cx25821-audio-upstream.c b/drivers/media/pci/cx25821/cx25821-audio-upstream.c index b94eb1c0023d..ada26d4acfb4 100644 --- a/drivers/media/pci/cx25821/cx25821-audio-upstream.c +++ b/drivers/media/pci/cx25821/cx25821-audio-upstream.c @@ -277,7 +277,7 @@ static int cx25821_get_audio_data(struct cx25821_dev *dev, p = (char *)dev->_audiodata_buf_virt_addr + frame_offset; for (i = 0; i < dev->_audio_lines_count; i++) { - int n = kernel_read(file, file_offset, mybuf, AUDIO_LINE_SIZE); + int n = kernel_read(file, mybuf, AUDIO_LINE_SIZE, &file_offset); if (n < AUDIO_LINE_SIZE) { pr_info("Done: exit %s() since no more bytes to read from Audio file\n", __func__); @@ -290,7 +290,6 @@ static int cx25821_get_audio_data(struct cx25821_dev *dev, memcpy(p, mybuf, n); p += n; } - file_offset += n; } dev->_audioframe_count++; fput(file); @@ -318,7 +317,7 @@ static int cx25821_openfile_audio(struct cx25821_dev *dev, { char *p = (void *)dev->_audiodata_buf_virt_addr; struct file *file; - loff_t offset; + loff_t file_offset = 0; int i, j; file = filp_open(dev->_audiofilename, O_RDONLY | O_LARGEFILE, 0); @@ -328,11 +327,11 @@ static int cx25821_openfile_audio(struct cx25821_dev *dev, return PTR_ERR(file); } - for (j = 0, offset = 0; j < NUM_AUDIO_FRAMES; j++) { + for (j = 0; j < NUM_AUDIO_FRAMES; j++) { for (i = 0; i < 
dev->_audio_lines_count; i++) { char buf[AUDIO_LINE_SIZE]; - int n = kernel_read(file, offset, buf, - AUDIO_LINE_SIZE); + loff_t offset = file_offset; + int n = kernel_read(file, buf, AUDIO_LINE_SIZE, &file_offset); if (n < AUDIO_LINE_SIZE) { pr_info("Done: exit %s() since no more bytes to read from Audio file\n", @@ -344,8 +343,6 @@ static int cx25821_openfile_audio(struct cx25821_dev *dev, if (p) memcpy(p + offset, buf, n); - - offset += n; } dev->_audioframe_count++; } diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c index fbc0229183bd..04aa4a68a0ae 100644 --- a/drivers/media/pci/cx25821/cx25821-core.c +++ b/drivers/media/pci/cx25821/cx25821-core.c @@ -1390,10 +1390,7 @@ static struct pci_driver cx25821_pci_driver = { static int __init cx25821_init(void) { - pr_info("driver version %d.%d.%d loaded\n", - (CX25821_VERSION_CODE >> 16) & 0xff, - (CX25821_VERSION_CODE >> 8) & 0xff, - CX25821_VERSION_CODE & 0xff); + pr_info("driver loaded\n"); return pci_register_driver(&cx25821_pci_driver); } diff --git a/drivers/media/pci/cx25821/cx25821-i2c.c b/drivers/media/pci/cx25821/cx25821-i2c.c index 263a1cf36ef1..000049d3c71b 100644 --- a/drivers/media/pci/cx25821/cx25821-i2c.c +++ b/drivers/media/pci/cx25821/cx25821-i2c.c @@ -285,7 +285,7 @@ static const struct i2c_algorithm cx25821_i2c_algo_template = { #endif }; -static struct i2c_adapter cx25821_i2c_adap_template = { +static const struct i2c_adapter cx25821_i2c_adap_template = { .name = "cx25821", .owner = THIS_MODULE, .algo = &cx25821_i2c_algo_template, diff --git a/drivers/media/pci/cx25821/cx25821.h b/drivers/media/pci/cx25821/cx25821.h index 0f20e89b0cde..b3eb2dabb30b 100644 --- a/drivers/media/pci/cx25821/cx25821.h +++ b/drivers/media/pci/cx25821/cx25821.h @@ -41,8 +41,6 @@ #include #include -#define CX25821_VERSION_CODE KERNEL_VERSION(0, 0, 106) - #define UNSET (-1U) #define NO_SYNC_LINE (-1U) diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c index c81fe4681d14..9740326bc93f 100644 --- a/drivers/media/pci/cx88/cx88-alsa.c +++ b/drivers/media/pci/cx88/cx88-alsa.c @@ -799,7 +799,7 @@ static int snd_cx88_alc_put(struct snd_kcontrol *kcontrol, return 0; } -static struct snd_kcontrol_new snd_cx88_alc_switch = { +static const struct snd_kcontrol_new snd_cx88_alc_switch = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Line-In ALC Switch", .info = snd_ctl_boolean_mono_info, diff --git a/drivers/media/pci/cx88/cx88-blackbird.c b/drivers/media/pci/cx88/cx88-blackbird.c index aa49c9597d9c..e3101f04941c 100644 --- a/drivers/media/pci/cx88/cx88-blackbird.c +++ b/drivers/media/pci/cx88/cx88-blackbird.c @@ -1075,7 +1075,7 @@ static const struct v4l2_ioctl_ops mpeg_ioctl_ops = { .vidioc_unsubscribe_event = v4l2_event_unsubscribe, }; -static struct video_device cx8802_mpeg_template = { +static const struct video_device cx8802_mpeg_template = { .name = "cx8802", .fops = &mpeg_fops, .ioctl_ops = &mpeg_ioctl_ops, diff --git a/drivers/media/pci/cx88/cx88-input.c b/drivers/media/pci/cx88/cx88-input.c index 01f2e472a2a0..e02449bf2041 100644 --- a/drivers/media/pci/cx88/cx88-input.c +++ b/drivers/media/pci/cx88/cx88-input.c @@ -132,7 +132,7 @@ static void cx88_ir_handle_key(struct cx88_IR *ir) data = (data << 4) | ((gpio_key & 0xf0) >> 4); - rc_keydown(ir->dev, RC_TYPE_UNKNOWN, data, 0); + rc_keydown(ir->dev, RC_PROTO_UNKNOWN, data, 0); } else if (ir->core->boardnr == CX88_BOARD_PROLINK_PLAYTVPVR || ir->core->boardnr == CX88_BOARD_PIXELVIEW_PLAYTV_ULTRA_PRO) { @@ -146,7 +146,7 @@ 
static void cx88_ir_handle_key(struct cx88_IR *ir) scancode = RC_SCANCODE_NECX(addr, cmd); if (0 == (gpio & ir->mask_keyup)) - rc_keydown_notimeout(ir->dev, RC_TYPE_NECX, scancode, + rc_keydown_notimeout(ir->dev, RC_PROTO_NECX, scancode, 0); else rc_keyup(ir->dev); @@ -154,20 +154,22 @@ static void cx88_ir_handle_key(struct cx88_IR *ir) } else if (ir->mask_keydown) { /* bit set on keydown */ if (gpio & ir->mask_keydown) - rc_keydown_notimeout(ir->dev, RC_TYPE_UNKNOWN, data, 0); + rc_keydown_notimeout(ir->dev, RC_PROTO_UNKNOWN, data, + 0); else rc_keyup(ir->dev); } else if (ir->mask_keyup) { /* bit cleared on keydown */ if (0 == (gpio & ir->mask_keyup)) - rc_keydown_notimeout(ir->dev, RC_TYPE_UNKNOWN, data, 0); + rc_keydown_notimeout(ir->dev, RC_PROTO_UNKNOWN, data, + 0); else rc_keyup(ir->dev); } else { /* can't distinguish keydown/up :-/ */ - rc_keydown_notimeout(ir->dev, RC_TYPE_UNKNOWN, data, 0); + rc_keydown_notimeout(ir->dev, RC_PROTO_UNKNOWN, data, 0); rc_keyup(ir->dev); } } @@ -267,7 +269,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci) struct cx88_IR *ir; struct rc_dev *dev; char *ir_codes = NULL; - u64 rc_type = RC_BIT_OTHER; + u64 rc_proto = RC_PROTO_BIT_OTHER; int err = -ENOMEM; u32 hardware_mask = 0; /* For devices with a hardware mask, when * used with a full-code IR table @@ -348,7 +350,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci) * 002-T mini RC, provided with newer PV hardware */ ir_codes = RC_MAP_PIXELVIEW_MK12; - rc_type = RC_BIT_NECX; + rc_proto = RC_PROTO_BIT_NECX; ir->gpio_addr = MO_GP1_IO; ir->mask_keyup = 0x80; ir->polling = 10; /* ms */ @@ -464,7 +466,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci) snprintf(ir->name, sizeof(ir->name), "cx88 IR (%s)", core->board.name); snprintf(ir->phys, sizeof(ir->phys), "pci-%s/ir0", pci_name(pci)); - dev->input_name = ir->name; + dev->device_name = ir->name; dev->input_phys = ir->phys; dev->input_id.bustype = BUS_PCI; dev->input_id.version = 1; @@ -487,7 +489,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci) dev->timeout = 10 * 1000 * 1000; /* 10 ms */ } else { dev->driver_type = RC_DRIVER_SCANCODE; - dev->allowed_protocols = rc_type; + dev->allowed_protocols = rc_proto; } ir->core = core; @@ -557,7 +559,7 @@ void cx88_ir_irq(struct cx88_core *core) ir_raw_event_handle(ir->dev); } -static int get_key_pvr2000(struct IR_i2c *ir, enum rc_type *protocol, +static int get_key_pvr2000(struct IR_i2c *ir, enum rc_proto *protocol, u32 *scancode, u8 *toggle) { int flags, code; @@ -582,7 +584,7 @@ static int get_key_pvr2000(struct IR_i2c *ir, enum rc_type *protocol, dprintk("IR Key/Flags: (0x%02x/0x%02x)\n", code & 0xff, flags & 0xff); - *protocol = RC_TYPE_UNKNOWN; + *protocol = RC_PROTO_UNKNOWN; *scancode = code & 0xff; *toggle = 0; return 1; @@ -612,7 +614,7 @@ void cx88_i2c_init_ir(struct cx88_core *core) case CX88_BOARD_LEADTEK_PVR2000: addr_list = pvr2000_addr_list; core->init_data.name = "cx88 Leadtek PVR 2000 remote"; - core->init_data.type = RC_BIT_UNKNOWN; + core->init_data.type = RC_PROTO_BIT_UNKNOWN; core->init_data.get_key = get_key_pvr2000; core->init_data.ir_codes = RC_MAP_EMPTY; break; @@ -633,8 +635,8 @@ void cx88_i2c_init_ir(struct cx88_core *core) /* Hauppauge XVR */ core->init_data.name = "cx88 Hauppauge XVR remote"; core->init_data.ir_codes = RC_MAP_HAUPPAUGE; - core->init_data.type = RC_BIT_RC5 | RC_BIT_RC6_MCE | - RC_BIT_RC6_6A_32; + core->init_data.type = RC_PROTO_BIT_RC5 | + RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_RC6_6A_32; 
core->init_data.internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR; info.platform_data = &core->init_data; diff --git a/drivers/media/pci/ddbridge/Kconfig b/drivers/media/pci/ddbridge/Kconfig index ffed78c2ffb4..f43d0b83fc0c 100644 --- a/drivers/media/pci/ddbridge/Kconfig +++ b/drivers/media/pci/ddbridge/Kconfig @@ -8,7 +8,11 @@ config DVB_DDBRIDGE select DVB_TDA18271C2DD if MEDIA_SUBDRV_AUTOSELECT select DVB_STV0367 if MEDIA_SUBDRV_AUTOSELECT select DVB_CXD2841ER if MEDIA_SUBDRV_AUTOSELECT + select DVB_STV0910 if MEDIA_SUBDRV_AUTOSELECT + select DVB_STV6111 if MEDIA_SUBDRV_AUTOSELECT + select DVB_LNBH25 if MEDIA_SUBDRV_AUTOSELECT select MEDIA_TUNER_TDA18212 if MEDIA_SUBDRV_AUTOSELECT + select DVB_MXL5XX if MEDIA_SUBDRV_AUTOSELECT ---help--- Support for cards with the Digital Devices PCI express bridge: - Octopus PCIe Bridge @@ -20,5 +24,22 @@ config DVB_DDBRIDGE - CineCTv6 and DuoFlex CT (STV0367-based) - CineCTv7 and DuoFlex CT2/C2T2/C2T2I (Sony CXD28xx-based) - MaxA8 series + - CineS2 V7/V7A and DuoFlex S2 V4 (ST STV0910-based) + - Max S4/8 Say Y if you own such a card and want to use it. + +config DVB_DDBRIDGE_MSIENABLE + bool "Enable Message Signaled Interrupts (MSI) per default (EXPERIMENTAL)" + depends on DVB_DDBRIDGE + depends on PCI_MSI + default n + ---help--- + Use PCI MSI (Message Signaled Interrupts) per default. Enabling this + might lead to I2C errors originating from the bridge in conjunction + with certain SATA controllers, requiring a reload of the ddbridge + module. MSI can still be disabled by passing msi=0 as option, as + this will just change the msi option default value. + + If you're unsure, concerned about stability and don't want to pass + module options in case of troubles, say N. diff --git a/drivers/media/pci/ddbridge/Makefile b/drivers/media/pci/ddbridge/Makefile index 7446c8b677b5..09703312a3f1 100644 --- a/drivers/media/pci/ddbridge/Makefile +++ b/drivers/media/pci/ddbridge/Makefile @@ -2,7 +2,8 @@ # Makefile for the ddbridge device driver # -ddbridge-objs := ddbridge-core.o +ddbridge-objs := ddbridge-main.o ddbridge-core.o ddbridge-hw.o \ + ddbridge-i2c.o ddbridge-maxs8.o obj-$(CONFIG_DVB_DDBRIDGE) += ddbridge.o diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c index cd1723e79a07..f4bd4908acdd 100644 --- a/drivers/media/pci/ddbridge/ddbridge-core.c +++ b/drivers/media/pci/ddbridge/ddbridge-core.c @@ -1,7 +1,10 @@ /* - * ddbridge.c: Digital Devices PCIe bridge driver + * ddbridge-core.c: Digital Devices bridge core functions + * + * Copyright (C) 2010-2017 Digital Devices GmbH + * Marcus Metzler + * Ralph Metzler * - * Copyright (C) 2010-2011 Digital Devices GmbH * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -17,8 +20,6 @@ * http://www.gnu.org/copyleft/gpl.html */ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include #include #include @@ -32,9 +33,12 @@ #include #include #include -#include "ddbridge.h" +#include "ddbridge.h" +#include "ddbridge-i2c.h" #include "ddbridge-regs.h" +#include "ddbridge-maxs8.h" +#include "ddbridge-io.h" #include "tda18271c2dd.h" #include "stv6110x.h" @@ -45,355 +49,429 @@ #include "stv0367_priv.h" #include "cxd2841er.h" #include "tda18212.h" +#include "stv0910.h" +#include "stv6111.h" +#include "lnbh25.h" +#include "cxd2099.h" -static int xo2_speed = 2; -module_param(xo2_speed, int, 0444); -MODULE_PARM_DESC(xo2_speed, "default transfer speed for xo2 based duoflex, 0=55,1=75,2=90,3=104 MBit/s, 
default=2, use attribute to change for individual cards"); +/****************************************************************************/ + +#define DDB_MAX_ADAPTER 64 + +/****************************************************************************/ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); -/* MSI had problems with lost interrupts, fixed but needs testing */ -#undef CONFIG_PCI_MSI +static int adapter_alloc; +module_param(adapter_alloc, int, 0444); +MODULE_PARM_DESC(adapter_alloc, + "0-one adapter per io, 1-one per tab with io, 2-one per tab, 3-one for all"); -/******************************************************************************/ +/****************************************************************************/ -static int i2c_io(struct i2c_adapter *adapter, u8 adr, - u8 *wbuf, u32 wlen, u8 *rbuf, u32 rlen) +static DEFINE_MUTEX(redirect_lock); + +struct workqueue_struct *ddb_wq; + +static struct ddb *ddbs[DDB_MAX_ADAPTER]; + +/****************************************************************************/ +/****************************************************************************/ +/****************************************************************************/ + +static void ddb_set_dma_table(struct ddb_io *io) { - struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0, - .buf = wbuf, .len = wlen }, - {.addr = adr, .flags = I2C_M_RD, - .buf = rbuf, .len = rlen } }; - return (i2c_transfer(adapter, msgs, 2) == 2) ? 0 : -1; -} + struct ddb *dev = io->port->dev; + struct ddb_dma *dma = io->dma; + u32 i; + u64 mem; -static int i2c_write(struct i2c_adapter *adap, u8 adr, u8 *data, int len) -{ - struct i2c_msg msg = {.addr = adr, .flags = 0, - .buf = data, .len = len}; - - return (i2c_transfer(adap, &msg, 1) == 1) ? 0 : -1; -} - -static int i2c_read(struct i2c_adapter *adapter, u8 adr, u8 *val) -{ - struct i2c_msg msgs[1] = {{.addr = adr, .flags = I2C_M_RD, - .buf = val, .len = 1 } }; - return (i2c_transfer(adapter, msgs, 1) == 1) ? 0 : -1; -} - -static int i2c_read_regs(struct i2c_adapter *adapter, - u8 adr, u8 reg, u8 *val, u8 len) -{ - struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0, - .buf = ®, .len = 1 }, - {.addr = adr, .flags = I2C_M_RD, - .buf = val, .len = len } }; - return (i2c_transfer(adapter, msgs, 2) == 2) ? 0 : -1; -} - -static int i2c_read_reg(struct i2c_adapter *adapter, u8 adr, u8 reg, u8 *val) -{ - return i2c_read_regs(adapter, adr, reg, val, 1); -} - -static int i2c_read_reg16(struct i2c_adapter *adapter, u8 adr, - u16 reg, u8 *val) -{ - u8 msg[2] = {reg>>8, reg&0xff}; - struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0, - .buf = msg, .len = 2}, - {.addr = adr, .flags = I2C_M_RD, - .buf = val, .len = 1} }; - return (i2c_transfer(adapter, msgs, 2) == 2) ? 
0 : -1; -} - -static int i2c_write_reg(struct i2c_adapter *adap, u8 adr, - u8 reg, u8 val) -{ - u8 msg[2] = {reg, val}; - - return i2c_write(adap, adr, msg, 2); -} - -static inline u32 safe_ddbreadl(struct ddb *dev, u32 adr) -{ - u32 val = ddbreadl(adr); - - /* (ddb)readl returns (uint)-1 (all bits set) on failure, catch that */ - if (val == ~0) { - dev_err(&dev->pdev->dev, "ddbreadl failure, adr=%08x\n", adr); - return 0; + if (!dma) + return; + for (i = 0; i < dma->num; i++) { + mem = dma->pbuf[i]; + ddbwritel(dev, mem & 0xffffffff, dma->bufregs + i * 8); + ddbwritel(dev, mem >> 32, dma->bufregs + i * 8 + 4); } - - return val; + dma->bufval = ((dma->div & 0x0f) << 16) | + ((dma->num & 0x1f) << 11) | + ((dma->size >> 7) & 0x7ff); } -static int ddb_i2c_cmd(struct ddb_i2c *i2c, u32 adr, u32 cmd) +static void ddb_set_dma_tables(struct ddb *dev) { - struct ddb *dev = i2c->dev; - long stat; - u32 val; + u32 i; - i2c->done = 0; - ddbwritel((adr << 9) | cmd, i2c->regs + I2C_COMMAND); - stat = wait_event_timeout(i2c->wq, i2c->done == 1, HZ); - if (stat == 0) { - dev_err(&dev->pdev->dev, "I2C timeout\n"); - { /* MSI debugging*/ - u32 istat = ddbreadl(INTERRUPT_STATUS); - dev_err(&dev->pdev->dev, "IRS %08x\n", istat); - ddbwritel(istat, INTERRUPT_ACK); - } - return -EIO; - } - val = ddbreadl(i2c->regs+I2C_COMMAND); - if (val & 0x70000) - return -EIO; - return 0; -} - -static int ddb_i2c_master_xfer(struct i2c_adapter *adapter, - struct i2c_msg msg[], int num) -{ - struct ddb_i2c *i2c = (struct ddb_i2c *)i2c_get_adapdata(adapter); - struct ddb *dev = i2c->dev; - u8 addr = 0; - - if (num) - addr = msg[0].addr; - - if (num == 2 && msg[1].flags & I2C_M_RD && - !(msg[0].flags & I2C_M_RD)) { - memcpy_toio(dev->regs + I2C_TASKMEM_BASE + i2c->wbuf, - msg[0].buf, msg[0].len); - ddbwritel(msg[0].len|(msg[1].len << 16), - i2c->regs+I2C_TASKLENGTH); - if (!ddb_i2c_cmd(i2c, addr, 1)) { - memcpy_fromio(msg[1].buf, - dev->regs + I2C_TASKMEM_BASE + i2c->rbuf, - msg[1].len); - return num; - } - } - - if (num == 1 && !(msg[0].flags & I2C_M_RD)) { - ddbcpyto(I2C_TASKMEM_BASE + i2c->wbuf, msg[0].buf, msg[0].len); - ddbwritel(msg[0].len, i2c->regs + I2C_TASKLENGTH); - if (!ddb_i2c_cmd(i2c, addr, 2)) - return num; - } - if (num == 1 && (msg[0].flags & I2C_M_RD)) { - ddbwritel(msg[0].len << 16, i2c->regs + I2C_TASKLENGTH); - if (!ddb_i2c_cmd(i2c, addr, 3)) { - ddbcpyfrom(msg[0].buf, - I2C_TASKMEM_BASE + i2c->rbuf, msg[0].len); - return num; - } - } - return -EIO; -} - - -static u32 ddb_i2c_functionality(struct i2c_adapter *adap) -{ - return I2C_FUNC_SMBUS_EMUL; -} - -static struct i2c_algorithm ddb_i2c_algo = { - .master_xfer = ddb_i2c_master_xfer, - .functionality = ddb_i2c_functionality, -}; - -static void ddb_i2c_release(struct ddb *dev) -{ - int i; - struct ddb_i2c *i2c; - struct i2c_adapter *adap; - - for (i = 0; i < dev->info->port_num; i++) { - i2c = &dev->i2c[i]; - adap = &i2c->adap; - i2c_del_adapter(adap); + for (i = 0; i < DDB_MAX_PORT; i++) { + if (dev->port[i].input[0]) + ddb_set_dma_table(dev->port[i].input[0]); + if (dev->port[i].input[1]) + ddb_set_dma_table(dev->port[i].input[1]); + if (dev->port[i].output) + ddb_set_dma_table(dev->port[i].output); } } -static int ddb_i2c_init(struct ddb *dev) -{ - int i, j, stat = 0; - struct ddb_i2c *i2c; - struct i2c_adapter *adap; - for (i = 0; i < dev->info->port_num; i++) { - i2c = &dev->i2c[i]; - i2c->dev = dev; - i2c->nr = i; - i2c->wbuf = i * (I2C_TASKMEM_SIZE / 4); - i2c->rbuf = i2c->wbuf + (I2C_TASKMEM_SIZE / 8); - i2c->regs = 0x80 + i * 0x20; - 
ddbwritel(I2C_SPEED_100, i2c->regs + I2C_TIMING); - ddbwritel((i2c->rbuf << 16) | i2c->wbuf, - i2c->regs + I2C_TASKADDRESS); - init_waitqueue_head(&i2c->wq); +/****************************************************************************/ +/****************************************************************************/ +/****************************************************************************/ - adap = &i2c->adap; - i2c_set_adapdata(adap, i2c); -#ifdef I2C_ADAP_CLASS_TV_DIGITAL - adap->class = I2C_ADAP_CLASS_TV_DIGITAL|I2C_CLASS_TV_ANALOG; -#else -#ifdef I2C_CLASS_TV_ANALOG - adap->class = I2C_CLASS_TV_ANALOG; -#endif -#endif - strcpy(adap->name, "ddbridge"); - adap->algo = &ddb_i2c_algo; - adap->algo_data = (void *)i2c; - adap->dev.parent = &dev->pdev->dev; - stat = i2c_add_adapter(adap); - if (stat) - break; - } - if (stat) - for (j = 0; j < i; j++) { - i2c = &dev->i2c[j]; - adap = &i2c->adap; - i2c_del_adapter(adap); - } - return stat; -} - - -/******************************************************************************/ -/******************************************************************************/ -/******************************************************************************/ - -#if 0 -static void set_table(struct ddb *dev, u32 off, - dma_addr_t *pbuf, u32 num) +static void ddb_redirect_dma(struct ddb *dev, + struct ddb_dma *sdma, + struct ddb_dma *ddma) { u32 i, base; u64 mem; - base = DMA_BASE_ADDRESS_TABLE + off; - for (i = 0; i < num; i++) { - mem = pbuf[i]; - ddbwritel(mem & 0xffffffff, base + i * 8); - ddbwritel(mem >> 32, base + i * 8 + 4); + sdma->bufval = ddma->bufval; + base = sdma->bufregs; + for (i = 0; i < ddma->num; i++) { + mem = ddma->pbuf[i]; + ddbwritel(dev, mem & 0xffffffff, base + i * 8); + ddbwritel(dev, mem >> 32, base + i * 8 + 4); } } -#endif -static void ddb_address_table(struct ddb *dev) +static int ddb_unredirect(struct ddb_port *port) { - u32 i, j, base; - u64 mem; - dma_addr_t *pbuf; + struct ddb_input *oredi, *iredi = NULL; + struct ddb_output *iredo = NULL; - for (i = 0; i < dev->info->port_num * 2; i++) { - base = DMA_BASE_ADDRESS_TABLE + i * 0x100; - pbuf = dev->input[i].pbuf; - for (j = 0; j < dev->input[i].dma_buf_num; j++) { - mem = pbuf[j]; - ddbwritel(mem & 0xffffffff, base + j * 8); - ddbwritel(mem >> 32, base + j * 8 + 4); - } + /* dev_info(port->dev->dev, + * "unredirect %d.%d\n", port->dev->nr, port->nr); + */ + mutex_lock(&redirect_lock); + if (port->output->dma->running) { + mutex_unlock(&redirect_lock); + return -EBUSY; } - for (i = 0; i < dev->info->port_num; i++) { - base = DMA_BASE_ADDRESS_TABLE + 0x800 + i * 0x100; - pbuf = dev->output[i].pbuf; - for (j = 0; j < dev->output[i].dma_buf_num; j++) { - mem = pbuf[j]; - ddbwritel(mem & 0xffffffff, base + j * 8); - ddbwritel(mem >> 32, base + j * 8 + 4); + oredi = port->output->redi; + if (!oredi) + goto done; + if (port->input[0]) { + iredi = port->input[0]->redi; + iredo = port->input[0]->redo; + + if (iredo) { + iredo->port->output->redi = oredi; + if (iredo->port->input[0]) { + iredo->port->input[0]->redi = iredi; + ddb_redirect_dma(oredi->port->dev, + oredi->dma, iredo->dma); + } + port->input[0]->redo = NULL; + ddb_set_dma_table(port->input[0]); } + oredi->redi = iredi; + port->input[0]->redi = NULL; } + oredi->redo = NULL; + port->output->redi = NULL; + + ddb_set_dma_table(oredi); +done: + mutex_unlock(&redirect_lock); + return 0; } -static void io_free(struct pci_dev *pdev, u8 **vbuf, - dma_addr_t *pbuf, u32 size, int num) +static int ddb_redirect(u32 i, u32 p) +{ + struct ddb 
*idev = ddbs[(i >> 4) & 0x3f]; + struct ddb_input *input, *input2; + struct ddb *pdev = ddbs[(p >> 4) & 0x3f]; + struct ddb_port *port; + + if (!idev || !pdev) + return -EINVAL; + if (!idev->has_dma || !pdev->has_dma) + return -EINVAL; + + port = &pdev->port[p & 0x0f]; + if (!port->output) + return -EINVAL; + if (ddb_unredirect(port)) + return -EBUSY; + + if (i == 8) + return 0; + + input = &idev->input[i & 7]; + if (!input) + return -EINVAL; + + mutex_lock(&redirect_lock); + if (port->output->dma->running || input->dma->running) { + mutex_unlock(&redirect_lock); + return -EBUSY; + } + input2 = port->input[0]; + if (input2) { + if (input->redi) { + input2->redi = input->redi; + input->redi = NULL; + } else + input2->redi = input; + } + input->redo = port->output; + port->output->redi = input; + + ddb_redirect_dma(input->port->dev, input->dma, port->output->dma); + mutex_unlock(&redirect_lock); + return 0; +} + +/****************************************************************************/ +/****************************************************************************/ +/****************************************************************************/ + +static void dma_free(struct pci_dev *pdev, struct ddb_dma *dma, int dir) { int i; - for (i = 0; i < num; i++) { - if (vbuf[i]) { - pci_free_consistent(pdev, size, vbuf[i], pbuf[i]); - vbuf[i] = NULL; + if (!dma) + return; + for (i = 0; i < dma->num; i++) { + if (dma->vbuf[i]) { + if (alt_dma) { + dma_unmap_single(&pdev->dev, dma->pbuf[i], + dma->size, + dir ? DMA_TO_DEVICE : + DMA_FROM_DEVICE); + kfree(dma->vbuf[i]); + dma->vbuf[i] = NULL; + } else { + dma_free_coherent(&pdev->dev, dma->size, + dma->vbuf[i], dma->pbuf[i]); + } + + dma->vbuf[i] = NULL; } } } -static int io_alloc(struct pci_dev *pdev, u8 **vbuf, - dma_addr_t *pbuf, u32 size, int num) +static int dma_alloc(struct pci_dev *pdev, struct ddb_dma *dma, int dir) { int i; - for (i = 0; i < num; i++) { - vbuf[i] = pci_alloc_consistent(pdev, size, &pbuf[i]); - if (!vbuf[i]) - return -ENOMEM; + if (!dma) + return 0; + for (i = 0; i < dma->num; i++) { + if (alt_dma) { + dma->vbuf[i] = kmalloc(dma->size, __GFP_RETRY_MAYFAIL); + if (!dma->vbuf[i]) + return -ENOMEM; + dma->pbuf[i] = dma_map_single(&pdev->dev, + dma->vbuf[i], + dma->size, + dir ? 
DMA_TO_DEVICE : + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, dma->pbuf[i])) { + kfree(dma->vbuf[i]); + dma->vbuf[i] = NULL; + return -ENOMEM; + } + } else { + dma->vbuf[i] = dma_alloc_coherent(&pdev->dev, + dma->size, + &dma->pbuf[i], + GFP_KERNEL); + if (!dma->vbuf[i]) + return -ENOMEM; + } } return 0; } -static int ddb_buffers_alloc(struct ddb *dev) +int ddb_buffers_alloc(struct ddb *dev) { int i; struct ddb_port *port; - for (i = 0; i < dev->info->port_num; i++) { + for (i = 0; i < dev->port_num; i++) { port = &dev->port[i]; switch (port->class) { case DDB_PORT_TUNER: - if (io_alloc(dev->pdev, port->input[0]->vbuf, - port->input[0]->pbuf, - port->input[0]->dma_buf_size, - port->input[0]->dma_buf_num) < 0) - return -1; - if (io_alloc(dev->pdev, port->input[1]->vbuf, - port->input[1]->pbuf, - port->input[1]->dma_buf_size, - port->input[1]->dma_buf_num) < 0) - return -1; + if (port->input[0]->dma) + if (dma_alloc(dev->pdev, port->input[0]->dma, 0) + < 0) + return -1; + if (port->input[1]->dma) + if (dma_alloc(dev->pdev, port->input[1]->dma, 0) + < 0) + return -1; break; case DDB_PORT_CI: - if (io_alloc(dev->pdev, port->input[0]->vbuf, - port->input[0]->pbuf, - port->input[0]->dma_buf_size, - port->input[0]->dma_buf_num) < 0) - return -1; - if (io_alloc(dev->pdev, port->output->vbuf, - port->output->pbuf, - port->output->dma_buf_size, - port->output->dma_buf_num) < 0) - return -1; + case DDB_PORT_LOOP: + if (port->input[0]->dma) + if (dma_alloc(dev->pdev, port->input[0]->dma, 0) + < 0) + return -1; + if (port->output->dma) + if (dma_alloc(dev->pdev, port->output->dma, 1) + < 0) + return -1; break; default: break; } } - ddb_address_table(dev); + ddb_set_dma_tables(dev); return 0; } -static void ddb_buffers_free(struct ddb *dev) +void ddb_buffers_free(struct ddb *dev) { int i; struct ddb_port *port; - for (i = 0; i < dev->info->port_num; i++) { + for (i = 0; i < dev->port_num; i++) { port = &dev->port[i]; - io_free(dev->pdev, port->input[0]->vbuf, - port->input[0]->pbuf, - port->input[0]->dma_buf_size, - port->input[0]->dma_buf_num); - io_free(dev->pdev, port->input[1]->vbuf, - port->input[1]->pbuf, - port->input[1]->dma_buf_size, - port->input[1]->dma_buf_num); - io_free(dev->pdev, port->output->vbuf, - port->output->pbuf, - port->output->dma_buf_size, - port->output->dma_buf_num); + + if (port->input[0] && port->input[0]->dma) + dma_free(dev->pdev, port->input[0]->dma, 0); + if (port->input[1] && port->input[1]->dma) + dma_free(dev->pdev, port->input[1]->dma, 0); + if (port->output && port->output->dma) + dma_free(dev->pdev, port->output->dma, 1); + } +} + +static void calc_con(struct ddb_output *output, u32 *con, u32 *con2, u32 flags) +{ + struct ddb *dev = output->port->dev; + u32 bitrate = output->port->obr, max_bitrate = 72000; + u32 gap = 4, nco = 0; + + *con = 0x1c; + if (output->port->gap != 0xffffffff) { + flags |= 1; + gap = output->port->gap; + max_bitrate = 0; + } + if (dev->link[0].info->type == DDB_OCTOPUS_CI && output->port->nr > 1) { + *con = 0x10c; + if (dev->link[0].ids.regmapid >= 0x10003 && !(flags & 1)) { + if (!(flags & 2)) { + /* NCO */ + max_bitrate = 0; + gap = 0; + if (bitrate != 72000) { + if (bitrate >= 96000) + *con |= 0x800; + else { + *con |= 0x1000; + nco = (bitrate * 8192 + 71999) + / 72000; + } + } + } else { + /* Divider and gap */ + *con |= 0x1810; + if (bitrate <= 64000) { + max_bitrate = 64000; + nco = 8; + } else if (bitrate <= 72000) { + max_bitrate = 72000; + nco = 7; + } else { + max_bitrate = 96000; + nco = 5; + } + } + } else { + if 
(bitrate > 72000) { + *con |= 0x810; /* 96 MBit/s and gap */ + max_bitrate = 96000; + } + *con |= 0x10; /* enable gap */ + } + } + if (max_bitrate > 0) { + if (bitrate > max_bitrate) + bitrate = max_bitrate; + if (bitrate < 31000) + bitrate = 31000; + gap = ((max_bitrate - bitrate) * 94) / bitrate; + if (gap < 2) + *con &= ~0x10; /* Disable gap */ + else + gap -= 2; + if (gap > 127) + gap = 127; + } + + *con2 = (nco << 16) | gap; +} + +static void ddb_output_start(struct ddb_output *output) +{ + struct ddb *dev = output->port->dev; + u32 con = 0x11c, con2 = 0; + + if (output->dma) { + spin_lock_irq(&output->dma->lock); + output->dma->cbuf = 0; + output->dma->coff = 0; + output->dma->stat = 0; + ddbwritel(dev, 0, DMA_BUFFER_CONTROL(output->dma)); + } + + if (output->port->input[0]->port->class == DDB_PORT_LOOP) + con = (1UL << 13) | 0x14; + else + calc_con(output, &con, &con2, 0); + + ddbwritel(dev, 0, TS_CONTROL(output)); + ddbwritel(dev, 2, TS_CONTROL(output)); + ddbwritel(dev, 0, TS_CONTROL(output)); + ddbwritel(dev, con, TS_CONTROL(output)); + ddbwritel(dev, con2, TS_CONTROL2(output)); + + if (output->dma) { + ddbwritel(dev, output->dma->bufval, + DMA_BUFFER_SIZE(output->dma)); + ddbwritel(dev, 0, DMA_BUFFER_ACK(output->dma)); + ddbwritel(dev, 1, DMA_BASE_READ); + ddbwritel(dev, 7, DMA_BUFFER_CONTROL(output->dma)); + } + + ddbwritel(dev, con | 1, TS_CONTROL(output)); + + if (output->dma) { + output->dma->running = 1; + spin_unlock_irq(&output->dma->lock); + } +} + +static void ddb_output_stop(struct ddb_output *output) +{ + struct ddb *dev = output->port->dev; + + if (output->dma) + spin_lock_irq(&output->dma->lock); + + ddbwritel(dev, 0, TS_CONTROL(output)); + + if (output->dma) { + ddbwritel(dev, 0, DMA_BUFFER_CONTROL(output->dma)); + output->dma->running = 0; + spin_unlock_irq(&output->dma->lock); + } +} + +static void ddb_input_stop(struct ddb_input *input) +{ + struct ddb *dev = input->port->dev; + u32 tag = DDB_LINK_TAG(input->port->lnr); + + if (input->dma) + spin_lock_irq(&input->dma->lock); + ddbwritel(dev, 0, tag | TS_CONTROL(input)); + if (input->dma) { + ddbwritel(dev, 0, DMA_BUFFER_CONTROL(input->dma)); + input->dma->running = 0; + spin_unlock_irq(&input->dma->lock); } } @@ -401,90 +479,81 @@ static void ddb_input_start(struct ddb_input *input) { struct ddb *dev = input->port->dev; - spin_lock_irq(&input->lock); - input->cbuf = 0; - input->coff = 0; + if (input->dma) { + spin_lock_irq(&input->dma->lock); + input->dma->cbuf = 0; + input->dma->coff = 0; + input->dma->stat = 0; + ddbwritel(dev, 0, DMA_BUFFER_CONTROL(input->dma)); + } + ddbwritel(dev, 0, TS_CONTROL(input)); + ddbwritel(dev, 2, TS_CONTROL(input)); + ddbwritel(dev, 0, TS_CONTROL(input)); - /* reset */ - ddbwritel(0, TS_INPUT_CONTROL(input->nr)); - ddbwritel(2, TS_INPUT_CONTROL(input->nr)); - ddbwritel(0, TS_INPUT_CONTROL(input->nr)); + if (input->dma) { + ddbwritel(dev, input->dma->bufval, + DMA_BUFFER_SIZE(input->dma)); + ddbwritel(dev, 0, DMA_BUFFER_ACK(input->dma)); + ddbwritel(dev, 1, DMA_BASE_WRITE); + ddbwritel(dev, 3, DMA_BUFFER_CONTROL(input->dma)); + } - ddbwritel((1 << 16) | - (input->dma_buf_num << 11) | - (input->dma_buf_size >> 7), - DMA_BUFFER_SIZE(input->nr)); - ddbwritel(0, DMA_BUFFER_ACK(input->nr)); + ddbwritel(dev, 0x09, TS_CONTROL(input)); - ddbwritel(1, DMA_BASE_WRITE); - ddbwritel(3, DMA_BUFFER_CONTROL(input->nr)); - ddbwritel(9, TS_INPUT_CONTROL(input->nr)); - input->running = 1; - spin_unlock_irq(&input->lock); + if (input->dma) { + input->dma->running = 1; + 
spin_unlock_irq(&input->dma->lock); + } } -static void ddb_input_stop(struct ddb_input *input) -{ - struct ddb *dev = input->port->dev; - spin_lock_irq(&input->lock); - ddbwritel(0, TS_INPUT_CONTROL(input->nr)); - ddbwritel(0, DMA_BUFFER_CONTROL(input->nr)); - input->running = 0; - spin_unlock_irq(&input->lock); +static void ddb_input_start_all(struct ddb_input *input) +{ + struct ddb_input *i = input; + struct ddb_output *o; + + mutex_lock(&redirect_lock); + while (i && (o = i->redo)) { + ddb_output_start(o); + i = o->port->input[0]; + if (i) + ddb_input_start(i); + } + ddb_input_start(input); + mutex_unlock(&redirect_lock); } -static void ddb_output_start(struct ddb_output *output) +static void ddb_input_stop_all(struct ddb_input *input) { - struct ddb *dev = output->port->dev; + struct ddb_input *i = input; + struct ddb_output *o; - spin_lock_irq(&output->lock); - output->cbuf = 0; - output->coff = 0; - ddbwritel(0, TS_OUTPUT_CONTROL(output->nr)); - ddbwritel(2, TS_OUTPUT_CONTROL(output->nr)); - ddbwritel(0, TS_OUTPUT_CONTROL(output->nr)); - ddbwritel(0x3c, TS_OUTPUT_CONTROL(output->nr)); - ddbwritel((1 << 16) | - (output->dma_buf_num << 11) | - (output->dma_buf_size >> 7), - DMA_BUFFER_SIZE(output->nr + 8)); - ddbwritel(0, DMA_BUFFER_ACK(output->nr + 8)); - - ddbwritel(1, DMA_BASE_READ); - ddbwritel(3, DMA_BUFFER_CONTROL(output->nr + 8)); - /* ddbwritel(0xbd, TS_OUTPUT_CONTROL(output->nr)); */ - ddbwritel(0x1d, TS_OUTPUT_CONTROL(output->nr)); - output->running = 1; - spin_unlock_irq(&output->lock); -} - -static void ddb_output_stop(struct ddb_output *output) -{ - struct ddb *dev = output->port->dev; - - spin_lock_irq(&output->lock); - ddbwritel(0, TS_OUTPUT_CONTROL(output->nr)); - ddbwritel(0, DMA_BUFFER_CONTROL(output->nr + 8)); - output->running = 0; - spin_unlock_irq(&output->lock); + mutex_lock(&redirect_lock); + ddb_input_stop(input); + while (i && (o = i->redo)) { + ddb_output_stop(o); + i = o->port->input[0]; + if (i) + ddb_input_stop(i); + } + mutex_unlock(&redirect_lock); } static u32 ddb_output_free(struct ddb_output *output) { - u32 idx, off, stat = output->stat; + u32 idx, off, stat = output->dma->stat; s32 diff; idx = (stat >> 11) & 0x1f; off = (stat & 0x7ff) << 7; - if (output->cbuf != idx) { - if ((((output->cbuf + 1) % output->dma_buf_num) == idx) && - (output->dma_buf_size - output->coff <= 188)) + if (output->dma->cbuf != idx) { + if ((((output->dma->cbuf + 1) % output->dma->num) == idx) && + (output->dma->size - output->dma->coff <= 188)) return 0; return 188; } - diff = off - output->coff; + diff = off - output->dma->coff; if (diff <= 0 || diff > 188) return 188; return 0; @@ -494,46 +563,51 @@ static ssize_t ddb_output_write(struct ddb_output *output, const __user u8 *buf, size_t count) { struct ddb *dev = output->port->dev; - u32 idx, off, stat = output->stat; + u32 idx, off, stat = output->dma->stat; u32 left = count, len; idx = (stat >> 11) & 0x1f; off = (stat & 0x7ff) << 7; while (left) { - len = output->dma_buf_size - output->coff; - if ((((output->cbuf + 1) % output->dma_buf_num) == idx) && + len = output->dma->size - output->dma->coff; + if ((((output->dma->cbuf + 1) % output->dma->num) == idx) && (off == 0)) { if (len <= 188) break; len -= 188; } - if (output->cbuf == idx) { - if (off > output->coff) { -#if 1 - len = off - output->coff; + if (output->dma->cbuf == idx) { + if (off > output->dma->coff) { + len = off - output->dma->coff; len -= (len % 188); if (len <= 188) - -#endif break; len -= 188; } } if (len > left) len = left; - if 
(copy_from_user(output->vbuf[output->cbuf] + output->coff, + if (copy_from_user(output->dma->vbuf[output->dma->cbuf] + + output->dma->coff, buf, len)) return -EIO; + if (alt_dma) + dma_sync_single_for_device(dev->dev, + output->dma->pbuf[output->dma->cbuf], + output->dma->size, DMA_TO_DEVICE); left -= len; buf += len; - output->coff += len; - if (output->coff == output->dma_buf_size) { - output->coff = 0; - output->cbuf = ((output->cbuf + 1) % output->dma_buf_num); + output->dma->coff += len; + if (output->dma->coff == output->dma->size) { + output->dma->coff = 0; + output->dma->cbuf = ((output->dma->cbuf + 1) % + output->dma->num); } - ddbwritel((output->cbuf << 11) | (output->coff >> 7), - DMA_BUFFER_ACK(output->nr + 8)); + ddbwritel(dev, + (output->dma->cbuf << 11) | + (output->dma->coff >> 7), + DMA_BUFFER_ACK(output->dma)); } return count - left; } @@ -541,81 +615,229 @@ static ssize_t ddb_output_write(struct ddb_output *output, static u32 ddb_input_avail(struct ddb_input *input) { struct ddb *dev = input->port->dev; - u32 idx, off, stat = input->stat; - u32 ctrl = ddbreadl(DMA_BUFFER_CONTROL(input->nr)); + u32 idx, off, stat = input->dma->stat; + u32 ctrl = ddbreadl(dev, DMA_BUFFER_CONTROL(input->dma)); idx = (stat >> 11) & 0x1f; off = (stat & 0x7ff) << 7; if (ctrl & 4) { - dev_err(&dev->pdev->dev, "IA %d %d %08x\n", idx, off, ctrl); - ddbwritel(input->stat, DMA_BUFFER_ACK(input->nr)); + dev_err(dev->dev, "IA %d %d %08x\n", idx, off, ctrl); + ddbwritel(dev, stat, DMA_BUFFER_ACK(input->dma)); return 0; } - if (input->cbuf != idx) + if (input->dma->cbuf != idx) return 188; return 0; } -static ssize_t ddb_input_read(struct ddb_input *input, __user u8 *buf, size_t count) +static ssize_t ddb_input_read(struct ddb_input *input, + __user u8 *buf, size_t count) { struct ddb *dev = input->port->dev; u32 left = count; - u32 idx, free, stat = input->stat; + u32 idx, free, stat = input->dma->stat; int ret; idx = (stat >> 11) & 0x1f; while (left) { - if (input->cbuf == idx) + if (input->dma->cbuf == idx) return count - left; - free = input->dma_buf_size - input->coff; + free = input->dma->size - input->dma->coff; if (free > left) free = left; - ret = copy_to_user(buf, input->vbuf[input->cbuf] + - input->coff, free); + if (alt_dma) + dma_sync_single_for_cpu(dev->dev, + input->dma->pbuf[input->dma->cbuf], + input->dma->size, DMA_FROM_DEVICE); + ret = copy_to_user(buf, input->dma->vbuf[input->dma->cbuf] + + input->dma->coff, free); if (ret) return -EFAULT; - input->coff += free; - if (input->coff == input->dma_buf_size) { - input->coff = 0; - input->cbuf = (input->cbuf+1) % input->dma_buf_num; + input->dma->coff += free; + if (input->dma->coff == input->dma->size) { + input->dma->coff = 0; + input->dma->cbuf = (input->dma->cbuf + 1) % + input->dma->num; } left -= free; - ddbwritel((input->cbuf << 11) | (input->coff >> 7), - DMA_BUFFER_ACK(input->nr)); + buf += free; + ddbwritel(dev, + (input->dma->cbuf << 11) | (input->dma->coff >> 7), + DMA_BUFFER_ACK(input->dma)); } return count; } -/******************************************************************************/ -/******************************************************************************/ -/******************************************************************************/ +/****************************************************************************/ +/****************************************************************************/ -#if 0 -static struct ddb_input *fe2input(struct ddb *dev, struct dvb_frontend *fe) +static ssize_t ts_write(struct 
file *file, const __user char *buf, + size_t count, loff_t *ppos) { - int i; + struct dvb_device *dvbdev = file->private_data; + struct ddb_output *output = dvbdev->priv; + struct ddb *dev = output->port->dev; + size_t left = count; + int stat; - for (i = 0; i < dev->info->port_num * 2; i++) { - if (dev->input[i].fe == fe) - return &dev->input[i]; + if (!dev->has_dma) + return -EINVAL; + while (left) { + if (ddb_output_free(output) < 188) { + if (file->f_flags & O_NONBLOCK) + break; + if (wait_event_interruptible( + output->dma->wq, + ddb_output_free(output) >= 188) < 0) + break; + } + stat = ddb_output_write(output, buf, left); + if (stat < 0) + return stat; + buf += stat; + left -= stat; } - return NULL; + return (left == count) ? -EAGAIN : (count - left); } -#endif -static int drxk_gate_ctrl(struct dvb_frontend *fe, int enable) +static ssize_t ts_read(struct file *file, __user char *buf, + size_t count, loff_t *ppos) +{ + struct dvb_device *dvbdev = file->private_data; + struct ddb_output *output = dvbdev->priv; + struct ddb_input *input = output->port->input[0]; + struct ddb *dev = output->port->dev; + size_t left = count; + int stat; + + if (!dev->has_dma) + return -EINVAL; + while (left) { + if (ddb_input_avail(input) < 188) { + if (file->f_flags & O_NONBLOCK) + break; + if (wait_event_interruptible( + input->dma->wq, + ddb_input_avail(input) >= 188) < 0) + break; + } + stat = ddb_input_read(input, buf, left); + if (stat < 0) + return stat; + left -= stat; + buf += stat; + } + return (count && (left == count)) ? -EAGAIN : (count - left); +} + +static unsigned int ts_poll(struct file *file, poll_table *wait) +{ + struct dvb_device *dvbdev = file->private_data; + struct ddb_output *output = dvbdev->priv; + struct ddb_input *input = output->port->input[0]; + + unsigned int mask = 0; + + poll_wait(file, &input->dma->wq, wait); + poll_wait(file, &output->dma->wq, wait); + if (ddb_input_avail(input) >= 188) + mask |= POLLIN | POLLRDNORM; + if (ddb_output_free(output) >= 188) + mask |= POLLOUT | POLLWRNORM; + return mask; +} + +static int ts_release(struct inode *inode, struct file *file) +{ + struct dvb_device *dvbdev = file->private_data; + struct ddb_output *output = NULL; + struct ddb_input *input = NULL; + + if (dvbdev) { + output = dvbdev->priv; + input = output->port->input[0]; + } + + if ((file->f_flags & O_ACCMODE) == O_RDONLY) { + if (!input) + return -EINVAL; + ddb_input_stop(input); + } else if ((file->f_flags & O_ACCMODE) == O_WRONLY) { + if (!output) + return -EINVAL; + ddb_output_stop(output); + } + return dvb_generic_release(inode, file); +} + +static int ts_open(struct inode *inode, struct file *file) +{ + int err; + struct dvb_device *dvbdev = file->private_data; + struct ddb_output *output = NULL; + struct ddb_input *input = NULL; + + if (dvbdev) { + output = dvbdev->priv; + input = output->port->input[0]; + } + + if ((file->f_flags & O_ACCMODE) == O_RDONLY) { + if (!input) + return -EINVAL; + if (input->redo || input->redi) + return -EBUSY; + } else if ((file->f_flags & O_ACCMODE) == O_WRONLY) { + if (!output) + return -EINVAL; + } else + return -EINVAL; + err = dvb_generic_open(inode, file); + if (err < 0) + return err; + if ((file->f_flags & O_ACCMODE) == O_RDONLY) + ddb_input_start(input); + else if ((file->f_flags & O_ACCMODE) == O_WRONLY) + ddb_output_start(output); + return err; +} + +static const struct file_operations ci_fops = { + .owner = THIS_MODULE, + .read = ts_read, + .write = ts_write, + .open = ts_open, + .release = ts_release, + .poll = ts_poll, + .mmap 
= NULL, +}; + +static struct dvb_device dvbdev_ci = { + .priv = NULL, + .readers = 1, + .writers = 1, + .users = 2, + .fops = &ci_fops, +}; + + +/****************************************************************************/ +/****************************************************************************/ + +static int locked_gate_ctrl(struct dvb_frontend *fe, int enable) { struct ddb_input *input = fe->sec_priv; struct ddb_port *port = input->port; + struct ddb_dvb *dvb = &port->dvb[input->nr & 1]; int status; if (enable) { mutex_lock(&port->i2c_gate_lock); - status = input->gate_ctrl(fe, 1); + status = dvb->i2c_gate_ctrl(fe, 1); } else { - status = input->gate_ctrl(fe, 0); + status = dvb->i2c_gate_ctrl(fe, 0); mutex_unlock(&port->i2c_gate_lock); } return status; @@ -624,41 +846,42 @@ static int drxk_gate_ctrl(struct dvb_frontend *fe, int enable) static int demod_attach_drxk(struct ddb_input *input) { struct i2c_adapter *i2c = &input->port->i2c->adap; + struct ddb_dvb *dvb = &input->port->dvb[input->nr & 1]; + struct device *dev = input->port->dev->dev; struct dvb_frontend *fe; struct drxk_config config; - struct device *dev = &input->port->dev->pdev->dev; memset(&config, 0, sizeof(config)); - config.microcode_name = "drxk_a3.mc"; - config.qam_demod_parameter_count = 4; config.adr = 0x29 + (input->nr & 1); + config.microcode_name = "drxk_a3.mc"; - fe = input->fe = dvb_attach(drxk_attach, &config, i2c); - if (!input->fe) { + fe = dvb->fe = dvb_attach(drxk_attach, &config, i2c); + if (!fe) { dev_err(dev, "No DRXK found!\n"); return -ENODEV; } fe->sec_priv = input; - input->gate_ctrl = fe->ops.i2c_gate_ctrl; - fe->ops.i2c_gate_ctrl = drxk_gate_ctrl; + dvb->i2c_gate_ctrl = fe->ops.i2c_gate_ctrl; + fe->ops.i2c_gate_ctrl = locked_gate_ctrl; return 0; } static int tuner_attach_tda18271(struct ddb_input *input) { struct i2c_adapter *i2c = &input->port->i2c->adap; + struct ddb_dvb *dvb = &input->port->dvb[input->nr & 1]; + struct device *dev = input->port->dev->dev; struct dvb_frontend *fe; - struct device *dev = &input->port->dev->pdev->dev; - if (input->fe->ops.i2c_gate_ctrl) - input->fe->ops.i2c_gate_ctrl(input->fe, 1); - fe = dvb_attach(tda18271c2dd_attach, input->fe, i2c, 0x60); + if (dvb->fe->ops.i2c_gate_ctrl) + dvb->fe->ops.i2c_gate_ctrl(dvb->fe, 1); + fe = dvb_attach(tda18271c2dd_attach, dvb->fe, i2c, 0x60); + if (dvb->fe->ops.i2c_gate_ctrl) + dvb->fe->ops.i2c_gate_ctrl(dvb->fe, 0); if (!fe) { dev_err(dev, "No TDA18271 found!\n"); return -ENODEV; } - if (input->fe->ops.i2c_gate_ctrl) - input->fe->ops.i2c_gate_ctrl(input->fe, 0); return 0; } @@ -687,43 +910,43 @@ static struct stv0367_config ddb_stv0367_config[] = { static int demod_attach_stv0367(struct ddb_input *input) { struct i2c_adapter *i2c = &input->port->i2c->adap; - struct device *dev = &input->port->dev->pdev->dev; + struct ddb_dvb *dvb = &input->port->dvb[input->nr & 1]; + struct device *dev = input->port->dev->dev; + struct dvb_frontend *fe; /* attach frontend */ - input->fe = dvb_attach(stv0367ddb_attach, + fe = dvb->fe = dvb_attach(stv0367ddb_attach, &ddb_stv0367_config[(input->nr & 1)], i2c); - if (!input->fe) { - dev_err(dev, "stv0367ddb_attach failed (not found?)\n"); + if (!dvb->fe) { + dev_err(dev, "No stv0367 found!\n"); return -ENODEV; } - - input->fe->sec_priv = input; - input->gate_ctrl = input->fe->ops.i2c_gate_ctrl; - input->fe->ops.i2c_gate_ctrl = drxk_gate_ctrl; - + fe->sec_priv = input; + dvb->i2c_gate_ctrl = fe->ops.i2c_gate_ctrl; + fe->ops.i2c_gate_ctrl = locked_gate_ctrl; return 0; } static int 
tuner_tda18212_ping(struct ddb_input *input, unsigned short adr) { struct i2c_adapter *adapter = &input->port->i2c->adap; - struct device *dev = &input->port->dev->pdev->dev; - + struct ddb_dvb *dvb = &input->port->dvb[input->nr & 1]; + struct device *dev = input->port->dev->dev; u8 tda_id[2]; u8 subaddr = 0x00; dev_dbg(dev, "stv0367-tda18212 tuner ping\n"); - if (input->fe->ops.i2c_gate_ctrl) - input->fe->ops.i2c_gate_ctrl(input->fe, 1); + if (dvb->fe->ops.i2c_gate_ctrl) + dvb->fe->ops.i2c_gate_ctrl(dvb->fe, 1); if (i2c_read_regs(adapter, adr, subaddr, tda_id, sizeof(tda_id)) < 0) dev_dbg(dev, "tda18212 ping 1 fail\n"); if (i2c_read_regs(adapter, adr, subaddr, tda_id, sizeof(tda_id)) < 0) dev_warn(dev, "tda18212 ping failed, expect problems\n"); - if (input->fe->ops.i2c_gate_ctrl) - input->fe->ops.i2c_gate_ctrl(input->fe, 0); + if (dvb->fe->ops.i2c_gate_ctrl) + dvb->fe->ops.i2c_gate_ctrl(dvb->fe, 0); return 0; } @@ -731,7 +954,9 @@ static int tuner_tda18212_ping(struct ddb_input *input, unsigned short adr) static int demod_attach_cxd28xx(struct ddb_input *input, int par, int osc24) { struct i2c_adapter *i2c = &input->port->i2c->adap; - struct device *dev = &input->port->dev->pdev->dev; + struct ddb_dvb *dvb = &input->port->dvb[input->nr & 1]; + struct device *dev = input->port->dev->dev; + struct dvb_frontend *fe; struct cxd2841er_config cfg; /* the cxd2841er driver expects 8bit/shifted I2C addresses */ @@ -746,27 +971,26 @@ static int demod_attach_cxd28xx(struct ddb_input *input, int par, int osc24) cfg.flags |= CXD2841ER_TS_SERIAL; /* attach frontend */ - input->fe = dvb_attach(cxd2841er_attach_t_c, &cfg, i2c); + fe = dvb->fe = dvb_attach(cxd2841er_attach_t_c, &cfg, i2c); - if (!input->fe) { - dev_err(dev, "No Sony CXD28xx found!\n"); + if (!dvb->fe) { + dev_err(dev, "No cxd2837/38/43/54 found!\n"); return -ENODEV; } - - input->fe->sec_priv = input; - input->gate_ctrl = input->fe->ops.i2c_gate_ctrl; - input->fe->ops.i2c_gate_ctrl = drxk_gate_ctrl; - + fe->sec_priv = input; + dvb->i2c_gate_ctrl = fe->ops.i2c_gate_ctrl; + fe->ops.i2c_gate_ctrl = locked_gate_ctrl; return 0; } static int tuner_attach_tda18212(struct ddb_input *input, u32 porttype) { struct i2c_adapter *adapter = &input->port->i2c->adap; - struct device *dev = &input->port->dev->pdev->dev; + struct ddb_dvb *dvb = &input->port->dvb[input->nr & 1]; + struct device *dev = input->port->dev->dev; struct i2c_client *client; struct tda18212_config config = { - .fe = input->fe, + .fe = dvb->fe, .if_dvbt_6 = 3550, .if_dvbt_7 = 3700, .if_dvbt_8 = 4150, @@ -804,17 +1028,17 @@ static int tuner_attach_tda18212(struct ddb_input *input, u32 porttype) goto err; } - input->i2c_client[0] = client; + dvb->i2c_client[0] = client; return 0; err: - dev_warn(dev, "TDA18212 tuner not found. Device is not fully operational.\n"); + dev_notice(dev, "TDA18212 tuner not found. 
Device is not fully operational.\n"); return -ENODEV; } -/******************************************************************************/ -/******************************************************************************/ -/******************************************************************************/ +/****************************************************************************/ +/****************************************************************************/ +/****************************************************************************/ static struct stv090x_config stv0900 = { .device = STV0900, @@ -827,6 +1051,9 @@ static struct stv090x_config stv0900 = { .ts1_mode = STV090x_TSMODE_SERIAL_PUNCTURED, .ts2_mode = STV090x_TSMODE_SERIAL_PUNCTURED, + .ts1_tei = 1, + .ts2_tei = 1, + .repeater_level = STV090x_RPTLEVEL_16, .adc1_range = STV090x_ADC_1Vpp, @@ -846,6 +1073,9 @@ static struct stv090x_config stv0900_aa = { .ts1_mode = STV090x_TSMODE_SERIAL_PUNCTURED, .ts2_mode = STV090x_TSMODE_SERIAL_PUNCTURED, + .ts1_tei = 1, + .ts2_tei = 1, + .repeater_level = STV090x_RPTLEVEL_16, .adc1_range = STV090x_ADC_1Vpp, @@ -869,17 +1099,18 @@ static struct stv6110x_config stv6110b = { static int demod_attach_stv0900(struct ddb_input *input, int type) { struct i2c_adapter *i2c = &input->port->i2c->adap; - struct device *dev = &input->port->dev->pdev->dev; struct stv090x_config *feconf = type ? &stv0900_aa : &stv0900; + struct ddb_dvb *dvb = &input->port->dvb[input->nr & 1]; + struct device *dev = input->port->dev->dev; - input->fe = dvb_attach(stv090x_attach, feconf, i2c, - (input->nr & 1) ? STV090x_DEMODULATOR_1 - : STV090x_DEMODULATOR_0); - if (!input->fe) { + dvb->fe = dvb_attach(stv090x_attach, feconf, i2c, + (input->nr & 1) ? STV090x_DEMODULATOR_1 + : STV090x_DEMODULATOR_0); + if (!dvb->fe) { dev_err(dev, "No STV0900 found!\n"); return -ENODEV; } - if (!dvb_attach(lnbh24_attach, input->fe, i2c, 0, + if (!dvb_attach(lnbh24_attach, dvb->fe, i2c, 0, 0, (input->nr & 1) ? (0x09 - type) : (0x0b - type))) { dev_err(dev, "No LNBH24 found!\n"); @@ -891,19 +1122,20 @@ static int demod_attach_stv0900(struct ddb_input *input, int type) static int tuner_attach_stv6110(struct ddb_input *input, int type) { struct i2c_adapter *i2c = &input->port->i2c->adap; - struct device *dev = &input->port->dev->pdev->dev; + struct ddb_dvb *dvb = &input->port->dvb[input->nr & 1]; + struct device *dev = input->port->dev->dev; struct stv090x_config *feconf = type ? &stv0900_aa : &stv0900; struct stv6110x_config *tunerconf = (input->nr & 1) ? 
&stv6110b : &stv6110a; const struct stv6110x_devctl *ctl; - ctl = dvb_attach(stv6110x_attach, input->fe, tunerconf, i2c); + ctl = dvb_attach(stv6110x_attach, dvb->fe, tunerconf, i2c); if (!ctl) { dev_err(dev, "No STV6110X found!\n"); return -ENODEV; } dev_info(dev, "attach tuner input %d adr %02x\n", - input->nr, tunerconf->addr); + input->nr, tunerconf->addr); feconf->tuner_init = ctl->tuner_init; feconf->tuner_sleep = ctl->tuner_sleep; @@ -920,491 +1152,502 @@ static int tuner_attach_stv6110(struct ddb_input *input, int type) return 0; } -static int my_dvb_dmx_ts_card_init(struct dvb_demux *dvbdemux, char *id, - int (*start_feed)(struct dvb_demux_feed *), - int (*stop_feed)(struct dvb_demux_feed *), - void *priv) -{ - dvbdemux->priv = priv; +static const struct stv0910_cfg stv0910_p = { + .adr = 0x68, + .parallel = 1, + .rptlvl = 4, + .clk = 30000000, +}; - dvbdemux->filternum = 256; - dvbdemux->feednum = 256; - dvbdemux->start_feed = start_feed; - dvbdemux->stop_feed = stop_feed; - dvbdemux->write_to_decoder = NULL; - dvbdemux->dmx.capabilities = (DMX_TS_FILTERING | - DMX_SECTION_FILTERING | - DMX_MEMORY_BASED_FILTERING); - return dvb_dmx_init(dvbdemux); +static const struct lnbh25_config lnbh25_cfg = { + .i2c_address = 0x0c << 1, + .data2_config = LNBH25_TEN +}; + +static int demod_attach_stv0910(struct ddb_input *input, int type) +{ + struct i2c_adapter *i2c = &input->port->i2c->adap; + struct ddb_dvb *dvb = &input->port->dvb[input->nr & 1]; + struct device *dev = input->port->dev->dev; + struct stv0910_cfg cfg = stv0910_p; + struct lnbh25_config lnbcfg = lnbh25_cfg; + + if (stv0910_single) + cfg.single = 1; + + if (type) + cfg.parallel = 2; + dvb->fe = dvb_attach(stv0910_attach, i2c, &cfg, (input->nr & 1)); + if (!dvb->fe) { + cfg.adr = 0x6c; + dvb->fe = dvb_attach(stv0910_attach, i2c, + &cfg, (input->nr & 1)); + } + if (!dvb->fe) { + dev_err(dev, "No STV0910 found!\n"); + return -ENODEV; + } + + /* attach lnbh25 - leftshift by one as the lnbh25 driver expects 8bit + * i2c addresses + */ + lnbcfg.i2c_address = (((input->nr & 1) ? 0x0d : 0x0c) << 1); + if (!dvb_attach(lnbh25_attach, dvb->fe, &lnbcfg, i2c)) { + lnbcfg.i2c_address = (((input->nr & 1) ? 0x09 : 0x08) << 1); + if (!dvb_attach(lnbh25_attach, dvb->fe, &lnbcfg, i2c)) { + dev_err(dev, "No LNBH25 found!\n"); + return -ENODEV; + } + } + + return 0; } -static int my_dvb_dmxdev_ts_card_init(struct dmxdev *dmxdev, - struct dvb_demux *dvbdemux, - struct dmx_frontend *hw_frontend, - struct dmx_frontend *mem_frontend, - struct dvb_adapter *dvb_adapter) +static int tuner_attach_stv6111(struct ddb_input *input, int type) { - int ret; + struct i2c_adapter *i2c = &input->port->i2c->adap; + struct ddb_dvb *dvb = &input->port->dvb[input->nr & 1]; + struct device *dev = input->port->dev->dev; + struct dvb_frontend *fe; + u8 adr = (type ? 0 : 4) + ((input->nr & 1) ? 
0x63 : 0x60); - dmxdev->filternum = 256; - dmxdev->demux = &dvbdemux->dmx; - dmxdev->capabilities = 0; - ret = dvb_dmxdev_init(dmxdev, dvb_adapter); - if (ret < 0) - return ret; - - hw_frontend->source = DMX_FRONTEND_0; - dvbdemux->dmx.add_frontend(&dvbdemux->dmx, hw_frontend); - mem_frontend->source = DMX_MEMORY_FE; - dvbdemux->dmx.add_frontend(&dvbdemux->dmx, mem_frontend); - return dvbdemux->dmx.connect_frontend(&dvbdemux->dmx, hw_frontend); + fe = dvb_attach(stv6111_attach, dvb->fe, i2c, adr); + if (!fe) { + fe = dvb_attach(stv6111_attach, dvb->fe, i2c, adr & ~4); + if (!fe) { + dev_err(dev, "No STV6111 found at 0x%02x!\n", adr); + return -ENODEV; + } + } + return 0; } static int start_feed(struct dvb_demux_feed *dvbdmxfeed) { struct dvb_demux *dvbdmx = dvbdmxfeed->demux; struct ddb_input *input = dvbdmx->priv; + struct ddb_dvb *dvb = &input->port->dvb[input->nr & 1]; - if (!input->users) - ddb_input_start(input); + if (!dvb->users) + ddb_input_start_all(input); - return ++input->users; + return ++dvb->users; } static int stop_feed(struct dvb_demux_feed *dvbdmxfeed) { struct dvb_demux *dvbdmx = dvbdmxfeed->demux; struct ddb_input *input = dvbdmx->priv; + struct ddb_dvb *dvb = &input->port->dvb[input->nr & 1]; - if (--input->users) - return input->users; + if (--dvb->users) + return dvb->users; - ddb_input_stop(input); + ddb_input_stop_all(input); return 0; } - static void dvb_input_detach(struct ddb_input *input) { - struct dvb_adapter *adap = &input->adap; - struct dvb_demux *dvbdemux = &input->demux; + struct ddb_dvb *dvb = &input->port->dvb[input->nr & 1]; + struct dvb_demux *dvbdemux = &dvb->demux; struct i2c_client *client; - switch (input->attached) { - case 5: - client = input->i2c_client[0]; + switch (dvb->attached) { + case 0x31: + if (dvb->fe2) + dvb_unregister_frontend(dvb->fe2); + if (dvb->fe) + dvb_unregister_frontend(dvb->fe); + /* fallthrough */ + case 0x30: + if (dvb->fe2) + dvb_frontend_detach(dvb->fe2); + if (dvb->fe) + dvb_frontend_detach(dvb->fe); + dvb->fe = dvb->fe2 = NULL; + /* fallthrough */ + case 0x20: + client = dvb->i2c_client[0]; if (client) { module_put(client->dev.driver->owner); i2c_unregister_device(client); } - if (input->fe2) { - dvb_unregister_frontend(input->fe2); - input->fe2 = NULL; - } - if (input->fe) { - dvb_unregister_frontend(input->fe); - dvb_frontend_detach(input->fe); - input->fe = NULL; - } - /* fall-through */ - case 4: - dvb_net_release(&input->dvbnet); - /* fall-through */ - case 3: - dvbdemux->dmx.close(&dvbdemux->dmx); + + dvb_net_release(&dvb->dvbnet); + /* fallthrough */ + case 0x12: dvbdemux->dmx.remove_frontend(&dvbdemux->dmx, - &input->hw_frontend); + &dvb->hw_frontend); dvbdemux->dmx.remove_frontend(&dvbdemux->dmx, - &input->mem_frontend); - dvb_dmxdev_release(&input->dmxdev); - /* fall-through */ - case 2: - dvb_dmx_release(&input->demux); - /* fall-through */ - case 1: - dvb_unregister_adapter(adap); + &dvb->mem_frontend); + /* fallthrough */ + case 0x11: + dvb_dmxdev_release(&dvb->dmxdev); + /* fallthrough */ + case 0x10: + dvb_dmx_release(&dvb->demux); + /* fallthrough */ + case 0x01: + break; + } + dvb->attached = 0x00; +} + +static int dvb_register_adapters(struct ddb *dev) +{ + int i, ret = 0; + struct ddb_port *port; + struct dvb_adapter *adap; + + if (adapter_alloc == 3) { + port = &dev->port[0]; + adap = port->dvb[0].adap; + ret = dvb_register_adapter(adap, "DDBridge", THIS_MODULE, + port->dev->dev, + adapter_nr); + if (ret < 0) + return ret; + port->dvb[0].adap_registered = 1; + for (i = 0; i < dev->port_num; i++) { 
+ port = &dev->port[i]; + port->dvb[0].adap = adap; + port->dvb[1].adap = adap; + } + return 0; + } + + for (i = 0; i < dev->port_num; i++) { + port = &dev->port[i]; + switch (port->class) { + case DDB_PORT_TUNER: + adap = port->dvb[0].adap; + ret = dvb_register_adapter(adap, "DDBridge", + THIS_MODULE, + port->dev->dev, + adapter_nr); + if (ret < 0) + return ret; + port->dvb[0].adap_registered = 1; + + if (adapter_alloc > 0) { + port->dvb[1].adap = port->dvb[0].adap; + break; + } + adap = port->dvb[1].adap; + ret = dvb_register_adapter(adap, "DDBridge", + THIS_MODULE, + port->dev->dev, + adapter_nr); + if (ret < 0) + return ret; + port->dvb[1].adap_registered = 1; + break; + + case DDB_PORT_CI: + case DDB_PORT_LOOP: + adap = port->dvb[0].adap; + ret = dvb_register_adapter(adap, "DDBridge", + THIS_MODULE, + port->dev->dev, + adapter_nr); + if (ret < 0) + return ret; + port->dvb[0].adap_registered = 1; + break; + default: + if (adapter_alloc < 2) + break; + adap = port->dvb[0].adap; + ret = dvb_register_adapter(adap, "DDBridge", + THIS_MODULE, + port->dev->dev, + adapter_nr); + if (ret < 0) + return ret; + port->dvb[0].adap_registered = 1; + break; + } + } + return ret; +} + +static void dvb_unregister_adapters(struct ddb *dev) +{ + int i; + struct ddb_port *port; + struct ddb_dvb *dvb; + + for (i = 0; i < dev->link[0].info->port_num; i++) { + port = &dev->port[i]; + + dvb = &port->dvb[0]; + if (dvb->adap_registered) + dvb_unregister_adapter(dvb->adap); + dvb->adap_registered = 0; + + dvb = &port->dvb[1]; + if (dvb->adap_registered) + dvb_unregister_adapter(dvb->adap); + dvb->adap_registered = 0; } - input->attached = 0; } static int dvb_input_attach(struct ddb_input *input) { - int ret; + int ret = 0; + struct ddb_dvb *dvb = &input->port->dvb[input->nr & 1]; struct ddb_port *port = input->port; - struct dvb_adapter *adap = &input->adap; - struct dvb_demux *dvbdemux = &input->demux; - struct device *dev = &input->port->dev->pdev->dev; - int sony_osc24 = 0, sony_tspar = 0; + struct dvb_adapter *adap = dvb->adap; + struct dvb_demux *dvbdemux = &dvb->demux; + int par = 0, osc24 = 0; - ret = dvb_register_adapter(adap, "DDBridge", THIS_MODULE, - &input->port->dev->pdev->dev, - adapter_nr); - if (ret < 0) { - dev_err(dev, "Could not register adapter. 
Check if you enabled enough adapters in dvb-core!\n"); - return ret; - } - input->attached = 1; + dvb->attached = 0x01; - ret = my_dvb_dmx_ts_card_init(dvbdemux, "SW demux", - start_feed, - stop_feed, input); + dvbdemux->priv = input; + dvbdemux->dmx.capabilities = DMX_TS_FILTERING | + DMX_SECTION_FILTERING | DMX_MEMORY_BASED_FILTERING; + dvbdemux->start_feed = start_feed; + dvbdemux->stop_feed = stop_feed; + dvbdemux->filternum = dvbdemux->feednum = 256; + ret = dvb_dmx_init(dvbdemux); if (ret < 0) return ret; - input->attached = 2; + dvb->attached = 0x10; - ret = my_dvb_dmxdev_ts_card_init(&input->dmxdev, &input->demux, - &input->hw_frontend, - &input->mem_frontend, adap); + dvb->dmxdev.filternum = 256; + dvb->dmxdev.demux = &dvbdemux->dmx; + ret = dvb_dmxdev_init(&dvb->dmxdev, adap); if (ret < 0) return ret; - input->attached = 3; + dvb->attached = 0x11; - ret = dvb_net_init(adap, &input->dvbnet, input->dmxdev.demux); + dvb->mem_frontend.source = DMX_MEMORY_FE; + dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->mem_frontend); + dvb->hw_frontend.source = DMX_FRONTEND_0; + dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->hw_frontend); + ret = dvbdemux->dmx.connect_frontend(&dvbdemux->dmx, &dvb->hw_frontend); if (ret < 0) return ret; - input->attached = 4; + dvb->attached = 0x12; - input->fe = NULL; + ret = dvb_net_init(adap, &dvb->dvbnet, dvb->dmxdev.demux); + if (ret < 0) + return ret; + dvb->attached = 0x20; + + dvb->fe = dvb->fe2 = NULL; switch (port->type) { + case DDB_TUNER_MXL5XX: + if (fe_attach_mxl5xx(input) < 0) + return -ENODEV; + break; case DDB_TUNER_DVBS_ST: if (demod_attach_stv0900(input, 0) < 0) return -ENODEV; if (tuner_attach_stv6110(input, 0) < 0) return -ENODEV; - if (input->fe) { - if (dvb_register_frontend(adap, input->fe) < 0) - return -ENODEV; - } break; case DDB_TUNER_DVBS_ST_AA: if (demod_attach_stv0900(input, 1) < 0) return -ENODEV; if (tuner_attach_stv6110(input, 1) < 0) return -ENODEV; - if (input->fe) { - if (dvb_register_frontend(adap, input->fe) < 0) - return -ENODEV; - } + break; + case DDB_TUNER_DVBS_STV0910: + if (demod_attach_stv0910(input, 0) < 0) + return -ENODEV; + if (tuner_attach_stv6111(input, 0) < 0) + return -ENODEV; + break; + case DDB_TUNER_DVBS_STV0910_PR: + if (demod_attach_stv0910(input, 1) < 0) + return -ENODEV; + if (tuner_attach_stv6111(input, 1) < 0) + return -ENODEV; + break; + case DDB_TUNER_DVBS_STV0910_P: + if (demod_attach_stv0910(input, 0) < 0) + return -ENODEV; + if (tuner_attach_stv6111(input, 1) < 0) + return -ENODEV; break; case DDB_TUNER_DVBCT_TR: if (demod_attach_drxk(input) < 0) return -ENODEV; if (tuner_attach_tda18271(input) < 0) return -ENODEV; - if (dvb_register_frontend(adap, input->fe) < 0) - return -ENODEV; - if (input->fe2) { - if (dvb_register_frontend(adap, input->fe2) < 0) - return -ENODEV; - input->fe2->tuner_priv = input->fe->tuner_priv; - memcpy(&input->fe2->ops.tuner_ops, - &input->fe->ops.tuner_ops, - sizeof(struct dvb_tuner_ops)); - } break; case DDB_TUNER_DVBCT_ST: if (demod_attach_stv0367(input) < 0) return -ENODEV; - if (tuner_attach_tda18212(input, port->type) < 0) + if (tuner_attach_tda18212(input, port->type) < 0) { + if (dvb->fe2) + dvb_frontend_detach(dvb->fe2); + if (dvb->fe) + dvb_frontend_detach(dvb->fe); return -ENODEV; - if (input->fe) { - if (dvb_register_frontend(adap, input->fe) < 0) - return -ENODEV; } break; case DDB_TUNER_DVBC2T2I_SONY_P: + if (input->port->dev->link[input->port->lnr].info->ts_quirks & + TS_QUIRK_ALT_OSC) + osc24 = 0; + else + osc24 = 1; + /* fall-through */ case 
DDB_TUNER_DVBCT2_SONY_P: case DDB_TUNER_DVBC2T2_SONY_P: case DDB_TUNER_ISDBT_SONY_P: - if (port->type == DDB_TUNER_DVBC2T2I_SONY_P) - sony_osc24 = 1; - if (input->port->dev->info->ts_quirks & TS_QUIRK_ALT_OSC) - sony_osc24 = 0; - if (input->port->dev->info->ts_quirks & TS_QUIRK_SERIAL) - sony_tspar = 0; + if (input->port->dev->link[input->port->lnr].info->ts_quirks + & TS_QUIRK_SERIAL) + par = 0; else - sony_tspar = 1; - - if (demod_attach_cxd28xx(input, sony_tspar, sony_osc24) < 0) + par = 1; + if (demod_attach_cxd28xx(input, par, osc24) < 0) return -ENODEV; - if (tuner_attach_tda18212(input, port->type) < 0) + if (tuner_attach_tda18212(input, port->type) < 0) { + if (dvb->fe2) + dvb_frontend_detach(dvb->fe2); + if (dvb->fe) + dvb_frontend_detach(dvb->fe); return -ENODEV; - if (input->fe) { - if (dvb_register_frontend(adap, input->fe) < 0) - return -ENODEV; } break; - case DDB_TUNER_XO2_DVBC2T2I_SONY: - case DDB_TUNER_XO2_DVBCT2_SONY: - case DDB_TUNER_XO2_DVBC2T2_SONY: - case DDB_TUNER_XO2_ISDBT_SONY: - if (port->type == DDB_TUNER_XO2_DVBC2T2I_SONY) - sony_osc24 = 1; - - if (demod_attach_cxd28xx(input, 0, sony_osc24) < 0) + case DDB_TUNER_DVBC2T2I_SONY: + osc24 = 1; + /* fall-through */ + case DDB_TUNER_DVBCT2_SONY: + case DDB_TUNER_DVBC2T2_SONY: + case DDB_TUNER_ISDBT_SONY: + if (demod_attach_cxd28xx(input, 0, osc24) < 0) return -ENODEV; - if (tuner_attach_tda18212(input, port->type) < 0) + if (tuner_attach_tda18212(input, port->type) < 0) { + if (dvb->fe2) + dvb_frontend_detach(dvb->fe2); + if (dvb->fe) + dvb_frontend_detach(dvb->fe); return -ENODEV; - if (input->fe) { - if (dvb_register_frontend(adap, input->fe) < 0) - return -ENODEV; } break; + default: + return 0; + } + dvb->attached = 0x30; + + if (dvb->fe) { + if (dvb_register_frontend(adap, dvb->fe) < 0) + return -ENODEV; + + if (dvb->fe2) { + if (dvb_register_frontend(adap, dvb->fe2) < 0) + return -ENODEV; + dvb->fe2->tuner_priv = dvb->fe->tuner_priv; + memcpy(&dvb->fe2->ops.tuner_ops, + &dvb->fe->ops.tuner_ops, + sizeof(struct dvb_tuner_ops)); + } } - input->attached = 5; + dvb->attached = 0x31; return 0; } -/****************************************************************************/ -/****************************************************************************/ - -static ssize_t ts_write(struct file *file, const __user char *buf, - size_t count, loff_t *ppos) +static int port_has_encti(struct ddb_port *port) { - struct dvb_device *dvbdev = file->private_data; - struct ddb_output *output = dvbdev->priv; - size_t left = count; - int stat; + struct device *dev = port->dev->dev; + u8 val; + int ret = i2c_read_reg(&port->i2c->adap, 0x20, 0, &val); - while (left) { - if (ddb_output_free(output) < 188) { - if (file->f_flags & O_NONBLOCK) - break; - if (wait_event_interruptible( - output->wq, ddb_output_free(output) >= 188) < 0) - break; - } - stat = ddb_output_write(output, buf, left); - if (stat < 0) - break; - buf += stat; - left -= stat; - } - return (left == count) ? -EAGAIN : (count - left); + if (!ret) + dev_info(dev, "[0x20]=0x%02x\n", val); + return ret ? 
0 : 1; } -static ssize_t ts_read(struct file *file, __user char *buf, - size_t count, loff_t *ppos) +static int port_has_cxd(struct ddb_port *port, u8 *type) { - struct dvb_device *dvbdev = file->private_data; - struct ddb_output *output = dvbdev->priv; - struct ddb_input *input = output->port->input[0]; - int left, read; + u8 val; + u8 probe[4] = { 0xe0, 0x00, 0x00, 0x00 }, data[4]; + struct i2c_msg msgs[2] = {{ .addr = 0x40, .flags = 0, + .buf = probe, .len = 4 }, + { .addr = 0x40, .flags = I2C_M_RD, + .buf = data, .len = 4 } }; + val = i2c_transfer(&port->i2c->adap, msgs, 2); + if (val != 2) + return 0; - count -= count % 188; - left = count; - while (left) { - if (ddb_input_avail(input) < 188) { - if (file->f_flags & O_NONBLOCK) - break; - if (wait_event_interruptible( - input->wq, ddb_input_avail(input) >= 188) < 0) - break; - } - read = ddb_input_read(input, buf, left); - if (read < 0) - return read; - left -= read; - buf += read; - } - return (left == count) ? -EAGAIN : (count - left); + if (data[0] == 0x02 && data[1] == 0x2b && data[3] == 0x43) + *type = 2; + else + *type = 1; + return 1; } -static unsigned int ts_poll(struct file *file, poll_table *wait) +static int port_has_xo2(struct ddb_port *port, u8 *type, u8 *id) { - /* - struct dvb_device *dvbdev = file->private_data; - struct ddb_output *output = dvbdev->priv; - struct ddb_input *input = output->port->input[0]; - */ - unsigned int mask = 0; + u8 probe[1] = { 0x00 }, data[4]; -#if 0 - if (data_avail_to_read) - mask |= POLLIN | POLLRDNORM; - if (data_avail_to_write) - mask |= POLLOUT | POLLWRNORM; - - poll_wait(file, &read_queue, wait); - poll_wait(file, &write_queue, wait); -#endif - return mask; + if (i2c_io(&port->i2c->adap, 0x10, probe, 1, data, 4)) + return 0; + if (data[0] == 'D' && data[1] == 'F') { + *id = data[2]; + *type = 1; + return 1; + } + if (data[0] == 'C' && data[1] == 'I') { + *id = data[2]; + *type = 2; + return 1; + } + return 0; } -static const struct file_operations ci_fops = { - .owner = THIS_MODULE, - .read = ts_read, - .write = ts_write, - .open = dvb_generic_open, - .release = dvb_generic_release, - .poll = ts_poll, -}; - -static struct dvb_device dvbdev_ci = { - .readers = -1, - .writers = -1, - .users = -1, - .fops = &ci_fops, -}; - -/****************************************************************************/ -/****************************************************************************/ -/****************************************************************************/ - -static void input_tasklet(unsigned long data) +static int port_has_stv0900(struct ddb_port *port) { - struct ddb_input *input = (struct ddb_input *) data; - struct ddb *dev = input->port->dev; + u8 val; - spin_lock(&input->lock); - if (!input->running) { - spin_unlock(&input->lock); - return; - } - input->stat = ddbreadl(DMA_BUFFER_CURRENT(input->nr)); - - if (input->port->class == DDB_PORT_TUNER) { - if (4&ddbreadl(DMA_BUFFER_CONTROL(input->nr))) - dev_err(&dev->pdev->dev, "Overflow input %d\n", input->nr); - while (input->cbuf != ((input->stat >> 11) & 0x1f) - || (4 & safe_ddbreadl(dev, DMA_BUFFER_CONTROL(input->nr)))) { - dvb_dmx_swfilter_packets(&input->demux, - input->vbuf[input->cbuf], - input->dma_buf_size / 188); - - input->cbuf = (input->cbuf + 1) % input->dma_buf_num; - ddbwritel((input->cbuf << 11), - DMA_BUFFER_ACK(input->nr)); - input->stat = ddbreadl(DMA_BUFFER_CURRENT(input->nr)); - } - } - if (input->port->class == DDB_PORT_CI) - wake_up(&input->wq); - spin_unlock(&input->lock); + if 
(i2c_read_reg16(&port->i2c->adap, 0x69, 0xf100, &val) < 0) + return 0; + return 1; } -static void output_tasklet(unsigned long data) +static int port_has_stv0900_aa(struct ddb_port *port, u8 *id) { - struct ddb_output *output = (struct ddb_output *) data; - struct ddb *dev = output->port->dev; - - spin_lock(&output->lock); - if (!output->running) { - spin_unlock(&output->lock); - return; - } - output->stat = ddbreadl(DMA_BUFFER_CURRENT(output->nr + 8)); - wake_up(&output->wq); - spin_unlock(&output->lock); + if (i2c_read_reg16(&port->i2c->adap, 0x68, 0xf100, id) < 0) + return 0; + return 1; } - -static struct cxd2099_cfg cxd_cfg = { - .bitrate = 62000, - .adr = 0x40, - .polarity = 1, - .clock_mode = 1, - .max_i2c = 512, -}; - -static int ddb_ci_attach(struct ddb_port *port) +static int port_has_drxks(struct ddb_port *port) { - int ret; + u8 val; - ret = dvb_register_adapter(&port->output->adap, - "DDBridge", - THIS_MODULE, - &port->dev->pdev->dev, - adapter_nr); - if (ret < 0) - return ret; - port->en = cxd2099_attach(&cxd_cfg, port, &port->i2c->adap); - if (!port->en) { - dvb_unregister_adapter(&port->output->adap); - return -ENODEV; - } - ddb_input_start(port->input[0]); - ddb_output_start(port->output); - dvb_ca_en50221_init(&port->output->adap, - port->en, 0, 1); - ret = dvb_register_device(&port->output->adap, &port->output->dev, - &dvbdev_ci, (void *) port->output, - DVB_DEVICE_SEC, 0); - return ret; + if (i2c_read(&port->i2c->adap, 0x29, &val) < 0) + return 0; + if (i2c_read(&port->i2c->adap, 0x2a, &val) < 0) + return 0; + return 1; } -static int ddb_port_attach(struct ddb_port *port) +static int port_has_stv0367(struct ddb_port *port) { - struct device *dev = &port->dev->pdev->dev; - int ret = 0; + u8 val; - switch (port->class) { - case DDB_PORT_TUNER: - ret = dvb_input_attach(port->input[0]); - if (ret < 0) - break; - ret = dvb_input_attach(port->input[1]); - break; - case DDB_PORT_CI: - ret = ddb_ci_attach(port); - break; - default: - break; - } - if (ret < 0) - dev_err(dev, "port_attach on port %d failed\n", port->nr); - return ret; + if (i2c_read_reg16(&port->i2c->adap, 0x1e, 0xf000, &val) < 0) + return 0; + if (val != 0x60) + return 0; + if (i2c_read_reg16(&port->i2c->adap, 0x1f, 0xf000, &val) < 0) + return 0; + if (val != 0x60) + return 0; + return 1; } -static int ddb_ports_attach(struct ddb *dev) -{ - int i, ret = 0; - struct ddb_port *port; - - for (i = 0; i < dev->info->port_num; i++) { - port = &dev->port[i]; - ret = ddb_port_attach(port); - if (ret < 0) - break; - } - return ret; -} - -static void ddb_ports_detach(struct ddb *dev) -{ - int i; - struct ddb_port *port; - - for (i = 0; i < dev->info->port_num; i++) { - port = &dev->port[i]; - switch (port->class) { - case DDB_PORT_TUNER: - dvb_input_detach(port->input[0]); - dvb_input_detach(port->input[1]); - break; - case DDB_PORT_CI: - dvb_unregister_device(port->output->dev); - if (port->en) { - ddb_input_stop(port->input[0]); - ddb_output_stop(port->output); - dvb_ca_en50221_release(port->en); - kfree(port->en); - port->en = NULL; - dvb_unregister_adapter(&port->output->adap); - } - break; - } - } -} - -/****************************************************************************/ -/****************************************************************************/ - static int init_xo2(struct ddb_port *port) { struct i2c_adapter *i2c = &port->i2c->adap; - struct device *dev = &port->dev->pdev->dev; + struct ddb *dev = port->dev; u8 val, data[2]; int res; @@ -1413,7 +1656,7 @@ static int init_xo2(struct ddb_port *port) 
return res; if (data[0] != 0x01) { - dev_info(dev, "Port %d: invalid XO2\n", port->nr); + dev_info(dev->dev, "Port %d: invalid XO2\n", port->nr); return -1; } @@ -1429,11 +1672,16 @@ static int init_xo2(struct ddb_port *port) i2c_write_reg(i2c, 0x10, 0x08, 0x07); /* speed: 0=55,1=75,2=90,3=104 MBit/s */ - i2c_write_reg(i2c, 0x10, 0x09, - ((xo2_speed >= 0 && xo2_speed <= 3) ? xo2_speed : 2)); + i2c_write_reg(i2c, 0x10, 0x09, xo2_speed); - i2c_write_reg(i2c, 0x10, 0x0a, 0x01); - i2c_write_reg(i2c, 0x10, 0x0b, 0x01); + if (dev->link[port->lnr].info->con_clock) { + dev_info(dev->dev, "Setting continuous clock for XO2\n"); + i2c_write_reg(i2c, 0x10, 0x0a, 0x03); + i2c_write_reg(i2c, 0x10, 0x0b, 0x03); + } else { + i2c_write_reg(i2c, 0x10, 0x0a, 0x01); + i2c_write_reg(i2c, 0x10, 0x0b, 0x01); + } usleep_range(2000, 3000); /* Start XO2 PLL */ @@ -1442,76 +1690,52 @@ static int init_xo2(struct ddb_port *port) return 0; } -static int port_has_xo2(struct ddb_port *port, u8 *type, u8 *id) +static int init_xo2_ci(struct ddb_port *port) { - u8 probe[1] = { 0x00 }, data[4]; + struct i2c_adapter *i2c = &port->i2c->adap; + struct ddb *dev = port->dev; + u8 val, data[2]; + int res; - *type = DDB_XO2_TYPE_NONE; + res = i2c_read_regs(i2c, 0x10, 0x04, data, 2); + if (res < 0) + return res; - if (i2c_io(&port->i2c->adap, 0x10, probe, 1, data, 4)) - return 0; - if (data[0] == 'D' && data[1] == 'F') { - *id = data[2]; - *type = DDB_XO2_TYPE_DUOFLEX; - return 1; + if (data[0] > 1) { + dev_info(dev->dev, "Port %d: invalid XO2 CI %02x\n", + port->nr, data[0]); + return -1; } - if (data[0] == 'C' && data[1] == 'I') { - *id = data[2]; - *type = DDB_XO2_TYPE_CI; - return 1; + dev_info(dev->dev, "Port %d: DuoFlex CI %u.%u\n", + port->nr, data[0], data[1]); + + i2c_read_reg(i2c, 0x10, 0x08, &val); + if (val != 0) { + i2c_write_reg(i2c, 0x10, 0x08, 0x00); + msleep(100); + } + /* Enable both CI */ + i2c_write_reg(i2c, 0x10, 0x08, 3); + usleep_range(2000, 3000); + + + /* speed: 0=55,1=75,2=90,3=104 MBit/s */ + i2c_write_reg(i2c, 0x10, 0x09, 1); + + i2c_write_reg(i2c, 0x10, 0x08, 0x83); + usleep_range(2000, 3000); + + if (dev->link[port->lnr].info->con_clock) { + dev_info(dev->dev, "Setting continuous clock for DuoFlex CI\n"); + i2c_write_reg(i2c, 0x10, 0x0a, 0x03); + i2c_write_reg(i2c, 0x10, 0x0b, 0x03); + } else { + i2c_write_reg(i2c, 0x10, 0x0a, 0x01); + i2c_write_reg(i2c, 0x10, 0x0b, 0x01); } return 0; } -/****************************************************************************/ -/****************************************************************************/ - -static int port_has_ci(struct ddb_port *port) -{ - u8 val; - return i2c_read_reg(&port->i2c->adap, 0x40, 0, &val) ? 
0 : 1; -} - -static int port_has_stv0900(struct ddb_port *port) -{ - u8 val; - if (i2c_read_reg16(&port->i2c->adap, 0x69, 0xf100, &val) < 0) - return 0; - return 1; -} - -static int port_has_stv0900_aa(struct ddb_port *port) -{ - u8 val; - if (i2c_read_reg16(&port->i2c->adap, 0x68, 0xf100, &val) < 0) - return 0; - return 1; -} - -static int port_has_drxks(struct ddb_port *port) -{ - u8 val; - if (i2c_read(&port->i2c->adap, 0x29, &val) < 0) - return 0; - if (i2c_read(&port->i2c->adap, 0x2a, &val) < 0) - return 0; - return 1; -} - -static int port_has_stv0367(struct ddb_port *port) -{ - u8 val; - if (i2c_read_reg16(&port->i2c->adap, 0x1e, 0xf000, &val) < 0) - return 0; - if (val != 0x60) - return 0; - if (i2c_read_reg16(&port->i2c->adap, 0x1f, 0xf000, &val) < 0) - return 0; - if (val != 0x60) - return 0; - return 1; -} - static int port_has_cxd28xx(struct ddb_port *port, u8 *id) { struct i2c_adapter *i2c = &port->i2c->adap; @@ -1526,190 +1750,933 @@ static int port_has_cxd28xx(struct ddb_port *port, u8 *id) return 1; } +static char *xo2names[] = { + "DUAL DVB-S2", "DUAL DVB-C/T/T2", + "DUAL DVB-ISDBT", "DUAL DVB-C/C2/T/T2", + "DUAL ATSC", "DUAL DVB-C/C2/T/T2,ISDB-T", + "", "" +}; + +static char *xo2types[] = { + "DVBS_ST", "DVBCT2_SONY", + "ISDBT_SONY", "DVBC2T2_SONY", + "ATSC_ST", "DVBC2T2I_SONY" +}; + static void ddb_port_probe(struct ddb_port *port) { struct ddb *dev = port->dev; - char *modname = "NO MODULE"; - u8 xo2_type, xo2_id, cxd_id; + u32 l = port->lnr; + u8 id, type; + port->name = "NO MODULE"; + port->type_name = "NONE"; port->class = DDB_PORT_NONE; - if (port_has_ci(port)) { - modname = "CI"; - port->class = DDB_PORT_CI; - ddbwritel(I2C_SPEED_400, port->i2c->regs + I2C_TIMING); - } else if (port_has_xo2(port, &xo2_type, &xo2_id)) { - dev_dbg(&dev->pdev->dev, "Port %d (TAB %d): XO2 type: %d, id: %d\n", - port->nr, port->nr+1, xo2_type, xo2_id); + /* Handle missing ports and ports without I2C */ - ddbwritel(I2C_SPEED_400, port->i2c->regs + I2C_TIMING); - - switch (xo2_type) { - case DDB_XO2_TYPE_DUOFLEX: - init_xo2(port); - switch (xo2_id >> 2) { - case 0: - modname = "DUAL DVB-S2 (unsupported)"; - port->class = DDB_PORT_NONE; - port->type = DDB_TUNER_XO2_DVBS_STV0910; - break; - case 1: - modname = "DUAL DVB-C/T/T2"; - port->class = DDB_PORT_TUNER; - port->type = DDB_TUNER_XO2_DVBCT2_SONY; - break; - case 2: - modname = "DUAL DVB-ISDBT"; - port->class = DDB_PORT_TUNER; - port->type = DDB_TUNER_XO2_ISDBT_SONY; - break; - case 3: - modname = "DUAL DVB-C/C2/T/T2"; - port->class = DDB_PORT_TUNER; - port->type = DDB_TUNER_XO2_DVBC2T2_SONY; - break; - case 4: - modname = "DUAL ATSC (unsupported)"; - port->class = DDB_PORT_NONE; - port->type = DDB_TUNER_XO2_ATSC_ST; - break; - case 5: - modname = "DUAL DVB-C/C2/T/T2/ISDBT"; - port->class = DDB_PORT_TUNER; - port->type = DDB_TUNER_XO2_DVBC2T2I_SONY; - break; - default: - modname = "Unknown XO2 DuoFlex module\n"; - break; - } - break; - case DDB_XO2_TYPE_CI: - dev_info(&dev->pdev->dev, "DuoFlex CI modules not supported\n"); - break; - default: - dev_info(&dev->pdev->dev, "Unknown XO2 DuoFlex module\n"); - break; - } - } else if (port_has_cxd28xx(port, &cxd_id)) { - switch (cxd_id) { - case 0xa4: - modname = "DUAL DVB-C2T2 CXD2843"; - port->class = DDB_PORT_TUNER; - port->type = DDB_TUNER_DVBC2T2_SONY_P; - break; - case 0xb1: - modname = "DUAL DVB-CT2 CXD2837"; - port->class = DDB_PORT_TUNER; - port->type = DDB_TUNER_DVBCT2_SONY_P; - break; - case 0xb0: - modname = "DUAL ISDB-T CXD2838"; - port->class = DDB_PORT_TUNER; - port->type = 
DDB_TUNER_ISDBT_SONY_P; - break; - case 0xc1: - modname = "DUAL DVB-C2T2 ISDB-T CXD2854"; - port->class = DDB_PORT_TUNER; - port->type = DDB_TUNER_DVBC2T2I_SONY_P; - break; - default: - modname = "Unknown CXD28xx tuner"; - break; - } - ddbwritel(I2C_SPEED_400, port->i2c->regs + I2C_TIMING); - } else if (port_has_stv0900(port)) { - modname = "DUAL DVB-S2"; - port->class = DDB_PORT_TUNER; - port->type = DDB_TUNER_DVBS_ST; - ddbwritel(I2C_SPEED_100, port->i2c->regs + I2C_TIMING); - } else if (port_has_stv0900_aa(port)) { - modname = "DUAL DVB-S2"; - port->class = DDB_PORT_TUNER; - port->type = DDB_TUNER_DVBS_ST_AA; - ddbwritel(I2C_SPEED_100, port->i2c->regs + I2C_TIMING); - } else if (port_has_drxks(port)) { - modname = "DUAL DVB-C/T"; - port->class = DDB_PORT_TUNER; - port->type = DDB_TUNER_DVBCT_TR; - ddbwritel(I2C_SPEED_400, port->i2c->regs + I2C_TIMING); - } else if (port_has_stv0367(port)) { - modname = "DUAL DVB-C/T"; - port->class = DDB_PORT_TUNER; - port->type = DDB_TUNER_DVBCT_ST; - ddbwritel(I2C_SPEED_100, port->i2c->regs + I2C_TIMING); + if (port->nr == ts_loop) { + port->name = "TS LOOP"; + port->class = DDB_PORT_LOOP; + return; } - dev_info(&dev->pdev->dev, "Port %d (TAB %d): %s\n", - port->nr, port->nr+1, modname); + if (port->nr == 1 && dev->link[l].info->type == DDB_OCTOPUS_CI && + dev->link[l].info->i2c_mask == 1) { + port->name = "NO TAB"; + port->class = DDB_PORT_NONE; + return; + } + + if (dev->link[l].info->type == DDB_OCTOPUS_MAX) { + port->name = "DUAL DVB-S2 MAX"; + port->type_name = "MXL5XX"; + port->class = DDB_PORT_TUNER; + port->type = DDB_TUNER_MXL5XX; + if (port->i2c) + ddbwritel(dev, I2C_SPEED_400, + port->i2c->regs + I2C_TIMING); + return; + } + + if (port->nr > 1 && dev->link[l].info->type == DDB_OCTOPUS_CI) { + port->name = "CI internal"; + port->type_name = "INTERNAL"; + port->class = DDB_PORT_CI; + port->type = DDB_CI_INTERNAL; + } + + if (!port->i2c) + return; + + /* Probe ports with I2C */ + + if (port_has_cxd(port, &id)) { + if (id == 1) { + port->name = "CI"; + port->type_name = "CXD2099"; + port->class = DDB_PORT_CI; + port->type = DDB_CI_EXTERNAL_SONY; + ddbwritel(dev, I2C_SPEED_400, + port->i2c->regs + I2C_TIMING); + } else { + dev_info(dev->dev, "Port %d: Uninitialized DuoFlex\n", + port->nr); + return; + } + } else if (port_has_xo2(port, &type, &id)) { + ddbwritel(dev, I2C_SPEED_400, port->i2c->regs + I2C_TIMING); + /*dev_info(dev->dev, "XO2 ID %02x\n", id);*/ + if (type == 2) { + port->name = "DuoFlex CI"; + port->class = DDB_PORT_CI; + port->type = DDB_CI_EXTERNAL_XO2; + port->type_name = "CI_XO2"; + init_xo2_ci(port); + return; + } + id >>= 2; + if (id > 5) { + port->name = "unknown XO2 DuoFlex"; + port->type_name = "UNKNOWN"; + } else { + port->name = xo2names[id]; + port->class = DDB_PORT_TUNER; + port->type = DDB_TUNER_XO2 + id; + port->type_name = xo2types[id]; + init_xo2(port); + } + } else if (port_has_cxd28xx(port, &id)) { + switch (id) { + case 0xa4: + port->name = "DUAL DVB-C2T2 CXD2843"; + port->type = DDB_TUNER_DVBC2T2_SONY_P; + port->type_name = "DVBC2T2_SONY"; + break; + case 0xb1: + port->name = "DUAL DVB-CT2 CXD2837"; + port->type = DDB_TUNER_DVBCT2_SONY_P; + port->type_name = "DVBCT2_SONY"; + break; + case 0xb0: + port->name = "DUAL ISDB-T CXD2838"; + port->type = DDB_TUNER_ISDBT_SONY_P; + port->type_name = "ISDBT_SONY"; + break; + case 0xc1: + port->name = "DUAL DVB-C2T2 ISDB-T CXD2854"; + port->type = DDB_TUNER_DVBC2T2I_SONY_P; + port->type_name = "DVBC2T2I_ISDBT_SONY"; + break; + default: + return; + } + port->class = 
DDB_PORT_TUNER; + ddbwritel(dev, I2C_SPEED_400, port->i2c->regs + I2C_TIMING); + } else if (port_has_stv0900(port)) { + port->name = "DUAL DVB-S2"; + port->class = DDB_PORT_TUNER; + port->type = DDB_TUNER_DVBS_ST; + port->type_name = "DVBS_ST"; + ddbwritel(dev, I2C_SPEED_100, port->i2c->regs + I2C_TIMING); + } else if (port_has_stv0900_aa(port, &id)) { + port->name = "DUAL DVB-S2"; + port->class = DDB_PORT_TUNER; + if (id == 0x51) { + if (port->nr == 0 && + dev->link[l].info->ts_quirks & TS_QUIRK_REVERSED) + port->type = DDB_TUNER_DVBS_STV0910_PR; + else + port->type = DDB_TUNER_DVBS_STV0910_P; + port->type_name = "DVBS_ST_0910"; + } else { + port->type = DDB_TUNER_DVBS_ST_AA; + port->type_name = "DVBS_ST_AA"; + } + ddbwritel(dev, I2C_SPEED_100, port->i2c->regs + I2C_TIMING); + } else if (port_has_drxks(port)) { + port->name = "DUAL DVB-C/T"; + port->class = DDB_PORT_TUNER; + port->type = DDB_TUNER_DVBCT_TR; + port->type_name = "DVBCT_TR"; + ddbwritel(dev, I2C_SPEED_400, port->i2c->regs + I2C_TIMING); + } else if (port_has_stv0367(port)) { + port->name = "DUAL DVB-C/T"; + port->class = DDB_PORT_TUNER; + port->type = DDB_TUNER_DVBCT_ST; + port->type_name = "DVBCT_ST"; + ddbwritel(dev, I2C_SPEED_100, port->i2c->regs + I2C_TIMING); + } else if (port_has_encti(port)) { + port->name = "ENCTI"; + port->class = DDB_PORT_LOOP; + } } -static void ddb_input_init(struct ddb_port *port, int nr) + +/****************************************************************************/ +/****************************************************************************/ +/****************************************************************************/ + +static int wait_ci_ready(struct ddb_ci *ci) +{ + u32 count = 10; + + ndelay(500); + do { + if (ddbreadl(ci->port->dev, + CI_CONTROL(ci->nr)) & CI_READY) + break; + usleep_range(1, 2); + if ((--count) == 0) + return -1; + } while (1); + return 0; +} + +static int read_attribute_mem(struct dvb_ca_en50221 *ca, + int slot, int address) +{ + struct ddb_ci *ci = ca->data; + u32 val, off = (address >> 1) & (CI_BUFFER_SIZE - 1); + + if (address > CI_BUFFER_SIZE) + return -1; + ddbwritel(ci->port->dev, CI_READ_CMD | (1 << 16) | address, + CI_DO_READ_ATTRIBUTES(ci->nr)); + wait_ci_ready(ci); + val = 0xff & ddbreadl(ci->port->dev, CI_BUFFER(ci->nr) + off); + return val; +} + +static int write_attribute_mem(struct dvb_ca_en50221 *ca, int slot, + int address, u8 value) +{ + struct ddb_ci *ci = ca->data; + + ddbwritel(ci->port->dev, CI_WRITE_CMD | (value << 16) | address, + CI_DO_ATTRIBUTE_RW(ci->nr)); + wait_ci_ready(ci); + return 0; +} + +static int read_cam_control(struct dvb_ca_en50221 *ca, + int slot, u8 address) +{ + u32 count = 100; + struct ddb_ci *ci = ca->data; + u32 res; + + ddbwritel(ci->port->dev, CI_READ_CMD | address, + CI_DO_IO_RW(ci->nr)); + ndelay(500); + do { + res = ddbreadl(ci->port->dev, CI_READDATA(ci->nr)); + if (res & CI_READY) + break; + usleep_range(1, 2); + if ((--count) == 0) + return -1; + } while (1); + return 0xff & res; +} + +static int write_cam_control(struct dvb_ca_en50221 *ca, int slot, + u8 address, u8 value) +{ + struct ddb_ci *ci = ca->data; + + ddbwritel(ci->port->dev, CI_WRITE_CMD | (value << 16) | address, + CI_DO_IO_RW(ci->nr)); + wait_ci_ready(ci); + return 0; +} + +static int slot_reset(struct dvb_ca_en50221 *ca, int slot) +{ + struct ddb_ci *ci = ca->data; + + ddbwritel(ci->port->dev, CI_POWER_ON, + CI_CONTROL(ci->nr)); + msleep(100); + ddbwritel(ci->port->dev, CI_POWER_ON | CI_RESET_CAM, + CI_CONTROL(ci->nr)); + ddbwritel(ci->port->dev, 
CI_ENABLE | CI_POWER_ON | CI_RESET_CAM, + CI_CONTROL(ci->nr)); + udelay(20); + ddbwritel(ci->port->dev, CI_ENABLE | CI_POWER_ON, + CI_CONTROL(ci->nr)); + return 0; +} + +static int slot_shutdown(struct dvb_ca_en50221 *ca, int slot) +{ + struct ddb_ci *ci = ca->data; + + ddbwritel(ci->port->dev, 0, CI_CONTROL(ci->nr)); + msleep(300); + return 0; +} + +static int slot_ts_enable(struct dvb_ca_en50221 *ca, int slot) +{ + struct ddb_ci *ci = ca->data; + u32 val = ddbreadl(ci->port->dev, CI_CONTROL(ci->nr)); + + ddbwritel(ci->port->dev, val | CI_BYPASS_DISABLE, + CI_CONTROL(ci->nr)); + return 0; +} + +static int poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open) +{ + struct ddb_ci *ci = ca->data; + u32 val = ddbreadl(ci->port->dev, CI_CONTROL(ci->nr)); + int stat = 0; + + if (val & CI_CAM_DETECT) + stat |= DVB_CA_EN50221_POLL_CAM_PRESENT; + if (val & CI_CAM_READY) + stat |= DVB_CA_EN50221_POLL_CAM_READY; + return stat; +} + +static struct dvb_ca_en50221 en_templ = { + .read_attribute_mem = read_attribute_mem, + .write_attribute_mem = write_attribute_mem, + .read_cam_control = read_cam_control, + .write_cam_control = write_cam_control, + .slot_reset = slot_reset, + .slot_shutdown = slot_shutdown, + .slot_ts_enable = slot_ts_enable, + .poll_slot_status = poll_slot_status, +}; + +static void ci_attach(struct ddb_port *port) +{ + struct ddb_ci *ci = NULL; + + ci = kzalloc(sizeof(*ci), GFP_KERNEL); + if (!ci) + return; + memcpy(&ci->en, &en_templ, sizeof(en_templ)); + ci->en.data = ci; + port->en = &ci->en; + ci->port = port; + ci->nr = port->nr - 2; +} + +/****************************************************************************/ +/****************************************************************************/ +/****************************************************************************/ + +static int write_creg(struct ddb_ci *ci, u8 data, u8 mask) +{ + struct i2c_adapter *i2c = &ci->port->i2c->adap; + u8 adr = (ci->port->type == DDB_CI_EXTERNAL_XO2) ? 0x12 : 0x13; + + ci->port->creg = (ci->port->creg & ~mask) | data; + return i2c_write_reg(i2c, adr, 0x02, ci->port->creg); +} + +static int read_attribute_mem_xo2(struct dvb_ca_en50221 *ca, + int slot, int address) +{ + struct ddb_ci *ci = ca->data; + struct i2c_adapter *i2c = &ci->port->i2c->adap; + u8 adr = (ci->port->type == DDB_CI_EXTERNAL_XO2) ? 0x12 : 0x13; + int res; + u8 val; + + res = i2c_read_reg16(i2c, adr, 0x8000 | address, &val); + return res ? res : val; +} + +static int write_attribute_mem_xo2(struct dvb_ca_en50221 *ca, int slot, + int address, u8 value) +{ + struct ddb_ci *ci = ca->data; + struct i2c_adapter *i2c = &ci->port->i2c->adap; + u8 adr = (ci->port->type == DDB_CI_EXTERNAL_XO2) ? 0x12 : 0x13; + + return i2c_write_reg16(i2c, adr, 0x8000 | address, value); +} + +static int read_cam_control_xo2(struct dvb_ca_en50221 *ca, + int slot, u8 address) +{ + struct ddb_ci *ci = ca->data; + struct i2c_adapter *i2c = &ci->port->i2c->adap; + u8 adr = (ci->port->type == DDB_CI_EXTERNAL_XO2) ? 0x12 : 0x13; + u8 val; + int res; + + res = i2c_read_reg(i2c, adr, 0x20 | (address & 3), &val); + return res ? res : val; +} + +static int write_cam_control_xo2(struct dvb_ca_en50221 *ca, int slot, + u8 address, u8 value) +{ + struct ddb_ci *ci = ca->data; + struct i2c_adapter *i2c = &ci->port->i2c->adap; + u8 adr = (ci->port->type == DDB_CI_EXTERNAL_XO2) ? 
0x12 : 0x13; + + return i2c_write_reg(i2c, adr, 0x20 | (address & 3), value); +} + +static int slot_reset_xo2(struct dvb_ca_en50221 *ca, int slot) +{ + struct ddb_ci *ci = ca->data; + + dev_dbg(ci->port->dev->dev, "%s\n", __func__); + write_creg(ci, 0x01, 0x01); + write_creg(ci, 0x04, 0x04); + msleep(20); + write_creg(ci, 0x02, 0x02); + write_creg(ci, 0x00, 0x04); + write_creg(ci, 0x18, 0x18); + return 0; +} + +static int slot_shutdown_xo2(struct dvb_ca_en50221 *ca, int slot) +{ + struct ddb_ci *ci = ca->data; + + dev_dbg(ci->port->dev->dev, "%s\n", __func__); + write_creg(ci, 0x10, 0xff); + write_creg(ci, 0x08, 0x08); + return 0; +} + +static int slot_ts_enable_xo2(struct dvb_ca_en50221 *ca, int slot) +{ + struct ddb_ci *ci = ca->data; + + dev_info(ci->port->dev->dev, "%s\n", __func__); + write_creg(ci, 0x00, 0x10); + return 0; +} + +static int poll_slot_status_xo2(struct dvb_ca_en50221 *ca, int slot, int open) +{ + struct ddb_ci *ci = ca->data; + struct i2c_adapter *i2c = &ci->port->i2c->adap; + u8 adr = (ci->port->type == DDB_CI_EXTERNAL_XO2) ? 0x12 : 0x13; + u8 val = 0; + int stat = 0; + + i2c_read_reg(i2c, adr, 0x01, &val); + + if (val & 2) + stat |= DVB_CA_EN50221_POLL_CAM_PRESENT; + if (val & 1) + stat |= DVB_CA_EN50221_POLL_CAM_READY; + return stat; +} + +static struct dvb_ca_en50221 en_xo2_templ = { + .read_attribute_mem = read_attribute_mem_xo2, + .write_attribute_mem = write_attribute_mem_xo2, + .read_cam_control = read_cam_control_xo2, + .write_cam_control = write_cam_control_xo2, + .slot_reset = slot_reset_xo2, + .slot_shutdown = slot_shutdown_xo2, + .slot_ts_enable = slot_ts_enable_xo2, + .poll_slot_status = poll_slot_status_xo2, +}; + +static void ci_xo2_attach(struct ddb_port *port) +{ + struct ddb_ci *ci; + + ci = kzalloc(sizeof(*ci), GFP_KERNEL); + if (!ci) + return; + memcpy(&ci->en, &en_xo2_templ, sizeof(en_xo2_templ)); + ci->en.data = ci; + port->en = &ci->en; + ci->port = port; + ci->nr = port->nr - 2; + ci->port->creg = 0; + write_creg(ci, 0x10, 0xff); + write_creg(ci, 0x08, 0x08); +} + +/****************************************************************************/ +/****************************************************************************/ +/****************************************************************************/ + +static struct cxd2099_cfg cxd_cfg = { + .bitrate = 72000, + .adr = 0x40, + .polarity = 1, + .clock_mode = 1, + .max_i2c = 512, +}; + +static int ddb_ci_attach(struct ddb_port *port) +{ + switch (port->type) { + case DDB_CI_EXTERNAL_SONY: + cxd_cfg.bitrate = ci_bitrate; + port->en = cxd2099_attach(&cxd_cfg, port, &port->i2c->adap); + if (!port->en) + return -ENODEV; + dvb_ca_en50221_init(port->dvb[0].adap, + port->en, 0, 1); + break; + + case DDB_CI_EXTERNAL_XO2: + case DDB_CI_EXTERNAL_XO2_B: + ci_xo2_attach(port); + if (!port->en) + return -ENODEV; + dvb_ca_en50221_init(port->dvb[0].adap, port->en, 0, 1); + break; + + case DDB_CI_INTERNAL: + ci_attach(port); + if (!port->en) + return -ENODEV; + dvb_ca_en50221_init(port->dvb[0].adap, port->en, 0, 1); + break; + } + return 0; +} + +static int ddb_port_attach(struct ddb_port *port) +{ + int ret = 0; + + switch (port->class) { + case DDB_PORT_TUNER: + ret = dvb_input_attach(port->input[0]); + if (ret < 0) + break; + ret = dvb_input_attach(port->input[1]); + if (ret < 0) + break; + port->input[0]->redi = port->input[0]; + port->input[1]->redi = port->input[1]; + break; + case DDB_PORT_CI: + ret = ddb_ci_attach(port); + if (ret < 0) + break; + /* fall-through */ + case DDB_PORT_LOOP: + ret = 
dvb_register_device(port->dvb[0].adap, + &port->dvb[0].dev, + &dvbdev_ci, (void *) port->output, + DVB_DEVICE_SEC, 0); + break; + default: + break; + } + if (ret < 0) + dev_err(port->dev->dev, "port_attach on port %d failed\n", + port->nr); + return ret; +} + +int ddb_ports_attach(struct ddb *dev) +{ + int i, ret = 0; + struct ddb_port *port; + + if (dev->port_num) { + ret = dvb_register_adapters(dev); + if (ret < 0) { + dev_err(dev->dev, "Registering adapters failed. Check DVB_MAX_ADAPTERS in config.\n"); + return ret; + } + } + for (i = 0; i < dev->port_num; i++) { + port = &dev->port[i]; + ret = ddb_port_attach(port); + } + return ret; +} + +void ddb_ports_detach(struct ddb *dev) +{ + int i; + struct ddb_port *port; + + for (i = 0; i < dev->port_num; i++) { + port = &dev->port[i]; + + switch (port->class) { + case DDB_PORT_TUNER: + dvb_input_detach(port->input[0]); + dvb_input_detach(port->input[1]); + break; + case DDB_PORT_CI: + case DDB_PORT_LOOP: + if (port->dvb[0].dev) + dvb_unregister_device(port->dvb[0].dev); + if (port->en) { + dvb_ca_en50221_release(port->en); + kfree(port->en); + port->en = NULL; + } + break; + } + } + dvb_unregister_adapters(dev); +} + + +/* Copy input DMA pointers to output DMA and ACK. */ + +static void input_write_output(struct ddb_input *input, + struct ddb_output *output) +{ + ddbwritel(output->port->dev, + input->dma->stat, DMA_BUFFER_ACK(output->dma)); + output->dma->cbuf = (input->dma->stat >> 11) & 0x1f; + output->dma->coff = (input->dma->stat & 0x7ff) << 7; +} + +static void output_ack_input(struct ddb_output *output, + struct ddb_input *input) +{ + ddbwritel(input->port->dev, + output->dma->stat, DMA_BUFFER_ACK(input->dma)); +} + +static void input_write_dvb(struct ddb_input *input, + struct ddb_input *input2) +{ + struct ddb_dvb *dvb = &input2->port->dvb[input2->nr & 1]; + struct ddb_dma *dma, *dma2; + struct ddb *dev = input->port->dev; + int ack = 1; + + dma = dma2 = input->dma; + /* if there also is an output connected, do not ACK. + * input_write_output will ACK. 
+ */ + if (input->redo) { + dma2 = input->redo->dma; + ack = 0; + } + while (dma->cbuf != ((dma->stat >> 11) & 0x1f) + || (4 & dma->ctrl)) { + if (4 & dma->ctrl) { + /* dev_err(dev->dev, "Overflow dma %d\n", dma->nr); */ + ack = 1; + } + if (alt_dma) + dma_sync_single_for_cpu(dev->dev, dma2->pbuf[dma->cbuf], + dma2->size, DMA_FROM_DEVICE); + dvb_dmx_swfilter_packets(&dvb->demux, + dma2->vbuf[dma->cbuf], + dma2->size / 188); + dma->cbuf = (dma->cbuf + 1) % dma2->num; + if (ack) + ddbwritel(dev, (dma->cbuf << 11), + DMA_BUFFER_ACK(dma)); + dma->stat = safe_ddbreadl(dev, DMA_BUFFER_CURRENT(dma)); + dma->ctrl = safe_ddbreadl(dev, DMA_BUFFER_CONTROL(dma)); + } +} + +static void input_work(struct work_struct *work) +{ + struct ddb_dma *dma = container_of(work, struct ddb_dma, work); + struct ddb_input *input = (struct ddb_input *) dma->io; + struct ddb *dev = input->port->dev; + unsigned long flags; + + spin_lock_irqsave(&dma->lock, flags); + if (!dma->running) { + spin_unlock_irqrestore(&dma->lock, flags); + return; + } + dma->stat = ddbreadl(dev, DMA_BUFFER_CURRENT(dma)); + dma->ctrl = ddbreadl(dev, DMA_BUFFER_CONTROL(dma)); + + if (input->redi) + input_write_dvb(input, input->redi); + if (input->redo) + input_write_output(input, input->redo); + wake_up(&dma->wq); + spin_unlock_irqrestore(&dma->lock, flags); +} + +static void input_handler(unsigned long data) +{ + struct ddb_input *input = (struct ddb_input *) data; + struct ddb_dma *dma = input->dma; + + + /* If there is no input connected, input_tasklet() will + * just copy pointers and ACK. So, there is no need to go + * through the tasklet scheduler. + */ + if (input->redi) + queue_work(ddb_wq, &dma->work); + else + input_work(&dma->work); +} + +static void output_handler(unsigned long data) +{ + struct ddb_output *output = (struct ddb_output *) data; + struct ddb_dma *dma = output->dma; + struct ddb *dev = output->port->dev; + + spin_lock(&dma->lock); + if (!dma->running) { + spin_unlock(&dma->lock); + return; + } + dma->stat = ddbreadl(dev, DMA_BUFFER_CURRENT(dma)); + dma->ctrl = ddbreadl(dev, DMA_BUFFER_CONTROL(dma)); + if (output->redi) + output_ack_input(output, output->redi); + wake_up(&dma->wq); + spin_unlock(&dma->lock); +} + +/****************************************************************************/ +/****************************************************************************/ + +static const struct ddb_regmap *io_regmap(struct ddb_io *io, int link) +{ + const struct ddb_info *info; + + if (link) + info = io->port->dev->link[io->port->lnr].info; + else + info = io->port->dev->link[0].info; + + if (!info) + return NULL; + + return info->regmap; +} + +static void ddb_dma_init(struct ddb_io *io, int nr, int out) +{ + struct ddb_dma *dma; + const struct ddb_regmap *rm = io_regmap(io, 0); + + dma = out ? 
&io->port->dev->odma[nr] : &io->port->dev->idma[nr]; + io->dma = dma; + dma->io = io; + + spin_lock_init(&dma->lock); + init_waitqueue_head(&dma->wq); + if (out) { + dma->regs = rm->odma->base + rm->odma->size * nr; + dma->bufregs = rm->odma_buf->base + rm->odma_buf->size * nr; + dma->num = OUTPUT_DMA_BUFS; + dma->size = OUTPUT_DMA_SIZE; + dma->div = OUTPUT_DMA_IRQ_DIV; + } else { + INIT_WORK(&dma->work, input_work); + dma->regs = rm->idma->base + rm->idma->size * nr; + dma->bufregs = rm->idma_buf->base + rm->idma_buf->size * nr; + dma->num = INPUT_DMA_BUFS; + dma->size = INPUT_DMA_SIZE; + dma->div = INPUT_DMA_IRQ_DIV; + } + ddbwritel(io->port->dev, 0, DMA_BUFFER_ACK(dma)); + dev_dbg(io->port->dev->dev, "init link %u, io %u, dma %u, dmaregs %08x bufregs %08x\n", + io->port->lnr, io->nr, nr, dma->regs, dma->bufregs); +} + +static void ddb_input_init(struct ddb_port *port, int nr, int pnr, int anr) { struct ddb *dev = port->dev; - struct ddb_input *input = &dev->input[nr]; + struct ddb_input *input = &dev->input[anr]; + const struct ddb_regmap *rm; + port->input[pnr] = input; input->nr = nr; input->port = port; - input->dma_buf_num = INPUT_DMA_BUFS; - input->dma_buf_size = INPUT_DMA_SIZE; - ddbwritel(0, TS_INPUT_CONTROL(nr)); - ddbwritel(2, TS_INPUT_CONTROL(nr)); - ddbwritel(0, TS_INPUT_CONTROL(nr)); - ddbwritel(0, DMA_BUFFER_ACK(nr)); - tasklet_init(&input->tasklet, input_tasklet, (unsigned long) input); - spin_lock_init(&input->lock); - init_waitqueue_head(&input->wq); + rm = io_regmap(input, 1); + input->regs = DDB_LINK_TAG(port->lnr) | + (rm->input->base + rm->input->size * nr); + dev_dbg(dev->dev, "init link %u, input %u, regs %08x\n", + port->lnr, nr, input->regs); + + if (dev->has_dma) { + const struct ddb_regmap *rm0 = io_regmap(input, 0); + u32 base = rm0->irq_base_idma; + u32 dma_nr = nr; + + if (port->lnr) + dma_nr += 32 + (port->lnr - 1) * 8; + + dev_dbg(dev->dev, "init link %u, input %u, handler %u\n", + port->lnr, nr, dma_nr + base); + + dev->handler[0][dma_nr + base] = input_handler; + dev->handler_data[0][dma_nr + base] = (unsigned long) input; + ddb_dma_init(input, dma_nr, 0); + } } static void ddb_output_init(struct ddb_port *port, int nr) { struct ddb *dev = port->dev; struct ddb_output *output = &dev->output[nr]; + const struct ddb_regmap *rm; + + port->output = output; output->nr = nr; output->port = port; - output->dma_buf_num = OUTPUT_DMA_BUFS; - output->dma_buf_size = OUTPUT_DMA_SIZE; + rm = io_regmap(output, 1); + output->regs = DDB_LINK_TAG(port->lnr) | + (rm->output->base + rm->output->size * nr); - ddbwritel(0, TS_OUTPUT_CONTROL(nr)); - ddbwritel(2, TS_OUTPUT_CONTROL(nr)); - ddbwritel(0, TS_OUTPUT_CONTROL(nr)); - tasklet_init(&output->tasklet, output_tasklet, (unsigned long) output); - init_waitqueue_head(&output->wq); -} + dev_dbg(dev->dev, "init link %u, output %u, regs %08x\n", + port->lnr, nr, output->regs); -static void ddb_ports_init(struct ddb *dev) -{ - int i; - struct ddb_port *port; + if (dev->has_dma) { + const struct ddb_regmap *rm0 = io_regmap(output, 0); + u32 base = rm0->irq_base_odma; - for (i = 0; i < dev->info->port_num; i++) { - port = &dev->port[i]; - port->dev = dev; - port->nr = i; - port->i2c = &dev->i2c[i]; - port->input[0] = &dev->input[2 * i]; - port->input[1] = &dev->input[2 * i + 1]; - port->output = &dev->output[i]; - - mutex_init(&port->i2c_gate_lock); - ddb_port_probe(port); - ddb_input_init(port, 2 * i); - ddb_input_init(port, 2 * i + 1); - ddb_output_init(port, i); + dev->handler[0][nr + base] = output_handler; + 
dev->handler_data[0][nr + base] = (unsigned long) output; + ddb_dma_init(output, nr, 1); } } -static void ddb_ports_release(struct ddb *dev) +static int ddb_port_match_i2c(struct ddb_port *port) +{ + struct ddb *dev = port->dev; + u32 i; + + for (i = 0; i < dev->i2c_num; i++) { + if (dev->i2c[i].link == port->lnr && + dev->i2c[i].nr == port->nr) { + port->i2c = &dev->i2c[i]; + return 1; + } + } + return 0; +} + +static int ddb_port_match_link_i2c(struct ddb_port *port) +{ + struct ddb *dev = port->dev; + u32 i; + + for (i = 0; i < dev->i2c_num; i++) { + if (dev->i2c[i].link == port->lnr) { + port->i2c = &dev->i2c[i]; + return 1; + } + } + return 0; +} + +void ddb_ports_init(struct ddb *dev) +{ + u32 i, l, p; + struct ddb_port *port; + const struct ddb_info *info; + const struct ddb_regmap *rm; + + for (p = l = 0; l < DDB_MAX_LINK; l++) { + info = dev->link[l].info; + if (!info) + continue; + rm = info->regmap; + if (!rm) + continue; + for (i = 0; i < info->port_num; i++, p++) { + port = &dev->port[p]; + port->dev = dev; + port->nr = i; + port->lnr = l; + port->pnr = p; + port->gap = 0xffffffff; + port->obr = ci_bitrate; + mutex_init(&port->i2c_gate_lock); + + if (!ddb_port_match_i2c(port)) { + if (info->type == DDB_OCTOPUS_MAX) + ddb_port_match_link_i2c(port); + } + + ddb_port_probe(port); + + port->dvb[0].adap = &dev->adap[2 * p]; + port->dvb[1].adap = &dev->adap[2 * p + 1]; + + if ((port->class == DDB_PORT_NONE) && i && p && + dev->port[p - 1].type == DDB_CI_EXTERNAL_XO2) { + port->class = DDB_PORT_CI; + port->type = DDB_CI_EXTERNAL_XO2_B; + port->name = "DuoFlex CI_B"; + port->i2c = dev->port[p - 1].i2c; + } + + dev_info(dev->dev, "Port %u: Link %u, Link Port %u (TAB %u): %s\n", + port->pnr, port->lnr, port->nr, port->nr + 1, + port->name); + + if (port->class == DDB_PORT_CI && + port->type == DDB_CI_EXTERNAL_XO2) { + ddb_input_init(port, 2 * i, 0, 2 * i); + ddb_output_init(port, i); + continue; + } + + if (port->class == DDB_PORT_CI && + port->type == DDB_CI_EXTERNAL_XO2_B) { + ddb_input_init(port, 2 * i - 1, 0, 2 * i - 1); + ddb_output_init(port, i); + continue; + } + + if (port->class == DDB_PORT_NONE) + continue; + + switch (dev->link[l].info->type) { + case DDB_OCTOPUS_CI: + if (i >= 2) { + ddb_input_init(port, 2 + i, 0, 2 + i); + ddb_input_init(port, 4 + i, 1, 4 + i); + ddb_output_init(port, i); + break; + } /* fallthrough */ + case DDB_OCTOPUS: + ddb_input_init(port, 2 * i, 0, 2 * i); + ddb_input_init(port, 2 * i + 1, 1, 2 * i + 1); + ddb_output_init(port, i); + break; + case DDB_OCTOPUS_MAX: + case DDB_OCTOPUS_MAX_CT: + ddb_input_init(port, 2 * i, 0, 2 * p); + ddb_input_init(port, 2 * i + 1, 1, 2 * p + 1); + break; + default: + break; + } + } + } + dev->port_num = p; +} + +void ddb_ports_release(struct ddb *dev) { int i; struct ddb_port *port; - for (i = 0; i < dev->info->port_num; i++) { + for (i = 0; i < dev->port_num; i++) { port = &dev->port[i]; - port->dev = dev; - tasklet_kill(&port->input[0]->tasklet); - tasklet_kill(&port->input[1]->tasklet); - tasklet_kill(&port->output->tasklet); + if (port->input[0] && port->input[0]->dma) + cancel_work_sync(&port->input[0]->dma->work); + if (port->input[1] && port->input[1]->dma) + cancel_work_sync(&port->input[1]->dma->work); + if (port->output && port->output->dma) + cancel_work_sync(&port->output->dma->work); } } @@ -1717,90 +2684,158 @@ static void ddb_ports_release(struct ddb *dev) /****************************************************************************/ 
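/*
 * Editorial sketch, not part of the patch: the IRQ_HANDLE() macro defined
 * just below expands to a per-bit test of the INTERRUPT_STATUS word plus an
 * indirect call through the dev->handler[] table.  Written out as a plain
 * loop, the dispatch scheme that replaces the old hard-coded tasklet
 * scheduling looks roughly like this (simplified, link 0 only):
 */
static void irq_dispatch_sketch(struct ddb *dev, u32 status)
{
	unsigned int nr;

	for (nr = 0; nr < 32; nr++) {
		/* status bit set and a handler registered for this source? */
		if ((status & (1UL << (nr & 0x1f))) && dev->handler[0][nr])
			dev->handler[0][nr](dev->handler_data[0][nr]);
	}
}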
/****************************************************************************/ -static void irq_handle_i2c(struct ddb *dev, int n) -{ - struct ddb_i2c *i2c = &dev->i2c[n]; +#define IRQ_HANDLE(_nr) \ + do { if ((s & (1UL << ((_nr) & 0x1f))) && dev->handler[0][_nr]) \ + dev->handler[0][_nr](dev->handler_data[0][_nr]); } \ + while (0) - i2c->done = 1; - wake_up(&i2c->wq); +static void irq_handle_msg(struct ddb *dev, u32 s) +{ + dev->i2c_irq++; + IRQ_HANDLE(0); + IRQ_HANDLE(1); + IRQ_HANDLE(2); + IRQ_HANDLE(3); } -static irqreturn_t irq_handler(int irq, void *dev_id) +static void irq_handle_io(struct ddb *dev, u32 s) +{ + dev->ts_irq++; + if ((s & 0x000000f0)) { + IRQ_HANDLE(4); + IRQ_HANDLE(5); + IRQ_HANDLE(6); + IRQ_HANDLE(7); + } + if ((s & 0x0000ff00)) { + IRQ_HANDLE(8); + IRQ_HANDLE(9); + IRQ_HANDLE(10); + IRQ_HANDLE(11); + IRQ_HANDLE(12); + IRQ_HANDLE(13); + IRQ_HANDLE(14); + IRQ_HANDLE(15); + } + if ((s & 0x00ff0000)) { + IRQ_HANDLE(16); + IRQ_HANDLE(17); + IRQ_HANDLE(18); + IRQ_HANDLE(19); + IRQ_HANDLE(20); + IRQ_HANDLE(21); + IRQ_HANDLE(22); + IRQ_HANDLE(23); + } + if ((s & 0xff000000)) { + IRQ_HANDLE(24); + IRQ_HANDLE(25); + IRQ_HANDLE(26); + IRQ_HANDLE(27); + IRQ_HANDLE(28); + IRQ_HANDLE(29); + IRQ_HANDLE(30); + IRQ_HANDLE(31); + } +} + +irqreturn_t ddb_irq_handler0(int irq, void *dev_id) { struct ddb *dev = (struct ddb *) dev_id; - u32 s = ddbreadl(INTERRUPT_STATUS); - - if (!s) - return IRQ_NONE; + u32 s = ddbreadl(dev, INTERRUPT_STATUS); do { - ddbwritel(s, INTERRUPT_ACK); - - if (s & 0x00000001) - irq_handle_i2c(dev, 0); - if (s & 0x00000002) - irq_handle_i2c(dev, 1); - if (s & 0x00000004) - irq_handle_i2c(dev, 2); - if (s & 0x00000008) - irq_handle_i2c(dev, 3); - - if (s & 0x00000100) - tasklet_schedule(&dev->input[0].tasklet); - if (s & 0x00000200) - tasklet_schedule(&dev->input[1].tasklet); - if (s & 0x00000400) - tasklet_schedule(&dev->input[2].tasklet); - if (s & 0x00000800) - tasklet_schedule(&dev->input[3].tasklet); - if (s & 0x00001000) - tasklet_schedule(&dev->input[4].tasklet); - if (s & 0x00002000) - tasklet_schedule(&dev->input[5].tasklet); - if (s & 0x00004000) - tasklet_schedule(&dev->input[6].tasklet); - if (s & 0x00008000) - tasklet_schedule(&dev->input[7].tasklet); - - if (s & 0x00010000) - tasklet_schedule(&dev->output[0].tasklet); - if (s & 0x00020000) - tasklet_schedule(&dev->output[1].tasklet); - if (s & 0x00040000) - tasklet_schedule(&dev->output[2].tasklet); - if (s & 0x00080000) - tasklet_schedule(&dev->output[3].tasklet); - - /* if (s & 0x000f0000) printk(KERN_DEBUG "%08x\n", istat); */ - } while ((s = ddbreadl(INTERRUPT_STATUS))); + if (s & 0x80000000) + return IRQ_NONE; + if (!(s & 0xfffff00)) + return IRQ_NONE; + ddbwritel(dev, s & 0xfffff00, INTERRUPT_ACK); + irq_handle_io(dev, s); + } while ((s = ddbreadl(dev, INTERRUPT_STATUS))); return IRQ_HANDLED; } -/******************************************************************************/ -/******************************************************************************/ -/******************************************************************************/ +irqreturn_t ddb_irq_handler1(int irq, void *dev_id) +{ + struct ddb *dev = (struct ddb *) dev_id; + u32 s = ddbreadl(dev, INTERRUPT_STATUS); -static int flashio(struct ddb *dev, u8 *wbuf, u32 wlen, u8 *rbuf, u32 rlen) + do { + if (s & 0x80000000) + return IRQ_NONE; + if (!(s & 0x0000f)) + return IRQ_NONE; + ddbwritel(dev, s & 0x0000f, INTERRUPT_ACK); + irq_handle_msg(dev, s); + } while ((s = ddbreadl(dev, INTERRUPT_STATUS))); + + return IRQ_HANDLED; +} + 
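/*
 * Editorial sketch, hypothetical and not taken from this patch: one way the
 * PCI probe code could wire up the split MSI handlers above (handler0 for
 * the TS/DMA status bits, handler1 for the i2c/message bits), falling back
 * to the combined ddb_irq_handler() below when only one vector is granted.
 * The helper name, the flag choices and the omitted error unwinding are
 * assumptions, not the driver's actual registration code.
 */
static int ddb_request_irqs_sketch(struct pci_dev *pdev, struct ddb *dev)
{
	int ret, nvec;

	nvec = pci_alloc_irq_vectors(pdev, 1, 2, PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	if (nvec < 0)
		return nvec;

	if (nvec == 2) {
		/* vector 0: TS/DMA interrupts, vector 1: i2c/message interrupts */
		ret = request_irq(pci_irq_vector(pdev, 0), ddb_irq_handler0,
				  0, "ddbridge", dev);
		if (ret)
			return ret;
		return request_irq(pci_irq_vector(pdev, 1), ddb_irq_handler1,
				   0, "ddbridge", dev);
	}

	/* single vector (MSI or legacy INTx): one handler services everything */
	return request_irq(pci_irq_vector(pdev, 0), ddb_irq_handler,
			   IRQF_SHARED, "ddbridge", dev);
}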
+irqreturn_t ddb_irq_handler(int irq, void *dev_id) +{ + struct ddb *dev = (struct ddb *) dev_id; + u32 s = ddbreadl(dev, INTERRUPT_STATUS); + int ret = IRQ_HANDLED; + + if (!s) + return IRQ_NONE; + do { + if (s & 0x80000000) + return IRQ_NONE; + ddbwritel(dev, s, INTERRUPT_ACK); + + if (s & 0x0000000f) + irq_handle_msg(dev, s); + if (s & 0x0fffff00) + irq_handle_io(dev, s); + } while ((s = ddbreadl(dev, INTERRUPT_STATUS))); + + return ret; +} + +/****************************************************************************/ +/****************************************************************************/ +/****************************************************************************/ + +static int reg_wait(struct ddb *dev, u32 reg, u32 bit) +{ + u32 count = 0; + + while (safe_ddbreadl(dev, reg) & bit) { + ndelay(10); + if (++count == 100) + return -1; + } + return 0; +} + +static int flashio(struct ddb *dev, u32 lnr, u8 *wbuf, u32 wlen, u8 *rbuf, + u32 rlen) { u32 data, shift; + u32 tag = DDB_LINK_TAG(lnr); + struct ddb_link *link = &dev->link[lnr]; + mutex_lock(&link->flash_mutex); if (wlen > 4) - ddbwritel(1, SPI_CONTROL); + ddbwritel(dev, 1, tag | SPI_CONTROL); while (wlen > 4) { /* FIXME: check for big-endian */ - data = swab32(*(u32 *)wbuf); + data = swab32(*(u32 *) wbuf); wbuf += 4; wlen -= 4; - ddbwritel(data, SPI_DATA); - while (safe_ddbreadl(dev, SPI_CONTROL) & 0x0004) - ; + ddbwritel(dev, data, tag | SPI_DATA); + if (reg_wait(dev, tag | SPI_CONTROL, 4)) + goto fail; } - if (rlen) - ddbwritel(0x0001 | ((wlen << (8 + 3)) & 0x1f00), SPI_CONTROL); + ddbwritel(dev, 0x0001 | ((wlen << (8 + 3)) & 0x1f00), + tag | SPI_CONTROL); else - ddbwritel(0x0003 | ((wlen << (8 + 3)) & 0x1f00), SPI_CONTROL); + ddbwritel(dev, 0x0003 | ((wlen << (8 + 3)) & 0x1f00), + tag | SPI_CONTROL); data = 0; shift = ((4 - wlen) * 8); @@ -1812,33 +2847,34 @@ static int flashio(struct ddb *dev, u8 *wbuf, u32 wlen, u8 *rbuf, u32 rlen) } if (shift) data <<= shift; - ddbwritel(data, SPI_DATA); - while (safe_ddbreadl(dev, SPI_CONTROL) & 0x0004) - ; + ddbwritel(dev, data, tag | SPI_DATA); + if (reg_wait(dev, tag | SPI_CONTROL, 4)) + goto fail; if (!rlen) { - ddbwritel(0, SPI_CONTROL); - return 0; + ddbwritel(dev, 0, tag | SPI_CONTROL); + goto exit; } if (rlen > 4) - ddbwritel(1, SPI_CONTROL); + ddbwritel(dev, 1, tag | SPI_CONTROL); while (rlen > 4) { - ddbwritel(0xffffffff, SPI_DATA); - while (safe_ddbreadl(dev, SPI_CONTROL) & 0x0004) - ; - data = ddbreadl(SPI_DATA); + ddbwritel(dev, 0xffffffff, tag | SPI_DATA); + if (reg_wait(dev, tag | SPI_CONTROL, 4)) + goto fail; + data = ddbreadl(dev, tag | SPI_DATA); *(u32 *) rbuf = swab32(data); rbuf += 4; rlen -= 4; } - ddbwritel(0x0003 | ((rlen << (8 + 3)) & 0x1F00), SPI_CONTROL); - ddbwritel(0xffffffff, SPI_DATA); - while (safe_ddbreadl(dev, SPI_CONTROL) & 0x0004) - ; + ddbwritel(dev, 0x0003 | ((rlen << (8 + 3)) & 0x1F00), + tag | SPI_CONTROL); + ddbwritel(dev, 0xffffffff, tag | SPI_DATA); + if (reg_wait(dev, tag | SPI_CONTROL, 4)) + goto fail; - data = ddbreadl(SPI_DATA); - ddbwritel(0, SPI_CONTROL); + data = ddbreadl(dev, tag | SPI_DATA); + ddbwritel(dev, 0, tag | SPI_CONTROL); if (rlen < 4) data <<= ((4 - rlen) * 8); @@ -1849,31 +2885,47 @@ static int flashio(struct ddb *dev, u8 *wbuf, u32 wlen, u8 *rbuf, u32 rlen) rbuf++; rlen--; } +exit: + mutex_unlock(&link->flash_mutex); return 0; +fail: + mutex_unlock(&link->flash_mutex); + return -1; } -#define DDB_MAGIC 'd' +int ddbridge_flashread(struct ddb *dev, u32 link, u8 *buf, u32 addr, u32 len) +{ + u8 cmd[4] = {0x03, (addr >> 
16) & 0xff, + (addr >> 8) & 0xff, addr & 0xff}; -struct ddb_flashio { - __user __u8 *write_buf; - __u32 write_len; - __user __u8 *read_buf; - __u32 read_len; -}; + return flashio(dev, link, cmd, 4, buf, len); +} -#define IOCTL_DDB_FLASHIO _IOWR(DDB_MAGIC, 0x00, struct ddb_flashio) +/* + * TODO/FIXME: add/implement IOCTLs from upstream driver + */ #define DDB_NAME "ddbridge" static u32 ddb_num; -static struct ddb *ddbs[32]; -static struct class *ddb_class; static int ddb_major; +static DEFINE_MUTEX(ddb_mutex); + +static int ddb_release(struct inode *inode, struct file *file) +{ + struct ddb *dev = file->private_data; + + dev->ddb_dev_users--; + return 0; +} static int ddb_open(struct inode *inode, struct file *file) { struct ddb *dev = ddbs[iminor(inode)]; + if (dev->ddb_dev_users) + return -EBUSY; + dev->ddb_dev_users++; file->private_data = dev; return 0; } @@ -1881,44 +2933,17 @@ static int ddb_open(struct inode *inode, struct file *file) static long ddb_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct ddb *dev = file->private_data; - __user void *parg = (__user void *)arg; - int res; - switch (cmd) { - case IOCTL_DDB_FLASHIO: - { - struct ddb_flashio fio; - u8 *rbuf, *wbuf; + dev_warn(dev->dev, "DDB IOCTLs unsupported (cmd: %d, arg: %lu)\n", + cmd, arg); - if (copy_from_user(&fio, parg, sizeof(fio))) - return -EFAULT; - - if (fio.write_len > 1028 || fio.read_len > 1028) - return -EINVAL; - if (fio.write_len + fio.read_len > 1028) - return -EINVAL; - - wbuf = &dev->iobuf[0]; - rbuf = wbuf + fio.write_len; - - if (copy_from_user(wbuf, fio.write_buf, fio.write_len)) - return -EFAULT; - res = flashio(dev, wbuf, fio.write_len, rbuf, fio.read_len); - if (res) - return res; - if (copy_to_user(fio.read_buf, rbuf, fio.read_len)) - return -EFAULT; - break; - } - default: - return -ENOTTY; - } - return 0; + return -ENOTTY; } static const struct file_operations ddb_fops = { .unlocked_ioctl = ddb_ioctl, .open = ddb_open, + .release = ddb_release, }; static char *ddb_devnode(struct device *device, umode_t *mode) @@ -1928,369 +2953,684 @@ static char *ddb_devnode(struct device *device, umode_t *mode) return kasprintf(GFP_KERNEL, "ddbridge/card%d", dev->nr); } -static int ddb_class_create(void) +#define __ATTR_MRO(_name, _show) { \ + .attr = { .name = __stringify(_name), .mode = 0444 }, \ + .show = _show, \ +} + +#define __ATTR_MWO(_name, _store) { \ + .attr = { .name = __stringify(_name), .mode = 0222 }, \ + .store = _store, \ +} + +static ssize_t ports_show(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct ddb *dev = dev_get_drvdata(device); + + return sprintf(buf, "%d\n", dev->port_num); +} + +static ssize_t ts_irq_show(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct ddb *dev = dev_get_drvdata(device); + + return sprintf(buf, "%d\n", dev->ts_irq); +} + +static ssize_t i2c_irq_show(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct ddb *dev = dev_get_drvdata(device); + + return sprintf(buf, "%d\n", dev->i2c_irq); +} + +static ssize_t fan_show(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct ddb *dev = dev_get_drvdata(device); + u32 val; + + val = ddbreadl(dev, GPIO_OUTPUT) & 1; + return sprintf(buf, "%d\n", val); +} + +static ssize_t fan_store(struct device *device, struct device_attribute *d, + const char *buf, size_t count) +{ + struct ddb *dev = dev_get_drvdata(device); + u32 val; + + if (sscanf(buf, "%u\n", &val) != 1) + return -EINVAL; + ddbwritel(dev, 1, 
GPIO_DIRECTION); + ddbwritel(dev, val & 1, GPIO_OUTPUT); + return count; +} + +static ssize_t fanspeed_show(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct ddb *dev = dev_get_drvdata(device); + int num = attr->attr.name[8] - 0x30; + struct ddb_link *link = &dev->link[num]; + u32 spd; + + spd = ddblreadl(link, TEMPMON_FANCONTROL) & 0xff; + return sprintf(buf, "%u\n", spd * 100); +} + +static ssize_t temp_show(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct ddb *dev = dev_get_drvdata(device); + struct ddb_link *link = &dev->link[0]; + struct i2c_adapter *adap; + int temp, temp2; + u8 tmp[2]; + + if (!link->info->temp_num) + return sprintf(buf, "no sensor\n"); + adap = &dev->i2c[link->info->temp_bus].adap; + if (i2c_read_regs(adap, 0x48, 0, tmp, 2) < 0) + return sprintf(buf, "read_error\n"); + temp = (tmp[0] << 3) | (tmp[1] >> 5); + temp *= 125; + if (link->info->temp_num == 2) { + if (i2c_read_regs(adap, 0x49, 0, tmp, 2) < 0) + return sprintf(buf, "read_error\n"); + temp2 = (tmp[0] << 3) | (tmp[1] >> 5); + temp2 *= 125; + return sprintf(buf, "%d %d\n", temp, temp2); + } + return sprintf(buf, "%d\n", temp); +} + +static ssize_t ctemp_show(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct ddb *dev = dev_get_drvdata(device); + struct i2c_adapter *adap; + int temp; + u8 tmp[2]; + int num = attr->attr.name[4] - 0x30; + + adap = &dev->i2c[num].adap; + if (!adap) + return 0; + if (i2c_read_regs(adap, 0x49, 0, tmp, 2) < 0) + if (i2c_read_regs(adap, 0x4d, 0, tmp, 2) < 0) + return sprintf(buf, "no sensor\n"); + temp = tmp[0] * 1000; + return sprintf(buf, "%d\n", temp); +} + +static ssize_t led_show(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct ddb *dev = dev_get_drvdata(device); + int num = attr->attr.name[3] - 0x30; + + return sprintf(buf, "%d\n", dev->leds & (1 << num) ? 1 : 0); +} + + +static void ddb_set_led(struct ddb *dev, int num, int val) +{ + if (!dev->link[0].info->led_num) + return; + switch (dev->port[num].class) { + case DDB_PORT_TUNER: + switch (dev->port[num].type) { + case DDB_TUNER_DVBS_ST: + i2c_write_reg16(&dev->i2c[num].adap, + 0x69, 0xf14c, val ? 2 : 0); + break; + case DDB_TUNER_DVBCT_ST: + i2c_write_reg16(&dev->i2c[num].adap, + 0x1f, 0xf00e, 0); + i2c_write_reg16(&dev->i2c[num].adap, + 0x1f, 0xf00f, val ? 1 : 0); + break; + case DDB_TUNER_XO2 ... DDB_TUNER_DVBC2T2I_SONY: + { + u8 v; + + i2c_read_reg(&dev->i2c[num].adap, 0x10, 0x08, &v); + v = (v & ~0x10) | (val ? 
0x10 : 0); + i2c_write_reg(&dev->i2c[num].adap, 0x10, 0x08, v); + break; + } + default: + break; + } + break; + } +} + +static ssize_t led_store(struct device *device, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ddb *dev = dev_get_drvdata(device); + int num = attr->attr.name[3] - 0x30; + u32 val; + + if (sscanf(buf, "%u\n", &val) != 1) + return -EINVAL; + if (val) + dev->leds |= (1 << num); + else + dev->leds &= ~(1 << num); + ddb_set_led(dev, num, val); + return count; +} + +static ssize_t snr_show(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct ddb *dev = dev_get_drvdata(device); + char snr[32]; + int num = attr->attr.name[3] - 0x30; + + if (dev->port[num].type >= DDB_TUNER_XO2) { + if (i2c_read_regs(&dev->i2c[num].adap, 0x10, 0x10, snr, 16) < 0) + return sprintf(buf, "NO SNR\n"); + snr[16] = 0; + } else { + /* serial number at 0x100-0x11f */ + if (i2c_read_regs16(&dev->i2c[num].adap, + 0x57, 0x100, snr, 32) < 0) + if (i2c_read_regs16(&dev->i2c[num].adap, + 0x50, 0x100, snr, 32) < 0) + return sprintf(buf, "NO SNR\n"); + snr[31] = 0; /* in case it is not terminated on EEPROM */ + } + return sprintf(buf, "%s\n", snr); +} + +static ssize_t bsnr_show(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct ddb *dev = dev_get_drvdata(device); + char snr[16]; + + ddbridge_flashread(dev, 0, snr, 0x10, 15); + snr[15] = 0; /* in case it is not terminated on EEPROM */ + return sprintf(buf, "%s\n", snr); +} + +static ssize_t bpsnr_show(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct ddb *dev = dev_get_drvdata(device); + unsigned char snr[32]; + + if (!dev->i2c_num) + return 0; + + if (i2c_read_regs16(&dev->i2c[0].adap, + 0x50, 0x0000, snr, 32) < 0 || + snr[0] == 0xff) + return sprintf(buf, "NO SNR\n"); + snr[31] = 0; /* in case it is not terminated on EEPROM */ + return sprintf(buf, "%s\n", snr); +} + +static ssize_t redirect_show(struct device *device, + struct device_attribute *attr, char *buf) +{ + return 0; +} + +static ssize_t redirect_store(struct device *device, + struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned int i, p; + int res; + + if (sscanf(buf, "%x %x\n", &i, &p) != 2) + return -EINVAL; + res = ddb_redirect(i, p); + if (res < 0) + return res; + dev_info(device, "redirect: %02x, %02x\n", i, p); + return count; +} + +static ssize_t gap_show(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct ddb *dev = dev_get_drvdata(device); + int num = attr->attr.name[3] - 0x30; + + return sprintf(buf, "%d\n", dev->port[num].gap); + +} + +static ssize_t gap_store(struct device *device, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ddb *dev = dev_get_drvdata(device); + int num = attr->attr.name[3] - 0x30; + unsigned int val; + + if (sscanf(buf, "%u\n", &val) != 1) + return -EINVAL; + if (val > 128) + return -EINVAL; + if (val == 128) + val = 0xffffffff; + dev->port[num].gap = val; + return count; +} + +static ssize_t version_show(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct ddb *dev = dev_get_drvdata(device); + + return sprintf(buf, "%08x %08x\n", + dev->link[0].ids.hwid, dev->link[0].ids.regmapid); +} + +static ssize_t hwid_show(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct ddb *dev = dev_get_drvdata(device); + + return sprintf(buf, "0x%08X\n", dev->link[0].ids.hwid); +} + +static ssize_t regmap_show(struct device *device, + struct 
device_attribute *attr, char *buf) +{ + struct ddb *dev = dev_get_drvdata(device); + + return sprintf(buf, "0x%08X\n", dev->link[0].ids.regmapid); +} + +static ssize_t fmode_show(struct device *device, + struct device_attribute *attr, char *buf) +{ + int num = attr->attr.name[5] - 0x30; + struct ddb *dev = dev_get_drvdata(device); + + return sprintf(buf, "%u\n", dev->link[num].lnb.fmode); +} + +static ssize_t devid_show(struct device *device, + struct device_attribute *attr, char *buf) +{ + int num = attr->attr.name[5] - 0x30; + struct ddb *dev = dev_get_drvdata(device); + + return sprintf(buf, "%08x\n", dev->link[num].ids.devid); +} + +static ssize_t fmode_store(struct device *device, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ddb *dev = dev_get_drvdata(device); + int num = attr->attr.name[5] - 0x30; + unsigned int val; + + if (sscanf(buf, "%u\n", &val) != 1) + return -EINVAL; + if (val > 3) + return -EINVAL; + lnb_init_fmode(dev, &dev->link[num], val); + return count; +} + +static struct device_attribute ddb_attrs[] = { + __ATTR_RO(version), + __ATTR_RO(ports), + __ATTR_RO(ts_irq), + __ATTR_RO(i2c_irq), + __ATTR(gap0, 0664, gap_show, gap_store), + __ATTR(gap1, 0664, gap_show, gap_store), + __ATTR(gap2, 0664, gap_show, gap_store), + __ATTR(gap3, 0664, gap_show, gap_store), + __ATTR(fmode0, 0664, fmode_show, fmode_store), + __ATTR(fmode1, 0664, fmode_show, fmode_store), + __ATTR(fmode2, 0664, fmode_show, fmode_store), + __ATTR(fmode3, 0664, fmode_show, fmode_store), + __ATTR_MRO(devid0, devid_show), + __ATTR_MRO(devid1, devid_show), + __ATTR_MRO(devid2, devid_show), + __ATTR_MRO(devid3, devid_show), + __ATTR_RO(hwid), + __ATTR_RO(regmap), + __ATTR(redirect, 0664, redirect_show, redirect_store), + __ATTR_MRO(snr, bsnr_show), + __ATTR_RO(bpsnr), + __ATTR_NULL, +}; + +static struct device_attribute ddb_attrs_temp[] = { + __ATTR_RO(temp), +}; + +static struct device_attribute ddb_attrs_fan[] = { + __ATTR(fan, 0664, fan_show, fan_store), +}; + +static struct device_attribute ddb_attrs_snr[] = { + __ATTR_MRO(snr0, snr_show), + __ATTR_MRO(snr1, snr_show), + __ATTR_MRO(snr2, snr_show), + __ATTR_MRO(snr3, snr_show), +}; + +static struct device_attribute ddb_attrs_ctemp[] = { + __ATTR_MRO(temp0, ctemp_show), + __ATTR_MRO(temp1, ctemp_show), + __ATTR_MRO(temp2, ctemp_show), + __ATTR_MRO(temp3, ctemp_show), +}; + +static struct device_attribute ddb_attrs_led[] = { + __ATTR(led0, 0664, led_show, led_store), + __ATTR(led1, 0664, led_show, led_store), + __ATTR(led2, 0664, led_show, led_store), + __ATTR(led3, 0664, led_show, led_store), +}; + +static struct device_attribute ddb_attrs_fanspeed[] = { + __ATTR_MRO(fanspeed0, fanspeed_show), + __ATTR_MRO(fanspeed1, fanspeed_show), + __ATTR_MRO(fanspeed2, fanspeed_show), + __ATTR_MRO(fanspeed3, fanspeed_show), +}; + +static struct class ddb_class = { + .name = "ddbridge", + .owner = THIS_MODULE, + .devnode = ddb_devnode, +}; + +int ddb_class_create(void) { ddb_major = register_chrdev(0, DDB_NAME, &ddb_fops); if (ddb_major < 0) return ddb_major; - - ddb_class = class_create(THIS_MODULE, DDB_NAME); - if (IS_ERR(ddb_class)) { - unregister_chrdev(ddb_major, DDB_NAME); - return PTR_ERR(ddb_class); - } - ddb_class->devnode = ddb_devnode; - return 0; -} - -static void ddb_class_destroy(void) -{ - class_destroy(ddb_class); - unregister_chrdev(ddb_major, DDB_NAME); -} - -static int ddb_device_create(struct ddb *dev) -{ - dev->nr = ddb_num++; - dev->ddb_dev = device_create(ddb_class, NULL, - MKDEV(ddb_major, dev->nr), - dev, 
"ddbridge%d", dev->nr); - ddbs[dev->nr] = dev; - if (IS_ERR(dev->ddb_dev)) + if (class_register(&ddb_class) < 0) return -1; return 0; } -static void ddb_device_destroy(struct ddb *dev) +void ddb_class_destroy(void) { - ddb_num--; - if (IS_ERR(dev->ddb_dev)) - return; - device_destroy(ddb_class, MKDEV(ddb_major, 0)); + class_unregister(&ddb_class); + unregister_chrdev(ddb_major, DDB_NAME); } +static void ddb_device_attrs_del(struct ddb *dev) +{ + int i; + + for (i = 0; i < 4; i++) + if (dev->link[i].info && dev->link[i].info->tempmon_irq) + device_remove_file(dev->ddb_dev, + &ddb_attrs_fanspeed[i]); + for (i = 0; i < dev->link[0].info->temp_num; i++) + device_remove_file(dev->ddb_dev, &ddb_attrs_temp[i]); + for (i = 0; i < dev->link[0].info->fan_num; i++) + device_remove_file(dev->ddb_dev, &ddb_attrs_fan[i]); + for (i = 0; i < dev->i2c_num && i < 4; i++) { + if (dev->link[0].info->led_num) + device_remove_file(dev->ddb_dev, &ddb_attrs_led[i]); + device_remove_file(dev->ddb_dev, &ddb_attrs_snr[i]); + device_remove_file(dev->ddb_dev, &ddb_attrs_ctemp[i]); + } + for (i = 0; ddb_attrs[i].attr.name != NULL; i++) + device_remove_file(dev->ddb_dev, &ddb_attrs[i]); +} + +static int ddb_device_attrs_add(struct ddb *dev) +{ + int i; + + for (i = 0; ddb_attrs[i].attr.name != NULL; i++) + if (device_create_file(dev->ddb_dev, &ddb_attrs[i])) + goto fail; + for (i = 0; i < dev->link[0].info->temp_num; i++) + if (device_create_file(dev->ddb_dev, &ddb_attrs_temp[i])) + goto fail; + for (i = 0; i < dev->link[0].info->fan_num; i++) + if (device_create_file(dev->ddb_dev, &ddb_attrs_fan[i])) + goto fail; + for (i = 0; (i < dev->i2c_num) && (i < 4); i++) { + if (device_create_file(dev->ddb_dev, &ddb_attrs_snr[i])) + goto fail; + if (device_create_file(dev->ddb_dev, &ddb_attrs_ctemp[i])) + goto fail; + if (dev->link[0].info->led_num) + if (device_create_file(dev->ddb_dev, + &ddb_attrs_led[i])) + goto fail; + } + for (i = 0; i < 4; i++) + if (dev->link[i].info && dev->link[i].info->tempmon_irq) + if (device_create_file(dev->ddb_dev, + &ddb_attrs_fanspeed[i])) + goto fail; + return 0; +fail: + return -1; +} + +int ddb_device_create(struct ddb *dev) +{ + int res = 0; + + if (ddb_num == DDB_MAX_ADAPTER) + return -ENOMEM; + mutex_lock(&ddb_mutex); + dev->nr = ddb_num; + ddbs[dev->nr] = dev; + dev->ddb_dev = device_create(&ddb_class, dev->dev, + MKDEV(ddb_major, dev->nr), + dev, "ddbridge%d", dev->nr); + if (IS_ERR(dev->ddb_dev)) { + res = PTR_ERR(dev->ddb_dev); + dev_info(dev->dev, "Could not create ddbridge%d\n", dev->nr); + goto fail; + } + res = ddb_device_attrs_add(dev); + if (res) { + ddb_device_attrs_del(dev); + device_destroy(&ddb_class, MKDEV(ddb_major, dev->nr)); + ddbs[dev->nr] = NULL; + dev->ddb_dev = ERR_PTR(-ENODEV); + } else + ddb_num++; +fail: + mutex_unlock(&ddb_mutex); + return res; +} + +void ddb_device_destroy(struct ddb *dev) +{ + if (IS_ERR(dev->ddb_dev)) + return; + ddb_device_attrs_del(dev); + device_destroy(&ddb_class, MKDEV(ddb_major, dev->nr)); +} /****************************************************************************/ /****************************************************************************/ /****************************************************************************/ -static void ddb_unmap(struct ddb *dev) +static void tempmon_setfan(struct ddb_link *link) +{ + u32 temp, temp2, pwm; + + if ((ddblreadl(link, TEMPMON_CONTROL) & + TEMPMON_CONTROL_OVERTEMP) != 0) { + dev_info(link->dev->dev, "Over temperature condition\n"); + link->overtemperature_error = 1; + } + temp = 
(ddblreadl(link, TEMPMON_SENSOR0) >> 8) & 0xFF; + if (temp & 0x80) + temp = 0; + temp2 = (ddblreadl(link, TEMPMON_SENSOR1) >> 8) & 0xFF; + if (temp2 & 0x80) + temp2 = 0; + if (temp2 > temp) + temp = temp2; + + pwm = (ddblreadl(link, TEMPMON_FANCONTROL) >> 8) & 0x0F; + if (pwm > 10) + pwm = 10; + + if (temp >= link->temp_tab[pwm]) { + while (pwm < 10 && temp >= link->temp_tab[pwm + 1]) + pwm += 1; + } else { + while (pwm > 1 && temp < link->temp_tab[pwm - 2]) + pwm -= 1; + } + ddblwritel(link, (pwm << 8), TEMPMON_FANCONTROL); +} + +static void temp_handler(unsigned long data) +{ + struct ddb_link *link = (struct ddb_link *) data; + + spin_lock(&link->temp_lock); + tempmon_setfan(link); + spin_unlock(&link->temp_lock); +} + +static int tempmon_init(struct ddb_link *link, int first_time) +{ + struct ddb *dev = link->dev; + int status = 0; + u32 l = link->nr; + + spin_lock_irq(&link->temp_lock); + if (first_time) { + static u8 temperature_table[11] = { + 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80 }; + + memcpy(link->temp_tab, temperature_table, + sizeof(temperature_table)); + } + dev->handler[l][link->info->tempmon_irq] = temp_handler; + dev->handler_data[l][link->info->tempmon_irq] = (unsigned long) link; + ddblwritel(link, (TEMPMON_CONTROL_OVERTEMP | TEMPMON_CONTROL_AUTOSCAN | + TEMPMON_CONTROL_INTENABLE), + TEMPMON_CONTROL); + ddblwritel(link, (3 << 8), TEMPMON_FANCONTROL); + + link->overtemperature_error = + ((ddblreadl(link, TEMPMON_CONTROL) & + TEMPMON_CONTROL_OVERTEMP) != 0); + if (link->overtemperature_error) { + dev_info(link->dev->dev, "Over temperature condition\n"); + status = -1; + } + tempmon_setfan(link); + spin_unlock_irq(&link->temp_lock); + return status; +} + +static int ddb_init_tempmon(struct ddb_link *link) +{ + const struct ddb_info *info = link->info; + + if (!info->tempmon_irq) + return 0; + if (info->type == DDB_OCTOPUS_MAX_CT) + if (link->ids.regmapid < 0x00010002) + return 0; + spin_lock_init(&link->temp_lock); + dev_dbg(link->dev->dev, "init_tempmon\n"); + return tempmon_init(link, 1); +} + +/****************************************************************************/ +/****************************************************************************/ +/****************************************************************************/ + +static int ddb_init_boards(struct ddb *dev) +{ + const struct ddb_info *info; + struct ddb_link *link; + u32 l; + + for (l = 0; l < DDB_MAX_LINK; l++) { + link = &dev->link[l]; + info = link->info; + + if (!info) + continue; + if (info->board_control) { + ddbwritel(dev, 0, DDB_LINK_TAG(l) | BOARD_CONTROL); + msleep(100); + ddbwritel(dev, info->board_control_2, + DDB_LINK_TAG(l) | BOARD_CONTROL); + usleep_range(2000, 3000); + ddbwritel(dev, + info->board_control_2 | info->board_control, + DDB_LINK_TAG(l) | BOARD_CONTROL); + usleep_range(2000, 3000); + } + ddb_init_tempmon(link); + } + return 0; +} + +int ddb_init(struct ddb *dev) +{ + mutex_init(&dev->link[0].lnb.lock); + mutex_init(&dev->link[0].flash_mutex); + if (no_init) { + ddb_device_create(dev); + return 0; + } + + ddb_init_boards(dev); + + if (ddb_i2c_init(dev) < 0) + goto fail; + ddb_ports_init(dev); + if (ddb_buffers_alloc(dev) < 0) { + dev_info(dev->dev, "Could not allocate buffer memory\n"); + goto fail2; + } + if (ddb_ports_attach(dev) < 0) + goto fail3; + + ddb_device_create(dev); + + if (dev->link[0].info->fan_num) { + ddbwritel(dev, 1, GPIO_DIRECTION); + ddbwritel(dev, 1, GPIO_OUTPUT); + } + return 0; + +fail3: + ddb_ports_detach(dev); + dev_err(dev->dev, "fail3\n"); + 
ddb_ports_release(dev); +fail2: + dev_err(dev->dev, "fail2\n"); + ddb_buffers_free(dev); + ddb_i2c_release(dev); +fail: + dev_err(dev->dev, "fail1\n"); + return -1; +} + +void ddb_unmap(struct ddb *dev) { if (dev->regs) iounmap(dev->regs); vfree(dev); } - - -static void ddb_remove(struct pci_dev *pdev) -{ - struct ddb *dev = pci_get_drvdata(pdev); - - ddb_ports_detach(dev); - ddb_i2c_release(dev); - - ddbwritel(0, INTERRUPT_ENABLE); - free_irq(dev->pdev->irq, dev); -#ifdef CONFIG_PCI_MSI - if (dev->msi) - pci_disable_msi(dev->pdev); -#endif - ddb_ports_release(dev); - ddb_buffers_free(dev); - ddb_device_destroy(dev); - - ddb_unmap(dev); - pci_set_drvdata(pdev, NULL); - pci_disable_device(pdev); -} - - -static int ddb_probe(struct pci_dev *pdev, const struct pci_device_id *id) -{ - struct ddb *dev; - int stat = 0; - int irq_flag = IRQF_SHARED; - - if (pci_enable_device(pdev) < 0) - return -ENODEV; - - dev = vzalloc(sizeof(struct ddb)); - if (dev == NULL) - return -ENOMEM; - - dev->pdev = pdev; - pci_set_drvdata(pdev, dev); - dev->info = (struct ddb_info *) id->driver_data; - dev_info(&pdev->dev, "Detected %s\n", dev->info->name); - - dev->regs = ioremap(pci_resource_start(dev->pdev, 0), - pci_resource_len(dev->pdev, 0)); - if (!dev->regs) { - stat = -ENOMEM; - goto fail; - } - dev_info(&pdev->dev, "HW %08x FW %08x\n", ddbreadl(0), ddbreadl(4)); - -#ifdef CONFIG_PCI_MSI - if (pci_msi_enabled()) - stat = pci_enable_msi(dev->pdev); - if (stat) { - dev_info(&pdev->dev, "MSI not available.\n"); - } else { - irq_flag = 0; - dev->msi = 1; - } -#endif - stat = request_irq(dev->pdev->irq, irq_handler, - irq_flag, "DDBridge", (void *) dev); - if (stat < 0) - goto fail1; - ddbwritel(0, DMA_BASE_WRITE); - ddbwritel(0, DMA_BASE_READ); - ddbwritel(0xffffffff, INTERRUPT_ACK); - ddbwritel(0xfff0f, INTERRUPT_ENABLE); - ddbwritel(0, MSI1_ENABLE); - - /* board control */ - if (dev->info->board_control) { - ddbwritel(0, DDB_LINK_TAG(0) | BOARD_CONTROL); - msleep(100); - ddbwritel(dev->info->board_control_2, - DDB_LINK_TAG(0) | BOARD_CONTROL); - usleep_range(2000, 3000); - ddbwritel(dev->info->board_control_2 - | dev->info->board_control, - DDB_LINK_TAG(0) | BOARD_CONTROL); - usleep_range(2000, 3000); - } - - if (ddb_i2c_init(dev) < 0) - goto fail1; - ddb_ports_init(dev); - if (ddb_buffers_alloc(dev) < 0) { - dev_err(&pdev->dev, "Could not allocate buffer memory\n"); - goto fail2; - } - if (ddb_ports_attach(dev) < 0) - goto fail3; - ddb_device_create(dev); - return 0; - -fail3: - ddb_ports_detach(dev); - dev_err(&pdev->dev, "fail3\n"); - ddb_ports_release(dev); -fail2: - dev_err(&pdev->dev, "fail2\n"); - ddb_buffers_free(dev); -fail1: - dev_err(&pdev->dev, "fail1\n"); - if (dev->msi) - pci_disable_msi(dev->pdev); - if (stat == 0) - free_irq(dev->pdev->irq, dev); -fail: - dev_err(&pdev->dev, "fail\n"); - ddb_unmap(dev); - pci_set_drvdata(pdev, NULL); - pci_disable_device(pdev); - return -1; -} - -/******************************************************************************/ -/******************************************************************************/ -/******************************************************************************/ - -static const struct ddb_info ddb_none = { - .type = DDB_NONE, - .name = "Digital Devices PCIe bridge", -}; - -static const struct ddb_info ddb_octopus = { - .type = DDB_OCTOPUS, - .name = "Digital Devices Octopus DVB adapter", - .port_num = 4, -}; - -static const struct ddb_info ddb_octopus_le = { - .type = DDB_OCTOPUS, - .name = "Digital Devices Octopus LE DVB adapter", - 
.port_num = 2, -}; - -static const struct ddb_info ddb_octopus_oem = { - .type = DDB_OCTOPUS, - .name = "Digital Devices Octopus OEM", - .port_num = 4, -}; - -static const struct ddb_info ddb_octopus_mini = { - .type = DDB_OCTOPUS, - .name = "Digital Devices Octopus Mini", - .port_num = 4, -}; - -static const struct ddb_info ddb_v6 = { - .type = DDB_OCTOPUS, - .name = "Digital Devices Cine S2 V6 DVB adapter", - .port_num = 3, -}; -static const struct ddb_info ddb_v6_5 = { - .type = DDB_OCTOPUS, - .name = "Digital Devices Cine S2 V6.5 DVB adapter", - .port_num = 4, -}; - -static const struct ddb_info ddb_dvbct = { - .type = DDB_OCTOPUS, - .name = "Digital Devices DVBCT V6.1 DVB adapter", - .port_num = 3, -}; - -static const struct ddb_info ddb_ctv7 = { - .type = DDB_OCTOPUS, - .name = "Digital Devices Cine CT V7 DVB adapter", - .port_num = 4, - .board_control = 3, - .board_control_2 = 4, -}; - -static const struct ddb_info ddb_satixS2v3 = { - .type = DDB_OCTOPUS, - .name = "Mystique SaTiX-S2 V3 DVB adapter", - .port_num = 3, -}; - -static const struct ddb_info ddb_octopusv3 = { - .type = DDB_OCTOPUS, - .name = "Digital Devices Octopus V3 DVB adapter", - .port_num = 4, -}; - -/*** MaxA8 adapters ***********************************************************/ - -static struct ddb_info ddb_ct2_8 = { - .type = DDB_OCTOPUS_MAX_CT, - .name = "Digital Devices MAX A8 CT2", - .port_num = 4, - .board_control = 0x0ff, - .board_control_2 = 0xf00, - .ts_quirks = TS_QUIRK_SERIAL, -}; - -static struct ddb_info ddb_c2t2_8 = { - .type = DDB_OCTOPUS_MAX_CT, - .name = "Digital Devices MAX A8 C2T2", - .port_num = 4, - .board_control = 0x0ff, - .board_control_2 = 0xf00, - .ts_quirks = TS_QUIRK_SERIAL, -}; - -static struct ddb_info ddb_isdbt_8 = { - .type = DDB_OCTOPUS_MAX_CT, - .name = "Digital Devices MAX A8 ISDBT", - .port_num = 4, - .board_control = 0x0ff, - .board_control_2 = 0xf00, - .ts_quirks = TS_QUIRK_SERIAL, -}; - -static struct ddb_info ddb_c2t2i_v0_8 = { - .type = DDB_OCTOPUS_MAX_CT, - .name = "Digital Devices MAX A8 C2T2I V0", - .port_num = 4, - .board_control = 0x0ff, - .board_control_2 = 0xf00, - .ts_quirks = TS_QUIRK_SERIAL | TS_QUIRK_ALT_OSC, -}; - -static struct ddb_info ddb_c2t2i_8 = { - .type = DDB_OCTOPUS_MAX_CT, - .name = "Digital Devices MAX A8 C2T2I", - .port_num = 4, - .board_control = 0x0ff, - .board_control_2 = 0xf00, - .ts_quirks = TS_QUIRK_SERIAL, -}; - -/******************************************************************************/ - -#define DDVID 0xdd01 /* Digital Devices Vendor ID */ - -#define DDB_ID(_vend, _dev, _subvend, _subdev, _driverdata) { \ - .vendor = _vend, .device = _dev, \ - .subvendor = _subvend, .subdevice = _subdev, \ - .driver_data = (unsigned long)&_driverdata } - -static const struct pci_device_id ddb_id_tbl[] = { - DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus), - DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus), - DDB_ID(DDVID, 0x0005, DDVID, 0x0004, ddb_octopusv3), - DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le), - DDB_ID(DDVID, 0x0003, DDVID, 0x0003, ddb_octopus_oem), - DDB_ID(DDVID, 0x0003, DDVID, 0x0010, ddb_octopus_mini), - DDB_ID(DDVID, 0x0005, DDVID, 0x0011, ddb_octopus_mini), - DDB_ID(DDVID, 0x0003, DDVID, 0x0020, ddb_v6), - DDB_ID(DDVID, 0x0003, DDVID, 0x0021, ddb_v6_5), - DDB_ID(DDVID, 0x0003, DDVID, 0x0030, ddb_dvbct), - DDB_ID(DDVID, 0x0003, DDVID, 0xdb03, ddb_satixS2v3), - DDB_ID(DDVID, 0x0006, DDVID, 0x0031, ddb_ctv7), - DDB_ID(DDVID, 0x0006, DDVID, 0x0032, ddb_ctv7), - DDB_ID(DDVID, 0x0006, DDVID, 0x0033, ddb_ctv7), - DDB_ID(DDVID, 
0x0008, DDVID, 0x0034, ddb_ct2_8), - DDB_ID(DDVID, 0x0008, DDVID, 0x0035, ddb_c2t2_8), - DDB_ID(DDVID, 0x0008, DDVID, 0x0036, ddb_isdbt_8), - DDB_ID(DDVID, 0x0008, DDVID, 0x0037, ddb_c2t2i_v0_8), - DDB_ID(DDVID, 0x0008, DDVID, 0x0038, ddb_c2t2i_8), - DDB_ID(DDVID, 0x0006, DDVID, 0x0039, ddb_ctv7), - /* in case sub-ids got deleted in flash */ - DDB_ID(DDVID, 0x0003, PCI_ANY_ID, PCI_ANY_ID, ddb_none), - DDB_ID(DDVID, 0x0005, PCI_ANY_ID, PCI_ANY_ID, ddb_none), - DDB_ID(DDVID, 0x0006, PCI_ANY_ID, PCI_ANY_ID, ddb_none), - DDB_ID(DDVID, 0x0007, PCI_ANY_ID, PCI_ANY_ID, ddb_none), - DDB_ID(DDVID, 0x0008, PCI_ANY_ID, PCI_ANY_ID, ddb_none), - DDB_ID(DDVID, 0x0011, PCI_ANY_ID, PCI_ANY_ID, ddb_none), - DDB_ID(DDVID, 0x0013, PCI_ANY_ID, PCI_ANY_ID, ddb_none), - DDB_ID(DDVID, 0x0201, PCI_ANY_ID, PCI_ANY_ID, ddb_none), - DDB_ID(DDVID, 0x0320, PCI_ANY_ID, PCI_ANY_ID, ddb_none), - {0} -}; -MODULE_DEVICE_TABLE(pci, ddb_id_tbl); - - -static struct pci_driver ddb_pci_driver = { - .name = "DDBridge", - .id_table = ddb_id_tbl, - .probe = ddb_probe, - .remove = ddb_remove, -}; - -static __init int module_init_ddbridge(void) -{ - int ret; - - pr_info("Digital Devices PCIE bridge driver, Copyright (C) 2010-11 Digital Devices GmbH\n"); - - ret = ddb_class_create(); - if (ret < 0) - return ret; - ret = pci_register_driver(&ddb_pci_driver); - if (ret < 0) - ddb_class_destroy(); - return ret; -} - -static __exit void module_exit_ddbridge(void) -{ - pci_unregister_driver(&ddb_pci_driver); - ddb_class_destroy(); -} - -module_init(module_init_ddbridge); -module_exit(module_exit_ddbridge); - -MODULE_DESCRIPTION("Digital Devices PCIe Bridge"); -MODULE_AUTHOR("Ralph Metzler"); -MODULE_LICENSE("GPL"); -MODULE_VERSION("0.5"); diff --git a/drivers/media/pci/ddbridge/ddbridge-hw.c b/drivers/media/pci/ddbridge/ddbridge-hw.c new file mode 100644 index 000000000000..48248bcd59c2 --- /dev/null +++ b/drivers/media/pci/ddbridge/ddbridge-hw.c @@ -0,0 +1,376 @@ +/* + * ddbridge-hw.c: Digital Devices bridge hardware maps + * + * Copyright (C) 2010-2017 Digital Devices GmbH + * Ralph Metzler + * Marcus Metzler + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 only, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include "ddbridge.h" +#include "ddbridge-hw.h" + +/******************************************************************************/ + +static const struct ddb_regset octopus_input = { + .base = 0x200, + .num = 0x08, + .size = 0x10, +}; + +static const struct ddb_regset octopus_output = { + .base = 0x280, + .num = 0x08, + .size = 0x10, +}; + +static const struct ddb_regset octopus_idma = { + .base = 0x300, + .num = 0x08, + .size = 0x10, +}; + +static const struct ddb_regset octopus_idma_buf = { + .base = 0x2000, + .num = 0x08, + .size = 0x100, +}; + +static const struct ddb_regset octopus_odma = { + .base = 0x380, + .num = 0x04, + .size = 0x10, +}; + +static const struct ddb_regset octopus_odma_buf = { + .base = 0x2800, + .num = 0x04, + .size = 0x100, +}; + +static const struct ddb_regset octopus_i2c = { + .base = 0x80, + .num = 0x04, + .size = 0x20, +}; + +static const struct ddb_regset octopus_i2c_buf = { + .base = 0x1000, + .num = 0x04, + .size = 0x200, +}; + +/****************************************************************************/ + +static const struct ddb_regmap octopus_map = { + .irq_base_i2c = 0, + .irq_base_idma = 8, + .irq_base_odma = 16, + .i2c = &octopus_i2c, + .i2c_buf = &octopus_i2c_buf, + .idma = &octopus_idma, + .idma_buf = &octopus_idma_buf, + .odma = &octopus_odma, + .odma_buf = &octopus_odma_buf, + .input = &octopus_input, + .output = &octopus_output, +}; + +/****************************************************************************/ + +static const struct ddb_info ddb_none = { + .type = DDB_NONE, + .name = "unknown Digital Devices PCIe card, install newer driver", + .regmap = &octopus_map, +}; + +static const struct ddb_info ddb_octopus = { + .type = DDB_OCTOPUS, + .name = "Digital Devices Octopus DVB adapter", + .regmap = &octopus_map, + .port_num = 4, + .i2c_mask = 0x0f, +}; + +static const struct ddb_info ddb_octopusv3 = { + .type = DDB_OCTOPUS, + .name = "Digital Devices Octopus V3 DVB adapter", + .regmap = &octopus_map, + .port_num = 4, + .i2c_mask = 0x0f, +}; + +static const struct ddb_info ddb_octopus_le = { + .type = DDB_OCTOPUS, + .name = "Digital Devices Octopus LE DVB adapter", + .regmap = &octopus_map, + .port_num = 2, + .i2c_mask = 0x03, +}; + +static const struct ddb_info ddb_octopus_oem = { + .type = DDB_OCTOPUS, + .name = "Digital Devices Octopus OEM", + .regmap = &octopus_map, + .port_num = 4, + .i2c_mask = 0x0f, + .led_num = 1, + .fan_num = 1, + .temp_num = 1, + .temp_bus = 0, +}; + +static const struct ddb_info ddb_octopus_mini = { + .type = DDB_OCTOPUS, + .name = "Digital Devices Octopus Mini", + .regmap = &octopus_map, + .port_num = 4, + .i2c_mask = 0x0f, +}; + +static const struct ddb_info ddb_v6 = { + .type = DDB_OCTOPUS, + .name = "Digital Devices Cine S2 V6 DVB adapter", + .regmap = &octopus_map, + .port_num = 3, + .i2c_mask = 0x07, +}; + +static const struct ddb_info ddb_v6_5 = { + .type = DDB_OCTOPUS, + .name = "Digital Devices Cine S2 V6.5 DVB adapter", + .regmap = &octopus_map, + .port_num = 4, + .i2c_mask = 0x0f, +}; + +static const struct ddb_info ddb_v7 = { + .type = DDB_OCTOPUS, + .name = "Digital Devices Cine S2 V7 DVB adapter", + .regmap = &octopus_map, + .port_num = 4, + .i2c_mask = 0x0f, + .board_control = 2, + .board_control_2 = 4, + .ts_quirks = TS_QUIRK_REVERSED, +}; + +static const struct ddb_info ddb_v7a = { + .type = DDB_OCTOPUS, + .name = "Digital Devices Cine S2 V7 Advanced DVB adapter", + .regmap = &octopus_map, + .port_num = 4, + .i2c_mask = 0x0f, + .board_control = 2, + .board_control_2 = 4, + .ts_quirks 
= TS_QUIRK_REVERSED, +}; + +static const struct ddb_info ddb_ctv7 = { + .type = DDB_OCTOPUS, + .name = "Digital Devices Cine CT V7 DVB adapter", + .regmap = &octopus_map, + .port_num = 4, + .i2c_mask = 0x0f, + .board_control = 3, + .board_control_2 = 4, +}; + +static const struct ddb_info ddb_satixS2v3 = { + .type = DDB_OCTOPUS, + .name = "Mystique SaTiX-S2 V3 DVB adapter", + .regmap = &octopus_map, + .port_num = 3, + .i2c_mask = 0x07, +}; + +static const struct ddb_info ddb_ci = { + .type = DDB_OCTOPUS_CI, + .name = "Digital Devices Octopus CI", + .regmap = &octopus_map, + .port_num = 4, + .i2c_mask = 0x03, +}; + +static const struct ddb_info ddb_cis = { + .type = DDB_OCTOPUS_CI, + .name = "Digital Devices Octopus CI single", + .regmap = &octopus_map, + .port_num = 3, + .i2c_mask = 0x03, +}; + +static const struct ddb_info ddb_ci_s2_pro = { + .type = DDB_OCTOPUS_CI, + .name = "Digital Devices Octopus CI S2 Pro", + .regmap = &octopus_map, + .port_num = 4, + .i2c_mask = 0x01, + .board_control = 2, + .board_control_2 = 4, +}; + +static const struct ddb_info ddb_ci_s2_pro_a = { + .type = DDB_OCTOPUS_CI, + .name = "Digital Devices Octopus CI S2 Pro Advanced", + .regmap = &octopus_map, + .port_num = 4, + .i2c_mask = 0x01, + .board_control = 2, + .board_control_2 = 4, +}; + +static const struct ddb_info ddb_dvbct = { + .type = DDB_OCTOPUS, + .name = "Digital Devices DVBCT V6.1 DVB adapter", + .regmap = &octopus_map, + .port_num = 3, + .i2c_mask = 0x07, +}; + +/****************************************************************************/ + +static const struct ddb_info ddb_ct2_8 = { + .type = DDB_OCTOPUS_MAX_CT, + .name = "Digital Devices MAX A8 CT2", + .regmap = &octopus_map, + .port_num = 4, + .i2c_mask = 0x0f, + .board_control = 0x0ff, + .board_control_2 = 0xf00, + .ts_quirks = TS_QUIRK_SERIAL, + .tempmon_irq = 24, +}; + +static const struct ddb_info ddb_c2t2_8 = { + .type = DDB_OCTOPUS_MAX_CT, + .name = "Digital Devices MAX A8 C2T2", + .regmap = &octopus_map, + .port_num = 4, + .i2c_mask = 0x0f, + .board_control = 0x0ff, + .board_control_2 = 0xf00, + .ts_quirks = TS_QUIRK_SERIAL, + .tempmon_irq = 24, +}; + +static const struct ddb_info ddb_isdbt_8 = { + .type = DDB_OCTOPUS_MAX_CT, + .name = "Digital Devices MAX A8 ISDBT", + .regmap = &octopus_map, + .port_num = 4, + .i2c_mask = 0x0f, + .board_control = 0x0ff, + .board_control_2 = 0xf00, + .ts_quirks = TS_QUIRK_SERIAL, + .tempmon_irq = 24, +}; + +static const struct ddb_info ddb_c2t2i_v0_8 = { + .type = DDB_OCTOPUS_MAX_CT, + .name = "Digital Devices MAX A8 C2T2I V0", + .regmap = &octopus_map, + .port_num = 4, + .i2c_mask = 0x0f, + .board_control = 0x0ff, + .board_control_2 = 0xf00, + .ts_quirks = TS_QUIRK_SERIAL | TS_QUIRK_ALT_OSC, + .tempmon_irq = 24, +}; + +static const struct ddb_info ddb_c2t2i_8 = { + .type = DDB_OCTOPUS_MAX_CT, + .name = "Digital Devices MAX A8 C2T2I", + .regmap = &octopus_map, + .port_num = 4, + .i2c_mask = 0x0f, + .board_control = 0x0ff, + .board_control_2 = 0xf00, + .ts_quirks = TS_QUIRK_SERIAL, + .tempmon_irq = 24, +}; + +/****************************************************************************/ + +static const struct ddb_info ddb_s2_48 = { + .type = DDB_OCTOPUS_MAX, + .name = "Digital Devices MAX S8 4/8", + .regmap = &octopus_map, + .port_num = 4, + .i2c_mask = 0x01, + .board_control = 1, + .tempmon_irq = 24, +}; + +/****************************************************************************/ +/****************************************************************************/ 
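+
+ (The ddb_regset tables above are plain descriptors: each instance of an I2C master,
+ input, output or DMA channel occupies a window of "size" bytes starting at "base",
+ repeated "num" times, and the per-board ddb_info entries select one of these maps
+ together with the port count and I2C mask. The following is a minimal, illustrative
+ sketch of how such a descriptor resolves to a register offset; struct regset_example
+ and regset_addr() are hypothetical names for illustration only and are not part of
+ the driver, which does the equivalent arithmetic inline in ddb_i2c_add().)
+
+ #include <stdint.h>
+
+ struct regset_example {
+	uint32_t base;	/* first register window */
+	uint32_t num;	/* number of instances */
+	uint32_t size;	/* bytes per instance */
+ };
+
+ /* Illustrative only: instance i of a regset lives at base + i * size,
+  * which is how the driver derives i2c->regs from regmap->i2c.
+  * For octopus_i2c (base 0x80, size 0x20) instance 2 maps to 0xc0. */
+ static inline uint32_t regset_addr(const struct regset_example *rs, uint32_t i)
+ {
+	return rs->base + i * rs->size;
+ }
+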
+/****************************************************************************/ + +#define DDB_DEVID(_device, _subdevice, _info) { \ + .vendor = DDVID, \ + .device = _device, \ + .subvendor = DDVID, \ + .subdevice = _subdevice, \ + .info = &_info } + +static const struct ddb_device_id ddb_device_ids[] = { + /* PCIe devices */ + DDB_DEVID(0x0002, 0x0001, ddb_octopus), + DDB_DEVID(0x0003, 0x0001, ddb_octopus), + DDB_DEVID(0x0005, 0x0004, ddb_octopusv3), + DDB_DEVID(0x0003, 0x0002, ddb_octopus_le), + DDB_DEVID(0x0003, 0x0003, ddb_octopus_oem), + DDB_DEVID(0x0003, 0x0010, ddb_octopus_mini), + DDB_DEVID(0x0005, 0x0011, ddb_octopus_mini), + DDB_DEVID(0x0003, 0x0020, ddb_v6), + DDB_DEVID(0x0003, 0x0021, ddb_v6_5), + DDB_DEVID(0x0006, 0x0022, ddb_v7), + DDB_DEVID(0x0006, 0x0024, ddb_v7a), + DDB_DEVID(0x0003, 0x0030, ddb_dvbct), + DDB_DEVID(0x0003, 0xdb03, ddb_satixS2v3), + DDB_DEVID(0x0006, 0x0031, ddb_ctv7), + DDB_DEVID(0x0006, 0x0032, ddb_ctv7), + DDB_DEVID(0x0006, 0x0033, ddb_ctv7), + DDB_DEVID(0x0007, 0x0023, ddb_s2_48), + DDB_DEVID(0x0008, 0x0034, ddb_ct2_8), + DDB_DEVID(0x0008, 0x0035, ddb_c2t2_8), + DDB_DEVID(0x0008, 0x0036, ddb_isdbt_8), + DDB_DEVID(0x0008, 0x0037, ddb_c2t2i_v0_8), + DDB_DEVID(0x0008, 0x0038, ddb_c2t2i_8), + DDB_DEVID(0x0006, 0x0039, ddb_ctv7), + DDB_DEVID(0x0011, 0x0040, ddb_ci), + DDB_DEVID(0x0011, 0x0041, ddb_cis), + DDB_DEVID(0x0012, 0x0042, ddb_ci), + DDB_DEVID(0x0013, 0x0043, ddb_ci_s2_pro), + DDB_DEVID(0x0013, 0x0044, ddb_ci_s2_pro_a), +}; + +/****************************************************************************/ + +const struct ddb_info *get_ddb_info(u16 vendor, u16 device, + u16 subvendor, u16 subdevice) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(ddb_device_ids); i++) { + const struct ddb_device_id *id = &ddb_device_ids[i]; + + if (vendor == id->vendor && + device == id->device && + subvendor == id->subvendor && + ((subdevice == id->subdevice) || + (id->subdevice == 0xffff))) + return id->info; + } + + return &ddb_none; +} diff --git a/drivers/media/pci/ddbridge/ddbridge-hw.h b/drivers/media/pci/ddbridge/ddbridge-hw.h new file mode 100644 index 000000000000..7c142419419c --- /dev/null +++ b/drivers/media/pci/ddbridge/ddbridge-hw.h @@ -0,0 +1,43 @@ +/* + * ddbridge-hw.h: Digital Devices bridge hardware maps + * + * Copyright (C) 2010-2017 Digital Devices GmbH + * Ralph Metzler + * Marcus Metzler + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 only, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef _DDBRIDGE_HW_H_ +#define _DDBRIDGE_HW_H_ + +#include "ddbridge.h" + +/******************************************************************************/ + +#define DDVID 0xdd01 /* Digital Devices Vendor ID */ + +/******************************************************************************/ + +struct ddb_device_id { + u16 vendor; + u16 device; + u16 subvendor; + u16 subdevice; + const struct ddb_info *info; +}; + +/******************************************************************************/ + +const struct ddb_info *get_ddb_info(u16 vendor, u16 device, + u16 subvendor, u16 subdevice); + +#endif /* _DDBRIDGE_HW_H */ diff --git a/drivers/media/pci/ddbridge/ddbridge-i2c.c b/drivers/media/pci/ddbridge/ddbridge-i2c.c new file mode 100644 index 000000000000..e4d39c3270ae --- /dev/null +++ b/drivers/media/pci/ddbridge/ddbridge-i2c.c @@ -0,0 +1,230 @@ +/* + * ddbridge-i2c.c: Digital Devices bridge i2c driver + * + * Copyright (C) 2010-2017 Digital Devices GmbH + * Ralph Metzler + * Marcus Metzler + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 only, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ddbridge.h" +#include "ddbridge-i2c.h" +#include "ddbridge-regs.h" +#include "ddbridge-io.h" + +/******************************************************************************/ + +static int ddb_i2c_cmd(struct ddb_i2c *i2c, u32 adr, u32 cmd) +{ + struct ddb *dev = i2c->dev; + unsigned long stat; + u32 val; + + ddbwritel(dev, (adr << 9) | cmd, i2c->regs + I2C_COMMAND); + stat = wait_for_completion_timeout(&i2c->completion, HZ); + val = ddbreadl(dev, i2c->regs + I2C_COMMAND); + if (stat == 0) { + dev_err(dev->dev, "I2C timeout, card %d, port %d, link %u\n", + dev->nr, i2c->nr, i2c->link); + { + u32 istat = ddbreadl(dev, INTERRUPT_STATUS); + + dev_err(dev->dev, "DDBridge IRS %08x\n", istat); + if (i2c->link) { + u32 listat = ddbreadl(dev, + DDB_LINK_TAG(i2c->link) | + INTERRUPT_STATUS); + + dev_err(dev->dev, "DDBridge link %u IRS %08x\n", + i2c->link, listat); + } + if (istat & 1) { + ddbwritel(dev, istat & 1, INTERRUPT_ACK); + } else { + u32 mon = ddbreadl(dev, + i2c->regs + I2C_MONITOR); + + dev_err(dev->dev, "I2C cmd=%08x mon=%08x\n", + val, mon); + } + } + return -EIO; + } + if (val & 0x70000) + return -EIO; + return 0; +} + +static int ddb_i2c_master_xfer(struct i2c_adapter *adapter, + struct i2c_msg msg[], int num) +{ + struct ddb_i2c *i2c = (struct ddb_i2c *) i2c_get_adapdata(adapter); + struct ddb *dev = i2c->dev; + u8 addr = 0; + + addr = msg[0].addr; + if (msg[0].len > i2c->bsize) + return -EIO; + switch (num) { + case 1: + if (msg[0].flags & I2C_M_RD) { + ddbwritel(dev, msg[0].len << 16, + i2c->regs + I2C_TASKLENGTH); + if (ddb_i2c_cmd(i2c, addr, 3)) + break; + ddbcpyfrom(dev, msg[0].buf, + i2c->rbuf, msg[0].len); + return num; + } + ddbcpyto(dev, i2c->wbuf, msg[0].buf, msg[0].len); + ddbwritel(dev, msg[0].len, i2c->regs + I2C_TASKLENGTH); + if (ddb_i2c_cmd(i2c, addr, 2)) + break; + return num; + case 2: + if ((msg[0].flags & I2C_M_RD) == I2C_M_RD) + break; + if ((msg[1].flags & 
I2C_M_RD) != I2C_M_RD) + break; + if (msg[1].len > i2c->bsize) + break; + ddbcpyto(dev, i2c->wbuf, msg[0].buf, msg[0].len); + ddbwritel(dev, msg[0].len | (msg[1].len << 16), + i2c->regs + I2C_TASKLENGTH); + if (ddb_i2c_cmd(i2c, addr, 1)) + break; + ddbcpyfrom(dev, msg[1].buf, + i2c->rbuf, + msg[1].len); + return num; + default: + break; + } + return -EIO; +} + +static u32 ddb_i2c_functionality(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static const struct i2c_algorithm ddb_i2c_algo = { + .master_xfer = ddb_i2c_master_xfer, + .functionality = ddb_i2c_functionality, +}; + +void ddb_i2c_release(struct ddb *dev) +{ + int i; + struct ddb_i2c *i2c; + + for (i = 0; i < dev->i2c_num; i++) { + i2c = &dev->i2c[i]; + i2c_del_adapter(&i2c->adap); + } +} + +static void i2c_handler(unsigned long priv) +{ + struct ddb_i2c *i2c = (struct ddb_i2c *) priv; + + complete(&i2c->completion); +} + +static int ddb_i2c_add(struct ddb *dev, struct ddb_i2c *i2c, + const struct ddb_regmap *regmap, int link, + int i, int num) +{ + struct i2c_adapter *adap; + + i2c->nr = i; + i2c->dev = dev; + i2c->link = link; + i2c->bsize = regmap->i2c_buf->size; + i2c->wbuf = DDB_LINK_TAG(link) | + (regmap->i2c_buf->base + i2c->bsize * i); + i2c->rbuf = i2c->wbuf; /* + i2c->bsize / 2 */ + i2c->regs = DDB_LINK_TAG(link) | + (regmap->i2c->base + regmap->i2c->size * i); + ddbwritel(dev, I2C_SPEED_100, i2c->regs + I2C_TIMING); + ddbwritel(dev, ((i2c->rbuf & 0xffff) << 16) | (i2c->wbuf & 0xffff), + i2c->regs + I2C_TASKADDRESS); + init_completion(&i2c->completion); + + adap = &i2c->adap; + i2c_set_adapdata(adap, i2c); +#ifdef I2C_ADAP_CLASS_TV_DIGITAL + adap->class = I2C_ADAP_CLASS_TV_DIGITAL|I2C_CLASS_TV_ANALOG; +#else +#ifdef I2C_CLASS_TV_ANALOG + adap->class = I2C_CLASS_TV_ANALOG; +#endif +#endif + snprintf(adap->name, I2C_NAME_SIZE, "ddbridge_%02x.%x.%x", + dev->nr, i2c->link, i); + adap->algo = &ddb_i2c_algo; + adap->algo_data = (void *)i2c; + adap->dev.parent = dev->dev; + return i2c_add_adapter(adap); +} + +int ddb_i2c_init(struct ddb *dev) +{ + int stat = 0; + u32 i, j, num = 0, l, base; + struct ddb_i2c *i2c; + struct i2c_adapter *adap; + const struct ddb_regmap *regmap; + + for (l = 0; l < DDB_MAX_LINK; l++) { + if (!dev->link[l].info) + continue; + regmap = dev->link[l].info->regmap; + if (!regmap || !regmap->i2c) + continue; + base = regmap->irq_base_i2c; + for (i = 0; i < regmap->i2c->num; i++) { + if (!(dev->link[l].info->i2c_mask & (1 << i))) + continue; + i2c = &dev->i2c[num]; + dev->handler_data[l][i + base] = (unsigned long) i2c; + dev->handler[l][i + base] = i2c_handler; + stat = ddb_i2c_add(dev, i2c, regmap, l, i, num); + if (stat) + break; + num++; + } + } + if (stat) { + for (j = 0; j < num; j++) { + i2c = &dev->i2c[j]; + adap = &i2c->adap; + i2c_del_adapter(adap); + } + } else + dev->i2c_num = num; + return stat; +} diff --git a/drivers/media/pci/ddbridge/ddbridge-i2c.h b/drivers/media/pci/ddbridge/ddbridge-i2c.h new file mode 100644 index 000000000000..7ed220506c05 --- /dev/null +++ b/drivers/media/pci/ddbridge/ddbridge-i2c.h @@ -0,0 +1,112 @@ +/* + * ddbridge-i2c.c: Digital Devices bridge i2c driver + * + * Copyright (C) 2010-2017 Digital Devices GmbH + * Ralph Metzler + * Marcus Metzler + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 only, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __DDBRIDGE_I2C_H__ +#define __DDBRIDGE_I2C_H__ + +#include + +#include "ddbridge.h" + +/******************************************************************************/ + +void ddb_i2c_release(struct ddb *dev); +int ddb_i2c_init(struct ddb *dev); + +/******************************************************************************/ + +static int __maybe_unused i2c_io(struct i2c_adapter *adapter, u8 adr, + u8 *wbuf, u32 wlen, u8 *rbuf, u32 rlen) +{ + struct i2c_msg msgs[2] = { { .addr = adr, .flags = 0, + .buf = wbuf, .len = wlen }, + { .addr = adr, .flags = I2C_M_RD, + .buf = rbuf, .len = rlen } }; + + return (i2c_transfer(adapter, msgs, 2) == 2) ? 0 : -1; +} + +static int __maybe_unused i2c_write(struct i2c_adapter *adap, u8 adr, + u8 *data, int len) +{ + struct i2c_msg msg = { .addr = adr, .flags = 0, + .buf = data, .len = len }; + + return (i2c_transfer(adap, &msg, 1) == 1) ? 0 : -1; +} + +static int __maybe_unused i2c_read(struct i2c_adapter *adapter, u8 adr, u8 *val) +{ + struct i2c_msg msgs[1] = { { .addr = adr, .flags = I2C_M_RD, + .buf = val, .len = 1 } }; + + return (i2c_transfer(adapter, msgs, 1) == 1) ? 0 : -1; +} + +static int __maybe_unused i2c_read_regs(struct i2c_adapter *adapter, + u8 adr, u8 reg, u8 *val, u8 len) +{ + struct i2c_msg msgs[2] = { { .addr = adr, .flags = 0, + .buf = ®, .len = 1 }, + { .addr = adr, .flags = I2C_M_RD, + .buf = val, .len = len } }; + + return (i2c_transfer(adapter, msgs, 2) == 2) ? 0 : -1; +} + +static int __maybe_unused i2c_read_regs16(struct i2c_adapter *adapter, + u8 adr, u16 reg, u8 *val, u8 len) +{ + u8 msg[2] = { reg >> 8, reg & 0xff }; + struct i2c_msg msgs[2] = { { .addr = adr, .flags = 0, + .buf = msg, .len = 2 }, + { .addr = adr, .flags = I2C_M_RD, + .buf = val, .len = len } }; + + return (i2c_transfer(adapter, msgs, 2) == 2) ? 0 : -1; +} + +static int __maybe_unused i2c_write_reg16(struct i2c_adapter *adap, + u8 adr, u16 reg, u8 val) +{ + u8 msg[3] = { reg >> 8, reg & 0xff, val }; + + return i2c_write(adap, adr, msg, 3); +} + +static int __maybe_unused i2c_write_reg(struct i2c_adapter *adap, + u8 adr, u8 reg, u8 val) +{ + u8 msg[2] = { reg, val }; + + return i2c_write(adap, adr, msg, 2); +} + +static int __maybe_unused i2c_read_reg16(struct i2c_adapter *adapter, + u8 adr, u16 reg, u8 *val) +{ + return i2c_read_regs16(adapter, adr, reg, val, 1); +} + +static int __maybe_unused i2c_read_reg(struct i2c_adapter *adapter, + u8 adr, u8 reg, u8 *val) +{ + return i2c_read_regs(adapter, adr, reg, val, 1); +} + +#endif /* __DDBRIDGE_I2C_H__ */ diff --git a/drivers/media/pci/ddbridge/ddbridge-io.h b/drivers/media/pci/ddbridge/ddbridge-io.h new file mode 100644 index 000000000000..a4c6bbe09168 --- /dev/null +++ b/drivers/media/pci/ddbridge/ddbridge-io.h @@ -0,0 +1,71 @@ +/* + * ddbridge-io.h: Digital Devices bridge I/O inline functions + * + * Copyright (C) 2010-2017 Digital Devices GmbH + * Ralph Metzler + * Marcus Metzler + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 only, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __DDBRIDGE_IO_H__ +#define __DDBRIDGE_IO_H__ + +#include + +#include "ddbridge.h" + +/******************************************************************************/ + +static inline u32 ddblreadl(struct ddb_link *link, u32 adr) +{ + return readl(link->dev->regs + adr); +} + +static inline void ddblwritel(struct ddb_link *link, u32 val, u32 adr) +{ + writel(val, link->dev->regs + adr); +} + +static inline u32 ddbreadl(struct ddb *dev, u32 adr) +{ + return readl(dev->regs + adr); +} + +static inline void ddbwritel(struct ddb *dev, u32 val, u32 adr) +{ + writel(val, dev->regs + adr); +} + +static inline void ddbcpyto(struct ddb *dev, u32 adr, void *src, long count) +{ + return memcpy_toio(dev->regs + adr, src, count); +} + +static inline void ddbcpyfrom(struct ddb *dev, void *dst, u32 adr, long count) +{ + return memcpy_fromio(dst, dev->regs + adr, count); +} + +static inline u32 safe_ddbreadl(struct ddb *dev, u32 adr) +{ + u32 val = ddbreadl(dev, adr); + + /* (ddb)readl returns (uint)-1 (all bits set) on failure, catch that */ + if (val == ~0) { + dev_err(&dev->pdev->dev, "ddbreadl failure, adr=%08x\n", adr); + return 0; + } + + return val; +} + +#endif /* __DDBRIDGE_IO_H__ */ diff --git a/drivers/media/pci/ddbridge/ddbridge-main.c b/drivers/media/pci/ddbridge/ddbridge-main.c new file mode 100644 index 000000000000..ccac7fe31336 --- /dev/null +++ b/drivers/media/pci/ddbridge/ddbridge-main.c @@ -0,0 +1,346 @@ +/* + * ddbridge.c: Digital Devices PCIe bridge driver + * + * Copyright (C) 2010-2017 Digital Devices GmbH + * Ralph Metzler + * Marcus Metzler + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 only, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ddbridge.h" +#include "ddbridge-i2c.h" +#include "ddbridge-regs.h" +#include "ddbridge-hw.h" +#include "ddbridge-io.h" + +/****************************************************************************/ +/* module parameters */ + +#ifdef CONFIG_PCI_MSI +#ifdef CONFIG_DVB_DDBRIDGE_MSIENABLE +static int msi = 1; +#else +static int msi; +#endif +module_param(msi, int, 0444); +#ifdef CONFIG_DVB_DDBRIDGE_MSIENABLE +MODULE_PARM_DESC(msi, "Control MSI interrupts: 0-disable, 1-enable (default)"); +#else +MODULE_PARM_DESC(msi, "Control MSI interrupts: 0-disable (default), 1-enable"); +#endif +#endif + +int ci_bitrate = 70000; +module_param(ci_bitrate, int, 0444); +MODULE_PARM_DESC(ci_bitrate, " Bitrate in KHz for output to CI."); + +int ts_loop = -1; +module_param(ts_loop, int, 0444); +MODULE_PARM_DESC(ts_loop, "TS in/out test loop on port ts_loop"); + +int xo2_speed = 2; +module_param(xo2_speed, int, 0444); +MODULE_PARM_DESC(xo2_speed, "default transfer speed for xo2 based duoflex, 0=55,1=75,2=90,3=104 MBit/s, default=2, use attribute to change for individual cards"); + +#ifdef __arm__ +int alt_dma = 1; +#else +int alt_dma; +#endif +module_param(alt_dma, int, 0444); +MODULE_PARM_DESC(alt_dma, "use alternative DMA buffer handling"); + +int no_init; +module_param(no_init, int, 0444); +MODULE_PARM_DESC(no_init, "do not initialize most devices"); + +int stv0910_single; +module_param(stv0910_single, int, 0444); +MODULE_PARM_DESC(stv0910_single, "use stv0910 cards as single demods"); + +/****************************************************************************/ +/****************************************************************************/ +/****************************************************************************/ + +static void ddb_irq_disable(struct ddb *dev) +{ + ddbwritel(dev, 0, INTERRUPT_ENABLE); + ddbwritel(dev, 0, MSI1_ENABLE); +} + +static void ddb_irq_exit(struct ddb *dev) +{ + ddb_irq_disable(dev); + if (dev->msi == 2) + free_irq(dev->pdev->irq + 1, dev); + free_irq(dev->pdev->irq, dev); +#ifdef CONFIG_PCI_MSI + if (dev->msi) + pci_disable_msi(dev->pdev); +#endif +} + +static void ddb_remove(struct pci_dev *pdev) +{ + struct ddb *dev = (struct ddb *) pci_get_drvdata(pdev); + + ddb_device_destroy(dev); + ddb_ports_detach(dev); + ddb_i2c_release(dev); + + ddb_irq_exit(dev); + ddb_ports_release(dev); + ddb_buffers_free(dev); + + ddb_unmap(dev); + pci_set_drvdata(pdev, NULL); + pci_disable_device(pdev); +} + +#ifdef CONFIG_PCI_MSI +static void ddb_irq_msi(struct ddb *dev, int nr) +{ + int stat; + + if (msi && pci_msi_enabled()) { + stat = pci_alloc_irq_vectors(dev->pdev, 1, nr, PCI_IRQ_MSI); + if (stat >= 1) { + dev->msi = stat; + dev_info(dev->dev, "using %d MSI interrupt(s)\n", + dev->msi); + } else + dev_info(dev->dev, "MSI not available.\n"); + } +} +#endif + +static int ddb_irq_init(struct ddb *dev) +{ + int stat; + int irq_flag = IRQF_SHARED; + + ddbwritel(dev, 0x00000000, INTERRUPT_ENABLE); + ddbwritel(dev, 0x00000000, MSI1_ENABLE); + ddbwritel(dev, 0x00000000, MSI2_ENABLE); + ddbwritel(dev, 0x00000000, MSI3_ENABLE); + ddbwritel(dev, 0x00000000, MSI4_ENABLE); + ddbwritel(dev, 0x00000000, MSI5_ENABLE); + ddbwritel(dev, 0x00000000, MSI6_ENABLE); + ddbwritel(dev, 0x00000000, MSI7_ENABLE); + +#ifdef CONFIG_PCI_MSI + ddb_irq_msi(dev, 2); + + if (dev->msi) + irq_flag = 0; + if (dev->msi == 2) 
{ + stat = request_irq(dev->pdev->irq, ddb_irq_handler0, + irq_flag, "ddbridge", (void *) dev); + if (stat < 0) + return stat; + stat = request_irq(dev->pdev->irq + 1, ddb_irq_handler1, + irq_flag, "ddbridge", (void *) dev); + if (stat < 0) { + free_irq(dev->pdev->irq, dev); + return stat; + } + } else +#endif + { + stat = request_irq(dev->pdev->irq, ddb_irq_handler, + irq_flag, "ddbridge", (void *) dev); + if (stat < 0) + return stat; + } + if (dev->msi == 2) { + ddbwritel(dev, 0x0fffff00, INTERRUPT_ENABLE); + ddbwritel(dev, 0x0000000f, MSI1_ENABLE); + } else { + ddbwritel(dev, 0x0fffff0f, INTERRUPT_ENABLE); + ddbwritel(dev, 0x00000000, MSI1_ENABLE); + } + return stat; +} + +static int ddb_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct ddb *dev; + int stat = 0; + + if (pci_enable_device(pdev) < 0) + return -ENODEV; + + pci_set_master(pdev); + + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) + return -ENODEV; + + dev = vzalloc(sizeof(struct ddb)); + if (dev == NULL) + return -ENOMEM; + + mutex_init(&dev->mutex); + dev->has_dma = 1; + dev->pdev = pdev; + dev->dev = &pdev->dev; + pci_set_drvdata(pdev, dev); + + dev->link[0].ids.vendor = id->vendor; + dev->link[0].ids.device = id->device; + dev->link[0].ids.subvendor = id->subvendor; + dev->link[0].ids.subdevice = pdev->subsystem_device; + + dev->link[0].dev = dev; + dev->link[0].info = get_ddb_info(id->vendor, id->device, + id->subvendor, pdev->subsystem_device); + + dev_info(&pdev->dev, "detected %s\n", dev->link[0].info->name); + + dev->regs_len = pci_resource_len(dev->pdev, 0); + dev->regs = ioremap(pci_resource_start(dev->pdev, 0), + pci_resource_len(dev->pdev, 0)); + + if (!dev->regs) { + dev_err(&pdev->dev, "not enough memory for register map\n"); + stat = -ENOMEM; + goto fail; + } + if (ddbreadl(dev, 0) == 0xffffffff) { + dev_err(&pdev->dev, "cannot read registers\n"); + stat = -ENODEV; + goto fail; + } + + dev->link[0].ids.hwid = ddbreadl(dev, 0); + dev->link[0].ids.regmapid = ddbreadl(dev, 4); + + dev_info(&pdev->dev, "HW %08x REGMAP %08x\n", + dev->link[0].ids.hwid, dev->link[0].ids.regmapid); + + ddbwritel(dev, 0, DMA_BASE_READ); + ddbwritel(dev, 0, DMA_BASE_WRITE); + + stat = ddb_irq_init(dev); + if (stat < 0) + goto fail0; + + if (ddb_init(dev) == 0) + return 0; + + ddb_irq_exit(dev); +fail0: + dev_err(&pdev->dev, "fail0\n"); + if (dev->msi) + pci_disable_msi(dev->pdev); +fail: + dev_err(&pdev->dev, "fail\n"); + + ddb_unmap(dev); + pci_set_drvdata(pdev, NULL); + pci_disable_device(pdev); + return -1; +} + +/****************************************************************************/ +/****************************************************************************/ +/****************************************************************************/ + +#define DDB_DEVICE_ANY(_device) \ + { PCI_DEVICE_SUB(DDVID, _device, DDVID, PCI_ANY_ID) } + +static const struct pci_device_id ddb_id_table[] = { + DDB_DEVICE_ANY(0x0002), + DDB_DEVICE_ANY(0x0003), + DDB_DEVICE_ANY(0x0005), + DDB_DEVICE_ANY(0x0006), + DDB_DEVICE_ANY(0x0007), + DDB_DEVICE_ANY(0x0008), + DDB_DEVICE_ANY(0x0011), + DDB_DEVICE_ANY(0x0012), + DDB_DEVICE_ANY(0x0013), + DDB_DEVICE_ANY(0x0201), + DDB_DEVICE_ANY(0x0203), + DDB_DEVICE_ANY(0x0210), + DDB_DEVICE_ANY(0x0220), + DDB_DEVICE_ANY(0x0320), + DDB_DEVICE_ANY(0x0321), + DDB_DEVICE_ANY(0x0322), + DDB_DEVICE_ANY(0x0323), + DDB_DEVICE_ANY(0x0328), + DDB_DEVICE_ANY(0x0329), + {0} +}; + +MODULE_DEVICE_TABLE(pci, ddb_id_table); + +static struct pci_driver 
ddb_pci_driver = { + .name = "ddbridge", + .id_table = ddb_id_table, + .probe = ddb_probe, + .remove = ddb_remove, +}; + +static __init int module_init_ddbridge(void) +{ + int stat = -1; + + pr_info("Digital Devices PCIE bridge driver " + DDBRIDGE_VERSION + ", Copyright (C) 2010-17 Digital Devices GmbH\n"); + if (ddb_class_create() < 0) + return -1; + ddb_wq = create_workqueue("ddbridge"); + if (ddb_wq == NULL) + goto exit1; + stat = pci_register_driver(&ddb_pci_driver); + if (stat < 0) + goto exit2; + return stat; +exit2: + destroy_workqueue(ddb_wq); +exit1: + ddb_class_destroy(); + return stat; +} + +static __exit void module_exit_ddbridge(void) +{ + pci_unregister_driver(&ddb_pci_driver); + destroy_workqueue(ddb_wq); + ddb_class_destroy(); +} + +module_init(module_init_ddbridge); +module_exit(module_exit_ddbridge); + +MODULE_DESCRIPTION("Digital Devices PCIe Bridge"); +MODULE_AUTHOR("Ralph and Marcus Metzler, Metzler Brothers Systementwicklung GbR"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DDBRIDGE_VERSION); diff --git a/drivers/media/pci/ddbridge/ddbridge-maxs8.c b/drivers/media/pci/ddbridge/ddbridge-maxs8.c new file mode 100644 index 000000000000..f8a53bc7c86c --- /dev/null +++ b/drivers/media/pci/ddbridge/ddbridge-maxs8.c @@ -0,0 +1,444 @@ +/* + * ddbridge-maxs8.c: Digital Devices bridge MaxS4/8 support + * + * Copyright (C) 2010-2017 Digital Devices GmbH + * Ralph Metzler + * Marcus Metzler + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 only, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ddbridge.h" +#include "ddbridge-regs.h" +#include "ddbridge-io.h" + +#include "ddbridge-maxs8.h" +#include "mxl5xx.h" + +/******************************************************************************/ + +/* MaxS4/8 related modparams */ +static int fmode; +module_param(fmode, int, 0444); +MODULE_PARM_DESC(fmode, "frontend emulation mode"); + +static int fmode_sat = -1; +module_param(fmode_sat, int, 0444); +MODULE_PARM_DESC(fmode_sat, "set frontend emulation mode sat"); + +static int old_quattro; +module_param(old_quattro, int, 0444); +MODULE_PARM_DESC(old_quattro, "old quattro LNB input order "); + +/******************************************************************************/ + +static int lnb_command(struct ddb *dev, u32 link, u32 lnb, u32 cmd) +{ + u32 c, v = 0, tag = DDB_LINK_TAG(link); + + v = LNB_TONE & (dev->link[link].lnb.tone << (15 - lnb)); + ddbwritel(dev, cmd | v, tag | LNB_CONTROL(lnb)); + for (c = 0; c < 10; c++) { + v = ddbreadl(dev, tag | LNB_CONTROL(lnb)); + if ((v & LNB_BUSY) == 0) + break; + msleep(20); + } + if (c == 10) + dev_info(dev->dev, "%s lnb = %08x cmd = %08x\n", + __func__, lnb, cmd); + return 0; +} + +static int max_send_master_cmd(struct dvb_frontend *fe, + struct dvb_diseqc_master_cmd *cmd) +{ + struct ddb_input *input = fe->sec_priv; + struct ddb_port *port = input->port; + struct ddb *dev = port->dev; + struct ddb_dvb *dvb = &port->dvb[input->nr & 1]; + u32 tag = DDB_LINK_TAG(port->lnr); + int i; + u32 fmode = dev->link[port->lnr].lnb.fmode; + + if (fmode == 2 || fmode == 1) + return 0; + if (dvb->diseqc_send_master_cmd) + dvb->diseqc_send_master_cmd(fe, cmd); + + mutex_lock(&dev->link[port->lnr].lnb.lock); + ddbwritel(dev, 0, tag | LNB_BUF_LEVEL(dvb->input)); + for (i = 0; i < cmd->msg_len; i++) + ddbwritel(dev, cmd->msg[i], tag | LNB_BUF_WRITE(dvb->input)); + lnb_command(dev, port->lnr, dvb->input, LNB_CMD_DISEQC); + mutex_unlock(&dev->link[port->lnr].lnb.lock); + return 0; +} + +static int lnb_send_diseqc(struct ddb *dev, u32 link, u32 input, + struct dvb_diseqc_master_cmd *cmd) +{ + u32 tag = DDB_LINK_TAG(link); + int i; + + ddbwritel(dev, 0, tag | LNB_BUF_LEVEL(input)); + for (i = 0; i < cmd->msg_len; i++) + ddbwritel(dev, cmd->msg[i], tag | LNB_BUF_WRITE(input)); + lnb_command(dev, link, input, LNB_CMD_DISEQC); + return 0; +} + +static int lnb_set_sat(struct ddb *dev, u32 link, u32 input, u32 sat, u32 band, + u32 hor) +{ + struct dvb_diseqc_master_cmd cmd = { + .msg = {0xe0, 0x10, 0x38, 0xf0, 0x00, 0x00}, + .msg_len = 4 + }; + cmd.msg[3] = 0xf0 | (((sat << 2) & 0x0c) | (band ? 1 : 0) | + (hor ? 
2 : 0)); + return lnb_send_diseqc(dev, link, input, &cmd); +} + +static int lnb_set_tone(struct ddb *dev, u32 link, u32 input, + enum fe_sec_tone_mode tone) +{ + int s = 0; + u32 mask = (1ULL << input); + + switch (tone) { + case SEC_TONE_OFF: + if (!(dev->link[link].lnb.tone & mask)) + return 0; + dev->link[link].lnb.tone &= ~(1ULL << input); + break; + case SEC_TONE_ON: + if (dev->link[link].lnb.tone & mask) + return 0; + dev->link[link].lnb.tone |= (1ULL << input); + break; + default: + s = -EINVAL; + break; + } + if (!s) + s = lnb_command(dev, link, input, LNB_CMD_NOP); + return s; +} + +static int lnb_set_voltage(struct ddb *dev, u32 link, u32 input, + enum fe_sec_voltage voltage) +{ + int s = 0; + + if (dev->link[link].lnb.oldvoltage[input] == voltage) + return 0; + switch (voltage) { + case SEC_VOLTAGE_OFF: + if (dev->link[link].lnb.voltage[input]) + return 0; + lnb_command(dev, link, input, LNB_CMD_OFF); + break; + case SEC_VOLTAGE_13: + lnb_command(dev, link, input, LNB_CMD_LOW); + break; + case SEC_VOLTAGE_18: + lnb_command(dev, link, input, LNB_CMD_HIGH); + break; + default: + s = -EINVAL; + break; + } + dev->link[link].lnb.oldvoltage[input] = voltage; + return s; +} + +static int max_set_input_unlocked(struct dvb_frontend *fe, int in) +{ + struct ddb_input *input = fe->sec_priv; + struct ddb_port *port = input->port; + struct ddb *dev = port->dev; + struct ddb_dvb *dvb = &port->dvb[input->nr & 1]; + int res = 0; + + if (in > 3) + return -EINVAL; + if (dvb->input != in) { + u32 bit = (1ULL << input->nr); + u32 obit = + dev->link[port->lnr].lnb.voltage[dvb->input & 3] & bit; + + dev->link[port->lnr].lnb.voltage[dvb->input & 3] &= ~bit; + dvb->input = in; + dev->link[port->lnr].lnb.voltage[dvb->input & 3] |= obit; + } + res = dvb->set_input(fe, in); + return res; +} + +static int max_set_tone(struct dvb_frontend *fe, enum fe_sec_tone_mode tone) +{ + struct ddb_input *input = fe->sec_priv; + struct ddb_port *port = input->port; + struct ddb *dev = port->dev; + struct ddb_dvb *dvb = &port->dvb[input->nr & 1]; + int tuner = 0; + int res = 0; + u32 fmode = dev->link[port->lnr].lnb.fmode; + + mutex_lock(&dev->link[port->lnr].lnb.lock); + dvb->tone = tone; + switch (fmode) { + default: + case 0: + case 3: + res = lnb_set_tone(dev, port->lnr, dvb->input, tone); + break; + case 1: + case 2: + if (old_quattro) { + if (dvb->tone == SEC_TONE_ON) + tuner |= 2; + if (dvb->voltage == SEC_VOLTAGE_18) + tuner |= 1; + } else { + if (dvb->tone == SEC_TONE_ON) + tuner |= 1; + if (dvb->voltage == SEC_VOLTAGE_18) + tuner |= 2; + } + res = max_set_input_unlocked(fe, tuner); + break; + } + mutex_unlock(&dev->link[port->lnr].lnb.lock); + return res; +} + +static int max_set_voltage(struct dvb_frontend *fe, enum fe_sec_voltage voltage) +{ + struct ddb_input *input = fe->sec_priv; + struct ddb_port *port = input->port; + struct ddb *dev = port->dev; + struct ddb_dvb *dvb = &port->dvb[input->nr & 1]; + int tuner = 0; + u32 nv, ov = dev->link[port->lnr].lnb.voltages; + int res = 0; + u32 fmode = dev->link[port->lnr].lnb.fmode; + + mutex_lock(&dev->link[port->lnr].lnb.lock); + dvb->voltage = voltage; + + switch (fmode) { + case 3: + default: + case 0: + if (fmode == 3) + max_set_input_unlocked(fe, 0); + if (voltage == SEC_VOLTAGE_OFF) + dev->link[port->lnr].lnb.voltage[dvb->input] &= + ~(1ULL << input->nr); + else + dev->link[port->lnr].lnb.voltage[dvb->input] |= + (1ULL << input->nr); + + res = lnb_set_voltage(dev, port->lnr, dvb->input, voltage); + break; + case 1: + case 2: + if (voltage == 
SEC_VOLTAGE_OFF) + dev->link[port->lnr].lnb.voltages &= + ~(1ULL << input->nr); + else + dev->link[port->lnr].lnb.voltages |= + (1ULL << input->nr); + + nv = dev->link[port->lnr].lnb.voltages; + + if (old_quattro) { + if (dvb->tone == SEC_TONE_ON) + tuner |= 2; + if (dvb->voltage == SEC_VOLTAGE_18) + tuner |= 1; + } else { + if (dvb->tone == SEC_TONE_ON) + tuner |= 1; + if (dvb->voltage == SEC_VOLTAGE_18) + tuner |= 2; + } + res = max_set_input_unlocked(fe, tuner); + + if (nv != ov) { + if (nv) { + lnb_set_voltage(dev, + port->lnr, 0, SEC_VOLTAGE_13); + if (fmode == 1) { + lnb_set_voltage(dev, port->lnr, + 0, SEC_VOLTAGE_13); + if (old_quattro) { + lnb_set_voltage(dev, port->lnr, + 1, SEC_VOLTAGE_18); + lnb_set_voltage(dev, port->lnr, + 2, SEC_VOLTAGE_13); + } else { + lnb_set_voltage(dev, port->lnr, + 1, SEC_VOLTAGE_13); + lnb_set_voltage(dev, port->lnr, + 2, SEC_VOLTAGE_18); + } + lnb_set_voltage(dev, port->lnr, + 3, SEC_VOLTAGE_18); + } + } else { + lnb_set_voltage(dev, port->lnr, + 0, SEC_VOLTAGE_OFF); + if (fmode == 1) { + lnb_set_voltage(dev, port->lnr, + 1, SEC_VOLTAGE_OFF); + lnb_set_voltage(dev, port->lnr, + 2, SEC_VOLTAGE_OFF); + lnb_set_voltage(dev, port->lnr, + 3, SEC_VOLTAGE_OFF); + } + } + } + break; + } + mutex_unlock(&dev->link[port->lnr].lnb.lock); + return res; +} + +static int max_enable_high_lnb_voltage(struct dvb_frontend *fe, long arg) +{ + + return 0; +} + +static int max_send_burst(struct dvb_frontend *fe, enum fe_sec_mini_cmd burst) +{ + return 0; +} + +static int mxl_fw_read(void *priv, u8 *buf, u32 len) +{ + struct ddb_link *link = priv; + struct ddb *dev = link->dev; + + dev_info(dev->dev, "Read mxl_fw from link %u\n", link->nr); + + return ddbridge_flashread(dev, link->nr, buf, 0xc0000, len); +} + +int lnb_init_fmode(struct ddb *dev, struct ddb_link *link, u32 fm) +{ + u32 l = link->nr; + + if (link->lnb.fmode == fm) + return 0; + dev_info(dev->dev, "Set fmode link %u = %u\n", l, fm); + mutex_lock(&link->lnb.lock); + if (fm == 2 || fm == 1) { + if (fmode_sat >= 0) { + lnb_set_sat(dev, l, 0, fmode_sat, 0, 0); + if (old_quattro) { + lnb_set_sat(dev, l, 1, fmode_sat, 0, 1); + lnb_set_sat(dev, l, 2, fmode_sat, 1, 0); + } else { + lnb_set_sat(dev, l, 1, fmode_sat, 1, 0); + lnb_set_sat(dev, l, 2, fmode_sat, 0, 1); + } + lnb_set_sat(dev, l, 3, fmode_sat, 1, 1); + } + lnb_set_tone(dev, l, 0, SEC_TONE_OFF); + if (old_quattro) { + lnb_set_tone(dev, l, 1, SEC_TONE_OFF); + lnb_set_tone(dev, l, 2, SEC_TONE_ON); + } else { + lnb_set_tone(dev, l, 1, SEC_TONE_ON); + lnb_set_tone(dev, l, 2, SEC_TONE_OFF); + } + lnb_set_tone(dev, l, 3, SEC_TONE_ON); + } + link->lnb.fmode = fm; + mutex_unlock(&link->lnb.lock); + return 0; +} + +static struct mxl5xx_cfg mxl5xx = { + .adr = 0x60, + .type = 0x01, + .clk = 27000000, + .ts_clk = 139, + .cap = 12, + .fw_read = mxl_fw_read, +}; + +int fe_attach_mxl5xx(struct ddb_input *input) +{ + struct ddb *dev = input->port->dev; + struct i2c_adapter *i2c = &input->port->i2c->adap; + struct ddb_dvb *dvb = &input->port->dvb[input->nr & 1]; + struct ddb_port *port = input->port; + struct ddb_link *link = &dev->link[port->lnr]; + struct mxl5xx_cfg cfg; + int demod, tuner; + + cfg = mxl5xx; + cfg.fw_priv = link; + dvb->set_input = NULL; + + demod = input->nr; + tuner = demod & 3; + if (fmode == 3) + tuner = 0; + + dvb->fe = dvb_attach(mxl5xx_attach, i2c, &cfg, + demod, tuner, &dvb->set_input); + + if (!dvb->fe) { + dev_err(dev->dev, "No MXL5XX found!\n"); + return -ENODEV; + } + + if (!dvb->set_input) { + dev_err(dev->dev, "No mxl5xx_set_input function 
pointer!\n"); + return -ENODEV; + } + + if (input->nr < 4) { + lnb_command(dev, port->lnr, input->nr, LNB_CMD_INIT); + lnb_set_voltage(dev, port->lnr, input->nr, SEC_VOLTAGE_OFF); + } + lnb_init_fmode(dev, link, fmode); + + dvb->fe->ops.set_voltage = max_set_voltage; + dvb->fe->ops.enable_high_lnb_voltage = max_enable_high_lnb_voltage; + dvb->fe->ops.set_tone = max_set_tone; + dvb->diseqc_send_master_cmd = dvb->fe->ops.diseqc_send_master_cmd; + dvb->fe->ops.diseqc_send_master_cmd = max_send_master_cmd; + dvb->fe->ops.diseqc_send_burst = max_send_burst; + dvb->fe->sec_priv = input; + dvb->input = tuner; + return 0; +} diff --git a/drivers/media/pci/ddbridge/ddbridge-maxs8.h b/drivers/media/pci/ddbridge/ddbridge-maxs8.h new file mode 100644 index 000000000000..bb8884811a46 --- /dev/null +++ b/drivers/media/pci/ddbridge/ddbridge-maxs8.h @@ -0,0 +1,29 @@ +/* + * ddbridge-maxs8.h: Digital Devices bridge MaxS4/8 support + * + * Copyright (C) 2010-2017 Digital Devices GmbH + * Ralph Metzler + * Marcus Metzler + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 only, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _DDBRIDGE_MAXS8_H_ +#define _DDBRIDGE_MAXS8_H_ + +#include "ddbridge.h" + +/******************************************************************************/ + +int lnb_init_fmode(struct ddb *dev, struct ddb_link *link, u32 fm); +int fe_attach_mxl5xx(struct ddb_input *input); + +#endif /* _DDBRIDGE_MAXS8_H */ diff --git a/drivers/media/pci/ddbridge/ddbridge-regs.h b/drivers/media/pci/ddbridge/ddbridge-regs.h index 98cebb97d64f..9d44f8d3af75 100644 --- a/drivers/media/pci/ddbridge/ddbridge-regs.h +++ b/drivers/media/pci/ddbridge/ddbridge-regs.h @@ -1,7 +1,7 @@ /* * ddbridge-regs.h: Digital Devices PCIe bridge driver * - * Copyright (C) 2010-2011 Digital Devices GmbH + * Copyright (C) 2010-2017 Digital Devices GmbH * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -17,35 +17,41 @@ * http://www.gnu.org/copyleft/gpl.html */ -/* DD-DVBBridgeV1.h 273 2010-09-17 05:03:16Z manfred */ - -/* Register Definitions */ - -#define CUR_REGISTERMAP_VERSION 0x10000 - -#define HARDWARE_VERSION 0x00 -#define REGISTERMAP_VERSION 0x04 - /* ------------------------------------------------------------------------- */ /* SPI Controller */ #define SPI_CONTROL 0x10 #define SPI_DATA 0x14 +/* ------------------------------------------------------------------------- */ +/* GPIO */ + +#define GPIO_OUTPUT 0x20 +#define GPIO_INPUT 0x24 +#define GPIO_DIRECTION 0x28 + +/* ------------------------------------------------------------------------- */ +/* MDIO */ + +#define MDIO_CTRL 0x20 +#define MDIO_ADR 0x24 +#define MDIO_REG 0x28 +#define MDIO_VAL 0x2C + /* ------------------------------------------------------------------------- */ #define BOARD_CONTROL 0x30 /* ------------------------------------------------------------------------- */ -/* Interrupt controller */ -/* How many MSI's are available depends on HW (Min 2 max 8) */ -/* How many are usable also depends on Host platform */ +/* Interrupt controller + * How many MSI's are available depends on HW (Min 2 max 8) + * How many are 
usable also depends on Host platform + */ #define INTERRUPT_BASE (0x40) #define INTERRUPT_ENABLE (INTERRUPT_BASE + 0x00) -#define MSI0_ENABLE (INTERRUPT_BASE + 0x00) #define MSI1_ENABLE (INTERRUPT_BASE + 0x04) #define MSI2_ENABLE (INTERRUPT_BASE + 0x08) #define MSI3_ENABLE (INTERRUPT_BASE + 0x0C) @@ -57,59 +63,31 @@ #define INTERRUPT_STATUS (INTERRUPT_BASE + 0x20) #define INTERRUPT_ACK (INTERRUPT_BASE + 0x20) -#define INTMASK_I2C1 (0x00000001) -#define INTMASK_I2C2 (0x00000002) -#define INTMASK_I2C3 (0x00000004) -#define INTMASK_I2C4 (0x00000008) +/* Temperature Monitor ( 2x LM75A @ 0x90,0x92 I2c ) */ +#define TEMPMON_BASE (0x1c0) +#define TEMPMON_CONTROL (TEMPMON_BASE + 0x00) -#define INTMASK_CIRQ1 (0x00000010) -#define INTMASK_CIRQ2 (0x00000020) -#define INTMASK_CIRQ3 (0x00000040) -#define INTMASK_CIRQ4 (0x00000080) +#define TEMPMON_CONTROL_AUTOSCAN (0x00000002) +#define TEMPMON_CONTROL_INTENABLE (0x00000004) +#define TEMPMON_CONTROL_OVERTEMP (0x00008000) -#define INTMASK_TSINPUT1 (0x00000100) -#define INTMASK_TSINPUT2 (0x00000200) -#define INTMASK_TSINPUT3 (0x00000400) -#define INTMASK_TSINPUT4 (0x00000800) -#define INTMASK_TSINPUT5 (0x00001000) -#define INTMASK_TSINPUT6 (0x00002000) -#define INTMASK_TSINPUT7 (0x00004000) -#define INTMASK_TSINPUT8 (0x00008000) +/* SHORT Temperature in Celsius x 256 */ +#define TEMPMON_SENSOR0 (TEMPMON_BASE + 0x04) +#define TEMPMON_SENSOR1 (TEMPMON_BASE + 0x08) -#define INTMASK_TSOUTPUT1 (0x00010000) -#define INTMASK_TSOUTPUT2 (0x00020000) -#define INTMASK_TSOUTPUT3 (0x00040000) -#define INTMASK_TSOUTPUT4 (0x00080000) +#define TEMPMON_FANCONTROL (TEMPMON_BASE + 0x10) /* ------------------------------------------------------------------------- */ /* I2C Master Controller */ -#define I2C_BASE (0x80) /* Byte offset */ - #define I2C_COMMAND (0x00) #define I2C_TIMING (0x04) #define I2C_TASKLENGTH (0x08) /* High read, low write */ #define I2C_TASKADDRESS (0x0C) /* High read, low write */ - #define I2C_MONITOR (0x1C) -#define I2C_BASE_1 (I2C_BASE + 0x00) -#define I2C_BASE_2 (I2C_BASE + 0x20) -#define I2C_BASE_3 (I2C_BASE + 0x40) -#define I2C_BASE_4 (I2C_BASE + 0x60) - -#define I2C_BASE_N(i) (I2C_BASE + (i) * 0x20) - -#define I2C_TASKMEM_BASE (0x1000) /* Byte offset */ -#define I2C_TASKMEM_SIZE (0x1000) - #define I2C_SPEED_400 (0x04030404) -#define I2C_SPEED_200 (0x09080909) -#define I2C_SPEED_154 (0x0C0B0C0C) #define I2C_SPEED_100 (0x13121313) -#define I2C_SPEED_77 (0x19181919) -#define I2C_SPEED_50 (0x27262727) - /* ------------------------------------------------------------------------- */ /* DMA Controller */ @@ -117,35 +95,62 @@ #define DMA_BASE_WRITE (0x100) #define DMA_BASE_READ (0x140) -#define DMA_CONTROL (0x00) /* 64 */ -#define DMA_ERROR (0x04) /* 65 ( only read instance ) */ - -#define DMA_DIAG_CONTROL (0x1C) /* 71 */ -#define DMA_DIAG_PACKETCOUNTER_LOW (0x20) /* 72 */ -#define DMA_DIAG_PACKETCOUNTER_HIGH (0x24) /* 73 */ -#define DMA_DIAG_TIMECOUNTER_LOW (0x28) /* 74 */ -#define DMA_DIAG_TIMECOUNTER_HIGH (0x2C) /* 75 */ -#define DMA_DIAG_RECHECKCOUNTER (0x30) /* 76 ( Split completions on read ) */ -#define DMA_DIAG_WAITTIMEOUTINIT (0x34) /* 77 */ -#define DMA_DIAG_WAITOVERFLOWCOUNTER (0x38) /* 78 */ -#define DMA_DIAG_WAITCOUNTER (0x3C) /* 79 */ +#define TS_CONTROL(_io) (_io->regs + 0x00) +#define TS_CONTROL2(_io) (_io->regs + 0x04) /* ------------------------------------------------------------------------- */ /* DMA Buffer */ -#define TS_INPUT_BASE (0x200) -#define TS_INPUT_CONTROL(i) (TS_INPUT_BASE + (i) * 16 + 0x00) +#define 
DMA_BUFFER_CONTROL(_dma) (_dma->regs + 0x00) +#define DMA_BUFFER_ACK(_dma) (_dma->regs + 0x04) +#define DMA_BUFFER_CURRENT(_dma) (_dma->regs + 0x08) +#define DMA_BUFFER_SIZE(_dma) (_dma->regs + 0x0c) -#define TS_OUTPUT_BASE (0x280) -#define TS_OUTPUT_CONTROL(i) (TS_OUTPUT_BASE + (i) * 16 + 0x00) +/* ------------------------------------------------------------------------- */ +/* CI Interface (only CI-Bridge) */ -#define DMA_BUFFER_BASE (0x300) +#define CI_BASE (0x400) +#define CI_CONTROL(i) (CI_BASE + (i) * 32 + 0x00) -#define DMA_BUFFER_CONTROL(i) (DMA_BUFFER_BASE + (i) * 16 + 0x00) -#define DMA_BUFFER_ACK(i) (DMA_BUFFER_BASE + (i) * 16 + 0x04) -#define DMA_BUFFER_CURRENT(i) (DMA_BUFFER_BASE + (i) * 16 + 0x08) -#define DMA_BUFFER_SIZE(i) (DMA_BUFFER_BASE + (i) * 16 + 0x0c) +#define CI_DO_ATTRIBUTE_RW(i) (CI_BASE + (i) * 32 + 0x04) +#define CI_DO_IO_RW(i) (CI_BASE + (i) * 32 + 0x08) +#define CI_READDATA(i) (CI_BASE + (i) * 32 + 0x0c) +#define CI_DO_READ_ATTRIBUTES(i) (CI_BASE + (i) * 32 + 0x10) -#define DMA_BASE_ADDRESS_TABLE (0x2000) -#define DMA_BASE_ADDRESS_TABLE_ENTRIES (512) +#define CI_RESET_CAM (0x00000001) +#define CI_POWER_ON (0x00000002) +#define CI_ENABLE (0x00000004) +#define CI_BYPASS_DISABLE (0x00000010) + +#define CI_CAM_READY (0x00010000) +#define CI_CAM_DETECT (0x00020000) +#define CI_READY (0x80000000) + +#define CI_READ_CMD (0x40000000) +#define CI_WRITE_CMD (0x80000000) + +#define CI_BUFFER_BASE (0x3000) +#define CI_BUFFER_SIZE (0x0800) + +#define CI_BUFFER(i) (CI_BUFFER_BASE + (i) * CI_BUFFER_SIZE) + +/* ------------------------------------------------------------------------- */ +/* LNB commands (mxl5xx / Max S8) */ + +#define LNB_BASE (0x400) +#define LNB_CONTROL(i) (LNB_BASE + (i) * 0x20 + 0x00) + +#define LNB_CMD (7ULL << 0) +#define LNB_CMD_NOP 0 +#define LNB_CMD_INIT 1 +#define LNB_CMD_LOW 3 +#define LNB_CMD_HIGH 4 +#define LNB_CMD_OFF 5 +#define LNB_CMD_DISEQC 6 + +#define LNB_BUSY (1ULL << 4) +#define LNB_TONE (1ULL << 15) + +#define LNB_BUF_LEVEL(i) (LNB_BASE + (i) * 0x20 + 0x10) +#define LNB_BUF_WRITE(i) (LNB_BASE + (i) * 0x20 + 0x14) diff --git a/drivers/media/pci/ddbridge/ddbridge.h b/drivers/media/pci/ddbridge/ddbridge.h index 4a0e3283d646..e9afa96bd9df 100644 --- a/drivers/media/pci/ddbridge/ddbridge.h +++ b/drivers/media/pci/ddbridge/ddbridge.h @@ -1,7 +1,8 @@ /* * ddbridge.h: Digital Devices PCIe bridge driver * - * Copyright (C) 2010-2011 Digital Devices GmbH + * Copyright (C) 2010-2017 Digital Devices GmbH + * Ralph Metzler * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -20,15 +21,39 @@ #ifndef _DDBRIDGE_H_ #define _DDBRIDGE_H_ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + #include #include #include -#include #include #include -#include +#include +#include +#include + #include #include +#include +#include #include "dmxdev.h" #include "dvbdev.h" @@ -37,69 +62,122 @@ #include "dvb_ringbuffer.h" #include "dvb_ca_en50221.h" #include "dvb_net.h" -#include "cxd2099.h" -#define DDB_MAX_I2C 4 -#define DDB_MAX_PORT 4 -#define DDB_MAX_INPUT 8 -#define DDB_MAX_OUTPUT 4 +#define DDBRIDGE_VERSION "0.9.31intermediate-integrated" + +#define DDB_MAX_I2C 32 +#define DDB_MAX_PORT 32 +#define DDB_MAX_INPUT 64 +#define DDB_MAX_OUTPUT 32 #define DDB_MAX_LINK 4 #define DDB_LINK_SHIFT 28 #define DDB_LINK_TAG(_x) (_x << DDB_LINK_SHIFT) -#define 
DDB_XO2_TYPE_NONE 0 -#define DDB_XO2_TYPE_DUOFLEX 1 -#define DDB_XO2_TYPE_CI 2 +struct ddb_regset { + u32 base; + u32 num; + u32 size; +}; + +struct ddb_regmap { + u32 irq_base_i2c; + u32 irq_base_idma; + u32 irq_base_odma; + + const struct ddb_regset *i2c; + const struct ddb_regset *i2c_buf; + const struct ddb_regset *idma; + const struct ddb_regset *idma_buf; + const struct ddb_regset *odma; + const struct ddb_regset *odma_buf; + + const struct ddb_regset *input; + const struct ddb_regset *output; + + const struct ddb_regset *channel; +}; + +struct ddb_ids { + u16 vendor; + u16 device; + u16 subvendor; + u16 subdevice; + + u32 hwid; + u32 regmapid; + u32 devid; + u32 mac; +}; struct ddb_info { int type; -#define DDB_NONE 0 -#define DDB_OCTOPUS 1 -#define DDB_OCTOPUS_MAX_CT 6 +#define DDB_NONE 0 +#define DDB_OCTOPUS 1 +#define DDB_OCTOPUS_CI 2 +#define DDB_OCTOPUS_MAX 5 +#define DDB_OCTOPUS_MAX_CT 6 char *name; - int port_num; - u32 port_type[DDB_MAX_PORT]; + u32 i2c_mask; + u8 port_num; + u8 led_num; + u8 fan_num; + u8 temp_num; + u8 temp_bus; u32 board_control; u32 board_control_2; + u8 mdio_num; + u8 con_clock; /* use a continuous clock */ u8 ts_quirks; #define TS_QUIRK_SERIAL 1 #define TS_QUIRK_REVERSED 2 #define TS_QUIRK_ALT_OSC 8 + u32 tempmon_irq; + const struct ddb_regmap *regmap; }; -/* DMA_SIZE MUST be divisible by 188 and 128 !!! */ +/* DMA_SIZE MUST be smaller than 256k and + * MUST be divisible by 188 and 128 !!! + */ + +#define DMA_MAX_BUFS 32 /* hardware table limit */ -#define INPUT_DMA_MAX_BUFS 32 /* hardware table limit */ #define INPUT_DMA_BUFS 8 #define INPUT_DMA_SIZE (128*47*21) +#define INPUT_DMA_IRQ_DIV 1 -#define OUTPUT_DMA_MAX_BUFS 32 #define OUTPUT_DMA_BUFS 8 #define OUTPUT_DMA_SIZE (128*47*21) +#define OUTPUT_DMA_IRQ_DIV 1 struct ddb; struct ddb_port; -struct ddb_input { - struct ddb_port *port; - u32 nr; - int attached; +struct ddb_dma { + void *io; + u32 regs; + u32 bufregs; - dma_addr_t pbuf[INPUT_DMA_MAX_BUFS]; - u8 *vbuf[INPUT_DMA_MAX_BUFS]; - u32 dma_buf_num; - u32 dma_buf_size; + dma_addr_t pbuf[DMA_MAX_BUFS]; + u8 *vbuf[DMA_MAX_BUFS]; + u32 num; + u32 size; + u32 div; + u32 bufval; - struct tasklet_struct tasklet; + struct work_struct work; spinlock_t lock; wait_queue_head_t wq; int running; u32 stat; + u32 ctrl; u32 cbuf; u32 coff; +}; - struct dvb_adapter adap; +struct ddb_dvb { + struct dvb_adapter *adap; + int adap_registered; struct dvb_device *dev; struct i2c_client *i2c_client[1]; struct dvb_frontend *fe; @@ -110,97 +188,210 @@ struct ddb_input { struct dmx_frontend hw_frontend; struct dmx_frontend mem_frontend; int users; - int (*gate_ctrl)(struct dvb_frontend *, int); + u32 attached; + u8 input; + + enum fe_sec_tone_mode tone; + enum fe_sec_voltage voltage; + + int (*i2c_gate_ctrl)(struct dvb_frontend *, int); + int (*set_voltage)(struct dvb_frontend *fe, + enum fe_sec_voltage voltage); + int (*set_input)(struct dvb_frontend *fe, int input); + int (*diseqc_send_master_cmd)(struct dvb_frontend *fe, + struct dvb_diseqc_master_cmd *cmd); }; -struct ddb_output { +struct ddb_ci { + struct dvb_ca_en50221 en; struct ddb_port *port; u32 nr; - dma_addr_t pbuf[OUTPUT_DMA_MAX_BUFS]; - u8 *vbuf[OUTPUT_DMA_MAX_BUFS]; - u32 dma_buf_num; - u32 dma_buf_size; - struct tasklet_struct tasklet; - spinlock_t lock; - wait_queue_head_t wq; - int running; - u32 stat; - u32 cbuf; - u32 coff; - - struct dvb_adapter adap; - struct dvb_device *dev; + struct mutex lock; }; +struct ddb_io { + struct ddb_port *port; + u32 nr; + u32 regs; + struct ddb_dma *dma; + struct ddb_io 
*redo; + struct ddb_io *redi; +}; + +#define ddb_output ddb_io +#define ddb_input ddb_io + struct ddb_i2c { struct ddb *dev; u32 nr; - struct i2c_adapter adap; - struct i2c_adapter adap2; u32 regs; + u32 link; + struct i2c_adapter adap; u32 rbuf; u32 wbuf; - int done; - wait_queue_head_t wq; + u32 bsize; + struct completion completion; }; struct ddb_port { struct ddb *dev; u32 nr; + u32 pnr; + u32 regs; + u32 lnr; struct ddb_i2c *i2c; struct mutex i2c_gate_lock; u32 class; #define DDB_PORT_NONE 0 #define DDB_PORT_CI 1 #define DDB_PORT_TUNER 2 - u32 type; -#define DDB_TUNER_NONE 0 -#define DDB_TUNER_DVBS_ST 1 -#define DDB_TUNER_DVBS_ST_AA 2 -#define DDB_TUNER_DVBCT2_SONY_P 7 -#define DDB_TUNER_DVBC2T2_SONY_P 8 -#define DDB_TUNER_ISDBT_SONY_P 9 -#define DDB_TUNER_DVBC2T2I_SONY_P 15 -#define DDB_TUNER_DVBCT_TR 16 -#define DDB_TUNER_DVBCT_ST 17 -#define DDB_TUNER_XO2_DVBS_STV0910 32 -#define DDB_TUNER_XO2_DVBCT2_SONY 33 -#define DDB_TUNER_XO2_ISDBT_SONY 34 -#define DDB_TUNER_XO2_DVBC2T2_SONY 35 -#define DDB_TUNER_XO2_ATSC_ST 36 -#define DDB_TUNER_XO2_DVBC2T2I_SONY 37 +#define DDB_PORT_LOOP 3 + char *name; + char *type_name; + u32 type; +#define DDB_TUNER_NONE 0 +#define DDB_TUNER_DVBS_ST 1 +#define DDB_TUNER_DVBS_ST_AA 2 +#define DDB_TUNER_DVBCT_TR 3 +#define DDB_TUNER_DVBCT_ST 4 +#define DDB_CI_INTERNAL 5 +#define DDB_CI_EXTERNAL_SONY 6 +#define DDB_TUNER_DVBCT2_SONY_P 7 +#define DDB_TUNER_DVBC2T2_SONY_P 8 +#define DDB_TUNER_ISDBT_SONY_P 9 +#define DDB_TUNER_DVBS_STV0910_P 10 +#define DDB_TUNER_MXL5XX 11 +#define DDB_CI_EXTERNAL_XO2 12 +#define DDB_CI_EXTERNAL_XO2_B 13 +#define DDB_TUNER_DVBS_STV0910_PR 14 +#define DDB_TUNER_DVBC2T2I_SONY_P 15 - u32 adr; +#define DDB_TUNER_XO2 32 +#define DDB_TUNER_DVBS_STV0910 (DDB_TUNER_XO2 + 0) +#define DDB_TUNER_DVBCT2_SONY (DDB_TUNER_XO2 + 1) +#define DDB_TUNER_ISDBT_SONY (DDB_TUNER_XO2 + 2) +#define DDB_TUNER_DVBC2T2_SONY (DDB_TUNER_XO2 + 3) +#define DDB_TUNER_ATSC_ST (DDB_TUNER_XO2 + 4) +#define DDB_TUNER_DVBC2T2I_SONY (DDB_TUNER_XO2 + 5) struct ddb_input *input[2]; struct ddb_output *output; struct dvb_ca_en50221 *en; + struct ddb_dvb dvb[2]; + u32 gap; + u32 obr; + u8 creg; +}; + +#define CM_STARTUP_DELAY 2 +#define CM_AVERAGE 20 +#define CM_GAIN 10 + +#define HW_LSB_SHIFT 12 +#define HW_LSB_MASK 0x1000 + +#define CM_IDLE 0 +#define CM_STARTUP 1 +#define CM_ADJUST 2 + +#define TS_CAPTURE_LEN (4096) + +struct ddb_lnb { + struct mutex lock; + u32 tone; + enum fe_sec_voltage oldvoltage[4]; + u32 voltage[4]; + u32 voltages; + u32 fmode; +}; + +struct ddb_link { + struct ddb *dev; + const struct ddb_info *info; + u32 nr; + u32 regs; + spinlock_t lock; + struct mutex flash_mutex; + struct ddb_lnb lnb; + struct tasklet_struct tasklet; + struct ddb_ids ids; + + spinlock_t temp_lock; + int overtemperature_error; + u8 temp_tab[11]; }; struct ddb { struct pci_dev *pdev; + struct platform_device *pfdev; + struct device *dev; + + int msi; + struct workqueue_struct *wq; + u32 has_dma; + + struct ddb_link link[DDB_MAX_LINK]; unsigned char __iomem *regs; + u32 regs_len; + u32 port_num; struct ddb_port port[DDB_MAX_PORT]; + u32 i2c_num; struct ddb_i2c i2c[DDB_MAX_I2C]; struct ddb_input input[DDB_MAX_INPUT]; struct ddb_output output[DDB_MAX_OUTPUT]; + struct dvb_adapter adap[DDB_MAX_INPUT]; + struct ddb_dma idma[DDB_MAX_INPUT]; + struct ddb_dma odma[DDB_MAX_OUTPUT]; + + void (*handler[4][256])(unsigned long); + unsigned long handler_data[4][256]; struct device *ddb_dev; - int nr; + u32 ddb_dev_users; + u32 nr; u8 iobuf[1028]; - struct ddb_info *info; - int msi; + u8 
leds; + u32 ts_irq; + u32 i2c_irq; + + struct mutex mutex; + + u8 tsbuf[TS_CAPTURE_LEN]; }; +/****************************************************************************/ +/****************************************************************************/ /****************************************************************************/ -#define ddbwritel(_val, _adr) writel((_val), \ - dev->regs+(_adr)) -#define ddbreadl(_adr) readl(dev->regs+(_adr)) -#define ddbcpyto(_adr, _src, _count) memcpy_toio(dev->regs+(_adr), (_src), (_count)) -#define ddbcpyfrom(_dst, _adr, _count) memcpy_fromio((_dst), dev->regs+(_adr), (_count)) +int ddbridge_flashread(struct ddb *dev, u32 link, u8 *buf, u32 addr, u32 len); /****************************************************************************/ -#endif +/* ddbridge-main.c (modparams) */ +extern int ci_bitrate; +extern int ts_loop; +extern int xo2_speed; +extern int alt_dma; +extern int no_init; +extern int stv0910_single; +extern struct workqueue_struct *ddb_wq; + +/* ddbridge-core.c */ +void ddb_ports_detach(struct ddb *dev); +void ddb_ports_release(struct ddb *dev); +void ddb_buffers_free(struct ddb *dev); +void ddb_device_destroy(struct ddb *dev); +irqreturn_t ddb_irq_handler0(int irq, void *dev_id); +irqreturn_t ddb_irq_handler1(int irq, void *dev_id); +irqreturn_t ddb_irq_handler(int irq, void *dev_id); +void ddb_ports_init(struct ddb *dev); +int ddb_buffers_alloc(struct ddb *dev); +int ddb_ports_attach(struct ddb *dev); +int ddb_device_create(struct ddb *dev); +int ddb_class_create(void); +void ddb_class_destroy(void); +int ddb_init(struct ddb *dev); +void ddb_unmap(struct ddb *dev); + +#endif /* DDBRIDGE_H */ diff --git a/drivers/media/pci/dm1105/dm1105.c b/drivers/media/pci/dm1105/dm1105.c index 1d41934cfaf5..7c3900dec368 100644 --- a/drivers/media/pci/dm1105/dm1105.c +++ b/drivers/media/pci/dm1105/dm1105.c @@ -571,7 +571,7 @@ static u32 functionality(struct i2c_adapter *adap) return I2C_FUNC_I2C; } -static struct i2c_algorithm dm1105_algo = { +static const struct i2c_algorithm dm1105_algo = { .master_xfer = dm1105_i2c_xfer, .functionality = functionality, }; @@ -675,7 +675,7 @@ static void dm1105_emit_key(struct work_struct *work) data = (ircom >> 8) & 0x7f; /* FIXME: UNKNOWN because we don't generate a full NEC scancode (yet?) 
*/ - rc_keydown(ir->dev, RC_TYPE_UNKNOWN, data, 0); + rc_keydown(ir->dev, RC_PROTO_UNKNOWN, data, 0); } /* work handler */ @@ -748,7 +748,7 @@ static int dm1105_ir_init(struct dm1105_dev *dm1105) dev->driver_name = MODULE_NAME; dev->map_name = RC_MAP_DM1105_NEC; - dev->input_name = "DVB on-card IR receiver"; + dev->device_name = "DVB on-card IR receiver"; dev->input_phys = dm1105->ir.input_phys; dev->input_id.bustype = BUS_PCI; dev->input_id.version = 1; @@ -1208,7 +1208,7 @@ static void dm1105_remove(struct pci_dev *pdev) kfree(dev); } -static struct pci_device_id dm1105_id_table[] = { +static const struct pci_device_id dm1105_id_table[] = { { .vendor = PCI_VENDOR_ID_TRIGEM, .device = PCI_DEVICE_ID_DM1105, diff --git a/drivers/media/pci/dt3155/dt3155.c b/drivers/media/pci/dt3155/dt3155.c index 6a219694b225..1775c36891ae 100644 --- a/drivers/media/pci/dt3155/dt3155.c +++ b/drivers/media/pci/dt3155/dt3155.c @@ -499,7 +499,7 @@ static int dt3155_init_board(struct dt3155_priv *pd) return 0; } -static struct video_device dt3155_vdev = { +static const struct video_device dt3155_vdev = { .name = DT3155_NAME, .fops = &dt3155_fops, .ioctl_ops = &dt3155_ioctl_ops, diff --git a/drivers/media/pci/ivtv/ivtv-alsa-mixer.c b/drivers/media/pci/ivtv/ivtv-alsa-mixer.c index ba372a23eb5c..aee453fcff37 100644 --- a/drivers/media/pci/ivtv/ivtv-alsa-mixer.c +++ b/drivers/media/pci/ivtv/ivtv-alsa-mixer.c @@ -156,7 +156,7 @@ int __init snd_ivtv_mixer_create(struct snd_ivtv_card *itvsc) strlcpy(sc->mixername, "CX2341[56] Mixer", sizeof(sc->mixername)); - ret = snd_ctl_add(sc, snd_ctl_new1(snd_ivtv_mixer_tv_vol, itvsc)); + ret = snd_ctl_add(sc, snd_ctl_new1(&snd_ivtv_mixer_tv_vol, itvsc)); if (ret) { IVTV_ALSA_WARN("%s: failed to add %s control, err %d\n", __func__, snd_ivtv_mixer_tv_vol.name, ret); diff --git a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c index 417d03da01f0..5326d86fa375 100644 --- a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c +++ b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c @@ -41,7 +41,7 @@ MODULE_PARM_DESC(pcm_debug, "enable debug messages for pcm"); pr_info("ivtv-alsa-pcm %s: " fmt, __func__, ##arg); \ } while (0) -static struct snd_pcm_hardware snd_ivtv_hw_capture = { +static const struct snd_pcm_hardware snd_ivtv_hw_capture = { .info = SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c index e8fa99b6c7b4..54dcac4b2229 100644 --- a/drivers/media/pci/ivtv/ivtv-driver.c +++ b/drivers/media/pci/ivtv/ivtv-driver.c @@ -73,7 +73,7 @@ int (*ivtv_ext_init)(struct ivtv *); EXPORT_SYMBOL(ivtv_ext_init); /* add your revision and whatnot here */ -static struct pci_device_id ivtv_pci_tbl[] = { +static const struct pci_device_id ivtv_pci_tbl[] = { {PCI_VENDOR_ID_ICOMP, PCI_DEVICE_ID_IVTV15, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {PCI_VENDOR_ID_ICOMP, PCI_DEVICE_ID_IVTV16, diff --git a/drivers/media/pci/ivtv/ivtv-i2c.c b/drivers/media/pci/ivtv/ivtv-i2c.c index dea80efd5836..5a35e366f4c0 100644 --- a/drivers/media/pci/ivtv/ivtv-i2c.c +++ b/drivers/media/pci/ivtv/ivtv-i2c.c @@ -148,7 +148,7 @@ static const char * const hw_devicenames[] = { "ir_video", /* IVTV_HW_I2C_IR_RX_ADAPTEC */ }; -static int get_key_adaptec(struct IR_i2c *ir, enum rc_type *protocol, +static int get_key_adaptec(struct IR_i2c *ir, enum rc_proto *protocol, u32 *scancode, u8 *toggle) { unsigned char keybuf[4]; @@ -168,7 +168,7 @@ static int get_key_adaptec(struct IR_i2c *ir, enum rc_type *protocol, 
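In the ivtv hunks continuing below, the protocol masks handed to ir-kbd-i2c (init_data->type) move from RC_BIT_* to RC_PROTO_BIT_*. These are per-protocol bitmask values, distinct from the scalar RC_PROTO_* enum passed to rc_keydown(). A hedged sketch of the bitmask side, using a hypothetical helper and the rc_dev->allowed_protocols field:

#include <media/rc-core.h>

/*
 * Illustrative only: RC_PROTO_BIT_* values are one-bit-per-protocol masks
 * (used in fields such as rc_dev.allowed_protocols or the ir-kbd-i2c
 * init_data.type seen below), while RC_PROTO_* are scalar enum values
 * used when reporting a decoded key.
 */
static void example_restrict_protocols(struct rc_dev *rdev)
{
        rdev->allowed_protocols = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE |
                                  RC_PROTO_BIT_RC6_6A_32;
}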
keybuf[2] &= 0x7f; keybuf[3] |= 0x80; - *protocol = RC_TYPE_UNKNOWN; + *protocol = RC_PROTO_UNKNOWN; *scancode = keybuf[3] | keybuf[2] << 8 | keybuf[1] << 16 |keybuf[0] << 24; *toggle = 0; return 1; @@ -201,22 +201,22 @@ static int ivtv_i2c_new_ir(struct ivtv *itv, u32 hw, const char *type, u8 addr) init_data->ir_codes = RC_MAP_AVERMEDIA_CARDBUS; init_data->internal_get_key_func = IR_KBD_GET_KEY_AVERMEDIA_CARDBUS; - init_data->type = RC_BIT_OTHER; + init_data->type = RC_PROTO_BIT_OTHER; init_data->name = "AVerMedia AVerTV card"; break; case IVTV_HW_I2C_IR_RX_HAUP_EXT: case IVTV_HW_I2C_IR_RX_HAUP_INT: init_data->ir_codes = RC_MAP_HAUPPAUGE; init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP; - init_data->type = RC_BIT_RC5; + init_data->type = RC_PROTO_BIT_RC5; init_data->name = itv->card_name; break; case IVTV_HW_Z8F0811_IR_RX_HAUP: /* Default to grey remote */ init_data->ir_codes = RC_MAP_HAUPPAUGE; init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR; - init_data->type = RC_BIT_RC5 | RC_BIT_RC6_MCE | - RC_BIT_RC6_6A_32; + init_data->type = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | + RC_PROTO_BIT_RC6_6A_32; init_data->name = itv->card_name; break; case IVTV_HW_I2C_IR_RX_ADAPTEC: @@ -224,7 +224,7 @@ static int ivtv_i2c_new_ir(struct ivtv *itv, u32 hw, const char *type, u8 addr) init_data->name = itv->card_name; /* FIXME: The protocol and RC_MAP needs to be corrected */ init_data->ir_codes = RC_MAP_EMPTY; - init_data->type = RC_BIT_UNKNOWN; + init_data->type = RC_PROTO_BIT_UNKNOWN; break; } @@ -632,7 +632,7 @@ static const struct i2c_algorithm ivtv_algo = { }; /* template for our-bit banger */ -static struct i2c_adapter ivtv_i2c_adap_hw_template = { +static const struct i2c_adapter ivtv_i2c_adap_hw_template = { .name = "ivtv i2c driver", .algo = &ivtv_algo, .algo_data = NULL, /* filled from template */ @@ -682,7 +682,7 @@ static int ivtv_getsda_old(void *data) } /* template for i2c-bit-algo */ -static struct i2c_adapter ivtv_i2c_adap_template = { +static const struct i2c_adapter ivtv_i2c_adap_template = { .name = "ivtv i2c driver", .algo = NULL, /* set by i2c-algo-bit */ .algo_data = NULL, /* filled from template */ diff --git a/drivers/media/pci/mantis/hopper_cards.c b/drivers/media/pci/mantis/hopper_cards.c index 68b5800030b7..11e987860b23 100644 --- a/drivers/media/pci/mantis/hopper_cards.c +++ b/drivers/media/pci/mantis/hopper_cards.c @@ -255,7 +255,7 @@ static void hopper_pci_remove(struct pci_dev *pdev) } -static struct pci_device_id hopper_pci_table[] = { +static const struct pci_device_id hopper_pci_table[] = { MAKE_ENTRY(TWINHAN_TECHNOLOGIES, MANTIS_VP_3028_DVB_T, &vp3028_config, NULL), { } diff --git a/drivers/media/pci/mantis/mantis_cards.c b/drivers/media/pci/mantis/mantis_cards.c index cdefffc16d9e..adc980d33711 100644 --- a/drivers/media/pci/mantis/mantis_cards.c +++ b/drivers/media/pci/mantis/mantis_cards.c @@ -281,7 +281,7 @@ static void mantis_pci_remove(struct pci_dev *pdev) return; } -static struct pci_device_id mantis_pci_table[] = { +static const struct pci_device_id mantis_pci_table[] = { MAKE_ENTRY(TECHNISAT, CABLESTAR_HD2, &vp2040_config, RC_MAP_TECHNISAT_TS35), MAKE_ENTRY(TECHNISAT, SKYSTAR_HD2_10, &vp1041_config, diff --git a/drivers/media/pci/mantis/mantis_common.h b/drivers/media/pci/mantis/mantis_common.h index d48778a366a9..a664c319ef0a 100644 --- a/drivers/media/pci/mantis/mantis_common.h +++ b/drivers/media/pci/mantis/mantis_common.h @@ -176,7 +176,7 @@ struct mantis_pci { struct work_struct uart_work; struct rc_dev *rc; - char input_name[80]; + char 
device_name[80]; char input_phys[80]; char *rc_map_name; }; diff --git a/drivers/media/pci/mantis/mantis_i2c.c b/drivers/media/pci/mantis/mantis_i2c.c index d72ee47dc6e4..496c10dfc4df 100644 --- a/drivers/media/pci/mantis/mantis_i2c.c +++ b/drivers/media/pci/mantis/mantis_i2c.c @@ -212,7 +212,7 @@ static u32 mantis_i2c_func(struct i2c_adapter *adapter) return I2C_FUNC_SMBUS_EMUL; } -static struct i2c_algorithm mantis_algo = { +static const struct i2c_algorithm mantis_algo = { .master_xfer = mantis_i2c_xfer, .functionality = mantis_i2c_func, }; diff --git a/drivers/media/pci/mantis/mantis_input.c b/drivers/media/pci/mantis/mantis_input.c index 50d10cb7d49d..7519dcc934dd 100644 --- a/drivers/media/pci/mantis/mantis_input.c +++ b/drivers/media/pci/mantis/mantis_input.c @@ -31,7 +31,7 @@ void mantis_input_process(struct mantis_pci *mantis, int scancode) { if (mantis->rc) - rc_keydown(mantis->rc, RC_TYPE_UNKNOWN, scancode, 0); + rc_keydown(mantis->rc, RC_PROTO_UNKNOWN, scancode, 0); } int mantis_input_init(struct mantis_pci *mantis) @@ -46,12 +46,12 @@ int mantis_input_init(struct mantis_pci *mantis) goto out; } - snprintf(mantis->input_name, sizeof(mantis->input_name), + snprintf(mantis->device_name, sizeof(mantis->device_name), "Mantis %s IR receiver", mantis->hwconfig->model_name); snprintf(mantis->input_phys, sizeof(mantis->input_phys), "pci-%s/ir0", pci_name(mantis->pdev)); - dev->input_name = mantis->input_name; + dev->device_name = mantis->device_name; dev->input_phys = mantis->input_phys; dev->input_id.bustype = BUS_PCI; dev->input_id.vendor = mantis->vendor_id; diff --git a/drivers/media/pci/meye/meye.c b/drivers/media/pci/meye/meye.c index 9c4a024745de..49e047e4a81e 100644 --- a/drivers/media/pci/meye/meye.c +++ b/drivers/media/pci/meye/meye.c @@ -1533,7 +1533,7 @@ static const struct v4l2_ioctl_ops meye_ioctl_ops = { .vidioc_default = vidioc_default, }; -static struct video_device meye_template = { +static const struct video_device meye_template = { .name = "meye", .fops = &meye_fops, .ioctl_ops = &meye_ioctl_ops, @@ -1801,7 +1801,7 @@ static void meye_remove(struct pci_dev *pcidev) printk(KERN_INFO "meye: removed\n"); } -static struct pci_device_id meye_pci_tbl[] = { +static const struct pci_device_id meye_pci_tbl[] = { { PCI_VDEVICE(KAWASAKI, PCI_DEVICE_ID_MCHIP_KL5A72002), 0 }, { } }; diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c index 5c0a4e614413..60e6cd5b3a03 100644 --- a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c +++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c @@ -1014,7 +1014,7 @@ static void netup_unidvb_finidev(struct pci_dev *pci_dev) } -static struct pci_device_id netup_unidvb_pci_tbl[] = { +static const struct pci_device_id netup_unidvb_pci_tbl[] = { { PCI_DEVICE(0x1b55, 0x18f6) }, /* hw rev. 1.3 */ { PCI_DEVICE(0x1b55, 0x18f7) }, /* hw rev. 
1.4 */ { 0, } diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c b/drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c index b49e4f9788e8..b13e319d24b7 100644 --- a/drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c +++ b/drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c @@ -300,7 +300,7 @@ static const struct i2c_algorithm netup_i2c_algorithm = { .functionality = netup_i2c_func, }; -static struct i2c_adapter netup_i2c_adapter = { +static const struct i2c_adapter netup_i2c_adapter = { .owner = THIS_MODULE, .name = NETUP_UNIDVB_NAME, .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, diff --git a/drivers/media/pci/ngene/ngene-i2c.c b/drivers/media/pci/ngene/ngene-i2c.c index fbf36353c701..3004947f300b 100644 --- a/drivers/media/pci/ngene/ngene-i2c.c +++ b/drivers/media/pci/ngene/ngene-i2c.c @@ -150,7 +150,7 @@ static u32 ngene_i2c_functionality(struct i2c_adapter *adap) return I2C_FUNC_SMBUS_EMUL; } -static struct i2c_algorithm ngene_i2c_algo = { +static const struct i2c_algorithm ngene_i2c_algo = { .master_xfer = ngene_i2c_master_xfer, .functionality = ngene_i2c_functionality, }; diff --git a/drivers/media/pci/pluto2/pluto2.c b/drivers/media/pci/pluto2/pluto2.c index 74838109afe5..39dcba2b620c 100644 --- a/drivers/media/pci/pluto2/pluto2.c +++ b/drivers/media/pci/pluto2/pluto2.c @@ -770,7 +770,7 @@ static void pluto2_remove(struct pci_dev *pdev) #define PCI_DEVICE_ID_PLUTO2 0x0001 #endif -static struct pci_device_id pluto2_id_table[] = { +static const struct pci_device_id pluto2_id_table[] = { { .vendor = PCI_VENDOR_ID_SCM, .device = PCI_DEVICE_ID_PLUTO2, diff --git a/drivers/media/pci/pt1/pt1.c b/drivers/media/pci/pt1/pt1.c index 3219d2f3271e..b6b1a8d20d86 100644 --- a/drivers/media/pci/pt1/pt1.c +++ b/drivers/media/pci/pt1/pt1.c @@ -1202,7 +1202,7 @@ static int pt1_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } -static struct pci_device_id pt1_id_table[] = { +static const struct pci_device_id pt1_id_table[] = { { PCI_DEVICE(0x10ee, 0x211a) }, { PCI_DEVICE(0x10ee, 0x222a) }, { }, diff --git a/drivers/media/pci/pt3/pt3.c b/drivers/media/pci/pt3/pt3.c index e8b5d0992157..34044a45fecc 100644 --- a/drivers/media/pci/pt3/pt3.c +++ b/drivers/media/pci/pt3/pt3.c @@ -472,7 +472,6 @@ static int pt3_fetch_thread(void *data) } dev_dbg(adap->dvb_adap.device, "PT3: [%s] exited\n", adap->thread->comm); - adap->thread = NULL; return 0; } @@ -486,6 +485,7 @@ static int pt3_start_streaming(struct pt3_adapter *adap) if (IS_ERR(thread)) { int ret = PTR_ERR(thread); + adap->thread = NULL; dev_warn(adap->dvb_adap.device, "PT3 (adap:%d, dmx:%d): failed to start kthread\n", adap->dvb_adap.num, adap->dmxdev.dvbdev->id); @@ -508,6 +508,7 @@ static int pt3_stop_streaming(struct pt3_adapter *adap) /* kill the fetching thread */ ret = kthread_stop(adap->thread); + adap->thread = NULL; return ret; } @@ -520,14 +521,8 @@ static int pt3_start_feed(struct dvb_demux_feed *feed) adap = container_of(feed->demux, struct pt3_adapter, demux); adap->num_feeds++; - if (adap->thread) + if (adap->num_feeds > 1) return 0; - if (adap->num_feeds != 1) { - dev_warn(adap->dvb_adap.device, - "%s: unmatched start/stop_feed in adap:%i/dmx:%i\n", - __func__, adap->dvb_adap.num, adap->dmxdev.dvbdev->id); - adap->num_feeds = 1; - } return pt3_start_streaming(adap); diff --git a/drivers/media/pci/saa7134/saa7134-alsa.c b/drivers/media/pci/saa7134/saa7134-alsa.c index bf358ec7aca5..c59b69f1af9d 100644 --- a/drivers/media/pci/saa7134/saa7134-alsa.c +++ b/drivers/media/pci/saa7134/saa7134-alsa.c @@ -627,7 +627,7 @@ 
snd_card_saa7134_capture_pointer(struct snd_pcm_substream * substream) * switching to 32kHz without any frequency translation */ -static struct snd_pcm_hardware snd_card_saa7134_capture = +static const struct snd_pcm_hardware snd_card_saa7134_capture = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c index b1d3648dcba1..66acfd35ffc6 100644 --- a/drivers/media/pci/saa7134/saa7134-empress.c +++ b/drivers/media/pci/saa7134/saa7134-empress.c @@ -205,7 +205,7 @@ static const struct v4l2_ioctl_ops ts_ioctl_ops = { /* ----------------------------------------------------------- */ -static struct video_device saa7134_empress_template = { +static const struct video_device saa7134_empress_template = { .name = "saa7134-empress", .fops = &ts_fops, .ioctl_ops = &ts_ioctl_ops, diff --git a/drivers/media/pci/saa7134/saa7134-i2c.c b/drivers/media/pci/saa7134/saa7134-i2c.c index 9d0e69eae036..8f2ed632840f 100644 --- a/drivers/media/pci/saa7134/saa7134-i2c.c +++ b/drivers/media/pci/saa7134/saa7134-i2c.c @@ -339,7 +339,7 @@ static const struct i2c_algorithm saa7134_algo = { .functionality = functionality, }; -static struct i2c_adapter saa7134_adap_template = { +static const struct i2c_adapter saa7134_adap_template = { .owner = THIS_MODULE, .name = "saa7134", .algo = &saa7134_algo, diff --git a/drivers/media/pci/saa7134/saa7134-input.c b/drivers/media/pci/saa7134/saa7134-input.c index 78849c19f68a..9337e4615519 100644 --- a/drivers/media/pci/saa7134/saa7134-input.c +++ b/drivers/media/pci/saa7134/saa7134-input.c @@ -83,14 +83,16 @@ static int build_key(struct saa7134_dev *dev) if (data == ir->mask_keycode) rc_keyup(ir->dev); else - rc_keydown_notimeout(ir->dev, RC_TYPE_UNKNOWN, data, 0); + rc_keydown_notimeout(ir->dev, RC_PROTO_UNKNOWN, data, + 0); return 0; } if (ir->polling) { if ((ir->mask_keydown && (0 != (gpio & ir->mask_keydown))) || (ir->mask_keyup && (0 == (gpio & ir->mask_keyup)))) { - rc_keydown_notimeout(ir->dev, RC_TYPE_UNKNOWN, data, 0); + rc_keydown_notimeout(ir->dev, RC_PROTO_UNKNOWN, data, + 0); } else { rc_keyup(ir->dev); } @@ -98,7 +100,8 @@ static int build_key(struct saa7134_dev *dev) else { /* IRQ driven mode - handle key press and release in one go */ if ((ir->mask_keydown && (0 != (gpio & ir->mask_keydown))) || (ir->mask_keyup && (0 == (gpio & ir->mask_keyup)))) { - rc_keydown_notimeout(ir->dev, RC_TYPE_UNKNOWN, data, 0); + rc_keydown_notimeout(ir->dev, RC_PROTO_UNKNOWN, data, + 0); rc_keyup(ir->dev); } } @@ -108,7 +111,7 @@ static int build_key(struct saa7134_dev *dev) /* --------------------- Chip specific I2C key builders ----------------- */ -static int get_key_flydvb_trio(struct IR_i2c *ir, enum rc_type *protocol, +static int get_key_flydvb_trio(struct IR_i2c *ir, enum rc_proto *protocol, u32 *scancode, u8 *toggle) { int gpio; @@ -154,13 +157,14 @@ static int get_key_flydvb_trio(struct IR_i2c *ir, enum rc_type *protocol, return -EIO; } - *protocol = RC_TYPE_UNKNOWN; + *protocol = RC_PROTO_UNKNOWN; *scancode = b; *toggle = 0; return 1; } -static int get_key_msi_tvanywhere_plus(struct IR_i2c *ir, enum rc_type *protocol, +static int get_key_msi_tvanywhere_plus(struct IR_i2c *ir, + enum rc_proto *protocol, u32 *scancode, u8 *toggle) { unsigned char b; @@ -201,14 +205,14 @@ static int get_key_msi_tvanywhere_plus(struct IR_i2c *ir, enum rc_type *protocol /* Button pressed */ input_dbg("get_key_msi_tvanywhere_plus: Key = 0x%02X\n", b); - 
*protocol = RC_TYPE_UNKNOWN; + *protocol = RC_PROTO_UNKNOWN; *scancode = b; *toggle = 0; return 1; } /* copied and modified from get_key_msi_tvanywhere_plus() */ -static int get_key_kworld_pc150u(struct IR_i2c *ir, enum rc_type *protocol, +static int get_key_kworld_pc150u(struct IR_i2c *ir, enum rc_proto *protocol, u32 *scancode, u8 *toggle) { unsigned char b; @@ -249,13 +253,13 @@ static int get_key_kworld_pc150u(struct IR_i2c *ir, enum rc_type *protocol, /* Button pressed */ input_dbg("get_key_kworld_pc150u: Key = 0x%02X\n", b); - *protocol = RC_TYPE_UNKNOWN; + *protocol = RC_PROTO_UNKNOWN; *scancode = b; *toggle = 0; return 1; } -static int get_key_purpletv(struct IR_i2c *ir, enum rc_type *protocol, +static int get_key_purpletv(struct IR_i2c *ir, enum rc_proto *protocol, u32 *scancode, u8 *toggle) { unsigned char b; @@ -274,13 +278,13 @@ static int get_key_purpletv(struct IR_i2c *ir, enum rc_type *protocol, if (b & 0x80) return 1; - *protocol = RC_TYPE_UNKNOWN; + *protocol = RC_PROTO_UNKNOWN; *scancode = b; *toggle = 0; return 1; } -static int get_key_hvr1110(struct IR_i2c *ir, enum rc_type *protocol, +static int get_key_hvr1110(struct IR_i2c *ir, enum rc_proto *protocol, u32 *scancode, u8 *toggle) { unsigned char buf[5]; @@ -304,14 +308,14 @@ static int get_key_hvr1110(struct IR_i2c *ir, enum rc_type *protocol, * * FIXME: start bits could maybe be used...? */ - *protocol = RC_TYPE_RC5; + *protocol = RC_PROTO_RC5; *scancode = RC_SCANCODE_RC5(buf[3] & 0x1f, buf[4] >> 2); *toggle = !!(buf[3] & 0x40); return 1; } -static int get_key_beholdm6xx(struct IR_i2c *ir, enum rc_type *protocol, +static int get_key_beholdm6xx(struct IR_i2c *ir, enum rc_proto *protocol, u32 *scancode, u8 *toggle) { unsigned char data[12]; @@ -338,7 +342,7 @@ static int get_key_beholdm6xx(struct IR_i2c *ir, enum rc_type *protocol, if (data[9] != (unsigned char)(~data[8])) return 0; - *protocol = RC_TYPE_NECX; + *protocol = RC_PROTO_NECX; *scancode = RC_SCANCODE_NECX(data[11] << 8 | data[10], data[9]); *toggle = 0; return 1; @@ -347,7 +351,7 @@ static int get_key_beholdm6xx(struct IR_i2c *ir, enum rc_type *protocol, /* Common (grey or coloured) pinnacle PCTV remote handling * */ -static int get_key_pinnacle(struct IR_i2c *ir, enum rc_type *protocol, +static int get_key_pinnacle(struct IR_i2c *ir, enum rc_proto *protocol, u32 *scancode, u8 *toggle, int parity_offset, int marker, int code_modulo) { @@ -384,7 +388,7 @@ static int get_key_pinnacle(struct IR_i2c *ir, enum rc_type *protocol, code %= code_modulo; - *protocol = RC_TYPE_UNKNOWN; + *protocol = RC_PROTO_UNKNOWN; *scancode = code; *toggle = 0; @@ -401,7 +405,7 @@ static int get_key_pinnacle(struct IR_i2c *ir, enum rc_type *protocol, * * Sylvain Pasche */ -static int get_key_pinnacle_grey(struct IR_i2c *ir, enum rc_type *protocol, +static int get_key_pinnacle_grey(struct IR_i2c *ir, enum rc_proto *protocol, u32 *scancode, u8 *toggle) { @@ -413,7 +417,7 @@ static int get_key_pinnacle_grey(struct IR_i2c *ir, enum rc_type *protocol, * * Ricardo Cerqueira */ -static int get_key_pinnacle_color(struct IR_i2c *ir, enum rc_type *protocol, +static int get_key_pinnacle_color(struct IR_i2c *ir, enum rc_proto *protocol, u32 *scancode, u8 *toggle) { /* code_modulo parameter (0x88) is used to reduce code value to fit inside IR_KEYTAB_SIZE @@ -452,13 +456,6 @@ static void saa7134_input_timer(unsigned long data) mod_timer(&ir->timer, jiffies + msecs_to_jiffies(ir->polling)); } -static void ir_raw_decode_timer_end(unsigned long data) -{ - struct saa7134_dev *dev = (struct 
saa7134_dev *)data; - - ir_raw_event_handle(dev->remote->dev); -} - static int __saa7134_ir_start(void *priv) { struct saa7134_dev *dev = priv; @@ -514,10 +511,6 @@ static int __saa7134_ir_start(void *priv) (unsigned long)dev); ir->timer.expires = jiffies + HZ; add_timer(&ir->timer); - } else if (ir->raw_decode) { - /* set timer_end for code completion */ - setup_timer(&ir->timer, ir_raw_decode_timer_end, - (unsigned long)dev); } return 0; @@ -535,7 +528,7 @@ static void __saa7134_ir_stop(void *priv) if (!ir->running) return; - if (ir->polling || ir->raw_decode) + if (ir->polling) del_timer_sync(&ir->timer); ir->running = false; @@ -867,10 +860,12 @@ int saa7134_input_init1(struct saa7134_dev *dev) rc->priv = dev; rc->open = saa7134_ir_open; rc->close = saa7134_ir_close; - if (raw_decode) + if (raw_decode) { rc->driver_type = RC_DRIVER_IR_RAW; + rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER; + } - rc->input_name = ir->name; + rc->device_name = ir->name; rc->input_phys = ir->phys; rc->input_id.bustype = BUS_PCI; rc->input_id.version = 1; @@ -884,6 +879,9 @@ int saa7134_input_init1(struct saa7134_dev *dev) rc->dev.parent = &dev->pci->dev; rc->map_name = ir_codes; rc->driver_name = MODULE_NAME; + rc->min_timeout = 1; + rc->timeout = IR_DEFAULT_TIMEOUT; + rc->max_timeout = 10 * IR_DEFAULT_TIMEOUT; err = rc_register_device(rc); if (err) @@ -1028,7 +1026,7 @@ void saa7134_probe_i2c_ir(struct saa7134_dev *dev) dev->init_data.name = "BeholdTV"; dev->init_data.get_key = get_key_beholdm6xx; dev->init_data.ir_codes = RC_MAP_BEHOLD; - dev->init_data.type = RC_BIT_NECX; + dev->init_data.type = RC_PROTO_BIT_NECX; info.addr = 0x2d; break; case SAA7134_BOARD_AVERMEDIA_CARDBUS_501: @@ -1057,26 +1055,13 @@ void saa7134_probe_i2c_ir(struct saa7134_dev *dev) static int saa7134_raw_decode_irq(struct saa7134_dev *dev) { struct saa7134_card_ir *ir = dev->remote; - unsigned long timeout; int space; /* Generate initial event */ saa_clearb(SAA7134_GPIO_GPMODE3, SAA7134_GPIO_GPRESCAN); saa_setb(SAA7134_GPIO_GPMODE3, SAA7134_GPIO_GPRESCAN); space = saa_readl(SAA7134_GPIO_GPSTATUS0 >> 2) & ir->mask_keydown; - ir_raw_event_store_edge(dev->remote->dev, space ? IR_SPACE : IR_PULSE); - - /* - * Wait 15 ms from the start of the first IR event before processing - * the event. This time is enough for NEC protocol. May need adjustments - * to work with other protocols. 
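The saa7134 conversion around this point drops the driver-private 15 ms completion timer: the interrupt path now only reports the raw edge, and rc-core terminates the transmission using the rc_dev timeout fields set above with IR_DEFAULT_TIMEOUT. A minimal sketch of the two pieces, illustrative only, with hypothetical function names:

#include <media/rc-core.h>

/*
 * Illustrative sketch: the interrupt path reports the new line level
 * (true = pulse, false = space), and the registration path advertises
 * how long a trailing gap rc-core should allow before it considers the
 * transmission finished.
 */
static void example_report_edge(struct rc_dev *rdev, bool space)
{
        ir_raw_event_store_edge(rdev, !space);
}

static void example_setup_timeouts(struct rc_dev *rdev)
{
        rdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
        rdev->min_timeout = 1;
        rdev->timeout = IR_DEFAULT_TIMEOUT;
        rdev->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
}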
- */ - smp_mb(); - - if (!timer_pending(&ir->timer)) { - timeout = jiffies + msecs_to_jiffies(15); - mod_timer(&ir->timer, timeout); - } + ir_raw_event_store_edge(dev->remote->dev, !space); return 1; } diff --git a/drivers/media/pci/saa7146/hexium_gemini.c b/drivers/media/pci/saa7146/hexium_gemini.c index c889ec9f8a5a..f708cab01fef 100644 --- a/drivers/media/pci/saa7146/hexium_gemini.c +++ b/drivers/media/pci/saa7146/hexium_gemini.c @@ -363,7 +363,7 @@ static struct saa7146_pci_extension_data hexium_gemini_dual_4bnc = { .ext = &hexium_extension, }; -static struct pci_device_id pci_tbl[] = { +static const struct pci_device_id pci_tbl[] = { { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7146, diff --git a/drivers/media/pci/saa7146/hexium_orion.c b/drivers/media/pci/saa7146/hexium_orion.c index c306a92e8909..01f01580c7ca 100644 --- a/drivers/media/pci/saa7146/hexium_orion.c +++ b/drivers/media/pci/saa7146/hexium_orion.c @@ -427,7 +427,7 @@ static struct saa7146_pci_extension_data hexium_orion_4bnc = { .ext = &extension, }; -static struct pci_device_id pci_tbl[] = { +static const struct pci_device_id pci_tbl[] = { { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7146, diff --git a/drivers/media/pci/saa7146/mxb.c b/drivers/media/pci/saa7146/mxb.c index 504d78807639..930218cc2de1 100644 --- a/drivers/media/pci/saa7146/mxb.c +++ b/drivers/media/pci/saa7146/mxb.c @@ -819,7 +819,7 @@ static struct saa7146_pci_extension_data mxb = { .ext = &extension, }; -static struct pci_device_id pci_tbl[] = { +static const struct pci_device_id pci_tbl[] = { { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7146, diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c index 75eed4cc4823..fca36a4910c2 100644 --- a/drivers/media/pci/saa7164/saa7164-core.c +++ b/drivers/media/pci/saa7164/saa7164-core.c @@ -1490,7 +1490,7 @@ static void saa7164_finidev(struct pci_dev *pci_dev) kfree(dev); } -static struct pci_device_id saa7164_pci_tbl[] = { +static const struct pci_device_id saa7164_pci_tbl[] = { { /* SAA7164 */ .vendor = 0x1131, diff --git a/drivers/media/pci/saa7164/saa7164-i2c.c b/drivers/media/pci/saa7164/saa7164-i2c.c index 430f6789f222..4bcde7c79dc3 100644 --- a/drivers/media/pci/saa7164/saa7164-i2c.c +++ b/drivers/media/pci/saa7164/saa7164-i2c.c @@ -78,7 +78,7 @@ static const struct i2c_algorithm saa7164_i2c_algo_template = { /* ----------------------------------------------------------------------- */ -static struct i2c_adapter saa7164_i2c_adap_template = { +static const struct i2c_adapter saa7164_i2c_adap_template = { .name = "saa7164", .owner = THIS_MODULE, .algo = &saa7164_i2c_algo_template, diff --git a/drivers/media/pci/smipcie/smipcie-ir.c b/drivers/media/pci/smipcie/smipcie-ir.c index d2730c3fdbae..c5595af6b976 100644 --- a/drivers/media/pci/smipcie/smipcie-ir.c +++ b/drivers/media/pci/smipcie/smipcie-ir.c @@ -144,7 +144,7 @@ static void smi_ir_decode(struct work_struct *work) rc5_system = (dwIRCode & 0x7C0) >> 6; toggle = (dwIRCode & 0x800) ? 
1 : 0; scancode = rc5_system << 8 | rc5_command; - rc_keydown(rc_dev, RC_TYPE_RC5, scancode, toggle); + rc_keydown(rc_dev, RC_PROTO_RC5, scancode, toggle); } } end_ir_decode: @@ -188,14 +188,14 @@ int smi_ir_init(struct smi_dev *dev) return -ENOMEM; /* init input device */ - snprintf(ir->input_name, sizeof(ir->input_name), "IR (%s)", + snprintf(ir->device_name, sizeof(ir->device_name), "IR (%s)", dev->info->name); snprintf(ir->input_phys, sizeof(ir->input_phys), "pci-%s/ir0", pci_name(dev->pci_dev)); rc_dev->driver_name = "SMI_PCIe"; rc_dev->input_phys = ir->input_phys; - rc_dev->input_name = ir->input_name; + rc_dev->device_name = ir->device_name; rc_dev->input_id.bustype = BUS_PCI; rc_dev->input_id.version = 1; rc_dev->input_id.vendor = dev->pci_dev->subsystem_vendor; diff --git a/drivers/media/pci/smipcie/smipcie.h b/drivers/media/pci/smipcie/smipcie.h index 611e4f02cadd..c8368c78ddd5 100644 --- a/drivers/media/pci/smipcie/smipcie.h +++ b/drivers/media/pci/smipcie/smipcie.h @@ -240,7 +240,7 @@ struct smi_rc { struct smi_dev *dev; struct rc_dev *rc_dev; char input_phys[64]; - char input_name[64]; + char device_name[64]; struct work_struct work; u8 irData[256]; diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c index 3ca947092775..81be1b8df758 100644 --- a/drivers/media/pci/solo6x10/solo6x10-g723.c +++ b/drivers/media/pci/solo6x10/solo6x10-g723.c @@ -319,7 +319,7 @@ static int snd_solo_capture_volume_put(struct snd_kcontrol *kcontrol, return 1; } -static struct snd_kcontrol_new snd_solo_capture_volume = { +static const struct snd_kcontrol_new snd_solo_capture_volume = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Capture Volume", .info = snd_solo_capture_volume_info, diff --git a/drivers/media/pci/solo6x10/solo6x10-gpio.c b/drivers/media/pci/solo6x10/solo6x10-gpio.c index 6d3b4a36bc11..3d0d1aa2f6a8 100644 --- a/drivers/media/pci/solo6x10/solo6x10-gpio.c +++ b/drivers/media/pci/solo6x10/solo6x10-gpio.c @@ -57,6 +57,9 @@ static void solo_gpio_mode(struct solo_dev *solo_dev, ret |= 1 << port; } + /* Enable GPIO[31:16] */ + ret |= 0xffff0000; + solo_reg_write(solo_dev, SOLO_GPIO_CONFIG_1, ret); } @@ -90,16 +93,110 @@ static void solo_gpio_config(struct solo_dev *solo_dev) /* Initially set relay status to 0 */ solo_gpio_clear(solo_dev, 0xff00); + + /* Set input pins direction */ + solo_gpio_mode(solo_dev, 0xffff0000, 0); } +#ifdef CONFIG_GPIOLIB +/* Pins 0-7 are not exported, because it seems from code above they are + * used for internal purposes. So offset 0 corresponds to pin 8, therefore + * offsets 0-7 are relay GPIOs, 8-23 - input GPIOs. 
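The new solo6x10 callbacks that follow hang a struct gpio_chip off the driver's private data. The general pattern, shown here as a hedged sketch with a hypothetical device structure and a fake shadow register, is to fill in the chip's callbacks, register it with gpiochip_add_data(), and recover the private pointer with gpiochip_get_data() inside each callback:

#include <linux/bitops.h>
#include <linux/gpio/driver.h>
#include <linux/module.h>

/* Hypothetical private state standing in for struct solo_dev. */
struct example_dev {
        struct gpio_chip gpio_dev;
        u32 shadow;             /* pretend output latch, for illustration */
};

static int example_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
        struct example_dev *priv = gpiochip_get_data(chip);

        return !!(priv->shadow & BIT(offset));
}

static void example_gpio_set(struct gpio_chip *chip, unsigned int offset,
                             int value)
{
        struct example_dev *priv = gpiochip_get_data(chip);

        if (value)
                priv->shadow |= BIT(offset);
        else
                priv->shadow &= ~BIT(offset);
}

static int example_gpio_init(struct example_dev *priv, struct device *parent)
{
        priv->gpio_dev.label = "example_gpio";
        priv->gpio_dev.parent = parent;
        priv->gpio_dev.owner = THIS_MODULE;
        priv->gpio_dev.base = -1;       /* let gpiolib pick the GPIO base */
        priv->gpio_dev.ngpio = 24;
        priv->gpio_dev.can_sleep = 0;
        priv->gpio_dev.get = example_gpio_get;
        priv->gpio_dev.set = example_gpio_set;

        /* hand the private pointer to gpiolib along with the chip */
        return gpiochip_add_data(&priv->gpio_dev, priv);
}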
+ */ +static int solo_gpiochip_get_direction(struct gpio_chip *chip, + unsigned int offset) +{ + int ret, mode; + struct solo_dev *solo_dev = gpiochip_get_data(chip); + + if (offset < 8) { + ret = solo_reg_read(solo_dev, SOLO_GPIO_CONFIG_0); + mode = 3 & (ret >> ((offset + 8) * 2)); + } else { + ret = solo_reg_read(solo_dev, SOLO_GPIO_CONFIG_1); + mode = 1 & (ret >> (offset - 8)); + } + + if (!mode) + return 1; + else if (mode == 1) + return 0; + + return -1; +} + +static int solo_gpiochip_direction_input(struct gpio_chip *chip, + unsigned int offset) +{ + return -1; +} + +static int solo_gpiochip_direction_output(struct gpio_chip *chip, + unsigned int offset, int value) +{ + return -1; +} + +static int solo_gpiochip_get(struct gpio_chip *chip, + unsigned int offset) +{ + int ret; + struct solo_dev *solo_dev = gpiochip_get_data(chip); + + ret = solo_reg_read(solo_dev, SOLO_GPIO_DATA_IN); + + return 1 & (ret >> (offset + 8)); +} + +static void solo_gpiochip_set(struct gpio_chip *chip, + unsigned int offset, int value) +{ + struct solo_dev *solo_dev = gpiochip_get_data(chip); + + if (value) + solo_gpio_set(solo_dev, 1 << (offset + 8)); + else + solo_gpio_clear(solo_dev, 1 << (offset + 8)); +} +#endif + int solo_gpio_init(struct solo_dev *solo_dev) { + int ret; + solo_gpio_config(solo_dev); +#ifdef CONFIG_GPIOLIB + solo_dev->gpio_dev.label = SOLO6X10_NAME"_gpio"; + solo_dev->gpio_dev.parent = &solo_dev->pdev->dev; + solo_dev->gpio_dev.owner = THIS_MODULE; + solo_dev->gpio_dev.base = -1; + solo_dev->gpio_dev.ngpio = 24; + solo_dev->gpio_dev.can_sleep = 0; + + solo_dev->gpio_dev.get_direction = solo_gpiochip_get_direction; + solo_dev->gpio_dev.direction_input = solo_gpiochip_direction_input; + solo_dev->gpio_dev.direction_output = solo_gpiochip_direction_output; + solo_dev->gpio_dev.get = solo_gpiochip_get; + solo_dev->gpio_dev.set = solo_gpiochip_set; + + ret = gpiochip_add_data(&solo_dev->gpio_dev, solo_dev); + + if (ret) { + solo_dev->gpio_dev.label = NULL; + return -1; + } +#endif return 0; } void solo_gpio_exit(struct solo_dev *solo_dev) { +#ifdef CONFIG_GPIOLIB + if (solo_dev->gpio_dev.label) { + gpiochip_remove(&solo_dev->gpio_dev); + solo_dev->gpio_dev.label = NULL; + } +#endif solo_gpio_clear(solo_dev, 0x30); solo_gpio_config(solo_dev); } diff --git a/drivers/media/pci/solo6x10/solo6x10-tw28.c b/drivers/media/pci/solo6x10/solo6x10-tw28.c index 0632d3f7c73c..7ecb725b6dd2 100644 --- a/drivers/media/pci/solo6x10/solo6x10-tw28.c +++ b/drivers/media/pci/solo6x10/solo6x10-tw28.c @@ -532,7 +532,7 @@ static void saa712x_write_regs(struct solo_dev *dev, const u8 *vals, static void saa712x_setup(struct solo_dev *dev) { const int reg_start = 0x26; - const u8 saa7128_regs_ntsc[] = { + static const u8 saa7128_regs_ntsc[] = { /* :0x26 */ 0x0d, 0x00, /* :0x28 */ @@ -606,6 +606,7 @@ int solo_tw28_init(struct solo_dev *solo_dev) solo_dev->tw28_cnt++; break; case 0x0c: + case 0x0d: solo_dev->tw2864 |= 1 << i; solo_dev->tw28_cnt++; break; diff --git a/drivers/media/pci/solo6x10/solo6x10-v4l2.c b/drivers/media/pci/solo6x10/solo6x10-v4l2.c index 3266fc21825f..99ffd1ed4a73 100644 --- a/drivers/media/pci/solo6x10/solo6x10-v4l2.c +++ b/drivers/media/pci/solo6x10/solo6x10-v4l2.c @@ -630,7 +630,7 @@ static const struct v4l2_ioctl_ops solo_v4l2_ioctl_ops = { .vidioc_unsubscribe_event = v4l2_event_unsubscribe, }; -static struct video_device solo_v4l2_template = { +static const struct video_device solo_v4l2_template = { .name = SOLO6X10_NAME, .fops = &solo_v4l2_fops, .ioctl_ops = &solo_v4l2_ioctl_ops, diff --git 
a/drivers/media/pci/solo6x10/solo6x10.h b/drivers/media/pci/solo6x10/solo6x10.h index 3f8da5e8c430..3a1893ae2dad 100644 --- a/drivers/media/pci/solo6x10/solo6x10.h +++ b/drivers/media/pci/solo6x10/solo6x10.h @@ -31,6 +31,7 @@ #include #include #include +#include #include #include @@ -199,6 +200,10 @@ struct solo_dev { u32 irq_mask; u32 motion_mask; struct v4l2_device v4l2_dev; +#ifdef CONFIG_GPIOLIB + /* GPIO */ + struct gpio_chip gpio_dev; +#endif /* tw28xx accounting */ u8 tw2865, tw2864, tw2815; diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.c b/drivers/media/pci/sta2x11/sta2x11_vip.c index 6343d24eb1d5..eb5a9eae7c8e 100644 --- a/drivers/media/pci/sta2x11/sta2x11_vip.c +++ b/drivers/media/pci/sta2x11/sta2x11_vip.c @@ -754,7 +754,7 @@ static const struct v4l2_ioctl_ops vip_ioctl_ops = { .vidioc_unsubscribe_event = v4l2_event_unsubscribe, }; -static struct video_device video_dev_template = { +static const struct video_device video_dev_template = { .name = KBUILD_MODNAME, .release = video_device_release_empty, .fops = &vip_fops, diff --git a/drivers/media/pci/ttpci/av7110.c b/drivers/media/pci/ttpci/av7110.c index f2905bd80366..f46947d8adf8 100644 --- a/drivers/media/pci/ttpci/av7110.c +++ b/drivers/media/pci/ttpci/av7110.c @@ -2872,7 +2872,7 @@ MAKE_AV7110_INFO(fsc, "Fujitsu Siemens DVB-C"); MAKE_AV7110_INFO(fss, "Fujitsu Siemens DVB-S rev1.6"); MAKE_AV7110_INFO(gxs_1_3, "Galaxis DVB-S rev1.3"); -static struct pci_device_id pci_tbl[] = { +static const struct pci_device_id pci_tbl[] = { MAKE_EXTENSION_PCI(fsc, 0x110a, 0x0000), MAKE_EXTENSION_PCI(tts_1_X_fsc, 0x13c2, 0x0000), MAKE_EXTENSION_PCI(ttt_1_X, 0x13c2, 0x0001), diff --git a/drivers/media/pci/ttpci/av7110.h b/drivers/media/pci/ttpci/av7110.h index 824c1e262fbb..347827925c14 100644 --- a/drivers/media/pci/ttpci/av7110.h +++ b/drivers/media/pci/ttpci/av7110.h @@ -177,7 +177,7 @@ struct av7110 { /* CA */ - ca_slot_info_t ci_slot[2]; + struct ca_slot_info ci_slot[2]; enum av7110_video_mode vidmode; struct dmxdev dmxdev; diff --git a/drivers/media/pci/ttpci/av7110_ca.c b/drivers/media/pci/ttpci/av7110_ca.c index f64723aea56b..1fe49171d823 100644 --- a/drivers/media/pci/ttpci/av7110_ca.c +++ b/drivers/media/pci/ttpci/av7110_ca.c @@ -119,7 +119,7 @@ static void ci_ll_release(struct dvb_ringbuffer *cirbuf, struct dvb_ringbuffer * } static int ci_ll_reset(struct dvb_ringbuffer *cibuf, struct file *file, - int slots, ca_slot_info_t *slot) + int slots, struct ca_slot_info *slot) { int i; int len = 0; @@ -264,7 +264,7 @@ static int dvb_ca_ioctl(struct file *file, unsigned int cmd, void *parg) break; case CA_GET_CAP: { - ca_caps_t cap; + struct ca_caps cap; cap.slot_num = 2; cap.slot_type = (FW_CI_LL_SUPPORT(av7110->arm_app) ? @@ -277,7 +277,7 @@ static int dvb_ca_ioctl(struct file *file, unsigned int cmd, void *parg) case CA_GET_SLOT_INFO: { - ca_slot_info_t *info=(ca_slot_info_t *)parg; + struct ca_slot_info *info=(struct ca_slot_info *)parg; if (info->num < 0 || info->num > 1) { mutex_unlock(&av7110->ioctl_mutex); @@ -286,7 +286,7 @@ static int dvb_ca_ioctl(struct file *file, unsigned int cmd, void *parg) av7110->ci_slot[info->num].num = info->num; av7110->ci_slot[info->num].type = FW_CI_LL_SUPPORT(av7110->arm_app) ? 
CA_CI_LINK : CA_CI; - memcpy(info, &av7110->ci_slot[info->num], sizeof(ca_slot_info_t)); + memcpy(info, &av7110->ci_slot[info->num], sizeof(struct ca_slot_info)); break; } @@ -298,7 +298,7 @@ static int dvb_ca_ioctl(struct file *file, unsigned int cmd, void *parg) case CA_GET_DESCR_INFO: { - ca_descr_info_t info; + struct ca_descr_info info; info.num = 16; info.type = CA_ECD; @@ -308,7 +308,7 @@ static int dvb_ca_ioctl(struct file *file, unsigned int cmd, void *parg) case CA_SET_DESCR: { - ca_descr_t *descr = (ca_descr_t*) parg; + struct ca_descr *descr = (struct ca_descr*) parg; if (descr->index >= 16 || descr->parity > 1) { mutex_unlock(&av7110->ioctl_mutex); diff --git a/drivers/media/pci/ttpci/av7110_v4l.c b/drivers/media/pci/ttpci/av7110_v4l.c index 397fe146dedd..e4cf42c32284 100644 --- a/drivers/media/pci/ttpci/av7110_v4l.c +++ b/drivers/media/pci/ttpci/av7110_v4l.c @@ -218,7 +218,7 @@ static struct saa7146_standard analog_standard[]; static struct saa7146_standard dvb_standard[]; static struct saa7146_standard standard[]; -static struct v4l2_audio msp3400_v4l2_audio = { +static const struct v4l2_audio msp3400_v4l2_audio = { .index = 0, .name = "Television", .capability = V4L2_AUDCAP_STEREO diff --git a/drivers/media/pci/ttpci/budget-av.c b/drivers/media/pci/ttpci/budget-av.c index dc7be8fac9a3..ac83fff9fe0b 100644 --- a/drivers/media/pci/ttpci/budget-av.c +++ b/drivers/media/pci/ttpci/budget-av.c @@ -1567,7 +1567,7 @@ MAKE_BUDGET_INFO(cin1200c, "Terratec Cinergy 1200 DVB-C", BUDGET_CIN1200C); MAKE_BUDGET_INFO(cin1200cmk3, "Terratec Cinergy 1200 DVB-C MK3", BUDGET_CIN1200C_MK3); MAKE_BUDGET_INFO(cin1200t, "Terratec Cinergy 1200 DVB-T", BUDGET_CIN1200T); -static struct pci_device_id pci_tbl[] = { +static const struct pci_device_id pci_tbl[] = { MAKE_EXTENSION_PCI(knc1s, 0x1131, 0x4f56), MAKE_EXTENSION_PCI(knc1s, 0x1131, 0x0010), MAKE_EXTENSION_PCI(knc1s, 0x1894, 0x0010), diff --git a/drivers/media/pci/ttpci/budget-ci.c b/drivers/media/pci/ttpci/budget-ci.c index 11b9227307bf..57af11804fd6 100644 --- a/drivers/media/pci/ttpci/budget-ci.c +++ b/drivers/media/pci/ttpci/budget-ci.c @@ -158,14 +158,15 @@ static void msp430_ir_interrupt(unsigned long data) return; if (budget_ci->ir.full_rc5) { - rc_keydown(dev, RC_TYPE_RC5, + rc_keydown(dev, RC_PROTO_RC5, RC_SCANCODE_RC5(budget_ci->ir.rc5_device, budget_ci->ir.ir_key), !!(command & 0x20)); return; } /* FIXME: We should generate complete scancodes for all devices */ - rc_keydown(dev, RC_TYPE_UNKNOWN, budget_ci->ir.ir_key, !!(command & 0x20)); + rc_keydown(dev, RC_PROTO_UNKNOWN, budget_ci->ir.ir_key, + !!(command & 0x20)); } static int msp430_ir_init(struct budget_ci *budget_ci) @@ -186,7 +187,7 @@ static int msp430_ir_init(struct budget_ci *budget_ci) "pci-%s/ir0", pci_name(saa->pci)); dev->driver_name = MODULE_NAME; - dev->input_name = budget_ci->ir.name; + dev->device_name = budget_ci->ir.name; dev->input_phys = budget_ci->ir.phys; dev->input_id.bustype = BUS_PCI; dev->input_id.version = 1; @@ -1538,7 +1539,7 @@ MAKE_BUDGET_INFO(ttc1501, "TT-Budget C-1501 PCI", BUDGET_TT); MAKE_BUDGET_INFO(tt3200, "TT-Budget S2-3200 PCI", BUDGET_TT); MAKE_BUDGET_INFO(ttbs1500b, "TT-Budget S-1500B PCI", BUDGET_TT); -static struct pci_device_id pci_tbl[] = { +static const struct pci_device_id pci_tbl[] = { MAKE_EXTENSION_PCI(ttbci, 0x13c2, 0x100c), MAKE_EXTENSION_PCI(ttbci, 0x13c2, 0x100f), MAKE_EXTENSION_PCI(ttbcci, 0x13c2, 0x1010), diff --git a/drivers/media/pci/ttpci/budget-patch.c b/drivers/media/pci/ttpci/budget-patch.c index 442992372008..a738018cdca8 
100644 --- a/drivers/media/pci/ttpci/budget-patch.c +++ b/drivers/media/pci/ttpci/budget-patch.c @@ -45,7 +45,7 @@ static struct saa7146_extension budget_extension; MAKE_BUDGET_INFO(ttbp, "TT-Budget/Patch DVB-S 1.x PCI", BUDGET_PATCH); //MAKE_BUDGET_INFO(satel,"TT-Budget/Patch SATELCO PCI", BUDGET_TT_HW_DISEQC); -static struct pci_device_id pci_tbl[] = { +static const struct pci_device_id pci_tbl[] = { MAKE_EXTENSION_PCI(ttbp,0x13c2, 0x0000), // MAKE_EXTENSION_PCI(satel, 0x13c2, 0x1013), { diff --git a/drivers/media/pci/ttpci/budget.c b/drivers/media/pci/ttpci/budget.c index 81fe35cedd10..f59eadb7a5eb 100644 --- a/drivers/media/pci/ttpci/budget.c +++ b/drivers/media/pci/ttpci/budget.c @@ -845,7 +845,7 @@ MAKE_BUDGET_INFO(fsact1, "Fujitsu Siemens Activy Budget-T PCI (rev AL/ALPS TDHD1 MAKE_BUDGET_INFO(omicom, "Omicom S2 PCI", BUDGET_TT); MAKE_BUDGET_INFO(sylt, "Philips Semi Sylt PCI", BUDGET_TT_HW_DISEQC); -static struct pci_device_id pci_tbl[] = { +static const struct pci_device_id pci_tbl[] = { MAKE_EXTENSION_PCI(ttbs, 0x13c2, 0x1003), MAKE_EXTENSION_PCI(ttbc, 0x13c2, 0x1004), MAKE_EXTENSION_PCI(ttbt, 0x13c2, 0x1005), diff --git a/drivers/media/pci/tw68/tw68-video.c b/drivers/media/pci/tw68/tw68-video.c index 58c4dd75bfa1..8c1f4a049764 100644 --- a/drivers/media/pci/tw68/tw68-video.c +++ b/drivers/media/pci/tw68/tw68-video.c @@ -916,7 +916,7 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = { #endif }; -static struct video_device tw68_video_template = { +static const struct video_device tw68_video_template = { .name = "tw68_video", .fops = &video_fops, .ioctl_ops = &video_ioctl_ops, diff --git a/drivers/media/pci/zoran/zoran_card.c b/drivers/media/pci/zoran/zoran_card.c index 4680f001653a..a6b9ebd20263 100644 --- a/drivers/media/pci/zoran/zoran_card.c +++ b/drivers/media/pci/zoran/zoran_card.c @@ -130,7 +130,7 @@ MODULE_VERSION(ZORAN_VERSION); .vendor = PCI_VENDOR_ID_ZORAN, .device = PCI_DEVICE_ID_ZORAN_36057, \ .subvendor = (subven), .subdevice = (subdev), .driver_data = (data) } -static struct pci_device_id zr36067_pci_tbl[] = { +static const struct pci_device_id zr36067_pci_tbl[] = { ZR_DEVICE(PCI_VENDOR_ID_MIRO, PCI_DEVICE_ID_MIRO_DC10PLUS, DC10plus), ZR_DEVICE(PCI_VENDOR_ID_MIRO, PCI_DEVICE_ID_MIRO_DC30PLUS, DC30plus), ZR_DEVICE(PCI_VENDOR_ID_ELECTRONICDESIGNGMBH, PCI_DEVICE_ID_LML_33R10, LML33R10), diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig index fb1fa0b82077..7e7cc49b8674 100644 --- a/drivers/media/platform/Kconfig +++ b/drivers/media/platform/Kconfig @@ -76,7 +76,8 @@ config VIDEO_M32R_AR_M64278 config VIDEO_MUX tristate "Video Multiplexer" - depends on OF && VIDEO_V4L2_SUBDEV_API && MEDIA_CONTROLLER + select MULTIPLEXER + depends on VIDEO_V4L2 && OF && VIDEO_V4L2_SUBDEV_API && MEDIA_CONTROLLER select REGMAP help This driver provides support for N:1 video bus multiplexers. 
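Most of the drivers/media/pci hunks above make the same mechanical change: PCI ID tables, along with several i2c_algorithm, i2c_adapter, snd_pcm_hardware and video_device templates, become const because they are only read after registration. A minimal sketch of the resulting driver shape, with hypothetical names and IDs, not taken from any driver in the series:

#include <linux/module.h>
#include <linux/pci.h>

/* Hypothetical IDs: a const table can be placed in read-only data. */
static const struct pci_device_id example_pci_tbl[] = {
        { PCI_DEVICE(0x1234, 0x5678) },
        { }
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        return pcim_enable_device(pdev);
}

static void example_remove(struct pci_dev *pdev)
{
}

static struct pci_driver example_pci_driver = {
        .name = "example-pci",
        .id_table = example_pci_tbl,
        .probe = example_probe,
        .remove = example_remove,
};
module_pci_driver(example_pci_driver);

MODULE_LICENSE("GPL");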
@@ -109,6 +110,13 @@ config VIDEO_PXA27x ---help--- This is a v4l2 driver for the PXA27x Quick Capture Interface +config VIDEO_QCOM_CAMSS + tristate "Qualcomm 8x16 V4L2 Camera Subsystem driver" + depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API + depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST + select VIDEOBUF2_DMA_SG + select V4L2_FWNODE + config VIDEO_S3C_CAMIF tristate "Samsung S3C24XX/S3C64XX SoC Camera Interface driver" depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API @@ -536,6 +544,17 @@ menuconfig CEC_PLATFORM_DRIVERS if CEC_PLATFORM_DRIVERS +config VIDEO_MESON_AO_CEC + tristate "Amlogic Meson AO CEC driver" + depends on ARCH_MESON || COMPILE_TEST + select CEC_CORE + select CEC_NOTIFIER + ---help--- + This is a driver for Amlogic Meson SoCs AO CEC interface. It uses the + generic CEC framework interface. + CEC bus is present in the HDMI connector and enables communication + between compatible devices. + config VIDEO_SAMSUNG_S5P_CEC tristate "Samsung S5P CEC driver" depends on PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile index 9beadc760467..c1ef946bf032 100644 --- a/drivers/media/platform/Makefile +++ b/drivers/media/platform/Makefile @@ -85,4 +85,8 @@ obj-$(CONFIG_VIDEO_MEDIATEK_MDP) += mtk-mdp/ obj-$(CONFIG_VIDEO_MEDIATEK_JPEG) += mtk-jpeg/ +obj-$(CONFIG_VIDEO_QCOM_CAMSS) += qcom/camss-8x16/ + obj-$(CONFIG_VIDEO_QCOM_VENUS) += qcom/venus/ + +obj-y += meson/ diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c index 466aba8b0e00..dfcc484cab89 100644 --- a/drivers/media/platform/am437x/am437x-vpfe.c +++ b/drivers/media/platform/am437x/am437x-vpfe.c @@ -2490,8 +2490,8 @@ vpfe_get_pdata(struct platform_device *pdev) rem = of_graph_get_remote_port_parent(endpoint); if (!rem) { - dev_err(&pdev->dev, "Remote device at %s not found\n", - endpoint->full_name); + dev_err(&pdev->dev, "Remote device at %pOF not found\n", + endpoint); goto done; } diff --git a/drivers/media/platform/atmel/atmel-isc.c b/drivers/media/platform/atmel/atmel-isc.c index d6534252cdcd..d7103c5f92c3 100644 --- a/drivers/media/platform/atmel/atmel-isc.c +++ b/drivers/media/platform/atmel/atmel-isc.c @@ -873,7 +873,7 @@ static void isc_buffer_queue(struct vb2_buffer *vb) spin_unlock_irqrestore(&isc->dma_queue_lock, flags); } -static struct vb2_ops isc_vb2_ops = { +static const struct vb2_ops isc_vb2_ops = { .queue_setup = isc_queue_setup, .wait_prepare = vb2_ops_wait_prepare, .wait_finish = vb2_ops_wait_finish, @@ -1700,8 +1700,8 @@ static int isc_parse_dt(struct device *dev, struct isc_device *isc) rem = of_graph_get_remote_port_parent(epn); if (!rem) { - dev_notice(dev, "Remote device at %s not found\n", - of_node_full_name(epn)); + dev_notice(dev, "Remote device at %pOF not found\n", + epn); continue; } diff --git a/drivers/media/platform/blackfin/bfin_capture.c b/drivers/media/platform/blackfin/bfin_capture.c index 1c5166df46f5..41f179117fb0 100644 --- a/drivers/media/platform/blackfin/bfin_capture.c +++ b/drivers/media/platform/blackfin/bfin_capture.c @@ -375,7 +375,7 @@ static void bcap_stop_streaming(struct vb2_queue *vq) } } -static struct vb2_ops bcap_video_qops = { +static const struct vb2_ops bcap_video_qops = { .queue_setup = bcap_queue_setup, .buf_prepare = bcap_buffer_prepare, .buf_cleanup = bcap_buffer_cleanup, @@ -769,7 +769,7 @@ static const struct v4l2_ioctl_ops bcap_ioctl_ops = { .vidioc_log_status = bcap_log_status, }; -static struct v4l2_file_operations bcap_fops = { 
+static const struct v4l2_file_operations bcap_fops = { .owner = THIS_MODULE, .open = v4l2_fh_open, .release = vb2_fop_release, diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c index bba1eb43b5d8..291c40933935 100644 --- a/drivers/media/platform/coda/coda-bit.c +++ b/drivers/media/platform/coda/coda-bit.c @@ -394,7 +394,8 @@ static int coda_alloc_framebuffers(struct coda_ctx *ctx, int i; if (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 || - ctx->codec->dst_fourcc == V4L2_PIX_FMT_H264) { + ctx->codec->dst_fourcc == V4L2_PIX_FMT_H264 || + ctx->codec->dst_fourcc == V4L2_PIX_FMT_MPEG4) { width = round_up(q_data->width, 16); height = round_up(q_data->height, 16); } else { @@ -702,6 +703,8 @@ static u32 coda_supported_firmwares[] = { CODA_FIRMWARE_VERNUM(CODA_DX6, 2, 2, 5), CODA_FIRMWARE_VERNUM(CODA_7541, 1, 4, 50), CODA_FIRMWARE_VERNUM(CODA_960, 2, 1, 5), + CODA_FIRMWARE_VERNUM(CODA_960, 2, 3, 10), + CODA_FIRMWARE_VERNUM(CODA_960, 3, 1, 1), }; static bool coda_firmware_supported(u32 vernum) @@ -1006,7 +1009,7 @@ static int coda_start_encoding(struct coda_ctx *ctx) break; } coda_write(dev, value, CODA_CMD_ENC_SEQ_SLICE_MODE); - value = ctx->params.gop_size & CODA_GOP_SIZE_MASK; + value = ctx->params.gop_size; coda_write(dev, value, CODA_CMD_ENC_SEQ_GOP_SIZE); } @@ -1250,7 +1253,8 @@ static int coda_prepare_encode(struct coda_ctx *ctx) force_ipicture = ctx->params.force_ipicture; if (force_ipicture) ctx->params.force_ipicture = false; - else if ((src_buf->sequence % ctx->params.gop_size) == 0) + else if (ctx->params.gop_size != 0 && + (src_buf->sequence % ctx->params.gop_size) == 0) force_ipicture = 1; /* @@ -1411,6 +1415,7 @@ static void coda_finish_encode(struct coda_ctx *ctx) } dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp; + dst_buf->field = src_buf->field; dst_buf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK; dst_buf->flags |= src_buf->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK; @@ -1634,9 +1639,6 @@ static int __coda_start_decoding(struct coda_ctx *ctx) ctx->frm_dis_flg = 0; coda_write(dev, 0, CODA_REG_BIT_FRM_DIS_FLG(ctx->reg_idx)); - coda_write(dev, CODA_BIT_DEC_SEQ_INIT_ESCAPE, - CODA_REG_BIT_BIT_STREAM_PARAM); - coda_write(dev, bitstream_buf, CODA_CMD_DEC_SEQ_BB_START); coda_write(dev, bitstream_size / 1024, CODA_CMD_DEC_SEQ_BB_SIZE); val = 0; @@ -1652,6 +1654,10 @@ static int __coda_start_decoding(struct coda_ctx *ctx) ctx->params.codec_mode_aux = CODA_MP4_AUX_MPEG4; else ctx->params.codec_mode_aux = 0; + if (src_fourcc == V4L2_PIX_FMT_MPEG4) { + coda_write(dev, CODA_MP4_CLASS_MPEG4, + CODA_CMD_DEC_SEQ_MP4_ASP_CLASS); + } if (src_fourcc == V4L2_PIX_FMT_H264) { if (dev->devtype->product == CODA_7541) { coda_write(dev, ctx->psbuf.paddr, @@ -1667,18 +1673,18 @@ static int __coda_start_decoding(struct coda_ctx *ctx) if (dev->devtype->product != CODA_960) coda_write(dev, 0, CODA_CMD_DEC_SEQ_SRC_SIZE); - if (coda_command_sync(ctx, CODA_COMMAND_SEQ_INIT)) { + ctx->bit_stream_param = CODA_BIT_DEC_SEQ_INIT_ESCAPE; + ret = coda_command_sync(ctx, CODA_COMMAND_SEQ_INIT); + ctx->bit_stream_param = 0; + if (ret) { v4l2_err(&dev->v4l2_dev, "CODA_COMMAND_SEQ_INIT timeout\n"); - coda_write(dev, 0, CODA_REG_BIT_BIT_STREAM_PARAM); - return -ETIMEDOUT; + return ret; } ctx->initialized = 1; /* Update kfifo out pointer from coda bitstream read pointer */ coda_kfifo_sync_from_device(ctx); - coda_write(dev, 0, CODA_REG_BIT_BIT_STREAM_PARAM); - if (coda_read(dev, CODA_RET_DEC_SEQ_SUCCESS) == 0) { v4l2_err(&dev->v4l2_dev, "CODA_COMMAND_SEQ_INIT failed, error code = 
%d\n", @@ -2153,6 +2159,7 @@ static void coda_finish_decode(struct coda_ctx *ctx) dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); dst_buf->sequence = ctx->osequence++; + dst_buf->field = V4L2_FIELD_NONE; dst_buf->flags &= ~(V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME); diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c index 829c7895a98a..15eb5dc4dff9 100644 --- a/drivers/media/platform/coda/coda-common.c +++ b/drivers/media/platform/coda/coda-common.c @@ -73,7 +73,7 @@ MODULE_PARM_DESC(disable_vdoa, "Disable Video Data Order Adapter tiled to raster static int enable_bwb = 0; module_param(enable_bwb, int, 0644); -MODULE_PARM_DESC(enable_bwb, "Enable BWB unit, may crash on certain streams"); +MODULE_PARM_DESC(enable_bwb, "Enable BWB unit for decoding, may crash on certain streams"); void coda_write(struct coda_dev *dev, u32 data, u32 reg) { @@ -714,9 +714,10 @@ static int coda_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f, ctx->tiled_map_type = GDI_TILED_FRAME_MB_RASTER_MAP; break; case V4L2_PIX_FMT_NV12: - ctx->tiled_map_type = GDI_TILED_FRAME_MB_RASTER_MAP; - if (!disable_tiling) + if (!disable_tiling) { + ctx->tiled_map_type = GDI_TILED_FRAME_MB_RASTER_MAP; break; + } /* else fall through */ case V4L2_PIX_FMT_YUV420: case V4L2_PIX_FMT_YVU420: @@ -932,7 +933,7 @@ static int coda_encoder_cmd(struct file *file, void *fh, ctx->bit_stream_param |= CODA_BIT_STREAM_END_FLAG; /* If there is no buffer in flight, wake up */ - if (ctx->qsequence == ctx->osequence) { + if (!ctx->streamon_out || ctx->qsequence == ctx->osequence) { dst_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); dst_vq->last_buffer_dequeued = true; @@ -1683,12 +1684,23 @@ static int coda_s_ctrl(struct v4l2_ctrl *ctrl) ctx->params.h264_deblk_enabled = (ctrl->val == V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED); break; + case V4L2_CID_MPEG_VIDEO_H264_PROFILE: + /* TODO: switch between baseline and constrained baseline */ + ctx->params.h264_profile_idc = 66; + break; + case V4L2_CID_MPEG_VIDEO_H264_LEVEL: + /* nothing to do, this is set by the encoder */ + break; case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP: ctx->params.mpeg4_intra_qp = ctrl->val; break; case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP: ctx->params.mpeg4_inter_qp = ctrl->val; break; + case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE: + case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL: + /* nothing to do, these are fixed */ + break; case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: ctx->params.slice_mode = ctrl->val; break; @@ -1734,10 +1746,12 @@ static const struct v4l2_ctrl_ops coda_ctrl_ops = { static void coda_encode_ctrls(struct coda_ctx *ctx) { + int max_gop_size = (ctx->dev->devtype->product == CODA_DX6) ? 
60 : 99; + v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_BITRATE, 0, 32767000, 1000, 0); v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, - V4L2_CID_MPEG_VIDEO_GOP_SIZE, 1, 60, 1, 16); + V4L2_CID_MPEG_VIDEO_GOP_SIZE, 0, max_gop_size, 1, 16); v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP, 0, 51, 1, 25); v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, @@ -1756,10 +1770,46 @@ static void coda_encode_ctrls(struct coda_ctx *ctx) V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE, V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED, 0x0, V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED); + v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_PROFILE, + V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE, 0x0, + V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE); + if (ctx->dev->devtype->product == CODA_7541) { + v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_LEVEL, + V4L2_MPEG_VIDEO_H264_LEVEL_3_1, + ~((1 << V4L2_MPEG_VIDEO_H264_LEVEL_2_0) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_0) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_1)), + V4L2_MPEG_VIDEO_H264_LEVEL_3_1); + } + if (ctx->dev->devtype->product == CODA_960) { + v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_LEVEL, + V4L2_MPEG_VIDEO_H264_LEVEL_4_0, + ~((1 << V4L2_MPEG_VIDEO_H264_LEVEL_2_0) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_0) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_1) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_2) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_0)), + V4L2_MPEG_VIDEO_H264_LEVEL_4_0); + } v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP, 1, 31, 1, 2); v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP, 1, 31, 1, 2); + v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops, + V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE, + V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE, 0x0, + V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE); + if (ctx->dev->devtype->product == CODA_7541 || + ctx->dev->devtype->product == CODA_960) { + v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops, + V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL, + V4L2_MPEG_VIDEO_MPEG4_LEVEL_5, + ~(1 << V4L2_MPEG_VIDEO_MPEG4_LEVEL_5), + V4L2_MPEG_VIDEO_MPEG4_LEVEL_5); + } v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE, V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES, 0x0, @@ -1938,7 +1988,13 @@ static int coda_open(struct file *file) ctx->idx = idx; switch (dev->devtype->product) { case CODA_960: - if (enable_bwb) + /* + * Enabling the BWB when decoding can hang the firmware with + * certain streams. The issue was tracked as ENGR00293425 by + * Freescale. As a workaround, disable BWB for all decoders. + * The enable_bwb module parameter allows to override this. 
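In the coda control setup just above, the third-from-last argument of v4l2_ctrl_new_std_menu() is a skip mask: set bits mark menu items the driver does not support, which is why the H.264 and MPEG-4 level controls pass a complemented bitmask of the supported levels. A hedged sketch with a hypothetical control handler, illustrative only:

#include <media/v4l2-ctrls.h>

/*
 * Illustrative only: expose an H.264 level menu where every level except
 * 3.0 and 3.1 is skipped.  "hdl" and "ops" are assumed to belong to a
 * driver that initialised its v4l2_ctrl_handler elsewhere.
 */
static void example_add_level_ctrl(struct v4l2_ctrl_handler *hdl,
                                   const struct v4l2_ctrl_ops *ops)
{
        u64 skip = ~((1ULL << V4L2_MPEG_VIDEO_H264_LEVEL_3_0) |
                     (1ULL << V4L2_MPEG_VIDEO_H264_LEVEL_3_1));

        v4l2_ctrl_new_std_menu(hdl, ops, V4L2_CID_MPEG_VIDEO_H264_LEVEL,
                               V4L2_MPEG_VIDEO_H264_LEVEL_3_1, skip,
                               V4L2_MPEG_VIDEO_H264_LEVEL_3_1);
}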
+ */ + if (enable_bwb || ctx->inst_type == CODA_INST_ENCODER) ctx->frame_mem_ctrl = CODA9_FRAME_ENABLE_BWB; /* fallthrough */ case CODA_7541: @@ -2142,7 +2198,8 @@ static int coda_hw_init(struct coda_dev *dev) CODA_REG_BIT_STREAM_CTRL); } if (dev->devtype->product == CODA_960) - coda_write(dev, 1 << 12, CODA_REG_BIT_FRAME_MEM_CTRL); + coda_write(dev, CODA9_FRAME_ENABLE_BWB, + CODA_REG_BIT_FRAME_MEM_CTRL); else coda_write(dev, 0, CODA_REG_BIT_FRAME_MEM_CTRL); @@ -2386,11 +2443,11 @@ static const struct coda_devtype coda_devdata[] = { .num_vdevs = ARRAY_SIZE(coda9_video_devices), .workbuf_size = 80 * 1024, .tempbuf_size = 204 * 1024, - .iram_size = 0x20000, + .iram_size = 0x1f000, /* leave 4k for suspend code */ }, }; -static struct platform_device_id coda_platform_ids[] = { +static const struct platform_device_id coda_platform_ids[] = { { .name = "coda-imx27", .driver_data = CODA_IMX27 }, { /* sentinel */ } }; @@ -2470,7 +2527,8 @@ static int coda_probe(struct platform_device *pdev) return ret; } - dev->rstc = devm_reset_control_get_optional(&pdev->dev, NULL); + dev->rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, + NULL); if (IS_ERR(dev->rstc)) { ret = PTR_ERR(dev->rstc); dev_err(&pdev->dev, "failed get reset control: %d\n", ret); diff --git a/drivers/media/platform/coda/coda_regs.h b/drivers/media/platform/coda/coda_regs.h index 77ee46a93427..38df5fd9a2fa 100644 --- a/drivers/media/platform/coda/coda_regs.h +++ b/drivers/media/platform/coda/coda_regs.h @@ -158,6 +158,7 @@ #define CODA_CMD_DEC_SEQ_PS_BB_START 0x194 #define CODA_CMD_DEC_SEQ_PS_BB_SIZE 0x198 #define CODA_CMD_DEC_SEQ_MP4_ASP_CLASS 0x19c +#define CODA_MP4_CLASS_MPEG4 0 #define CODA_CMD_DEC_SEQ_X264_MV_EN 0x19c #define CODA_CMD_DEC_SEQ_SPP_CHUNK_SIZE 0x1a0 diff --git a/drivers/media/platform/coda/imx-vdoa.c b/drivers/media/platform/coda/imx-vdoa.c index df9b71621420..8eb3e0c05473 100644 --- a/drivers/media/platform/coda/imx-vdoa.c +++ b/drivers/media/platform/coda/imx-vdoa.c @@ -314,6 +314,8 @@ static int vdoa_probe(struct platform_device *pdev) return PTR_ERR(vdoa->regs); res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!res) + return -EINVAL; vdoa->irq = devm_request_threaded_irq(&pdev->dev, res->start, NULL, vdoa_irq_handler, IRQF_ONESHOT, "vdoa", vdoa); diff --git a/drivers/media/platform/davinci/vpbe.c b/drivers/media/platform/davinci/vpbe.c index 3679b1e7b39e..7f6462562579 100644 --- a/drivers/media/platform/davinci/vpbe.c +++ b/drivers/media/platform/davinci/vpbe.c @@ -790,7 +790,7 @@ static void vpbe_deinitialize(struct device *dev, struct vpbe_device *vpbe_dev) vpss_enable_clock(VPSS_VPBE_CLOCK, 0); } -static struct vpbe_device_ops vpbe_dev_ops = { +static const struct vpbe_device_ops vpbe_dev_ops = { .g_cropcap = vpbe_g_cropcap, .enum_outputs = vpbe_enum_outputs, .set_output = vpbe_set_output, diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c index a9bc0175e4d3..13d027031ff0 100644 --- a/drivers/media/platform/davinci/vpbe_display.c +++ b/drivers/media/platform/davinci/vpbe_display.c @@ -355,7 +355,7 @@ static void vpbe_stop_streaming(struct vb2_queue *vq) spin_unlock_irqrestore(&disp->dma_queue_lock, flags); } -static struct vb2_ops video_qops = { +static const struct vb2_ops video_qops = { .queue_setup = vpbe_buffer_queue_setup, .wait_prepare = vb2_ops_wait_prepare, .wait_finish = vb2_ops_wait_finish, @@ -1275,7 +1275,7 @@ static const struct v4l2_ioctl_ops vpbe_ioctl_ops = { .vidioc_enum_dv_timings = vpbe_display_enum_dv_timings, }; 
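The imx-vdoa change above adds a missing NULL check after platform_get_resource(): without the IRQ resource the driver would otherwise dereference a NULL pointer when reading res->start. A minimal probe-time sketch of the pattern, with hypothetical names, not lifted from the driver:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

/* Hypothetical threaded handler, for illustration only. */
static irqreturn_t example_irq_thread(int irq, void *data)
{
        return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
        struct resource *res;

        res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        if (!res)               /* fail cleanly instead of oopsing */
                return -EINVAL;

        return devm_request_threaded_irq(&pdev->dev, res->start, NULL,
                                         example_irq_thread, IRQF_ONESHOT,
                                         "example", pdev);
}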
-static struct v4l2_file_operations vpbe_fops = { +static const struct v4l2_file_operations vpbe_fops = { .owner = THIS_MODULE, .open = vpbe_display_open, .release = vpbe_display_release, diff --git a/drivers/media/platform/davinci/vpbe_osd.c b/drivers/media/platform/davinci/vpbe_osd.c index df042e84a678..66449791c70c 100644 --- a/drivers/media/platform/davinci/vpbe_osd.c +++ b/drivers/media/platform/davinci/vpbe_osd.c @@ -37,7 +37,7 @@ #define MODULE_NAME "davinci-vpbe-osd" -static struct platform_device_id vpbe_osd_devtype[] = { +static const struct platform_device_id vpbe_osd_devtype[] = { { .name = DM644X_VPBE_OSD_SUBDEV_NAME, .driver_data = VPBE_VERSION_1, diff --git a/drivers/media/platform/davinci/vpbe_venc.c b/drivers/media/platform/davinci/vpbe_venc.c index 8bfe90a24681..3a4e78595149 100644 --- a/drivers/media/platform/davinci/vpbe_venc.c +++ b/drivers/media/platform/davinci/vpbe_venc.c @@ -36,7 +36,7 @@ #define MODULE_NAME "davinci-vpbe-venc" -static struct platform_device_id vpbe_venc_devtype[] = { +static const struct platform_device_id vpbe_venc_devtype[] = { { .name = DM644X_VPBE_VENC_SUBDEV_NAME, .driver_data = VPBE_VERSION_1, diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c index b1bf4a7e8eb7..6792da16d9c7 100644 --- a/drivers/media/platform/davinci/vpfe_capture.c +++ b/drivers/media/platform/davinci/vpfe_capture.c @@ -1288,7 +1288,7 @@ static void vpfe_videobuf_release(struct videobuf_queue *vq, vb->state = VIDEOBUF_NEEDS_INIT; } -static struct videobuf_queue_ops vpfe_videobuf_qops = { +static const struct videobuf_queue_ops vpfe_videobuf_qops = { .buf_setup = vpfe_videobuf_setup, .buf_prepare = vpfe_videobuf_prepare, .buf_queue = vpfe_videobuf_queue, diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c index 4be6554c56c5..0ef36cec21d1 100644 --- a/drivers/media/platform/davinci/vpif_capture.c +++ b/drivers/media/platform/davinci/vpif_capture.c @@ -312,7 +312,7 @@ static void vpif_stop_streaming(struct vb2_queue *vq) spin_unlock_irqrestore(&common->irqlock, flags); } -static struct vb2_ops video_qops = { +static const struct vb2_ops video_qops = { .queue_setup = vpif_buffer_queue_setup, .buf_prepare = vpif_buffer_prepare, .start_streaming = vpif_start_streaming, @@ -1344,7 +1344,7 @@ static const struct v4l2_ioctl_ops vpif_ioctl_ops = { }; /* vpif file operations */ -static struct v4l2_file_operations vpif_fops = { +static const struct v4l2_file_operations vpif_fops = { .owner = THIS_MODULE, .open = v4l2_fh_open, .release = vb2_fop_release, @@ -1397,9 +1397,9 @@ static int vpif_async_bound(struct v4l2_async_notifier *notifier, vpif_obj.config->chan_config->inputs[i].subdev_name = (char *)to_of_node(subdev->fwnode)->full_name; vpif_dbg(2, debug, - "%s: setting input %d subdev_name = %s\n", + "%s: setting input %d subdev_name = %pOF\n", __func__, i, - to_of_node(subdev->fwnode)->full_name); + to_of_node(subdev->fwnode)); return 0; } } @@ -1557,8 +1557,8 @@ vpif_capture_get_pdata(struct platform_device *pdev) dev_err(&pdev->dev, "Could not parse the endpoint\n"); goto done; } - dev_dbg(&pdev->dev, "Endpoint %s, bus_width = %d\n", - endpoint->full_name, bus_cfg.bus.parallel.bus_width); + dev_dbg(&pdev->dev, "Endpoint %pOF, bus_width = %d\n", + endpoint, bus_cfg.bus.parallel.bus_width); flags = bus_cfg.bus.parallel.flags; if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) @@ -1569,13 +1569,13 @@ vpif_capture_get_pdata(struct platform_device *pdev) rem = 
of_graph_get_remote_port_parent(endpoint); if (!rem) { - dev_dbg(&pdev->dev, "Remote device at %s not found\n", - endpoint->full_name); + dev_dbg(&pdev->dev, "Remote device at %pOF not found\n", + endpoint); goto done; } - dev_dbg(&pdev->dev, "Remote device %s, %s found\n", - rem->name, rem->full_name); + dev_dbg(&pdev->dev, "Remote device %s, %pOF found\n", + rem->name, rem); sdinfo->name = rem->full_name; pdata->asd[i] = devm_kzalloc(&pdev->dev, @@ -1593,9 +1593,11 @@ vpif_capture_get_pdata(struct platform_device *pdev) } done: - pdata->asd_sizes[0] = i; - pdata->subdev_count = i; - pdata->card_name = "DA850/OMAP-L138 Video Capture"; + if (pdata) { + pdata->asd_sizes[0] = i; + pdata->subdev_count = i; + pdata->card_name = "DA850/OMAP-L138 Video Capture"; + } return pdata; } diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c index bf982bf86542..56fe4e5b396e 100644 --- a/drivers/media/platform/davinci/vpif_display.c +++ b/drivers/media/platform/davinci/vpif_display.c @@ -290,7 +290,7 @@ static void vpif_stop_streaming(struct vb2_queue *vq) spin_unlock_irqrestore(&common->irqlock, flags); } -static struct vb2_ops video_qops = { +static const struct vb2_ops video_qops = { .queue_setup = vpif_buffer_queue_setup, .wait_prepare = vb2_ops_wait_prepare, .wait_finish = vb2_ops_wait_finish, diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c index 33611a46ce35..2a2994ef15d5 100644 --- a/drivers/media/platform/exynos-gsc/gsc-m2m.c +++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c @@ -747,7 +747,7 @@ static const struct v4l2_file_operations gsc_m2m_fops = { .mmap = gsc_m2m_mmap, }; -static struct v4l2_m2m_ops gsc_m2m_ops = { +static const struct v4l2_m2m_ops gsc_m2m_ops = { .device_run = gsc_m2m_device_run, .job_abort = gsc_m2m_job_abort, }; diff --git a/drivers/media/platform/exynos4-is/fimc-is-i2c.c b/drivers/media/platform/exynos4-is/fimc-is-i2c.c index 2f559663e51e..70dd4852b2b9 100644 --- a/drivers/media/platform/exynos4-is/fimc-is-i2c.c +++ b/drivers/media/platform/exynos4-is/fimc-is-i2c.c @@ -130,7 +130,7 @@ static int fimc_is_i2c_resume(struct device *dev) } #endif -static struct dev_pm_ops fimc_is_i2c_pm_ops = { +static const struct dev_pm_ops fimc_is_i2c_pm_ops = { SET_RUNTIME_PM_OPS(fimc_is_i2c_runtime_suspend, fimc_is_i2c_runtime_resume, NULL) SET_SYSTEM_SLEEP_PM_OPS(fimc_is_i2c_suspend, fimc_is_i2c_resume) diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c index 340d906db370..5ddb2321e9e4 100644 --- a/drivers/media/platform/exynos4-is/fimc-is.c +++ b/drivers/media/platform/exynos4-is/fimc-is.c @@ -174,8 +174,8 @@ static int fimc_is_parse_sensor_config(struct fimc_is *is, unsigned int index, sensor->drvdata = fimc_is_sensor_get_drvdata(node); if (!sensor->drvdata) { - dev_err(&is->pdev->dev, "no driver data found for: %s\n", - node->full_name); + dev_err(&is->pdev->dev, "no driver data found for: %pOF\n", + node); return -EINVAL; } @@ -191,8 +191,8 @@ static int fimc_is_parse_sensor_config(struct fimc_is *is, unsigned int index, /* Use MIPI-CSIS channel id to determine the ISP I2C bus index. 
*/ ret = of_property_read_u32(port, "reg", &tmp); if (ret < 0) { - dev_err(&is->pdev->dev, "reg property not found at: %s\n", - port->full_name); + dev_err(&is->pdev->dev, "reg property not found at: %pOF\n", + port); of_node_put(port); return ret; } diff --git a/drivers/media/platform/exynos4-is/fimc-isp.c b/drivers/media/platform/exynos4-is/fimc-isp.c index 8efe9160ab34..fd793d3ac072 100644 --- a/drivers/media/platform/exynos4-is/fimc-isp.c +++ b/drivers/media/platform/exynos4-is/fimc-isp.c @@ -433,7 +433,7 @@ static const struct v4l2_subdev_core_ops fimc_is_core_ops = { .s_power = fimc_isp_subdev_s_power, }; -static struct v4l2_subdev_ops fimc_is_subdev_ops = { +static const struct v4l2_subdev_ops fimc_is_subdev_ops = { .core = &fimc_is_core_ops, .video = &fimc_is_subdev_video_ops, .pad = &fimc_is_subdev_pad_ops, diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c index 7d3ec5cc6608..4a3c9948ca54 100644 --- a/drivers/media/platform/exynos4-is/fimc-lite.c +++ b/drivers/media/platform/exynos4-is/fimc-lite.c @@ -1361,7 +1361,7 @@ static const struct v4l2_subdev_core_ops fimc_lite_core_ops = { .log_status = fimc_lite_log_status, }; -static struct v4l2_subdev_ops fimc_lite_subdev_ops = { +static const struct v4l2_subdev_ops fimc_lite_subdev_ops = { .core = &fimc_lite_core_ops, .video = &fimc_lite_subdev_video_ops, .pad = &fimc_lite_subdev_pad_ops, @@ -1493,8 +1493,7 @@ static int fimc_lite_probe(struct platform_device *pdev) if (!drv_data || fimc->index >= drv_data->num_instances || fimc->index < 0) { - dev_err(dev, "Wrong %s node alias\n", - dev->of_node->full_name); + dev_err(dev, "Wrong %pOF node alias\n", dev->of_node); return -EINVAL; } diff --git a/drivers/media/platform/exynos4-is/fimc-m2m.c b/drivers/media/platform/exynos4-is/fimc-m2m.c index d8724fe9e9da..9027d0b0d2bd 100644 --- a/drivers/media/platform/exynos4-is/fimc-m2m.c +++ b/drivers/media/platform/exynos4-is/fimc-m2m.c @@ -704,7 +704,7 @@ static const struct v4l2_file_operations fimc_m2m_fops = { .mmap = v4l2_m2m_fop_mmap, }; -static struct v4l2_m2m_ops m2m_ops = { +static const struct v4l2_m2m_ops m2m_ops = { .device_run = fimc_device_run, .job_abort = fimc_job_abort, }; diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c index 7d1cf78846c4..d4656d5175d7 100644 --- a/drivers/media/platform/exynos4-is/media-dev.c +++ b/drivers/media/platform/exynos4-is/media-dev.c @@ -412,8 +412,8 @@ static int fimc_md_parse_port_node(struct fimc_md *fmd, rem = of_graph_get_remote_port_parent(ep); of_node_put(ep); if (rem == NULL) { - v4l2_info(&fmd->v4l2_dev, "Remote device at %s not found\n", - ep->full_name); + v4l2_info(&fmd->v4l2_dev, "Remote device at %pOF not found\n", + ep); return 0; } @@ -430,8 +430,8 @@ static int fimc_md_parse_port_node(struct fimc_md *fmd, */ pd->sensor_bus_type = FIMC_BUS_TYPE_MIPI_CSI2; } else { - v4l2_err(&fmd->v4l2_dev, "Wrong port id (%u) at node %s\n", - endpoint.base.port, rem->full_name); + v4l2_err(&fmd->v4l2_dev, "Wrong port id (%u) at node %pOF\n", + endpoint.base.port, rem); } /* * For FIMC-IS handled sensors, that are placed under i2c-isp device diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c index 98c89873c2dc..560aadabcb11 100644 --- a/drivers/media/platform/exynos4-is/mipi-csis.c +++ b/drivers/media/platform/exynos4-is/mipi-csis.c @@ -730,8 +730,8 @@ static int s5pcsis_parse_dt(struct platform_device *pdev, node = 
of_graph_get_next_endpoint(node, NULL); if (!node) { - dev_err(&pdev->dev, "No port node at %s\n", - pdev->dev.of_node->full_name); + dev_err(&pdev->dev, "No port node at %pOF\n", + pdev->dev.of_node); return -EINVAL; } /* Get port node and validate MIPI-CSI channel id. */ diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c index 97e164b2075a..fb43025df573 100644 --- a/drivers/media/platform/fsl-viu.c +++ b/drivers/media/platform/fsl-viu.c @@ -549,7 +549,7 @@ static void buffer_release(struct videobuf_queue *vq, free_buffer(vq, buf); } -static struct videobuf_queue_ops viu_video_qops = { +static const struct videobuf_queue_ops viu_video_qops = { .buf_setup = buffer_setup, .buf_prepare = buffer_prepare, .buf_queue = buffer_queue, @@ -1340,7 +1340,7 @@ static int viu_mmap(struct file *file, struct vm_area_struct *vma) return ret; } -static struct v4l2_file_operations viu_fops = { +static const struct v4l2_file_operations viu_fops = { .owner = THIS_MODULE, .open = viu_open, .release = viu_release, @@ -1380,7 +1380,7 @@ static const struct v4l2_ioctl_ops viu_ioctl_ops = { .vidioc_unsubscribe_event = v4l2_event_unsubscribe, }; -static struct video_device viu_template = { +static const struct video_device viu_template = { .name = "FSL viu", .fops = &viu_fops, .minor = -1, diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c index 980066b8d32a..c8a12493f395 100644 --- a/drivers/media/platform/m2m-deinterlace.c +++ b/drivers/media/platform/m2m-deinterlace.c @@ -979,7 +979,7 @@ static const struct v4l2_file_operations deinterlace_fops = { .mmap = deinterlace_mmap, }; -static struct video_device deinterlace_videodev = { +static const struct video_device deinterlace_videodev = { .name = MEM2MEM_NAME, .fops = &deinterlace_fops, .ioctl_ops = &deinterlace_ioctl_ops, @@ -988,7 +988,7 @@ static struct video_device deinterlace_videodev = { .vfl_dir = VFL_DIR_M2M, }; -static struct v4l2_m2m_ops m2m_ops = { +static const struct v4l2_m2m_ops m2m_ops = { .device_run = deinterlace_device_run, .job_ready = deinterlace_job_ready, .job_abort = deinterlace_job_abort, diff --git a/drivers/media/platform/marvell-ccic/cafe-driver.c b/drivers/media/platform/marvell-ccic/cafe-driver.c index 77890bd0deab..57d2c483ad09 100644 --- a/drivers/media/platform/marvell-ccic/cafe-driver.c +++ b/drivers/media/platform/marvell-ccic/cafe-driver.c @@ -326,7 +326,7 @@ static u32 cafe_smbus_func(struct i2c_adapter *adapter) I2C_FUNC_SMBUS_WRITE_BYTE_DATA; } -static struct i2c_algorithm cafe_smbus_algo = { +static const struct i2c_algorithm cafe_smbus_algo = { .smbus_xfer = cafe_smbus_xfer, .functionality = cafe_smbus_func }; @@ -612,7 +612,7 @@ static int cafe_pci_resume(struct pci_dev *pdev) #endif /* CONFIG_PM */ -static struct pci_device_id cafe_ids[] = { +static const struct pci_device_id cafe_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PCI_DEVICE_ID_MARVELL_88ALP01_CCIC) }, { 0, } diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c index 8cac2f202099..b07a251e8857 100644 --- a/drivers/media/platform/marvell-ccic/mcam-core.c +++ b/drivers/media/platform/marvell-ccic/mcam-core.c @@ -1639,7 +1639,7 @@ static const struct v4l2_file_operations mcam_v4l_fops = { * This template device holds all of those v4l2 methods; we * clone it for specific real devices. 
*/ -static struct video_device mcam_v4l_template = { +static const struct video_device mcam_v4l_template = { .name = "mcam", .fops = &mcam_v4l_fops, .ioctl_ops = &mcam_v4l_ioctl_ops, diff --git a/drivers/media/platform/meson/Makefile b/drivers/media/platform/meson/Makefile new file mode 100644 index 000000000000..597beb8f34d1 --- /dev/null +++ b/drivers/media/platform/meson/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_VIDEO_MESON_AO_CEC) += ao-cec.o diff --git a/drivers/media/platform/meson/ao-cec.c b/drivers/media/platform/meson/ao-cec.c new file mode 100644 index 000000000000..8040a6285c3f --- /dev/null +++ b/drivers/media/platform/meson/ao-cec.c @@ -0,0 +1,744 @@ +/* + * Driver for Amlogic Meson AO CEC Controller + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved + * Copyright (C) 2017 BayLibre, SAS + * Author: Neil Armstrong + * + * SPDX-License-Identifier: GPL-2.0+ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* CEC Registers */ + +/* + * [2:1] cntl_clk + * - 0 = Disable clk (Power-off mode) + * - 1 = Enable gated clock (Normal mode) + * - 2 = Enable free-run clk (Debug mode) + */ +#define CEC_GEN_CNTL_REG 0x00 + +#define CEC_GEN_CNTL_RESET BIT(0) +#define CEC_GEN_CNTL_CLK_DISABLE 0 +#define CEC_GEN_CNTL_CLK_ENABLE 1 +#define CEC_GEN_CNTL_CLK_ENABLE_DBG 2 +#define CEC_GEN_CNTL_CLK_CTRL_MASK GENMASK(2, 1) + +/* + * [7:0] cec_reg_addr + * [15:8] cec_reg_wrdata + * [16] cec_reg_wr + * - 0 = Read + * - 1 = Write + * [23] bus free + * [31:24] cec_reg_rddata + */ +#define CEC_RW_REG 0x04 + +#define CEC_RW_ADDR GENMASK(7, 0) +#define CEC_RW_WR_DATA GENMASK(15, 8) +#define CEC_RW_WRITE_EN BIT(16) +#define CEC_RW_BUS_BUSY BIT(23) +#define CEC_RW_RD_DATA GENMASK(31, 24) + +/* + * [1] tx intr + * [2] rx intr + */ +#define CEC_INTR_MASKN_REG 0x08 +#define CEC_INTR_CLR_REG 0x0c +#define CEC_INTR_STAT_REG 0x10 + +#define CEC_INTR_TX BIT(1) +#define CEC_INTR_RX BIT(2) + +/* CEC Commands */ + +#define CEC_TX_MSG_0_HEADER 0x00 +#define CEC_TX_MSG_1_OPCODE 0x01 +#define CEC_TX_MSG_2_OP1 0x02 +#define CEC_TX_MSG_3_OP2 0x03 +#define CEC_TX_MSG_4_OP3 0x04 +#define CEC_TX_MSG_5_OP4 0x05 +#define CEC_TX_MSG_6_OP5 0x06 +#define CEC_TX_MSG_7_OP6 0x07 +#define CEC_TX_MSG_8_OP7 0x08 +#define CEC_TX_MSG_9_OP8 0x09 +#define CEC_TX_MSG_A_OP9 0x0A +#define CEC_TX_MSG_B_OP10 0x0B +#define CEC_TX_MSG_C_OP11 0x0C +#define CEC_TX_MSG_D_OP12 0x0D +#define CEC_TX_MSG_E_OP13 0x0E +#define CEC_TX_MSG_F_OP14 0x0F +#define CEC_TX_MSG_LENGTH 0x10 +#define CEC_TX_MSG_CMD 0x11 +#define CEC_TX_WRITE_BUF 0x12 +#define CEC_TX_CLEAR_BUF 0x13 +#define CEC_RX_MSG_CMD 0x14 +#define CEC_RX_CLEAR_BUF 0x15 +#define CEC_LOGICAL_ADDR0 0x16 +#define CEC_LOGICAL_ADDR1 0x17 +#define CEC_LOGICAL_ADDR2 0x18 +#define CEC_LOGICAL_ADDR3 0x19 +#define CEC_LOGICAL_ADDR4 0x1A +#define CEC_CLOCK_DIV_H 0x1B +#define CEC_CLOCK_DIV_L 0x1C +#define CEC_QUIESCENT_25MS_BIT7_0 0x20 +#define CEC_QUIESCENT_25MS_BIT11_8 0x21 +#define CEC_STARTBITMINL2H_3MS5_BIT7_0 0x22 +#define CEC_STARTBITMINL2H_3MS5_BIT8 0x23 +#define CEC_STARTBITMAXL2H_3MS9_BIT7_0 0x24 +#define CEC_STARTBITMAXL2H_3MS9_BIT8 0x25 +#define CEC_STARTBITMINH_0MS6_BIT7_0 0x26 +#define CEC_STARTBITMINH_0MS6_BIT8 0x27 +#define CEC_STARTBITMAXH_1MS0_BIT7_0 0x28 +#define CEC_STARTBITMAXH_1MS0_BIT8 0x29 +#define CEC_STARTBITMINTOT_4MS3_BIT7_0 0x2A +#define CEC_STARTBITMINTOT_4MS3_BIT9_8 0x2B +#define CEC_STARTBITMAXTOT_4MS7_BIT7_0 0x2C +#define CEC_STARTBITMAXTOT_4MS7_BIT9_8 0x2D 
+#define CEC_LOGIC1MINL2H_0MS4_BIT7_0 0x2E +#define CEC_LOGIC1MINL2H_0MS4_BIT8 0x2F +#define CEC_LOGIC1MAXL2H_0MS8_BIT7_0 0x30 +#define CEC_LOGIC1MAXL2H_0MS8_BIT8 0x31 +#define CEC_LOGIC0MINL2H_1MS3_BIT7_0 0x32 +#define CEC_LOGIC0MINL2H_1MS3_BIT8 0x33 +#define CEC_LOGIC0MAXL2H_1MS7_BIT7_0 0x34 +#define CEC_LOGIC0MAXL2H_1MS7_BIT8 0x35 +#define CEC_LOGICMINTOTAL_2MS05_BIT7_0 0x36 +#define CEC_LOGICMINTOTAL_2MS05_BIT9_8 0x37 +#define CEC_LOGICMAXHIGH_2MS8_BIT7_0 0x38 +#define CEC_LOGICMAXHIGH_2MS8_BIT8 0x39 +#define CEC_LOGICERRLOW_3MS4_BIT7_0 0x3A +#define CEC_LOGICERRLOW_3MS4_BIT8 0x3B +#define CEC_NOMSMPPOINT_1MS05 0x3C +#define CEC_DELCNTR_LOGICERR 0x3E +#define CEC_TXTIME_17MS_BIT7_0 0x40 +#define CEC_TXTIME_17MS_BIT10_8 0x41 +#define CEC_TXTIME_2BIT_BIT7_0 0x42 +#define CEC_TXTIME_2BIT_BIT10_8 0x43 +#define CEC_TXTIME_4BIT_BIT7_0 0x44 +#define CEC_TXTIME_4BIT_BIT10_8 0x45 +#define CEC_STARTBITNOML2H_3MS7_BIT7_0 0x46 +#define CEC_STARTBITNOML2H_3MS7_BIT8 0x47 +#define CEC_STARTBITNOMH_0MS8_BIT7_0 0x48 +#define CEC_STARTBITNOMH_0MS8_BIT8 0x49 +#define CEC_LOGIC1NOML2H_0MS6_BIT7_0 0x4A +#define CEC_LOGIC1NOML2H_0MS6_BIT8 0x4B +#define CEC_LOGIC0NOML2H_1MS5_BIT7_0 0x4C +#define CEC_LOGIC0NOML2H_1MS5_BIT8 0x4D +#define CEC_LOGIC1NOMH_1MS8_BIT7_0 0x4E +#define CEC_LOGIC1NOMH_1MS8_BIT8 0x4F +#define CEC_LOGIC0NOMH_0MS9_BIT7_0 0x50 +#define CEC_LOGIC0NOMH_0MS9_BIT8 0x51 +#define CEC_LOGICERRLOW_3MS6_BIT7_0 0x52 +#define CEC_LOGICERRLOW_3MS6_BIT8 0x53 +#define CEC_CHKCONTENTION_0MS1 0x54 +#define CEC_PREPARENXTBIT_0MS05_BIT7_0 0x56 +#define CEC_PREPARENXTBIT_0MS05_BIT8 0x57 +#define CEC_NOMSMPACKPOINT_0MS45 0x58 +#define CEC_ACK0NOML2H_1MS5_BIT7_0 0x5A +#define CEC_ACK0NOML2H_1MS5_BIT8 0x5B +#define CEC_BUGFIX_DISABLE_0 0x60 +#define CEC_BUGFIX_DISABLE_1 0x61 +#define CEC_RX_MSG_0_HEADER 0x80 +#define CEC_RX_MSG_1_OPCODE 0x81 +#define CEC_RX_MSG_2_OP1 0x82 +#define CEC_RX_MSG_3_OP2 0x83 +#define CEC_RX_MSG_4_OP3 0x84 +#define CEC_RX_MSG_5_OP4 0x85 +#define CEC_RX_MSG_6_OP5 0x86 +#define CEC_RX_MSG_7_OP6 0x87 +#define CEC_RX_MSG_8_OP7 0x88 +#define CEC_RX_MSG_9_OP8 0x89 +#define CEC_RX_MSG_A_OP9 0x8A +#define CEC_RX_MSG_B_OP10 0x8B +#define CEC_RX_MSG_C_OP11 0x8C +#define CEC_RX_MSG_D_OP12 0x8D +#define CEC_RX_MSG_E_OP13 0x8E +#define CEC_RX_MSG_F_OP14 0x8F +#define CEC_RX_MSG_LENGTH 0x90 +#define CEC_RX_MSG_STATUS 0x91 +#define CEC_RX_NUM_MSG 0x92 +#define CEC_TX_MSG_STATUS 0x93 +#define CEC_TX_NUM_MSG 0x94 + + +/* CEC_TX_MSG_CMD definition */ +#define TX_NO_OP 0 /* No transaction */ +#define TX_REQ_CURRENT 1 /* Transmit earliest message in buffer */ +#define TX_ABORT 2 /* Abort transmitting earliest message */ +#define TX_REQ_NEXT 3 /* Overwrite earliest msg, transmit next */ + +/* tx_msg_status definition */ +#define TX_IDLE 0 /* No transaction */ +#define TX_BUSY 1 /* Transmitter is busy */ +#define TX_DONE 2 /* Message successfully transmitted */ +#define TX_ERROR 3 /* Message transmitted with error */ + +/* rx_msg_cmd */ +#define RX_NO_OP 0 /* No transaction */ +#define RX_ACK_CURRENT 1 /* Read earliest message in buffer */ +#define RX_DISABLE 2 /* Disable receiving latest message */ +#define RX_ACK_NEXT 3 /* Clear earliest msg, read next */ + +/* rx_msg_status */ +#define RX_IDLE 0 /* No transaction */ +#define RX_BUSY 1 /* Receiver is busy */ +#define RX_DONE 2 /* Message has been received successfully */ +#define RX_ERROR 3 /* Message has been received with error */ + +/* RX_CLEAR_BUF options */ +#define CLEAR_START 1 +#define CLEAR_STOP 0 + +/* CEC_LOGICAL_ADDRx options */ +#define 
LOGICAL_ADDR_MASK 0xf +#define LOGICAL_ADDR_VALID BIT(4) +#define LOGICAL_ADDR_DISABLE 0 + +#define CEC_CLK_RATE 32768 + +struct meson_ao_cec_device { + struct platform_device *pdev; + void __iomem *base; + struct clk *core; + spinlock_t cec_reg_lock; + struct cec_notifier *notify; + struct cec_adapter *adap; + struct cec_msg rx_msg; +}; + +#define writel_bits_relaxed(mask, val, addr) \ + writel_relaxed((readl_relaxed(addr) & ~(mask)) | (val), addr) + +static inline int meson_ao_cec_wait_busy(struct meson_ao_cec_device *ao_cec) +{ + ktime_t timeout = ktime_add_us(ktime_get(), 5000); + + while (readl_relaxed(ao_cec->base + CEC_RW_REG) & CEC_RW_BUS_BUSY) { + if (ktime_compare(ktime_get(), timeout) > 0) + return -ETIMEDOUT; + } + + return 0; +} + +static void meson_ao_cec_read(struct meson_ao_cec_device *ao_cec, + unsigned long address, u8 *data, + int *res) +{ + unsigned long flags; + u32 reg = FIELD_PREP(CEC_RW_ADDR, address); + int ret = 0; + + if (res && *res) + return; + + spin_lock_irqsave(&ao_cec->cec_reg_lock, flags); + + ret = meson_ao_cec_wait_busy(ao_cec); + if (ret) + goto read_out; + + writel_relaxed(reg, ao_cec->base + CEC_RW_REG); + + ret = meson_ao_cec_wait_busy(ao_cec); + if (ret) + goto read_out; + + *data = FIELD_GET(CEC_RW_RD_DATA, + readl_relaxed(ao_cec->base + CEC_RW_REG)); + +read_out: + spin_unlock_irqrestore(&ao_cec->cec_reg_lock, flags); + + if (res) + *res = ret; +} + +static void meson_ao_cec_write(struct meson_ao_cec_device *ao_cec, + unsigned long address, u8 data, + int *res) +{ + unsigned long flags; + u32 reg = FIELD_PREP(CEC_RW_ADDR, address) | + FIELD_PREP(CEC_RW_WR_DATA, data) | + CEC_RW_WRITE_EN; + int ret = 0; + + if (res && *res) + return; + + spin_lock_irqsave(&ao_cec->cec_reg_lock, flags); + + ret = meson_ao_cec_wait_busy(ao_cec); + if (ret) + goto write_out; + + writel_relaxed(reg, ao_cec->base + CEC_RW_REG); + +write_out: + spin_unlock_irqrestore(&ao_cec->cec_reg_lock, flags); + + if (res) + *res = ret; +} + +static inline void meson_ao_cec_irq_setup(struct meson_ao_cec_device *ao_cec, + bool enable) +{ + u32 cfg = CEC_INTR_TX | CEC_INTR_RX; + + writel_bits_relaxed(cfg, enable ? 
cfg : 0, + ao_cec->base + CEC_INTR_MASKN_REG); +} + +static inline int meson_ao_cec_clear(struct meson_ao_cec_device *ao_cec) +{ + int ret = 0; + + meson_ao_cec_write(ao_cec, CEC_RX_MSG_CMD, RX_DISABLE, &ret); + meson_ao_cec_write(ao_cec, CEC_TX_MSG_CMD, TX_ABORT, &ret); + meson_ao_cec_write(ao_cec, CEC_RX_CLEAR_BUF, 1, &ret); + meson_ao_cec_write(ao_cec, CEC_TX_CLEAR_BUF, 1, &ret); + if (ret) + return ret; + + udelay(100); + + meson_ao_cec_write(ao_cec, CEC_RX_CLEAR_BUF, 0, &ret); + meson_ao_cec_write(ao_cec, CEC_TX_CLEAR_BUF, 0, &ret); + if (ret) + return ret; + + udelay(100); + + meson_ao_cec_write(ao_cec, CEC_RX_MSG_CMD, RX_NO_OP, &ret); + meson_ao_cec_write(ao_cec, CEC_TX_MSG_CMD, TX_NO_OP, &ret); + + return ret; +} + +static int meson_ao_cec_arbit_bit_time_set(struct meson_ao_cec_device *ao_cec, + unsigned int bit_set, + unsigned int time_set) +{ + int ret = 0; + + switch (bit_set) { + case CEC_SIGNAL_FREE_TIME_RETRY: + meson_ao_cec_write(ao_cec, CEC_TXTIME_4BIT_BIT7_0, + time_set & 0xff, &ret); + meson_ao_cec_write(ao_cec, CEC_TXTIME_4BIT_BIT10_8, + (time_set >> 8) & 0x7, &ret); + break; + + case CEC_SIGNAL_FREE_TIME_NEW_INITIATOR: + meson_ao_cec_write(ao_cec, CEC_TXTIME_2BIT_BIT7_0, + time_set & 0xff, &ret); + meson_ao_cec_write(ao_cec, CEC_TXTIME_2BIT_BIT10_8, + (time_set >> 8) & 0x7, &ret); + break; + + case CEC_SIGNAL_FREE_TIME_NEXT_XFER: + meson_ao_cec_write(ao_cec, CEC_TXTIME_17MS_BIT7_0, + time_set & 0xff, &ret); + meson_ao_cec_write(ao_cec, CEC_TXTIME_17MS_BIT10_8, + (time_set >> 8) & 0x7, &ret); + break; + } + + return ret; +} + +static irqreturn_t meson_ao_cec_irq(int irq, void *data) +{ + struct meson_ao_cec_device *ao_cec = data; + u32 stat = readl_relaxed(ao_cec->base + CEC_INTR_STAT_REG); + + if (stat) + return IRQ_WAKE_THREAD; + + return IRQ_NONE; +} + +static void meson_ao_cec_irq_tx(struct meson_ao_cec_device *ao_cec) +{ + unsigned long tx_status = 0; + u8 stat; + int ret = 0; + + meson_ao_cec_read(ao_cec, CEC_TX_MSG_STATUS, &stat, &ret); + if (ret) + goto tx_reg_err; + + switch (stat) { + case TX_DONE: + tx_status = CEC_TX_STATUS_OK; + break; + + case TX_BUSY: + tx_status = CEC_TX_STATUS_ARB_LOST; + break; + + case TX_IDLE: + tx_status = CEC_TX_STATUS_LOW_DRIVE; + break; + + case TX_ERROR: + default: + tx_status = CEC_TX_STATUS_NACK; + break; + } + + /* Clear Interruption */ + writel_relaxed(CEC_INTR_TX, ao_cec->base + CEC_INTR_CLR_REG); + + /* Stop TX */ + meson_ao_cec_write(ao_cec, CEC_TX_MSG_CMD, TX_NO_OP, &ret); + if (ret) + goto tx_reg_err; + + cec_transmit_attempt_done(ao_cec->adap, tx_status); + return; + +tx_reg_err: + cec_transmit_attempt_done(ao_cec->adap, CEC_TX_STATUS_ERROR); +} + +static void meson_ao_cec_irq_rx(struct meson_ao_cec_device *ao_cec) +{ + int i, ret = 0; + u8 reg; + + meson_ao_cec_read(ao_cec, CEC_RX_MSG_STATUS, ®, &ret); + if (reg != RX_DONE) + goto rx_out; + + meson_ao_cec_read(ao_cec, CEC_RX_NUM_MSG, ®, &ret); + if (reg != 1) + goto rx_out; + + meson_ao_cec_read(ao_cec, CEC_RX_MSG_LENGTH, ®, &ret); + + ao_cec->rx_msg.len = reg + 1; + if (ao_cec->rx_msg.len > CEC_MAX_MSG_SIZE) + ao_cec->rx_msg.len = CEC_MAX_MSG_SIZE; + + for (i = 0; i < ao_cec->rx_msg.len; i++) { + u8 byte; + + meson_ao_cec_read(ao_cec, CEC_RX_MSG_0_HEADER + i, &byte, &ret); + + ao_cec->rx_msg.msg[i] = byte; + } + + if (ret) + goto rx_out; + + cec_received_msg(ao_cec->adap, &ao_cec->rx_msg); + +rx_out: + /* Clear Interruption */ + writel_relaxed(CEC_INTR_RX, ao_cec->base + CEC_INTR_CLR_REG); + + /* Ack RX message */ + meson_ao_cec_write(ao_cec, CEC_RX_MSG_CMD, 
RX_ACK_CURRENT, &ret); + meson_ao_cec_write(ao_cec, CEC_RX_MSG_CMD, RX_NO_OP, &ret); + + /* Clear RX buffer */ + meson_ao_cec_write(ao_cec, CEC_RX_CLEAR_BUF, CLEAR_START, &ret); + meson_ao_cec_write(ao_cec, CEC_RX_CLEAR_BUF, CLEAR_STOP, &ret); +} + +static irqreturn_t meson_ao_cec_irq_thread(int irq, void *data) +{ + struct meson_ao_cec_device *ao_cec = data; + u32 stat = readl_relaxed(ao_cec->base + CEC_INTR_STAT_REG); + + if (stat & CEC_INTR_TX) + meson_ao_cec_irq_tx(ao_cec); + + meson_ao_cec_irq_rx(ao_cec); + + return IRQ_HANDLED; +} + +static int meson_ao_cec_set_log_addr(struct cec_adapter *adap, u8 logical_addr) +{ + struct meson_ao_cec_device *ao_cec = adap->priv; + int ret = 0; + + meson_ao_cec_write(ao_cec, CEC_LOGICAL_ADDR0, + LOGICAL_ADDR_DISABLE, &ret); + if (ret) + return ret; + + ret = meson_ao_cec_clear(ao_cec); + if (ret) + return ret; + + if (logical_addr == CEC_LOG_ADDR_INVALID) + return 0; + + meson_ao_cec_write(ao_cec, CEC_LOGICAL_ADDR0, + logical_addr & LOGICAL_ADDR_MASK, &ret); + if (ret) + return ret; + + udelay(100); + + meson_ao_cec_write(ao_cec, CEC_LOGICAL_ADDR0, + (logical_addr & LOGICAL_ADDR_MASK) | + LOGICAL_ADDR_VALID, &ret); + + return ret; +} + +static int meson_ao_cec_transmit(struct cec_adapter *adap, u8 attempts, + u32 signal_free_time, struct cec_msg *msg) +{ + struct meson_ao_cec_device *ao_cec = adap->priv; + int i, ret = 0; + u8 reg; + + meson_ao_cec_read(ao_cec, CEC_TX_MSG_STATUS, ®, &ret); + if (ret) + return ret; + + if (reg == TX_BUSY) { + dev_err(&ao_cec->pdev->dev, "%s: busy TX: aborting\n", + __func__); + meson_ao_cec_write(ao_cec, CEC_TX_MSG_CMD, TX_ABORT, &ret); + } + + for (i = 0; i < msg->len; i++) { + meson_ao_cec_write(ao_cec, CEC_TX_MSG_0_HEADER + i, + msg->msg[i], &ret); + } + + meson_ao_cec_write(ao_cec, CEC_TX_MSG_LENGTH, msg->len - 1, &ret); + meson_ao_cec_write(ao_cec, CEC_TX_MSG_CMD, TX_REQ_CURRENT, &ret); + + return ret; +} + +static int meson_ao_cec_adap_enable(struct cec_adapter *adap, bool enable) +{ + struct meson_ao_cec_device *ao_cec = adap->priv; + int ret; + + meson_ao_cec_irq_setup(ao_cec, false); + + writel_bits_relaxed(CEC_GEN_CNTL_RESET, CEC_GEN_CNTL_RESET, + ao_cec->base + CEC_GEN_CNTL_REG); + + if (!enable) + return 0; + + /* Enable gated clock (Normal mode). */ + writel_bits_relaxed(CEC_GEN_CNTL_CLK_CTRL_MASK, + FIELD_PREP(CEC_GEN_CNTL_CLK_CTRL_MASK, + CEC_GEN_CNTL_CLK_ENABLE), + ao_cec->base + CEC_GEN_CNTL_REG); + + udelay(100); + + /* Release Reset */ + writel_bits_relaxed(CEC_GEN_CNTL_RESET, 0, + ao_cec->base + CEC_GEN_CNTL_REG); + + /* Clear buffers */ + ret = meson_ao_cec_clear(ao_cec); + if (ret) + return ret; + + /* CEC arbitration 3/5/7 bit time set. 
*/ + ret = meson_ao_cec_arbit_bit_time_set(ao_cec, + CEC_SIGNAL_FREE_TIME_RETRY, + 0x118); + if (ret) + return ret; + ret = meson_ao_cec_arbit_bit_time_set(ao_cec, + CEC_SIGNAL_FREE_TIME_NEW_INITIATOR, + 0x000); + if (ret) + return ret; + ret = meson_ao_cec_arbit_bit_time_set(ao_cec, + CEC_SIGNAL_FREE_TIME_NEXT_XFER, + 0x2aa); + if (ret) + return ret; + + meson_ao_cec_irq_setup(ao_cec, true); + + return 0; +} + +static const struct cec_adap_ops meson_ao_cec_ops = { + .adap_enable = meson_ao_cec_adap_enable, + .adap_log_addr = meson_ao_cec_set_log_addr, + .adap_transmit = meson_ao_cec_transmit, +}; + +static int meson_ao_cec_probe(struct platform_device *pdev) +{ + struct meson_ao_cec_device *ao_cec; + struct platform_device *hdmi_dev; + struct device_node *np; + struct resource *res; + int ret, irq; + + np = of_parse_phandle(pdev->dev.of_node, "hdmi-phandle", 0); + if (!np) { + dev_err(&pdev->dev, "Failed to find hdmi node\n"); + return -ENODEV; + } + + hdmi_dev = of_find_device_by_node(np); + if (hdmi_dev == NULL) + return -EPROBE_DEFER; + + ao_cec = devm_kzalloc(&pdev->dev, sizeof(*ao_cec), GFP_KERNEL); + if (!ao_cec) + return -ENOMEM; + + spin_lock_init(&ao_cec->cec_reg_lock); + + ao_cec->notify = cec_notifier_get(&hdmi_dev->dev); + if (!ao_cec->notify) + return -ENOMEM; + + ao_cec->adap = cec_allocate_adapter(&meson_ao_cec_ops, ao_cec, + "meson_ao_cec", + CEC_CAP_LOG_ADDRS | + CEC_CAP_TRANSMIT | + CEC_CAP_RC | + CEC_CAP_PASSTHROUGH, + 1); /* Use 1 for now */ + if (IS_ERR(ao_cec->adap)) { + ret = PTR_ERR(ao_cec->adap); + goto out_probe_notify; + } + + ao_cec->adap->owner = THIS_MODULE; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ao_cec->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(ao_cec->base)) { + ret = PTR_ERR(ao_cec->base); + goto out_probe_adapter; + } + + irq = platform_get_irq(pdev, 0); + ret = devm_request_threaded_irq(&pdev->dev, irq, + meson_ao_cec_irq, + meson_ao_cec_irq_thread, + 0, NULL, ao_cec); + if (ret) { + dev_err(&pdev->dev, "irq request failed\n"); + goto out_probe_adapter; + } + + ao_cec->core = devm_clk_get(&pdev->dev, "core"); + if (IS_ERR(ao_cec->core)) { + dev_err(&pdev->dev, "core clock request failed\n"); + ret = PTR_ERR(ao_cec->core); + goto out_probe_adapter; + } + + ret = clk_prepare_enable(ao_cec->core); + if (ret) { + dev_err(&pdev->dev, "core clock enable failed\n"); + goto out_probe_adapter; + } + + ret = clk_set_rate(ao_cec->core, CEC_CLK_RATE); + if (ret) { + dev_err(&pdev->dev, "core clock set rate failed\n"); + goto out_probe_clk; + } + + device_reset_optional(&pdev->dev); + + ao_cec->pdev = pdev; + platform_set_drvdata(pdev, ao_cec); + + ret = cec_register_adapter(ao_cec->adap, &pdev->dev); + if (ret < 0) { + cec_notifier_put(ao_cec->notify); + goto out_probe_clk; + } + + /* Setup Hardware */ + writel_relaxed(CEC_GEN_CNTL_RESET, + ao_cec->base + CEC_GEN_CNTL_REG); + + cec_register_cec_notifier(ao_cec->adap, ao_cec->notify); + + return 0; + +out_probe_clk: + clk_disable_unprepare(ao_cec->core); + +out_probe_adapter: + cec_delete_adapter(ao_cec->adap); + +out_probe_notify: + cec_notifier_put(ao_cec->notify); + + dev_err(&pdev->dev, "CEC controller registration failed\n"); + + return ret; +} + +static int meson_ao_cec_remove(struct platform_device *pdev) +{ + struct meson_ao_cec_device *ao_cec = platform_get_drvdata(pdev); + + clk_disable_unprepare(ao_cec->core); + + cec_unregister_adapter(ao_cec->adap); + + cec_notifier_put(ao_cec->notify); + + return 0; +} + +static const struct of_device_id meson_ao_cec_of_match[] = { 
+ { .compatible = "amlogic,meson-gx-ao-cec", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, meson_ao_cec_of_match); + +static struct platform_driver meson_ao_cec_driver = { + .probe = meson_ao_cec_probe, + .remove = meson_ao_cec_remove, + .driver = { + .name = "meson-ao-cec", + .of_match_table = of_match_ptr(meson_ao_cec_of_match), + }, +}; + +module_platform_driver(meson_ao_cec_driver); + +MODULE_DESCRIPTION("Meson AO CEC Controller driver"); +MODULE_AUTHOR("Neil Armstrong "); +MODULE_LICENSE("GPL"); diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c index 451a54039e65..226f90886484 100644 --- a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c +++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c @@ -756,7 +756,7 @@ static void mtk_jpeg_stop_streaming(struct vb2_queue *q) pm_runtime_put_sync(ctx->jpeg->dev); } -static struct vb2_ops mtk_jpeg_qops = { +static const struct vb2_ops mtk_jpeg_qops = { .queue_setup = mtk_jpeg_queue_setup, .buf_prepare = mtk_jpeg_buf_prepare, .buf_queue = mtk_jpeg_buf_queue, @@ -865,7 +865,7 @@ static void mtk_jpeg_job_abort(void *priv) { } -static struct v4l2_m2m_ops mtk_jpeg_m2m_ops = { +static const struct v4l2_m2m_ops mtk_jpeg_m2m_ops = { .device_run = mtk_jpeg_device_run, .job_ready = mtk_jpeg_job_ready, .job_abort = mtk_jpeg_job_abort, diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_comp.c b/drivers/media/platform/mtk-mdp/mtk_mdp_comp.c index aa8f9fd1f1a2..03aba03a24c8 100644 --- a/drivers/media/platform/mtk-mdp/mtk_mdp_comp.c +++ b/drivers/media/platform/mtk-mdp/mtk_mdp_comp.c @@ -75,7 +75,7 @@ void mtk_mdp_comp_clock_on(struct device *dev, struct mtk_mdp_comp *comp) } for (i = 0; i < ARRAY_SIZE(comp->clk); i++) { - if (!comp->clk[i]) + if (IS_ERR(comp->clk[i])) continue; err = clk_prepare_enable(comp->clk[i]); if (err) @@ -90,7 +90,7 @@ void mtk_mdp_comp_clock_off(struct device *dev, struct mtk_mdp_comp *comp) int i; for (i = 0; i < ARRAY_SIZE(comp->clk); i++) { - if (!comp->clk[i]) + if (IS_ERR(comp->clk[i])) continue; clk_disable_unprepare(comp->clk[i]); } @@ -134,15 +134,13 @@ int mtk_mdp_comp_init(struct device *dev, struct device_node *node, larb_node = of_parse_phandle(node, "mediatek,larb", 0); if (!larb_node) { dev_err(dev, - "Missing mediadek,larb phandle in %s node\n", - node->full_name); + "Missing mediadek,larb phandle in %pOF node\n", node); return -EINVAL; } larb_pdev = of_find_device_by_node(larb_node); if (!larb_pdev) { - dev_warn(dev, "Waiting for larb device %s\n", - larb_node->full_name); + dev_warn(dev, "Waiting for larb device %pOF\n", larb_node); of_node_put(larb_node); return -EPROBE_DEFER; } diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c index 81347558b24a..bbb24fb95b95 100644 --- a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c +++ b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c @@ -137,16 +137,16 @@ static int mtk_mdp_probe(struct platform_device *pdev) continue; if (!of_device_is_available(node)) { - dev_err(dev, "Skipping disabled component %s\n", - node->full_name); + dev_err(dev, "Skipping disabled component %pOF\n", + node); continue; } comp_type = (enum mtk_mdp_comp_type)of_id->data; comp_id = mtk_mdp_comp_get_id(dev, node, comp_type); if (comp_id < 0) { - dev_warn(dev, "Skipping unknown component %s\n", - node->full_name); + dev_warn(dev, "Skipping unknown component %pOF\n", + node); continue; } diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c 
b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c index 13afe48b9dc5..583d47724ee8 100644 --- a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c +++ b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c @@ -621,7 +621,7 @@ static void mtk_mdp_m2m_buf_queue(struct vb2_buffer *vb) v4l2_m2m_buf_queue(ctx->m2m_ctx, to_vb2_v4l2_buffer(vb)); } -static struct vb2_ops mtk_mdp_m2m_qops = { +static const struct vb2_ops mtk_mdp_m2m_qops = { .queue_setup = mtk_mdp_m2m_queue_setup, .buf_prepare = mtk_mdp_m2m_buf_prepare, .buf_queue = mtk_mdp_m2m_buf_queue, @@ -1225,7 +1225,7 @@ static const struct v4l2_file_operations mtk_mdp_m2m_fops = { .mmap = v4l2_m2m_fop_mmap, }; -static struct v4l2_m2m_ops mtk_mdp_m2m_ops = { +static const struct v4l2_m2m_ops mtk_mdp_m2m_ops = { .device_run = mtk_mdp_m2m_device_run, .job_abort = mtk_mdp_m2m_job_abort, }; diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c index 1daee1207469..bc8349bc2e80 100644 --- a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c +++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c @@ -31,6 +31,7 @@ #define MAX_NUM_REF_FRAMES 8 #define VP9_MAX_FRM_BUF_NUM 9 #define VP9_MAX_FRM_BUF_NODE_NUM (VP9_MAX_FRM_BUF_NUM * 2) +#define VP9_SEG_ID_SZ 0x12000 /** * struct vp9_dram_buf - contains buffer info for vpu @@ -132,6 +133,7 @@ struct vp9_sf_ref_fb { * @frm_num : decoded frame number, include sub-frame count (AP-R, VPU-W) * @mv_buf : motion vector working buffer (AP-W, VPU-R) * @frm_refs : maintain three reference buffer info (AP-R/W, VPU-R/W) + * @seg_id_buf : segmentation map working buffer (AP-W, VPU-R) */ struct vdec_vp9_vsi { unsigned char sf_bs_buf[VP9_SUPER_FRAME_BS_SZ]; @@ -167,11 +169,14 @@ struct vdec_vp9_vsi { struct vp9_dram_buf mv_buf; struct vp9_ref_buf frm_refs[REFS_PER_FRAME]; + struct vp9_dram_buf seg_id_buf; + }; /* * struct vdec_vp9_inst - vp9 decode instance * @mv_buf : working buffer for mv + * @seg_id_buf : working buffer for segmentation map * @dec_fb : vdec_fb node to link fb to different fb_xxx_list * @available_fb_node_list : current available vdec_fb node * @fb_use_list : current used or referenced vdec_fb @@ -187,6 +192,7 @@ struct vdec_vp9_vsi { */ struct vdec_vp9_inst { struct mtk_vcodec_mem mv_buf; + struct mtk_vcodec_mem seg_id_buf; struct vdec_fb_node dec_fb[VP9_MAX_FRM_BUF_NODE_NUM]; struct list_head available_fb_node_list; @@ -388,13 +394,11 @@ static bool vp9_alloc_work_buf(struct vdec_vp9_inst *inst) vsi->buf_h); mem = &inst->mv_buf; - if (mem->va) mtk_vcodec_mem_free(inst->ctx, mem); mem->size = ((vsi->buf_w / 64) * (vsi->buf_h / 64) + 2) * 36 * 16; - result = mtk_vcodec_mem_alloc(inst->ctx, mem); if (result) { mem->size = 0; @@ -406,6 +410,24 @@ static bool vp9_alloc_work_buf(struct vdec_vp9_inst *inst) vsi->mv_buf.pa = (unsigned long)mem->dma_addr; vsi->mv_buf.sz = (unsigned int)mem->size; + + mem = &inst->seg_id_buf; + if (mem->va) + mtk_vcodec_mem_free(inst->ctx, mem); + + mem->size = VP9_SEG_ID_SZ; + result = mtk_vcodec_mem_alloc(inst->ctx, mem); + if (result) { + mem->size = 0; + mtk_vcodec_err(inst, "Cannot allocate seg_id_buf"); + return false; + } + /* Set the va again */ + vsi->seg_id_buf.va = (unsigned long)mem->va; + vsi->seg_id_buf.pa = (unsigned long)mem->dma_addr; + vsi->seg_id_buf.sz = (unsigned int)mem->size; + + vp9_free_all_sf_ref_fb(inst); vsi->sf_next_ref_fb_idx = vp9_get_sf_ref_fb(inst); @@ -653,6 +675,12 @@ static void vp9_reset(struct vdec_vp9_inst *inst) inst->vsi->mv_buf.va = (unsigned long)inst->mv_buf.va; 
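The vdec_vp9 hunks above add a segmentation-map working buffer: the host allocates VP9_SEG_ID_SZ bytes, mirrors the CPU address, DMA address and size into the VSI structure shared with the VPU, and re-publishes them after a reset because the firmware-side copy is cleared. A rough, generic sketch of that allocate-and-publish pattern, using dma_alloc_coherent() and an invented descriptor layout rather than the MediaTek mtk_vcodec_mem helpers, might look like:

	#include <linux/device.h>
	#include <linux/dma-mapping.h>
	#include <linux/gfp.h>
	#include <linux/types.h>

	/* Hypothetical firmware-visible descriptor, in the spirit of vp9_dram_buf. */
	struct demo_fw_buf {
		u64 va;	/* host virtual address, kept for the host's own use */
		u64 pa;	/* DMA address the coprocessor will access */
		u32 sz;	/* buffer size in bytes */
	};

	/*
	 * Sketch: allocate a DMA-able working buffer and publish it to the
	 * firmware-shared descriptor. The firmware only needs pa/sz; va is
	 * recorded so the host can clear the buffer before each decode.
	 */
	static void *demo_alloc_work_buf(struct device *dev,
					 struct demo_fw_buf *desc, size_t size)
	{
		dma_addr_t dma;
		void *va = dma_alloc_coherent(dev, size, &dma, GFP_KERNEL);

		if (!va)
			return NULL;

		desc->va = (unsigned long)va;
		desc->pa = dma;
		desc->sz = size;
		return va;
	}

The memset() added in vdec_vp9_decode() uses the host mapping in just this way, clearing the buffer before each vpu_dec_start() call.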
inst->vsi->mv_buf.pa = (unsigned long)inst->mv_buf.dma_addr; inst->vsi->mv_buf.sz = (unsigned long)inst->mv_buf.size; + + /* Set the va again, since vpu_dec_reset will clear seg_id_buf in vpu */ + inst->vsi->seg_id_buf.va = (unsigned long)inst->seg_id_buf.va; + inst->vsi->seg_id_buf.pa = (unsigned long)inst->seg_id_buf.dma_addr; + inst->vsi->seg_id_buf.sz = (unsigned long)inst->seg_id_buf.size; + } static void init_all_fb_lists(struct vdec_vp9_inst *inst) @@ -752,6 +780,10 @@ static void vdec_vp9_deinit(unsigned long h_vdec) if (mem->va) mtk_vcodec_mem_free(inst->ctx, mem); + mem = &inst->seg_id_buf; + if (mem->va) + mtk_vcodec_mem_free(inst->ctx, mem); + vp9_free_all_sf_ref_fb(inst); vp9_free_inst(inst); } @@ -848,6 +880,7 @@ static int vdec_vp9_decode(unsigned long h_vdec, struct mtk_vcodec_mem *bs, vsi->sf_frm_sz[idx]); } } + memset(inst->seg_id_buf.va, 0, inst->seg_id_buf.size); ret = vpu_dec_start(&inst->vpu, data, 3); if (ret) { mtk_vcodec_err(inst, "vpu_dec_start failed"); diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c index 03e47e0f778d..4a2b1afa19c4 100644 --- a/drivers/media/platform/mx2_emmaprp.c +++ b/drivers/media/platform/mx2_emmaprp.c @@ -873,7 +873,7 @@ static const struct v4l2_file_operations emmaprp_fops = { .mmap = emmaprp_mmap, }; -static struct video_device emmaprp_videodev = { +static const struct video_device emmaprp_videodev = { .name = MEM2MEM_NAME, .fops = &emmaprp_fops, .ioctl_ops = &emmaprp_ioctl_ops, @@ -882,7 +882,7 @@ static struct video_device emmaprp_videodev = { .vfl_dir = VFL_DIR_M2M, }; -static struct v4l2_m2m_ops m2m_ops = { +static const struct v4l2_m2m_ops m2m_ops = { .device_run = emmaprp_device_run, .job_abort = emmaprp_job_abort, .lock = emmaprp_lock, @@ -942,6 +942,8 @@ static int emmaprp_probe(struct platform_device *pdev) platform_set_drvdata(pdev, pcdev); irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; ret = devm_request_irq(&pdev->dev, irq, emmaprp_irq, 0, dev_name(&pdev->dev), pcdev); if (ret) diff --git a/drivers/media/platform/omap/omap_vout_vrfb.c b/drivers/media/platform/omap/omap_vout_vrfb.c index 45a553d4f5b2..123c2b26a933 100644 --- a/drivers/media/platform/omap/omap_vout_vrfb.c +++ b/drivers/media/platform/omap/omap_vout_vrfb.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -233,7 +234,7 @@ int omap_vout_prepare_vrfb(struct omap_vout_device *vout, struct videobuf_buffer *vb) { struct dma_async_tx_descriptor *tx; - enum dma_ctrl_flags flags; + enum dma_ctrl_flags flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK; struct dma_chan *chan = vout->vrfb_dma_tx.chan; struct dma_device *dmadev = chan->device; struct dma_interleaved_template *xt = vout->vrfb_dma_tx.xt; diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c index 9df64c189883..1a428fe9f070 100644 --- a/drivers/media/platform/omap3isp/isp.c +++ b/drivers/media/platform/omap3isp/isp.c @@ -1859,6 +1859,7 @@ static void isp_cleanup_modules(struct isp_device *isp) omap3isp_ccdc_cleanup(isp); omap3isp_ccp2_cleanup(isp); omap3isp_csi2_cleanup(isp); + omap3isp_csiphy_cleanup(isp); } static int isp_initialize_modules(struct isp_device *isp) @@ -1868,7 +1869,7 @@ static int isp_initialize_modules(struct isp_device *isp) ret = omap3isp_csiphy_init(isp); if (ret < 0) { dev_err(isp->dev, "CSI PHY initialization failed\n"); - goto error_csiphy; + return ret; } ret = omap3isp_csi2_init(isp); @@ -1879,7 +1880,8 @@ static int isp_initialize_modules(struct isp_device *isp) ret = 
omap3isp_ccp2_init(isp); if (ret < 0) { - dev_err(isp->dev, "CCP2 initialization failed\n"); + if (ret != -EPROBE_DEFER) + dev_err(isp->dev, "CCP2 initialization failed\n"); goto error_ccp2; } @@ -1936,7 +1938,8 @@ static int isp_initialize_modules(struct isp_device *isp) error_ccp2: omap3isp_csi2_cleanup(isp); error_csi2: -error_csiphy: + omap3isp_csiphy_cleanup(isp); + return ret; } @@ -2015,13 +2018,14 @@ static int isp_fwnode_parse(struct device *dev, struct fwnode_handle *fwnode, struct v4l2_fwnode_endpoint vep; unsigned int i; int ret; + bool csi1 = false; ret = v4l2_fwnode_endpoint_parse(fwnode, &vep); if (ret) return ret; - dev_dbg(dev, "parsing endpoint %s, interface %u\n", - to_of_node(fwnode)->full_name, vep.base.port); + dev_dbg(dev, "parsing endpoint %pOF, interface %u\n", + to_of_node(fwnode), vep.base.port); switch (vep.base.port) { case ISP_OF_PHY_PARALLEL: @@ -2039,48 +2043,102 @@ static int isp_fwnode_parse(struct device *dev, struct fwnode_handle *fwnode, !!(vep.bus.parallel.flags & V4L2_MBUS_FIELD_EVEN_LOW); buscfg->bus.parallel.data_pol = !!(vep.bus.parallel.flags & V4L2_MBUS_DATA_ACTIVE_LOW); + buscfg->bus.parallel.bt656 = vep.bus_type == V4L2_MBUS_BT656; break; case ISP_OF_PHY_CSIPHY1: case ISP_OF_PHY_CSIPHY2: - /* FIXME: always assume CSI-2 for now. */ + switch (vep.bus_type) { + case V4L2_MBUS_CCP2: + case V4L2_MBUS_CSI1: + dev_dbg(dev, "CSI-1/CCP-2 configuration\n"); + csi1 = true; + break; + case V4L2_MBUS_CSI2: + dev_dbg(dev, "CSI-2 configuration\n"); + csi1 = false; + break; + default: + dev_err(dev, "unsupported bus type %u\n", + vep.bus_type); + return -EINVAL; + } + switch (vep.base.port) { case ISP_OF_PHY_CSIPHY1: - buscfg->interface = ISP_INTERFACE_CSI2C_PHY1; + if (csi1) + buscfg->interface = ISP_INTERFACE_CCP2B_PHY1; + else + buscfg->interface = ISP_INTERFACE_CSI2C_PHY1; break; case ISP_OF_PHY_CSIPHY2: - buscfg->interface = ISP_INTERFACE_CSI2A_PHY2; + if (csi1) + buscfg->interface = ISP_INTERFACE_CCP2B_PHY2; + else + buscfg->interface = ISP_INTERFACE_CSI2A_PHY2; break; } - buscfg->bus.csi2.lanecfg.clk.pos = vep.bus.mipi_csi2.clock_lane; - buscfg->bus.csi2.lanecfg.clk.pol = - vep.bus.mipi_csi2.lane_polarities[0]; - dev_dbg(dev, "clock lane polarity %u, pos %u\n", - buscfg->bus.csi2.lanecfg.clk.pol, - buscfg->bus.csi2.lanecfg.clk.pos); + if (csi1) { + buscfg->bus.ccp2.lanecfg.clk.pos = + vep.bus.mipi_csi1.clock_lane; + buscfg->bus.ccp2.lanecfg.clk.pol = + vep.bus.mipi_csi1.lane_polarity[0]; + dev_dbg(dev, "clock lane polarity %u, pos %u\n", + buscfg->bus.ccp2.lanecfg.clk.pol, + buscfg->bus.ccp2.lanecfg.clk.pos); - for (i = 0; i < ISP_CSIPHY2_NUM_DATA_LANES; i++) { - buscfg->bus.csi2.lanecfg.data[i].pos = - vep.bus.mipi_csi2.data_lanes[i]; - buscfg->bus.csi2.lanecfg.data[i].pol = - vep.bus.mipi_csi2.lane_polarities[i + 1]; - dev_dbg(dev, "data lane %u polarity %u, pos %u\n", i, - buscfg->bus.csi2.lanecfg.data[i].pol, - buscfg->bus.csi2.lanecfg.data[i].pos); + buscfg->bus.ccp2.lanecfg.data[0].pos = + vep.bus.mipi_csi1.data_lane; + buscfg->bus.ccp2.lanecfg.data[0].pol = + vep.bus.mipi_csi1.lane_polarity[1]; + + dev_dbg(dev, "data lane polarity %u, pos %u\n", + buscfg->bus.ccp2.lanecfg.data[0].pol, + buscfg->bus.ccp2.lanecfg.data[0].pos); + + buscfg->bus.ccp2.strobe_clk_pol = + vep.bus.mipi_csi1.clock_inv; + buscfg->bus.ccp2.phy_layer = vep.bus.mipi_csi1.strobe; + buscfg->bus.ccp2.ccp2_mode = + vep.bus_type == V4L2_MBUS_CCP2; + buscfg->bus.ccp2.vp_clk_pol = 1; + + buscfg->bus.ccp2.crc = 1; + } else { + buscfg->bus.csi2.lanecfg.clk.pos = + 
vep.bus.mipi_csi2.clock_lane; + buscfg->bus.csi2.lanecfg.clk.pol = + vep.bus.mipi_csi2.lane_polarities[0]; + dev_dbg(dev, "clock lane polarity %u, pos %u\n", + buscfg->bus.csi2.lanecfg.clk.pol, + buscfg->bus.csi2.lanecfg.clk.pos); + + buscfg->bus.csi2.num_data_lanes = + vep.bus.mipi_csi2.num_data_lanes; + + for (i = 0; i < buscfg->bus.csi2.num_data_lanes; i++) { + buscfg->bus.csi2.lanecfg.data[i].pos = + vep.bus.mipi_csi2.data_lanes[i]; + buscfg->bus.csi2.lanecfg.data[i].pol = + vep.bus.mipi_csi2.lane_polarities[i + 1]; + dev_dbg(dev, + "data lane %u polarity %u, pos %u\n", i, + buscfg->bus.csi2.lanecfg.data[i].pol, + buscfg->bus.csi2.lanecfg.data[i].pos); + } + /* + * FIXME: now we assume the CRC is always there. + * Implement a way to obtain this information from the + * sensor. Frame descriptors, perhaps? + */ + buscfg->bus.csi2.crc = 1; } - - /* - * FIXME: now we assume the CRC is always there. - * Implement a way to obtain this information from the - * sensor. Frame descriptors, perhaps? - */ - buscfg->bus.csi2.crc = 1; break; default: - dev_warn(dev, "%s: invalid interface %u\n", - to_of_node(fwnode)->full_name, vep.base.port); - break; + dev_warn(dev, "%pOF: invalid interface %u\n", + to_of_node(fwnode), vep.base.port); + return -EINVAL; } return 0; @@ -2105,10 +2163,12 @@ static int isp_fwnodes_parse(struct device *dev, if (!isd) goto error; - notifier->subdevs[notifier->num_subdevs] = &isd->asd; + if (isp_fwnode_parse(dev, fwnode, isd)) { + devm_kfree(dev, isd); + continue; + } - if (isp_fwnode_parse(dev, fwnode, isd)) - goto error; + notifier->subdevs[notifier->num_subdevs] = &isd->asd; isd->asd.match.fwnode.fwnode = fwnode_graph_get_remote_port_parent(fwnode); @@ -2128,26 +2188,12 @@ static int isp_fwnodes_parse(struct device *dev, return -EINVAL; } -static int isp_subdev_notifier_bound(struct v4l2_async_notifier *async, - struct v4l2_subdev *subdev, - struct v4l2_async_subdev *asd) -{ - struct isp_async_subdev *isd = - container_of(asd, struct isp_async_subdev, asd); - - isd->sd = subdev; - isd->sd->host_priv = &isd->bus; - - return 0; -} - static int isp_subdev_notifier_complete(struct v4l2_async_notifier *async) { struct isp_device *isp = container_of(async, struct isp_device, notifier); struct v4l2_device *v4l2_dev = &isp->v4l2_dev; struct v4l2_subdev *sd; - struct isp_bus_cfg *bus; int ret; ret = media_entity_enum_init(&isp->crashed, &isp->media_dev); @@ -2155,13 +2201,13 @@ static int isp_subdev_notifier_complete(struct v4l2_async_notifier *async) return ret; list_for_each_entry(sd, &v4l2_dev->subdevs, list) { - /* Only try to link entities whose interface was set on bound */ - if (sd->host_priv) { - bus = (struct isp_bus_cfg *)sd->host_priv; - ret = isp_link_entity(isp, &sd->entity, bus->interface); - if (ret < 0) - return ret; - } + if (!sd->asd) + continue; + + ret = isp_link_entity(isp, &sd->entity, + v4l2_subdev_to_bus_cfg(sd)->interface); + if (ret < 0) + return ret; } ret = v4l2_device_register_subdev_nodes(&isp->v4l2_dev); @@ -2339,7 +2385,6 @@ static int isp_probe(struct platform_device *pdev) if (ret < 0) goto error_register_entities; - isp->notifier.bound = isp_subdev_notifier_bound; isp->notifier.complete = isp_subdev_notifier_complete; ret = v4l2_async_notifier_register(&isp->v4l2_dev, &isp->notifier); diff --git a/drivers/media/platform/omap3isp/isp.h b/drivers/media/platform/omap3isp/isp.h index 2f2ae609c548..e528df6efc09 100644 --- a/drivers/media/platform/omap3isp/isp.h +++ b/drivers/media/platform/omap3isp/isp.h @@ -226,11 +226,13 @@ struct isp_device { 
}; struct isp_async_subdev { - struct v4l2_subdev *sd; struct isp_bus_cfg bus; struct v4l2_async_subdev asd; }; +#define v4l2_subdev_to_bus_cfg(sd) \ + (&container_of((sd)->asd, struct isp_async_subdev, asd)->bus) + #define v4l2_dev_to_isp_device(dev) \ container_of(dev, struct isp_device, v4l2_dev) diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/omap3isp/ispccdc.c index 7207558d722c..b66276ab5765 100644 --- a/drivers/media/platform/omap3isp/ispccdc.c +++ b/drivers/media/platform/omap3isp/ispccdc.c @@ -1139,15 +1139,11 @@ static void ccdc_configure(struct isp_ccdc_device *ccdc) pad = media_entity_remote_pad(&ccdc->pads[CCDC_PAD_SINK]); sensor = media_entity_to_v4l2_subdev(pad->entity); if (ccdc->input == CCDC_INPUT_PARALLEL) { - struct v4l2_mbus_config cfg; - int ret; + struct v4l2_subdev *sd = + to_isp_pipeline(&ccdc->subdev.entity)->external; - ret = v4l2_subdev_call(sensor, video, g_mbus_config, &cfg); - if (!ret) - ccdc->bt656 = cfg.type == V4L2_MBUS_BT656; - - parcfg = &((struct isp_bus_cfg *)sensor->host_priv) - ->bus.parallel; + parcfg = &v4l2_subdev_to_bus_cfg(sd)->bus.parallel; + ccdc->bt656 = parcfg->bt656; } /* CCDC_PAD_SINK */ @@ -2418,11 +2414,11 @@ static int ccdc_link_validate(struct v4l2_subdev *sd, /* We've got a parallel sensor here. */ if (ccdc->input == CCDC_INPUT_PARALLEL) { - struct isp_parallel_cfg *parcfg = - &((struct isp_bus_cfg *) - media_entity_to_v4l2_subdev(link->source->entity) - ->host_priv)->bus.parallel; - parallel_shift = parcfg->data_lane_shift; + struct v4l2_subdev *sd = + media_entity_to_v4l2_subdev(link->source->entity); + struct isp_bus_cfg *bus_cfg = v4l2_subdev_to_bus_cfg(sd); + + parallel_shift = bus_cfg->bus.parallel.data_lane_shift; } else { parallel_shift = 0; } diff --git a/drivers/media/platform/omap3isp/ispccp2.c b/drivers/media/platform/omap3isp/ispccp2.c index ca095238510d..e062939d0d05 100644 --- a/drivers/media/platform/omap3isp/ispccp2.c +++ b/drivers/media/platform/omap3isp/ispccp2.c @@ -213,14 +213,17 @@ static int ccp2_phyif_config(struct isp_ccp2_device *ccp2, struct isp_device *isp = to_isp_device(ccp2); u32 val; - /* CCP2B mode */ val = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL) | - ISPCCP2_CTRL_IO_OUT_SEL | ISPCCP2_CTRL_MODE; + ISPCCP2_CTRL_MODE; /* Data/strobe physical layer */ BIT_SET(val, ISPCCP2_CTRL_PHY_SEL_SHIFT, ISPCCP2_CTRL_PHY_SEL_MASK, buscfg->phy_layer); + BIT_SET(val, ISPCCP2_CTRL_IO_OUT_SEL_SHIFT, + ISPCCP2_CTRL_IO_OUT_SEL_MASK, buscfg->ccp2_mode); BIT_SET(val, ISPCCP2_CTRL_INV_SHIFT, ISPCCP2_CTRL_INV_MASK, buscfg->strobe_clk_pol); + BIT_SET(val, ISPCCP2_CTRL_VP_CLK_POL_SHIFT, + ISPCCP2_CTRL_VP_CLK_POL_MASK, buscfg->vp_clk_pol); isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL); val = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL); @@ -347,6 +350,7 @@ static void ccp2_lcx_config(struct isp_ccp2_device *ccp2, */ static int ccp2_if_configure(struct isp_ccp2_device *ccp2) { + struct isp_pipeline *pipe = to_isp_pipeline(&ccp2->subdev.entity); const struct isp_bus_cfg *buscfg; struct v4l2_mbus_framefmt *format; struct media_pad *pad; @@ -358,7 +362,7 @@ static int ccp2_if_configure(struct isp_ccp2_device *ccp2) pad = media_entity_remote_pad(&ccp2->pads[CCP2_PAD_SINK]); sensor = media_entity_to_v4l2_subdev(pad->entity); - buscfg = sensor->host_priv; + buscfg = v4l2_subdev_to_bus_cfg(pipe->external); ret = ccp2_phyif_config(ccp2, &buscfg->bus.ccp2); if (ret < 0) @@ -838,7 +842,7 @@ static int ccp2_s_stream(struct v4l2_subdev *sd, int enable) switch (enable) { 
case ISP_PIPELINE_STREAM_CONTINUOUS: if (ccp2->phy) { - ret = omap3isp_csiphy_acquire(ccp2->phy); + ret = omap3isp_csiphy_acquire(ccp2->phy, &sd->entity); if (ret < 0) return ret; } @@ -1137,10 +1141,16 @@ int omap3isp_ccp2_init(struct isp_device *isp) if (isp->revision == ISP_REVISION_2_0) { ccp2->vdds_csib = devm_regulator_get(isp->dev, "vdds_csib"); if (IS_ERR(ccp2->vdds_csib)) { + if (PTR_ERR(ccp2->vdds_csib) == -EPROBE_DEFER) { + dev_dbg(isp->dev, + "Can't get regulator vdds_csib, deferring probing\n"); + return -EPROBE_DEFER; + } dev_dbg(isp->dev, "Could not get regulator vdds_csib\n"); ccp2->vdds_csib = NULL; } + ccp2->phy = &isp->isp_csiphy2; } else if (isp->revision == ISP_REVISION_15_0) { ccp2->phy = &isp->isp_csiphy1; } diff --git a/drivers/media/platform/omap3isp/ispcsi2.c b/drivers/media/platform/omap3isp/ispcsi2.c index 7dae2fe0d42d..a4d3d030e81e 100644 --- a/drivers/media/platform/omap3isp/ispcsi2.c +++ b/drivers/media/platform/omap3isp/ispcsi2.c @@ -490,7 +490,7 @@ int omap3isp_csi2_reset(struct isp_csi2_device *csi2) if (!csi2->available) return -ENODEV; - if (csi2->phy->phy_in_use) + if (csi2->phy->entity) return -EBUSY; isp_reg_set(isp, csi2->regs1, ISPCSI2_SYSCONFIG, @@ -566,7 +566,7 @@ static int csi2_configure(struct isp_csi2_device *csi2) pad = media_entity_remote_pad(&csi2->pads[CSI2_PAD_SINK]); sensor = media_entity_to_v4l2_subdev(pad->entity); - buscfg = sensor->host_priv; + buscfg = v4l2_subdev_to_bus_cfg(pipe->external); csi2->frame_skip = 0; v4l2_subdev_call(sensor, sensor, g_skip_frames, &csi2->frame_skip); @@ -1053,7 +1053,7 @@ static int csi2_set_stream(struct v4l2_subdev *sd, int enable) switch (enable) { case ISP_PIPELINE_STREAM_CONTINUOUS: - if (omap3isp_csiphy_acquire(csi2->phy) < 0) + if (omap3isp_csiphy_acquire(csi2->phy, &sd->entity) < 0) return -ENODEV; if (csi2->output & CSI2_OUTPUT_MEMORY) omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_CSI2A_WRITE); diff --git a/drivers/media/platform/omap3isp/ispcsiphy.c b/drivers/media/platform/omap3isp/ispcsiphy.c index 871d4fe09c7f..a28fb79abaac 100644 --- a/drivers/media/platform/omap3isp/ispcsiphy.c +++ b/drivers/media/platform/omap3isp/ispcsiphy.c @@ -164,30 +164,28 @@ static int csiphy_set_power(struct isp_csiphy *phy, u32 power) static int omap3isp_csiphy_config(struct isp_csiphy *phy) { - struct isp_csi2_device *csi2 = phy->csi2; - struct isp_pipeline *pipe = to_isp_pipeline(&csi2->subdev.entity); - struct isp_bus_cfg *buscfg = pipe->external->host_priv; + struct isp_pipeline *pipe = to_isp_pipeline(phy->entity); + struct isp_bus_cfg *buscfg = v4l2_subdev_to_bus_cfg(pipe->external); struct isp_csiphy_lanes_cfg *lanes; int csi2_ddrclk_khz; - unsigned int used_lanes = 0; + unsigned int num_data_lanes, used_lanes = 0; unsigned int i; u32 reg; - if (!buscfg) { - struct isp_async_subdev *isd = - container_of(pipe->external->asd, - struct isp_async_subdev, asd); - buscfg = &isd->bus; + if (buscfg->interface == ISP_INTERFACE_CCP2B_PHY1 + || buscfg->interface == ISP_INTERFACE_CCP2B_PHY2) { + lanes = &buscfg->bus.ccp2.lanecfg; + num_data_lanes = 1; + } else { + lanes = &buscfg->bus.csi2.lanecfg; + num_data_lanes = buscfg->bus.csi2.num_data_lanes; } - if (buscfg->interface == ISP_INTERFACE_CCP2B_PHY1 - || buscfg->interface == ISP_INTERFACE_CCP2B_PHY2) - lanes = &buscfg->bus.ccp2.lanecfg; - else - lanes = &buscfg->bus.csi2.lanecfg; + if (num_data_lanes > phy->num_data_lanes) + return -EINVAL; /* Clock and data lanes verification */ - for (i = 0; i < phy->num_data_lanes; i++) { + for (i = 0; i < num_data_lanes; i++) { if 
(lanes->data[i].pol > 1 || lanes->data[i].pos > 3) return -EINVAL; @@ -216,7 +214,7 @@ static int omap3isp_csiphy_config(struct isp_csiphy *phy) csi2_ddrclk_khz = pipe->external_rate / 1000 / (2 * hweight32(used_lanes)) * pipe->external_width; - reg = isp_reg_readl(csi2->isp, phy->phy_regs, ISPCSIPHY_REG0); + reg = isp_reg_readl(phy->isp, phy->phy_regs, ISPCSIPHY_REG0); reg &= ~(ISPCSIPHY_REG0_THS_TERM_MASK | ISPCSIPHY_REG0_THS_SETTLE_MASK); @@ -227,9 +225,9 @@ static int omap3isp_csiphy_config(struct isp_csiphy *phy) reg |= (DIV_ROUND_UP(90 * csi2_ddrclk_khz, 1000000) + 3) << ISPCSIPHY_REG0_THS_SETTLE_SHIFT; - isp_reg_writel(csi2->isp, reg, phy->phy_regs, ISPCSIPHY_REG0); + isp_reg_writel(phy->isp, reg, phy->phy_regs, ISPCSIPHY_REG0); - reg = isp_reg_readl(csi2->isp, phy->phy_regs, ISPCSIPHY_REG1); + reg = isp_reg_readl(phy->isp, phy->phy_regs, ISPCSIPHY_REG1); reg &= ~(ISPCSIPHY_REG1_TCLK_TERM_MASK | ISPCSIPHY_REG1_TCLK_MISS_MASK | @@ -238,12 +236,12 @@ static int omap3isp_csiphy_config(struct isp_csiphy *phy) reg |= TCLK_MISS << ISPCSIPHY_REG1_TCLK_MISS_SHIFT; reg |= TCLK_SETTLE << ISPCSIPHY_REG1_TCLK_SETTLE_SHIFT; - isp_reg_writel(csi2->isp, reg, phy->phy_regs, ISPCSIPHY_REG1); + isp_reg_writel(phy->isp, reg, phy->phy_regs, ISPCSIPHY_REG1); /* DPHY lane configuration */ - reg = isp_reg_readl(csi2->isp, phy->cfg_regs, ISPCSI2_PHY_CFG); + reg = isp_reg_readl(phy->isp, phy->cfg_regs, ISPCSI2_PHY_CFG); - for (i = 0; i < phy->num_data_lanes; i++) { + for (i = 0; i < num_data_lanes; i++) { reg &= ~(ISPCSI2_PHY_CFG_DATA_POL_MASK(i + 1) | ISPCSI2_PHY_CFG_DATA_POSITION_MASK(i + 1)); reg |= (lanes->data[i].pol << @@ -257,12 +255,12 @@ static int omap3isp_csiphy_config(struct isp_csiphy *phy) reg |= lanes->clk.pol << ISPCSI2_PHY_CFG_CLOCK_POL_SHIFT; reg |= lanes->clk.pos << ISPCSI2_PHY_CFG_CLOCK_POSITION_SHIFT; - isp_reg_writel(csi2->isp, reg, phy->cfg_regs, ISPCSI2_PHY_CFG); + isp_reg_writel(phy->isp, reg, phy->cfg_regs, ISPCSI2_PHY_CFG); return 0; } -int omap3isp_csiphy_acquire(struct isp_csiphy *phy) +int omap3isp_csiphy_acquire(struct isp_csiphy *phy, struct media_entity *entity) { int rval; @@ -282,20 +280,25 @@ int omap3isp_csiphy_acquire(struct isp_csiphy *phy) if (rval < 0) goto done; + phy->entity = entity; + rval = omap3isp_csiphy_config(phy); if (rval < 0) goto done; - rval = csiphy_set_power(phy, ISPCSI2_PHY_CFG_PWR_CMD_ON); - if (rval) { - regulator_disable(phy->vdd); - goto done; + if (phy->isp->revision == ISP_REVISION_15_0) { + rval = csiphy_set_power(phy, ISPCSI2_PHY_CFG_PWR_CMD_ON); + if (rval) { + regulator_disable(phy->vdd); + goto done; + } + + csiphy_power_autoswitch_enable(phy, true); } - - csiphy_power_autoswitch_enable(phy, true); - phy->phy_in_use = 1; - done: + if (rval < 0) + phy->entity = NULL; + mutex_unlock(&phy->mutex); return rval; } @@ -303,18 +306,19 @@ int omap3isp_csiphy_acquire(struct isp_csiphy *phy) void omap3isp_csiphy_release(struct isp_csiphy *phy) { mutex_lock(&phy->mutex); - if (phy->phy_in_use) { - struct isp_csi2_device *csi2 = phy->csi2; - struct isp_pipeline *pipe = - to_isp_pipeline(&csi2->subdev.entity); - struct isp_bus_cfg *buscfg = pipe->external->host_priv; + if (phy->entity) { + struct isp_pipeline *pipe = to_isp_pipeline(phy->entity); + struct isp_bus_cfg *buscfg = + v4l2_subdev_to_bus_cfg(pipe->external); csiphy_routing_cfg(phy, buscfg->interface, false, buscfg->bus.ccp2.phy_layer); - csiphy_power_autoswitch_enable(phy, false); - csiphy_set_power(phy, ISPCSI2_PHY_CFG_PWR_CMD_OFF); + if (phy->isp->revision == ISP_REVISION_15_0) { + 
csiphy_power_autoswitch_enable(phy, false); + csiphy_set_power(phy, ISPCSI2_PHY_CFG_PWR_CMD_OFF); + } regulator_disable(phy->vdd); - phy->phy_in_use = 0; + phy->entity = NULL; } mutex_unlock(&phy->mutex); } @@ -334,14 +338,21 @@ int omap3isp_csiphy_init(struct isp_device *isp) phy2->phy_regs = OMAP3_ISP_IOMEM_CSIPHY2; mutex_init(&phy2->mutex); + phy1->isp = isp; + mutex_init(&phy1->mutex); + if (isp->revision == ISP_REVISION_15_0) { - phy1->isp = isp; phy1->csi2 = &isp->isp_csi2c; phy1->num_data_lanes = ISP_CSIPHY1_NUM_DATA_LANES; phy1->cfg_regs = OMAP3_ISP_IOMEM_CSI2C_REGS1; phy1->phy_regs = OMAP3_ISP_IOMEM_CSIPHY1; - mutex_init(&phy1->mutex); } return 0; } + +void omap3isp_csiphy_cleanup(struct isp_device *isp) +{ + mutex_destroy(&isp->isp_csiphy1.mutex); + mutex_destroy(&isp->isp_csiphy2.mutex); +} diff --git a/drivers/media/platform/omap3isp/ispcsiphy.h b/drivers/media/platform/omap3isp/ispcsiphy.h index 28b63b28f9f7..91543a09b28a 100644 --- a/drivers/media/platform/omap3isp/ispcsiphy.h +++ b/drivers/media/platform/omap3isp/ispcsiphy.h @@ -25,9 +25,10 @@ struct regulator; struct isp_csiphy { struct isp_device *isp; struct mutex mutex; /* serialize csiphy configuration */ - u8 phy_in_use; struct isp_csi2_device *csi2; struct regulator *vdd; + /* the entity that acquired the phy */ + struct media_entity *entity; /* mem resources - enums as defined in enum isp_mem_resources */ unsigned int cfg_regs; @@ -36,8 +37,10 @@ struct isp_csiphy { u8 num_data_lanes; /* number of CSI2 Data Lanes supported */ }; -int omap3isp_csiphy_acquire(struct isp_csiphy *phy); +int omap3isp_csiphy_acquire(struct isp_csiphy *phy, + struct media_entity *entity); void omap3isp_csiphy_release(struct isp_csiphy *phy); int omap3isp_csiphy_init(struct isp_device *isp); +void omap3isp_csiphy_cleanup(struct isp_device *isp); #endif /* OMAP3_ISP_CSI_PHY_H */ diff --git a/drivers/media/platform/omap3isp/ispreg.h b/drivers/media/platform/omap3isp/ispreg.h index b5ea8da0b904..d08483919a77 100644 --- a/drivers/media/platform/omap3isp/ispreg.h +++ b/drivers/media/platform/omap3isp/ispreg.h @@ -87,6 +87,8 @@ #define ISPCCP2_CTRL_PHY_SEL_MASK 0x1 #define ISPCCP2_CTRL_PHY_SEL_SHIFT 1 #define ISPCCP2_CTRL_IO_OUT_SEL (1 << 2) +#define ISPCCP2_CTRL_IO_OUT_SEL_MASK 0x1 +#define ISPCCP2_CTRL_IO_OUT_SEL_SHIFT 2 #define ISPCCP2_CTRL_MODE (1 << 4) #define ISPCCP2_CTRL_VP_CLK_FORCE_ON (1 << 9) #define ISPCCP2_CTRL_INV (1 << 10) @@ -94,6 +96,8 @@ #define ISPCCP2_CTRL_INV_SHIFT 10 #define ISPCCP2_CTRL_VP_ONLY_EN (1 << 11) #define ISPCCP2_CTRL_VP_CLK_POL (1 << 12) +#define ISPCCP2_CTRL_VP_CLK_POL_MASK 0x1 +#define ISPCCP2_CTRL_VP_CLK_POL_SHIFT 12 #define ISPCCP2_CTRL_VPCLK_DIV_SHIFT 15 #define ISPCCP2_CTRL_VPCLK_DIV_MASK 0x1ffff /* [31:15] */ #define ISPCCP2_CTRL_VP_OUT_CTRL_SHIFT 8 /* 3430 bits */ diff --git a/drivers/media/platform/omap3isp/omap3isp.h b/drivers/media/platform/omap3isp/omap3isp.h index 443e8f7673e2..9fb4d5bce004 100644 --- a/drivers/media/platform/omap3isp/omap3isp.h +++ b/drivers/media/platform/omap3isp/omap3isp.h @@ -46,6 +46,7 @@ enum isp_interface_type { * 0 - Positive, 1 - Negative * @data_pol: Data polarity * 0 - Normal, 1 - One's complement + * @bt656: Data contain BT.656 embedded synchronization */ struct isp_parallel_cfg { unsigned int data_lane_shift:3; @@ -54,6 +55,7 @@ struct isp_parallel_cfg { unsigned int vs_pol:1; unsigned int fld_pol:1; unsigned int data_pol:1; + unsigned int bt656:1; }; enum { @@ -108,16 +110,20 @@ struct isp_ccp2_cfg { unsigned int ccp2_mode:1; unsigned int phy_layer:1; unsigned int 
vpclk_div:2; + unsigned int vp_clk_pol:1; struct isp_csiphy_lanes_cfg lanecfg; }; /** * struct isp_csi2_cfg - CSI2 interface configuration * @crc: Enable the cyclic redundancy check + * @lanecfg: CSI-2 lane configuration + * @num_data_lanes: The number of data lanes in use */ struct isp_csi2_cfg { unsigned crc:1; struct isp_csiphy_lanes_cfg lanecfg; + u8 num_data_lanes; }; struct isp_bus_cfg { diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c index 399095170b6e..edca993c2b1f 100644 --- a/drivers/media/platform/pxa_camera.c +++ b/drivers/media/platform/pxa_camera.c @@ -638,6 +638,9 @@ static unsigned int pxa_mbus_config_compatible(const struct v4l2_mbus_config *cf mipi_clock = common_flags & (V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK | V4L2_MBUS_CSI2_CONTINUOUS_CLOCK); return (!mipi_lanes || !mipi_clock) ? 0 : common_flags; + default: + WARN_ON(1); + return -EINVAL; } return 0; } @@ -1557,7 +1560,7 @@ static void pxac_vb2_stop_streaming(struct vb2_queue *vq) pxa_camera_wakeup(pcdev, buf, VB2_BUF_STATE_ERROR); } -static struct vb2_ops pxac_vb2_ops = { +static const struct vb2_ops pxac_vb2_ops = { .queue_setup = pxac_vb2_queue_setup, .buf_init = pxac_vb2_init, .buf_prepare = pxac_vb2_prepare, @@ -2097,7 +2100,7 @@ static const struct v4l2_ioctl_ops pxa_camera_ioctl_ops = { .vidioc_unsubscribe_event = v4l2_event_unsubscribe, }; -static struct v4l2_clk_ops pxa_camera_mclk_ops = { +static const struct v4l2_clk_ops pxa_camera_mclk_ops = { }; static const struct video_device pxa_camera_videodev_template = { @@ -2328,7 +2331,7 @@ static int pxa_camera_pdata_from_dt(struct device *dev, asd->match.fwnode.fwnode = of_fwnode_handle(remote); of_node_put(remote); } else { - dev_notice(dev, "no remote for %s\n", of_node_full_name(np)); + dev_notice(dev, "no remote for %pOF\n", np); } out: diff --git a/drivers/media/platform/qcom/camss-8x16/Makefile b/drivers/media/platform/qcom/camss-8x16/Makefile new file mode 100644 index 000000000000..3c4024fbb768 --- /dev/null +++ b/drivers/media/platform/qcom/camss-8x16/Makefile @@ -0,0 +1,11 @@ +# Makefile for Qualcomm CAMSS driver + +qcom-camss-objs += \ + camss.o \ + camss-csid.o \ + camss-csiphy.o \ + camss-ispif.o \ + camss-vfe.o \ + camss-video.o \ + +obj-$(CONFIG_VIDEO_QCOM_CAMSS) += qcom-camss.o diff --git a/drivers/media/platform/qcom/camss-8x16/camss-csid.c b/drivers/media/platform/qcom/camss-8x16/camss-csid.c new file mode 100644 index 000000000000..64df82817de3 --- /dev/null +++ b/drivers/media/platform/qcom/camss-8x16/camss-csid.c @@ -0,0 +1,1092 @@ +/* + * camss-csid.c + * + * Qualcomm MSM Camera Subsystem - CSID (CSI Decoder) Module + * + * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved. + * Copyright (C) 2015-2017 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
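/*
 * The new isp_parallel_cfg.bt656, isp_ccp2_cfg.vp_clk_pol and
 * isp_csi2_cfg.num_data_lanes fields above are normally populated by the
 * omap3isp core when it parses the sensor endpoint (outside this excerpt).
 * As an illustration only, with assumed values that are not taken from any
 * real board, populated bus configurations might look like this:
 */
static const struct isp_bus_cfg example_csi2_bus_cfg = {
	.interface = ISP_INTERFACE_CSI2A_PHY2,
	.bus.csi2 = {
		.crc = 1,
		.num_data_lanes = 2,		/* new field */
		.lanecfg = {
			.clk  = { .pos = 1, .pol = 0 },
			.data = {
				{ .pos = 2, .pol = 0 },
				{ .pos = 3, .pol = 0 },
			},
		},
	},
};

static const struct isp_bus_cfg example_bt656_bus_cfg = {
	.interface = ISP_INTERFACE_PARALLEL,
	.bus.parallel = {
		.data_lane_shift = 0,
		.bt656 = 1,			/* new embedded-sync flag */
	},
};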
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "camss-csid.h" +#include "camss.h" + +#define MSM_CSID_NAME "msm_csid" + +#define CAMSS_CSID_HW_VERSION 0x0 +#define CAMSS_CSID_CORE_CTRL_0 0x004 +#define CAMSS_CSID_CORE_CTRL_1 0x008 +#define CAMSS_CSID_RST_CMD 0x00c +#define CAMSS_CSID_CID_LUT_VC_n(n) (0x010 + 0x4 * (n)) +#define CAMSS_CSID_CID_n_CFG(n) (0x020 + 0x4 * (n)) +#define CAMSS_CSID_IRQ_CLEAR_CMD 0x060 +#define CAMSS_CSID_IRQ_MASK 0x064 +#define CAMSS_CSID_IRQ_STATUS 0x068 +#define CAMSS_CSID_TG_CTRL 0x0a0 +#define CAMSS_CSID_TG_CTRL_DISABLE 0xa06436 +#define CAMSS_CSID_TG_CTRL_ENABLE 0xa06437 +#define CAMSS_CSID_TG_VC_CFG 0x0a4 +#define CAMSS_CSID_TG_VC_CFG_H_BLANKING 0x3ff +#define CAMSS_CSID_TG_VC_CFG_V_BLANKING 0x7f +#define CAMSS_CSID_TG_DT_n_CGG_0(n) (0x0ac + 0xc * (n)) +#define CAMSS_CSID_TG_DT_n_CGG_1(n) (0x0b0 + 0xc * (n)) +#define CAMSS_CSID_TG_DT_n_CGG_2(n) (0x0b4 + 0xc * (n)) + +#define DATA_TYPE_EMBEDDED_DATA_8BIT 0x12 +#define DATA_TYPE_YUV422_8BIT 0x1e +#define DATA_TYPE_RAW_6BIT 0x28 +#define DATA_TYPE_RAW_8BIT 0x2a +#define DATA_TYPE_RAW_10BIT 0x2b +#define DATA_TYPE_RAW_12BIT 0x2c + +#define DECODE_FORMAT_UNCOMPRESSED_6_BIT 0x0 +#define DECODE_FORMAT_UNCOMPRESSED_8_BIT 0x1 +#define DECODE_FORMAT_UNCOMPRESSED_10_BIT 0x2 +#define DECODE_FORMAT_UNCOMPRESSED_12_BIT 0x3 + +#define CSID_RESET_TIMEOUT_MS 500 + +struct csid_fmts { + u32 code; + u8 data_type; + u8 decode_format; + u8 bpp; + u8 spp; /* bus samples per pixel */ +}; + +static const struct csid_fmts csid_input_fmts[] = { + { + MEDIA_BUS_FMT_UYVY8_2X8, + DATA_TYPE_YUV422_8BIT, + DECODE_FORMAT_UNCOMPRESSED_8_BIT, + 8, + 2, + }, + { + MEDIA_BUS_FMT_VYUY8_2X8, + DATA_TYPE_YUV422_8BIT, + DECODE_FORMAT_UNCOMPRESSED_8_BIT, + 8, + 2, + }, + { + MEDIA_BUS_FMT_YUYV8_2X8, + DATA_TYPE_YUV422_8BIT, + DECODE_FORMAT_UNCOMPRESSED_8_BIT, + 8, + 2, + }, + { + MEDIA_BUS_FMT_YVYU8_2X8, + DATA_TYPE_YUV422_8BIT, + DECODE_FORMAT_UNCOMPRESSED_8_BIT, + 8, + 2, + }, + { + MEDIA_BUS_FMT_SBGGR8_1X8, + DATA_TYPE_RAW_8BIT, + DECODE_FORMAT_UNCOMPRESSED_8_BIT, + 8, + 1, + }, + { + MEDIA_BUS_FMT_SGBRG8_1X8, + DATA_TYPE_RAW_8BIT, + DECODE_FORMAT_UNCOMPRESSED_8_BIT, + 8, + 1, + }, + { + MEDIA_BUS_FMT_SGRBG8_1X8, + DATA_TYPE_RAW_8BIT, + DECODE_FORMAT_UNCOMPRESSED_8_BIT, + 8, + 1, + }, + { + MEDIA_BUS_FMT_SRGGB8_1X8, + DATA_TYPE_RAW_8BIT, + DECODE_FORMAT_UNCOMPRESSED_8_BIT, + 8, + 1, + }, + { + MEDIA_BUS_FMT_SBGGR10_1X10, + DATA_TYPE_RAW_10BIT, + DECODE_FORMAT_UNCOMPRESSED_10_BIT, + 10, + 1, + }, + { + MEDIA_BUS_FMT_SGBRG10_1X10, + DATA_TYPE_RAW_10BIT, + DECODE_FORMAT_UNCOMPRESSED_10_BIT, + 10, + 1, + }, + { + MEDIA_BUS_FMT_SGRBG10_1X10, + DATA_TYPE_RAW_10BIT, + DECODE_FORMAT_UNCOMPRESSED_10_BIT, + 10, + 1, + }, + { + MEDIA_BUS_FMT_SRGGB10_1X10, + DATA_TYPE_RAW_10BIT, + DECODE_FORMAT_UNCOMPRESSED_10_BIT, + 10, + 1, + }, + { + MEDIA_BUS_FMT_SBGGR12_1X12, + DATA_TYPE_RAW_12BIT, + DECODE_FORMAT_UNCOMPRESSED_12_BIT, + 12, + 1, + }, + { + MEDIA_BUS_FMT_SGBRG12_1X12, + DATA_TYPE_RAW_12BIT, + DECODE_FORMAT_UNCOMPRESSED_12_BIT, + 12, + 1, + }, + { + MEDIA_BUS_FMT_SGRBG12_1X12, + DATA_TYPE_RAW_12BIT, + DECODE_FORMAT_UNCOMPRESSED_12_BIT, + 12, + 1, + }, + { + MEDIA_BUS_FMT_SRGGB12_1X12, + DATA_TYPE_RAW_12BIT, + DECODE_FORMAT_UNCOMPRESSED_12_BIT, + 12, + 1, + } +}; + +static const struct csid_fmts *csid_get_fmt_entry(u32 code) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(csid_input_fmts); i++) + if (code == csid_input_fmts[i].code) + return &csid_input_fmts[i]; + + WARN(1, "Unknown 
format\n"); + + return &csid_input_fmts[0]; +} + +/* + * csid_isr - CSID module interrupt handler + * @irq: Interrupt line + * @dev: CSID device + * + * Return IRQ_HANDLED on success + */ +static irqreturn_t csid_isr(int irq, void *dev) +{ + struct csid_device *csid = dev; + u32 value; + + value = readl_relaxed(csid->base + CAMSS_CSID_IRQ_STATUS); + writel_relaxed(value, csid->base + CAMSS_CSID_IRQ_CLEAR_CMD); + + if ((value >> 11) & 0x1) + complete(&csid->reset_complete); + + return IRQ_HANDLED; +} + +/* + * csid_set_clock_rates - Calculate and set clock rates on CSID module + * @csiphy: CSID device + */ +static int csid_set_clock_rates(struct csid_device *csid) +{ + struct device *dev = to_device_index(csid, csid->id); + u32 pixel_clock; + int i, j; + int ret; + + ret = camss_get_pixel_clock(&csid->subdev.entity, &pixel_clock); + if (ret) + pixel_clock = 0; + + for (i = 0; i < csid->nclocks; i++) { + struct camss_clock *clock = &csid->clock[i]; + + if (!strcmp(clock->name, "csi0") || + !strcmp(clock->name, "csi1")) { + u8 bpp = csid_get_fmt_entry( + csid->fmt[MSM_CSIPHY_PAD_SINK].code)->bpp; + u8 num_lanes = csid->phy.lane_cnt; + u64 min_rate = pixel_clock * bpp / (2 * num_lanes * 4); + long rate; + + camss_add_clock_margin(&min_rate); + + for (j = 0; j < clock->nfreqs; j++) + if (min_rate < clock->freq[j]) + break; + + if (j == clock->nfreqs) { + dev_err(dev, + "Pixel clock is too high for CSID\n"); + return -EINVAL; + } + + /* if sensor pixel clock is not available */ + /* set highest possible CSID clock rate */ + if (min_rate == 0) + j = clock->nfreqs - 1; + + rate = clk_round_rate(clock->clk, clock->freq[j]); + if (rate < 0) { + dev_err(dev, "clk round rate failed: %ld\n", + rate); + return -EINVAL; + } + + ret = clk_set_rate(clock->clk, rate); + if (ret < 0) { + dev_err(dev, "clk set rate failed: %d\n", ret); + return ret; + } + } + } + + return 0; +} + +/* + * csid_reset - Trigger reset on CSID module and wait to complete + * @csid: CSID device + * + * Return 0 on success or a negative error code otherwise + */ +static int csid_reset(struct csid_device *csid) +{ + unsigned long time; + + reinit_completion(&csid->reset_complete); + + writel_relaxed(0x7fff, csid->base + CAMSS_CSID_RST_CMD); + + time = wait_for_completion_timeout(&csid->reset_complete, + msecs_to_jiffies(CSID_RESET_TIMEOUT_MS)); + if (!time) { + dev_err(to_device_index(csid, csid->id), + "CSID reset timeout\n"); + return -EIO; + } + + return 0; +} + +/* + * csid_set_power - Power on/off CSID module + * @sd: CSID V4L2 subdevice + * @on: Requested power state + * + * Return 0 on success or a negative error code otherwise + */ +static int csid_set_power(struct v4l2_subdev *sd, int on) +{ + struct csid_device *csid = v4l2_get_subdevdata(sd); + struct device *dev = to_device_index(csid, csid->id); + int ret; + + if (on) { + u32 hw_version; + + ret = regulator_enable(csid->vdda); + if (ret < 0) + return ret; + + ret = csid_set_clock_rates(csid); + if (ret < 0) { + regulator_disable(csid->vdda); + return ret; + } + + ret = camss_enable_clocks(csid->nclocks, csid->clock, dev); + if (ret < 0) { + regulator_disable(csid->vdda); + return ret; + } + + enable_irq(csid->irq); + + ret = csid_reset(csid); + if (ret < 0) { + disable_irq(csid->irq); + camss_disable_clocks(csid->nclocks, csid->clock); + regulator_disable(csid->vdda); + return ret; + } + + hw_version = readl_relaxed(csid->base + CAMSS_CSID_HW_VERSION); + dev_dbg(dev, "CSID HW Version = 0x%08x\n", hw_version); + } else { + disable_irq(csid->irq); + 
camss_disable_clocks(csid->nclocks, csid->clock); + ret = regulator_disable(csid->vdda); + } + + return ret; +} + +/* + * csid_set_stream - Enable/disable streaming on CSID module + * @sd: CSID V4L2 subdevice + * @enable: Requested streaming state + * + * Main configuration of CSID module is also done here. + * + * Return 0 on success or a negative error code otherwise + */ +static int csid_set_stream(struct v4l2_subdev *sd, int enable) +{ + struct csid_device *csid = v4l2_get_subdevdata(sd); + struct csid_testgen_config *tg = &csid->testgen; + u32 val; + + if (enable) { + u8 vc = 0; /* Virtual Channel 0 */ + u8 cid = vc * 4; /* id of Virtual Channel and Data Type set */ + u8 dt, dt_shift, df; + int ret; + + ret = v4l2_ctrl_handler_setup(&csid->ctrls); + if (ret < 0) { + dev_err(to_device_index(csid, csid->id), + "could not sync v4l2 controls: %d\n", ret); + return ret; + } + + if (!tg->enabled && + !media_entity_remote_pad(&csid->pads[MSM_CSID_PAD_SINK])) + return -ENOLINK; + + dt = csid_get_fmt_entry(csid->fmt[MSM_CSID_PAD_SRC].code)-> + data_type; + + if (tg->enabled) { + /* Config Test Generator */ + struct v4l2_mbus_framefmt *f = + &csid->fmt[MSM_CSID_PAD_SRC]; + u8 bpp = csid_get_fmt_entry(f->code)->bpp; + u8 spp = csid_get_fmt_entry(f->code)->spp; + u32 num_bytes_per_line = f->width * bpp * spp / 8; + u32 num_lines = f->height; + + /* 31:24 V blank, 23:13 H blank, 3:2 num of active DT */ + /* 1:0 VC */ + val = ((CAMSS_CSID_TG_VC_CFG_V_BLANKING & 0xff) << 24) | + ((CAMSS_CSID_TG_VC_CFG_H_BLANKING & 0x7ff) << 13); + writel_relaxed(val, csid->base + CAMSS_CSID_TG_VC_CFG); + + /* 28:16 bytes per lines, 12:0 num of lines */ + val = ((num_bytes_per_line & 0x1fff) << 16) | + (num_lines & 0x1fff); + writel_relaxed(val, csid->base + + CAMSS_CSID_TG_DT_n_CGG_0(0)); + + /* 5:0 data type */ + val = dt; + writel_relaxed(val, csid->base + + CAMSS_CSID_TG_DT_n_CGG_1(0)); + + /* 2:0 output test pattern */ + val = tg->payload_mode; + writel_relaxed(val, csid->base + + CAMSS_CSID_TG_DT_n_CGG_2(0)); + } else { + struct csid_phy_config *phy = &csid->phy; + + val = phy->lane_cnt - 1; + val |= phy->lane_assign << 4; + + writel_relaxed(val, + csid->base + CAMSS_CSID_CORE_CTRL_0); + + val = phy->csiphy_id << 17; + val |= 0x9; + + writel_relaxed(val, + csid->base + CAMSS_CSID_CORE_CTRL_1); + } + + /* Config LUT */ + + dt_shift = (cid % 4) * 8; + df = csid_get_fmt_entry(csid->fmt[MSM_CSID_PAD_SINK].code)-> + decode_format; + + val = readl_relaxed(csid->base + CAMSS_CSID_CID_LUT_VC_n(vc)); + val &= ~(0xff << dt_shift); + val |= dt << dt_shift; + writel_relaxed(val, csid->base + CAMSS_CSID_CID_LUT_VC_n(vc)); + + val = (df << 4) | 0x3; + writel_relaxed(val, csid->base + CAMSS_CSID_CID_n_CFG(cid)); + + if (tg->enabled) { + val = CAMSS_CSID_TG_CTRL_ENABLE; + writel_relaxed(val, csid->base + CAMSS_CSID_TG_CTRL); + } + } else { + if (tg->enabled) { + val = CAMSS_CSID_TG_CTRL_DISABLE; + writel_relaxed(val, csid->base + CAMSS_CSID_TG_CTRL); + } + } + + return 0; +} + +/* + * __csid_get_format - Get pointer to format structure + * @csid: CSID device + * @cfg: V4L2 subdev pad configuration + * @pad: pad from which format is requested + * @which: TRY or ACTIVE format + * + * Return pointer to TRY or ACTIVE format structure + */ +static struct v4l2_mbus_framefmt * +__csid_get_format(struct csid_device *csid, + struct v4l2_subdev_pad_config *cfg, + unsigned int pad, + enum v4l2_subdev_format_whence which) +{ + if (which == V4L2_SUBDEV_FORMAT_TRY) + return v4l2_subdev_get_try_format(&csid->subdev, cfg, pad); + + return 
&csid->fmt[pad]; +} + +/* + * csid_try_format - Handle try format by pad subdev method + * @csid: CSID device + * @cfg: V4L2 subdev pad configuration + * @pad: pad on which format is requested + * @fmt: pointer to v4l2 format structure + * @which: wanted subdev format + */ +static void csid_try_format(struct csid_device *csid, + struct v4l2_subdev_pad_config *cfg, + unsigned int pad, + struct v4l2_mbus_framefmt *fmt, + enum v4l2_subdev_format_whence which) +{ + unsigned int i; + + switch (pad) { + case MSM_CSID_PAD_SINK: + /* Set format on sink pad */ + + for (i = 0; i < ARRAY_SIZE(csid_input_fmts); i++) + if (fmt->code == csid_input_fmts[i].code) + break; + + /* If not found, use UYVY as default */ + if (i >= ARRAY_SIZE(csid_input_fmts)) + fmt->code = MEDIA_BUS_FMT_UYVY8_2X8; + + fmt->width = clamp_t(u32, fmt->width, 1, 8191); + fmt->height = clamp_t(u32, fmt->height, 1, 8191); + + fmt->field = V4L2_FIELD_NONE; + fmt->colorspace = V4L2_COLORSPACE_SRGB; + + break; + + case MSM_CSID_PAD_SRC: + if (csid->testgen_mode->cur.val == 0) { + /* Test generator is disabled, keep pad formats */ + /* in sync - set and return a format same as sink pad */ + struct v4l2_mbus_framefmt format; + + format = *__csid_get_format(csid, cfg, + MSM_CSID_PAD_SINK, which); + *fmt = format; + } else { + /* Test generator is enabled, set format on source*/ + /* pad to allow test generator usage */ + + for (i = 0; i < ARRAY_SIZE(csid_input_fmts); i++) + if (csid_input_fmts[i].code == fmt->code) + break; + + /* If not found, use UYVY as default */ + if (i >= ARRAY_SIZE(csid_input_fmts)) + fmt->code = MEDIA_BUS_FMT_UYVY8_2X8; + + fmt->width = clamp_t(u32, fmt->width, 1, 8191); + fmt->height = clamp_t(u32, fmt->height, 1, 8191); + + fmt->field = V4L2_FIELD_NONE; + } + break; + } + + fmt->colorspace = V4L2_COLORSPACE_SRGB; +} + +/* + * csid_enum_mbus_code - Handle pixel format enumeration + * @sd: CSID V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @code: pointer to v4l2_subdev_mbus_code_enum structure + * return -EINVAL or zero on success + */ +static int csid_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_mbus_code_enum *code) +{ + struct csid_device *csid = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt *format; + + if (code->pad == MSM_CSID_PAD_SINK) { + if (code->index >= ARRAY_SIZE(csid_input_fmts)) + return -EINVAL; + + code->code = csid_input_fmts[code->index].code; + } else { + if (csid->testgen_mode->cur.val == 0) { + if (code->index > 0) + return -EINVAL; + + format = __csid_get_format(csid, cfg, MSM_CSID_PAD_SINK, + code->which); + + code->code = format->code; + } else { + if (code->index >= ARRAY_SIZE(csid_input_fmts)) + return -EINVAL; + + code->code = csid_input_fmts[code->index].code; + } + } + + return 0; +} + +/* + * csid_enum_frame_size - Handle frame size enumeration + * @sd: CSID V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @fse: pointer to v4l2_subdev_frame_size_enum structure + * return -EINVAL or zero on success + */ +static int csid_enum_frame_size(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_frame_size_enum *fse) +{ + struct csid_device *csid = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt format; + + if (fse->index != 0) + return -EINVAL; + + format.code = fse->code; + format.width = 1; + format.height = 1; + csid_try_format(csid, cfg, fse->pad, &format, fse->which); + fse->min_width = format.width; + fse->min_height = format.height; + + if (format.code != fse->code) + 
return -EINVAL; + + format.code = fse->code; + format.width = -1; + format.height = -1; + csid_try_format(csid, cfg, fse->pad, &format, fse->which); + fse->max_width = format.width; + fse->max_height = format.height; + + return 0; +} + +/* + * csid_get_format - Handle get format by pads subdev method + * @sd: CSID V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @fmt: pointer to v4l2 subdev format structure + * + * Return -EINVAL or zero on success + */ +static int csid_get_format(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct csid_device *csid = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt *format; + + format = __csid_get_format(csid, cfg, fmt->pad, fmt->which); + if (format == NULL) + return -EINVAL; + + fmt->format = *format; + + return 0; +} + +/* + * csid_set_format - Handle set format by pads subdev method + * @sd: CSID V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @fmt: pointer to v4l2 subdev format structure + * + * Return -EINVAL or zero on success + */ +static int csid_set_format(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct csid_device *csid = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt *format; + + format = __csid_get_format(csid, cfg, fmt->pad, fmt->which); + if (format == NULL) + return -EINVAL; + + csid_try_format(csid, cfg, fmt->pad, &fmt->format, fmt->which); + *format = fmt->format; + + /* Propagate the format from sink to source */ + if (fmt->pad == MSM_CSID_PAD_SINK) { + format = __csid_get_format(csid, cfg, MSM_CSID_PAD_SRC, + fmt->which); + + *format = fmt->format; + csid_try_format(csid, cfg, MSM_CSID_PAD_SRC, format, + fmt->which); + } + + return 0; +} + +/* + * csid_init_formats - Initialize formats on all pads + * @sd: CSID V4L2 subdevice + * @fh: V4L2 subdev file handle + * + * Initialize all pad formats with default values. + * + * Return 0 on success or a negative error code otherwise + */ +static int csid_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) +{ + struct v4l2_subdev_format format = { + .pad = MSM_CSID_PAD_SINK, + .which = fh ? V4L2_SUBDEV_FORMAT_TRY : + V4L2_SUBDEV_FORMAT_ACTIVE, + .format = { + .code = MEDIA_BUS_FMT_UYVY8_2X8, + .width = 1920, + .height = 1080 + } + }; + + return csid_set_format(sd, fh ? 
fh->pad : NULL, &format); +} + +static const char * const csid_test_pattern_menu[] = { + "Disabled", + "Incrementing", + "Alternating 0x55/0xAA", + "All Zeros 0x00", + "All Ones 0xFF", + "Pseudo-random Data", +}; + +/* + * csid_set_test_pattern - Set test generator's pattern mode + * @csid: CSID device + * @value: desired test pattern mode + * + * Return 0 on success or a negative error code otherwise + */ +static int csid_set_test_pattern(struct csid_device *csid, s32 value) +{ + struct csid_testgen_config *tg = &csid->testgen; + + /* If CSID is linked to CSIPHY, do not allow to enable test generator */ + if (value && media_entity_remote_pad(&csid->pads[MSM_CSID_PAD_SINK])) + return -EBUSY; + + tg->enabled = !!value; + + switch (value) { + case 1: + tg->payload_mode = CSID_PAYLOAD_MODE_INCREMENTING; + break; + case 2: + tg->payload_mode = CSID_PAYLOAD_MODE_ALTERNATING_55_AA; + break; + case 3: + tg->payload_mode = CSID_PAYLOAD_MODE_ALL_ZEROES; + break; + case 4: + tg->payload_mode = CSID_PAYLOAD_MODE_ALL_ONES; + break; + case 5: + tg->payload_mode = CSID_PAYLOAD_MODE_RANDOM; + break; + } + + return 0; +} + +/* + * csid_s_ctrl - Handle set control subdev method + * @ctrl: pointer to v4l2 control structure + * + * Return 0 on success or a negative error code otherwise + */ +static int csid_s_ctrl(struct v4l2_ctrl *ctrl) +{ + struct csid_device *csid = container_of(ctrl->handler, + struct csid_device, ctrls); + int ret = -EINVAL; + + switch (ctrl->id) { + case V4L2_CID_TEST_PATTERN: + ret = csid_set_test_pattern(csid, ctrl->val); + break; + } + + return ret; +} + +static const struct v4l2_ctrl_ops csid_ctrl_ops = { + .s_ctrl = csid_s_ctrl, +}; + +/* + * msm_csid_subdev_init - Initialize CSID device structure and resources + * @csid: CSID device + * @res: CSID module resources table + * @id: CSID module id + * + * Return 0 on success or a negative error code otherwise + */ +int msm_csid_subdev_init(struct csid_device *csid, + const struct resources *res, u8 id) +{ + struct device *dev = to_device_index(csid, id); + struct platform_device *pdev = to_platform_device(dev); + struct resource *r; + int i, j; + int ret; + + csid->id = id; + + /* Memory */ + + r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[0]); + csid->base = devm_ioremap_resource(dev, r); + if (IS_ERR(csid->base)) { + dev_err(dev, "could not map memory\n"); + return PTR_ERR(csid->base); + } + + /* Interrupt */ + + r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, + res->interrupt[0]); + if (!r) { + dev_err(dev, "missing IRQ\n"); + return -EINVAL; + } + + csid->irq = r->start; + snprintf(csid->irq_name, sizeof(csid->irq_name), "%s_%s%d", + dev_name(dev), MSM_CSID_NAME, csid->id); + ret = devm_request_irq(dev, csid->irq, csid_isr, + IRQF_TRIGGER_RISING, csid->irq_name, csid); + if (ret < 0) { + dev_err(dev, "request_irq failed: %d\n", ret); + return ret; + } + + disable_irq(csid->irq); + + /* Clocks */ + + csid->nclocks = 0; + while (res->clock[csid->nclocks]) + csid->nclocks++; + + csid->clock = devm_kzalloc(dev, csid->nclocks * sizeof(*csid->clock), + GFP_KERNEL); + if (!csid->clock) + return -ENOMEM; + + for (i = 0; i < csid->nclocks; i++) { + struct camss_clock *clock = &csid->clock[i]; + + clock->clk = devm_clk_get(dev, res->clock[i]); + if (IS_ERR(clock->clk)) + return PTR_ERR(clock->clk); + + clock->name = res->clock[i]; + + clock->nfreqs = 0; + while (res->clock_rate[i][clock->nfreqs]) + clock->nfreqs++; + + if (!clock->nfreqs) { + clock->freq = NULL; + continue; + } + + clock->freq = devm_kzalloc(dev, 
clock->nfreqs * + sizeof(*clock->freq), GFP_KERNEL); + if (!clock->freq) + return -ENOMEM; + + for (j = 0; j < clock->nfreqs; j++) + clock->freq[j] = res->clock_rate[i][j]; + } + + /* Regulator */ + + csid->vdda = devm_regulator_get(dev, res->regulator[0]); + if (IS_ERR(csid->vdda)) { + dev_err(dev, "could not get regulator\n"); + return PTR_ERR(csid->vdda); + } + + init_completion(&csid->reset_complete); + + return 0; +} + +/* + * msm_csid_get_csid_id - Get CSID HW module id + * @entity: Pointer to CSID media entity structure + * @id: Return CSID HW module id here + */ +void msm_csid_get_csid_id(struct media_entity *entity, u8 *id) +{ + struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity); + struct csid_device *csid = v4l2_get_subdevdata(sd); + + *id = csid->id; +} + +/* + * csid_get_lane_assign - Calculate CSI2 lane assign configuration parameter + * @lane_cfg - CSI2 lane configuration + * + * Return lane assign + */ +static u32 csid_get_lane_assign(struct csiphy_lanes_cfg *lane_cfg) +{ + u32 lane_assign = 0; + int i; + + for (i = 0; i < lane_cfg->num_data; i++) + lane_assign |= lane_cfg->data[i].pos << (i * 4); + + return lane_assign; +} + +/* + * csid_link_setup - Setup CSID connections + * @entity: Pointer to media entity structure + * @local: Pointer to local pad + * @remote: Pointer to remote pad + * @flags: Link flags + * + * Return 0 on success + */ +static int csid_link_setup(struct media_entity *entity, + const struct media_pad *local, + const struct media_pad *remote, u32 flags) +{ + if (flags & MEDIA_LNK_FL_ENABLED) + if (media_entity_remote_pad(local)) + return -EBUSY; + + if ((local->flags & MEDIA_PAD_FL_SINK) && + (flags & MEDIA_LNK_FL_ENABLED)) { + struct v4l2_subdev *sd; + struct csid_device *csid; + struct csiphy_device *csiphy; + struct csiphy_lanes_cfg *lane_cfg; + struct v4l2_subdev_format format = { 0 }; + + sd = media_entity_to_v4l2_subdev(entity); + csid = v4l2_get_subdevdata(sd); + + /* If test generator is enabled */ + /* do not allow a link from CSIPHY to CSID */ + if (csid->testgen_mode->cur.val != 0) + return -EBUSY; + + sd = media_entity_to_v4l2_subdev(remote->entity); + csiphy = v4l2_get_subdevdata(sd); + + /* If a sensor is not linked to CSIPHY */ + /* do no allow a link from CSIPHY to CSID */ + if (!csiphy->cfg.csi2) + return -EPERM; + + csid->phy.csiphy_id = csiphy->id; + + lane_cfg = &csiphy->cfg.csi2->lane_cfg; + csid->phy.lane_cnt = lane_cfg->num_data; + csid->phy.lane_assign = csid_get_lane_assign(lane_cfg); + + /* Reset format on source pad to sink pad format */ + format.pad = MSM_CSID_PAD_SRC; + format.which = V4L2_SUBDEV_FORMAT_ACTIVE; + csid_set_format(&csid->subdev, NULL, &format); + } + + return 0; +} + +static const struct v4l2_subdev_core_ops csid_core_ops = { + .s_power = csid_set_power, +}; + +static const struct v4l2_subdev_video_ops csid_video_ops = { + .s_stream = csid_set_stream, +}; + +static const struct v4l2_subdev_pad_ops csid_pad_ops = { + .enum_mbus_code = csid_enum_mbus_code, + .enum_frame_size = csid_enum_frame_size, + .get_fmt = csid_get_format, + .set_fmt = csid_set_format, +}; + +static const struct v4l2_subdev_ops csid_v4l2_ops = { + .core = &csid_core_ops, + .video = &csid_video_ops, + .pad = &csid_pad_ops, +}; + +static const struct v4l2_subdev_internal_ops csid_v4l2_internal_ops = { + .open = csid_init_formats, +}; + +static const struct media_entity_operations csid_media_ops = { + .link_setup = csid_link_setup, + .link_validate = v4l2_subdev_link_validate, +}; + +/* + * msm_csid_register_entity - Register subdev 
node for CSID module + * @csid: CSID device + * @v4l2_dev: V4L2 device + * + * Return 0 on success or a negative error code otherwise + */ +int msm_csid_register_entity(struct csid_device *csid, + struct v4l2_device *v4l2_dev) +{ + struct v4l2_subdev *sd = &csid->subdev; + struct media_pad *pads = csid->pads; + struct device *dev = to_device_index(csid, csid->id); + int ret; + + v4l2_subdev_init(sd, &csid_v4l2_ops); + sd->internal_ops = &csid_v4l2_internal_ops; + sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d", + MSM_CSID_NAME, csid->id); + v4l2_set_subdevdata(sd, csid); + + ret = v4l2_ctrl_handler_init(&csid->ctrls, 1); + if (ret < 0) { + dev_err(dev, "Failed to init ctrl handler: %d\n", ret); + return ret; + } + + csid->testgen_mode = v4l2_ctrl_new_std_menu_items(&csid->ctrls, + &csid_ctrl_ops, V4L2_CID_TEST_PATTERN, + ARRAY_SIZE(csid_test_pattern_menu) - 1, 0, 0, + csid_test_pattern_menu); + + if (csid->ctrls.error) { + dev_err(dev, "Failed to init ctrl: %d\n", csid->ctrls.error); + ret = csid->ctrls.error; + goto free_ctrl; + } + + csid->subdev.ctrl_handler = &csid->ctrls; + + ret = csid_init_formats(sd, NULL); + if (ret < 0) { + dev_err(dev, "Failed to init format: %d\n", ret); + goto free_ctrl; + } + + pads[MSM_CSID_PAD_SINK].flags = MEDIA_PAD_FL_SINK; + pads[MSM_CSID_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE; + + sd->entity.function = MEDIA_ENT_F_IO_V4L; + sd->entity.ops = &csid_media_ops; + ret = media_entity_pads_init(&sd->entity, MSM_CSID_PADS_NUM, pads); + if (ret < 0) { + dev_err(dev, "Failed to init media entity: %d\n", ret); + goto free_ctrl; + } + + ret = v4l2_device_register_subdev(v4l2_dev, sd); + if (ret < 0) { + dev_err(dev, "Failed to register subdev: %d\n", ret); + goto media_cleanup; + } + + return 0; + +media_cleanup: + media_entity_cleanup(&sd->entity); +free_ctrl: + v4l2_ctrl_handler_free(&csid->ctrls); + + return ret; +} + +/* + * msm_csid_unregister_entity - Unregister CSID module subdev node + * @csid: CSID device + */ +void msm_csid_unregister_entity(struct csid_device *csid) +{ + v4l2_device_unregister_subdev(&csid->subdev); + media_entity_cleanup(&csid->subdev.entity); + v4l2_ctrl_handler_free(&csid->ctrls); +} diff --git a/drivers/media/platform/qcom/camss-8x16/camss-csid.h b/drivers/media/platform/qcom/camss-8x16/camss-csid.h new file mode 100644 index 000000000000..8682d3081bc3 --- /dev/null +++ b/drivers/media/platform/qcom/camss-8x16/camss-csid.h @@ -0,0 +1,82 @@ +/* + * camss-csid.h + * + * Qualcomm MSM Camera Subsystem - CSID (CSI Decoder) Module + * + * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved. + * Copyright (C) 2015-2017 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
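/*
 * msm_csid_register_entity() above exposes the CSID test generator as a
 * standard V4L2_CID_TEST_PATTERN menu control on the subdev node, and
 * csid_set_test_pattern() refuses to enable it while an enabled CSIPHY->CSID
 * link exists. A minimal user-space sketch; the /dev/v4l-subdev1 path is an
 * assumption, the actual node depends on probe order:
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_control ctrl = {
		.id = V4L2_CID_TEST_PATTERN,
		.value = 1,			/* "Incrementing" pattern */
	};
	int fd = open("/dev/v4l-subdev1", O_RDWR);

	if (fd < 0 || ioctl(fd, VIDIOC_S_CTRL, &ctrl) < 0) {
		perror("CSID test pattern");
		return 1;
	}

	close(fd);
	return 0;
}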
+ */ +#ifndef QC_MSM_CAMSS_CSID_H +#define QC_MSM_CAMSS_CSID_H + +#include +#include +#include +#include +#include +#include + +#define MSM_CSID_PAD_SINK 0 +#define MSM_CSID_PAD_SRC 1 +#define MSM_CSID_PADS_NUM 2 + +enum csid_payload_mode { + CSID_PAYLOAD_MODE_INCREMENTING = 0, + CSID_PAYLOAD_MODE_ALTERNATING_55_AA = 1, + CSID_PAYLOAD_MODE_ALL_ZEROES = 2, + CSID_PAYLOAD_MODE_ALL_ONES = 3, + CSID_PAYLOAD_MODE_RANDOM = 4, + CSID_PAYLOAD_MODE_USER_SPECIFIED = 5, +}; + +struct csid_testgen_config { + u8 enabled; + enum csid_payload_mode payload_mode; +}; + +struct csid_phy_config { + u8 csiphy_id; + u8 lane_cnt; + u32 lane_assign; +}; + +struct csid_device { + u8 id; + struct v4l2_subdev subdev; + struct media_pad pads[MSM_CSID_PADS_NUM]; + void __iomem *base; + u32 irq; + char irq_name[30]; + struct camss_clock *clock; + int nclocks; + struct regulator *vdda; + struct completion reset_complete; + struct csid_testgen_config testgen; + struct csid_phy_config phy; + struct v4l2_mbus_framefmt fmt[MSM_CSID_PADS_NUM]; + struct v4l2_ctrl_handler ctrls; + struct v4l2_ctrl *testgen_mode; +}; + +struct resources; + +int msm_csid_subdev_init(struct csid_device *csid, + const struct resources *res, u8 id); + +int msm_csid_register_entity(struct csid_device *csid, + struct v4l2_device *v4l2_dev); + +void msm_csid_unregister_entity(struct csid_device *csid); + +void msm_csid_get_csid_id(struct media_entity *entity, u8 *id); + +#endif /* QC_MSM_CAMSS_CSID_H */ diff --git a/drivers/media/platform/qcom/camss-8x16/camss-csiphy.c b/drivers/media/platform/qcom/camss-8x16/camss-csiphy.c new file mode 100644 index 000000000000..072c6cf053f6 --- /dev/null +++ b/drivers/media/platform/qcom/camss-8x16/camss-csiphy.c @@ -0,0 +1,890 @@ +/* + * camss-csiphy.c + * + * Qualcomm MSM Camera Subsystem - CSIPHY Module + * + * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved. + * Copyright (C) 2016-2017 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
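/*
 * csid_phy_config.lane_assign above packs the physical position of each
 * CSI-2 data lane into one 4-bit nibble per lane, as computed by
 * csid_get_lane_assign() in camss-csid.c above. With hypothetical lane
 * positions (clock on 1, data lanes on 0, 2, 3 and 4):
 *
 *	pos[] = { 0, 2, 3, 4 }
 *	lane_assign = 0 << 0 | 2 << 4 | 3 << 8 | 4 << 12 = 0x4320
 *
 * csid_set_stream() then writes this value, shifted left by 4 and combined
 * with lane_cnt - 1, into CAMSS_CSID_CORE_CTRL_0.
 */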
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "camss-csiphy.h" +#include "camss.h" + +#define MSM_CSIPHY_NAME "msm_csiphy" + +#define CAMSS_CSI_PHY_LNn_CFG2(n) (0x004 + 0x40 * (n)) +#define CAMSS_CSI_PHY_LNn_CFG3(n) (0x008 + 0x40 * (n)) +#define CAMSS_CSI_PHY_GLBL_RESET 0x140 +#define CAMSS_CSI_PHY_GLBL_PWR_CFG 0x144 +#define CAMSS_CSI_PHY_GLBL_IRQ_CMD 0x164 +#define CAMSS_CSI_PHY_HW_VERSION 0x188 +#define CAMSS_CSI_PHY_INTERRUPT_STATUSn(n) (0x18c + 0x4 * (n)) +#define CAMSS_CSI_PHY_INTERRUPT_MASKn(n) (0x1ac + 0x4 * (n)) +#define CAMSS_CSI_PHY_INTERRUPT_CLEARn(n) (0x1cc + 0x4 * (n)) +#define CAMSS_CSI_PHY_GLBL_T_INIT_CFG0 0x1ec +#define CAMSS_CSI_PHY_T_WAKEUP_CFG0 0x1f4 + +static const struct { + u32 code; + u8 bpp; +} csiphy_formats[] = { + { + MEDIA_BUS_FMT_UYVY8_2X8, + 8, + }, + { + MEDIA_BUS_FMT_VYUY8_2X8, + 8, + }, + { + MEDIA_BUS_FMT_YUYV8_2X8, + 8, + }, + { + MEDIA_BUS_FMT_YVYU8_2X8, + 8, + }, + { + MEDIA_BUS_FMT_SBGGR8_1X8, + 8, + }, + { + MEDIA_BUS_FMT_SGBRG8_1X8, + 8, + }, + { + MEDIA_BUS_FMT_SGRBG8_1X8, + 8, + }, + { + MEDIA_BUS_FMT_SRGGB8_1X8, + 8, + }, + { + MEDIA_BUS_FMT_SBGGR10_1X10, + 10, + }, + { + MEDIA_BUS_FMT_SGBRG10_1X10, + 10, + }, + { + MEDIA_BUS_FMT_SGRBG10_1X10, + 10, + }, + { + MEDIA_BUS_FMT_SRGGB10_1X10, + 10, + }, + { + MEDIA_BUS_FMT_SBGGR12_1X12, + 12, + }, + { + MEDIA_BUS_FMT_SGBRG12_1X12, + 12, + }, + { + MEDIA_BUS_FMT_SGRBG12_1X12, + 12, + }, + { + MEDIA_BUS_FMT_SRGGB12_1X12, + 12, + } +}; + +/* + * csiphy_get_bpp - map media bus format to bits per pixel + * @code: media bus format code + * + * Return number of bits per pixel + */ +static u8 csiphy_get_bpp(u32 code) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(csiphy_formats); i++) + if (code == csiphy_formats[i].code) + return csiphy_formats[i].bpp; + + WARN(1, "Unknown format\n"); + + return csiphy_formats[0].bpp; +} + +/* + * csiphy_isr - CSIPHY module interrupt handler + * @irq: Interrupt line + * @dev: CSIPHY device + * + * Return IRQ_HANDLED on success + */ +static irqreturn_t csiphy_isr(int irq, void *dev) +{ + struct csiphy_device *csiphy = dev; + u8 i; + + for (i = 0; i < 8; i++) { + u8 val = readl_relaxed(csiphy->base + + CAMSS_CSI_PHY_INTERRUPT_STATUSn(i)); + writel_relaxed(val, csiphy->base + + CAMSS_CSI_PHY_INTERRUPT_CLEARn(i)); + writel_relaxed(0x1, csiphy->base + CAMSS_CSI_PHY_GLBL_IRQ_CMD); + writel_relaxed(0x0, csiphy->base + CAMSS_CSI_PHY_GLBL_IRQ_CMD); + writel_relaxed(0x0, csiphy->base + + CAMSS_CSI_PHY_INTERRUPT_CLEARn(i)); + } + + return IRQ_HANDLED; +} + +/* + * csiphy_set_clock_rates - Calculate and set clock rates on CSIPHY module + * @csiphy: CSIPHY device + */ +static int csiphy_set_clock_rates(struct csiphy_device *csiphy) +{ + struct device *dev = to_device_index(csiphy, csiphy->id); + u32 pixel_clock; + int i, j; + int ret; + + ret = camss_get_pixel_clock(&csiphy->subdev.entity, &pixel_clock); + if (ret) + pixel_clock = 0; + + for (i = 0; i < csiphy->nclocks; i++) { + struct camss_clock *clock = &csiphy->clock[i]; + + if (!strcmp(clock->name, "csiphy0_timer") || + !strcmp(clock->name, "csiphy1_timer")) { + u8 bpp = csiphy_get_bpp( + csiphy->fmt[MSM_CSIPHY_PAD_SINK].code); + u8 num_lanes = csiphy->cfg.csi2->lane_cfg.num_data; + u64 min_rate = pixel_clock * bpp / (2 * num_lanes * 4); + long round_rate; + + camss_add_clock_margin(&min_rate); + + for (j = 0; j < clock->nfreqs; j++) + if (min_rate < clock->freq[j]) + break; + + if (j == clock->nfreqs) { + dev_err(dev, + "Pixel clock is too high for CSIPHY\n"); + 
return -EINVAL; + } + + /* if sensor pixel clock is not available */ + /* set highest possible CSIPHY clock rate */ + if (min_rate == 0) + j = clock->nfreqs - 1; + + round_rate = clk_round_rate(clock->clk, clock->freq[j]); + if (round_rate < 0) { + dev_err(dev, "clk round rate failed: %ld\n", + round_rate); + return -EINVAL; + } + + csiphy->timer_clk_rate = round_rate; + + ret = clk_set_rate(clock->clk, csiphy->timer_clk_rate); + if (ret < 0) { + dev_err(dev, "clk set rate failed: %d\n", ret); + return ret; + } + } + } + + return 0; +} + +/* + * csiphy_reset - Perform software reset on CSIPHY module + * @csiphy: CSIPHY device + */ +static void csiphy_reset(struct csiphy_device *csiphy) +{ + writel_relaxed(0x1, csiphy->base + CAMSS_CSI_PHY_GLBL_RESET); + usleep_range(5000, 8000); + writel_relaxed(0x0, csiphy->base + CAMSS_CSI_PHY_GLBL_RESET); +} + +/* + * csiphy_set_power - Power on/off CSIPHY module + * @sd: CSIPHY V4L2 subdevice + * @on: Requested power state + * + * Return 0 on success or a negative error code otherwise + */ +static int csiphy_set_power(struct v4l2_subdev *sd, int on) +{ + struct csiphy_device *csiphy = v4l2_get_subdevdata(sd); + struct device *dev = to_device_index(csiphy, csiphy->id); + + if (on) { + u8 hw_version; + int ret; + + ret = csiphy_set_clock_rates(csiphy); + if (ret < 0) + return ret; + + ret = camss_enable_clocks(csiphy->nclocks, csiphy->clock, dev); + if (ret < 0) + return ret; + + enable_irq(csiphy->irq); + + csiphy_reset(csiphy); + + hw_version = readl_relaxed(csiphy->base + + CAMSS_CSI_PHY_HW_VERSION); + dev_dbg(dev, "CSIPHY HW Version = 0x%02x\n", hw_version); + } else { + disable_irq(csiphy->irq); + + camss_disable_clocks(csiphy->nclocks, csiphy->clock); + } + + return 0; +} + +/* + * csiphy_get_lane_mask - Calculate CSI2 lane mask configuration parameter + * @lane_cfg - CSI2 lane configuration + * + * Return lane mask + */ +static u8 csiphy_get_lane_mask(struct csiphy_lanes_cfg *lane_cfg) +{ + u8 lane_mask; + int i; + + lane_mask = 1 << lane_cfg->clk.pos; + + for (i = 0; i < lane_cfg->num_data; i++) + lane_mask |= 1 << lane_cfg->data[i].pos; + + return lane_mask; +} + +/* + * csiphy_settle_cnt_calc - Calculate settle count value + * @csiphy: CSIPHY device + * + * Helper function to calculate settle count value. This is + * based on the CSI2 T_hs_settle parameter which in turn + * is calculated based on the CSI2 transmitter pixel clock + * frequency. 
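 *
 * As a worked example with assumed numbers (10 bpp on 4 data lanes, a
 * 200 MHz pixel clock and a 200 MHz CSIPHY timer clock):
 *
 *	mipi_clock   = 200 MHz * 10 / (2 * 4)                      = 250 MHz
 *	ui           = 10^12 ps / 250 MHz / 2                      = 2000 ps
 *	t_hs_settle  = ((85000 + 6 * ui) + (145000 + 10 * ui)) / 2 = 131000 ps
 *	timer_period = 10^12 ps / 200 MHz                          = 5000 ps
 *	settle_cnt   = 131000 / 5000                               = 26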
+ * + * Return settle count value or 0 if the CSI2 pixel clock + * frequency is not available + */ +static u8 csiphy_settle_cnt_calc(struct csiphy_device *csiphy) +{ + u8 bpp = csiphy_get_bpp( + csiphy->fmt[MSM_CSIPHY_PAD_SINK].code); + u8 num_lanes = csiphy->cfg.csi2->lane_cfg.num_data; + u32 pixel_clock; /* Hz */ + u32 mipi_clock; /* Hz */ + u32 ui; /* ps */ + u32 timer_period; /* ps */ + u32 t_hs_prepare_max; /* ps */ + u32 t_hs_prepare_zero_min; /* ps */ + u32 t_hs_settle; /* ps */ + u8 settle_cnt; + int ret; + + ret = camss_get_pixel_clock(&csiphy->subdev.entity, &pixel_clock); + if (ret) { + dev_err(to_device_index(csiphy, csiphy->id), + "Cannot get CSI2 transmitter's pixel clock\n"); + return 0; + } + if (!pixel_clock) { + dev_err(to_device_index(csiphy, csiphy->id), + "Got pixel clock == 0, cannot continue\n"); + return 0; + } + + mipi_clock = pixel_clock * bpp / (2 * num_lanes); + ui = div_u64(1000000000000LL, mipi_clock); + ui /= 2; + t_hs_prepare_max = 85000 + 6 * ui; + t_hs_prepare_zero_min = 145000 + 10 * ui; + t_hs_settle = (t_hs_prepare_max + t_hs_prepare_zero_min) / 2; + + timer_period = div_u64(1000000000000LL, csiphy->timer_clk_rate); + settle_cnt = t_hs_settle / timer_period; + + return settle_cnt; +} + +/* + * csiphy_stream_on - Enable streaming on CSIPHY module + * @csiphy: CSIPHY device + * + * Helper function to enable streaming on CSIPHY module. + * Main configuration of CSIPHY module is also done here. + * + * Return 0 on success or a negative error code otherwise + */ +static int csiphy_stream_on(struct csiphy_device *csiphy) +{ + struct csiphy_config *cfg = &csiphy->cfg; + u8 lane_mask = csiphy_get_lane_mask(&cfg->csi2->lane_cfg); + u8 settle_cnt; + u8 val; + int i = 0; + + settle_cnt = csiphy_settle_cnt_calc(csiphy); + if (!settle_cnt) + return -EINVAL; + + val = readl_relaxed(csiphy->base_clk_mux); + if (cfg->combo_mode && (lane_mask & 0x18) == 0x18) { + val &= ~0xf0; + val |= cfg->csid_id << 4; + } else { + val &= ~0xf; + val |= cfg->csid_id; + } + writel_relaxed(val, csiphy->base_clk_mux); + + writel_relaxed(0x1, csiphy->base + + CAMSS_CSI_PHY_GLBL_T_INIT_CFG0); + writel_relaxed(0x1, csiphy->base + + CAMSS_CSI_PHY_T_WAKEUP_CFG0); + + val = 0x1; + val |= lane_mask << 1; + writel_relaxed(val, csiphy->base + CAMSS_CSI_PHY_GLBL_PWR_CFG); + + val = cfg->combo_mode << 4; + writel_relaxed(val, csiphy->base + CAMSS_CSI_PHY_GLBL_RESET); + + while (lane_mask) { + if (lane_mask & 0x1) { + writel_relaxed(0x10, csiphy->base + + CAMSS_CSI_PHY_LNn_CFG2(i)); + writel_relaxed(settle_cnt, csiphy->base + + CAMSS_CSI_PHY_LNn_CFG3(i)); + writel_relaxed(0x3f, csiphy->base + + CAMSS_CSI_PHY_INTERRUPT_MASKn(i)); + writel_relaxed(0x3f, csiphy->base + + CAMSS_CSI_PHY_INTERRUPT_CLEARn(i)); + } + + lane_mask >>= 1; + i++; + } + + return 0; +} + +/* + * csiphy_stream_off - Disable streaming on CSIPHY module + * @csiphy: CSIPHY device + * + * Helper function to disable streaming on CSIPHY module + */ +static void csiphy_stream_off(struct csiphy_device *csiphy) +{ + u8 lane_mask = csiphy_get_lane_mask(&csiphy->cfg.csi2->lane_cfg); + int i = 0; + + while (lane_mask) { + if (lane_mask & 0x1) + writel_relaxed(0x0, csiphy->base + + CAMSS_CSI_PHY_LNn_CFG2(i)); + + lane_mask >>= 1; + i++; + } + + writel_relaxed(0x0, csiphy->base + CAMSS_CSI_PHY_GLBL_PWR_CFG); +} + + +/* + * csiphy_set_stream - Enable/disable streaming on CSIPHY module + * @sd: CSIPHY V4L2 subdevice + * @enable: Requested streaming state + * + * Return 0 on success or a negative error code otherwise + */ +static int 
csiphy_set_stream(struct v4l2_subdev *sd, int enable) +{ + struct csiphy_device *csiphy = v4l2_get_subdevdata(sd); + int ret = 0; + + if (enable) + ret = csiphy_stream_on(csiphy); + else + csiphy_stream_off(csiphy); + + return ret; +} + +/* + * __csiphy_get_format - Get pointer to format structure + * @csiphy: CSIPHY device + * @cfg: V4L2 subdev pad configuration + * @pad: pad from which format is requested + * @which: TRY or ACTIVE format + * + * Return pointer to TRY or ACTIVE format structure + */ +static struct v4l2_mbus_framefmt * +__csiphy_get_format(struct csiphy_device *csiphy, + struct v4l2_subdev_pad_config *cfg, + unsigned int pad, + enum v4l2_subdev_format_whence which) +{ + if (which == V4L2_SUBDEV_FORMAT_TRY) + return v4l2_subdev_get_try_format(&csiphy->subdev, cfg, pad); + + return &csiphy->fmt[pad]; +} + +/* + * csiphy_try_format - Handle try format by pad subdev method + * @csiphy: CSIPHY device + * @cfg: V4L2 subdev pad configuration + * @pad: pad on which format is requested + * @fmt: pointer to v4l2 format structure + * @which: wanted subdev format + */ +static void csiphy_try_format(struct csiphy_device *csiphy, + struct v4l2_subdev_pad_config *cfg, + unsigned int pad, + struct v4l2_mbus_framefmt *fmt, + enum v4l2_subdev_format_whence which) +{ + unsigned int i; + + switch (pad) { + case MSM_CSIPHY_PAD_SINK: + /* Set format on sink pad */ + + for (i = 0; i < ARRAY_SIZE(csiphy_formats); i++) + if (fmt->code == csiphy_formats[i].code) + break; + + /* If not found, use UYVY as default */ + if (i >= ARRAY_SIZE(csiphy_formats)) + fmt->code = MEDIA_BUS_FMT_UYVY8_2X8; + + fmt->width = clamp_t(u32, fmt->width, 1, 8191); + fmt->height = clamp_t(u32, fmt->height, 1, 8191); + + fmt->field = V4L2_FIELD_NONE; + fmt->colorspace = V4L2_COLORSPACE_SRGB; + + break; + + case MSM_CSIPHY_PAD_SRC: + /* Set and return a format same as sink pad */ + + *fmt = *__csiphy_get_format(csiphy, cfg, MSM_CSID_PAD_SINK, + which); + + break; + } +} + +/* + * csiphy_enum_mbus_code - Handle pixel format enumeration + * @sd: CSIPHY V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @code: pointer to v4l2_subdev_mbus_code_enum structure + * return -EINVAL or zero on success + */ +static int csiphy_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_mbus_code_enum *code) +{ + struct csiphy_device *csiphy = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt *format; + + if (code->pad == MSM_CSIPHY_PAD_SINK) { + if (code->index >= ARRAY_SIZE(csiphy_formats)) + return -EINVAL; + + code->code = csiphy_formats[code->index].code; + } else { + if (code->index > 0) + return -EINVAL; + + format = __csiphy_get_format(csiphy, cfg, MSM_CSIPHY_PAD_SINK, + code->which); + + code->code = format->code; + } + + return 0; +} + +/* + * csiphy_enum_frame_size - Handle frame size enumeration + * @sd: CSIPHY V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @fse: pointer to v4l2_subdev_frame_size_enum structure + * return -EINVAL or zero on success + */ +static int csiphy_enum_frame_size(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_frame_size_enum *fse) +{ + struct csiphy_device *csiphy = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt format; + + if (fse->index != 0) + return -EINVAL; + + format.code = fse->code; + format.width = 1; + format.height = 1; + csiphy_try_format(csiphy, cfg, fse->pad, &format, fse->which); + fse->min_width = format.width; + fse->min_height = format.height; + + if (format.code != fse->code) + 
return -EINVAL; + + format.code = fse->code; + format.width = -1; + format.height = -1; + csiphy_try_format(csiphy, cfg, fse->pad, &format, fse->which); + fse->max_width = format.width; + fse->max_height = format.height; + + return 0; +} + +/* + * csiphy_get_format - Handle get format by pads subdev method + * @sd: CSIPHY V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @fmt: pointer to v4l2 subdev format structure + * + * Return -EINVAL or zero on success + */ +static int csiphy_get_format(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct csiphy_device *csiphy = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt *format; + + format = __csiphy_get_format(csiphy, cfg, fmt->pad, fmt->which); + if (format == NULL) + return -EINVAL; + + fmt->format = *format; + + return 0; +} + +/* + * csiphy_set_format - Handle set format by pads subdev method + * @sd: CSIPHY V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @fmt: pointer to v4l2 subdev format structure + * + * Return -EINVAL or zero on success + */ +static int csiphy_set_format(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct csiphy_device *csiphy = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt *format; + + format = __csiphy_get_format(csiphy, cfg, fmt->pad, fmt->which); + if (format == NULL) + return -EINVAL; + + csiphy_try_format(csiphy, cfg, fmt->pad, &fmt->format, fmt->which); + *format = fmt->format; + + /* Propagate the format from sink to source */ + if (fmt->pad == MSM_CSIPHY_PAD_SINK) { + format = __csiphy_get_format(csiphy, cfg, MSM_CSIPHY_PAD_SRC, + fmt->which); + + *format = fmt->format; + csiphy_try_format(csiphy, cfg, MSM_CSIPHY_PAD_SRC, format, + fmt->which); + } + + return 0; +} + +/* + * csiphy_init_formats - Initialize formats on all pads + * @sd: CSIPHY V4L2 subdevice + * @fh: V4L2 subdev file handle + * + * Initialize all pad formats with default values. + * + * Return 0 on success or a negative error code otherwise + */ +static int csiphy_init_formats(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) +{ + struct v4l2_subdev_format format = { + .pad = MSM_CSIPHY_PAD_SINK, + .which = fh ? V4L2_SUBDEV_FORMAT_TRY : + V4L2_SUBDEV_FORMAT_ACTIVE, + .format = { + .code = MEDIA_BUS_FMT_UYVY8_2X8, + .width = 1920, + .height = 1080 + } + }; + + return csiphy_set_format(sd, fh ? 
fh->pad : NULL, &format); +} + +/* + * msm_csiphy_subdev_init - Initialize CSIPHY device structure and resources + * @csiphy: CSIPHY device + * @res: CSIPHY module resources table + * @id: CSIPHY module id + * + * Return 0 on success or a negative error code otherwise + */ +int msm_csiphy_subdev_init(struct csiphy_device *csiphy, + const struct resources *res, u8 id) +{ + struct device *dev = to_device_index(csiphy, id); + struct platform_device *pdev = to_platform_device(dev); + struct resource *r; + int i, j; + int ret; + + csiphy->id = id; + csiphy->cfg.combo_mode = 0; + + /* Memory */ + + r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[0]); + csiphy->base = devm_ioremap_resource(dev, r); + if (IS_ERR(csiphy->base)) { + dev_err(dev, "could not map memory\n"); + return PTR_ERR(csiphy->base); + } + + r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[1]); + csiphy->base_clk_mux = devm_ioremap_resource(dev, r); + if (IS_ERR(csiphy->base_clk_mux)) { + dev_err(dev, "could not map memory\n"); + return PTR_ERR(csiphy->base_clk_mux); + } + + /* Interrupt */ + + r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, + res->interrupt[0]); + if (!r) { + dev_err(dev, "missing IRQ\n"); + return -EINVAL; + } + + csiphy->irq = r->start; + snprintf(csiphy->irq_name, sizeof(csiphy->irq_name), "%s_%s%d", + dev_name(dev), MSM_CSIPHY_NAME, csiphy->id); + ret = devm_request_irq(dev, csiphy->irq, csiphy_isr, + IRQF_TRIGGER_RISING, csiphy->irq_name, csiphy); + if (ret < 0) { + dev_err(dev, "request_irq failed: %d\n", ret); + return ret; + } + + disable_irq(csiphy->irq); + + /* Clocks */ + + csiphy->nclocks = 0; + while (res->clock[csiphy->nclocks]) + csiphy->nclocks++; + + csiphy->clock = devm_kzalloc(dev, csiphy->nclocks * + sizeof(*csiphy->clock), GFP_KERNEL); + if (!csiphy->clock) + return -ENOMEM; + + for (i = 0; i < csiphy->nclocks; i++) { + struct camss_clock *clock = &csiphy->clock[i]; + + clock->clk = devm_clk_get(dev, res->clock[i]); + if (IS_ERR(clock->clk)) + return PTR_ERR(clock->clk); + + clock->name = res->clock[i]; + + clock->nfreqs = 0; + while (res->clock_rate[i][clock->nfreqs]) + clock->nfreqs++; + + if (!clock->nfreqs) { + clock->freq = NULL; + continue; + } + + clock->freq = devm_kzalloc(dev, clock->nfreqs * + sizeof(*clock->freq), GFP_KERNEL); + if (!clock->freq) + return -ENOMEM; + + for (j = 0; j < clock->nfreqs; j++) + clock->freq[j] = res->clock_rate[i][j]; + } + + return 0; +} + +/* + * csiphy_link_setup - Setup CSIPHY connections + * @entity: Pointer to media entity structure + * @local: Pointer to local pad + * @remote: Pointer to remote pad + * @flags: Link flags + * + * Rreturn 0 on success + */ +static int csiphy_link_setup(struct media_entity *entity, + const struct media_pad *local, + const struct media_pad *remote, u32 flags) +{ + if ((local->flags & MEDIA_PAD_FL_SOURCE) && + (flags & MEDIA_LNK_FL_ENABLED)) { + struct v4l2_subdev *sd; + struct csiphy_device *csiphy; + struct csid_device *csid; + + if (media_entity_remote_pad(local)) + return -EBUSY; + + sd = media_entity_to_v4l2_subdev(entity); + csiphy = v4l2_get_subdevdata(sd); + + sd = media_entity_to_v4l2_subdev(remote->entity); + csid = v4l2_get_subdevdata(sd); + + csiphy->cfg.csid_id = csid->id; + } + + return 0; +} + +static const struct v4l2_subdev_core_ops csiphy_core_ops = { + .s_power = csiphy_set_power, +}; + +static const struct v4l2_subdev_video_ops csiphy_video_ops = { + .s_stream = csiphy_set_stream, +}; + +static const struct v4l2_subdev_pad_ops csiphy_pad_ops = { + 
.enum_mbus_code = csiphy_enum_mbus_code, + .enum_frame_size = csiphy_enum_frame_size, + .get_fmt = csiphy_get_format, + .set_fmt = csiphy_set_format, +}; + +static const struct v4l2_subdev_ops csiphy_v4l2_ops = { + .core = &csiphy_core_ops, + .video = &csiphy_video_ops, + .pad = &csiphy_pad_ops, +}; + +static const struct v4l2_subdev_internal_ops csiphy_v4l2_internal_ops = { + .open = csiphy_init_formats, +}; + +static const struct media_entity_operations csiphy_media_ops = { + .link_setup = csiphy_link_setup, + .link_validate = v4l2_subdev_link_validate, +}; + +/* + * msm_csiphy_register_entity - Register subdev node for CSIPHY module + * @csiphy: CSIPHY device + * @v4l2_dev: V4L2 device + * + * Return 0 on success or a negative error code otherwise + */ +int msm_csiphy_register_entity(struct csiphy_device *csiphy, + struct v4l2_device *v4l2_dev) +{ + struct v4l2_subdev *sd = &csiphy->subdev; + struct media_pad *pads = csiphy->pads; + struct device *dev = to_device_index(csiphy, csiphy->id); + int ret; + + v4l2_subdev_init(sd, &csiphy_v4l2_ops); + sd->internal_ops = &csiphy_v4l2_internal_ops; + sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d", + MSM_CSIPHY_NAME, csiphy->id); + v4l2_set_subdevdata(sd, csiphy); + + ret = csiphy_init_formats(sd, NULL); + if (ret < 0) { + dev_err(dev, "Failed to init format: %d\n", ret); + return ret; + } + + pads[MSM_CSIPHY_PAD_SINK].flags = MEDIA_PAD_FL_SINK; + pads[MSM_CSIPHY_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE; + + sd->entity.function = MEDIA_ENT_F_IO_V4L; + sd->entity.ops = &csiphy_media_ops; + ret = media_entity_pads_init(&sd->entity, MSM_CSIPHY_PADS_NUM, pads); + if (ret < 0) { + dev_err(dev, "Failed to init media entity: %d\n", ret); + return ret; + } + + ret = v4l2_device_register_subdev(v4l2_dev, sd); + if (ret < 0) { + dev_err(dev, "Failed to register subdev: %d\n", ret); + media_entity_cleanup(&sd->entity); + } + + return ret; +} + +/* + * msm_csiphy_unregister_entity - Unregister CSIPHY module subdev node + * @csiphy: CSIPHY device + */ +void msm_csiphy_unregister_entity(struct csiphy_device *csiphy) +{ + v4l2_device_unregister_subdev(&csiphy->subdev); + media_entity_cleanup(&csiphy->subdev.entity); +} diff --git a/drivers/media/platform/qcom/camss-8x16/camss-csiphy.h b/drivers/media/platform/qcom/camss-8x16/camss-csiphy.h new file mode 100644 index 000000000000..ba8781122065 --- /dev/null +++ b/drivers/media/platform/qcom/camss-8x16/camss-csiphy.h @@ -0,0 +1,77 @@ +/* + * camss-csiphy.h + * + * Qualcomm MSM Camera Subsystem - CSIPHY Module + * + * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved. + * Copyright (C) 2016-2017 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef QC_MSM_CAMSS_CSIPHY_H +#define QC_MSM_CAMSS_CSIPHY_H + +#include +#include +#include +#include +#include + +#define MSM_CSIPHY_PAD_SINK 0 +#define MSM_CSIPHY_PAD_SRC 1 +#define MSM_CSIPHY_PADS_NUM 2 + +struct csiphy_lane { + u8 pos; + u8 pol; +}; + +struct csiphy_lanes_cfg { + int num_data; + struct csiphy_lane *data; + struct csiphy_lane clk; +}; + +struct csiphy_csi2_cfg { + struct csiphy_lanes_cfg lane_cfg; +}; + +struct csiphy_config { + u8 combo_mode; + u8 csid_id; + struct csiphy_csi2_cfg *csi2; +}; + +struct csiphy_device { + u8 id; + struct v4l2_subdev subdev; + struct media_pad pads[MSM_CSIPHY_PADS_NUM]; + void __iomem *base; + void __iomem *base_clk_mux; + u32 irq; + char irq_name[30]; + struct camss_clock *clock; + int nclocks; + u32 timer_clk_rate; + struct csiphy_config cfg; + struct v4l2_mbus_framefmt fmt[MSM_CSIPHY_PADS_NUM]; +}; + +struct resources; + +int msm_csiphy_subdev_init(struct csiphy_device *csiphy, + const struct resources *res, u8 id); + +int msm_csiphy_register_entity(struct csiphy_device *csiphy, + struct v4l2_device *v4l2_dev); + +void msm_csiphy_unregister_entity(struct csiphy_device *csiphy); + +#endif /* QC_MSM_CAMSS_CSIPHY_H */ diff --git a/drivers/media/platform/qcom/camss-8x16/camss-ispif.c b/drivers/media/platform/qcom/camss-8x16/camss-ispif.c new file mode 100644 index 000000000000..24da529397b5 --- /dev/null +++ b/drivers/media/platform/qcom/camss-8x16/camss-ispif.c @@ -0,0 +1,1175 @@ +/* + * camss-ispif.c + * + * Qualcomm MSM Camera Subsystem - ISPIF (ISP Interface) Module + * + * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. + * Copyright (C) 2015-2017 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "camss-ispif.h" +#include "camss.h" + +#define MSM_ISPIF_NAME "msm_ispif" + +#define ispif_line_array(ptr_line) \ + ((const struct ispif_line (*)[]) &(ptr_line[-(ptr_line->id)])) + +#define to_ispif(ptr_line) \ + container_of(ispif_line_array(ptr_line), struct ispif_device, ptr_line) + +#define ISPIF_RST_CMD_0 0x008 +#define ISPIF_RST_CMD_0_STROBED_RST_EN (1 << 0) +#define ISPIF_RST_CMD_0_MISC_LOGIC_RST (1 << 1) +#define ISPIF_RST_CMD_0_SW_REG_RST (1 << 2) +#define ISPIF_RST_CMD_0_PIX_INTF_0_CSID_RST (1 << 3) +#define ISPIF_RST_CMD_0_PIX_INTF_0_VFE_RST (1 << 4) +#define ISPIF_RST_CMD_0_PIX_INTF_1_CSID_RST (1 << 5) +#define ISPIF_RST_CMD_0_PIX_INTF_1_VFE_RST (1 << 6) +#define ISPIF_RST_CMD_0_RDI_INTF_0_CSID_RST (1 << 7) +#define ISPIF_RST_CMD_0_RDI_INTF_0_VFE_RST (1 << 8) +#define ISPIF_RST_CMD_0_RDI_INTF_1_CSID_RST (1 << 9) +#define ISPIF_RST_CMD_0_RDI_INTF_1_VFE_RST (1 << 10) +#define ISPIF_RST_CMD_0_RDI_INTF_2_CSID_RST (1 << 11) +#define ISPIF_RST_CMD_0_RDI_INTF_2_VFE_RST (1 << 12) +#define ISPIF_RST_CMD_0_PIX_OUTPUT_0_MISR_RST (1 << 16) +#define ISPIF_RST_CMD_0_RDI_OUTPUT_0_MISR_RST (1 << 17) +#define ISPIF_RST_CMD_0_RDI_OUTPUT_1_MISR_RST (1 << 18) +#define ISPIF_RST_CMD_0_RDI_OUTPUT_2_MISR_RST (1 << 19) +#define ISPIF_IRQ_GLOBAL_CLEAR_CMD 0x01c +#define ISPIF_VFE_m_CTRL_0(m) (0x200 + 0x200 * (m)) +#define ISPIF_VFE_m_CTRL_0_PIX0_LINE_BUF_EN (1 << 6) +#define ISPIF_VFE_m_IRQ_MASK_0(m) (0x208 + 0x200 * (m)) +#define ISPIF_VFE_m_IRQ_MASK_0_PIX0_ENABLE 0x00001249 +#define ISPIF_VFE_m_IRQ_MASK_0_PIX0_MASK 0x00001fff +#define ISPIF_VFE_m_IRQ_MASK_0_RDI0_ENABLE 0x02492000 +#define ISPIF_VFE_m_IRQ_MASK_0_RDI0_MASK 0x03ffe000 +#define ISPIF_VFE_m_IRQ_MASK_1(m) (0x20c + 0x200 * (m)) +#define ISPIF_VFE_m_IRQ_MASK_1_PIX1_ENABLE 0x00001249 +#define ISPIF_VFE_m_IRQ_MASK_1_PIX1_MASK 0x00001fff +#define ISPIF_VFE_m_IRQ_MASK_1_RDI1_ENABLE 0x02492000 +#define ISPIF_VFE_m_IRQ_MASK_1_RDI1_MASK 0x03ffe000 +#define ISPIF_VFE_m_IRQ_MASK_2(m) (0x210 + 0x200 * (m)) +#define ISPIF_VFE_m_IRQ_MASK_2_RDI2_ENABLE 0x00001249 +#define ISPIF_VFE_m_IRQ_MASK_2_RDI2_MASK 0x00001fff +#define ISPIF_VFE_m_IRQ_STATUS_0(m) (0x21c + 0x200 * (m)) +#define ISPIF_VFE_m_IRQ_STATUS_0_PIX0_OVERFLOW (1 << 12) +#define ISPIF_VFE_m_IRQ_STATUS_0_RDI0_OVERFLOW (1 << 25) +#define ISPIF_VFE_m_IRQ_STATUS_1(m) (0x220 + 0x200 * (m)) +#define ISPIF_VFE_m_IRQ_STATUS_1_PIX1_OVERFLOW (1 << 12) +#define ISPIF_VFE_m_IRQ_STATUS_1_RDI1_OVERFLOW (1 << 25) +#define ISPIF_VFE_m_IRQ_STATUS_2(m) (0x224 + 0x200 * (m)) +#define ISPIF_VFE_m_IRQ_STATUS_2_RDI2_OVERFLOW (1 << 12) +#define ISPIF_VFE_m_IRQ_CLEAR_0(m) (0x230 + 0x200 * (m)) +#define ISPIF_VFE_m_IRQ_CLEAR_1(m) (0x234 + 0x200 * (m)) +#define ISPIF_VFE_m_IRQ_CLEAR_2(m) (0x238 + 0x200 * (m)) +#define ISPIF_VFE_m_INTF_INPUT_SEL(m) (0x244 + 0x200 * (m)) +#define ISPIF_VFE_m_INTF_CMD_0(m) (0x248 + 0x200 * (m)) +#define ISPIF_VFE_m_INTF_CMD_1(m) (0x24c + 0x200 * (m)) +#define ISPIF_VFE_m_PIX_INTF_n_CID_MASK(m, n) \ + (0x254 + 0x200 * (m) + 0x4 * (n)) +#define ISPIF_VFE_m_RDI_INTF_n_CID_MASK(m, n) \ + (0x264 + 0x200 * (m) + 0x4 * (n)) +#define ISPIF_VFE_m_PIX_INTF_n_STATUS(m, n) \ + (0x2c0 + 0x200 * (m) + 0x4 * (n)) +#define ISPIF_VFE_m_RDI_INTF_n_STATUS(m, n) \ + (0x2d0 + 0x200 * (m) + 0x4 * (n)) + +#define CSI_PIX_CLK_MUX_SEL 0x000 +#define CSI_RDI_CLK_MUX_SEL 0x008 + +#define ISPIF_TIMEOUT_SLEEP_US 1000 +#define ISPIF_TIMEOUT_ALL_US 1000000 +#define ISPIF_RESET_TIMEOUT_MS 500 + +enum 
ispif_intf_cmd { + CMD_DISABLE_FRAME_BOUNDARY = 0x0, + CMD_ENABLE_FRAME_BOUNDARY = 0x1, + CMD_DISABLE_IMMEDIATELY = 0x2, + CMD_ALL_DISABLE_IMMEDIATELY = 0xaaaaaaaa, + CMD_ALL_NO_CHANGE = 0xffffffff, +}; + +static const u32 ispif_formats[] = { + MEDIA_BUS_FMT_UYVY8_2X8, + MEDIA_BUS_FMT_VYUY8_2X8, + MEDIA_BUS_FMT_YUYV8_2X8, + MEDIA_BUS_FMT_YVYU8_2X8, + MEDIA_BUS_FMT_SBGGR8_1X8, + MEDIA_BUS_FMT_SGBRG8_1X8, + MEDIA_BUS_FMT_SGRBG8_1X8, + MEDIA_BUS_FMT_SRGGB8_1X8, + MEDIA_BUS_FMT_SBGGR10_1X10, + MEDIA_BUS_FMT_SGBRG10_1X10, + MEDIA_BUS_FMT_SGRBG10_1X10, + MEDIA_BUS_FMT_SRGGB10_1X10, + MEDIA_BUS_FMT_SBGGR12_1X12, + MEDIA_BUS_FMT_SGBRG12_1X12, + MEDIA_BUS_FMT_SGRBG12_1X12, + MEDIA_BUS_FMT_SRGGB12_1X12, +}; + +/* + * ispif_isr - ISPIF module interrupt handler + * @irq: Interrupt line + * @dev: ISPIF device + * + * Return IRQ_HANDLED on success + */ +static irqreturn_t ispif_isr(int irq, void *dev) +{ + struct ispif_device *ispif = dev; + u32 value0, value1, value2; + + value0 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_0(0)); + value1 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_1(0)); + value2 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_2(0)); + + writel_relaxed(value0, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(0)); + writel_relaxed(value1, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(0)); + writel_relaxed(value2, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_2(0)); + + writel(0x1, ispif->base + ISPIF_IRQ_GLOBAL_CLEAR_CMD); + + if ((value0 >> 27) & 0x1) + complete(&ispif->reset_complete); + + if (unlikely(value0 & ISPIF_VFE_m_IRQ_STATUS_0_PIX0_OVERFLOW)) + dev_err_ratelimited(to_device(ispif), "VFE0 pix0 overflow\n"); + + if (unlikely(value0 & ISPIF_VFE_m_IRQ_STATUS_0_RDI0_OVERFLOW)) + dev_err_ratelimited(to_device(ispif), "VFE0 rdi0 overflow\n"); + + if (unlikely(value1 & ISPIF_VFE_m_IRQ_STATUS_1_PIX1_OVERFLOW)) + dev_err_ratelimited(to_device(ispif), "VFE0 pix1 overflow\n"); + + if (unlikely(value1 & ISPIF_VFE_m_IRQ_STATUS_1_RDI1_OVERFLOW)) + dev_err_ratelimited(to_device(ispif), "VFE0 rdi1 overflow\n"); + + if (unlikely(value2 & ISPIF_VFE_m_IRQ_STATUS_2_RDI2_OVERFLOW)) + dev_err_ratelimited(to_device(ispif), "VFE0 rdi2 overflow\n"); + + return IRQ_HANDLED; +} + +/* + * ispif_reset - Trigger reset on ISPIF module and wait to complete + * @ispif: ISPIF device + * + * Return 0 on success or a negative error code otherwise + */ +static int ispif_reset(struct ispif_device *ispif) +{ + unsigned long time; + u32 val; + int ret; + + ret = camss_enable_clocks(ispif->nclocks_for_reset, + ispif->clock_for_reset, + to_device(ispif)); + if (ret < 0) + return ret; + + reinit_completion(&ispif->reset_complete); + + val = ISPIF_RST_CMD_0_STROBED_RST_EN | + ISPIF_RST_CMD_0_MISC_LOGIC_RST | + ISPIF_RST_CMD_0_SW_REG_RST | + ISPIF_RST_CMD_0_PIX_INTF_0_CSID_RST | + ISPIF_RST_CMD_0_PIX_INTF_0_VFE_RST | + ISPIF_RST_CMD_0_PIX_INTF_1_CSID_RST | + ISPIF_RST_CMD_0_PIX_INTF_1_VFE_RST | + ISPIF_RST_CMD_0_RDI_INTF_0_CSID_RST | + ISPIF_RST_CMD_0_RDI_INTF_0_VFE_RST | + ISPIF_RST_CMD_0_RDI_INTF_1_CSID_RST | + ISPIF_RST_CMD_0_RDI_INTF_1_VFE_RST | + ISPIF_RST_CMD_0_RDI_INTF_2_CSID_RST | + ISPIF_RST_CMD_0_RDI_INTF_2_VFE_RST | + ISPIF_RST_CMD_0_PIX_OUTPUT_0_MISR_RST | + ISPIF_RST_CMD_0_RDI_OUTPUT_0_MISR_RST | + ISPIF_RST_CMD_0_RDI_OUTPUT_1_MISR_RST | + ISPIF_RST_CMD_0_RDI_OUTPUT_2_MISR_RST; + + writel_relaxed(val, ispif->base + ISPIF_RST_CMD_0); + + time = wait_for_completion_timeout(&ispif->reset_complete, + msecs_to_jiffies(ISPIF_RESET_TIMEOUT_MS)); + if (!time) { + dev_err(to_device(ispif), "ISPIF reset timeout\n"); + 
return -EIO; + } + + camss_disable_clocks(ispif->nclocks_for_reset, ispif->clock_for_reset); + + return 0; +} + +/* + * ispif_set_power - Power on/off ISPIF module + * @sd: ISPIF V4L2 subdevice + * @on: Requested power state + * + * Return 0 on success or a negative error code otherwise + */ +static int ispif_set_power(struct v4l2_subdev *sd, int on) +{ + struct ispif_line *line = v4l2_get_subdevdata(sd); + struct ispif_device *ispif = to_ispif(line); + struct device *dev = to_device(ispif); + int ret = 0; + + mutex_lock(&ispif->power_lock); + + if (on) { + if (ispif->power_count) { + /* Power is already on */ + ispif->power_count++; + goto exit; + } + + ret = camss_enable_clocks(ispif->nclocks, ispif->clock, dev); + if (ret < 0) + goto exit; + + ret = ispif_reset(ispif); + if (ret < 0) { + camss_disable_clocks(ispif->nclocks, ispif->clock); + goto exit; + } + + ispif->intf_cmd[line->vfe_id].cmd_0 = CMD_ALL_NO_CHANGE; + ispif->intf_cmd[line->vfe_id].cmd_1 = CMD_ALL_NO_CHANGE; + + ispif->power_count++; + } else { + if (ispif->power_count == 0) { + dev_err(dev, "ispif power off on power_count == 0\n"); + goto exit; + } else if (ispif->power_count == 1) { + camss_disable_clocks(ispif->nclocks, ispif->clock); + } + + ispif->power_count--; + } + +exit: + mutex_unlock(&ispif->power_lock); + + return ret; +} + +/* + * ispif_select_clk_mux - Select clock for PIX/RDI interface + * @ispif: ISPIF device + * @intf: VFE interface + * @csid: CSID HW module id + * @vfe: VFE HW module id + * @enable: enable or disable the selected clock + */ +static void ispif_select_clk_mux(struct ispif_device *ispif, + enum ispif_intf intf, u8 csid, + u8 vfe, u8 enable) +{ + u32 val; + + switch (intf) { + case PIX0: + val = readl_relaxed(ispif->base_clk_mux + CSI_PIX_CLK_MUX_SEL); + val &= ~(0xf << (vfe * 8)); + if (enable) + val |= (csid << (vfe * 8)); + writel_relaxed(val, ispif->base_clk_mux + CSI_PIX_CLK_MUX_SEL); + break; + + case RDI0: + val = readl_relaxed(ispif->base_clk_mux + CSI_RDI_CLK_MUX_SEL); + val &= ~(0xf << (vfe * 12)); + if (enable) + val |= (csid << (vfe * 12)); + writel_relaxed(val, ispif->base_clk_mux + CSI_RDI_CLK_MUX_SEL); + break; + + case PIX1: + val = readl_relaxed(ispif->base_clk_mux + CSI_PIX_CLK_MUX_SEL); + val &= ~(0xf << (4 + (vfe * 8))); + if (enable) + val |= (csid << (4 + (vfe * 8))); + writel_relaxed(val, ispif->base_clk_mux + CSI_PIX_CLK_MUX_SEL); + break; + + case RDI1: + val = readl_relaxed(ispif->base_clk_mux + CSI_RDI_CLK_MUX_SEL); + val &= ~(0xf << (4 + (vfe * 12))); + if (enable) + val |= (csid << (4 + (vfe * 12))); + writel_relaxed(val, ispif->base_clk_mux + CSI_RDI_CLK_MUX_SEL); + break; + + case RDI2: + val = readl_relaxed(ispif->base_clk_mux + CSI_RDI_CLK_MUX_SEL); + val &= ~(0xf << (8 + (vfe * 12))); + if (enable) + val |= (csid << (8 + (vfe * 12))); + writel_relaxed(val, ispif->base_clk_mux + CSI_RDI_CLK_MUX_SEL); + break; + } + + mb(); +} + +/* + * ispif_validate_intf_status - Validate current status of PIX/RDI interface + * @ispif: ISPIF device + * @intf: VFE interface + * @vfe: VFE HW module id + * + * Return 0 when interface is idle or -EBUSY otherwise + */ +static int ispif_validate_intf_status(struct ispif_device *ispif, + enum ispif_intf intf, u8 vfe) +{ + int ret = 0; + u32 val = 0; + + switch (intf) { + case PIX0: + val = readl_relaxed(ispif->base + + ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe, 0)); + break; + case RDI0: + val = readl_relaxed(ispif->base + + ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe, 0)); + break; + case PIX1: + val = readl_relaxed(ispif->base + + 
ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe, 1)); + break; + case RDI1: + val = readl_relaxed(ispif->base + + ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe, 1)); + break; + case RDI2: + val = readl_relaxed(ispif->base + + ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe, 2)); + break; + } + + if ((val & 0xf) != 0xf) { + dev_err(to_device(ispif), "%s: ispif is busy: 0x%x\n", + __func__, val); + ret = -EBUSY; + } + + return ret; +} + +/* + * ispif_wait_for_stop - Wait for PIX/RDI interface to stop + * @ispif: ISPIF device + * @intf: VFE interface + * @vfe: VFE HW module id + * + * Return 0 on success or a negative error code otherwise + */ +static int ispif_wait_for_stop(struct ispif_device *ispif, + enum ispif_intf intf, u8 vfe) +{ + u32 addr = 0; + u32 stop_flag = 0; + int ret; + + switch (intf) { + case PIX0: + addr = ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe, 0); + break; + case RDI0: + addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe, 0); + break; + case PIX1: + addr = ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe, 1); + break; + case RDI1: + addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe, 1); + break; + case RDI2: + addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe, 2); + break; + } + + ret = readl_poll_timeout(ispif->base + addr, + stop_flag, + (stop_flag & 0xf) == 0xf, + ISPIF_TIMEOUT_SLEEP_US, + ISPIF_TIMEOUT_ALL_US); + if (ret < 0) + dev_err(to_device(ispif), "%s: ispif stop timeout\n", + __func__); + + return ret; +} + +/* + * ispif_select_csid - Select CSID HW module for input from + * @ispif: ISPIF device + * @intf: VFE interface + * @csid: CSID HW module id + * @vfe: VFE HW module id + * @enable: enable or disable the selected input + */ +static void ispif_select_csid(struct ispif_device *ispif, enum ispif_intf intf, + u8 csid, u8 vfe, u8 enable) +{ + u32 val; + + val = readl_relaxed(ispif->base + ISPIF_VFE_m_INTF_INPUT_SEL(vfe)); + switch (intf) { + case PIX0: + val &= ~(BIT(1) | BIT(0)); + if (enable) + val |= csid; + break; + case RDI0: + val &= ~(BIT(5) | BIT(4)); + if (enable) + val |= (csid << 4); + break; + case PIX1: + val &= ~(BIT(9) | BIT(8)); + if (enable) + val |= (csid << 8); + break; + case RDI1: + val &= ~(BIT(13) | BIT(12)); + if (enable) + val |= (csid << 12); + break; + case RDI2: + val &= ~(BIT(21) | BIT(20)); + if (enable) + val |= (csid << 20); + break; + } + + writel(val, ispif->base + ISPIF_VFE_m_INTF_INPUT_SEL(vfe)); +} + +/* + * ispif_select_cid - Enable/disable desired CID + * @ispif: ISPIF device + * @intf: VFE interface + * @cid: desired CID to enable/disable + * @vfe: VFE HW module id + * @enable: enable or disable the desired CID + */ +static void ispif_select_cid(struct ispif_device *ispif, enum ispif_intf intf, + u8 cid, u8 vfe, u8 enable) +{ + u32 cid_mask = 1 << cid; + u32 addr = 0; + u32 val; + + switch (intf) { + case PIX0: + addr = ISPIF_VFE_m_PIX_INTF_n_CID_MASK(vfe, 0); + break; + case RDI0: + addr = ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe, 0); + break; + case PIX1: + addr = ISPIF_VFE_m_PIX_INTF_n_CID_MASK(vfe, 1); + break; + case RDI1: + addr = ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe, 1); + break; + case RDI2: + addr = ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe, 2); + break; + } + + val = readl_relaxed(ispif->base + addr); + if (enable) + val |= cid_mask; + else + val &= ~cid_mask; + + writel(val, ispif->base + addr); +} + +/* + * ispif_config_irq - Enable/disable interrupts for PIX/RDI interface + * @ispif: ISPIF device + * @intf: VFE interface + * @vfe: VFE HW module id + * @enable: enable or disable + */ +static void ispif_config_irq(struct ispif_device *ispif, enum ispif_intf intf, + u8 vfe, u8 enable) +{ + u32 val; + + switch 
(intf) { + case PIX0: + val = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_MASK_0(vfe)); + val &= ~ISPIF_VFE_m_IRQ_MASK_0_PIX0_MASK; + if (enable) + val |= ISPIF_VFE_m_IRQ_MASK_0_PIX0_ENABLE; + writel_relaxed(val, ispif->base + ISPIF_VFE_m_IRQ_MASK_0(vfe)); + writel_relaxed(ISPIF_VFE_m_IRQ_MASK_0_PIX0_ENABLE, + ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(vfe)); + break; + case RDI0: + val = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_MASK_0(vfe)); + val &= ~ISPIF_VFE_m_IRQ_MASK_0_RDI0_MASK; + if (enable) + val |= ISPIF_VFE_m_IRQ_MASK_0_RDI0_ENABLE; + writel_relaxed(val, ispif->base + ISPIF_VFE_m_IRQ_MASK_0(vfe)); + writel_relaxed(ISPIF_VFE_m_IRQ_MASK_0_RDI0_ENABLE, + ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(vfe)); + break; + case PIX1: + val = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_MASK_1(vfe)); + val &= ~ISPIF_VFE_m_IRQ_MASK_1_PIX1_MASK; + if (enable) + val |= ISPIF_VFE_m_IRQ_MASK_1_PIX1_ENABLE; + writel_relaxed(val, ispif->base + ISPIF_VFE_m_IRQ_MASK_1(vfe)); + writel_relaxed(ISPIF_VFE_m_IRQ_MASK_1_PIX1_ENABLE, + ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(vfe)); + break; + case RDI1: + val = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_MASK_1(vfe)); + val &= ~ISPIF_VFE_m_IRQ_MASK_1_RDI1_MASK; + if (enable) + val |= ISPIF_VFE_m_IRQ_MASK_1_RDI1_ENABLE; + writel_relaxed(val, ispif->base + ISPIF_VFE_m_IRQ_MASK_1(vfe)); + writel_relaxed(ISPIF_VFE_m_IRQ_MASK_1_RDI1_ENABLE, + ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(vfe)); + break; + case RDI2: + val = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_MASK_2(vfe)); + val &= ~ISPIF_VFE_m_IRQ_MASK_2_RDI2_MASK; + if (enable) + val |= ISPIF_VFE_m_IRQ_MASK_2_RDI2_ENABLE; + writel_relaxed(val, ispif->base + ISPIF_VFE_m_IRQ_MASK_2(vfe)); + writel_relaxed(ISPIF_VFE_m_IRQ_MASK_2_RDI2_ENABLE, + ispif->base + ISPIF_VFE_m_IRQ_CLEAR_2(vfe)); + break; + } + + writel(0x1, ispif->base + ISPIF_IRQ_GLOBAL_CLEAR_CMD); +} + +/* + * ispif_set_intf_cmd - Set command to enable/disable interface + * @ispif: ISPIF device + * @cmd: interface command + * @intf: VFE interface + * @vfe: VFE HW module id + * @vc: virtual channel + */ +static void ispif_set_intf_cmd(struct ispif_device *ispif, u8 cmd, + enum ispif_intf intf, u8 vfe, u8 vc) +{ + u32 *val; + + if (intf == RDI2) { + val = &ispif->intf_cmd[vfe].cmd_1; + *val &= ~(0x3 << (vc * 2 + 8)); + *val |= (cmd << (vc * 2 + 8)); + wmb(); + writel_relaxed(*val, ispif->base + ISPIF_VFE_m_INTF_CMD_1(vfe)); + wmb(); + } else { + val = &ispif->intf_cmd[vfe].cmd_0; + *val &= ~(0x3 << (vc * 2 + intf * 8)); + *val |= (cmd << (vc * 2 + intf * 8)); + wmb(); + writel_relaxed(*val, ispif->base + ISPIF_VFE_m_INTF_CMD_0(vfe)); + wmb(); + } +} + +/* + * ispif_set_stream - Enable/disable streaming on ISPIF module + * @sd: ISPIF V4L2 subdevice + * @enable: Requested streaming state + * + * Main configuration of ISPIF module is also done here. 
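+ * On enable, the clock mux, CSID input, CID and interrupts for the selected interface are configured and streaming is started at a frame boundary; on disable, streaming is stopped at a frame boundary and the configuration is reverted.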
+ * + * Return 0 on success or a negative error code otherwise + */ +static int ispif_set_stream(struct v4l2_subdev *sd, int enable) +{ + struct ispif_line *line = v4l2_get_subdevdata(sd); + struct ispif_device *ispif = to_ispif(line); + enum ispif_intf intf = line->interface; + u8 csid = line->csid_id; + u8 vfe = line->vfe_id; + u8 vc = 0; /* Virtual Channel 0 */ + u8 cid = vc * 4; /* id of Virtual Channel and Data Type set */ + int ret; + + if (enable) { + if (!media_entity_remote_pad(&line->pads[MSM_ISPIF_PAD_SINK])) + return -ENOLINK; + + /* Config */ + + mutex_lock(&ispif->config_lock); + ispif_select_clk_mux(ispif, intf, csid, vfe, 1); + + ret = ispif_validate_intf_status(ispif, intf, vfe); + if (ret < 0) { + mutex_unlock(&ispif->config_lock); + return ret; + } + + ispif_select_csid(ispif, intf, csid, vfe, 1); + ispif_select_cid(ispif, intf, cid, vfe, 1); + ispif_config_irq(ispif, intf, vfe, 1); + ispif_set_intf_cmd(ispif, CMD_ENABLE_FRAME_BOUNDARY, + intf, vfe, vc); + } else { + mutex_lock(&ispif->config_lock); + ispif_set_intf_cmd(ispif, CMD_DISABLE_FRAME_BOUNDARY, + intf, vfe, vc); + mutex_unlock(&ispif->config_lock); + + ret = ispif_wait_for_stop(ispif, intf, vfe); + if (ret < 0) + return ret; + + mutex_lock(&ispif->config_lock); + ispif_config_irq(ispif, intf, vfe, 0); + ispif_select_cid(ispif, intf, cid, vfe, 0); + ispif_select_csid(ispif, intf, csid, vfe, 0); + ispif_select_clk_mux(ispif, intf, csid, vfe, 0); + } + + mutex_unlock(&ispif->config_lock); + + return 0; +} + +/* + * __ispif_get_format - Get pointer to format structure + * @ispif: ISPIF line + * @cfg: V4L2 subdev pad configuration + * @pad: pad from which format is requested + * @which: TRY or ACTIVE format + * + * Return pointer to TRY or ACTIVE format structure + */ +static struct v4l2_mbus_framefmt * +__ispif_get_format(struct ispif_line *line, + struct v4l2_subdev_pad_config *cfg, + unsigned int pad, + enum v4l2_subdev_format_whence which) +{ + if (which == V4L2_SUBDEV_FORMAT_TRY) + return v4l2_subdev_get_try_format(&line->subdev, cfg, pad); + + return &line->fmt[pad]; +} + +/* + * ispif_try_format - Handle try format by pad subdev method + * @ispif: ISPIF line + * @cfg: V4L2 subdev pad configuration + * @pad: pad on which format is requested + * @fmt: pointer to v4l2 format structure + * @which: wanted subdev format + */ +static void ispif_try_format(struct ispif_line *line, + struct v4l2_subdev_pad_config *cfg, + unsigned int pad, + struct v4l2_mbus_framefmt *fmt, + enum v4l2_subdev_format_whence which) +{ + unsigned int i; + + switch (pad) { + case MSM_ISPIF_PAD_SINK: + /* Set format on sink pad */ + + for (i = 0; i < ARRAY_SIZE(ispif_formats); i++) + if (fmt->code == ispif_formats[i]) + break; + + /* If not found, use UYVY as default */ + if (i >= ARRAY_SIZE(ispif_formats)) + fmt->code = MEDIA_BUS_FMT_UYVY8_2X8; + + fmt->width = clamp_t(u32, fmt->width, 1, 8191); + fmt->height = clamp_t(u32, fmt->height, 1, 8191); + + fmt->field = V4L2_FIELD_NONE; + fmt->colorspace = V4L2_COLORSPACE_SRGB; + + break; + + case MSM_ISPIF_PAD_SRC: + /* Set and return a format same as sink pad */ + + *fmt = *__ispif_get_format(line, cfg, MSM_ISPIF_PAD_SINK, + which); + + break; + } + + fmt->colorspace = V4L2_COLORSPACE_SRGB; +} + +/* + * ispif_enum_mbus_code - Handle pixel format enumeration + * @sd: ISPIF V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @code: pointer to v4l2_subdev_mbus_code_enum structure + * return -EINVAL or zero on success + */ +static int ispif_enum_mbus_code(struct v4l2_subdev *sd, + struct 
v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_mbus_code_enum *code) +{ + struct ispif_line *line = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt *format; + + if (code->pad == MSM_ISPIF_PAD_SINK) { + if (code->index >= ARRAY_SIZE(ispif_formats)) + return -EINVAL; + + code->code = ispif_formats[code->index]; + } else { + if (code->index > 0) + return -EINVAL; + + format = __ispif_get_format(line, cfg, MSM_ISPIF_PAD_SINK, + code->which); + + code->code = format->code; + } + + return 0; +} + +/* + * ispif_enum_frame_size - Handle frame size enumeration + * @sd: ISPIF V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @fse: pointer to v4l2_subdev_frame_size_enum structure + * return -EINVAL or zero on success + */ +static int ispif_enum_frame_size(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_frame_size_enum *fse) +{ + struct ispif_line *line = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt format; + + if (fse->index != 0) + return -EINVAL; + + format.code = fse->code; + format.width = 1; + format.height = 1; + ispif_try_format(line, cfg, fse->pad, &format, fse->which); + fse->min_width = format.width; + fse->min_height = format.height; + + if (format.code != fse->code) + return -EINVAL; + + format.code = fse->code; + format.width = -1; + format.height = -1; + ispif_try_format(line, cfg, fse->pad, &format, fse->which); + fse->max_width = format.width; + fse->max_height = format.height; + + return 0; +} + +/* + * ispif_get_format - Handle get format by pads subdev method + * @sd: ISPIF V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @fmt: pointer to v4l2 subdev format structure + * + * Return -EINVAL or zero on success + */ +static int ispif_get_format(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct ispif_line *line = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt *format; + + format = __ispif_get_format(line, cfg, fmt->pad, fmt->which); + if (format == NULL) + return -EINVAL; + + fmt->format = *format; + + return 0; +} + +/* + * ispif_set_format - Handle set format by pads subdev method + * @sd: ISPIF V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @fmt: pointer to v4l2 subdev format structure + * + * Return -EINVAL or zero on success + */ +static int ispif_set_format(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct ispif_line *line = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt *format; + + format = __ispif_get_format(line, cfg, fmt->pad, fmt->which); + if (format == NULL) + return -EINVAL; + + ispif_try_format(line, cfg, fmt->pad, &fmt->format, fmt->which); + *format = fmt->format; + + /* Propagate the format from sink to source */ + if (fmt->pad == MSM_ISPIF_PAD_SINK) { + format = __ispif_get_format(line, cfg, MSM_ISPIF_PAD_SRC, + fmt->which); + + *format = fmt->format; + ispif_try_format(line, cfg, MSM_ISPIF_PAD_SRC, format, + fmt->which); + } + + return 0; +} + +/* + * ispif_init_formats - Initialize formats on all pads + * @sd: ISPIF V4L2 subdevice + * @fh: V4L2 subdev file handle + * + * Initialize all pad formats with default values. + * + * Return 0 on success or a negative error code otherwise + */ +static int ispif_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) +{ + struct v4l2_subdev_format format = { + .pad = MSM_ISPIF_PAD_SINK, + .which = fh ? 
V4L2_SUBDEV_FORMAT_TRY : + V4L2_SUBDEV_FORMAT_ACTIVE, + .format = { + .code = MEDIA_BUS_FMT_UYVY8_2X8, + .width = 1920, + .height = 1080 + } + }; + + return ispif_set_format(sd, fh ? fh->pad : NULL, &format); +} + +/* + * msm_ispif_subdev_init - Initialize ISPIF device structure and resources + * @ispif: ISPIF device + * @res: ISPIF module resources table + * + * Return 0 on success or a negative error code otherwise + */ +int msm_ispif_subdev_init(struct ispif_device *ispif, + const struct resources_ispif *res) +{ + struct device *dev = to_device(ispif); + struct platform_device *pdev = to_platform_device(dev); + struct resource *r; + int i; + int ret; + + /* Memory */ + + r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[0]); + ispif->base = devm_ioremap_resource(dev, r); + if (IS_ERR(ispif->base)) { + dev_err(dev, "could not map memory\n"); + return PTR_ERR(ispif->base); + } + + r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[1]); + ispif->base_clk_mux = devm_ioremap_resource(dev, r); + if (IS_ERR(ispif->base_clk_mux)) { + dev_err(dev, "could not map memory\n"); + return PTR_ERR(ispif->base_clk_mux); + } + + /* Interrupt */ + + r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res->interrupt); + + if (!r) { + dev_err(dev, "missing IRQ\n"); + return -EINVAL; + } + + ispif->irq = r->start; + snprintf(ispif->irq_name, sizeof(ispif->irq_name), "%s_%s", + dev_name(dev), MSM_ISPIF_NAME); + ret = devm_request_irq(dev, ispif->irq, ispif_isr, + IRQF_TRIGGER_RISING, ispif->irq_name, ispif); + if (ret < 0) { + dev_err(dev, "request_irq failed: %d\n", ret); + return ret; + } + + /* Clocks */ + + ispif->nclocks = 0; + while (res->clock[ispif->nclocks]) + ispif->nclocks++; + + ispif->clock = devm_kzalloc(dev, ispif->nclocks * sizeof(*ispif->clock), + GFP_KERNEL); + if (!ispif->clock) + return -ENOMEM; + + for (i = 0; i < ispif->nclocks; i++) { + struct camss_clock *clock = &ispif->clock[i]; + + clock->clk = devm_clk_get(dev, res->clock[i]); + if (IS_ERR(clock->clk)) + return PTR_ERR(clock->clk); + + clock->freq = NULL; + clock->nfreqs = 0; + } + + ispif->nclocks_for_reset = 0; + while (res->clock_for_reset[ispif->nclocks_for_reset]) + ispif->nclocks_for_reset++; + + ispif->clock_for_reset = devm_kzalloc(dev, ispif->nclocks_for_reset * + sizeof(*ispif->clock_for_reset), GFP_KERNEL); + if (!ispif->clock_for_reset) + return -ENOMEM; + + for (i = 0; i < ispif->nclocks_for_reset; i++) { + struct camss_clock *clock = &ispif->clock_for_reset[i]; + + clock->clk = devm_clk_get(dev, res->clock_for_reset[i]); + if (IS_ERR(clock->clk)) + return PTR_ERR(clock->clk); + + clock->freq = NULL; + clock->nfreqs = 0; + } + + for (i = 0; i < ARRAY_SIZE(ispif->line); i++) + ispif->line[i].id = i; + + mutex_init(&ispif->power_lock); + ispif->power_count = 0; + + mutex_init(&ispif->config_lock); + + init_completion(&ispif->reset_complete); + + return 0; +} + +/* + * ispif_get_intf - Get ISPIF interface to use by VFE line id + * @line_id: VFE line id that the ISPIF line is connected to + * + * Return ISPIF interface to use + */ +static enum ispif_intf ispif_get_intf(enum vfe_line_id line_id) +{ + switch (line_id) { + case (VFE_LINE_RDI0): + return RDI0; + case (VFE_LINE_RDI1): + return RDI1; + case (VFE_LINE_RDI2): + return RDI2; + case (VFE_LINE_PIX): + return PIX0; + default: + return RDI0; + } +} + +/* + * ispif_link_setup - Setup ISPIF connections + * @entity: Pointer to media entity structure + * @local: Pointer to local pad + * @remote: Pointer to remote pad + * @flags: Link flags + * 
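+ * A link to the sink pad records the id of the connected CSID; a link to the source pad records the id of the connected VFE and the ISPIF interface matching its line. + *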
+ * Return 0 on success + */ +static int ispif_link_setup(struct media_entity *entity, + const struct media_pad *local, + const struct media_pad *remote, u32 flags) +{ + if (flags & MEDIA_LNK_FL_ENABLED) { + if (media_entity_remote_pad(local)) + return -EBUSY; + + if (local->flags & MEDIA_PAD_FL_SINK) { + struct v4l2_subdev *sd; + struct ispif_line *line; + + sd = media_entity_to_v4l2_subdev(entity); + line = v4l2_get_subdevdata(sd); + + msm_csid_get_csid_id(remote->entity, &line->csid_id); + } else { /* MEDIA_PAD_FL_SOURCE */ + struct v4l2_subdev *sd; + struct ispif_line *line; + enum vfe_line_id id; + + sd = media_entity_to_v4l2_subdev(entity); + line = v4l2_get_subdevdata(sd); + + msm_vfe_get_vfe_id(remote->entity, &line->vfe_id); + msm_vfe_get_vfe_line_id(remote->entity, &id); + line->interface = ispif_get_intf(id); + } + } + + return 0; +} + +static const struct v4l2_subdev_core_ops ispif_core_ops = { + .s_power = ispif_set_power, +}; + +static const struct v4l2_subdev_video_ops ispif_video_ops = { + .s_stream = ispif_set_stream, +}; + +static const struct v4l2_subdev_pad_ops ispif_pad_ops = { + .enum_mbus_code = ispif_enum_mbus_code, + .enum_frame_size = ispif_enum_frame_size, + .get_fmt = ispif_get_format, + .set_fmt = ispif_set_format, +}; + +static const struct v4l2_subdev_ops ispif_v4l2_ops = { + .core = &ispif_core_ops, + .video = &ispif_video_ops, + .pad = &ispif_pad_ops, +}; + +static const struct v4l2_subdev_internal_ops ispif_v4l2_internal_ops = { + .open = ispif_init_formats, +}; + +static const struct media_entity_operations ispif_media_ops = { + .link_setup = ispif_link_setup, + .link_validate = v4l2_subdev_link_validate, +}; + +/* + * msm_ispif_register_entities - Register subdev node for ISPIF module + * @ispif: ISPIF device + * @v4l2_dev: V4L2 device + * + * Return 0 on success or a negative error code otherwise + */ +int msm_ispif_register_entities(struct ispif_device *ispif, + struct v4l2_device *v4l2_dev) +{ + struct device *dev = to_device(ispif); + int ret; + int i; + + for (i = 0; i < ARRAY_SIZE(ispif->line); i++) { + struct v4l2_subdev *sd = &ispif->line[i].subdev; + struct media_pad *pads = ispif->line[i].pads; + + v4l2_subdev_init(sd, &ispif_v4l2_ops); + sd->internal_ops = &ispif_v4l2_internal_ops; + sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d", + MSM_ISPIF_NAME, i); + v4l2_set_subdevdata(sd, &ispif->line[i]); + + ret = ispif_init_formats(sd, NULL); + if (ret < 0) { + dev_err(dev, "Failed to init format: %d\n", ret); + goto error; + } + + pads[MSM_ISPIF_PAD_SINK].flags = MEDIA_PAD_FL_SINK; + pads[MSM_ISPIF_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE; + + sd->entity.function = MEDIA_ENT_F_IO_V4L; + sd->entity.ops = &ispif_media_ops; + ret = media_entity_pads_init(&sd->entity, MSM_ISPIF_PADS_NUM, + pads); + if (ret < 0) { + dev_err(dev, "Failed to init media entity: %d\n", ret); + goto error; + } + + ret = v4l2_device_register_subdev(v4l2_dev, sd); + if (ret < 0) { + dev_err(dev, "Failed to register subdev: %d\n", ret); + media_entity_cleanup(&sd->entity); + goto error; + } + } + + return 0; + +error: + for (i--; i >= 0; i--) { + struct v4l2_subdev *sd = &ispif->line[i].subdev; + + v4l2_device_unregister_subdev(sd); + media_entity_cleanup(&sd->entity); + } + + return ret; +} + +/* + * msm_ispif_unregister_entities - Unregister ISPIF module subdev node + * @ispif: ISPIF device + */ +void msm_ispif_unregister_entities(struct ispif_device *ispif) +{ + int i; + + mutex_destroy(&ispif->power_lock); + 
mutex_destroy(&ispif->config_lock); + + for (i = 0; i < ARRAY_SIZE(ispif->line); i++) { + struct v4l2_subdev *sd = &ispif->line[i].subdev; + + v4l2_device_unregister_subdev(sd); + media_entity_cleanup(&sd->entity); + } +} diff --git a/drivers/media/platform/qcom/camss-8x16/camss-ispif.h b/drivers/media/platform/qcom/camss-8x16/camss-ispif.h new file mode 100644 index 000000000000..f668306020c3 --- /dev/null +++ b/drivers/media/platform/qcom/camss-8x16/camss-ispif.h @@ -0,0 +1,85 @@ +/* + * camss-ispif.h + * + * Qualcomm MSM Camera Subsystem - ISPIF (ISP Interface) Module + * + * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. + * Copyright (C) 2015-2017 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef QC_MSM_CAMSS_ISPIF_H +#define QC_MSM_CAMSS_ISPIF_H + +#include +#include +#include +#include + +/* Number of ISPIF lines - same as number of CSID hardware modules */ +#define MSM_ISPIF_LINE_NUM 2 + +#define MSM_ISPIF_PAD_SINK 0 +#define MSM_ISPIF_PAD_SRC 1 +#define MSM_ISPIF_PADS_NUM 2 + +#define MSM_ISPIF_VFE_NUM 1 + +enum ispif_intf { + PIX0, + RDI0, + PIX1, + RDI1, + RDI2 +}; + +struct ispif_intf_cmd_reg { + u32 cmd_0; + u32 cmd_1; +}; + +struct ispif_line { + u8 id; + u8 csid_id; + u8 vfe_id; + enum ispif_intf interface; + struct v4l2_subdev subdev; + struct media_pad pads[MSM_ISPIF_PADS_NUM]; + struct v4l2_mbus_framefmt fmt[MSM_ISPIF_PADS_NUM]; +}; + +struct ispif_device { + void __iomem *base; + void __iomem *base_clk_mux; + u32 irq; + char irq_name[30]; + struct camss_clock *clock; + int nclocks; + struct camss_clock *clock_for_reset; + int nclocks_for_reset; + struct completion reset_complete; + int power_count; + struct mutex power_lock; + struct ispif_intf_cmd_reg intf_cmd[MSM_ISPIF_VFE_NUM]; + struct mutex config_lock; + struct ispif_line line[MSM_ISPIF_LINE_NUM]; +}; + +struct resources_ispif; + +int msm_ispif_subdev_init(struct ispif_device *ispif, + const struct resources_ispif *res); + +int msm_ispif_register_entities(struct ispif_device *ispif, + struct v4l2_device *v4l2_dev); + +void msm_ispif_unregister_entities(struct ispif_device *ispif); + +#endif /* QC_MSM_CAMSS_ISPIF_H */ diff --git a/drivers/media/platform/qcom/camss-8x16/camss-vfe.c b/drivers/media/platform/qcom/camss-8x16/camss-vfe.c new file mode 100644 index 000000000000..b21b3c2dc77f --- /dev/null +++ b/drivers/media/platform/qcom/camss-8x16/camss-vfe.c @@ -0,0 +1,3088 @@ +/* + * camss-vfe.c + * + * Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module + * + * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. + * Copyright (C) 2015-2017 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "camss-vfe.h" +#include "camss.h" + +#define MSM_VFE_NAME "msm_vfe" + +#define vfe_line_array(ptr_line) \ + ((const struct vfe_line (*)[]) &(ptr_line[-(ptr_line->id)])) + +#define to_vfe(ptr_line) \ + container_of(vfe_line_array(ptr_line), struct vfe_device, ptr_line) + +#define VFE_0_HW_VERSION 0x000 + +#define VFE_0_GLOBAL_RESET_CMD 0x00c +#define VFE_0_GLOBAL_RESET_CMD_CORE (1 << 0) +#define VFE_0_GLOBAL_RESET_CMD_CAMIF (1 << 1) +#define VFE_0_GLOBAL_RESET_CMD_BUS (1 << 2) +#define VFE_0_GLOBAL_RESET_CMD_BUS_BDG (1 << 3) +#define VFE_0_GLOBAL_RESET_CMD_REGISTER (1 << 4) +#define VFE_0_GLOBAL_RESET_CMD_TIMER (1 << 5) +#define VFE_0_GLOBAL_RESET_CMD_PM (1 << 6) +#define VFE_0_GLOBAL_RESET_CMD_BUS_MISR (1 << 7) +#define VFE_0_GLOBAL_RESET_CMD_TESTGEN (1 << 8) + +#define VFE_0_MODULE_CFG 0x018 +#define VFE_0_MODULE_CFG_DEMUX (1 << 2) +#define VFE_0_MODULE_CFG_CHROMA_UPSAMPLE (1 << 3) +#define VFE_0_MODULE_CFG_SCALE_ENC (1 << 23) +#define VFE_0_MODULE_CFG_CROP_ENC (1 << 27) + +#define VFE_0_CORE_CFG 0x01c +#define VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR 0x4 +#define VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB 0x5 +#define VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY 0x6 +#define VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY 0x7 + +#define VFE_0_IRQ_CMD 0x024 +#define VFE_0_IRQ_CMD_GLOBAL_CLEAR (1 << 0) + +#define VFE_0_IRQ_MASK_0 0x028 +#define VFE_0_IRQ_MASK_0_CAMIF_SOF (1 << 0) +#define VFE_0_IRQ_MASK_0_CAMIF_EOF (1 << 1) +#define VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n) (1 << ((n) + 5)) +#define VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(n) \ + ((n) == VFE_LINE_PIX ? (1 << 4) : VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n)) +#define VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(n) (1 << ((n) + 8)) +#define VFE_0_IRQ_MASK_0_IMAGE_COMPOSITE_DONE_n(n) (1 << ((n) + 25)) +#define VFE_0_IRQ_MASK_0_RESET_ACK (1 << 31) +#define VFE_0_IRQ_MASK_1 0x02c +#define VFE_0_IRQ_MASK_1_CAMIF_ERROR (1 << 0) +#define VFE_0_IRQ_MASK_1_VIOLATION (1 << 7) +#define VFE_0_IRQ_MASK_1_BUS_BDG_HALT_ACK (1 << 8) +#define VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(n) (1 << ((n) + 9)) +#define VFE_0_IRQ_MASK_1_RDIn_SOF(n) (1 << ((n) + 29)) + +#define VFE_0_IRQ_CLEAR_0 0x030 +#define VFE_0_IRQ_CLEAR_1 0x034 + +#define VFE_0_IRQ_STATUS_0 0x038 +#define VFE_0_IRQ_STATUS_0_CAMIF_SOF (1 << 0) +#define VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n) (1 << ((n) + 5)) +#define VFE_0_IRQ_STATUS_0_line_n_REG_UPDATE(n) \ + ((n) == VFE_LINE_PIX ? 
(1 << 4) : VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n)) +#define VFE_0_IRQ_STATUS_0_IMAGE_MASTER_n_PING_PONG(n) (1 << ((n) + 8)) +#define VFE_0_IRQ_STATUS_0_IMAGE_COMPOSITE_DONE_n(n) (1 << ((n) + 25)) +#define VFE_0_IRQ_STATUS_0_RESET_ACK (1 << 31) +#define VFE_0_IRQ_STATUS_1 0x03c +#define VFE_0_IRQ_STATUS_1_VIOLATION (1 << 7) +#define VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK (1 << 8) +#define VFE_0_IRQ_STATUS_1_RDIn_SOF(n) (1 << ((n) + 29)) + +#define VFE_0_IRQ_COMPOSITE_MASK_0 0x40 +#define VFE_0_VIOLATION_STATUS 0x48 + +#define VFE_0_BUS_CMD 0x4c +#define VFE_0_BUS_CMD_Mx_RLD_CMD(x) (1 << (x)) + +#define VFE_0_BUS_CFG 0x050 + +#define VFE_0_BUS_XBAR_CFG_x(x) (0x58 + 0x4 * ((x) / 2)) +#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN (1 << 1) +#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA (0x3 << 4) +#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT 8 +#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_LUMA 0 +#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 5 +#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 6 +#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 7 + +#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(n) (0x06c + 0x24 * (n)) +#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT 0 +#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_FRM_BASED_SHIFT 1 +#define VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(n) (0x070 + 0x24 * (n)) +#define VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(n) (0x074 + 0x24 * (n)) +#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(n) (0x078 + 0x24 * (n)) +#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_SHIFT 2 +#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK (0x1F << 2) + +#define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(n) (0x07c + 0x24 * (n)) +#define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG_OFFSET_SHIFT 16 +#define VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(n) (0x080 + 0x24 * (n)) +#define VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(n) (0x084 + 0x24 * (n)) +#define VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(n) \ + (0x088 + 0x24 * (n)) +#define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(n) \ + (0x08c + 0x24 * (n)) +#define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN_DEF 0xffffffff + +#define VFE_0_BUS_PING_PONG_STATUS 0x268 + +#define VFE_0_BUS_BDG_CMD 0x2c0 +#define VFE_0_BUS_BDG_CMD_HALT_REQ 1 + +#define VFE_0_BUS_BDG_QOS_CFG_0 0x2c4 +#define VFE_0_BUS_BDG_QOS_CFG_0_CFG 0xaaa5aaa5 +#define VFE_0_BUS_BDG_QOS_CFG_1 0x2c8 +#define VFE_0_BUS_BDG_QOS_CFG_2 0x2cc +#define VFE_0_BUS_BDG_QOS_CFG_3 0x2d0 +#define VFE_0_BUS_BDG_QOS_CFG_4 0x2d4 +#define VFE_0_BUS_BDG_QOS_CFG_5 0x2d8 +#define VFE_0_BUS_BDG_QOS_CFG_6 0x2dc +#define VFE_0_BUS_BDG_QOS_CFG_7 0x2e0 +#define VFE_0_BUS_BDG_QOS_CFG_7_CFG 0x0001aaa5 + +#define VFE_0_RDI_CFG_x(x) (0x2e8 + (0x4 * (x))) +#define VFE_0_RDI_CFG_x_RDI_STREAM_SEL_SHIFT 28 +#define VFE_0_RDI_CFG_x_RDI_STREAM_SEL_MASK (0xf << 28) +#define VFE_0_RDI_CFG_x_RDI_M0_SEL_SHIFT 4 +#define VFE_0_RDI_CFG_x_RDI_M0_SEL_MASK (0xf << 4) +#define VFE_0_RDI_CFG_x_RDI_EN_BIT (1 << 2) +#define VFE_0_RDI_CFG_x_MIPI_EN_BITS 0x3 +#define VFE_0_RDI_CFG_x_RDI_Mr_FRAME_BASED_EN(r) (1 << (16 + (r))) + +#define VFE_0_CAMIF_CMD 0x2f4 +#define VFE_0_CAMIF_CMD_DISABLE_FRAME_BOUNDARY 0 +#define VFE_0_CAMIF_CMD_ENABLE_FRAME_BOUNDARY 1 +#define VFE_0_CAMIF_CMD_CLEAR_CAMIF_STATUS (1 << 2) +#define VFE_0_CAMIF_CFG 0x2f8 +#define VFE_0_CAMIF_CFG_VFE_OUTPUT_EN (1 << 6) +#define VFE_0_CAMIF_FRAME_CFG 0x300 +#define VFE_0_CAMIF_WINDOW_WIDTH_CFG 0x304 +#define VFE_0_CAMIF_WINDOW_HEIGHT_CFG 0x308 +#define VFE_0_CAMIF_SUBSAMPLE_CFG_0 0x30c +#define 
VFE_0_CAMIF_IRQ_SUBSAMPLE_PATTERN 0x314 +#define VFE_0_CAMIF_STATUS 0x31c +#define VFE_0_CAMIF_STATUS_HALT (1 << 31) + +#define VFE_0_REG_UPDATE 0x378 +#define VFE_0_REG_UPDATE_RDIn(n) (1 << (1 + (n))) +#define VFE_0_REG_UPDATE_line_n(n) \ + ((n) == VFE_LINE_PIX ? 1 : VFE_0_REG_UPDATE_RDIn(n)) + +#define VFE_0_DEMUX_CFG 0x424 +#define VFE_0_DEMUX_CFG_PERIOD 0x3 +#define VFE_0_DEMUX_GAIN_0 0x428 +#define VFE_0_DEMUX_GAIN_0_CH0_EVEN (0x80 << 0) +#define VFE_0_DEMUX_GAIN_0_CH0_ODD (0x80 << 16) +#define VFE_0_DEMUX_GAIN_1 0x42c +#define VFE_0_DEMUX_GAIN_1_CH1 (0x80 << 0) +#define VFE_0_DEMUX_GAIN_1_CH2 (0x80 << 16) +#define VFE_0_DEMUX_EVEN_CFG 0x438 +#define VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV 0x9cac +#define VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU 0xac9c +#define VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY 0xc9ca +#define VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY 0xcac9 +#define VFE_0_DEMUX_ODD_CFG 0x43c +#define VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV 0x9cac +#define VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU 0xac9c +#define VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY 0xc9ca +#define VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY 0xcac9 + +#define VFE_0_SCALE_ENC_Y_CFG 0x75c +#define VFE_0_SCALE_ENC_Y_H_IMAGE_SIZE 0x760 +#define VFE_0_SCALE_ENC_Y_H_PHASE 0x764 +#define VFE_0_SCALE_ENC_Y_V_IMAGE_SIZE 0x76c +#define VFE_0_SCALE_ENC_Y_V_PHASE 0x770 +#define VFE_0_SCALE_ENC_CBCR_CFG 0x778 +#define VFE_0_SCALE_ENC_CBCR_H_IMAGE_SIZE 0x77c +#define VFE_0_SCALE_ENC_CBCR_H_PHASE 0x780 +#define VFE_0_SCALE_ENC_CBCR_V_IMAGE_SIZE 0x790 +#define VFE_0_SCALE_ENC_CBCR_V_PHASE 0x794 + +#define VFE_0_CROP_ENC_Y_WIDTH 0x854 +#define VFE_0_CROP_ENC_Y_HEIGHT 0x858 +#define VFE_0_CROP_ENC_CBCR_WIDTH 0x85c +#define VFE_0_CROP_ENC_CBCR_HEIGHT 0x860 + +#define VFE_0_CLAMP_ENC_MAX_CFG 0x874 +#define VFE_0_CLAMP_ENC_MAX_CFG_CH0 (0xff << 0) +#define VFE_0_CLAMP_ENC_MAX_CFG_CH1 (0xff << 8) +#define VFE_0_CLAMP_ENC_MAX_CFG_CH2 (0xff << 16) +#define VFE_0_CLAMP_ENC_MIN_CFG 0x878 +#define VFE_0_CLAMP_ENC_MIN_CFG_CH0 (0x0 << 0) +#define VFE_0_CLAMP_ENC_MIN_CFG_CH1 (0x0 << 8) +#define VFE_0_CLAMP_ENC_MIN_CFG_CH2 (0x0 << 16) + +#define VFE_0_CGC_OVERRIDE_1 0x974 +#define VFE_0_CGC_OVERRIDE_1_IMAGE_Mx_CGC_OVERRIDE(x) (1 << (x)) + +/* VFE reset timeout */ +#define VFE_RESET_TIMEOUT_MS 50 +/* VFE halt timeout */ +#define VFE_HALT_TIMEOUT_MS 100 +/* Max number of frame drop updates per frame */ +#define VFE_FRAME_DROP_UPDATES 5 +/* Frame drop value. 
NOTE: VAL + UPDATES should not exceed 31 */ +#define VFE_FRAME_DROP_VAL 20 + +#define VFE_NEXT_SOF_MS 500 + +#define CAMIF_TIMEOUT_SLEEP_US 1000 +#define CAMIF_TIMEOUT_ALL_US 1000000 + +#define SCALER_RATIO_MAX 16 + +static const struct { + u32 code; + u8 bpp; +} vfe_formats[] = { + { + MEDIA_BUS_FMT_UYVY8_2X8, + 8, + }, + { + MEDIA_BUS_FMT_VYUY8_2X8, + 8, + }, + { + MEDIA_BUS_FMT_YUYV8_2X8, + 8, + }, + { + MEDIA_BUS_FMT_YVYU8_2X8, + 8, + }, + { + MEDIA_BUS_FMT_SBGGR8_1X8, + 8, + }, + { + MEDIA_BUS_FMT_SGBRG8_1X8, + 8, + }, + { + MEDIA_BUS_FMT_SGRBG8_1X8, + 8, + }, + { + MEDIA_BUS_FMT_SRGGB8_1X8, + 8, + }, + { + MEDIA_BUS_FMT_SBGGR10_1X10, + 10, + }, + { + MEDIA_BUS_FMT_SGBRG10_1X10, + 10, + }, + { + MEDIA_BUS_FMT_SGRBG10_1X10, + 10, + }, + { + MEDIA_BUS_FMT_SRGGB10_1X10, + 10, + }, + { + MEDIA_BUS_FMT_SBGGR12_1X12, + 12, + }, + { + MEDIA_BUS_FMT_SGBRG12_1X12, + 12, + }, + { + MEDIA_BUS_FMT_SGRBG12_1X12, + 12, + }, + { + MEDIA_BUS_FMT_SRGGB12_1X12, + 12, + } +}; + +/* + * vfe_get_bpp - map media bus format to bits per pixel + * @code: media bus format code + * + * Return number of bits per pixel + */ +static u8 vfe_get_bpp(u32 code) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(vfe_formats); i++) + if (code == vfe_formats[i].code) + return vfe_formats[i].bpp; + + WARN(1, "Unknown format\n"); + + return vfe_formats[0].bpp; +} + +static inline void vfe_reg_clr(struct vfe_device *vfe, u32 reg, u32 clr_bits) +{ + u32 bits = readl_relaxed(vfe->base + reg); + + writel_relaxed(bits & ~clr_bits, vfe->base + reg); +} + +static inline void vfe_reg_set(struct vfe_device *vfe, u32 reg, u32 set_bits) +{ + u32 bits = readl_relaxed(vfe->base + reg); + + writel_relaxed(bits | set_bits, vfe->base + reg); +} + +static void vfe_global_reset(struct vfe_device *vfe) +{ + u32 reset_bits = VFE_0_GLOBAL_RESET_CMD_TESTGEN | + VFE_0_GLOBAL_RESET_CMD_BUS_MISR | + VFE_0_GLOBAL_RESET_CMD_PM | + VFE_0_GLOBAL_RESET_CMD_TIMER | + VFE_0_GLOBAL_RESET_CMD_REGISTER | + VFE_0_GLOBAL_RESET_CMD_BUS_BDG | + VFE_0_GLOBAL_RESET_CMD_BUS | + VFE_0_GLOBAL_RESET_CMD_CAMIF | + VFE_0_GLOBAL_RESET_CMD_CORE; + + writel_relaxed(reset_bits, vfe->base + VFE_0_GLOBAL_RESET_CMD); +} + +static void vfe_wm_enable(struct vfe_device *vfe, u8 wm, u8 enable) +{ + if (enable) + vfe_reg_set(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm), + 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT); + else + vfe_reg_clr(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm), + 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT); +} + +static void vfe_wm_frame_based(struct vfe_device *vfe, u8 wm, u8 enable) +{ + if (enable) + vfe_reg_set(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm), + 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_FRM_BASED_SHIFT); + else + vfe_reg_clr(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm), + 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_FRM_BASED_SHIFT); +} + +#define CALC_WORD(width, M, N) (((width) * (M) + (N) - 1) / (N)) + +static int vfe_word_per_line(uint32_t format, uint32_t pixel_per_line) +{ + int val = 0; + + switch (format) { + case V4L2_PIX_FMT_NV12: + case V4L2_PIX_FMT_NV21: + case V4L2_PIX_FMT_NV16: + case V4L2_PIX_FMT_NV61: + val = CALC_WORD(pixel_per_line, 1, 8); + break; + case V4L2_PIX_FMT_YUYV: + case V4L2_PIX_FMT_YVYU: + case V4L2_PIX_FMT_UYVY: + case V4L2_PIX_FMT_VYUY: + val = CALC_WORD(pixel_per_line, 2, 8); + break; + } + + return val; +} + +static void vfe_get_wm_sizes(struct v4l2_pix_format_mplane *pix, u8 plane, + u16 *width, u16 *height, u16 *bytesperline) +{ + switch (pix->pixelformat) { + case V4L2_PIX_FMT_NV12: + case V4L2_PIX_FMT_NV21: + *width = 
pix->width; + *height = pix->height; + *bytesperline = pix->plane_fmt[0].bytesperline; + if (plane == 1) + *height /= 2; + break; + case V4L2_PIX_FMT_NV16: + case V4L2_PIX_FMT_NV61: + *width = pix->width; + *height = pix->height; + *bytesperline = pix->plane_fmt[0].bytesperline; + break; + } +} + +static void vfe_wm_line_based(struct vfe_device *vfe, u32 wm, + struct v4l2_pix_format_mplane *pix, + u8 plane, u32 enable) +{ + u32 reg; + + if (enable) { + u16 width = 0, height = 0, bytesperline = 0, wpl; + + vfe_get_wm_sizes(pix, plane, &width, &height, &bytesperline); + + wpl = vfe_word_per_line(pix->pixelformat, width); + + reg = height - 1; + reg |= ((wpl + 1) / 2 - 1) << 16; + + writel_relaxed(reg, vfe->base + + VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(wm)); + + wpl = vfe_word_per_line(pix->pixelformat, bytesperline); + + reg = 0x3; + reg |= (height - 1) << 4; + reg |= wpl << 16; + + writel_relaxed(reg, vfe->base + + VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(wm)); + } else { + writel_relaxed(0, vfe->base + + VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(wm)); + writel_relaxed(0, vfe->base + + VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(wm)); + } +} + +static void vfe_wm_set_framedrop_period(struct vfe_device *vfe, u8 wm, u8 per) +{ + u32 reg; + + reg = readl_relaxed(vfe->base + + VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm)); + + reg &= ~(VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK); + + reg |= (per << VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_SHIFT) + & VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK; + + writel_relaxed(reg, + vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm)); +} + +static void vfe_wm_set_framedrop_pattern(struct vfe_device *vfe, u8 wm, + u32 pattern) +{ + writel_relaxed(pattern, + vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(wm)); +} + +static void vfe_wm_set_ub_cfg(struct vfe_device *vfe, u8 wm, u16 offset, + u16 depth) +{ + u32 reg; + + reg = (offset << VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG_OFFSET_SHIFT) | + depth; + writel_relaxed(reg, vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(wm)); +} + +static void vfe_bus_reload_wm(struct vfe_device *vfe, u8 wm) +{ + wmb(); + writel_relaxed(VFE_0_BUS_CMD_Mx_RLD_CMD(wm), vfe->base + VFE_0_BUS_CMD); + wmb(); +} + +static void vfe_wm_set_ping_addr(struct vfe_device *vfe, u8 wm, u32 addr) +{ + writel_relaxed(addr, + vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(wm)); +} + +static void vfe_wm_set_pong_addr(struct vfe_device *vfe, u8 wm, u32 addr) +{ + writel_relaxed(addr, + vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(wm)); +} + +static int vfe_wm_get_ping_pong_status(struct vfe_device *vfe, u8 wm) +{ + u32 reg; + + reg = readl_relaxed(vfe->base + VFE_0_BUS_PING_PONG_STATUS); + + return (reg >> wm) & 0x1; +} + +static void vfe_bus_enable_wr_if(struct vfe_device *vfe, u8 enable) +{ + if (enable) + writel_relaxed(0x10000009, vfe->base + VFE_0_BUS_CFG); + else + writel_relaxed(0, vfe->base + VFE_0_BUS_CFG); +} + +static void vfe_bus_connect_wm_to_rdi(struct vfe_device *vfe, u8 wm, + enum vfe_line_id id) +{ + u32 reg; + + reg = VFE_0_RDI_CFG_x_MIPI_EN_BITS; + reg |= VFE_0_RDI_CFG_x_RDI_Mr_FRAME_BASED_EN(id); + vfe_reg_set(vfe, VFE_0_RDI_CFG_x(0), reg); + + reg = VFE_0_RDI_CFG_x_RDI_EN_BIT; + reg |= ((3 * id) << VFE_0_RDI_CFG_x_RDI_STREAM_SEL_SHIFT) & + VFE_0_RDI_CFG_x_RDI_STREAM_SEL_MASK; + vfe_reg_set(vfe, VFE_0_RDI_CFG_x(id), reg); + + switch (id) { + case VFE_LINE_RDI0: + default: + reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 << + VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT; + break; + 
case VFE_LINE_RDI1: + reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 << + VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT; + break; + case VFE_LINE_RDI2: + reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 << + VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT; + break; + } + + if (wm % 2 == 1) + reg <<= 16; + + vfe_reg_set(vfe, VFE_0_BUS_XBAR_CFG_x(wm), reg); +} + +static void vfe_wm_set_subsample(struct vfe_device *vfe, u8 wm) +{ + writel_relaxed(VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN_DEF, + vfe->base + + VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(wm)); +} + +static void vfe_bus_disconnect_wm_from_rdi(struct vfe_device *vfe, u8 wm, + enum vfe_line_id id) +{ + u32 reg; + + reg = VFE_0_RDI_CFG_x_RDI_Mr_FRAME_BASED_EN(id); + vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(0), reg); + + reg = VFE_0_RDI_CFG_x_RDI_EN_BIT; + vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(id), reg); + + switch (id) { + case VFE_LINE_RDI0: + default: + reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 << + VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT; + break; + case VFE_LINE_RDI1: + reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 << + VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT; + break; + case VFE_LINE_RDI2: + reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 << + VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT; + break; + } + + if (wm % 2 == 1) + reg <<= 16; + + vfe_reg_clr(vfe, VFE_0_BUS_XBAR_CFG_x(wm), reg); +} + +static void vfe_set_xbar_cfg(struct vfe_device *vfe, struct vfe_output *output, + u8 enable) +{ + struct vfe_line *line = container_of(output, struct vfe_line, output); + u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat; + u32 reg; + unsigned int i; + + for (i = 0; i < output->wm_num; i++) { + if (i == 0) { + reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_LUMA << + VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT; + } else if (i == 1) { + reg = VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN; + if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV16) + reg |= VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA; + } + + if (output->wm_idx[i] % 2 == 1) + reg <<= 16; + + if (enable) + vfe_reg_set(vfe, + VFE_0_BUS_XBAR_CFG_x(output->wm_idx[i]), + reg); + else + vfe_reg_clr(vfe, + VFE_0_BUS_XBAR_CFG_x(output->wm_idx[i]), + reg); + } +} + +static void vfe_set_rdi_cid(struct vfe_device *vfe, enum vfe_line_id id, u8 cid) +{ + vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(id), + VFE_0_RDI_CFG_x_RDI_M0_SEL_MASK); + + vfe_reg_set(vfe, VFE_0_RDI_CFG_x(id), + cid << VFE_0_RDI_CFG_x_RDI_M0_SEL_SHIFT); +} + +static void vfe_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id) +{ + vfe->reg_update |= VFE_0_REG_UPDATE_line_n(line_id); + wmb(); + writel_relaxed(vfe->reg_update, vfe->base + VFE_0_REG_UPDATE); + wmb(); +} + +static void vfe_enable_irq_wm_line(struct vfe_device *vfe, u8 wm, + enum vfe_line_id line_id, u8 enable) +{ + u32 irq_en0 = VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(wm) | + VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(line_id); + u32 irq_en1 = VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(wm) | + VFE_0_IRQ_MASK_1_RDIn_SOF(line_id); + + if (enable) { + vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0); + vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1); + } else { + vfe_reg_clr(vfe, VFE_0_IRQ_MASK_0, irq_en0); + vfe_reg_clr(vfe, VFE_0_IRQ_MASK_1, irq_en1); + } +} + +static void vfe_enable_irq_pix_line(struct vfe_device *vfe, u8 comp, + enum vfe_line_id line_id, u8 enable) +{ + struct vfe_output *output = &vfe->line[line_id].output; + unsigned int i; + u32 irq_en0; + u32 irq_en1; + u32 comp_mask = 0; + + 
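/* IRQ bits for a PIX line: CAMIF SOF/EOF, composite done and reg update in mask 0, CAMIF error and per write master bus overflow in mask 1, plus the composite mask for the line's write masters */ +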
irq_en0 = VFE_0_IRQ_MASK_0_CAMIF_SOF; + irq_en0 |= VFE_0_IRQ_MASK_0_CAMIF_EOF; + irq_en0 |= VFE_0_IRQ_MASK_0_IMAGE_COMPOSITE_DONE_n(comp); + irq_en0 |= VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(line_id); + irq_en1 = VFE_0_IRQ_MASK_1_CAMIF_ERROR; + for (i = 0; i < output->wm_num; i++) { + irq_en1 |= VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW( + output->wm_idx[i]); + comp_mask |= (1 << output->wm_idx[i]) << comp * 8; + } + + if (enable) { + vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0); + vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1); + vfe_reg_set(vfe, VFE_0_IRQ_COMPOSITE_MASK_0, comp_mask); + } else { + vfe_reg_clr(vfe, VFE_0_IRQ_MASK_0, irq_en0); + vfe_reg_clr(vfe, VFE_0_IRQ_MASK_1, irq_en1); + vfe_reg_clr(vfe, VFE_0_IRQ_COMPOSITE_MASK_0, comp_mask); + } +} + +static void vfe_enable_irq_common(struct vfe_device *vfe) +{ + u32 irq_en0 = VFE_0_IRQ_MASK_0_RESET_ACK; + u32 irq_en1 = VFE_0_IRQ_MASK_1_VIOLATION | + VFE_0_IRQ_MASK_1_BUS_BDG_HALT_ACK; + + vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0); + vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1); +} + +static void vfe_set_demux_cfg(struct vfe_device *vfe, struct vfe_line *line) +{ + u32 val, even_cfg, odd_cfg; + + writel_relaxed(VFE_0_DEMUX_CFG_PERIOD, vfe->base + VFE_0_DEMUX_CFG); + + val = VFE_0_DEMUX_GAIN_0_CH0_EVEN | VFE_0_DEMUX_GAIN_0_CH0_ODD; + writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_0); + + val = VFE_0_DEMUX_GAIN_1_CH1 | VFE_0_DEMUX_GAIN_1_CH2; + writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_1); + + switch (line->fmt[MSM_VFE_PAD_SINK].code) { + case MEDIA_BUS_FMT_YUYV8_2X8: + even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV; + odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV; + break; + case MEDIA_BUS_FMT_YVYU8_2X8: + even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU; + odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU; + break; + case MEDIA_BUS_FMT_UYVY8_2X8: + default: + even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY; + odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY; + break; + case MEDIA_BUS_FMT_VYUY8_2X8: + even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY; + odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY; + break; + } + + writel_relaxed(even_cfg, vfe->base + VFE_0_DEMUX_EVEN_CFG); + writel_relaxed(odd_cfg, vfe->base + VFE_0_DEMUX_ODD_CFG); +} + +static inline u8 vfe_calc_interp_reso(u16 input, u16 output) +{ + if (input / output >= 16) + return 0; + + if (input / output >= 8) + return 1; + + if (input / output >= 4) + return 2; + + return 3; +} + +static void vfe_set_scale_cfg(struct vfe_device *vfe, struct vfe_line *line) +{ + u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat; + u32 reg; + u16 input, output; + u8 interp_reso; + u32 phase_mult; + + writel_relaxed(0x3, vfe->base + VFE_0_SCALE_ENC_Y_CFG); + + input = line->fmt[MSM_VFE_PAD_SINK].width; + output = line->compose.width; + reg = (output << 16) | input; + writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_H_IMAGE_SIZE); + + interp_reso = vfe_calc_interp_reso(input, output); + phase_mult = input * (1 << (13 + interp_reso)) / output; + reg = (interp_reso << 20) | phase_mult; + writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_H_PHASE); + + input = line->fmt[MSM_VFE_PAD_SINK].height; + output = line->compose.height; + reg = (output << 16) | input; + writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_V_IMAGE_SIZE); + + interp_reso = vfe_calc_interp_reso(input, output); + phase_mult = input * (1 << (13 + interp_reso)) / output; + reg = (interp_reso << 20) | phase_mult; + writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_V_PHASE); + + writel_relaxed(0x3, vfe->base + VFE_0_SCALE_ENC_CBCR_CFG); + + input = 
line->fmt[MSM_VFE_PAD_SINK].width; + output = line->compose.width / 2; + reg = (output << 16) | input; + writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_H_IMAGE_SIZE); + + interp_reso = vfe_calc_interp_reso(input, output); + phase_mult = input * (1 << (13 + interp_reso)) / output; + reg = (interp_reso << 20) | phase_mult; + writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_H_PHASE); + + input = line->fmt[MSM_VFE_PAD_SINK].height; + output = line->compose.height; + if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21) + output = line->compose.height / 2; + reg = (output << 16) | input; + writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_V_IMAGE_SIZE); + + interp_reso = vfe_calc_interp_reso(input, output); + phase_mult = input * (1 << (13 + interp_reso)) / output; + reg = (interp_reso << 20) | phase_mult; + writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_V_PHASE); +} + +static void vfe_set_crop_cfg(struct vfe_device *vfe, struct vfe_line *line) +{ + u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat; + u32 reg; + u16 first, last; + + first = line->crop.left; + last = line->crop.left + line->crop.width - 1; + reg = (first << 16) | last; + writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_Y_WIDTH); + + first = line->crop.top; + last = line->crop.top + line->crop.height - 1; + reg = (first << 16) | last; + writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_Y_HEIGHT); + + first = line->crop.left / 2; + last = line->crop.left / 2 + line->crop.width / 2 - 1; + reg = (first << 16) | last; + writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_CBCR_WIDTH); + + first = line->crop.top; + last = line->crop.top + line->crop.height - 1; + if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21) { + first = line->crop.top / 2; + last = line->crop.top / 2 + line->crop.height / 2 - 1; + } + reg = (first << 16) | last; + writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_CBCR_HEIGHT); +} + +static void vfe_set_clamp_cfg(struct vfe_device *vfe) +{ + u32 val = VFE_0_CLAMP_ENC_MAX_CFG_CH0 | + VFE_0_CLAMP_ENC_MAX_CFG_CH1 | + VFE_0_CLAMP_ENC_MAX_CFG_CH2; + + writel_relaxed(val, vfe->base + VFE_0_CLAMP_ENC_MAX_CFG); + + val = VFE_0_CLAMP_ENC_MIN_CFG_CH0 | + VFE_0_CLAMP_ENC_MIN_CFG_CH1 | + VFE_0_CLAMP_ENC_MIN_CFG_CH2; + + writel_relaxed(val, vfe->base + VFE_0_CLAMP_ENC_MIN_CFG); +} + +/* + * vfe_reset - Trigger reset on VFE module and wait to complete + * @vfe: VFE device + * + * Return 0 on success or a negative error code otherwise + */ +static int vfe_reset(struct vfe_device *vfe) +{ + unsigned long time; + + reinit_completion(&vfe->reset_complete); + + vfe_global_reset(vfe); + + time = wait_for_completion_timeout(&vfe->reset_complete, + msecs_to_jiffies(VFE_RESET_TIMEOUT_MS)); + if (!time) { + dev_err(to_device(vfe), "VFE reset timeout\n"); + return -EIO; + } + + return 0; +} + +/* + * vfe_halt - Trigger halt on VFE module and wait to complete + * @vfe: VFE device + * + * Return 0 on success or a negative error code otherwise + */ +static int vfe_halt(struct vfe_device *vfe) +{ + unsigned long time; + + reinit_completion(&vfe->halt_complete); + + writel_relaxed(VFE_0_BUS_BDG_CMD_HALT_REQ, + vfe->base + VFE_0_BUS_BDG_CMD); + + time = wait_for_completion_timeout(&vfe->halt_complete, + msecs_to_jiffies(VFE_HALT_TIMEOUT_MS)); + if (!time) { + dev_err(to_device(vfe), "VFE halt timeout\n"); + return -EIO; + } + + return 0; +} + +static void vfe_init_outputs(struct vfe_device *vfe) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(vfe->line); i++) { + struct vfe_output *output = &vfe->line[i].output; + + output->state = 
VFE_OUTPUT_OFF; + output->buf[0] = NULL; + output->buf[1] = NULL; + INIT_LIST_HEAD(&output->pending_bufs); + + output->wm_num = 1; + if (vfe->line[i].id == VFE_LINE_PIX) + output->wm_num = 2; + } +} + +static void vfe_reset_output_maps(struct vfe_device *vfe) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(vfe->wm_output_map); i++) + vfe->wm_output_map[i] = VFE_LINE_NONE; +} + +static void vfe_set_qos(struct vfe_device *vfe) +{ + u32 val = VFE_0_BUS_BDG_QOS_CFG_0_CFG; + u32 val7 = VFE_0_BUS_BDG_QOS_CFG_7_CFG; + + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_0); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_1); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_2); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_3); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_4); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_5); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_6); + writel_relaxed(val7, vfe->base + VFE_0_BUS_BDG_QOS_CFG_7); +} + +static void vfe_set_cgc_override(struct vfe_device *vfe, u8 wm, u8 enable) +{ + u32 val = VFE_0_CGC_OVERRIDE_1_IMAGE_Mx_CGC_OVERRIDE(wm); + + if (enable) + vfe_reg_set(vfe, VFE_0_CGC_OVERRIDE_1, val); + else + vfe_reg_clr(vfe, VFE_0_CGC_OVERRIDE_1, val); + + wmb(); +} + +static void vfe_set_module_cfg(struct vfe_device *vfe, u8 enable) +{ + u32 val = VFE_0_MODULE_CFG_DEMUX | + VFE_0_MODULE_CFG_CHROMA_UPSAMPLE | + VFE_0_MODULE_CFG_SCALE_ENC | + VFE_0_MODULE_CFG_CROP_ENC; + + if (enable) + writel_relaxed(val, vfe->base + VFE_0_MODULE_CFG); + else + writel_relaxed(0x0, vfe->base + VFE_0_MODULE_CFG); +} + +static void vfe_set_camif_cfg(struct vfe_device *vfe, struct vfe_line *line) +{ + u32 val; + + switch (line->fmt[MSM_VFE_PAD_SINK].code) { + case MEDIA_BUS_FMT_YUYV8_2X8: + val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR; + break; + case MEDIA_BUS_FMT_YVYU8_2X8: + val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB; + break; + case MEDIA_BUS_FMT_UYVY8_2X8: + default: + val = VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY; + break; + case MEDIA_BUS_FMT_VYUY8_2X8: + val = VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY; + break; + } + + writel_relaxed(val, vfe->base + VFE_0_CORE_CFG); + + val = line->fmt[MSM_VFE_PAD_SINK].width * 2; + val |= line->fmt[MSM_VFE_PAD_SINK].height << 16; + writel_relaxed(val, vfe->base + VFE_0_CAMIF_FRAME_CFG); + + val = line->fmt[MSM_VFE_PAD_SINK].width * 2 - 1; + writel_relaxed(val, vfe->base + VFE_0_CAMIF_WINDOW_WIDTH_CFG); + + val = line->fmt[MSM_VFE_PAD_SINK].height - 1; + writel_relaxed(val, vfe->base + VFE_0_CAMIF_WINDOW_HEIGHT_CFG); + + val = 0xffffffff; + writel_relaxed(val, vfe->base + VFE_0_CAMIF_SUBSAMPLE_CFG_0); + + val = 0xffffffff; + writel_relaxed(val, vfe->base + VFE_0_CAMIF_IRQ_SUBSAMPLE_PATTERN); + + val = VFE_0_RDI_CFG_x_MIPI_EN_BITS; + vfe_reg_set(vfe, VFE_0_RDI_CFG_x(0), val); + + val = VFE_0_CAMIF_CFG_VFE_OUTPUT_EN; + writel_relaxed(val, vfe->base + VFE_0_CAMIF_CFG); +} + +static void vfe_set_camif_cmd(struct vfe_device *vfe, u32 cmd) +{ + writel_relaxed(VFE_0_CAMIF_CMD_CLEAR_CAMIF_STATUS, + vfe->base + VFE_0_CAMIF_CMD); + + writel_relaxed(cmd, vfe->base + VFE_0_CAMIF_CMD); +} + +static int vfe_camif_wait_for_stop(struct vfe_device *vfe) +{ + u32 val; + int ret; + + ret = readl_poll_timeout(vfe->base + VFE_0_CAMIF_STATUS, + val, + (val & VFE_0_CAMIF_STATUS_HALT), + CAMIF_TIMEOUT_SLEEP_US, + CAMIF_TIMEOUT_ALL_US); + if (ret < 0) + dev_err(to_device(vfe), "%s: camif stop timeout\n", __func__); + + return ret; +} + +static void vfe_output_init_addrs(struct vfe_device *vfe, + struct vfe_output *output, u8 
sync) +{ + u32 ping_addr; + u32 pong_addr; + unsigned int i; + + output->active_buf = 0; + + for (i = 0; i < output->wm_num; i++) { + if (output->buf[0]) + ping_addr = output->buf[0]->addr[i]; + else + ping_addr = 0; + + if (output->buf[1]) + pong_addr = output->buf[1]->addr[i]; + else + pong_addr = ping_addr; + + vfe_wm_set_ping_addr(vfe, output->wm_idx[i], ping_addr); + vfe_wm_set_pong_addr(vfe, output->wm_idx[i], pong_addr); + if (sync) + vfe_bus_reload_wm(vfe, output->wm_idx[i]); + } +} + +static void vfe_output_update_ping_addr(struct vfe_device *vfe, + struct vfe_output *output, u8 sync) +{ + u32 addr; + unsigned int i; + + for (i = 0; i < output->wm_num; i++) { + if (output->buf[0]) + addr = output->buf[0]->addr[i]; + else + addr = 0; + + vfe_wm_set_ping_addr(vfe, output->wm_idx[i], addr); + if (sync) + vfe_bus_reload_wm(vfe, output->wm_idx[i]); + } +} + +static void vfe_output_update_pong_addr(struct vfe_device *vfe, + struct vfe_output *output, u8 sync) +{ + u32 addr; + unsigned int i; + + for (i = 0; i < output->wm_num; i++) { + if (output->buf[1]) + addr = output->buf[1]->addr[i]; + else + addr = 0; + + vfe_wm_set_pong_addr(vfe, output->wm_idx[i], addr); + if (sync) + vfe_bus_reload_wm(vfe, output->wm_idx[i]); + } + +} + +static int vfe_reserve_wm(struct vfe_device *vfe, enum vfe_line_id line_id) +{ + int ret = -EBUSY; + int i; + + for (i = 0; i < ARRAY_SIZE(vfe->wm_output_map); i++) { + if (vfe->wm_output_map[i] == VFE_LINE_NONE) { + vfe->wm_output_map[i] = line_id; + ret = i; + break; + } + } + + return ret; +} + +static int vfe_release_wm(struct vfe_device *vfe, u8 wm) +{ + if (wm >= ARRAY_SIZE(vfe->wm_output_map)) + return -EINVAL; + + vfe->wm_output_map[wm] = VFE_LINE_NONE; + + return 0; +} + +static void vfe_output_frame_drop(struct vfe_device *vfe, + struct vfe_output *output, + u32 drop_pattern) +{ + u8 drop_period; + unsigned int i; + + /* We need to toggle update period to be valid on next frame */ + output->drop_update_idx++; + output->drop_update_idx %= VFE_FRAME_DROP_UPDATES; + drop_period = VFE_FRAME_DROP_VAL + output->drop_update_idx; + + for (i = 0; i < output->wm_num; i++) { + vfe_wm_set_framedrop_period(vfe, output->wm_idx[i], + drop_period); + vfe_wm_set_framedrop_pattern(vfe, output->wm_idx[i], + drop_pattern); + } + vfe_reg_update(vfe, container_of(output, struct vfe_line, output)->id); +} + +static struct camss_buffer *vfe_buf_get_pending(struct vfe_output *output) +{ + struct camss_buffer *buffer = NULL; + + if (!list_empty(&output->pending_bufs)) { + buffer = list_first_entry(&output->pending_bufs, + struct camss_buffer, + queue); + list_del(&buffer->queue); + } + + return buffer; +} + +/* + * vfe_buf_add_pending - Add output buffer to list of pending + * @output: VFE output + * @buffer: Video buffer + */ +static void vfe_buf_add_pending(struct vfe_output *output, + struct camss_buffer *buffer) +{ + INIT_LIST_HEAD(&buffer->queue); + list_add_tail(&buffer->queue, &output->pending_bufs); +} + +/* + * vfe_buf_flush_pending - Flush all pending buffers. 
+ * @output: VFE output + * @state: vb2 buffer state + */ +static void vfe_buf_flush_pending(struct vfe_output *output, + enum vb2_buffer_state state) +{ + struct camss_buffer *buf; + struct camss_buffer *t; + + list_for_each_entry_safe(buf, t, &output->pending_bufs, queue) { + vb2_buffer_done(&buf->vb.vb2_buf, state); + list_del(&buf->queue); + } +} + +static void vfe_buf_update_wm_on_next(struct vfe_device *vfe, + struct vfe_output *output) +{ + switch (output->state) { + case VFE_OUTPUT_CONTINUOUS: + vfe_output_frame_drop(vfe, output, 3); + break; + case VFE_OUTPUT_SINGLE: + default: + dev_err_ratelimited(to_device(vfe), + "Next buf in wrong state! %d\n", + output->state); + break; + } +} + +static void vfe_buf_update_wm_on_last(struct vfe_device *vfe, + struct vfe_output *output) +{ + switch (output->state) { + case VFE_OUTPUT_CONTINUOUS: + output->state = VFE_OUTPUT_SINGLE; + vfe_output_frame_drop(vfe, output, 1); + break; + case VFE_OUTPUT_SINGLE: + output->state = VFE_OUTPUT_STOPPING; + vfe_output_frame_drop(vfe, output, 0); + break; + default: + dev_err_ratelimited(to_device(vfe), + "Last buff in wrong state! %d\n", + output->state); + break; + } +} + +static void vfe_buf_update_wm_on_new(struct vfe_device *vfe, + struct vfe_output *output, + struct camss_buffer *new_buf) +{ + int inactive_idx; + + switch (output->state) { + case VFE_OUTPUT_SINGLE: + inactive_idx = !output->active_buf; + + if (!output->buf[inactive_idx]) { + output->buf[inactive_idx] = new_buf; + + if (inactive_idx) + vfe_output_update_pong_addr(vfe, output, 0); + else + vfe_output_update_ping_addr(vfe, output, 0); + + vfe_output_frame_drop(vfe, output, 3); + output->state = VFE_OUTPUT_CONTINUOUS; + } else { + vfe_buf_add_pending(output, new_buf); + dev_err_ratelimited(to_device(vfe), + "Inactive buffer is busy\n"); + } + break; + + case VFE_OUTPUT_IDLE: + if (!output->buf[0]) { + output->buf[0] = new_buf; + + vfe_output_init_addrs(vfe, output, 1); + + vfe_output_frame_drop(vfe, output, 1); + output->state = VFE_OUTPUT_SINGLE; + } else { + vfe_buf_add_pending(output, new_buf); + dev_err_ratelimited(to_device(vfe), + "Output idle with buffer set!\n"); + } + break; + + case VFE_OUTPUT_CONTINUOUS: + default: + vfe_buf_add_pending(output, new_buf); + break; + } +} + +static int vfe_get_output(struct vfe_line *line) +{ + struct vfe_device *vfe = to_vfe(line); + struct vfe_output *output; + unsigned long flags; + int i; + int wm_idx; + + spin_lock_irqsave(&vfe->output_lock, flags); + + output = &line->output; + if (output->state != VFE_OUTPUT_OFF) { + dev_err(to_device(vfe), "Output is running\n"); + goto error; + } + output->state = VFE_OUTPUT_RESERVED; + + output->active_buf = 0; + + for (i = 0; i < output->wm_num; i++) { + wm_idx = vfe_reserve_wm(vfe, line->id); + if (wm_idx < 0) { + dev_err(to_device(vfe), "Can not reserve wm\n"); + goto error_get_wm; + } + output->wm_idx[i] = wm_idx; + } + + output->drop_update_idx = 0; + + spin_unlock_irqrestore(&vfe->output_lock, flags); + + return 0; + +error_get_wm: + for (i--; i >= 0; i--) + vfe_release_wm(vfe, output->wm_idx[i]); + output->state = VFE_OUTPUT_OFF; +error: + spin_unlock_irqrestore(&vfe->output_lock, flags); + + return -EINVAL; +} + +static int vfe_put_output(struct vfe_line *line) +{ + struct vfe_device *vfe = to_vfe(line); + struct vfe_output *output = &line->output; + unsigned long flags; + unsigned int i; + + spin_lock_irqsave(&vfe->output_lock, flags); + + for (i = 0; i < output->wm_num; i++) + vfe_release_wm(vfe, output->wm_idx[i]); + + output->state = 
VFE_OUTPUT_OFF; + + spin_unlock_irqrestore(&vfe->output_lock, flags); + return 0; +} + +static int vfe_enable_output(struct vfe_line *line) +{ + struct vfe_device *vfe = to_vfe(line); + struct vfe_output *output = &line->output; + unsigned long flags; + unsigned int i; + u16 ub_size; + + switch (vfe->id) { + case 0: + ub_size = MSM_VFE_VFE0_UB_SIZE_RDI; + break; + case 1: + ub_size = MSM_VFE_VFE1_UB_SIZE_RDI; + break; + default: + return -EINVAL; + } + + spin_lock_irqsave(&vfe->output_lock, flags); + + vfe->reg_update &= ~VFE_0_REG_UPDATE_line_n(line->id); + + if (output->state != VFE_OUTPUT_RESERVED) { + dev_err(to_device(vfe), "Output is not in reserved state %d\n", + output->state); + spin_unlock_irqrestore(&vfe->output_lock, flags); + return -EINVAL; + } + output->state = VFE_OUTPUT_IDLE; + + output->buf[0] = vfe_buf_get_pending(output); + output->buf[1] = vfe_buf_get_pending(output); + + if (!output->buf[0] && output->buf[1]) { + output->buf[0] = output->buf[1]; + output->buf[1] = NULL; + } + + if (output->buf[0]) + output->state = VFE_OUTPUT_SINGLE; + + if (output->buf[1]) + output->state = VFE_OUTPUT_CONTINUOUS; + + switch (output->state) { + case VFE_OUTPUT_SINGLE: + vfe_output_frame_drop(vfe, output, 1); + break; + case VFE_OUTPUT_CONTINUOUS: + vfe_output_frame_drop(vfe, output, 3); + break; + default: + vfe_output_frame_drop(vfe, output, 0); + break; + } + + output->sequence = 0; + output->wait_sof = 0; + output->wait_reg_update = 0; + reinit_completion(&output->sof); + reinit_completion(&output->reg_update); + + vfe_output_init_addrs(vfe, output, 0); + + if (line->id != VFE_LINE_PIX) { + vfe_set_cgc_override(vfe, output->wm_idx[0], 1); + vfe_enable_irq_wm_line(vfe, output->wm_idx[0], line->id, 1); + vfe_bus_connect_wm_to_rdi(vfe, output->wm_idx[0], line->id); + vfe_wm_set_subsample(vfe, output->wm_idx[0]); + vfe_set_rdi_cid(vfe, line->id, 0); + vfe_wm_set_ub_cfg(vfe, output->wm_idx[0], + (ub_size + 1) * output->wm_idx[0], ub_size); + vfe_wm_frame_based(vfe, output->wm_idx[0], 1); + vfe_wm_enable(vfe, output->wm_idx[0], 1); + vfe_bus_reload_wm(vfe, output->wm_idx[0]); + } else { + ub_size /= output->wm_num; + for (i = 0; i < output->wm_num; i++) { + vfe_set_cgc_override(vfe, output->wm_idx[i], 1); + vfe_wm_set_subsample(vfe, output->wm_idx[i]); + vfe_wm_set_ub_cfg(vfe, output->wm_idx[i], + (ub_size + 1) * output->wm_idx[i], + ub_size); + vfe_wm_line_based(vfe, output->wm_idx[i], + &line->video_out.active_fmt.fmt.pix_mp, + i, 1); + vfe_wm_enable(vfe, output->wm_idx[i], 1); + vfe_bus_reload_wm(vfe, output->wm_idx[i]); + } + vfe_enable_irq_pix_line(vfe, 0, line->id, 1); + vfe_set_module_cfg(vfe, 1); + vfe_set_camif_cfg(vfe, line); + vfe_set_xbar_cfg(vfe, output, 1); + vfe_set_demux_cfg(vfe, line); + vfe_set_scale_cfg(vfe, line); + vfe_set_crop_cfg(vfe, line); + vfe_set_clamp_cfg(vfe); + vfe_set_camif_cmd(vfe, VFE_0_CAMIF_CMD_ENABLE_FRAME_BOUNDARY); + } + + vfe_reg_update(vfe, line->id); + + spin_unlock_irqrestore(&vfe->output_lock, flags); + + return 0; +} + +static int vfe_disable_output(struct vfe_line *line) +{ + struct vfe_device *vfe = to_vfe(line); + struct vfe_output *output = &line->output; + unsigned long flags; + unsigned long time; + unsigned int i; + + spin_lock_irqsave(&vfe->output_lock, flags); + + output->wait_sof = 1; + spin_unlock_irqrestore(&vfe->output_lock, flags); + + time = wait_for_completion_timeout(&output->sof, + msecs_to_jiffies(VFE_NEXT_SOF_MS)); + if (!time) + dev_err(to_device(vfe), "VFE sof timeout\n"); + + spin_lock_irqsave(&vfe->output_lock, flags); 
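/*
 * Illustrative sketch (not part of this patch): it reproduces the arithmetic
 * vfe_enable_output() above uses to carve the UB space into per-write-master
 * slices, (ub_size + 1) words apart and ub_size words deep. The struct and
 * function names are hypothetical; the total in the usage note is the VFE0
 * RDI share from camss-vfe.h.
 */
#include <stdint.h>

struct example_ub_slice {
	uint16_t offset;	/* first UB word handed to this write master */
	uint16_t depth;		/* number of UB words it may use */
};

static struct example_ub_slice example_ub_slice_for_wm(uint16_t ub_size,
						       uint8_t wm_num,
						       uint8_t wm_idx)
{
	struct example_ub_slice s;
	uint16_t share = ub_size / wm_num;	/* equal share per write master */

	s.offset = (share + 1) * wm_idx;
	s.depth = share;

	return s;
}
/*
 * With ub_size = 341 (MSM_VFE_VFE0_UB_SIZE_RDI), one write master and
 * wm_idx = 0 this yields { .offset = 0, .depth = 341 }; the PIX line splits
 * its share between its two write masters in the same way.
 */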
+ for (i = 0; i < output->wm_num; i++) + vfe_wm_enable(vfe, output->wm_idx[i], 0); + + vfe_reg_update(vfe, line->id); + output->wait_reg_update = 1; + spin_unlock_irqrestore(&vfe->output_lock, flags); + + time = wait_for_completion_timeout(&output->reg_update, + msecs_to_jiffies(VFE_NEXT_SOF_MS)); + if (!time) + dev_err(to_device(vfe), "VFE reg update timeout\n"); + + spin_lock_irqsave(&vfe->output_lock, flags); + + if (line->id != VFE_LINE_PIX) { + vfe_wm_frame_based(vfe, output->wm_idx[0], 0); + vfe_bus_disconnect_wm_from_rdi(vfe, output->wm_idx[0], line->id); + vfe_enable_irq_wm_line(vfe, output->wm_idx[0], line->id, 0); + vfe_set_cgc_override(vfe, output->wm_idx[0], 0); + spin_unlock_irqrestore(&vfe->output_lock, flags); + } else { + for (i = 0; i < output->wm_num; i++) { + vfe_wm_line_based(vfe, output->wm_idx[i], NULL, i, 0); + vfe_set_cgc_override(vfe, output->wm_idx[i], 0); + } + + vfe_enable_irq_pix_line(vfe, 0, line->id, 0); + vfe_set_module_cfg(vfe, 0); + vfe_set_xbar_cfg(vfe, output, 0); + + vfe_set_camif_cmd(vfe, VFE_0_CAMIF_CMD_DISABLE_FRAME_BOUNDARY); + spin_unlock_irqrestore(&vfe->output_lock, flags); + + vfe_camif_wait_for_stop(vfe); + } + + return 0; +} + +/* + * vfe_enable - Enable streaming on VFE line + * @line: VFE line + * + * Return 0 on success or a negative error code otherwise + */ +static int vfe_enable(struct vfe_line *line) +{ + struct vfe_device *vfe = to_vfe(line); + int ret; + + mutex_lock(&vfe->stream_lock); + + if (!vfe->stream_count) { + vfe_enable_irq_common(vfe); + + vfe_bus_enable_wr_if(vfe, 1); + + vfe_set_qos(vfe); + } + + vfe->stream_count++; + + mutex_unlock(&vfe->stream_lock); + + ret = vfe_get_output(line); + if (ret < 0) + goto error_get_output; + + ret = vfe_enable_output(line); + if (ret < 0) + goto error_enable_output; + + vfe->was_streaming = 1; + + return 0; + + +error_enable_output: + vfe_put_output(line); + +error_get_output: + mutex_lock(&vfe->stream_lock); + + if (vfe->stream_count == 1) + vfe_bus_enable_wr_if(vfe, 0); + + vfe->stream_count--; + + mutex_unlock(&vfe->stream_lock); + + return ret; +} + +/* + * vfe_disable - Disable streaming on VFE line + * @line: VFE line + * + * Return 0 on success or a negative error code otherwise + */ +static int vfe_disable(struct vfe_line *line) +{ + struct vfe_device *vfe = to_vfe(line); + + vfe_disable_output(line); + + vfe_put_output(line); + + mutex_lock(&vfe->stream_lock); + + if (vfe->stream_count == 1) + vfe_bus_enable_wr_if(vfe, 0); + + vfe->stream_count--; + + mutex_unlock(&vfe->stream_lock); + + return 0; +} + +/* + * vfe_isr_sof - Process start of frame interrupt + * @vfe: VFE Device + * @line_id: VFE line + */ +static void vfe_isr_sof(struct vfe_device *vfe, enum vfe_line_id line_id) +{ + struct vfe_output *output; + unsigned long flags; + + spin_lock_irqsave(&vfe->output_lock, flags); + output = &vfe->line[line_id].output; + if (output->wait_sof) { + output->wait_sof = 0; + complete(&output->sof); + } + spin_unlock_irqrestore(&vfe->output_lock, flags); +} + +/* + * vfe_isr_reg_update - Process reg update interrupt + * @vfe: VFE Device + * @line_id: VFE line + */ +static void vfe_isr_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id) +{ + struct vfe_output *output; + unsigned long flags; + + spin_lock_irqsave(&vfe->output_lock, flags); + vfe->reg_update &= ~VFE_0_REG_UPDATE_line_n(line_id); + + output = &vfe->line[line_id].output; + + if (output->wait_reg_update) { + output->wait_reg_update = 0; + complete(&output->reg_update); + spin_unlock_irqrestore(&vfe->output_lock, 
flags); + return; + } + + if (output->state == VFE_OUTPUT_STOPPING) { + /* Release last buffer when hw is idle */ + if (output->last_buffer) { + vb2_buffer_done(&output->last_buffer->vb.vb2_buf, + VB2_BUF_STATE_DONE); + output->last_buffer = NULL; + } + output->state = VFE_OUTPUT_IDLE; + + /* Buffers received in stopping state are queued in */ + /* dma pending queue, start next capture here */ + + output->buf[0] = vfe_buf_get_pending(output); + output->buf[1] = vfe_buf_get_pending(output); + + if (!output->buf[0] && output->buf[1]) { + output->buf[0] = output->buf[1]; + output->buf[1] = NULL; + } + + if (output->buf[0]) + output->state = VFE_OUTPUT_SINGLE; + + if (output->buf[1]) + output->state = VFE_OUTPUT_CONTINUOUS; + + switch (output->state) { + case VFE_OUTPUT_SINGLE: + vfe_output_frame_drop(vfe, output, 2); + break; + case VFE_OUTPUT_CONTINUOUS: + vfe_output_frame_drop(vfe, output, 3); + break; + default: + vfe_output_frame_drop(vfe, output, 0); + break; + } + + vfe_output_init_addrs(vfe, output, 1); + } + + spin_unlock_irqrestore(&vfe->output_lock, flags); +} + +/* + * vfe_isr_wm_done - Process write master done interrupt + * @vfe: VFE Device + * @wm: Write master id + */ +static void vfe_isr_wm_done(struct vfe_device *vfe, u8 wm) +{ + struct camss_buffer *ready_buf; + struct vfe_output *output; + dma_addr_t *new_addr; + unsigned long flags; + u32 active_index; + u64 ts = ktime_get_ns(); + unsigned int i; + + active_index = vfe_wm_get_ping_pong_status(vfe, wm); + + spin_lock_irqsave(&vfe->output_lock, flags); + + if (vfe->wm_output_map[wm] == VFE_LINE_NONE) { + dev_err_ratelimited(to_device(vfe), + "Received wm done for unmapped index\n"); + goto out_unlock; + } + output = &vfe->line[vfe->wm_output_map[wm]].output; + + if (output->active_buf == active_index) { + dev_err_ratelimited(to_device(vfe), + "Active buffer mismatch!\n"); + goto out_unlock; + } + output->active_buf = active_index; + + ready_buf = output->buf[!active_index]; + if (!ready_buf) { + dev_err_ratelimited(to_device(vfe), + "Missing ready buf %d %d!\n", + !active_index, output->state); + goto out_unlock; + } + + ready_buf->vb.vb2_buf.timestamp = ts; + ready_buf->vb.sequence = output->sequence++; + + /* Get next buffer */ + output->buf[!active_index] = vfe_buf_get_pending(output); + if (!output->buf[!active_index]) { + /* No next buffer - set same address */ + new_addr = ready_buf->addr; + vfe_buf_update_wm_on_last(vfe, output); + } else { + new_addr = output->buf[!active_index]->addr; + vfe_buf_update_wm_on_next(vfe, output); + } + + if (active_index) + for (i = 0; i < output->wm_num; i++) + vfe_wm_set_ping_addr(vfe, output->wm_idx[i], + new_addr[i]); + else + for (i = 0; i < output->wm_num; i++) + vfe_wm_set_pong_addr(vfe, output->wm_idx[i], + new_addr[i]); + + spin_unlock_irqrestore(&vfe->output_lock, flags); + + if (output->state == VFE_OUTPUT_STOPPING) + output->last_buffer = ready_buf; + else + vb2_buffer_done(&ready_buf->vb.vb2_buf, VB2_BUF_STATE_DONE); + + return; + +out_unlock: + spin_unlock_irqrestore(&vfe->output_lock, flags); +} + +/* + * vfe_isr_wm_done - Process composite image done interrupt + * @vfe: VFE Device + * @comp: Composite image id + */ +static void vfe_isr_comp_done(struct vfe_device *vfe, u8 comp) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(vfe->wm_output_map); i++) + if (vfe->wm_output_map[i] == VFE_LINE_PIX) { + vfe_isr_wm_done(vfe, i); + break; + } +} + +/* + * vfe_isr - ISPIF module interrupt handler + * @irq: Interrupt line + * @dev: VFE device + * + * Return IRQ_HANDLED on 
success + */ +static irqreturn_t vfe_isr(int irq, void *dev) +{ + struct vfe_device *vfe = dev; + u32 value0, value1; + u32 violation; + int i, j; + + value0 = readl_relaxed(vfe->base + VFE_0_IRQ_STATUS_0); + value1 = readl_relaxed(vfe->base + VFE_0_IRQ_STATUS_1); + + writel_relaxed(value0, vfe->base + VFE_0_IRQ_CLEAR_0); + writel_relaxed(value1, vfe->base + VFE_0_IRQ_CLEAR_1); + + wmb(); + writel_relaxed(VFE_0_IRQ_CMD_GLOBAL_CLEAR, vfe->base + VFE_0_IRQ_CMD); + + if (value0 & VFE_0_IRQ_STATUS_0_RESET_ACK) + complete(&vfe->reset_complete); + + if (value1 & VFE_0_IRQ_STATUS_1_VIOLATION) { + violation = readl_relaxed(vfe->base + VFE_0_VIOLATION_STATUS); + dev_err_ratelimited(to_device(vfe), + "VFE: violation = 0x%08x\n", violation); + } + + if (value1 & VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK) { + complete(&vfe->halt_complete); + writel_relaxed(0x0, vfe->base + VFE_0_BUS_BDG_CMD); + } + + for (i = VFE_LINE_RDI0; i <= VFE_LINE_PIX; i++) + if (value0 & VFE_0_IRQ_STATUS_0_line_n_REG_UPDATE(i)) + vfe_isr_reg_update(vfe, i); + + if (value0 & VFE_0_IRQ_STATUS_0_CAMIF_SOF) + vfe_isr_sof(vfe, VFE_LINE_PIX); + + for (i = VFE_LINE_RDI0; i <= VFE_LINE_RDI2; i++) + if (value1 & VFE_0_IRQ_STATUS_1_RDIn_SOF(i)) + vfe_isr_sof(vfe, i); + + for (i = 0; i < MSM_VFE_COMPOSITE_IRQ_NUM; i++) + if (value0 & VFE_0_IRQ_STATUS_0_IMAGE_COMPOSITE_DONE_n(i)) { + vfe_isr_comp_done(vfe, i); + for (j = 0; j < ARRAY_SIZE(vfe->wm_output_map); j++) + if (vfe->wm_output_map[j] == VFE_LINE_PIX) + value0 &= ~VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(j); + } + + for (i = 0; i < MSM_VFE_IMAGE_MASTERS_NUM; i++) + if (value0 & VFE_0_IRQ_STATUS_0_IMAGE_MASTER_n_PING_PONG(i)) + vfe_isr_wm_done(vfe, i); + + return IRQ_HANDLED; +} + +/* + * vfe_set_clock_rates - Calculate and set clock rates on VFE module + * @vfe: VFE device + * + * Return 0 on success or a negative error code otherwise + */ +static int vfe_set_clock_rates(struct vfe_device *vfe) +{ + struct device *dev = to_device(vfe); + u32 pixel_clock[MSM_VFE_LINE_NUM]; + int i, j; + int ret; + + for (i = VFE_LINE_RDI0; i <= VFE_LINE_PIX; i++) { + ret = camss_get_pixel_clock(&vfe->line[i].subdev.entity, + &pixel_clock[i]); + if (ret) + pixel_clock[i] = 0; + } + + for (i = 0; i < vfe->nclocks; i++) { + struct camss_clock *clock = &vfe->clock[i]; + + if (!strcmp(clock->name, "camss_vfe_vfe")) { + u64 min_rate = 0; + long rate; + + for (j = VFE_LINE_RDI0; j <= VFE_LINE_PIX; j++) { + u32 tmp; + u8 bpp; + + if (j == VFE_LINE_PIX) { + tmp = pixel_clock[j]; + } else { + bpp = vfe_get_bpp(vfe->line[j]. 
+ fmt[MSM_VFE_PAD_SINK].code); + tmp = pixel_clock[j] * bpp / 64; + } + + if (min_rate < tmp) + min_rate = tmp; + } + + camss_add_clock_margin(&min_rate); + + for (j = 0; j < clock->nfreqs; j++) + if (min_rate < clock->freq[j]) + break; + + if (j == clock->nfreqs) { + dev_err(dev, + "Pixel clock is too high for VFE"); + return -EINVAL; + } + + /* if sensor pixel clock is not available */ + /* set highest possible VFE clock rate */ + if (min_rate == 0) + j = clock->nfreqs - 1; + + rate = clk_round_rate(clock->clk, clock->freq[j]); + if (rate < 0) { + dev_err(dev, "clk round rate failed: %ld\n", + rate); + return -EINVAL; + } + + ret = clk_set_rate(clock->clk, rate); + if (ret < 0) { + dev_err(dev, "clk set rate failed: %d\n", ret); + return ret; + } + } + } + + return 0; +} + +/* + * vfe_check_clock_rates - Check current clock rates on VFE module + * @vfe: VFE device + * + * Return 0 if current clock rates are suitable for a new pipeline + * or a negative error code otherwise + */ +static int vfe_check_clock_rates(struct vfe_device *vfe) +{ + u32 pixel_clock[MSM_VFE_LINE_NUM]; + int i, j; + int ret; + + for (i = VFE_LINE_RDI0; i <= VFE_LINE_PIX; i++) { + ret = camss_get_pixel_clock(&vfe->line[i].subdev.entity, + &pixel_clock[i]); + if (ret) + pixel_clock[i] = 0; + } + + for (i = 0; i < vfe->nclocks; i++) { + struct camss_clock *clock = &vfe->clock[i]; + + if (!strcmp(clock->name, "camss_vfe_vfe")) { + u64 min_rate = 0; + unsigned long rate; + + for (j = VFE_LINE_RDI0; j <= VFE_LINE_PIX; j++) { + u32 tmp; + u8 bpp; + + if (j == VFE_LINE_PIX) { + tmp = pixel_clock[j]; + } else { + bpp = vfe_get_bpp(vfe->line[j]. + fmt[MSM_VFE_PAD_SINK].code); + tmp = pixel_clock[j] * bpp / 64; + } + + if (min_rate < tmp) + min_rate = tmp; + } + + camss_add_clock_margin(&min_rate); + + rate = clk_get_rate(clock->clk); + if (rate < min_rate) + return -EBUSY; + } + } + + return 0; +} + +/* + * vfe_get - Power up and reset VFE module + * @vfe: VFE Device + * + * Return 0 on success or a negative error code otherwise + */ +static int vfe_get(struct vfe_device *vfe) +{ + int ret; + + mutex_lock(&vfe->power_lock); + + if (vfe->power_count == 0) { + ret = vfe_set_clock_rates(vfe); + if (ret < 0) + goto error_clocks; + + ret = camss_enable_clocks(vfe->nclocks, vfe->clock, + to_device(vfe)); + if (ret < 0) + goto error_clocks; + + ret = vfe_reset(vfe); + if (ret < 0) + goto error_reset; + + vfe_reset_output_maps(vfe); + + vfe_init_outputs(vfe); + } else { + ret = vfe_check_clock_rates(vfe); + if (ret < 0) + goto error_clocks; + } + vfe->power_count++; + + mutex_unlock(&vfe->power_lock); + + return 0; + +error_reset: + camss_disable_clocks(vfe->nclocks, vfe->clock); + +error_clocks: + mutex_unlock(&vfe->power_lock); + + return ret; +} + +/* + * vfe_put - Power down VFE module + * @vfe: VFE Device + */ +static void vfe_put(struct vfe_device *vfe) +{ + mutex_lock(&vfe->power_lock); + + if (vfe->power_count == 0) { + dev_err(to_device(vfe), "vfe power off on power_count == 0\n"); + goto exit; + } else if (vfe->power_count == 1) { + if (vfe->was_streaming) { + vfe->was_streaming = 0; + vfe_halt(vfe); + } + camss_disable_clocks(vfe->nclocks, vfe->clock); + } + + vfe->power_count--; + +exit: + mutex_unlock(&vfe->power_lock); +} + +/* + * vfe_video_pad_to_line - Get pointer to VFE line by media pad + * @pad: Media pad + * + * Return pointer to vfe line structure + */ +static struct vfe_line *vfe_video_pad_to_line(struct media_pad *pad) +{ + struct media_pad *vfe_pad; + struct v4l2_subdev *subdev; + + vfe_pad = 
media_entity_remote_pad(pad); + if (vfe_pad == NULL) + return NULL; + + subdev = media_entity_to_v4l2_subdev(vfe_pad->entity); + + return container_of(subdev, struct vfe_line, subdev); +} + +/* + * vfe_queue_buffer - Add empty buffer + * @vid: Video device structure + * @buf: Buffer to be enqueued + * + * Add an empty buffer - depending on the current number of buffers it will be + * put in pending buffer queue or directly given to the hardware to be filled. + * + * Return 0 on success or a negative error code otherwise + */ +static int vfe_queue_buffer(struct camss_video *vid, + struct camss_buffer *buf) +{ + struct vfe_device *vfe = &vid->camss->vfe; + struct vfe_line *line; + struct vfe_output *output; + unsigned long flags; + + line = vfe_video_pad_to_line(&vid->pad); + if (!line) { + dev_err(to_device(vfe), "Can not queue buffer\n"); + return -1; + } + output = &line->output; + + spin_lock_irqsave(&vfe->output_lock, flags); + + vfe_buf_update_wm_on_new(vfe, output, buf); + + spin_unlock_irqrestore(&vfe->output_lock, flags); + + return 0; +} + +/* + * vfe_flush_buffers - Return all vb2 buffers + * @vid: Video device structure + * @state: vb2 buffer state of the returned buffers + * + * Return all buffers to vb2. This includes queued pending buffers (still + * unused) and any buffers given to the hardware but again still not used. + * + * Return 0 on success or a negative error code otherwise + */ +static int vfe_flush_buffers(struct camss_video *vid, + enum vb2_buffer_state state) +{ + struct vfe_device *vfe = &vid->camss->vfe; + struct vfe_line *line; + struct vfe_output *output; + unsigned long flags; + + line = vfe_video_pad_to_line(&vid->pad); + if (!line) { + dev_err(to_device(vfe), "Can not flush buffers\n"); + return -1; + } + output = &line->output; + + spin_lock_irqsave(&vfe->output_lock, flags); + + vfe_buf_flush_pending(output, state); + + if (output->buf[0]) + vb2_buffer_done(&output->buf[0]->vb.vb2_buf, state); + + if (output->buf[1]) + vb2_buffer_done(&output->buf[1]->vb.vb2_buf, state); + + if (output->last_buffer) { + vb2_buffer_done(&output->last_buffer->vb.vb2_buf, state); + output->last_buffer = NULL; + } + + spin_unlock_irqrestore(&vfe->output_lock, flags); + + return 0; +} + +/* + * vfe_set_power - Power on/off VFE module + * @sd: VFE V4L2 subdevice + * @on: Requested power state + * + * Return 0 on success or a negative error code otherwise + */ +static int vfe_set_power(struct v4l2_subdev *sd, int on) +{ + struct vfe_line *line = v4l2_get_subdevdata(sd); + struct vfe_device *vfe = to_vfe(line); + int ret; + + if (on) { + u32 hw_version; + + ret = vfe_get(vfe); + if (ret < 0) + return ret; + + hw_version = readl_relaxed(vfe->base + VFE_0_HW_VERSION); + dev_dbg(to_device(vfe), + "VFE HW Version = 0x%08x\n", hw_version); + } else { + vfe_put(vfe); + } + + return 0; +} + +/* + * vfe_set_stream - Enable/disable streaming on VFE module + * @sd: VFE V4L2 subdevice + * @enable: Requested streaming state + * + * Main configuration of VFE module is triggered here. 
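/*
 * Illustrative sketch (not part of this patch): it shows the
 * reference-counting pattern that vfe_get() and vfe_put() above rely on: the
 * first user powers the block up, the last user powers it down, and a mutex
 * keeps the count and the hardware state consistent. All names below are
 * hypothetical and the "hardware" calls are stubs.
 */
#include <pthread.h>

struct example_block {
	pthread_mutex_t lock;	/* init with PTHREAD_MUTEX_INITIALIZER */
	int power_count;
};

static int example_power_on(struct example_block *b) { (void)b; return 0; }
static void example_power_off(struct example_block *b) { (void)b; }

static int example_get(struct example_block *b)
{
	int ret = 0;

	pthread_mutex_lock(&b->lock);
	if (b->power_count == 0)
		ret = example_power_on(b);	/* only the first user */
	if (!ret)
		b->power_count++;
	pthread_mutex_unlock(&b->lock);

	return ret;
}

static void example_put(struct example_block *b)
{
	pthread_mutex_lock(&b->lock);
	if (b->power_count > 0 && --b->power_count == 0)
		example_power_off(b);		/* only the last user */
	pthread_mutex_unlock(&b->lock);
}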
+ * + * Return 0 on success or a negative error code otherwise + */ +static int vfe_set_stream(struct v4l2_subdev *sd, int enable) +{ + struct vfe_line *line = v4l2_get_subdevdata(sd); + struct vfe_device *vfe = to_vfe(line); + int ret; + + if (enable) { + ret = vfe_enable(line); + if (ret < 0) + dev_err(to_device(vfe), + "Failed to enable vfe outputs\n"); + } else { + ret = vfe_disable(line); + if (ret < 0) + dev_err(to_device(vfe), + "Failed to disable vfe outputs\n"); + } + + return ret; +} + +/* + * __vfe_get_format - Get pointer to format structure + * @line: VFE line + * @cfg: V4L2 subdev pad configuration + * @pad: pad from which format is requested + * @which: TRY or ACTIVE format + * + * Return pointer to TRY or ACTIVE format structure + */ +static struct v4l2_mbus_framefmt * +__vfe_get_format(struct vfe_line *line, + struct v4l2_subdev_pad_config *cfg, + unsigned int pad, + enum v4l2_subdev_format_whence which) +{ + if (which == V4L2_SUBDEV_FORMAT_TRY) + return v4l2_subdev_get_try_format(&line->subdev, cfg, pad); + + return &line->fmt[pad]; +} + +/* + * __vfe_get_compose - Get pointer to compose selection structure + * @line: VFE line + * @cfg: V4L2 subdev pad configuration + * @which: TRY or ACTIVE format + * + * Return pointer to TRY or ACTIVE compose rectangle structure + */ +static struct v4l2_rect * +__vfe_get_compose(struct vfe_line *line, + struct v4l2_subdev_pad_config *cfg, + enum v4l2_subdev_format_whence which) +{ + if (which == V4L2_SUBDEV_FORMAT_TRY) + return v4l2_subdev_get_try_compose(&line->subdev, cfg, + MSM_VFE_PAD_SINK); + + return &line->compose; +} + +/* + * __vfe_get_crop - Get pointer to crop selection structure + * @line: VFE line + * @cfg: V4L2 subdev pad configuration + * @which: TRY or ACTIVE format + * + * Return pointer to TRY or ACTIVE crop rectangle structure + */ +static struct v4l2_rect * +__vfe_get_crop(struct vfe_line *line, + struct v4l2_subdev_pad_config *cfg, + enum v4l2_subdev_format_whence which) +{ + if (which == V4L2_SUBDEV_FORMAT_TRY) + return v4l2_subdev_get_try_crop(&line->subdev, cfg, + MSM_VFE_PAD_SRC); + + return &line->crop; +} + +/* + * vfe_try_format - Handle try format by pad subdev method + * @line: VFE line + * @cfg: V4L2 subdev pad configuration + * @pad: pad on which format is requested + * @fmt: pointer to v4l2 format structure + * @which: wanted subdev format + */ +static void vfe_try_format(struct vfe_line *line, + struct v4l2_subdev_pad_config *cfg, + unsigned int pad, + struct v4l2_mbus_framefmt *fmt, + enum v4l2_subdev_format_whence which) +{ + unsigned int i; + u32 code; + + switch (pad) { + case MSM_VFE_PAD_SINK: + /* Set format on sink pad */ + + for (i = 0; i < ARRAY_SIZE(vfe_formats); i++) + if (fmt->code == vfe_formats[i].code) + break; + + /* If not found, use UYVY as default */ + if (i >= ARRAY_SIZE(vfe_formats)) + fmt->code = MEDIA_BUS_FMT_UYVY8_2X8; + + fmt->width = clamp_t(u32, fmt->width, 1, 8191); + fmt->height = clamp_t(u32, fmt->height, 1, 8191); + + fmt->field = V4L2_FIELD_NONE; + fmt->colorspace = V4L2_COLORSPACE_SRGB; + + break; + + case MSM_VFE_PAD_SRC: + /* Set and return a format same as sink pad */ + + code = fmt->code; + + *fmt = *__vfe_get_format(line, cfg, MSM_VFE_PAD_SINK, + which); + + if (line->id == VFE_LINE_PIX) { + struct v4l2_rect *rect; + + rect = __vfe_get_crop(line, cfg, which); + + fmt->width = rect->width; + fmt->height = rect->height; + + switch (fmt->code) { + case MEDIA_BUS_FMT_YUYV8_2X8: + if (code == MEDIA_BUS_FMT_YUYV8_1_5X8) + fmt->code = MEDIA_BUS_FMT_YUYV8_1_5X8; + else + 
fmt->code = MEDIA_BUS_FMT_YUYV8_2X8; + break; + case MEDIA_BUS_FMT_YVYU8_2X8: + if (code == MEDIA_BUS_FMT_YVYU8_1_5X8) + fmt->code = MEDIA_BUS_FMT_YVYU8_1_5X8; + else + fmt->code = MEDIA_BUS_FMT_YVYU8_2X8; + break; + case MEDIA_BUS_FMT_UYVY8_2X8: + default: + if (code == MEDIA_BUS_FMT_UYVY8_1_5X8) + fmt->code = MEDIA_BUS_FMT_UYVY8_1_5X8; + else + fmt->code = MEDIA_BUS_FMT_UYVY8_2X8; + break; + case MEDIA_BUS_FMT_VYUY8_2X8: + if (code == MEDIA_BUS_FMT_VYUY8_1_5X8) + fmt->code = MEDIA_BUS_FMT_VYUY8_1_5X8; + else + fmt->code = MEDIA_BUS_FMT_VYUY8_2X8; + break; + } + } + + break; + } + + fmt->colorspace = V4L2_COLORSPACE_SRGB; +} + +/* + * vfe_try_compose - Handle try compose selection by pad subdev method + * @line: VFE line + * @cfg: V4L2 subdev pad configuration + * @rect: pointer to v4l2 rect structure + * @which: wanted subdev format + */ +static void vfe_try_compose(struct vfe_line *line, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_rect *rect, + enum v4l2_subdev_format_whence which) +{ + struct v4l2_mbus_framefmt *fmt; + + fmt = __vfe_get_format(line, cfg, MSM_VFE_PAD_SINK, which); + + if (rect->width > fmt->width) + rect->width = fmt->width; + + if (rect->height > fmt->height) + rect->height = fmt->height; + + if (fmt->width > rect->width * SCALER_RATIO_MAX) + rect->width = (fmt->width + SCALER_RATIO_MAX - 1) / + SCALER_RATIO_MAX; + + rect->width &= ~0x1; + + if (fmt->height > rect->height * SCALER_RATIO_MAX) + rect->height = (fmt->height + SCALER_RATIO_MAX - 1) / + SCALER_RATIO_MAX; + + if (rect->width < 16) + rect->width = 16; + + if (rect->height < 4) + rect->height = 4; +} + +/* + * vfe_try_crop - Handle try crop selection by pad subdev method + * @line: VFE line + * @cfg: V4L2 subdev pad configuration + * @rect: pointer to v4l2 rect structure + * @which: wanted subdev format + */ +static void vfe_try_crop(struct vfe_line *line, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_rect *rect, + enum v4l2_subdev_format_whence which) +{ + struct v4l2_rect *compose; + + compose = __vfe_get_compose(line, cfg, which); + + if (rect->width > compose->width) + rect->width = compose->width; + + if (rect->width + rect->left > compose->width) + rect->left = compose->width - rect->width; + + if (rect->height > compose->height) + rect->height = compose->height; + + if (rect->height + rect->top > compose->height) + rect->top = compose->height - rect->height; + + /* wm in line based mode writes multiple of 16 horizontally */ + rect->left += (rect->width & 0xf) >> 1; + rect->width &= ~0xf; + + if (rect->width < 16) { + rect->left = 0; + rect->width = 16; + } + + if (rect->height < 4) { + rect->top = 0; + rect->height = 4; + } +} + +/* + * vfe_enum_mbus_code - Handle pixel format enumeration + * @sd: VFE V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @code: pointer to v4l2_subdev_mbus_code_enum structure + * + * return -EINVAL or zero on success + */ +static int vfe_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_mbus_code_enum *code) +{ + struct vfe_line *line = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt *format; + + if (code->pad == MSM_VFE_PAD_SINK) { + if (code->index >= ARRAY_SIZE(vfe_formats)) + return -EINVAL; + + code->code = vfe_formats[code->index].code; + } else { + if (code->index > 0) + return -EINVAL; + + format = __vfe_get_format(line, cfg, MSM_VFE_PAD_SINK, + code->which); + + code->code = format->code; + } + + return 0; +} + +/* + * vfe_enum_frame_size - Handle frame size enumeration + * @sd: VFE V4L2 
subdevice + * @cfg: V4L2 subdev pad configuration + * @fse: pointer to v4l2_subdev_frame_size_enum structure + * + * Return -EINVAL or zero on success + */ +static int vfe_enum_frame_size(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_frame_size_enum *fse) +{ + struct vfe_line *line = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt format; + + if (fse->index != 0) + return -EINVAL; + + format.code = fse->code; + format.width = 1; + format.height = 1; + vfe_try_format(line, cfg, fse->pad, &format, fse->which); + fse->min_width = format.width; + fse->min_height = format.height; + + if (format.code != fse->code) + return -EINVAL; + + format.code = fse->code; + format.width = -1; + format.height = -1; + vfe_try_format(line, cfg, fse->pad, &format, fse->which); + fse->max_width = format.width; + fse->max_height = format.height; + + return 0; +} + +/* + * vfe_get_format - Handle get format by pads subdev method + * @sd: VFE V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @fmt: pointer to v4l2 subdev format structure + * + * Return -EINVAL or zero on success + */ +static int vfe_get_format(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct vfe_line *line = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt *format; + + format = __vfe_get_format(line, cfg, fmt->pad, fmt->which); + if (format == NULL) + return -EINVAL; + + fmt->format = *format; + + return 0; +} + +static int vfe_set_selection(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_selection *sel); + +/* + * vfe_set_format - Handle set format by pads subdev method + * @sd: VFE V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @fmt: pointer to v4l2 subdev format structure + * + * Return -EINVAL or zero on success + */ +static int vfe_set_format(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct vfe_line *line = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt *format; + + format = __vfe_get_format(line, cfg, fmt->pad, fmt->which); + if (format == NULL) + return -EINVAL; + + vfe_try_format(line, cfg, fmt->pad, &fmt->format, fmt->which); + *format = fmt->format; + + if (fmt->pad == MSM_VFE_PAD_SINK) { + struct v4l2_subdev_selection sel = { 0 }; + int ret; + + /* Propagate the format from sink to source */ + format = __vfe_get_format(line, cfg, MSM_VFE_PAD_SRC, + fmt->which); + + *format = fmt->format; + vfe_try_format(line, cfg, MSM_VFE_PAD_SRC, format, + fmt->which); + + if (line->id != VFE_LINE_PIX) + return 0; + + /* Reset sink pad compose selection */ + sel.which = fmt->which; + sel.pad = MSM_VFE_PAD_SINK; + sel.target = V4L2_SEL_TGT_COMPOSE; + sel.r.width = fmt->format.width; + sel.r.height = fmt->format.height; + ret = vfe_set_selection(sd, cfg, &sel); + if (ret < 0) + return ret; + } + + return 0; +} + +/* + * vfe_get_selection - Handle get selection by pads subdev method + * @sd: VFE V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @sel: pointer to v4l2 subdev selection structure + * + * Return -EINVAL or zero on success + */ +static int vfe_get_selection(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_selection *sel) +{ + struct vfe_line *line = v4l2_get_subdevdata(sd); + struct v4l2_subdev_format fmt = { 0 }; + struct v4l2_rect *rect; + int ret; + + if (line->id != VFE_LINE_PIX) + return -EINVAL; + + if (sel->pad == MSM_VFE_PAD_SINK) + switch (sel->target) { + case 
V4L2_SEL_TGT_COMPOSE_BOUNDS: + fmt.pad = sel->pad; + fmt.which = sel->which; + ret = vfe_get_format(sd, cfg, &fmt); + if (ret < 0) + return ret; + + sel->r.left = 0; + sel->r.top = 0; + sel->r.width = fmt.format.width; + sel->r.height = fmt.format.height; + break; + case V4L2_SEL_TGT_COMPOSE: + rect = __vfe_get_compose(line, cfg, sel->which); + if (rect == NULL) + return -EINVAL; + + sel->r = *rect; + break; + default: + return -EINVAL; + } + else if (sel->pad == MSM_VFE_PAD_SRC) + switch (sel->target) { + case V4L2_SEL_TGT_CROP_BOUNDS: + rect = __vfe_get_compose(line, cfg, sel->which); + if (rect == NULL) + return -EINVAL; + + sel->r.left = rect->left; + sel->r.top = rect->top; + sel->r.width = rect->width; + sel->r.height = rect->height; + break; + case V4L2_SEL_TGT_CROP: + rect = __vfe_get_crop(line, cfg, sel->which); + if (rect == NULL) + return -EINVAL; + + sel->r = *rect; + break; + default: + return -EINVAL; + } + + return 0; +} + +/* + * vfe_set_selection - Handle set selection by pads subdev method + * @sd: VFE V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @sel: pointer to v4l2 subdev selection structure + * + * Return -EINVAL or zero on success + */ +int vfe_set_selection(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_selection *sel) +{ + struct vfe_line *line = v4l2_get_subdevdata(sd); + struct v4l2_rect *rect; + int ret; + + if (line->id != VFE_LINE_PIX) + return -EINVAL; + + if (sel->target == V4L2_SEL_TGT_COMPOSE && + sel->pad == MSM_VFE_PAD_SINK) { + struct v4l2_subdev_selection crop = { 0 }; + + rect = __vfe_get_compose(line, cfg, sel->which); + if (rect == NULL) + return -EINVAL; + + vfe_try_compose(line, cfg, &sel->r, sel->which); + *rect = sel->r; + + /* Reset source crop selection */ + crop.which = sel->which; + crop.pad = MSM_VFE_PAD_SRC; + crop.target = V4L2_SEL_TGT_CROP; + crop.r = *rect; + ret = vfe_set_selection(sd, cfg, &crop); + } else if (sel->target == V4L2_SEL_TGT_CROP && + sel->pad == MSM_VFE_PAD_SRC) { + struct v4l2_subdev_format fmt = { 0 }; + + rect = __vfe_get_crop(line, cfg, sel->which); + if (rect == NULL) + return -EINVAL; + + vfe_try_crop(line, cfg, &sel->r, sel->which); + *rect = sel->r; + + /* Reset source pad format width and height */ + fmt.which = sel->which; + fmt.pad = MSM_VFE_PAD_SRC; + ret = vfe_get_format(sd, cfg, &fmt); + if (ret < 0) + return ret; + + fmt.format.width = rect->width; + fmt.format.height = rect->height; + ret = vfe_set_format(sd, cfg, &fmt); + } else { + ret = -EINVAL; + } + + return ret; +} + +/* + * vfe_init_formats - Initialize formats on all pads + * @sd: VFE V4L2 subdevice + * @fh: V4L2 subdev file handle + * + * Initialize all pad formats with default values. + * + * Return 0 on success or a negative error code otherwise + */ +static int vfe_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) +{ + struct v4l2_subdev_format format = { + .pad = MSM_VFE_PAD_SINK, + .which = fh ? V4L2_SUBDEV_FORMAT_TRY : + V4L2_SUBDEV_FORMAT_ACTIVE, + .format = { + .code = MEDIA_BUS_FMT_UYVY8_2X8, + .width = 1920, + .height = 1080 + } + }; + + return vfe_set_format(sd, fh ? 
fh->pad : NULL, &format); +} + +/* + * msm_vfe_subdev_init - Initialize VFE device structure and resources + * @vfe: VFE device + * @res: VFE module resources table + * + * Return 0 on success or a negative error code otherwise + */ +int msm_vfe_subdev_init(struct vfe_device *vfe, const struct resources *res) +{ + struct device *dev = to_device(vfe); + struct platform_device *pdev = to_platform_device(dev); + struct resource *r; + struct camss *camss = to_camss(vfe); + int i, j; + int ret; + + /* Memory */ + + r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[0]); + vfe->base = devm_ioremap_resource(dev, r); + if (IS_ERR(vfe->base)) { + dev_err(dev, "could not map memory\n"); + return PTR_ERR(vfe->base); + } + + /* Interrupt */ + + r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, + res->interrupt[0]); + if (!r) { + dev_err(dev, "missing IRQ\n"); + return -EINVAL; + } + + vfe->irq = r->start; + snprintf(vfe->irq_name, sizeof(vfe->irq_name), "%s_%s%d", + dev_name(dev), MSM_VFE_NAME, vfe->id); + ret = devm_request_irq(dev, vfe->irq, vfe_isr, + IRQF_TRIGGER_RISING, vfe->irq_name, vfe); + if (ret < 0) { + dev_err(dev, "request_irq failed: %d\n", ret); + return ret; + } + + /* Clocks */ + + vfe->nclocks = 0; + while (res->clock[vfe->nclocks]) + vfe->nclocks++; + + vfe->clock = devm_kzalloc(dev, vfe->nclocks * sizeof(*vfe->clock), + GFP_KERNEL); + if (!vfe->clock) + return -ENOMEM; + + for (i = 0; i < vfe->nclocks; i++) { + struct camss_clock *clock = &vfe->clock[i]; + + clock->clk = devm_clk_get(dev, res->clock[i]); + if (IS_ERR(clock->clk)) + return PTR_ERR(clock->clk); + + clock->name = res->clock[i]; + + clock->nfreqs = 0; + while (res->clock_rate[i][clock->nfreqs]) + clock->nfreqs++; + + if (!clock->nfreqs) { + clock->freq = NULL; + continue; + } + + clock->freq = devm_kzalloc(dev, clock->nfreqs * + sizeof(*clock->freq), GFP_KERNEL); + if (!clock->freq) + return -ENOMEM; + + for (j = 0; j < clock->nfreqs; j++) + clock->freq[j] = res->clock_rate[i][j]; + } + + mutex_init(&vfe->power_lock); + vfe->power_count = 0; + + mutex_init(&vfe->stream_lock); + vfe->stream_count = 0; + + spin_lock_init(&vfe->output_lock); + + vfe->id = 0; + vfe->reg_update = 0; + + for (i = VFE_LINE_RDI0; i <= VFE_LINE_PIX; i++) { + vfe->line[i].video_out.type = + V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; + vfe->line[i].video_out.camss = camss; + vfe->line[i].id = i; + init_completion(&vfe->line[i].output.sof); + init_completion(&vfe->line[i].output.reg_update); + } + + init_completion(&vfe->reset_complete); + init_completion(&vfe->halt_complete); + + return 0; +} + +/* + * msm_vfe_get_vfe_id - Get VFE HW module id + * @entity: Pointer to VFE media entity structure + * @id: Return CSID HW module id here + */ +void msm_vfe_get_vfe_id(struct media_entity *entity, u8 *id) +{ + struct v4l2_subdev *sd; + struct vfe_line *line; + struct vfe_device *vfe; + + sd = media_entity_to_v4l2_subdev(entity); + line = v4l2_get_subdevdata(sd); + vfe = to_vfe(line); + + *id = vfe->id; +} + +/* + * msm_vfe_get_vfe_line_id - Get VFE line id by media entity + * @entity: Pointer to VFE media entity structure + * @id: Return VFE line id here + */ +void msm_vfe_get_vfe_line_id(struct media_entity *entity, enum vfe_line_id *id) +{ + struct v4l2_subdev *sd; + struct vfe_line *line; + + sd = media_entity_to_v4l2_subdev(entity); + line = v4l2_get_subdevdata(sd); + + *id = line->id; +} + +/* + * vfe_link_setup - Setup VFE connections + * @entity: Pointer to media entity structure + * @local: Pointer to local pad + * @remote: Pointer to 
remote pad + * @flags: Link flags + * + * Return 0 on success + */ +static int vfe_link_setup(struct media_entity *entity, + const struct media_pad *local, + const struct media_pad *remote, u32 flags) +{ + if (flags & MEDIA_LNK_FL_ENABLED) + if (media_entity_remote_pad(local)) + return -EBUSY; + + return 0; +} + +static const struct v4l2_subdev_core_ops vfe_core_ops = { + .s_power = vfe_set_power, +}; + +static const struct v4l2_subdev_video_ops vfe_video_ops = { + .s_stream = vfe_set_stream, +}; + +static const struct v4l2_subdev_pad_ops vfe_pad_ops = { + .enum_mbus_code = vfe_enum_mbus_code, + .enum_frame_size = vfe_enum_frame_size, + .get_fmt = vfe_get_format, + .set_fmt = vfe_set_format, + .get_selection = vfe_get_selection, + .set_selection = vfe_set_selection, +}; + +static const struct v4l2_subdev_ops vfe_v4l2_ops = { + .core = &vfe_core_ops, + .video = &vfe_video_ops, + .pad = &vfe_pad_ops, +}; + +static const struct v4l2_subdev_internal_ops vfe_v4l2_internal_ops = { + .open = vfe_init_formats, +}; + +static const struct media_entity_operations vfe_media_ops = { + .link_setup = vfe_link_setup, + .link_validate = v4l2_subdev_link_validate, +}; + +static const struct camss_video_ops camss_vfe_video_ops = { + .queue_buffer = vfe_queue_buffer, + .flush_buffers = vfe_flush_buffers, +}; + +void msm_vfe_stop_streaming(struct vfe_device *vfe) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(vfe->line); i++) + msm_video_stop_streaming(&vfe->line[i].video_out); +} + +/* + * msm_vfe_register_entities - Register subdev node for VFE module + * @vfe: VFE device + * @v4l2_dev: V4L2 device + * + * Initialize and register a subdev node for the VFE module. Then + * call msm_video_register() to register the video device node which + * will be connected to this subdev node. Then actually create the + * media link between them. 
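/*
 * Illustrative sketch (not part of this patch): it shows the goto-based
 * unwinding style used by the registration code below, where each step that
 * can fail jumps to a label that undoes only the steps already completed.
 * The resources here are plain heap buffers standing in for the V4L2
 * objects; the function name is hypothetical.
 */
#include <stdlib.h>

static int example_register_pair(void **a, void **b)
{
	*a = malloc(64);		/* step 1: first resource */
	if (!*a)
		goto error_a;

	*b = malloc(64);		/* step 2: second resource */
	if (!*b)
		goto error_b;

	return 0;

error_b:
	free(*a);			/* undo step 1 only */
	*a = NULL;
error_a:
	return -1;
}
/*
 * msm_vfe_register_entities() extends the same idea across loop iterations:
 * on failure it first unwinds the partially set up entry, then walks back
 * over the fully registered earlier ones.
 */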
+ * + * Return 0 on success or a negative error code otherwise + */ +int msm_vfe_register_entities(struct vfe_device *vfe, + struct v4l2_device *v4l2_dev) +{ + struct device *dev = to_device(vfe); + struct v4l2_subdev *sd; + struct media_pad *pads; + struct camss_video *video_out; + int ret; + int i; + + for (i = 0; i < ARRAY_SIZE(vfe->line); i++) { + char name[32]; + + sd = &vfe->line[i].subdev; + pads = vfe->line[i].pads; + video_out = &vfe->line[i].video_out; + + v4l2_subdev_init(sd, &vfe_v4l2_ops); + sd->internal_ops = &vfe_v4l2_internal_ops; + sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + if (i == VFE_LINE_PIX) + snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d_%s", + MSM_VFE_NAME, vfe->id, "pix"); + else + snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d_%s%d", + MSM_VFE_NAME, vfe->id, "rdi", i); + + v4l2_set_subdevdata(sd, &vfe->line[i]); + + ret = vfe_init_formats(sd, NULL); + if (ret < 0) { + dev_err(dev, "Failed to init format: %d\n", ret); + goto error_init; + } + + pads[MSM_VFE_PAD_SINK].flags = MEDIA_PAD_FL_SINK; + pads[MSM_VFE_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE; + + sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER; + sd->entity.ops = &vfe_media_ops; + ret = media_entity_pads_init(&sd->entity, MSM_VFE_PADS_NUM, + pads); + if (ret < 0) { + dev_err(dev, "Failed to init media entity: %d\n", ret); + goto error_init; + } + + ret = v4l2_device_register_subdev(v4l2_dev, sd); + if (ret < 0) { + dev_err(dev, "Failed to register subdev: %d\n", ret); + goto error_reg_subdev; + } + + video_out->ops = &camss_vfe_video_ops; + video_out->bpl_alignment = 8; + video_out->line_based = 0; + if (i == VFE_LINE_PIX) { + video_out->bpl_alignment = 16; + video_out->line_based = 1; + } + snprintf(name, ARRAY_SIZE(name), "%s%d_%s%d", + MSM_VFE_NAME, vfe->id, "video", i); + ret = msm_video_register(video_out, v4l2_dev, name, + i == VFE_LINE_PIX ? 
1 : 0); + if (ret < 0) { + dev_err(dev, "Failed to register video node: %d\n", + ret); + goto error_reg_video; + } + + ret = media_create_pad_link( + &sd->entity, MSM_VFE_PAD_SRC, + &video_out->vdev.entity, 0, + MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); + if (ret < 0) { + dev_err(dev, "Failed to link %s->%s entities: %d\n", + sd->entity.name, video_out->vdev.entity.name, + ret); + goto error_link; + } + } + + return 0; + +error_link: + msm_video_unregister(video_out); + +error_reg_video: + v4l2_device_unregister_subdev(sd); + +error_reg_subdev: + media_entity_cleanup(&sd->entity); + +error_init: + for (i--; i >= 0; i--) { + sd = &vfe->line[i].subdev; + video_out = &vfe->line[i].video_out; + + msm_video_unregister(video_out); + v4l2_device_unregister_subdev(sd); + media_entity_cleanup(&sd->entity); + } + + return ret; +} + +/* + * msm_vfe_unregister_entities - Unregister VFE module subdev node + * @vfe: VFE device + */ +void msm_vfe_unregister_entities(struct vfe_device *vfe) +{ + int i; + + mutex_destroy(&vfe->power_lock); + mutex_destroy(&vfe->stream_lock); + + for (i = 0; i < ARRAY_SIZE(vfe->line); i++) { + struct v4l2_subdev *sd = &vfe->line[i].subdev; + struct camss_video *video_out = &vfe->line[i].video_out; + + msm_video_unregister(video_out); + v4l2_device_unregister_subdev(sd); + media_entity_cleanup(&sd->entity); + } +} diff --git a/drivers/media/platform/qcom/camss-8x16/camss-vfe.h b/drivers/media/platform/qcom/camss-8x16/camss-vfe.h new file mode 100644 index 000000000000..53d5b66a9dfb --- /dev/null +++ b/drivers/media/platform/qcom/camss-8x16/camss-vfe.h @@ -0,0 +1,123 @@ +/* + * camss-vfe.h + * + * Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module + * + * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. + * Copyright (C) 2015-2017 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef QC_MSM_CAMSS_VFE_H +#define QC_MSM_CAMSS_VFE_H + +#include +#include +#include +#include +#include + +#include "camss-video.h" + +#define MSM_VFE_PAD_SINK 0 +#define MSM_VFE_PAD_SRC 1 +#define MSM_VFE_PADS_NUM 2 + +#define MSM_VFE_LINE_NUM 4 +#define MSM_VFE_IMAGE_MASTERS_NUM 7 +#define MSM_VFE_COMPOSITE_IRQ_NUM 4 + +#define MSM_VFE_VFE0_UB_SIZE 1023 +#define MSM_VFE_VFE0_UB_SIZE_RDI (MSM_VFE_VFE0_UB_SIZE / 3) +#define MSM_VFE_VFE1_UB_SIZE 1535 +#define MSM_VFE_VFE1_UB_SIZE_RDI (MSM_VFE_VFE1_UB_SIZE / 3) + +enum vfe_output_state { + VFE_OUTPUT_OFF, + VFE_OUTPUT_RESERVED, + VFE_OUTPUT_SINGLE, + VFE_OUTPUT_CONTINUOUS, + VFE_OUTPUT_IDLE, + VFE_OUTPUT_STOPPING +}; + +enum vfe_line_id { + VFE_LINE_NONE = -1, + VFE_LINE_RDI0 = 0, + VFE_LINE_RDI1 = 1, + VFE_LINE_RDI2 = 2, + VFE_LINE_PIX = 3 +}; + +struct vfe_output { + u8 wm_num; + u8 wm_idx[3]; + + int active_buf; + struct camss_buffer *buf[2]; + struct camss_buffer *last_buffer; + struct list_head pending_bufs; + + unsigned int drop_update_idx; + + enum vfe_output_state state; + unsigned int sequence; + int wait_sof; + int wait_reg_update; + struct completion sof; + struct completion reg_update; +}; + +struct vfe_line { + enum vfe_line_id id; + struct v4l2_subdev subdev; + struct media_pad pads[MSM_VFE_PADS_NUM]; + struct v4l2_mbus_framefmt fmt[MSM_VFE_PADS_NUM]; + struct v4l2_rect compose; + struct v4l2_rect crop; + struct camss_video video_out; + struct vfe_output output; +}; + +struct vfe_device { + u8 id; + void __iomem *base; + u32 irq; + char irq_name[30]; + struct camss_clock *clock; + int nclocks; + struct completion reset_complete; + struct completion halt_complete; + struct mutex power_lock; + int power_count; + struct mutex stream_lock; + int stream_count; + spinlock_t output_lock; + enum vfe_line_id wm_output_map[MSM_VFE_IMAGE_MASTERS_NUM]; + struct vfe_line line[MSM_VFE_LINE_NUM]; + u32 reg_update; + u8 was_streaming; +}; + +struct resources; + +int msm_vfe_subdev_init(struct vfe_device *vfe, const struct resources *res); + +int msm_vfe_register_entities(struct vfe_device *vfe, + struct v4l2_device *v4l2_dev); + +void msm_vfe_unregister_entities(struct vfe_device *vfe); + +void msm_vfe_get_vfe_id(struct media_entity *entity, u8 *id); +void msm_vfe_get_vfe_line_id(struct media_entity *entity, enum vfe_line_id *id); + +void msm_vfe_stop_streaming(struct vfe_device *vfe); + +#endif /* QC_MSM_CAMSS_VFE_H */ diff --git a/drivers/media/platform/qcom/camss-8x16/camss-video.c b/drivers/media/platform/qcom/camss-8x16/camss-video.c new file mode 100644 index 000000000000..cf4219e871bd --- /dev/null +++ b/drivers/media/platform/qcom/camss-8x16/camss-video.c @@ -0,0 +1,860 @@ +/* + * camss-video.c + * + * Qualcomm MSM Camera Subsystem - V4L2 device node + * + * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. + * Copyright (C) 2015-2017 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "camss-video.h" +#include "camss.h" + +struct fract { + u8 numerator; + u8 denominator; +}; + +/* + * struct camss_format_info - ISP media bus format information + * @code: V4L2 media bus format code + * @pixelformat: V4L2 pixel format FCC identifier + * @planes: Number of planes + * @hsub: Horizontal subsampling (for each plane) + * @vsub: Vertical subsampling (for each plane) + * @bpp: Bits per pixel when stored in memory (for each plane) + */ +struct camss_format_info { + u32 code; + u32 pixelformat; + u8 planes; + struct fract hsub[3]; + struct fract vsub[3]; + unsigned int bpp[3]; +}; + +static const struct camss_format_info formats_rdi[] = { + { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_UYVY, 1, + { { 1, 1 } }, { { 1, 1 } }, { 16 } }, + { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_VYUY, 1, + { { 1, 1 } }, { { 1, 1 } }, { 16 } }, + { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_YUYV, 1, + { { 1, 1 } }, { { 1, 1 } }, { 16 } }, + { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_YVYU, 1, + { { 1, 1 } }, { { 1, 1 } }, { 16 } }, + { MEDIA_BUS_FMT_SBGGR8_1X8, V4L2_PIX_FMT_SBGGR8, 1, + { { 1, 1 } }, { { 1, 1 } }, { 8 } }, + { MEDIA_BUS_FMT_SGBRG8_1X8, V4L2_PIX_FMT_SGBRG8, 1, + { { 1, 1 } }, { { 1, 1 } }, { 8 } }, + { MEDIA_BUS_FMT_SGRBG8_1X8, V4L2_PIX_FMT_SGRBG8, 1, + { { 1, 1 } }, { { 1, 1 } }, { 8 } }, + { MEDIA_BUS_FMT_SRGGB8_1X8, V4L2_PIX_FMT_SRGGB8, 1, + { { 1, 1 } }, { { 1, 1 } }, { 8 } }, + { MEDIA_BUS_FMT_SBGGR10_1X10, V4L2_PIX_FMT_SBGGR10P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 10 } }, + { MEDIA_BUS_FMT_SGBRG10_1X10, V4L2_PIX_FMT_SGBRG10P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 10 } }, + { MEDIA_BUS_FMT_SGRBG10_1X10, V4L2_PIX_FMT_SGRBG10P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 10 } }, + { MEDIA_BUS_FMT_SRGGB10_1X10, V4L2_PIX_FMT_SRGGB10P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 10 } }, + { MEDIA_BUS_FMT_SBGGR12_1X12, V4L2_PIX_FMT_SBGGR12P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 12 } }, + { MEDIA_BUS_FMT_SGBRG12_1X12, V4L2_PIX_FMT_SGBRG12P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 12 } }, + { MEDIA_BUS_FMT_SGRBG12_1X12, V4L2_PIX_FMT_SGRBG12P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 12 } }, + { MEDIA_BUS_FMT_SRGGB12_1X12, V4L2_PIX_FMT_SRGGB12P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 12 } }, +}; + +static const struct camss_format_info formats_pix[] = { + { MEDIA_BUS_FMT_YUYV8_1_5X8, V4L2_PIX_FMT_NV12, 1, + { { 1, 1 } }, { { 2, 3 } }, { 8 } }, + { MEDIA_BUS_FMT_YVYU8_1_5X8, V4L2_PIX_FMT_NV12, 1, + { { 1, 1 } }, { { 2, 3 } }, { 8 } }, + { MEDIA_BUS_FMT_UYVY8_1_5X8, V4L2_PIX_FMT_NV12, 1, + { { 1, 1 } }, { { 2, 3 } }, { 8 } }, + { MEDIA_BUS_FMT_VYUY8_1_5X8, V4L2_PIX_FMT_NV12, 1, + { { 1, 1 } }, { { 2, 3 } }, { 8 } }, + { MEDIA_BUS_FMT_YUYV8_1_5X8, V4L2_PIX_FMT_NV21, 1, + { { 1, 1 } }, { { 2, 3 } }, { 8 } }, + { MEDIA_BUS_FMT_YVYU8_1_5X8, V4L2_PIX_FMT_NV21, 1, + { { 1, 1 } }, { { 2, 3 } }, { 8 } }, + { MEDIA_BUS_FMT_UYVY8_1_5X8, V4L2_PIX_FMT_NV21, 1, + { { 1, 1 } }, { { 2, 3 } }, { 8 } }, + { MEDIA_BUS_FMT_VYUY8_1_5X8, V4L2_PIX_FMT_NV21, 1, + { { 1, 1 } }, { { 2, 3 } }, { 8 } }, + { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_NV16, 1, + { { 1, 1 } }, { { 1, 2 } }, { 8 } }, + { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_NV16, 1, + { { 1, 1 } }, { { 1, 2 } }, { 8 } }, + { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_NV16, 1, + { { 1, 1 } }, { { 1, 2 } }, { 8 } }, + { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_NV16, 1, + { { 1, 1 } }, { { 1, 2 } }, { 8 } }, + { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_NV61, 1, + { { 1, 1 } }, { { 1, 2 } }, { 8 } }, + { MEDIA_BUS_FMT_YVYU8_2X8, 
V4L2_PIX_FMT_NV61, 1, + { { 1, 1 } }, { { 1, 2 } }, { 8 } }, + { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_NV61, 1, + { { 1, 1 } }, { { 1, 2 } }, { 8 } }, + { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_NV61, 1, + { { 1, 1 } }, { { 1, 2 } }, { 8 } }, +}; + +/* ----------------------------------------------------------------------------- + * Helper functions + */ + +static int video_find_format(u32 code, u32 pixelformat, + const struct camss_format_info *formats, + unsigned int nformats) +{ + int i; + + for (i = 0; i < nformats; i++) { + if (formats[i].code == code && + formats[i].pixelformat == pixelformat) + return i; + } + + for (i = 0; i < nformats; i++) + if (formats[i].code == code) + return i; + + WARN_ON(1); + + return -EINVAL; +} + +/* + * video_mbus_to_pix_mp - Convert v4l2_mbus_framefmt to v4l2_pix_format_mplane + * @mbus: v4l2_mbus_framefmt format (input) + * @pix: v4l2_pix_format_mplane format (output) + * @f: a pointer to formats array element to be used for the conversion + * @alignment: bytesperline alignment value + * + * Fill the output pix structure with information from the input mbus format. + * + * Return 0 on success or a negative error code otherwise + */ +static int video_mbus_to_pix_mp(const struct v4l2_mbus_framefmt *mbus, + struct v4l2_pix_format_mplane *pix, + const struct camss_format_info *f, + unsigned int alignment) +{ + unsigned int i; + u32 bytesperline; + + memset(pix, 0, sizeof(*pix)); + v4l2_fill_pix_format_mplane(pix, mbus); + pix->pixelformat = f->pixelformat; + pix->num_planes = f->planes; + for (i = 0; i < pix->num_planes; i++) { + bytesperline = pix->width / f->hsub[i].numerator * + f->hsub[i].denominator * f->bpp[i] / 8; + bytesperline = ALIGN(bytesperline, alignment); + pix->plane_fmt[i].bytesperline = bytesperline; + pix->plane_fmt[i].sizeimage = pix->height / + f->vsub[i].numerator * f->vsub[i].denominator * + bytesperline; + } + + return 0; +} + +static struct v4l2_subdev *video_remote_subdev(struct camss_video *video, + u32 *pad) +{ + struct media_pad *remote; + + remote = media_entity_remote_pad(&video->pad); + + if (!remote || !is_media_entity_v4l2_subdev(remote->entity)) + return NULL; + + if (pad) + *pad = remote->index; + + return media_entity_to_v4l2_subdev(remote->entity); +} + +static int video_get_subdev_format(struct camss_video *video, + struct v4l2_format *format) +{ + struct v4l2_subdev_format fmt; + struct v4l2_subdev *subdev; + u32 pad; + int ret; + + subdev = video_remote_subdev(video, &pad); + if (subdev == NULL) + return -EPIPE; + + fmt.pad = pad; + fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE; + + ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt); + if (ret) + return ret; + + ret = video_find_format(fmt.format.code, + format->fmt.pix_mp.pixelformat, + video->formats, video->nformats); + if (ret < 0) + return ret; + + format->type = video->type; + + return video_mbus_to_pix_mp(&fmt.format, &format->fmt.pix_mp, + &video->formats[ret], video->bpl_alignment); +} + +/* ----------------------------------------------------------------------------- + * Video queue operations + */ + +static int video_queue_setup(struct vb2_queue *q, + unsigned int *num_buffers, unsigned int *num_planes, + unsigned int sizes[], struct device *alloc_devs[]) +{ + struct camss_video *video = vb2_get_drv_priv(q); + const struct v4l2_pix_format_mplane *format = + &video->active_fmt.fmt.pix_mp; + unsigned int i; + + if (*num_planes) { + if (*num_planes != format->num_planes) + return -EINVAL; + + for (i = 0; i < *num_planes; i++) + if (sizes[i] < 
format->plane_fmt[i].sizeimage) + return -EINVAL; + + return 0; + } + + *num_planes = format->num_planes; + + for (i = 0; i < *num_planes; i++) + sizes[i] = format->plane_fmt[i].sizeimage; + + return 0; +} + +static int video_buf_init(struct vb2_buffer *vb) +{ + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct camss_video *video = vb2_get_drv_priv(vb->vb2_queue); + struct camss_buffer *buffer = container_of(vbuf, struct camss_buffer, + vb); + const struct v4l2_pix_format_mplane *format = + &video->active_fmt.fmt.pix_mp; + struct sg_table *sgt; + unsigned int i; + + for (i = 0; i < format->num_planes; i++) { + sgt = vb2_dma_sg_plane_desc(vb, i); + if (!sgt) + return -EFAULT; + + buffer->addr[i] = sg_dma_address(sgt->sgl); + } + + if (format->pixelformat == V4L2_PIX_FMT_NV12 || + format->pixelformat == V4L2_PIX_FMT_NV21 || + format->pixelformat == V4L2_PIX_FMT_NV16 || + format->pixelformat == V4L2_PIX_FMT_NV61) + buffer->addr[1] = buffer->addr[0] + + format->plane_fmt[0].bytesperline * + format->height; + + return 0; +} + +static int video_buf_prepare(struct vb2_buffer *vb) +{ + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct camss_video *video = vb2_get_drv_priv(vb->vb2_queue); + const struct v4l2_pix_format_mplane *format = + &video->active_fmt.fmt.pix_mp; + unsigned int i; + + for (i = 0; i < format->num_planes; i++) { + if (format->plane_fmt[i].sizeimage > vb2_plane_size(vb, i)) + return -EINVAL; + + vb2_set_plane_payload(vb, i, format->plane_fmt[i].sizeimage); + } + + vbuf->field = V4L2_FIELD_NONE; + + return 0; +} + +static void video_buf_queue(struct vb2_buffer *vb) +{ + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct camss_video *video = vb2_get_drv_priv(vb->vb2_queue); + struct camss_buffer *buffer = container_of(vbuf, struct camss_buffer, + vb); + + video->ops->queue_buffer(video, buffer); +} + +static int video_check_format(struct camss_video *video) +{ + struct v4l2_pix_format_mplane *pix = &video->active_fmt.fmt.pix_mp; + struct v4l2_format format; + struct v4l2_pix_format_mplane *sd_pix = &format.fmt.pix_mp; + int ret; + + sd_pix->pixelformat = pix->pixelformat; + ret = video_get_subdev_format(video, &format); + if (ret < 0) + return ret; + + if (pix->pixelformat != sd_pix->pixelformat || + pix->height != sd_pix->height || + pix->width != sd_pix->width || + pix->num_planes != sd_pix->num_planes || + pix->field != format.fmt.pix_mp.field) + return -EPIPE; + + return 0; +} + +static int video_start_streaming(struct vb2_queue *q, unsigned int count) +{ + struct camss_video *video = vb2_get_drv_priv(q); + struct video_device *vdev = &video->vdev; + struct media_entity *entity; + struct media_pad *pad; + struct v4l2_subdev *subdev; + int ret; + + ret = media_pipeline_start(&vdev->entity, &video->pipe); + if (ret < 0) + return ret; + + ret = video_check_format(video); + if (ret < 0) + goto error; + + entity = &vdev->entity; + while (1) { + pad = &entity->pads[0]; + if (!(pad->flags & MEDIA_PAD_FL_SINK)) + break; + + pad = media_entity_remote_pad(pad); + if (!pad || !is_media_entity_v4l2_subdev(pad->entity)) + break; + + entity = pad->entity; + subdev = media_entity_to_v4l2_subdev(entity); + + ret = v4l2_subdev_call(subdev, video, s_stream, 1); + if (ret < 0 && ret != -ENOIOCTLCMD) + goto error; + } + + return 0; + +error: + media_pipeline_stop(&vdev->entity); + + video->ops->flush_buffers(video, VB2_BUF_STATE_QUEUED); + + return ret; +} + +static void video_stop_streaming(struct vb2_queue *q) +{ + struct camss_video *video = 
vb2_get_drv_priv(q); + struct video_device *vdev = &video->vdev; + struct media_entity *entity; + struct media_pad *pad; + struct v4l2_subdev *subdev; + + entity = &vdev->entity; + while (1) { + pad = &entity->pads[0]; + if (!(pad->flags & MEDIA_PAD_FL_SINK)) + break; + + pad = media_entity_remote_pad(pad); + if (!pad || !is_media_entity_v4l2_subdev(pad->entity)) + break; + + entity = pad->entity; + subdev = media_entity_to_v4l2_subdev(entity); + + v4l2_subdev_call(subdev, video, s_stream, 0); + } + + media_pipeline_stop(&vdev->entity); + + video->ops->flush_buffers(video, VB2_BUF_STATE_ERROR); +} + +static const struct vb2_ops msm_video_vb2_q_ops = { + .queue_setup = video_queue_setup, + .wait_prepare = vb2_ops_wait_prepare, + .wait_finish = vb2_ops_wait_finish, + .buf_init = video_buf_init, + .buf_prepare = video_buf_prepare, + .buf_queue = video_buf_queue, + .start_streaming = video_start_streaming, + .stop_streaming = video_stop_streaming, +}; + +/* ----------------------------------------------------------------------------- + * V4L2 ioctls + */ + +static int video_querycap(struct file *file, void *fh, + struct v4l2_capability *cap) +{ + struct camss_video *video = video_drvdata(file); + + strlcpy(cap->driver, "qcom-camss", sizeof(cap->driver)); + strlcpy(cap->card, "Qualcomm Camera Subsystem", sizeof(cap->card)); + snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s", + dev_name(video->camss->dev)); + + return 0; +} + +static int video_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f) +{ + struct camss_video *video = video_drvdata(file); + int i, j, k; + + if (f->type != video->type) + return -EINVAL; + + if (f->index >= video->nformats) + return -EINVAL; + + /* find index "i" of "k"th unique pixelformat in formats array */ + k = -1; + for (i = 0; i < video->nformats; i++) { + for (j = 0; j < i; j++) { + if (video->formats[i].pixelformat == + video->formats[j].pixelformat) + break; + } + + if (j == i) + k++; + + if (k == f->index) + break; + } + + if (k < f->index) + return -EINVAL; + + f->pixelformat = video->formats[i].pixelformat; + + return 0; +} + +static int video_g_fmt(struct file *file, void *fh, struct v4l2_format *f) +{ + struct camss_video *video = video_drvdata(file); + + *f = video->active_fmt; + + return 0; +} + +static int __video_try_fmt(struct camss_video *video, struct v4l2_format *f) +{ + struct v4l2_pix_format_mplane *pix_mp; + const struct camss_format_info *fi; + struct v4l2_plane_pix_format *p; + u32 bytesperline[3] = { 0 }; + u32 sizeimage[3] = { 0 }; + u32 width, height; + u32 bpl, lines; + int i, j; + + pix_mp = &f->fmt.pix_mp; + + if (video->line_based) + for (i = 0; i < pix_mp->num_planes && i < 3; i++) { + p = &pix_mp->plane_fmt[i]; + bytesperline[i] = clamp_t(u32, p->bytesperline, + 1, 65528); + sizeimage[i] = clamp_t(u32, p->sizeimage, + bytesperline[i], + bytesperline[i] * 4096); + } + + for (j = 0; j < video->nformats; j++) + if (pix_mp->pixelformat == video->formats[j].pixelformat) + break; + + if (j == video->nformats) + j = 0; /* default format */ + + fi = &video->formats[j]; + width = pix_mp->width; + height = pix_mp->height; + + memset(pix_mp, 0, sizeof(*pix_mp)); + + pix_mp->pixelformat = fi->pixelformat; + pix_mp->width = clamp_t(u32, width, 1, 8191); + pix_mp->height = clamp_t(u32, height, 1, 8191); + pix_mp->num_planes = fi->planes; + for (i = 0; i < pix_mp->num_planes; i++) { + bpl = pix_mp->width / fi->hsub[i].numerator * + fi->hsub[i].denominator * fi->bpp[i] / 8; + bpl = ALIGN(bpl, video->bpl_alignment); + 
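/*
+ * Illustrative numbers only, assuming a requested 1920x1080 frame:
+ * UYVY on an RDI node (16 bpp, hsub 1/1, 8 byte alignment) yields
+ * bpl = 1920 * 16 / 8 = 3840 and sizeimage = 1080 * 3840, while NV12
+ * on the PIX node (8 bpp, vsub 2/3, 16 byte alignment) yields
+ * bpl = 1920 and sizeimage = 1080 / 2 * 3 * 1920, i.e. the usual
+ * 1.5 bytes per pixel.
+ */
+ 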
pix_mp->plane_fmt[i].bytesperline = bpl; + pix_mp->plane_fmt[i].sizeimage = pix_mp->height / + fi->vsub[i].numerator * fi->vsub[i].denominator * bpl; + } + + pix_mp->field = V4L2_FIELD_NONE; + pix_mp->colorspace = V4L2_COLORSPACE_SRGB; + pix_mp->flags = 0; + pix_mp->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(pix_mp->colorspace); + pix_mp->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true, + pix_mp->colorspace, pix_mp->ycbcr_enc); + pix_mp->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(pix_mp->colorspace); + + if (video->line_based) + for (i = 0; i < pix_mp->num_planes; i++) { + p = &pix_mp->plane_fmt[i]; + p->bytesperline = clamp_t(u32, p->bytesperline, + 1, 65528); + p->sizeimage = clamp_t(u32, p->sizeimage, + p->bytesperline, + p->bytesperline * 4096); + lines = p->sizeimage / p->bytesperline; + + if (p->bytesperline < bytesperline[i]) + p->bytesperline = ALIGN(bytesperline[i], 8); + + if (p->sizeimage < p->bytesperline * lines) + p->sizeimage = p->bytesperline * lines; + + if (p->sizeimage < sizeimage[i]) + p->sizeimage = sizeimage[i]; + } + + return 0; +} + +static int video_try_fmt(struct file *file, void *fh, struct v4l2_format *f) +{ + struct camss_video *video = video_drvdata(file); + + return __video_try_fmt(video, f); +} + +static int video_s_fmt(struct file *file, void *fh, struct v4l2_format *f) +{ + struct camss_video *video = video_drvdata(file); + int ret; + + if (vb2_is_busy(&video->vb2_q)) + return -EBUSY; + + ret = __video_try_fmt(video, f); + if (ret < 0) + return ret; + + video->active_fmt = *f; + + return 0; +} + +static int video_enum_input(struct file *file, void *fh, + struct v4l2_input *input) +{ + if (input->index > 0) + return -EINVAL; + + strlcpy(input->name, "camera", sizeof(input->name)); + input->type = V4L2_INPUT_TYPE_CAMERA; + + return 0; +} + +static int video_g_input(struct file *file, void *fh, unsigned int *input) +{ + *input = 0; + + return 0; +} + +static int video_s_input(struct file *file, void *fh, unsigned int input) +{ + return input == 0 ? 
0 : -EINVAL; +} + +static const struct v4l2_ioctl_ops msm_vid_ioctl_ops = { + .vidioc_querycap = video_querycap, + .vidioc_enum_fmt_vid_cap_mplane = video_enum_fmt, + .vidioc_g_fmt_vid_cap_mplane = video_g_fmt, + .vidioc_s_fmt_vid_cap_mplane = video_s_fmt, + .vidioc_try_fmt_vid_cap_mplane = video_try_fmt, + .vidioc_reqbufs = vb2_ioctl_reqbufs, + .vidioc_querybuf = vb2_ioctl_querybuf, + .vidioc_qbuf = vb2_ioctl_qbuf, + .vidioc_expbuf = vb2_ioctl_expbuf, + .vidioc_dqbuf = vb2_ioctl_dqbuf, + .vidioc_create_bufs = vb2_ioctl_create_bufs, + .vidioc_prepare_buf = vb2_ioctl_prepare_buf, + .vidioc_streamon = vb2_ioctl_streamon, + .vidioc_streamoff = vb2_ioctl_streamoff, + .vidioc_enum_input = video_enum_input, + .vidioc_g_input = video_g_input, + .vidioc_s_input = video_s_input, +}; + +/* ----------------------------------------------------------------------------- + * V4L2 file operations + */ + +static int video_open(struct file *file) +{ + struct video_device *vdev = video_devdata(file); + struct camss_video *video = video_drvdata(file); + struct v4l2_fh *vfh; + int ret; + + mutex_lock(&video->lock); + + vfh = kzalloc(sizeof(*vfh), GFP_KERNEL); + if (vfh == NULL) { + ret = -ENOMEM; + goto error_alloc; + } + + v4l2_fh_init(vfh, vdev); + v4l2_fh_add(vfh); + + file->private_data = vfh; + + ret = v4l2_pipeline_pm_use(&vdev->entity, 1); + if (ret < 0) { + dev_err(video->camss->dev, "Failed to power up pipeline: %d\n", + ret); + goto error_pm_use; + } + + mutex_unlock(&video->lock); + + return 0; + +error_pm_use: + v4l2_fh_release(file); + +error_alloc: + mutex_unlock(&video->lock); + + return ret; +} + +static int video_release(struct file *file) +{ + struct video_device *vdev = video_devdata(file); + + vb2_fop_release(file); + + v4l2_pipeline_pm_use(&vdev->entity, 0); + + file->private_data = NULL; + + return 0; +} + +static const struct v4l2_file_operations msm_vid_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = video_ioctl2, + .open = video_open, + .release = video_release, + .poll = vb2_fop_poll, + .mmap = vb2_fop_mmap, + .read = vb2_fop_read, +}; + +/* ----------------------------------------------------------------------------- + * CAMSS video core + */ + +static void msm_video_release(struct video_device *vdev) +{ + struct camss_video *video = video_get_drvdata(vdev); + + media_entity_cleanup(&vdev->entity); + + mutex_destroy(&video->q_lock); + mutex_destroy(&video->lock); + + if (atomic_dec_and_test(&video->camss->ref_count)) + camss_delete(video->camss); +} + +/* + * msm_video_init_format - Helper function to initialize format + * @video: struct camss_video + * + * Initialize pad format with default value. + * + * Return 0 on success or a negative error code otherwise + */ +static int msm_video_init_format(struct camss_video *video) +{ + int ret; + struct v4l2_format format = { + .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE, + .fmt.pix_mp = { + .width = 1920, + .height = 1080, + .pixelformat = video->formats[0].pixelformat, + }, + }; + + ret = __video_try_fmt(video, &format); + if (ret < 0) + return ret; + + video->active_fmt = format; + + return 0; +} + +/* + * msm_video_register - Register a video device node + * @video: struct camss_video + * @v4l2_dev: V4L2 device + * @name: name to be used for the video device node + * + * Initialize and register a video device node to a V4L2 device. Also + * initialize the vb2 queue. 
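+ * The vb2 queue is backed by the dma-sg memory ops and accepts MMAP,
+ * DMABUF and read() I/O, and the video device exposes a single sink
+ * pad. The is_pix argument only selects between the RDI and the PIX
+ * format tables.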
+ * + * Return 0 on success or a negative error code otherwise + */ + +int msm_video_register(struct camss_video *video, struct v4l2_device *v4l2_dev, + const char *name, int is_pix) +{ + struct media_pad *pad = &video->pad; + struct video_device *vdev; + struct vb2_queue *q; + int ret; + + vdev = &video->vdev; + + mutex_init(&video->q_lock); + + q = &video->vb2_q; + q->drv_priv = video; + q->mem_ops = &vb2_dma_sg_memops; + q->ops = &msm_video_vb2_q_ops; + q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; + q->io_modes = VB2_DMABUF | VB2_MMAP | VB2_READ; + q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; + q->buf_struct_size = sizeof(struct camss_buffer); + q->dev = video->camss->dev; + q->lock = &video->q_lock; + ret = vb2_queue_init(q); + if (ret < 0) { + dev_err(v4l2_dev->dev, "Failed to init vb2 queue: %d\n", ret); + goto error_vb2_init; + } + + pad->flags = MEDIA_PAD_FL_SINK; + ret = media_entity_pads_init(&vdev->entity, 1, pad); + if (ret < 0) { + dev_err(v4l2_dev->dev, "Failed to init video entity: %d\n", + ret); + goto error_media_init; + } + + mutex_init(&video->lock); + + video->formats = formats_rdi; + video->nformats = ARRAY_SIZE(formats_rdi); + if (is_pix) { + video->formats = formats_pix; + video->nformats = ARRAY_SIZE(formats_pix); + } + + ret = msm_video_init_format(video); + if (ret < 0) { + dev_err(v4l2_dev->dev, "Failed to init format: %d\n", ret); + goto error_video_register; + } + + vdev->fops = &msm_vid_fops; + vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING | + V4L2_CAP_READWRITE; + vdev->ioctl_ops = &msm_vid_ioctl_ops; + vdev->release = msm_video_release; + vdev->v4l2_dev = v4l2_dev; + vdev->vfl_dir = VFL_DIR_RX; + vdev->queue = &video->vb2_q; + vdev->lock = &video->lock; + strlcpy(vdev->name, name, sizeof(vdev->name)); + + ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1); + if (ret < 0) { + dev_err(v4l2_dev->dev, "Failed to register video device: %d\n", + ret); + goto error_video_register; + } + + video_set_drvdata(vdev, video); + atomic_inc(&video->camss->ref_count); + + return 0; + +error_video_register: + media_entity_cleanup(&vdev->entity); + mutex_destroy(&video->lock); +error_media_init: + vb2_queue_release(&video->vb2_q); +error_vb2_init: + mutex_destroy(&video->q_lock); + + return ret; +} + +void msm_video_stop_streaming(struct camss_video *video) +{ + if (vb2_is_streaming(&video->vb2_q)) + vb2_queue_release(&video->vb2_q); +} + +void msm_video_unregister(struct camss_video *video) +{ + atomic_inc(&video->camss->ref_count); + video_unregister_device(&video->vdev); + atomic_dec(&video->camss->ref_count); +} diff --git a/drivers/media/platform/qcom/camss-8x16/camss-video.h b/drivers/media/platform/qcom/camss-8x16/camss-video.h new file mode 100644 index 000000000000..38bd1f2eec54 --- /dev/null +++ b/drivers/media/platform/qcom/camss-8x16/camss-video.h @@ -0,0 +1,70 @@ +/* + * camss-video.h + * + * Qualcomm MSM Camera Subsystem - V4L2 device node + * + * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. + * Copyright (C) 2015-2017 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef QC_MSM_CAMSS_VIDEO_H +#define QC_MSM_CAMSS_VIDEO_H + +#include +#include +#include +#include +#include +#include +#include +#include + +struct camss_buffer { + struct vb2_v4l2_buffer vb; + dma_addr_t addr[3]; + struct list_head queue; +}; + +struct camss_video; + +struct camss_video_ops { + int (*queue_buffer)(struct camss_video *vid, struct camss_buffer *buf); + int (*flush_buffers)(struct camss_video *vid, + enum vb2_buffer_state state); +}; + +struct camss_format_info; + +struct camss_video { + struct camss *camss; + struct vb2_queue vb2_q; + struct video_device vdev; + struct media_pad pad; + struct v4l2_format active_fmt; + enum v4l2_buf_type type; + struct media_pipeline pipe; + const struct camss_video_ops *ops; + struct mutex lock; + struct mutex q_lock; + unsigned int bpl_alignment; + unsigned int line_based; + const struct camss_format_info *formats; + unsigned int nformats; +}; + +void msm_video_stop_streaming(struct camss_video *video); + +int msm_video_register(struct camss_video *video, struct v4l2_device *v4l2_dev, + const char *name, int is_pix); + +void msm_video_unregister(struct camss_video *video); + +#endif /* QC_MSM_CAMSS_VIDEO_H */ diff --git a/drivers/media/platform/qcom/camss-8x16/camss.c b/drivers/media/platform/qcom/camss-8x16/camss.c new file mode 100644 index 000000000000..a3760b5dd1d1 --- /dev/null +++ b/drivers/media/platform/qcom/camss-8x16/camss.c @@ -0,0 +1,746 @@ +/* + * camss.c + * + * Qualcomm MSM Camera Subsystem - Core + * + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * Copyright (C) 2015-2017 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "camss.h" + +#define CAMSS_CLOCK_MARGIN_NUMERATOR 105 +#define CAMSS_CLOCK_MARGIN_DENOMINATOR 100 + +static const struct resources csiphy_res[] = { + /* CSIPHY0 */ + { + .regulator = { NULL }, + .clock = { "camss_top_ahb", "ispif_ahb", + "camss_ahb", "csiphy0_timer" }, + .clock_rate = { { 0 }, + { 0 }, + { 0 }, + { 100000000, 200000000 } }, + .reg = { "csiphy0", "csiphy0_clk_mux" }, + .interrupt = { "csiphy0" } + }, + + /* CSIPHY1 */ + { + .regulator = { NULL }, + .clock = { "camss_top_ahb", "ispif_ahb", + "camss_ahb", "csiphy1_timer" }, + .clock_rate = { { 0 }, + { 0 }, + { 0 }, + { 100000000, 200000000 } }, + .reg = { "csiphy1", "csiphy1_clk_mux" }, + .interrupt = { "csiphy1" } + } +}; + +static const struct resources csid_res[] = { + /* CSID0 */ + { + .regulator = { "vdda" }, + .clock = { "camss_top_ahb", "ispif_ahb", + "csi0_ahb", "camss_ahb", + "csi0", "csi0_phy", "csi0_pix", "csi0_rdi" }, + .clock_rate = { { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 100000000, 200000000 }, + { 0 }, + { 0 }, + { 0 } }, + .reg = { "csid0" }, + .interrupt = { "csid0" } + }, + + /* CSID1 */ + { + .regulator = { "vdda" }, + .clock = { "camss_top_ahb", "ispif_ahb", + "csi1_ahb", "camss_ahb", + "csi1", "csi1_phy", "csi1_pix", "csi1_rdi" }, + .clock_rate = { { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 100000000, 200000000 }, + { 0 }, + { 0 }, + { 0 } }, + .reg = { "csid1" }, + .interrupt = { "csid1" } + }, +}; + +static const struct resources_ispif ispif_res = { + /* ISPIF */ + .clock = { "camss_top_ahb", "camss_ahb", "ispif_ahb", + "csi0", "csi0_pix", "csi0_rdi", + "csi1", "csi1_pix", "csi1_rdi" }, + .clock_for_reset = { "camss_vfe_vfe", "camss_csi_vfe" }, + .reg = { "ispif", "csi_clk_mux" }, + .interrupt = "ispif" + +}; + +static const struct resources vfe_res = { + /* VFE0 */ + .regulator = { NULL }, + .clock = { "camss_top_ahb", "camss_vfe_vfe", "camss_csi_vfe", + "iface", "bus", "camss_ahb" }, + .clock_rate = { { 0 }, + { 50000000, 80000000, 100000000, 160000000, + 177780000, 200000000, 266670000, 320000000, + 400000000, 465000000 }, + { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 0 } }, + .reg = { "vfe0" }, + .interrupt = { "vfe0" } +}; + +/* + * camss_add_clock_margin - Add margin to clock frequency rate + * @rate: Clock frequency rate + * + * When making calculations with physical clock frequency values + * some safety margin must be added. Add it. 
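+ * The margin is CAMSS_CLOCK_MARGIN_NUMERATOR /
+ * CAMSS_CLOCK_MARGIN_DENOMINATOR, i.e. 5%: a computed rate of
+ * 100000000 Hz, for example, becomes 105000000 Hz.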
+ */ +inline void camss_add_clock_margin(u64 *rate) +{ + *rate *= CAMSS_CLOCK_MARGIN_NUMERATOR; + *rate = div_u64(*rate, CAMSS_CLOCK_MARGIN_DENOMINATOR); +} + +/* + * camss_enable_clocks - Enable multiple clocks + * @nclocks: Number of clocks in clock array + * @clock: Clock array + * @dev: Device + * + * Return 0 on success or a negative error code otherwise + */ +int camss_enable_clocks(int nclocks, struct camss_clock *clock, + struct device *dev) +{ + int ret; + int i; + + for (i = 0; i < nclocks; i++) { + ret = clk_prepare_enable(clock[i].clk); + if (ret) { + dev_err(dev, "clock enable failed: %d\n", ret); + goto error; + } + } + + return 0; + +error: + for (i--; i >= 0; i--) + clk_disable_unprepare(clock[i].clk); + + return ret; +} + +/* + * camss_disable_clocks - Disable multiple clocks + * @nclocks: Number of clocks in clock array + * @clock: Clock array + */ +void camss_disable_clocks(int nclocks, struct camss_clock *clock) +{ + int i; + + for (i = nclocks - 1; i >= 0; i--) + clk_disable_unprepare(clock[i].clk); +} + +/* + * camss_find_sensor - Find a linked media entity which represents a sensor + * @entity: Media entity to start searching from + * + * Return a pointer to sensor media entity or NULL if not found + */ +static struct media_entity *camss_find_sensor(struct media_entity *entity) +{ + struct media_pad *pad; + + while (1) { + pad = &entity->pads[0]; + if (!(pad->flags & MEDIA_PAD_FL_SINK)) + return NULL; + + pad = media_entity_remote_pad(pad); + if (!pad || !is_media_entity_v4l2_subdev(pad->entity)) + return NULL; + + entity = pad->entity; + + if (entity->function == MEDIA_ENT_F_CAM_SENSOR) + return entity; + } +} + +/* + * camss_get_pixel_clock - Get pixel clock rate from sensor + * @entity: Media entity in the current pipeline + * @pixel_clock: Received pixel clock value + * + * Return 0 on success or a negative error code otherwise + */ +int camss_get_pixel_clock(struct media_entity *entity, u32 *pixel_clock) +{ + struct media_entity *sensor; + struct v4l2_subdev *subdev; + struct v4l2_ctrl *ctrl; + + sensor = camss_find_sensor(entity); + if (!sensor) + return -ENODEV; + + subdev = media_entity_to_v4l2_subdev(sensor); + + ctrl = v4l2_ctrl_find(subdev->ctrl_handler, V4L2_CID_PIXEL_RATE); + + if (!ctrl) + return -EINVAL; + + *pixel_clock = v4l2_ctrl_g_ctrl_int64(ctrl); + + return 0; +} + +/* + * camss_of_parse_endpoint_node - Parse port endpoint node + * @dev: Device + * @node: Device node to be parsed + * @csd: Parsed data from port endpoint node + * + * Return 0 on success or a negative error code on failure + */ +static int camss_of_parse_endpoint_node(struct device *dev, + struct device_node *node, + struct camss_async_subdev *csd) +{ + struct csiphy_lanes_cfg *lncfg = &csd->interface.csi2.lane_cfg; + struct v4l2_fwnode_bus_mipi_csi2 *mipi_csi2; + struct v4l2_fwnode_endpoint vep = { { 0 } }; + unsigned int i; + + v4l2_fwnode_endpoint_parse(of_fwnode_handle(node), &vep); + + csd->interface.csiphy_id = vep.base.port; + + mipi_csi2 = &vep.bus.mipi_csi2; + lncfg->clk.pos = mipi_csi2->clock_lane; + lncfg->clk.pol = mipi_csi2->lane_polarities[0]; + lncfg->num_data = mipi_csi2->num_data_lanes; + + lncfg->data = devm_kzalloc(dev, lncfg->num_data * sizeof(*lncfg->data), + GFP_KERNEL); + if (!lncfg->data) + return -ENOMEM; + + for (i = 0; i < lncfg->num_data; i++) { + lncfg->data[i].pos = mipi_csi2->data_lanes[i]; + lncfg->data[i].pol = mipi_csi2->lane_polarities[i + 1]; + } + + return 0; +} + +/* + * camss_of_parse_ports - Parse ports node + * @dev: Device + * @notifier: 
v4l2_device notifier data + * + * Return number of "port" nodes found in "ports" node + */ +static int camss_of_parse_ports(struct device *dev, + struct v4l2_async_notifier *notifier) +{ + struct device_node *node = NULL; + struct device_node *remote = NULL; + unsigned int size, i; + int ret; + + while ((node = of_graph_get_next_endpoint(dev->of_node, node))) + if (of_device_is_available(node)) + notifier->num_subdevs++; + + size = sizeof(*notifier->subdevs) * notifier->num_subdevs; + notifier->subdevs = devm_kzalloc(dev, size, GFP_KERNEL); + if (!notifier->subdevs) { + dev_err(dev, "Failed to allocate memory\n"); + return -ENOMEM; + } + + i = 0; + while ((node = of_graph_get_next_endpoint(dev->of_node, node))) { + struct camss_async_subdev *csd; + + if (!of_device_is_available(node)) + continue; + + csd = devm_kzalloc(dev, sizeof(*csd), GFP_KERNEL); + if (!csd) { + of_node_put(node); + dev_err(dev, "Failed to allocate memory\n"); + return -ENOMEM; + } + + notifier->subdevs[i++] = &csd->asd; + + ret = camss_of_parse_endpoint_node(dev, node, csd); + if (ret < 0) { + of_node_put(node); + return ret; + } + + remote = of_graph_get_remote_port_parent(node); + of_node_put(node); + + if (!remote) { + dev_err(dev, "Cannot get remote parent\n"); + return -EINVAL; + } + + csd->asd.match_type = V4L2_ASYNC_MATCH_FWNODE; + csd->asd.match.fwnode.fwnode = of_fwnode_handle(remote); + } + + return notifier->num_subdevs; +} + +/* + * camss_init_subdevices - Initialize subdev structures and resources + * @camss: CAMSS device + * + * Return 0 on success or a negative error code on failure + */ +static int camss_init_subdevices(struct camss *camss) +{ + unsigned int i; + int ret; + + for (i = 0; i < ARRAY_SIZE(camss->csiphy); i++) { + ret = msm_csiphy_subdev_init(&camss->csiphy[i], + &csiphy_res[i], i); + if (ret < 0) { + dev_err(camss->dev, + "Failed to init csiphy%d sub-device: %d\n", + i, ret); + return ret; + } + } + + for (i = 0; i < ARRAY_SIZE(camss->csid); i++) { + ret = msm_csid_subdev_init(&camss->csid[i], + &csid_res[i], i); + if (ret < 0) { + dev_err(camss->dev, + "Failed to init csid%d sub-device: %d\n", + i, ret); + return ret; + } + } + + ret = msm_ispif_subdev_init(&camss->ispif, &ispif_res); + if (ret < 0) { + dev_err(camss->dev, "Failed to init ispif sub-device: %d\n", + ret); + return ret; + } + + ret = msm_vfe_subdev_init(&camss->vfe, &vfe_res); + if (ret < 0) { + dev_err(camss->dev, "Fail to init vfe sub-device: %d\n", ret); + return ret; + } + + return 0; +} + +/* + * camss_register_entities - Register subdev nodes and create links + * @camss: CAMSS device + * + * Return 0 on success or a negative error code on failure + */ +static int camss_register_entities(struct camss *camss) +{ + int i, j; + int ret; + + for (i = 0; i < ARRAY_SIZE(camss->csiphy); i++) { + ret = msm_csiphy_register_entity(&camss->csiphy[i], + &camss->v4l2_dev); + if (ret < 0) { + dev_err(camss->dev, + "Failed to register csiphy%d entity: %d\n", + i, ret); + goto err_reg_csiphy; + } + } + + for (i = 0; i < ARRAY_SIZE(camss->csid); i++) { + ret = msm_csid_register_entity(&camss->csid[i], + &camss->v4l2_dev); + if (ret < 0) { + dev_err(camss->dev, + "Failed to register csid%d entity: %d\n", + i, ret); + goto err_reg_csid; + } + } + + ret = msm_ispif_register_entities(&camss->ispif, &camss->v4l2_dev); + if (ret < 0) { + dev_err(camss->dev, "Failed to register ispif entities: %d\n", + ret); + goto err_reg_ispif; + } + + ret = msm_vfe_register_entities(&camss->vfe, &camss->v4l2_dev); + if (ret < 0) { + dev_err(camss->dev, 
"Failed to register vfe entities: %d\n", + ret); + goto err_reg_vfe; + } + + for (i = 0; i < ARRAY_SIZE(camss->csiphy); i++) { + for (j = 0; j < ARRAY_SIZE(camss->csid); j++) { + ret = media_create_pad_link( + &camss->csiphy[i].subdev.entity, + MSM_CSIPHY_PAD_SRC, + &camss->csid[j].subdev.entity, + MSM_CSID_PAD_SINK, + 0); + if (ret < 0) { + dev_err(camss->dev, + "Failed to link %s->%s entities: %d\n", + camss->csiphy[i].subdev.entity.name, + camss->csid[j].subdev.entity.name, + ret); + goto err_link; + } + } + } + + for (i = 0; i < ARRAY_SIZE(camss->csid); i++) { + for (j = 0; j < ARRAY_SIZE(camss->ispif.line); j++) { + ret = media_create_pad_link( + &camss->csid[i].subdev.entity, + MSM_CSID_PAD_SRC, + &camss->ispif.line[j].subdev.entity, + MSM_ISPIF_PAD_SINK, + 0); + if (ret < 0) { + dev_err(camss->dev, + "Failed to link %s->%s entities: %d\n", + camss->csid[i].subdev.entity.name, + camss->ispif.line[j].subdev.entity.name, + ret); + goto err_link; + } + } + } + + for (i = 0; i < ARRAY_SIZE(camss->ispif.line); i++) { + for (j = 0; j < ARRAY_SIZE(camss->vfe.line); j++) { + ret = media_create_pad_link( + &camss->ispif.line[i].subdev.entity, + MSM_ISPIF_PAD_SRC, + &camss->vfe.line[j].subdev.entity, + MSM_VFE_PAD_SINK, + 0); + if (ret < 0) { + dev_err(camss->dev, + "Failed to link %s->%s entities: %d\n", + camss->ispif.line[i].subdev.entity.name, + camss->vfe.line[j].subdev.entity.name, + ret); + goto err_link; + } + } + } + + return 0; + +err_link: + msm_vfe_unregister_entities(&camss->vfe); +err_reg_vfe: + msm_ispif_unregister_entities(&camss->ispif); +err_reg_ispif: + + i = ARRAY_SIZE(camss->csid); +err_reg_csid: + for (i--; i >= 0; i--) + msm_csid_unregister_entity(&camss->csid[i]); + + i = ARRAY_SIZE(camss->csiphy); +err_reg_csiphy: + for (i--; i >= 0; i--) + msm_csiphy_unregister_entity(&camss->csiphy[i]); + + return ret; +} + +/* + * camss_unregister_entities - Unregister subdev nodes + * @camss: CAMSS device + * + * Return 0 on success or a negative error code on failure + */ +static void camss_unregister_entities(struct camss *camss) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(camss->csiphy); i++) + msm_csiphy_unregister_entity(&camss->csiphy[i]); + + for (i = 0; i < ARRAY_SIZE(camss->csid); i++) + msm_csid_unregister_entity(&camss->csid[i]); + + msm_ispif_unregister_entities(&camss->ispif); + msm_vfe_unregister_entities(&camss->vfe); +} + +static int camss_subdev_notifier_bound(struct v4l2_async_notifier *async, + struct v4l2_subdev *subdev, + struct v4l2_async_subdev *asd) +{ + struct camss *camss = container_of(async, struct camss, notifier); + struct camss_async_subdev *csd = + container_of(asd, struct camss_async_subdev, asd); + u8 id = csd->interface.csiphy_id; + struct csiphy_device *csiphy = &camss->csiphy[id]; + + csiphy->cfg.csi2 = &csd->interface.csi2; + subdev->host_priv = csiphy; + + return 0; +} + +static int camss_subdev_notifier_complete(struct v4l2_async_notifier *async) +{ + struct camss *camss = container_of(async, struct camss, notifier); + struct v4l2_device *v4l2_dev = &camss->v4l2_dev; + struct v4l2_subdev *sd; + int ret; + + list_for_each_entry(sd, &v4l2_dev->subdevs, list) { + if (sd->host_priv) { + struct media_entity *sensor = &sd->entity; + struct csiphy_device *csiphy = + (struct csiphy_device *) sd->host_priv; + struct media_entity *input = &csiphy->subdev.entity; + unsigned int i; + + for (i = 0; i < sensor->num_pads; i++) { + if (sensor->pads[i].flags & MEDIA_PAD_FL_SOURCE) + break; + } + if (i == sensor->num_pads) { + dev_err(camss->dev, + "No 
source pad in external entity\n"); + return -EINVAL; + } + + ret = media_create_pad_link(sensor, i, + input, MSM_CSIPHY_PAD_SINK, + MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); + if (ret < 0) { + dev_err(camss->dev, + "Failed to link %s->%s entities: %d\n", + sensor->name, input->name, ret); + return ret; + } + } + } + + ret = v4l2_device_register_subdev_nodes(&camss->v4l2_dev); + if (ret < 0) + return ret; + + return media_device_register(&camss->media_dev); +} + +static const struct media_device_ops camss_media_ops = { + .link_notify = v4l2_pipeline_link_notify, +}; + +/* + * camss_probe - Probe CAMSS platform device + * @pdev: Pointer to CAMSS platform device + * + * Return 0 on success or a negative error code on failure + */ +static int camss_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct camss *camss; + int ret; + + camss = kzalloc(sizeof(*camss), GFP_KERNEL); + if (!camss) + return -ENOMEM; + + atomic_set(&camss->ref_count, 0); + camss->dev = dev; + platform_set_drvdata(pdev, camss); + + ret = camss_of_parse_ports(dev, &camss->notifier); + if (ret < 0) + return ret; + + ret = camss_init_subdevices(camss); + if (ret < 0) + return ret; + + ret = dma_set_mask_and_coherent(dev, 0xffffffff); + if (ret) + return ret; + + camss->media_dev.dev = camss->dev; + strlcpy(camss->media_dev.model, "Qualcomm Camera Subsystem", + sizeof(camss->media_dev.model)); + camss->media_dev.ops = &camss_media_ops; + media_device_init(&camss->media_dev); + + camss->v4l2_dev.mdev = &camss->media_dev; + ret = v4l2_device_register(camss->dev, &camss->v4l2_dev); + if (ret < 0) { + dev_err(dev, "Failed to register V4L2 device: %d\n", ret); + return ret; + } + + ret = camss_register_entities(camss); + if (ret < 0) + goto err_register_entities; + + if (camss->notifier.num_subdevs) { + camss->notifier.bound = camss_subdev_notifier_bound; + camss->notifier.complete = camss_subdev_notifier_complete; + + ret = v4l2_async_notifier_register(&camss->v4l2_dev, + &camss->notifier); + if (ret) { + dev_err(dev, + "Failed to register async subdev nodes: %d\n", + ret); + goto err_register_subdevs; + } + } else { + ret = v4l2_device_register_subdev_nodes(&camss->v4l2_dev); + if (ret < 0) { + dev_err(dev, "Failed to register subdev nodes: %d\n", + ret); + goto err_register_subdevs; + } + + ret = media_device_register(&camss->media_dev); + if (ret < 0) { + dev_err(dev, "Failed to register media device: %d\n", + ret); + goto err_register_subdevs; + } + } + + return 0; + +err_register_subdevs: + camss_unregister_entities(camss); +err_register_entities: + v4l2_device_unregister(&camss->v4l2_dev); + + return ret; +} + +void camss_delete(struct camss *camss) +{ + v4l2_device_unregister(&camss->v4l2_dev); + media_device_unregister(&camss->media_dev); + media_device_cleanup(&camss->media_dev); + + kfree(camss); +} + +/* + * camss_remove - Remove CAMSS platform device + * @pdev: Pointer to CAMSS platform device + * + * Always returns 0. 
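+ *
+ * Streaming is stopped and the async notifier is unregistered before
+ * the entities are torn down. camss_delete() is called here only if no
+ * video device node is still open (ref_count is zero); otherwise the
+ * deletion is deferred until the last msm_video_release().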
+ */ +static int camss_remove(struct platform_device *pdev) +{ + struct camss *camss = platform_get_drvdata(pdev); + + msm_vfe_stop_streaming(&camss->vfe); + + v4l2_async_notifier_unregister(&camss->notifier); + camss_unregister_entities(camss); + + if (atomic_read(&camss->ref_count) == 0) + camss_delete(camss); + + return 0; +} + +static const struct of_device_id camss_dt_match[] = { + { .compatible = "qcom,msm8916-camss" }, + { } +}; + +MODULE_DEVICE_TABLE(of, camss_dt_match); + +static struct platform_driver qcom_camss_driver = { + .probe = camss_probe, + .remove = camss_remove, + .driver = { + .name = "qcom-camss", + .of_match_table = camss_dt_match, + }, +}; + +module_platform_driver(qcom_camss_driver); + +MODULE_ALIAS("platform:qcom-camss"); +MODULE_DESCRIPTION("Qualcomm Camera Subsystem driver"); +MODULE_AUTHOR("Todor Tomov "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/qcom/camss-8x16/camss.h b/drivers/media/platform/qcom/camss-8x16/camss.h new file mode 100644 index 000000000000..4ad223443e4b --- /dev/null +++ b/drivers/media/platform/qcom/camss-8x16/camss.h @@ -0,0 +1,106 @@ +/* + * camss.h + * + * Qualcomm MSM Camera Subsystem - Core + * + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * Copyright (C) 2015-2017 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef QC_MSM_CAMSS_H +#define QC_MSM_CAMSS_H + +#include +#include +#include +#include +#include +#include +#include + +#include "camss-csid.h" +#include "camss-csiphy.h" +#include "camss-ispif.h" +#include "camss-vfe.h" + +#define CAMSS_CSID_NUM 2 +#define CAMSS_CSIPHY_NUM 2 + +#define to_camss(ptr_module) \ + container_of(ptr_module, struct camss, ptr_module) + +#define to_device(ptr_module) \ + (to_camss(ptr_module)->dev) + +#define module_pointer(ptr_module, index) \ + ((const struct ptr_module##_device (*)[]) &(ptr_module[-(index)])) + +#define to_camss_index(ptr_module, index) \ + container_of(module_pointer(ptr_module, index), \ + struct camss, ptr_module) + +#define to_device_index(ptr_module, index) \ + (to_camss_index(ptr_module, index)->dev) + +#define CAMSS_RES_MAX 15 + +struct resources { + char *regulator[CAMSS_RES_MAX]; + char *clock[CAMSS_RES_MAX]; + u32 clock_rate[CAMSS_RES_MAX][CAMSS_RES_MAX]; + char *reg[CAMSS_RES_MAX]; + char *interrupt[CAMSS_RES_MAX]; +}; + +struct resources_ispif { + char *clock[CAMSS_RES_MAX]; + char *clock_for_reset[CAMSS_RES_MAX]; + char *reg[CAMSS_RES_MAX]; + char *interrupt; +}; + +struct camss { + struct v4l2_device v4l2_dev; + struct v4l2_async_notifier notifier; + struct media_device media_dev; + struct device *dev; + struct csiphy_device csiphy[CAMSS_CSIPHY_NUM]; + struct csid_device csid[CAMSS_CSID_NUM]; + struct ispif_device ispif; + struct vfe_device vfe; + atomic_t ref_count; +}; + +struct camss_camera_interface { + u8 csiphy_id; + struct csiphy_csi2_cfg csi2; +}; + +struct camss_async_subdev { + struct camss_camera_interface interface; + struct v4l2_async_subdev asd; +}; + +struct camss_clock { + struct clk *clk; + const char *name; + u32 *freq; + u32 nfreqs; +}; + +void camss_add_clock_margin(u64 
*rate); +int camss_enable_clocks(int nclocks, struct camss_clock *clock, + struct device *dev); +void camss_disable_clocks(int nclocks, struct camss_clock *clock); +int camss_get_pixel_clock(struct media_entity *entity, u32 *pixel_clock); +void camss_delete(struct camss *camss); + +#endif /* QC_MSM_CAMSS_H */ diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c index 5f4434c0a8f1..68933d208063 100644 --- a/drivers/media/platform/qcom/venus/helpers.c +++ b/drivers/media/platform/qcom/venus/helpers.c @@ -34,6 +34,55 @@ struct intbuf { unsigned long attrs; }; +bool venus_helper_check_codec(struct venus_inst *inst, u32 v4l2_pixfmt) +{ + struct venus_core *core = inst->core; + u32 session_type = inst->session_type; + u32 codec; + + switch (v4l2_pixfmt) { + case V4L2_PIX_FMT_H264: + codec = HFI_VIDEO_CODEC_H264; + break; + case V4L2_PIX_FMT_H263: + codec = HFI_VIDEO_CODEC_H263; + break; + case V4L2_PIX_FMT_MPEG1: + codec = HFI_VIDEO_CODEC_MPEG1; + break; + case V4L2_PIX_FMT_MPEG2: + codec = HFI_VIDEO_CODEC_MPEG2; + break; + case V4L2_PIX_FMT_MPEG4: + codec = HFI_VIDEO_CODEC_MPEG4; + break; + case V4L2_PIX_FMT_VC1_ANNEX_G: + case V4L2_PIX_FMT_VC1_ANNEX_L: + codec = HFI_VIDEO_CODEC_VC1; + break; + case V4L2_PIX_FMT_VP8: + codec = HFI_VIDEO_CODEC_VP8; + break; + case V4L2_PIX_FMT_VP9: + codec = HFI_VIDEO_CODEC_VP9; + break; + case V4L2_PIX_FMT_XVID: + codec = HFI_VIDEO_CODEC_DIVX; + break; + default: + return false; + } + + if (session_type == VIDC_SESSION_TYPE_ENC && core->enc_codecs & codec) + return true; + + if (session_type == VIDC_SESSION_TYPE_DEC && core->dec_codecs & codec) + return true; + + return false; +} +EXPORT_SYMBOL_GPL(venus_helper_check_codec); + static int intbufs_set_buffer(struct venus_inst *inst, u32 type) { struct venus_core *core = inst->core; @@ -243,7 +292,7 @@ static void return_buf_error(struct venus_inst *inst, if (vbuf->vb2_buf.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) v4l2_m2m_src_buf_remove_by_buf(m2m_ctx, vbuf); else - v4l2_m2m_src_buf_remove_by_buf(m2m_ctx, vbuf); + v4l2_m2m_dst_buf_remove_by_buf(m2m_ctx, vbuf); v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR); } diff --git a/drivers/media/platform/qcom/venus/helpers.h b/drivers/media/platform/qcom/venus/helpers.h index 6a061b417a93..971392be5df5 100644 --- a/drivers/media/platform/qcom/venus/helpers.h +++ b/drivers/media/platform/qcom/venus/helpers.h @@ -19,6 +19,7 @@ struct venus_inst; +bool venus_helper_check_codec(struct venus_inst *inst, u32 v4l2_pixfmt); struct vb2_v4l2_buffer *venus_helper_find_buf(struct venus_inst *inst, unsigned int type, u32 idx); void venus_helper_buffers_done(struct venus_inst *inst, diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c index eb0c1c51cfef..da611a5eb670 100644 --- a/drivers/media/platform/qcom/venus/vdec.c +++ b/drivers/media/platform/qcom/venus/vdec.c @@ -102,7 +102,8 @@ static const struct venus_format vdec_formats[] = { }, }; -static const struct venus_format *find_format(u32 pixfmt, u32 type) +static const struct venus_format * +find_format(struct venus_inst *inst, u32 pixfmt, u32 type) { const struct venus_format *fmt = vdec_formats; unsigned int size = ARRAY_SIZE(vdec_formats); @@ -116,11 +117,15 @@ static const struct venus_format *find_format(u32 pixfmt, u32 type) if (i == size || fmt[i].type != type) return NULL; + if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE && + !venus_helper_check_codec(inst, fmt[i].pixfmt)) + return NULL; + return &fmt[i]; } static const struct 
venus_format * -find_format_by_index(unsigned int index, u32 type) +find_format_by_index(struct venus_inst *inst, unsigned int index, u32 type) { const struct venus_format *fmt = vdec_formats; unsigned int size = ARRAY_SIZE(vdec_formats); @@ -140,6 +145,10 @@ find_format_by_index(unsigned int index, u32 type) if (i == size) return NULL; + if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE && + !venus_helper_check_codec(inst, fmt[i].pixfmt)) + return NULL; + return &fmt[i]; } @@ -154,7 +163,7 @@ vdec_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f) memset(pfmt[0].reserved, 0, sizeof(pfmt[0].reserved)); memset(pixmp->reserved, 0, sizeof(pixmp->reserved)); - fmt = find_format(pixmp->pixelformat, f->type); + fmt = find_format(inst, pixmp->pixelformat, f->type); if (!fmt) { if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) pixmp->pixelformat = V4L2_PIX_FMT_NV12; @@ -162,7 +171,7 @@ vdec_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f) pixmp->pixelformat = V4L2_PIX_FMT_H264; else return NULL; - fmt = find_format(pixmp->pixelformat, f->type); + fmt = find_format(inst, pixmp->pixelformat, f->type); pixmp->width = 1280; pixmp->height = 720; } @@ -364,11 +373,12 @@ vdec_querycap(struct file *file, void *fh, struct v4l2_capability *cap) static int vdec_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f) { + struct venus_inst *inst = to_inst(file); const struct venus_format *fmt; memset(f->reserved, 0, sizeof(f->reserved)); - fmt = find_format_by_index(f->index, f->type); + fmt = find_format_by_index(inst, f->index, f->type); if (!fmt) return -EINVAL; @@ -417,10 +427,10 @@ static int vdec_enum_framesizes(struct file *file, void *fh, struct venus_inst *inst = to_inst(file); const struct venus_format *fmt; - fmt = find_format(fsize->pixel_format, + fmt = find_format(inst, fsize->pixel_format, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE); if (!fmt) { - fmt = find_format(fsize->pixel_format, + fmt = find_format(inst, fsize->pixel_format, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE); if (!fmt) return -EINVAL; @@ -1069,6 +1079,7 @@ static int vdec_probe(struct platform_device *pdev) if (!vdev) return -ENOMEM; + strlcpy(vdev->name, "qcom-venus-decoder", sizeof(vdev->name)); vdev->release = video_device_release; vdev->fops = &vdec_fops; vdev->ioctl_ops = &vdec_ioctl_ops; @@ -1103,8 +1114,7 @@ static int vdec_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM -static int vdec_runtime_suspend(struct device *dev) +static __maybe_unused int vdec_runtime_suspend(struct device *dev) { struct venus_core *core = dev_get_drvdata(dev); @@ -1118,7 +1128,7 @@ static int vdec_runtime_suspend(struct device *dev) return 0; } -static int vdec_runtime_resume(struct device *dev) +static __maybe_unused int vdec_runtime_resume(struct device *dev) { struct venus_core *core = dev_get_drvdata(dev); int ret; @@ -1132,7 +1142,6 @@ static int vdec_runtime_resume(struct device *dev) return ret; } -#endif static const struct dev_pm_ops vdec_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c index 39748e7a08e4..6f123a387cf9 100644 --- a/drivers/media/platform/qcom/venus/venc.c +++ b/drivers/media/platform/qcom/venus/venc.c @@ -84,14 +84,11 @@ static const struct venus_format venc_formats[] = { .pixfmt = V4L2_PIX_FMT_VP8, .num_planes = 1, .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE, - }, { - .pixfmt = V4L2_PIX_FMT_VP9, - .num_planes = 1, - .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE, }, }; -static const 
struct venus_format *find_format(u32 pixfmt, u32 type) +static const struct venus_format * +find_format(struct venus_inst *inst, u32 pixfmt, u32 type) { const struct venus_format *fmt = venc_formats; unsigned int size = ARRAY_SIZE(venc_formats); @@ -105,11 +102,15 @@ static const struct venus_format *find_format(u32 pixfmt, u32 type) if (i == size || fmt[i].type != type) return NULL; + if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && + !venus_helper_check_codec(inst, fmt[i].pixfmt)) + return NULL; + return &fmt[i]; } static const struct venus_format * -find_format_by_index(unsigned int index, u32 type) +find_format_by_index(struct venus_inst *inst, unsigned int index, u32 type) { const struct venus_format *fmt = venc_formats; unsigned int size = ARRAY_SIZE(venc_formats); @@ -129,6 +130,10 @@ find_format_by_index(unsigned int index, u32 type) if (i == size) return NULL; + if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && + !venus_helper_check_codec(inst, fmt[i].pixfmt)) + return NULL; + return &fmt[i]; } @@ -246,9 +251,10 @@ venc_querycap(struct file *file, void *fh, struct v4l2_capability *cap) static int venc_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f) { + struct venus_inst *inst = to_inst(file); const struct venus_format *fmt; - fmt = find_format_by_index(f->index, f->type); + fmt = find_format_by_index(inst, f->index, f->type); memset(f->reserved, 0, sizeof(f->reserved)); @@ -271,7 +277,7 @@ venc_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f) memset(pfmt[0].reserved, 0, sizeof(pfmt[0].reserved)); memset(pixmp->reserved, 0, sizeof(pixmp->reserved)); - fmt = find_format(pixmp->pixelformat, f->type); + fmt = find_format(inst, pixmp->pixelformat, f->type); if (!fmt) { if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) pixmp->pixelformat = V4L2_PIX_FMT_H264; @@ -279,7 +285,7 @@ venc_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f) pixmp->pixelformat = V4L2_PIX_FMT_NV12; else return NULL; - fmt = find_format(pixmp->pixelformat, f->type); + fmt = find_format(inst, pixmp->pixelformat, f->type); pixmp->width = 1280; pixmp->height = 720; } @@ -289,7 +295,7 @@ venc_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f) pixmp->height = clamp(pixmp->height, inst->cap_height.min, inst->cap_height.max); - if (inst->core->res->hfi_version == HFI_VERSION_1XX) + if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) pixmp->height = ALIGN(pixmp->height, 32); pixmp->width = ALIGN(pixmp->width, 2); @@ -524,10 +530,10 @@ static int venc_enum_framesizes(struct file *file, void *fh, fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE; - fmt = find_format(fsize->pixel_format, + fmt = find_format(inst, fsize->pixel_format, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE); if (!fmt) { - fmt = find_format(fsize->pixel_format, + fmt = find_format(inst, fsize->pixel_format, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE); if (!fmt) return -EINVAL; @@ -554,10 +560,10 @@ static int venc_enum_frameintervals(struct file *file, void *fh, fival->type = V4L2_FRMIVAL_TYPE_STEPWISE; - fmt = find_format(fival->pixel_format, + fmt = find_format(inst, fival->pixel_format, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE); if (!fmt) { - fmt = find_format(fival->pixel_format, + fmt = find_format(inst, fival->pixel_format, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE); if (!fmt) return -EINVAL; @@ -747,8 +753,8 @@ static int venc_init_session(struct venus_inst *inst) if (ret) return ret; - ret = venus_helper_set_input_resolution(inst, inst->out_width, - inst->out_height); + ret = venus_helper_set_input_resolution(inst, inst->width, + 
inst->height); if (ret) goto deinit; @@ -1010,6 +1016,8 @@ static int m2m_queue_init(void *priv, struct vb2_queue *src_vq, src_vq->allow_zero_bytesused = 1; src_vq->min_buffers_needed = 1; src_vq->dev = inst->core->dev; + if (inst->core->res->hfi_version == HFI_VERSION_1XX) + src_vq->bidirectional = 1; ret = vb2_queue_init(src_vq); if (ret) return ret; @@ -1190,6 +1198,7 @@ static int venc_probe(struct platform_device *pdev) if (!vdev) return -ENOMEM; + strlcpy(vdev->name, "qcom-venus-encoder", sizeof(vdev->name)); vdev->release = video_device_release; vdev->fops = &venc_fops; vdev->ioctl_ops = &venc_ioctl_ops; @@ -1224,8 +1233,7 @@ static int venc_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM -static int venc_runtime_suspend(struct device *dev) +static __maybe_unused int venc_runtime_suspend(struct device *dev) { struct venus_core *core = dev_get_drvdata(dev); @@ -1239,7 +1247,7 @@ static int venc_runtime_suspend(struct device *dev) return 0; } -static int venc_runtime_resume(struct device *dev) +static __maybe_unused int venc_runtime_resume(struct device *dev) { struct venus_core *core = dev_get_drvdata(dev); int ret; @@ -1253,7 +1261,6 @@ static int venc_runtime_resume(struct device *dev) return ret; } -#endif static const struct dev_pm_ops venc_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/rcar-vin/rcar-core.c index 77dff047c41c..142de447aaaa 100644 --- a/drivers/media/platform/rcar-vin/rcar-core.c +++ b/drivers/media/platform/rcar-vin/rcar-core.c @@ -222,8 +222,8 @@ static int rvin_digital_graph_init(struct rvin_dev *vin) subdevs[0] = &vin->digital.asd; - vin_dbg(vin, "Found digital subdevice %s\n", - of_node_full_name(to_of_node(subdevs[0]->match.fwnode.fwnode))); + vin_dbg(vin, "Found digital subdevice %pOF\n", + to_of_node(subdevs[0]->match.fwnode.fwnode)); vin->notifier.num_subdevs = 1; vin->notifier.subdevs = subdevs; diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c index 3ee51fc3bb50..3245bc45f4a0 100644 --- a/drivers/media/platform/rcar_fdp1.c +++ b/drivers/media/platform/rcar_fdp1.c @@ -2032,7 +2032,7 @@ static void fdp1_stop_streaming(struct vb2_queue *q) } } -static struct vb2_ops fdp1_qops = { +static const struct vb2_ops fdp1_qops = { .queue_setup = fdp1_queue_setup, .buf_prepare = fdp1_buf_prepare, .buf_queue = fdp1_buf_queue, diff --git a/drivers/media/platform/rcar_jpu.c b/drivers/media/platform/rcar_jpu.c index d1746ecc645d..070bac36d766 100644 --- a/drivers/media/platform/rcar_jpu.c +++ b/drivers/media/platform/rcar_jpu.c @@ -1506,7 +1506,7 @@ static void jpu_job_abort(void *priv) jpu_cleanup(ctx, true); } -static struct v4l2_m2m_ops jpu_m2m_ops = { +static const struct v4l2_m2m_ops jpu_m2m_ops = { .device_run = jpu_device_run, .job_ready = jpu_job_ready, .job_abort = jpu_job_abort, diff --git a/drivers/media/platform/s3c-camif/camif-core.c b/drivers/media/platform/s3c-camif/camif-core.c index ec4001970313..c4ab63986c8f 100644 --- a/drivers/media/platform/s3c-camif/camif-core.c +++ b/drivers/media/platform/s3c-camif/camif-core.c @@ -317,7 +317,6 @@ static int camif_media_dev_init(struct camif_dev *camif) ip_rev == S3C6410_CAMIF_IP_REV ? 
"6410" : "244X"); strlcpy(md->bus_info, "platform", sizeof(md->bus_info)); md->hw_revision = ip_rev; - md->driver_version = KERNEL_VERSION(1, 0, 0); md->dev = camif->dev; diff --git a/drivers/media/platform/s5p-cec/s5p_cec.c b/drivers/media/platform/s5p-cec/s5p_cec.c index 8e06071a7977..58d200e7c838 100644 --- a/drivers/media/platform/s5p-cec/s5p_cec.c +++ b/drivers/media/platform/s5p-cec/s5p_cec.c @@ -219,11 +219,8 @@ static int s5p_cec_probe(struct platform_device *pdev) if (cec->notifier == NULL) return -ENOMEM; - cec->adap = cec_allocate_adapter(&s5p_cec_adap_ops, cec, - CEC_NAME, - CEC_CAP_LOG_ADDRS | CEC_CAP_TRANSMIT | - CEC_CAP_PASSTHROUGH | CEC_CAP_RC | - (needs_hpd ? CEC_CAP_NEEDS_HPD : 0), 1); + cec->adap = cec_allocate_adapter(&s5p_cec_adap_ops, cec, CEC_NAME, + CEC_CAP_DEFAULTS | (needs_hpd ? CEC_CAP_NEEDS_HPD : 0), 1); ret = PTR_ERR_OR_ZERO(cec->adap); if (ret) return ret; diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c index 81ed5cd5cd5d..66aa8cf1d048 100644 --- a/drivers/media/platform/s5p-g2d/g2d.c +++ b/drivers/media/platform/s5p-g2d/g2d.c @@ -602,7 +602,7 @@ static const struct v4l2_ioctl_ops g2d_ioctl_ops = { .vidioc_cropcap = vidioc_cropcap, }; -static struct video_device g2d_videodev = { +static const struct video_device g2d_videodev = { .name = G2D_NAME, .fops = &g2d_fops, .ioctl_ops = &g2d_ioctl_ops, @@ -611,7 +611,7 @@ static struct video_device g2d_videodev = { .vfl_dir = VFL_DIR_M2M, }; -static struct v4l2_m2m_ops g2d_m2m_ops = { +static const struct v4l2_m2m_ops g2d_m2m_ops = { .device_run = device_run, .job_abort = job_abort, }; diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c index d1e3ebb22577..faac8161b683 100644 --- a/drivers/media/platform/s5p-jpeg/jpeg-core.c +++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -614,24 +615,27 @@ static inline struct s5p_jpeg_ctx *fh_to_ctx(struct v4l2_fh *fh) static int s5p_jpeg_to_user_subsampling(struct s5p_jpeg_ctx *ctx) { - WARN_ON(ctx->subsampling > 3); - switch (ctx->jpeg->variant->version) { case SJPEG_S5P: + WARN_ON(ctx->subsampling > 3); if (ctx->subsampling > 2) return V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY; return ctx->subsampling; case SJPEG_EXYNOS3250: case SJPEG_EXYNOS5420: + WARN_ON(ctx->subsampling > 6); if (ctx->subsampling > 3) return V4L2_JPEG_CHROMA_SUBSAMPLING_411; return exynos3250_decoded_subsampling[ctx->subsampling]; case SJPEG_EXYNOS4: - case SJPEG_EXYNOS5433: + WARN_ON(ctx->subsampling > 3); if (ctx->subsampling > 2) return V4L2_JPEG_CHROMA_SUBSAMPLING_420; return exynos4x12_decoded_subsampling[ctx->subsampling]; + case SJPEG_EXYNOS5433: + return ctx->subsampling; /* parsed from header */ default: + WARN_ON(ctx->subsampling > 3); return V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY; } } @@ -1094,6 +1098,44 @@ static void skip(struct s5p_jpeg_buffer *buf, long len) get_byte(buf); } +static bool s5p_jpeg_subsampling_decode(struct s5p_jpeg_ctx *ctx, + unsigned int subsampling) +{ + unsigned int version; + + switch (subsampling) { + case 0x11: + ctx->subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_444; + break; + case 0x21: + ctx->subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422; + break; + case 0x22: + ctx->subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_420; + break; + case 0x33: + ctx->subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY; + break; + case 0x41: + /* + * 4:1:1 subsampling only supported by 3250, 5420, and 5433 + * variants + */ + version = 
ctx->jpeg->variant->version; + if (version != SJPEG_EXYNOS3250 && + version != SJPEG_EXYNOS5420 && + version != SJPEG_EXYNOS5433) + return false; + + ctx->subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_411; + break; + default: + return false; + } + + return true; +} + static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result, unsigned long buffer, unsigned long size, struct s5p_jpeg_ctx *ctx) @@ -1204,6 +1246,10 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result, break; } } + + if (notfound || !sos || !s5p_jpeg_subsampling_decode(ctx, subsampling)) + return false; + result->w = width; result->h = height; result->sos = sos; @@ -1219,26 +1265,9 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result, } result->sof = sof; result->sof_len = sof_len; - result->size = result->components = components; + result->components = components; - switch (subsampling) { - case 0x11: - ctx->subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_444; - break; - case 0x21: - ctx->subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422; - break; - case 0x22: - ctx->subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_420; - break; - case 0x33: - ctx->subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY; - break; - default: - return false; - } - - return !notfound && sos; + return true; } static int s5p_jpeg_querycap(struct file *file, void *priv, @@ -1606,8 +1635,12 @@ static int s5p_jpeg_s_fmt(struct s5p_jpeg_ctx *ct, struct v4l2_format *f) FMT_TYPE_OUTPUT : FMT_TYPE_CAPTURE; q_data->fmt = s5p_jpeg_find_format(ct, pix->pixelformat, f_type); - q_data->w = pix->width; - q_data->h = pix->height; + if (ct->mode == S5P_JPEG_ENCODE || + (ct->mode == S5P_JPEG_DECODE && + q_data->fmt->fourcc != V4L2_PIX_FMT_JPEG)) { + q_data->w = pix->width; + q_data->h = pix->height; + } if (q_data->fmt->fourcc != V4L2_PIX_FMT_JPEG) { /* * During encoding Exynos4x12 SoCs access wider memory area @@ -1690,6 +1723,15 @@ static int s5p_jpeg_s_fmt_vid_out(struct file *file, void *priv, return s5p_jpeg_s_fmt(fh_to_ctx(priv), f); } +static int s5p_jpeg_subscribe_event(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub) +{ + if (sub->type == V4L2_EVENT_SOURCE_CHANGE) + return v4l2_src_change_event_subscribe(fh, sub); + + return -EINVAL; +} + static int exynos3250_jpeg_try_downscale(struct s5p_jpeg_ctx *ctx, struct v4l2_rect *r) { @@ -2015,6 +2057,9 @@ static const struct v4l2_ioctl_ops s5p_jpeg_ioctl_ops = { .vidioc_g_selection = s5p_jpeg_g_selection, .vidioc_s_selection = s5p_jpeg_s_selection, + + .vidioc_subscribe_event = s5p_jpeg_subscribe_event, + .vidioc_unsubscribe_event = v4l2_event_unsubscribe, }; /* @@ -2259,6 +2304,7 @@ static void exynos4_jpeg_device_run(void *priv) exynos4_jpeg_set_dec_bitstream_size(jpeg->regs, bitstream_size); } + exynos4_jpeg_set_sys_int_enable(jpeg->regs, 1); exynos4_jpeg_set_enc_dec_mode(jpeg->regs, ctx->mode); spin_unlock_irqrestore(&jpeg->slock, flags); @@ -2407,8 +2453,17 @@ static int s5p_jpeg_job_ready(void *priv) { struct s5p_jpeg_ctx *ctx = priv; - if (ctx->mode == S5P_JPEG_DECODE) + if (ctx->mode == S5P_JPEG_DECODE) { + /* + * We have only one input buffer and one output buffer. If there + * is a resolution change event, no need to continue decoding. 
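+ * Decoding resumes once user space acknowledges the event with
+ * STREAMOFF on the capture queue, which switches the state back to
+ * JPEGCTX_RUNNING (see s5p_jpeg_stop_streaming()).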
+ */ + if (ctx->state == JPEGCTX_RESOLUTION_CHANGE) + return 0; + return ctx->hdr_parsed; + } + return 1; } @@ -2487,6 +2542,30 @@ static int s5p_jpeg_buf_prepare(struct vb2_buffer *vb) return 0; } +static void s5p_jpeg_set_capture_queue_data(struct s5p_jpeg_ctx *ctx) +{ + struct s5p_jpeg_q_data *q_data = &ctx->cap_q; + + q_data->w = ctx->out_q.w; + q_data->h = ctx->out_q.h; + + /* + * This call to jpeg_bound_align_image() takes care of width and + * height values alignment when user space calls the QBUF of + * OUTPUT buffer after the S_FMT of CAPTURE buffer. + * Please note that on Exynos4x12 SoCs, resigning from executing + * S_FMT on capture buffer for each JPEG image can result in a + * hardware hangup if subsampling is lower than the one of input + * JPEG. + */ + jpeg_bound_align_image(ctx, &q_data->w, S5P_JPEG_MIN_WIDTH, + S5P_JPEG_MAX_WIDTH, q_data->fmt->h_align, + &q_data->h, S5P_JPEG_MIN_HEIGHT, + S5P_JPEG_MAX_HEIGHT, q_data->fmt->v_align); + + q_data->size = q_data->w * q_data->h * q_data->fmt->depth >> 3; +} + static void s5p_jpeg_buf_queue(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); @@ -2494,9 +2573,20 @@ static void s5p_jpeg_buf_queue(struct vb2_buffer *vb) if (ctx->mode == S5P_JPEG_DECODE && vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { - struct s5p_jpeg_q_data tmp, *q_data; + static const struct v4l2_event ev_src_ch = { + .type = V4L2_EVENT_SOURCE_CHANGE, + .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION, + }; + struct vb2_queue *dst_vq; + u32 ori_w; + u32 ori_h; - ctx->hdr_parsed = s5p_jpeg_parse_hdr(&tmp, + dst_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, + V4L2_BUF_TYPE_VIDEO_CAPTURE); + ori_w = ctx->out_q.w; + ori_h = ctx->out_q.h; + + ctx->hdr_parsed = s5p_jpeg_parse_hdr(&ctx->out_q, (unsigned long)vb2_plane_vaddr(vb, 0), min((unsigned long)ctx->out_q.size, vb2_get_plane_payload(vb, 0)), ctx); @@ -2505,24 +2595,18 @@ static void s5p_jpeg_buf_queue(struct vb2_buffer *vb) return; } - q_data = &ctx->out_q; - q_data->w = tmp.w; - q_data->h = tmp.h; - q_data->sos = tmp.sos; - memcpy(q_data->dht.marker, tmp.dht.marker, - sizeof(tmp.dht.marker)); - memcpy(q_data->dht.len, tmp.dht.len, sizeof(tmp.dht.len)); - q_data->dht.n = tmp.dht.n; - memcpy(q_data->dqt.marker, tmp.dqt.marker, - sizeof(tmp.dqt.marker)); - memcpy(q_data->dqt.len, tmp.dqt.len, sizeof(tmp.dqt.len)); - q_data->dqt.n = tmp.dqt.n; - q_data->sof = tmp.sof; - q_data->sof_len = tmp.sof_len; - - q_data = &ctx->cap_q; - q_data->w = tmp.w; - q_data->h = tmp.h; + /* + * If there is a resolution change event, only update capture + * queue when it is not streaming. Otherwise, update it in + * STREAMOFF. See s5p_jpeg_stop_streaming for detail. + */ + if (ctx->out_q.w != ori_w || ctx->out_q.h != ori_h) { + v4l2_event_queue_fh(&ctx->fh, &ev_src_ch); + if (vb2_is_streaming(dst_vq)) + ctx->state = JPEGCTX_RESOLUTION_CHANGE; + else + s5p_jpeg_set_capture_queue_data(ctx); + } } v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf); @@ -2542,6 +2626,17 @@ static void s5p_jpeg_stop_streaming(struct vb2_queue *q) { struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(q); + /* + * STREAMOFF is an acknowledgment for resolution change event. + * Before STREAMOFF, we still have to return the old resolution and + * subsampling. Update capture queue when the stream is off. 
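+ * The new width/height were already parsed into the output queue by
+ * s5p_jpeg_parse_hdr() from s5p_jpeg_buf_queue(); only the capture
+ * queue data still has to be realigned here.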
+ */ + if (ctx->state == JPEGCTX_RESOLUTION_CHANGE && + q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { + s5p_jpeg_set_capture_queue_data(ctx); + ctx->state = JPEGCTX_RUNNING; + } + pm_runtime_put(ctx->jpeg->dev); } @@ -2662,6 +2757,8 @@ static irqreturn_t exynos4_jpeg_irq(int irq, void *priv) spin_lock(&jpeg->slock); + exynos4_jpeg_set_sys_int_enable(jpeg->regs, 0); + curr_ctx = v4l2_m2m_get_curr_priv(jpeg->m2m_dev); src_vb = v4l2_m2m_src_buf_remove(curr_ctx->fh.m2m_ctx); @@ -2710,6 +2807,8 @@ static irqreturn_t exynos4_jpeg_irq(int irq, void *priv) if (jpeg->variant->version == SJPEG_EXYNOS4) curr_ctx->subsampling = exynos4_jpeg_get_frame_fmt(jpeg->regs); + exynos4_jpeg_set_enc_dec_mode(jpeg->regs, S5P_JPEG_DISABLE); + spin_unlock(&jpeg->slock); v4l2_m2m_job_finish(jpeg->m2m_dev, curr_ctx->fh.m2m_ctx); @@ -2724,6 +2823,7 @@ static irqreturn_t exynos3250_jpeg_irq(int irq, void *dev_id) unsigned long payload_size = 0; enum vb2_buffer_state state = VB2_BUF_STATE_DONE; bool interrupt_timeout = false; + bool stream_error = false; u32 irq_status; spin_lock(&jpeg->slock); @@ -2740,6 +2840,12 @@ static irqreturn_t exynos3250_jpeg_irq(int irq, void *dev_id) jpeg->irq_status |= irq_status; + if (jpeg->variant->version == SJPEG_EXYNOS5420 && + irq_status & EXYNOS3250_STREAM_STAT) { + stream_error = true; + dev_err(jpeg->dev, "Syntax error or unrecoverable error occurred.\n"); + } + curr_ctx = v4l2_m2m_get_curr_priv(jpeg->m2m_dev); if (!curr_ctx) @@ -2756,7 +2862,7 @@ static irqreturn_t exynos3250_jpeg_irq(int irq, void *dev_id) EXYNOS3250_RDMA_DONE | EXYNOS3250_RESULT_STAT)) payload_size = exynos3250_jpeg_compressed_size(jpeg->regs); - else if (interrupt_timeout) + else if (interrupt_timeout || stream_error) state = VB2_BUF_STATE_ERROR; else goto exit_unlock; diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.h b/drivers/media/platform/s5p-jpeg/jpeg-core.h index 4492a3535df5..a46465e10351 100644 --- a/drivers/media/platform/s5p-jpeg/jpeg-core.h +++ b/drivers/media/platform/s5p-jpeg/jpeg-core.h @@ -63,6 +63,7 @@ #define S5P_JPEG_ENCODE 0 #define S5P_JPEG_DECODE 1 +#define S5P_JPEG_DISABLE -1 #define FMT_TYPE_OUTPUT 0 #define FMT_TYPE_CAPTURE 1 @@ -98,6 +99,11 @@ enum exynos4_jpeg_img_quality_level { QUALITY_LEVEL_4, /* low */ }; +enum s5p_jpeg_ctx_state { + JPEGCTX_RUNNING = 0, + JPEGCTX_RESOLUTION_CHANGE, +}; + /** * struct s5p_jpeg - JPEG IP abstraction * @lock: the mutex protecting this structure @@ -220,6 +226,7 @@ struct s5p_jpeg_q_data { * @hdr_parsed: set if header has been parsed during decompression * @crop_altered: set if crop rectangle has been altered by the user space * @ctrl_handler: controls handler + * @state: state of the context */ struct s5p_jpeg_ctx { struct s5p_jpeg *jpeg; @@ -235,6 +242,7 @@ struct s5p_jpeg_ctx { bool hdr_parsed; bool crop_altered; struct v4l2_ctrl_handler ctrl_handler; + enum s5p_jpeg_ctx_state state; }; /** diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c b/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c index a1d823ab0c63..c72789bae6ed 100644 --- a/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c +++ b/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c @@ -20,6 +20,10 @@ void exynos4_jpeg_sw_reset(void __iomem *base) { unsigned int reg; + reg = readl(base + EXYNOS4_JPEG_CNTL_REG); + writel(reg & ~(EXYNOS4_DEC_MODE | EXYNOS4_ENC_MODE), + base + EXYNOS4_JPEG_CNTL_REG); + reg = readl(base + EXYNOS4_JPEG_CNTL_REG); writel(reg & ~EXYNOS4_SOFT_RESET_HI, base + EXYNOS4_JPEG_CNTL_REG); @@ -38,10 +42,13 @@ void exynos4_jpeg_set_enc_dec_mode(void __iomem 
*base, unsigned int mode) writel((reg & EXYNOS4_ENC_DEC_MODE_MASK) | EXYNOS4_DEC_MODE, base + EXYNOS4_JPEG_CNTL_REG); - } else {/* encode */ + } else if (mode == S5P_JPEG_ENCODE) {/* encode */ writel((reg & EXYNOS4_ENC_DEC_MODE_MASK) | EXYNOS4_ENC_MODE, base + EXYNOS4_JPEG_CNTL_REG); + } else { /* disable both */ + writel(reg & EXYNOS4_ENC_DEC_MODE_MASK, + base + EXYNOS4_JPEG_CNTL_REG); } } diff --git a/drivers/media/platform/s5p-jpeg/jpeg-regs.h b/drivers/media/platform/s5p-jpeg/jpeg-regs.h index 1870400468b2..df790b10140c 100644 --- a/drivers/media/platform/s5p-jpeg/jpeg-regs.h +++ b/drivers/media/platform/s5p-jpeg/jpeg-regs.h @@ -371,7 +371,7 @@ #define EXYNOS4_NF_SHIFT 16 #define EXYNOS4_NF_MASK 0xff #define EXYNOS4_NF(x) \ - (((x) << EXYNOS4_NF_SHIFT) & EXYNOS4_NF_MASK) + (((x) & EXYNOS4_NF_MASK) << EXYNOS4_NF_SHIFT) /* JPEG quantizer table register */ #define EXYNOS4_QTBL_CONTENT(n) (0x100 + (n) * 0x40) diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c index 96dc01750bc0..36762ec954e7 100644 --- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c +++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c @@ -1708,11 +1708,10 @@ static int sh_mobile_ceu_probe(struct platform_device *pdev) err = dma_declare_coherent_memory(&pdev->dev, res->start, res->start, resource_size(res), - DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE); - if (!err) { + if (err) { dev_err(&pdev->dev, "Unable to declare CEU memory.\n"); - return -ENXIO; + return err; } pcdev->video_limit = resource_size(res); diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c index 45a0429d75bb..1f3c450c7a69 100644 --- a/drivers/media/platform/soc_camera/soc_camera.c +++ b/drivers/media/platform/soc_camera/soc_camera.c @@ -820,7 +820,7 @@ static unsigned int soc_camera_poll(struct file *file, poll_table *pt) return res; } -static struct v4l2_file_operations soc_camera_fops = { +static const struct v4l2_file_operations soc_camera_fops = { .owner = THIS_MODULE, .open = soc_camera_open, .release = soc_camera_close, @@ -1550,8 +1550,7 @@ static int soc_of_bind(struct soc_camera_host *ici, v4l2_clk_name_i2c(clk_name, sizeof(clk_name), client->adapter->nr, client->addr); else - v4l2_clk_name_of(clk_name, sizeof(clk_name), - of_node_full_name(remote)); + v4l2_clk_name_of(clk_name, sizeof(clk_name), remote); icd->clk = v4l2_clk_register(&soc_camera_clk_ops, clk_name, icd); if (IS_ERR(icd->clk)) { @@ -1590,8 +1589,7 @@ static void scan_of_host(struct soc_camera_host *ici) ren = of_graph_get_remote_port(epn); if (!ren) { - dev_notice(dev, "no remote for %s\n", - of_node_full_name(epn)); + dev_notice(dev, "no remote for %pOF\n", epn); continue; } diff --git a/drivers/media/platform/soc_camera/soc_mediabus.c b/drivers/media/platform/soc_camera/soc_mediabus.c index 57581f626f4c..0ad4b28266e4 100644 --- a/drivers/media/platform/soc_camera/soc_mediabus.c +++ b/drivers/media/platform/soc_camera/soc_mediabus.c @@ -508,6 +508,9 @@ unsigned int soc_mbus_config_compatible(const struct v4l2_mbus_config *cfg, mipi_clock = common_flags & (V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK | V4L2_MBUS_CSI2_CONTINUOUS_CLOCK); return (!mipi_lanes || !mipi_clock) ? 
0 : common_flags; + default: + WARN_ON(1); + return -EINVAL; } return 0; } diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c index 7918b928f058..939da6da7644 100644 --- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c +++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c @@ -360,7 +360,7 @@ static void bdisp_device_run(void *priv) bdisp_job_finish(ctx, VB2_BUF_STATE_ERROR); } -static struct v4l2_m2m_ops bdisp_m2m_ops = { +static const struct v4l2_m2m_ops bdisp_m2m_ops = { .device_run = bdisp_device_run, .job_abort = bdisp_job_abort, }; diff --git a/drivers/media/platform/sti/cec/stih-cec.c b/drivers/media/platform/sti/cec/stih-cec.c index dccbdaebb7a8..70160df36de9 100644 --- a/drivers/media/platform/sti/cec/stih-cec.c +++ b/drivers/media/platform/sti/cec/stih-cec.c @@ -351,9 +351,7 @@ static int stih_cec_probe(struct platform_device *pdev) } cec->adap = cec_allocate_adapter(&sti_cec_adap_ops, cec, - CEC_NAME, - CEC_CAP_LOG_ADDRS | CEC_CAP_PASSTHROUGH | - CEC_CAP_TRANSMIT, CEC_MAX_LOG_ADDRS); + CEC_NAME, CEC_CAP_DEFAULTS, CEC_MAX_LOG_ADDRS); ret = PTR_ERR_OR_ZERO(cec->adap); if (ret) return ret; diff --git a/drivers/media/platform/sti/delta/delta-v4l2.c b/drivers/media/platform/sti/delta/delta-v4l2.c index c6f2e244b7a8..b2dc3d223a9c 100644 --- a/drivers/media/platform/sti/delta/delta-v4l2.c +++ b/drivers/media/platform/sti/delta/delta-v4l2.c @@ -1095,7 +1095,7 @@ static int delta_job_ready(void *priv) } /* mem-to-mem ops */ -static struct v4l2_m2m_ops delta_m2m_ops = { +static const struct v4l2_m2m_ops delta_m2m_ops = { .device_run = delta_device_run, .job_ready = delta_job_ready, .job_abort = delta_job_abort, @@ -1574,7 +1574,7 @@ static void delta_vb2_frame_stop_streaming(struct vb2_queue *q) } /* VB2 queue ops */ -static struct vb2_ops delta_vb2_au_ops = { +static const struct vb2_ops delta_vb2_au_ops = { .queue_setup = delta_vb2_au_queue_setup, .buf_prepare = delta_vb2_au_prepare, .buf_queue = delta_vb2_au_queue, @@ -1584,7 +1584,7 @@ static struct vb2_ops delta_vb2_au_ops = { .stop_streaming = delta_vb2_au_stop_streaming, }; -static struct vb2_ops delta_vb2_frame_ops = { +static const struct vb2_ops delta_vb2_frame_ops = { .queue_setup = delta_vb2_frame_queue_setup, .buf_prepare = delta_vb2_frame_prepare, .buf_finish = delta_vb2_frame_finish, diff --git a/drivers/media/platform/stm32/stm32-cec.c b/drivers/media/platform/stm32/stm32-cec.c index 9ab896b01ee8..0e5aa17bdd40 100644 --- a/drivers/media/platform/stm32/stm32-cec.c +++ b/drivers/media/platform/stm32/stm32-cec.c @@ -246,9 +246,7 @@ static const struct regmap_config stm32_cec_regmap_cfg = { static int stm32_cec_probe(struct platform_device *pdev) { - u32 caps = CEC_CAP_LOG_ADDRS | CEC_CAP_PASSTHROUGH | - CEC_CAP_TRANSMIT | CEC_CAP_RC | CEC_CAP_PHYS_ADDR | - CEC_MODE_MONITOR_ALL; + u32 caps = CEC_CAP_DEFAULTS | CEC_CAP_PHYS_ADDR | CEC_MODE_MONITOR_ALL; struct resource *res; struct stm32_cec *cec; void __iomem *mmio; diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c index 83d32a5d0f40..35ba6f211b79 100644 --- a/drivers/media/platform/stm32/stm32-dcmi.c +++ b/drivers/media/platform/stm32/stm32-dcmi.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #define DRV_NAME "stm32-dcmi" @@ -107,6 +108,11 @@ struct dcmi_format { u8 bpp; }; +struct dcmi_framesize { + u32 width; + u32 height; +}; + struct dcmi_buf { struct vb2_v4l2_buffer vb; bool prepared; @@ -131,10 +137,16 @@ struct stm32_dcmi { struct v4l2_async_notifier 
notifier; struct dcmi_graph_entity entity; struct v4l2_format fmt; + struct v4l2_rect crop; + bool do_crop; - const struct dcmi_format **user_formats; - unsigned int num_user_formats; - const struct dcmi_format *current_fmt; + const struct dcmi_format **sd_formats; + unsigned int num_of_sd_formats; + const struct dcmi_format *sd_format; + struct dcmi_framesize *sd_framesizes; + unsigned int num_of_sd_framesizes; + struct dcmi_framesize sd_framesize; + struct v4l2_rect sd_bounds; /* Protect this data structure */ struct mutex lock; @@ -295,6 +307,10 @@ static int dcmi_start_dma(struct stm32_dcmi *dcmi, /* Push current DMA transaction in the pending queue */ dcmi->dma_cookie = dmaengine_submit(desc); + if (dma_submit_error(dcmi->dma_cookie)) { + dev_err(dcmi->dev, "%s: DMA submission failed\n", __func__); + return -ENXIO; + } dma_async_issue_pending(dcmi->dma_chan); @@ -321,6 +337,28 @@ static int dcmi_start_capture(struct stm32_dcmi *dcmi) return 0; } +static void dcmi_set_crop(struct stm32_dcmi *dcmi) +{ + u32 size, start; + + /* Crop resolution */ + size = ((dcmi->crop.height - 1) << 16) | + ((dcmi->crop.width << 1) - 1); + reg_write(dcmi->regs, DCMI_CWSIZE, size); + + /* Crop start point */ + start = ((dcmi->crop.top) << 16) | + ((dcmi->crop.left << 1)); + reg_write(dcmi->regs, DCMI_CWSTRT, start); + + dev_dbg(dcmi->dev, "Cropping to %ux%u@%u:%u\n", + dcmi->crop.width, dcmi->crop.height, + dcmi->crop.left, dcmi->crop.top); + + /* Enable crop */ + reg_set(dcmi->regs, DCMI_CR, CR_CROP); +} + static irqreturn_t dcmi_irq_thread(int irq, void *arg) { struct stm32_dcmi *dcmi = arg; @@ -486,7 +524,7 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count) { struct stm32_dcmi *dcmi = vb2_get_drv_priv(vq); struct dcmi_buf *buf, *node; - u32 val; + u32 val = 0; int ret; ret = clk_enable(dcmi->mclk); @@ -506,22 +544,16 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count) spin_lock_irq(&dcmi->irqlock); - val = reg_read(dcmi->regs, DCMI_CR); - - val &= ~(CR_PCKPOL | CR_HSPOL | CR_VSPOL | - CR_EDM_0 | CR_EDM_1 | CR_FCRC_0 | - CR_FCRC_1 | CR_JPEG | CR_ESS); - /* Set bus width */ switch (dcmi->bus.bus_width) { case 14: - val &= CR_EDM_0 + CR_EDM_1; + val |= CR_EDM_0 | CR_EDM_1; break; case 12: - val &= CR_EDM_1; + val |= CR_EDM_1; break; case 10: - val &= CR_EDM_0; + val |= CR_EDM_0; break; default: /* Set bus width to 8 bits by default */ @@ -542,6 +574,10 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count) reg_write(dcmi->regs, DCMI_CR, val); + /* Set crop */ + if (dcmi->do_crop) + dcmi_set_crop(dcmi); + /* Enable dcmi */ reg_set(dcmi->regs, DCMI_CR, CR_ENABLE); @@ -662,7 +698,7 @@ static void dcmi_stop_streaming(struct vb2_queue *vq) dcmi->errors_count, dcmi->buffers_count); } -static struct vb2_ops dcmi_video_qops = { +static const struct vb2_ops dcmi_video_qops = { .queue_setup = dcmi_queue_setup, .buf_init = dcmi_buf_init, .buf_prepare = dcmi_buf_prepare, @@ -686,12 +722,12 @@ static int dcmi_g_fmt_vid_cap(struct file *file, void *priv, static const struct dcmi_format *find_format_by_fourcc(struct stm32_dcmi *dcmi, unsigned int fourcc) { - unsigned int num_formats = dcmi->num_user_formats; + unsigned int num_formats = dcmi->num_of_sd_formats; const struct dcmi_format *fmt; unsigned int i; for (i = 0; i < num_formats; i++) { - fmt = dcmi->user_formats[i]; + fmt = dcmi->sd_formats[i]; if (fmt->fourcc == fourcc) return fmt; } @@ -699,41 +735,108 @@ static const struct dcmi_format *find_format_by_fourcc(struct stm32_dcmi *dcmi, 
return NULL; } -static int dcmi_try_fmt(struct stm32_dcmi *dcmi, struct v4l2_format *f, - const struct dcmi_format **current_fmt) +static void __find_outer_frame_size(struct stm32_dcmi *dcmi, + struct v4l2_pix_format *pix, + struct dcmi_framesize *framesize) { - const struct dcmi_format *dcmi_fmt; - struct v4l2_pix_format *pixfmt = &f->fmt.pix; + struct dcmi_framesize *match = NULL; + unsigned int i; + unsigned int min_err = UINT_MAX; + + for (i = 0; i < dcmi->num_of_sd_framesizes; i++) { + struct dcmi_framesize *fsize = &dcmi->sd_framesizes[i]; + int w_err = (fsize->width - pix->width); + int h_err = (fsize->height - pix->height); + int err = w_err + h_err; + + if ((w_err >= 0) && (h_err >= 0) && (err < min_err)) { + min_err = err; + match = fsize; + } + } + if (!match) + match = &dcmi->sd_framesizes[0]; + + *framesize = *match; +} + +static int dcmi_try_fmt(struct stm32_dcmi *dcmi, struct v4l2_format *f, + const struct dcmi_format **sd_format, + struct dcmi_framesize *sd_framesize) +{ + const struct dcmi_format *sd_fmt; + struct dcmi_framesize sd_fsize; + struct v4l2_pix_format *pix = &f->fmt.pix; struct v4l2_subdev_pad_config pad_cfg; struct v4l2_subdev_format format = { .which = V4L2_SUBDEV_FORMAT_TRY, }; int ret; - dcmi_fmt = find_format_by_fourcc(dcmi, pixfmt->pixelformat); - if (!dcmi_fmt) { - dcmi_fmt = dcmi->user_formats[dcmi->num_user_formats - 1]; - pixfmt->pixelformat = dcmi_fmt->fourcc; + sd_fmt = find_format_by_fourcc(dcmi, pix->pixelformat); + if (!sd_fmt) { + sd_fmt = dcmi->sd_formats[dcmi->num_of_sd_formats - 1]; + pix->pixelformat = sd_fmt->fourcc; } /* Limit to hardware capabilities */ - pixfmt->width = clamp(pixfmt->width, MIN_WIDTH, MAX_WIDTH); - pixfmt->height = clamp(pixfmt->height, MIN_HEIGHT, MAX_HEIGHT); + pix->width = clamp(pix->width, MIN_WIDTH, MAX_WIDTH); + pix->height = clamp(pix->height, MIN_HEIGHT, MAX_HEIGHT); - v4l2_fill_mbus_format(&format.format, pixfmt, dcmi_fmt->mbus_code); + if (dcmi->do_crop && dcmi->num_of_sd_framesizes) { + struct dcmi_framesize outer_sd_fsize; + /* + * If crop is requested and sensor have discrete frame sizes, + * select the frame size that is just larger than request + */ + __find_outer_frame_size(dcmi, pix, &outer_sd_fsize); + pix->width = outer_sd_fsize.width; + pix->height = outer_sd_fsize.height; + } + + v4l2_fill_mbus_format(&format.format, pix, sd_fmt->mbus_code); ret = v4l2_subdev_call(dcmi->entity.subdev, pad, set_fmt, &pad_cfg, &format); if (ret < 0) return ret; - v4l2_fill_pix_format(pixfmt, &format.format); + /* Update pix regarding to what sensor can do */ + v4l2_fill_pix_format(pix, &format.format); - pixfmt->field = V4L2_FIELD_NONE; - pixfmt->bytesperline = pixfmt->width * dcmi_fmt->bpp; - pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height; + /* Save resolution that sensor can actually do */ + sd_fsize.width = pix->width; + sd_fsize.height = pix->height; - if (current_fmt) - *current_fmt = dcmi_fmt; + if (dcmi->do_crop) { + struct v4l2_rect c = dcmi->crop; + struct v4l2_rect max_rect; + + /* + * Adjust crop by making the intersection between + * format resolution request and crop request + */ + max_rect.top = 0; + max_rect.left = 0; + max_rect.width = pix->width; + max_rect.height = pix->height; + v4l2_rect_map_inside(&c, &max_rect); + c.top = clamp_t(s32, c.top, 0, pix->height - c.height); + c.left = clamp_t(s32, c.left, 0, pix->width - c.width); + dcmi->crop = c; + + /* Adjust format resolution request to crop */ + pix->width = dcmi->crop.width; + pix->height = dcmi->crop.height; + } + + pix->field = 
V4L2_FIELD_NONE; + pix->bytesperline = pix->width * sd_fmt->bpp; + pix->sizeimage = pix->bytesperline * pix->height; + + if (sd_format) + *sd_format = sd_fmt; + if (sd_framesize) + *sd_framesize = sd_fsize; return 0; } @@ -743,22 +846,42 @@ static int dcmi_set_fmt(struct stm32_dcmi *dcmi, struct v4l2_format *f) struct v4l2_subdev_format format = { .which = V4L2_SUBDEV_FORMAT_ACTIVE, }; - const struct dcmi_format *current_fmt; + const struct dcmi_format *sd_format; + struct dcmi_framesize sd_framesize; + struct v4l2_mbus_framefmt *mf = &format.format; + struct v4l2_pix_format *pix = &f->fmt.pix; int ret; - ret = dcmi_try_fmt(dcmi, f, ¤t_fmt); + /* + * Try format, fmt.width/height could have been changed + * to match sensor capability or crop request + * sd_format & sd_framesize will contain what subdev + * can do for this request. + */ + ret = dcmi_try_fmt(dcmi, f, &sd_format, &sd_framesize); if (ret) return ret; - v4l2_fill_mbus_format(&format.format, &f->fmt.pix, - current_fmt->mbus_code); + /* pix to mbus format */ + v4l2_fill_mbus_format(mf, pix, + sd_format->mbus_code); + mf->width = sd_framesize.width; + mf->height = sd_framesize.height; + ret = v4l2_subdev_call(dcmi->entity.subdev, pad, set_fmt, NULL, &format); if (ret < 0) return ret; + dev_dbg(dcmi->dev, "Sensor format set to 0x%x %ux%u\n", + mf->code, mf->width, mf->height); + dev_dbg(dcmi->dev, "Buffer format set to %4.4s %ux%u\n", + (char *)&pix->pixelformat, + pix->width, pix->height); + dcmi->fmt = *f; - dcmi->current_fmt = current_fmt; + dcmi->sd_format = sd_format; + dcmi->sd_framesize = sd_framesize; return 0; } @@ -779,7 +902,7 @@ static int dcmi_try_fmt_vid_cap(struct file *file, void *priv, { struct stm32_dcmi *dcmi = video_drvdata(file); - return dcmi_try_fmt(dcmi, f, NULL); + return dcmi_try_fmt(dcmi, f, NULL, NULL); } static int dcmi_enum_fmt_vid_cap(struct file *file, void *priv, @@ -787,10 +910,197 @@ static int dcmi_enum_fmt_vid_cap(struct file *file, void *priv, { struct stm32_dcmi *dcmi = video_drvdata(file); - if (f->index >= dcmi->num_user_formats) + if (f->index >= dcmi->num_of_sd_formats) return -EINVAL; - f->pixelformat = dcmi->user_formats[f->index]->fourcc; + f->pixelformat = dcmi->sd_formats[f->index]->fourcc; + return 0; +} + +static int dcmi_get_sensor_format(struct stm32_dcmi *dcmi, + struct v4l2_pix_format *pix) +{ + struct v4l2_subdev_format fmt = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + }; + int ret; + + ret = v4l2_subdev_call(dcmi->entity.subdev, pad, get_fmt, NULL, &fmt); + if (ret) + return ret; + + v4l2_fill_pix_format(pix, &fmt.format); + + return 0; +} + +static int dcmi_set_sensor_format(struct stm32_dcmi *dcmi, + struct v4l2_pix_format *pix) +{ + const struct dcmi_format *sd_fmt; + struct v4l2_subdev_format format = { + .which = V4L2_SUBDEV_FORMAT_TRY, + }; + struct v4l2_subdev_pad_config pad_cfg; + int ret; + + sd_fmt = find_format_by_fourcc(dcmi, pix->pixelformat); + if (!sd_fmt) { + sd_fmt = dcmi->sd_formats[dcmi->num_of_sd_formats - 1]; + pix->pixelformat = sd_fmt->fourcc; + } + + v4l2_fill_mbus_format(&format.format, pix, sd_fmt->mbus_code); + ret = v4l2_subdev_call(dcmi->entity.subdev, pad, set_fmt, + &pad_cfg, &format); + if (ret < 0) + return ret; + + return 0; +} + +static int dcmi_get_sensor_bounds(struct stm32_dcmi *dcmi, + struct v4l2_rect *r) +{ + struct v4l2_subdev_selection bounds = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + .target = V4L2_SEL_TGT_CROP_BOUNDS, + }; + unsigned int max_width, max_height, max_pixsize; + struct v4l2_pix_format pix; + unsigned int i; + int ret; + + 
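+ /*
+ * Three fallbacks are tried in order: the subdev selection API,
+ * the largest enumerated discrete frame size, and finally the
+ * current sensor format.
+ */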
/* + * Get sensor bounds first + */ + ret = v4l2_subdev_call(dcmi->entity.subdev, pad, get_selection, + NULL, &bounds); + if (!ret) + *r = bounds.r; + if (ret != -ENOIOCTLCMD) + return ret; + + /* + * If selection is not implemented, + * fallback by enumerating sensor frame sizes + * and take the largest one + */ + max_width = 0; + max_height = 0; + max_pixsize = 0; + for (i = 0; i < dcmi->num_of_sd_framesizes; i++) { + struct dcmi_framesize *fsize = &dcmi->sd_framesizes[i]; + unsigned int pixsize = fsize->width * fsize->height; + + if (pixsize > max_pixsize) { + max_pixsize = pixsize; + max_width = fsize->width; + max_height = fsize->height; + } + } + if (max_pixsize > 0) { + r->top = 0; + r->left = 0; + r->width = max_width; + r->height = max_height; + return 0; + } + + /* + * If frame sizes enumeration is not implemented, + * fallback by getting current sensor frame size + */ + ret = dcmi_get_sensor_format(dcmi, &pix); + if (ret) + return ret; + + r->top = 0; + r->left = 0; + r->width = pix.width; + r->height = pix.height; + + return 0; +} + +static int dcmi_g_selection(struct file *file, void *fh, + struct v4l2_selection *s) +{ + struct stm32_dcmi *dcmi = video_drvdata(file); + + if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) + return -EINVAL; + + switch (s->target) { + case V4L2_SEL_TGT_CROP_DEFAULT: + case V4L2_SEL_TGT_CROP_BOUNDS: + s->r = dcmi->sd_bounds; + return 0; + case V4L2_SEL_TGT_CROP: + if (dcmi->do_crop) { + s->r = dcmi->crop; + } else { + s->r.top = 0; + s->r.left = 0; + s->r.width = dcmi->fmt.fmt.pix.width; + s->r.height = dcmi->fmt.fmt.pix.height; + } + break; + default: + return -EINVAL; + } + + return 0; +} + +static int dcmi_s_selection(struct file *file, void *priv, + struct v4l2_selection *s) +{ + struct stm32_dcmi *dcmi = video_drvdata(file); + struct v4l2_rect r = s->r; + struct v4l2_rect max_rect; + struct v4l2_pix_format pix; + + if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || + s->target != V4L2_SEL_TGT_CROP) + return -EINVAL; + + /* Reset sensor resolution to max resolution */ + pix.pixelformat = dcmi->fmt.fmt.pix.pixelformat; + pix.width = dcmi->sd_bounds.width; + pix.height = dcmi->sd_bounds.height; + dcmi_set_sensor_format(dcmi, &pix); + + /* + * Make the intersection between + * sensor resolution + * and crop request + */ + max_rect.top = 0; + max_rect.left = 0; + max_rect.width = pix.width; + max_rect.height = pix.height; + v4l2_rect_map_inside(&r, &max_rect); + r.top = clamp_t(s32, r.top, 0, pix.height - r.height); + r.left = clamp_t(s32, r.left, 0, pix.width - r.width); + + if (!((r.top == dcmi->sd_bounds.top) && + (r.left == dcmi->sd_bounds.left) && + (r.width == dcmi->sd_bounds.width) && + (r.height == dcmi->sd_bounds.height))) { + /* Crop if request is different than sensor resolution */ + dcmi->do_crop = true; + dcmi->crop = r; + dev_dbg(dcmi->dev, "s_selection: crop %ux%u@(%u,%u) from %ux%u\n", + r.width, r.height, r.left, r.top, + pix.width, pix.height); + } else { + /* Disable crop */ + dcmi->do_crop = false; + dev_dbg(dcmi->dev, "s_selection: crop is disabled\n"); + } + + s->r = r; return 0; } @@ -832,18 +1142,18 @@ static int dcmi_enum_framesizes(struct file *file, void *fh, struct v4l2_frmsizeenum *fsize) { struct stm32_dcmi *dcmi = video_drvdata(file); - const struct dcmi_format *dcmi_fmt; + const struct dcmi_format *sd_fmt; struct v4l2_subdev_frame_size_enum fse = { .index = fsize->index, .which = V4L2_SUBDEV_FORMAT_ACTIVE, }; int ret; - dcmi_fmt = find_format_by_fourcc(dcmi, fsize->pixel_format); - if (!dcmi_fmt) + sd_fmt = 
find_format_by_fourcc(dcmi, fsize->pixel_format); + if (!sd_fmt) return -EINVAL; - fse.code = dcmi_fmt->mbus_code; + fse.code = sd_fmt->mbus_code; ret = v4l2_subdev_call(dcmi->entity.subdev, pad, enum_frame_size, NULL, &fse); @@ -861,7 +1171,7 @@ static int dcmi_enum_frameintervals(struct file *file, void *fh, struct v4l2_frmivalenum *fival) { struct stm32_dcmi *dcmi = video_drvdata(file); - const struct dcmi_format *dcmi_fmt; + const struct dcmi_format *sd_fmt; struct v4l2_subdev_frame_interval_enum fie = { .index = fival->index, .width = fival->width, @@ -870,11 +1180,11 @@ static int dcmi_enum_frameintervals(struct file *file, void *fh, }; int ret; - dcmi_fmt = find_format_by_fourcc(dcmi, fival->pixel_format); - if (!dcmi_fmt) + sd_fmt = find_format_by_fourcc(dcmi, fival->pixel_format); + if (!sd_fmt) return -EINVAL; - fie.code = dcmi_fmt->mbus_code; + fie.code = sd_fmt->mbus_code; ret = v4l2_subdev_call(dcmi->entity.subdev, pad, enum_frame_interval, NULL, &fie); @@ -952,6 +1262,8 @@ static const struct v4l2_ioctl_ops dcmi_ioctl_ops = { .vidioc_g_fmt_vid_cap = dcmi_g_fmt_vid_cap, .vidioc_s_fmt_vid_cap = dcmi_s_fmt_vid_cap, .vidioc_enum_fmt_vid_cap = dcmi_enum_fmt_vid_cap, + .vidioc_g_selection = dcmi_g_selection, + .vidioc_s_selection = dcmi_s_selection, .vidioc_enum_input = dcmi_enum_input, .vidioc_g_input = dcmi_g_input, @@ -996,15 +1308,15 @@ static int dcmi_set_default_fmt(struct stm32_dcmi *dcmi) .width = CIF_WIDTH, .height = CIF_HEIGHT, .field = V4L2_FIELD_NONE, - .pixelformat = dcmi->user_formats[0]->fourcc, + .pixelformat = dcmi->sd_formats[0]->fourcc, }, }; int ret; - ret = dcmi_try_fmt(dcmi, &f, NULL); + ret = dcmi_try_fmt(dcmi, &f, NULL, NULL); if (ret) return ret; - dcmi->current_fmt = dcmi->user_formats[0]; + dcmi->sd_format = dcmi->sd_formats[0]; dcmi->fmt = f; return 0; } @@ -1027,7 +1339,7 @@ static const struct dcmi_format dcmi_formats[] = { static int dcmi_formats_init(struct stm32_dcmi *dcmi) { - const struct dcmi_format *dcmi_fmts[ARRAY_SIZE(dcmi_formats)]; + const struct dcmi_format *sd_fmts[ARRAY_SIZE(dcmi_formats)]; unsigned int num_fmts = 0, i, j; struct v4l2_subdev *subdev = dcmi->entity.subdev; struct v4l2_subdev_mbus_code_enum mbus_code = { @@ -1042,13 +1354,13 @@ static int dcmi_formats_init(struct stm32_dcmi *dcmi) /* Code supported, have we got this fourcc yet? 
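 * (only add each fourcc to the list once)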
*/ for (j = 0; j < num_fmts; j++) - if (dcmi_fmts[j]->fourcc == + if (sd_fmts[j]->fourcc == dcmi_formats[i].fourcc) /* Already available */ break; if (j == num_fmts) /* New */ - dcmi_fmts[num_fmts++] = dcmi_formats + i; + sd_fmts[num_fmts++] = dcmi_formats + i; } mbus_code.index++; } @@ -1056,18 +1368,63 @@ static int dcmi_formats_init(struct stm32_dcmi *dcmi) if (!num_fmts) return -ENXIO; - dcmi->num_user_formats = num_fmts; - dcmi->user_formats = devm_kcalloc(dcmi->dev, - num_fmts, sizeof(struct dcmi_format *), - GFP_KERNEL); - if (!dcmi->user_formats) { - dev_err(dcmi->dev, "could not allocate memory\n"); + dcmi->num_of_sd_formats = num_fmts; + dcmi->sd_formats = devm_kcalloc(dcmi->dev, + num_fmts, sizeof(struct dcmi_format *), + GFP_KERNEL); + if (!dcmi->sd_formats) { + dev_err(dcmi->dev, "Could not allocate memory\n"); return -ENOMEM; } - memcpy(dcmi->user_formats, dcmi_fmts, + memcpy(dcmi->sd_formats, sd_fmts, num_fmts * sizeof(struct dcmi_format *)); - dcmi->current_fmt = dcmi->user_formats[0]; + dcmi->sd_format = dcmi->sd_formats[0]; + + return 0; +} + +static int dcmi_framesizes_init(struct stm32_dcmi *dcmi) +{ + unsigned int num_fsize = 0; + struct v4l2_subdev *subdev = dcmi->entity.subdev; + struct v4l2_subdev_frame_size_enum fse = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + .code = dcmi->sd_format->mbus_code, + }; + unsigned int ret; + unsigned int i; + + /* Allocate discrete framesizes array */ + while (!v4l2_subdev_call(subdev, pad, enum_frame_size, + NULL, &fse)) + fse.index++; + + num_fsize = fse.index; + if (!num_fsize) + return 0; + + dcmi->num_of_sd_framesizes = num_fsize; + dcmi->sd_framesizes = devm_kcalloc(dcmi->dev, num_fsize, + sizeof(struct dcmi_framesize), + GFP_KERNEL); + if (!dcmi->sd_framesizes) { + dev_err(dcmi->dev, "Could not allocate memory\n"); + return -ENOMEM; + } + + /* Fill array with sensor supported framesizes */ + dev_dbg(dcmi->dev, "Sensor supports %u frame sizes:\n", num_fsize); + for (i = 0; i < dcmi->num_of_sd_framesizes; i++) { + fse.index = i; + ret = v4l2_subdev_call(subdev, pad, enum_frame_size, + NULL, &fse); + if (ret) + return ret; + dcmi->sd_framesizes[fse.index].width = fse.max_width; + dcmi->sd_framesizes[fse.index].height = fse.max_height; + dev_dbg(dcmi->dev, "%ux%u\n", fse.max_width, fse.max_height); + } return 0; } @@ -1084,6 +1441,18 @@ static int dcmi_graph_notify_complete(struct v4l2_async_notifier *notifier) return ret; } + ret = dcmi_framesizes_init(dcmi); + if (ret) { + dev_err(dcmi->dev, "Could not initialize framesizes\n"); + return ret; + } + + ret = dcmi_get_sensor_bounds(dcmi, &dcmi->sd_bounds); + if (ret) { + dev_err(dcmi->dev, "Could not get sensor bounds\n"); + return ret; + } + ret = dcmi_set_default_fmt(dcmi); if (ret) { dev_err(dcmi->dev, "Could not set default format\n"); @@ -1209,7 +1578,7 @@ static int dcmi_probe(struct platform_device *pdev) if (!dcmi) return -ENOMEM; - dcmi->rstc = devm_reset_control_get(&pdev->dev, NULL); + dcmi->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); if (IS_ERR(dcmi->rstc)) { dev_err(&pdev->dev, "Could not get reset control\n"); return -ENODEV; diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c index 177faa36bc16..42e383a48ffe 100644 --- a/drivers/media/platform/ti-vpe/cal.c +++ b/drivers/media/platform/ti-vpe/cal.c @@ -1420,7 +1420,7 @@ static const struct v4l2_ioctl_ops cal_ioctl_ops = { .vidioc_unsubscribe_event = v4l2_event_unsubscribe, }; -static struct video_device cal_videodev = { +static const struct video_device cal_videodev = { 
.name = CAL_MODULE_NAME, .fops = &cal_fops, .ioctl_ops = &cal_ioctl_ops, @@ -1702,7 +1702,7 @@ static int of_cal_create_instance(struct cal_ctx *ctx, int inst) asd->match_type = V4L2_ASYNC_MATCH_FWNODE; asd->match.fwnode.fwnode = of_fwnode_handle(sensor_node); - remote_ep = of_parse_phandle(ep_node, "remote-endpoint", 0); + remote_ep = of_graph_get_remote_endpoint(ep_node); if (!remote_ep) { ctx_dbg(3, ctx, "can't get remote-endpoint\n"); goto cleanup_exit; diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c index c47151495b6f..45bd10544189 100644 --- a/drivers/media/platform/ti-vpe/vpe.c +++ b/drivers/media/platform/ti-vpe/vpe.c @@ -2421,7 +2421,7 @@ static const struct v4l2_file_operations vpe_fops = { .mmap = v4l2_m2m_fop_mmap, }; -static struct video_device vpe_videodev = { +static const struct video_device vpe_videodev = { .name = VPE_MODULE_NAME, .fops = &vpe_fops, .ioctl_ops = &vpe_ioctl_ops, @@ -2430,7 +2430,7 @@ static struct video_device vpe_videodev = { .vfl_dir = VFL_DIR_M2M, }; -static struct v4l2_m2m_ops m2m_ops = { +static const struct v4l2_m2m_ops m2m_ops = { .device_run = device_run, .job_ready = job_ready, .job_abort = job_abort, diff --git a/drivers/media/platform/via-camera.c b/drivers/media/platform/via-camera.c index e16f70a5df1d..805d4a8fc17e 100644 --- a/drivers/media/platform/via-camera.c +++ b/drivers/media/platform/via-camera.c @@ -1259,7 +1259,7 @@ static struct viafb_pm_hooks viacam_pm_hooks = { * Setup stuff. */ -static struct video_device viacam_v4l_template = { +static const struct video_device viacam_v4l_template = { .name = "via-camera", .minor = -1, .tvnorms = V4L2_STD_NTSC_M, diff --git a/drivers/media/platform/video-mux.c b/drivers/media/platform/video-mux.c index 665744716f73..ee89ad76bee2 100644 --- a/drivers/media/platform/video-mux.c +++ b/drivers/media/platform/video-mux.c @@ -17,8 +17,7 @@ #include #include #include -#include -#include +#include #include #include #include @@ -30,7 +29,7 @@ struct video_mux { struct v4l2_subdev subdev; struct media_pad *pads; struct v4l2_mbus_framefmt *format_mbus; - struct regmap_field *field; + struct mux_control *mux; struct mutex lock; int active; }; @@ -71,7 +70,7 @@ static int video_mux_link_setup(struct media_entity *entity, } dev_dbg(sd->dev, "setting %d active\n", local->index); - ret = regmap_field_write(vmux->field, local->index); + ret = mux_control_try_select(vmux->mux, local->index); if (ret < 0) goto out; vmux->active = local->index; @@ -80,6 +79,7 @@ static int video_mux_link_setup(struct media_entity *entity, goto out; dev_dbg(sd->dev, "going inactive\n"); + mux_control_deselect(vmux->mux); vmux->active = -1; } @@ -193,46 +193,6 @@ static const struct v4l2_subdev_ops video_mux_subdev_ops = { .video = &video_mux_subdev_video_ops, }; -static int video_mux_probe_mmio_mux(struct video_mux *vmux) -{ - struct device *dev = vmux->subdev.dev; - struct of_phandle_args args; - struct reg_field field; - struct regmap *regmap; - u32 reg, mask; - int ret; - - ret = of_parse_phandle_with_args(dev->of_node, "mux-controls", - "#mux-control-cells", 0, &args); - if (ret) - return ret; - - if (!of_device_is_compatible(args.np, "mmio-mux")) - return -EINVAL; - - regmap = syscon_node_to_regmap(args.np->parent); - if (IS_ERR(regmap)) - return PTR_ERR(regmap); - - ret = of_property_read_u32_index(args.np, "mux-reg-masks", - 2 * args.args[0], ®); - if (!ret) - ret = of_property_read_u32_index(args.np, "mux-reg-masks", - 2 * args.args[0] + 1, &mask); - if (ret < 0) - return ret; - - 
field.reg = reg; - field.msb = fls(mask) - 1; - field.lsb = ffs(mask) - 1; - - vmux->field = devm_regmap_field_alloc(dev, regmap, field); - if (IS_ERR(vmux->field)) - return PTR_ERR(vmux->field); - - return 0; -} - static int video_mux_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; @@ -270,8 +230,9 @@ static int video_mux_probe(struct platform_device *pdev) return -EINVAL; } - ret = video_mux_probe_mmio_mux(vmux); - if (ret) { + vmux->mux = devm_mux_control_get(dev, NULL); + if (IS_ERR(vmux->mux)) { + ret = PTR_ERR(vmux->mux); if (ret != -EPROBE_DEFER) dev_err(dev, "Failed to get mux: %d\n", ret); return ret; diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c index 970b9b6dab25..b01fba020d5f 100644 --- a/drivers/media/platform/vim2m.c +++ b/drivers/media/platform/vim2m.c @@ -974,7 +974,7 @@ static const struct v4l2_file_operations vim2m_fops = { .mmap = v4l2_m2m_fop_mmap, }; -static struct video_device vim2m_videodev = { +static const struct video_device vim2m_videodev = { .name = MEM2MEM_NAME, .vfl_dir = VFL_DIR_M2M, .fops = &vim2m_fops, @@ -983,7 +983,7 @@ static struct video_device vim2m_videodev = { .release = video_device_release_empty, }; -static struct v4l2_m2m_ops m2m_ops = { +static const struct v4l2_m2m_ops m2m_ops = { .device_run = device_run, .job_ready = job_ready, .job_abort = job_abort, diff --git a/drivers/media/platform/vimc/vimc-debayer.c b/drivers/media/platform/vimc/vimc-debayer.c index 033a131f67af..4d663e89d33f 100644 --- a/drivers/media/platform/vimc/vimc-debayer.c +++ b/drivers/media/platform/vimc/vimc-debayer.c @@ -373,7 +373,7 @@ static int vimc_deb_s_stream(struct v4l2_subdev *sd, int enable) return 0; } -static struct v4l2_subdev_video_ops vimc_deb_video_ops = { +static const struct v4l2_subdev_video_ops vimc_deb_video_ops = { .s_stream = vimc_deb_s_stream, }; diff --git a/drivers/media/platform/vimc/vimc-scaler.c b/drivers/media/platform/vimc/vimc-scaler.c index 0a3e086e12f3..e1602e0bc230 100644 --- a/drivers/media/platform/vimc/vimc-scaler.c +++ b/drivers/media/platform/vimc/vimc-scaler.c @@ -267,7 +267,7 @@ static int vimc_sca_s_stream(struct v4l2_subdev *sd, int enable) return 0; } -static struct v4l2_subdev_video_ops vimc_sca_video_ops = { +static const struct v4l2_subdev_video_ops vimc_sca_video_ops = { .s_stream = vimc_sca_s_stream, }; diff --git a/drivers/media/platform/vimc/vimc-sensor.c b/drivers/media/platform/vimc/vimc-sensor.c index 615c2b18dcfc..02e68c8fc02b 100644 --- a/drivers/media/platform/vimc/vimc-sensor.c +++ b/drivers/media/platform/vimc/vimc-sensor.c @@ -282,7 +282,7 @@ static int vimc_sen_s_stream(struct v4l2_subdev *sd, int enable) return 0; } -static struct v4l2_subdev_video_ops vimc_sen_video_ops = { +static const struct v4l2_subdev_video_ops vimc_sen_video_ops = { .s_stream = vimc_sen_s_stream, }; diff --git a/drivers/media/platform/vivid/vivid-cec.c b/drivers/media/platform/vivid/vivid-cec.c index e15705758969..b55d278d38a7 100644 --- a/drivers/media/platform/vivid/vivid-cec.c +++ b/drivers/media/platform/vivid/vivid-cec.c @@ -22,6 +22,15 @@ #include "vivid-core.h" #include "vivid-cec.h" +#define CEC_TIM_START_BIT_TOTAL 4500 +#define CEC_TIM_START_BIT_LOW 3700 +#define CEC_TIM_START_BIT_HIGH 800 +#define CEC_TIM_DATA_BIT_TOTAL 2400 +#define CEC_TIM_DATA_BIT_0_LOW 1500 +#define CEC_TIM_DATA_BIT_0_HIGH 900 +#define CEC_TIM_DATA_BIT_1_LOW 600 +#define CEC_TIM_DATA_BIT_1_HIGH 1800 + void vivid_cec_bus_free_work(struct vivid_dev *dev) { spin_lock(&dev->cec_slock); @@ -64,6 
+73,58 @@ static bool vivid_cec_find_dest_adap(struct vivid_dev *dev, return false; } +static void vivid_cec_pin_adap_events(struct cec_adapter *adap, ktime_t ts, + const struct cec_msg *msg, bool nacked) +{ + unsigned int len = nacked ? 1 : msg->len; + unsigned int i; + bool bit; + + if (adap == NULL) + return; + ts = ktime_sub_us(ts, (CEC_TIM_START_BIT_TOTAL + + len * 10 * CEC_TIM_DATA_BIT_TOTAL)); + cec_queue_pin_cec_event(adap, false, ts); + ts = ktime_add_us(ts, CEC_TIM_START_BIT_LOW); + cec_queue_pin_cec_event(adap, true, ts); + ts = ktime_add_us(ts, CEC_TIM_START_BIT_HIGH); + + for (i = 0; i < 10 * len; i++) { + switch (i % 10) { + case 0 ... 7: + bit = msg->msg[i / 10] & (0x80 >> (i % 10)); + break; + case 8: /* EOM */ + bit = i / 10 == msg->len - 1; + break; + case 9: /* ACK */ + bit = cec_msg_is_broadcast(msg) ^ nacked; + break; + } + cec_queue_pin_cec_event(adap, false, ts); + if (bit) + ts = ktime_add_us(ts, CEC_TIM_DATA_BIT_1_LOW); + else + ts = ktime_add_us(ts, CEC_TIM_DATA_BIT_0_LOW); + cec_queue_pin_cec_event(adap, true, ts); + if (bit) + ts = ktime_add_us(ts, CEC_TIM_DATA_BIT_1_HIGH); + else + ts = ktime_add_us(ts, CEC_TIM_DATA_BIT_0_HIGH); + } +} + +static void vivid_cec_pin_events(struct vivid_dev *dev, + const struct cec_msg *msg, bool nacked) +{ + ktime_t ts = ktime_get(); + unsigned int i; + + vivid_cec_pin_adap_events(dev->cec_rx_adap, ts, msg, nacked); + for (i = 0; i < MAX_OUTPUTS; i++) + vivid_cec_pin_adap_events(dev->cec_tx_adap[i], ts, msg, nacked); +} + static void vivid_cec_xfer_done_worker(struct work_struct *work) { struct vivid_cec_work *cw = @@ -84,6 +145,7 @@ static void vivid_cec_xfer_done_worker(struct work_struct *work) dev->cec_xfer_start_jiffies = 0; list_del(&cw->list); spin_unlock(&dev->cec_slock); + vivid_cec_pin_events(dev, &cw->msg, !valid_dest); cec_transmit_attempt_done(cw->adap, cw->tx_status); /* Broadcast message */ @@ -118,6 +180,7 @@ static void vivid_cec_xfer_try_worker(struct work_struct *work) static int vivid_cec_adap_enable(struct cec_adapter *adap, bool enable) { + adap->cec_pin_is_high = true; return 0; } @@ -219,8 +282,7 @@ struct cec_adapter *vivid_cec_alloc_adap(struct vivid_dev *dev, bool is_source) { char name[sizeof(dev->vid_out_dev.name) + 2]; - u32 caps = CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS | - CEC_CAP_PASSTHROUGH | CEC_CAP_RC | CEC_CAP_MONITOR_ALL; + u32 caps = CEC_CAP_DEFAULTS | CEC_CAP_MONITOR_ALL | CEC_CAP_MONITOR_PIN; snprintf(name, sizeof(name), "%s%d", is_source ? 
dev->vid_out_dev.name : dev->vid_cap_dev.name, diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c index ef344b9a48af..5f316a5e38db 100644 --- a/drivers/media/platform/vivid/vivid-core.c +++ b/drivers/media/platform/vivid/vivid-core.c @@ -1201,8 +1201,8 @@ static int vivid_create_instance(struct platform_device *pdev, int inst) goto unreg_dev; } cec_s_phys_addr(adap, 0, false); - v4l2_info(&dev->v4l2_dev, "CEC adapter %s registered for HDMI input %d\n", - dev_name(&adap->devnode.dev), i); + v4l2_info(&dev->v4l2_dev, "CEC adapter %s registered for HDMI input 0\n", + dev_name(&adap->devnode.dev)); } #endif @@ -1255,13 +1255,13 @@ static int vivid_create_instance(struct platform_device *pdev, int inst) dev->cec_tx_adap[bus_cnt] = NULL; goto unreg_dev; } + v4l2_info(&dev->v4l2_dev, "CEC adapter %s registered for HDMI output %d\n", + dev_name(&adap->devnode.dev), bus_cnt); bus_cnt++; if (bus_cnt <= out_type_counter[HDMI]) cec_s_phys_addr(adap, bus_cnt << 12, false); else cec_s_phys_addr(adap, 0x1000, false); - v4l2_info(&dev->v4l2_dev, "CEC adapter %s registered for HDMI output %d\n", - dev_name(&adap->devnode.dev), i); } #endif diff --git a/drivers/media/platform/vsp1/vsp1_entity.h b/drivers/media/platform/vsp1/vsp1_entity.h index 11f8363fa6b0..408602ebeb97 100644 --- a/drivers/media/platform/vsp1/vsp1_entity.h +++ b/drivers/media/platform/vsp1/vsp1_entity.h @@ -21,6 +21,8 @@ struct vsp1_device; struct vsp1_dl_list; struct vsp1_pipeline; +struct vsp1_partition; +struct vsp1_partition_window; enum vsp1_entity_type { VSP1_ENTITY_BRS, @@ -82,12 +84,17 @@ struct vsp1_route { * selection rectangles, ...) * @max_width: Return the max supported width of data that the entity can * process in a single operation. + * @partition: Process the partition construction based on this entity's + * configuration. */ struct vsp1_entity_operations { void (*destroy)(struct vsp1_entity *); void (*configure)(struct vsp1_entity *, struct vsp1_pipeline *, struct vsp1_dl_list *, enum vsp1_entity_params); unsigned int (*max_width)(struct vsp1_entity *, struct vsp1_pipeline *); + void (*partition)(struct vsp1_entity *, struct vsp1_pipeline *, + struct vsp1_partition *, unsigned int, + struct vsp1_partition_window *); }; struct vsp1_entity { diff --git a/drivers/media/platform/vsp1/vsp1_pipe.c b/drivers/media/platform/vsp1/vsp1_pipe.c index 4f4b732df84b..44944ac86d9b 100644 --- a/drivers/media/platform/vsp1/vsp1_pipe.c +++ b/drivers/media/platform/vsp1/vsp1_pipe.c @@ -383,6 +383,28 @@ void vsp1_pipeline_propagate_alpha(struct vsp1_pipeline *pipe, vsp1_uds_set_alpha(pipe->uds, dl, alpha); } +/* + * Propagate the partition calculations through the pipeline + * + * Work backwards through the pipe, allowing each entity to update the partition + * parameters based on its configuration, and the entity connected to its + * source. Each entity must produce the partition required for the previous + * entity in the pipeline. 
+ */ +void vsp1_pipeline_propagate_partition(struct vsp1_pipeline *pipe, + struct vsp1_partition *partition, + unsigned int index, + struct vsp1_partition_window *window) +{ + struct vsp1_entity *entity; + + list_for_each_entry_reverse(entity, &pipe->entities, list_pipe) { + if (entity->ops->partition) + entity->ops->partition(entity, pipe, partition, index, + window); + } +} + void vsp1_pipelines_suspend(struct vsp1_device *vsp1) { unsigned long flags; diff --git a/drivers/media/platform/vsp1/vsp1_pipe.h b/drivers/media/platform/vsp1/vsp1_pipe.h index c5d01a365370..dfff9b5685fe 100644 --- a/drivers/media/platform/vsp1/vsp1_pipe.h +++ b/drivers/media/platform/vsp1/vsp1_pipe.h @@ -57,6 +57,33 @@ enum vsp1_pipeline_state { VSP1_PIPELINE_STOPPING, }; +/* + * struct vsp1_partition_window - Partition window coordinates + * @left: horizontal coordinate of the partition start in pixels relative to the + * left edge of the image + * @width: partition width in pixels + */ +struct vsp1_partition_window { + unsigned int left; + unsigned int width; +}; + +/* + * struct vsp1_partition - A description of a slice for the partition algorithm + * @rpf: The RPF partition window configuration + * @uds_sink: The UDS input partition window configuration + * @uds_source: The UDS output partition window configuration + * @sru: The SRU partition window configuration + * @wpf: The WPF partition window configuration + */ +struct vsp1_partition { + struct vsp1_partition_window rpf; + struct vsp1_partition_window uds_sink; + struct vsp1_partition_window uds_source; + struct vsp1_partition_window sru; + struct vsp1_partition_window wpf; +}; + /* * struct vsp1_pipeline - A VSP1 hardware pipeline * @pipe: the media pipeline @@ -80,9 +107,9 @@ enum vsp1_pipeline_state { * @uds_input: entity at the input of the UDS, if the UDS is present * @entities: list of entities in the pipeline * @dl: display list associated with the pipeline - * @div_size: The maximum allowed partition size for the pipeline * @partitions: The number of partitions used to process one frame - * @current_partition: The partition number currently being configured + * @partition: The current partition for configuration to process + * @part_table: The pre-calculated partitions used by the pipeline */ struct vsp1_pipeline { struct media_pipeline pipe; @@ -109,14 +136,18 @@ struct vsp1_pipeline { struct vsp1_entity *uds; struct vsp1_entity *uds_input; + /* + * The order of this list must be identical to the order of the entities + * in the pipeline, as it is assumed by the partition algorithm that we + * can walk this list in sequence. 
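As a rough standalone sketch of the reverse walk performed by vsp1_pipeline_propagate_partition() above (this is not driver code; the model_* names and the 2x scaler are invented for illustration):

#include <stdio.h>

struct model_window { unsigned int left, width; };

struct model_entity {
	const char *name;
	/* rewrite the window this entity needs from its upstream neighbour */
	void (*partition)(struct model_window *w);
};

static void record_partition(struct model_window *w)
{
	/* RPF/WPF-like stages just record the window as-is */
}

static void scaler_partition(struct model_window *w)
{
	/* a 2x horizontal up-scaler only needs half the input pixels */
	w->left /= 2;
	w->width /= 2;
}

int main(void)
{
	/* pipeline order: reader -> scaler -> writer */
	struct model_entity pipe[] = {
		{ "rpf", record_partition },
		{ "uds", scaler_partition },
		{ "wpf", record_partition },
	};
	struct model_window window = { .left = 512, .width = 256 };
	int i;

	/* walk backwards from the WPF, as the driver does */
	for (i = 2; i >= 0; i--) {
		pipe[i].partition(&window);
		printf("%-3s needs left=%u width=%u\n",
		       pipe[i].name, window.left, window.width);
	}
	return 0;
}

Each stage records the window it was asked for and rewrites it into the window its own input must supply, which is what rpf_partition(), sru_partition() and uds_partition() do in the hunks below.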
+ */ struct list_head entities; struct vsp1_dl_list *dl; - unsigned int div_size; unsigned int partitions; - struct v4l2_rect partition; - unsigned int current_partition; + struct vsp1_partition *partition; + struct vsp1_partition *part_table; }; void vsp1_pipeline_reset(struct vsp1_pipeline *pipe); @@ -132,6 +163,11 @@ void vsp1_pipeline_frame_end(struct vsp1_pipeline *pipe); void vsp1_pipeline_propagate_alpha(struct vsp1_pipeline *pipe, struct vsp1_dl_list *dl, unsigned int alpha); +void vsp1_pipeline_propagate_partition(struct vsp1_pipeline *pipe, + struct vsp1_partition *partition, + unsigned int index, + struct vsp1_partition_window *window); + void vsp1_pipelines_suspend(struct vsp1_device *vsp1); void vsp1_pipelines_resume(struct vsp1_device *vsp1); diff --git a/drivers/media/platform/vsp1/vsp1_regs.h b/drivers/media/platform/vsp1/vsp1_regs.h index 58d0bea963a6..26c4ffad2f46 100644 --- a/drivers/media/platform/vsp1/vsp1_regs.h +++ b/drivers/media/platform/vsp1/vsp1_regs.h @@ -396,6 +396,7 @@ #define VI6_UDS_CTRL_NE_RCR (1 << 18) #define VI6_UDS_CTRL_NE_GY (1 << 17) #define VI6_UDS_CTRL_NE_BCB (1 << 16) +#define VI6_UDS_CTRL_AMDSLH (1 << 2) #define VI6_UDS_CTRL_TDIPC (1 << 1) #define VI6_UDS_SCALE 0x2304 @@ -428,11 +429,24 @@ #define VI6_UDS_PASS_BWIDTH_V_MASK (0x7f << 0) #define VI6_UDS_PASS_BWIDTH_V_SHIFT 0 +#define VI6_UDS_HPHASE 0x2314 +#define VI6_UDS_HPHASE_HSTP_MASK (0xfff << 16) +#define VI6_UDS_HPHASE_HSTP_SHIFT 16 +#define VI6_UDS_HPHASE_HEDP_MASK (0xfff << 0) +#define VI6_UDS_HPHASE_HEDP_SHIFT 0 + #define VI6_UDS_IPC 0x2318 #define VI6_UDS_IPC_FIELD (1 << 27) #define VI6_UDS_IPC_VEDP_MASK (0xfff << 0) #define VI6_UDS_IPC_VEDP_SHIFT 0 +#define VI6_UDS_HSZCLIP 0x231c +#define VI6_UDS_HSZCLIP_HCEN (1 << 28) +#define VI6_UDS_HSZCLIP_HCL_OFST_MASK (0xff << 16) +#define VI6_UDS_HSZCLIP_HCL_OFST_SHIFT 16 +#define VI6_UDS_HSZCLIP_HCL_SIZE_MASK (0x1fff << 0) +#define VI6_UDS_HSZCLIP_HCL_SIZE_SHIFT 0 + #define VI6_UDS_CLIP_SIZE 0x2324 #define VI6_UDS_CLIP_SIZE_HSIZE_MASK (0x1fff << 16) #define VI6_UDS_CLIP_SIZE_HSIZE_SHIFT 16 diff --git a/drivers/media/platform/vsp1/vsp1_rpf.c b/drivers/media/platform/vsp1/vsp1_rpf.c index 8feddd59cf8d..fe0633da5a5f 100644 --- a/drivers/media/platform/vsp1/vsp1_rpf.c +++ b/drivers/media/platform/vsp1/vsp1_rpf.c @@ -97,21 +97,8 @@ static void rpf_configure(struct vsp1_entity *entity, * 'width' need to be adjusted. */ if (pipe->partitions > 1) { - const struct v4l2_mbus_framefmt *output; - struct vsp1_entity *wpf = &pipe->output->entity; - unsigned int input_width = crop.width; - - /* - * Scale the partition window based on the configuration - * of the pipeline. 
- */ - output = vsp1_entity_get_pad_format(wpf, wpf->config, - RWPF_PAD_SINK); - - crop.width = pipe->partition.width * input_width - / output->width; - crop.left += pipe->partition.left * input_width - / output->width; + crop.width = pipe->partition->rpf.width; + crop.left += pipe->partition->rpf.left; } vsp1_rpf_write(rpf, dl, VI6_RPF_SRC_BSIZE, @@ -260,8 +247,18 @@ static void rpf_configure(struct vsp1_entity *entity, } +static void rpf_partition(struct vsp1_entity *entity, + struct vsp1_pipeline *pipe, + struct vsp1_partition *partition, + unsigned int partition_idx, + struct vsp1_partition_window *window) +{ + partition->rpf = *window; +} + static const struct vsp1_entity_operations rpf_entity_ops = { .configure = rpf_configure, + .partition = rpf_partition, }; /* ----------------------------------------------------------------------------- diff --git a/drivers/media/platform/vsp1/vsp1_sru.c b/drivers/media/platform/vsp1/vsp1_sru.c index 30142793dfcd..51e5691187c3 100644 --- a/drivers/media/platform/vsp1/vsp1_sru.c +++ b/drivers/media/platform/vsp1/vsp1_sru.c @@ -18,6 +18,7 @@ #include "vsp1.h" #include "vsp1_dl.h" +#include "vsp1_pipe.h" #include "vsp1_sru.h" #define SRU_MIN_SIZE 4U @@ -325,9 +326,34 @@ static unsigned int sru_max_width(struct vsp1_entity *entity, return 256; } +static void sru_partition(struct vsp1_entity *entity, + struct vsp1_pipeline *pipe, + struct vsp1_partition *partition, + unsigned int partition_idx, + struct vsp1_partition_window *window) +{ + struct vsp1_sru *sru = to_sru(&entity->subdev); + struct v4l2_mbus_framefmt *input; + struct v4l2_mbus_framefmt *output; + + input = vsp1_entity_get_pad_format(&sru->entity, sru->entity.config, + SRU_PAD_SINK); + output = vsp1_entity_get_pad_format(&sru->entity, sru->entity.config, + SRU_PAD_SOURCE); + + /* Adapt if SRUx2 is enabled */ + if (input->width != output->width) { + window->width /= 2; + window->left /= 2; + } + + partition->sru = *window; +} + static const struct vsp1_entity_operations sru_entity_ops = { .configure = sru_configure, .max_width = sru_max_width, + .partition = sru_partition, }; /* ----------------------------------------------------------------------------- diff --git a/drivers/media/platform/vsp1/vsp1_uds.c b/drivers/media/platform/vsp1/vsp1_uds.c index 4226403ad235..72f72a9d2152 100644 --- a/drivers/media/platform/vsp1/vsp1_uds.c +++ b/drivers/media/platform/vsp1/vsp1_uds.c @@ -271,23 +271,32 @@ static void uds_configure(struct vsp1_entity *entity, unsigned int vscale; bool multitap; - if (params == VSP1_ENTITY_PARAMS_PARTITION) { - const struct v4l2_rect *clip = &pipe->partition; + input = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config, + UDS_PAD_SINK); + output = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config, + UDS_PAD_SOURCE); + if (params == VSP1_ENTITY_PARAMS_PARTITION) { + struct vsp1_partition *partition = pipe->partition; + + /* Input size clipping */ + vsp1_uds_write(uds, dl, VI6_UDS_HSZCLIP, VI6_UDS_HSZCLIP_HCEN | + (0 << VI6_UDS_HSZCLIP_HCL_OFST_SHIFT) | + (partition->uds_sink.width + << VI6_UDS_HSZCLIP_HCL_SIZE_SHIFT)); + + /* Output size clipping */ vsp1_uds_write(uds, dl, VI6_UDS_CLIP_SIZE, - (clip->width << VI6_UDS_CLIP_SIZE_HSIZE_SHIFT) | - (clip->height << VI6_UDS_CLIP_SIZE_VSIZE_SHIFT)); + (partition->uds_source.width + << VI6_UDS_CLIP_SIZE_HSIZE_SHIFT) | + (output->height + << VI6_UDS_CLIP_SIZE_VSIZE_SHIFT)); return; } if (params != VSP1_ENTITY_PARAMS_INIT) return; - input = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config, - 
UDS_PAD_SINK); - output = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config, - UDS_PAD_SOURCE); - hscale = uds_compute_ratio(input->width, output->width); vscale = uds_compute_ratio(input->height, output->height); @@ -343,9 +352,41 @@ static unsigned int uds_max_width(struct vsp1_entity *entity, return 2048; } +/* ----------------------------------------------------------------------------- + * Partition Algorithm Support + */ + +static void uds_partition(struct vsp1_entity *entity, + struct vsp1_pipeline *pipe, + struct vsp1_partition *partition, + unsigned int partition_idx, + struct vsp1_partition_window *window) +{ + struct vsp1_uds *uds = to_uds(&entity->subdev); + const struct v4l2_mbus_framefmt *output; + const struct v4l2_mbus_framefmt *input; + + /* Initialise the partition state */ + partition->uds_sink = *window; + partition->uds_source = *window; + + input = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config, + UDS_PAD_SINK); + output = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config, + UDS_PAD_SOURCE); + + partition->uds_sink.width = window->width * input->width + / output->width; + partition->uds_sink.left = window->left * input->width + / output->width; + + *window = partition->uds_sink; +} + static const struct vsp1_entity_operations uds_entity_ops = { .configure = uds_configure, .max_width = uds_max_width, + .partition = uds_partition, }; /* ----------------------------------------------------------------------------- diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c index e9f5dcb8fae5..c2d3b8f0f487 100644 --- a/drivers/media/platform/vsp1/vsp1_video.c +++ b/drivers/media/platform/vsp1/vsp1_video.c @@ -182,57 +182,21 @@ static int __vsp1_video_try_format(struct vsp1_video *video, * VSP1 Partition Algorithm support */ -static void vsp1_video_pipeline_setup_partitions(struct vsp1_pipeline *pipe) -{ - struct vsp1_device *vsp1 = pipe->output->entity.vsp1; - const struct v4l2_mbus_framefmt *format; - struct vsp1_entity *entity; - unsigned int div_size; - - /* - * Partitions are computed on the size before rotation, use the format - * at the WPF sink. - */ - format = vsp1_entity_get_pad_format(&pipe->output->entity, - pipe->output->entity.config, - RWPF_PAD_SINK); - div_size = format->width; - - /* Gen2 hardware doesn't require image partitioning. */ - if (vsp1->info->gen == 2) { - pipe->div_size = div_size; - pipe->partitions = 1; - return; - } - - list_for_each_entry(entity, &pipe->entities, list_pipe) { - unsigned int entity_max = VSP1_VIDEO_MAX_WIDTH; - - if (entity->ops->max_width) { - entity_max = entity->ops->max_width(entity, pipe); - if (entity_max) - div_size = min(div_size, entity_max); - } - } - - pipe->div_size = div_size; - pipe->partitions = DIV_ROUND_UP(format->width, div_size); -} - /** - * vsp1_video_partition - Calculate the active partition output window + * vsp1_video_calculate_partition - Calculate the active partition output window * + * @pipe: the pipeline + * @partition: partition that will hold the calculated values * @div_size: pre-determined maximum partition division size * @index: partition index - * - * Returns a v4l2_rect describing the partition window. 
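The sink-window arithmetic in uds_partition() above can be sanity-checked with example numbers; this is a standalone calculation only, and the 960/1920 sizes are made up for illustration:

/* Standalone arithmetic check of the sink-window scaling shown in
 * uds_partition(); the sizes below are example values, not driver state. */
#include <stdio.h>

int main(void)
{
	unsigned int in_w = 960, out_w = 1920;		/* 2x horizontal upscale */
	unsigned int win_left = 1024, win_width = 512;	/* source-side window */

	unsigned int sink_width = win_width * in_w / out_w;	/* 256 */
	unsigned int sink_left = win_left * in_w / out_w;	/* 512 */

	printf("uds_sink: left=%u width=%u\n", sink_left, sink_width);
	return 0;
}

So for a 2x horizontal upscale, a 512-pixel output slice starting at 1024 requires a 256-pixel input slice starting at 512, which then becomes the window handed to the next entity upstream.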
*/ -static struct v4l2_rect vsp1_video_partition(struct vsp1_pipeline *pipe, - unsigned int div_size, - unsigned int index) +static void vsp1_video_calculate_partition(struct vsp1_pipeline *pipe, + struct vsp1_partition *partition, + unsigned int div_size, + unsigned int index) { const struct v4l2_mbus_framefmt *format; - struct v4l2_rect partition; + struct vsp1_partition_window window; unsigned int modulus; /* @@ -245,18 +209,17 @@ static struct v4l2_rect vsp1_video_partition(struct vsp1_pipeline *pipe, /* A single partition simply processes the output size in full. */ if (pipe->partitions <= 1) { - partition.left = 0; - partition.top = 0; - partition.width = format->width; - partition.height = format->height; - return partition; + window.left = 0; + window.width = format->width; + + vsp1_pipeline_propagate_partition(pipe, partition, index, + &window); + return; } /* Initialise the partition with sane starting conditions. */ - partition.left = index * div_size; - partition.top = 0; - partition.width = div_size; - partition.height = format->height; + window.left = index * div_size; + window.width = div_size; modulus = format->width % div_size; @@ -279,18 +242,65 @@ static struct v4l2_rect vsp1_video_partition(struct vsp1_pipeline *pipe, if (modulus < div_size / 2) { if (index == partitions - 1) { /* Halve the penultimate partition. */ - partition.width = div_size / 2; + window.width = div_size / 2; } else if (index == partitions) { /* Increase the final partition. */ - partition.width = (div_size / 2) + modulus; - partition.left -= div_size / 2; + window.width = (div_size / 2) + modulus; + window.left -= div_size / 2; } } else if (index == partitions) { - partition.width = modulus; + window.width = modulus; } } - return partition; + vsp1_pipeline_propagate_partition(pipe, partition, index, &window); +} + +static int vsp1_video_pipeline_setup_partitions(struct vsp1_pipeline *pipe) +{ + struct vsp1_device *vsp1 = pipe->output->entity.vsp1; + const struct v4l2_mbus_framefmt *format; + struct vsp1_entity *entity; + unsigned int div_size; + unsigned int i; + + /* + * Partitions are computed on the size before rotation, use the format + * at the WPF sink. + */ + format = vsp1_entity_get_pad_format(&pipe->output->entity, + pipe->output->entity.config, + RWPF_PAD_SINK); + div_size = format->width; + + /* + * Only Gen3 hardware requires image partitioning, Gen2 will operate + * with a single partition that covers the whole output. 
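For the Gen3 path continued just below, the partition count follows directly from the most restrictive max_width in the pipeline. A standalone check with example numbers (the 3840-pixel output is invented; 2048 is the value uds_max_width() returns in the hunk context above, assuming the UDS is the only limiting entity):

/* Rough check of the partition-count math in
 * vsp1_video_pipeline_setup_partitions(); example values only. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int format_width = 3840;	/* WPF sink width */
	unsigned int div_size = format_width;
	unsigned int uds_max = 2048;		/* uds_max_width() */

	if (uds_max < div_size)			/* Gen3 only */
		div_size = uds_max;

	printf("partitions = %u\n", DIV_ROUND_UP(format_width, div_size));
	/* -> 2, so part_table gets two pre-computed vsp1_partition entries */
	return 0;
}

pipe->part_table then receives one pre-computed struct vsp1_partition per slice, filled in by vsp1_video_calculate_partition().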
+ */ + if (vsp1->info->gen == 3) { + list_for_each_entry(entity, &pipe->entities, list_pipe) { + unsigned int entity_max; + + if (!entity->ops->max_width) + continue; + + entity_max = entity->ops->max_width(entity, pipe); + if (entity_max) + div_size = min(div_size, entity_max); + } + } + + pipe->partitions = DIV_ROUND_UP(format->width, div_size); + pipe->part_table = kcalloc(pipe->partitions, sizeof(*pipe->part_table), + GFP_KERNEL); + if (!pipe->part_table) + return -ENOMEM; + + for (i = 0; i < pipe->partitions; ++i) + vsp1_video_calculate_partition(pipe, &pipe->part_table[i], + div_size, i); + + return 0; } /* ----------------------------------------------------------------------------- @@ -369,12 +379,12 @@ static void vsp1_video_frame_end(struct vsp1_pipeline *pipe, } static void vsp1_video_pipeline_run_partition(struct vsp1_pipeline *pipe, - struct vsp1_dl_list *dl) + struct vsp1_dl_list *dl, + unsigned int partition) { struct vsp1_entity *entity; - pipe->partition = vsp1_video_partition(pipe, pipe->div_size, - pipe->current_partition); + pipe->partition = &pipe->part_table[partition]; list_for_each_entry(entity, &pipe->entities, list_pipe) { if (entity->ops->configure) @@ -387,6 +397,7 @@ static void vsp1_video_pipeline_run(struct vsp1_pipeline *pipe) { struct vsp1_device *vsp1 = pipe->output->entity.vsp1; struct vsp1_entity *entity; + unsigned int partition; if (!pipe->dl) pipe->dl = vsp1_dl_list_get(pipe->output->dlm); @@ -403,20 +414,12 @@ static void vsp1_video_pipeline_run(struct vsp1_pipeline *pipe) } /* Run the first partition */ - pipe->current_partition = 0; - vsp1_video_pipeline_run_partition(pipe, pipe->dl); + vsp1_video_pipeline_run_partition(pipe, pipe->dl, 0); /* Process consecutive partitions as necessary */ - for (pipe->current_partition = 1; - pipe->current_partition < pipe->partitions; - pipe->current_partition++) { + for (partition = 1; partition < pipe->partitions; ++partition) { struct vsp1_dl_list *dl; - /* - * Partition configuration operations will utilise - * the pipe->current_partition variable to determine - * the work they should complete. - */ dl = vsp1_dl_list_get(pipe->output->dlm); /* @@ -429,7 +432,7 @@ static void vsp1_video_pipeline_run(struct vsp1_pipeline *pipe) break; } - vsp1_video_pipeline_run_partition(pipe, dl); + vsp1_video_pipeline_run_partition(pipe, dl, partition); vsp1_dl_list_add_chain(pipe->dl, dl); } @@ -802,9 +805,12 @@ static void vsp1_video_buffer_queue(struct vb2_buffer *vb) static int vsp1_video_setup_pipeline(struct vsp1_pipeline *pipe) { struct vsp1_entity *entity; + int ret; /* Determine this pipelines sizes for image partitioning support. */ - vsp1_video_pipeline_setup_partitions(pipe); + ret = vsp1_video_pipeline_setup_partitions(pipe); + if (ret < 0) + return ret; /* Prepare the display list. */ pipe->dl = vsp1_dl_list_get(pipe->output->dlm); @@ -843,6 +849,26 @@ static int vsp1_video_setup_pipeline(struct vsp1_pipeline *pipe) return 0; } +static void vsp1_video_cleanup_pipeline(struct vsp1_pipeline *pipe) +{ + struct vsp1_video *video = pipe->output->video; + struct vsp1_vb2_buffer *buffer; + unsigned long flags; + + /* Remove all buffers from the IRQ queue. 
*/ + spin_lock_irqsave(&video->irqlock, flags); + list_for_each_entry(buffer, &video->irqqueue, queue) + vb2_buffer_done(&buffer->buf.vb2_buf, VB2_BUF_STATE_ERROR); + INIT_LIST_HEAD(&video->irqqueue); + spin_unlock_irqrestore(&video->irqlock, flags); + + /* Release our partition table allocation */ + mutex_lock(&pipe->lock); + kfree(pipe->part_table); + pipe->part_table = NULL; + mutex_unlock(&pipe->lock); +} + static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count) { struct vsp1_video *video = vb2_get_drv_priv(vq); @@ -856,6 +882,7 @@ static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count) ret = vsp1_video_setup_pipeline(pipe); if (ret < 0) { mutex_unlock(&pipe->lock); + vsp1_video_cleanup_pipeline(pipe); return ret; } @@ -887,7 +914,6 @@ static void vsp1_video_stop_streaming(struct vb2_queue *vq) { struct vsp1_video *video = vb2_get_drv_priv(vq); struct vsp1_pipeline *pipe = video->rwpf->pipe; - struct vsp1_vb2_buffer *buffer; unsigned long flags; int ret; @@ -912,14 +938,8 @@ static void vsp1_video_stop_streaming(struct vb2_queue *vq) mutex_unlock(&pipe->lock); media_pipeline_stop(&video->video.entity); + vsp1_video_cleanup_pipeline(pipe); vsp1_video_pipeline_put(pipe); - - /* Remove all buffers from the IRQ queue. */ - spin_lock_irqsave(&video->irqlock, flags); - list_for_each_entry(buffer, &video->irqqueue, queue) - vb2_buffer_done(&buffer->buf.vb2_buf, VB2_BUF_STATE_ERROR); - INIT_LIST_HEAD(&video->irqqueue); - spin_unlock_irqrestore(&video->irqlock, flags); } static const struct vb2_ops vsp1_video_queue_qops = { diff --git a/drivers/media/platform/vsp1/vsp1_wpf.c b/drivers/media/platform/vsp1/vsp1_wpf.c index b6c902be225b..f7f3b4b2c2de 100644 --- a/drivers/media/platform/vsp1/vsp1_wpf.c +++ b/drivers/media/platform/vsp1/vsp1_wpf.c @@ -291,7 +291,7 @@ static void wpf_configure(struct vsp1_entity *entity, * multiple slices. */ if (pipe->partitions > 1) - width = pipe->partition.width; + width = pipe->partition->wpf.width; vsp1_wpf_write(wpf, dl, VI6_WPF_HSZCLIP, VI6_WPF_SZCLIP_EN | (0 << VI6_WPF_SZCLIP_OFST_SHIFT) | @@ -320,13 +320,13 @@ static void wpf_configure(struct vsp1_entity *entity, * is applied horizontally or vertically accordingly. */ if (flip & BIT(WPF_CTRL_HFLIP) && !wpf->flip.rotate) - offset = format->width - pipe->partition.left - - pipe->partition.width; + offset = format->width - pipe->partition->wpf.left + - pipe->partition->wpf.width; else if (flip & BIT(WPF_CTRL_VFLIP) && wpf->flip.rotate) - offset = format->height - pipe->partition.left - - pipe->partition.width; + offset = format->height - pipe->partition->wpf.left + - pipe->partition->wpf.width; else - offset = pipe->partition.left; + offset = pipe->partition->wpf.left; for (i = 0; i < format->num_planes; ++i) { unsigned int hsub = i > 0 ? fmtinfo->hsub : 1; @@ -348,7 +348,7 @@ static void wpf_configure(struct vsp1_entity *entity, * image height. */ if (wpf->flip.rotate) - height = pipe->partition.width; + height = pipe->partition->wpf.width; else height = format->height; @@ -473,10 +473,20 @@ static unsigned int wpf_max_width(struct vsp1_entity *entity, return wpf->flip.rotate ? 
256 : wpf->max_width; } +static void wpf_partition(struct vsp1_entity *entity, + struct vsp1_pipeline *pipe, + struct vsp1_partition *partition, + unsigned int partition_idx, + struct vsp1_partition_window *window) +{ + partition->wpf = *window; +} + static const struct vsp1_entity_operations wpf_entity_ops = { .destroy = vsp1_wpf_destroy, .configure = wpf_configure, .max_width = wpf_max_width, + .partition = wpf_partition, }; /* ----------------------------------------------------------------------------- diff --git a/drivers/media/platform/xilinx/xilinx-vipp.c b/drivers/media/platform/xilinx/xilinx-vipp.c index ac4704388920..ebfdf334d99c 100644 --- a/drivers/media/platform/xilinx/xilinx-vipp.c +++ b/drivers/media/platform/xilinx/xilinx-vipp.c @@ -90,12 +90,12 @@ static int xvip_graph_build_one(struct xvip_composite_device *xdev, of_node_put(ep); ep = next; - dev_dbg(xdev->dev, "processing endpoint %s\n", ep->full_name); + dev_dbg(xdev->dev, "processing endpoint %pOF\n", ep); ret = v4l2_fwnode_parse_link(of_fwnode_handle(ep), &link); if (ret < 0) { - dev_err(xdev->dev, "failed to parse link for %s\n", - ep->full_name); + dev_err(xdev->dev, "failed to parse link for %pOF\n", + ep); continue; } @@ -103,9 +103,9 @@ static int xvip_graph_build_one(struct xvip_composite_device *xdev, * the link. */ if (link.local_port >= local->num_pads) { - dev_err(xdev->dev, "invalid port number %u for %s\n", + dev_err(xdev->dev, "invalid port number %u for %pOF\n", link.local_port, - to_of_node(link.local_node)->full_name); + to_of_node(link.local_node)); v4l2_fwnode_put_link(&link); ret = -EINVAL; break; @@ -114,8 +114,8 @@ static int xvip_graph_build_one(struct xvip_composite_device *xdev, local_pad = &local->pads[link.local_port]; if (local_pad->flags & MEDIA_PAD_FL_SINK) { - dev_dbg(xdev->dev, "skipping sink port %s:%u\n", - to_of_node(link.local_node)->full_name, + dev_dbg(xdev->dev, "skipping sink port %pOF:%u\n", + to_of_node(link.local_node), link.local_port); v4l2_fwnode_put_link(&link); continue; @@ -123,8 +123,8 @@ static int xvip_graph_build_one(struct xvip_composite_device *xdev, /* Skip DMA engines, they will be processed separately. 
*/ if (link.remote_node == of_fwnode_handle(xdev->dev->of_node)) { - dev_dbg(xdev->dev, "skipping DMA port %s:%u\n", - to_of_node(link.local_node)->full_name, + dev_dbg(xdev->dev, "skipping DMA port %pOF:%u\n", + to_of_node(link.local_node), link.local_port); v4l2_fwnode_put_link(&link); continue; @@ -134,8 +134,8 @@ static int xvip_graph_build_one(struct xvip_composite_device *xdev, ent = xvip_graph_find_entity(xdev, to_of_node(link.remote_node)); if (ent == NULL) { - dev_err(xdev->dev, "no entity found for %s\n", - to_of_node(link.remote_node)->full_name); + dev_err(xdev->dev, "no entity found for %pOF\n", + to_of_node(link.remote_node)); v4l2_fwnode_put_link(&link); ret = -ENODEV; break; @@ -144,9 +144,8 @@ static int xvip_graph_build_one(struct xvip_composite_device *xdev, remote = ent->entity; if (link.remote_port >= remote->num_pads) { - dev_err(xdev->dev, "invalid port number %u on %s\n", - link.remote_port, - to_of_node(link.remote_node)->full_name); + dev_err(xdev->dev, "invalid port number %u on %pOF\n", + link.remote_port, to_of_node(link.remote_node)); v4l2_fwnode_put_link(&link); ret = -EINVAL; break; @@ -216,12 +215,12 @@ static int xvip_graph_build_dma(struct xvip_composite_device *xdev) of_node_put(ep); ep = next; - dev_dbg(xdev->dev, "processing endpoint %s\n", ep->full_name); + dev_dbg(xdev->dev, "processing endpoint %pOF\n", ep); ret = v4l2_fwnode_parse_link(of_fwnode_handle(ep), &link); if (ret < 0) { - dev_err(xdev->dev, "failed to parse link for %s\n", - ep->full_name); + dev_err(xdev->dev, "failed to parse link for %pOF\n", + ep); continue; } @@ -242,17 +241,17 @@ static int xvip_graph_build_dma(struct xvip_composite_device *xdev) ent = xvip_graph_find_entity(xdev, to_of_node(link.remote_node)); if (ent == NULL) { - dev_err(xdev->dev, "no entity found for %s\n", - to_of_node(link.remote_node)->full_name); + dev_err(xdev->dev, "no entity found for %pOF\n", + to_of_node(link.remote_node)); v4l2_fwnode_put_link(&link); ret = -ENODEV; break; } if (link.remote_port >= ent->entity->num_pads) { - dev_err(xdev->dev, "invalid port number %u on %s\n", + dev_err(xdev->dev, "invalid port number %u on %pOF\n", link.remote_port, - to_of_node(link.remote_node)->full_name); + to_of_node(link.remote_node)); v4l2_fwnode_put_link(&link); ret = -EINVAL; break; @@ -337,8 +336,8 @@ static int xvip_graph_notify_bound(struct v4l2_async_notifier *notifier, continue; if (entity->subdev) { - dev_err(xdev->dev, "duplicate subdev for node %s\n", - entity->node->full_name); + dev_err(xdev->dev, "duplicate subdev for node %pOF\n", + entity->node); return -EINVAL; } @@ -360,14 +359,14 @@ static int xvip_graph_parse_one(struct xvip_composite_device *xdev, struct device_node *ep = NULL; int ret = 0; - dev_dbg(xdev->dev, "parsing node %s\n", node->full_name); + dev_dbg(xdev->dev, "parsing node %pOF\n", node); while (1) { ep = of_graph_get_next_endpoint(node, ep); if (ep == NULL) break; - dev_dbg(xdev->dev, "handling endpoint %s\n", ep->full_name); + dev_dbg(xdev->dev, "handling endpoint %pOF\n", ep); remote = of_graph_get_remote_port_parent(ep); if (remote == NULL) { @@ -452,8 +451,7 @@ static int xvip_graph_dma_init_one(struct xvip_composite_device *xdev, ret = xvip_dma_init(xdev, dma, type, index); if (ret < 0) { - dev_err(xdev->dev, "%s initialization failed\n", - node->full_name); + dev_err(xdev->dev, "%pOF initialization failed\n", node); return ret; } diff --git a/drivers/media/radio/dsbr100.c b/drivers/media/radio/dsbr100.c index 53bc8c010035..8521bb2825e8 100644 --- 
a/drivers/media/radio/dsbr100.c +++ b/drivers/media/radio/dsbr100.c @@ -408,7 +408,7 @@ static int usb_dsbr100_probe(struct usb_interface *intf, return retval; } -static struct usb_device_id usb_dsbr100_device_table[] = { +static const struct usb_device_id usb_dsbr100_device_table[] = { { USB_DEVICE(DSB100_VENDOR, DSB100_PRODUCT) }, { } /* Terminating entry */ }; diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c index cbaf850f4791..6888b7db449d 100644 --- a/drivers/media/radio/radio-cadet.c +++ b/drivers/media/radio/radio-cadet.c @@ -528,7 +528,7 @@ static const struct v4l2_ctrl_ops cadet_ctrl_ops = { #ifdef CONFIG_PNP -static struct pnp_device_id cadet_pnp_devices[] = { +static const struct pnp_device_id cadet_pnp_devices[] = { /* ADS Cadet AM/FM Radio Card */ {.id = "MSM0c24", .driver_data = 0}, {.id = ""} diff --git a/drivers/media/radio/radio-gemtek.c b/drivers/media/radio/radio-gemtek.c index ca051ccbc3e4..ddc12b16f77c 100644 --- a/drivers/media/radio/radio-gemtek.c +++ b/drivers/media/radio/radio-gemtek.c @@ -281,7 +281,7 @@ static const struct radio_isa_ops gemtek_ops = { static const int gemtek_ioports[] = { 0x20c, 0x30c, 0x24c, 0x34c, 0x248, 0x28c }; #ifdef CONFIG_PNP -static struct pnp_device_id gemtek_pnp_devices[] = { +static const struct pnp_device_id gemtek_pnp_devices[] = { /* AOpen FX-3D/Pro Radio */ {.id = "ADS7183", .driver_data = 0}, {.id = ""} diff --git a/drivers/media/radio/radio-keene.c b/drivers/media/radio/radio-keene.c index 53a7c2e87762..f2ea8bc5f5ee 100644 --- a/drivers/media/radio/radio-keene.c +++ b/drivers/media/radio/radio-keene.c @@ -45,7 +45,7 @@ MODULE_LICENSE("GPL"); #define FREQ_MUL 16000U /* USB Device ID List */ -static struct usb_device_id usb_keene_device_table[] = { +static const struct usb_device_id usb_keene_device_table[] = { {USB_DEVICE_AND_INTERFACE_INFO(USB_KEENE_VENDOR, USB_KEENE_PRODUCT, USB_CLASS_HID, 0, 0) }, { } /* Terminating entry */ diff --git a/drivers/media/radio/radio-ma901.c b/drivers/media/radio/radio-ma901.c index c2010a905a47..fdc481257efd 100644 --- a/drivers/media/radio/radio-ma901.c +++ b/drivers/media/radio/radio-ma901.c @@ -444,7 +444,7 @@ static int usb_ma901radio_probe(struct usb_interface *intf, } /* USB Device ID List */ -static struct usb_device_id usb_ma901radio_device_table[] = { +static const struct usb_device_id usb_ma901radio_device_table[] = { { USB_DEVICE_AND_INTERFACE_INFO(USB_MA901_VENDOR, USB_MA901_PRODUCT, USB_CLASS_HID, 0, 0) }, { } /* Terminating entry */ diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c index 8253f79d5d75..3aa5ad391581 100644 --- a/drivers/media/radio/radio-maxiradio.c +++ b/drivers/media/radio/radio-maxiradio.c @@ -186,7 +186,7 @@ static void maxiradio_remove(struct pci_dev *pdev) kfree(dev); } -static struct pci_device_id maxiradio_pci_tbl[] = { +static const struct pci_device_id maxiradio_pci_tbl[] = { { PCI_VENDOR_ID_GUILLEMOT, PCI_DEVICE_ID_GUILLEMOT_MAXIRADIO, PCI_ANY_ID, PCI_ANY_ID, }, { 0 } diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c index 95c12532e87a..c9f59129af79 100644 --- a/drivers/media/radio/radio-mr800.c +++ b/drivers/media/radio/radio-mr800.c @@ -587,7 +587,7 @@ static int usb_amradio_probe(struct usb_interface *intf, } /* USB Device ID List */ -static struct usb_device_id usb_amradio_device_table[] = { +static const struct usb_device_id usb_amradio_device_table[] = { { USB_DEVICE_AND_INTERFACE_INFO(USB_AMRADIO_VENDOR, USB_AMRADIO_PRODUCT, USB_CLASS_HID, 0, 0) 
}, { } /* Terminating entry */ diff --git a/drivers/media/radio/radio-raremono.c b/drivers/media/radio/radio-raremono.c index bfb3a6d051ba..3c0a22a54113 100644 --- a/drivers/media/radio/radio-raremono.c +++ b/drivers/media/radio/radio-raremono.c @@ -58,7 +58,7 @@ MODULE_LICENSE("GPL v2"); */ /* USB Device ID List */ -static struct usb_device_id usb_raremono_device_table[] = { +static const struct usb_device_id usb_raremono_device_table[] = { {USB_DEVICE_AND_INTERFACE_INFO(0x10c4, 0x818a, USB_CLASS_HID, 0, 0) }, { } /* Terminating entry */ }; diff --git a/drivers/media/radio/radio-sf16fmr2.c b/drivers/media/radio/radio-sf16fmr2.c index dc81d422b394..de79d5569c2a 100644 --- a/drivers/media/radio/radio-sf16fmr2.c +++ b/drivers/media/radio/radio-sf16fmr2.c @@ -197,7 +197,7 @@ static int fmr2_tea_ext_init(struct snd_tea575x *tea) return 0; } -static struct pnp_device_id fmr2_pnp_ids[] = { +static const struct pnp_device_id fmr2_pnp_ids[] = { { .id = "MFRad13" }, /* tuner subdevice of SF16-FMD2 */ { .id = "" } }; diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c index 23971f5502a8..22f3466af2b1 100644 --- a/drivers/media/radio/radio-shark.c +++ b/drivers/media/radio/radio-shark.c @@ -392,7 +392,7 @@ static int usb_shark_resume(struct usb_interface *intf) #endif /* Specify the bcdDevice value, as the radioSHARK and radioSHARK2 share ids */ -static struct usb_device_id usb_shark_device_table[] = { +static const struct usb_device_id usb_shark_device_table[] = { { .match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION | USB_DEVICE_ID_MATCH_INT_CLASS, .idVendor = 0x077d, diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c index b50638ec5f09..4d1a4b3d669c 100644 --- a/drivers/media/radio/radio-shark2.c +++ b/drivers/media/radio/radio-shark2.c @@ -358,7 +358,7 @@ static int usb_shark_resume(struct usb_interface *intf) #endif /* Specify the bcdDevice value, as the radioSHARK and radioSHARK2 share ids */ -static struct usb_device_id usb_shark_device_table[] = { +static const struct usb_device_id usb_shark_device_table[] = { { .match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION | USB_DEVICE_ID_MATCH_INT_CLASS, .idVendor = 0x077d, diff --git a/drivers/media/radio/radio-tea5764.c b/drivers/media/radio/radio-tea5764.c index 9db8331a0c75..bc7e69e7e32e 100644 --- a/drivers/media/radio/radio-tea5764.c +++ b/drivers/media/radio/radio-tea5764.c @@ -414,7 +414,7 @@ static const struct v4l2_ioctl_ops tea5764_ioctl_ops = { }; /* V4L2 interface */ -static struct video_device tea5764_radio_template = { +static const struct video_device tea5764_radio_template = { .name = "TEA5764 FM-Radio", .fops = &tea5764_fops, .ioctl_ops = &tea5764_ioctl_ops, diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c index 17e82a9a0109..903fcd5e99c0 100644 --- a/drivers/media/radio/radio-wl1273.c +++ b/drivers/media/radio/radio-wl1273.c @@ -1982,7 +1982,7 @@ static const struct v4l2_ioctl_ops wl1273_ioctl_ops = { .vidioc_log_status = wl1273_fm_vidioc_log_status, }; -static struct video_device wl1273_viddev_template = { +static const struct video_device wl1273_viddev_template = { .fops = &wl1273_fops, .ioctl_ops = &wl1273_ioctl_ops, .name = WL1273_FM_DRIVER_NAME, diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c index 571f29a34bf8..c311f9951d80 100644 --- a/drivers/media/radio/si470x/radio-si470x-usb.c +++ b/drivers/media/radio/si470x/radio-si470x-usb.c @@ -38,7 +38,7 @@ /* USB Device 
ID List */ -static struct usb_device_id si470x_usb_driver_id_table[] = { +static const struct usb_device_id si470x_usb_driver_id_table[] = { /* Silicon Labs USB FM Radio Reference Design */ { USB_DEVICE_AND_INTERFACE_INFO(0x10c4, 0x818a, USB_CLASS_HID, 0, 0) }, /* ADS/Tech FM Radio Receiver (formerly Instant FM Music) */ diff --git a/drivers/media/radio/si4713/radio-platform-si4713.c b/drivers/media/radio/si4713/radio-platform-si4713.c index 6f93ef1249a6..27339ec495f6 100644 --- a/drivers/media/radio/si4713/radio-platform-si4713.c +++ b/drivers/media/radio/si4713/radio-platform-si4713.c @@ -135,7 +135,7 @@ static struct v4l2_ioctl_ops radio_si4713_ioctl_ops = { }; /* radio_si4713_vdev_template - video device interface */ -static struct video_device radio_si4713_vdev_template = { +static const struct video_device radio_si4713_vdev_template = { .fops = &radio_si4713_fops, .name = "radio-si4713", .release = video_device_release_empty, diff --git a/drivers/media/radio/si4713/radio-usb-si4713.c b/drivers/media/radio/si4713/radio-usb-si4713.c index e5e5a1672bdb..a115db24667b 100644 --- a/drivers/media/radio/si4713/radio-usb-si4713.c +++ b/drivers/media/radio/si4713/radio-usb-si4713.c @@ -49,7 +49,7 @@ MODULE_LICENSE("GPL v2"); #define USB_RESP_TIMEOUT 50000 /* USB Device ID List */ -static struct usb_device_id usb_si4713_usb_device_table[] = { +static const struct usb_device_id usb_si4713_usb_device_table[] = { {USB_DEVICE_AND_INTERFACE_INFO(USB_SI4713_VENDOR, USB_SI4713_PRODUCT, USB_CLASS_HID, 0, 0) }, { } /* Terminating entry */ @@ -409,7 +409,7 @@ static const struct i2c_algorithm si4713_algo = { /* This name value shows up in the sysfs filename associated with this I2C adapter */ -static struct i2c_adapter si4713_i2c_adapter_template = { +static const struct i2c_adapter si4713_i2c_adapter_template = { .name = "si4713-i2c", .owner = THIS_MODULE, .algo = &si4713_algo, diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.c b/drivers/media/radio/wl128x/fmdrv_v4l2.c index 71423f45c05c..fc5a7abc83d2 100644 --- a/drivers/media/radio/wl128x/fmdrv_v4l2.c +++ b/drivers/media/radio/wl128x/fmdrv_v4l2.c @@ -509,7 +509,7 @@ static const struct v4l2_ioctl_ops fm_drv_ioctl_ops = { }; /* V4L2 RADIO device parent structure */ -static struct video_device fm_viddev_template = { +static const struct video_device fm_viddev_template = { .fops = &fm_drv_fops, .ioctl_ops = &fm_drv_ioctl_ops, .name = FM_DRV_NAME, diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig index 5e83b76495f7..d9ce8ff55d0c 100644 --- a/drivers/media/rc/Kconfig +++ b/drivers/media/rc/Kconfig @@ -1,9 +1,20 @@ -config RC_CORE - tristate - depends on MEDIA_RC_SUPPORT + +menuconfig RC_CORE + tristate "Remote Controller support" depends on INPUT default y + ---help--- + Enable support for Remote Controllers on Linux. This is + needed in order to support several video capture adapters, + standalone IR receivers/transmitters, and RF receivers. + Enable this option if you have a video capture board even + if you don't need IR, as otherwise, you may not be able to + compile the driver for your adapter. + + Say Y when you have a TV or an IR device. + +if RC_CORE source "drivers/media/rc/keymaps/Kconfig" menuconfig RC_DECODERS @@ -388,6 +399,29 @@ config IR_GPIO_CIR To compile this driver as a module, choose M here: the module will be called gpio-ir-recv. +config IR_GPIO_TX + tristate "GPIO IR Bit Banging Transmitter" + depends on RC_CORE + depends on LIRC + ---help--- + Say Y if you want to a GPIO based IR transmitter. 
This is a + bit banging driver. + + To compile this driver as a module, choose M here: the module will + be called gpio-ir-tx. + +config IR_PWM_TX + tristate "PWM IR transmitter" + depends on RC_CORE + depends on LIRC + depends on PWM + ---help--- + Say Y if you want to use a PWM based IR transmitter. This is + more power efficient than the bit banging gpio driver. + + To compile this driver as a module, choose M here: the module will + be called pwm-ir-tx. + config RC_ST tristate "ST remote control receiver" depends on RC_CORE @@ -435,4 +469,17 @@ config IR_SIR To compile this driver as a module, choose M here: the module will be called sir-ir. +config IR_ZX + tristate "ZTE ZX IR remote control" + depends on RC_CORE + depends on ARCH_ZX || COMPILE_TEST + ---help--- + Say Y if you want to use the IR remote control available + on ZTE ZX family SoCs. + + To compile this driver as a module, choose M here: the + module will be called zx-irdec. + endif #RC_DEVICES + +endif #RC_CORE diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile index 245e2c2d0b22..9bc6a3980ed0 100644 --- a/drivers/media/rc/Makefile +++ b/drivers/media/rc/Makefile @@ -32,6 +32,8 @@ obj-$(CONFIG_IR_STREAMZAP) += streamzap.o obj-$(CONFIG_IR_WINBOND_CIR) += winbond-cir.o obj-$(CONFIG_RC_LOOPBACK) += rc-loopback.o obj-$(CONFIG_IR_GPIO_CIR) += gpio-ir-recv.o +obj-$(CONFIG_IR_GPIO_TX) += gpio-ir-tx.o +obj-$(CONFIG_IR_PWM_TX) += pwm-ir-tx.o obj-$(CONFIG_IR_IGORPLUGUSB) += igorplugusb.o obj-$(CONFIG_IR_IGUANA) += iguanair.o obj-$(CONFIG_IR_TTUSBIR) += ttusbir.o @@ -41,3 +43,4 @@ obj-$(CONFIG_IR_IMG) += img-ir/ obj-$(CONFIG_IR_SERIAL) += serial_ir.o obj-$(CONFIG_IR_SIR) += sir_ir.o obj-$(CONFIG_IR_MTK) += mtk-cir.o +obj-$(CONFIG_IR_ZX) += zx-irdec.o diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c index a4c6ad4f67c1..d0871d60a723 100644 --- a/drivers/media/rc/ati_remote.c +++ b/drivers/media/rc/ati_remote.c @@ -622,7 +622,8 @@ static void ati_remote_input_report(struct urb *urb) * it would cause ghost repeats which would be a * regression for this driver. 
*/ - rc_keydown_notimeout(ati_remote->rdev, RC_TYPE_OTHER, + rc_keydown_notimeout(ati_remote->rdev, + RC_PROTO_OTHER, scancode, data[2]); rc_keyup(ati_remote->rdev); } @@ -760,13 +761,13 @@ static void ati_remote_rc_init(struct ati_remote *ati_remote) struct rc_dev *rdev = ati_remote->rdev; rdev->priv = ati_remote; - rdev->allowed_protocols = RC_BIT_OTHER; + rdev->allowed_protocols = RC_PROTO_BIT_OTHER; rdev->driver_name = "ati_remote"; rdev->open = ati_remote_rc_open; rdev->close = ati_remote_rc_close; - rdev->input_name = ati_remote->rc_name; + rdev->device_name = ati_remote->rc_name; rdev->input_phys = ati_remote->rc_phys; usb_to_input_id(ati_remote->udev, &rdev->input_id); diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c index 60da963f40dc..af7ba23e16e1 100644 --- a/drivers/media/rc/ene_ir.c +++ b/drivers/media/rc/ene_ir.c @@ -1053,14 +1053,14 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id) if (!dev->hw_learning_and_tx_capable) learning_mode_force = false; - rdev->allowed_protocols = RC_BIT_ALL_IR_DECODER; + rdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER; rdev->priv = dev; rdev->open = ene_open; rdev->close = ene_close; rdev->s_idle = ene_set_idle; rdev->driver_name = ENE_DRIVER_NAME; rdev->map_name = RC_MAP_RC6_MCE; - rdev->input_name = "ENE eHome Infrared Remote Receiver"; + rdev->device_name = "ENE eHome Infrared Remote Receiver"; if (dev->hw_learning_and_tx_capable) { rdev->s_learning_mode = ene_set_learning_mode; @@ -1070,7 +1070,7 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id) rdev->s_tx_carrier = ene_set_tx_carrier; rdev->s_tx_duty_cycle = ene_set_tx_duty_cycle; rdev->s_carrier_report = ene_set_carrier_report; - rdev->input_name = "ENE eHome Infrared Remote Transceiver"; + rdev->device_name = "ENE eHome Infrared Remote Transceiver"; } dev->rdev = rdev; diff --git a/drivers/media/rc/fintek-cir.c b/drivers/media/rc/fintek-cir.c index 0d3562712f27..f2639d0c2fca 100644 --- a/drivers/media/rc/fintek-cir.c +++ b/drivers/media/rc/fintek-cir.c @@ -529,10 +529,10 @@ static int fintek_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id /* Set up the rc device */ rdev->priv = fintek; - rdev->allowed_protocols = RC_BIT_ALL_IR_DECODER; + rdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER; rdev->open = fintek_open; rdev->close = fintek_close; - rdev->input_name = FINTEK_DESCRIPTION; + rdev->device_name = FINTEK_DESCRIPTION; rdev->input_phys = "fintek/cir0"; rdev->input_id.bustype = BUS_HOST; rdev->input_id.vendor = VENDOR_ID_FINTEK; diff --git a/drivers/media/rc/gpio-ir-recv.c b/drivers/media/rc/gpio-ir-recv.c index b4f773b9dc1d..7248b3662285 100644 --- a/drivers/media/rc/gpio-ir-recv.c +++ b/drivers/media/rc/gpio-ir-recv.c @@ -30,7 +30,6 @@ struct gpio_rc_dev { struct rc_dev *rcdev; int gpio_nr; bool active_low; - struct timer_list flush_timer; }; #ifdef CONFIG_OF @@ -77,7 +76,6 @@ static irqreturn_t gpio_ir_recv_irq(int irq, void *dev_id) struct gpio_rc_dev *gpio_dev = dev_id; int gval; int rc = 0; - enum raw_event_type type = IR_SPACE; gval = gpio_get_value(gpio_dev->gpio_nr); @@ -87,33 +85,14 @@ static irqreturn_t gpio_ir_recv_irq(int irq, void *dev_id) if (gpio_dev->active_low) gval = !gval; - if (gval == 1) - type = IR_PULSE; - - rc = ir_raw_event_store_edge(gpio_dev->rcdev, type); + rc = ir_raw_event_store_edge(gpio_dev->rcdev, gval == 1); if (rc < 0) goto err_get_value; - mod_timer(&gpio_dev->flush_timer, - jiffies + nsecs_to_jiffies(gpio_dev->rcdev->timeout)); - - 
ir_raw_event_handle(gpio_dev->rcdev); - err_get_value: return IRQ_HANDLED; } -static void flush_timer(unsigned long arg) -{ - struct gpio_rc_dev *gpio_dev = (struct gpio_rc_dev *)arg; - DEFINE_IR_RAW_EVENT(ev); - - ev.timeout = true; - ev.duration = gpio_dev->rcdev->timeout; - ir_raw_event_store(gpio_dev->rcdev, &ev); - ir_raw_event_handle(gpio_dev->rcdev); -} - static int gpio_ir_recv_probe(struct platform_device *pdev) { struct gpio_rc_dev *gpio_dev; @@ -150,7 +129,7 @@ static int gpio_ir_recv_probe(struct platform_device *pdev) } rcdev->priv = gpio_dev; - rcdev->input_name = GPIO_IR_DEVICE_NAME; + rcdev->device_name = GPIO_IR_DEVICE_NAME; rcdev->input_phys = GPIO_IR_DEVICE_NAME "/input0"; rcdev->input_id.bustype = BUS_HOST; rcdev->input_id.vendor = 0x0001; @@ -164,16 +143,13 @@ static int gpio_ir_recv_probe(struct platform_device *pdev) if (pdata->allowed_protos) rcdev->allowed_protocols = pdata->allowed_protos; else - rcdev->allowed_protocols = RC_BIT_ALL_IR_DECODER; + rcdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER; rcdev->map_name = pdata->map_name ?: RC_MAP_EMPTY; gpio_dev->rcdev = rcdev; gpio_dev->gpio_nr = pdata->gpio_nr; gpio_dev->active_low = pdata->active_low; - setup_timer(&gpio_dev->flush_timer, flush_timer, - (unsigned long)gpio_dev); - rc = gpio_request(pdata->gpio_nr, "gpio-ir-recv"); if (rc < 0) goto err_gpio_request; @@ -216,7 +192,6 @@ static int gpio_ir_recv_remove(struct platform_device *pdev) struct gpio_rc_dev *gpio_dev = platform_get_drvdata(pdev); free_irq(gpio_to_irq(gpio_dev->gpio_nr), gpio_dev); - del_timer_sync(&gpio_dev->flush_timer); rc_unregister_device(gpio_dev->rcdev); gpio_free(gpio_dev->gpio_nr); kfree(gpio_dev); diff --git a/drivers/media/rc/gpio-ir-tx.c b/drivers/media/rc/gpio-ir-tx.c new file mode 100644 index 000000000000..cd476cab9782 --- /dev/null +++ b/drivers/media/rc/gpio-ir-tx.c @@ -0,0 +1,176 @@ +/* + * Copyright (C) 2017 Sean Young + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRIVER_NAME "gpio-ir-tx" +#define DEVICE_NAME "GPIO IR Bit Banging Transmitter" + +struct gpio_ir { + struct gpio_desc *gpio; + unsigned int carrier; + unsigned int duty_cycle; + /* we need a spinlock to hold the cpu while transmitting */ + spinlock_t lock; +}; + +static const struct of_device_id gpio_ir_tx_of_match[] = { + { .compatible = "gpio-ir-tx", }, + { }, +}; +MODULE_DEVICE_TABLE(of, gpio_ir_tx_of_match); + +static int gpio_ir_tx_set_duty_cycle(struct rc_dev *dev, u32 duty_cycle) +{ + struct gpio_ir *gpio_ir = dev->priv; + + gpio_ir->duty_cycle = duty_cycle; + + return 0; +} + +static int gpio_ir_tx_set_carrier(struct rc_dev *dev, u32 carrier) +{ + struct gpio_ir *gpio_ir = dev->priv; + + if (!carrier) + return -EINVAL; + + gpio_ir->carrier = carrier; + + return 0; +} + +static int gpio_ir_tx(struct rc_dev *dev, unsigned int *txbuf, + unsigned int count) +{ + struct gpio_ir *gpio_ir = dev->priv; + unsigned long flags; + ktime_t edge; + /* + * delta should never exceed 0.5 seconds (IR_MAX_DURATION) and on + * m68k ndelay(s64) does not compile; so use s32 rather than s64. 
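The carrier on/off times that gpio_ir_tx() derives just below from the carrier frequency and duty cycle are easy to check in isolation; the following standalone calculation uses the 38 kHz / 50% defaults set later in gpio_ir_tx_probe():

/* Standalone check of the carrier on/off times computed in gpio_ir_tx();
 * 38 kHz and 50% are the driver defaults. */
#include <stdio.h>

#define NSEC_PER_SEC		1000000000L
#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

int main(void)
{
	long carrier = 38000;	/* Hz */
	long duty = 50;		/* percent */

	long pulse = DIV_ROUND_CLOSEST(duty * (NSEC_PER_SEC / 100), carrier);
	long space = DIV_ROUND_CLOSEST((100 - duty) * (NSEC_PER_SEC / 100),
				       carrier);

	/* ~13158 ns high + ~13158 ns low = one ~26.3 us carrier period */
	printf("pulse=%ld ns space=%ld ns period=%ld ns\n",
	       pulse, space, pulse + space);
	return 0;
}

Keeping NSEC_PER_SEC / 100 inside the multiplication is what keeps the dividend within 32 bits, as the comment in gpio_ir_tx() notes.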
+ */ + s32 delta; + int i; + unsigned int pulse, space; + + /* Ensure the dividend fits into 32 bit */ + pulse = DIV_ROUND_CLOSEST(gpio_ir->duty_cycle * (NSEC_PER_SEC / 100), + gpio_ir->carrier); + space = DIV_ROUND_CLOSEST((100 - gpio_ir->duty_cycle) * + (NSEC_PER_SEC / 100), gpio_ir->carrier); + + spin_lock_irqsave(&gpio_ir->lock, flags); + + edge = ktime_get(); + + for (i = 0; i < count; i++) { + if (i % 2) { + // space + edge = ktime_add_us(edge, txbuf[i]); + delta = ktime_us_delta(edge, ktime_get()); + if (delta > 10) { + spin_unlock_irqrestore(&gpio_ir->lock, flags); + usleep_range(delta, delta + 10); + spin_lock_irqsave(&gpio_ir->lock, flags); + } else if (delta > 0) { + udelay(delta); + } + } else { + // pulse + ktime_t last = ktime_add_us(edge, txbuf[i]); + + while (ktime_before(ktime_get(), last)) { + gpiod_set_value(gpio_ir->gpio, 1); + edge = ktime_add_ns(edge, pulse); + delta = ktime_to_ns(ktime_sub(edge, + ktime_get())); + if (delta > 0) + ndelay(delta); + gpiod_set_value(gpio_ir->gpio, 0); + edge = ktime_add_ns(edge, space); + delta = ktime_to_ns(ktime_sub(edge, + ktime_get())); + if (delta > 0) + ndelay(delta); + } + + edge = last; + } + } + + spin_unlock_irqrestore(&gpio_ir->lock, flags); + + return count; +} + +static int gpio_ir_tx_probe(struct platform_device *pdev) +{ + struct gpio_ir *gpio_ir; + struct rc_dev *rcdev; + int rc; + + gpio_ir = devm_kmalloc(&pdev->dev, sizeof(*gpio_ir), GFP_KERNEL); + if (!gpio_ir) + return -ENOMEM; + + rcdev = devm_rc_allocate_device(&pdev->dev, RC_DRIVER_IR_RAW_TX); + if (!rcdev) + return -ENOMEM; + + gpio_ir->gpio = devm_gpiod_get(&pdev->dev, NULL, GPIOD_OUT_LOW); + if (IS_ERR(gpio_ir->gpio)) { + if (PTR_ERR(gpio_ir->gpio) != -EPROBE_DEFER) + dev_err(&pdev->dev, "Failed to get gpio (%ld)\n", + PTR_ERR(gpio_ir->gpio)); + return PTR_ERR(gpio_ir->gpio); + } + + rcdev->priv = gpio_ir; + rcdev->driver_name = DRIVER_NAME; + rcdev->device_name = DEVICE_NAME; + rcdev->tx_ir = gpio_ir_tx; + rcdev->s_tx_duty_cycle = gpio_ir_tx_set_duty_cycle; + rcdev->s_tx_carrier = gpio_ir_tx_set_carrier; + + gpio_ir->carrier = 38000; + gpio_ir->duty_cycle = 50; + spin_lock_init(&gpio_ir->lock); + + rc = devm_rc_register_device(&pdev->dev, rcdev); + if (rc < 0) + dev_err(&pdev->dev, "failed to register rc device\n"); + + return rc; +} + +static struct platform_driver gpio_ir_tx_driver = { + .probe = gpio_ir_tx_probe, + .driver = { + .name = DRIVER_NAME, + .of_match_table = of_match_ptr(gpio_ir_tx_of_match), + }, +}; +module_platform_driver(gpio_ir_tx_driver); + +MODULE_DESCRIPTION("GPIO IR Bit Banging Transmitter"); +MODULE_AUTHOR("Sean Young "); +MODULE_LICENSE("GPL"); diff --git a/drivers/media/rc/igorplugusb.c b/drivers/media/rc/igorplugusb.c index cb6d4f1247da..a5ea86be8f44 100644 --- a/drivers/media/rc/igorplugusb.c +++ b/drivers/media/rc/igorplugusb.c @@ -194,7 +194,7 @@ static int igorplugusb_probe(struct usb_interface *intf, if (!rc) goto fail; - rc->input_name = DRIVER_DESC; + rc->device_name = DRIVER_DESC; rc->input_phys = ir->phys; usb_to_input_id(udev, &rc->input_id); rc->dev.parent = &intf->dev; @@ -202,10 +202,11 @@ static int igorplugusb_probe(struct usb_interface *intf, * This device can only store 36 pulses + spaces, which is not enough * for the NEC protocol and many others. 
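As a rough back-of-the-envelope justification for the "36 pulses + spaces" limitation noted above (the frame structure here is standard NEC, not something stated in this patch):

/* Why NEC does not fit: count the pulse/space events in one NEC frame. */
#include <stdio.h>

int main(void)
{
	unsigned int leader = 2;	/* AGC pulse + long space */
	unsigned int bits = 32;		/* address, ~address, command, ~command */
	unsigned int trailer = 1;	/* final stop pulse */

	/* every data bit is one pulse followed by one space */
	printf("NEC events per frame: %u\n", leader + 2 * bits + trailer);
	return 0;
}

At roughly 67 events per frame, NEC and its NECX/NEC32 variants cannot fit in the device buffer, which is why they are masked out of allowed_protocols just below.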
*/ - rc->allowed_protocols = RC_BIT_ALL_IR_DECODER & ~(RC_BIT_NEC | - RC_BIT_NECX | RC_BIT_NEC32 | RC_BIT_RC6_6A_20 | - RC_BIT_RC6_6A_24 | RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE | - RC_BIT_SONY20 | RC_BIT_SANYO); + rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER & + ~(RC_PROTO_BIT_NEC | RC_PROTO_BIT_NECX | RC_PROTO_BIT_NEC32 | + RC_PROTO_BIT_RC6_6A_20 | RC_PROTO_BIT_RC6_6A_24 | + RC_PROTO_BIT_RC6_6A_32 | RC_PROTO_BIT_RC6_MCE | + RC_PROTO_BIT_SONY20 | RC_PROTO_BIT_SANYO); rc->priv = ir; rc->driver_name = DRIVER_NAME; diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c index 8711a7ff55cc..30e24da67226 100644 --- a/drivers/media/rc/iguanair.c +++ b/drivers/media/rc/iguanair.c @@ -487,11 +487,11 @@ static int iguanair_probe(struct usb_interface *intf, usb_make_path(ir->udev, ir->phys, sizeof(ir->phys)); - rc->input_name = ir->name; + rc->device_name = ir->name; rc->input_phys = ir->phys; usb_to_input_id(ir->udev, &rc->input_id); rc->dev.parent = &intf->dev; - rc->allowed_protocols = RC_BIT_ALL_IR_DECODER; + rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER; rc->priv = ir; rc->open = iguanair_open; rc->close = iguanair_close; diff --git a/drivers/media/rc/img-ir/img-ir-hw.c b/drivers/media/rc/img-ir/img-ir-hw.c index 8d1439622533..82fdf4cc0824 100644 --- a/drivers/media/rc/img-ir/img-ir-hw.c +++ b/drivers/media/rc/img-ir/img-ir-hw.c @@ -589,7 +589,7 @@ static void img_ir_set_decoder(struct img_ir_priv *priv, /* clear the wakeup scancode filter */ rdev->scancode_wakeup_filter.data = 0; rdev->scancode_wakeup_filter.mask = 0; - rdev->wakeup_protocol = RC_TYPE_UNKNOWN; + rdev->wakeup_protocol = RC_PROTO_UNKNOWN; /* clear raw filters */ _img_ir_set_filter(priv, NULL); @@ -823,7 +823,7 @@ static void img_ir_handle_data(struct img_ir_priv *priv, u32 len, u64 raw) int ret = IMG_IR_SCANCODE; struct img_ir_scancode_req request; - request.protocol = RC_TYPE_UNKNOWN; + request.protocol = RC_PROTO_UNKNOWN; request.toggle = 0; if (dec->scancode) @@ -1083,7 +1083,7 @@ int img_ir_probe_hw(struct img_ir_priv *priv) rdev->priv = priv; rdev->map_name = RC_MAP_EMPTY; rdev->allowed_protocols = img_ir_allowed_protos(priv); - rdev->input_name = "IMG Infrared Decoder"; + rdev->device_name = "IMG Infrared Decoder"; rdev->s_filter = img_ir_set_normal_filter; rdev->s_wakeup_filter = img_ir_set_wakeup_filter; diff --git a/drivers/media/rc/img-ir/img-ir-hw.h b/drivers/media/rc/img-ir/img-ir-hw.h index 91a297731661..58b68dd6c67d 100644 --- a/drivers/media/rc/img-ir/img-ir-hw.h +++ b/drivers/media/rc/img-ir/img-ir-hw.h @@ -135,13 +135,13 @@ struct img_ir_timing_regvals { /** * struct img_ir_scancode_req - Scancode request data. * @protocol: Protocol code of received message (defaults to - * RC_TYPE_UNKNOWN). + * RC_PROTO_UNKNOWN). * @scancode: Scan code of received message (must be written by * handler if IMG_IR_SCANCODE is returned). * @toggle: Toggle bit (defaults to 0). 
*/ struct img_ir_scancode_req { - enum rc_type protocol; + enum rc_proto protocol; u32 scancode; u8 toggle; }; diff --git a/drivers/media/rc/img-ir/img-ir-jvc.c b/drivers/media/rc/img-ir/img-ir-jvc.c index d3e2fc0bcfe1..4b07c76fbe1b 100644 --- a/drivers/media/rc/img-ir/img-ir-jvc.c +++ b/drivers/media/rc/img-ir/img-ir-jvc.c @@ -23,7 +23,7 @@ static int img_ir_jvc_scancode(int len, u64 raw, u64 enabled_protocols, cust = (raw >> 0) & 0xff; data = (raw >> 8) & 0xff; - request->protocol = RC_TYPE_JVC; + request->protocol = RC_PROTO_JVC; request->scancode = cust << 8 | data; return IMG_IR_SCANCODE; } @@ -52,7 +52,7 @@ static int img_ir_jvc_filter(const struct rc_scancode_filter *in, * http://support.jvc.com/consumer/support/documents/RemoteCodes.pdf */ struct img_ir_decoder img_ir_jvc = { - .type = RC_BIT_JVC, + .type = RC_PROTO_BIT_JVC, .control = { .decoden = 1, .code_type = IMG_IR_CODETYPE_PULSEDIST, diff --git a/drivers/media/rc/img-ir/img-ir-nec.c b/drivers/media/rc/img-ir/img-ir-nec.c index 044fd42b22a0..2fc0678ad2d7 100644 --- a/drivers/media/rc/img-ir/img-ir-nec.c +++ b/drivers/media/rc/img-ir/img-ir-nec.c @@ -35,20 +35,20 @@ static int img_ir_nec_scancode(int len, u64 raw, u64 enabled_protocols, bitrev8(addr_inv) << 16 | bitrev8(data) << 8 | bitrev8(data_inv); - request->protocol = RC_TYPE_NEC32; + request->protocol = RC_PROTO_NEC32; } else if ((addr_inv ^ addr) != 0xff) { /* Extended NEC */ /* scan encoding: AAaaDD */ request->scancode = addr << 16 | addr_inv << 8 | data; - request->protocol = RC_TYPE_NECX; + request->protocol = RC_PROTO_NECX; } else { /* Normal NEC */ /* scan encoding: AADD */ request->scancode = addr << 8 | data; - request->protocol = RC_TYPE_NEC; + request->protocol = RC_PROTO_NEC; } return IMG_IR_SCANCODE; } @@ -63,7 +63,7 @@ static int img_ir_nec_filter(const struct rc_scancode_filter *in, data = in->data & 0xff; data_m = in->mask & 0xff; - protocols &= RC_BIT_NEC | RC_BIT_NECX | RC_BIT_NEC32; + protocols &= RC_PROTO_BIT_NEC | RC_PROTO_BIT_NECX | RC_PROTO_BIT_NEC32; /* * If only one bit is set, we were requested to do an exact @@ -72,14 +72,14 @@ static int img_ir_nec_filter(const struct rc_scancode_filter *in, */ if (!is_power_of_2(protocols)) { if ((in->data | in->mask) & 0xff000000) - protocols = RC_BIT_NEC32; + protocols = RC_PROTO_BIT_NEC32; else if ((in->data | in->mask) & 0x00ff0000) - protocols = RC_BIT_NECX; + protocols = RC_PROTO_BIT_NECX; else - protocols = RC_BIT_NEC; + protocols = RC_PROTO_BIT_NEC; } - if (protocols == RC_BIT_NEC32) { + if (protocols == RC_PROTO_BIT_NEC32) { /* 32-bit NEC (used by Apple and TiVo remotes) */ /* scan encoding: as transmitted, MSBit = first received bit */ addr = bitrev8(in->data >> 24); @@ -90,7 +90,7 @@ static int img_ir_nec_filter(const struct rc_scancode_filter *in, data_m = bitrev8(in->mask >> 8); data_inv = bitrev8(in->data >> 0); data_inv_m = bitrev8(in->mask >> 0); - } else if (protocols == RC_BIT_NECX) { + } else if (protocols == RC_PROTO_BIT_NECX) { /* Extended NEC */ /* scan encoding AAaaDD */ addr = (in->data >> 16) & 0xff; @@ -128,7 +128,7 @@ static int img_ir_nec_filter(const struct rc_scancode_filter *in, * http://wiki.altium.com/display/ADOH/NEC+Infrared+Transmission+Protocol */ struct img_ir_decoder img_ir_nec = { - .type = RC_BIT_NEC | RC_BIT_NECX | RC_BIT_NEC32, + .type = RC_PROTO_BIT_NEC | RC_PROTO_BIT_NECX | RC_PROTO_BIT_NEC32, .control = { .decoden = 1, .code_type = IMG_IR_CODETYPE_PULSEDIST, diff --git a/drivers/media/rc/img-ir/img-ir-raw.c b/drivers/media/rc/img-ir/img-ir-raw.c index 
8d2f8e2006e7..64714efc1145 100644 --- a/drivers/media/rc/img-ir/img-ir-raw.c +++ b/drivers/media/rc/img-ir/img-ir-raw.c @@ -40,9 +40,9 @@ static void img_ir_refresh_raw(struct img_ir_priv *priv, u32 irq_status) /* report the edge to the IR raw decoders */ if (ir_status) /* low */ - ir_raw_event_store_edge(rc_dev, IR_SPACE); + ir_raw_event_store_edge(rc_dev, false); else /* high */ - ir_raw_event_store_edge(rc_dev, IR_PULSE); + ir_raw_event_store_edge(rc_dev, true); ir_raw_event_handle(rc_dev); } @@ -117,7 +117,7 @@ int img_ir_probe_raw(struct img_ir_priv *priv) } rdev->priv = priv; rdev->map_name = RC_MAP_EMPTY; - rdev->input_name = "IMG Infrared Decoder Raw"; + rdev->device_name = "IMG Infrared Decoder Raw"; /* Register raw decoder */ error = rc_register_device(rdev); diff --git a/drivers/media/rc/img-ir/img-ir-rc5.c b/drivers/media/rc/img-ir/img-ir-rc5.c index a8a28a377eee..a1bc8705472b 100644 --- a/drivers/media/rc/img-ir/img-ir-rc5.c +++ b/drivers/media/rc/img-ir/img-ir-rc5.c @@ -33,7 +33,7 @@ static int img_ir_rc5_scancode(int len, u64 raw, u64 enabled_protocols, if (!start) return -EINVAL; - request->protocol = RC_TYPE_RC5; + request->protocol = RC_PROTO_RC5; request->scancode = addr << 8 | cmd; request->toggle = tgl; return IMG_IR_SCANCODE; @@ -52,7 +52,7 @@ static int img_ir_rc5_filter(const struct rc_scancode_filter *in, * see http://www.sbprojects.com/knowledge/ir/rc5.php */ struct img_ir_decoder img_ir_rc5 = { - .type = RC_BIT_RC5, + .type = RC_PROTO_BIT_RC5, .control = { .bitoriend2 = 1, .code_type = IMG_IR_CODETYPE_BIPHASE, diff --git a/drivers/media/rc/img-ir/img-ir-rc6.c b/drivers/media/rc/img-ir/img-ir-rc6.c index de1e27534968..5f34f59ca257 100644 --- a/drivers/media/rc/img-ir/img-ir-rc6.c +++ b/drivers/media/rc/img-ir/img-ir-rc6.c @@ -54,7 +54,7 @@ static int img_ir_rc6_scancode(int len, u64 raw, u64 enabled_protocols, if (mode) return -EINVAL; - request->protocol = RC_TYPE_RC6_0; + request->protocol = RC_PROTO_RC6_0; request->scancode = addr << 8 | cmd; request->toggle = trl2; return IMG_IR_SCANCODE; @@ -73,7 +73,7 @@ static int img_ir_rc6_filter(const struct rc_scancode_filter *in, * see http://www.sbprojects.com/knowledge/ir/rc6.php */ struct img_ir_decoder img_ir_rc6 = { - .type = RC_BIT_RC6_0, + .type = RC_PROTO_BIT_RC6_0, .control = { .bitorien = 1, .code_type = IMG_IR_CODETYPE_BIPHASE, diff --git a/drivers/media/rc/img-ir/img-ir-sanyo.c b/drivers/media/rc/img-ir/img-ir-sanyo.c index f394994ffc22..55a755bb437c 100644 --- a/drivers/media/rc/img-ir/img-ir-sanyo.c +++ b/drivers/media/rc/img-ir/img-ir-sanyo.c @@ -44,7 +44,7 @@ static int img_ir_sanyo_scancode(int len, u64 raw, u64 enabled_protocols, return -EINVAL; /* Normal Sanyo */ - request->protocol = RC_TYPE_SANYO; + request->protocol = RC_PROTO_SANYO; request->scancode = addr << 8 | data; return IMG_IR_SCANCODE; } @@ -80,7 +80,7 @@ static int img_ir_sanyo_filter(const struct rc_scancode_filter *in, /* Sanyo decoder */ struct img_ir_decoder img_ir_sanyo = { - .type = RC_BIT_SANYO, + .type = RC_PROTO_BIT_SANYO, .control = { .decoden = 1, .code_type = IMG_IR_CODETYPE_PULSEDIST, diff --git a/drivers/media/rc/img-ir/img-ir-sharp.c b/drivers/media/rc/img-ir/img-ir-sharp.c index fe5acc4f030e..2d2530902cfa 100644 --- a/drivers/media/rc/img-ir/img-ir-sharp.c +++ b/drivers/media/rc/img-ir/img-ir-sharp.c @@ -32,7 +32,7 @@ static int img_ir_sharp_scancode(int len, u64 raw, u64 enabled_protocols, /* probably the second half of the message */ return -EINVAL; - request->protocol = RC_TYPE_SHARP; + request->protocol = 
RC_PROTO_SHARP; request->scancode = addr << 8 | cmd; return IMG_IR_SCANCODE; } @@ -73,7 +73,7 @@ static int img_ir_sharp_filter(const struct rc_scancode_filter *in, * See also http://www.sbprojects.com/knowledge/ir/sharp.php */ struct img_ir_decoder img_ir_sharp = { - .type = RC_BIT_SHARP, + .type = RC_PROTO_BIT_SHARP, .control = { .decoden = 0, .decodend2 = 1, diff --git a/drivers/media/rc/img-ir/img-ir-sony.c b/drivers/media/rc/img-ir/img-ir-sony.c index 3fcba271a419..a942d0be908c 100644 --- a/drivers/media/rc/img-ir/img-ir-sony.c +++ b/drivers/media/rc/img-ir/img-ir-sony.c @@ -19,32 +19,32 @@ static int img_ir_sony_scancode(int len, u64 raw, u64 enabled_protocols, switch (len) { case 12: - if (!(enabled_protocols & RC_BIT_SONY12)) + if (!(enabled_protocols & RC_PROTO_BIT_SONY12)) return -EINVAL; func = raw & 0x7f; /* first 7 bits */ raw >>= 7; dev = raw & 0x1f; /* next 5 bits */ subdev = 0; - request->protocol = RC_TYPE_SONY12; + request->protocol = RC_PROTO_SONY12; break; case 15: - if (!(enabled_protocols & RC_BIT_SONY15)) + if (!(enabled_protocols & RC_PROTO_BIT_SONY15)) return -EINVAL; func = raw & 0x7f; /* first 7 bits */ raw >>= 7; dev = raw & 0xff; /* next 8 bits */ subdev = 0; - request->protocol = RC_TYPE_SONY15; + request->protocol = RC_PROTO_SONY15; break; case 20: - if (!(enabled_protocols & RC_BIT_SONY20)) + if (!(enabled_protocols & RC_PROTO_BIT_SONY20)) return -EINVAL; func = raw & 0x7f; /* first 7 bits */ raw >>= 7; dev = raw & 0x1f; /* next 5 bits */ raw >>= 5; subdev = raw & 0xff; /* next 8 bits */ - request->protocol = RC_TYPE_SONY20; + request->protocol = RC_PROTO_SONY20; break; default: return -EINVAL; @@ -68,7 +68,8 @@ static int img_ir_sony_filter(const struct rc_scancode_filter *in, func = (in->data >> 0) & 0x7f; func_m = (in->mask >> 0) & 0x7f; - protocols &= RC_BIT_SONY12 | RC_BIT_SONY15 | RC_BIT_SONY20; + protocols &= RC_PROTO_BIT_SONY12 | RC_PROTO_BIT_SONY15 | + RC_PROTO_BIT_SONY20; /* * If only one bit is set, we were requested to do an exact @@ -77,20 +78,20 @@ static int img_ir_sony_filter(const struct rc_scancode_filter *in, */ if (!is_power_of_2(protocols)) { if (subdev & subdev_m) - protocols = RC_BIT_SONY20; + protocols = RC_PROTO_BIT_SONY20; else if (dev & dev_m & 0xe0) - protocols = RC_BIT_SONY15; + protocols = RC_PROTO_BIT_SONY15; else - protocols = RC_BIT_SONY12; + protocols = RC_PROTO_BIT_SONY12; } - if (protocols == RC_BIT_SONY20) { + if (protocols == RC_PROTO_BIT_SONY20) { /* can't encode subdev and higher device bits */ if (dev & dev_m & 0xe0) return -EINVAL; len = 20; dev_m &= 0x1f; - } else if (protocols == RC_BIT_SONY15) { + } else if (protocols == RC_PROTO_BIT_SONY15) { len = 15; subdev_m = 0; } else { @@ -128,7 +129,7 @@ static int img_ir_sony_filter(const struct rc_scancode_filter *in, * http://picprojects.org.uk/projects/sirc/sonysirc.pdf */ struct img_ir_decoder img_ir_sony = { - .type = RC_BIT_SONY12 | RC_BIT_SONY15 | RC_BIT_SONY20, + .type = RC_PROTO_BIT_SONY12 | RC_PROTO_BIT_SONY15 | RC_PROTO_BIT_SONY20, .control = { .decoden = 1, .code_type = IMG_IR_CODETYPE_PULSELEN, diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c index bd76534a2749..7b3f31cc63d2 100644 --- a/drivers/media/rc/imon.c +++ b/drivers/media/rc/imon.c @@ -148,7 +148,7 @@ struct imon_context { u32 last_keycode; /* last reported input keycode */ u32 rc_scancode; /* the computed remote scancode */ u8 rc_toggle; /* the computed remote toggle bit */ - u64 rc_type; /* iMON or MCE (RC6) IR protocol? */ + u64 rc_proto; /* iMON or MCE (RC6) IR protocol? 
*/ bool release_code; /* some keys send a release code */ u8 display_type; /* store the display type */ @@ -911,7 +911,7 @@ static struct attribute *imon_display_sysfs_entries[] = { NULL }; -static struct attribute_group imon_display_attr_group = { +static const struct attribute_group imon_display_attr_group = { .attrs = imon_display_sysfs_entries }; @@ -920,7 +920,7 @@ static struct attribute *imon_rf_sysfs_entries[] = { NULL }; -static struct attribute_group imon_rf_attr_group = { +static const struct attribute_group imon_rf_attr_group = { .attrs = imon_rf_sysfs_entries }; @@ -1118,7 +1118,7 @@ static void imon_touch_display_timeout(unsigned long data) * it is not, so we must acquire it prior to calling send_packet, which * requires that the lock is held. */ -static int imon_ir_change_protocol(struct rc_dev *rc, u64 *rc_type) +static int imon_ir_change_protocol(struct rc_dev *rc, u64 *rc_proto) { int retval; struct imon_context *ictx = rc->priv; @@ -1127,25 +1127,25 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 *rc_type) unsigned char ir_proto_packet[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86 }; - if (*rc_type && !(*rc_type & rc->allowed_protocols)) + if (*rc_proto && !(*rc_proto & rc->allowed_protocols)) dev_warn(dev, "Looks like you're trying to use an IR protocol this device does not support\n"); - if (*rc_type & RC_BIT_RC6_MCE) { + if (*rc_proto & RC_PROTO_BIT_RC6_MCE) { dev_dbg(dev, "Configuring IR receiver for MCE protocol\n"); ir_proto_packet[0] = 0x01; - *rc_type = RC_BIT_RC6_MCE; - } else if (*rc_type & RC_BIT_OTHER) { + *rc_proto = RC_PROTO_BIT_RC6_MCE; + } else if (*rc_proto & RC_PROTO_BIT_OTHER) { dev_dbg(dev, "Configuring IR receiver for iMON protocol\n"); if (!pad_stabilize) dev_dbg(dev, "PAD stabilize functionality disabled\n"); /* ir_proto_packet[0] = 0x00; // already the default */ - *rc_type = RC_BIT_OTHER; + *rc_proto = RC_PROTO_BIT_OTHER; } else { dev_warn(dev, "Unsupported IR protocol specified, overriding to iMON IR protocol\n"); if (!pad_stabilize) dev_dbg(dev, "PAD stabilize functionality disabled\n"); /* ir_proto_packet[0] = 0x00; // already the default */ - *rc_type = RC_BIT_OTHER; + *rc_proto = RC_PROTO_BIT_OTHER; } memcpy(ictx->usb_tx_buf, &ir_proto_packet, sizeof(ir_proto_packet)); @@ -1159,7 +1159,7 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 *rc_type) if (retval) goto out; - ictx->rc_type = *rc_type; + ictx->rc_proto = *rc_proto; ictx->pad_mouse = false; out: @@ -1435,7 +1435,7 @@ static void imon_pad_to_keys(struct imon_context *ictx, unsigned char *buf) rel_x = buf[2]; rel_y = buf[3]; - if (ictx->rc_type == RC_BIT_OTHER && pad_stabilize) { + if (ictx->rc_proto == RC_PROTO_BIT_OTHER && pad_stabilize) { if ((buf[1] == 0) && ((rel_x != 0) || (rel_y != 0))) { dir = stabilize((int)rel_x, (int)rel_y, timeout, threshold); @@ -1502,7 +1502,7 @@ static void imon_pad_to_keys(struct imon_context *ictx, unsigned char *buf) buf[0] = 0x01; buf[1] = buf[4] = buf[5] = buf[6] = buf[7] = 0; - if (ictx->rc_type == RC_BIT_OTHER && pad_stabilize) { + if (ictx->rc_proto == RC_PROTO_BIT_OTHER && pad_stabilize) { dir = stabilize((int)rel_x, (int)rel_y, timeout, threshold); if (!dir) { @@ -1706,7 +1706,7 @@ static void imon_incoming_scancode(struct imon_context *ictx, ictx->release_code = false; } else { scancode = be32_to_cpu(*((__be32 *)buf)); - if (ictx->rc_type == RC_BIT_RC6_MCE) { + if (ictx->rc_proto == RC_PROTO_BIT_RC6_MCE) { ktype = IMON_KEY_IMON; if (buf[0] == 0x80) ktype = IMON_KEY_MCE; @@ -1769,10 +1769,10 @@ static void 
imon_incoming_scancode(struct imon_context *ictx, if (press_type == 0) rc_keyup(ictx->rdev); else { - if (ictx->rc_type == RC_BIT_RC6_MCE || - ictx->rc_type == RC_BIT_OTHER) + if (ictx->rc_proto == RC_PROTO_BIT_RC6_MCE || + ictx->rc_proto == RC_PROTO_BIT_OTHER) rc_keydown(ictx->rdev, - ictx->rc_type == RC_BIT_RC6_MCE ? RC_TYPE_RC6_MCE : RC_TYPE_OTHER, + ictx->rc_proto == RC_PROTO_BIT_RC6_MCE ? RC_PROTO_RC6_MCE : RC_PROTO_OTHER, ictx->rc_scancode, ictx->rc_toggle); spin_lock_irqsave(&ictx->kc_lock, flags); ictx->last_keycode = ictx->kc; @@ -1936,7 +1936,7 @@ static void imon_get_ffdc_type(struct imon_context *ictx) { u8 ffdc_cfg_byte = ictx->usb_rx_buf[6]; u8 detected_display_type = IMON_DISPLAY_TYPE_NONE; - u64 allowed_protos = RC_BIT_OTHER; + u64 allowed_protos = RC_PROTO_BIT_OTHER; switch (ffdc_cfg_byte) { /* iMON Knob, no display, iMON IR + vol knob */ @@ -1967,27 +1967,27 @@ static void imon_get_ffdc_type(struct imon_context *ictx) case 0x9e: dev_info(ictx->dev, "0xffdc iMON VFD, MCE IR"); detected_display_type = IMON_DISPLAY_TYPE_VFD; - allowed_protos = RC_BIT_RC6_MCE; + allowed_protos = RC_PROTO_BIT_RC6_MCE; break; /* iMON LCD, MCE IR */ case 0x9f: dev_info(ictx->dev, "0xffdc iMON LCD, MCE IR"); detected_display_type = IMON_DISPLAY_TYPE_LCD; - allowed_protos = RC_BIT_RC6_MCE; + allowed_protos = RC_PROTO_BIT_RC6_MCE; break; default: dev_info(ictx->dev, "Unknown 0xffdc device, defaulting to VFD and iMON IR"); detected_display_type = IMON_DISPLAY_TYPE_VFD; /* We don't know which one it is, allow user to set the * RC6 one from userspace if OTHER wasn't correct. */ - allowed_protos |= RC_BIT_RC6_MCE; + allowed_protos |= RC_PROTO_BIT_RC6_MCE; break; } printk(KERN_CONT " (id 0x%02x)\n", ffdc_cfg_byte); ictx->display_type = detected_display_type; - ictx->rc_type = allowed_protos; + ictx->rc_proto = allowed_protos; } static void imon_set_display_type(struct imon_context *ictx) @@ -2063,17 +2063,18 @@ static struct rc_dev *imon_init_rdev(struct imon_context *ictx) sizeof(ictx->phys_rdev)); strlcat(ictx->phys_rdev, "/input0", sizeof(ictx->phys_rdev)); - rdev->input_name = ictx->name_rdev; + rdev->device_name = ictx->name_rdev; rdev->input_phys = ictx->phys_rdev; usb_to_input_id(ictx->usbdev_intf0, &rdev->input_id); rdev->dev.parent = ictx->dev; rdev->priv = ictx; if (ictx->dev_descr->flags & IMON_IR_RAW) - rdev->allowed_protocols = RC_BIT_ALL_IR_DECODER; + rdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER; else /* iMON PAD or MCE */ - rdev->allowed_protocols = RC_BIT_OTHER | RC_BIT_RC6_MCE; + rdev->allowed_protocols = RC_PROTO_BIT_OTHER | + RC_PROTO_BIT_RC6_MCE; rdev->change_protocol = imon_ir_change_protocol; rdev->driver_name = MOD_NAME; @@ -2086,12 +2087,12 @@ static struct rc_dev *imon_init_rdev(struct imon_context *ictx) if (ictx->product == 0xffdc) { imon_get_ffdc_type(ictx); - rdev->allowed_protocols = ictx->rc_type; + rdev->allowed_protocols = ictx->rc_proto; } imon_set_display_type(ictx); - if (ictx->rc_type == RC_BIT_RC6_MCE || + if (ictx->rc_proto == RC_PROTO_BIT_RC6_MCE || ictx->dev_descr->flags & IMON_IR_RAW) rdev->map_name = RC_MAP_IMON_MCE; else diff --git a/drivers/media/rc/ir-hix5hd2.c b/drivers/media/rc/ir-hix5hd2.c index 50951f686852..0ce11c41dfae 100644 --- a/drivers/media/rc/ir-hix5hd2.c +++ b/drivers/media/rc/ir-hix5hd2.c @@ -242,14 +242,14 @@ static int hix5hd2_ir_probe(struct platform_device *pdev) clk_prepare_enable(priv->clock); priv->rate = clk_get_rate(priv->clock); - rdev->allowed_protocols = RC_BIT_ALL_IR_DECODER; + rdev->allowed_protocols = 
RC_PROTO_BIT_ALL_IR_DECODER; rdev->priv = priv; rdev->open = hix5hd2_ir_open; rdev->close = hix5hd2_ir_close; rdev->driver_name = IR_HIX5HD2_NAME; map_name = of_get_property(node, "linux,rc-map-name", NULL); rdev->map_name = map_name ?: RC_MAP_EMPTY; - rdev->input_name = IR_HIX5HD2_NAME; + rdev->device_name = IR_HIX5HD2_NAME; rdev->input_phys = IR_HIX5HD2_NAME "/input0"; rdev->input_id.bustype = BUS_HOST; rdev->input_id.vendor = 0x0001; diff --git a/drivers/media/rc/ir-jvc-decoder.c b/drivers/media/rc/ir-jvc-decoder.c index 674bf156edcb..e2bd68c42edf 100644 --- a/drivers/media/rc/ir-jvc-decoder.c +++ b/drivers/media/rc/ir-jvc-decoder.c @@ -137,7 +137,7 @@ static int ir_jvc_decode(struct rc_dev *dev, struct ir_raw_event ev) scancode = (bitrev8((data->bits >> 8) & 0xff) << 8) | (bitrev8((data->bits >> 0) & 0xff) << 0); IR_dprintk(1, "JVC scancode 0x%04x\n", scancode); - rc_keydown(dev, RC_TYPE_JVC, scancode, data->toggle); + rc_keydown(dev, RC_PROTO_JVC, scancode, data->toggle); data->first = false; data->old_bits = data->bits; } else if (data->bits == data->old_bits) { @@ -193,7 +193,7 @@ static const struct ir_raw_timings_pd ir_jvc_timings = { * -ENOBUFS if there isn't enough space in the array to fit the * encoding. In this case all @max events will have been written. */ -static int ir_jvc_encode(enum rc_type protocol, u32 scancode, +static int ir_jvc_encode(enum rc_proto protocol, u32 scancode, struct ir_raw_event *events, unsigned int max) { struct ir_raw_event *e = events; @@ -209,7 +209,7 @@ static int ir_jvc_encode(enum rc_type protocol, u32 scancode, } static struct ir_raw_handler jvc_handler = { - .protocols = RC_BIT_JVC, + .protocols = RC_PROTO_BIT_JVC, .decode = ir_jvc_decode, .encode = ir_jvc_encode, }; diff --git a/drivers/media/rc/ir-mce_kbd-decoder.c b/drivers/media/rc/ir-mce_kbd-decoder.c index 6a4d58b88d91..7c572a643656 100644 --- a/drivers/media/rc/ir-mce_kbd-decoder.c +++ b/drivers/media/rc/ir-mce_kbd-decoder.c @@ -358,6 +358,9 @@ static int ir_mce_kbd_register(struct rc_dev *dev) struct input_dev *idev; int i, ret; + if (dev->driver_type == RC_DRIVER_IR_RAW_TX) + return 0; + idev = input_allocate_device(); if (!idev) return -ENOMEM; @@ -413,6 +416,9 @@ static int ir_mce_kbd_unregister(struct rc_dev *dev) struct mce_kbd_dec *mce_kbd = &dev->raw->mce_kbd; struct input_dev *idev = mce_kbd->idev; + if (dev->driver_type == RC_DRIVER_IR_RAW_TX) + return 0; + del_timer_sync(&mce_kbd->rx_timeout); input_unregister_device(idev); @@ -438,14 +444,14 @@ static const struct ir_raw_timings_manchester ir_mce_kbd_timings = { * -ENOBUFS if there isn't enough space in the array to fit the * encoding. In this case all @max events will have been written. 
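The -ENOBUFS convention documented here is shared by every encode callback converted in this patch. Purely as an illustration of that contract (the buffer size, helper name and error mapping are assumptions, not kernel code; only the enum rc_proto/scancode/events/max signature comes from the patch), a caller might look like this:

/* Illustrative caller of an ir_raw_handler ->encode() callback. */
static int example_encode_scancode(struct ir_raw_handler *handler,
				   enum rc_proto proto, u32 scancode)
{
	struct ir_raw_event events[64];	/* arbitrary size for the sketch */
	int n;

	n = handler->encode(proto, scancode, events, ARRAY_SIZE(events));
	if (n == -ENOBUFS)		/* all 64 slots written, more were needed */
		return -E2BIG;
	if (n < 0)			/* e.g. -EINVAL: scancode not encodable */
		return n;

	/* n events now describe the pulse/space sequence to transmit */
	return n;
}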
*/ -static int ir_mce_kbd_encode(enum rc_type protocol, u32 scancode, +static int ir_mce_kbd_encode(enum rc_proto protocol, u32 scancode, struct ir_raw_event *events, unsigned int max) { struct ir_raw_event *e = events; int len, ret; u64 raw; - if (protocol == RC_TYPE_MCIR2_KBD) { + if (protocol == RC_PROTO_MCIR2_KBD) { raw = scancode | ((u64)MCIR2_KEYBOARD_HEADER << MCIR2_KEYBOARD_NBITS); len = MCIR2_KEYBOARD_NBITS + MCIR2_HEADER_NBITS + 1; @@ -463,7 +469,7 @@ static int ir_mce_kbd_encode(enum rc_type protocol, u32 scancode, } static struct ir_raw_handler mce_kbd_handler = { - .protocols = RC_BIT_MCIR2_KBD | RC_BIT_MCIR2_MSE, + .protocols = RC_PROTO_BIT_MCIR2_KBD | RC_PROTO_BIT_MCIR2_MSE, .decode = ir_mce_kbd_decode, .encode = ir_mce_kbd_encode, .raw_register = ir_mce_kbd_register, diff --git a/drivers/media/rc/ir-nec-decoder.c b/drivers/media/rc/ir-nec-decoder.c index 3ce850314dca..817c18f2ddd1 100644 --- a/drivers/media/rc/ir-nec-decoder.c +++ b/drivers/media/rc/ir-nec-decoder.c @@ -49,9 +49,8 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev) { struct nec_dec *data = &dev->raw->nec; u32 scancode; - enum rc_type rc_type; + enum rc_proto rc_proto; u8 address, not_address, command, not_command; - bool send_32bits = false; if (!is_timing_event(ev)) { if (ev.reset) @@ -88,13 +87,9 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev) data->state = STATE_BIT_PULSE; return 0; } else if (eq_margin(ev.duration, NEC_REPEAT_SPACE, NEC_UNIT / 2)) { - if (!dev->keypressed) { - IR_dprintk(1, "Discarding last key repeat: event after key up\n"); - } else { - rc_repeat(dev); - IR_dprintk(1, "Repeat last key\n"); - data->state = STATE_TRAILER_PULSE; - } + rc_repeat(dev); + IR_dprintk(1, "Repeat last key\n"); + data->state = STATE_TRAILER_PULSE; return 0; } @@ -161,39 +156,14 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev) command = bitrev8((data->bits >> 8) & 0xff); not_command = bitrev8((data->bits >> 0) & 0xff); - if ((command ^ not_command) != 0xff) { - IR_dprintk(1, "NEC checksum error: received 0x%08x\n", - data->bits); - send_32bits = true; - } - - if (send_32bits) { - /* NEC transport, but modified protocol, used by at - * least Apple and TiVo remotes */ - scancode = not_address << 24 | - address << 16 | - not_command << 8 | - command; - IR_dprintk(1, "NEC (modified) scancode 0x%08x\n", scancode); - rc_type = RC_TYPE_NEC32; - } else if ((address ^ not_address) != 0xff) { - /* Extended NEC */ - scancode = address << 16 | - not_address << 8 | - command; - IR_dprintk(1, "NEC (Ext) scancode 0x%06x\n", scancode); - rc_type = RC_TYPE_NECX; - } else { - /* Normal NEC */ - scancode = address << 8 | command; - IR_dprintk(1, "NEC scancode 0x%04x\n", scancode); - rc_type = RC_TYPE_NEC; - } + scancode = ir_nec_bytes_to_scancode(address, not_address, + command, not_command, + &rc_proto); if (data->is_nec_x) data->necx_repeat = true; - rc_keydown(dev, rc_type, scancode, 0); + rc_keydown(dev, rc_proto, scancode, 0); data->state = STATE_INACTIVE; return 0; } @@ -210,19 +180,19 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev) * @scancode: a single NEC scancode. * @raw: raw data to be modulated. 
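The three NEC variants are told apart purely by their checksum bytes; the open-coded logic removed from ir_nec_decode() above is being centralised in an ir_nec_bytes_to_scancode() helper. Restated as a standalone sketch (the function below is illustrative, not the kernel helper, though the packing mirrors the removed code):

/* Illustrative restatement of the NEC variant selection that this patch
 * moves into ir_nec_bytes_to_scancode(); not the kernel implementation. */
static u32 example_nec_scancode(u8 address, u8 not_address,
				u8 command, u8 not_command,
				enum rc_proto *protocol)
{
	if ((command ^ not_command) != 0xff) {
		/* NEC transport, modified protocol (Apple and TiVo remotes) */
		*protocol = RC_PROTO_NEC32;
		return (u32)not_address << 24 | address << 16 |
		       not_command << 8 | command;
	}
	if ((address ^ not_address) != 0xff) {
		/* Extended NEC: 16-bit address, scan encoding AAaaDD */
		*protocol = RC_PROTO_NECX;
		return address << 16 | not_address << 8 | command;
	}
	/* Normal NEC: scan encoding AADD */
	*protocol = RC_PROTO_NEC;
	return address << 8 | command;
}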
*/ -static u32 ir_nec_scancode_to_raw(enum rc_type protocol, u32 scancode) +static u32 ir_nec_scancode_to_raw(enum rc_proto protocol, u32 scancode) { unsigned int addr, addr_inv, data, data_inv; data = scancode & 0xff; - if (protocol == RC_TYPE_NEC32) { + if (protocol == RC_PROTO_NEC32) { /* 32-bit NEC (used by Apple and TiVo remotes) */ /* scan encoding: aaAAddDD */ addr_inv = (scancode >> 24) & 0xff; addr = (scancode >> 16) & 0xff; data_inv = (scancode >> 8) & 0xff; - } else if (protocol == RC_TYPE_NECX) { + } else if (protocol == RC_PROTO_NECX) { /* Extended NEC */ /* scan encoding AAaaDD */ addr = (scancode >> 16) & 0xff; @@ -266,7 +236,7 @@ static const struct ir_raw_timings_pd ir_nec_timings = { * -ENOBUFS if there isn't enough space in the array to fit the * encoding. In this case all @max events will have been written. */ -static int ir_nec_encode(enum rc_type protocol, u32 scancode, +static int ir_nec_encode(enum rc_proto protocol, u32 scancode, struct ir_raw_event *events, unsigned int max) { struct ir_raw_event *e = events; @@ -285,7 +255,8 @@ static int ir_nec_encode(enum rc_type protocol, u32 scancode, } static struct ir_raw_handler nec_handler = { - .protocols = RC_BIT_NEC | RC_BIT_NECX | RC_BIT_NEC32, + .protocols = RC_PROTO_BIT_NEC | RC_PROTO_BIT_NECX | + RC_PROTO_BIT_NEC32, .decode = ir_nec_decode, .encode = ir_nec_encode, }; diff --git a/drivers/media/rc/ir-rc5-decoder.c b/drivers/media/rc/ir-rc5-decoder.c index fcfedf95def7..1292f534de43 100644 --- a/drivers/media/rc/ir-rc5-decoder.c +++ b/drivers/media/rc/ir-rc5-decoder.c @@ -51,7 +51,7 @@ static int ir_rc5_decode(struct rc_dev *dev, struct ir_raw_event ev) struct rc5_dec *data = &dev->raw->rc5; u8 toggle; u32 scancode; - enum rc_type protocol; + enum rc_proto protocol; if (!is_timing_event(ev)) { if (ev.reset) @@ -124,7 +124,7 @@ static int ir_rc5_decode(struct rc_dev *dev, struct ir_raw_event ev) if (data->is_rc5x && data->count == RC5X_NBITS) { /* RC5X */ u8 xdata, command, system; - if (!(dev->enabled_protocols & RC_BIT_RC5X_20)) { + if (!(dev->enabled_protocols & RC_PROTO_BIT_RC5X_20)) { data->state = STATE_INACTIVE; return 0; } @@ -134,12 +134,12 @@ static int ir_rc5_decode(struct rc_dev *dev, struct ir_raw_event ev) toggle = (data->bits & 0x20000) ? 1 : 0; command += (data->bits & 0x40000) ? 0 : 0x40; scancode = system << 16 | command << 8 | xdata; - protocol = RC_TYPE_RC5X_20; + protocol = RC_PROTO_RC5X_20; } else if (!data->is_rc5x && data->count == RC5_NBITS) { /* RC5 */ u8 command, system; - if (!(dev->enabled_protocols & RC_BIT_RC5)) { + if (!(dev->enabled_protocols & RC_PROTO_BIT_RC5)) { data->state = STATE_INACTIVE; return 0; } @@ -148,12 +148,12 @@ static int ir_rc5_decode(struct rc_dev *dev, struct ir_raw_event ev) toggle = (data->bits & 0x00800) ? 1 : 0; command += (data->bits & 0x01000) ? 0 : 0x40; scancode = system << 8 | command; - protocol = RC_TYPE_RC5; + protocol = RC_PROTO_RC5; } else if (!data->is_rc5x && data->count == RC5_SZ_NBITS) { /* RC5 StreamZap */ u8 command, system; - if (!(dev->enabled_protocols & RC_BIT_RC5_SZ)) { + if (!(dev->enabled_protocols & RC_PROTO_BIT_RC5_SZ)) { data->state = STATE_INACTIVE; return 0; } @@ -161,7 +161,7 @@ static int ir_rc5_decode(struct rc_dev *dev, struct ir_raw_event ev) system = (data->bits & 0x02FC0) >> 6; toggle = (data->bits & 0x01000) ? 1 : 0; scancode = system << 6 | command; - protocol = RC_TYPE_RC5_SZ; + protocol = RC_PROTO_RC5_SZ; } else break; @@ -221,7 +221,7 @@ static const struct ir_raw_timings_manchester ir_rc5_sz_timings = { * encoding. 
In this case all @max events will have been written. * -EINVAL if the scancode is ambiguous or invalid. */ -static int ir_rc5_encode(enum rc_type protocol, u32 scancode, +static int ir_rc5_encode(enum rc_proto protocol, u32 scancode, struct ir_raw_event *events, unsigned int max) { int ret; @@ -229,7 +229,7 @@ static int ir_rc5_encode(enum rc_type protocol, u32 scancode, unsigned int data, xdata, command, commandx, system, pre_space_data; /* Detect protocol and convert scancode to raw data */ - if (protocol == RC_TYPE_RC5) { + if (protocol == RC_PROTO_RC5) { /* decode scancode */ command = (scancode & 0x003f) >> 0; commandx = (scancode & 0x0040) >> 6; @@ -242,7 +242,7 @@ static int ir_rc5_encode(enum rc_type protocol, u32 scancode, RC5_NBITS, data); if (ret < 0) return ret; - } else if (protocol == RC_TYPE_RC5X_20) { + } else if (protocol == RC_PROTO_RC5X_20) { /* decode scancode */ xdata = (scancode & 0x00003f) >> 0; command = (scancode & 0x003f00) >> 8; @@ -264,7 +264,7 @@ static int ir_rc5_encode(enum rc_type protocol, u32 scancode, data); if (ret < 0) return ret; - } else if (protocol == RC_TYPE_RC5_SZ) { + } else if (protocol == RC_PROTO_RC5_SZ) { /* RC5-SZ scancode is raw enough for Manchester as it is */ ret = ir_raw_gen_manchester(&e, max, &ir_rc5_sz_timings, RC5_SZ_NBITS, scancode & 0x2fff); @@ -278,7 +278,8 @@ static int ir_rc5_encode(enum rc_type protocol, u32 scancode, } static struct ir_raw_handler rc5_handler = { - .protocols = RC_BIT_RC5 | RC_BIT_RC5X_20 | RC_BIT_RC5_SZ, + .protocols = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC5X_20 | + RC_PROTO_BIT_RC5_SZ, .decode = ir_rc5_decode, .encode = ir_rc5_encode, }; diff --git a/drivers/media/rc/ir-rc6-decoder.c b/drivers/media/rc/ir-rc6-decoder.c index 6fe2268dada0..5d0d2fe3b7a7 100644 --- a/drivers/media/rc/ir-rc6-decoder.c +++ b/drivers/media/rc/ir-rc6-decoder.c @@ -88,7 +88,7 @@ static int ir_rc6_decode(struct rc_dev *dev, struct ir_raw_event ev) struct rc6_dec *data = &dev->raw->rc6; u32 scancode; u8 toggle; - enum rc_type protocol; + enum rc_proto protocol; if (!is_timing_event(ev)) { if (ev.reset) @@ -229,7 +229,7 @@ static int ir_rc6_decode(struct rc_dev *dev, struct ir_raw_event ev) case RC6_MODE_0: scancode = data->body; toggle = data->toggle; - protocol = RC_TYPE_RC6_0; + protocol = RC_PROTO_RC6_0; IR_dprintk(1, "RC6(0) scancode 0x%04x (toggle: %u)\n", scancode, toggle); break; @@ -244,20 +244,20 @@ static int ir_rc6_decode(struct rc_dev *dev, struct ir_raw_event ev) scancode = data->body; switch (data->count) { case 20: - protocol = RC_TYPE_RC6_6A_20; + protocol = RC_PROTO_RC6_6A_20; toggle = 0; break; case 24: - protocol = RC_TYPE_RC6_6A_24; + protocol = RC_PROTO_RC6_6A_24; toggle = 0; break; case 32: if ((scancode & RC6_6A_LCC_MASK) == RC6_6A_MCE_CC) { - protocol = RC_TYPE_RC6_MCE; + protocol = RC_PROTO_RC6_MCE; toggle = !!(scancode & RC6_6A_MCE_TOGGLE_MASK); scancode &= ~RC6_6A_MCE_TOGGLE_MASK; } else { - protocol = RC_TYPE_RC6_6A_32; + protocol = RC_PROTO_RC6_6A_32; toggle = 0; } break; @@ -322,13 +322,13 @@ static const struct ir_raw_timings_manchester ir_rc6_timings[4] = { * encoding. In this case all @max events will have been written. * -EINVAL if the scancode is ambiguous or invalid. 
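RC5 and RC6 are biphase (Manchester) coded, which is why the encode paths here go through ir_raw_gen_manchester(). As a rough conceptual sketch only (this is not the kernel helper; the half-bit duration, the local sample struct and the polarity convention are all assumptions made for illustration), Manchester coding turns each data bit, MSB first, into a pulse/space half-bit pair:

/* Conceptual Manchester sketch, NOT ir_raw_gen_manchester(): each bit
 * occupies two half-bit slots of ~444us (assumed here, the nominal RC6
 * unit); which half carries the pulse encodes the bit value, and RC5
 * and RC6 differ in which polarity they assign to a logical 1. */
struct example_sample {
	unsigned int duration_us;
	bool pulse;
};

static int example_manchester(u32 data, int nbits,
			      struct example_sample *out, int max)
{
	int i, n = 0;

	for (i = nbits - 1; i >= 0; i--) {
		bool bit = (data >> i) & 1;

		if (n + 2 > max)
			return -ENOBUFS;	/* same convention as above */

		out[n].duration_us = 444;
		out[n].pulse = bit;		/* first half-bit */
		n++;
		out[n].duration_us = 444;
		out[n].pulse = !bit;		/* second half-bit, opposite level */
		n++;
	}
	return n;
}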
*/ -static int ir_rc6_encode(enum rc_type protocol, u32 scancode, +static int ir_rc6_encode(enum rc_proto protocol, u32 scancode, struct ir_raw_event *events, unsigned int max) { int ret; struct ir_raw_event *e = events; - if (protocol == RC_TYPE_RC6_0) { + if (protocol == RC_PROTO_RC6_0) { /* Modulate the preamble */ ret = ir_raw_gen_manchester(&e, max, &ir_rc6_timings[0], 0, 0); if (ret < 0) @@ -358,14 +358,14 @@ static int ir_rc6_encode(enum rc_type protocol, u32 scancode, int bits; switch (protocol) { - case RC_TYPE_RC6_MCE: - case RC_TYPE_RC6_6A_32: + case RC_PROTO_RC6_MCE: + case RC_PROTO_RC6_6A_32: bits = 32; break; - case RC_TYPE_RC6_6A_24: + case RC_PROTO_RC6_6A_24: bits = 24; break; - case RC_TYPE_RC6_6A_20: + case RC_PROTO_RC6_6A_20: bits = 20; break; default: @@ -403,9 +403,9 @@ static int ir_rc6_encode(enum rc_type protocol, u32 scancode, } static struct ir_raw_handler rc6_handler = { - .protocols = RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 | - RC_BIT_RC6_6A_24 | RC_BIT_RC6_6A_32 | - RC_BIT_RC6_MCE, + .protocols = RC_PROTO_BIT_RC6_0 | RC_PROTO_BIT_RC6_6A_20 | + RC_PROTO_BIT_RC6_6A_24 | RC_PROTO_BIT_RC6_6A_32 | + RC_PROTO_BIT_RC6_MCE, .decode = ir_rc6_decode, .encode = ir_rc6_encode, }; diff --git a/drivers/media/rc/ir-sanyo-decoder.c b/drivers/media/rc/ir-sanyo-decoder.c index 520bb77dcb62..758c60956850 100644 --- a/drivers/media/rc/ir-sanyo-decoder.c +++ b/drivers/media/rc/ir-sanyo-decoder.c @@ -110,13 +110,9 @@ static int ir_sanyo_decode(struct rc_dev *dev, struct ir_raw_event ev) break; if (!data->count && geq_margin(ev.duration, SANYO_REPEAT_SPACE, SANYO_UNIT / 2)) { - if (!dev->keypressed) { - IR_dprintk(1, "SANYO discarding last key repeat: event after key up\n"); - } else { - rc_repeat(dev); - IR_dprintk(1, "SANYO repeat last key\n"); - data->state = STATE_INACTIVE; - } + rc_repeat(dev); + IR_dprintk(1, "SANYO repeat last key\n"); + data->state = STATE_INACTIVE; return 0; } @@ -165,7 +161,7 @@ static int ir_sanyo_decode(struct rc_dev *dev, struct ir_raw_event ev) scancode = address << 8 | command; IR_dprintk(1, "SANYO scancode: 0x%06x\n", scancode); - rc_keydown(dev, RC_TYPE_SANYO, scancode, 0); + rc_keydown(dev, RC_PROTO_SANYO, scancode, 0); data->state = STATE_INACTIVE; return 0; } @@ -199,7 +195,7 @@ static const struct ir_raw_timings_pd ir_sanyo_timings = { * -ENOBUFS if there isn't enough space in the array to fit the * encoding. In this case all @max events will have been written. 
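As everywhere else in this patch, the SANYO handler pairs the single-protocol value RC_PROTO_SANYO (passed to rc_keydown()) with the set-member mask RC_PROTO_BIT_SANYO (used in .protocols and the enabled_protocols checks). A small sketch of that relationship, assuming the usual definition RC_PROTO_BIT_X == BIT_ULL(RC_PROTO_X) in rc-map.h; the helper name is made up:

/* Sketch only; assumes RC_PROTO_BIT_X is defined as BIT_ULL(RC_PROTO_X). */
static bool example_protocol_enabled(struct rc_dev *dev, enum rc_proto proto)
{
	return dev->enabled_protocols & BIT_ULL(proto);
}

/* e.g. example_protocol_enabled(dev, RC_PROTO_SANYO) is then equivalent
 * to the dev->enabled_protocols & RC_PROTO_BIT_SANYO checks used by the
 * decoders in this patch. */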
*/ -static int ir_sanyo_encode(enum rc_type protocol, u32 scancode, +static int ir_sanyo_encode(enum rc_proto protocol, u32 scancode, struct ir_raw_event *events, unsigned int max) { struct ir_raw_event *e = events; @@ -219,7 +215,7 @@ static int ir_sanyo_encode(enum rc_type protocol, u32 scancode, } static struct ir_raw_handler sanyo_handler = { - .protocols = RC_BIT_SANYO, + .protocols = RC_PROTO_BIT_SANYO, .decode = ir_sanyo_decode, .encode = ir_sanyo_encode, }; diff --git a/drivers/media/rc/ir-sharp-decoder.c b/drivers/media/rc/ir-sharp-decoder.c index b47e89e2c1bd..129b558acc92 100644 --- a/drivers/media/rc/ir-sharp-decoder.c +++ b/drivers/media/rc/ir-sharp-decoder.c @@ -161,7 +161,7 @@ static int ir_sharp_decode(struct rc_dev *dev, struct ir_raw_event ev) scancode = address << 8 | command; IR_dprintk(1, "Sharp scancode 0x%04x\n", scancode); - rc_keydown(dev, RC_TYPE_SHARP, scancode, 0); + rc_keydown(dev, RC_PROTO_SHARP, scancode, 0); data->state = STATE_INACTIVE; return 0; } @@ -196,7 +196,7 @@ static const struct ir_raw_timings_pd ir_sharp_timings = { * -ENOBUFS if there isn't enough space in the array to fit the * encoding. In this case all @max events will have been written. */ -static int ir_sharp_encode(enum rc_type protocol, u32 scancode, +static int ir_sharp_encode(enum rc_proto protocol, u32 scancode, struct ir_raw_event *events, unsigned int max) { struct ir_raw_event *e = events; @@ -223,7 +223,7 @@ static int ir_sharp_encode(enum rc_type protocol, u32 scancode, } static struct ir_raw_handler sharp_handler = { - .protocols = RC_BIT_SHARP, + .protocols = RC_PROTO_BIT_SHARP, .decode = ir_sharp_decode, .encode = ir_sharp_encode, }; @@ -245,5 +245,5 @@ module_init(ir_sharp_decode_init); module_exit(ir_sharp_decode_exit); MODULE_LICENSE("GPL"); -MODULE_AUTHOR("James Hogan "); +MODULE_AUTHOR("James Hogan "); MODULE_DESCRIPTION("Sharp IR protocol decoder"); diff --git a/drivers/media/rc/ir-sony-decoder.c b/drivers/media/rc/ir-sony-decoder.c index 355fa8198f5a..a47ced763031 100644 --- a/drivers/media/rc/ir-sony-decoder.c +++ b/drivers/media/rc/ir-sony-decoder.c @@ -42,7 +42,7 @@ enum sony_state { static int ir_sony_decode(struct rc_dev *dev, struct ir_raw_event ev) { struct sony_dec *data = &dev->raw->sony; - enum rc_type protocol; + enum rc_proto protocol; u32 scancode; u8 device, subdevice, function; @@ -121,31 +121,31 @@ static int ir_sony_decode(struct rc_dev *dev, struct ir_raw_event ev) switch (data->count) { case 12: - if (!(dev->enabled_protocols & RC_BIT_SONY12)) + if (!(dev->enabled_protocols & RC_PROTO_BIT_SONY12)) goto finish_state_machine; device = bitrev8((data->bits << 3) & 0xF8); subdevice = 0; function = bitrev8((data->bits >> 4) & 0xFE); - protocol = RC_TYPE_SONY12; + protocol = RC_PROTO_SONY12; break; case 15: - if (!(dev->enabled_protocols & RC_BIT_SONY15)) + if (!(dev->enabled_protocols & RC_PROTO_BIT_SONY15)) goto finish_state_machine; device = bitrev8((data->bits >> 0) & 0xFF); subdevice = 0; function = bitrev8((data->bits >> 7) & 0xFE); - protocol = RC_TYPE_SONY15; + protocol = RC_PROTO_SONY15; break; case 20: - if (!(dev->enabled_protocols & RC_BIT_SONY20)) + if (!(dev->enabled_protocols & RC_PROTO_BIT_SONY20)) goto finish_state_machine; device = bitrev8((data->bits >> 5) & 0xF8); subdevice = bitrev8((data->bits >> 0) & 0xFF); function = bitrev8((data->bits >> 12) & 0xFE); - protocol = RC_TYPE_SONY20; + protocol = RC_PROTO_SONY20; break; default: IR_dprintk(1, "Sony invalid bitcount %u\n", data->count); @@ -190,17 +190,17 @@ static const struct 
ir_raw_timings_pl ir_sony_timings = { * -ENOBUFS if there isn't enough space in the array to fit the * encoding. In this case all @max events will have been written. */ -static int ir_sony_encode(enum rc_type protocol, u32 scancode, +static int ir_sony_encode(enum rc_proto protocol, u32 scancode, struct ir_raw_event *events, unsigned int max) { struct ir_raw_event *e = events; u32 raw, len; int ret; - if (protocol == RC_TYPE_SONY12) { + if (protocol == RC_PROTO_SONY12) { raw = (scancode & 0x7f) | ((scancode & 0x1f0000) >> 9); len = 12; - } else if (protocol == RC_TYPE_SONY15) { + } else if (protocol == RC_PROTO_SONY15) { raw = (scancode & 0x7f) | ((scancode & 0xff0000) >> 9); len = 15; } else { @@ -217,7 +217,8 @@ static int ir_sony_encode(enum rc_type protocol, u32 scancode, } static struct ir_raw_handler sony_handler = { - .protocols = RC_BIT_SONY12 | RC_BIT_SONY15 | RC_BIT_SONY20, + .protocols = RC_PROTO_BIT_SONY12 | RC_PROTO_BIT_SONY15 | + RC_PROTO_BIT_SONY20, .decode = ir_sony_decode, .encode = ir_sony_encode, }; diff --git a/drivers/media/rc/ir-spi.c b/drivers/media/rc/ir-spi.c index 7e383b3fedd5..29ed0638cb74 100644 --- a/drivers/media/rc/ir-spi.c +++ b/drivers/media/rc/ir-spi.c @@ -155,6 +155,7 @@ static int ir_spi_probe(struct spi_device *spi) idata->rc->tx_ir = ir_spi_tx; idata->rc->s_tx_carrier = ir_spi_set_tx_carrier; idata->rc->s_tx_duty_cycle = ir_spi_set_duty_cycle; + idata->rc->device_name = "IR SPI"; idata->rc->driver_name = IR_SPI_DRIVER_NAME; idata->rc->priv = idata; idata->spi = spi; diff --git a/drivers/media/rc/ir-xmp-decoder.c b/drivers/media/rc/ir-xmp-decoder.c index 18596190bbb8..6f464be1c8d7 100644 --- a/drivers/media/rc/ir-xmp-decoder.c +++ b/drivers/media/rc/ir-xmp-decoder.c @@ -141,7 +141,7 @@ static int ir_xmp_decode(struct rc_dev *dev, struct ir_raw_event ev) IR_dprintk(1, "XMP scancode 0x%06x\n", scancode); if (toggle == 0) { - rc_keydown(dev, RC_TYPE_XMP, scancode, 0); + rc_keydown(dev, RC_PROTO_XMP, scancode, 0); } else { rc_repeat(dev); IR_dprintk(1, "Repeat last key\n"); @@ -196,7 +196,7 @@ static int ir_xmp_decode(struct rc_dev *dev, struct ir_raw_event ev) } static struct ir_raw_handler xmp_handler = { - .protocols = RC_BIT_XMP, + .protocols = RC_PROTO_BIT_XMP, .decode = ir_xmp_decode, }; diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c index e9e4befbbebb..65e104c7ddfc 100644 --- a/drivers/media/rc/ite-cir.c +++ b/drivers/media/rc/ite-cir.c @@ -1556,7 +1556,7 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id /* set up ir-core props */ rdev->priv = itdev; - rdev->allowed_protocols = RC_BIT_ALL_IR_DECODER; + rdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER; rdev->open = ite_open; rdev->close = ite_close; rdev->s_idle = ite_s_idle; @@ -1576,7 +1576,7 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id rdev->s_tx_duty_cycle = ite_set_tx_duty_cycle; } - rdev->input_name = dev_desc->model; + rdev->device_name = dev_desc->model; rdev->input_id.bustype = BUS_HOST; rdev->input_id.vendor = PCI_VENDOR_ID_ITE; rdev->input_id.product = 0; diff --git a/drivers/media/rc/keymaps/Makefile b/drivers/media/rc/keymaps/Makefile index 2945f99907b5..af6496d709fb 100644 --- a/drivers/media/rc/keymaps/Makefile +++ b/drivers/media/rc/keymaps/Makefile @@ -109,4 +109,5 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \ rc-videomate-tv-pvr.o \ rc-winfast.o \ rc-winfast-usbii-deluxe.o \ - rc-su3000.o + rc-su3000.o \ + rc-zx-irdec.o diff --git a/drivers/media/rc/keymaps/rc-adstech-dvb-t-pci.c 
b/drivers/media/rc/keymaps/rc-adstech-dvb-t-pci.c index 01d901fbfc8b..2d303c2cee3b 100644 --- a/drivers/media/rc/keymaps/rc-adstech-dvb-t-pci.c +++ b/drivers/media/rc/keymaps/rc-adstech-dvb-t-pci.c @@ -66,10 +66,10 @@ static struct rc_map_table adstech_dvb_t_pci[] = { static struct rc_map_list adstech_dvb_t_pci_map = { .map = { - .scan = adstech_dvb_t_pci, - .size = ARRAY_SIZE(adstech_dvb_t_pci), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_ADSTECH_DVB_T_PCI, + .scan = adstech_dvb_t_pci, + .size = ARRAY_SIZE(adstech_dvb_t_pci), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_ADSTECH_DVB_T_PCI, } }; diff --git a/drivers/media/rc/keymaps/rc-alink-dtu-m.c b/drivers/media/rc/keymaps/rc-alink-dtu-m.c index 4e6ade8e616f..3818c33734a1 100644 --- a/drivers/media/rc/keymaps/rc-alink-dtu-m.c +++ b/drivers/media/rc/keymaps/rc-alink-dtu-m.c @@ -45,10 +45,10 @@ static struct rc_map_table alink_dtu_m[] = { static struct rc_map_list alink_dtu_m_map = { .map = { - .scan = alink_dtu_m, - .size = ARRAY_SIZE(alink_dtu_m), - .rc_type = RC_TYPE_NEC, - .name = RC_MAP_ALINK_DTU_M, + .scan = alink_dtu_m, + .size = ARRAY_SIZE(alink_dtu_m), + .rc_proto = RC_PROTO_NEC, + .name = RC_MAP_ALINK_DTU_M, } }; diff --git a/drivers/media/rc/keymaps/rc-anysee.c b/drivers/media/rc/keymaps/rc-anysee.c index c735fe10a390..e75e51b34d29 100644 --- a/drivers/media/rc/keymaps/rc-anysee.c +++ b/drivers/media/rc/keymaps/rc-anysee.c @@ -70,10 +70,10 @@ static struct rc_map_table anysee[] = { static struct rc_map_list anysee_map = { .map = { - .scan = anysee, - .size = ARRAY_SIZE(anysee), - .rc_type = RC_TYPE_NEC, - .name = RC_MAP_ANYSEE, + .scan = anysee, + .size = ARRAY_SIZE(anysee), + .rc_proto = RC_PROTO_NEC, + .name = RC_MAP_ANYSEE, } }; diff --git a/drivers/media/rc/keymaps/rc-apac-viewcomp.c b/drivers/media/rc/keymaps/rc-apac-viewcomp.c index bf9efa007e1c..65bc8957d9c3 100644 --- a/drivers/media/rc/keymaps/rc-apac-viewcomp.c +++ b/drivers/media/rc/keymaps/rc-apac-viewcomp.c @@ -57,10 +57,10 @@ static struct rc_map_table apac_viewcomp[] = { static struct rc_map_list apac_viewcomp_map = { .map = { - .scan = apac_viewcomp, - .size = ARRAY_SIZE(apac_viewcomp), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_APAC_VIEWCOMP, + .scan = apac_viewcomp, + .size = ARRAY_SIZE(apac_viewcomp), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_APAC_VIEWCOMP, } }; diff --git a/drivers/media/rc/keymaps/rc-asus-pc39.c b/drivers/media/rc/keymaps/rc-asus-pc39.c index 9e674ba5dd4f..530e1d1158d1 100644 --- a/drivers/media/rc/keymaps/rc-asus-pc39.c +++ b/drivers/media/rc/keymaps/rc-asus-pc39.c @@ -68,10 +68,10 @@ static struct rc_map_table asus_pc39[] = { static struct rc_map_list asus_pc39_map = { .map = { - .scan = asus_pc39, - .size = ARRAY_SIZE(asus_pc39), - .rc_type = RC_TYPE_RC5, - .name = RC_MAP_ASUS_PC39, + .scan = asus_pc39, + .size = ARRAY_SIZE(asus_pc39), + .rc_proto = RC_PROTO_RC5, + .name = RC_MAP_ASUS_PC39, } }; diff --git a/drivers/media/rc/keymaps/rc-asus-ps3-100.c b/drivers/media/rc/keymaps/rc-asus-ps3-100.c index e45de35f528f..c91ba332984c 100644 --- a/drivers/media/rc/keymaps/rc-asus-ps3-100.c +++ b/drivers/media/rc/keymaps/rc-asus-ps3-100.c @@ -67,10 +67,10 @@ static struct rc_map_table asus_ps3_100[] = { static struct rc_map_list asus_ps3_100_map = { .map = { - .scan = asus_ps3_100, - .size = ARRAY_SIZE(asus_ps3_100), - .rc_type = RC_TYPE_RC5, - .name = RC_MAP_ASUS_PS3_100, + .scan = asus_ps3_100, + .size = ARRAY_SIZE(asus_ps3_100), + .rc_proto = 
RC_PROTO_RC5, + .name = RC_MAP_ASUS_PS3_100, } }; diff --git a/drivers/media/rc/keymaps/rc-ati-tv-wonder-hd-600.c b/drivers/media/rc/keymaps/rc-ati-tv-wonder-hd-600.c index 91392d4cfd6d..11b4bdd2392b 100644 --- a/drivers/media/rc/keymaps/rc-ati-tv-wonder-hd-600.c +++ b/drivers/media/rc/keymaps/rc-ati-tv-wonder-hd-600.c @@ -46,10 +46,10 @@ static struct rc_map_table ati_tv_wonder_hd_600[] = { static struct rc_map_list ati_tv_wonder_hd_600_map = { .map = { - .scan = ati_tv_wonder_hd_600, - .size = ARRAY_SIZE(ati_tv_wonder_hd_600), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_ATI_TV_WONDER_HD_600, + .scan = ati_tv_wonder_hd_600, + .size = ARRAY_SIZE(ati_tv_wonder_hd_600), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_ATI_TV_WONDER_HD_600, } }; diff --git a/drivers/media/rc/keymaps/rc-ati-x10.c b/drivers/media/rc/keymaps/rc-ati-x10.c index 4bdc709ec54d..11f1eb6ad712 100644 --- a/drivers/media/rc/keymaps/rc-ati-x10.c +++ b/drivers/media/rc/keymaps/rc-ati-x10.c @@ -114,10 +114,10 @@ static struct rc_map_table ati_x10[] = { static struct rc_map_list ati_x10_map = { .map = { - .scan = ati_x10, - .size = ARRAY_SIZE(ati_x10), - .rc_type = RC_TYPE_OTHER, - .name = RC_MAP_ATI_X10, + .scan = ati_x10, + .size = ARRAY_SIZE(ati_x10), + .rc_proto = RC_PROTO_OTHER, + .name = RC_MAP_ATI_X10, } }; diff --git a/drivers/media/rc/keymaps/rc-avermedia-a16d.c b/drivers/media/rc/keymaps/rc-avermedia-a16d.c index ff30a71d623e..510dc90ebf49 100644 --- a/drivers/media/rc/keymaps/rc-avermedia-a16d.c +++ b/drivers/media/rc/keymaps/rc-avermedia-a16d.c @@ -52,10 +52,10 @@ static struct rc_map_table avermedia_a16d[] = { static struct rc_map_list avermedia_a16d_map = { .map = { - .scan = avermedia_a16d, - .size = ARRAY_SIZE(avermedia_a16d), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_AVERMEDIA_A16D, + .scan = avermedia_a16d, + .size = ARRAY_SIZE(avermedia_a16d), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_AVERMEDIA_A16D, } }; diff --git a/drivers/media/rc/keymaps/rc-avermedia-cardbus.c b/drivers/media/rc/keymaps/rc-avermedia-cardbus.c index d7471a6de9b4..4bbc1e68d1b8 100644 --- a/drivers/media/rc/keymaps/rc-avermedia-cardbus.c +++ b/drivers/media/rc/keymaps/rc-avermedia-cardbus.c @@ -74,10 +74,10 @@ static struct rc_map_table avermedia_cardbus[] = { static struct rc_map_list avermedia_cardbus_map = { .map = { - .scan = avermedia_cardbus, - .size = ARRAY_SIZE(avermedia_cardbus), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_AVERMEDIA_CARDBUS, + .scan = avermedia_cardbus, + .size = ARRAY_SIZE(avermedia_cardbus), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_AVERMEDIA_CARDBUS, } }; diff --git a/drivers/media/rc/keymaps/rc-avermedia-dvbt.c b/drivers/media/rc/keymaps/rc-avermedia-dvbt.c index e2417d6331fe..f6b8547dbad3 100644 --- a/drivers/media/rc/keymaps/rc-avermedia-dvbt.c +++ b/drivers/media/rc/keymaps/rc-avermedia-dvbt.c @@ -55,10 +55,10 @@ static struct rc_map_table avermedia_dvbt[] = { static struct rc_map_list avermedia_dvbt_map = { .map = { - .scan = avermedia_dvbt, - .size = ARRAY_SIZE(avermedia_dvbt), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_AVERMEDIA_DVBT, + .scan = avermedia_dvbt, + .size = ARRAY_SIZE(avermedia_dvbt), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_AVERMEDIA_DVBT, } }; diff --git a/drivers/media/rc/keymaps/rc-avermedia-m135a.c b/drivers/media/rc/keymaps/rc-avermedia-m135a.c index 843598a5f1b5..9882e2cde975 
100644 --- a/drivers/media/rc/keymaps/rc-avermedia-m135a.c +++ b/drivers/media/rc/keymaps/rc-avermedia-m135a.c @@ -124,10 +124,10 @@ static struct rc_map_table avermedia_m135a[] = { static struct rc_map_list avermedia_m135a_map = { .map = { - .scan = avermedia_m135a, - .size = ARRAY_SIZE(avermedia_m135a), - .rc_type = RC_TYPE_NEC, - .name = RC_MAP_AVERMEDIA_M135A, + .scan = avermedia_m135a, + .size = ARRAY_SIZE(avermedia_m135a), + .rc_proto = RC_PROTO_NEC, + .name = RC_MAP_AVERMEDIA_M135A, } }; diff --git a/drivers/media/rc/keymaps/rc-avermedia-m733a-rm-k6.c b/drivers/media/rc/keymaps/rc-avermedia-m733a-rm-k6.c index b24e7481ac21..d86126e10375 100644 --- a/drivers/media/rc/keymaps/rc-avermedia-m733a-rm-k6.c +++ b/drivers/media/rc/keymaps/rc-avermedia-m733a-rm-k6.c @@ -72,10 +72,10 @@ static struct rc_map_table avermedia_m733a_rm_k6[] = { static struct rc_map_list avermedia_m733a_rm_k6_map = { .map = { - .scan = avermedia_m733a_rm_k6, - .size = ARRAY_SIZE(avermedia_m733a_rm_k6), - .rc_type = RC_TYPE_NEC, - .name = RC_MAP_AVERMEDIA_M733A_RM_K6, + .scan = avermedia_m733a_rm_k6, + .size = ARRAY_SIZE(avermedia_m733a_rm_k6), + .rc_proto = RC_PROTO_NEC, + .name = RC_MAP_AVERMEDIA_M733A_RM_K6, } }; diff --git a/drivers/media/rc/keymaps/rc-avermedia-rm-ks.c b/drivers/media/rc/keymaps/rc-avermedia-rm-ks.c index 2583400ca1b4..5d92d36d9174 100644 --- a/drivers/media/rc/keymaps/rc-avermedia-rm-ks.c +++ b/drivers/media/rc/keymaps/rc-avermedia-rm-ks.c @@ -56,10 +56,10 @@ static struct rc_map_table avermedia_rm_ks[] = { static struct rc_map_list avermedia_rm_ks_map = { .map = { - .scan = avermedia_rm_ks, - .size = ARRAY_SIZE(avermedia_rm_ks), - .rc_type = RC_TYPE_NEC, - .name = RC_MAP_AVERMEDIA_RM_KS, + .scan = avermedia_rm_ks, + .size = ARRAY_SIZE(avermedia_rm_ks), + .rc_proto = RC_PROTO_NEC, + .name = RC_MAP_AVERMEDIA_RM_KS, } }; diff --git a/drivers/media/rc/keymaps/rc-avermedia.c b/drivers/media/rc/keymaps/rc-avermedia.c index 3f68fbecc188..6503f11c7df5 100644 --- a/drivers/media/rc/keymaps/rc-avermedia.c +++ b/drivers/media/rc/keymaps/rc-avermedia.c @@ -63,10 +63,10 @@ static struct rc_map_table avermedia[] = { static struct rc_map_list avermedia_map = { .map = { - .scan = avermedia, - .size = ARRAY_SIZE(avermedia), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_AVERMEDIA, + .scan = avermedia, + .size = ARRAY_SIZE(avermedia), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_AVERMEDIA, } }; diff --git a/drivers/media/rc/keymaps/rc-avertv-303.c b/drivers/media/rc/keymaps/rc-avertv-303.c index c35bc5b835c4..fbdd7ada57ce 100644 --- a/drivers/media/rc/keymaps/rc-avertv-303.c +++ b/drivers/media/rc/keymaps/rc-avertv-303.c @@ -62,10 +62,10 @@ static struct rc_map_table avertv_303[] = { static struct rc_map_list avertv_303_map = { .map = { - .scan = avertv_303, - .size = ARRAY_SIZE(avertv_303), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_AVERTV_303, + .scan = avertv_303, + .size = ARRAY_SIZE(avertv_303), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_AVERTV_303, } }; diff --git a/drivers/media/rc/keymaps/rc-azurewave-ad-tu700.c b/drivers/media/rc/keymaps/rc-azurewave-ad-tu700.c index ea7f2d0f31eb..18d7dcb869b0 100644 --- a/drivers/media/rc/keymaps/rc-azurewave-ad-tu700.c +++ b/drivers/media/rc/keymaps/rc-azurewave-ad-tu700.c @@ -79,10 +79,10 @@ static struct rc_map_table azurewave_ad_tu700[] = { static struct rc_map_list azurewave_ad_tu700_map = { .map = { - .scan = azurewave_ad_tu700, - .size = 
ARRAY_SIZE(azurewave_ad_tu700), - .rc_type = RC_TYPE_NEC, - .name = RC_MAP_AZUREWAVE_AD_TU700, + .scan = azurewave_ad_tu700, + .size = ARRAY_SIZE(azurewave_ad_tu700), + .rc_proto = RC_PROTO_NEC, + .name = RC_MAP_AZUREWAVE_AD_TU700, } }; diff --git a/drivers/media/rc/keymaps/rc-behold-columbus.c b/drivers/media/rc/keymaps/rc-behold-columbus.c index 1fc344e9daa7..d256743be998 100644 --- a/drivers/media/rc/keymaps/rc-behold-columbus.c +++ b/drivers/media/rc/keymaps/rc-behold-columbus.c @@ -85,10 +85,10 @@ static struct rc_map_table behold_columbus[] = { static struct rc_map_list behold_columbus_map = { .map = { - .scan = behold_columbus, - .size = ARRAY_SIZE(behold_columbus), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_BEHOLD_COLUMBUS, + .scan = behold_columbus, + .size = ARRAY_SIZE(behold_columbus), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_BEHOLD_COLUMBUS, } }; diff --git a/drivers/media/rc/keymaps/rc-behold.c b/drivers/media/rc/keymaps/rc-behold.c index 520a96f2ff86..93dc795adc67 100644 --- a/drivers/media/rc/keymaps/rc-behold.c +++ b/drivers/media/rc/keymaps/rc-behold.c @@ -118,10 +118,10 @@ static struct rc_map_table behold[] = { static struct rc_map_list behold_map = { .map = { - .scan = behold, - .size = ARRAY_SIZE(behold), - .rc_type = RC_TYPE_NEC, - .name = RC_MAP_BEHOLD, + .scan = behold, + .size = ARRAY_SIZE(behold), + .rc_proto = RC_PROTO_NEC, + .name = RC_MAP_BEHOLD, } }; diff --git a/drivers/media/rc/keymaps/rc-budget-ci-old.c b/drivers/media/rc/keymaps/rc-budget-ci-old.c index b196a5f436a3..81ea1424d9e5 100644 --- a/drivers/media/rc/keymaps/rc-budget-ci-old.c +++ b/drivers/media/rc/keymaps/rc-budget-ci-old.c @@ -70,10 +70,10 @@ static struct rc_map_table budget_ci_old[] = { static struct rc_map_list budget_ci_old_map = { .map = { - .scan = budget_ci_old, - .size = ARRAY_SIZE(budget_ci_old), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_BUDGET_CI_OLD, + .scan = budget_ci_old, + .size = ARRAY_SIZE(budget_ci_old), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_BUDGET_CI_OLD, } }; diff --git a/drivers/media/rc/keymaps/rc-cec.c b/drivers/media/rc/keymaps/rc-cec.c index 354c8e724b8e..76d34abb7c85 100644 --- a/drivers/media/rc/keymaps/rc-cec.c +++ b/drivers/media/rc/keymaps/rc-cec.c @@ -160,7 +160,7 @@ static struct rc_map_list cec_map = { .map = { .scan = cec, .size = ARRAY_SIZE(cec), - .rc_type = RC_TYPE_CEC, + .rc_proto = RC_PROTO_CEC, .name = RC_MAP_CEC, } }; diff --git a/drivers/media/rc/keymaps/rc-cinergy-1400.c b/drivers/media/rc/keymaps/rc-cinergy-1400.c index a099c080bf8c..bcb96b3dda85 100644 --- a/drivers/media/rc/keymaps/rc-cinergy-1400.c +++ b/drivers/media/rc/keymaps/rc-cinergy-1400.c @@ -61,10 +61,10 @@ static struct rc_map_table cinergy_1400[] = { static struct rc_map_list cinergy_1400_map = { .map = { - .scan = cinergy_1400, - .size = ARRAY_SIZE(cinergy_1400), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_CINERGY_1400, + .scan = cinergy_1400, + .size = ARRAY_SIZE(cinergy_1400), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_CINERGY_1400, } }; diff --git a/drivers/media/rc/keymaps/rc-cinergy.c b/drivers/media/rc/keymaps/rc-cinergy.c index b0f4328bdd6f..fd56c402aae5 100644 --- a/drivers/media/rc/keymaps/rc-cinergy.c +++ b/drivers/media/rc/keymaps/rc-cinergy.c @@ -55,10 +55,10 @@ static struct rc_map_table cinergy[] = { static struct rc_map_list cinergy_map = { .map = { - .scan = cinergy, - .size = ARRAY_SIZE(cinergy), - .rc_type = 
RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_CINERGY, + .scan = cinergy, + .size = ARRAY_SIZE(cinergy), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_CINERGY, } }; diff --git a/drivers/media/rc/keymaps/rc-d680-dmb.c b/drivers/media/rc/keymaps/rc-d680-dmb.c index bb5745d29d8a..2c94b9d88b67 100644 --- a/drivers/media/rc/keymaps/rc-d680-dmb.c +++ b/drivers/media/rc/keymaps/rc-d680-dmb.c @@ -51,10 +51,10 @@ static struct rc_map_table rc_map_d680_dmb_table[] = { static struct rc_map_list d680_dmb_map = { .map = { - .scan = rc_map_d680_dmb_table, - .size = ARRAY_SIZE(rc_map_d680_dmb_table), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_D680_DMB, + .scan = rc_map_d680_dmb_table, + .size = ARRAY_SIZE(rc_map_d680_dmb_table), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_D680_DMB, } }; diff --git a/drivers/media/rc/keymaps/rc-delock-61959.c b/drivers/media/rc/keymaps/rc-delock-61959.c index 01bed864f09d..62de69d78d92 100644 --- a/drivers/media/rc/keymaps/rc-delock-61959.c +++ b/drivers/media/rc/keymaps/rc-delock-61959.c @@ -58,10 +58,10 @@ static struct rc_map_table delock_61959[] = { static struct rc_map_list delock_61959_map = { .map = { - .scan = delock_61959, - .size = ARRAY_SIZE(delock_61959), - .rc_type = RC_TYPE_NEC, - .name = RC_MAP_DELOCK_61959, + .scan = delock_61959, + .size = ARRAY_SIZE(delock_61959), + .rc_proto = RC_PROTO_NEC, + .name = RC_MAP_DELOCK_61959, } }; diff --git a/drivers/media/rc/keymaps/rc-dib0700-nec.c b/drivers/media/rc/keymaps/rc-dib0700-nec.c index a0fa543c9f9e..1b4df106b7b5 100644 --- a/drivers/media/rc/keymaps/rc-dib0700-nec.c +++ b/drivers/media/rc/keymaps/rc-dib0700-nec.c @@ -101,10 +101,10 @@ static struct rc_map_table dib0700_nec_table[] = { static struct rc_map_list dib0700_nec_map = { .map = { - .scan = dib0700_nec_table, - .size = ARRAY_SIZE(dib0700_nec_table), - .rc_type = RC_TYPE_NEC, - .name = RC_MAP_DIB0700_NEC_TABLE, + .scan = dib0700_nec_table, + .size = ARRAY_SIZE(dib0700_nec_table), + .rc_proto = RC_PROTO_NEC, + .name = RC_MAP_DIB0700_NEC_TABLE, } }; diff --git a/drivers/media/rc/keymaps/rc-dib0700-rc5.c b/drivers/media/rc/keymaps/rc-dib0700-rc5.c index 907941145eb7..b0f8151bb824 100644 --- a/drivers/media/rc/keymaps/rc-dib0700-rc5.c +++ b/drivers/media/rc/keymaps/rc-dib0700-rc5.c @@ -212,10 +212,10 @@ static struct rc_map_table dib0700_rc5_table[] = { static struct rc_map_list dib0700_rc5_map = { .map = { - .scan = dib0700_rc5_table, - .size = ARRAY_SIZE(dib0700_rc5_table), - .rc_type = RC_TYPE_RC5, - .name = RC_MAP_DIB0700_RC5_TABLE, + .scan = dib0700_rc5_table, + .size = ARRAY_SIZE(dib0700_rc5_table), + .rc_proto = RC_PROTO_RC5, + .name = RC_MAP_DIB0700_RC5_TABLE, } }; diff --git a/drivers/media/rc/keymaps/rc-digitalnow-tinytwin.c b/drivers/media/rc/keymaps/rc-digitalnow-tinytwin.c index bed78acb9198..01ca8b39359f 100644 --- a/drivers/media/rc/keymaps/rc-digitalnow-tinytwin.c +++ b/drivers/media/rc/keymaps/rc-digitalnow-tinytwin.c @@ -75,10 +75,10 @@ static struct rc_map_table digitalnow_tinytwin[] = { static struct rc_map_list digitalnow_tinytwin_map = { .map = { - .scan = digitalnow_tinytwin, - .size = ARRAY_SIZE(digitalnow_tinytwin), - .rc_type = RC_TYPE_NEC, - .name = RC_MAP_DIGITALNOW_TINYTWIN, + .scan = digitalnow_tinytwin, + .size = ARRAY_SIZE(digitalnow_tinytwin), + .rc_proto = RC_PROTO_NEC, + .name = RC_MAP_DIGITALNOW_TINYTWIN, } }; diff --git a/drivers/media/rc/keymaps/rc-digittrade.c b/drivers/media/rc/keymaps/rc-digittrade.c index a3b97a1fe223..a54b1d632ca6 
100644 --- a/drivers/media/rc/keymaps/rc-digittrade.c +++ b/drivers/media/rc/keymaps/rc-digittrade.c @@ -59,10 +59,10 @@ static struct rc_map_table digittrade[] = { static struct rc_map_list digittrade_map = { .map = { - .scan = digittrade, - .size = ARRAY_SIZE(digittrade), - .rc_type = RC_TYPE_NEC, - .name = RC_MAP_DIGITTRADE, + .scan = digittrade, + .size = ARRAY_SIZE(digittrade), + .rc_proto = RC_PROTO_NEC, + .name = RC_MAP_DIGITTRADE, } }; diff --git a/drivers/media/rc/keymaps/rc-dm1105-nec.c b/drivers/media/rc/keymaps/rc-dm1105-nec.c index 46e7ae414cc8..c353445d10ed 100644 --- a/drivers/media/rc/keymaps/rc-dm1105-nec.c +++ b/drivers/media/rc/keymaps/rc-dm1105-nec.c @@ -53,10 +53,10 @@ static struct rc_map_table dm1105_nec[] = { static struct rc_map_list dm1105_nec_map = { .map = { - .scan = dm1105_nec, - .size = ARRAY_SIZE(dm1105_nec), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_DM1105_NEC, + .scan = dm1105_nec, + .size = ARRAY_SIZE(dm1105_nec), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_DM1105_NEC, } }; diff --git a/drivers/media/rc/keymaps/rc-dntv-live-dvb-t.c b/drivers/media/rc/keymaps/rc-dntv-live-dvb-t.c index d2826b46fea2..5bafd5b70f5e 100644 --- a/drivers/media/rc/keymaps/rc-dntv-live-dvb-t.c +++ b/drivers/media/rc/keymaps/rc-dntv-live-dvb-t.c @@ -55,10 +55,10 @@ static struct rc_map_table dntv_live_dvb_t[] = { static struct rc_map_list dntv_live_dvb_t_map = { .map = { - .scan = dntv_live_dvb_t, - .size = ARRAY_SIZE(dntv_live_dvb_t), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_DNTV_LIVE_DVB_T, + .scan = dntv_live_dvb_t, + .size = ARRAY_SIZE(dntv_live_dvb_t), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_DNTV_LIVE_DVB_T, } }; diff --git a/drivers/media/rc/keymaps/rc-dntv-live-dvbt-pro.c b/drivers/media/rc/keymaps/rc-dntv-live-dvbt-pro.c index 0d74769467b5..360167c8829b 100644 --- a/drivers/media/rc/keymaps/rc-dntv-live-dvbt-pro.c +++ b/drivers/media/rc/keymaps/rc-dntv-live-dvbt-pro.c @@ -74,10 +74,10 @@ static struct rc_map_table dntv_live_dvbt_pro[] = { static struct rc_map_list dntv_live_dvbt_pro_map = { .map = { - .scan = dntv_live_dvbt_pro, - .size = ARRAY_SIZE(dntv_live_dvbt_pro), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_DNTV_LIVE_DVBT_PRO, + .scan = dntv_live_dvbt_pro, + .size = ARRAY_SIZE(dntv_live_dvbt_pro), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_DNTV_LIVE_DVBT_PRO, } }; diff --git a/drivers/media/rc/keymaps/rc-dtt200u.c b/drivers/media/rc/keymaps/rc-dtt200u.c index 25650e9e4664..c932d8b6c509 100644 --- a/drivers/media/rc/keymaps/rc-dtt200u.c +++ b/drivers/media/rc/keymaps/rc-dtt200u.c @@ -35,10 +35,10 @@ static struct rc_map_table dtt200u_table[] = { static struct rc_map_list dtt200u_map = { .map = { - .scan = dtt200u_table, - .size = ARRAY_SIZE(dtt200u_table), - .rc_type = RC_TYPE_NEC, - .name = RC_MAP_DTT200U, + .scan = dtt200u_table, + .size = ARRAY_SIZE(dtt200u_table), + .rc_proto = RC_PROTO_NEC, + .name = RC_MAP_DTT200U, } }; diff --git a/drivers/media/rc/keymaps/rc-dvbsky.c b/drivers/media/rc/keymaps/rc-dvbsky.c index c5115a1165d1..d6c0b4c1e20e 100644 --- a/drivers/media/rc/keymaps/rc-dvbsky.c +++ b/drivers/media/rc/keymaps/rc-dvbsky.c @@ -54,10 +54,10 @@ static struct rc_map_table rc5_dvbsky[] = { static struct rc_map_list rc5_dvbsky_map = { .map = { - .scan = rc5_dvbsky, - .size = ARRAY_SIZE(rc5_dvbsky), - .rc_type = RC_TYPE_RC5, - .name = RC_MAP_DVBSKY, + .scan = rc5_dvbsky, + .size = 
ARRAY_SIZE(rc5_dvbsky), + .rc_proto = RC_PROTO_RC5, + .name = RC_MAP_DVBSKY, } }; diff --git a/drivers/media/rc/keymaps/rc-dvico-mce.c b/drivers/media/rc/keymaps/rc-dvico-mce.c index d1e861f4d095..e4cee190b923 100644 --- a/drivers/media/rc/keymaps/rc-dvico-mce.c +++ b/drivers/media/rc/keymaps/rc-dvico-mce.c @@ -61,10 +61,10 @@ static struct rc_map_table rc_map_dvico_mce_table[] = { static struct rc_map_list dvico_mce_map = { .map = { - .scan = rc_map_dvico_mce_table, - .size = ARRAY_SIZE(rc_map_dvico_mce_table), - .rc_type = RC_TYPE_NEC, - .name = RC_MAP_DVICO_MCE, + .scan = rc_map_dvico_mce_table, + .size = ARRAY_SIZE(rc_map_dvico_mce_table), + .rc_proto = RC_PROTO_NEC, + .name = RC_MAP_DVICO_MCE, } }; diff --git a/drivers/media/rc/keymaps/rc-dvico-portable.c b/drivers/media/rc/keymaps/rc-dvico-portable.c index ac4cb515cbf1..cdd21f54aa61 100644 --- a/drivers/media/rc/keymaps/rc-dvico-portable.c +++ b/drivers/media/rc/keymaps/rc-dvico-portable.c @@ -52,10 +52,10 @@ static struct rc_map_table rc_map_dvico_portable_table[] = { static struct rc_map_list dvico_portable_map = { .map = { - .scan = rc_map_dvico_portable_table, - .size = ARRAY_SIZE(rc_map_dvico_portable_table), - .rc_type = RC_TYPE_NEC, - .name = RC_MAP_DVICO_PORTABLE, + .scan = rc_map_dvico_portable_table, + .size = ARRAY_SIZE(rc_map_dvico_portable_table), + .rc_proto = RC_PROTO_NEC, + .name = RC_MAP_DVICO_PORTABLE, } }; diff --git a/drivers/media/rc/keymaps/rc-em-terratec.c b/drivers/media/rc/keymaps/rc-em-terratec.c index 7f1e06be175b..18e1a2679c20 100644 --- a/drivers/media/rc/keymaps/rc-em-terratec.c +++ b/drivers/media/rc/keymaps/rc-em-terratec.c @@ -46,10 +46,10 @@ static struct rc_map_table em_terratec[] = { static struct rc_map_list em_terratec_map = { .map = { - .scan = em_terratec, - .size = ARRAY_SIZE(em_terratec), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_EM_TERRATEC, + .scan = em_terratec, + .size = ARRAY_SIZE(em_terratec), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_EM_TERRATEC, } }; diff --git a/drivers/media/rc/keymaps/rc-encore-enltv-fm53.c b/drivers/media/rc/keymaps/rc-encore-enltv-fm53.c index 4fc3904daf06..72ffd5cb0108 100644 --- a/drivers/media/rc/keymaps/rc-encore-enltv-fm53.c +++ b/drivers/media/rc/keymaps/rc-encore-enltv-fm53.c @@ -58,10 +58,10 @@ static struct rc_map_table encore_enltv_fm53[] = { static struct rc_map_list encore_enltv_fm53_map = { .map = { - .scan = encore_enltv_fm53, - .size = ARRAY_SIZE(encore_enltv_fm53), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_ENCORE_ENLTV_FM53, + .scan = encore_enltv_fm53, + .size = ARRAY_SIZE(encore_enltv_fm53), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_ENCORE_ENLTV_FM53, } }; diff --git a/drivers/media/rc/keymaps/rc-encore-enltv.c b/drivers/media/rc/keymaps/rc-encore-enltv.c index f1914e23d203..e0381e7aa964 100644 --- a/drivers/media/rc/keymaps/rc-encore-enltv.c +++ b/drivers/media/rc/keymaps/rc-encore-enltv.c @@ -89,10 +89,10 @@ static struct rc_map_table encore_enltv[] = { static struct rc_map_list encore_enltv_map = { .map = { - .scan = encore_enltv, - .size = ARRAY_SIZE(encore_enltv), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_ENCORE_ENLTV, + .scan = encore_enltv, + .size = ARRAY_SIZE(encore_enltv), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_ENCORE_ENLTV, } }; diff --git a/drivers/media/rc/keymaps/rc-encore-enltv2.c b/drivers/media/rc/keymaps/rc-encore-enltv2.c index 9c6c55240d18..e9b0bfba319c 100644 --- 
a/drivers/media/rc/keymaps/rc-encore-enltv2.c +++ b/drivers/media/rc/keymaps/rc-encore-enltv2.c @@ -67,10 +67,10 @@ static struct rc_map_table encore_enltv2[] = { static struct rc_map_list encore_enltv2_map = { .map = { - .scan = encore_enltv2, - .size = ARRAY_SIZE(encore_enltv2), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_ENCORE_ENLTV2, + .scan = encore_enltv2, + .size = ARRAY_SIZE(encore_enltv2), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_ENCORE_ENLTV2, } }; diff --git a/drivers/media/rc/keymaps/rc-evga-indtube.c b/drivers/media/rc/keymaps/rc-evga-indtube.c index 2370d2a3deb6..b77c5e908668 100644 --- a/drivers/media/rc/keymaps/rc-evga-indtube.c +++ b/drivers/media/rc/keymaps/rc-evga-indtube.c @@ -38,10 +38,10 @@ static struct rc_map_table evga_indtube[] = { static struct rc_map_list evga_indtube_map = { .map = { - .scan = evga_indtube, - .size = ARRAY_SIZE(evga_indtube), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_EVGA_INDTUBE, + .scan = evga_indtube, + .size = ARRAY_SIZE(evga_indtube), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_EVGA_INDTUBE, } }; diff --git a/drivers/media/rc/keymaps/rc-eztv.c b/drivers/media/rc/keymaps/rc-eztv.c index b5c96ed84376..5013b3b2aa93 100644 --- a/drivers/media/rc/keymaps/rc-eztv.c +++ b/drivers/media/rc/keymaps/rc-eztv.c @@ -73,10 +73,10 @@ static struct rc_map_table eztv[] = { static struct rc_map_list eztv_map = { .map = { - .scan = eztv, - .size = ARRAY_SIZE(eztv), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_EZTV, + .scan = eztv, + .size = ARRAY_SIZE(eztv), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_EZTV, } }; diff --git a/drivers/media/rc/keymaps/rc-flydvb.c b/drivers/media/rc/keymaps/rc-flydvb.c index 25cb89fac03c..418b32521273 100644 --- a/drivers/media/rc/keymaps/rc-flydvb.c +++ b/drivers/media/rc/keymaps/rc-flydvb.c @@ -54,10 +54,10 @@ static struct rc_map_table flydvb[] = { static struct rc_map_list flydvb_map = { .map = { - .scan = flydvb, - .size = ARRAY_SIZE(flydvb), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_FLYDVB, + .scan = flydvb, + .size = ARRAY_SIZE(flydvb), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_FLYDVB, } }; diff --git a/drivers/media/rc/keymaps/rc-flyvideo.c b/drivers/media/rc/keymaps/rc-flyvideo.c index e71377dd0534..93fb87ecf061 100644 --- a/drivers/media/rc/keymaps/rc-flyvideo.c +++ b/drivers/media/rc/keymaps/rc-flyvideo.c @@ -47,10 +47,10 @@ static struct rc_map_table flyvideo[] = { static struct rc_map_list flyvideo_map = { .map = { - .scan = flyvideo, - .size = ARRAY_SIZE(flyvideo), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_FLYVIDEO, + .scan = flyvideo, + .size = ARRAY_SIZE(flyvideo), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_FLYVIDEO, } }; diff --git a/drivers/media/rc/keymaps/rc-fusionhdtv-mce.c b/drivers/media/rc/keymaps/rc-fusionhdtv-mce.c index cf0608dc83d5..9ed3f749262b 100644 --- a/drivers/media/rc/keymaps/rc-fusionhdtv-mce.c +++ b/drivers/media/rc/keymaps/rc-fusionhdtv-mce.c @@ -75,10 +75,10 @@ static struct rc_map_table fusionhdtv_mce[] = { static struct rc_map_list fusionhdtv_mce_map = { .map = { - .scan = fusionhdtv_mce, - .size = ARRAY_SIZE(fusionhdtv_mce), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_FUSIONHDTV_MCE, + .scan = fusionhdtv_mce, + .size = ARRAY_SIZE(fusionhdtv_mce), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name 
= RC_MAP_FUSIONHDTV_MCE, } }; diff --git a/drivers/media/rc/keymaps/rc-gadmei-rm008z.c b/drivers/media/rc/keymaps/rc-gadmei-rm008z.c index 03575bdb2eca..3443b721d092 100644 --- a/drivers/media/rc/keymaps/rc-gadmei-rm008z.c +++ b/drivers/media/rc/keymaps/rc-gadmei-rm008z.c @@ -58,10 +58,10 @@ static struct rc_map_table gadmei_rm008z[] = { static struct rc_map_list gadmei_rm008z_map = { .map = { - .scan = gadmei_rm008z, - .size = ARRAY_SIZE(gadmei_rm008z), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_GADMEI_RM008Z, + .scan = gadmei_rm008z, + .size = ARRAY_SIZE(gadmei_rm008z), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_GADMEI_RM008Z, } }; diff --git a/drivers/media/rc/keymaps/rc-geekbox.c b/drivers/media/rc/keymaps/rc-geekbox.c index affc4c481888..4aa1b54bb52e 100644 --- a/drivers/media/rc/keymaps/rc-geekbox.c +++ b/drivers/media/rc/keymaps/rc-geekbox.c @@ -31,10 +31,10 @@ static struct rc_map_table geekbox[] = { static struct rc_map_list geekbox_map = { .map = { - .scan = geekbox, - .size = ARRAY_SIZE(geekbox), - .rc_type = RC_TYPE_NEC, - .name = RC_MAP_GEEKBOX, + .scan = geekbox, + .size = ARRAY_SIZE(geekbox), + .rc_proto = RC_PROTO_NEC, + .name = RC_MAP_GEEKBOX, } }; diff --git a/drivers/media/rc/keymaps/rc-genius-tvgo-a11mce.c b/drivers/media/rc/keymaps/rc-genius-tvgo-a11mce.c index b2ab13b0dcb1..d140e8d45bcc 100644 --- a/drivers/media/rc/keymaps/rc-genius-tvgo-a11mce.c +++ b/drivers/media/rc/keymaps/rc-genius-tvgo-a11mce.c @@ -61,10 +61,10 @@ static struct rc_map_table genius_tvgo_a11mce[] = { static struct rc_map_list genius_tvgo_a11mce_map = { .map = { - .scan = genius_tvgo_a11mce, - .size = ARRAY_SIZE(genius_tvgo_a11mce), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_GENIUS_TVGO_A11MCE, + .scan = genius_tvgo_a11mce, + .size = ARRAY_SIZE(genius_tvgo_a11mce), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_GENIUS_TVGO_A11MCE, } }; diff --git a/drivers/media/rc/keymaps/rc-gotview7135.c b/drivers/media/rc/keymaps/rc-gotview7135.c index 229a36ac7f0a..51230fbb52ba 100644 --- a/drivers/media/rc/keymaps/rc-gotview7135.c +++ b/drivers/media/rc/keymaps/rc-gotview7135.c @@ -56,10 +56,10 @@ static struct rc_map_table gotview7135[] = { static struct rc_map_list gotview7135_map = { .map = { - .scan = gotview7135, - .size = ARRAY_SIZE(gotview7135), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_GOTVIEW7135, + .scan = gotview7135, + .size = ARRAY_SIZE(gotview7135), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_GOTVIEW7135, } }; diff --git a/drivers/media/rc/keymaps/rc-hauppauge.c b/drivers/media/rc/keymaps/rc-hauppauge.c index 36d57f7c532b..890164b68d64 100644 --- a/drivers/media/rc/keymaps/rc-hauppauge.c +++ b/drivers/media/rc/keymaps/rc-hauppauge.c @@ -269,10 +269,10 @@ static struct rc_map_table rc5_hauppauge_new[] = { static struct rc_map_list rc5_hauppauge_new_map = { .map = { - .scan = rc5_hauppauge_new, - .size = ARRAY_SIZE(rc5_hauppauge_new), - .rc_type = RC_TYPE_RC5, - .name = RC_MAP_HAUPPAUGE, + .scan = rc5_hauppauge_new, + .size = ARRAY_SIZE(rc5_hauppauge_new), + .rc_proto = RC_PROTO_RC5, + .name = RC_MAP_HAUPPAUGE, } }; diff --git a/drivers/media/rc/keymaps/rc-imon-mce.c b/drivers/media/rc/keymaps/rc-imon-mce.c index f0da960560b0..6a69ce1451f1 100644 --- a/drivers/media/rc/keymaps/rc-imon-mce.c +++ b/drivers/media/rc/keymaps/rc-imon-mce.c @@ -118,11 +118,11 @@ static struct rc_map_table imon_mce[] = { static struct rc_map_list imon_mce_map = { .map = { 
- .scan = imon_mce, - .size = ARRAY_SIZE(imon_mce), + .scan = imon_mce, + .size = ARRAY_SIZE(imon_mce), /* its RC6, but w/a hardware decoder */ - .rc_type = RC_TYPE_RC6_MCE, - .name = RC_MAP_IMON_MCE, + .rc_proto = RC_PROTO_RC6_MCE, + .name = RC_MAP_IMON_MCE, } }; diff --git a/drivers/media/rc/keymaps/rc-imon-pad.c b/drivers/media/rc/keymaps/rc-imon-pad.c index 999c6295c70e..a7296ffbf218 100644 --- a/drivers/media/rc/keymaps/rc-imon-pad.c +++ b/drivers/media/rc/keymaps/rc-imon-pad.c @@ -132,11 +132,11 @@ static struct rc_map_table imon_pad[] = { static struct rc_map_list imon_pad_map = { .map = { - .scan = imon_pad, - .size = ARRAY_SIZE(imon_pad), + .scan = imon_pad, + .size = ARRAY_SIZE(imon_pad), /* actual protocol details unknown, hardware decoder */ - .rc_type = RC_TYPE_OTHER, - .name = RC_MAP_IMON_PAD, + .rc_proto = RC_PROTO_OTHER, + .name = RC_MAP_IMON_PAD, } }; diff --git a/drivers/media/rc/keymaps/rc-iodata-bctv7e.c b/drivers/media/rc/keymaps/rc-iodata-bctv7e.c index 9ee154cb0c6b..8cf87a15c4f2 100644 --- a/drivers/media/rc/keymaps/rc-iodata-bctv7e.c +++ b/drivers/media/rc/keymaps/rc-iodata-bctv7e.c @@ -65,10 +65,10 @@ static struct rc_map_table iodata_bctv7e[] = { static struct rc_map_list iodata_bctv7e_map = { .map = { - .scan = iodata_bctv7e, - .size = ARRAY_SIZE(iodata_bctv7e), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_IODATA_BCTV7E, + .scan = iodata_bctv7e, + .size = ARRAY_SIZE(iodata_bctv7e), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_IODATA_BCTV7E, } }; diff --git a/drivers/media/rc/keymaps/rc-it913x-v1.c b/drivers/media/rc/keymaps/rc-it913x-v1.c index 0ac775fd109d..908d14848ae8 100644 --- a/drivers/media/rc/keymaps/rc-it913x-v1.c +++ b/drivers/media/rc/keymaps/rc-it913x-v1.c @@ -71,10 +71,10 @@ static struct rc_map_table it913x_v1_rc[] = { static struct rc_map_list it913x_v1_map = { .map = { - .scan = it913x_v1_rc, - .size = ARRAY_SIZE(it913x_v1_rc), - .rc_type = RC_TYPE_NEC, - .name = RC_MAP_IT913X_V1, + .scan = it913x_v1_rc, + .size = ARRAY_SIZE(it913x_v1_rc), + .rc_proto = RC_PROTO_NEC, + .name = RC_MAP_IT913X_V1, } }; diff --git a/drivers/media/rc/keymaps/rc-it913x-v2.c b/drivers/media/rc/keymaps/rc-it913x-v2.c index bd42a30ec06f..05ab7fa4f90b 100644 --- a/drivers/media/rc/keymaps/rc-it913x-v2.c +++ b/drivers/media/rc/keymaps/rc-it913x-v2.c @@ -70,10 +70,10 @@ static struct rc_map_table it913x_v2_rc[] = { static struct rc_map_list it913x_v2_map = { .map = { - .scan = it913x_v2_rc, - .size = ARRAY_SIZE(it913x_v2_rc), - .rc_type = RC_TYPE_NEC, - .name = RC_MAP_IT913X_V2, + .scan = it913x_v2_rc, + .size = ARRAY_SIZE(it913x_v2_rc), + .rc_proto = RC_PROTO_NEC, + .name = RC_MAP_IT913X_V2, } }; diff --git a/drivers/media/rc/keymaps/rc-kaiomy.c b/drivers/media/rc/keymaps/rc-kaiomy.c index 60803a732c08..e791f1e1b43b 100644 --- a/drivers/media/rc/keymaps/rc-kaiomy.c +++ b/drivers/media/rc/keymaps/rc-kaiomy.c @@ -64,10 +64,10 @@ static struct rc_map_table kaiomy[] = { static struct rc_map_list kaiomy_map = { .map = { - .scan = kaiomy, - .size = ARRAY_SIZE(kaiomy), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_KAIOMY, + .scan = kaiomy, + .size = ARRAY_SIZE(kaiomy), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_KAIOMY, } }; diff --git a/drivers/media/rc/keymaps/rc-kworld-315u.c b/drivers/media/rc/keymaps/rc-kworld-315u.c index ba087eed1ed9..71dce0138f0e 100644 --- a/drivers/media/rc/keymaps/rc-kworld-315u.c +++ b/drivers/media/rc/keymaps/rc-kworld-315u.c @@ -60,10 +60,10 @@ static 
struct rc_map_table kworld_315u[] = { static struct rc_map_list kworld_315u_map = { .map = { - .scan = kworld_315u, - .size = ARRAY_SIZE(kworld_315u), - .rc_type = RC_TYPE_NEC, - .name = RC_MAP_KWORLD_315U, + .scan = kworld_315u, + .size = ARRAY_SIZE(kworld_315u), + .rc_proto = RC_PROTO_NEC, + .name = RC_MAP_KWORLD_315U, } }; diff --git a/drivers/media/rc/keymaps/rc-kworld-pc150u.c b/drivers/media/rc/keymaps/rc-kworld-pc150u.c index b92e571f4def..3846059060aa 100644 --- a/drivers/media/rc/keymaps/rc-kworld-pc150u.c +++ b/drivers/media/rc/keymaps/rc-kworld-pc150u.c @@ -78,10 +78,10 @@ static struct rc_map_table kworld_pc150u[] = { static struct rc_map_list kworld_pc150u_map = { .map = { - .scan = kworld_pc150u, - .size = ARRAY_SIZE(kworld_pc150u), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_KWORLD_PC150U, + .scan = kworld_pc150u, + .size = ARRAY_SIZE(kworld_pc150u), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_KWORLD_PC150U, } }; diff --git a/drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c b/drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c index edc868564f99..e0322ed16c94 100644 --- a/drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c +++ b/drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c @@ -76,10 +76,10 @@ static struct rc_map_table kworld_plus_tv_analog[] = { static struct rc_map_list kworld_plus_tv_analog_map = { .map = { - .scan = kworld_plus_tv_analog, - .size = ARRAY_SIZE(kworld_plus_tv_analog), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_KWORLD_PLUS_TV_ANALOG, + .scan = kworld_plus_tv_analog, + .size = ARRAY_SIZE(kworld_plus_tv_analog), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_KWORLD_PLUS_TV_ANALOG, } }; diff --git a/drivers/media/rc/keymaps/rc-leadtek-y04g0051.c b/drivers/media/rc/keymaps/rc-leadtek-y04g0051.c index 03d762d986ee..e534a5601b6d 100644 --- a/drivers/media/rc/keymaps/rc-leadtek-y04g0051.c +++ b/drivers/media/rc/keymaps/rc-leadtek-y04g0051.c @@ -76,10 +76,10 @@ static struct rc_map_table leadtek_y04g0051[] = { static struct rc_map_list leadtek_y04g0051_map = { .map = { - .scan = leadtek_y04g0051, - .size = ARRAY_SIZE(leadtek_y04g0051), - .rc_type = RC_TYPE_NEC, - .name = RC_MAP_LEADTEK_Y04G0051, + .scan = leadtek_y04g0051, + .size = ARRAY_SIZE(leadtek_y04g0051), + .rc_proto = RC_PROTO_NEC, + .name = RC_MAP_LEADTEK_Y04G0051, } }; diff --git a/drivers/media/rc/keymaps/rc-lme2510.c b/drivers/media/rc/keymaps/rc-lme2510.c index 2b0027c41332..9c93f90f5c2b 100644 --- a/drivers/media/rc/keymaps/rc-lme2510.c +++ b/drivers/media/rc/keymaps/rc-lme2510.c @@ -87,10 +87,10 @@ static struct rc_map_table lme2510_rc[] = { static struct rc_map_list lme2510_map = { .map = { - .scan = lme2510_rc, - .size = ARRAY_SIZE(lme2510_rc), - .rc_type = RC_TYPE_NEC, - .name = RC_MAP_LME2510, + .scan = lme2510_rc, + .size = ARRAY_SIZE(lme2510_rc), + .rc_proto = RC_PROTO_NEC, + .name = RC_MAP_LME2510, } }; diff --git a/drivers/media/rc/keymaps/rc-manli.c b/drivers/media/rc/keymaps/rc-manli.c index 92424ef2aaa6..da566902a4dd 100644 --- a/drivers/media/rc/keymaps/rc-manli.c +++ b/drivers/media/rc/keymaps/rc-manli.c @@ -111,10 +111,10 @@ static struct rc_map_table manli[] = { static struct rc_map_list manli_map = { .map = { - .scan = manli, - .size = ARRAY_SIZE(manli), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_MANLI, + .scan = manli, + .size = ARRAY_SIZE(manli), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_MANLI, } }; diff --git 
a/drivers/media/rc/keymaps/rc-medion-x10-digitainer.c b/drivers/media/rc/keymaps/rc-medion-x10-digitainer.c index 966f9b3c71da..c9973340e546 100644 --- a/drivers/media/rc/keymaps/rc-medion-x10-digitainer.c +++ b/drivers/media/rc/keymaps/rc-medion-x10-digitainer.c @@ -98,10 +98,10 @@ static struct rc_map_table medion_x10_digitainer[] = { static struct rc_map_list medion_x10_digitainer_map = { .map = { - .scan = medion_x10_digitainer, - .size = ARRAY_SIZE(medion_x10_digitainer), - .rc_type = RC_TYPE_OTHER, - .name = RC_MAP_MEDION_X10_DIGITAINER, + .scan = medion_x10_digitainer, + .size = ARRAY_SIZE(medion_x10_digitainer), + .rc_proto = RC_PROTO_OTHER, + .name = RC_MAP_MEDION_X10_DIGITAINER, } }; diff --git a/drivers/media/rc/keymaps/rc-medion-x10-or2x.c b/drivers/media/rc/keymaps/rc-medion-x10-or2x.c index b077300ecb5c..103ad88d242c 100644 --- a/drivers/media/rc/keymaps/rc-medion-x10-or2x.c +++ b/drivers/media/rc/keymaps/rc-medion-x10-or2x.c @@ -83,10 +83,10 @@ static struct rc_map_table medion_x10_or2x[] = { static struct rc_map_list medion_x10_or2x_map = { .map = { - .scan = medion_x10_or2x, - .size = ARRAY_SIZE(medion_x10_or2x), - .rc_type = RC_TYPE_OTHER, - .name = RC_MAP_MEDION_X10_OR2X, + .scan = medion_x10_or2x, + .size = ARRAY_SIZE(medion_x10_or2x), + .rc_proto = RC_PROTO_OTHER, + .name = RC_MAP_MEDION_X10_OR2X, } }; diff --git a/drivers/media/rc/keymaps/rc-medion-x10.c b/drivers/media/rc/keymaps/rc-medion-x10.c index 479cdb897810..bbffa5dfe420 100644 --- a/drivers/media/rc/keymaps/rc-medion-x10.c +++ b/drivers/media/rc/keymaps/rc-medion-x10.c @@ -93,10 +93,10 @@ static struct rc_map_table medion_x10[] = { static struct rc_map_list medion_x10_map = { .map = { - .scan = medion_x10, - .size = ARRAY_SIZE(medion_x10), - .rc_type = RC_TYPE_OTHER, - .name = RC_MAP_MEDION_X10, + .scan = medion_x10, + .size = ARRAY_SIZE(medion_x10), + .rc_proto = RC_PROTO_OTHER, + .name = RC_MAP_MEDION_X10, } }; diff --git a/drivers/media/rc/keymaps/rc-msi-digivox-ii.c b/drivers/media/rc/keymaps/rc-msi-digivox-ii.c index 2fa71d0d72d7..94aa12d4b73c 100644 --- a/drivers/media/rc/keymaps/rc-msi-digivox-ii.c +++ b/drivers/media/rc/keymaps/rc-msi-digivox-ii.c @@ -44,10 +44,10 @@ static struct rc_map_table msi_digivox_ii[] = { static struct rc_map_list msi_digivox_ii_map = { .map = { - .scan = msi_digivox_ii, - .size = ARRAY_SIZE(msi_digivox_ii), - .rc_type = RC_TYPE_NEC, - .name = RC_MAP_MSI_DIGIVOX_II, + .scan = msi_digivox_ii, + .size = ARRAY_SIZE(msi_digivox_ii), + .rc_proto = RC_PROTO_NEC, + .name = RC_MAP_MSI_DIGIVOX_II, } }; diff --git a/drivers/media/rc/keymaps/rc-msi-digivox-iii.c b/drivers/media/rc/keymaps/rc-msi-digivox-iii.c index 303a0b73175b..8fec0c1dcb12 100644 --- a/drivers/media/rc/keymaps/rc-msi-digivox-iii.c +++ b/drivers/media/rc/keymaps/rc-msi-digivox-iii.c @@ -62,10 +62,10 @@ static struct rc_map_table msi_digivox_iii[] = { static struct rc_map_list msi_digivox_iii_map = { .map = { - .scan = msi_digivox_iii, - .size = ARRAY_SIZE(msi_digivox_iii), - .rc_type = RC_TYPE_NEC, - .name = RC_MAP_MSI_DIGIVOX_III, + .scan = msi_digivox_iii, + .size = ARRAY_SIZE(msi_digivox_iii), + .rc_proto = RC_PROTO_NEC, + .name = RC_MAP_MSI_DIGIVOX_III, } }; diff --git a/drivers/media/rc/keymaps/rc-msi-tvanywhere-plus.c b/drivers/media/rc/keymaps/rc-msi-tvanywhere-plus.c index fd7a55c56167..dfa0ed1d7667 100644 --- a/drivers/media/rc/keymaps/rc-msi-tvanywhere-plus.c +++ b/drivers/media/rc/keymaps/rc-msi-tvanywhere-plus.c @@ -100,10 +100,10 @@ static struct rc_map_table msi_tvanywhere_plus[] = { static struct 
rc_map_list msi_tvanywhere_plus_map = { .map = { - .scan = msi_tvanywhere_plus, - .size = ARRAY_SIZE(msi_tvanywhere_plus), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_MSI_TVANYWHERE_PLUS, + .scan = msi_tvanywhere_plus, + .size = ARRAY_SIZE(msi_tvanywhere_plus), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_MSI_TVANYWHERE_PLUS, } }; diff --git a/drivers/media/rc/keymaps/rc-msi-tvanywhere.c b/drivers/media/rc/keymaps/rc-msi-tvanywhere.c index 4233a8d4d63e..2111816a3f59 100644 --- a/drivers/media/rc/keymaps/rc-msi-tvanywhere.c +++ b/drivers/media/rc/keymaps/rc-msi-tvanywhere.c @@ -46,10 +46,10 @@ static struct rc_map_table msi_tvanywhere[] = { static struct rc_map_list msi_tvanywhere_map = { .map = { - .scan = msi_tvanywhere, - .size = ARRAY_SIZE(msi_tvanywhere), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_MSI_TVANYWHERE, + .scan = msi_tvanywhere, + .size = ARRAY_SIZE(msi_tvanywhere), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_MSI_TVANYWHERE, } }; diff --git a/drivers/media/rc/keymaps/rc-nebula.c b/drivers/media/rc/keymaps/rc-nebula.c index 4c50f33c7c41..109b6e1a8b1a 100644 --- a/drivers/media/rc/keymaps/rc-nebula.c +++ b/drivers/media/rc/keymaps/rc-nebula.c @@ -73,10 +73,10 @@ static struct rc_map_table nebula[] = { static struct rc_map_list nebula_map = { .map = { - .scan = nebula, - .size = ARRAY_SIZE(nebula), - .rc_type = RC_TYPE_RC5, - .name = RC_MAP_NEBULA, + .scan = nebula, + .size = ARRAY_SIZE(nebula), + .rc_proto = RC_PROTO_RC5, + .name = RC_MAP_NEBULA, } }; diff --git a/drivers/media/rc/keymaps/rc-nec-terratec-cinergy-xs.c b/drivers/media/rc/keymaps/rc-nec-terratec-cinergy-xs.c index 292bbad35d21..bb2d3a2962c0 100644 --- a/drivers/media/rc/keymaps/rc-nec-terratec-cinergy-xs.c +++ b/drivers/media/rc/keymaps/rc-nec-terratec-cinergy-xs.c @@ -134,10 +134,10 @@ static struct rc_map_table nec_terratec_cinergy_xs[] = { static struct rc_map_list nec_terratec_cinergy_xs_map = { .map = { - .scan = nec_terratec_cinergy_xs, - .size = ARRAY_SIZE(nec_terratec_cinergy_xs), - .rc_type = RC_TYPE_NEC, - .name = RC_MAP_NEC_TERRATEC_CINERGY_XS, + .scan = nec_terratec_cinergy_xs, + .size = ARRAY_SIZE(nec_terratec_cinergy_xs), + .rc_proto = RC_PROTO_NEC, + .name = RC_MAP_NEC_TERRATEC_CINERGY_XS, } }; diff --git a/drivers/media/rc/keymaps/rc-norwood.c b/drivers/media/rc/keymaps/rc-norwood.c index ca1b82a2c54f..cd25df336749 100644 --- a/drivers/media/rc/keymaps/rc-norwood.c +++ b/drivers/media/rc/keymaps/rc-norwood.c @@ -62,10 +62,10 @@ static struct rc_map_table norwood[] = { static struct rc_map_list norwood_map = { .map = { - .scan = norwood, - .size = ARRAY_SIZE(norwood), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_NORWOOD, + .scan = norwood, + .size = ARRAY_SIZE(norwood), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_NORWOOD, } }; diff --git a/drivers/media/rc/keymaps/rc-npgtech.c b/drivers/media/rc/keymaps/rc-npgtech.c index 1fb946024512..140bbc20a764 100644 --- a/drivers/media/rc/keymaps/rc-npgtech.c +++ b/drivers/media/rc/keymaps/rc-npgtech.c @@ -57,10 +57,10 @@ static struct rc_map_table npgtech[] = { static struct rc_map_list npgtech_map = { .map = { - .scan = npgtech, - .size = ARRAY_SIZE(npgtech), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_NPGTECH, + .scan = npgtech, + .size = ARRAY_SIZE(npgtech), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_NPGTECH, } }; diff --git 
a/drivers/media/rc/keymaps/rc-pctv-sedna.c b/drivers/media/rc/keymaps/rc-pctv-sedna.c index 5ef01ab3fd50..52b4558b7bd0 100644 --- a/drivers/media/rc/keymaps/rc-pctv-sedna.c +++ b/drivers/media/rc/keymaps/rc-pctv-sedna.c @@ -57,10 +57,10 @@ static struct rc_map_table pctv_sedna[] = { static struct rc_map_list pctv_sedna_map = { .map = { - .scan = pctv_sedna, - .size = ARRAY_SIZE(pctv_sedna), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_PCTV_SEDNA, + .scan = pctv_sedna, + .size = ARRAY_SIZE(pctv_sedna), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_PCTV_SEDNA, } }; diff --git a/drivers/media/rc/keymaps/rc-pinnacle-color.c b/drivers/media/rc/keymaps/rc-pinnacle-color.c index a218b471a4ca..973c9c34e304 100644 --- a/drivers/media/rc/keymaps/rc-pinnacle-color.c +++ b/drivers/media/rc/keymaps/rc-pinnacle-color.c @@ -71,10 +71,10 @@ static struct rc_map_table pinnacle_color[] = { static struct rc_map_list pinnacle_color_map = { .map = { - .scan = pinnacle_color, - .size = ARRAY_SIZE(pinnacle_color), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_PINNACLE_COLOR, + .scan = pinnacle_color, + .size = ARRAY_SIZE(pinnacle_color), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_PINNACLE_COLOR, } }; diff --git a/drivers/media/rc/keymaps/rc-pinnacle-grey.c b/drivers/media/rc/keymaps/rc-pinnacle-grey.c index 4a3f467a47a2..22e44b0d2a93 100644 --- a/drivers/media/rc/keymaps/rc-pinnacle-grey.c +++ b/drivers/media/rc/keymaps/rc-pinnacle-grey.c @@ -66,10 +66,10 @@ static struct rc_map_table pinnacle_grey[] = { static struct rc_map_list pinnacle_grey_map = { .map = { - .scan = pinnacle_grey, - .size = ARRAY_SIZE(pinnacle_grey), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_PINNACLE_GREY, + .scan = pinnacle_grey, + .size = ARRAY_SIZE(pinnacle_grey), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_PINNACLE_GREY, } }; diff --git a/drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c b/drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c index e89cc10b68bf..186dcf8e0491 100644 --- a/drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c +++ b/drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c @@ -47,10 +47,10 @@ static struct rc_map_table pinnacle_pctv_hd[] = { static struct rc_map_list pinnacle_pctv_hd_map = { .map = { - .scan = pinnacle_pctv_hd, - .size = ARRAY_SIZE(pinnacle_pctv_hd), - .rc_type = RC_TYPE_RC5, - .name = RC_MAP_PINNACLE_PCTV_HD, + .scan = pinnacle_pctv_hd, + .size = ARRAY_SIZE(pinnacle_pctv_hd), + .rc_proto = RC_PROTO_RC5, + .name = RC_MAP_PINNACLE_PCTV_HD, } }; diff --git a/drivers/media/rc/keymaps/rc-pixelview-002t.c b/drivers/media/rc/keymaps/rc-pixelview-002t.c index d967c3816fdc..b235ada2e28f 100644 --- a/drivers/media/rc/keymaps/rc-pixelview-002t.c +++ b/drivers/media/rc/keymaps/rc-pixelview-002t.c @@ -54,10 +54,10 @@ static struct rc_map_table pixelview_002t[] = { static struct rc_map_list pixelview_map = { .map = { - .scan = pixelview_002t, - .size = ARRAY_SIZE(pixelview_002t), - .rc_type = RC_TYPE_NEC, - .name = RC_MAP_PIXELVIEW_002T, + .scan = pixelview_002t, + .size = ARRAY_SIZE(pixelview_002t), + .rc_proto = RC_PROTO_NEC, + .name = RC_MAP_PIXELVIEW_002T, } }; diff --git a/drivers/media/rc/keymaps/rc-pixelview-mk12.c b/drivers/media/rc/keymaps/rc-pixelview-mk12.c index 224d0efaa6e5..453d52d663fe 100644 --- a/drivers/media/rc/keymaps/rc-pixelview-mk12.c +++ b/drivers/media/rc/keymaps/rc-pixelview-mk12.c @@ -60,10 +60,10 @@ static struct rc_map_table pixelview_mk12[] = { static 
struct rc_map_list pixelview_map = { .map = { - .scan = pixelview_mk12, - .size = ARRAY_SIZE(pixelview_mk12), - .rc_type = RC_TYPE_NEC, - .name = RC_MAP_PIXELVIEW_MK12, + .scan = pixelview_mk12, + .size = ARRAY_SIZE(pixelview_mk12), + .rc_proto = RC_PROTO_NEC, + .name = RC_MAP_PIXELVIEW_MK12, } }; diff --git a/drivers/media/rc/keymaps/rc-pixelview-new.c b/drivers/media/rc/keymaps/rc-pixelview-new.c index 781d788d6b6d..ef97095ec8f1 100644 --- a/drivers/media/rc/keymaps/rc-pixelview-new.c +++ b/drivers/media/rc/keymaps/rc-pixelview-new.c @@ -60,10 +60,10 @@ static struct rc_map_table pixelview_new[] = { static struct rc_map_list pixelview_new_map = { .map = { - .scan = pixelview_new, - .size = ARRAY_SIZE(pixelview_new), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_PIXELVIEW_NEW, + .scan = pixelview_new, + .size = ARRAY_SIZE(pixelview_new), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_PIXELVIEW_NEW, } }; diff --git a/drivers/media/rc/keymaps/rc-pixelview.c b/drivers/media/rc/keymaps/rc-pixelview.c index 39e6feaa35a3..cfd8f80d3617 100644 --- a/drivers/media/rc/keymaps/rc-pixelview.c +++ b/drivers/media/rc/keymaps/rc-pixelview.c @@ -59,10 +59,10 @@ static struct rc_map_table pixelview[] = { static struct rc_map_list pixelview_map = { .map = { - .scan = pixelview, - .size = ARRAY_SIZE(pixelview), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_PIXELVIEW, + .scan = pixelview, + .size = ARRAY_SIZE(pixelview), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_PIXELVIEW, } }; diff --git a/drivers/media/rc/keymaps/rc-powercolor-real-angel.c b/drivers/media/rc/keymaps/rc-powercolor-real-angel.c index e96fa3ab9f4b..b63f82bcf29a 100644 --- a/drivers/media/rc/keymaps/rc-powercolor-real-angel.c +++ b/drivers/media/rc/keymaps/rc-powercolor-real-angel.c @@ -58,10 +58,10 @@ static struct rc_map_table powercolor_real_angel[] = { static struct rc_map_list powercolor_real_angel_map = { .map = { - .scan = powercolor_real_angel, - .size = ARRAY_SIZE(powercolor_real_angel), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_POWERCOLOR_REAL_ANGEL, + .scan = powercolor_real_angel, + .size = ARRAY_SIZE(powercolor_real_angel), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_POWERCOLOR_REAL_ANGEL, } }; diff --git a/drivers/media/rc/keymaps/rc-proteus-2309.c b/drivers/media/rc/keymaps/rc-proteus-2309.c index eef626ee02df..be34c517e4e1 100644 --- a/drivers/media/rc/keymaps/rc-proteus-2309.c +++ b/drivers/media/rc/keymaps/rc-proteus-2309.c @@ -46,10 +46,10 @@ static struct rc_map_table proteus_2309[] = { static struct rc_map_list proteus_2309_map = { .map = { - .scan = proteus_2309, - .size = ARRAY_SIZE(proteus_2309), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_PROTEUS_2309, + .scan = proteus_2309, + .size = ARRAY_SIZE(proteus_2309), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_PROTEUS_2309, } }; diff --git a/drivers/media/rc/keymaps/rc-purpletv.c b/drivers/media/rc/keymaps/rc-purpletv.c index cec6fe466829..84c40b97ee00 100644 --- a/drivers/media/rc/keymaps/rc-purpletv.c +++ b/drivers/media/rc/keymaps/rc-purpletv.c @@ -58,10 +58,10 @@ static struct rc_map_table purpletv[] = { static struct rc_map_list purpletv_map = { .map = { - .scan = purpletv, - .size = ARRAY_SIZE(purpletv), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_PURPLETV, + .scan = purpletv, + .size = ARRAY_SIZE(purpletv), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR 
type */ + .name = RC_MAP_PURPLETV, } }; diff --git a/drivers/media/rc/keymaps/rc-pv951.c b/drivers/media/rc/keymaps/rc-pv951.c index 5ac89ce8c053..be190ddebfc4 100644 --- a/drivers/media/rc/keymaps/rc-pv951.c +++ b/drivers/media/rc/keymaps/rc-pv951.c @@ -55,10 +55,10 @@ static struct rc_map_table pv951[] = { static struct rc_map_list pv951_map = { .map = { - .scan = pv951, - .size = ARRAY_SIZE(pv951), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_PV951, + .scan = pv951, + .size = ARRAY_SIZE(pv951), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_PV951, } }; diff --git a/drivers/media/rc/keymaps/rc-rc6-mce.c b/drivers/media/rc/keymaps/rc-rc6-mce.c index 5be567506bcd..0d87b20a0c43 100644 --- a/drivers/media/rc/keymaps/rc-rc6-mce.c +++ b/drivers/media/rc/keymaps/rc-rc6-mce.c @@ -96,10 +96,10 @@ static struct rc_map_table rc6_mce[] = { static struct rc_map_list rc6_mce_map = { .map = { - .scan = rc6_mce, - .size = ARRAY_SIZE(rc6_mce), - .rc_type = RC_TYPE_RC6_MCE, - .name = RC_MAP_RC6_MCE, + .scan = rc6_mce, + .size = ARRAY_SIZE(rc6_mce), + .rc_proto = RC_PROTO_RC6_MCE, + .name = RC_MAP_RC6_MCE, } }; diff --git a/drivers/media/rc/keymaps/rc-real-audio-220-32-keys.c b/drivers/media/rc/keymaps/rc-real-audio-220-32-keys.c index 9f778bd091db..957fa21747ea 100644 --- a/drivers/media/rc/keymaps/rc-real-audio-220-32-keys.c +++ b/drivers/media/rc/keymaps/rc-real-audio-220-32-keys.c @@ -55,10 +55,10 @@ static struct rc_map_table real_audio_220_32_keys[] = { static struct rc_map_list real_audio_220_32_keys_map = { .map = { - .scan = real_audio_220_32_keys, - .size = ARRAY_SIZE(real_audio_220_32_keys), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_REAL_AUDIO_220_32_KEYS, + .scan = real_audio_220_32_keys, + .size = ARRAY_SIZE(real_audio_220_32_keys), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_REAL_AUDIO_220_32_KEYS, } }; diff --git a/drivers/media/rc/keymaps/rc-reddo.c b/drivers/media/rc/keymaps/rc-reddo.c index b80b336e9284..3b37acc7b144 100644 --- a/drivers/media/rc/keymaps/rc-reddo.c +++ b/drivers/media/rc/keymaps/rc-reddo.c @@ -62,10 +62,10 @@ static struct rc_map_table reddo[] = { static struct rc_map_list reddo_map = { .map = { - .scan = reddo, - .size = ARRAY_SIZE(reddo), - .rc_type = RC_TYPE_NEC, - .name = RC_MAP_REDDO, + .scan = reddo, + .size = ARRAY_SIZE(reddo), + .rc_proto = RC_PROTO_NEC, + .name = RC_MAP_REDDO, } }; diff --git a/drivers/media/rc/keymaps/rc-snapstream-firefly.c b/drivers/media/rc/keymaps/rc-snapstream-firefly.c index c7f33ec719b4..30630a6f76ac 100644 --- a/drivers/media/rc/keymaps/rc-snapstream-firefly.c +++ b/drivers/media/rc/keymaps/rc-snapstream-firefly.c @@ -83,10 +83,10 @@ static struct rc_map_table snapstream_firefly[] = { static struct rc_map_list snapstream_firefly_map = { .map = { - .scan = snapstream_firefly, - .size = ARRAY_SIZE(snapstream_firefly), - .rc_type = RC_TYPE_OTHER, - .name = RC_MAP_SNAPSTREAM_FIREFLY, + .scan = snapstream_firefly, + .size = ARRAY_SIZE(snapstream_firefly), + .rc_proto = RC_PROTO_OTHER, + .name = RC_MAP_SNAPSTREAM_FIREFLY, } }; diff --git a/drivers/media/rc/keymaps/rc-streamzap.c b/drivers/media/rc/keymaps/rc-streamzap.c index 23c061174ed7..b53bca9e4576 100644 --- a/drivers/media/rc/keymaps/rc-streamzap.c +++ b/drivers/media/rc/keymaps/rc-streamzap.c @@ -57,10 +57,10 @@ static struct rc_map_table streamzap[] = { static struct rc_map_list streamzap_map = { .map = { - .scan = streamzap, - .size = ARRAY_SIZE(streamzap), - .rc_type = RC_TYPE_RC5_SZ, - 
.name = RC_MAP_STREAMZAP, + .scan = streamzap, + .size = ARRAY_SIZE(streamzap), + .rc_proto = RC_PROTO_RC5_SZ, + .name = RC_MAP_STREAMZAP, } }; diff --git a/drivers/media/rc/keymaps/rc-su3000.c b/drivers/media/rc/keymaps/rc-su3000.c index 8dbd3e9bc951..d9af7e3c55d9 100644 --- a/drivers/media/rc/keymaps/rc-su3000.c +++ b/drivers/media/rc/keymaps/rc-su3000.c @@ -51,10 +51,10 @@ static struct rc_map_table su3000[] = { static struct rc_map_list su3000_map = { .map = { - .scan = su3000, - .size = ARRAY_SIZE(su3000), - .rc_type = RC_TYPE_RC5, - .name = RC_MAP_SU3000, + .scan = su3000, + .size = ARRAY_SIZE(su3000), + .rc_proto = RC_PROTO_RC5, + .name = RC_MAP_SU3000, } }; diff --git a/drivers/media/rc/keymaps/rc-tbs-nec.c b/drivers/media/rc/keymaps/rc-tbs-nec.c index 24ce2a252502..05facc043272 100644 --- a/drivers/media/rc/keymaps/rc-tbs-nec.c +++ b/drivers/media/rc/keymaps/rc-tbs-nec.c @@ -52,10 +52,10 @@ static struct rc_map_table tbs_nec[] = { static struct rc_map_list tbs_nec_map = { .map = { - .scan = tbs_nec, - .size = ARRAY_SIZE(tbs_nec), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_TBS_NEC, + .scan = tbs_nec, + .size = ARRAY_SIZE(tbs_nec), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_TBS_NEC, } }; diff --git a/drivers/media/rc/keymaps/rc-technisat-ts35.c b/drivers/media/rc/keymaps/rc-technisat-ts35.c index 3328cbefabad..dff7021734ba 100644 --- a/drivers/media/rc/keymaps/rc-technisat-ts35.c +++ b/drivers/media/rc/keymaps/rc-technisat-ts35.c @@ -53,10 +53,10 @@ static struct rc_map_table technisat_ts35[] = { static struct rc_map_list technisat_ts35_map = { .map = { - .scan = technisat_ts35, - .size = ARRAY_SIZE(technisat_ts35), - .rc_type = RC_TYPE_UNKNOWN, - .name = RC_MAP_TECHNISAT_TS35, + .scan = technisat_ts35, + .size = ARRAY_SIZE(technisat_ts35), + .rc_proto = RC_PROTO_UNKNOWN, + .name = RC_MAP_TECHNISAT_TS35, } }; diff --git a/drivers/media/rc/keymaps/rc-technisat-usb2.c b/drivers/media/rc/keymaps/rc-technisat-usb2.c index 02c9c243c060..58b3baf5ee96 100644 --- a/drivers/media/rc/keymaps/rc-technisat-usb2.c +++ b/drivers/media/rc/keymaps/rc-technisat-usb2.c @@ -66,10 +66,10 @@ static struct rc_map_table technisat_usb2[] = { static struct rc_map_list technisat_usb2_map = { .map = { - .scan = technisat_usb2, - .size = ARRAY_SIZE(technisat_usb2), - .rc_type = RC_TYPE_RC5, - .name = RC_MAP_TECHNISAT_USB2, + .scan = technisat_usb2, + .size = ARRAY_SIZE(technisat_usb2), + .rc_proto = RC_PROTO_RC5, + .name = RC_MAP_TECHNISAT_USB2, } }; diff --git a/drivers/media/rc/keymaps/rc-terratec-cinergy-c-pci.c b/drivers/media/rc/keymaps/rc-terratec-cinergy-c-pci.c index 7958f458527a..7ae88ccf1def 100644 --- a/drivers/media/rc/keymaps/rc-terratec-cinergy-c-pci.c +++ b/drivers/media/rc/keymaps/rc-terratec-cinergy-c-pci.c @@ -65,10 +65,10 @@ static struct rc_map_table terratec_cinergy_c_pci[] = { static struct rc_map_list terratec_cinergy_c_pci_map = { .map = { - .scan = terratec_cinergy_c_pci, - .size = ARRAY_SIZE(terratec_cinergy_c_pci), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_TERRATEC_CINERGY_C_PCI, + .scan = terratec_cinergy_c_pci, + .size = ARRAY_SIZE(terratec_cinergy_c_pci), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_TERRATEC_CINERGY_C_PCI, } }; diff --git a/drivers/media/rc/keymaps/rc-terratec-cinergy-s2-hd.c b/drivers/media/rc/keymaps/rc-terratec-cinergy-s2-hd.c index 1e096bbda4a0..bf0171b05ac2 100644 --- a/drivers/media/rc/keymaps/rc-terratec-cinergy-s2-hd.c +++ 
b/drivers/media/rc/keymaps/rc-terratec-cinergy-s2-hd.c @@ -63,10 +63,10 @@ static struct rc_map_table terratec_cinergy_s2_hd[] = { static struct rc_map_list terratec_cinergy_s2_hd_map = { .map = { - .scan = terratec_cinergy_s2_hd, - .size = ARRAY_SIZE(terratec_cinergy_s2_hd), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_TERRATEC_CINERGY_S2_HD, + .scan = terratec_cinergy_s2_hd, + .size = ARRAY_SIZE(terratec_cinergy_s2_hd), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_TERRATEC_CINERGY_S2_HD, } }; diff --git a/drivers/media/rc/keymaps/rc-terratec-cinergy-xs.c b/drivers/media/rc/keymaps/rc-terratec-cinergy-xs.c index 97eb83ab5a35..3d0f6f7e5bea 100644 --- a/drivers/media/rc/keymaps/rc-terratec-cinergy-xs.c +++ b/drivers/media/rc/keymaps/rc-terratec-cinergy-xs.c @@ -69,10 +69,10 @@ static struct rc_map_table terratec_cinergy_xs[] = { static struct rc_map_list terratec_cinergy_xs_map = { .map = { - .scan = terratec_cinergy_xs, - .size = ARRAY_SIZE(terratec_cinergy_xs), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_TERRATEC_CINERGY_XS, + .scan = terratec_cinergy_xs, + .size = ARRAY_SIZE(terratec_cinergy_xs), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_TERRATEC_CINERGY_XS, } }; diff --git a/drivers/media/rc/keymaps/rc-terratec-slim-2.c b/drivers/media/rc/keymaps/rc-terratec-slim-2.c index 4c149ef712dc..df57e0a45820 100644 --- a/drivers/media/rc/keymaps/rc-terratec-slim-2.c +++ b/drivers/media/rc/keymaps/rc-terratec-slim-2.c @@ -49,10 +49,10 @@ static struct rc_map_table terratec_slim_2[] = { static struct rc_map_list terratec_slim_2_map = { .map = { - .scan = terratec_slim_2, - .size = ARRAY_SIZE(terratec_slim_2), - .rc_type = RC_TYPE_NEC, - .name = RC_MAP_TERRATEC_SLIM_2, + .scan = terratec_slim_2, + .size = ARRAY_SIZE(terratec_slim_2), + .rc_proto = RC_PROTO_NEC, + .name = RC_MAP_TERRATEC_SLIM_2, } }; diff --git a/drivers/media/rc/keymaps/rc-terratec-slim.c b/drivers/media/rc/keymaps/rc-terratec-slim.c index 3d8a19cdb5a2..628272c58d65 100644 --- a/drivers/media/rc/keymaps/rc-terratec-slim.c +++ b/drivers/media/rc/keymaps/rc-terratec-slim.c @@ -56,10 +56,10 @@ static struct rc_map_table terratec_slim[] = { static struct rc_map_list terratec_slim_map = { .map = { - .scan = terratec_slim, - .size = ARRAY_SIZE(terratec_slim), - .rc_type = RC_TYPE_NEC, - .name = RC_MAP_TERRATEC_SLIM, + .scan = terratec_slim, + .size = ARRAY_SIZE(terratec_slim), + .rc_proto = RC_PROTO_NEC, + .name = RC_MAP_TERRATEC_SLIM, } }; diff --git a/drivers/media/rc/keymaps/rc-tevii-nec.c b/drivers/media/rc/keymaps/rc-tevii-nec.c index 38e0c0875596..31f8a0fd1f2c 100644 --- a/drivers/media/rc/keymaps/rc-tevii-nec.c +++ b/drivers/media/rc/keymaps/rc-tevii-nec.c @@ -65,10 +65,10 @@ static struct rc_map_table tevii_nec[] = { static struct rc_map_list tevii_nec_map = { .map = { - .scan = tevii_nec, - .size = ARRAY_SIZE(tevii_nec), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_TEVII_NEC, + .scan = tevii_nec, + .size = ARRAY_SIZE(tevii_nec), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_TEVII_NEC, } }; diff --git a/drivers/media/rc/keymaps/rc-tivo.c b/drivers/media/rc/keymaps/rc-tivo.c index 5cc1b456e329..1962e33c8f4e 100644 --- a/drivers/media/rc/keymaps/rc-tivo.c +++ b/drivers/media/rc/keymaps/rc-tivo.c @@ -75,10 +75,10 @@ static struct rc_map_table tivo[] = { static struct rc_map_list tivo_map = { .map = { - .scan = tivo, - .size = ARRAY_SIZE(tivo), - .rc_type = RC_TYPE_NEC, - .name = 
RC_MAP_TIVO, + .scan = tivo, + .size = ARRAY_SIZE(tivo), + .rc_proto = RC_PROTO_NEC, + .name = RC_MAP_TIVO, } }; diff --git a/drivers/media/rc/keymaps/rc-total-media-in-hand-02.c b/drivers/media/rc/keymaps/rc-total-media-in-hand-02.c index 47270f72ebf0..eeeca142f7b1 100644 --- a/drivers/media/rc/keymaps/rc-total-media-in-hand-02.c +++ b/drivers/media/rc/keymaps/rc-total-media-in-hand-02.c @@ -62,10 +62,10 @@ static struct rc_map_table total_media_in_hand_02[] = { static struct rc_map_list total_media_in_hand_02_map = { .map = { - .scan = total_media_in_hand_02, - .size = ARRAY_SIZE(total_media_in_hand_02), - .rc_type = RC_TYPE_RC5, - .name = RC_MAP_TOTAL_MEDIA_IN_HAND_02, + .scan = total_media_in_hand_02, + .size = ARRAY_SIZE(total_media_in_hand_02), + .rc_proto = RC_PROTO_RC5, + .name = RC_MAP_TOTAL_MEDIA_IN_HAND_02, } }; diff --git a/drivers/media/rc/keymaps/rc-total-media-in-hand.c b/drivers/media/rc/keymaps/rc-total-media-in-hand.c index 5b9f9ec13680..bc73bee309d8 100644 --- a/drivers/media/rc/keymaps/rc-total-media-in-hand.c +++ b/drivers/media/rc/keymaps/rc-total-media-in-hand.c @@ -62,10 +62,10 @@ static struct rc_map_table total_media_in_hand[] = { static struct rc_map_list total_media_in_hand_map = { .map = { - .scan = total_media_in_hand, - .size = ARRAY_SIZE(total_media_in_hand), - .rc_type = RC_TYPE_NEC, - .name = RC_MAP_TOTAL_MEDIA_IN_HAND, + .scan = total_media_in_hand, + .size = ARRAY_SIZE(total_media_in_hand), + .rc_proto = RC_PROTO_NEC, + .name = RC_MAP_TOTAL_MEDIA_IN_HAND, } }; diff --git a/drivers/media/rc/keymaps/rc-trekstor.c b/drivers/media/rc/keymaps/rc-trekstor.c index f9a2e0fabb9f..63f966219342 100644 --- a/drivers/media/rc/keymaps/rc-trekstor.c +++ b/drivers/media/rc/keymaps/rc-trekstor.c @@ -57,10 +57,10 @@ static struct rc_map_table trekstor[] = { static struct rc_map_list trekstor_map = { .map = { - .scan = trekstor, - .size = ARRAY_SIZE(trekstor), - .rc_type = RC_TYPE_NEC, - .name = RC_MAP_TREKSTOR, + .scan = trekstor, + .size = ARRAY_SIZE(trekstor), + .rc_proto = RC_PROTO_NEC, + .name = RC_MAP_TREKSTOR, } }; diff --git a/drivers/media/rc/keymaps/rc-tt-1500.c b/drivers/media/rc/keymaps/rc-tt-1500.c index c766d3b2b6b0..374c230705d2 100644 --- a/drivers/media/rc/keymaps/rc-tt-1500.c +++ b/drivers/media/rc/keymaps/rc-tt-1500.c @@ -59,10 +59,10 @@ static struct rc_map_table tt_1500[] = { static struct rc_map_list tt_1500_map = { .map = { - .scan = tt_1500, - .size = ARRAY_SIZE(tt_1500), - .rc_type = RC_TYPE_RC5, - .name = RC_MAP_TT_1500, + .scan = tt_1500, + .size = ARRAY_SIZE(tt_1500), + .rc_proto = RC_PROTO_RC5, + .name = RC_MAP_TT_1500, } }; diff --git a/drivers/media/rc/keymaps/rc-twinhan-dtv-cab-ci.c b/drivers/media/rc/keymaps/rc-twinhan-dtv-cab-ci.c index 202500cb3061..240d720d440c 100644 --- a/drivers/media/rc/keymaps/rc-twinhan-dtv-cab-ci.c +++ b/drivers/media/rc/keymaps/rc-twinhan-dtv-cab-ci.c @@ -75,10 +75,10 @@ static struct rc_map_table twinhan_dtv_cab_ci[] = { static struct rc_map_list twinhan_dtv_cab_ci_map = { .map = { - .scan = twinhan_dtv_cab_ci, - .size = ARRAY_SIZE(twinhan_dtv_cab_ci), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_TWINHAN_DTV_CAB_CI, + .scan = twinhan_dtv_cab_ci, + .size = ARRAY_SIZE(twinhan_dtv_cab_ci), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_TWINHAN_DTV_CAB_CI, } }; diff --git a/drivers/media/rc/keymaps/rc-twinhan1027.c b/drivers/media/rc/keymaps/rc-twinhan1027.c index 509299b90c90..2275b37c61d2 100644 --- a/drivers/media/rc/keymaps/rc-twinhan1027.c +++ 
b/drivers/media/rc/keymaps/rc-twinhan1027.c @@ -64,10 +64,10 @@ static struct rc_map_table twinhan_vp1027[] = { static struct rc_map_list twinhan_vp1027_map = { .map = { - .scan = twinhan_vp1027, - .size = ARRAY_SIZE(twinhan_vp1027), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_TWINHAN_VP1027_DVBS, + .scan = twinhan_vp1027, + .size = ARRAY_SIZE(twinhan_vp1027), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_TWINHAN_VP1027_DVBS, } }; diff --git a/drivers/media/rc/keymaps/rc-videomate-m1f.c b/drivers/media/rc/keymaps/rc-videomate-m1f.c index 23ee05e53949..fe02e047bd01 100644 --- a/drivers/media/rc/keymaps/rc-videomate-m1f.c +++ b/drivers/media/rc/keymaps/rc-videomate-m1f.c @@ -69,10 +69,10 @@ static struct rc_map_table videomate_k100[] = { static struct rc_map_list videomate_k100_map = { .map = { - .scan = videomate_k100, - .size = ARRAY_SIZE(videomate_k100), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_VIDEOMATE_K100, + .scan = videomate_k100, + .size = ARRAY_SIZE(videomate_k100), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_VIDEOMATE_K100, } }; diff --git a/drivers/media/rc/keymaps/rc-videomate-s350.c b/drivers/media/rc/keymaps/rc-videomate-s350.c index 8a354775a2d8..b4f103269872 100644 --- a/drivers/media/rc/keymaps/rc-videomate-s350.c +++ b/drivers/media/rc/keymaps/rc-videomate-s350.c @@ -62,10 +62,10 @@ static struct rc_map_table videomate_s350[] = { static struct rc_map_list videomate_s350_map = { .map = { - .scan = videomate_s350, - .size = ARRAY_SIZE(videomate_s350), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_VIDEOMATE_S350, + .scan = videomate_s350, + .size = ARRAY_SIZE(videomate_s350), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_VIDEOMATE_S350, } }; diff --git a/drivers/media/rc/keymaps/rc-videomate-tv-pvr.c b/drivers/media/rc/keymaps/rc-videomate-tv-pvr.c index eb0cda7766c4..c431fdf44057 100644 --- a/drivers/media/rc/keymaps/rc-videomate-tv-pvr.c +++ b/drivers/media/rc/keymaps/rc-videomate-tv-pvr.c @@ -64,10 +64,10 @@ static struct rc_map_table videomate_tv_pvr[] = { static struct rc_map_list videomate_tv_pvr_map = { .map = { - .scan = videomate_tv_pvr, - .size = ARRAY_SIZE(videomate_tv_pvr), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_VIDEOMATE_TV_PVR, + .scan = videomate_tv_pvr, + .size = ARRAY_SIZE(videomate_tv_pvr), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_VIDEOMATE_TV_PVR, } }; diff --git a/drivers/media/rc/keymaps/rc-winfast-usbii-deluxe.c b/drivers/media/rc/keymaps/rc-winfast-usbii-deluxe.c index c1dd598e828e..5a437e61bd5d 100644 --- a/drivers/media/rc/keymaps/rc-winfast-usbii-deluxe.c +++ b/drivers/media/rc/keymaps/rc-winfast-usbii-deluxe.c @@ -59,10 +59,10 @@ static struct rc_map_table winfast_usbii_deluxe[] = { static struct rc_map_list winfast_usbii_deluxe_map = { .map = { - .scan = winfast_usbii_deluxe, - .size = ARRAY_SIZE(winfast_usbii_deluxe), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_WINFAST_USBII_DELUXE, + .scan = winfast_usbii_deluxe, + .size = ARRAY_SIZE(winfast_usbii_deluxe), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_WINFAST_USBII_DELUXE, } }; diff --git a/drivers/media/rc/keymaps/rc-winfast.c b/drivers/media/rc/keymaps/rc-winfast.c index 8a779da1e973..53685d1f9a47 100644 --- a/drivers/media/rc/keymaps/rc-winfast.c +++ b/drivers/media/rc/keymaps/rc-winfast.c @@ -79,10 +79,10 @@ static struct rc_map_table winfast[] 
= { static struct rc_map_list winfast_map = { .map = { - .scan = winfast, - .size = ARRAY_SIZE(winfast), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_WINFAST, + .scan = winfast, + .size = ARRAY_SIZE(winfast), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_WINFAST, } }; diff --git a/drivers/media/rc/keymaps/rc-zx-irdec.c b/drivers/media/rc/keymaps/rc-zx-irdec.c new file mode 100644 index 000000000000..5bf3ab002afc --- /dev/null +++ b/drivers/media/rc/keymaps/rc-zx-irdec.c @@ -0,0 +1,79 @@ +/* + * Copyright (C) 2017 Sanechips Technology Co., Ltd. + * Copyright 2017 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <media/rc-map.h> +#include <linux/module.h> + +static struct rc_map_table zx_irdec_table[] = { + { 0x01, KEY_1 }, + { 0x02, KEY_2 }, + { 0x03, KEY_3 }, + { 0x04, KEY_4 }, + { 0x05, KEY_5 }, + { 0x06, KEY_6 }, + { 0x07, KEY_7 }, + { 0x08, KEY_8 }, + { 0x09, KEY_9 }, + { 0x31, KEY_0 }, + { 0x16, KEY_DELETE }, + { 0x0a, KEY_MODE }, /* Input method */ + { 0x0c, KEY_VOLUMEUP }, + { 0x18, KEY_VOLUMEDOWN }, + { 0x0b, KEY_CHANNELUP }, + { 0x15, KEY_CHANNELDOWN }, + { 0x0d, KEY_PAGEUP }, + { 0x13, KEY_PAGEDOWN }, + { 0x46, KEY_FASTFORWARD }, + { 0x43, KEY_REWIND }, + { 0x44, KEY_PLAYPAUSE }, + { 0x45, KEY_STOP }, + { 0x49, KEY_OK }, + { 0x47, KEY_UP }, + { 0x4b, KEY_DOWN }, + { 0x48, KEY_LEFT }, + { 0x4a, KEY_RIGHT }, + { 0x4d, KEY_MENU }, + { 0x56, KEY_APPSELECT }, /* Application */ + { 0x4c, KEY_BACK }, + { 0x1e, KEY_INFO }, + { 0x4e, KEY_F1 }, + { 0x4f, KEY_F2 }, + { 0x50, KEY_F3 }, + { 0x51, KEY_F4 }, + { 0x1c, KEY_AUDIO }, + { 0x12, KEY_MUTE }, + { 0x11, KEY_DOT }, /* Location */ + { 0x1d, KEY_SETUP }, + { 0x40, KEY_POWER }, +}; + +static struct rc_map_list zx_irdec_map = { + .map = { + .scan = zx_irdec_table, + .size = ARRAY_SIZE(zx_irdec_table), + .rc_proto = RC_PROTO_NEC, + .name = RC_MAP_ZX_IRDEC, + } +}; + +static int __init init_rc_map_zx_irdec(void) +{ + return rc_map_register(&zx_irdec_map); +} + +static void __exit exit_rc_map_zx_irdec(void) +{ + rc_map_unregister(&zx_irdec_map); +} + +module_init(init_rc_map_zx_irdec) +module_exit(exit_rc_map_zx_irdec) + +MODULE_AUTHOR("Shawn Guo "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c index db1e7b70c998..9080e39ea391 100644 --- a/drivers/media/rc/lirc_dev.c +++ b/drivers/media/rc/lirc_dev.c @@ -59,6 +59,8 @@ static void lirc_release(struct device *ld) { struct irctl *ir = container_of(ld, struct irctl, dev); + put_device(ir->dev.parent); + if (ir->buf_internal) { lirc_buffer_free(ir->buf); kfree(ir->buf); @@ -218,6 +220,8 @@ int lirc_register_driver(struct lirc_driver *d) mutex_unlock(&lirc_dev_lock); + get_device(ir->dev.parent); + dev_info(ir->d.dev, "lirc_dev: driver %s registered at minor = %d\n", ir->d.name, ir->d.minor); diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c index eb130694bbb8..bf7aaff3aa37 100644 --- a/drivers/media/rc/mceusb.c +++ b/drivers/media/rc/mceusb.c @@ -538,12 +538,12 @@ static int mceusb_cmd_datasize(u8 cmd, u8 subcmd) return datasize; } -static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf, - int buf_len, int offset, int len, bool out) +static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len, + int offset, int len, bool out) { #if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) char *inout; - u8 cmd, subcmd, data1,
data2, data3, data4; + u8 cmd, subcmd, *data; struct device *dev = ir->dev; int start, skip = 0; u32 carrier, period; @@ -564,17 +564,14 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf, start = offset + skip; cmd = buf[start] & 0xff; subcmd = buf[start + 1] & 0xff; - data1 = buf[start + 2] & 0xff; - data2 = buf[start + 3] & 0xff; - data3 = buf[start + 4] & 0xff; - data4 = buf[start + 5] & 0xff; + data = buf + start + 2; switch (cmd) { case MCE_CMD_NULL: if (subcmd == MCE_CMD_NULL) break; if ((subcmd == MCE_CMD_PORT_SYS) && - (data1 == MCE_CMD_RESUME)) + (data[0] == MCE_CMD_RESUME)) dev_dbg(dev, "Device resume requested"); else dev_dbg(dev, "Unknown command 0x%02x 0x%02x", @@ -585,7 +582,7 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf, case MCE_RSP_EQEMVER: if (!out) dev_dbg(dev, "Emulator interface version %x", - data1); + data[0]); break; case MCE_CMD_G_REVISION: if (len == 2) @@ -603,13 +600,13 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf, case MCE_RSP_EQWAKEVERSION: if (!out) dev_dbg(dev, "Wake version, proto: 0x%02x, payload: 0x%02x, address: 0x%02x, version: 0x%02x", - data1, data2, data3, data4); + data[0], data[1], data[2], data[3]); break; case MCE_RSP_GETPORTSTATUS: if (!out) /* We use data1 + 1 here, to match hw labels */ dev_dbg(dev, "TX port %d: blaster is%s connected", - data1 + 1, data4 ? " not" : ""); + data[0] + 1, data[3] ? " not" : ""); break; case MCE_CMD_FLASHLED: dev_dbg(dev, "Attempting to flash LED"); @@ -630,11 +627,11 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf, break; case MCE_CMD_UNKNOWN: dev_dbg(dev, "Resp to 9f 05 of 0x%02x 0x%02x", - data1, data2); + data[0], data[1]); break; case MCE_RSP_EQIRCFS: - period = DIV_ROUND_CLOSEST( - (1U << data1 * 2) * (data2 + 1), 10); + period = DIV_ROUND_CLOSEST((1U << data[0] * 2) * + (data[1] + 1), 10); if (!period) break; carrier = (1000 * 1000) / period; @@ -646,11 +643,12 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf, break; case MCE_RSP_EQIRTXPORTS: dev_dbg(dev, "%s transmit blaster mask of 0x%02x", - inout, data1); + inout, data[0]); break; case MCE_RSP_EQIRTIMEOUT: /* value is in units of 50us, so x*50/1000 ms */ - period = ((data1 << 8) | data2) * MCE_TIME_UNIT / 1000; + period = ((data[0] << 8) | data[1]) * + MCE_TIME_UNIT / 1000; dev_dbg(dev, "%s receive timeout of %d ms", inout, period); break; @@ -662,7 +660,7 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf, break; case MCE_RSP_EQIRRXPORTEN: dev_dbg(dev, "%s %s-range receive sensor in use", - inout, data1 == 0x02 ? "short" : "long"); + inout, data[0] == 0x02 ? 
"short" : "long"); break; case MCE_CMD_GETIRRXPORTEN: /* aka MCE_RSP_EQIRRXCFCNT */ @@ -670,13 +668,13 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf, dev_dbg(dev, "Get receive sensor"); else if (ir->learning_enabled) dev_dbg(dev, "RX pulse count: %d", - ((data1 << 8) | data2)); + ((data[0] << 8) | data[1])); break; case MCE_RSP_EQIRNUMPORTS: if (out) break; dev_dbg(dev, "Num TX ports: %x, num RX ports: %x", - data1, data2); + data[0], data[1]); break; case MCE_RSP_CMD_ILLEGAL: dev_dbg(dev, "Illegal PORT_IR command"); @@ -1264,12 +1262,12 @@ static struct rc_dev *mceusb_init_rc_dev(struct mceusb_dev *ir) usb_make_path(ir->usbdev, ir->phys, sizeof(ir->phys)); - rc->input_name = ir->name; + rc->device_name = ir->name; rc->input_phys = ir->phys; usb_to_input_id(ir->usbdev, &rc->input_id); rc->dev.parent = dev; rc->priv = ir; - rc->allowed_protocols = RC_BIT_ALL_IR_DECODER; + rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER; rc->timeout = MS_TO_NS(100); if (!ir->flags.no_tx) { rc->s_tx_mask = mceusb_set_tx_mask; diff --git a/drivers/media/rc/meson-ir.c b/drivers/media/rc/meson-ir.c index 65566d569cb1..f2204eb77e2a 100644 --- a/drivers/media/rc/meson-ir.c +++ b/drivers/media/rc/meson-ir.c @@ -138,12 +138,12 @@ static int meson_ir_probe(struct platform_device *pdev) } ir->rc->priv = ir; - ir->rc->input_name = DRIVER_NAME; + ir->rc->device_name = DRIVER_NAME; ir->rc->input_phys = DRIVER_NAME "/input0"; ir->rc->input_id.bustype = BUS_HOST; map_name = of_get_property(node, "linux,rc-map-name", NULL); ir->rc->map_name = map_name ? map_name : RC_MAP_EMPTY; - ir->rc->allowed_protocols = RC_BIT_ALL_IR_DECODER; + ir->rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER; ir->rc->rx_resolution = US_TO_NS(MESON_TRATE); ir->rc->timeout = MS_TO_NS(200); ir->rc->driver_name = DRIVER_NAME; diff --git a/drivers/media/rc/mtk-cir.c b/drivers/media/rc/mtk-cir.c index f1e164e441e8..e88eb64e8e69 100644 --- a/drivers/media/rc/mtk-cir.c +++ b/drivers/media/rc/mtk-cir.c @@ -25,35 +25,28 @@ /* Register to enable PWM and IR */ #define MTK_CONFIG_HIGH_REG 0x0c -/* Enable IR pulse width detection */ + +/* Bit to enable IR pulse width detection */ #define MTK_PWM_EN BIT(13) -/* Enable IR hardware function */ + +/* + * Register to setting ok count whose unit based on hardware sampling period + * indicating IR receiving completion and then making IRQ fires + */ +#define MTK_OK_COUNT(x) (((x) & GENMASK(23, 16)) << 16) + +/* Bit to enable IR hardware function */ #define MTK_IR_EN BIT(0) -/* Register to setting sample period */ -#define MTK_CONFIG_LOW_REG 0x10 -/* Field to set sample period */ -#define CHK_PERIOD DIV_ROUND_CLOSEST(MTK_IR_SAMPLE, \ - MTK_IR_CLK_PERIOD) -#define MTK_CHK_PERIOD (((CHK_PERIOD) << 8) & (GENMASK(20, 8))) -#define MTK_CHK_PERIOD_MASK (GENMASK(20, 8)) - -/* Register to clear state of state machine */ -#define MTK_IRCLR_REG 0x20 /* Bit to restart IR receiving */ #define MTK_IRCLR BIT(0) -/* Register containing pulse width data */ -#define MTK_CHKDATA_REG(i) (0x88 + 4 * (i)) +/* Fields containing pulse width data */ #define MTK_WIDTH_MASK (GENMASK(7, 0)) -/* Register to enable IR interrupt */ -#define MTK_IRINT_EN_REG 0xcc /* Bit to enable interrupt */ #define MTK_IRINT_EN BIT(0) -/* Register to ack IR interrupt */ -#define MTK_IRINT_CLR_REG 0xd0 /* Bit to clear interrupt status */ #define MTK_IRINT_CLR BIT(0) @@ -63,24 +56,85 @@ #define MTK_IR_END(v, p) ((v) == MTK_MAX_SAMPLES && (p) == 0) /* Number of registers to record the pulse width */ #define MTK_CHKDATA_SZ 17 -/* Source 
clock frequency */ -#define MTK_IR_BASE_CLK 273000000 -/* Frequency after IR internal divider */ -#define MTK_IR_CLK_FREQ (MTK_IR_BASE_CLK / 4) -/* Period for MTK_IR_CLK in ns*/ -#define MTK_IR_CLK_PERIOD DIV_ROUND_CLOSEST(1000000000ul, \ - MTK_IR_CLK_FREQ) /* Sample period in ns */ -#define MTK_IR_SAMPLE (MTK_IR_CLK_PERIOD * 0xc00) +#define MTK_IR_SAMPLE 46000 + +enum mtk_fields { + /* Register to setting software sampling period */ + MTK_CHK_PERIOD, + /* Register to setting hardware sampling period */ + MTK_HW_PERIOD, +}; + +enum mtk_regs { + /* Register to clear state of state machine */ + MTK_IRCLR_REG, + /* Register containing pulse width data */ + MTK_CHKDATA_REG, + /* Register to enable IR interrupt */ + MTK_IRINT_EN_REG, + /* Register to ack IR interrupt */ + MTK_IRINT_CLR_REG +}; + +static const u32 mt7623_regs[] = { + [MTK_IRCLR_REG] = 0x20, + [MTK_CHKDATA_REG] = 0x88, + [MTK_IRINT_EN_REG] = 0xcc, + [MTK_IRINT_CLR_REG] = 0xd0, +}; + +static const u32 mt7622_regs[] = { + [MTK_IRCLR_REG] = 0x18, + [MTK_CHKDATA_REG] = 0x30, + [MTK_IRINT_EN_REG] = 0x1c, + [MTK_IRINT_CLR_REG] = 0x20, +}; + +struct mtk_field_type { + u32 reg; + u8 offset; + u32 mask; +}; + +/* + * struct mtk_ir_data - This is the structure holding all differences among + various hardwares + * @regs: The pointer to the array holding registers offset + * @fields: The pointer to the array holding fields location + * @div: The internal divisor for the based reference clock + * @ok_count: The count indicating the completion of IR data + * receiving when count is reached + * @hw_period: The value indicating the hardware sampling period + */ +struct mtk_ir_data { + const u32 *regs; + const struct mtk_field_type *fields; + u8 div; + u8 ok_count; + u32 hw_period; +}; + +static const struct mtk_field_type mt7623_fields[] = { + [MTK_CHK_PERIOD] = {0x10, 8, GENMASK(20, 8)}, + [MTK_HW_PERIOD] = {0x10, 0, GENMASK(7, 0)}, +}; + +static const struct mtk_field_type mt7622_fields[] = { + [MTK_CHK_PERIOD] = {0x24, 0, GENMASK(24, 0)}, + [MTK_HW_PERIOD] = {0x10, 0, GENMASK(24, 0)}, +}; /* * struct mtk_ir - This is the main datasructure for holding the state * of the driver * @dev: The device pointer * @rc: The rc instrance - * @irq: The IRQ that we are using * @base: The mapped register i/o base - * @clk: The clock that we are using + * @irq: The IRQ that we are using + * @clk: The clock that IR internal is using + * @bus: The clock that software decoder is using + * @data: Holding specific data for vaious platform */ struct mtk_ir { struct device *dev; @@ -88,8 +142,36 @@ struct mtk_ir { void __iomem *base; int irq; struct clk *clk; + struct clk *bus; + const struct mtk_ir_data *data; }; +static inline u32 mtk_chkdata_reg(struct mtk_ir *ir, u32 i) +{ + return ir->data->regs[MTK_CHKDATA_REG] + 4 * i; +} + +static inline u32 mtk_chk_period(struct mtk_ir *ir) +{ + u32 val; + + /* Period of raw software sampling in ns */ + val = DIV_ROUND_CLOSEST(1000000000ul, + clk_get_rate(ir->bus) / ir->data->div); + + /* + * Period for software decoder used in the + * unit of raw software sampling + */ + val = DIV_ROUND_CLOSEST(MTK_IR_SAMPLE, val); + + dev_dbg(ir->dev, "@pwm clk = \t%lu\n", + clk_get_rate(ir->bus) / ir->data->div); + dev_dbg(ir->dev, "@chkperiod = %08x\n", val); + + return val; +} + static void mtk_w32_mask(struct mtk_ir *ir, u32 val, u32 mask, unsigned int reg) { u32 tmp; @@ -113,16 +195,16 @@ static inline void mtk_irq_disable(struct mtk_ir *ir, u32 mask) { u32 val; - val = mtk_r32(ir, MTK_IRINT_EN_REG); - mtk_w32(ir, val & ~mask, 
MTK_IRINT_EN_REG); + val = mtk_r32(ir, ir->data->regs[MTK_IRINT_EN_REG]); + mtk_w32(ir, val & ~mask, ir->data->regs[MTK_IRINT_EN_REG]); } static inline void mtk_irq_enable(struct mtk_ir *ir, u32 mask) { u32 val; - val = mtk_r32(ir, MTK_IRINT_EN_REG); - mtk_w32(ir, val | mask, MTK_IRINT_EN_REG); + val = mtk_r32(ir, ir->data->regs[MTK_IRINT_EN_REG]); + mtk_w32(ir, val | mask, ir->data->regs[MTK_IRINT_EN_REG]); } static irqreturn_t mtk_ir_irq(int irqno, void *dev_id) @@ -140,7 +222,7 @@ static irqreturn_t mtk_ir_irq(int irqno, void *dev_id) * every decoder to reset themselves through long enough * trailing spaces and 2) the IRQ handler guarantees that * start of IR message is always contained in and starting - * from register MTK_CHKDATA_REG(0). + * from register mtk_chkdata_reg(ir, i). */ ir_raw_event_reset(ir->rc); @@ -149,7 +231,7 @@ static irqreturn_t mtk_ir_irq(int irqno, void *dev_id) /* Handle all pulse and space IR controller captures */ for (i = 0 ; i < MTK_CHKDATA_SZ ; i++) { - val = mtk_r32(ir, MTK_CHKDATA_REG(i)); + val = mtk_r32(ir, mtk_chkdata_reg(ir, i)); dev_dbg(ir->dev, "@reg%d=0x%08x\n", i, val); for (j = 0 ; j < 4 ; j++) { @@ -181,18 +263,44 @@ static irqreturn_t mtk_ir_irq(int irqno, void *dev_id) * Restart controller for the next receive that would * clear up all CHKDATA registers */ - mtk_w32_mask(ir, 0x1, MTK_IRCLR, MTK_IRCLR_REG); + mtk_w32_mask(ir, 0x1, MTK_IRCLR, ir->data->regs[MTK_IRCLR_REG]); /* Clear interrupt status */ - mtk_w32_mask(ir, 0x1, MTK_IRINT_CLR, MTK_IRINT_CLR_REG); + mtk_w32_mask(ir, 0x1, MTK_IRINT_CLR, + ir->data->regs[MTK_IRINT_CLR_REG]); return IRQ_HANDLED; } +static const struct mtk_ir_data mt7623_data = { + .regs = mt7623_regs, + .fields = mt7623_fields, + .ok_count = 0xf, + .hw_period = 0xff, + .div = 4, +}; + +static const struct mtk_ir_data mt7622_data = { + .regs = mt7622_regs, + .fields = mt7622_fields, + .ok_count = 0xf, + .hw_period = 0xffff, + .div = 32, +}; + +static const struct of_device_id mtk_ir_match[] = { + { .compatible = "mediatek,mt7623-cir", .data = &mt7623_data}, + { .compatible = "mediatek,mt7622-cir", .data = &mt7622_data}, + {}, +}; +MODULE_DEVICE_TABLE(of, mtk_ir_match); + static int mtk_ir_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *dn = dev->of_node; + const struct of_device_id *of_id = + of_match_device(mtk_ir_match, &pdev->dev); struct resource *res; struct mtk_ir *ir; u32 val; @@ -204,9 +312,7 @@ static int mtk_ir_probe(struct platform_device *pdev) return -ENOMEM; ir->dev = dev; - - if (!of_device_is_compatible(dn, "mediatek,mt7623-cir")) - return -ENODEV; + ir->data = of_id->data; ir->clk = devm_clk_get(dev, "clk"); if (IS_ERR(ir->clk)) { @@ -214,6 +320,15 @@ static int mtk_ir_probe(struct platform_device *pdev) return PTR_ERR(ir->clk); } + ir->bus = devm_clk_get(dev, "bus"); + if (IS_ERR(ir->bus)) { + /* + * For compatibility with older device trees try unnamed + * ir->bus uses the same clock as ir->clock. 
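A minimal, self-contained sketch of the table-driven register lookup this mtk-cir rework introduces: each SoC variant (mt7623, mt7622) supplies an offset array indexed by a shared enum, and helpers dereference through it instead of fixed #defines. The demo_* names and the standalone main() are invented for illustration; only the offsets are taken from the tables above, and this is not the driver's actual code.

#include <stdint.h>
#include <stdio.h>

enum demo_regs { DEMO_IRCLR_REG, DEMO_CHKDATA_REG, DEMO_IRINT_EN_REG, DEMO_NUM_REGS };

/* per-variant offsets, mirroring mt7623_regs[] / mt7622_regs[] above */
static const uint32_t demo_mt7623_regs[DEMO_NUM_REGS] = {
	[DEMO_IRCLR_REG]    = 0x20,
	[DEMO_CHKDATA_REG]  = 0x88,
	[DEMO_IRINT_EN_REG] = 0xcc,
};

static const uint32_t demo_mt7622_regs[DEMO_NUM_REGS] = {
	[DEMO_IRCLR_REG]    = 0x18,
	[DEMO_CHKDATA_REG]  = 0x30,
	[DEMO_IRINT_EN_REG] = 0x1c,
};

struct demo_ir_data {
	const uint32_t *regs;	/* variant-specific register offsets */
	uint8_t div;		/* clock divider, unused in this sketch */
};

/* CHKDATA registers are consecutive 32-bit words starting at the base offset */
static uint32_t demo_chkdata_reg(const struct demo_ir_data *d, uint32_t i)
{
	return d->regs[DEMO_CHKDATA_REG] + 4 * i;
}

int main(void)
{
	const struct demo_ir_data mt7622 = { .regs = demo_mt7622_regs, .div = 32 };

	printf("CHKDATA[3] offset on mt7622: 0x%x\n",
	       (unsigned)demo_chkdata_reg(&mt7622, 3));
	return 0;
}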
+ */ + ir->bus = ir->clk; + } + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ir->base = devm_ioremap_resource(dev, res); if (IS_ERR(ir->base)) { @@ -228,7 +343,7 @@ static int mtk_ir_probe(struct platform_device *pdev) } ir->rc->priv = ir; - ir->rc->input_name = MTK_IR_DEV; + ir->rc->device_name = MTK_IR_DEV; ir->rc->input_phys = MTK_IR_DEV "/input0"; ir->rc->input_id.bustype = BUS_HOST; ir->rc->input_id.vendor = 0x0001; @@ -238,7 +353,7 @@ static int mtk_ir_probe(struct platform_device *pdev) ir->rc->map_name = map_name ?: RC_MAP_EMPTY; ir->rc->dev.parent = dev; ir->rc->driver_name = MTK_IR_DEV; - ir->rc->allowed_protocols = RC_BIT_ALL; + ir->rc->allowed_protocols = RC_PROTO_BIT_ALL; ir->rc->rx_resolution = MTK_IR_SAMPLE; ir->rc->timeout = MTK_MAX_SAMPLES * (MTK_IR_SAMPLE + 1); @@ -256,40 +371,60 @@ static int mtk_ir_probe(struct platform_device *pdev) return -ENODEV; } - /* - * Enable interrupt after proper hardware - * setup and IRQ handler registration - */ if (clk_prepare_enable(ir->clk)) { + dev_err(dev, "try to enable ir_clk failed\n"); + return -EINVAL; + } + + if (clk_prepare_enable(ir->bus)) { dev_err(dev, "try to enable ir_clk failed\n"); ret = -EINVAL; goto exit_clkdisable_clk; } + /* + * Enable interrupt after proper hardware + * setup and IRQ handler registration + */ mtk_irq_disable(ir, MTK_IRINT_EN); ret = devm_request_irq(dev, ir->irq, mtk_ir_irq, 0, MTK_IR_DEV, ir); if (ret) { dev_err(dev, "failed request irq\n"); - goto exit_clkdisable_clk; + goto exit_clkdisable_bus; } + /* + * Setup software sample period as the reference of software decoder + */ + val = (mtk_chk_period(ir) << ir->data->fields[MTK_CHK_PERIOD].offset) & + ir->data->fields[MTK_CHK_PERIOD].mask; + mtk_w32_mask(ir, val, ir->data->fields[MTK_CHK_PERIOD].mask, + ir->data->fields[MTK_CHK_PERIOD].reg); + + /* + * Setup hardware sampling period used to setup the proper timeout for + * indicating end of IR receiving completion + */ + val = (ir->data->hw_period << ir->data->fields[MTK_HW_PERIOD].offset) & + ir->data->fields[MTK_HW_PERIOD].mask; + mtk_w32_mask(ir, val, ir->data->fields[MTK_HW_PERIOD].mask, + ir->data->fields[MTK_HW_PERIOD].reg); + /* Enable IR and PWM */ val = mtk_r32(ir, MTK_CONFIG_HIGH_REG); - val |= MTK_PWM_EN | MTK_IR_EN; + val |= MTK_OK_COUNT(ir->data->ok_count) | MTK_PWM_EN | MTK_IR_EN; mtk_w32(ir, val, MTK_CONFIG_HIGH_REG); - /* Setting sample period */ - mtk_w32_mask(ir, MTK_CHK_PERIOD, MTK_CHK_PERIOD_MASK, - MTK_CONFIG_LOW_REG); - mtk_irq_enable(ir, MTK_IRINT_EN); - dev_info(dev, "Initialized MT7623 IR driver, sample period = %luus\n", + dev_info(dev, "Initialized MT7623 IR driver, sample period = %dus\n", DIV_ROUND_CLOSEST(MTK_IR_SAMPLE, 1000)); return 0; +exit_clkdisable_bus: + clk_disable_unprepare(ir->bus); exit_clkdisable_clk: clk_disable_unprepare(ir->clk); @@ -308,17 +443,12 @@ static int mtk_ir_remove(struct platform_device *pdev) mtk_irq_disable(ir, MTK_IRINT_EN); synchronize_irq(ir->irq); + clk_disable_unprepare(ir->bus); clk_disable_unprepare(ir->clk); return 0; } -static const struct of_device_id mtk_ir_match[] = { - { .compatible = "mediatek,mt7623-cir" }, - {}, -}; -MODULE_DEVICE_TABLE(of, mtk_ir_match); - static struct platform_driver mtk_ir_driver = { .probe = mtk_ir_probe, .remove = mtk_ir_remove, diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c index ec4b25bd2ec2..5e1d866a61a5 100644 --- a/drivers/media/rc/nuvoton-cir.c +++ b/drivers/media/rc/nuvoton-cir.c @@ -727,70 +727,6 @@ static int nvt_ir_raw_set_wakeup_filter(struct rc_dev 
*dev, return ret; } -/* - * nvt_tx_ir - * - * 1) clean TX fifo first (handled by AP) - * 2) copy data from user space - * 3) disable RX interrupts, enable TX interrupts: TTR & TFU - * 4) send 9 packets to TX FIFO to open TTR - * in interrupt_handler: - * 5) send all data out - * go back to write(): - * 6) disable TX interrupts, re-enable RX interupts - * - * The key problem of this function is user space data may larger than - * driver's data buf length. So nvt_tx_ir() will only copy TX_BUF_LEN data to - * buf, and keep current copied data buf num in cur_buf_num. But driver's buf - * number may larger than TXFCONT (0xff). So in interrupt_handler, it has to - * set TXFCONT as 0xff, until buf_count less than 0xff. - */ -static int nvt_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned n) -{ - struct nvt_dev *nvt = dev->priv; - unsigned long flags; - unsigned int i; - u8 iren; - int ret; - - spin_lock_irqsave(&nvt->lock, flags); - - ret = min((unsigned)(TX_BUF_LEN / sizeof(unsigned)), n); - nvt->tx.buf_count = (ret * sizeof(unsigned)); - - memcpy(nvt->tx.buf, txbuf, nvt->tx.buf_count); - - nvt->tx.cur_buf_num = 0; - - /* save currently enabled interrupts */ - iren = nvt_cir_reg_read(nvt, CIR_IREN); - - /* now disable all interrupts, save TFU & TTR */ - nvt_cir_reg_write(nvt, CIR_IREN_TFU | CIR_IREN_TTR, CIR_IREN); - - nvt->tx.tx_state = ST_TX_REPLY; - - nvt_cir_reg_write(nvt, CIR_FIFOCON_TX_TRIGGER_LEV_8 | - CIR_FIFOCON_RXFIFOCLR, CIR_FIFOCON); - - /* trigger TTR interrupt by writing out ones, (yes, it's ugly) */ - for (i = 0; i < 9; i++) - nvt_cir_reg_write(nvt, 0x01, CIR_STXFIFO); - - spin_unlock_irqrestore(&nvt->lock, flags); - - wait_event(nvt->tx.queue, nvt->tx.tx_state == ST_TX_REQUEST); - - spin_lock_irqsave(&nvt->lock, flags); - nvt->tx.tx_state = ST_TX_NONE; - spin_unlock_irqrestore(&nvt->lock, flags); - - /* restore enabled interrupts to prior state */ - nvt_cir_reg_write(nvt, iren, CIR_IREN); - - return ret; -} - /* dump contents of the last rx buffer we got from the hw rx fifo */ static void nvt_dump_rx_buf(struct nvt_dev *nvt) { @@ -895,11 +831,6 @@ static void nvt_cir_log_irqs(u8 status, u8 iren) CIR_IRSTS_TFU | CIR_IRSTS_GH) ? " ?" 
: ""); } -static bool nvt_cir_tx_inactive(struct nvt_dev *nvt) -{ - return nvt->tx.tx_state == ST_TX_NONE; -} - /* interrupt service routine for incoming and outgoing CIR data */ static irqreturn_t nvt_cir_isr(int irq, void *data) { @@ -952,40 +883,8 @@ static irqreturn_t nvt_cir_isr(int irq, void *data) if (status & CIR_IRSTS_RFO) nvt_handle_rx_fifo_overrun(nvt); - - else if (status & (CIR_IRSTS_RTR | CIR_IRSTS_PE)) { - /* We only do rx if not tx'ing */ - if (nvt_cir_tx_inactive(nvt)) - nvt_get_rx_ir_data(nvt); - } - - if (status & CIR_IRSTS_TE) - nvt_clear_tx_fifo(nvt); - - if (status & CIR_IRSTS_TTR) { - unsigned int pos, count; - u8 tmp; - - pos = nvt->tx.cur_buf_num; - count = nvt->tx.buf_count; - - /* Write data into the hardware tx fifo while pos < count */ - if (pos < count) { - nvt_cir_reg_write(nvt, nvt->tx.buf[pos], CIR_STXFIFO); - nvt->tx.cur_buf_num++; - /* Disable TX FIFO Trigger Level Reach (TTR) interrupt */ - } else { - tmp = nvt_cir_reg_read(nvt, CIR_IREN); - nvt_cir_reg_write(nvt, tmp & ~CIR_IREN_TTR, CIR_IREN); - } - } - - if (status & CIR_IRSTS_TFU) { - if (nvt->tx.tx_state == ST_TX_REPLY) { - nvt->tx.tx_state = ST_TX_REQUEST; - wake_up(&nvt->tx.queue); - } - } + else if (status & (CIR_IRSTS_RTR | CIR_IRSTS_PE)) + nvt_get_rx_ir_data(nvt); spin_unlock(&nvt->lock); @@ -1062,7 +961,7 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id) if (!nvt) return -ENOMEM; - /* input device for IR remote (and tx) */ + /* input device for IR remote */ nvt->rdev = devm_rc_allocate_device(&pdev->dev, RC_DRIVER_IR_RAW); if (!nvt->rdev) return -ENOMEM; @@ -1105,8 +1004,6 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id) pnp_set_drvdata(pdev, nvt); - init_waitqueue_head(&nvt->tx.queue); - ret = nvt_hw_detect(nvt); if (ret) return ret; @@ -1126,15 +1023,14 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id) /* Set up the rc device */ rdev->priv = nvt; - rdev->allowed_protocols = RC_BIT_ALL_IR_DECODER; - rdev->allowed_wakeup_protocols = RC_BIT_ALL_IR_ENCODER; + rdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER; + rdev->allowed_wakeup_protocols = RC_PROTO_BIT_ALL_IR_ENCODER; rdev->encode_wakeup = true; rdev->open = nvt_open; rdev->close = nvt_close; - rdev->tx_ir = nvt_tx_ir; rdev->s_tx_carrier = nvt_set_tx_carrier; rdev->s_wakeup_filter = nvt_ir_raw_set_wakeup_filter; - rdev->input_name = "Nuvoton w836x7hg Infrared Remote Transceiver"; + rdev->device_name = "Nuvoton w836x7hg Infrared Remote Transceiver"; rdev->input_phys = "nuvoton/cir0"; rdev->input_id.bustype = BUS_HOST; rdev->input_id.vendor = PCI_VENDOR_ID_WINBOND2; @@ -1148,8 +1044,6 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id) #if 0 rdev->min_timeout = XYZ; rdev->max_timeout = XYZ; - /* tx bits */ - rdev->tx_resolution = XYZ; #endif ret = devm_rc_register_device(&pdev->dev, rdev); if (ret) @@ -1205,8 +1099,6 @@ static int nvt_suspend(struct pnp_dev *pdev, pm_message_t state) spin_lock_irqsave(&nvt->lock, flags); - nvt->tx.tx_state = ST_TX_NONE; - /* disable all CIR interrupts */ nvt_cir_reg_write(nvt, 0, CIR_IREN); diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h index 88a29df38a57..0737c27f7ddc 100644 --- a/drivers/media/rc/nuvoton-cir.h +++ b/drivers/media/rc/nuvoton-cir.h @@ -46,14 +46,6 @@ static int debug; KBUILD_MODNAME ": " text "\n" , ## __VA_ARGS__) -/* - * Original lirc driver said min value of 76, and recommended value of 256 - * for the buffer length, but then used 
2048. Never mind that the size of the - * RX FIFO is 32 bytes... So I'm using 32 for RX and 256 for TX atm, but I'm - * not sure if maybe that TX value is off by a factor of 8 (bits vs. bytes), - * and I don't have TX-capable hardware to test/debug on... - */ -#define TX_BUF_LEN 256 #define RX_BUF_LEN 32 #define SIO_ID_MASK 0xfff0 @@ -81,14 +73,6 @@ struct nvt_dev { u8 buf[RX_BUF_LEN]; unsigned int pkts; - struct { - u8 buf[TX_BUF_LEN]; - unsigned int buf_count; - unsigned int cur_buf_num; - wait_queue_head_t queue; - u8 tx_state; - } tx; - /* EFER Config register index/data pair */ u32 cr_efir; u32 cr_efdr; @@ -103,18 +87,10 @@ struct nvt_dev { u8 chip_major; u8 chip_minor; - /* hardware features */ - bool hw_tx_capable; - /* carrier period = 1 / frequency */ u32 carrier; }; -/* send states */ -#define ST_TX_NONE 0x0 -#define ST_TX_REQUEST 0x2 -#define ST_TX_REPLY 0x4 - /* buffer packet constants */ #define BUF_PULSE_BIT 0x80 #define BUF_LEN_MASK 0x7f diff --git a/drivers/media/rc/pwm-ir-tx.c b/drivers/media/rc/pwm-ir-tx.c new file mode 100644 index 000000000000..27d0f5837a76 --- /dev/null +++ b/drivers/media/rc/pwm-ir-tx.c @@ -0,0 +1,138 @@ +/* + * Copyright (C) 2017 Sean Young + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRIVER_NAME "pwm-ir-tx" +#define DEVICE_NAME "PWM IR Transmitter" + +struct pwm_ir { + struct pwm_device *pwm; + unsigned int carrier; + unsigned int duty_cycle; +}; + +static const struct of_device_id pwm_ir_of_match[] = { + { .compatible = "pwm-ir-tx", }, + { }, +}; +MODULE_DEVICE_TABLE(of, pwm_ir_of_match); + +static int pwm_ir_set_duty_cycle(struct rc_dev *dev, u32 duty_cycle) +{ + struct pwm_ir *pwm_ir = dev->priv; + + pwm_ir->duty_cycle = duty_cycle; + + return 0; +} + +static int pwm_ir_set_carrier(struct rc_dev *dev, u32 carrier) +{ + struct pwm_ir *pwm_ir = dev->priv; + + if (!carrier) + return -EINVAL; + + pwm_ir->carrier = carrier; + + return 0; +} + +static int pwm_ir_tx(struct rc_dev *dev, unsigned int *txbuf, + unsigned int count) +{ + struct pwm_ir *pwm_ir = dev->priv; + struct pwm_device *pwm = pwm_ir->pwm; + int i, duty, period; + ktime_t edge; + long delta; + + period = DIV_ROUND_CLOSEST(NSEC_PER_SEC, pwm_ir->carrier); + duty = DIV_ROUND_CLOSEST(pwm_ir->duty_cycle * period, 100); + + pwm_config(pwm, duty, period); + + edge = ktime_get(); + + for (i = 0; i < count; i++) { + if (i % 2) // space + pwm_disable(pwm); + else + pwm_enable(pwm); + + edge = ktime_add_us(edge, txbuf[i]); + delta = ktime_us_delta(edge, ktime_get()); + if (delta > 0) + usleep_range(delta, delta + 10); + } + + pwm_disable(pwm); + + return count; +} + +static int pwm_ir_probe(struct platform_device *pdev) +{ + struct pwm_ir *pwm_ir; + struct rc_dev *rcdev; + int rc; + + pwm_ir = devm_kmalloc(&pdev->dev, sizeof(*pwm_ir), GFP_KERNEL); + if (!pwm_ir) + return -ENOMEM; + + pwm_ir->pwm = devm_pwm_get(&pdev->dev, NULL); + if (IS_ERR(pwm_ir->pwm)) + return PTR_ERR(pwm_ir->pwm); + + pwm_ir->carrier = 38000; + pwm_ir->duty_cycle = 50; + + rcdev = devm_rc_allocate_device(&pdev->dev, RC_DRIVER_IR_RAW_TX); + 
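The pwm_ir_tx() loop above paces each mark/space against an accumulated absolute target (edge = ktime_add_us(edge, txbuf[i])) and sleeps only the remaining delta, so per-iteration rounding and scheduling latency do not add up over a long frame. The user-space sketch below shows the same pacing idea; the NEC-ish durations, the demo flow and the POSIX timing calls are illustrative assumptions, not the driver's code.

#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* current monotonic time in microseconds (POSIX clock_gettime) */
static int64_t now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

int main(void)
{
	/* alternating mark/space durations in microseconds (made-up frame) */
	unsigned int txbuf[] = { 9000, 4500, 560, 560, 560, 1690 };
	int64_t edge = now_us();
	unsigned int i;

	for (i = 0; i < sizeof(txbuf) / sizeof(txbuf[0]); i++) {
		printf("%s for %u us\n", (i % 2) ? "space" : "mark", txbuf[i]);

		edge += txbuf[i];		/* absolute target for the next edge */
		int64_t delta = edge - now_us();
		if (delta > 0)
			usleep(delta);		/* sleep only what is left */
	}
	return 0;
}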
if (!rcdev) + return -ENOMEM; + + rcdev->priv = pwm_ir; + rcdev->driver_name = DRIVER_NAME; + rcdev->device_name = DEVICE_NAME; + rcdev->tx_ir = pwm_ir_tx; + rcdev->s_tx_duty_cycle = pwm_ir_set_duty_cycle; + rcdev->s_tx_carrier = pwm_ir_set_carrier; + + rc = devm_rc_register_device(&pdev->dev, rcdev); + if (rc < 0) + dev_err(&pdev->dev, "failed to register rc device\n"); + + return rc; +} + +static struct platform_driver pwm_ir_driver = { + .probe = pwm_ir_probe, + .driver = { + .name = DRIVER_NAME, + .of_match_table = of_match_ptr(pwm_ir_of_match), + }, +}; +module_platform_driver(pwm_ir_driver); + +MODULE_DESCRIPTION("PWM IR Transmitter"); +MODULE_AUTHOR("Sean Young "); +MODULE_LICENSE("GPL"); diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h index b3e7cac2c3ee..7da9c96cb058 100644 --- a/drivers/media/rc/rc-core-priv.h +++ b/drivers/media/rc/rc-core-priv.h @@ -27,7 +27,7 @@ struct ir_raw_handler { u64 protocols; /* which are handled by this handler */ int (*decode)(struct rc_dev *dev, struct ir_raw_event event); - int (*encode)(enum rc_type protocol, u32 scancode, + int (*encode)(enum rc_proto protocol, u32 scancode, struct ir_raw_event *events, unsigned int max); /* These two should only be used by the lirc decoder */ @@ -41,8 +41,9 @@ struct ir_raw_event_ctrl { /* fifo for the pulse/space durations */ DECLARE_KFIFO(kfifo, struct ir_raw_event, MAX_IR_EVENT_SIZE); ktime_t last_event; /* when last event occurred */ - enum raw_event_type last_type; /* last event type */ struct rc_dev *dev; /* pointer to the parent rc_dev */ + /* edge driver */ + struct timer_list edge_handle; /* raw decoder state follows */ struct ir_raw_event prev_ev; diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c index b6d256f03847..503bc425a187 100644 --- a/drivers/media/rc/rc-ir-raw.c +++ b/drivers/media/rc/rc-ir-raw.c @@ -88,7 +88,7 @@ EXPORT_SYMBOL_GPL(ir_raw_event_store); /** * ir_raw_event_store_edge() - notify raw ir decoders of the start of a pulse/space * @dev: the struct rc_dev device descriptor - * @type: the type of the event that has occurred + * @pulse: true for pulse, false for space * * This routine (which may be called from an interrupt context) is used to * store the beginning of an ir pulse or space (or the start/end of ir @@ -96,43 +96,31 @@ EXPORT_SYMBOL_GPL(ir_raw_event_store); * hardware which does not provide durations directly but only interrupts * (or similar events) on state change. */ -int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type) +int ir_raw_event_store_edge(struct rc_dev *dev, bool pulse) { ktime_t now; - s64 delta; /* ns */ DEFINE_IR_RAW_EVENT(ev); int rc = 0; - int delay; if (!dev->raw) return -EINVAL; now = ktime_get(); - delta = ktime_to_ns(ktime_sub(now, dev->raw->last_event)); - delay = MS_TO_NS(dev->input_dev->rep[REP_DELAY]); + ev.duration = ktime_to_ns(ktime_sub(now, dev->raw->last_event)); + ev.pulse = !pulse; - /* Check for a long duration since last event or if we're - * being called for the first time, note that delta can't - * possibly be negative. 
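The rewritten ir_raw_event_store_edge() above reduces edge interrupts to durations: each edge reports how long the interval that just ended lasted, with the opposite polarity of the edge now starting (hence ev.pulse = !pulse), and a short timer later flushes the decoders once the line goes idle. A simplified user-space model of that conversion follows, with invented demo_* names and fixed timestamps in place of ktime; it is a sketch, not the kernel code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_raw {
	uint64_t last_event_ns;		/* timestamp of the previous edge */
};

/* pulse_edge: true if a pulse starts now, false if a space starts now */
static void demo_store_edge(struct demo_raw *raw, bool pulse_edge, uint64_t now_ns)
{
	uint64_t duration = now_ns - raw->last_event_ns;
	bool ended_pulse = !pulse_edge;	/* polarity of the interval that just finished */

	printf("%s %llu ns\n", ended_pulse ? "pulse" : "space",
	       (unsigned long long)duration);

	raw->last_event_ns = now_ns;
	/* a real driver would also (re)arm an idle-timeout timer here */
}

int main(void)
{
	struct demo_raw raw = { .last_event_ns = 0 };

	demo_store_edge(&raw, true, 600000);	/* pulse starts: 600 us of space ended */
	demo_store_edge(&raw, false, 1500000);	/* space starts: 900 us of pulse ended */
	return 0;
}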
- */ - if (delta > delay || !dev->raw->last_type) - type |= IR_START_EVENT; - else - ev.duration = delta; - - if (type & IR_START_EVENT) - ir_raw_event_reset(dev); - else if (dev->raw->last_type & IR_SPACE) { - ev.pulse = false; - rc = ir_raw_event_store(dev, &ev); - } else if (dev->raw->last_type & IR_PULSE) { - ev.pulse = true; - rc = ir_raw_event_store(dev, &ev); - } else - return 0; + rc = ir_raw_event_store(dev, &ev); dev->raw->last_event = now; - dev->raw->last_type = type; + + /* timer could be set to timeout (125ms by default) */ + if (!timer_pending(&dev->raw->edge_handle) || + time_after(dev->raw->edge_handle.expires, + jiffies + msecs_to_jiffies(15))) { + mod_timer(&dev->raw->edge_handle, + jiffies + msecs_to_jiffies(15)); + } + return rc; } EXPORT_SYMBOL_GPL(ir_raw_event_store_edge); @@ -225,7 +213,7 @@ ir_raw_get_allowed_protocols(void) return atomic64_read(&available_protocols); } -static int change_protocol(struct rc_dev *dev, u64 *rc_type) +static int change_protocol(struct rc_dev *dev, u64 *rc_proto) { /* the caller will update dev->enabled_protocols */ return 0; @@ -462,7 +450,7 @@ EXPORT_SYMBOL(ir_raw_gen_pl); * -EINVAL if the scancode is ambiguous or invalid, or if no * compatible encoder was found. */ -int ir_raw_encode_scancode(enum rc_type protocol, u32 scancode, +int ir_raw_encode_scancode(enum rc_proto protocol, u32 scancode, struct ir_raw_event *events, unsigned int max) { struct ir_raw_handler *handler; @@ -483,6 +471,27 @@ int ir_raw_encode_scancode(enum rc_type protocol, u32 scancode, } EXPORT_SYMBOL(ir_raw_encode_scancode); +static void edge_handle(unsigned long arg) +{ + struct rc_dev *dev = (struct rc_dev *)arg; + ktime_t interval = ktime_sub(ktime_get(), dev->raw->last_event); + + if (ktime_to_ns(interval) >= dev->timeout) { + DEFINE_IR_RAW_EVENT(ev); + + ev.timeout = true; + ev.duration = ktime_to_ns(interval); + + ir_raw_event_store(dev, &ev); + } else { + mod_timer(&dev->raw->edge_handle, + jiffies + nsecs_to_jiffies(dev->timeout - + ktime_to_ns(interval))); + } + + ir_raw_event_handle(dev); +} + /* * Used to (un)register raw event clients */ @@ -504,6 +513,8 @@ int ir_raw_event_prepare(struct rc_dev *dev) dev->raw->dev = dev; dev->change_protocol = change_protocol; + setup_timer(&dev->raw->edge_handle, edge_handle, + (unsigned long)dev); INIT_KFIFO(dev->raw->kfifo); return 0; @@ -555,6 +566,7 @@ void ir_raw_event_unregister(struct rc_dev *dev) return; kthread_stop(dev->raw->thread); + del_timer_sync(&dev->raw->edge_handle); mutex_lock(&ir_raw_handler_lock); list_del(&dev->raw->list); diff --git a/drivers/media/rc/rc-loopback.c b/drivers/media/rc/rc-loopback.c index 62195af24fbe..3822d9ebcb46 100644 --- a/drivers/media/rc/rc-loopback.c +++ b/drivers/media/rc/rc-loopback.c @@ -219,15 +219,15 @@ static int __init loop_init(void) return -ENOMEM; } - rc->input_name = "rc-core loopback device"; + rc->device_name = "rc-core loopback device"; rc->input_phys = "rc-core/virtual"; rc->input_id.bustype = BUS_VIRTUAL; rc->input_id.version = 1; rc->driver_name = DRIVER_NAME; rc->map_name = RC_MAP_EMPTY; rc->priv = &loopdev; - rc->allowed_protocols = RC_BIT_ALL_IR_DECODER; - rc->allowed_wakeup_protocols = RC_BIT_ALL_IR_ENCODER; + rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER; + rc->allowed_wakeup_protocols = RC_PROTO_BIT_ALL_IR_ENCODER; rc->encode_wakeup = true; rc->timeout = 100 * 1000 * 1000; /* 100 ms */ rc->min_timeout = 1; diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c index a9eba0013525..981cccd6b988 100644 --- 
a/drivers/media/rc/rc-main.c +++ b/drivers/media/rc/rc-main.c @@ -30,8 +30,54 @@ #define IR_TAB_MAX_SIZE 8192 #define RC_DEV_MAX 256 -/* FIXME: IR_KEYPRESS_TIMEOUT should be protocol specific */ -#define IR_KEYPRESS_TIMEOUT 250 +static const struct { + const char *name; + unsigned int repeat_period; + unsigned int scancode_bits; +} protocols[] = { + [RC_PROTO_UNKNOWN] = { .name = "unknown", .repeat_period = 250 }, + [RC_PROTO_OTHER] = { .name = "other", .repeat_period = 250 }, + [RC_PROTO_RC5] = { .name = "rc-5", + .scancode_bits = 0x1f7f, .repeat_period = 164 }, + [RC_PROTO_RC5X_20] = { .name = "rc-5x-20", + .scancode_bits = 0x1f7f3f, .repeat_period = 164 }, + [RC_PROTO_RC5_SZ] = { .name = "rc-5-sz", + .scancode_bits = 0x2fff, .repeat_period = 164 }, + [RC_PROTO_JVC] = { .name = "jvc", + .scancode_bits = 0xffff, .repeat_period = 250 }, + [RC_PROTO_SONY12] = { .name = "sony-12", + .scancode_bits = 0x1f007f, .repeat_period = 100 }, + [RC_PROTO_SONY15] = { .name = "sony-15", + .scancode_bits = 0xff007f, .repeat_period = 100 }, + [RC_PROTO_SONY20] = { .name = "sony-20", + .scancode_bits = 0x1fff7f, .repeat_period = 100 }, + [RC_PROTO_NEC] = { .name = "nec", + .scancode_bits = 0xffff, .repeat_period = 160 }, + [RC_PROTO_NECX] = { .name = "nec-x", + .scancode_bits = 0xffffff, .repeat_period = 160 }, + [RC_PROTO_NEC32] = { .name = "nec-32", + .scancode_bits = 0xffffffff, .repeat_period = 160 }, + [RC_PROTO_SANYO] = { .name = "sanyo", + .scancode_bits = 0x1fffff, .repeat_period = 250 }, + [RC_PROTO_MCIR2_KBD] = { .name = "mcir2-kbd", + .scancode_bits = 0xffff, .repeat_period = 150 }, + [RC_PROTO_MCIR2_MSE] = { .name = "mcir2-mse", + .scancode_bits = 0x1fffff, .repeat_period = 150 }, + [RC_PROTO_RC6_0] = { .name = "rc-6-0", + .scancode_bits = 0xffff, .repeat_period = 164 }, + [RC_PROTO_RC6_6A_20] = { .name = "rc-6-6a-20", + .scancode_bits = 0xfffff, .repeat_period = 164 }, + [RC_PROTO_RC6_6A_24] = { .name = "rc-6-6a-24", + .scancode_bits = 0xffffff, .repeat_period = 164 }, + [RC_PROTO_RC6_6A_32] = { .name = "rc-6-6a-32", + .scancode_bits = 0xffffffff, .repeat_period = 164 }, + [RC_PROTO_RC6_MCE] = { .name = "rc-6-mce", + .scancode_bits = 0xffff7fff, .repeat_period = 164 }, + [RC_PROTO_SHARP] = { .name = "sharp", + .scancode_bits = 0x1fff, .repeat_period = 250 }, + [RC_PROTO_XMP] = { .name = "xmp", .repeat_period = 250 }, + [RC_PROTO_CEC] = { .name = "cec", .repeat_period = 550 }, +}; /* Used to keep track of known keymaps */ static LIST_HEAD(rc_map_list); @@ -110,10 +156,10 @@ static struct rc_map_table empty[] = { static struct rc_map_list empty_map = { .map = { - .scan = empty, - .size = ARRAY_SIZE(empty), - .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_EMPTY, + .scan = empty, + .size = ARRAY_SIZE(empty), + .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_EMPTY, } }; @@ -121,7 +167,7 @@ static struct rc_map_list empty_map = { * ir_create_table() - initializes a scancode table * @rc_map: the rc_map to initialize * @name: name to assign to the table - * @rc_type: ir type to assign to the new table + * @rc_proto: ir type to assign to the new table * @size: initial size of the table * @return: zero on success or a negative error code * @@ -129,12 +175,12 @@ static struct rc_map_list empty_map = { * memory to hold at least the specified number of elements. 
*/ static int ir_create_table(struct rc_map *rc_map, - const char *name, u64 rc_type, size_t size) + const char *name, u64 rc_proto, size_t size) { rc_map->name = kstrdup(name, GFP_KERNEL); if (!rc_map->name) return -ENOMEM; - rc_map->rc_type = rc_type; + rc_map->rc_proto = rc_proto; rc_map->alloc = roundup_pow_of_two(size * sizeof(struct rc_map_table)); rc_map->size = rc_map->alloc / sizeof(struct rc_map_table); rc_map->scan = kmalloc(rc_map->alloc, GFP_KERNEL); @@ -389,7 +435,7 @@ static int ir_setkeytable(struct rc_dev *dev, int rc; rc = ir_create_table(rc_map, from->name, - from->rc_type, from->size); + from->rc_proto, from->size); if (rc) return rc; @@ -530,7 +576,7 @@ u32 rc_g_keycode_from_table(struct rc_dev *dev, u32 scancode) if (keycode != KEY_RESERVED) IR_dprintk(1, "%s: scancode 0x%04x keycode 0x%02x\n", - dev->input_name, scancode, keycode); + dev->device_name, scancode, keycode); return keycode; } @@ -613,16 +659,17 @@ static void ir_timer_keyup(unsigned long cookie) void rc_repeat(struct rc_dev *dev) { unsigned long flags; + unsigned int timeout = protocols[dev->last_protocol].repeat_period; spin_lock_irqsave(&dev->keylock, flags); - input_event(dev->input_dev, EV_MSC, MSC_SCAN, dev->last_scancode); - input_sync(dev->input_dev); - if (!dev->keypressed) goto out; - dev->keyup_jiffies = jiffies + msecs_to_jiffies(IR_KEYPRESS_TIMEOUT); + input_event(dev->input_dev, EV_MSC, MSC_SCAN, dev->last_scancode); + input_sync(dev->input_dev); + + dev->keyup_jiffies = jiffies + msecs_to_jiffies(timeout); mod_timer(&dev->timer_keyup, dev->keyup_jiffies); out: @@ -641,7 +688,7 @@ EXPORT_SYMBOL_GPL(rc_repeat); * This function is used internally to register a keypress, it must be * called with keylock held. */ -static void ir_do_keydown(struct rc_dev *dev, enum rc_type protocol, +static void ir_do_keydown(struct rc_dev *dev, enum rc_proto protocol, u32 scancode, u32 keycode, u8 toggle) { bool new_event = (!dev->keypressed || @@ -663,7 +710,7 @@ static void ir_do_keydown(struct rc_dev *dev, enum rc_type protocol, dev->last_keycode = keycode; IR_dprintk(1, "%s: key down event, key 0x%04x, protocol 0x%04x, scancode 0x%08x\n", - dev->input_name, keycode, protocol, scancode); + dev->device_name, keycode, protocol, scancode); input_report_key(dev->input_dev, keycode, 1); led_trigger_event(led_feedback, LED_FULL); @@ -683,7 +730,8 @@ static void ir_do_keydown(struct rc_dev *dev, enum rc_type protocol, * This routine is used to signal that a key has been pressed on the * remote control. */ -void rc_keydown(struct rc_dev *dev, enum rc_type protocol, u32 scancode, u8 toggle) +void rc_keydown(struct rc_dev *dev, enum rc_proto protocol, u32 scancode, + u8 toggle) { unsigned long flags; u32 keycode = rc_g_keycode_from_table(dev, scancode); @@ -692,7 +740,8 @@ void rc_keydown(struct rc_dev *dev, enum rc_type protocol, u32 scancode, u8 togg ir_do_keydown(dev, protocol, scancode, keycode, toggle); if (dev->keypressed) { - dev->keyup_jiffies = jiffies + msecs_to_jiffies(IR_KEYPRESS_TIMEOUT); + dev->keyup_jiffies = jiffies + + msecs_to_jiffies(protocols[protocol].repeat_period); mod_timer(&dev->timer_keyup, dev->keyup_jiffies); } spin_unlock_irqrestore(&dev->keylock, flags); @@ -711,7 +760,7 @@ EXPORT_SYMBOL_GPL(rc_keydown); * This routine is used to signal that a key has been pressed on the * remote control. The driver must manually call rc_keyup() at a later stage. 
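rc-main.c above folds the fixed IR_KEYPRESS_TIMEOUT and the separate masks[] array into a single protocols[] table, so the key-up timeout and the wakeup-filter mask both come from one per-protocol entry. Below is a reduced sketch of that lookup pattern; the demo_* names are invented and only a few entries are copied from the table above.

#include <stdint.h>
#include <stdio.h>

enum demo_proto { DEMO_PROTO_RC5, DEMO_PROTO_NEC, DEMO_PROTO_SONY12, DEMO_NUM_PROTO };

static const struct {
	const char *name;
	unsigned int repeat_period;	/* ms before the key-up event fires */
	uint32_t scancode_bits;		/* bits a scancode of this protocol can carry */
} demo_protocols[DEMO_NUM_PROTO] = {
	[DEMO_PROTO_RC5]    = { "rc-5",    164, 0x1f7f },
	[DEMO_PROTO_NEC]    = { "nec",     160, 0xffff },
	[DEMO_PROTO_SONY12] = { "sony-12", 100, 0x1f007f },
};

int main(void)
{
	enum demo_proto p = DEMO_PROTO_NEC;
	uint32_t filter = 0x12345678;

	/* keyup timer is armed repeat_period ms after the last decoded frame */
	printf("%s: release key after %u ms of silence\n",
	       demo_protocols[p].name, demo_protocols[p].repeat_period);

	/* wakeup filters are clamped to the bits the protocol can express */
	printf("filter 0x%08x clamped to 0x%08x\n", (unsigned)filter,
	       (unsigned)(filter & demo_protocols[p].scancode_bits));
	return 0;
}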
*/ -void rc_keydown_notimeout(struct rc_dev *dev, enum rc_type protocol, +void rc_keydown_notimeout(struct rc_dev *dev, enum rc_proto protocol, u32 scancode, u8 toggle) { unsigned long flags; @@ -733,44 +782,28 @@ EXPORT_SYMBOL_GPL(rc_keydown_notimeout); static int rc_validate_filter(struct rc_dev *dev, struct rc_scancode_filter *filter) { - static u32 masks[] = { - [RC_TYPE_RC5] = 0x1f7f, - [RC_TYPE_RC5X_20] = 0x1f7f3f, - [RC_TYPE_RC5_SZ] = 0x2fff, - [RC_TYPE_SONY12] = 0x1f007f, - [RC_TYPE_SONY15] = 0xff007f, - [RC_TYPE_SONY20] = 0x1fff7f, - [RC_TYPE_JVC] = 0xffff, - [RC_TYPE_NEC] = 0xffff, - [RC_TYPE_NECX] = 0xffffff, - [RC_TYPE_NEC32] = 0xffffffff, - [RC_TYPE_SANYO] = 0x1fffff, - [RC_TYPE_MCIR2_KBD] = 0xffff, - [RC_TYPE_MCIR2_MSE] = 0x1fffff, - [RC_TYPE_RC6_0] = 0xffff, - [RC_TYPE_RC6_6A_20] = 0xfffff, - [RC_TYPE_RC6_6A_24] = 0xffffff, - [RC_TYPE_RC6_6A_32] = 0xffffffff, - [RC_TYPE_RC6_MCE] = 0xffff7fff, - [RC_TYPE_SHARP] = 0x1fff, - }; - u32 s = filter->data; - enum rc_type protocol = dev->wakeup_protocol; + u32 mask, s = filter->data; + enum rc_proto protocol = dev->wakeup_protocol; + + if (protocol >= ARRAY_SIZE(protocols)) + return -EINVAL; + + mask = protocols[protocol].scancode_bits; switch (protocol) { - case RC_TYPE_NECX: + case RC_PROTO_NECX: if ((((s >> 16) ^ ~(s >> 8)) & 0xff) == 0) return -EINVAL; break; - case RC_TYPE_NEC32: + case RC_PROTO_NEC32: if ((((s >> 24) ^ ~(s >> 16)) & 0xff) == 0) return -EINVAL; break; - case RC_TYPE_RC6_MCE: + case RC_PROTO_RC6_MCE: if ((s & 0xffff0000) != 0x800f0000) return -EINVAL; break; - case RC_TYPE_RC6_6A_32: + case RC_PROTO_RC6_6A_32: if ((s & 0xffff0000) == 0x800f0000) return -EINVAL; break; @@ -778,14 +811,13 @@ static int rc_validate_filter(struct rc_dev *dev, break; } - filter->data &= masks[protocol]; - filter->mask &= masks[protocol]; + filter->data &= mask; + filter->mask &= mask; /* * If we have to raw encode the IR for wakeup, we cannot have a mask */ - if (dev->encode_wakeup && - filter->mask != 0 && filter->mask != masks[protocol]) + if (dev->encode_wakeup && filter->mask != 0 && filter->mask != mask) return -EINVAL; return 0; @@ -859,30 +891,30 @@ static const struct { const char *name; const char *module_name; } proto_names[] = { - { RC_BIT_NONE, "none", NULL }, - { RC_BIT_OTHER, "other", NULL }, - { RC_BIT_UNKNOWN, "unknown", NULL }, - { RC_BIT_RC5 | - RC_BIT_RC5X_20, "rc-5", "ir-rc5-decoder" }, - { RC_BIT_NEC | - RC_BIT_NECX | - RC_BIT_NEC32, "nec", "ir-nec-decoder" }, - { RC_BIT_RC6_0 | - RC_BIT_RC6_6A_20 | - RC_BIT_RC6_6A_24 | - RC_BIT_RC6_6A_32 | - RC_BIT_RC6_MCE, "rc-6", "ir-rc6-decoder" }, - { RC_BIT_JVC, "jvc", "ir-jvc-decoder" }, - { RC_BIT_SONY12 | - RC_BIT_SONY15 | - RC_BIT_SONY20, "sony", "ir-sony-decoder" }, - { RC_BIT_RC5_SZ, "rc-5-sz", "ir-rc5-decoder" }, - { RC_BIT_SANYO, "sanyo", "ir-sanyo-decoder" }, - { RC_BIT_SHARP, "sharp", "ir-sharp-decoder" }, - { RC_BIT_MCIR2_KBD | - RC_BIT_MCIR2_MSE, "mce_kbd", "ir-mce_kbd-decoder" }, - { RC_BIT_XMP, "xmp", "ir-xmp-decoder" }, - { RC_BIT_CEC, "cec", NULL }, + { RC_PROTO_BIT_NONE, "none", NULL }, + { RC_PROTO_BIT_OTHER, "other", NULL }, + { RC_PROTO_BIT_UNKNOWN, "unknown", NULL }, + { RC_PROTO_BIT_RC5 | + RC_PROTO_BIT_RC5X_20, "rc-5", "ir-rc5-decoder" }, + { RC_PROTO_BIT_NEC | + RC_PROTO_BIT_NECX | + RC_PROTO_BIT_NEC32, "nec", "ir-nec-decoder" }, + { RC_PROTO_BIT_RC6_0 | + RC_PROTO_BIT_RC6_6A_20 | + RC_PROTO_BIT_RC6_6A_24 | + RC_PROTO_BIT_RC6_6A_32 | + RC_PROTO_BIT_RC6_MCE, "rc-6", "ir-rc6-decoder" }, + { RC_PROTO_BIT_JVC, "jvc", "ir-jvc-decoder" }, + { 
RC_PROTO_BIT_SONY12 | + RC_PROTO_BIT_SONY15 | + RC_PROTO_BIT_SONY20, "sony", "ir-sony-decoder" }, + { RC_PROTO_BIT_RC5_SZ, "rc-5-sz", "ir-rc5-decoder" }, + { RC_PROTO_BIT_SANYO, "sanyo", "ir-sanyo-decoder" }, + { RC_PROTO_BIT_SHARP, "sharp", "ir-sharp-decoder" }, + { RC_PROTO_BIT_MCIR2_KBD | + RC_PROTO_BIT_MCIR2_MSE, "mce_kbd", "ir-mce_kbd-decoder" }, + { RC_PROTO_BIT_XMP, "xmp", "ir-xmp-decoder" }, + { RC_PROTO_BIT_CEC, "cec", NULL }, }; /** @@ -1052,8 +1084,9 @@ static void ir_raw_load_modules(u64 *protocols) int i, ret; for (i = 0; i < ARRAY_SIZE(proto_names); i++) { - if (proto_names[i].type == RC_BIT_NONE || - proto_names[i].type & (RC_BIT_OTHER | RC_BIT_UNKNOWN)) + if (proto_names[i].type == RC_PROTO_BIT_NONE || + proto_names[i].type & (RC_PROTO_BIT_OTHER | + RC_PROTO_BIT_UNKNOWN)) continue; available = ir_raw_get_allowed_protocols(); @@ -1271,7 +1304,7 @@ static ssize_t store_filter(struct device *device, * Refuse to set a filter unless a protocol is enabled * and the filter is valid for that protocol */ - if (dev->wakeup_protocol != RC_TYPE_UNKNOWN) + if (dev->wakeup_protocol != RC_PROTO_UNKNOWN) ret = rc_validate_filter(dev, &new_filter); else ret = -EINVAL; @@ -1298,40 +1331,6 @@ static ssize_t store_filter(struct device *device, return (ret < 0) ? ret : len; } -/* - * This is the list of all variants of all protocols, which is used by - * the wakeup_protocols sysfs entry. In the protocols sysfs entry some - * some protocols are grouped together (e.g. nec = nec + necx + nec32). - * - * For wakeup we need to know the exact protocol variant so the hardware - * can be programmed exactly what to expect. - */ -static const char * const proto_variant_names[] = { - [RC_TYPE_UNKNOWN] = "unknown", - [RC_TYPE_OTHER] = "other", - [RC_TYPE_RC5] = "rc-5", - [RC_TYPE_RC5X_20] = "rc-5x-20", - [RC_TYPE_RC5_SZ] = "rc-5-sz", - [RC_TYPE_JVC] = "jvc", - [RC_TYPE_SONY12] = "sony-12", - [RC_TYPE_SONY15] = "sony-15", - [RC_TYPE_SONY20] = "sony-20", - [RC_TYPE_NEC] = "nec", - [RC_TYPE_NECX] = "nec-x", - [RC_TYPE_NEC32] = "nec-32", - [RC_TYPE_SANYO] = "sanyo", - [RC_TYPE_MCIR2_KBD] = "mcir2-kbd", - [RC_TYPE_MCIR2_MSE] = "mcir2-mse", - [RC_TYPE_RC6_0] = "rc-6-0", - [RC_TYPE_RC6_6A_20] = "rc-6-6a-20", - [RC_TYPE_RC6_6A_24] = "rc-6-6a-24", - [RC_TYPE_RC6_6A_32] = "rc-6-6a-32", - [RC_TYPE_RC6_MCE] = "rc-6-mce", - [RC_TYPE_SHARP] = "sharp", - [RC_TYPE_XMP] = "xmp", - [RC_TYPE_CEC] = "cec", -}; - /** * show_wakeup_protocols() - shows the wakeup IR protocol * @device: the device descriptor @@ -1352,7 +1351,7 @@ static ssize_t show_wakeup_protocols(struct device *device, { struct rc_dev *dev = to_rc_dev(device); u64 allowed; - enum rc_type enabled; + enum rc_proto enabled; char *tmp = buf; int i; @@ -1366,14 +1365,12 @@ static ssize_t show_wakeup_protocols(struct device *device, IR_dprintk(1, "%s: allowed - 0x%llx, enabled - %d\n", __func__, (long long)allowed, enabled); - for (i = 0; i < ARRAY_SIZE(proto_variant_names); i++) { + for (i = 0; i < ARRAY_SIZE(protocols); i++) { if (allowed & (1ULL << i)) { if (i == enabled) - tmp += sprintf(tmp, "[%s] ", - proto_variant_names[i]); + tmp += sprintf(tmp, "[%s] ", protocols[i].name); else - tmp += sprintf(tmp, "%s ", - proto_variant_names[i]); + tmp += sprintf(tmp, "%s ", protocols[i].name); } } @@ -1403,7 +1400,7 @@ static ssize_t store_wakeup_protocols(struct device *device, const char *buf, size_t len) { struct rc_dev *dev = to_rc_dev(device); - enum rc_type protocol; + enum rc_proto protocol; ssize_t rc; u64 allowed; int i; @@ -1413,17 +1410,17 @@ static ssize_t 
store_wakeup_protocols(struct device *device, allowed = dev->allowed_wakeup_protocols; if (sysfs_streq(buf, "none")) { - protocol = RC_TYPE_UNKNOWN; + protocol = RC_PROTO_UNKNOWN; } else { - for (i = 0; i < ARRAY_SIZE(proto_variant_names); i++) { + for (i = 0; i < ARRAY_SIZE(protocols); i++) { if ((allowed & (1ULL << i)) && - sysfs_streq(buf, proto_variant_names[i])) { + sysfs_streq(buf, protocols[i].name)) { protocol = i; break; } } - if (i == ARRAY_SIZE(proto_variant_names)) { + if (i == ARRAY_SIZE(protocols)) { rc = -EINVAL; goto out; } @@ -1443,7 +1440,7 @@ static ssize_t store_wakeup_protocols(struct device *device, dev->wakeup_protocol = protocol; IR_dprintk(1, "Wakeup protocol changed to %d\n", protocol); - if (protocol == RC_TYPE_RC6_MCE) + if (protocol == RC_PROTO_RC6_MCE) dev->scancode_wakeup_filter.data = 0x800f0000; else dev->scancode_wakeup_filter.data = 0; @@ -1507,7 +1504,7 @@ static struct attribute *rc_dev_protocol_attrs[] = { NULL, }; -static struct attribute_group rc_dev_protocol_attr_grp = { +static const struct attribute_group rc_dev_protocol_attr_grp = { .attrs = rc_dev_protocol_attrs, }; @@ -1517,7 +1514,7 @@ static struct attribute *rc_dev_filter_attrs[] = { NULL, }; -static struct attribute_group rc_dev_filter_attr_grp = { +static const struct attribute_group rc_dev_filter_attr_grp = { .attrs = rc_dev_filter_attrs, }; @@ -1528,7 +1525,7 @@ static struct attribute *rc_dev_wakeup_filter_attrs[] = { NULL, }; -static struct attribute_group rc_dev_wakeup_filter_attr_grp = { +static const struct attribute_group rc_dev_wakeup_filter_attr_grp = { .attrs = rc_dev_wakeup_filter_attrs, }; @@ -1624,7 +1621,7 @@ static int rc_prepare_rx_device(struct rc_dev *dev) { int rc; struct rc_map *rc_map; - u64 rc_type; + u64 rc_proto; if (!dev->map_name) return -EINVAL; @@ -1639,17 +1636,17 @@ static int rc_prepare_rx_device(struct rc_dev *dev) if (rc) return rc; - rc_type = BIT_ULL(rc_map->rc_type); + rc_proto = BIT_ULL(rc_map->rc_proto); if (dev->change_protocol) { - rc = dev->change_protocol(dev, &rc_type); + rc = dev->change_protocol(dev, &rc_proto); if (rc < 0) goto out_table; - dev->enabled_protocols = rc_type; + dev->enabled_protocols = rc_proto; } if (dev->driver_type == RC_DRIVER_IR_RAW) - ir_raw_load_modules(&rc_type); + ir_raw_load_modules(&rc_proto); set_bit(EV_KEY, dev->input_dev->evbit); set_bit(EV_REP, dev->input_dev->evbit); @@ -1663,7 +1660,7 @@ static int rc_prepare_rx_device(struct rc_dev *dev) dev->input_dev->dev.parent = &dev->dev; memcpy(&dev->input_dev->id, &dev->input_id, sizeof(dev->input_id)); dev->input_dev->phys = dev->input_phys; - dev->input_dev->name = dev->input_name; + dev->input_dev->name = dev->device_name; return 0; @@ -1759,7 +1756,7 @@ int rc_register_device(struct rc_dev *dev) path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL); dev_info(&dev->dev, "%s as %s\n", - dev->input_name ?: "Unspecified device", path ?: "N/A"); + dev->device_name ?: "Unspecified device", path ?: "N/A"); kfree(path); if (dev->driver_type != RC_DRIVER_IR_RAW_TX) { diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c index 56d43be2756b..6784cb9fc4e7 100644 --- a/drivers/media/rc/redrat3.c +++ b/drivers/media/rc/redrat3.c @@ -951,12 +951,12 @@ static struct rc_dev *redrat3_init_rc_dev(struct redrat3_dev *rr3) usb_make_path(rr3->udev, rr3->phys, sizeof(rr3->phys)); - rc->input_name = rr3->name; + rc->device_name = rr3->name; rc->input_phys = rr3->phys; usb_to_input_id(rr3->udev, &rc->input_id); rc->dev.parent = dev; rc->priv = rr3; - rc->allowed_protocols = 
RC_BIT_ALL_IR_DECODER; + rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER; rc->min_timeout = MS_TO_NS(RR3_RX_MIN_TIMEOUT); rc->max_timeout = MS_TO_NS(RR3_RX_MAX_TIMEOUT); rc->timeout = US_TO_NS(redrat3_get_timeout(rr3)); diff --git a/drivers/media/rc/serial_ir.c b/drivers/media/rc/serial_ir.c index 77d5d4cbed0a..8b66926bc16a 100644 --- a/drivers/media/rc/serial_ir.c +++ b/drivers/media/rc/serial_ir.c @@ -139,10 +139,8 @@ struct serial_ir { struct platform_device *pdev; struct timer_list timeout_timer; - unsigned int freq; + unsigned int carrier; unsigned int duty_cycle; - - unsigned int pulse_width, space_width; }; static struct serial_ir serial_ir; @@ -183,18 +181,6 @@ static void off(void) soutp(UART_MCR, hardware[type].off); } -static void init_timing_params(unsigned int new_duty_cycle, - unsigned int new_freq) -{ - serial_ir.duty_cycle = new_duty_cycle; - serial_ir.freq = new_freq; - - serial_ir.pulse_width = DIV_ROUND_CLOSEST( - new_duty_cycle * NSEC_PER_SEC, new_freq * 100l); - serial_ir.space_width = DIV_ROUND_CLOSEST( - (100l - new_duty_cycle) * NSEC_PER_SEC, new_freq * 100l); -} - static void send_pulse_irdeo(unsigned int length, ktime_t target) { long rawbits; @@ -241,13 +227,20 @@ static void send_pulse_homebrew_softcarrier(unsigned int length, ktime_t edge) * ndelay(s64) does not compile; so use s32 rather than s64. */ s32 delta; + unsigned int pulse, space; + + /* Ensure the dividend fits into 32 bit */ + pulse = DIV_ROUND_CLOSEST(serial_ir.duty_cycle * (NSEC_PER_SEC / 100), + serial_ir.carrier); + space = DIV_ROUND_CLOSEST((100 - serial_ir.duty_cycle) * + (NSEC_PER_SEC / 100), serial_ir.carrier); for (;;) { now = ktime_get(); if (ktime_compare(now, target) >= 0) break; on(); - edge = ktime_add_ns(edge, serial_ir.pulse_width); + edge = ktime_add_ns(edge, pulse); delta = ktime_to_ns(ktime_sub(edge, now)); if (delta > 0) ndelay(delta); @@ -255,7 +248,7 @@ static void send_pulse_homebrew_softcarrier(unsigned int length, ktime_t edge) off(); if (ktime_compare(now, target) >= 0) break; - edge = ktime_add_ns(edge, serial_ir.space_width); + edge = ktime_add_ns(edge, space); delta = ktime_to_ns(ktime_sub(edge, now)); if (delta > 0) ndelay(delta); @@ -513,19 +506,19 @@ static int serial_ir_probe(struct platform_device *dev) switch (type) { case IR_HOMEBREW: - rcdev->input_name = "Serial IR type home-brew"; + rcdev->device_name = "Serial IR type home-brew"; break; case IR_IRDEO: - rcdev->input_name = "Serial IR type IRdeo"; + rcdev->device_name = "Serial IR type IRdeo"; break; case IR_IRDEO_REMOTE: - rcdev->input_name = "Serial IR type IRdeo remote"; + rcdev->device_name = "Serial IR type IRdeo remote"; break; case IR_ANIMAX: - rcdev->input_name = "Serial IR type AnimaX"; + rcdev->device_name = "Serial IR type AnimaX"; break; case IR_IGOR: - rcdev->input_name = "Serial IR type IgorPlug"; + rcdev->device_name = "Serial IR type IgorPlug"; break; } @@ -537,7 +530,7 @@ static int serial_ir_probe(struct platform_device *dev) rcdev->open = serial_ir_open; rcdev->close = serial_ir_close; rcdev->dev.parent = &serial_ir.pdev->dev; - rcdev->allowed_protocols = RC_BIT_ALL_IR_DECODER; + rcdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER; rcdev->driver_name = KBUILD_MODNAME; rcdev->map_name = RC_MAP_RC6_MCE; rcdev->min_timeout = 1; @@ -580,7 +573,8 @@ static int serial_ir_probe(struct platform_device *dev) return result; /* Initialize pulse/space widths */ - init_timing_params(50, 38000); + serial_ir.duty_cycle = 50; + serial_ir.carrier = 38000; /* If pin is high, then this must be an active 
low receiver. */ if (sense == -1) { @@ -684,7 +678,7 @@ static int serial_ir_tx(struct rc_dev *dev, unsigned int *txbuf, static int serial_ir_tx_duty_cycle(struct rc_dev *dev, u32 cycle) { - init_timing_params(cycle, serial_ir.freq); + serial_ir.duty_cycle = cycle; return 0; } @@ -693,7 +687,7 @@ static int serial_ir_tx_carrier(struct rc_dev *dev, u32 carrier) if (carrier > 500000 || carrier < 20000) return -EINVAL; - init_timing_params(serial_ir.duty_cycle, carrier); + serial_ir.carrier = carrier; return 0; } diff --git a/drivers/media/rc/sir_ir.c b/drivers/media/rc/sir_ir.c index 20234ba0b318..bc906fb128d5 100644 --- a/drivers/media/rc/sir_ir.c +++ b/drivers/media/rc/sir_ir.c @@ -155,7 +155,7 @@ static irqreturn_t sir_interrupt(int irq, void *dev_id) { unsigned char data; ktime_t curr_time; - static unsigned long delt; + unsigned long delt; unsigned long deltintr; unsigned long flags; int counter = 0; @@ -308,14 +308,14 @@ static int sir_ir_probe(struct platform_device *dev) if (!rcdev) return -ENOMEM; - rcdev->input_name = "SIR IrDA port"; + rcdev->device_name = "SIR IrDA port"; rcdev->input_phys = KBUILD_MODNAME "/input0"; rcdev->input_id.bustype = BUS_HOST; rcdev->input_id.vendor = 0x0001; rcdev->input_id.product = 0x0001; rcdev->input_id.version = 0x0100; rcdev->tx_ir = sir_tx_ir; - rcdev->allowed_protocols = RC_BIT_ALL_IR_DECODER; + rcdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER; rcdev->driver_name = KBUILD_MODNAME; rcdev->map_name = RC_MAP_RC6_MCE; rcdev->timeout = IR_DEFAULT_TIMEOUT; diff --git a/drivers/media/rc/st_rc.c b/drivers/media/rc/st_rc.c index a08e1dd06124..a8e39c635f34 100644 --- a/drivers/media/rc/st_rc.c +++ b/drivers/media/rc/st_rc.c @@ -280,7 +280,7 @@ static int st_rc_probe(struct platform_device *pdev) else rc_dev->rx_base = rc_dev->base; - rc_dev->rstc = reset_control_get_optional(dev, NULL); + rc_dev->rstc = reset_control_get_optional_exclusive(dev, NULL); if (IS_ERR(rc_dev->rstc)) { ret = PTR_ERR(rc_dev->rstc); goto err; @@ -290,7 +290,7 @@ static int st_rc_probe(struct platform_device *pdev) platform_set_drvdata(pdev, rc_dev); st_rc_hardware_init(rc_dev); - rdev->allowed_protocols = RC_BIT_ALL_IR_DECODER; + rdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER; /* rx sampling rate is 10Mhz */ rdev->rx_resolution = 100; rdev->timeout = US_TO_NS(MAX_SYMB_TIME); @@ -299,7 +299,7 @@ static int st_rc_probe(struct platform_device *pdev) rdev->close = st_rc_close; rdev->driver_name = IR_ST_NAME; rdev->map_name = RC_MAP_EMPTY; - rdev->input_name = "ST Remote Control Receiver"; + rdev->device_name = "ST Remote Control Receiver"; ret = rc_register_device(rdev); if (ret < 0) diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c index b09c45abb5f3..f03a174ddf9d 100644 --- a/drivers/media/rc/streamzap.c +++ b/drivers/media/rc/streamzap.c @@ -299,12 +299,12 @@ static struct rc_dev *streamzap_init_rc_dev(struct streamzap_ir *sz) usb_make_path(sz->usbdev, sz->phys, sizeof(sz->phys)); strlcat(sz->phys, "/input0", sizeof(sz->phys)); - rdev->input_name = sz->name; + rdev->device_name = sz->name; rdev->input_phys = sz->phys; usb_to_input_id(sz->usbdev, &rdev->input_id); rdev->dev.parent = dev; rdev->priv = sz; - rdev->allowed_protocols = RC_BIT_ALL_IR_DECODER; + rdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER; rdev->driver_name = DRIVER_NAME; rdev->map_name = RC_MAP_STREAMZAP; diff --git a/drivers/media/rc/sunxi-cir.c b/drivers/media/rc/sunxi-cir.c index 4b785dd775c1..97f367b446c4 100644 --- a/drivers/media/rc/sunxi-cir.c +++ 
b/drivers/media/rc/sunxi-cir.c @@ -173,7 +173,7 @@ static int sunxi_ir_probe(struct platform_device *pdev) } /* Reset (optional) */ - ir->rst = devm_reset_control_get_optional(dev, NULL); + ir->rst = devm_reset_control_get_optional_exclusive(dev, NULL); if (IS_ERR(ir->rst)) return PTR_ERR(ir->rst); ret = reset_control_deassert(ir->rst); @@ -215,7 +215,7 @@ static int sunxi_ir_probe(struct platform_device *pdev) } ir->rc->priv = ir; - ir->rc->input_name = SUNXI_IR_DEV; + ir->rc->device_name = SUNXI_IR_DEV; ir->rc->input_phys = "sunxi-ir/input0"; ir->rc->input_id.bustype = BUS_HOST; ir->rc->input_id.vendor = 0x0001; @@ -224,7 +224,7 @@ static int sunxi_ir_probe(struct platform_device *pdev) ir->map_name = of_get_property(dn, "linux,rc-map-name", NULL); ir->rc->map_name = ir->map_name ?: RC_MAP_EMPTY; ir->rc->dev.parent = dev; - ir->rc->allowed_protocols = RC_BIT_ALL_IR_DECODER; + ir->rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER; ir->rc->rx_resolution = SUNXI_IR_SAMPLE; ir->rc->timeout = MS_TO_NS(SUNXI_IR_TIMEOUT); ir->rc->driver_name = SUNXI_IR_DEV; diff --git a/drivers/media/rc/ttusbir.c b/drivers/media/rc/ttusbir.c index 23be7702e2df..aafea3c5170b 100644 --- a/drivers/media/rc/ttusbir.c +++ b/drivers/media/rc/ttusbir.c @@ -309,11 +309,11 @@ static int ttusbir_probe(struct usb_interface *intf, usb_make_path(tt->udev, tt->phys, sizeof(tt->phys)); - rc->input_name = DRIVER_DESC; + rc->device_name = DRIVER_DESC; rc->input_phys = tt->phys; usb_to_input_id(tt->udev, &rc->input_id); rc->dev.parent = &intf->dev; - rc->allowed_protocols = RC_BIT_ALL_IR_DECODER; + rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER; rc->priv = tt; rc->driver_name = DRIVER_NAME; rc->map_name = RC_MAP_TT_1500; diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c index 5a4d4a611197..3ca7ab48293d 100644 --- a/drivers/media/rc/winbond-cir.c +++ b/drivers/media/rc/winbond-cir.c @@ -429,7 +429,7 @@ wbcir_irq_tx(struct wbcir_data *data) bytes[used] = byte; } - while (data->txbuf[data->txoff] == 0 && data->txoff != data->txlen) + while (data->txoff != data->txlen && data->txbuf[data->txoff] == 0) data->txoff++; if (used == 0) { @@ -697,7 +697,7 @@ wbcir_shutdown(struct pnp_dev *device) } switch (rc->wakeup_protocol) { - case RC_TYPE_RC5: + case RC_PROTO_RC5: /* Mask = 13 bits, ex toggle */ mask[0] = (mask_sc & 0x003f); mask[0] |= (mask_sc & 0x0300) >> 2; @@ -714,7 +714,7 @@ wbcir_shutdown(struct pnp_dev *device) proto = IR_PROTOCOL_RC5; break; - case RC_TYPE_NEC: + case RC_PROTO_NEC: mask[1] = bitrev8(mask_sc); mask[0] = mask[1]; mask[3] = bitrev8(mask_sc >> 8); @@ -728,7 +728,7 @@ wbcir_shutdown(struct pnp_dev *device) proto = IR_PROTOCOL_NEC; break; - case RC_TYPE_NECX: + case RC_PROTO_NECX: mask[1] = bitrev8(mask_sc); mask[0] = mask[1]; mask[2] = bitrev8(mask_sc >> 8); @@ -742,7 +742,7 @@ wbcir_shutdown(struct pnp_dev *device) proto = IR_PROTOCOL_NEC; break; - case RC_TYPE_NEC32: + case RC_PROTO_NEC32: mask[0] = bitrev8(mask_sc); mask[1] = bitrev8(mask_sc >> 8); mask[2] = bitrev8(mask_sc >> 16); @@ -756,7 +756,7 @@ wbcir_shutdown(struct pnp_dev *device) proto = IR_PROTOCOL_NEC; break; - case RC_TYPE_RC6_0: + case RC_PROTO_RC6_0: /* Command */ match[0] = wbcir_to_rc6cells(wake_sc >> 0); mask[0] = wbcir_to_rc6cells(mask_sc >> 0); @@ -779,9 +779,9 @@ wbcir_shutdown(struct pnp_dev *device) proto = IR_PROTOCOL_RC6; break; - case RC_TYPE_RC6_6A_24: - case RC_TYPE_RC6_6A_32: - case RC_TYPE_RC6_MCE: + case RC_PROTO_RC6_6A_24: + case RC_PROTO_RC6_6A_32: + case RC_PROTO_RC6_MCE: i = 0; /* Command */ 
@@ -800,13 +800,13 @@ wbcir_shutdown(struct pnp_dev *device) match[i] = wbcir_to_rc6cells(wake_sc >> 16); mask[i++] = wbcir_to_rc6cells(mask_sc >> 16); - if (rc->wakeup_protocol == RC_TYPE_RC6_6A_20) { + if (rc->wakeup_protocol == RC_PROTO_RC6_6A_20) { rc6_csl = 52; } else { match[i] = wbcir_to_rc6cells(wake_sc >> 20); mask[i++] = wbcir_to_rc6cells(mask_sc >> 20); - if (rc->wakeup_protocol == RC_TYPE_RC6_6A_24) { + if (rc->wakeup_protocol == RC_PROTO_RC6_6A_24) { rc6_csl = 60; } else { /* Customer range bit and bits 15 - 8 */ @@ -1068,7 +1068,7 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id) } data->dev->driver_name = DRVNAME; - data->dev->input_name = WBCIR_NAME; + data->dev->device_name = WBCIR_NAME; data->dev->input_phys = "wbcir/cir0"; data->dev->input_id.bustype = BUS_HOST; data->dev->input_id.vendor = PCI_VENDOR_ID_WINBOND; @@ -1086,12 +1086,13 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id) data->dev->timeout = IR_DEFAULT_TIMEOUT; data->dev->max_timeout = 10 * IR_DEFAULT_TIMEOUT; data->dev->rx_resolution = US_TO_NS(2); - data->dev->allowed_protocols = RC_BIT_ALL_IR_DECODER; - data->dev->allowed_wakeup_protocols = RC_BIT_NEC | RC_BIT_NECX | - RC_BIT_NEC32 | RC_BIT_RC5 | RC_BIT_RC6_0 | - RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 | - RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE; - data->dev->wakeup_protocol = RC_TYPE_RC6_MCE; + data->dev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER; + data->dev->allowed_wakeup_protocols = RC_PROTO_BIT_NEC | + RC_PROTO_BIT_NECX | RC_PROTO_BIT_NEC32 | RC_PROTO_BIT_RC5 | + RC_PROTO_BIT_RC6_0 | RC_PROTO_BIT_RC6_6A_20 | + RC_PROTO_BIT_RC6_6A_24 | RC_PROTO_BIT_RC6_6A_32 | + RC_PROTO_BIT_RC6_MCE; + data->dev->wakeup_protocol = RC_PROTO_RC6_MCE; data->dev->scancode_wakeup_filter.data = 0x800f040c; data->dev->scancode_wakeup_filter.mask = 0xffff7fff; data->dev->s_wakeup_filter = wbcir_set_wakeup_filter; diff --git a/drivers/media/rc/zx-irdec.c b/drivers/media/rc/zx-irdec.c new file mode 100644 index 000000000000..12d322ec8a29 --- /dev/null +++ b/drivers/media/rc/zx-irdec.c @@ -0,0 +1,184 @@ +/* + * Copyright (C) 2017 Sanechips Technology Co., Ltd. + * Copyright 2017 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#define DRIVER_NAME "zx-irdec" + +#define ZX_IR_ENABLE 0x04 +#define ZX_IREN BIT(0) +#define ZX_IR_CTRL 0x08 +#define ZX_DEGL_MASK GENMASK(21, 20) +#define ZX_DEGL_VALUE(x) (((x) << 20) & ZX_DEGL_MASK) +#define ZX_WDBEGIN_MASK GENMASK(18, 8) +#define ZX_WDBEGIN_VALUE(x) (((x) << 8) & ZX_WDBEGIN_MASK) +#define ZX_IR_INTEN 0x10 +#define ZX_IR_INTSTCLR 0x14 +#define ZX_IR_CODE 0x30 +#define ZX_IR_CNUM 0x34 +#define ZX_NECRPT BIT(16) + +struct zx_irdec { + void __iomem *base; + struct rc_dev *rcd; +}; + +static void zx_irdec_set_mask(struct zx_irdec *irdec, unsigned int reg, + u32 mask, u32 value) +{ + u32 data; + + data = readl(irdec->base + reg); + data &= ~mask; + data |= value & mask; + writel(data, irdec->base + reg); +} + +static irqreturn_t zx_irdec_irq(int irq, void *dev_id) +{ + struct zx_irdec *irdec = dev_id; + u8 address, not_address; + u8 command, not_command; + u32 rawcode, scancode; + enum rc_proto rc_proto; + + /* Clear interrupt */ + writel(1, irdec->base + ZX_IR_INTSTCLR); + + /* Check repeat frame */ + if (readl(irdec->base + ZX_IR_CNUM) & ZX_NECRPT) { + rc_repeat(irdec->rcd); + goto done; + } + + rawcode = readl(irdec->base + ZX_IR_CODE); + not_command = (rawcode >> 24) & 0xff; + command = (rawcode >> 16) & 0xff; + not_address = (rawcode >> 8) & 0xff; + address = rawcode & 0xff; + + scancode = ir_nec_bytes_to_scancode(address, not_address, + command, not_command, + &rc_proto); + rc_keydown(irdec->rcd, rc_proto, scancode, 0); + +done: + return IRQ_HANDLED; +} + +static int zx_irdec_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct zx_irdec *irdec; + struct resource *res; + struct rc_dev *rcd; + int irq; + int ret; + + irdec = devm_kzalloc(dev, sizeof(*irdec), GFP_KERNEL); + if (!irdec) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + irdec->base = devm_ioremap_resource(dev, res); + if (IS_ERR(irdec->base)) + return PTR_ERR(irdec->base); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + rcd = devm_rc_allocate_device(dev, RC_DRIVER_SCANCODE); + if (!rcd) { + dev_err(dev, "failed to allocate rc device\n"); + return -ENOMEM; + } + + irdec->rcd = rcd; + + rcd->priv = irdec; + rcd->input_phys = DRIVER_NAME "/input0"; + rcd->input_id.bustype = BUS_HOST; + rcd->map_name = RC_MAP_ZX_IRDEC; + rcd->allowed_protocols = RC_PROTO_BIT_NEC | RC_PROTO_BIT_NECX | + RC_PROTO_BIT_NEC32; + rcd->driver_name = DRIVER_NAME; + rcd->device_name = DRIVER_NAME; + + platform_set_drvdata(pdev, irdec); + + ret = devm_rc_register_device(dev, rcd); + if (ret) { + dev_err(dev, "failed to register rc device\n"); + return ret; + } + + ret = devm_request_irq(dev, irq, zx_irdec_irq, 0, NULL, irdec); + if (ret) { + dev_err(dev, "failed to request irq\n"); + return ret; + } + + /* + * Initialize deglitch level and watchdog counter beginner as + * recommended by vendor BSP code. 
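In zx_irdec_irq() above, the 32-bit ZX_IR_CODE word carries the NEC frame as address, ~address, command, ~command from least to most significant byte, and rc-core's ir_nec_bytes_to_scancode() selects plain NEC, extended NEC or NEC32 from the complement checks. The standalone sketch below paraphrases that selection logic, so treat the exact branch order as an approximation; the demo_* names and the sample frame are invented.

#include <stdint.h>
#include <stdio.h>

static uint32_t demo_nec_scancode(uint32_t rawcode, const char **variant)
{
	uint8_t address     = rawcode & 0xff;
	uint8_t not_address = (rawcode >> 8) & 0xff;
	uint8_t command     = (rawcode >> 16) & 0xff;
	uint8_t not_command = (rawcode >> 24) & 0xff;

	if ((command ^ not_command) != 0xff) {
		/* command complement broken: keep every byte (NEC32) */
		*variant = "nec32";
		return (uint32_t)not_address << 24 | (uint32_t)address << 16 |
		       (uint32_t)not_command << 8 | command;
	}
	if ((address ^ not_address) != 0xff) {
		/* address complement broken: extended NEC, 24-bit scancode */
		*variant = "necx";
		return (uint32_t)address << 16 | (uint32_t)not_address << 8 | command;
	}
	/* both complements hold: plain NEC, 16-bit scancode */
	*variant = "nec";
	return (uint32_t)address << 8 | command;
}

int main(void)
{
	const char *variant;
	/* made-up frame: address 0x01/0xfe, command 0x04/0xfb */
	uint32_t scancode = demo_nec_scancode(0xfb04fe01, &variant);

	printf("%s scancode 0x%x\n", variant, (unsigned)scancode);
	return 0;
}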
+	 */
+	zx_irdec_set_mask(irdec, ZX_IR_CTRL, ZX_DEGL_MASK, ZX_DEGL_VALUE(0));
+	zx_irdec_set_mask(irdec, ZX_IR_CTRL, ZX_WDBEGIN_MASK,
+			  ZX_WDBEGIN_VALUE(0x21c));
+
+	/* Enable interrupt */
+	writel(1, irdec->base + ZX_IR_INTEN);
+
+	/* Enable the decoder */
+	zx_irdec_set_mask(irdec, ZX_IR_ENABLE, ZX_IREN, ZX_IREN);
+
+	return 0;
+}
+
+static int zx_irdec_remove(struct platform_device *pdev)
+{
+	struct zx_irdec *irdec = platform_get_drvdata(pdev);
+
+	/* Disable the decoder */
+	zx_irdec_set_mask(irdec, ZX_IR_ENABLE, ZX_IREN, 0);
+
+	/* Disable interrupt */
+	writel(0, irdec->base + ZX_IR_INTEN);
+
+	return 0;
+}
+
+static const struct of_device_id zx_irdec_match[] = {
+	{ .compatible = "zte,zx296718-irdec" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, zx_irdec_match);
+
+static struct platform_driver zx_irdec_driver = {
+	.probe = zx_irdec_probe,
+	.remove = zx_irdec_remove,
+	.driver = {
+		.name = DRIVER_NAME,
+		.of_match_table = zx_irdec_match,
+	},
+};
+module_platform_driver(zx_irdec_driver);
+
+MODULE_DESCRIPTION("ZTE ZX IR remote control driver");
+MODULE_AUTHOR("Shawn Guo ");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/tuners/fc0012.c b/drivers/media/tuners/fc0012.c
index dcc323ffbde7..625ac6f51c39 100644
--- a/drivers/media/tuners/fc0012.c
+++ b/drivers/media/tuners/fc0012.c
@@ -351,7 +351,7 @@ static int fc0012_get_rf_strength(struct dvb_frontend *fe, u16 *strength)
 	int ret;
 	unsigned char tmp;
 	int int_temp, lna_gain, int_lna, tot_agc_gain, power;
-	const int fc0012_lna_gain_table[] = {
+	static const int fc0012_lna_gain_table[] = {
 		/* low gain */
 		-63, -58, -99, -73,
 		-63, -65, -54, -60,
diff --git a/drivers/media/tuners/fc0013.c b/drivers/media/tuners/fc0013.c
index 91dfa770a5cc..e606118d1a9b 100644
--- a/drivers/media/tuners/fc0013.c
+++ b/drivers/media/tuners/fc0013.c
@@ -511,7 +511,7 @@ static int fc0013_get_rf_strength(struct dvb_frontend *fe, u16 *strength)
 	int ret;
 	unsigned char tmp;
 	int int_temp, lna_gain, int_lna, tot_agc_gain, power;
-	const int fc0013_lna_gain_table[] = {
+	static const int fc0013_lna_gain_table[] = {
 		/* low gain */
 		-63, -58, -99, -73,
 		-63, -65, -54, -60,
diff --git a/drivers/media/tuners/tda18271-maps.c b/drivers/media/tuners/tda18271-maps.c
index 7d114677b4ca..9679804fd219 100644
--- a/drivers/media/tuners/tda18271-maps.c
+++ b/drivers/media/tuners/tda18271-maps.c
@@ -1182,7 +1182,7 @@ int tda18271_lookup_map(struct dvb_frontend *fe,
 
 /*---------------------------------------------------------------------*/
 
-static struct tda18271_std_map tda18271c1_std_map = {
+static const struct tda18271_std_map tda18271c1_std_map = {
 	.fm_radio = { .if_freq = 1250, .fm_rfn = 1, .agc_mode = 3, .std = 0,
 		      .if_lvl = 0, .rfagc_top = 0x2c, }, /* EP3[4:0] 0x18 */
 	.atv_b = { .if_freq = 6750, .fm_rfn = 0, .agc_mode = 1, .std = 6,
@@ -1215,7 +1215,7 @@ static struct tda18271_std_map tda18271c1_std_map = {
 		      .if_lvl = 1, .rfagc_top = 0x37, }, /* EP3[4:0] 0x1f */
 };
 
-static struct tda18271_std_map tda18271c2_std_map = {
+static const struct tda18271_std_map tda18271c2_std_map = {
 	.fm_radio = { .if_freq = 1250, .fm_rfn = 1, .agc_mode = 3, .std = 0,
 		      .if_lvl = 0, .rfagc_top = 0x2c, }, /* EP3[4:0] 0x18 */
 	.atv_b = { .if_freq = 6000, .fm_rfn = 0, .agc_mode = 1, .std = 5,
diff --git a/drivers/media/tuners/tuner-simple.c b/drivers/media/tuners/tuner-simple.c
index 3339b13dd3f5..cf44d3657f55 100644
--- a/drivers/media/tuners/tuner-simple.c
+++ b/drivers/media/tuners/tuner-simple.c
@@ -846,7 +846,7 @@ static u32 simple_dvb_configure(struct dvb_frontend *fe, u8 *buf,
 	/* This function
returns the tuned frequency on success, 0 on error */ struct tuner_simple_priv *priv = fe->tuner_priv; struct tunertype *tun = priv->tun; - static struct tuner_params *t_params; + struct tuner_params *t_params; u8 config, cb; u32 div; int ret; diff --git a/drivers/media/usb/airspy/airspy.c b/drivers/media/usb/airspy/airspy.c index 8251942bcd12..e70c9e2f3798 100644 --- a/drivers/media/usb/airspy/airspy.c +++ b/drivers/media/usb/airspy/airspy.c @@ -859,7 +859,7 @@ static const struct v4l2_file_operations airspy_fops = { .unlocked_ioctl = video_ioctl2, }; -static struct video_device airspy_template = { +static const struct video_device airspy_template = { .name = "AirSpy SDR", .release = video_device_release_empty, .fops = &airspy_fops, @@ -1087,7 +1087,7 @@ static int airspy_probe(struct usb_interface *intf, } /* USB device ID list */ -static struct usb_device_id airspy_id_table[] = { +static const struct usb_device_id airspy_id_table[] = { { USB_DEVICE(0x1d50, 0x60a1) }, /* AirSpy */ { } }; diff --git a/drivers/media/usb/as102/as102_usb_drv.c b/drivers/media/usb/as102/as102_usb_drv.c index 68c3a80ce349..ea57859aee77 100644 --- a/drivers/media/usb/as102/as102_usb_drv.c +++ b/drivers/media/usb/as102/as102_usb_drv.c @@ -33,7 +33,7 @@ static void as102_usb_stop_stream(struct as102_dev_t *dev); static int as102_open(struct inode *inode, struct file *file); static int as102_release(struct inode *inode, struct file *file); -static struct usb_device_id as102_usb_id_table[] = { +static const struct usb_device_id as102_usb_id_table[] = { { USB_DEVICE(AS102_USB_DEVICE_VENDOR_ID, AS102_USB_DEVICE_PID_0001) }, { USB_DEVICE(PCTV_74E_USB_VID, PCTV_74E_USB_PID) }, { USB_DEVICE(ELGATO_EYETV_DTT_USB_VID, ELGATO_EYETV_DTT_USB_PID) }, diff --git a/drivers/media/usb/au0828/Kconfig b/drivers/media/usb/au0828/Kconfig index 78b797e0b434..70521e0b4c53 100644 --- a/drivers/media/usb/au0828/Kconfig +++ b/drivers/media/usb/au0828/Kconfig @@ -31,6 +31,7 @@ config VIDEO_AU0828_V4L2 config VIDEO_AU0828_RC bool "AU0828 Remote Controller support" depends on RC_CORE + depends on !(RC_CORE=m && VIDEO_AU0828=y) depends on VIDEO_AU0828 ---help--- Enables Remote Controller support on au0828 driver. 
diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c index 739df61cec4f..cd363a2100d4 100644 --- a/drivers/media/usb/au0828/au0828-core.c +++ b/drivers/media/usb/au0828/au0828-core.c @@ -628,6 +628,8 @@ static int au0828_usb_probe(struct usb_interface *interface, if (retval) { pr_err("%s() au0282_dev_register failed to register on V4L2\n", __func__); + mutex_unlock(&dev->lock); + kfree(dev); goto done; } diff --git a/drivers/media/usb/au0828/au0828-i2c.c b/drivers/media/usb/au0828/au0828-i2c.c index 42b352bb4f02..ef7d1b830ca3 100644 --- a/drivers/media/usb/au0828/au0828-i2c.c +++ b/drivers/media/usb/au0828/au0828-i2c.c @@ -329,14 +329,14 @@ static u32 au0828_functionality(struct i2c_adapter *adap) return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_I2C; } -static struct i2c_algorithm au0828_i2c_algo_template = { +static const struct i2c_algorithm au0828_i2c_algo_template = { .master_xfer = i2c_xfer, .functionality = au0828_functionality, }; /* ----------------------------------------------------------------------- */ -static struct i2c_adapter au0828_i2c_adap_template = { +static const struct i2c_adapter au0828_i2c_adap_template = { .name = KBUILD_MODNAME, .owner = THIS_MODULE, .algo = &au0828_i2c_algo_template, diff --git a/drivers/media/usb/au0828/au0828-input.c b/drivers/media/usb/au0828/au0828-input.c index 9d82ec0a4b64..7996eb83a54e 100644 --- a/drivers/media/usb/au0828/au0828-input.c +++ b/drivers/media/usb/au0828/au0828-input.c @@ -335,7 +335,7 @@ int au0828_rc_register(struct au0828_dev *dev) usb_make_path(dev->usbdev, ir->phys, sizeof(ir->phys)); strlcat(ir->phys, "/input0", sizeof(ir->phys)); - rc->input_name = ir->name; + rc->device_name = ir->name; rc->input_phys = ir->phys; rc->input_id.bustype = BUS_USB; rc->input_id.version = 1; @@ -343,8 +343,8 @@ int au0828_rc_register(struct au0828_dev *dev) rc->input_id.product = le16_to_cpu(dev->usbdev->descriptor.idProduct); rc->dev.parent = &dev->usbdev->dev; rc->driver_name = "au0828-input"; - rc->allowed_protocols = RC_BIT_NEC | RC_BIT_NECX | RC_BIT_NEC32 | - RC_BIT_RC5; + rc->allowed_protocols = RC_PROTO_BIT_NEC | RC_PROTO_BIT_NECX | + RC_PROTO_BIT_NEC32 | RC_PROTO_BIT_RC5; /* all done */ err = rc_register_device(rc); diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c index 2a255bd32bb3..9342402b92f7 100644 --- a/drivers/media/usb/au0828/au0828-video.c +++ b/drivers/media/usb/au0828/au0828-video.c @@ -1740,7 +1740,7 @@ void au0828_v4l2_resume(struct au0828_dev *dev) } } -static struct v4l2_file_operations au0828_v4l_fops = { +static const struct v4l2_file_operations au0828_v4l_fops = { .owner = THIS_MODULE, .open = au0828_v4l2_open, .release = au0828_v4l2_close, diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c index 788c73803138..a8f3169e30b3 100644 --- a/drivers/media/usb/b2c2/flexcop-usb.c +++ b/drivers/media/usb/b2c2/flexcop-usb.c @@ -596,7 +596,7 @@ static void flexcop_usb_disconnect(struct usb_interface *intf) info("%s successfully deinitialized and disconnected.", DRIVER_NAME); } -static struct usb_device_id flexcop_usb_table [] = { +static const struct usb_device_id flexcop_usb_table[] = { { USB_DEVICE(0x0af7, 0x0101) }, { } }; diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c index 1c7e16e5d88b..6089036049d9 100644 --- a/drivers/media/usb/cpia2/cpia2_usb.c +++ b/drivers/media/usb/cpia2/cpia2_usb.c @@ -60,7 +60,7 @@ static int submit_urbs(struct camera_data *cam); static int 
set_alternate(struct camera_data *cam, unsigned int alt); static int configure_transfer_mode(struct camera_data *cam, unsigned int alt); -static struct usb_device_id cpia2_id_table[] = { +static const struct usb_device_id cpia2_id_table[] = { {USB_DEVICE(0x0553, 0x0100)}, {USB_DEVICE(0x0553, 0x0140)}, {USB_DEVICE(0x0553, 0x0151)}, /* STV0676 */ diff --git a/drivers/media/usb/cpia2/cpia2_v4l.c b/drivers/media/usb/cpia2/cpia2_v4l.c index 7122023e7004..3dedd83f0b19 100644 --- a/drivers/media/usb/cpia2/cpia2_v4l.c +++ b/drivers/media/usb/cpia2/cpia2_v4l.c @@ -1075,7 +1075,7 @@ static const struct v4l2_file_operations cpia2_fops = { .mmap = cpia2_mmap, }; -static struct video_device cpia2_template = { +static const struct video_device cpia2_template = { /* I could not find any place for the old .initialize initializer?? */ .name = "CPiA2 Camera", .fops = &cpia2_fops, diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c index 509d9711d590..d538fa407742 100644 --- a/drivers/media/usb/cx231xx/cx231xx-417.c +++ b/drivers/media/usb/cx231xx/cx231xx-417.c @@ -1490,7 +1490,7 @@ static void bb_buf_release(struct videobuf_queue *q, free_buffer(q, buf); } -static struct videobuf_queue_ops cx231xx_qops = { +static const struct videobuf_queue_ops cx231xx_qops = { .buf_setup = bb_buf_setup, .buf_prepare = bb_buf_prepare, .buf_queue = bb_buf_queue, @@ -1843,7 +1843,7 @@ static int mpeg_mmap(struct file *file, struct vm_area_struct *vma) return videobuf_mmap_mapper(&fh->vidq, vma); } -static struct v4l2_file_operations mpeg_fops = { +static const struct v4l2_file_operations mpeg_fops = { .owner = THIS_MODULE, .open = mpeg_open, .release = mpeg_release, diff --git a/drivers/media/usb/cx231xx/cx231xx-audio.c b/drivers/media/usb/cx231xx/cx231xx-audio.c index a050d125934c..06f10d7fc4b0 100644 --- a/drivers/media/usb/cx231xx/cx231xx-audio.c +++ b/drivers/media/usb/cx231xx/cx231xx-audio.c @@ -403,7 +403,7 @@ static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs, return 0; } -static struct snd_pcm_hardware snd_cx231xx_hw_capture = { +static const struct snd_pcm_hardware snd_cx231xx_hw_capture = { .info = SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c index 46646ecd2dbc..f372ad3917a8 100644 --- a/drivers/media/usb/cx231xx/cx231xx-core.c +++ b/drivers/media/usb/cx231xx/cx231xx-core.c @@ -1311,6 +1311,7 @@ int cx231xx_dev_init(struct cx231xx *dev) dev->i2c_bus[0].i2c_period = I2C_SPEED_100K; /* 100 KHz */ dev->i2c_bus[0].i2c_nostop = 0; dev->i2c_bus[0].i2c_reserve = 0; + dev->i2c_bus[0].i2c_rc = -ENODEV; /* External Master 2 Bus */ dev->i2c_bus[1].nr = 1; @@ -1318,6 +1319,7 @@ int cx231xx_dev_init(struct cx231xx *dev) dev->i2c_bus[1].i2c_period = I2C_SPEED_100K; /* 100 KHz */ dev->i2c_bus[1].i2c_nostop = 0; dev->i2c_bus[1].i2c_reserve = 0; + dev->i2c_bus[1].i2c_rc = -ENODEV; /* Internal Master 3 Bus */ dev->i2c_bus[2].nr = 2; @@ -1325,6 +1327,7 @@ int cx231xx_dev_init(struct cx231xx *dev) dev->i2c_bus[2].i2c_period = I2C_SPEED_100K; /* 100kHz */ dev->i2c_bus[2].i2c_nostop = 0; dev->i2c_bus[2].i2c_reserve = 0; + dev->i2c_bus[2].i2c_rc = -ENODEV; /* register I2C buses */ errCode = cx231xx_i2c_register(&dev->i2c_bus[0]); diff --git a/drivers/media/usb/cx231xx/cx231xx-dvb.c b/drivers/media/usb/cx231xx/cx231xx-dvb.c index ee3eeeb600f8..c18bb33e060e 100644 --- a/drivers/media/usb/cx231xx/cx231xx-dvb.c +++ 
b/drivers/media/usb/cx231xx/cx231xx-dvb.c @@ -585,6 +585,9 @@ static void unregister_dvb(struct cx231xx_dvb *dvb) dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_hw); dvb_dmxdev_release(&dvb->dmxdev); dvb_dmx_release(&dvb->demux); + dvb_unregister_frontend(dvb->frontend); + dvb_frontend_detach(dvb->frontend); + dvb_unregister_adapter(&dvb->adapter); /* remove I2C tuner */ client = dvb->i2c_client_tuner; if (client) { @@ -597,9 +600,6 @@ static void unregister_dvb(struct cx231xx_dvb *dvb) module_put(client->dev.driver->owner); i2c_unregister_device(client); } - dvb_unregister_frontend(dvb->frontend); - dvb_frontend_detach(dvb->frontend); - dvb_unregister_adapter(&dvb->adapter); } static int dvb_init(struct cx231xx *dev) diff --git a/drivers/media/usb/cx231xx/cx231xx-i2c.c b/drivers/media/usb/cx231xx/cx231xx-i2c.c index 8d95b1154e12..23648dab7be8 100644 --- a/drivers/media/usb/cx231xx/cx231xx-i2c.c +++ b/drivers/media/usb/cx231xx/cx231xx-i2c.c @@ -459,7 +459,7 @@ static const struct i2c_algorithm cx231xx_algo = { .functionality = functionality, }; -static struct i2c_adapter cx231xx_adap_template = { +static const struct i2c_adapter cx231xx_adap_template = { .owner = THIS_MODULE, .name = "cx231xx", .algo = &cx231xx_algo, @@ -538,7 +538,7 @@ int cx231xx_i2c_register(struct cx231xx_i2c *bus) bus->i2c_adap.algo_data = bus; i2c_set_adapdata(&bus->i2c_adap, &dev->v4l2_dev); - i2c_add_adapter(&bus->i2c_adap); + bus->i2c_rc = i2c_add_adapter(&bus->i2c_adap); if (0 != bus->i2c_rc) dev_warn(dev->dev, @@ -551,10 +551,10 @@ int cx231xx_i2c_register(struct cx231xx_i2c *bus) * cx231xx_i2c_unregister() * unregister i2c_bus */ -int cx231xx_i2c_unregister(struct cx231xx_i2c *bus) +void cx231xx_i2c_unregister(struct cx231xx_i2c *bus) { - i2c_del_adapter(&bus->i2c_adap); - return 0; + if (!bus->i2c_rc) + i2c_del_adapter(&bus->i2c_adap); } /* diff --git a/drivers/media/usb/cx231xx/cx231xx-input.c b/drivers/media/usb/cx231xx/cx231xx-input.c index eecf074b0a48..02ebeb16055f 100644 --- a/drivers/media/usb/cx231xx/cx231xx-input.c +++ b/drivers/media/usb/cx231xx/cx231xx-input.c @@ -24,7 +24,7 @@ #define MODULE_NAME "cx231xx-input" -static int get_key_isdbt(struct IR_i2c *ir, enum rc_type *protocol, +static int get_key_isdbt(struct IR_i2c *ir, enum rc_proto *protocol, u32 *pscancode, u8 *toggle) { int rc; @@ -50,7 +50,7 @@ static int get_key_isdbt(struct IR_i2c *ir, enum rc_type *protocol, dev_dbg(&ir->rc->dev, "cmd %02x, scan = %02x\n", cmd, scancode); - *protocol = RC_TYPE_OTHER; + *protocol = RC_PROTO_OTHER; *pscancode = scancode; *toggle = 0; return 1; @@ -91,7 +91,7 @@ int cx231xx_ir_init(struct cx231xx *dev) /* The i2c micro-controller only outputs the cmd part of NEC protocol */ dev->init_data.rc_dev->scancode_mask = 0xff; dev->init_data.rc_dev->driver_name = "cx231xx"; - dev->init_data.type = RC_BIT_NEC; + dev->init_data.type = RC_PROTO_BIT_NEC; info.addr = 0x30; /* Load and bind ir-kbd-i2c */ diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c index f67f86876625..179b8481a870 100644 --- a/drivers/media/usb/cx231xx/cx231xx-video.c +++ b/drivers/media/usb/cx231xx/cx231xx-video.c @@ -859,7 +859,7 @@ static void buffer_release(struct videobuf_queue *vq, free_buffer(vq, buf); } -static struct videobuf_queue_ops cx231xx_video_qops = { +static const struct videobuf_queue_ops cx231xx_video_qops = { .buf_setup = buffer_setup, .buf_prepare = buffer_prepare, .buf_queue = buffer_queue, diff --git a/drivers/media/usb/cx231xx/cx231xx.h 
b/drivers/media/usb/cx231xx/cx231xx.h index 986c64ba5b56..72d5937a087e 100644 --- a/drivers/media/usb/cx231xx/cx231xx.h +++ b/drivers/media/usb/cx231xx/cx231xx.h @@ -476,7 +476,7 @@ struct cx231xx_i2c { /* i2c i/o */ struct i2c_adapter i2c_adap; - u32 i2c_rc; + int i2c_rc; /* different settings for each bus */ u8 i2c_period; @@ -762,7 +762,7 @@ int cx231xx_reset_analog_tuner(struct cx231xx *dev); /* Provided by cx231xx-i2c.c */ void cx231xx_do_i2c_scan(struct cx231xx *dev, int i2c_port); int cx231xx_i2c_register(struct cx231xx_i2c *bus); -int cx231xx_i2c_unregister(struct cx231xx_i2c *bus); +void cx231xx_i2c_unregister(struct cx231xx_i2c *bus); int cx231xx_i2c_mux_create(struct cx231xx *dev); int cx231xx_i2c_mux_register(struct cx231xx *dev, int mux_no); void cx231xx_i2c_mux_unregister(struct cx231xx *dev); diff --git a/drivers/media/usb/dvb-usb-v2/af9015.c b/drivers/media/usb/dvb-usb-v2/af9015.c index 23bbbf367b51..8013659c41b1 100644 --- a/drivers/media/usb/dvb-usb-v2/af9015.c +++ b/drivers/media/usb/dvb-usb-v2/af9015.c @@ -1237,7 +1237,7 @@ static int af9015_rc_query(struct dvb_usb_device *d) /* Only process key if canary killed */ if (buf[16] != 0xff && buf[0] != 0x01) { - enum rc_type proto; + enum rc_proto proto; dev_dbg(&d->udev->dev, "%s: key pressed %*ph\n", __func__, 4, buf + 12); @@ -1253,13 +1253,13 @@ static int af9015_rc_query(struct dvb_usb_device *d) /* NEC */ state->rc_keycode = RC_SCANCODE_NEC(buf[12], buf[14]); - proto = RC_TYPE_NEC; + proto = RC_PROTO_NEC; } else { /* NEC extended*/ state->rc_keycode = RC_SCANCODE_NECX(buf[12] << 8 | buf[13], buf[14]); - proto = RC_TYPE_NECX; + proto = RC_PROTO_NECX; } } else { /* 32 bit NEC */ @@ -1267,7 +1267,7 @@ static int af9015_rc_query(struct dvb_usb_device *d) buf[13] << 16 | buf[14] << 8 | buf[15]); - proto = RC_TYPE_NEC32; + proto = RC_PROTO_NEC32; } rc_keydown(d->rc_dev, proto, state->rc_keycode, 0); } else { @@ -1336,7 +1336,8 @@ static int af9015_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc) if (!rc->map_name) rc->map_name = RC_MAP_EMPTY; - rc->allowed_protos = RC_BIT_NEC | RC_BIT_NECX | RC_BIT_NEC32; + rc->allowed_protos = RC_PROTO_BIT_NEC | RC_PROTO_BIT_NECX | + RC_PROTO_BIT_NEC32; rc->query = af9015_rc_query; rc->interval = 500; diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c index 4df9486e19b9..666d319d3d1a 100644 --- a/drivers/media/usb/dvb-usb-v2/af9035.c +++ b/drivers/media/usb/dvb-usb-v2/af9035.c @@ -1828,7 +1828,7 @@ static int af9035_rc_query(struct dvb_usb_device *d) { struct usb_interface *intf = d->intf; int ret; - enum rc_type proto; + enum rc_proto proto; u32 key; u8 buf[4]; struct usb_req req = { CMD_IR_GET, 0, 0, NULL, 4, buf }; @@ -1843,17 +1843,17 @@ static int af9035_rc_query(struct dvb_usb_device *d) if ((buf[0] + buf[1]) == 0xff) { /* NEC standard 16bit */ key = RC_SCANCODE_NEC(buf[0], buf[2]); - proto = RC_TYPE_NEC; + proto = RC_PROTO_NEC; } else { /* NEC extended 24bit */ key = RC_SCANCODE_NECX(buf[0] << 8 | buf[1], buf[2]); - proto = RC_TYPE_NECX; + proto = RC_PROTO_NECX; } } else { /* NEC full code 32bit */ key = RC_SCANCODE_NEC32(buf[0] << 24 | buf[1] << 16 | buf[2] << 8 | buf[3]); - proto = RC_TYPE_NEC32; + proto = RC_PROTO_NEC32; } dev_dbg(&intf->dev, "%*ph\n", 4, buf); @@ -1881,11 +1881,11 @@ static int af9035_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc) switch (state->ir_type) { case 0: /* NEC */ default: - rc->allowed_protos = RC_BIT_NEC | RC_BIT_NECX | - RC_BIT_NEC32; + rc->allowed_protos = RC_PROTO_BIT_NEC | + 
RC_PROTO_BIT_NECX | RC_PROTO_BIT_NEC32; break; case 1: /* RC6 */ - rc->allowed_protos = RC_BIT_RC6_MCE; + rc->allowed_protos = RC_PROTO_BIT_RC6_MCE; break; } @@ -2108,6 +2108,8 @@ static const struct usb_device_id af9035_id_table[] = { { DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_CTVDIGDUAL_V2, &af9035_props, "Digital Dual TV Receiver CTVDIGDUAL_V2", RC_MAP_IT913X_V1) }, + { DVB_USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_T1, + &af9035_props, "TerraTec T1", RC_MAP_IT913X_V1) }, /* XXX: that same ID [0ccd:0099] is used by af9015 driver too */ { DVB_USB_DEVICE(USB_VID_TERRATEC, 0x0099, &af9035_props, "TerraTec Cinergy T Stick Dual RC (rev. 2)", diff --git a/drivers/media/usb/dvb-usb-v2/anysee.c b/drivers/media/usb/dvb-usb-v2/anysee.c index 6795c0c609b1..20ee7eea2a91 100644 --- a/drivers/media/usb/dvb-usb-v2/anysee.c +++ b/drivers/media/usb/dvb-usb-v2/anysee.c @@ -1142,7 +1142,7 @@ static int anysee_rc_query(struct dvb_usb_device *d) if (ircode[0]) { dev_dbg(&d->udev->dev, "%s: key pressed %02x\n", __func__, ircode[1]); - rc_keydown(d->rc_dev, RC_TYPE_NEC, + rc_keydown(d->rc_dev, RC_PROTO_NEC, RC_SCANCODE_NEC(0x08, ircode[1]), 0); } @@ -1151,7 +1151,7 @@ static int anysee_rc_query(struct dvb_usb_device *d) static int anysee_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc) { - rc->allowed_protos = RC_BIT_NEC; + rc->allowed_protos = RC_PROTO_BIT_NEC; rc->query = anysee_rc_query; rc->interval = 250; /* windows driver uses 500ms */ diff --git a/drivers/media/usb/dvb-usb-v2/az6007.c b/drivers/media/usb/dvb-usb-v2/az6007.c index 50c07fe7dacb..1414d59e85ba 100644 --- a/drivers/media/usb/dvb-usb-v2/az6007.c +++ b/drivers/media/usb/dvb-usb-v2/az6007.c @@ -208,7 +208,7 @@ static int az6007_rc_query(struct dvb_usb_device *d) { struct az6007_device_state *st = d_to_priv(d); unsigned code; - enum rc_type proto; + enum rc_proto proto; az6007_read(d, AZ6007_READ_IR, 0, 0, st->data, 10); @@ -218,18 +218,18 @@ static int az6007_rc_query(struct dvb_usb_device *d) if ((st->data[3] ^ st->data[4]) == 0xff) { if ((st->data[1] ^ st->data[2]) == 0xff) { code = RC_SCANCODE_NEC(st->data[1], st->data[3]); - proto = RC_TYPE_NEC; + proto = RC_PROTO_NEC; } else { code = RC_SCANCODE_NECX(st->data[1] << 8 | st->data[2], st->data[3]); - proto = RC_TYPE_NECX; + proto = RC_PROTO_NECX; } } else { code = RC_SCANCODE_NEC32(st->data[1] << 24 | st->data[2] << 16 | st->data[3] << 8 | st->data[4]); - proto = RC_TYPE_NEC32; + proto = RC_PROTO_NEC32; } rc_keydown(d->rc_dev, proto, code, st->data[5]); @@ -241,7 +241,8 @@ static int az6007_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc) { pr_debug("Getting az6007 Remote Control properties\n"); - rc->allowed_protos = RC_BIT_NEC | RC_BIT_NECX | RC_BIT_NEC32; + rc->allowed_protos = RC_PROTO_BIT_NEC | RC_PROTO_BIT_NECX | + RC_PROTO_BIT_NEC32; rc->query = az6007_rc_query; rc->interval = 400; @@ -933,7 +934,7 @@ static struct dvb_usb_device_properties az6007_cablestar_hdci_props = { } }; -static struct usb_device_id az6007_usb_table[] = { +static const struct usb_device_id az6007_usb_table[] = { {DVB_USB_DEVICE(USB_VID_AZUREWAVE, USB_PID_AZUREWAVE_6007, &az6007_props, "Azurewave 6007", RC_MAP_EMPTY)}, {DVB_USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_H7, diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb.h b/drivers/media/usb/dvb-usb-v2/dvb_usb.h index 35f27e2e4e28..0005bdb2207d 100644 --- a/drivers/media/usb/dvb-usb-v2/dvb_usb.h +++ b/drivers/media/usb/dvb-usb-v2/dvb_usb.h @@ -138,7 +138,7 @@ struct dvb_usb_driver_info { struct dvb_usb_rc { const char *map_name; 
u64 allowed_protos; - int (*change_protocol)(struct rc_dev *dev, u64 *rc_type); + int (*change_protocol)(struct rc_dev *dev, u64 *rc_proto); int (*query) (struct dvb_usb_device *d); unsigned int interval; enum rc_driver_type driver_type; diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c index 955fb0d07507..096bb75a24e5 100644 --- a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c +++ b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c @@ -154,13 +154,12 @@ static int dvb_usbv2_remote_init(struct dvb_usb_device *d) } dev->dev.parent = &d->udev->dev; - dev->input_name = d->name; + dev->device_name = d->name; usb_make_path(d->udev, d->rc_phys, sizeof(d->rc_phys)); strlcat(d->rc_phys, "/ir0", sizeof(d->rc_phys)); dev->input_phys = d->rc_phys; usb_to_input_id(d->udev, &dev->input_id); - /* TODO: likely RC-core should took const char * */ - dev->driver_name = (char *) d->props->driver_name; + dev->driver_name = d->props->driver_name; dev->map_name = d->rc.map_name; dev->allowed_protocols = d->rc.allowed_protos; dev->change_protocol = d->rc.change_protocol; diff --git a/drivers/media/usb/dvb-usb-v2/dvbsky.c b/drivers/media/usb/dvb-usb-v2/dvbsky.c index 5730760e4e93..131b6c08e199 100644 --- a/drivers/media/usb/dvb-usb-v2/dvbsky.c +++ b/drivers/media/usb/dvb-usb-v2/dvbsky.c @@ -211,7 +211,7 @@ static int dvbsky_rc_query(struct dvb_usb_device *d) rc5_system = (code & 0x7C0) >> 6; toggle = (code & 0x800) ? 1 : 0; scancode = rc5_system << 8 | rc5_command; - rc_keydown(d->rc_dev, RC_TYPE_RC5, scancode, toggle); + rc_keydown(d->rc_dev, RC_PROTO_RC5, scancode, toggle); } return 0; } @@ -223,7 +223,7 @@ static int dvbsky_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc) return 0; } - rc->allowed_protos = RC_BIT_RC5; + rc->allowed_protos = RC_PROTO_BIT_RC5; rc->query = dvbsky_rc_query; rc->interval = 300; return 0; diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c index a91fdad8f8d4..5e320fa4a795 100644 --- a/drivers/media/usb/dvb-usb-v2/lmedm04.c +++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c @@ -347,8 +347,8 @@ static void lme2510_int_response(struct urb *lme_urb) ibuf[5]); deb_info(1, "INT Key = 0x%08x", key); - rc_keydown(adap_to_d(adap)->rc_dev, RC_TYPE_NEC32, key, - 0); + rc_keydown(adap_to_d(adap)->rc_dev, RC_PROTO_NEC32, key, + 0); break; case 0xbb: switch (st->tuner_config) { @@ -1232,7 +1232,7 @@ static int lme2510_get_stream_config(struct dvb_frontend *fe, u8 *ts_type, static int lme2510_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc) { - rc->allowed_protos = RC_BIT_NEC32; + rc->allowed_protos = RC_PROTO_BIT_NEC32; return 0; } diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c index b0d5904a4ea6..67953360fda5 100644 --- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c +++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c @@ -77,7 +77,9 @@ int mxl111sf_ctrl_msg(struct mxl111sf_state *state, dvb_usbv2_generic_rw(d, state->sndbuf, 1+wlen, state->rcvbuf, rlen); - memcpy(rbuf, state->rcvbuf, rlen); + if (rbuf) + memcpy(rbuf, state->rcvbuf, rlen); + mutex_unlock(&state->msg_lock); mxl_fail(ret); diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c index e16ca07acf1d..95a7b9123f8e 100644 --- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c +++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c @@ -1631,24 +1631,24 @@ static int rtl2831u_rc_query(struct dvb_usb_device *d) goto err; if (buf[4] & 0x01) { - enum rc_type proto; + enum rc_proto 
proto; if (buf[2] == (u8) ~buf[3]) { if (buf[0] == (u8) ~buf[1]) { /* NEC standard (16 bit) */ rc_code = RC_SCANCODE_NEC(buf[0], buf[2]); - proto = RC_TYPE_NEC; + proto = RC_PROTO_NEC; } else { /* NEC extended (24 bit) */ rc_code = RC_SCANCODE_NECX(buf[0] << 8 | buf[1], buf[2]); - proto = RC_TYPE_NECX; + proto = RC_PROTO_NECX; } } else { /* NEC full (32 bit) */ rc_code = RC_SCANCODE_NEC32(buf[0] << 24 | buf[1] << 16 | buf[2] << 8 | buf[3]); - proto = RC_TYPE_NEC32; + proto = RC_PROTO_NEC32; } rc_keydown(d->rc_dev, proto, rc_code, 0); @@ -1673,7 +1673,8 @@ static int rtl2831u_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc) { rc->map_name = RC_MAP_EMPTY; - rc->allowed_protos = RC_BIT_NEC | RC_BIT_NECX | RC_BIT_NEC32; + rc->allowed_protos = RC_PROTO_BIT_NEC | RC_PROTO_BIT_NECX | + RC_PROTO_BIT_NEC32; rc->query = rtl2831u_rc_query; rc->interval = 400; @@ -1778,7 +1779,7 @@ static int rtl2832u_get_rc_config(struct dvb_usb_device *d, /* load empty to enable rc */ if (!rc->map_name) rc->map_name = RC_MAP_EMPTY; - rc->allowed_protos = RC_BIT_ALL_IR_DECODER; + rc->allowed_protos = RC_PROTO_BIT_ALL_IR_DECODER; rc->driver_type = RC_DRIVER_IR_RAW; rc->query = rtl2832u_rc_query; rc->interval = 200; diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c index 99a3f3625944..37dea0adc695 100644 --- a/drivers/media/usb/dvb-usb/cxusb.c +++ b/drivers/media/usb/dvb-usb/cxusb.c @@ -458,7 +458,7 @@ static int cxusb_rc_query(struct dvb_usb_device *d) cxusb_ctrl_msg(d, CMD_GET_IR_CODE, NULL, 0, ircode, 4); if (ircode[2] || ircode[3]) - rc_keydown(d->rc_dev, RC_TYPE_NEC, + rc_keydown(d->rc_dev, RC_PROTO_NEC, RC_SCANCODE_NEC(~ircode[2] & 0xff, ircode[3]), 0); return 0; } @@ -473,7 +473,7 @@ static int cxusb_bluebird2_rc_query(struct dvb_usb_device *d) return 0; if (ircode[1] || ircode[2]) - rc_keydown(d->rc_dev, RC_TYPE_NEC, + rc_keydown(d->rc_dev, RC_PROTO_NEC, RC_SCANCODE_NEC(~ircode[1] & 0xff, ircode[2]), 0); return 0; } @@ -486,7 +486,7 @@ static int cxusb_d680_dmb_rc_query(struct dvb_usb_device *d) return 0; if (ircode[0] || ircode[1]) - rc_keydown(d->rc_dev, RC_TYPE_UNKNOWN, + rc_keydown(d->rc_dev, RC_PROTO_UNKNOWN, RC_SCANCODE_RC5(ircode[0], ircode[1]), 0); return 0; } @@ -1646,7 +1646,7 @@ static struct dvb_usb_device_properties cxusb_bluebird_lgh064f_properties = { .rc_codes = RC_MAP_DVICO_PORTABLE, .module_name = KBUILD_MODNAME, .rc_query = cxusb_rc_query, - .allowed_protos = RC_BIT_NEC, + .allowed_protos = RC_PROTO_BIT_NEC, }, .generic_bulk_ctrl_endpoint = 0x01, @@ -1703,7 +1703,7 @@ static struct dvb_usb_device_properties cxusb_bluebird_dee1601_properties = { .rc_codes = RC_MAP_DVICO_MCE, .module_name = KBUILD_MODNAME, .rc_query = cxusb_rc_query, - .allowed_protos = RC_BIT_NEC, + .allowed_protos = RC_PROTO_BIT_NEC, }, .generic_bulk_ctrl_endpoint = 0x01, @@ -1768,7 +1768,7 @@ static struct dvb_usb_device_properties cxusb_bluebird_lgz201_properties = { .rc_codes = RC_MAP_DVICO_PORTABLE, .module_name = KBUILD_MODNAME, .rc_query = cxusb_rc_query, - .allowed_protos = RC_BIT_NEC, + .allowed_protos = RC_PROTO_BIT_NEC, }, .generic_bulk_ctrl_endpoint = 0x01, @@ -1824,7 +1824,7 @@ static struct dvb_usb_device_properties cxusb_bluebird_dtt7579_properties = { .rc_codes = RC_MAP_DVICO_PORTABLE, .module_name = KBUILD_MODNAME, .rc_query = cxusb_rc_query, - .allowed_protos = RC_BIT_NEC, + .allowed_protos = RC_PROTO_BIT_NEC, }, .generic_bulk_ctrl_endpoint = 0x01, @@ -1879,7 +1879,7 @@ static struct dvb_usb_device_properties cxusb_bluebird_dualdig4_properties = { .rc_codes = 
RC_MAP_DVICO_MCE, .module_name = KBUILD_MODNAME, .rc_query = cxusb_bluebird2_rc_query, - .allowed_protos = RC_BIT_NEC, + .allowed_protos = RC_PROTO_BIT_NEC, }, .num_device_descs = 1, @@ -1933,7 +1933,7 @@ static struct dvb_usb_device_properties cxusb_bluebird_nano2_properties = { .rc_codes = RC_MAP_DVICO_PORTABLE, .module_name = KBUILD_MODNAME, .rc_query = cxusb_bluebird2_rc_query, - .allowed_protos = RC_BIT_NEC, + .allowed_protos = RC_PROTO_BIT_NEC, }, .num_device_descs = 1, @@ -1989,7 +1989,7 @@ static struct dvb_usb_device_properties cxusb_bluebird_nano2_needsfirmware_prope .rc_codes = RC_MAP_DVICO_PORTABLE, .module_name = KBUILD_MODNAME, .rc_query = cxusb_rc_query, - .allowed_protos = RC_BIT_NEC, + .allowed_protos = RC_PROTO_BIT_NEC, }, .num_device_descs = 1, @@ -2088,7 +2088,7 @@ struct dvb_usb_device_properties cxusb_bluebird_dualdig4_rev2_properties = { .rc_codes = RC_MAP_DVICO_MCE, .module_name = KBUILD_MODNAME, .rc_query = cxusb_rc_query, - .allowed_protos = RC_BIT_NEC, + .allowed_protos = RC_PROTO_BIT_NEC, }, .num_device_descs = 1, @@ -2142,7 +2142,7 @@ static struct dvb_usb_device_properties cxusb_d680_dmb_properties = { .rc_codes = RC_MAP_D680_DMB, .module_name = KBUILD_MODNAME, .rc_query = cxusb_d680_dmb_rc_query, - .allowed_protos = RC_BIT_UNKNOWN, + .allowed_protos = RC_PROTO_BIT_UNKNOWN, }, .num_device_descs = 1, @@ -2197,7 +2197,7 @@ static struct dvb_usb_device_properties cxusb_mygica_d689_properties = { .rc_codes = RC_MAP_D680_DMB, .module_name = KBUILD_MODNAME, .rc_query = cxusb_d680_dmb_rc_query, - .allowed_protos = RC_BIT_UNKNOWN, + .allowed_protos = RC_PROTO_BIT_UNKNOWN, }, .num_device_descs = 1, @@ -2251,7 +2251,7 @@ static struct dvb_usb_device_properties cxusb_mygica_t230_properties = { .rc_codes = RC_MAP_TOTAL_MEDIA_IN_HAND_02, .module_name = KBUILD_MODNAME, .rc_query = cxusb_d680_dmb_rc_query, - .allowed_protos = RC_BIT_UNKNOWN, + .allowed_protos = RC_PROTO_BIT_UNKNOWN, }, .num_device_descs = 1, @@ -2305,7 +2305,7 @@ static struct dvb_usb_device_properties cxusb_mygica_t230c_properties = { .rc_codes = RC_MAP_TOTAL_MEDIA_IN_HAND_02, .module_name = KBUILD_MODNAME, .rc_query = cxusb_d680_dmb_rc_query, - .allowed_protos = RC_BIT_UNKNOWN, + .allowed_protos = RC_PROTO_BIT_UNKNOWN, }, .num_device_descs = 1, diff --git a/drivers/media/usb/dvb-usb/dib0700.h b/drivers/media/usb/dvb-usb/dib0700.h index 8fd8f5b489d2..f89ab3b5a6c4 100644 --- a/drivers/media/usb/dvb-usb/dib0700.h +++ b/drivers/media/usb/dvb-usb/dib0700.h @@ -64,7 +64,7 @@ extern int dib0700_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff); extern struct i2c_algorithm dib0700_i2c_algo; extern int dib0700_identify_state(struct usb_device *udev, struct dvb_usb_device_properties *props, struct dvb_usb_device_description **desc, int *cold); -extern int dib0700_change_protocol(struct rc_dev *dev, u64 *rc_type); +extern int dib0700_change_protocol(struct rc_dev *dev, u64 *rc_proto); extern int dib0700_set_i2c_speed(struct dvb_usb_device *d, u16 scl_kHz); extern int dib0700_device_count; diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c index bea1b4764a66..1ee7ec558293 100644 --- a/drivers/media/usb/dvb-usb/dib0700_core.c +++ b/drivers/media/usb/dvb-usb/dib0700_core.c @@ -638,7 +638,7 @@ int dib0700_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) return ret; } -int dib0700_change_protocol(struct rc_dev *rc, u64 *rc_type) +int dib0700_change_protocol(struct rc_dev *rc, u64 *rc_proto) { struct dvb_usb_device *d = rc->priv; struct dib0700_state *st = d->priv; 
@@ -654,19 +654,19 @@ int dib0700_change_protocol(struct rc_dev *rc, u64 *rc_type) st->buf[2] = 0; /* Set the IR mode */ - if (*rc_type & RC_BIT_RC5) { + if (*rc_proto & RC_PROTO_BIT_RC5) { new_proto = 1; - *rc_type = RC_BIT_RC5; - } else if (*rc_type & RC_BIT_NEC) { + *rc_proto = RC_PROTO_BIT_RC5; + } else if (*rc_proto & RC_PROTO_BIT_NEC) { new_proto = 0; - *rc_type = RC_BIT_NEC; - } else if (*rc_type & RC_BIT_RC6_MCE) { + *rc_proto = RC_PROTO_BIT_NEC; + } else if (*rc_proto & RC_PROTO_BIT_RC6_MCE) { if (st->fw_version < 0x10200) { ret = -EINVAL; goto out; } new_proto = 2; - *rc_type = RC_BIT_RC6_MCE; + *rc_proto = RC_PROTO_BIT_RC6_MCE; } else { ret = -EINVAL; goto out; @@ -680,7 +680,7 @@ int dib0700_change_protocol(struct rc_dev *rc, u64 *rc_type) goto out; } - d->props.rc.core.protocol = *rc_type; + d->props.rc.core.protocol = *rc_proto; out: mutex_unlock(&d->usb_mutex); @@ -712,7 +712,7 @@ static void dib0700_rc_urb_completion(struct urb *purb) { struct dvb_usb_device *d = purb->context; struct dib0700_rc_response *poll_reply; - enum rc_type protocol; + enum rc_proto protocol; u32 keycode; u8 toggle; @@ -745,7 +745,7 @@ static void dib0700_rc_urb_completion(struct urb *purb) purb->actual_length); switch (d->props.rc.core.protocol) { - case RC_BIT_NEC: + case RC_PROTO_BIT_NEC: toggle = 0; /* NEC protocol sends repeat code as 0 0 0 FF */ @@ -764,25 +764,25 @@ static void dib0700_rc_urb_completion(struct urb *purb) poll_reply->nec.not_system << 16 | poll_reply->nec.data << 8 | poll_reply->nec.not_data); - protocol = RC_TYPE_NEC32; + protocol = RC_PROTO_NEC32; } else if ((poll_reply->nec.system ^ poll_reply->nec.not_system) != 0xff) { deb_data("NEC extended protocol\n"); keycode = RC_SCANCODE_NECX(poll_reply->nec.system << 8 | poll_reply->nec.not_system, poll_reply->nec.data); - protocol = RC_TYPE_NECX; + protocol = RC_PROTO_NECX; } else { deb_data("NEC normal protocol\n"); keycode = RC_SCANCODE_NEC(poll_reply->nec.system, poll_reply->nec.data); - protocol = RC_TYPE_NEC; + protocol = RC_PROTO_NEC; } break; default: deb_data("RC5 protocol\n"); - protocol = RC_TYPE_RC5; + protocol = RC_PROTO_RC5; toggle = poll_reply->report_id; keycode = RC_SCANCODE_RC5(poll_reply->rc5.system, poll_reply->rc5.data); diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c index 6a57fc6d3472..6020170fe99a 100644 --- a/drivers/media/usb/dvb-usb/dib0700_devices.c +++ b/drivers/media/usb/dvb-usb/dib0700_devices.c @@ -514,7 +514,7 @@ static int stk7700ph_tuner_attach(struct dvb_usb_adapter *adap) */ static int dib0700_rc_query_old_firmware(struct dvb_usb_device *d) { - enum rc_type protocol; + enum rc_proto protocol; u32 scancode; u8 toggle; int i; @@ -547,7 +547,7 @@ static int dib0700_rc_query_old_firmware(struct dvb_usb_device *d) dib0700_rc_setup(d, NULL); /* reset ir sensor data to prevent false events */ switch (d->props.rc.core.protocol) { - case RC_BIT_NEC: + case RC_PROTO_BIT_NEC: /* NEC protocol sends repeat code as 0 0 0 FF */ if ((st->buf[3 - 2] == 0x00) && (st->buf[3 - 3] == 0x00) && (st->buf[3] == 0xff)) { @@ -555,14 +555,14 @@ static int dib0700_rc_query_old_firmware(struct dvb_usb_device *d) return 0; } - protocol = RC_TYPE_NEC; + protocol = RC_PROTO_NEC; scancode = RC_SCANCODE_NEC(st->buf[3 - 2], st->buf[3 - 3]); toggle = 0; break; default: /* RC-5 protocol changes toggle bit on new keypress */ - protocol = RC_TYPE_RC5; + protocol = RC_PROTO_RC5; scancode = RC_SCANCODE_RC5(st->buf[3 - 2], st->buf[3 - 3]); toggle = st->buf[3 - 1]; break; @@ -3909,9 
+3909,9 @@ struct dvb_usb_device_properties dib0700_devices[] = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .rc_query = dib0700_rc_query_old_firmware, - .allowed_protos = RC_BIT_RC5 | - RC_BIT_RC6_MCE | - RC_BIT_NEC, + .allowed_protos = RC_PROTO_BIT_RC5 | + RC_PROTO_BIT_RC6_MCE | + RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, @@ -3949,9 +3949,9 @@ struct dvb_usb_device_properties dib0700_devices[] = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .rc_query = dib0700_rc_query_old_firmware, - .allowed_protos = RC_BIT_RC5 | - RC_BIT_RC6_MCE | - RC_BIT_NEC, + .allowed_protos = RC_PROTO_BIT_RC5 | + RC_PROTO_BIT_RC6_MCE | + RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, @@ -4014,9 +4014,9 @@ struct dvb_usb_device_properties dib0700_devices[] = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .rc_query = dib0700_rc_query_old_firmware, - .allowed_protos = RC_BIT_RC5 | - RC_BIT_RC6_MCE | - RC_BIT_NEC, + .allowed_protos = RC_PROTO_BIT_RC5 | + RC_PROTO_BIT_RC6_MCE | + RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, @@ -4059,9 +4059,9 @@ struct dvb_usb_device_properties dib0700_devices[] = { .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, - .allowed_protos = RC_BIT_RC5 | - RC_BIT_RC6_MCE | - RC_BIT_NEC, + .allowed_protos = RC_PROTO_BIT_RC5 | + RC_PROTO_BIT_RC6_MCE | + RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, @@ -4140,9 +4140,9 @@ struct dvb_usb_device_properties dib0700_devices[] = { .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, - .allowed_protos = RC_BIT_RC5 | - RC_BIT_RC6_MCE | - RC_BIT_NEC, + .allowed_protos = RC_PROTO_BIT_RC5 | + RC_PROTO_BIT_RC6_MCE | + RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, @@ -4185,9 +4185,9 @@ struct dvb_usb_device_properties dib0700_devices[] = { .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, - .allowed_protos = RC_BIT_RC5 | - RC_BIT_RC6_MCE | - RC_BIT_NEC, + .allowed_protos = RC_PROTO_BIT_RC5 | + RC_PROTO_BIT_RC6_MCE | + RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, @@ -4242,9 +4242,9 @@ struct dvb_usb_device_properties dib0700_devices[] = { .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, - .allowed_protos = RC_BIT_RC5 | - RC_BIT_RC6_MCE | - RC_BIT_NEC, + .allowed_protos = RC_PROTO_BIT_RC5 | + RC_PROTO_BIT_RC6_MCE | + RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, @@ -4308,9 +4308,9 @@ struct dvb_usb_device_properties dib0700_devices[] = { .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, - .allowed_protos = RC_BIT_RC5 | - RC_BIT_RC6_MCE | - RC_BIT_NEC, + .allowed_protos = RC_PROTO_BIT_RC5 | + RC_PROTO_BIT_RC6_MCE | + RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, @@ -4357,9 +4357,9 @@ struct dvb_usb_device_properties dib0700_devices[] = { .rc_codes = RC_MAP_DIB0700_NEC_TABLE, .module_name = "dib0700", .rc_query = 
dib0700_rc_query_old_firmware, - .allowed_protos = RC_BIT_RC5 | - RC_BIT_RC6_MCE | - RC_BIT_NEC, + .allowed_protos = RC_PROTO_BIT_RC5 | + RC_PROTO_BIT_RC6_MCE | + RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, @@ -4430,9 +4430,9 @@ struct dvb_usb_device_properties dib0700_devices[] = { .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, - .allowed_protos = RC_BIT_RC5 | - RC_BIT_RC6_MCE | - RC_BIT_NEC, + .allowed_protos = RC_PROTO_BIT_RC5 | + RC_PROTO_BIT_RC6_MCE | + RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, @@ -4466,9 +4466,9 @@ struct dvb_usb_device_properties dib0700_devices[] = { .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, - .allowed_protos = RC_BIT_RC5 | - RC_BIT_RC6_MCE | - RC_BIT_NEC, + .allowed_protos = RC_PROTO_BIT_RC5 | + RC_PROTO_BIT_RC6_MCE | + RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, @@ -4542,9 +4542,9 @@ struct dvb_usb_device_properties dib0700_devices[] = { .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, - .allowed_protos = RC_BIT_RC5 | - RC_BIT_RC6_MCE | - RC_BIT_NEC, + .allowed_protos = RC_PROTO_BIT_RC5 | + RC_PROTO_BIT_RC6_MCE | + RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, @@ -4586,9 +4586,9 @@ struct dvb_usb_device_properties dib0700_devices[] = { .rc_codes = RC_MAP_DIB0700_NEC_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, - .allowed_protos = RC_BIT_RC5 | - RC_BIT_RC6_MCE | - RC_BIT_NEC, + .allowed_protos = RC_PROTO_BIT_RC5 | + RC_PROTO_BIT_RC6_MCE | + RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, @@ -4635,9 +4635,9 @@ struct dvb_usb_device_properties dib0700_devices[] = { .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, - .allowed_protos = RC_BIT_RC5 | - RC_BIT_RC6_MCE | - RC_BIT_NEC, + .allowed_protos = RC_PROTO_BIT_RC5 | + RC_PROTO_BIT_RC6_MCE | + RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, @@ -4672,9 +4672,9 @@ struct dvb_usb_device_properties dib0700_devices[] = { .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, - .allowed_protos = RC_BIT_RC5 | - RC_BIT_RC6_MCE | - RC_BIT_NEC, + .allowed_protos = RC_PROTO_BIT_RC5 | + RC_PROTO_BIT_RC6_MCE | + RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, @@ -4709,9 +4709,9 @@ struct dvb_usb_device_properties dib0700_devices[] = { .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, - .allowed_protos = RC_BIT_RC5 | - RC_BIT_RC6_MCE | - RC_BIT_NEC, + .allowed_protos = RC_PROTO_BIT_RC5 | + RC_PROTO_BIT_RC6_MCE | + RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, @@ -4746,9 +4746,9 @@ struct dvb_usb_device_properties dib0700_devices[] = { .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, - .allowed_protos = RC_BIT_RC5 | - RC_BIT_RC6_MCE | - RC_BIT_NEC, + .allowed_protos = RC_PROTO_BIT_RC5 | + RC_PROTO_BIT_RC6_MCE | + RC_PROTO_BIT_NEC, 
.change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, @@ -4783,9 +4783,9 @@ struct dvb_usb_device_properties dib0700_devices[] = { .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, - .allowed_protos = RC_BIT_RC5 | - RC_BIT_RC6_MCE | - RC_BIT_NEC, + .allowed_protos = RC_PROTO_BIT_RC5 | + RC_PROTO_BIT_RC6_MCE | + RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, @@ -4820,9 +4820,9 @@ struct dvb_usb_device_properties dib0700_devices[] = { .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, - .allowed_protos = RC_BIT_RC5 | - RC_BIT_RC6_MCE | - RC_BIT_NEC, + .allowed_protos = RC_PROTO_BIT_RC5 | + RC_PROTO_BIT_RC6_MCE | + RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, @@ -4871,9 +4871,9 @@ struct dvb_usb_device_properties dib0700_devices[] = { .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, - .allowed_protos = RC_BIT_RC5 | - RC_BIT_RC6_MCE | - RC_BIT_NEC, + .allowed_protos = RC_PROTO_BIT_RC5 | + RC_PROTO_BIT_RC6_MCE | + RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, @@ -4906,9 +4906,9 @@ struct dvb_usb_device_properties dib0700_devices[] = { .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, - .allowed_protos = RC_BIT_RC5 | - RC_BIT_RC6_MCE | - RC_BIT_NEC, + .allowed_protos = RC_PROTO_BIT_RC5 | + RC_PROTO_BIT_RC6_MCE | + RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, @@ -4943,9 +4943,9 @@ struct dvb_usb_device_properties dib0700_devices[] = { .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, - .allowed_protos = RC_BIT_RC5 | - RC_BIT_RC6_MCE | - RC_BIT_NEC, + .allowed_protos = RC_PROTO_BIT_RC5 | + RC_PROTO_BIT_RC6_MCE | + RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, @@ -4981,9 +4981,9 @@ struct dvb_usb_device_properties dib0700_devices[] = { .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, - .allowed_protos = RC_BIT_RC5 | - RC_BIT_RC6_MCE | - RC_BIT_NEC, + .allowed_protos = RC_PROTO_BIT_RC5 | + RC_PROTO_BIT_RC6_MCE | + RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, @@ -5035,9 +5035,9 @@ struct dvb_usb_device_properties dib0700_devices[] = { .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, - .allowed_protos = RC_BIT_RC5 | - RC_BIT_RC6_MCE | - RC_BIT_NEC, + .allowed_protos = RC_PROTO_BIT_RC5 | + RC_PROTO_BIT_RC6_MCE | + RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, diff --git a/drivers/media/usb/dvb-usb/dtt200u.c b/drivers/media/usb/dvb-usb/dtt200u.c index fcbff7fb0c4e..512370786696 100644 --- a/drivers/media/usb/dvb-usb/dtt200u.c +++ b/drivers/media/usb/dvb-usb/dtt200u.c @@ -100,14 +100,14 @@ static int dtt200u_rc_query(struct dvb_usb_device *d) goto ret; if (st->data[0] == 1) { - enum rc_type proto = RC_TYPE_NEC; + enum rc_proto proto = RC_PROTO_NEC; scancode = st->data[1]; if ((u8) ~st->data[1] != st->data[2]) { /* Extended NEC */ scancode = scancode << 8; scancode |= st->data[2]; - proto = RC_TYPE_NECX; + 
proto = RC_PROTO_NECX; } scancode = scancode << 8; scancode |= st->data[3]; @@ -213,7 +213,7 @@ static struct dvb_usb_device_properties dtt200u_properties = { .rc_interval = 300, .rc_codes = RC_MAP_DTT200U, .rc_query = dtt200u_rc_query, - .allowed_protos = RC_BIT_NEC, + .allowed_protos = RC_PROTO_BIT_NEC, }, .generic_bulk_ctrl_endpoint = 0x01, @@ -265,7 +265,7 @@ static struct dvb_usb_device_properties wt220u_properties = { .rc_interval = 300, .rc_codes = RC_MAP_DTT200U, .rc_query = dtt200u_rc_query, - .allowed_protos = RC_BIT_NEC, + .allowed_protos = RC_PROTO_BIT_NEC, }, .generic_bulk_ctrl_endpoint = 0x01, @@ -317,7 +317,7 @@ static struct dvb_usb_device_properties wt220u_fc_properties = { .rc_interval = 300, .rc_codes = RC_MAP_DTT200U, .rc_query = dtt200u_rc_query, - .allowed_protos = RC_BIT_NEC, + .allowed_protos = RC_PROTO_BIT_NEC, }, .generic_bulk_ctrl_endpoint = 0x01, @@ -369,7 +369,7 @@ static struct dvb_usb_device_properties wt220u_zl0353_properties = { .rc_interval = 300, .rc_codes = RC_MAP_DTT200U, .rc_query = dtt200u_rc_query, - .allowed_protos = RC_BIT_NEC, + .allowed_protos = RC_PROTO_BIT_NEC, }, .generic_bulk_ctrl_endpoint = 0x01, diff --git a/drivers/media/usb/dvb-usb/dvb-usb-remote.c b/drivers/media/usb/dvb-usb/dvb-usb-remote.c index f05f1fc80729..0b03f9bd9c26 100644 --- a/drivers/media/usb/dvb-usb/dvb-usb-remote.c +++ b/drivers/media/usb/dvb-usb/dvb-usb-remote.c @@ -279,7 +279,7 @@ static int rc_core_dvb_usb_remote_init(struct dvb_usb_device *d) dev->change_protocol = d->props.rc.core.change_protocol; dev->allowed_protocols = d->props.rc.core.allowed_protos; usb_to_input_id(d->udev, &dev->input_id); - dev->input_name = "IR-receiver inside an USB DVB receiver"; + dev->device_name = "IR-receiver inside an USB DVB receiver"; dev->input_phys = d->rc_phys; dev->dev.parent = &d->udev->dev; dev->priv = d; diff --git a/drivers/media/usb/dvb-usb/dvb-usb.h b/drivers/media/usb/dvb-usb/dvb-usb.h index 67f898b6f6d0..72468fdffa18 100644 --- a/drivers/media/usb/dvb-usb/dvb-usb.h +++ b/drivers/media/usb/dvb-usb/dvb-usb.h @@ -202,7 +202,7 @@ struct dvb_rc { u64 protocol; u64 allowed_protos; enum rc_driver_type driver_type; - int (*change_protocol)(struct rc_dev *dev, u64 *rc_type); + int (*change_protocol)(struct rc_dev *dev, u64 *rc_proto); char *module_name; int (*rc_query) (struct dvb_usb_device *d); int rc_interval; diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c index 57b187240110..b421329b21fa 100644 --- a/drivers/media/usb/dvb-usb/dw2102.c +++ b/drivers/media/usb/dvb-usb/dw2102.c @@ -1671,7 +1671,7 @@ static int dw2102_rc_query(struct dvb_usb_device *d) if (msg.buf[0] != 0xff) { deb_rc("%s: rc code: %x, %x\n", __func__, key[0], key[1]); - rc_keydown(d->rc_dev, RC_TYPE_UNKNOWN, key[0], 0); + rc_keydown(d->rc_dev, RC_PROTO_UNKNOWN, key[0], 0); } } @@ -1692,7 +1692,8 @@ static int prof_rc_query(struct dvb_usb_device *d) if (msg.buf[0] != 0xff) { deb_rc("%s: rc code: %x, %x\n", __func__, key[0], key[1]); - rc_keydown(d->rc_dev, RC_TYPE_UNKNOWN, key[0]^0xff, 0); + rc_keydown(d->rc_dev, RC_PROTO_UNKNOWN, key[0] ^ 0xff, + 0); } } @@ -1713,7 +1714,7 @@ static int su3000_rc_query(struct dvb_usb_device *d) if (msg.buf[0] != 0xff) { deb_rc("%s: rc code: %x, %x\n", __func__, key[0], key[1]); - rc_keydown(d->rc_dev, RC_TYPE_RC5, + rc_keydown(d->rc_dev, RC_PROTO_RC5, RC_SCANCODE_RC5(key[1], key[0]), 0); } } @@ -1912,7 +1913,7 @@ static struct dvb_usb_device_properties dw2102_properties = { .rc_interval = 150, .rc_codes = RC_MAP_DM1105_NEC, .module_name = 
"dw2102", - .allowed_protos = RC_BIT_NEC, + .allowed_protos = RC_PROTO_BIT_NEC, .rc_query = dw2102_rc_query, }, @@ -1967,7 +1968,7 @@ static struct dvb_usb_device_properties dw2104_properties = { .rc_interval = 150, .rc_codes = RC_MAP_DM1105_NEC, .module_name = "dw2102", - .allowed_protos = RC_BIT_NEC, + .allowed_protos = RC_PROTO_BIT_NEC, .rc_query = dw2102_rc_query, }, @@ -2018,7 +2019,7 @@ static struct dvb_usb_device_properties dw3101_properties = { .rc_interval = 150, .rc_codes = RC_MAP_DM1105_NEC, .module_name = "dw2102", - .allowed_protos = RC_BIT_NEC, + .allowed_protos = RC_PROTO_BIT_NEC, .rc_query = dw2102_rc_query, }, @@ -2067,7 +2068,7 @@ static struct dvb_usb_device_properties s6x0_properties = { .rc_interval = 150, .rc_codes = RC_MAP_TEVII_NEC, .module_name = "dw2102", - .allowed_protos = RC_BIT_NEC, + .allowed_protos = RC_PROTO_BIT_NEC, .rc_query = dw2102_rc_query, }, @@ -2103,46 +2104,46 @@ static struct dvb_usb_device_properties s6x0_properties = { }; static struct dvb_usb_device_properties *p1100; -static struct dvb_usb_device_description d1100 = { +static const struct dvb_usb_device_description d1100 = { "Prof 1100 USB ", {&dw2102_table[PROF_1100], NULL}, {NULL}, }; static struct dvb_usb_device_properties *s660; -static struct dvb_usb_device_description d660 = { +static const struct dvb_usb_device_description d660 = { "TeVii S660 USB", {&dw2102_table[TEVII_S660], NULL}, {NULL}, }; -static struct dvb_usb_device_description d480_1 = { +static const struct dvb_usb_device_description d480_1 = { "TeVii S480.1 USB", {&dw2102_table[TEVII_S480_1], NULL}, {NULL}, }; -static struct dvb_usb_device_description d480_2 = { +static const struct dvb_usb_device_description d480_2 = { "TeVii S480.2 USB", {&dw2102_table[TEVII_S480_2], NULL}, {NULL}, }; static struct dvb_usb_device_properties *p7500; -static struct dvb_usb_device_description d7500 = { +static const struct dvb_usb_device_description d7500 = { "Prof 7500 USB DVB-S2", {&dw2102_table[PROF_7500], NULL}, {NULL}, }; static struct dvb_usb_device_properties *s421; -static struct dvb_usb_device_description d421 = { +static const struct dvb_usb_device_description d421 = { "TeVii S421 PCI", {&dw2102_table[TEVII_S421], NULL}, {NULL}, }; -static struct dvb_usb_device_description d632 = { +static const struct dvb_usb_device_description d632 = { "TeVii S632 USB", {&dw2102_table[TEVII_S632], NULL}, {NULL}, @@ -2161,7 +2162,7 @@ static struct dvb_usb_device_properties su3000_properties = { .rc_interval = 150, .rc_codes = RC_MAP_SU3000, .module_name = "dw2102", - .allowed_protos = RC_BIT_RC5, + .allowed_protos = RC_PROTO_BIT_RC5, .rc_query = su3000_rc_query, }, @@ -2230,7 +2231,7 @@ static struct dvb_usb_device_properties t220_properties = { .rc_interval = 150, .rc_codes = RC_MAP_SU3000, .module_name = "dw2102", - .allowed_protos = RC_BIT_RC5, + .allowed_protos = RC_PROTO_BIT_RC5, .rc_query = su3000_rc_query, }, @@ -2279,7 +2280,7 @@ static struct dvb_usb_device_properties tt_s2_4600_properties = { .rc_interval = 250, .rc_codes = RC_MAP_TT_1500, .module_name = "dw2102", - .allowed_protos = RC_BIT_RC5, + .allowed_protos = RC_PROTO_BIT_RC5, .rc_query = su3000_rc_query, }, @@ -2334,10 +2335,12 @@ static struct dvb_usb_device_properties tt_s2_4600_properties = { static int dw2102_probe(struct usb_interface *intf, const struct usb_device_id *id) { + int retval = -ENOMEM; p1100 = kmemdup(&s6x0_properties, sizeof(struct dvb_usb_device_properties), GFP_KERNEL); if (!p1100) - return -ENOMEM; + goto err0; + /* copy default structure */ /* fill only 
different fields */ p1100->firmware = P1100_FIRMWARE; @@ -2348,10 +2351,9 @@ static int dw2102_probe(struct usb_interface *intf, s660 = kmemdup(&s6x0_properties, sizeof(struct dvb_usb_device_properties), GFP_KERNEL); - if (!s660) { - kfree(p1100); - return -ENOMEM; - } + if (!s660) + goto err1; + s660->firmware = S660_FIRMWARE; s660->num_device_descs = 3; s660->devices[0] = d660; @@ -2361,11 +2363,9 @@ static int dw2102_probe(struct usb_interface *intf, p7500 = kmemdup(&s6x0_properties, sizeof(struct dvb_usb_device_properties), GFP_KERNEL); - if (!p7500) { - kfree(p1100); - kfree(s660); - return -ENOMEM; - } + if (!p7500) + goto err2; + p7500->firmware = P7500_FIRMWARE; p7500->devices[0] = d7500; p7500->rc.core.rc_query = prof_rc_query; @@ -2375,12 +2375,9 @@ static int dw2102_probe(struct usb_interface *intf, s421 = kmemdup(&su3000_properties, sizeof(struct dvb_usb_device_properties), GFP_KERNEL); - if (!s421) { - kfree(p1100); - kfree(s660); - kfree(p7500); - return -ENOMEM; - } + if (!s421) + goto err3; + s421->num_device_descs = 2; s421->devices[0] = d421; s421->devices[1] = d632; @@ -2410,7 +2407,16 @@ static int dw2102_probe(struct usb_interface *intf, THIS_MODULE, NULL, adapter_nr)) return 0; - return -ENODEV; + retval = -ENODEV; + kfree(s421); +err3: + kfree(p7500); +err2: + kfree(s660); +err1: + kfree(p1100); +err0: + return retval; } static void dw2102_disconnect(struct usb_interface *intf) diff --git a/drivers/media/usb/dvb-usb/m920x.c b/drivers/media/usb/dvb-usb/m920x.c index 70672e1e5ec7..32081c2ce0da 100644 --- a/drivers/media/usb/dvb-usb/m920x.c +++ b/drivers/media/usb/dvb-usb/m920x.c @@ -241,7 +241,7 @@ static int m920x_rc_core_query(struct dvb_usb_device *d) else if (state == REMOTE_KEY_REPEAT) rc_repeat(d->rc_dev); else - rc_keydown(d->rc_dev, RC_TYPE_UNKNOWN, rc_state[1], 0); + rc_keydown(d->rc_dev, RC_PROTO_UNKNOWN, rc_state[1], 0); out: kfree(rc_state); @@ -1208,7 +1208,7 @@ static struct dvb_usb_device_properties vp7049_properties = { .rc_interval = 150, .rc_codes = RC_MAP_TWINHAN_VP1027_DVBS, .rc_query = m920x_rc_core_query, - .allowed_protos = RC_BIT_UNKNOWN, + .allowed_protos = RC_PROTO_BIT_UNKNOWN, }, .size_of_priv = sizeof(struct m920x_state), diff --git a/drivers/media/usb/dvb-usb/pctv452e.c b/drivers/media/usb/dvb-usb/pctv452e.c index d54ebe7e0215..601ade7ca48d 100644 --- a/drivers/media/usb/dvb-usb/pctv452e.c +++ b/drivers/media/usb/dvb-usb/pctv452e.c @@ -600,7 +600,7 @@ static int pctv452e_rc_query(struct dvb_usb_device *d) info("%s: cmd=0x%02x sys=0x%02x\n", __func__, rx[6], rx[7]); - rc_keydown(d->rc_dev, RC_TYPE_RC5, state->last_rc_key, 0); + rc_keydown(d->rc_dev, RC_PROTO_RC5, state->last_rc_key, 0); } else if (state->last_rc_key) { rc_keyup(d->rc_dev); state->last_rc_key = 0; @@ -958,7 +958,7 @@ static struct dvb_usb_device_properties pctv452e_properties = { .rc.core = { .rc_codes = RC_MAP_DIB0700_RC5_TABLE, - .allowed_protos = RC_BIT_RC5, + .allowed_protos = RC_PROTO_BIT_RC5, .rc_query = pctv452e_rc_query, .rc_interval = 100, }, @@ -1011,7 +1011,7 @@ static struct dvb_usb_device_properties tt_connect_s2_3600_properties = { .rc.core = { .rc_codes = RC_MAP_TT_1500, - .allowed_protos = RC_BIT_RC5, + .allowed_protos = RC_PROTO_BIT_RC5, .rc_query = pctv452e_rc_query, .rc_interval = 100, }, diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c index 9f7dd1afcb15..18d0f8f5283f 100644 --- a/drivers/media/usb/dvb-usb/technisat-usb2.c +++ b/drivers/media/usb/dvb-usb/technisat-usb2.c @@ -749,7 +749,7 @@ static struct 
dvb_usb_device_properties technisat_usb2_devices = { .rc_codes = RC_MAP_TECHNISAT_USB2, .module_name = "technisat-usb2", .rc_query = technisat_usb2_rc_query, - .allowed_protos = RC_BIT_ALL_IR_DECODER, + .allowed_protos = RC_PROTO_BIT_ALL_IR_DECODER, .driver_type = RC_DRIVER_IR_RAW, } }; diff --git a/drivers/media/usb/dvb-usb/ttusb2.c b/drivers/media/usb/dvb-usb/ttusb2.c index 9e0d6a4166d2..e7020f245f53 100644 --- a/drivers/media/usb/dvb-usb/ttusb2.c +++ b/drivers/media/usb/dvb-usb/ttusb2.c @@ -459,7 +459,7 @@ static int tt3650_rc_query(struct dvb_usb_device *d) /* got a "press" event */ st->last_rc_key = RC_SCANCODE_RC5(rx[3], rx[2]); deb_info("%s: cmd=0x%02x sys=0x%02x\n", __func__, rx[2], rx[3]); - rc_keydown(d->rc_dev, RC_TYPE_RC5, st->last_rc_key, rx[1]); + rc_keydown(d->rc_dev, RC_PROTO_RC5, st->last_rc_key, rx[1]); } else if (st->last_rc_key) { rc_keyup(d->rc_dev); st->last_rc_key = 0; @@ -766,7 +766,7 @@ static struct dvb_usb_device_properties ttusb2_properties_ct3650 = { .rc_interval = 150, /* Less than IR_KEYPRESS_TIMEOUT */ .rc_codes = RC_MAP_TT_1500, .rc_query = tt3650_rc_query, - .allowed_protos = RC_BIT_RC5, + .allowed_protos = RC_PROTO_BIT_RC5, }, .num_adapters = 1, diff --git a/drivers/media/usb/em28xx/em28xx-audio.c b/drivers/media/usb/em28xx/em28xx-audio.c index ffad7f1af166..4628d73f46f2 100644 --- a/drivers/media/usb/em28xx/em28xx-audio.c +++ b/drivers/media/usb/em28xx/em28xx-audio.c @@ -216,7 +216,7 @@ static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs, return 0; } -static struct snd_pcm_hardware snd_em28xx_hw_capture = { +static const struct snd_pcm_hardware snd_em28xx_hw_capture = { .info = SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | @@ -564,7 +564,7 @@ static int em28xx_vol_get(struct snd_kcontrol *kcontrol, val, (int)kcontrol->private_value); value->value.integer.value[0] = 0x1f - (val & 0x1f); - value->value.integer.value[1] = 0x1f - ((val << 8) & 0x1f); + value->value.integer.value[1] = 0x1f - ((val >> 8) & 0x1f); return 0; } diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c index 60b195c157b8..66c5012a628a 100644 --- a/drivers/media/usb/em28xx/em28xx-i2c.c +++ b/drivers/media/usb/em28xx/em28xx-i2c.c @@ -876,7 +876,7 @@ static const struct i2c_algorithm em28xx_algo = { .functionality = functionality, }; -static struct i2c_adapter em28xx_adap_template = { +static const struct i2c_adapter em28xx_adap_template = { .owner = THIS_MODULE, .name = "em28xx", .algo = &em28xx_algo, diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c index ca9673917ad5..046223de1e91 100644 --- a/drivers/media/usb/em28xx/em28xx-input.c +++ b/drivers/media/usb/em28xx/em28xx-input.c @@ -55,7 +55,7 @@ struct em28xx_ir_poll_result { unsigned int toggle_bit:1; unsigned int read_count:7; - enum rc_type protocol; + enum rc_proto protocol; u32 scancode; }; @@ -70,11 +70,12 @@ struct em28xx_IR { struct delayed_work work; unsigned int full_code:1; unsigned int last_readcount; - u64 rc_type; + u64 rc_proto; struct i2c_client *i2c_client; - int (*get_key_i2c)(struct i2c_client *ir, enum rc_type *protocol, u32 *scancode); + int (*get_key_i2c)(struct i2c_client *ir, enum rc_proto *protocol, + u32 *scancode); int (*get_key)(struct em28xx_IR *, struct em28xx_ir_poll_result *); }; @@ -83,7 +84,7 @@ struct em28xx_IR { **********************************************************/ static int em28xx_get_key_terratec(struct i2c_client *i2c_dev, - enum rc_type *protocol, u32 
*scancode) + enum rc_proto *protocol, u32 *scancode) { unsigned char b; @@ -101,13 +102,13 @@ static int em28xx_get_key_terratec(struct i2c_client *i2c_dev, /* keep old data */ return 1; - *protocol = RC_TYPE_UNKNOWN; + *protocol = RC_PROTO_UNKNOWN; *scancode = b; return 1; } static int em28xx_get_key_em_haup(struct i2c_client *i2c_dev, - enum rc_type *protocol, u32 *scancode) + enum rc_proto *protocol, u32 *scancode) { unsigned char buf[2]; int size; @@ -131,13 +132,14 @@ static int em28xx_get_key_em_haup(struct i2c_client *i2c_dev, * So, the code translation is not complete. Yet, it is enough to * work with the provided RC5 IR. */ - *protocol = RC_TYPE_RC5; + *protocol = RC_PROTO_RC5; *scancode = (bitrev8(buf[1]) & 0x1f) << 8 | bitrev8(buf[0]) >> 2; return 1; } static int em28xx_get_key_pinnacle_usb_grey(struct i2c_client *i2c_dev, - enum rc_type *protocol, u32 *scancode) + enum rc_proto *protocol, + u32 *scancode) { unsigned char buf[3]; @@ -149,13 +151,14 @@ static int em28xx_get_key_pinnacle_usb_grey(struct i2c_client *i2c_dev, if (buf[0] != 0x00) return 0; - *protocol = RC_TYPE_UNKNOWN; + *protocol = RC_PROTO_UNKNOWN; *scancode = buf[2] & 0x3f; return 1; } static int em28xx_get_key_winfast_usbii_deluxe(struct i2c_client *i2c_dev, - enum rc_type *protocol, u32 *scancode) + enum rc_proto *protocol, + u32 *scancode) { unsigned char subaddr, keydetect, key; @@ -175,7 +178,7 @@ static int em28xx_get_key_winfast_usbii_deluxe(struct i2c_client *i2c_dev, if (key == 0x00) return 0; - *protocol = RC_TYPE_UNKNOWN; + *protocol = RC_PROTO_UNKNOWN; *scancode = key; return 1; } @@ -207,19 +210,19 @@ static int default_polling_getkey(struct em28xx_IR *ir, poll_result->read_count = (msg[0] & 0x7f); /* Remote Control Address/Data (Regs 0x46/0x47) */ - switch (ir->rc_type) { - case RC_BIT_RC5: - poll_result->protocol = RC_TYPE_RC5; + switch (ir->rc_proto) { + case RC_PROTO_BIT_RC5: + poll_result->protocol = RC_PROTO_RC5; poll_result->scancode = RC_SCANCODE_RC5(msg[1], msg[2]); break; - case RC_BIT_NEC: - poll_result->protocol = RC_TYPE_NEC; + case RC_PROTO_BIT_NEC: + poll_result->protocol = RC_PROTO_NEC; poll_result->scancode = RC_SCANCODE_NEC(msg[1], msg[2]); break; default: - poll_result->protocol = RC_TYPE_UNKNOWN; + poll_result->protocol = RC_PROTO_UNKNOWN; poll_result->scancode = msg[1] << 8 | msg[2]; break; } @@ -252,37 +255,37 @@ static int em2874_polling_getkey(struct em28xx_IR *ir, * Remote Control Address (Reg 0x52) * Remote Control Data (Reg 0x53-0x55) */ - switch (ir->rc_type) { - case RC_BIT_RC5: - poll_result->protocol = RC_TYPE_RC5; + switch (ir->rc_proto) { + case RC_PROTO_BIT_RC5: + poll_result->protocol = RC_PROTO_RC5; poll_result->scancode = RC_SCANCODE_RC5(msg[1], msg[2]); break; - case RC_BIT_NEC: + case RC_PROTO_BIT_NEC: poll_result->scancode = msg[1] << 8 | msg[2]; if ((msg[3] ^ msg[4]) != 0xff) { /* 32 bits NEC */ - poll_result->protocol = RC_TYPE_NEC32; + poll_result->protocol = RC_PROTO_NEC32; poll_result->scancode = RC_SCANCODE_NEC32((msg[1] << 24) | (msg[2] << 16) | (msg[3] << 8) | (msg[4])); } else if ((msg[1] ^ msg[2]) != 0xff) { /* 24 bits NEC */ - poll_result->protocol = RC_TYPE_NECX; + poll_result->protocol = RC_PROTO_NECX; poll_result->scancode = RC_SCANCODE_NECX(msg[1] << 8 | msg[2], msg[3]); } else { /* Normal NEC */ - poll_result->protocol = RC_TYPE_NEC; + poll_result->protocol = RC_PROTO_NEC; poll_result->scancode = RC_SCANCODE_NEC(msg[1], msg[3]); } break; - case RC_BIT_RC6_0: - poll_result->protocol = RC_TYPE_RC6_0; + case RC_PROTO_BIT_RC6_0: + 
poll_result->protocol = RC_PROTO_RC6_0; poll_result->scancode = RC_SCANCODE_RC6_0(msg[1], msg[2]); break; default: - poll_result->protocol = RC_TYPE_UNKNOWN; + poll_result->protocol = RC_PROTO_UNKNOWN; poll_result->scancode = (msg[1] << 24) | (msg[2] << 16) | (msg[3] << 8) | msg[4]; break; @@ -298,7 +301,7 @@ static int em2874_polling_getkey(struct em28xx_IR *ir, static int em28xx_i2c_ir_handle_key(struct em28xx_IR *ir) { static u32 scancode; - enum rc_type protocol; + enum rc_proto protocol; int rc; rc = ir->get_key_i2c(ir->i2c_client, &protocol, &scancode); @@ -338,7 +341,7 @@ static void em28xx_ir_handle_key(struct em28xx_IR *ir) poll_result.toggle_bit); else rc_keydown(ir->rc, - RC_TYPE_UNKNOWN, + RC_PROTO_UNKNOWN, poll_result.scancode & 0xff, poll_result.toggle_bit); @@ -383,70 +386,71 @@ static void em28xx_ir_stop(struct rc_dev *rc) cancel_delayed_work_sync(&ir->work); } -static int em2860_ir_change_protocol(struct rc_dev *rc_dev, u64 *rc_type) +static int em2860_ir_change_protocol(struct rc_dev *rc_dev, u64 *rc_proto) { struct em28xx_IR *ir = rc_dev->priv; struct em28xx *dev = ir->dev; /* Adjust xclk based on IR table for RC5/NEC tables */ - if (*rc_type & RC_BIT_RC5) { + if (*rc_proto & RC_PROTO_BIT_RC5) { dev->board.xclk |= EM28XX_XCLK_IR_RC5_MODE; ir->full_code = 1; - *rc_type = RC_BIT_RC5; - } else if (*rc_type & RC_BIT_NEC) { + *rc_proto = RC_PROTO_BIT_RC5; + } else if (*rc_proto & RC_PROTO_BIT_NEC) { dev->board.xclk &= ~EM28XX_XCLK_IR_RC5_MODE; ir->full_code = 1; - *rc_type = RC_BIT_NEC; - } else if (*rc_type & RC_BIT_UNKNOWN) { - *rc_type = RC_BIT_UNKNOWN; + *rc_proto = RC_PROTO_BIT_NEC; + } else if (*rc_proto & RC_PROTO_BIT_UNKNOWN) { + *rc_proto = RC_PROTO_BIT_UNKNOWN; } else { - *rc_type = ir->rc_type; + *rc_proto = ir->rc_proto; return -EINVAL; } em28xx_write_reg_bits(dev, EM28XX_R0F_XCLK, dev->board.xclk, EM28XX_XCLK_IR_RC5_MODE); - ir->rc_type = *rc_type; + ir->rc_proto = *rc_proto; return 0; } -static int em2874_ir_change_protocol(struct rc_dev *rc_dev, u64 *rc_type) +static int em2874_ir_change_protocol(struct rc_dev *rc_dev, u64 *rc_proto) { struct em28xx_IR *ir = rc_dev->priv; struct em28xx *dev = ir->dev; u8 ir_config = EM2874_IR_RC5; /* Adjust xclk and set type based on IR table for RC5/NEC/RC6 tables */ - if (*rc_type & RC_BIT_RC5) { + if (*rc_proto & RC_PROTO_BIT_RC5) { dev->board.xclk |= EM28XX_XCLK_IR_RC5_MODE; ir->full_code = 1; - *rc_type = RC_BIT_RC5; - } else if (*rc_type & RC_BIT_NEC) { + *rc_proto = RC_PROTO_BIT_RC5; + } else if (*rc_proto & RC_PROTO_BIT_NEC) { dev->board.xclk &= ~EM28XX_XCLK_IR_RC5_MODE; ir_config = EM2874_IR_NEC | EM2874_IR_NEC_NO_PARITY; ir->full_code = 1; - *rc_type = RC_BIT_NEC; - } else if (*rc_type & RC_BIT_RC6_0) { + *rc_proto = RC_PROTO_BIT_NEC; + } else if (*rc_proto & RC_PROTO_BIT_RC6_0) { dev->board.xclk |= EM28XX_XCLK_IR_RC5_MODE; ir_config = EM2874_IR_RC6_MODE_0; ir->full_code = 1; - *rc_type = RC_BIT_RC6_0; - } else if (*rc_type & RC_BIT_UNKNOWN) { - *rc_type = RC_BIT_UNKNOWN; + *rc_proto = RC_PROTO_BIT_RC6_0; + } else if (*rc_proto & RC_PROTO_BIT_UNKNOWN) { + *rc_proto = RC_PROTO_BIT_UNKNOWN; } else { - *rc_type = ir->rc_type; + *rc_proto = ir->rc_proto; return -EINVAL; } em28xx_write_regs(dev, EM2874_R50_IR_CONFIG, &ir_config, 1); em28xx_write_reg_bits(dev, EM28XX_R0F_XCLK, dev->board.xclk, EM28XX_XCLK_IR_RC5_MODE); - ir->rc_type = *rc_type; + ir->rc_proto = *rc_proto; return 0; } -static int em28xx_ir_change_protocol(struct rc_dev *rc_dev, u64 *rc_type) + +static int em28xx_ir_change_protocol(struct rc_dev *rc_dev, u64 
*rc_proto) { struct em28xx_IR *ir = rc_dev->priv; struct em28xx *dev = ir->dev; @@ -455,12 +459,12 @@ static int em28xx_ir_change_protocol(struct rc_dev *rc_dev, u64 *rc_type) switch (dev->chip_id) { case CHIP_ID_EM2860: case CHIP_ID_EM2883: - return em2860_ir_change_protocol(rc_dev, rc_type); + return em2860_ir_change_protocol(rc_dev, rc_proto); case CHIP_ID_EM2884: case CHIP_ID_EM2874: case CHIP_ID_EM28174: case CHIP_ID_EM28178: - return em2874_ir_change_protocol(rc_dev, rc_type); + return em2874_ir_change_protocol(rc_dev, rc_proto); default: dev_err(&ir->dev->intf->dev, "Unrecognized em28xx chip id 0x%02x: IR not supported\n", @@ -686,7 +690,7 @@ static int em28xx_ir_init(struct em28xx *dev) struct em28xx_IR *ir; struct rc_dev *rc; int err = -ENOMEM; - u64 rc_type; + u64 rc_proto; u16 i2c_rc_dev_addr = 0; if (dev->is_audio_only) { @@ -749,7 +753,7 @@ static int em28xx_ir_init(struct em28xx *dev) case EM2820_BOARD_HAUPPAUGE_WINTV_USB_2: rc->map_name = RC_MAP_HAUPPAUGE; ir->get_key_i2c = em28xx_get_key_em_haup; - rc->allowed_protocols = RC_BIT_RC5; + rc->allowed_protocols = RC_PROTO_BIT_RC5; break; case EM2820_BOARD_LEADTEK_WINFAST_USBII_DELUXE: rc->map_name = RC_MAP_WINFAST_USBII_DELUXE; @@ -771,7 +775,8 @@ static int em28xx_ir_init(struct em28xx *dev) switch (dev->chip_id) { case CHIP_ID_EM2860: case CHIP_ID_EM2883: - rc->allowed_protocols = RC_BIT_RC5 | RC_BIT_NEC; + rc->allowed_protocols = RC_PROTO_BIT_RC5 | + RC_PROTO_BIT_NEC; ir->get_key = default_polling_getkey; break; case CHIP_ID_EM2884: @@ -779,8 +784,9 @@ static int em28xx_ir_init(struct em28xx *dev) case CHIP_ID_EM28174: case CHIP_ID_EM28178: ir->get_key = em2874_polling_getkey; - rc->allowed_protocols = RC_BIT_RC5 | RC_BIT_NEC | - RC_BIT_NECX | RC_BIT_NEC32 | RC_BIT_RC6_0; + rc->allowed_protocols = RC_PROTO_BIT_RC5 | + RC_PROTO_BIT_NEC | RC_PROTO_BIT_NECX | + RC_PROTO_BIT_NEC32 | RC_PROTO_BIT_RC6_0; break; default: err = -ENODEV; @@ -791,8 +797,8 @@ static int em28xx_ir_init(struct em28xx *dev) rc->map_name = dev->board.ir_codes; /* By default, keep protocol field untouched */ - rc_type = RC_BIT_UNKNOWN; - err = em28xx_ir_change_protocol(rc, &rc_type); + rc_proto = RC_PROTO_BIT_UNKNOWN; + err = em28xx_ir_change_protocol(rc, &rc_proto); if (err) goto error; } @@ -807,7 +813,7 @@ static int em28xx_ir_init(struct em28xx *dev) usb_make_path(udev, ir->phys, sizeof(ir->phys)); strlcat(ir->phys, "/input0", sizeof(ir->phys)); - rc->input_name = ir->name; + rc->device_name = ir->name; rc->input_phys = ir->phys; rc->input_id.bustype = BUS_USB; rc->input_id.version = 1; diff --git a/drivers/media/usb/go7007/go7007-v4l2.c b/drivers/media/usb/go7007/go7007-v4l2.c index ed5ec9773969..98cd57eaf36a 100644 --- a/drivers/media/usb/go7007/go7007-v4l2.c +++ b/drivers/media/usb/go7007/go7007-v4l2.c @@ -857,7 +857,7 @@ static int go7007_s_ctrl(struct v4l2_ctrl *ctrl) return 0; } -static struct v4l2_file_operations go7007_fops = { +static const struct v4l2_file_operations go7007_fops = { .owner = THIS_MODULE, .open = v4l2_fh_open, .release = vb2_fop_release, @@ -901,7 +901,7 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = { .vidioc_unsubscribe_event = v4l2_event_unsubscribe, }; -static struct video_device go7007_template = { +static const struct video_device go7007_template = { .name = "go7007", .fops = &go7007_fops, .release = video_device_release_empty, diff --git a/drivers/media/usb/go7007/snd-go7007.c b/drivers/media/usb/go7007/snd-go7007.c index 070871fb1fc4..c618764480c6 100644 --- a/drivers/media/usb/go7007/snd-go7007.c +++ 
b/drivers/media/usb/go7007/snd-go7007.c @@ -52,7 +52,7 @@ struct go7007_snd { int capturing; }; -static struct snd_pcm_hardware go7007_snd_capture_hw = { +static const struct snd_pcm_hardware go7007_snd_capture_hw = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c index 16bc1dde2c8c..0f141762abf1 100644 --- a/drivers/media/usb/gspca/gspca.c +++ b/drivers/media/usb/gspca/gspca.c @@ -1964,7 +1964,7 @@ static ssize_t dev_read(struct file *file, char __user *data, return ret; } -static struct v4l2_file_operations dev_fops = { +static const struct v4l2_file_operations dev_fops = { .owner = THIS_MODULE, .open = dev_open, .release = dev_close, diff --git a/drivers/media/usb/gspca/xirlink_cit.c b/drivers/media/usb/gspca/xirlink_cit.c index b600ea6460d3..68656e7986c7 100644 --- a/drivers/media/usb/gspca/xirlink_cit.c +++ b/drivers/media/usb/gspca/xirlink_cit.c @@ -1315,7 +1315,7 @@ static int cit_set_sharpness(struct gspca_dev *gspca_dev, s32 val) break; case CIT_MODEL1: { int i; - const unsigned short sa[] = { + static const unsigned short sa[] = { 0x11, 0x13, 0x16, 0x18, 0x1a, 0x8, 0x0a }; for (i = 0; i < cit_model1_ntries; i++) diff --git a/drivers/media/usb/hackrf/hackrf.c b/drivers/media/usb/hackrf/hackrf.c index d9a525260511..7eb53517a82f 100644 --- a/drivers/media/usb/hackrf/hackrf.c +++ b/drivers/media/usb/hackrf/hackrf.c @@ -1263,7 +1263,7 @@ static const struct v4l2_file_operations hackrf_fops = { .unlocked_ioctl = video_ioctl2, }; -static struct video_device hackrf_template = { +static const struct video_device hackrf_template = { .name = "HackRF One", .release = video_device_release_empty, .fops = &hackrf_fops, @@ -1545,7 +1545,7 @@ static int hackrf_probe(struct usb_interface *intf, } /* USB device ID list */ -static struct usb_device_id hackrf_id_table[] = { +static const struct usb_device_id hackrf_id_table[] = { { USB_DEVICE(0x1d50, 0x6089) }, /* HackRF One */ { } }; diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c index 15f016ad5b89..dbe29c6c4d8b 100644 --- a/drivers/media/usb/hdpvr/hdpvr-core.c +++ b/drivers/media/usb/hdpvr/hdpvr-core.c @@ -53,7 +53,7 @@ MODULE_PARM_DESC(boost_audio, "boost the audio signal"); /* table of devices that work with this driver */ -static struct usb_device_id hdpvr_table[] = { +static const struct usb_device_id hdpvr_table[] = { { USB_DEVICE(HD_PVR_VENDOR_ID, HD_PVR_PRODUCT_ID) }, { USB_DEVICE(HD_PVR_VENDOR_ID, HD_PVR_PRODUCT_ID1) }, { USB_DEVICE(HD_PVR_VENDOR_ID, HD_PVR_PRODUCT_ID2) }, diff --git a/drivers/media/usb/hdpvr/hdpvr-i2c.c b/drivers/media/usb/hdpvr/hdpvr-i2c.c index fcab55038d99..1db49ed5eaf1 100644 --- a/drivers/media/usb/hdpvr/hdpvr-i2c.c +++ b/drivers/media/usb/hdpvr/hdpvr-i2c.c @@ -55,7 +55,8 @@ struct i2c_client *hdpvr_register_ir_rx_i2c(struct hdpvr_device *dev) /* Our default information for ir-kbd-i2c.c to use */ init_data->ir_codes = RC_MAP_HAUPPAUGE; init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR; - init_data->type = RC_BIT_RC5 | RC_BIT_RC6_MCE | RC_BIT_RC6_6A_32; + init_data->type = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | + RC_PROTO_BIT_RC6_6A_32; init_data->name = "HD-PVR"; init_data->polling_interval = 405; /* ms, duplicated from Windows */ hdpvr_ir_rx_i2c_board_info.platform_data = init_data; @@ -184,7 +185,7 @@ static const struct i2c_algorithm hdpvr_algo = { .functionality = hdpvr_functionality, }; -static struct i2c_adapter 
hdpvr_i2c_adapter_template = { +static const struct i2c_adapter hdpvr_i2c_adapter_template = { .name = "Hauppage HD PVR I2C", .owner = THIS_MODULE, .algo = &hdpvr_algo, diff --git a/drivers/media/usb/msi2500/msi2500.c b/drivers/media/usb/msi2500/msi2500.c index bb3d31e2a0b5..a097d3dbc141 100644 --- a/drivers/media/usb/msi2500/msi2500.c +++ b/drivers/media/usb/msi2500/msi2500.c @@ -1143,7 +1143,7 @@ static const struct v4l2_file_operations msi2500_fops = { .unlocked_ioctl = video_ioctl2, }; -static struct video_device msi2500_template = { +static const struct video_device msi2500_template = { .name = "Mirics MSi3101 SDR Dongle", .release = video_device_release_empty, .fops = &msi2500_fops, @@ -1308,7 +1308,7 @@ static int msi2500_probe(struct usb_interface *intf, } /* USB device ID list */ -static struct usb_device_id msi2500_id_table[] = { +static const struct usb_device_id msi2500_id_table[] = { {USB_DEVICE(0x1df7, 0x2500)}, /* Mirics MSi3101 SDR Dongle */ {USB_DEVICE(0x2040, 0xd300)}, /* Hauppauge WinTV 133559 LF */ {} diff --git a/drivers/media/usb/pulse8-cec/pulse8-cec.c b/drivers/media/usb/pulse8-cec/pulse8-cec.c index f9ed9c950247..50146f263d90 100644 --- a/drivers/media/usb/pulse8-cec/pulse8-cec.c +++ b/drivers/media/usb/pulse8-cec/pulse8-cec.c @@ -642,8 +642,7 @@ static const struct cec_adap_ops pulse8_cec_adap_ops = { static int pulse8_connect(struct serio *serio, struct serio_driver *drv) { - u32 caps = CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS | CEC_CAP_PHYS_ADDR | - CEC_CAP_PASSTHROUGH | CEC_CAP_RC | CEC_CAP_MONITOR_ALL; + u32 caps = CEC_CAP_DEFAULTS | CEC_CAP_PHYS_ADDR | CEC_CAP_MONITOR_ALL; struct pulse8 *pulse8; int err = -ENOMEM; struct cec_log_addrs log_addrs = {}; @@ -656,7 +655,7 @@ static int pulse8_connect(struct serio *serio, struct serio_driver *drv) pulse8->serio = serio; pulse8->adap = cec_allocate_adapter(&pulse8_cec_adap_ops, pulse8, - "HDMI CEC", caps, 1); + dev_name(&serio->dev), caps, 1); err = PTR_ERR_OR_ZERO(pulse8->adap); if (err < 0) goto free_device; @@ -732,7 +731,7 @@ static void pulse8_ping_eeprom_work_handler(struct work_struct *work) mutex_unlock(&pulse8->config_lock); } -static struct serio_device_id pulse8_serio_ids[] = { +static const struct serio_device_id pulse8_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_PULSE8_CEC, diff --git a/drivers/media/usb/pvrusb2/pvrusb2-encoder.c b/drivers/media/usb/pvrusb2/pvrusb2-encoder.c index ca637074fa1f..43e43404095f 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-encoder.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-encoder.c @@ -198,7 +198,7 @@ static int pvr2_encoder_cmd(void *ctxt, } - LOCK_TAKE(hdw->ctl_lock); do { + LOCK_TAKE(hdw->ctl_lock); while (1) { if (!hdw->state_encoder_ok) { ret = -EIO; @@ -293,9 +293,9 @@ rdData[0]); wrData[0] = 0x0; ret = pvr2_encoder_write_words(hdw,MBOX_BASE,wrData,1); - if (ret) break; + break; - } while(0); LOCK_GIVE(hdw->ctl_lock); + }; LOCK_GIVE(hdw->ctl_lock); return ret; } diff --git a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c index 20a52b785fff..ff7b4d1d385d 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c @@ -514,12 +514,12 @@ static u32 pvr2_i2c_functionality(struct i2c_adapter *adap) return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_I2C; } -static struct i2c_algorithm pvr2_i2c_algo_template = { +static const struct i2c_algorithm pvr2_i2c_algo_template = { .master_xfer = pvr2_i2c_xfer, .functionality = pvr2_i2c_functionality, }; -static struct i2c_adapter 
pvr2_i2c_adap_template = { +static const struct i2c_adapter pvr2_i2c_adap_template = { .owner = THIS_MODULE, .class = 0, }; @@ -567,7 +567,7 @@ static void pvr2_i2c_register_ir(struct pvr2_hdw *hdw) case PVR2_IR_SCHEME_29XXX: /* Original 29xxx device */ init_data->ir_codes = RC_MAP_HAUPPAUGE; init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP; - init_data->type = RC_BIT_RC5; + init_data->type = RC_PROTO_BIT_RC5; init_data->name = hdw->hdw_desc->description; init_data->polling_interval = 100; /* ms From ir-kbd-i2c */ /* IR Receiver */ @@ -580,11 +580,11 @@ static void pvr2_i2c_register_ir(struct pvr2_hdw *hdw) break; case PVR2_IR_SCHEME_ZILOG: /* HVR-1950 style */ case PVR2_IR_SCHEME_24XXX_MCE: /* 24xxx MCE device */ - init_data->ir_codes = RC_MAP_HAUPPAUGE; + init_data->ir_codes = RC_MAP_HAUPPAUGE; init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR; - init_data->type = RC_BIT_RC5 | RC_BIT_RC6_MCE | - RC_BIT_RC6_6A_32; - init_data->name = hdw->hdw_desc->description; + init_data->type = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | + RC_PROTO_BIT_RC6_6A_32; + init_data->name = hdw->hdw_desc->description; /* IR Receiver */ info.addr = 0x71; info.platform_data = init_data; diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c index 8f13c60198ed..4320bda9352d 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c @@ -1226,7 +1226,7 @@ static const struct v4l2_file_operations vdev_fops = { }; -static struct video_device vdev_template = { +static const struct video_device vdev_template = { .fops = &vdev_fops, }; diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c index 22420c14ac98..eb6921d2743e 100644 --- a/drivers/media/usb/pwc/pwc-if.c +++ b/drivers/media/usb/pwc/pwc-if.c @@ -146,7 +146,7 @@ static const struct v4l2_file_operations pwc_fops = { .mmap = vb2_fop_mmap, .unlocked_ioctl = video_ioctl2, }; -static struct video_device pwc_template = { +static const struct video_device pwc_template = { .name = "Philips Webcam", /* Filled in later */ .release = video_device_release_empty, .fops = &pwc_fops, diff --git a/drivers/media/usb/rainshadow-cec/rainshadow-cec.c b/drivers/media/usb/rainshadow-cec/rainshadow-cec.c index 65692576690f..cecdcbcd400c 100644 --- a/drivers/media/usb/rainshadow-cec/rainshadow-cec.c +++ b/drivers/media/usb/rainshadow-cec/rainshadow-cec.c @@ -309,8 +309,7 @@ static const struct cec_adap_ops rain_cec_adap_ops = { static int rain_connect(struct serio *serio, struct serio_driver *drv) { - u32 caps = CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS | CEC_CAP_PHYS_ADDR | - CEC_CAP_PASSTHROUGH | CEC_CAP_RC | CEC_CAP_MONITOR_ALL; + u32 caps = CEC_CAP_DEFAULTS | CEC_CAP_PHYS_ADDR | CEC_CAP_MONITOR_ALL; struct rain *rain; int err = -ENOMEM; struct cec_log_addrs log_addrs = {}; @@ -323,7 +322,7 @@ static int rain_connect(struct serio *serio, struct serio_driver *drv) rain->serio = serio; rain->adap = cec_allocate_adapter(&rain_cec_adap_ops, rain, - "HDMI CEC", caps, 1); + dev_name(&serio->dev), caps, 1); err = PTR_ERR_OR_ZERO(rain->adap); if (err < 0) goto free_device; @@ -359,7 +358,7 @@ static int rain_connect(struct serio *serio, struct serio_driver *drv) return err; } -static struct serio_device_id rain_serio_ids[] = { +static const struct serio_device_id rain_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_RAINSHADOW_CEC, diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c index 6a88b1dbb3a0..b2f239c4ba42 100644 --- 
a/drivers/media/usb/s2255/s2255drv.c +++ b/drivers/media/usb/s2255/s2255drv.c @@ -381,7 +381,7 @@ MODULE_PARM_DESC(jpeg_enable, "Jpeg enable(1-on 0-off) default 1"); /* USB device table */ #define USB_SENSORAY_VID 0x1943 -static struct usb_device_id s2255_table[] = { +static const struct usb_device_id s2255_table[] = { {USB_DEVICE(USB_SENSORAY_VID, 0x2255)}, {USB_DEVICE(USB_SENSORAY_VID, 0x2257)}, /*same family as 2255*/ { } /* Terminating entry */ @@ -1590,7 +1590,7 @@ static void s2255_video_device_release(struct video_device *vdev) return; } -static struct video_device template = { +static const struct video_device template = { .name = "s2255v", .fops = &s2255_fops_v4l, .ioctl_ops = &s2255_ioctl_ops, diff --git a/drivers/media/usb/stk1160/stk1160-core.c b/drivers/media/usb/stk1160/stk1160-core.c index c86eb6164713..bea8bbbb84fb 100644 --- a/drivers/media/usb/stk1160/stk1160-core.c +++ b/drivers/media/usb/stk1160/stk1160-core.c @@ -47,7 +47,7 @@ MODULE_AUTHOR("Ezequiel Garcia"); MODULE_DESCRIPTION("STK1160 driver"); /* Devices supported by this driver */ -static struct usb_device_id stk1160_id_table[] = { +static const struct usb_device_id stk1160_id_table[] = { { USB_DEVICE(0x05e1, 0x0408) }, { } }; diff --git a/drivers/media/usb/stk1160/stk1160-i2c.c b/drivers/media/usb/stk1160/stk1160-i2c.c index 3f2517be02bb..2c70173e3c82 100644 --- a/drivers/media/usb/stk1160/stk1160-i2c.c +++ b/drivers/media/usb/stk1160/stk1160-i2c.c @@ -240,7 +240,7 @@ static const struct i2c_algorithm algo = { .functionality = functionality, }; -static struct i2c_adapter adap_template = { +static const struct i2c_adapter adap_template = { .owner = THIS_MODULE, .name = "stk1160", .algo = &algo, diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c index a005d262392a..77b759a0bcd9 100644 --- a/drivers/media/usb/stk1160/stk1160-v4l.c +++ b/drivers/media/usb/stk1160/stk1160-v4l.c @@ -326,7 +326,7 @@ static int stk1160_stop_streaming(struct stk1160 *dev) return 0; } -static struct v4l2_file_operations stk1160_fops = { +static const struct v4l2_file_operations stk1160_fops = { .owner = THIS_MODULE, .open = v4l2_fh_open, .release = vb2_fop_release, @@ -751,7 +751,7 @@ static const struct vb2_ops stk1160_video_qops = { .wait_finish = vb2_ops_wait_finish, }; -static struct video_device v4l_template = { +static const struct video_device v4l_template = { .name = "stk1160", .tvnorms = V4L2_STD_525_60 | V4L2_STD_625_50, .fops = &stk1160_fops, diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c index 90d4a08cda31..c0bba773db25 100644 --- a/drivers/media/usb/stkwebcam/stk-webcam.c +++ b/drivers/media/usb/stkwebcam/stk-webcam.c @@ -55,7 +55,7 @@ MODULE_AUTHOR("Jaime Velasco Juan and Nicolas VIVIEN"); MODULE_DESCRIPTION("Syntek DC1125 webcam driver"); /* Some cameras have audio interfaces, we aren't interested in those */ -static struct usb_device_id stkwebcam_table[] = { +static const struct usb_device_id stkwebcam_table[] = { { USB_DEVICE_AND_INTERFACE_INFO(0x174f, 0xa311, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(0x05e1, 0x0501, 0xff, 0xff, 0xff) }, { } @@ -1202,7 +1202,7 @@ static const struct v4l2_ctrl_ops stk_ctrl_ops = { .s_ctrl = stk_s_ctrl, }; -static struct v4l2_file_operations v4l_stk_fops = { +static const struct v4l2_file_operations v4l_stk_fops = { .owner = THIS_MODULE, .open = v4l_stk_open, .release = v4l_stk_release, @@ -1244,7 +1244,7 @@ static void stk_v4l_dev_release(struct video_device *vd) kfree(dev); } -static 
struct video_device stk_v4l_data = { +static const struct video_device stk_v4l_data = { .name = "stkwebcam", .fops = &v4l_stk_fops, .ioctl_ops = &v4l_stk_ioctl_ops, diff --git a/drivers/media/usb/tm6000/tm6000-alsa.c b/drivers/media/usb/tm6000/tm6000-alsa.c index 422322541af6..3717a6844ea8 100644 --- a/drivers/media/usb/tm6000/tm6000-alsa.c +++ b/drivers/media/usb/tm6000/tm6000-alsa.c @@ -143,7 +143,7 @@ static int dsp_buffer_alloc(struct snd_pcm_substream *substream, int size) */ #define DEFAULT_FIFO_SIZE 4096 -static struct snd_pcm_hardware snd_tm6000_digital_hw = { +static const struct snd_pcm_hardware snd_tm6000_digital_hw = { .info = SNDRV_PCM_INFO_BATCH | SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | diff --git a/drivers/media/usb/tm6000/tm6000-cards.c b/drivers/media/usb/tm6000/tm6000-cards.c index b293dea6554f..2537643a1808 100644 --- a/drivers/media/usb/tm6000/tm6000-cards.c +++ b/drivers/media/usb/tm6000/tm6000-cards.c @@ -613,7 +613,7 @@ static struct tm6000_board tm6000_boards[] = { }; /* table of devices that work with this driver */ -static struct usb_device_id tm6000_id_table[] = { +static const struct usb_device_id tm6000_id_table[] = { { USB_DEVICE(0x6000, 0x0001), .driver_info = TM5600_BOARD_GENERIC }, { USB_DEVICE(0x6000, 0x0002), .driver_info = TM6010_BOARD_GENERIC }, { USB_DEVICE(0x06e1, 0xf332), .driver_info = TM6000_BOARD_ADSTECH_DUAL_TV }, diff --git a/drivers/media/usb/tm6000/tm6000-input.c b/drivers/media/usb/tm6000/tm6000-input.c index 1a033f57fcc1..91889ad9cdd7 100644 --- a/drivers/media/usb/tm6000/tm6000-input.c +++ b/drivers/media/usb/tm6000/tm6000-input.c @@ -66,7 +66,7 @@ struct tm6000_IR { struct urb *int_urb; /* IR device properties */ - u64 rc_type; + u64 rc_proto; }; void tm6000_ir_wait(struct tm6000_core *dev, u8 state) @@ -103,13 +103,13 @@ static int tm6000_ir_config(struct tm6000_IR *ir) * IR, in order to discard such decoding */ - switch (ir->rc_type) { - case RC_BIT_NEC: + switch (ir->rc_proto) { + case RC_PROTO_BIT_NEC: leader = 900; /* ms */ pulse = 700; /* ms - the actual value would be 562 */ break; default: - case RC_BIT_RC5: + case RC_PROTO_BIT_RC5: leader = 900; /* ms - from the NEC decoding */ pulse = 1780; /* ms - The actual value would be 1776 */ break; @@ -117,12 +117,12 @@ static int tm6000_ir_config(struct tm6000_IR *ir) pulse = ir_clock_mhz * pulse; leader = ir_clock_mhz * leader; - if (ir->rc_type == RC_BIT_NEC) + if (ir->rc_proto == RC_PROTO_BIT_NEC) leader = leader | 0x8000; dprintk(2, "%s: %s, %d MHz, leader = 0x%04x, pulse = 0x%06x \n", __func__, - (ir->rc_type == RC_BIT_NEC) ? "NEC" : "RC-5", + (ir->rc_proto == RC_PROTO_BIT_NEC) ? "NEC" : "RC-5", ir_clock_mhz, leader, pulse); /* Remote WAKEUP = enable, normal mode, from IR decoder output */ @@ -162,24 +162,24 @@ static void tm6000_ir_keydown(struct tm6000_IR *ir, { u8 device, command; u32 scancode; - enum rc_type protocol; + enum rc_proto protocol; if (len < 1) return; command = buf[0]; device = (len > 1 ? 
buf[1] : 0x0); - switch (ir->rc_type) { - case RC_BIT_RC5: - protocol = RC_TYPE_RC5; + switch (ir->rc_proto) { + case RC_PROTO_BIT_RC5: + protocol = RC_PROTO_RC5; scancode = RC_SCANCODE_RC5(device, command); break; - case RC_BIT_NEC: - protocol = RC_TYPE_NEC; + case RC_PROTO_BIT_NEC: + protocol = RC_PROTO_NEC; scancode = RC_SCANCODE_NEC(device, command); break; default: - protocol = RC_TYPE_OTHER; + protocol = RC_PROTO_OTHER; scancode = RC_SCANCODE_OTHER(device << 8 | command); break; } @@ -311,7 +311,7 @@ static void tm6000_ir_stop(struct rc_dev *rc) cancel_delayed_work_sync(&ir->work); } -static int tm6000_ir_change_protocol(struct rc_dev *rc, u64 *rc_type) +static int tm6000_ir_change_protocol(struct rc_dev *rc, u64 *rc_proto) { struct tm6000_IR *ir = rc->priv; @@ -320,7 +320,7 @@ static int tm6000_ir_change_protocol(struct rc_dev *rc, u64 *rc_type) dprintk(2, "%s\n",__func__); - ir->rc_type = *rc_type; + ir->rc_proto = *rc_proto; tm6000_ir_config(ir); /* TODO */ @@ -409,7 +409,7 @@ int tm6000_ir_init(struct tm6000_core *dev) struct tm6000_IR *ir; struct rc_dev *rc; int err = -ENOMEM; - u64 rc_type; + u64 rc_proto; if (!enable_ir) return -ENODEV; @@ -433,7 +433,7 @@ int tm6000_ir_init(struct tm6000_core *dev) ir->rc = rc; /* input setup */ - rc->allowed_protocols = RC_BIT_RC5 | RC_BIT_NEC; + rc->allowed_protocols = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_NEC; /* Needed, in order to support NEC remotes with 24 or 32 bits */ rc->scancode_mask = 0xffff; rc->priv = ir; @@ -455,10 +455,10 @@ int tm6000_ir_init(struct tm6000_core *dev) usb_make_path(dev->udev, ir->phys, sizeof(ir->phys)); strlcat(ir->phys, "/input0", sizeof(ir->phys)); - rc_type = RC_BIT_UNKNOWN; - tm6000_ir_change_protocol(rc, &rc_type); + rc_proto = RC_PROTO_BIT_UNKNOWN; + tm6000_ir_change_protocol(rc, &rc_proto); - rc->input_name = ir->name; + rc->device_name = ir->name; rc->input_phys = ir->phys; rc->input_id.bustype = BUS_USB; rc->input_id.version = 1; diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c index 7e960d0a5b92..ec8c4d2534dc 100644 --- a/drivers/media/usb/tm6000/tm6000-video.c +++ b/drivers/media/usb/tm6000/tm6000-video.c @@ -801,7 +801,7 @@ static void buffer_release(struct videobuf_queue *vq, struct videobuf_buffer *vb free_buffer(vq, buf); } -static struct videobuf_queue_ops tm6000_video_qops = { +static const struct videobuf_queue_ops tm6000_video_qops = { .buf_setup = buffer_setup, .buf_prepare = buffer_prepare, .buf_queue = buffer_queue, @@ -1532,7 +1532,7 @@ static int tm6000_mmap(struct file *file, struct vm_area_struct * vma) return res; } -static struct v4l2_file_operations tm6000_fops = { +static const struct v4l2_file_operations tm6000_fops = { .owner = THIS_MODULE, .open = tm6000_open, .release = tm6000_release, diff --git a/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c b/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c index 361e40b56045..b842f367249f 100644 --- a/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c +++ b/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c @@ -1640,7 +1640,7 @@ static void frontend_init(struct ttusb* ttusb) -static struct i2c_algorithm ttusb_dec_algo = { +static const struct i2c_algorithm ttusb_dec_algo = { .master_xfer = master_xfer, .functionality = functionality, }; @@ -1795,7 +1795,7 @@ static void ttusb_disconnect(struct usb_interface *intf) dprintk("%s: TTUSB DVB disconnected\n", __func__); } -static struct usb_device_id ttusb_table[] = { +static const struct usb_device_id ttusb_table[] = { {USB_DEVICE(0xb48, 0x1003)}, 
{USB_DEVICE(0xb48, 0x1004)}, {USB_DEVICE(0xb48, 0x1005)}, diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c index 01c7e6d4481c..cdefb5dfbbdc 100644 --- a/drivers/media/usb/ttusb-dec/ttusb_dec.c +++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c @@ -1791,7 +1791,7 @@ static void ttusb_dec_set_model(struct ttusb_dec *dec, } } -static struct usb_device_id ttusb_dec_table[] = { +static const struct usb_device_id ttusb_dec_table[] = { {USB_DEVICE(0x0b48, 0x1006)}, /* DEC3000-s */ /*{USB_DEVICE(0x0b48, 0x1007)}, Unconfirmed */ {USB_DEVICE(0x0b48, 0x1008)}, /* DEC2000-t */ diff --git a/drivers/media/usb/usbtv/usbtv-audio.c b/drivers/media/usb/usbtv/usbtv-audio.c index 9db31db7d9ac..2c2ca77fa01f 100644 --- a/drivers/media/usb/usbtv/usbtv-audio.c +++ b/drivers/media/usb/usbtv/usbtv-audio.c @@ -43,7 +43,7 @@ #include "usbtv.h" -static struct snd_pcm_hardware snd_usbtv_digital_hw = { +static const struct snd_pcm_hardware snd_usbtv_digital_hw = { .info = SNDRV_PCM_INFO_BATCH | SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c index ceb953be0770..f06f09a0876e 100644 --- a/drivers/media/usb/usbtv/usbtv-core.c +++ b/drivers/media/usb/usbtv/usbtv-core.c @@ -142,7 +142,7 @@ static void usbtv_disconnect(struct usb_interface *intf) v4l2_device_put(&usbtv->v4l2_dev); } -static struct usb_device_id usbtv_id_table[] = { +static const struct usb_device_id usbtv_id_table[] = { { USB_DEVICE(0x1b71, 0x3002) }, {} }; diff --git a/drivers/media/usb/usbtv/usbtv-video.c b/drivers/media/usb/usbtv/usbtv-video.c index 8135614f395a..95b5f4319ec2 100644 --- a/drivers/media/usb/usbtv/usbtv-video.c +++ b/drivers/media/usb/usbtv/usbtv-video.c @@ -629,7 +629,7 @@ static struct v4l2_ioctl_ops usbtv_ioctl_ops = { .vidioc_streamoff = vb2_ioctl_streamoff, }; -static struct v4l2_file_operations usbtv_fops = { +static const struct v4l2_file_operations usbtv_fops = { .owner = THIS_MODULE, .unlocked_ioctl = video_ioctl2, .mmap = vb2_fop_mmap, diff --git a/drivers/media/usb/usbvision/usbvision-i2c.c b/drivers/media/usb/usbvision/usbvision-i2c.c index fdf6b6e285da..837bd4d9db41 100644 --- a/drivers/media/usb/usbvision/usbvision-i2c.c +++ b/drivers/media/usb/usbvision/usbvision-i2c.c @@ -163,7 +163,7 @@ static u32 functionality(struct i2c_adapter *adap) /* -----exported algorithm data: ------------------------------------- */ -static struct i2c_algorithm usbvision_algo = { +static const struct i2c_algorithm usbvision_algo = { .master_xfer = usbvision_i2c_xfer, .smbus_xfer = NULL, .functionality = functionality, @@ -173,7 +173,7 @@ static struct i2c_algorithm usbvision_algo = { /* ----------------------------------------------------------------------- */ /* usbvision specific I2C functions */ /* ----------------------------------------------------------------------- */ -static struct i2c_adapter i2c_adap_template; +static const struct i2c_adapter i2c_adap_template; int usbvision_i2c_register(struct usb_usbvision *usbvision) { @@ -187,8 +187,9 @@ int usbvision_i2c_register(struct usb_usbvision *usbvision) usbvision->i2c_adap = i2c_adap_template; - sprintf(usbvision->i2c_adap.name, "%s-%d-%s", i2c_adap_template.name, - usbvision->dev->bus->busnum, usbvision->dev->devpath); + snprintf(usbvision->i2c_adap.name, sizeof(usbvision->i2c_adap.name), + "usbvision-%d-%s", + usbvision->dev->bus->busnum, usbvision->dev->devpath); PDEBUG(DBG_I2C, "Adaptername: %s", usbvision->i2c_adap.name); usbvision->i2c_adap.dev.parent = 
&usbvision->dev->dev; @@ -440,7 +441,7 @@ static int usbvision_i2c_read(struct usb_usbvision *usbvision, unsigned char add return rdcount; } -static struct i2c_adapter i2c_adap_template = { +static const struct i2c_adapter i2c_adap_template = { .owner = THIS_MODULE, .name = "usbvision", }; diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c index 756322c4ac05..960272d3c924 100644 --- a/drivers/media/usb/usbvision/usbvision-video.c +++ b/drivers/media/usb/usbvision/usbvision-video.c @@ -904,7 +904,7 @@ static ssize_t usbvision_read(struct file *file, char __user *buf, PDEBUG(DBG_IO, "%s: %ld bytes, noblock=%d", __func__, (unsigned long)count, noblock); - if (!USBVISION_IS_OPERATIONAL(usbvision) || (buf == NULL)) + if (!USBVISION_IS_OPERATIONAL(usbvision) || !buf) return -EFAULT; /* This entry point is compatible with the mmap routines @@ -1234,7 +1234,7 @@ static void usbvision_vdev_init(struct usb_usbvision *usbvision, { struct usb_device *usb_dev = usbvision->dev; - if (usb_dev == NULL) { + if (!usb_dev) { dev_err(&usbvision->dev->dev, "%s: usbvision->dev is not set\n", __func__); return; @@ -1319,8 +1319,8 @@ static struct usb_usbvision *usbvision_alloc(struct usb_device *dev, { struct usb_usbvision *usbvision; - usbvision = kzalloc(sizeof(struct usb_usbvision), GFP_KERNEL); - if (usbvision == NULL) + usbvision = kzalloc(sizeof(*usbvision), GFP_KERNEL); + if (!usbvision) return NULL; usbvision->dev = dev; @@ -1334,7 +1334,7 @@ static struct usb_usbvision *usbvision_alloc(struct usb_device *dev, /* prepare control urb for control messages during interrupts */ usbvision->ctrl_urb = usb_alloc_urb(USBVISION_URB_FRAMES, GFP_KERNEL); - if (usbvision->ctrl_urb == NULL) + if (!usbvision->ctrl_urb) goto err_unreg; return usbvision; @@ -1380,7 +1380,7 @@ static void usbvision_configure_video(struct usb_usbvision *usbvision) { int model; - if (usbvision == NULL) + if (!usbvision) return; model = usbvision->dev_model; @@ -1474,7 +1474,7 @@ static int usbvision_probe(struct usb_interface *intf, } usbvision = usbvision_alloc(dev, intf); - if (usbvision == NULL) { + if (!usbvision) { dev_err(&intf->dev, "%s: couldn't allocate USBVision struct\n", __func__); ret = -ENOMEM; goto err_usb; @@ -1494,8 +1494,7 @@ static int usbvision_probe(struct usb_interface *intf, usbvision->num_alt = uif->num_altsetting; PDEBUG(DBG_PROBE, "Alternate settings: %i", usbvision->num_alt); usbvision->alt_max_pkt_size = kmalloc(32 * usbvision->num_alt, GFP_KERNEL); - if (usbvision->alt_max_pkt_size == NULL) { - dev_err(&intf->dev, "usbvision: out of memory!\n"); + if (!usbvision->alt_max_pkt_size) { ret = -ENOMEM; goto err_pkt; } @@ -1566,7 +1565,7 @@ static void usbvision_disconnect(struct usb_interface *intf) PDEBUG(DBG_PROBE, ""); - if (usbvision == NULL) { + if (!usbvision) { pr_err("%s: usb_get_intfdata() failed\n", __func__); return; } diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c index c2ee6e39fd0c..20397aba6849 100644 --- a/drivers/media/usb/uvc/uvc_ctrl.c +++ b/drivers/media/usb/uvc/uvc_ctrl.c @@ -2002,6 +2002,13 @@ int uvc_ctrl_add_mapping(struct uvc_video_chain *chain, goto done; } + /* Validate the user-provided bit-size and offset */ + if (mapping->size > 32 || + mapping->offset + mapping->size > ctrl->info.size * 8) { + ret = -EINVAL; + goto done; + } + list_for_each_entry(map, &ctrl->info.mappings, list) { if (mapping->id == map->id) { uvc_trace(UVC_TRACE_CONTROL, "Can't add mapping '%s', " diff --git 
a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c index 70842c5af05b..6d22b22cb35b 100644 --- a/drivers/media/usb/uvc/uvc_driver.c +++ b/drivers/media/usb/uvc/uvc_driver.c @@ -1802,8 +1802,9 @@ static int uvc_scan_device(struct uvc_device *dev) * already been canceled by the USB core. There is no need to kill the * interrupt URB manually. */ -static void uvc_delete(struct uvc_device *dev) +static void uvc_delete(struct kref *kref) { + struct uvc_device *dev = container_of(kref, struct uvc_device, ref); struct list_head *p, *n; uvc_status_cleanup(dev); @@ -1854,11 +1855,7 @@ static void uvc_release(struct video_device *vdev) struct uvc_streaming *stream = video_get_drvdata(vdev); struct uvc_device *dev = stream->dev; - /* Decrement the registered streams count and delete the device when it - * reaches zero. - */ - if (atomic_dec_and_test(&dev->nstreams)) - uvc_delete(dev); + kref_put(&dev->ref, uvc_delete); } /* @@ -1870,10 +1867,10 @@ static void uvc_unregister_video(struct uvc_device *dev) /* Unregistering all video devices might result in uvc_delete() being * called from inside the loop if there's no open file handle. To avoid - * that, increment the stream count before iterating over the streams - * and decrement it when done. + * that, increment the refcount before iterating over the streams and + * decrement it when done. */ - atomic_inc(&dev->nstreams); + kref_get(&dev->ref); list_for_each_entry(stream, &dev->streams, list) { if (!video_is_registered(&stream->vdev)) @@ -1884,11 +1881,7 @@ static void uvc_unregister_video(struct uvc_device *dev) uvc_debugfs_cleanup_stream(stream); } - /* Decrement the stream count and call uvc_delete explicitly if there - * are no stream left. - */ - if (atomic_dec_and_test(&dev->nstreams)) - uvc_delete(dev); + kref_put(&dev->ref, uvc_delete); } static int uvc_register_video(struct uvc_device *dev, @@ -1946,7 +1939,7 @@ static int uvc_register_video(struct uvc_device *dev, else stream->chain->caps |= V4L2_CAP_VIDEO_OUTPUT; - atomic_inc(&dev->nstreams); + kref_get(&dev->ref); return 0; } @@ -2031,7 +2024,7 @@ static int uvc_probe(struct usb_interface *intf, INIT_LIST_HEAD(&dev->entities); INIT_LIST_HEAD(&dev->chains); INIT_LIST_HEAD(&dev->streams); - atomic_set(&dev->nstreams, 0); + kref_init(&dev->ref); atomic_set(&dev->nmappings, 0); mutex_init(&dev->lock); @@ -2096,7 +2089,6 @@ static int uvc_probe(struct usb_interface *intf, sizeof(dev->mdev.serial)); strcpy(dev->mdev.bus_info, udev->devpath); dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice); - dev->mdev.driver_version = LINUX_VERSION_CODE; media_device_init(&dev->mdev); dev->vdev.mdev = &dev->mdev; @@ -2284,7 +2276,7 @@ MODULE_PARM_DESC(timeout, "Streaming control requests timeout"); * VENDOR_SPEC because they don't announce themselves as UVC devices, even * though they are compliant. 
*/ -static struct usb_device_id uvc_ids[] = { +static const struct usb_device_id uvc_ids[] = { /* LogiLink Wireless Webcam */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, diff --git a/drivers/media/usb/uvc/uvc_entity.c b/drivers/media/usb/uvc/uvc_entity.c index ac386bb547e6..554063c07d7a 100644 --- a/drivers/media/usb/uvc/uvc_entity.c +++ b/drivers/media/usb/uvc/uvc_entity.c @@ -61,7 +61,7 @@ static int uvc_mc_create_links(struct uvc_video_chain *chain, return 0; } -static struct v4l2_subdev_ops uvc_subdev_ops = { +static const struct v4l2_subdev_ops uvc_subdev_ops = { }; void uvc_mc_cleanup_entity(struct uvc_entity *entity) diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c index aa2199775cb8..c8d78b2f3de4 100644 --- a/drivers/media/usb/uvc/uvc_queue.c +++ b/drivers/media/usb/uvc/uvc_queue.c @@ -82,9 +82,14 @@ static int uvc_queue_setup(struct vb2_queue *vq, struct uvc_streaming *stream = uvc_queue_to_stream(queue); unsigned size = stream->ctrl.dwMaxVideoFrameSize; - /* Make sure the image size is large enough. */ + /* + * When called with plane sizes, validate them. The driver supports + * single planar formats only, and requires buffers to be large enough + * to store a complete frame. + */ if (*nplanes) - return sizes[0] < size ? -EINVAL : 0; + return *nplanes != 1 || sizes[0] < size ? -EINVAL : 0; + *nplanes = 1; sizes[0] = size; return 0; diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h index 15e415e32c7f..34c7ee6cc9e5 100644 --- a/drivers/media/usb/uvc/uvcvideo.h +++ b/drivers/media/usb/uvc/uvcvideo.h @@ -166,7 +166,7 @@ /* Maximum status buffer size in bytes of interrupt URB. */ #define UVC_MAX_STATUS_SIZE 16 -#define UVC_CTRL_CONTROL_TIMEOUT 300 +#define UVC_CTRL_CONTROL_TIMEOUT 500 #define UVC_CTRL_STREAMING_TIMEOUT 5000 /* Maximum allowed number of control mappings per device */ @@ -575,7 +575,7 @@ struct uvc_device { /* Video Streaming interfaces */ struct list_head streams; - atomic_t nstreams; + struct kref ref; /* Status Interrupt Endpoint */ struct usb_host_endpoint *int_ep; diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c index efdcd5bd6a4c..4ff8d0aed015 100644 --- a/drivers/media/usb/zr364xx/zr364xx.c +++ b/drivers/media/usb/zr364xx/zr364xx.c @@ -93,7 +93,7 @@ MODULE_PARM_DESC(mode, "0 = 320x240, 1 = 160x120, 2 = 640x480"); /* Devices supported by this driver * .driver_info contains the init method used by the camera */ -static struct usb_device_id device_table[] = { +static const struct usb_device_id device_table[] = { {USB_DEVICE(0x08ca, 0x0109), .driver_info = METHOD0 }, {USB_DEVICE(0x041e, 0x4024), .driver_info = METHOD0 }, {USB_DEVICE(0x0d64, 0x0108), .driver_info = METHOD0 }, @@ -439,7 +439,7 @@ static void buffer_release(struct videobuf_queue *vq, free_buffer(vq, buf); } -static struct videobuf_queue_ops zr364xx_video_qops = { +static const struct videobuf_queue_ops zr364xx_video_qops = { .buf_setup = buffer_setup, .buf_prepare = buffer_prepare, .buf_queue = buffer_queue, @@ -1335,7 +1335,7 @@ static const struct v4l2_ioctl_ops zr364xx_ioctl_ops = { .vidioc_unsubscribe_event = v4l2_event_unsubscribe, }; -static struct video_device zr364xx_template = { +static const struct video_device zr364xx_template = { .name = DRIVER_DESC, .fops = &zr364xx_fops, .ioctl_ops = &zr364xx_ioctl_ops, diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c index 851f128eba22..d741a8e0fdac 100644 --- 
a/drivers/media/v4l2-core/v4l2-async.c +++ b/drivers/media/v4l2-core/v4l2-async.c @@ -44,12 +44,7 @@ static bool match_devname(struct v4l2_subdev *sd, static bool match_fwnode(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd) { - if (!is_of_node(sd->fwnode) || !is_of_node(asd->match.fwnode.fwnode)) - return sd->fwnode == asd->match.fwnode.fwnode; - - return !of_node_cmp(of_node_full_name(to_of_node(sd->fwnode)), - of_node_full_name( - to_of_node(asd->match.fwnode.fwnode))); + return sd->fwnode == asd->match.fwnode.fwnode; } static bool match_custom(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd) diff --git a/drivers/media/v4l2-core/v4l2-clk.c b/drivers/media/v4l2-core/v4l2-clk.c index 297e10e69898..90628d7a04de 100644 --- a/drivers/media/v4l2-core/v4l2-clk.c +++ b/drivers/media/v4l2-core/v4l2-clk.c @@ -61,8 +61,7 @@ struct v4l2_clk *v4l2_clk_get(struct device *dev, const char *id) /* if dev_name is not found, try use the OF name to find again */ if (PTR_ERR(clk) == -ENODEV && dev->of_node) { - v4l2_clk_name_of(clk_name, sizeof(clk_name), - of_node_full_name(dev->of_node)); + v4l2_clk_name_of(clk_name, sizeof(clk_name), dev->of_node); clk = v4l2_clk_find(clk_name); } diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c index 6f52970f8b54..821f2aa299ae 100644 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c @@ -43,6 +43,7 @@ struct v4l2_window32 { compat_caddr_t clips; /* actually struct v4l2_clip32 * */ __u32 clipcount; compat_caddr_t bitmap; + __u8 global_alpha; }; static int get_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user *up) @@ -51,7 +52,8 @@ static int get_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user copy_from_user(&kp->w, &up->w, sizeof(up->w)) || get_user(kp->field, &up->field) || get_user(kp->chromakey, &up->chromakey) || - get_user(kp->clipcount, &up->clipcount)) + get_user(kp->clipcount, &up->clipcount) || + get_user(kp->global_alpha, &up->global_alpha)) return -EFAULT; if (kp->clipcount > 2048) return -EINVAL; @@ -84,7 +86,8 @@ static int put_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user if (copy_to_user(&up->w, &kp->w, sizeof(kp->w)) || put_user(kp->field, &up->field) || put_user(kp->chromakey, &up->chromakey) || - put_user(kp->clipcount, &up->clipcount)) + put_user(kp->clipcount, &up->clipcount) || + put_user(kp->global_alpha, &up->global_alpha)) return -EFAULT; return 0; } @@ -627,7 +630,8 @@ struct v4l2_input32 { __u32 tuner; /* Associated tuner */ compat_u64 std; __u32 status; - __u32 reserved[4]; + __u32 capabilities; + __u32 reserved[3]; }; /* The 64-bit v4l2_input struct has extra padding at the end of the struct. 
@@ -796,7 +800,8 @@ static int put_v4l2_event32(struct v4l2_event *kp, struct v4l2_event32 __user *u copy_to_user(&up->u, &kp->u, sizeof(kp->u)) || put_user(kp->pending, &up->pending) || put_user(kp->sequence, &up->sequence) || - compat_put_timespec(&kp->timestamp, &up->timestamp) || + put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) || + put_user(kp->timestamp.tv_nsec, &up->timestamp.tv_nsec) || put_user(kp->id, &up->id) || copy_to_user(up->reserved, kp->reserved, 8 * sizeof(__u32))) return -EFAULT; diff --git a/drivers/media/v4l2-core/v4l2-flash-led-class.c b/drivers/media/v4l2-core/v4l2-flash-led-class.c index 7b8288108e8a..4ceef217de83 100644 --- a/drivers/media/v4l2-core/v4l2-flash-led-class.c +++ b/drivers/media/v4l2-core/v4l2-flash-led-class.c @@ -18,7 +18,7 @@ #include #define has_flash_op(v4l2_flash, op) \ - (v4l2_flash && v4l2_flash->ops->op) + (v4l2_flash && v4l2_flash->ops && v4l2_flash->ops->op) #define call_flash_op(v4l2_flash, op, arg) \ (has_flash_op(v4l2_flash, op) ? \ @@ -110,7 +110,7 @@ static void v4l2_flash_set_led_brightness(struct v4l2_flash *v4l2_flash, led_set_brightness_sync(&v4l2_flash->fled_cdev->led_cdev, brightness); } else { - led_set_brightness_sync(&v4l2_flash->iled_cdev->led_cdev, + led_set_brightness_sync(v4l2_flash->iled_cdev, brightness); } } @@ -133,7 +133,7 @@ static int v4l2_flash_update_led_brightness(struct v4l2_flash *v4l2_flash, return 0; led_cdev = &v4l2_flash->fled_cdev->led_cdev; } else { - led_cdev = &v4l2_flash->iled_cdev->led_cdev; + led_cdev = v4l2_flash->iled_cdev; } ret = led_update_brightness(led_cdev); @@ -197,7 +197,7 @@ static int v4l2_flash_s_ctrl(struct v4l2_ctrl *c) { struct v4l2_flash *v4l2_flash = v4l2_ctrl_to_v4l2_flash(c); struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev; - struct led_classdev *led_cdev = &fled_cdev->led_cdev; + struct led_classdev *led_cdev = fled_cdev ? &fled_cdev->led_cdev : NULL; struct v4l2_ctrl **ctrls = v4l2_flash->ctrls; bool external_strobe; int ret = 0; @@ -299,11 +299,26 @@ static void __fill_ctrl_init_data(struct v4l2_flash *v4l2_flash, struct v4l2_flash_ctrl_data *ctrl_init_data) { struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev; - const struct led_flash_ops *fled_cdev_ops = fled_cdev->ops; - struct led_classdev *led_cdev = &fled_cdev->led_cdev; + struct led_classdev *led_cdev = fled_cdev ? 
&fled_cdev->led_cdev : NULL; struct v4l2_ctrl_config *ctrl_cfg; u32 mask; + /* Init INDICATOR_INTENSITY ctrl data */ + if (v4l2_flash->iled_cdev) { + ctrl_init_data[INDICATOR_INTENSITY].cid = + V4L2_CID_FLASH_INDICATOR_INTENSITY; + ctrl_cfg = &ctrl_init_data[INDICATOR_INTENSITY].config; + __lfs_to_v4l2_ctrl_config(&flash_cfg->intensity, + ctrl_cfg); + ctrl_cfg->id = V4L2_CID_FLASH_INDICATOR_INTENSITY; + ctrl_cfg->min = 0; + ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE | + V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; + } + + if (!led_cdev || WARN_ON(!(led_cdev->flags & LED_DEV_CAP_FLASH))) + return; + /* Init FLASH_FAULT ctrl data */ if (flash_cfg->flash_faults) { ctrl_init_data[FLASH_FAULT].cid = V4L2_CID_FLASH_FAULT; @@ -331,27 +346,11 @@ static void __fill_ctrl_init_data(struct v4l2_flash *v4l2_flash, /* Init TORCH_INTENSITY ctrl data */ ctrl_init_data[TORCH_INTENSITY].cid = V4L2_CID_FLASH_TORCH_INTENSITY; ctrl_cfg = &ctrl_init_data[TORCH_INTENSITY].config; - __lfs_to_v4l2_ctrl_config(&flash_cfg->torch_intensity, ctrl_cfg); + __lfs_to_v4l2_ctrl_config(&flash_cfg->intensity, ctrl_cfg); ctrl_cfg->id = V4L2_CID_FLASH_TORCH_INTENSITY; ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; - /* Init INDICATOR_INTENSITY ctrl data */ - if (v4l2_flash->iled_cdev) { - ctrl_init_data[INDICATOR_INTENSITY].cid = - V4L2_CID_FLASH_INDICATOR_INTENSITY; - ctrl_cfg = &ctrl_init_data[INDICATOR_INTENSITY].config; - __lfs_to_v4l2_ctrl_config(&flash_cfg->indicator_intensity, - ctrl_cfg); - ctrl_cfg->id = V4L2_CID_FLASH_INDICATOR_INTENSITY; - ctrl_cfg->min = 0; - ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE | - V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; - } - - if (!(led_cdev->flags & LED_DEV_CAP_FLASH)) - return; - /* Init FLASH_STROBE ctrl data */ ctrl_init_data[FLASH_STROBE].cid = V4L2_CID_FLASH_STROBE; ctrl_cfg = &ctrl_init_data[FLASH_STROBE].config; @@ -376,7 +375,7 @@ static void __fill_ctrl_init_data(struct v4l2_flash *v4l2_flash, } /* Init STROBE_STATUS ctrl data */ - if (fled_cdev_ops->strobe_get) { + if (has_flash_op(fled_cdev, strobe_get)) { ctrl_init_data[STROBE_STATUS].cid = V4L2_CID_FLASH_STROBE_STATUS; ctrl_cfg = &ctrl_init_data[STROBE_STATUS].config; @@ -386,7 +385,7 @@ static void __fill_ctrl_init_data(struct v4l2_flash *v4l2_flash, } /* Init FLASH_TIMEOUT ctrl data */ - if (fled_cdev_ops->timeout_set) { + if (has_flash_op(fled_cdev, timeout_set)) { ctrl_init_data[FLASH_TIMEOUT].cid = V4L2_CID_FLASH_TIMEOUT; ctrl_cfg = &ctrl_init_data[FLASH_TIMEOUT].config; __lfs_to_v4l2_ctrl_config(&fled_cdev->timeout, ctrl_cfg); @@ -394,7 +393,7 @@ static void __fill_ctrl_init_data(struct v4l2_flash *v4l2_flash, } /* Init FLASH_INTENSITY ctrl data */ - if (fled_cdev_ops->flash_brightness_set) { + if (has_flash_op(fled_cdev, flash_brightness_set)) { ctrl_init_data[FLASH_INTENSITY].cid = V4L2_CID_FLASH_INTENSITY; ctrl_cfg = &ctrl_init_data[FLASH_INTENSITY].config; __lfs_to_v4l2_ctrl_config(&fled_cdev->brightness, ctrl_cfg); @@ -486,7 +485,9 @@ static int __sync_device_with_v4l2_controls(struct v4l2_flash *v4l2_flash) struct v4l2_ctrl **ctrls = v4l2_flash->ctrls; int ret = 0; - v4l2_flash_set_led_brightness(v4l2_flash, ctrls[TORCH_INTENSITY]); + if (ctrls[TORCH_INTENSITY]) + v4l2_flash_set_led_brightness(v4l2_flash, + ctrls[TORCH_INTENSITY]); if (ctrls[INDICATOR_INTENSITY]) v4l2_flash_set_led_brightness(v4l2_flash, @@ -528,24 +529,23 @@ static int v4l2_flash_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { struct v4l2_flash *v4l2_flash = v4l2_subdev_to_v4l2_flash(sd); struct led_classdev_flash *fled_cdev = 
v4l2_flash->fled_cdev; - struct led_classdev *led_cdev = &fled_cdev->led_cdev; - struct led_classdev_flash *iled_cdev = v4l2_flash->iled_cdev; - struct led_classdev *led_cdev_ind = NULL; + struct led_classdev *led_cdev = fled_cdev ? &fled_cdev->led_cdev : NULL; + struct led_classdev *led_cdev_ind = v4l2_flash->iled_cdev; int ret = 0; if (!v4l2_fh_is_singular(&fh->vfh)) return 0; - mutex_lock(&led_cdev->led_access); + if (led_cdev) { + mutex_lock(&led_cdev->led_access); - led_sysfs_disable(led_cdev); - led_trigger_remove(led_cdev); + led_sysfs_disable(led_cdev); + led_trigger_remove(led_cdev); - mutex_unlock(&led_cdev->led_access); - - if (iled_cdev) { - led_cdev_ind = &iled_cdev->led_cdev; + mutex_unlock(&led_cdev->led_access); + } + if (led_cdev_ind) { mutex_lock(&led_cdev_ind->led_access); led_sysfs_disable(led_cdev_ind); @@ -560,9 +560,11 @@ static int v4l2_flash_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) return 0; out_sync_device: - mutex_lock(&led_cdev->led_access); - led_sysfs_enable(led_cdev); - mutex_unlock(&led_cdev->led_access); + if (led_cdev) { + mutex_lock(&led_cdev->led_access); + led_sysfs_enable(led_cdev); + mutex_unlock(&led_cdev->led_access); + } if (led_cdev_ind) { mutex_lock(&led_cdev_ind->led_access); @@ -577,25 +579,26 @@ static int v4l2_flash_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { struct v4l2_flash *v4l2_flash = v4l2_subdev_to_v4l2_flash(sd); struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev; - struct led_classdev *led_cdev = &fled_cdev->led_cdev; - struct led_classdev_flash *iled_cdev = v4l2_flash->iled_cdev; + struct led_classdev *led_cdev = fled_cdev ? &fled_cdev->led_cdev : NULL; + struct led_classdev *led_cdev_ind = v4l2_flash->iled_cdev; int ret = 0; if (!v4l2_fh_is_singular(&fh->vfh)) return 0; - mutex_lock(&led_cdev->led_access); + if (led_cdev) { + mutex_lock(&led_cdev->led_access); - if (v4l2_flash->ctrls[STROBE_SOURCE]) - ret = v4l2_ctrl_s_ctrl(v4l2_flash->ctrls[STROBE_SOURCE], + if (v4l2_flash->ctrls[STROBE_SOURCE]) + ret = v4l2_ctrl_s_ctrl( + v4l2_flash->ctrls[STROBE_SOURCE], V4L2_FLASH_STROBE_SOURCE_SOFTWARE); - led_sysfs_enable(led_cdev); + led_sysfs_enable(led_cdev); - mutex_unlock(&led_cdev->led_access); - - if (iled_cdev) { - struct led_classdev *led_cdev_ind = &iled_cdev->led_cdev; + mutex_unlock(&led_cdev->led_access); + } + if (led_cdev_ind) { mutex_lock(&led_cdev_ind->led_access); led_sysfs_enable(led_cdev_ind); mutex_unlock(&led_cdev_ind->led_access); @@ -611,25 +614,19 @@ static const struct v4l2_subdev_internal_ops v4l2_flash_subdev_internal_ops = { static const struct v4l2_subdev_ops v4l2_flash_subdev_ops; -struct v4l2_flash *v4l2_flash_init( +static struct v4l2_flash *__v4l2_flash_init( struct device *dev, struct fwnode_handle *fwn, - struct led_classdev_flash *fled_cdev, - struct led_classdev_flash *iled_cdev, - const struct v4l2_flash_ops *ops, - struct v4l2_flash_config *config) + struct led_classdev_flash *fled_cdev, struct led_classdev *iled_cdev, + const struct v4l2_flash_ops *ops, struct v4l2_flash_config *config) { struct v4l2_flash *v4l2_flash; - struct led_classdev *led_cdev; struct v4l2_subdev *sd; int ret; - if (!fled_cdev || !ops || !config) + if (!config) return ERR_PTR(-EINVAL); - led_cdev = &fled_cdev->led_cdev; - - v4l2_flash = devm_kzalloc(led_cdev->dev, sizeof(*v4l2_flash), - GFP_KERNEL); + v4l2_flash = devm_kzalloc(dev, sizeof(*v4l2_flash), GFP_KERNEL); if (!v4l2_flash) return ERR_PTR(-ENOMEM); @@ -638,7 +635,7 @@ struct v4l2_flash *v4l2_flash_init( v4l2_flash->iled_cdev = 
iled_cdev; v4l2_flash->ops = ops; sd->dev = dev; - sd->fwnode = fwn ? fwn : dev_fwnode(led_cdev->dev); + sd->fwnode = fwn ? fwn : dev_fwnode(dev); v4l2_subdev_init(sd, &v4l2_flash_subdev_ops); sd->internal_ops = &v4l2_flash_subdev_internal_ops; sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; @@ -670,8 +667,26 @@ struct v4l2_flash *v4l2_flash_init( return ERR_PTR(ret); } + +struct v4l2_flash *v4l2_flash_init( + struct device *dev, struct fwnode_handle *fwn, + struct led_classdev_flash *fled_cdev, + const struct v4l2_flash_ops *ops, + struct v4l2_flash_config *config) +{ + return __v4l2_flash_init(dev, fwn, fled_cdev, NULL, ops, config); +} EXPORT_SYMBOL_GPL(v4l2_flash_init); +struct v4l2_flash *v4l2_flash_indicator_init( + struct device *dev, struct fwnode_handle *fwn, + struct led_classdev *iled_cdev, + struct v4l2_flash_config *config) +{ + return __v4l2_flash_init(dev, fwn, NULL, iled_cdev, NULL, config); +} +EXPORT_SYMBOL_GPL(v4l2_flash_indicator_init); + void v4l2_flash_release(struct v4l2_flash *v4l2_flash) { struct v4l2_subdev *sd; diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c index 153c53ca3925..40b2fbfe8865 100644 --- a/drivers/media/v4l2-core/v4l2-fwnode.c +++ b/drivers/media/v4l2-core/v4l2-fwnode.c @@ -28,8 +28,16 @@ #include -static int v4l2_fwnode_endpoint_parse_csi_bus(struct fwnode_handle *fwnode, - struct v4l2_fwnode_endpoint *vep) +enum v4l2_fwnode_bus_type { + V4L2_FWNODE_BUS_TYPE_GUESS = 0, + V4L2_FWNODE_BUS_TYPE_CSI2_CPHY, + V4L2_FWNODE_BUS_TYPE_CSI1, + V4L2_FWNODE_BUS_TYPE_CCP2, + NR_OF_V4L2_FWNODE_BUS_TYPE, +}; + +static int v4l2_fwnode_endpoint_parse_csi2_bus(struct fwnode_handle *fwnode, + struct v4l2_fwnode_endpoint *vep) { struct v4l2_fwnode_bus_mipi_csi2 *bus = &vep->bus.mipi_csi2; bool have_clk_lane = false; @@ -40,10 +48,10 @@ static int v4l2_fwnode_endpoint_parse_csi_bus(struct fwnode_handle *fwnode, rval = fwnode_property_read_u32_array(fwnode, "data-lanes", NULL, 0); if (rval > 0) { - u32 array[ARRAY_SIZE(bus->data_lanes)]; + u32 array[1 + V4L2_FWNODE_CSI2_MAX_DATA_LANES]; bus->num_data_lanes = - min_t(int, ARRAY_SIZE(bus->data_lanes), rval); + min_t(int, V4L2_FWNODE_CSI2_MAX_DATA_LANES, rval); fwnode_property_read_u32_array(fwnode, "data-lanes", array, bus->num_data_lanes); @@ -56,24 +64,25 @@ static int v4l2_fwnode_endpoint_parse_csi_bus(struct fwnode_handle *fwnode, bus->data_lanes[i] = array[i]; } - } - rval = fwnode_property_read_u32_array(fwnode, "lane-polarities", NULL, - 0); - if (rval > 0) { - u32 array[ARRAY_SIZE(bus->lane_polarities)]; + rval = fwnode_property_read_u32_array(fwnode, + "lane-polarities", NULL, + 0); + if (rval > 0) { + if (rval != 1 + bus->num_data_lanes /* clock+data */) { + pr_warn("invalid number of lane-polarities entries (need %u, got %u)\n", + 1 + bus->num_data_lanes, rval); + return -EINVAL; + } - if (rval < 1 + bus->num_data_lanes /* clock + data */) { - pr_warn("too few lane-polarities entries (need %u, got %u)\n", - 1 + bus->num_data_lanes, rval); - return -EINVAL; + fwnode_property_read_u32_array(fwnode, + "lane-polarities", array, + 1 + bus->num_data_lanes); + + for (i = 0; i < 1 + bus->num_data_lanes; i++) + bus->lane_polarities[i] = array[i]; } - fwnode_property_read_u32_array(fwnode, "lane-polarities", array, - 1 + bus->num_data_lanes); - - for (i = 0; i < 1 + bus->num_data_lanes; i++) - bus->lane_polarities[i] = array[i]; } if (!fwnode_property_read_u32(fwnode, "clock-lanes", &v)) { @@ -146,6 +155,32 @@ static void v4l2_fwnode_endpoint_parse_parallel_bus( } +static void 
+v4l2_fwnode_endpoint_parse_csi1_bus(struct fwnode_handle *fwnode, + struct v4l2_fwnode_endpoint *vep, + u32 bus_type) +{ + struct v4l2_fwnode_bus_mipi_csi1 *bus = &vep->bus.mipi_csi1; + u32 v; + + if (!fwnode_property_read_u32(fwnode, "clock-inv", &v)) + bus->clock_inv = v; + + if (!fwnode_property_read_u32(fwnode, "strobe", &v)) + bus->strobe = v; + + if (!fwnode_property_read_u32(fwnode, "data-lanes", &v)) + bus->data_lane = v; + + if (!fwnode_property_read_u32(fwnode, "clock-lanes", &v)) + bus->clock_lane = v; + + if (bus_type == V4L2_FWNODE_BUS_TYPE_CCP2) + vep->bus_type = V4L2_MBUS_CCP2; + else + vep->bus_type = V4L2_MBUS_CSI1; +} + /** * v4l2_fwnode_endpoint_parse() - parse all fwnode node properties * @fwnode: pointer to the endpoint's fwnode handle @@ -168,6 +203,7 @@ static void v4l2_fwnode_endpoint_parse_parallel_bus( int v4l2_fwnode_endpoint_parse(struct fwnode_handle *fwnode, struct v4l2_fwnode_endpoint *vep) { + u32 bus_type = 0; int rval; fwnode_graph_parse_endpoint(fwnode, &vep->base); @@ -176,17 +212,30 @@ int v4l2_fwnode_endpoint_parse(struct fwnode_handle *fwnode, memset(&vep->bus_type, 0, sizeof(*vep) - offsetof(typeof(*vep), bus_type)); - rval = v4l2_fwnode_endpoint_parse_csi_bus(fwnode, vep); - if (rval) - return rval; - /* - * Parse the parallel video bus properties only if none - * of the MIPI CSI-2 specific properties were found. - */ - if (vep->bus.mipi_csi2.flags == 0) - v4l2_fwnode_endpoint_parse_parallel_bus(fwnode, vep); + fwnode_property_read_u32(fwnode, "bus-type", &bus_type); - return 0; + switch (bus_type) { + case V4L2_FWNODE_BUS_TYPE_GUESS: + rval = v4l2_fwnode_endpoint_parse_csi2_bus(fwnode, vep); + if (rval) + return rval; + /* + * Parse the parallel video bus properties only if none + * of the MIPI CSI-2 specific properties were found. 
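For reference, a minimal sketch of how a sensor driver might consume the endpoint parsed here, including the new explicit CSI-1/CCP2 bus types; the function and device names are illustrative, and the exact bus_type reported for the auto-detected buses depends on which properties are present:

static int foo_parse_endpoint(struct device *dev)
{
	struct v4l2_fwnode_endpoint vep;
	struct fwnode_handle *ep;
	int ret;

	ep = fwnode_graph_get_next_endpoint(dev_fwnode(dev), NULL);
	if (!ep)
		return -ENODEV;

	ret = v4l2_fwnode_endpoint_parse(ep, &vep);
	fwnode_handle_put(ep);
	if (ret)
		return ret;

	switch (vep.bus_type) {
	case V4L2_MBUS_CSI1:
	case V4L2_MBUS_CCP2:
		/* set explicitly by the bus-type aware CSI-1/CCP2 parser */
		dev_dbg(dev, "CSI-1/CCP2: data lane %u, clock lane %u\n",
			vep.bus.mipi_csi1.data_lane,
			vep.bus.mipi_csi1.clock_lane);
		break;
	default:
		/* CSI-2 or parallel/BT.656, guessed from the properties */
		dev_dbg(dev, "bus type %d\n", vep.bus_type);
		break;
	}

	return 0;
}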
+ */ + if (vep->bus.mipi_csi2.flags == 0) + v4l2_fwnode_endpoint_parse_parallel_bus(fwnode, vep); + + return 0; + case V4L2_FWNODE_BUS_TYPE_CCP2: + case V4L2_FWNODE_BUS_TYPE_CSI1: + v4l2_fwnode_endpoint_parse_csi1_bus(fwnode, vep, bus_type); + + return 0; + default: + pr_warn("unsupported bus type %u\n", bus_type); + return -EINVAL; + } } EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_parse); @@ -247,24 +296,24 @@ struct v4l2_fwnode_endpoint *v4l2_fwnode_endpoint_alloc_parse( rval = fwnode_property_read_u64_array(fwnode, "link-frequencies", NULL, 0); - if (rval < 0) - goto out_err; + if (rval > 0) { + vep->link_frequencies = + kmalloc_array(rval, sizeof(*vep->link_frequencies), + GFP_KERNEL); + if (!vep->link_frequencies) { + rval = -ENOMEM; + goto out_err; + } - vep->link_frequencies = - kmalloc_array(rval, sizeof(*vep->link_frequencies), GFP_KERNEL); - if (!vep->link_frequencies) { - rval = -ENOMEM; - goto out_err; + vep->nr_of_link_frequencies = rval; + + rval = fwnode_property_read_u64_array( + fwnode, "link-frequencies", vep->link_frequencies, + vep->nr_of_link_frequencies); + if (rval < 0) + goto out_err; } - vep->nr_of_link_frequencies = rval; - - rval = fwnode_property_read_u64_array(fwnode, "link-frequencies", - vep->link_frequencies, - vep->nr_of_link_frequencies); - if (rval < 0) - goto out_err; - return vep; out_err: diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index cab63bb49c97..b60a6b0841d1 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -1195,10 +1195,6 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt) case V4L2_PIX_FMT_SGBRG10: descr = "10-bit Bayer GBGB/RGRG"; break; case V4L2_PIX_FMT_SGRBG10: descr = "10-bit Bayer GRGR/BGBG"; break; case V4L2_PIX_FMT_SRGGB10: descr = "10-bit Bayer RGRG/GBGB"; break; - case V4L2_PIX_FMT_SBGGR12: descr = "12-bit Bayer BGBG/GRGR"; break; - case V4L2_PIX_FMT_SGBRG12: descr = "12-bit Bayer GBGB/RGRG"; break; - case V4L2_PIX_FMT_SGRBG12: descr = "12-bit Bayer GRGR/BGBG"; break; - case V4L2_PIX_FMT_SRGGB12: descr = "12-bit Bayer RGRG/GBGB"; break; case V4L2_PIX_FMT_SBGGR10P: descr = "10-bit Bayer BGBG/GRGR Packed"; break; case V4L2_PIX_FMT_SGBRG10P: descr = "10-bit Bayer GBGB/RGRG Packed"; break; case V4L2_PIX_FMT_SGRBG10P: descr = "10-bit Bayer GRGR/BGBG Packed"; break; @@ -1211,6 +1207,14 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt) case V4L2_PIX_FMT_SGBRG10DPCM8: descr = "8-bit Bayer GBGB/RGRG (DPCM)"; break; case V4L2_PIX_FMT_SGRBG10DPCM8: descr = "8-bit Bayer GRGR/BGBG (DPCM)"; break; case V4L2_PIX_FMT_SRGGB10DPCM8: descr = "8-bit Bayer RGRG/GBGB (DPCM)"; break; + case V4L2_PIX_FMT_SBGGR12: descr = "12-bit Bayer BGBG/GRGR"; break; + case V4L2_PIX_FMT_SGBRG12: descr = "12-bit Bayer GBGB/RGRG"; break; + case V4L2_PIX_FMT_SGRBG12: descr = "12-bit Bayer GRGR/BGBG"; break; + case V4L2_PIX_FMT_SRGGB12: descr = "12-bit Bayer RGRG/GBGB"; break; + case V4L2_PIX_FMT_SBGGR12P: descr = "12-bit Bayer BGBG/GRGR Packed"; break; + case V4L2_PIX_FMT_SGBRG12P: descr = "12-bit Bayer GBGB/RGRG Packed"; break; + case V4L2_PIX_FMT_SGRBG12P: descr = "12-bit Bayer GRGR/BGBG Packed"; break; + case V4L2_PIX_FMT_SRGGB12P: descr = "12-bit Bayer RGRG/GBGB Packed"; break; case V4L2_PIX_FMT_SBGGR16: descr = "16-bit Bayer BGBG/GRGR"; break; case V4L2_PIX_FMT_SGBRG16: descr = "16-bit Bayer GBGB/RGRG"; break; case V4L2_PIX_FMT_SGRBG16: descr = "16-bit Bayer GRGR/BGBG"; break; diff --git a/drivers/media/v4l2-core/videobuf2-core.c 
b/drivers/media/v4l2-core/videobuf2-core.c index 14f83cecfa92..cb115ba6a1d2 100644 --- a/drivers/media/v4l2-core/videobuf2-core.c +++ b/drivers/media/v4l2-core/videobuf2-core.c @@ -194,8 +194,6 @@ static void __enqueue_in_driver(struct vb2_buffer *vb); static int __vb2_buf_mem_alloc(struct vb2_buffer *vb) { struct vb2_queue *q = vb->vb2_queue; - enum dma_data_direction dma_dir = - q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE; void *mem_priv; int plane; int ret = -ENOMEM; @@ -209,7 +207,7 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb) mem_priv = call_ptr_memop(vb, alloc, q->alloc_devs[plane] ? : q->dev, - q->dma_attrs, size, dma_dir, q->gfp_flags); + q->dma_attrs, size, q->dma_dir, q->gfp_flags); if (IS_ERR_OR_NULL(mem_priv)) { if (mem_priv) ret = PTR_ERR(mem_priv); @@ -978,8 +976,6 @@ static int __prepare_userptr(struct vb2_buffer *vb, const void *pb) void *mem_priv; unsigned int plane; int ret = 0; - enum dma_data_direction dma_dir = - q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE; bool reacquired = vb->planes[0].mem_priv == NULL; memset(planes, 0, sizeof(planes[0]) * vb->num_planes); @@ -1030,7 +1026,7 @@ static int __prepare_userptr(struct vb2_buffer *vb, const void *pb) mem_priv = call_ptr_memop(vb, get_userptr, q->alloc_devs[plane] ? : q->dev, planes[plane].m.userptr, - planes[plane].length, dma_dir); + planes[plane].length, q->dma_dir); if (IS_ERR(mem_priv)) { dprintk(1, "failed acquiring userspace memory for plane %d\n", plane); @@ -1096,8 +1092,6 @@ static int __prepare_dmabuf(struct vb2_buffer *vb, const void *pb) void *mem_priv; unsigned int plane; int ret = 0; - enum dma_data_direction dma_dir = - q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE; bool reacquired = vb->planes[0].mem_priv == NULL; memset(planes, 0, sizeof(planes[0]) * vb->num_planes); @@ -1139,7 +1133,7 @@ static int __prepare_dmabuf(struct vb2_buffer *vb, const void *pb) continue; } - dprintk(1, "buffer for plane %d changed\n", plane); + dprintk(3, "buffer for plane %d changed\n", plane); if (!reacquired) { reacquired = true; @@ -1156,7 +1150,7 @@ static int __prepare_dmabuf(struct vb2_buffer *vb, const void *pb) /* Acquire each plane's memory */ mem_priv = call_ptr_memop(vb, attach_dmabuf, q->alloc_devs[plane] ? 
: q->dev, - dbuf, planes[plane].length, dma_dir); + dbuf, planes[plane].length, q->dma_dir); if (IS_ERR(mem_priv)) { dprintk(1, "failed to attach dmabuf\n"); ret = PTR_ERR(mem_priv); @@ -1298,7 +1292,7 @@ int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb) /* Fill buffer information for the userspace */ call_void_bufop(q, fill_user_buffer, vb, pb); - dprintk(1, "prepare of buffer %d succeeded\n", vb->index); + dprintk(2, "prepare of buffer %d succeeded\n", vb->index); return ret; } @@ -1428,7 +1422,7 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb) return ret; } - dprintk(1, "qbuf of buffer %d succeeded\n", vb->index); + dprintk(2, "qbuf of buffer %d succeeded\n", vb->index); return 0; } EXPORT_SYMBOL_GPL(vb2_core_qbuf); @@ -1476,7 +1470,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking) } if (nonblocking) { - dprintk(1, "nonblocking and no buffers to dequeue, will not wait\n"); + dprintk(3, "nonblocking and no buffers to dequeue, will not wait\n"); return -EAGAIN; } @@ -1623,7 +1617,7 @@ int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb, /* go back to dequeued state */ __vb2_dqbuf(vb); - dprintk(1, "dqbuf of buffer %d, with state %d\n", + dprintk(2, "dqbuf of buffer %d, with state %d\n", vb->index, vb->state); return 0; @@ -2003,6 +1997,11 @@ int vb2_core_queue_init(struct vb2_queue *q) if (q->buf_struct_size == 0) q->buf_struct_size = sizeof(struct vb2_buffer); + if (q->bidirectional) + q->dma_dir = DMA_BIDIRECTIONAL; + else + q->dma_dir = q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE; + return 0; } EXPORT_SYMBOL_GPL(vb2_core_queue_init); diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c index 4f246d166111..9f389f36566d 100644 --- a/drivers/media/v4l2-core/videobuf2-dma-contig.c +++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c @@ -352,7 +352,7 @@ static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf, return vb2_dc_mmap(dbuf->priv, vma); } -static struct dma_buf_ops vb2_dc_dmabuf_ops = { +static const struct dma_buf_ops vb2_dc_dmabuf_ops = { .attach = vb2_dc_dmabuf_ops_attach, .detach = vb2_dc_dmabuf_ops_detach, .map_dma_buf = vb2_dc_dmabuf_ops_map, @@ -508,7 +508,8 @@ static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr, buf->dma_dir = dma_dir; offset = vaddr & ~PAGE_MASK; - vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE); + vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE || + dma_dir == DMA_BIDIRECTIONAL); if (IS_ERR(vec)) { ret = PTR_ERR(vec); goto fail_buf; diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c index 5defa1f22ca2..6808231a6bdc 100644 --- a/drivers/media/v4l2-core/videobuf2-dma-sg.c +++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c @@ -239,7 +239,8 @@ static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr, buf->offset = vaddr & ~PAGE_MASK; buf->size = size; buf->dma_sgt = &buf->sg_table; - vec = vb2_create_framevec(vaddr, size, buf->dma_dir == DMA_FROM_DEVICE); + vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE || + dma_dir == DMA_BIDIRECTIONAL); if (IS_ERR(vec)) goto userptr_fail_pfnvec; buf->vec = vec; @@ -292,7 +293,8 @@ static void vb2_dma_sg_put_userptr(void *buf_priv) vm_unmap_ram(buf->vaddr, buf->num_pages); sg_free_table(buf->dma_sgt); while (--i >= 0) { - if (buf->dma_dir == DMA_FROM_DEVICE) + if (buf->dma_dir == DMA_FROM_DEVICE || + buf->dma_dir == DMA_BIDIRECTIONAL) 
set_page_dirty_lock(buf->pages[i]); } vb2_destroy_framevec(buf->vec); @@ -500,7 +502,7 @@ static int vb2_dma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf, return vb2_dma_sg_mmap(dbuf->priv, vma); } -static struct dma_buf_ops vb2_dma_sg_dmabuf_ops = { +static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = { .attach = vb2_dma_sg_dmabuf_ops_attach, .detach = vb2_dma_sg_dmabuf_ops_detach, .map_dma_buf = vb2_dma_sg_dmabuf_ops_map, diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c index b337d780844c..3a7c80cd1a17 100644 --- a/drivers/media/v4l2-core/videobuf2-vmalloc.c +++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c @@ -87,7 +87,8 @@ static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr, buf->dma_dir = dma_dir; offset = vaddr & ~PAGE_MASK; buf->size = size; - vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE); + vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE || + dma_dir == DMA_BIDIRECTIONAL); if (IS_ERR(vec)) { ret = PTR_ERR(vec); goto fail_pfnvec_create; @@ -137,7 +138,8 @@ static void vb2_vmalloc_put_userptr(void *buf_priv) pages = frame_vector_pages(buf->vec); if (vaddr) vm_unmap_ram((void *)vaddr, n_pages); - if (buf->dma_dir == DMA_FROM_DEVICE) + if (buf->dma_dir == DMA_FROM_DEVICE || + buf->dma_dir == DMA_BIDIRECTIONAL) for (i = 0; i < n_pages; i++) set_page_dirty_lock(pages[i]); } else { @@ -338,7 +340,7 @@ static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf, return vb2_vmalloc_mmap(dbuf->priv, vma); } -static struct dma_buf_ops vb2_vmalloc_dmabuf_ops = { +static const struct dma_buf_ops vb2_vmalloc_dmabuf_ops = { .attach = vb2_vmalloc_dmabuf_ops_attach, .detach = vb2_vmalloc_dmabuf_ops_detach, .map_dma_buf = vb2_vmalloc_dmabuf_ops_map, diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c index ebf69ff48ae2..b907865d4664 100644 --- a/drivers/memory/atmel-ebi.c +++ b/drivers/memory/atmel-ebi.c @@ -51,6 +51,7 @@ struct atmel_ebi { struct { struct regmap *regmap; struct clk *clk; + const struct atmel_hsmc_reg_layout *layout; } smc; struct device *dev; @@ -84,8 +85,8 @@ static void at91sam9_ebi_get_config(struct atmel_ebi_dev *ebid, static void sama5_ebi_get_config(struct atmel_ebi_dev *ebid, struct atmel_ebi_dev_config *conf) { - atmel_hsmc_cs_conf_get(ebid->ebi->smc.regmap, conf->cs, - &conf->smcconf); + atmel_hsmc_cs_conf_get(ebid->ebi->smc.regmap, ebid->ebi->smc.layout, + conf->cs, &conf->smcconf); } static const struct atmel_smc_timing_xlate timings_xlate_table[] = { @@ -158,8 +159,8 @@ static int atmel_ebi_xslate_smc_timings(struct atmel_ebi_dev *ebid, out: if (ret) { dev_err(ebid->ebi->dev, - "missing or invalid timings definition in %s", - np->full_name); + "missing or invalid timings definition in %pOF", + np); return ret; } @@ -269,8 +270,8 @@ static int atmel_ebi_xslate_smc_config(struct atmel_ebi_dev *ebid, return -EINVAL; if ((ret > 0 && !required) || (!ret && required)) { - dev_err(ebid->ebi->dev, "missing atmel,smc- properties in %s", - np->full_name); + dev_err(ebid->ebi->dev, "missing atmel,smc- properties in %pOF", + np); return -EINVAL; } @@ -287,8 +288,8 @@ static void at91sam9_ebi_apply_config(struct atmel_ebi_dev *ebid, static void sama5_ebi_apply_config(struct atmel_ebi_dev *ebid, struct atmel_ebi_dev_config *conf) { - atmel_hsmc_cs_conf_apply(ebid->ebi->smc.regmap, conf->cs, - &conf->smcconf); + atmel_hsmc_cs_conf_apply(ebid->ebi->smc.regmap, ebid->ebi->smc.layout, + conf->cs, &conf->smcconf); } static int atmel_ebi_dev_setup(struct 
atmel_ebi *ebi, struct device_node *np, @@ -313,8 +314,7 @@ static int atmel_ebi_dev_setup(struct atmel_ebi *ebi, struct device_node *np, if (cs >= AT91_MATRIX_EBI_NUM_CS || !(ebi->caps->available_cs & BIT(cs))) { - dev_err(dev, "invalid reg property in %s\n", - np->full_name); + dev_err(dev, "invalid reg property in %pOF\n", np); return -EINVAL; } @@ -323,7 +323,7 @@ static int atmel_ebi_dev_setup(struct atmel_ebi *ebi, struct device_node *np, } if (!numcs) { - dev_err(dev, "invalid reg property in %s\n", np->full_name); + dev_err(dev, "invalid reg property in %pOF\n", np); return -EINVAL; } @@ -527,6 +527,10 @@ static int atmel_ebi_probe(struct platform_device *pdev) if (IS_ERR(ebi->smc.regmap)) return PTR_ERR(ebi->smc.regmap); + ebi->smc.layout = atmel_hsmc_get_reg_layout(smc_np); + if (IS_ERR(ebi->smc.layout)) + return PTR_ERR(ebi->smc.layout); + ebi->smc.clk = of_clk_get(smc_np, 0); if (IS_ERR(ebi->smc.clk)) { if (PTR_ERR(ebi->smc.clk) != -ENOENT) @@ -571,8 +575,8 @@ static int atmel_ebi_probe(struct platform_device *pdev) ret = atmel_ebi_dev_setup(ebi, child, reg_cells); if (ret) { - dev_err(dev, "failed to configure EBI bus for %s, disabling the device", - child->full_name); + dev_err(dev, "failed to configure EBI bus for %pOF, disabling the device", + child); ret = atmel_ebi_dev_disable(ebi, child); if (ret) diff --git a/drivers/memory/jz4780-nemc.c b/drivers/memory/jz4780-nemc.c index 919d1925acb9..bcf06adefc96 100644 --- a/drivers/memory/jz4780-nemc.c +++ b/drivers/memory/jz4780-nemc.c @@ -322,8 +322,8 @@ static int jz4780_nemc_probe(struct platform_device *pdev) bank = of_read_number(prop, 1); if (bank < 1 || bank >= JZ4780_NEMC_NUM_BANKS) { dev_err(nemc->dev, - "%s requests invalid bank %u\n", - child->full_name, bank); + "%pOF requests invalid bank %u\n", + child, bank); /* Will continue the outer loop below. */ referenced = 0; @@ -334,12 +334,12 @@ static int jz4780_nemc_probe(struct platform_device *pdev) } if (!referenced) { - dev_err(nemc->dev, "%s has no addresses\n", - child->full_name); + dev_err(nemc->dev, "%pOF has no addresses\n", + child); continue; } else if (nemc->banks_present & referenced) { - dev_err(nemc->dev, "%s conflicts with another node\n", - child->full_name); + dev_err(nemc->dev, "%pOF conflicts with another node\n", + child); continue; } diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c index 4afbc412f959..8f2d152a78b8 100644 --- a/drivers/memory/mtk-smi.c +++ b/drivers/memory/mtk-smi.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -23,7 +24,10 @@ #include #include +/* mt8173 */ #define SMI_LARB_MMU_EN 0xf00 + +/* mt2701 */ #define REG_SMI_SECUR_CON_BASE 0x5c0 /* every register control 8 port, register offset 0x4 */ @@ -41,7 +45,12 @@ /* mt2701 domain should be set to 3 */ #define SMI_SECUR_CON_VAL_DOMAIN(id) (0x3 << ((((id) & 0x7) << 2) + 1)) +/* mt2712 */ +#define SMI_LARB_NONSEC_CON(id) (0x380 + ((id) * 4)) +#define F_MMU_EN BIT(0) + struct mtk_smi_larb_gen { + bool need_larbid; int port_in_larb[MTK_LARB_NR_MAX + 1]; void (*config_port)(struct device *); }; @@ -148,6 +157,15 @@ mtk_smi_larb_bind(struct device *dev, struct device *master, void *data) struct mtk_smi_iommu *smi_iommu = data; unsigned int i; + if (larb->larb_gen->need_larbid) { + larb->mmu = &smi_iommu->larb_imu[larb->larbid].mmu; + return 0; + } + + /* + * If there is no larbid property, Loop to find the corresponding + * iommu information. 
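The memory-controller hunks in this area (atmel-ebi, jz4780-nemc, and the mvebu-devbus/omap-gpmc changes further down) all move device-node diagnostics from the pre-generated full_name string to the %pOF printk specifier, which renders the node path directly from the struct device_node. A minimal before/after sketch (the helper name is hypothetical):

static void bar_complain(struct device *dev, struct device_node *np)
{
	/* old style: depends on np->full_name being kept around */
	dev_err(dev, "invalid reg property in %s\n", np->full_name);

	/* new style: the printk core formats the node path itself */
	dev_err(dev, "invalid reg property in %pOF\n", np);
}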
+ */ for (i = 0; i < smi_iommu->larb_nr; i++) { if (dev == smi_iommu->larb_imu[i].dev) { /* The 'mmu' may be updated in iommu-attach/detach. */ @@ -158,14 +176,33 @@ mtk_smi_larb_bind(struct device *dev, struct device *master, void *data) return -ENODEV; } -static void mtk_smi_larb_config_port(struct device *dev) +static void mtk_smi_larb_config_port_mt2712(struct device *dev) +{ + struct mtk_smi_larb *larb = dev_get_drvdata(dev); + u32 reg; + int i; + + /* + * larb 8/9 is the bdpsys larb, the iommu_en is enabled defaultly. + * Don't need to set it again. + */ + if (larb->larbid == 8 || larb->larbid == 9) + return; + + for_each_set_bit(i, (unsigned long *)larb->mmu, 32) { + reg = readl_relaxed(larb->base + SMI_LARB_NONSEC_CON(i)); + reg |= F_MMU_EN; + writel(reg, larb->base + SMI_LARB_NONSEC_CON(i)); + } +} + +static void mtk_smi_larb_config_port_mt8173(struct device *dev) { struct mtk_smi_larb *larb = dev_get_drvdata(dev); writel(*larb->mmu, larb->base + SMI_LARB_MMU_EN); } - static void mtk_smi_larb_config_port_gen1(struct device *dev) { struct mtk_smi_larb *larb = dev_get_drvdata(dev); @@ -210,10 +247,11 @@ static const struct component_ops mtk_smi_larb_component_ops = { static const struct mtk_smi_larb_gen mtk_smi_larb_mt8173 = { /* mt8173 do not need the port in larb */ - .config_port = mtk_smi_larb_config_port, + .config_port = mtk_smi_larb_config_port_mt8173, }; static const struct mtk_smi_larb_gen mtk_smi_larb_mt2701 = { + .need_larbid = true, .port_in_larb = { LARB0_PORT_OFFSET, LARB1_PORT_OFFSET, LARB2_PORT_OFFSET, LARB3_PORT_OFFSET @@ -221,6 +259,11 @@ static const struct mtk_smi_larb_gen mtk_smi_larb_mt2701 = { .config_port = mtk_smi_larb_config_port_gen1, }; +static const struct mtk_smi_larb_gen mtk_smi_larb_mt2712 = { + .need_larbid = true, + .config_port = mtk_smi_larb_config_port_mt2712, +}; + static const struct of_device_id mtk_smi_larb_of_ids[] = { { .compatible = "mediatek,mt8173-smi-larb", @@ -230,6 +273,10 @@ static const struct of_device_id mtk_smi_larb_of_ids[] = { .compatible = "mediatek,mt2701-smi-larb", .data = &mtk_smi_larb_mt2701 }, + { + .compatible = "mediatek,mt2712-smi-larb", + .data = &mtk_smi_larb_mt2712 + }, {} }; @@ -240,20 +287,13 @@ static int mtk_smi_larb_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct device_node *smi_node; struct platform_device *smi_pdev; - const struct of_device_id *of_id; - - if (!dev->pm_domain) - return -EPROBE_DEFER; - - of_id = of_match_node(mtk_smi_larb_of_ids, pdev->dev.of_node); - if (!of_id) - return -EINVAL; + int err; larb = devm_kzalloc(dev, sizeof(*larb), GFP_KERNEL); if (!larb) return -ENOMEM; - larb->larb_gen = of_id->data; + larb->larb_gen = of_device_get_match_data(dev); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); larb->base = devm_ioremap_resource(dev, res); if (IS_ERR(larb->base)) @@ -268,6 +308,15 @@ static int mtk_smi_larb_probe(struct platform_device *pdev) return PTR_ERR(larb->smi.clk_smi); larb->smi.dev = dev; + if (larb->larb_gen->need_larbid) { + err = of_property_read_u32(dev->of_node, "mediatek,larb-id", + &larb->larbid); + if (err) { + dev_err(dev, "missing larbid property\n"); + return err; + } + } + smi_node = of_parse_phandle(dev->of_node, "mediatek,smi", 0); if (!smi_node) return -EINVAL; @@ -275,6 +324,8 @@ static int mtk_smi_larb_probe(struct platform_device *pdev) smi_pdev = of_find_device_by_node(smi_node); of_node_put(smi_node); if (smi_pdev) { + if (!platform_get_drvdata(smi_pdev)) + return -EPROBE_DEFER; larb->smi_common_dev = &smi_pdev->dev; } else { 
dev_err(dev, "Failed to get the smi_common device\n"); @@ -311,6 +362,10 @@ static const struct of_device_id mtk_smi_common_of_ids[] = { .compatible = "mediatek,mt2701-smi-common", .data = (void *)MTK_SMI_GEN1 }, + { + .compatible = "mediatek,mt2712-smi-common", + .data = (void *)MTK_SMI_GEN2 + }, {} }; @@ -319,11 +374,8 @@ static int mtk_smi_common_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct mtk_smi *common; struct resource *res; - const struct of_device_id *of_id; enum mtk_smi_gen smi_gen; - - if (!dev->pm_domain) - return -EPROBE_DEFER; + int ret; common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL); if (!common) @@ -338,17 +390,13 @@ static int mtk_smi_common_probe(struct platform_device *pdev) if (IS_ERR(common->clk_smi)) return PTR_ERR(common->clk_smi); - of_id = of_match_node(mtk_smi_common_of_ids, pdev->dev.of_node); - if (!of_id) - return -EINVAL; - /* * for mtk smi gen 1, we need to get the ao(always on) base to config * m4u port, and we need to enable the aync clock for transform the smi * clock into emi clock domain, but for mtk smi gen2, there's no smi ao * base. */ - smi_gen = (enum mtk_smi_gen)of_id->data; + smi_gen = (enum mtk_smi_gen)of_device_get_match_data(dev); if (smi_gen == MTK_SMI_GEN1) { res = platform_get_resource(pdev, IORESOURCE_MEM, 0); common->smi_ao_base = devm_ioremap_resource(dev, res); @@ -359,7 +407,9 @@ static int mtk_smi_common_probe(struct platform_device *pdev) if (IS_ERR(common->clk_async)) return PTR_ERR(common->clk_async); - clk_prepare_enable(common->clk_async); + ret = clk_prepare_enable(common->clk_async); + if (ret) + return ret; } pm_runtime_enable(dev); platform_set_drvdata(pdev, common); @@ -403,4 +453,4 @@ static int __init mtk_smi_init(void) return ret; } -subsys_initcall(mtk_smi_init); +module_init(mtk_smi_init); diff --git a/drivers/memory/mvebu-devbus.c b/drivers/memory/mvebu-devbus.c index 24852812fd44..981860879d02 100644 --- a/drivers/memory/mvebu-devbus.c +++ b/drivers/memory/mvebu-devbus.c @@ -105,8 +105,8 @@ static int get_timing_param_ps(struct devbus *devbus, err = of_property_read_u32(node, name, &time_ps); if (err < 0) { - dev_err(devbus->dev, "%s has no '%s' property\n", - name, node->full_name); + dev_err(devbus->dev, "%pOF has no '%s' property\n", + node, name); return err; } @@ -127,8 +127,8 @@ static int devbus_get_timing_params(struct devbus *devbus, err = of_property_read_u32(node, "devbus,bus-width", &r->bus_width); if (err < 0) { dev_err(devbus->dev, - "%s has no 'devbus,bus-width' property\n", - node->full_name); + "%pOF has no 'devbus,bus-width' property\n", + node); return err; } @@ -180,8 +180,8 @@ static int devbus_get_timing_params(struct devbus *devbus, &w->sync_enable); if (err < 0) { dev_err(devbus->dev, - "%s has no 'devbus,sync-enable' property\n", - node->full_name); + "%pOF has no 'devbus,sync-enable' property\n", + node); return err; } } diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c index a80e17de906d..7059bbda2fac 100644 --- a/drivers/memory/omap-gpmc.c +++ b/drivers/memory/omap-gpmc.c @@ -1930,8 +1930,8 @@ static int gpmc_probe_onenand_child(struct platform_device *pdev, struct omap_onenand_platform_data *gpmc_onenand_data; if (of_property_read_u32(child, "reg", &val) < 0) { - dev_err(&pdev->dev, "%s has no 'reg' property\n", - child->full_name); + dev_err(&pdev->dev, "%pOF has no 'reg' property\n", + child); return -ENODEV; } @@ -1979,14 +1979,14 @@ static int gpmc_probe_generic_child(struct platform_device *pdev, struct gpmc_device *gpmc = 
platform_get_drvdata(pdev); if (of_property_read_u32(child, "reg", &cs) < 0) { - dev_err(&pdev->dev, "%s has no 'reg' property\n", - child->full_name); + dev_err(&pdev->dev, "%pOF has no 'reg' property\n", + child); return -ENODEV; } if (of_address_to_resource(child, 0, &res) < 0) { - dev_err(&pdev->dev, "%s has malformed 'reg' property\n", - child->full_name); + dev_err(&pdev->dev, "%pOF has malformed 'reg' property\n", + child); return -ENODEV; } @@ -2084,8 +2084,8 @@ static int gpmc_probe_generic_child(struct platform_device *pdev, ret = of_property_read_u32(child, "bank-width", &gpmc_s.device_width); if (ret < 0) { - dev_err(&pdev->dev, "%s has no 'bank-width' property\n", - child->full_name); + dev_err(&pdev->dev, "%pOF has no 'bank-width' property\n", + child); goto err; } } diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index 62cff5afc6bd..84eab28665f3 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c @@ -2079,7 +2079,7 @@ void mpt_detach(struct pci_dev *pdev) { MPT_ADAPTER *ioc = pci_get_drvdata(pdev); - char pname[32]; + char pname[64]; u8 cb_idx; unsigned long flags; struct workqueue_struct *wq; @@ -2100,11 +2100,11 @@ mpt_detach(struct pci_dev *pdev) spin_unlock_irqrestore(&ioc->fw_event_lock, flags); destroy_workqueue(wq); - sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s/summary", ioc->name); + snprintf(pname, sizeof(pname), MPT_PROCFS_MPTBASEDIR "/%s/summary", ioc->name); remove_proc_entry(pname, NULL); - sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s/info", ioc->name); + snprintf(pname, sizeof(pname), MPT_PROCFS_MPTBASEDIR "/%s/info", ioc->name); remove_proc_entry(pname, NULL); - sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s", ioc->name); + snprintf(pname, sizeof(pname), MPT_PROCFS_MPTBASEDIR "/%s", ioc->name); remove_proc_entry(pname, NULL); /* call per device driver remove entry point */ diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c index d065062240bc..6d461ca97150 100644 --- a/drivers/message/fusion/mptfc.c +++ b/drivers/message/fusion/mptfc.c @@ -104,7 +104,6 @@ static void mptfc_remove(struct pci_dev *pdev); static int mptfc_abort(struct scsi_cmnd *SCpnt); static int mptfc_dev_reset(struct scsi_cmnd *SCpnt); static int mptfc_bus_reset(struct scsi_cmnd *SCpnt); -static int mptfc_host_reset(struct scsi_cmnd *SCpnt); static struct scsi_host_template mptfc_driver_template = { .module = THIS_MODULE, @@ -123,7 +122,7 @@ static struct scsi_host_template mptfc_driver_template = { .eh_abort_handler = mptfc_abort, .eh_device_reset_handler = mptfc_dev_reset, .eh_bus_reset_handler = mptfc_bus_reset, - .eh_host_reset_handler = mptfc_host_reset, + .eh_host_reset_handler = mptscsih_host_reset, .bios_param = mptscsih_bios_param, .can_queue = MPT_FC_CAN_QUEUE, .this_id = -1, @@ -254,13 +253,6 @@ mptfc_bus_reset(struct scsi_cmnd *SCpnt) mptfc_block_error_handler(SCpnt, mptscsih_bus_reset, __func__); } -static int -mptfc_host_reset(struct scsi_cmnd *SCpnt) -{ - return - mptfc_block_error_handler(SCpnt, mptscsih_host_reset, __func__); -} - static void mptfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout) { diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index f6308ad35b19..345f6035599e 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -2210,33 +2210,26 @@ mptsas_get_bay_identifier(struct sas_rphy *rphy) return rc; } -static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, - struct request *req) +static 
void mptsas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, + struct sas_rphy *rphy) { MPT_ADAPTER *ioc = ((MPT_SCSI_HOST *) shost->hostdata)->ioc; MPT_FRAME_HDR *mf; SmpPassthroughRequest_t *smpreq; - struct request *rsp = req->next_rq; - int ret; int flagsLength; unsigned long timeleft; char *psge; - dma_addr_t dma_addr_in = 0; - dma_addr_t dma_addr_out = 0; u64 sas_address = 0; - - if (!rsp) { - printk(MYIOC_s_ERR_FMT "%s: the smp response space is missing\n", - ioc->name, __func__); - return -EINVAL; - } + unsigned int reslen = 0; + int ret = -EINVAL; /* do we need to support multiple segments? */ - if (bio_multiple_segments(req->bio) || - bio_multiple_segments(rsp->bio)) { + if (job->request_payload.sg_cnt > 1 || + job->reply_payload.sg_cnt > 1) { printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u, rsp %u\n", - ioc->name, __func__, blk_rq_bytes(req), blk_rq_bytes(rsp)); - return -EINVAL; + ioc->name, __func__, job->request_payload.payload_len, + job->reply_payload.payload_len); + goto out; } ret = mutex_lock_interruptible(&ioc->sas_mgmt.mutex); @@ -2252,7 +2245,8 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, smpreq = (SmpPassthroughRequest_t *)mf; memset(smpreq, 0, sizeof(*smpreq)); - smpreq->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4); + smpreq->RequestDataLength = + cpu_to_le16(job->request_payload.payload_len - 4); smpreq->Function = MPI_FUNCTION_SMP_PASSTHROUGH; if (rphy) @@ -2278,13 +2272,14 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, MPI_SGE_FLAGS_END_OF_BUFFER | MPI_SGE_FLAGS_DIRECTION) << MPI_SGE_FLAGS_SHIFT; - flagsLength |= (blk_rq_bytes(req) - 4); - dma_addr_out = pci_map_single(ioc->pcidev, bio_data(req->bio), - blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(ioc->pcidev, dma_addr_out)) + if (!dma_map_sg(&ioc->pcidev->dev, job->request_payload.sg_list, + 1, PCI_DMA_BIDIRECTIONAL)) goto put_mf; - ioc->add_sge(psge, flagsLength, dma_addr_out); + + flagsLength |= (sg_dma_len(job->request_payload.sg_list) - 4); + ioc->add_sge(psge, flagsLength, + sg_dma_address(job->request_payload.sg_list)); psge += ioc->SGE_size; /* response */ @@ -2294,12 +2289,13 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, MPI_SGE_FLAGS_END_OF_BUFFER; flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT; - flagsLength |= blk_rq_bytes(rsp) + 4; - dma_addr_in = pci_map_single(ioc->pcidev, bio_data(rsp->bio), - blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(ioc->pcidev, dma_addr_in)) - goto unmap; - ioc->add_sge(psge, flagsLength, dma_addr_in); + + if (!dma_map_sg(&ioc->pcidev->dev, job->reply_payload.sg_list, + 1, PCI_DMA_BIDIRECTIONAL)) + goto unmap_out; + flagsLength |= sg_dma_len(job->reply_payload.sg_list) + 4; + ioc->add_sge(psge, flagsLength, + sg_dma_address(job->reply_payload.sg_list)); INITIALIZE_MGMT_STATUS(ioc->sas_mgmt.status) mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf); @@ -2310,10 +2306,10 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, mpt_free_msg_frame(ioc, mf); mf = NULL; if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_DID_IOCRESET) - goto unmap; + goto unmap_in; if (!timeleft) mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP); - goto unmap; + goto unmap_in; } mf = NULL; @@ -2321,23 +2317,22 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, SmpPassthroughReply_t *smprep; smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply; - memcpy(scsi_req(req)->sense, smprep, 
sizeof(*smprep)); - scsi_req(req)->sense_len = sizeof(*smprep); - scsi_req(req)->resid_len = 0; - scsi_req(rsp)->resid_len -= smprep->ResponseDataLength; + memcpy(job->reply, smprep, sizeof(*smprep)); + job->reply_len = sizeof(*smprep); + reslen = smprep->ResponseDataLength; } else { printk(MYIOC_s_ERR_FMT "%s: smp passthru reply failed to be returned\n", ioc->name, __func__); ret = -ENXIO; } -unmap: - if (dma_addr_out) - pci_unmap_single(ioc->pcidev, dma_addr_out, blk_rq_bytes(req), - PCI_DMA_BIDIRECTIONAL); - if (dma_addr_in) - pci_unmap_single(ioc->pcidev, dma_addr_in, blk_rq_bytes(rsp), - PCI_DMA_BIDIRECTIONAL); + +unmap_in: + dma_unmap_sg(&ioc->pcidev->dev, job->reply_payload.sg_list, 1, + PCI_DMA_BIDIRECTIONAL); +unmap_out: + dma_unmap_sg(&ioc->pcidev->dev, job->request_payload.sg_list, 1, + PCI_DMA_BIDIRECTIONAL); put_mf: if (mf) mpt_free_msg_frame(ioc, mf); @@ -2345,7 +2340,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, CLEAR_MGMT_STATUS(ioc->sas_mgmt.status) mutex_unlock(&ioc->sas_mgmt.mutex); out: - return ret; + bsg_job_done(job, ret, reslen); } static struct sas_function_template mptsas_transport_functions = { @@ -4352,11 +4347,10 @@ mptsas_hotplug_work(MPT_ADAPTER *ioc, struct fw_event_work *fw_event, return; phy_info = mptsas_refreshing_device_handles(ioc, &sas_device); - /* Only For SATA Device ADD */ - if (!phy_info && (sas_device.device_info & - MPI_SAS_DEVICE_INFO_SATA_DEVICE)) { + /* Device hot plug */ + if (!phy_info) { devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "%s %d SATA HOT PLUG: " + "%s %d HOT PLUG: " "parent handle of device %x\n", ioc->name, __func__, __LINE__, sas_device.handle_parent)); port_info = mptsas_find_portinfo_by_handle(ioc, diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 94ad2c1c3d90..fc5e4fef89d2 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -133,6 +133,20 @@ config MFD_BCM590XX help Support for the BCM590xx PMUs from Broadcom +config MFD_BD9571MWV + tristate "ROHM BD9571MWV PMIC" + select MFD_CORE + select REGMAP_I2C + select REGMAP_IRQ + depends on I2C + help + Support for the ROHM BD9571MWV PMIC, which contains single + voltage regulator, voltage sampling units, GPIO block and + watchdog block. + + This driver can also be built as a module. If so, the module + will be called bd9571mwv. + config MFD_AC100 tristate "X-Powers AC100" select MFD_CORE @@ -453,12 +467,12 @@ config LPC_SCH config INTEL_SOC_PMIC bool "Support for Crystal Cove PMIC" - depends on HAS_IOMEM && I2C=y && GPIOLIB && COMMON_CLK + depends on ACPI && HAS_IOMEM && I2C=y && GPIOLIB && COMMON_CLK depends on X86 || COMPILE_TEST select MFD_CORE select REGMAP_I2C select REGMAP_IRQ - select I2C_DESIGNWARE_PLATFORM if ACPI + select I2C_DESIGNWARE_PLATFORM help Select this option to enable support for Crystal Cove PMIC on some Intel SoC systems. The PMIC provides ADC, GPIO, @@ -481,7 +495,7 @@ config INTEL_SOC_PMIC_BXTWC on these systems. config INTEL_SOC_PMIC_CHTWC - tristate "Support for Intel Cherry Trail Whiskey Cove PMIC" + bool "Support for Intel Cherry Trail Whiskey Cove PMIC" depends on ACPI && HAS_IOMEM && I2C=y && COMMON_CLK depends on X86 || COMPILE_TEST select MFD_CORE @@ -951,13 +965,13 @@ config MFD_RC5T583 different functionality of the device. 
config MFD_RK808 - tristate "Rockchip RK808/RK818 Power Management Chip" + tristate "Rockchip RK805/RK808/RK818 Power Management Chip" depends on I2C && OF select MFD_CORE select REGMAP_I2C select REGMAP_IRQ help - If you say yes here you get support for the RK808 and RK818 + If you say yes here you get support for the RK805, RK808 and RK818 Power Management chips. This driver provides common support for accessing the device through I2C interface. The device supports multiple sub-devices @@ -1294,6 +1308,7 @@ config TPS6507X config MFD_TPS65086 tristate "TI TPS65086 Power Management Integrated Chips (PMICs)" + select MFD_CORE select REGMAP select REGMAP_IRQ select REGMAP_I2C @@ -1337,6 +1352,24 @@ config MFD_TPS65217 This driver can also be built as a module. If so, the module will be called tps65217. +config MFD_TPS68470 + bool "TI TPS68470 Power Management / LED chips" + depends on ACPI && I2C=y + select MFD_CORE + select REGMAP_I2C + select I2C_DESIGNWARE_PLATFORM + help + If you say yes here you get support for the TPS68470 series of + Power Management / LED chips. + + These include voltage regulators, LEDs and other features + that are often used in portable devices. + + This option is a bool as it provides an ACPI operation + region, which must be available before any of the devices + using this are probed. This option also configures the + designware-i2c driver to be built-in, for the same reason. + config MFD_TI_LP873X tristate "TI LP873X Power Management IC" depends on I2C @@ -1723,6 +1756,20 @@ config MFD_STW481X in various ST Microelectronics and ST-Ericsson embedded Nomadik series. +config MFD_STM32_LPTIMER + tristate "Support for STM32 Low-Power Timer" + depends on (ARCH_STM32 && OF) || COMPILE_TEST + select MFD_CORE + select REGMAP + select REGMAP_MMIO + help + Select this option to enable STM32 Low-Power Timer driver + used for PWM, IIO Trigger, IIO Encoder and Counter. Shared + resources are also dealt with here. + + To compile this driver as a module, choose M here: the + module will be called stm32-lptimer. 
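The stm32-lptimer core mainly registers the shared regmap and clock; the PWM, IIO trigger and counter cells are expected to pull those from the parent device. A minimal sketch of that pattern, assuming a struct stm32_lptimer driver-data layout with regmap/clk fields provided by the core driver:

static int stm32_pwm_lp_probe(struct platform_device *pdev)
{
	/* parent is the stm32-lptimer MFD core device */
	struct stm32_lptimer *ddata = dev_get_drvdata(pdev->dev.parent);

	if (!ddata || !ddata->regmap || !ddata->clk)
		return -EINVAL;

	/* the cell reuses the shared regmap/clk set up by the core */
	platform_set_drvdata(pdev, ddata);

	return 0;
}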
+ config MFD_STM32_TIMERS tristate "Support for STM32 Timers" depends on (ARCH_STM32 && OF) || COMPILE_TEST diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 080793b3fd0e..c3d0a1b39bb6 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -10,6 +10,7 @@ obj-$(CONFIG_MFD_ACT8945A) += act8945a.o obj-$(CONFIG_MFD_SM501) += sm501.o obj-$(CONFIG_MFD_ASIC3) += asic3.o tmio_core.o obj-$(CONFIG_MFD_BCM590XX) += bcm590xx.o +obj-$(CONFIG_MFD_BD9571MWV) += bd9571mwv.o cros_ec_core-objs := cros_ec.o cros_ec_core-$(CONFIG_ACPI) += cros_ec_acpi_gpe.o obj-$(CONFIG_MFD_CROS_EC) += cros_ec_core.o @@ -83,6 +84,7 @@ obj-$(CONFIG_MFD_TPS65910) += tps65910.o obj-$(CONFIG_MFD_TPS65912) += tps65912-core.o obj-$(CONFIG_MFD_TPS65912_I2C) += tps65912-i2c.o obj-$(CONFIG_MFD_TPS65912_SPI) += tps65912-spi.o +obj-$(CONFIG_MFD_TPS68470) += tps68470.o obj-$(CONFIG_MFD_TPS80031) += tps80031.o obj-$(CONFIG_MENELAUS) += menelaus.o @@ -221,5 +223,6 @@ obj-$(CONFIG_MFD_MT6397) += mt6397-core.o obj-$(CONFIG_MFD_ALTERA_A10SR) += altera-a10sr.o obj-$(CONFIG_MFD_SUN4I_GPADC) += sun4i-gpadc.o +obj-$(CONFIG_MFD_STM32_LPTIMER) += stm32-lptimer.o obj-$(CONFIG_MFD_STM32_TIMERS) += stm32-timers.o obj-$(CONFIG_MFD_MXS_LRADC) += mxs-lradc.o diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c index 8511c068a610..30d09d177171 100644 --- a/drivers/mfd/ab8500-core.c +++ b/drivers/mfd/ab8500-core.c @@ -1059,15 +1059,15 @@ static struct attribute *ab9540_sysfs_entries[] = { NULL, }; -static struct attribute_group ab8500_attr_group = { +static const struct attribute_group ab8500_attr_group = { .attrs = ab8500_sysfs_entries, }; -static struct attribute_group ab8505_attr_group = { +static const struct attribute_group ab8505_attr_group = { .attrs = ab8505_sysfs_entries, }; -static struct attribute_group ab9540_attr_group = { +static const struct attribute_group ab9540_attr_group = { .attrs = ab9540_sysfs_entries, }; diff --git a/drivers/mfd/atmel-smc.c b/drivers/mfd/atmel-smc.c index 20cc0ea470fa..7d77948567d7 100644 --- a/drivers/mfd/atmel-smc.c +++ b/drivers/mfd/atmel-smc.c @@ -258,19 +258,21 @@ EXPORT_SYMBOL_GPL(atmel_smc_cs_conf_apply); * atmel_hsmc_cs_conf_apply - apply an SMC CS conf * @regmap: the HSMC regmap * @cs: the CS id + * @layout: the layout of registers * @conf the SMC CS conf to apply * * Applies an SMC CS configuration. * Only valid on post-sama5 SoCs. 
*/ -void atmel_hsmc_cs_conf_apply(struct regmap *regmap, int cs, - const struct atmel_smc_cs_conf *conf) +void atmel_hsmc_cs_conf_apply(struct regmap *regmap, + const struct atmel_hsmc_reg_layout *layout, + int cs, const struct atmel_smc_cs_conf *conf) { - regmap_write(regmap, ATMEL_HSMC_SETUP(cs), conf->setup); - regmap_write(regmap, ATMEL_HSMC_PULSE(cs), conf->pulse); - regmap_write(regmap, ATMEL_HSMC_CYCLE(cs), conf->cycle); - regmap_write(regmap, ATMEL_HSMC_TIMINGS(cs), conf->timings); - regmap_write(regmap, ATMEL_HSMC_MODE(cs), conf->mode); + regmap_write(regmap, ATMEL_HSMC_SETUP(layout, cs), conf->setup); + regmap_write(regmap, ATMEL_HSMC_PULSE(layout, cs), conf->pulse); + regmap_write(regmap, ATMEL_HSMC_CYCLE(layout, cs), conf->cycle); + regmap_write(regmap, ATMEL_HSMC_TIMINGS(layout, cs), conf->timings); + regmap_write(regmap, ATMEL_HSMC_MODE(layout, cs), conf->mode); } EXPORT_SYMBOL_GPL(atmel_hsmc_cs_conf_apply); @@ -297,18 +299,55 @@ EXPORT_SYMBOL_GPL(atmel_smc_cs_conf_get); * atmel_hsmc_cs_conf_get - retrieve the current SMC CS conf * @regmap: the HSMC regmap * @cs: the CS id + * @layout: the layout of registers * @conf: the SMC CS conf object to store the current conf * * Retrieve the SMC CS configuration. * Only valid on post-sama5 SoCs. */ -void atmel_hsmc_cs_conf_get(struct regmap *regmap, int cs, - struct atmel_smc_cs_conf *conf) +void atmel_hsmc_cs_conf_get(struct regmap *regmap, + const struct atmel_hsmc_reg_layout *layout, + int cs, struct atmel_smc_cs_conf *conf) { - regmap_read(regmap, ATMEL_HSMC_SETUP(cs), &conf->setup); - regmap_read(regmap, ATMEL_HSMC_PULSE(cs), &conf->pulse); - regmap_read(regmap, ATMEL_HSMC_CYCLE(cs), &conf->cycle); - regmap_read(regmap, ATMEL_HSMC_TIMINGS(cs), &conf->timings); - regmap_read(regmap, ATMEL_HSMC_MODE(cs), &conf->mode); + regmap_read(regmap, ATMEL_HSMC_SETUP(layout, cs), &conf->setup); + regmap_read(regmap, ATMEL_HSMC_PULSE(layout, cs), &conf->pulse); + regmap_read(regmap, ATMEL_HSMC_CYCLE(layout, cs), &conf->cycle); + regmap_read(regmap, ATMEL_HSMC_TIMINGS(layout, cs), &conf->timings); + regmap_read(regmap, ATMEL_HSMC_MODE(layout, cs), &conf->mode); } EXPORT_SYMBOL_GPL(atmel_hsmc_cs_conf_get); + +static const struct atmel_hsmc_reg_layout sama5d3_reg_layout = { + .timing_regs_offset = 0x600, +}; + +static const struct atmel_hsmc_reg_layout sama5d2_reg_layout = { + .timing_regs_offset = 0x700, +}; + +static const struct of_device_id atmel_smc_ids[] = { + { .compatible = "atmel,at91sam9260-smc", .data = NULL }, + { .compatible = "atmel,sama5d3-smc", .data = &sama5d3_reg_layout }, + { .compatible = "atmel,sama5d2-smc", .data = &sama5d2_reg_layout }, + { /* sentinel */ }, +}; + +/** + * atmel_hsmc_get_reg_layout - retrieve the layout of HSMC registers + * @np: the HSMC regmap + * + * Retrieve the layout of HSMC registers. + * + * Returns NULL in case of SMC, a struct atmel_hsmc_reg_layout pointer + * in HSMC case, otherwise ERR_PTR(-EINVAL). + */ +const struct atmel_hsmc_reg_layout * +atmel_hsmc_get_reg_layout(struct device_node *np) +{ + const struct of_device_id *match; + + match = of_match_node(atmel_smc_ids, np); + + return match ? 
match->data : ERR_PTR(-EINVAL); +} +EXPORT_SYMBOL_GPL(atmel_hsmc_get_reg_layout); diff --git a/drivers/mfd/axp20x-rsb.c b/drivers/mfd/axp20x-rsb.c index fd5c7267b136..7ddbd9e8dd03 100644 --- a/drivers/mfd/axp20x-rsb.c +++ b/drivers/mfd/axp20x-rsb.c @@ -64,6 +64,7 @@ static const struct of_device_id axp20x_rsb_of_match[] = { { .compatible = "x-powers,axp803", .data = (void *)AXP803_ID }, { .compatible = "x-powers,axp806", .data = (void *)AXP806_ID }, { .compatible = "x-powers,axp809", .data = (void *)AXP809_ID }, + { .compatible = "x-powers,axp813", .data = (void *)AXP813_ID }, { }, }; MODULE_DEVICE_TABLE(of, axp20x_rsb_of_match); diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c index 917b6ddc4f15..336de66ca408 100644 --- a/drivers/mfd/axp20x.c +++ b/drivers/mfd/axp20x.c @@ -44,6 +44,7 @@ static const char * const axp20x_model_names[] = { "AXP803", "AXP806", "AXP809", + "AXP813", }; static const struct regmap_range axp152_writeable_ranges[] = { @@ -676,7 +677,7 @@ static struct mfd_cell axp20x_cells[] = { static struct mfd_cell axp221_cells[] = { { - .name = "axp20x-pek", + .name = "axp221-pek", .num_resources = ARRAY_SIZE(axp22x_pek_resources), .resources = axp22x_pek_resources, }, { @@ -701,7 +702,7 @@ static struct mfd_cell axp221_cells[] = { static struct mfd_cell axp223_cells[] = { { - .name = "axp20x-pek", + .name = "axp221-pek", .num_resources = ARRAY_SIZE(axp22x_pek_resources), .resources = axp22x_pek_resources, }, { @@ -834,7 +835,7 @@ static struct mfd_cell axp288_cells[] = { .resources = axp288_fuel_gauge_resources, }, { - .name = "axp20x-pek", + .name = "axp221-pek", .num_resources = ARRAY_SIZE(axp288_power_button_resources), .resources = axp288_power_button_resources, }, @@ -845,7 +846,7 @@ static struct mfd_cell axp288_cells[] = { static struct mfd_cell axp803_cells[] = { { - .name = "axp20x-pek", + .name = "axp221-pek", .num_resources = ARRAY_SIZE(axp803_pek_resources), .resources = axp803_pek_resources, }, @@ -861,7 +862,7 @@ static struct mfd_cell axp806_cells[] = { static struct mfd_cell axp809_cells[] = { { - .name = "axp20x-pek", + .name = "axp221-pek", .num_resources = ARRAY_SIZE(axp809_pek_resources), .resources = axp809_pek_resources, }, { @@ -870,6 +871,14 @@ static struct mfd_cell axp809_cells[] = { }, }; +static struct mfd_cell axp813_cells[] = { + { + .name = "axp221-pek", + .num_resources = ARRAY_SIZE(axp803_pek_resources), + .resources = axp803_pek_resources, + } +}; + static struct axp20x_dev *axp20x_pm_power_off; static void axp20x_power_off(void) { @@ -956,6 +965,19 @@ int axp20x_match_device(struct axp20x_dev *axp20x) axp20x->regmap_cfg = &axp22x_regmap_config; axp20x->regmap_irq_chip = &axp809_regmap_irq_chip; break; + case AXP813_ID: + axp20x->nr_cells = ARRAY_SIZE(axp813_cells); + axp20x->cells = axp813_cells; + axp20x->regmap_cfg = &axp288_regmap_config; + /* + * The IRQ table given in the datasheet is incorrect. + * In IRQ enable/status registers 1, there are separate + * IRQs for ACIN and VBUS, instead of bits [7:5] being + * the same as bits [4:2]. So it shares the same IRQs + * as the AXP803, rather than the AXP288. 
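Renaming the power-key cell from "axp20x-pek" to "axp221-pek" in the AXP221/223/288/803/809/813 tables above only works if the input driver advertises the new name as well; the corresponding id table would look roughly like this (illustrative only, the input-driver side is not part of these hunks):

static const struct platform_device_id axp_pek_id_match[] = {
	{ .name = "axp20x-pek" },
	{ .name = "axp221-pek" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, axp_pek_id_match);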
+ */ + axp20x->regmap_irq_chip = &axp803_regmap_irq_chip; + break; default: dev_err(dev, "unsupported AXP20X ID %lu\n", axp20x->variant); return -EINVAL; diff --git a/drivers/mfd/bd9571mwv.c b/drivers/mfd/bd9571mwv.c new file mode 100644 index 000000000000..64e088dfe7b0 --- /dev/null +++ b/drivers/mfd/bd9571mwv.c @@ -0,0 +1,230 @@ +/* + * ROHM BD9571MWV-M MFD driver + * + * Copyright (C) 2017 Marek Vasut + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether expressed or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License version 2 for more details. + * + * Based on the TPS65086 driver + */ + +#include +#include +#include +#include + +#include + +static const struct mfd_cell bd9571mwv_cells[] = { + { .name = "bd9571mwv-regulator", }, + { .name = "bd9571mwv-gpio", }, +}; + +static const struct regmap_range bd9571mwv_readable_yes_ranges[] = { + regmap_reg_range(BD9571MWV_VENDOR_CODE, BD9571MWV_PRODUCT_REVISION), + regmap_reg_range(BD9571MWV_AVS_SET_MONI, BD9571MWV_AVS_DVFS_VID(3)), + regmap_reg_range(BD9571MWV_VD18_VID, BD9571MWV_VD33_VID), + regmap_reg_range(BD9571MWV_DVFS_VINIT, BD9571MWV_DVFS_VINIT), + regmap_reg_range(BD9571MWV_DVFS_SETVMAX, BD9571MWV_DVFS_MONIVDAC), + regmap_reg_range(BD9571MWV_GPIO_IN, BD9571MWV_GPIO_IN), + regmap_reg_range(BD9571MWV_GPIO_INT, BD9571MWV_GPIO_INTMASK), + regmap_reg_range(BD9571MWV_INT_INTREQ, BD9571MWV_INT_INTMASK), +}; + +static const struct regmap_access_table bd9571mwv_readable_table = { + .yes_ranges = bd9571mwv_readable_yes_ranges, + .n_yes_ranges = ARRAY_SIZE(bd9571mwv_readable_yes_ranges), +}; + +static const struct regmap_range bd9571mwv_writable_yes_ranges[] = { + regmap_reg_range(BD9571MWV_AVS_VD09_VID(0), BD9571MWV_AVS_VD09_VID(3)), + regmap_reg_range(BD9571MWV_DVFS_SETVID, BD9571MWV_DVFS_SETVID), + regmap_reg_range(BD9571MWV_GPIO_DIR, BD9571MWV_GPIO_OUT), + regmap_reg_range(BD9571MWV_GPIO_INT_SET, BD9571MWV_GPIO_INTMASK), + regmap_reg_range(BD9571MWV_INT_INTREQ, BD9571MWV_INT_INTMASK), +}; + +static const struct regmap_access_table bd9571mwv_writable_table = { + .yes_ranges = bd9571mwv_writable_yes_ranges, + .n_yes_ranges = ARRAY_SIZE(bd9571mwv_writable_yes_ranges), +}; + +static const struct regmap_range bd9571mwv_volatile_yes_ranges[] = { + regmap_reg_range(BD9571MWV_GPIO_IN, BD9571MWV_GPIO_IN), + regmap_reg_range(BD9571MWV_GPIO_INT, BD9571MWV_GPIO_INT), + regmap_reg_range(BD9571MWV_INT_INTREQ, BD9571MWV_INT_INTREQ), +}; + +static const struct regmap_access_table bd9571mwv_volatile_table = { + .yes_ranges = bd9571mwv_volatile_yes_ranges, + .n_yes_ranges = ARRAY_SIZE(bd9571mwv_volatile_yes_ranges), +}; + +static const struct regmap_config bd9571mwv_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .cache_type = REGCACHE_RBTREE, + .rd_table = &bd9571mwv_readable_table, + .wr_table = &bd9571mwv_writable_table, + .volatile_table = &bd9571mwv_volatile_table, + .max_register = 0xff, +}; + +static const struct regmap_irq bd9571mwv_irqs[] = { + REGMAP_IRQ_REG(BD9571MWV_IRQ_MD1, 0, + BD9571MWV_INT_INTREQ_MD1_INT), + REGMAP_IRQ_REG(BD9571MWV_IRQ_MD2_E1, 0, + BD9571MWV_INT_INTREQ_MD2_E1_INT), + REGMAP_IRQ_REG(BD9571MWV_IRQ_MD2_E2, 0, + BD9571MWV_INT_INTREQ_MD2_E2_INT), + REGMAP_IRQ_REG(BD9571MWV_IRQ_PROT_ERR, 0, + 
BD9571MWV_INT_INTREQ_PROT_ERR_INT), + REGMAP_IRQ_REG(BD9571MWV_IRQ_GP, 0, + BD9571MWV_INT_INTREQ_GP_INT), + REGMAP_IRQ_REG(BD9571MWV_IRQ_128H_OF, 0, + BD9571MWV_INT_INTREQ_128H_OF_INT), + REGMAP_IRQ_REG(BD9571MWV_IRQ_WDT_OF, 0, + BD9571MWV_INT_INTREQ_WDT_OF_INT), + REGMAP_IRQ_REG(BD9571MWV_IRQ_BKUP_TRG, 0, + BD9571MWV_INT_INTREQ_BKUP_TRG_INT), +}; + +static struct regmap_irq_chip bd9571mwv_irq_chip = { + .name = "bd9571mwv", + .status_base = BD9571MWV_INT_INTREQ, + .mask_base = BD9571MWV_INT_INTMASK, + .ack_base = BD9571MWV_INT_INTREQ, + .init_ack_masked = true, + .num_regs = 1, + .irqs = bd9571mwv_irqs, + .num_irqs = ARRAY_SIZE(bd9571mwv_irqs), +}; + +static int bd9571mwv_identify(struct bd9571mwv *bd) +{ + struct device *dev = bd->dev; + unsigned int value; + int ret; + + ret = regmap_read(bd->regmap, BD9571MWV_VENDOR_CODE, &value); + if (ret) { + dev_err(dev, "Failed to read vendor code register (ret=%i)\n", + ret); + return ret; + } + + if (value != BD9571MWV_VENDOR_CODE_VAL) { + dev_err(dev, "Invalid vendor code ID %02x (expected %02x)\n", + value, BD9571MWV_VENDOR_CODE_VAL); + return -EINVAL; + } + + ret = regmap_read(bd->regmap, BD9571MWV_PRODUCT_CODE, &value); + if (ret) { + dev_err(dev, "Failed to read product code register (ret=%i)\n", + ret); + return ret; + } + + if (value != BD9571MWV_PRODUCT_CODE_VAL) { + dev_err(dev, "Invalid product code ID %02x (expected %02x)\n", + value, BD9571MWV_PRODUCT_CODE_VAL); + return -EINVAL; + } + + ret = regmap_read(bd->regmap, BD9571MWV_PRODUCT_REVISION, &value); + if (ret) { + dev_err(dev, "Failed to read revision register (ret=%i)\n", + ret); + return ret; + } + + dev_info(dev, "Device: BD9571MWV rev. %d\n", value & 0xff); + + return 0; +} + +static int bd9571mwv_probe(struct i2c_client *client, + const struct i2c_device_id *ids) +{ + struct bd9571mwv *bd; + int ret; + + bd = devm_kzalloc(&client->dev, sizeof(*bd), GFP_KERNEL); + if (!bd) + return -ENOMEM; + + i2c_set_clientdata(client, bd); + bd->dev = &client->dev; + bd->irq = client->irq; + + bd->regmap = devm_regmap_init_i2c(client, &bd9571mwv_regmap_config); + if (IS_ERR(bd->regmap)) { + dev_err(bd->dev, "Failed to initialize register map\n"); + return PTR_ERR(bd->regmap); + } + + ret = bd9571mwv_identify(bd); + if (ret) + return ret; + + ret = regmap_add_irq_chip(bd->regmap, bd->irq, IRQF_ONESHOT, 0, + &bd9571mwv_irq_chip, &bd->irq_data); + if (ret) { + dev_err(bd->dev, "Failed to register IRQ chip\n"); + return ret; + } + + ret = mfd_add_devices(bd->dev, PLATFORM_DEVID_AUTO, bd9571mwv_cells, + ARRAY_SIZE(bd9571mwv_cells), NULL, 0, + regmap_irq_get_domain(bd->irq_data)); + if (ret) { + regmap_del_irq_chip(bd->irq, bd->irq_data); + return ret; + } + + return 0; +} + +static int bd9571mwv_remove(struct i2c_client *client) +{ + struct bd9571mwv *bd = i2c_get_clientdata(client); + + regmap_del_irq_chip(bd->irq, bd->irq_data); + + return 0; +} + +static const struct of_device_id bd9571mwv_of_match_table[] = { + { .compatible = "rohm,bd9571mwv", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, bd9571mwv_of_match_table); + +static const struct i2c_device_id bd9571mwv_id_table[] = { + { "bd9571mwv", 0 }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(i2c, bd9571mwv_id_table); + +static struct i2c_driver bd9571mwv_driver = { + .driver = { + .name = "bd9571mwv", + .of_match_table = bd9571mwv_of_match_table, + }, + .probe = bd9571mwv_probe, + .remove = bd9571mwv_remove, + .id_table = bd9571mwv_id_table, +}; +module_i2c_driver(bd9571mwv_driver); + +MODULE_AUTHOR("Marek Vasut "); 
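/*
 * Editor's sketch, not part of the patch: the cells registered above
 * ("bd9571mwv-regulator", "bd9571mwv-gpio") reach the state set up by
 * this parent driver through their platform device's parent, because
 * mfd_add_devices() parents each cell to bd->dev and the probe above
 * stores the bd9571mwv structure as that device's drvdata. The child
 * driver name and probe function below are hypothetical; only the
 * dev_get_drvdata(pdev->dev.parent) pattern is the point, and it
 * assumes struct bd9571mwv from the driver's MFD header.
 */
static int bd9571mwv_child_probe(struct platform_device *pdev)
{
	/* The parent I2C device holds the shared bd9571mwv state. */
	struct bd9571mwv *bd = dev_get_drvdata(pdev->dev.parent);
	unsigned int rev;
	int ret;

	/* Any cell may use the shared, access-table-guarded regmap. */
	ret = regmap_read(bd->regmap, BD9571MWV_PRODUCT_REVISION, &rev);
	if (ret)
		return ret;

	dev_dbg(&pdev->dev, "parent PMIC revision: 0x%02x\n", rev & 0xff);
	return 0;
}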
+MODULE_DESCRIPTION("BD9571MWV PMIC Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mfd/da9052-core.c b/drivers/mfd/da9052-core.c index a23a3a1c7061..433add43a0a9 100644 --- a/drivers/mfd/da9052-core.c +++ b/drivers/mfd/da9052-core.c @@ -387,6 +387,8 @@ int da9052_adc_manual_read(struct da9052 *da9052, unsigned char channel) mutex_lock(&da9052->auxadc_lock); + reinit_completion(&da9052->done); + /* Channel gets activated on enabling the Conversion bit */ mux_sel = chan_mux[channel] | DA9052_ADC_MAN_MAN_CONV; diff --git a/drivers/mfd/da9052-spi.c b/drivers/mfd/da9052-spi.c index b9ea1b27db64..abfb11818fdc 100644 --- a/drivers/mfd/da9052-spi.c +++ b/drivers/mfd/da9052-spi.c @@ -67,7 +67,7 @@ static int da9052_spi_remove(struct spi_device *spi) return 0; } -static struct spi_device_id da9052_spi_id[] = { +static const struct spi_device_id da9052_spi_id[] = { {"da9052", DA9052}, {"da9053-aa", DA9053_AA}, {"da9053-ba", DA9053_BA}, diff --git a/drivers/mfd/da9055-i2c.c b/drivers/mfd/da9055-i2c.c index b53e100f577c..8169a5c2fa20 100644 --- a/drivers/mfd/da9055-i2c.c +++ b/drivers/mfd/da9055-i2c.c @@ -62,7 +62,7 @@ static int da9055_i2c_remove(struct i2c_client *i2c) * purposes separate). As a result there are specific DA9055 ids for PMIC * and CODEC, which must be different to operate together. */ -static struct i2c_device_id da9055_i2c_id[] = { +static const struct i2c_device_id da9055_i2c_id[] = { {"da9055-pmic", 0}, { } }; diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c index 5c739ac752e8..5970b8def548 100644 --- a/drivers/mfd/db8500-prcmu.c +++ b/drivers/mfd/db8500-prcmu.c @@ -33,7 +33,6 @@ #include #include #include -#include #include #include #include "dbx500-prcmu-regs.h" @@ -1692,32 +1691,27 @@ static long round_clock_rate(u8 clock, unsigned long rate) return rounded_rate; } -/* CPU FREQ table, may be changed due to if MAX_OPP is supported. */ -static struct cpufreq_frequency_table db8500_cpufreq_table[] = { - { .frequency = 200000, .driver_data = ARM_EXTCLK,}, - { .frequency = 400000, .driver_data = ARM_50_OPP,}, - { .frequency = 800000, .driver_data = ARM_100_OPP,}, - { .frequency = CPUFREQ_TABLE_END,}, /* To be used for MAX_OPP. */ - { .frequency = CPUFREQ_TABLE_END,}, +static const unsigned long armss_freqs[] = { + 200000000, + 400000000, + 800000000, + 998400000 }; static long round_armss_rate(unsigned long rate) { - struct cpufreq_frequency_table *pos; - long freq = 0; - - /* cpufreq table frequencies is in KHz. */ - rate = rate / 1000; + unsigned long freq = 0; + int i; /* Find the corresponding arm opp from the cpufreq table. */ - cpufreq_for_each_entry(pos, db8500_cpufreq_table) { - freq = pos->frequency; - if (freq == rate) + for (i = 0; i < ARRAY_SIZE(armss_freqs); i++) { + freq = armss_freqs[i]; + if (rate <= freq) break; } /* Return the last valid value, even if a match was not found. */ - return freq * 1000; + return freq; } #define MIN_PLL_VCO_RATE 600000000ULL @@ -1854,21 +1848,23 @@ static void set_clock_rate(u8 clock, unsigned long rate) static int set_armss_rate(unsigned long rate) { - struct cpufreq_frequency_table *pos; - - /* cpufreq table frequencies is in KHz. */ - rate = rate / 1000; + unsigned long freq; + u8 opps[] = { ARM_EXTCLK, ARM_50_OPP, ARM_100_OPP, ARM_MAX_OPP }; + int i; /* Find the corresponding arm opp from the cpufreq table. 
*/ - cpufreq_for_each_entry(pos, db8500_cpufreq_table) - if (pos->frequency == rate) + for (i = 0; i < ARRAY_SIZE(armss_freqs); i++) { + freq = armss_freqs[i]; + if (rate == freq) break; + } - if (pos->frequency != rate) + if (rate != freq) return -EINVAL; /* Set the new arm opp. */ - return db8500_prcmu_set_arm_opp(pos->driver_data); + pr_debug("SET ARM OPP 0x%02x\n", opps[i]); + return db8500_prcmu_set_arm_opp(opps[i]); } static int set_plldsi_rate(unsigned long rate) @@ -3048,12 +3044,6 @@ static const struct mfd_cell db8500_prcmu_devs[] = { .platform_data = &db8500_regulators, .pdata_size = sizeof(db8500_regulators), }, - { - .name = "cpufreq-ux500", - .of_compatible = "stericsson,cpufreq-ux500", - .platform_data = &db8500_cpufreq_table, - .pdata_size = sizeof(db8500_cpufreq_table), - }, { .name = "cpuidle-dbx500", .of_compatible = "stericsson,cpuidle-dbx500", @@ -3067,14 +3057,6 @@ static const struct mfd_cell db8500_prcmu_devs[] = { }, }; -static void db8500_prcmu_update_cpufreq(void) -{ - if (prcmu_has_arm_maxopp()) { - db8500_cpufreq_table[3].frequency = 1000000; - db8500_cpufreq_table[3].driver_data = ARM_MAX_OPP; - } -} - static int db8500_prcmu_register_ab8500(struct device *parent) { struct device_node *np; @@ -3160,8 +3142,6 @@ static int db8500_prcmu_probe(struct platform_device *pdev) prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET); - db8500_prcmu_update_cpufreq(); - err = mfd_add_devices(&pdev->dev, 0, common_prcmu_devs, ARRAY_SIZE(common_prcmu_devs), NULL, 0, db8500_irq_domain); if (err) { diff --git a/drivers/mfd/dm355evm_msp.c b/drivers/mfd/dm355evm_msp.c index 86eca614507b..2a2756709f22 100644 --- a/drivers/mfd/dm355evm_msp.c +++ b/drivers/mfd/dm355evm_msp.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include /* diff --git a/drivers/mfd/hi6421-pmic-core.c b/drivers/mfd/hi6421-pmic-core.c index 3fd703fe3aba..6fb7ba272e09 100644 --- a/drivers/mfd/hi6421-pmic-core.c +++ b/drivers/mfd/hi6421-pmic-core.c @@ -1,40 +1,35 @@ /* - * Device driver for Hi6421 IC + * Device driver for Hi6421 PMIC * * Copyright (c) <2011-2014> HiSilicon Technologies Co., Ltd. * http://www.hisilicon.com - * Copyright (c) <2013-2014> Linaro Ltd. + * Copyright (c) <2013-2017> Linaro Ltd. * http://www.linaro.org * * Author: Guodong Xu * * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #include #include #include +#include #include -#include +#include #include #include -#include static const struct mfd_cell hi6421_devs[] = { { .name = "hi6421-regulator", }, }; +static const struct mfd_cell hi6421v530_devs[] = { + { .name = "hi6421v530-regulator", }, +}; + static const struct regmap_config hi6421_regmap_config = { .reg_bits = 32, .reg_stride = 4, @@ -42,12 +37,33 @@ static const struct regmap_config hi6421_regmap_config = { .max_register = HI6421_REG_TO_BUS_ADDR(HI6421_REG_MAX), }; +static const struct of_device_id of_hi6421_pmic_match[] = { + { + .compatible = "hisilicon,hi6421-pmic", + .data = (void *)HI6421 + }, + { + .compatible = "hisilicon,hi6421v530-pmic", + .data = (void *)HI6421_V530 + }, + { }, +}; +MODULE_DEVICE_TABLE(of, of_hi6421_pmic_match); + static int hi6421_pmic_probe(struct platform_device *pdev) { struct hi6421_pmic *pmic; struct resource *res; + const struct of_device_id *id; + const struct mfd_cell *subdevs; + enum hi6421_type type; void __iomem *base; - int ret; + int n_subdevs, ret; + + id = of_match_device(of_hi6421_pmic_match, &pdev->dev); + if (!id) + return -EINVAL; + type = (enum hi6421_type)id->data; pmic = devm_kzalloc(&pdev->dev, sizeof(*pmic), GFP_KERNEL); if (!pmic) @@ -61,41 +77,50 @@ static int hi6421_pmic_probe(struct platform_device *pdev) pmic->regmap = devm_regmap_init_mmio_clk(&pdev->dev, NULL, base, &hi6421_regmap_config); if (IS_ERR(pmic->regmap)) { - dev_err(&pdev->dev, - "regmap init failed: %ld\n", PTR_ERR(pmic->regmap)); + dev_err(&pdev->dev, "Failed to initialise Regmap: %ld\n", + PTR_ERR(pmic->regmap)); return PTR_ERR(pmic->regmap); } - /* set over-current protection debounce 8ms */ - regmap_update_bits(pmic->regmap, HI6421_OCP_DEB_CTRL_REG, + platform_set_drvdata(pdev, pmic); + + switch (type) { + case HI6421: + /* set over-current protection debounce 8ms */ + regmap_update_bits(pmic->regmap, HI6421_OCP_DEB_CTRL_REG, (HI6421_OCP_DEB_SEL_MASK | HI6421_OCP_EN_DEBOUNCE_MASK | HI6421_OCP_AUTO_STOP_MASK), (HI6421_OCP_DEB_SEL_8MS | HI6421_OCP_EN_DEBOUNCE_ENABLE)); - platform_set_drvdata(pdev, pmic); + subdevs = hi6421_devs; + n_subdevs = ARRAY_SIZE(hi6421_devs); + break; + case HI6421_V530: + subdevs = hi6421v530_devs; + n_subdevs = ARRAY_SIZE(hi6421v530_devs); + break; + default: + dev_err(&pdev->dev, "Unknown device type %d\n", + (unsigned int)type); + return -EINVAL; + } - ret = devm_mfd_add_devices(&pdev->dev, 0, hi6421_devs, - ARRAY_SIZE(hi6421_devs), NULL, 0, NULL); + ret = devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE, + subdevs, n_subdevs, NULL, 0, NULL); if (ret) { - dev_err(&pdev->dev, "add mfd devices failed: %d\n", ret); + dev_err(&pdev->dev, "Failed to add child devices: %d\n", ret); return ret; } return 0; } -static const struct of_device_id of_hi6421_pmic_match_tbl[] = { - { .compatible = "hisilicon,hi6421-pmic", }, - { }, -}; -MODULE_DEVICE_TABLE(of, of_hi6421_pmic_match_tbl); - static struct platform_driver hi6421_pmic_driver = { .driver = { - .name = "hi6421_pmic", - .of_match_table = of_hi6421_pmic_match_tbl, + .name = "hi6421_pmic", + .of_match_table = of_hi6421_pmic_match, }, .probe = hi6421_pmic_probe, }; diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c index ad388bb056cd..d1c46de89eb4 100644 --- a/drivers/mfd/intel-lpss-pci.c +++ b/drivers/mfd/intel-lpss-pci.c @@ -221,6 +221,7 @@ static const struct pci_device_id intel_lpss_pci_ids[] = { { PCI_VDEVICE(INTEL, 0xa12a), (kernel_ulong_t)&spt_info }, { PCI_VDEVICE(INTEL, 0xa160), (kernel_ulong_t)&spt_i2c_info }, { 
PCI_VDEVICE(INTEL, 0xa161), (kernel_ulong_t)&spt_i2c_info }, + { PCI_VDEVICE(INTEL, 0xa162), (kernel_ulong_t)&spt_i2c_info }, { PCI_VDEVICE(INTEL, 0xa166), (kernel_ulong_t)&spt_uart_info }, /* KBL-H */ { PCI_VDEVICE(INTEL, 0xa2a7), (kernel_ulong_t)&spt_uart_info }, diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c index 70c646b0097d..0e0ab9bb1530 100644 --- a/drivers/mfd/intel-lpss.c +++ b/drivers/mfd/intel-lpss.c @@ -502,6 +502,14 @@ int intel_lpss_suspend(struct device *dev) for (i = 0; i < LPSS_PRIV_REG_COUNT; i++) lpss->priv_ctx[i] = readl(lpss->priv + i * 4); + /* + * If the device type is not UART, then put the controller into + * reset. UART cannot be put into reset since S3/S0ix fail when + * no_console_suspend flag is enabled. + */ + if (lpss->type != LPSS_DEV_UART) + writel(0, lpss->priv + LPSS_PRIV_RESETS); + return 0; } EXPORT_SYMBOL_GPL(intel_lpss_suspend); diff --git a/drivers/mfd/intel_soc_pmic_core.c b/drivers/mfd/intel_soc_pmic_core.c index 13737be6df35..36adf9e8153e 100644 --- a/drivers/mfd/intel_soc_pmic_core.c +++ b/drivers/mfd/intel_soc_pmic_core.c @@ -16,6 +16,7 @@ * Author: Zhu, Lejun */ +#include #include #include #include @@ -28,6 +29,10 @@ #include #include "intel_soc_pmic_core.h" +/* Crystal Cove PMIC shares same ACPI ID between different platforms */ +#define BYT_CRC_HRV 2 +#define CHT_CRC_HRV 3 + /* Lookup table for the Panel Enable/Disable line as GPIO signals */ static struct gpiod_lookup_table panel_gpio_table = { /* Intel GFX is consumer */ @@ -48,16 +53,33 @@ static int intel_soc_pmic_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *i2c_id) { struct device *dev = &i2c->dev; - const struct acpi_device_id *id; struct intel_soc_pmic_config *config; struct intel_soc_pmic *pmic; + unsigned long long hrv; + acpi_status status; int ret; - id = acpi_match_device(dev->driver->acpi_match_table, dev); - if (!id || !id->driver_data) + /* + * There are 2 different Crystal Cove PMICs a Bay Trail and Cherry + * Trail version, use _HRV to differentiate between the 2. 
+ */ + status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_HRV", NULL, &hrv); + if (ACPI_FAILURE(status)) { + dev_err(dev, "Failed to get PMIC hardware revision\n"); return -ENODEV; + } - config = (struct intel_soc_pmic_config *)id->driver_data; + switch (hrv) { + case BYT_CRC_HRV: + config = &intel_soc_pmic_config_byt_crc; + break; + case CHT_CRC_HRV: + config = &intel_soc_pmic_config_cht_crc; + break; + default: + dev_warn(dev, "Unknown hardware rev %llu, assuming BYT\n", hrv); + config = &intel_soc_pmic_config_byt_crc; + } pmic = devm_kzalloc(dev, sizeof(*pmic), GFP_KERNEL); if (!pmic) @@ -157,7 +179,7 @@ MODULE_DEVICE_TABLE(i2c, intel_soc_pmic_i2c_id); #if defined(CONFIG_ACPI) static const struct acpi_device_id intel_soc_pmic_acpi_match[] = { - {"INT33FD", (kernel_ulong_t)&intel_soc_pmic_config_crc}, + { "INT33FD" }, { }, }; MODULE_DEVICE_TABLE(acpi, intel_soc_pmic_acpi_match); diff --git a/drivers/mfd/intel_soc_pmic_core.h b/drivers/mfd/intel_soc_pmic_core.h index ff2464bc172f..90a1416d4dac 100644 --- a/drivers/mfd/intel_soc_pmic_core.h +++ b/drivers/mfd/intel_soc_pmic_core.h @@ -27,6 +27,7 @@ struct intel_soc_pmic_config { const struct regmap_irq_chip *irq_chip; }; -extern struct intel_soc_pmic_config intel_soc_pmic_config_crc; +extern struct intel_soc_pmic_config intel_soc_pmic_config_byt_crc; +extern struct intel_soc_pmic_config intel_soc_pmic_config_cht_crc; #endif /* __INTEL_SOC_PMIC_CORE_H__ */ diff --git a/drivers/mfd/intel_soc_pmic_crc.c b/drivers/mfd/intel_soc_pmic_crc.c index 4a7494872da2..6d19a6d0fb97 100644 --- a/drivers/mfd/intel_soc_pmic_crc.c +++ b/drivers/mfd/intel_soc_pmic_crc.c @@ -80,7 +80,7 @@ static struct resource bcu_resources[] = { }, }; -static struct mfd_cell crystal_cove_dev[] = { +static struct mfd_cell crystal_cove_byt_dev[] = { { .name = "crystal_cove_pwrsrc", .num_resources = ARRAY_SIZE(pwrsrc_resources), @@ -114,6 +114,17 @@ static struct mfd_cell crystal_cove_dev[] = { }, }; +static struct mfd_cell crystal_cove_cht_dev[] = { + { + .name = "crystal_cove_gpio", + .num_resources = ARRAY_SIZE(gpio_resources), + .resources = gpio_resources, + }, + { + .name = "crystal_cove_pwm", + }, +}; + static const struct regmap_config crystal_cove_regmap_config = { .reg_bits = 8, .val_bits = 8, @@ -155,10 +166,18 @@ static const struct regmap_irq_chip crystal_cove_irq_chip = { .mask_base = CRYSTAL_COVE_REG_MIRQLVL1, }; -struct intel_soc_pmic_config intel_soc_pmic_config_crc = { +struct intel_soc_pmic_config intel_soc_pmic_config_byt_crc = { .irq_flags = IRQF_TRIGGER_RISING, - .cell_dev = crystal_cove_dev, - .n_cell_devs = ARRAY_SIZE(crystal_cove_dev), + .cell_dev = crystal_cove_byt_dev, + .n_cell_devs = ARRAY_SIZE(crystal_cove_byt_dev), + .regmap_config = &crystal_cove_regmap_config, + .irq_chip = &crystal_cove_irq_chip, +}; + +struct intel_soc_pmic_config intel_soc_pmic_config_cht_crc = { + .irq_flags = IRQF_TRIGGER_RISING, + .cell_dev = crystal_cove_cht_dev, + .n_cell_devs = ARRAY_SIZE(crystal_cove_cht_dev), .regmap_config = &crystal_cove_regmap_config, .irq_chip = &crystal_cove_irq_chip, }; diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c index 895f655780a7..55d824b3a808 100644 --- a/drivers/mfd/kempld-core.c +++ b/drivers/mfd/kempld-core.c @@ -494,7 +494,7 @@ static struct platform_driver kempld_driver = { .remove = kempld_remove, }; -static struct dmi_system_id kempld_dmi_table[] __initdata = { +static const struct dmi_system_id kempld_dmi_table[] __initconst = { { .ident = "BBD6", .matches = { diff --git a/drivers/mfd/lp87565.c 
b/drivers/mfd/lp87565.c index 340ad0c63744..32d2a07d4354 100644 --- a/drivers/mfd/lp87565.c +++ b/drivers/mfd/lp87565.c @@ -73,10 +73,9 @@ static int lp87565_probe(struct i2c_client *client, i2c_set_clientdata(client, lp87565); - ret = mfd_add_devices(lp87565->dev, PLATFORM_DEVID_AUTO, lp87565_cells, - ARRAY_SIZE(lp87565_cells), NULL, 0, NULL); - - return ret; + return devm_mfd_add_devices(lp87565->dev, PLATFORM_DEVID_AUTO, + lp87565_cells, ARRAY_SIZE(lp87565_cells), + NULL, 0, NULL); } static const struct i2c_device_id lp87565_id_table[] = { diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c index 773f1554d2f9..450ae36645aa 100644 --- a/drivers/mfd/lpc_ich.c +++ b/drivers/mfd/lpc_ich.c @@ -1119,17 +1119,7 @@ static int lpc_ich_init_spi(struct pci_dev *dev) res->start = spi_base + SPIBASE_LPT; res->end = res->start + SPIBASE_LPT_SZ - 1; - /* - * Try to make the flash chip writeable now by - * setting BCR_WPD. It it fails we tell the driver - * that it can only read the chip. - */ pci_read_config_dword(dev, BCR, &bcr); - if (!(bcr & BCR_WPD)) { - bcr |= BCR_WPD; - pci_write_config_dword(dev, BCR, bcr); - pci_read_config_dword(dev, BCR, &bcr); - } info->writeable = !!(bcr & BCR_WPD); } break; diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c index 5c80aea3211f..10063236132c 100644 --- a/drivers/mfd/max8925-i2c.c +++ b/drivers/mfd/max8925-i2c.c @@ -151,7 +151,7 @@ static int max8925_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct max8925_platform_data *pdata = dev_get_platdata(&client->dev); - static struct max8925_chip *chip; + struct max8925_chip *chip; struct device_node *node = client->dev.of_node; if (node && !pdata) { diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c index 4c33b8063bc3..b1d3f70782d9 100644 --- a/drivers/mfd/max8998.c +++ b/drivers/mfd/max8998.c @@ -192,10 +192,8 @@ static int max8998_i2c_probe(struct i2c_client *i2c, if (IS_ENABLED(CONFIG_OF) && i2c->dev.of_node) { pdata = max8998_i2c_parse_dt_pdata(&i2c->dev); - if (IS_ERR(pdata)) { - ret = PTR_ERR(pdata); - goto err; - } + if (IS_ERR(pdata)) + return PTR_ERR(pdata); } i2c_set_clientdata(i2c, max8998); diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c index 6f5300b0eb31..44a5d66314c6 100644 --- a/drivers/mfd/omap-usb-tll.c +++ b/drivers/mfd/omap-usb-tll.c @@ -131,12 +131,12 @@ static inline u32 usbtll_read(void __iomem *base, u32 reg) return readl_relaxed(base + reg); } -static inline void usbtll_writeb(void __iomem *base, u8 reg, u8 val) +static inline void usbtll_writeb(void __iomem *base, u32 reg, u8 val) { writeb_relaxed(val, base + reg); } -static inline u8 usbtll_readb(void __iomem *base, u8 reg) +static inline u8 usbtll_readb(void __iomem *base, u32 reg) { return readb_relaxed(base + reg); } diff --git a/drivers/mfd/retu-mfd.c b/drivers/mfd/retu-mfd.c index d4c114abeb75..e7d27b7861c1 100644 --- a/drivers/mfd/retu-mfd.c +++ b/drivers/mfd/retu-mfd.c @@ -302,15 +302,23 @@ static int retu_remove(struct i2c_client *i2c) } static const struct i2c_device_id retu_id[] = { - { "retu-mfd", 0 }, - { "tahvo-mfd", 0 }, + { "retu", 0 }, + { "tahvo", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, retu_id); +static const struct of_device_id retu_of_match[] = { + { .compatible = "nokia,retu" }, + { .compatible = "nokia,tahvo" }, + { } +}; +MODULE_DEVICE_TABLE(of, retu_of_match); + static struct i2c_driver retu_driver = { .driver = { .name = "retu-mfd", + .of_match_table = retu_of_match, }, .probe = retu_probe, .remove = retu_remove, diff --git 
a/drivers/mfd/rk808.c b/drivers/mfd/rk808.c index fd087cbb0bde..216fbf6adec9 100644 --- a/drivers/mfd/rk808.c +++ b/drivers/mfd/rk808.c @@ -70,6 +70,14 @@ static const struct regmap_config rk818_regmap_config = { .volatile_reg = rk808_is_volatile_reg, }; +static const struct regmap_config rk805_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = RK805_OFF_SOURCE_REG, + .cache_type = REGCACHE_RBTREE, + .volatile_reg = rk808_is_volatile_reg, +}; + static const struct regmap_config rk808_regmap_config = { .reg_bits = 8, .val_bits = 8, @@ -86,6 +94,34 @@ static struct resource rtc_resources[] = { } }; +static struct resource rk805_key_resources[] = { + { + .start = RK805_IRQ_PWRON_FALL, + .end = RK805_IRQ_PWRON_FALL, + .flags = IORESOURCE_IRQ, + }, + { + .start = RK805_IRQ_PWRON_RISE, + .end = RK805_IRQ_PWRON_RISE, + .flags = IORESOURCE_IRQ, + } +}; + +static const struct mfd_cell rk805s[] = { + { .name = "rk808-clkout", }, + { .name = "rk808-regulator", }, + { .name = "rk805-pinctrl", }, + { + .name = "rk808-rtc", + .num_resources = ARRAY_SIZE(rtc_resources), + .resources = &rtc_resources[0], + }, + { .name = "rk805-pwrkey", + .num_resources = ARRAY_SIZE(rk805_key_resources), + .resources = &rk805_key_resources[0], + }, +}; + static const struct mfd_cell rk808s[] = { { .name = "rk808-clkout", }, { .name = "rk808-regulator", }, @@ -106,6 +142,20 @@ static const struct mfd_cell rk818s[] = { }, }; +static const struct rk808_reg_data rk805_pre_init_reg[] = { + {RK805_BUCK1_CONFIG_REG, RK805_BUCK1_2_ILMAX_MASK, + RK805_BUCK1_2_ILMAX_4000MA}, + {RK805_BUCK2_CONFIG_REG, RK805_BUCK1_2_ILMAX_MASK, + RK805_BUCK1_2_ILMAX_4000MA}, + {RK805_BUCK3_CONFIG_REG, RK805_BUCK3_4_ILMAX_MASK, + RK805_BUCK3_ILMAX_3000MA}, + {RK805_BUCK4_CONFIG_REG, RK805_BUCK3_4_ILMAX_MASK, + RK805_BUCK4_ILMAX_3500MA}, + {RK805_BUCK4_CONFIG_REG, BUCK_ILMIN_MASK, BUCK_ILMIN_400MA}, + {RK805_GPIO_IO_POL_REG, SLP_SD_MSK, SLEEP_FUN}, + {RK805_THERMAL_REG, TEMP_HOTDIE_MSK, TEMP115C}, +}; + static const struct rk808_reg_data rk808_pre_init_reg[] = { { RK808_BUCK3_CONFIG_REG, BUCK_ILMIN_MASK, BUCK_ILMIN_150MA }, { RK808_BUCK4_CONFIG_REG, BUCK_ILMIN_MASK, BUCK_ILMIN_200MA }, @@ -135,6 +185,41 @@ static const struct rk808_reg_data rk818_pre_init_reg[] = { VB_LO_SEL_3500MV }, }; +static const struct regmap_irq rk805_irqs[] = { + [RK805_IRQ_PWRON_RISE] = { + .mask = RK805_IRQ_PWRON_RISE_MSK, + .reg_offset = 0, + }, + [RK805_IRQ_VB_LOW] = { + .mask = RK805_IRQ_VB_LOW_MSK, + .reg_offset = 0, + }, + [RK805_IRQ_PWRON] = { + .mask = RK805_IRQ_PWRON_MSK, + .reg_offset = 0, + }, + [RK805_IRQ_PWRON_LP] = { + .mask = RK805_IRQ_PWRON_LP_MSK, + .reg_offset = 0, + }, + [RK805_IRQ_HOTDIE] = { + .mask = RK805_IRQ_HOTDIE_MSK, + .reg_offset = 0, + }, + [RK805_IRQ_RTC_ALARM] = { + .mask = RK805_IRQ_RTC_ALARM_MSK, + .reg_offset = 0, + }, + [RK805_IRQ_RTC_PERIOD] = { + .mask = RK805_IRQ_RTC_PERIOD_MSK, + .reg_offset = 0, + }, + [RK805_IRQ_PWRON_FALL] = { + .mask = RK805_IRQ_PWRON_FALL_MSK, + .reg_offset = 0, + }, +}; + static const struct regmap_irq rk808_irqs[] = { /* INT_STS */ [RK808_IRQ_VOUT_LO] = { @@ -247,6 +332,17 @@ static const struct regmap_irq rk818_irqs[] = { }, }; +static struct regmap_irq_chip rk805_irq_chip = { + .name = "rk805", + .irqs = rk805_irqs, + .num_irqs = ARRAY_SIZE(rk805_irqs), + .num_regs = 1, + .status_base = RK805_INT_STS_REG, + .mask_base = RK805_INT_STS_MSK_REG, + .ack_base = RK805_INT_STS_REG, + .init_ack_masked = true, +}; + static const struct regmap_irq_chip rk808_irq_chip = { .name = "rk808", .irqs = 
rk808_irqs, @@ -272,6 +368,25 @@ static const struct regmap_irq_chip rk818_irq_chip = { }; static struct i2c_client *rk808_i2c_client; + +static void rk805_device_shutdown(void) +{ + int ret; + struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client); + + if (!rk808) { + dev_warn(&rk808_i2c_client->dev, + "have no rk805, so do nothing here\n"); + return; + } + + ret = regmap_update_bits(rk808->regmap, + RK805_DEV_CTRL_REG, + DEV_OFF, DEV_OFF); + if (ret) + dev_err(&rk808_i2c_client->dev, "power off error!\n"); +} + static void rk808_device_shutdown(void) { int ret; @@ -309,6 +424,7 @@ static void rk818_device_shutdown(void) } static const struct of_device_id rk808_of_match[] = { + { .compatible = "rockchip,rk805" }, { .compatible = "rockchip,rk808" }, { .compatible = "rockchip,rk818" }, { }, @@ -325,7 +441,7 @@ static int rk808_probe(struct i2c_client *client, void (*pm_pwroff_fn)(void); int nr_pre_init_regs; int nr_cells; - int pm_off = 0; + int pm_off = 0, msb, lsb; int ret; int i; @@ -333,16 +449,34 @@ static int rk808_probe(struct i2c_client *client, if (!rk808) return -ENOMEM; - rk808->variant = i2c_smbus_read_word_data(client, RK808_ID_MSB); - if (rk808->variant < 0) { - dev_err(&client->dev, "Failed to read the chip id at 0x%02x\n", + /* Read chip variant */ + msb = i2c_smbus_read_byte_data(client, RK808_ID_MSB); + if (msb < 0) { + dev_err(&client->dev, "failed to read the chip id at 0x%x\n", RK808_ID_MSB); - return rk808->variant; + return msb; } - dev_dbg(&client->dev, "Chip id: 0x%x\n", (unsigned int)rk808->variant); + lsb = i2c_smbus_read_byte_data(client, RK808_ID_LSB); + if (lsb < 0) { + dev_err(&client->dev, "failed to read the chip id at 0x%x\n", + RK808_ID_LSB); + return lsb; + } + + rk808->variant = ((msb << 8) | lsb) & RK8XX_ID_MSK; + dev_info(&client->dev, "chip id: 0x%x\n", (unsigned int)rk808->variant); switch (rk808->variant) { + case RK805_ID: + rk808->regmap_cfg = &rk805_regmap_config; + rk808->regmap_irq_chip = &rk805_irq_chip; + pre_init_reg = rk805_pre_init_reg; + nr_pre_init_regs = ARRAY_SIZE(rk805_pre_init_reg); + cells = rk805s; + nr_cells = ARRAY_SIZE(rk805s); + pm_pwroff_fn = rk805_device_shutdown; + break; case RK808_ID: rk808->regmap_cfg = &rk808_regmap_config; rk808->regmap_irq_chip = &rk808_irq_chip; @@ -435,6 +569,7 @@ static int rk808_remove(struct i2c_client *client) } static const struct i2c_device_id rk808_ids[] = { + { "rk805" }, { "rk808" }, { "rk818" }, { }, diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c index a0ac89dfdf0f..3cf69e5c5703 100644 --- a/drivers/mfd/rtsx_pcr.c +++ b/drivers/mfd/rtsx_pcr.c @@ -644,7 +644,7 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock, { int err, clk; u8 n, clk_divider, mcu_cnt, div; - u8 depth[] = { + static const u8 depth[] = { [RTSX_SSC_DEPTH_4M] = SSC_DEPTH_4M, [RTSX_SSC_DEPTH_2M] = SSC_DEPTH_2M, [RTSX_SSC_DEPTH_1M] = SSC_DEPTH_1M, @@ -768,7 +768,7 @@ EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off); int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card) { - unsigned int cd_mask[] = { + static const unsigned int cd_mask[] = { [RTSX_SD_CARD] = SD_EXIST, [RTSX_MS_CARD] = MS_EXIST }; diff --git a/drivers/mfd/stm32-lptimer.c b/drivers/mfd/stm32-lptimer.c new file mode 100644 index 000000000000..075330a25f61 --- /dev/null +++ b/drivers/mfd/stm32-lptimer.c @@ -0,0 +1,107 @@ +/* + * STM32 Low-Power Timer parent driver. 
+ * + * Copyright (C) STMicroelectronics 2017 + * + * Author: Fabrice Gasnier + * + * Inspired by Benjamin Gaignard's stm32-timers driver + * + * License terms: GNU General Public License (GPL), version 2 + */ + +#include +#include +#include + +#define STM32_LPTIM_MAX_REGISTER 0x3fc + +static const struct regmap_config stm32_lptimer_regmap_cfg = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = sizeof(u32), + .max_register = STM32_LPTIM_MAX_REGISTER, +}; + +static int stm32_lptimer_detect_encoder(struct stm32_lptimer *ddata) +{ + u32 val; + int ret; + + /* + * Quadrature encoder mode bit can only be written and read back when + * Low-Power Timer supports it. + */ + ret = regmap_update_bits(ddata->regmap, STM32_LPTIM_CFGR, + STM32_LPTIM_ENC, STM32_LPTIM_ENC); + if (ret) + return ret; + + ret = regmap_read(ddata->regmap, STM32_LPTIM_CFGR, &val); + if (ret) + return ret; + + ret = regmap_update_bits(ddata->regmap, STM32_LPTIM_CFGR, + STM32_LPTIM_ENC, 0); + if (ret) + return ret; + + ddata->has_encoder = !!(val & STM32_LPTIM_ENC); + + return 0; +} + +static int stm32_lptimer_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct stm32_lptimer *ddata; + struct resource *res; + void __iomem *mmio; + int ret; + + ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL); + if (!ddata) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + mmio = devm_ioremap_resource(dev, res); + if (IS_ERR(mmio)) + return PTR_ERR(mmio); + + ddata->regmap = devm_regmap_init_mmio_clk(dev, "mux", mmio, + &stm32_lptimer_regmap_cfg); + if (IS_ERR(ddata->regmap)) + return PTR_ERR(ddata->regmap); + + ddata->clk = devm_clk_get(dev, NULL); + if (IS_ERR(ddata->clk)) + return PTR_ERR(ddata->clk); + + ret = stm32_lptimer_detect_encoder(ddata); + if (ret) + return ret; + + platform_set_drvdata(pdev, ddata); + + return devm_of_platform_populate(&pdev->dev); +} + +static const struct of_device_id stm32_lptimer_of_match[] = { + { .compatible = "st,stm32-lptimer", }, + {}, +}; +MODULE_DEVICE_TABLE(of, stm32_lptimer_of_match); + +static struct platform_driver stm32_lptimer_driver = { + .probe = stm32_lptimer_probe, + .driver = { + .name = "stm32-lptimer", + .of_match_table = stm32_lptimer_of_match, + }, +}; +module_platform_driver(stm32_lptimer_driver); + +MODULE_AUTHOR("Fabrice Gasnier "); +MODULE_DESCRIPTION("STMicroelectronics STM32 Low-Power Timer"); +MODULE_ALIAS("platform:stm32-lptimer"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c index 22c811396edc..43d8683266de 100644 --- a/drivers/mfd/t7l66xb.c +++ b/drivers/mfd/t7l66xb.c @@ -86,8 +86,11 @@ static int t7l66xb_mmc_enable(struct platform_device *mmc) struct t7l66xb *t7l66xb = platform_get_drvdata(dev); unsigned long flags; u8 dev_ctl; + int ret; - clk_prepare_enable(t7l66xb->clk32k); + ret = clk_prepare_enable(t7l66xb->clk32k); + if (ret) + return ret; raw_spin_lock_irqsave(&t7l66xb->lock, flags); @@ -286,8 +289,12 @@ static int t7l66xb_resume(struct platform_device *dev) { struct t7l66xb *t7l66xb = platform_get_drvdata(dev); struct t7l66xb_platform_data *pdata = dev_get_platdata(&dev->dev); + int ret; + + ret = clk_prepare_enable(t7l66xb->clk48m); + if (ret) + return ret; - clk_prepare_enable(t7l66xb->clk48m); if (pdata && pdata->resume) pdata->resume(dev); @@ -361,7 +368,9 @@ static int t7l66xb_probe(struct platform_device *dev) goto err_ioremap; } - clk_prepare_enable(t7l66xb->clk48m); + ret = clk_prepare_enable(t7l66xb->clk48m); + if (ret) + goto err_clk_enable; if 
(pdata->enable) pdata->enable(dev); @@ -386,6 +395,8 @@ static int t7l66xb_probe(struct platform_device *dev) return 0; t7l66xb_detach_irq(dev); + clk_disable_unprepare(t7l66xb->clk48m); +err_clk_enable: iounmap(t7l66xb->scr); err_ioremap: release_resource(&t7l66xb->rscr); diff --git a/drivers/mfd/tps6105x.c b/drivers/mfd/tps6105x.c index baa12ea666fb..187848c93779 100644 --- a/drivers/mfd/tps6105x.c +++ b/drivers/mfd/tps6105x.c @@ -173,9 +173,17 @@ static const struct i2c_device_id tps6105x_id[] = { }; MODULE_DEVICE_TABLE(i2c, tps6105x_id); +static const struct of_device_id tps6105x_of_match[] = { + { .compatible = "ti,tps61050" }, + { .compatible = "ti,tps61052" }, + { }, +}; +MODULE_DEVICE_TABLE(of, tps6105x_of_match); + static struct i2c_driver tps6105x_driver = { .driver = { .name = "tps6105x", + .of_match_table = tps6105x_of_match, }, .probe = tps6105x_probe, .remove = tps6105x_remove, diff --git a/drivers/mfd/tps65010.c b/drivers/mfd/tps65010.c index d829a6131f09..2ab67386b4ef 100644 --- a/drivers/mfd/tps65010.c +++ b/drivers/mfd/tps65010.c @@ -32,7 +32,7 @@ #include #include -#include +#include #include diff --git a/drivers/mfd/tps68470.c b/drivers/mfd/tps68470.c new file mode 100644 index 000000000000..189efaea054c --- /dev/null +++ b/drivers/mfd/tps68470.c @@ -0,0 +1,106 @@ +/* + * TPS68470 chip Parent driver + * + * Copyright (C) 2017 Intel Corporation + * + * Authors: + * Rajmohan Mani + * Tianshu Qiu + * Jian Xu Zheng + * Yuning Pu + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +static const struct mfd_cell tps68470s[] = { + { .name = "tps68470-gpio" }, + { .name = "tps68470_pmic_opregion" }, +}; + +static const struct regmap_config tps68470_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = TPS68470_REG_MAX, +}; + +static int tps68470_chip_init(struct device *dev, struct regmap *regmap) +{ + unsigned int version; + int ret; + + /* Force software reset */ + ret = regmap_write(regmap, TPS68470_REG_RESET, TPS68470_REG_RESET_MASK); + if (ret) + return ret; + + ret = regmap_read(regmap, TPS68470_REG_REVID, &version); + if (ret) { + dev_err(dev, "Failed to read revision register: %d\n", ret); + return ret; + } + + dev_info(dev, "TPS68470 REVID: 0x%x\n", version); + + return 0; +} + +static int tps68470_probe(struct i2c_client *client) +{ + struct device *dev = &client->dev; + struct regmap *regmap; + int ret; + + regmap = devm_regmap_init_i2c(client, &tps68470_regmap_config); + if (IS_ERR(regmap)) { + dev_err(dev, "devm_regmap_init_i2c Error %ld\n", + PTR_ERR(regmap)); + return PTR_ERR(regmap); + } + + i2c_set_clientdata(client, regmap); + + ret = tps68470_chip_init(dev, regmap); + if (ret < 0) { + dev_err(dev, "TPS68470 Init Error %d\n", ret); + return ret; + } + + ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, tps68470s, + ARRAY_SIZE(tps68470s), NULL, 0, NULL); + if (ret < 0) { + dev_err(dev, "devm_mfd_add_devices failed: %d\n", ret); + return ret; + } + + return 0; +} + +static const struct acpi_device_id tps68470_acpi_ids[] = { + {"INT3472"}, + {}, +}; +MODULE_DEVICE_TABLE(acpi, tps68470_acpi_ids); + +static struct i2c_driver tps68470_driver = { + .driver = { + .name = "tps68470", + .acpi_match_table = tps68470_acpi_ids, + }, + .probe_new = tps68470_probe, +}; +builtin_i2c_driver(tps68470_driver); diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c index c64615dca2bd..d3133a371e27 100644 --- a/drivers/mfd/twl-core.c +++ b/drivers/mfd/twl-core.c @@ -44,7 +44,7 @@ #include #include -#include +#include /* Register descriptions for audio */ #include @@ -173,7 +173,7 @@ static struct twl_private *twl_priv; static struct twl_mapping twl4030_map[] = { /* * NOTE: don't change this table without updating the - * defines for TWL4030_MODULE_* + * defines for TWL4030_MODULE_* * so they continue to match the order in this table. */ @@ -344,7 +344,7 @@ static const struct regmap_config twl4030_regmap_config[4] = { static struct twl_mapping twl6030_map[] = { /* * NOTE: don't change this table without updating the - * defines for TWL4030_MODULE_* + * defines for TWL4030_MODULE_* * so they continue to match the order in this table. */ @@ -448,7 +448,7 @@ static struct regmap *twl_get_regmap(u8 mod_no) * @reg: register address (just offset will do) * @num_bytes: number of bytes to transfer * - * Returns the result of operation - 0 is success + * Returns 0 on success or else a negative error code. */ int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes) { @@ -476,7 +476,7 @@ EXPORT_SYMBOL(twl_i2c_write); * @reg: register address (just offset will do) * @num_bytes: number of bytes to transfer * - * Returns result of operation - num_bytes is success else failure. + * Returns 0 on success or else a negative error code. 
*/ int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes) { diff --git a/drivers/mfd/twl4030-audio.c b/drivers/mfd/twl4030-audio.c index 0a1606480023..da16bf45fab4 100644 --- a/drivers/mfd/twl4030-audio.c +++ b/drivers/mfd/twl4030-audio.c @@ -30,7 +30,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c index 378c02d43bf7..b16c16f194fd 100644 --- a/drivers/mfd/twl4030-irq.c +++ b/drivers/mfd/twl4030-irq.c @@ -33,7 +33,7 @@ #include #include #include -#include +#include #include "twl-core.h" diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c index f4b2c29d77e3..6b36932263ba 100644 --- a/drivers/mfd/twl4030-power.c +++ b/drivers/mfd/twl4030-power.c @@ -25,7 +25,7 @@ #include #include -#include +#include #include #include #include diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c index 53574508a613..e3ec8dfa9f1e 100644 --- a/drivers/mfd/twl6030-irq.c +++ b/drivers/mfd/twl6030-irq.c @@ -35,7 +35,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c index 1a138c83f877..a0c44d16bf30 100644 --- a/drivers/misc/cxl/api.c +++ b/drivers/misc/cxl/api.c @@ -336,6 +336,10 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed, mmput(ctx->mm); } + /* + * Increment driver use count. Enables global TLBIs for hash + * and callbacks to handle the segment table + */ cxl_ctx_get(); if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) { diff --git a/drivers/misc/cxl/cxllib.c b/drivers/misc/cxl/cxllib.c index 5dba23ca2e5f..dc9bc1807fdf 100644 --- a/drivers/misc/cxl/cxllib.c +++ b/drivers/misc/cxl/cxllib.c @@ -219,8 +219,17 @@ int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags) down_read(&mm->mmap_sem); - for (dar = addr; dar < addr + size; dar += page_size) { - if (!vma || dar < vma->vm_start || dar > vma->vm_end) { + vma = find_vma(mm, addr); + if (!vma) { + pr_err("Can't find vma for addr %016llx\n", addr); + rc = -EFAULT; + goto out; + } + /* get the size of the pages allocated */ + page_size = vma_kernel_pagesize(vma); + + for (dar = (addr & ~(page_size - 1)); dar < (addr + size); dar += page_size) { + if (dar < vma->vm_start || dar >= vma->vm_end) { vma = find_vma(mm, addr); if (!vma) { pr_err("Can't find vma for addr %016llx\n", addr); diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c index 6eed7d03e2b5..f17f72ea0545 100644 --- a/drivers/misc/cxl/fault.c +++ b/drivers/misc/cxl/fault.c @@ -138,6 +138,22 @@ int cxl_handle_mm_fault(struct mm_struct *mm, u64 dsisr, u64 dar) int result; unsigned long access, flags, inv_flags = 0; + /* + * Add the fault handling cpu to task mm cpumask so that we + * can do a safe lockless page table walk when inserting the + * hash page table entry. This function get called with a + * valid mm for user space addresses. Hence using the if (mm) + * check is sufficient here. + */ + if (mm && !cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) { + cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); + /* + * We need to make sure we walk the table only after + * we update the cpumask. 
The other side of the barrier + * is explained in serialize_against_pte_lookup() + */ + smp_mb(); + } if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt))) { pr_devel("copro_handle_mm_fault failed: %#x\n", result); return result; diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c index 0761271d68c5..4bfad9f6dc9f 100644 --- a/drivers/misc/cxl/file.c +++ b/drivers/misc/cxl/file.c @@ -95,7 +95,6 @@ static int __afu_open(struct inode *inode, struct file *file, bool master) pr_devel("afu_open pe: %i\n", ctx->pe); file->private_data = ctx; - cxl_ctx_get(); /* indicate success */ rc = 0; @@ -225,6 +224,12 @@ static long afu_ioctl_start_work(struct cxl_context *ctx, if (ctx->mm) mmput(ctx->mm); + /* + * Increment driver use count. Enables global TLBIs for hash + * and callbacks to handle the segment table + */ + cxl_ctx_get(); + trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr); if ((rc = cxl_ops->attach_process(ctx, false, work.work_element_descriptor, @@ -233,6 +238,7 @@ static long afu_ioctl_start_work(struct cxl_context *ctx, cxl_adapter_context_put(ctx->afu->adapter); put_pid(ctx->pid); ctx->pid = NULL; + cxl_ctx_put(); cxl_context_mm_count_put(ctx); goto out; } diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index d18b3d9292fd..3ba04f371380 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -1279,7 +1279,7 @@ ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf, } /* use bounce buffer for copy */ - tbuf = (void *)__get_free_page(GFP_TEMPORARY); + tbuf = (void *)__get_free_page(GFP_KERNEL); if (!tbuf) return -ENOMEM; diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c index 09c10f426b64..deb203026496 100644 --- a/drivers/misc/pci_endpoint_test.c +++ b/drivers/misc/pci_endpoint_test.c @@ -72,6 +72,11 @@ static DEFINE_IDA(pci_endpoint_test_ida); #define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \ miscdev) + +static bool no_msi; +module_param(no_msi, bool, 0444); +MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test"); + enum pci_barno { BAR_0, BAR_1, @@ -90,9 +95,15 @@ struct pci_endpoint_test { /* mutex to protect the ioctls */ struct mutex mutex; struct miscdevice miscdev; + enum pci_barno test_reg_bar; + size_t alignment; }; -static int bar_size[] = { 4, 512, 1024, 16384, 131072, 1048576 }; +struct pci_endpoint_test_data { + enum pci_barno test_reg_bar; + size_t alignment; + bool no_msi; +}; static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test, u32 offset) @@ -141,11 +152,15 @@ static bool pci_endpoint_test_bar(struct pci_endpoint_test *test, int j; u32 val; int size; + struct pci_dev *pdev = test->pdev; if (!test->bar[barno]) return false; - size = bar_size[barno]; + size = pci_resource_len(pdev, barno); + + if (barno == test->test_reg_bar) + size = 0x4; for (j = 0; j < size; j += 4) pci_endpoint_test_bar_writel(test, barno, j, 0xA0A0A0A0); @@ -202,16 +217,32 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size) dma_addr_t dst_phys_addr; struct pci_dev *pdev = test->pdev; struct device *dev = &pdev->dev; + void *orig_src_addr; + dma_addr_t orig_src_phys_addr; + void *orig_dst_addr; + dma_addr_t orig_dst_phys_addr; + size_t offset; + size_t alignment = test->alignment; u32 src_crc32; u32 dst_crc32; - src_addr = dma_alloc_coherent(dev, size, &src_phys_addr, GFP_KERNEL); - if (!src_addr) { + orig_src_addr = dma_alloc_coherent(dev, size + alignment, + &orig_src_phys_addr, GFP_KERNEL); + 
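/*
 * Editor's sketch, not part of the patch: the open-coded alignment
 * handling that follows (and is repeated in the write and read test
 * paths further down) can be read as one small helper. The helper name
 * is hypothetical; it assumes the buffer was over-allocated by
 * "alignment" bytes with dma_alloc_coherent(), exactly as done above,
 * and relies on IS_ALIGNED()/PTR_ALIGN() from the kernel headers the
 * surrounding file already pulls in.
 */
static void *pci_endpoint_test_align_buf(void *orig_addr,
					 dma_addr_t orig_phys_addr,
					 size_t alignment,
					 dma_addr_t *phys_addr)
{
	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		/*
		 * Round the bus address up to the requested alignment and
		 * shift the CPU pointer by the same offset so both views
		 * of the buffer stay in sync.
		 */
		*phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		return orig_addr + (*phys_addr - orig_phys_addr);
	}

	*phys_addr = orig_phys_addr;
	return orig_addr;
}

/*
 * Illustrative use with the variables of the surrounding function:
 * src_addr = pci_endpoint_test_align_buf(orig_src_addr,
 *					  orig_src_phys_addr,
 *					  test->alignment,
 *					  &src_phys_addr);
 */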
if (!orig_src_addr) { dev_err(dev, "failed to allocate source buffer\n"); ret = false; goto err; } + if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) { + src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment); + offset = src_phys_addr - orig_src_phys_addr; + src_addr = orig_src_addr + offset; + } else { + src_phys_addr = orig_src_phys_addr; + src_addr = orig_src_addr; + } + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR, lower_32_bits(src_phys_addr)); @@ -221,11 +252,21 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size) get_random_bytes(src_addr, size); src_crc32 = crc32_le(~0, src_addr, size); - dst_addr = dma_alloc_coherent(dev, size, &dst_phys_addr, GFP_KERNEL); - if (!dst_addr) { + orig_dst_addr = dma_alloc_coherent(dev, size + alignment, + &orig_dst_phys_addr, GFP_KERNEL); + if (!orig_dst_addr) { dev_err(dev, "failed to allocate destination address\n"); ret = false; - goto err_src_addr; + goto err_orig_src_addr; + } + + if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) { + dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment); + offset = dst_phys_addr - orig_dst_phys_addr; + dst_addr = orig_dst_addr + offset; + } else { + dst_phys_addr = orig_dst_phys_addr; + dst_addr = orig_dst_addr; } pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR, @@ -245,10 +286,12 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size) if (dst_crc32 == src_crc32) ret = true; - dma_free_coherent(dev, size, dst_addr, dst_phys_addr); + dma_free_coherent(dev, size + alignment, orig_dst_addr, + orig_dst_phys_addr); -err_src_addr: - dma_free_coherent(dev, size, src_addr, src_phys_addr); +err_orig_src_addr: + dma_free_coherent(dev, size + alignment, orig_src_addr, + orig_src_phys_addr); err: return ret; @@ -262,15 +305,29 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size) dma_addr_t phys_addr; struct pci_dev *pdev = test->pdev; struct device *dev = &pdev->dev; + void *orig_addr; + dma_addr_t orig_phys_addr; + size_t offset; + size_t alignment = test->alignment; u32 crc32; - addr = dma_alloc_coherent(dev, size, &phys_addr, GFP_KERNEL); - if (!addr) { + orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr, + GFP_KERNEL); + if (!orig_addr) { dev_err(dev, "failed to allocate address\n"); ret = false; goto err; } + if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) { + phys_addr = PTR_ALIGN(orig_phys_addr, alignment); + offset = phys_addr - orig_phys_addr; + addr = orig_addr + offset; + } else { + phys_addr = orig_phys_addr; + addr = orig_addr; + } + get_random_bytes(addr, size); crc32 = crc32_le(~0, addr, size); @@ -293,7 +350,7 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size) if (reg & STATUS_READ_SUCCESS) ret = true; - dma_free_coherent(dev, size, addr, phys_addr); + dma_free_coherent(dev, size + alignment, orig_addr, orig_phys_addr); err: return ret; @@ -306,15 +363,29 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test, size_t size) dma_addr_t phys_addr; struct pci_dev *pdev = test->pdev; struct device *dev = &pdev->dev; + void *orig_addr; + dma_addr_t orig_phys_addr; + size_t offset; + size_t alignment = test->alignment; u32 crc32; - addr = dma_alloc_coherent(dev, size, &phys_addr, GFP_KERNEL); - if (!addr) { + orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr, + GFP_KERNEL); + if (!orig_addr) { dev_err(dev, "failed to allocate destination address\n"); ret = false; goto err; 
} + if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) { + phys_addr = PTR_ALIGN(orig_phys_addr, alignment); + offset = phys_addr - orig_phys_addr; + addr = orig_addr + offset; + } else { + phys_addr = orig_phys_addr; + addr = orig_addr; + } + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR, lower_32_bits(phys_addr)); pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR, @@ -331,7 +402,7 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test, size_t size) if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM)) ret = true; - dma_free_coherent(dev, size, addr, phys_addr); + dma_free_coherent(dev, size + alignment, orig_addr, orig_phys_addr); err: return ret; } @@ -383,13 +454,15 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev, { int i; int err; - int irq; + int irq = 0; int id; char name[20]; enum pci_barno bar; void __iomem *base; struct device *dev = &pdev->dev; struct pci_endpoint_test *test; + struct pci_endpoint_test_data *data; + enum pci_barno test_reg_bar = BAR_0; struct miscdevice *misc_device; if (pci_is_bridge(pdev)) @@ -399,7 +472,17 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev, if (!test) return -ENOMEM; + test->test_reg_bar = 0; + test->alignment = 0; test->pdev = pdev; + + data = (struct pci_endpoint_test_data *)ent->driver_data; + if (data) { + test_reg_bar = data->test_reg_bar; + test->alignment = data->alignment; + no_msi = data->no_msi; + } + init_completion(&test->irq_raised); mutex_init(&test->mutex); @@ -417,9 +500,11 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev, pci_set_master(pdev); - irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI); - if (irq < 0) - dev_err(dev, "failed to get MSI interrupts\n"); + if (!no_msi) { + irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI); + if (irq < 0) + dev_err(dev, "failed to get MSI interrupts\n"); + } err = devm_request_irq(dev, pdev->irq, pci_endpoint_test_irqhandler, IRQF_SHARED, DRV_MODULE_NAME, test); @@ -441,14 +526,15 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev, base = pci_ioremap_bar(pdev, bar); if (!base) { dev_err(dev, "failed to read BAR%d\n", bar); - WARN_ON(bar == BAR_0); + WARN_ON(bar == test_reg_bar); } test->bar[bar] = base; } - test->base = test->bar[0]; + test->base = test->bar[test_reg_bar]; if (!test->base) { - dev_err(dev, "Cannot perform PCI test without BAR0\n"); + dev_err(dev, "Cannot perform PCI test without BAR%d\n", + test_reg_bar); goto err_iounmap; } diff --git a/drivers/mmc/Kconfig b/drivers/mmc/Kconfig index 7e803fc454d1..ec21388311db 100644 --- a/drivers/mmc/Kconfig +++ b/drivers/mmc/Kconfig @@ -12,13 +12,6 @@ menuconfig MMC If you want MMC/SD/SDIO support, you should say Y here and also to your specific host controller driver. -config MMC_DEBUG - bool "MMC debugging" - depends on MMC != n - help - This is an option for use by developers; most people should - say N here. This enables MMC core and driver debugging. - if MMC source "drivers/mmc/core/Kconfig" diff --git a/drivers/mmc/Makefile b/drivers/mmc/Makefile index 416b6d1c9ec6..26ab7af4e0f9 100644 --- a/drivers/mmc/Makefile +++ b/drivers/mmc/Makefile @@ -2,7 +2,5 @@ # Makefile for the kernel mmc device drivers. 
# -subdir-ccflags-$(CONFIG_MMC_DEBUG) := -DDEBUG - obj-$(CONFIG_MMC) += core/ obj-$(subst m,y,$(CONFIG_MMC)) += host/ diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 8bd7aba811e9..2ad7b5c69156 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include @@ -126,7 +127,7 @@ module_param(perdev_minors, int, 0444); MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device"); static inline int mmc_blk_part_switch(struct mmc_card *card, - struct mmc_blk_data *md); + unsigned int part_type); static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk) { @@ -188,7 +189,6 @@ static ssize_t power_ro_lock_store(struct device *dev, { int ret; struct mmc_blk_data *md, *part_md; - struct mmc_card *card; struct mmc_queue *mq; struct request *req; unsigned long set; @@ -201,7 +201,6 @@ static ssize_t power_ro_lock_store(struct device *dev, md = mmc_blk_get(dev_to_disk(dev)); mq = &md->queue; - card = md->queue.card; /* Dispatch locking to the block layer */ req = blk_get_request(mq->queue, REQ_OP_DRV_OUT, __GFP_RECLAIM); @@ -489,7 +488,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, mrq.cmd = &cmd; - err = mmc_blk_part_switch(card, md); + err = mmc_blk_part_switch(card, md->part_type); if (err) return err; @@ -554,35 +553,20 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, return err; } -static int mmc_blk_ioctl_cmd(struct block_device *bdev, +static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md, struct mmc_ioc_cmd __user *ic_ptr) { struct mmc_blk_ioc_data *idata; struct mmc_blk_ioc_data *idatas[1]; - struct mmc_blk_data *md; struct mmc_queue *mq; struct mmc_card *card; int err = 0, ioc_err = 0; struct request *req; - /* - * The caller must have CAP_SYS_RAWIO, and must be calling this on the - * whole block device, not on a partition. This prevents overspray - * between sibling partitions. - */ - if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains)) - return -EPERM; - idata = mmc_blk_ioctl_copy_from_user(ic_ptr); if (IS_ERR(idata)) return PTR_ERR(idata); - md = mmc_blk_get(bdev->bd_disk); - if (!md) { - err = -EINVAL; - goto cmd_err; - } - card = md->queue.card; if (IS_ERR(card)) { err = PTR_ERR(card); @@ -598,7 +582,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev, __GFP_RECLAIM); idatas[0] = idata; req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL; - req_to_mmc_queue_req(req)->idata = idatas; + req_to_mmc_queue_req(req)->drv_op_data = idatas; req_to_mmc_queue_req(req)->ioc_count = 1; blk_execute_rq(mq->queue, NULL, req, 0); ioc_err = req_to_mmc_queue_req(req)->drv_op_result; @@ -606,33 +590,22 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev, blk_put_request(req); cmd_done: - mmc_blk_put(md); -cmd_err: kfree(idata->buf); kfree(idata); return ioc_err ? ioc_err : err; } -static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev, +static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md, struct mmc_ioc_multi_cmd __user *user) { struct mmc_blk_ioc_data **idata = NULL; struct mmc_ioc_cmd __user *cmds = user->cmds; struct mmc_card *card; - struct mmc_blk_data *md; struct mmc_queue *mq; int i, err = 0, ioc_err = 0; __u64 num_of_cmds; struct request *req; - /* - * The caller must have CAP_SYS_RAWIO, and must be calling this on the - * whole block device, not on a partition. This prevents overspray - * between sibling partitions. 
- */ - if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains)) - return -EPERM; - if (copy_from_user(&num_of_cmds, &user->num_of_cmds, sizeof(num_of_cmds))) return -EFAULT; @@ -656,16 +629,10 @@ static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev, } } - md = mmc_blk_get(bdev->bd_disk); - if (!md) { - err = -EINVAL; - goto cmd_err; - } - card = md->queue.card; if (IS_ERR(card)) { err = PTR_ERR(card); - goto cmd_done; + goto cmd_err; } @@ -677,7 +644,7 @@ static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev, idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, __GFP_RECLAIM); req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL; - req_to_mmc_queue_req(req)->idata = idata; + req_to_mmc_queue_req(req)->drv_op_data = idata; req_to_mmc_queue_req(req)->ioc_count = num_of_cmds; blk_execute_rq(mq->queue, NULL, req, 0); ioc_err = req_to_mmc_queue_req(req)->drv_op_result; @@ -688,8 +655,6 @@ static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev, blk_put_request(req); -cmd_done: - mmc_blk_put(md); cmd_err: for (i = 0; i < num_of_cmds; i++) { kfree(idata[i]->buf); @@ -699,16 +664,47 @@ static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev, return ioc_err ? ioc_err : err; } +static int mmc_blk_check_blkdev(struct block_device *bdev) +{ + /* + * The caller must have CAP_SYS_RAWIO, and must be calling this on the + * whole block device, not on a partition. This prevents overspray + * between sibling partitions. + */ + if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains)) + return -EPERM; + return 0; +} + static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { + struct mmc_blk_data *md; + int ret; + switch (cmd) { case MMC_IOC_CMD: - return mmc_blk_ioctl_cmd(bdev, - (struct mmc_ioc_cmd __user *)arg); + ret = mmc_blk_check_blkdev(bdev); + if (ret) + return ret; + md = mmc_blk_get(bdev->bd_disk); + if (!md) + return -EINVAL; + ret = mmc_blk_ioctl_cmd(md, + (struct mmc_ioc_cmd __user *)arg); + mmc_blk_put(md); + return ret; case MMC_IOC_MULTI_CMD: - return mmc_blk_ioctl_multi_cmd(bdev, - (struct mmc_ioc_multi_cmd __user *)arg); + ret = mmc_blk_check_blkdev(bdev); + if (ret) + return ret; + md = mmc_blk_get(bdev->bd_disk); + if (!md) + return -EINVAL; + ret = mmc_blk_ioctl_multi_cmd(md, + (struct mmc_ioc_multi_cmd __user *)arg); + mmc_blk_put(md); + return ret; default: return -EINVAL; } @@ -765,29 +761,29 @@ static int mmc_blk_part_switch_post(struct mmc_card *card, } static inline int mmc_blk_part_switch(struct mmc_card *card, - struct mmc_blk_data *md) + unsigned int part_type) { int ret = 0; struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev); - if (main_md->part_curr == md->part_type) + if (main_md->part_curr == part_type) return 0; if (mmc_card_mmc(card)) { u8 part_config = card->ext_csd.part_config; - ret = mmc_blk_part_switch_pre(card, md->part_type); + ret = mmc_blk_part_switch_pre(card, part_type); if (ret) return ret; part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK; - part_config |= md->part_type; + part_config |= part_type; ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG, part_config, card->ext_csd.part_time); if (ret) { - mmc_blk_part_switch_post(card, md->part_type); + mmc_blk_part_switch_post(card, part_type); return ret; } @@ -796,7 +792,7 @@ static inline int mmc_blk_part_switch(struct mmc_card *card, ret = mmc_blk_part_switch_post(card, main_md->part_curr); } - main_md->part_curr = md->part_type; + main_md->part_curr = part_type; return ret; } @@ -1139,7 +1135,7 @@ static 
int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host, int part_err; main_md->part_curr = main_md->part_type; - part_err = mmc_blk_part_switch(host->card, md); + part_err = mmc_blk_part_switch(host->card, md->part_type); if (part_err) { /* * We have failed to get back into the correct @@ -1178,6 +1174,10 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req) struct mmc_queue_req *mq_rq; struct mmc_card *card = mq->card; struct mmc_blk_data *md = mq->blkdata; + struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev); + struct mmc_blk_ioc_data **idata; + u8 **ext_csd; + u32 status; int ret; int i; @@ -1185,14 +1185,15 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req) switch (mq_rq->drv_op) { case MMC_DRV_OP_IOCTL: + idata = mq_rq->drv_op_data; for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) { - ret = __mmc_blk_ioctl_cmd(card, md, mq_rq->idata[i]); + ret = __mmc_blk_ioctl_cmd(card, md, idata[i]); if (ret) break; } /* Always switch back to main area after RPMB access */ if (md->area_type & MMC_BLK_DATA_AREA_RPMB) - mmc_blk_part_switch(card, dev_get_drvdata(&card->dev)); + mmc_blk_part_switch(card, main_md->part_type); break; case MMC_DRV_OP_BOOT_WP: ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, @@ -1206,6 +1207,15 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req) card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN; break; + case MMC_DRV_OP_GET_CARD_STATUS: + ret = mmc_send_status(card, &status); + if (!ret) + ret = status; + break; + case MMC_DRV_OP_GET_EXT_CSD: + ext_csd = mq_rq->drv_op_data; + ret = mmc_get_ext_csd(card, ext_csd); + break; default: pr_err("%s: unknown driver specific operation\n", md->disk->disk_name); @@ -1624,8 +1634,6 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq, } mqrq->areq.mrq = &brq->mrq; - - mmc_queue_bounce_pre(mqrq); } static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, @@ -1819,7 +1827,6 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req) brq = &mq_rq->brq; old_req = mmc_queue_req_to_req(mq_rq); type = rq_data_dir(old_req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE; - mmc_queue_bounce_post(mq_rq); switch (status) { case MMC_BLK_SUCCESS: @@ -1943,7 +1950,7 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) /* claim host only for the first request */ mmc_get_card(card); - ret = mmc_blk_part_switch(card, md); + ret = mmc_blk_part_switch(card, md->part_type); if (ret) { if (req) { blk_end_request_all(req, BLK_STS_IOERR); @@ -2024,8 +2031,20 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, int devidx, ret; devidx = ida_simple_get(&mmc_blk_ida, 0, max_devices, GFP_KERNEL); - if (devidx < 0) + if (devidx < 0) { + /* + * We get -ENOSPC because there are no more any available + * devidx. The reason may be that, either userspace haven't yet + * unmounted the partitions, which postpones mmc_blk_release() + * from being called, or the device has more partitions than + * what we support. 
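 *
 * (For reference: ida_simple_get() above is asked for an id in the
 * range [0, max_devices), so it reports -ENOSPC, not -ENOMEM, once
 * every id in that window is held by an existing mmc_blk_data.)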
+ */ + if (devidx == -ENOSPC) + dev_err(mmc_dev(card->host), + "no more device IDs available\n"); + return ERR_PTR(devidx); + } md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL); if (!md) { @@ -2283,6 +2302,134 @@ static int mmc_add_disk(struct mmc_blk_data *md) return ret; } +#ifdef CONFIG_DEBUG_FS + +static int mmc_dbg_card_status_get(void *data, u64 *val) +{ + struct mmc_card *card = data; + struct mmc_blk_data *md = dev_get_drvdata(&card->dev); + struct mmc_queue *mq = &md->queue; + struct request *req; + int ret; + + /* Ask the block layer about the card status */ + req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM); + req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS; + blk_execute_rq(mq->queue, NULL, req, 0); + ret = req_to_mmc_queue_req(req)->drv_op_result; + if (ret >= 0) { + *val = ret; + ret = 0; + } + + return ret; +} +DEFINE_SIMPLE_ATTRIBUTE(mmc_dbg_card_status_fops, mmc_dbg_card_status_get, + NULL, "%08llx\n"); + +/* That is two digits * 512 + 1 for newline */ +#define EXT_CSD_STR_LEN 1025 + +static int mmc_ext_csd_open(struct inode *inode, struct file *filp) +{ + struct mmc_card *card = inode->i_private; + struct mmc_blk_data *md = dev_get_drvdata(&card->dev); + struct mmc_queue *mq = &md->queue; + struct request *req; + char *buf; + ssize_t n = 0; + u8 *ext_csd; + int err, i; + + buf = kmalloc(EXT_CSD_STR_LEN + 1, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + /* Ask the block layer for the EXT CSD */ + req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM); + req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD; + req_to_mmc_queue_req(req)->drv_op_data = &ext_csd; + blk_execute_rq(mq->queue, NULL, req, 0); + err = req_to_mmc_queue_req(req)->drv_op_result; + if (err) { + pr_err("FAILED %d\n", err); + goto out_free; + } + + for (i = 0; i < 512; i++) + n += sprintf(buf + n, "%02x", ext_csd[i]); + n += sprintf(buf + n, "\n"); + + if (n != EXT_CSD_STR_LEN) { + err = -EINVAL; + goto out_free; + } + + filp->private_data = buf; + kfree(ext_csd); + return 0; + +out_free: + kfree(buf); + return err; +} + +static ssize_t mmc_ext_csd_read(struct file *filp, char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + char *buf = filp->private_data; + + return simple_read_from_buffer(ubuf, cnt, ppos, + buf, EXT_CSD_STR_LEN); +} + +static int mmc_ext_csd_release(struct inode *inode, struct file *file) +{ + kfree(file->private_data); + return 0; +} + +static const struct file_operations mmc_dbg_ext_csd_fops = { + .open = mmc_ext_csd_open, + .read = mmc_ext_csd_read, + .release = mmc_ext_csd_release, + .llseek = default_llseek, +}; + +static int mmc_blk_add_debugfs(struct mmc_card *card) +{ + struct dentry *root; + + if (!card->debugfs_root) + return 0; + + root = card->debugfs_root; + + if (mmc_card_mmc(card) || mmc_card_sd(card)) { + if (!debugfs_create_file("status", S_IRUSR, root, card, + &mmc_dbg_card_status_fops)) + return -EIO; + } + + if (mmc_card_mmc(card)) { + if (!debugfs_create_file("ext_csd", S_IRUSR, root, card, + &mmc_dbg_ext_csd_fops)) + return -EIO; + } + + return 0; +} + + +#else + +static int mmc_blk_add_debugfs(struct mmc_card *card) +{ + return 0; +} + +#endif /* CONFIG_DEBUG_FS */ + static int mmc_blk_probe(struct mmc_card *card) { struct mmc_blk_data *md, *part_md; @@ -2319,6 +2466,9 @@ static int mmc_blk_probe(struct mmc_card *card) goto out; } + /* Add two debugfs entries */ + mmc_blk_add_debugfs(card); + pm_runtime_set_autosuspend_delay(&card->dev, 3000); pm_runtime_use_autosuspend(&card->dev); @@ -2346,7 +2496,7 @@ static void 
mmc_blk_remove(struct mmc_card *card) mmc_blk_remove_parts(card, md); pm_runtime_get_sync(&card->dev); mmc_claim_host(card->host); - mmc_blk_part_switch(card, md); + mmc_blk_part_switch(card, md->part_type); mmc_release_host(card->host); if (card->type != MMC_TYPE_SD_COMBO) pm_runtime_disable(&card->dev); diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 26431267a3e2..66c9cf49ad2f 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -260,6 +260,9 @@ static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) trace_mmc_request_start(host, mrq); + if (host->cqe_on) + host->cqe_ops->cqe_off(host); + host->ops->request(host, mrq); } @@ -295,10 +298,8 @@ static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq) static int mmc_mrq_prep(struct mmc_host *host, struct mmc_request *mrq) { -#ifdef CONFIG_MMC_DEBUG - unsigned int i, sz; + unsigned int i, sz = 0; struct scatterlist *sg; -#endif if (mrq->cmd) { mrq->cmd->error = 0; @@ -314,13 +315,12 @@ static int mmc_mrq_prep(struct mmc_host *host, struct mmc_request *mrq) mrq->data->blocks > host->max_blk_count || mrq->data->blocks * mrq->data->blksz > host->max_req_size) return -EINVAL; -#ifdef CONFIG_MMC_DEBUG - sz = 0; + for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i) sz += sg->length; if (sz != mrq->data->blocks * mrq->data->blksz) return -EINVAL; -#endif + mrq->data->error = 0; mrq->data->mrq = mrq; if (mrq->stop) { @@ -736,8 +736,8 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card) if (data->flags & MMC_DATA_WRITE) mult <<= card->csd.r2w_factor; - data->timeout_ns = card->csd.tacc_ns * mult; - data->timeout_clks = card->csd.tacc_clks * mult; + data->timeout_ns = card->csd.taac_ns * mult; + data->timeout_clks = card->csd.taac_clks * mult; /* * SD cards also have an upper limit on the timeout. @@ -766,7 +766,7 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card) /* * SDHC cards always use these fixed values. 
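 *
 * Worked example (illustrative only, assuming the usual SD multiplier
 * of 100): a card reporting taac_ns = 1,000,000 and r2w_factor = 2
 * yields a raw write timeout of 1 ms * (100 << 2) = 400 ms; when the
 * resulting timeout_us exceeds limit_us, the fixed limit below is used
 * instead and timeout_clks is cleared.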
*/ - if (timeout_us > limit_us || mmc_card_blockaddr(card)) { + if (timeout_us > limit_us) { data->timeout_ns = limit_us * 1000; data->timeout_clks = 0; } @@ -982,6 +982,9 @@ int mmc_execute_tuning(struct mmc_card *card) if (!host->ops->execute_tuning) return 0; + if (host->cqe_on) + host->cqe_ops->cqe_off(host); + if (mmc_card_mmc(card)) opcode = MMC_SEND_TUNING_BLOCK_HS200; else @@ -1021,6 +1024,9 @@ void mmc_set_bus_width(struct mmc_host *host, unsigned int width) */ void mmc_set_initial_state(struct mmc_host *host) { + if (host->cqe_on) + host->cqe_ops->cqe_off(host); + mmc_retune_disable(host); if (mmc_host_is_spi(host)) @@ -1137,11 +1143,11 @@ int mmc_of_parse_voltage(struct device_node *np, u32 *mask) voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges); num_ranges = num_ranges / sizeof(*voltage_ranges) / 2; if (!voltage_ranges) { - pr_debug("%s: voltage-ranges unspecified\n", np->full_name); + pr_debug("%pOF: voltage-ranges unspecified\n", np); return 0; } if (!num_ranges) { - pr_err("%s: voltage-ranges empty\n", np->full_name); + pr_err("%pOF: voltage-ranges empty\n", np); return -EINVAL; } @@ -1153,8 +1159,8 @@ int mmc_of_parse_voltage(struct device_node *np, u32 *mask) be32_to_cpu(voltage_ranges[j]), be32_to_cpu(voltage_ranges[j + 1])); if (!ocr_mask) { - pr_err("%s: voltage-range #%d is invalid\n", - np->full_name, i); + pr_err("%pOF: voltage-range #%d is invalid\n", + np, i); return -EINVAL; } *mask |= ocr_mask; @@ -1769,13 +1775,6 @@ void mmc_detach_bus(struct mmc_host *host) static void _mmc_detect_change(struct mmc_host *host, unsigned long delay, bool cd_irq) { -#ifdef CONFIG_MMC_DEBUG - unsigned long flags; - spin_lock_irqsave(&host->lock, flags); - WARN_ON(host->removed); - spin_unlock_irqrestore(&host->lock, flags); -#endif - /* * If the device is configured as wakeup, we prevent a new sleep for * 5 s to give provision for user space to consume the event. @@ -1869,14 +1868,14 @@ static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card, } else { /* CSD Erase Group Size uses write timeout */ unsigned int mult = (10 << card->csd.r2w_factor); - unsigned int timeout_clks = card->csd.tacc_clks * mult; + unsigned int timeout_clks = card->csd.taac_clks * mult; unsigned int timeout_us; - /* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */ - if (card->csd.tacc_ns < 1000000) - timeout_us = (card->csd.tacc_ns * mult) / 1000; + /* Avoid overflow: e.g. taac_ns=80000000 mult=1280 */ + if (card->csd.taac_ns < 1000000) + timeout_us = (card->csd.taac_ns * mult) / 1000; else - timeout_us = (card->csd.tacc_ns / 1000) * mult; + timeout_us = (card->csd.taac_ns / 1000) * mult; /* * ios.clock is only a target. 
The real clock rate might be @@ -2446,10 +2445,9 @@ static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq) { host->f_init = freq; -#ifdef CONFIG_MMC_DEBUG - pr_info("%s: %s: trying to init card at %u Hz\n", + pr_debug("%s: %s: trying to init card at %u Hz\n", mmc_hostname(host), __func__, host->f_init); -#endif + mmc_power_up(host, host->ocr_avail); /* @@ -2646,12 +2644,6 @@ void mmc_start_host(struct mmc_host *host) void mmc_stop_host(struct mmc_host *host) { -#ifdef CONFIG_MMC_DEBUG - unsigned long flags; - spin_lock_irqsave(&host->lock, flags); - host->removed = 1; - spin_unlock_irqrestore(&host->lock, flags); -#endif if (host->slot.cd_irq >= 0) { if (host->slot.cd_wake_enabled) disable_irq_wake(host->slot.cd_irq); @@ -2686,9 +2678,7 @@ int mmc_power_save_host(struct mmc_host *host) { int ret = 0; -#ifdef CONFIG_MMC_DEBUG - pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__); -#endif + pr_debug("%s: %s: powering down\n", mmc_hostname(host), __func__); mmc_bus_get(host); @@ -2712,9 +2702,7 @@ int mmc_power_restore_host(struct mmc_host *host) { int ret; -#ifdef CONFIG_MMC_DEBUG - pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__); -#endif + pr_debug("%s: %s: powering up\n", mmc_hostname(host), __func__); mmc_bus_get(host); diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h index 55f543fd37c4..ca861091a776 100644 --- a/drivers/mmc/core/core.h +++ b/drivers/mmc/core/core.h @@ -107,6 +107,12 @@ static inline void mmc_unregister_pm_notifier(struct mmc_host *host) { } void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq); bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq); +struct mmc_async_req; + +struct mmc_async_req *mmc_start_areq(struct mmc_host *host, + struct mmc_async_req *areq, + enum mmc_blk_status *ret_stat); + int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr, unsigned int arg); int mmc_can_erase(struct mmc_card *card); diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c index a1fba5732d66..01e459a34f33 100644 --- a/drivers/mmc/core/debugfs.c +++ b/drivers/mmc/core/debugfs.c @@ -281,85 +281,6 @@ void mmc_remove_host_debugfs(struct mmc_host *host) debugfs_remove_recursive(host->debugfs_root); } -static int mmc_dbg_card_status_get(void *data, u64 *val) -{ - struct mmc_card *card = data; - u32 status; - int ret; - - mmc_get_card(card); - - ret = mmc_send_status(data, &status); - if (!ret) - *val = status; - - mmc_put_card(card); - - return ret; -} -DEFINE_SIMPLE_ATTRIBUTE(mmc_dbg_card_status_fops, mmc_dbg_card_status_get, - NULL, "%08llx\n"); - -#define EXT_CSD_STR_LEN 1025 - -static int mmc_ext_csd_open(struct inode *inode, struct file *filp) -{ - struct mmc_card *card = inode->i_private; - char *buf; - ssize_t n = 0; - u8 *ext_csd; - int err, i; - - buf = kmalloc(EXT_CSD_STR_LEN + 1, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - mmc_get_card(card); - err = mmc_get_ext_csd(card, &ext_csd); - mmc_put_card(card); - if (err) - goto out_free; - - for (i = 0; i < 512; i++) - n += sprintf(buf + n, "%02x", ext_csd[i]); - n += sprintf(buf + n, "\n"); - - if (n != EXT_CSD_STR_LEN) { - err = -EINVAL; - goto out_free; - } - - filp->private_data = buf; - kfree(ext_csd); - return 0; - -out_free: - kfree(buf); - return err; -} - -static ssize_t mmc_ext_csd_read(struct file *filp, char __user *ubuf, - size_t cnt, loff_t *ppos) -{ - char *buf = filp->private_data; - - return simple_read_from_buffer(ubuf, cnt, ppos, - buf, EXT_CSD_STR_LEN); -} - -static int 
mmc_ext_csd_release(struct inode *inode, struct file *file) -{ - kfree(file->private_data); - return 0; -} - -static const struct file_operations mmc_dbg_ext_csd_fops = { - .open = mmc_ext_csd_open, - .read = mmc_ext_csd_read, - .release = mmc_ext_csd_release, - .llseek = default_llseek, -}; - void mmc_add_card_debugfs(struct mmc_card *card) { struct mmc_host *host = card->host; @@ -382,16 +303,6 @@ void mmc_add_card_debugfs(struct mmc_card *card) if (!debugfs_create_x32("state", S_IRUSR, root, &card->state)) goto err; - if (mmc_card_mmc(card) || mmc_card_sd(card)) - if (!debugfs_create_file("status", S_IRUSR, root, card, - &mmc_dbg_card_status_fops)) - goto err; - - if (mmc_card_mmc(card)) - if (!debugfs_create_file("ext_csd", S_IRUSR, root, card, - &mmc_dbg_ext_csd_fops)) - goto err; - return; err: diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index 1503412f826c..ad88deb2e8f3 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -111,6 +111,12 @@ void mmc_retune_hold(struct mmc_host *host) host->hold_retune += 1; } +void mmc_retune_hold_now(struct mmc_host *host) +{ + host->retune_now = 0; + host->hold_retune += 1; +} + void mmc_retune_release(struct mmc_host *host) { if (host->hold_retune) diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h index fb6a76a03833..77d6f60d1bf9 100644 --- a/drivers/mmc/core/host.h +++ b/drivers/mmc/core/host.h @@ -19,6 +19,7 @@ void mmc_unregister_host_class(void); void mmc_retune_enable(struct mmc_host *host); void mmc_retune_disable(struct mmc_host *host); void mmc_retune_hold(struct mmc_host *host); +void mmc_retune_hold_now(struct mmc_host *host); void mmc_retune_release(struct mmc_host *host); int mmc_retune(struct mmc_host *host); void mmc_retune_pause(struct mmc_host *host); diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 2bae69e39544..36217ad5e9b1 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -41,11 +41,11 @@ static const unsigned char tran_mant[] = { 35, 40, 45, 50, 55, 60, 70, 80, }; -static const unsigned int tacc_exp[] = { +static const unsigned int taac_exp[] = { 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, }; -static const unsigned int tacc_mant[] = { +static const unsigned int taac_mant[] = { 0, 10, 12, 13, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 70, 80, }; @@ -153,8 +153,8 @@ static int mmc_decode_csd(struct mmc_card *card) csd->mmca_vsn = UNSTUFF_BITS(resp, 122, 4); m = UNSTUFF_BITS(resp, 115, 4); e = UNSTUFF_BITS(resp, 112, 3); - csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10; - csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100; + csd->taac_ns = (taac_exp[e] * taac_mant[m] + 9) / 10; + csd->taac_clks = UNSTUFF_BITS(resp, 104, 8) * 100; m = UNSTUFF_BITS(resp, 99, 4); e = UNSTUFF_BITS(resp, 96, 3); @@ -1286,6 +1286,23 @@ int mmc_hs400_to_hs200(struct mmc_card *card) return err; } +static void mmc_select_driver_type(struct mmc_card *card) +{ + int card_drv_type, drive_strength, drv_type; + + card_drv_type = card->ext_csd.raw_driver_strength | + mmc_driver_type_mask(0); + + drive_strength = mmc_select_drive_strength(card, + card->ext_csd.hs200_max_dtr, + card_drv_type, &drv_type); + + card->drive_strength = drive_strength; + + if (drv_type) + mmc_set_driver_type(card->host, drv_type); +} + static int mmc_select_hs400es(struct mmc_card *card) { struct mmc_host *host = card->host; @@ -1341,6 +1358,8 @@ static int mmc_select_hs400es(struct mmc_card *card) goto out_err; } + mmc_select_driver_type(card); + /* Switch card to HS400 */ val = 
EXT_CSD_TIMING_HS400 | card->drive_strength << EXT_CSD_DRV_STR_SHIFT; @@ -1374,23 +1393,6 @@ static int mmc_select_hs400es(struct mmc_card *card) return err; } -static void mmc_select_driver_type(struct mmc_card *card) -{ - int card_drv_type, drive_strength, drv_type; - - card_drv_type = card->ext_csd.raw_driver_strength | - mmc_driver_type_mask(0); - - drive_strength = mmc_select_drive_strength(card, - card->ext_csd.hs200_max_dtr, - card_drv_type, &drv_type); - - card->drive_strength = drive_strength; - - if (drv_type) - mmc_set_driver_type(card->host, drv_type); -} - /* * For device supporting HS200 mode, the following sequence * should be done before executing the tuning process. @@ -1790,29 +1792,6 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, */ card->reenable_cmdq = card->ext_csd.cmdq_en; - /* - * The mandatory minimum values are defined for packed command. - * read: 5, write: 3 - */ - if (card->ext_csd.max_packed_writes >= 3 && - card->ext_csd.max_packed_reads >= 5 && - host->caps2 & MMC_CAP2_PACKED_CMD) { - err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, - EXT_CSD_EXP_EVENTS_CTRL, - EXT_CSD_PACKED_EVENT_EN, - card->ext_csd.generic_cmd6_time); - if (err && err != -EBADMSG) - goto free_card; - if (err) { - pr_warn("%s: Enabling packed event failed\n", - mmc_hostname(card->host)); - card->ext_csd.packed_event_en = 0; - err = 0; - } else { - card->ext_csd.packed_event_en = 1; - } - } - if (!oldcard) host->card = card; diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index 5f7c5920231a..54686ca4bfb7 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c @@ -83,6 +83,7 @@ int mmc_send_status(struct mmc_card *card, u32 *status) { return __mmc_send_status(card, status, MMC_CMD_RETRIES); } +EXPORT_SYMBOL_GPL(mmc_send_status); static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card) { @@ -946,7 +947,7 @@ static int mmc_read_bkops_status(struct mmc_card *card) /** * mmc_start_bkops - start BKOPS for supported cards * @card: MMC card to start BKOPS - * @form_exception: A flag to indicate if this function was + * @from_exception: A flag to indicate if this function was * called due to an exception raised by the card * * Start background operations whenever requested. 
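For context: the ioctl and debugfs paths reworked above no longer claim the host themselves; they hand the card command to the block layer as a driver operation and let mmc_blk_issue_drv_op() run it. A minimal sketch of that pattern (illustrative only, not part of the patch; the function name is made up and only the helpers shown above are used):

	static int example_get_card_status(struct mmc_queue *mq, u32 *status)
	{
		struct request *req;
		int ret;

		/* Allocate a driver-private request on the card's queue. */
		req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
		if (IS_ERR(req))
			return PTR_ERR(req);

		req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;

		/* Dispatch synchronously; mmc_blk_issue_drv_op() services it. */
		blk_execute_rq(mq->queue, NULL, req, 0);

		ret = req_to_mmc_queue_req(req)->drv_op_result;
		if (ret >= 0) {
			*status = ret;
			ret = 0;
		}

		blk_put_request(req);
		return ret;
	}

The same request/execute/read-result sequence backs MMC_DRV_OP_IOCTL and MMC_DRV_OP_GET_EXT_CSD in the hunks above.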
diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c index 7a304a6e5bf1..478869805b96 100644 --- a/drivers/mmc/core/mmc_test.c +++ b/drivers/mmc/core/mmc_test.c @@ -800,38 +800,44 @@ static int mmc_test_check_broken_result(struct mmc_test_card *test, return ret; } +struct mmc_test_req { + struct mmc_request mrq; + struct mmc_command sbc; + struct mmc_command cmd; + struct mmc_command stop; + struct mmc_command status; + struct mmc_data data; +}; + /* * Tests nonblock transfer with certain parameters */ -static void mmc_test_nonblock_reset(struct mmc_request *mrq, - struct mmc_command *cmd, - struct mmc_command *stop, - struct mmc_data *data) +static void mmc_test_req_reset(struct mmc_test_req *rq) { - memset(mrq, 0, sizeof(struct mmc_request)); - memset(cmd, 0, sizeof(struct mmc_command)); - memset(data, 0, sizeof(struct mmc_data)); - memset(stop, 0, sizeof(struct mmc_command)); + memset(rq, 0, sizeof(struct mmc_test_req)); - mrq->cmd = cmd; - mrq->data = data; - mrq->stop = stop; + rq->mrq.cmd = &rq->cmd; + rq->mrq.data = &rq->data; + rq->mrq.stop = &rq->stop; } + +static struct mmc_test_req *mmc_test_req_alloc(void) +{ + struct mmc_test_req *rq = kmalloc(sizeof(*rq), GFP_KERNEL); + + if (rq) + mmc_test_req_reset(rq); + + return rq; +} + + static int mmc_test_nonblock_transfer(struct mmc_test_card *test, struct scatterlist *sg, unsigned sg_len, unsigned dev_addr, unsigned blocks, unsigned blksz, int write, int count) { - struct mmc_request mrq1; - struct mmc_command cmd1; - struct mmc_command stop1; - struct mmc_data data1; - - struct mmc_request mrq2; - struct mmc_command cmd2; - struct mmc_command stop2; - struct mmc_data data2; - + struct mmc_test_req *rq1, *rq2; struct mmc_test_async_req test_areq[2]; struct mmc_async_req *done_areq; struct mmc_async_req *cur_areq = &test_areq[0].areq; @@ -843,12 +849,16 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test, test_areq[0].test = test; test_areq[1].test = test; - mmc_test_nonblock_reset(&mrq1, &cmd1, &stop1, &data1); - mmc_test_nonblock_reset(&mrq2, &cmd2, &stop2, &data2); + rq1 = mmc_test_req_alloc(); + rq2 = mmc_test_req_alloc(); + if (!rq1 || !rq2) { + ret = RESULT_FAIL; + goto err; + } - cur_areq->mrq = &mrq1; + cur_areq->mrq = &rq1->mrq; cur_areq->err_check = mmc_test_check_result_async; - other_areq->mrq = &mrq2; + other_areq->mrq = &rq2->mrq; other_areq->err_check = mmc_test_check_result_async; for (i = 0; i < count; i++) { @@ -861,14 +871,10 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test, goto err; } - if (done_areq) { - if (done_areq->mrq == &mrq2) - mmc_test_nonblock_reset(&mrq2, &cmd2, - &stop2, &data2); - else - mmc_test_nonblock_reset(&mrq1, &cmd1, - &stop1, &data1); - } + if (done_areq) + mmc_test_req_reset(container_of(done_areq->mrq, + struct mmc_test_req, mrq)); + swap(cur_areq, other_areq); dev_addr += blocks; } @@ -877,8 +883,9 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test, if (status != MMC_BLK_SUCCESS) ret = RESULT_FAIL; - return ret; err: + kfree(rq1); + kfree(rq2); return ret; } @@ -2329,28 +2336,6 @@ static int mmc_test_reset(struct mmc_test_card *test) return RESULT_FAIL; } -struct mmc_test_req { - struct mmc_request mrq; - struct mmc_command sbc; - struct mmc_command cmd; - struct mmc_command stop; - struct mmc_command status; - struct mmc_data data; -}; - -static struct mmc_test_req *mmc_test_req_alloc(void) -{ - struct mmc_test_req *rq = kzalloc(sizeof(*rq), GFP_KERNEL); - - if (rq) { - rq->mrq.cmd = &rq->cmd; - rq->mrq.data = 
&rq->data; - rq->mrq.stop = &rq->stop; - } - - return rq; -} - static int mmc_test_send_status(struct mmc_test_card *test, struct mmc_command *cmd) { diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index affa7370ba82..0a4e77a5ba33 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -23,8 +23,6 @@ #include "core.h" #include "card.h" -#define MMC_QUEUE_BOUNCESZ 65536 - /* * Prepare a MMC request. This just filters out odd stuff. */ @@ -150,26 +148,6 @@ static void mmc_queue_setup_discard(struct request_queue *q, queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q); } -static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host) -{ - unsigned int bouncesz = MMC_QUEUE_BOUNCESZ; - - if (host->max_segs != 1 || (host->caps & MMC_CAP_NO_BOUNCE_BUFF)) - return 0; - - if (bouncesz > host->max_req_size) - bouncesz = host->max_req_size; - if (bouncesz > host->max_seg_size) - bouncesz = host->max_seg_size; - if (bouncesz > host->max_blk_count * 512) - bouncesz = host->max_blk_count * 512; - - if (bouncesz <= 512) - return 0; - - return bouncesz; -} - /** * mmc_init_request() - initialize the MMC-specific per-request data * @q: the request queue @@ -184,26 +162,9 @@ static int mmc_init_request(struct request_queue *q, struct request *req, struct mmc_card *card = mq->card; struct mmc_host *host = card->host; - if (card->bouncesz) { - mq_rq->bounce_buf = kmalloc(card->bouncesz, gfp); - if (!mq_rq->bounce_buf) - return -ENOMEM; - if (card->bouncesz > 512) { - mq_rq->sg = mmc_alloc_sg(1, gfp); - if (!mq_rq->sg) - return -ENOMEM; - mq_rq->bounce_sg = mmc_alloc_sg(card->bouncesz / 512, - gfp); - if (!mq_rq->bounce_sg) - return -ENOMEM; - } - } else { - mq_rq->bounce_buf = NULL; - mq_rq->bounce_sg = NULL; - mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp); - if (!mq_rq->sg) - return -ENOMEM; - } + mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp); + if (!mq_rq->sg) + return -ENOMEM; return 0; } @@ -212,13 +173,6 @@ static void mmc_exit_request(struct request_queue *q, struct request *req) { struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req); - /* It is OK to kfree(NULL) so this will be smooth */ - kfree(mq_rq->bounce_sg); - mq_rq->bounce_sg = NULL; - - kfree(mq_rq->bounce_buf); - mq_rq->bounce_buf = NULL; - kfree(mq_rq->sg); mq_rq->sg = NULL; } @@ -265,18 +219,11 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, if (mmc_can_erase(card)) mmc_queue_setup_discard(mq->queue, card); - card->bouncesz = mmc_queue_calc_bouncesz(host); - if (card->bouncesz) { - blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512); - blk_queue_max_segments(mq->queue, card->bouncesz / 512); - blk_queue_max_segment_size(mq->queue, card->bouncesz); - } else { - blk_queue_bounce_limit(mq->queue, limit); - blk_queue_max_hw_sectors(mq->queue, - min(host->max_blk_count, host->max_req_size / 512)); - blk_queue_max_segments(mq->queue, host->max_segs); - blk_queue_max_segment_size(mq->queue, host->max_seg_size); - } + blk_queue_bounce_limit(mq->queue, limit); + blk_queue_max_hw_sectors(mq->queue, + min(host->max_blk_count, host->max_req_size / 512)); + blk_queue_max_segments(mq->queue, host->max_segs); + blk_queue_max_segment_size(mq->queue, host->max_seg_size); sema_init(&mq->thread_sem, 1); @@ -365,56 +312,7 @@ void mmc_queue_resume(struct mmc_queue *mq) */ unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq) { - unsigned int sg_len; - size_t buflen; - struct scatterlist *sg; struct request *req = mmc_queue_req_to_req(mqrq); - int i; - if (!mqrq->bounce_buf) - 
return blk_rq_map_sg(mq->queue, req, mqrq->sg); - - sg_len = blk_rq_map_sg(mq->queue, req, mqrq->bounce_sg); - - mqrq->bounce_sg_len = sg_len; - - buflen = 0; - for_each_sg(mqrq->bounce_sg, sg, sg_len, i) - buflen += sg->length; - - sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen); - - return 1; -} - -/* - * If writing, bounce the data to the buffer before the request - * is sent to the host driver - */ -void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq) -{ - if (!mqrq->bounce_buf) - return; - - if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != WRITE) - return; - - sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len, - mqrq->bounce_buf, mqrq->sg[0].length); -} - -/* - * If reading, bounce the data from the buffer after the request - * has been handled by the host driver - */ -void mmc_queue_bounce_post(struct mmc_queue_req *mqrq) -{ - if (!mqrq->bounce_buf) - return; - - if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != READ) - return; - - sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len, - mqrq->bounce_buf, mqrq->sg[0].length); + return blk_rq_map_sg(mq->queue, req, mqrq->sg); } diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h index 361b46408e0f..f18d3f656baa 100644 --- a/drivers/mmc/core/queue.h +++ b/drivers/mmc/core/queue.h @@ -36,22 +36,23 @@ struct mmc_blk_request { * enum mmc_drv_op - enumerates the operations in the mmc_queue_req * @MMC_DRV_OP_IOCTL: ioctl operation * @MMC_DRV_OP_BOOT_WP: write protect boot partitions + * @MMC_DRV_OP_GET_CARD_STATUS: get card status + * @MMC_DRV_OP_GET_EXT_CSD: get the EXT CSD from an eMMC card */ enum mmc_drv_op { MMC_DRV_OP_IOCTL, MMC_DRV_OP_BOOT_WP, + MMC_DRV_OP_GET_CARD_STATUS, + MMC_DRV_OP_GET_EXT_CSD, }; struct mmc_queue_req { struct mmc_blk_request brq; struct scatterlist *sg; - char *bounce_buf; - struct scatterlist *bounce_sg; - unsigned int bounce_sg_len; struct mmc_async_req areq; enum mmc_drv_op drv_op; int drv_op_result; - struct mmc_blk_ioc_data **idata; + void *drv_op_data; unsigned int ioc_count; }; @@ -77,11 +78,8 @@ extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *, extern void mmc_cleanup_queue(struct mmc_queue *); extern void mmc_queue_suspend(struct mmc_queue *); extern void mmc_queue_resume(struct mmc_queue *); - extern unsigned int mmc_queue_map_sg(struct mmc_queue *, struct mmc_queue_req *); -extern void mmc_queue_bounce_pre(struct mmc_queue_req *); -extern void mmc_queue_bounce_post(struct mmc_queue_req *); extern int mmc_access_rpmb(struct mmc_queue *); diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index a1b0aa14d5e3..4fd1620b732d 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -39,11 +39,11 @@ static const unsigned char tran_mant[] = { 35, 40, 45, 50, 55, 60, 70, 80, }; -static const unsigned int tacc_exp[] = { +static const unsigned int taac_exp[] = { 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, }; -static const unsigned int tacc_mant[] = { +static const unsigned int taac_mant[] = { 0, 10, 12, 13, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 70, 80, }; @@ -111,8 +111,8 @@ static int mmc_decode_csd(struct mmc_card *card) case 0: m = UNSTUFF_BITS(resp, 115, 4); e = UNSTUFF_BITS(resp, 112, 3); - csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10; - csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100; + csd->taac_ns = (taac_exp[e] * taac_mant[m] + 9) / 10; + csd->taac_clks = UNSTUFF_BITS(resp, 104, 8) * 100; m = UNSTUFF_BITS(resp, 99, 4); e = UNSTUFF_BITS(resp, 96, 3); @@ -148,8 +148,8 @@ static int mmc_decode_csd(struct mmc_card *card) */ 
mmc_card_set_blockaddr(card); - csd->tacc_ns = 0; /* Unused */ - csd->tacc_clks = 0; /* Unused */ + csd->taac_ns = 0; /* Unused */ + csd->taac_clks = 0; /* Unused */ m = UNSTUFF_BITS(resp, 99, 4); e = UNSTUFF_BITS(resp, 96, 3); diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 5755b69f2f72..8c15637178ff 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -4,6 +4,15 @@ comment "MMC/SD/SDIO Host Controller Drivers" +config MMC_DEBUG + bool "MMC host drivers debugging" + depends on MMC != n + help + This is an option for use by developers; most people should + say N here. This enables MMC host driver debugging. And further + added host drivers please don't invent their private macro for + debugging. + config MMC_ARMMMCI tristate "ARM AMBA Multimedia Card Interface support" depends on ARM_AMBA @@ -15,7 +24,7 @@ config MMC_ARMMMCI If unsure, say N. config MMC_QCOM_DML - tristate "Qualcomm Data Mover for SD Card Controller" + bool "Qualcomm Data Mover for SD Card Controller" depends on MMC_ARMMMCI && QCOM_BAM_DMA default y help @@ -354,7 +363,7 @@ config MMC_MOXART config MMC_SDHCI_ST tristate "SDHCI support on STMicroelectronics SoC" - depends on ARCH_STI + depends on ARCH_STI || FSP2 depends on MMC_SDHCI_PLTFM select MMC_SDHCI_IO_ACCESSORS help @@ -494,7 +503,7 @@ config MMC_GOLDFISH config MMC_SPI tristate "MMC/SD/SDIO over SPI" - depends on SPI_MASTER && !HIGHMEM && HAS_DMA + depends on SPI_MASTER && HAS_DMA select CRC7 select CRC_ITU_T help @@ -575,10 +584,29 @@ config MMC_SDHI depends on SUPERH || ARM || ARM64 depends on SUPERH || ARCH_RENESAS || COMPILE_TEST select MMC_TMIO_CORE + select MMC_SDHI_SYS_DMAC if (SUPERH || ARM) + select MMC_SDHI_INTERNAL_DMAC if ARM64 help This provides support for the SDHI SD/SDIO controller found in Renesas SuperH, ARM and ARM64 based SoCs +config MMC_SDHI_SYS_DMAC + tristate "DMA for SDHI SD/SDIO controllers using SYS-DMAC" + depends on MMC_SDHI + help + This provides DMA support for SDHI SD/SDIO controllers + using SYS-DMAC via DMA Engine. This supports the controllers + found in SuperH and Renesas ARM based SoCs. + +config MMC_SDHI_INTERNAL_DMAC + tristate "DMA for SDHI SD/SDIO controllers using on-chip bus mastering" + depends on ARM64 || COMPILE_TEST + depends on MMC_SDHI + help + This provides DMA support for SDHI SD/SDIO controllers + using on-chip bus mastering. This supports the controllers + found in arm64 based SoCs. 
+ config MMC_CB710 tristate "ENE CB710 MMC/SD Interface support" depends on PCI diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index 4d4547116311..303f5cd46cd9 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile @@ -2,8 +2,9 @@ # Makefile for MMC/SD host controller drivers # -obj-$(CONFIG_MMC_ARMMMCI) += mmci.o -obj-$(CONFIG_MMC_QCOM_DML) += mmci_qcom_dml.o +obj-$(CONFIG_MMC_ARMMMCI) += armmmci.o +armmmci-y := mmci.o +armmmci-$(CONFIG_MMC_QCOM_DML) += mmci_qcom_dml.o obj-$(CONFIG_MMC_PXA) += pxamci.o obj-$(CONFIG_MMC_MXC) += mxcmmc.o obj-$(CONFIG_MMC_MXS) += mxs-mmc.o @@ -36,7 +37,13 @@ obj-$(CONFIG_MMC_S3C) += s3cmci.o obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o obj-$(CONFIG_MMC_TMIO_CORE) += tmio_mmc_core.o -obj-$(CONFIG_MMC_SDHI) += renesas_sdhi_core.o renesas_sdhi_sys_dmac.o +obj-$(CONFIG_MMC_SDHI) += renesas_sdhi_core.o +ifeq ($(subst m,y,$(CONFIG_MMC_SDHI_SYS_DMAC)),y) +obj-$(CONFIG_MMC_SDHI) += renesas_sdhi_sys_dmac.o +endif +ifeq ($(subst m,y,$(CONFIG_MMC_SDHI_INTERNAL_DMAC)),y) +obj-$(CONFIG_MMC_SDHI) += renesas_sdhi_internal_dmac.o +endif obj-$(CONFIG_MMC_CB710) += cb710-mmc.o obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o diff --git a/drivers/mmc/host/android-goldfish.c b/drivers/mmc/host/android-goldfish.c index 5b3e1c9bb75f..63fe5091ca59 100644 --- a/drivers/mmc/host/android-goldfish.c +++ b/drivers/mmc/host/android-goldfish.c @@ -290,7 +290,6 @@ static irqreturn_t goldfish_mmc_irq(int irq, void *dev_id) u16 status; int end_command = 0; int end_transfer = 0; - int transfer_error = 0; int state_changed = 0; int cmd_timeout = 0; @@ -322,9 +321,7 @@ static irqreturn_t goldfish_mmc_irq(int irq, void *dev_id) if (end_command) goldfish_mmc_cmd_done(host, host->cmd); - if (transfer_error) - goldfish_mmc_xfer_done(host, host->data); - else if (end_transfer) { + if (end_transfer) { host->dma_done = 1; goldfish_mmc_end_of_data(host, host->data); } else if (host->data != NULL) { @@ -347,8 +344,7 @@ static irqreturn_t goldfish_mmc_irq(int irq, void *dev_id) mmc_detect_change(host->mmc, 0); } - if (!end_command && !end_transfer && - !transfer_error && !state_changed && !cmd_timeout) { + if (!end_command && !end_transfer && !state_changed && !cmd_timeout) { status = GOLDFISH_MMC_READ(host, MMC_INT_STATUS); dev_info(mmc_dev(host->mmc),"spurious irq 0x%04x\n", status); if (status != 0) { diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index 97de2d32ba84..0a0ebf3a096d 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -665,14 +665,15 @@ atmci_of_init(struct platform_device *pdev) for_each_child_of_node(np, cnp) { if (of_property_read_u32(cnp, "reg", &slot_id)) { - dev_warn(&pdev->dev, "reg property is missing for %s\n", - cnp->full_name); + dev_warn(&pdev->dev, "reg property is missing for %pOF\n", + cnp); continue; } if (slot_id >= ATMCI_MAX_NR_SLOTS) { dev_warn(&pdev->dev, "can't have more than %d slots\n", ATMCI_MAX_NR_SLOTS); + of_node_put(cnp); break; } @@ -1083,7 +1084,6 @@ static u32 atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data) { u32 iflags, tmp; - unsigned int sg_len; int i; data->error = -EINPROGRESS; @@ -1108,8 +1108,8 @@ atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data) /* Configure PDC */ host->data_size = data->blocks * data->blksz; - sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, - mmc_get_dma_dir(data)); + dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, 
+ mmc_get_dma_dir(data)); if ((!host->caps.has_rwproof) && (host->data->flags & MMC_DATA_WRITE)) { diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c index abba9a2a78b8..229dc18f0581 100644 --- a/drivers/mmc/host/bcm2835.c +++ b/drivers/mmc/host/bcm2835.c @@ -1252,7 +1252,7 @@ static void bcm2835_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) mutex_unlock(&host->mutex); } -static struct mmc_host_ops bcm2835_ops = { +static const struct mmc_host_ops bcm2835_ops = { .request = bcm2835_request, .set_ios = bcm2835_set_ios, .hw_reset = bcm2835_reset, diff --git a/drivers/mmc/host/cavium-octeon.c b/drivers/mmc/host/cavium-octeon.c index 951d2cdd7888..22aded1065ae 100644 --- a/drivers/mmc/host/cavium-octeon.c +++ b/drivers/mmc/host/cavium-octeon.c @@ -342,18 +342,7 @@ static struct platform_driver octeon_mmc_driver = { }, }; -static int __init octeon_mmc_init(void) -{ - return platform_driver_register(&octeon_mmc_driver); -} - -static void __exit octeon_mmc_cleanup(void) -{ - platform_driver_unregister(&octeon_mmc_driver); -} - -module_init(octeon_mmc_init); -module_exit(octeon_mmc_cleanup); +module_platform_driver(octeon_mmc_driver); MODULE_AUTHOR("Cavium Inc. "); MODULE_DESCRIPTION("Low-level driver for Cavium OCTEON MMC/SSD card"); diff --git a/drivers/mmc/host/cavium-thunderx.c b/drivers/mmc/host/cavium-thunderx.c index b9cc95998799..eee08d81b242 100644 --- a/drivers/mmc/host/cavium-thunderx.c +++ b/drivers/mmc/host/cavium-thunderx.c @@ -7,6 +7,7 @@ * * Copyright (C) 2016 Cavium Inc. */ +#include #include #include #include @@ -149,8 +150,11 @@ static int thunder_mmc_probe(struct pci_dev *pdev, for (i = 0; i < CAVIUM_MAX_MMC; i++) { if (host->slot[i]) cvm_mmc_of_slot_remove(host->slot[i]); - if (host->slot_pdev[i]) + if (host->slot_pdev[i]) { + get_device(&host->slot_pdev[i]->dev); of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL); + put_device(&host->slot_pdev[i]->dev); + } } clk_disable_unprepare(host->clk); return ret; diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c index 3686d77c717b..fbd29f00fca0 100644 --- a/drivers/mmc/host/cavium.c +++ b/drivers/mmc/host/cavium.c @@ -957,14 +957,12 @@ static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot) ret = of_property_read_u32(node, "reg", &id); if (ret) { - dev_err(dev, "Missing or invalid reg property on %s\n", - of_node_full_name(node)); + dev_err(dev, "Missing or invalid reg property on %pOF\n", node); return ret; } if (id >= CAVIUM_MAX_MMC || slot->host->slot[id]) { - dev_err(dev, "Invalid reg property on %s\n", - of_node_full_name(node)); + dev_err(dev, "Invalid reg property on %pOF\n", node); return -EINVAL; } @@ -1040,7 +1038,7 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host) */ mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD | - MMC_CAP_3_3V_DDR | MMC_CAP_NO_BOUNCE_BUFF; + MMC_CAP_3_3V_DDR; if (host->use_sg) mmc->max_segs = 16; diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c index 621ce47e0e4a..351330dfb954 100644 --- a/drivers/mmc/host/davinci_mmc.c +++ b/drivers/mmc/host/davinci_mmc.c @@ -1062,7 +1062,7 @@ static void mmc_davinci_enable_sdio_irq(struct mmc_host *mmc, int enable) } } -static struct mmc_host_ops mmc_davinci_ops = { +static const struct mmc_host_ops mmc_davinci_ops = { .request = mmc_davinci_request, .set_ios = mmc_davinci_set_ios, .get_cd = mmc_davinci_get_cd, diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c 
index e38fb0020bb1..64cda84b2302 100644 --- a/drivers/mmc/host/dw_mmc-k3.c +++ b/drivers/mmc/host/dw_mmc-k3.c @@ -8,6 +8,8 @@ * (at your option) any later version. */ +#include +#include #include #include #include @@ -28,7 +30,35 @@ #define AO_SCTRL_SEL18 BIT(10) #define AO_SCTRL_CTRL3 0x40C +#define DWMMC_SDIO_ID 2 + +#define SOC_SCTRL_SCPERCTRL5 (0x314) +#define SDCARD_IO_SEL18 BIT(2) + +#define SDCARD_RD_THRESHOLD (512) + +#define GENCLK_DIV (7) + +#define GPIO_CLK_ENABLE BIT(16) +#define GPIO_CLK_DIV_MASK GENMASK(11, 8) +#define GPIO_USE_SAMPLE_DLY_MASK GENMASK(13, 13) +#define UHS_REG_EXT_SAMPLE_PHASE_MASK GENMASK(20, 16) +#define UHS_REG_EXT_SAMPLE_DRVPHASE_MASK GENMASK(25, 21) +#define UHS_REG_EXT_SAMPLE_DLY_MASK GENMASK(30, 26) + +#define TIMING_MODE 3 +#define TIMING_CFG_NUM 10 + +#define NUM_PHASES (40) + +#define ENABLE_SHIFT_MIN_SMPL (4) +#define ENABLE_SHIFT_MAX_SMPL (12) +#define USE_DLY_MIN_SMPL (11) +#define USE_DLY_MAX_SMPL (14) + struct k3_priv { + int ctrl_id; + u32 cur_speed; struct regmap *reg; }; @@ -38,6 +68,41 @@ static unsigned long dw_mci_hi6220_caps[] = { 0 }; +struct hs_timing { + u32 drv_phase; + u32 smpl_dly; + u32 smpl_phase_max; + u32 smpl_phase_min; +}; + +struct hs_timing hs_timing_cfg[TIMING_MODE][TIMING_CFG_NUM] = { + { /* reserved */ }, + { /* SD */ + {7, 0, 15, 15,}, /* 0: LEGACY 400k */ + {6, 0, 4, 4,}, /* 1: MMC_HS */ + {6, 0, 3, 3,}, /* 2: SD_HS */ + {6, 0, 15, 15,}, /* 3: SDR12 */ + {6, 0, 2, 2,}, /* 4: SDR25 */ + {4, 0, 11, 0,}, /* 5: SDR50 */ + {6, 4, 15, 0,}, /* 6: SDR104 */ + {0}, /* 7: DDR50 */ + {0}, /* 8: DDR52 */ + {0}, /* 9: HS200 */ + }, + { /* SDIO */ + {7, 0, 15, 15,}, /* 0: LEGACY 400k */ + {0}, /* 1: MMC_HS */ + {6, 0, 15, 15,}, /* 2: SD_HS */ + {6, 0, 15, 15,}, /* 3: SDR12 */ + {6, 0, 0, 0,}, /* 4: SDR25 */ + {4, 0, 12, 0,}, /* 5: SDR50 */ + {5, 4, 15, 0,}, /* 6: SDR104 */ + {0}, /* 7: DDR50 */ + {0}, /* 8: DDR52 */ + {0}, /* 9: HS200 */ + } +}; + static void dw_mci_k3_set_ios(struct dw_mci *host, struct mmc_ios *ios) { int ret; @@ -66,6 +131,10 @@ static int dw_mci_hi6220_parse_dt(struct dw_mci *host) if (IS_ERR(priv->reg)) priv->reg = NULL; + priv->ctrl_id = of_alias_get_id(host->dev->of_node, "mshc"); + if (priv->ctrl_id < 0) + priv->ctrl_id = 0; + host->priv = priv; return 0; } @@ -144,7 +213,236 @@ static const struct dw_mci_drv_data hi6220_data = { .execute_tuning = dw_mci_hi6220_execute_tuning, }; +static void dw_mci_hs_set_timing(struct dw_mci *host, int timing, + int smpl_phase) +{ + u32 drv_phase; + u32 smpl_dly; + u32 use_smpl_dly = 0; + u32 enable_shift = 0; + u32 reg_value; + int ctrl_id; + struct k3_priv *priv; + + priv = host->priv; + ctrl_id = priv->ctrl_id; + + drv_phase = hs_timing_cfg[ctrl_id][timing].drv_phase; + smpl_dly = hs_timing_cfg[ctrl_id][timing].smpl_dly; + if (smpl_phase == -1) + smpl_phase = (hs_timing_cfg[ctrl_id][timing].smpl_phase_max + + hs_timing_cfg[ctrl_id][timing].smpl_phase_min) / 2; + + switch (timing) { + case MMC_TIMING_UHS_SDR104: + if (smpl_phase >= USE_DLY_MIN_SMPL && + smpl_phase <= USE_DLY_MAX_SMPL) + use_smpl_dly = 1; + /* fallthrough */ + case MMC_TIMING_UHS_SDR50: + if (smpl_phase >= ENABLE_SHIFT_MIN_SMPL && + smpl_phase <= ENABLE_SHIFT_MAX_SMPL) + enable_shift = 1; + break; + } + + mci_writel(host, GPIO, 0x0); + usleep_range(5, 10); + + reg_value = FIELD_PREP(UHS_REG_EXT_SAMPLE_PHASE_MASK, smpl_phase) | + FIELD_PREP(UHS_REG_EXT_SAMPLE_DLY_MASK, smpl_dly) | + FIELD_PREP(UHS_REG_EXT_SAMPLE_DRVPHASE_MASK, drv_phase); + mci_writel(host, UHS_REG_EXT, reg_value); + + mci_writel(host, 
ENABLE_SHIFT, enable_shift); + + reg_value = FIELD_PREP(GPIO_CLK_DIV_MASK, GENCLK_DIV) | + FIELD_PREP(GPIO_USE_SAMPLE_DLY_MASK, use_smpl_dly); + mci_writel(host, GPIO, (unsigned int)reg_value | GPIO_CLK_ENABLE); + + /* We should delay 1ms wait for timing setting finished. */ + usleep_range(1000, 2000); +} + +static int dw_mci_hi3660_init(struct dw_mci *host) +{ + mci_writel(host, CDTHRCTL, SDMMC_SET_THLD(SDCARD_RD_THRESHOLD, + SDMMC_CARD_RD_THR_EN)); + + dw_mci_hs_set_timing(host, MMC_TIMING_LEGACY, -1); + host->bus_hz /= (GENCLK_DIV + 1); + + return 0; +} + +static int dw_mci_set_sel18(struct dw_mci *host, bool set) +{ + int ret; + unsigned int val; + struct k3_priv *priv; + + priv = host->priv; + + val = set ? SDCARD_IO_SEL18 : 0; + ret = regmap_update_bits(priv->reg, SOC_SCTRL_SCPERCTRL5, + SDCARD_IO_SEL18, val); + if (ret) { + dev_err(host->dev, "sel18 %u error\n", val); + return ret; + } + + return 0; +} + +static void dw_mci_hi3660_set_ios(struct dw_mci *host, struct mmc_ios *ios) +{ + int ret; + unsigned long wanted; + unsigned long actual; + struct k3_priv *priv = host->priv; + + if (!ios->clock || ios->clock == priv->cur_speed) + return; + + wanted = ios->clock * (GENCLK_DIV + 1); + ret = clk_set_rate(host->ciu_clk, wanted); + if (ret) { + dev_err(host->dev, "failed to set rate %luHz\n", wanted); + return; + } + actual = clk_get_rate(host->ciu_clk); + + dw_mci_hs_set_timing(host, ios->timing, -1); + host->bus_hz = actual / (GENCLK_DIV + 1); + host->current_speed = 0; + priv->cur_speed = host->bus_hz; +} + +static int dw_mci_get_best_clksmpl(unsigned int sample_flag) +{ + int i; + int interval; + unsigned int v; + unsigned int len; + unsigned int range_start = 0; + unsigned int range_length = 0; + unsigned int middle_range = 0; + + if (!sample_flag) + return -EIO; + + if (~sample_flag == 0) + return 0; + + i = ffs(sample_flag) - 1; + + /* + * A clock cycle is divided into 32 phases, + * each of which is represented by a bit, + * finding the optimal phase. 
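 *
 * Worked example (illustrative only): for sample_flag = 0x0000fe07 the
 * passing phases form two runs, bits 0-2 (length 3) and bits 9-15
 * (length 7); the longest run starts at bit 9, so the function returns
 * its midpoint, 9 + 7/2 = 12.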
+ */ + while (i < 32) { + v = ror32(sample_flag, i); + len = ffs(~v) - 1; + + if (len > range_length) { + range_length = len; + range_start = i; + } + + interval = ffs(v >> len) - 1; + if (interval < 0) + break; + + i += len + interval; + } + + middle_range = range_start + range_length / 2; + if (middle_range >= 32) + middle_range %= 32; + + return middle_range; +} + +static int dw_mci_hi3660_execute_tuning(struct dw_mci_slot *slot, u32 opcode) +{ + int i = 0; + struct dw_mci *host = slot->host; + struct mmc_host *mmc = slot->mmc; + int smpl_phase = 0; + u32 tuning_sample_flag = 0; + int best_clksmpl = 0; + + for (i = 0; i < NUM_PHASES; ++i, ++smpl_phase) { + smpl_phase %= 32; + + mci_writel(host, TMOUT, ~0); + dw_mci_hs_set_timing(host, mmc->ios.timing, smpl_phase); + + if (!mmc_send_tuning(mmc, opcode, NULL)) + tuning_sample_flag |= (1 << smpl_phase); + else + tuning_sample_flag &= ~(1 << smpl_phase); + } + + best_clksmpl = dw_mci_get_best_clksmpl(tuning_sample_flag); + if (best_clksmpl < 0) { + dev_err(host->dev, "All phases bad!\n"); + return -EIO; + } + + dw_mci_hs_set_timing(host, mmc->ios.timing, best_clksmpl); + + dev_info(host->dev, "tuning ok best_clksmpl %u tuning_sample_flag %x\n", + best_clksmpl, tuning_sample_flag); + return 0; +} + +static int dw_mci_hi3660_switch_voltage(struct mmc_host *mmc, + struct mmc_ios *ios) +{ + int ret = 0; + struct dw_mci_slot *slot = mmc_priv(mmc); + struct k3_priv *priv; + struct dw_mci *host; + + host = slot->host; + priv = host->priv; + + if (!priv || !priv->reg) + return 0; + + if (priv->ctrl_id == DWMMC_SDIO_ID) + return 0; + + if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) + ret = dw_mci_set_sel18(host, 0); + else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) + ret = dw_mci_set_sel18(host, 1); + if (ret) + return ret; + + if (!IS_ERR(mmc->supply.vqmmc)) { + ret = mmc_regulator_set_vqmmc(mmc, ios); + if (ret) { + dev_err(host->dev, "Regulator set error %d\n", ret); + return ret; + } + } + + return 0; +} + +static const struct dw_mci_drv_data hi3660_data = { + .init = dw_mci_hi3660_init, + .set_ios = dw_mci_hi3660_set_ios, + .parse_dt = dw_mci_hi6220_parse_dt, + .execute_tuning = dw_mci_hi3660_execute_tuning, + .switch_voltage = dw_mci_hi3660_switch_voltage, +}; + static const struct of_device_id dw_mci_k3_match[] = { + { .compatible = "hisilicon,hi3660-dw-mshc", .data = &hi3660_data, }, { .compatible = "hisilicon,hi4511-dw-mshc", .data = &k3_drv_data, }, { .compatible = "hisilicon,hi6220-dw-mshc", .data = &hi6220_data, }, {}, diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 250dc6ec4c82..860313bd952a 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -398,6 +398,21 @@ static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd) return cmdr; } +static inline void dw_mci_set_cto(struct dw_mci *host) +{ + unsigned int cto_clks; + unsigned int cto_ms; + + cto_clks = mci_readl(host, TMOUT) & 0xff; + cto_ms = DIV_ROUND_UP(cto_clks, host->bus_hz / 1000); + + /* add a bit spare time */ + cto_ms += 10; + + mod_timer(&host->cto_timer, + jiffies + msecs_to_jiffies(cto_ms) + 1); +} + static void dw_mci_start_command(struct dw_mci *host, struct mmc_command *cmd, u32 cmd_flags) { @@ -410,6 +425,10 @@ static void dw_mci_start_command(struct dw_mci *host, wmb(); /* drain writebuffer */ dw_mci_wait_while_busy(host, cmd_flags); + /* response expected command only */ + if (cmd_flags & SDMMC_CMD_RESP_EXP) + dw_mci_set_cto(host); + mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START); } @@ 
-2599,6 +2618,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) } if (pending & DW_MCI_CMD_ERROR_FLAGS) { + del_timer(&host->cto_timer); mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS); host->cmd_status = pending; smp_wmb(); /* drain writebuffer */ @@ -2642,6 +2662,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) } if (pending & SDMMC_INT_CMD_DONE) { + del_timer(&host->cto_timer); mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE); dw_mci_cmd_interrupt(host, pending); } @@ -2914,6 +2935,30 @@ static void dw_mci_cmd11_timer(unsigned long arg) tasklet_schedule(&host->tasklet); } +static void dw_mci_cto_timer(unsigned long arg) +{ + struct dw_mci *host = (struct dw_mci *)arg; + + switch (host->state) { + case STATE_SENDING_CMD11: + case STATE_SENDING_CMD: + case STATE_SENDING_STOP: + /* + * If CMD_DONE interrupt does NOT come in sending command + * state, we should notify the driver to terminate current + * transfer and report a command timeout to the core. + */ + host->cmd_status = SDMMC_INT_RTO; + set_bit(EVENT_CMD_COMPLETE, &host->pending_events); + tasklet_schedule(&host->tasklet); + break; + default: + dev_warn(host->dev, "Unexpected command timeout, state %d\n", + host->state); + break; + } +} + static void dw_mci_dto_timer(unsigned long arg) { struct dw_mci *host = (struct dw_mci *)arg; @@ -2950,7 +2995,7 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host) return ERR_PTR(-ENOMEM); /* find reset controller when exist */ - pdata->rstc = devm_reset_control_get_optional(dev, "reset"); + pdata->rstc = devm_reset_control_get_optional_exclusive(dev, "reset"); if (IS_ERR(pdata->rstc)) { if (PTR_ERR(pdata->rstc) == -EPROBE_DEFER) return ERR_PTR(-EPROBE_DEFER); @@ -3067,6 +3112,12 @@ int dw_mci_probe(struct dw_mci *host) goto err_clk_ciu; } + if (!IS_ERR(host->pdata->rstc)) { + reset_control_assert(host->pdata->rstc); + usleep_range(10, 50); + reset_control_deassert(host->pdata->rstc); + } + if (drv_data && drv_data->init) { ret = drv_data->init(host); if (ret) { @@ -3076,15 +3127,12 @@ int dw_mci_probe(struct dw_mci *host) } } - if (!IS_ERR(host->pdata->rstc)) { - reset_control_assert(host->pdata->rstc); - usleep_range(10, 50); - reset_control_deassert(host->pdata->rstc); - } - setup_timer(&host->cmd11_timer, dw_mci_cmd11_timer, (unsigned long)host); + setup_timer(&host->cto_timer, + dw_mci_cto_timer, (unsigned long)host); + setup_timer(&host->dto_timer, dw_mci_dto_timer, (unsigned long)host); diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h index 75da3756955d..34474ad731aa 100644 --- a/drivers/mmc/host/dw_mmc.h +++ b/drivers/mmc/host/dw_mmc.h @@ -126,6 +126,7 @@ struct dw_mci_dma_slave { * @irq: The irq value to be passed to request_irq. * @sdio_id0: Number of slot0 in the SDIO interrupt registers. * @cmd11_timer: Timer for SD3.0 voltage switch over scheme. + * @cto_timer: Timer for broken command transfer over scheme. * @dto_timer: Timer for broken data transfer over scheme. 
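 *
 * As a rough illustration of how @cto_timer is armed: with @bus_hz at
 * 50 MHz and a TMOUT response-timeout field of 0xff (255 clocks),
 * dw_mci_set_cto() computes DIV_ROUND_UP(255, 50000) = 1 ms, adds the
 * 10 ms margin, and schedules the timer roughly 11 ms ahead.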
* * Locking @@ -232,6 +233,7 @@ struct dw_mci { int sdio_id0; struct timer_list cmd11_timer; + struct timer_list cto_timer; struct timer_list dto_timer; }; @@ -314,6 +316,8 @@ struct dw_mci_board { #define SDMMC_DSCADDR 0x094 #define SDMMC_BUFADDR 0x098 #define SDMMC_CDTHRCTL 0x100 +#define SDMMC_UHS_REG_EXT 0x108 +#define SDMMC_ENABLE_SHIFT 0x110 #define SDMMC_DATA(x) (x) /* * Registers to support idmac 64-bit address mode diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index de962c2d5e00..85745ef179e2 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c @@ -42,22 +42,18 @@ #define SD_EMMC_CLOCK 0x0 #define CLK_DIV_MASK GENMASK(5, 0) -#define CLK_DIV_MAX 63 #define CLK_SRC_MASK GENMASK(7, 6) -#define CLK_SRC_XTAL 0 /* external crystal */ -#define CLK_SRC_XTAL_RATE 24000000 -#define CLK_SRC_PLL 1 /* FCLK_DIV2 */ -#define CLK_SRC_PLL_RATE 1000000000 #define CLK_CORE_PHASE_MASK GENMASK(9, 8) #define CLK_TX_PHASE_MASK GENMASK(11, 10) #define CLK_RX_PHASE_MASK GENMASK(13, 12) -#define CLK_PHASE_0 0 -#define CLK_PHASE_90 1 -#define CLK_PHASE_180 2 -#define CLK_PHASE_270 3 +#define CLK_TX_DELAY_MASK GENMASK(19, 16) +#define CLK_RX_DELAY_MASK GENMASK(23, 20) +#define CLK_DELAY_STEP_PS 200 +#define CLK_PHASE_STEP 30 +#define CLK_PHASE_POINT_NUM (360 / CLK_PHASE_STEP) #define CLK_ALWAYS_ON BIT(24) -#define SD_EMMC_DElAY 0x4 +#define SD_EMMC_DELAY 0x4 #define SD_EMMC_ADJUST 0x8 #define SD_EMMC_CALOUT 0x10 #define SD_EMMC_START 0x40 @@ -81,18 +77,25 @@ #define SD_EMMC_STATUS 0x48 #define STATUS_BUSY BIT(31) +#define STATUS_DATI GENMASK(23, 16) #define SD_EMMC_IRQ_EN 0x4c -#define IRQ_EN_MASK GENMASK(13, 0) #define IRQ_RXD_ERR_MASK GENMASK(7, 0) #define IRQ_TXD_ERR BIT(8) #define IRQ_DESC_ERR BIT(9) #define IRQ_RESP_ERR BIT(10) +#define IRQ_CRC_ERR \ + (IRQ_RXD_ERR_MASK | IRQ_TXD_ERR | IRQ_DESC_ERR | IRQ_RESP_ERR) #define IRQ_RESP_TIMEOUT BIT(11) #define IRQ_DESC_TIMEOUT BIT(12) +#define IRQ_TIMEOUTS \ + (IRQ_RESP_TIMEOUT | IRQ_DESC_TIMEOUT) #define IRQ_END_OF_CHAIN BIT(13) #define IRQ_RESP_STATUS BIT(14) #define IRQ_SDIO BIT(15) +#define IRQ_EN_MASK \ + (IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN | IRQ_RESP_STATUS |\ + IRQ_SDIO) #define SD_EMMC_CMD_CFG 0x50 #define SD_EMMC_CMD_ARG 0x54 @@ -118,12 +121,6 @@ #define MUX_CLK_NUM_PARENTS 2 -struct meson_tuning_params { - u8 core_phase; - u8 tx_phase; - u8 rx_phase; -}; - struct sd_emmc_desc { u32 cmd_cfg; u32 cmd_arg; @@ -139,12 +136,14 @@ struct meson_host { spinlock_t lock; void __iomem *regs; struct clk *core_clk; - struct clk_mux mux; - struct clk *mux_clk; - unsigned long current_clock; + struct clk *mmc_clk; + struct clk *rx_clk; + struct clk *tx_clk; + unsigned long req_rate; - struct clk_divider cfg_div; - struct clk *cfg_div_clk; + struct pinctrl *pinctrl; + struct pinctrl_state *pins_default; + struct pinctrl_state *pins_clk_gate; unsigned int bounce_buf_size; void *bounce_buf; @@ -152,7 +151,6 @@ struct meson_host { struct sd_emmc_desc *descs; dma_addr_t descs_dma_addr; - struct meson_tuning_params tp; bool vqmmc_enabled; }; @@ -179,6 +177,90 @@ struct meson_host { #define CMD_RESP_MASK GENMASK(31, 1) #define CMD_RESP_SRAM BIT(0) +struct meson_mmc_phase { + struct clk_hw hw; + void __iomem *reg; + unsigned long phase_mask; + unsigned long delay_mask; + unsigned int delay_step_ps; +}; + +#define to_meson_mmc_phase(_hw) container_of(_hw, struct meson_mmc_phase, hw) + +static int meson_mmc_clk_get_phase(struct clk_hw *hw) +{ + struct meson_mmc_phase *mmc = to_meson_mmc_phase(hw); + 
unsigned int phase_num = 1 << hweight_long(mmc->phase_mask); + unsigned long period_ps, p, d; + int degrees; + u32 val; + + val = readl(mmc->reg); + p = (val & mmc->phase_mask) >> __ffs(mmc->phase_mask); + degrees = p * 360 / phase_num; + + if (mmc->delay_mask) { + period_ps = DIV_ROUND_UP((unsigned long)NSEC_PER_SEC * 1000, + clk_get_rate(hw->clk)); + d = (val & mmc->delay_mask) >> __ffs(mmc->delay_mask); + degrees += d * mmc->delay_step_ps * 360 / period_ps; + degrees %= 360; + } + + return degrees; +} + +static void meson_mmc_apply_phase_delay(struct meson_mmc_phase *mmc, + unsigned int phase, + unsigned int delay) +{ + u32 val; + + val = readl(mmc->reg); + val &= ~mmc->phase_mask; + val |= phase << __ffs(mmc->phase_mask); + + if (mmc->delay_mask) { + val &= ~mmc->delay_mask; + val |= delay << __ffs(mmc->delay_mask); + } + + writel(val, mmc->reg); +} + +static int meson_mmc_clk_set_phase(struct clk_hw *hw, int degrees) +{ + struct meson_mmc_phase *mmc = to_meson_mmc_phase(hw); + unsigned int phase_num = 1 << hweight_long(mmc->phase_mask); + unsigned long period_ps, d = 0, r; + uint64_t p; + + p = degrees % 360; + + if (!mmc->delay_mask) { + p = DIV_ROUND_CLOSEST_ULL(p, 360 / phase_num); + } else { + period_ps = DIV_ROUND_UP((unsigned long)NSEC_PER_SEC * 1000, + clk_get_rate(hw->clk)); + + /* First compute the phase index (p), the remainder (r) is the + * part we'll try to acheive using the delays (d). + */ + r = do_div(p, 360 / phase_num); + d = DIV_ROUND_CLOSEST(r * period_ps, + 360 * mmc->delay_step_ps); + d = min(d, mmc->delay_mask >> __ffs(mmc->delay_mask)); + } + + meson_mmc_apply_phase_delay(mmc, p, d); + return 0; +} + +static const struct clk_ops meson_mmc_clk_phase_ops = { + .get_phase = meson_mmc_clk_get_phase, + .set_phase = meson_mmc_clk_set_phase, +}; + static unsigned int meson_mmc_get_timeout_msecs(struct mmc_data *data) { unsigned int timeout = data->timeout_ns / NSEC_PER_MSEC; @@ -271,58 +353,102 @@ static void meson_mmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq, mmc_get_dma_dir(data)); } -static int meson_mmc_clk_set(struct meson_host *host, unsigned long clk_rate) +static bool meson_mmc_timing_is_ddr(struct mmc_ios *ios) +{ + if (ios->timing == MMC_TIMING_MMC_DDR52 || + ios->timing == MMC_TIMING_UHS_DDR50 || + ios->timing == MMC_TIMING_MMC_HS400) + return true; + + return false; +} + +/* + * Gating the clock on this controller is tricky. It seems the mmc clock + * is also used by the controller. It may crash during some operation if the + * clock is stopped. The safest thing to do, whenever possible, is to keep + * clock running at stop it at the pad using the pinmux. 
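 *
 * For illustration only (the lookups happen in probe, outside this
 * hunk, and the state names are assumed): the two pin states used
 * below are typically obtained once with
 *
 *	host->pinctrl = devm_pinctrl_get(host->dev);
 *	host->pins_default = pinctrl_lookup_state(host->pinctrl,
 *						   PINCTRL_STATE_DEFAULT);
 *	host->pins_clk_gate = pinctrl_lookup_state(host->pinctrl,
 *						   "clk-gate");
 *
 * and meson_mmc_clk_gate()/meson_mmc_clk_ungate() simply switch
 * between them to stop or restart the clock at the pad.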
+ */ +static void meson_mmc_clk_gate(struct meson_host *host) { - struct mmc_host *mmc = host->mmc; - int ret; u32 cfg; - if (clk_rate) { - if (WARN_ON(clk_rate > mmc->f_max)) - clk_rate = mmc->f_max; - else if (WARN_ON(clk_rate < mmc->f_min)) - clk_rate = mmc->f_min; - } - - if (clk_rate == host->current_clock) - return 0; - - /* stop clock */ - cfg = readl(host->regs + SD_EMMC_CFG); - if (!(cfg & CFG_STOP_CLOCK)) { + if (host->pins_clk_gate) { + pinctrl_select_state(host->pinctrl, host->pins_clk_gate); + } else { + /* + * If the pinmux is not provided - default to the classic and + * unsafe method + */ + cfg = readl(host->regs + SD_EMMC_CFG); cfg |= CFG_STOP_CLOCK; writel(cfg, host->regs + SD_EMMC_CFG); } +} - dev_dbg(host->dev, "change clock rate %u -> %lu\n", - mmc->actual_clock, clk_rate); +static void meson_mmc_clk_ungate(struct meson_host *host) +{ + u32 cfg; - if (!clk_rate) { + if (host->pins_clk_gate) + pinctrl_select_state(host->pinctrl, host->pins_default); + + /* Make sure the clock is not stopped in the controller */ + cfg = readl(host->regs + SD_EMMC_CFG); + cfg &= ~CFG_STOP_CLOCK; + writel(cfg, host->regs + SD_EMMC_CFG); +} + +static int meson_mmc_clk_set(struct meson_host *host, struct mmc_ios *ios) +{ + struct mmc_host *mmc = host->mmc; + unsigned long rate = ios->clock; + int ret; + u32 cfg; + + /* DDR modes require higher module clock */ + if (meson_mmc_timing_is_ddr(ios)) + rate <<= 1; + + /* Same request - bail-out */ + if (host->req_rate == rate) + return 0; + + /* stop clock */ + meson_mmc_clk_gate(host); + host->req_rate = 0; + + if (!rate) { mmc->actual_clock = 0; - host->current_clock = 0; /* return with clock being stopped */ return 0; } - ret = clk_set_rate(host->cfg_div_clk, clk_rate); + /* Stop the clock during rate change to avoid glitches */ + cfg = readl(host->regs + SD_EMMC_CFG); + cfg |= CFG_STOP_CLOCK; + writel(cfg, host->regs + SD_EMMC_CFG); + + ret = clk_set_rate(host->mmc_clk, rate); if (ret) { dev_err(host->dev, "Unable to set cfg_div_clk to %lu. 
ret=%d\n", - clk_rate, ret); + rate, ret); return ret; } - mmc->actual_clock = clk_get_rate(host->cfg_div_clk); - host->current_clock = clk_rate; + host->req_rate = rate; + mmc->actual_clock = clk_get_rate(host->mmc_clk); - if (clk_rate != mmc->actual_clock) - dev_dbg(host->dev, - "divider requested rate %lu != actual rate %u\n", - clk_rate, mmc->actual_clock); + /* We should report the real output frequency of the controller */ + if (meson_mmc_timing_is_ddr(ios)) + mmc->actual_clock >>= 1; + + dev_dbg(host->dev, "clk rate: %u Hz\n", mmc->actual_clock); + if (ios->clock != mmc->actual_clock) + dev_dbg(host->dev, "requested rate was %u\n", ios->clock); /* (re)start clock */ - cfg = readl(host->regs + SD_EMMC_CFG); - cfg &= ~CFG_STOP_CLOCK; - writel(cfg, host->regs + SD_EMMC_CFG); + meson_mmc_clk_ungate(host); return 0; } @@ -335,11 +461,21 @@ static int meson_mmc_clk_set(struct meson_host *host, unsigned long clk_rate) static int meson_mmc_clk_init(struct meson_host *host) { struct clk_init_data init; + struct clk_mux *mux; + struct clk_divider *div; + struct meson_mmc_phase *core, *tx, *rx; + struct clk *clk; char clk_name[32]; int i, ret = 0; const char *mux_parent_names[MUX_CLK_NUM_PARENTS]; - const char *clk_div_parents[1]; - u32 clk_reg, cfg; + const char *clk_parent[1]; + u32 clk_reg; + + /* init SD_EMMC_CLOCK to sane defaults w/min clock rate */ + clk_reg = 0; + clk_reg |= CLK_ALWAYS_ON; + clk_reg |= CLK_DIV_MASK; + writel(clk_reg, host->regs + SD_EMMC_CLOCK); /* get the mux parents */ for (i = 0; i < MUX_CLK_NUM_PARENTS; i++) { @@ -358,103 +494,253 @@ static int meson_mmc_clk_init(struct meson_host *host) } /* create the mux */ + mux = devm_kzalloc(host->dev, sizeof(*mux), GFP_KERNEL); + if (!mux) + return -ENOMEM; + snprintf(clk_name, sizeof(clk_name), "%s#mux", dev_name(host->dev)); init.name = clk_name; init.ops = &clk_mux_ops; init.flags = 0; init.parent_names = mux_parent_names; init.num_parents = MUX_CLK_NUM_PARENTS; - host->mux.reg = host->regs + SD_EMMC_CLOCK; - host->mux.shift = __bf_shf(CLK_SRC_MASK); - host->mux.mask = CLK_SRC_MASK; - host->mux.flags = 0; - host->mux.table = NULL; - host->mux.hw.init = &init; - host->mux_clk = devm_clk_register(host->dev, &host->mux.hw); - if (WARN_ON(IS_ERR(host->mux_clk))) - return PTR_ERR(host->mux_clk); + mux->reg = host->regs + SD_EMMC_CLOCK; + mux->shift = __ffs(CLK_SRC_MASK); + mux->mask = CLK_SRC_MASK >> mux->shift; + mux->hw.init = &init; + + clk = devm_clk_register(host->dev, &mux->hw); + if (WARN_ON(IS_ERR(clk))) + return PTR_ERR(clk); /* create the divider */ + div = devm_kzalloc(host->dev, sizeof(*div), GFP_KERNEL); + if (!div) + return -ENOMEM; + snprintf(clk_name, sizeof(clk_name), "%s#div", dev_name(host->dev)); init.name = clk_name; init.ops = &clk_divider_ops; init.flags = CLK_SET_RATE_PARENT; - clk_div_parents[0] = __clk_get_name(host->mux_clk); - init.parent_names = clk_div_parents; - init.num_parents = ARRAY_SIZE(clk_div_parents); + clk_parent[0] = __clk_get_name(clk); + init.parent_names = clk_parent; + init.num_parents = 1; - host->cfg_div.reg = host->regs + SD_EMMC_CLOCK; - host->cfg_div.shift = __bf_shf(CLK_DIV_MASK); - host->cfg_div.width = __builtin_popcountl(CLK_DIV_MASK); - host->cfg_div.hw.init = &init; - host->cfg_div.flags = CLK_DIVIDER_ONE_BASED | - CLK_DIVIDER_ROUND_CLOSEST | CLK_DIVIDER_ALLOW_ZERO; + div->reg = host->regs + SD_EMMC_CLOCK; + div->shift = __ffs(CLK_DIV_MASK); + div->width = __builtin_popcountl(CLK_DIV_MASK); + div->hw.init = &init; + div->flags = CLK_DIVIDER_ONE_BASED; - host->cfg_div_clk 
= devm_clk_register(host->dev, &host->cfg_div.hw); - if (WARN_ON(PTR_ERR_OR_ZERO(host->cfg_div_clk))) - return PTR_ERR(host->cfg_div_clk); + clk = devm_clk_register(host->dev, &div->hw); + if (WARN_ON(IS_ERR(clk))) + return PTR_ERR(clk); + + /* create the mmc core clock */ + core = devm_kzalloc(host->dev, sizeof(*core), GFP_KERNEL); + if (!core) + return -ENOMEM; + + snprintf(clk_name, sizeof(clk_name), "%s#core", dev_name(host->dev)); + init.name = clk_name; + init.ops = &meson_mmc_clk_phase_ops; + init.flags = CLK_SET_RATE_PARENT; + clk_parent[0] = __clk_get_name(clk); + init.parent_names = clk_parent; + init.num_parents = 1; + + core->reg = host->regs + SD_EMMC_CLOCK; + core->phase_mask = CLK_CORE_PHASE_MASK; + core->hw.init = &init; + + host->mmc_clk = devm_clk_register(host->dev, &core->hw); + if (WARN_ON(PTR_ERR_OR_ZERO(host->mmc_clk))) + return PTR_ERR(host->mmc_clk); + + /* create the mmc tx clock */ + tx = devm_kzalloc(host->dev, sizeof(*tx), GFP_KERNEL); + if (!tx) + return -ENOMEM; + + snprintf(clk_name, sizeof(clk_name), "%s#tx", dev_name(host->dev)); + init.name = clk_name; + init.ops = &meson_mmc_clk_phase_ops; + init.flags = 0; + clk_parent[0] = __clk_get_name(host->mmc_clk); + init.parent_names = clk_parent; + init.num_parents = 1; + + tx->reg = host->regs + SD_EMMC_CLOCK; + tx->phase_mask = CLK_TX_PHASE_MASK; + tx->delay_mask = CLK_TX_DELAY_MASK; + tx->delay_step_ps = CLK_DELAY_STEP_PS; + tx->hw.init = &init; + + host->tx_clk = devm_clk_register(host->dev, &tx->hw); + if (WARN_ON(PTR_ERR_OR_ZERO(host->tx_clk))) + return PTR_ERR(host->tx_clk); + + /* create the mmc rx clock */ + rx = devm_kzalloc(host->dev, sizeof(*rx), GFP_KERNEL); + if (!rx) + return -ENOMEM; + + snprintf(clk_name, sizeof(clk_name), "%s#rx", dev_name(host->dev)); + init.name = clk_name; + init.ops = &meson_mmc_clk_phase_ops; + init.flags = 0; + clk_parent[0] = __clk_get_name(host->mmc_clk); + init.parent_names = clk_parent; + init.num_parents = 1; + + rx->reg = host->regs + SD_EMMC_CLOCK; + rx->phase_mask = CLK_RX_PHASE_MASK; + rx->delay_mask = CLK_RX_DELAY_MASK; + rx->delay_step_ps = CLK_DELAY_STEP_PS; + rx->hw.init = &init; + + host->rx_clk = devm_clk_register(host->dev, &rx->hw); + if (WARN_ON(PTR_ERR_OR_ZERO(host->rx_clk))) + return PTR_ERR(host->rx_clk); /* init SD_EMMC_CLOCK to sane defaults w/min clock rate */ - clk_reg = 0; - clk_reg |= FIELD_PREP(CLK_CORE_PHASE_MASK, host->tp.core_phase); - clk_reg |= FIELD_PREP(CLK_TX_PHASE_MASK, host->tp.tx_phase); - clk_reg |= FIELD_PREP(CLK_RX_PHASE_MASK, host->tp.rx_phase); - clk_reg |= FIELD_PREP(CLK_SRC_MASK, CLK_SRC_XTAL); - clk_reg |= FIELD_PREP(CLK_DIV_MASK, CLK_DIV_MAX); - clk_reg &= ~CLK_ALWAYS_ON; - writel(clk_reg, host->regs + SD_EMMC_CLOCK); - - /* Ensure clock starts in "auto" mode, not "always on" */ - cfg = readl(host->regs + SD_EMMC_CFG); - cfg &= ~CFG_CLK_ALWAYS_ON; - cfg |= CFG_AUTO_CLK; - writel(cfg, host->regs + SD_EMMC_CFG); - - ret = clk_prepare_enable(host->cfg_div_clk); + host->mmc->f_min = clk_round_rate(host->mmc_clk, 400000); + ret = clk_set_rate(host->mmc_clk, host->mmc->f_min); if (ret) return ret; - /* Get the nearest minimum clock to 400KHz */ - host->mmc->f_min = clk_round_rate(host->cfg_div_clk, 400000); + /* + * Set phases : These values are mostly the datasheet recommended ones + * except for the Tx phase. Datasheet recommends 180 but some cards + * fail at initialisation with it. 270 works just fine, it fixes these + * initialisation issues and enable eMMC DDR52 mode. 
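The clock chain registered above (mux, then a one-based 6-bit divider, then the core/tx/rx phase clocks) replaces the driver's old hand-rolled rate handling. As a rough model of what the mux plus divider can produce, the sketch below searches for the closest rate not above a target, using the parent rates from the defines this patch removes (24 MHz crystal, 1 GHz fclk_div2). It is an illustration of the hardware layout only, not code from the driver.

#include <stdio.h>

/* Rough model of the mux + one-based 6-bit divider behind the clock chain
 * above. Parent rates follow the defines removed by this patch; the target
 * is the 400 kHz initialisation rate. */
int main(void)
{
	const unsigned long parents[] = { 24000000UL, 1000000000UL };
	const unsigned int div_max = 63;	/* CLK_DIV_MASK is 6 bits wide */
	const unsigned long target = 400000;	/* mmc->f_min request */
	unsigned long best_rate = 0;
	unsigned int best_src = 0, best_div = 1, src, div;

	for (src = 0; src < 2; src++) {
		for (div = 1; div <= div_max; div++) {
			unsigned long rate = parents[src] / div;

			if (rate <= target && rate > best_rate) {
				best_rate = rate;
				best_src = src;
				best_div = div;
			}
		}
	}

	printf("src=%u div=%u -> %lu Hz\n", best_src, best_div, best_rate);
	return 0;
}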
+ */ + clk_set_phase(host->mmc_clk, 180); + clk_set_phase(host->tx_clk, 270); + clk_set_phase(host->rx_clk, 0); - ret = meson_mmc_clk_set(host, host->mmc->f_min); - if (ret) - clk_disable_unprepare(host->cfg_div_clk); - - return ret; + return clk_prepare_enable(host->mmc_clk); } -static void meson_mmc_set_tuning_params(struct mmc_host *mmc) +static void meson_mmc_shift_map(unsigned long *map, unsigned long shift) +{ + DECLARE_BITMAP(left, CLK_PHASE_POINT_NUM); + DECLARE_BITMAP(right, CLK_PHASE_POINT_NUM); + + /* + * shift the bitmap right and reintroduce the dropped bits on the left + * of the bitmap + */ + bitmap_shift_right(right, map, shift, CLK_PHASE_POINT_NUM); + bitmap_shift_left(left, map, CLK_PHASE_POINT_NUM - shift, + CLK_PHASE_POINT_NUM); + bitmap_or(map, left, right, CLK_PHASE_POINT_NUM); +} + +static void meson_mmc_find_next_region(unsigned long *map, + unsigned long *start, + unsigned long *stop) +{ + *start = find_next_bit(map, CLK_PHASE_POINT_NUM, *start); + *stop = find_next_zero_bit(map, CLK_PHASE_POINT_NUM, *start); +} + +static int meson_mmc_find_tuning_point(unsigned long *test) +{ + unsigned long shift, stop, offset = 0, start = 0, size = 0; + + /* Get the all good/all bad situation out the way */ + if (bitmap_full(test, CLK_PHASE_POINT_NUM)) + return 0; /* All points are good so point 0 will do */ + else if (bitmap_empty(test, CLK_PHASE_POINT_NUM)) + return -EIO; /* No successful tuning point */ + + /* + * Now we know there is a least one region find. Make sure it does + * not wrap by the shifting the bitmap if necessary + */ + shift = find_first_zero_bit(test, CLK_PHASE_POINT_NUM); + if (shift != 0) + meson_mmc_shift_map(test, shift); + + while (start < CLK_PHASE_POINT_NUM) { + meson_mmc_find_next_region(test, &start, &stop); + + if ((stop - start) > size) { + offset = start; + size = stop - start; + } + + start = stop; + } + + /* Get the center point of the region */ + offset += (size / 2); + + /* Shift the result back */ + offset = (offset + shift) % CLK_PHASE_POINT_NUM; + + return offset; +} + +static int meson_mmc_clk_phase_tuning(struct mmc_host *mmc, u32 opcode, + struct clk *clk) +{ + int point, ret; + DECLARE_BITMAP(test, CLK_PHASE_POINT_NUM); + + dev_dbg(mmc_dev(mmc), "%s phase/delay tunning...\n", + __clk_get_name(clk)); + bitmap_zero(test, CLK_PHASE_POINT_NUM); + + /* Explore tuning points */ + for (point = 0; point < CLK_PHASE_POINT_NUM; point++) { + clk_set_phase(clk, point * CLK_PHASE_STEP); + ret = mmc_send_tuning(mmc, opcode, NULL); + if (!ret) + set_bit(point, test); + } + + /* Find the optimal tuning point and apply it */ + point = meson_mmc_find_tuning_point(test); + if (point < 0) + return point; /* tuning failed */ + + clk_set_phase(clk, point * CLK_PHASE_STEP); + dev_dbg(mmc_dev(mmc), "success with phase: %d\n", + clk_get_phase(clk)); + return 0; +} + +static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode) { struct meson_host *host = mmc_priv(mmc); - u32 regval; + int ret; - /* stop clock */ - regval = readl(host->regs + SD_EMMC_CFG); - regval |= CFG_STOP_CLOCK; - writel(regval, host->regs + SD_EMMC_CFG); + /* + * If this is the initial tuning, try to get a sane Rx starting + * phase before doing the actual tuning. 
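The tuning helpers above implement a circular search: every 30-degree point is tried, the pass/fail results form a 12-point map, the map is rotated so it starts on a failing point (so a passing window cannot wrap around), and the centre of the longest passing run is chosen. A plain-C re-implementation with a made-up pass/fail pattern, for illustration only:

#include <stdio.h>

#define NUM_POINTS 12	/* 360 / CLK_PHASE_STEP */

/* Rotate the circular pass/fail map so it starts on a failing point, find
 * the longest run of passing points, and return its centre mapped back to
 * the original orientation. Mirrors the logic of the helpers above. */
static int find_tuning_point(const int *ok)
{
	int shift = -1, i, start, len, best_start = 0, best_len = 0;

	for (i = 0; i < NUM_POINTS; i++)
		if (!ok[i]) {
			shift = i;
			break;
		}
	if (shift < 0)
		return 0;		/* every point passed */

	for (i = 0; i < NUM_POINTS; ) {
		while (i < NUM_POINTS && !ok[(i + shift) % NUM_POINTS])
			i++;
		start = i;
		while (i < NUM_POINTS && ok[(i + shift) % NUM_POINTS])
			i++;
		len = i - start;
		if (len > best_len) {
			best_len = len;
			best_start = start;
		}
	}
	if (!best_len)
		return -1;		/* no point passed */

	return (best_start + best_len / 2 + shift) % NUM_POINTS;
}

int main(void)
{
	/* hypothetical pass/fail result at each 30-degree step */
	const int ok[NUM_POINTS] = { 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1 };
	int point = find_tuning_point(ok);

	printf("tuning point %d -> phase %d degrees\n", point, point * 30);
	return 0;
}

With this sample map the longest run is points 4 to 8, so the code picks point 6 (180 degrees), the centre of that window.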
+ */ + if (!mmc->doing_retune) { + ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk); - regval = readl(host->regs + SD_EMMC_CLOCK); - regval &= ~CLK_CORE_PHASE_MASK; - regval |= FIELD_PREP(CLK_CORE_PHASE_MASK, host->tp.core_phase); - regval &= ~CLK_TX_PHASE_MASK; - regval |= FIELD_PREP(CLK_TX_PHASE_MASK, host->tp.tx_phase); - regval &= ~CLK_RX_PHASE_MASK; - regval |= FIELD_PREP(CLK_RX_PHASE_MASK, host->tp.rx_phase); - writel(regval, host->regs + SD_EMMC_CLOCK); + if (ret) + return ret; + } - /* start clock */ - regval = readl(host->regs + SD_EMMC_CFG); - regval &= ~CFG_STOP_CLOCK; - writel(regval, host->regs + SD_EMMC_CFG); + ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->tx_clk); + if (ret) + return ret; + + return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk); } static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct meson_host *host = mmc_priv(mmc); - u32 bus_width; - u32 val, orig; + u32 bus_width, val; + int err; /* * GPIO regulator, only controls switching between 1v8 and @@ -475,6 +761,11 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) case MMC_POWER_UP: if (!IS_ERR(mmc->supply.vmmc)) mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); + + /* Reset phases */ + clk_set_phase(host->rx_clk, 0); + clk_set_phase(host->tx_clk, 270); + break; case MMC_POWER_ON: @@ -482,7 +773,7 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) int ret = regulator_enable(mmc->supply.vqmmc); if (ret < 0) - dev_err(mmc_dev(mmc), + dev_err(host->dev, "failed to enable vqmmc regulator\n"); else host->vqmmc_enabled = true; @@ -491,9 +782,6 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) break; } - - meson_mmc_clk_set(host, ios->clock); - /* Bus width */ switch (ios->bus_width) { case MMC_BUS_WIDTH_1: @@ -512,26 +800,23 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) } val = readl(host->regs + SD_EMMC_CFG); - orig = val; - val &= ~CFG_BUS_WIDTH_MASK; val |= FIELD_PREP(CFG_BUS_WIDTH_MASK, bus_width); val &= ~CFG_DDR; - if (ios->timing == MMC_TIMING_UHS_DDR50 || - ios->timing == MMC_TIMING_MMC_DDR52 || - ios->timing == MMC_TIMING_MMC_HS400) + if (meson_mmc_timing_is_ddr(ios)) val |= CFG_DDR; val &= ~CFG_CHK_DS; if (ios->timing == MMC_TIMING_MMC_HS400) val |= CFG_CHK_DS; - if (val != orig) { - writel(val, host->regs + SD_EMMC_CFG); - dev_dbg(host->dev, "%s: SD_EMMC_CFG: 0x%08x -> 0x%08x\n", - __func__, orig, val); - } + err = meson_mmc_clk_set(host, ios); + if (err) + dev_err(host->dev, "Failed to set clock: %d\n,", err); + + writel(val, host->regs + SD_EMMC_CFG); + dev_dbg(host->dev, "SD_EMMC_CFG: 0x%08x\n", val); } static void meson_mmc_request_done(struct mmc_host *mmc, @@ -729,57 +1014,40 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id) struct mmc_command *cmd; struct mmc_data *data; u32 irq_en, status, raw_status; - irqreturn_t ret = IRQ_HANDLED; + irqreturn_t ret = IRQ_NONE; - if (WARN_ON(!host)) + if (WARN_ON(!host) || WARN_ON(!host->cmd)) return IRQ_NONE; - cmd = host->cmd; - - if (WARN_ON(!cmd)) - return IRQ_NONE; - - data = cmd->data; - spin_lock(&host->lock); + + cmd = host->cmd; + data = cmd->data; irq_en = readl(host->regs + SD_EMMC_IRQ_EN); raw_status = readl(host->regs + SD_EMMC_STATUS); status = raw_status & irq_en; - if (!status) { - dev_warn(host->dev, "Spurious IRQ! 
status=0x%08x, irq_en=0x%08x\n", - raw_status, irq_en); - ret = IRQ_NONE; + cmd->error = 0; + if (status & IRQ_CRC_ERR) { + dev_dbg(host->dev, "CRC Error - status 0x%08x\n", status); + cmd->error = -EILSEQ; + ret = IRQ_HANDLED; + goto out; + } + + if (status & IRQ_TIMEOUTS) { + dev_dbg(host->dev, "Timeout - status 0x%08x\n", status); + cmd->error = -ETIMEDOUT; + ret = IRQ_HANDLED; goto out; } meson_mmc_read_resp(host->mmc, cmd); - cmd->error = 0; - if (status & IRQ_RXD_ERR_MASK) { - dev_dbg(host->dev, "Unhandled IRQ: RXD error\n"); - cmd->error = -EILSEQ; + if (status & IRQ_SDIO) { + dev_dbg(host->dev, "IRQ: SDIO TODO.\n"); + ret = IRQ_HANDLED; } - if (status & IRQ_TXD_ERR) { - dev_dbg(host->dev, "Unhandled IRQ: TXD error\n"); - cmd->error = -EILSEQ; - } - if (status & IRQ_DESC_ERR) - dev_dbg(host->dev, "Unhandled IRQ: Descriptor error\n"); - if (status & IRQ_RESP_ERR) { - dev_dbg(host->dev, "Unhandled IRQ: Response error\n"); - cmd->error = -EILSEQ; - } - if (status & IRQ_RESP_TIMEOUT) { - dev_dbg(host->dev, "Unhandled IRQ: Response timeout\n"); - cmd->error = -ETIMEDOUT; - } - if (status & IRQ_DESC_TIMEOUT) { - dev_dbg(host->dev, "Unhandled IRQ: Descriptor timeout\n"); - cmd->error = -ETIMEDOUT; - } - if (status & IRQ_SDIO) - dev_dbg(host->dev, "Unhandled IRQ: SDIO.\n"); if (status & (IRQ_END_OF_CHAIN | IRQ_RESP_STATUS)) { if (data && !cmd->error) @@ -787,26 +1055,20 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id) if (meson_mmc_bounce_buf_read(data) || meson_mmc_get_next_command(cmd)) ret = IRQ_WAKE_THREAD; - } else { - dev_warn(host->dev, "Unknown IRQ! status=0x%04x: MMC CMD%u arg=0x%08x flags=0x%08x stop=%d\n", - status, cmd->opcode, cmd->arg, - cmd->flags, cmd->mrq->stop ? 1 : 0); - if (cmd->data) { - struct mmc_data *data = cmd->data; - - dev_warn(host->dev, "\tblksz %u blocks %u flags 0x%08x (%s%s)", - data->blksz, data->blocks, data->flags, - data->flags & MMC_DATA_WRITE ? "write" : "", - data->flags & MMC_DATA_READ ? "read" : ""); - } + else + ret = IRQ_HANDLED; } out: - /* ack all (enabled) interrupts */ - writel(status, host->regs + SD_EMMC_STATUS); + /* ack all enabled interrupts */ + writel(irq_en, host->regs + SD_EMMC_STATUS); if (ret == IRQ_HANDLED) meson_mmc_request_done(host->mmc, cmd->mrq); + else if (ret == IRQ_NONE) + dev_warn(host->dev, + "Unexpected IRQ! status=0x%08x, irq_en=0x%08x\n", + raw_status, irq_en); spin_unlock(&host->lock); return ret; @@ -839,29 +1101,6 @@ static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id) return IRQ_HANDLED; } -static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode) -{ - struct meson_host *host = mmc_priv(mmc); - struct meson_tuning_params tp_old = host->tp; - int ret = -EINVAL, i, cmd_error; - - dev_info(mmc_dev(mmc), "(re)tuning...\n"); - - for (i = CLK_PHASE_0; i <= CLK_PHASE_270; i++) { - host->tp.rx_phase = i; - /* exclude the active parameter set if retuning */ - if (!memcmp(&tp_old, &host->tp, sizeof(tp_old)) && - mmc->doing_retune) - continue; - meson_mmc_set_tuning_params(mmc); - ret = mmc_send_tuning(mmc, opcode, &cmd_error); - if (!ret) - break; - } - - return ret; -} - /* * NOTE: we only need this until the GPIO/pinctrl driver can handle * interrupts. For now, the MMC core will use this for polling. 
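The reworked interrupt handler above no longer reports each error bit on its own; it groups the bits with IRQ_CRC_ERR and IRQ_TIMEOUTS and maps the groups onto cmd->error (-EILSEQ for CRC-type errors, -ETIMEDOUT for timeouts) before completing the request. A minimal userspace sketch of that classification, using the mask layout from this patch and a made-up status word:

#include <stdio.h>
#include <errno.h>

/* Mask values mirror the defines added by this patch; the status word in
 * main() is an example, not a captured register value. */
#define IRQ_RXD_ERR_MASK	0x000000ffu	/* GENMASK(7, 0) */
#define IRQ_TXD_ERR		(1u << 8)
#define IRQ_DESC_ERR		(1u << 9)
#define IRQ_RESP_ERR		(1u << 10)
#define IRQ_CRC_ERR		(IRQ_RXD_ERR_MASK | IRQ_TXD_ERR | \
				 IRQ_DESC_ERR | IRQ_RESP_ERR)
#define IRQ_RESP_TIMEOUT	(1u << 11)
#define IRQ_DESC_TIMEOUT	(1u << 12)
#define IRQ_TIMEOUTS		(IRQ_RESP_TIMEOUT | IRQ_DESC_TIMEOUT)
#define IRQ_END_OF_CHAIN	(1u << 13)

static int decode_status(unsigned int status)
{
	if (status & IRQ_CRC_ERR)
		return -EILSEQ;		/* any CRC/response/descriptor error */
	if (status & IRQ_TIMEOUTS)
		return -ETIMEDOUT;	/* response or descriptor timeout */
	return 0;			/* completion path */
}

int main(void)
{
	unsigned int status = IRQ_RESP_TIMEOUT | IRQ_END_OF_CHAIN;

	printf("status 0x%08x -> cmd->error = %d\n", status,
	       decode_status(status));
	return 0;
}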
@@ -888,6 +1127,38 @@ static void meson_mmc_cfg_init(struct meson_host *host) writel(cfg, host->regs + SD_EMMC_CFG); } +static int meson_mmc_card_busy(struct mmc_host *mmc) +{ + struct meson_host *host = mmc_priv(mmc); + u32 regval; + + regval = readl(host->regs + SD_EMMC_STATUS); + + /* We are only interrested in lines 0 to 3, so mask the other ones */ + return !(FIELD_GET(STATUS_DATI, regval) & 0xf); +} + +static int meson_mmc_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios) +{ + /* vqmmc regulator is available */ + if (!IS_ERR(mmc->supply.vqmmc)) { + /* + * The usual amlogic setup uses a GPIO to switch from one + * regulator to the other. While the voltage ramp up is + * pretty fast, care must be taken when switching from 3.3v + * to 1.8v. Please make sure the regulator framework is aware + * of your own regulator constraints + */ + return mmc_regulator_set_vqmmc(mmc, ios); + } + + /* no vqmmc regulator, assume fixed regulator at 3/3.3V */ + if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) + return 0; + + return -EINVAL; +} + static const struct mmc_host_ops meson_mmc_ops = { .request = meson_mmc_request, .set_ios = meson_mmc_set_ios, @@ -895,6 +1166,8 @@ static const struct mmc_host_ops meson_mmc_ops = { .pre_req = meson_mmc_pre_req, .post_req = meson_mmc_post_req, .execute_tuning = meson_mmc_execute_tuning, + .card_busy = meson_mmc_card_busy, + .start_signal_voltage_switch = meson_mmc_voltage_switch, }; static int meson_mmc_probe(struct platform_device *pdev) @@ -941,6 +1214,27 @@ static int meson_mmc_probe(struct platform_device *pdev) goto free_host; } + host->pinctrl = devm_pinctrl_get(&pdev->dev); + if (IS_ERR(host->pinctrl)) { + ret = PTR_ERR(host->pinctrl); + goto free_host; + } + + host->pins_default = pinctrl_lookup_state(host->pinctrl, + PINCTRL_STATE_DEFAULT); + if (IS_ERR(host->pins_default)) { + ret = PTR_ERR(host->pins_default); + goto free_host; + } + + host->pins_clk_gate = pinctrl_lookup_state(host->pinctrl, + "clk-gate"); + if (IS_ERR(host->pins_clk_gate)) { + dev_warn(&pdev->dev, + "can't get clk-gate pinctrl, using clk_stop bit\n"); + host->pins_clk_gate = NULL; + } + host->core_clk = devm_clk_get(&pdev->dev, "core"); if (IS_ERR(host->core_clk)) { ret = PTR_ERR(host->core_clk); @@ -951,30 +1245,28 @@ static int meson_mmc_probe(struct platform_device *pdev) if (ret) goto free_host; - host->tp.core_phase = CLK_PHASE_180; - host->tp.tx_phase = CLK_PHASE_0; - host->tp.rx_phase = CLK_PHASE_0; - ret = meson_mmc_clk_init(host); if (ret) goto err_core_clk; + /* set config to sane default */ + meson_mmc_cfg_init(host); + /* Stop execution */ writel(0, host->regs + SD_EMMC_START); - /* clear, ack, enable all interrupts */ + /* clear, ack and enable interrupts */ writel(0, host->regs + SD_EMMC_IRQ_EN); - writel(IRQ_EN_MASK, host->regs + SD_EMMC_STATUS); - writel(IRQ_EN_MASK, host->regs + SD_EMMC_IRQ_EN); - - /* set config to sane default */ - meson_mmc_cfg_init(host); + writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN, + host->regs + SD_EMMC_STATUS); + writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN, + host->regs + SD_EMMC_IRQ_EN); ret = devm_request_threaded_irq(&pdev->dev, irq, meson_mmc_irq, meson_mmc_irq_thread, IRQF_SHARED, NULL, host); if (ret) - goto err_div_clk; + goto err_init_clk; mmc->caps |= MMC_CAP_CMD23; mmc->max_blk_count = CMD_CFG_LENGTH_MASK; @@ -990,7 +1282,7 @@ static int meson_mmc_probe(struct platform_device *pdev) if (host->bounce_buf == NULL) { dev_err(host->dev, "Unable to map allocate DMA bounce buffer.\n"); ret = -ENOMEM; - goto 
err_div_clk; + goto err_init_clk; } host->descs = dma_alloc_coherent(host->dev, SD_EMMC_DESC_BUF_LEN, @@ -1009,8 +1301,8 @@ static int meson_mmc_probe(struct platform_device *pdev) err_bounce_buf: dma_free_coherent(host->dev, host->bounce_buf_size, host->bounce_buf, host->bounce_dma_addr); -err_div_clk: - clk_disable_unprepare(host->cfg_div_clk); +err_init_clk: + clk_disable_unprepare(host->mmc_clk); err_core_clk: clk_disable_unprepare(host->core_clk); free_host: @@ -1032,7 +1324,7 @@ static int meson_mmc_remove(struct platform_device *pdev) dma_free_coherent(host->dev, host->bounce_buf_size, host->bounce_buf, host->bounce_dma_addr); - clk_disable_unprepare(host->cfg_div_clk); + clk_disable_unprepare(host->mmc_clk); clk_disable_unprepare(host->core_clk); mmc_free_host(host->mmc); diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index d1ca2f489054..f1f54a818489 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -1904,7 +1904,7 @@ static const struct dev_pm_ops mmci_dev_pm_ops = { SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL) }; -static struct amba_id mmci_ids[] = { +static const struct amba_id mmci_ids[] = { { .id = 0x00041180, .mask = 0xff0fffff, diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c index d4dc55ac7dea..a0670e9cd012 100644 --- a/drivers/mmc/host/moxart-mmc.c +++ b/drivers/mmc/host/moxart-mmc.c @@ -546,7 +546,7 @@ static int moxart_get_ro(struct mmc_host *mmc) return !!(readl(host->base + REG_STATUS) & WRITE_PROT); } -static struct mmc_host_ops moxart_ops = { +static const struct mmc_host_ops moxart_ops = { .request = moxart_request, .set_ios = moxart_set_ios, .get_ro = moxart_get_ro, diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c index 5a672a5218ad..267f7ab08420 100644 --- a/drivers/mmc/host/mtk-sd.c +++ b/drivers/mmc/host/mtk-sd.c @@ -1579,12 +1579,13 @@ static void msdc_hw_reset(struct mmc_host *mmc) sdr_clr_bits(host->base + EMMC_IOCON, 1); } -static struct mmc_host_ops mt_msdc_ops = { +static const struct mmc_host_ops mt_msdc_ops = { .post_req = msdc_post_req, .pre_req = msdc_pre_req, .request = msdc_ops_request, .set_ios = msdc_ops_set_ios, .get_ro = mmc_gpio_get_ro, + .get_cd = mmc_gpio_get_cd, .start_signal_voltage_switch = msdc_ops_switch_volt, .card_busy = msdc_card_busy, .execute_tuning = msdc_execute_tuning, diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c index fb3ca8296273..1d5418e4efae 100644 --- a/drivers/mmc/host/mxcmmc.c +++ b/drivers/mmc/host/mxcmmc.c @@ -681,6 +681,9 @@ static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat) spin_unlock_irqrestore(&host->lock, flags); + if (data_error) + return; + mxcmci_read_response(host, stat); host->cmd = NULL; @@ -1014,8 +1017,10 @@ static int mxcmci_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); - if (irq < 0) - return -EINVAL; + if (irq < 0) { + dev_err(&pdev->dev, "failed to get IRQ: %d\n", irq); + return irq; + } mmc = mmc_alloc_host(sizeof(*host), &pdev->dev); if (!mmc) @@ -1098,8 +1103,13 @@ static int mxcmci_probe(struct platform_device *pdev) goto out_free; } - clk_prepare_enable(host->clk_per); - clk_prepare_enable(host->clk_ipg); + ret = clk_prepare_enable(host->clk_per); + if (ret) + goto out_free; + + ret = clk_prepare_enable(host->clk_ipg); + if (ret) + goto out_clk_per_put; mxcmci_softreset(host); @@ -1168,8 +1178,9 @@ static int mxcmci_probe(struct platform_device *pdev) 
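The mxcmci_probe() changes above follow the usual kernel pattern of checking clk_prepare_enable() and unwinding in reverse order through goto labels, which the next hunk reorders accordingly. A compact standalone sketch of that pattern; the enable/disable helpers are stand-ins, not real kernel APIs:

#include <stdio.h>

static int enable_clk_per(void)   { puts("clk_per on");  return 0; }
static int enable_clk_ipg(void)   { puts("clk_ipg on");  return 0; }
static int request_irq_stub(void) { puts("request_irq"); return -1; /* simulated failure */ }
static void disable_clk_per(void) { puts("clk_per off"); }
static void disable_clk_ipg(void) { puts("clk_ipg off"); }

/* Enable resources in order; on failure, release only what was already
 * enabled, in reverse order. */
static int probe(void)
{
	int ret;

	ret = enable_clk_per();
	if (ret)
		goto out;

	ret = enable_clk_ipg();
	if (ret)
		goto out_clk_per;

	ret = request_irq_stub();
	if (ret)
		goto out_clk_ipg;

	return 0;

out_clk_ipg:
	disable_clk_ipg();
out_clk_per:
	disable_clk_per();
out:
	return ret;
}

int main(void)
{
	printf("probe() = %d\n", probe());
	return 0;
}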
dma_release_channel(host->dma); out_clk_put: - clk_disable_unprepare(host->clk_per); clk_disable_unprepare(host->clk_ipg); +out_clk_per_put: + clk_disable_unprepare(host->clk_per); out_free: mmc_free_host(mmc); @@ -1212,10 +1223,17 @@ static int __maybe_unused mxcmci_resume(struct device *dev) { struct mmc_host *mmc = dev_get_drvdata(dev); struct mxcmci_host *host = mmc_priv(mmc); + int ret; - clk_prepare_enable(host->clk_per); - clk_prepare_enable(host->clk_ipg); - return 0; + ret = clk_prepare_enable(host->clk_per); + if (ret) + return ret; + + ret = clk_prepare_enable(host->clk_ipg); + if (ret) + clk_disable_unprepare(host->clk_per); + + return ret; } static SIMPLE_DEV_PM_OPS(mxcmci_pm_ops, mxcmci_suspend, mxcmci_resume); diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c index 85bbebfde02e..c9eed8436b6b 100644 --- a/drivers/mmc/host/of_mmc_spi.c +++ b/drivers/mmc/host/of_mmc_spi.c @@ -71,7 +71,7 @@ struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi) struct device *dev = &spi->dev; struct device_node *np = dev->of_node; struct of_mmc_spi *oms; - const u32 *voltage_ranges; + const __be32 *voltage_ranges; int num_ranges; int i; diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 2ab4788d021f..3b5e6d11069b 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -2076,9 +2076,9 @@ static int omap_hsmmc_probe(struct platform_device *pdev) host->dbclk = NULL; } - /* Since we do only SG emulation, we can have as many segs - * as we want. */ - mmc->max_segs = 1024; + /* Set this to a value that allows allocating an entire descriptor + * list within a page (zero order allocation). */ + mmc->max_segs = 64; mmc->max_blk_size = 512; /* Block Length at max can be 1024 */ mmc->max_blk_count = 0xFFFF; /* No. of Blocks is 16 bits */ @@ -2322,7 +2322,7 @@ static int omap_hsmmc_runtime_resume(struct device *dev) return 0; } -static struct dev_pm_ops omap_hsmmc_dev_pm_ops = { +static const struct dev_pm_ops omap_hsmmc_dev_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(omap_hsmmc_suspend, omap_hsmmc_resume) .runtime_suspend = omap_hsmmc_runtime_suspend, .runtime_resume = omap_hsmmc_runtime_resume, diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index 59ab194cb009..c763b404510f 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c @@ -702,11 +702,7 @@ static int pxamci_probe(struct platform_device *pdev) pxamci_init_ocr(host); - /* - * This architecture used to disable bounce buffers through its - * defconfig, now it is done at runtime as a host property. - */ - mmc->caps = MMC_CAP_NO_BOUNCE_BUFF; + mmc->caps = 0; host->cmdat = 0; if (!cpu_is_pxa25x()) { mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ; diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h index ca83acc113b8..b9dfea5d8193 100644 --- a/drivers/mmc/host/renesas_sdhi.h +++ b/drivers/mmc/host/renesas_sdhi.h @@ -31,6 +31,8 @@ struct renesas_sdhi_of_data { int scc_offset; struct renesas_sdhi_scc *taps; int taps_num; + unsigned int max_blk_count; + unsigned short max_segs; }; int renesas_sdhi_probe(struct platform_device *pdev, diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index a4fb07d0ea91..fcf7235d5742 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -40,6 +40,7 @@ #define EXT_ACC 0xe4 #define SDHI_VER_GEN2_SDR50 0x490c +#define SDHI_VER_RZ_A1 0x820b /* very old datasheets said 0x490c for SDR104, too. 
They are wrong! */ #define SDHI_VER_GEN2_SDR104 0xcb0d #define SDHI_VER_GEN3_SD 0xcc10 @@ -398,12 +399,14 @@ static void renesas_sdhi_hw_reset(struct tmio_mmc_host *host) sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL)); } -static int renesas_sdhi_wait_idle(struct tmio_mmc_host *host) +static int renesas_sdhi_wait_idle(struct tmio_mmc_host *host, u32 bit) { int timeout = 1000; + /* CBSY is set when busy, SCLKDIVEN is cleared when busy */ + u32 wait_state = (bit == TMIO_STAT_CMD_BUSY ? TMIO_STAT_CMD_BUSY : 0); - while (--timeout && !(sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) - & TMIO_STAT_SCLKDIVEN)) + while (--timeout && (sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) + & bit) == wait_state) udelay(1); if (!timeout) { @@ -416,17 +419,22 @@ static int renesas_sdhi_wait_idle(struct tmio_mmc_host *host) static int renesas_sdhi_write16_hook(struct tmio_mmc_host *host, int addr) { + u32 bit = TMIO_STAT_SCLKDIVEN; + switch (addr) { case CTL_SD_CMD: case CTL_STOP_INTERNAL_ACTION: case CTL_XFER_BLK_COUNT: - case CTL_SD_CARD_CLK_CTL: case CTL_SD_XFER_LEN: case CTL_SD_MEM_CARD_OPT: case CTL_TRANSACTION_CTL: case CTL_DMA_ENABLE: case EXT_ACC: - return renesas_sdhi_wait_idle(host); + if (host->pdata->flags & TMIO_MMC_HAVE_CBSY) + bit = TMIO_STAT_CMD_BUSY; + /* fallthrough */ + case CTL_SD_CARD_CLK_CTL: + return renesas_sdhi_wait_idle(host, bit); } return 0; @@ -452,10 +460,11 @@ static int renesas_sdhi_multi_io_quirk(struct mmc_card *card, static void renesas_sdhi_enable_dma(struct tmio_mmc_host *host, bool enable) { - sd_ctrl_write16(host, CTL_DMA_ENABLE, enable ? 2 : 0); + /* Iff regs are 8 byte apart, sdbuf is 64 bit. Otherwise always 32. */ + int width = (host->bus_shift == 2) ? 64 : 32; - /* enable 32bit access if DMA mode if possibile */ - renesas_sdhi_sdbuf_width(host, enable ? 32 : 16); + sd_ctrl_write16(host, CTL_DMA_ENABLE, enable ? DMA_ENABLE_DMASDRW : 0); + renesas_sdhi_sdbuf_width(host, enable ? width : 16); } int renesas_sdhi_probe(struct platform_device *pdev, @@ -526,6 +535,8 @@ int renesas_sdhi_probe(struct platform_device *pdev, mmc_data->capabilities |= of_data->capabilities; mmc_data->capabilities2 |= of_data->capabilities2; mmc_data->dma_rx_offset = of_data->dma_rx_offset; + mmc_data->max_blk_count = of_data->max_blk_count; + mmc_data->max_segs = of_data->max_segs; dma_priv->dma_buswidth = of_data->dma_buswidth; host->bus_shift = of_data->bus_shift; } @@ -579,6 +590,10 @@ int renesas_sdhi_probe(struct platform_device *pdev, if (ret < 0) goto efree; + /* One Gen2 SDHI incarnation does NOT have a CBSY bit */ + if (sd_ctrl_read16(host, CTL_VERSION) == SDHI_VER_GEN2_SDR50) + mmc_data->flags &= ~TMIO_MMC_HAVE_CBSY; + /* Enable tuning iff we have an SCC and a supported mode */ if (of_data && of_data->scc_offset && (host->mmc->caps & MMC_CAP_UHS_SDR104 || diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c new file mode 100644 index 000000000000..f905f2361d12 --- /dev/null +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c @@ -0,0 +1,287 @@ +/* + * DMA support for Internal DMAC with SDHI SD/SDIO controller + * + * Copyright (C) 2016-17 Renesas Electronics Corporation + * Copyright (C) 2016-17 Horms Solutions, Simon Horman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
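Back in renesas_sdhi_wait_idle() above, the same poll loop now serves two kinds of status bits: CBSY is set while the controller is busy, while SCLKDIVEN is cleared while it is busy, so the loop compares against a per-bit busy pattern instead of hard-coding one polarity. A standalone model of that logic; the bit positions below are placeholders and the udelay() is elided:

#include <stdio.h>

#define STAT_CMD_BUSY	(1u << 30)	/* set while busy (placeholder position) */
#define STAT_SCLKDIVEN	(1u << 29)	/* cleared while busy (placeholder position) */

static unsigned int fake_status;	/* stands in for CTL_STATUS */

static int wait_idle(unsigned int bit)
{
	unsigned int busy_state = (bit == STAT_CMD_BUSY) ? bit : 0;
	int timeout = 1000;

	while (--timeout && (fake_status & bit) == busy_state)
		;	/* the real driver udelay(1)s here */

	return timeout ? 0 : -1;
}

int main(void)
{
	fake_status = STAT_SCLKDIVEN;	/* divider idle, command engine idle */
	printf("SCLKDIVEN wait: %d\n", wait_idle(STAT_SCLKDIVEN));
	printf("CBSY wait:      %d\n", wait_idle(STAT_CMD_BUSY));

	fake_status = STAT_CMD_BUSY;	/* command engine stuck busy */
	printf("CBSY stuck:     %d\n", wait_idle(STAT_CMD_BUSY));
	return 0;
}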
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "renesas_sdhi.h" +#include "tmio_mmc.h" + +#define DM_CM_DTRAN_MODE 0x820 +#define DM_CM_DTRAN_CTRL 0x828 +#define DM_CM_RST 0x830 +#define DM_CM_INFO1 0x840 +#define DM_CM_INFO1_MASK 0x848 +#define DM_CM_INFO2 0x850 +#define DM_CM_INFO2_MASK 0x858 +#define DM_DTRAN_ADDR 0x880 + +/* DM_CM_DTRAN_MODE */ +#define DTRAN_MODE_CH_NUM_CH0 0 /* "downstream" = for write commands */ +#define DTRAN_MODE_CH_NUM_CH1 BIT(16) /* "uptream" = for read commands */ +#define DTRAN_MODE_BUS_WID_TH (BIT(5) | BIT(4)) +#define DTRAN_MODE_ADDR_MODE BIT(0) /* 1 = Increment address */ + +/* DM_CM_DTRAN_CTRL */ +#define DTRAN_CTRL_DM_START BIT(0) + +/* DM_CM_RST */ +#define RST_DTRANRST1 BIT(9) +#define RST_DTRANRST0 BIT(8) +#define RST_RESERVED_BITS GENMASK_ULL(32, 0) + +/* DM_CM_INFO1 and DM_CM_INFO1_MASK */ +#define INFO1_CLEAR 0 +#define INFO1_DTRANEND1 BIT(17) +#define INFO1_DTRANEND0 BIT(16) + +/* DM_CM_INFO2 and DM_CM_INFO2_MASK */ +#define INFO2_DTRANERR1 BIT(17) +#define INFO2_DTRANERR0 BIT(16) + +/* + * Specification of this driver: + * - host->chan_{rx,tx} will be used as a flag of enabling/disabling the dma + * - Since this SDHI DMAC register set has 16 but 32-bit width, we + * need a custom accessor. + */ + +/* Definitions for sampling clocks */ +static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = { + { + .clk_rate = 0, + .tap = 0x00000300, + }, +}; + +static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = { + .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE | + TMIO_MMC_CLK_ACTUAL | TMIO_MMC_HAVE_CBSY | + TMIO_MMC_MIN_RCAR2, + .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ | + MMC_CAP_CMD23, + .bus_shift = 2, + .scc_offset = 0x1000, + .taps = rcar_gen3_scc_taps, + .taps_num = ARRAY_SIZE(rcar_gen3_scc_taps), + /* Gen3 SDHI DMAC can handle 0xffffffff blk count, but seg = 1 */ + .max_blk_count = 0xffffffff, + .max_segs = 1, +}; + +static const struct of_device_id renesas_sdhi_internal_dmac_of_match[] = { + { .compatible = "renesas,sdhi-r8a7795", .data = &of_rcar_gen3_compatible, }, + { .compatible = "renesas,sdhi-r8a7796", .data = &of_rcar_gen3_compatible, }, + {}, +}; +MODULE_DEVICE_TABLE(of, renesas_sdhi_internal_dmac_of_match); + +static void +renesas_sdhi_internal_dmac_dm_write(struct tmio_mmc_host *host, + int addr, u64 val) +{ + writeq(val, host->ctl + addr); +} + +static void +renesas_sdhi_internal_dmac_enable_dma(struct tmio_mmc_host *host, bool enable) +{ + if (!host->chan_tx || !host->chan_rx) + return; + + if (!enable) + renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO1, + INFO1_CLEAR); + + if (host->dma->enable) + host->dma->enable(host, enable); +} + +static void +renesas_sdhi_internal_dmac_abort_dma(struct tmio_mmc_host *host) { + u64 val = RST_DTRANRST1 | RST_DTRANRST0; + + renesas_sdhi_internal_dmac_enable_dma(host, false); + + renesas_sdhi_internal_dmac_dm_write(host, DM_CM_RST, + RST_RESERVED_BITS & ~val); + renesas_sdhi_internal_dmac_dm_write(host, DM_CM_RST, + RST_RESERVED_BITS | val); + + renesas_sdhi_internal_dmac_enable_dma(host, true); +} + +static void +renesas_sdhi_internal_dmac_dataend_dma(struct tmio_mmc_host *host) { + tasklet_schedule(&host->dma_complete); +} + +static void +renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host, + struct mmc_data *data) +{ + struct scatterlist *sg = host->sg_ptr; + u32 dtran_mode = DTRAN_MODE_BUS_WID_TH | DTRAN_MODE_ADDR_MODE; + enum dma_data_direction dir; + int ret; + 
u32 irq_mask; + + /* This DMAC cannot handle if sg_len is not 1 */ + WARN_ON(host->sg_len > 1); + + /* This DMAC cannot handle if buffer is not 8-bytes alignment */ + if (!IS_ALIGNED(sg->offset, 8)) { + host->force_pio = true; + renesas_sdhi_internal_dmac_enable_dma(host, false); + return; + } + + if (data->flags & MMC_DATA_READ) { + dtran_mode |= DTRAN_MODE_CH_NUM_CH1; + dir = DMA_FROM_DEVICE; + irq_mask = TMIO_STAT_RXRDY; + } else { + dtran_mode |= DTRAN_MODE_CH_NUM_CH0; + dir = DMA_TO_DEVICE; + irq_mask = TMIO_STAT_TXRQ; + } + + ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, dir); + if (ret < 0) + return; + + renesas_sdhi_internal_dmac_enable_dma(host, true); + + /* disable PIO irqs to avoid "PIO IRQ in DMA mode!" */ + tmio_mmc_disable_mmc_irqs(host, irq_mask); + + /* set dma parameters */ + renesas_sdhi_internal_dmac_dm_write(host, DM_CM_DTRAN_MODE, + dtran_mode); + renesas_sdhi_internal_dmac_dm_write(host, DM_DTRAN_ADDR, + sg->dma_address); +} + +static void renesas_sdhi_internal_dmac_issue_tasklet_fn(unsigned long arg) +{ + struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg; + + tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND); + + /* start the DMAC */ + renesas_sdhi_internal_dmac_dm_write(host, DM_CM_DTRAN_CTRL, + DTRAN_CTRL_DM_START); +} + +static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg) +{ + struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg; + enum dma_data_direction dir; + + spin_lock_irq(&host->lock); + + if (!host->data) + goto out; + + if (host->data->flags & MMC_DATA_READ) + dir = DMA_FROM_DEVICE; + else + dir = DMA_TO_DEVICE; + + renesas_sdhi_internal_dmac_enable_dma(host, false); + dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->sg_len, dir); + + tmio_mmc_do_data_irq(host); +out: + spin_unlock_irq(&host->lock); +} + +static void +renesas_sdhi_internal_dmac_request_dma(struct tmio_mmc_host *host, + struct tmio_mmc_data *pdata) +{ + /* Each value is set to non-zero to assume "enabling" each DMA */ + host->chan_rx = host->chan_tx = (void *)0xdeadbeaf; + + tasklet_init(&host->dma_complete, + renesas_sdhi_internal_dmac_complete_tasklet_fn, + (unsigned long)host); + tasklet_init(&host->dma_issue, + renesas_sdhi_internal_dmac_issue_tasklet_fn, + (unsigned long)host); +} + +static void +renesas_sdhi_internal_dmac_release_dma(struct tmio_mmc_host *host) +{ + /* Each value is set to zero to assume "disabling" each DMA */ + host->chan_rx = host->chan_tx = NULL; +} + +static const struct tmio_mmc_dma_ops renesas_sdhi_internal_dmac_dma_ops = { + .start = renesas_sdhi_internal_dmac_start_dma, + .enable = renesas_sdhi_internal_dmac_enable_dma, + .request = renesas_sdhi_internal_dmac_request_dma, + .release = renesas_sdhi_internal_dmac_release_dma, + .abort = renesas_sdhi_internal_dmac_abort_dma, + .dataend = renesas_sdhi_internal_dmac_dataend_dma, +}; + +/* + * Whitelist of specific R-Car Gen3 SoC ES versions to use this DMAC + * implementation as others may use a different implementation. 
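The gen3_soc_whitelist that follows restricts this driver to specific R-Car Gen3 ES revisions via soc_device_match(), whose revision strings may end in a '*' wildcard. The sketch below models that matching in plain C with the same table entries; it simplifies the kernel's glob matching down to a trailing-wildcard comparison.

#include <stdio.h>
#include <string.h>

struct soc_attr {
	const char *soc_id;
	const char *revision;
};

/* Same entries as the whitelist below; sample data only. */
static const struct soc_attr whitelist[] = {
	{ "r8a7795", "ES1.*" },
	{ "r8a7795", "ES2.0" },
	{ "r8a7796", "ES1.0" },
	{ NULL, NULL }			/* sentinel */
};

static int soc_matches(const char *soc_id, const char *rev)
{
	const struct soc_attr *e;

	for (e = whitelist; e->soc_id; e++) {
		size_t n = strlen(e->revision);

		if (strcmp(e->soc_id, soc_id))
			continue;
		if (e->revision[n - 1] == '*' ?
		    !strncmp(e->revision, rev, n - 1) :
		    !strcmp(e->revision, rev))
			return 1;
	}
	return 0;
}

int main(void)
{
	printf("r8a7795 ES1.1: %d\n", soc_matches("r8a7795", "ES1.1"));
	printf("r8a7796 ES2.0: %d\n", soc_matches("r8a7796", "ES2.0"));
	return 0;
}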
+ */ +static const struct soc_device_attribute gen3_soc_whitelist[] = { + { .soc_id = "r8a7795", .revision = "ES1.*" }, + { .soc_id = "r8a7795", .revision = "ES2.0" }, + { .soc_id = "r8a7796", .revision = "ES1.0" }, + { /* sentinel */ } +}; + +static int renesas_sdhi_internal_dmac_probe(struct platform_device *pdev) +{ + if (!soc_device_match(gen3_soc_whitelist)) + return -ENODEV; + + return renesas_sdhi_probe(pdev, &renesas_sdhi_internal_dmac_dma_ops); +} + +static const struct dev_pm_ops renesas_sdhi_internal_dmac_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(tmio_mmc_host_runtime_suspend, + tmio_mmc_host_runtime_resume, + NULL) +}; + +static struct platform_driver renesas_internal_dmac_sdhi_driver = { + .driver = { + .name = "renesas_sdhi_internal_dmac", + .pm = &renesas_sdhi_internal_dmac_dev_pm_ops, + .of_match_table = renesas_sdhi_internal_dmac_of_match, + }, + .probe = renesas_sdhi_internal_dmac_probe, + .remove = renesas_sdhi_remove, +}; + +module_platform_driver(renesas_internal_dmac_sdhi_driver); + +MODULE_DESCRIPTION("Renesas SDHI driver for internal DMAC"); +MODULE_AUTHOR("Yoshihiro Shimoda"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c index 642a0dcc8c5c..df4465439e13 100644 --- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c @@ -1,5 +1,5 @@ /* - * DMA function for TMIO MMC implementations + * DMA support use of SYS DMAC with SDHI SD/SDIO controller * * Copyright (C) 2016-17 Renesas Electronics Corporation * Copyright (C) 2016-17 Sang Engineering, Wolfram Sang @@ -18,8 +18,10 @@ #include #include #include +#include #include #include +#include #include "renesas_sdhi.h" #include "tmio_mmc.h" @@ -31,7 +33,8 @@ static const struct renesas_sdhi_of_data of_default_cfg = { }; static const struct renesas_sdhi_of_data of_rz_compatible = { - .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_32BIT_DATA_PORT, + .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_32BIT_DATA_PORT | + TMIO_MMC_HAVE_CBSY, .tmio_ocr_mask = MMC_VDD_32_33, .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ, }; @@ -56,7 +59,8 @@ static struct renesas_sdhi_scc rcar_gen2_scc_taps[] = { static const struct renesas_sdhi_of_data of_rcar_gen2_compatible = { .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE | - TMIO_MMC_CLK_ACTUAL | TMIO_MMC_MIN_RCAR2, + TMIO_MMC_CLK_ACTUAL | TMIO_MMC_HAVE_CBSY | + TMIO_MMC_MIN_RCAR2, .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ | MMC_CAP_CMD23, .dma_buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES, @@ -76,7 +80,8 @@ static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = { static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = { .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE | - TMIO_MMC_CLK_ACTUAL | TMIO_MMC_MIN_RCAR2, + TMIO_MMC_CLK_ACTUAL | TMIO_MMC_HAVE_CBSY | + TMIO_MMC_MIN_RCAR2, .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ | MMC_CAP_CMD23, .bus_shift = 2, @@ -93,6 +98,8 @@ static const struct of_device_id renesas_sdhi_sys_dmac_of_match[] = { { .compatible = "renesas,sdhi-r7s72100", .data = &of_rz_compatible, }, { .compatible = "renesas,sdhi-r8a7778", .data = &of_rcar_gen1_compatible, }, { .compatible = "renesas,sdhi-r8a7779", .data = &of_rcar_gen1_compatible, }, + { .compatible = "renesas,sdhi-r8a7743", .data = &of_rcar_gen2_compatible, }, + { .compatible = "renesas,sdhi-r8a7745", .data = &of_rcar_gen2_compatible, }, { .compatible = 
"renesas,sdhi-r8a7790", .data = &of_rcar_gen2_compatible, }, { .compatible = "renesas,sdhi-r8a7791", .data = &of_rcar_gen2_compatible, }, { .compatible = "renesas,sdhi-r8a7792", .data = &of_rcar_gen2_compatible, }, @@ -126,6 +133,11 @@ static void renesas_sdhi_sys_dmac_abort_dma(struct tmio_mmc_host *host) renesas_sdhi_sys_dmac_enable_dma(host, true); } +static void renesas_sdhi_sys_dmac_dataend_dma(struct tmio_mmc_host *host) +{ + complete(&host->dma_dataend); +} + static void renesas_sdhi_sys_dmac_dma_callback(void *arg) { struct tmio_mmc_host *host = arg; @@ -451,10 +463,24 @@ static const struct tmio_mmc_dma_ops renesas_sdhi_sys_dmac_dma_ops = { .request = renesas_sdhi_sys_dmac_request_dma, .release = renesas_sdhi_sys_dmac_release_dma, .abort = renesas_sdhi_sys_dmac_abort_dma, + .dataend = renesas_sdhi_sys_dmac_dataend_dma, +}; + +/* + * Whitelist of specific R-Car Gen3 SoC ES versions to use this DMAC + * implementation. Currently empty as all supported ES versions use + * the internal DMAC. + */ +static const struct soc_device_attribute gen3_soc_whitelist[] = { + { /* sentinel */ } }; static int renesas_sdhi_sys_dmac_probe(struct platform_device *pdev) { + if (of_device_get_match_data(&pdev->dev) == &of_rcar_gen3_compatible && + !soc_device_match(gen3_soc_whitelist)) + return -ENODEV; + return renesas_sdhi_probe(pdev, &renesas_sdhi_sys_dmac_dma_ops); } diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c index 12d2fbe9c520..76da1687ab37 100644 --- a/drivers/mmc/host/rtsx_usb_sdmmc.c +++ b/drivers/mmc/host/rtsx_usb_sdmmc.c @@ -909,7 +909,7 @@ static int sd_set_bus_width(struct rtsx_usb_sdmmc *host, unsigned char bus_width) { int err = 0; - u8 width[] = { + static const u8 width[] = { [MMC_BUS_WIDTH_1] = SD_BUS_WIDTH_1BIT, [MMC_BUS_WIDTH_4] = SD_BUS_WIDTH_4BIT, [MMC_BUS_WIDTH_8] = SD_BUS_WIDTH_8BIT, diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c index 8896bf533dc7..f7f157a62a4a 100644 --- a/drivers/mmc/host/s3cmci.c +++ b/drivers/mmc/host/s3cmci.c @@ -1313,7 +1313,7 @@ static void s3cmci_enable_sdio_irq(struct mmc_host *mmc, int enable) s3cmci_check_sdio_irq(host); } -static struct mmc_host_ops s3cmci_ops = { +static const struct mmc_host_ops s3cmci_ops = { .request = s3cmci_request, .set_ios = s3cmci_set_ios, .get_ro = mmc_gpio_get_ro, diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c index ac678e9fb19a..08ae0ff13513 100644 --- a/drivers/mmc/host/sdhci-acpi.c +++ b/drivers/mmc/host/sdhci-acpi.c @@ -294,13 +294,10 @@ static int sdhci_acpi_sdio_probe_slot(struct platform_device *pdev, const char *hid, const char *uid) { struct sdhci_acpi_host *c = platform_get_drvdata(pdev); - struct sdhci_host *host; if (!c || !c->host) return 0; - host = c->host; - /* Platform specific code during sdio probe slot goes here */ return 0; @@ -432,7 +429,6 @@ static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(const char *hid, static int sdhci_acpi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - acpi_handle handle = ACPI_HANDLE(dev); struct acpi_device *device, *child; struct sdhci_acpi_host *c; struct sdhci_host *host; @@ -442,7 +438,8 @@ static int sdhci_acpi_probe(struct platform_device *pdev) const char *uid; int err; - if (acpi_bus_get_device(handle, &device)) + device = ACPI_COMPANION(dev); + if (!device) return -ENODEV; hid = acpi_device_hid(device); diff --git a/drivers/mmc/host/sdhci-bcm-kona.c b/drivers/mmc/host/sdhci-bcm-kona.c index 51dd2fd65000..11ca95c60bcf 100644 --- 
a/drivers/mmc/host/sdhci-bcm-kona.c +++ b/drivers/mmc/host/sdhci-bcm-kona.c @@ -186,7 +186,7 @@ static void sdhci_bcm_kona_init_74_clocks(struct sdhci_host *host, udelay(740); } -static struct sdhci_ops sdhci_bcm_kona_ops = { +static const struct sdhci_ops sdhci_bcm_kona_ops = { .set_clock = sdhci_set_clock, .get_max_clock = sdhci_pltfm_clk_get_max_clock, .get_timeout_clock = sdhci_pltfm_clk_get_max_clock, @@ -197,7 +197,7 @@ static struct sdhci_ops sdhci_bcm_kona_ops = { .card_event = sdhci_bcm_kona_card_event, }; -static struct sdhci_pltfm_data sdhci_pltfm_data_kona = { +static const struct sdhci_pltfm_data sdhci_pltfm_data_kona = { .ops = &sdhci_bcm_kona_ops, .quirks = SDHCI_QUIRK_NO_CARD_NO_RESET | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | SDHCI_QUIRK_32BIT_DMA_ADDR | diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c index e2f638338e8f..552bddc5096c 100644 --- a/drivers/mmc/host/sdhci-brcmstb.c +++ b/drivers/mmc/host/sdhci-brcmstb.c @@ -21,41 +21,6 @@ #include "sdhci-pltfm.h" -#ifdef CONFIG_PM_SLEEP - -static int sdhci_brcmstb_suspend(struct device *dev) -{ - struct sdhci_host *host = dev_get_drvdata(dev); - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - int res; - - if (host->tuning_mode != SDHCI_TUNING_MODE_3) - mmc_retune_needed(host->mmc); - - res = sdhci_suspend_host(host); - if (res) - return res; - clk_disable_unprepare(pltfm_host->clk); - return res; -} - -static int sdhci_brcmstb_resume(struct device *dev) -{ - struct sdhci_host *host = dev_get_drvdata(dev); - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - int err; - - err = clk_prepare_enable(pltfm_host->clk); - if (err) - return err; - return sdhci_resume_host(host); -} - -#endif /* CONFIG_PM_SLEEP */ - -static SIMPLE_DEV_PM_OPS(sdhci_brcmstb_pmops, sdhci_brcmstb_suspend, - sdhci_brcmstb_resume); - static const struct sdhci_ops sdhci_brcmstb_ops = { .set_clock = sdhci_set_clock, .set_bus_width = sdhci_set_bus_width, @@ -63,7 +28,7 @@ static const struct sdhci_ops sdhci_brcmstb_ops = { .set_uhs_signaling = sdhci_set_uhs_signaling, }; -static struct sdhci_pltfm_data sdhci_brcmstb_pdata = { +static const struct sdhci_pltfm_data sdhci_brcmstb_pdata = { .ops = &sdhci_brcmstb_ops, }; @@ -131,7 +96,7 @@ MODULE_DEVICE_TABLE(of, sdhci_brcm_of_match); static struct platform_driver sdhci_brcmstb_driver = { .driver = { .name = "sdhci-brcmstb", - .pm = &sdhci_brcmstb_pmops, + .pm = &sdhci_pltfm_pmops, .of_match_table = of_match_ptr(sdhci_brcm_of_match), }, .probe = sdhci_brcmstb_probe, diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c index 19d5698244b5..56529c3d389a 100644 --- a/drivers/mmc/host/sdhci-cadence.c +++ b/drivers/mmc/host/sdhci-cadence.c @@ -67,9 +67,16 @@ */ #define SDHCI_CDNS_MAX_TUNING_LOOP 40 +struct sdhci_cdns_phy_param { + u8 addr; + u8 data; +}; + struct sdhci_cdns_priv { void __iomem *hrs_addr; bool enhanced_strobe; + unsigned int nr_phy_params; + struct sdhci_cdns_phy_param phy_params[0]; }; struct sdhci_cdns_phy_cfg { @@ -115,9 +122,22 @@ static int sdhci_cdns_write_phy_reg(struct sdhci_cdns_priv *priv, return 0; } -static int sdhci_cdns_phy_init(struct device_node *np, - struct sdhci_cdns_priv *priv) +static unsigned int sdhci_cdns_phy_param_count(struct device_node *np) { + unsigned int count = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(sdhci_cdns_phy_cfgs); i++) + if (of_property_read_bool(np, sdhci_cdns_phy_cfgs[i].property)) + count++; + + return count; +} + +static void sdhci_cdns_phy_param_parse(struct device_node *np, + struct 
sdhci_cdns_priv *priv) +{ + struct sdhci_cdns_phy_param *p = priv->phy_params; u32 val; int ret, i; @@ -127,9 +147,19 @@ static int sdhci_cdns_phy_init(struct device_node *np, if (ret) continue; - ret = sdhci_cdns_write_phy_reg(priv, - sdhci_cdns_phy_cfgs[i].addr, - val); + p->addr = sdhci_cdns_phy_cfgs[i].addr; + p->data = val; + p++; + } +} + +static int sdhci_cdns_phy_init(struct sdhci_cdns_priv *priv) +{ + int ret, i; + + for (i = 0; i < priv->nr_phy_params; i++) { + ret = sdhci_cdns_write_phy_reg(priv, priv->phy_params[i].addr, + priv->phy_params[i].data); if (ret) return ret; } @@ -302,6 +332,8 @@ static int sdhci_cdns_probe(struct platform_device *pdev) struct sdhci_pltfm_host *pltfm_host; struct sdhci_cdns_priv *priv; struct clk *clk; + size_t priv_size; + unsigned int nr_phy_params; int ret; struct device *dev = &pdev->dev; @@ -313,7 +345,9 @@ static int sdhci_cdns_probe(struct platform_device *pdev) if (ret) return ret; - host = sdhci_pltfm_init(pdev, &sdhci_cdns_pltfm_data, sizeof(*priv)); + nr_phy_params = sdhci_cdns_phy_param_count(dev->of_node); + priv_size = sizeof(*priv) + sizeof(priv->phy_params[0]) * nr_phy_params; + host = sdhci_pltfm_init(pdev, &sdhci_cdns_pltfm_data, priv_size); if (IS_ERR(host)) { ret = PTR_ERR(host); goto disable_clk; @@ -322,7 +356,8 @@ static int sdhci_cdns_probe(struct platform_device *pdev) pltfm_host = sdhci_priv(host); pltfm_host->clk = clk; - priv = sdhci_cdns_priv(host); + priv = sdhci_pltfm_priv(pltfm_host); + priv->nr_phy_params = nr_phy_params; priv->hrs_addr = host->ioaddr; priv->enhanced_strobe = false; host->ioaddr += SDHCI_CDNS_SRS_BASE; @@ -336,7 +371,9 @@ static int sdhci_cdns_probe(struct platform_device *pdev) if (ret) goto free; - ret = sdhci_cdns_phy_init(dev->of_node, priv); + sdhci_cdns_phy_param_parse(dev->of_node, priv); + + ret = sdhci_cdns_phy_init(priv); if (ret) goto free; @@ -353,6 +390,39 @@ static int sdhci_cdns_probe(struct platform_device *pdev) return ret; } +#ifdef CONFIG_PM_SLEEP +static int sdhci_cdns_resume(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_cdns_priv *priv = sdhci_pltfm_priv(pltfm_host); + int ret; + + ret = clk_prepare_enable(pltfm_host->clk); + if (ret) + return ret; + + ret = sdhci_cdns_phy_init(priv); + if (ret) + goto disable_clk; + + ret = sdhci_resume_host(host); + if (ret) + goto disable_clk; + + return 0; + +disable_clk: + clk_disable_unprepare(pltfm_host->clk); + + return ret; +} +#endif + +static const struct dev_pm_ops sdhci_cdns_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(sdhci_pltfm_suspend, sdhci_cdns_resume) +}; + static const struct of_device_id sdhci_cdns_match[] = { { .compatible = "socionext,uniphier-sd4hc" }, { .compatible = "cdns,sd4hc" }, @@ -363,7 +433,7 @@ MODULE_DEVICE_TABLE(of, sdhci_cdns_match); static struct platform_driver sdhci_cdns_driver = { .driver = { .name = "sdhci-cdns", - .pm = &sdhci_pltfm_pmops, + .pm = &sdhci_cdns_pm_ops, .of_match_table = sdhci_cdns_match, }, .probe = sdhci_cdns_probe, diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h index e7893f21b65e..dfa58f8b8dfa 100644 --- a/drivers/mmc/host/sdhci-esdhc.h +++ b/drivers/mmc/host/sdhci-esdhc.h @@ -54,6 +54,9 @@ #define ESDHC_CLOCK_HCKEN 0x00000002 #define ESDHC_CLOCK_IPGEN 0x00000001 +/* Host Controller Capabilities Register 2 */ +#define ESDHC_CAPABILITIES_1 0x114 + /* Tuning Block Control Register */ #define ESDHC_TBCTL 0x120 #define ESDHC_TB_EN 0x00000004 diff --git 
a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index 9d601dc0d646..fc73e56eb1e2 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -611,7 +611,7 @@ static void msm_hc_select_hs400(struct sdhci_host *host) * HS400 - divided clock (free running MCLK/2) * All other modes - default (free running MCLK) */ -void sdhci_msm_hc_select_mode(struct sdhci_host *host) +static void sdhci_msm_hc_select_mode(struct sdhci_host *host) { struct mmc_ios ios = host->mmc->ios; @@ -1049,7 +1049,7 @@ static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host) * instead directly control the GCC clock as per * HW recommendation. **/ -void __sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock) +static void __sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock) { u16 clk; /* @@ -1133,6 +1133,7 @@ static int sdhci_msm_probe(struct platform_device *pdev) if (IS_ERR(host)) return PTR_ERR(host); + host->sdma_boundary = 0; pltfm_host = sdhci_priv(host); msm_host = sdhci_pltfm_priv(pltfm_host); msm_host->mmc = host->mmc; diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c index b13c0a7d50e4..0720ea717011 100644 --- a/drivers/mmc/host/sdhci-of-arasan.c +++ b/drivers/mmc/host/sdhci-of-arasan.c @@ -216,13 +216,13 @@ static void sdhci_arasan_hs400_enhanced_strobe(struct mmc_host *mmc, u32 vendor; struct sdhci_host *host = mmc_priv(mmc); - vendor = readl(host->ioaddr + SDHCI_ARASAN_VENDOR_REGISTER); + vendor = sdhci_readl(host, SDHCI_ARASAN_VENDOR_REGISTER); if (ios->enhanced_strobe) vendor |= VENDOR_ENHANCED_STROBE; else vendor &= ~VENDOR_ENHANCED_STROBE; - writel(vendor, host->ioaddr + SDHCI_ARASAN_VENDOR_REGISTER); + sdhci_writel(host, vendor, SDHCI_ARASAN_VENDOR_REGISTER); } static void sdhci_arasan_reset(struct sdhci_host *host, u8 mask) @@ -262,7 +262,7 @@ static int sdhci_arasan_voltage_switch(struct mmc_host *mmc, return -EINVAL; } -static struct sdhci_ops sdhci_arasan_ops = { +static const struct sdhci_ops sdhci_arasan_ops = { .set_clock = sdhci_arasan_set_clock, .get_max_clock = sdhci_pltfm_clk_get_max_clock, .get_timeout_clock = sdhci_pltfm_clk_get_max_clock, @@ -271,7 +271,7 @@ static struct sdhci_ops sdhci_arasan_ops = { .set_uhs_signaling = sdhci_set_uhs_signaling, }; -static struct sdhci_pltfm_data sdhci_arasan_pdata = { +static const struct sdhci_pltfm_data sdhci_arasan_pdata = { .ops = &sdhci_arasan_ops, .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c index 1485530c3592..4e47ed6bc716 100644 --- a/drivers/mmc/host/sdhci-of-at91.c +++ b/drivers/mmc/host/sdhci-of-at91.c @@ -42,6 +42,7 @@ struct sdhci_at91_priv { struct clk *hclock; struct clk *gck; struct clk *mainck; + bool restore_needed; }; static void sdhci_at91_set_force_card_detect(struct sdhci_host *host) @@ -146,6 +147,100 @@ static const struct of_device_id sdhci_at91_dt_match[] = { }; MODULE_DEVICE_TABLE(of, sdhci_at91_dt_match); +static int sdhci_at91_set_clks_presets(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host); + int ret; + unsigned int caps0, caps1; + unsigned int clk_base, clk_mul; + unsigned int gck_rate, real_gck_rate; + unsigned int preset_div; + + /* + * The mult clock is provided by as a generated clock by the PMC + * controller. 
In order to set the rate of gck, we have to get the + * base clock rate and the clock mult from capabilities. + */ + clk_prepare_enable(priv->hclock); + caps0 = readl(host->ioaddr + SDHCI_CAPABILITIES); + caps1 = readl(host->ioaddr + SDHCI_CAPABILITIES_1); + clk_base = (caps0 & SDHCI_CLOCK_V3_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT; + clk_mul = (caps1 & SDHCI_CLOCK_MUL_MASK) >> SDHCI_CLOCK_MUL_SHIFT; + gck_rate = clk_base * 1000000 * (clk_mul + 1); + ret = clk_set_rate(priv->gck, gck_rate); + if (ret < 0) { + dev_err(dev, "failed to set gck"); + clk_disable_unprepare(priv->hclock); + return ret; + } + /* + * We need to check if we have the requested rate for gck because in + * some cases this rate could be not supported. If it happens, the rate + * is the closest one gck can provide. We have to update the value + * of clk mul. + */ + real_gck_rate = clk_get_rate(priv->gck); + if (real_gck_rate != gck_rate) { + clk_mul = real_gck_rate / (clk_base * 1000000) - 1; + caps1 &= (~SDHCI_CLOCK_MUL_MASK); + caps1 |= ((clk_mul << SDHCI_CLOCK_MUL_SHIFT) & + SDHCI_CLOCK_MUL_MASK); + /* Set capabilities in r/w mode. */ + writel(SDMMC_CACR_KEY | SDMMC_CACR_CAPWREN, + host->ioaddr + SDMMC_CACR); + writel(caps1, host->ioaddr + SDHCI_CAPABILITIES_1); + /* Set capabilities in ro mode. */ + writel(0, host->ioaddr + SDMMC_CACR); + dev_info(dev, "update clk mul to %u as gck rate is %u Hz\n", + clk_mul, real_gck_rate); + } + + /* + * We have to set preset values because it depends on the clk_mul + * value. Moreover, SDR104 is supported in a degraded mode since the + * maximum sd clock value is 120 MHz instead of 208 MHz. For that + * reason, we need to use presets to support SDR104. + */ + preset_div = DIV_ROUND_UP(real_gck_rate, 24000000) - 1; + writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, + host->ioaddr + SDHCI_PRESET_FOR_SDR12); + preset_div = DIV_ROUND_UP(real_gck_rate, 50000000) - 1; + writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, + host->ioaddr + SDHCI_PRESET_FOR_SDR25); + preset_div = DIV_ROUND_UP(real_gck_rate, 100000000) - 1; + writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, + host->ioaddr + SDHCI_PRESET_FOR_SDR50); + preset_div = DIV_ROUND_UP(real_gck_rate, 120000000) - 1; + writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, + host->ioaddr + SDHCI_PRESET_FOR_SDR104); + preset_div = DIV_ROUND_UP(real_gck_rate, 50000000) - 1; + writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, + host->ioaddr + SDHCI_PRESET_FOR_DDR50); + + clk_prepare_enable(priv->mainck); + clk_prepare_enable(priv->gck); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int sdhci_at91_suspend(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host); + int ret; + + ret = pm_runtime_force_suspend(dev); + + priv->restore_needed = true; + + return ret; +} +#endif /* CONFIG_PM_SLEEP */ + #ifdef CONFIG_PM static int sdhci_at91_runtime_suspend(struct device *dev) { @@ -173,6 +268,15 @@ static int sdhci_at91_runtime_resume(struct device *dev) struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host); int ret; + if (priv->restore_needed) { + ret = sdhci_at91_set_clks_presets(dev); + if (ret) + return ret; + + priv->restore_needed = false; + goto out; + } + ret = clk_prepare_enable(priv->mainck); if (ret) { dev_err(dev, "can't enable mainck\n"); @@ -191,13 +295,13 @@ static int sdhci_at91_runtime_resume(struct device *dev) return ret; } +out: return sdhci_runtime_resume_host(host); } #endif /* 
CONFIG_PM */ static const struct dev_pm_ops sdhci_at91_dev_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, - pm_runtime_force_resume) + SET_SYSTEM_SLEEP_PM_OPS(sdhci_at91_suspend, pm_runtime_force_resume) SET_RUNTIME_PM_OPS(sdhci_at91_runtime_suspend, sdhci_at91_runtime_resume, NULL) @@ -210,11 +314,7 @@ static int sdhci_at91_probe(struct platform_device *pdev) struct sdhci_host *host; struct sdhci_pltfm_host *pltfm_host; struct sdhci_at91_priv *priv; - unsigned int caps0, caps1; - unsigned int clk_base, clk_mul; - unsigned int gck_rate, real_gck_rate; int ret; - unsigned int preset_div; match = of_match_device(sdhci_at91_dt_match, &pdev->dev); if (!match) @@ -246,66 +346,11 @@ static int sdhci_at91_probe(struct platform_device *pdev) return PTR_ERR(priv->gck); } - /* - * The mult clock is provided by as a generated clock by the PMC - * controller. In order to set the rate of gck, we have to get the - * base clock rate and the clock mult from capabilities. - */ - clk_prepare_enable(priv->hclock); - caps0 = readl(host->ioaddr + SDHCI_CAPABILITIES); - caps1 = readl(host->ioaddr + SDHCI_CAPABILITIES_1); - clk_base = (caps0 & SDHCI_CLOCK_V3_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT; - clk_mul = (caps1 & SDHCI_CLOCK_MUL_MASK) >> SDHCI_CLOCK_MUL_SHIFT; - gck_rate = clk_base * 1000000 * (clk_mul + 1); - ret = clk_set_rate(priv->gck, gck_rate); - if (ret < 0) { - dev_err(&pdev->dev, "failed to set gck"); - goto hclock_disable_unprepare; - } - /* - * We need to check if we have the requested rate for gck because in - * some cases this rate could be not supported. If it happens, the rate - * is the closest one gck can provide. We have to update the value - * of clk mul. - */ - real_gck_rate = clk_get_rate(priv->gck); - if (real_gck_rate != gck_rate) { - clk_mul = real_gck_rate / (clk_base * 1000000) - 1; - caps1 &= (~SDHCI_CLOCK_MUL_MASK); - caps1 |= ((clk_mul << SDHCI_CLOCK_MUL_SHIFT) & SDHCI_CLOCK_MUL_MASK); - /* Set capabilities in r/w mode. */ - writel(SDMMC_CACR_KEY | SDMMC_CACR_CAPWREN, host->ioaddr + SDMMC_CACR); - writel(caps1, host->ioaddr + SDHCI_CAPABILITIES_1); - /* Set capabilities in ro mode. */ - writel(0, host->ioaddr + SDMMC_CACR); - dev_info(&pdev->dev, "update clk mul to %u as gck rate is %u Hz\n", - clk_mul, real_gck_rate); - } + ret = sdhci_at91_set_clks_presets(&pdev->dev); + if (ret) + goto sdhci_pltfm_free; - /* - * We have to set preset values because it depends on the clk_mul - * value. Moreover, SDR104 is supported in a degraded mode since the - * maximum sd clock value is 120 MHz instead of 208 MHz. For that - * reason, we need to use presets to support SDR104. 
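The gck and preset arithmetic that probe now delegates to sdhci_at91_set_clks_presets() (shown in full above) is easier to follow with concrete numbers. Below is a minimal sketch of the same computation; the capability values (clk_base = 12 MHz, clk_mul = 39), the 400 MHz achievable rate, and the helper name at91_gck_example() are all assumptions for illustration, not taken from the driver or real hardware.

	#include <linux/kernel.h>	/* DIV_ROUND_UP() */

	/*
	 * Assumed numbers: clk_base = 12 (MHz) and clk_mul = 39 request
	 * gck = 12 MHz * (39 + 1) = 480 MHz.  If the PMC can only deliver
	 * 400 MHz, clk_mul is re-derived as 400 / 12 - 1 = 32, and the SDR104
	 * preset divider (card clock capped at 120 MHz on this IP) becomes
	 * DIV_ROUND_UP(400 MHz, 120 MHz) - 1 = 3.
	 */
	static void at91_gck_example(unsigned int clk_base_mhz,
				     unsigned int real_gck_rate,
				     unsigned int *clk_mul,
				     unsigned int *sdr104_preset_div)
	{
		*clk_mul = real_gck_rate / (clk_base_mhz * 1000000) - 1;
		*sdr104_preset_div = DIV_ROUND_UP(real_gck_rate, 120000000) - 1;
	}

The same DIV_ROUND_UP(real_gck_rate, target) - 1 pattern produces the SDR12/SDR25/SDR50/DDR50 presets with their respective target clocks.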
- */ - preset_div = DIV_ROUND_UP(real_gck_rate, 24000000) - 1; - writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, - host->ioaddr + SDHCI_PRESET_FOR_SDR12); - preset_div = DIV_ROUND_UP(real_gck_rate, 50000000) - 1; - writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, - host->ioaddr + SDHCI_PRESET_FOR_SDR25); - preset_div = DIV_ROUND_UP(real_gck_rate, 100000000) - 1; - writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, - host->ioaddr + SDHCI_PRESET_FOR_SDR50); - preset_div = DIV_ROUND_UP(real_gck_rate, 120000000) - 1; - writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, - host->ioaddr + SDHCI_PRESET_FOR_SDR104); - preset_div = DIV_ROUND_UP(real_gck_rate, 50000000) - 1; - writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, - host->ioaddr + SDHCI_PRESET_FOR_DDR50); - - clk_prepare_enable(priv->mainck); - clk_prepare_enable(priv->gck); + priv->restore_needed = false; ret = mmc_of_parse(host->mmc); if (ret) @@ -368,8 +413,8 @@ static int sdhci_at91_probe(struct platform_device *pdev) clocks_disable_unprepare: clk_disable_unprepare(priv->gck); clk_disable_unprepare(priv->mainck); -hclock_disable_unprepare: clk_disable_unprepare(priv->hclock); +sdhci_pltfm_free: sdhci_pltfm_free(pdev); return ret; } diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index 44b016baa585..d96a057a7db8 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c @@ -86,6 +86,17 @@ static u32 esdhc_readl_fixup(struct sdhci_host *host, return ret; } + /* + * DTS properties of mmc host are used to enable each speed mode + * according to soc and board capability. So clean up + * SDR50/SDR104/DDR50 support bits here. + */ + if (spec_reg == SDHCI_CAPABILITIES_1) { + ret = value & ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 | + SDHCI_SUPPORT_DDR50); + return ret; + } + ret = value; return ret; } @@ -249,7 +260,11 @@ static u32 esdhc_be_readl(struct sdhci_host *host, int reg) u32 ret; u32 value; - value = ioread32be(host->ioaddr + reg); + if (reg == SDHCI_CAPABILITIES_1) + value = ioread32be(host->ioaddr + ESDHC_CAPABILITIES_1); + else + value = ioread32be(host->ioaddr + reg); + ret = esdhc_readl_fixup(host, reg, value); return ret; @@ -260,7 +275,11 @@ static u32 esdhc_le_readl(struct sdhci_host *host, int reg) u32 ret; u32 value; - value = ioread32(host->ioaddr + reg); + if (reg == SDHCI_CAPABILITIES_1) + value = ioread32(host->ioaddr + ESDHC_CAPABILITIES_1); + else + value = ioread32(host->ioaddr + reg); + ret = esdhc_readl_fixup(host, reg, value); return ret; diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index e1721ac37919..d0ccc6729fd2 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -35,7 +35,6 @@ #include "sdhci-pci-o2micro.h" static int sdhci_pci_enable_dma(struct sdhci_host *host); -static void sdhci_pci_set_bus_width(struct sdhci_host *host, int width); static void sdhci_pci_hw_reset(struct sdhci_host *host); #ifdef CONFIG_PM_SLEEP @@ -393,6 +392,7 @@ static const struct sdhci_pci_fixes sdhci_intel_pch_sdio = { enum { INTEL_DSM_FNS = 0, + INTEL_DSM_V18_SWITCH = 3, INTEL_DSM_DRV_STRENGTH = 9, INTEL_DSM_D3_RETUNE = 10, }; @@ -558,14 +558,28 @@ static void intel_hs400_enhanced_strobe(struct mmc_host *mmc, sdhci_writel(host, val, INTEL_HS400_ES_REG); } +static void sdhci_intel_voltage_switch(struct sdhci_host *host) +{ + struct sdhci_pci_slot *slot = sdhci_priv(host); + struct intel_host *intel_host = sdhci_pci_priv(slot); + struct device *dev = &slot->chip->pdev->dev; + u32 result = 
0; + int err; + + err = intel_dsm(intel_host, dev, INTEL_DSM_V18_SWITCH, &result); + pr_debug("%s: %s DSM error %d result %u\n", + mmc_hostname(host->mmc), __func__, err, result); +} + static const struct sdhci_ops sdhci_intel_byt_ops = { .set_clock = sdhci_set_clock, .set_power = sdhci_intel_set_power, .enable_dma = sdhci_pci_enable_dma, - .set_bus_width = sdhci_pci_set_bus_width, + .set_bus_width = sdhci_set_bus_width, .reset = sdhci_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, .hw_reset = sdhci_pci_hw_reset, + .voltage_switch = sdhci_intel_voltage_switch, }; static void byt_read_dsm(struct sdhci_pci_slot *slot) @@ -730,6 +744,24 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sd = { #define INTEL_MRFLD_SD 2 #define INTEL_MRFLD_SDIO 3 +#ifdef CONFIG_ACPI +static void intel_mrfld_mmc_fix_up_power_slot(struct sdhci_pci_slot *slot) +{ + struct acpi_device *device, *child; + + device = ACPI_COMPANION(&slot->chip->pdev->dev); + if (!device) + return; + + acpi_device_fix_up_power(device); + list_for_each_entry(child, &device->children, node) + if (child->status.present && child->status.enabled) + acpi_device_fix_up_power(child); +} +#else +static inline void intel_mrfld_mmc_fix_up_power_slot(struct sdhci_pci_slot *slot) {} +#endif + static int intel_mrfld_mmc_probe_slot(struct sdhci_pci_slot *slot) { unsigned int func = PCI_FUNC(slot->chip->pdev->devfn); @@ -751,6 +783,8 @@ static int intel_mrfld_mmc_probe_slot(struct sdhci_pci_slot *slot) default: return -ENODEV; } + + intel_mrfld_mmc_fix_up_power_slot(slot); return 0; } @@ -1197,7 +1231,7 @@ static int amd_probe(struct sdhci_pci_chip *chip) static const struct sdhci_ops amd_sdhci_pci_ops = { .set_clock = sdhci_set_clock, .enable_dma = sdhci_pci_enable_dma, - .set_bus_width = sdhci_pci_set_bus_width, + .set_bus_width = sdhci_set_bus_width, .reset = sdhci_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, .platform_execute_tuning = amd_execute_tuning, @@ -1313,29 +1347,6 @@ static int sdhci_pci_enable_dma(struct sdhci_host *host) return 0; } -static void sdhci_pci_set_bus_width(struct sdhci_host *host, int width) -{ - u8 ctrl; - - ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); - - switch (width) { - case MMC_BUS_WIDTH_8: - ctrl |= SDHCI_CTRL_8BITBUS; - ctrl &= ~SDHCI_CTRL_4BITBUS; - break; - case MMC_BUS_WIDTH_4: - ctrl |= SDHCI_CTRL_4BITBUS; - ctrl &= ~SDHCI_CTRL_8BITBUS; - break; - default: - ctrl &= ~(SDHCI_CTRL_8BITBUS | SDHCI_CTRL_4BITBUS); - break; - } - - sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); -} - static void sdhci_pci_gpio_hw_reset(struct sdhci_host *host) { struct sdhci_pci_slot *slot = sdhci_priv(host); @@ -1362,7 +1373,7 @@ static void sdhci_pci_hw_reset(struct sdhci_host *host) static const struct sdhci_ops sdhci_pci_ops = { .set_clock = sdhci_set_clock, .enable_dma = sdhci_pci_enable_dma, - .set_bus_width = sdhci_pci_set_bus_width, + .set_bus_width = sdhci_set_bus_width, .reset = sdhci_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, .hw_reset = sdhci_pci_hw_reset, diff --git a/drivers/mmc/host/sdhci-pic32.c b/drivers/mmc/host/sdhci-pic32.c index 72c13b6f05f9..a6caa49ca25a 100644 --- a/drivers/mmc/host/sdhci-pic32.c +++ b/drivers/mmc/host/sdhci-pic32.c @@ -97,7 +97,7 @@ static const struct sdhci_ops pic32_sdhci_ops = { .get_ro = pic32_sdhci_get_ro, }; -static struct sdhci_pltfm_data sdhci_pic32_pdata = { +static const struct sdhci_pltfm_data sdhci_pic32_pdata = { .ops = &pic32_sdhci_ops, .quirks = SDHCI_QUIRK_NO_HISPD_BIT, .quirks2 = SDHCI_QUIRK2_NO_1_8_V, diff --git a/drivers/mmc/host/sdhci-pltfm.c 
b/drivers/mmc/host/sdhci-pltfm.c index e090d8c42ddb..02bea6159d79 100644 --- a/drivers/mmc/host/sdhci-pltfm.c +++ b/drivers/mmc/host/sdhci-pltfm.c @@ -209,22 +209,42 @@ int sdhci_pltfm_unregister(struct platform_device *pdev) EXPORT_SYMBOL_GPL(sdhci_pltfm_unregister); #ifdef CONFIG_PM_SLEEP -static int sdhci_pltfm_suspend(struct device *dev) +int sdhci_pltfm_suspend(struct device *dev) { struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + int ret; if (host->tuning_mode != SDHCI_TUNING_MODE_3) mmc_retune_needed(host->mmc); - return sdhci_suspend_host(host); -} + ret = sdhci_suspend_host(host); + if (ret) + return ret; -static int sdhci_pltfm_resume(struct device *dev) + clk_disable_unprepare(pltfm_host->clk); + + return 0; +} +EXPORT_SYMBOL_GPL(sdhci_pltfm_suspend); + +int sdhci_pltfm_resume(struct device *dev) { struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + int ret; - return sdhci_resume_host(host); + ret = clk_prepare_enable(pltfm_host->clk); + if (ret) + return ret; + + ret = sdhci_resume_host(host); + if (ret) + clk_disable_unprepare(pltfm_host->clk); + + return ret; } +EXPORT_SYMBOL_GPL(sdhci_pltfm_resume); #endif const struct dev_pm_ops sdhci_pltfm_pmops = { diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h index 957839d0fe37..1e91fb1c020e 100644 --- a/drivers/mmc/host/sdhci-pltfm.h +++ b/drivers/mmc/host/sdhci-pltfm.h @@ -109,6 +109,8 @@ static inline void *sdhci_pltfm_priv(struct sdhci_pltfm_host *host) return host->private; } +int sdhci_pltfm_suspend(struct device *dev); +int sdhci_pltfm_resume(struct device *dev); extern const struct dev_pm_ops sdhci_pltfm_pmops; #endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */ diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c index 995083ce1c46..8986f9d9cf98 100644 --- a/drivers/mmc/host/sdhci-pxav2.c +++ b/drivers/mmc/host/sdhci-pxav2.c @@ -178,17 +178,17 @@ static int sdhci_pxav2_probe(struct platform_device *pdev) pltfm_host = sdhci_priv(host); - clk = clk_get(dev, "PXA-SDHCLK"); + clk = devm_clk_get(dev, "PXA-SDHCLK"); if (IS_ERR(clk)) { dev_err(dev, "failed to get io clock\n"); ret = PTR_ERR(clk); - goto err_clk_get; + goto free; } pltfm_host->clk = clk; ret = clk_prepare_enable(clk); if (ret) { dev_err(&pdev->dev, "failed to enable io clock\n"); - goto err_clk_enable; + goto free; } host->quirks = SDHCI_QUIRK_BROKEN_ADMA @@ -223,34 +223,18 @@ static int sdhci_pxav2_probe(struct platform_device *pdev) ret = sdhci_add_host(host); if (ret) { dev_err(&pdev->dev, "failed to add host\n"); - goto err_add_host; + goto disable_clk; } return 0; -err_add_host: +disable_clk: clk_disable_unprepare(clk); -err_clk_enable: - clk_put(clk); -err_clk_get: +free: sdhci_pltfm_free(pdev); return ret; } -static int sdhci_pxav2_remove(struct platform_device *pdev) -{ - struct sdhci_host *host = platform_get_drvdata(pdev); - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - - sdhci_remove_host(host, 1); - - clk_disable_unprepare(pltfm_host->clk); - clk_put(pltfm_host->clk); - sdhci_pltfm_free(pdev); - - return 0; -} - static struct platform_driver sdhci_pxav2_driver = { .driver = { .name = "sdhci-pxav2", @@ -258,7 +242,7 @@ static struct platform_driver sdhci_pxav2_driver = { .pm = &sdhci_pltfm_pmops, }, .probe = sdhci_pxav2_probe, - .remove = sdhci_pxav2_remove, + .remove = sdhci_pltfm_unregister, }; module_platform_driver(sdhci_pxav2_driver); diff --git a/drivers/mmc/host/sdhci-pxav3.c 
b/drivers/mmc/host/sdhci-pxav3.c index f953f35c2624..a34434166ca7 100644 --- a/drivers/mmc/host/sdhci-pxav3.c +++ b/drivers/mmc/host/sdhci-pxav3.c @@ -337,7 +337,7 @@ static const struct sdhci_ops pxav3_sdhci_ops = { .set_uhs_signaling = pxav3_set_uhs_signaling, }; -static struct sdhci_pltfm_data sdhci_pxav3_pdata = { +static const struct sdhci_pltfm_data sdhci_pxav3_pdata = { .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC | SDHCI_QUIRK_32BIT_ADMA_SIZE diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c index 7c065a70f92b..d328fcf284d1 100644 --- a/drivers/mmc/host/sdhci-s3c.c +++ b/drivers/mmc/host/sdhci-s3c.c @@ -414,43 +414,11 @@ static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock) sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); } -/** - * sdhci_s3c_set_bus_width - support 8bit buswidth - * @host: The SDHCI host being queried - * @width: MMC_BUS_WIDTH_ macro for the bus width being requested - * - * We have 8-bit width support but is not a v3 controller. - * So we add platform_bus_width() and support 8bit width. - */ -static void sdhci_s3c_set_bus_width(struct sdhci_host *host, int width) -{ - u8 ctrl; - - ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); - - switch (width) { - case MMC_BUS_WIDTH_8: - ctrl |= SDHCI_CTRL_8BITBUS; - ctrl &= ~SDHCI_CTRL_4BITBUS; - break; - case MMC_BUS_WIDTH_4: - ctrl |= SDHCI_CTRL_4BITBUS; - ctrl &= ~SDHCI_CTRL_8BITBUS; - break; - default: - ctrl &= ~SDHCI_CTRL_4BITBUS; - ctrl &= ~SDHCI_CTRL_8BITBUS; - break; - } - - sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); -} - static struct sdhci_ops sdhci_s3c_ops = { .get_max_clock = sdhci_s3c_get_max_clk, .set_clock = sdhci_s3c_set_clock, .get_min_clock = sdhci_s3c_get_min_clock, - .set_bus_width = sdhci_s3c_set_bus_width, + .set_bus_width = sdhci_set_bus_width, .reset = sdhci_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, }; diff --git a/drivers/mmc/host/sdhci-sirf.c b/drivers/mmc/host/sdhci-sirf.c index c251c6c0a112..391d52b467ca 100644 --- a/drivers/mmc/host/sdhci-sirf.c +++ b/drivers/mmc/host/sdhci-sirf.c @@ -146,7 +146,7 @@ static int sdhci_sirf_execute_tuning(struct sdhci_host *host, u32 opcode) return rc; } -static struct sdhci_ops sdhci_sirf_ops = { +static const struct sdhci_ops sdhci_sirf_ops = { .read_l = sdhci_sirf_readl_le, .read_w = sdhci_sirf_readw_le, .platform_execute_tuning = sdhci_sirf_execute_tuning, @@ -157,7 +157,7 @@ static struct sdhci_ops sdhci_sirf_ops = { .set_uhs_signaling = sdhci_set_uhs_signaling, }; -static struct sdhci_pltfm_data sdhci_sirf_pdata = { +static const struct sdhci_pltfm_data sdhci_sirf_pdata = { .ops = &sdhci_sirf_ops, .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | @@ -230,43 +230,6 @@ static int sdhci_sirf_probe(struct platform_device *pdev) return ret; } -#ifdef CONFIG_PM_SLEEP -static int sdhci_sirf_suspend(struct device *dev) -{ - struct sdhci_host *host = dev_get_drvdata(dev); - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - int ret; - - if (host->tuning_mode != SDHCI_TUNING_MODE_3) - mmc_retune_needed(host->mmc); - - ret = sdhci_suspend_host(host); - if (ret) - return ret; - - clk_disable(pltfm_host->clk); - - return 0; -} - -static int sdhci_sirf_resume(struct device *dev) -{ - struct sdhci_host *host = dev_get_drvdata(dev); - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - int ret; - - ret = clk_enable(pltfm_host->clk); - if (ret) { - dev_dbg(dev, "Resume: Error enabling clock\n"); - return ret; - } - - return 
sdhci_resume_host(host); -} -#endif - -static SIMPLE_DEV_PM_OPS(sdhci_sirf_pm_ops, sdhci_sirf_suspend, sdhci_sirf_resume); - static const struct of_device_id sdhci_sirf_of_match[] = { { .compatible = "sirf,prima2-sdhc" }, { } @@ -277,7 +240,7 @@ static struct platform_driver sdhci_sirf_driver = { .driver = { .name = "sdhci-sirf", .of_match_table = sdhci_sirf_of_match, - .pm = &sdhci_sirf_pm_ops, + .pm = &sdhci_pltfm_pmops, }, .probe = sdhci_sirf_probe, .remove = sdhci_pltfm_unregister, diff --git a/drivers/mmc/host/sdhci-st.c b/drivers/mmc/host/sdhci-st.c index 68c36c9fa231..c32daed0d418 100644 --- a/drivers/mmc/host/sdhci-st.c +++ b/drivers/mmc/host/sdhci-st.c @@ -371,7 +371,7 @@ static int sdhci_st_probe(struct platform_device *pdev) if (IS_ERR(icnclk)) icnclk = NULL; - rstc = devm_reset_control_get(&pdev->dev, NULL); + rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); if (IS_ERR(rstc)) rstc = NULL; else @@ -394,8 +394,17 @@ static int sdhci_st_probe(struct platform_device *pdev) goto err_of; } - clk_prepare_enable(clk); - clk_prepare_enable(icnclk); + ret = clk_prepare_enable(clk); + if (ret) { + dev_err(&pdev->dev, "Failed to prepare clock\n"); + goto err_of; + } + + ret = clk_prepare_enable(icnclk); + if (ret) { + dev_err(&pdev->dev, "Failed to prepare icn clock\n"); + goto err_icnclk; + } /* Configure the FlashSS Top registers for setting eMMC TX/RX delay */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, @@ -429,6 +438,7 @@ static int sdhci_st_probe(struct platform_device *pdev) err_out: clk_disable_unprepare(icnclk); +err_icnclk: clk_disable_unprepare(clk); err_of: sdhci_pltfm_free(pdev); @@ -487,9 +497,17 @@ static int sdhci_st_resume(struct device *dev) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct st_mmc_platform_data *pdata = sdhci_pltfm_priv(pltfm_host); struct device_node *np = dev->of_node; + int ret; - clk_prepare_enable(pltfm_host->clk); - clk_prepare_enable(pdata->icnclk); + ret = clk_prepare_enable(pltfm_host->clk); + if (ret) + return ret; + + ret = clk_prepare_enable(pdata->icnclk); + if (ret) { + clk_disable_unprepare(pltfm_host->clk); + return ret; + } if (pdata->rstc) reset_control_deassert(pdata->rstc); diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c index 7f93079c7a3a..0cd6fa80db66 100644 --- a/drivers/mmc/host/sdhci-tegra.c +++ b/drivers/mmc/host/sdhci-tegra.c @@ -190,25 +190,6 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask) tegra_host->ddr_signaling = false; } -static void tegra_sdhci_set_bus_width(struct sdhci_host *host, int bus_width) -{ - u32 ctrl; - - ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); - if ((host->mmc->caps & MMC_CAP_8_BIT_DATA) && - (bus_width == MMC_BUS_WIDTH_8)) { - ctrl &= ~SDHCI_CTRL_4BITBUS; - ctrl |= SDHCI_CTRL_8BITBUS; - } else { - ctrl &= ~SDHCI_CTRL_8BITBUS; - if (bus_width == MMC_BUS_WIDTH_4) - ctrl |= SDHCI_CTRL_4BITBUS; - else - ctrl &= ~SDHCI_CTRL_4BITBUS; - } - sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); -} - static void tegra_sdhci_pad_autocalib(struct sdhci_host *host) { u32 val; @@ -323,7 +304,7 @@ static const struct sdhci_ops tegra_sdhci_ops = { .read_w = tegra_sdhci_readw, .write_l = tegra_sdhci_writel, .set_clock = tegra_sdhci_set_clock, - .set_bus_width = tegra_sdhci_set_bus_width, + .set_bus_width = sdhci_set_bus_width, .reset = tegra_sdhci_reset, .platform_execute_tuning = tegra_sdhci_execute_tuning, .set_uhs_signaling = tegra_sdhci_set_uhs_signaling, @@ -371,7 +352,7 @@ static const struct sdhci_ops tegra114_sdhci_ops = { .write_w = 
tegra_sdhci_writew, .write_l = tegra_sdhci_writel, .set_clock = tegra_sdhci_set_clock, - .set_bus_width = tegra_sdhci_set_bus_width, + .set_bus_width = sdhci_set_bus_width, .reset = tegra_sdhci_reset, .platform_execute_tuning = tegra_sdhci_execute_tuning, .set_uhs_signaling = tegra_sdhci_set_uhs_signaling, @@ -508,7 +489,8 @@ static int sdhci_tegra_probe(struct platform_device *pdev) clk_prepare_enable(clk); pltfm_host->clk = clk; - tegra_host->rst = devm_reset_control_get(&pdev->dev, "sdhci"); + tegra_host->rst = devm_reset_control_get_exclusive(&pdev->dev, + "sdhci"); if (IS_ERR(tegra_host->rst)) { rc = PTR_ERR(tegra_host->rst); dev_err(&pdev->dev, "failed to get reset control: %d\n", rc); diff --git a/drivers/mmc/host/sdhci-xenon-phy.c b/drivers/mmc/host/sdhci-xenon-phy.c index f7e26b031e76..ec8794335241 100644 --- a/drivers/mmc/host/sdhci-xenon-phy.c +++ b/drivers/mmc/host/sdhci-xenon-phy.c @@ -409,17 +409,30 @@ static int xenon_emmc_phy_config_tuning(struct sdhci_host *host) return 0; } -static void xenon_emmc_phy_disable_data_strobe(struct sdhci_host *host) +static void xenon_emmc_phy_disable_strobe(struct sdhci_host *host) { + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); u32 reg; - /* Disable SDHC Data Strobe */ + /* Disable both SDHC Data Strobe and Enhanced Strobe */ reg = sdhci_readl(host, XENON_SLOT_EMMC_CTRL); - reg &= ~XENON_ENABLE_DATA_STROBE; + reg &= ~(XENON_ENABLE_DATA_STROBE | XENON_ENABLE_RESP_STROBE); sdhci_writel(host, reg, XENON_SLOT_EMMC_CTRL); + + /* Clear Strobe line Pull down or Pull up */ + if (priv->phy_type == EMMC_5_0_PHY) { + reg = sdhci_readl(host, XENON_EMMC_5_0_PHY_PAD_CONTROL); + reg &= ~(XENON_EMMC5_FC_QSP_PD | XENON_EMMC5_FC_QSP_PU); + sdhci_writel(host, reg, XENON_EMMC_5_0_PHY_PAD_CONTROL); + } else { + reg = sdhci_readl(host, XENON_EMMC_PHY_PAD_CONTROL1); + reg &= ~(XENON_EMMC5_1_FC_QSP_PD | XENON_EMMC5_1_FC_QSP_PU); + sdhci_writel(host, reg, XENON_EMMC_PHY_PAD_CONTROL1); + } } -/* Set HS400 Data Strobe */ +/* Set HS400 Data Strobe and Enhanced Strobe */ static void xenon_emmc_phy_strobe_delay_adj(struct sdhci_host *host) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); @@ -439,6 +452,15 @@ static void xenon_emmc_phy_strobe_delay_adj(struct sdhci_host *host) /* Enable SDHC Data Strobe */ reg = sdhci_readl(host, XENON_SLOT_EMMC_CTRL); reg |= XENON_ENABLE_DATA_STROBE; + /* + * Enable SDHC Enhanced Strobe if supported + * Xenon Enhanced Strobe should be enabled only when + * 1. card is in HS400 mode and + * 2. SDCLK is higher than 52MHz + * 3. DLL is enabled + */ + if (host->mmc->ios.enhanced_strobe) + reg |= XENON_ENABLE_RESP_STROBE; sdhci_writel(host, reg, XENON_SLOT_EMMC_CTRL); /* Set Data Strobe Pull down */ @@ -615,7 +637,7 @@ static void xenon_emmc_phy_set(struct sdhci_host *host, sdhci_writel(host, phy_regs->logic_timing_val, phy_regs->logic_timing_adj); else - xenon_emmc_phy_disable_data_strobe(host); + xenon_emmc_phy_disable_strobe(host); phy_init: xenon_emmc_phy_init(host); @@ -705,7 +727,7 @@ void xenon_soc_pad_ctrl(struct sdhci_host *host, /* * Setting PHY when card is working in High Speed Mode. - * HS400 set data strobe line. + * HS400 set Data Strobe and Enhanced Strobe if it is supported. * HS200/SDR104 set tuning config to prepare for tuning. 
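To make the Xenon strobe handling above a little more explicit: the core only requests Enhanced Strobe through ios->enhanced_strobe, so the PHY code ORs XENON_ENABLE_RESP_STROBE in next to the existing Data Strobe enable when HS400 is configured, and the disable path now clears both bits together with the strobe-line pull-up/pull-down so a later non-HS400 mode starts from a neutral pad state. A condensed sketch of that gating follows; xenon_strobe_bits() is a hypothetical helper written only to show the condition (it assumes struct sdhci_host from sdhci.h and the XENON_* defines from sdhci-xenon.h), not part of the driver:

	/* Hypothetical helper: which strobe enable bits HS400 needs right now. */
	static u32 xenon_strobe_bits(struct sdhci_host *host)
	{
		u32 bits = XENON_ENABLE_DATA_STROBE;	/* always set for HS400 */

		/* Enhanced Strobe only when the core asked for it in this ios */
		if (host->mmc->ios.enhanced_strobe)
			bits |= XENON_ENABLE_RESP_STROBE;

		return bits;
	}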
*/ static int xenon_hs_delay_adj(struct sdhci_host *host) diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c index c580af05b033..0842bbc2d7ad 100644 --- a/drivers/mmc/host/sdhci-xenon.c +++ b/drivers/mmc/host/sdhci-xenon.c @@ -18,6 +18,8 @@ #include #include #include +#include +#include #include "sdhci-pltfm.h" #include "sdhci-xenon.h" @@ -330,7 +332,8 @@ static int xenon_execute_tuning(struct mmc_host *mmc, u32 opcode) { struct sdhci_host *host = mmc_priv(mmc); - if (host->timing == MMC_TIMING_UHS_DDR50) + if (host->timing == MMC_TIMING_UHS_DDR50 || + host->timing == MMC_TIMING_MMC_DDR52) return 0; /* @@ -490,9 +493,20 @@ static int xenon_probe(struct platform_device *pdev) if (err) goto free_pltfm; + priv->axi_clk = devm_clk_get(&pdev->dev, "axi"); + if (IS_ERR(priv->axi_clk)) { + err = PTR_ERR(priv->axi_clk); + if (err == -EPROBE_DEFER) + goto err_clk; + } else { + err = clk_prepare_enable(priv->axi_clk); + if (err) + goto err_clk; + } + err = mmc_of_parse(host->mmc); if (err) - goto err_clk; + goto err_clk_axi; sdhci_get_of_property(pdev); @@ -501,20 +515,33 @@ static int xenon_probe(struct platform_device *pdev) /* Xenon specific dt parse */ err = xenon_probe_dt(pdev); if (err) - goto err_clk; + goto err_clk_axi; err = xenon_sdhc_prepare(host); if (err) - goto err_clk; + goto err_clk_axi; + + pm_runtime_get_noresume(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, 50); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_enable(&pdev->dev); + pm_suspend_ignore_children(&pdev->dev, 1); err = sdhci_add_host(host); if (err) goto remove_sdhc; + pm_runtime_put_autosuspend(&pdev->dev); + return 0; remove_sdhc: + pm_runtime_disable(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); xenon_sdhc_unprepare(host); +err_clk_axi: + clk_disable_unprepare(priv->axi_clk); err_clk: clk_disable_unprepare(pltfm_host->clk); free_pltfm: @@ -526,11 +553,16 @@ static int xenon_remove(struct platform_device *pdev) { struct sdhci_host *host = platform_get_drvdata(pdev); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); + + pm_runtime_get_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); sdhci_remove_host(host, 0); xenon_sdhc_unprepare(host); - + clk_disable_unprepare(priv->axi_clk); clk_disable_unprepare(pltfm_host->clk); sdhci_pltfm_free(pdev); @@ -538,6 +570,84 @@ static int xenon_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM_SLEEP +static int xenon_suspend(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); + int ret; + + ret = pm_runtime_force_suspend(dev); + + priv->restore_needed = true; + return ret; +} +#endif + +#ifdef CONFIG_PM +static int xenon_runtime_suspend(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); + int ret; + + ret = sdhci_runtime_suspend_host(host); + if (ret) + return ret; + + if (host->tuning_mode != SDHCI_TUNING_MODE_3) + mmc_retune_needed(host->mmc); + + clk_disable_unprepare(pltfm_host->clk); + /* + * Need to update the priv->clock here, or when runtime resume + * back, phy don't aware the clock change and won't adjust phy + * which will cause cmd err + */ + priv->clock = 0; + return 0; +} + +static int 
xenon_runtime_resume(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); + int ret; + + ret = clk_prepare_enable(pltfm_host->clk); + if (ret) { + dev_err(dev, "can't enable mainck\n"); + return ret; + } + + if (priv->restore_needed) { + ret = xenon_sdhc_prepare(host); + if (ret) + goto out; + priv->restore_needed = false; + } + + ret = sdhci_runtime_resume_host(host); + if (ret) + goto out; + return 0; +out: + clk_disable_unprepare(pltfm_host->clk); + return ret; +} +#endif /* CONFIG_PM */ + +static const struct dev_pm_ops sdhci_xenon_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(xenon_suspend, + pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(xenon_runtime_suspend, + xenon_runtime_resume, + NULL) +}; + static const struct of_device_id sdhci_xenon_dt_ids[] = { { .compatible = "marvell,armada-ap806-sdhci",}, { .compatible = "marvell,armada-cp110-sdhci",}, @@ -550,7 +660,7 @@ static struct platform_driver sdhci_xenon_driver = { .driver = { .name = "xenon-sdhci", .of_match_table = sdhci_xenon_dt_ids, - .pm = &sdhci_pltfm_pmops, + .pm = &sdhci_xenon_dev_pm_ops, }, .probe = xenon_probe, .remove = xenon_remove, diff --git a/drivers/mmc/host/sdhci-xenon.h b/drivers/mmc/host/sdhci-xenon.h index 73debb42dc2f..9994995c7c56 100644 --- a/drivers/mmc/host/sdhci-xenon.h +++ b/drivers/mmc/host/sdhci-xenon.h @@ -33,6 +33,7 @@ #define XENON_TUNING_STEP_DIVIDER BIT(6) #define XENON_SLOT_EMMC_CTRL 0x0130 +#define XENON_ENABLE_RESP_STROBE BIT(25) #define XENON_ENABLE_DATA_STROBE BIT(24) #define XENON_SLOT_RETUNING_REQ_CTRL 0x0144 @@ -82,6 +83,7 @@ struct xenon_priv { unsigned char bus_width; unsigned char timing; unsigned int clock; + struct clk *axi_clk; int phy_type; /* @@ -90,6 +92,7 @@ struct xenon_priv { */ void *phy_params; struct xenon_emmc_phy_regs *emmc_phy_regs; + bool restore_needed; }; int xenon_phy_adj(struct sdhci_host *host, struct mmc_ios *ios); diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index ecd0d4350e8a..0d5fcca18c9e 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -897,8 +897,8 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) sdhci_set_transfer_irqs(host); /* Set the DMA boundary value and block size */ - sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG, - data->blksz), SDHCI_BLOCK_SIZE); + sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz), + SDHCI_BLOCK_SIZE); sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT); } @@ -1173,24 +1173,35 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) } EXPORT_SYMBOL_GPL(sdhci_send_command); +static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd) +{ + int i, reg; + + for (i = 0; i < 4; i++) { + reg = SDHCI_RESPONSE + (3 - i) * 4; + cmd->resp[i] = sdhci_readl(host, reg); + } + + if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC) + return; + + /* CRC is stripped so we need to do some shifting */ + for (i = 0; i < 4; i++) { + cmd->resp[i] <<= 8; + if (i != 3) + cmd->resp[i] |= cmd->resp[i + 1] >> 24; + } +} + static void sdhci_finish_command(struct sdhci_host *host) { struct mmc_command *cmd = host->cmd; - int i; host->cmd = NULL; if (cmd->flags & MMC_RSP_PRESENT) { if (cmd->flags & MMC_RSP_136) { - /* CRC is stripped so we need to do some shifting. 
*/ - for (i = 0;i < 4;i++) { - cmd->resp[i] = sdhci_readl(host, - SDHCI_RESPONSE + (3-i)*4) << 8; - if (i != 3) - cmd->resp[i] |= - sdhci_readb(host, - SDHCI_RESPONSE + (3-i)*4-1); - } + sdhci_read_rsp_136(host, cmd); } else { cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE); } @@ -1544,10 +1555,9 @@ void sdhci_set_bus_width(struct sdhci_host *host, int width) ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); if (width == MMC_BUS_WIDTH_8) { ctrl &= ~SDHCI_CTRL_4BITBUS; - if (host->version >= SDHCI_SPEC_300) - ctrl |= SDHCI_CTRL_8BITBUS; + ctrl |= SDHCI_CTRL_8BITBUS; } else { - if (host->version >= SDHCI_SPEC_300) + if (host->mmc->caps & MMC_CAP_8_BIT_DATA) ctrl &= ~SDHCI_CTRL_8BITBUS; if (width == MMC_BUS_WIDTH_4) ctrl |= SDHCI_CTRL_4BITBUS; @@ -1641,19 +1651,20 @@ void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); - if ((ios->timing == MMC_TIMING_SD_HS || - ios->timing == MMC_TIMING_MMC_HS || - ios->timing == MMC_TIMING_MMC_HS400 || - ios->timing == MMC_TIMING_MMC_HS200 || - ios->timing == MMC_TIMING_MMC_DDR52 || - ios->timing == MMC_TIMING_UHS_SDR50 || - ios->timing == MMC_TIMING_UHS_SDR104 || - ios->timing == MMC_TIMING_UHS_DDR50 || - ios->timing == MMC_TIMING_UHS_SDR25) - && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) - ctrl |= SDHCI_CTRL_HISPD; - else - ctrl &= ~SDHCI_CTRL_HISPD; + if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) { + if (ios->timing == MMC_TIMING_SD_HS || + ios->timing == MMC_TIMING_MMC_HS || + ios->timing == MMC_TIMING_MMC_HS400 || + ios->timing == MMC_TIMING_MMC_HS200 || + ios->timing == MMC_TIMING_MMC_DDR52 || + ios->timing == MMC_TIMING_UHS_SDR50 || + ios->timing == MMC_TIMING_UHS_SDR104 || + ios->timing == MMC_TIMING_UHS_DDR50 || + ios->timing == MMC_TIMING_UHS_SDR25) + ctrl |= SDHCI_CTRL_HISPD; + else + ctrl &= ~SDHCI_CTRL_HISPD; + } if (host->version >= SDHCI_SPEC_300) { u16 clk, ctrl_2; @@ -2037,6 +2048,7 @@ static void sdhci_send_tuning(struct sdhci_host *host, u32 opcode) struct mmc_command cmd = {}; struct mmc_request mrq = {}; unsigned long flags; + u32 b = host->sdma_boundary; spin_lock_irqsave(&host->lock, flags); @@ -2052,9 +2064,9 @@ static void sdhci_send_tuning(struct sdhci_host *host, u32 opcode) */ if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 && mmc->ios.bus_width == MMC_BUS_WIDTH_8) - sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128), SDHCI_BLOCK_SIZE); + sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE); else - sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64), SDHCI_BLOCK_SIZE); + sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE); /* * The tuning block is sent by the card to the host controller. 
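The R2 handling refactored above deserves a short illustration: as the original comment notes, the controller strips the CRC byte of a 136-bit response, so after reading the four SDHCI_RESPONSE words each one is shifted up a byte and back-filled from the top byte of the following word, and the new SDHCI_QUIRK2_RSP_136_HAS_CRC quirk skips that fix-up on controllers that do keep the CRC in the register. Below is a standalone sketch of the same recombination; rsp136_recombine() is illustrative only and simply mirrors the loop in sdhci_read_rsp_136():

	#include <linux/types.h>

	/*
	 * raw[0] is the word read from the highest response offset
	 * (SDHCI_RESPONSE + 12), i.e. the most significant part, matching the
	 * (3 - i) * 4 register walk in the driver.
	 */
	static void rsp136_recombine(const u32 raw[4], u32 resp[4])
	{
		int i;

		for (i = 0; i < 4; i++) {
			resp[i] = raw[i] << 8;			/* make room for the stripped byte */
			if (i != 3)
				resp[i] |= raw[i + 1] >> 24;	/* borrow it from the next word */
		}
	}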
@@ -2502,7 +2514,6 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask) sdhci_finish_command(host); } -#ifdef CONFIG_MMC_DEBUG static void sdhci_adma_show_error(struct sdhci_host *host) { void *desc = host->adma_table; @@ -2530,9 +2541,6 @@ static void sdhci_adma_show_error(struct sdhci_host *host) break; } } -#else -static void sdhci_adma_show_error(struct sdhci_host *host) { } -#endif static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) { @@ -2938,7 +2946,8 @@ int sdhci_runtime_resume_host(struct sdhci_host *host) sdhci_init(host, 0); - if (mmc->ios.power_mode != MMC_POWER_UNDEFINED) { + if (mmc->ios.power_mode != MMC_POWER_UNDEFINED && + mmc->ios.power_mode != MMC_POWER_OFF) { /* Force clock and power re-program */ host->pwr = 0; host->clock = 0; @@ -2998,7 +3007,7 @@ void sdhci_cqe_enable(struct mmc_host *mmc) ctrl |= SDHCI_CTRL_ADMA32; sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); - sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG, 512), + sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512), SDHCI_BLOCK_SIZE); /* Set maximum timeout */ @@ -3119,6 +3128,8 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev, host->tuning_delay = -1; + host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG; + return host; } @@ -3230,6 +3241,13 @@ int sdhci_setup_host(struct sdhci_host *host) if (ret == -EPROBE_DEFER) return ret; + DBG("Version: 0x%08x | Present: 0x%08x\n", + sdhci_readw(host, SDHCI_HOST_VERSION), + sdhci_readl(host, SDHCI_PRESENT_STATE)); + DBG("Caps: 0x%08x | Caps_1: 0x%08x\n", + sdhci_readl(host, SDHCI_CAPABILITIES), + sdhci_readl(host, SDHCI_CAPABILITIES_1)); + sdhci_read_caps(host); override_timeout_clk = host->timeout_clk; @@ -3747,10 +3765,6 @@ int __sdhci_add_host(struct sdhci_host *host) goto untasklet; } -#ifdef CONFIG_MMC_DEBUG - sdhci_dumpregs(host); -#endif - ret = sdhci_led_register(host); if (ret) { pr_err("%s: Failed to register LED device: %d\n", diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 0469fa191493..54bc444c317f 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -435,6 +435,8 @@ struct sdhci_host { #define SDHCI_QUIRK2_ACMD23_BROKEN (1<<14) /* Broken Clock divider zero in controller */ #define SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN (1<<15) +/* Controller has CRC in 136 bit Command Response */ +#define SDHCI_QUIRK2_RSP_136_HAS_CRC (1<<16) int irq; /* Device IRQ */ void __iomem *ioaddr; /* Mapped address */ @@ -541,6 +543,9 @@ struct sdhci_host { /* Delay (ms) between tuning commands */ int tuning_delay; + /* Host SDMA buffer boundary. 
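For context on the new host field declared just below: in the Block Size register, bits 11:0 carry the transfer block size and bits 14:12 select the SDMA buffer boundary as 4 KB << value, so the long-standing SDHCI_DEFAULT_BOUNDARY_ARG of 7 means a 512 KB boundary. Keeping the value in struct sdhci_host lets a driver such as sdhci-msm shrink it (it sets 0, i.e. 4 KB, in its probe above) without touching every SDHCI_MAKE_BLKSZ() call site. A small sketch of the encoding; make_blksz() just mirrors the kernel's SDHCI_MAKE_BLKSZ() macro:

	#include <linux/types.h>

	/* Boundary argument in bits 14:12, transfer block size in bits 11:0. */
	static inline u16 make_blksz(u16 boundary_arg, u16 blksz)
	{
		return ((boundary_arg & 0x7) << 12) | (blksz & 0xFFF);
	}

	/* boundary_arg 7 -> 4 KB << 7 = 512 KB (the old hard-coded value); 0 -> 4 KB */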
*/ + u32 sdma_boundary; + unsigned long private[0] ____cacheline_aligned; }; diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c index 70cb00aa79a0..9e46039282d2 100644 --- a/drivers/mmc/host/sdricoh_cs.c +++ b/drivers/mmc/host/sdricoh_cs.c @@ -385,7 +385,7 @@ static int sdricoh_get_ro(struct mmc_host *mmc) return (status & STATUS_CARD_LOCKED); } -static struct mmc_host_ops sdricoh_ops = { +static const struct mmc_host_ops sdricoh_ops = { .request = sdricoh_request, .set_ios = sdricoh_set_ios, .get_ro = sdricoh_get_ro, diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index 4062d6bef3c8..53fb18bb7bee 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c @@ -1079,7 +1079,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) host->state = STATE_IDLE; } -static struct mmc_host_ops sh_mmcif_ops = { +static const struct mmc_host_ops sh_mmcif_ops = { .request = sh_mmcif_request, .set_ios = sh_mmcif_set_ios, .get_cd = mmc_gpio_get_cd, diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index 0fb4e4c119e1..53c970fe0873 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c @@ -22,6 +22,7 @@ #include #include +#include #include #include #include @@ -259,7 +260,11 @@ struct sunxi_mmc_cfg { /* Does DATA0 needs to be masked while the clock is updated */ bool mask_data0; + /* hardware only supports new timing mode */ bool needs_new_timings; + + /* hardware can switch between old and new timing modes */ + bool has_timings_switch; }; struct sunxi_mmc_host { @@ -293,6 +298,9 @@ struct sunxi_mmc_host { /* vqmmc */ bool vqmmc_enabled; + + /* timings */ + bool use_new_timings; }; static int sunxi_mmc_reset_host(struct sunxi_mmc_host *host) @@ -714,6 +722,11 @@ static int sunxi_mmc_clk_set_phase(struct sunxi_mmc_host *host, { int index; + /* clk controller delays not used under new timings mode */ + if (host->use_new_timings) + return 0; + + /* some old controllers don't support delays */ if (!host->cfg->clk_delays) return 0; @@ -747,7 +760,7 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host, { struct mmc_host *mmc = host->mmc; long rate; - u32 rval, clock = ios->clock; + u32 rval, clock = ios->clock, div = 1; int ret; ret = sunxi_mmc_oclk_onoff(host, 0); @@ -760,10 +773,30 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host, if (!ios->clock) return 0; - /* 8 bit DDR requires a higher module clock */ + /* + * Under the old timing mode, 8 bit DDR requires the module + * clock to be double the card clock. Under the new timing + * mode, all DDR modes require a doubled module clock. + * + * We currently only support the standard MMC DDR52 mode. + * This block should be updated once support for other DDR + * modes is added. 
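A concrete reading of the sunxi comment above: in MMC DDR52 mode the card clock stays at, say, 52 MHz, but the module clock must run at twice that under the new timing mode (or for 8-bit DDR under the old one), so the code requests 104 MHz from the CCU, programs the internal divider with div - 1 = 1 in REG_CLKCR, and divides the achieved rate back down before passing it to sunxi_mmc_clk_set_phase(), which expects the actual card clock. A minimal sketch of that relationship; the 104 MHz figure is only an example:

	/* DDR: module clock is double the card clock, divider programmed as div - 1. */
	static unsigned int sunxi_ddr_card_rate(unsigned int module_rate)
	{
		unsigned int div = 2;

		return module_rate / div;	/* e.g. 104 MHz module clock -> 52 MHz card clock */
	}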
+ */ if (ios->timing == MMC_TIMING_MMC_DDR52 && - ios->bus_width == MMC_BUS_WIDTH_8) + (host->use_new_timings || + ios->bus_width == MMC_BUS_WIDTH_8)) { + div = 2; clock <<= 1; + } + + if (host->use_new_timings && host->cfg->has_timings_switch) { + ret = sunxi_ccu_set_mmc_timing_mode(host->clk_mmc, true); + if (ret) { + dev_err(mmc_dev(mmc), + "error setting new timing mode\n"); + return ret; + } + } rate = clk_round_rate(host->clk_mmc, clock); if (rate < 0) { @@ -782,24 +815,23 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host, return ret; } - /* clear internal divider */ + /* set internal divider */ rval = mmc_readl(host, REG_CLKCR); rval &= ~0xff; - /* set internal divider for 8 bit eMMC DDR, so card clock is right */ - if (ios->timing == MMC_TIMING_MMC_DDR52 && - ios->bus_width == MMC_BUS_WIDTH_8) { - rval |= 1; - rate >>= 1; - } + rval |= div - 1; mmc_writel(host, REG_CLKCR, rval); - if (host->cfg->needs_new_timings) { + /* update card clock rate to account for internal divider */ + rate /= div; + + if (host->use_new_timings) { /* Don't touch the delay bits */ rval = mmc_readl(host, REG_SD_NTSR); rval |= SDXC_2X_TIMING_MODE; mmc_writel(host, REG_SD_NTSR, rval); } + /* sunxi_mmc_clk_set_phase expects the actual card clock rate */ ret = sunxi_mmc_clk_set_phase(host, ios, rate); if (ret) return ret; @@ -1048,7 +1080,7 @@ static int sunxi_mmc_card_busy(struct mmc_host *mmc) return !!(mmc_readl(host, REG_STAS) & SDXC_CARD_DATA_BUSY); } -static struct mmc_host_ops sunxi_mmc_ops = { +static const struct mmc_host_ops sunxi_mmc_ops = { .request = sunxi_mmc_request, .set_ios = sunxi_mmc_set_ios, .get_ro = mmc_gpio_get_ro, @@ -1094,6 +1126,13 @@ static const struct sunxi_mmc_cfg sun7i_a20_cfg = { .can_calibrate = false, }; +static const struct sunxi_mmc_cfg sun8i_a83t_emmc_cfg = { + .idma_des_size_bits = 16, + .clk_delays = sunxi_mmc_clk_delays, + .can_calibrate = false, + .has_timings_switch = true, +}; + static const struct sunxi_mmc_cfg sun9i_a80_cfg = { .idma_des_size_bits = 16, .clk_delays = sun9i_mmc_clk_delays, @@ -1118,6 +1157,7 @@ static const struct of_device_id sunxi_mmc_of_match[] = { { .compatible = "allwinner,sun4i-a10-mmc", .data = &sun4i_a10_cfg }, { .compatible = "allwinner,sun5i-a13-mmc", .data = &sun5i_a13_cfg }, { .compatible = "allwinner,sun7i-a20-mmc", .data = &sun7i_a20_cfg }, + { .compatible = "allwinner,sun8i-a83t-emmc", .data = &sun8i_a83t_emmc_cfg }, { .compatible = "allwinner,sun9i-a80-mmc", .data = &sun9i_a80_cfg }, { .compatible = "allwinner,sun50i-a64-mmc", .data = &sun50i_a64_cfg }, { .compatible = "allwinner,sun50i-a64-emmc", .data = &sun50i_a64_emmc_cfg }, @@ -1172,7 +1212,8 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, } } - host->reset = devm_reset_control_get_optional(&pdev->dev, "ahb"); + host->reset = devm_reset_control_get_optional_exclusive(&pdev->dev, + "ahb"); if (PTR_ERR(host->reset) == -EPROBE_DEFER) return PTR_ERR(host->reset); @@ -1201,7 +1242,7 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, } if (!IS_ERR(host->reset)) { - ret = reset_control_deassert(host->reset); + ret = reset_control_reset(host->reset); if (ret) { dev_err(&pdev->dev, "reset err %d\n", ret); goto error_disable_clk_sample; @@ -1262,6 +1303,30 @@ static int sunxi_mmc_probe(struct platform_device *pdev) goto error_free_host; } + if (host->cfg->has_timings_switch) { + /* + * Supports both old and new timing modes. + * Try setting the clk to new timing mode. 
+ */ + sunxi_ccu_set_mmc_timing_mode(host->clk_mmc, true); + + /* And check the result */ + ret = sunxi_ccu_get_mmc_timing_mode(host->clk_mmc); + if (ret < 0) { + /* + * For whatever reason we were not able to get + * the current active mode. Default to old mode. + */ + dev_warn(&pdev->dev, "MMC clk timing mode unknown\n"); + host->use_new_timings = false; + } else { + host->use_new_timings = !!ret; + } + } else if (host->cfg->needs_new_timings) { + /* Supports new timing mode only */ + host->use_new_timings = true; + } + mmc->ops = &sunxi_mmc_ops; mmc->max_blk_count = 8192; mmc->max_blk_size = 4096; @@ -1274,7 +1339,7 @@ static int sunxi_mmc_probe(struct platform_device *pdev) mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ; - if (host->cfg->clk_delays) + if (host->cfg->clk_delays || host->use_new_timings) mmc->caps |= MMC_CAP_1_8V_DDR; ret = mmc_of_parse(mmc); diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h index 6ad6704175dc..3e6ff8921440 100644 --- a/drivers/mmc/host/tmio_mmc.h +++ b/drivers/mmc/host/tmio_mmc.h @@ -81,14 +81,14 @@ #define TMIO_STAT_CMD_BUSY BIT(30) #define TMIO_STAT_ILL_ACCESS BIT(31) +/* Definitions for values the CTL_SD_CARD_CLK_CTL register can take */ #define CLK_CTL_DIV_MASK 0xff #define CLK_CTL_SCLKEN BIT(8) +/* Definitions for values the CTL_SD_MEM_CARD_OPT register can take */ #define CARD_OPT_WIDTH8 BIT(13) #define CARD_OPT_WIDTH BIT(15) -#define TMIO_BBS 512 /* Boot block size */ - /* Definitions for values the CTL_SDIO_STATUS register can take */ #define TMIO_SDIO_STAT_IOIRQ 0x0001 #define TMIO_SDIO_STAT_EXPUB52 0x4000 @@ -97,6 +97,9 @@ #define TMIO_SDIO_SETBITS_MASK 0x0006 +/* Definitions for values the CTL_DMA_ENABLE register can take */ +#define DMA_ENABLE_DMASDRW BIT(1) + /* Define some IRQ masks */ /* This is the mask used at reset by the chip */ #define TMIO_MASK_ALL 0x837f031d @@ -122,6 +125,7 @@ struct tmio_mmc_dma_ops { struct tmio_mmc_data *pdata); void (*release)(struct tmio_mmc_host *host); void (*abort)(struct tmio_mmc_host *host); + void (*dataend)(struct tmio_mmc_host *host); }; struct tmio_mmc_host { @@ -151,6 +155,7 @@ struct tmio_mmc_host { struct dma_chan *chan_rx; struct dma_chan *chan_tx; struct completion dma_dataend; + struct tasklet_struct dma_complete; struct tasklet_struct dma_issue; struct scatterlist bounce_sg; u8 *bounce_buf; diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index 88a94355ac90..a7293e186e03 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -87,6 +87,12 @@ static inline void tmio_mmc_abort_dma(struct tmio_mmc_host *host) host->dma_ops->abort(host); } +static inline void tmio_mmc_dataend_dma(struct tmio_mmc_host *host) +{ + if (host->dma_ops) + host->dma_ops->dataend(host); +} + void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i) { host->sdcard_irq_mask &= ~(i & TMIO_MASK_IRQ); @@ -123,50 +129,6 @@ static int tmio_mmc_next_sg(struct tmio_mmc_host *host) #define CMDREQ_TIMEOUT 5000 -#ifdef CONFIG_MMC_DEBUG - -#define STATUS_TO_TEXT(a, status, i) \ - do { \ - if ((status) & TMIO_STAT_##a) { \ - if ((i)++) \ - printk(KERN_DEBUG " | "); \ - printk(KERN_DEBUG #a); \ - } \ - } while (0) - -static void pr_debug_status(u32 status) -{ - int i = 0; - - pr_debug("status: %08x = ", status); - STATUS_TO_TEXT(CARD_REMOVE, status, i); - STATUS_TO_TEXT(CARD_INSERT, status, i); - STATUS_TO_TEXT(SIGSTATE, status, i); - STATUS_TO_TEXT(WRPROTECT, status, i); - 
STATUS_TO_TEXT(CARD_REMOVE_A, status, i); - STATUS_TO_TEXT(CARD_INSERT_A, status, i); - STATUS_TO_TEXT(SIGSTATE_A, status, i); - STATUS_TO_TEXT(CMD_IDX_ERR, status, i); - STATUS_TO_TEXT(STOPBIT_ERR, status, i); - STATUS_TO_TEXT(ILL_FUNC, status, i); - STATUS_TO_TEXT(CMD_BUSY, status, i); - STATUS_TO_TEXT(CMDRESPEND, status, i); - STATUS_TO_TEXT(DATAEND, status, i); - STATUS_TO_TEXT(CRCFAIL, status, i); - STATUS_TO_TEXT(DATATIMEOUT, status, i); - STATUS_TO_TEXT(CMDTIMEOUT, status, i); - STATUS_TO_TEXT(RXOVERFLOW, status, i); - STATUS_TO_TEXT(TXUNDERRUN, status, i); - STATUS_TO_TEXT(RXRDY, status, i); - STATUS_TO_TEXT(TXRQ, status, i); - STATUS_TO_TEXT(ILL_ACCESS, status, i); - printk("\n"); -} - -#else -#define pr_debug_status(s) do { } while (0) -#endif - static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) { struct tmio_mmc_host *host = mmc_priv(mmc); @@ -201,7 +163,10 @@ static void tmio_mmc_clk_start(struct tmio_mmc_host *host) { sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN | sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); - msleep(host->pdata->flags & TMIO_MMC_MIN_RCAR2 ? 1 : 10); + + /* HW engineers overrode docs: no sleep needed on R-Car2+ */ + if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2)) + msleep(10); if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) { sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100); @@ -218,7 +183,10 @@ static void tmio_mmc_clk_stop(struct tmio_mmc_host *host) sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN & sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); - msleep(host->pdata->flags & TMIO_MMC_MIN_RCAR2 ? 5 : 10); + + /* HW engineers overrode docs: no sleep needed on R-Car2+ */ + if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2)) + msleep(10); } static void tmio_mmc_set_clock(struct tmio_mmc_host *host, @@ -343,12 +311,6 @@ static int tmio_mmc_start_command(struct tmio_mmc_host *host, int c = cmd->opcode; u32 irq_mask = TMIO_MASK_CMD; - /* CMD12 is handled by hardware */ - if (cmd->opcode == MMC_STOP_TRANSMISSION && !cmd->arg) { - sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, TMIO_STOP_STP); - return 0; - } - switch (mmc_resp_type(cmd)) { case MMC_RSP_NONE: c |= RESP_NONE; break; case MMC_RSP_R1: @@ -605,11 +567,11 @@ static void tmio_mmc_data_irq(struct tmio_mmc_host *host, unsigned int stat) if (done) { tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND); - complete(&host->dma_dataend); + tmio_mmc_dataend_dma(host); } } else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) { tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND); - complete(&host->dma_dataend); + tmio_mmc_dataend_dma(host); } else { tmio_mmc_do_data_irq(host); tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP); @@ -756,9 +718,6 @@ irqreturn_t tmio_mmc_irq(int irq, void *devid) status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS); ireg = status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask; - pr_debug_status(status); - pr_debug_status(ireg); - /* Clear the status except the interrupt status */ sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, TMIO_MASK_IRQ); @@ -1251,10 +1210,10 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host, mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities; mmc->caps2 |= pdata->capabilities2; - mmc->max_segs = 32; + mmc->max_segs = pdata->max_segs ? : 32; mmc->max_blk_size = 512; - mmc->max_blk_count = (PAGE_SIZE / mmc->max_blk_size) * - mmc->max_segs; + mmc->max_blk_count = pdata->max_blk_count ? 
: + (PAGE_SIZE / mmc->max_blk_size) * mmc->max_segs; mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; mmc->max_seg_size = mmc->max_req_size; diff --git a/drivers/mmc/host/toshsd.c b/drivers/mmc/host/toshsd.c index 553ef41bb806..dd961c54a6a9 100644 --- a/drivers/mmc/host/toshsd.c +++ b/drivers/mmc/host/toshsd.c @@ -550,7 +550,7 @@ static int toshsd_get_cd(struct mmc_host *mmc) return !!(ioread16(host->ioaddr + SD_CARDSTATUS) & SD_CARD_PRESENT_0); } -static struct mmc_host_ops toshsd_ops = { +static const struct mmc_host_ops toshsd_ops = { .request = toshsd_request, .set_ios = toshsd_set_ios, .get_ro = toshsd_get_ro, diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c index 1bd5f1a18d4e..64da6a88cfb9 100644 --- a/drivers/mmc/host/usdhi6rol0.c +++ b/drivers/mmc/host/usdhi6rol0.c @@ -1185,7 +1185,7 @@ static int usdhi6_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios) return ret; } -static struct mmc_host_ops usdhi6_ops = { +static const struct mmc_host_ops usdhi6_ops = { .request = usdhi6_request, .set_ios = usdhi6_set_ios, .get_cd = usdhi6_get_cd, diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c index 6380044c0628..a838bf5480d8 100644 --- a/drivers/mmc/host/via-sdmmc.c +++ b/drivers/mmc/host/via-sdmmc.c @@ -323,7 +323,7 @@ struct via_crdr_mmc_host { /* some devices need a very long delay for power to stabilize */ #define VIA_CRDR_QUIRK_300MS_PWRDELAY 0x0001 -static struct pci_device_id via_ids[] = { +static const struct pci_device_id via_ids[] = { {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_9530, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0,}, {0,} diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c index fbeea1a491a6..8f569d257405 100644 --- a/drivers/mmc/host/vub300.c +++ b/drivers/mmc/host/vub300.c @@ -266,7 +266,7 @@ MODULE_PARM_DESC(firmware_rom_wait_states, #define ELAN_VENDOR_ID 0x2201 #define VUB300_VENDOR_ID 0x0424 #define VUB300_PRODUCT_ID 0x012C -static struct usb_device_id vub300_table[] = { +static const struct usb_device_id vub300_table[] = { {USB_DEVICE(ELAN_VENDOR_ID, VUB300_PRODUCT_ID)}, {USB_DEVICE(VUB300_VENDOR_ID, VUB300_PRODUCT_ID)}, {} /* Terminating entry */ @@ -2079,7 +2079,7 @@ static void vub300_init_card(struct mmc_host *mmc, struct mmc_card *card) dev_info(&vub300->udev->dev, "NO host QUIRKS for this card\n"); } -static struct mmc_host_ops vub300_mmc_ops = { +static const struct mmc_host_ops vub300_mmc_ops = { .request = vub300_mmc_request, .set_ios = vub300_mmc_set_ios, .get_ro = vub300_mmc_get_ro, diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c index 9668616faf16..546aaf8d1507 100644 --- a/drivers/mmc/host/wbsd.c +++ b/drivers/mmc/host/wbsd.c @@ -802,10 +802,8 @@ static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq) break; default: -#ifdef CONFIG_MMC_DEBUG pr_warn("%s: Data command %d is not supported by this controller\n", mmc_hostname(host->mmc), cmd->opcode); -#endif cmd->error = -EINVAL; goto done; diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c index 21ebba88679c..fd30ac7da5e5 100644 --- a/drivers/mmc/host/wmt-sdmmc.c +++ b/drivers/mmc/host/wmt-sdmmc.c @@ -726,7 +726,7 @@ static int wmt_mci_get_cd(struct mmc_host *mmc) return !(cd ^ priv->cd_inverted); } -static struct mmc_host_ops wmt_mci_ops = { +static const struct mmc_host_ops wmt_mci_ops = { .request = wmt_mci_request, .set_ios = wmt_mci_set_ios, .get_ro = wmt_mci_get_ro, @@ -856,7 +856,9 @@ static int wmt_mci_probe(struct platform_device *pdev) goto fail5; } - 
clk_prepare_enable(priv->clk_sdmmc); + ret = clk_prepare_enable(priv->clk_sdmmc); + if (ret) + goto fail6; /* configure the controller to a known 'ready' state */ wmt_reset_hardware(mmc); @@ -866,6 +868,8 @@ static int wmt_mci_probe(struct platform_device *pdev) dev_info(&pdev->dev, "WMT SDHC Controller initialized\n"); return 0; +fail6: + clk_put(priv->clk_sdmmc); fail5: free_irq(dma_irq, priv); fail4: diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c index b833e6cc684c..84b16133554b 100644 --- a/drivers/mtd/devices/docg3.c +++ b/drivers/mtd/devices/docg3.c @@ -1809,37 +1809,22 @@ static int dbg_protection_show(struct seq_file *s, void *p) } DEBUGFS_RO_ATTR(protection, dbg_protection_show); -static int __init doc_dbg_register(struct docg3 *docg3) +static void __init doc_dbg_register(struct mtd_info *floor) { - struct dentry *root, *entry; + struct dentry *root = floor->dbg.dfs_dir; + struct docg3 *docg3 = floor->priv; - root = debugfs_create_dir("docg3", NULL); - if (!root) - return -ENOMEM; + if (IS_ERR_OR_NULL(root)) + return; - entry = debugfs_create_file("flashcontrol", S_IRUSR, root, docg3, - &flashcontrol_fops); - if (entry) - entry = debugfs_create_file("asic_mode", S_IRUSR, root, - docg3, &asic_mode_fops); - if (entry) - entry = debugfs_create_file("device_id", S_IRUSR, root, - docg3, &device_id_fops); - if (entry) - entry = debugfs_create_file("protection", S_IRUSR, root, - docg3, &protection_fops); - if (entry) { - docg3->debugfs_root = root; - return 0; - } else { - debugfs_remove_recursive(root); - return -ENOMEM; - } -} - -static void doc_dbg_unregister(struct docg3 *docg3) -{ - debugfs_remove_recursive(docg3->debugfs_root); + debugfs_create_file("docg3_flashcontrol", S_IRUSR, root, docg3, + &flashcontrol_fops); + debugfs_create_file("docg3_asic_mode", S_IRUSR, root, docg3, + &asic_mode_fops); + debugfs_create_file("docg3_device_id", S_IRUSR, root, docg3, + &device_id_fops); + debugfs_create_file("docg3_protection", S_IRUSR, root, docg3, + &protection_fops); } /** @@ -2114,6 +2099,8 @@ static int __init docg3_probe(struct platform_device *pdev) 0); if (ret) goto err_probe; + + doc_dbg_register(cascade->floors[floor]); } ret = doc_register_sysfs(pdev, cascade); @@ -2121,7 +2108,6 @@ static int __init docg3_probe(struct platform_device *pdev) goto err_probe; platform_set_drvdata(pdev, cascade); - doc_dbg_register(cascade->floors[0]->priv); return 0; notfound: @@ -2148,7 +2134,6 @@ static int docg3_release(struct platform_device *pdev) int floor; doc_unregister_sysfs(pdev, cascade); - doc_dbg_unregister(docg3); for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) if (cascade->floors[floor]) doc_release_device(cascade->floors[floor]); diff --git a/drivers/mtd/devices/docg3.h b/drivers/mtd/devices/docg3.h index 19fb93f96a3a..e99946575398 100644 --- a/drivers/mtd/devices/docg3.h +++ b/drivers/mtd/devices/docg3.h @@ -299,7 +299,6 @@ struct docg3_cascade { * @oob_autoecc: if 1, use only bytes 0-7, 15, and fill the others with HW ECC * if 0, use all the 16 bytes. * @oob_write_buf: prepared OOB for next page_write - * @debugfs_root: debugfs root node */ struct docg3 { struct device *dev; @@ -312,7 +311,6 @@ struct docg3 { loff_t oob_write_ofs; int oob_autoecc; u8 oob_write_buf[DOC_LAYOUT_OOB_SIZE]; - struct dentry *debugfs_root; }; #define doc_err(fmt, arg...) 
dev_err(docg3->dev, (fmt), ## arg) diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c index dd5069876537..ddf478976013 100644 --- a/drivers/mtd/devices/spear_smi.c +++ b/drivers/mtd/devices/spear_smi.c @@ -775,6 +775,8 @@ static int spear_smi_probe_config_dt(struct platform_device *pdev, pdata->board_flash_info = devm_kzalloc(&pdev->dev, sizeof(*pdata->board_flash_info), GFP_KERNEL); + if (!pdata->board_flash_info) + return -ENOMEM; /* Fill structs for each subnode (flash device) */ while ((pp = of_get_next_child(np, pp))) { diff --git a/drivers/mtd/devices/st_spi_fsm.c b/drivers/mtd/devices/st_spi_fsm.c index 21afd94cd904..7bc29d725200 100644 --- a/drivers/mtd/devices/st_spi_fsm.c +++ b/drivers/mtd/devices/st_spi_fsm.c @@ -2073,15 +2073,17 @@ static int stfsm_probe(struct platform_device *pdev) ret = stfsm_init(fsm); if (ret) { dev_err(&pdev->dev, "Failed to initialise FSM Controller\n"); - return ret; + goto err_clk_unprepare; } stfsm_fetch_platform_configs(pdev); /* Detect SPI FLASH device */ info = stfsm_jedec_probe(fsm); - if (!info) - return -ENODEV; + if (!info) { + ret = -ENODEV; + goto err_clk_unprepare; + } fsm->info = info; /* Use device size to determine address width */ @@ -2095,11 +2097,11 @@ static int stfsm_probe(struct platform_device *pdev) if (info->config) { ret = info->config(fsm); if (ret) - return ret; + goto err_clk_unprepare; } else { ret = stfsm_prepare_rwe_seqs_default(fsm); if (ret) - return ret; + goto err_clk_unprepare; } fsm->mtd.name = info->name; @@ -2124,6 +2126,10 @@ static int stfsm_probe(struct platform_device *pdev) fsm->mtd.erasesize, (fsm->mtd.erasesize >> 10)); return mtd_device_register(&fsm->mtd, NULL, 0); + +err_clk_unprepare: + clk_disable_unprepare(fsm->clk); + return ret; } static int stfsm_remove(struct platform_device *pdev) @@ -2147,9 +2153,7 @@ static int stfsmfsm_resume(struct device *dev) { struct stfsm *fsm = dev_get_drvdata(dev); - clk_prepare_enable(fsm->clk); - - return 0; + return clk_prepare_enable(fsm->clk); } #endif diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c index 8db740d6eb08..57ef1fb42a04 100644 --- a/drivers/mtd/inftlcore.c +++ b/drivers/mtd/inftlcore.c @@ -33,7 +33,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c index f2b68667ea59..26de0a1d08cf 100644 --- a/drivers/mtd/maps/amd76xrom.c +++ b/drivers/mtd/maps/amd76xrom.c @@ -296,7 +296,7 @@ static void amd76xrom_remove_one(struct pci_dev *pdev) amd76xrom_cleanup(window); } -static struct pci_device_id amd76xrom_pci_tbl[] = { +static const struct pci_device_id amd76xrom_pci_tbl[] = { { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7410, PCI_ANY_ID, PCI_ANY_ID, }, { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7440, @@ -319,7 +319,7 @@ static struct pci_driver amd76xrom_driver = { static int __init init_amd76xrom(void) { struct pci_dev *pdev; - struct pci_device_id *id; + const struct pci_device_id *id; pdev = NULL; for(id = amd76xrom_pci_tbl; id->vendor; id++) { pdev = pci_get_device(id->vendor, id->device, NULL); diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c index 4f206a99164c..584962ec49f8 100644 --- a/drivers/mtd/maps/ck804xrom.c +++ b/drivers/mtd/maps/ck804xrom.c @@ -326,7 +326,7 @@ static void ck804xrom_remove_one(struct pci_dev *pdev) ck804xrom_cleanup(window); } -static struct pci_device_id ck804xrom_pci_tbl[] = { +static const struct pci_device_id ck804xrom_pci_tbl[] = { { 
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0051), .driver_data = DEV_CK804 }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0360), .driver_data = DEV_MCP55 }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0361), .driver_data = DEV_MCP55 }, @@ -353,7 +353,7 @@ static struct pci_driver ck804xrom_driver = { static int __init init_ck804xrom(void) { struct pci_dev *pdev; - struct pci_device_id *id; + const struct pci_device_id *id; int retVal; pdev = NULL; diff --git a/drivers/mtd/maps/esb2rom.c b/drivers/mtd/maps/esb2rom.c index 9646b0766ce0..da9f6d76ce1d 100644 --- a/drivers/mtd/maps/esb2rom.c +++ b/drivers/mtd/maps/esb2rom.c @@ -384,7 +384,7 @@ static void esb2rom_remove_one(struct pci_dev *pdev) esb2rom_cleanup(window); } -static struct pci_device_id esb2rom_pci_tbl[] = { +static const struct pci_device_id esb2rom_pci_tbl[] = { { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, PCI_ANY_ID, PCI_ANY_ID, }, { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, @@ -414,7 +414,7 @@ static struct pci_driver esb2rom_driver = { static int __init init_esb2rom(void) { struct pci_dev *pdev; - struct pci_device_id *id; + const struct pci_device_id *id; int retVal; pdev = NULL; diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c index 976d42f63aef..1888c5bf13f8 100644 --- a/drivers/mtd/maps/ichxrom.c +++ b/drivers/mtd/maps/ichxrom.c @@ -323,7 +323,7 @@ static void ichxrom_remove_one(struct pci_dev *pdev) ichxrom_cleanup(window); } -static struct pci_device_id ichxrom_pci_tbl[] = { +static const struct pci_device_id ichxrom_pci_tbl[] = { { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, PCI_ANY_ID, PCI_ANY_ID, }, { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, @@ -351,7 +351,7 @@ static struct pci_driver ichxrom_driver = { static int __init init_ichxrom(void) { struct pci_dev *pdev; - struct pci_device_id *id; + const struct pci_device_id *id; pdev = NULL; for (id = ichxrom_pci_tbl; id->vendor; id++) { diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c index 8bf79775e7c1..dd5d6855f543 100644 --- a/drivers/mtd/maps/intel_vr_nor.c +++ b/drivers/mtd/maps/intel_vr_nor.c @@ -170,7 +170,7 @@ static int vr_nor_init_maps(struct vr_nor_mtd *p) return err; } -static struct pci_device_id vr_nor_pci_ids[] = { +static const struct pci_device_id vr_nor_pci_ids[] = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x500D)}, {0,} }; diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c index 3e33ab66eb24..77b1d8013295 100644 --- a/drivers/mtd/maps/lantiq-flash.c +++ b/drivers/mtd/maps/lantiq-flash.c @@ -114,12 +114,6 @@ ltq_mtd_probe(struct platform_device *pdev) struct cfi_private *cfi; int err; - if (of_machine_is_compatible("lantiq,falcon") && - (ltq_boot_select() != BS_FLASH)) { - dev_err(&pdev->dev, "invalid bootstrap options\n"); - return -ENODEV; - } - ltq_mtd = devm_kzalloc(&pdev->dev, sizeof(struct ltq_mtd), GFP_KERNEL); if (!ltq_mtd) return -ENOMEM; diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c index eb0242e0b2d9..7b3bb40aff72 100644 --- a/drivers/mtd/maps/pci.c +++ b/drivers/mtd/maps/pci.c @@ -228,7 +228,7 @@ static struct mtd_pci_info intel_dc21285_info = { * PCI device ID table */ -static struct pci_device_id mtd_pci_ids[] = { +static const struct pci_device_id mtd_pci_ids[] = { { .vendor = PCI_VENDOR_ID_INTEL, .device = 0x530d, diff --git a/drivers/mtd/maps/physmap_of_core.c b/drivers/mtd/maps/physmap_of_core.c index 62fa6836f218..b1bd4faecfb2 100644 --- a/drivers/mtd/maps/physmap_of_core.c +++ b/drivers/mtd/maps/physmap_of_core.c @@ -178,8 +178,8 @@ 
static int of_flash_probe(struct platform_device *dev) */ p = of_get_property(dp, "reg", &count); if (!p || count % reg_tuple_size != 0) { - dev_err(&dev->dev, "Malformed reg property on %s\n", - dev->dev.of_node->full_name); + dev_err(&dev->dev, "Malformed reg property on %pOF\n", + dev->dev.of_node); err = -EINVAL; goto err_flash_remove; } @@ -235,10 +235,10 @@ static int of_flash_probe(struct platform_device *dev) err = of_flash_probe_gemini(dev, dp, &info->list[i].map); if (err) - return err; + goto err_out; err = of_flash_probe_versatile(dev, dp, &info->list[i].map); if (err) - return err; + goto err_out; err = -ENOMEM; info->list[i].map.virt = ioremap(info->list[i].map.phys, diff --git a/drivers/mtd/maps/physmap_of_gemini.c b/drivers/mtd/maps/physmap_of_gemini.c index 05b286b5289f..4ed1a6bb4d3c 100644 --- a/drivers/mtd/maps/physmap_of_gemini.c +++ b/drivers/mtd/maps/physmap_of_gemini.c @@ -43,13 +43,6 @@ #define FLASH_PARALLEL_HIGH_PIN_CNT (1 << 20) /* else low pin cnt */ -/* Miscellaneous Control Register */ -#define GLOBAL_MISC_CTRL 0x30 -#define FLASH_PADS_MASK 0x07 -#define NAND_PADS_DISABLE BIT(2) -#define PFLASH_PADS_DISABLE BIT(1) -#define SFLASH_PADS_DISABLE BIT(0) - static const struct of_device_id syscon_match[] = { { .compatible = "cortina,gemini-syscon" }, { }, @@ -102,15 +95,6 @@ int of_flash_probe_gemini(struct platform_device *pdev, map->bankwidth * 8); } - /* Activate parallel (NOR flash) mode */ - ret = regmap_update_bits(rmap, GLOBAL_MISC_CTRL, - FLASH_PADS_MASK, - SFLASH_PADS_DISABLE | NAND_PADS_DISABLE); - if (ret) { - dev_err(dev, "unable to set up physmap pads\n"); - return -ENODEV; - } - dev_info(&pdev->dev, "initialized Gemini-specific physmap control\n"); return 0; diff --git a/drivers/mtd/maps/physmap_of_versatile.c b/drivers/mtd/maps/physmap_of_versatile.c index 8c6ccded9be8..03f2b6e7bc7e 100644 --- a/drivers/mtd/maps/physmap_of_versatile.c +++ b/drivers/mtd/maps/physmap_of_versatile.c @@ -97,7 +97,7 @@ static const struct of_device_id ebi_match[] = { static int ap_flash_init(struct platform_device *pdev) { struct device_node *ebi; - static void __iomem *ebi_base; + void __iomem *ebi_base; u32 val; int ret; diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c index 414956eca0c9..1e73bba6e286 100644 --- a/drivers/mtd/maps/sun_uflash.c +++ b/drivers/mtd/maps/sun_uflash.c @@ -55,8 +55,8 @@ int uflash_devinit(struct platform_device *op, struct device_node *dp) /* Non-CFI userflash device-- once I find one we * can work on supporting it. 
*/ - printk(KERN_ERR PFX "Unsupported device at %s, 0x%llx\n", - dp->full_name, (unsigned long long)op->resource[0].start); + printk(KERN_ERR PFX "Unsupported device at %pOF, 0x%llx\n", + dp, (unsigned long long)op->resource[0].start); return -ENODEV; } diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 956382cea256..e7ea842ba3db 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -40,6 +40,7 @@ #include #include #include +#include #include #include @@ -339,7 +340,7 @@ static struct attribute *mtd_attrs[] = { }; ATTRIBUTE_GROUPS(mtd); -static struct device_type mtd_devtype = { +static const struct device_type mtd_devtype = { .name = "mtd", .groups = mtd_groups, .release = mtd_release, @@ -477,6 +478,8 @@ int mtd_pairing_groups(struct mtd_info *mtd) } EXPORT_SYMBOL_GPL(mtd_pairing_groups); +static struct dentry *dfs_dir_mtd; + /** * add_mtd_device - register an MTD device * @mtd: pointer to new MTD device info structure @@ -552,6 +555,14 @@ int add_mtd_device(struct mtd_info *mtd) if (error) goto fail_added; + if (!IS_ERR_OR_NULL(dfs_dir_mtd)) { + mtd->dbg.dfs_dir = debugfs_create_dir(dev_name(&mtd->dev), dfs_dir_mtd); + if (IS_ERR_OR_NULL(mtd->dbg.dfs_dir)) { + pr_debug("mtd device %s won't show data in debugfs\n", + dev_name(&mtd->dev)); + } + } + device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL, "mtd%dro", i); @@ -594,6 +605,8 @@ int del_mtd_device(struct mtd_info *mtd) mutex_lock(&mtd_table_mutex); + debugfs_remove_recursive(mtd->dbg.dfs_dir); + if (idr_find(&mtd_idr, mtd->index) != mtd) { ret = -ENODEV; goto out_error; @@ -1811,6 +1824,8 @@ static int __init init_mtd(void) if (ret) goto out_procfs; + dfs_dir_mtd = debugfs_create_dir("mtd", NULL); + return 0; out_procfs: @@ -1826,6 +1841,7 @@ static int __init init_mtd(void) static void __exit cleanup_mtd(void) { + debugfs_remove_recursive(dfs_dir_mtd); cleanup_mtdchar(); if (proc_mtd) remove_proc_entry("mtd", NULL); diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index 5736b0c90b33..a308e707392d 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c @@ -581,6 +581,14 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent, slave->mtd.erasesize = parent->erasesize; } + /* + * Slave erasesize might differ from the master one if the master + * exposes several regions with different erasesize. Adjust + * wr_alignment accordingly. 
+ */ + if (!(slave->mtd.flags & MTD_NO_ERASE)) + wr_alignment = slave->mtd.erasesize; + tmp = slave->offset; remainder = do_div(tmp, wr_alignment); if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) { diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c index f12879a3d4ff..7d9080e33865 100644 --- a/drivers/mtd/mtdswap.c +++ b/drivers/mtd/mtdswap.c @@ -138,8 +138,6 @@ struct mtdswap_dev { char *page_buf; char *oob_buf; - - struct dentry *debugfs_root; }; struct mtdswap_oobdata { @@ -1315,29 +1313,19 @@ static const struct file_operations mtdswap_fops = { static int mtdswap_add_debugfs(struct mtdswap_dev *d) { - struct gendisk *gd = d->mbd_dev->disk; - struct device *dev = disk_to_dev(gd); - - struct dentry *root; + struct dentry *root = d->mtd->dbg.dfs_dir; struct dentry *dent; - root = debugfs_create_dir(gd->disk_name, NULL); - if (IS_ERR(root)) + if (!IS_ENABLED(CONFIG_DEBUG_FS)) return 0; - if (!root) { - dev_err(dev, "failed to initialize debugfs\n"); + if (IS_ERR_OR_NULL(root)) return -1; - } - d->debugfs_root = root; - - dent = debugfs_create_file("stats", S_IRUSR, root, d, + dent = debugfs_create_file("mtdswap_stats", S_IRUSR, root, d, &mtdswap_fops); if (!dent) { dev_err(d->dev, "debugfs_create_file failed\n"); - debugfs_remove_recursive(root); - d->debugfs_root = NULL; return -1; } @@ -1540,7 +1528,6 @@ static void mtdswap_remove_dev(struct mtd_blktrans_dev *dev) { struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev); - debugfs_remove_recursive(d->debugfs_root); del_mtd_blktrans_dev(dev); mtdswap_cleanup(d); kfree(d); diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index dbfa72d61d5a..3f2036f31da4 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -315,7 +315,7 @@ config MTD_NAND_ATMEL config MTD_NAND_PXA3xx tristate "NAND support on PXA3xx and Armada 370/XP" - depends on PXA3xx || ARCH_MMP || PLAT_ORION + depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU help This enables the driver for the NAND flash device found on PXA3xx processors (NFCv1) and also on Armada 370/XP (NFCv2). 
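The docg3, mtdcore and mtdswap hunks above all converge on the same convention: mtdcore now creates a per-device debugfs directory at registration time and publishes it as mtd->dbg.dfs_dir, and individual users create their files underneath it instead of keeping private debugfs roots that they must tear down themselves. A minimal sketch of how a driver could follow that convention is shown below; the helper name mydrv_add_debugfs, the file name "mydrv_stats" and the fops argument are illustrative only, they are not part of the patch.

#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/mtd/mtd.h>

/* Hypothetical helper: expose one stats file under the per-mtd debugfs dir. */
static void mydrv_add_debugfs(struct mtd_info *mtd, void *priv,
			      const struct file_operations *fops)
{
	struct dentry *root = mtd->dbg.dfs_dir;

	/* The directory may be absent (debugfs disabled or creation failed). */
	if (IS_ERR_OR_NULL(root))
		return;

	/* Prefix the name so it cannot clash with other users of the directory. */
	debugfs_create_file("mydrv_stats", S_IRUSR, root, priv, fops);

	/* No explicit cleanup: del_mtd_device() removes the directory recursively. */
}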
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c index 5d6c26f3cf7f..dcec9cf4983f 100644 --- a/drivers/mtd/nand/ams-delta.c +++ b/drivers/mtd/nand/ams-delta.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/mtd/nand/atmel/nand-controller.c b/drivers/mtd/nand/atmel/nand-controller.c index ceec21bd30c4..f25eca79f4e5 100644 --- a/drivers/mtd/nand/atmel/nand-controller.c +++ b/drivers/mtd/nand/atmel/nand-controller.c @@ -59,7 +59,7 @@ #include #include #include -#include +#include #include #include #include @@ -247,6 +247,7 @@ struct atmel_hsmc_nand_controller { void __iomem *virt; dma_addr_t dma; } sram; + const struct atmel_hsmc_reg_layout *hsmc_layout; struct regmap *io; struct atmel_nfc_op op; struct completion complete; @@ -1442,12 +1443,12 @@ static int atmel_hsmc_nand_setup_data_interface(struct atmel_nand *nand, int csline, const struct nand_data_interface *conf) { - struct atmel_nand_controller *nc; + struct atmel_hsmc_nand_controller *nc; struct atmel_smc_cs_conf smcconf; struct atmel_nand_cs *cs; int ret; - nc = to_nand_controller(nand->base.controller); + nc = to_hsmc_nand_controller(nand->base.controller); ret = atmel_smc_nand_prepare_smcconf(nand, conf, &smcconf); if (ret) @@ -1462,7 +1463,8 @@ static int atmel_hsmc_nand_setup_data_interface(struct atmel_nand *nand, if (cs->rb.type == ATMEL_NAND_NATIVE_RB) cs->smcconf.timings |= ATMEL_HSMC_TIMINGS_RBNSEL(cs->rb.id); - atmel_hsmc_cs_conf_apply(nc->smc, cs->id, &cs->smcconf); + atmel_hsmc_cs_conf_apply(nc->base.smc, nc->hsmc_layout, cs->id, + &cs->smcconf); return 0; } @@ -2089,8 +2091,8 @@ atmel_hsmc_nand_controller_legacy_init(struct atmel_hsmc_nand_controller *nc) } nc->irq = of_irq_get(nand_np, 0); - if (nc->irq < 0) { - ret = nc->irq; + if (nc->irq <= 0) { + ret = nc->irq ?: -ENXIO; if (ret != -EPROBE_DEFER) dev_err(dev, "Failed to get IRQ number (err = %d)\n", ret); @@ -2177,13 +2179,16 @@ atmel_hsmc_nand_controller_init(struct atmel_hsmc_nand_controller *nc) return -EINVAL; } + nc->hsmc_layout = atmel_hsmc_get_reg_layout(np); + nc->irq = of_irq_get(np, 0); of_node_put(np); - if (nc->irq < 0) { - if (nc->irq != -EPROBE_DEFER) + if (nc->irq <= 0) { + ret = nc->irq ?: -ENXIO; + if (ret != -EPROBE_DEFER) dev_err(dev, "Failed to get IRQ number (err = %d)\n", - nc->irq); - return nc->irq; + ret); + return ret; } np = of_parse_phandle(dev->of_node, "atmel,nfc-io", 0); diff --git a/drivers/mtd/nand/atmel/pmecc.c b/drivers/mtd/nand/atmel/pmecc.c index 8c210a5776bc..8268636675ef 100644 --- a/drivers/mtd/nand/atmel/pmecc.c +++ b/drivers/mtd/nand/atmel/pmecc.c @@ -47,7 +47,7 @@ #include #include #include -#include +#include #include #include #include @@ -363,7 +363,7 @@ atmel_pmecc_create_user(struct atmel_pmecc *pmecc, size += (req->ecc.strength + 1) * sizeof(u16); /* Reserve space for mu, dmu and delta. 
*/ size = ALIGN(size, sizeof(s32)); - size += (req->ecc.strength + 1) * sizeof(s32); + size += (req->ecc.strength + 1) * sizeof(s32) * 3; user = kzalloc(size, GFP_KERNEL); if (!user) diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c index 9bf6d9915694..9d4a28fa6b73 100644 --- a/drivers/mtd/nand/au1550nd.c +++ b/drivers/mtd/nand/au1550nd.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h b/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h index 8ea75710a854..c8834767ab6d 100644 --- a/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h +++ b/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h @@ -6,7 +6,7 @@ #endif #include -#include +#include struct bcm47xxnflash { struct bcma_drv_cc *cc; diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c index 3962f55bd034..5655dca6ce43 100644 --- a/drivers/mtd/nand/bf5xx_nand.c +++ b/drivers/mtd/nand/bf5xx_nand.c @@ -49,7 +49,7 @@ #include #include -#include +#include #include #include diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c index 7419c5ce63f8..e0eb51d8c012 100644 --- a/drivers/mtd/nand/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/brcmnand/brcmnand.c @@ -29,7 +29,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c index 2fd733eba0a3..bc558c438a57 100644 --- a/drivers/mtd/nand/cafe_nand.c +++ b/drivers/mtd/nand/cafe_nand.c @@ -13,7 +13,7 @@ #include #undef DEBUG #include -#include +#include #include #include #include diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c index 949b9400dcb7..1fc435f994e1 100644 --- a/drivers/mtd/nand/cmx270_nand.c +++ b/drivers/mtd/nand/cmx270_nand.c @@ -18,7 +18,7 @@ * CM-X270 board. */ -#include +#include #include #include #include diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c index 594b28684138..d48877540f14 100644 --- a/drivers/mtd/nand/cs553x_nand.c +++ b/drivers/mtd/nand/cs553x_nand.c @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c index 7b26e53b95b1..ccc8c43abcff 100644 --- a/drivers/mtd/nand/davinci_nand.c +++ b/drivers/mtd/nand/davinci_nand.c @@ -29,7 +29,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c index d723be352148..3087b0ba7b7f 100644 --- a/drivers/mtd/nand/denali.c +++ b/drivers/mtd/nand/denali.c @@ -980,9 +980,6 @@ static int denali_erase(struct mtd_info *mtd, int page) return irq_status & INTR__ERASE_COMP ? 
0 : NAND_STATUS_FAIL; } -#define DIV_ROUND_DOWN_ULL(ll, d) \ - ({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; }) - static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr, const struct nand_data_interface *conf) { diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h index 237cc706b0fb..9239e6793e6e 100644 --- a/drivers/mtd/nand/denali.h +++ b/drivers/mtd/nand/denali.h @@ -21,7 +21,7 @@ #define __DENALI_H__ #include -#include +#include #define DEVICE_RESET 0x0 #define DEVICE_RESET__BANK(bank) BIT(bank) diff --git a/drivers/mtd/nand/denali_dt.c b/drivers/mtd/nand/denali_dt.c index 47f398edf18f..56e2e177644d 100644 --- a/drivers/mtd/nand/denali_dt.c +++ b/drivers/mtd/nand/denali_dt.c @@ -118,7 +118,9 @@ static int denali_dt_probe(struct platform_device *pdev) dev_err(&pdev->dev, "no clk available\n"); return PTR_ERR(dt->clk); } - clk_prepare_enable(dt->clk); + ret = clk_prepare_enable(dt->clk); + if (ret) + return ret; denali->clk_x_rate = clk_get_rate(dt->clk); diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c index a023ab9e9cbf..c3aa53caab5c 100644 --- a/drivers/mtd/nand/diskonchip.c +++ b/drivers/mtd/nand/diskonchip.c @@ -27,7 +27,7 @@ #include #include -#include +#include #include #include #include diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c index a27a84fbfb84..2436cbc71662 100644 --- a/drivers/mtd/nand/docg4.c +++ b/drivers/mtd/nand/docg4.c @@ -41,7 +41,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c index b9ac16f05057..17db2f90aa2c 100644 --- a/drivers/mtd/nand/fsl_elbc_nand.c +++ b/drivers/mtd/nand/fsl_elbc_nand.c @@ -34,7 +34,7 @@ #include #include -#include +#include #include #include diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c index 59408ec2c69f..9e03bac7f34c 100644 --- a/drivers/mtd/nand/fsl_ifc_nand.c +++ b/drivers/mtd/nand/fsl_ifc_nand.c @@ -26,7 +26,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c index d85fa2555b68..a88e2cf66e0f 100644 --- a/drivers/mtd/nand/fsl_upm.c +++ b/drivers/mtd/nand/fsl_upm.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c index 9d8b051d3187..eac15d9bf49e 100644 --- a/drivers/mtd/nand/fsmc_nand.c +++ b/drivers/mtd/nand/fsmc_nand.c @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c index 85294f150f4f..fd3648952b5a 100644 --- a/drivers/mtd/nand/gpio.c +++ b/drivers/mtd/nand/gpio.c @@ -26,7 +26,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h index 9df0ad64e7e0..a45e4ce13d10 100644 --- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h +++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h @@ -17,7 +17,7 @@ #ifndef __DRIVERS_MTD_NAND_GPMI_NAND_H #define __DRIVERS_MTD_NAND_GPMI_NAND_H -#include +#include #include #include #include diff --git a/drivers/mtd/nand/hisi504_nand.c b/drivers/mtd/nand/hisi504_nand.c index 530caa80b1b6..d9ee1a7e6956 100644 --- a/drivers/mtd/nand/hisi504_nand.c +++ b/drivers/mtd/nand/hisi504_nand.c @@ -26,7 +26,7 @@ #include #include #include -#include +#include #include #include #include 
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c index 0d06a1f07d82..ad827d4af3e9 100644 --- a/drivers/mtd/nand/jz4740_nand.c +++ b/drivers/mtd/nand/jz4740_nand.c @@ -20,7 +20,7 @@ #include #include -#include +#include #include #include diff --git a/drivers/mtd/nand/jz4780_nand.c b/drivers/mtd/nand/jz4780_nand.c index 8bc835f71b26..e69f6ae4c539 100644 --- a/drivers/mtd/nand/jz4780_nand.c +++ b/drivers/mtd/nand/jz4780_nand.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c index 846a66c1b133..5796468db653 100644 --- a/drivers/mtd/nand/lpc32xx_mlc.c +++ b/drivers/mtd/nand/lpc32xx_mlc.c @@ -27,7 +27,7 @@ #include #include #include -#include +#include #include #include #include @@ -705,7 +705,9 @@ static int lpc32xx_nand_probe(struct platform_device *pdev) res = -ENOENT; goto err_exit1; } - clk_prepare_enable(host->clk); + res = clk_prepare_enable(host->clk); + if (res) + goto err_put_clk; nand_chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl; nand_chip->dev_ready = lpc32xx_nand_device_ready; @@ -812,6 +814,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev) dma_release_channel(host->dma_chan); err_exit2: clk_disable_unprepare(host->clk); +err_put_clk: clk_put(host->clk); err_exit1: lpc32xx_wp_enable(host); @@ -846,9 +849,12 @@ static int lpc32xx_nand_remove(struct platform_device *pdev) static int lpc32xx_nand_resume(struct platform_device *pdev) { struct lpc32xx_nand_host *host = platform_get_drvdata(pdev); + int ret; /* Re-enable NAND clock */ - clk_prepare_enable(host->clk); + ret = clk_prepare_enable(host->clk); + if (ret) + return ret; /* Fresh init of NAND controller */ lpc32xx_nand_setup(host); diff --git a/drivers/mtd/nand/lpc32xx_slc.c b/drivers/mtd/nand/lpc32xx_slc.c index a0669a33f8fe..b61f28a1554d 100644 --- a/drivers/mtd/nand/lpc32xx_slc.c +++ b/drivers/mtd/nand/lpc32xx_slc.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include #include @@ -840,7 +840,9 @@ static int lpc32xx_nand_probe(struct platform_device *pdev) res = -ENOENT; goto err_exit1; } - clk_prepare_enable(host->clk); + res = clk_prepare_enable(host->clk); + if (res) + goto err_exit1; /* Set NAND IO addresses and command/ready functions */ chip->IO_ADDR_R = SLC_DATA(host->io_base); @@ -972,9 +974,12 @@ static int lpc32xx_nand_remove(struct platform_device *pdev) static int lpc32xx_nand_resume(struct platform_device *pdev) { struct lpc32xx_nand_host *host = platform_get_drvdata(pdev); + int ret; /* Re-enable NAND clock */ - clk_prepare_enable(host->clk); + ret = clk_prepare_enable(host->clk); + if (ret) + return ret; /* Fresh init of NAND controller */ lpc32xx_nand_setup(host); diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c index 0e86fb6277c3..b6b97cc9fba6 100644 --- a/drivers/mtd/nand/mpc5121_nfc.c +++ b/drivers/mtd/nand/mpc5121_nfc.c @@ -33,7 +33,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c index 6c3a4aab0b48..7f3b065b6b8f 100644 --- a/drivers/mtd/nand/mtk_ecc.c +++ b/drivers/mtd/nand/mtk_ecc.c @@ -464,8 +464,8 @@ static int mtk_ecc_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) { - dev_err(dev, "failed to get irq\n"); - return -EINVAL; + dev_err(dev, "failed to get irq: %d\n", irq); + return irq; } ret = dma_set_mask(dev, DMA_BIT_MASK(32)); diff --git a/drivers/mtd/nand/mtk_nand.c 
b/drivers/mtd/nand/mtk_nand.c index f7ae99464375..d86a7d131cc0 100644 --- a/drivers/mtd/nand/mtk_nand.c +++ b/drivers/mtd/nand/mtk_nand.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c index a764d5ca7536..53e5e0337c3e 100644 --- a/drivers/mtd/nand/mxc_nand.c +++ b/drivers/mtd/nand/mxc_nand.c @@ -22,7 +22,7 @@ #include #include #include -#include +#include #include #include #include @@ -876,6 +876,8 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr) } } +#define MXC_V1_ECCBYTES 5 + static int mxc_v1_ooblayout_ecc(struct mtd_info *mtd, int section, struct mtd_oob_region *oobregion) { @@ -885,7 +887,7 @@ static int mxc_v1_ooblayout_ecc(struct mtd_info *mtd, int section, return -ERANGE; oobregion->offset = (section * 16) + 6; - oobregion->length = nand_chip->ecc.bytes; + oobregion->length = MXC_V1_ECCBYTES; return 0; } @@ -907,8 +909,7 @@ static int mxc_v1_ooblayout_free(struct mtd_info *mtd, int section, oobregion->length = 4; } } else { - oobregion->offset = ((section - 1) * 16) + - nand_chip->ecc.bytes + 6; + oobregion->offset = ((section - 1) * 16) + MXC_V1_ECCBYTES + 6; if (section < nand_chip->ecc.steps) oobregion->length = (section * 16) + 6 - oobregion->offset; diff --git a/drivers/mtd/nand/nand_amd.c b/drivers/mtd/nand/nand_amd.c index 170403a3bfa8..22f060f38123 100644 --- a/drivers/mtd/nand/nand_amd.c +++ b/drivers/mtd/nand/nand_amd.c @@ -15,7 +15,7 @@ * GNU General Public License for more details. */ -#include +#include static void amd_nand_decode_id(struct nand_chip *chip) { diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index c6c18b82f8f4..12edaae17d81 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -39,7 +39,7 @@ #include #include #include -#include +#include #include #include #include @@ -1247,179 +1247,6 @@ int nand_reset(struct nand_chip *chip, int chipnr) return 0; } -/** - * __nand_unlock - [REPLACEABLE] unlocks specified locked blocks - * @mtd: mtd info - * @ofs: offset to start unlock from - * @len: length to unlock - * @invert: - * - when = 0, unlock the range of blocks within the lower and - * upper boundary address - * - when = 1, unlock the range of blocks outside the boundaries - * of the lower and upper boundary address - * - * Returs unlock status. - */ -static int __nand_unlock(struct mtd_info *mtd, loff_t ofs, - uint64_t len, int invert) -{ - int ret = 0; - int status, page; - struct nand_chip *chip = mtd_to_nand(mtd); - - /* Submit address of first page to unlock */ - page = ofs >> chip->page_shift; - chip->cmdfunc(mtd, NAND_CMD_UNLOCK1, -1, page & chip->pagemask); - - /* Submit address of last page to unlock */ - page = (ofs + len) >> chip->page_shift; - chip->cmdfunc(mtd, NAND_CMD_UNLOCK2, -1, - (page | invert) & chip->pagemask); - - /* Call wait ready function */ - status = chip->waitfunc(mtd, chip); - /* See if device thinks it succeeded */ - if (status & NAND_STATUS_FAIL) { - pr_debug("%s: error status = 0x%08x\n", - __func__, status); - ret = -EIO; - } - - return ret; -} - -/** - * nand_unlock - [REPLACEABLE] unlocks specified locked blocks - * @mtd: mtd info - * @ofs: offset to start unlock from - * @len: length to unlock - * - * Returns unlock status. 
- */ -int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) -{ - int ret = 0; - int chipnr; - struct nand_chip *chip = mtd_to_nand(mtd); - - pr_debug("%s: start = 0x%012llx, len = %llu\n", - __func__, (unsigned long long)ofs, len); - - if (check_offs_len(mtd, ofs, len)) - return -EINVAL; - - /* Align to last block address if size addresses end of the device */ - if (ofs + len == mtd->size) - len -= mtd->erasesize; - - nand_get_device(mtd, FL_UNLOCKING); - - /* Shift to get chip number */ - chipnr = ofs >> chip->chip_shift; - - /* - * Reset the chip. - * If we want to check the WP through READ STATUS and check the bit 7 - * we must reset the chip - * some operation can also clear the bit 7 of status register - * eg. erase/program a locked block - */ - nand_reset(chip, chipnr); - - chip->select_chip(mtd, chipnr); - - /* Check, if it is write protected */ - if (nand_check_wp(mtd)) { - pr_debug("%s: device is write protected!\n", - __func__); - ret = -EIO; - goto out; - } - - ret = __nand_unlock(mtd, ofs, len, 0); - -out: - chip->select_chip(mtd, -1); - nand_release_device(mtd); - - return ret; -} -EXPORT_SYMBOL(nand_unlock); - -/** - * nand_lock - [REPLACEABLE] locks all blocks present in the device - * @mtd: mtd info - * @ofs: offset to start unlock from - * @len: length to unlock - * - * This feature is not supported in many NAND parts. 'Micron' NAND parts do - * have this feature, but it allows only to lock all blocks, not for specified - * range for block. Implementing 'lock' feature by making use of 'unlock', for - * now. - * - * Returns lock status. - */ -int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) -{ - int ret = 0; - int chipnr, status, page; - struct nand_chip *chip = mtd_to_nand(mtd); - - pr_debug("%s: start = 0x%012llx, len = %llu\n", - __func__, (unsigned long long)ofs, len); - - if (check_offs_len(mtd, ofs, len)) - return -EINVAL; - - nand_get_device(mtd, FL_LOCKING); - - /* Shift to get chip number */ - chipnr = ofs >> chip->chip_shift; - - /* - * Reset the chip. - * If we want to check the WP through READ STATUS and check the bit 7 - * we must reset the chip - * some operation can also clear the bit 7 of status register - * eg. 
erase/program a locked block - */ - nand_reset(chip, chipnr); - - chip->select_chip(mtd, chipnr); - - /* Check, if it is write protected */ - if (nand_check_wp(mtd)) { - pr_debug("%s: device is write protected!\n", - __func__); - status = MTD_ERASE_FAILED; - ret = -EIO; - goto out; - } - - /* Submit address of first page to lock */ - page = ofs >> chip->page_shift; - chip->cmdfunc(mtd, NAND_CMD_LOCK, -1, page & chip->pagemask); - - /* Call wait ready function */ - status = chip->waitfunc(mtd, chip); - /* See if device thinks it succeeded */ - if (status & NAND_STATUS_FAIL) { - pr_debug("%s: error status = 0x%08x\n", - __func__, status); - ret = -EIO; - goto out; - } - - ret = __nand_unlock(mtd, ofs, len, 0x1); - -out: - chip->select_chip(mtd, -1); - nand_release_device(mtd); - - return ret; -} -EXPORT_SYMBOL(nand_lock); - /** * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data * @buf: buffer to test @@ -2841,7 +2668,7 @@ static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len, static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops) { - int chipnr, realpage, page, blockmask, column; + int chipnr, realpage, page, column; struct nand_chip *chip = mtd_to_nand(mtd); uint32_t writelen = ops->len; @@ -2877,7 +2704,6 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, realpage = (int)(to >> chip->page_shift); page = realpage & chip->pagemask; - blockmask = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1; /* Invalidate the page cache, when we write to the cached page */ if (to <= ((loff_t)chip->pagebuf << chip->page_shift) && @@ -3993,10 +3819,13 @@ static void nand_manufacturer_detect(struct nand_chip *chip) * nand_decode_ext_id() otherwise. */ if (chip->manufacturer.desc && chip->manufacturer.desc->ops && - chip->manufacturer.desc->ops->detect) + chip->manufacturer.desc->ops->detect) { + /* The 3rd id byte holds MLC / multichip data */ + chip->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]); chip->manufacturer.desc->ops->detect(chip); - else + } else { nand_decode_ext_id(chip); + } } /* @@ -4036,7 +3865,7 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type) const struct nand_manufacturer *manufacturer; struct mtd_info *mtd = nand_to_mtd(chip); int busw; - int i, ret; + int i; u8 *id_data = chip->id.data; u8 maf_id, dev_id; @@ -4066,7 +3895,7 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type) chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); /* Read entire ID string */ - for (i = 0; i < 8; i++) + for (i = 0; i < ARRAY_SIZE(chip->id.data); i++) id_data[i] = chip->read_byte(mtd); if (id_data[0] != maf_id || id_data[1] != dev_id) { @@ -4075,7 +3904,7 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type) return -ENODEV; } - chip->id.len = nand_id_len(id_data, 8); + chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data)); /* Try to identify manufacturer */ manufacturer = nand_get_manufacturer(maf_id); @@ -4177,10 +4006,6 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type) if (mtd->writesize > 512 && chip->cmdfunc == nand_command) chip->cmdfunc = nand_command_lp; - ret = nand_manufacturer_init(chip); - if (ret) - return ret; - pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n", maf_id, dev_id); @@ -4388,23 +4213,6 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips, return ret; } - /* Initialize the ->data_interface field. 
*/ - ret = nand_init_data_interface(chip); - if (ret) - goto err_nand_init; - - /* - * Setup the data interface correctly on the chip and controller side. - * This explicit call to nand_setup_data_interface() is only required - * for the first die, because nand_reset() has been called before - * ->data_interface and ->default_onfi_timing_mode were set. - * For the other dies, nand_reset() will automatically switch to the - * best mode for us. - */ - ret = nand_setup_data_interface(chip, 0); - if (ret) - goto err_nand_init; - nand_maf_id = chip->id.data[0]; nand_dev_id = chip->id.data[1]; @@ -4434,12 +4242,6 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips, mtd->size = i * chip->chipsize; return 0; - -err_nand_init: - /* Free manufacturer priv data. */ - nand_manufacturer_cleanup(chip); - - return ret; } EXPORT_SYMBOL(nand_scan_ident); @@ -4826,55 +4628,60 @@ int nand_scan_tail(struct mtd_info *mtd) struct nand_chip *chip = mtd_to_nand(mtd); struct nand_ecc_ctrl *ecc = &chip->ecc; struct nand_buffers *nbuf = NULL; - int ret; + int ret, i; /* New bad blocks should be marked in OOB, flash-based BBT, or both */ if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) && !(chip->bbt_options & NAND_BBT_USE_FLASH))) { - ret = -EINVAL; - goto err_ident; + return -EINVAL; } if (invalid_ecc_page_accessors(chip)) { pr_err("Invalid ECC page accessors setup\n"); - ret = -EINVAL; - goto err_ident; + return -EINVAL; } if (!(chip->options & NAND_OWN_BUFFERS)) { nbuf = kzalloc(sizeof(*nbuf), GFP_KERNEL); - if (!nbuf) { - ret = -ENOMEM; - goto err_ident; - } + if (!nbuf) + return -ENOMEM; nbuf->ecccalc = kmalloc(mtd->oobsize, GFP_KERNEL); if (!nbuf->ecccalc) { ret = -ENOMEM; - goto err_free; + goto err_free_nbuf; } nbuf->ecccode = kmalloc(mtd->oobsize, GFP_KERNEL); if (!nbuf->ecccode) { ret = -ENOMEM; - goto err_free; + goto err_free_nbuf; } nbuf->databuf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL); if (!nbuf->databuf) { ret = -ENOMEM; - goto err_free; + goto err_free_nbuf; } chip->buffers = nbuf; - } else { - if (!chip->buffers) { - ret = -ENOMEM; - goto err_ident; - } + } else if (!chip->buffers) { + return -ENOMEM; } + /* + * FIXME: some NAND manufacturer drivers expect the first die to be + * selected when manufacturer->init() is called. They should be fixed + * to explictly select the relevant die when interacting with the NAND + * chip. + */ + chip->select_chip(mtd, 0); + ret = nand_manufacturer_init(chip); + chip->select_chip(mtd, -1); + if (ret) + goto err_free_nbuf; + /* Set the internal oob buffer location, just after the page data */ chip->oob_poi = chip->buffers->databuf + mtd->writesize; @@ -4896,7 +4703,7 @@ int nand_scan_tail(struct mtd_info *mtd) WARN(1, "No oob scheme defined for oobsize %d\n", mtd->oobsize); ret = -EINVAL; - goto err_free; + goto err_nand_manuf_cleanup; } } @@ -4911,7 +4718,7 @@ int nand_scan_tail(struct mtd_info *mtd) if (!ecc->calculate || !ecc->correct || !ecc->hwctl) { WARN(1, "No ECC functions supplied; hardware ECC not possible\n"); ret = -EINVAL; - goto err_free; + goto err_nand_manuf_cleanup; } if (!ecc->read_page) ecc->read_page = nand_read_page_hwecc_oob_first; @@ -4943,7 +4750,7 @@ int nand_scan_tail(struct mtd_info *mtd) ecc->write_page == nand_write_page_hwecc)) { WARN(1, "No ECC functions supplied; hardware ECC not possible\n"); ret = -EINVAL; - goto err_free; + goto err_nand_manuf_cleanup; } /* Use standard syndrome read/write page function? 
*/ if (!ecc->read_page) @@ -4963,7 +4770,7 @@ int nand_scan_tail(struct mtd_info *mtd) if (!ecc->strength) { WARN(1, "Driver must set ecc.strength when using hardware ECC\n"); ret = -EINVAL; - goto err_free; + goto err_nand_manuf_cleanup; } break; } @@ -4976,7 +4783,7 @@ int nand_scan_tail(struct mtd_info *mtd) ret = nand_set_ecc_soft_ops(mtd); if (ret) { ret = -EINVAL; - goto err_free; + goto err_nand_manuf_cleanup; } break; @@ -4984,7 +4791,7 @@ int nand_scan_tail(struct mtd_info *mtd) if (!ecc->read_page || !ecc->write_page) { WARN(1, "No ECC functions supplied; on-die ECC not possible\n"); ret = -EINVAL; - goto err_free; + goto err_nand_manuf_cleanup; } if (!ecc->read_oob) ecc->read_oob = nand_read_oob_std; @@ -5008,7 +4815,7 @@ int nand_scan_tail(struct mtd_info *mtd) default: WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->mode); ret = -EINVAL; - goto err_free; + goto err_nand_manuf_cleanup; } /* For many systems, the standard OOB write also works for raw */ @@ -5029,13 +4836,13 @@ int nand_scan_tail(struct mtd_info *mtd) if (ecc->steps * ecc->size != mtd->writesize) { WARN(1, "Invalid ECC parameters\n"); ret = -EINVAL; - goto err_free; + goto err_nand_manuf_cleanup; } ecc->total = ecc->steps * ecc->bytes; if (ecc->total > mtd->oobsize) { WARN(1, "Total number of ECC bytes exceeded oobsize\n"); ret = -EINVAL; - goto err_free; + goto err_nand_manuf_cleanup; } /* @@ -5117,6 +4924,21 @@ int nand_scan_tail(struct mtd_info *mtd) if (!mtd->bitflip_threshold) mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4); + /* Initialize the ->data_interface field. */ + ret = nand_init_data_interface(chip); + if (ret) + goto err_nand_manuf_cleanup; + + /* Enter fastest possible mode on all dies. */ + for (i = 0; i < chip->numchips; i++) { + chip->select_chip(mtd, i); + ret = nand_setup_data_interface(chip, i); + chip->select_chip(mtd, -1); + + if (ret) + goto err_nand_data_iface_cleanup; + } + /* Check, if we should skip the bad block table scan */ if (chip->options & NAND_SKIP_BBTSCAN) return 0; @@ -5124,10 +4946,17 @@ int nand_scan_tail(struct mtd_info *mtd) /* Build bad block table */ ret = chip->scan_bbt(mtd); if (ret) - goto err_free; + goto err_nand_data_iface_cleanup; + return 0; -err_free: +err_nand_data_iface_cleanup: + nand_release_data_interface(chip); + +err_nand_manuf_cleanup: + nand_manufacturer_cleanup(chip); + +err_free_nbuf: if (nbuf) { kfree(nbuf->databuf); kfree(nbuf->ecccode); @@ -5135,12 +4964,6 @@ int nand_scan_tail(struct mtd_info *mtd) kfree(nbuf); } -err_ident: - /* Clean up nand_scan_ident(). */ - - /* Free manufacturer priv data. 
*/ - nand_manufacturer_cleanup(chip); - return ret; } EXPORT_SYMBOL(nand_scan_tail); diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c index 7695efea65f2..2915b6739bf8 100644 --- a/drivers/mtd/nand/nand_bbt.c +++ b/drivers/mtd/nand/nand_bbt.c @@ -61,7 +61,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/mtd/nand/nand_bch.c b/drivers/mtd/nand/nand_bch.c index 44763f87eae4..505441c9373b 100644 --- a/drivers/mtd/nand/nand_bch.c +++ b/drivers/mtd/nand/nand_bch.c @@ -25,7 +25,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c index d1770b066396..7613a0388044 100644 --- a/drivers/mtd/nand/nand_ecc.c +++ b/drivers/mtd/nand/nand_ecc.c @@ -43,7 +43,7 @@ #include #include #include -#include +#include #include #include #else diff --git a/drivers/mtd/nand/nand_hynix.c b/drivers/mtd/nand/nand_hynix.c index b12dc7325378..985751eda317 100644 --- a/drivers/mtd/nand/nand_hynix.c +++ b/drivers/mtd/nand/nand_hynix.c @@ -15,7 +15,7 @@ * GNU General Public License for more details. */ -#include +#include #include #include @@ -477,7 +477,7 @@ static void hynix_nand_extract_ecc_requirements(struct nand_chip *chip, * The ECC requirements field meaning depends on the * NAND technology. */ - u8 nand_tech = chip->id.data[5] & 0x3; + u8 nand_tech = chip->id.data[5] & 0x7; if (nand_tech < 3) { /* > 26nm, reference: H27UBG8T2A datasheet */ @@ -533,7 +533,7 @@ static void hynix_nand_extract_scrambling_requirements(struct nand_chip *chip, if (nand_tech > 0) chip->options |= NAND_NEED_SCRAMBLING; } else { - nand_tech = chip->id.data[5] & 0x3; + nand_tech = chip->id.data[5] & 0x7; /* < 32nm */ if (nand_tech > 2) diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c index 92e2cf8e9ff9..5423c3bb388e 100644 --- a/drivers/mtd/nand/nand_ids.c +++ b/drivers/mtd/nand/nand_ids.c @@ -6,7 +6,7 @@ * published by the Free Software Foundation. * */ -#include +#include #include #define LP_OPTIONS 0 diff --git a/drivers/mtd/nand/nand_macronix.c b/drivers/mtd/nand/nand_macronix.c index 84855c3e1a02..d290ff2a6d2f 100644 --- a/drivers/mtd/nand/nand_macronix.c +++ b/drivers/mtd/nand/nand_macronix.c @@ -15,7 +15,7 @@ * GNU General Public License for more details. */ -#include +#include static int macronix_nand_init(struct nand_chip *chip) { diff --git a/drivers/mtd/nand/nand_micron.c b/drivers/mtd/nand/nand_micron.c index c30ab60f8e1b..abf6a3c376e8 100644 --- a/drivers/mtd/nand/nand_micron.c +++ b/drivers/mtd/nand/nand_micron.c @@ -15,7 +15,7 @@ * GNU General Public License for more details. */ -#include +#include /* * Special Micron status bit that indicates when the block has been diff --git a/drivers/mtd/nand/nand_samsung.c b/drivers/mtd/nand/nand_samsung.c index 1e0755997762..d348f0129ae7 100644 --- a/drivers/mtd/nand/nand_samsung.c +++ b/drivers/mtd/nand/nand_samsung.c @@ -15,7 +15,7 @@ * GNU General Public License for more details. 
*/ -#include +#include static void samsung_nand_decode_id(struct nand_chip *chip) { diff --git a/drivers/mtd/nand/nand_timings.c b/drivers/mtd/nand/nand_timings.c index 7e36d7d13c26..5d1533bcc5bd 100644 --- a/drivers/mtd/nand/nand_timings.c +++ b/drivers/mtd/nand/nand_timings.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include static const struct nand_data_interface onfi_sdr_timings[] = { /* Mode 0 */ diff --git a/drivers/mtd/nand/nand_toshiba.c b/drivers/mtd/nand/nand_toshiba.c index fa787ba38dcd..57df857074e6 100644 --- a/drivers/mtd/nand/nand_toshiba.c +++ b/drivers/mtd/nand/nand_toshiba.c @@ -15,7 +15,7 @@ * GNU General Public License for more details. */ -#include +#include static void toshiba_nand_decode_id(struct nand_chip *chip) { diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c index e4211c3cc49b..246b4393118e 100644 --- a/drivers/mtd/nand/nandsim.c +++ b/drivers/mtd/nand/nandsim.c @@ -33,7 +33,7 @@ #include #include #include -#include +#include #include #include #include @@ -287,11 +287,6 @@ MODULE_PARM_DESC(bch, "Enable BCH ecc and set how many bits should " /* Maximum page cache pages needed to read or write a NAND page to the cache_file */ #define NS_MAX_HELD_PAGES 16 -struct nandsim_debug_info { - struct dentry *dfs_root; - struct dentry *dfs_wear_report; -}; - /* * A union to represent flash memory contents and flash buffer. */ @@ -370,8 +365,6 @@ struct nandsim { void *file_buf; struct page *held_pages[NS_MAX_HELD_PAGES]; int held_cnt; - - struct nandsim_debug_info dbg; }; /* @@ -524,39 +517,23 @@ static const struct file_operations dfs_fops = { */ static int nandsim_debugfs_create(struct nandsim *dev) { - struct nandsim_debug_info *dbg = &dev->dbg; + struct dentry *root = nsmtd->dbg.dfs_dir; struct dentry *dent; if (!IS_ENABLED(CONFIG_DEBUG_FS)) return 0; - dent = debugfs_create_dir("nandsim", NULL); - if (!dent) { - NS_ERR("cannot create \"nandsim\" debugfs directory\n"); - return -ENODEV; - } - dbg->dfs_root = dent; + if (IS_ERR_OR_NULL(root)) + return -1; - dent = debugfs_create_file("wear_report", S_IRUSR, - dbg->dfs_root, dev, &dfs_fops); - if (!dent) - goto out_remove; - dbg->dfs_wear_report = dent; + dent = debugfs_create_file("nandsim_wear_report", S_IRUSR, + root, dev, &dfs_fops); + if (IS_ERR_OR_NULL(dent)) { + NS_ERR("cannot create \"nandsim_wear_report\" debugfs entry\n"); + return -1; + } return 0; - -out_remove: - debugfs_remove_recursive(dbg->dfs_root); - return -ENODEV; -} - -/** - * nandsim_debugfs_remove - destroy all debugfs files - */ -static void nandsim_debugfs_remove(struct nandsim *ns) -{ - if (IS_ENABLED(CONFIG_DEBUG_FS)) - debugfs_remove_recursive(ns->dbg.dfs_root); } /* @@ -1379,7 +1356,7 @@ static ssize_t read_file(struct nandsim *ns, struct file *file, void *buf, size_ if (err) return err; noreclaim_flag = memalloc_noreclaim_save(); - tx = kernel_read(file, pos, buf, count); + tx = kernel_read(file, buf, count, &pos); memalloc_noreclaim_restore(noreclaim_flag); put_pages(ns); return tx; @@ -1395,7 +1372,7 @@ static ssize_t write_file(struct nandsim *ns, struct file *file, void *buf, size if (err) return err; noreclaim_flag = memalloc_noreclaim_save(); - tx = kernel_write(file, buf, count, pos); + tx = kernel_write(file, buf, count, &pos); memalloc_noreclaim_restore(noreclaim_flag); put_pages(ns); return tx; @@ -2352,9 +2329,6 @@ static int __init ns_init_module(void) if ((retval = setup_wear_reporting(nsmtd)) != 0) goto err_exit; - if ((retval = nandsim_debugfs_create(nand)) != 0) - goto err_exit; - if 
((retval = init_nandsim(nsmtd)) != 0) goto err_exit; @@ -2370,10 +2344,12 @@ static int __init ns_init_module(void) if (retval != 0) goto err_exit; + if ((retval = nandsim_debugfs_create(nand)) != 0) + goto err_exit; + return 0; err_exit: - nandsim_debugfs_remove(nand); free_nandsim(nand); nand_release(nsmtd); for (i = 0;i < ARRAY_SIZE(nand->partitions); ++i) @@ -2396,7 +2372,6 @@ static void __exit ns_cleanup_module(void) struct nandsim *ns = nand_get_controller_data(chip); int i; - nandsim_debugfs_remove(ns); free_nandsim(ns); /* Free nandsim private resources */ nand_release(nsmtd); /* Unregister driver */ for (i = 0;i < ARRAY_SIZE(ns->partitions); ++i) diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c index 28e6118362f7..d8a806894937 100644 --- a/drivers/mtd/nand/ndfc.c +++ b/drivers/mtd/nand/ndfc.c @@ -22,7 +22,7 @@ * */ #include -#include +#include #include #include #include diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c index 8f64011d32ef..7bb4d2ea9342 100644 --- a/drivers/mtd/nand/nuc900_nand.c +++ b/drivers/mtd/nand/nuc900_nand.c @@ -19,7 +19,7 @@ #include #include -#include +#include #include #define REG_FMICSR 0x00 diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index 084934a9f19c..54540c8fa1a2 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c index 209170ed2b76..5a5aa1f07d07 100644 --- a/drivers/mtd/nand/orion_nand.c +++ b/drivers/mtd/nand/orion_nand.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include #include @@ -54,13 +54,16 @@ static void orion_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) { struct nand_chip *chip = mtd_to_nand(mtd); void __iomem *io_base = chip->IO_ADDR_R; +#if __LINUX_ARM_ARCH__ >= 5 uint64_t *buf64; +#endif int i = 0; while (len && (unsigned long)buf & 7) { *buf++ = readb(io_base); len--; } +#if __LINUX_ARM_ARCH__ >= 5 buf64 = (uint64_t *)buf; while (i < len/8) { /* @@ -74,6 +77,10 @@ static void orion_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) buf64[i++] = x; } i *= 8; +#else + readsl(io_base, buf, len/4); + i = len / 4 * 4; +#endif while (i < len) buf[i++] = readb(io_base); } diff --git a/drivers/mtd/nand/oxnas_nand.c b/drivers/mtd/nand/oxnas_nand.c index 1b207aac840c..d649d5944826 100644 --- a/drivers/mtd/nand/oxnas_nand.c +++ b/drivers/mtd/nand/oxnas_nand.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include @@ -112,14 +112,19 @@ static int oxnas_nand_probe(struct platform_device *pdev) if (count > 1) return -EINVAL; - clk_prepare_enable(oxnas->clk); + err = clk_prepare_enable(oxnas->clk); + if (err) + return err; + device_reset_optional(&pdev->dev); for_each_child_of_node(np, nand_np) { chip = devm_kzalloc(&pdev->dev, sizeof(struct nand_chip), GFP_KERNEL); - if (!chip) - return -ENOMEM; + if (!chip) { + err = -ENOMEM; + goto err_clk_unprepare; + } chip->controller = &oxnas->base; @@ -139,12 +144,12 @@ static int oxnas_nand_probe(struct platform_device *pdev) /* Scan to find existence of the device */ err = nand_scan(mtd, 1); if (err) - return err; + goto err_clk_unprepare; err = mtd_device_register(mtd, NULL, 0); if (err) { nand_release(mtd); - return err; + goto err_clk_unprepare; } oxnas->chips[nchips] = chip; @@ -152,12 +157,18 @@ static int oxnas_nand_probe(struct platform_device *pdev) } /* Exit if no chips found 
*/ - if (!nchips) - return -ENODEV; + if (!nchips) { + err = -ENODEV; + goto err_clk_unprepare; + } platform_set_drvdata(pdev, oxnas); return 0; + +err_clk_unprepare: + clk_disable_unprepare(oxnas->clk); + return err; } static int oxnas_nand_remove(struct platform_device *pdev) diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c index 074b8b01289e..a47a7e4bd25a 100644 --- a/drivers/mtd/nand/pasemi_nand.c +++ b/drivers/mtd/nand/pasemi_nand.c @@ -25,7 +25,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c index 791de3e4bbb6..925a1323604d 100644 --- a/drivers/mtd/nand/plat_nand.c +++ b/drivers/mtd/nand/plat_nand.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include struct plat_nand_data { diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index 74dae4bbdac8..85cff68643e0 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c index 88af7145a51a..3baddfc997d1 100644 --- a/drivers/mtd/nand/qcom_nandc.c +++ b/drivers/mtd/nand/qcom_nandc.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include #include @@ -53,6 +53,8 @@ #define NAND_VERSION 0xf08 #define NAND_READ_LOCATION_0 0xf20 #define NAND_READ_LOCATION_1 0xf24 +#define NAND_READ_LOCATION_2 0xf28 +#define NAND_READ_LOCATION_3 0xf2c /* dummy register offsets, used by write_reg_dma */ #define NAND_DEV_CMD1_RESTORE 0xdead @@ -109,7 +111,11 @@ #define READ_ADDR 0 /* NAND_DEV_CMD_VLD bits */ -#define READ_START_VLD 0 +#define READ_START_VLD BIT(0) +#define READ_STOP_VLD BIT(1) +#define WRITE_START_VLD BIT(2) +#define ERASE_START_VLD BIT(3) +#define SEQ_READ_START_VLD BIT(4) /* NAND_EBI2_ECC_BUF_CFG bits */ #define NUM_STEPS 0 @@ -131,6 +137,11 @@ #define ERASED_PAGE (PAGE_ALL_ERASED | PAGE_ERASED) #define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED) +/* NAND_READ_LOCATION_n bits */ +#define READ_LOCATION_OFFSET 0 +#define READ_LOCATION_SIZE 16 +#define READ_LOCATION_LAST 31 + /* Version Mask */ #define NAND_VERSION_MAJOR_MASK 0xf0000000 #define NAND_VERSION_MAJOR_SHIFT 28 @@ -148,6 +159,13 @@ #define FETCH_ID 0xb #define RESET_DEVICE 0xd +/* Default Value for NAND_DEV_CMD_VLD */ +#define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \ + ERASE_START_VLD | SEQ_READ_START_VLD) + +/* NAND_CTRL bits */ +#define BAM_MODE_EN BIT(0) + /* * the NAND controller performs reads/writes with ECC in 516 byte chunks. * the driver calls the chunks 'step' or 'codeword' interchangeably @@ -169,11 +187,81 @@ #define ECC_BCH_4BIT BIT(2) #define ECC_BCH_8BIT BIT(3) +#define nandc_set_read_loc(nandc, reg, offset, size, is_last) \ +nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \ + ((offset) << READ_LOCATION_OFFSET) | \ + ((size) << READ_LOCATION_SIZE) | \ + ((is_last) << READ_LOCATION_LAST)) + +/* + * Returns the actual register address for all NAND_DEV_ registers + * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD) + */ +#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg)) + +#define QPIC_PER_CW_CMD_SGL 32 +#define QPIC_PER_CW_DATA_SGL 8 + +/* + * Flags used in DMA descriptor preparation helper functions + * (i.e. 
read_reg_dma/write_reg_dma/read_data_dma/write_data_dma) + */ +/* Don't set the EOT in current tx BAM sgl */ +#define NAND_BAM_NO_EOT BIT(0) +/* Set the NWD flag in current BAM sgl */ +#define NAND_BAM_NWD BIT(1) +/* Finish writing in the current BAM sgl and start writing in another BAM sgl */ +#define NAND_BAM_NEXT_SGL BIT(2) +/* + * Erased codeword status is being used two times in single transfer so this + * flag will determine the current value of erased codeword status register + */ +#define NAND_ERASED_CW_SET BIT(4) + +/* + * This data type corresponds to the BAM transaction which will be used for all + * NAND transfers. + * @cmd_sgl - sgl for NAND BAM command pipe + * @data_sgl - sgl for NAND BAM consumer/producer pipe + * @cmd_sgl_pos - current index in command sgl. + * @cmd_sgl_start - start index in command sgl. + * @tx_sgl_pos - current index in data sgl for tx. + * @tx_sgl_start - start index in data sgl for tx. + * @rx_sgl_pos - current index in data sgl for rx. + * @rx_sgl_start - start index in data sgl for rx. + */ +struct bam_transaction { + struct scatterlist *cmd_sgl; + struct scatterlist *data_sgl; + u32 cmd_sgl_pos; + u32 cmd_sgl_start; + u32 tx_sgl_pos; + u32 tx_sgl_start; + u32 rx_sgl_pos; + u32 rx_sgl_start; +}; + +/* + * This data type corresponds to the nand dma descriptor + * @list - list for desc_info + * @dir - DMA transfer direction + * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by + * ADM + * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM + * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM + * @dma_desc - low level DMA engine descriptor + */ struct desc_info { struct list_head node; enum dma_data_direction dir; - struct scatterlist sgl; + union { + struct scatterlist adm_sgl; + struct { + struct scatterlist *bam_sgl; + int sgl_cnt; + }; + }; struct dma_async_tx_descriptor *dma_desc; }; @@ -202,6 +290,13 @@ struct nandc_regs { __le32 orig_vld; __le32 ecc_buf_cfg; + __le32 read_location0; + __le32 read_location1; + __le32 read_location2; + __le32 read_location3; + + __le32 erased_cw_detect_cfg_clr; + __le32 erased_cw_detect_cfg_set; }; /* @@ -226,14 +321,17 @@ struct nandc_regs { * by upper layers directly * @buf_size/count/start: markers for chip->read_buf/write_buf functions * @reg_read_buf: local buffer for reading back registers via DMA + * @reg_read_dma: contains dma address for register read buffer * @reg_read_pos: marker for data read in reg_read_buf * * @regs: a contiguous chunk of memory for DMA register * writes. contains the register values to be * written to controller * @cmd1/vld: some fixed controller register values - * @ecc_modes: supported ECC modes by the current controller, + * @props: properties of current NAND controller, * initialized via DT match data + * @max_cwperpage: maximum QPIC codewords required. 
calculated + * from all connected NAND devices pagesize */ struct qcom_nand_controller { struct nand_hw_control controller; @@ -247,23 +345,39 @@ struct qcom_nand_controller { struct clk *core_clk; struct clk *aon_clk; - struct dma_chan *chan; - unsigned int cmd_crci; - unsigned int data_crci; + union { + /* will be used only by QPIC for BAM DMA */ + struct { + struct dma_chan *tx_chan; + struct dma_chan *rx_chan; + struct dma_chan *cmd_chan; + }; + + /* will be used only by EBI2 for ADM DMA */ + struct { + struct dma_chan *chan; + unsigned int cmd_crci; + unsigned int data_crci; + }; + }; + struct list_head desc_list; + struct bam_transaction *bam_txn; u8 *data_buffer; int buf_size; int buf_count; int buf_start; + unsigned int max_cwperpage; __le32 *reg_read_buf; + dma_addr_t reg_read_dma; int reg_read_pos; struct nandc_regs *regs; u32 cmd1, vld; - u32 ecc_modes; + const struct qcom_nandc_props *props; }; /* @@ -316,6 +430,78 @@ struct qcom_nand_host { u32 clrreadstatus; }; +/* + * This data type corresponds to the NAND controller properties which varies + * among different NAND controllers. + * @ecc_modes - ecc mode for NAND + * @is_bam - whether NAND controller is using BAM + * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset + */ +struct qcom_nandc_props { + u32 ecc_modes; + bool is_bam; + u32 dev_cmd_reg_start; +}; + +/* Frees the BAM transaction memory */ +static void free_bam_transaction(struct qcom_nand_controller *nandc) +{ + struct bam_transaction *bam_txn = nandc->bam_txn; + + devm_kfree(nandc->dev, bam_txn); +} + +/* Allocates and Initializes the BAM transaction */ +static struct bam_transaction * +alloc_bam_transaction(struct qcom_nand_controller *nandc) +{ + struct bam_transaction *bam_txn; + size_t bam_txn_size; + unsigned int num_cw = nandc->max_cwperpage; + void *bam_txn_buf; + + bam_txn_size = + sizeof(*bam_txn) + num_cw * + ((sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) + + (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL)); + + bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL); + if (!bam_txn_buf) + return NULL; + + bam_txn = bam_txn_buf; + bam_txn_buf += sizeof(*bam_txn); + + bam_txn->cmd_sgl = bam_txn_buf; + bam_txn_buf += + sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw; + + bam_txn->data_sgl = bam_txn_buf; + + return bam_txn; +} + +/* Clears the BAM transaction indexes */ +static void clear_bam_transaction(struct qcom_nand_controller *nandc) +{ + struct bam_transaction *bam_txn = nandc->bam_txn; + + if (!nandc->props->is_bam) + return; + + bam_txn->cmd_sgl_pos = 0; + bam_txn->cmd_sgl_start = 0; + bam_txn->tx_sgl_pos = 0; + bam_txn->tx_sgl_start = 0; + bam_txn->rx_sgl_pos = 0; + bam_txn->rx_sgl_start = 0; + + sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage * + QPIC_PER_CW_CMD_SGL); + sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage * + QPIC_PER_CW_DATA_SGL); +} + static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip) { return container_of(chip, struct qcom_nand_host, chip); @@ -339,6 +525,24 @@ static inline void nandc_write(struct qcom_nand_controller *nandc, int offset, iowrite32(val, nandc->base + offset); } +static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc, + bool is_cpu) +{ + if (!nandc->props->is_bam) + return; + + if (is_cpu) + dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma, + MAX_REG_RD * + sizeof(*nandc->reg_read_buf), + DMA_FROM_DEVICE); + else + dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma, + MAX_REG_RD * + 
sizeof(*nandc->reg_read_buf), + DMA_FROM_DEVICE); +} + static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset) { switch (offset) { @@ -372,6 +576,14 @@ static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset) return ®s->orig_vld; case NAND_EBI2_ECC_BUF_CFG: return ®s->ecc_buf_cfg; + case NAND_READ_LOCATION_0: + return ®s->read_location0; + case NAND_READ_LOCATION_1: + return ®s->read_location1; + case NAND_READ_LOCATION_2: + return ®s->read_location2; + case NAND_READ_LOCATION_3: + return ®s->read_location3; default: return NULL; } @@ -446,11 +658,119 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read) nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus); nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus); nandc_set_reg(nandc, NAND_EXEC_CMD, 1); + + if (read) + nandc_set_read_loc(nandc, 0, 0, host->use_ecc ? + host->cw_data : host->cw_size, 1); } -static int prep_dma_desc(struct qcom_nand_controller *nandc, bool read, - int reg_off, const void *vaddr, int size, - bool flow_control) +/* + * Maps the scatter gather list for DMA transfer and forms the DMA descriptor + * for BAM. This descriptor will be added in the NAND DMA descriptor queue + * which will be submitted to DMA engine. + */ +static int prepare_bam_async_desc(struct qcom_nand_controller *nandc, + struct dma_chan *chan, + unsigned long flags) +{ + struct desc_info *desc; + struct scatterlist *sgl; + unsigned int sgl_cnt; + int ret; + struct bam_transaction *bam_txn = nandc->bam_txn; + enum dma_transfer_direction dir_eng; + struct dma_async_tx_descriptor *dma_desc; + + desc = kzalloc(sizeof(*desc), GFP_KERNEL); + if (!desc) + return -ENOMEM; + + if (chan == nandc->cmd_chan) { + sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start]; + sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start; + bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos; + dir_eng = DMA_MEM_TO_DEV; + desc->dir = DMA_TO_DEVICE; + } else if (chan == nandc->tx_chan) { + sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start]; + sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start; + bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos; + dir_eng = DMA_MEM_TO_DEV; + desc->dir = DMA_TO_DEVICE; + } else { + sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start]; + sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start; + bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos; + dir_eng = DMA_DEV_TO_MEM; + desc->dir = DMA_FROM_DEVICE; + } + + sg_mark_end(sgl + sgl_cnt - 1); + ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir); + if (ret == 0) { + dev_err(nandc->dev, "failure in mapping desc\n"); + kfree(desc); + return -ENOMEM; + } + + desc->sgl_cnt = sgl_cnt; + desc->bam_sgl = sgl; + + dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng, + flags); + + if (!dma_desc) { + dev_err(nandc->dev, "failure in prep desc\n"); + dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir); + kfree(desc); + return -EINVAL; + } + + desc->dma_desc = dma_desc; + + list_add_tail(&desc->node, &nandc->desc_list); + + return 0; +} + +/* + * Prepares the data descriptor for BAM DMA which will be used for NAND + * data reads and writes. 
+ */ +static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read, + const void *vaddr, + int size, unsigned int flags) +{ + int ret; + struct bam_transaction *bam_txn = nandc->bam_txn; + + if (read) { + sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos], + vaddr, size); + bam_txn->rx_sgl_pos++; + } else { + sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos], + vaddr, size); + bam_txn->tx_sgl_pos++; + + /* + * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag + * is not set, form the DMA descriptor + */ + if (!(flags & NAND_BAM_NO_EOT)) { + ret = prepare_bam_async_desc(nandc, nandc->tx_chan, + DMA_PREP_INTERRUPT); + if (ret) + return ret; + } + } + + return 0; +} + +static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read, + int reg_off, const void *vaddr, int size, + bool flow_control) { struct desc_info *desc; struct dma_async_tx_descriptor *dma_desc; @@ -463,7 +783,7 @@ static int prep_dma_desc(struct qcom_nand_controller *nandc, bool read, if (!desc) return -ENOMEM; - sgl = &desc->sgl; + sgl = &desc->adm_sgl; sg_init_one(sgl, vaddr, size); @@ -524,9 +844,10 @@ static int prep_dma_desc(struct qcom_nand_controller *nandc, bool read, * * @first: offset of the first register in the contiguous block * @num_regs: number of registers to read + * @flags: flags to control DMA descriptor preparation */ static int read_reg_dma(struct qcom_nand_controller *nandc, int first, - int num_regs) + int num_regs, unsigned int flags) { bool flow_control = false; void *vaddr; @@ -535,11 +856,14 @@ static int read_reg_dma(struct qcom_nand_controller *nandc, int first, if (first == NAND_READ_ID || first == NAND_FLASH_STATUS) flow_control = true; + if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1) + first = dev_cmd_reg_addr(nandc, first); + size = num_regs * sizeof(u32); vaddr = nandc->reg_read_buf + nandc->reg_read_pos; nandc->reg_read_pos += num_regs; - return prep_dma_desc(nandc, true, first, vaddr, size, flow_control); + return prep_adm_dma_desc(nandc, true, first, vaddr, size, flow_control); } /* @@ -548,9 +872,10 @@ static int read_reg_dma(struct qcom_nand_controller *nandc, int first, * * @first: offset of the first register in the contiguous block * @num_regs: number of registers to write + * @flags: flags to control DMA descriptor preparation */ static int write_reg_dma(struct qcom_nand_controller *nandc, int first, - int num_regs) + int num_regs, unsigned int flags) { bool flow_control = false; struct nandc_regs *regs = nandc->regs; @@ -562,15 +887,26 @@ static int write_reg_dma(struct qcom_nand_controller *nandc, int first, if (first == NAND_FLASH_CMD) flow_control = true; - if (first == NAND_DEV_CMD1_RESTORE) - first = NAND_DEV_CMD1; + if (first == NAND_ERASED_CW_DETECT_CFG) { + if (flags & NAND_ERASED_CW_SET) + vaddr = ®s->erased_cw_detect_cfg_set; + else + vaddr = ®s->erased_cw_detect_cfg_clr; + } - if (first == NAND_DEV_CMD_VLD_RESTORE) - first = NAND_DEV_CMD_VLD; + if (first == NAND_EXEC_CMD) + flags |= NAND_BAM_NWD; + + if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1) + first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1); + + if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD) + first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD); size = num_regs * sizeof(u32); - return prep_dma_desc(nandc, false, first, vaddr, size, flow_control); + return prep_adm_dma_desc(nandc, false, first, vaddr, size, + flow_control); } /* @@ -580,11 +916,15 @@ static int write_reg_dma(struct qcom_nand_controller *nandc, int first, * @reg_off: 
offset within the controller's data buffer * @vaddr: virtual address of the buffer we want to write to * @size: DMA transaction size in bytes + * @flags: flags to control DMA descriptor preparation */ static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off, - const u8 *vaddr, int size) + const u8 *vaddr, int size, unsigned int flags) { - return prep_dma_desc(nandc, true, reg_off, vaddr, size, false); + if (nandc->props->is_bam) + return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags); + + return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false); } /* @@ -594,48 +934,84 @@ static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off, * @reg_off: offset within the controller's data buffer * @vaddr: virtual address of the buffer we want to read from * @size: DMA transaction size in bytes + * @flags: flags to control DMA descriptor preparation */ static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off, - const u8 *vaddr, int size) + const u8 *vaddr, int size, unsigned int flags) { - return prep_dma_desc(nandc, false, reg_off, vaddr, size, false); + if (nandc->props->is_bam) + return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags); + + return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false); } /* - * helper to prepare dma descriptors to configure registers needed for reading a - * codeword/step in a page + * Helper to prepare DMA descriptors for configuring registers + * before reading a NAND page. */ -static void config_cw_read(struct qcom_nand_controller *nandc) +static void config_nand_page_read(struct qcom_nand_controller *nandc) { - write_reg_dma(nandc, NAND_FLASH_CMD, 3); - write_reg_dma(nandc, NAND_DEV0_CFG0, 3); - write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1); - - write_reg_dma(nandc, NAND_EXEC_CMD, 1); - - read_reg_dma(nandc, NAND_FLASH_STATUS, 2); - read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1); + write_reg_dma(nandc, NAND_ADDR0, 2, 0); + write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0); + write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0); + write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0); + write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, + NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL); } /* - * helpers to prepare dma descriptors used to configure registers needed for - * writing a codeword/step in a page + * Helper to prepare DMA descriptors for configuring registers + * before reading each codeword in NAND page. 
*/ -static void config_cw_write_pre(struct qcom_nand_controller *nandc) +static void config_nand_cw_read(struct qcom_nand_controller *nandc) { - write_reg_dma(nandc, NAND_FLASH_CMD, 3); - write_reg_dma(nandc, NAND_DEV0_CFG0, 3); - write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1); + if (nandc->props->is_bam) + write_reg_dma(nandc, NAND_READ_LOCATION_0, 4, + NAND_BAM_NEXT_SGL); + + write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL); + write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); + + read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0); + read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1, + NAND_BAM_NEXT_SGL); } -static void config_cw_write_post(struct qcom_nand_controller *nandc) +/* + * Helper to prepare DMA descriptors to configure registers needed for reading a + * single codeword in a page + */ +static void config_nand_single_cw_page_read(struct qcom_nand_controller *nandc) { - write_reg_dma(nandc, NAND_EXEC_CMD, 1); + config_nand_page_read(nandc); + config_nand_cw_read(nandc); +} - read_reg_dma(nandc, NAND_FLASH_STATUS, 1); +/* + * Helper to prepare DMA descriptors used to configure registers needed + * before writing a NAND page. + */ +static void config_nand_page_write(struct qcom_nand_controller *nandc) +{ + write_reg_dma(nandc, NAND_ADDR0, 2, 0); + write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0); + write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, + NAND_BAM_NEXT_SGL); +} - write_reg_dma(nandc, NAND_FLASH_STATUS, 1); - write_reg_dma(nandc, NAND_READ_STATUS, 1); +/* + * Helper to prepare DMA descriptors for configuring registers + * before writing each codeword in a NAND page. + */ +static void config_nand_cw_write(struct qcom_nand_controller *nandc) +{ + write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL); + write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); + + read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL); + + write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0); + write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL); } /* @@ -672,8 +1048,7 @@ static int nandc_param(struct qcom_nand_host *host) /* configure CMD1 and VLD for ONFI param probing */ nandc_set_reg(nandc, NAND_DEV_CMD_VLD, - (nandc->vld & ~(1 << READ_START_VLD)) - | 0 << READ_START_VLD); + (nandc->vld & ~READ_START_VLD)); nandc_set_reg(nandc, NAND_DEV_CMD1, (nandc->cmd1 & ~(0xFF << READ_ADDR)) | NAND_CMD_PARAM << READ_ADDR); @@ -682,21 +1057,22 @@ static int nandc_param(struct qcom_nand_host *host) nandc_set_reg(nandc, NAND_DEV_CMD1_RESTORE, nandc->cmd1); nandc_set_reg(nandc, NAND_DEV_CMD_VLD_RESTORE, nandc->vld); + nandc_set_read_loc(nandc, 0, 0, 512, 1); - write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1); - write_reg_dma(nandc, NAND_DEV_CMD1, 1); + write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0); + write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL); nandc->buf_count = 512; memset(nandc->data_buffer, 0xff, nandc->buf_count); - config_cw_read(nandc); + config_nand_single_cw_page_read(nandc); read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, - nandc->buf_count); + nandc->buf_count, 0); /* restore CMD1 and VLD regs */ - write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1); - write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1); + write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0); + write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL); return 0; } @@ -718,14 +1094,14 @@ static int erase_block(struct qcom_nand_host *host, int page_addr) nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus); nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus); - 
write_reg_dma(nandc, NAND_FLASH_CMD, 3); - write_reg_dma(nandc, NAND_DEV0_CFG0, 2); - write_reg_dma(nandc, NAND_EXEC_CMD, 1); + write_reg_dma(nandc, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL); + write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL); + write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); - read_reg_dma(nandc, NAND_FLASH_STATUS, 1); + read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL); - write_reg_dma(nandc, NAND_FLASH_STATUS, 1); - write_reg_dma(nandc, NAND_READ_STATUS, 1); + write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0); + write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL); return 0; } @@ -742,13 +1118,14 @@ static int read_id(struct qcom_nand_host *host, int column) nandc_set_reg(nandc, NAND_FLASH_CMD, FETCH_ID); nandc_set_reg(nandc, NAND_ADDR0, column); nandc_set_reg(nandc, NAND_ADDR1, 0); - nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT, DM_EN); + nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT, + nandc->props->is_bam ? 0 : DM_EN); nandc_set_reg(nandc, NAND_EXEC_CMD, 1); - write_reg_dma(nandc, NAND_FLASH_CMD, 4); - write_reg_dma(nandc, NAND_EXEC_CMD, 1); + write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL); + write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); - read_reg_dma(nandc, NAND_READ_ID, 1); + read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL); return 0; } @@ -762,10 +1139,10 @@ static int reset(struct qcom_nand_host *host) nandc_set_reg(nandc, NAND_FLASH_CMD, RESET_DEVICE); nandc_set_reg(nandc, NAND_EXEC_CMD, 1); - write_reg_dma(nandc, NAND_FLASH_CMD, 1); - write_reg_dma(nandc, NAND_EXEC_CMD, 1); + write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL); + write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); - read_reg_dma(nandc, NAND_FLASH_STATUS, 1); + read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL); return 0; } @@ -775,12 +1152,43 @@ static int submit_descs(struct qcom_nand_controller *nandc) { struct desc_info *desc; dma_cookie_t cookie = 0; + struct bam_transaction *bam_txn = nandc->bam_txn; + int r; + + if (nandc->props->is_bam) { + if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) { + r = prepare_bam_async_desc(nandc, nandc->rx_chan, 0); + if (r) + return r; + } + + if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) { + r = prepare_bam_async_desc(nandc, nandc->tx_chan, + DMA_PREP_INTERRUPT); + if (r) + return r; + } + + if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) { + r = prepare_bam_async_desc(nandc, nandc->cmd_chan, 0); + if (r) + return r; + } + } list_for_each_entry(desc, &nandc->desc_list, node) cookie = dmaengine_submit(desc->dma_desc); - if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE) - return -ETIMEDOUT; + if (nandc->props->is_bam) { + dma_async_issue_pending(nandc->tx_chan); + dma_async_issue_pending(nandc->rx_chan); + + if (dma_sync_wait(nandc->cmd_chan, cookie) != DMA_COMPLETE) + return -ETIMEDOUT; + } else { + if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE) + return -ETIMEDOUT; + } return 0; } @@ -791,7 +1199,14 @@ static void free_descs(struct qcom_nand_controller *nandc) list_for_each_entry_safe(desc, n, &nandc->desc_list, node) { list_del(&desc->node); - dma_unmap_sg(nandc->dev, &desc->sgl, 1, desc->dir); + + if (nandc->props->is_bam) + dma_unmap_sg(nandc->dev, desc->bam_sgl, + desc->sgl_cnt, desc->dir); + else + dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1, + desc->dir); + kfree(desc); } } @@ -800,8 +1215,7 @@ static void free_descs(struct qcom_nand_controller *nandc) static void clear_read_regs(struct qcom_nand_controller *nandc) { nandc->reg_read_pos = 0; - 
memset(nandc->reg_read_buf, 0, - MAX_REG_RD * sizeof(*nandc->reg_read_buf)); + nandc_read_buffer_sync(nandc, false); } static void pre_command(struct qcom_nand_host *host, int command) @@ -815,6 +1229,10 @@ static void pre_command(struct qcom_nand_host *host, int command) host->last_command = command; clear_read_regs(nandc); + + if (command == NAND_CMD_RESET || command == NAND_CMD_READID || + command == NAND_CMD_PARAM || command == NAND_CMD_ERASE1) + clear_bam_transaction(nandc); } /* @@ -831,6 +1249,7 @@ static void parse_erase_write_errors(struct qcom_nand_host *host, int command) int i; num_cw = command == NAND_CMD_PAGEPROG ? ecc->steps : 1; + nandc_read_buffer_sync(nandc, true); for (i = 0; i < num_cw; i++) { u32 flash_status = le32_to_cpu(nandc->reg_read_buf[i]); @@ -852,6 +1271,7 @@ static void post_command(struct qcom_nand_host *host, int command) switch (command) { case NAND_CMD_READID: + nandc_read_buffer_sync(nandc, true); memcpy(nandc->data_buffer, nandc->reg_read_buf, nandc->buf_count); break; @@ -1015,6 +1435,7 @@ static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf, int i; buf = (struct read_stats *)nandc->reg_read_buf; + nandc_read_buffer_sync(nandc, true); for (i = 0; i < ecc->steps; i++, buf++) { u32 flash, buffer, erased_cw; @@ -1102,6 +1523,8 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf, struct nand_ecc_ctrl *ecc = &chip->ecc; int i, ret; + config_nand_page_read(nandc); + /* queue cmd descs for each codeword */ for (i = 0; i < ecc->steps; i++) { int data_size, oob_size; @@ -1115,11 +1538,24 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf, oob_size = host->ecc_bytes_hw + host->spare_bytes; } - config_cw_read(nandc); + if (nandc->props->is_bam) { + if (data_buf && oob_buf) { + nandc_set_read_loc(nandc, 0, 0, data_size, 0); + nandc_set_read_loc(nandc, 1, data_size, + oob_size, 1); + } else if (data_buf) { + nandc_set_read_loc(nandc, 0, 0, data_size, 1); + } else { + nandc_set_read_loc(nandc, 0, data_size, + oob_size, 1); + } + } + + config_nand_cw_read(nandc); if (data_buf) read_data_dma(nandc, FLASH_BUF_ACC, data_buf, - data_size); + data_size, 0); /* * when ecc is enabled, the controller doesn't read the real @@ -1135,7 +1571,7 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf, *oob_buf++ = 0xff; read_data_dma(nandc, FLASH_BUF_ACC + data_size, - oob_buf, oob_size); + oob_buf, oob_size, 0); } if (data_buf) @@ -1175,9 +1611,9 @@ static int copy_last_cw(struct qcom_nand_host *host, int page) set_address(host, host->cw_size * (ecc->steps - 1), page); update_rw_regs(host, 1, true); - config_cw_read(nandc); + config_nand_single_cw_page_read(nandc); - read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size); + read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0); ret = submit_descs(nandc); if (ret) @@ -1200,6 +1636,7 @@ static int qcom_nandc_read_page(struct mtd_info *mtd, struct nand_chip *chip, data_buf = buf; oob_buf = oob_required ? 
chip->oob_poi : NULL; + clear_bam_transaction(nandc); ret = read_page_ecc(host, data_buf, oob_buf); if (ret) { dev_err(nandc->dev, "failure to read page\n"); @@ -1219,12 +1656,16 @@ static int qcom_nandc_read_page_raw(struct mtd_info *mtd, u8 *data_buf, *oob_buf; struct nand_ecc_ctrl *ecc = &chip->ecc; int i, ret; + int read_loc; data_buf = buf; oob_buf = chip->oob_poi; host->use_ecc = false; + + clear_bam_transaction(nandc); update_rw_regs(host, ecc->steps, true); + config_nand_page_read(nandc); for (i = 0; i < ecc->steps; i++) { int data_size1, data_size2, oob_size1, oob_size2; @@ -1243,21 +1684,35 @@ static int qcom_nandc_read_page_raw(struct mtd_info *mtd, oob_size2 = host->ecc_bytes_hw + host->spare_bytes; } - config_cw_read(nandc); + if (nandc->props->is_bam) { + read_loc = 0; + nandc_set_read_loc(nandc, 0, read_loc, data_size1, 0); + read_loc += data_size1; - read_data_dma(nandc, reg_off, data_buf, data_size1); + nandc_set_read_loc(nandc, 1, read_loc, oob_size1, 0); + read_loc += oob_size1; + + nandc_set_read_loc(nandc, 2, read_loc, data_size2, 0); + read_loc += data_size2; + + nandc_set_read_loc(nandc, 3, read_loc, oob_size2, 1); + } + + config_nand_cw_read(nandc); + + read_data_dma(nandc, reg_off, data_buf, data_size1, 0); reg_off += data_size1; data_buf += data_size1; - read_data_dma(nandc, reg_off, oob_buf, oob_size1); + read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0); reg_off += oob_size1; oob_buf += oob_size1; - read_data_dma(nandc, reg_off, data_buf, data_size2); + read_data_dma(nandc, reg_off, data_buf, data_size2, 0); reg_off += data_size2; data_buf += data_size2; - read_data_dma(nandc, reg_off, oob_buf, oob_size2); + read_data_dma(nandc, reg_off, oob_buf, oob_size2, 0); oob_buf += oob_size2; } @@ -1280,6 +1735,7 @@ static int qcom_nandc_read_oob(struct mtd_info *mtd, struct nand_chip *chip, int ret; clear_read_regs(nandc); + clear_bam_transaction(nandc); host->use_ecc = true; set_address(host, 0, page); @@ -1303,12 +1759,14 @@ static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip, int i, ret; clear_read_regs(nandc); + clear_bam_transaction(nandc); data_buf = (u8 *)buf; oob_buf = chip->oob_poi; host->use_ecc = true; update_rw_regs(host, ecc->steps, false); + config_nand_page_write(nandc); for (i = 0; i < ecc->steps; i++) { int data_size, oob_size; @@ -1322,9 +1780,9 @@ static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip, oob_size = ecc->bytes; } - config_cw_write_pre(nandc); - write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size); + write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size, + i == (ecc->steps - 1) ? 
NAND_BAM_NO_EOT : 0); /* * when ECC is enabled, we don't really need to write anything @@ -1337,10 +1795,10 @@ static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip, oob_buf += host->bbm_size; write_data_dma(nandc, FLASH_BUF_ACC + data_size, - oob_buf, oob_size); + oob_buf, oob_size, 0); } - config_cw_write_post(nandc); + config_nand_cw_write(nandc); data_buf += data_size; oob_buf += oob_size; @@ -1367,12 +1825,14 @@ static int qcom_nandc_write_page_raw(struct mtd_info *mtd, int i, ret; clear_read_regs(nandc); + clear_bam_transaction(nandc); data_buf = (u8 *)buf; oob_buf = chip->oob_poi; host->use_ecc = false; update_rw_regs(host, ecc->steps, false); + config_nand_page_write(nandc); for (i = 0; i < ecc->steps; i++) { int data_size1, data_size2, oob_size1, oob_size2; @@ -1391,24 +1851,25 @@ static int qcom_nandc_write_page_raw(struct mtd_info *mtd, oob_size2 = host->ecc_bytes_hw + host->spare_bytes; } - config_cw_write_pre(nandc); - - write_data_dma(nandc, reg_off, data_buf, data_size1); + write_data_dma(nandc, reg_off, data_buf, data_size1, + NAND_BAM_NO_EOT); reg_off += data_size1; data_buf += data_size1; - write_data_dma(nandc, reg_off, oob_buf, oob_size1); + write_data_dma(nandc, reg_off, oob_buf, oob_size1, + NAND_BAM_NO_EOT); reg_off += oob_size1; oob_buf += oob_size1; - write_data_dma(nandc, reg_off, data_buf, data_size2); + write_data_dma(nandc, reg_off, data_buf, data_size2, + NAND_BAM_NO_EOT); reg_off += data_size2; data_buf += data_size2; - write_data_dma(nandc, reg_off, oob_buf, oob_size2); + write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0); oob_buf += oob_size2; - config_cw_write_post(nandc); + config_nand_cw_write(nandc); } ret = submit_descs(nandc); @@ -1441,11 +1902,13 @@ static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, host->use_ecc = true; + clear_bam_transaction(nandc); ret = copy_last_cw(host, page); if (ret) return ret; clear_read_regs(nandc); + clear_bam_transaction(nandc); /* calculate the data and oob size for the last codeword/step */ data_size = ecc->size - ((ecc->steps - 1) << 2); @@ -1458,10 +1921,10 @@ static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, set_address(host, host->cw_size * (ecc->steps - 1), page); update_rw_regs(host, 1, false); - config_cw_write_pre(nandc); - write_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, - data_size + oob_size); - config_cw_write_post(nandc); + config_nand_page_write(nandc); + write_data_dma(nandc, FLASH_BUF_ACC, + nandc->data_buffer, data_size + oob_size, 0); + config_nand_cw_write(nandc); ret = submit_descs(nandc); @@ -1498,6 +1961,7 @@ static int qcom_nandc_block_bad(struct mtd_info *mtd, loff_t ofs) */ host->use_ecc = false; + clear_bam_transaction(nandc); ret = copy_last_cw(host, page); if (ret) goto err; @@ -1528,6 +1992,7 @@ static int qcom_nandc_block_markbad(struct mtd_info *mtd, loff_t ofs) int page, ret, status = 0; clear_read_regs(nandc); + clear_bam_transaction(nandc); /* * to mark the BBM as bad, we flash the entire last codeword with 0s. 
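For orientation, the raw single-codeword write paths touched below (the OOB write and the bad-block marking) all build their descriptors in the same order around the new page/codeword helpers. A condensed sketch of that flow, reusing the driver-internal helpers shown earlier in this diff (illustrative only; the wrapper function and its parameter names are not part of the patch):

/* Illustrative sketch of the single-codeword write flow used by this patch. */
static int write_one_cw_sketch(struct qcom_nand_host *host,
			       struct qcom_nand_controller *nandc,
			       int col, int page, const u8 *buf, int len)
{
	int ret;

	clear_read_regs(nandc);
	clear_bam_transaction(nandc);	/* reset BAM sgl indexes; no-op on ADM */

	set_address(host, col, page);
	update_rw_regs(host, 1, false);	/* program regs for one codeword write */

	config_nand_page_write(nandc);	/* ADDR/CFG/ECC_BUF_CFG descriptors */
	write_data_dma(nandc, FLASH_BUF_ACC, buf, len, 0);
	config_nand_cw_write(nandc);	/* CMD/EXEC plus status read-back */

	ret = submit_descs(nandc);	/* submit to the DMA engine and wait */
	free_descs(nandc);

	return ret;
}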
@@ -1543,9 +2008,10 @@ static int qcom_nandc_block_markbad(struct mtd_info *mtd, loff_t ofs) set_address(host, host->cw_size * (ecc->steps - 1), page); update_rw_regs(host, 1, false); - config_cw_write_pre(nandc); - write_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, host->cw_size); - config_cw_write_post(nandc); + config_nand_page_write(nandc); + write_data_dma(nandc, FLASH_BUF_ACC, + nandc->data_buffer, host->cw_size, 0); + config_nand_cw_write(nandc); ret = submit_descs(nandc); @@ -1794,7 +2260,7 @@ static int qcom_nand_host_setup(struct qcom_nand_host *host) * uses lesser bytes for ECC. If RS is used, the ECC bytes is * always 10 bytes */ - if (nandc->ecc_modes & ECC_BCH_4BIT) { + if (nandc->props->ecc_modes & ECC_BCH_4BIT) { /* BCH */ host->bch_enabled = true; ecc_mode = 0; @@ -1842,6 +2308,8 @@ static int qcom_nand_host_setup(struct qcom_nand_host *host) mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops); cwperpage = mtd->writesize / ecc->size; + nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage, + cwperpage); /* * DATA_UD_BYTES varies based on whether the read/write command protects @@ -1893,7 +2361,7 @@ static int qcom_nand_host_setup(struct qcom_nand_host *host) | wide_bus << WIDE_FLASH | 1 << DEV0_CFG1_ECC_DISABLE; - host->ecc_bch_cfg = host->bch_enabled << ECC_CFG_ECC_DISABLE + host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE | 0 << ECC_SW_RESET | host->cw_data << ECC_NUM_DATA_BYTES | 1 << ECC_FORCE_CLK_OPEN @@ -1904,6 +2372,10 @@ static int qcom_nand_host_setup(struct qcom_nand_host *host) host->clrflashstatus = FS_READY_BSY_N; host->clrreadstatus = 0xc0; + nandc->regs->erased_cw_detect_cfg_clr = + cpu_to_le32(CLR_ERASED_PAGE_DET); + nandc->regs->erased_cw_detect_cfg_set = + cpu_to_le32(SET_ERASED_PAGE_DET); dev_dbg(nandc->dev, "cfg0 %x cfg1 %x ecc_buf_cfg %x ecc_bch cfg %x cw_size %d cw_data %d strength %d parity_bytes %d steps %d\n", @@ -1948,10 +2420,55 @@ static int qcom_nandc_alloc(struct qcom_nand_controller *nandc) if (!nandc->reg_read_buf) return -ENOMEM; - nandc->chan = dma_request_slave_channel(nandc->dev, "rxtx"); - if (!nandc->chan) { - dev_err(nandc->dev, "failed to request slave channel\n"); - return -ENODEV; + if (nandc->props->is_bam) { + nandc->reg_read_dma = + dma_map_single(nandc->dev, nandc->reg_read_buf, + MAX_REG_RD * + sizeof(*nandc->reg_read_buf), + DMA_FROM_DEVICE); + if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) { + dev_err(nandc->dev, "failed to DMA MAP reg buffer\n"); + return -EIO; + } + + nandc->tx_chan = dma_request_slave_channel(nandc->dev, "tx"); + if (!nandc->tx_chan) { + dev_err(nandc->dev, "failed to request tx channel\n"); + return -ENODEV; + } + + nandc->rx_chan = dma_request_slave_channel(nandc->dev, "rx"); + if (!nandc->rx_chan) { + dev_err(nandc->dev, "failed to request rx channel\n"); + return -ENODEV; + } + + nandc->cmd_chan = dma_request_slave_channel(nandc->dev, "cmd"); + if (!nandc->cmd_chan) { + dev_err(nandc->dev, "failed to request cmd channel\n"); + return -ENODEV; + } + + /* + * Initially allocate BAM transaction to read ONFI param page. 
+ * After detecting all the devices, this BAM transaction will + * be freed and the next BAM transaction will be allocated with + * maximum codeword size + */ + nandc->max_cwperpage = 1; + nandc->bam_txn = alloc_bam_transaction(nandc); + if (!nandc->bam_txn) { + dev_err(nandc->dev, + "failed to allocate bam transaction\n"); + return -ENOMEM; + } + } else { + nandc->chan = dma_request_slave_channel(nandc->dev, "rxtx"); + if (!nandc->chan) { + dev_err(nandc->dev, + "failed to request slave channel\n"); + return -ENODEV; + } } INIT_LIST_HEAD(&nandc->desc_list); @@ -1964,21 +2481,48 @@ static int qcom_nandc_alloc(struct qcom_nand_controller *nandc) static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc) { - dma_release_channel(nandc->chan); + if (nandc->props->is_bam) { + if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma)) + dma_unmap_single(nandc->dev, nandc->reg_read_dma, + MAX_REG_RD * + sizeof(*nandc->reg_read_buf), + DMA_FROM_DEVICE); + + if (nandc->tx_chan) + dma_release_channel(nandc->tx_chan); + + if (nandc->rx_chan) + dma_release_channel(nandc->rx_chan); + + if (nandc->cmd_chan) + dma_release_channel(nandc->cmd_chan); + } else { + if (nandc->chan) + dma_release_channel(nandc->chan); + } } /* one time setup of a few nand controller registers */ static int qcom_nandc_setup(struct qcom_nand_controller *nandc) { + u32 nand_ctrl; + /* kill onenand */ nandc_write(nandc, SFLASHC_BURST_CFG, 0); + nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD), + NAND_DEV_CMD_VLD_VAL); - /* enable ADM DMA */ - nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN); + /* enable ADM or BAM DMA */ + if (nandc->props->is_bam) { + nand_ctrl = nandc_read(nandc, NAND_CTRL); + nandc_write(nandc, NAND_CTRL, nand_ctrl | BAM_MODE_EN); + } else { + nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN); + } /* save the original values of these registers */ - nandc->cmd1 = nandc_read(nandc, NAND_DEV_CMD1); - nandc->vld = nandc_read(nandc, NAND_DEV_CMD_VLD); + nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1)); + nandc->vld = NAND_DEV_CMD_VLD_VAL; return 0; } @@ -2034,14 +2578,77 @@ static int qcom_nand_host_init(struct qcom_nand_controller *nandc, return ret; ret = qcom_nand_host_setup(host); - if (ret) - return ret; + + return ret; +} + +static int qcom_nand_mtd_register(struct qcom_nand_controller *nandc, + struct qcom_nand_host *host, + struct device_node *dn) +{ + struct nand_chip *chip = &host->chip; + struct mtd_info *mtd = nand_to_mtd(chip); + int ret; ret = nand_scan_tail(mtd); if (ret) return ret; - return mtd_device_register(mtd, NULL, 0); + ret = mtd_device_register(mtd, NULL, 0); + if (ret) + nand_cleanup(mtd_to_nand(mtd)); + + return ret; +} + +static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc) +{ + struct device *dev = nandc->dev; + struct device_node *dn = dev->of_node, *child; + struct qcom_nand_host *host, *tmp; + int ret; + + for_each_available_child_of_node(dn, child) { + host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); + if (!host) { + of_node_put(child); + return -ENOMEM; + } + + ret = qcom_nand_host_init(nandc, host, child); + if (ret) { + devm_kfree(dev, host); + continue; + } + + list_add_tail(&host->node, &nandc->host_list); + } + + if (list_empty(&nandc->host_list)) + return -ENODEV; + + if (nandc->props->is_bam) { + free_bam_transaction(nandc); + nandc->bam_txn = alloc_bam_transaction(nandc); + if (!nandc->bam_txn) { + dev_err(nandc->dev, + "failed to allocate bam transaction\n"); + return -ENOMEM; + } + } + + 
list_for_each_entry_safe(host, tmp, &nandc->host_list, node) { + ret = qcom_nand_mtd_register(nandc, host, child); + if (ret) { + list_del(&host->node); + devm_kfree(dev, host); + } + } + + if (list_empty(&nandc->host_list)) + return -ENODEV; + + return 0; } /* parse custom DT properties here */ @@ -2051,16 +2658,20 @@ static int qcom_nandc_parse_dt(struct platform_device *pdev) struct device_node *np = nandc->dev->of_node; int ret; - ret = of_property_read_u32(np, "qcom,cmd-crci", &nandc->cmd_crci); - if (ret) { - dev_err(nandc->dev, "command CRCI unspecified\n"); - return ret; - } + if (!nandc->props->is_bam) { + ret = of_property_read_u32(np, "qcom,cmd-crci", + &nandc->cmd_crci); + if (ret) { + dev_err(nandc->dev, "command CRCI unspecified\n"); + return ret; + } - ret = of_property_read_u32(np, "qcom,data-crci", &nandc->data_crci); - if (ret) { - dev_err(nandc->dev, "data CRCI unspecified\n"); - return ret; + ret = of_property_read_u32(np, "qcom,data-crci", + &nandc->data_crci); + if (ret) { + dev_err(nandc->dev, "data CRCI unspecified\n"); + return ret; + } } return 0; @@ -2069,10 +2680,8 @@ static int qcom_nandc_parse_dt(struct platform_device *pdev) static int qcom_nandc_probe(struct platform_device *pdev) { struct qcom_nand_controller *nandc; - struct qcom_nand_host *host; const void *dev_data; struct device *dev = &pdev->dev; - struct device_node *dn = dev->of_node, *child; struct resource *res; int ret; @@ -2089,7 +2698,7 @@ static int qcom_nandc_probe(struct platform_device *pdev) return -ENODEV; } - nandc->ecc_modes = (unsigned long)dev_data; + nandc->props = dev_data; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); nandc->base = devm_ioremap_resource(dev, res); @@ -2112,7 +2721,7 @@ static int qcom_nandc_probe(struct platform_device *pdev) ret = qcom_nandc_alloc(nandc); if (ret) - return ret; + goto err_core_clk; ret = clk_prepare_enable(nandc->core_clk); if (ret) @@ -2126,35 +2735,12 @@ static int qcom_nandc_probe(struct platform_device *pdev) if (ret) goto err_setup; - for_each_available_child_of_node(dn, child) { - if (of_device_is_compatible(child, "qcom,nandcs")) { - host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); - if (!host) { - of_node_put(child); - ret = -ENOMEM; - goto err_cs_init; - } - - ret = qcom_nand_host_init(nandc, host, child); - if (ret) { - devm_kfree(dev, host); - continue; - } - - list_add_tail(&host->node, &nandc->host_list); - } - } - - if (list_empty(&nandc->host_list)) { - ret = -ENODEV; - goto err_cs_init; - } + ret = qcom_probe_nand_devices(nandc); + if (ret) + goto err_setup; return 0; -err_cs_init: - list_for_each_entry(host, &nandc->host_list, node) - nand_release(nand_to_mtd(&host->chip)); err_setup: clk_disable_unprepare(nandc->aon_clk); err_aon_clk: @@ -2181,15 +2767,40 @@ static int qcom_nandc_remove(struct platform_device *pdev) return 0; } -#define EBI2_NANDC_ECC_MODES (ECC_RS_4BIT | ECC_BCH_8BIT) +static const struct qcom_nandc_props ipq806x_nandc_props = { + .ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT), + .is_bam = false, + .dev_cmd_reg_start = 0x0, +}; + +static const struct qcom_nandc_props ipq4019_nandc_props = { + .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT), + .is_bam = true, + .dev_cmd_reg_start = 0x0, +}; + +static const struct qcom_nandc_props ipq8074_nandc_props = { + .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT), + .is_bam = true, + .dev_cmd_reg_start = 0x7000, +}; /* * data will hold a struct pointer containing more differences once we support * more controller variants */ static const struct of_device_id 
qcom_nandc_of_match[] = { - { .compatible = "qcom,ipq806x-nand", - .data = (void *)EBI2_NANDC_ECC_MODES, + { + .compatible = "qcom,ipq806x-nand", + .data = &ipq806x_nandc_props, + }, + { + .compatible = "qcom,ipq4019-nand", + .data = &ipq4019_nandc_props, + }, + { + .compatible = "qcom,ipq8074-nand", + .data = &ipq8074_nandc_props, }, {} }; diff --git a/drivers/mtd/nand/r852.h b/drivers/mtd/nand/r852.h index d042ddb71a8b..8713c57f6207 100644 --- a/drivers/mtd/nand/r852.h +++ b/drivers/mtd/nand/r852.h @@ -10,7 +10,7 @@ #include #include #include -#include +#include #include diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c index 9e0c849607b9..4c383eeec6f6 100644 --- a/drivers/mtd/nand/s3c2410.c +++ b/drivers/mtd/nand/s3c2410.c @@ -43,7 +43,7 @@ #include #include -#include +#include #include #include diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c index 891ac7b99305..e7f3c98487e6 100644 --- a/drivers/mtd/nand/sh_flctl.c +++ b/drivers/mtd/nand/sh_flctl.c @@ -38,7 +38,7 @@ #include #include -#include +#include #include #include @@ -411,7 +411,7 @@ static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf, dma_addr = dma_map_single(chan->device->dev, buf, len, dir); - if (dma_addr) + if (!dma_mapping_error(chan->device->dev, dma_addr)) desc = dmaengine_prep_slave_single(chan, dma_addr, len, tr_dir, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); @@ -1141,8 +1141,8 @@ static int flctl_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) { - dev_err(&pdev->dev, "failed to get flste irq data\n"); - return -ENXIO; + dev_err(&pdev->dev, "failed to get flste irq data: %d\n", irq); + return irq; } ret = devm_request_irq(&pdev->dev, irq, flctl_handle_flste, IRQF_SHARED, diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c index 064ca1757589..f59c455d9f51 100644 --- a/drivers/mtd/nand/sharpsl.c +++ b/drivers/mtd/nand/sharpsl.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include #include @@ -183,7 +183,7 @@ static int sharpsl_nand_probe(struct platform_device *pdev) /* Register the partitions */ mtd->name = "sharpsl-nand"; - err = mtd_device_parse_register(mtd, NULL, NULL, + err = mtd_device_parse_register(mtd, data->part_parsers, NULL, data->partitions, data->nr_partitions); if (err) goto err_add; diff --git a/drivers/mtd/nand/sm_common.c b/drivers/mtd/nand/sm_common.c index 5939dff253c2..c378705c6e2b 100644 --- a/drivers/mtd/nand/sm_common.c +++ b/drivers/mtd/nand/sm_common.c @@ -7,7 +7,7 @@ * published by the Free Software Foundation. 
*/ #include -#include +#include #include #include #include "sm_common.h" diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c index 72369bd079af..575997d0ef8a 100644 --- a/drivers/mtd/nand/socrates_nand.c +++ b/drivers/mtd/nand/socrates_nand.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c index 6abd142b1324..82244be3e766 100644 --- a/drivers/mtd/nand/sunxi_nand.c +++ b/drivers/mtd/nand/sunxi_nand.c @@ -31,7 +31,7 @@ #include #include #include -#include +#include #include #include #include @@ -2212,7 +2212,7 @@ static int sunxi_nfc_probe(struct platform_device *pdev) if (ret) goto out_ahb_clk_unprepare; - nfc->reset = devm_reset_control_get_optional(dev, "ahb"); + nfc->reset = devm_reset_control_get_optional_exclusive(dev, "ahb"); if (IS_ERR(nfc->reset)) { ret = PTR_ERR(nfc->reset); goto out_mod_clk_unprepare; diff --git a/drivers/mtd/nand/tango_nand.c b/drivers/mtd/nand/tango_nand.c index 9d40b793b1c4..766906f03943 100644 --- a/drivers/mtd/nand/tango_nand.c +++ b/drivers/mtd/nand/tango_nand.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c index fc5e773f8b60..84dbf32332e1 100644 --- a/drivers/mtd/nand/tmio_nand.c +++ b/drivers/mtd/nand/tmio_nand.c @@ -34,7 +34,7 @@ #include #include #include -#include +#include #include #include #include @@ -440,7 +440,9 @@ static int tmio_probe(struct platform_device *dev) goto err_irq; /* Register the partitions */ - retval = mtd_device_parse_register(mtd, NULL, NULL, + retval = mtd_device_parse_register(mtd, + data ? data->part_parsers : NULL, + NULL, data ? data->partition : NULL, data ? 
data->num_partitions : 0); if (!retval) diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c index 0a14fda2e41b..b567d212fe7d 100644 --- a/drivers/mtd/nand/txx9ndfmc.c +++ b/drivers/mtd/nand/txx9ndfmc.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/mtd/nand/vf610_nfc.c b/drivers/mtd/nand/vf610_nfc.c index 744ab10e8962..8037d4b48a05 100644 --- a/drivers/mtd/nand/vf610_nfc.c +++ b/drivers/mtd/nand/vf610_nfc.c @@ -31,10 +31,9 @@ #include #include #include -#include +#include #include #include -#include #include #include @@ -814,12 +813,14 @@ static int vf610_nfc_suspend(struct device *dev) static int vf610_nfc_resume(struct device *dev) { + int err; + struct mtd_info *mtd = dev_get_drvdata(dev); struct vf610_nfc *nfc = mtd_to_nfc(mtd); - pinctrl_pm_select_default_state(dev); - - clk_prepare_enable(nfc->clk); + err = clk_prepare_enable(nfc->clk); + if (err) + return err; vf610_nfc_preinit_controller(nfc); vf610_nfc_init_controller(nfc); diff --git a/drivers/mtd/nand/xway_nand.c b/drivers/mtd/nand/xway_nand.c index ddee4005248c..9926b4e3d69d 100644 --- a/drivers/mtd/nand/xway_nand.c +++ b/drivers/mtd/nand/xway_nand.c @@ -7,7 +7,7 @@ * Copyright © 2016 Hauke Mehrtens */ -#include +#include #include #include diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c index e21161353e76..1f1a61168b3d 100644 --- a/drivers/mtd/nftlcore.c +++ b/drivers/mtd/nftlcore.c @@ -34,7 +34,7 @@ #include #include -#include +#include #include #include diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c index a5dfbfbebfca..184c8fbfe465 100644 --- a/drivers/mtd/nftlmount.c +++ b/drivers/mtd/nftlmount.c @@ -25,7 +25,7 @@ #include #include #include -#include +#include #include #define SECTORSIZE 512 diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c index 2861c7079d7b..6bdf4e525677 100644 --- a/drivers/mtd/ofpart.c +++ b/drivers/mtd/ofpart.c @@ -50,8 +50,8 @@ static int parse_ofpart_partitions(struct mtd_info *master, * when using another parser), so don't be louder than * KERN_DEBUG */ - pr_debug("%s: 'partitions' subnode not found on %s. Trying to parse direct subnodes as partitions.\n", - master->name, mtd_node->full_name); + pr_debug("%s: 'partitions' subnode not found on %pOF. 
Trying to parse direct subnodes as partitions.\n", + master->name, mtd_node); ofpart_node = mtd_node; dedicated = false; } else if (!of_device_is_compatible(ofpart_node, "fixed-partitions")) { @@ -87,9 +87,9 @@ static int parse_ofpart_partitions(struct mtd_info *master, reg = of_get_property(pp, "reg", &len); if (!reg) { if (dedicated) { - pr_debug("%s: ofpart partition %s (%s) missing reg property.\n", - master->name, pp->full_name, - mtd_node->full_name); + pr_debug("%s: ofpart partition %pOF (%pOF) missing reg property.\n", + master->name, pp, + mtd_node); goto ofpart_fail; } else { nr_parts--; @@ -100,9 +100,9 @@ static int parse_ofpart_partitions(struct mtd_info *master, a_cells = of_n_addr_cells(pp); s_cells = of_n_size_cells(pp); if (len / 4 != a_cells + s_cells) { - pr_debug("%s: ofpart partition %s (%s) error parsing reg property.\n", - master->name, pp->full_name, - mtd_node->full_name); + pr_debug("%s: ofpart partition %pOF (%pOF) error parsing reg property.\n", + master->name, pp, + mtd_node); goto ofpart_fail; } @@ -131,8 +131,8 @@ static int parse_ofpart_partitions(struct mtd_info *master, return nr_parts; ofpart_fail: - pr_err("%s: error parsing ofpart partition %s (%s)\n", - master->name, pp->full_name, mtd_node->full_name); + pr_err("%s: error parsing ofpart partition %pOF (%pOF)\n", + master->name, pp, mtd_node); ret = -EINVAL; ofpart_none: of_node_put(pp); @@ -166,8 +166,7 @@ static int parse_ofoldpart_partitions(struct mtd_info *master, if (!part) return 0; /* No partitions found */ - pr_warn("Device tree uses obsolete partition map binding: %s\n", - dp->full_name); + pr_warn("Device tree uses obsolete partition map binding: %pOF\n", dp); nr_parts = plen / sizeof(part[0]); diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig index 293c8a4d1e49..69c638dd0484 100644 --- a/drivers/mtd/spi-nor/Kconfig +++ b/drivers/mtd/spi-nor/Kconfig @@ -89,6 +89,22 @@ config SPI_NXP_SPIFI config SPI_INTEL_SPI tristate +config SPI_INTEL_SPI_PCI + tristate "Intel PCH/PCU SPI flash PCI driver" if EXPERT + depends on X86 && PCI + select SPI_INTEL_SPI + help + This enables PCI support for the Intel PCH/PCU SPI controller in + master mode. This controller is present in modern Intel hardware + and is used to hold BIOS and other persistent settings. Using + this driver it is possible to upgrade BIOS directly from Linux. + + Say N here unless you know what you are doing. Overwriting the + SPI flash may render the system unbootable. + + To compile this driver as a module, choose M here: the module + will be called intel-spi-pci. 
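The PCI glue this option builds (intel-spi-pci.c, added further down) lifts the BIOS write protection before handing the controller to the common core. The mechanism is an ordinary PCI config-space read-modify-write of the register the driver calls BCR at offset 0xdc; a stand-alone sketch of that step (the helper name is illustrative, and the read-back mirrors the new probe code, which records whether the bit actually stuck):

#include <linux/bitops.h>
#include <linux/pci.h>

#define BCR		0xdc	/* config-space register used by the new driver */
#define BCR_WPD		BIT(0)	/* write-protect disable: 1 = flash writes allowed */

/* Returns true if the SPI flash is writeable after the attempt. */
static bool intel_spi_try_enable_writes(struct pci_dev *pdev)
{
	u32 bcr;

	pci_read_config_dword(pdev, BCR, &bcr);
	if (!(bcr & BCR_WPD)) {
		bcr |= BCR_WPD;
		pci_write_config_dword(pdev, BCR, bcr);
		/* Re-read: firmware may have locked the bit, so it may not stick. */
		pci_read_config_dword(pdev, BCR, &bcr);
	}

	return !!(bcr & BCR_WPD);
}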
+ config SPI_INTEL_SPI_PLATFORM tristate "Intel PCH/PCU SPI flash platform driver" if EXPERT depends on X86 diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile index 285aab86c7ca..7d84c5108e17 100644 --- a/drivers/mtd/spi-nor/Makefile +++ b/drivers/mtd/spi-nor/Makefile @@ -7,5 +7,6 @@ obj-$(CONFIG_SPI_HISI_SFC) += hisi-sfc.o obj-$(CONFIG_MTD_MT81xx_NOR) += mtk-quadspi.o obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o obj-$(CONFIG_SPI_INTEL_SPI) += intel-spi.o +obj-$(CONFIG_SPI_INTEL_SPI_PCI) += intel-spi-pci.o obj-$(CONFIG_SPI_INTEL_SPI_PLATFORM) += intel-spi-platform.o -obj-$(CONFIG_SPI_STM32_QUADSPI) += stm32-quadspi.o \ No newline at end of file +obj-$(CONFIG_SPI_STM32_QUADSPI) += stm32-quadspi.o diff --git a/drivers/mtd/spi-nor/aspeed-smc.c b/drivers/mtd/spi-nor/aspeed-smc.c index 0106357421bd..8d3cbe27efb6 100644 --- a/drivers/mtd/spi-nor/aspeed-smc.c +++ b/drivers/mtd/spi-nor/aspeed-smc.c @@ -621,19 +621,18 @@ static void aspeed_smc_chip_set_type(struct aspeed_smc_chip *chip, int type) } /* - * The AST2500 FMC flash controller should be strapped by hardware, or - * autodetected, but the AST2500 SPI flash needs to be set. + * The first chip of the AST2500 FMC flash controller is strapped by + * hardware, or autodetected, but other chips need to be set. Enforce + * the 4B setting for all chips. */ static void aspeed_smc_chip_set_4b(struct aspeed_smc_chip *chip) { struct aspeed_smc_controller *controller = chip->controller; u32 reg; - if (chip->controller->info == &spi_2500_info) { - reg = readl(controller->regs + CE_CONTROL_REG); - reg |= 1 << chip->cs; - writel(reg, controller->regs + CE_CONTROL_REG); - } + reg = readl(controller->regs + CE_CONTROL_REG); + reg |= 1 << chip->cs; + writel(reg, controller->regs + CE_CONTROL_REG); } /* diff --git a/drivers/mtd/spi-nor/atmel-quadspi.c b/drivers/mtd/spi-nor/atmel-quadspi.c index ba76fa8f2031..6c5708bacad8 100644 --- a/drivers/mtd/spi-nor/atmel-quadspi.c +++ b/drivers/mtd/spi-nor/atmel-quadspi.c @@ -35,7 +35,6 @@ #include #include -#include /* QSPI register offsets */ #define QSPI_CR 0x0000 /* Control Register */ diff --git a/drivers/mtd/spi-nor/hisi-sfc.c b/drivers/mtd/spi-nor/hisi-sfc.c index d1106832b9d5..04f9fb5cd9b6 100644 --- a/drivers/mtd/spi-nor/hisi-sfc.c +++ b/drivers/mtd/spi-nor/hisi-sfc.c @@ -355,16 +355,16 @@ static int hisi_spi_nor_register(struct device_node *np, ret = of_property_read_u32(np, "reg", &priv->chipselect); if (ret) { - dev_err(dev, "There's no reg property for %s\n", - np->full_name); + dev_err(dev, "There's no reg property for %pOF\n", + np); return ret; } ret = of_property_read_u32(np, "spi-max-frequency", &priv->clkrate); if (ret) { - dev_err(dev, "There's no spi-max-frequency property for %s\n", - np->full_name); + dev_err(dev, "There's no spi-max-frequency property for %pOF\n", + np); return ret; } priv->host = host; diff --git a/drivers/mtd/spi-nor/intel-spi-pci.c b/drivers/mtd/spi-nor/intel-spi-pci.c new file mode 100644 index 000000000000..e82652335ede --- /dev/null +++ b/drivers/mtd/spi-nor/intel-spi-pci.c @@ -0,0 +1,82 @@ +/* + * Intel PCH/PCU SPI flash PCI driver. + * + * Copyright (C) 2016, Intel Corporation + * Author: Mika Westerberg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include + +#include "intel-spi.h" + +#define BCR 0xdc +#define BCR_WPD BIT(0) + +static const struct intel_spi_boardinfo bxt_info = { + .type = INTEL_SPI_BXT, +}; + +static int intel_spi_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct intel_spi_boardinfo *info; + struct intel_spi *ispi; + u32 bcr; + int ret; + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + info = devm_kmemdup(&pdev->dev, (void *)id->driver_data, sizeof(*info), + GFP_KERNEL); + if (!info) + return -ENOMEM; + + /* Try to make the chip read/write */ + pci_read_config_dword(pdev, BCR, &bcr); + if (!(bcr & BCR_WPD)) { + bcr |= BCR_WPD; + pci_write_config_dword(pdev, BCR, bcr); + pci_read_config_dword(pdev, BCR, &bcr); + } + info->writeable = !!(bcr & BCR_WPD); + + ispi = intel_spi_probe(&pdev->dev, &pdev->resource[0], info); + if (IS_ERR(ispi)) + return PTR_ERR(ispi); + + pci_set_drvdata(pdev, ispi); + return 0; +} + +static void intel_spi_pci_remove(struct pci_dev *pdev) +{ + intel_spi_remove(pci_get_drvdata(pdev)); +} + +static const struct pci_device_id intel_spi_pci_ids[] = { + { PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info }, + { }, +}; +MODULE_DEVICE_TABLE(pci, intel_spi_pci_ids); + +static struct pci_driver intel_spi_pci_driver = { + .name = "intel-spi", + .id_table = intel_spi_pci_ids, + .probe = intel_spi_pci_probe, + .remove = intel_spi_pci_remove, +}; + +module_pci_driver(intel_spi_pci_driver); + +MODULE_DESCRIPTION("Intel PCH/PCU SPI flash PCI driver"); +MODULE_AUTHOR("Mika Westerberg "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mtd/spi-nor/mtk-quadspi.c b/drivers/mtd/spi-nor/mtk-quadspi.c index 8a20ec4991c8..c258c7adf1c5 100644 --- a/drivers/mtd/spi-nor/mtk-quadspi.c +++ b/drivers/mtd/spi-nor/mtk-quadspi.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index 1413828ff1fb..19c000722cbc 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -86,6 +87,8 @@ struct flash_info { * to support memory size above 128Mib. 
*/ #define NO_CHIP_ERASE BIT(12) /* Chip does not support chip erase */ +#define SPI_NOR_SKIP_SFDP BIT(13) /* Skip parsing of SFDP tables */ +#define USE_CLSR BIT(14) /* use CLSR command */ }; #define JEDEC_MFR(info) ((info)->id[0]) @@ -306,8 +309,18 @@ static inline int spi_nor_sr_ready(struct spi_nor *nor) int sr = read_sr(nor); if (sr < 0) return sr; - else - return !(sr & SR_WIP); + + if (nor->flags & SNOR_F_USE_CLSR && sr & (SR_E_ERR | SR_P_ERR)) { + if (sr & SR_E_ERR) + dev_err(nor->dev, "Erase Error occurred\n"); + else + dev_err(nor->dev, "Programming Error occurred\n"); + + nor->write_reg(nor, SPINOR_OP_CLSR, NULL, 0); + return -EIO; + } + + return !(sr & SR_WIP); } static inline int spi_nor_fsr_ready(struct spi_nor *nor) @@ -1041,15 +1054,15 @@ static const struct flash_info spi_nor_ids[] = { */ { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, - { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) }, - { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, - { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, USE_CLSR) }, + { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) }, + { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) }, { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) }, { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) }, { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) }, - { "s25fl128s", INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, - { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, - { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "s25fl128s", INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) }, + { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) }, + { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) }, { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) }, { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) }, { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) }, @@ -1079,6 +1092,7 @@ static const struct flash_info spi_nor_ids[] = { { "sst25wf040b", INFO(0x621613, 0, 64 * 1024, 8, SECT_4K) }, { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) }, { "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) }, + { "sst26vf064b", INFO(0xbf2643, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, /* ST Microelectronics -- newer production may have feature updates */ { "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) }, @@ -1380,6 +1394,16 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len, return ret; } +/** + * macronix_quad_enable() - set QE bit in Status Register. + * @nor: pointer to a 'struct spi_nor' + * + * Set the Quad Enable (QE) bit in the Status Register. + * + * bit 6 of the Status Register is the QE bit for Macronix like QSPI memories. + * + * Return: 0 on success, -errno otherwise. 
+ */ static int macronix_quad_enable(struct spi_nor *nor) { int ret, val; @@ -1413,22 +1437,13 @@ static int macronix_quad_enable(struct spi_nor *nor) * second byte will be written to the configuration register. * Return negative if error occurred. */ -static int write_sr_cr(struct spi_nor *nor, u16 val) -{ - nor->cmd_buf[0] = val & 0xff; - nor->cmd_buf[1] = (val >> 8); - - return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 2); -} - -static int spansion_quad_enable(struct spi_nor *nor) +static int write_sr_cr(struct spi_nor *nor, u8 *sr_cr) { int ret; - int quad_en = CR_QUAD_EN_SPAN << 8; write_enable(nor); - ret = write_sr_cr(nor, quad_en); + ret = nor->write_reg(nor, SPINOR_OP_WRSR, sr_cr, 2); if (ret < 0) { dev_err(nor->dev, "error while writing configuration register\n"); @@ -1442,6 +1457,41 @@ static int spansion_quad_enable(struct spi_nor *nor) return ret; } + return 0; +} + +/** + * spansion_quad_enable() - set QE bit in Configuraiton Register. + * @nor: pointer to a 'struct spi_nor' + * + * Set the Quad Enable (QE) bit in the Configuration Register. + * This function is kept for legacy purpose because it has been used for a + * long time without anybody complaining but it should be considered as + * deprecated and maybe buggy. + * First, this function doesn't care about the previous values of the Status + * and Configuration Registers when it sets the QE bit (bit 1) in the + * Configuration Register: all other bits are cleared, which may have unwanted + * side effects like removing some block protections. + * Secondly, it uses the Read Configuration Register (35h) instruction though + * some very old and few memories don't support this instruction. If a pull-up + * resistor is present on the MISO/IO1 line, we might still be able to pass the + * "read back" test because the QSPI memory doesn't recognize the command, + * so leaves the MISO/IO1 line state unchanged, hence read_cr() returns 0xFF. + * + * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI + * memories. + * + * Return: 0 on success, -errno otherwise. + */ +static int spansion_quad_enable(struct spi_nor *nor) +{ + u8 sr_cr[2] = {0, CR_QUAD_EN_SPAN}; + int ret; + + ret = write_sr_cr(nor, sr_cr); + if (ret) + return ret; + /* read back and check it */ ret = read_cr(nor); if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) { @@ -1452,6 +1502,140 @@ static int spansion_quad_enable(struct spi_nor *nor) return 0; } +/** + * spansion_no_read_cr_quad_enable() - set QE bit in Configuration Register. + * @nor: pointer to a 'struct spi_nor' + * + * Set the Quad Enable (QE) bit in the Configuration Register. + * This function should be used with QSPI memories not supporting the Read + * Configuration Register (35h) instruction. + * + * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI + * memories. + * + * Return: 0 on success, -errno otherwise. + */ +static int spansion_no_read_cr_quad_enable(struct spi_nor *nor) +{ + u8 sr_cr[2]; + int ret; + + /* Keep the current value of the Status Register. */ + ret = read_sr(nor); + if (ret < 0) { + dev_err(nor->dev, "error while reading status register\n"); + return -EINVAL; + } + sr_cr[0] = ret; + sr_cr[1] = CR_QUAD_EN_SPAN; + + return write_sr_cr(nor, sr_cr); +} + +/** + * spansion_read_cr_quad_enable() - set QE bit in Configuration Register. + * @nor: pointer to a 'struct spi_nor' + * + * Set the Quad Enable (QE) bit in the Configuration Register. 
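The rewritten write_sr_cr()/spansion_quad_enable() pair above exists because WRSR with two data bytes rewrites the Status and Configuration Registers together, so the old {0, QE} payload silently cleared the block-protect bits. Here is a small stand-alone model of the fix, preserving the current SR while setting QE; flash_sr/flash_cr and wrsr_two_bytes() are toy stand-ins for the real register accessors.

#include <stdint.h>
#include <stdio.h>

#define CR_QUAD_EN_SPAN  (1 << 1)   /* QE is bit 1 of the Configuration Register */

/* Toy registers standing in for the flash's SR and CR. */
static uint8_t flash_sr = 0x1c;     /* pretend some block-protect bits are set */
static uint8_t flash_cr;

/* Models nor->write_reg(nor, SPINOR_OP_WRSR, sr_cr, 2): writes SR then CR. */
static void wrsr_two_bytes(const uint8_t sr_cr[2])
{
        flash_sr = sr_cr[0];
        flash_cr = sr_cr[1];
}

/* Re-send the current SR so only the CR gains the QE bit. */
static void quad_enable_preserving_sr(void)
{
        uint8_t sr_cr[2] = { flash_sr, CR_QUAD_EN_SPAN };

        wrsr_two_bytes(sr_cr);
}

int main(void)
{
        quad_enable_preserving_sr();
        printf("SR=0x%02x CR=0x%02x\n", flash_sr, flash_cr);  /* SR is unchanged */
        return 0;
}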
+ * This function should be used with QSPI memories supporting the Read + * Configuration Register (35h) instruction. + * + * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI + * memories. + * + * Return: 0 on success, -errno otherwise. + */ +static int spansion_read_cr_quad_enable(struct spi_nor *nor) +{ + struct device *dev = nor->dev; + u8 sr_cr[2]; + int ret; + + /* Check current Quad Enable bit value. */ + ret = read_cr(nor); + if (ret < 0) { + dev_err(dev, "error while reading configuration register\n"); + return -EINVAL; + } + + if (ret & CR_QUAD_EN_SPAN) + return 0; + + sr_cr[1] = ret | CR_QUAD_EN_SPAN; + + /* Keep the current value of the Status Register. */ + ret = read_sr(nor); + if (ret < 0) { + dev_err(dev, "error while reading status register\n"); + return -EINVAL; + } + sr_cr[0] = ret; + + ret = write_sr_cr(nor, sr_cr); + if (ret) + return ret; + + /* Read back and check it. */ + ret = read_cr(nor); + if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) { + dev_err(nor->dev, "Spansion Quad bit not set\n"); + return -EINVAL; + } + + return 0; +} + +/** + * sr2_bit7_quad_enable() - set QE bit in Status Register 2. + * @nor: pointer to a 'struct spi_nor' + * + * Set the Quad Enable (QE) bit in the Status Register 2. + * + * This is one of the procedures to set the QE bit described in the SFDP + * (JESD216 rev B) specification but no manufacturer using this procedure has + * been identified yet, hence the name of the function. + * + * Return: 0 on success, -errno otherwise. + */ +static int sr2_bit7_quad_enable(struct spi_nor *nor) +{ + u8 sr2; + int ret; + + /* Check current Quad Enable bit value. */ + ret = nor->read_reg(nor, SPINOR_OP_RDSR2, &sr2, 1); + if (ret) + return ret; + if (sr2 & SR2_QUAD_EN_BIT7) + return 0; + + /* Update the Quad Enable bit. */ + sr2 |= SR2_QUAD_EN_BIT7; + + write_enable(nor); + + ret = nor->write_reg(nor, SPINOR_OP_WRSR2, &sr2, 1); + if (ret < 0) { + dev_err(nor->dev, "error while writing status register 2\n"); + return -EINVAL; + } + + ret = spi_nor_wait_till_ready(nor); + if (ret < 0) { + dev_err(nor->dev, "timeout while writing status register 2\n"); + return ret; + } + + /* Read back and check it. */ + ret = nor->read_reg(nor, SPINOR_OP_RDSR2, &sr2, 1); + if (!(ret > 0 && (sr2 & SR2_QUAD_EN_BIT7))) { + dev_err(nor->dev, "SR2 Quad bit not set\n"); + return -EINVAL; + } + + return 0; +} + static int spi_nor_check(struct spi_nor *nor) { if (!nor->dev || !nor->read || !nor->write || @@ -1591,6 +1775,599 @@ spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, pp->proto = proto; } +/* + * Serial Flash Discoverable Parameters (SFDP) parsing. + */ + +/** + * spi_nor_read_sfdp() - read Serial Flash Discoverable Parameters. + * @nor: pointer to a 'struct spi_nor' + * @addr: offset in the SFDP area to start reading data from + * @len: number of bytes to read + * @buf: buffer where the SFDP data are copied into (dma-safe memory) + * + * Whatever the actual numbers of bytes for address and dummy cycles are + * for (Fast) Read commands, the Read SFDP (5Ah) instruction is always + * followed by a 3-byte address and 8 dummy clock cycles. + * + * Return: 0 on success, -errno otherwise. 
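sr2_bit7_quad_enable() above follows the usual check, read-modify-write, read-back-and-verify shape, so a flash that silently ignores WRSR2 is reported instead of limping on with quad reads disabled. A compilable sketch of that flow (the wait-till-ready step is dropped here), with rdsr2()/wrsr2() as toy stand-ins for the 3Fh/3Eh register accesses:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define SR2_QUAD_EN_BIT7  (1 << 7)

/* Toy Status Register 2; stands in for the RDSR2 (3Fh) / WRSR2 (3Eh) ops. */
static uint8_t flash_sr2;

static int rdsr2(uint8_t *v) { *v = flash_sr2; return 0; }
static int wrsr2(uint8_t v)  { flash_sr2 = v;  return 0; }

static int quad_enable_sr2_bit7(void)
{
        uint8_t sr2;
        int ret = rdsr2(&sr2);

        if (ret)
                return ret;
        if (sr2 & SR2_QUAD_EN_BIT7)
                return 0;                         /* already enabled, skip the write */

        ret = wrsr2(sr2 | SR2_QUAD_EN_BIT7);      /* read-modify-write */
        if (ret)
                return ret;

        ret = rdsr2(&sr2);                        /* read back and check */
        if (ret)
                return ret;
        return (sr2 & SR2_QUAD_EN_BIT7) ? 0 : -EINVAL;
}

int main(void)
{
        printf("rc=%d sr2=0x%02x\n", quad_enable_sr2_bit7(), flash_sr2);
        return 0;
}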
+ */ +static int spi_nor_read_sfdp(struct spi_nor *nor, u32 addr, + size_t len, void *buf) +{ + u8 addr_width, read_opcode, read_dummy; + int ret; + + read_opcode = nor->read_opcode; + addr_width = nor->addr_width; + read_dummy = nor->read_dummy; + + nor->read_opcode = SPINOR_OP_RDSFDP; + nor->addr_width = 3; + nor->read_dummy = 8; + + while (len) { + ret = nor->read(nor, addr, len, (u8 *)buf); + if (!ret || ret > len) { + ret = -EIO; + goto read_err; + } + if (ret < 0) + goto read_err; + + buf += ret; + addr += ret; + len -= ret; + } + ret = 0; + +read_err: + nor->read_opcode = read_opcode; + nor->addr_width = addr_width; + nor->read_dummy = read_dummy; + + return ret; +} + +/** + * spi_nor_read_sfdp_dma_unsafe() - read Serial Flash Discoverable Parameters. + * @nor: pointer to a 'struct spi_nor' + * @addr: offset in the SFDP area to start reading data from + * @len: number of bytes to read + * @buf: buffer where the SFDP data are copied into + * + * Wrap spi_nor_read_sfdp() using a kmalloc'ed bounce buffer as @buf is now not + * guaranteed to be dma-safe. + * + * Return: -ENOMEM if kmalloc() fails, the return code of spi_nor_read_sfdp() + * otherwise. + */ +static int spi_nor_read_sfdp_dma_unsafe(struct spi_nor *nor, u32 addr, + size_t len, void *buf) +{ + void *dma_safe_buf; + int ret; + + dma_safe_buf = kmalloc(len, GFP_KERNEL); + if (!dma_safe_buf) + return -ENOMEM; + + ret = spi_nor_read_sfdp(nor, addr, len, dma_safe_buf); + memcpy(buf, dma_safe_buf, len); + kfree(dma_safe_buf); + + return ret; +} + +struct sfdp_parameter_header { + u8 id_lsb; + u8 minor; + u8 major; + u8 length; /* in double words */ + u8 parameter_table_pointer[3]; /* byte address */ + u8 id_msb; +}; + +#define SFDP_PARAM_HEADER_ID(p) (((p)->id_msb << 8) | (p)->id_lsb) +#define SFDP_PARAM_HEADER_PTP(p) \ + (((p)->parameter_table_pointer[2] << 16) | \ + ((p)->parameter_table_pointer[1] << 8) | \ + ((p)->parameter_table_pointer[0] << 0)) + +#define SFDP_BFPT_ID 0xff00 /* Basic Flash Parameter Table */ +#define SFDP_SECTOR_MAP_ID 0xff81 /* Sector Map Table */ + +#define SFDP_SIGNATURE 0x50444653U +#define SFDP_JESD216_MAJOR 1 +#define SFDP_JESD216_MINOR 0 +#define SFDP_JESD216A_MINOR 5 +#define SFDP_JESD216B_MINOR 6 + +struct sfdp_header { + u32 signature; /* Ox50444653U <=> "SFDP" */ + u8 minor; + u8 major; + u8 nph; /* 0-base number of parameter headers */ + u8 unused; + + /* Basic Flash Parameter Table. */ + struct sfdp_parameter_header bfpt_header; +}; + +/* Basic Flash Parameter Table */ + +/* + * JESD216 rev B defines a Basic Flash Parameter Table of 16 DWORDs. + * They are indexed from 1 but C arrays are indexed from 0. + */ +#define BFPT_DWORD(i) ((i) - 1) +#define BFPT_DWORD_MAX 16 + +/* The first version of JESB216 defined only 9 DWORDs. */ +#define BFPT_DWORD_MAX_JESD216 9 + +/* 1st DWORD. */ +#define BFPT_DWORD1_FAST_READ_1_1_2 BIT(16) +#define BFPT_DWORD1_ADDRESS_BYTES_MASK GENMASK(18, 17) +#define BFPT_DWORD1_ADDRESS_BYTES_3_ONLY (0x0UL << 17) +#define BFPT_DWORD1_ADDRESS_BYTES_3_OR_4 (0x1UL << 17) +#define BFPT_DWORD1_ADDRESS_BYTES_4_ONLY (0x2UL << 17) +#define BFPT_DWORD1_DTR BIT(19) +#define BFPT_DWORD1_FAST_READ_1_2_2 BIT(20) +#define BFPT_DWORD1_FAST_READ_1_4_4 BIT(21) +#define BFPT_DWORD1_FAST_READ_1_1_4 BIT(22) + +/* 5th DWORD. */ +#define BFPT_DWORD5_FAST_READ_2_2_2 BIT(0) +#define BFPT_DWORD5_FAST_READ_4_4_4 BIT(4) + +/* 11th DWORD. */ +#define BFPT_DWORD11_PAGE_SIZE_SHIFT 4 +#define BFPT_DWORD11_PAGE_SIZE_MASK GENMASK(7, 4) + +/* 15th DWORD. 
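The SFDP_PARAM_HEADER_ID()/SFDP_PARAM_HEADER_PTP() macros above pick the table ID out of the first and last bytes of the 8-byte parameter header and assemble the 24-bit little-endian table pointer. A stand-alone decoder for the same layout, fed a hypothetical rev 1.6 BFPT header purely as an example:

#include <stdint.h>
#include <stdio.h>

/* Same layout as struct sfdp_parameter_header in the patch. */
struct param_header {
        uint8_t id_lsb, minor, major, length;   /* length is in dwords */
        uint8_t ptp[3];                         /* 24-bit byte address, LSB first */
        uint8_t id_msb;
};

static unsigned int header_id(const struct param_header *p)
{
        return (p->id_msb << 8) | p->id_lsb;
}

static uint32_t header_ptp(const struct param_header *p)
{
        return (p->ptp[2] << 16) | (p->ptp[1] << 8) | p->ptp[0];
}

int main(void)
{
        /* Hypothetical BFPT header: ID 0xff00, rev 1.6, 16 dwords at 0x000080. */
        struct param_header h = { 0x00, 6, 1, 16, { 0x80, 0x00, 0x00 }, 0xff };

        printf("id=0x%04x len=%u dwords table@0x%06x\n",
               header_id(&h), (unsigned int)h.length,
               (unsigned int)header_ptp(&h));
        return 0;
}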
*/ + +/* + * (from JESD216 rev B) + * Quad Enable Requirements (QER): + * - 000b: Device does not have a QE bit. Device detects 1-1-4 and 1-4-4 + * reads based on instruction. DQ3/HOLD# functions are hold during + * instruction phase. + * - 001b: QE is bit 1 of status register 2. It is set via Write Status with + * two data bytes where bit 1 of the second byte is one. + * [...] + * Writing only one byte to the status register has the side-effect of + * clearing status register 2, including the QE bit. The 100b code is + * used if writing one byte to the status register does not modify + * status register 2. + * - 010b: QE is bit 6 of status register 1. It is set via Write Status with + * one data byte where bit 6 is one. + * [...] + * - 011b: QE is bit 7 of status register 2. It is set via Write status + * register 2 instruction 3Eh with one data byte where bit 7 is one. + * [...] + * The status register 2 is read using instruction 3Fh. + * - 100b: QE is bit 1 of status register 2. It is set via Write Status with + * two data bytes where bit 1 of the second byte is one. + * [...] + * In contrast to the 001b code, writing one byte to the status + * register does not modify status register 2. + * - 101b: QE is bit 1 of status register 2. Status register 1 is read using + * Read Status instruction 05h. Status register2 is read using + * instruction 35h. QE is set via Writ Status instruction 01h with + * two data bytes where bit 1 of the second byte is one. + * [...] + */ +#define BFPT_DWORD15_QER_MASK GENMASK(22, 20) +#define BFPT_DWORD15_QER_NONE (0x0UL << 20) /* Micron */ +#define BFPT_DWORD15_QER_SR2_BIT1_BUGGY (0x1UL << 20) +#define BFPT_DWORD15_QER_SR1_BIT6 (0x2UL << 20) /* Macronix */ +#define BFPT_DWORD15_QER_SR2_BIT7 (0x3UL << 20) +#define BFPT_DWORD15_QER_SR2_BIT1_NO_RD (0x4UL << 20) +#define BFPT_DWORD15_QER_SR2_BIT1 (0x5UL << 20) /* Spansion */ + +struct sfdp_bfpt { + u32 dwords[BFPT_DWORD_MAX]; +}; + +/* Fast Read settings. */ + +static inline void +spi_nor_set_read_settings_from_bfpt(struct spi_nor_read_command *read, + u16 half, + enum spi_nor_protocol proto) +{ + read->num_mode_clocks = (half >> 5) & 0x07; + read->num_wait_states = (half >> 0) & 0x1f; + read->opcode = (half >> 8) & 0xff; + read->proto = proto; +} + +struct sfdp_bfpt_read { + /* The Fast Read x-y-z hardware capability in params->hwcaps.mask. */ + u32 hwcaps; + + /* + * The bit in BFPT DWORD tells us + * whether the Fast Read x-y-z command is supported. + */ + u32 supported_dword; + u32 supported_bit; + + /* + * The half-word at offset in BFPT DWORD + * encodes the op code, the number of mode clocks and the number of wait + * states to be used by Fast Read x-y-z command. + */ + u32 settings_dword; + u32 settings_shift; + + /* The SPI protocol for this Fast Read x-y-z command. 
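spi_nor_set_read_settings_from_bfpt() above slices one 16-bit half of a BFPT dword into wait states (bits 4:0), mode clocks (bits 7:5) and the read opcode (bits 15:8). A tiny decoder plus a made-up example value, chosen only to illustrate the packing:

#include <stdint.h>
#include <stdio.h>

static void decode_read_half(uint16_t half)
{
        unsigned int wait_states = half & 0x1f;          /* dummy clock cycles */
        unsigned int mode_clocks = (half >> 5) & 0x07;
        unsigned int opcode      = (half >> 8) & 0xff;

        printf("opcode=0x%02x mode_clocks=%u wait_states=%u\n",
               opcode, mode_clocks, wait_states);
}

int main(void)
{
        /* Hypothetical 1-1-4 entry: opcode 6Bh, no mode clocks, 8 dummies. */
        decode_read_half(0x6b08);
        return 0;
}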
*/ + enum spi_nor_protocol proto; +}; + +static const struct sfdp_bfpt_read sfdp_bfpt_reads[] = { + /* Fast Read 1-1-2 */ + { + SNOR_HWCAPS_READ_1_1_2, + BFPT_DWORD(1), BIT(16), /* Supported bit */ + BFPT_DWORD(4), 0, /* Settings */ + SNOR_PROTO_1_1_2, + }, + + /* Fast Read 1-2-2 */ + { + SNOR_HWCAPS_READ_1_2_2, + BFPT_DWORD(1), BIT(20), /* Supported bit */ + BFPT_DWORD(4), 16, /* Settings */ + SNOR_PROTO_1_2_2, + }, + + /* Fast Read 2-2-2 */ + { + SNOR_HWCAPS_READ_2_2_2, + BFPT_DWORD(5), BIT(0), /* Supported bit */ + BFPT_DWORD(6), 16, /* Settings */ + SNOR_PROTO_2_2_2, + }, + + /* Fast Read 1-1-4 */ + { + SNOR_HWCAPS_READ_1_1_4, + BFPT_DWORD(1), BIT(22), /* Supported bit */ + BFPT_DWORD(3), 16, /* Settings */ + SNOR_PROTO_1_1_4, + }, + + /* Fast Read 1-4-4 */ + { + SNOR_HWCAPS_READ_1_4_4, + BFPT_DWORD(1), BIT(21), /* Supported bit */ + BFPT_DWORD(3), 0, /* Settings */ + SNOR_PROTO_1_4_4, + }, + + /* Fast Read 4-4-4 */ + { + SNOR_HWCAPS_READ_4_4_4, + BFPT_DWORD(5), BIT(4), /* Supported bit */ + BFPT_DWORD(7), 16, /* Settings */ + SNOR_PROTO_4_4_4, + }, +}; + +struct sfdp_bfpt_erase { + /* + * The half-word at offset in DWORD encodes the + * op code and erase sector size to be used by Sector Erase commands. + */ + u32 dword; + u32 shift; +}; + +static const struct sfdp_bfpt_erase sfdp_bfpt_erases[] = { + /* Erase Type 1 in DWORD8 bits[15:0] */ + {BFPT_DWORD(8), 0}, + + /* Erase Type 2 in DWORD8 bits[31:16] */ + {BFPT_DWORD(8), 16}, + + /* Erase Type 3 in DWORD9 bits[15:0] */ + {BFPT_DWORD(9), 0}, + + /* Erase Type 4 in DWORD9 bits[31:16] */ + {BFPT_DWORD(9), 16}, +}; + +static int spi_nor_hwcaps_read2cmd(u32 hwcaps); + +/** + * spi_nor_parse_bfpt() - read and parse the Basic Flash Parameter Table. + * @nor: pointer to a 'struct spi_nor' + * @bfpt_header: pointer to the 'struct sfdp_parameter_header' describing + * the Basic Flash Parameter Table length and version + * @params: pointer to the 'struct spi_nor_flash_parameter' to be + * filled + * + * The Basic Flash Parameter Table is the main and only mandatory table as + * defined by the SFDP (JESD216) specification. + * It provides us with the total size (memory density) of the data array and + * the number of address bytes for Fast Read, Page Program and Sector Erase + * commands. + * For Fast READ commands, it also gives the number of mode clock cycles and + * wait states (regrouped in the number of dummy clock cycles) for each + * supported instruction op code. + * For Page Program, the page size is now available since JESD216 rev A, however + * the supported instruction op codes are still not provided. + * For Sector Erase commands, this table stores the supported instruction op + * codes and the associated sector sizes. + * Finally, the Quad Enable Requirements (QER) are also available since JESD216 + * rev A. The QER bits encode the manufacturer dependent procedure to be + * executed to set the Quad Enable (QE) bit in some internal register of the + * Quad SPI memory. Indeed the QE bit, when it exists, must be set before + * sending any Quad SPI command to the memory. Actually, setting the QE bit + * tells the memory to reassign its WP# and HOLD#/RESET# pins to functions IO2 + * and IO3 hence enabling 4 (Quad) I/O lines. + * + * Return: 0 on success, -errno otherwise. 
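Each Erase Type half-word listed in sfdp_bfpt_erases above, and consumed by the Sector Erase loop in spi_nor_parse_bfpt() further down, packs the opcode into its high byte and the sector size as a power-of-two exponent into its low byte, with 0 meaning the slot is unused. A stand-alone decode with hypothetical example values:

#include <stdint.h>
#include <stdio.h>

static void decode_erase_half(uint16_t half)
{
        unsigned int n      = half & 0xff;        /* sector size is 2^n bytes */
        unsigned int opcode = (half >> 8) & 0xff;

        if (!n) {
                printf("erase type unused\n");
                return;
        }
        printf("opcode=0x%02x sector=%u bytes\n", opcode, 1u << n);
}

int main(void)
{
        decode_erase_half(0x200c);   /* hypothetical: 20h, 2^12 = 4 KiB  */
        decode_erase_half(0xd810);   /* hypothetical: d8h, 2^16 = 64 KiB */
        decode_erase_half(0x0000);   /* empty slot */
        return 0;
}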
+ */ +static int spi_nor_parse_bfpt(struct spi_nor *nor, + const struct sfdp_parameter_header *bfpt_header, + struct spi_nor_flash_parameter *params) +{ + struct mtd_info *mtd = &nor->mtd; + struct sfdp_bfpt bfpt; + size_t len; + int i, cmd, err; + u32 addr; + u16 half; + + /* JESD216 Basic Flash Parameter Table length is at least 9 DWORDs. */ + if (bfpt_header->length < BFPT_DWORD_MAX_JESD216) + return -EINVAL; + + /* Read the Basic Flash Parameter Table. */ + len = min_t(size_t, sizeof(bfpt), + bfpt_header->length * sizeof(u32)); + addr = SFDP_PARAM_HEADER_PTP(bfpt_header); + memset(&bfpt, 0, sizeof(bfpt)); + err = spi_nor_read_sfdp_dma_unsafe(nor, addr, len, &bfpt); + if (err < 0) + return err; + + /* Fix endianness of the BFPT DWORDs. */ + for (i = 0; i < BFPT_DWORD_MAX; i++) + bfpt.dwords[i] = le32_to_cpu(bfpt.dwords[i]); + + /* Number of address bytes. */ + switch (bfpt.dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) { + case BFPT_DWORD1_ADDRESS_BYTES_3_ONLY: + nor->addr_width = 3; + break; + + case BFPT_DWORD1_ADDRESS_BYTES_4_ONLY: + nor->addr_width = 4; + break; + + default: + break; + } + + /* Flash Memory Density (in bits). */ + params->size = bfpt.dwords[BFPT_DWORD(2)]; + if (params->size & BIT(31)) { + params->size &= ~BIT(31); + + /* + * Prevent overflows on params->size. Anyway, a NOR of 2^64 + * bits is unlikely to exist so this error probably means + * the BFPT we are reading is corrupted/wrong. + */ + if (params->size > 63) + return -EINVAL; + + params->size = 1ULL << params->size; + } else { + params->size++; + } + params->size >>= 3; /* Convert to bytes. */ + + /* Fast Read settings. */ + for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_reads); i++) { + const struct sfdp_bfpt_read *rd = &sfdp_bfpt_reads[i]; + struct spi_nor_read_command *read; + + if (!(bfpt.dwords[rd->supported_dword] & rd->supported_bit)) { + params->hwcaps.mask &= ~rd->hwcaps; + continue; + } + + params->hwcaps.mask |= rd->hwcaps; + cmd = spi_nor_hwcaps_read2cmd(rd->hwcaps); + read = ¶ms->reads[cmd]; + half = bfpt.dwords[rd->settings_dword] >> rd->settings_shift; + spi_nor_set_read_settings_from_bfpt(read, half, rd->proto); + } + + /* Sector Erase settings. */ + for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_erases); i++) { + const struct sfdp_bfpt_erase *er = &sfdp_bfpt_erases[i]; + u32 erasesize; + u8 opcode; + + half = bfpt.dwords[er->dword] >> er->shift; + erasesize = half & 0xff; + + /* erasesize == 0 means this Erase Type is not supported. */ + if (!erasesize) + continue; + + erasesize = 1U << erasesize; + opcode = (half >> 8) & 0xff; +#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS + if (erasesize == SZ_4K) { + nor->erase_opcode = opcode; + mtd->erasesize = erasesize; + break; + } +#endif + if (!mtd->erasesize || mtd->erasesize < erasesize) { + nor->erase_opcode = opcode; + mtd->erasesize = erasesize; + } + } + + /* Stop here if not JESD216 rev A or later. */ + if (bfpt_header->length < BFPT_DWORD_MAX) + return 0; + + /* Page size: this field specifies 'N' so the page size = 2^N bytes. */ + params->page_size = bfpt.dwords[BFPT_DWORD(11)]; + params->page_size &= BFPT_DWORD11_PAGE_SIZE_MASK; + params->page_size >>= BFPT_DWORD11_PAGE_SIZE_SHIFT; + params->page_size = 1U << params->page_size; + + /* Quad Enable Requirements. 
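The density handling in spi_nor_parse_bfpt() above reads BFPT dword 2 either as "bits minus one" (bit 31 clear) or as a power-of-two exponent (bit 31 set), rejects exponents above 63, and finally shifts from bits to bytes. The same rule as a small stand-alone helper, with made-up example densities:

#include <stdint.h>
#include <stdio.h>

/* Returns the flash size in bytes, or 0 for an implausible exponent. */
static uint64_t bfpt_density_to_bytes(uint32_t dword2)
{
        uint64_t bits;

        if (dword2 & 0x80000000u) {
                uint32_t n = dword2 & 0x7fffffffu;   /* density is 2^n bits */

                if (n > 63)
                        return 0;                    /* treat as corrupted BFPT */
                bits = 1ull << n;
        } else {
                bits = (uint64_t)dword2 + 1;         /* density is n + 1 bits */
        }
        return bits >> 3;                            /* bits -> bytes */
}

int main(void)
{
        /* Hypothetical 128 Mibit part encoded as "bits minus one". */
        printf("%llu bytes\n",
               (unsigned long long)bfpt_density_to_bytes(0x07ffffff));
        /* Hypothetical 2 Gibit part encoded as an exponent (2^31 bits). */
        printf("%llu bytes\n",
               (unsigned long long)bfpt_density_to_bytes(0x80000000u | 31));
        return 0;
}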
*/ + switch (bfpt.dwords[BFPT_DWORD(15)] & BFPT_DWORD15_QER_MASK) { + case BFPT_DWORD15_QER_NONE: + params->quad_enable = NULL; + break; + + case BFPT_DWORD15_QER_SR2_BIT1_BUGGY: + case BFPT_DWORD15_QER_SR2_BIT1_NO_RD: + params->quad_enable = spansion_no_read_cr_quad_enable; + break; + + case BFPT_DWORD15_QER_SR1_BIT6: + params->quad_enable = macronix_quad_enable; + break; + + case BFPT_DWORD15_QER_SR2_BIT7: + params->quad_enable = sr2_bit7_quad_enable; + break; + + case BFPT_DWORD15_QER_SR2_BIT1: + params->quad_enable = spansion_read_cr_quad_enable; + break; + + default: + return -EINVAL; + } + + return 0; +} + +/** + * spi_nor_parse_sfdp() - parse the Serial Flash Discoverable Parameters. + * @nor: pointer to a 'struct spi_nor' + * @params: pointer to the 'struct spi_nor_flash_parameter' to be + * filled + * + * The Serial Flash Discoverable Parameters are described by the JEDEC JESD216 + * specification. This is a standard which tends to supported by almost all + * (Q)SPI memory manufacturers. Those hard-coded tables allow us to learn at + * runtime the main parameters needed to perform basic SPI flash operations such + * as Fast Read, Page Program or Sector Erase commands. + * + * Return: 0 on success, -errno otherwise. + */ +static int spi_nor_parse_sfdp(struct spi_nor *nor, + struct spi_nor_flash_parameter *params) +{ + const struct sfdp_parameter_header *param_header, *bfpt_header; + struct sfdp_parameter_header *param_headers = NULL; + struct sfdp_header header; + struct device *dev = nor->dev; + size_t psize; + int i, err; + + /* Get the SFDP header. */ + err = spi_nor_read_sfdp_dma_unsafe(nor, 0, sizeof(header), &header); + if (err < 0) + return err; + + /* Check the SFDP header version. */ + if (le32_to_cpu(header.signature) != SFDP_SIGNATURE || + header.major != SFDP_JESD216_MAJOR || + header.minor < SFDP_JESD216_MINOR) + return -EINVAL; + + /* + * Verify that the first and only mandatory parameter header is a + * Basic Flash Parameter Table header as specified in JESD216. + */ + bfpt_header = &header.bfpt_header; + if (SFDP_PARAM_HEADER_ID(bfpt_header) != SFDP_BFPT_ID || + bfpt_header->major != SFDP_JESD216_MAJOR) + return -EINVAL; + + /* + * Allocate memory then read all parameter headers with a single + * Read SFDP command. These parameter headers will actually be parsed + * twice: a first time to get the latest revision of the basic flash + * parameter table, then a second time to handle the supported optional + * tables. + * Hence we read the parameter headers once for all to reduce the + * processing time. Also we use kmalloc() instead of devm_kmalloc() + * because we don't need to keep these parameter headers: the allocated + * memory is always released with kfree() before exiting this function. + */ + if (header.nph) { + psize = header.nph * sizeof(*param_headers); + + param_headers = kmalloc(psize, GFP_KERNEL); + if (!param_headers) + return -ENOMEM; + + err = spi_nor_read_sfdp(nor, sizeof(header), + psize, param_headers); + if (err < 0) { + dev_err(dev, "failed to read SFDP parameter headers\n"); + goto exit; + } + } + + /* + * Check other parameter headers to get the latest revision of + * the basic flash parameter table. 
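spi_nor_parse_sfdp() above starts by sanity-checking the SFDP header: the signature dword must spell "SFDP" (stored LSB first, hence 0x50444653), the major revision must be 1 and the minor revision at least 0, so any JESD216, 216A or 216B table is accepted. A minimal model of that check:

#include <stdint.h>
#include <stdio.h>

#define SFDP_SIGNATURE      0x50444653u   /* 'S' 'F' 'D' 'P', LSB first */
#define SFDP_JESD216_MAJOR  1
#define SFDP_JESD216_MINOR  0

static int sfdp_header_ok(uint32_t signature, uint8_t major, uint8_t minor)
{
        return signature == SFDP_SIGNATURE &&
               major == SFDP_JESD216_MAJOR &&
               minor >= SFDP_JESD216_MINOR;
}

int main(void)
{
        printf("rev 1.6: %d\n", sfdp_header_ok(SFDP_SIGNATURE, 1, 6));  /* ok */
        printf("rev 2.0: %d\n", sfdp_header_ok(SFDP_SIGNATURE, 2, 0));  /* rejected */
        printf("bad sig: %d\n", sfdp_header_ok(0, 1, 0));               /* rejected */
        return 0;
}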
+ */ + for (i = 0; i < header.nph; i++) { + param_header = ¶m_headers[i]; + + if (SFDP_PARAM_HEADER_ID(param_header) == SFDP_BFPT_ID && + param_header->major == SFDP_JESD216_MAJOR && + (param_header->minor > bfpt_header->minor || + (param_header->minor == bfpt_header->minor && + param_header->length > bfpt_header->length))) + bfpt_header = param_header; + } + + err = spi_nor_parse_bfpt(nor, bfpt_header, params); + if (err) + goto exit; + + /* Parse other parameter headers. */ + for (i = 0; i < header.nph; i++) { + param_header = ¶m_headers[i]; + + switch (SFDP_PARAM_HEADER_ID(param_header)) { + case SFDP_SECTOR_MAP_ID: + dev_info(dev, "non-uniform erase sector maps are not supported yet.\n"); + break; + + default: + break; + } + + if (err) + goto exit; + } + +exit: + kfree(param_headers); + return err; +} + static int spi_nor_init_params(struct spi_nor *nor, const struct flash_info *info, struct spi_nor_flash_parameter *params) @@ -1646,11 +2423,28 @@ static int spi_nor_init_params(struct spi_nor *nor, break; default: + /* Kept only for backward compatibility purpose. */ params->quad_enable = spansion_quad_enable; break; } } + /* Override the parameters with data read from SFDP tables. */ + nor->addr_width = 0; + nor->mtd.erasesize = 0; + if ((info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)) && + !(info->flags & SPI_NOR_SKIP_SFDP)) { + struct spi_nor_flash_parameter sfdp_params; + + memcpy(&sfdp_params, params, sizeof(sfdp_params)); + if (spi_nor_parse_sfdp(nor, &sfdp_params)) { + nor->addr_width = 0; + nor->mtd.erasesize = 0; + } else { + memcpy(params, &sfdp_params, sizeof(*params)); + } + } + return 0; } @@ -1762,6 +2556,10 @@ static int spi_nor_select_erase(struct spi_nor *nor, { struct mtd_info *mtd = &nor->mtd; + /* Do nothing if already configured from SFDP. 
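The spi_nor_init_params() hunk above parses SFDP into a scratch copy and only commits it on success, resetting addr_width and erasesize otherwise, so a corrupt table can never leave the driver with half-applied settings. Below is a reduced model of that copy-try-commit pattern; struct flash_params and the always-failing parse_sfdp() stub are inventions for the example, not driver code.

#include <stdio.h>
#include <string.h>

/* Trimmed-down stand-in for struct spi_nor_flash_parameter. */
struct flash_params {
        unsigned long long size;
        unsigned int page_size;
};

/* Stub parser: scribbles over the scratch copy and then reports failure. */
static int parse_sfdp(struct flash_params *p)
{
        p->size = 0;
        return -1;
}

static void init_params(struct flash_params *params)
{
        struct flash_params sfdp_params;

        memcpy(&sfdp_params, params, sizeof(sfdp_params));
        if (parse_sfdp(&sfdp_params) == 0)
                memcpy(params, &sfdp_params, sizeof(*params));
        /* on failure the flash_info defaults are left untouched */
}

int main(void)
{
        struct flash_params p = { .size = 16 * 1024 * 1024, .page_size = 256 };

        init_params(&p);
        printf("size=%llu page=%u\n", p.size, p.page_size);  /* defaults kept */
        return 0;
}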
*/ + if (mtd->erasesize) + return 0; + #ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS /* prefer "small sector" erase if possible */ if (info->flags & SECT_4K) { @@ -1960,6 +2758,8 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, nor->flags |= SNOR_F_HAS_SR_TB; if (info->flags & NO_CHIP_ERASE) nor->flags |= SNOR_F_NO_OP_CHIP_ERASE; + if (info->flags & USE_CLSR) + nor->flags |= SNOR_F_USE_CLSR; if (info->flags & SPI_NOR_NO_ERASE) mtd->flags |= MTD_NO_ERASE; @@ -1994,9 +2794,11 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, if (ret) return ret; - if (info->addr_width) + if (nor->addr_width) { + /* already configured from SFDP */ + } else if (info->addr_width) { nor->addr_width = info->addr_width; - else if (mtd->size > 0x1000000) { + } else if (mtd->size > 0x1000000) { /* enable 4-byte addressing if the device exceeds 16MiB */ nor->addr_width = 4; if (JEDEC_MFR(info) == SNOR_MFR_SPANSION || diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c index 41b13d1cdcc4..95f0bf95f095 100644 --- a/drivers/mtd/ssfdc.c +++ b/drivers/mtd/ssfdc.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include struct ssfdcr_record { diff --git a/drivers/mtd/tests/nandbiterrs.c b/drivers/mtd/tests/nandbiterrs.c index f26dec896afa..5f03b8c885a9 100644 --- a/drivers/mtd/tests/nandbiterrs.c +++ b/drivers/mtd/tests/nandbiterrs.c @@ -47,7 +47,7 @@ #include #include #include -#include +#include #include #include "mtd_test.h" diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c index c3963f880448..b210fdb31c98 100644 --- a/drivers/mtd/ubi/block.c +++ b/drivers/mtd/ubi/block.c @@ -383,7 +383,7 @@ int ubiblock_create(struct ubi_volume_info *vi) /* Initialize the gendisk of this ubiblock device */ gd = alloc_disk(1); if (!gd) { - pr_err("UBI: block: alloc_disk failed"); + pr_err("UBI: block: alloc_disk failed\n"); ret = -ENODEV; goto out_free_dev; } @@ -607,7 +607,7 @@ static void __init ubiblock_create_from_param(void) desc = open_volume_desc(p->name, p->ubi_num, p->vol_id); if (IS_ERR(desc)) { pr_err( - "UBI: block: can't open volume on ubi%d_%d, err=%ld", + "UBI: block: can't open volume on ubi%d_%d, err=%ld\n", p->ubi_num, p->vol_id, PTR_ERR(desc)); continue; } @@ -618,7 +618,7 @@ static void __init ubiblock_create_from_param(void) ret = ubiblock_create(&vi); if (ret) { pr_err( - "UBI: block: can't add '%s' volume on ubi%d_%d, err=%d", + "UBI: block: can't add '%s' volume on ubi%d_%d, err=%d\n", vi.name, p->ubi_num, p->vol_id, ret); continue; } diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index d854521962ef..842550b5712a 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c @@ -825,7 +825,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, for (i = 0; i < UBI_MAX_DEVICES; i++) { ubi = ubi_devices[i]; if (ubi && mtd->index == ubi->mtd->index) { - pr_err("ubi: mtd%d is already attached to ubi%d", + pr_err("ubi: mtd%d is already attached to ubi%d\n", mtd->index, i); return -EEXIST; } @@ -840,7 +840,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, * no sense to attach emulated MTD devices, so we prohibit this. 
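The spi_nor_scan() hunk above gives an SFDP-provided address width priority over the static table entry, and only then applies the "larger than 16 MiB needs 4-byte addressing" heuristic; the 3-byte fallback for smaller parts sits outside the quoted hunk and is assumed here. A compact sketch of that precedence:

#include <stdio.h>

static unsigned int pick_addr_width(unsigned int from_sfdp,
                                    unsigned int from_table,
                                    unsigned long long flash_size)
{
        if (from_sfdp)                       /* already configured from SFDP */
                return from_sfdp;
        if (from_table)                      /* static flash_info entry */
                return from_table;
        return flash_size > 0x1000000 ? 4 : 3;
}

int main(void)
{
        printf("%u\n", pick_addr_width(4, 3, 0));          /* SFDP wins: 4   */
        printf("%u\n", pick_addr_width(0, 0, 32 << 20));   /* 32 MiB part: 4 */
        printf("%u\n", pick_addr_width(0, 0, 8 << 20));    /* 8 MiB part: 3  */
        return 0;
}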
*/ if (mtd->type == MTD_UBIVOLUME) { - pr_err("ubi: refuse attaching mtd%d - it is already emulated on top of UBI", + pr_err("ubi: refuse attaching mtd%d - it is already emulated on top of UBI\n", mtd->index); return -EINVAL; } @@ -851,7 +851,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, if (!ubi_devices[ubi_num]) break; if (ubi_num == UBI_MAX_DEVICES) { - pr_err("ubi: only %d UBI devices may be created", + pr_err("ubi: only %d UBI devices may be created\n", UBI_MAX_DEVICES); return -ENFILE; } @@ -861,7 +861,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, /* Make sure ubi_num is not busy */ if (ubi_devices[ubi_num]) { - pr_err("ubi: ubi%i already exists", ubi_num); + pr_err("ubi: ubi%i already exists\n", ubi_num); return -EEXIST; } } @@ -1166,7 +1166,7 @@ static int __init ubi_init(void) BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64); if (mtd_devs > UBI_MAX_DEVICES) { - pr_err("UBI error: too many MTD devices, maximum is %d", + pr_err("UBI error: too many MTD devices, maximum is %d\n", UBI_MAX_DEVICES); return -EINVAL; } @@ -1178,7 +1178,7 @@ static int __init ubi_init(void) err = misc_register(&ubi_ctrl_cdev); if (err) { - pr_err("UBI error: cannot register device"); + pr_err("UBI error: cannot register device\n"); goto out; } @@ -1205,7 +1205,7 @@ static int __init ubi_init(void) mtd = open_mtd_device(p->name); if (IS_ERR(mtd)) { err = PTR_ERR(mtd); - pr_err("UBI error: cannot open mtd %s, error %d", + pr_err("UBI error: cannot open mtd %s, error %d\n", p->name, err); /* See comment below re-ubi_is_module(). */ if (ubi_is_module()) @@ -1218,7 +1218,7 @@ static int __init ubi_init(void) p->vid_hdr_offs, p->max_beb_per1024); mutex_unlock(&ubi_devices_mutex); if (err < 0) { - pr_err("UBI error: cannot attach mtd%d", + pr_err("UBI error: cannot attach mtd%d\n", mtd->index); put_mtd_device(mtd); @@ -1242,7 +1242,7 @@ static int __init ubi_init(void) err = ubiblock_init(); if (err) { - pr_err("UBI error: block: cannot initialize, error %d", err); + pr_err("UBI error: block: cannot initialize, error %d\n", err); /* See comment above re-ubi_is_module(). */ if (ubi_is_module()) @@ -1265,7 +1265,7 @@ static int __init ubi_init(void) misc_deregister(&ubi_ctrl_cdev); out: class_unregister(&ubi_class); - pr_err("UBI error: cannot initialize UBI, error %d", err); + pr_err("UBI error: cannot initialize UBI, error %d\n", err); return err; } late_initcall(ubi_init); diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c index b44c8d348e78..5a832bc79b1b 100644 --- a/drivers/mtd/ubi/fastmap.c +++ b/drivers/mtd/ubi/fastmap.c @@ -1667,7 +1667,7 @@ int ubi_update_fastmap(struct ubi_device *ubi) ret = invalidate_fastmap(ubi); if (ret < 0) { - ubi_err(ubi, "Unable to invalidiate current fastmap!"); + ubi_err(ubi, "Unable to invalidate current fastmap!"); ubi_ro_mode(ubi); } else { return_fm_pebs(ubi, old_fm); diff --git a/drivers/mtd/ubi/ubi-media.h b/drivers/mtd/ubi/ubi-media.h index 22ed3f627506..bfceae5a890e 100644 --- a/drivers/mtd/ubi/ubi-media.h +++ b/drivers/mtd/ubi/ubi-media.h @@ -229,7 +229,7 @@ struct ubi_ec_hdr { * copy. UBI also calculates data CRC when the data is moved and stores it at * the @data_crc field of the copy (P1). So when UBI needs to pick one physical * eraseblock of two (P or P1), the @copy_flag of the newer one (P1) is - * examined. If it is cleared, the situation* is simple and the newer one is + * examined. If it is cleared, the situation is simple and the newer one is * picked. If it is set, the data CRC of the copy (P1) is examined. 
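The ipddp change above replaces an open-coded IS_ERR()/PTR_ERR() pair with PTR_ERR_OR_ZERO(), which collapses "return the errno hidden in the pointer, or 0 on success" into one expression. Below is a user-space toy model of the ERR_PTR convention behind it, where values in the last page of the address space encode -MAX_ERRNO..-1; these helpers are illustrative, not the kernel's implementation.

#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static int is_err(const void *p)
{
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

static long ptr_err_or_zero(const void *p)
{
        return is_err(p) ? (long)(intptr_t)p : 0;
}

int main(void)
{
        int some_object = 0;
        void *ok  = &some_object;
        void *bad = (void *)(intptr_t)-12;   /* would be ERR_PTR(-ENOMEM) */

        printf("%ld %ld\n", ptr_err_or_zero(ok), ptr_err_or_zero(bad));
        return 0;
}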
If the CRC * checksum is correct, this physical eraseblock is selected (P1). Otherwise * the older one (P) is selected. @@ -389,7 +389,7 @@ struct ubi_vtbl_record { #define UBI_FM_POOL_MAGIC 0x67AF4D08 #define UBI_FM_EBA_MAGIC 0xf0c040a8 -/* A fastmap supber block can be located between PEB 0 and +/* A fastmap super block can be located between PEB 0 and * UBI_FM_MAX_START */ #define UBI_FM_MAX_START 64 diff --git a/drivers/net/Makefile b/drivers/net/Makefile index b2f6556d8848..8dff900085d6 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -45,7 +45,6 @@ obj-$(CONFIG_ETHERNET) += ethernet/ obj-$(CONFIG_FDDI) += fddi/ obj-$(CONFIG_HIPPI) += hippi/ obj-$(CONFIG_HAMRADIO) += hamradio/ -obj-$(CONFIG_IRDA) += irda/ obj-$(CONFIG_PLIP) += plip/ obj-$(CONFIG_PPP) += ppp/ obj-$(CONFIG_PPP_ASYNC) += ppp/ diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c index a306de4318d7..9375cef22420 100644 --- a/drivers/net/appletalk/ipddp.c +++ b/drivers/net/appletalk/ipddp.c @@ -311,9 +311,7 @@ module_param(ipddp_mode, int, 0); static int __init ipddp_init_module(void) { dev_ipddp = ipddp_init(); - if (IS_ERR(dev_ipddp)) - return PTR_ERR(dev_ipddp); - return 0; + return PTR_ERR_OR_ZERO(dev_ipddp); } static void __exit ipddp_cleanup_module(void) diff --git a/drivers/net/arcnet/arcdevice.h b/drivers/net/arcnet/arcdevice.h index cbb4f8566bbe..d09b2b46ab63 100644 --- a/drivers/net/arcnet/arcdevice.h +++ b/drivers/net/arcnet/arcdevice.h @@ -20,7 +20,7 @@ #include #ifdef __KERNEL__ -#include +#include /* * RECON_THRESHOLD is the maximum number of RECON messages to receive diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c index 01cab9548785..eb7f76753c9c 100644 --- a/drivers/net/arcnet/com20020-pci.c +++ b/drivers/net/arcnet/com20020-pci.c @@ -109,7 +109,7 @@ static struct attribute *com20020_state_attrs[] = { NULL, }; -static struct attribute_group com20020_state_group = { +static const struct attribute_group com20020_state_group = { .name = NULL, .attrs = com20020_state_attrs, }; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index fc63992ab0e0..c99dc59d729b 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -4289,7 +4289,7 @@ static int bond_check_params(struct bond_params *params) int bond_mode = BOND_MODE_ROUNDROBIN; int xmit_hashtype = BOND_XMIT_POLICY_LAYER2; int lacp_fast = 0; - int tlb_dynamic_lb = 0; + int tlb_dynamic_lb; /* Convert string parameters. 
*/ if (mode) { @@ -4601,16 +4601,13 @@ static int bond_check_params(struct bond_params *params) } ad_user_port_key = valptr->value; - if ((bond_mode == BOND_MODE_TLB) || (bond_mode == BOND_MODE_ALB)) { - bond_opt_initstr(&newval, "default"); - valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB), - &newval); - if (!valptr) { - pr_err("Error: No tlb_dynamic_lb default value"); - return -EINVAL; - } - tlb_dynamic_lb = valptr->value; + bond_opt_initstr(&newval, "default"); + valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB), &newval); + if (!valptr) { + pr_err("Error: No tlb_dynamic_lb default value"); + return -EINVAL; } + tlb_dynamic_lb = valptr->value; if (lp_interval == 0) { pr_warn("Warning: ip_interval must be between 1 and %d, so it was reset to %d\n", diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index a12d603d41c6..5931aa2fe997 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -754,6 +754,9 @@ static int bond_option_mode_set(struct bonding *bond, bond->params.miimon); } + if (newval->value == BOND_MODE_ALB) + bond->params.tlb_dynamic_lb = 1; + /* don't cache arp_validate between modes */ bond->params.arp_validate = BOND_ARP_VALIDATE_NONE; bond->params.mode = newval->value; diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 770623a0cc01..040b493f60ae 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -759,7 +759,7 @@ static struct attribute *per_bond_attrs[] = { NULL, }; -static struct attribute_group bonding_group = { +static const struct attribute_group bonding_group = { .name = "bonding", .attrs = per_bond_attrs, }; diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index 0e0df0ba288c..f37ce0e1b603 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c @@ -1232,7 +1232,7 @@ static struct attribute *at91_sysfs_attrs[] = { NULL, }; -static struct attribute_group at91_sysfs_attr_group = { +static const struct attribute_group at91_sysfs_attr_group = { .attrs = at91_sysfs_attrs, }; diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c index e36d10520e24..46a746ee80bb 100644 --- a/drivers/net/can/c_can/c_can_platform.c +++ b/drivers/net/can/c_can/c_can_platform.c @@ -220,7 +220,7 @@ static const struct c_can_driver_data am3352_dcan_drvdata = { .raminit_bits = am3352_raminit_bits, }; -static struct platform_device_id c_can_id_table[] = { +static const struct platform_device_id c_can_id_table[] = { { .name = KBUILD_MODNAME, .driver_data = (kernel_ulong_t)&c_can_drvdata, diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c index 2ba1a81500c1..12a53c8e8e1d 100644 --- a/drivers/net/can/janz-ican3.c +++ b/drivers/net/can/janz-ican3.c @@ -1875,7 +1875,7 @@ static struct attribute *ican3_sysfs_attrs[] = { NULL, }; -static struct attribute_group ican3_sysfs_attr_group = { +static const struct attribute_group ican3_sysfs_attr_group = { .attrs = ican3_sysfs_attrs, }; diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 7f36d3e3c98b..274f3679f33d 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1053,49 +1053,6 @@ int b53_vlan_del(struct dsa_switch *ds, int port, } EXPORT_SYMBOL(b53_vlan_del); -int b53_vlan_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_vlan *vlan, - switchdev_obj_dump_cb_t *cb) -{ - struct b53_device *dev = ds->priv; - u16 
vid, vid_start = 0, pvid; - struct b53_vlan *vl; - int err = 0; - - if (is5325(dev) || is5365(dev)) - vid_start = 1; - - b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid); - - /* Use our software cache for dumps, since we do not have any HW - * operation returning only the used/valid VLANs - */ - for (vid = vid_start; vid < dev->num_vlans; vid++) { - vl = &dev->vlans[vid]; - - if (!vl->valid) - continue; - - if (!(vl->members & BIT(port))) - continue; - - vlan->vid_begin = vlan->vid_end = vid; - vlan->flags = 0; - - if (vl->untag & BIT(port)) - vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED; - if (pvid == vid) - vlan->flags |= BRIDGE_VLAN_INFO_PVID; - - err = cb(&vlan->obj); - if (err) - break; - } - - return err; -} -EXPORT_SYMBOL(b53_vlan_dump); - /* Address Resolution Logic routines */ static int b53_arl_op_wait(struct b53_device *dev) { @@ -1213,9 +1170,8 @@ static int b53_arl_op(struct b53_device *dev, int op, int port, return b53_arl_rw_op(dev, 0); } -int b53_fdb_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) +int b53_fdb_add(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid) { struct b53_device *priv = ds->priv; @@ -1225,27 +1181,16 @@ int b53_fdb_prepare(struct dsa_switch *ds, int port, if (is5325(priv) || is5365(priv)) return -EOPNOTSUPP; - return 0; -} -EXPORT_SYMBOL(b53_fdb_prepare); - -void b53_fdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) -{ - struct b53_device *priv = ds->priv; - - if (b53_arl_op(priv, 0, port, fdb->addr, fdb->vid, true)) - pr_err("%s: failed to add MAC address\n", __func__); + return b53_arl_op(priv, 0, port, addr, vid, true); } EXPORT_SYMBOL(b53_fdb_add); int b53_fdb_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb) + const unsigned char *addr, u16 vid) { struct b53_device *priv = ds->priv; - return b53_arl_op(priv, 0, port, fdb->addr, fdb->vid, false); + return b53_arl_op(priv, 0, port, addr, vid, false); } EXPORT_SYMBOL(b53_fdb_del); @@ -1282,8 +1227,7 @@ static void b53_arl_search_rd(struct b53_device *dev, u8 idx, } static int b53_fdb_copy(int port, const struct b53_arl_entry *ent, - struct switchdev_obj_port_fdb *fdb, - switchdev_obj_dump_cb_t *cb) + dsa_fdb_dump_cb_t *cb, void *data) { if (!ent->is_valid) return 0; @@ -1291,16 +1235,11 @@ static int b53_fdb_copy(int port, const struct b53_arl_entry *ent, if (port != ent->port) return 0; - ether_addr_copy(fdb->addr, ent->mac); - fdb->vid = ent->vid; - fdb->ndm_state = ent->is_static ? 
NUD_NOARP : NUD_REACHABLE; - - return cb(&fdb->obj); + return cb(ent->mac, ent->vid, ent->is_static, data); } int b53_fdb_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_fdb *fdb, - switchdev_obj_dump_cb_t *cb) + dsa_fdb_dump_cb_t *cb, void *data) { struct b53_device *priv = ds->priv; struct b53_arl_entry results[2]; @@ -1318,13 +1257,13 @@ int b53_fdb_dump(struct dsa_switch *ds, int port, return ret; b53_arl_search_rd(priv, 0, &results[0]); - ret = b53_fdb_copy(port, &results[0], fdb, cb); + ret = b53_fdb_copy(port, &results[0], cb, data); if (ret) return ret; if (priv->num_arl_entries > 2) { b53_arl_search_rd(priv, 1, &results[1]); - ret = b53_fdb_copy(port, &results[1], fdb, cb); + ret = b53_fdb_copy(port, &results[1], cb, data); if (ret) return ret; @@ -1564,8 +1503,6 @@ static const struct dsa_switch_ops b53_switch_ops = { .port_vlan_prepare = b53_vlan_prepare, .port_vlan_add = b53_vlan_add, .port_vlan_del = b53_vlan_del, - .port_vlan_dump = b53_vlan_dump, - .port_fdb_prepare = b53_fdb_prepare, .port_fdb_dump = b53_fdb_dump, .port_fdb_add = b53_fdb_add, .port_fdb_del = b53_fdb_del, diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index 155a9c48c317..01bd8cbe9a3f 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -393,20 +393,12 @@ void b53_vlan_add(struct dsa_switch *ds, int port, struct switchdev_trans *trans); int b53_vlan_del(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan); -int b53_vlan_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_vlan *vlan, - switchdev_obj_dump_cb_t *cb); -int b53_fdb_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans); -void b53_fdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans); +int b53_fdb_add(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid); int b53_fdb_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb); + const unsigned char *addr, u16 vid); int b53_fdb_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_fdb *fdb, - switchdev_obj_dump_cb_t *cb); + dsa_fdb_dump_cb_t *cb, void *data); int b53_mirror_add(struct dsa_switch *ds, int port, struct dsa_mall_mirror_tc_entry *mirror, bool ingress); void b53_mirror_del(struct dsa_switch *ds, int port, diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 9b6ce7c3f6c3..d7b53d53c116 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -103,6 +103,7 @@ static void bcm_sf2_brcm_hdr_setup(struct bcm_sf2_priv *priv, int port) static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port) { struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); + unsigned int i; u32 reg, offset; if (priv->type == BCM7445_DEVICE_ID) @@ -129,6 +130,14 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port) reg |= MII_DUMB_FWDG_EN; core_writel(priv, reg, CORE_SWITCH_CTRL); + /* Configure Traffic Class to QoS mapping, allow each priority to map + * to a different queue number + */ + reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port)); + for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++) + reg |= i << (PRT_TO_QID_SHIFT * i); + core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port)); + bcm_sf2_brcm_hdr_setup(priv, port); /* Force link status for IMP port */ @@ -244,7 +253,7 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port, * to a different queue number */ reg = 
core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port)); - for (i = 0; i < 8; i++) + for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++) reg |= i << (PRT_TO_QID_SHIFT * i); core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port)); @@ -327,12 +336,8 @@ static void bcm_sf2_port_disable(struct dsa_switch *ds, int port, static int bcm_sf2_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy) { - struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); - struct ethtool_eee *p = &priv->port_sts[port].eee; int ret; - p->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full); - ret = phy_init_eee(phy, 0); if (ret) return 0; @@ -342,8 +347,8 @@ static int bcm_sf2_eee_init(struct dsa_switch *ds, int port, return 1; } -static int bcm_sf2_sw_get_eee(struct dsa_switch *ds, int port, - struct ethtool_eee *e) +static int bcm_sf2_sw_get_mac_eee(struct dsa_switch *ds, int port, + struct ethtool_eee *e) { struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); struct ethtool_eee *p = &priv->port_sts[port].eee; @@ -356,22 +361,14 @@ static int bcm_sf2_sw_get_eee(struct dsa_switch *ds, int port, return 0; } -static int bcm_sf2_sw_set_eee(struct dsa_switch *ds, int port, - struct phy_device *phydev, - struct ethtool_eee *e) +static int bcm_sf2_sw_set_mac_eee(struct dsa_switch *ds, int port, + struct ethtool_eee *e) { struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); struct ethtool_eee *p = &priv->port_sts[port].eee; p->eee_enabled = e->eee_enabled; - - if (!p->eee_enabled) { - bcm_sf2_eee_enable_set(ds, port, false); - } else { - p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev); - if (!p->eee_enabled) - return -EOPNOTSUPP; - } + bcm_sf2_eee_enable_set(ds, port, e->eee_enabled); return 0; } @@ -800,7 +797,7 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds) static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port, struct ethtool_wolinfo *wol) { - struct net_device *p = ds->dst[ds->index].cpu_dp->netdev; + struct net_device *p = ds->dst->cpu_dp->netdev; struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); struct ethtool_wolinfo pwol; @@ -823,7 +820,7 @@ static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port, static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port, struct ethtool_wolinfo *wol) { - struct net_device *p = ds->dst[ds->index].cpu_dp->netdev; + struct net_device *p = ds->dst->cpu_dp->netdev; struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); s8 cpu_port = ds->dst->cpu_dp->index; struct ethtool_wolinfo pwol; @@ -995,7 +992,7 @@ static int bcm_sf2_core_write64(struct b53_device *dev, u8 page, u8 reg, return 0; } -static struct b53_io_ops bcm_sf2_io_ops = { +static const struct b53_io_ops bcm_sf2_io_ops = { .read8 = bcm_sf2_core_read8, .read16 = bcm_sf2_core_read16, .read32 = bcm_sf2_core_read32, @@ -1023,8 +1020,8 @@ static const struct dsa_switch_ops bcm_sf2_ops = { .set_wol = bcm_sf2_sw_set_wol, .port_enable = bcm_sf2_port_setup, .port_disable = bcm_sf2_port_disable, - .get_eee = bcm_sf2_sw_get_eee, - .set_eee = bcm_sf2_sw_set_eee, + .get_mac_eee = bcm_sf2_sw_get_mac_eee, + .set_mac_eee = bcm_sf2_sw_set_mac_eee, .port_bridge_join = b53_br_join, .port_bridge_leave = b53_br_leave, .port_stp_state_set = b53_br_set_stp_state, @@ -1033,8 +1030,6 @@ static const struct dsa_switch_ops bcm_sf2_ops = { .port_vlan_prepare = b53_vlan_prepare, .port_vlan_add = b53_vlan_add, .port_vlan_del = b53_vlan_del, - .port_vlan_dump = b53_vlan_dump, - .port_fdb_prepare = b53_fdb_prepare, .port_fdb_dump = b53_fdb_dump, .port_fdb_add = b53_fdb_add, .port_fdb_del = b53_fdb_del, @@ -1165,6 +1160,9 @@ static int 
bcm_sf2_sw_probe(struct platform_device *pdev) ds = dev->ds; ds->ops = &bcm_sf2_ops; + /* Advertise the 8 egress queues */ + ds->num_tx_queues = SF2_NUM_EGRESS_QUEUES; + dev_set_drvdata(&pdev->dev, priv); spin_lock_init(&priv->indir_lock); diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h index 7f9125eef3df..02c499f9c56b 100644 --- a/drivers/net/dsa/bcm_sf2.h +++ b/drivers/net/dsa/bcm_sf2.h @@ -131,12 +131,12 @@ static inline u32 bcm_sf2_mangle_addr(struct bcm_sf2_priv *priv, u32 off) #define SF2_IO_MACRO(name) \ static inline u32 name##_readl(struct bcm_sf2_priv *priv, u32 off) \ { \ - return __raw_readl(priv->name + off); \ + return readl_relaxed(priv->name + off); \ } \ static inline void name##_writel(struct bcm_sf2_priv *priv, \ u32 val, u32 off) \ { \ - __raw_writel(val, priv->name + off); \ + writel_relaxed(val, priv->name + off); \ } \ /* Accesses to 64-bits register requires us to latch the hi/lo pairs @@ -180,23 +180,23 @@ static inline void intrl2_##which##_mask_set(struct bcm_sf2_priv *priv, \ static inline u32 core_readl(struct bcm_sf2_priv *priv, u32 off) { u32 tmp = bcm_sf2_mangle_addr(priv, off); - return __raw_readl(priv->core + tmp); + return readl_relaxed(priv->core + tmp); } static inline void core_writel(struct bcm_sf2_priv *priv, u32 val, u32 off) { u32 tmp = bcm_sf2_mangle_addr(priv, off); - __raw_writel(val, priv->core + tmp); + writel_relaxed(val, priv->core + tmp); } static inline u32 reg_readl(struct bcm_sf2_priv *priv, u16 off) { - return __raw_readl(priv->reg + priv->reg_offsets[off]); + return readl_relaxed(priv->reg + priv->reg_offsets[off]); } static inline void reg_writel(struct bcm_sf2_priv *priv, u32 val, u16 off) { - __raw_writel(val, priv->reg + priv->reg_offsets[off]); + writel_relaxed(val, priv->reg + priv->reg_offsets[off]); } SF2_IO64_MACRO(core); diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h index 26052450091e..49695fcc2ea8 100644 --- a/drivers/net/dsa/bcm_sf2_regs.h +++ b/drivers/net/dsa/bcm_sf2_regs.h @@ -401,4 +401,7 @@ enum bcm_sf2_reg_offs { #define CFP_NUM_RULES 256 +/* Number of egress queues per port */ +#define SF2_NUM_EGRESS_QUEUES 8 + #endif /* __BCM_SF2_REGS_H */ diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c index fdd8f3872102..d55051abf4ed 100644 --- a/drivers/net/dsa/dsa_loop.c +++ b/drivers/net/dsa/dsa_loop.c @@ -257,44 +257,7 @@ static int dsa_loop_port_vlan_del(struct dsa_switch *ds, int port, return 0; } -static int dsa_loop_port_vlan_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_vlan *vlan, - switchdev_obj_dump_cb_t *cb) -{ - struct dsa_loop_priv *ps = ds->priv; - struct mii_bus *bus = ps->bus; - struct dsa_loop_vlan *vl; - u16 vid, vid_start = 0; - int err = 0; - - dev_dbg(ds->dev, "%s\n", __func__); - - /* Just do a sleeping operation to make lockdep checks effective */ - mdiobus_read(bus, ps->port_base + port, MII_BMSR); - - for (vid = vid_start; vid < DSA_LOOP_VLANS; vid++) { - vl = &ps->vlans[vid]; - - if (!(vl->members & BIT(port))) - continue; - - vlan->vid_begin = vlan->vid_end = vid; - vlan->flags = 0; - - if (vl->untagged & BIT(port)) - vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED; - if (ps->pvid == vid) - vlan->flags |= BRIDGE_VLAN_INFO_PVID; - - err = cb(&vlan->obj); - if (err) - break; - } - - return err; -} - -static struct dsa_switch_ops dsa_loop_driver = { +static const struct dsa_switch_ops dsa_loop_driver = { .get_tag_protocol = dsa_loop_get_protocol, .setup = dsa_loop_setup, .get_strings = dsa_loop_get_strings, @@ -310,7 
+273,6 @@ static struct dsa_switch_ops dsa_loop_driver = { .port_vlan_prepare = dsa_loop_port_vlan_prepare, .port_vlan_add = dsa_loop_port_vlan_add, .port_vlan_del = dsa_loop_port_vlan_del, - .port_vlan_dump = dsa_loop_port_vlan_dump, }; static int dsa_loop_drv_probe(struct mdio_device *mdiodev) @@ -390,7 +352,7 @@ static void __exit dsa_loop_exit(void) mdio_driver_unregister(&dsa_loop_drv); for (i = 0; i < NUM_FIXED_PHYS; i++) - if (phydevs[i]) + if (!IS_ERR(phydevs[i])) fixed_phy_unregister(phydevs[i]); } module_exit(dsa_loop_exit); diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index cd76e61f1fca..b471413d3df9 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -20,6 +20,11 @@ #include "lan9303.h" +#define LAN9303_NUM_PORTS 3 + +/* 13.2 System Control and Status Registers + * Multiply register number by 4 to get address offset. + */ #define LAN9303_CHIP_REV 0x14 # define LAN9303_CHIP_ID 0x9303 #define LAN9303_IRQ_CFG 0x15 @@ -53,6 +58,9 @@ #define LAN9303_VIRT_PHY_BASE 0x70 #define LAN9303_VIRT_SPECIAL_CTRL 0x77 +/*13.4 Switch Fabric Control and Status Registers + * Accessed indirectly via SWITCH_CSR_CMD, SWITCH_CSR_DATA. + */ #define LAN9303_SW_DEV_ID 0x0000 #define LAN9303_SW_RESET 0x0001 #define LAN9303_SW_RESET_RESET BIT(0) @@ -153,9 +161,7 @@ # define LAN9303_BM_EGRSS_PORT_TYPE_SPECIAL_TAG_PORT1 (BIT(9) | BIT(8)) # define LAN9303_BM_EGRSS_PORT_TYPE_SPECIAL_TAG_PORT0 (BIT(1) | BIT(0)) -#define LAN9303_PORT_0_OFFSET 0x400 -#define LAN9303_PORT_1_OFFSET 0x800 -#define LAN9303_PORT_2_OFFSET 0xc00 +#define LAN9303_SWITCH_PORT_REG(port, reg0) (0x400 * (port) + (reg0)) /* the built-in PHYs are of type LAN911X */ #define MII_LAN911X_SPECIAL_MODES 0x12 @@ -242,7 +248,7 @@ static int lan9303_virt_phy_reg_write(struct lan9303 *chip, int regnum, u16 val) return regmap_write(chip->regmap, LAN9303_VIRT_PHY_BASE + regnum, val); } -static int lan9303_port_phy_reg_wait_for_completion(struct lan9303 *chip) +static int lan9303_indirect_phy_wait_for_completion(struct lan9303 *chip) { int ret, i; u32 reg; @@ -262,7 +268,7 @@ static int lan9303_port_phy_reg_wait_for_completion(struct lan9303 *chip) return -EIO; } -static int lan9303_port_phy_reg_read(struct lan9303 *chip, int addr, int regnum) +static int lan9303_indirect_phy_read(struct lan9303 *chip, int addr, int regnum) { int ret; u32 val; @@ -272,7 +278,7 @@ static int lan9303_port_phy_reg_read(struct lan9303 *chip, int addr, int regnum) mutex_lock(&chip->indirect_mutex); - ret = lan9303_port_phy_reg_wait_for_completion(chip); + ret = lan9303_indirect_phy_wait_for_completion(chip); if (ret) goto on_error; @@ -281,7 +287,7 @@ static int lan9303_port_phy_reg_read(struct lan9303 *chip, int addr, int regnum) if (ret) goto on_error; - ret = lan9303_port_phy_reg_wait_for_completion(chip); + ret = lan9303_indirect_phy_wait_for_completion(chip); if (ret) goto on_error; @@ -299,8 +305,8 @@ static int lan9303_port_phy_reg_read(struct lan9303 *chip, int addr, int regnum) return ret; } -static int lan9303_phy_reg_write(struct lan9303 *chip, int addr, int regnum, - unsigned int val) +static int lan9303_indirect_phy_write(struct lan9303 *chip, int addr, + int regnum, u16 val) { int ret; u32 reg; @@ -311,7 +317,7 @@ static int lan9303_phy_reg_write(struct lan9303 *chip, int addr, int regnum, mutex_lock(&chip->indirect_mutex); - ret = lan9303_port_phy_reg_wait_for_completion(chip); + ret = lan9303_indirect_phy_wait_for_completion(chip); if (ret) goto on_error; @@ -328,6 +334,12 @@ static int 
lan9303_phy_reg_write(struct lan9303 *chip, int addr, int regnum, return ret; } +const struct lan9303_phy_ops lan9303_indirect_phy_ops = { + .phy_read = lan9303_indirect_phy_read, + .phy_write = lan9303_indirect_phy_write, +}; +EXPORT_SYMBOL_GPL(lan9303_indirect_phy_ops); + static int lan9303_switch_wait_for_completion(struct lan9303 *chip) { int ret, i; @@ -416,6 +428,20 @@ static int lan9303_read_switch_reg(struct lan9303 *chip, u16 regnum, u32 *val) return ret; } +static int lan9303_write_switch_port(struct lan9303 *chip, int port, + u16 regnum, u32 val) +{ + return lan9303_write_switch_reg( + chip, LAN9303_SWITCH_PORT_REG(port, regnum), val); +} + +static int lan9303_read_switch_port(struct lan9303 *chip, int port, + u16 regnum, u32 *val) +{ + return lan9303_read_switch_reg( + chip, LAN9303_SWITCH_PORT_REG(port, regnum), val); +} + static int lan9303_detect_phy_setup(struct lan9303 *chip) { int reg; @@ -427,14 +453,15 @@ static int lan9303_detect_phy_setup(struct lan9303 *chip) * Special reg 18 of phy 3 reads as 0x0000, if 'phy_addr_sel_strap' is 0 * and the IDs are 0-1-2, else it contains something different from * 0x0000, which means 'phy_addr_sel_strap' is 1 and the IDs are 1-2-3. + * 0xffff is returned on MDIO read with no response. */ - reg = lan9303_port_phy_reg_read(chip, 3, MII_LAN911X_SPECIAL_MODES); + reg = chip->ops->phy_read(chip, 3, MII_LAN911X_SPECIAL_MODES); if (reg < 0) { dev_err(chip->dev, "Failed to detect phy config: %d\n", reg); return reg; } - if (reg != 0) + if ((reg != 0) && (reg != 0xffff)) chip->phy_addr_sel_strap = 1; else chip->phy_addr_sel_strap = 0; @@ -445,40 +472,37 @@ static int lan9303_detect_phy_setup(struct lan9303 *chip) return 0; } -#define LAN9303_MAC_RX_CFG_OFFS (LAN9303_MAC_RX_CFG_0 - LAN9303_PORT_0_OFFSET) -#define LAN9303_MAC_TX_CFG_OFFS (LAN9303_MAC_TX_CFG_0 - LAN9303_PORT_0_OFFSET) - -static int lan9303_disable_packet_processing(struct lan9303 *chip, - unsigned int port) +static int lan9303_disable_processing_port(struct lan9303 *chip, + unsigned int port) { int ret; /* disable RX, but keep register reset default values else */ - ret = lan9303_write_switch_reg(chip, LAN9303_MAC_RX_CFG_OFFS + port, - LAN9303_MAC_RX_CFG_X_REJECT_MAC_TYPES); + ret = lan9303_write_switch_port(chip, port, LAN9303_MAC_RX_CFG_0, + LAN9303_MAC_RX_CFG_X_REJECT_MAC_TYPES); if (ret) return ret; /* disable TX, but keep register reset default values else */ - return lan9303_write_switch_reg(chip, LAN9303_MAC_TX_CFG_OFFS + port, + return lan9303_write_switch_port(chip, port, LAN9303_MAC_TX_CFG_0, LAN9303_MAC_TX_CFG_X_TX_IFG_CONFIG_DEFAULT | LAN9303_MAC_TX_CFG_X_TX_PAD_ENABLE); } -static int lan9303_enable_packet_processing(struct lan9303 *chip, - unsigned int port) +static int lan9303_enable_processing_port(struct lan9303 *chip, + unsigned int port) { int ret; /* enable RX and keep register reset default values else */ - ret = lan9303_write_switch_reg(chip, LAN9303_MAC_RX_CFG_OFFS + port, - LAN9303_MAC_RX_CFG_X_REJECT_MAC_TYPES | - LAN9303_MAC_RX_CFG_X_RX_ENABLE); + ret = lan9303_write_switch_port(chip, port, LAN9303_MAC_RX_CFG_0, + LAN9303_MAC_RX_CFG_X_REJECT_MAC_TYPES | + LAN9303_MAC_RX_CFG_X_RX_ENABLE); if (ret) return ret; /* enable TX and keep register reset default values else */ - return lan9303_write_switch_reg(chip, LAN9303_MAC_TX_CFG_OFFS + port, + return lan9303_write_switch_port(chip, port, LAN9303_MAC_TX_CFG_0, LAN9303_MAC_TX_CFG_X_TX_IFG_CONFIG_DEFAULT | LAN9303_MAC_TX_CFG_X_TX_PAD_ENABLE | LAN9303_MAC_TX_CFG_X_TX_ENABLE); @@ -543,15 +567,16 @@ static int 
lan9303_handle_reset(struct lan9303 *chip) /* stop processing packets for all ports */ static int lan9303_disable_processing(struct lan9303 *chip) { - int ret; + int p; - ret = lan9303_disable_packet_processing(chip, LAN9303_PORT_0_OFFSET); - if (ret) - return ret; - ret = lan9303_disable_packet_processing(chip, LAN9303_PORT_1_OFFSET); - if (ret) - return ret; - return lan9303_disable_packet_processing(chip, LAN9303_PORT_2_OFFSET); + for (p = 0; p < LAN9303_NUM_PORTS; p++) { + int ret = lan9303_disable_processing_port(chip, p); + + if (ret) + return ret; + } + + return 0; } static int lan9303_check_device(struct lan9303 *chip) @@ -621,7 +646,7 @@ static int lan9303_setup(struct dsa_switch *ds) if (ret) dev_err(chip->dev, "failed to separate ports %d\n", ret); - ret = lan9303_enable_packet_processing(chip, LAN9303_PORT_0_OFFSET); + ret = lan9303_enable_processing_port(chip, 0); if (ret) dev_err(chip->dev, "failed to re-enable switching %d\n", ret); @@ -687,19 +712,18 @@ static void lan9303_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data) { struct lan9303 *chip = ds->priv; - u32 reg; - unsigned int u, poff; - int ret; - - poff = port * 0x400; + unsigned int u; for (u = 0; u < ARRAY_SIZE(lan9303_mib); u++) { - ret = lan9303_read_switch_reg(chip, - lan9303_mib[u].offset + poff, - ®); + u32 reg; + int ret; + + ret = lan9303_read_switch_port( + chip, port, lan9303_mib[u].offset, ®); + if (ret) - dev_warn(chip->dev, "Reading status reg %u failed\n", - lan9303_mib[u].offset + poff); + dev_warn(chip->dev, "Reading status port %d reg %u failed\n", + port, lan9303_mib[u].offset); data[u] = reg; } } @@ -719,7 +743,7 @@ static int lan9303_phy_read(struct dsa_switch *ds, int phy, int regnum) if (phy > phy_base + 2) return -ENODEV; - return lan9303_port_phy_reg_read(chip, phy, regnum); + return chip->ops->phy_read(chip, phy, regnum); } static int lan9303_phy_write(struct dsa_switch *ds, int phy, int regnum, @@ -733,7 +757,7 @@ static int lan9303_phy_write(struct dsa_switch *ds, int phy, int regnum, if (phy > phy_base + 2) return -ENODEV; - return lan9303_phy_reg_write(chip, phy, regnum, val); + return chip->ops->phy_write(chip, phy, regnum, val); } static int lan9303_port_enable(struct dsa_switch *ds, int port, @@ -744,11 +768,8 @@ static int lan9303_port_enable(struct dsa_switch *ds, int port, /* enable internal packet processing */ switch (port) { case 1: - return lan9303_enable_packet_processing(chip, - LAN9303_PORT_1_OFFSET); case 2: - return lan9303_enable_packet_processing(chip, - LAN9303_PORT_2_OFFSET); + return lan9303_enable_processing_port(chip, port); default: dev_dbg(chip->dev, "Error: request to power up invalid port %d\n", port); @@ -765,14 +786,10 @@ static void lan9303_port_disable(struct dsa_switch *ds, int port, /* disable internal packet processing */ switch (port) { case 1: - lan9303_disable_packet_processing(chip, LAN9303_PORT_1_OFFSET); - lan9303_phy_reg_write(chip, chip->phy_addr_sel_strap + 1, - MII_BMCR, BMCR_PDOWN); - break; case 2: - lan9303_disable_packet_processing(chip, LAN9303_PORT_2_OFFSET); - lan9303_phy_reg_write(chip, chip->phy_addr_sel_strap + 2, - MII_BMCR, BMCR_PDOWN); + lan9303_disable_processing_port(chip, port); + lan9303_phy_write(ds, chip->phy_addr_sel_strap + port, + MII_BMCR, BMCR_PDOWN); break; default: dev_dbg(chip->dev, @@ -780,7 +797,7 @@ static void lan9303_port_disable(struct dsa_switch *ds, int port, } } -static struct dsa_switch_ops lan9303_switch_ops = { +static const struct dsa_switch_ops lan9303_switch_ops = { .get_tag_protocol = 
lan9303_get_tag_protocol, .setup = lan9303_setup, .get_strings = lan9303_get_strings, @@ -794,7 +811,7 @@ static struct dsa_switch_ops lan9303_switch_ops = { static int lan9303_register_switch(struct lan9303 *chip) { - chip->ds = dsa_switch_alloc(chip->dev, DSA_MAX_PORTS); + chip->ds = dsa_switch_alloc(chip->dev, LAN9303_NUM_PORTS); if (!chip->ds) return -ENOMEM; diff --git a/drivers/net/dsa/lan9303.h b/drivers/net/dsa/lan9303.h index d1512dad2d90..4d8be555ff4d 100644 --- a/drivers/net/dsa/lan9303.h +++ b/drivers/net/dsa/lan9303.h @@ -2,6 +2,15 @@ #include #include +struct lan9303; + +struct lan9303_phy_ops { + /* PHY 1 and 2 access*/ + int (*phy_read)(struct lan9303 *chip, int port, int regnum); + int (*phy_write)(struct lan9303 *chip, int port, + int regnum, u16 val); +}; + struct lan9303 { struct device *dev; struct regmap *regmap; @@ -11,9 +20,11 @@ struct lan9303 { bool phy_addr_sel_strap; struct dsa_switch *ds; struct mutex indirect_mutex; /* protect indexed register access */ + const struct lan9303_phy_ops *ops; }; extern const struct regmap_access_table lan9303_register_set; +extern const struct lan9303_phy_ops lan9303_indirect_phy_ops; int lan9303_probe(struct lan9303 *chip, struct device_node *np); int lan9303_remove(struct lan9303 *chip); diff --git a/drivers/net/dsa/lan9303_i2c.c b/drivers/net/dsa/lan9303_i2c.c index ab3ce0da5071..24ec20f7f444 100644 --- a/drivers/net/dsa/lan9303_i2c.c +++ b/drivers/net/dsa/lan9303_i2c.c @@ -63,6 +63,8 @@ static int lan9303_i2c_probe(struct i2c_client *client, i2c_set_clientdata(client, sw_dev); sw_dev->chip.dev = &client->dev; + sw_dev->chip.ops = &lan9303_indirect_phy_ops; + ret = lan9303_probe(&sw_dev->chip, client->dev.of_node); if (ret != 0) return ret; diff --git a/drivers/net/dsa/lan9303_mdio.c b/drivers/net/dsa/lan9303_mdio.c index 93c36c0541cf..fc16668a487f 100644 --- a/drivers/net/dsa/lan9303_mdio.c +++ b/drivers/net/dsa/lan9303_mdio.c @@ -40,6 +40,7 @@ static int lan9303_mdio_write(void *ctx, uint32_t reg, uint32_t val) { struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx; + reg <<= 2; /* reg num to offset */ mutex_lock(&sw_dev->device->bus->mdio_lock); lan9303_mdio_real_write(sw_dev->device, reg, val & 0xffff); lan9303_mdio_real_write(sw_dev->device, reg + 2, (val >> 16) & 0xffff); @@ -57,6 +58,7 @@ static int lan9303_mdio_read(void *ctx, uint32_t reg, uint32_t *val) { struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx; + reg <<= 2; /* reg num to offset */ mutex_lock(&sw_dev->device->bus->mdio_lock); *val = lan9303_mdio_real_read(sw_dev->device, reg); *val |= (lan9303_mdio_real_read(sw_dev->device, reg + 2) << 16); @@ -65,6 +67,25 @@ static int lan9303_mdio_read(void *ctx, uint32_t reg, uint32_t *val) return 0; } +int lan9303_mdio_phy_write(struct lan9303 *chip, int phy, int reg, u16 val) +{ + struct lan9303_mdio *sw_dev = dev_get_drvdata(chip->dev); + + return mdiobus_write_nested(sw_dev->device->bus, phy, reg, val); +} + +int lan9303_mdio_phy_read(struct lan9303 *chip, int phy, int reg) +{ + struct lan9303_mdio *sw_dev = dev_get_drvdata(chip->dev); + + return mdiobus_read_nested(sw_dev->device->bus, phy, reg); +} + +static const struct lan9303_phy_ops lan9303_mdio_phy_ops = { + .phy_read = lan9303_mdio_phy_read, + .phy_write = lan9303_mdio_phy_write, +}; + static const struct regmap_config lan9303_mdio_regmap_config = { .reg_bits = 8, .val_bits = 32, @@ -106,6 +127,8 @@ static int lan9303_mdio_probe(struct mdio_device *mdiodev) dev_set_drvdata(&mdiodev->dev, sw_dev); sw_dev->chip.dev = &mdiodev->dev; + sw_dev->chip.ops 
= &lan9303_mdio_phy_ops; + ret = lan9303_probe(&sw_dev->chip, mdiodev->dev.of_node); if (ret != 0) return ret; diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index b313ecdf2919..56cd6d365352 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -638,55 +638,6 @@ static int ksz_port_vlan_del(struct dsa_switch *ds, int port, return 0; } -static int ksz_port_vlan_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_vlan *vlan, - switchdev_obj_dump_cb_t *cb) -{ - struct ksz_device *dev = ds->priv; - u16 vid; - u16 data; - struct vlan_table *vlan_cache; - int err = 0; - - mutex_lock(&dev->vlan_mutex); - - /* use dev->vlan_cache due to lack of searching valid vlan entry */ - for (vid = vlan->vid_begin; vid < dev->num_vlans; vid++) { - vlan_cache = &dev->vlan_cache[vid]; - - if (!(vlan_cache->table[0] & VLAN_VALID)) - continue; - - vlan->vid_begin = vid; - vlan->vid_end = vid; - vlan->flags = 0; - if (vlan_cache->table[2] & BIT(port)) { - if (vlan_cache->table[1] & BIT(port)) - vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED; - ksz_pread16(dev, port, REG_PORT_DEFAULT_VID, &data); - if (vid == (data & 0xFFFFF)) - vlan->flags |= BRIDGE_VLAN_INFO_PVID; - - err = cb(&vlan->obj); - if (err) - break; - } - } - - mutex_unlock(&dev->vlan_mutex); - - return err; -} - -static int ksz_port_fdb_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) -{ - /* nothing needed */ - - return 0; -} - struct alu_struct { /* entry 1 */ u8 is_static:1; @@ -706,30 +657,31 @@ struct alu_struct { u8 mac[ETH_ALEN]; }; -static void ksz_port_fdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) +static int ksz_port_fdb_add(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid) { struct ksz_device *dev = ds->priv; u32 alu_table[4]; u32 data; + int ret = 0; mutex_lock(&dev->alu_mutex); /* find any entry with mac & vid */ - data = fdb->vid << ALU_FID_INDEX_S; - data |= ((fdb->addr[0] << 8) | fdb->addr[1]); + data = vid << ALU_FID_INDEX_S; + data |= ((addr[0] << 8) | addr[1]); ksz_write32(dev, REG_SW_ALU_INDEX_0, data); - data = ((fdb->addr[2] << 24) | (fdb->addr[3] << 16)); - data |= ((fdb->addr[4] << 8) | fdb->addr[5]); + data = ((addr[2] << 24) | (addr[3] << 16)); + data |= ((addr[4] << 8) | addr[5]); ksz_write32(dev, REG_SW_ALU_INDEX_1, data); /* start read operation */ ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START); /* wait to be finished */ - if (wait_alu_ready(dev, ALU_START, 1000) < 0) { + ret = wait_alu_ready(dev, ALU_START, 1000); + if (ret < 0) { dev_dbg(dev->dev, "Failed to read ALU\n"); goto exit; } @@ -740,27 +692,30 @@ static void ksz_port_fdb_add(struct dsa_switch *ds, int port, /* update ALU entry */ alu_table[0] = ALU_V_STATIC_VALID; alu_table[1] |= BIT(port); - if (fdb->vid) + if (vid) alu_table[1] |= ALU_V_USE_FID; - alu_table[2] = (fdb->vid << ALU_V_FID_S); - alu_table[2] |= ((fdb->addr[0] << 8) | fdb->addr[1]); - alu_table[3] = ((fdb->addr[2] << 24) | (fdb->addr[3] << 16)); - alu_table[3] |= ((fdb->addr[4] << 8) | fdb->addr[5]); + alu_table[2] = (vid << ALU_V_FID_S); + alu_table[2] |= ((addr[0] << 8) | addr[1]); + alu_table[3] = ((addr[2] << 24) | (addr[3] << 16)); + alu_table[3] |= ((addr[4] << 8) | addr[5]); write_table(ds, alu_table); ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START); /* wait to be finished */ - if (wait_alu_ready(dev, ALU_START, 
1000) < 0) - dev_dbg(dev->dev, "Failed to read ALU\n"); + ret = wait_alu_ready(dev, ALU_START, 1000); + if (ret < 0) + dev_dbg(dev->dev, "Failed to write ALU\n"); exit: mutex_unlock(&dev->alu_mutex); + + return ret; } static int ksz_port_fdb_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb) + const unsigned char *addr, u16 vid) { struct ksz_device *dev = ds->priv; u32 alu_table[4]; @@ -770,12 +725,12 @@ static int ksz_port_fdb_del(struct dsa_switch *ds, int port, mutex_lock(&dev->alu_mutex); /* read any entry with mac & vid */ - data = fdb->vid << ALU_FID_INDEX_S; - data |= ((fdb->addr[0] << 8) | fdb->addr[1]); + data = vid << ALU_FID_INDEX_S; + data |= ((addr[0] << 8) | addr[1]); ksz_write32(dev, REG_SW_ALU_INDEX_0, data); - data = ((fdb->addr[2] << 24) | (fdb->addr[3] << 16)); - data |= ((fdb->addr[4] << 8) | fdb->addr[5]); + data = ((addr[2] << 24) | (addr[3] << 16)); + data |= ((addr[4] << 8) | addr[5]); ksz_write32(dev, REG_SW_ALU_INDEX_1, data); /* start read operation */ @@ -850,12 +805,11 @@ static void convert_alu(struct alu_struct *alu, u32 *alu_table) } static int ksz_port_fdb_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_fdb *fdb, - switchdev_obj_dump_cb_t *cb) + dsa_fdb_dump_cb_t *cb, void *data) { struct ksz_device *dev = ds->priv; int ret = 0; - u32 data; + u32 ksz_data; u32 alu_table[4]; struct alu_struct alu; int timeout; @@ -868,8 +822,8 @@ static int ksz_port_fdb_dump(struct dsa_switch *ds, int port, do { timeout = 1000; do { - ksz_read32(dev, REG_SW_ALU_CTRL__4, &data); - if ((data & ALU_VALID) || !(data & ALU_START)) + ksz_read32(dev, REG_SW_ALU_CTRL__4, &ksz_data); + if ((ksz_data & ALU_VALID) || !(ksz_data & ALU_START)) break; usleep_range(1, 10); } while (timeout-- > 0); @@ -886,18 +840,11 @@ static int ksz_port_fdb_dump(struct dsa_switch *ds, int port, convert_alu(&alu, alu_table); if (alu.port_forward & BIT(port)) { - fdb->vid = alu.fid; - if (alu.is_static) - fdb->ndm_state = NUD_NOARP; - else - fdb->ndm_state = NUD_REACHABLE; - ether_addr_copy(fdb->addr, alu.mac); - - ret = cb(&fdb->obj); + ret = cb(alu.mac, alu.fid, alu.is_static, data); if (ret) goto exit; } - } while (data & ALU_START); + } while (ksz_data & ALU_START); exit: @@ -1065,14 +1012,6 @@ static int ksz_port_mdb_del(struct dsa_switch *ds, int port, return ret; } -static int ksz_port_mdb_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_mdb *mdb, - switchdev_obj_dump_cb_t *cb) -{ - /* this is not called by switch layer */ - return 0; -} - static int ksz_port_mirror_add(struct dsa_switch *ds, int port, struct dsa_mall_mirror_tc_entry *mirror, bool ingress) @@ -1129,15 +1068,12 @@ static const struct dsa_switch_ops ksz_switch_ops = { .port_vlan_prepare = ksz_port_vlan_prepare, .port_vlan_add = ksz_port_vlan_add, .port_vlan_del = ksz_port_vlan_del, - .port_vlan_dump = ksz_port_vlan_dump, - .port_fdb_prepare = ksz_port_fdb_prepare, .port_fdb_dump = ksz_port_fdb_dump, .port_fdb_add = ksz_port_fdb_add, .port_fdb_del = ksz_port_fdb_del, .port_mdb_prepare = ksz_port_mdb_prepare, .port_mdb_add = ksz_port_mdb_add, .port_mdb_del = ksz_port_mdb_del, - .port_mdb_dump = ksz_port_mdb_dump, .port_mirror_add = ksz_port_mirror_add, .port_mirror_del = ksz_port_mirror_del, }; diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 264b281eb86b..c142b97add2c 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -839,49 +839,31 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port, } static int 
-mt7530_port_fdb_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) +mt7530_port_fdb_add(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid) { struct mt7530_priv *priv = ds->priv; int ret; + u8 port_mask = BIT(port); - /* Because auto-learned entrie shares the same FDB table. - * an entry is reserved with no port_mask to make sure fdb_add - * is called while the entry is still available. - */ mutex_lock(&priv->reg_mutex); - mt7530_fdb_write(priv, fdb->vid, 0, fdb->addr, -1, STATIC_ENT); + mt7530_fdb_write(priv, vid, port_mask, addr, -1, STATIC_ENT); ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, 0); mutex_unlock(&priv->reg_mutex); return ret; } -static void -mt7530_port_fdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) -{ - struct mt7530_priv *priv = ds->priv; - u8 port_mask = BIT(port); - - mutex_lock(&priv->reg_mutex); - mt7530_fdb_write(priv, fdb->vid, port_mask, fdb->addr, -1, STATIC_ENT); - mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, 0); - mutex_unlock(&priv->reg_mutex); -} - static int mt7530_port_fdb_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb) + const unsigned char *addr, u16 vid) { struct mt7530_priv *priv = ds->priv; int ret; u8 port_mask = BIT(port); mutex_lock(&priv->reg_mutex); - mt7530_fdb_write(priv, fdb->vid, port_mask, fdb->addr, -1, STATIC_EMP); + mt7530_fdb_write(priv, vid, port_mask, addr, -1, STATIC_EMP); ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, 0); mutex_unlock(&priv->reg_mutex); @@ -890,8 +872,7 @@ mt7530_port_fdb_del(struct dsa_switch *ds, int port, static int mt7530_port_fdb_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_fdb *fdb, - switchdev_obj_dump_cb_t *cb) + dsa_fdb_dump_cb_t *cb, void *data) { struct mt7530_priv *priv = ds->priv; struct mt7530_fdb _fdb = { 0 }; @@ -909,11 +890,8 @@ mt7530_port_fdb_dump(struct dsa_switch *ds, int port, if (rsp & ATC_SRCH_HIT) { mt7530_fdb_read(priv, &_fdb); if (_fdb.port_mask & BIT(port)) { - ether_addr_copy(fdb->addr, _fdb.mac); - fdb->vid = _fdb.vid; - fdb->ndm_state = _fdb.noarp ? 
- NUD_NOARP : NUD_REACHABLE; - ret = cb(&fdb->obj); + ret = cb(_fdb.mac, _fdb.vid, _fdb.noarp, + data); if (ret < 0) break; } @@ -1039,7 +1017,7 @@ mt7530_setup(struct dsa_switch *ds) return 0; } -static struct dsa_switch_ops mt7530_switch_ops = { +static const struct dsa_switch_ops mt7530_switch_ops = { .get_tag_protocol = mtk_get_tag_protocol, .setup = mt7530_setup, .get_strings = mt7530_get_strings, @@ -1053,7 +1031,6 @@ static struct dsa_switch_ops mt7530_switch_ops = { .port_stp_state_set = mt7530_stp_state_set, .port_bridge_join = mt7530_port_bridge_join, .port_bridge_leave = mt7530_port_bridge_leave, - .port_fdb_prepare = mt7530_port_fdb_prepare, .port_fdb_add = mt7530_port_fdb_add, .port_fdb_del = mt7530_port_fdb_del, .port_fdb_dump = mt7530_port_fdb_dump, diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 5bcdd33101b0..d74c7335c512 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -307,7 +307,7 @@ static void mv88e6xxx_g1_irq_bus_sync_unlock(struct irq_data *d) mutex_unlock(&chip->reg_lock); } -static struct irq_chip mv88e6xxx_g1_irq_chip = { +static const struct irq_chip mv88e6xxx_g1_irq_chip = { .name = "mv88e6xxx-g1", .irq_mask = mv88e6xxx_g1_irq_mask, .irq_unmask = mv88e6xxx_g1_irq_unmask, @@ -810,63 +810,18 @@ static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port, mutex_unlock(&chip->reg_lock); } -static int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, - struct ethtool_eee *e) +static int mv88e6xxx_get_mac_eee(struct dsa_switch *ds, int port, + struct ethtool_eee *e) { - struct mv88e6xxx_chip *chip = ds->priv; - u16 reg; - int err; - - if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_EEE)) - return -EOPNOTSUPP; - - mutex_lock(&chip->reg_lock); - - err = mv88e6xxx_phy_read(chip, port, 16, ®); - if (err) - goto out; - - e->eee_enabled = !!(reg & 0x0200); - e->tx_lpi_enabled = !!(reg & 0x0100); - - err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, ®); - if (err) - goto out; - - e->eee_active = !!(reg & MV88E6352_PORT_STS_EEE); -out: - mutex_unlock(&chip->reg_lock); - - return err; + /* Nothing to do on the port's MAC */ + return 0; } -static int mv88e6xxx_set_eee(struct dsa_switch *ds, int port, - struct phy_device *phydev, struct ethtool_eee *e) +static int mv88e6xxx_set_mac_eee(struct dsa_switch *ds, int port, + struct ethtool_eee *e) { - struct mv88e6xxx_chip *chip = ds->priv; - u16 reg; - int err; - - if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_EEE)) - return -EOPNOTSUPP; - - mutex_lock(&chip->reg_lock); - - err = mv88e6xxx_phy_read(chip, port, 16, ®); - if (err) - goto out; - - reg &= ~0x0300; - if (e->eee_enabled) - reg |= 0x0200; - if (e->tx_lpi_enabled) - reg |= 0x0100; - - err = mv88e6xxx_phy_write(chip, port, 16, reg); -out: - mutex_unlock(&chip->reg_lock); - - return err; + /* Nothing to do on the port's MAC */ + return 0; } static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port) @@ -926,6 +881,22 @@ static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port, dev_err(ds->dev, "p%d: failed to update state\n", port); } +static int mv88e6xxx_pot_setup(struct mv88e6xxx_chip *chip) +{ + if (chip->info->ops->pot_clear) + return chip->info->ops->pot_clear(chip); + + return 0; +} + +static int mv88e6xxx_rsvd2cpu_setup(struct mv88e6xxx_chip *chip) +{ + if (chip->info->ops->mgmt_rsvd2cpu) + return chip->info->ops->mgmt_rsvd2cpu(chip); + + return 0; +} + static int mv88e6xxx_atu_setup(struct mv88e6xxx_chip *chip) { int err; @@ -1040,61 +1011,6 @@ static int 
mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_chip *chip, return chip->info->ops->vtu_loadpurge(chip, entry); } -static int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_vlan *vlan, - switchdev_obj_dump_cb_t *cb) -{ - struct mv88e6xxx_chip *chip = ds->priv; - struct mv88e6xxx_vtu_entry next = { - .vid = chip->info->max_vid, - }; - u16 pvid; - int err; - - if (!chip->info->max_vid) - return -EOPNOTSUPP; - - mutex_lock(&chip->reg_lock); - - err = mv88e6xxx_port_get_pvid(chip, port, &pvid); - if (err) - goto unlock; - - do { - err = mv88e6xxx_vtu_getnext(chip, &next); - if (err) - break; - - if (!next.valid) - break; - - if (next.member[port] == - MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER) - continue; - - /* reinit and dump this VLAN obj */ - vlan->vid_begin = next.vid; - vlan->vid_end = next.vid; - vlan->flags = 0; - - if (next.member[port] == - MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_UNTAGGED) - vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED; - - if (next.vid == pvid) - vlan->flags |= BRIDGE_VLAN_INFO_PVID; - - err = cb(&vlan->obj); - if (err) - break; - } while (next.vid < chip->info->max_vid); - -unlock: - mutex_unlock(&chip->reg_lock); - - return err; -} - static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid) { DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID); @@ -1184,6 +1100,10 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port, }; int i, err; + /* DSA and CPU ports have to be members of multiple vlans */ + if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port)) + return 0; + if (!vid_begin) return -EOPNOTSUPP; @@ -1435,38 +1355,28 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port, return mv88e6xxx_g1_atu_loadpurge(chip, vlan.fid, &entry); } -static int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) -{ - /* We don't need any dynamic resource from the kernel (yet), - * so skip the prepare phase. 
- */ - return 0; -} - -static void mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) -{ - struct mv88e6xxx_chip *chip = ds->priv; - - mutex_lock(&chip->reg_lock); - if (mv88e6xxx_port_db_load_purge(chip, port, fdb->addr, fdb->vid, - MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC)) - dev_err(ds->dev, "p%d: failed to load unicast MAC address\n", - port); - mutex_unlock(&chip->reg_lock); -} - -static int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb) +static int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid) { struct mv88e6xxx_chip *chip = ds->priv; int err; mutex_lock(&chip->reg_lock); - err = mv88e6xxx_port_db_load_purge(chip, port, fdb->addr, fdb->vid, + err = mv88e6xxx_port_db_load_purge(chip, port, addr, vid, + MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC); + mutex_unlock(&chip->reg_lock); + + return err; +} + +static int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid) +{ + struct mv88e6xxx_chip *chip = ds->priv; + int err; + + mutex_lock(&chip->reg_lock); + err = mv88e6xxx_port_db_load_purge(chip, port, addr, vid, MV88E6XXX_G1_ATU_DATA_STATE_UNUSED); mutex_unlock(&chip->reg_lock); @@ -1475,10 +1385,10 @@ static int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port, static int mv88e6xxx_port_db_dump_fid(struct mv88e6xxx_chip *chip, u16 fid, u16 vid, int port, - struct switchdev_obj *obj, - switchdev_obj_dump_cb_t *cb) + dsa_fdb_dump_cb_t *cb, void *data) { struct mv88e6xxx_atu_entry addr; + bool is_static; int err; addr.state = MV88E6XXX_G1_ATU_DATA_STATE_UNUSED; @@ -1495,33 +1405,12 @@ static int mv88e6xxx_port_db_dump_fid(struct mv88e6xxx_chip *chip, if (addr.trunk || (addr.portvec & BIT(port)) == 0) continue; - if (obj->id == SWITCHDEV_OBJ_ID_PORT_FDB) { - struct switchdev_obj_port_fdb *fdb; + if (!is_unicast_ether_addr(addr.mac)) + continue; - if (!is_unicast_ether_addr(addr.mac)) - continue; - - fdb = SWITCHDEV_OBJ_PORT_FDB(obj); - fdb->vid = vid; - ether_addr_copy(fdb->addr, addr.mac); - if (addr.state == MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC) - fdb->ndm_state = NUD_NOARP; - else - fdb->ndm_state = NUD_REACHABLE; - } else if (obj->id == SWITCHDEV_OBJ_ID_PORT_MDB) { - struct switchdev_obj_port_mdb *mdb; - - if (!is_multicast_ether_addr(addr.mac)) - continue; - - mdb = SWITCHDEV_OBJ_PORT_MDB(obj); - mdb->vid = vid; - ether_addr_copy(mdb->addr, addr.mac); - } else { - return -EOPNOTSUPP; - } - - err = cb(obj); + is_static = (addr.state == + MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC); + err = cb(addr.mac, vid, is_static, data); if (err) return err; } while (!is_broadcast_ether_addr(addr.mac)); @@ -1530,8 +1419,7 @@ static int mv88e6xxx_port_db_dump_fid(struct mv88e6xxx_chip *chip, } static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port, - struct switchdev_obj *obj, - switchdev_obj_dump_cb_t *cb) + dsa_fdb_dump_cb_t *cb, void *data) { struct mv88e6xxx_vtu_entry vlan = { .vid = chip->info->max_vid, @@ -1544,7 +1432,7 @@ static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port, if (err) return err; - err = mv88e6xxx_port_db_dump_fid(chip, fid, 0, port, obj, cb); + err = mv88e6xxx_port_db_dump_fid(chip, fid, 0, port, cb, data); if (err) return err; @@ -1558,7 +1446,7 @@ static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port, break; err = mv88e6xxx_port_db_dump_fid(chip, vlan.fid, vlan.vid, port, - obj, cb); + cb, data); if (err) 
return err; } while (vlan.vid < chip->info->max_vid); @@ -1567,14 +1455,13 @@ static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port, } static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_fdb *fdb, - switchdev_obj_dump_cb_t *cb) + dsa_fdb_dump_cb_t *cb, void *data) { struct mv88e6xxx_chip *chip = ds->priv; int err; mutex_lock(&chip->reg_lock); - err = mv88e6xxx_port_db_dump(chip, port, &fdb->obj, cb); + err = mv88e6xxx_port_db_dump(chip, port, cb, data); mutex_unlock(&chip->reg_lock); return err; @@ -2116,7 +2003,7 @@ static int mv88e6xxx_setup(struct dsa_switch *ds) goto unlock; /* Setup Switch Global 2 Registers */ - if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_GLOBAL2)) { + if (chip->info->global2_addr) { err = mv88e6xxx_g2_setup(chip); if (err) goto unlock; @@ -2142,16 +2029,13 @@ static int mv88e6xxx_setup(struct dsa_switch *ds) if (err) goto unlock; - /* Some generations have the configuration of sending reserved - * management frames to the CPU in global2, others in - * global1. Hence it does not fit the two setup functions - * above. - */ - if (chip->info->ops->mgmt_rsvd2cpu) { - err = chip->info->ops->mgmt_rsvd2cpu(chip); - if (err) - goto unlock; - } + err = mv88e6xxx_pot_setup(chip); + if (err) + goto unlock; + + err = mv88e6xxx_rsvd2cpu_setup(chip); + if (err) + goto unlock; unlock: mutex_unlock(&chip->reg_lock); @@ -2236,7 +2120,7 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip, if (np) { bus->name = np->full_name; - snprintf(bus->id, MII_BUS_ID_SIZE, "%s", np->full_name); + snprintf(bus->id, MII_BUS_ID_SIZE, "%pOF", np); } else { bus->name = "mv88e6xxx SMI"; snprintf(bus->id, MII_BUS_ID_SIZE, "mv88e6xxx-%d", index++); @@ -2385,7 +2269,8 @@ static const struct mv88e6xxx_ops mv88e6085_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .ppu_enable = mv88e6185_g1_ppu_enable, .ppu_disable = mv88e6185_g1_ppu_disable, .reset = mv88e6185_g1_reset, @@ -2408,7 +2293,7 @@ static const struct mv88e6xxx_ops mv88e6095_ops = { .stats_get_sset_count = mv88e6095_stats_get_sset_count, .stats_get_strings = mv88e6095_stats_get_strings, .stats_get_stats = mv88e6095_stats_get_stats, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6185_g2_mgmt_rsvd2cpu, .ppu_enable = mv88e6185_g1_ppu_enable, .ppu_disable = mv88e6185_g1_ppu_disable, .reset = mv88e6185_g1_reset, @@ -2441,7 +2326,8 @@ static const struct mv88e6xxx_ops mv88e6097_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -2467,7 +2353,8 @@ static const struct mv88e6xxx_ops mv88e6123_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -2496,7 +2383,7 @@ static 
const struct mv88e6xxx_ops mv88e6131_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6185_g2_mgmt_rsvd2cpu, .ppu_enable = mv88e6185_g1_ppu_enable, .ppu_disable = mv88e6185_g1_ppu_disable, .reset = mv88e6185_g1_reset, @@ -2533,6 +2420,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { .set_egress_port = mv88e6390_g1_set_egress_port, .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -2563,7 +2451,8 @@ static const struct mv88e6xxx_ops mv88e6161_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -2587,7 +2476,8 @@ static const struct mv88e6xxx_ops mv88e6165_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -2619,7 +2509,8 @@ static const struct mv88e6xxx_ops mv88e6171_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -2653,7 +2544,8 @@ static const struct mv88e6xxx_ops mv88e6172_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -2686,7 +2578,8 @@ static const struct mv88e6xxx_ops mv88e6175_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -2720,7 +2613,8 @@ static const struct mv88e6xxx_ops mv88e6176_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -2746,7 +2640,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = { 
.set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6185_g2_mgmt_rsvd2cpu, .ppu_enable = mv88e6185_g1_ppu_enable, .ppu_disable = mv88e6185_g1_ppu_disable, .reset = mv88e6185_g1_reset, @@ -2782,6 +2676,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = { .set_egress_port = mv88e6390_g1_set_egress_port, .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, @@ -2816,6 +2711,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = { .set_egress_port = mv88e6390_g1_set_egress_port, .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, @@ -2850,6 +2746,7 @@ static const struct mv88e6xxx_ops mv88e6191_ops = { .set_egress_port = mv88e6390_g1_set_egress_port, .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, @@ -2884,7 +2781,8 @@ static const struct mv88e6xxx_ops mv88e6240_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -2920,6 +2818,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = { .set_egress_port = mv88e6390_g1_set_egress_port, .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, @@ -2952,14 +2851,15 @@ static const struct mv88e6xxx_ops mv88e6320_ops = { .stats_get_stats = mv88e6320_stats_get_stats, .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6185_g1_vtu_getnext, .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, }; static const struct mv88e6xxx_ops mv88e6321_ops = { - /* MV88E6XXX_FAMILY_6321 */ + /* MV88E6XXX_FAMILY_6320 */ .irl_init_all = mv88e6352_g2_irl_init_all, .get_eeprom = mv88e6xxx_g2_get_eeprom16, .set_eeprom = mv88e6xxx_g2_set_eeprom16, @@ -3018,6 +2918,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .set_egress_port = mv88e6390_g1_set_egress_port, .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -3049,7 +2950,8 @@ static const struct mv88e6xxx_ops mv88e6350_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = 
mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -3081,7 +2983,8 @@ static const struct mv88e6xxx_ops mv88e6351_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -3115,7 +3018,8 @@ static const struct mv88e6xxx_ops mv88e6352_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -3153,6 +3057,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = { .set_egress_port = mv88e6390_g1_set_egress_port, .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, @@ -3190,6 +3095,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { .set_egress_port = mv88e6390_g1_set_egress_port, .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, @@ -3206,12 +3112,14 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 8, + .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_DSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6097, .ops = &mv88e6085_ops, }, @@ -3224,11 +3132,12 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 8, .atu_move_port_mask = 0xf, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_DSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6095, .ops = &mv88e6095_ops, }, @@ -3241,12 +3150,14 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 8, + .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6097, .ops = &mv88e6097_ops, }, @@ -3259,12 +3170,14 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 9, + .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6165, .ops = &mv88e6123_ops, }, @@ -3277,11 +3190,12 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, 
.age_time_coeff = 15000, .g1_irqs = 9, .atu_move_port_mask = 0xf, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_DSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6185, .ops = &mv88e6131_ops, }, @@ -3294,11 +3208,13 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 3750, .atu_move_port_mask = 0x1f, + .g2_irqs = 10, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6341, .ops = &mv88e6141_ops, }, @@ -3311,12 +3227,14 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 9, + .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6165, .ops = &mv88e6161_ops, }, @@ -3329,12 +3247,14 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 9, + .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_DSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6165, .ops = &mv88e6165_ops, }, @@ -3347,12 +3267,14 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 9, + .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6351, .ops = &mv88e6171_ops, }, @@ -3365,12 +3287,14 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 9, + .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6352, .ops = &mv88e6172_ops, }, @@ -3383,12 +3307,14 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 9, + .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6351, .ops = &mv88e6175_ops, }, @@ -3401,12 +3327,14 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 9, + .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6352, .ops = &mv88e6176_ops, }, @@ -3419,11 +3347,12 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 8, .atu_move_port_mask = 0xf, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6185, .ops = &mv88e6185_ops, }, @@ -3436,12 +3365,14 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 8191, .port_base_addr = 0x0, .global1_addr = 0x1b, + .global2_addr = 0x1c, .tag_protocol = DSA_TAG_PROTO_DSA, .age_time_coeff = 3750, .g1_irqs = 9, + .g2_irqs = 14, .pvt = true, + .multi_chip = true, 
.atu_move_port_mask = 0x1f, - .flags = MV88E6XXX_FLAGS_FAMILY_6390, .ops = &mv88e6190_ops, }, @@ -3454,12 +3385,14 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 8191, .port_base_addr = 0x0, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 3750, .g1_irqs = 9, + .g2_irqs = 14, .atu_move_port_mask = 0x1f, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_DSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6390, .ops = &mv88e6190x_ops, }, @@ -3472,12 +3405,14 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 8191, .port_base_addr = 0x0, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 3750, .g1_irqs = 9, + .g2_irqs = 14, .atu_move_port_mask = 0x1f, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_DSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6390, .ops = &mv88e6191_ops, }, @@ -3490,12 +3425,14 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 9, + .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6352, .ops = &mv88e6240_ops, }, @@ -3508,12 +3445,14 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 8191, .port_base_addr = 0x0, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 3750, .g1_irqs = 9, + .g2_irqs = 14, .atu_move_port_mask = 0x1f, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_DSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6390, .ops = &mv88e6290_ops, }, @@ -3526,12 +3465,13 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 8, .atu_move_port_mask = 0xf, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6320, .ops = &mv88e6320_ops, }, @@ -3544,11 +3484,12 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 8, .atu_move_port_mask = 0xf, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6320, .ops = &mv88e6321_ops, }, @@ -3561,11 +3502,13 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 3750, .atu_move_port_mask = 0x1f, + .g2_irqs = 10, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6341, .ops = &mv88e6341_ops, }, @@ -3578,12 +3521,14 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 9, + .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6351, .ops = &mv88e6350_ops, }, @@ -3596,12 +3541,14 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 9, + .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6351, .ops = &mv88e6351_ops, }, @@ -3614,12 +3561,14 @@ static 
const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 9, + .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6352, .ops = &mv88e6352_ops, }, [MV88E6390] = { @@ -3631,12 +3580,14 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 8191, .port_base_addr = 0x0, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 3750, .g1_irqs = 9, + .g2_irqs = 14, .atu_move_port_mask = 0x1f, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_DSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6390, .ops = &mv88e6390_ops, }, [MV88E6390X] = { @@ -3648,12 +3599,14 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 8191, .port_base_addr = 0x0, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 3750, .g1_irqs = 9, + .g2_irqs = 14, .atu_move_port_mask = 0x1f, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_DSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6390, .ops = &mv88e6390x_ops, }, }; @@ -3723,7 +3676,7 @@ static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip, { if (sw_addr == 0) chip->smi_ops = &mv88e6xxx_smi_single_chip_ops; - else if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_MULTI_CHIP)) + else if (chip->info->multi_chip) chip->smi_ops = &mv88e6xxx_smi_multi_chip_ops; else return -EINVAL; @@ -3828,20 +3781,6 @@ static int mv88e6xxx_port_mdb_del(struct dsa_switch *ds, int port, return err; } -static int mv88e6xxx_port_mdb_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_mdb *mdb, - switchdev_obj_dump_cb_t *cb) -{ - struct mv88e6xxx_chip *chip = ds->priv; - int err; - - mutex_lock(&chip->reg_lock); - err = mv88e6xxx_port_db_dump(chip, port, &mdb->obj, cb); - mutex_unlock(&chip->reg_lock); - - return err; -} - static const struct dsa_switch_ops mv88e6xxx_switch_ops = { .probe = mv88e6xxx_drv_probe, .get_tag_protocol = mv88e6xxx_get_tag_protocol, @@ -3853,8 +3792,8 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = { .get_sset_count = mv88e6xxx_get_sset_count, .port_enable = mv88e6xxx_port_enable, .port_disable = mv88e6xxx_port_disable, - .set_eee = mv88e6xxx_set_eee, - .get_eee = mv88e6xxx_get_eee, + .get_mac_eee = mv88e6xxx_get_mac_eee, + .set_mac_eee = mv88e6xxx_set_mac_eee, .get_eeprom_len = mv88e6xxx_get_eeprom_len, .get_eeprom = mv88e6xxx_get_eeprom, .set_eeprom = mv88e6xxx_set_eeprom, @@ -3869,15 +3808,12 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = { .port_vlan_prepare = mv88e6xxx_port_vlan_prepare, .port_vlan_add = mv88e6xxx_port_vlan_add, .port_vlan_del = mv88e6xxx_port_vlan_del, - .port_vlan_dump = mv88e6xxx_port_vlan_dump, - .port_fdb_prepare = mv88e6xxx_port_fdb_prepare, .port_fdb_add = mv88e6xxx_port_fdb_add, .port_fdb_del = mv88e6xxx_port_fdb_del, .port_fdb_dump = mv88e6xxx_port_fdb_dump, .port_mdb_prepare = mv88e6xxx_port_mdb_prepare, .port_mdb_add = mv88e6xxx_port_mdb_add, .port_mdb_del = mv88e6xxx_port_mdb_del, - .port_mdb_dump = mv88e6xxx_port_mdb_dump, .crosschip_bridge_join = mv88e6xxx_crosschip_bridge_join, .crosschip_bridge_leave = mv88e6xxx_crosschip_bridge_leave, }; @@ -3971,7 +3907,7 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) if (err) goto out; - if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_INT)) { + if (chip->info->g2_irqs > 0) { err = mv88e6xxx_g2_irq_setup(chip); if (err) goto out_g1_irq; @@ -3991,7 +3927,7 @@ static int mv88e6xxx_probe(struct 
mdio_device *mdiodev) out_mdio: mv88e6xxx_mdios_unregister(chip); out_g2_irq: - if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_INT) && chip->irq > 0) + if (chip->info->g2_irqs > 0 && chip->irq > 0) mv88e6xxx_g2_irq_free(chip); out_g1_irq: if (chip->irq > 0) { @@ -4013,9 +3949,11 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev) mv88e6xxx_mdios_unregister(chip); if (chip->irq > 0) { - if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_INT)) + if (chip->info->g2_irqs > 0) mv88e6xxx_g2_irq_free(chip); + mutex_lock(&chip->reg_lock); mv88e6xxx_g1_irq_free(chip); + mutex_unlock(&chip->reg_lock); } } diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index 086444016352..334f6f7544ba 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -97,133 +97,6 @@ enum mv88e6xxx_family { MV88E6XXX_FAMILY_6390, /* 6190 6190X 6191 6290 6390 6390X */ }; -enum mv88e6xxx_cap { - /* Energy Efficient Ethernet. - */ - MV88E6XXX_CAP_EEE, - - /* Multi-chip Addressing Mode. - * Some chips respond to only 2 registers of its own SMI device address - * when it is non-zero, and use indirect access to internal registers. - */ - MV88E6XXX_CAP_SMI_CMD, /* (0x00) SMI Command */ - MV88E6XXX_CAP_SMI_DATA, /* (0x01) SMI Data */ - - /* Switch Global (1) Registers. - */ - MV88E6XXX_CAP_G1_ATU_FID, /* (0x01) ATU FID Register */ - MV88E6XXX_CAP_G1_VTU_FID, /* (0x02) VTU FID Register */ - - /* Switch Global 2 Registers. - * The device contains a second set of global 16-bit registers. - */ - MV88E6XXX_CAP_GLOBAL2, - MV88E6XXX_CAP_G2_INT, /* (0x00) Interrupt Status */ - MV88E6XXX_CAP_G2_MGMT_EN_2X, /* (0x02) MGMT Enable Register 2x */ - MV88E6XXX_CAP_G2_MGMT_EN_0X, /* (0x03) MGMT Enable Register 0x */ - MV88E6XXX_CAP_G2_POT, /* (0x0f) Priority Override Table */ - - /* Per VLAN Spanning Tree Unit (STU). - * The Port State database, if present, is accessed through VTU - * operations and dedicated SID registers. See MV88E6352_G1_VTU_SID. - */ - MV88E6XXX_CAP_STU, - - /* VLAN Table Unit. - * The VTU is used to program 802.1Q VLANs. See MV88E6XXX_G1_VTU_OP. 
- */ - MV88E6XXX_CAP_VTU, -}; - -/* Bitmask of capabilities */ -#define MV88E6XXX_FLAG_EEE BIT_ULL(MV88E6XXX_CAP_EEE) - -#define MV88E6XXX_FLAG_SMI_CMD BIT_ULL(MV88E6XXX_CAP_SMI_CMD) -#define MV88E6XXX_FLAG_SMI_DATA BIT_ULL(MV88E6XXX_CAP_SMI_DATA) - -#define MV88E6XXX_FLAG_G1_VTU_FID BIT_ULL(MV88E6XXX_CAP_G1_VTU_FID) - -#define MV88E6XXX_FLAG_GLOBAL2 BIT_ULL(MV88E6XXX_CAP_GLOBAL2) -#define MV88E6XXX_FLAG_G2_INT BIT_ULL(MV88E6XXX_CAP_G2_INT) -#define MV88E6XXX_FLAG_G2_MGMT_EN_2X BIT_ULL(MV88E6XXX_CAP_G2_MGMT_EN_2X) -#define MV88E6XXX_FLAG_G2_MGMT_EN_0X BIT_ULL(MV88E6XXX_CAP_G2_MGMT_EN_0X) -#define MV88E6XXX_FLAG_G2_POT BIT_ULL(MV88E6XXX_CAP_G2_POT) - -/* Multi-chip Addressing Mode */ -#define MV88E6XXX_FLAGS_MULTI_CHIP \ - (MV88E6XXX_FLAG_SMI_CMD | \ - MV88E6XXX_FLAG_SMI_DATA) - -#define MV88E6XXX_FLAGS_FAMILY_6095 \ - (MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ - MV88E6XXX_FLAGS_MULTI_CHIP) - -#define MV88E6XXX_FLAGS_FAMILY_6097 \ - (MV88E6XXX_FLAG_G1_VTU_FID | \ - MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAG_G2_INT | \ - MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ - MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ - MV88E6XXX_FLAG_G2_POT | \ - MV88E6XXX_FLAGS_MULTI_CHIP) - -#define MV88E6XXX_FLAGS_FAMILY_6165 \ - (MV88E6XXX_FLAG_G1_VTU_FID | \ - MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAG_G2_INT | \ - MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ - MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ - MV88E6XXX_FLAG_G2_POT | \ - MV88E6XXX_FLAGS_MULTI_CHIP) - -#define MV88E6XXX_FLAGS_FAMILY_6185 \ - (MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAG_G2_INT | \ - MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ - MV88E6XXX_FLAGS_MULTI_CHIP) - -#define MV88E6XXX_FLAGS_FAMILY_6320 \ - (MV88E6XXX_FLAG_EEE | \ - MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ - MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ - MV88E6XXX_FLAG_G2_POT | \ - MV88E6XXX_FLAGS_MULTI_CHIP) - -#define MV88E6XXX_FLAGS_FAMILY_6341 \ - (MV88E6XXX_FLAG_EEE | \ - MV88E6XXX_FLAG_G1_VTU_FID | \ - MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAG_G2_INT | \ - MV88E6XXX_FLAG_G2_POT | \ - MV88E6XXX_FLAGS_MULTI_CHIP) - -#define MV88E6XXX_FLAGS_FAMILY_6351 \ - (MV88E6XXX_FLAG_G1_VTU_FID | \ - MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAG_G2_INT | \ - MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ - MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ - MV88E6XXX_FLAG_G2_POT | \ - MV88E6XXX_FLAGS_MULTI_CHIP) - -#define MV88E6XXX_FLAGS_FAMILY_6352 \ - (MV88E6XXX_FLAG_EEE | \ - MV88E6XXX_FLAG_G1_VTU_FID | \ - MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAG_G2_INT | \ - MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ - MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ - MV88E6XXX_FLAG_G2_POT | \ - MV88E6XXX_FLAGS_MULTI_CHIP) - -#define MV88E6XXX_FLAGS_FAMILY_6390 \ - (MV88E6XXX_FLAG_EEE | \ - MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAG_G2_INT | \ - MV88E6XXX_FLAGS_MULTI_CHIP) - struct mv88e6xxx_ops; struct mv88e6xxx_info { @@ -235,11 +108,18 @@ struct mv88e6xxx_info { unsigned int max_vid; unsigned int port_base_addr; unsigned int global1_addr; + unsigned int global2_addr; unsigned int age_time_coeff; unsigned int g1_irqs; + unsigned int g2_irqs; bool pvt; + + /* Multi-chip Addressing Mode. + * Some chips respond to only 2 registers of its own SMI device address + * when it is non-zero, and use indirect access to internal registers. + */ + bool multi_chip; enum dsa_tag_protocol tag_protocol; - unsigned long long flags; /* Mask for FromPort and ToPort value of PortVec used in ATU Move * operation. 0 means that the ATU Move operation is not supported. 
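[Editor's note] The mv88e6xxx hunks above replace the per-family MV88E6XXX_FLAG_* capability bitmask with explicit fields in the per-chip info structure (multi_chip, global2_addr, g2_irqs, pvt), and callers now test those fields directly, e.g. chip->info->multi_chip in mv88e6xxx_smi_init() and chip->info->g2_irqs > 0 in the probe path. The standalone sketch below only models that pattern with simplified stand-in structs; the real definitions are the ones added to chip.h in this diff, and the printed messages are placeholders.

/* Standalone model -- the real structs live in chip.h; the types and
 * helpers here are simplified stand-ins for illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

struct chip_info {
	const char *name;
	unsigned int global2_addr;	/* 0 when the chip has no Global 2 block */
	unsigned int g2_irqs;		/* 0 when Global 2 has no interrupt sources */
	bool multi_chip;		/* indirect (multi-chip) SMI addressing */
};

struct chip {
	const struct chip_info *info;
};

/* Mirrors the mv88e6xxx_smi_init() decision: with a non-zero switch
 * address the chip must support multi-chip (indirect) SMI access.
 */
static int smi_init(struct chip *chip, int sw_addr)
{
	if (sw_addr == 0)
		printf("%s: single-chip SMI ops\n", chip->info->name);
	else if (chip->info->multi_chip)
		printf("%s: multi-chip SMI ops\n", chip->info->name);
	else
		return -1;	/* -EINVAL in the kernel */
	return 0;
}

/* Mirrors the setup/probe paths: feature code checks the info fields
 * directly instead of a MV88E6XXX_FLAG_* capability bitmask.
 */
static void setup(struct chip *chip)
{
	if (chip->info->global2_addr)
		printf("%s: set up Global 2 registers\n", chip->info->name);
	if (chip->info->g2_irqs > 0)
		printf("%s: set up Global 2 interrupts\n", chip->info->name);
}

int main(void)
{
	/* Field values taken from the MV88E6085 entry in the table above. */
	static const struct chip_info mv88e6085 = {
		.name = "mv88e6085",
		.global2_addr = 0x1c,
		.g2_irqs = 10,
		.multi_chip = true,
	};
	struct chip chip = { .info = &mv88e6085 };

	if (!smi_init(&chip, 1))
		setup(&chip);
	return 0;
}

The design point is that a missing feature is simply a zero/false field, so per-feature setup helpers (as with pot_clear and mgmt_rsvd2cpu in the next hunk) can be called unconditionally and decide for themselves whether there is anything to do.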
@@ -359,6 +239,9 @@ struct mv88e6xxx_ops { struct mii_bus *bus, int addr, int reg, u16 val); + /* Priority Override Table operations */ + int (*pot_clear)(struct mv88e6xxx_chip *chip); + /* PHY Polling Unit (PPU) operations */ int (*ppu_enable)(struct mv88e6xxx_chip *chip); int (*ppu_disable)(struct mv88e6xxx_chip *chip); @@ -449,7 +332,6 @@ struct mv88e6xxx_ops { int (*set_egress_port)(struct mv88e6xxx_chip *chip, int port); const struct mv88e6xxx_irq_ops *watchdog_ops; - /* Can be either in g1 or g2, so don't use a prefix */ int (*mgmt_rsvd2cpu)(struct mv88e6xxx_chip *chip); /* Power on/off a SERDES interface */ @@ -482,12 +364,6 @@ struct mv88e6xxx_hw_stat { int type; }; -static inline bool mv88e6xxx_has(struct mv88e6xxx_chip *chip, - unsigned long flags) -{ - return (chip->info->flags & flags) == flags; -} - static inline bool mv88e6xxx_has_pvt(struct mv88e6xxx_chip *chip) { return chip->info->pvt; diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c index 158d0f499874..af0727877825 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.c +++ b/drivers/net/dsa/mv88e6xxx/global2.c @@ -22,48 +22,99 @@ static int mv88e6xxx_g2_read(struct mv88e6xxx_chip *chip, int reg, u16 *val) { - return mv88e6xxx_read(chip, MV88E6XXX_G2, reg, val); + return mv88e6xxx_read(chip, chip->info->global2_addr, reg, val); } static int mv88e6xxx_g2_write(struct mv88e6xxx_chip *chip, int reg, u16 val) { - return mv88e6xxx_write(chip, MV88E6XXX_G2, reg, val); + return mv88e6xxx_write(chip, chip->info->global2_addr, reg, val); } static int mv88e6xxx_g2_update(struct mv88e6xxx_chip *chip, int reg, u16 update) { - return mv88e6xxx_update(chip, MV88E6XXX_G2, reg, update); + return mv88e6xxx_update(chip, chip->info->global2_addr, reg, update); } static int mv88e6xxx_g2_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask) { - return mv88e6xxx_wait(chip, MV88E6XXX_G2, reg, mask); + return mv88e6xxx_wait(chip, chip->info->global2_addr, reg, mask); +} + +/* Offset 0x00: Interrupt Source Register */ + +static int mv88e6xxx_g2_int_source(struct mv88e6xxx_chip *chip, u16 *src) +{ + /* Read (and clear most of) the Interrupt Source bits */ + return mv88e6xxx_g2_read(chip, MV88E6XXX_G2_INT_SRC, src); +} + +/* Offset 0x01: Interrupt Mask Register */ + +static int mv88e6xxx_g2_int_mask(struct mv88e6xxx_chip *chip, u16 mask) +{ + return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_INT_MASK, mask); } /* Offset 0x02: Management Enable 2x */ + +static int mv88e6xxx_g2_mgmt_enable_2x(struct mv88e6xxx_chip *chip, u16 en2x) +{ + return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_MGMT_EN_2X, en2x); +} + /* Offset 0x03: Management Enable 0x */ -int mv88e6095_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip) +static int mv88e6xxx_g2_mgmt_enable_0x(struct mv88e6xxx_chip *chip, u16 en0x) +{ + return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_MGMT_EN_0X, en0x); +} + +/* Offset 0x05: Switch Management Register */ + +static int mv88e6xxx_g2_switch_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip, + bool enable) +{ + u16 val; + int err; + + err = mv88e6xxx_g2_read(chip, MV88E6XXX_G2_SWITCH_MGMT, &val); + if (err) + return err; + + if (enable) + val |= MV88E6XXX_G2_SWITCH_MGMT_RSVD2CPU; + else + val &= ~MV88E6XXX_G2_SWITCH_MGMT_RSVD2CPU; + + return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SWITCH_MGMT, val); +} + +int mv88e6185_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip) +{ + int err; + + /* Consider the frames with reserved multicast destination + * addresses matching 01:80:c2:00:00:0x as MGMT. 
+ */ + err = mv88e6xxx_g2_mgmt_enable_0x(chip, 0xffff); + if (err) + return err; + + return mv88e6xxx_g2_switch_mgmt_rsvd2cpu(chip, true); +} + +int mv88e6352_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip) { int err; /* Consider the frames with reserved multicast destination * addresses matching 01:80:c2:00:00:2x as MGMT. */ - if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_2X)) { - err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_MGMT_EN_2X, 0xffff); - if (err) - return err; - } + err = mv88e6xxx_g2_mgmt_enable_2x(chip, 0xffff); + if (err) + return err; - /* Consider the frames with reserved multicast destination - * addresses matching 01:80:c2:00:00:0x as MGMT. - */ - if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_0X)) - return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_MGMT_EN_0X, - 0xffff); - - return 0; + return mv88e6185_g2_mgmt_rsvd2cpu(chip); } /* Offset 0x06: Device Mapping Table register */ @@ -260,7 +311,7 @@ static int mv88e6xxx_g2_pot_write(struct mv88e6xxx_chip *chip, int pointer, return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_PRIO_OVERRIDE, val); } -static int mv88e6xxx_g2_clear_pot(struct mv88e6xxx_chip *chip) +int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip) { int i, err; @@ -933,7 +984,7 @@ static irqreturn_t mv88e6xxx_g2_irq_thread_fn(int irq, void *dev_id) u16 reg; mutex_lock(&chip->reg_lock); - err = mv88e6xxx_g2_read(chip, MV88E6XXX_G2_INT_SOURCE, ®); + err = mv88e6xxx_g2_int_source(chip, ®); mutex_unlock(&chip->reg_lock); if (err) goto out; @@ -959,13 +1010,16 @@ static void mv88e6xxx_g2_irq_bus_lock(struct irq_data *d) static void mv88e6xxx_g2_irq_bus_sync_unlock(struct irq_data *d) { struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d); + int err; - mv88e6xxx_g2_write(chip, MV88E6XXX_G2_INT_MASK, ~chip->g2_irq.masked); + err = mv88e6xxx_g2_int_mask(chip, ~chip->g2_irq.masked); + if (err) + dev_err(chip->dev, "failed to mask interrupts\n"); mutex_unlock(&chip->reg_lock); } -static struct irq_chip mv88e6xxx_g2_irq_chip = { +static const struct irq_chip mv88e6xxx_g2_irq_chip = { .name = "mv88e6xxx-g2", .irq_mask = mv88e6xxx_g2_irq_mask, .irq_unmask = mv88e6xxx_g2_irq_unmask, @@ -1063,9 +1117,6 @@ int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip) * port at the highest priority. */ reg = MV88E6XXX_G2_SWITCH_MGMT_FORCE_FLOW_CTL_PRI | (0x7 << 4); - if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_0X) || - mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_2X)) - reg |= MV88E6XXX_G2_SWITCH_MGMT_RSVD2CPU | 0x7; err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SWITCH_MGMT, reg); if (err) return err; @@ -1080,12 +1131,5 @@ int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip) if (err) return err; - if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_POT)) { - /* Clear the priority override table. 
*/ - err = mv88e6xxx_g2_clear_pot(chip); - if (err) - return err; - } - return 0; } diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h index 317ffd8f323d..669f59017b12 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.h +++ b/drivers/net/dsa/mv88e6xxx/global2.h @@ -17,14 +17,27 @@ #include "chip.h" -#define MV88E6XXX_G2 0x1c - /* Offset 0x00: Interrupt Source Register */ -#define MV88E6XXX_G2_INT_SOURCE 0x00 +#define MV88E6XXX_G2_INT_SRC 0x00 +#define MV88E6XXX_G2_INT_SRC_WDOG 0x8000 +#define MV88E6XXX_G2_INT_SRC_JAM_LIMIT 0x4000 +#define MV88E6XXX_G2_INT_SRC_DUPLEX_MISMATCH 0x2000 +#define MV88E6XXX_G2_INT_SRC_WAKE_EVENT 0x1000 +#define MV88E6352_G2_INT_SRC_SERDES 0x0800 +#define MV88E6352_G2_INT_SRC_PHY 0x001f +#define MV88E6390_G2_INT_SRC_PHY 0x07fe + #define MV88E6XXX_G2_INT_SOURCE_WATCHDOG 15 /* Offset 0x01: Interrupt Mask Register */ -#define MV88E6XXX_G2_INT_MASK 0x01 +#define MV88E6XXX_G2_INT_MASK 0x01 +#define MV88E6XXX_G2_INT_MASK_WDOG 0x8000 +#define MV88E6XXX_G2_INT_MASK_JAM_LIMIT 0x4000 +#define MV88E6XXX_G2_INT_MASK_DUPLEX_MISMATCH 0x2000 +#define MV88E6XXX_G2_INT_MASK_WAKE_EVENT 0x1000 +#define MV88E6352_G2_INT_MASK_SERDES 0x0800 +#define MV88E6352_G2_INT_MASK_PHY 0x001f +#define MV88E6390_G2_INT_MASK_PHY 0x07fe /* Offset 0x02: MGMT Enable Register 2x */ #define MV88E6XXX_G2_MGMT_EN_2X 0x02 @@ -245,7 +258,11 @@ int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip); int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip); int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip); void mv88e6xxx_g2_irq_free(struct mv88e6xxx_chip *chip); -int mv88e6095_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip); + +int mv88e6185_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip); +int mv88e6352_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip); + +int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip); extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops; extern const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops; @@ -254,7 +271,7 @@ extern const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops; static inline int mv88e6xxx_g2_require(struct mv88e6xxx_chip *chip) { - if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_GLOBAL2)) { + if (chip->info->global2_addr) { dev_err(chip->dev, "this chip requires CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 enabled\n"); return -EOPNOTSUPP; } @@ -347,7 +364,17 @@ static inline void mv88e6xxx_g2_irq_free(struct mv88e6xxx_chip *chip) { } -static inline int mv88e6095_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip) +static inline int mv88e6185_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip) +{ + return -EOPNOTSUPP; +} + +static inline int mv88e6352_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip) +{ + return -EOPNOTSUPP; +} + +static inline int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip) { return -EOPNOTSUPP; } diff --git a/drivers/net/dsa/mv88e6xxx/phy.c b/drivers/net/dsa/mv88e6xxx/phy.c index 3500ac0ea848..436668bd50dc 100644 --- a/drivers/net/dsa/mv88e6xxx/phy.c +++ b/drivers/net/dsa/mv88e6xxx/phy.c @@ -13,7 +13,6 @@ #include #include -#include #include "chip.h" #include "phy.h" diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h index 8f3991bf1851..b16d5f0e6e9c 100644 --- a/drivers/net/dsa/mv88e6xxx/port.h +++ b/drivers/net/dsa/mv88e6xxx/port.h @@ -216,9 +216,6 @@ /* Offset 0x13: OutFiltered Counter */ #define MV88E6XXX_PORT_OUT_FILTERED 0x13 -/* Offset 0x16: LED Control */ -#define MV88E6XXX_PORT_LED_CONTROL 0x16 - /* Offset 0x18: IEEE Priority Mapping Table */ #define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE 0x18 
#define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_UPDATE 0x8000 diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index b3bee7eab45f..5ada7a41449c 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -250,7 +250,7 @@ static const struct regmap_range qca8k_readable_ranges[] = { }; -static struct regmap_access_table qca8k_readable_table = { +static const struct regmap_access_table qca8k_readable_table = { .yes_ranges = qca8k_readable_ranges, .n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges), }; @@ -637,8 +637,8 @@ qca8k_get_sset_count(struct dsa_switch *ds) return ARRAY_SIZE(ar8327_mib); } -static void -qca8k_eee_enable_set(struct dsa_switch *ds, int port, bool enable) +static int +qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee) { struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port); @@ -646,73 +646,21 @@ qca8k_eee_enable_set(struct dsa_switch *ds, int port, bool enable) mutex_lock(&priv->reg_mutex); reg = qca8k_read(priv, QCA8K_REG_EEE_CTRL); - if (enable) + if (eee->eee_enabled) reg |= lpi_en; else reg &= ~lpi_en; qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg); mutex_unlock(&priv->reg_mutex); -} - -static int -qca8k_eee_init(struct dsa_switch *ds, int port, - struct phy_device *phy) -{ - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; - struct ethtool_eee *p = &priv->port_sts[port].eee; - int ret; - - p->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full); - - ret = phy_init_eee(phy, 0); - if (ret) - return ret; - - qca8k_eee_enable_set(ds, port, true); return 0; } static int -qca8k_set_eee(struct dsa_switch *ds, int port, - struct phy_device *phydev, - struct ethtool_eee *e) +qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e) { - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; - struct ethtool_eee *p = &priv->port_sts[port].eee; - int ret = 0; - - p->eee_enabled = e->eee_enabled; - - if (e->eee_enabled) { - p->eee_enabled = qca8k_eee_init(ds, port, phydev); - if (!p->eee_enabled) - ret = -EOPNOTSUPP; - } - qca8k_eee_enable_set(ds, port, p->eee_enabled); - - return ret; -} - -static int -qca8k_get_eee(struct dsa_switch *ds, int port, - struct ethtool_eee *e) -{ - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; - struct ethtool_eee *p = &priv->port_sts[port].eee; - struct net_device *netdev = ds->ports[port].netdev; - int ret; - - ret = phy_ethtool_get_eee(netdev->phydev, p); - if (!ret) - e->eee_active = - !!(p->supported & p->advertised & p->lp_advertised); - else - e->eee_active = 0; - - e->eee_enabled = p->eee_enabled; - - return ret; + /* Nothing to do on the port's MAC */ + return 0; } static void @@ -829,69 +777,44 @@ qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr, } static int -qca8k_port_fdb_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) -{ - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; - - /* The FDB table for static and auto learned entries is the same. We - * need to reserve an entry with no port_mask set to make sure that - * when port_fdb_add is called an entry is still available. 
Otherwise - * the last free entry might have been used up by auto learning - */ - return qca8k_port_fdb_insert(priv, fdb->addr, 0, fdb->vid); -} - -static void qca8k_port_fdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) + const unsigned char *addr, u16 vid) { struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; u16 port_mask = BIT(port); - /* Update the FDB entry adding the port_mask */ - qca8k_port_fdb_insert(priv, fdb->addr, port_mask, fdb->vid); + return qca8k_port_fdb_insert(priv, addr, port_mask, vid); } static int qca8k_port_fdb_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb) + const unsigned char *addr, u16 vid) { struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; u16 port_mask = BIT(port); - u16 vid = fdb->vid; if (!vid) vid = 1; - return qca8k_fdb_del(priv, fdb->addr, port_mask, vid); + return qca8k_fdb_del(priv, addr, port_mask, vid); } static int qca8k_port_fdb_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_fdb *fdb, - switchdev_obj_dump_cb_t *cb) + dsa_fdb_dump_cb_t *cb, void *data) { struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; struct qca8k_fdb _fdb = { 0 }; int cnt = QCA8K_NUM_FDB_RECORDS; + bool is_static; int ret = 0; mutex_lock(&priv->reg_mutex); while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) { if (!_fdb.aging) break; - - ether_addr_copy(fdb->addr, _fdb.mac); - fdb->vid = _fdb.vid; - if (_fdb.aging == QCA8K_ATU_STATUS_STATIC) - fdb->ndm_state = NUD_NOARP; - else - fdb->ndm_state = NUD_REACHABLE; - - ret = cb(&fdb->obj); + is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC); + ret = cb(_fdb.mac, _fdb.vid, is_static, data); if (ret) break; } @@ -914,14 +837,13 @@ static const struct dsa_switch_ops qca8k_switch_ops = { .phy_write = qca8k_phy_write, .get_ethtool_stats = qca8k_get_ethtool_stats, .get_sset_count = qca8k_get_sset_count, - .get_eee = qca8k_get_eee, - .set_eee = qca8k_set_eee, + .get_mac_eee = qca8k_get_mac_eee, + .set_mac_eee = qca8k_set_mac_eee, .port_enable = qca8k_port_enable, .port_disable = qca8k_port_disable, .port_stp_state_set = qca8k_port_stp_state_set, .port_bridge_join = qca8k_port_bridge_join, .port_bridge_leave = qca8k_port_bridge_leave, - .port_fdb_prepare = qca8k_port_fdb_prepare, .port_fdb_add = qca8k_port_fdb_add, .port_fdb_del = qca8k_port_fdb_del, .port_fdb_dump = qca8k_port_fdb_dump, diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h index 1ed4fac6cd6d..1cf8a920d4ff 100644 --- a/drivers/net/dsa/qca8k.h +++ b/drivers/net/dsa/qca8k.h @@ -156,7 +156,6 @@ enum qca8k_fdb_cmd { }; struct ar8xxx_port_status { - struct ethtool_eee eee; int enabled; }; diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c index d0c165d2086e..d0a1f9ce3168 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c @@ -345,7 +345,7 @@ static void dummy_setup(struct net_device *dev) dev->flags &= ~IFF_MULTICAST; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST; - dev->features |= NETIF_F_ALL_TSO | NETIF_F_UFO; + dev->features |= NETIF_F_ALL_TSO; dev->features |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX; dev->features |= NETIF_F_GSO_ENCAP_ALL; dev->hw_features |= dev->features; diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c index f66c9710cb81..b223769d6a5e 100644 --- a/drivers/net/ethernet/3com/3c509.c +++ b/drivers/net/ethernet/3com/3c509.c @@ -392,7 +392,7 @@ static struct isa_driver el3_isa_driver = { static int 
isa_registered; #ifdef CONFIG_PNP -static struct pnp_device_id el3_pnp_ids[] = { +static const struct pnp_device_id el3_pnp_ids[] = { { .id = "TCM5090" }, /* 3Com Etherlink III (TP) */ { .id = "TCM5091" }, /* 3Com Etherlink III */ { .id = "TCM5094" }, /* 3Com Etherlink III (combo) */ @@ -474,7 +474,7 @@ static int pnp_registered; #endif /* CONFIG_PNP */ #ifdef CONFIG_EISA -static struct eisa_device_id el3_eisa_ids[] = { +static const struct eisa_device_id el3_eisa_ids[] = { { "TCM5090" }, { "TCM5091" }, { "TCM5092" }, diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 3b516ebeeddb..402d9090ad29 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -900,7 +900,7 @@ static const struct dev_pm_ops vortex_pm_ops = { #endif /* !CONFIG_PM */ #ifdef CONFIG_EISA -static struct eisa_device_id vortex_eisa_ids[] = { +static const struct eisa_device_id vortex_eisa_ids[] = { { "TCM5920", CH_3C592 }, { "TCM5970", CH_3C597 }, { "" } diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c index 05d9d3e2e92e..245554707163 100644 --- a/drivers/net/ethernet/8390/ax88796.c +++ b/drivers/net/ethernet/8390/ax88796.c @@ -585,7 +585,7 @@ static int ax_bb_get_data(struct mdiobb_ctrl *ctrl) return reg_memr & AX_MEMR_MDI ? 1 : 0; } -static struct mdiobb_ops bb_ops = { +static const struct mdiobb_ops bb_ops = { .owner = THIS_MODULE, .set_mdc = ax_bb_mdc, .set_mdio_dir = ax_bb_dir, diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index edae15ac0e98..c60421339a98 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -78,6 +78,7 @@ source "drivers/net/ethernet/freescale/Kconfig" source "drivers/net/ethernet/fujitsu/Kconfig" source "drivers/net/ethernet/hisilicon/Kconfig" source "drivers/net/ethernet/hp/Kconfig" +source "drivers/net/ethernet/huawei/Kconfig" source "drivers/net/ethernet/ibm/Kconfig" source "drivers/net/ethernet/intel/Kconfig" source "drivers/net/ethernet/i825xx/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index bf7f4502cabc..a0a03d4d939a 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -41,6 +41,7 @@ obj-$(CONFIG_NET_VENDOR_FREESCALE) += freescale/ obj-$(CONFIG_NET_VENDOR_FUJITSU) += fujitsu/ obj-$(CONFIG_NET_VENDOR_HISILICON) += hisilicon/ obj-$(CONFIG_NET_VENDOR_HP) += hp/ +obj-$(CONFIG_NET_VENDOR_HUAWEI) += huawei/ obj-$(CONFIG_NET_VENDOR_IBM) += ibm/ obj-$(CONFIG_NET_VENDOR_INTEL) += intel/ obj-$(CONFIG_NET_VENDOR_I825XX) += i825xx/ diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c index a9ac58c351a0..a251de8d9a91 100644 --- a/drivers/net/ethernet/adi/bfin_mac.c +++ b/drivers/net/ethernet/adi/bfin_mac.c @@ -986,7 +986,7 @@ static int bfin_ptp_enable(struct ptp_clock_info *ptp, return -EOPNOTSUPP; } -static struct ptp_clock_info bfin_ptp_caps = { +static const struct ptp_clock_info bfin_ptp_caps = { .owner = THIS_MODULE, .name = "BF518 clock", .max_adj = 0, diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c index ee4b94e3cda9..e22f976a0d18 100644 --- a/drivers/net/ethernet/amd/a2065.c +++ b/drivers/net/ethernet/amd/a2065.c @@ -643,7 +643,7 @@ static int a2065_init_one(struct zorro_dev *z, static void a2065_remove_one(struct zorro_dev *z); -static struct zorro_device_id a2065_zorro_tbl[] = { +static const struct zorro_device_id a2065_zorro_tbl[] = { { ZORRO_PROD_CBM_A2065_1 }, { ZORRO_PROD_CBM_A2065_2 }, { 
ZORRO_PROD_AMERISTAR_A2065 }, diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c index 5fd7b15b0574..4b6a5cb85dd2 100644 --- a/drivers/net/ethernet/amd/ariadne.c +++ b/drivers/net/ethernet/amd/ariadne.c @@ -692,7 +692,7 @@ static void ariadne_remove_one(struct zorro_dev *z) free_netdev(dev); } -static struct zorro_device_id ariadne_zorro_tbl[] = { +static const struct zorro_device_id ariadne_zorro_tbl[] = { { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE }, { 0 } }; diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index a3c90fe5de00..73ca8879ada7 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -1180,9 +1180,10 @@ static int au1000_probe(struct platform_device *pdev) /* Allocate the data buffers * Snooping works fine with eth on all au1xxx */ - aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE * - (NUM_TX_BUFFS + NUM_RX_BUFFS), - &aup->dma_addr, 0); + aup->vaddr = (u32)dma_alloc_attrs(NULL, MAX_BUF_SIZE * + (NUM_TX_BUFFS + NUM_RX_BUFFS), + &aup->dma_addr, 0, + DMA_ATTR_NON_CONSISTENT); if (!aup->vaddr) { dev_err(&pdev->dev, "failed to allocate data buffers\n"); err = -ENOMEM; @@ -1361,8 +1362,9 @@ static int au1000_probe(struct platform_device *pdev) err_remap2: iounmap(aup->mac); err_remap1: - dma_free_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS), - (void *)aup->vaddr, aup->dma_addr); + dma_free_attrs(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS), + (void *)aup->vaddr, aup->dma_addr, + DMA_ATTR_NON_CONSISTENT); err_vaddr: free_netdev(dev); err_alloc: @@ -1394,9 +1396,9 @@ static int au1000_remove(struct platform_device *pdev) if (aup->tx_db_inuse[i]) au1000_ReleaseDB(aup, aup->tx_db_inuse[i]); - dma_free_noncoherent(NULL, MAX_BUF_SIZE * - (NUM_TX_BUFFS + NUM_RX_BUFFS), - (void *)aup->vaddr, aup->dma_addr); + dma_free_attrs(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS), + (void *)aup->vaddr, aup->dma_addr, + DMA_ATTR_NON_CONSISTENT); iounmap(aup->macdma); iounmap(aup->mac); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index 9795419aac2d..7ea72ef11a55 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h @@ -210,11 +210,15 @@ #define DMA_CH_CR_PBLX8_WIDTH 1 #define DMA_CH_CR_SPH_INDEX 24 #define DMA_CH_CR_SPH_WIDTH 1 -#define DMA_CH_IER_AIE_INDEX 15 +#define DMA_CH_IER_AIE20_INDEX 15 +#define DMA_CH_IER_AIE20_WIDTH 1 +#define DMA_CH_IER_AIE_INDEX 14 #define DMA_CH_IER_AIE_WIDTH 1 #define DMA_CH_IER_FBEE_INDEX 12 #define DMA_CH_IER_FBEE_WIDTH 1 -#define DMA_CH_IER_NIE_INDEX 16 +#define DMA_CH_IER_NIE20_INDEX 16 +#define DMA_CH_IER_NIE20_WIDTH 1 +#define DMA_CH_IER_NIE_INDEX 15 #define DMA_CH_IER_NIE_WIDTH 1 #define DMA_CH_IER_RBUE_INDEX 7 #define DMA_CH_IER_RBUE_WIDTH 1 @@ -298,6 +302,7 @@ #define MAC_RWKPFR 0x00c4 #define MAC_LPICSR 0x00d0 #define MAC_LPITCR 0x00d4 +#define MAC_TIR 0x00e0 #define MAC_VR 0x0110 #define MAC_DR 0x0114 #define MAC_HWF0R 0x011c @@ -364,6 +369,8 @@ #define MAC_HWF0R_TXCOESEL_WIDTH 1 #define MAC_HWF0R_VLHASH_INDEX 4 #define MAC_HWF0R_VLHASH_WIDTH 1 +#define MAC_HWF0R_VXN_INDEX 29 +#define MAC_HWF0R_VXN_WIDTH 1 #define MAC_HWF1R_ADDR64_INDEX 14 #define MAC_HWF1R_ADDR64_WIDTH 2 #define MAC_HWF1R_ADVTHWORD_INDEX 13 @@ -448,6 +455,8 @@ #define MAC_PFR_PR_WIDTH 1 #define MAC_PFR_VTFE_INDEX 16 #define MAC_PFR_VTFE_WIDTH 1 +#define MAC_PFR_VUCC_INDEX 22 +#define MAC_PFR_VUCC_WIDTH 1 #define 
MAC_PMTCSR_MGKPKTEN_INDEX 1 #define MAC_PMTCSR_MGKPKTEN_WIDTH 1 #define MAC_PMTCSR_PWRDWN_INDEX 0 @@ -510,6 +519,12 @@ #define MAC_TCR_SS_WIDTH 2 #define MAC_TCR_TE_INDEX 0 #define MAC_TCR_TE_WIDTH 1 +#define MAC_TCR_VNE_INDEX 24 +#define MAC_TCR_VNE_WIDTH 1 +#define MAC_TCR_VNM_INDEX 25 +#define MAC_TCR_VNM_WIDTH 1 +#define MAC_TIR_TNID_INDEX 0 +#define MAC_TIR_TNID_WIDTH 16 #define MAC_TSCR_AV8021ASMEN_INDEX 28 #define MAC_TSCR_AV8021ASMEN_WIDTH 1 #define MAC_TSCR_SNAPTYPSEL_INDEX 16 @@ -1153,11 +1168,17 @@ #define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH 1 #define RX_PACKET_ATTRIBUTES_FIRST_INDEX 7 #define RX_PACKET_ATTRIBUTES_FIRST_WIDTH 1 +#define RX_PACKET_ATTRIBUTES_TNP_INDEX 8 +#define RX_PACKET_ATTRIBUTES_TNP_WIDTH 1 +#define RX_PACKET_ATTRIBUTES_TNPCSUM_DONE_INDEX 9 +#define RX_PACKET_ATTRIBUTES_TNPCSUM_DONE_WIDTH 1 #define RX_NORMAL_DESC0_OVT_INDEX 0 #define RX_NORMAL_DESC0_OVT_WIDTH 16 #define RX_NORMAL_DESC2_HL_INDEX 0 #define RX_NORMAL_DESC2_HL_WIDTH 10 +#define RX_NORMAL_DESC2_TNP_INDEX 11 +#define RX_NORMAL_DESC2_TNP_WIDTH 1 #define RX_NORMAL_DESC3_CDA_INDEX 27 #define RX_NORMAL_DESC3_CDA_WIDTH 1 #define RX_NORMAL_DESC3_CTXT_INDEX 30 @@ -1184,9 +1205,11 @@ #define RX_DESC3_L34T_IPV4_TCP 1 #define RX_DESC3_L34T_IPV4_UDP 2 #define RX_DESC3_L34T_IPV4_ICMP 3 +#define RX_DESC3_L34T_IPV4_UNKNOWN 7 #define RX_DESC3_L34T_IPV6_TCP 9 #define RX_DESC3_L34T_IPV6_UDP 10 #define RX_DESC3_L34T_IPV6_ICMP 11 +#define RX_DESC3_L34T_IPV6_UNKNOWN 15 #define RX_CONTEXT_DESC3_TSA_INDEX 4 #define RX_CONTEXT_DESC3_TSA_WIDTH 1 @@ -1201,6 +1224,8 @@ #define TX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1 #define TX_PACKET_ATTRIBUTES_PTP_INDEX 3 #define TX_PACKET_ATTRIBUTES_PTP_WIDTH 1 +#define TX_PACKET_ATTRIBUTES_VXLAN_INDEX 4 +#define TX_PACKET_ATTRIBUTES_VXLAN_WIDTH 1 #define TX_CONTEXT_DESC2_MSS_INDEX 0 #define TX_CONTEXT_DESC2_MSS_WIDTH 15 @@ -1241,8 +1266,11 @@ #define TX_NORMAL_DESC3_TCPPL_WIDTH 18 #define TX_NORMAL_DESC3_TSE_INDEX 18 #define TX_NORMAL_DESC3_TSE_WIDTH 1 +#define TX_NORMAL_DESC3_VNP_INDEX 23 +#define TX_NORMAL_DESC3_VNP_WIDTH 3 #define TX_NORMAL_DESC2_VLAN_INSERT 0x2 +#define TX_NORMAL_DESC3_VXLAN_PACKET 0x3 /* MDIO undefined or vendor specific registers */ #ifndef MDIO_PMA_10GBR_PMD_CTRL @@ -1339,6 +1367,7 @@ #define XGBE_AN_CL37_PCS_MODE_BASEX 0x00 #define XGBE_AN_CL37_PCS_MODE_SGMII 0x04 #define XGBE_AN_CL37_TX_CONFIG_MASK 0x08 +#define XGBE_AN_CL37_MII_CTRL_8BIT 0x0100 /* Bit setting and getting macros * The get macro will extract the current bit field value from within diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c index 7546b660d6b5..7d128be61310 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c @@ -527,3 +527,28 @@ void xgbe_debugfs_exit(struct xgbe_prv_data *pdata) debugfs_remove_recursive(pdata->xgbe_debugfs); pdata->xgbe_debugfs = NULL; } + +void xgbe_debugfs_rename(struct xgbe_prv_data *pdata) +{ + struct dentry *pfile; + char *buf; + + if (!pdata->xgbe_debugfs) + return; + + buf = kasprintf(GFP_KERNEL, "amd-xgbe-%s", pdata->netdev->name); + if (!buf) + return; + + if (!strcmp(pdata->xgbe_debugfs->d_name.name, buf)) + goto out; + + pfile = debugfs_rename(pdata->xgbe_debugfs->d_parent, + pdata->xgbe_debugfs, + pdata->xgbe_debugfs->d_parent, buf); + if (!pfile) + netdev_err(pdata->netdev, "debugfs_rename failed\n"); + +out: + kfree(buf); +} diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index 
06f953e1e9b2..e107e180e2c8 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -479,6 +479,50 @@ static bool xgbe_is_pfc_queue(struct xgbe_prv_data *pdata, return false; } +static void xgbe_set_vxlan_id(struct xgbe_prv_data *pdata) +{ + /* Program the VXLAN port */ + XGMAC_IOWRITE_BITS(pdata, MAC_TIR, TNID, pdata->vxlan_port); + + netif_dbg(pdata, drv, pdata->netdev, "VXLAN tunnel id set to %hx\n", + pdata->vxlan_port); +} + +static void xgbe_enable_vxlan(struct xgbe_prv_data *pdata) +{ + if (!pdata->hw_feat.vxn) + return; + + /* Program the VXLAN port */ + xgbe_set_vxlan_id(pdata); + + /* Allow for IPv6/UDP zero-checksum VXLAN packets */ + XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VUCC, 1); + + /* Enable VXLAN tunneling mode */ + XGMAC_IOWRITE_BITS(pdata, MAC_TCR, VNM, 0); + XGMAC_IOWRITE_BITS(pdata, MAC_TCR, VNE, 1); + + netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration enabled\n"); +} + +static void xgbe_disable_vxlan(struct xgbe_prv_data *pdata) +{ + if (!pdata->hw_feat.vxn) + return; + + /* Disable tunneling mode */ + XGMAC_IOWRITE_BITS(pdata, MAC_TCR, VNE, 0); + + /* Clear IPv6/UDP zero-checksum VXLAN packets setting */ + XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VUCC, 0); + + /* Clear the VXLAN port */ + XGMAC_IOWRITE_BITS(pdata, MAC_TIR, TNID, 0); + + netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration disabled\n"); +} + static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata) { unsigned int max_q_count, q_count; @@ -605,32 +649,38 @@ static void xgbe_config_flow_control(struct xgbe_prv_data *pdata) static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata) { struct xgbe_channel *channel; - unsigned int dma_ch_isr, dma_ch_ier; - unsigned int i; + unsigned int i, ver; /* Set the interrupt mode if supported */ if (pdata->channel_irq_mode) XGMAC_IOWRITE_BITS(pdata, DMA_MR, INTM, pdata->channel_irq_mode); + ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER); + for (i = 0; i < pdata->channel_count; i++) { channel = pdata->channel[i]; /* Clear all the interrupts which are set */ - dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR); - XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr); + XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, + XGMAC_DMA_IOREAD(channel, DMA_CH_SR)); /* Clear all interrupt enable bits */ - dma_ch_ier = 0; + channel->curr_ier = 0; /* Enable following interrupts * NIE - Normal Interrupt Summary Enable * AIE - Abnormal Interrupt Summary Enable * FBEE - Fatal Bus Error Enable */ - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 1); - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1); - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1); + if (ver < 0x21) { + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE20, 1); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE20, 1); + } else { + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE, 1); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE, 1); + } + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1); if (channel->tx_ring) { /* Enable the following Tx interrupts @@ -639,7 +689,8 @@ static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata) * mode) */ if (!pdata->per_channel_irq || pdata->channel_irq_mode) - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1); + XGMAC_SET_BITS(channel->curr_ier, + DMA_CH_IER, TIE, 1); } if (channel->rx_ring) { /* Enable following Rx interrupts @@ -648,12 +699,13 @@ static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata) * per channel interrupts in edge triggered * mode) */ - XGMAC_SET_BITS(dma_ch_ier, 
DMA_CH_IER, RBUE, 1); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1); if (!pdata->per_channel_irq || pdata->channel_irq_mode) - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1); + XGMAC_SET_BITS(channel->curr_ier, + DMA_CH_IER, RIE, 1); } - XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier); + XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier); } } @@ -1608,7 +1660,8 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel) struct xgbe_ring_data *rdata; struct xgbe_ring_desc *rdesc; struct xgbe_packet_data *packet = &ring->packet_data; - unsigned int csum, tso, vlan; + unsigned int tx_packets, tx_bytes; + unsigned int csum, tso, vlan, vxlan; unsigned int tso_context, vlan_context; unsigned int tx_set_ic; int start_index = ring->cur; @@ -1617,12 +1670,17 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel) DBGPR("-->xgbe_dev_xmit\n"); + tx_packets = packet->tx_packets; + tx_bytes = packet->tx_bytes; + csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, CSUM_ENABLE); tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, TSO_ENABLE); vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, VLAN_CTAG); + vxlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, + VXLAN); if (tso && (packet->mss != ring->tx.cur_mss)) tso_context = 1; @@ -1644,13 +1702,12 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel) * - Addition of Tx frame count to the frame count since the * last interrupt was set does not exceed the frame count setting */ - ring->coalesce_count += packet->tx_packets; + ring->coalesce_count += tx_packets; if (!pdata->tx_frames) tx_set_ic = 0; - else if (packet->tx_packets > pdata->tx_frames) + else if (tx_packets > pdata->tx_frames) tx_set_ic = 1; - else if ((ring->coalesce_count % pdata->tx_frames) < - packet->tx_packets) + else if ((ring->coalesce_count % pdata->tx_frames) < tx_packets) tx_set_ic = 1; else tx_set_ic = 0; @@ -1740,7 +1797,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel) XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN, packet->tcp_header_len / 4); - pdata->ext_stats.tx_tso_packets++; + pdata->ext_stats.tx_tso_packets += tx_packets; } else { /* Enable CRC and Pad Insertion */ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0); @@ -1755,6 +1812,13 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel) packet->length); } + if (vxlan) { + XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, VNP, + TX_NORMAL_DESC3_VXLAN_PACKET); + + pdata->ext_stats.tx_vxlan_packets += packet->tx_packets; + } + for (i = cur_index - start_index + 1; i < packet->rdesc_count; i++) { cur_index++; rdata = XGBE_GET_DESC_DATA(ring, cur_index); @@ -1788,8 +1852,11 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel) XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1); /* Save the Tx info to report back during cleanup */ - rdata->tx.packets = packet->tx_packets; - rdata->tx.bytes = packet->tx_bytes; + rdata->tx.packets = tx_packets; + rdata->tx.bytes = tx_bytes; + + pdata->ext_stats.txq_packets[channel->queue_index] += tx_packets; + pdata->ext_stats.txq_bytes[channel->queue_index] += tx_bytes; /* In case the Tx DMA engine is running, make sure everything * is written to the descriptor(s) before setting the OWN bit @@ -1913,9 +1980,28 @@ static int xgbe_dev_read(struct xgbe_channel *channel) rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL); /* Set checksum done indicator as appropriate */ - if (netdev->features & NETIF_F_RXCSUM) + if (netdev->features & NETIF_F_RXCSUM) { 
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CSUM_DONE, 1); + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + TNPCSUM_DONE, 1); + } + + /* Set the tunneled packet indicator */ + if (XGMAC_GET_BITS_LE(rdesc->desc2, RX_NORMAL_DESC2, TNP)) { + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + TNP, 1); + pdata->ext_stats.rx_vxlan_packets++; + + l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T); + switch (l34t) { + case RX_DESC3_L34T_IPV4_UNKNOWN: + case RX_DESC3_L34T_IPV6_UNKNOWN: + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + TNPCSUM_DONE, 0); + break; + } + } /* Check for errors (only valid in last descriptor) */ err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES); @@ -1935,14 +2021,30 @@ static int xgbe_dev_read(struct xgbe_channel *channel) packet->vlan_ctag); } } else { - if ((etlt == 0x05) || (etlt == 0x06)) + unsigned int tnp = XGMAC_GET_BITS(packet->attributes, + RX_PACKET_ATTRIBUTES, TNP); + + if ((etlt == 0x05) || (etlt == 0x06)) { XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CSUM_DONE, 0); - else + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + TNPCSUM_DONE, 0); + pdata->ext_stats.rx_csum_errors++; + } else if (tnp && ((etlt == 0x09) || (etlt == 0x0a))) { + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + CSUM_DONE, 0); + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + TNPCSUM_DONE, 0); + pdata->ext_stats.rx_vxlan_csum_errors++; + } else { XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS, FRAME, 1); + } } + pdata->ext_stats.rxq_packets[channel->queue_index]++; + pdata->ext_stats.rxq_bytes[channel->queue_index] += rdata->rx.len; + DBGPR("<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", channel->name, ring->cur & (ring->rdesc_count - 1), ring->cur); @@ -1964,44 +2066,40 @@ static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc) static int xgbe_enable_int(struct xgbe_channel *channel, enum xgbe_int int_id) { - unsigned int dma_ch_ier; - - dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER); - switch (int_id) { case XGMAC_INT_DMA_CH_SR_TI: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1); break; case XGMAC_INT_DMA_CH_SR_TPS: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 1); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 1); break; case XGMAC_INT_DMA_CH_SR_TBU: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 1); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 1); break; case XGMAC_INT_DMA_CH_SR_RI: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1); break; case XGMAC_INT_DMA_CH_SR_RBU: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1); break; case XGMAC_INT_DMA_CH_SR_RPS: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 1); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 1); break; case XGMAC_INT_DMA_CH_SR_TI_RI: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1); - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1); break; case XGMAC_INT_DMA_CH_SR_FBE: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1); break; case XGMAC_INT_DMA_ALL: - dma_ch_ier |= channel->saved_ier; + channel->curr_ier |= channel->saved_ier; break; default: return -1; } - XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier); + XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, 
channel->curr_ier); return 0; } @@ -2009,45 +2107,41 @@ static int xgbe_enable_int(struct xgbe_channel *channel, static int xgbe_disable_int(struct xgbe_channel *channel, enum xgbe_int int_id) { - unsigned int dma_ch_ier; - - dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER); - switch (int_id) { case XGMAC_INT_DMA_CH_SR_TI: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0); break; case XGMAC_INT_DMA_CH_SR_TPS: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 0); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 0); break; case XGMAC_INT_DMA_CH_SR_TBU: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 0); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 0); break; case XGMAC_INT_DMA_CH_SR_RI: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0); break; case XGMAC_INT_DMA_CH_SR_RBU: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 0); break; case XGMAC_INT_DMA_CH_SR_RPS: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 0); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 0); break; case XGMAC_INT_DMA_CH_SR_TI_RI: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0); - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0); break; case XGMAC_INT_DMA_CH_SR_FBE: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 0); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 0); break; case XGMAC_INT_DMA_ALL: - channel->saved_ier = dma_ch_ier & XGBE_DMA_INTERRUPT_MASK; - dma_ch_ier &= ~XGBE_DMA_INTERRUPT_MASK; + channel->saved_ier = channel->curr_ier; + channel->curr_ier = 0; break; default: return -1; } - XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier); + XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier); return 0; } @@ -3534,5 +3628,10 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if) hw_if->disable_ecc_ded = xgbe_disable_ecc_ded; hw_if->disable_ecc_sec = xgbe_disable_ecc_sec; + /* For VXLAN */ + hw_if->enable_vxlan = xgbe_enable_vxlan; + hw_if->disable_vxlan = xgbe_disable_vxlan; + hw_if->set_vxlan_id = xgbe_set_vxlan_id; + DBGPR("<--xgbe_init_function_ptrs\n"); } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index ecef3ee87b17..608693d11bd7 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -124,6 +124,7 @@ #include #include #include +#include #include "xgbe.h" #include "xgbe-common.h" @@ -732,8 +733,6 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata) unsigned int mac_hfr0, mac_hfr1, mac_hfr2; struct xgbe_hw_features *hw_feat = &pdata->hw_feat; - DBGPR("-->xgbe_get_all_hw_features\n"); - mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R); mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R); mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R); @@ -758,6 +757,7 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata) ADDMACADRSEL); hw_feat->ts_src = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL); hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS); + hw_feat->vxn = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VXN); /* Hardware feature register 1 */ hw_feat->rx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, @@ -828,7 +828,193 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata) hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7); hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7); - 
DBGPR("<--xgbe_get_all_hw_features\n"); + if (netif_msg_probe(pdata)) { + dev_dbg(pdata->dev, "Hardware features:\n"); + + /* Hardware feature register 0 */ + dev_dbg(pdata->dev, " 1GbE support : %s\n", + hw_feat->gmii ? "yes" : "no"); + dev_dbg(pdata->dev, " VLAN hash filter : %s\n", + hw_feat->vlhash ? "yes" : "no"); + dev_dbg(pdata->dev, " MDIO interface : %s\n", + hw_feat->sma ? "yes" : "no"); + dev_dbg(pdata->dev, " Wake-up packet support : %s\n", + hw_feat->rwk ? "yes" : "no"); + dev_dbg(pdata->dev, " Magic packet support : %s\n", + hw_feat->mgk ? "yes" : "no"); + dev_dbg(pdata->dev, " Management counters : %s\n", + hw_feat->mmc ? "yes" : "no"); + dev_dbg(pdata->dev, " ARP offload : %s\n", + hw_feat->aoe ? "yes" : "no"); + dev_dbg(pdata->dev, " IEEE 1588-2008 Timestamp : %s\n", + hw_feat->ts ? "yes" : "no"); + dev_dbg(pdata->dev, " Energy Efficient Ethernet : %s\n", + hw_feat->eee ? "yes" : "no"); + dev_dbg(pdata->dev, " TX checksum offload : %s\n", + hw_feat->tx_coe ? "yes" : "no"); + dev_dbg(pdata->dev, " RX checksum offload : %s\n", + hw_feat->rx_coe ? "yes" : "no"); + dev_dbg(pdata->dev, " Additional MAC addresses : %u\n", + hw_feat->addn_mac); + dev_dbg(pdata->dev, " Timestamp source : %s\n", + (hw_feat->ts_src == 1) ? "internal" : + (hw_feat->ts_src == 2) ? "external" : + (hw_feat->ts_src == 3) ? "internal/external" : "n/a"); + dev_dbg(pdata->dev, " SA/VLAN insertion : %s\n", + hw_feat->sa_vlan_ins ? "yes" : "no"); + dev_dbg(pdata->dev, " VXLAN/NVGRE support : %s\n", + hw_feat->vxn ? "yes" : "no"); + + /* Hardware feature register 1 */ + dev_dbg(pdata->dev, " RX fifo size : %u\n", + hw_feat->rx_fifo_size); + dev_dbg(pdata->dev, " TX fifo size : %u\n", + hw_feat->tx_fifo_size); + dev_dbg(pdata->dev, " IEEE 1588 high word : %s\n", + hw_feat->adv_ts_hi ? "yes" : "no"); + dev_dbg(pdata->dev, " DMA width : %u\n", + hw_feat->dma_width); + dev_dbg(pdata->dev, " Data Center Bridging : %s\n", + hw_feat->dcb ? "yes" : "no"); + dev_dbg(pdata->dev, " Split header : %s\n", + hw_feat->sph ? "yes" : "no"); + dev_dbg(pdata->dev, " TCP Segmentation Offload : %s\n", + hw_feat->tso ? "yes" : "no"); + dev_dbg(pdata->dev, " Debug memory interface : %s\n", + hw_feat->dma_debug ? "yes" : "no"); + dev_dbg(pdata->dev, " Receive Side Scaling : %s\n", + hw_feat->rss ? 
"yes" : "no"); + dev_dbg(pdata->dev, " Traffic Class count : %u\n", + hw_feat->tc_cnt); + dev_dbg(pdata->dev, " Hash table size : %u\n", + hw_feat->hash_table_size); + dev_dbg(pdata->dev, " L3/L4 Filters : %u\n", + hw_feat->l3l4_filter_num); + + /* Hardware feature register 2 */ + dev_dbg(pdata->dev, " RX queue count : %u\n", + hw_feat->rx_q_cnt); + dev_dbg(pdata->dev, " TX queue count : %u\n", + hw_feat->tx_q_cnt); + dev_dbg(pdata->dev, " RX DMA channel count : %u\n", + hw_feat->rx_ch_cnt); + dev_dbg(pdata->dev, " TX DMA channel count : %u\n", + hw_feat->rx_ch_cnt); + dev_dbg(pdata->dev, " PPS outputs : %u\n", + hw_feat->pps_out_num); + dev_dbg(pdata->dev, " Auxiliary snapshot inputs : %u\n", + hw_feat->aux_snap_num); + } +} + +static void xgbe_disable_vxlan_offloads(struct xgbe_prv_data *pdata) +{ + struct net_device *netdev = pdata->netdev; + + if (!pdata->vxlan_offloads_set) + return; + + netdev_info(netdev, "disabling VXLAN offloads\n"); + + netdev->hw_enc_features &= ~(NETIF_F_SG | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_RXCSUM | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_GRO | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM); + + netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM); + + pdata->vxlan_offloads_set = 0; +} + +static void xgbe_disable_vxlan_hw(struct xgbe_prv_data *pdata) +{ + if (!pdata->vxlan_port_set) + return; + + pdata->hw_if.disable_vxlan(pdata); + + pdata->vxlan_port_set = 0; + pdata->vxlan_port = 0; +} + +static void xgbe_disable_vxlan_accel(struct xgbe_prv_data *pdata) +{ + xgbe_disable_vxlan_offloads(pdata); + + xgbe_disable_vxlan_hw(pdata); +} + +static void xgbe_enable_vxlan_offloads(struct xgbe_prv_data *pdata) +{ + struct net_device *netdev = pdata->netdev; + + if (pdata->vxlan_offloads_set) + return; + + netdev_info(netdev, "enabling VXLAN offloads\n"); + + netdev->hw_enc_features |= NETIF_F_SG | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_RXCSUM | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_GRO | + pdata->vxlan_features; + + netdev->features |= pdata->vxlan_features; + + pdata->vxlan_offloads_set = 1; +} + +static void xgbe_enable_vxlan_hw(struct xgbe_prv_data *pdata) +{ + struct xgbe_vxlan_data *vdata; + + if (pdata->vxlan_port_set) + return; + + if (list_empty(&pdata->vxlan_ports)) + return; + + vdata = list_first_entry(&pdata->vxlan_ports, + struct xgbe_vxlan_data, list); + + pdata->vxlan_port_set = 1; + pdata->vxlan_port = be16_to_cpu(vdata->port); + + pdata->hw_if.enable_vxlan(pdata); +} + +static void xgbe_enable_vxlan_accel(struct xgbe_prv_data *pdata) +{ + /* VXLAN acceleration desired? */ + if (!pdata->vxlan_features) + return; + + /* VXLAN acceleration possible? 
*/ + if (pdata->vxlan_force_disable) + return; + + xgbe_enable_vxlan_hw(pdata); + + xgbe_enable_vxlan_offloads(pdata); +} + +static void xgbe_reset_vxlan_accel(struct xgbe_prv_data *pdata) +{ + xgbe_disable_vxlan_hw(pdata); + + if (pdata->vxlan_features) + xgbe_enable_vxlan_offloads(pdata); + + pdata->vxlan_force_disable = 0; } static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add) @@ -887,7 +1073,7 @@ static int xgbe_request_irqs(struct xgbe_prv_data *pdata) (unsigned long)pdata); ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0, - netdev->name, pdata); + netdev_name(netdev), pdata); if (ret) { netdev_alert(netdev, "error requesting irq %d\n", pdata->dev_irq); @@ -1154,6 +1340,8 @@ static int xgbe_start(struct xgbe_prv_data *pdata) hw_if->enable_tx(pdata); hw_if->enable_rx(pdata); + udp_tunnel_get_rx_info(netdev); + netif_tx_start_all_queues(netdev); xgbe_start_timers(pdata); @@ -1195,6 +1383,8 @@ static void xgbe_stop(struct xgbe_prv_data *pdata) xgbe_stop_timers(pdata); flush_workqueue(pdata->dev_workqueue); + xgbe_reset_vxlan_accel(pdata); + hw_if->disable_tx(pdata); hw_if->disable_rx(pdata); @@ -1483,10 +1673,18 @@ static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet) if (ret) return ret; - packet->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb); - packet->tcp_header_len = tcp_hdrlen(skb); + if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, VXLAN)) { + packet->header_len = skb_inner_transport_offset(skb) + + inner_tcp_hdrlen(skb); + packet->tcp_header_len = inner_tcp_hdrlen(skb); + } else { + packet->header_len = skb_transport_offset(skb) + + tcp_hdrlen(skb); + packet->tcp_header_len = tcp_hdrlen(skb); + } packet->tcp_payload_len = skb->len - packet->header_len; packet->mss = skb_shinfo(skb)->gso_size; + DBGPR(" packet->header_len=%u\n", packet->header_len); DBGPR(" packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n", packet->tcp_header_len, packet->tcp_payload_len); @@ -1501,6 +1699,49 @@ static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet) return 0; } +static bool xgbe_is_vxlan(struct xgbe_prv_data *pdata, struct sk_buff *skb) +{ + struct xgbe_vxlan_data *vdata; + + if (pdata->vxlan_force_disable) + return false; + + if (!skb->encapsulation) + return false; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return false; + + switch (skb->protocol) { + case htons(ETH_P_IP): + if (ip_hdr(skb)->protocol != IPPROTO_UDP) + return false; + break; + + case htons(ETH_P_IPV6): + if (ipv6_hdr(skb)->nexthdr != IPPROTO_UDP) + return false; + break; + + default: + return false; + } + + /* See if we have the UDP port in our list */ + list_for_each_entry(vdata, &pdata->vxlan_ports, list) { + if ((skb->protocol == htons(ETH_P_IP)) && + (vdata->sa_family == AF_INET) && + (vdata->port == udp_hdr(skb)->dest)) + return true; + else if ((skb->protocol == htons(ETH_P_IPV6)) && + (vdata->sa_family == AF_INET6) && + (vdata->port == udp_hdr(skb)->dest)) + return true; + } + + return false; +} + static int xgbe_is_tso(struct sk_buff *skb) { if (skb->ip_summed != CHECKSUM_PARTIAL) @@ -1549,6 +1790,10 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata, XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, CSUM_ENABLE, 1); + if (xgbe_is_vxlan(pdata, skb)) + XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, + VXLAN, 1); + if (skb_vlan_tag_present(skb)) { /* VLAN requires an extra descriptor if tag is different */ if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag) @@ -1589,16 +1834,42 @@ 
static int xgbe_open(struct net_device *netdev) DBGPR("-->xgbe_open\n"); + /* Create the various names based on netdev name */ + snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs", + netdev_name(netdev)); + + snprintf(pdata->ecc_name, sizeof(pdata->ecc_name) - 1, "%s-ecc", + netdev_name(netdev)); + + snprintf(pdata->i2c_name, sizeof(pdata->i2c_name) - 1, "%s-i2c", + netdev_name(netdev)); + + /* Create workqueues */ + pdata->dev_workqueue = + create_singlethread_workqueue(netdev_name(netdev)); + if (!pdata->dev_workqueue) { + netdev_err(netdev, "device workqueue creation failed\n"); + return -ENOMEM; + } + + pdata->an_workqueue = + create_singlethread_workqueue(pdata->an_name); + if (!pdata->an_workqueue) { + netdev_err(netdev, "phy workqueue creation failed\n"); + ret = -ENOMEM; + goto err_dev_wq; + } + /* Reset the phy settings */ ret = xgbe_phy_reset(pdata); if (ret) - return ret; + goto err_an_wq; /* Enable the clocks */ ret = clk_prepare_enable(pdata->sysclk); if (ret) { netdev_alert(netdev, "dma clk_prepare_enable failed\n"); - return ret; + goto err_an_wq; } ret = clk_prepare_enable(pdata->ptpclk); @@ -1651,6 +1922,12 @@ static int xgbe_open(struct net_device *netdev) err_sysclk: clk_disable_unprepare(pdata->sysclk); +err_an_wq: + destroy_workqueue(pdata->an_workqueue); + +err_dev_wq: + destroy_workqueue(pdata->dev_workqueue); + return ret; } @@ -1674,6 +1951,12 @@ static int xgbe_close(struct net_device *netdev) clk_disable_unprepare(pdata->ptpclk); clk_disable_unprepare(pdata->sysclk); + flush_workqueue(pdata->an_workqueue); + destroy_workqueue(pdata->an_workqueue); + + flush_workqueue(pdata->dev_workqueue); + destroy_workqueue(pdata->dev_workqueue); + set_bit(XGBE_DOWN, &pdata->dev_state); DBGPR("<--xgbe_close\n"); @@ -1918,18 +2201,18 @@ static void xgbe_poll_controller(struct net_device *netdev) } #endif /* End CONFIG_NET_POLL_CONTROLLER */ -static int xgbe_setup_tc(struct net_device *netdev, u32 handle, u32 chain_index, - __be16 proto, - struct tc_to_netdev *tc_to_netdev) +static int xgbe_setup_tc(struct net_device *netdev, enum tc_setup_type type, + void *type_data) { struct xgbe_prv_data *pdata = netdev_priv(netdev); + struct tc_mqprio_qopt *mqprio = type_data; u8 tc; - if (tc_to_netdev->type != TC_SETUP_MQPRIO) - return -EINVAL; + if (type != TC_SETUP_MQPRIO) + return -EOPNOTSUPP; - tc_to_netdev->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; - tc = tc_to_netdev->mqprio->num_tc; + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + tc = mqprio->num_tc; if (tc > pdata->hw_feat.tc_cnt) return -EINVAL; @@ -1940,18 +2223,83 @@ static int xgbe_setup_tc(struct net_device *netdev, u32 handle, u32 chain_index, return 0; } +static netdev_features_t xgbe_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + netdev_features_t vxlan_base, vxlan_mask; + + vxlan_base = NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RX_UDP_TUNNEL_PORT; + vxlan_mask = vxlan_base | NETIF_F_GSO_UDP_TUNNEL_CSUM; + + pdata->vxlan_features = features & vxlan_mask; + + /* Only fix VXLAN-related features */ + if (!pdata->vxlan_features) + return features; + + /* If VXLAN isn't supported then clear any features: + * This is needed because NETIF_F_RX_UDP_TUNNEL_PORT gets + * automatically set if ndo_udp_tunnel_add is set. 
+ */ + if (!pdata->hw_feat.vxn) + return features & ~vxlan_mask; + + /* VXLAN CSUM requires VXLAN base */ + if ((features & NETIF_F_GSO_UDP_TUNNEL_CSUM) && + !(features & NETIF_F_GSO_UDP_TUNNEL)) { + netdev_notice(netdev, + "forcing tx udp tunnel support\n"); + features |= NETIF_F_GSO_UDP_TUNNEL; + } + + /* Can't do one without doing the other */ + if ((features & vxlan_base) != vxlan_base) { + netdev_notice(netdev, + "forcing both tx and rx udp tunnel support\n"); + features |= vxlan_base; + } + + if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) { + if (!(features & NETIF_F_GSO_UDP_TUNNEL_CSUM)) { + netdev_notice(netdev, + "forcing tx udp tunnel checksumming on\n"); + features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; + } + } else { + if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM) { + netdev_notice(netdev, + "forcing tx udp tunnel checksumming off\n"); + features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM; + } + } + + pdata->vxlan_features = features & vxlan_mask; + + /* Adjust UDP Tunnel based on current state */ + if (pdata->vxlan_force_disable) { + netdev_notice(netdev, + "VXLAN acceleration disabled, turning off udp tunnel features\n"); + features &= ~vxlan_mask; + } + + return features; +} + static int xgbe_set_features(struct net_device *netdev, netdev_features_t features) { struct xgbe_prv_data *pdata = netdev_priv(netdev); struct xgbe_hw_if *hw_if = &pdata->hw_if; netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter; + netdev_features_t udp_tunnel; int ret = 0; rxhash = pdata->netdev_features & NETIF_F_RXHASH; rxcsum = pdata->netdev_features & NETIF_F_RXCSUM; rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX; rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER; + udp_tunnel = pdata->netdev_features & NETIF_F_GSO_UDP_TUNNEL; if ((features & NETIF_F_RXHASH) && !rxhash) ret = hw_if->enable_rss(pdata); @@ -1975,6 +2323,11 @@ static int xgbe_set_features(struct net_device *netdev, else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter) hw_if->disable_rx_vlan_filtering(pdata); + if ((features & NETIF_F_GSO_UDP_TUNNEL) && !udp_tunnel) + xgbe_enable_vxlan_accel(pdata); + else if (!(features & NETIF_F_GSO_UDP_TUNNEL) && udp_tunnel) + xgbe_disable_vxlan_accel(pdata); + pdata->netdev_features = features; DBGPR("<--xgbe_set_features\n"); @@ -1982,6 +2335,111 @@ static int xgbe_set_features(struct net_device *netdev, return 0; } +static void xgbe_udp_tunnel_add(struct net_device *netdev, + struct udp_tunnel_info *ti) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + struct xgbe_vxlan_data *vdata; + + if (!pdata->hw_feat.vxn) + return; + + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; + + pdata->vxlan_port_count++; + + netif_dbg(pdata, drv, netdev, + "adding VXLAN tunnel, family=%hx/port=%hx\n", + ti->sa_family, be16_to_cpu(ti->port)); + + if (pdata->vxlan_force_disable) + return; + + vdata = kzalloc(sizeof(*vdata), GFP_ATOMIC); + if (!vdata) { + /* Can no longer properly track VXLAN ports */ + pdata->vxlan_force_disable = 1; + netif_dbg(pdata, drv, netdev, + "internal error, disabling VXLAN accelerations\n"); + + xgbe_disable_vxlan_accel(pdata); + + return; + } + vdata->sa_family = ti->sa_family; + vdata->port = ti->port; + + list_add_tail(&vdata->list, &pdata->vxlan_ports); + + /* First port added? 
*/ + if (pdata->vxlan_port_count == 1) { + xgbe_enable_vxlan_accel(pdata); + + return; + } +} + +static void xgbe_udp_tunnel_del(struct net_device *netdev, + struct udp_tunnel_info *ti) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + struct xgbe_vxlan_data *vdata; + + if (!pdata->hw_feat.vxn) + return; + + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; + + netif_dbg(pdata, drv, netdev, + "deleting VXLAN tunnel, family=%hx/port=%hx\n", + ti->sa_family, be16_to_cpu(ti->port)); + + /* Don't need safe version since loop terminates with deletion */ + list_for_each_entry(vdata, &pdata->vxlan_ports, list) { + if (vdata->sa_family != ti->sa_family) + continue; + + if (vdata->port != ti->port) + continue; + + list_del(&vdata->list); + kfree(vdata); + + break; + } + + pdata->vxlan_port_count--; + if (!pdata->vxlan_port_count) { + xgbe_reset_vxlan_accel(pdata); + + return; + } + + if (pdata->vxlan_force_disable) + return; + + /* See if VXLAN tunnel id needs to be changed */ + vdata = list_first_entry(&pdata->vxlan_ports, + struct xgbe_vxlan_data, list); + if (pdata->vxlan_port == be16_to_cpu(vdata->port)) + return; + + pdata->vxlan_port = be16_to_cpu(vdata->port); + pdata->hw_if.set_vxlan_id(pdata); +} + +static netdev_features_t xgbe_features_check(struct sk_buff *skb, + struct net_device *netdev, + netdev_features_t features) +{ + features = vlan_features_check(skb, features); + features = vxlan_features_check(skb, features); + + return features; +} + static const struct net_device_ops xgbe_netdev_ops = { .ndo_open = xgbe_open, .ndo_stop = xgbe_close, @@ -1999,7 +2457,11 @@ static const struct net_device_ops xgbe_netdev_ops = { .ndo_poll_controller = xgbe_poll_controller, #endif .ndo_setup_tc = xgbe_setup_tc, + .ndo_fix_features = xgbe_fix_features, .ndo_set_features = xgbe_set_features, + .ndo_udp_tunnel_add = xgbe_udp_tunnel_add, + .ndo_udp_tunnel_del = xgbe_udp_tunnel_del, + .ndo_features_check = xgbe_features_check, }; const struct net_device_ops *xgbe_get_netdev_ops(void) @@ -2311,6 +2773,15 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) RX_PACKET_ATTRIBUTES, CSUM_DONE)) skb->ip_summed = CHECKSUM_UNNECESSARY; + if (XGMAC_GET_BITS(packet->attributes, + RX_PACKET_ATTRIBUTES, TNP)) { + skb->encapsulation = 1; + + if (XGMAC_GET_BITS(packet->attributes, + RX_PACKET_ATTRIBUTES, TNPCSUM_DONE)) + skb->csum_level = 1; + } + if (XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, VLAN_CTAG)) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c index 67a2e52ad25d..ff397bb25042 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c @@ -146,6 +146,7 @@ static const struct xgbe_stats xgbe_gstring_stats[] = { XGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb), XGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb), XGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g), + XGMAC_EXT_STAT("tx_vxlan_packets", tx_vxlan_packets), XGMAC_EXT_STAT("tx_tso_packets", tx_tso_packets), XGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb), XGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb), @@ -162,6 +163,7 @@ static const struct xgbe_stats xgbe_gstring_stats[] = { XGMAC_MMC_STAT("rx_broadcast_packets", rxbroadcastframes_g), XGMAC_MMC_STAT("rx_multicast_packets", rxmulticastframes_g), XGMAC_MMC_STAT("rx_vlan_packets", rxvlanframes_gb), + XGMAC_EXT_STAT("rx_vxlan_packets", rx_vxlan_packets), 
XGMAC_MMC_STAT("rx_64_byte_packets", rx64octets_gb), XGMAC_MMC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb), XGMAC_MMC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb), @@ -177,6 +179,8 @@ static const struct xgbe_stats xgbe_gstring_stats[] = { XGMAC_MMC_STAT("rx_out_of_range_errors", rxoutofrangetype), XGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow), XGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror), + XGMAC_EXT_STAT("rx_csum_errors", rx_csum_errors), + XGMAC_EXT_STAT("rx_vxlan_csum_errors", rx_vxlan_csum_errors), XGMAC_MMC_STAT("rx_pause_frames", rxpauseframes), XGMAC_EXT_STAT("rx_split_header_packets", rx_split_header_packets), XGMAC_EXT_STAT("rx_buffer_unavailable", rx_buffer_unavailable), @@ -186,6 +190,7 @@ static const struct xgbe_stats xgbe_gstring_stats[] = { static void xgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { + struct xgbe_prv_data *pdata = netdev_priv(netdev); int i; switch (stringset) { @@ -195,6 +200,18 @@ static void xgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data) ETH_GSTRING_LEN); data += ETH_GSTRING_LEN; } + for (i = 0; i < pdata->tx_ring_count; i++) { + sprintf(data, "txq_%u_packets", i); + data += ETH_GSTRING_LEN; + sprintf(data, "txq_%u_bytes", i); + data += ETH_GSTRING_LEN; + } + for (i = 0; i < pdata->rx_ring_count; i++) { + sprintf(data, "rxq_%u_packets", i); + data += ETH_GSTRING_LEN; + sprintf(data, "rxq_%u_bytes", i); + data += ETH_GSTRING_LEN; + } break; } } @@ -211,15 +228,26 @@ static void xgbe_get_ethtool_stats(struct net_device *netdev, stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset; *data++ = *(u64 *)stat; } + for (i = 0; i < pdata->tx_ring_count; i++) { + *data++ = pdata->ext_stats.txq_packets[i]; + *data++ = pdata->ext_stats.txq_bytes[i]; + } + for (i = 0; i < pdata->rx_ring_count; i++) { + *data++ = pdata->ext_stats.rxq_packets[i]; + *data++ = pdata->ext_stats.rxq_bytes[i]; + } } static int xgbe_get_sset_count(struct net_device *netdev, int stringset) { + struct xgbe_prv_data *pdata = netdev_priv(netdev); int ret; switch (stringset) { case ETH_SS_STATS: - ret = XGBE_STATS_COUNT; + ret = XGBE_STATS_COUNT + + (pdata->tx_ring_count * 2) + + (pdata->rx_ring_count * 2); break; default: @@ -243,6 +271,7 @@ static int xgbe_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { struct xgbe_prv_data *pdata = netdev_priv(netdev); + struct ethtool_link_ksettings *lks = &pdata->phy.lks; int ret = 0; if (pause->autoneg && (pdata->phy.autoneg != AUTONEG_ENABLE)) { @@ -255,16 +284,21 @@ static int xgbe_set_pauseparam(struct net_device *netdev, pdata->phy.tx_pause = pause->tx_pause; pdata->phy.rx_pause = pause->rx_pause; - pdata->phy.advertising &= ~ADVERTISED_Pause; - pdata->phy.advertising &= ~ADVERTISED_Asym_Pause; + XGBE_CLR_ADV(lks, Pause); + XGBE_CLR_ADV(lks, Asym_Pause); if (pause->rx_pause) { - pdata->phy.advertising |= ADVERTISED_Pause; - pdata->phy.advertising |= ADVERTISED_Asym_Pause; + XGBE_SET_ADV(lks, Pause); + XGBE_SET_ADV(lks, Asym_Pause); } - if (pause->tx_pause) - pdata->phy.advertising ^= ADVERTISED_Asym_Pause; + if (pause->tx_pause) { + /* Equivalent to XOR of Asym_Pause */ + if (XGBE_ADV(lks, Asym_Pause)) + XGBE_CLR_ADV(lks, Asym_Pause); + else + XGBE_SET_ADV(lks, Asym_Pause); + } if (netif_running(netdev)) ret = pdata->phy_if.phy_config_aneg(pdata); @@ -276,22 +310,20 @@ static int xgbe_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { struct xgbe_prv_data *pdata = netdev_priv(netdev); + struct 
ethtool_link_ksettings *lks = &pdata->phy.lks; cmd->base.phy_address = pdata->phy.address; - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, - pdata->phy.supported); - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, - pdata->phy.advertising); - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising, - pdata->phy.lp_advertising); - cmd->base.autoneg = pdata->phy.autoneg; cmd->base.speed = pdata->phy.speed; cmd->base.duplex = pdata->phy.duplex; cmd->base.port = PORT_NONE; + XGBE_LM_COPY(cmd, supported, lks, supported); + XGBE_LM_COPY(cmd, advertising, lks, advertising); + XGBE_LM_COPY(cmd, lp_advertising, lks, lp_advertising); + return 0; } @@ -299,7 +331,8 @@ static int xgbe_set_link_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *cmd) { struct xgbe_prv_data *pdata = netdev_priv(netdev); - u32 advertising; + struct ethtool_link_ksettings *lks = &pdata->phy.lks; + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); u32 speed; int ret; @@ -331,15 +364,17 @@ static int xgbe_set_link_ksettings(struct net_device *netdev, } } - ethtool_convert_link_mode_to_legacy_u32(&advertising, - cmd->link_modes.advertising); - netif_dbg(pdata, link, netdev, - "requested advertisement %#x, phy supported %#x\n", - advertising, pdata->phy.supported); + "requested advertisement 0x%*pb, phy supported 0x%*pb\n", + __ETHTOOL_LINK_MODE_MASK_NBITS, cmd->link_modes.advertising, + __ETHTOOL_LINK_MODE_MASK_NBITS, lks->link_modes.supported); - advertising &= pdata->phy.supported; - if ((cmd->base.autoneg == AUTONEG_ENABLE) && !advertising) { + bitmap_and(advertising, + cmd->link_modes.advertising, lks->link_modes.supported, + __ETHTOOL_LINK_MODE_MASK_NBITS); + + if ((cmd->base.autoneg == AUTONEG_ENABLE) && + bitmap_empty(advertising, __ETHTOOL_LINK_MODE_MASK_NBITS)) { netdev_err(netdev, "unsupported requested advertisement\n"); return -EINVAL; @@ -349,12 +384,13 @@ static int xgbe_set_link_ksettings(struct net_device *netdev, pdata->phy.autoneg = cmd->base.autoneg; pdata->phy.speed = speed; pdata->phy.duplex = cmd->base.duplex; - pdata->phy.advertising = advertising; + bitmap_copy(lks->link_modes.advertising, advertising, + __ETHTOOL_LINK_MODE_MASK_NBITS); if (cmd->base.autoneg == AUTONEG_ENABLE) - pdata->phy.advertising |= ADVERTISED_Autoneg; + XGBE_SET_ADV(lks, Autoneg); else - pdata->phy.advertising &= ~ADVERTISED_Autoneg; + XGBE_CLR_ADV(lks, Autoneg); if (netif_running(netdev)) ret = pdata->phy_if.phy_config_aneg(pdata); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index 500147d9e3c8..d91fa595be98 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -120,6 +120,7 @@ #include #include #include +#include #include "xgbe.h" #include "xgbe-common.h" @@ -192,6 +193,7 @@ struct xgbe_prv_data *xgbe_alloc_pdata(struct device *dev) mutex_init(&pdata->i2c_mutex); init_completion(&pdata->i2c_complete); init_completion(&pdata->mdio_complete); + INIT_LIST_HEAD(&pdata->vxlan_ports); pdata->msg_enable = netif_msg_init(debug, default_msg_level); @@ -373,6 +375,28 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata) if (pdata->hw_feat.rss) netdev->hw_features |= NETIF_F_RXHASH; + if (pdata->hw_feat.vxn) { + netdev->hw_enc_features = NETIF_F_SG | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_RXCSUM | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_GRO | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_RX_UDP_TUNNEL_PORT; + + 
netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_RX_UDP_TUNNEL_PORT; + + pdata->vxlan_offloads_set = 1; + pdata->vxlan_features = NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_RX_UDP_TUNNEL_PORT; + } + netdev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | @@ -399,35 +423,6 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata) return ret; } - /* Create the PHY/ANEG name based on netdev name */ - snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs", - netdev_name(netdev)); - - /* Create the ECC name based on netdev name */ - snprintf(pdata->ecc_name, sizeof(pdata->ecc_name) - 1, "%s-ecc", - netdev_name(netdev)); - - /* Create the I2C name based on netdev name */ - snprintf(pdata->i2c_name, sizeof(pdata->i2c_name) - 1, "%s-i2c", - netdev_name(netdev)); - - /* Create workqueues */ - pdata->dev_workqueue = - create_singlethread_workqueue(netdev_name(netdev)); - if (!pdata->dev_workqueue) { - netdev_err(netdev, "device workqueue creation failed\n"); - ret = -ENOMEM; - goto err_netdev; - } - - pdata->an_workqueue = - create_singlethread_workqueue(pdata->an_name); - if (!pdata->an_workqueue) { - netdev_err(netdev, "phy workqueue creation failed\n"); - ret = -ENOMEM; - goto err_wq; - } - if (IS_REACHABLE(CONFIG_PTP_1588_CLOCK)) xgbe_ptp_register(pdata); @@ -439,14 +434,6 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata) pdata->rx_ring_count); return 0; - -err_wq: - destroy_workqueue(pdata->dev_workqueue); - -err_netdev: - unregister_netdev(netdev); - - return ret; } void xgbe_deconfig_netdev(struct xgbe_prv_data *pdata) @@ -458,21 +445,45 @@ void xgbe_deconfig_netdev(struct xgbe_prv_data *pdata) if (IS_REACHABLE(CONFIG_PTP_1588_CLOCK)) xgbe_ptp_unregister(pdata); - pdata->phy_if.phy_exit(pdata); - - flush_workqueue(pdata->an_workqueue); - destroy_workqueue(pdata->an_workqueue); - - flush_workqueue(pdata->dev_workqueue); - destroy_workqueue(pdata->dev_workqueue); - unregister_netdev(netdev); + + pdata->phy_if.phy_exit(pdata); } +static int xgbe_netdev_event(struct notifier_block *nb, unsigned long event, + void *data) +{ + struct net_device *netdev = netdev_notifier_info_to_dev(data); + struct xgbe_prv_data *pdata = netdev_priv(netdev); + + if (netdev->netdev_ops != xgbe_get_netdev_ops()) + goto out; + + switch (event) { + case NETDEV_CHANGENAME: + xgbe_debugfs_rename(pdata); + break; + + default: + break; + } + +out: + return NOTIFY_DONE; +} + +static struct notifier_block xgbe_netdev_notifier = { + .notifier_call = xgbe_netdev_event, +}; + static int __init xgbe_mod_init(void) { int ret; + ret = register_netdevice_notifier(&xgbe_netdev_notifier); + if (ret) + return ret; + ret = xgbe_platform_init(); if (ret) return ret; @@ -489,6 +500,8 @@ static void __exit xgbe_mod_exit(void) xgbe_pci_exit(); xgbe_platform_exit(); + + unregister_netdevice_notifier(&xgbe_netdev_notifier); } module_init(xgbe_mod_init); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index 80684914dd8a..072b9f664597 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -615,12 +615,14 @@ static enum xgbe_an xgbe_an73_page_received(struct xgbe_prv_data *pdata) static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata) { + struct ethtool_link_ksettings *lks = &pdata->phy.lks; + /* Be sure we aren't looping trying to negotiate */ if (xgbe_in_kr_mode(pdata)) { pdata->kr_state = XGBE_RX_ERROR; - if (!(pdata->phy.advertising 
& ADVERTISED_1000baseKX_Full) && - !(pdata->phy.advertising & ADVERTISED_2500baseX_Full)) + if (!XGBE_ADV(lks, 1000baseKX_Full) && + !XGBE_ADV(lks, 2500baseX_Full)) return XGBE_AN_NO_LINK; if (pdata->kx_state != XGBE_RX_BPA) @@ -628,7 +630,7 @@ static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata) } else { pdata->kx_state = XGBE_RX_ERROR; - if (!(pdata->phy.advertising & ADVERTISED_10000baseKR_Full)) + if (!XGBE_ADV(lks, 10000baseKR_Full)) return XGBE_AN_NO_LINK; if (pdata->kr_state != XGBE_RX_BPA) @@ -944,18 +946,19 @@ static void xgbe_an_state_machine(struct work_struct *work) static void xgbe_an37_init(struct xgbe_prv_data *pdata) { - unsigned int advertising, reg; + struct ethtool_link_ksettings lks; + unsigned int reg; - advertising = pdata->phy_if.phy_impl.an_advertising(pdata); + pdata->phy_if.phy_impl.an_advertising(pdata, &lks); /* Set up Advertisement register */ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_ADVERTISE); - if (advertising & ADVERTISED_Pause) + if (XGBE_ADV(&lks, Pause)) reg |= 0x100; else reg &= ~0x100; - if (advertising & ADVERTISED_Asym_Pause) + if (XGBE_ADV(&lks, Asym_Pause)) reg |= 0x80; else reg &= ~0x80; @@ -982,6 +985,8 @@ static void xgbe_an37_init(struct xgbe_prv_data *pdata) break; } + reg |= XGBE_AN_CL37_MII_CTRL_8BIT; + XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL, reg); netif_dbg(pdata, link, pdata->netdev, "CL37 AN (%s) initialized\n", @@ -990,13 +995,14 @@ static void xgbe_an37_init(struct xgbe_prv_data *pdata) static void xgbe_an73_init(struct xgbe_prv_data *pdata) { - unsigned int advertising, reg; + struct ethtool_link_ksettings lks; + unsigned int reg; - advertising = pdata->phy_if.phy_impl.an_advertising(pdata); + pdata->phy_if.phy_impl.an_advertising(pdata, &lks); /* Set up Advertisement register 3 first */ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2); - if (advertising & ADVERTISED_10000baseR_FEC) + if (XGBE_ADV(&lks, 10000baseR_FEC)) reg |= 0xc000; else reg &= ~0xc000; @@ -1005,13 +1011,13 @@ static void xgbe_an73_init(struct xgbe_prv_data *pdata) /* Set up Advertisement register 2 next */ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1); - if (advertising & ADVERTISED_10000baseKR_Full) + if (XGBE_ADV(&lks, 10000baseKR_Full)) reg |= 0x80; else reg &= ~0x80; - if ((advertising & ADVERTISED_1000baseKX_Full) || - (advertising & ADVERTISED_2500baseX_Full)) + if (XGBE_ADV(&lks, 1000baseKX_Full) || + XGBE_ADV(&lks, 2500baseX_Full)) reg |= 0x20; else reg &= ~0x20; @@ -1020,12 +1026,12 @@ static void xgbe_an73_init(struct xgbe_prv_data *pdata) /* Set up Advertisement register 1 last */ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE); - if (advertising & ADVERTISED_Pause) + if (XGBE_ADV(&lks, Pause)) reg |= 0x400; else reg &= ~0x400; - if (advertising & ADVERTISED_Asym_Pause) + if (XGBE_ADV(&lks, Asym_Pause)) reg |= 0x800; else reg &= ~0x800; @@ -1281,9 +1287,10 @@ static enum xgbe_mode xgbe_phy_status_aneg(struct xgbe_prv_data *pdata) static void xgbe_phy_status_result(struct xgbe_prv_data *pdata) { + struct ethtool_link_ksettings *lks = &pdata->phy.lks; enum xgbe_mode mode; - pdata->phy.lp_advertising = 0; + XGBE_ZERO_LP_ADV(lks); if ((pdata->phy.autoneg != AUTONEG_ENABLE) || pdata->parallel_detect) mode = xgbe_cur_mode(pdata); @@ -1513,17 +1520,21 @@ static void xgbe_dump_phy_registers(struct xgbe_prv_data *pdata) static int xgbe_phy_best_advertised_speed(struct xgbe_prv_data *pdata) { - if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full) + struct ethtool_link_ksettings 
*lks = &pdata->phy.lks; + + if (XGBE_ADV(lks, 10000baseKR_Full)) return SPEED_10000; - else if (pdata->phy.advertising & ADVERTISED_10000baseT_Full) + else if (XGBE_ADV(lks, 10000baseT_Full)) return SPEED_10000; - else if (pdata->phy.advertising & ADVERTISED_2500baseX_Full) + else if (XGBE_ADV(lks, 2500baseX_Full)) return SPEED_2500; - else if (pdata->phy.advertising & ADVERTISED_1000baseKX_Full) + else if (XGBE_ADV(lks, 2500baseT_Full)) + return SPEED_2500; + else if (XGBE_ADV(lks, 1000baseKX_Full)) return SPEED_1000; - else if (pdata->phy.advertising & ADVERTISED_1000baseT_Full) + else if (XGBE_ADV(lks, 1000baseT_Full)) return SPEED_1000; - else if (pdata->phy.advertising & ADVERTISED_100baseT_Full) + else if (XGBE_ADV(lks, 100baseT_Full)) return SPEED_100; return SPEED_UNKNOWN; @@ -1531,13 +1542,12 @@ static int xgbe_phy_best_advertised_speed(struct xgbe_prv_data *pdata) static void xgbe_phy_exit(struct xgbe_prv_data *pdata) { - xgbe_phy_stop(pdata); - pdata->phy_if.phy_impl.exit(pdata); } static int xgbe_phy_init(struct xgbe_prv_data *pdata) { + struct ethtool_link_ksettings *lks = &pdata->phy.lks; int ret; mutex_init(&pdata->an_mutex); @@ -1555,11 +1565,13 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) ret = pdata->phy_if.phy_impl.init(pdata); if (ret) return ret; - pdata->phy.advertising = pdata->phy.supported; + + /* Copy supported link modes to advertising link modes */ + XGBE_LM_COPY(lks, advertising, lks, supported); pdata->phy.address = 0; - if (pdata->phy.advertising & ADVERTISED_Autoneg) { + if (XGBE_ADV(lks, Autoneg)) { pdata->phy.autoneg = AUTONEG_ENABLE; pdata->phy.speed = SPEED_UNKNOWN; pdata->phy.duplex = DUPLEX_UNKNOWN; @@ -1576,16 +1588,21 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) pdata->phy.rx_pause = pdata->rx_pause; /* Fix up Flow Control advertising */ - pdata->phy.advertising &= ~ADVERTISED_Pause; - pdata->phy.advertising &= ~ADVERTISED_Asym_Pause; + XGBE_CLR_ADV(lks, Pause); + XGBE_CLR_ADV(lks, Asym_Pause); if (pdata->rx_pause) { - pdata->phy.advertising |= ADVERTISED_Pause; - pdata->phy.advertising |= ADVERTISED_Asym_Pause; + XGBE_SET_ADV(lks, Pause); + XGBE_SET_ADV(lks, Asym_Pause); } - if (pdata->tx_pause) - pdata->phy.advertising ^= ADVERTISED_Asym_Pause; + if (pdata->tx_pause) { + /* Equivalent to XOR of Asym_Pause */ + if (XGBE_ADV(lks, Asym_Pause)) + XGBE_CLR_ADV(lks, Asym_Pause); + else + XGBE_SET_ADV(lks, Asym_Pause); + } if (netif_msg_drv(pdata)) xgbe_dump_phy_registers(pdata); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c index 1e56ad7bd9a5..3e5833cf1fab 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c @@ -292,6 +292,10 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7); pdata->xpcs_window_mask = pdata->xpcs_window_size - 1; if (netif_msg_probe(pdata)) { + dev_dbg(dev, "xpcs window def = %#010x\n", + pdata->xpcs_window_def_reg); + dev_dbg(dev, "xpcs window sel = %#010x\n", + pdata->xpcs_window_sel_reg); dev_dbg(dev, "xpcs window = %#010x\n", pdata->xpcs_window); dev_dbg(dev, "xpcs window size = %#010x\n", diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c index c75edcac5e0a..d16eae415f72 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c @@ -231,20 +231,21 @@ static void xgbe_phy_kr_training_post(struct xgbe_prv_data *pdata) 
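/*
 * Illustrative aside, not part of the patch: the Pause/Asym_Pause fix-up
 * that now appears in both xgbe_set_pauseparam() and xgbe_phy_init() is
 * the usual rule "Pause follows rx_pause, Asym_Pause is rx_pause XOR
 * tx_pause"; with the legacy u32 advertising word gone, the driver has to
 * emulate the XOR with a test-and-toggle on the link-mode bitmap.  The
 * sketch below restates the rule with invented names (pause_adv,
 * pause_advertisement).
 */
#include <stdbool.h>

struct pause_adv {
	bool pause;		/* "Pause" link mode bit      */
	bool asym_pause;	/* "Asym_Pause" link mode bit */
};

struct pause_adv pause_advertisement(bool rx_pause, bool tx_pause)
{
	struct pause_adv adv = {
		.pause = rx_pause,
		.asym_pause = rx_pause != tx_pause,	/* boolean XOR */
	};

	return adv;
}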
static enum xgbe_mode xgbe_phy_an_outcome(struct xgbe_prv_data *pdata) { + struct ethtool_link_ksettings *lks = &pdata->phy.lks; struct xgbe_phy_data *phy_data = pdata->phy_data; enum xgbe_mode mode; unsigned int ad_reg, lp_reg; - pdata->phy.lp_advertising |= ADVERTISED_Autoneg; - pdata->phy.lp_advertising |= ADVERTISED_Backplane; + XGBE_SET_LP_ADV(lks, Autoneg); + XGBE_SET_LP_ADV(lks, Backplane); /* Compare Advertisement and Link Partner register 1 */ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE); lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA); if (lp_reg & 0x400) - pdata->phy.lp_advertising |= ADVERTISED_Pause; + XGBE_SET_LP_ADV(lks, Pause); if (lp_reg & 0x800) - pdata->phy.lp_advertising |= ADVERTISED_Asym_Pause; + XGBE_SET_LP_ADV(lks, Asym_Pause); if (pdata->phy.pause_autoneg) { /* Set flow control based on auto-negotiation result */ @@ -266,12 +267,12 @@ static enum xgbe_mode xgbe_phy_an_outcome(struct xgbe_prv_data *pdata) ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1); lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1); if (lp_reg & 0x80) - pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full; + XGBE_SET_LP_ADV(lks, 10000baseKR_Full); if (lp_reg & 0x20) { if (phy_data->speed_set == XGBE_SPEEDSET_2500_10000) - pdata->phy.lp_advertising |= ADVERTISED_2500baseX_Full; + XGBE_SET_LP_ADV(lks, 2500baseX_Full); else - pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full; + XGBE_SET_LP_ADV(lks, 1000baseKX_Full); } ad_reg &= lp_reg; @@ -290,14 +291,17 @@ static enum xgbe_mode xgbe_phy_an_outcome(struct xgbe_prv_data *pdata) ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2); lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2); if (lp_reg & 0xc000) - pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC; + XGBE_SET_LP_ADV(lks, 10000baseR_FEC); return mode; } -static unsigned int xgbe_phy_an_advertising(struct xgbe_prv_data *pdata) +static void xgbe_phy_an_advertising(struct xgbe_prv_data *pdata, + struct ethtool_link_ksettings *dlks) { - return pdata->phy.advertising; + struct ethtool_link_ksettings *slks = &pdata->phy.lks; + + XGBE_LM_COPY(dlks, advertising, slks, advertising); } static int xgbe_phy_an_config(struct xgbe_prv_data *pdata) @@ -565,11 +569,10 @@ static void xgbe_phy_set_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) } static bool xgbe_phy_check_mode(struct xgbe_prv_data *pdata, - enum xgbe_mode mode, u32 advert) + enum xgbe_mode mode, bool advert) { if (pdata->phy.autoneg == AUTONEG_ENABLE) { - if (pdata->phy.advertising & advert) - return true; + return advert; } else { enum xgbe_mode cur_mode; @@ -583,16 +586,18 @@ static bool xgbe_phy_check_mode(struct xgbe_prv_data *pdata, static bool xgbe_phy_use_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) { + struct ethtool_link_ksettings *lks = &pdata->phy.lks; + switch (mode) { case XGBE_MODE_KX_1000: return xgbe_phy_check_mode(pdata, mode, - ADVERTISED_1000baseKX_Full); + XGBE_ADV(lks, 1000baseKX_Full)); case XGBE_MODE_KX_2500: return xgbe_phy_check_mode(pdata, mode, - ADVERTISED_2500baseX_Full); + XGBE_ADV(lks, 2500baseX_Full)); case XGBE_MODE_KR: return xgbe_phy_check_mode(pdata, mode, - ADVERTISED_10000baseKR_Full); + XGBE_ADV(lks, 10000baseKR_Full)); default: return false; } @@ -672,6 +677,7 @@ static void xgbe_phy_exit(struct xgbe_prv_data *pdata) static int xgbe_phy_init(struct xgbe_prv_data *pdata) { + struct ethtool_link_ksettings *lks = &pdata->phy.lks; struct xgbe_phy_data *phy_data; int ret; @@ -790,21 +796,23 @@ static int 
xgbe_phy_init(struct xgbe_prv_data *pdata) } /* Initialize supported features */ - pdata->phy.supported = SUPPORTED_Autoneg; - pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - pdata->phy.supported |= SUPPORTED_Backplane; - pdata->phy.supported |= SUPPORTED_10000baseKR_Full; + XGBE_ZERO_SUP(lks); + XGBE_SET_SUP(lks, Autoneg); + XGBE_SET_SUP(lks, Pause); + XGBE_SET_SUP(lks, Asym_Pause); + XGBE_SET_SUP(lks, Backplane); + XGBE_SET_SUP(lks, 10000baseKR_Full); switch (phy_data->speed_set) { case XGBE_SPEEDSET_1000_10000: - pdata->phy.supported |= SUPPORTED_1000baseKX_Full; + XGBE_SET_SUP(lks, 1000baseKX_Full); break; case XGBE_SPEEDSET_2500_10000: - pdata->phy.supported |= SUPPORTED_2500baseX_Full; + XGBE_SET_SUP(lks, 2500baseX_Full); break; } if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE) - pdata->phy.supported |= SUPPORTED_10000baseR_FEC; + XGBE_SET_SUP(lks, 10000baseR_FEC); pdata->phy_data = phy_data; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index 04b5c149caca..3304a291aa96 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -709,18 +709,13 @@ static int xgbe_phy_mii_read(struct mii_bus *mii, int addr, int reg) static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata) { + struct ethtool_link_ksettings *lks = &pdata->phy.lks; struct xgbe_phy_data *phy_data = pdata->phy_data; if (!phy_data->sfp_mod_absent && !phy_data->sfp_changed) return; - pdata->phy.supported &= ~SUPPORTED_Autoneg; - pdata->phy.supported &= ~(SUPPORTED_Pause | SUPPORTED_Asym_Pause); - pdata->phy.supported &= ~SUPPORTED_TP; - pdata->phy.supported &= ~SUPPORTED_FIBRE; - pdata->phy.supported &= ~SUPPORTED_100baseT_Full; - pdata->phy.supported &= ~SUPPORTED_1000baseT_Full; - pdata->phy.supported &= ~SUPPORTED_10000baseT_Full; + XGBE_ZERO_SUP(lks); if (phy_data->sfp_mod_absent) { pdata->phy.speed = SPEED_UNKNOWN; @@ -728,18 +723,13 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata) pdata->phy.autoneg = AUTONEG_ENABLE; pdata->phy.pause_autoneg = AUTONEG_ENABLE; - pdata->phy.supported |= SUPPORTED_Autoneg; - pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - pdata->phy.supported |= SUPPORTED_TP; - pdata->phy.supported |= SUPPORTED_FIBRE; - if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) - pdata->phy.supported |= SUPPORTED_100baseT_Full; - if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) - pdata->phy.supported |= SUPPORTED_1000baseT_Full; - if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) - pdata->phy.supported |= SUPPORTED_10000baseT_Full; + XGBE_SET_SUP(lks, Autoneg); + XGBE_SET_SUP(lks, Pause); + XGBE_SET_SUP(lks, Asym_Pause); + XGBE_SET_SUP(lks, TP); + XGBE_SET_SUP(lks, FIBRE); - pdata->phy.advertising = pdata->phy.supported; + XGBE_LM_COPY(lks, advertising, lks, supported); return; } @@ -753,8 +743,18 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata) pdata->phy.duplex = DUPLEX_UNKNOWN; pdata->phy.autoneg = AUTONEG_ENABLE; pdata->phy.pause_autoneg = AUTONEG_ENABLE; - pdata->phy.supported |= SUPPORTED_Autoneg; - pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + XGBE_SET_SUP(lks, Autoneg); + XGBE_SET_SUP(lks, Pause); + XGBE_SET_SUP(lks, Asym_Pause); + if (phy_data->sfp_base == XGBE_SFP_BASE_1000_T) { + if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) + XGBE_SET_SUP(lks, 100baseT_Full); + if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) + XGBE_SET_SUP(lks, 1000baseT_Full); + } else { + if 
(phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) + XGBE_SET_SUP(lks, 1000baseX_Full); + } break; case XGBE_SFP_BASE_10000_SR: case XGBE_SFP_BASE_10000_LR: @@ -765,6 +765,27 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata) pdata->phy.duplex = DUPLEX_FULL; pdata->phy.autoneg = AUTONEG_DISABLE; pdata->phy.pause_autoneg = AUTONEG_DISABLE; + if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) { + switch (phy_data->sfp_base) { + case XGBE_SFP_BASE_10000_SR: + XGBE_SET_SUP(lks, 10000baseSR_Full); + break; + case XGBE_SFP_BASE_10000_LR: + XGBE_SET_SUP(lks, 10000baseLR_Full); + break; + case XGBE_SFP_BASE_10000_LRM: + XGBE_SET_SUP(lks, 10000baseLRM_Full); + break; + case XGBE_SFP_BASE_10000_ER: + XGBE_SET_SUP(lks, 10000baseER_Full); + break; + case XGBE_SFP_BASE_10000_CR: + XGBE_SET_SUP(lks, 10000baseCR_Full); + break; + default: + break; + } + } break; default: pdata->phy.speed = SPEED_UNKNOWN; @@ -778,38 +799,14 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata) case XGBE_SFP_BASE_1000_T: case XGBE_SFP_BASE_1000_CX: case XGBE_SFP_BASE_10000_CR: - pdata->phy.supported |= SUPPORTED_TP; + XGBE_SET_SUP(lks, TP); break; default: - pdata->phy.supported |= SUPPORTED_FIBRE; + XGBE_SET_SUP(lks, FIBRE); + break; } - switch (phy_data->sfp_speed) { - case XGBE_SFP_SPEED_100_1000: - if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) - pdata->phy.supported |= SUPPORTED_100baseT_Full; - if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) - pdata->phy.supported |= SUPPORTED_1000baseT_Full; - break; - case XGBE_SFP_SPEED_1000: - if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) - pdata->phy.supported |= SUPPORTED_1000baseT_Full; - break; - case XGBE_SFP_SPEED_10000: - if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) - pdata->phy.supported |= SUPPORTED_10000baseT_Full; - break; - default: - /* Choose the fastest supported speed */ - if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) - pdata->phy.supported |= SUPPORTED_10000baseT_Full; - else if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) - pdata->phy.supported |= SUPPORTED_1000baseT_Full; - else if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) - pdata->phy.supported |= SUPPORTED_100baseT_Full; - } - - pdata->phy.advertising = pdata->phy.supported; + XGBE_LM_COPY(lks, advertising, lks, supported); } static bool xgbe_phy_sfp_bit_rate(struct xgbe_sfp_eeprom *sfp_eeprom, @@ -886,8 +883,10 @@ static void xgbe_phy_external_phy_quirks(struct xgbe_prv_data *pdata) static int xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata) { + struct ethtool_link_ksettings *lks = &pdata->phy.lks; struct xgbe_phy_data *phy_data = pdata->phy_data; struct phy_device *phydev; + u32 advertising; int ret; /* If we already have a PHY, just return */ @@ -943,7 +942,10 @@ static int xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata) phy_data->phydev = phydev; xgbe_phy_external_phy_quirks(pdata); - phydev->advertising &= pdata->phy.advertising; + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + lks->link_modes.advertising); + phydev->advertising &= advertising; phy_start_aneg(phy_data->phydev); @@ -1277,6 +1279,7 @@ static void xgbe_phy_sfp_detect(struct xgbe_prv_data *pdata) static void xgbe_phy_phydev_flowctrl(struct xgbe_prv_data *pdata) { + struct ethtool_link_ksettings *lks = &pdata->phy.lks; struct xgbe_phy_data *phy_data = pdata->phy_data; u16 lcl_adv = 0, rmt_adv = 0; u8 fc; @@ -1293,11 +1296,11 @@ static void xgbe_phy_phydev_flowctrl(struct xgbe_prv_data *pdata) lcl_adv |= ADVERTISE_PAUSE_ASYM; if 
(phy_data->phydev->pause) { - pdata->phy.lp_advertising |= ADVERTISED_Pause; + XGBE_SET_LP_ADV(lks, Pause); rmt_adv |= LPA_PAUSE_CAP; } if (phy_data->phydev->asym_pause) { - pdata->phy.lp_advertising |= ADVERTISED_Asym_Pause; + XGBE_SET_LP_ADV(lks, Asym_Pause); rmt_adv |= LPA_PAUSE_ASYM; } @@ -1310,10 +1313,11 @@ static void xgbe_phy_phydev_flowctrl(struct xgbe_prv_data *pdata) static enum xgbe_mode xgbe_phy_an37_sgmii_outcome(struct xgbe_prv_data *pdata) { + struct ethtool_link_ksettings *lks = &pdata->phy.lks; enum xgbe_mode mode; - pdata->phy.lp_advertising |= ADVERTISED_Autoneg; - pdata->phy.lp_advertising |= ADVERTISED_TP; + XGBE_SET_LP_ADV(lks, Autoneg); + XGBE_SET_LP_ADV(lks, TP); /* Use external PHY to determine flow control */ if (pdata->phy.pause_autoneg) @@ -1322,21 +1326,21 @@ static enum xgbe_mode xgbe_phy_an37_sgmii_outcome(struct xgbe_prv_data *pdata) switch (pdata->an_status & XGBE_SGMII_AN_LINK_SPEED) { case XGBE_SGMII_AN_LINK_SPEED_100: if (pdata->an_status & XGBE_SGMII_AN_LINK_DUPLEX) { - pdata->phy.lp_advertising |= ADVERTISED_100baseT_Full; + XGBE_SET_LP_ADV(lks, 100baseT_Full); mode = XGBE_MODE_SGMII_100; } else { /* Half-duplex not supported */ - pdata->phy.lp_advertising |= ADVERTISED_100baseT_Half; + XGBE_SET_LP_ADV(lks, 100baseT_Half); mode = XGBE_MODE_UNKNOWN; } break; case XGBE_SGMII_AN_LINK_SPEED_1000: if (pdata->an_status & XGBE_SGMII_AN_LINK_DUPLEX) { - pdata->phy.lp_advertising |= ADVERTISED_1000baseT_Full; + XGBE_SET_LP_ADV(lks, 1000baseT_Full); mode = XGBE_MODE_SGMII_1000; } else { /* Half-duplex not supported */ - pdata->phy.lp_advertising |= ADVERTISED_1000baseT_Half; + XGBE_SET_LP_ADV(lks, 1000baseT_Half); mode = XGBE_MODE_UNKNOWN; } break; @@ -1349,19 +1353,20 @@ static enum xgbe_mode xgbe_phy_an37_sgmii_outcome(struct xgbe_prv_data *pdata) static enum xgbe_mode xgbe_phy_an37_outcome(struct xgbe_prv_data *pdata) { + struct ethtool_link_ksettings *lks = &pdata->phy.lks; enum xgbe_mode mode; unsigned int ad_reg, lp_reg; - pdata->phy.lp_advertising |= ADVERTISED_Autoneg; - pdata->phy.lp_advertising |= ADVERTISED_FIBRE; + XGBE_SET_LP_ADV(lks, Autoneg); + XGBE_SET_LP_ADV(lks, FIBRE); /* Compare Advertisement and Link Partner register */ ad_reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_ADVERTISE); lp_reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_LP_ABILITY); if (lp_reg & 0x100) - pdata->phy.lp_advertising |= ADVERTISED_Pause; + XGBE_SET_LP_ADV(lks, Pause); if (lp_reg & 0x80) - pdata->phy.lp_advertising |= ADVERTISED_Asym_Pause; + XGBE_SET_LP_ADV(lks, Asym_Pause); if (pdata->phy.pause_autoneg) { /* Set flow control based on auto-negotiation result */ @@ -1379,10 +1384,8 @@ static enum xgbe_mode xgbe_phy_an37_outcome(struct xgbe_prv_data *pdata) } } - if (lp_reg & 0x40) - pdata->phy.lp_advertising |= ADVERTISED_1000baseT_Half; if (lp_reg & 0x20) - pdata->phy.lp_advertising |= ADVERTISED_1000baseT_Full; + XGBE_SET_LP_ADV(lks, 1000baseX_Full); /* Half duplex is not supported */ ad_reg &= lp_reg; @@ -1393,12 +1396,13 @@ static enum xgbe_mode xgbe_phy_an37_outcome(struct xgbe_prv_data *pdata) static enum xgbe_mode xgbe_phy_an73_redrv_outcome(struct xgbe_prv_data *pdata) { + struct ethtool_link_ksettings *lks = &pdata->phy.lks; struct xgbe_phy_data *phy_data = pdata->phy_data; enum xgbe_mode mode; unsigned int ad_reg, lp_reg; - pdata->phy.lp_advertising |= ADVERTISED_Autoneg; - pdata->phy.lp_advertising |= ADVERTISED_Backplane; + XGBE_SET_LP_ADV(lks, Autoneg); + XGBE_SET_LP_ADV(lks, Backplane); /* Use external PHY to determine flow control */ if 
(pdata->phy.pause_autoneg) @@ -1408,9 +1412,9 @@ static enum xgbe_mode xgbe_phy_an73_redrv_outcome(struct xgbe_prv_data *pdata) ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1); lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1); if (lp_reg & 0x80) - pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full; + XGBE_SET_LP_ADV(lks, 10000baseKR_Full); if (lp_reg & 0x20) - pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full; + XGBE_SET_LP_ADV(lks, 1000baseKX_Full); ad_reg &= lp_reg; if (ad_reg & 0x80) { @@ -1463,26 +1467,27 @@ static enum xgbe_mode xgbe_phy_an73_redrv_outcome(struct xgbe_prv_data *pdata) ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2); lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2); if (lp_reg & 0xc000) - pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC; + XGBE_SET_LP_ADV(lks, 10000baseR_FEC); return mode; } static enum xgbe_mode xgbe_phy_an73_outcome(struct xgbe_prv_data *pdata) { + struct ethtool_link_ksettings *lks = &pdata->phy.lks; enum xgbe_mode mode; unsigned int ad_reg, lp_reg; - pdata->phy.lp_advertising |= ADVERTISED_Autoneg; - pdata->phy.lp_advertising |= ADVERTISED_Backplane; + XGBE_SET_LP_ADV(lks, Autoneg); + XGBE_SET_LP_ADV(lks, Backplane); /* Compare Advertisement and Link Partner register 1 */ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE); lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA); if (lp_reg & 0x400) - pdata->phy.lp_advertising |= ADVERTISED_Pause; + XGBE_SET_LP_ADV(lks, Pause); if (lp_reg & 0x800) - pdata->phy.lp_advertising |= ADVERTISED_Asym_Pause; + XGBE_SET_LP_ADV(lks, Asym_Pause); if (pdata->phy.pause_autoneg) { /* Set flow control based on auto-negotiation result */ @@ -1504,9 +1509,9 @@ static enum xgbe_mode xgbe_phy_an73_outcome(struct xgbe_prv_data *pdata) ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1); lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1); if (lp_reg & 0x80) - pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full; + XGBE_SET_LP_ADV(lks, 10000baseKR_Full); if (lp_reg & 0x20) - pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full; + XGBE_SET_LP_ADV(lks, 1000baseKX_Full); ad_reg &= lp_reg; if (ad_reg & 0x80) @@ -1520,7 +1525,7 @@ static enum xgbe_mode xgbe_phy_an73_outcome(struct xgbe_prv_data *pdata) ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2); lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2); if (lp_reg & 0xc000) - pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC; + XGBE_SET_LP_ADV(lks, 10000baseR_FEC); return mode; } @@ -1541,41 +1546,43 @@ static enum xgbe_mode xgbe_phy_an_outcome(struct xgbe_prv_data *pdata) } } -static unsigned int xgbe_phy_an_advertising(struct xgbe_prv_data *pdata) +static void xgbe_phy_an_advertising(struct xgbe_prv_data *pdata, + struct ethtool_link_ksettings *dlks) { + struct ethtool_link_ksettings *slks = &pdata->phy.lks; struct xgbe_phy_data *phy_data = pdata->phy_data; - unsigned int advertising; + + XGBE_LM_COPY(dlks, advertising, slks, advertising); /* Without a re-driver, just return current advertising */ if (!phy_data->redrv) - return pdata->phy.advertising; + return; /* With the KR re-driver we need to advertise a single speed */ - advertising = pdata->phy.advertising; - advertising &= ~ADVERTISED_1000baseKX_Full; - advertising &= ~ADVERTISED_10000baseKR_Full; + XGBE_CLR_ADV(dlks, 1000baseKX_Full); + XGBE_CLR_ADV(dlks, 10000baseKR_Full); switch (phy_data->port_mode) { case XGBE_PORT_MODE_BACKPLANE: - advertising |= ADVERTISED_10000baseKR_Full; + XGBE_SET_ADV(dlks, 
10000baseKR_Full); break; case XGBE_PORT_MODE_BACKPLANE_2500: - advertising |= ADVERTISED_1000baseKX_Full; + XGBE_SET_ADV(dlks, 1000baseKX_Full); break; case XGBE_PORT_MODE_1000BASE_T: case XGBE_PORT_MODE_1000BASE_X: case XGBE_PORT_MODE_NBASE_T: - advertising |= ADVERTISED_1000baseKX_Full; + XGBE_SET_ADV(dlks, 1000baseKX_Full); break; case XGBE_PORT_MODE_10GBASE_T: if (phy_data->phydev && (phy_data->phydev->speed == SPEED_10000)) - advertising |= ADVERTISED_10000baseKR_Full; + XGBE_SET_ADV(dlks, 10000baseKR_Full); else - advertising |= ADVERTISED_1000baseKX_Full; + XGBE_SET_ADV(dlks, 1000baseKX_Full); break; case XGBE_PORT_MODE_10GBASE_R: - advertising |= ADVERTISED_10000baseKR_Full; + XGBE_SET_ADV(dlks, 10000baseKR_Full); break; case XGBE_PORT_MODE_SFP: switch (phy_data->sfp_base) { @@ -1583,24 +1590,24 @@ static unsigned int xgbe_phy_an_advertising(struct xgbe_prv_data *pdata) case XGBE_SFP_BASE_1000_SX: case XGBE_SFP_BASE_1000_LX: case XGBE_SFP_BASE_1000_CX: - advertising |= ADVERTISED_1000baseKX_Full; + XGBE_SET_ADV(dlks, 1000baseKX_Full); break; default: - advertising |= ADVERTISED_10000baseKR_Full; + XGBE_SET_ADV(dlks, 10000baseKR_Full); break; } break; default: - advertising |= ADVERTISED_10000baseKR_Full; + XGBE_SET_ADV(dlks, 10000baseKR_Full); break; } - - return advertising; } static int xgbe_phy_an_config(struct xgbe_prv_data *pdata) { + struct ethtool_link_ksettings *lks = &pdata->phy.lks; struct xgbe_phy_data *phy_data = pdata->phy_data; + u32 advertising; int ret; ret = xgbe_phy_find_phy_device(pdata); @@ -1610,9 +1617,12 @@ static int xgbe_phy_an_config(struct xgbe_prv_data *pdata) if (!phy_data->phydev) return 0; + ethtool_convert_link_mode_to_legacy_u32(&advertising, + lks->link_modes.advertising); + phy_data->phydev->autoneg = pdata->phy.autoneg; phy_data->phydev->advertising = phy_data->phydev->supported & - pdata->phy.advertising; + advertising; if (pdata->phy.autoneg != AUTONEG_ENABLE) { phy_data->phydev->speed = pdata->phy.speed; @@ -2073,11 +2083,10 @@ static void xgbe_phy_set_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) } static bool xgbe_phy_check_mode(struct xgbe_prv_data *pdata, - enum xgbe_mode mode, u32 advert) + enum xgbe_mode mode, bool advert) { if (pdata->phy.autoneg == AUTONEG_ENABLE) { - if (pdata->phy.advertising & advert) - return true; + return advert; } else { enum xgbe_mode cur_mode; @@ -2092,13 +2101,15 @@ static bool xgbe_phy_check_mode(struct xgbe_prv_data *pdata, static bool xgbe_phy_use_basex_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) { + struct ethtool_link_ksettings *lks = &pdata->phy.lks; + switch (mode) { case XGBE_MODE_X: return xgbe_phy_check_mode(pdata, mode, - ADVERTISED_1000baseT_Full); + XGBE_ADV(lks, 1000baseX_Full)); case XGBE_MODE_KR: return xgbe_phy_check_mode(pdata, mode, - ADVERTISED_10000baseT_Full); + XGBE_ADV(lks, 10000baseKR_Full)); default: return false; } @@ -2107,19 +2118,21 @@ static bool xgbe_phy_use_basex_mode(struct xgbe_prv_data *pdata, static bool xgbe_phy_use_baset_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) { + struct ethtool_link_ksettings *lks = &pdata->phy.lks; + switch (mode) { case XGBE_MODE_SGMII_100: return xgbe_phy_check_mode(pdata, mode, - ADVERTISED_100baseT_Full); + XGBE_ADV(lks, 100baseT_Full)); case XGBE_MODE_SGMII_1000: return xgbe_phy_check_mode(pdata, mode, - ADVERTISED_1000baseT_Full); + XGBE_ADV(lks, 1000baseT_Full)); case XGBE_MODE_KX_2500: return xgbe_phy_check_mode(pdata, mode, - ADVERTISED_2500baseX_Full); + XGBE_ADV(lks, 2500baseT_Full)); case XGBE_MODE_KR: 
return xgbe_phy_check_mode(pdata, mode, - ADVERTISED_10000baseT_Full); + XGBE_ADV(lks, 10000baseT_Full)); default: return false; } @@ -2128,6 +2141,7 @@ static bool xgbe_phy_use_baset_mode(struct xgbe_prv_data *pdata, static bool xgbe_phy_use_sfp_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) { + struct ethtool_link_ksettings *lks = &pdata->phy.lks; struct xgbe_phy_data *phy_data = pdata->phy_data; switch (mode) { @@ -2135,22 +2149,26 @@ static bool xgbe_phy_use_sfp_mode(struct xgbe_prv_data *pdata, if (phy_data->sfp_base == XGBE_SFP_BASE_1000_T) return false; return xgbe_phy_check_mode(pdata, mode, - ADVERTISED_1000baseT_Full); + XGBE_ADV(lks, 1000baseX_Full)); case XGBE_MODE_SGMII_100: if (phy_data->sfp_base != XGBE_SFP_BASE_1000_T) return false; return xgbe_phy_check_mode(pdata, mode, - ADVERTISED_100baseT_Full); + XGBE_ADV(lks, 100baseT_Full)); case XGBE_MODE_SGMII_1000: if (phy_data->sfp_base != XGBE_SFP_BASE_1000_T) return false; return xgbe_phy_check_mode(pdata, mode, - ADVERTISED_1000baseT_Full); + XGBE_ADV(lks, 1000baseT_Full)); case XGBE_MODE_SFI: if (phy_data->sfp_mod_absent) return true; return xgbe_phy_check_mode(pdata, mode, - ADVERTISED_10000baseT_Full); + XGBE_ADV(lks, 10000baseSR_Full) || + XGBE_ADV(lks, 10000baseLR_Full) || + XGBE_ADV(lks, 10000baseLRM_Full) || + XGBE_ADV(lks, 10000baseER_Full) || + XGBE_ADV(lks, 10000baseCR_Full)); default: return false; } @@ -2159,10 +2177,12 @@ static bool xgbe_phy_use_sfp_mode(struct xgbe_prv_data *pdata, static bool xgbe_phy_use_bp_2500_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) { + struct ethtool_link_ksettings *lks = &pdata->phy.lks; + switch (mode) { case XGBE_MODE_KX_2500: return xgbe_phy_check_mode(pdata, mode, - ADVERTISED_2500baseX_Full); + XGBE_ADV(lks, 2500baseX_Full)); default: return false; } @@ -2171,13 +2191,15 @@ static bool xgbe_phy_use_bp_2500_mode(struct xgbe_prv_data *pdata, static bool xgbe_phy_use_bp_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) { + struct ethtool_link_ksettings *lks = &pdata->phy.lks; + switch (mode) { case XGBE_MODE_KX_1000: return xgbe_phy_check_mode(pdata, mode, - ADVERTISED_1000baseKX_Full); + XGBE_ADV(lks, 1000baseKX_Full)); case XGBE_MODE_KR: return xgbe_phy_check_mode(pdata, mode, - ADVERTISED_10000baseKR_Full); + XGBE_ADV(lks, 10000baseKR_Full)); default: return false; } @@ -2744,6 +2766,7 @@ static void xgbe_phy_exit(struct xgbe_prv_data *pdata) static int xgbe_phy_init(struct xgbe_prv_data *pdata) { + struct ethtool_link_ksettings *lks = &pdata->phy.lks; struct xgbe_phy_data *phy_data; struct mii_bus *mii; unsigned int reg; @@ -2823,32 +2846,33 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) phy_data->cur_mode = XGBE_MODE_UNKNOWN; /* Initialize supported features */ - pdata->phy.supported = 0; + XGBE_ZERO_SUP(lks); switch (phy_data->port_mode) { /* Backplane support */ case XGBE_PORT_MODE_BACKPLANE: - pdata->phy.supported |= SUPPORTED_Autoneg; - pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - pdata->phy.supported |= SUPPORTED_Backplane; + XGBE_SET_SUP(lks, Autoneg); + XGBE_SET_SUP(lks, Pause); + XGBE_SET_SUP(lks, Asym_Pause); + XGBE_SET_SUP(lks, Backplane); if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) { - pdata->phy.supported |= SUPPORTED_1000baseKX_Full; + XGBE_SET_SUP(lks, 1000baseKX_Full); phy_data->start_mode = XGBE_MODE_KX_1000; } if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) { - pdata->phy.supported |= SUPPORTED_10000baseKR_Full; + XGBE_SET_SUP(lks, 10000baseKR_Full); if (pdata->fec_ability & 
MDIO_PMA_10GBR_FECABLE_ABLE) - pdata->phy.supported |= - SUPPORTED_10000baseR_FEC; + XGBE_SET_SUP(lks, 10000baseR_FEC); phy_data->start_mode = XGBE_MODE_KR; } phy_data->phydev_mode = XGBE_MDIO_MODE_NONE; break; case XGBE_PORT_MODE_BACKPLANE_2500: - pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - pdata->phy.supported |= SUPPORTED_Backplane; - pdata->phy.supported |= SUPPORTED_2500baseX_Full; + XGBE_SET_SUP(lks, Pause); + XGBE_SET_SUP(lks, Asym_Pause); + XGBE_SET_SUP(lks, Backplane); + XGBE_SET_SUP(lks, 2500baseX_Full); phy_data->start_mode = XGBE_MODE_KX_2500; phy_data->phydev_mode = XGBE_MDIO_MODE_NONE; @@ -2856,15 +2880,16 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) /* MDIO 1GBase-T support */ case XGBE_PORT_MODE_1000BASE_T: - pdata->phy.supported |= SUPPORTED_Autoneg; - pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - pdata->phy.supported |= SUPPORTED_TP; + XGBE_SET_SUP(lks, Autoneg); + XGBE_SET_SUP(lks, Pause); + XGBE_SET_SUP(lks, Asym_Pause); + XGBE_SET_SUP(lks, TP); if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) { - pdata->phy.supported |= SUPPORTED_100baseT_Full; + XGBE_SET_SUP(lks, 100baseT_Full); phy_data->start_mode = XGBE_MODE_SGMII_100; } if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) { - pdata->phy.supported |= SUPPORTED_1000baseT_Full; + XGBE_SET_SUP(lks, 1000baseT_Full); phy_data->start_mode = XGBE_MODE_SGMII_1000; } @@ -2873,10 +2898,11 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) /* MDIO Base-X support */ case XGBE_PORT_MODE_1000BASE_X: - pdata->phy.supported |= SUPPORTED_Autoneg; - pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - pdata->phy.supported |= SUPPORTED_FIBRE; - pdata->phy.supported |= SUPPORTED_1000baseT_Full; + XGBE_SET_SUP(lks, Autoneg); + XGBE_SET_SUP(lks, Pause); + XGBE_SET_SUP(lks, Asym_Pause); + XGBE_SET_SUP(lks, FIBRE); + XGBE_SET_SUP(lks, 1000baseX_Full); phy_data->start_mode = XGBE_MODE_X; phy_data->phydev_mode = XGBE_MDIO_MODE_CL22; @@ -2884,19 +2910,20 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) /* MDIO NBase-T support */ case XGBE_PORT_MODE_NBASE_T: - pdata->phy.supported |= SUPPORTED_Autoneg; - pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - pdata->phy.supported |= SUPPORTED_TP; + XGBE_SET_SUP(lks, Autoneg); + XGBE_SET_SUP(lks, Pause); + XGBE_SET_SUP(lks, Asym_Pause); + XGBE_SET_SUP(lks, TP); if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) { - pdata->phy.supported |= SUPPORTED_100baseT_Full; + XGBE_SET_SUP(lks, 100baseT_Full); phy_data->start_mode = XGBE_MODE_SGMII_100; } if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) { - pdata->phy.supported |= SUPPORTED_1000baseT_Full; + XGBE_SET_SUP(lks, 1000baseT_Full); phy_data->start_mode = XGBE_MODE_SGMII_1000; } if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_2500) { - pdata->phy.supported |= SUPPORTED_2500baseX_Full; + XGBE_SET_SUP(lks, 2500baseT_Full); phy_data->start_mode = XGBE_MODE_KX_2500; } @@ -2905,33 +2932,38 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) /* 10GBase-T support */ case XGBE_PORT_MODE_10GBASE_T: - pdata->phy.supported |= SUPPORTED_Autoneg; - pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - pdata->phy.supported |= SUPPORTED_TP; + XGBE_SET_SUP(lks, Autoneg); + XGBE_SET_SUP(lks, Pause); + XGBE_SET_SUP(lks, Asym_Pause); + XGBE_SET_SUP(lks, TP); if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) { - pdata->phy.supported |= SUPPORTED_100baseT_Full; + XGBE_SET_SUP(lks, 100baseT_Full); phy_data->start_mode = XGBE_MODE_SGMII_100; } if 
(phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) { - pdata->phy.supported |= SUPPORTED_1000baseT_Full; + XGBE_SET_SUP(lks, 1000baseT_Full); phy_data->start_mode = XGBE_MODE_SGMII_1000; } if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) { - pdata->phy.supported |= SUPPORTED_10000baseT_Full; + XGBE_SET_SUP(lks, 10000baseT_Full); phy_data->start_mode = XGBE_MODE_KR; } - phy_data->phydev_mode = XGBE_MDIO_MODE_NONE; + phy_data->phydev_mode = XGBE_MDIO_MODE_CL45; break; /* 10GBase-R support */ case XGBE_PORT_MODE_10GBASE_R: - pdata->phy.supported |= SUPPORTED_Autoneg; - pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - pdata->phy.supported |= SUPPORTED_TP; - pdata->phy.supported |= SUPPORTED_10000baseT_Full; + XGBE_SET_SUP(lks, Autoneg); + XGBE_SET_SUP(lks, Pause); + XGBE_SET_SUP(lks, Asym_Pause); + XGBE_SET_SUP(lks, FIBRE); + XGBE_SET_SUP(lks, 10000baseSR_Full); + XGBE_SET_SUP(lks, 10000baseLR_Full); + XGBE_SET_SUP(lks, 10000baseLRM_Full); + XGBE_SET_SUP(lks, 10000baseER_Full); if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE) - pdata->phy.supported |= SUPPORTED_10000baseR_FEC; + XGBE_SET_SUP(lks, 10000baseR_FEC); phy_data->start_mode = XGBE_MODE_SFI; phy_data->phydev_mode = XGBE_MDIO_MODE_NONE; @@ -2939,22 +2971,17 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) /* SFP support */ case XGBE_PORT_MODE_SFP: - pdata->phy.supported |= SUPPORTED_Autoneg; - pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - pdata->phy.supported |= SUPPORTED_TP; - pdata->phy.supported |= SUPPORTED_FIBRE; - if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) { - pdata->phy.supported |= SUPPORTED_100baseT_Full; + XGBE_SET_SUP(lks, Autoneg); + XGBE_SET_SUP(lks, Pause); + XGBE_SET_SUP(lks, Asym_Pause); + XGBE_SET_SUP(lks, TP); + XGBE_SET_SUP(lks, FIBRE); + if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) phy_data->start_mode = XGBE_MODE_SGMII_100; - } - if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) { - pdata->phy.supported |= SUPPORTED_1000baseT_Full; + if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) phy_data->start_mode = XGBE_MODE_SGMII_1000; - } - if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) { - pdata->phy.supported |= SUPPORTED_10000baseT_Full; + if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) phy_data->start_mode = XGBE_MODE_SFI; - } phy_data->phydev_mode = XGBE_MDIO_MODE_CL22; @@ -2965,8 +2992,9 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) } if (netif_msg_probe(pdata)) - dev_dbg(pdata->dev, "phy supported=%#x\n", - pdata->phy.supported); + dev_dbg(pdata->dev, "phy supported=0x%*pb\n", + __ETHTOOL_LINK_MODE_MASK_NBITS, + lks->link_modes.supported); if ((phy_data->conn_type & XGBE_CONN_TYPE_MDIO) && (phy_data->phydev_mode != XGBE_MDIO_MODE_NONE)) { diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index 0938294f640a..ad102c8bac7b 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -129,6 +129,10 @@ #include #include #include +#include +#include +#include +#include #define XGBE_DRV_NAME "amd-xgbe" #define XGBE_DRV_VERSION "1.0.3" @@ -180,8 +184,6 @@ #define XGBE_IRQ_MODE_EDGE 0 #define XGBE_IRQ_MODE_LEVEL 1 -#define XGBE_DMA_INTERRUPT_MASK 0x31c7 - #define XGMAC_MIN_PACKET 60 #define XGMAC_STD_PACKET_MTU 1500 #define XGMAC_MAX_STD_PACKET 1518 @@ -296,6 +298,48 @@ /* MDIO port types */ #define XGMAC_MAX_C22_PORT 3 +/* Link mode bit operations */ +#define XGBE_ZERO_SUP(_ls) \ + ethtool_link_ksettings_zero_link_mode((_ls), supported) + 
+#define XGBE_SET_SUP(_ls, _mode) \ + ethtool_link_ksettings_add_link_mode((_ls), supported, _mode) + +#define XGBE_CLR_SUP(_ls, _mode) \ + ethtool_link_ksettings_del_link_mode((_ls), supported, _mode) + +#define XGBE_IS_SUP(_ls, _mode) \ + ethtool_link_ksettings_test_link_mode((_ls), supported, _mode) + +#define XGBE_ZERO_ADV(_ls) \ + ethtool_link_ksettings_zero_link_mode((_ls), advertising) + +#define XGBE_SET_ADV(_ls, _mode) \ + ethtool_link_ksettings_add_link_mode((_ls), advertising, _mode) + +#define XGBE_CLR_ADV(_ls, _mode) \ + ethtool_link_ksettings_del_link_mode((_ls), advertising, _mode) + +#define XGBE_ADV(_ls, _mode) \ + ethtool_link_ksettings_test_link_mode((_ls), advertising, _mode) + +#define XGBE_ZERO_LP_ADV(_ls) \ + ethtool_link_ksettings_zero_link_mode((_ls), lp_advertising) + +#define XGBE_SET_LP_ADV(_ls, _mode) \ + ethtool_link_ksettings_add_link_mode((_ls), lp_advertising, _mode) + +#define XGBE_CLR_LP_ADV(_ls, _mode) \ + ethtool_link_ksettings_del_link_mode((_ls), lp_advertising, _mode) + +#define XGBE_LP_ADV(_ls, _mode) \ + ethtool_link_ksettings_test_link_mode((_ls), lp_advertising, _mode) + +#define XGBE_LM_COPY(_dst, _dname, _src, _sname) \ + bitmap_copy((_dst)->link_modes._dname, \ + (_src)->link_modes._sname, \ + __ETHTOOL_LINK_MODE_MASK_NBITS) + struct xgbe_prv_data; struct xgbe_packet_data { @@ -460,6 +504,8 @@ struct xgbe_channel { /* Netdev related settings */ struct napi_struct napi; + /* Per channel interrupt enablement tracker */ + unsigned int curr_ier; unsigned int saved_ier; unsigned int tx_timer_active; @@ -561,9 +607,7 @@ enum xgbe_mdio_mode { }; struct xgbe_phy { - u32 supported; - u32 advertising; - u32 lp_advertising; + struct ethtool_link_ksettings lks; int address; @@ -666,6 +710,16 @@ struct xgbe_ext_stats { u64 tx_tso_packets; u64 rx_split_header_packets; u64 rx_buffer_unavailable; + + u64 txq_packets[XGBE_MAX_DMA_CHANNELS]; + u64 txq_bytes[XGBE_MAX_DMA_CHANNELS]; + u64 rxq_packets[XGBE_MAX_DMA_CHANNELS]; + u64 rxq_bytes[XGBE_MAX_DMA_CHANNELS]; + + u64 tx_vxlan_packets; + u64 rx_vxlan_packets; + u64 rx_csum_errors; + u64 rx_vxlan_csum_errors; }; struct xgbe_hw_if { @@ -769,6 +823,11 @@ struct xgbe_hw_if { /* For ECC */ void (*disable_ecc_ded)(struct xgbe_prv_data *); void (*disable_ecc_sec)(struct xgbe_prv_data *, enum xgbe_ecc_sec); + + /* For VXLAN */ + void (*enable_vxlan)(struct xgbe_prv_data *); + void (*disable_vxlan)(struct xgbe_prv_data *); + void (*set_vxlan_id)(struct xgbe_prv_data *); }; /* This structure represents implementation specific routines for an @@ -810,7 +869,8 @@ struct xgbe_phy_impl_if { int (*an_config)(struct xgbe_prv_data *); /* Set/override auto-negotiation advertisement settings */ - unsigned int (*an_advertising)(struct xgbe_prv_data *); + void (*an_advertising)(struct xgbe_prv_data *, + struct ethtool_link_ksettings *); /* Process results of auto-negotiation */ enum xgbe_mode (*an_outcome)(struct xgbe_prv_data *); @@ -892,6 +952,7 @@ struct xgbe_hw_features { unsigned int addn_mac; /* Additional MAC Addresses */ unsigned int ts_src; /* Timestamp Source */ unsigned int sa_vlan_ins; /* Source Address or VLAN Insertion */ + unsigned int vxn; /* VXLAN/NVGRE */ /* HW Feature Register1 */ unsigned int rx_fifo_size; /* MTL Receive FIFO Size */ @@ -930,6 +991,12 @@ struct xgbe_version_data { unsigned int rx_desc_prefetch; }; +struct xgbe_vxlan_data { + struct list_head list; + sa_family_t sa_family; + __be16 port; +}; + struct xgbe_prv_data { struct net_device *netdev; struct pci_dev *pcidev; @@ -1071,6 +1138,15 @@ struct 
xgbe_prv_data { u32 rss_table[XGBE_RSS_MAX_TABLE_SIZE]; u32 rss_options; + /* VXLAN settings */ + unsigned int vxlan_port_set; + unsigned int vxlan_offloads_set; + unsigned int vxlan_force_disable; + unsigned int vxlan_port_count; + struct list_head vxlan_ports; + u16 vxlan_port; + netdev_features_t vxlan_features; + /* Netdev related settings */ unsigned char mac_addr[ETH_ALEN]; netdev_features_t netdev_features; @@ -1171,7 +1247,6 @@ struct xgbe_prv_data { struct tasklet_struct tasklet_i2c; struct tasklet_struct tasklet_an; -#ifdef CONFIG_DEBUG_FS struct dentry *xgbe_debugfs; unsigned int debugfs_xgmac_reg; @@ -1182,7 +1257,6 @@ struct xgbe_prv_data { unsigned int debugfs_xprop_reg; unsigned int debugfs_xi2c_reg; -#endif }; /* Function prototypes*/ @@ -1231,9 +1305,11 @@ void xgbe_init_tx_coalesce(struct xgbe_prv_data *); #ifdef CONFIG_DEBUG_FS void xgbe_debugfs_init(struct xgbe_prv_data *); void xgbe_debugfs_exit(struct xgbe_prv_data *); +void xgbe_debugfs_rename(struct xgbe_prv_data *pdata); #else static inline void xgbe_debugfs_init(struct xgbe_prv_data *pdata) {} static inline void xgbe_debugfs_exit(struct xgbe_prv_data *pdata) {} +static inline void xgbe_debugfs_rename(struct xgbe_prv_data *pdata) {} #endif /* CONFIG_DEBUG_FS */ /* NOTE: Uncomment for function trace log messages in KERNEL LOG */ diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index e45b587c2994..3188f553da35 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c @@ -468,7 +468,6 @@ static void xgene_enet_configure_clock(struct xgene_enet_pdata *pdata) static void xgene_gmac_set_speed(struct xgene_enet_pdata *pdata) { - struct device *dev = &pdata->pdev->dev; u32 icm0, icm2, mc2; u32 intf_ctl, rgmii, value; @@ -500,10 +499,8 @@ static void xgene_gmac_set_speed(struct xgene_enet_pdata *pdata) intf_ctl |= ENET_GHD_MODE; CFG_MACMODE_SET(&icm0, 2); CFG_WAITASYNCRD_SET(&icm2, 0); - if (dev->of_node) { - CFG_TXCLK_MUXSEL0_SET(&rgmii, pdata->tx_delay); - CFG_RXCLK_MUXSEL0_SET(&rgmii, pdata->rx_delay); - } + CFG_TXCLK_MUXSEL0_SET(&rgmii, pdata->tx_delay); + CFG_RXCLK_MUXSEL0_SET(&rgmii, pdata->rx_delay); rgmii |= CFG_SPEED_1250; xgene_enet_rd_csr(pdata, DEBUG_REG_ADDR, &value); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 6e253d913fe2..3b889efddf78 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -1591,7 +1591,7 @@ static int xgene_get_tx_delay(struct xgene_enet_pdata *pdata) struct device *dev = &pdata->pdev->dev; int delay, ret; - ret = of_property_read_u32(dev->of_node, "tx-delay", &delay); + ret = device_property_read_u32(dev, "tx-delay", &delay); if (ret) { pdata->tx_delay = 4; return 0; @@ -1612,7 +1612,7 @@ static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata) struct device *dev = &pdata->pdev->dev; int delay, ret; - ret = of_property_read_u32(dev->of_node, "rx-delay", &delay); + ret = device_property_read_u32(dev, "rx-delay", &delay); if (ret) { pdata->rx_delay = 2; return 0; @@ -1674,8 +1674,6 @@ static void xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata) ret = xgene_enet_phy_connect(pdata->ndev); if (!ret) pdata->mdio_driver = true; - - return; } static void xgene_enet_gpiod_get(struct xgene_enet_pdata *pdata) diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c index 96dd5300e0e5..e58b157b7d7c 100644 --- 
a/drivers/net/ethernet/apple/mace.c +++ b/drivers/net/ethernet/apple/mace.c @@ -114,8 +114,8 @@ static int mace_probe(struct macio_dev *mdev, const struct of_device_id *match) int j, rev, rc = -EBUSY; if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) { - printk(KERN_ERR "can't use MACE %s: need 3 addrs and 3 irqs\n", - mace->full_name); + printk(KERN_ERR "can't use MACE %pOF: need 3 addrs and 3 irqs\n", + mace); return -ENODEV; } @@ -123,8 +123,8 @@ static int mace_probe(struct macio_dev *mdev, const struct of_device_id *match) if (addr == NULL) { addr = of_get_property(mace, "local-mac-address", NULL); if (addr == NULL) { - printk(KERN_ERR "Can't get mac-address for MACE %s\n", - mace->full_name); + printk(KERN_ERR "Can't get mac-address for MACE %pOF\n", + mace); return -ENODEV; } } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h index 214986436ece..0fdaaa643073 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h @@ -51,6 +51,10 @@ #define AQ_CFG_SKB_FRAGS_MAX 32U +/* Number of descriptors available in one ring to resume this ring queue + */ +#define AQ_CFG_RESTART_DESC_THRES (AQ_CFG_SKB_FRAGS_MAX * 2) + #define AQ_CFG_NAPI_WEIGHT 64U #define AQ_CFG_MULTICAST_ADDRESS_MAX 32U diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 6ac9e2602d6d..0a5bb4114eb4 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -119,6 +119,35 @@ int aq_nic_cfg_start(struct aq_nic_s *self) return 0; } +static int aq_nic_update_link_status(struct aq_nic_s *self) +{ + int err = self->aq_hw_ops.hw_get_link_status(self->aq_hw); + + if (err) + return err; + + if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) + pr_info("%s: link change old %d new %d\n", + AQ_CFG_DRV_NAME, self->link_status.mbps, + self->aq_hw->aq_link_status.mbps); + + self->link_status = self->aq_hw->aq_link_status; + if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) { + aq_utils_obj_set(&self->header.flags, + AQ_NIC_FLAG_STARTED); + aq_utils_obj_clear(&self->header.flags, + AQ_NIC_LINK_DOWN); + netif_carrier_on(self->ndev); + netif_tx_wake_all_queues(self->ndev); + } + if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) { + netif_carrier_off(self->ndev); + netif_tx_disable(self->ndev); + aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN); + } + return 0; +} + static void aq_nic_service_timer_cb(unsigned long param) { struct aq_nic_s *self = (struct aq_nic_s *)param; @@ -131,26 +160,13 @@ static void aq_nic_service_timer_cb(unsigned long param) if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY)) goto err_exit; - err = self->aq_hw_ops.hw_get_link_status(self->aq_hw); - if (err < 0) + err = aq_nic_update_link_status(self); + if (err) goto err_exit; - self->link_status = self->aq_hw->aq_link_status; - self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw, self->aq_nic_cfg.is_interrupt_moderation); - if (self->link_status.mbps) { - aq_utils_obj_set(&self->header.flags, - AQ_NIC_FLAG_STARTED); - aq_utils_obj_clear(&self->header.flags, - AQ_NIC_LINK_DOWN); - netif_carrier_on(self->ndev); - } else { - netif_carrier_off(self->ndev); - aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN); - } - memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s)); memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s)); for (i = 
AQ_DIMOF(self->aq_vec); i--;) { @@ -214,7 +230,6 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, SET_NETDEV_DEV(ndev, dev); ndev->if_port = port; - ndev->min_mtu = ETH_MIN_MTU; self->ndev = ndev; self->aq_pci_func = aq_pci_func; @@ -241,7 +256,6 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, int aq_nic_ndev_register(struct aq_nic_s *self) { int err = 0; - unsigned int i = 0U; if (!self->ndev) { err = -EINVAL; @@ -263,8 +277,7 @@ int aq_nic_ndev_register(struct aq_nic_s *self) netif_carrier_off(self->ndev); - for (i = AQ_CFG_VECS_MAX; i--;) - aq_nic_ndev_queue_stop(self, i); + netif_tx_disable(self->ndev); err = register_netdev(self->ndev); if (err < 0) @@ -283,6 +296,7 @@ int aq_nic_ndev_init(struct aq_nic_s *self) self->ndev->features = aq_hw_caps->hw_features; self->ndev->priv_flags = aq_hw_caps->hw_priv_flags; self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN; + self->ndev->max_mtu = self->aq_hw_caps.mtu - ETH_FCS_LEN - ETH_HLEN; return 0; } @@ -318,12 +332,8 @@ struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev) err = -EINVAL; goto err_exit; } - if (netif_running(ndev)) { - unsigned int i; - - for (i = AQ_CFG_VECS_MAX; i--;) - netif_stop_subqueue(ndev, i); - } + if (netif_running(ndev)) + netif_tx_disable(ndev); for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs; self->aq_vecs++) { @@ -383,16 +393,6 @@ int aq_nic_init(struct aq_nic_s *self) return err; } -void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx) -{ - netif_start_subqueue(self->ndev, idx); -} - -void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx) -{ - netif_stop_subqueue(self->ndev, idx); -} - int aq_nic_start(struct aq_nic_s *self) { struct aq_vec_s *aq_vec = NULL; @@ -451,10 +451,6 @@ int aq_nic_start(struct aq_nic_s *self) goto err_exit; } - for (i = 0U, aq_vec = self->aq_vec[0]; - self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) - aq_nic_ndev_queue_start(self, i); - err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs); if (err < 0) goto err_exit; @@ -463,6 +459,8 @@ int aq_nic_start(struct aq_nic_s *self) if (err < 0) goto err_exit; + netif_tx_start_all_queues(self->ndev); + err_exit: return err; } @@ -475,6 +473,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self, unsigned int nr_frags = skb_shinfo(skb)->nr_frags; unsigned int frag_count = 0U; unsigned int dx = ring->sw_tail; + struct aq_ring_buff_s *first = NULL; struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx]; if (unlikely(skb_is_gso(skb))) { @@ -485,6 +484,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self, dx_buff->len_l4 = tcp_hdrlen(skb); dx_buff->mss = skb_shinfo(skb)->gso_size; dx_buff->is_txc = 1U; + dx_buff->eop_index = 0xffffU; dx_buff->is_ipv6 = (ip_hdr(skb)->version == 6) ? 
1U : 0U; @@ -504,6 +504,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self, if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa))) goto exit; + first = dx_buff; dx_buff->len_pkt = skb->len; dx_buff->is_sop = 1U; dx_buff->is_mapped = 1U; @@ -532,40 +533,46 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self, for (; nr_frags--; ++frag_count) { unsigned int frag_len = 0U; + unsigned int buff_offset = 0U; + unsigned int buff_size = 0U; dma_addr_t frag_pa; skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count]; frag_len = skb_frag_size(frag); - frag_pa = skb_frag_dma_map(aq_nic_get_dev(self), frag, 0, - frag_len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(aq_nic_get_dev(self), frag_pa))) - goto mapping_error; + while (frag_len) { + if (frag_len > AQ_CFG_TX_FRAME_MAX) + buff_size = AQ_CFG_TX_FRAME_MAX; + else + buff_size = frag_len; + + frag_pa = skb_frag_dma_map(aq_nic_get_dev(self), + frag, + buff_offset, + buff_size, + DMA_TO_DEVICE); + + if (unlikely(dma_mapping_error(aq_nic_get_dev(self), + frag_pa))) + goto mapping_error; - while (frag_len > AQ_CFG_TX_FRAME_MAX) { dx = aq_ring_next_dx(ring, dx); dx_buff = &ring->buff_ring[dx]; dx_buff->flags = 0U; - dx_buff->len = AQ_CFG_TX_FRAME_MAX; + dx_buff->len = buff_size; dx_buff->pa = frag_pa; dx_buff->is_mapped = 1U; + dx_buff->eop_index = 0xffffU; + + frag_len -= buff_size; + buff_offset += buff_size; - frag_len -= AQ_CFG_TX_FRAME_MAX; - frag_pa += AQ_CFG_TX_FRAME_MAX; ++ret; } - - dx = aq_ring_next_dx(ring, dx); - dx_buff = &ring->buff_ring[dx]; - - dx_buff->flags = 0U; - dx_buff->len = frag_len; - dx_buff->pa = frag_pa; - dx_buff->is_mapped = 1U; - ++ret; } + first->eop_index = dx; dx_buff->is_eop = 1U; dx_buff->skb = skb; goto exit; @@ -602,7 +609,6 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb) unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs; unsigned int tc = 0U; int err = NETDEV_TX_OK; - bool is_nic_in_bad_state; frags = skb_shinfo(skb)->nr_frags + 1; @@ -613,13 +619,10 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb) goto err_exit; } - is_nic_in_bad_state = aq_utils_obj_test(&self->header.flags, - AQ_NIC_FLAGS_IS_NOT_TX_READY) || - (aq_ring_avail_dx(ring) < - AQ_CFG_SKB_FRAGS_MAX); + aq_ring_update_queue_state(ring); - if (is_nic_in_bad_state) { - aq_nic_ndev_queue_stop(self, ring->idx); + /* Above status update may stop the queue. Check this. 
*/ + if (__netif_subqueue_stopped(self->ndev, ring->idx)) { err = NETDEV_TX_BUSY; goto err_exit; } @@ -631,9 +634,6 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb) ring, frags); if (err >= 0) { - if (aq_ring_avail_dx(ring) < AQ_CFG_SKB_FRAGS_MAX + 1) - aq_nic_ndev_queue_stop(self, ring->idx); - ++ring->stats.tx.packets; ring->stats.tx.bytes += skb->len; } @@ -693,16 +693,9 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev) int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu) { - int err = 0; - - if (new_mtu > self->aq_hw_caps.mtu) { - err = -EINVAL; - goto err_exit; - } self->aq_nic_cfg.mtu = new_mtu; -err_exit: - return err; + return 0; } int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev) @@ -905,9 +898,7 @@ int aq_nic_stop(struct aq_nic_s *self) struct aq_vec_s *aq_vec = NULL; unsigned int i = 0U; - for (i = 0U, aq_vec = self->aq_vec[0]; - self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) - aq_nic_ndev_queue_stop(self, i); + netif_tx_disable(self->ndev); del_timer_sync(&self->service_timer); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h index 7fc2a5ecb2b7..0ddd556ff901 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h @@ -83,8 +83,6 @@ struct net_device *aq_nic_get_ndev(struct aq_nic_s *self); int aq_nic_init(struct aq_nic_s *self); int aq_nic_cfg_start(struct aq_nic_s *self); int aq_nic_ndev_register(struct aq_nic_s *self); -void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx); -void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx); void aq_nic_ndev_free(struct aq_nic_s *self); int aq_nic_start(struct aq_nic_s *self); int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index ec5579fb8268..0654e0c76bc2 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -104,6 +104,38 @@ int aq_ring_init(struct aq_ring_s *self) return 0; } +static inline bool aq_ring_dx_in_range(unsigned int h, unsigned int i, + unsigned int t) +{ + return (h < t) ? 
((h < i) && (i < t)) : ((h < i) || (i < t)); +} + +void aq_ring_update_queue_state(struct aq_ring_s *ring) +{ + if (aq_ring_avail_dx(ring) <= AQ_CFG_SKB_FRAGS_MAX) + aq_ring_queue_stop(ring); + else if (aq_ring_avail_dx(ring) > AQ_CFG_RESTART_DESC_THRES) + aq_ring_queue_wake(ring); +} + +void aq_ring_queue_wake(struct aq_ring_s *ring) +{ + struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic); + + if (__netif_subqueue_stopped(ndev, ring->idx)) { + netif_wake_subqueue(ndev, ring->idx); + ring->stats.tx.queue_restarts++; + } +} + +void aq_ring_queue_stop(struct aq_ring_s *ring) +{ + struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic); + + if (!__netif_subqueue_stopped(ndev, ring->idx)) + netif_stop_subqueue(ndev, ring->idx); +} + void aq_ring_tx_clean(struct aq_ring_s *self) { struct device *dev = aq_nic_get_dev(self->aq_nic); @@ -113,27 +145,35 @@ void aq_ring_tx_clean(struct aq_ring_s *self) struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; if (likely(buff->is_mapped)) { - if (unlikely(buff->is_sop)) + if (unlikely(buff->is_sop)) { + if (!buff->is_eop && + buff->eop_index != 0xffffU && + (!aq_ring_dx_in_range(self->sw_head, + buff->eop_index, + self->hw_head))) + break; + dma_unmap_single(dev, buff->pa, buff->len, DMA_TO_DEVICE); - else + } else { dma_unmap_page(dev, buff->pa, buff->len, DMA_TO_DEVICE); + } } if (unlikely(buff->is_eop)) dev_kfree_skb_any(buff->skb); + + buff->pa = 0U; + buff->eop_index = 0xffffU; } } -static inline unsigned int aq_ring_dx_in_range(unsigned int h, unsigned int i, - unsigned int t) -{ - return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t)); -} - #define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) -int aq_ring_rx_clean(struct aq_ring_s *self, int *work_done, int budget) +int aq_ring_rx_clean(struct aq_ring_s *self, + struct napi_struct *napi, + int *work_done, + int budget) { struct net_device *ndev = aq_nic_get_ndev(self->aq_nic); int err = 0; @@ -239,7 +279,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self, int *work_done, int budget) skb_record_rx_queue(skb, self->idx); - netif_receive_skb(skb); + napi_gro_receive(napi, skb); ++self->stats.rx.packets; self->stats.rx.bytes += skb->len; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h index eecd6d1c4d73..5844078764bd 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h @@ -65,7 +65,7 @@ struct __packed aq_ring_buff_s { }; union { struct { - u32 len:16; + u16 len; u32 is_ip_cso:1; u32 is_udp_cso:1; u32 is_tcp_cso:1; @@ -77,8 +77,10 @@ struct __packed aq_ring_buff_s { u32 is_cleaned:1; u32 is_error:1; u32 rsvd3:6; + u16 eop_index; + u16 rsvd4; }; - u32 flags; + u64 flags; }; }; @@ -94,6 +96,7 @@ struct aq_ring_stats_tx_s { u64 errors; u64 packets; u64 bytes; + u64 queue_restarts; }; union aq_ring_stats_s { @@ -147,8 +150,14 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self, int aq_ring_init(struct aq_ring_s *self); void aq_ring_rx_deinit(struct aq_ring_s *self); void aq_ring_free(struct aq_ring_s *self); +void aq_ring_update_queue_state(struct aq_ring_s *ring); +void aq_ring_queue_wake(struct aq_ring_s *ring); +void aq_ring_queue_stop(struct aq_ring_s *ring); void aq_ring_tx_clean(struct aq_ring_s *self); -int aq_ring_rx_clean(struct aq_ring_s *self, int *work_done, int budget); +int aq_ring_rx_clean(struct aq_ring_s *self, + struct napi_struct *napi, + int *work_done, + int budget); int aq_ring_rx_fill(struct aq_ring_s *self); 
#endif /* AQ_RING_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c index fee446af748f..305ff8ffac2c 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c @@ -59,12 +59,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget) if (ring[AQ_VEC_TX_ID].sw_head != ring[AQ_VEC_TX_ID].hw_head) { aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]); - - if (aq_ring_avail_dx(&ring[AQ_VEC_TX_ID]) > - AQ_CFG_SKB_FRAGS_MAX) { - aq_nic_ndev_queue_start(self->aq_nic, - ring[AQ_VEC_TX_ID].idx); - } + aq_ring_update_queue_state(&ring[AQ_VEC_TX_ID]); was_tx_cleaned = true; } @@ -76,6 +71,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget) if (ring[AQ_VEC_RX_ID].sw_head != ring[AQ_VEC_RX_ID].hw_head) { err = aq_ring_rx_clean(&ring[AQ_VEC_RX_ID], + napi, &work_done, budget - work_done); if (err < 0) @@ -363,6 +359,7 @@ void aq_vec_add_stats(struct aq_vec_s *self, stats_tx->packets += tx->packets; stats_tx->bytes += tx->bytes; stats_tx->errors += tx->errors; + stats_tx->queue_restarts += tx->queue_restarts; } } diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h index f3957e930340..fcf89e25a773 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h @@ -16,7 +16,7 @@ #include "../aq_common.h" -#define HW_ATL_B0_MTU_JUMBO (16000U) +#define HW_ATL_B0_MTU_JUMBO 16352U #define HW_ATL_B0_MTU 1514U #define HW_ATL_B0_TX_RINGS 4U diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index 4f5ec9a0fbfb..bf734b32e44b 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c @@ -351,8 +351,7 @@ int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self) break; default: - link_status->mbps = 0U; - break; + return -EBUSY; } } diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index 68de2f2652f2..3241af1ce718 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -720,6 +720,18 @@ static int arc_emac_set_address(struct net_device *ndev, void *p) return 0; } +static int arc_emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + if (!netif_running(dev)) + return -EINVAL; + + if (!dev->phydev) + return -ENODEV; + + return phy_mii_ioctl(dev->phydev, rq, cmd); +} + + static const struct net_device_ops arc_emac_netdev_ops = { .ndo_open = arc_emac_open, .ndo_stop = arc_emac_stop, @@ -727,6 +739,7 @@ static const struct net_device_ops arc_emac_netdev_ops = { .ndo_set_mac_address = arc_emac_set_address, .ndo_get_stats = arc_emac_stats, .ndo_set_rx_mode = arc_emac_set_rx_mode, + .ndo_do_ioctl = arc_emac_ioctl, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = arc_emac_poll_controller, #endif diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 96413808c726..67134ece1107 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -61,10 +61,12 @@ config BCM63XX_ENET config BCMGENET tristate "Broadcom GENET internal MAC support" + depends on OF && HAS_IOMEM select MII select PHYLIB select FIXED_PHY select BCM7XXX_PHY + select MDIO_BCM_UNIMAC help This driver supports the 
built-in Ethernet MACs found in the Broadcom BCM7xxx Set Top Box family chipset. @@ -193,6 +195,7 @@ config SYSTEMPORT config BNXT tristate "Broadcom NetXtreme-C/E support" depends on PCI + depends on MAY_USE_DEVLINK select FW_LOADER select LIBCRC32C ---help--- @@ -209,6 +212,15 @@ config BNXT_SRIOV Virtualization support in the NetXtreme-C/E products. This allows for virtual function acceleration in virtual environments. +config BNXT_FLOWER_OFFLOAD + bool "TC Flower offload support for NetXtreme-C/E" + depends on BNXT + default y + ---help--- + This configuration parameter enables TC Flower packet classifier + offload for eswitch. This option enables SR-IOV switchdev eswitch + offload. + config BNXT_DCB bool "Data Center Bridging (DCB) Support" default n diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 61a88b64bd39..4f3845a58126 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -2674,7 +2674,7 @@ static int bcm_enetsw_set_ringparam(struct net_device *dev, return 0; } -static struct ethtool_ops bcm_enetsw_ethtool_ops = { +static const struct ethtool_ops bcm_enetsw_ethtool_ops = { .get_strings = bcm_enetsw_get_strings, .get_sset_count = bcm_enetsw_get_sset_count, .get_ethtool_stats = bcm_enetsw_get_ethtool_stats, diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index c28fa5a8734c..83eec9a8c275 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -32,13 +32,13 @@ #define BCM_SYSPORT_IO_MACRO(name, offset) \ static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off) \ { \ - u32 reg = __raw_readl(priv->base + offset + off); \ + u32 reg = readl_relaxed(priv->base + offset + off); \ return reg; \ } \ static inline void name##_writel(struct bcm_sysport_priv *priv, \ u32 val, u32 off) \ { \ - __raw_writel(val, priv->base + offset + off); \ + writel_relaxed(val, priv->base + offset + off); \ } \ BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET); @@ -59,14 +59,14 @@ static inline u32 rdma_readl(struct bcm_sysport_priv *priv, u32 off) { if (priv->is_lite && off >= RDMA_STATUS) off += 4; - return __raw_readl(priv->base + SYS_PORT_RDMA_OFFSET + off); + return readl_relaxed(priv->base + SYS_PORT_RDMA_OFFSET + off); } static inline void rdma_writel(struct bcm_sysport_priv *priv, u32 val, u32 off) { if (priv->is_lite && off >= RDMA_STATUS) off += 4; - __raw_writel(val, priv->base + SYS_PORT_RDMA_OFFSET + off); + writel_relaxed(val, priv->base + SYS_PORT_RDMA_OFFSET + off); } static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit) @@ -110,10 +110,10 @@ static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv, dma_addr_t addr) { #ifdef CONFIG_PHYS_ADDR_T_64BIT - __raw_writel(upper_32_bits(addr) & DESC_ADDR_HI_MASK, + writel_relaxed(upper_32_bits(addr) & DESC_ADDR_HI_MASK, d + DESC_ADDR_HI_STATUS_LEN); #endif - __raw_writel(lower_32_bits(addr), d + DESC_ADDR_LO); + writel_relaxed(lower_32_bits(addr), d + DESC_ADDR_LO); } static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv, @@ -201,10 +201,10 @@ static int bcm_sysport_set_features(struct net_device *dev, */ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = { /* general stats */ - STAT_NETDEV(rx_packets), - STAT_NETDEV(tx_packets), - STAT_NETDEV(rx_bytes), - STAT_NETDEV(tx_bytes), + STAT_NETDEV64(rx_packets), + STAT_NETDEV64(tx_packets), + 
STAT_NETDEV64(rx_bytes), + STAT_NETDEV64(tx_bytes), STAT_NETDEV(rx_errors), STAT_NETDEV(tx_errors), STAT_NETDEV(rx_dropped), @@ -316,6 +316,7 @@ static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type) { switch (type) { case BCM_SYSPORT_STAT_NETDEV: + case BCM_SYSPORT_STAT_NETDEV64: case BCM_SYSPORT_STAT_RXCHK: case BCM_SYSPORT_STAT_RBUF: case BCM_SYSPORT_STAT_SOFT: @@ -398,6 +399,7 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv) s = &bcm_sysport_gstrings_stats[i]; switch (s->type) { case BCM_SYSPORT_STAT_NETDEV: + case BCM_SYSPORT_STAT_NETDEV64: case BCM_SYSPORT_STAT_SOFT: continue; case BCM_SYSPORT_STAT_MIB_RX: @@ -430,15 +432,44 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv) netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n"); } +static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv, + u64 *tx_bytes, u64 *tx_packets) +{ + struct bcm_sysport_tx_ring *ring; + u64 bytes = 0, packets = 0; + unsigned int start; + unsigned int q; + + for (q = 0; q < priv->netdev->num_tx_queues; q++) { + ring = &priv->tx_rings[q]; + do { + start = u64_stats_fetch_begin_irq(&priv->syncp); + bytes = ring->bytes; + packets = ring->packets; + } while (u64_stats_fetch_retry_irq(&priv->syncp, start)); + + *tx_bytes += bytes; + *tx_packets += packets; + } +} + static void bcm_sysport_get_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct bcm_sysport_priv *priv = netdev_priv(dev); + struct bcm_sysport_stats64 *stats64 = &priv->stats64; + struct u64_stats_sync *syncp = &priv->syncp; struct bcm_sysport_tx_ring *ring; + u64 tx_bytes = 0, tx_packets = 0; + unsigned int start; int i, j; - if (netif_running(dev)) + if (netif_running(dev)) { bcm_sysport_update_mib_counters(priv); + bcm_sysport_update_tx_stats(priv, &tx_bytes, &tx_packets); + stats64->tx_bytes = tx_bytes; + stats64->tx_packets = tx_packets; + } for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) { const struct bcm_sysport_stats *s; @@ -447,14 +478,23 @@ static void bcm_sysport_get_stats(struct net_device *dev, s = &bcm_sysport_gstrings_stats[i]; if (s->type == BCM_SYSPORT_STAT_NETDEV) p = (char *)&dev->stats; + else if (s->type == BCM_SYSPORT_STAT_NETDEV64) + p = (char *)stats64; else p = (char *)priv; if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type)) continue; - p += s->stat_offset; - data[j] = *(unsigned long *)p; + + if (s->stat_sizeof == sizeof(u64) && + s->type == BCM_SYSPORT_STAT_NETDEV64) { + do { + start = u64_stats_fetch_begin_irq(syncp); + data[i] = *(u64 *)p; + } while (u64_stats_fetch_retry_irq(syncp, start)); + } else + data[i] = *(u32 *)p; j++; } @@ -666,6 +706,7 @@ static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv) static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, unsigned int budget) { + struct bcm_sysport_stats64 *stats64 = &priv->stats64; struct net_device *ndev = priv->netdev; unsigned int processed = 0, to_process; struct bcm_sysport_cb *cb; @@ -769,6 +810,10 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, skb->protocol = eth_type_trans(skb, ndev); ndev->stats.rx_packets++; ndev->stats.rx_bytes += len; + u64_stats_update_begin(&priv->syncp); + stats64->rx_packets++; + stats64->rx_bytes += len; + u64_stats_update_end(&priv->syncp); napi_gro_receive(&priv->napi, skb); next: @@ -791,17 +836,15 @@ static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring, struct device *kdev = &priv->pdev->dev; if (cb->skb) { - ring->bytes 
+= cb->skb->len; *bytes_compl += cb->skb->len; dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), dma_unmap_len(cb, dma_len), DMA_TO_DEVICE); - ring->packets++; (*pkts_compl)++; bcm_sysport_free_cb(cb); /* SKB fragment */ } else if (dma_unmap_addr(cb, dma_addr)) { - ring->bytes += dma_unmap_len(cb, dma_len); + *bytes_compl += dma_unmap_len(cb, dma_len); dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr), dma_unmap_len(cb, dma_len), DMA_TO_DEVICE); dma_unmap_addr_set(cb, dma_addr, 0); @@ -812,9 +855,9 @@ static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring, static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, struct bcm_sysport_tx_ring *ring) { - struct net_device *ndev = priv->netdev; unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs; unsigned int pkts_compl = 0, bytes_compl = 0; + struct net_device *ndev = priv->netdev; struct bcm_sysport_cb *cb; u32 hw_ind; @@ -853,6 +896,11 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, last_c_index &= (num_tx_cbs - 1); } + u64_stats_update_begin(&priv->syncp); + ring->packets += pkts_compl; + ring->bytes += bytes_compl; + u64_stats_update_end(&priv->syncp); + ring->c_index = c_index; netif_dbg(priv, tx_done, ndev, @@ -1371,6 +1419,19 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, tdma_writel(priv, RING_IGNORE_STATUS, TDMA_DESC_RING_MAPPING(index)); tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index)); + /* Do not use tdma_control_bit() here because TSB_SWAP1 collides + * with the original definition of ACB_ALGO + */ + reg = tdma_readl(priv, TDMA_CONTROL); + if (priv->is_lite) + reg &= ~BIT(TSB_SWAP1); + /* Set a correct TSB format based on host endian */ + if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) + reg |= tdma_control_bit(priv, TSB_SWAP0); + else + reg &= ~tdma_control_bit(priv, TSB_SWAP0); + tdma_writel(priv, reg, TDMA_CONTROL); + /* Program the number of descriptors as MAX_THRESHOLD and half of * its size for the hysteresis trigger */ @@ -1677,22 +1738,23 @@ static int bcm_sysport_change_mac(struct net_device *dev, void *p) return 0; } -static struct net_device_stats *bcm_sysport_get_nstats(struct net_device *dev) +static void bcm_sysport_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) { struct bcm_sysport_priv *priv = netdev_priv(dev); - unsigned long tx_bytes = 0, tx_packets = 0; - struct bcm_sysport_tx_ring *ring; - unsigned int q; + struct bcm_sysport_stats64 *stats64 = &priv->stats64; + unsigned int start; - for (q = 0; q < dev->num_tx_queues; q++) { - ring = &priv->tx_rings[q]; - tx_bytes += ring->bytes; - tx_packets += ring->packets; - } + netdev_stats_to_stats64(stats, &dev->stats); - dev->stats.tx_bytes = tx_bytes; - dev->stats.tx_packets = tx_packets; - return &dev->stats; + bcm_sysport_update_tx_stats(priv, &stats->tx_bytes, + &stats->tx_packets); + + do { + start = u64_stats_fetch_begin_irq(&priv->syncp); + stats->rx_packets = stats64->rx_packets; + stats->rx_bytes = stats64->rx_bytes; + } while (u64_stats_fetch_retry_irq(&priv->syncp, start)); } static void bcm_sysport_netif_start(struct net_device *dev) @@ -1724,10 +1786,14 @@ static void rbuf_init(struct bcm_sysport_priv *priv) reg = rbuf_readl(priv, RBUF_CONTROL); reg |= RBUF_4B_ALGN | RBUF_RSB_EN; /* Set a correct RSB format on SYSTEMPORT Lite */ - if (priv->is_lite) { + if (priv->is_lite) reg &= ~RBUF_RSB_SWAP1; + + /* Set a correct RSB format based on host endian */ + if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) reg |= RBUF_RSB_SWAP0; - } + else + reg &= 
~RBUF_RSB_SWAP0; rbuf_writel(priv, reg, RBUF_CONTROL); } @@ -1956,7 +2022,7 @@ static const struct net_device_ops bcm_sysport_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = bcm_sysport_poll_controller, #endif - .ndo_get_stats = bcm_sysport_get_nstats, + .ndo_get_stats64 = bcm_sysport_get_stats64, }; #define REV_FMT "v%2x.%02x" @@ -2104,6 +2170,8 @@ static int bcm_sysport_probe(struct platform_device *pdev) /* libphy will adjust the link state accordingly */ netif_carrier_off(dev); + u64_stats_init(&priv->syncp); + ret = register_netdev(dev); if (ret) { dev_err(&pdev->dev, "failed to register net_device\n"); diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index 77a51c167a69..82e401df199e 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -449,7 +449,8 @@ struct bcm_rsb { /* Uses 2 bits on SYSTEMPORT Lite and shifts everything by 1 bit, we * keep the SYSTEMPORT layout here and adjust with tdma_control_bit() */ -#define TSB_SWAP 2 +#define TSB_SWAP0 2 +#define TSB_SWAP1 3 #define ACB_ALGO 3 #define BUF_DATA_OFFSET_SHIFT 4 #define BUF_DATA_OFFSET_MASK 0x3ff @@ -603,6 +604,7 @@ struct bcm_sysport_mib { /* HW maintains a large list of counters */ enum bcm_sysport_stat_type { BCM_SYSPORT_STAT_NETDEV = -1, + BCM_SYSPORT_STAT_NETDEV64, BCM_SYSPORT_STAT_MIB_RX, BCM_SYSPORT_STAT_MIB_TX, BCM_SYSPORT_STAT_RUNT, @@ -619,6 +621,13 @@ enum bcm_sysport_stat_type { .type = BCM_SYSPORT_STAT_NETDEV, \ } +#define STAT_NETDEV64(m) { \ + .stat_string = __stringify(m), \ + .stat_sizeof = sizeof(((struct bcm_sysport_stats64 *)0)->m), \ + .stat_offset = offsetof(struct bcm_sysport_stats64, m), \ + .type = BCM_SYSPORT_STAT_NETDEV64, \ +} + #define STAT_MIB(str, m, _type) { \ .stat_string = str, \ .stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \ @@ -659,6 +668,14 @@ struct bcm_sysport_stats { u16 reg_offset; }; +struct bcm_sysport_stats64 { + /* 64bit stats on 32bit/64bit Machine */ + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; +}; + /* Software house keeping helper structure */ struct bcm_sysport_cb { struct sk_buff *skb; /* SKB for RX packets */ @@ -743,5 +760,10 @@ struct bcm_sysport_priv { /* Ethtool */ u32 msg_enable; + + struct bcm_sysport_stats64 stats64; + + /* For atomic update generic 64bit value on 32bit Machine */ + struct u64_stats_sync syncp; }; #endif /* __BCM_SYSPORT_H */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 67fe3d826566..1216c1f1e052 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -4284,15 +4284,17 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) return 0; } -int __bnx2x_setup_tc(struct net_device *dev, u32 handle, u32 chain_index, - __be16 proto, struct tc_to_netdev *tc) +int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) { - if (tc->type != TC_SETUP_MQPRIO) - return -EINVAL; + struct tc_mqprio_qopt *mqprio = type_data; - tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + if (type != TC_SETUP_MQPRIO) + return -EOPNOTSUPP; - return bnx2x_setup_tc(dev, tc->mqprio->num_tc); + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + + return bnx2x_setup_tc(dev, mqprio->num_tc); } /* called with rtnl_lock */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index c26688d2f326..a5265e1344f1 100644 --- 
a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -486,8 +486,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev); /* setup_tc callback */ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc); -int __bnx2x_setup_tc(struct net_device *dev, u32 handle, u32 chain_index, - __be16 proto, struct tc_to_netdev *tc); +int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data); int bnx2x_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi); diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile index a7ca45b251cb..4f0cb8e1ffc0 100644 --- a/drivers/net/ethernet/broadcom/bnxt/Makefile +++ b/drivers/net/ethernet/broadcom/bnxt/Makefile @@ -1,3 +1,3 @@ obj-$(CONFIG_BNXT) += bnxt_en.o -bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o +bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o bnxt_tc.o diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index f20b3d2a4c23..aacec8bc19d5 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -48,6 +49,8 @@ #include #include #include +#include +#include #include "bnxt_hsi.h" #include "bnxt.h" @@ -56,6 +59,8 @@ #include "bnxt_ethtool.h" #include "bnxt_dcb.h" #include "bnxt_xdp.h" +#include "bnxt_vfr.h" +#include "bnxt_tc.h" #define BNXT_TX_TIMEOUT (5 * HZ) @@ -101,6 +106,8 @@ enum board_idx { BCM57416_NPAR, BCM57452, BCM57454, + BCM58802, + BCM58808, NETXTREME_E_VF, NETXTREME_C_VF, }; @@ -109,39 +116,42 @@ enum board_idx { static const struct { char *name; } board_info[] = { - { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" }, - { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" }, - { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" }, - { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" }, - { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" }, - { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" }, - { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" }, - { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" }, - { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" }, - { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" }, - { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" }, - { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" }, - { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" }, - { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" }, - { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" }, - { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" }, - { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" }, - { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" }, - { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" }, - { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" }, - { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" }, - { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" }, - { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" }, - { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" }, - { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" }, - { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" }, - { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" }, - { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, - { "Broadcom NetXtreme-E Ethernet Virtual Function" }, - { 
"Broadcom NetXtreme-C Ethernet Virtual Function" }, + [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" }, + [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" }, + [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" }, + [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" }, + [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" }, + [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" }, + [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" }, + [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" }, + [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" }, + [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" }, + [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" }, + [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" }, + [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" }, + [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" }, + [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" }, + [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" }, + [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" }, + [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" }, + [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" }, + [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" }, + [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" }, + [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" }, + [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" }, + [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" }, + [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" }, + [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" }, + [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" }, + [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, + [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" }, + [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, + [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" }, + [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" }, }; static const struct pci_device_id bnxt_pci_tbl[] = { + { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 }, { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR }, { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 }, { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 }, @@ -172,8 +182,9 @@ static const struct pci_device_id bnxt_pci_tbl[] = { { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR }, { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR }, { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 }, { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 }, - { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 }, + { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 }, #ifdef CONFIG_BNXT_SRIOV { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF }, { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF }, @@ -243,6 +254,16 @@ const u16 bnxt_lhint_arr[] = { TX_BD_FLAGS_LHINT_2048_AND_LARGER, }; +static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb) +{ + 
struct metadata_dst *md_dst = skb_metadata_dst(skb); + + if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX) + return 0; + + return md_dst->u.port_info.port_id; +} + static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct bnxt *bp = netdev_priv(dev); @@ -287,7 +308,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) tx_buf->nr_frags = last_frag; vlan_tag_flags = 0; - cfa_action = 0; + cfa_action = bnxt_xmit_get_cfa_action(skb); if (skb_vlan_tag_present(skb)) { vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN | skb_vlan_tag_get(skb); @@ -322,7 +343,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) tx_push1->tx_bd_hsize_lflags = 0; tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); - tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action); + tx_push1->tx_bd_cfa_action = + cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT); end = pdata + length; end = PTR_ALIGN(end, 8) - 1; @@ -427,7 +449,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) txbd->tx_bd_len_flags_type = cpu_to_le32(flags); txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); - txbd1->tx_bd_cfa_action = cpu_to_le32(cfa_action); + txbd1->tx_bd_cfa_action = + cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT); for (i = 0; i < last_frag; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; @@ -1032,7 +1055,10 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, bnxt_sched_reset(bp, rxr); return; } - + /* Store cfa_code in tpa_info to use in tpa_end + * completion processing. + */ + tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1); prod_rx_buf->data = tpa_info->data; prod_rx_buf->data_ptr = tpa_info->data_ptr; @@ -1267,6 +1293,17 @@ static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp, return skb; } +/* Given the cfa_code of a received packet, determine which + * netdev (vf-rep or PF) the packet is destined to. + */ +static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code) +{ + struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code); + + /* if vf-rep dev is NULL, the packet must belong to the PF */ + return dev ? 
dev : bp->dev; +} + static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, @@ -1360,7 +1397,9 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, return NULL; } } - skb->protocol = eth_type_trans(skb, bp->dev); + + skb->protocol = + eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code)); if (tpa_info->hash_type != PKT_HASH_TYPE_NONE) skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type); @@ -1387,6 +1426,18 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, return skb; } +static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi, + struct sk_buff *skb) +{ + if (skb->dev != bp->dev) { + /* this packet belongs to a vf-rep */ + bnxt_vf_rep_rx(bp, skb); + return; + } + skb_record_rx_queue(skb, bnapi->index); + napi_gro_receive(&bnapi->napi, skb); +} + /* returns the following: * 1 - 1 packet successfully received * 0 - successful TPA_START, packet not completed yet @@ -1403,7 +1454,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, struct rx_cmp *rxcmp; struct rx_cmp_ext *rxcmp1; u32 tmp_raw_cons = *raw_cons; - u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons); + u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons); struct bnxt_sw_rx_bd *rx_buf; unsigned int len; u8 *data_ptr, agg_bufs, cmp_type; @@ -1445,8 +1496,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, rc = -ENOMEM; if (likely(skb)) { - skb_record_rx_queue(skb, bnapi->index); - napi_gro_receive(&bnapi->napi, skb); + bnxt_deliver_skb(bp, bnapi, skb); rc = 1; } *event |= BNXT_RX_EVENT; @@ -1535,7 +1585,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type); } - skb->protocol = eth_type_trans(skb, dev); + cfa_code = RX_CMP_CFA_CODE(rxcmp1); + skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code)); if ((rxcmp1->rx_cmp_flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) && @@ -1560,8 +1611,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, } } - skb_record_rx_queue(skb, bnapi->index); - napi_gro_receive(&bnapi->napi, skb); + bnxt_deliver_skb(bp, bnapi, skb); rc = 1; next_rx: @@ -1802,6 +1852,13 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) &event); if (likely(rc >= 0)) rx_pkts += rc; + /* Increment rx_pkts when rc is -ENOMEM to count towards + * the NAPI budget. Otherwise, we may potentially loop + * here forever if we consistently cannot allocate + * buffers. 
+ */ + else if (rc == -ENOMEM) + rx_pkts++; else if (rc == -EBUSY) /* partial completion */ break; } else if (unlikely((TX_CMP_TYPE(txcmp) == @@ -4420,9 +4477,33 @@ static int bnxt_hwrm_reserve_tx_rings(struct bnxt *bp, int *tx_rings) mutex_lock(&bp->hwrm_cmd_lock); rc = __bnxt_hwrm_get_tx_rings(bp, 0xffff, tx_rings); mutex_unlock(&bp->hwrm_cmd_lock); + if (!rc) + bp->tx_reserved_rings = *tx_rings; return rc; } +static int bnxt_hwrm_check_tx_rings(struct bnxt *bp, int tx_rings) +{ + struct hwrm_func_cfg_input req = {0}; + int rc; + + if (bp->hwrm_spec_code < 0x10801) + return 0; + + if (BNXT_VF(bp)) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); + req.fid = cpu_to_le16(0xffff); + req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST); + req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS); + req.num_tx_rings = cpu_to_le16(tx_rings); + rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + return -ENOMEM; + return 0; +} + static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs, u32 buf_tmrs, u16 flags, struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) @@ -4577,6 +4658,7 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp) { struct hwrm_func_qcfg_input req = {0}; struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + u16 flags; int rc; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); @@ -4593,15 +4675,15 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp) vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK; } #endif - if (BNXT_PF(bp)) { - u16 flags = le16_to_cpu(resp->flags); - - if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED | - FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) - bp->flags |= BNXT_FLAG_FW_LLDP_AGENT; - if (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST) - bp->flags |= BNXT_FLAG_MULTI_HOST; + flags = le16_to_cpu(resp->flags); + if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED | + FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) { + bp->flags |= BNXT_FLAG_FW_LLDP_AGENT; + if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED) + bp->flags |= BNXT_FLAG_FW_DCBX_AGENT; } + if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)) + bp->flags |= BNXT_FLAG_MULTI_HOST; switch (resp->port_partition_type) { case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0: @@ -4610,6 +4692,13 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp) bp->port_partition_type = resp->port_partition_type; break; } + if (bp->hwrm_spec_code < 0x10707 || + resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB) + bp->br_mode = BRIDGE_MODE_VEB; + else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA) + bp->br_mode = BRIDGE_MODE_VEPA; + else + bp->br_mode = BRIDGE_MODE_UNDEF; func_qcfg_exit: mutex_unlock(&bp->hwrm_cmd_lock); @@ -4900,6 +4989,26 @@ static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, } } +static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode) +{ + struct hwrm_func_cfg_input req = {0}; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); + req.fid = cpu_to_le16(0xffff); + req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE); + if (br_mode == BRIDGE_MODE_VEB) + req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB; + else if (br_mode == BRIDGE_MODE_VEPA) + req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA; + else + return -EINVAL; + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + rc = -EIO; + return rc; +} + static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) { struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; @@ -5035,6 +5144,15 @@ static int 
bnxt_init_chip(struct bnxt *bp, bool irq_re_init) rc); goto err_out; } + if (bp->tx_reserved_rings != bp->tx_nr_rings) { + int tx = bp->tx_nr_rings; + + if (bnxt_hwrm_reserve_tx_rings(bp, &tx) || + tx < bp->tx_nr_rings) { + rc = -ENOMEM; + goto err_out; + } + } } rc = bnxt_hwrm_ring_alloc(bp); @@ -5441,8 +5559,15 @@ static void bnxt_free_irq(struct bnxt *bp) for (i = 0; i < bp->cp_nr_rings; i++) { irq = &bp->irq_tbl[i]; - if (irq->requested) + if (irq->requested) { + if (irq->have_cpumask) { + irq_set_affinity_hint(irq->vector, NULL); + free_cpumask_var(irq->cpu_mask); + irq->have_cpumask = 0; + } free_irq(irq->vector, bp->bnapi[i]); + } + irq->requested = 0; } } @@ -5475,6 +5600,21 @@ static int bnxt_request_irq(struct bnxt *bp) break; irq->requested = 1; + + if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) { + int numa_node = dev_to_node(&bp->pdev->dev); + + irq->have_cpumask = 1; + cpumask_set_cpu(cpumask_local_spread(i, numa_node), + irq->cpu_mask); + rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask); + if (rc) { + netdev_warn(bp->dev, + "Set affinity failed, IRQ = %d\n", + irq->vector); + break; + } + } } return rc; } @@ -5548,12 +5688,10 @@ void bnxt_tx_disable(struct bnxt *bp) { int i; struct bnxt_tx_ring_info *txr; - struct netdev_queue *txq; if (bp->tx_ring) { for (i = 0; i < bp->tx_nr_rings; i++) { txr = &bp->tx_ring[i]; - txq = netdev_get_tx_queue(bp->dev, i); txr->dev_state = BNXT_DEV_STATE_CLOSING; } } @@ -5566,11 +5704,9 @@ void bnxt_tx_enable(struct bnxt *bp) { int i; struct bnxt_tx_ring_info *txr; - struct netdev_queue *txq; for (i = 0; i < bp->tx_nr_rings; i++) { txr = &bp->tx_ring[i]; - txq = netdev_get_tx_queue(bp->dev, i); txr->dev_state = 0; } netif_tx_wake_all_queues(bp->dev); @@ -5635,7 +5771,7 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) if (rc) goto hwrm_phy_qcaps_exit; - if (resp->eee_supported & PORT_PHY_QCAPS_RESP_EEE_SUPPORTED) { + if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) { struct ethtool_eee *eee = &bp->eee; u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode); @@ -5650,6 +5786,8 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) link_info->support_auto_speeds = le16_to_cpu(resp->supported_speeds_auto_mode); + bp->port_count = resp->port_cnt; + hwrm_phy_qcaps_exit: mutex_unlock(&bp->hwrm_cmd_lock); return rc; @@ -5675,13 +5813,15 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp)); link_info->phy_link_status = resp->link; - link_info->duplex = resp->duplex; + link_info->duplex = resp->duplex_cfg; + if (bp->hwrm_spec_code >= 0x10800) + link_info->duplex = resp->duplex_state; link_info->pause = resp->pause; link_info->auto_mode = resp->auto_mode; link_info->auto_pause_setting = resp->auto_pause; link_info->lp_pause = resp->link_partner_adv_pause; link_info->force_pause_setting = resp->force_pause; - link_info->duplex_setting = resp->duplex; + link_info->duplex_setting = resp->duplex_cfg; if (link_info->phy_link_status == BNXT_LINK_LINK) link_info->link_speed = le16_to_cpu(resp->link_speed); else @@ -6203,6 +6343,9 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) /* Poll link status and check for SFP+ module status */ bnxt_get_port_module_status(bp); + /* VF-reps may need to be re-opened after the PF is re-opened */ + if (BNXT_PF(bp)) + bnxt_vf_reps_open(bp); return 0; open_err: @@ -6291,6 +6434,10 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) if (rc) netdev_warn(bp->dev, "timeout waiting 
for SRIOV config operation to complete!\n"); } + + /* Close the VF-reps before closing PF */ + if (BNXT_PF(bp)) + bnxt_vf_reps_close(bp); #endif /* Change device state to avoid TX queue wake up's */ bnxt_tx_disable(bp); @@ -6802,7 +6949,8 @@ static void bnxt_timer(unsigned long data) if (atomic_read(&bp->intr_sem) != 0) goto bnxt_restart_timer; - if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS)) { + if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) && + bp->stats_coal_ticks) { set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event); schedule_work(&bp->sp_task); } @@ -6912,8 +7060,8 @@ static void bnxt_sp_task(struct work_struct *work) } /* Under rtnl_lock */ -int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, - int tx_xdp) +int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, + int tx_xdp) { int max_rx, max_tx, tx_sets = 1; int tx_rings_needed; @@ -6933,10 +7081,7 @@ int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, if (max_tx < tx_rings_needed) return -ENOMEM; - if (bnxt_hwrm_reserve_tx_rings(bp, &tx_rings_needed) || - tx_rings_needed < (tx * tx_sets + tx_xdp)) - return -ENOMEM; - return 0; + return bnxt_hwrm_check_tx_rings(bp, tx_rings_needed); } static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) @@ -7125,8 +7270,8 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) if (bp->flags & BNXT_FLAG_SHARED_RINGS) sh = true; - rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, - sh, tc, bp->tx_nr_rings_xdp); + rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, + sh, tc, bp->tx_nr_rings_xdp); if (rc) return rc; @@ -7152,15 +7297,33 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) return 0; } -static int bnxt_setup_tc(struct net_device *dev, u32 handle, u32 chain_index, - __be16 proto, struct tc_to_netdev *ntc) +static int bnxt_setup_flower(struct net_device *dev, + struct tc_cls_flower_offload *cls_flower) { - if (ntc->type != TC_SETUP_MQPRIO) - return -EINVAL; + struct bnxt *bp = netdev_priv(dev); - ntc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + if (BNXT_VF(bp)) + return -EOPNOTSUPP; - return bnxt_setup_mq_tc(dev, ntc->mqprio->num_tc); + return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, cls_flower); +} + +static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + switch (type) { + case TC_SETUP_CLSFLOWER: + return bnxt_setup_flower(dev, type_data); + case TC_SETUP_MQPRIO: { + struct tc_mqprio_qopt *mqprio = type_data; + + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + + return bnxt_setup_mq_tc(dev, mqprio->num_tc); + } + default: + return -EOPNOTSUPP; + } } #ifdef CONFIG_RFS_ACCEL @@ -7412,6 +7575,102 @@ static void bnxt_udp_tunnel_del(struct net_device *dev, schedule_work(&bp->sp_task); } +static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, u32 filter_mask, + int nlflags) +{ + struct bnxt *bp = netdev_priv(dev); + + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0, + nlflags, filter_mask, NULL); +} + +static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, + u16 flags) +{ + struct bnxt *bp = netdev_priv(dev); + struct nlattr *attr, *br_spec; + int rem, rc = 0; + + if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp)) + return -EOPNOTSUPP; + + br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + if (!br_spec) + return -EINVAL; + + nla_for_each_nested(attr, br_spec, rem) { + u16 mode; + + if (nla_type(attr) != 
IFLA_BRIDGE_MODE) + continue; + + if (nla_len(attr) < sizeof(mode)) + return -EINVAL; + + mode = nla_get_u16(attr); + if (mode == bp->br_mode) + break; + + rc = bnxt_hwrm_set_br_mode(bp, mode); + if (!rc) + bp->br_mode = mode; + break; + } + return rc; +} + +static int bnxt_get_phys_port_name(struct net_device *dev, char *buf, + size_t len) +{ + struct bnxt *bp = netdev_priv(dev); + int rc; + + /* The PF and its VF-reps only support the switchdev framework */ + if (!BNXT_PF(bp)) + return -EOPNOTSUPP; + + rc = snprintf(buf, len, "p%d", bp->pf.port_id); + + if (rc >= len) + return -EOPNOTSUPP; + return 0; +} + +int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr) +{ + if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) + return -EOPNOTSUPP; + + /* The PF and its VF-reps only support the switchdev framework */ + if (!BNXT_PF(bp)) + return -EOPNOTSUPP; + + switch (attr->id) { + case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: + /* In SRIOV each PF-pool (PF + child VFs) serves as a + * switching domain; the PF's perm mac-addr can be used + * as the unique parent-id + */ + attr->u.ppid.id_len = ETH_ALEN; + ether_addr_copy(attr->u.ppid.id, bp->pf.mac_addr); + break; + default: + return -EOPNOTSUPP; + } + return 0; +} + +static int bnxt_swdev_port_attr_get(struct net_device *dev, + struct switchdev_attr *attr) +{ + return bnxt_port_attr_get(netdev_priv(dev), attr); +} + +static const struct switchdev_ops bnxt_switchdev_ops = { + .switchdev_port_attr_get = bnxt_swdev_port_attr_get +}; + static const struct net_device_ops bnxt_netdev_ops = { .ndo_open = bnxt_open, .ndo_start_xmit = bnxt_start_xmit, @@ -7443,6 +7702,9 @@ static const struct net_device_ops bnxt_netdev_ops = { .ndo_udp_tunnel_add = bnxt_udp_tunnel_add, .ndo_udp_tunnel_del = bnxt_udp_tunnel_del, .ndo_xdp = bnxt_xdp, + .ndo_bridge_getlink = bnxt_bridge_getlink, + .ndo_bridge_setlink = bnxt_bridge_setlink, + .ndo_get_phys_port_name = bnxt_get_phys_port_name }; static void bnxt_remove_one(struct pci_dev *pdev) @@ -7450,11 +7712,14 @@ static void bnxt_remove_one(struct pci_dev *pdev) struct net_device *dev = pci_get_drvdata(pdev); struct bnxt *bp = netdev_priv(dev); - if (BNXT_PF(bp)) + if (BNXT_PF(bp)) { bnxt_sriov_disable(bp); + bnxt_dl_unregister(bp); + } pci_disable_pcie_error_reporting(pdev); unregister_netdev(dev); + bnxt_shutdown_tc(bp); cancel_work_sync(&bp->sp_task); bp->sp_event = 0; @@ -7623,6 +7888,9 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) if (sh) bp->flags |= BNXT_FLAG_SHARED_RINGS; dflt_rings = netif_get_num_default_rss_queues(); + /* Reduce default rings to reduce memory usage on multi-port cards */ + if (bp->port_count > 1) + dflt_rings = min_t(int, dflt_rings, 4); rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh); if (rc) return rc; @@ -7722,6 +7990,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->netdev_ops = &bnxt_netdev_ops; dev->watchdog_timeo = BNXT_TX_TIMEOUT; dev->ethtool_ops = &bnxt_ethtool_ops; + SWITCHDEV_SET_OPS(dev, &bnxt_switchdev_ops); pci_set_drvdata(pdev, dev); rc = bnxt_alloc_hwrm_resources(bp); @@ -7776,6 +8045,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) #ifdef CONFIG_BNXT_SRIOV init_waitqueue_head(&bp->sriov_cfg_wait); + mutex_init(&bp->sriov_lock); #endif bp->gro_func = bnxt_gro_func_5730x; if (BNXT_CHIP_P4_PLUS(bp)) @@ -7820,6 +8090,10 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) bnxt_ethtool_init(bp); bnxt_dcb_init(bp); + rc = 
bnxt_probe_phy(bp); + if (rc) + goto init_err_pci_clean; + bnxt_set_rx_skb_mode(bp, false); bnxt_set_tpa_flags(bp); bnxt_set_ring_params(bp); @@ -7854,10 +8128,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX) bp->flags |= BNXT_FLAG_STRIP_VLAN; - rc = bnxt_probe_phy(bp); - if (rc) - goto init_err_pci_clean; - rc = bnxt_init_int_mode(bp); if (rc) goto init_err_pci_clean; @@ -7868,9 +8138,15 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) else device_set_wakeup_capable(&pdev->dev, false); + if (BNXT_PF(bp)) + bnxt_init_tc(bp); + rc = register_netdev(dev); if (rc) - goto init_err_clr_int; + goto init_err_cleanup_tc; + + if (BNXT_PF(bp)) + bnxt_dl_register(bp); netdev_info(dev, "%s found at mem %lx, node addr %pM\n", board_info[ent->driver_data].name, @@ -7880,7 +8156,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; -init_err_clr_int: +init_err_cleanup_tc: + bnxt_shutdown_tc(bp); bnxt_clear_int_mode(bp); init_err_pci_clean: diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index f34691f85602..7b888d4b2b55 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -12,13 +12,17 @@ #define BNXT_H #define DRV_MODULE_NAME "bnxt_en" -#define DRV_MODULE_VERSION "1.7.0" +#define DRV_MODULE_VERSION "1.8.0" #define DRV_VER_MAJ 1 -#define DRV_VER_MIN 7 +#define DRV_VER_MIN 8 #define DRV_VER_UPD 0 #include +#include +#include +#include +#include struct tx_bd { __le32 tx_bd_len_flags_type; @@ -242,6 +246,10 @@ struct rx_cmp_ext { ((le32_to_cpu((rxcmp1)->rx_cmp_flags2) & \ RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3) +#define RX_CMP_CFA_CODE(rxcmpl1) \ + ((le32_to_cpu((rxcmpl1)->rx_cmp_cfa_code_errors_v2) & \ + RX_CMPL_CFA_CODE_MASK) >> RX_CMPL_CFA_CODE_SFT) + struct rx_agg_cmp { __le32 rx_agg_cmp_len_flags_type; #define RX_AGG_CMP_TYPE (0x3f << 0) @@ -311,6 +319,10 @@ struct rx_tpa_start_cmp_ext { __le32 rx_tpa_start_cmp_hdr_info; }; +#define TPA_START_CFA_CODE(rx_tpa_start) \ + ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) & \ + RX_TPA_START_CMP_CFA_CODE) >> RX_TPA_START_CMPL_CFA_CODE_SHIFT) + struct rx_tpa_end_cmp { __le32 rx_tpa_end_cmp_len_flags_type; #define RX_TPA_END_CMP_TYPE (0x3f << 0) @@ -618,6 +630,8 @@ struct bnxt_tpa_info { #define BNXT_TPA_OUTER_L3_OFF(hdr_info) \ ((hdr_info) & 0x1ff) + + u16 cfa_code; /* cfa_code in TPA start compl */ }; struct bnxt_rx_ring_info { @@ -688,8 +702,10 @@ struct bnxt_napi { struct bnxt_irq { irq_handler_t handler; unsigned int vector; - u8 requested; + u8 requested:1; + u8 have_cpumask:1; char name[IFNAMSIZ + 2]; + cpumask_var_t cpu_mask; }; #define HWRM_RING_ALLOC_TX 0x1 @@ -825,8 +841,8 @@ struct bnxt_link_info { u8 loop_back; u8 link_up; u8 duplex; -#define BNXT_LINK_DUPLEX_HALF PORT_PHY_QCFG_RESP_DUPLEX_HALF -#define BNXT_LINK_DUPLEX_FULL PORT_PHY_QCFG_RESP_DUPLEX_FULL +#define BNXT_LINK_DUPLEX_HALF PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF +#define BNXT_LINK_DUPLEX_FULL PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL u8 pause; #define BNXT_LINK_PAUSE_TX PORT_PHY_QCFG_RESP_PAUSE_TX #define BNXT_LINK_PAUSE_RX PORT_PHY_QCFG_RESP_PAUSE_RX @@ -928,6 +944,45 @@ struct bnxt_test_info { #define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014 #define BNXT_CAG_REG_BASE 0x300000 +struct bnxt_tc_info { + bool enabled; + + /* hash table to store TC offloaded flows */ + struct rhashtable flow_table; + struct rhashtable_params 
flow_ht_params; + + /* hash table to store L2 keys of TC flows */ + struct rhashtable l2_table; + struct rhashtable_params l2_ht_params; + + /* lock to atomically add/del an l2 node when a flow is + * added or deleted. + */ + struct mutex lock; + + /* Stat counter mask (width) */ + u64 bytes_mask; + u64 packets_mask; +}; + +struct bnxt_vf_rep_stats { + u64 packets; + u64 bytes; + u64 dropped; +}; + +struct bnxt_vf_rep { + struct bnxt *bp; + struct net_device *dev; + struct metadata_dst *dst; + u16 vf_idx; + u16 tx_cfa_action; + u16 rx_cfa_code; + + struct bnxt_vf_rep_stats rx_stats; + struct bnxt_vf_rep_stats tx_stats; +}; + struct bnxt { void __iomem *bar0; void __iomem *bar1; @@ -957,6 +1012,9 @@ struct bnxt { #define CHIP_NUM_5745X 0xd730 +#define CHIP_NUM_58802 0xd802 +#define CHIP_NUM_58808 0xd808 + #define BNXT_CHIP_NUM_5730X(chip_num) \ ((chip_num) >= CHIP_NUM_57301 && \ (chip_num) <= CHIP_NUM_57304) @@ -988,6 +1046,10 @@ struct bnxt { #define BNXT_CHIP_NUM_57X1X(chip_num) \ (BNXT_CHIP_NUM_5731X(chip_num) || BNXT_CHIP_NUM_5741X(chip_num)) +#define BNXT_CHIP_NUM_588XX(chip_num) \ + ((chip_num) == CHIP_NUM_58802 || \ + (chip_num) == CHIP_NUM_58808) + struct net_device *dev; struct pci_dev *pdev; @@ -1027,6 +1089,7 @@ struct bnxt { #define BNXT_FLAG_MULTI_HOST 0x100000 #define BNXT_FLAG_SHORT_CMD 0x200000 #define BNXT_FLAG_DOUBLE_DB 0x400000 + #define BNXT_FLAG_FW_DCBX_AGENT 0x800000 #define BNXT_FLAG_CHIP_NITRO_A0 0x1000000 #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \ @@ -1045,6 +1108,7 @@ struct bnxt { #define BNXT_CHIP_P4_PLUS(bp) \ (BNXT_CHIP_NUM_57X1X((bp)->chip_num) || \ BNXT_CHIP_NUM_5745X((bp)->chip_num) || \ + BNXT_CHIP_NUM_588XX((bp)->chip_num) || \ (BNXT_CHIP_NUM_58700((bp)->chip_num) && \ !BNXT_CHIP_TYPE_NITRO_A0(bp))) @@ -1086,6 +1150,7 @@ struct bnxt { int tx_nr_rings; int tx_nr_rings_per_tc; int tx_nr_rings_xdp; + int tx_reserved_rings; int tx_wake_thresh; int tx_push_thresh; @@ -1164,6 +1229,8 @@ struct bnxt { u8 nge_port_cnt; __le16 nge_fw_dst_port_id; u8 port_partition_type; + u8 port_count; + u16 br_mode; u16 rx_coal_ticks; u16 rx_coal_ticks_irq; @@ -1206,6 +1273,12 @@ struct bnxt { wait_queue_head_t sriov_cfg_wait; bool sriov_cfg; #define BNXT_SRIOV_CFG_WAIT_TMO msecs_to_jiffies(10000) + + /* lock to protect VF-rep creation/cleanup via + * multiple paths such as ->sriov_configure() and + * devlink ->eswitch_mode_set() + */ + struct mutex sriov_lock; #endif #define BNXT_NTP_FLTR_MAX_FLTR 4096 @@ -1232,6 +1305,13 @@ struct bnxt { struct bnxt_led_info leds[BNXT_MAX_LED]; struct bpf_prog *xdp_prog; + + /* devlink interface and vf-rep structs */ + struct devlink *dl; + enum devlink_eswitch_mode eswitch_mode; + struct bnxt_vf_rep **vf_reps; /* array of vf-rep ptrs */ + u16 *cfa_code_map; /* cfa_code -> vf_idx map */ + struct bnxt_tc_info tc_info; }; #define BNXT_RX_STATS_OFFSET(counter) \ @@ -1301,9 +1381,10 @@ int bnxt_open_nic(struct bnxt *, bool, bool); int bnxt_half_open_nic(struct bnxt *bp); void bnxt_half_close_nic(struct bnxt *bp); int bnxt_close_nic(struct bnxt *, bool, bool); -int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, - int tx_xdp); +int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, + int tx_xdp); int bnxt_setup_mq_tc(struct net_device *dev, u8 tc); int bnxt_get_max_rings(struct bnxt *, int *, int *, bool); void bnxt_restore_pf_fw_resources(struct bnxt *bp); +int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr); #endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c 
b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c index 5c6dd0ce209f..aa1f3a2c7a78 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c @@ -93,6 +93,12 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets, cos2bw.tsa = QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS; cos2bw.bw_weight = ets->tc_tx_bw[i]; + /* older firmware requires min_bw to be set to the + * same weight value in percent. + */ + cos2bw.min_bw = + cpu_to_le32((ets->tc_tx_bw[i] * 100) | + BW_VALUE_UNIT_PERCENT1_100); } memcpy(data, &cos2bw.queue_id, sizeof(cos2bw) - 4); if (i == 0) { @@ -549,13 +555,18 @@ static u8 bnxt_dcbnl_setdcbx(struct net_device *dev, u8 mode) { struct bnxt *bp = netdev_priv(dev); - /* only support IEEE */ - if ((mode & DCB_CAP_DCBX_VER_CEE) || !(mode & DCB_CAP_DCBX_VER_IEEE)) + /* All firmware DCBX settings are set in NVRAM */ + if (bp->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) return 1; if (mode & DCB_CAP_DCBX_HOST) { if (BNXT_VF(bp) || (bp->flags & BNXT_FLAG_FW_LLDP_AGENT)) return 1; + + /* only support IEEE */ + if ((mode & DCB_CAP_DCBX_VER_CEE) || + !(mode & DCB_CAP_DCBX_VER_IEEE)) + return 1; } if (mode == bp->dcbx_cap) @@ -584,7 +595,7 @@ void bnxt_dcb_init(struct bnxt *bp) bp->dcbx_cap = DCB_CAP_DCBX_VER_IEEE; if (BNXT_PF(bp) && !(bp->flags & BNXT_FLAG_FW_LLDP_AGENT)) bp->dcbx_cap |= DCB_CAP_DCBX_HOST; - else + else if (bp->flags & BNXT_FLAG_FW_DCBX_AGENT) bp->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED; bp->dev->dcbnl_ops = &dcbnl_ops; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h index ecd0a5e46a49..d2e0af960bf5 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h @@ -26,6 +26,7 @@ struct bnxt_cos2bw_cfg { u8 queue_id; __le32 min_bw; __le32 max_bw; +#define BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) u8 tsa; u8 pri_lvl; u8 bw_weight; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index be6acadcb202..8eff05a3e0e4 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -86,9 +86,11 @@ static int bnxt_set_coalesce(struct net_device *dev, if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) { u32 stats_ticks = coal->stats_block_coalesce_usecs; - stats_ticks = clamp_t(u32, stats_ticks, - BNXT_MIN_STATS_COAL_TICKS, - BNXT_MAX_STATS_COAL_TICKS); + /* Allow 0, which means disable. 
*/ + if (stats_ticks) + stats_ticks = clamp_t(u32, stats_ticks, + BNXT_MIN_STATS_COAL_TICKS, + BNXT_MAX_STATS_COAL_TICKS); stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS); bp->stats_coal_ticks = stats_ticks; update_stats = true; @@ -198,19 +200,23 @@ static const struct { #define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr) +static int bnxt_get_num_stats(struct bnxt *bp) +{ + int num_stats = BNXT_NUM_STATS * bp->cp_nr_rings; + + if (bp->flags & BNXT_FLAG_PORT_STATS) + num_stats += BNXT_NUM_PORT_STATS; + + return num_stats; +} + static int bnxt_get_sset_count(struct net_device *dev, int sset) { struct bnxt *bp = netdev_priv(dev); switch (sset) { - case ETH_SS_STATS: { - int num_stats = BNXT_NUM_STATS * bp->cp_nr_rings; - - if (bp->flags & BNXT_FLAG_PORT_STATS) - num_stats += BNXT_NUM_PORT_STATS; - - return num_stats; - } + case ETH_SS_STATS: + return bnxt_get_num_stats(bp); case ETH_SS_TEST: if (!bp->num_tests) return -EOPNOTSUPP; @@ -225,11 +231,8 @@ static void bnxt_get_ethtool_stats(struct net_device *dev, { u32 i, j = 0; struct bnxt *bp = netdev_priv(dev); - u32 buf_size = sizeof(struct ctx_hw_stats) * bp->cp_nr_rings; u32 stat_fields = sizeof(struct ctx_hw_stats) / 8; - memset(buf, 0, buf_size); - if (!bp->bnapi) return; @@ -432,8 +435,7 @@ static int bnxt_set_channels(struct net_device *dev, } tx_xdp = req_rx_rings; } - rc = bnxt_reserve_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, - tx_xdp); + rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp); if (rc) { netdev_warn(dev, "Unable to allocate the requested rings\n"); return rc; @@ -520,7 +522,7 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd) struct flow_keys *fkeys; int i, rc = -EINVAL; - if (fs->location < 0 || fs->location >= BNXT_NTP_FLTR_MAX_FLTR) + if (fs->location >= BNXT_NTP_FLTR_MAX_FLTR) return rc; for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { @@ -835,7 +837,7 @@ static void bnxt_get_drvinfo(struct net_device *dev, strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version)); strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info)); - info->n_stats = BNXT_NUM_STATS * bp->cp_nr_rings; + info->n_stats = bnxt_get_num_stats(bp); info->testinfo_len = bp->num_tests; /* TODO CHIMP_FW: eeprom dump details */ info->eedump_len = 0; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index 7dc71bb95837..cb04cc76e8ad 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -11,14 +11,14 @@ #ifndef BNXT_HSI_H #define BNXT_HSI_H -/* HSI and HWRM Specification 1.7.6 */ +/* HSI and HWRM Specification 1.8.1 */ #define HWRM_VERSION_MAJOR 1 -#define HWRM_VERSION_MINOR 7 -#define HWRM_VERSION_UPDATE 6 +#define HWRM_VERSION_MINOR 8 +#define HWRM_VERSION_UPDATE 1 -#define HWRM_VERSION_RSVD 2 /* non-zero means beta version */ +#define HWRM_VERSION_RSVD 4 /* non-zero means beta version */ -#define HWRM_VERSION_STR "1.7.6.2" +#define HWRM_VERSION_STR "1.8.1.4" /* * Following is the signature for HWRM message field that indicates not * applicable (All F's). Need to cast it the size of the field if needed. 
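
As an aside on the bnxt_set_coalesce() hunk above: the new code treats 0 as "stats timer off" and otherwise clamps the value and rounds it down to a multiple of the minimum tick. The user-space sketch below mirrors that sanitizing only in outline; MIN_TICKS/MAX_TICKS, clamp_u32() and the modulo round-down are stand-ins for the driver's BNXT_MIN/MAX_STATS_COAL_TICKS, clamp_t() and rounddown(), and the constant values are illustrative, not the driver's actual limits.

#include <stdint.h>
#include <stdio.h>

#define MIN_TICKS 250000u		/* placeholder, not the driver's constant */
#define MAX_TICKS 1000000u		/* placeholder, not the driver's constant */

static uint32_t clamp_u32(uint32_t val, uint32_t lo, uint32_t hi)
{
	return val < lo ? lo : (val > hi ? hi : val);
}

static uint32_t sanitize_stats_ticks(uint32_t ticks)
{
	if (ticks)	/* 0 means "disable the stats timer", leave it as-is */
		ticks = clamp_u32(ticks, MIN_TICKS, MAX_TICKS);
	return ticks - (ticks % MIN_TICKS);	/* rounddown(ticks, MIN_TICKS) */
}

int main(void)
{
	printf("%u %u %u\n",
	       sanitize_stats_ticks(0),		/* stays 0: timer disabled */
	       sanitize_stats_ticks(300000),	/* rounded down to 250000  */
	       sanitize_stats_ticks(5000000));	/* clamped to 1000000      */
	return 0;
}
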
@@ -813,7 +813,7 @@ struct hwrm_func_qcfg_output { #define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED 0x4UL #define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED 0x8UL #define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL - #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL + #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL u8 mac_address[6]; __le16 pci_id; __le16 alloc_rsscos_ctx; @@ -835,9 +835,8 @@ struct hwrm_func_qcfg_output { u8 port_pf_cnt; #define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL 0x0UL __le16 dflt_vnic_id; - u8 host_cnt; - #define FUNC_QCFG_RESP_HOST_CNT_UNAVAIL 0x0UL u8 unused_0; + u8 unused_1; __le32 min_bw; #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0 @@ -874,12 +873,56 @@ struct hwrm_func_qcfg_output { #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL #define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL #define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL - u8 unused_1; + u8 unused_2; __le16 alloc_vfs; __le32 alloc_mcast_filters; __le32 alloc_hw_ring_grps; __le16 alloc_sp_tx_rings; + u8 unused_3; + u8 valid; +}; + +/* hwrm_func_vlan_cfg */ +/* Input (48 bytes) */ +struct hwrm_func_vlan_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 unused_0; + u8 unused_1; + __le32 enables; + #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_VID 0x1UL + #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_VID 0x2UL + #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_PCP 0x4UL + #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_PCP 0x8UL + #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_TPID 0x10UL + #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_TPID 0x20UL + __le16 stag_vid; + u8 stag_pcp; u8 unused_2; + __be16 stag_tpid; + __le16 ctag_vid; + u8 ctag_pcp; + u8 unused_3; + __be16 ctag_tpid; + __le32 rsvd1; + __le32 rsvd2; + __le32 unused_4; +}; + +/* Output (16 bytes) */ +struct hwrm_func_vlan_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; u8 valid; }; @@ -902,6 +945,8 @@ struct hwrm_func_cfg_input { #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_ENABLE 0x200UL #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE 0x400UL #define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST 0x800UL + #define FUNC_CFG_REQ_FLAGS_NO_AUTOCLEAR_STATISTIC 0x1000UL + #define FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x2000UL __le32 enables; #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL @@ -1456,9 +1501,9 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL - u8 duplex; - #define PORT_PHY_QCFG_RESP_DUPLEX_HALF 0x0UL - #define PORT_PHY_QCFG_RESP_DUPLEX_FULL 0x1UL + u8 duplex_cfg; + #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_HALF 0x0UL + #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL 0x1UL u8 pause; #define PORT_PHY_QCFG_RESP_PAUSE_TX 0x1UL #define PORT_PHY_QCFG_RESP_PAUSE_RX 0x2UL @@ -1573,6 +1618,9 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4 0x16UL #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4 0x17UL #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE 0x18UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET 0x19UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX 0x1aUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX 0x1bUL u8 media_type; #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL @@ -1651,14 +1699,16 @@ struct hwrm_port_phy_qcfg_output 
{ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL + u8 duplex_state; + #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL + #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL u8 unused_1; - u8 unused_2; char phy_vendor_name[16]; char phy_vendor_partnumber[16]; - __le32 unused_3; + __le32 unused_2; + u8 unused_3; u8 unused_4; u8 unused_5; - u8 unused_6; u8 valid; }; @@ -1744,6 +1794,51 @@ struct hwrm_port_mac_cfg_output { u8 valid; }; +/* hwrm_port_mac_ptp_qcfg */ +/* Input (24 bytes) */ +struct hwrm_port_mac_ptp_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + __le16 unused_0[3]; +}; + +/* Output (80 bytes) */ +struct hwrm_port_mac_ptp_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 flags; + #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL + #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x2UL + u8 unused_0; + __le16 unused_1; + __le32 rx_ts_reg_off_lower; + __le32 rx_ts_reg_off_upper; + __le32 rx_ts_reg_off_seq_id; + __le32 rx_ts_reg_off_src_id_0; + __le32 rx_ts_reg_off_src_id_1; + __le32 rx_ts_reg_off_src_id_2; + __le32 rx_ts_reg_off_domain_id; + __le32 rx_ts_reg_off_fifo; + __le32 rx_ts_reg_off_fifo_adv; + __le32 rx_ts_reg_off_granularity; + __le32 tx_ts_reg_off_lower; + __le32 tx_ts_reg_off_upper; + __le32 tx_ts_reg_off_seq_id; + __le32 tx_ts_reg_off_fifo; + __le32 tx_ts_reg_off_granularity; + __le32 unused_2; + u8 unused_3; + u8 unused_4; + u8 unused_5; + u8 valid; +}; + /* hwrm_port_qstats */ /* Input (40 bytes) */ struct hwrm_port_qstats_input { @@ -1874,11 +1969,16 @@ struct hwrm_port_phy_qcaps_output { __le16 req_type; __le16 seq_id; __le16 resp_len; - u8 eee_supported; - #define PORT_PHY_QCAPS_RESP_EEE_SUPPORTED 0x1UL - #define PORT_PHY_QCAPS_RESP_RSVD1_MASK 0xfeUL - #define PORT_PHY_QCAPS_RESP_RSVD1_SFT 1 - u8 unused_0; + u8 flags; + #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL + #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xfeUL + #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 1 + u8 port_cnt; + #define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL + #define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL + #define PORT_PHY_QCAPS_RESP_PORT_CNT_2 0x2UL + #define PORT_PHY_QCAPS_RESP_PORT_CNT_3 0x3UL + #define PORT_PHY_QCAPS_RESP_PORT_CNT_4 0x4UL __le16 supported_speeds_force_mode; #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL @@ -3152,6 +3252,95 @@ struct hwrm_queue_cos2bw_cfg_output { u8 valid; }; +/* hwrm_queue_dscp_qcaps */ +/* Input (24 bytes) */ +struct hwrm_queue_dscp_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 port_id; + u8 unused_0[7]; +}; + +/* Output (16 bytes) */ +struct hwrm_queue_dscp_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 num_dscp_bits; + u8 unused_0; + __le16 max_entries; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_queue_dscp2pri_qcfg */ +/* Input (32 bytes) */ +struct hwrm_queue_dscp2pri_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 dest_data_addr; + u8 port_id; + u8 unused_0; + __le16 dest_data_buffer_size; + __le32 unused_1; +}; + +/* Output (16 bytes) */ +struct 
hwrm_queue_dscp2pri_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 entry_cnt; + u8 default_pri; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_queue_dscp2pri_cfg */ +/* Input (40 bytes) */ +struct hwrm_queue_dscp2pri_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 src_data_addr; + __le32 flags; + #define QUEUE_DSCP2PRI_CFG_REQ_FLAGS_USE_HW_DEFAULT_PRI 0x1UL + __le32 enables; + #define QUEUE_DSCP2PRI_CFG_REQ_ENABLES_DEFAULT_PRI 0x1UL + u8 port_id; + u8 default_pri; + __le16 entry_cnt; + __le32 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_queue_dscp2pri_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + /* hwrm_vnic_alloc */ /* Input (24 bytes) */ struct hwrm_vnic_alloc_input { @@ -4038,7 +4227,7 @@ struct hwrm_cfa_encap_record_alloc_input { #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL u8 unused_0; __le16 unused_1; - __le32 encap_data[16]; + __le32 encap_data[20]; }; /* Output (16 bytes) */ @@ -4120,8 +4309,8 @@ struct hwrm_cfa_ntuple_filter_alloc_input { #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL u8 ip_protocol; #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x6UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x11UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL __le16 dst_id; __le16 mirror_vnic_id; u8 tunnel_type; @@ -4224,6 +4413,216 @@ struct hwrm_cfa_ntuple_filter_cfg_output { u8 valid; }; +/* hwrm_cfa_flow_alloc */ +/* Input (128 bytes) */ +struct hwrm_cfa_flow_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 flags; + #define CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL 0x1UL + #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_MASK 0x6UL + #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_SFT 1 + #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_NONE (0x0UL << 1) + #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE (0x1UL << 1) + #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO (0x2UL << 1) + #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_LAST CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO + #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_MASK 0x38UL + #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_SFT 3 + #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2 (0x0UL << 3) + #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 (0x1UL << 3) + #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 (0x2UL << 3) + #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_LAST CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 + __le16 src_fid; + __le32 tunnel_handle; + __le16 action_flags; + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL + __le16 dst_fid; + __be16 l2_rewrite_vlan_tpid; + __be16 l2_rewrite_vlan_tci; + __le16 act_meter_id; + __le16 
ref_flow_handle; + __be16 ethertype; + __be16 outer_vlan_tci; + __be16 dmac[3]; + __be16 inner_vlan_tci; + __be16 smac[3]; + u8 ip_dst_mask_len; + u8 ip_src_mask_len; + __be32 ip_dst[4]; + __be32 ip_src[4]; + __be16 l4_src_port; + __be16 l4_src_port_mask; + __be16 l4_dst_port; + __be16 l4_dst_port_mask; + __be32 nat_ip_address[4]; + __be16 l2_rewrite_dmac[3]; + __be16 nat_port; + __be16 l2_rewrite_smac[3]; + u8 ip_proto; + u8 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_cfa_flow_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 flow_handle; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 unused_4; + u8 valid; +}; + +/* hwrm_cfa_flow_free */ +/* Input (24 bytes) */ +struct hwrm_cfa_flow_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 flow_handle; + __le16 unused_0[3]; +}; + +/* Output (32 bytes) */ +struct hwrm_cfa_flow_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 packet; + __le64 byte; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_cfa_flow_stats */ +/* Input (40 bytes) */ +struct hwrm_cfa_flow_stats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 num_flows; + __le16 flow_handle_0; + __le16 flow_handle_1; + __le16 flow_handle_2; + __le16 flow_handle_3; + __le16 flow_handle_4; + __le16 flow_handle_5; + __le16 flow_handle_6; + __le16 flow_handle_7; + __le16 flow_handle_8; + __le16 flow_handle_9; + __le16 unused_0; +}; + +/* Output (176 bytes) */ +struct hwrm_cfa_flow_stats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 packet_0; + __le64 packet_1; + __le64 packet_2; + __le64 packet_3; + __le64 packet_4; + __le64 packet_5; + __le64 packet_6; + __le64 packet_7; + __le64 packet_8; + __le64 packet_9; + __le64 byte_0; + __le64 byte_1; + __le64 byte_2; + __le64 byte_3; + __le64 byte_4; + __le64 byte_5; + __le64 byte_6; + __le64 byte_7; + __le64 byte_8; + __le64 byte_9; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_cfa_vfr_alloc */ +/* Input (32 bytes) */ +struct hwrm_cfa_vfr_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 vf_id; + __le16 reserved; + __le32 unused_0; + char vfr_name[32]; +}; + +/* Output (16 bytes) */ +struct hwrm_cfa_vfr_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 rx_cfa_code; + __le16 tx_cfa_action; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 valid; +}; + +/* hwrm_cfa_vfr_free */ +/* Input (24 bytes) */ +struct hwrm_cfa_vfr_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + char vfr_name[32]; +}; + +/* Output (16 bytes) */ +struct hwrm_cfa_vfr_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + /* hwrm_tunnel_dst_port_query */ /* Input (24 bytes) */ struct hwrm_tunnel_dst_port_query_input { @@ -4448,12 +4847,13 @@ struct hwrm_fw_reset_input { #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_RSVD 0x4UL + #define 
FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL u8 selfrst_status; #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL - __le16 unused_0[3]; + u8 host_idx; + u8 unused_0[5]; }; /* Output (16 bytes) */ @@ -4487,7 +4887,7 @@ struct hwrm_fw_qstatus_input { #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL - #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_RSVD 0x4UL + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL u8 unused_0[7]; }; @@ -4572,6 +4972,16 @@ struct hwrm_fw_set_structured_data_output { u8 valid; }; +/* Command specific Error Codes (8 bytes) */ +struct hwrm_fw_set_structured_data_cmd_err { + u8 code; + #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL + #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_HDR_CNT 0x1UL + #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_FMT 0x2UL + #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL + u8 unused_0[7]; +}; + /* hwrm_fw_get_structured_data */ /* Input (32 bytes) */ struct hwrm_fw_get_structured_data_input { @@ -4611,6 +5021,14 @@ struct hwrm_fw_get_structured_data_output { u8 valid; }; +/* Command specific Error Codes (8 bytes) */ +struct hwrm_fw_get_structured_data_cmd_err { + u8 code; + #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL + #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL + u8 unused_0[7]; +}; + /* hwrm_exec_fwd_resp */ /* Input (128 bytes) */ struct hwrm_exec_fwd_resp_input { @@ -5280,11 +5698,15 @@ struct hwrm_selftest_qlist_output { #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_LINK_TEST 0x2UL #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_REGISTER_TEST 0x4UL #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_MEMORY_TEST 0x8UL + #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_PCIE_EYE_TEST 0x10UL + #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_ETHERNET_EYE_TEST 0x20UL u8 offline_tests; #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_NVM_TEST 0x1UL #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_LINK_TEST 0x2UL #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_REGISTER_TEST 0x4UL #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_MEMORY_TEST 0x8UL + #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_PCIE_EYE_TEST 0x10UL + #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_ETHERNET_EYE_TEST 0x20UL u8 unused_0; __le16 test_timeout; u8 unused_1; @@ -5312,6 +5734,8 @@ struct hwrm_selftest_exec_input { #define SELFTEST_EXEC_REQ_FLAGS_LINK_TEST 0x2UL #define SELFTEST_EXEC_REQ_FLAGS_REGISTER_TEST 0x4UL #define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST 0x8UL + #define SELFTEST_EXEC_REQ_FLAGS_PCIE_EYE_TEST 0x10UL + #define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_EYE_TEST 0x20UL u8 unused_0[7]; }; @@ -5326,11 +5750,15 @@ struct hwrm_selftest_exec_output { #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_LINK_TEST 0x2UL #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_REGISTER_TEST 0x4UL #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_MEMORY_TEST 0x8UL + #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_PCIE_EYE_TEST 0x10UL + #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_ETHERNET_EYE_TEST 0x20UL u8 test_success; #define SELFTEST_EXEC_RESP_TEST_SUCCESS_NVM_TEST 0x1UL #define SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST 0x2UL #define SELFTEST_EXEC_RESP_TEST_SUCCESS_REGISTER_TEST 0x4UL #define SELFTEST_EXEC_RESP_TEST_SUCCESS_MEMORY_TEST 0x8UL + #define SELFTEST_EXEC_RESP_TEST_SUCCESS_PCIE_EYE_TEST 0x10UL + #define SELFTEST_EXEC_RESP_TEST_SUCCESS_ETHERNET_EYE_TEST 0x20UL __le16 unused_0[3]; }; @@ 
-5411,7 +5839,7 @@ struct cmd_nums { #define HWRM_PORT_LPBK_CLR_STATS (0x26UL) #define HWRM_PORT_PHY_QCFG (0x27UL) #define HWRM_PORT_MAC_QCFG (0x28UL) - #define RESERVED7 (0x29UL) + #define HWRM_PORT_MAC_PTP_QCFG (0x29UL) #define HWRM_PORT_PHY_QCAPS (0x2aUL) #define HWRM_PORT_PHY_I2C_WRITE (0x2bUL) #define HWRM_PORT_PHY_I2C_READ (0x2cUL) @@ -5421,14 +5849,17 @@ struct cmd_nums { #define HWRM_QUEUE_QPORTCFG (0x30UL) #define HWRM_QUEUE_QCFG (0x31UL) #define HWRM_QUEUE_CFG (0x32UL) - #define RESERVED2 (0x33UL) - #define RESERVED3 (0x34UL) + #define HWRM_FUNC_VLAN_CFG (0x33UL) + #define HWRM_FUNC_VLAN_QCFG (0x34UL) #define HWRM_QUEUE_PFCENABLE_QCFG (0x35UL) #define HWRM_QUEUE_PFCENABLE_CFG (0x36UL) #define HWRM_QUEUE_PRI2COS_QCFG (0x37UL) #define HWRM_QUEUE_PRI2COS_CFG (0x38UL) #define HWRM_QUEUE_COS2BW_QCFG (0x39UL) #define HWRM_QUEUE_COS2BW_CFG (0x3aUL) + #define HWRM_QUEUE_DSCP_QCAPS (0x3bUL) + #define HWRM_QUEUE_DSCP2PRI_QCFG (0x3cUL) + #define HWRM_QUEUE_DSCP2PRI_CFG (0x3dUL) #define HWRM_VNIC_ALLOC (0x40UL) #define HWRM_VNIC_FREE (0x41UL) #define HWRM_VNIC_CFG (0x42UL) @@ -5455,7 +5886,7 @@ struct cmd_nums { #define HWRM_CFA_L2_FILTER_FREE (0x91UL) #define HWRM_CFA_L2_FILTER_CFG (0x92UL) #define HWRM_CFA_L2_SET_RX_MASK (0x93UL) - #define RESERVED4 (0x94UL) + #define HWRM_CFA_VLAN_ANTISPOOF_CFG (0x94UL) #define HWRM_CFA_TUNNEL_FILTER_ALLOC (0x95UL) #define HWRM_CFA_TUNNEL_FILTER_FREE (0x96UL) #define HWRM_CFA_ENCAP_RECORD_ALLOC (0x97UL) @@ -5494,6 +5925,8 @@ struct cmd_nums { #define HWRM_CFA_METER_PROFILE_CFG (0xf7UL) #define HWRM_CFA_METER_INSTANCE_ALLOC (0xf8UL) #define HWRM_CFA_METER_INSTANCE_FREE (0xf9UL) + #define HWRM_CFA_VFR_ALLOC (0xfdUL) + #define HWRM_CFA_VFR_FREE (0xfeUL) #define HWRM_CFA_VF_PAIR_ALLOC (0x100UL) #define HWRM_CFA_VF_PAIR_FREE (0x101UL) #define HWRM_CFA_VF_PAIR_INFO (0x102UL) @@ -5502,14 +5935,20 @@ struct cmd_nums { #define HWRM_CFA_FLOW_FLUSH (0x105UL) #define HWRM_CFA_FLOW_STATS (0x106UL) #define HWRM_CFA_FLOW_INFO (0x107UL) + #define HWRM_CFA_DECAP_FILTER_ALLOC (0x108UL) + #define HWRM_CFA_DECAP_FILTER_FREE (0x109UL) + #define HWRM_CFA_VLAN_ANTISPOOF_QCFG (0x10aUL) #define HWRM_SELFTEST_QLIST (0x200UL) #define HWRM_SELFTEST_EXEC (0x201UL) #define HWRM_SELFTEST_IRQ (0x202UL) + #define HWRM_SELFTEST_RETREIVE_EYE_DATA (0x203UL) #define HWRM_DBG_READ_DIRECT (0xff10UL) #define HWRM_DBG_READ_INDIRECT (0xff11UL) #define HWRM_DBG_WRITE_DIRECT (0xff12UL) #define HWRM_DBG_WRITE_INDIRECT (0xff13UL) #define HWRM_DBG_DUMP (0xff14UL) + #define HWRM_DBG_ERASE_NVM (0xff15UL) + #define HWRM_DBG_CFG (0xff16UL) #define HWRM_NVM_FACTORY_DEFAULTS (0xffeeUL) #define HWRM_NVM_VALIDATE_OPTION (0xffefUL) #define HWRM_NVM_FLUSH (0xfff0UL) @@ -5720,6 +6159,7 @@ struct hwrm_struct_hdr { #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL #define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL + #define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL __le16 len; u8 version; u8 count; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index b8e7248294d9..d37925a8a65b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -18,6 +18,7 @@ #include "bnxt.h" #include "bnxt_ulp.h" #include "bnxt_sriov.h" +#include "bnxt_vfr.h" #include "bnxt_ethtool.h" #ifdef CONFIG_BNXT_SRIOV @@ -587,6 +588,10 @@ void bnxt_sriov_disable(struct bnxt *bp) if (!num_vfs) return; + /* synchronize VF and VF-rep create and destroy */ + mutex_lock(&bp->sriov_lock); + 
bnxt_vf_reps_destroy(bp); + if (pci_vfs_assigned(bp->pdev)) { bnxt_hwrm_fwd_async_event_cmpl( bp, NULL, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD); @@ -597,6 +602,7 @@ void bnxt_sriov_disable(struct bnxt *bp) /* Free the HW resources reserved for various VF's */ bnxt_hwrm_func_vf_resource_free(bp, num_vfs); } + mutex_unlock(&bp->sriov_lock); bnxt_free_vf_resources(bp); @@ -794,8 +800,10 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf) PORT_PHY_QCFG_RESP_LINK_LINK; phy_qcfg_resp.link_speed = cpu_to_le16( PORT_PHY_QCFG_RESP_LINK_SPEED_10GB); - phy_qcfg_resp.duplex = - PORT_PHY_QCFG_RESP_DUPLEX_FULL; + phy_qcfg_resp.duplex_cfg = + PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL; + phy_qcfg_resp.duplex_state = + PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL; phy_qcfg_resp.pause = (PORT_PHY_QCFG_RESP_PAUSE_TX | PORT_PHY_QCFG_RESP_PAUSE_RX); @@ -804,7 +812,8 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf) /* force link down */ phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK; phy_qcfg_resp.link_speed = 0; - phy_qcfg_resp.duplex = PORT_PHY_QCFG_RESP_DUPLEX_HALF; + phy_qcfg_resp.duplex_state = + PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF; phy_qcfg_resp.pause = 0; } rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c new file mode 100644 index 000000000000..7dd3d131043a --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -0,0 +1,838 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2017 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_sriov.h" +#include "bnxt_tc.h" +#include "bnxt_vfr.h" + +#ifdef CONFIG_BNXT_FLOWER_OFFLOAD + +#define BNXT_FID_INVALID 0xffff +#define VLAN_TCI(vid, prio) ((vid) | ((prio) << VLAN_PRIO_SHIFT)) + +/* Return the dst fid of the func for flow forwarding + * For PFs: src_fid is the fid of the PF + * For VF-reps: src_fid the fid of the VF + */ +static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev) +{ + struct bnxt *bp; + + /* check if dev belongs to the same switch */ + if (!switchdev_port_same_parent_id(pf_bp->dev, dev)) { + netdev_info(pf_bp->dev, "dev(ifindex=%d) not on same switch", + dev->ifindex); + return BNXT_FID_INVALID; + } + + /* Is dev a VF-rep? 
*/ + if (dev != pf_bp->dev) + return bnxt_vf_rep_get_fid(dev); + + bp = netdev_priv(dev); + return bp->pf.fw_fid; +} + +static int bnxt_tc_parse_redir(struct bnxt *bp, + struct bnxt_tc_actions *actions, + const struct tc_action *tc_act) +{ + int ifindex = tcf_mirred_ifindex(tc_act); + struct net_device *dev; + u16 dst_fid; + + dev = __dev_get_by_index(dev_net(bp->dev), ifindex); + if (!dev) { + netdev_info(bp->dev, "no dev for ifindex=%d", ifindex); + return -EINVAL; + } + + /* find the FID from dev */ + dst_fid = bnxt_flow_get_dst_fid(bp, dev); + if (dst_fid == BNXT_FID_INVALID) { + netdev_info(bp->dev, "can't get fid for ifindex=%d", ifindex); + return -EINVAL; + } + + actions->flags |= BNXT_TC_ACTION_FLAG_FWD; + actions->dst_fid = dst_fid; + actions->dst_dev = dev; + return 0; +} + +static void bnxt_tc_parse_vlan(struct bnxt *bp, + struct bnxt_tc_actions *actions, + const struct tc_action *tc_act) +{ + if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_POP) { + actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN; + } else if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_PUSH) { + actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN; + actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act)); + actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act); + } +} + +static int bnxt_tc_parse_actions(struct bnxt *bp, + struct bnxt_tc_actions *actions, + struct tcf_exts *tc_exts) +{ + const struct tc_action *tc_act; + LIST_HEAD(tc_actions); + int rc; + + if (!tcf_exts_has_actions(tc_exts)) { + netdev_info(bp->dev, "no actions"); + return -EINVAL; + } + + tcf_exts_to_list(tc_exts, &tc_actions); + list_for_each_entry(tc_act, &tc_actions, list) { + /* Drop action */ + if (is_tcf_gact_shot(tc_act)) { + actions->flags |= BNXT_TC_ACTION_FLAG_DROP; + return 0; /* don't bother with other actions */ + } + + /* Redirect action */ + if (is_tcf_mirred_egress_redirect(tc_act)) { + rc = bnxt_tc_parse_redir(bp, actions, tc_act); + if (rc) + return rc; + continue; + } + + /* Push/pop VLAN */ + if (is_tcf_vlan(tc_act)) { + bnxt_tc_parse_vlan(bp, actions, tc_act); + continue; + } + } + + return 0; +} + +#define GET_KEY(flow_cmd, key_type) \ + skb_flow_dissector_target((flow_cmd)->dissector, key_type,\ + (flow_cmd)->key) +#define GET_MASK(flow_cmd, key_type) \ + skb_flow_dissector_target((flow_cmd)->dissector, key_type,\ + (flow_cmd)->mask) + +static int bnxt_tc_parse_flow(struct bnxt *bp, + struct tc_cls_flower_offload *tc_flow_cmd, + struct bnxt_tc_flow *flow) +{ + struct flow_dissector *dissector = tc_flow_cmd->dissector; + u16 addr_type = 0; + + /* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */ + if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 || + (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) { + netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x", + dissector->used_keys); + return -EOPNOTSUPP; + } + + if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_dissector_key_control *key = + GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_CONTROL); + + addr_type = key->addr_type; + } + + if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_dissector_key_basic *key = + GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC); + struct flow_dissector_key_basic *mask = + GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC); + + flow->l2_key.ether_type = key->n_proto; + flow->l2_mask.ether_type = mask->n_proto; + + if (key->n_proto == htons(ETH_P_IP) || + key->n_proto == htons(ETH_P_IPV6)) { + flow->l4_key.ip_proto = key->ip_proto; + 
flow->l4_mask.ip_proto = mask->ip_proto; + } + } + + if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_dissector_key_eth_addrs *key = + GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS); + struct flow_dissector_key_eth_addrs *mask = + GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS); + + flow->flags |= BNXT_TC_FLOW_FLAGS_ETH_ADDRS; + ether_addr_copy(flow->l2_key.dmac, key->dst); + ether_addr_copy(flow->l2_mask.dmac, mask->dst); + ether_addr_copy(flow->l2_key.smac, key->src); + ether_addr_copy(flow->l2_mask.smac, mask->src); + } + + if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_dissector_key_vlan *key = + GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN); + struct flow_dissector_key_vlan *mask = + GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN); + + flow->l2_key.inner_vlan_tci = + cpu_to_be16(VLAN_TCI(key->vlan_id, key->vlan_priority)); + flow->l2_mask.inner_vlan_tci = + cpu_to_be16((VLAN_TCI(mask->vlan_id, mask->vlan_priority))); + flow->l2_key.inner_vlan_tpid = htons(ETH_P_8021Q); + flow->l2_mask.inner_vlan_tpid = htons(0xffff); + flow->l2_key.num_vlans = 1; + } + + if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { + struct flow_dissector_key_ipv4_addrs *key = + GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS); + struct flow_dissector_key_ipv4_addrs *mask = + GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS); + + flow->flags |= BNXT_TC_FLOW_FLAGS_IPV4_ADDRS; + flow->l3_key.ipv4.daddr.s_addr = key->dst; + flow->l3_mask.ipv4.daddr.s_addr = mask->dst; + flow->l3_key.ipv4.saddr.s_addr = key->src; + flow->l3_mask.ipv4.saddr.s_addr = mask->src; + } else if (dissector_uses_key(dissector, + FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { + struct flow_dissector_key_ipv6_addrs *key = + GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS); + struct flow_dissector_key_ipv6_addrs *mask = + GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS); + + flow->flags |= BNXT_TC_FLOW_FLAGS_IPV6_ADDRS; + flow->l3_key.ipv6.daddr = key->dst; + flow->l3_mask.ipv6.daddr = mask->dst; + flow->l3_key.ipv6.saddr = key->src; + flow->l3_mask.ipv6.saddr = mask->src; + } + + if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_dissector_key_ports *key = + GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS); + struct flow_dissector_key_ports *mask = + GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS); + + flow->flags |= BNXT_TC_FLOW_FLAGS_PORTS; + flow->l4_key.ports.dport = key->dst; + flow->l4_mask.ports.dport = mask->dst; + flow->l4_key.ports.sport = key->src; + flow->l4_mask.ports.sport = mask->src; + } + + if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ICMP)) { + struct flow_dissector_key_icmp *key = + GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP); + struct flow_dissector_key_icmp *mask = + GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP); + + flow->flags |= BNXT_TC_FLOW_FLAGS_ICMP; + flow->l4_key.icmp.type = key->type; + flow->l4_key.icmp.code = key->code; + flow->l4_mask.icmp.type = mask->type; + flow->l4_mask.icmp.code = mask->code; + } + + return bnxt_tc_parse_actions(bp, &flow->actions, tc_flow_cmd->exts); +} + +static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, __le16 flow_handle) +{ + struct hwrm_cfa_flow_free_input req = { 0 }; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_FREE, -1, -1); + req.flow_handle = flow_handle; + + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + netdev_info(bp->dev, "Error: %s: flow_handle=0x%x rc=%d", + __func__, flow_handle, rc); + return rc; +} + 
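
The VLAN handling in bnxt_tc_parse_flow() above folds the TC-provided VLAN ID and priority into one 802.1Q TCI via the local VLAN_TCI() macro. Below is a minimal user-space sketch of that packing, assuming only that the priority occupies the top three bits (the kernel's VLAN_PRIO_SHIFT of 13); the helper name and sample values are illustrative and not part of the patch.

#include <stdint.h>
#include <stdio.h>

#define PRIO_SHIFT 13	/* same value as VLAN_PRIO_SHIFT in <linux/if_vlan.h> */

static uint16_t vlan_tci(uint16_t vid, uint8_t prio)
{
	/* PCP in bits 15..13, VID in bits 11..0 of the 802.1Q TCI */
	return (uint16_t)(vid | ((uint16_t)prio << PRIO_SHIFT));
}

int main(void)
{
	/* VID 100, priority 5: (5 << 13) | 100 == 0xa064 */
	printf("tci=0x%04x\n", vlan_tci(100, 5));
	return 0;
}
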
+static int ipv6_mask_len(struct in6_addr *mask) +{ + int mask_len = 0, i; + + for (i = 0; i < 4; i++) + mask_len += inet_mask_len(mask->s6_addr32[i]); + + return mask_len; +} + +static bool is_wildcard(void *mask, int len) +{ + const u8 *p = mask; + int i; + + for (i = 0; i < len; i++) { + if (p[i] != 0) + return false; + } + return true; +} + +static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, + __le16 ref_flow_handle, __le16 *flow_handle) +{ + struct hwrm_cfa_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr; + struct bnxt_tc_actions *actions = &flow->actions; + struct bnxt_tc_l3_key *l3_mask = &flow->l3_mask; + struct bnxt_tc_l3_key *l3_key = &flow->l3_key; + struct hwrm_cfa_flow_alloc_input req = { 0 }; + u16 flow_flags = 0, action_flags = 0; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_ALLOC, -1, -1); + + req.src_fid = cpu_to_le16(flow->src_fid); + req.ref_flow_handle = ref_flow_handle; + req.ethertype = flow->l2_key.ether_type; + req.ip_proto = flow->l4_key.ip_proto; + + if (flow->flags & BNXT_TC_FLOW_FLAGS_ETH_ADDRS) { + memcpy(req.dmac, flow->l2_key.dmac, ETH_ALEN); + memcpy(req.smac, flow->l2_key.smac, ETH_ALEN); + } + + if (flow->l2_key.num_vlans > 0) { + flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE; + /* FW expects the inner_vlan_tci value to be set + * in outer_vlan_tci when num_vlans is 1 (which is + * always the case in TC.) + */ + req.outer_vlan_tci = flow->l2_key.inner_vlan_tci; + } + + /* If all IP and L4 fields are wildcarded then this is an L2 flow */ + if (is_wildcard(&l3_mask, sizeof(l3_mask)) && + is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) { + flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2; + } else { + flow_flags |= flow->l2_key.ether_type == htons(ETH_P_IP) ? + CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 : + CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6; + + if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV4_ADDRS) { + req.ip_dst[0] = l3_key->ipv4.daddr.s_addr; + req.ip_dst_mask_len = + inet_mask_len(l3_mask->ipv4.daddr.s_addr); + req.ip_src[0] = l3_key->ipv4.saddr.s_addr; + req.ip_src_mask_len = + inet_mask_len(l3_mask->ipv4.saddr.s_addr); + } else if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV6_ADDRS) { + memcpy(req.ip_dst, l3_key->ipv6.daddr.s6_addr32, + sizeof(req.ip_dst)); + req.ip_dst_mask_len = + ipv6_mask_len(&l3_mask->ipv6.daddr); + memcpy(req.ip_src, l3_key->ipv6.saddr.s6_addr32, + sizeof(req.ip_src)); + req.ip_src_mask_len = + ipv6_mask_len(&l3_mask->ipv6.saddr); + } + } + + if (flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) { + req.l4_src_port = flow->l4_key.ports.sport; + req.l4_src_port_mask = flow->l4_mask.ports.sport; + req.l4_dst_port = flow->l4_key.ports.dport; + req.l4_dst_port_mask = flow->l4_mask.ports.dport; + } else if (flow->flags & BNXT_TC_FLOW_FLAGS_ICMP) { + /* l4 ports serve as type/code when ip_proto is ICMP */ + req.l4_src_port = htons(flow->l4_key.icmp.type); + req.l4_src_port_mask = htons(flow->l4_mask.icmp.type); + req.l4_dst_port = htons(flow->l4_key.icmp.code); + req.l4_dst_port_mask = htons(flow->l4_mask.icmp.code); + } + req.flags = cpu_to_le16(flow_flags); + + if (actions->flags & BNXT_TC_ACTION_FLAG_DROP) { + action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP; + } else { + if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) { + action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD; + req.dst_fid = cpu_to_le16(actions->dst_fid); + } + if (actions->flags & BNXT_TC_ACTION_FLAG_PUSH_VLAN) { + action_flags |= + CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE; + req.l2_rewrite_vlan_tpid = 
actions->push_vlan_tpid; + req.l2_rewrite_vlan_tci = actions->push_vlan_tci; + memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN); + memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN); + } + if (actions->flags & BNXT_TC_ACTION_FLAG_POP_VLAN) { + action_flags |= + CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE; + /* Rewrite config with tpid = 0 implies vlan pop */ + req.l2_rewrite_vlan_tpid = 0; + memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN); + memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN); + } + } + req.action_flags = cpu_to_le16(action_flags); + + mutex_lock(&bp->hwrm_cmd_lock); + + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) + *flow_handle = resp->flow_handle; + + mutex_unlock(&bp->hwrm_cmd_lock); + + return rc; +} + +/* Add val to accum while handling a possible wraparound + * of val. Eventhough val is of type u64, its actual width + * is denoted by mask and will wrap-around beyond that width. + */ +static void accumulate_val(u64 *accum, u64 val, u64 mask) +{ +#define low_bits(x, mask) ((x) & (mask)) +#define high_bits(x, mask) ((x) & ~(mask)) + bool wrapped = val < low_bits(*accum, mask); + + *accum = high_bits(*accum, mask) + val; + if (wrapped) + *accum += (mask + 1); +} + +/* The HW counters' width is much less than 64bits. + * Handle possible wrap-around while updating the stat counters + */ +static void bnxt_flow_stats_fix_wraparound(struct bnxt_tc_info *tc_info, + struct bnxt_tc_flow_stats *stats, + struct bnxt_tc_flow_stats *hw_stats) +{ + accumulate_val(&stats->bytes, hw_stats->bytes, tc_info->bytes_mask); + accumulate_val(&stats->packets, hw_stats->packets, + tc_info->packets_mask); +} + +/* Fix possible wraparound of the stats queried from HW, calculate + * the delta from prev_stats, and also update the prev_stats. + * The HW flow stats are fetched under the hwrm_cmd_lock mutex. + * This routine is best called while under the mutex so that the + * stats processing happens atomically. 
+ */ +static void bnxt_flow_stats_calc(struct bnxt_tc_info *tc_info, + struct bnxt_tc_flow *flow, + struct bnxt_tc_flow_stats *stats) +{ + struct bnxt_tc_flow_stats *acc_stats, *prev_stats; + + acc_stats = &flow->stats; + bnxt_flow_stats_fix_wraparound(tc_info, acc_stats, stats); + + prev_stats = &flow->prev_stats; + stats->bytes = acc_stats->bytes - prev_stats->bytes; + stats->packets = acc_stats->packets - prev_stats->packets; + *prev_stats = *acc_stats; +} + +static int bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, + __le16 flow_handle, + struct bnxt_tc_flow *flow, + struct bnxt_tc_flow_stats *stats) +{ + struct hwrm_cfa_flow_stats_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_cfa_flow_stats_input req = { 0 }; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1); + req.num_flows = cpu_to_le16(1); + req.flow_handle_0 = flow_handle; + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) { + stats->packets = le64_to_cpu(resp->packet_0); + stats->bytes = le64_to_cpu(resp->byte_0); + bnxt_flow_stats_calc(&bp->tc_info, flow, stats); + } else { + netdev_info(bp->dev, "error rc=%d", rc); + } + + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static int bnxt_tc_put_l2_node(struct bnxt *bp, + struct bnxt_tc_flow_node *flow_node) +{ + struct bnxt_tc_l2_node *l2_node = flow_node->l2_node; + struct bnxt_tc_info *tc_info = &bp->tc_info; + int rc; + + /* remove flow_node from the L2 shared flow list */ + list_del(&flow_node->l2_list_node); + if (--l2_node->refcount == 0) { + rc = rhashtable_remove_fast(&tc_info->l2_table, &l2_node->node, + tc_info->l2_ht_params); + if (rc) + netdev_err(bp->dev, + "Error: %s: rhashtable_remove_fast: %d", + __func__, rc); + kfree_rcu(l2_node, rcu); + } + return 0; +} + +static struct bnxt_tc_l2_node * +bnxt_tc_get_l2_node(struct bnxt *bp, struct rhashtable *l2_table, + struct rhashtable_params ht_params, + struct bnxt_tc_l2_key *l2_key) +{ + struct bnxt_tc_l2_node *l2_node; + int rc; + + l2_node = rhashtable_lookup_fast(l2_table, l2_key, ht_params); + if (!l2_node) { + l2_node = kzalloc(sizeof(*l2_node), GFP_KERNEL); + if (!l2_node) { + rc = -ENOMEM; + return NULL; + } + + l2_node->key = *l2_key; + rc = rhashtable_insert_fast(l2_table, &l2_node->node, + ht_params); + if (rc) { + kfree(l2_node); + netdev_err(bp->dev, + "Error: %s: rhashtable_insert_fast: %d", + __func__, rc); + return NULL; + } + INIT_LIST_HEAD(&l2_node->common_l2_flows); + } + return l2_node; +} + +/* Get the ref_flow_handle for a flow by checking if there are any other + * flows that share the same L2 key as this flow. 
+ */ +static int +bnxt_tc_get_ref_flow_handle(struct bnxt *bp, struct bnxt_tc_flow *flow, + struct bnxt_tc_flow_node *flow_node, + __le16 *ref_flow_handle) +{ + struct bnxt_tc_info *tc_info = &bp->tc_info; + struct bnxt_tc_flow_node *ref_flow_node; + struct bnxt_tc_l2_node *l2_node; + + l2_node = bnxt_tc_get_l2_node(bp, &tc_info->l2_table, + tc_info->l2_ht_params, + &flow->l2_key); + if (!l2_node) + return -1; + + /* If any other flow is using this l2_node, use it's flow_handle + * as the ref_flow_handle + */ + if (l2_node->refcount > 0) { + ref_flow_node = list_first_entry(&l2_node->common_l2_flows, + struct bnxt_tc_flow_node, + l2_list_node); + *ref_flow_handle = ref_flow_node->flow_handle; + } else { + *ref_flow_handle = cpu_to_le16(0xffff); + } + + /* Insert the l2_node into the flow_node so that subsequent flows + * with a matching l2 key can use the flow_handle of this flow + * as their ref_flow_handle + */ + flow_node->l2_node = l2_node; + list_add(&flow_node->l2_list_node, &l2_node->common_l2_flows); + l2_node->refcount++; + return 0; +} + +/* After the flow parsing is done, this routine is used for checking + * if there are any aspects of the flow that prevent it from being + * offloaded. + */ +static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow) +{ + /* If L4 ports are specified then ip_proto must be TCP or UDP */ + if ((flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) && + (flow->l4_key.ip_proto != IPPROTO_TCP && + flow->l4_key.ip_proto != IPPROTO_UDP)) { + netdev_info(bp->dev, "Cannot offload non-TCP/UDP (%d) ports", + flow->l4_key.ip_proto); + return false; + } + + return true; +} + +static int __bnxt_tc_del_flow(struct bnxt *bp, + struct bnxt_tc_flow_node *flow_node) +{ + struct bnxt_tc_info *tc_info = &bp->tc_info; + int rc; + + /* send HWRM cmd to free the flow-id */ + bnxt_hwrm_cfa_flow_free(bp, flow_node->flow_handle); + + mutex_lock(&tc_info->lock); + + /* release reference to l2 node */ + bnxt_tc_put_l2_node(bp, flow_node); + + mutex_unlock(&tc_info->lock); + + rc = rhashtable_remove_fast(&tc_info->flow_table, &flow_node->node, + tc_info->flow_ht_params); + if (rc) + netdev_err(bp->dev, "Error: %s: rhashtable_remove_fast rc=%d", + __func__, rc); + + kfree_rcu(flow_node, rcu); + return 0; +} + +/* Add a new flow or replace an existing flow. + * Notes on locking: + * There are essentially two critical sections here. + * 1. while adding a new flow + * a) lookup l2-key + * b) issue HWRM cmd and get flow_handle + * c) link l2-key with flow + * 2. while deleting a flow + * a) unlinking l2-key from flow + * A lock is needed to protect these two critical sections. + * + * The hash-tables are already protected by the rhashtable API. 
+ */ +static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid, + struct tc_cls_flower_offload *tc_flow_cmd) +{ + struct bnxt_tc_flow_node *new_node, *old_node; + struct bnxt_tc_info *tc_info = &bp->tc_info; + struct bnxt_tc_flow *flow; + __le16 ref_flow_handle; + int rc; + + /* allocate memory for the new flow and it's node */ + new_node = kzalloc(sizeof(*new_node), GFP_KERNEL); + if (!new_node) { + rc = -ENOMEM; + goto done; + } + new_node->cookie = tc_flow_cmd->cookie; + flow = &new_node->flow; + + rc = bnxt_tc_parse_flow(bp, tc_flow_cmd, flow); + if (rc) + goto free_node; + flow->src_fid = src_fid; + + if (!bnxt_tc_can_offload(bp, flow)) { + rc = -ENOSPC; + goto free_node; + } + + /* If a flow exists with the same cookie, delete it */ + old_node = rhashtable_lookup_fast(&tc_info->flow_table, + &tc_flow_cmd->cookie, + tc_info->flow_ht_params); + if (old_node) + __bnxt_tc_del_flow(bp, old_node); + + /* Check if the L2 part of the flow has been offloaded already. + * If so, bump up it's refcnt and get it's reference handle. + */ + mutex_lock(&tc_info->lock); + rc = bnxt_tc_get_ref_flow_handle(bp, flow, new_node, &ref_flow_handle); + if (rc) + goto unlock; + + /* send HWRM cmd to alloc the flow */ + rc = bnxt_hwrm_cfa_flow_alloc(bp, flow, ref_flow_handle, + &new_node->flow_handle); + if (rc) + goto put_l2; + + /* add new flow to flow-table */ + rc = rhashtable_insert_fast(&tc_info->flow_table, &new_node->node, + tc_info->flow_ht_params); + if (rc) + goto hwrm_flow_free; + + mutex_unlock(&tc_info->lock); + return 0; + +hwrm_flow_free: + bnxt_hwrm_cfa_flow_free(bp, new_node->flow_handle); +put_l2: + bnxt_tc_put_l2_node(bp, new_node); +unlock: + mutex_unlock(&tc_info->lock); +free_node: + kfree(new_node); +done: + netdev_err(bp->dev, "Error: %s: cookie=0x%lx error=%d", + __func__, tc_flow_cmd->cookie, rc); + return rc; +} + +static int bnxt_tc_del_flow(struct bnxt *bp, + struct tc_cls_flower_offload *tc_flow_cmd) +{ + struct bnxt_tc_info *tc_info = &bp->tc_info; + struct bnxt_tc_flow_node *flow_node; + + flow_node = rhashtable_lookup_fast(&tc_info->flow_table, + &tc_flow_cmd->cookie, + tc_info->flow_ht_params); + if (!flow_node) { + netdev_info(bp->dev, "ERROR: no flow_node for cookie %lx", + tc_flow_cmd->cookie); + return -EINVAL; + } + + return __bnxt_tc_del_flow(bp, flow_node); +} + +static int bnxt_tc_get_flow_stats(struct bnxt *bp, + struct tc_cls_flower_offload *tc_flow_cmd) +{ + struct bnxt_tc_info *tc_info = &bp->tc_info; + struct bnxt_tc_flow_node *flow_node; + struct bnxt_tc_flow_stats stats; + int rc; + + flow_node = rhashtable_lookup_fast(&tc_info->flow_table, + &tc_flow_cmd->cookie, + tc_info->flow_ht_params); + if (!flow_node) { + netdev_info(bp->dev, "Error: no flow_node for cookie %lx", + tc_flow_cmd->cookie); + return -1; + } + + rc = bnxt_hwrm_cfa_flow_stats_get(bp, flow_node->flow_handle, + &flow_node->flow, &stats); + if (rc) + return rc; + + tcf_exts_stats_update(tc_flow_cmd->exts, stats.bytes, stats.packets, 0); + return 0; +} + +int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid, + struct tc_cls_flower_offload *cls_flower) +{ + int rc = 0; + + if (!is_classid_clsact_ingress(cls_flower->common.classid) || + cls_flower->common.chain_index) + return -EOPNOTSUPP; + + switch (cls_flower->command) { + case TC_CLSFLOWER_REPLACE: + rc = bnxt_tc_add_flow(bp, src_fid, cls_flower); + break; + + case TC_CLSFLOWER_DESTROY: + rc = bnxt_tc_del_flow(bp, cls_flower); + break; + + case TC_CLSFLOWER_STATS: + rc = bnxt_tc_get_flow_stats(bp, cls_flower); + break; + } + return rc; +} + 
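
The flow-stats path above (accumulate_val() together with the mask(width) helper further down in bnxt_tc.c) folds narrow hardware counters into 64-bit totals by detecting a wrap whenever the raw reading drops below the low bits of the running accumulator. The self-contained user-space sketch below demonstrates that technique; the 28-bit width and the sample values are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Build a mask covering the low `width` bits of a 64-bit value */
#define COUNTER_MASK(width) (~UINT64_C(0) >> (64 - (width)))

static void accumulate(uint64_t *accum, uint64_t raw, uint64_t mask)
{
	uint64_t low = *accum & mask;	/* bits the HW can actually report  */
	uint64_t high = *accum & ~mask;	/* overflow already carried over    */

	*accum = high + raw;
	if (raw < low)			/* HW counter wrapped past zero     */
		*accum += mask + 1;
}

int main(void)
{
	uint64_t mask = COUNTER_MASK(28);	/* e.g. a 28-bit packet counter */
	uint64_t total = mask - 5;		/* just short of the wrap point */

	accumulate(&total, 10, mask);		/* raw value wrapped around     */
	printf("total=%llu (expect %llu)\n",
	       (unsigned long long)total,
	       (unsigned long long)(mask + 1 + 10));
	return 0;
}
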
+static const struct rhashtable_params bnxt_tc_flow_ht_params = { + .head_offset = offsetof(struct bnxt_tc_flow_node, node), + .key_offset = offsetof(struct bnxt_tc_flow_node, cookie), + .key_len = sizeof(((struct bnxt_tc_flow_node *)0)->cookie), + .automatic_shrinking = true +}; + +static const struct rhashtable_params bnxt_tc_l2_ht_params = { + .head_offset = offsetof(struct bnxt_tc_l2_node, node), + .key_offset = offsetof(struct bnxt_tc_l2_node, key), + .key_len = BNXT_TC_L2_KEY_LEN, + .automatic_shrinking = true +}; + +/* convert counter width in bits to a mask */ +#define mask(width) ((u64)~0 >> (64 - (width))) + +int bnxt_init_tc(struct bnxt *bp) +{ + struct bnxt_tc_info *tc_info = &bp->tc_info; + int rc; + + if (bp->hwrm_spec_code < 0x10800) { + netdev_warn(bp->dev, + "Firmware does not support TC flower offload.\n"); + return -ENOTSUPP; + } + mutex_init(&tc_info->lock); + + /* Counter widths are programmed by FW */ + tc_info->bytes_mask = mask(36); + tc_info->packets_mask = mask(28); + + tc_info->flow_ht_params = bnxt_tc_flow_ht_params; + rc = rhashtable_init(&tc_info->flow_table, &tc_info->flow_ht_params); + if (rc) + return rc; + + tc_info->l2_ht_params = bnxt_tc_l2_ht_params; + rc = rhashtable_init(&tc_info->l2_table, &tc_info->l2_ht_params); + if (rc) + goto destroy_flow_table; + + tc_info->enabled = true; + bp->dev->hw_features |= NETIF_F_HW_TC; + bp->dev->features |= NETIF_F_HW_TC; + return 0; + +destroy_flow_table: + rhashtable_destroy(&tc_info->flow_table); + return rc; +} + +void bnxt_shutdown_tc(struct bnxt *bp) +{ + struct bnxt_tc_info *tc_info = &bp->tc_info; + + if (!tc_info->enabled) + return; + + rhashtable_destroy(&tc_info->flow_table); + rhashtable_destroy(&tc_info->l2_table); +} + +#else +#endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h new file mode 100644 index 000000000000..6c4c1ed279ef --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h @@ -0,0 +1,158 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2017 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef BNXT_TC_H +#define BNXT_TC_H + +#ifdef CONFIG_BNXT_FLOWER_OFFLOAD + +/* Structs used for storing the filter/actions of the TC cmd. 
+ */ +struct bnxt_tc_l2_key { + u8 dmac[ETH_ALEN]; + u8 smac[ETH_ALEN]; + __be16 inner_vlan_tpid; + __be16 inner_vlan_tci; + __be16 ether_type; + u8 num_vlans; +}; + +struct bnxt_tc_l3_key { + union { + struct { + struct in_addr daddr; + struct in_addr saddr; + } ipv4; + struct { + struct in6_addr daddr; + struct in6_addr saddr; + } ipv6; + }; +}; + +struct bnxt_tc_l4_key { + u8 ip_proto; + union { + struct { + __be16 sport; + __be16 dport; + } ports; + struct { + u8 type; + u8 code; + } icmp; + }; +}; + +struct bnxt_tc_actions { + u32 flags; +#define BNXT_TC_ACTION_FLAG_FWD BIT(0) +#define BNXT_TC_ACTION_FLAG_FWD_VXLAN BIT(1) +#define BNXT_TC_ACTION_FLAG_PUSH_VLAN BIT(3) +#define BNXT_TC_ACTION_FLAG_POP_VLAN BIT(4) +#define BNXT_TC_ACTION_FLAG_DROP BIT(5) + + u16 dst_fid; + struct net_device *dst_dev; + __be16 push_vlan_tpid; + __be16 push_vlan_tci; +}; + +struct bnxt_tc_flow_stats { + u64 packets; + u64 bytes; +}; + +struct bnxt_tc_flow { + u32 flags; +#define BNXT_TC_FLOW_FLAGS_ETH_ADDRS BIT(1) +#define BNXT_TC_FLOW_FLAGS_IPV4_ADDRS BIT(2) +#define BNXT_TC_FLOW_FLAGS_IPV6_ADDRS BIT(3) +#define BNXT_TC_FLOW_FLAGS_PORTS BIT(4) +#define BNXT_TC_FLOW_FLAGS_ICMP BIT(5) + + /* flow applicable to pkts ingressing on this fid */ + u16 src_fid; + struct bnxt_tc_l2_key l2_key; + struct bnxt_tc_l2_key l2_mask; + struct bnxt_tc_l3_key l3_key; + struct bnxt_tc_l3_key l3_mask; + struct bnxt_tc_l4_key l4_key; + struct bnxt_tc_l4_key l4_mask; + + struct bnxt_tc_actions actions; + + /* updated stats accounting for hw-counter wrap-around */ + struct bnxt_tc_flow_stats stats; + /* previous snap-shot of stats */ + struct bnxt_tc_flow_stats prev_stats; + unsigned long lastused; /* jiffies */ +}; + +/* L2 hash table + * This data-struct is used for L2-flow table. + * The L2 part of a flow is stored in a hash table. + * A flow that shares the same L2 key/mask with an + * already existing flow must refer to it's flow handle. + */ +struct bnxt_tc_l2_node { + /* hash key: first 16b of key */ +#define BNXT_TC_L2_KEY_LEN 16 + struct bnxt_tc_l2_key key; + struct rhash_head node; + + /* a linked list of flows that share the same l2 key */ + struct list_head common_l2_flows; + + /* number of flows sharing the l2 key */ + u16 refcount; + + struct rcu_head rcu; +}; + +struct bnxt_tc_flow_node { + /* hash key: provided by TC */ + unsigned long cookie; + struct rhash_head node; + + struct bnxt_tc_flow flow; + + __le16 flow_handle; + + /* L2 node in l2 hashtable that shares flow's l2 key */ + struct bnxt_tc_l2_node *l2_node; + /* for the shared_flows list maintained in l2_node */ + struct list_head l2_list_node; + + struct rcu_head rcu; +}; + +int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid, + struct tc_cls_flower_offload *cls_flower); +int bnxt_init_tc(struct bnxt *bp); +void bnxt_shutdown_tc(struct bnxt *bp); + +#else /* CONFIG_BNXT_FLOWER_OFFLOAD */ + +static inline int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid, + struct tc_cls_flower_offload *cls_flower) +{ + return -EOPNOTSUPP; +} + +static inline int bnxt_init_tc(struct bnxt *bp) +{ + return 0; +} + +static inline void bnxt_shutdown_tc(struct bnxt *bp) +{ +} +#endif /* CONFIG_BNXT_FLOWER_OFFLOAD */ +#endif /* BNXT_TC_H */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c new file mode 100644 index 000000000000..e75db04c6cdc --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -0,0 +1,513 @@ +/* Broadcom NetXtreme-C/E network driver. 
+ * + * Copyright (c) 2016-2017 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include + +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_vfr.h" +#include "bnxt_tc.h" + +#ifdef CONFIG_BNXT_SRIOV + +#define CFA_HANDLE_INVALID 0xffff +#define VF_IDX_INVALID 0xffff + +static int hwrm_cfa_vfr_alloc(struct bnxt *bp, u16 vf_idx, + u16 *tx_cfa_action, u16 *rx_cfa_code) +{ + struct hwrm_cfa_vfr_alloc_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_cfa_vfr_alloc_input req = { 0 }; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_VFR_ALLOC, -1, -1); + req.vf_id = cpu_to_le16(vf_idx); + sprintf(req.vfr_name, "vfr%d", vf_idx); + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) { + *tx_cfa_action = le16_to_cpu(resp->tx_cfa_action); + *rx_cfa_code = le16_to_cpu(resp->rx_cfa_code); + netdev_dbg(bp->dev, "tx_cfa_action=0x%x, rx_cfa_code=0x%x", + *tx_cfa_action, *rx_cfa_code); + } else { + netdev_info(bp->dev, "%s error rc=%d", __func__, rc); + } + + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static int hwrm_cfa_vfr_free(struct bnxt *bp, u16 vf_idx) +{ + struct hwrm_cfa_vfr_free_input req = { 0 }; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_VFR_FREE, -1, -1); + sprintf(req.vfr_name, "vfr%d", vf_idx); + + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + netdev_info(bp->dev, "%s error rc=%d", __func__, rc); + return rc; +} + +static int bnxt_vf_rep_open(struct net_device *dev) +{ + struct bnxt_vf_rep *vf_rep = netdev_priv(dev); + struct bnxt *bp = vf_rep->bp; + + /* Enable link and TX only if the parent PF is open. 
*/ + if (netif_running(bp->dev)) { + netif_carrier_on(dev); + netif_tx_start_all_queues(dev); + } + return 0; +} + +static int bnxt_vf_rep_close(struct net_device *dev) +{ + netif_carrier_off(dev); + netif_tx_disable(dev); + + return 0; +} + +static netdev_tx_t bnxt_vf_rep_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct bnxt_vf_rep *vf_rep = netdev_priv(dev); + int rc, len = skb->len; + + skb_dst_drop(skb); + dst_hold((struct dst_entry *)vf_rep->dst); + skb_dst_set(skb, (struct dst_entry *)vf_rep->dst); + skb->dev = vf_rep->dst->u.port_info.lower_dev; + + rc = dev_queue_xmit(skb); + if (!rc) { + vf_rep->tx_stats.packets++; + vf_rep->tx_stats.bytes += len; + } + return rc; +} + +static void +bnxt_vf_rep_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct bnxt_vf_rep *vf_rep = netdev_priv(dev); + + stats->rx_packets = vf_rep->rx_stats.packets; + stats->rx_bytes = vf_rep->rx_stats.bytes; + stats->tx_packets = vf_rep->tx_stats.packets; + stats->tx_bytes = vf_rep->tx_stats.bytes; +} + +static int bnxt_vf_rep_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + struct bnxt_vf_rep *vf_rep = netdev_priv(dev); + struct bnxt *bp = vf_rep->bp; + int vf_fid = bp->pf.vf[vf_rep->vf_idx].fw_fid; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return bnxt_tc_setup_flower(bp, vf_fid, type_data); + default: + return -EOPNOTSUPP; + } +} + +struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code) +{ + u16 vf_idx; + + if (cfa_code && bp->cfa_code_map && BNXT_PF(bp)) { + vf_idx = bp->cfa_code_map[cfa_code]; + if (vf_idx != VF_IDX_INVALID) + return bp->vf_reps[vf_idx]->dev; + } + return NULL; +} + +void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb) +{ + struct bnxt_vf_rep *vf_rep = netdev_priv(skb->dev); + struct bnxt_vf_rep_stats *rx_stats; + + rx_stats = &vf_rep->rx_stats; + vf_rep->rx_stats.bytes += skb->len; + vf_rep->rx_stats.packets++; + + netif_receive_skb(skb); +} + +static int bnxt_vf_rep_get_phys_port_name(struct net_device *dev, char *buf, + size_t len) +{ + struct bnxt_vf_rep *vf_rep = netdev_priv(dev); + struct pci_dev *pf_pdev = vf_rep->bp->pdev; + int rc; + + rc = snprintf(buf, len, "pf%dvf%d", PCI_FUNC(pf_pdev->devfn), + vf_rep->vf_idx); + if (rc >= len) + return -EOPNOTSUPP; + return 0; +} + +static void bnxt_vf_rep_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); +} + +static int bnxt_vf_rep_port_attr_get(struct net_device *dev, + struct switchdev_attr *attr) +{ + struct bnxt_vf_rep *vf_rep = netdev_priv(dev); + + /* as only PORT_PARENT_ID is supported currently use common code + * between PF and VF-rep for now. 
+ */ + return bnxt_port_attr_get(vf_rep->bp, attr); +} + +static const struct switchdev_ops bnxt_vf_rep_switchdev_ops = { + .switchdev_port_attr_get = bnxt_vf_rep_port_attr_get +}; + +static const struct ethtool_ops bnxt_vf_rep_ethtool_ops = { + .get_drvinfo = bnxt_vf_rep_get_drvinfo +}; + +static const struct net_device_ops bnxt_vf_rep_netdev_ops = { + .ndo_open = bnxt_vf_rep_open, + .ndo_stop = bnxt_vf_rep_close, + .ndo_start_xmit = bnxt_vf_rep_xmit, + .ndo_get_stats64 = bnxt_vf_rep_get_stats64, + .ndo_setup_tc = bnxt_vf_rep_setup_tc, + .ndo_get_phys_port_name = bnxt_vf_rep_get_phys_port_name +}; + +/* Called when the parent PF interface is closed: + * As the mode transition from SWITCHDEV to LEGACY + * happens under the rtnl_lock() this routine is safe + * under the rtnl_lock() + */ +void bnxt_vf_reps_close(struct bnxt *bp) +{ + struct bnxt_vf_rep *vf_rep; + u16 num_vfs, i; + + if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) + return; + + num_vfs = pci_num_vf(bp->pdev); + for (i = 0; i < num_vfs; i++) { + vf_rep = bp->vf_reps[i]; + if (netif_running(vf_rep->dev)) + bnxt_vf_rep_close(vf_rep->dev); + } +} + +/* Called when the parent PF interface is opened (re-opened): + * As the mode transition from SWITCHDEV to LEGACY + * happen under the rtnl_lock() this routine is safe + * under the rtnl_lock() + */ +void bnxt_vf_reps_open(struct bnxt *bp) +{ + int i; + + if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) + return; + + for (i = 0; i < pci_num_vf(bp->pdev); i++) + bnxt_vf_rep_open(bp->vf_reps[i]->dev); +} + +static void __bnxt_vf_reps_destroy(struct bnxt *bp) +{ + u16 num_vfs = pci_num_vf(bp->pdev); + struct bnxt_vf_rep *vf_rep; + int i; + + for (i = 0; i < num_vfs; i++) { + vf_rep = bp->vf_reps[i]; + if (vf_rep) { + dst_release((struct dst_entry *)vf_rep->dst); + + if (vf_rep->tx_cfa_action != CFA_HANDLE_INVALID) + hwrm_cfa_vfr_free(bp, vf_rep->vf_idx); + + if (vf_rep->dev) { + /* if register_netdev failed, then netdev_ops + * would have been set to NULL + */ + if (vf_rep->dev->netdev_ops) + unregister_netdev(vf_rep->dev); + free_netdev(vf_rep->dev); + } + } + } + + kfree(bp->vf_reps); + bp->vf_reps = NULL; +} + +void bnxt_vf_reps_destroy(struct bnxt *bp) +{ + bool closed = false; + + if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) + return; + + if (!bp->vf_reps) + return; + + /* Ensure that parent PF's and VF-reps' RX/TX has been quiesced + * before proceeding with VF-rep cleanup. 
+ */ + rtnl_lock(); + if (netif_running(bp->dev)) { + bnxt_close_nic(bp, false, false); + closed = true; + } + /* un-publish cfa_code_map so that RX path can't see it anymore */ + kfree(bp->cfa_code_map); + bp->cfa_code_map = NULL; + bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; + + if (closed) + bnxt_open_nic(bp, false, false); + rtnl_unlock(); + + /* Need to call vf_reps_destroy() outside of rntl_lock + * as unregister_netdev takes rtnl_lock + */ + __bnxt_vf_reps_destroy(bp); +} + +/* Use the OUI of the PF's perm addr and report the same mac addr + * for the same VF-rep each time + */ +static void bnxt_vf_rep_eth_addr_gen(u8 *src_mac, u16 vf_idx, u8 *mac) +{ + u32 addr; + + ether_addr_copy(mac, src_mac); + + addr = jhash(src_mac, ETH_ALEN, 0) + vf_idx; + mac[3] = (u8)(addr & 0xFF); + mac[4] = (u8)((addr >> 8) & 0xFF); + mac[5] = (u8)((addr >> 16) & 0xFF); +} + +static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep, + struct net_device *dev) +{ + struct net_device *pf_dev = bp->dev; + + dev->netdev_ops = &bnxt_vf_rep_netdev_ops; + dev->ethtool_ops = &bnxt_vf_rep_ethtool_ops; + SWITCHDEV_SET_OPS(dev, &bnxt_vf_rep_switchdev_ops); + /* Just inherit all the featues of the parent PF as the VF-R + * uses the RX/TX rings of the parent PF + */ + dev->hw_features = pf_dev->hw_features; + dev->gso_partial_features = pf_dev->gso_partial_features; + dev->vlan_features = pf_dev->vlan_features; + dev->hw_enc_features = pf_dev->hw_enc_features; + dev->features |= pf_dev->features; + bnxt_vf_rep_eth_addr_gen(bp->pf.mac_addr, vf_rep->vf_idx, + dev->perm_addr); + ether_addr_copy(dev->dev_addr, dev->perm_addr); +} + +static int bnxt_vf_reps_create(struct bnxt *bp) +{ + u16 *cfa_code_map = NULL, num_vfs = pci_num_vf(bp->pdev); + struct bnxt_vf_rep *vf_rep; + struct net_device *dev; + int rc, i; + + bp->vf_reps = kcalloc(num_vfs, sizeof(vf_rep), GFP_KERNEL); + if (!bp->vf_reps) + return -ENOMEM; + + /* storage for cfa_code to vf-idx mapping */ + cfa_code_map = kmalloc(sizeof(*bp->cfa_code_map) * MAX_CFA_CODE, + GFP_KERNEL); + if (!cfa_code_map) { + rc = -ENOMEM; + goto err; + } + for (i = 0; i < MAX_CFA_CODE; i++) + cfa_code_map[i] = VF_IDX_INVALID; + + for (i = 0; i < num_vfs; i++) { + dev = alloc_etherdev(sizeof(*vf_rep)); + if (!dev) { + rc = -ENOMEM; + goto err; + } + + vf_rep = netdev_priv(dev); + bp->vf_reps[i] = vf_rep; + vf_rep->dev = dev; + vf_rep->bp = bp; + vf_rep->vf_idx = i; + vf_rep->tx_cfa_action = CFA_HANDLE_INVALID; + + /* get cfa handles from FW */ + rc = hwrm_cfa_vfr_alloc(bp, vf_rep->vf_idx, + &vf_rep->tx_cfa_action, + &vf_rep->rx_cfa_code); + if (rc) { + rc = -ENOLINK; + goto err; + } + cfa_code_map[vf_rep->rx_cfa_code] = vf_rep->vf_idx; + + vf_rep->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, + GFP_KERNEL); + if (!vf_rep->dst) { + rc = -ENOMEM; + goto err; + } + /* only cfa_action is needed to mux a packet while TXing */ + vf_rep->dst->u.port_info.port_id = vf_rep->tx_cfa_action; + vf_rep->dst->u.port_info.lower_dev = bp->dev; + + bnxt_vf_rep_netdev_init(bp, vf_rep, dev); + rc = register_netdev(dev); + if (rc) { + /* no need for unregister_netdev in cleanup */ + dev->netdev_ops = NULL; + goto err; + } + } + + /* publish cfa_code_map only after all VF-reps have been initialized */ + bp->cfa_code_map = cfa_code_map; + bp->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV; + netif_keep_dst(bp->dev); + return 0; + +err: + netdev_info(bp->dev, "%s error=%d", __func__, rc); + kfree(cfa_code_map); + __bnxt_vf_reps_destroy(bp); + return rc; +} + +/* Devlink related 
routines */ +static int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode) +{ + struct bnxt *bp = bnxt_get_bp_from_dl(devlink); + + *mode = bp->eswitch_mode; + return 0; +} + +static int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode) +{ + struct bnxt *bp = bnxt_get_bp_from_dl(devlink); + int rc = 0; + + mutex_lock(&bp->sriov_lock); + if (bp->eswitch_mode == mode) { + netdev_info(bp->dev, "already in %s eswitch mode", + mode == DEVLINK_ESWITCH_MODE_LEGACY ? + "legacy" : "switchdev"); + rc = -EINVAL; + goto done; + } + + switch (mode) { + case DEVLINK_ESWITCH_MODE_LEGACY: + bnxt_vf_reps_destroy(bp); + break; + + case DEVLINK_ESWITCH_MODE_SWITCHDEV: + if (pci_num_vf(bp->pdev) == 0) { + netdev_info(bp->dev, + "Enable VFs before setting switchdev mode"); + rc = -EPERM; + goto done; + } + rc = bnxt_vf_reps_create(bp); + break; + + default: + rc = -EINVAL; + goto done; + } +done: + mutex_unlock(&bp->sriov_lock); + return rc; +} + +static const struct devlink_ops bnxt_dl_ops = { + .eswitch_mode_set = bnxt_dl_eswitch_mode_set, + .eswitch_mode_get = bnxt_dl_eswitch_mode_get +}; + +int bnxt_dl_register(struct bnxt *bp) +{ + struct devlink *dl; + int rc; + + if (!pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV)) + return 0; + + if (bp->hwrm_spec_code < 0x10800) { + netdev_warn(bp->dev, "Firmware does not support SR-IOV E-Switch SWITCHDEV mode.\n"); + return -ENOTSUPP; + } + + dl = devlink_alloc(&bnxt_dl_ops, sizeof(struct bnxt_dl)); + if (!dl) { + netdev_warn(bp->dev, "devlink_alloc failed"); + return -ENOMEM; + } + + bnxt_link_bp_to_dl(bp, dl); + bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; + rc = devlink_register(dl, &bp->pdev->dev); + if (rc) { + bnxt_link_bp_to_dl(bp, NULL); + devlink_free(dl); + netdev_warn(bp->dev, "devlink_register failed. rc=%d", rc); + return rc; + } + + return 0; +} + +void bnxt_dl_unregister(struct bnxt *bp) +{ + struct devlink *dl = bp->dl; + + if (!dl) + return; + + devlink_unregister(dl); + devlink_free(dl); +} + +#endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h new file mode 100644 index 000000000000..7787cd24606a --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h @@ -0,0 +1,89 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2016-2017 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#ifndef BNXT_VFR_H +#define BNXT_VFR_H + +#ifdef CONFIG_BNXT_SRIOV + +#define MAX_CFA_CODE 65536 + +/* Struct to hold housekeeping info needed by devlink interface */ +struct bnxt_dl { + struct bnxt *bp; /* back ptr to the controlling dev */ +}; + +static inline struct bnxt *bnxt_get_bp_from_dl(struct devlink *dl) +{ + return ((struct bnxt_dl *)devlink_priv(dl))->bp; +} + +/* To clear devlink pointer from bp, pass NULL dl */ +static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl) +{ + bp->dl = dl; + + /* add a back pointer in dl to bp */ + if (dl) { + struct bnxt_dl *bp_dl = devlink_priv(dl); + + bp_dl->bp = bp; + } +} + +int bnxt_dl_register(struct bnxt *bp); +void bnxt_dl_unregister(struct bnxt *bp); +void bnxt_vf_reps_destroy(struct bnxt *bp); +void bnxt_vf_reps_close(struct bnxt *bp); +void bnxt_vf_reps_open(struct bnxt *bp); +void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb); +struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code); + +static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev) +{ + struct bnxt_vf_rep *vf_rep = netdev_priv(dev); + struct bnxt *bp = vf_rep->bp; + + return bp->pf.vf[vf_rep->vf_idx].fw_fid; +} + +#else + +static inline int bnxt_dl_register(struct bnxt *bp) +{ + return 0; +} + +static inline void bnxt_dl_unregister(struct bnxt *bp) +{ +} + +static inline void bnxt_vf_reps_close(struct bnxt *bp) +{ +} + +static inline void bnxt_vf_reps_open(struct bnxt *bp) +{ +} + +static inline void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb) +{ +} + +static inline struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code) +{ + return NULL; +} + +static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev) +{ + return 0; +} +#endif /* CONFIG_BNXT_SRIOV */ +#endif /* BNXT_VFR_H */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index 3961a6807454..d8f0c837b72c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -169,8 +169,8 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog) tc = netdev_get_num_tc(dev); if (!tc) tc = 1; - rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, - true, tc, tx_xdp); + rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, + true, tc, tx_xdp); if (rc) { netdev_warn(dev, "Unable to reserve enough TX rings to support XDP.\n"); return rc; diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index cec94bbb2ea5..8bc126a156e8 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -1278,7 +1278,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev) ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0); if (ret) - return -ENOMEM; + goto error; n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE; for (i = 0, j = 0; i < cp->max_cid_space; i++) { diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index fea3f9a5fb2d..9cebca896913 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -72,23 +72,42 @@ #define GENET_RDMA_REG_OFF (priv->hw_params->rdma_offset + \ TOTAL_DESC * DMA_DESC_SIZE) +static inline void bcmgenet_writel(u32 value, void __iomem *offset) +{ + /* MIPS chips strapped for BE will automagically configure the + * peripheral registers for CPU-native byte order. 
+ */ + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) + __raw_writel(value, offset); + else + writel_relaxed(value, offset); +} + +static inline u32 bcmgenet_readl(void __iomem *offset) +{ + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) + return __raw_readl(offset); + else + return readl_relaxed(offset); +} + static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv, void __iomem *d, u32 value) { - __raw_writel(value, d + DMA_DESC_LENGTH_STATUS); + bcmgenet_writel(value, d + DMA_DESC_LENGTH_STATUS); } static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv, void __iomem *d) { - return __raw_readl(d + DMA_DESC_LENGTH_STATUS); + return bcmgenet_readl(d + DMA_DESC_LENGTH_STATUS); } static inline void dmadesc_set_addr(struct bcmgenet_priv *priv, void __iomem *d, dma_addr_t addr) { - __raw_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO); + bcmgenet_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO); /* Register writes to GISB bus can take couple hundred nanoseconds * and are done for each packet, save these expensive writes unless @@ -96,7 +115,7 @@ static inline void dmadesc_set_addr(struct bcmgenet_priv *priv, */ #ifdef CONFIG_PHYS_ADDR_T_64BIT if (priv->hw_params->flags & GENET_HAS_40BITS) - __raw_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI); + bcmgenet_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI); #endif } @@ -113,7 +132,7 @@ static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv, { dma_addr_t addr; - addr = __raw_readl(d + DMA_DESC_ADDRESS_LO); + addr = bcmgenet_readl(d + DMA_DESC_ADDRESS_LO); /* Register writes to GISB bus can take couple hundred nanoseconds * and are done for each packet, save these expensive writes unless @@ -121,7 +140,7 @@ static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv, */ #ifdef CONFIG_PHYS_ADDR_T_64BIT if (priv->hw_params->flags & GENET_HAS_40BITS) - addr |= (u64)__raw_readl(d + DMA_DESC_ADDRESS_HI) << 32; + addr |= (u64)bcmgenet_readl(d + DMA_DESC_ADDRESS_HI) << 32; #endif return addr; } @@ -156,8 +175,8 @@ static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv) if (GENET_IS_V1(priv)) return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1); else - return __raw_readl(priv->base + - priv->hw_params->tbuf_offset + TBUF_CTRL); + return bcmgenet_readl(priv->base + + priv->hw_params->tbuf_offset + TBUF_CTRL); } static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val) @@ -165,7 +184,7 @@ static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val) if (GENET_IS_V1(priv)) bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1); else - __raw_writel(val, priv->base + + bcmgenet_writel(val, priv->base + priv->hw_params->tbuf_offset + TBUF_CTRL); } @@ -174,8 +193,8 @@ static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv) if (GENET_IS_V1(priv)) return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1); else - return __raw_readl(priv->base + - priv->hw_params->tbuf_offset + TBUF_BP_MC); + return bcmgenet_readl(priv->base + + priv->hw_params->tbuf_offset + TBUF_BP_MC); } static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val) @@ -183,7 +202,7 @@ static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val) if (GENET_IS_V1(priv)) bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1); else - __raw_writel(val, priv->base + + bcmgenet_writel(val, priv->base + priv->hw_params->tbuf_offset + TBUF_BP_MC); } @@ -326,28 +345,28 @@ static inline struct bcmgenet_priv *dev_to_priv(struct device *dev) static 
inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv, enum dma_reg r) { - return __raw_readl(priv->base + GENET_TDMA_REG_OFF + - DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); + return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF + + DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); } static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv, u32 val, enum dma_reg r) { - __raw_writel(val, priv->base + GENET_TDMA_REG_OFF + + bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF + DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); } static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv, enum dma_reg r) { - return __raw_readl(priv->base + GENET_RDMA_REG_OFF + - DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); + return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF + + DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); } static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv, u32 val, enum dma_reg r) { - __raw_writel(val, priv->base + GENET_RDMA_REG_OFF + + bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF + DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); } @@ -418,16 +437,16 @@ static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv, unsigned int ring, enum dma_ring_reg r) { - return __raw_readl(priv->base + GENET_TDMA_REG_OFF + - (DMA_RING_SIZE * ring) + - genet_dma_ring_regs[r]); + return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF + + (DMA_RING_SIZE * ring) + + genet_dma_ring_regs[r]); } static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv, unsigned int ring, u32 val, enum dma_ring_reg r) { - __raw_writel(val, priv->base + GENET_TDMA_REG_OFF + + bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF + (DMA_RING_SIZE * ring) + genet_dma_ring_regs[r]); } @@ -436,16 +455,16 @@ static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv, unsigned int ring, enum dma_ring_reg r) { - return __raw_readl(priv->base + GENET_RDMA_REG_OFF + - (DMA_RING_SIZE * ring) + - genet_dma_ring_regs[r]); + return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF + + (DMA_RING_SIZE * ring) + + genet_dma_ring_regs[r]); } static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv, unsigned int ring, u32 val, enum dma_ring_reg r) { - __raw_writel(val, priv->base + GENET_RDMA_REG_OFF + + bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF + (DMA_RING_SIZE * ring) + genet_dma_ring_regs[r]); } @@ -991,12 +1010,12 @@ static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable) bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL); /* Enable EEE and switch to a 27Mhz clock automatically */ - reg = __raw_readl(priv->base + off); + reg = bcmgenet_readl(priv->base + off); if (enable) reg |= TBUF_EEE_EN | TBUF_PM_EN; else reg &= ~(TBUF_EEE_EN | TBUF_PM_EN); - __raw_writel(reg, priv->base + off); + bcmgenet_writel(reg, priv->base + off); /* Do the same for thing for RBUF */ reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index 3a34fdba5301..4c49d0b97748 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -657,6 +657,7 @@ struct bcmgenet_priv { struct clk *clk; struct platform_device *pdev; + struct platform_device *mii_pdev; /* WOL */ struct clk *clk_wol; @@ -671,12 +672,21 @@ struct bcmgenet_priv { static inline u32 bcmgenet_##name##_readl(struct bcmgenet_priv *priv, \ u32 off) \ { \ - return __raw_readl(priv->base + offset + off); \ + /* MIPS chips strapped for BE will automagically configure the \ + * 
peripheral registers for CPU-native byte order. \ + */ \ + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) \ + return __raw_readl(priv->base + offset + off); \ + else \ + return readl_relaxed(priv->base + offset + off); \ } \ static inline void bcmgenet_##name##_writel(struct bcmgenet_priv *priv, \ u32 val, u32 off) \ { \ - __raw_writel(val, priv->base + offset + off); \ + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) \ + __raw_writel(val, priv->base + offset + off); \ + else \ + writel_relaxed(val, priv->base + offset + off); \ } GENET_IO_MACRO(ext, GENET_EXT_OFF); diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 30cb97b4a1d7..18f5723be2c9 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -24,62 +24,10 @@ #include #include #include +#include #include "bcmgenet.h" -/* read a value from the MII */ -static int bcmgenet_mii_read(struct mii_bus *bus, int phy_id, int location) -{ - int ret; - struct net_device *dev = bus->priv; - struct bcmgenet_priv *priv = netdev_priv(dev); - u32 reg; - - bcmgenet_umac_writel(priv, (MDIO_RD | (phy_id << MDIO_PMD_SHIFT) | - (location << MDIO_REG_SHIFT)), UMAC_MDIO_CMD); - /* Start MDIO transaction*/ - reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD); - reg |= MDIO_START_BUSY; - bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD); - wait_event_timeout(priv->wq, - !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) - & MDIO_START_BUSY), - HZ / 100); - ret = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD); - - /* Some broken devices are known not to release the line during - * turn-around, e.g: Broadcom BCM53125 external switches, so check for - * that condition here and ignore the MDIO controller read failure - * indication. - */ - if (!(bus->phy_ignore_ta_mask & 1 << phy_id) && (ret & MDIO_READ_FAIL)) - return -EIO; - - return ret & 0xffff; -} - -/* write a value to the MII */ -static int bcmgenet_mii_write(struct mii_bus *bus, int phy_id, - int location, u16 val) -{ - struct net_device *dev = bus->priv; - struct bcmgenet_priv *priv = netdev_priv(dev); - u32 reg; - - bcmgenet_umac_writel(priv, (MDIO_WR | (phy_id << MDIO_PMD_SHIFT) | - (location << MDIO_REG_SHIFT) | (0xffff & val)), - UMAC_MDIO_CMD); - reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD); - reg |= MDIO_START_BUSY; - bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD); - wait_event_timeout(priv->wq, - !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) & - MDIO_START_BUSY), - HZ / 100); - - return 0; -} - /* setup netdev link state when PHY link status change and * update UMAC and RGMII block when link up */ @@ -393,104 +341,121 @@ int bcmgenet_mii_probe(struct net_device *dev) return 0; } -/* Workaround for integrated BCM7xxx Gigabit PHYs which have a problem with - * their internal MDIO management controller making them fail to successfully - * be read from or written to for the first transaction. We insert a dummy - * BMSR read here to make sure that phy_get_device() and get_phy_id() can - * correctly read the PHY MII_PHYSID1/2 registers and successfully register a - * PHY device for this peripheral. - * - * Once the PHY driver is registered, we can workaround subsequent reads from - * there (e.g: during system-wide power management). - * - * bus->reset is invoked before mdiobus_scan during mdiobus_register and is - * therefore the right location to stick that workaround. 
Since we do not want - * to read from non-existing PHYs, we either use bus->phy_mask or do a manual - * Device Tree scan to limit the search area. - */ -static int bcmgenet_mii_bus_reset(struct mii_bus *bus) +static struct device_node *bcmgenet_mii_of_find_mdio(struct bcmgenet_priv *priv) { - struct net_device *dev = bus->priv; - struct bcmgenet_priv *priv = netdev_priv(dev); - struct device_node *np = priv->mdio_dn; - struct device_node *child = NULL; - u32 read_mask = 0; - int addr = 0; + struct device_node *dn = priv->pdev->dev.of_node; + struct device *kdev = &priv->pdev->dev; + char *compat; - if (!np) { - read_mask = 1 << priv->phy_addr; - } else { - for_each_available_child_of_node(np, child) { - addr = of_mdio_parse_addr(&dev->dev, child); - if (addr < 0) - continue; + compat = kasprintf(GFP_KERNEL, "brcm,genet-mdio-v%d", priv->version); + if (!compat) + return NULL; - read_mask |= 1 << addr; - } + priv->mdio_dn = of_find_compatible_node(dn, NULL, compat); + kfree(compat); + if (!priv->mdio_dn) { + dev_err(kdev, "unable to find MDIO bus node\n"); + return NULL; } - for (addr = 0; addr < PHY_MAX_ADDR; addr++) { - if (read_mask & 1 << addr) { - dev_dbg(&dev->dev, "Workaround for PHY @ %d\n", addr); - mdiobus_read(bus, addr, MII_BMSR); - } - } + return priv->mdio_dn; +} +static void bcmgenet_mii_pdata_init(struct bcmgenet_priv *priv, + struct unimac_mdio_pdata *ppd) +{ + struct device *kdev = &priv->pdev->dev; + struct bcmgenet_platform_data *pd = kdev->platform_data; + + if (pd->phy_interface != PHY_INTERFACE_MODE_MOCA && pd->mdio_enabled) { + /* + * Internal or external PHY with MDIO access + */ + if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR) + ppd->phy_mask = 1 << pd->phy_address; + else + ppd->phy_mask = 0; + } +} + +static int bcmgenet_mii_wait(void *wait_func_data) +{ + struct bcmgenet_priv *priv = wait_func_data; + + wait_event_timeout(priv->wq, + !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) + & MDIO_START_BUSY), + HZ / 100); return 0; } -static int bcmgenet_mii_alloc(struct bcmgenet_priv *priv) +static int bcmgenet_mii_register(struct bcmgenet_priv *priv) { - struct mii_bus *bus; + struct platform_device *pdev = priv->pdev; + struct bcmgenet_platform_data *pdata = pdev->dev.platform_data; + struct device_node *dn = pdev->dev.of_node; + struct unimac_mdio_pdata ppd; + struct platform_device *ppdev; + struct resource *pres, res; + int id, ret; - if (priv->mii_bus) - return 0; + pres = platform_get_resource(pdev, IORESOURCE_MEM, 0); + memset(&res, 0, sizeof(res)); + memset(&ppd, 0, sizeof(ppd)); - priv->mii_bus = mdiobus_alloc(); - if (!priv->mii_bus) { - pr_err("failed to allocate\n"); + ppd.wait_func = bcmgenet_mii_wait; + ppd.wait_func_data = priv; + ppd.bus_name = "bcmgenet MII bus"; + + /* Unimac MDIO bus controller starts at UniMAC offset + MDIO_CMD + * and is 2 * 32-bits word long, 8 bytes total. 
+ */ + res.start = pres->start + GENET_UMAC_OFF + UMAC_MDIO_CMD; + res.end = res.start + 8; + res.flags = IORESOURCE_MEM; + + if (dn) + id = of_alias_get_id(dn, "eth"); + else + id = pdev->id; + + ppdev = platform_device_alloc(UNIMAC_MDIO_DRV_NAME, id); + if (!ppdev) return -ENOMEM; - } - bus = priv->mii_bus; - bus->priv = priv->dev; - bus->name = "bcmgenet MII bus"; - bus->parent = &priv->pdev->dev; - bus->read = bcmgenet_mii_read; - bus->write = bcmgenet_mii_write; - bus->reset = bcmgenet_mii_bus_reset; - snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", - priv->pdev->name, priv->pdev->id); + /* Retain this platform_device pointer for later cleanup */ + priv->mii_pdev = ppdev; + ppdev->dev.parent = &pdev->dev; + ppdev->dev.of_node = bcmgenet_mii_of_find_mdio(priv); + if (pdata) + bcmgenet_mii_pdata_init(priv, &ppd); + + ret = platform_device_add_resources(ppdev, &res, 1); + if (ret) + goto out; + + ret = platform_device_add_data(ppdev, &ppd, sizeof(ppd)); + if (ret) + goto out; + + ret = platform_device_add(ppdev); + if (ret) + goto out; return 0; +out: + platform_device_put(ppdev); + return ret; } static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv) { struct device_node *dn = priv->pdev->dev.of_node; struct device *kdev = &priv->pdev->dev; - struct phy_device *phydev = NULL; - char *compat; + struct phy_device *phydev; int phy_mode; int ret; - compat = kasprintf(GFP_KERNEL, "brcm,genet-mdio-v%d", priv->version); - if (!compat) - return -ENOMEM; - - priv->mdio_dn = of_find_compatible_node(dn, NULL, compat); - kfree(compat); - if (!priv->mdio_dn) { - dev_err(kdev, "unable to find MDIO bus node\n"); - return -ENODEV; - } - - ret = of_mdiobus_register(priv->mii_bus, priv->mdio_dn); - if (ret) { - dev_err(kdev, "failed to register MDIO bus\n"); - return ret; - } - /* Fetch the PHY phandle */ priv->phy_dn = of_parse_phandle(dn, "phy-handle", 0); @@ -537,33 +502,23 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv) { struct device *kdev = &priv->pdev->dev; struct bcmgenet_platform_data *pd = kdev->platform_data; - struct mii_bus *mdio = priv->mii_bus; + char phy_name[MII_BUS_ID_SIZE + 3]; + char mdio_bus_id[MII_BUS_ID_SIZE]; struct phy_device *phydev; - int ret; + + snprintf(mdio_bus_id, MII_BUS_ID_SIZE, "%s-%d", + UNIMAC_MDIO_DRV_NAME, priv->pdev->id); if (pd->phy_interface != PHY_INTERFACE_MODE_MOCA && pd->mdio_enabled) { + snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, + mdio_bus_id, pd->phy_address); + /* * Internal or external PHY with MDIO access */ - if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR) - mdio->phy_mask = ~(1 << pd->phy_address); - else - mdio->phy_mask = 0; - - ret = mdiobus_register(mdio); - if (ret) { - dev_err(kdev, "failed to register MDIO bus\n"); - return ret; - } - - if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR) - phydev = mdiobus_get_phy(mdio, pd->phy_address); - else - phydev = phy_find_first(mdio); - + phydev = phy_attach(priv->dev, phy_name, pd->phy_interface); if (!phydev) { dev_err(kdev, "failed to register PHY device\n"); - mdiobus_unregister(mdio); return -ENODEV; } } else { @@ -609,10 +564,9 @@ static int bcmgenet_mii_bus_init(struct bcmgenet_priv *priv) int bcmgenet_mii_init(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); - struct device_node *dn = priv->pdev->dev.of_node; int ret; - ret = bcmgenet_mii_alloc(priv); + ret = bcmgenet_mii_register(priv); if (ret) return ret; @@ -623,11 +577,7 @@ int bcmgenet_mii_init(struct net_device *dev) return 0; out: - if (of_phy_is_fixed_link(dn)) - 
of_phy_deregister_fixed_link(dn); - of_node_put(priv->phy_dn); - mdiobus_unregister(priv->mii_bus); - mdiobus_free(priv->mii_bus); + bcmgenet_mii_exit(dev); return ret; } @@ -639,6 +589,6 @@ void bcmgenet_mii_exit(struct net_device *dev) if (of_phy_is_fixed_link(dn)) of_phy_deregister_fixed_link(dn); of_node_put(priv->phy_dn); - mdiobus_unregister(priv->mii_bus); - mdiobus_free(priv->mii_bus); + platform_device_unregister(priv->mii_pdev); + platform_device_put(priv->mii_pdev); } diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index 16a0f192daec..ecdef42f0ae6 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -1367,15 +1367,11 @@ static int sbmac_initctx(struct sbmac_softc *s) static void sbdma_uninitctx(struct sbmacdma *d) { - if (d->sbdma_dscrtable_unaligned) { - kfree(d->sbdma_dscrtable_unaligned); - d->sbdma_dscrtable_unaligned = d->sbdma_dscrtable = NULL; - } + kfree(d->sbdma_dscrtable_unaligned); + d->sbdma_dscrtable_unaligned = d->sbdma_dscrtable = NULL; - if (d->sbdma_ctxtable) { - kfree(d->sbdma_ctxtable); - d->sbdma_ctxtable = NULL; - } + kfree(d->sbdma_ctxtable); + d->sbdma_ctxtable = NULL; } diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index d600c41fb1dc..656e6af70f0a 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -6587,7 +6587,7 @@ static void tg3_tx(struct tg3_napi *tnapi) pkts_compl++; bytes_compl += skb->len; - dev_kfree_skb_any(skb); + dev_consume_skb_any(skb); if (unlikely(tx_bug)) { tg3_tx_recover(tp); @@ -7829,7 +7829,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, } } - dev_kfree_skb_any(skb); + dev_consume_skb_any(skb); *pskb = new_skb; return ret; } @@ -7882,7 +7882,7 @@ static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi, } while (segs); tg3_tso_bug_end: - dev_kfree_skb_any(skb); + dev_consume_skb_any(skb); return NETDEV_TX_OK; } @@ -8543,7 +8543,7 @@ static void tg3_free_rings(struct tg3 *tp) tg3_tx_skb_unmap(tnapi, i, skb_shinfo(skb)->nr_frags - 1); - dev_kfree_skb_any(skb); + dev_consume_skb_any(skb); } netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j)); } @@ -11536,11 +11536,11 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq, tg3_napi_enable(tp); for (i = 0; i < tp->irq_cnt; i++) { - struct tg3_napi *tnapi = &tp->napi[i]; err = tg3_request_irq(tp, i); if (err) { for (i--; i >= 0; i--) { - tnapi = &tp->napi[i]; + struct tg3_napi *tnapi = &tp->napi[i]; + free_irq(tnapi->irq_vec, tnapi); } goto out_napi_fini; diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 26d25749c3e4..6df2cad61647 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -68,7 +68,7 @@ #define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1))) #define GEM_MTU_MIN_SIZE ETH_MIN_MTU -#define MACB_NETIF_LSO (NETIF_F_TSO | NETIF_F_UFO) +#define MACB_NETIF_LSO NETIF_F_TSO #define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) #define MACB_WOL_ENABLED (0x1 << 1) diff --git a/drivers/net/ethernet/cadence/macb_pci.c b/drivers/net/ethernet/cadence/macb_pci.c index 9906fda76087..248a8fc45069 100644 --- a/drivers/net/ethernet/cadence/macb_pci.c +++ b/drivers/net/ethernet/cadence/macb_pci.c @@ -128,7 +128,7 @@ static void macb_remove(struct pci_dev *pdev) clk_unregister(plat_data->hclk); } -static struct 
pci_device_id dev_id_table[] = { +static const struct pci_device_id dev_id_table[] = { { PCI_DEVICE(CDNS_VENDOR_ID, CDNS_DEVICE_ID), }, { 0, } }; diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c index 67cca08472b7..2220c771092b 100755 --- a/drivers/net/ethernet/cadence/macb_ptp.c +++ b/drivers/net/ethernet/cadence/macb_ptp.c @@ -192,7 +192,7 @@ static int gem_ptp_enable(struct ptp_clock_info *ptp, return -EOPNOTSUPP; } -static struct ptp_clock_info gem_ptp_caps_template = { +static const struct ptp_clock_info gem_ptp_caps_template = { .owner = THIS_MODULE, .name = GEM_PTP_TIMER_NAME, .max_adj = 0, diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c index 4b0ca9fb2cb4..e8b290473ee2 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c @@ -1150,14 +1150,50 @@ static void cn23xx_get_pcie_qlmport(struct octeon_device *oct) oct->pcie_port); } -static void cn23xx_get_pf_num(struct octeon_device *oct) +static int cn23xx_get_pf_num(struct octeon_device *oct) { u32 fdl_bit = 0; + u64 pkt0_in_ctl, d64; + int pfnum, mac, trs, ret; + + ret = 0; /** Read Function Dependency Link reg to get the function number */ - pci_read_config_dword(oct->pci_dev, CN23XX_PCIE_SRIOV_FDL, &fdl_bit); - oct->pf_num = ((fdl_bit >> CN23XX_PCIE_SRIOV_FDL_BIT_POS) & - CN23XX_PCIE_SRIOV_FDL_MASK); + if (pci_read_config_dword(oct->pci_dev, CN23XX_PCIE_SRIOV_FDL, + &fdl_bit) == 0) { + oct->pf_num = ((fdl_bit >> CN23XX_PCIE_SRIOV_FDL_BIT_POS) & + CN23XX_PCIE_SRIOV_FDL_MASK); + } else { + ret = EINVAL; + + /* Under some virtual environments, extended PCI regs are + * inaccessible, in which case the above read will have failed. 
+ * In this case, read the PF number from the + * SLI_PKT0_INPUT_CONTROL reg (written by f/w) + */ + pkt0_in_ctl = octeon_read_csr64(oct, + CN23XX_SLI_IQ_PKT_CONTROL64(0)); + pfnum = (pkt0_in_ctl >> CN23XX_PKT_INPUT_CTL_PF_NUM_POS) & + CN23XX_PKT_INPUT_CTL_PF_NUM_MASK; + mac = (octeon_read_csr(oct, CN23XX_SLI_MAC_NUMBER)) & 0xff; + + /* validate PF num by reading RINFO; f/w writes RINFO.trs == 1*/ + d64 = octeon_read_csr64(oct, + CN23XX_SLI_PKT_MAC_RINFO64(mac, pfnum)); + trs = (int)(d64 >> CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS) & 0xff; + if (trs == 1) { + dev_err(&oct->pci_dev->dev, + "OCTEON: error reading PCI cfg space pfnum, re-read %u\n", + pfnum); + oct->pf_num = pfnum; + ret = 0; + } else { + dev_err(&oct->pci_dev->dev, + "OCTEON: error reading PCI cfg space pfnum; could not ascertain PF number\n"); + } + } + + return ret; } static void cn23xx_setup_reg_address(struct octeon_device *oct) @@ -1269,6 +1305,26 @@ static int cn23xx_sriov_config(struct octeon_device *oct) int setup_cn23xx_octeon_pf_device(struct octeon_device *oct) { + u32 data32; + u64 BAR0, BAR1; + + pci_read_config_dword(oct->pci_dev, PCI_BASE_ADDRESS_0, &data32); + BAR0 = (u64)(data32 & ~0xf); + pci_read_config_dword(oct->pci_dev, PCI_BASE_ADDRESS_1, &data32); + BAR0 |= ((u64)data32 << 32); + pci_read_config_dword(oct->pci_dev, PCI_BASE_ADDRESS_2, &data32); + BAR1 = (u64)(data32 & ~0xf); + pci_read_config_dword(oct->pci_dev, PCI_BASE_ADDRESS_3, &data32); + BAR1 |= ((u64)data32 << 32); + + if (!BAR0 || !BAR1) { + if (!BAR0) + dev_err(&oct->pci_dev->dev, "device BAR0 unassigned\n"); + if (!BAR1) + dev_err(&oct->pci_dev->dev, "device BAR1 unassigned\n"); + return 1; + } + if (octeon_map_pci_barx(oct, 0, 0)) return 1; @@ -1279,7 +1335,8 @@ int setup_cn23xx_octeon_pf_device(struct octeon_device *oct) return 1; } - cn23xx_get_pf_num(oct); + if (cn23xx_get_pf_num(oct) != 0) + return 1; if (cn23xx_sriov_config(oct)) { octeon_unmap_pci_barx(oct, 0); @@ -1405,8 +1462,19 @@ int cn23xx_fw_loaded(struct octeon_device *oct) { u64 val; - val = octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1); - return (val >> 1) & 1ULL; + /* If there's more than one active PF on this NIC, then that + * implies that the NIC firmware is loaded and running. This check + * prevents a rare false negative that might occur if we only relied + * on checking the SCR2_BIT_FW_LOADED flag. The false negative would + * happen if the PF driver sees SCR2_BIT_FW_LOADED as cleared even + * though the firmware was already loaded but still booting and has yet + * to set SCR2_BIT_FW_LOADED. + */ + if (atomic_read(oct->adapter_refcount) > 1) + return 1; + + val = octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2); + return (val >> SCR2_BIT_FW_LOADED) & 1ULL; } void cn23xx_tell_vf_its_macaddr_changed(struct octeon_device *oct, int vfidx, diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h index dee604651ba7..2aba5247b6d8 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h @@ -24,8 +24,6 @@ #include "cn23xx_pf_regs.h" -#define LIO_CMD_WAIT_TM 100 - /* Register address and configuration for a CN23XX devices. 
* If device specific changes need to be made then add a struct to include * device specific fields as shown in the commented section diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h index 3f98c7334957..2d06097d3f61 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h @@ -36,8 +36,6 @@ struct octeon_cn23xx_vf { #define CN23XX_MAILBOX_MSGPARAM_SIZE 6 -#define MAX_VF_IP_OP_PENDING_PKT_COUNT 100 - void cn23xx_vf_ask_pf_to_do_flr(struct octeon_device *oct); int cn23xx_octeon_pfvf_handshake(struct octeon_device *oct); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c index adde7745d069..23f6b60030c5 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_core.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c @@ -165,9 +165,6 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr) /* If command is successful, change the MTU. */ netif_info(lio, probe, lio->netdev, "MTU Changed from %d to %d\n", netdev->mtu, nctrl->ncmd.s.param1); - dev_info(&oct->pci_dev->dev, "%s MTU Changed from %d to %d\n", - netdev->name, netdev->mtu, - nctrl->ncmd.s.param1); netdev->mtu = nctrl->ncmd.s.param1; queue_delayed_work(lio->link_status_wq.wq, &lio->link_status_wq.wk.work, 0); @@ -275,6 +272,11 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr) netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n"); break; + case OCTNET_CMD_QUEUE_COUNT_CTL: + netif_info(lio, probe, lio->netdev, "Queue count updated to %d\n", + nctrl->ncmd.s.param1); + break; + default: dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__, nctrl->ncmd.s.cmd); @@ -364,3 +366,723 @@ void cleanup_rx_oom_poll_fn(struct net_device *netdev) destroy_workqueue(lio->rxq_status_wq.wq); } } + +/* Runs in interrupt context. */ +static void lio_update_txq_status(struct octeon_device *oct, int iq_num) +{ + struct octeon_instr_queue *iq = oct->instr_queue[iq_num]; + struct net_device *netdev; + struct lio *lio; + + netdev = oct->props[iq->ifidx].netdev; + + /* This is needed because the first IQ does not have + * a netdev associated with it. + */ + if (!netdev) + return; + + lio = GET_LIO(netdev); + if (netif_is_multiqueue(netdev)) { + if (__netif_subqueue_stopped(netdev, iq->q_index) && + lio->linfo.link.s.link_up && + (!octnet_iq_is_full(oct, iq_num))) { + netif_wake_subqueue(netdev, iq->q_index); + INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num, + tx_restart, 1); + } + } else if (netif_queue_stopped(netdev) && + lio->linfo.link.s.link_up && + (!octnet_iq_is_full(oct, lio->txq))) { + INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq, + tx_restart, 1); + netif_wake_queue(netdev); + } +} + +/** + * \brief Setup output queue + * @param oct octeon device + * @param q_no which queue + * @param num_descs how many descriptors + * @param desc_size size of each descriptor + * @param app_ctx application context + */ +static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs, + int desc_size, void *app_ctx) +{ + int ret_val; + + dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no); + /* droq creation and local register settings. 
*/ + ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx); + if (ret_val < 0) + return ret_val; + + if (ret_val == 1) { + dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no); + return 0; + } + + /* Enable the droq queues */ + octeon_set_droq_pkt_op(oct, q_no, 1); + + /* Send Credit for Octeon Output queues. Credits are always + * sent after the output queue is enabled. + */ + writel(oct->droq[q_no]->max_count, oct->droq[q_no]->pkts_credit_reg); + + return ret_val; +} + +/** Routine to push packets arriving on Octeon interface upto network layer. + * @param oct_id - octeon device id. + * @param skbuff - skbuff struct to be passed to network layer. + * @param len - size of total data received. + * @param rh - Control header associated with the packet + * @param param - additional control data with the packet + * @param arg - farg registered in droq_ops + */ +static void +liquidio_push_packet(u32 octeon_id __attribute__((unused)), + void *skbuff, + u32 len, + union octeon_rh *rh, + void *param, + void *arg) +{ + struct net_device *netdev = (struct net_device *)arg; + struct octeon_droq *droq = + container_of(param, struct octeon_droq, napi); + struct sk_buff *skb = (struct sk_buff *)skbuff; + struct skb_shared_hwtstamps *shhwtstamps; + struct napi_struct *napi = param; + u16 vtag = 0; + u32 r_dh_off; + u64 ns; + + if (netdev) { + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + int packet_was_received; + + /* Do not proceed if the interface is not in RUNNING state. */ + if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) { + recv_buffer_free(skb); + droq->stats.rx_dropped++; + return; + } + + skb->dev = netdev; + + skb_record_rx_queue(skb, droq->q_no); + if (likely(len > MIN_SKB_SIZE)) { + struct octeon_skb_page_info *pg_info; + unsigned char *va; + + pg_info = ((struct octeon_skb_page_info *)(skb->cb)); + if (pg_info->page) { + /* For Paged allocation use the frags */ + va = page_address(pg_info->page) + + pg_info->page_offset; + memcpy(skb->data, va, MIN_SKB_SIZE); + skb_put(skb, MIN_SKB_SIZE); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + pg_info->page, + pg_info->page_offset + + MIN_SKB_SIZE, + len - MIN_SKB_SIZE, + LIO_RXBUFFER_SZ); + } + } else { + struct octeon_skb_page_info *pg_info = + ((struct octeon_skb_page_info *)(skb->cb)); + skb_copy_to_linear_data(skb, page_address(pg_info->page) + + pg_info->page_offset, len); + skb_put(skb, len); + put_page(pg_info->page); + } + + r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT; + + if (oct->ptp_enable) { + if (rh->r_dh.has_hwtstamp) { + /* timestamp is included from the hardware at + * the beginning of the packet. + */ + if (ifstate_check + (lio, + LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) { + /* Nanoseconds are in the first 64-bits + * of the packet. 
+ */ + memcpy(&ns, (skb->data + r_dh_off), + sizeof(ns)); + r_dh_off -= BYTES_PER_DHLEN_UNIT; + shhwtstamps = skb_hwtstamps(skb); + shhwtstamps->hwtstamp = + ns_to_ktime(ns + + lio->ptp_adjust); + } + } + } + + if (rh->r_dh.has_hash) { + __be32 *hash_be = (__be32 *)(skb->data + r_dh_off); + u32 hash = be32_to_cpu(*hash_be); + + skb_set_hash(skb, hash, PKT_HASH_TYPE_L4); + r_dh_off -= BYTES_PER_DHLEN_UNIT; + } + + skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT); + skb->protocol = eth_type_trans(skb, skb->dev); + + if ((netdev->features & NETIF_F_RXCSUM) && + (((rh->r_dh.encap_on) && + (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) || + (!(rh->r_dh.encap_on) && + (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED)))) + /* checksum has already been verified */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb->ip_summed = CHECKSUM_NONE; + + /* Setting Encapsulation field on basis of status received + * from the firmware + */ + if (rh->r_dh.encap_on) { + skb->encapsulation = 1; + skb->csum_level = 1; + droq->stats.rx_vxlan++; + } + + /* inbound VLAN tag */ + if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && + rh->r_dh.vlan) { + u16 priority = rh->r_dh.priority; + u16 vid = rh->r_dh.vlan; + + vtag = (priority << VLAN_PRIO_SHIFT) | vid; + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag); + } + + packet_was_received = (napi_gro_receive(napi, skb) != GRO_DROP); + + if (packet_was_received) { + droq->stats.rx_bytes_received += len; + droq->stats.rx_pkts_received++; + } else { + droq->stats.rx_dropped++; + netif_info(lio, rx_err, lio->netdev, + "droq:%d error rx_dropped:%llu\n", + droq->q_no, droq->stats.rx_dropped); + } + + } else { + recv_buffer_free(skb); + } +} + +/** + * \brief wrapper for calling napi_schedule + * @param param parameters to pass to napi_schedule + * + * Used when scheduling on different CPUs + */ +static void napi_schedule_wrapper(void *param) +{ + struct napi_struct *napi = param; + + napi_schedule(napi); +} + +/** + * \brief callback when receive interrupt occurs and we are in NAPI mode + * @param arg pointer to octeon output queue + */ +static void liquidio_napi_drv_callback(void *arg) +{ + struct octeon_device *oct; + struct octeon_droq *droq = arg; + int this_cpu = smp_processor_id(); + + oct = droq->oct_dev; + + if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct) || + droq->cpu_id == this_cpu) { + napi_schedule_irqoff(&droq->napi); + } else { + call_single_data_t *csd = &droq->csd; + + csd->func = napi_schedule_wrapper; + csd->info = &droq->napi; + csd->flags = 0; + + smp_call_function_single_async(droq->cpu_id, csd); + } +} + +/** + * \brief Entry point for NAPI polling + * @param napi NAPI structure + * @param budget maximum number of items to process + */ +static int liquidio_napi_poll(struct napi_struct *napi, int budget) +{ + struct octeon_instr_queue *iq; + struct octeon_device *oct; + struct octeon_droq *droq; + int tx_done = 0, iq_no; + int work_done; + + droq = container_of(napi, struct octeon_droq, napi); + oct = droq->oct_dev; + iq_no = droq->q_no; + + /* Handle Droq descriptors */ + work_done = octeon_process_droq_poll_cmd(oct, droq->q_no, + POLL_EVENT_PROCESS_PKTS, + budget); + + /* Flush the instruction queue */ + iq = oct->instr_queue[iq_no]; + if (iq) { + /* TODO: move this check to inside octeon_flush_iq, + * once check_db_timeout is removed + */ + if (atomic_read(&iq->instr_pending)) + /* Process iq buffers with in the budget limits */ + tx_done = octeon_flush_iq(oct, iq, budget); + else + tx_done = 1; + /* Update iq read-index rather than 
waiting for next interrupt. + * Return back if tx_done is false. + */ + /* sub-queue status update */ + lio_update_txq_status(oct, iq_no); + } else { + dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n", + __func__, iq_no); + } + +#define MAX_REG_CNT 2000000U + /* force enable interrupt if reg cnts are high to avoid wraparound */ + if ((work_done < budget && tx_done) || + (iq && iq->pkt_in_done >= MAX_REG_CNT) || + (droq->pkt_count >= MAX_REG_CNT)) { + tx_done = 1; + napi_complete_done(napi, work_done); + + octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no, + POLL_EVENT_ENABLE_INTR, 0); + return 0; + } + + return (!tx_done) ? (budget) : (work_done); +} + +/** + * \brief Setup input and output queues + * @param octeon_dev octeon device + * @param ifidx Interface index + * + * Note: Queues are with respect to the octeon device. Thus + * an input queue is for egress packets, and output queues + * are for ingress packets. + */ +int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx, + u32 num_iqs, u32 num_oqs) +{ + struct octeon_droq_ops droq_ops; + struct net_device *netdev; + struct octeon_droq *droq; + struct napi_struct *napi; + int cpu_id_modulus; + int num_tx_descs; + struct lio *lio; + int retval = 0; + int q, q_no; + int cpu_id; + + netdev = octeon_dev->props[ifidx].netdev; + + lio = GET_LIO(netdev); + + memset(&droq_ops, 0, sizeof(struct octeon_droq_ops)); + + droq_ops.fptr = liquidio_push_packet; + droq_ops.farg = netdev; + + droq_ops.poll_mode = 1; + droq_ops.napi_fn = liquidio_napi_drv_callback; + cpu_id = 0; + cpu_id_modulus = num_present_cpus(); + + /* set up DROQs. */ + for (q = 0; q < num_oqs; q++) { + q_no = lio->linfo.rxpciq[q].s.q_no; + dev_dbg(&octeon_dev->pci_dev->dev, + "%s index:%d linfo.rxpciq.s.q_no:%d\n", + __func__, q, q_no); + retval = octeon_setup_droq( + octeon_dev, q_no, + CFG_GET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(octeon_dev), + lio->ifidx), + CFG_GET_NUM_RX_BUF_SIZE_NIC_IF(octeon_get_conf(octeon_dev), + lio->ifidx), + NULL); + if (retval) { + dev_err(&octeon_dev->pci_dev->dev, + "%s : Runtime DROQ(RxQ) creation failed.\n", + __func__); + return 1; + } + + droq = octeon_dev->droq[q_no]; + napi = &droq->napi; + dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx\n", + (u64)netdev, (u64)octeon_dev); + netif_napi_add(netdev, napi, liquidio_napi_poll, 64); + + /* designate a CPU for this droq */ + droq->cpu_id = cpu_id; + cpu_id++; + if (cpu_id >= cpu_id_modulus) + cpu_id = 0; + + octeon_register_droq_ops(octeon_dev, q_no, &droq_ops); + } + + if (OCTEON_CN23XX_PF(octeon_dev) || OCTEON_CN23XX_VF(octeon_dev)) { + /* 23XX PF/VF can send/recv control messages (via the first + * PF/VF-owned droq) from the firmware even if the ethX + * interface is down, so that's why poll_mode must be off + * for the first droq. + */ + octeon_dev->droq[0]->ops.poll_mode = 0; + } + + /* set up IQs. 
*/ + for (q = 0; q < num_iqs; q++) { + num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF( + octeon_get_conf(octeon_dev), lio->ifidx); + retval = octeon_setup_iq(octeon_dev, ifidx, q, + lio->linfo.txpciq[q], num_tx_descs, + netdev_get_tx_queue(netdev, q)); + if (retval) { + dev_err(&octeon_dev->pci_dev->dev, + " %s : Runtime IQ(TxQ) creation failed.\n", + __func__); + return 1; + } + + /* XPS */ + if (!OCTEON_CN23XX_VF(octeon_dev) && octeon_dev->msix_on && + octeon_dev->ioq_vector) { + struct octeon_ioq_vector *ioq_vector; + + ioq_vector = &octeon_dev->ioq_vector[q]; + netif_set_xps_queue(netdev, + &ioq_vector->affinity_mask, + ioq_vector->iq_index); + } + } + + return 0; +} + +static +int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret) +{ + struct octeon_device *oct = droq->oct_dev; + struct octeon_device_priv *oct_priv = + (struct octeon_device_priv *)oct->priv; + + if (droq->ops.poll_mode) { + droq->ops.napi_fn(droq); + } else { + if (ret & MSIX_PO_INT) { + if (OCTEON_CN23XX_VF(oct)) + dev_err(&oct->pci_dev->dev, + "should not come here should not get rx when poll mode = 0 for vf\n"); + tasklet_schedule(&oct_priv->droq_tasklet); + return 1; + } + /* this will be flushed periodically by check iq db */ + if (ret & MSIX_PI_INT) + return 0; + } + + return 0; +} + +irqreturn_t +liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev) +{ + struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev; + struct octeon_device *oct = ioq_vector->oct_dev; + struct octeon_droq *droq = oct->droq[ioq_vector->droq_index]; + u64 ret; + + ret = oct->fn_list.msix_interrupt_handler(ioq_vector); + + if (ret & MSIX_PO_INT || ret & MSIX_PI_INT) + liquidio_schedule_msix_droq_pkt_handler(droq, ret); + + return IRQ_HANDLED; +} + +/** + * \brief Droq packet processor sceduler + * @param oct octeon device + */ +static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct) +{ + struct octeon_device_priv *oct_priv = + (struct octeon_device_priv *)oct->priv; + struct octeon_droq *droq; + u64 oq_no; + + if (oct->int_status & OCT_DEV_INTR_PKT_DATA) { + for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct); + oq_no++) { + if (!(oct->droq_intr & BIT_ULL(oq_no))) + continue; + + droq = oct->droq[oq_no]; + + if (droq->ops.poll_mode) { + droq->ops.napi_fn(droq); + oct_priv->napi_mask |= (1 << oq_no); + } else { + tasklet_schedule(&oct_priv->droq_tasklet); + } + } + } +} + +/** + * \brief Interrupt handler for octeon + * @param irq unused + * @param dev octeon device + */ +static +irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)), + void *dev) +{ + struct octeon_device *oct = (struct octeon_device *)dev; + irqreturn_t ret; + + /* Disable our interrupts for the duration of ISR */ + oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); + + ret = oct->fn_list.process_interrupt_regs(oct); + + if (ret == IRQ_HANDLED) + liquidio_schedule_droq_pkt_handlers(oct); + + /* Re-enable our interrupts */ + if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET)) + oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR); + + return ret; +} + +/** + * \brief Setup interrupt for octeon device + * @param oct octeon device + * + * Enable interrupt in Octeon device as given in the PCI interrupt mask. 
+ */ +int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs) +{ + struct msix_entry *msix_entries; + char *queue_irq_names = NULL; + int i, num_interrupts = 0; + int num_alloc_ioq_vectors; + char *aux_irq_name = NULL; + int num_ioq_vectors; + int irqret, err; + + oct->num_msix_irqs = num_ioqs; + if (oct->msix_on) { + if (OCTEON_CN23XX_PF(oct)) { + num_interrupts = MAX_IOQ_INTERRUPTS_PER_PF + 1; + + /* one non ioq interrupt for handling + * sli_mac_pf_int_sum + */ + oct->num_msix_irqs += 1; + } else if (OCTEON_CN23XX_VF(oct)) { + num_interrupts = MAX_IOQ_INTERRUPTS_PER_VF; + } + + /* allocate storage for the names assigned to each irq */ + oct->irq_name_storage = + kcalloc(num_interrupts, INTRNAMSIZ, GFP_KERNEL); + if (!oct->irq_name_storage) { + dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n"); + return -ENOMEM; + } + + queue_irq_names = oct->irq_name_storage; + + if (OCTEON_CN23XX_PF(oct)) + aux_irq_name = &queue_irq_names + [IRQ_NAME_OFF(MAX_IOQ_INTERRUPTS_PER_PF)]; + + oct->msix_entries = kcalloc(oct->num_msix_irqs, + sizeof(struct msix_entry), + GFP_KERNEL); + if (!oct->msix_entries) { + dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n"); + kfree(oct->irq_name_storage); + oct->irq_name_storage = NULL; + return -ENOMEM; + } + + msix_entries = (struct msix_entry *)oct->msix_entries; + + /*Assumption is that pf msix vectors start from pf srn to pf to + * trs and not from 0. if not change this code + */ + if (OCTEON_CN23XX_PF(oct)) { + for (i = 0; i < oct->num_msix_irqs - 1; i++) + msix_entries[i].entry = + oct->sriov_info.pf_srn + i; + + msix_entries[oct->num_msix_irqs - 1].entry = + oct->sriov_info.trs; + } else if (OCTEON_CN23XX_VF(oct)) { + for (i = 0; i < oct->num_msix_irqs; i++) + msix_entries[i].entry = i; + } + num_alloc_ioq_vectors = pci_enable_msix_range( + oct->pci_dev, msix_entries, + oct->num_msix_irqs, + oct->num_msix_irqs); + if (num_alloc_ioq_vectors < 0) { + dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n"); + kfree(oct->msix_entries); + oct->msix_entries = NULL; + kfree(oct->irq_name_storage); + oct->irq_name_storage = NULL; + return num_alloc_ioq_vectors; + } + + dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n"); + + num_ioq_vectors = oct->num_msix_irqs; + /** For PF, there is one non-ioq interrupt handler */ + if (OCTEON_CN23XX_PF(oct)) { + num_ioq_vectors -= 1; + + snprintf(aux_irq_name, INTRNAMSIZ, + "LiquidIO%u-pf%u-aux", oct->octeon_id, + oct->pf_num); + irqret = request_irq( + msix_entries[num_ioq_vectors].vector, + liquidio_legacy_intr_handler, 0, + aux_irq_name, oct); + if (irqret) { + dev_err(&oct->pci_dev->dev, + "Request_irq failed for MSIX interrupt Error: %d\n", + irqret); + pci_disable_msix(oct->pci_dev); + kfree(oct->msix_entries); + kfree(oct->irq_name_storage); + oct->irq_name_storage = NULL; + oct->msix_entries = NULL; + return irqret; + } + } + for (i = 0 ; i < num_ioq_vectors ; i++) { + if (OCTEON_CN23XX_PF(oct)) + snprintf(&queue_irq_names[IRQ_NAME_OFF(i)], + INTRNAMSIZ, "LiquidIO%u-pf%u-rxtx-%u", + oct->octeon_id, oct->pf_num, i); + + if (OCTEON_CN23XX_VF(oct)) + snprintf(&queue_irq_names[IRQ_NAME_OFF(i)], + INTRNAMSIZ, "LiquidIO%u-vf%u-rxtx-%u", + oct->octeon_id, oct->vf_num, i); + + irqret = request_irq(msix_entries[i].vector, + liquidio_msix_intr_handler, 0, + &queue_irq_names[IRQ_NAME_OFF(i)], + &oct->ioq_vector[i]); + + if (irqret) { + dev_err(&oct->pci_dev->dev, + "Request_irq failed for MSIX interrupt Error: %d\n", + irqret); + /** Freeing the non-ioq irq 
vector here . */ + free_irq(msix_entries[num_ioq_vectors].vector, + oct); + + while (i) { + i--; + /** clearing affinity mask. */ + irq_set_affinity_hint( + msix_entries[i].vector, + NULL); + free_irq(msix_entries[i].vector, + &oct->ioq_vector[i]); + } + pci_disable_msix(oct->pci_dev); + kfree(oct->msix_entries); + kfree(oct->irq_name_storage); + oct->irq_name_storage = NULL; + oct->msix_entries = NULL; + return irqret; + } + oct->ioq_vector[i].vector = msix_entries[i].vector; + /* assign the cpu mask for this msix interrupt vector */ + irq_set_affinity_hint(msix_entries[i].vector, + &oct->ioq_vector[i].affinity_mask + ); + } + dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n", + oct->octeon_id); + } else { + err = pci_enable_msi(oct->pci_dev); + if (err) + dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n", + err); + else + oct->flags |= LIO_FLAG_MSI_ENABLED; + + /* allocate storage for the names assigned to the irq */ + oct->irq_name_storage = kcalloc(1, INTRNAMSIZ, GFP_KERNEL); + if (!oct->irq_name_storage) + return -ENOMEM; + + queue_irq_names = oct->irq_name_storage; + + if (OCTEON_CN23XX_PF(oct)) + snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ, + "LiquidIO%u-pf%u-rxtx-%u", + oct->octeon_id, oct->pf_num, 0); + + if (OCTEON_CN23XX_VF(oct)) + snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ, + "LiquidIO%u-vf%u-rxtx-%u", + oct->octeon_id, oct->vf_num, 0); + + irqret = request_irq(oct->pci_dev->irq, + liquidio_legacy_intr_handler, + IRQF_SHARED, + &queue_irq_names[IRQ_NAME_OFF(0)], oct); + if (irqret) { + if (oct->flags & LIO_FLAG_MSI_ENABLED) + pci_disable_msi(oct->pci_dev); + dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n", + irqret); + kfree(oct->irq_name_storage); + oct->irq_name_storage = NULL; + return irqret; + } + } + return 0; +} diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index ebd353bc78ff..a63ddf07f168 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -31,6 +31,7 @@ #include "cn23xx_pf_device.h" #include "cn23xx_vf_device.h" +static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs); static int octnet_get_link_stats(struct net_device *netdev); struct oct_intrmod_context { @@ -105,6 +106,7 @@ static const char oct_stats_strings[][ETH_GSTRING_LEN] = { "tx_total_sent", "tx_total_fwd", "tx_err_pko", + "tx_err_pki", "tx_err_link", "tx_err_drop", @@ -299,6 +301,35 @@ lio_get_vf_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32); } +static int +lio_send_queue_count_update(struct net_device *netdev, uint32_t num_queues) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + int ret = 0; + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + + nctrl.ncmd.u64 = 0; + nctrl.ncmd.s.cmd = OCTNET_CMD_QUEUE_COUNT_CTL; + nctrl.ncmd.s.param1 = num_queues; + nctrl.ncmd.s.param2 = num_queues; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.wait_time = 100; + nctrl.netpndev = (u64)netdev; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); + if (ret < 0) { + dev_err(&oct->pci_dev->dev, "Failed to send Queue reset command (ret: 0x%x)\n", + ret); + return -1; + } + + return 0; +} + static void lio_ethtool_get_channels(struct net_device *dev, struct ethtool_channels *channel) @@ 
-306,6 +337,7 @@ lio_ethtool_get_channels(struct net_device *dev, struct lio *lio = GET_LIO(dev); struct octeon_device *oct = lio->oct_dev; u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0; + u32 combined_count = 0, max_combined = 0; if (OCTEON_CN6XXX(oct)) { struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx); @@ -315,22 +347,137 @@ lio_ethtool_get_channels(struct net_device *dev, rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx); tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx); } else if (OCTEON_CN23XX_PF(oct)) { - - max_rx = oct->sriov_info.num_pf_rings; - max_tx = oct->sriov_info.num_pf_rings; - rx_count = lio->linfo.num_rxpciq; - tx_count = lio->linfo.num_txpciq; + max_combined = lio->linfo.num_txpciq; + combined_count = oct->num_iqs; } else if (OCTEON_CN23XX_VF(oct)) { - max_tx = oct->sriov_info.rings_per_vf; - max_rx = oct->sriov_info.rings_per_vf; - rx_count = lio->linfo.num_rxpciq; - tx_count = lio->linfo.num_txpciq; + u64 reg_val = 0ULL; + u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0); + + reg_val = octeon_read_csr64(oct, ctrl); + reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS; + max_combined = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK; + combined_count = oct->num_iqs; } channel->max_rx = max_rx; channel->max_tx = max_tx; + channel->max_combined = max_combined; channel->rx_count = rx_count; channel->tx_count = tx_count; + channel->combined_count = combined_count; +} + +static int +lio_irq_reallocate_irqs(struct octeon_device *oct, uint32_t num_ioqs) +{ + struct msix_entry *msix_entries; + int num_msix_irqs = 0; + int i; + + if (!oct->msix_on) + return 0; + + /* Disable the input and output queues now. No more packets will + * arrive from Octeon. + */ + oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); + + if (oct->msix_on) { + if (OCTEON_CN23XX_PF(oct)) + num_msix_irqs = oct->num_msix_irqs - 1; + else if (OCTEON_CN23XX_VF(oct)) + num_msix_irqs = oct->num_msix_irqs; + + msix_entries = (struct msix_entry *)oct->msix_entries; + for (i = 0; i < num_msix_irqs; i++) { + if (oct->ioq_vector[i].vector) { + /* clear the affinity_cpumask */ + irq_set_affinity_hint(msix_entries[i].vector, + NULL); + free_irq(msix_entries[i].vector, + &oct->ioq_vector[i]); + oct->ioq_vector[i].vector = 0; + } + } + + /* non-iov vector's argument is oct struct */ + if (OCTEON_CN23XX_PF(oct)) + free_irq(msix_entries[i].vector, oct); + + pci_disable_msix(oct->pci_dev); + kfree(oct->msix_entries); + oct->msix_entries = NULL; + } + + kfree(oct->irq_name_storage); + oct->irq_name_storage = NULL; + if (octeon_setup_interrupt(oct, num_ioqs)) { + dev_info(&oct->pci_dev->dev, "Setup interrupt failed\n"); + return 1; + } + + /* Enable Octeon device interrupts */ + oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR); + + return 0; +} + +static int +lio_ethtool_set_channels(struct net_device *dev, + struct ethtool_channels *channel) +{ + u32 combined_count, max_combined; + struct lio *lio = GET_LIO(dev); + struct octeon_device *oct = lio->oct_dev; + int stopped = 0; + + if (strcmp(oct->fw_info.liquidio_firmware_version, "1.6.1") < 0) { + dev_err(&oct->pci_dev->dev, "Minimum firmware version required is 1.6.1\n"); + return -EINVAL; + } + + if (!channel->combined_count || channel->other_count || + channel->rx_count || channel->tx_count) + return -EINVAL; + + combined_count = channel->combined_count; + + if (OCTEON_CN23XX_PF(oct)) { + max_combined = channel->max_combined; + } else if (OCTEON_CN23XX_VF(oct)) { + u64 reg_val = 0ULL; + u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0); + + reg_val = 
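/*
 * Illustration, not part of this patch: both the get_channels and
 * set_channels paths in this file discover the VF's ring budget by
 * reading the rings-per-VF (RPVF) field of SLI_IQ_PKT_CONTROL64 for
 * ring 0 and extracting it with a shift and mask. A minimal sketch of
 * that step, assuming the register macros behave as they are used in
 * this hunk (the helper name is hypothetical):
 *
 *	static u32 lio_vf_query_max_rings(struct octeon_device *oct)
 *	{
 *		u64 reg_val;
 *
 *		reg_val = octeon_read_csr64(oct,
 *					    CN23XX_VF_SLI_IQ_PKT_CONTROL64(0));
 *		reg_val >>= CN23XX_PKT_INPUT_CTL_RPVF_POS;
 *		return (u32)(reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK);
 *	}
 *
 * The patch keeps the three lines open-coded in both callers instead of
 * factoring them out.
 */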
octeon_read_csr64(oct, ctrl); + reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS; + max_combined = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK; + } else { + return -EINVAL; + } + + if (combined_count > max_combined || combined_count < 1) + return -EINVAL; + + if (combined_count == oct->num_iqs) + return 0; + + ifstate_set(lio, LIO_IFSTATE_RESETTING); + + if (netif_running(dev)) { + dev->netdev_ops->ndo_stop(dev); + stopped = 1; + } + + if (lio_reset_queues(dev, combined_count)) + return -EINVAL; + + lio_irq_reallocate_irqs(oct, combined_count); + if (stopped) + dev->netdev_ops->ndo_open(dev); + + ifstate_reset(lio, LIO_IFSTATE_RESETTING); + + return 0; } static int lio_get_eeprom_len(struct net_device *netdev) @@ -577,23 +724,18 @@ static int lio_set_phys_id(struct net_device *netdev, break; case ETHTOOL_ID_ON: - if (oct->chip_id == OCTEON_CN66XX) { + if (oct->chip_id == OCTEON_CN66XX) octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG, VITESSE_PHY_GPIO_HIGH); + else + return -EINVAL; - } else if (oct->chip_id == OCTEON_CN68XX) { - return -EINVAL; - } else { - return -EINVAL; - } break; case ETHTOOL_ID_OFF: if (oct->chip_id == OCTEON_CN66XX) octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG, VITESSE_PHY_GPIO_LOW); - else if (oct->chip_id == OCTEON_CN68XX) - return -EINVAL; else return -EINVAL; @@ -641,6 +783,9 @@ lio_ethtool_get_ringparam(struct net_device *netdev, u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0, rx_pending = 0; + if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) + return; + if (OCTEON_CN6XXX(oct)) { struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx); @@ -648,33 +793,147 @@ lio_ethtool_get_ringparam(struct net_device *netdev, rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS; rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx); tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx); - } else if (OCTEON_CN23XX_PF(oct)) { - struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_pf); - + } else if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) { tx_max_pending = CN23XX_MAX_IQ_DESCRIPTORS; rx_max_pending = CN23XX_MAX_OQ_DESCRIPTORS; - rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf23, lio->ifidx); - tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf23, lio->ifidx); - } - - if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE - OCTNET_FRM_HEADER_SIZE) { - ering->rx_pending = 0; - ering->rx_max_pending = 0; - ering->rx_mini_pending = 0; - ering->rx_jumbo_pending = rx_pending; - ering->rx_mini_max_pending = 0; - ering->rx_jumbo_max_pending = rx_max_pending; - } else { - ering->rx_pending = rx_pending; - ering->rx_max_pending = rx_max_pending; - ering->rx_mini_pending = 0; - ering->rx_jumbo_pending = 0; - ering->rx_mini_max_pending = 0; - ering->rx_jumbo_max_pending = 0; + rx_pending = oct->droq[0]->max_count; + tx_pending = oct->instr_queue[0]->max_count; } ering->tx_pending = tx_pending; ering->tx_max_pending = tx_max_pending; + ering->rx_pending = rx_pending; + ering->rx_max_pending = rx_max_pending; + ering->rx_mini_pending = 0; + ering->rx_jumbo_pending = 0; + ering->rx_mini_max_pending = 0; + ering->rx_jumbo_max_pending = 0; +} + +static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct napi_struct *napi, *n; + int i, update = 0; + + if (wait_for_pending_requests(oct)) + dev_err(&oct->pci_dev->dev, "There were pending requests\n"); + + if (lio_wait_for_instr_fetch(oct)) + dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n"); + + if (octeon_set_io_queues_off(oct)) { + 
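/*
 * Illustration, not part of this patch: lio_ethtool_set_channels()
 * above changes the queue count with a stop/reset/restart sequence.
 * In outline (error handling trimmed), using only calls that appear in
 * this file:
 *
 *	ifstate_set(lio, LIO_IFSTATE_RESETTING);
 *	if (netif_running(dev)) {
 *		dev->netdev_ops->ndo_stop(dev);
 *		stopped = 1;
 *	}
 *	lio_reset_queues(dev, combined_count);		// rebuild IQs/OQs
 *	lio_irq_reallocate_irqs(oct, combined_count);	// re-request MSI-X
 *	if (stopped)
 *		dev->netdev_ops->ndo_open(dev);
 *	ifstate_reset(lio, LIO_IFSTATE_RESETTING);
 *
 * The RESETTING flag is what lets get_ringparam and the stats callbacks
 * return early while the queues are torn down and rebuilt.
 */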
dev_err(&oct->pci_dev->dev, "setting io queues off failed\n"); + return -1; + } + + /* Disable the input and output queues now. No more packets will + * arrive from Octeon. + */ + oct->fn_list.disable_io_queues(oct); + /* Delete NAPI */ + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) + netif_napi_del(napi); + + if (num_qs != oct->num_iqs) { + netif_set_real_num_rx_queues(netdev, num_qs); + netif_set_real_num_tx_queues(netdev, num_qs); + update = 1; + } + + for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { + if (!(oct->io_qmask.oq & BIT_ULL(i))) + continue; + octeon_delete_droq(oct, i); + } + + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { + if (!(oct->io_qmask.iq & BIT_ULL(i))) + continue; + octeon_delete_instr_queue(oct, i); + } + + if (oct->fn_list.setup_device_regs(oct)) { + dev_err(&oct->pci_dev->dev, "Failed to configure device registers\n"); + return -1; + } + + if (liquidio_setup_io_queues(oct, 0, num_qs, num_qs)) { + dev_err(&oct->pci_dev->dev, "IO queues initialization failed\n"); + return -1; + } + + /* Enable the input and output queues for this Octeon device */ + if (oct->fn_list.enable_io_queues(oct)) { + dev_err(&oct->pci_dev->dev, "Failed to enable input/output queues"); + return -1; + } + + if (update && lio_send_queue_count_update(netdev, num_qs)) + return -1; + + return 0; +} + +static int lio_ethtool_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ering) +{ + u32 rx_count, tx_count, rx_count_old, tx_count_old; + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + int stopped = 0; + + if (!OCTEON_CN23XX_PF(oct) && !OCTEON_CN23XX_VF(oct)) + return -EINVAL; + + if (ering->rx_mini_pending || ering->rx_jumbo_pending) + return -EINVAL; + + rx_count = clamp_t(u32, ering->rx_pending, CN23XX_MIN_OQ_DESCRIPTORS, + CN23XX_MAX_OQ_DESCRIPTORS); + tx_count = clamp_t(u32, ering->tx_pending, CN23XX_MIN_IQ_DESCRIPTORS, + CN23XX_MAX_IQ_DESCRIPTORS); + + rx_count_old = oct->droq[0]->max_count; + tx_count_old = oct->instr_queue[0]->max_count; + + if (rx_count == rx_count_old && tx_count == tx_count_old) + return 0; + + ifstate_set(lio, LIO_IFSTATE_RESETTING); + + if (netif_running(netdev)) { + netdev->netdev_ops->ndo_stop(netdev); + stopped = 1; + } + + /* Change RX/TX DESCS count */ + if (tx_count != tx_count_old) + CFG_SET_NUM_TX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx, + tx_count); + if (rx_count != rx_count_old) + CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx, + rx_count); + + if (lio_reset_queues(netdev, lio->linfo.num_txpciq)) + goto err_lio_reset_queues; + + if (stopped) + netdev->netdev_ops->ndo_open(netdev); + + ifstate_reset(lio, LIO_IFSTATE_RESETTING); + + return 0; + +err_lio_reset_queues: + if (tx_count != tx_count_old) + CFG_SET_NUM_TX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx, + tx_count_old); + if (rx_count != rx_count_old) + CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx, + rx_count_old); + return -EINVAL; } static u32 lio_get_msglevel(struct net_device *netdev) @@ -795,6 +1054,9 @@ lio_get_ethtool_stats(struct net_device *netdev, struct net_device_stats *netstats = &netdev->stats; int i = 0, j; + if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) + return; + netdev->netdev_ops->ndo_get_stats(netdev); octnet_get_link_stats(netdev); @@ -826,6 +1088,8 @@ lio_get_ethtool_stats(struct net_device *netdev, data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_fwd); /*per_core_stats[j].link_stats[i].fromhost.fw_err_pko */ data[i++] = 
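/*
 * Illustration, not part of this patch: lio_ethtool_set_ringparam()
 * above clamps the user-requested descriptor counts to the CN23XX
 * limits before applying them:
 *
 *	rx_count = clamp_t(u32, ering->rx_pending,
 *			   CN23XX_MIN_OQ_DESCRIPTORS,
 *			   CN23XX_MAX_OQ_DESCRIPTORS);
 *	tx_count = clamp_t(u32, ering->tx_pending,
 *			   CN23XX_MIN_IQ_DESCRIPTORS,
 *			   CN23XX_MAX_IQ_DESCRIPTORS);
 *
 * Only if a count actually changed are the new values written into the
 * NIC config (CFG_SET_NUM_*_DESCS_NIC_IF) and the queues rebuilt via
 * lio_reset_queues(); on failure the old values are restored and
 * -EINVAL is returned.
 */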
CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pko); + /*per_core_stats[j].link_stats[i].fromhost.fw_err_pki */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pki); /*per_core_stats[j].link_stats[i].fromhost.fw_err_link */ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_link); /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost. @@ -1057,6 +1321,9 @@ static void lio_vf_get_ethtool_stats(struct net_device *netdev, struct octeon_device *oct_dev = lio->oct_dev; int i = 0, j, vj; + if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) + return; + netdev->netdev_ops->ndo_get_stats(netdev); /* sum of oct->droq[oq_no]->stats->rx_pkts_received */ data[i++] = CVM_CAST64(netstats->rx_packets); @@ -1079,7 +1346,7 @@ static void lio_vf_get_ethtool_stats(struct net_device *netdev, /* lio->link_changes */ data[i++] = CVM_CAST64(lio->link_changes); - for (vj = 0; vj < lio->linfo.num_txpciq; vj++) { + for (vj = 0; vj < oct_dev->num_iqs; vj++) { j = lio->linfo.txpciq[vj].s.q_no; /* packets to network port */ @@ -1121,7 +1388,7 @@ static void lio_vf_get_ethtool_stats(struct net_device *netdev, } /* RX */ - for (vj = 0; vj < lio->linfo.num_rxpciq; vj++) { + for (vj = 0; vj < oct_dev->num_oqs; vj++) { j = lio->linfo.rxpciq[vj].s.q_no; /* packets send to TCP/IP network stack */ @@ -1568,6 +1835,7 @@ octnet_nic_stats_callback(struct octeon_device *oct_dev, tstats->fw_total_sent = rsp_tstats->fw_total_sent; tstats->fw_total_fwd = rsp_tstats->fw_total_fwd; tstats->fw_err_pko = rsp_tstats->fw_err_pko; + tstats->fw_err_pki = rsp_tstats->fw_err_pki; tstats->fw_err_link = rsp_tstats->fw_err_link; tstats->fw_err_drop = rsp_tstats->fw_err_drop; tstats->fw_tso = rsp_tstats->fw_tso; @@ -2587,7 +2855,9 @@ static const struct ethtool_ops lio_ethtool_ops = { .get_link = ethtool_op_get_link, .get_drvinfo = lio_get_drvinfo, .get_ringparam = lio_ethtool_get_ringparam, + .set_ringparam = lio_ethtool_set_ringparam, .get_channels = lio_ethtool_get_channels, + .set_channels = lio_ethtool_set_channels, .set_phys_id = lio_set_phys_id, .get_eeprom_len = lio_get_eeprom_len, .get_eeprom = lio_get_eeprom, @@ -2612,7 +2882,9 @@ static const struct ethtool_ops lio_vf_ethtool_ops = { .get_link = ethtool_op_get_link, .get_drvinfo = lio_get_vf_drvinfo, .get_ringparam = lio_ethtool_get_ringparam, + .set_ringparam = lio_ethtool_set_ringparam, .get_channels = lio_ethtool_get_channels, + .set_channels = lio_ethtool_set_channels, .get_strings = lio_vf_get_strings, .get_ethtool_stats = lio_vf_get_ethtool_stats, .get_regs_len = lio_get_regs_len, diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 120b6e537b28..e7f54948173f 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -39,10 +39,14 @@ MODULE_AUTHOR("Cavium Networks, "); MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(LIQUIDIO_VERSION); -MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME LIO_FW_NAME_SUFFIX); -MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME LIO_FW_NAME_SUFFIX); -MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME LIO_FW_NAME_SUFFIX); -MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME LIO_FW_NAME_SUFFIX); +MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME + "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX); +MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME + "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX); 
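/*
 * Illustration, not part of this patch: each MODULE_FIRMWARE() entry is
 * produced by adjacent-string-literal concatenation of the name macros,
 * with the firmware type ("nic" by default, per the fw_type parameter
 * below) spliced in ahead of the suffix. With made-up macro values,
 * purely to show the shape of the resulting name:
 *
 *	#define LIO_FW_DIR		"liquidio/"	// hypothetical value
 *	#define LIO_FW_BASE_NAME	"lio_"		// hypothetical value
 *	#define LIO_23XX_NAME		"23xx"		// hypothetical value
 *	#define LIO_FW_NAME_TYPE_NIC	"nic"
 *	#define LIO_FW_NAME_SUFFIX	".bin"		// hypothetical value
 *
 *	LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME "_" LIO_FW_NAME_TYPE_NIC
 *	LIO_FW_NAME_SUFFIX	=> "liquidio/lio_23xx_nic.bin"
 *
 * The real values live in the driver headers; only the concatenation
 * pattern matters here.
 */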
+MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME + "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX); +MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME + "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX); static int ddr_timeout = 10000; module_param(ddr_timeout, int, 0644); @@ -55,11 +59,24 @@ static int debug = -1; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "NETIF_MSG debug bits"); -static char fw_type[LIO_MAX_FW_TYPE_LEN]; -module_param_string(fw_type, fw_type, sizeof(fw_type), 0000); -MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded. Default \"nic\""); +static char fw_type[LIO_MAX_FW_TYPE_LEN] = LIO_FW_NAME_TYPE_NIC; +module_param_string(fw_type, fw_type, sizeof(fw_type), 0444); +MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded. Default \"nic\". Use \"none\" to load firmware from flash."); -static int ptp_enable = 1; +static u32 console_bitmask; +module_param(console_bitmask, int, 0644); +MODULE_PARM_DESC(console_bitmask, + "Bitmask indicating which consoles have debug output redirected to syslog."); + +/** + * \brief determines if a given console has debug enabled. + * @param console console to check + * @returns 1 = enabled. 0 otherwise + */ +static int octeon_console_debug_enabled(u32 console) +{ + return (console_bitmask >> (console)) & 0x1; +} /* Polling interval for determining when NIC application is alive */ #define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100 @@ -158,16 +175,13 @@ struct handshake { int started_ok; }; -struct octeon_device_priv { - /** Tasklet structures for this device. */ - struct tasklet_struct droq_tasklet; - unsigned long napi_mask; -}; - #ifdef CONFIG_PCI_IOV static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs); #endif +static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num, + char *prefix, char *suffix); + static int octeon_device_init(struct octeon_device *); static int liquidio_stop(struct net_device *netdev); static void liquidio_remove(struct pci_dev *pdev); @@ -255,32 +269,6 @@ static void force_io_queues_off(struct octeon_device *oct) } } -/** - * \brief wait for all pending requests to complete - * @param oct Pointer to Octeon device - * - * Called during shutdown sequence - */ -static int wait_for_pending_requests(struct octeon_device *oct) -{ - int i, pcount = 0; - - for (i = 0; i < 100; i++) { - pcount = - atomic_read(&oct->response_list - [OCTEON_ORDERED_SC_LIST].pending_req_count); - if (pcount) - schedule_timeout_uninterruptible(HZ / 10); - else - break; - } - - if (pcount) - return 1; - - return 0; -} - /** * \brief Cause device to go quiet so it can be safely removed/reset/etc * @param oct Pointer to Octeon device @@ -572,7 +560,7 @@ static inline void txqs_wake(struct net_device *netdev) for (i = 0; i < netdev->num_tx_queues; i++) { int qno = lio->linfo.txpciq[i % - (lio->linfo.num_txpciq)].s.q_no; + lio->oct_dev->num_iqs].s.q_no; if (__netif_subqueue_stopped(netdev, i)) { INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno, @@ -652,7 +640,7 @@ static inline int check_txq_status(struct lio *lio) /* check each sub-queue state */ for (q = 0; q < numqs; q++) { iq = lio->linfo.txpciq[q % - (lio->linfo.num_txpciq)].s.q_no; + lio->oct_dev->num_iqs].s.q_no; if (octnet_iq_is_full(lio->oct_dev, iq)) continue; if (__netif_subqueue_stopped(lio->netdev, q)) { @@ -823,7 +811,8 @@ static void print_link_info(struct net_device *netdev) { struct lio *lio = GET_LIO(netdev); - if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) { + if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) && + 
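/*
 * Illustration, not part of this patch: console_bitmask, added earlier
 * in this file, is a plain per-console bitmap, and
 * octeon_console_debug_enabled(n) is just (console_bitmask >> n) & 0x1.
 * So loading the module with, for example (module/parameter path
 * assumed):
 *
 *	modprobe liquidio console_bitmask=0x3
 *
 * asks for consoles 0 and 1 to be redirected to syslog. Later, in
 * octeon_device_init(), a set bit for console 0 makes the driver pass
 * an empty (i.e. "use default") enable string to octeon_add_console()
 * and install octeon_dbg_console_print() as that console's print hook.
 */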
ifstate_check(lio, LIO_IFSTATE_REGISTERED)) { struct oct_link_info *linfo = &lio->linfo; if (linfo->link.s.link_up) { @@ -912,295 +901,6 @@ static inline void update_link_status(struct net_device *netdev, } } -/* Runs in interrupt context. */ -static void update_txq_status(struct octeon_device *oct, int iq_num) -{ - struct net_device *netdev; - struct lio *lio; - struct octeon_instr_queue *iq = oct->instr_queue[iq_num]; - - netdev = oct->props[iq->ifidx].netdev; - - /* This is needed because the first IQ does not have - * a netdev associated with it. - */ - if (!netdev) - return; - - lio = GET_LIO(netdev); - if (netif_is_multiqueue(netdev)) { - if (__netif_subqueue_stopped(netdev, iq->q_index) && - lio->linfo.link.s.link_up && - (!octnet_iq_is_full(oct, iq_num))) { - INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num, - tx_restart, 1); - netif_wake_subqueue(netdev, iq->q_index); - } - } else if (netif_queue_stopped(netdev) && - lio->linfo.link.s.link_up && - (!octnet_iq_is_full(oct, lio->txq))) { - INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, - lio->txq, tx_restart, 1); - netif_wake_queue(netdev); - } -} - -static -int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret) -{ - struct octeon_device *oct = droq->oct_dev; - struct octeon_device_priv *oct_priv = - (struct octeon_device_priv *)oct->priv; - - if (droq->ops.poll_mode) { - droq->ops.napi_fn(droq); - } else { - if (ret & MSIX_PO_INT) { - tasklet_schedule(&oct_priv->droq_tasklet); - return 1; - } - /* this will be flushed periodically by check iq db */ - if (ret & MSIX_PI_INT) - return 0; - } - return 0; -} - -/** - * \brief Droq packet processor sceduler - * @param oct octeon device - */ -static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct) -{ - struct octeon_device_priv *oct_priv = - (struct octeon_device_priv *)oct->priv; - u64 oq_no; - struct octeon_droq *droq; - - if (oct->int_status & OCT_DEV_INTR_PKT_DATA) { - for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct); - oq_no++) { - if (!(oct->droq_intr & BIT_ULL(oq_no))) - continue; - - droq = oct->droq[oq_no]; - - if (droq->ops.poll_mode) { - droq->ops.napi_fn(droq); - oct_priv->napi_mask |= (1 << oq_no); - } else { - tasklet_schedule(&oct_priv->droq_tasklet); - } - } - } -} - -static irqreturn_t -liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev) -{ - u64 ret; - struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev; - struct octeon_device *oct = ioq_vector->oct_dev; - struct octeon_droq *droq = oct->droq[ioq_vector->droq_index]; - - ret = oct->fn_list.msix_interrupt_handler(ioq_vector); - - if ((ret & MSIX_PO_INT) || (ret & MSIX_PI_INT)) - liquidio_schedule_msix_droq_pkt_handler(droq, ret); - - return IRQ_HANDLED; -} - -/** - * \brief Interrupt handler for octeon - * @param irq unused - * @param dev octeon device - */ -static -irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)), - void *dev) -{ - struct octeon_device *oct = (struct octeon_device *)dev; - irqreturn_t ret; - - /* Disable our interrupts for the duration of ISR */ - oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); - - ret = oct->fn_list.process_interrupt_regs(oct); - - if (ret == IRQ_HANDLED) - liquidio_schedule_droq_pkt_handlers(oct); - - /* Re-enable our interrupts */ - if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET)) - oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR); - - return ret; -} - -/** - * \brief Setup interrupt for octeon device - * @param oct octeon device - * - * Enable interrupt in Octeon device 
as given in the PCI interrupt mask. - */ -static int octeon_setup_interrupt(struct octeon_device *oct) -{ - int irqret, err; - struct msix_entry *msix_entries; - int i; - int num_ioq_vectors; - int num_alloc_ioq_vectors; - char *queue_irq_names = NULL; - char *aux_irq_name = NULL; - - if (OCTEON_CN23XX_PF(oct) && oct->msix_on) { - oct->num_msix_irqs = oct->sriov_info.num_pf_rings; - /* one non ioq interrupt for handling sli_mac_pf_int_sum */ - oct->num_msix_irqs += 1; - - /* allocate storage for the names assigned to each irq */ - oct->irq_name_storage = - kcalloc((MAX_IOQ_INTERRUPTS_PER_PF + 1), INTRNAMSIZ, - GFP_KERNEL); - if (!oct->irq_name_storage) { - dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n"); - return -ENOMEM; - } - - queue_irq_names = oct->irq_name_storage; - aux_irq_name = &queue_irq_names - [IRQ_NAME_OFF(MAX_IOQ_INTERRUPTS_PER_PF)]; - - oct->msix_entries = kcalloc( - oct->num_msix_irqs, sizeof(struct msix_entry), GFP_KERNEL); - if (!oct->msix_entries) { - dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n"); - kfree(oct->irq_name_storage); - oct->irq_name_storage = NULL; - return -ENOMEM; - } - - msix_entries = (struct msix_entry *)oct->msix_entries; - /*Assumption is that pf msix vectors start from pf srn to pf to - * trs and not from 0. if not change this code - */ - for (i = 0; i < oct->num_msix_irqs - 1; i++) - msix_entries[i].entry = oct->sriov_info.pf_srn + i; - msix_entries[oct->num_msix_irqs - 1].entry = - oct->sriov_info.trs; - num_alloc_ioq_vectors = pci_enable_msix_range( - oct->pci_dev, msix_entries, - oct->num_msix_irqs, - oct->num_msix_irqs); - if (num_alloc_ioq_vectors < 0) { - dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n"); - kfree(oct->msix_entries); - oct->msix_entries = NULL; - kfree(oct->irq_name_storage); - oct->irq_name_storage = NULL; - return num_alloc_ioq_vectors; - } - dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n"); - - num_ioq_vectors = oct->num_msix_irqs; - - /** For PF, there is one non-ioq interrupt handler */ - num_ioq_vectors -= 1; - - snprintf(aux_irq_name, INTRNAMSIZ, - "LiquidIO%u-pf%u-aux", oct->octeon_id, oct->pf_num); - irqret = request_irq(msix_entries[num_ioq_vectors].vector, - liquidio_legacy_intr_handler, 0, - aux_irq_name, oct); - if (irqret) { - dev_err(&oct->pci_dev->dev, - "OCTEON: Request_irq failed for MSIX interrupt Error: %d\n", - irqret); - pci_disable_msix(oct->pci_dev); - kfree(oct->msix_entries); - oct->msix_entries = NULL; - kfree(oct->irq_name_storage); - oct->irq_name_storage = NULL; - return irqret; - } - - for (i = 0; i < num_ioq_vectors; i++) { - snprintf(&queue_irq_names[IRQ_NAME_OFF(i)], INTRNAMSIZ, - "LiquidIO%u-pf%u-rxtx-%u", - oct->octeon_id, oct->pf_num, i); - - irqret = request_irq(msix_entries[i].vector, - liquidio_msix_intr_handler, 0, - &queue_irq_names[IRQ_NAME_OFF(i)], - &oct->ioq_vector[i]); - if (irqret) { - dev_err(&oct->pci_dev->dev, - "OCTEON: Request_irq failed for MSIX interrupt Error: %d\n", - irqret); - /** Freeing the non-ioq irq vector here . */ - free_irq(msix_entries[num_ioq_vectors].vector, - oct); - - while (i) { - i--; - /** clearing affinity mask. 
*/ - irq_set_affinity_hint( - msix_entries[i].vector, NULL); - free_irq(msix_entries[i].vector, - &oct->ioq_vector[i]); - } - pci_disable_msix(oct->pci_dev); - kfree(oct->msix_entries); - oct->msix_entries = NULL; - kfree(oct->irq_name_storage); - oct->irq_name_storage = NULL; - return irqret; - } - oct->ioq_vector[i].vector = msix_entries[i].vector; - /* assign the cpu mask for this msix interrupt vector */ - irq_set_affinity_hint( - msix_entries[i].vector, - (&oct->ioq_vector[i].affinity_mask)); - } - dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n", - oct->octeon_id); - } else { - err = pci_enable_msi(oct->pci_dev); - if (err) - dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n", - err); - else - oct->flags |= LIO_FLAG_MSI_ENABLED; - - /* allocate storage for the names assigned to the irq */ - oct->irq_name_storage = kcalloc(1, INTRNAMSIZ, GFP_KERNEL); - if (!oct->irq_name_storage) - return -ENOMEM; - - queue_irq_names = oct->irq_name_storage; - - snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ, - "LiquidIO%u-pf%u-rxtx-%u", - oct->octeon_id, oct->pf_num, 0); - - irqret = request_irq(oct->pci_dev->irq, - liquidio_legacy_intr_handler, - IRQF_SHARED, - &queue_irq_names[IRQ_NAME_OFF(0)], oct); - if (irqret) { - if (oct->flags & LIO_FLAG_MSI_ENABLED) - pci_disable_msi(oct->pci_dev); - dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n", - irqret); - kfree(oct->irq_name_storage); - oct->irq_name_storage = NULL; - return irqret; - } - } - return 0; -} - static struct octeon_device *get_other_octeon_device(struct octeon_device *oct) { struct octeon_device *other_oct; @@ -1344,6 +1044,13 @@ liquidio_probe(struct pci_dev *pdev, if (pdev->device == OCTEON_CN23XX_PF_VID) oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED; + /* Enable PTP for 6XXX Device */ + if (((pdev->device == OCTEON_CN66XX) || + (pdev->device == OCTEON_CN68XX))) + oct_dev->ptp_enable = true; + else + oct_dev->ptp_enable = false; + dev_info(&pdev->dev, "Initializing device %x:%x.\n", (u32)pdev->vendor, (u32)pdev->device); @@ -1414,6 +1121,33 @@ static bool fw_type_is_none(void) sizeof(LIO_FW_NAME_TYPE_NONE)) == 0; } +/** + * \brief PCI FLR for each Octeon device. 
+ * @param oct octeon device + */ +static void octeon_pci_flr(struct octeon_device *oct) +{ + int rc; + + pci_save_state(oct->pci_dev); + + pci_cfg_access_lock(oct->pci_dev); + + /* Quiesce the device completely */ + pci_write_config_word(oct->pci_dev, PCI_COMMAND, + PCI_COMMAND_INTX_DISABLE); + + rc = __pci_reset_function_locked(oct->pci_dev); + + if (rc != 0) + dev_err(&oct->pci_dev->dev, "Error %d resetting PCI function %d\n", + rc, oct->pf_num); + + pci_cfg_access_unlock(oct->pci_dev); + + pci_restore_state(oct->pci_dev); +} + /** *\brief Destroy resources associated with octeon device * @param pdev PCI device structure @@ -1474,11 +1208,15 @@ static void octeon_destroy_resources(struct octeon_device *oct) if (oct->msix_on) { msix_entries = (struct msix_entry *)oct->msix_entries; for (i = 0; i < oct->num_msix_irqs - 1; i++) { - /* clear the affinity_cpumask */ - irq_set_affinity_hint(msix_entries[i].vector, - NULL); - free_irq(msix_entries[i].vector, - &oct->ioq_vector[i]); + if (oct->ioq_vector[i].vector) { + /* clear the affinity_cpumask */ + irq_set_affinity_hint( + msix_entries[i].vector, + NULL); + free_irq(msix_entries[i].vector, + &oct->ioq_vector[i]); + oct->ioq_vector[i].vector = 0; + } } /* non-iov vector's argument is oct struct */ free_irq(msix_entries[i].vector, oct); @@ -1558,14 +1296,16 @@ static void octeon_destroy_resources(struct octeon_device *oct) case OCT_DEV_PCI_MAP_DONE: refcount = octeon_deregister_device(oct); - if (!fw_type_is_none()) { - /* Soft reset the octeon device before exiting. - * Implementation note: here, we reset the device - * if it is a CN6XXX OR the last CN23XX device. - */ - if (OCTEON_CN6XXX(oct) || !refcount) - oct->fn_list.soft_reset(oct); - } + /* Soft reset the octeon device before exiting. + * However, if fw was loaded from card (i.e. autoboot), + * perform an FLR instead. + * Implementation note: only soft-reset the device + * if it is a CN6XXX OR the LAST CN23XX device. 
+ */ + if (fw_type_is_none()) + octeon_pci_flr(oct); + else if (OCTEON_CN6XXX(oct) || !refcount) + oct->fn_list.soft_reset(oct); octeon_unmap_pci_barx(oct, 0); octeon_unmap_pci_barx(oct, 1); @@ -1698,15 +1438,6 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) liquidio_stop(netdev); - if (fw_type_is_none()) { - struct octnic_ctrl_pkt nctrl; - - memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); - nctrl.ncmd.s.cmd = OCTNET_CMD_RESET_PF; - nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - octnet_send_nic_ctrl_pkt(oct, &nctrl); - } - if (oct->props[lio->ifidx].napi_enabled == 1) { list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) napi_disable(napi); @@ -1717,6 +1448,10 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) oct->droq[0]->ops.poll_mode = 0; } + /* Delete NAPI */ + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) + netif_napi_del(napi); + if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) unregister_netdev(netdev); @@ -1754,7 +1489,7 @@ static int liquidio_stop_nic_module(struct octeon_device *oct) for (i = 0; i < oct->ifcount; i++) { lio = GET_LIO(oct->props[i].netdev); - for (j = 0; j < lio->linfo.num_rxpciq; j++) + for (j = 0; j < oct->num_oqs; j++) octeon_unregister_droq_ops(oct, lio->linfo.rxpciq[j].s.q_no); } @@ -1825,6 +1560,13 @@ static int octeon_chip_specific_setup(struct octeon_device *oct) case OCTEON_CN23XX_PCIID_PF: oct->chip_id = OCTEON_CN23XX_PF_VID; ret = setup_cn23xx_octeon_pf_device(oct); + if (ret) + break; +#ifdef CONFIG_PCI_IOV + if (!ret) + pci_sriov_set_totalvfs(oct->pci_dev, + oct->sriov_info.max_vfs); +#endif s = "CN23XX"; break; @@ -1889,7 +1631,7 @@ static inline int check_txq_state(struct lio *lio, struct sk_buff *skb) if (netif_is_multiqueue(lio->netdev)) { q = skb->queue_mapping; - iq = lio->linfo.txpciq[(q % (lio->linfo.num_txpciq))].s.q_no; + iq = lio->linfo.txpciq[(q % lio->oct_dev->num_iqs)].s.q_no; } else { iq = lio->txq; q = iq; @@ -2192,11 +1934,6 @@ static int load_firmware(struct octeon_device *oct) char fw_name[LIO_MAX_FW_FILENAME_LEN]; char *tmp_fw_type; - if (fw_type_is_none()) { - dev_info(&oct->pci_dev->dev, "Skipping firmware load\n"); - return ret; - } - if (fw_type[0] == '\0') tmp_fw_type = LIO_FW_NAME_TYPE_NIC; else @@ -2221,43 +1958,6 @@ static int load_firmware(struct octeon_device *oct) return ret; } -/** - * \brief Setup output queue - * @param oct octeon device - * @param q_no which queue - * @param num_descs how many descriptors - * @param desc_size size of each descriptor - * @param app_ctx application context - */ -static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs, - int desc_size, void *app_ctx) -{ - int ret_val = 0; - - dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no); - /* droq creation and local register settings. */ - ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx); - if (ret_val < 0) - return ret_val; - - if (ret_val == 1) { - dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no); - return 0; - } - /* tasklet creation for the droq */ - - /* Enable the droq queues */ - octeon_set_droq_pkt_op(oct, q_no, 1); - - /* Send Credit for Octeon Output queues. Credits are always - * sent after the output queue is enabled. 
- */ - writel(oct->droq[q_no]->max_count, - oct->droq[q_no]->pkts_credit_reg); - - return ret_val; -} - /** * \brief Callback for getting interface configuration * @param status status of request @@ -2291,352 +1991,6 @@ static void if_cfg_callback(struct octeon_device *oct, wake_up_interruptible(&ctx->wc); } -/** Routine to push packets arriving on Octeon interface upto network layer. - * @param oct_id - octeon device id. - * @param skbuff - skbuff struct to be passed to network layer. - * @param len - size of total data received. - * @param rh - Control header associated with the packet - * @param param - additional control data with the packet - * @param arg - farg registered in droq_ops - */ -static void -liquidio_push_packet(u32 octeon_id __attribute__((unused)), - void *skbuff, - u32 len, - union octeon_rh *rh, - void *param, - void *arg) -{ - struct napi_struct *napi = param; - struct sk_buff *skb = (struct sk_buff *)skbuff; - struct skb_shared_hwtstamps *shhwtstamps; - u64 ns; - u16 vtag = 0; - u32 r_dh_off; - struct net_device *netdev = (struct net_device *)arg; - struct octeon_droq *droq = container_of(param, struct octeon_droq, - napi); - if (netdev) { - int packet_was_received; - struct lio *lio = GET_LIO(netdev); - struct octeon_device *oct = lio->oct_dev; - - /* Do not proceed if the interface is not in RUNNING state. */ - if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) { - recv_buffer_free(skb); - droq->stats.rx_dropped++; - return; - } - - skb->dev = netdev; - - skb_record_rx_queue(skb, droq->q_no); - if (likely(len > MIN_SKB_SIZE)) { - struct octeon_skb_page_info *pg_info; - unsigned char *va; - - pg_info = ((struct octeon_skb_page_info *)(skb->cb)); - if (pg_info->page) { - /* For Paged allocation use the frags */ - va = page_address(pg_info->page) + - pg_info->page_offset; - memcpy(skb->data, va, MIN_SKB_SIZE); - skb_put(skb, MIN_SKB_SIZE); - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, - pg_info->page, - pg_info->page_offset + - MIN_SKB_SIZE, - len - MIN_SKB_SIZE, - LIO_RXBUFFER_SZ); - } - } else { - struct octeon_skb_page_info *pg_info = - ((struct octeon_skb_page_info *)(skb->cb)); - skb_copy_to_linear_data(skb, page_address(pg_info->page) - + pg_info->page_offset, len); - skb_put(skb, len); - put_page(pg_info->page); - } - - r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT; - - if (((oct->chip_id == OCTEON_CN66XX) || - (oct->chip_id == OCTEON_CN68XX)) && - ptp_enable) { - if (rh->r_dh.has_hwtstamp) { - /* timestamp is included from the hardware at - * the beginning of the packet. - */ - if (ifstate_check - (lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) { - /* Nanoseconds are in the first 64-bits - * of the packet. 
- */ - memcpy(&ns, (skb->data + r_dh_off), - sizeof(ns)); - r_dh_off -= BYTES_PER_DHLEN_UNIT; - shhwtstamps = skb_hwtstamps(skb); - shhwtstamps->hwtstamp = - ns_to_ktime(ns + - lio->ptp_adjust); - } - } - } - - if (rh->r_dh.has_hash) { - __be32 *hash_be = (__be32 *)(skb->data + r_dh_off); - u32 hash = be32_to_cpu(*hash_be); - - skb_set_hash(skb, hash, PKT_HASH_TYPE_L4); - r_dh_off -= BYTES_PER_DHLEN_UNIT; - } - - skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT); - - skb->protocol = eth_type_trans(skb, skb->dev); - if ((netdev->features & NETIF_F_RXCSUM) && - (((rh->r_dh.encap_on) && - (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) || - (!(rh->r_dh.encap_on) && - (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED)))) - /* checksum has already been verified */ - skb->ip_summed = CHECKSUM_UNNECESSARY; - else - skb->ip_summed = CHECKSUM_NONE; - - /* Setting Encapsulation field on basis of status received - * from the firmware - */ - if (rh->r_dh.encap_on) { - skb->encapsulation = 1; - skb->csum_level = 1; - droq->stats.rx_vxlan++; - } - - /* inbound VLAN tag */ - if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && - (rh->r_dh.vlan != 0)) { - u16 vid = rh->r_dh.vlan; - u16 priority = rh->r_dh.priority; - - vtag = priority << 13 | vid; - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag); - } - - packet_was_received = napi_gro_receive(napi, skb) != GRO_DROP; - - if (packet_was_received) { - droq->stats.rx_bytes_received += len; - droq->stats.rx_pkts_received++; - } else { - droq->stats.rx_dropped++; - netif_info(lio, rx_err, lio->netdev, - "droq:%d error rx_dropped:%llu\n", - droq->q_no, droq->stats.rx_dropped); - } - - } else { - recv_buffer_free(skb); - } -} - -/** - * \brief wrapper for calling napi_schedule - * @param param parameters to pass to napi_schedule - * - * Used when scheduling on different CPUs - */ -static void napi_schedule_wrapper(void *param) -{ - struct napi_struct *napi = param; - - napi_schedule(napi); -} - -/** - * \brief callback when receive interrupt occurs and we are in NAPI mode - * @param arg pointer to octeon output queue - */ -static void liquidio_napi_drv_callback(void *arg) -{ - struct octeon_device *oct; - struct octeon_droq *droq = arg; - int this_cpu = smp_processor_id(); - - oct = droq->oct_dev; - - if (OCTEON_CN23XX_PF(oct) || droq->cpu_id == this_cpu) { - napi_schedule_irqoff(&droq->napi); - } else { - call_single_data_t *csd = &droq->csd; - - csd->func = napi_schedule_wrapper; - csd->info = &droq->napi; - csd->flags = 0; - - smp_call_function_single_async(droq->cpu_id, csd); - } -} - -/** - * \brief Entry point for NAPI polling - * @param napi NAPI structure - * @param budget maximum number of items to process - */ -static int liquidio_napi_poll(struct napi_struct *napi, int budget) -{ - struct octeon_droq *droq; - int work_done; - int tx_done = 0, iq_no; - struct octeon_instr_queue *iq; - struct octeon_device *oct; - - droq = container_of(napi, struct octeon_droq, napi); - oct = droq->oct_dev; - iq_no = droq->q_no; - /* Handle Droq descriptors */ - work_done = octeon_process_droq_poll_cmd(oct, droq->q_no, - POLL_EVENT_PROCESS_PKTS, - budget); - - /* Flush the instruction queue */ - iq = oct->instr_queue[iq_no]; - if (iq) { - if (atomic_read(&iq->instr_pending)) - /* Process iq buffers with in the budget limits */ - tx_done = octeon_flush_iq(oct, iq, budget); - else - tx_done = 1; - /* Update iq read-index rather than waiting for next interrupt. - * Return back if tx_done is false. 
- */ - update_txq_status(oct, iq_no); - } else { - dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n", - __func__, iq_no); - } - - /* force enable interrupt if reg cnts are high to avoid wraparound */ - if ((work_done < budget && tx_done) || - (iq && iq->pkt_in_done >= MAX_REG_CNT) || - (droq->pkt_count >= MAX_REG_CNT)) { - tx_done = 1; - napi_complete_done(napi, work_done); - octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no, - POLL_EVENT_ENABLE_INTR, 0); - return 0; - } - - return (!tx_done) ? (budget) : (work_done); -} - -/** - * \brief Setup input and output queues - * @param octeon_dev octeon device - * @param ifidx Interface Index - * - * Note: Queues are with respect to the octeon device. Thus - * an input queue is for egress packets, and output queues - * are for ingress packets. - */ -static inline int setup_io_queues(struct octeon_device *octeon_dev, - int ifidx) -{ - struct octeon_droq_ops droq_ops; - struct net_device *netdev; - static int cpu_id; - static int cpu_id_modulus; - struct octeon_droq *droq; - struct napi_struct *napi; - int q, q_no, retval = 0; - struct lio *lio; - int num_tx_descs; - - netdev = octeon_dev->props[ifidx].netdev; - - lio = GET_LIO(netdev); - - memset(&droq_ops, 0, sizeof(struct octeon_droq_ops)); - - droq_ops.fptr = liquidio_push_packet; - droq_ops.farg = (void *)netdev; - - droq_ops.poll_mode = 1; - droq_ops.napi_fn = liquidio_napi_drv_callback; - cpu_id = 0; - cpu_id_modulus = num_present_cpus(); - - /* set up DROQs. */ - for (q = 0; q < lio->linfo.num_rxpciq; q++) { - q_no = lio->linfo.rxpciq[q].s.q_no; - dev_dbg(&octeon_dev->pci_dev->dev, - "setup_io_queues index:%d linfo.rxpciq.s.q_no:%d\n", - q, q_no); - retval = octeon_setup_droq(octeon_dev, q_no, - CFG_GET_NUM_RX_DESCS_NIC_IF - (octeon_get_conf(octeon_dev), - lio->ifidx), - CFG_GET_NUM_RX_BUF_SIZE_NIC_IF - (octeon_get_conf(octeon_dev), - lio->ifidx), NULL); - if (retval) { - dev_err(&octeon_dev->pci_dev->dev, - "%s : Runtime DROQ(RxQ) creation failed.\n", - __func__); - return 1; - } - - droq = octeon_dev->droq[q_no]; - napi = &droq->napi; - dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx pf_num:%d\n", - (u64)netdev, (u64)octeon_dev, octeon_dev->pf_num); - netif_napi_add(netdev, napi, liquidio_napi_poll, 64); - - /* designate a CPU for this droq */ - droq->cpu_id = cpu_id; - cpu_id++; - if (cpu_id >= cpu_id_modulus) - cpu_id = 0; - - octeon_register_droq_ops(octeon_dev, q_no, &droq_ops); - } - - if (OCTEON_CN23XX_PF(octeon_dev)) { - /* 23XX PF can receive control messages (via the first PF-owned - * droq) from the firmware even if the ethX interface is down, - * so that's why poll_mode must be off for the first droq. - */ - octeon_dev->droq[0]->ops.poll_mode = 0; - } - - /* set up IQs. 
*/ - for (q = 0; q < lio->linfo.num_txpciq; q++) { - num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(octeon_get_conf - (octeon_dev), - lio->ifidx); - retval = octeon_setup_iq(octeon_dev, ifidx, q, - lio->linfo.txpciq[q], num_tx_descs, - netdev_get_tx_queue(netdev, q)); - if (retval) { - dev_err(&octeon_dev->pci_dev->dev, - " %s : Runtime IQ(TxQ) creation failed.\n", - __func__); - return 1; - } - - if (octeon_dev->ioq_vector) { - struct octeon_ioq_vector *ioq_vector; - - ioq_vector = &octeon_dev->ioq_vector[q]; - netif_set_xps_queue(netdev, - &ioq_vector->affinity_mask, - ioq_vector->iq_index); - } - } - - return 0; -} - /** * \brief Poll routine for checking transmit queue status * @param work work_struct data structure @@ -2707,8 +2061,7 @@ static int liquidio_open(struct net_device *netdev) oct->droq[0]->ops.poll_mode = 1; } - if ((oct->chip_id == OCTEON_CN66XX || oct->chip_id == OCTEON_CN68XX) && - ptp_enable) + if (oct->ptp_enable) oct_ptp_open(netdev); ifstate_set(lio, LIO_IFSTATE_RUNNING); @@ -2746,6 +2099,17 @@ static int liquidio_stop(struct net_device *netdev) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; + struct napi_struct *napi, *n; + + if (oct->props[lio->ifidx].napi_enabled) { + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) + napi_disable(napi); + + oct->props[lio->ifidx].napi_enabled = 0; + + if (OCTEON_CN23XX_PF(oct)) + oct->droq[0]->ops.poll_mode = 0; + } ifstate_reset(lio, LIO_IFSTATE_RUNNING); @@ -2916,7 +2280,10 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev) oct = lio->oct_dev; - for (i = 0; i < lio->linfo.num_txpciq; i++) { + if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) + return stats; + + for (i = 0; i < oct->num_iqs; i++) { iq_no = lio->linfo.txpciq[i].s.q_no; iq_stats = &oct->instr_queue[iq_no]->stats; pkts += iq_stats->tx_done; @@ -2932,7 +2299,7 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev) drop = 0; bytes = 0; - for (i = 0; i < lio->linfo.num_rxpciq; i++) { + for (i = 0; i < oct->num_oqs; i++) { oq_no = lio->linfo.rxpciq[i].s.q_no; oq_stats = &oct->droq[oq_no]->stats; pkts += oq_stats->rx_pkts_received; @@ -3052,8 +2419,7 @@ static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) switch (cmd) { case SIOCSHWTSTAMP: - if ((lio->oct_dev->chip_id == OCTEON_CN66XX || - lio->oct_dev->chip_id == OCTEON_CN68XX) && ptp_enable) + if (lio->oct_dev->ptp_enable) return hwtstamp_ioctl(netdev, ifr); default: return -EOPNOTSUPP; @@ -4188,7 +3554,9 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) */ lio->txq = lio->linfo.txpciq[0].s.q_no; lio->rxq = lio->linfo.rxpciq[0].s.q_no; - if (setup_io_queues(octeon_dev, i)) { + if (liquidio_setup_io_queues(octeon_dev, i, + lio->linfo.num_txpciq, + lio->linfo.num_rxpciq)) { dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n"); goto setup_nic_dev_fail; } @@ -4516,6 +3884,7 @@ static int octeon_device_init(struct octeon_device *octeon_dev) int j, ret; int fw_loaded = 0; char bootcmd[] = "\n"; + char *dbg_enb = NULL; struct octeon_device_priv *oct_priv = (struct octeon_device_priv *)octeon_dev->priv; atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE); @@ -4548,18 +3917,16 @@ static int octeon_device_init(struct octeon_device *octeon_dev) octeon_dev->app_mode = CVM_DRV_INVALID_APP; if (OCTEON_CN23XX_PF(octeon_dev)) { - if (!cn23xx_fw_loaded(octeon_dev)) { + if (!cn23xx_fw_loaded(octeon_dev) && !fw_type_is_none()) { fw_loaded = 0; - if (!fw_type_is_none()) { - /* Do a 
soft reset of the Octeon device. */ - if (octeon_dev->fn_list.soft_reset(octeon_dev)) - return 1; - /* things might have changed */ - if (!cn23xx_fw_loaded(octeon_dev)) - fw_loaded = 0; - else - fw_loaded = 1; - } + /* Do a soft reset of the Octeon device. */ + if (octeon_dev->fn_list.soft_reset(octeon_dev)) + return 1; + /* things might have changed */ + if (!cn23xx_fw_loaded(octeon_dev)) + fw_loaded = 0; + else + fw_loaded = 1; } else { fw_loaded = 1; } @@ -4666,7 +4033,8 @@ static int octeon_device_init(struct octeon_device *octeon_dev) /* Setup the interrupt handler and record the INT SUM register address */ - if (octeon_setup_interrupt(octeon_dev)) + if (octeon_setup_interrupt(octeon_dev, + octeon_dev->sriov_info.num_pf_rings)) return 1; /* Enable Octeon device interrupts */ @@ -4674,6 +4042,18 @@ static int octeon_device_init(struct octeon_device *octeon_dev) atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE); + /* Send Credit for Octeon Output queues. Credits are always sent BEFORE + * the output queue is enabled. + * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in + * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0. + * Otherwise, it is possible that the DRV_ACTIVE message will be sent + * before any credits have been issued, causing the ring to be reset + * (and the f/w appear to never have started). + */ + for (j = 0; j < octeon_dev->num_oqs; j++) + writel(octeon_dev->droq[j]->max_count, + octeon_dev->droq[j]->pkts_credit_reg); + /* Enable the input and output queues for this Octeon device */ ret = octeon_dev->fn_list.enable_io_queues(octeon_dev); if (ret) { @@ -4722,10 +4102,19 @@ static int octeon_device_init(struct octeon_device *octeon_dev) dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n"); return 1; } - ret = octeon_add_console(octeon_dev, 0); + /* If console debug enabled, specify empty string to use default + * enablement ELSE specify NULL string for 'disabled'. + */ + dbg_enb = octeon_console_debug_enabled(0) ? "" : NULL; + ret = octeon_add_console(octeon_dev, 0, dbg_enb); if (ret) { dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n"); return 1; + } else if (octeon_console_debug_enabled(0)) { + /* If console was added AND we're logging console output + * then set our console print function. + */ + octeon_dev->console[0].print = octeon_dbg_console_print; } atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE); @@ -4736,12 +4125,6 @@ static int octeon_device_init(struct octeon_device *octeon_dev) dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n"); return 1; } - /* set bit 1 of SLI_SCRATCH_1 to indicate that firmware is - * loaded - */ - if (OCTEON_CN23XX_PF(octeon_dev)) - octeon_write_csr64(octeon_dev, CN23XX_SLI_SCRATCH1, - 2ULL); } handshake[octeon_dev->octeon_id].init_ok = 1; @@ -4749,14 +4132,33 @@ static int octeon_device_init(struct octeon_device *octeon_dev) atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK); - /* Send Credit for Octeon Output queues. Credits are always sent after - * the output queue is enabled. - */ - for (j = 0; j < octeon_dev->num_oqs; j++) - writel(octeon_dev->droq[j]->max_count, - octeon_dev->droq[j]->pkts_credit_reg); + return 0; +} + +/** + * \brief Debug console print function + * @param octeon_dev octeon device + * @param console_num console number + * @param prefix first portion of line to display + * @param suffix second portion of line to display + * + * The OCTEON debug console outputs entire lines (excluding '\n'). 
+ * Normally, the line will be passed in the 'prefix' parameter. + * However, due to buffering, it is possible for a line to be split into two + * parts, in which case they will be passed as the 'prefix' parameter and + * 'suffix' parameter. + */ +static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num, + char *prefix, char *suffix) +{ + if (prefix && suffix) + dev_info(&oct->pci_dev->dev, "%u: %s%s\n", console_num, prefix, + suffix); + else if (prefix) + dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, prefix); + else if (suffix) + dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, suffix); - /* Packets can start arriving on the output queues from this point. */ return 0; } diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index 9b247102eb92..2e993ce43b66 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -107,12 +107,6 @@ struct octnic_gather { dma_addr_t sg_dma_ptr; }; -struct octeon_device_priv { - /* Tasklet structures for this device. */ - struct tasklet_struct droq_tasklet; - unsigned long napi_mask; -}; - static int liquidio_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent); static void liquidio_vf_remove(struct pci_dev *pdev); @@ -123,7 +117,7 @@ static int lio_wait_for_oq_pkts(struct octeon_device *oct) { struct octeon_device_priv *oct_priv = (struct octeon_device_priv *)oct->priv; - int retry = MAX_VF_IP_OP_PENDING_PKT_COUNT; + int retry = MAX_IO_PENDING_PKT_COUNT; int pkt_cnt = 0, pending_pkts; int i; @@ -147,32 +141,6 @@ static int lio_wait_for_oq_pkts(struct octeon_device *oct) return pkt_cnt; } -/** - * \brief wait for all pending requests to complete - * @param oct Pointer to Octeon device - * - * Called during shutdown sequence - */ -static int wait_for_pending_requests(struct octeon_device *oct) -{ - int i, pcount = 0; - - for (i = 0; i < MAX_VF_IP_OP_PENDING_PKT_COUNT; i++) { - pcount = atomic_read( - &oct->response_list[OCTEON_ORDERED_SC_LIST] - .pending_req_count); - if (pcount) - schedule_timeout_uninterruptible(HZ / 10); - else - break; - } - - if (pcount) - return 1; - - return 0; -} - /** * \brief Cause device to go quiet so it can be safely removed/reset/etc * @param oct Pointer to Octeon device @@ -374,7 +342,7 @@ static void txqs_wake(struct net_device *netdev) int i; for (i = 0; i < netdev->num_tx_queues; i++) { - int qno = lio->linfo.txpciq[i % (lio->linfo.num_txpciq)] + int qno = lio->linfo.txpciq[i % lio->oct_dev->num_iqs] .s.q_no; if (__netif_subqueue_stopped(netdev, i)) { INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno, @@ -574,7 +542,8 @@ static void print_link_info(struct net_device *netdev) { struct lio *lio = GET_LIO(netdev); - if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) { + if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) && + ifstate_check(lio, LIO_IFSTATE_REGISTERED)) { struct oct_link_info *linfo = &lio->linfo; if (linfo->link.s.link_up) { @@ -661,6 +630,12 @@ static void update_link_status(struct net_device *netdev, txqs_stop(netdev); } + if (lio->linfo.link.s.mtu != netdev->max_mtu) { + dev_info(&oct->pci_dev->dev, "Max MTU Changed from %d to %d\n", + netdev->max_mtu, lio->linfo.link.s.mtu); + netdev->max_mtu = lio->linfo.link.s.mtu; + } + if (lio->linfo.link.s.mtu < netdev->mtu) { dev_warn(&oct->pci_dev->dev, "PF has changed the MTU for gmx port. 
Reducing the mtu from %d to %d\n", @@ -673,167 +648,6 @@ static void update_link_status(struct net_device *netdev, } } -static void update_txq_status(struct octeon_device *oct, int iq_num) -{ - struct octeon_instr_queue *iq = oct->instr_queue[iq_num]; - struct net_device *netdev; - struct lio *lio; - - netdev = oct->props[iq->ifidx].netdev; - lio = GET_LIO(netdev); - if (netif_is_multiqueue(netdev)) { - if (__netif_subqueue_stopped(netdev, iq->q_index) && - lio->linfo.link.s.link_up && - (!octnet_iq_is_full(oct, iq_num))) { - netif_wake_subqueue(netdev, iq->q_index); - INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num, - tx_restart, 1); - } - } else if (netif_queue_stopped(netdev) && lio->linfo.link.s.link_up && - (!octnet_iq_is_full(oct, lio->txq))) { - INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, - lio->txq, tx_restart, 1); - netif_wake_queue(netdev); - } -} - -static -int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret) -{ - struct octeon_device *oct = droq->oct_dev; - struct octeon_device_priv *oct_priv = - (struct octeon_device_priv *)oct->priv; - - if (droq->ops.poll_mode) { - droq->ops.napi_fn(droq); - } else { - if (ret & MSIX_PO_INT) { - dev_err(&oct->pci_dev->dev, - "should not come here should not get rx when poll mode = 0 for vf\n"); - tasklet_schedule(&oct_priv->droq_tasklet); - return 1; - } - /* this will be flushed periodically by check iq db */ - if (ret & MSIX_PI_INT) - return 0; - } - return 0; -} - -static irqreturn_t -liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev) -{ - struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev; - struct octeon_device *oct = ioq_vector->oct_dev; - struct octeon_droq *droq = oct->droq[ioq_vector->droq_index]; - u64 ret; - - ret = oct->fn_list.msix_interrupt_handler(ioq_vector); - - if ((ret & MSIX_PO_INT) || (ret & MSIX_PI_INT)) - liquidio_schedule_msix_droq_pkt_handler(droq, ret); - - return IRQ_HANDLED; -} - -/** - * \brief Setup interrupt for octeon device - * @param oct octeon device - * - * Enable interrupt in Octeon device as given in the PCI interrupt mask. 
- */ -static int octeon_setup_interrupt(struct octeon_device *oct) -{ - struct msix_entry *msix_entries; - char *queue_irq_names = NULL; - int num_alloc_ioq_vectors; - int num_ioq_vectors; - int irqret; - int i; - - if (oct->msix_on) { - oct->num_msix_irqs = oct->sriov_info.rings_per_vf; - - /* allocate storage for the names assigned to each irq */ - oct->irq_name_storage = - kcalloc(MAX_IOQ_INTERRUPTS_PER_VF, INTRNAMSIZ, - GFP_KERNEL); - if (!oct->irq_name_storage) { - dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n"); - return -ENOMEM; - } - - queue_irq_names = oct->irq_name_storage; - - oct->msix_entries = kcalloc( - oct->num_msix_irqs, sizeof(struct msix_entry), GFP_KERNEL); - if (!oct->msix_entries) { - dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n"); - kfree(oct->irq_name_storage); - oct->irq_name_storage = NULL; - return -ENOMEM; - } - - msix_entries = (struct msix_entry *)oct->msix_entries; - - for (i = 0; i < oct->num_msix_irqs; i++) - msix_entries[i].entry = i; - num_alloc_ioq_vectors = pci_enable_msix_range( - oct->pci_dev, msix_entries, - oct->num_msix_irqs, - oct->num_msix_irqs); - if (num_alloc_ioq_vectors < 0) { - dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n"); - kfree(oct->msix_entries); - oct->msix_entries = NULL; - kfree(oct->irq_name_storage); - oct->irq_name_storage = NULL; - return num_alloc_ioq_vectors; - } - dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n"); - - num_ioq_vectors = oct->num_msix_irqs; - - for (i = 0; i < num_ioq_vectors; i++) { - snprintf(&queue_irq_names[IRQ_NAME_OFF(i)], INTRNAMSIZ, - "LiquidIO%u-vf%u-rxtx-%u", - oct->octeon_id, oct->vf_num, i); - - irqret = request_irq(msix_entries[i].vector, - liquidio_msix_intr_handler, 0, - &queue_irq_names[IRQ_NAME_OFF(i)], - &oct->ioq_vector[i]); - if (irqret) { - dev_err(&oct->pci_dev->dev, - "OCTEON: Request_irq failed for MSIX interrupt Error: %d\n", - irqret); - - while (i) { - i--; - irq_set_affinity_hint( - msix_entries[i].vector, NULL); - free_irq(msix_entries[i].vector, - &oct->ioq_vector[i]); - } - pci_disable_msix(oct->pci_dev); - kfree(oct->msix_entries); - oct->msix_entries = NULL; - kfree(oct->irq_name_storage); - oct->irq_name_storage = NULL; - return irqret; - } - oct->ioq_vector[i].vector = msix_entries[i].vector; - /* assign the cpu mask for this msix interrupt vector */ - irq_set_affinity_hint( - msix_entries[i].vector, - (&oct->ioq_vector[i].affinity_mask)); - } - dev_dbg(&oct->pci_dev->dev, - "OCTEON[%d]: MSI-X enabled\n", oct->octeon_id); - } - return 0; -} - /** * \brief PCI probe handler * @param pdev PCI device structure @@ -942,10 +756,14 @@ static void octeon_destroy_resources(struct octeon_device *oct) if (oct->msix_on) { msix_entries = (struct msix_entry *)oct->msix_entries; for (i = 0; i < oct->num_msix_irqs; i++) { - irq_set_affinity_hint(msix_entries[i].vector, - NULL); - free_irq(msix_entries[i].vector, - &oct->ioq_vector[i]); + if (oct->ioq_vector[i].vector) { + irq_set_affinity_hint( + msix_entries[i].vector, + NULL); + free_irq(msix_entries[i].vector, + &oct->ioq_vector[i]); + oct->ioq_vector[i].vector = 0; + } } pci_disable_msix(oct->pci_dev); kfree(oct->msix_entries); @@ -1137,6 +955,10 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) oct->droq[0]->ops.poll_mode = 0; } + /* Delete NAPI */ + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) + netif_napi_del(napi); + if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) unregister_netdev(netdev); @@ 
-1174,7 +996,7 @@ static int liquidio_stop_nic_module(struct octeon_device *oct) for (i = 0; i < oct->ifcount; i++) { lio = GET_LIO(oct->props[i].netdev); - for (j = 0; j < lio->linfo.num_rxpciq; j++) + for (j = 0; j < oct->num_oqs; j++) octeon_unregister_droq_ops(oct, lio->linfo.rxpciq[j].s.q_no); } @@ -1262,7 +1084,7 @@ static int check_txq_state(struct lio *lio, struct sk_buff *skb) if (netif_is_multiqueue(lio->netdev)) { q = skb->queue_mapping; - iq = lio->linfo.txpciq[(q % (lio->linfo.num_txpciq))].s.q_no; + iq = lio->linfo.txpciq[q % lio->oct_dev->num_iqs].s.q_no; } else { iq = lio->txq; q = iq; @@ -1390,41 +1212,6 @@ static void free_netsgbuf_with_resp(void *buf) check_txq_state(lio, skb); } -/** - * \brief Setup output queue - * @param oct octeon device - * @param q_no which queue - * @param num_descs how many descriptors - * @param desc_size size of each descriptor - * @param app_ctx application context - */ -static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs, - int desc_size, void *app_ctx) -{ - int ret_val; - - dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no); - /* droq creation and local register settings. */ - ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx); - if (ret_val < 0) - return ret_val; - - if (ret_val == 1) { - dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no); - return 0; - } - - /* Enable the droq queues */ - octeon_set_droq_pkt_op(oct, q_no, 1); - - /* Send Credit for Octeon Output queues. Credits are always - * sent after the output queue is enabled. - */ - writel(oct->droq[q_no]->max_count, oct->droq[q_no]->pkts_credit_reg); - - return ret_val; -} - /** * \brief Callback for getting interface configuration * @param status status of request @@ -1457,290 +1244,6 @@ static void if_cfg_callback(struct octeon_device *oct, wake_up_interruptible(&ctx->wc); } -/** Routine to push packets arriving on Octeon interface upto network layer. - * @param oct_id - octeon device id. - * @param skbuff - skbuff struct to be passed to network layer. - * @param len - size of total data received. - * @param rh - Control header associated with the packet - * @param param - additional control data with the packet - * @param arg - farg registered in droq_ops - */ -static void -liquidio_push_packet(u32 octeon_id __attribute__((unused)), - void *skbuff, - u32 len, - union octeon_rh *rh, - void *param, - void *arg) -{ - struct napi_struct *napi = param; - struct octeon_droq *droq = - container_of(param, struct octeon_droq, napi); - struct net_device *netdev = (struct net_device *)arg; - struct sk_buff *skb = (struct sk_buff *)skbuff; - u16 vtag = 0; - u32 r_dh_off; - - if (netdev) { - struct lio *lio = GET_LIO(netdev); - int packet_was_received; - - /* Do not proceed if the interface is not in RUNNING state. 
*/ - if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) { - recv_buffer_free(skb); - droq->stats.rx_dropped++; - return; - } - - skb->dev = netdev; - - skb_record_rx_queue(skb, droq->q_no); - if (likely(len > MIN_SKB_SIZE)) { - struct octeon_skb_page_info *pg_info; - unsigned char *va; - - pg_info = ((struct octeon_skb_page_info *)(skb->cb)); - if (pg_info->page) { - /* For Paged allocation use the frags */ - va = page_address(pg_info->page) + - pg_info->page_offset; - memcpy(skb->data, va, MIN_SKB_SIZE); - skb_put(skb, MIN_SKB_SIZE); - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, - pg_info->page, - pg_info->page_offset + - MIN_SKB_SIZE, - len - MIN_SKB_SIZE, - LIO_RXBUFFER_SZ); - } - } else { - struct octeon_skb_page_info *pg_info = - ((struct octeon_skb_page_info *)(skb->cb)); - skb_copy_to_linear_data(skb, - page_address(pg_info->page) + - pg_info->page_offset, len); - skb_put(skb, len); - put_page(pg_info->page); - } - - r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT; - - if (rh->r_dh.has_hwtstamp) - r_dh_off -= BYTES_PER_DHLEN_UNIT; - - if (rh->r_dh.has_hash) { - __be32 *hash_be = (__be32 *)(skb->data + r_dh_off); - u32 hash = be32_to_cpu(*hash_be); - - skb_set_hash(skb, hash, PKT_HASH_TYPE_L4); - r_dh_off -= BYTES_PER_DHLEN_UNIT; - } - - skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT); - skb->protocol = eth_type_trans(skb, skb->dev); - - if ((netdev->features & NETIF_F_RXCSUM) && - (((rh->r_dh.encap_on) && - (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) || - (!(rh->r_dh.encap_on) && - (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED)))) - /* checksum has already been verified */ - skb->ip_summed = CHECKSUM_UNNECESSARY; - else - skb->ip_summed = CHECKSUM_NONE; - - /* Setting Encapsulation field on basis of status received - * from the firmware - */ - if (rh->r_dh.encap_on) { - skb->encapsulation = 1; - skb->csum_level = 1; - droq->stats.rx_vxlan++; - } - - /* inbound VLAN tag */ - if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && - rh->r_dh.vlan) { - u16 priority = rh->r_dh.priority; - u16 vid = rh->r_dh.vlan; - - vtag = (priority << VLAN_PRIO_SHIFT) | vid; - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag); - } - - packet_was_received = (napi_gro_receive(napi, skb) != GRO_DROP); - - if (packet_was_received) { - droq->stats.rx_bytes_received += len; - droq->stats.rx_pkts_received++; - } else { - droq->stats.rx_dropped++; - netif_info(lio, rx_err, lio->netdev, - "droq:%d error rx_dropped:%llu\n", - droq->q_no, droq->stats.rx_dropped); - } - - } else { - recv_buffer_free(skb); - } -} - -/** - * \brief callback when receive interrupt occurs and we are in NAPI mode - * @param arg pointer to octeon output queue - */ -static void liquidio_vf_napi_drv_callback(void *arg) -{ - struct octeon_droq *droq = arg; - - napi_schedule_irqoff(&droq->napi); -} - -/** - * \brief Entry point for NAPI polling - * @param napi NAPI structure - * @param budget maximum number of items to process - */ -static int liquidio_napi_poll(struct napi_struct *napi, int budget) -{ - struct octeon_instr_queue *iq; - struct octeon_device *oct; - struct octeon_droq *droq; - int tx_done = 0, iq_no; - int work_done; - - droq = container_of(napi, struct octeon_droq, napi); - oct = droq->oct_dev; - iq_no = droq->q_no; - - /* Handle Droq descriptors */ - work_done = octeon_process_droq_poll_cmd(oct, droq->q_no, - POLL_EVENT_PROCESS_PKTS, - budget); - - /* Flush the instruction queue */ - iq = oct->instr_queue[iq_no]; - if (iq) { - if (atomic_read(&iq->instr_pending)) - /* Process iq buffers with in the budget 
limits */ - tx_done = octeon_flush_iq(oct, iq, budget); - else - tx_done = 1; - - /* Update iq read-index rather than waiting for next interrupt. - * Return back if tx_done is false. - */ - update_txq_status(oct, iq_no); - } else { - dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n", - __func__, iq_no); - } - - /* force enable interrupt if reg cnts are high to avoid wraparound */ - if ((work_done < budget && tx_done) || - (iq && iq->pkt_in_done >= MAX_REG_CNT) || - (droq->pkt_count >= MAX_REG_CNT)) { - tx_done = 1; - napi_complete_done(napi, work_done); - octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no, - POLL_EVENT_ENABLE_INTR, 0); - return 0; - } - - return (!tx_done) ? (budget) : (work_done); -} - -/** - * \brief Setup input and output queues - * @param octeon_dev octeon device - * @param ifidx Interface index - * - * Note: Queues are with respect to the octeon device. Thus - * an input queue is for egress packets, and output queues - * are for ingress packets. - */ -static int setup_io_queues(struct octeon_device *octeon_dev, int ifidx) -{ - struct octeon_droq_ops droq_ops; - struct net_device *netdev; - static int cpu_id_modulus; - struct octeon_droq *droq; - struct napi_struct *napi; - static int cpu_id; - int num_tx_descs; - struct lio *lio; - int retval = 0; - int q, q_no; - - netdev = octeon_dev->props[ifidx].netdev; - - lio = GET_LIO(netdev); - - memset(&droq_ops, 0, sizeof(struct octeon_droq_ops)); - - droq_ops.fptr = liquidio_push_packet; - droq_ops.farg = netdev; - - droq_ops.poll_mode = 1; - droq_ops.napi_fn = liquidio_vf_napi_drv_callback; - cpu_id = 0; - cpu_id_modulus = num_present_cpus(); - - /* set up DROQs. */ - for (q = 0; q < lio->linfo.num_rxpciq; q++) { - q_no = lio->linfo.rxpciq[q].s.q_no; - - retval = octeon_setup_droq( - octeon_dev, q_no, - CFG_GET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(octeon_dev), - lio->ifidx), - CFG_GET_NUM_RX_BUF_SIZE_NIC_IF(octeon_get_conf(octeon_dev), - lio->ifidx), - NULL); - if (retval) { - dev_err(&octeon_dev->pci_dev->dev, - "%s : Runtime DROQ(RxQ) creation failed.\n", - __func__); - return 1; - } - - droq = octeon_dev->droq[q_no]; - napi = &droq->napi; - netif_napi_add(netdev, napi, liquidio_napi_poll, 64); - - /* designate a CPU for this droq */ - droq->cpu_id = cpu_id; - cpu_id++; - if (cpu_id >= cpu_id_modulus) - cpu_id = 0; - - octeon_register_droq_ops(octeon_dev, q_no, &droq_ops); - } - - /* 23XX VF can send/recv control messages (via the first VF-owned - * droq) from the firmware even if the ethX interface is down, - * so that's why poll_mode must be off for the first droq. - */ - octeon_dev->droq[0]->ops.poll_mode = 0; - - /* set up IQs. 
*/ - for (q = 0; q < lio->linfo.num_txpciq; q++) { - num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF( - octeon_get_conf(octeon_dev), lio->ifidx); - retval = octeon_setup_iq(octeon_dev, ifidx, q, - lio->linfo.txpciq[q], num_tx_descs, - netdev_get_tx_queue(netdev, q)); - if (retval) { - dev_err(&octeon_dev->pci_dev->dev, - " %s : Runtime IQ(TxQ) creation failed.\n", - __func__); - return 1; - } - } - - return 0; -} - /** * \brief Net device open for LiquidIO * @param netdev network device @@ -1784,6 +1287,16 @@ static int liquidio_stop(struct net_device *netdev) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; + struct napi_struct *napi, *n; + + if (oct->props[lio->ifidx].napi_enabled) { + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) + napi_disable(napi); + + oct->props[lio->ifidx].napi_enabled = 0; + + oct->droq[0]->ops.poll_mode = 0; + } netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n"); /* Inform that netif carrier is down */ @@ -1988,7 +1501,10 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev) oct = lio->oct_dev; - for (i = 0; i < lio->linfo.num_txpciq; i++) { + if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) + return stats; + + for (i = 0; i < oct->num_iqs; i++) { iq_no = lio->linfo.txpciq[i].s.q_no; iq_stats = &oct->instr_queue[iq_no]->stats; pkts += iq_stats->tx_done; @@ -2004,7 +1520,7 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev) drop = 0; bytes = 0; - for (i = 0; i < lio->linfo.num_rxpciq; i++) { + for (i = 0; i < oct->num_oqs; i++) { oq_no = lio->linfo.rxpciq[i].s.q_no; oq_stats = &oct->droq[oq_no]->stats; pkts += oq_stats->rx_pkts_received; @@ -2028,18 +1544,32 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev) */ static int liquidio_change_mtu(struct net_device *netdev, int new_mtu) { - struct lio *lio = GET_LIO(netdev); - struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + struct octeon_device *oct; + struct lio *lio; + int ret = 0; + + lio = GET_LIO(netdev); + oct = lio->oct_dev; + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + + nctrl.ncmd.u64 = 0; + nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MTU; + nctrl.ncmd.s.param1 = new_mtu; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.wait_time = LIO_CMD_WAIT_TM; + nctrl.netpndev = (u64)netdev; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); + if (ret < 0) { + dev_err(&oct->pci_dev->dev, "Failed to set MTU\n"); + return -EIO; + } lio->mtu = new_mtu; - netif_info(lio, probe, lio->netdev, "MTU Changed from %d to %d\n", - netdev->mtu, new_mtu); - dev_info(&oct->pci_dev->dev, "%s MTU Changed from %d to %d\n", - netdev->name, netdev->mtu, new_mtu); - - netdev->mtu = new_mtu; - return 0; } @@ -2959,7 +2489,9 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) /* Copy MAC Address to OS network device structure */ ether_addr_copy(netdev->dev_addr, mac); - if (setup_io_queues(octeon_dev, i)) { + if (liquidio_setup_io_queues(octeon_dev, i, + lio->linfo.num_txpciq, + lio->linfo.num_rxpciq)) { dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n"); goto setup_nic_dev_fail; } @@ -3182,7 +2714,7 @@ static int octeon_device_init(struct octeon_device *oct) LIQUIDIO_VERSION, oct->sriov_info.rings_per_vf); /* Setup the interrupt handler and record the INT SUM register address*/ - if (octeon_setup_interrupt(oct)) + if (octeon_setup_interrupt(oct, oct->sriov_info.rings_per_vf)) return 1; 
atomic_set(&oct->status, OCT_DEV_INTR_SET_DONE); diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h index 231dd7fbfb80..3788c8cd082a 100644 --- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h +++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h @@ -27,7 +27,7 @@ #define LIQUIDIO_PACKAGE "" #define LIQUIDIO_BASE_MAJOR_VERSION 1 -#define LIQUIDIO_BASE_MINOR_VERSION 5 +#define LIQUIDIO_BASE_MINOR_VERSION 6 #define LIQUIDIO_BASE_MICRO_VERSION 1 #define LIQUIDIO_BASE_VERSION __stringify(LIQUIDIO_BASE_MAJOR_VERSION) "." \ __stringify(LIQUIDIO_BASE_MINOR_VERSION) @@ -106,6 +106,7 @@ enum octeon_tag_type { #define MAX_IOQ_INTERRUPTS_PER_PF (64 * 2) #define MAX_IOQ_INTERRUPTS_PER_VF (8 * 2) +#define SCR2_BIT_FW_LOADED 63 static inline u32 incr_index(u32 index, u32 count, u32 max) { @@ -189,7 +190,6 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry, #define OCTNET_CMD_Q 0 /* NIC Command types */ -#define OCTNET_CMD_RESET_PF 0x0 #define OCTNET_CMD_CHANGE_MTU 0x1 #define OCTNET_CMD_CHANGE_MACADDR 0x2 #define OCTNET_CMD_CHANGE_DEVFLAGS 0x3 @@ -226,6 +226,9 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry, #define OCTNET_CMD_SET_UC_LIST 0x1b #define OCTNET_CMD_SET_VF_LINKSTATE 0x1c + +#define OCTNET_CMD_QUEUE_COUNT_CTL 0x1f + #define OCTNET_CMD_VXLAN_PORT_ADD 0x0 #define OCTNET_CMD_VXLAN_PORT_DEL 0x1 #define OCTNET_CMD_RXCSUM_ENABLE 0x0 @@ -235,6 +238,8 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry, #define OCTNET_CMD_VLAN_FILTER_ENABLE 0x1 #define OCTNET_CMD_VLAN_FILTER_DISABLE 0x0 +#define LIO_CMD_WAIT_TM 100 + /* RX(packets coming from wire) Checksum verification flags */ /* TCP/UDP csum */ #define CNNIC_L4SUM_VERIFIED 0x1 @@ -768,6 +773,7 @@ struct nic_rx_stats { /* firmware stats */ u64 fw_total_rcvd; u64 fw_total_fwd; + u64 fw_total_fwd_bytes; u64 fw_err_pko; u64 fw_err_link; u64 fw_err_drop; @@ -814,6 +820,7 @@ struct nic_tx_stats { u64 fw_tso; /* number of tso requests */ u64 fw_tso_fwd; /* number of packets segmented in tso */ u64 fw_tx_vxlan; + u64 fw_err_pki; }; struct oct_link_stats { diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h index f229d792c2b3..63bd9c94e547 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_config.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h @@ -71,13 +71,17 @@ #define CN23XX_MAX_RINGS_PER_VF 8 #define CN23XX_MAX_INPUT_QUEUES CN23XX_MAX_RINGS_PER_PF -#define CN23XX_MAX_IQ_DESCRIPTORS 512 +#define CN23XX_MAX_IQ_DESCRIPTORS 2048 +#define CN23XX_DEFAULT_IQ_DESCRIPTORS 512 +#define CN23XX_MIN_IQ_DESCRIPTORS 128 #define CN23XX_DB_MIN 1 #define CN23XX_DB_MAX 8 #define CN23XX_DB_TIMEOUT 1 #define CN23XX_MAX_OUTPUT_QUEUES CN23XX_MAX_RINGS_PER_PF -#define CN23XX_MAX_OQ_DESCRIPTORS 512 +#define CN23XX_MAX_OQ_DESCRIPTORS 2048 +#define CN23XX_DEFAULT_OQ_DESCRIPTORS 512 +#define CN23XX_MIN_OQ_DESCRIPTORS 128 #define CN23XX_OQ_BUF_SIZE 1664 #define CN23XX_OQ_PKTSPER_INTR 128 /*#define CAVIUM_ONLY_CN23XX_RX_PERF*/ @@ -163,6 +167,11 @@ ((cfg)->misc.oct_link_query_interval) #define CFG_GET_IS_SLI_BP_ON(cfg) ((cfg)->misc.enable_sli_oq_bp) +#define CFG_SET_NUM_RX_DESCS_NIC_IF(cfg, idx, value) \ + ((cfg)->nic_if_cfg[idx].num_rx_descs = value) +#define CFG_SET_NUM_TX_DESCS_NIC_IF(cfg, idx, value) \ + ((cfg)->nic_if_cfg[idx].num_tx_descs = value) + /* Max IOQs per OCTEON Link */ #define MAX_IOQS_PER_NICIF 64 diff --git 
a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c index e08f7600f986..ec3dd69cd6b2 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c @@ -37,13 +37,6 @@ static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct, u32 flags); static int octeon_console_read(struct octeon_device *oct, u32 console_num, char *buffer, u32 buf_size); -static u32 console_bitmask; -module_param(console_bitmask, int, 0644); -MODULE_PARM_DESC(console_bitmask, - "Bitmask indicating which consoles have debug output redirected to syslog."); - -#define MIN(a, b) min((a), (b)) -#define CAST_ULL(v) ((u64)(v)) #define BOOTLOADER_PCI_READ_BUFFER_DATA_ADDR 0x0006c008 #define BOOTLOADER_PCI_READ_BUFFER_LEN_ADDR 0x0006c004 @@ -138,16 +131,6 @@ struct octeon_pci_console_desc { /* Implicit storage for console_addr_array */ }; -/** - * \brief determines if a given console has debug enabled. - * @param console console to check - * @returns 1 = enabled. 0 otherwise - */ -static int octeon_console_debug_enabled(u32 console) -{ - return (console_bitmask >> (console)) & 0x1; -} - /** * This function is the implementation of the get macros defined * for individual structure members. The argument are generated @@ -234,7 +217,7 @@ static int __cvmx_bootmem_check_version(struct octeon_device *oct, (exact_match && major_version != exact_match)) { dev_err(&oct->pci_dev->dev, "bootmem ver mismatch %d.%d addr:0x%llx\n", major_version, minor_version, - CAST_ULL(oct->bootmem_desc_addr)); + (long long)oct->bootmem_desc_addr); return -1; } else { return 0; @@ -454,20 +437,31 @@ static void output_console_line(struct octeon_device *oct, { char *line; s32 i; + size_t len; line = console_buffer; for (i = 0; i < bytes_read; i++) { /* Output a line at a time, prefixed */ if (console_buffer[i] == '\n') { console_buffer[i] = '\0'; - if (console->leftover[0]) { - dev_info(&oct->pci_dev->dev, "%lu: %s%s\n", - console_num, console->leftover, - line); + /* We need to output 'line', prefaced by 'leftover'. + * However, it is possible we're being called to + * output 'leftover' by itself (in the case of nothing + * having been read from the console). + * + * To avoid duplication, check for this condition. 
+ */ + if (console->leftover[0] && + (line != console->leftover)) { + if (console->print) + (*console->print)(oct, (u32)console_num, + console->leftover, + line); console->leftover[0] = '\0'; } else { - dev_info(&oct->pci_dev->dev, "%lu: %s\n", - console_num, line); + if (console->print) + (*console->print)(oct, (u32)console_num, + line, NULL); } line = &console_buffer[i + 1]; } @@ -476,13 +470,16 @@ static void output_console_line(struct octeon_device *oct, /* Save off any leftovers */ if (line != &console_buffer[bytes_read]) { console_buffer[bytes_read] = '\0'; - strcpy(console->leftover, line); + len = strlen(console->leftover); + strncpy(&console->leftover[len], line, + sizeof(console->leftover) - len); } } static void check_console(struct work_struct *work) { s32 bytes_read, tries, total_read; + size_t len; struct octeon_console *console; struct cavium_wk *wk = (struct cavium_wk *)work; struct octeon_device *oct = (struct octeon_device *)wk->ctxptr; @@ -504,7 +501,7 @@ static void check_console(struct work_struct *work) total_read += bytes_read; if (console->waiting) octeon_console_handle_result(oct, console_num); - if (octeon_console_debug_enabled(console_num)) { + if (console->print) { output_console_line(oct, console, console_num, console_buffer, bytes_read); } @@ -519,10 +516,13 @@ static void check_console(struct work_struct *work) /* If nothing is read after polling the console, * output any leftovers if any */ - if (octeon_console_debug_enabled(console_num) && - (total_read == 0) && (console->leftover[0])) { - dev_info(&oct->pci_dev->dev, "%u: %s\n", - console_num, console->leftover); + if (console->print && (total_read == 0) && + (console->leftover[0])) { + /* append '\n' as terminator for 'output_console_line' */ + len = strlen(console->leftover); + console->leftover[len] = '\n'; + output_console_line(oct, console, console_num, + console->leftover, (s32)(len + 1)); console->leftover[0] = '\0'; } @@ -574,7 +574,84 @@ int octeon_init_consoles(struct octeon_device *oct) return ret; } -int octeon_add_console(struct octeon_device *oct, u32 console_num) +static void octeon_get_uboot_version(struct octeon_device *oct) +{ + s32 bytes_read, tries, total_read; + struct octeon_console *console; + u32 console_num = 0; + char *uboot_ver; + char *buf; + char *p; + +#define OCTEON_UBOOT_VER_BUF_SIZE 512 + buf = kmalloc(OCTEON_UBOOT_VER_BUF_SIZE, GFP_KERNEL); + if (!buf) + return; + + if (octeon_console_send_cmd(oct, "setenv stdout pci\n", 50)) { + kfree(buf); + return; + } + + if (octeon_console_send_cmd(oct, "version\n", 1)) { + kfree(buf); + return; + } + + console = &oct->console[console_num]; + tries = 0; + total_read = 0; + + do { + /* Take console output regardless of whether it will + * be logged + */ + bytes_read = + octeon_console_read(oct, + console_num, buf + total_read, + OCTEON_UBOOT_VER_BUF_SIZE - 1 - + total_read); + if (bytes_read > 0) { + buf[bytes_read] = '\0'; + + total_read += bytes_read; + if (console->waiting) + octeon_console_handle_result(oct, console_num); + } else if (bytes_read < 0) { + dev_err(&oct->pci_dev->dev, "Error reading console %u, ret=%d\n", + console_num, bytes_read); + } + + tries++; + } while ((bytes_read > 0) && (tries < 16)); + + /* If nothing is read after polling the console, + * output any leftovers if any + */ + if ((total_read == 0) && (console->leftover[0])) { + dev_dbg(&oct->pci_dev->dev, "%u: %s\n", + console_num, console->leftover); + console->leftover[0] = '\0'; + } + + buf[OCTEON_UBOOT_VER_BUF_SIZE - 1] = '\0'; + + uboot_ver = strstr(buf, 
"U-Boot"); + if (uboot_ver) { + p = strstr(uboot_ver, "mips"); + if (p) { + p--; + *p = '\0'; + dev_info(&oct->pci_dev->dev, "%s\n", uboot_ver); + } + } + + kfree(buf); + octeon_console_send_cmd(oct, "setenv stdout serial\n", 50); +} + +int octeon_add_console(struct octeon_device *oct, u32 console_num, + char *dbg_enb) { int ret = 0; u32 delay; @@ -610,17 +687,19 @@ int octeon_add_console(struct octeon_device *oct, u32 console_num) work = &oct->console_poll_work[console_num].work; + octeon_get_uboot_version(oct); + INIT_DELAYED_WORK(work, check_console); oct->console_poll_work[console_num].ctxptr = (void *)oct; oct->console_poll_work[console_num].ctxul = console_num; delay = OCTEON_CONSOLE_POLL_INTERVAL_MS; schedule_delayed_work(work, msecs_to_jiffies(delay)); - if (octeon_console_debug_enabled(console_num)) { - ret = octeon_console_send_cmd(oct, - "setenv pci_console_active 1", - 2000); - } + /* an empty string means use default debug console enablement */ + if (dbg_enb && !dbg_enb[0]) + dbg_enb = "setenv pci_console_active 1"; + if (dbg_enb) + ret = octeon_console_send_cmd(oct, dbg_enb, 2000); console->active = 1; } @@ -704,7 +783,7 @@ static int octeon_console_read(struct octeon_device *oct, u32 console_num, if (bytes_to_read <= 0) return bytes_to_read; - bytes_to_read = MIN(bytes_to_read, (s32)buf_size); + bytes_to_read = min_t(s32, bytes_to_read, buf_size); /* Check to see if what we want to read is not contiguous, and limit * ourselves to the contiguous block diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c index 623e28ca736e..29d53b1763a7 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c @@ -418,7 +418,7 @@ static struct octeon_config default_cn23xx_conf = { /** IQ attributes */ .iq = { .max_iqs = CN23XX_CFG_IO_QUEUES, - .pending_list_size = (CN23XX_MAX_IQ_DESCRIPTORS * + .pending_list_size = (CN23XX_DEFAULT_IQ_DESCRIPTORS * CN23XX_CFG_IO_QUEUES), .instr_type = OCTEON_64BYTE_INSTR, .db_min = CN23XX_DB_MIN, @@ -436,8 +436,8 @@ static struct octeon_config default_cn23xx_conf = { }, .num_nic_ports = DEFAULT_NUM_NIC_PORTS_23XX, - .num_def_rx_descs = CN23XX_MAX_OQ_DESCRIPTORS, - .num_def_tx_descs = CN23XX_MAX_IQ_DESCRIPTORS, + .num_def_rx_descs = CN23XX_DEFAULT_OQ_DESCRIPTORS, + .num_def_tx_descs = CN23XX_DEFAULT_IQ_DESCRIPTORS, .def_rx_buf_size = CN23XX_OQ_BUF_SIZE, /* For ethernet interface 0: Port cfg Attributes */ @@ -455,10 +455,10 @@ static struct octeon_config default_cn23xx_conf = { .num_rxqs = DEF_RXQS_PER_INTF, /* Num of desc for rx rings */ - .num_rx_descs = CN23XX_MAX_OQ_DESCRIPTORS, + .num_rx_descs = CN23XX_DEFAULT_OQ_DESCRIPTORS, /* Num of desc for tx rings */ - .num_tx_descs = CN23XX_MAX_IQ_DESCRIPTORS, + .num_tx_descs = CN23XX_DEFAULT_IQ_DESCRIPTORS, /* SKB size, We need not change buf size even for Jumbo frames. * Octeon can send jumbo frames in 4 consecutive descriptors, @@ -484,10 +484,10 @@ static struct octeon_config default_cn23xx_conf = { .num_rxqs = DEF_RXQS_PER_INTF, /* Num of desc for rx rings */ - .num_rx_descs = CN23XX_MAX_OQ_DESCRIPTORS, + .num_rx_descs = CN23XX_DEFAULT_OQ_DESCRIPTORS, /* Num of desc for tx rings */ - .num_tx_descs = CN23XX_MAX_IQ_DESCRIPTORS, + .num_tx_descs = CN23XX_DEFAULT_IQ_DESCRIPTORS, /* SKB size, We need not change buf size even for Jumbo frames. 
* Octeon can send jumbo frames in 4 consecutive descriptors, @@ -528,9 +528,10 @@ static struct octeon_config_ptr { }; static char oct_dev_state_str[OCT_DEV_STATES + 1][32] = { - "BEGIN", "PCI-MAP-DONE", "DISPATCH-INIT-DONE", + "BEGIN", "PCI-ENABLE-DONE", "PCI-MAP-DONE", "DISPATCH-INIT-DONE", "IQ-INIT-DONE", "SCBUFF-POOL-INIT-DONE", "RESPLIST-INIT-DONE", - "DROQ-INIT-DONE", "IO-QUEUES-INIT-DONE", "CONSOLE-INIT-DONE", + "DROQ-INIT-DONE", "MBOX-SETUP-DONE", "MSIX-ALLOC-VECTOR-DONE", + "INTR-SET-DONE", "IO-QUEUES-INIT-DONE", "CONSOLE-INIT-DONE", "HOST-READY", "CORE-READY", "RUNNING", "IN-RESET", "INVALID" }; @@ -876,11 +877,11 @@ int octeon_setup_instr_queues(struct octeon_device *oct) oct->num_iqs = 0; - oct->instr_queue[0] = vmalloc_node(sizeof(*oct->instr_queue[0]), + oct->instr_queue[0] = vzalloc_node(sizeof(*oct->instr_queue[0]), numa_node); if (!oct->instr_queue[0]) oct->instr_queue[0] = - vmalloc(sizeof(struct octeon_instr_queue)); + vzalloc(sizeof(struct octeon_instr_queue)); if (!oct->instr_queue[0]) return 1; memset(oct->instr_queue[0], 0, sizeof(struct octeon_instr_queue)); @@ -923,9 +924,9 @@ int octeon_setup_output_queues(struct octeon_device *oct) desc_size = CFG_GET_DEF_RX_BUF_SIZE(CHIP_CONF(oct, cn23xx_vf)); } oct->num_oqs = 0; - oct->droq[0] = vmalloc_node(sizeof(*oct->droq[0]), numa_node); + oct->droq[0] = vzalloc_node(sizeof(*oct->droq[0]), numa_node); if (!oct->droq[0]) - oct->droq[0] = vmalloc(sizeof(*oct->droq[0])); + oct->droq[0] = vzalloc(sizeof(*oct->droq[0])); if (!oct->droq[0]) return 1; diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h index c90ed48ae8ab..894af199ddef 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h @@ -22,6 +22,8 @@ #ifndef _OCTEON_DEVICE_H_ #define _OCTEON_DEVICE_H_ +#include + /** PCI VendorId Device Id */ #define OCTEON_CN68XX_PCIID 0x91177d #define OCTEON_CN66XX_PCIID 0x92177d @@ -192,6 +194,8 @@ struct octeon_reg_list { }; #define OCTEON_CONSOLE_MAX_READ_BYTES 512 +typedef int (*octeon_console_print_fn)(struct octeon_device *oct, + u32 num, char *pre, char *suf); struct octeon_console { u32 active; u32 waiting; @@ -199,6 +203,7 @@ struct octeon_console { u32 buffer_size; u64 input_base_addr; u64 output_base_addr; + octeon_console_print_fn print; char leftover[OCTEON_CONSOLE_MAX_READ_BYTES]; }; @@ -552,6 +557,7 @@ struct octeon_device { } loc; atomic_t *adapter_refcount; /* reference count of adapter */ + bool ptp_enable; }; #define OCT_DRV_ONLINE 1 @@ -565,6 +571,8 @@ struct octeon_device { #define CHIP_CONF(oct, TYPE) \ (((struct octeon_ ## TYPE *)((oct)->chip))->conf) +#define MAX_IO_PENDING_PKT_COUNT 100 + /*------------------ Function Prototypes ----------------------*/ /** Initialize device list memory */ @@ -740,11 +748,17 @@ int octeon_init_consoles(struct octeon_device *oct); /** * Adds access to a console to the device. * - * @param oct which octeon to add to - * @param console_num which console + * @param oct: which octeon to add to + * @param console_num: which console + * @param dbg_enb: ptr to debug enablement string, one of: + * * NULL for no debug output (i.e. disabled) + * * empty string enables debug output (via default method) + * * specific string to enable debug console output + * * @return Zero on success, negative on failure. 
*/ -int octeon_add_console(struct octeon_device *oct, u32 console_num); +int octeon_add_console(struct octeon_device *oct, u32 console_num, + char *dbg_enb); /** write or read from a console */ int octeon_console_write(struct octeon_device *oct, u32 console_num, diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c index 2e190deb2233..9372d4ce9954 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c @@ -145,6 +145,8 @@ octeon_droq_destroy_ring_buffers(struct octeon_device *oct, for (i = 0; i < droq->max_count; i++) { pg_info = &droq->recv_buf_list[i].pg_info; + if (!pg_info) + continue; if (pg_info->dma) lio_unmap_ring(oct->pci_dev, @@ -207,6 +209,10 @@ int octeon_delete_droq(struct octeon_device *oct, u32 q_no) droq->desc_ring, droq->desc_ring_dma); memset(droq, 0, OCT_DROQ_SIZE); + oct->io_qmask.oq &= ~(1ULL << q_no); + vfree(oct->droq[q_no]); + oct->droq[q_no] = NULL; + oct->num_oqs--; return 0; } @@ -275,12 +281,12 @@ int octeon_init_droq(struct octeon_device *oct, droq->max_count); droq->recv_buf_list = (struct octeon_recv_buffer *) - vmalloc_node(droq->max_count * + vzalloc_node(droq->max_count * OCT_DROQ_RECVBUF_SIZE, numa_node); if (!droq->recv_buf_list) droq->recv_buf_list = (struct octeon_recv_buffer *) - vmalloc(droq->max_count * + vzalloc(droq->max_count * OCT_DROQ_RECVBUF_SIZE); if (!droq->recv_buf_list) { dev_err(&oct->pci_dev->dev, "Output queue recv buf list alloc failed\n"); diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h index 7ccffbb0019e..32ef3a7d88d8 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h @@ -35,6 +35,12 @@ #define DRV_NAME "LiquidIO" +struct octeon_device_priv { + /** Tasklet structures for this device. */ + struct tasklet_struct droq_tasklet; + unsigned long napi_mask; +}; + /** This structure is used by NIC driver to store information required * to free the sk_buff when the packet has been fetched by Octeon. * Bytes offset below assume worst-case of a 64-bit system. 
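For reference, the new dbg_enb argument to octeon_add_console() (documented in the kerneldoc just above) is interpreted as: NULL means no debug console output, an empty string means "enable via the default method", and any other string is sent to the console as the enable command; the PF driver chooses between "" and NULL via octeon_console_debug_enabled(). Below is a minimal standalone sketch of that resolution rule; resolve_dbg_enb() and the demo main() are illustrative only and are not driver code, though the default command string matches the one octeon_add_console() sends.

#include <stdio.h>

/* resolve_dbg_enb() is a hypothetical helper mirroring how
 * octeon_add_console() interprets its dbg_enb argument.
 */
static const char *resolve_dbg_enb(const char *dbg_enb)
{
	if (!dbg_enb)
		return NULL;				/* no debug console output */
	if (!dbg_enb[0])
		return "setenv pci_console_active 1";	/* default enablement */
	return dbg_enb;					/* caller-supplied command */
}

int main(void)
{
	const char *cases[] = { NULL, "", "setenv pci_console_active 1" };
	const char *labels[] = { "NULL", "\"\"", "custom string" };
	int i;

	for (i = 0; i < 3; i++) {
		const char *cmd = resolve_dbg_enb(cases[i]);

		printf("dbg_enb = %-13s -> %s\n", labels[i],
		       cmd ? cmd : "(debug console output disabled)");
	}
	return 0;
}

In the driver itself, the same rule is applied inside octeon_add_console() before the resulting command is passed to octeon_console_send_cmd().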
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h index ec8504b2942d..9e36319cead6 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h @@ -33,6 +33,7 @@ #define LIO_IFSTATE_REGISTERED 0x02 #define LIO_IFSTATE_RUNNING 0x04 #define LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08 +#define LIO_IFSTATE_RESETTING 0x10 struct oct_nic_stats_resp { u64 rh; @@ -166,6 +167,14 @@ void cleanup_rx_oom_poll_fn(struct net_device *netdev); */ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr); +int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx, + u32 num_iqs, u32 num_oqs); + +irqreturn_t liquidio_msix_intr_handler(int irq __attribute__((unused)), + void *dev); + +int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs); + /** * \brief Register ethtool operations * @param netdev pointer to network device @@ -448,4 +457,30 @@ static inline void ifstate_reset(struct lio *lio, int state_flag) atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag))); } +/** + * \brief wait for all pending requests to complete + * @param oct Pointer to Octeon device + * + * Called during shutdown sequence + */ +static inline int wait_for_pending_requests(struct octeon_device *oct) +{ + int i, pcount = 0; + + for (i = 0; i < MAX_IO_PENDING_PKT_COUNT; i++) { + pcount = atomic_read( + &oct->response_list[OCTEON_ORDERED_SC_LIST] + .pending_req_count); + if (pcount) + schedule_timeout_uninterruptible(HZ / 10); + else + break; + } + + if (pcount) + return 1; + + return 0; +} + #endif diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c index 7b297f1f6dbe..1e0fbce86d60 100644 --- a/drivers/net/ethernet/cavium/liquidio/request_manager.c +++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c @@ -77,13 +77,6 @@ int octeon_init_instr_queue(struct octeon_device *oct, return 1; } - if (num_descs & (num_descs - 1)) { - dev_err(&oct->pci_dev->dev, - "Number of descriptors for instr queue %d not in power of 2.\n", - iq_no); - return 1; - } - q_size = (u32)conf->instr_type * num_descs; iq = oct->instr_queue[iq_no]; @@ -190,6 +183,10 @@ int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no) q_size = iq->max_count * desc_size; lio_dma_free(oct, (u32)q_size, iq->base_addr, iq->base_addr_dma); + oct->io_qmask.iq &= ~(1ULL << iq_no); + vfree(oct->instr_queue[iq_no]); + oct->instr_queue[iq_no] = NULL; + oct->num_iqs--; return 0; } return 1; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 49b80da51ba7..805ab45e9b5a 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -565,8 +565,10 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog, return true; default: bpf_warn_invalid_xdp_action(action); + /* fall through */ case XDP_ABORTED: trace_xdp_exception(nic->netdev, prog, action); + /* fall through */ case XDP_DROP: /* Check if it's a recycled page, if not * unmap the DMA mapping. 
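The wait_for_pending_requests() helper that octeon_network.h now provides as a shared inline (the VF's private copy is removed earlier in this patch) polls the OCTEON_ORDERED_SC_LIST pending_req_count at most MAX_IO_PENDING_PKT_COUNT (100) times, sleeping schedule_timeout_uninterruptible(HZ / 10), i.e. 100 ms, between polls; a teardown path that calls it can therefore block for roughly 100 x 100 ms = 10 s in the worst case before giving up and returning 1.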
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h index 57858522c33c..67d1a3230773 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h @@ -277,7 +277,6 @@ struct snd_queue { u16 xdp_free_cnt; bool is_xdp; -#define TSO_HEADER_SIZE 128 /* For TSO segment's header */ char *tso_hdrs; dma_addr_t tso_hdrs_phys; diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index 0bc6a4ffce30..6a015362c340 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -793,7 +793,9 @@ static struct attribute *cxgb3_attrs[] = { NULL }; -static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs }; +static const struct attribute_group cxgb3_attr_group = { + .attrs = cxgb3_attrs, +}; static ssize_t tm_attr_show(struct device *d, char *buf, int sched) @@ -880,7 +882,9 @@ static struct attribute *offload_attrs[] = { NULL }; -static struct attribute_group offload_attr_group = {.attrs = offload_attrs }; +static const struct attribute_group offload_attr_group = { + .attrs = offload_attrs, +}; /* * Sends an sk_buff to an offload queue driver diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 09ea62ee96d3..ea72d2d2e1b4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -104,13 +104,13 @@ enum dev_state { DEV_STATE_ERR }; -enum { +enum cc_pause { PAUSE_RX = 1 << 0, PAUSE_TX = 1 << 1, PAUSE_AUTONEG = 1 << 2 }; -enum { +enum cc_fec { FEC_AUTO = 1 << 0, /* IEEE 802.3 "automatic" */ FEC_RS = 1 << 1, /* Reed-Solomon */ FEC_BASER_RS = 1 << 2 /* BaseR/Reed-Solomon */ @@ -338,10 +338,12 @@ struct adapter_params { unsigned int sf_nsec; /* # of flash sectors */ unsigned int sf_fw_start; /* start of FW image in flash */ - unsigned int fw_vers; - unsigned int bs_vers; /* bootstrap version */ - unsigned int tp_vers; - unsigned int er_vers; /* expansion ROM version */ + unsigned int fw_vers; /* firmware version */ + unsigned int bs_vers; /* bootstrap version */ + unsigned int tp_vers; /* TP microcode version */ + unsigned int er_vers; /* expansion ROM version */ + unsigned int scfg_vers; /* Serial Configuration version */ + unsigned int vpd_vers; /* VPD Version */ u8 api_vers[7]; unsigned short mtus[NMTUS]; @@ -364,6 +366,7 @@ struct adapter_params { unsigned int max_ordird_qp; /* Max read depth per RDMA QP */ unsigned int max_ird_adapter; /* Max read depth per adapter */ bool fr_nsmr_tpte_wr_support; /* FW support for FR_NSMR_TPTE_WR */ + u8 fw_caps_support; /* 32-bit Port Capabilities */ /* MPS Buffer Group Map[per Port]. Bit i is set if buffer group i is * used by the Port @@ -437,18 +440,34 @@ struct trace_params { unsigned char port; }; +/* Firmware Port Capabilities types. 
*/ + +typedef u16 fw_port_cap16_t; /* 16-bit Port Capabilities integral value */ +typedef u32 fw_port_cap32_t; /* 32-bit Port Capabilities integral value */ + +enum fw_caps { + FW_CAPS_UNKNOWN = 0, /* 0'ed out initial state */ + FW_CAPS16 = 1, /* old Firmware: 16-bit Port Capabilities */ + FW_CAPS32 = 2, /* new Firmware: 32-bit Port Capabilities */ +}; + struct link_config { - unsigned short supported; /* link capabilities */ - unsigned short advertising; /* advertised capabilities */ - unsigned short lp_advertising; /* peer advertised capabilities */ - unsigned int requested_speed; /* speed user has requested */ - unsigned int speed; /* actual link speed */ - unsigned char requested_fc; /* flow control user has requested */ - unsigned char fc; /* actual link flow control */ - unsigned char auto_fec; /* Forward Error Correction: */ - unsigned char requested_fec; /* "automatic" (IEEE 802.3), */ - unsigned char fec; /* requested, and actual in use */ + fw_port_cap32_t pcaps; /* link capabilities */ + fw_port_cap32_t def_acaps; /* default advertised capabilities */ + fw_port_cap32_t acaps; /* advertised capabilities */ + fw_port_cap32_t lpacaps; /* peer advertised capabilities */ + + fw_port_cap32_t speed_caps; /* speed(s) user has requested */ + unsigned int speed; /* actual link speed (Mb/s) */ + + enum cc_pause requested_fc; /* flow control user has requested */ + enum cc_pause fc; /* actual link flow control */ + + enum cc_fec requested_fec; /* Forward Error Correction: */ + enum cc_fec fec; /* requested and actual in use */ + unsigned char autoneg; /* autonegotiating? */ + unsigned char link_ok; /* link up? */ unsigned char link_down_rc; /* link down reason */ }; @@ -1404,10 +1423,15 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, int t4_fl_pkt_align(struct adapter *adap); unsigned int t4_flash_cfg_addr(struct adapter *adapter); int t4_check_fw_version(struct adapter *adap); +int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size); int t4_get_fw_version(struct adapter *adapter, u32 *vers); int t4_get_bs_version(struct adapter *adapter, u32 *vers); int t4_get_tp_version(struct adapter *adapter, u32 *vers); int t4_get_exprom_version(struct adapter *adapter, u32 *vers); +int t4_get_scfg_version(struct adapter *adapter, u32 *vers); +int t4_get_vpd_version(struct adapter *adapter, u32 *vers); +int t4_get_version_info(struct adapter *adapter); +void t4_dump_version_info(struct adapter *adapter); int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, const u8 *fw_data, unsigned int fw_size, struct fw_hdr *card_fw, enum dev_state state, int *reset); @@ -1573,6 +1597,8 @@ int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox); void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl); int t4_update_port_info(struct port_info *pi); +int t4_get_link_params(struct port_info *pi, unsigned int *link_okp, + unsigned int *speedp, unsigned int *mtup); int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl); void t4_db_full(struct adapter *adapter); void t4_db_dropped(struct adapter *adapter); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 26eb00a45db1..a71af1e587e2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -533,17 +533,23 @@ static int from_fw_port_mod_type(enum fw_port_type port_type, static unsigned 
int speed_to_fw_caps(int speed) { if (speed == 100) - return FW_PORT_CAP_SPEED_100M; + return FW_PORT_CAP32_SPEED_100M; if (speed == 1000) - return FW_PORT_CAP_SPEED_1G; + return FW_PORT_CAP32_SPEED_1G; if (speed == 10000) - return FW_PORT_CAP_SPEED_10G; + return FW_PORT_CAP32_SPEED_10G; if (speed == 25000) - return FW_PORT_CAP_SPEED_25G; + return FW_PORT_CAP32_SPEED_25G; if (speed == 40000) - return FW_PORT_CAP_SPEED_40G; + return FW_PORT_CAP32_SPEED_40G; + if (speed == 50000) + return FW_PORT_CAP32_SPEED_50G; if (speed == 100000) - return FW_PORT_CAP_SPEED_100G; + return FW_PORT_CAP32_SPEED_100G; + if (speed == 200000) + return FW_PORT_CAP32_SPEED_200G; + if (speed == 400000) + return FW_PORT_CAP32_SPEED_400G; return 0; } @@ -560,12 +566,13 @@ static void fw_caps_to_lmm(enum fw_port_type port_type, unsigned int fw_caps, unsigned long *link_mode_mask) { - #define SET_LMM(__lmm_name) __set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name \ - ## _BIT, link_mode_mask) + #define SET_LMM(__lmm_name) \ + __set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \ + link_mode_mask) #define FW_CAPS_TO_LMM(__fw_name, __lmm_name) \ do { \ - if (fw_caps & FW_PORT_CAP_ ## __fw_name) \ + if (fw_caps & FW_PORT_CAP32_ ## __fw_name) \ SET_LMM(__lmm_name); \ } while (0) @@ -645,7 +652,10 @@ static void fw_caps_to_lmm(enum fw_port_type port_type, case FW_PORT_TYPE_KR4_100G: case FW_PORT_TYPE_CR4_QSFP: SET_LMM(FIBRE); - SET_LMM(100000baseCR4_Full); + FW_CAPS_TO_LMM(SPEED_40G, 40000baseSR4_Full); + FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full); + FW_CAPS_TO_LMM(SPEED_50G, 50000baseCR2_Full); + FW_CAPS_TO_LMM(SPEED_100G, 100000baseCR4_Full); break; default: @@ -663,8 +673,7 @@ static void fw_caps_to_lmm(enum fw_port_type port_type, /** * lmm_to_fw_caps - translate ethtool Link Mode Mask to Firmware * capabilities - * - * @link_mode_mask: ethtool Link Mode Mask + * @et_lmm: ethtool Link Mode Mask * * Translate ethtool Link Mode Mask into a Firmware Port capabilities * value. @@ -677,7 +686,7 @@ static unsigned int lmm_to_fw_caps(const unsigned long *link_mode_mask) do { \ if (test_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \ link_mode_mask)) \ - fw_caps |= FW_PORT_CAP_ ## __fw_name; \ + fw_caps |= FW_PORT_CAP32_ ## __fw_name; \ } while (0) LMM_TO_FW_CAPS(100baseT_Full, SPEED_100M); @@ -685,6 +694,7 @@ static unsigned int lmm_to_fw_caps(const unsigned long *link_mode_mask) LMM_TO_FW_CAPS(10000baseT_Full, SPEED_10G); LMM_TO_FW_CAPS(40000baseSR4_Full, SPEED_40G); LMM_TO_FW_CAPS(25000baseCR_Full, SPEED_25G); + LMM_TO_FW_CAPS(50000baseCR2_Full, SPEED_50G); LMM_TO_FW_CAPS(100000baseCR4_Full, SPEED_100G); #undef LMM_TO_FW_CAPS @@ -698,10 +708,6 @@ static int get_link_ksettings(struct net_device *dev, struct port_info *pi = netdev_priv(dev); struct ethtool_link_settings *base = &link_ksettings->base; - ethtool_link_ksettings_zero_link_mode(link_ksettings, supported); - ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); - ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising); - /* For the nonce, the Firmware doesn't send up Port State changes * when the Virtual Interface attached to the Port is down. So * if it's down, let's grab any changes. 
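The move to 32-bit port capabilities gives speed_to_fw_caps() above room for 50G, 200G and 400G speed bits that the old 16-bit capability word did not carry. The sketch below illustrates how such a capability word is used: the same containment test that set_link_ksettings() applies further down when autoneg is disabled (reject a fixed speed unless its capability bit is present in lc->pcaps). The DEMO_* bit values and helper names are made up for this illustration and are not the driver's FW_PORT_CAP32_* definitions.

#include <stdio.h>

/* Made-up stand-ins for FW_PORT_CAP32_SPEED_* and speed_to_fw_caps();
 * only the validation pattern is the point here.
 */
#define DEMO_CAP_SPEED_1G	(1u << 1)
#define DEMO_CAP_SPEED_10G	(1u << 2)
#define DEMO_CAP_SPEED_25G	(1u << 3)
#define DEMO_CAP_SPEED_50G	(1u << 5)

static unsigned int demo_speed_to_caps(int speed_mbps)
{
	if (speed_mbps == 1000)
		return DEMO_CAP_SPEED_1G;
	if (speed_mbps == 10000)
		return DEMO_CAP_SPEED_10G;
	if (speed_mbps == 25000)
		return DEMO_CAP_SPEED_25G;
	if (speed_mbps == 50000)
		return DEMO_CAP_SPEED_50G;
	return 0;	/* unknown speed: no capability bit */
}

int main(void)
{
	unsigned int pcaps = DEMO_CAP_SPEED_10G | DEMO_CAP_SPEED_25G;	/* port capabilities */
	int requested[] = { 25000, 50000 };				/* fixed speeds, autoneg off */
	int i;

	for (i = 0; i < 2; i++) {
		unsigned int caps = demo_speed_to_caps(requested[i]);

		if (caps && (pcaps & caps))
			printf("%d Mb/s: supported, accept fixed speed\n", requested[i]);
		else
			printf("%d Mb/s: not in pcaps, reject with -EINVAL\n", requested[i]);
	}
	return 0;
}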
@@ -709,6 +715,10 @@ static int get_link_ksettings(struct net_device *dev, if (!netif_running(dev)) (void)t4_update_port_info(pi); + ethtool_link_ksettings_zero_link_mode(link_ksettings, supported); + ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); + ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising); + base->port = from_fw_port_mod_type(pi->port_type, pi->mod_type); if (pi->mdio_addr >= 0) { @@ -721,11 +731,11 @@ static int get_link_ksettings(struct net_device *dev, base->mdio_support = 0; } - fw_caps_to_lmm(pi->port_type, pi->link_cfg.supported, + fw_caps_to_lmm(pi->port_type, pi->link_cfg.pcaps, link_ksettings->link_modes.supported); - fw_caps_to_lmm(pi->port_type, pi->link_cfg.advertising, + fw_caps_to_lmm(pi->port_type, pi->link_cfg.acaps, link_ksettings->link_modes.advertising); - fw_caps_to_lmm(pi->port_type, pi->link_cfg.lp_advertising, + fw_caps_to_lmm(pi->port_type, pi->link_cfg.lpacaps, link_ksettings->link_modes.lp_advertising); if (netif_carrier_ok(dev)) { @@ -736,8 +746,24 @@ static int get_link_ksettings(struct net_device *dev, base->duplex = DUPLEX_UNKNOWN; } + if (pi->link_cfg.fc & PAUSE_RX) { + if (pi->link_cfg.fc & PAUSE_TX) { + ethtool_link_ksettings_add_link_mode(link_ksettings, + advertising, + Pause); + } else { + ethtool_link_ksettings_add_link_mode(link_ksettings, + advertising, + Asym_Pause); + } + } else if (pi->link_cfg.fc & PAUSE_TX) { + ethtool_link_ksettings_add_link_mode(link_ksettings, + advertising, + Asym_Pause); + } + base->autoneg = pi->link_cfg.autoneg; - if (pi->link_cfg.supported & FW_PORT_CAP_ANEG) + if (pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG) ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Autoneg); if (pi->link_cfg.autoneg) @@ -748,8 +774,7 @@ static int get_link_ksettings(struct net_device *dev, } static int set_link_ksettings(struct net_device *dev, - const struct ethtool_link_ksettings - *link_ksettings) + const struct ethtool_link_ksettings *link_ksettings) { struct port_info *pi = netdev_priv(dev); struct link_config *lc = &pi->link_cfg; @@ -762,12 +787,12 @@ static int set_link_ksettings(struct net_device *dev, if (base->duplex != DUPLEX_FULL) return -EINVAL; - if (!(lc->supported & FW_PORT_CAP_ANEG)) { + if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) { /* PHY offers a single speed. See if that's what's * being requested. */ if (base->autoneg == AUTONEG_DISABLE && - (lc->supported & speed_to_fw_caps(base->speed))) + (lc->pcaps & speed_to_fw_caps(base->speed))) return 0; return -EINVAL; } @@ -776,18 +801,17 @@ static int set_link_ksettings(struct net_device *dev, if (base->autoneg == AUTONEG_DISABLE) { fw_caps = speed_to_fw_caps(base->speed); - if (!(lc->supported & fw_caps)) + if (!(lc->pcaps & fw_caps)) return -EINVAL; - lc->requested_speed = fw_caps; - lc->advertising = 0; + lc->speed_caps = fw_caps; + lc->acaps = 0; } else { fw_caps = - lmm_to_fw_caps(link_ksettings->link_modes.advertising); - - if (!(lc->supported & fw_caps)) + lmm_to_fw_caps(link_ksettings->link_modes.advertising); + if (!(lc->pcaps & fw_caps)) return -EINVAL; - lc->requested_speed = 0; - lc->advertising = fw_caps | FW_PORT_CAP_ANEG; + lc->speed_caps = 0; + lc->acaps = fw_caps | FW_PORT_CAP32_ANEG; } lc->autoneg = base->autoneg; @@ -801,6 +825,104 @@ static int set_link_ksettings(struct net_device *dev, return ret; } +/* Translate the Firmware FEC value into the ethtool value. 
*/ +static inline unsigned int fwcap_to_eth_fec(unsigned int fw_fec) +{ + unsigned int eth_fec = 0; + + if (fw_fec & FW_PORT_CAP32_FEC_RS) + eth_fec |= ETHTOOL_FEC_RS; + if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS) + eth_fec |= ETHTOOL_FEC_BASER; + + /* if nothing is set, then FEC is off */ + if (!eth_fec) + eth_fec = ETHTOOL_FEC_OFF; + + return eth_fec; +} + +/* Translate Common Code FEC value into ethtool value. */ +static inline unsigned int cc_to_eth_fec(unsigned int cc_fec) +{ + unsigned int eth_fec = 0; + + if (cc_fec & FEC_AUTO) + eth_fec |= ETHTOOL_FEC_AUTO; + if (cc_fec & FEC_RS) + eth_fec |= ETHTOOL_FEC_RS; + if (cc_fec & FEC_BASER_RS) + eth_fec |= ETHTOOL_FEC_BASER; + + /* if nothing is set, then FEC is off */ + if (!eth_fec) + eth_fec = ETHTOOL_FEC_OFF; + + return eth_fec; +} + +/* Translate ethtool FEC value into Common Code value. */ +static inline unsigned int eth_to_cc_fec(unsigned int eth_fec) +{ + unsigned int cc_fec = 0; + + if (eth_fec & ETHTOOL_FEC_OFF) + return cc_fec; + + if (eth_fec & ETHTOOL_FEC_AUTO) + cc_fec |= FEC_AUTO; + if (eth_fec & ETHTOOL_FEC_RS) + cc_fec |= FEC_RS; + if (eth_fec & ETHTOOL_FEC_BASER) + cc_fec |= FEC_BASER_RS; + + return cc_fec; +} + +static int get_fecparam(struct net_device *dev, struct ethtool_fecparam *fec) +{ + const struct port_info *pi = netdev_priv(dev); + const struct link_config *lc = &pi->link_cfg; + + /* Translate the Firmware FEC Support into the ethtool value. We + * always support IEEE 802.3 "automatic" selection of Link FEC type if + * any FEC is supported. + */ + fec->fec = fwcap_to_eth_fec(lc->pcaps); + if (fec->fec != ETHTOOL_FEC_OFF) + fec->fec |= ETHTOOL_FEC_AUTO; + + /* Translate the current internal FEC parameters into the + * ethtool values. + */ + fec->active_fec = cc_to_eth_fec(lc->fec); + + return 0; +} + +static int set_fecparam(struct net_device *dev, struct ethtool_fecparam *fec) +{ + struct port_info *pi = netdev_priv(dev); + struct link_config *lc = &pi->link_cfg; + struct link_config old_lc; + int ret; + + /* Save old Link Configuration in case the L1 Configure below + * fails. + */ + old_lc = *lc; + + /* Try to perform the L1 Configure and return the result of that + * effort. If it fails, revert the attempted change. 
+ */ + lc->requested_fec = eth_to_cc_fec(fec->fec); + ret = t4_link_l1cfg(pi->adapter, pi->adapter->mbox, + pi->tx_chan, lc); + if (ret) + *lc = old_lc; + return ret; +} + static void get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) { @@ -819,7 +941,7 @@ static int set_pauseparam(struct net_device *dev, if (epause->autoneg == AUTONEG_DISABLE) lc->requested_fc = 0; - else if (lc->supported & FW_PORT_CAP_ANEG) + else if (lc->pcaps & FW_PORT_CAP32_ANEG) lc->requested_fc = PAUSE_AUTONEG; else return -EINVAL; @@ -1255,6 +1377,8 @@ static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, static const struct ethtool_ops cxgb_ethtool_ops = { .get_link_ksettings = get_link_ksettings, .set_link_ksettings = set_link_ksettings, + .get_fecparam = get_fecparam, + .set_fecparam = set_fecparam, .get_drvinfo = get_drvinfo, .get_msglevel = get_msglevel, .set_msglevel = set_msglevel, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 33bb8678833a..92d9d795d874 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -530,15 +530,22 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp, FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16)); if (cmd == FW_PORT_CMD && - action == FW_PORT_ACTION_GET_PORT_INFO) { + (action == FW_PORT_ACTION_GET_PORT_INFO || + action == FW_PORT_ACTION_GET_PORT_INFO32)) { int port = FW_PORT_CMD_PORTID_G( be32_to_cpu(pcmd->op_to_portid)); - struct net_device *dev = - q->adap->port[q->adap->chan_map[port]]; - int state_input = ((pcmd->u.info.dcbxdis_pkd & - FW_PORT_CMD_DCBXDIS_F) - ? CXGB4_DCB_INPUT_FW_DISABLED - : CXGB4_DCB_INPUT_FW_ENABLED); + struct net_device *dev; + int dcbxdis, state_input; + + dev = q->adap->port[q->adap->chan_map[port]]; + dcbxdis = (action == FW_PORT_ACTION_GET_PORT_INFO + ? !!(pcmd->u.info.dcbxdis_pkd & + FW_PORT_CMD_DCBXDIS_F) + : !!(pcmd->u.info32.lstatus32_to_cbllen32 & + FW_PORT_CMD_DCBXDIS32_F)); + state_input = (dcbxdis + ? 
CXGB4_DCB_INPUT_FW_DISABLED + : CXGB4_DCB_INPUT_FW_ENABLED); cxgb4_dcb_state_fsm(dev, state_input); } @@ -2672,11 +2679,10 @@ static int cxgb_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, { struct port_info *pi = netdev_priv(dev); struct adapter *adap = pi->adapter; - struct fw_port_cmd port_cmd, port_rpl; - u32 link_status, speed = 0; + unsigned int link_ok, speed, mtu; u32 fw_pfvf, fw_class; int class_id = vf; - int link_ok, ret; + int ret; u16 pktsize; if (vf >= adap->num_vfs) @@ -2688,41 +2694,18 @@ static int cxgb_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, min_tx_rate, vf); return -EINVAL; } - /* Retrieve link details for VF port */ - memset(&port_cmd, 0, sizeof(port_cmd)); - port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) | - FW_CMD_REQUEST_F | - FW_CMD_READ_F | - FW_PORT_CMD_PORTID_V(pi->port_id)); - port_cmd.action_to_len16 = - cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) | - FW_LEN16(port_cmd)); - ret = t4_wr_mbox(adap, adap->mbox, &port_cmd, sizeof(port_cmd), - &port_rpl); + + ret = t4_get_link_params(pi, &link_ok, &speed, &mtu); if (ret != FW_SUCCESS) { dev_err(adap->pdev_dev, - "Failed to get link status for VF %d\n", vf); + "Failed to get link information for VF %d\n", vf); return -EINVAL; } - link_status = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype); - link_ok = (link_status & FW_PORT_CMD_LSTATUS_F) != 0; + if (!link_ok) { dev_err(adap->pdev_dev, "Link down for VF %d\n", vf); return -EINVAL; } - /* Determine link speed */ - if (link_status & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M)) - speed = 100; - else if (link_status & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G)) - speed = 1000; - else if (link_status & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) - speed = 10000; - else if (link_status & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G)) - speed = 25000; - else if (link_status & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) - speed = 40000; - else if (link_status & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G)) - speed = 100000; if (max_tx_rate > speed) { dev_err(adap->pdev_dev, @@ -2730,7 +2713,8 @@ static int cxgb_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, max_tx_rate, vf, speed); return -EINVAL; } - pktsize = be16_to_cpu(port_rpl.u.info.mtu); + + pktsize = mtu; /* subtract ethhdr size and 4 bytes crc since, f/w appends it */ pktsize = pktsize - sizeof(struct ethhdr) - 4; /* subtract ipv4 hdr size, tcp hdr size to get typical IPv4 MSS size */ @@ -2741,7 +2725,7 @@ static int cxgb_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, SCHED_CLASS_MODE_CLASS, SCHED_CLASS_RATEUNIT_BITS, SCHED_CLASS_RATEMODE_ABS, - pi->port_id, class_id, 0, + pi->tx_chan, class_id, 0, max_tx_rate * 1000, 0, pktsize); if (ret) { dev_err(adap->pdev_dev, "Err %d for Traffic Class config\n", @@ -2889,15 +2873,30 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate) return err; } -static int cxgb_setup_tc(struct net_device *dev, u32 handle, u32 chain_index, - __be16 proto, struct tc_to_netdev *tc) +static int cxgb_setup_tc_cls_u32(struct net_device *dev, + struct tc_cls_u32_offload *cls_u32) +{ + if (!is_classid_clsact_ingress(cls_u32->common.classid) || + cls_u32->common.chain_index) + return -EOPNOTSUPP; + + switch (cls_u32->command) { + case TC_CLSU32_NEW_KNODE: + case TC_CLSU32_REPLACE_KNODE: + return cxgb4_config_knode(dev, cls_u32); + case TC_CLSU32_DELETE_KNODE: + return cxgb4_delete_knode(dev, cls_u32); + default: + return -EOPNOTSUPP; + } +} + +static int cxgb_setup_tc(struct 
net_device *dev, enum tc_setup_type type, + void *type_data) { struct port_info *pi = netdev2pinfo(dev); struct adapter *adap = netdev2adap(dev); - if (chain_index) - return -EOPNOTSUPP; - if (!(adap->flags & FULL_INIT_DONE)) { dev_err(adap->pdev_dev, "Failed to setup tc on port %d. Link Down?\n", @@ -2905,20 +2904,12 @@ static int cxgb_setup_tc(struct net_device *dev, u32 handle, u32 chain_index, return -EINVAL; } - if (TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS) && - tc->type == TC_SETUP_CLSU32) { - switch (tc->cls_u32->command) { - case TC_CLSU32_NEW_KNODE: - case TC_CLSU32_REPLACE_KNODE: - return cxgb4_config_knode(dev, proto, tc->cls_u32); - case TC_CLSU32_DELETE_KNODE: - return cxgb4_delete_knode(dev, proto, tc->cls_u32); - default: - return -EOPNOTSUPP; - } + switch (type) { + case TC_SETUP_CLSU32: + return cxgb_setup_tc_cls_u32(dev, type_data); + default: + return -EOPNOTSUPP; } - - return -EOPNOTSUPP; } static netdev_features_t cxgb_fix_features(struct net_device *dev, @@ -3610,11 +3601,8 @@ static int adap_init0(struct adapter *adap) * later reporting and B. to warn if the currently loaded firmware * is excessively mismatched relative to the driver.) */ - t4_get_fw_version(adap, &adap->params.fw_vers); - t4_get_bs_version(adap, &adap->params.bs_vers); - t4_get_tp_version(adap, &adap->params.tp_vers); - t4_get_exprom_version(adap, &adap->params.er_vers); + t4_get_version_info(adap); ret = t4_check_fw_version(adap); /* If firmware is too old (not supported by driver) force an update. */ if (ret) @@ -4204,8 +4192,9 @@ static inline bool is_x_10g_port(const struct link_config *lc) { unsigned int speeds, high_speeds; - speeds = FW_PORT_CAP_SPEED_V(FW_PORT_CAP_SPEED_G(lc->supported)); - high_speeds = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G); + speeds = FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_G(lc->pcaps)); + high_speeds = speeds & + ~(FW_PORT_CAP32_SPEED_100M | FW_PORT_CAP32_SPEED_1G); return high_speeds != 0; } @@ -4560,56 +4549,8 @@ static void cxgb4_check_pcie_caps(struct adapter *adap) /* Dump basic information about the adapter */ static void print_adapter_info(struct adapter *adapter) { - /* Device information */ - dev_info(adapter->pdev_dev, "Chelsio %s rev %d\n", - adapter->params.vpd.id, - CHELSIO_CHIP_RELEASE(adapter->params.chip)); - dev_info(adapter->pdev_dev, "S/N: %s, P/N: %s\n", - adapter->params.vpd.sn, adapter->params.vpd.pn); - - /* Firmware Version */ - if (!adapter->params.fw_vers) - dev_warn(adapter->pdev_dev, "No firmware loaded\n"); - else - dev_info(adapter->pdev_dev, "Firmware version: %u.%u.%u.%u\n", - FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers), - FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers), - FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers), - FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers)); - - /* Bootstrap Firmware Version. (Some adapters don't have Bootstrap - * Firmware, so dev_info() is more appropriate here.) 
- */ - if (!adapter->params.bs_vers) - dev_info(adapter->pdev_dev, "No bootstrap loaded\n"); - else - dev_info(adapter->pdev_dev, "Bootstrap version: %u.%u.%u.%u\n", - FW_HDR_FW_VER_MAJOR_G(adapter->params.bs_vers), - FW_HDR_FW_VER_MINOR_G(adapter->params.bs_vers), - FW_HDR_FW_VER_MICRO_G(adapter->params.bs_vers), - FW_HDR_FW_VER_BUILD_G(adapter->params.bs_vers)); - - /* TP Microcode Version */ - if (!adapter->params.tp_vers) - dev_warn(adapter->pdev_dev, "No TP Microcode loaded\n"); - else - dev_info(adapter->pdev_dev, - "TP Microcode version: %u.%u.%u.%u\n", - FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers), - FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers), - FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers), - FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers)); - - /* Expansion ROM version */ - if (!adapter->params.er_vers) - dev_info(adapter->pdev_dev, "No Expansion ROM loaded\n"); - else - dev_info(adapter->pdev_dev, - "Expansion ROM version: %u.%u.%u.%u\n", - FW_HDR_FW_VER_MAJOR_G(adapter->params.er_vers), - FW_HDR_FW_VER_MINOR_G(adapter->params.er_vers), - FW_HDR_FW_VER_MICRO_G(adapter->params.er_vers), - FW_HDR_FW_VER_BUILD_G(adapter->params.er_vers)); + /* Hardware/Firmware/etc. Version/Revision IDs */ + t4_dump_version_info(adapter); /* Software/Hardware configuration */ dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n", @@ -4634,18 +4575,24 @@ static void print_port_info(const struct net_device *dev) else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB) spd = " 8 GT/s"; - if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M) + if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M) bufp += sprintf(bufp, "100M/"); - if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G) + if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G) bufp += sprintf(bufp, "1G/"); - if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G) + if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_10G) bufp += sprintf(bufp, "10G/"); - if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_25G) + if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_25G) bufp += sprintf(bufp, "25G/"); - if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G) + if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_40G) bufp += sprintf(bufp, "40G/"); - if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100G) + if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_50G) + bufp += sprintf(bufp, "50G/"); + if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100G) bufp += sprintf(bufp, "100G/"); + if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_200G) + bufp += sprintf(bufp, "200G/"); + if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_400G) + bufp += sprintf(bufp, "400G/"); if (bufp != buf) --bufp; sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type)); @@ -4751,10 +4698,11 @@ static int config_mgmt_dev(struct pci_dev *pdev) pi = netdev_priv(netdev); pi->adapter = adap; - pi->port_id = adap->pf % adap->params.nports; + pi->tx_chan = adap->pf % adap->params.nports; SET_NETDEV_DEV(netdev, &pdev->dev); adap->port[0] = netdev; + pi->port_id = 0; err = register_netdev(adap->port[0]); if (err) { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c index ef06ce8247ab..48970ba08bdc 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c @@ -96,7 +96,7 @@ static int fill_action_fields(struct adapter *adap, LIST_HEAD(actions); exts = cls->knode.exts; - if (tc_no_actions(exts)) + if (!tcf_exts_has_actions(exts)) return -EINVAL; tcf_exts_to_list(exts, &actions); @@ -146,11 
+146,11 @@ static int fill_action_fields(struct adapter *adap, return 0; } -int cxgb4_config_knode(struct net_device *dev, __be16 protocol, - struct tc_cls_u32_offload *cls) +int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls) { const struct cxgb4_match_field *start, *link_start = NULL; struct adapter *adapter = netdev2adap(dev); + __be16 protocol = cls->common.protocol; struct ch_filter_specification fs; struct cxgb4_tc_u32_table *t; struct cxgb4_link *link; @@ -338,8 +338,7 @@ int cxgb4_config_knode(struct net_device *dev, __be16 protocol, return ret; } -int cxgb4_delete_knode(struct net_device *dev, __be16 protocol, - struct tc_cls_u32_offload *cls) +int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls) { struct adapter *adapter = netdev2adap(dev); unsigned int filter_id, max_tids, i, j; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h index 021261a41c13..70a07b7cca56 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h @@ -44,10 +44,8 @@ static inline bool can_tc_u32_offload(struct net_device *dev) return (dev->features & NETIF_F_HW_TC) && adap->tc_u32 ? true : false; } -int cxgb4_config_knode(struct net_device *dev, __be16 protocol, - struct tc_cls_u32_offload *cls); -int cxgb4_delete_knode(struct net_device *dev, __be16 protocol, - struct tc_cls_u32_offload *cls); +int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls); +int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls); void cxgb4_cleanup_tc_u32(struct adapter *adapter); struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap); diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c index 02acff741f11..9148abb7994c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sched.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c @@ -533,10 +533,10 @@ struct sched_table *t4_init_sched(unsigned int sched_size) void t4_cleanup_sched(struct adapter *adap) { struct sched_table *s; - unsigned int i; + unsigned int j, i; - for_each_port(adap, i) { - struct port_info *pi = netdev2pinfo(adap->port[i]); + for_each_port(adap, j) { + struct port_info *pi = netdev2pinfo(adap->port[j]); s = pi->sched_tbl; for (i = 0; i < s->sched_size; i++) { diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 0293b41171a5..b65ce26ff72f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -913,7 +913,8 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0xd010, 0xd03c, 0xdfc0, 0xdfe0, 0xe000, 0xea7c, - 0xf000, 0x11190, + 0xf000, 0x11110, + 0x11118, 0x11190, 0x19040, 0x1906c, 0x19078, 0x19080, 0x1908c, 0x190e4, @@ -1439,8 +1440,6 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x1ff00, 0x1ff84, 0x1ffc0, 0x1ffc8, 0x30000, 0x30030, - 0x30038, 0x30038, - 0x30040, 0x30040, 0x30100, 0x30144, 0x30190, 0x301a0, 0x301a8, 0x301b8, @@ -1551,8 +1550,6 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x33c3c, 0x33c50, 0x33cf0, 0x33cfc, 0x34000, 0x34030, - 0x34038, 0x34038, - 0x34040, 0x34040, 0x34100, 0x34144, 0x34190, 0x341a0, 0x341a8, 0x341b8, @@ -1663,8 +1660,6 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x37c3c, 0x37c50, 0x37cf0, 0x37cfc, 0x38000, 0x38030, - 0x38038, 0x38038, - 0x38040, 0x38040, 
0x38100, 0x38144, 0x38190, 0x381a0, 0x381a8, 0x381b8, @@ -1775,8 +1770,6 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x3bc3c, 0x3bc50, 0x3bcf0, 0x3bcfc, 0x3c000, 0x3c030, - 0x3c038, 0x3c038, - 0x3c040, 0x3c040, 0x3c100, 0x3c144, 0x3c190, 0x3c1a0, 0x3c1a8, 0x3c1b8, @@ -2040,12 +2033,8 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x1190, 0x1194, 0x11a0, 0x11a4, 0x11b0, 0x11b4, - 0x11fc, 0x1258, - 0x1280, 0x12d4, - 0x12d9, 0x12d9, - 0x12de, 0x12de, - 0x12e3, 0x12e3, - 0x12e8, 0x133c, + 0x11fc, 0x1274, + 0x1280, 0x133c, 0x1800, 0x18fc, 0x3000, 0x302c, 0x3060, 0x30b0, @@ -2076,6 +2065,9 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x5ea0, 0x5eb0, 0x5ec0, 0x5ec0, 0x5ec8, 0x5ed0, + 0x5ee0, 0x5ee0, + 0x5ef0, 0x5ef0, + 0x5f00, 0x5f00, 0x6000, 0x6020, 0x6028, 0x6040, 0x6058, 0x609c, @@ -2133,6 +2125,8 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0xd300, 0xd31c, 0xdfc0, 0xdfe0, 0xe000, 0xf008, + 0xf010, 0xf018, + 0xf020, 0xf028, 0x11000, 0x11014, 0x11048, 0x1106c, 0x11074, 0x11088, @@ -2256,13 +2250,6 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x1ff00, 0x1ff84, 0x1ffc0, 0x1ffc8, 0x30000, 0x30030, - 0x30038, 0x30038, - 0x30040, 0x30040, - 0x30048, 0x30048, - 0x30050, 0x30050, - 0x3005c, 0x30060, - 0x30068, 0x30068, - 0x30070, 0x30070, 0x30100, 0x30168, 0x30190, 0x301a0, 0x301a8, 0x301b8, @@ -2325,13 +2312,12 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x326a8, 0x326a8, 0x326ec, 0x326ec, 0x32a00, 0x32abc, - 0x32b00, 0x32b38, + 0x32b00, 0x32b18, + 0x32b20, 0x32b38, 0x32b40, 0x32b58, 0x32b60, 0x32b78, 0x32c00, 0x32c00, 0x32c08, 0x32c3c, - 0x32e00, 0x32e2c, - 0x32f00, 0x32f2c, 0x33000, 0x3302c, 0x33034, 0x33050, 0x33058, 0x33058, @@ -2396,13 +2382,6 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x33c38, 0x33c50, 0x33cf0, 0x33cfc, 0x34000, 0x34030, - 0x34038, 0x34038, - 0x34040, 0x34040, - 0x34048, 0x34048, - 0x34050, 0x34050, - 0x3405c, 0x34060, - 0x34068, 0x34068, - 0x34070, 0x34070, 0x34100, 0x34168, 0x34190, 0x341a0, 0x341a8, 0x341b8, @@ -2465,13 +2444,12 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x366a8, 0x366a8, 0x366ec, 0x366ec, 0x36a00, 0x36abc, - 0x36b00, 0x36b38, + 0x36b00, 0x36b18, + 0x36b20, 0x36b38, 0x36b40, 0x36b58, 0x36b60, 0x36b78, 0x36c00, 0x36c00, 0x36c08, 0x36c3c, - 0x36e00, 0x36e2c, - 0x36f00, 0x36f2c, 0x37000, 0x3702c, 0x37034, 0x37050, 0x37058, 0x37058, @@ -2545,8 +2523,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x40280, 0x40280, 0x40304, 0x40304, 0x40330, 0x4033c, - 0x41304, 0x413b8, - 0x413c0, 0x413c8, + 0x41304, 0x413c8, 0x413d0, 0x413dc, 0x413f0, 0x413f0, 0x41400, 0x4140c, @@ -3099,6 +3076,179 @@ int t4_get_exprom_version(struct adapter *adap, u32 *vers) return 0; } +/** + * t4_get_vpd_version - return the VPD version + * @adapter: the adapter + * @vers: where to place the version + * + * Reads the VPD via the Firmware interface (thus this can only be called + * once we're ready to issue Firmware commands). The format of the + * VPD version is adapter specific. Returns 0 on success, an error on + * failure. + * + * Note that early versions of the Firmware didn't include the ability + * to retrieve the VPD version, so we zero-out the return-value parameter + * in that case to avoid leaving it with garbage in it. 
+ * + * Also note that the Firmware will return its cached copy of the VPD + * Revision ID, not the actual Revision ID as written in the Serial + * EEPROM. This is only an issue if a new VPD has been written and the + * Firmware/Chip haven't yet gone through a RESET sequence. So it's best + * to defer calling this routine till after a FW_RESET_CMD has been issued + * if the Host Driver will be performing a full adapter initialization. + */ +int t4_get_vpd_version(struct adapter *adapter, u32 *vers) +{ + u32 vpdrev_param; + int ret; + + vpdrev_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_VPDREV)); + ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, + 1, &vpdrev_param, vers); + if (ret) + *vers = 0; + return ret; +} + +/** + * t4_get_scfg_version - return the Serial Configuration version + * @adapter: the adapter + * @vers: where to place the version + * + * Reads the Serial Configuration Version via the Firmware interface + * (thus this can only be called once we're ready to issue Firmware + * commands). The format of the Serial Configuration version is + * adapter specific. Returns 0 on success, an error on failure. + * + * Note that early versions of the Firmware didn't include the ability + * to retrieve the Serial Configuration version, so we zero-out the + * return-value parameter in that case to avoid leaving it with + * garbage in it. + * + * Also note that the Firmware will return its cached copy of the Serial + * Initialization Revision ID, not the actual Revision ID as written in + * the Serial EEPROM. This is only an issue if a new VPD has been written + * and the Firmware/Chip haven't yet gone through a RESET sequence. So + * it's best to defer calling this routine till after a FW_RESET_CMD has + * been issued if the Host Driver will be performing a full adapter + * initialization. + */ +int t4_get_scfg_version(struct adapter *adapter, u32 *vers) +{ + u32 scfgrev_param; + int ret; + + scfgrev_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_SCFGREV)); + ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, + 1, &scfgrev_param, vers); + if (ret) + *vers = 0; + return ret; +} + +/** + * t4_get_version_info - extract various chip/firmware version information + * @adapter: the adapter + * + * Reads various chip/firmware version numbers and stores them into the + * adapter Adapter Parameters structure. If any of the efforts fails + * the first failure will be returned, but all of the version numbers + * will be read. + */ +int t4_get_version_info(struct adapter *adapter) +{ + int ret = 0; + + #define FIRST_RET(__getvinfo) \ + do { \ + int __ret = __getvinfo; \ + if (__ret && !ret) \ + ret = __ret; \ + } while (0) + + FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers)); + FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers)); + FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers)); + FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers)); + FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers)); + FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers)); + + #undef FIRST_RET + return ret; +} + +/** + * t4_dump_version_info - dump all of the adapter configuration IDs + * @adapter: the adapter + * + * Dumps all of the various bits of adapter configuration version/revision + * IDs information. This is typically called at some point after + * t4_get_version_info() has been called. 
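The FIRST_RET() helper above implements a small "remember the first failure but still attempt every read" pattern, so one bad version query does not hide the others. A standalone sketch of the same idea, with hypothetical read_a()/read_b() functions standing in for the t4_get_*_version() calls:

/* Illustration only: first error wins, but every reader still runs. */
#include <stdio.h>

static int read_a(int *v) { *v = 1; return 0; }
static int read_b(int *v) { *v = 2; return -5; }   /* simulated failure */

static int read_all(int *a, int *b)
{
        int ret = 0, rc;

        rc = read_a(a);
        if (rc && !ret)
                ret = rc;
        rc = read_b(b);
        if (rc && !ret)
                ret = rc;
        return ret;     /* -5 here, yet both *a and *b were filled in */
}

int main(void)
{
        int a, b;

        printf("ret=%d a=%d b=%d\n", read_all(&a, &b), a, b);
        return 0;
}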
+ */ +void t4_dump_version_info(struct adapter *adapter) +{ + /* Device information */ + dev_info(adapter->pdev_dev, "Chelsio %s rev %d\n", + adapter->params.vpd.id, + CHELSIO_CHIP_RELEASE(adapter->params.chip)); + dev_info(adapter->pdev_dev, "S/N: %s, P/N: %s\n", + adapter->params.vpd.sn, adapter->params.vpd.pn); + + /* Firmware Version */ + if (!adapter->params.fw_vers) + dev_warn(adapter->pdev_dev, "No firmware loaded\n"); + else + dev_info(adapter->pdev_dev, "Firmware version: %u.%u.%u.%u\n", + FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers), + FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers), + FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers), + FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers)); + + /* Bootstrap Firmware Version. (Some adapters don't have Bootstrap + * Firmware, so dev_info() is more appropriate here.) + */ + if (!adapter->params.bs_vers) + dev_info(adapter->pdev_dev, "No bootstrap loaded\n"); + else + dev_info(adapter->pdev_dev, "Bootstrap version: %u.%u.%u.%u\n", + FW_HDR_FW_VER_MAJOR_G(adapter->params.bs_vers), + FW_HDR_FW_VER_MINOR_G(adapter->params.bs_vers), + FW_HDR_FW_VER_MICRO_G(adapter->params.bs_vers), + FW_HDR_FW_VER_BUILD_G(adapter->params.bs_vers)); + + /* TP Microcode Version */ + if (!adapter->params.tp_vers) + dev_warn(adapter->pdev_dev, "No TP Microcode loaded\n"); + else + dev_info(adapter->pdev_dev, + "TP Microcode version: %u.%u.%u.%u\n", + FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers), + FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers), + FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers), + FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers)); + + /* Expansion ROM version */ + if (!adapter->params.er_vers) + dev_info(adapter->pdev_dev, "No Expansion ROM loaded\n"); + else + dev_info(adapter->pdev_dev, + "Expansion ROM version: %u.%u.%u.%u\n", + FW_HDR_FW_VER_MAJOR_G(adapter->params.er_vers), + FW_HDR_FW_VER_MINOR_G(adapter->params.er_vers), + FW_HDR_FW_VER_MICRO_G(adapter->params.er_vers), + FW_HDR_FW_VER_BUILD_G(adapter->params.er_vers)); + + /* Serial Configuration version */ + dev_info(adapter->pdev_dev, "Serial Configuration version: %#x\n", + adapter->params.scfg_vers); + + /* VPD Version */ + dev_info(adapter->pdev_dev, "VPD version: %#x\n", + adapter->params.vpd_vers); +} + /** * t4_check_fw_version - check if the FW is supported with this driver * @adap: the adapter @@ -3685,16 +3835,143 @@ void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf) } } -#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ - FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_25G | \ - FW_PORT_CAP_SPEED_40G | FW_PORT_CAP_SPEED_100G | \ - FW_PORT_CAP_ANEG) +#define ADVERT_MASK (FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_M) | \ + FW_PORT_CAP32_ANEG) + +/** + * fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits + * @caps16: a 16-bit Port Capabilities value + * + * Returns the equivalent 32-bit Port Capabilities value. 
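Both fwcaps16_to_caps32() and the 32-to-16 inverse that follows are built on a short-lived token-pasting macro, which keeps the per-bit translation table readable and hard to get out of sync. A standalone sketch of the idiom with hypothetical OLD_/NEW_ flag values (only the technique, not the real FW_PORT_CAP bit layouts, is taken from this patch):

/* Illustration of the CAP16_TO_CAP32()-style translation idiom. */
#include <stdio.h>

#define OLD_FOO 0x1
#define OLD_BAR 0x2
#define NEW_FOO 0x10
#define NEW_BAR 0x20

static unsigned int old_to_new(unsigned int old)
{
        unsigned int new = 0;

#define OLD_TO_NEW(__flag) \
        do { \
                if (old & OLD_##__flag) \
                        new |= NEW_##__flag; \
        } while (0)

        OLD_TO_NEW(FOO);
        OLD_TO_NEW(BAR);

#undef OLD_TO_NEW
        return new;
}

int main(void)
{
        printf("%#x\n", old_to_new(OLD_FOO | OLD_BAR));        /* 0x30 */
        return 0;
}

Scoping the helper macro with #define/#undef inside the function is what lets the driver reuse the same name for both conversion directions without collisions.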
+ */ +static fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16) +{ + fw_port_cap32_t caps32 = 0; + + #define CAP16_TO_CAP32(__cap) \ + do { \ + if (caps16 & FW_PORT_CAP_##__cap) \ + caps32 |= FW_PORT_CAP32_##__cap; \ + } while (0) + + CAP16_TO_CAP32(SPEED_100M); + CAP16_TO_CAP32(SPEED_1G); + CAP16_TO_CAP32(SPEED_25G); + CAP16_TO_CAP32(SPEED_10G); + CAP16_TO_CAP32(SPEED_40G); + CAP16_TO_CAP32(SPEED_100G); + CAP16_TO_CAP32(FC_RX); + CAP16_TO_CAP32(FC_TX); + CAP16_TO_CAP32(ANEG); + CAP16_TO_CAP32(MDIX); + CAP16_TO_CAP32(MDIAUTO); + CAP16_TO_CAP32(FEC_RS); + CAP16_TO_CAP32(FEC_BASER_RS); + CAP16_TO_CAP32(802_3_PAUSE); + CAP16_TO_CAP32(802_3_ASM_DIR); + + #undef CAP16_TO_CAP32 + + return caps32; +} + +/** + * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits + * @caps32: a 32-bit Port Capabilities value + * + * Returns the equivalent 16-bit Port Capabilities value. Note that + * not all 32-bit Port Capabilities can be represented in the 16-bit + * Port Capabilities and some fields/values may not make it. + */ +static fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32) +{ + fw_port_cap16_t caps16 = 0; + + #define CAP32_TO_CAP16(__cap) \ + do { \ + if (caps32 & FW_PORT_CAP32_##__cap) \ + caps16 |= FW_PORT_CAP_##__cap; \ + } while (0) + + CAP32_TO_CAP16(SPEED_100M); + CAP32_TO_CAP16(SPEED_1G); + CAP32_TO_CAP16(SPEED_10G); + CAP32_TO_CAP16(SPEED_25G); + CAP32_TO_CAP16(SPEED_40G); + CAP32_TO_CAP16(SPEED_100G); + CAP32_TO_CAP16(FC_RX); + CAP32_TO_CAP16(FC_TX); + CAP32_TO_CAP16(802_3_PAUSE); + CAP32_TO_CAP16(802_3_ASM_DIR); + CAP32_TO_CAP16(ANEG); + CAP32_TO_CAP16(MDIX); + CAP32_TO_CAP16(MDIAUTO); + CAP32_TO_CAP16(FEC_RS); + CAP32_TO_CAP16(FEC_BASER_RS); + + #undef CAP32_TO_CAP16 + + return caps16; +} + +/* Translate Firmware Port Capabilities Pause specification to Common Code */ +static inline enum cc_pause fwcap_to_cc_pause(fw_port_cap32_t fw_pause) +{ + enum cc_pause cc_pause = 0; + + if (fw_pause & FW_PORT_CAP32_FC_RX) + cc_pause |= PAUSE_RX; + if (fw_pause & FW_PORT_CAP32_FC_TX) + cc_pause |= PAUSE_TX; + + return cc_pause; +} + +/* Translate Common Code Pause specification into Firmware Port Capabilities */ +static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause) +{ + fw_port_cap32_t fw_pause = 0; + + if (cc_pause & PAUSE_RX) + fw_pause |= FW_PORT_CAP32_FC_RX; + if (cc_pause & PAUSE_TX) + fw_pause |= FW_PORT_CAP32_FC_TX; + + return fw_pause; +} + +/* Translate Firmware Forward Error Correction specification to Common Code */ +static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec) +{ + enum cc_fec cc_fec = 0; + + if (fw_fec & FW_PORT_CAP32_FEC_RS) + cc_fec |= FEC_RS; + if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS) + cc_fec |= FEC_BASER_RS; + + return cc_fec; +} + +/* Translate Common Code Forward Error Correction specification to Firmware */ +static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec) +{ + fw_port_cap32_t fw_fec = 0; + + if (cc_fec & FEC_RS) + fw_fec |= FW_PORT_CAP32_FEC_RS; + if (cc_fec & FEC_BASER_RS) + fw_fec |= FW_PORT_CAP32_FEC_BASER_RS; + + return fw_fec; +} /** * t4_link_l1cfg - apply link configuration to MAC/PHY - * @phy: the PHY to setup - * @mac: the MAC to setup - * @lc: the requested link configuration + * @adapter: the adapter + * @mbox: the Firmware Mailbox to use + * @port: the Port ID + * @lc: the Port's Link Configuration * * Set up a port's MAC and PHY according to a desired link configuration. 
* - If the PHY can auto-negotiate first decide what to advertise, then @@ -3703,47 +3980,64 @@ void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf) * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC, * otherwise do it later based on the outcome of auto-negotiation. */ -int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port, - struct link_config *lc) +int t4_link_l1cfg(struct adapter *adapter, unsigned int mbox, + unsigned int port, struct link_config *lc) { - struct fw_port_cmd c; - unsigned int mdi = FW_PORT_CAP_MDI_V(FW_PORT_CAP_MDI_AUTO); - unsigned int fc = 0, fec = 0, fw_fec = 0; + unsigned int fw_caps = adapter->params.fw_caps_support; + struct fw_port_cmd cmd; + unsigned int fw_mdi = FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO); + fw_port_cap32_t fw_fc, cc_fec, fw_fec, rcap; lc->link_ok = 0; - if (lc->requested_fc & PAUSE_RX) - fc |= FW_PORT_CAP_FC_RX; - if (lc->requested_fc & PAUSE_TX) - fc |= FW_PORT_CAP_FC_TX; - fec = lc->requested_fec & FEC_AUTO ? lc->auto_fec : lc->requested_fec; + /* Convert driver coding of Pause Frame Flow Control settings into the + * Firmware's API. + */ + fw_fc = cc_to_fwcap_pause(lc->requested_fc); - if (fec & FEC_RS) - fw_fec |= FW_PORT_CAP_FEC_RS; - if (fec & FEC_BASER_RS) - fw_fec |= FW_PORT_CAP_FEC_BASER_RS; + /* Convert Common Code Forward Error Control settings into the + * Firmware's API. If the current Requested FEC has "Automatic" + * (IEEE 802.3) specified, then we use whatever the Firmware + * sent us as part of its IEEE 802.3-based interpretation of + * the Transceiver Module EPROM FEC parameters. Otherwise we + * use whatever is in the current Requested FEC settings. + */ + if (lc->requested_fec & FEC_AUTO) + cc_fec = fwcap_to_cc_fec(lc->def_acaps); + else + cc_fec = lc->requested_fec; + fw_fec = cc_to_fwcap_fec(cc_fec); - memset(&c, 0, sizeof(c)); - c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) | - FW_CMD_REQUEST_F | FW_CMD_EXEC_F | - FW_PORT_CMD_PORTID_V(port)); - c.action_to_len16 = - cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) | - FW_LEN16(c)); - - if (!(lc->supported & FW_PORT_CAP_ANEG)) { - c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) | - fc | fw_fec); - lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); + /* Figure out what our Requested Port Capabilities are going to be. + */ + if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) { + rcap = (lc->pcaps & ADVERT_MASK) | fw_fc | fw_fec; + lc->fc = lc->requested_fc & ~PAUSE_AUTONEG; + lc->fec = cc_fec; } else if (lc->autoneg == AUTONEG_DISABLE) { - c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc | - fw_fec | mdi); - lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); - } else - c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | - fw_fec | mdi); + rcap = lc->speed_caps | fw_fc | fw_fec | fw_mdi; + lc->fc = lc->requested_fc & ~PAUSE_AUTONEG; + lc->fec = cc_fec; + } else { + rcap = lc->acaps | fw_fc | fw_fec | fw_mdi; + } - return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); + /* And send that on to the Firmware ... + */ + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) | + FW_CMD_REQUEST_F | FW_CMD_EXEC_F | + FW_PORT_CMD_PORTID_V(port)); + cmd.action_to_len16 = + cpu_to_be32(FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16 + ?
FW_PORT_ACTION_L1_CFG + : FW_PORT_ACTION_L1_CFG32) | + FW_LEN16(cmd)); + if (fw_caps == FW_CAPS16) + cmd.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap)); + else + cmd.u.l1cfg32.rcap32 = cpu_to_be32(rcap); + return t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL); } /** @@ -3765,7 +4059,7 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port) c.action_to_len16 = cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) | FW_LEN16(c)); - c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG); + c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP32_ANEG); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } @@ -4254,6 +4548,18 @@ static void mps_intr_handler(struct adapter *adapter) { FRMERR_F, "MPS Tx framing error", -1, 1 }, { 0 } }; + static const struct intr_info t6_mps_tx_intr_info[] = { + { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 }, + { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 }, + { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error", + -1, 1 }, + { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error", + -1, 1 }, + /* MPS Tx Bubble is normal for T6 */ + { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 }, + { FRMERR_F, "MPS Tx framing error", -1, 1 }, + { 0 } + }; static const struct intr_info mps_trc_intr_info[] = { { FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 }, { PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error", @@ -4285,7 +4591,9 @@ static void mps_intr_handler(struct adapter *adapter) fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE_A, mps_rx_intr_info) + t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE_A, - mps_tx_intr_info) + + is_t6(adapter->params.chip) + ? t6_mps_tx_intr_info + : mps_tx_intr_info) + t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE_A, mps_trc_intr_info) + t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM_A, @@ -5693,10 +6001,8 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) p->tx_ppp7 = GET_STAT(TX_PORT_PPP7); if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) { - if (stat_ctl & COUNTPAUSESTATTX_F) { - p->tx_frames -= p->tx_pause; - p->tx_octets -= p->tx_pause * 64; - } + if (stat_ctl & COUNTPAUSESTATTX_F) + p->tx_frames_64 -= p->tx_pause; if (stat_ctl & COUNTPAUSEMCTX_F) p->tx_mcast_frames -= p->tx_pause; } @@ -5729,10 +6035,8 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) p->rx_ppp7 = GET_STAT(RX_PORT_PPP7); if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) { - if (stat_ctl & COUNTPAUSESTATRX_F) { - p->rx_frames -= p->rx_pause; - p->rx_octets -= p->rx_pause * 64; - } + if (stat_ctl & COUNTPAUSESTATRX_F) + p->rx_frames_64 -= p->rx_pause; if (stat_ctl & COUNTPAUSEMCRX_F) p->rx_mcast_frames -= p->rx_pause; } @@ -6448,6 +6752,17 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, if (ret < 0) goto out; + /* + * If there was a Firmware Configuration File stored in FLASH, + * there's a good chance that it won't be compatible with the new + * Firmware. In order to prevent difficult to diagnose adapter + * initialization issues, we clear out the Firmware Configuration File + * portion of the FLASH . The user will need to re-FLASH a new + * Firmware Configuration File which is compatible with the new + * Firmware if that's desired. 
+ */ + (void)t4_load_cfg(adap, NULL, 0); + /* * Older versions of the firmware don't understand the new * PCIE_FW.HALT flag and so won't know to perform a RESET when they @@ -7470,6 +7785,98 @@ static const char *t4_link_down_rc_str(unsigned char link_down_rc) return reason[link_down_rc]; } +/** + * Return the highest speed set in the port capabilities, in Mb/s. + */ +static unsigned int fwcap_to_speed(fw_port_cap32_t caps) +{ + #define TEST_SPEED_RETURN(__caps_speed, __speed) \ + do { \ + if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \ + return __speed; \ + } while (0) + + TEST_SPEED_RETURN(400G, 400000); + TEST_SPEED_RETURN(200G, 200000); + TEST_SPEED_RETURN(100G, 100000); + TEST_SPEED_RETURN(50G, 50000); + TEST_SPEED_RETURN(40G, 40000); + TEST_SPEED_RETURN(25G, 25000); + TEST_SPEED_RETURN(10G, 10000); + TEST_SPEED_RETURN(1G, 1000); + TEST_SPEED_RETURN(100M, 100); + + #undef TEST_SPEED_RETURN + + return 0; +} + +/** + * fwcap_to_fwspeed - return highest speed in Port Capabilities + * @acaps: advertised Port Capabilities + * + * Get the highest speed for the port from the advertised Port + * Capabilities. It will be either the highest speed from the list of + * speeds or whatever user has set using ethtool. + */ +static fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps) +{ + #define TEST_SPEED_RETURN(__caps_speed) \ + do { \ + if (acaps & FW_PORT_CAP32_SPEED_##__caps_speed) \ + return FW_PORT_CAP32_SPEED_##__caps_speed; \ + } while (0) + + TEST_SPEED_RETURN(400G); + TEST_SPEED_RETURN(200G); + TEST_SPEED_RETURN(100G); + TEST_SPEED_RETURN(50G); + TEST_SPEED_RETURN(40G); + TEST_SPEED_RETURN(25G); + TEST_SPEED_RETURN(10G); + TEST_SPEED_RETURN(1G); + TEST_SPEED_RETURN(100M); + + #undef TEST_SPEED_RETURN + + return 0; +} + +/** + * lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities + * @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value + * + * Translates old FW_PORT_ACTION_GET_PORT_INFO lstatus field into new + * 32-bit Port Capabilities value. + */ +static fw_port_cap32_t lstatus_to_fwcap(u32 lstatus) +{ + fw_port_cap32_t linkattr = 0; + + /* Unfortunately the format of the Link Status in the old + * 16-bit Port Information message isn't the same as the + * 16-bit Port Capabilities bitfield used everywhere else ... 
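fwcap_to_speed() above works by testing the speed bits strictly from highest to lowest, so the best capability present always wins. A compressed standalone sketch of that selection order, using local copies of three of the FW_PORT_CAP32_SPEED_* values defined in t4fw_api.h later in this patch (the real helper covers all nine speeds):

/* Highest-speed-wins selection, mirroring fwcap_to_speed(). */
#include <stdio.h>

#define CAP32_SPEED_1G    0x00000002UL
#define CAP32_SPEED_10G   0x00000004UL
#define CAP32_SPEED_100G  0x00000040UL

static unsigned int cap32_to_speed(unsigned long caps)
{
        if (caps & CAP32_SPEED_100G)
                return 100000;
        if (caps & CAP32_SPEED_10G)
                return 10000;
        if (caps & CAP32_SPEED_1G)
                return 1000;
        return 0;       /* no recognized speed bit set */
}

int main(void)
{
        /* 10G and 100G both advertised: the 100G test runs first. */
        printf("%u Mb/s\n",
               cap32_to_speed(CAP32_SPEED_10G | CAP32_SPEED_100G));
        return 0;
}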
+ */ + if (lstatus & FW_PORT_CMD_RXPAUSE_F) + linkattr |= FW_PORT_CAP32_FC_RX; + if (lstatus & FW_PORT_CMD_TXPAUSE_F) + linkattr |= FW_PORT_CAP32_FC_TX; + if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M)) + linkattr |= FW_PORT_CAP32_SPEED_100M; + if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G)) + linkattr |= FW_PORT_CAP32_SPEED_1G; + if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) + linkattr |= FW_PORT_CAP32_SPEED_10G; + if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G)) + linkattr |= FW_PORT_CAP32_SPEED_25G; + if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) + linkattr |= FW_PORT_CAP32_SPEED_40G; + if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G)) + linkattr |= FW_PORT_CAP32_SPEED_100G; + + return linkattr; +} + /** * t4_handle_get_port_info - process a FW reply message * @pi: the port info @@ -7479,56 +7886,123 @@ static const char *t4_link_down_rc_str(unsigned char link_down_rc) */ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) { - const struct fw_port_cmd *p = (const void *)rpl; - struct adapter *adap = pi->adapter; + const struct fw_port_cmd *cmd = (const void *)rpl; + int action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16)); + struct adapter *adapter = pi->adapter; + struct link_config *lc = &pi->link_cfg; + int link_ok, linkdnrc; + enum fw_port_type port_type; + enum fw_port_module_type mod_type; + unsigned int speed, fc, fec; + fw_port_cap32_t pcaps, acaps, lpacaps, linkattr; - /* link/module state change message */ - int speed = 0, fc = 0; - struct link_config *lc; - u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype); - int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0; - u32 mod = FW_PORT_CMD_MODTYPE_G(stat); + /* Extract the various fields from the Port Information message. 
+ */ + switch (action) { + case FW_PORT_ACTION_GET_PORT_INFO: { + u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype); - if (stat & FW_PORT_CMD_RXPAUSE_F) - fc |= PAUSE_RX; - if (stat & FW_PORT_CMD_TXPAUSE_F) - fc |= PAUSE_TX; - if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M)) - speed = 100; - else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G)) - speed = 1000; - else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) - speed = 10000; - else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G)) - speed = 25000; - else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) - speed = 40000; - else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G)) - speed = 100000; - - lc = &pi->link_cfg; - - if (mod != pi->mod_type) { - pi->mod_type = mod; - t4_os_portmod_changed(adap, pi->port_id); + link_ok = (lstatus & FW_PORT_CMD_LSTATUS_F) != 0; + linkdnrc = FW_PORT_CMD_LINKDNRC_G(lstatus); + port_type = FW_PORT_CMD_PTYPE_G(lstatus); + mod_type = FW_PORT_CMD_MODTYPE_G(lstatus); + pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.pcap)); + acaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.acap)); + lpacaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.lpacap)); + linkattr = lstatus_to_fwcap(lstatus); + break; } - if (link_ok != lc->link_ok || speed != lc->speed || - fc != lc->fc) { /* something changed */ - if (!link_ok && lc->link_ok) { - unsigned char rc = FW_PORT_CMD_LINKDNRC_G(stat); - lc->link_down_rc = rc; - dev_warn(adap->pdev_dev, - "Port %d link down, reason: %s\n", - pi->port_id, t4_link_down_rc_str(rc)); + case FW_PORT_ACTION_GET_PORT_INFO32: { + u32 lstatus32; + + lstatus32 = be32_to_cpu(cmd->u.info32.lstatus32_to_cbllen32); + link_ok = (lstatus32 & FW_PORT_CMD_LSTATUS32_F) != 0; + linkdnrc = FW_PORT_CMD_LINKDNRC32_G(lstatus32); + port_type = FW_PORT_CMD_PORTTYPE32_G(lstatus32); + mod_type = FW_PORT_CMD_MODTYPE32_G(lstatus32); + pcaps = be32_to_cpu(cmd->u.info32.pcaps32); + acaps = be32_to_cpu(cmd->u.info32.acaps32); + lpacaps = be32_to_cpu(cmd->u.info32.lpacaps32); + linkattr = be32_to_cpu(cmd->u.info32.linkattr32); + break; + } + + default: + dev_err(adapter->pdev_dev, "Handle Port Information: Bad Command/Action %#x\n", + be32_to_cpu(cmd->action_to_len16)); + return; + } + + fec = fwcap_to_cc_fec(acaps); + fc = fwcap_to_cc_pause(linkattr); + speed = fwcap_to_speed(linkattr); + + if (mod_type != pi->mod_type) { + /* With the newer SFP28 and QSFP28 Transceiver Module Types, + * various fundamental Port Capabilities which used to be + * immutable can now change radically. We can now have + * Speeds, Auto-Negotiation, Forward Error Correction, etc. + * all change based on what Transceiver Module is inserted. + * So we need to record the Physical "Port" Capabilities on + * every Transceiver Module change. + */ + lc->pcaps = pcaps; + + /* When a new Transceiver Module is inserted, the Firmware + * will examine its i2c EPROM to determine its type and + * general operating parameters including things like Forward + * Error Control, etc. Various IEEE 802.3 standards dictate + * how to interpret these i2c values to determine default + * "automatic" settings. We record these for future use when + * the user explicitly requests these standards-based values. + */ + lc->def_acaps = acaps; + + /* Some versions of the early T6 Firmware "cheated" when + * handling different Transceiver Modules by changing the + * underlying Port Type reported to the Host Drivers.
As + * such we need to capture whatever Port Type the Firmware + * sends us and record it in case it's different from what we + * were told earlier. Unfortunately, since Firmware is + * forever, we'll need to keep this code here forever, but in + * later T6 Firmware it should just be an assignment of the + * same value already recorded. + */ + pi->port_type = port_type; + + pi->mod_type = mod_type; + t4_os_portmod_changed(adapter, pi->port_id); + } + + if (link_ok != lc->link_ok || speed != lc->speed || + fc != lc->fc || fec != lc->fec) { /* something changed */ + if (!link_ok && lc->link_ok) { + lc->link_down_rc = linkdnrc; + dev_warn(adapter->pdev_dev, "Port %d link down, reason: %s\n", + pi->tx_chan, t4_link_down_rc_str(linkdnrc)); } lc->link_ok = link_ok; lc->speed = speed; lc->fc = fc; - lc->supported = be16_to_cpu(p->u.info.pcap); - lc->lp_advertising = be16_to_cpu(p->u.info.lpacap); + lc->fec = fec; - t4_os_link_changed(adap, pi->port_id, link_ok); + lc->lpacaps = lpacaps; + lc->acaps = acaps & ADVERT_MASK; + + if (lc->acaps & FW_PORT_CAP32_ANEG) { + lc->autoneg = AUTONEG_ENABLE; + } else { + /* When Autoneg is disabled, user needs to set + * single speed. + * Similar to cxgb4_ethtool.c: set_link_ksettings + */ + lc->acaps = 0; + lc->speed_caps = fwcap_to_fwspeed(acaps); + lc->autoneg = AUTONEG_DISABLE; + } + + t4_os_link_changed(adapter, pi->port_id, link_ok); } } @@ -7542,15 +8016,18 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) */ int t4_update_port_info(struct port_info *pi) { + unsigned int fw_caps = pi->adapter->params.fw_caps_support; struct fw_port_cmd port_cmd; int ret; memset(&port_cmd, 0, sizeof(port_cmd)); port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) | FW_CMD_REQUEST_F | FW_CMD_READ_F | - FW_PORT_CMD_PORTID_V(pi->port_id)); + FW_PORT_CMD_PORTID_V(pi->tx_chan)); port_cmd.action_to_len16 = cpu_to_be32( - FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) | + FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16 + ? FW_PORT_ACTION_GET_PORT_INFO + : FW_PORT_ACTION_GET_PORT_INFO32) | FW_LEN16(port_cmd)); ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox, &port_cmd, sizeof(port_cmd), &port_cmd); @@ -7561,6 +8038,65 @@ int t4_update_port_info(struct port_info *pi) return 0; } +/** + * t4_get_link_params - retrieve basic link parameters for given port + * @pi: the port + * @link_okp: value return pointer for link up/down + * @speedp: value return pointer for speed (Mb/s) + * @mtup: value return pointer for mtu + * + * Retrieves basic link parameters for a port: link up/down, speed (Mb/s), + * and MTU for a specified port. A negative error is returned on + * failure; 0 on success. + */ +int t4_get_link_params(struct port_info *pi, unsigned int *link_okp, + unsigned int *speedp, unsigned int *mtup) +{ + unsigned int fw_caps = pi->adapter->params.fw_caps_support; + struct fw_port_cmd port_cmd; + unsigned int action, link_ok, speed, mtu; + fw_port_cap32_t linkattr; + int ret; + + memset(&port_cmd, 0, sizeof(port_cmd)); + port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) | + FW_CMD_REQUEST_F | FW_CMD_READ_F | + FW_PORT_CMD_PORTID_V(pi->tx_chan)); + action = (fw_caps == FW_CAPS16 + ? 
FW_PORT_ACTION_GET_PORT_INFO + : FW_PORT_ACTION_GET_PORT_INFO32); + port_cmd.action_to_len16 = cpu_to_be32( + FW_PORT_CMD_ACTION_V(action) | + FW_LEN16(port_cmd)); + ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox, + &port_cmd, sizeof(port_cmd), &port_cmd); + if (ret) + return ret; + + if (action == FW_PORT_ACTION_GET_PORT_INFO) { + u32 lstatus = be32_to_cpu(port_cmd.u.info.lstatus_to_modtype); + + link_ok = !!(lstatus & FW_PORT_CMD_LSTATUS_F); + linkattr = lstatus_to_fwcap(lstatus); + mtu = be16_to_cpu(port_cmd.u.info.mtu); + } else { + u32 lstatus32 = + be32_to_cpu(port_cmd.u.info32.lstatus32_to_cbllen32); + + link_ok = !!(lstatus32 & FW_PORT_CMD_LSTATUS32_F); + linkattr = be32_to_cpu(port_cmd.u.info32.linkattr32); + mtu = FW_PORT_CMD_MTU32_G( + be32_to_cpu(port_cmd.u.info32.auxlinfo32_mtu32)); + } + speed = fwcap_to_speed(linkattr); + + *link_okp = link_ok; + *speedp = fwcap_to_speed(linkattr); + *mtup = mtu; + + return 0; +} + /** * t4_handle_fw_rpl - process a FW reply message * @adap: the adapter @@ -7581,7 +8117,9 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl) unsigned int action = FW_PORT_CMD_ACTION_G(be32_to_cpu(p->action_to_len16)); - if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) { + if (opcode == FW_PORT_CMD && + (action == FW_PORT_ACTION_GET_PORT_INFO || + action == FW_PORT_ACTION_GET_PORT_INFO32)) { int i; int chan = FW_PORT_CMD_PORTID_G(be32_to_cpu(p->op_to_portid)); struct port_info *pi = NULL; @@ -7594,7 +8132,8 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl) t4_handle_get_port_info(pi, rpl); } else { - dev_warn(adap->pdev_dev, "Unknown firmware reply %d\n", opcode); + dev_warn(adap->pdev_dev, "Unknown firmware reply %d\n", + opcode); return -EINVAL; } return 0; @@ -7613,38 +8152,35 @@ static void get_pci_mode(struct adapter *adapter, struct pci_params *p) /** * init_link_config - initialize a link's SW state - * @lc: structure holding the link state - * @caps: link capabilities + * @lc: pointer to structure holding the link state + * @pcaps: link Port Capabilities + * @acaps: link current Advertised Port Capabilities * * Initializes the SW state maintained for each link, including the link's * capabilities and default speed/flow-control/autonegotiation settings. */ -static void init_link_config(struct link_config *lc, unsigned int pcaps, - unsigned int acaps) +static void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps, + fw_port_cap32_t acaps) { - lc->supported = pcaps; - lc->lp_advertising = 0; - lc->requested_speed = 0; + lc->pcaps = pcaps; + lc->def_acaps = acaps; + lc->lpacaps = 0; + lc->speed_caps = 0; lc->speed = 0; lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; - lc->auto_fec = 0; /* For Forward Error Control, we default to whatever the Firmware * tells us the Link is currently advertising. 
*/ - if (acaps & FW_PORT_CAP_FEC_RS) - lc->auto_fec |= FEC_RS; - if (acaps & FW_PORT_CAP_FEC_BASER_RS) - lc->auto_fec |= FEC_BASER_RS; lc->requested_fec = FEC_AUTO; - lc->fec = lc->auto_fec; + lc->fec = fwcap_to_cc_fec(lc->def_acaps); - if (lc->supported & FW_PORT_CAP_ANEG) { - lc->advertising = lc->supported & ADVERT_MASK; + if (lc->pcaps & FW_PORT_CAP32_ANEG) { + lc->acaps = lc->pcaps & ADVERT_MASK; lc->autoneg = AUTONEG_ENABLE; lc->requested_fc |= PAUSE_AUTONEG; } else { - lc->advertising = 0; + lc->acaps = 0; lc->autoneg = AUTONEG_DISABLE; } } @@ -8169,7 +8705,7 @@ int t4_init_rss_mode(struct adapter *adap, int mbox) } /** - * t4_init_portinfo - allocate a virtual interface amd initialize port_info + * t4_init_portinfo - allocate a virtual interface and initialize port_info * @pi: the port_info * @mbox: mailbox to use for the FW command * @port: physical port associated with the VI @@ -8185,21 +8721,67 @@ int t4_init_rss_mode(struct adapter *adap, int mbox) int t4_init_portinfo(struct port_info *pi, int mbox, int port, int pf, int vf, u8 mac[]) { - int ret; - struct fw_port_cmd c; + struct adapter *adapter = pi->adapter; + unsigned int fw_caps = adapter->params.fw_caps_support; + struct fw_port_cmd cmd; unsigned int rss_size; + enum fw_port_type port_type; + int mdio_addr; + fw_port_cap32_t pcaps, acaps; + int ret; - memset(&c, 0, sizeof(c)); - c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) | - FW_CMD_REQUEST_F | FW_CMD_READ_F | - FW_PORT_CMD_PORTID_V(port)); - c.action_to_len16 = cpu_to_be32( - FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) | - FW_LEN16(c)); - ret = t4_wr_mbox(pi->adapter, mbox, &c, sizeof(c), &c); + /* If we haven't yet determined whether we're talking to Firmware + * which knows the new 32-bit Port Capabilities, it's time to find + * out now. This will also tell new Firmware to send us Port Status + * Updates using the new 32-bit Port Capabilities version of the + * Port Information message. + */ + if (fw_caps == FW_CAPS_UNKNOWN) { + u32 param, val; + + param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_PORT_CAPS32)); + val = 1; + ret = t4_set_params(adapter, mbox, pf, vf, 1, ¶m, &val); + fw_caps = (ret == 0 ? FW_CAPS32 : FW_CAPS16); + adapter->params.fw_caps_support = fw_caps; + } + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) | + FW_CMD_REQUEST_F | FW_CMD_READ_F | + FW_PORT_CMD_PORTID_V(port)); + cmd.action_to_len16 = cpu_to_be32( + FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16 + ? FW_PORT_ACTION_GET_PORT_INFO + : FW_PORT_ACTION_GET_PORT_INFO32) | + FW_LEN16(cmd)); + ret = t4_wr_mbox(pi->adapter, mbox, &cmd, sizeof(cmd), &cmd); if (ret) return ret; + /* Extract the various fields from the Port Information message. + */ + if (fw_caps == FW_CAPS16) { + u32 lstatus = be32_to_cpu(cmd.u.info.lstatus_to_modtype); + + port_type = FW_PORT_CMD_PTYPE_G(lstatus); + mdio_addr = ((lstatus & FW_PORT_CMD_MDIOCAP_F) + ? FW_PORT_CMD_MDIOADDR_G(lstatus) + : -1); + pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd.u.info.pcap)); + acaps = fwcaps16_to_caps32(be16_to_cpu(cmd.u.info.acap)); + } else { + u32 lstatus32 = be32_to_cpu(cmd.u.info32.lstatus32_to_cbllen32); + + port_type = FW_PORT_CMD_PORTTYPE32_G(lstatus32); + mdio_addr = ((lstatus32 & FW_PORT_CMD_MDIOCAP32_F) + ? 
FW_PORT_CMD_MDIOADDR32_G(lstatus32) + : -1); + pcaps = be32_to_cpu(cmd.u.info32.pcaps32); + acaps = be32_to_cpu(cmd.u.info32.acaps32); + } + ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac, &rss_size); if (ret < 0) return ret; @@ -8209,14 +8791,11 @@ int t4_init_portinfo(struct port_info *pi, int mbox, pi->lport = port; pi->rss_size = rss_size; - ret = be32_to_cpu(c.u.info.lstatus_to_modtype); - pi->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP_F) ? - FW_PORT_CMD_MDIOADDR_G(ret) : -1; - pi->port_type = FW_PORT_CMD_PTYPE_G(ret); + pi->port_type = port_type; + pi->mdio_addr = mdio_addr; pi->mod_type = FW_PORT_MOD_TYPE_NA; - init_link_config(&pi->link_cfg, be16_to_cpu(c.u.info.pcap), - be16_to_cpu(c.u.info.acap)); + init_link_config(&pi->link_cfg, pcaps, acaps); return 0; } @@ -8663,6 +9242,65 @@ void t4_idma_monitor(struct adapter *adapter, } } +/** + * t4_load_cfg - download config file + * @adap: the adapter + * @cfg_data: the cfg text file to write + * @size: text file size + * + * Write the supplied config text file to the card's serial flash. + */ +int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size) +{ + int ret, i, n, cfg_addr; + unsigned int addr; + unsigned int flash_cfg_start_sec; + unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; + + cfg_addr = t4_flash_cfg_addr(adap); + if (cfg_addr < 0) + return cfg_addr; + + addr = cfg_addr; + flash_cfg_start_sec = addr / SF_SEC_SIZE; + + if (size > FLASH_CFG_MAX_SIZE) { + dev_err(adap->pdev_dev, "cfg file too large, max is %u bytes\n", + FLASH_CFG_MAX_SIZE); + return -EFBIG; + } + + i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */ + sf_sec_size); + ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec, + flash_cfg_start_sec + i - 1); + /* If size == 0 then we're simply erasing the FLASH sectors associated + * with the on-adapter Firmware Configuration File. + */ + if (ret || size == 0) + goto out; + + /* this will write to the flash up to SF_PAGE_SIZE at a time */ + for (i = 0; i < size; i += SF_PAGE_SIZE) { + if ((size - i) < SF_PAGE_SIZE) + n = size - i; + else + n = SF_PAGE_SIZE; + ret = t4_write_flash(adap, addr, n, cfg_data); + if (ret) + goto out; + + addr += SF_PAGE_SIZE; + cfg_data += SF_PAGE_SIZE; + } + +out: + if (ret) + dev_err(adap->pdev_dev, "config file %s failed %d\n", + (size == 0 ? 
"clear" : "download"), ret); + return ret; +} + /** * t4_set_vf_mac - Set MAC address for the specified VF * @adapter: The adapter diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 0ebed64d62d3..ca2756dcefc5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -1124,6 +1124,8 @@ enum fw_params_param_dev { FW_PARAMS_PARAM_DEV_MAXIRD_ADAPTER = 0x14, /* max supported adap IRD */ FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17, FW_PARAMS_PARAM_DEV_FWCACHE = 0x18, + FW_PARAMS_PARAM_DEV_SCFGREV = 0x1A, + FW_PARAMS_PARAM_DEV_VPDREV = 0x1B, FW_PARAMS_PARAM_DEV_RI_FR_NSMR_TPTE_WR = 0x1C, FW_PARAMS_PARAM_DEV_MPSBGMAP = 0x1E, }; @@ -1171,7 +1173,8 @@ enum fw_params_param_pfvf { FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_END = 0x2E, FW_PARAMS_PARAM_PFVF_ETHOFLD_END = 0x30, FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP = 0x31, - FW_PARAMS_PARAM_PFVF_NCRYPTO_LOOKASIDE = 0x32 + FW_PARAMS_PARAM_PFVF_NCRYPTO_LOOKASIDE = 0x32, + FW_PARAMS_PARAM_PFVF_PORT_CAPS32 = 0x3A, }; /* @@ -2254,6 +2257,7 @@ struct fw_acl_vlan_cmd { #define FW_ACL_VLAN_CMD_FM_S 6 #define FW_ACL_VLAN_CMD_FM_V(x) ((x) << FW_ACL_VLAN_CMD_FM_S) +/* old 16-bit port capabilities bitmap (fw_port_cap16_t) */ enum fw_port_cap { FW_PORT_CAP_SPEED_100M = 0x0001, FW_PORT_CAP_SPEED_1G = 0x0002, @@ -2289,6 +2293,84 @@ enum fw_port_mdi { #define FW_PORT_CAP_MDI_S 9 #define FW_PORT_CAP_MDI_V(x) ((x) << FW_PORT_CAP_MDI_S) +/* new 32-bit port capabilities bitmap (fw_port_cap32_t) */ +#define FW_PORT_CAP32_SPEED_100M 0x00000001UL +#define FW_PORT_CAP32_SPEED_1G 0x00000002UL +#define FW_PORT_CAP32_SPEED_10G 0x00000004UL +#define FW_PORT_CAP32_SPEED_25G 0x00000008UL +#define FW_PORT_CAP32_SPEED_40G 0x00000010UL +#define FW_PORT_CAP32_SPEED_50G 0x00000020UL +#define FW_PORT_CAP32_SPEED_100G 0x00000040UL +#define FW_PORT_CAP32_SPEED_200G 0x00000080UL +#define FW_PORT_CAP32_SPEED_400G 0x00000100UL +#define FW_PORT_CAP32_SPEED_RESERVED1 0x00000200UL +#define FW_PORT_CAP32_SPEED_RESERVED2 0x00000400UL +#define FW_PORT_CAP32_SPEED_RESERVED3 0x00000800UL +#define FW_PORT_CAP32_RESERVED1 0x0000f000UL +#define FW_PORT_CAP32_FC_RX 0x00010000UL +#define FW_PORT_CAP32_FC_TX 0x00020000UL +#define FW_PORT_CAP32_802_3_PAUSE 0x00040000UL +#define FW_PORT_CAP32_802_3_ASM_DIR 0x00080000UL +#define FW_PORT_CAP32_ANEG 0x00100000UL +#define FW_PORT_CAP32_MDIX 0x00200000UL +#define FW_PORT_CAP32_MDIAUTO 0x00400000UL +#define FW_PORT_CAP32_FEC_RS 0x00800000UL +#define FW_PORT_CAP32_FEC_BASER_RS 0x01000000UL +#define FW_PORT_CAP32_FEC_RESERVED1 0x02000000UL +#define FW_PORT_CAP32_FEC_RESERVED2 0x04000000UL +#define FW_PORT_CAP32_FEC_RESERVED3 0x08000000UL +#define FW_PORT_CAP32_RESERVED2 0xf0000000UL + +#define FW_PORT_CAP32_SPEED_S 0 +#define FW_PORT_CAP32_SPEED_M 0xfff +#define FW_PORT_CAP32_SPEED_V(x) ((x) << FW_PORT_CAP32_SPEED_S) +#define FW_PORT_CAP32_SPEED_G(x) \ + (((x) >> FW_PORT_CAP32_SPEED_S) & FW_PORT_CAP32_SPEED_M) + +#define FW_PORT_CAP32_FC_S 16 +#define FW_PORT_CAP32_FC_M 0x3 +#define FW_PORT_CAP32_FC_V(x) ((x) << FW_PORT_CAP32_FC_S) +#define FW_PORT_CAP32_FC_G(x) \ + (((x) >> FW_PORT_CAP32_FC_S) & FW_PORT_CAP32_FC_M) + +#define FW_PORT_CAP32_802_3_S 18 +#define FW_PORT_CAP32_802_3_M 0x3 +#define FW_PORT_CAP32_802_3_V(x) ((x) << FW_PORT_CAP32_802_3_S) +#define FW_PORT_CAP32_802_3_G(x) \ + (((x) >> FW_PORT_CAP32_802_3_S) & FW_PORT_CAP32_802_3_M) + +#define FW_PORT_CAP32_ANEG_S 20 +#define FW_PORT_CAP32_ANEG_M 0x1 +#define FW_PORT_CAP32_ANEG_V(x) ((x) << 
FW_PORT_CAP32_ANEG_S) +#define FW_PORT_CAP32_ANEG_G(x) \ + (((x) >> FW_PORT_CAP32_ANEG_S) & FW_PORT_CAP32_ANEG_M) + +enum fw_port_mdi32 { + FW_PORT_CAP32_MDI_UNCHANGED, + FW_PORT_CAP32_MDI_AUTO, + FW_PORT_CAP32_MDI_F_STRAIGHT, + FW_PORT_CAP32_MDI_F_CROSSOVER +}; + +#define FW_PORT_CAP32_MDI_S 21 +#define FW_PORT_CAP32_MDI_M 3 +#define FW_PORT_CAP32_MDI_V(x) ((x) << FW_PORT_CAP32_MDI_S) +#define FW_PORT_CAP32_MDI_G(x) \ + (((x) >> FW_PORT_CAP32_MDI_S) & FW_PORT_CAP32_MDI_M) + +#define FW_PORT_CAP32_FEC_S 23 +#define FW_PORT_CAP32_FEC_M 0x1f +#define FW_PORT_CAP32_FEC_V(x) ((x) << FW_PORT_CAP32_FEC_S) +#define FW_PORT_CAP32_FEC_G(x) \ + (((x) >> FW_PORT_CAP32_FEC_S) & FW_PORT_CAP32_FEC_M) + +/* macros to isolate various 32-bit Port Capabilities sub-fields */ +#define CAP32_SPEED(__cap32) \ + (FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_M) & __cap32) + +#define CAP32_FEC(__cap32) \ + (FW_PORT_CAP32_FEC_V(FW_PORT_CAP32_FEC_M) & __cap32) + enum fw_port_action { FW_PORT_ACTION_L1_CFG = 0x0001, FW_PORT_ACTION_L2_CFG = 0x0002, @@ -2298,6 +2380,8 @@ enum fw_port_action { FW_PORT_ACTION_DCB_READ_TRANS = 0x0006, FW_PORT_ACTION_DCB_READ_RECV = 0x0007, FW_PORT_ACTION_DCB_READ_DET = 0x0008, + FW_PORT_ACTION_L1_CFG32 = 0x0009, + FW_PORT_ACTION_GET_PORT_INFO32 = 0x000a, FW_PORT_ACTION_LOW_PWR_TO_NORMAL = 0x0010, FW_PORT_ACTION_L1_LOW_PWR_EN = 0x0011, FW_PORT_ACTION_L2_WOL_MODE_EN = 0x0012, @@ -2445,6 +2529,18 @@ struct fw_port_cmd { __be64 r12; } control; } dcb; + struct fw_port_l1cfg32 { + __be32 rcap32; + __be32 r; + } l1cfg32; + struct fw_port_info32 { + __be32 lstatus32_to_cbllen32; + __be32 auxlinfo32_mtu32; + __be32 linkattr32; + __be32 pcaps32; + __be32 acaps32; + __be32 lpacaps32; + } info32; } u; }; @@ -2553,6 +2649,85 @@ struct fw_port_cmd { #define FW_PORT_CMD_DCB_VERSION_G(x) \ (((x) >> FW_PORT_CMD_DCB_VERSION_S) & FW_PORT_CMD_DCB_VERSION_M) +#define FW_PORT_CMD_LSTATUS32_S 31 +#define FW_PORT_CMD_LSTATUS32_M 0x1 +#define FW_PORT_CMD_LSTATUS32_V(x) ((x) << FW_PORT_CMD_LSTATUS32_S) +#define FW_PORT_CMD_LSTATUS32_G(x) \ + (((x) >> FW_PORT_CMD_LSTATUS32_S) & FW_PORT_CMD_LSTATUS32_M) +#define FW_PORT_CMD_LSTATUS32_F FW_PORT_CMD_LSTATUS32_V(1U) + +#define FW_PORT_CMD_LINKDNRC32_S 28 +#define FW_PORT_CMD_LINKDNRC32_M 0x7 +#define FW_PORT_CMD_LINKDNRC32_V(x) ((x) << FW_PORT_CMD_LINKDNRC32_S) +#define FW_PORT_CMD_LINKDNRC32_G(x) \ + (((x) >> FW_PORT_CMD_LINKDNRC32_S) & FW_PORT_CMD_LINKDNRC32_M) + +#define FW_PORT_CMD_DCBXDIS32_S 27 +#define FW_PORT_CMD_DCBXDIS32_M 0x1 +#define FW_PORT_CMD_DCBXDIS32_V(x) ((x) << FW_PORT_CMD_DCBXDIS32_S) +#define FW_PORT_CMD_DCBXDIS32_G(x) \ + (((x) >> FW_PORT_CMD_DCBXDIS32_S) & FW_PORT_CMD_DCBXDIS32_M) +#define FW_PORT_CMD_DCBXDIS32_F FW_PORT_CMD_DCBXDIS32_V(1U) + +#define FW_PORT_CMD_MDIOCAP32_S 26 +#define FW_PORT_CMD_MDIOCAP32_M 0x1 +#define FW_PORT_CMD_MDIOCAP32_V(x) ((x) << FW_PORT_CMD_MDIOCAP32_S) +#define FW_PORT_CMD_MDIOCAP32_G(x) \ + (((x) >> FW_PORT_CMD_MDIOCAP32_S) & FW_PORT_CMD_MDIOCAP32_M) +#define FW_PORT_CMD_MDIOCAP32_F FW_PORT_CMD_MDIOCAP32_V(1U) + +#define FW_PORT_CMD_MDIOADDR32_S 21 +#define FW_PORT_CMD_MDIOADDR32_M 0x1f +#define FW_PORT_CMD_MDIOADDR32_V(x) ((x) << FW_PORT_CMD_MDIOADDR32_S) +#define FW_PORT_CMD_MDIOADDR32_G(x) \ + (((x) >> FW_PORT_CMD_MDIOADDR32_S) & FW_PORT_CMD_MDIOADDR32_M) + +#define FW_PORT_CMD_PORTTYPE32_S 13 +#define FW_PORT_CMD_PORTTYPE32_M 0xff +#define FW_PORT_CMD_PORTTYPE32_V(x) ((x) << FW_PORT_CMD_PORTTYPE32_S) +#define FW_PORT_CMD_PORTTYPE32_G(x) \ + (((x) >> FW_PORT_CMD_PORTTYPE32_S) & FW_PORT_CMD_PORTTYPE32_M) + +#define 
FW_PORT_CMD_MODTYPE32_S 8 +#define FW_PORT_CMD_MODTYPE32_M 0x1f +#define FW_PORT_CMD_MODTYPE32_V(x) ((x) << FW_PORT_CMD_MODTYPE32_S) +#define FW_PORT_CMD_MODTYPE32_G(x) \ + (((x) >> FW_PORT_CMD_MODTYPE32_S) & FW_PORT_CMD_MODTYPE32_M) + +#define FW_PORT_CMD_CBLLEN32_S 0 +#define FW_PORT_CMD_CBLLEN32_M 0xff +#define FW_PORT_CMD_CBLLEN32_V(x) ((x) << FW_PORT_CMD_CBLLEN32_S) +#define FW_PORT_CMD_CBLLEN32_G(x) \ + (((x) >> FW_PORT_CMD_CBLLEN32_S) & FW_PORT_CMD_CBLLEN32_M) + +#define FW_PORT_CMD_AUXLINFO32_S 24 +#define FW_PORT_CMD_AUXLINFO32_M 0xff +#define FW_PORT_CMD_AUXLINFO32_V(x) ((x) << FW_PORT_CMD_AUXLINFO32_S) +#define FW_PORT_CMD_AUXLINFO32_G(x) \ + (((x) >> FW_PORT_CMD_AUXLINFO32_S) & FW_PORT_CMD_AUXLINFO32_M) + +#define FW_PORT_AUXLINFO32_KX4_S 2 +#define FW_PORT_AUXLINFO32_KX4_M 0x1 +#define FW_PORT_AUXLINFO32_KX4_V(x) \ + ((x) << FW_PORT_AUXLINFO32_KX4_S) +#define FW_PORT_AUXLINFO32_KX4_G(x) \ + (((x) >> FW_PORT_AUXLINFO32_KX4_S) & FW_PORT_AUXLINFO32_KX4_M) +#define FW_PORT_AUXLINFO32_KX4_F FW_PORT_AUXLINFO32_KX4_V(1U) + +#define FW_PORT_AUXLINFO32_KR_S 1 +#define FW_PORT_AUXLINFO32_KR_M 0x1 +#define FW_PORT_AUXLINFO32_KR_V(x) \ + ((x) << FW_PORT_AUXLINFO32_KR_S) +#define FW_PORT_AUXLINFO32_KR_G(x) \ + (((x) >> FW_PORT_AUXLINFO32_KR_S) & FW_PORT_AUXLINFO32_KR_M) +#define FW_PORT_AUXLINFO32_KR_F FW_PORT_AUXLINFO32_KR_V(1U) + +#define FW_PORT_CMD_MTU32_S 0 +#define FW_PORT_CMD_MTU32_M 0xffff +#define FW_PORT_CMD_MTU32_V(x) ((x) << FW_PORT_CMD_MTU32_S) +#define FW_PORT_CMD_MTU32_G(x) \ + (((x) >> FW_PORT_CMD_MTU32_S) & FW_PORT_CMD_MTU32_M) + enum fw_port_type { FW_PORT_TYPE_FIBER_XFI, FW_PORT_TYPE_FIBER_XAUI, diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 2b85b874fd0d..8996ebbd222e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -182,7 +182,7 @@ void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok) break; } - switch (pi->link_cfg.fc) { + switch ((int)pi->link_cfg.fc) { case PAUSE_RX: fc = "RX"; break; @@ -191,7 +191,7 @@ void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok) fc = "TX"; break; - case PAUSE_RX|PAUSE_TX: + case PAUSE_RX | PAUSE_TX: fc = "RX/TX"; break; @@ -1213,7 +1213,11 @@ static int from_fw_port_mod_type(enum fw_port_type port_type, } else if (port_type == FW_PORT_TYPE_SFP || port_type == FW_PORT_TYPE_QSFP_10G || port_type == FW_PORT_TYPE_QSA || - port_type == FW_PORT_TYPE_QSFP) { + port_type == FW_PORT_TYPE_QSFP || + port_type == FW_PORT_TYPE_CR4_QSFP || + port_type == FW_PORT_TYPE_CR_QSFP || + port_type == FW_PORT_TYPE_CR2_QSFP || + port_type == FW_PORT_TYPE_SFP28) { if (mod_type == FW_PORT_MOD_TYPE_LR || mod_type == FW_PORT_MOD_TYPE_SR || mod_type == FW_PORT_MOD_TYPE_ER || @@ -1224,6 +1228,9 @@ static int from_fw_port_mod_type(enum fw_port_type port_type, return PORT_DA; else return PORT_OTHER; + } else if (port_type == FW_PORT_TYPE_KR4_100G || + port_type == FW_PORT_TYPE_KR_SFP28) { + return PORT_NONE; } return PORT_OTHER; @@ -1242,12 +1249,13 @@ static void fw_caps_to_lmm(enum fw_port_type port_type, unsigned int fw_caps, unsigned long *link_mode_mask) { - #define SET_LMM(__lmm_name) __set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name\ - ## _BIT, link_mode_mask) + #define SET_LMM(__lmm_name) \ + __set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \ + link_mode_mask) #define FW_CAPS_TO_LMM(__fw_name, __lmm_name) \ do { \ - if (fw_caps & FW_PORT_CAP_ ## __fw_name) \ + if (fw_caps & 
FW_PORT_CAP32_ ## __fw_name) \ SET_LMM(__lmm_name); \ } while (0) @@ -1310,6 +1318,16 @@ static void fw_caps_to_lmm(enum fw_port_type port_type, SET_LMM(25000baseCR_Full); break; + case FW_PORT_TYPE_KR_SFP28: + SET_LMM(Backplane); + SET_LMM(25000baseKR_Full); + break; + + case FW_PORT_TYPE_CR2_QSFP: + SET_LMM(FIBRE); + SET_LMM(50000baseSR2_Full); + break; + case FW_PORT_TYPE_KR4_100G: case FW_PORT_TYPE_CR4_QSFP: SET_LMM(FIBRE); @@ -1329,12 +1347,18 @@ static void fw_caps_to_lmm(enum fw_port_type port_type, } static int cxgb4vf_get_link_ksettings(struct net_device *dev, - struct ethtool_link_ksettings - *link_ksettings) + struct ethtool_link_ksettings *link_ksettings) { - const struct port_info *pi = netdev_priv(dev); + struct port_info *pi = netdev_priv(dev); struct ethtool_link_settings *base = &link_ksettings->base; + /* For the nonce, the Firmware doesn't send up Port State changes + * when the Virtual Interface attached to the Port is down. So + * if it's down, let's grab any changes. + */ + if (!netif_running(dev)) + (void)t4vf_update_port_info(pi); + ethtool_link_ksettings_zero_link_mode(link_ksettings, supported); ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising); @@ -1351,11 +1375,11 @@ static int cxgb4vf_get_link_ksettings(struct net_device *dev, base->mdio_support = 0; } - fw_caps_to_lmm(pi->port_type, pi->link_cfg.supported, + fw_caps_to_lmm(pi->port_type, pi->link_cfg.pcaps, link_ksettings->link_modes.supported); - fw_caps_to_lmm(pi->port_type, pi->link_cfg.advertising, + fw_caps_to_lmm(pi->port_type, pi->link_cfg.acaps, link_ksettings->link_modes.advertising); - fw_caps_to_lmm(pi->port_type, pi->link_cfg.lp_advertising, + fw_caps_to_lmm(pi->port_type, pi->link_cfg.lpacaps, link_ksettings->link_modes.lp_advertising); if (netif_carrier_ok(dev)) { @@ -1367,7 +1391,7 @@ static int cxgb4vf_get_link_ksettings(struct net_device *dev, } base->autoneg = pi->link_cfg.autoneg; - if (pi->link_cfg.supported & FW_PORT_CAP_ANEG) + if (pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG) ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Autoneg); if (pi->link_cfg.autoneg) diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h index b3903fe411aa..9cf9c56b0f73 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h @@ -104,24 +104,62 @@ struct t4vf_port_stats { /* * Per-"port" (Virtual Interface) link configuration ... */ -struct link_config { - unsigned int supported; /* link capabilities */ - unsigned int advertising; /* advertised capabilities */ - unsigned short lp_advertising; /* peer advertised capabilities */ - unsigned int requested_speed; /* speed user has requested */ - unsigned int speed; /* actual link speed */ - unsigned char requested_fc; /* flow control user has requested */ - unsigned char fc; /* actual link flow control */ - unsigned char autoneg; /* autonegotiating? */ - unsigned char link_ok; /* link up? 
*/ +typedef u16 fw_port_cap16_t; /* 16-bit Port Capabilities integral value */ +typedef u32 fw_port_cap32_t; /* 32-bit Port Capabilities integral value */ + +enum fw_caps { + FW_CAPS_UNKNOWN = 0, /* 0'ed out initial state */ + FW_CAPS16 = 1, /* old Firmware: 16-bit Port Capabilities */ + FW_CAPS32 = 2, /* new Firmware: 32-bit Port Capabilities */ }; -enum { - PAUSE_RX = 1 << 0, - PAUSE_TX = 1 << 1, - PAUSE_AUTONEG = 1 << 2 +enum cc_pause { + PAUSE_RX = 1 << 0, + PAUSE_TX = 1 << 1, + PAUSE_AUTONEG = 1 << 2 }; +enum cc_fec { + FEC_AUTO = 1 << 0, /* IEEE 802.3 "automatic" */ + FEC_RS = 1 << 1, /* Reed-Solomon */ + FEC_BASER_RS = 1 << 2, /* BaseR/Reed-Solomon */ +}; + +struct link_config { + fw_port_cap32_t pcaps; /* link capabilities */ + fw_port_cap32_t acaps; /* advertised capabilities */ + fw_port_cap32_t lpacaps; /* peer advertised capabilities */ + + fw_port_cap32_t speed_caps; /* speed(s) user has requested */ + u32 speed; /* actual link speed */ + + enum cc_pause requested_fc; /* flow control user has requested */ + enum cc_pause fc; /* actual link flow control */ + + enum cc_fec auto_fec; /* Forward Error Correction: */ + enum cc_fec requested_fec; /* "automatic" (IEEE 802.3), */ + enum cc_fec fec; /* requested, and actual in use */ + + unsigned char autoneg; /* autonegotiating? */ + + unsigned char link_ok; /* link up? */ + unsigned char link_down_rc; /* link down reason */ +}; + +/* Return true if the Link Configuration supports "High Speeds" (those greater + * than 1Gb/s). + */ +static inline bool is_x_10g_port(const struct link_config *lc) +{ + fw_port_cap32_t speeds, high_speeds; + + speeds = FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_G(lc->pcaps)); + high_speeds = + speeds & ~(FW_PORT_CAP32_SPEED_100M | FW_PORT_CAP32_SPEED_1G); + + return high_speeds != 0; +} + /* * General device parameters ... */ @@ -227,6 +265,7 @@ struct adapter_params { struct arch_specific_params arch; /* chip specific params */ enum chip_type chip; /* chip code */ u8 nports; /* # of Ethernet "ports" */ + u8 fw_caps_support; /* 32-bit Port Capabilities */ }; /* Firmware Mailbox Command/Reply log. All values are in Host-Endian format. @@ -266,24 +305,6 @@ static inline struct mbox_cmd *mbox_cmd_log_entry(struct mbox_cmd_log *log, #define for_each_port(adapter, iter) \ for (iter = 0; iter < (adapter)->params.nports; iter++) -static inline bool is_10g_port(const struct link_config *lc) -{ - return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0; -} - -/* Return true if the Link Configuration supports "High Speeds" (those greater - * than 1Gb/s). 
- */ -static inline bool is_x_10g_port(const struct link_config *lc) -{ - unsigned int speeds, high_speeds; - - speeds = FW_PORT_CAP_SPEED_V(FW_PORT_CAP_SPEED_G(lc->supported)); - high_speeds = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G); - - return high_speeds != 0; -} - static inline unsigned int core_ticks_per_usec(const struct adapter *adapter) { return adapter->params.vpd.cclk / 1000; @@ -387,6 +408,7 @@ int t4vf_iq_free(struct adapter *, unsigned int, unsigned int, unsigned int, unsigned int); int t4vf_eth_eq_free(struct adapter *, unsigned int); +int t4vf_update_port_info(struct port_info *pi); int t4vf_handle_fw_rpl(struct adapter *, const __be64 *); int t4vf_prep_adapter(struct adapter *); int t4vf_get_vf_mac_acl(struct adapter *adapter, unsigned int pf, diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index e98248f00fef..a8d94963b4d0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -313,32 +313,130 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, return ret; } -#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ - FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_25G | \ - FW_PORT_CAP_SPEED_40G | FW_PORT_CAP_SPEED_100G | \ - FW_PORT_CAP_ANEG) +#define ADVERT_MASK (FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_M) | \ + FW_PORT_CAP32_ANEG) /** + * fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits + * @caps16: a 16-bit Port Capabilities value + * + * Returns the equivalent 32-bit Port Capabilities value. + */ +static fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16) +{ + fw_port_cap32_t caps32 = 0; + + #define CAP16_TO_CAP32(__cap) \ + do { \ + if (caps16 & FW_PORT_CAP_##__cap) \ + caps32 |= FW_PORT_CAP32_##__cap; \ + } while (0) + + CAP16_TO_CAP32(SPEED_100M); + CAP16_TO_CAP32(SPEED_1G); + CAP16_TO_CAP32(SPEED_25G); + CAP16_TO_CAP32(SPEED_10G); + CAP16_TO_CAP32(SPEED_40G); + CAP16_TO_CAP32(SPEED_100G); + CAP16_TO_CAP32(FC_RX); + CAP16_TO_CAP32(FC_TX); + CAP16_TO_CAP32(ANEG); + CAP16_TO_CAP32(MDIX); + CAP16_TO_CAP32(MDIAUTO); + CAP16_TO_CAP32(FEC_RS); + CAP16_TO_CAP32(FEC_BASER_RS); + CAP16_TO_CAP32(802_3_PAUSE); + CAP16_TO_CAP32(802_3_ASM_DIR); + + #undef CAP16_TO_CAP32 + + return caps32; +} + +/* Translate Firmware Pause specification to Common Code */ +static inline enum cc_pause fwcap_to_cc_pause(fw_port_cap32_t fw_pause) +{ + enum cc_pause cc_pause = 0; + + if (fw_pause & FW_PORT_CAP32_FC_RX) + cc_pause |= PAUSE_RX; + if (fw_pause & FW_PORT_CAP32_FC_TX) + cc_pause |= PAUSE_TX; + + return cc_pause; +} + +/* Translate Firmware Forward Error Correction specification to Common Code */ +static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec) +{ + enum cc_fec cc_fec = 0; + + if (fw_fec & FW_PORT_CAP32_FEC_RS) + cc_fec |= FEC_RS; + if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS) + cc_fec |= FEC_BASER_RS; + + return cc_fec; +} + +/** + * Return the highest speed set in the port capabilities, in Mb/s. 
+ */ +static unsigned int fwcap_to_speed(fw_port_cap32_t caps) +{ + #define TEST_SPEED_RETURN(__caps_speed, __speed) \ + do { \ + if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \ + return __speed; \ + } while (0) + + TEST_SPEED_RETURN(400G, 400000); + TEST_SPEED_RETURN(200G, 200000); + TEST_SPEED_RETURN(100G, 100000); + TEST_SPEED_RETURN(50G, 50000); + TEST_SPEED_RETURN(40G, 40000); + TEST_SPEED_RETURN(25G, 25000); + TEST_SPEED_RETURN(10G, 10000); + TEST_SPEED_RETURN(1G, 1000); + TEST_SPEED_RETURN(100M, 100); + + #undef TEST_SPEED_RETURN + + return 0; +} + +/* * init_link_config - initialize a link's SW state * @lc: structure holding the link state - * @caps: link capabilities + * @pcaps: link Port Capabilities + * @acaps: link current Advertised Port Capabilities * * Initializes the SW state maintained for each link, including the link's * capabilities and default speed/flow-control/autonegotiation settings. */ -static void init_link_config(struct link_config *lc, unsigned int caps) +static void init_link_config(struct link_config *lc, + fw_port_cap32_t pcaps, + fw_port_cap32_t acaps) { - lc->supported = caps; - lc->lp_advertising = 0; - lc->requested_speed = 0; + lc->pcaps = pcaps; + lc->lpacaps = 0; + lc->speed_caps = 0; lc->speed = 0; lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; - if (lc->supported & FW_PORT_CAP_ANEG) { - lc->advertising = lc->supported & ADVERT_MASK; + + /* For Forward Error Control, we default to whatever the Firmware + * tells us the Link is currently advertising. + */ + lc->auto_fec = fwcap_to_cc_fec(acaps); + lc->requested_fec = FEC_AUTO; + lc->fec = lc->auto_fec; + + if (lc->pcaps & FW_PORT_CAP32_ANEG) { + lc->acaps = acaps & ADVERT_MASK; lc->autoneg = AUTONEG_ENABLE; lc->requested_fc |= PAUSE_AUTONEG; } else { - lc->advertising = 0; + lc->acaps = 0; lc->autoneg = AUTONEG_DISABLE; } } @@ -351,9 +449,30 @@ static void init_link_config(struct link_config *lc, unsigned int caps) int t4vf_port_init(struct adapter *adapter, int pidx) { struct port_info *pi = adap2pinfo(adapter, pidx); + unsigned int fw_caps = adapter->params.fw_caps_support; struct fw_vi_cmd vi_cmd, vi_rpl; struct fw_port_cmd port_cmd, port_rpl; - int v; + enum fw_port_type port_type; + int mdio_addr; + fw_port_cap32_t pcaps, acaps; + int ret; + + /* If we haven't yet determined whether we're talking to Firmware + * which knows the new 32-bit Port Capabilities, it's time to find + * out now. This will also tell new Firmware to send us Port Status + * Updates using the new 32-bit Port Capabilities version of the + * Port Information message. + */ + if (fw_caps == FW_CAPS_UNKNOWN) { + u32 param, val; + + param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_PORT_CAPS32)); + val = 1; + ret = t4vf_set_params(adapter, 1, &param, &val); + fw_caps = (ret == 0 ? 
FW_CAPS32 : FW_CAPS16); + adapter->params.fw_caps_support = fw_caps; + } /* * Execute a VI Read command to get our Virtual Interface information @@ -365,9 +484,9 @@ int t4vf_port_init(struct adapter *adapter, int pidx) FW_CMD_READ_F); vi_cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(vi_cmd)); vi_cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(pi->viid)); - v = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl); - if (v) - return v; + ret = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl); + if (ret != FW_SUCCESS) + return ret; BUG_ON(pi->port_id != FW_VI_CMD_PORTID_G(vi_rpl.portid_pkd)); pi->rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(vi_rpl.rsssize_pkd)); @@ -385,21 +504,42 @@ int t4vf_port_init(struct adapter *adapter, int pidx) FW_CMD_REQUEST_F | FW_CMD_READ_F | FW_PORT_CMD_PORTID_V(pi->port_id)); - port_cmd.action_to_len16 = - cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) | - FW_LEN16(port_cmd)); - v = t4vf_wr_mbox(adapter, &port_cmd, sizeof(port_cmd), &port_rpl); - if (v) - return v; + port_cmd.action_to_len16 = cpu_to_be32( + FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16 + ? FW_PORT_ACTION_GET_PORT_INFO + : FW_PORT_ACTION_GET_PORT_INFO32) | + FW_LEN16(port_cmd)); + ret = t4vf_wr_mbox(adapter, &port_cmd, sizeof(port_cmd), &port_rpl); + if (ret != FW_SUCCESS) + return ret; - v = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype); - pi->mdio_addr = (v & FW_PORT_CMD_MDIOCAP_F) ? - FW_PORT_CMD_MDIOADDR_G(v) : -1; - pi->port_type = FW_PORT_CMD_PTYPE_G(v); + /* Extract the various fields from the Port Information message. */ + if (fw_caps == FW_CAPS16) { + u32 lstatus = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype); + + port_type = FW_PORT_CMD_PTYPE_G(lstatus); + mdio_addr = ((lstatus & FW_PORT_CMD_MDIOCAP_F) + ? FW_PORT_CMD_MDIOADDR_G(lstatus) + : -1); + pcaps = fwcaps16_to_caps32(be16_to_cpu(port_rpl.u.info.pcap)); + acaps = fwcaps16_to_caps32(be16_to_cpu(port_rpl.u.info.acap)); + } else { + u32 lstatus32 = + be32_to_cpu(port_rpl.u.info32.lstatus32_to_cbllen32); + + port_type = FW_PORT_CMD_PORTTYPE32_G(lstatus32); + mdio_addr = ((lstatus32 & FW_PORT_CMD_MDIOCAP32_F) + ? FW_PORT_CMD_MDIOADDR32_G(lstatus32) + : -1); + pcaps = be32_to_cpu(port_rpl.u.info32.pcaps32); + acaps = be32_to_cpu(port_rpl.u.info32.acaps32); + } + + pi->port_type = port_type; + pi->mdio_addr = mdio_addr; pi->mod_type = FW_PORT_MOD_TYPE_NA; - init_link_config(&pi->link_cfg, be16_to_cpu(port_rpl.u.info.pcap)); - + init_link_config(&pi->link_cfg, pcaps, acaps); return 0; } @@ -1666,6 +1806,202 @@ int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid) return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); } +/** + * t4vf_link_down_rc_str - return a string for a Link Down Reason Code + * @link_down_rc: Link Down Reason Code + * + * Returns a string representation of the Link Down Reason Code. + */ +const char *t4vf_link_down_rc_str(unsigned char link_down_rc) +{ + static const char * const reason[] = { + "Link Down", + "Remote Fault", + "Auto-negotiation Failure", + "Reserved", + "Insufficient Airflow", + "Unable To Determine Reason", + "No RX Signal Detected", + "Reserved", + }; + + if (link_down_rc >= ARRAY_SIZE(reason)) + return "Bad Reason Code"; + + return reason[link_down_rc]; +} + +/** + * t4vf_handle_get_port_info - process a FW reply message + * @pi: the port info + * @rpl: start of the FW message + * + * Processes a GET_PORT_INFO FW reply message. 
+ */ +void t4vf_handle_get_port_info(struct port_info *pi, + const struct fw_port_cmd *cmd) +{ + int action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16)); + struct adapter *adapter = pi->adapter; + struct link_config *lc = &pi->link_cfg; + int link_ok, linkdnrc; + enum fw_port_type port_type; + enum fw_port_module_type mod_type; + unsigned int speed, fc, fec; + fw_port_cap32_t pcaps, acaps, lpacaps, linkattr; + + /* Extract the various fields from the Port Information message. */ + switch (action) { + case FW_PORT_ACTION_GET_PORT_INFO: { + u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype); + + link_ok = (lstatus & FW_PORT_CMD_LSTATUS_F) != 0; + linkdnrc = FW_PORT_CMD_LINKDNRC_G(lstatus); + port_type = FW_PORT_CMD_PTYPE_G(lstatus); + mod_type = FW_PORT_CMD_MODTYPE_G(lstatus); + pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.pcap)); + acaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.acap)); + lpacaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.lpacap)); + + /* Unfortunately the format of the Link Status in the old + * 16-bit Port Information message isn't the same as the + * 16-bit Port Capabilities bitfield used everywhere else ... + */ + linkattr = 0; + if (lstatus & FW_PORT_CMD_RXPAUSE_F) + linkattr |= FW_PORT_CAP32_FC_RX; + if (lstatus & FW_PORT_CMD_TXPAUSE_F) + linkattr |= FW_PORT_CAP32_FC_TX; + if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M)) + linkattr |= FW_PORT_CAP32_SPEED_100M; + if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G)) + linkattr |= FW_PORT_CAP32_SPEED_1G; + if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) + linkattr |= FW_PORT_CAP32_SPEED_10G; + if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G)) + linkattr |= FW_PORT_CAP32_SPEED_25G; + if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) + linkattr |= FW_PORT_CAP32_SPEED_40G; + if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G)) + linkattr |= FW_PORT_CAP32_SPEED_100G; + + break; + } + + case FW_PORT_ACTION_GET_PORT_INFO32: { + u32 lstatus32; + + lstatus32 = be32_to_cpu(cmd->u.info32.lstatus32_to_cbllen32); + link_ok = (lstatus32 & FW_PORT_CMD_LSTATUS32_F) != 0; + linkdnrc = FW_PORT_CMD_LINKDNRC32_G(lstatus32); + port_type = FW_PORT_CMD_PORTTYPE32_G(lstatus32); + mod_type = FW_PORT_CMD_MODTYPE32_G(lstatus32); + pcaps = be32_to_cpu(cmd->u.info32.pcaps32); + acaps = be32_to_cpu(cmd->u.info32.acaps32); + lpacaps = be32_to_cpu(cmd->u.info32.lpacaps32); + linkattr = be32_to_cpu(cmd->u.info32.linkattr32); + break; + } + + default: + dev_err(adapter->pdev_dev, "Handle Port Information: Bad Command/Action %#x\n", + be32_to_cpu(cmd->action_to_len16)); + return; + } + + fec = fwcap_to_cc_fec(acaps); + fc = fwcap_to_cc_pause(linkattr); + speed = fwcap_to_speed(linkattr); + + if (mod_type != pi->mod_type) { + /* When a new Transceiver Module is inserted, the Firmware + * will examine any Forward Error Correction parameters + * present in the Transceiver Module i2c EPROM and determine + * the supported and recommended FEC settings from those + * based on IEEE 802.3 standards. We always record the + * IEEE 802.3 recommended "automatic" settings. + */ + lc->auto_fec = fec; + + /* Some versions of the early T6 Firmware "cheated" when + * handling different Transceiver Modules by changing the + * underlaying Port Type reported to the Host Drivers. As + * such we need to capture whatever Port Type the Firmware + * sends us and record it in case it's different from what we + * were told earlier. 
Unfortunately, since Firmware is + * forever, we'll need to keep this code here forever, but in + * later T6 Firmware it should just be an assignment of the + * same value already recorded. + */ + pi->port_type = port_type; + + pi->mod_type = mod_type; + t4vf_os_portmod_changed(adapter, pi->pidx); + } + + if (link_ok != lc->link_ok || speed != lc->speed || + fc != lc->fc || fec != lc->fec) { /* something changed */ + if (!link_ok && lc->link_ok) { + lc->link_down_rc = linkdnrc; + dev_warn(adapter->pdev_dev, "Port %d link down, reason: %s\n", + pi->port_id, t4vf_link_down_rc_str(linkdnrc)); + } + lc->link_ok = link_ok; + lc->speed = speed; + lc->fc = fc; + lc->fec = fec; + + lc->pcaps = pcaps; + lc->lpacaps = lpacaps; + lc->acaps = acaps & ADVERT_MASK; + + if (lc->acaps & FW_PORT_CAP32_ANEG) { + lc->autoneg = AUTONEG_ENABLE; + } else { + /* When Autoneg is disabled, user needs to set + * single speed. + * Similar to cxgb4_ethtool.c: set_link_ksettings + */ + lc->acaps = 0; + lc->speed_caps = fwcap_to_speed(acaps); + lc->autoneg = AUTONEG_DISABLE; + } + + t4vf_os_link_changed(adapter, pi->pidx, link_ok); + } +} + +/** + * t4vf_update_port_info - retrieve and update port information if changed + * @pi: the port_info + * + * We issue a Get Port Information Command to the Firmware and, if + * successful, we check to see if anything is different from what we + * last recorded and update things accordingly. + */ +int t4vf_update_port_info(struct port_info *pi) +{ + unsigned int fw_caps = pi->adapter->params.fw_caps_support; + struct fw_port_cmd port_cmd; + int ret; + + memset(&port_cmd, 0, sizeof(port_cmd)); + port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) | + FW_CMD_REQUEST_F | FW_CMD_READ_F | + FW_PORT_CMD_PORTID_V(pi->port_id)); + port_cmd.action_to_len16 = cpu_to_be32( + FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16 + ? FW_PORT_ACTION_GET_PORT_INFO + : FW_PORT_ACTION_GET_PORT_INFO32) | + FW_LEN16(port_cmd)); + ret = t4vf_wr_mbox(pi->adapter, &port_cmd, sizeof(port_cmd), + &port_cmd); + if (ret) + return ret; + t4vf_handle_get_port_info(pi, &port_cmd); + return 0; +} + /** * t4vf_handle_fw_rpl - process a firmware reply message * @adapter: the adapter @@ -1685,15 +2021,12 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl) */ const struct fw_port_cmd *port_cmd = (const struct fw_port_cmd *)rpl; - u32 stat, mod; - int action, port_id, link_ok, speed, fc, pidx; - - /* - * Extract various fields from port status change message. 
- */ - action = FW_PORT_CMD_ACTION_G( + int action = FW_PORT_CMD_ACTION_G( be32_to_cpu(port_cmd->action_to_len16)); - if (action != FW_PORT_ACTION_GET_PORT_INFO) { + int port_id, pidx; + + if (action != FW_PORT_ACTION_GET_PORT_INFO && + action != FW_PORT_ACTION_GET_PORT_INFO32) { dev_err(adapter->pdev_dev, "Unknown firmware PORT reply action %x\n", action); @@ -1702,61 +2035,12 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl) port_id = FW_PORT_CMD_PORTID_G( be32_to_cpu(port_cmd->op_to_portid)); - - stat = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype); - link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0; - speed = 0; - fc = 0; - if (stat & FW_PORT_CMD_RXPAUSE_F) - fc |= PAUSE_RX; - if (stat & FW_PORT_CMD_TXPAUSE_F) - fc |= PAUSE_TX; - if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M)) - speed = 100; - else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G)) - speed = 1000; - else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) - speed = 10000; - else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G)) - speed = 25000; - else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) - speed = 40000; - else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G)) - speed = 100000; - - /* - * Scan all of our "ports" (Virtual Interfaces) looking for - * those bound to the physical port which has changed. If - * our recorded state doesn't match the current state, - * signal that change to the OS code. - */ for_each_port(adapter, pidx) { struct port_info *pi = adap2pinfo(adapter, pidx); - struct link_config *lc; if (pi->port_id != port_id) continue; - - lc = &pi->link_cfg; - - mod = FW_PORT_CMD_MODTYPE_G(stat); - if (mod != pi->mod_type) { - pi->mod_type = mod; - t4vf_os_portmod_changed(adapter, pidx); - } - - if (link_ok != lc->link_ok || speed != lc->speed || - fc != lc->fc) { - /* something changed */ - lc->link_ok = link_ok; - lc->speed = speed; - lc->fc = fc; - lc->supported = - be16_to_cpu(port_cmd->u.info.pcap); - lc->lp_advertising = - be16_to_cpu(port_cmd->u.info.lpacap); - t4vf_os_link_changed(adapter, pidx, link_ok); - } + t4vf_handle_get_port_info(pi, port_cmd); } break; } diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 16fe776ddbe5..50222b7b81f3 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -65,7 +65,7 @@ MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds"); */ static int debug; module_param(debug, int, 0644); -MODULE_PARM_DESC(debug, "dm9000 debug level (0-4)"); +MODULE_PARM_DESC(debug, "dm9000 debug level (0-6)"); /* DM9000 register address locking. * diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c index 47be5018d35d..0affee9c8aa2 100644 --- a/drivers/net/ethernet/dec/tulip/de4x5.c +++ b/drivers/net/ethernet/dec/tulip/de4x5.c @@ -2094,7 +2094,7 @@ static int de4x5_eisa_remove(struct device *device) return 0; } -static struct eisa_device_id de4x5_eisa_ids[] = { +static const struct eisa_device_id de4x5_eisa_ids[] = { { "DEC4250", 0 }, /* 0 is the board name index... 
*/ { "" } }; diff --git a/drivers/net/ethernet/dec/tulip/tulip.h b/drivers/net/ethernet/dec/tulip/tulip.h index 38431a155f09..06660dbc44b7 100644 --- a/drivers/net/ethernet/dec/tulip/tulip.h +++ b/drivers/net/ethernet/dec/tulip/tulip.h @@ -515,7 +515,7 @@ void comet_timer(unsigned long data); extern int tulip_debug; extern const char * const medianame[]; extern const char tulip_media_cap[]; -extern struct tulip_chip_table tulip_tbl[]; +extern const struct tulip_chip_table tulip_tbl[]; void oom_timer(unsigned long data); extern u8 t21040_csr13[]; diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index 17e566a8b345..851b6d1f5a42 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -138,7 +138,7 @@ static void tulip_timer(unsigned long data) * It is indexed via the values in 'enum chips' */ -struct tulip_chip_table tulip_tbl[] = { +const struct tulip_chip_table tulip_tbl[] = { { }, /* placeholder for array, slot unused currently */ { }, /* placeholder for array, slot unused currently */ @@ -1303,7 +1303,6 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 0x00, 'L', 'i', 'n', 'u', 'x' }; static int last_irq; - static int multiport_cnt; /* For four-port boards w/one EEPROM */ int i, irq; unsigned short sum; unsigned char *ee_data; @@ -1557,7 +1556,6 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } else if (ee_data[0] == 0xff && ee_data[1] == 0xff && ee_data[2] == 0) { sa_offset = 2; /* Grrr, damn Matrox boards. */ - multiport_cnt = 4; } #ifdef CONFIG_MIPS_COBALT if ((pdev->bus->number == 0) && diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c index 4ee042c034a1..1b79a6defd56 100644 --- a/drivers/net/ethernet/ec_bhf.c +++ b/drivers/net/ethernet/ec_bhf.c @@ -73,7 +73,7 @@ #define ETHERCAT_MASTER_ID 0x14 -static struct pci_device_id ids[] = { +static const struct pci_device_id ids[] = { { PCI_DEVICE(0x15ec, 0x5000), }, { 0, } }; diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 674cf9d13b98..8984c4938881 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -930,6 +930,14 @@ static inline bool is_ipv4_pkt(struct sk_buff *skb) return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4; } +static inline bool is_ipv6_ext_hdr(struct sk_buff *skb) +{ + if (ip_hdr(skb)->version == 6) + return ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr); + else + return false; +} + #define be_error_recovering(adapter) \ (adapter->flags & BE_FLAGS_TRY_RECOVERY) diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 319eee36649b..0e3d9f39a807 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -5089,6 +5089,20 @@ static netdev_features_t be_features_check(struct sk_buff *skb, struct be_adapter *adapter = netdev_priv(dev); u8 l4_hdr = 0; + if (skb_is_gso(skb)) { + /* IPv6 TSO requests with extension hdrs are a problem + * to Lancer and BE3 HW. Disable TSO6 feature. + */ + if (!skyhawk_chip(adapter) && is_ipv6_ext_hdr(skb)) + features &= ~NETIF_F_TSO6; + + /* Lancer cannot handle the packet with MSS less than 256. 
+ * Disable the GSO support in such cases + */ + if (lancer_chip(adapter) && skb_shinfo(skb)->gso_size < 256) + features &= ~NETIF_F_GSO_MASK; + } + /* The code below restricts offload features for some tunneled and * Q-in-Q packets. * Offload features for normal (non tunnel) packets are unchanged. diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c index 2b62841c4c63..05989aafaf32 100644 --- a/drivers/net/ethernet/emulex/benet/be_roce.c +++ b/drivers/net/ethernet/emulex/benet/be_roce.c @@ -139,10 +139,7 @@ int be_roce_register_driver(struct ocrdma_driver *drv) } ocrdma_drv = drv; list_for_each_entry(dev, &be_adapter_list, entry) { - struct net_device *netdev; - _be_roce_dev_add(dev); - netdev = dev->netdev; } mutex_unlock(&be_adapter_list_lock); return 0; diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 59da7ac3c108..9ed8e4b81530 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -1623,6 +1623,8 @@ static const struct net_device_ops ftgmac100_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ftgmac100_poll_controller, #endif + .ndo_vlan_rx_add_vid = ncsi_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ncsi_vlan_rx_kill_vid, }; static int ftgmac100_setup_mdio(struct net_device *netdev) @@ -1837,6 +1839,9 @@ static int ftgmac100_probe(struct platform_device *pdev) NETIF_F_GRO | NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX; + if (priv->use_ncsi) + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + /* AST2400 doesn't have working HW checksum generation */ if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac"))) netdev->hw_features &= ~NETIF_F_HW_CSUM; diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 757b873735a5..42258060f142 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -158,7 +158,7 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms"); #define DPAA_RX_PRIV_DATA_SIZE (u16)(DPAA_TX_PRIV_DATA_SIZE + \ dpaa_rx_extra_headroom) -#define DPAA_ETH_RX_QUEUES 128 +#define DPAA_ETH_PCD_RXQ_NUM 128 #define DPAA_ENQUEUE_RETRIES 100000 @@ -169,6 +169,7 @@ struct fm_port_fqs { struct dpaa_fq *tx_errq; struct dpaa_fq *rx_defq; struct dpaa_fq *rx_errq; + struct dpaa_fq *rx_pcdq; }; /* All the dpa bps in use at any moment */ @@ -235,7 +236,7 @@ static int dpaa_netdev_init(struct net_device *net_dev, net_dev->max_mtu = dpaa_get_max_mtu(); net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_LLTX); + NETIF_F_LLTX | NETIF_F_RXHASH); net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA; /* The kernels enables GSO automatically, if we declare NETIF_F_SG. 
@@ -342,18 +343,19 @@ static void dpaa_get_stats64(struct net_device *net_dev, } } -static int dpaa_setup_tc(struct net_device *net_dev, u32 handle, - u32 chain_index, __be16 proto, struct tc_to_netdev *tc) +static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type, + void *type_data) { struct dpaa_priv *priv = netdev_priv(net_dev); + struct tc_mqprio_qopt *mqprio = type_data; u8 num_tc; int i; - if (tc->type != TC_SETUP_MQPRIO) - return -EINVAL; + if (type != TC_SETUP_MQPRIO) + return -EOPNOTSUPP; - tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; - num_tc = tc->mqprio->num_tc; + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + num_tc = mqprio->num_tc; if (num_tc == priv->num_tc) return 0; @@ -398,8 +400,8 @@ static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev) of_dev = of_find_device_by_node(mac_node); if (!of_dev) { - dev_err(dpaa_dev, "of_find_device_by_node(%s) failed\n", - mac_node->full_name); + dev_err(dpaa_dev, "of_find_device_by_node(%pOF) failed\n", + mac_node); of_node_put(mac_node); return ERR_PTR(-EINVAL); } @@ -627,6 +629,7 @@ static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx) fq->wq = 5; break; case FQ_TYPE_RX_DEFAULT: + case FQ_TYPE_RX_PCD: fq->wq = 6; break; case FQ_TYPE_TX: @@ -687,6 +690,7 @@ static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list, struct fm_port_fqs *port_fqs) { struct dpaa_fq *dpaa_fq; + u32 fq_base, fq_base_aligned, i; dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_ERROR); if (!dpaa_fq) @@ -700,6 +704,26 @@ static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list, port_fqs->rx_defq = &dpaa_fq[0]; + /* the PCD FQIDs range needs to be aligned for correct operation */ + if (qman_alloc_fqid_range(&fq_base, 2 * DPAA_ETH_PCD_RXQ_NUM)) + goto fq_alloc_failed; + + fq_base_aligned = ALIGN(fq_base, DPAA_ETH_PCD_RXQ_NUM); + + for (i = fq_base; i < fq_base_aligned; i++) + qman_release_fqid(i); + + for (i = fq_base_aligned + DPAA_ETH_PCD_RXQ_NUM; + i < (fq_base + 2 * DPAA_ETH_PCD_RXQ_NUM); i++) + qman_release_fqid(i); + + dpaa_fq = dpaa_fq_alloc(dev, fq_base_aligned, DPAA_ETH_PCD_RXQ_NUM, + list, FQ_TYPE_RX_PCD); + if (!dpaa_fq) + goto fq_alloc_failed; + + port_fqs->rx_pcdq = &dpaa_fq[0]; + if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX_CONF_MQ)) goto fq_alloc_failed; @@ -869,13 +893,14 @@ static void dpaa_fq_setup(struct dpaa_priv *priv, const struct dpaa_fq_cbs *fq_cbs, struct fman_port *tx_port) { - int egress_cnt = 0, conf_cnt = 0, num_portals = 0, cpu; + int egress_cnt = 0, conf_cnt = 0, num_portals = 0, portal_cnt = 0, cpu; const cpumask_t *affine_cpus = qman_affine_cpus(); - u16 portals[NR_CPUS]; + u16 channels[NR_CPUS]; struct dpaa_fq *fq; for_each_cpu(cpu, affine_cpus) - portals[num_portals++] = qman_affine_channel(cpu); + channels[num_portals++] = qman_affine_channel(cpu); + if (num_portals == 0) dev_err(priv->net_dev->dev.parent, "No Qman software (affine) channels found"); @@ -889,6 +914,12 @@ static void dpaa_fq_setup(struct dpaa_priv *priv, case FQ_TYPE_RX_ERROR: dpaa_setup_ingress(priv, fq, &fq_cbs->rx_errq); break; + case FQ_TYPE_RX_PCD: + if (!num_portals) + continue; + dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq); + fq->channel = channels[portal_cnt++ % num_portals]; + break; case FQ_TYPE_TX: dpaa_setup_egress(priv, fq, tx_port, &fq_cbs->egress_ern); @@ -1038,7 +1069,8 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable) /* Put all the ingress queues in our "ingress CGR". 
*/ if (priv->use_ingress_cgr && (dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT || - dpaa_fq->fq_type == FQ_TYPE_RX_ERROR)) { + dpaa_fq->fq_type == FQ_TYPE_RX_ERROR || + dpaa_fq->fq_type == FQ_TYPE_RX_PCD)) { initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID); initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE); initfq.fqd.cgid = (u8)priv->ingress_cgr.cgrid; @@ -1169,7 +1201,7 @@ static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq, static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps, size_t count, struct dpaa_fq *errq, - struct dpaa_fq *defq, + struct dpaa_fq *defq, struct dpaa_fq *pcdq, struct dpaa_buffer_layout *buf_layout) { struct fman_buffer_prefix_content buf_prefix_content; @@ -1189,6 +1221,10 @@ static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps, rx_p = &params.specific_params.rx_params; rx_p->err_fqid = errq->fqid; rx_p->dflt_fqid = defq->fqid; + if (pcdq) { + rx_p->pcd_base_fqid = pcdq->fqid; + rx_p->pcd_fqs_count = DPAA_ETH_PCD_RXQ_NUM; + } count = min(ARRAY_SIZE(rx_p->ext_buf_pools.ext_buf_pool), count); rx_p->ext_buf_pools.num_of_pools_used = (u8)count; @@ -1233,7 +1269,8 @@ static int dpaa_eth_init_ports(struct mac_device *mac_dev, return err; err = dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq, - port_fqs->rx_defq, &buf_layout[RX]); + port_fqs->rx_defq, port_fqs->rx_pcdq, + &buf_layout[RX]); return err; } @@ -2200,12 +2237,13 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal, dma_addr_t addr = qm_fd_addr(fd); enum qm_fd_format fd_format; struct net_device *net_dev; - u32 fd_status; + u32 fd_status, hash_offset; struct dpaa_bp *dpaa_bp; struct dpaa_priv *priv; unsigned int skb_len; struct sk_buff *skb; int *count_ptr; + void *vaddr; fd_status = be32_to_cpu(fd->status); fd_format = qm_fd_get_format(fd); @@ -2251,7 +2289,8 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal, dma_unmap_single(dpaa_bp->dev, addr, dpaa_bp->size, DMA_FROM_DEVICE); /* prefetch the first 64 bytes of the frame or the SGT start */ - prefetch(phys_to_virt(addr) + qm_fd_get_offset(fd)); + vaddr = phys_to_virt(addr); + prefetch(vaddr + qm_fd_get_offset(fd)); fd_format = qm_fd_get_format(fd); /* The only FD types that we may receive are contig and S/G */ @@ -2272,6 +2311,18 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal, skb->protocol = eth_type_trans(skb, net_dev); + if (net_dev->features & NETIF_F_RXHASH && priv->keygen_in_use && + !fman_port_get_hash_result_offset(priv->mac_dev->port[RX], + &hash_offset)) { + enum pkt_hash_types type; + + /* if L4 exists, it was used in the hash generation */ + type = be32_to_cpu(fd->status) & FM_FD_STAT_L4CV ? 
+ PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3; + skb_set_hash(skb, be32_to_cpu(*(u32 *)(vaddr + hash_offset)), + type); + } + skb_len = skb->len; if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) @@ -2510,6 +2561,9 @@ static struct dpaa_bp *dpaa_bp_alloc(struct device *dev) dpaa_bp->bpid = FSL_DPAA_BPID_INV; dpaa_bp->percpu_count = devm_alloc_percpu(dev, *dpaa_bp->percpu_count); + if (!dpaa_bp->percpu_count) + return ERR_PTR(-ENOMEM); + dpaa_bp->config_count = FSL_DPAA_ETH_MAX_BUF_COUNT; dpaa_bp->seed_cb = dpaa_bp_seed; @@ -2737,6 +2791,9 @@ static int dpaa_eth_probe(struct platform_device *pdev) if (err) goto init_ports_failed; + /* Rx traffic distribution based on keygen hashing defaults to on */ + priv->keygen_in_use = true; + priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv); if (!priv->percpu_priv) { dev_err(dev, "devm_alloc_percpu() failed\n"); @@ -2829,7 +2886,7 @@ static int dpaa_remove(struct platform_device *pdev) return err; } -static struct platform_device_id dpaa_devtype[] = { +static const struct platform_device_id dpaa_devtype[] = { { .name = "dpaa-ethernet", .driver_data = 0, diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h index 9941a7866ebe..bd9422082f83 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h @@ -52,6 +52,7 @@ enum dpaa_fq_type { FQ_TYPE_RX_DEFAULT = 1, /* Rx Default FQs */ FQ_TYPE_RX_ERROR, /* Rx Error FQs */ + FQ_TYPE_RX_PCD, /* Rx Parse Classify Distribute FQs */ FQ_TYPE_TX, /* "Real" Tx FQs */ FQ_TYPE_TX_CONFIRM, /* Tx default Conf FQ (actually an Rx FQ) */ FQ_TYPE_TX_CONF_MQ, /* Tx conf FQs (one for each Tx FQ) */ @@ -158,6 +159,7 @@ struct dpaa_priv { struct list_head dpaa_fq_list; u8 num_tc; + bool keygen_in_use; u32 msg_enable; /* net_device message level */ struct { diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c index ec75d1c6fa89..0d9b185e317f 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c @@ -71,6 +71,9 @@ static ssize_t dpaa_eth_show_fqids(struct device *dev, case FQ_TYPE_RX_ERROR: str = "Rx error"; break; + case FQ_TYPE_RX_PCD: + str = "Rx PCD"; + break; case FQ_TYPE_TX_CONFIRM: str = "Tx default confirmation"; break; diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c index aad825088357..faea674094b9 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c @@ -399,6 +399,122 @@ static void dpaa_get_strings(struct net_device *net_dev, u32 stringset, memcpy(strings, dpaa_stats_global, size); } +static int dpaa_get_hash_opts(struct net_device *dev, + struct ethtool_rxnfc *cmd) +{ + struct dpaa_priv *priv = netdev_priv(dev); + + cmd->data = 0; + + switch (cmd->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + case UDP_V4_FLOW: + case UDP_V6_FLOW: + if (priv->keygen_in_use) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* Fall through */ + case IPV4_FLOW: + case IPV6_FLOW: + case SCTP_V4_FLOW: + case SCTP_V6_FLOW: + case AH_ESP_V4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V4_FLOW: + case AH_V6_FLOW: + case ESP_V4_FLOW: + case ESP_V6_FLOW: + if (priv->keygen_in_use) + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + cmd->data = 0; + break; + } + + return 0; +} + +static int dpaa_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc 
*cmd, + u32 *unused) +{ + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXFH: + ret = dpaa_get_hash_opts(dev, cmd); + break; + default: + break; + } + + return ret; +} + +static void dpaa_set_hash(struct net_device *net_dev, bool enable) +{ + struct mac_device *mac_dev; + struct fman_port *rxport; + struct dpaa_priv *priv; + + priv = netdev_priv(net_dev); + mac_dev = priv->mac_dev; + rxport = mac_dev->port[0]; + + fman_port_use_kg_hash(rxport, enable); + priv->keygen_in_use = enable; +} + +static int dpaa_set_hash_opts(struct net_device *dev, + struct ethtool_rxnfc *nfc) +{ + int ret = -EINVAL; + + /* we support hashing on IPv4/v6 src/dest IP and L4 src/dest port */ + if (nfc->data & + ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + case UDP_V4_FLOW: + case UDP_V6_FLOW: + case IPV4_FLOW: + case IPV6_FLOW: + case SCTP_V4_FLOW: + case SCTP_V6_FLOW: + case AH_ESP_V4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V4_FLOW: + case AH_V6_FLOW: + case ESP_V4_FLOW: + case ESP_V6_FLOW: + dpaa_set_hash(dev, !!nfc->data); + ret = 0; + break; + default: + break; + } + + return ret; +} + +static int dpaa_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + ret = dpaa_set_hash_opts(dev, cmd); + break; + default: + break; + } + + return ret; +} + const struct ethtool_ops dpaa_ethtool_ops = { .get_drvinfo = dpaa_get_drvinfo, .get_msglevel = dpaa_get_msglevel, @@ -412,4 +528,6 @@ const struct ethtool_ops dpaa_ethtool_ops = { .get_strings = dpaa_get_strings, .get_link_ksettings = dpaa_get_link_ksettings, .set_link_ksettings = dpaa_set_link_ksettings, + .get_rxnfc = dpaa_get_rxnfc, + .set_rxnfc = dpaa_set_rxnfc, }; diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 38c7b21e5d63..ede1876a9a19 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -374,8 +374,8 @@ struct bufdesc_ex { #define FEC_ENET_TS_AVAIL ((uint)0x00010000) #define FEC_ENET_TS_TIMER ((uint)0x00008000) -#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII | FEC_ENET_TS_TIMER) -#define FEC_NAPI_IMASK (FEC_ENET_MII | FEC_ENET_TS_TIMER) +#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII) +#define FEC_NAPI_IMASK FEC_ENET_MII #define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF)) /* ENET interrupt coalescing macro define */ diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index a6e323f15637..3dc2d771a222 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -173,10 +173,12 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); #endif /* CONFIG_M5272 */ /* The FEC stores dest/src/type/vlan, data, and checksum for receive packets. + * + * 2048 byte skbufs are allocated. However, alignment requirements + * varies between FEC variants. Worst case is 64, so round down by 64. 
*/ -#define PKT_MAXBUF_SIZE 1522 +#define PKT_MAXBUF_SIZE (round_down(2048 - 64, 64)) #define PKT_MINBUF_SIZE 64 -#define PKT_MAXBLR_SIZE 1536 /* FEC receive acceleration */ #define FEC_RACC_IPDIS (1 << 1) @@ -224,7 +226,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); #define COPYBREAK_DEFAULT 256 -#define TSO_HEADER_SIZE 128 /* Max number of allowed TCP segments for software TSO */ #define FEC_MAX_TSO_SEGS 100 #define FEC_MAX_SKB_DESCS (FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS) @@ -851,7 +852,7 @@ static void fec_enet_enable_ring(struct net_device *ndev) for (i = 0; i < fep->num_rx_queues; i++) { rxq = fep->rx_queue[i]; writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i)); - writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i)); + writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i)); /* enable DMA1/2 */ if (i) @@ -1558,14 +1559,14 @@ fec_enet_collect_events(struct fec_enet_private *fep, uint int_events) if (int_events == 0) return false; - if (int_events & FEC_ENET_RXF) + if (int_events & FEC_ENET_RXF_0) fep->work_rx |= (1 << 2); if (int_events & FEC_ENET_RXF_1) fep->work_rx |= (1 << 0); if (int_events & FEC_ENET_RXF_2) fep->work_rx |= (1 << 1); - if (int_events & FEC_ENET_TXF) + if (int_events & FEC_ENET_TXF_0) fep->work_tx |= (1 << 2); if (int_events & FEC_ENET_TXF_1) fep->work_tx |= (1 << 0); @@ -1603,8 +1604,8 @@ fec_enet_interrupt(int irq, void *dev_id) } if (fep->ptp_clock) - fec_ptp_check_pps_event(fep); - + if (fec_ptp_check_pps_event(fep)) + ret = IRQ_HANDLED; return ret; } @@ -1904,8 +1905,10 @@ static int fec_enet_mii_probe(struct net_device *ndev) phy_dev = of_phy_connect(ndev, fep->phy_node, &fec_enet_adjust_link, 0, fep->phy_interface); - if (!phy_dev) + if (!phy_dev) { + netdev_err(ndev, "Unable to connect to phy\n"); return -ENODEV; + } } else { /* check for attached phy */ for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) { diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index aa8cf5d2a53c..6d7269d87a85 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c @@ -960,8 +960,8 @@ static int mpc52xx_fec_probe(struct platform_device *op) /* We're done ! 
*/ platform_set_drvdata(op, ndev); - netdev_info(ndev, "%s MAC %pM\n", - op->dev.of_node->full_name, ndev->dev_addr); + netdev_info(ndev, "%pOF MAC %pM\n", + op->dev.of_node, ndev->dev_addr); return 0; diff --git a/drivers/net/ethernet/freescale/fman/Makefile b/drivers/net/ethernet/freescale/fman/Makefile index 60491779e49f..2c38119b172c 100644 --- a/drivers/net/ethernet/freescale/fman/Makefile +++ b/drivers/net/ethernet/freescale/fman/Makefile @@ -4,6 +4,6 @@ obj-$(CONFIG_FSL_FMAN) += fsl_fman.o obj-$(CONFIG_FSL_FMAN) += fsl_fman_port.o obj-$(CONFIG_FSL_FMAN) += fsl_mac.o -fsl_fman-objs := fman_muram.o fman.o fman_sp.o +fsl_fman-objs := fman_muram.o fman.o fman_sp.o fman_keygen.o fsl_fman_port-objs := fman_port.o fsl_mac-objs:= mac.o fman_dtsec.o fman_memac.o fman_tgec.o diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c index 4aefe2438969..9530405030a7 100644 --- a/drivers/net/ethernet/freescale/fman/fman.c +++ b/drivers/net/ethernet/freescale/fman/fman.c @@ -32,9 +32,6 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include "fman.h" -#include "fman_muram.h" - #include #include #include @@ -46,6 +43,10 @@ #include #include +#include "fman.h" +#include "fman_muram.h" +#include "fman_keygen.h" + /* General defines */ #define FMAN_LIODN_TBL 64 /* size of LIODN table */ #define MAX_NUM_OF_MACS 10 @@ -56,6 +57,7 @@ /* Modules registers offsets */ #define BMI_OFFSET 0x00080000 #define QMI_OFFSET 0x00080400 +#define KG_OFFSET 0x000C1000 #define DMA_OFFSET 0x000C2000 #define FPM_OFFSET 0x000C3000 #define IMEM_OFFSET 0x000C4000 @@ -564,80 +566,6 @@ struct fman_cfg { u32 qmi_def_tnums_thresh; }; -/* Structure that holds information received from device tree */ -struct fman_dts_params { - void __iomem *base_addr; /* FMan virtual address */ - struct resource *res; /* FMan memory resource */ - u8 id; /* FMan ID */ - - int err_irq; /* FMan Error IRQ */ - - u16 clk_freq; /* FMan clock freq (In Mhz) */ - - u32 qman_channel_base; /* QMan channels base */ - u32 num_of_qman_channels; /* Number of QMan channels */ - - struct resource muram_res; /* MURAM resource */ -}; - -/** fman_exceptions_cb - * fman - Pointer to FMan - * exception - The exception. - * - * Exceptions user callback routine, will be called upon an exception - * passing the exception identification. - * - * Return: irq status - */ -typedef irqreturn_t (fman_exceptions_cb)(struct fman *fman, - enum fman_exceptions exception); - -/** fman_bus_error_cb - * fman - Pointer to FMan - * port_id - Port id - * addr - Address that caused the error - * tnum - Owner of error - * liodn - Logical IO device number - * - * Bus error user callback routine, will be called upon bus error, - * passing parameters describing the errors and the owner. 
- * - * Return: IRQ status - */ -typedef irqreturn_t (fman_bus_error_cb)(struct fman *fman, u8 port_id, - u64 addr, u8 tnum, u16 liodn); - -struct fman { - struct device *dev; - void __iomem *base_addr; - struct fman_intr_src intr_mng[FMAN_EV_CNT]; - - struct fman_fpm_regs __iomem *fpm_regs; - struct fman_bmi_regs __iomem *bmi_regs; - struct fman_qmi_regs __iomem *qmi_regs; - struct fman_dma_regs __iomem *dma_regs; - struct fman_hwp_regs __iomem *hwp_regs; - fman_exceptions_cb *exception_cb; - fman_bus_error_cb *bus_error_cb; - /* Spinlock for FMan use */ - spinlock_t spinlock; - struct fman_state_struct *state; - - struct fman_cfg *cfg; - struct muram_info *muram; - /* cam section in muram */ - unsigned long cam_offset; - size_t cam_size; - /* Fifo in MURAM */ - unsigned long fifo_offset; - size_t fifo_size; - - u32 liodn_base[64]; - u32 liodn_offset[64]; - - struct fman_dts_params dts_params; -}; - static irqreturn_t fman_exceptions(struct fman *fman, enum fman_exceptions exception) { @@ -1811,6 +1739,7 @@ static int fman_config(struct fman *fman) fman->qmi_regs = base_addr + QMI_OFFSET; fman->dma_regs = base_addr + DMA_OFFSET; fman->hwp_regs = base_addr + HWP_OFFSET; + fman->kg_regs = base_addr + KG_OFFSET; fman->base_addr = base_addr; spin_lock_init(&fman->spinlock); @@ -1925,8 +1854,8 @@ static int fman_reset(struct fman *fman) guts_regs = of_iomap(guts_node, 0); if (!guts_regs) { - dev_err(fman->dev, "%s: Couldn't map %s regs\n", - __func__, guts_node->full_name); + dev_err(fman->dev, "%s: Couldn't map %pOF regs\n", + __func__, guts_node); goto guts_regs; } #define FMAN1_ALL_MACS_MASK 0xFCC00000 @@ -2083,6 +2012,11 @@ static int fman_init(struct fman *fman) /* Init HW Parser */ hwp_init(fman->hwp_regs); + /* Init KeyGen */ + fman->keygen = keygen_init(fman->kg_regs); + if (!fman->keygen) + return -EINVAL; + err = enable(fman, cfg); if (err != 0) return err; @@ -2434,15 +2368,21 @@ u32 fman_get_qman_channel_id(struct fman *fman, u32 port_id) int i; if (fman->state->rev_info.major >= 6) { - u32 port_ids[] = {0x30, 0x31, 0x28, 0x29, 0x2a, 0x2b, - 0x2c, 0x2d, 0x2, 0x3, 0x4, 0x5, 0x7, 0x7}; + static const u32 port_ids[] = { + 0x30, 0x31, 0x28, 0x29, 0x2a, 0x2b, + 0x2c, 0x2d, 0x2, 0x3, 0x4, 0x5, 0x7, 0x7 + }; + for (i = 0; i < fman->state->num_of_qman_channels; i++) { if (port_ids[i] == port_id) break; } } else { - u32 port_ids[] = {0x30, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x1, - 0x2, 0x3, 0x4, 0x5, 0x7, 0x7}; + static const u32 port_ids[] = { + 0x30, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x1, + 0x2, 0x3, 0x4, 0x5, 0x7, 0x7 + }; + for (i = 0; i < fman->state->num_of_qman_channels; i++) { if (port_ids[i] == port_id) break; @@ -2780,8 +2720,8 @@ static struct fman *read_dts_node(struct platform_device *of_dev) err = of_property_read_u32(fm_node, "cell-index", &val); if (err) { - dev_err(&of_dev->dev, "%s: failed to read cell-index for %s\n", - __func__, fm_node->full_name); + dev_err(&of_dev->dev, "%s: failed to read cell-index for %pOF\n", + __func__, fm_node); goto fman_node_put; } fman->dts_params.id = (u8)val; @@ -2834,8 +2774,8 @@ static struct fman *read_dts_node(struct platform_device *of_dev) err = of_property_read_u32_array(fm_node, "fsl,qman-channel-range", &range[0], 2); if (err) { - dev_err(&of_dev->dev, "%s: failed to read fsl,qman-channel-range for %s\n", - __func__, fm_node->full_name); + dev_err(&of_dev->dev, "%s: failed to read fsl,qman-channel-range for %pOF\n", + __func__, fm_node); goto fman_node_put; } fman->dts_params.qman_channel_base = range[0]; diff --git 
a/drivers/net/ethernet/freescale/fman/fman.h b/drivers/net/ethernet/freescale/fman/fman.h index f53e1473dbcc..bfa02e0014ae 100644 --- a/drivers/net/ethernet/freescale/fman/fman.h +++ b/drivers/net/ethernet/freescale/fman/fman.h @@ -34,6 +34,8 @@ #define __FM_H #include +#include +#include /* FM Frame descriptor macros */ /* Frame queue Context Override */ @@ -274,6 +276,81 @@ struct fman_intr_src { void *src_handle; }; +/** fman_exceptions_cb + * fman - Pointer to FMan + * exception - The exception. + * + * Exceptions user callback routine, will be called upon an exception + * passing the exception identification. + * + * Return: irq status + */ +typedef irqreturn_t (fman_exceptions_cb)(struct fman *fman, + enum fman_exceptions exception); +/** fman_bus_error_cb + * fman - Pointer to FMan + * port_id - Port id + * addr - Address that caused the error + * tnum - Owner of error + * liodn - Logical IO device number + * + * Bus error user callback routine, will be called upon bus error, + * passing parameters describing the errors and the owner. + * + * Return: IRQ status + */ +typedef irqreturn_t (fman_bus_error_cb)(struct fman *fman, u8 port_id, + u64 addr, u8 tnum, u16 liodn); + +/* Structure that holds information received from device tree */ +struct fman_dts_params { + void __iomem *base_addr; /* FMan virtual address */ + struct resource *res; /* FMan memory resource */ + u8 id; /* FMan ID */ + + int err_irq; /* FMan Error IRQ */ + + u16 clk_freq; /* FMan clock freq (In Mhz) */ + + u32 qman_channel_base; /* QMan channels base */ + u32 num_of_qman_channels; /* Number of QMan channels */ + + struct resource muram_res; /* MURAM resource */ +}; + +struct fman { + struct device *dev; + void __iomem *base_addr; + struct fman_intr_src intr_mng[FMAN_EV_CNT]; + + struct fman_fpm_regs __iomem *fpm_regs; + struct fman_bmi_regs __iomem *bmi_regs; + struct fman_qmi_regs __iomem *qmi_regs; + struct fman_dma_regs __iomem *dma_regs; + struct fman_hwp_regs __iomem *hwp_regs; + struct fman_kg_regs __iomem *kg_regs; + fman_exceptions_cb *exception_cb; + fman_bus_error_cb *bus_error_cb; + /* Spinlock for FMan use */ + spinlock_t spinlock; + struct fman_state_struct *state; + + struct fman_cfg *cfg; + struct muram_info *muram; + struct fman_keygen *keygen; + /* cam section in muram */ + unsigned long cam_offset; + size_t cam_size; + /* Fifo in MURAM */ + unsigned long fifo_offset; + size_t fifo_size; + + u32 liodn_base[64]; + u32 liodn_offset[64]; + + struct fman_dts_params dts_params; +}; + /* Structure for port-FM communication during fman_port_init. 
*/ struct fman_port_init_params { u8 port_id; /* port Id */ diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c index 98bba10fc38c..ea43b4974149 100644 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c @@ -123,7 +123,7 @@ #define DTSEC_ECNTRL_R100M 0x00000008 #define DTSEC_ECNTRL_QSGMIIM 0x00000001 -#define DTSEC_TCTRL_GTS 0x00000020 +#define TCTRL_GTS 0x00000020 #define RCTRL_PAL_MASK 0x001f0000 #define RCTRL_PAL_SHIFT 16 @@ -863,6 +863,52 @@ int dtsec_cfg_pad_and_crc(struct fman_mac *dtsec, bool new_val) return 0; } +static void graceful_start(struct fman_mac *dtsec, enum comm_mode mode) +{ + struct dtsec_regs __iomem *regs = dtsec->regs; + + if (mode & COMM_MODE_TX) + iowrite32be(ioread32be(®s->tctrl) & + ~TCTRL_GTS, ®s->tctrl); + if (mode & COMM_MODE_RX) + iowrite32be(ioread32be(®s->rctrl) & + ~RCTRL_GRS, ®s->rctrl); +} + +static void graceful_stop(struct fman_mac *dtsec, enum comm_mode mode) +{ + struct dtsec_regs __iomem *regs = dtsec->regs; + u32 tmp; + + /* Graceful stop - Assert the graceful Rx stop bit */ + if (mode & COMM_MODE_RX) { + tmp = ioread32be(®s->rctrl) | RCTRL_GRS; + iowrite32be(tmp, ®s->rctrl); + + if (dtsec->fm_rev_info.major == 2) { + /* Workaround for dTSEC Errata A002 */ + usleep_range(100, 200); + } else { + /* Workaround for dTSEC Errata A004839 */ + usleep_range(10, 50); + } + } + + /* Graceful stop - Assert the graceful Tx stop bit */ + if (mode & COMM_MODE_TX) { + if (dtsec->fm_rev_info.major == 2) { + /* dTSEC Errata A004: Do not use TCTRL[GTS]=1 */ + pr_debug("GTS not supported due to DTSEC_A004 Errata.\n"); + } else { + tmp = ioread32be(®s->tctrl) | TCTRL_GTS; + iowrite32be(tmp, ®s->tctrl); + + /* Workaround for dTSEC Errata A0012, A0014 */ + usleep_range(10, 50); + } + } +} + int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode) { struct dtsec_regs __iomem *regs = dtsec->regs; @@ -880,13 +926,8 @@ int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode) iowrite32be(tmp, ®s->maccfg1); - /* Graceful start - clear the graceful receive stop bit */ - if (mode & COMM_MODE_TX) - iowrite32be(ioread32be(®s->tctrl) & ~DTSEC_TCTRL_GTS, - ®s->tctrl); - if (mode & COMM_MODE_RX) - iowrite32be(ioread32be(®s->rctrl) & ~RCTRL_GRS, - ®s->rctrl); + /* Graceful start - clear the graceful Rx/Tx stop bit */ + graceful_start(dtsec, mode); return 0; } @@ -899,23 +940,8 @@ int dtsec_disable(struct fman_mac *dtsec, enum comm_mode mode) if (!is_init_done(dtsec->dtsec_drv_param)) return -EINVAL; - /* Gracefull stop - Assert the graceful transmit stop bit */ - if (mode & COMM_MODE_RX) { - tmp = ioread32be(®s->rctrl) | RCTRL_GRS; - iowrite32be(tmp, ®s->rctrl); - - if (dtsec->fm_rev_info.major == 2) - usleep_range(100, 200); - else - udelay(10); - } - - if (mode & COMM_MODE_TX) { - if (dtsec->fm_rev_info.major == 2) - pr_debug("GTS not supported due to DTSEC_A004 errata.\n"); - else - pr_debug("GTS not supported due to DTSEC_A0014 errata.\n"); - } + /* Graceful stop - Assert the graceful Rx/Tx stop bit */ + graceful_stop(dtsec, mode); tmp = ioread32be(®s->maccfg1); if (mode & COMM_MODE_RX) @@ -933,11 +959,19 @@ int dtsec_set_tx_pause_frames(struct fman_mac *dtsec, u16 pause_time, u16 __maybe_unused thresh_time) { struct dtsec_regs __iomem *regs = dtsec->regs; + enum comm_mode mode = COMM_MODE_NONE; u32 ptv = 0; if (!is_init_done(dtsec->dtsec_drv_param)) return -EINVAL; + if ((ioread32be(®s->rctrl) & RCTRL_GRS) == 0) + mode |= COMM_MODE_RX; + if 
((ioread32be(®s->tctrl) & TCTRL_GTS) == 0) + mode |= COMM_MODE_TX; + + graceful_stop(dtsec, mode); + if (pause_time) { /* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */ if (dtsec->fm_rev_info.major == 2 && pause_time <= 320) { @@ -958,17 +992,27 @@ int dtsec_set_tx_pause_frames(struct fman_mac *dtsec, iowrite32be(ioread32be(®s->maccfg1) & ~MACCFG1_TX_FLOW, ®s->maccfg1); + graceful_start(dtsec, mode); + return 0; } int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en) { struct dtsec_regs __iomem *regs = dtsec->regs; + enum comm_mode mode = COMM_MODE_NONE; u32 tmp; if (!is_init_done(dtsec->dtsec_drv_param)) return -EINVAL; + if ((ioread32be(®s->rctrl) & RCTRL_GRS) == 0) + mode |= COMM_MODE_RX; + if ((ioread32be(®s->tctrl) & TCTRL_GTS) == 0) + mode |= COMM_MODE_TX; + + graceful_stop(dtsec, mode); + tmp = ioread32be(®s->maccfg1); if (en) tmp |= MACCFG1_RX_FLOW; @@ -976,20 +1020,34 @@ int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en) tmp &= ~MACCFG1_RX_FLOW; iowrite32be(tmp, ®s->maccfg1); + graceful_start(dtsec, mode); + return 0; } int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr) { + struct dtsec_regs __iomem *regs = dtsec->regs; + enum comm_mode mode = COMM_MODE_NONE; + if (!is_init_done(dtsec->dtsec_drv_param)) return -EINVAL; + if ((ioread32be(®s->rctrl) & RCTRL_GRS) == 0) + mode |= COMM_MODE_RX; + if ((ioread32be(®s->tctrl) & TCTRL_GTS) == 0) + mode |= COMM_MODE_TX; + + graceful_stop(dtsec, mode); + /* Initialize MAC Station Address registers (1 & 2) * Station address have to be swapped (big endian to little endian */ dtsec->addr = ENET_ADDR_TO_UINT64(*enet_addr); set_mac_address(dtsec->regs, (u8 *)(*enet_addr)); + graceful_start(dtsec, mode); + return 0; } @@ -1162,11 +1220,19 @@ int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val) int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed) { struct dtsec_regs __iomem *regs = dtsec->regs; + enum comm_mode mode = COMM_MODE_NONE; u32 tmp; if (!is_init_done(dtsec->dtsec_drv_param)) return -EINVAL; + if ((ioread32be(®s->rctrl) & RCTRL_GRS) == 0) + mode |= COMM_MODE_RX; + if ((ioread32be(®s->tctrl) & TCTRL_GTS) == 0) + mode |= COMM_MODE_TX; + + graceful_stop(dtsec, mode); + tmp = ioread32be(®s->maccfg2); /* Full Duplex */ @@ -1186,6 +1252,8 @@ int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed) tmp &= ~DTSEC_ECNTRL_R100M; iowrite32be(tmp, ®s->ecntrl); + graceful_start(dtsec, mode); + return 0; } diff --git a/drivers/net/ethernet/freescale/fman/fman_keygen.c b/drivers/net/ethernet/freescale/fman/fman_keygen.c new file mode 100644 index 000000000000..f54da3c684d0 --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/fman_keygen.c @@ -0,0 +1,783 @@ +/* + * Copyright 2017 NXP + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of NXP nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY NXP ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NXP BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include + +#include "fman_keygen.h" + +/* Maximum number of HW Ports */ +#define FMAN_MAX_NUM_OF_HW_PORTS 64 + +/* Maximum number of KeyGen Schemes */ +#define FM_KG_MAX_NUM_OF_SCHEMES 32 + +/* Number of generic KeyGen Generic Extract Command Registers */ +#define FM_KG_NUM_OF_GENERIC_REGS 8 + +/* Dummy port ID */ +#define DUMMY_PORT_ID 0 + +/* Select Scheme Value Register */ +#define KG_SCH_DEF_USE_KGSE_DV_0 2 +#define KG_SCH_DEF_USE_KGSE_DV_1 3 + +/* Registers Shifting values */ +#define FM_KG_KGAR_NUM_SHIFT 16 +#define KG_SCH_DEF_L4_PORT_SHIFT 8 +#define KG_SCH_DEF_IP_ADDR_SHIFT 18 +#define KG_SCH_HASH_CONFIG_SHIFT_SHIFT 24 + +/* KeyGen Registers bit field masks: */ + +/* Enable bit field mask for KeyGen General Configuration Register */ +#define FM_KG_KGGCR_EN 0x80000000 + +/* KeyGen Global Registers bit field masks */ +#define FM_KG_KGAR_GO 0x80000000 +#define FM_KG_KGAR_READ 0x40000000 +#define FM_KG_KGAR_WRITE 0x00000000 +#define FM_KG_KGAR_SEL_SCHEME_ENTRY 0x00000000 +#define FM_KG_KGAR_SCM_WSEL_UPDATE_CNT 0x00008000 + +#define FM_KG_KGAR_ERR 0x20000000 +#define FM_KG_KGAR_SEL_CLS_PLAN_ENTRY 0x01000000 +#define FM_KG_KGAR_SEL_PORT_ENTRY 0x02000000 +#define FM_KG_KGAR_SEL_PORT_WSEL_SP 0x00008000 +#define FM_KG_KGAR_SEL_PORT_WSEL_CPP 0x00004000 + +/* Error events exceptions */ +#define FM_EX_KG_DOUBLE_ECC 0x80000000 +#define FM_EX_KG_KEYSIZE_OVERFLOW 0x40000000 + +/* Scheme Registers bit field masks */ +#define KG_SCH_MODE_EN 0x80000000 +#define KG_SCH_VSP_NO_KSP_EN 0x80000000 +#define KG_SCH_HASH_CONFIG_SYM 0x40000000 + +/* Known Protocol field codes */ +#define KG_SCH_KN_PORT_ID 0x80000000 +#define KG_SCH_KN_MACDST 0x40000000 +#define KG_SCH_KN_MACSRC 0x20000000 +#define KG_SCH_KN_TCI1 0x10000000 +#define KG_SCH_KN_TCI2 0x08000000 +#define KG_SCH_KN_ETYPE 0x04000000 +#define KG_SCH_KN_PPPSID 0x02000000 +#define KG_SCH_KN_PPPID 0x01000000 +#define KG_SCH_KN_MPLS1 0x00800000 +#define KG_SCH_KN_MPLS2 0x00400000 +#define KG_SCH_KN_MPLS_LAST 0x00200000 +#define KG_SCH_KN_IPSRC1 0x00100000 +#define KG_SCH_KN_IPDST1 0x00080000 +#define KG_SCH_KN_PTYPE1 0x00040000 +#define KG_SCH_KN_IPTOS_TC1 0x00020000 +#define KG_SCH_KN_IPV6FL1 0x00010000 +#define KG_SCH_KN_IPSRC2 0x00008000 +#define KG_SCH_KN_IPDST2 0x00004000 +#define KG_SCH_KN_PTYPE2 0x00002000 +#define KG_SCH_KN_IPTOS_TC2 0x00001000 +#define KG_SCH_KN_IPV6FL2 0x00000800 +#define KG_SCH_KN_GREPTYPE 0x00000400 +#define KG_SCH_KN_IPSEC_SPI 0x00000200 +#define KG_SCH_KN_IPSEC_NH 0x00000100 +#define KG_SCH_KN_IPPID 
0x00000080 +#define KG_SCH_KN_L4PSRC 0x00000004 +#define KG_SCH_KN_L4PDST 0x00000002 +#define KG_SCH_KN_TFLG 0x00000001 + +/* NIA values */ +#define NIA_ENG_BMI 0x00500000 +#define NIA_BMI_AC_ENQ_FRAME 0x00000002 +#define ENQUEUE_KG_DFLT_NIA (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME) + +/* Hard-coded configuration: + * These values are used as hard-coded values for KeyGen configuration + * and they replace user selections for this hard-coded version + */ + +/* Hash distribution shift */ +#define DEFAULT_HASH_DIST_FQID_SHIFT 0 + +/* Hash shift */ +#define DEFAULT_HASH_SHIFT 0 + +/* Symmetric hash usage: + * Warning: + * - the value for symmetric hash usage must be in accordance with hash + * key defined below + * - according to tests performed, spreading is not working if symmetric + * hash is set on true + * So ultimately symmetric hash functionality should be always disabled: + */ +#define DEFAULT_SYMMETRIC_HASH false + +/* Hash Key extraction fields: */ +#define DEFAULT_HASH_KEY_EXTRACT_FIELDS \ + (KG_SCH_KN_IPSRC1 | KG_SCH_KN_IPDST1 | \ + KG_SCH_KN_L4PSRC | KG_SCH_KN_L4PDST) + +/* Default values to be used as hash key in case IPv4 or L4 (TCP, UDP) + * don't exist in the frame + */ +/* Default IPv4 address */ +#define DEFAULT_HASH_KEY_IPv4_ADDR 0x0A0A0A0A +/* Default L4 port */ +#define DEFAULT_HASH_KEY_L4_PORT 0x0B0B0B0B + +/* KeyGen Memory Mapped Registers: */ + +/* Scheme Configuration RAM Registers */ +struct fman_kg_scheme_regs { + u32 kgse_mode; /* 0x100: MODE */ + u32 kgse_ekfc; /* 0x104: Extract Known Fields Command */ + u32 kgse_ekdv; /* 0x108: Extract Known Default Value */ + u32 kgse_bmch; /* 0x10C: Bit Mask Command High */ + u32 kgse_bmcl; /* 0x110: Bit Mask Command Low */ + u32 kgse_fqb; /* 0x114: Frame Queue Base */ + u32 kgse_hc; /* 0x118: Hash Command */ + u32 kgse_ppc; /* 0x11C: Policer Profile Command */ + u32 kgse_gec[FM_KG_NUM_OF_GENERIC_REGS]; + /* 0x120: Generic Extract Command */ + u32 kgse_spc; + /* 0x140: KeyGen Scheme Entry Statistic Packet Counter */ + u32 kgse_dv0; /* 0x144: KeyGen Scheme Entry Default Value 0 */ + u32 kgse_dv1; /* 0x148: KeyGen Scheme Entry Default Value 1 */ + u32 kgse_ccbs; + /* 0x14C: KeyGen Scheme Entry Coarse Classification Bit*/ + u32 kgse_mv; /* 0x150: KeyGen Scheme Entry Match vector */ + u32 kgse_om; /* 0x154: KeyGen Scheme Entry Operation Mode bits */ + u32 kgse_vsp; + /* 0x158: KeyGen Scheme Entry Virtual Storage Profile */ +}; + +/* Port Partition Configuration Registers */ +struct fman_kg_pe_regs { + u32 fmkg_pe_sp; /* 0x100: KeyGen Port entry Scheme Partition */ + u32 fmkg_pe_cpp; + /* 0x104: KeyGen Port Entry Classification Plan Partition */ +}; + +/* General Configuration and Status Registers + * Global Statistic Counters + * KeyGen Global Registers + */ +struct fman_kg_regs { + u32 fmkg_gcr; /* 0x000: KeyGen General Configuration Register */ + u32 res004; /* 0x004: Reserved */ + u32 res008; /* 0x008: Reserved */ + u32 fmkg_eer; /* 0x00C: KeyGen Error Event Register */ + u32 fmkg_eeer; /* 0x010: KeyGen Error Event Enable Register */ + u32 res014; /* 0x014: Reserved */ + u32 res018; /* 0x018: Reserved */ + u32 fmkg_seer; /* 0x01C: KeyGen Scheme Error Event Register */ + u32 fmkg_seeer; /* 0x020: KeyGen Scheme Error Event Enable Register */ + u32 fmkg_gsr; /* 0x024: KeyGen Global Status Register */ + u32 fmkg_tpc; /* 0x028: Total Packet Counter Register */ + u32 fmkg_serc; /* 0x02C: Soft Error Capture Register */ + u32 res030[4]; /* 0x030: Reserved */ + u32 fmkg_fdor; /* 0x034: Frame Data Offset Register */ + u32 fmkg_gdv0r; /* 0x038: 
Global Default Value Register 0 */ + u32 fmkg_gdv1r; /* 0x03C: Global Default Value Register 1 */ + u32 res04c[6]; /* 0x040: Reserved */ + u32 fmkg_feer; /* 0x044: Force Error Event Register */ + u32 res068[38]; /* 0x048: Reserved */ + union { + u32 fmkg_indirect[63]; /* 0x100: Indirect Access Registers */ + struct fman_kg_scheme_regs fmkg_sch; /* Scheme Registers */ + struct fman_kg_pe_regs fmkg_pe; /* Port Partition Registers */ + }; + u32 fmkg_ar; /* 0x1FC: KeyGen Action Register */ +}; + +/* KeyGen Scheme data */ +struct keygen_scheme { + bool used; /* Specifies if this scheme is used */ + u8 hw_port_id; + /* Hardware port ID + * schemes sharing between multiple ports is not + * currently supported + * so we have only one port id bound to a scheme + */ + u32 base_fqid; + /* Base FQID: + * Must be between 1 and 2^24-1 + * If hash is used and an even distribution is + * expected according to hash_fqid_count, + * base_fqid must be aligned to hash_fqid_count + */ + u32 hash_fqid_count; + /* FQ range for hash distribution: + * Must be a power of 2 + * Represents the range of queues for spreading + */ + bool use_hashing; /* Usage of Hashing and spreading over FQ */ + bool symmetric_hash; /* Symmetric Hash option usage */ + u8 hashShift; + /* Hash result right shift. + * Select the 24 bits out of the 64 hash result. + * 0 means using the 24 LSB's, otherwise + * use the 24 LSB's after shifting right + */ + u32 match_vector; /* Match Vector */ +}; + +/* KeyGen driver data */ +struct fman_keygen { + struct keygen_scheme schemes[FM_KG_MAX_NUM_OF_SCHEMES]; + /* Array of schemes */ + struct fman_kg_regs __iomem *keygen_regs; /* KeyGen registers */ +}; + +/* keygen_write_ar_wait + * + * Write Action Register with specified value, wait for GO bit field to be + * idle and then read the error + * + * regs: KeyGen registers + * fmkg_ar: Action Register value + * + * Return: Zero for success or error code in case of failure + */ +static int keygen_write_ar_wait(struct fman_kg_regs __iomem *regs, u32 fmkg_ar) +{ + iowrite32be(fmkg_ar, ®s->fmkg_ar); + + /* Wait for GO bit field to be idle */ + while (fmkg_ar & FM_KG_KGAR_GO) + fmkg_ar = ioread32be(®s->fmkg_ar); + + if (fmkg_ar & FM_KG_KGAR_ERR) + return -EINVAL; + + return 0; +} + +/* build_ar_scheme + * + * Build Action Register value for scheme settings + * + * scheme_id: Scheme ID + * update_counter: update scheme counter + * write: true for action to write the scheme or false for read action + * + * Return: AR value + */ +static u32 build_ar_scheme(u8 scheme_id, bool update_counter, bool write) +{ + u32 rw = (u32)(write ? FM_KG_KGAR_WRITE : FM_KG_KGAR_READ); + + return (u32)(FM_KG_KGAR_GO | + rw | + FM_KG_KGAR_SEL_SCHEME_ENTRY | + DUMMY_PORT_ID | + ((u32)scheme_id << FM_KG_KGAR_NUM_SHIFT) | + (update_counter ? FM_KG_KGAR_SCM_WSEL_UPDATE_CNT : 0)); +} + +/* build_ar_bind_scheme + * + * Build Action Register value for port binding to schemes + * + * hwport_id: HW Port ID + * write: true for action to write the bind or false for read action + * + * Return: AR value + */ +static u32 build_ar_bind_scheme(u8 hwport_id, bool write) +{ + u32 rw = write ? 
(u32)FM_KG_KGAR_WRITE : (u32)FM_KG_KGAR_READ; + + return (u32)(FM_KG_KGAR_GO | + rw | + FM_KG_KGAR_SEL_PORT_ENTRY | + hwport_id | + FM_KG_KGAR_SEL_PORT_WSEL_SP); +} + +/* keygen_write_sp + * + * Write Scheme Partition Register with specified value + * + * regs: KeyGen Registers + * sp: Scheme Partition register value + * add: true to add a scheme partition or false to clear + * + * Return: none + */ +static void keygen_write_sp(struct fman_kg_regs __iomem *regs, u32 sp, bool add) +{ + u32 tmp; + + tmp = ioread32be(®s->fmkg_pe.fmkg_pe_sp); + + if (add) + tmp |= sp; + else + tmp &= ~sp; + + iowrite32be(tmp, ®s->fmkg_pe.fmkg_pe_sp); +} + +/* build_ar_bind_cls_plan + * + * Build Action Register value for Classification Plan + * + * hwport_id: HW Port ID + * write: true for action to write the CP or false for read action + * + * Return: AR value + */ +static u32 build_ar_bind_cls_plan(u8 hwport_id, bool write) +{ + u32 rw = write ? (u32)FM_KG_KGAR_WRITE : (u32)FM_KG_KGAR_READ; + + return (u32)(FM_KG_KGAR_GO | + rw | + FM_KG_KGAR_SEL_PORT_ENTRY | + hwport_id | + FM_KG_KGAR_SEL_PORT_WSEL_CPP); +} + +/* keygen_write_cpp + * + * Write Classification Plan Partition Register with specified value + * + * regs: KeyGen Registers + * cpp: CPP register value + * + * Return: none + */ +static void keygen_write_cpp(struct fman_kg_regs __iomem *regs, u32 cpp) +{ + iowrite32be(cpp, ®s->fmkg_pe.fmkg_pe_cpp); +} + +/* keygen_write_scheme + * + * Write all Schemes Registers with specified values + * + * regs: KeyGen Registers + * scheme_id: Scheme ID + * scheme_regs: Scheme registers values desired to be written + * update_counter: update scheme counter + * + * Return: Zero for success or error code in case of failure + */ +static int keygen_write_scheme(struct fman_kg_regs __iomem *regs, u8 scheme_id, + struct fman_kg_scheme_regs *scheme_regs, + bool update_counter) +{ + u32 ar_reg; + int err, i; + + /* Write indirect scheme registers */ + iowrite32be(scheme_regs->kgse_mode, ®s->fmkg_sch.kgse_mode); + iowrite32be(scheme_regs->kgse_ekfc, ®s->fmkg_sch.kgse_ekfc); + iowrite32be(scheme_regs->kgse_ekdv, ®s->fmkg_sch.kgse_ekdv); + iowrite32be(scheme_regs->kgse_bmch, ®s->fmkg_sch.kgse_bmch); + iowrite32be(scheme_regs->kgse_bmcl, ®s->fmkg_sch.kgse_bmcl); + iowrite32be(scheme_regs->kgse_fqb, ®s->fmkg_sch.kgse_fqb); + iowrite32be(scheme_regs->kgse_hc, ®s->fmkg_sch.kgse_hc); + iowrite32be(scheme_regs->kgse_ppc, ®s->fmkg_sch.kgse_ppc); + iowrite32be(scheme_regs->kgse_spc, ®s->fmkg_sch.kgse_spc); + iowrite32be(scheme_regs->kgse_dv0, ®s->fmkg_sch.kgse_dv0); + iowrite32be(scheme_regs->kgse_dv1, ®s->fmkg_sch.kgse_dv1); + iowrite32be(scheme_regs->kgse_ccbs, ®s->fmkg_sch.kgse_ccbs); + iowrite32be(scheme_regs->kgse_mv, ®s->fmkg_sch.kgse_mv); + iowrite32be(scheme_regs->kgse_om, ®s->fmkg_sch.kgse_om); + iowrite32be(scheme_regs->kgse_vsp, ®s->fmkg_sch.kgse_vsp); + + for (i = 0 ; i < FM_KG_NUM_OF_GENERIC_REGS ; i++) + iowrite32be(scheme_regs->kgse_gec[i], + ®s->fmkg_sch.kgse_gec[i]); + + /* Write AR (Action register) */ + ar_reg = build_ar_scheme(scheme_id, update_counter, true); + err = keygen_write_ar_wait(regs, ar_reg); + if (err != 0) { + pr_err("Writing Action Register failed\n"); + return err; + } + + return err; +} + +/* get_free_scheme_id + * + * Find the first free scheme available to be used + * + * keygen: KeyGen handle + * scheme_id: pointer to scheme id + * + * Return: 0 on success, -EINVAL when the are no available free schemes + */ +static int get_free_scheme_id(struct fman_keygen *keygen, u8 *scheme_id) +{ + u8 i; + + 
for (i = 0; i < FM_KG_MAX_NUM_OF_SCHEMES; i++) + if (!keygen->schemes[i].used) { + *scheme_id = i; + return 0; + } + + return -EINVAL; +} + +/* get_scheme + * + * Provides the scheme for specified ID + * + * keygen: KeyGen handle + * scheme_id: Scheme ID + * + * Return: handle to required scheme + */ +static struct keygen_scheme *get_scheme(struct fman_keygen *keygen, + u8 scheme_id) +{ + if (scheme_id >= FM_KG_MAX_NUM_OF_SCHEMES) + return NULL; + return &keygen->schemes[scheme_id]; +} + +/* keygen_bind_port_to_schemes + * + * Bind the port to schemes + * + * keygen: KeyGen handle + * scheme_id: id of the scheme to bind to + * bind: true to bind the port or false to unbind it + * + * Return: Zero for success or error code in case of failure + */ +static int keygen_bind_port_to_schemes(struct fman_keygen *keygen, + u8 scheme_id, + bool bind) +{ + struct fman_kg_regs __iomem *keygen_regs = keygen->keygen_regs; + struct keygen_scheme *scheme; + u32 ar_reg; + u32 schemes_vector = 0; + int err; + + scheme = get_scheme(keygen, scheme_id); + if (!scheme) { + pr_err("Requested Scheme does not exist\n"); + return -EINVAL; + } + if (!scheme->used) { + pr_err("Cannot bind port to an invalid scheme\n"); + return -EINVAL; + } + + schemes_vector |= 1 << (31 - scheme_id); + + ar_reg = build_ar_bind_scheme(scheme->hw_port_id, false); + err = keygen_write_ar_wait(keygen_regs, ar_reg); + if (err != 0) { + pr_err("Reading Action Register failed\n"); + return err; + } + + keygen_write_sp(keygen_regs, schemes_vector, bind); + + ar_reg = build_ar_bind_scheme(scheme->hw_port_id, true); + err = keygen_write_ar_wait(keygen_regs, ar_reg); + if (err != 0) { + pr_err("Writing Action Register failed\n"); + return err; + } + + return 0; +} + +/* keygen_scheme_setup + * + * Setup the scheme according to required configuration + * + * keygen: KeyGen handle + * scheme_id: scheme ID + * enable: true to enable scheme or false to disable it + * + * Return: Zero for success or error code in case of failure + */ +static int keygen_scheme_setup(struct fman_keygen *keygen, u8 scheme_id, + bool enable) +{ + struct fman_kg_regs __iomem *keygen_regs = keygen->keygen_regs; + struct fman_kg_scheme_regs scheme_regs; + struct keygen_scheme *scheme; + u32 tmp_reg; + int err; + + scheme = get_scheme(keygen, scheme_id); + if (!scheme) { + pr_err("Requested Scheme does not exist\n"); + return -EINVAL; + } + if (enable && scheme->used) { + pr_err("The requested Scheme is already used\n"); + return -EINVAL; + } + + /* Clear scheme registers */ + memset(&scheme_regs, 0, sizeof(struct fman_kg_scheme_regs)); + + /* Setup all scheme registers: */ + tmp_reg = 0; + + if (enable) { + /* Enable Scheme */ + tmp_reg |= KG_SCH_MODE_EN; + /* Enqueue frame NIA */ + tmp_reg |= ENQUEUE_KG_DFLT_NIA; + } + + scheme_regs.kgse_mode = tmp_reg; + + scheme_regs.kgse_mv = scheme->match_vector; + + /* Scheme don't override StorageProfile: + * valid only for DPAA_VERSION >= 11 + */ + scheme_regs.kgse_vsp = KG_SCH_VSP_NO_KSP_EN; + + /* Configure Hard-Coded Rx Hashing: */ + + if (scheme->use_hashing) { + /* configure kgse_ekfc */ + scheme_regs.kgse_ekfc = DEFAULT_HASH_KEY_EXTRACT_FIELDS; + + /* configure kgse_ekdv */ + tmp_reg = 0; + tmp_reg |= (KG_SCH_DEF_USE_KGSE_DV_0 << + KG_SCH_DEF_IP_ADDR_SHIFT); + tmp_reg |= (KG_SCH_DEF_USE_KGSE_DV_1 << + KG_SCH_DEF_L4_PORT_SHIFT); + scheme_regs.kgse_ekdv = tmp_reg; + + /* configure kgse_dv0 */ + scheme_regs.kgse_dv0 = DEFAULT_HASH_KEY_IPv4_ADDR; + /* configure kgse_dv1 */ + scheme_regs.kgse_dv1 = DEFAULT_HASH_KEY_L4_PORT; + + 
/* configure kgse_hc */ + tmp_reg = 0; + tmp_reg |= ((scheme->hash_fqid_count - 1) << + DEFAULT_HASH_DIST_FQID_SHIFT); + tmp_reg |= scheme->hashShift << KG_SCH_HASH_CONFIG_SHIFT_SHIFT; + + if (scheme->symmetric_hash) { + /* Normally extraction key should be verified if + * complies with symmetric hash + * But because extraction is hard-coded, we are sure + * the key is symmetric + */ + tmp_reg |= KG_SCH_HASH_CONFIG_SYM; + } + scheme_regs.kgse_hc = tmp_reg; + } else { + scheme_regs.kgse_ekfc = 0; + scheme_regs.kgse_hc = 0; + scheme_regs.kgse_ekdv = 0; + scheme_regs.kgse_dv0 = 0; + scheme_regs.kgse_dv1 = 0; + } + + /* configure kgse_fqb: Scheme FQID base */ + tmp_reg = 0; + tmp_reg |= scheme->base_fqid; + scheme_regs.kgse_fqb = tmp_reg; + + /* features not used by hard-coded configuration */ + scheme_regs.kgse_bmch = 0; + scheme_regs.kgse_bmcl = 0; + scheme_regs.kgse_spc = 0; + + /* Write scheme registers */ + err = keygen_write_scheme(keygen_regs, scheme_id, &scheme_regs, true); + if (err != 0) { + pr_err("Writing scheme registers failed\n"); + return err; + } + + /* Update used field for Scheme */ + scheme->used = enable; + + return 0; +} + +/* keygen_init + * + * KeyGen initialization: + * Initializes and enables KeyGen, allocate driver memory, setup registers, + * clear port bindings, invalidate all schemes + * + * keygen_regs: KeyGen registers base address + * + * Return: Handle to KeyGen driver + */ +struct fman_keygen *keygen_init(struct fman_kg_regs __iomem *keygen_regs) +{ + struct fman_keygen *keygen; + u32 ar; + int i; + + /* Allocate memory for KeyGen driver */ + keygen = kzalloc(sizeof(*keygen), GFP_KERNEL); + if (!keygen) + return NULL; + + keygen->keygen_regs = keygen_regs; + + /* KeyGen initialization (for Master partition): + * Setup KeyGen registers + */ + iowrite32be(ENQUEUE_KG_DFLT_NIA, &keygen_regs->fmkg_gcr); + + iowrite32be(FM_EX_KG_DOUBLE_ECC | FM_EX_KG_KEYSIZE_OVERFLOW, + &keygen_regs->fmkg_eer); + + iowrite32be(0, &keygen_regs->fmkg_fdor); + iowrite32be(0, &keygen_regs->fmkg_gdv0r); + iowrite32be(0, &keygen_regs->fmkg_gdv1r); + + /* Clear binding between ports to schemes and classification plans + * so that all ports are not bound to any scheme/classification plan + */ + for (i = 0; i < FMAN_MAX_NUM_OF_HW_PORTS; i++) { + /* Clear all pe sp schemes registers */ + keygen_write_sp(keygen_regs, 0xffffffff, false); + ar = build_ar_bind_scheme(i, true); + keygen_write_ar_wait(keygen_regs, ar); + + /* Clear all pe cpp classification plans registers */ + keygen_write_cpp(keygen_regs, 0); + ar = build_ar_bind_cls_plan(i, true); + keygen_write_ar_wait(keygen_regs, ar); + } + + /* Enable all scheme interrupts */ + iowrite32be(0xFFFFFFFF, &keygen_regs->fmkg_seer); + iowrite32be(0xFFFFFFFF, &keygen_regs->fmkg_seeer); + + /* Enable KyeGen */ + iowrite32be(ioread32be(&keygen_regs->fmkg_gcr) | FM_KG_KGGCR_EN, + &keygen_regs->fmkg_gcr); + + return keygen; +} +EXPORT_SYMBOL(keygen_init); + +/* keygen_port_hashing_init + * + * Initializes a port for Rx Hashing with specified configuration parameters + * + * keygen: KeyGen handle + * hw_port_id: HW Port ID + * hash_base_fqid: Hashing Base FQID used for spreading + * hash_size: Hashing size + * + * Return: Zero for success or error code in case of failure + */ +int keygen_port_hashing_init(struct fman_keygen *keygen, u8 hw_port_id, + u32 hash_base_fqid, u32 hash_size) +{ + struct keygen_scheme *scheme; + u8 scheme_id; + int err; + + /* Validate Scheme configuration parameters */ + if (hash_base_fqid == 0 || (hash_base_fqid & 
~0x00FFFFFF)) { + pr_err("Base FQID must be between 1 and 2^24-1\n"); + return -EINVAL; + } + if (hash_size == 0 || (hash_size & (hash_size - 1)) != 0) { + pr_err("Hash size must be power of two\n"); + return -EINVAL; + } + + /* Find a free scheme */ + err = get_free_scheme_id(keygen, &scheme_id); + if (err) { + pr_err("The maximum number of available Schemes has been exceeded\n"); + return -EINVAL; + } + + /* Create and configure Hard-Coded Scheme: */ + + scheme = get_scheme(keygen, scheme_id); + if (!scheme) { + pr_err("Requested Scheme does not exist\n"); + return -EINVAL; + } + if (scheme->used) { + pr_err("The requested Scheme is already used\n"); + return -EINVAL; + } + + /* Clear all scheme fields because the scheme may have been + * previously used + */ + memset(scheme, 0, sizeof(struct keygen_scheme)); + + /* Setup scheme: */ + scheme->hw_port_id = hw_port_id; + scheme->use_hashing = true; + scheme->base_fqid = hash_base_fqid; + scheme->hash_fqid_count = hash_size; + scheme->symmetric_hash = DEFAULT_SYMMETRIC_HASH; + scheme->hashShift = DEFAULT_HASH_SHIFT; + + /* All Schemes in hard-coded configuration + * are Indirect Schemes + */ + scheme->match_vector = 0; + + err = keygen_scheme_setup(keygen, scheme_id, true); + if (err != 0) { + pr_err("Scheme setup failed\n"); + return err; + } + + /* Bind Rx port to Scheme */ + err = keygen_bind_port_to_schemes(keygen, scheme_id, true); + if (err != 0) { + pr_err("Binding port to schemes failed\n"); + return err; + } + + return 0; +} +EXPORT_SYMBOL(keygen_port_hashing_init); diff --git a/drivers/net/ethernet/freescale/fman/fman_keygen.h b/drivers/net/ethernet/freescale/fman/fman_keygen.h new file mode 100644 index 000000000000..c4640de3f4cb --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/fman_keygen.h @@ -0,0 +1,46 @@ +/* + * Copyright 2017 NXP + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of NXP nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY NXP ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NXP BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef __KEYGEN_H +#define __KEYGEN_H + +#include + +struct fman_keygen; +struct fman_kg_regs; + +struct fman_keygen *keygen_init(struct fman_kg_regs __iomem *keygen_regs); + +int keygen_port_hashing_init(struct fman_keygen *keygen, u8 hw_port_id, + u32 hash_base_fqid, u32 hash_size); + +#endif /* __KEYGEN_H */ diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c index 57bf44fa16a1..1789b206be58 100644 --- a/drivers/net/ethernet/freescale/fman/fman_port.c +++ b/drivers/net/ethernet/freescale/fman/fman_port.c @@ -32,10 +32,6 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include "fman_port.h" -#include "fman.h" -#include "fman_sp.h" - #include #include #include @@ -45,6 +41,11 @@ #include #include +#include "fman.h" +#include "fman_port.h" +#include "fman_sp.h" +#include "fman_keygen.h" + /* Queue ID */ #define DFLT_FQ_ID 0x00FFFFFF @@ -184,6 +185,7 @@ #define NIA_ENG_QMI_ENQ 0x00540000 #define NIA_ENG_QMI_DEQ 0x00580000 #define NIA_ENG_HWP 0x00440000 +#define NIA_ENG_HWK 0x00480000 #define NIA_BMI_AC_ENQ_FRAME 0x00000002 #define NIA_BMI_AC_TX_RELEASE 0x000002C0 #define NIA_BMI_AC_RELEASE 0x000000C0 @@ -394,6 +396,8 @@ struct fman_port_bpools { struct fman_port_cfg { u32 dflt_fqid; u32 err_fqid; + u32 pcd_base_fqid; + u32 pcd_fqs_count; u8 deq_sp; bool deq_high_priority; enum fman_port_deq_type deq_type; @@ -1271,6 +1275,10 @@ static void set_rx_dflt_cfg(struct fman_port *port, port_params->specific_params.rx_params.err_fqid; port->cfg->dflt_fqid = port_params->specific_params.rx_params.dflt_fqid; + port->cfg->pcd_base_fqid = + port_params->specific_params.rx_params.pcd_base_fqid; + port->cfg->pcd_fqs_count = + port_params->specific_params.rx_params.pcd_fqs_count; } static void set_tx_dflt_cfg(struct fman_port *port, @@ -1397,6 +1405,24 @@ int fman_port_config(struct fman_port *port, struct fman_port_params *params) } EXPORT_SYMBOL(fman_port_config); +/** + * fman_port_use_kg_hash + * port: A pointer to a FM Port module. + * Sets the HW KeyGen or the BMI as HW Parser next engine, enabling + * or bypassing the KeyGen hashing of Rx traffic + */ +void fman_port_use_kg_hash(struct fman_port *port, bool enable) +{ + if (enable) + /* After the Parser frames go to KeyGen */ + iowrite32be(NIA_ENG_HWK, &port->bmi_regs->rx.fmbm_rfpne); + else + /* After the Parser frames go to BMI */ + iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME, + &port->bmi_regs->rx.fmbm_rfpne); +} +EXPORT_SYMBOL(fman_port_use_kg_hash); + /** * fman_port_init * port: A pointer to a FM Port module. 
@@ -1407,9 +1433,10 @@ EXPORT_SYMBOL(fman_port_config); */ int fman_port_init(struct fman_port *port) { + struct fman_port_init_params params; + struct fman_keygen *keygen; struct fman_port_cfg *cfg; int err; - struct fman_port_init_params params; if (is_init_done(port->cfg)) return -EINVAL; @@ -1472,6 +1499,17 @@ int fman_port_init(struct fman_port *port) if (err) return err; + if (port->cfg->pcd_fqs_count) { + keygen = port->dts_params.fman->keygen; + err = keygen_port_hashing_init(keygen, port->port_id, + port->cfg->pcd_base_fqid, + port->cfg->pcd_fqs_count); + if (err) + return err; + + fman_port_use_kg_hash(port, true); + } + kfree(port->cfg); port->cfg = NULL; @@ -1682,6 +1720,17 @@ u32 fman_port_get_qman_channel_id(struct fman_port *port) } EXPORT_SYMBOL(fman_port_get_qman_channel_id); +int fman_port_get_hash_result_offset(struct fman_port *port, u32 *offset) +{ + if (port->buffer_offsets.hash_result_offset == ILLEGAL_BASE) + return -EINVAL; + + *offset = port->buffer_offsets.hash_result_offset; + + return 0; +} +EXPORT_SYMBOL(fman_port_get_hash_result_offset); + static int fman_port_probe(struct platform_device *of_dev) { struct fman_port *port; @@ -1720,8 +1769,8 @@ static int fman_port_probe(struct platform_device *of_dev) err = of_property_read_u32(port_node, "cell-index", &val); if (err) { - dev_err(port->dev, "%s: reading cell-index for %s failed\n", - __func__, port_node->full_name); + dev_err(port->dev, "%s: reading cell-index for %pOF failed\n", + __func__, port_node); err = -EINVAL; goto return_err; } diff --git a/drivers/net/ethernet/freescale/fman/fman_port.h b/drivers/net/ethernet/freescale/fman/fman_port.h index 8ba901737048..e86ca6a34e4e 100644 --- a/drivers/net/ethernet/freescale/fman/fman_port.h +++ b/drivers/net/ethernet/freescale/fman/fman_port.h @@ -100,6 +100,9 @@ struct fman_port; struct fman_port_rx_params { u32 err_fqid; /* Error Queue Id. */ u32 dflt_fqid; /* Default Queue Id. */ + u32 pcd_base_fqid; /* PCD base Queue Id. */ + u32 pcd_fqs_count; /* Number of PCD FQs. */ + /* Which external buffer pools are used * (up to FMAN_PORT_MAX_EXT_POOLS_NUM), and their sizes. 
*/ @@ -134,6 +137,8 @@ struct fman_port_params { int fman_port_config(struct fman_port *port, struct fman_port_params *params); +void fman_port_use_kg_hash(struct fman_port *port, bool enable); + int fman_port_init(struct fman_port *port); int fman_port_cfg_buf_prefix_content(struct fman_port *port, @@ -146,6 +151,8 @@ int fman_port_enable(struct fman_port *port); u32 fman_port_get_qman_channel_id(struct fman_port *port); +int fman_port_get_hash_result_offset(struct fman_port *port, u32 *offset); + struct fman_port *fman_port_bind(struct device *dev); #endif /* __FMAN_PORT_H */ diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index 1c7da16ad0ff..387eb4a88b72 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -700,8 +700,8 @@ static int mac_probe(struct platform_device *_of_dev) priv->internal_phy_node = of_parse_phandle(mac_node, "pcsphy-handle", 0); } else { - dev_err(dev, "MAC node (%s) contains unsupported MAC\n", - mac_node->full_name); + dev_err(dev, "MAC node (%pOF) contains unsupported MAC\n", + mac_node); err = -EINVAL; goto _return; } @@ -714,16 +714,15 @@ static int mac_probe(struct platform_device *_of_dev) /* Get the FM node */ dev_node = of_get_parent(mac_node); if (!dev_node) { - dev_err(dev, "of_get_parent(%s) failed\n", - mac_node->full_name); + dev_err(dev, "of_get_parent(%pOF) failed\n", + mac_node); err = -EINVAL; goto _return_dev_set_drvdata; } of_dev = of_find_device_by_node(dev_node); if (!of_dev) { - dev_err(dev, "of_find_device_by_node(%s) failed\n", - dev_node->full_name); + dev_err(dev, "of_find_device_by_node(%pOF) failed\n", dev_node); err = -EINVAL; goto _return_of_node_put; } @@ -731,8 +730,7 @@ static int mac_probe(struct platform_device *_of_dev) /* Get the FMan cell-index */ err = of_property_read_u32(dev_node, "cell-index", &val); if (err) { - dev_err(dev, "failed to read cell-index for %s\n", - dev_node->full_name); + dev_err(dev, "failed to read cell-index for %pOF\n", dev_node); err = -EINVAL; goto _return_of_node_put; } @@ -741,7 +739,7 @@ static int mac_probe(struct platform_device *_of_dev) priv->fman = fman_bind(&of_dev->dev); if (!priv->fman) { - dev_err(dev, "fman_bind(%s) failed\n", dev_node->full_name); + dev_err(dev, "fman_bind(%pOF) failed\n", dev_node); err = -ENODEV; goto _return_of_node_put; } @@ -751,8 +749,8 @@ static int mac_probe(struct platform_device *_of_dev) /* Get the address of the memory mapped registers */ err = of_address_to_resource(mac_node, 0, &res); if (err < 0) { - dev_err(dev, "of_address_to_resource(%s) = %d\n", - mac_node->full_name, err); + dev_err(dev, "of_address_to_resource(%pOF) = %d\n", + mac_node, err); goto _return_dev_set_drvdata; } @@ -786,8 +784,7 @@ static int mac_probe(struct platform_device *_of_dev) /* Get the cell-index */ err = of_property_read_u32(mac_node, "cell-index", &val); if (err) { - dev_err(dev, "failed to read cell-index for %s\n", - mac_node->full_name); + dev_err(dev, "failed to read cell-index for %pOF\n", mac_node); err = -EINVAL; goto _return_dev_set_drvdata; } @@ -796,8 +793,7 @@ static int mac_probe(struct platform_device *_of_dev) /* Get the MAC address */ mac_addr = of_get_mac_address(mac_node); if (!mac_addr) { - dev_err(dev, "of_get_mac_address(%s) failed\n", - mac_node->full_name); + dev_err(dev, "of_get_mac_address(%pOF) failed\n", mac_node); err = -EINVAL; goto _return_dev_set_drvdata; } @@ -806,15 +802,15 @@ static int mac_probe(struct platform_device *_of_dev) /* Get the 
port handles */ nph = of_count_phandle_with_args(mac_node, "fsl,fman-ports", NULL); if (unlikely(nph < 0)) { - dev_err(dev, "of_count_phandle_with_args(%s, fsl,fman-ports) failed\n", - mac_node->full_name); + dev_err(dev, "of_count_phandle_with_args(%pOF, fsl,fman-ports) failed\n", + mac_node); err = nph; goto _return_dev_set_drvdata; } if (nph != ARRAY_SIZE(mac_dev->port)) { - dev_err(dev, "Not supported number of fman-ports handles of mac node %s from device tree\n", - mac_node->full_name); + dev_err(dev, "Not supported number of fman-ports handles of mac node %pOF from device tree\n", + mac_node); err = -EINVAL; goto _return_dev_set_drvdata; } @@ -823,24 +819,24 @@ static int mac_probe(struct platform_device *_of_dev) /* Find the port node */ dev_node = of_parse_phandle(mac_node, "fsl,fman-ports", i); if (!dev_node) { - dev_err(dev, "of_parse_phandle(%s, fsl,fman-ports) failed\n", - mac_node->full_name); + dev_err(dev, "of_parse_phandle(%pOF, fsl,fman-ports) failed\n", + mac_node); err = -EINVAL; goto _return_of_node_put; } of_dev = of_find_device_by_node(dev_node); if (!of_dev) { - dev_err(dev, "of_find_device_by_node(%s) failed\n", - dev_node->full_name); + dev_err(dev, "of_find_device_by_node(%pOF) failed\n", + dev_node); err = -EINVAL; goto _return_of_node_put; } mac_dev->port[i] = fman_port_bind(&of_dev->dev); if (!mac_dev->port[i]) { - dev_err(dev, "dev_get_drvdata(%s) failed\n", - dev_node->full_name); + dev_err(dev, "dev_get_drvdata(%pOF) failed\n", + dev_node); err = -EINVAL; goto _return_of_node_put; } @@ -851,8 +847,8 @@ static int mac_probe(struct platform_device *_of_dev) phy_if = of_get_phy_mode(mac_node); if (phy_if < 0) { dev_warn(dev, - "of_get_phy_mode() for %s failed. Defaulting to SGMII\n", - mac_node->full_name); + "of_get_phy_mode() for %pOF failed. Defaulting to SGMII\n", + mac_node); phy_if = PHY_INTERFACE_MODE_SGMII; } priv->phy_if = phy_if; diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c index 1f015edcca22..c8e5d889bd81 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c @@ -100,7 +100,7 @@ static inline void mdc(struct mdiobb_ctrl *ctrl, int what) in_be32(bitbang->dat); } -static struct mdiobb_ops bb_ops = { +static const struct mdiobb_ops bb_ops = { .owner = THIS_MODULE, .set_mdc = mdc, .set_mdio_dir = mdio_dir, diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c index a10de1e9c157..80ad16acf0f1 100644 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c @@ -267,8 +267,8 @@ static void ucc_configure(phys_addr_t start, phys_addr_t end) ret = of_address_to_resource(np, 0, &res); if (ret < 0) { - pr_debug("fsl-pq-mdio: no address range in node %s\n", - np->full_name); + pr_debug("fsl-pq-mdio: no address range in node %pOF\n", + np); continue; } @@ -280,8 +280,8 @@ static void ucc_configure(phys_addr_t start, phys_addr_t end) if (!iprop) { iprop = of_get_property(np, "device-id", NULL); if (!iprop) { - pr_debug("fsl-pq-mdio: no UCC ID in node %s\n", - np->full_name); + pr_debug("fsl-pq-mdio: no UCC ID in node %pOF\n", + np); continue; } } @@ -293,8 +293,8 @@ static void ucc_configure(phys_addr_t start, phys_addr_t end) * numbered from 1, not 0. 
*/ if (ucc_set_qe_mux_mii_mng(id - 1) < 0) { - pr_debug("fsl-pq-mdio: invalid UCC ID in node %s\n", - np->full_name); + pr_debug("fsl-pq-mdio: invalid UCC ID in node %pOF\n", + np); continue; } @@ -442,8 +442,8 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) if (data->get_tbipa) { for_each_child_of_node(np, tbi) { if (strcmp(tbi->type, "tbi-phy") == 0) { - dev_dbg(&pdev->dev, "found TBI PHY node %s\n", - strrchr(tbi->full_name, '/') + 1); + dev_dbg(&pdev->dev, "found TBI PHY node %pOFP\n", + tbi); break; } } @@ -454,8 +454,8 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) if (!prop) { dev_err(&pdev->dev, - "missing 'reg' property in node %s\n", - tbi->full_name); + "missing 'reg' property in node %pOF\n", + tbi); err = -EBUSY; goto error; } diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index c4b4b0a1bbf0..5be52d89b182 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -3687,7 +3687,7 @@ static noinline void gfar_update_link_state(struct gfar_private *priv) u32 tempval1 = gfar_read(®s->maccfg1); u32 tempval = gfar_read(®s->maccfg2); u32 ecntrl = gfar_read(®s->ecntrl); - u32 tx_flow_oldval = (tempval & MACCFG1_TX_FLOW); + u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW); if (phydev->duplex != priv->oldduplex) { if (!(phydev->duplex)) diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c index 721be13081f9..544114281ea7 100644 --- a/drivers/net/ethernet/freescale/gianfar_ptp.c +++ b/drivers/net/ethernet/freescale/gianfar_ptp.c @@ -411,7 +411,7 @@ static int ptp_gianfar_enable(struct ptp_clock_info *ptp, return -EOPNOTSUPP; } -static struct ptp_clock_info ptp_gianfar_caps = { +static const struct ptp_clock_info ptp_gianfar_caps = { .owner = THIS_MODULE, .name = "gianfar clock", .max_adj = 512000, diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig index d11287e11371..91c7bdb9b43c 100644 --- a/drivers/net/ethernet/hisilicon/Kconfig +++ b/drivers/net/ethernet/hisilicon/Kconfig @@ -76,4 +76,31 @@ config HNS_ENET This selects the general ethernet driver for HNS. This module make use of any HNS AE driver, such as HNS_DSAF +config HNS3 + tristate "Hisilicon Network Subsystem Support HNS3 (Framework)" + depends on PCI + ---help--- + This selects the framework support for Hisilicon Network Subsystem 3. + This layer facilitates clients like ENET, RoCE and user-space ethernet + drivers(like ODP)to register with HNAE devices and their associated + operations. + +config HNS3_HCLGE + tristate "Hisilicon HNS3 HCLGE Acceleration Engine & Compatibility Layer Support" + depends on PCI_MSI + depends on HNS3 + ---help--- + This selects the HNS3_HCLGE network acceleration engine & its hardware + compatibility layer. The engine would be used in Hisilicon hip08 family of + SoCs and further upcoming SoCs. + +config HNS3_ENET + tristate "Hisilicon HNS3 Ethernet Device Support" + depends on 64BIT && PCI + depends on HNS3 && HNS3_HCLGE + ---help--- + This selects the Ethernet Driver for Hisilicon Network Subsystem 3 for hip08 + family of SoCs. This module depends upon HNAE3 driver to access the HNAE3 + devices and their associated operations. 
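The client/device split described in these Kconfig entries is what the hnae3 framework added later in this patch implements: a client such as the ENET driver registers once with hnae3_register_client() and the framework instantiates it on every matching HNAE3 device. A minimal sketch of that flow is shown below, assuming the usual module boilerplate; the names example_client/example_client_init are illustrative only, and real clients also fill in the callback fields declared in hnae3.h, which are not shown in this excerpt.

#include <linux/module.h>

#include "hnae3.h"

/* Hypothetical kernel-NIC client; real clients (e.g. the HNS3 ENET
 * driver) also fill in the callback fields declared in hnae3.h.
 */
static struct hnae3_client example_client = {
	.type = HNAE3_CLIENT_KNIC,
};

static int __init example_client_init(void)
{
	/* The framework walks every registered ae_dev and calls its
	 * init_client_instance() op for each device this client matches.
	 */
	return hnae3_register_client(&example_client);
}

static void __exit example_client_exit(void)
{
	hnae3_unregister_client(&example_client);
}

module_init(example_client_init);
module_exit(example_client_exit);
MODULE_LICENSE("GPL");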
+ endif # NET_VENDOR_HISILICON diff --git a/drivers/net/ethernet/hisilicon/Makefile b/drivers/net/ethernet/hisilicon/Makefile index 8661695024dc..3828c435c18f 100644 --- a/drivers/net/ethernet/hisilicon/Makefile +++ b/drivers/net/ethernet/hisilicon/Makefile @@ -6,4 +6,5 @@ obj-$(CONFIG_HIX5HD2_GMAC) += hix5hd2_gmac.o obj-$(CONFIG_HIP04_ETH) += hip04_eth.o obj-$(CONFIG_HNS_MDIO) += hns_mdio.o obj-$(CONFIG_HNS) += hns/ +obj-$(CONFIG_HNS3) += hns3/ obj-$(CONFIG_HISI_FEMAC) += hisi_femac.o diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c index 9d9b6e6dd988..a051e582d541 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.c +++ b/drivers/net/ethernet/hisilicon/hns/hnae.c @@ -202,6 +202,7 @@ hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags) ring->q = q; ring->flags = flags; spin_lock_init(&ring->lock); + ring->coal_param = q->handle->coal_param; assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr); /* not matter for tx or rx ring, the ntc and ntc start from 0 */ diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h index 7ba653af19cb..3e62692af011 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.h +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h @@ -89,6 +89,10 @@ do { \ #define RCB_RING_NAME_LEN 16 +#define HNAE_LOWEST_LATENCY_COAL_PARAM 30 +#define HNAE_LOW_LATENCY_COAL_PARAM 80 +#define HNAE_BULK_LATENCY_COAL_PARAM 150 + enum hnae_led_state { HNAE_LED_INACTIVE, HNAE_LED_ACTIVE, @@ -292,6 +296,12 @@ struct hnae_ring { int flags; /* ring attribute */ int irq_init_flag; + + /* total rx bytes after last rx rate calucated */ + u64 coal_last_rx_bytes; + unsigned long coal_last_jiffies; + u32 coal_param; + u32 coal_rx_rate; /* rx rate in MB */ }; #define ring_ptr_move_fw(ring, p) \ @@ -548,8 +558,13 @@ struct hnae_handle { u32 if_support; int q_num; int vf_id; + unsigned long coal_last_jiffies; + u32 coal_param; /* self adapt coalesce param */ + /* the ring index of last ring that set coal param */ + u32 coal_ring_idx; u32 eport_id; u32 dport_id; /* v2 tx bd should fill the dport_id */ + bool coal_adapt_en; enum hnae_port_type port_type; enum hnae_media_type media_type; struct list_head node; /* list to hnae_ae_dev->handle_list */ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index a37166ee577b..bd68379d2bea 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c @@ -99,6 +99,7 @@ struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev, ae_handle->owner_dev = dsaf_dev->dev; ae_handle->dev = dev; ae_handle->q_num = qnum_per_vf; + ae_handle->coal_param = HNAE_LOWEST_LATENCY_COAL_PARAM; /* find ring pair, and set vf id*/ for (ae_handle->vf_id = 0; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 3987699f8fe6..36520634c96a 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -812,6 +812,113 @@ static int hns_desc_unused(struct hnae_ring *ring) return ((ntc >= ntu) ? 
0 : ring->desc_num) + ntc - ntu;
 }
+#define HNS_LOWEST_LATENCY_RATE 27 /* 27 MB/s */
+#define HNS_LOW_LATENCY_RATE 80 /* 80 MB/s */
+
+#define HNS_COAL_BDNUM 3
+
+static u32 hns_coal_rx_bdnum(struct hnae_ring *ring)
+{
+	bool coal_enable = ring->q->handle->coal_adapt_en;
+
+	if (coal_enable &&
+	    ring->coal_last_rx_bytes > HNS_LOWEST_LATENCY_RATE)
+		return HNS_COAL_BDNUM;
+	else
+		return 0;
+}
+
+static void hns_update_rx_rate(struct hnae_ring *ring)
+{
+	bool coal_enable = ring->q->handle->coal_adapt_en;
+	u32 time_passed_ms;
+	u64 total_bytes;
+
+	if (!coal_enable ||
+	    time_before(jiffies, ring->coal_last_jiffies + (HZ >> 4)))
+		return;
+
+	/* ring->stats.rx_bytes overflowed */
+	if (ring->coal_last_rx_bytes > ring->stats.rx_bytes) {
+		ring->coal_last_rx_bytes = ring->stats.rx_bytes;
+		ring->coal_last_jiffies = jiffies;
+		return;
+	}
+
+	total_bytes = ring->stats.rx_bytes - ring->coal_last_rx_bytes;
+	time_passed_ms = jiffies_to_msecs(jiffies - ring->coal_last_jiffies);
+	do_div(total_bytes, time_passed_ms);
+	ring->coal_rx_rate = total_bytes >> 10;
+
+	ring->coal_last_rx_bytes = ring->stats.rx_bytes;
+	ring->coal_last_jiffies = jiffies;
+}
+
+/**
+ * smooth_alg - smoothing algorithm for adjusting coalesce parameter
+ **/
+static u32 smooth_alg(u32 new_param, u32 old_param)
+{
+	u32 gap = (new_param > old_param) ? new_param - old_param
+		  : old_param - new_param;
+
+	if (gap > 8)
+		gap >>= 3;
+
+	if (new_param > old_param)
+		return old_param + gap;
+	else
+		return old_param - gap;
+}
+
+/**
+ * hns_nic_adpt_coalesce - self-adaptive coalesce according to rx rate
+ * @ring_data: pointer to hns_nic_ring_data
+ **/
+static void hns_nic_adpt_coalesce(struct hns_nic_ring_data *ring_data)
+{
+	struct hnae_ring *ring = ring_data->ring;
+	struct hnae_handle *handle = ring->q->handle;
+	u32 new_coal_param, old_coal_param = ring->coal_param;
+
+	if (ring->coal_rx_rate < HNS_LOWEST_LATENCY_RATE)
+		new_coal_param = HNAE_LOWEST_LATENCY_COAL_PARAM;
+	else if (ring->coal_rx_rate < HNS_LOW_LATENCY_RATE)
+		new_coal_param = HNAE_LOW_LATENCY_COAL_PARAM;
+	else
+		new_coal_param = HNAE_BULK_LATENCY_COAL_PARAM;
+
+	if (new_coal_param == old_coal_param &&
+	    new_coal_param == handle->coal_param)
+		return;
+
+	new_coal_param = smooth_alg(new_coal_param, old_coal_param);
+	ring->coal_param = new_coal_param;
+
+	/**
+	 * Because all rings in one port share one coalesce param, when one
+	 * ring calculates its own coalesce param, it cannot write it to
+	 * hardware at once. There are three conditions, as follows:
+	 * 1. the current ring's coalesce param is larger than the one in
+	 *    hardware.
+	 * 2. the ring which adapted last time can change it again.
+	 * 3. timeout.
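+	 *
+	 * As a worked example of the smoothing step above, moving straight
+	 * from HNAE_LOWEST_LATENCY_COAL_PARAM (30) to
+	 * HNAE_BULK_LATENCY_COAL_PARAM (150) gives smooth_alg(150, 30):
+	 * gap = 120, gap >>= 3 leaves 15, and the function returns
+	 * 30 + 15 = 45, so the coalesce parameter ramps towards the new
+	 * target over several NAPI polls instead of jumping in one step.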
+ */ + if (new_coal_param == handle->coal_param) { + handle->coal_last_jiffies = jiffies; + handle->coal_ring_idx = ring_data->queue_index; + } else if (new_coal_param > handle->coal_param || + handle->coal_ring_idx == ring_data->queue_index || + time_after(jiffies, handle->coal_last_jiffies + (HZ >> 4))) { + handle->dev->ops->set_coalesce_usecs(handle, + new_coal_param); + handle->dev->ops->set_coalesce_frames(handle, + 1, new_coal_param); + handle->coal_param = new_coal_param; + handle->coal_ring_idx = ring_data->queue_index; + handle->coal_last_jiffies = jiffies; + } +} + static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data, int budget, void *v) { @@ -868,20 +975,27 @@ static bool hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data) { struct hnae_ring *ring = ring_data->ring; int num = 0; + bool rx_stopped; - ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0); + hns_update_rx_rate(ring); /* for hardware bug fixed */ + ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0); num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM); - if (num > 0) { + if (num <= hns_coal_rx_bdnum(ring)) { + if (ring->q->handle->coal_adapt_en) + hns_nic_adpt_coalesce(ring_data); + + rx_stopped = true; + } else { ring_data->ring->q->handle->dev->ops->toggle_ring_irq( ring_data->ring, 1); - return false; - } else { - return true; + rx_stopped = false; } + + return rx_stopped; } static bool hns_nic_rx_fini_pro_v2(struct hns_nic_ring_data *ring_data) @@ -889,12 +1003,17 @@ static bool hns_nic_rx_fini_pro_v2(struct hns_nic_ring_data *ring_data) struct hnae_ring *ring = ring_data->ring; int num; + hns_update_rx_rate(ring); num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM); - if (!num) + if (num <= hns_coal_rx_bdnum(ring)) { + if (ring->q->handle->coal_adapt_en) + hns_nic_adpt_coalesce(ring_data); + return true; - else - return false; + } + + return false; } static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring, diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h index 9cb4c7884201..26e9afcbdd50 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h @@ -38,7 +38,7 @@ struct hns_nic_ring_data { struct hnae_ring *ring; struct napi_struct napi; cpumask_t mask; /* affinity mask */ - int queue_index; + u32 queue_index; int (*poll_one)(struct hns_nic_ring_data *, int, void *); void (*ex_process)(struct hns_nic_ring_data *, struct sk_buff *); bool (*fini_process)(struct hns_nic_ring_data *); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index a8db27e86a11..7ea7f8a4aa2a 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -595,7 +595,7 @@ static void hns_nic_self_test(struct net_device *ndev, set_bit(NIC_STATE_TESTING, &priv->state); if (if_running) - (void)dev_close(ndev); + dev_close(ndev); for (i = 0; i < SELF_TEST_TPYE_NUM; i++) { if (!st_param[i][1]) @@ -735,8 +735,8 @@ static int hns_get_coalesce(struct net_device *net_dev, ops = priv->ae_handle->dev->ops; - ec->use_adaptive_rx_coalesce = 1; - ec->use_adaptive_tx_coalesce = 1; + ec->use_adaptive_rx_coalesce = priv->ae_handle->coal_adapt_en; + ec->use_adaptive_tx_coalesce = priv->ae_handle->coal_adapt_en; if ((!ops->get_coalesce_usecs) || (!ops->get_max_coalesced_frames)) @@ -787,6 +787,9 @@ static int hns_set_coalesce(struct net_device *net_dev, (!ops->set_coalesce_frames)) 
return -ESRCH; + if (ec->use_adaptive_rx_coalesce != priv->ae_handle->coal_adapt_en) + priv->ae_handle->coal_adapt_en = ec->use_adaptive_rx_coalesce; + rc1 = ops->set_coalesce_usecs(priv->ae_handle, ec->rx_coalesce_usecs); diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile new file mode 100644 index 000000000000..a9349e1f3e51 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for the HISILICON network device drivers. +# + +obj-$(CONFIG_HNS3) += hns3pf/ + +obj-$(CONFIG_HNS3) += hnae3.o diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c new file mode 100644 index 000000000000..5bcb2238acb2 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -0,0 +1,275 @@ +/* + * Copyright (c) 2016-2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include + +#include "hnae3.h" + +static LIST_HEAD(hnae3_ae_algo_list); +static LIST_HEAD(hnae3_client_list); +static LIST_HEAD(hnae3_ae_dev_list); + +/* we are keeping things simple and using single lock for all the + * list. This is a non-critical code so other updations, if happen + * in parallel, can wait. + */ +static DEFINE_MUTEX(hnae3_common_lock); + +static bool hnae3_client_match(enum hnae3_client_type client_type, + enum hnae3_dev_type dev_type) +{ + if ((dev_type == HNAE3_DEV_KNIC) && (client_type == HNAE3_CLIENT_KNIC || + client_type == HNAE3_CLIENT_ROCE)) + return true; + + if (dev_type == HNAE3_DEV_UNIC && client_type == HNAE3_CLIENT_UNIC) + return true; + + return false; +} + +static int hnae3_match_n_instantiate(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev, bool is_reg) +{ + int ret; + + /* check if this client matches the type of ae_dev */ + if (!(hnae3_client_match(client->type, ae_dev->dev_type) && + hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) { + return 0; + } + + /* now, (un-)instantiate client by calling lower layer */ + if (is_reg) { + ret = ae_dev->ops->init_client_instance(client, ae_dev); + if (ret) + dev_err(&ae_dev->pdev->dev, + "fail to instantiate client\n"); + return ret; + } + + ae_dev->ops->uninit_client_instance(client, ae_dev); + return 0; +} + +int hnae3_register_client(struct hnae3_client *client) +{ + struct hnae3_client *client_tmp; + struct hnae3_ae_dev *ae_dev; + int ret = 0; + + mutex_lock(&hnae3_common_lock); + /* one system should only have one client for every type */ + list_for_each_entry(client_tmp, &hnae3_client_list, node) { + if (client_tmp->type == client->type) + goto exit; + } + + list_add_tail(&client->node, &hnae3_client_list); + + /* initialize the client on every matched port */ + list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) { + /* if the client could not be initialized on current port, for + * any error reasons, move on to next available port + */ + ret = hnae3_match_n_instantiate(client, ae_dev, true); + if (ret) + dev_err(&ae_dev->pdev->dev, + "match and instantiation failed for port\n"); + } + +exit: + mutex_unlock(&hnae3_common_lock); + + return ret; +} +EXPORT_SYMBOL(hnae3_register_client); + +void hnae3_unregister_client(struct hnae3_client *client) +{ + struct hnae3_ae_dev *ae_dev; + + mutex_lock(&hnae3_common_lock); + /* un-initialize the client on 
every matched port */ + list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) { + hnae3_match_n_instantiate(client, ae_dev, false); + } + + list_del(&client->node); + mutex_unlock(&hnae3_common_lock); +} +EXPORT_SYMBOL(hnae3_unregister_client); + +/* hnae3_register_ae_algo - register a AE algorithm to hnae3 framework + * @ae_algo: AE algorithm + * NOTE: the duplicated name will not be checked + */ +int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) +{ + const struct pci_device_id *id; + struct hnae3_ae_dev *ae_dev; + struct hnae3_client *client; + int ret = 0; + + mutex_lock(&hnae3_common_lock); + + list_add_tail(&ae_algo->node, &hnae3_ae_algo_list); + + /* Check if this algo/ops matches the list of ae_devs */ + list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) { + id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev); + if (!id) + continue; + + /* ae_dev init should set flag */ + ae_dev->ops = ae_algo->ops; + ret = ae_algo->ops->init_ae_dev(ae_dev); + if (ret) { + dev_err(&ae_dev->pdev->dev, "init ae_dev error.\n"); + continue; + } + + hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1); + + /* check the client list for the match with this ae_dev type and + * initialize the figure out client instance + */ + list_for_each_entry(client, &hnae3_client_list, node) { + ret = hnae3_match_n_instantiate(client, ae_dev, true); + if (ret) + dev_err(&ae_dev->pdev->dev, + "match and instantiation failed\n"); + } + } + + mutex_unlock(&hnae3_common_lock); + + return ret; +} +EXPORT_SYMBOL(hnae3_register_ae_algo); + +/* hnae3_unregister_ae_algo - unregisters a AE algorithm + * @ae_algo: the AE algorithm to unregister + */ +void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo) +{ + const struct pci_device_id *id; + struct hnae3_ae_dev *ae_dev; + struct hnae3_client *client; + + mutex_lock(&hnae3_common_lock); + /* Check if there are matched ae_dev */ + list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) { + id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev); + if (!id) + continue; + + /* check the client list for the match with this ae_dev type and + * un-initialize the figure out client instance + */ + list_for_each_entry(client, &hnae3_client_list, node) + hnae3_match_n_instantiate(client, ae_dev, false); + + ae_algo->ops->uninit_ae_dev(ae_dev); + hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0); + } + + list_del(&ae_algo->node); + mutex_unlock(&hnae3_common_lock); +} +EXPORT_SYMBOL(hnae3_unregister_ae_algo); + +/* hnae3_register_ae_dev - registers a AE device to hnae3 framework + * @ae_dev: the AE device + * NOTE: the duplicated name will not be checked + */ +int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) +{ + const struct pci_device_id *id; + struct hnae3_ae_algo *ae_algo; + struct hnae3_client *client; + int ret = 0; + + mutex_lock(&hnae3_common_lock); + list_add_tail(&ae_dev->node, &hnae3_ae_dev_list); + + /* Check if there are matched ae_algo */ + list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) { + id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev); + if (!id) + continue; + + ae_dev->ops = ae_algo->ops; + + if (!ae_dev->ops) { + dev_err(&ae_dev->pdev->dev, "ae_dev ops are null\n"); + goto out_err; + } + + /* ae_dev init should set flag */ + ret = ae_dev->ops->init_ae_dev(ae_dev); + if (ret) { + dev_err(&ae_dev->pdev->dev, "init ae_dev error\n"); + goto out_err; + } + + hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1); + break; + } + + /* check the client list for the match with this ae_dev type and + * initialize the figure out client instance + */ + 
list_for_each_entry(client, &hnae3_client_list, node) { + ret = hnae3_match_n_instantiate(client, ae_dev, true); + if (ret) + dev_err(&ae_dev->pdev->dev, + "match and instantiation failed\n"); + } + +out_err: + mutex_unlock(&hnae3_common_lock); + + return ret; +} +EXPORT_SYMBOL(hnae3_register_ae_dev); + +/* hnae3_unregister_ae_dev - unregisters a AE device + * @ae_dev: the AE device to unregister + */ +void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev) +{ + const struct pci_device_id *id; + struct hnae3_ae_algo *ae_algo; + struct hnae3_client *client; + + mutex_lock(&hnae3_common_lock); + /* Check if there are matched ae_algo */ + list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) { + id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev); + if (!id) + continue; + + list_for_each_entry(client, &hnae3_client_list, node) + hnae3_match_n_instantiate(client, ae_dev, false); + + ae_algo->ops->uninit_ae_dev(ae_dev); + hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0); + } + + list_del(&ae_dev->node); + mutex_unlock(&hnae3_common_lock); +} +EXPORT_SYMBOL(hnae3_unregister_ae_dev); + +MODULE_AUTHOR("Huawei Tech. Co., Ltd."); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("HNAE3(Hisilicon Network Acceleration Engine) Framework"); diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h new file mode 100644 index 000000000000..1a01cadfe5f3 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -0,0 +1,455 @@ +/* + * Copyright (c) 2016-2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
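
A point worth seeing in isolation: hnae3_register_client(), hnae3_register_ae_algo() and hnae3_register_ae_dev() each add their own object to one list and then scan the opposite list, so bindings are made regardless of which module loads first. The toy below (stand-alone C, all names invented, fixed-size arrays instead of kernel lists, no locking) only demonstrates that order-independence, not the framework API itself.

#include <stdio.h>

/* Toy model of "register one side, scan the other": whichever side
 * registers last still gets bound, so module load order does not matter.
 */
#define MAX 4

static const char *clients[MAX];
static const char *devices[MAX];
static int nclients, ndevices;

static void bind(const char *c, const char *d)
{
	printf("bound client %s to device %s\n", c, d);
}

static void register_client(const char *name)
{
	clients[nclients++] = name;
	for (int i = 0; i < ndevices; i++)	/* scan devices already present */
		bind(name, devices[i]);
}

static void register_device(const char *name)
{
	devices[ndevices++] = name;
	for (int i = 0; i < nclients; i++)	/* scan clients already present */
		bind(clients[i], name);
}

int main(void)
{
	register_device("ae_dev0");
	register_client("knic");	/* binds even though it came second */
	register_device("ae_dev1");	/* picks up the already-registered client */
	return 0;
}
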
+ */ + +#ifndef __HNAE3_H +#define __HNAE3_H + +/* Names used in this framework: + * ae handle (handle): + * a set of queues provided by AE + * ring buffer queue (rbq): + * the channel between upper layer and the AE, can do tx and rx + * ring: + * a tx or rx channel within a rbq + * ring description (desc): + * an element in the ring with packet information + * buffer: + * a memory region referred by desc with the full packet payload + * + * "num" means a static number set as a parameter, "count" mean a dynamic + * number set while running + * "cb" means control block + */ + +#include +#include +#include +#include +#include +#include +#include + +/* Device IDs */ +#define HNAE3_DEV_ID_GE 0xA220 +#define HNAE3_DEV_ID_25GE 0xA221 +#define HNAE3_DEV_ID_25GE_RDMA 0xA222 +#define HNAE3_DEV_ID_25GE_RDMA_MACSEC 0xA223 +#define HNAE3_DEV_ID_50GE_RDMA 0xA224 +#define HNAE3_DEV_ID_50GE_RDMA_MACSEC 0xA225 +#define HNAE3_DEV_ID_100G_RDMA_MACSEC 0xA226 +#define HNAE3_DEV_ID_100G_VF 0xA22E +#define HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF 0xA22F + +#define HNAE3_CLASS_NAME_SIZE 16 + +#define HNAE3_DEV_INITED_B 0x0 +#define HNAE3_DEV_SUPPORT_ROCE_B 0x1 +#define HNAE3_DEV_SUPPORT_DCB_B 0x2 + +#define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\ + BIT(HNAE3_DEV_SUPPORT_ROCE_B)) + +#define hnae3_dev_roce_supported(hdev) \ + hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B) + +#define hnae3_dev_dcb_supported(hdev) \ + hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B) + +#define ring_ptr_move_fw(ring, p) \ + ((ring)->p = ((ring)->p + 1) % (ring)->desc_num) +#define ring_ptr_move_bw(ring, p) \ + ((ring)->p = ((ring)->p - 1 + (ring)->desc_num) % (ring)->desc_num) + +enum hns_desc_type { + DESC_TYPE_SKB, + DESC_TYPE_PAGE, +}; + +struct hnae3_handle; + +struct hnae3_queue { + void __iomem *io_base; + struct hnae3_ae_algo *ae_algo; + struct hnae3_handle *handle; + int tqp_index; /* index in a handle */ + u32 buf_size; /* size for hnae_desc->addr, preset by AE */ + u16 desc_num; /* total number of desc */ +}; + +/*hnae3 loop mode*/ +enum hnae3_loop { + HNAE3_MAC_INTER_LOOP_MAC, + HNAE3_MAC_INTER_LOOP_SERDES, + HNAE3_MAC_INTER_LOOP_PHY, + HNAE3_MAC_LOOP_NONE, +}; + +enum hnae3_client_type { + HNAE3_CLIENT_KNIC, + HNAE3_CLIENT_UNIC, + HNAE3_CLIENT_ROCE, +}; + +enum hnae3_dev_type { + HNAE3_DEV_KNIC, + HNAE3_DEV_UNIC, +}; + +/* mac media type */ +enum hnae3_media_type { + HNAE3_MEDIA_TYPE_UNKNOWN, + HNAE3_MEDIA_TYPE_FIBER, + HNAE3_MEDIA_TYPE_COPPER, + HNAE3_MEDIA_TYPE_BACKPLANE, +}; + +struct hnae3_vector_info { + u8 __iomem *io_addr; + int vector; +}; + +#define HNAE3_RING_TYPE_B 0 +#define HNAE3_RING_TYPE_TX 0 +#define HNAE3_RING_TYPE_RX 1 + +struct hnae3_ring_chain_node { + struct hnae3_ring_chain_node *next; + u32 tqp_index; + u32 flag; +}; + +#define HNAE3_IS_TX_RING(node) \ + (((node)->flag & (1 << HNAE3_RING_TYPE_B)) == HNAE3_RING_TYPE_TX) + +struct hnae3_client_ops { + int (*init_instance)(struct hnae3_handle *handle); + void (*uninit_instance)(struct hnae3_handle *handle, bool reset); + void (*link_status_change)(struct hnae3_handle *handle, bool state); +}; + +#define HNAE3_CLIENT_NAME_LENGTH 16 +struct hnae3_client { + char name[HNAE3_CLIENT_NAME_LENGTH]; + u16 version; + unsigned long state; + enum hnae3_client_type type; + const struct hnae3_client_ops *ops; + struct list_head node; +}; + +struct hnae3_ae_dev { + struct pci_dev *pdev; + const struct hnae3_ae_ops *ops; + struct list_head node; + u32 flag; + enum hnae3_dev_type dev_type; + void *priv; +}; + +/* This struct 
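
The ring_ptr_move_fw()/ring_ptr_move_bw() macros above are ordinary modular arithmetic on a ring index. The stand-alone check below (helper function names and the 1024-entry ring size are chosen for the example) shows the two wrap-around cases.

#include <assert.h>

/* Same arithmetic as ring_ptr_move_fw()/ring_ptr_move_bw(), written as
 * functions for clarity; desc_num is the ring size.
 */
static unsigned int move_fw(unsigned int p, unsigned int desc_num)
{
	return (p + 1) % desc_num;
}

static unsigned int move_bw(unsigned int p, unsigned int desc_num)
{
	return (p + desc_num - 1) % desc_num;
}

int main(void)
{
	assert(move_fw(1023, 1024) == 0);	/* forward wraps to the start */
	assert(move_bw(0, 1024) == 1023);	/* backward wraps to the end */
	return 0;
}
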
defines the operation on the handle. + * + * init_ae_dev(): (mandatory) + * Get PF configure from pci_dev and initialize PF hardware + * uninit_ae_dev() + * Disable PF device and release PF resource + * register_client + * Register client to ae_dev + * unregister_client() + * Unregister client from ae_dev + * start() + * Enable the hardware + * stop() + * Disable the hardware + * get_status() + * Get the carrier state of the back channel of the handle, 1 for ok, 0 for + * non-ok + * get_ksettings_an_result() + * Get negotiation status,speed and duplex + * update_speed_duplex_h() + * Update hardware speed and duplex + * get_media_type() + * Get media type of MAC + * adjust_link() + * Adjust link status + * set_loopback() + * Set loopback + * set_promisc_mode + * Set promisc mode + * set_mtu() + * set mtu + * get_pauseparam() + * get tx and rx of pause frame use + * set_pauseparam() + * set tx and rx of pause frame use + * set_autoneg() + * set auto autonegotiation of pause frame use + * get_autoneg() + * get auto autonegotiation of pause frame use + * get_coalesce_usecs() + * get usecs to delay a TX interrupt after a packet is sent + * get_rx_max_coalesced_frames() + * get Maximum number of packets to be sent before a TX interrupt. + * set_coalesce_usecs() + * set usecs to delay a TX interrupt after a packet is sent + * set_coalesce_frames() + * set Maximum number of packets to be sent before a TX interrupt. + * get_mac_addr() + * get mac address + * set_mac_addr() + * set mac address + * add_uc_addr + * Add unicast addr to mac table + * rm_uc_addr + * Remove unicast addr from mac table + * set_mc_addr() + * Set multicast address + * add_mc_addr + * Add multicast address to mac table + * rm_mc_addr + * Remove multicast address from mac table + * update_stats() + * Update Old network device statistics + * get_ethtool_stats() + * Get ethtool network device statistics + * get_strings() + * Get a set of strings that describe the requested objects + * get_sset_count() + * Get number of strings that @get_strings will write + * update_led_status() + * Update the led status + * set_led_id() + * Set led id + * get_regs() + * Get regs dump + * get_regs_len() + * Get the len of the regs dump + * get_rss_key_size() + * Get rss key size + * get_rss_indir_size() + * Get rss indirection table size + * get_rss() + * Get rss table + * set_rss() + * Set rss table + * get_tc_size() + * Get tc size of handle + * get_vector() + * Get vector number and vector information + * map_ring_to_vector() + * Map rings to vector + * unmap_ring_from_vector() + * Unmap rings from vector + * add_tunnel_udp() + * Add tunnel information to hardware + * del_tunnel_udp() + * Delete tunnel information from hardware + * reset_queue() + * Reset queue + * get_fw_version() + * Get firmware version + * get_mdix_mode() + * Get media typr of phy + * set_vlan_filter() + * Set vlan filter config of Ports + * set_vf_vlan_filter() + * Set vlan filter config of vf + */ +struct hnae3_ae_ops { + int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev); + void (*uninit_ae_dev)(struct hnae3_ae_dev *ae_dev); + + int (*init_client_instance)(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev); + void (*uninit_client_instance)(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev); + int (*start)(struct hnae3_handle *handle); + void (*stop)(struct hnae3_handle *handle); + int (*get_status)(struct hnae3_handle *handle); + void (*get_ksettings_an_result)(struct hnae3_handle *handle, + u8 *auto_neg, u32 *speed, u8 *duplex); + + int 
(*update_speed_duplex_h)(struct hnae3_handle *handle); + int (*cfg_mac_speed_dup_h)(struct hnae3_handle *handle, int speed, + u8 duplex); + + void (*get_media_type)(struct hnae3_handle *handle, u8 *media_type); + void (*adjust_link)(struct hnae3_handle *handle, int speed, int duplex); + int (*set_loopback)(struct hnae3_handle *handle, + enum hnae3_loop loop_mode, bool en); + + void (*set_promisc_mode)(struct hnae3_handle *handle, u32 en); + int (*set_mtu)(struct hnae3_handle *handle, int new_mtu); + + void (*get_pauseparam)(struct hnae3_handle *handle, + u32 *auto_neg, u32 *rx_en, u32 *tx_en); + int (*set_pauseparam)(struct hnae3_handle *handle, + u32 auto_neg, u32 rx_en, u32 tx_en); + + int (*set_autoneg)(struct hnae3_handle *handle, bool enable); + int (*get_autoneg)(struct hnae3_handle *handle); + + void (*get_coalesce_usecs)(struct hnae3_handle *handle, + u32 *tx_usecs, u32 *rx_usecs); + void (*get_rx_max_coalesced_frames)(struct hnae3_handle *handle, + u32 *tx_frames, u32 *rx_frames); + int (*set_coalesce_usecs)(struct hnae3_handle *handle, u32 timeout); + int (*set_coalesce_frames)(struct hnae3_handle *handle, + u32 coalesce_frames); + void (*get_coalesce_range)(struct hnae3_handle *handle, + u32 *tx_frames_low, u32 *rx_frames_low, + u32 *tx_frames_high, u32 *rx_frames_high, + u32 *tx_usecs_low, u32 *rx_usecs_low, + u32 *tx_usecs_high, u32 *rx_usecs_high); + + void (*get_mac_addr)(struct hnae3_handle *handle, u8 *p); + int (*set_mac_addr)(struct hnae3_handle *handle, void *p); + int (*add_uc_addr)(struct hnae3_handle *handle, + const unsigned char *addr); + int (*rm_uc_addr)(struct hnae3_handle *handle, + const unsigned char *addr); + int (*set_mc_addr)(struct hnae3_handle *handle, void *addr); + int (*add_mc_addr)(struct hnae3_handle *handle, + const unsigned char *addr); + int (*rm_mc_addr)(struct hnae3_handle *handle, + const unsigned char *addr); + + void (*set_tso_stats)(struct hnae3_handle *handle, int enable); + void (*update_stats)(struct hnae3_handle *handle, + struct net_device_stats *net_stats); + void (*get_stats)(struct hnae3_handle *handle, u64 *data); + + void (*get_strings)(struct hnae3_handle *handle, + u32 stringset, u8 *data); + int (*get_sset_count)(struct hnae3_handle *handle, int stringset); + + void (*get_regs)(struct hnae3_handle *handle, void *data); + int (*get_regs_len)(struct hnae3_handle *handle); + + u32 (*get_rss_key_size)(struct hnae3_handle *handle); + u32 (*get_rss_indir_size)(struct hnae3_handle *handle); + int (*get_rss)(struct hnae3_handle *handle, u32 *indir, u8 *key, + u8 *hfunc); + int (*set_rss)(struct hnae3_handle *handle, const u32 *indir, + const u8 *key, const u8 hfunc); + + int (*get_tc_size)(struct hnae3_handle *handle); + + int (*get_vector)(struct hnae3_handle *handle, u16 vector_num, + struct hnae3_vector_info *vector_info); + int (*map_ring_to_vector)(struct hnae3_handle *handle, + int vector_num, + struct hnae3_ring_chain_node *vr_chain); + int (*unmap_ring_from_vector)(struct hnae3_handle *handle, + int vector_num, + struct hnae3_ring_chain_node *vr_chain); + + int (*add_tunnel_udp)(struct hnae3_handle *handle, u16 port_num); + int (*del_tunnel_udp)(struct hnae3_handle *handle, u16 port_num); + + void (*reset_queue)(struct hnae3_handle *handle, u16 queue_id); + u32 (*get_fw_version)(struct hnae3_handle *handle); + void (*get_mdix_mode)(struct hnae3_handle *handle, + u8 *tp_mdix_ctrl, u8 *tp_mdix); + + int (*set_vlan_filter)(struct hnae3_handle *handle, __be16 proto, + u16 vlan_id, bool is_kill); + int (*set_vf_vlan_filter)(struct 
hnae3_handle *handle, int vfid, + u16 vlan, u8 qos, __be16 proto); +}; + +struct hnae3_ae_algo { + const struct hnae3_ae_ops *ops; + struct list_head node; + char name[HNAE3_CLASS_NAME_SIZE]; + const struct pci_device_id *pdev_id_table; +}; + +#define HNAE3_INT_NAME_LEN (IFNAMSIZ + 16) +#define HNAE3_ITR_COUNTDOWN_START 100 + +struct hnae3_tc_info { + u16 tqp_offset; /* TQP offset from base TQP */ + u16 tqp_count; /* Total TQPs */ + u8 tc; /* TC index */ + bool enable; /* If this TC is enable or not */ +}; + +#define HNAE3_MAX_TC 8 +#define HNAE3_MAX_USER_PRIO 8 +struct hnae3_knic_private_info { + struct net_device *netdev; /* Set by KNIC client when init instance */ + u16 rss_size; /* Allocated RSS queues */ + u16 rx_buf_len; + u16 num_desc; + + u8 num_tc; /* Total number of enabled TCs */ + u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */ + struct hnae3_tc_info tc_info[HNAE3_MAX_TC]; /* Idx of array is HW TC */ + + u16 num_tqps; /* total number of TQPs in this handle */ + struct hnae3_queue **tqp; /* array base of all TQPs in this instance */ +}; + +struct hnae3_roce_private_info { + struct net_device *netdev; + void __iomem *roce_io_base; + int base_vector; + int num_vectors; +}; + +struct hnae3_unic_private_info { + struct net_device *netdev; + u16 rx_buf_len; + u16 num_desc; + u16 num_tqps; /* total number of tqps in this handle */ + struct hnae3_queue **tqp; /* array base of all TQPs of this instance */ +}; + +#define HNAE3_SUPPORT_MAC_LOOPBACK 1 +#define HNAE3_SUPPORT_PHY_LOOPBACK 2 +#define HNAE3_SUPPORT_SERDES_LOOPBACK 4 + +struct hnae3_handle { + struct hnae3_client *client; + struct pci_dev *pdev; + void *priv; + struct hnae3_ae_algo *ae_algo; /* the class who provides this handle */ + u64 flags; /* Indicate the capabilities for this handle*/ + + union { + struct net_device *netdev; /* first member */ + struct hnae3_knic_private_info kinfo; + struct hnae3_unic_private_info uinfo; + struct hnae3_roce_private_info rinfo; + }; + + u32 numa_node_mask; /* for multi-chip support */ +}; + +#define hnae_set_field(origin, mask, shift, val) \ + do { \ + (origin) &= (~(mask)); \ + (origin) |= ((val) << (shift)) & (mask); \ + } while (0) +#define hnae_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift)) + +#define hnae_set_bit(origin, shift, val) \ + hnae_set_field((origin), (0x1 << (shift)), (shift), (val)) +#define hnae_get_bit(origin, shift) \ + hnae_get_field((origin), (0x1 << (shift)), (shift)) + +int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev); +void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev); + +void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo); +int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo); + +void hnae3_unregister_client(struct hnae3_client *client); +int hnae3_register_client(struct hnae3_client *client); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile new file mode 100644 index 000000000000..162e8a42acd0 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile @@ -0,0 +1,11 @@ +# +# Makefile for the HISILICON network device drivers. 
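
The hnae_set_field()/hnae_get_field() and hnae_set_bit()/hnae_get_bit() helpers defined at the end of hnae3.h above are plain mask-and-shift operations. This stand-alone copy only demonstrates the calling convention, using an invented layout (a 6-bit "speed" code at bit 0 and a "duplex" flag at bit 7) that is not taken from the hardware spec.

#include <assert.h>
#include <stdint.h>

#define set_field(origin, mask, shift, val) \
	do { \
		(origin) &= ~(mask); \
		(origin) |= ((val) << (shift)) & (mask); \
	} while (0)
#define get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))

int main(void)
{
	uint32_t cfg = 0;

	set_field(cfg, 0x3f << 0, 0, 5);	/* speed code 5 in bits 0..5 */
	set_field(cfg, 1 << 7, 7, 1);		/* duplex bit */

	assert(get_field(cfg, 0x3f << 0, 0) == 5);
	assert(get_field(cfg, 1 << 7, 7) == 1);
	assert(cfg == 0x85);
	return 0;
}
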
+# + +ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3 + +obj-$(CONFIG_HNS3_HCLGE) += hclge.o +hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o + +obj-$(CONFIG_HNS3_ENET) += hns3.o +hns3-objs = hns3_enet.o hns3_ethtool.o diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c new file mode 100644 index 000000000000..8b511e6e0ce9 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -0,0 +1,356 @@ +/* + * Copyright (c) 2016~2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include "hclge_cmd.h" +#include "hnae3.h" +#include "hclge_main.h" + +#define hclge_is_csq(ring) ((ring)->flag & HCLGE_TYPE_CSQ) +#define hclge_ring_to_dma_dir(ring) (hclge_is_csq(ring) ? \ + DMA_TO_DEVICE : DMA_FROM_DEVICE) +#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev) + +static int hclge_ring_space(struct hclge_cmq_ring *ring) +{ + int ntu = ring->next_to_use; + int ntc = ring->next_to_clean; + int used = (ntu - ntc + ring->desc_num) % ring->desc_num; + + return ring->desc_num - used - 1; +} + +static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring) +{ + int size = ring->desc_num * sizeof(struct hclge_desc); + + ring->desc = kzalloc(size, GFP_KERNEL); + if (!ring->desc) + return -ENOMEM; + + ring->desc_dma_addr = dma_map_single(cmq_ring_to_dev(ring), ring->desc, + size, DMA_BIDIRECTIONAL); + if (dma_mapping_error(cmq_ring_to_dev(ring), ring->desc_dma_addr)) { + ring->desc_dma_addr = 0; + kfree(ring->desc); + ring->desc = NULL; + return -ENOMEM; + } + + return 0; +} + +static void hclge_free_cmd_desc(struct hclge_cmq_ring *ring) +{ + dma_unmap_single(cmq_ring_to_dev(ring), ring->desc_dma_addr, + ring->desc_num * sizeof(ring->desc[0]), + DMA_BIDIRECTIONAL); + + ring->desc_dma_addr = 0; + kfree(ring->desc); + ring->desc = NULL; +} + +static int hclge_init_cmd_queue(struct hclge_dev *hdev, int ring_type) +{ + struct hclge_hw *hw = &hdev->hw; + struct hclge_cmq_ring *ring = + (ring_type == HCLGE_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq; + int ret; + + ring->flag = ring_type; + ring->dev = hdev; + + ret = hclge_alloc_cmd_desc(ring); + if (ret) { + dev_err(&hdev->pdev->dev, "descriptor %s alloc error %d\n", + (ring_type == HCLGE_TYPE_CSQ) ? 
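
hclge_ring_space() in hclge_cmd.c above reserves one descriptor as a guard slot, so that next_to_use == next_to_clean can only mean "empty", never "full". A small worked check (the 1024-entry ring and index values are example inputs only):

#include <assert.h>

/* Same free-slot arithmetic as hclge_ring_space(). */
static int ring_space(int ntu, int ntc, int desc_num)
{
	int used = (ntu - ntc + desc_num) % desc_num;

	return desc_num - used - 1;
}

int main(void)
{
	assert(ring_space(0, 0, 1024) == 1023);		/* empty ring */
	assert(ring_space(10, 5, 1024) == 1018);	/* five descriptors in flight */
	assert(ring_space(4, 5, 1024) == 0);		/* full: only the guard slot left */
	return 0;
}
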
"CSQ" : "CRQ", ret); + return ret; + } + + ring->next_to_clean = 0; + ring->next_to_use = 0; + + return 0; +} + +void hclge_cmd_setup_basic_desc(struct hclge_desc *desc, + enum hclge_opcode_type opcode, bool is_read) +{ + memset((void *)desc, 0, sizeof(struct hclge_desc)); + desc->opcode = cpu_to_le16(opcode); + desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN); + + if (is_read) + desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR); + else + desc->flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR); +} + +static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring) +{ + dma_addr_t dma = ring->desc_dma_addr; + struct hclge_dev *hdev = ring->dev; + struct hclge_hw *hw = &hdev->hw; + + if (ring->flag == HCLGE_TYPE_CSQ) { + hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG, + (u32)dma); + hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG, + (u32)((dma >> 31) >> 1)); + hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, + (ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) | + HCLGE_NIC_CMQ_ENABLE); + hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0); + } else { + hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG, + (u32)dma); + hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG, + (u32)((dma >> 31) >> 1)); + hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG, + (ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) | + HCLGE_NIC_CMQ_ENABLE); + hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0); + } +} + +static void hclge_cmd_init_regs(struct hclge_hw *hw) +{ + hclge_cmd_config_regs(&hw->cmq.csq); + hclge_cmd_config_regs(&hw->cmq.crq); +} + +static int hclge_cmd_csq_clean(struct hclge_hw *hw) +{ + struct hclge_cmq_ring *csq = &hw->cmq.csq; + u16 ntc = csq->next_to_clean; + struct hclge_desc *desc; + int clean = 0; + u32 head; + + desc = &csq->desc[ntc]; + head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG); + + while (head != ntc) { + memset(desc, 0, sizeof(*desc)); + ntc++; + if (ntc == csq->desc_num) + ntc = 0; + desc = &csq->desc[ntc]; + clean++; + } + csq->next_to_clean = ntc; + + return clean; +} + +static int hclge_cmd_csq_done(struct hclge_hw *hw) +{ + u32 head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG); + return head == hw->cmq.csq.next_to_use; +} + +static bool hclge_is_special_opcode(u16 opcode) +{ + u16 spec_opcode[3] = {0x0030, 0x0031, 0x0032}; + int i; + + for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) { + if (spec_opcode[i] == opcode) + return true; + } + + return false; +} + +/** + * hclge_cmd_send - send command to command queue + * @hw: pointer to the hw struct + * @desc: prefilled descriptor for describing the command + * @num : the number of descriptors to be sent + * + * This is the main send command for command queue, it + * sends the queue, cleans the queue, etc + **/ +int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) +{ + struct hclge_dev *hdev = (struct hclge_dev *)hw->back; + struct hclge_desc *desc_to_use; + bool complete = false; + u32 timeout = 0; + int handle = 0; + int retval = 0; + u16 opcode, desc_ret; + int ntc; + + spin_lock_bh(&hw->cmq.csq.lock); + + if (num > hclge_ring_space(&hw->cmq.csq)) { + spin_unlock_bh(&hw->cmq.csq.lock); + return -EBUSY; + } + + /** + * Record the location of desc in the ring for this time + * which will be use for hardware to write back + */ + ntc = hw->cmq.csq.next_to_use; + opcode = desc[0].opcode; + while (handle < num) { + desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use]; + *desc_to_use = desc[handle]; + (hw->cmq.csq.next_to_use)++; + if 
(hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num) + hw->cmq.csq.next_to_use = 0; + handle++; + } + + /* Write to hardware */ + hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, hw->cmq.csq.next_to_use); + + /** + * If the command is sync, wait for the firmware to write back, + * if multi descriptors to be sent, use the first one to check + */ + if (HCLGE_SEND_SYNC(desc->flag)) { + do { + if (hclge_cmd_csq_done(hw)) + break; + udelay(1); + timeout++; + } while (timeout < hw->cmq.tx_timeout); + } + + if (hclge_cmd_csq_done(hw)) { + complete = true; + handle = 0; + while (handle < num) { + /* Get the result of hardware write back */ + desc_to_use = &hw->cmq.csq.desc[ntc]; + desc[handle] = *desc_to_use; + pr_debug("Get cmd desc:\n"); + + if (likely(!hclge_is_special_opcode(opcode))) + desc_ret = desc[handle].retval; + else + desc_ret = desc[0].retval; + + if ((enum hclge_cmd_return_status)desc_ret == + HCLGE_CMD_EXEC_SUCCESS) + retval = 0; + else + retval = -EIO; + hw->cmq.last_status = (enum hclge_cmd_status)desc_ret; + ntc++; + handle++; + if (ntc == hw->cmq.csq.desc_num) + ntc = 0; + } + } + + if (!complete) + retval = -EAGAIN; + + /* Clean the command send queue */ + handle = hclge_cmd_csq_clean(hw); + if (handle != num) { + dev_warn(&hdev->pdev->dev, + "cleaned %d, need to clean %d\n", handle, num); + } + + spin_unlock_bh(&hw->cmq.csq.lock); + + return retval; +} + +enum hclge_cmd_status hclge_cmd_query_firmware_version(struct hclge_hw *hw, + u32 *version) +{ + struct hclge_query_version *resp; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, 1); + resp = (struct hclge_query_version *)desc.data; + + ret = hclge_cmd_send(hw, &desc, 1); + if (!ret) + *version = le32_to_cpu(resp->firmware); + + return ret; +} + +int hclge_cmd_init(struct hclge_dev *hdev) +{ + u32 version; + int ret; + + /* Setup the queue entries for use cmd queue */ + hdev->hw.cmq.csq.desc_num = HCLGE_NIC_CMQ_DESC_NUM; + hdev->hw.cmq.crq.desc_num = HCLGE_NIC_CMQ_DESC_NUM; + + /* Setup the lock for command queue */ + spin_lock_init(&hdev->hw.cmq.csq.lock); + spin_lock_init(&hdev->hw.cmq.crq.lock); + + /* Setup Tx write back timeout */ + hdev->hw.cmq.tx_timeout = HCLGE_CMDQ_TX_TIMEOUT; + + /* Setup queue rings */ + ret = hclge_init_cmd_queue(hdev, HCLGE_TYPE_CSQ); + if (ret) { + dev_err(&hdev->pdev->dev, + "CSQ ring setup error %d\n", ret); + return ret; + } + + ret = hclge_init_cmd_queue(hdev, HCLGE_TYPE_CRQ); + if (ret) { + dev_err(&hdev->pdev->dev, + "CRQ ring setup error %d\n", ret); + goto err_csq; + } + + hclge_cmd_init_regs(&hdev->hw); + + ret = hclge_cmd_query_firmware_version(&hdev->hw, &version); + if (ret) { + dev_err(&hdev->pdev->dev, + "firmware version query failed %d\n", ret); + return ret; + } + hdev->fw_version = version; + + dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version); + + return 0; +err_csq: + hclge_free_cmd_desc(&hdev->hw.cmq.csq); + return ret; +} + +static void hclge_destroy_queue(struct hclge_cmq_ring *ring) +{ + spin_lock_bh(&ring->lock); + hclge_free_cmd_desc(ring); + spin_unlock_bh(&ring->lock); +} + +void hclge_destroy_cmd_queue(struct hclge_hw *hw) +{ + hclge_destroy_queue(&hw->cmq.csq); + hclge_destroy_queue(&hw->cmq.crq); +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h new file mode 100644 index 000000000000..758cf3948131 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -0,0 +1,746 @@ +/* + * Copyright (c) 2016~2017 
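
For synchronous commands, hclge_cmd_send() above busy-waits until hclge_cmd_csq_done() reports that the hardware head pointer has caught up with next_to_use, giving up after tx_timeout (HCLGE_CMDQ_TX_TIMEOUT, i.e. 1000) one-microsecond polls. The toy below only models that completion test and bounded poll loop; the values and the "hardware progress" increment are invented stand-ins for the register read and udelay(1).

#include <stdbool.h>
#include <stdio.h>

static bool csq_done(unsigned int hw_head, unsigned int next_to_use)
{
	return hw_head == next_to_use;	/* head caught up with the queued tail */
}

int main(void)
{
	unsigned int next_to_use = 7;	/* tail after queueing the command */
	unsigned int hw_head = 5;	/* hardware still working */
	unsigned int timeout = 0;

	while (!csq_done(hw_head, next_to_use) && timeout < 1000) {
		hw_head++;		/* stands in for udelay(1) + head re-read */
		timeout++;
	}

	printf("%s after %u polls\n",
	       csq_done(hw_head, next_to_use) ? "complete" : "timed out", timeout);
	return 0;
}
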
Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __HCLGE_CMD_H +#define __HCLGE_CMD_H +#include +#include + +#define HCLGE_CMDQ_TX_TIMEOUT 1000 + +struct hclge_dev; +struct hclge_desc { + __le16 opcode; + +#define HCLGE_CMDQ_RX_INVLD_B 0 +#define HCLGE_CMDQ_RX_OUTVLD_B 1 + + __le16 flag; + __le16 retval; + __le16 rsv; + __le32 data[6]; +}; + +struct hclge_desc_cb { + dma_addr_t dma; + void *va; + u32 length; +}; + +struct hclge_cmq_ring { + dma_addr_t desc_dma_addr; + struct hclge_desc *desc; + struct hclge_desc_cb *desc_cb; + struct hclge_dev *dev; + u32 head; + u32 tail; + + u16 buf_size; + u16 desc_num; + int next_to_use; + int next_to_clean; + u8 flag; + spinlock_t lock; /* Command queue lock */ +}; + +enum hclge_cmd_return_status { + HCLGE_CMD_EXEC_SUCCESS = 0, + HCLGE_CMD_NO_AUTH = 1, + HCLGE_CMD_NOT_EXEC = 2, + HCLGE_CMD_QUEUE_FULL = 3, +}; + +enum hclge_cmd_status { + HCLGE_STATUS_SUCCESS = 0, + HCLGE_ERR_CSQ_FULL = -1, + HCLGE_ERR_CSQ_TIMEOUT = -2, + HCLGE_ERR_CSQ_ERROR = -3, +}; + +struct hclge_cmq { + struct hclge_cmq_ring csq; + struct hclge_cmq_ring crq; + u16 tx_timeout; /* Tx timeout */ + enum hclge_cmd_status last_status; +}; + +#define HCLGE_CMD_FLAG_IN_VALID_SHIFT 0 +#define HCLGE_CMD_FLAG_OUT_VALID_SHIFT 1 +#define HCLGE_CMD_FLAG_NEXT_SHIFT 2 +#define HCLGE_CMD_FLAG_WR_OR_RD_SHIFT 3 +#define HCLGE_CMD_FLAG_NO_INTR_SHIFT 4 +#define HCLGE_CMD_FLAG_ERR_INTR_SHIFT 5 + +#define HCLGE_CMD_FLAG_IN BIT(HCLGE_CMD_FLAG_IN_VALID_SHIFT) +#define HCLGE_CMD_FLAG_OUT BIT(HCLGE_CMD_FLAG_OUT_VALID_SHIFT) +#define HCLGE_CMD_FLAG_NEXT BIT(HCLGE_CMD_FLAG_NEXT_SHIFT) +#define HCLGE_CMD_FLAG_WR BIT(HCLGE_CMD_FLAG_WR_OR_RD_SHIFT) +#define HCLGE_CMD_FLAG_NO_INTR BIT(HCLGE_CMD_FLAG_NO_INTR_SHIFT) +#define HCLGE_CMD_FLAG_ERR_INTR BIT(HCLGE_CMD_FLAG_ERR_INTR_SHIFT) + +enum hclge_opcode_type { + /* Generic command */ + HCLGE_OPC_QUERY_FW_VER = 0x0001, + HCLGE_OPC_CFG_RST_TRIGGER = 0x0020, + HCLGE_OPC_GBL_RST_STATUS = 0x0021, + HCLGE_OPC_QUERY_FUNC_STATUS = 0x0022, + HCLGE_OPC_QUERY_PF_RSRC = 0x0023, + HCLGE_OPC_QUERY_VF_RSRC = 0x0024, + HCLGE_OPC_GET_CFG_PARAM = 0x0025, + + HCLGE_OPC_STATS_64_BIT = 0x0030, + HCLGE_OPC_STATS_32_BIT = 0x0031, + HCLGE_OPC_STATS_MAC = 0x0032, + /* Device management command */ + + /* MAC commond */ + HCLGE_OPC_CONFIG_MAC_MODE = 0x0301, + HCLGE_OPC_CONFIG_AN_MODE = 0x0304, + HCLGE_OPC_QUERY_AN_RESULT = 0x0306, + HCLGE_OPC_QUERY_LINK_STATUS = 0x0307, + HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308, + HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309, + /* MACSEC command */ + + /* PFC/Pause CMD*/ + HCLGE_OPC_CFG_MAC_PAUSE_EN = 0x0701, + HCLGE_OPC_CFG_PFC_PAUSE_EN = 0x0702, + HCLGE_OPC_CFG_MAC_PARA = 0x0703, + HCLGE_OPC_CFG_PFC_PARA = 0x0704, + HCLGE_OPC_QUERY_MAC_TX_PKT_CNT = 0x0705, + HCLGE_OPC_QUERY_MAC_RX_PKT_CNT = 0x0706, + HCLGE_OPC_QUERY_PFC_TX_PKT_CNT = 0x0707, + HCLGE_OPC_QUERY_PFC_RX_PKT_CNT = 0x0708, + HCLGE_OPC_PRI_TO_TC_MAPPING = 0x0709, + HCLGE_OPC_QOS_MAP = 0x070A, + + /* ETS/scheduler commands */ + HCLGE_OPC_TM_PG_TO_PRI_LINK = 0x0804, + HCLGE_OPC_TM_QS_TO_PRI_LINK = 0x0805, + HCLGE_OPC_TM_NQ_TO_QS_LINK = 0x0806, + HCLGE_OPC_TM_RQ_TO_QS_LINK = 0x0807, + HCLGE_OPC_TM_PORT_WEIGHT = 0x0808, + HCLGE_OPC_TM_PG_WEIGHT = 0x0809, + HCLGE_OPC_TM_QS_WEIGHT = 0x080A, + HCLGE_OPC_TM_PRI_WEIGHT = 0x080B, + HCLGE_OPC_TM_PRI_C_SHAPPING = 0x080C, + 
HCLGE_OPC_TM_PRI_P_SHAPPING = 0x080D, + HCLGE_OPC_TM_PG_C_SHAPPING = 0x080E, + HCLGE_OPC_TM_PG_P_SHAPPING = 0x080F, + HCLGE_OPC_TM_PORT_SHAPPING = 0x0810, + HCLGE_OPC_TM_PG_SCH_MODE_CFG = 0x0812, + HCLGE_OPC_TM_PRI_SCH_MODE_CFG = 0x0813, + HCLGE_OPC_TM_QS_SCH_MODE_CFG = 0x0814, + HCLGE_OPC_TM_BP_TO_QSET_MAPPING = 0x0815, + + /* Packet buffer allocate command */ + HCLGE_OPC_TX_BUFF_ALLOC = 0x0901, + HCLGE_OPC_RX_PRIV_BUFF_ALLOC = 0x0902, + HCLGE_OPC_RX_PRIV_WL_ALLOC = 0x0903, + HCLGE_OPC_RX_COM_THRD_ALLOC = 0x0904, + HCLGE_OPC_RX_COM_WL_ALLOC = 0x0905, + HCLGE_OPC_RX_GBL_PKT_CNT = 0x0906, + + /* PTP command */ + /* TQP management command */ + HCLGE_OPC_SET_TQP_MAP = 0x0A01, + + /* TQP command */ + HCLGE_OPC_CFG_TX_QUEUE = 0x0B01, + HCLGE_OPC_QUERY_TX_POINTER = 0x0B02, + HCLGE_OPC_QUERY_TX_STATUS = 0x0B03, + HCLGE_OPC_CFG_RX_QUEUE = 0x0B11, + HCLGE_OPC_QUERY_RX_POINTER = 0x0B12, + HCLGE_OPC_QUERY_RX_STATUS = 0x0B13, + HCLGE_OPC_STASH_RX_QUEUE_LRO = 0x0B16, + HCLGE_OPC_CFG_RX_QUEUE_LRO = 0x0B17, + HCLGE_OPC_CFG_COM_TQP_QUEUE = 0x0B20, + HCLGE_OPC_RESET_TQP_QUEUE = 0x0B22, + + /* TSO cmd */ + HCLGE_OPC_TSO_GENERIC_CONFIG = 0x0C01, + + /* RSS cmd */ + HCLGE_OPC_RSS_GENERIC_CONFIG = 0x0D01, + HCLGE_OPC_RSS_INDIR_TABLE = 0x0D07, + HCLGE_OPC_RSS_TC_MODE = 0x0D08, + HCLGE_OPC_RSS_INPUT_TUPLE = 0x0D02, + + /* Promisuous mode command */ + HCLGE_OPC_CFG_PROMISC_MODE = 0x0E01, + + /* Interrupts cmd */ + HCLGE_OPC_ADD_RING_TO_VECTOR = 0x1503, + HCLGE_OPC_DEL_RING_TO_VECTOR = 0x1504, + + /* MAC command */ + HCLGE_OPC_MAC_VLAN_ADD = 0x1000, + HCLGE_OPC_MAC_VLAN_REMOVE = 0x1001, + HCLGE_OPC_MAC_VLAN_TYPE_ID = 0x1002, + HCLGE_OPC_MAC_VLAN_INSERT = 0x1003, + HCLGE_OPC_MAC_ETHTYPE_ADD = 0x1010, + HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011, + + /* Multicast linear table cmd */ + HCLGE_OPC_MTA_MAC_MODE_CFG = 0x1020, + HCLGE_OPC_MTA_MAC_FUNC_CFG = 0x1021, + HCLGE_OPC_MTA_TBL_ITEM_CFG = 0x1022, + HCLGE_OPC_MTA_TBL_ITEM_QUERY = 0x1023, + + /* VLAN command */ + HCLGE_OPC_VLAN_FILTER_CTRL = 0x1100, + HCLGE_OPC_VLAN_FILTER_PF_CFG = 0x1101, + HCLGE_OPC_VLAN_FILTER_VF_CFG = 0x1102, + + /* MDIO command */ + HCLGE_OPC_MDIO_CONFIG = 0x1900, + + /* QCN command */ + HCLGE_OPC_QCN_MOD_CFG = 0x1A01, + HCLGE_OPC_QCN_GRP_TMPLT_CFG = 0x1A02, + HCLGE_OPC_QCN_SHAPPING_IR_CFG = 0x1A03, + HCLGE_OPC_QCN_SHAPPING_BS_CFG = 0x1A04, + HCLGE_OPC_QCN_QSET_LINK_CFG = 0x1A05, + HCLGE_OPC_QCN_RP_STATUS_GET = 0x1A06, + HCLGE_OPC_QCN_AJUST_INIT = 0x1A07, + HCLGE_OPC_QCN_DFX_CNT_STATUS = 0x1A08, + + /* Mailbox cmd */ + HCLGEVF_OPC_MBX_PF_TO_VF = 0x2000, +}; + +#define HCLGE_TQP_REG_OFFSET 0x80000 +#define HCLGE_TQP_REG_SIZE 0x200 + +#define HCLGE_RCB_INIT_QUERY_TIMEOUT 10 +#define HCLGE_RCB_INIT_FLAG_EN_B 0 +#define HCLGE_RCB_INIT_FLAG_FINI_B 8 +struct hclge_config_rcb_init { + __le16 rcb_init_flag; + u8 rsv[22]; +}; + +struct hclge_tqp_map { + __le16 tqp_id; /* Absolute tqp id for in this pf */ + u8 tqp_vf; /* VF id */ +#define HCLGE_TQP_MAP_TYPE_PF 0 +#define HCLGE_TQP_MAP_TYPE_VF 1 +#define HCLGE_TQP_MAP_TYPE_B 0 +#define HCLGE_TQP_MAP_EN_B 1 + u8 tqp_flag; /* Indicate it's pf or vf tqp */ + __le16 tqp_vid; /* Virtual id in this pf/vf */ + u8 rsv[18]; +}; + +#define HCLGE_VECTOR_ELEMENTS_PER_CMD 10 + +enum hclge_int_type { + HCLGE_INT_TX, + HCLGE_INT_RX, + HCLGE_INT_EVENT, +}; + +struct hclge_ctrl_vector_chain { + u8 int_vector_id; + u8 int_cause_num; +#define HCLGE_INT_TYPE_S 0 +#define HCLGE_INT_TYPE_M 0x3 +#define HCLGE_TQP_ID_S 2 +#define HCLGE_TQP_ID_M (0x7ff << HCLGE_TQP_ID_S) +#define HCLGE_INT_GL_IDX_S 13 +#define HCLGE_INT_GL_IDX_M (0x3 
<< HCLGE_INT_GL_IDX_S) + __le16 tqp_type_and_id[HCLGE_VECTOR_ELEMENTS_PER_CMD]; + u8 vfid; + u8 rsv; +}; + +#define HCLGE_TC_NUM 8 +#define HCLGE_TC0_PRI_BUF_EN_B 15 /* Bit 15 indicate enable or not */ +#define HCLGE_BUF_UNIT_S 7 /* Buf size is united by 128 bytes */ +struct hclge_tx_buff_alloc { + __le16 tx_pkt_buff[HCLGE_TC_NUM]; + u8 tx_buff_rsv[8]; +}; + +struct hclge_rx_priv_buff { + __le16 buf_num[HCLGE_TC_NUM]; + __le16 shared_buf; + u8 rsv[6]; +}; + +struct hclge_query_version { + __le32 firmware; + __le32 firmware_rsv[5]; +}; + +#define HCLGE_RX_PRIV_EN_B 15 +#define HCLGE_TC_NUM_ONE_DESC 4 +struct hclge_priv_wl { + __le16 high; + __le16 low; +}; + +struct hclge_rx_priv_wl_buf { + struct hclge_priv_wl tc_wl[HCLGE_TC_NUM_ONE_DESC]; +}; + +struct hclge_rx_com_thrd { + struct hclge_priv_wl com_thrd[HCLGE_TC_NUM_ONE_DESC]; +}; + +struct hclge_rx_com_wl { + struct hclge_priv_wl com_wl; +}; + +struct hclge_waterline { + u32 low; + u32 high; +}; + +struct hclge_tc_thrd { + u32 low; + u32 high; +}; + +struct hclge_priv_buf { + struct hclge_waterline wl; /* Waterline for low and high*/ + u32 buf_size; /* TC private buffer size */ + u32 enable; /* Enable TC private buffer or not */ +}; + +#define HCLGE_MAX_TC_NUM 8 +struct hclge_shared_buf { + struct hclge_waterline self; + struct hclge_tc_thrd tc_thrd[HCLGE_MAX_TC_NUM]; + u32 buf_size; +}; + +#define HCLGE_RX_COM_WL_EN_B 15 +struct hclge_rx_com_wl_buf { + __le16 high_wl; + __le16 low_wl; + u8 rsv[20]; +}; + +#define HCLGE_RX_PKT_EN_B 15 +struct hclge_rx_pkt_buf { + __le16 high_pkt; + __le16 low_pkt; + u8 rsv[20]; +}; + +#define HCLGE_PF_STATE_DONE_B 0 +#define HCLGE_PF_STATE_MAIN_B 1 +#define HCLGE_PF_STATE_BOND_B 2 +#define HCLGE_PF_STATE_MAC_N_B 6 +#define HCLGE_PF_MAC_NUM_MASK 0x3 +#define HCLGE_PF_STATE_MAIN BIT(HCLGE_PF_STATE_MAIN_B) +#define HCLGE_PF_STATE_DONE BIT(HCLGE_PF_STATE_DONE_B) +struct hclge_func_status { + __le32 vf_rst_state[4]; + u8 pf_state; + u8 mac_id; + u8 rsv1; + u8 pf_cnt_in_mac; + u8 pf_num; + u8 vf_num; + u8 rsv[2]; +}; + +struct hclge_pf_res { + __le16 tqp_num; + __le16 buf_size; + __le16 msixcap_localid_ba_nic; + __le16 msixcap_localid_ba_rocee; +#define HCLGE_PF_VEC_NUM_S 0 +#define HCLGE_PF_VEC_NUM_M (0xff << HCLGE_PF_VEC_NUM_S) + __le16 pf_intr_vector_number; + __le16 pf_own_fun_number; + __le32 rsv[3]; +}; + +#define HCLGE_CFG_OFFSET_S 0 +#define HCLGE_CFG_OFFSET_M 0xfffff /* Byte (8-10.3) */ +#define HCLGE_CFG_RD_LEN_S 24 +#define HCLGE_CFG_RD_LEN_M (0xf << HCLGE_CFG_RD_LEN_S) +#define HCLGE_CFG_RD_LEN_BYTES 16 +#define HCLGE_CFG_RD_LEN_UNIT 4 + +#define HCLGE_CFG_VMDQ_S 0 +#define HCLGE_CFG_VMDQ_M (0xff << HCLGE_CFG_VMDQ_S) +#define HCLGE_CFG_TC_NUM_S 8 +#define HCLGE_CFG_TC_NUM_M (0xff << HCLGE_CFG_TC_NUM_S) +#define HCLGE_CFG_TQP_DESC_N_S 16 +#define HCLGE_CFG_TQP_DESC_N_M (0xffff << HCLGE_CFG_TQP_DESC_N_S) +#define HCLGE_CFG_PHY_ADDR_S 0 +#define HCLGE_CFG_PHY_ADDR_M (0x1f << HCLGE_CFG_PHY_ADDR_S) +#define HCLGE_CFG_MEDIA_TP_S 8 +#define HCLGE_CFG_MEDIA_TP_M (0xff << HCLGE_CFG_MEDIA_TP_S) +#define HCLGE_CFG_RX_BUF_LEN_S 16 +#define HCLGE_CFG_RX_BUF_LEN_M (0xffff << HCLGE_CFG_RX_BUF_LEN_S) +#define HCLGE_CFG_MAC_ADDR_H_S 0 +#define HCLGE_CFG_MAC_ADDR_H_M (0xffff << HCLGE_CFG_MAC_ADDR_H_S) +#define HCLGE_CFG_DEFAULT_SPEED_S 16 +#define HCLGE_CFG_DEFAULT_SPEED_M (0xff << HCLGE_CFG_DEFAULT_SPEED_S) + +struct hclge_cfg_param { + __le32 offset; + __le32 rsv; + __le32 param[4]; +}; + +#define HCLGE_MAC_MODE 0x0 +#define HCLGE_DESC_NUM 0x40 + +#define HCLGE_ALLOC_VALID_B 0 +struct hclge_vf_num { + u8 
alloc_valid; + u8 rsv[23]; +}; + +#define HCLGE_RSS_DEFAULT_OUTPORT_B 4 +#define HCLGE_RSS_HASH_KEY_OFFSET_B 4 +#define HCLGE_RSS_HASH_KEY_NUM 16 +struct hclge_rss_config { + u8 hash_config; + u8 rsv[7]; + u8 hash_key[HCLGE_RSS_HASH_KEY_NUM]; +}; + +struct hclge_rss_input_tuple { + u8 ipv4_tcp_en; + u8 ipv4_udp_en; + u8 ipv4_sctp_en; + u8 ipv4_fragment_en; + u8 ipv6_tcp_en; + u8 ipv6_udp_en; + u8 ipv6_sctp_en; + u8 ipv6_fragment_en; + u8 rsv[16]; +}; + +#define HCLGE_RSS_CFG_TBL_SIZE 16 + +struct hclge_rss_indirection_table { + u16 start_table_index; + u16 rss_set_bitmap; + u8 rsv[4]; + u8 rss_result[HCLGE_RSS_CFG_TBL_SIZE]; +}; + +#define HCLGE_RSS_TC_OFFSET_S 0 +#define HCLGE_RSS_TC_OFFSET_M (0x3ff << HCLGE_RSS_TC_OFFSET_S) +#define HCLGE_RSS_TC_SIZE_S 12 +#define HCLGE_RSS_TC_SIZE_M (0x7 << HCLGE_RSS_TC_SIZE_S) +#define HCLGE_RSS_TC_VALID_B 15 +struct hclge_rss_tc_mode { + u16 rss_tc_mode[HCLGE_MAX_TC_NUM]; + u8 rsv[8]; +}; + +#define HCLGE_LINK_STS_B 0 +#define HCLGE_LINK_STATUS BIT(HCLGE_LINK_STS_B) +struct hclge_link_status { + u8 status; + u8 rsv[23]; +}; + +struct hclge_promisc_param { + u8 vf_id; + u8 enable; +}; + +#define HCLGE_PROMISC_EN_B 1 +#define HCLGE_PROMISC_EN_ALL 0x7 +#define HCLGE_PROMISC_EN_UC 0x1 +#define HCLGE_PROMISC_EN_MC 0x2 +#define HCLGE_PROMISC_EN_BC 0x4 +struct hclge_promisc_cfg { + u8 flag; + u8 vf_id; + __le16 rsv0; + u8 rsv1[20]; +}; + +enum hclge_promisc_type { + HCLGE_UNICAST = 1, + HCLGE_MULTICAST = 2, + HCLGE_BROADCAST = 3, +}; + +#define HCLGE_MAC_TX_EN_B 6 +#define HCLGE_MAC_RX_EN_B 7 +#define HCLGE_MAC_PAD_TX_B 11 +#define HCLGE_MAC_PAD_RX_B 12 +#define HCLGE_MAC_1588_TX_B 13 +#define HCLGE_MAC_1588_RX_B 14 +#define HCLGE_MAC_APP_LP_B 15 +#define HCLGE_MAC_LINE_LP_B 16 +#define HCLGE_MAC_FCS_TX_B 17 +#define HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B 18 +#define HCLGE_MAC_RX_FCS_STRIP_B 19 +#define HCLGE_MAC_RX_FCS_B 20 +#define HCLGE_MAC_TX_UNDER_MIN_ERR_B 21 +#define HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B 22 + +struct hclge_config_mac_mode { + __le32 txrx_pad_fcs_loop_en; + u8 rsv[20]; +}; + +#define HCLGE_CFG_SPEED_S 0 +#define HCLGE_CFG_SPEED_M (0x3f << HCLGE_CFG_SPEED_S) + +#define HCLGE_CFG_DUPLEX_B 7 +#define HCLGE_CFG_DUPLEX_M BIT(HCLGE_CFG_DUPLEX_B) + +struct hclge_config_mac_speed_dup { + u8 speed_dup; + +#define HCLGE_CFG_MAC_SPEED_CHANGE_EN_B 0 + u8 mac_change_fec_en; + u8 rsv[22]; +}; + +#define HCLGE_QUERY_SPEED_S 3 +#define HCLGE_QUERY_AN_B 0 +#define HCLGE_QUERY_DUPLEX_B 2 + +#define HCLGE_QUERY_SPEED_M (0x1f << HCLGE_QUERY_SPEED_S) +#define HCLGE_QUERY_AN_M BIT(HCLGE_QUERY_AN_B) +#define HCLGE_QUERY_DUPLEX_M BIT(HCLGE_QUERY_DUPLEX_B) + +struct hclge_query_an_speed_dup { + u8 an_syn_dup_speed; + u8 pause; + u8 rsv[23]; +}; + +#define HCLGE_RING_ID_MASK 0x3ff +#define HCLGE_TQP_ENABLE_B 0 + +#define HCLGE_MAC_CFG_AN_EN_B 0 +#define HCLGE_MAC_CFG_AN_INT_EN_B 1 +#define HCLGE_MAC_CFG_AN_INT_MSK_B 2 +#define HCLGE_MAC_CFG_AN_INT_CLR_B 3 +#define HCLGE_MAC_CFG_AN_RST_B 4 + +#define HCLGE_MAC_CFG_AN_EN BIT(HCLGE_MAC_CFG_AN_EN_B) + +struct hclge_config_auto_neg { + __le32 cfg_an_cmd_flag; + u8 rsv[20]; +}; + +#define HCLGE_MAC_MIN_MTU 64 +#define HCLGE_MAC_MAX_MTU 9728 +#define HCLGE_MAC_UPLINK_PORT 0x100 + +struct hclge_config_max_frm_size { + __le16 max_frm_size; + u8 rsv[22]; +}; + +enum hclge_mac_vlan_tbl_opcode { + HCLGE_MAC_VLAN_ADD, /* Add new or modify mac_vlan */ + HCLGE_MAC_VLAN_UPDATE, /* Modify other fields of this table */ + HCLGE_MAC_VLAN_REMOVE, /* Remove a entry through mac_vlan key */ + HCLGE_MAC_VLAN_LKUP, /* Lookup a entry through 
mac_vlan key */ +}; + +#define HCLGE_MAC_VLAN_BIT0_EN_B 0x0 +#define HCLGE_MAC_VLAN_BIT1_EN_B 0x1 +#define HCLGE_MAC_EPORT_SW_EN_B 0xc +#define HCLGE_MAC_EPORT_TYPE_B 0xb +#define HCLGE_MAC_EPORT_VFID_S 0x3 +#define HCLGE_MAC_EPORT_VFID_M (0xff << HCLGE_MAC_EPORT_VFID_S) +#define HCLGE_MAC_EPORT_PFID_S 0x0 +#define HCLGE_MAC_EPORT_PFID_M (0x7 << HCLGE_MAC_EPORT_PFID_S) +struct hclge_mac_vlan_tbl_entry { + u8 flags; + u8 resp_code; + __le16 vlan_tag; + __le32 mac_addr_hi32; + __le16 mac_addr_lo16; + __le16 rsv1; + u8 entry_type; + u8 mc_mac_en; + __le16 egress_port; + __le16 egress_queue; + u8 rsv2[6]; +}; + +#define HCLGE_CFG_MTA_MAC_SEL_S 0x0 +#define HCLGE_CFG_MTA_MAC_SEL_M (0x3 << HCLGE_CFG_MTA_MAC_SEL_S) +#define HCLGE_CFG_MTA_MAC_EN_B 0x7 +struct hclge_mta_filter_mode { + u8 dmac_sel_en; /* Use lowest 2 bit as sel_mode, bit 7 as enable */ + u8 rsv[23]; +}; + +#define HCLGE_CFG_FUNC_MTA_ACCEPT_B 0x0 +struct hclge_cfg_func_mta_filter { + u8 accept; /* Only used lowest 1 bit */ + u8 function_id; + u8 rsv[22]; +}; + +#define HCLGE_CFG_MTA_ITEM_ACCEPT_B 0x0 +#define HCLGE_CFG_MTA_ITEM_IDX_S 0x0 +#define HCLGE_CFG_MTA_ITEM_IDX_M (0xfff << HCLGE_CFG_MTA_ITEM_IDX_S) +struct hclge_cfg_func_mta_item { + u16 item_idx; /* Only used lowest 12 bit */ + u8 accept; /* Only used lowest 1 bit */ + u8 rsv[21]; +}; + +struct hclge_mac_vlan_add { + __le16 flags; + __le16 mac_addr_hi16; + __le32 mac_addr_lo32; + __le32 mac_addr_msk_hi32; + __le16 mac_addr_msk_lo16; + __le16 vlan_tag; + __le16 ingress_port; + __le16 egress_port; + u8 rsv[4]; +}; + +#define HNS3_MAC_VLAN_CFG_FLAG_BIT 0 +struct hclge_mac_vlan_remove { + __le16 flags; + __le16 mac_addr_hi16; + __le32 mac_addr_lo32; + __le32 mac_addr_msk_hi32; + __le16 mac_addr_msk_lo16; + __le16 vlan_tag; + __le16 ingress_port; + __le16 egress_port; + u8 rsv[4]; +}; + +struct hclge_vlan_filter_ctrl { + u8 vlan_type; + u8 vlan_fe; + u8 rsv[22]; +}; + +struct hclge_vlan_filter_pf_cfg { + u8 vlan_offset; + u8 vlan_cfg; + u8 rsv[2]; + u8 vlan_offset_bitmap[20]; +}; + +struct hclge_vlan_filter_vf_cfg { + u16 vlan_id; + u8 resp_code; + u8 rsv; + u8 vlan_cfg; + u8 rsv1[3]; + u8 vf_bitmap[16]; +}; + +struct hclge_cfg_com_tqp_queue { + __le16 tqp_id; + __le16 stream_id; + u8 enable; + u8 rsv[19]; +}; + +struct hclge_cfg_tx_queue_pointer { + __le16 tqp_id; + __le16 tx_tail; + __le16 tx_head; + __le16 fbd_num; + __le16 ring_offset; + u8 rsv[14]; +}; + +#define HCLGE_TSO_MSS_MIN_S 0 +#define HCLGE_TSO_MSS_MIN_M (0x3FFF << HCLGE_TSO_MSS_MIN_S) + +#define HCLGE_TSO_MSS_MAX_S 16 +#define HCLGE_TSO_MSS_MAX_M (0x3FFF << HCLGE_TSO_MSS_MAX_S) + +struct hclge_cfg_tso_status { + __le16 tso_mss_min; + __le16 tso_mss_max; + u8 rsv[20]; +}; + +#define HCLGE_TSO_MSS_MIN 256 +#define HCLGE_TSO_MSS_MAX 9668 + +#define HCLGE_TQP_RESET_B 0 +struct hclge_reset_tqp_queue { + __le16 tqp_id; + u8 reset_req; + u8 ready_to_reset; + u8 rsv[20]; +}; + +#define HCLGE_DEFAULT_TX_BUF 0x4000 /* 16k bytes */ +#define HCLGE_TOTAL_PKT_BUF 0x108000 /* 1.03125M bytes */ +#define HCLGE_DEFAULT_DV 0xA000 /* 40k byte */ +#define HCLGE_DEFAULT_NON_DCB_DV 0x7800 /* 30K byte */ + +#define HCLGE_TYPE_CRQ 0 +#define HCLGE_TYPE_CSQ 1 +#define HCLGE_NIC_CSQ_BASEADDR_L_REG 0x27000 +#define HCLGE_NIC_CSQ_BASEADDR_H_REG 0x27004 +#define HCLGE_NIC_CSQ_DEPTH_REG 0x27008 +#define HCLGE_NIC_CSQ_TAIL_REG 0x27010 +#define HCLGE_NIC_CSQ_HEAD_REG 0x27014 +#define HCLGE_NIC_CRQ_BASEADDR_L_REG 0x27018 +#define HCLGE_NIC_CRQ_BASEADDR_H_REG 0x2701c +#define HCLGE_NIC_CRQ_DEPTH_REG 0x27020 +#define HCLGE_NIC_CRQ_TAIL_REG 0x27024 
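
On the register map above, hclge_cmd_config_regs() programs the 64-bit descriptor DMA address into a BASEADDR_L/BASEADDR_H pair, writing the low word as (u32)dma and the high word as (u32)((dma >> 31) >> 1). The two-step shift is the common kernel idiom for ">> 32" that stays well-defined even when dma_addr_t is only 32 bits wide. The address value in this stand-alone check is invented for the example.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dma = 0x0000001234ABC000ULL;	/* example descriptor base */
	uint32_t lo = (uint32_t)dma;
	uint32_t hi = (uint32_t)((dma >> 31) >> 1);

	assert(lo == 0x34ABC000);
	assert(hi == 0x12);
	printf("BASEADDR_L=0x%08x BASEADDR_H=0x%08x\n", lo, hi);
	return 0;
}
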
+#define HCLGE_NIC_CRQ_HEAD_REG 0x27028 +#define HCLGE_NIC_CMQ_EN_B 16 +#define HCLGE_NIC_CMQ_ENABLE BIT(HCLGE_NIC_CMQ_EN_B) +#define HCLGE_NIC_CMQ_DESC_NUM 1024 +#define HCLGE_NIC_CMQ_DESC_NUM_S 3 + +int hclge_cmd_init(struct hclge_dev *hdev); +static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value) +{ + writel(value, base + reg); +} + +#define hclge_write_dev(a, reg, value) \ + hclge_write_reg((a)->io_base, (reg), (value)) +#define hclge_read_dev(a, reg) \ + hclge_read_reg((a)->io_base, (reg)) + +static inline u32 hclge_read_reg(u8 __iomem *base, u32 reg) +{ + u8 __iomem *reg_addr = READ_ONCE(base); + + return readl(reg_addr + reg); +} + +#define HCLGE_SEND_SYNC(flag) \ + ((flag) & HCLGE_CMD_FLAG_NO_INTR) + +struct hclge_hw; +int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num); +void hclge_cmd_setup_basic_desc(struct hclge_desc *desc, + enum hclge_opcode_type opcode, bool is_read); + +int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, + struct hclge_promisc_param *param); + +enum hclge_cmd_status hclge_cmd_mdio_write(struct hclge_hw *hw, + struct hclge_desc *desc); +enum hclge_cmd_status hclge_cmd_mdio_read(struct hclge_hw *hw, + struct hclge_desc *desc); + +void hclge_destroy_cmd_queue(struct hclge_hw *hw); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c new file mode 100644 index 000000000000..c1cdbfd83bdb --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -0,0 +1,4291 @@ +/* + * Copyright (c) 2016-2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hclge_cmd.h" +#include "hclge_main.h" +#include "hclge_mdio.h" +#include "hclge_tm.h" +#include "hnae3.h" + +#define HCLGE_NAME "hclge" +#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset)))) +#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f)) +#define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f)) +#define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f)) + +static int hclge_rss_init_hw(struct hclge_dev *hdev); +static int hclge_set_mta_filter_mode(struct hclge_dev *hdev, + enum hclge_mta_dmac_sel_type mta_mac_sel, + bool enable); +static int hclge_init_vlan_config(struct hclge_dev *hdev); + +static struct hnae3_ae_algo ae_algo; + +static const struct pci_device_id ae_algo_pci_tbl[] = { + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0}, + /* required last entry */ + {0, } +}; + +static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = { + "Mac Loopback test", + "Serdes Loopback test", + "Phy Loopback test" +}; + +static const struct hclge_comm_stats_str g_all_64bit_stats_string[] = { + {"igu_rx_oversize_pkt", + HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_oversize_pkt)}, + {"igu_rx_undersize_pkt", + HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_undersize_pkt)}, + {"igu_rx_out_all_pkt", + HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_out_all_pkt)}, + {"igu_rx_uni_pkt", + HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_uni_pkt)}, + {"igu_rx_multi_pkt", + HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_multi_pkt)}, + {"igu_rx_broad_pkt", + HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_broad_pkt)}, + {"egu_tx_out_all_pkt", + HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_out_all_pkt)}, + {"egu_tx_uni_pkt", + HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_uni_pkt)}, + {"egu_tx_multi_pkt", + HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_multi_pkt)}, + {"egu_tx_broad_pkt", + HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_broad_pkt)}, + {"ssu_ppp_mac_key_num", + HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_mac_key_num)}, + {"ssu_ppp_host_key_num", + HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_host_key_num)}, + {"ppp_ssu_mac_rlt_num", + HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_mac_rlt_num)}, + {"ppp_ssu_host_rlt_num", + HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_host_rlt_num)}, + {"ssu_tx_in_num", + HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_in_num)}, + {"ssu_tx_out_num", + HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_out_num)}, + {"ssu_rx_in_num", + HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_in_num)}, + {"ssu_rx_out_num", + HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_out_num)} +}; + +static const struct hclge_comm_stats_str g_all_32bit_stats_string[] = { + {"igu_rx_err_pkt", + HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_err_pkt)}, + {"igu_rx_no_eof_pkt", + HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_eof_pkt)}, + {"igu_rx_no_sof_pkt", + HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_sof_pkt)}, + {"egu_tx_1588_pkt", + HCLGE_32BIT_STATS_FIELD_OFF(egu_tx_1588_pkt)}, + {"ssu_full_drop_num", + HCLGE_32BIT_STATS_FIELD_OFF(ssu_full_drop_num)}, + {"ssu_part_drop_num", + HCLGE_32BIT_STATS_FIELD_OFF(ssu_part_drop_num)}, + {"ppp_key_drop_num", + HCLGE_32BIT_STATS_FIELD_OFF(ppp_key_drop_num)}, + {"ppp_rlt_drop_num", + HCLGE_32BIT_STATS_FIELD_OFF(ppp_rlt_drop_num)}, + {"ssu_key_drop_num", + 
HCLGE_32BIT_STATS_FIELD_OFF(ssu_key_drop_num)}, + {"pkt_curr_buf_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_cnt)}, + {"qcn_fb_rcv_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_rcv_cnt)}, + {"qcn_fb_drop_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_drop_cnt)}, + {"qcn_fb_invaild_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_invaild_cnt)}, + {"rx_packet_tc0_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_in_cnt)}, + {"rx_packet_tc1_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_in_cnt)}, + {"rx_packet_tc2_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_in_cnt)}, + {"rx_packet_tc3_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_in_cnt)}, + {"rx_packet_tc4_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_in_cnt)}, + {"rx_packet_tc5_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_in_cnt)}, + {"rx_packet_tc6_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_in_cnt)}, + {"rx_packet_tc7_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_in_cnt)}, + {"rx_packet_tc0_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_out_cnt)}, + {"rx_packet_tc1_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_out_cnt)}, + {"rx_packet_tc2_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_out_cnt)}, + {"rx_packet_tc3_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_out_cnt)}, + {"rx_packet_tc4_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_out_cnt)}, + {"rx_packet_tc5_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_out_cnt)}, + {"rx_packet_tc6_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_out_cnt)}, + {"rx_packet_tc7_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_out_cnt)}, + {"tx_packet_tc0_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_in_cnt)}, + {"tx_packet_tc1_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_in_cnt)}, + {"tx_packet_tc2_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_in_cnt)}, + {"tx_packet_tc3_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_in_cnt)}, + {"tx_packet_tc4_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_in_cnt)}, + {"tx_packet_tc5_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_in_cnt)}, + {"tx_packet_tc6_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_in_cnt)}, + {"tx_packet_tc7_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_in_cnt)}, + {"tx_packet_tc0_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_out_cnt)}, + {"tx_packet_tc1_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_out_cnt)}, + {"tx_packet_tc2_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_out_cnt)}, + {"tx_packet_tc3_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_out_cnt)}, + {"tx_packet_tc4_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_out_cnt)}, + {"tx_packet_tc5_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_out_cnt)}, + {"tx_packet_tc6_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_out_cnt)}, + {"tx_packet_tc7_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_out_cnt)}, + {"pkt_curr_buf_tc0_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc0_cnt)}, + {"pkt_curr_buf_tc1_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc1_cnt)}, + {"pkt_curr_buf_tc2_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc2_cnt)}, + {"pkt_curr_buf_tc3_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc3_cnt)}, + {"pkt_curr_buf_tc4_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc4_cnt)}, + {"pkt_curr_buf_tc5_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc5_cnt)}, + {"pkt_curr_buf_tc6_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc6_cnt)}, + 
{"pkt_curr_buf_tc7_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc7_cnt)}, + {"mb_uncopy_num", + HCLGE_32BIT_STATS_FIELD_OFF(mb_uncopy_num)}, + {"lo_pri_unicast_rlt_drop_num", + HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_unicast_rlt_drop_num)}, + {"hi_pri_multicast_rlt_drop_num", + HCLGE_32BIT_STATS_FIELD_OFF(hi_pri_multicast_rlt_drop_num)}, + {"lo_pri_multicast_rlt_drop_num", + HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_multicast_rlt_drop_num)}, + {"rx_oq_drop_pkt_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_oq_drop_pkt_cnt)}, + {"tx_oq_drop_pkt_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_oq_drop_pkt_cnt)}, + {"nic_l2_err_drop_pkt_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(nic_l2_err_drop_pkt_cnt)}, + {"roc_l2_err_drop_pkt_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(roc_l2_err_drop_pkt_cnt)} +}; + +static const struct hclge_comm_stats_str g_mac_stats_string[] = { + {"mac_tx_mac_pause_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)}, + {"mac_rx_mac_pause_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)}, + {"mac_tx_pfc_pri0_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)}, + {"mac_tx_pfc_pri1_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)}, + {"mac_tx_pfc_pri2_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)}, + {"mac_tx_pfc_pri3_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)}, + {"mac_tx_pfc_pri4_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)}, + {"mac_tx_pfc_pri5_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)}, + {"mac_tx_pfc_pri6_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)}, + {"mac_tx_pfc_pri7_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)}, + {"mac_rx_pfc_pri0_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)}, + {"mac_rx_pfc_pri1_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)}, + {"mac_rx_pfc_pri2_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)}, + {"mac_rx_pfc_pri3_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)}, + {"mac_rx_pfc_pri4_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)}, + {"mac_rx_pfc_pri5_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)}, + {"mac_rx_pfc_pri6_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)}, + {"mac_rx_pfc_pri7_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)}, + {"mac_tx_total_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)}, + {"mac_tx_total_oct_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)}, + {"mac_tx_good_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)}, + {"mac_tx_bad_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)}, + {"mac_tx_good_oct_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)}, + {"mac_tx_bad_oct_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)}, + {"mac_tx_uni_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)}, + {"mac_tx_multi_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)}, + {"mac_tx_broad_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)}, + {"mac_tx_undersize_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)}, + {"mac_tx_overrsize_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_overrsize_pkt_num)}, + {"mac_tx_64_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)}, + {"mac_tx_65_127_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)}, + {"mac_tx_128_255_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)}, + {"mac_tx_256_511_oct_pkt_num", + 
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)}, + {"mac_tx_512_1023_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)}, + {"mac_tx_1024_1518_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)}, + {"mac_tx_1519_max_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_oct_pkt_num)}, + {"mac_rx_total_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)}, + {"mac_rx_total_oct_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)}, + {"mac_rx_good_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)}, + {"mac_rx_bad_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)}, + {"mac_rx_good_oct_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)}, + {"mac_rx_bad_oct_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)}, + {"mac_rx_uni_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)}, + {"mac_rx_multi_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)}, + {"mac_rx_broad_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)}, + {"mac_rx_undersize_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)}, + {"mac_rx_overrsize_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_overrsize_pkt_num)}, + {"mac_rx_64_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)}, + {"mac_rx_65_127_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)}, + {"mac_rx_128_255_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)}, + {"mac_rx_256_511_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)}, + {"mac_rx_512_1023_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)}, + {"mac_rx_1024_1518_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)}, + {"mac_rx_1519_max_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_oct_pkt_num)}, + + {"mac_trans_fragment_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_trans_fragment_pkt_num)}, + {"mac_trans_undermin_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_trans_undermin_pkt_num)}, + {"mac_trans_jabber_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_trans_jabber_pkt_num)}, + {"mac_trans_err_all_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_trans_err_all_pkt_num)}, + {"mac_trans_from_app_good_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_good_pkt_num)}, + {"mac_trans_from_app_bad_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_bad_pkt_num)}, + {"mac_rcv_fragment_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fragment_pkt_num)}, + {"mac_rcv_undermin_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_undermin_pkt_num)}, + {"mac_rcv_jabber_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_jabber_pkt_num)}, + {"mac_rcv_fcs_err_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fcs_err_pkt_num)}, + {"mac_rcv_send_app_good_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_good_pkt_num)}, + {"mac_rcv_send_app_bad_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_bad_pkt_num)} +}; + +static int hclge_64_bit_update_stats(struct hclge_dev *hdev) +{ +#define HCLGE_64_BIT_CMD_NUM 5 +#define HCLGE_64_BIT_RTN_DATANUM 4 + u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats); + struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM]; + u64 *desc_data; + int i, k, n; + int ret; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_64_BIT, true); + ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_64_BIT_CMD_NUM); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get 64 bit pkt stats fail, status = %d.\n", ret); + return ret; + } + + for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) { + if (unlikely(i == 0)) { + desc_data 
= (u64 *)(&desc[i].data[0]); + n = HCLGE_64_BIT_RTN_DATANUM - 1; + } else { + desc_data = (u64 *)(&desc[i]); + n = HCLGE_64_BIT_RTN_DATANUM; + } + for (k = 0; k < n; k++) { + *data++ += cpu_to_le64(*desc_data); + desc_data++; + } + } + + return 0; +} + +static void hclge_reset_partial_32bit_counter(struct hclge_32_bit_stats *stats) +{ + stats->pkt_curr_buf_cnt = 0; + stats->pkt_curr_buf_tc0_cnt = 0; + stats->pkt_curr_buf_tc1_cnt = 0; + stats->pkt_curr_buf_tc2_cnt = 0; + stats->pkt_curr_buf_tc3_cnt = 0; + stats->pkt_curr_buf_tc4_cnt = 0; + stats->pkt_curr_buf_tc5_cnt = 0; + stats->pkt_curr_buf_tc6_cnt = 0; + stats->pkt_curr_buf_tc7_cnt = 0; +} + +static int hclge_32_bit_update_stats(struct hclge_dev *hdev) +{ +#define HCLGE_32_BIT_CMD_NUM 8 +#define HCLGE_32_BIT_RTN_DATANUM 8 + + struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM]; + struct hclge_32_bit_stats *all_32_bit_stats; + u32 *desc_data; + int i, k, n; + u64 *data; + int ret; + + all_32_bit_stats = &hdev->hw_stats.all_32_bit_stats; + data = (u64 *)(&all_32_bit_stats->egu_tx_1588_pkt); + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_32_BIT, true); + ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_32_BIT_CMD_NUM); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get 32 bit pkt stats fail, status = %d.\n", ret); + + return ret; + } + + hclge_reset_partial_32bit_counter(all_32_bit_stats); + for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) { + if (unlikely(i == 0)) { + all_32_bit_stats->igu_rx_err_pkt += + cpu_to_le32(desc[i].data[0]); + all_32_bit_stats->igu_rx_no_eof_pkt += + cpu_to_le32(desc[i].data[1] & 0xffff); + all_32_bit_stats->igu_rx_no_sof_pkt += + cpu_to_le32((desc[i].data[1] >> 16) & 0xffff); + + desc_data = (u32 *)(&desc[i].data[2]); + n = HCLGE_32_BIT_RTN_DATANUM - 4; + } else { + desc_data = (u32 *)(&desc[i]); + n = HCLGE_32_BIT_RTN_DATANUM; + } + for (k = 0; k < n; k++) { + *data++ += cpu_to_le32(*desc_data); + desc_data++; + } + } + + return 0; +} + +static int hclge_mac_update_stats(struct hclge_dev *hdev) +{ +#define HCLGE_MAC_CMD_NUM 17 +#define HCLGE_RTN_DATA_NUM 4 + + u64 *data = (u64 *)(&hdev->hw_stats.mac_stats); + struct hclge_desc desc[HCLGE_MAC_CMD_NUM]; + u64 *desc_data; + int i, k, n; + int ret; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true); + ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get MAC pkt stats fail, status = %d.\n", ret); + + return ret; + } + + for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) { + if (unlikely(i == 0)) { + desc_data = (u64 *)(&desc[i].data[0]); + n = HCLGE_RTN_DATA_NUM - 2; + } else { + desc_data = (u64 *)(&desc[i]); + n = HCLGE_RTN_DATA_NUM; + } + for (k = 0; k < n; k++) { + *data++ += cpu_to_le64(*desc_data); + desc_data++; + } + } + + return 0; +} + +static int hclge_tqps_update_stats(struct hnae3_handle *handle) +{ + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hnae3_queue *queue; + struct hclge_desc desc[1]; + struct hclge_tqp *tqp; + int ret, i; + + for (i = 0; i < kinfo->num_tqps; i++) { + queue = handle->kinfo.tqp[i]; + tqp = container_of(queue, struct hclge_tqp, q); + /* command : HCLGE_OPC_QUERY_IGU_STAT */ + hclge_cmd_setup_basic_desc(&desc[0], + HCLGE_OPC_QUERY_RX_STATUS, + true); + + desc[0].data[0] = (tqp->index & 0x1ff); + ret = hclge_cmd_send(&hdev->hw, desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Query tqp stat fail, status = %d,queue = %d\n", + ret, i); + return ret; + } + 
tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += + cpu_to_le32(desc[0].data[4]); + } + + for (i = 0; i < kinfo->num_tqps; i++) { + queue = handle->kinfo.tqp[i]; + tqp = container_of(queue, struct hclge_tqp, q); + /* command : HCLGE_OPC_QUERY_IGU_STAT */ + hclge_cmd_setup_basic_desc(&desc[0], + HCLGE_OPC_QUERY_TX_STATUS, + true); + + desc[0].data[0] = (tqp->index & 0x1ff); + ret = hclge_cmd_send(&hdev->hw, desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Query tqp stat fail, status = %d,queue = %d\n", + ret, i); + return ret; + } + tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += + cpu_to_le32(desc[0].data[4]); + } + + return 0; +} + +static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data) +{ + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct hclge_tqp *tqp; + u64 *buff = data; + int i; + + for (i = 0; i < kinfo->num_tqps; i++) { + tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q); + *buff++ = cpu_to_le64(tqp->tqp_stats.rcb_tx_ring_pktnum_rcd); + } + + for (i = 0; i < kinfo->num_tqps; i++) { + tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q); + *buff++ = cpu_to_le64(tqp->tqp_stats.rcb_rx_ring_pktnum_rcd); + } + + return buff; +} + +static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset) +{ + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + + return kinfo->num_tqps * (2); +} + +static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data) +{ + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + u8 *buff = data; + int i = 0; + + for (i = 0; i < kinfo->num_tqps; i++) { + struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i], + struct hclge_tqp, q); + snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_tx_pktnum_rcd", + tqp->index); + buff = buff + ETH_GSTRING_LEN; + } + + for (i = 0; i < kinfo->num_tqps; i++) { + struct hclge_tqp *tqp = container_of(kinfo->tqp[i], + struct hclge_tqp, q); + snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_rx_pktnum_rcd", + tqp->index); + buff = buff + ETH_GSTRING_LEN; + } + + return buff; +} + +static u64 *hclge_comm_get_stats(void *comm_stats, + const struct hclge_comm_stats_str strs[], + int size, u64 *data) +{ + u64 *buf = data; + u32 i; + + for (i = 0; i < size; i++) + buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset); + + return buf + size; +} + +static u8 *hclge_comm_get_strings(u32 stringset, + const struct hclge_comm_stats_str strs[], + int size, u8 *data) +{ + char *buff = (char *)data; + u32 i; + + if (stringset != ETH_SS_STATS) + return buff; + + for (i = 0; i < size; i++) { + snprintf(buff, ETH_GSTRING_LEN, + strs[i].desc); + buff = buff + ETH_GSTRING_LEN; + } + + return (u8 *)buff; +} + +static void hclge_update_netstat(struct hclge_hw_stats *hw_stats, + struct net_device_stats *net_stats) +{ + net_stats->tx_dropped = 0; + net_stats->rx_dropped = hw_stats->all_32_bit_stats.ssu_full_drop_num; + net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num; + net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num; + + net_stats->rx_errors = hw_stats->mac_stats.mac_rx_overrsize_pkt_num; + net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num; + net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_err_pkt; + net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt; + net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt; + net_stats->rx_errors += hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num; + + net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num; + net_stats->multicast += 
hw_stats->mac_stats.mac_rx_multi_pkt_num; + + net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num; + net_stats->rx_length_errors = + hw_stats->mac_stats.mac_rx_undersize_pkt_num; + net_stats->rx_length_errors += + hw_stats->mac_stats.mac_rx_overrsize_pkt_num; + net_stats->rx_over_errors = + hw_stats->mac_stats.mac_rx_overrsize_pkt_num; +} + +static void hclge_update_stats_for_all(struct hclge_dev *hdev) +{ + struct hnae3_handle *handle; + int status; + + handle = &hdev->vport[0].nic; + if (handle->client) { + status = hclge_tqps_update_stats(handle); + if (status) { + dev_err(&hdev->pdev->dev, + "Update TQPS stats fail, status = %d.\n", + status); + } + } + + status = hclge_mac_update_stats(hdev); + if (status) + dev_err(&hdev->pdev->dev, + "Update MAC stats fail, status = %d.\n", status); + + status = hclge_32_bit_update_stats(hdev); + if (status) + dev_err(&hdev->pdev->dev, + "Update 32 bit stats fail, status = %d.\n", + status); + + hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats); +} + +static void hclge_update_stats(struct hnae3_handle *handle, + struct net_device_stats *net_stats) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_hw_stats *hw_stats = &hdev->hw_stats; + int status; + + status = hclge_mac_update_stats(hdev); + if (status) + dev_err(&hdev->pdev->dev, + "Update MAC stats fail, status = %d.\n", + status); + + status = hclge_32_bit_update_stats(hdev); + if (status) + dev_err(&hdev->pdev->dev, + "Update 32 bit stats fail, status = %d.\n", + status); + + status = hclge_64_bit_update_stats(hdev); + if (status) + dev_err(&hdev->pdev->dev, + "Update 64 bit stats fail, status = %d.\n", + status); + + status = hclge_tqps_update_stats(handle); + if (status) + dev_err(&hdev->pdev->dev, + "Update TQPS stats fail, status = %d.\n", + status); + + hclge_update_netstat(hw_stats, net_stats); +} + +static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) +{ +#define HCLGE_LOOPBACK_TEST_FLAGS 0x7 + + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int count = 0; + + /* Loopback test support rules: + * mac: only GE mode support + * serdes: all mac mode will support include GE/XGE/LGE/CGE + * phy: only support when phy device exist on board + */ + if (stringset == ETH_SS_TEST) { + /* clear loopback bit flags at first */ + handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS)); + if (hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M || + hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M || + hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) { + count += 1; + handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK; + } else { + count = -EOPNOTSUPP; + } + } else if (stringset == ETH_SS_STATS) { + count = ARRAY_SIZE(g_mac_stats_string) + + ARRAY_SIZE(g_all_32bit_stats_string) + + ARRAY_SIZE(g_all_64bit_stats_string) + + hclge_tqps_get_sset_count(handle, stringset); + } + + return count; +} + +static void hclge_get_strings(struct hnae3_handle *handle, + u32 stringset, + u8 *data) +{ + u8 *p = (char *)data; + int size; + + if (stringset == ETH_SS_STATS) { + size = ARRAY_SIZE(g_mac_stats_string); + p = hclge_comm_get_strings(stringset, + g_mac_stats_string, + size, + p); + size = ARRAY_SIZE(g_all_32bit_stats_string); + p = hclge_comm_get_strings(stringset, + g_all_32bit_stats_string, + size, + p); + size = ARRAY_SIZE(g_all_64bit_stats_string); + p = hclge_comm_get_strings(stringset, + g_all_64bit_stats_string, + size, + p); + p = hclge_tqps_get_strings(handle, 
p); + } else if (stringset == ETH_SS_TEST) { + if (handle->flags & HNAE3_SUPPORT_MAC_LOOPBACK) { + memcpy(p, + hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_MAC], + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + if (handle->flags & HNAE3_SUPPORT_SERDES_LOOPBACK) { + memcpy(p, + hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_SERDES], + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) { + memcpy(p, + hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_PHY], + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + } +} + +static void hclge_get_stats(struct hnae3_handle *handle, u64 *data) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u64 *p; + + p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, + g_mac_stats_string, + ARRAY_SIZE(g_mac_stats_string), + data); + p = hclge_comm_get_stats(&hdev->hw_stats.all_32_bit_stats, + g_all_32bit_stats_string, + ARRAY_SIZE(g_all_32bit_stats_string), + p); + p = hclge_comm_get_stats(&hdev->hw_stats.all_64_bit_stats, + g_all_64bit_stats_string, + ARRAY_SIZE(g_all_64bit_stats_string), + p); + p = hclge_tqps_get_stats(handle, p); +} + +static int hclge_parse_func_status(struct hclge_dev *hdev, + struct hclge_func_status *status) +{ + if (!(status->pf_state & HCLGE_PF_STATE_DONE)) + return -EINVAL; + + /* Set the pf to main pf */ + if (status->pf_state & HCLGE_PF_STATE_MAIN) + hdev->flag |= HCLGE_FLAG_MAIN; + else + hdev->flag &= ~HCLGE_FLAG_MAIN; + + hdev->num_req_vfs = status->vf_num / status->pf_num; + return 0; +} + +static int hclge_query_function_status(struct hclge_dev *hdev) +{ + struct hclge_func_status *req; + struct hclge_desc desc; + int timeout = 0; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true); + req = (struct hclge_func_status *)desc.data; + + do { + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "query function status failed %d.\n", + ret); + + return ret; + } + + /* Check pf reset is done */ + if (req->pf_state) + break; + usleep_range(1000, 2000); + } while (timeout++ < 5); + + ret = hclge_parse_func_status(hdev, req); + + return ret; +} + +static int hclge_query_pf_resource(struct hclge_dev *hdev) +{ + struct hclge_pf_res *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "query pf resource failed %d.\n", ret); + return ret; + } + + req = (struct hclge_pf_res *)desc.data; + hdev->num_tqps = __le16_to_cpu(req->tqp_num); + hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S; + + if (hnae3_dev_roce_supported(hdev)) { + hdev->num_roce_msix = + hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number), + HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); + + /* PF should have NIC vectors and Roce vectors, + * NIC vectors are queued before Roce vectors. 
+ */ + hdev->num_msi = hdev->num_roce_msix + HCLGE_ROCE_VECTOR_OFFSET; + } else { + hdev->num_msi = + hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number), + HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); + } + + return 0; +} + +static int hclge_parse_speed(int speed_cmd, int *speed) +{ + switch (speed_cmd) { + case 6: + *speed = HCLGE_MAC_SPEED_10M; + break; + case 7: + *speed = HCLGE_MAC_SPEED_100M; + break; + case 0: + *speed = HCLGE_MAC_SPEED_1G; + break; + case 1: + *speed = HCLGE_MAC_SPEED_10G; + break; + case 2: + *speed = HCLGE_MAC_SPEED_25G; + break; + case 3: + *speed = HCLGE_MAC_SPEED_40G; + break; + case 4: + *speed = HCLGE_MAC_SPEED_50G; + break; + case 5: + *speed = HCLGE_MAC_SPEED_100G; + break; + default: + return -EINVAL; + } + + return 0; +} + +static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) +{ + struct hclge_cfg_param *req; + u64 mac_addr_tmp_high; + u64 mac_addr_tmp; + int i; + + req = (struct hclge_cfg_param *)desc[0].data; + + /* get the configuration */ + cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]), + HCLGE_CFG_VMDQ_M, + HCLGE_CFG_VMDQ_S); + cfg->tc_num = hnae_get_field(__le32_to_cpu(req->param[0]), + HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S); + cfg->tqp_desc_num = hnae_get_field(__le32_to_cpu(req->param[0]), + HCLGE_CFG_TQP_DESC_N_M, + HCLGE_CFG_TQP_DESC_N_S); + + cfg->phy_addr = hnae_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_PHY_ADDR_M, + HCLGE_CFG_PHY_ADDR_S); + cfg->media_type = hnae_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_MEDIA_TP_M, + HCLGE_CFG_MEDIA_TP_S); + cfg->rx_buf_len = hnae_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_RX_BUF_LEN_M, + HCLGE_CFG_RX_BUF_LEN_S); + /* get mac_address */ + mac_addr_tmp = __le32_to_cpu(req->param[2]); + mac_addr_tmp_high = hnae_get_field(__le32_to_cpu(req->param[3]), + HCLGE_CFG_MAC_ADDR_H_M, + HCLGE_CFG_MAC_ADDR_H_S); + + mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1; + + cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]), + HCLGE_CFG_DEFAULT_SPEED_M, + HCLGE_CFG_DEFAULT_SPEED_S); + for (i = 0; i < ETH_ALEN; i++) + cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff; + + req = (struct hclge_cfg_param *)desc[1].data; + cfg->numa_node_map = __le32_to_cpu(req->param[0]); +} + +/* hclge_get_cfg: query the static parameter from flash + * @hdev: pointer to struct hclge_dev + * @hcfg: the config structure to be getted + */ +static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg) +{ + struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM]; + struct hclge_cfg_param *req; + int i, ret; + + for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) { + req = (struct hclge_cfg_param *)desc[i].data; + hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM, + true); + hnae_set_field(req->offset, HCLGE_CFG_OFFSET_M, + HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES); + /* Len should be united by 4 bytes when send to hardware */ + hnae_set_field(req->offset, HCLGE_CFG_RD_LEN_M, + HCLGE_CFG_RD_LEN_S, + HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT); + req->offset = cpu_to_le32(req->offset); + } + + ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM); + if (ret) { + dev_err(&hdev->pdev->dev, + "get config failed %d.\n", ret); + return ret; + } + + hclge_parse_cfg(hcfg, desc); + return 0; +} + +static int hclge_get_cap(struct hclge_dev *hdev) +{ + int ret; + + ret = hclge_query_function_status(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "query function status error %d.\n", ret); + return ret; + } + + /* get pf resource */ + ret = 
hclge_query_pf_resource(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "query pf resource error %d.\n", ret); + return ret; + } + + return 0; +} + +static int hclge_configure(struct hclge_dev *hdev) +{ + struct hclge_cfg cfg; + int ret, i; + + ret = hclge_get_cfg(hdev, &cfg); + if (ret) { + dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret); + return ret; + } + + hdev->num_vmdq_vport = cfg.vmdq_vport_num; + hdev->base_tqp_pid = 0; + hdev->rss_size_max = 1; + hdev->rx_buf_len = cfg.rx_buf_len; + ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr); + hdev->hw.mac.media_type = cfg.media_type; + hdev->hw.mac.phy_addr = cfg.phy_addr; + hdev->num_desc = cfg.tqp_desc_num; + hdev->tm_info.num_pg = 1; + hdev->tm_info.num_tc = cfg.tc_num; + hdev->tm_info.hw_pfc_map = 0; + + ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed); + if (ret) { + dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret); + return ret; + } + + if ((hdev->tm_info.num_tc > HNAE3_MAX_TC) || + (hdev->tm_info.num_tc < 1)) { + dev_warn(&hdev->pdev->dev, "TC num = %d.\n", + hdev->tm_info.num_tc); + hdev->tm_info.num_tc = 1; + } + + /* Currently not support uncontiuous tc */ + for (i = 0; i < cfg.tc_num; i++) + hnae_set_bit(hdev->hw_tc_map, i, 1); + + if (!hdev->num_vmdq_vport && !hdev->num_req_vfs) + hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE; + else + hdev->tx_sch_mode = HCLGE_FLAG_VNET_BASE_SCH_MODE; + + return ret; +} + +static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min, + int tso_mss_max) +{ + struct hclge_cfg_tso_status *req; + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false); + + req = (struct hclge_cfg_tso_status *)desc.data; + hnae_set_field(req->tso_mss_min, HCLGE_TSO_MSS_MIN_M, + HCLGE_TSO_MSS_MIN_S, tso_mss_min); + hnae_set_field(req->tso_mss_max, HCLGE_TSO_MSS_MIN_M, + HCLGE_TSO_MSS_MIN_S, tso_mss_max); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_alloc_tqps(struct hclge_dev *hdev) +{ + struct hclge_tqp *tqp; + int i; + + hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, + sizeof(struct hclge_tqp), GFP_KERNEL); + if (!hdev->htqp) + return -ENOMEM; + + tqp = hdev->htqp; + + for (i = 0; i < hdev->num_tqps; i++) { + tqp->dev = &hdev->pdev->dev; + tqp->index = i; + + tqp->q.ae_algo = &ae_algo; + tqp->q.buf_size = hdev->rx_buf_len; + tqp->q.desc_num = hdev->num_desc; + tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET + + i * HCLGE_TQP_REG_SIZE; + + tqp++; + } + + return 0; +} + +static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id, + u16 tqp_pid, u16 tqp_vid, bool is_pf) +{ + struct hclge_tqp_map *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false); + + req = (struct hclge_tqp_map *)desc.data; + req->tqp_id = cpu_to_le16(tqp_pid); + req->tqp_vf = cpu_to_le16(func_id); + req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B | + 1 << HCLGE_TQP_MAP_EN_B; + req->tqp_vid = cpu_to_le16(tqp_vid); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", + ret); + return ret; + } + + return 0; +} + +static int hclge_assign_tqp(struct hclge_vport *vport, + struct hnae3_queue **tqp, u16 num_tqps) +{ + struct hclge_dev *hdev = vport->back; + int i, alloced, func_id, ret; + bool is_pf; + + func_id = vport->vport_id; + is_pf = (vport->vport_id == 0) ? 
true : false; + + for (i = 0, alloced = 0; i < hdev->num_tqps && + alloced < num_tqps; i++) { + if (!hdev->htqp[i].alloced) { + hdev->htqp[i].q.handle = &vport->nic; + hdev->htqp[i].q.tqp_index = alloced; + tqp[alloced] = &hdev->htqp[i].q; + hdev->htqp[i].alloced = true; + ret = hclge_map_tqps_to_func(hdev, func_id, + hdev->htqp[i].index, + alloced, is_pf); + if (ret) + return ret; + + alloced++; + } + } + vport->alloc_tqps = num_tqps; + + return 0; +} + +static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps) +{ + struct hnae3_handle *nic = &vport->nic; + struct hnae3_knic_private_info *kinfo = &nic->kinfo; + struct hclge_dev *hdev = vport->back; + int i, ret; + + kinfo->num_desc = hdev->num_desc; + kinfo->rx_buf_len = hdev->rx_buf_len; + kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc); + kinfo->rss_size + = min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc); + kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc; + + for (i = 0; i < HNAE3_MAX_TC; i++) { + if (hdev->hw_tc_map & BIT(i)) { + kinfo->tc_info[i].enable = true; + kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size; + kinfo->tc_info[i].tqp_count = kinfo->rss_size; + kinfo->tc_info[i].tc = i; + } else { + /* Set to default queue if TC is disable */ + kinfo->tc_info[i].enable = false; + kinfo->tc_info[i].tqp_offset = 0; + kinfo->tc_info[i].tqp_count = 1; + kinfo->tc_info[i].tc = 0; + } + } + + kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, + sizeof(struct hnae3_queue *), GFP_KERNEL); + if (!kinfo->tqp) + return -ENOMEM; + + ret = hclge_assign_tqp(vport, kinfo->tqp, kinfo->num_tqps); + if (ret) { + dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret); + return -EINVAL; + } + + return 0; +} + +static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps) +{ + /* this would be initialized later */ +} + +static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps) +{ + struct hnae3_handle *nic = &vport->nic; + struct hclge_dev *hdev = vport->back; + int ret; + + nic->pdev = hdev->pdev; + nic->ae_algo = &ae_algo; + nic->numa_node_mask = hdev->numa_node_mask; + + if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) { + ret = hclge_knic_setup(vport, num_tqps); + if (ret) { + dev_err(&hdev->pdev->dev, "knic setup failed %d\n", + ret); + return ret; + } + } else { + hclge_unic_setup(vport, num_tqps); + } + + return 0; +} + +static int hclge_alloc_vport(struct hclge_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + struct hclge_vport *vport; + u32 tqp_main_vport; + u32 tqp_per_vport; + int num_vport, i; + int ret; + + /* We need to alloc a vport for main NIC of PF */ + num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; + + if (hdev->num_tqps < num_vport) + num_vport = hdev->num_tqps; + + /* Alloc the same number of TQPs for every vport */ + tqp_per_vport = hdev->num_tqps / num_vport; + tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport; + + vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport), + GFP_KERNEL); + if (!vport) + return -ENOMEM; + + hdev->vport = vport; + hdev->num_alloc_vport = num_vport; + +#ifdef CONFIG_PCI_IOV + /* Enable SRIOV */ + if (hdev->num_req_vfs) { + dev_info(&pdev->dev, "active VFs(%d) found, enabling SRIOV\n", + hdev->num_req_vfs); + ret = pci_enable_sriov(hdev->pdev, hdev->num_req_vfs); + if (ret) { + hdev->num_alloc_vfs = 0; + dev_err(&pdev->dev, "SRIOV enable failed %d\n", + ret); + return ret; + } + } + hdev->num_alloc_vfs = hdev->num_req_vfs; +#endif + + for (i = 0; i < num_vport; i++) { + vport->back = hdev; + 
vport->vport_id = i; + + if (i == 0) + ret = hclge_vport_setup(vport, tqp_main_vport); + else + ret = hclge_vport_setup(vport, tqp_per_vport); + if (ret) { + dev_err(&pdev->dev, + "vport setup failed for vport %d, %d\n", + i, ret); + return ret; + } + + vport++; + } + + return 0; +} + +static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev, u16 buf_size) +{ +/* TX buffer size is unit by 128 byte */ +#define HCLGE_BUF_SIZE_UNIT_SHIFT 7 +#define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15) + struct hclge_tx_buff_alloc *req; + struct hclge_desc desc; + int ret; + u8 i; + + req = (struct hclge_tx_buff_alloc *)desc.data; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0); + for (i = 0; i < HCLGE_TC_NUM; i++) + req->tx_pkt_buff[i] = + cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) | + HCLGE_BUF_SIZE_UPDATE_EN_MSK); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n", + ret); + return ret; + } + + return 0; +} + +static int hclge_tx_buffer_alloc(struct hclge_dev *hdev, u32 buf_size) +{ + int ret = hclge_cmd_alloc_tx_buff(hdev, buf_size); + + if (ret) { + dev_err(&hdev->pdev->dev, + "tx buffer alloc failed %d\n", ret); + return ret; + } + + return 0; +} + +static int hclge_get_tc_num(struct hclge_dev *hdev) +{ + int i, cnt = 0; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) + if (hdev->hw_tc_map & BIT(i)) + cnt++; + return cnt; +} + +static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev) +{ + int i, cnt = 0; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) + if (hdev->hw_tc_map & BIT(i) && + hdev->tm_info.hw_pfc_map & BIT(i)) + cnt++; + return cnt; +} + +/* Get the number of pfc enabled TCs, which have private buffer */ +static int hclge_get_pfc_priv_num(struct hclge_dev *hdev) +{ + struct hclge_priv_buf *priv; + int i, cnt = 0; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + priv = &hdev->priv_buf[i]; + if ((hdev->tm_info.hw_pfc_map & BIT(i)) && + priv->enable) + cnt++; + } + + return cnt; +} + +/* Get the number of pfc disabled TCs, which have private buffer */ +static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev) +{ + struct hclge_priv_buf *priv; + int i, cnt = 0; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + priv = &hdev->priv_buf[i]; + if (hdev->hw_tc_map & BIT(i) && + !(hdev->tm_info.hw_pfc_map & BIT(i)) && + priv->enable) + cnt++; + } + + return cnt; +} + +static u32 hclge_get_rx_priv_buff_alloced(struct hclge_dev *hdev) +{ + struct hclge_priv_buf *priv; + u32 rx_priv = 0; + int i; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + priv = &hdev->priv_buf[i]; + if (priv->enable) + rx_priv += priv->buf_size; + } + return rx_priv; +} + +static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, u32 rx_all) +{ + u32 shared_buf_min, shared_buf_tc, shared_std; + int tc_num, pfc_enable_num; + u32 shared_buf; + u32 rx_priv; + int i; + + tc_num = hclge_get_tc_num(hdev); + pfc_enable_num = hclge_get_pfc_enalbe_num(hdev); + + if (hnae3_dev_dcb_supported(hdev)) + shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV; + else + shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV; + + shared_buf_tc = pfc_enable_num * hdev->mps + + (tc_num - pfc_enable_num) * hdev->mps / 2 + + hdev->mps; + shared_std = max_t(u32, shared_buf_min, shared_buf_tc); + + rx_priv = hclge_get_rx_priv_buff_alloced(hdev); + if (rx_all <= rx_priv + shared_std) + return false; + + shared_buf = rx_all - rx_priv; + hdev->s_buf.buf_size = shared_buf; + hdev->s_buf.self.high = shared_buf; + hdev->s_buf.self.low = 2 * hdev->mps; + + for (i = 0; i < 
HCLGE_MAX_TC_NUM; i++) { + if ((hdev->hw_tc_map & BIT(i)) && + (hdev->tm_info.hw_pfc_map & BIT(i))) { + hdev->s_buf.tc_thrd[i].low = hdev->mps; + hdev->s_buf.tc_thrd[i].high = 2 * hdev->mps; + } else { + hdev->s_buf.tc_thrd[i].low = 0; + hdev->s_buf.tc_thrd[i].high = hdev->mps; + } + } + + return true; +} + +/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs + * @hdev: pointer to struct hclge_dev + * @tx_size: the allocated tx buffer for all TCs + * @return: 0: calculation successful, negative: fail + */ +int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size) +{ + u32 rx_all = hdev->pkt_buf_size - tx_size; + int no_pfc_priv_num, pfc_priv_num; + struct hclge_priv_buf *priv; + int i; + + /* When DCB is not supported, rx private + * buffer is not allocated. + */ + if (!hnae3_dev_dcb_supported(hdev)) { + if (!hclge_is_rx_buf_ok(hdev, rx_all)) + return -ENOMEM; + + return 0; + } + + /* step 1, try to alloc private buffer for all enabled tc */ + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + priv = &hdev->priv_buf[i]; + if (hdev->hw_tc_map & BIT(i)) { + priv->enable = 1; + if (hdev->tm_info.hw_pfc_map & BIT(i)) { + priv->wl.low = hdev->mps; + priv->wl.high = priv->wl.low + hdev->mps; + priv->buf_size = priv->wl.high + + HCLGE_DEFAULT_DV; + } else { + priv->wl.low = 0; + priv->wl.high = 2 * hdev->mps; + priv->buf_size = priv->wl.high; + } + } else { + priv->enable = 0; + priv->wl.low = 0; + priv->wl.high = 0; + priv->buf_size = 0; + } + } + + if (hclge_is_rx_buf_ok(hdev, rx_all)) + return 0; + + /* step 2, try to decrease the buffer size of + * no pfc TC's private buffer + */ + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + priv = &hdev->priv_buf[i]; + + priv->enable = 0; + priv->wl.low = 0; + priv->wl.high = 0; + priv->buf_size = 0; + + if (!(hdev->hw_tc_map & BIT(i))) + continue; + + priv->enable = 1; + + if (hdev->tm_info.hw_pfc_map & BIT(i)) { + priv->wl.low = 128; + priv->wl.high = priv->wl.low + hdev->mps; + priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV; + } else { + priv->wl.low = 0; + priv->wl.high = hdev->mps; + priv->buf_size = priv->wl.high; + } + } + + if (hclge_is_rx_buf_ok(hdev, rx_all)) + return 0; + + /* step 3, try to reduce the number of pfc disabled TCs, + * which have private buffer + */ + /* get the total number of pfc disabled TCs which have a private buffer */ + no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev); + + /* let the last one be cleared first */ + for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { + priv = &hdev->priv_buf[i]; + + if (hdev->hw_tc_map & BIT(i) && + !(hdev->tm_info.hw_pfc_map & BIT(i))) { + /* Clear the no pfc TC private buffer */ + priv->wl.low = 0; + priv->wl.high = 0; + priv->buf_size = 0; + priv->enable = 0; + no_pfc_priv_num--; + } + + if (hclge_is_rx_buf_ok(hdev, rx_all) || + no_pfc_priv_num == 0) + break; + } + + if (hclge_is_rx_buf_ok(hdev, rx_all)) + return 0; + + /* step 4, try to reduce the number of pfc enabled TCs + * which have private buffer. 
+ */ + pfc_priv_num = hclge_get_pfc_priv_num(hdev); + + /* let the last to be cleared first */ + for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { + priv = &hdev->priv_buf[i]; + + if (hdev->hw_tc_map & BIT(i) && + hdev->tm_info.hw_pfc_map & BIT(i)) { + /* Reduce the number of pfc TC with private buffer */ + priv->wl.low = 0; + priv->enable = 0; + priv->wl.high = 0; + priv->buf_size = 0; + pfc_priv_num--; + } + + if (hclge_is_rx_buf_ok(hdev, rx_all) || + pfc_priv_num == 0) + break; + } + if (hclge_is_rx_buf_ok(hdev, rx_all)) + return 0; + + return -ENOMEM; +} + +static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev) +{ + struct hclge_rx_priv_buff *req; + struct hclge_desc desc; + int ret; + int i; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false); + req = (struct hclge_rx_priv_buff *)desc.data; + + /* Alloc private buffer TCs */ + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + struct hclge_priv_buf *priv = &hdev->priv_buf[i]; + + req->buf_num[i] = + cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S); + req->buf_num[i] |= + cpu_to_le16(true << HCLGE_TC0_PRI_BUF_EN_B); + } + + req->shared_buf = + cpu_to_le16((hdev->s_buf.buf_size >> HCLGE_BUF_UNIT_S) | + (1 << HCLGE_TC0_PRI_BUF_EN_B)); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "rx private buffer alloc cmd failed %d\n", ret); + return ret; + } + + return 0; +} + +#define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 1 : 0) + +static int hclge_rx_priv_wl_config(struct hclge_dev *hdev) +{ + struct hclge_rx_priv_wl_buf *req; + struct hclge_priv_buf *priv; + struct hclge_desc desc[2]; + int i, j; + int ret; + + for (i = 0; i < 2; i++) { + hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC, + false); + req = (struct hclge_rx_priv_wl_buf *)desc[i].data; + + /* The first descriptor set the NEXT bit to 1 */ + if (i == 0) + desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + else + desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + + for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { + priv = &hdev->priv_buf[i * HCLGE_TC_NUM_ONE_DESC + j]; + req->tc_wl[j].high = + cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S); + req->tc_wl[j].high |= + cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.high) << + HCLGE_RX_PRIV_EN_B); + req->tc_wl[j].low = + cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S); + req->tc_wl[j].low |= + cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.low) << + HCLGE_RX_PRIV_EN_B); + } + } + + /* Send 2 descriptor at one time */ + ret = hclge_cmd_send(&hdev->hw, desc, 2); + if (ret) { + dev_err(&hdev->pdev->dev, + "rx private waterline config cmd failed %d\n", + ret); + return ret; + } + return 0; +} + +static int hclge_common_thrd_config(struct hclge_dev *hdev) +{ + struct hclge_shared_buf *s_buf = &hdev->s_buf; + struct hclge_rx_com_thrd *req; + struct hclge_desc desc[2]; + struct hclge_tc_thrd *tc; + int i, j; + int ret; + + for (i = 0; i < 2; i++) { + hclge_cmd_setup_basic_desc(&desc[i], + HCLGE_OPC_RX_COM_THRD_ALLOC, false); + req = (struct hclge_rx_com_thrd *)&desc[i].data; + + /* The first descriptor set the NEXT bit to 1 */ + if (i == 0) + desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + else + desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + + for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { + tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j]; + + req->com_thrd[j].high = + cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S); + req->com_thrd[j].high |= + cpu_to_le16(HCLGE_PRIV_ENABLE(tc->high) << + HCLGE_RX_PRIV_EN_B); + req->com_thrd[j].low = + cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S); + 
req->com_thrd[j].low |= + cpu_to_le16(HCLGE_PRIV_ENABLE(tc->low) << + HCLGE_RX_PRIV_EN_B); + } + } + + /* Send 2 descriptors at one time */ + ret = hclge_cmd_send(&hdev->hw, desc, 2); + if (ret) { + dev_err(&hdev->pdev->dev, + "common threshold config cmd failed %d\n", ret); + return ret; + } + return 0; +} + +static int hclge_common_wl_config(struct hclge_dev *hdev) +{ + struct hclge_shared_buf *buf = &hdev->s_buf; + struct hclge_rx_com_wl *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false); + + req = (struct hclge_rx_com_wl *)desc.data; + req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S); + req->com_wl.high |= + cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.high) << + HCLGE_RX_PRIV_EN_B); + + req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S); + req->com_wl.low |= + cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.low) << + HCLGE_RX_PRIV_EN_B); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "common waterline config cmd failed %d\n", ret); + return ret; + } + + return 0; +} + +int hclge_buffer_alloc(struct hclge_dev *hdev) +{ + u32 tx_buf_size = HCLGE_DEFAULT_TX_BUF; + int ret; + + hdev->priv_buf = devm_kmalloc_array(&hdev->pdev->dev, HCLGE_MAX_TC_NUM, + sizeof(struct hclge_priv_buf), + GFP_KERNEL | __GFP_ZERO); + if (!hdev->priv_buf) + return -ENOMEM; + + ret = hclge_tx_buffer_alloc(hdev, tx_buf_size); + if (ret) { + dev_err(&hdev->pdev->dev, + "could not alloc tx buffers %d\n", ret); + return ret; + } + + ret = hclge_rx_buffer_calc(hdev, tx_buf_size); + if (ret) { + dev_err(&hdev->pdev->dev, + "could not calc rx priv buffer size for all TCs %d\n", + ret); + return ret; + } + + ret = hclge_rx_priv_buf_alloc(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n", + ret); + return ret; + } + + if (hnae3_dev_dcb_supported(hdev)) { + ret = hclge_rx_priv_wl_config(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "could not configure rx private waterline %d\n", + ret); + return ret; + } + + ret = hclge_common_thrd_config(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "could not configure common threshold %d\n", + ret); + return ret; + } + } + + ret = hclge_common_wl_config(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "could not configure common waterline %d\n", ret); + return ret; + } + + return 0; +} + +static int hclge_init_roce_base_info(struct hclge_vport *vport) +{ + struct hnae3_handle *roce = &vport->roce; + struct hnae3_handle *nic = &vport->nic; + + roce->rinfo.num_vectors = vport->back->num_roce_msix; + + if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors || + vport->back->num_msi_left == 0) + return -EINVAL; + + roce->rinfo.base_vector = vport->back->roce_base_vector; + + roce->rinfo.netdev = nic->kinfo.netdev; + roce->rinfo.roce_io_base = vport->back->hw.io_base; + + roce->pdev = nic->pdev; + roce->ae_algo = nic->ae_algo; + roce->numa_node_mask = nic->numa_node_mask; + + return 0; +} + +static int hclge_init_msix(struct hclge_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + int ret, i; + + hdev->msix_entries = devm_kcalloc(&pdev->dev, hdev->num_msi, + sizeof(struct msix_entry), + GFP_KERNEL); + if (!hdev->msix_entries) + return -ENOMEM; + + hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, + sizeof(u16), GFP_KERNEL); + if (!hdev->vector_status) + return -ENOMEM; + + for (i = 0; i < hdev->num_msi; i++) { + hdev->msix_entries[i].entry = i; + hdev->vector_status[i] = HCLGE_INVALID_VPORT; + } + + 
hdev->num_msi_left = hdev->num_msi; + hdev->base_msi_vector = hdev->pdev->irq; + hdev->roce_base_vector = hdev->base_msi_vector + + HCLGE_ROCE_VECTOR_OFFSET; + + ret = pci_enable_msix_range(hdev->pdev, hdev->msix_entries, + hdev->num_msi, hdev->num_msi); + if (ret < 0) { + dev_info(&hdev->pdev->dev, + "MSI-X vector alloc failed: %d\n", ret); + return ret; + } + + return 0; +} + +static int hclge_init_msi(struct hclge_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + int vectors; + int i; + + hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, + sizeof(u16), GFP_KERNEL); + if (!hdev->vector_status) + return -ENOMEM; + + for (i = 0; i < hdev->num_msi; i++) + hdev->vector_status[i] = HCLGE_INVALID_VPORT; + + vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, PCI_IRQ_MSI); + if (vectors < 0) { + dev_err(&pdev->dev, "MSI vectors enable failed %d\n", vectors); + return -EINVAL; + } + hdev->num_msi = vectors; + hdev->num_msi_left = vectors; + hdev->base_msi_vector = pdev->irq; + hdev->roce_base_vector = hdev->base_msi_vector + + HCLGE_ROCE_VECTOR_OFFSET; + + return 0; +} + +static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed) +{ + struct hclge_mac *mac = &hdev->hw.mac; + + if ((speed == HCLGE_MAC_SPEED_10M) || (speed == HCLGE_MAC_SPEED_100M)) + mac->duplex = (u8)duplex; + else + mac->duplex = HCLGE_MAC_FULL; + + mac->speed = speed; +} + +int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) +{ + struct hclge_config_mac_speed_dup *req; + struct hclge_desc desc; + int ret; + + req = (struct hclge_config_mac_speed_dup *)desc.data; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false); + + hnae_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex); + + switch (speed) { + case HCLGE_MAC_SPEED_10M: + hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 6); + break; + case HCLGE_MAC_SPEED_100M: + hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 7); + break; + case HCLGE_MAC_SPEED_1G: + hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 0); + break; + case HCLGE_MAC_SPEED_10G: + hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 1); + break; + case HCLGE_MAC_SPEED_25G: + hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 2); + break; + case HCLGE_MAC_SPEED_40G: + hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 3); + break; + case HCLGE_MAC_SPEED_50G: + hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 4); + break; + case HCLGE_MAC_SPEED_100G: + hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 5); + break; + default: + dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed); + return -EINVAL; + } + + hnae_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B, + 1); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "mac speed/duplex config cmd failed %d.\n", ret); + return ret; + } + + hclge_check_speed_dup(hdev, duplex, speed); + + return 0; +} + +static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed, + u8 duplex) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return hclge_cfg_mac_speed_dup(hdev, speed, duplex); +} + +static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed, + u8 *duplex) +{ + struct hclge_query_an_speed_dup *req; + struct hclge_desc desc; + int speed_tmp; + int ret; + + req = (struct 
hclge_query_an_speed_dup *)desc.data; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "mac speed/autoneg/duplex query cmd failed %d\n", + ret); + return ret; + } + + *duplex = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B); + speed_tmp = hnae_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M, + HCLGE_QUERY_SPEED_S); + + ret = hclge_parse_speed(speed_tmp, speed); + if (ret) { + dev_err(&hdev->pdev->dev, + "could not parse speed(=%d), %d\n", speed_tmp, ret); + return -EIO; + } + + return 0; +} + +static int hclge_query_autoneg_result(struct hclge_dev *hdev) +{ + struct hclge_mac *mac = &hdev->hw.mac; + struct hclge_query_an_speed_dup *req; + struct hclge_desc desc; + int ret; + + req = (struct hclge_query_an_speed_dup *)desc.data; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "autoneg result query cmd failed %d.\n", ret); + return ret; + } + + mac->autoneg = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_AN_B); + + return 0; +} + +static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) +{ + struct hclge_config_auto_neg *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false); + + req = (struct hclge_config_auto_neg *)desc.data; + hnae_set_bit(req->cfg_an_cmd_flag, HCLGE_MAC_CFG_AN_EN_B, !!enable); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n", + ret); + return ret; + } + + return 0; +} + +static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return hclge_set_autoneg_en(hdev, enable); +} + +static int hclge_get_autoneg(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + hclge_query_autoneg_result(hdev); + + return hdev->hw.mac.autoneg; +} + +static int hclge_mac_init(struct hclge_dev *hdev) +{ + struct hclge_mac *mac = &hdev->hw.mac; + int ret; + + ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL); + if (ret) { + dev_err(&hdev->pdev->dev, + "Config mac speed dup fail ret=%d\n", ret); + return ret; + } + + mac->link = 0; + + ret = hclge_mac_mdio_config(hdev); + if (ret) { + dev_warn(&hdev->pdev->dev, + "mdio config fail ret=%d\n", ret); + return ret; + } + + /* Initialize the MTA table work mode */ + hdev->accept_mta_mc = true; + hdev->enable_mta = true; + hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36; + + ret = hclge_set_mta_filter_mode(hdev, + hdev->mta_mac_sel_type, + hdev->enable_mta); + if (ret) { + dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n", + ret); + return ret; + } + + return hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc); +} + +static void hclge_task_schedule(struct hclge_dev *hdev) +{ + if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) && + !test_bit(HCLGE_STATE_REMOVING, &hdev->state) && + !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) + (void)schedule_work(&hdev->service_task); +} + +static int hclge_get_mac_link_status(struct hclge_dev *hdev) +{ + struct hclge_link_status *req; + struct hclge_desc desc; + int link_status; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + 
if (ret) { + dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n", + ret); + return ret; + } + + req = (struct hclge_link_status *)desc.data; + link_status = req->status & HCLGE_LINK_STATUS; + + return !!link_status; +} + +static int hclge_get_mac_phy_link(struct hclge_dev *hdev) +{ + int mac_state; + int link_stat; + + mac_state = hclge_get_mac_link_status(hdev); + + if (hdev->hw.mac.phydev) { + if (!genphy_read_status(hdev->hw.mac.phydev)) + link_stat = mac_state & + hdev->hw.mac.phydev->link; + else + link_stat = 0; + + } else { + link_stat = mac_state; + } + + return !!link_stat; +} + +static void hclge_update_link_status(struct hclge_dev *hdev) +{ + struct hnae3_client *client = hdev->nic_client; + struct hnae3_handle *handle; + int state; + int i; + + if (!client) + return; + state = hclge_get_mac_phy_link(hdev); + if (state != hdev->hw.mac.link) { + for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { + handle = &hdev->vport[i].nic; + client->ops->link_status_change(handle, state); + } + hdev->hw.mac.link = state; + } +} + +static int hclge_update_speed_duplex(struct hclge_dev *hdev) +{ + struct hclge_mac mac = hdev->hw.mac; + u8 duplex; + int speed; + int ret; + + /* get the speed and duplex as the autoneg result from the mac cmd when the phy + * doesn't exist. + */ + if (mac.phydev) + return 0; + + /* update mac->autoneg. */ + ret = hclge_query_autoneg_result(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "autoneg result query failed %d\n", ret); + return ret; + } + + if (!mac.autoneg) + return 0; + + ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex); + if (ret) { + dev_err(&hdev->pdev->dev, + "mac autoneg/speed/duplex query failed %d\n", ret); + return ret; + } + + if ((mac.speed != speed) || (mac.duplex != duplex)) { + ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex); + if (ret) { + dev_err(&hdev->pdev->dev, + "mac speed/duplex config failed %d\n", ret); + return ret; + } + } + + return 0; +} + +static int hclge_update_speed_duplex_h(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return hclge_update_speed_duplex(hdev); +} + +static int hclge_get_status(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + hclge_update_link_status(hdev); + + return hdev->hw.mac.link; +} + +static void hclge_service_timer(unsigned long data) +{ + struct hclge_dev *hdev = (struct hclge_dev *)data; + (void)mod_timer(&hdev->service_timer, jiffies + HZ); + + hclge_task_schedule(hdev); +} + +static void hclge_service_complete(struct hclge_dev *hdev) +{ + WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)); + + /* Flush memory before next watchdog */ + smp_mb__before_atomic(); + clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); +} + +static void hclge_service_task(struct work_struct *work) +{ + struct hclge_dev *hdev = + container_of(work, struct hclge_dev, service_task); + + hclge_update_speed_duplex(hdev); + hclge_update_link_status(hdev); + hclge_update_stats_for_all(hdev); + hclge_service_complete(hdev); +} + +static void hclge_disable_sriov(struct hclge_dev *hdev) +{ + /* If our VFs are assigned we cannot shut down SR-IOV + * without causing issues, so just leave the hardware + * available but disabled + */ + if (pci_vfs_assigned(hdev->pdev)) { + dev_warn(&hdev->pdev->dev, + "disabling driver while VFs are assigned\n"); + return; + } + + pci_disable_sriov(hdev->pdev); +} + +struct hclge_vport *hclge_get_vport(struct 
hnae3_handle *handle) +{ + /* VF handle has no client */ + if (!handle->client) + return container_of(handle, struct hclge_vport, nic); + else if (handle->client->type == HNAE3_CLIENT_ROCE) + return container_of(handle, struct hclge_vport, roce); + else + return container_of(handle, struct hclge_vport, nic); +} + +static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num, + struct hnae3_vector_info *vector_info) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hnae3_vector_info *vector = vector_info; + struct hclge_dev *hdev = vport->back; + int alloc = 0; + int i, j; + + vector_num = min(hdev->num_msi_left, vector_num); + + for (j = 0; j < vector_num; j++) { + for (i = 1; i < hdev->num_msi; i++) { + if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) { + vector->vector = pci_irq_vector(hdev->pdev, i); + vector->io_addr = hdev->hw.io_base + + HCLGE_VECTOR_REG_BASE + + (i - 1) * HCLGE_VECTOR_REG_OFFSET + + vport->vport_id * + HCLGE_VECTOR_VF_OFFSET; + hdev->vector_status[i] = vport->vport_id; + + vector++; + alloc++; + + break; + } + } + } + hdev->num_msi_left -= alloc; + hdev->num_msi_used += alloc; + + return alloc; +} + +static int hclge_get_vector_index(struct hclge_dev *hdev, int vector) +{ + int i; + + for (i = 0; i < hdev->num_msi; i++) { + if (hdev->msix_entries) { + if (vector == hdev->msix_entries[i].vector) + return i; + } else { + if (vector == (hdev->base_msi_vector + i)) + return i; + } + } + return -EINVAL; +} + +static u32 hclge_get_rss_key_size(struct hnae3_handle *handle) +{ + return HCLGE_RSS_KEY_SIZE; +} + +static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle) +{ + return HCLGE_RSS_IND_TBL_SIZE; +} + +static int hclge_get_rss_algo(struct hclge_dev *hdev) +{ + struct hclge_rss_config *req; + struct hclge_desc desc; + int rss_hash_algo; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, true); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get link status error, status =%d\n", ret); + return ret; + } + + req = (struct hclge_rss_config *)desc.data; + rss_hash_algo = (req->hash_config & HCLGE_RSS_HASH_ALGO_MASK); + + if (rss_hash_algo == HCLGE_RSS_HASH_ALGO_TOEPLITZ) + return ETH_RSS_HASH_TOP; + + return -EINVAL; +} + +static int hclge_set_rss_algo_key(struct hclge_dev *hdev, + const u8 hfunc, const u8 *key) +{ + struct hclge_rss_config *req; + struct hclge_desc desc; + int key_offset; + int key_size; + int ret; + + req = (struct hclge_rss_config *)desc.data; + + for (key_offset = 0; key_offset < 3; key_offset++) { + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, + false); + + req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK); + req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B); + + if (key_offset == 2) + key_size = + HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2; + else + key_size = HCLGE_RSS_HASH_KEY_NUM; + + memcpy(req->hash_key, + key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Configure RSS config fail, status = %d\n", + ret); + return ret; + } + } + return 0; +} + +static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u32 *indir) +{ + struct hclge_rss_indirection_table *req; + struct hclge_desc desc; + int i, j; + int ret; + + req = (struct hclge_rss_indirection_table *)desc.data; + + for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) { + hclge_cmd_setup_basic_desc + (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false); + + 
req->start_table_index = i * HCLGE_RSS_CFG_TBL_SIZE; + req->rss_set_bitmap = HCLGE_RSS_SET_BITMAP_MSK; + + for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) + req->rss_result[j] = + indir[i * HCLGE_RSS_CFG_TBL_SIZE + j]; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Configure rss indir table fail,status = %d\n", + ret); + return ret; + } + } + return 0; +} + +static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid, + u16 *tc_size, u16 *tc_offset) +{ + struct hclge_rss_tc_mode *req; + struct hclge_desc desc; + int ret; + int i; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false); + req = (struct hclge_rss_tc_mode *)desc.data; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + hnae_set_bit(req->rss_tc_mode[i], HCLGE_RSS_TC_VALID_B, + (tc_valid[i] & 0x1)); + hnae_set_field(req->rss_tc_mode[i], HCLGE_RSS_TC_SIZE_M, + HCLGE_RSS_TC_SIZE_S, tc_size[i]); + hnae_set_field(req->rss_tc_mode[i], HCLGE_RSS_TC_OFFSET_M, + HCLGE_RSS_TC_OFFSET_S, tc_offset[i]); + } + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Configure rss tc mode fail, status = %d\n", ret); + return ret; + } + + return 0; +} + +static int hclge_set_rss_input_tuple(struct hclge_dev *hdev) +{ +#define HCLGE_RSS_INPUT_TUPLE_OTHER 0xf +#define HCLGE_RSS_INPUT_TUPLE_SCTP 0x1f + struct hclge_rss_input_tuple *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false); + + req = (struct hclge_rss_input_tuple *)desc.data; + req->ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; + req->ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; + req->ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP; + req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; + req->ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; + req->ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; + req->ipv6_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP; + req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Configure rss input fail, status = %d\n", ret); + return ret; + } + + return 0; +} + +static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir, + u8 *key, u8 *hfunc) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int i; + + /* Get hash algorithm */ + if (hfunc) + *hfunc = hclge_get_rss_algo(hdev); + + /* Get the RSS Key required by the user */ + if (key) + memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE); + + /* Get indirect table */ + if (indir) + for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) + indir[i] = vport->rss_indirection_tbl[i]; + + return 0; +} + +static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u8 hash_algo; + int ret, i; + + /* Set the RSS Hash Key if specififed by the user */ + if (key) { + /* Update the shadow RSS key with user specified qids */ + memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE); + + if (hfunc == ETH_RSS_HASH_TOP || + hfunc == ETH_RSS_HASH_NO_CHANGE) + hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; + else + return -EINVAL; + ret = hclge_set_rss_algo_key(hdev, hash_algo, key); + if (ret) + return ret; + } + + /* Update the shadow RSS table with user specified qids */ + for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) + vport->rss_indirection_tbl[i] = indir[i]; + + /* Update the hardware */ + ret = hclge_set_rss_indir_table(hdev, 
indir); + return ret; +} + +static int hclge_get_tc_size(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return hdev->rss_size_max; +} + +static int hclge_rss_init_hw(struct hclge_dev *hdev) +{ + const u8 hfunc = HCLGE_RSS_HASH_ALGO_TOEPLITZ; + struct hclge_vport *vport = hdev->vport; + u16 tc_offset[HCLGE_MAX_TC_NUM]; + u8 rss_key[HCLGE_RSS_KEY_SIZE]; + u16 tc_valid[HCLGE_MAX_TC_NUM]; + u16 tc_size[HCLGE_MAX_TC_NUM]; + u32 *rss_indir = NULL; + u16 rss_size = 0, roundup_size; + const u8 *key; + int i, ret, j; + + rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL); + if (!rss_indir) + return -ENOMEM; + + /* Get default RSS key */ + netdev_rss_key_fill(rss_key, HCLGE_RSS_KEY_SIZE); + + /* Initialize RSS indirect table for each vport */ + for (j = 0; j < hdev->num_vmdq_vport + 1; j++) { + for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) { + vport[j].rss_indirection_tbl[i] = + i % vport[j].alloc_rss_size; + + /* vport 0 is for PF */ + if (j != 0) + continue; + + rss_size = vport[j].alloc_rss_size; + rss_indir[i] = vport[j].rss_indirection_tbl[i]; + } + } + ret = hclge_set_rss_indir_table(hdev, rss_indir); + if (ret) + goto err; + + key = rss_key; + ret = hclge_set_rss_algo_key(hdev, hfunc, key); + if (ret) + goto err; + + ret = hclge_set_rss_input_tuple(hdev); + if (ret) + goto err; + + /* Each TC has the same queue size, and tc_size set to hardware is + * the log2 of roundup power of two of rss_size, the actual queue + * size is limited by indirection table. + */ + if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) { + dev_err(&hdev->pdev->dev, + "Configure rss tc size failed, invalid TC_SIZE = %d\n", + rss_size); + ret = -EINVAL; + goto err; + } + + roundup_size = roundup_pow_of_two(rss_size); + roundup_size = ilog2(roundup_size); + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + tc_valid[i] = 0; + + if (!(hdev->hw_tc_map & BIT(i))) + continue; + + tc_valid[i] = 1; + tc_size[i] = roundup_size; + tc_offset[i] = rss_size * i; + } + + ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); + +err: + kfree(rss_indir); + + return ret; +} + +int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id, + struct hnae3_ring_chain_node *ring_chain) +{ + struct hclge_dev *hdev = vport->back; + struct hclge_ctrl_vector_chain *req; + struct hnae3_ring_chain_node *node; + struct hclge_desc desc; + int ret; + int i; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ADD_RING_TO_VECTOR, false); + + req = (struct hclge_ctrl_vector_chain *)desc.data; + req->int_vector_id = vector_id; + + i = 0; + for (node = ring_chain; node; node = node->next) { + hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_TYPE_M, + HCLGE_INT_TYPE_S, + hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); + hnae_set_field(req->tqp_type_and_id[i], HCLGE_TQP_ID_M, + HCLGE_TQP_ID_S, node->tqp_index); + hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_GL_IDX_M, + HCLGE_INT_GL_IDX_S, + hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); + req->tqp_type_and_id[i] = cpu_to_le16(req->tqp_type_and_id[i]); + req->vfid = vport->vport_id; + + if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { + req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Map TQP fail, status is %d.\n", + ret); + return ret; + } + i = 0; + + hclge_cmd_setup_basic_desc(&desc, + HCLGE_OPC_ADD_RING_TO_VECTOR, + false); + req->int_vector_id = vector_id; + } + } + + if (i > 0) { + 
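+ /* send the ring-to-vector mappings left over after the last full descriptor */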
req->int_cause_num = i; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Map TQP fail, status is %d.\n", ret); + return ret; + } + } + + return 0; +} + +int hclge_map_handle_ring_to_vector(struct hnae3_handle *handle, + int vector, + struct hnae3_ring_chain_node *ring_chain) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int vector_id; + + vector_id = hclge_get_vector_index(hdev, vector); + if (vector_id < 0) { + dev_err(&hdev->pdev->dev, + "Get vector index fail. ret =%d\n", vector_id); + return vector_id; + } + + return hclge_map_vport_ring_to_vector(vport, vector_id, ring_chain); +} + +static int hclge_unmap_ring_from_vector( + struct hnae3_handle *handle, int vector, + struct hnae3_ring_chain_node *ring_chain) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_ctrl_vector_chain *req; + struct hnae3_ring_chain_node *node; + struct hclge_desc desc; + int i, vector_id; + int ret; + + vector_id = hclge_get_vector_index(hdev, vector); + if (vector_id < 0) { + dev_err(&handle->pdev->dev, + "Get vector index fail. ret =%d\n", vector_id); + return vector_id; + } + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_DEL_RING_TO_VECTOR, false); + + req = (struct hclge_ctrl_vector_chain *)desc.data; + req->int_vector_id = vector_id; + + i = 0; + for (node = ring_chain; node; node = node->next) { + hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_TYPE_M, + HCLGE_INT_TYPE_S, + hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); + hnae_set_field(req->tqp_type_and_id[i], HCLGE_TQP_ID_M, + HCLGE_TQP_ID_S, node->tqp_index); + hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_GL_IDX_M, + HCLGE_INT_GL_IDX_S, + hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); + + req->tqp_type_and_id[i] = cpu_to_le16(req->tqp_type_and_id[i]); + req->vfid = vport->vport_id; + + if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { + req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Unmap TQP fail, status is %d.\n", + ret); + return ret; + } + i = 0; + hclge_cmd_setup_basic_desc(&desc, + HCLGE_OPC_DEL_RING_TO_VECTOR, + false); + req->int_vector_id = vector_id; + } + } + + if (i > 0) { + req->int_cause_num = i; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Unmap TQP fail, status is %d.\n", ret); + return ret; + } + } + + return 0; +} + +int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, + struct hclge_promisc_param *param) +{ + struct hclge_promisc_cfg *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false); + + req = (struct hclge_promisc_cfg *)desc.data; + req->vf_id = param->vf_id; + req->flag = (param->enable << HCLGE_PROMISC_EN_B); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Set promisc mode fail, status is %d.\n", ret); + return ret; + } + return 0; +} + +void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, + bool en_mc, bool en_bc, int vport_id) +{ + if (!param) + return; + + memset(param, 0, sizeof(struct hclge_promisc_param)); + if (en_uc) + param->enable = HCLGE_PROMISC_EN_UC; + if (en_mc) + param->enable |= HCLGE_PROMISC_EN_MC; + if (en_bc) + param->enable |= HCLGE_PROMISC_EN_BC; + param->vf_id = vport_id; +} + +static void hclge_set_promisc_mode(struct hnae3_handle *handle, u32 en) +{ + struct hclge_vport *vport = 
hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_promisc_param param; + + hclge_promisc_param_init(&param, en, en, true, vport->vport_id); + hclge_cmd_set_promisc_mode(hdev, &param); +} + +static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) +{ + struct hclge_desc desc; + struct hclge_config_mac_mode *req = + (struct hclge_config_mac_mode *)desc.data; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false); + hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_TX_EN_B, enable); + hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_RX_EN_B, enable); + hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_PAD_TX_B, enable); + hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_PAD_RX_B, enable); + hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_1588_TX_B, 0); + hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_1588_RX_B, 0); + hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_APP_LP_B, 0); + hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_LINE_LP_B, 0); + hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_FCS_TX_B, enable); + hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_RX_FCS_B, enable); + hnae_set_bit(req->txrx_pad_fcs_loop_en, + HCLGE_MAC_RX_FCS_STRIP_B, enable); + hnae_set_bit(req->txrx_pad_fcs_loop_en, + HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable); + hnae_set_bit(req->txrx_pad_fcs_loop_en, + HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable); + hnae_set_bit(req->txrx_pad_fcs_loop_en, + HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "mac enable fail, ret =%d.\n", ret); +} + +static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id, + int stream_id, bool enable) +{ + struct hclge_desc desc; + struct hclge_cfg_com_tqp_queue *req = + (struct hclge_cfg_com_tqp_queue *)desc.data; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false); + req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK); + req->stream_id = cpu_to_le16(stream_id); + req->enable |= enable << HCLGE_TQP_ENABLE_B; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "Tqp enable fail, status =%d.\n", ret); + return ret; +} + +static void hclge_reset_tqp_stats(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hnae3_queue *queue; + struct hclge_tqp *tqp; + int i; + + for (i = 0; i < vport->alloc_tqps; i++) { + queue = handle->kinfo.tqp[i]; + tqp = container_of(queue, struct hclge_tqp, q); + memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); + } +} + +static int hclge_ae_start(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int i, queue_id, ret; + + for (i = 0; i < vport->alloc_tqps; i++) { + /* todo clear interrupt */ + /* ring enable */ + queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]); + if (queue_id < 0) { + dev_warn(&hdev->pdev->dev, + "Get invalid queue id, ignore it\n"); + continue; + } + + hclge_tqp_enable(hdev, queue_id, 0, true); + } + /* mac enable */ + hclge_cfg_mac_mode(hdev, true); + clear_bit(HCLGE_STATE_DOWN, &hdev->state); + (void)mod_timer(&hdev->service_timer, jiffies + HZ); + + ret = hclge_mac_start_phy(hdev); + if (ret) + return ret; + + /* reset tqp stats */ + hclge_reset_tqp_stats(handle); + + return 0; +} + +static void hclge_ae_stop(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int i, queue_id; + 
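+ /* disable all TQPs of this vport and the MAC before stopping the PHY */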
+ for (i = 0; i < vport->alloc_tqps; i++) { + /* Ring disable */ + queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]); + if (queue_id < 0) { + dev_warn(&hdev->pdev->dev, + "Get invalid queue id, ignore it\n"); + continue; + } + + hclge_tqp_enable(hdev, queue_id, 0, false); + } + /* Mac disable */ + hclge_cfg_mac_mode(hdev, false); + + hclge_mac_stop_phy(hdev); + + /* reset tqp stats */ + hclge_reset_tqp_stats(handle); +} + +static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, + u16 cmdq_resp, u8 resp_code, + enum hclge_mac_vlan_tbl_opcode op) +{ + struct hclge_dev *hdev = vport->back; + int return_status = -EIO; + + if (cmdq_resp) { + dev_err(&hdev->pdev->dev, + "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n", + cmdq_resp); + return -EIO; + } + + if (op == HCLGE_MAC_VLAN_ADD) { + if ((!resp_code) || (resp_code == 1)) { + return_status = 0; + } else if (resp_code == 2) { + return_status = -EIO; + dev_err(&hdev->pdev->dev, + "add mac addr failed for uc_overflow.\n"); + } else if (resp_code == 3) { + return_status = -EIO; + dev_err(&hdev->pdev->dev, + "add mac addr failed for mc_overflow.\n"); + } else { + dev_err(&hdev->pdev->dev, + "add mac addr failed for undefined, code=%d.\n", + resp_code); + } + } else if (op == HCLGE_MAC_VLAN_REMOVE) { + if (!resp_code) { + return_status = 0; + } else if (resp_code == 1) { + return_status = -EIO; + dev_dbg(&hdev->pdev->dev, + "remove mac addr failed for miss.\n"); + } else { + dev_err(&hdev->pdev->dev, + "remove mac addr failed for undefined, code=%d.\n", + resp_code); + } + } else if (op == HCLGE_MAC_VLAN_LKUP) { + if (!resp_code) { + return_status = 0; + } else if (resp_code == 1) { + return_status = -EIO; + dev_dbg(&hdev->pdev->dev, + "lookup mac addr failed for miss.\n"); + } else { + dev_err(&hdev->pdev->dev, + "lookup mac addr failed for undefined, code=%d.\n", + resp_code); + } + } else { + return_status = -EIO; + dev_err(&hdev->pdev->dev, + "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n", + op); + } + + return return_status; +} + +static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr) +{ + int word_num; + int bit_num; + + if (vfid > 255 || vfid < 0) + return -EIO; + + if (vfid >= 0 && vfid <= 191) { + word_num = vfid / 32; + bit_num = vfid % 32; + if (clr) + desc[1].data[word_num] &= ~(1 << bit_num); + else + desc[1].data[word_num] |= (1 << bit_num); + } else { + word_num = (vfid - 192) / 32; + bit_num = vfid % 32; + if (clr) + desc[2].data[word_num] &= ~(1 << bit_num); + else + desc[2].data[word_num] |= (1 << bit_num); + } + + return 0; +} + +static bool hclge_is_all_function_id_zero(struct hclge_desc *desc) +{ +#define HCLGE_DESC_NUMBER 3 +#define HCLGE_FUNC_NUMBER_PER_DESC 6 + int i, j; + + for (i = 0; i < HCLGE_DESC_NUMBER; i++) + for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++) + if (desc[i].data[j]) + return false; + + return true; +} + +static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry *new_req, + const u8 *addr) +{ + const unsigned char *mac_addr = addr; + u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) | + (mac_addr[0]) | (mac_addr[1] << 8); + u32 low_val = mac_addr[4] | (mac_addr[5] << 8); + + new_req->mac_addr_hi32 = cpu_to_le32(high_val); + new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff); +} + +u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport, + const u8 *addr) +{ + u16 high_val = addr[1] | (addr[0] << 8); + struct hclge_dev *hdev = vport->back; + u32 rsh = 4 - hdev->mta_mac_sel_type; + u16 ret_val = (high_val >> rsh) & 
0xfff; + + return ret_val; +} + +static int hclge_set_mta_filter_mode(struct hclge_dev *hdev, + enum hclge_mta_dmac_sel_type mta_mac_sel, + bool enable) +{ + struct hclge_mta_filter_mode *req; + struct hclge_desc desc; + int ret; + + req = (struct hclge_mta_filter_mode *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false); + + hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B, + enable); + hnae_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M, + HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Config mat filter mode failed for cmd_send, ret =%d.\n", + ret); + return ret; + } + + return 0; +} + +int hclge_cfg_func_mta_filter(struct hclge_dev *hdev, + u8 func_id, + bool enable) +{ + struct hclge_cfg_func_mta_filter *req; + struct hclge_desc desc; + int ret; + + req = (struct hclge_cfg_func_mta_filter *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false); + + hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B, + enable); + req->function_id = func_id; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Config func_id enable failed for cmd_send, ret =%d.\n", + ret); + return ret; + } + + return 0; +} + +static int hclge_set_mta_table_item(struct hclge_vport *vport, + u16 idx, + bool enable) +{ + struct hclge_dev *hdev = vport->back; + struct hclge_cfg_func_mta_item *req; + struct hclge_desc desc; + int ret; + + req = (struct hclge_cfg_func_mta_item *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false); + hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable); + + hnae_set_field(req->item_idx, HCLGE_CFG_MTA_ITEM_IDX_M, + HCLGE_CFG_MTA_ITEM_IDX_S, idx); + req->item_idx = cpu_to_le16(req->item_idx); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Config mta table item failed for cmd_send, ret =%d.\n", + ret); + return ret; + } + + return 0; +} + +static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport, + struct hclge_mac_vlan_tbl_entry *req) +{ + struct hclge_dev *hdev = vport->back; + struct hclge_desc desc; + u8 resp_code; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false); + + memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry)); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "del mac addr failed for cmd_send, ret =%d.\n", + ret); + return ret; + } + resp_code = (desc.data[0] >> 8) & 0xff; + + return hclge_get_mac_vlan_cmd_status(vport, desc.retval, resp_code, + HCLGE_MAC_VLAN_REMOVE); +} + +static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport, + struct hclge_mac_vlan_tbl_entry *req, + struct hclge_desc *desc, + bool is_mc) +{ + struct hclge_dev *hdev = vport->back; + u8 resp_code; + int ret; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true); + if (is_mc) { + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + memcpy(desc[0].data, + req, + sizeof(struct hclge_mac_vlan_tbl_entry)); + hclge_cmd_setup_basic_desc(&desc[1], + HCLGE_OPC_MAC_VLAN_ADD, + true); + desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[2], + HCLGE_OPC_MAC_VLAN_ADD, + true); + ret = hclge_cmd_send(&hdev->hw, desc, 3); + } else { + memcpy(desc[0].data, + req, + sizeof(struct hclge_mac_vlan_tbl_entry)); + ret = hclge_cmd_send(&hdev->hw, desc, 1); + } + if (ret) { + dev_err(&hdev->pdev->dev, + "lookup mac 
addr failed for cmd_send, ret =%d.\n", + ret); + return ret; + } + resp_code = (desc[0].data[0] >> 8) & 0xff; + + return hclge_get_mac_vlan_cmd_status(vport, desc[0].retval, resp_code, + HCLGE_MAC_VLAN_LKUP); +} + +static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport, + struct hclge_mac_vlan_tbl_entry *req, + struct hclge_desc *mc_desc) +{ + struct hclge_dev *hdev = vport->back; + int cfg_status; + u8 resp_code; + int ret; + + if (!mc_desc) { + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, + HCLGE_OPC_MAC_VLAN_ADD, + false); + memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry)); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + resp_code = (desc.data[0] >> 8) & 0xff; + cfg_status = hclge_get_mac_vlan_cmd_status(vport, desc.retval, + resp_code, + HCLGE_MAC_VLAN_ADD); + } else { + mc_desc[0].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR); + mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + mc_desc[1].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR); + mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR); + mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT); + memcpy(mc_desc[0].data, req, + sizeof(struct hclge_mac_vlan_tbl_entry)); + ret = hclge_cmd_send(&hdev->hw, mc_desc, 3); + resp_code = (mc_desc[0].data[0] >> 8) & 0xff; + cfg_status = hclge_get_mac_vlan_cmd_status(vport, + mc_desc[0].retval, + resp_code, + HCLGE_MAC_VLAN_ADD); + } + + if (ret) { + dev_err(&hdev->pdev->dev, + "add mac addr failed for cmd_send, ret =%d.\n", + ret); + return ret; + } + + return cfg_status; +} + +static int hclge_add_uc_addr(struct hnae3_handle *handle, + const unsigned char *addr) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + return hclge_add_uc_addr_common(vport, addr); +} + +int hclge_add_uc_addr_common(struct hclge_vport *vport, + const unsigned char *addr) +{ + struct hclge_dev *hdev = vport->back; + struct hclge_mac_vlan_tbl_entry req; + enum hclge_cmd_status status; + + /* mac addr check */ + if (is_zero_ether_addr(addr) || + is_broadcast_ether_addr(addr) || + is_multicast_ether_addr(addr)) { + dev_err(&hdev->pdev->dev, + "Set_uc mac err! invalid mac:%pM. 
is_zero:%d,is_br=%d,is_mul=%d\n", + addr, + is_zero_ether_addr(addr), + is_broadcast_ether_addr(addr), + is_multicast_ether_addr(addr)); + return -EINVAL; + } + + memset(&req, 0, sizeof(req)); + hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); + hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0); + hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hnae_set_bit(req.egress_port, + HCLGE_MAC_EPORT_SW_EN_B, 0); + hnae_set_bit(req.egress_port, + HCLGE_MAC_EPORT_TYPE_B, 0); + hnae_set_field(req.egress_port, HCLGE_MAC_EPORT_VFID_M, + HCLGE_MAC_EPORT_VFID_S, vport->vport_id); + hnae_set_field(req.egress_port, HCLGE_MAC_EPORT_PFID_M, + HCLGE_MAC_EPORT_PFID_S, 0); + req.egress_port = cpu_to_le16(req.egress_port); + + hclge_prepare_mac_addr(&req, addr); + + status = hclge_add_mac_vlan_tbl(vport, &req, NULL); + + return status; +} + +static int hclge_rm_uc_addr(struct hnae3_handle *handle, + const unsigned char *addr) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + return hclge_rm_uc_addr_common(vport, addr); +} + +int hclge_rm_uc_addr_common(struct hclge_vport *vport, + const unsigned char *addr) +{ + struct hclge_dev *hdev = vport->back; + struct hclge_mac_vlan_tbl_entry req; + enum hclge_cmd_status status; + + /* mac addr check */ + if (is_zero_ether_addr(addr) || + is_broadcast_ether_addr(addr) || + is_multicast_ether_addr(addr)) { + dev_dbg(&hdev->pdev->dev, + "Remove mac err! invalid mac:%pM.\n", + addr); + return -EINVAL; + } + + memset(&req, 0, sizeof(req)); + hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); + hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hclge_prepare_mac_addr(&req, addr); + status = hclge_remove_mac_vlan_tbl(vport, &req); + + return status; +} + +static int hclge_add_mc_addr(struct hnae3_handle *handle, + const unsigned char *addr) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + return hclge_add_mc_addr_common(vport, addr); +} + +int hclge_add_mc_addr_common(struct hclge_vport *vport, + const unsigned char *addr) +{ + struct hclge_dev *hdev = vport->back; + struct hclge_mac_vlan_tbl_entry req; + struct hclge_desc desc[3]; + u16 tbl_idx; + int status; + + /* mac addr check */ + if (!is_multicast_ether_addr(addr)) { + dev_err(&hdev->pdev->dev, + "Add mc mac err! 
invalid mac:%pM.\n", + addr); + return -EINVAL; + } + memset(&req, 0, sizeof(req)); + hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); + hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); + hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hclge_prepare_mac_addr(&req, addr); + status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); + if (!status) { + /* This mac addr exists, update VFID for it */ + hclge_update_desc_vfid(desc, vport->vport_id, false); + status = hclge_add_mac_vlan_tbl(vport, &req, desc); + } else { + /* This mac addr does not exist, add a new entry for it */ + memset(desc[0].data, 0, sizeof(desc[0].data)); + memset(desc[1].data, 0, sizeof(desc[0].data)); + memset(desc[2].data, 0, sizeof(desc[0].data)); + hclge_update_desc_vfid(desc, vport->vport_id, false); + status = hclge_add_mac_vlan_tbl(vport, &req, desc); + } + + /* Set MTA table for this MAC address */ + tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr); + status = hclge_set_mta_table_item(vport, tbl_idx, true); + + return status; +} + +static int hclge_rm_mc_addr(struct hnae3_handle *handle, + const unsigned char *addr) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + return hclge_rm_mc_addr_common(vport, addr); +} + +int hclge_rm_mc_addr_common(struct hclge_vport *vport, + const unsigned char *addr) +{ + struct hclge_dev *hdev = vport->back; + struct hclge_mac_vlan_tbl_entry req; + enum hclge_cmd_status status; + struct hclge_desc desc[3]; + u16 tbl_idx; + + /* mac addr check */ + if (!is_multicast_ether_addr(addr)) { + dev_dbg(&hdev->pdev->dev, + "Remove mc mac err! invalid mac:%pM.\n", + addr); + return -EINVAL; + } + + memset(&req, 0, sizeof(req)); + hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); + hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); + hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hclge_prepare_mac_addr(&req, addr); + status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); + if (!status) { + /* This mac addr exists, remove this handle's VFID for it */ + hclge_update_desc_vfid(desc, vport->vport_id, true); + + if (hclge_is_all_function_id_zero(desc)) + /* All the vfids are zero, so need to delete this entry */ + status = hclge_remove_mac_vlan_tbl(vport, &req); + else + /* Not all the vfids are zero, update the vfid */ + status = hclge_add_mac_vlan_tbl(vport, &req, desc); + + } else { + /* This mac addr does not exist, can't delete it */ + dev_err(&hdev->pdev->dev, + "Rm multicast mac addr failed, ret = %d.\n", + status); + return -EIO; + } + + /* Set MTA table for this MAC address */ + tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr); + status = hclge_set_mta_table_item(vport, tbl_idx, false); + + return status; +} + +static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + ether_addr_copy(p, hdev->hw.mac.mac_addr); +} + +static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p) +{ + const unsigned char *new_addr = (const unsigned char *)p; + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + /* mac addr check */ + if (is_zero_ether_addr(new_addr) || + is_broadcast_ether_addr(new_addr) || + is_multicast_ether_addr(new_addr)) { + dev_err(&hdev->pdev->dev, + "Change uc mac err! 
invalid mac:%p.\n", + new_addr); + return -EINVAL; + } + + hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr); + + if (!hclge_add_uc_addr(handle, new_addr)) { + ether_addr_copy(hdev->hw.mac.mac_addr, new_addr); + return 0; + } + + return -EIO; +} + +static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, + bool filter_en) +{ + struct hclge_vlan_filter_ctrl *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false); + + req = (struct hclge_vlan_filter_ctrl *)desc.data; + req->vlan_type = vlan_type; + req->vlan_fe = filter_en; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n", + ret); + return ret; + } + + return 0; +} + +int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, + bool is_kill, u16 vlan, u8 qos, __be16 proto) +{ +#define HCLGE_MAX_VF_BYTES 16 + struct hclge_vlan_filter_vf_cfg *req0; + struct hclge_vlan_filter_vf_cfg *req1; + struct hclge_desc desc[2]; + u8 vf_byte_val; + u8 vf_byte_off; + int ret; + + hclge_cmd_setup_basic_desc(&desc[0], + HCLGE_OPC_VLAN_FILTER_VF_CFG, false); + hclge_cmd_setup_basic_desc(&desc[1], + HCLGE_OPC_VLAN_FILTER_VF_CFG, false); + + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + + vf_byte_off = vfid / 8; + vf_byte_val = 1 << (vfid % 8); + + req0 = (struct hclge_vlan_filter_vf_cfg *)desc[0].data; + req1 = (struct hclge_vlan_filter_vf_cfg *)desc[1].data; + + req0->vlan_id = vlan; + req0->vlan_cfg = is_kill; + + if (vf_byte_off < HCLGE_MAX_VF_BYTES) + req0->vf_bitmap[vf_byte_off] = vf_byte_val; + else + req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val; + + ret = hclge_cmd_send(&hdev->hw, desc, 2); + if (ret) { + dev_err(&hdev->pdev->dev, + "Send vf vlan command fail, ret =%d.\n", + ret); + return ret; + } + + if (!is_kill) { + if (!req0->resp_code || req0->resp_code == 1) + return 0; + + dev_err(&hdev->pdev->dev, + "Add vf vlan filter fail, ret =%d.\n", + req0->resp_code); + } else { + if (!req0->resp_code) + return 0; + + dev_err(&hdev->pdev->dev, + "Kill vf vlan filter fail, ret =%d.\n", + req0->resp_code); + } + + return -EIO; +} + +static int hclge_set_port_vlan_filter(struct hnae3_handle *handle, + __be16 proto, u16 vlan_id, + bool is_kill) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_vlan_filter_pf_cfg *req; + struct hclge_desc desc; + u8 vlan_offset_byte_val; + u8 vlan_offset_byte; + u8 vlan_offset_160; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false); + + vlan_offset_160 = vlan_id / 160; + vlan_offset_byte = (vlan_id % 160) / 8; + vlan_offset_byte_val = 1 << (vlan_id % 8); + + req = (struct hclge_vlan_filter_pf_cfg *)desc.data; + req->vlan_offset = vlan_offset_160; + req->vlan_cfg = is_kill; + req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "port vlan command, send fail, ret =%d.\n", + ret); + return ret; + } + + ret = hclge_set_vf_vlan_common(hdev, 0, is_kill, vlan_id, 0, proto); + if (ret) { + dev_err(&hdev->pdev->dev, + "Set pf vlan filter config fail, ret =%d.\n", + ret); + return -EIO; + } + + return 0; +} + +static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, + u16 vlan, u8 qos, __be16 proto) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + if ((vfid >= hdev->num_alloc_vfs) || (vlan > 
4095) || (qos > 7)) + return -EINVAL; + if (proto != htons(ETH_P_8021Q)) + return -EPROTONOSUPPORT; + + return hclge_set_vf_vlan_common(hdev, vfid, false, vlan, qos, proto); +} + +static int hclge_init_vlan_config(struct hclge_dev *hdev) +{ +#define HCLGE_VLAN_TYPE_VF_TABLE 0 +#define HCLGE_VLAN_TYPE_PORT_TABLE 1 + struct hnae3_handle *handle; + int ret; + + ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_VF_TABLE, + true); + if (ret) + return ret; + + ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_PORT_TABLE, + true); + if (ret) + return ret; + + handle = &hdev->vport[0].nic; + return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); +} + +static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_config_max_frm_size *req; + struct hclge_dev *hdev = vport->back; + struct hclge_desc desc; + int ret; + + if ((new_mtu < HCLGE_MAC_MIN_MTU) || (new_mtu > HCLGE_MAC_MAX_MTU)) + return -EINVAL; + + hdev->mps = new_mtu; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false); + + req = (struct hclge_config_max_frm_size *)desc.data; + req->max_frm_size = cpu_to_le16(new_mtu); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret); + return ret; + } + + return 0; +} + +static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id, + bool enable) +{ + struct hclge_reset_tqp_queue *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false); + + req = (struct hclge_reset_tqp_queue *)desc.data; + req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); + hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Send tqp reset cmd error, status =%d\n", ret); + return ret; + } + + return 0; +} + +static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id) +{ + struct hclge_reset_tqp_queue *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true); + + req = (struct hclge_reset_tqp_queue *)desc.data; + req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get reset status error, status =%d\n", ret); + return ret; + } + + return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); +} + +static void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int reset_try_times = 0; + int reset_status; + int ret; + + ret = hclge_tqp_enable(hdev, queue_id, 0, false); + if (ret) { + dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret); + return; + } + + ret = hclge_send_reset_tqp_cmd(hdev, queue_id, true); + if (ret) { + dev_warn(&hdev->pdev->dev, + "Send reset tqp cmd fail, ret = %d\n", ret); + return; + } + + reset_try_times = 0; + while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { + /* Wait for tqp hw reset */ + msleep(20); + reset_status = hclge_get_reset_status(hdev, queue_id); + if (reset_status) + break; + } + + if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { + dev_warn(&hdev->pdev->dev, "Reset TQP fail\n"); + return; + } + + ret = hclge_send_reset_tqp_cmd(hdev, queue_id, false); + if (ret) { + dev_warn(&hdev->pdev->dev, + "Deassert the soft reset fail, ret = %d\n", ret); + 
return; + } +} + +static u32 hclge_get_fw_version(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return hdev->fw_version; +} + +static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg, + u32 *rx_en, u32 *tx_en) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + *auto_neg = hclge_get_autoneg(handle); + + if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { + *rx_en = 0; + *tx_en = 0; + return; + } + + if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) { + *rx_en = 1; + *tx_en = 0; + } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) { + *tx_en = 1; + *rx_en = 0; + } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) { + *rx_en = 1; + *tx_en = 1; + } else { + *rx_en = 0; + *tx_en = 0; + } +} + +static void hclge_get_ksettings_an_result(struct hnae3_handle *handle, + u8 *auto_neg, u32 *speed, u8 *duplex) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + if (speed) + *speed = hdev->hw.mac.speed; + if (duplex) + *duplex = hdev->hw.mac.duplex; + if (auto_neg) + *auto_neg = hdev->hw.mac.autoneg; +} + +static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + if (media_type) + *media_type = hdev->hw.mac.media_type; +} + +static void hclge_get_mdix_mode(struct hnae3_handle *handle, + u8 *tp_mdix_ctrl, u8 *tp_mdix) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct phy_device *phydev = hdev->hw.mac.phydev; + int mdix_ctrl, mdix, retval, is_resolved; + + if (!phydev) { + *tp_mdix_ctrl = ETH_TP_MDI_INVALID; + *tp_mdix = ETH_TP_MDI_INVALID; + return; + } + + phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX); + + retval = phy_read(phydev, HCLGE_PHY_CSC_REG); + mdix_ctrl = hnae_get_field(retval, HCLGE_PHY_MDIX_CTRL_M, + HCLGE_PHY_MDIX_CTRL_S); + + retval = phy_read(phydev, HCLGE_PHY_CSS_REG); + mdix = hnae_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B); + is_resolved = hnae_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B); + + phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER); + + switch (mdix_ctrl) { + case 0x0: + *tp_mdix_ctrl = ETH_TP_MDI; + break; + case 0x1: + *tp_mdix_ctrl = ETH_TP_MDI_X; + break; + case 0x3: + *tp_mdix_ctrl = ETH_TP_MDI_AUTO; + break; + default: + *tp_mdix_ctrl = ETH_TP_MDI_INVALID; + break; + } + + if (!is_resolved) + *tp_mdix = ETH_TP_MDI_INVALID; + else if (mdix) + *tp_mdix = ETH_TP_MDI_X; + else + *tp_mdix = ETH_TP_MDI; +} + +static int hclge_init_client_instance(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev) +{ + struct hclge_dev *hdev = ae_dev->priv; + struct hclge_vport *vport; + int i, ret; + + for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { + vport = &hdev->vport[i]; + + switch (client->type) { + case HNAE3_CLIENT_KNIC: + + hdev->nic_client = client; + vport->nic.client = client; + ret = client->ops->init_instance(&vport->nic); + if (ret) + goto err; + + if (hdev->roce_client && + hnae3_dev_roce_supported(hdev)) { + struct hnae3_client *rc = hdev->roce_client; + + ret = hclge_init_roce_base_info(vport); + if (ret) + goto err; + + ret = rc->ops->init_instance(&vport->roce); + if (ret) + goto err; + } + + break; + case HNAE3_CLIENT_UNIC: + hdev->nic_client = client; + vport->nic.client = client; + + ret = client->ops->init_instance(&vport->nic); + if (ret) + goto err; + + break; 
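+ /* the RoCE client is initialised only when the hardware supports RoCE */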
+ case HNAE3_CLIENT_ROCE: + if (hnae3_dev_roce_supported(hdev)) { + hdev->roce_client = client; + vport->roce.client = client; + } + + if (hdev->roce_client) { + ret = hclge_init_roce_base_info(vport); + if (ret) + goto err; + + ret = client->ops->init_instance(&vport->roce); + if (ret) + goto err; + } + } + } + + return 0; +err: + return ret; +} + +static void hclge_uninit_client_instance(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev) +{ + struct hclge_dev *hdev = ae_dev->priv; + struct hclge_vport *vport; + int i; + + for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { + vport = &hdev->vport[i]; + if (hdev->roce_client) + hdev->roce_client->ops->uninit_instance(&vport->roce, + 0); + if (client->type == HNAE3_CLIENT_ROCE) + return; + if (client->ops->uninit_instance) + client->ops->uninit_instance(&vport->nic, 0); + } +} + +static int hclge_pci_init(struct hclge_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + struct hclge_hw *hw; + int ret; + + ret = pci_enable_device(pdev); + if (ret) { + dev_err(&pdev->dev, "failed to enable PCI device\n"); + goto err_no_drvdata; + } + + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (ret) { + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (ret) { + dev_err(&pdev->dev, + "can't set consistent PCI DMA"); + goto err_disable_device; + } + dev_warn(&pdev->dev, "set DMA mask to 32 bits\n"); + } + + ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME); + if (ret) { + dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); + goto err_disable_device; + } + + pci_set_master(pdev); + hw = &hdev->hw; + hw->back = hdev; + hw->io_base = pcim_iomap(pdev, 2, 0); + if (!hw->io_base) { + dev_err(&pdev->dev, "Can't map configuration register space\n"); + ret = -ENOMEM; + goto err_clr_master; + } + + return 0; +err_clr_master: + pci_clear_master(pdev); + pci_release_regions(pdev); +err_disable_device: + pci_disable_device(pdev); +err_no_drvdata: + pci_set_drvdata(pdev, NULL); + + return ret; +} + +static void hclge_pci_uninit(struct hclge_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + + if (hdev->flag & HCLGE_FLAG_USE_MSIX) { + pci_disable_msix(pdev); + devm_kfree(&pdev->dev, hdev->msix_entries); + hdev->msix_entries = NULL; + } else { + pci_disable_msi(pdev); + } + + pci_clear_master(pdev); + pci_release_mem_regions(pdev); + pci_disable_device(pdev); +} + +static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) +{ + struct pci_dev *pdev = ae_dev->pdev; + struct hclge_dev *hdev; + int ret; + + hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); + if (!hdev) { + ret = -ENOMEM; + goto err_hclge_dev; + } + + hdev->flag |= HCLGE_FLAG_USE_MSIX; + hdev->pdev = pdev; + hdev->ae_dev = ae_dev; + ae_dev->priv = hdev; + + ret = hclge_pci_init(hdev); + if (ret) { + dev_err(&pdev->dev, "PCI init failed\n"); + goto err_pci_init; + } + + /* Command queue initialize */ + ret = hclge_cmd_init(hdev); + if (ret) + goto err_cmd_init; + + ret = hclge_get_cap(hdev); + if (ret) { + dev_err(&pdev->dev, "get hw capability error, ret = %d.\n", + ret); + return ret; + } + + ret = hclge_configure(hdev); + if (ret) { + dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret); + return ret; + } + + if (hdev->flag & HCLGE_FLAG_USE_MSIX) + ret = hclge_init_msix(hdev); + else + ret = hclge_init_msi(hdev); + if (ret) { + dev_err(&pdev->dev, "Init msix/msi error, ret = %d.\n", ret); + return ret; + } + + ret = hclge_alloc_tqps(hdev); + if (ret) { + dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret); + return ret; + } 
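+ /* allocate the PF and VMDq vports */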
+ + ret = hclge_alloc_vport(hdev); + if (ret) { + dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret); + return ret; + } + + ret = hclge_mac_init(hdev); + if (ret) { + dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); + return ret; + } + ret = hclge_buffer_alloc(hdev); + if (ret) { + dev_err(&pdev->dev, "Buffer allocate fail, ret =%d\n", ret); + return ret; + } + + ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); + if (ret) { + dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); + return ret; + } + + ret = hclge_init_vlan_config(hdev); + if (ret) { + dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); + return ret; + } + + ret = hclge_tm_schd_init(hdev); + if (ret) { + dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret); + return ret; + } + + ret = hclge_rss_init_hw(hdev); + if (ret) { + dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); + return ret; + } + + setup_timer(&hdev->service_timer, hclge_service_timer, + (unsigned long)hdev); + INIT_WORK(&hdev->service_task, hclge_service_task); + + set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state); + set_bit(HCLGE_STATE_DOWN, &hdev->state); + + pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME); + return 0; + +err_cmd_init: + pci_release_regions(pdev); +err_pci_init: + pci_set_drvdata(pdev, NULL); +err_hclge_dev: + return ret; +} + +static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) +{ + struct hclge_dev *hdev = ae_dev->priv; + struct hclge_mac *mac = &hdev->hw.mac; + + set_bit(HCLGE_STATE_DOWN, &hdev->state); + + if (IS_ENABLED(CONFIG_PCI_IOV)) + hclge_disable_sriov(hdev); + + if (hdev->service_timer.data) + del_timer_sync(&hdev->service_timer); + if (hdev->service_task.func) + cancel_work_sync(&hdev->service_task); + + if (mac->phydev) + mdiobus_unregister(mac->mdio_bus); + + hclge_destroy_cmd_queue(&hdev->hw); + hclge_pci_uninit(hdev); + ae_dev->priv = NULL; +} + +static const struct hnae3_ae_ops hclge_ops = { + .init_ae_dev = hclge_init_ae_dev, + .uninit_ae_dev = hclge_uninit_ae_dev, + .init_client_instance = hclge_init_client_instance, + .uninit_client_instance = hclge_uninit_client_instance, + .map_ring_to_vector = hclge_map_handle_ring_to_vector, + .unmap_ring_from_vector = hclge_unmap_ring_from_vector, + .get_vector = hclge_get_vector, + .set_promisc_mode = hclge_set_promisc_mode, + .start = hclge_ae_start, + .stop = hclge_ae_stop, + .get_status = hclge_get_status, + .get_ksettings_an_result = hclge_get_ksettings_an_result, + .update_speed_duplex_h = hclge_update_speed_duplex_h, + .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h, + .get_media_type = hclge_get_media_type, + .get_rss_key_size = hclge_get_rss_key_size, + .get_rss_indir_size = hclge_get_rss_indir_size, + .get_rss = hclge_get_rss, + .set_rss = hclge_set_rss, + .get_tc_size = hclge_get_tc_size, + .get_mac_addr = hclge_get_mac_addr, + .set_mac_addr = hclge_set_mac_addr, + .add_uc_addr = hclge_add_uc_addr, + .rm_uc_addr = hclge_rm_uc_addr, + .add_mc_addr = hclge_add_mc_addr, + .rm_mc_addr = hclge_rm_mc_addr, + .set_autoneg = hclge_set_autoneg, + .get_autoneg = hclge_get_autoneg, + .get_pauseparam = hclge_get_pauseparam, + .set_mtu = hclge_set_mtu, + .reset_queue = hclge_reset_tqp, + .get_stats = hclge_get_stats, + .update_stats = hclge_update_stats, + .get_strings = hclge_get_strings, + .get_sset_count = hclge_get_sset_count, + .get_fw_version = hclge_get_fw_version, + .get_mdix_mode = hclge_get_mdix_mode, + .set_vlan_filter = hclge_set_port_vlan_filter, + .set_vf_vlan_filter = hclge_set_vf_vlan_filter, 
+}; + +static struct hnae3_ae_algo ae_algo = { + .ops = &hclge_ops, + .name = HCLGE_NAME, + .pdev_id_table = ae_algo_pci_tbl, +}; + +static int hclge_init(void) +{ + pr_info("%s is initializing\n", HCLGE_NAME); + + return hnae3_register_ae_algo(&ae_algo); +} + +static void hclge_exit(void) +{ + hnae3_unregister_ae_algo(&ae_algo); +} +module_init(hclge_init); +module_exit(hclge_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Huawei Tech. Co., Ltd."); +MODULE_DESCRIPTION("HCLGE Driver"); +MODULE_VERSION(HCLGE_MOD_VERSION); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h new file mode 100644 index 000000000000..9fcfd9395424 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -0,0 +1,520 @@ +/* + * Copyright (c) 2016~2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __HCLGE_MAIN_H +#define __HCLGE_MAIN_H +#include +#include +#include +#include "hclge_cmd.h" +#include "hnae3.h" + +#define HCLGE_MOD_VERSION "v1.0" +#define HCLGE_DRIVER_NAME "hclge" + +#define HCLGE_INVALID_VPORT 0xffff + +#define HCLGE_ROCE_VECTOR_OFFSET 96 + +#define HCLGE_PF_CFG_BLOCK_SIZE 32 +#define HCLGE_PF_CFG_DESC_NUM \ + (HCLGE_PF_CFG_BLOCK_SIZE / HCLGE_CFG_RD_LEN_BYTES) + +#define HCLGE_VECTOR_REG_BASE 0x20000 + +#define HCLGE_VECTOR_REG_OFFSET 0x4 +#define HCLGE_VECTOR_VF_OFFSET 0x100000 + +#define HCLGE_RSS_IND_TBL_SIZE 512 +#define HCLGE_RSS_SET_BITMAP_MSK 0xffff +#define HCLGE_RSS_KEY_SIZE 40 +#define HCLGE_RSS_HASH_ALGO_TOEPLITZ 0 +#define HCLGE_RSS_HASH_ALGO_SIMPLE 1 +#define HCLGE_RSS_HASH_ALGO_SYMMETRIC 2 +#define HCLGE_RSS_HASH_ALGO_MASK 0xf +#define HCLGE_RSS_CFG_TBL_NUM \ + (HCLGE_RSS_IND_TBL_SIZE / HCLGE_RSS_CFG_TBL_SIZE) + +#define HCLGE_RSS_TC_SIZE_0 1 +#define HCLGE_RSS_TC_SIZE_1 2 +#define HCLGE_RSS_TC_SIZE_2 4 +#define HCLGE_RSS_TC_SIZE_3 8 +#define HCLGE_RSS_TC_SIZE_4 16 +#define HCLGE_RSS_TC_SIZE_5 32 +#define HCLGE_RSS_TC_SIZE_6 64 +#define HCLGE_RSS_TC_SIZE_7 128 + +#define HCLGE_TQP_RESET_TRY_TIMES 10 + +#define HCLGE_PHY_PAGE_MDIX 0 +#define HCLGE_PHY_PAGE_COPPER 0 + +/* Page Selection Reg. 
*/ +#define HCLGE_PHY_PAGE_REG 22 + +/* Copper Specific Control Register */ +#define HCLGE_PHY_CSC_REG 16 + +/* Copper Specific Status Register */ +#define HCLGE_PHY_CSS_REG 17 + +#define HCLGE_PHY_MDIX_CTRL_S (5) +#define HCLGE_PHY_MDIX_CTRL_M (3 << HCLGE_PHY_MDIX_CTRL_S) + +#define HCLGE_PHY_MDIX_STATUS_B (6) +#define HCLGE_PHY_SPEED_DUP_RESOLVE_B (11) + +enum HCLGE_DEV_STATE { + HCLGE_STATE_REINITING, + HCLGE_STATE_DOWN, + HCLGE_STATE_DISABLED, + HCLGE_STATE_REMOVING, + HCLGE_STATE_SERVICE_INITED, + HCLGE_STATE_SERVICE_SCHED, + HCLGE_STATE_MBX_HANDLING, + HCLGE_STATE_MBX_IRQ, + HCLGE_STATE_MAX +}; + +#define HCLGE_MPF_ENBALE 1 +struct hclge_caps { + u16 num_tqp; + u16 num_buffer_cell; + u32 flag; + u16 vmdq; +}; + +enum HCLGE_MAC_SPEED { + HCLGE_MAC_SPEED_10M = 10, /* 10 Mbps */ + HCLGE_MAC_SPEED_100M = 100, /* 100 Mbps */ + HCLGE_MAC_SPEED_1G = 1000, /* 1000 Mbps = 1 Gbps */ + HCLGE_MAC_SPEED_10G = 10000, /* 10000 Mbps = 10 Gbps */ + HCLGE_MAC_SPEED_25G = 25000, /* 25000 Mbps = 25 Gbps */ + HCLGE_MAC_SPEED_40G = 40000, /* 40000 Mbps = 40 Gbps */ + HCLGE_MAC_SPEED_50G = 50000, /* 50000 Mbps = 50 Gbps */ + HCLGE_MAC_SPEED_100G = 100000 /* 100000 Mbps = 100 Gbps */ +}; + +enum HCLGE_MAC_DUPLEX { + HCLGE_MAC_HALF, + HCLGE_MAC_FULL +}; + +enum hclge_mta_dmac_sel_type { + HCLGE_MAC_ADDR_47_36, + HCLGE_MAC_ADDR_46_35, + HCLGE_MAC_ADDR_45_34, + HCLGE_MAC_ADDR_44_33, +}; + +struct hclge_mac { + u8 phy_addr; + u8 flag; + u8 media_type; + u8 mac_addr[ETH_ALEN]; + u8 autoneg; + u8 duplex; + u32 speed; + int link; /* store the link status of mac & phy (if phy exit)*/ + struct phy_device *phydev; + struct mii_bus *mdio_bus; + phy_interface_t phy_if; +}; + +struct hclge_hw { + void __iomem *io_base; + struct hclge_mac mac; + int num_vec; + struct hclge_cmq cmq; + struct hclge_caps caps; + void *back; +}; + +/* TQP stats */ +struct hlcge_tqp_stats { + /* query_tqp_tx_queue_statistics ,opcode id: 0x0B03 */ + u64 rcb_tx_ring_pktnum_rcd; /* 32bit */ + /* query_tqp_rx_queue_statistics ,opcode id: 0x0B13 */ + u64 rcb_rx_ring_pktnum_rcd; /* 32bit */ +}; + +struct hclge_tqp { + struct device *dev; /* Device for DMA mapping */ + struct hnae3_queue q; + struct hlcge_tqp_stats tqp_stats; + u16 index; /* Global index in a NIC controller */ + + bool alloced; +}; + +enum hclge_fc_mode { + HCLGE_FC_NONE, + HCLGE_FC_RX_PAUSE, + HCLGE_FC_TX_PAUSE, + HCLGE_FC_FULL, + HCLGE_FC_PFC, + HCLGE_FC_DEFAULT +}; + +#define HCLGE_PG_NUM 4 +#define HCLGE_SCH_MODE_SP 0 +#define HCLGE_SCH_MODE_DWRR 1 +struct hclge_pg_info { + u8 pg_id; + u8 pg_sch_mode; /* 0: sp; 1: dwrr */ + u8 tc_bit_map; + u32 bw_limit; + u8 tc_dwrr[HNAE3_MAX_TC]; +}; + +struct hclge_tc_info { + u8 tc_id; + u8 tc_sch_mode; /* 0: sp; 1: dwrr */ + u8 pgid; + u32 bw_limit; +}; + +struct hclge_cfg { + u8 vmdq_vport_num; + u8 tc_num; + u16 tqp_desc_num; + u16 rx_buf_len; + u8 phy_addr; + u8 media_type; + u8 mac_addr[ETH_ALEN]; + u8 default_speed; + u32 numa_node_map; +}; + +struct hclge_tm_info { + u8 num_tc; + u8 num_pg; /* It must be 1 if vNET-Base schd */ + u8 pg_dwrr[HCLGE_PG_NUM]; + u8 prio_tc[HNAE3_MAX_USER_PRIO]; + struct hclge_pg_info pg_info[HCLGE_PG_NUM]; + struct hclge_tc_info tc_info[HNAE3_MAX_TC]; + enum hclge_fc_mode fc_mode; + u8 hw_pfc_map; /* Allow for packet drop or not on this TC */ +}; + +struct hclge_comm_stats_str { + char desc[ETH_GSTRING_LEN]; + unsigned long offset; +}; + +/* all 64bit stats, opcode id: 0x0030 */ +struct hclge_64_bit_stats { + /* query_igu_stat */ + u64 igu_rx_oversize_pkt; + u64 igu_rx_undersize_pkt; + u64 
igu_rx_out_all_pkt; + u64 igu_rx_uni_pkt; + u64 igu_rx_multi_pkt; + u64 igu_rx_broad_pkt; + u64 rsv0; + + /* query_egu_stat */ + u64 egu_tx_out_all_pkt; + u64 egu_tx_uni_pkt; + u64 egu_tx_multi_pkt; + u64 egu_tx_broad_pkt; + + /* ssu_ppp packet stats */ + u64 ssu_ppp_mac_key_num; + u64 ssu_ppp_host_key_num; + u64 ppp_ssu_mac_rlt_num; + u64 ppp_ssu_host_rlt_num; + + /* ssu_tx_in_out_dfx_stats */ + u64 ssu_tx_in_num; + u64 ssu_tx_out_num; + /* ssu_rx_in_out_dfx_stats */ + u64 ssu_rx_in_num; + u64 ssu_rx_out_num; +}; + +/* all 32bit stats, opcode id: 0x0031 */ +struct hclge_32_bit_stats { + u64 igu_rx_err_pkt; + u64 igu_rx_no_eof_pkt; + u64 igu_rx_no_sof_pkt; + u64 egu_tx_1588_pkt; + u64 egu_tx_err_pkt; + u64 ssu_full_drop_num; + u64 ssu_part_drop_num; + u64 ppp_key_drop_num; + u64 ppp_rlt_drop_num; + u64 ssu_key_drop_num; + u64 pkt_curr_buf_cnt; + u64 qcn_fb_rcv_cnt; + u64 qcn_fb_drop_cnt; + u64 qcn_fb_invaild_cnt; + u64 rsv0; + u64 rx_packet_tc0_in_cnt; + u64 rx_packet_tc1_in_cnt; + u64 rx_packet_tc2_in_cnt; + u64 rx_packet_tc3_in_cnt; + u64 rx_packet_tc4_in_cnt; + u64 rx_packet_tc5_in_cnt; + u64 rx_packet_tc6_in_cnt; + u64 rx_packet_tc7_in_cnt; + u64 rx_packet_tc0_out_cnt; + u64 rx_packet_tc1_out_cnt; + u64 rx_packet_tc2_out_cnt; + u64 rx_packet_tc3_out_cnt; + u64 rx_packet_tc4_out_cnt; + u64 rx_packet_tc5_out_cnt; + u64 rx_packet_tc6_out_cnt; + u64 rx_packet_tc7_out_cnt; + + /* Tx packet level statistics */ + u64 tx_packet_tc0_in_cnt; + u64 tx_packet_tc1_in_cnt; + u64 tx_packet_tc2_in_cnt; + u64 tx_packet_tc3_in_cnt; + u64 tx_packet_tc4_in_cnt; + u64 tx_packet_tc5_in_cnt; + u64 tx_packet_tc6_in_cnt; + u64 tx_packet_tc7_in_cnt; + u64 tx_packet_tc0_out_cnt; + u64 tx_packet_tc1_out_cnt; + u64 tx_packet_tc2_out_cnt; + u64 tx_packet_tc3_out_cnt; + u64 tx_packet_tc4_out_cnt; + u64 tx_packet_tc5_out_cnt; + u64 tx_packet_tc6_out_cnt; + u64 tx_packet_tc7_out_cnt; + + /* packet buffer statistics */ + u64 pkt_curr_buf_tc0_cnt; + u64 pkt_curr_buf_tc1_cnt; + u64 pkt_curr_buf_tc2_cnt; + u64 pkt_curr_buf_tc3_cnt; + u64 pkt_curr_buf_tc4_cnt; + u64 pkt_curr_buf_tc5_cnt; + u64 pkt_curr_buf_tc6_cnt; + u64 pkt_curr_buf_tc7_cnt; + + u64 mb_uncopy_num; + u64 lo_pri_unicast_rlt_drop_num; + u64 hi_pri_multicast_rlt_drop_num; + u64 lo_pri_multicast_rlt_drop_num; + u64 rx_oq_drop_pkt_cnt; + u64 tx_oq_drop_pkt_cnt; + u64 nic_l2_err_drop_pkt_cnt; + u64 roc_l2_err_drop_pkt_cnt; +}; + +/* mac stats ,opcode id: 0x0032 */ +struct hclge_mac_stats { + u64 mac_tx_mac_pause_num; + u64 mac_rx_mac_pause_num; + u64 mac_tx_pfc_pri0_pkt_num; + u64 mac_tx_pfc_pri1_pkt_num; + u64 mac_tx_pfc_pri2_pkt_num; + u64 mac_tx_pfc_pri3_pkt_num; + u64 mac_tx_pfc_pri4_pkt_num; + u64 mac_tx_pfc_pri5_pkt_num; + u64 mac_tx_pfc_pri6_pkt_num; + u64 mac_tx_pfc_pri7_pkt_num; + u64 mac_rx_pfc_pri0_pkt_num; + u64 mac_rx_pfc_pri1_pkt_num; + u64 mac_rx_pfc_pri2_pkt_num; + u64 mac_rx_pfc_pri3_pkt_num; + u64 mac_rx_pfc_pri4_pkt_num; + u64 mac_rx_pfc_pri5_pkt_num; + u64 mac_rx_pfc_pri6_pkt_num; + u64 mac_rx_pfc_pri7_pkt_num; + u64 mac_tx_total_pkt_num; + u64 mac_tx_total_oct_num; + u64 mac_tx_good_pkt_num; + u64 mac_tx_bad_pkt_num; + u64 mac_tx_good_oct_num; + u64 mac_tx_bad_oct_num; + u64 mac_tx_uni_pkt_num; + u64 mac_tx_multi_pkt_num; + u64 mac_tx_broad_pkt_num; + u64 mac_tx_undersize_pkt_num; + u64 mac_tx_overrsize_pkt_num; + u64 mac_tx_64_oct_pkt_num; + u64 mac_tx_65_127_oct_pkt_num; + u64 mac_tx_128_255_oct_pkt_num; + u64 mac_tx_256_511_oct_pkt_num; + u64 mac_tx_512_1023_oct_pkt_num; + u64 mac_tx_1024_1518_oct_pkt_num; + u64 
mac_tx_1519_max_oct_pkt_num; + u64 mac_rx_total_pkt_num; + u64 mac_rx_total_oct_num; + u64 mac_rx_good_pkt_num; + u64 mac_rx_bad_pkt_num; + u64 mac_rx_good_oct_num; + u64 mac_rx_bad_oct_num; + u64 mac_rx_uni_pkt_num; + u64 mac_rx_multi_pkt_num; + u64 mac_rx_broad_pkt_num; + u64 mac_rx_undersize_pkt_num; + u64 mac_rx_overrsize_pkt_num; + u64 mac_rx_64_oct_pkt_num; + u64 mac_rx_65_127_oct_pkt_num; + u64 mac_rx_128_255_oct_pkt_num; + u64 mac_rx_256_511_oct_pkt_num; + u64 mac_rx_512_1023_oct_pkt_num; + u64 mac_rx_1024_1518_oct_pkt_num; + u64 mac_rx_1519_max_oct_pkt_num; + + u64 mac_trans_fragment_pkt_num; + u64 mac_trans_undermin_pkt_num; + u64 mac_trans_jabber_pkt_num; + u64 mac_trans_err_all_pkt_num; + u64 mac_trans_from_app_good_pkt_num; + u64 mac_trans_from_app_bad_pkt_num; + u64 mac_rcv_fragment_pkt_num; + u64 mac_rcv_undermin_pkt_num; + u64 mac_rcv_jabber_pkt_num; + u64 mac_rcv_fcs_err_pkt_num; + u64 mac_rcv_send_app_good_pkt_num; + u64 mac_rcv_send_app_bad_pkt_num; +}; + +struct hclge_hw_stats { + struct hclge_mac_stats mac_stats; + struct hclge_64_bit_stats all_64_bit_stats; + struct hclge_32_bit_stats all_32_bit_stats; +}; + +struct hclge_dev { + struct pci_dev *pdev; + struct hnae3_ae_dev *ae_dev; + struct hclge_hw hw; + struct hclge_hw_stats hw_stats; + unsigned long state; + + u32 fw_version; + u16 num_vmdq_vport; /* Num vmdq vport this PF has set up */ + u16 num_tqps; /* Num task queue pairs of this PF */ + u16 num_req_vfs; /* Num VFs requested for this PF */ + + u16 num_roce_msix; /* Num of roce vectors for this PF */ + int roce_base_vector; + + /* Base task tqp physical id of this PF */ + u16 base_tqp_pid; + u16 alloc_rss_size; /* Allocated RSS task queue */ + u16 rss_size_max; /* HW defined max RSS task queue */ + + /* Num of guaranteed filters for this PF */ + u16 fdir_pf_filter_count; + u16 num_alloc_vport; /* Num vports this driver supports */ + u32 numa_node_mask; + u16 rx_buf_len; + u16 num_desc; + u8 hw_tc_map; + u8 tc_num_last_time; + enum hclge_fc_mode fc_mode_last_time; + +#define HCLGE_FLAG_TC_BASE_SCH_MODE 1 +#define HCLGE_FLAG_VNET_BASE_SCH_MODE 2 + u8 tx_sch_mode; + + u8 default_up; + struct hclge_tm_info tm_info; + + u16 num_msi; + u16 num_msi_left; + u16 num_msi_used; + u32 base_msi_vector; + struct msix_entry *msix_entries; + u16 *vector_status; + + u16 pending_udp_bitmap; + + u16 rx_itr_default; + u16 tx_itr_default; + + u16 adminq_work_limit; /* Num of admin receive queue desc to process */ + unsigned long service_timer_period; + unsigned long service_timer_previous; + struct timer_list service_timer; + struct work_struct service_task; + + bool cur_promisc; + int num_alloc_vfs; /* Actual number of VFs allocated */ + + struct hclge_tqp *htqp; + struct hclge_vport *vport; + + struct dentry *hclge_dbgfs; + + struct hnae3_client *nic_client; + struct hnae3_client *roce_client; + +#define HCLGE_FLAG_USE_MSI 0x00000001 +#define HCLGE_FLAG_USE_MSIX 0x00000002 +#define HCLGE_FLAG_MAIN 0x00000004 +#define HCLGE_FLAG_DCB_CAPABLE 0x00000008 +#define HCLGE_FLAG_DCB_ENABLE 0x00000010 + u32 flag; + + u32 pkt_buf_size; /* Total pf buf size for tx/rx */ + u32 mps; /* Max packet size */ + struct hclge_priv_buf *priv_buf; + struct hclge_shared_buf s_buf; + + enum hclge_mta_dmac_sel_type mta_mac_sel_type; + bool enable_mta; /* Mutilcast filter enable */ + bool accept_mta_mc; /* Whether accept mta filter multicast */ +}; + +struct hclge_vport { + u16 alloc_tqps; /* Allocated Tx/Rx queues */ + + u8 rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */ + /* User 
configured lookup table entries */ + u8 rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE]; + u16 alloc_rss_size; + + u16 qs_offset; + u16 bw_limit; /* VSI BW Limit (0 = disabled) */ + u8 dwrr; + + int vport_id; + struct hclge_dev *back; /* Back reference to associated dev */ + struct hnae3_handle nic; + struct hnae3_handle roce; +}; + +void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, + bool en_mc, bool en_bc, int vport_id); + +int hclge_add_uc_addr_common(struct hclge_vport *vport, + const unsigned char *addr); +int hclge_rm_uc_addr_common(struct hclge_vport *vport, + const unsigned char *addr); +int hclge_add_mc_addr_common(struct hclge_vport *vport, + const unsigned char *addr); +int hclge_rm_mc_addr_common(struct hclge_vport *vport, + const unsigned char *addr); + +int hclge_cfg_func_mta_filter(struct hclge_dev *hdev, + u8 func_id, + bool enable); +struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle); +int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector, + struct hnae3_ring_chain_node *ring_chain); +static inline int hclge_get_queue_id(struct hnae3_queue *queue) +{ + struct hclge_tqp *tqp = container_of(queue, struct hclge_tqp, q); + + return tqp->index; +} + +int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex); +int hclge_set_vf_vlan_common(struct hclge_dev *vport, int vfid, + bool is_kill, u16 vlan, u8 qos, __be16 proto); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c new file mode 100644 index 000000000000..f32d719c4f77 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -0,0 +1,213 @@ +/* + * Copyright (c) 2016~2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include +#include + +#include "hclge_cmd.h" +#include "hclge_main.h" +#include "hclge_mdio.h" + +enum hclge_mdio_c22_op_seq { + HCLGE_MDIO_C22_WRITE = 1, + HCLGE_MDIO_C22_READ = 2 +}; + +#define HCLGE_MDIO_CTRL_START_B 0 +#define HCLGE_MDIO_CTRL_ST_S 1 +#define HCLGE_MDIO_CTRL_ST_M (0x3 << HCLGE_MDIO_CTRL_ST_S) +#define HCLGE_MDIO_CTRL_OP_S 3 +#define HCLGE_MDIO_CTRL_OP_M (0x3 << HCLGE_MDIO_CTRL_OP_S) + +#define HCLGE_MDIO_PHYID_S 0 +#define HCLGE_MDIO_PHYID_M (0x1f << HCLGE_MDIO_PHYID_S) + +#define HCLGE_MDIO_PHYREG_S 0 +#define HCLGE_MDIO_PHYREG_M (0x1f << HCLGE_MDIO_PHYREG_S) + +#define HCLGE_MDIO_STA_B 0 + +struct hclge_mdio_cfg_cmd { + u8 ctrl_bit; + u8 phyid; + u8 phyad; + u8 rsvd; + __le16 reserve; + __le16 data_wr; + __le16 data_rd; + __le16 sta; +}; + +static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum, + u16 data) +{ + struct hclge_mdio_cfg_cmd *mdio_cmd; + struct hclge_dev *hdev = bus->priv; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, false); + + mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data; + + hnae_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M, + HCLGE_MDIO_PHYID_S, phyid); + hnae_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M, + HCLGE_MDIO_PHYREG_S, regnum); + + hnae_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1); + hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M, + HCLGE_MDIO_CTRL_ST_S, 1); + hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M, + HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_WRITE); + + mdio_cmd->data_wr = cpu_to_le16(data); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "mdio write fail when sending cmd, status is %d.\n", + ret); + return ret; + } + + return 0; +} + +static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum) +{ + struct hclge_mdio_cfg_cmd *mdio_cmd; + struct hclge_dev *hdev = bus->priv; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, true); + + mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data; + + hnae_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M, + HCLGE_MDIO_PHYID_S, phyid); + hnae_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M, + HCLGE_MDIO_PHYREG_S, regnum); + + hnae_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1); + hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M, + HCLGE_MDIO_CTRL_ST_S, 1); + hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M, + HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_READ); + + /* Read out phy data */ + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "mdio read fail when get data, status is %d.\n", + ret); + return ret; + } + + if (hnae_get_bit(le16_to_cpu(mdio_cmd->sta), HCLGE_MDIO_STA_B)) { + dev_err(&hdev->pdev->dev, "mdio read data error\n"); + return -EIO; + } + + return le16_to_cpu(mdio_cmd->data_rd); +} + +int hclge_mac_mdio_config(struct hclge_dev *hdev) +{ + struct hclge_mac *mac = &hdev->hw.mac; + struct phy_device *phydev; + struct mii_bus *mdio_bus; + int ret; + + if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR) + return 0; + + mdio_bus = devm_mdiobus_alloc(&hdev->pdev->dev); + if (!mdio_bus) + return -ENOMEM; + + mdio_bus->name = "hisilicon MII bus"; + mdio_bus->read = hclge_mdio_read; + mdio_bus->write = hclge_mdio_write; + snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%s", "mii", + dev_name(&hdev->pdev->dev)); + + mdio_bus->parent = &hdev->pdev->dev; + mdio_bus->priv = hdev; + mdio_bus->phy_mask = ~(1 << mac->phy_addr); + ret = 
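Editorial aside, not part of the patch: once hclge_mac_mdio_config() registers this bus, generic PHY accesses funnel through the firmware command path implemented by hclge_mdio_read()/hclge_mdio_write() above rather than through memory-mapped registers. A hedged sketch of what a caller could look like; hclge_phy_link_up() is a hypothetical helper and assumes <linux/phy.h> and <linux/mii.h> are available:

/* Hypothetical helper, for illustration only: poll link state through the
 * MII bus registered by hclge_mac_mdio_config().  mdiobus_read() takes the
 * bus lock and ends up in hclge_mdio_read() above, i.e. in a firmware
 * command.
 */
static bool hclge_phy_link_up(struct hclge_dev *hdev)
{
	int bmsr = mdiobus_read(hdev->hw.mac.mdio_bus,
				hdev->hw.mac.phy_addr, MII_BMSR);

	return bmsr >= 0 && (bmsr & BMSR_LSTATUS);
}
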
mdiobus_register(mdio_bus); + if (ret) { + dev_err(mdio_bus->parent, + "Failed to register MDIO bus ret = %#x\n", ret); + return ret; + } + + phydev = mdiobus_get_phy(mdio_bus, mac->phy_addr); + if (!phydev) { + dev_err(mdio_bus->parent, "Failed to get phy device\n"); + mdiobus_unregister(mdio_bus); + return -EIO; + } + + mac->phydev = phydev; + mac->mdio_bus = mdio_bus; + + return 0; +} + +static void hclge_mac_adjust_link(struct net_device *netdev) +{ + struct hnae3_handle *h = *((void **)netdev_priv(netdev)); + struct hclge_vport *vport = hclge_get_vport(h); + struct hclge_dev *hdev = vport->back; + int duplex, speed; + int ret; + + speed = netdev->phydev->speed; + duplex = netdev->phydev->duplex; + + ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex); + if (ret) + netdev_err(netdev, "failed to adjust link.\n"); +} + +int hclge_mac_start_phy(struct hclge_dev *hdev) +{ + struct net_device *netdev = hdev->vport[0].nic.netdev; + struct phy_device *phydev = hdev->hw.mac.phydev; + int ret; + + if (!phydev) + return 0; + + ret = phy_connect_direct(netdev, phydev, + hclge_mac_adjust_link, + PHY_INTERFACE_MODE_SGMII); + if (ret) { + netdev_err(netdev, "phy_connect_direct err.\n"); + return ret; + } + + phy_start(phydev); + + return 0; +} + +void hclge_mac_stop_phy(struct hclge_dev *hdev) +{ + struct net_device *netdev = hdev->vport[0].nic.netdev; + struct phy_device *phydev = netdev->phydev; + + if (!phydev) + return; + + phy_stop(phydev); + phy_disconnect(phydev); +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h new file mode 100644 index 000000000000..c5e91cfb8f2c --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h @@ -0,0 +1,17 @@ +/* + * Copyright (c) 2016-2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __HCLGE_MDIO_H +#define __HCLGE_MDIO_H + +int hclge_mac_mdio_config(struct hclge_dev *hdev); +int hclge_mac_start_phy(struct hclge_dev *hdev); +void hclge_mac_stop_phy(struct hclge_dev *hdev); + +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c new file mode 100644 index 000000000000..73a75d7cc551 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -0,0 +1,1022 @@ +/* + * Copyright (c) 2016~2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include + +#include "hclge_cmd.h" +#include "hclge_main.h" +#include "hclge_tm.h" + +enum hclge_shaper_level { + HCLGE_SHAPER_LVL_PRI = 0, + HCLGE_SHAPER_LVL_PG = 1, + HCLGE_SHAPER_LVL_PORT = 2, + HCLGE_SHAPER_LVL_QSET = 3, + HCLGE_SHAPER_LVL_CNT = 4, + HCLGE_SHAPER_LVL_VF = 0, + HCLGE_SHAPER_LVL_PF = 1, +}; + +#define HCLGE_SHAPER_BS_U_DEF 1 +#define HCLGE_SHAPER_BS_S_DEF 4 + +#define HCLGE_ETHER_MAX_RATE 100000 + +/* hclge_shaper_para_calc: calculate ir parameter for the shaper + * @ir: Rate to be config, its unit is Mbps + * @shaper_level: the shaper level. 
eg: port, pg, priority, queueset + * @ir_b: IR_B parameter of IR shaper + * @ir_u: IR_U parameter of IR shaper + * @ir_s: IR_S parameter of IR shaper + * + * the formula: + * + * IR_b * (2 ^ IR_u) * 8 + * IR(Mbps) = ------------------------- * CLOCK(1000Mbps) + * Tick * (2 ^ IR_s) + * + * @return: 0: calculate sucessful, negative: fail + */ +static int hclge_shaper_para_calc(u32 ir, u8 shaper_level, + u8 *ir_b, u8 *ir_u, u8 *ir_s) +{ + const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = { + 6 * 256, /* Prioriy level */ + 6 * 32, /* Prioriy group level */ + 6 * 8, /* Port level */ + 6 * 256 /* Qset level */ + }; + u8 ir_u_calc = 0, ir_s_calc = 0; + u32 ir_calc; + u32 tick; + + /* Calc tick */ + if (shaper_level >= HCLGE_SHAPER_LVL_CNT) + return -EINVAL; + + tick = tick_array[shaper_level]; + + /** + * Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0 + * the formula is changed to: + * 126 * 1 * 8 + * ir_calc = ---------------- * 1000 + * tick * 1 + */ + ir_calc = (1008000 + (tick >> 1) - 1) / tick; + + if (ir_calc == ir) { + *ir_b = 126; + *ir_u = 0; + *ir_s = 0; + + return 0; + } else if (ir_calc > ir) { + /* Increasing the denominator to select ir_s value */ + while (ir_calc > ir) { + ir_s_calc++; + ir_calc = 1008000 / (tick * (1 << ir_s_calc)); + } + + if (ir_calc == ir) + *ir_b = 126; + else + *ir_b = (ir * tick * (1 << ir_s_calc) + 4000) / 8000; + } else { + /* Increasing the numerator to select ir_u value */ + u32 numerator; + + while (ir_calc < ir) { + ir_u_calc++; + numerator = 1008000 * (1 << ir_u_calc); + ir_calc = (numerator + (tick >> 1)) / tick; + } + + if (ir_calc == ir) { + *ir_b = 126; + } else { + u32 denominator = (8000 * (1 << --ir_u_calc)); + *ir_b = (ir * tick + (denominator >> 1)) / denominator; + } + } + + *ir_u = ir_u_calc; + *ir_s = ir_s_calc; + + return 0; +} + +static int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx) +{ + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false); + + desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) | + (rx ? 
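Editorial aside, not part of the patch: the kernel-doc above hclge_shaper_para_calc() states the shaper equation but never evaluates it. The standalone userspace sketch below (shaper_rate_mbps() is an invented name) runs the formula in the forward direction and reproduces the 1008000 constant the function uses for its fast path (126 * 8 * 1000):

#include <stdint.h>
#include <stdio.h>

/* Forward evaluation of the documented formula:
 *   IR(Mbps) = ir_b * 2^ir_u * 8 / (tick * 2^ir_s) * 1000
 */
static uint32_t shaper_rate_mbps(uint8_t ir_b, uint8_t ir_u, uint8_t ir_s,
				 uint16_t tick)
{
	uint64_t num = (uint64_t)ir_b * (1ULL << ir_u) * 8 * 1000;
	uint64_t den = (uint64_t)tick * (1ULL << ir_s);

	return (uint32_t)((num + den / 2) / den);	/* nearest integer */
}

int main(void)
{
	/* ir_b = 126, ir_u = 0, ir_s = 0 at the PG tick (6 * 32 = 192):
	 * 1008000 / 192 = 5250 Mbps, i.e. the 1008000 constant in
	 * hclge_shaper_para_calc() is just 126 * 8 * 1000.
	 */
	printf("%u Mbps\n", (unsigned)shaper_rate_mbps(126, 0, 0, 6 * 32));
	return 0;
}

Read this way, bumping ir_s halves the achievable rate and bumping ir_u doubles it, which is why the driver walks ir_s upward when its first estimate overshoots the requested rate and ir_u upward when it undershoots.
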
HCLGE_RX_MAC_PAUSE_EN_MSK : 0)); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id) +{ + u8 tc; + + tc = hdev->tm_info.prio_tc[pri_id]; + + if (tc >= hdev->tm_info.num_tc) + return -EINVAL; + + /** + * the register for priority has four bytes, the first bytes includes + * priority0 and priority1, the higher 4bit stands for priority1 + * while the lower 4bit stands for priority0, as below: + * first byte: | pri_1 | pri_0 | + * second byte: | pri_3 | pri_2 | + * third byte: | pri_5 | pri_4 | + * fourth byte: | pri_7 | pri_6 | + */ + pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4); + + return 0; +} + +static int hclge_up_to_tc_map(struct hclge_dev *hdev) +{ + struct hclge_desc desc; + u8 *pri = (u8 *)desc.data; + u8 pri_id; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false); + + for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) { + ret = hclge_fill_pri_array(hdev, pri, pri_id); + if (ret) + return ret; + } + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev, + u8 pg_id, u8 pri_bit_map) +{ + struct hclge_pg_to_pri_link_cmd *map; + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false); + + map = (struct hclge_pg_to_pri_link_cmd *)desc.data; + + map->pg_id = pg_id; + map->pri_bit_map = pri_bit_map; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev, + u16 qs_id, u8 pri) +{ + struct hclge_qs_to_pri_link_cmd *map; + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false); + + map = (struct hclge_qs_to_pri_link_cmd *)desc.data; + + map->qs_id = cpu_to_le16(qs_id); + map->priority = pri; + map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev, + u8 q_id, u16 qs_id) +{ + struct hclge_nq_to_qs_link_cmd *map; + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false); + + map = (struct hclge_nq_to_qs_link_cmd *)desc.data; + + map->nq_id = cpu_to_le16(q_id); + map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id, + u8 dwrr) +{ + struct hclge_pg_weight_cmd *weight; + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false); + + weight = (struct hclge_pg_weight_cmd *)desc.data; + + weight->pg_id = pg_id; + weight->dwrr = dwrr; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id, + u8 dwrr) +{ + struct hclge_priority_weight_cmd *weight; + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false); + + weight = (struct hclge_priority_weight_cmd *)desc.data; + + weight->pri_id = pri_id; + weight->dwrr = dwrr; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id, + u8 dwrr) +{ + struct hclge_qs_weight_cmd *weight; + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false); + + weight = (struct hclge_qs_weight_cmd *)desc.data; + + weight->qs_id = cpu_to_le16(qs_id); + weight->dwrr = dwrr; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int 
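Editorial aside, not part of the patch: as a concrete reading of the byte layout described above hclge_fill_pri_array(), pri_id = 5 with tc = 3 gives pri[5 >> 1] = pri[2] OR-ed with 3 << ((5 & 1) * 4) = 3 << 4, so TC 3 lands in the high nibble of the third byte, exactly the | pri_5 | pri_4 | slot. A minimal standalone sketch with an invented example mapping:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* prio_tc[i] = TC for user priority i; example values only */
	const uint8_t prio_tc[8] = { 0, 0, 1, 1, 2, 3, 3, 3 };
	uint8_t pri[4] = { 0 };
	int i;

	for (i = 0; i < 8; i++)
		pri[i >> 1] |= prio_tc[i] << ((i & 1) * 4);	/* low/high nibble */

	for (i = 0; i < 4; i++)
		printf("byte %d: 0x%02x\n", i, pri[i]);	/* byte 2 -> 0x32 */
	return 0;
}
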
hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev, + enum hclge_shap_bucket bucket, u8 pg_id, + u8 ir_b, u8 ir_u, u8 ir_s, u8 bs_b, u8 bs_s) +{ + struct hclge_pg_shapping_cmd *shap_cfg_cmd; + enum hclge_opcode_type opcode; + struct hclge_desc desc; + + opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING : + HCLGE_OPC_TM_PG_C_SHAPPING; + hclge_cmd_setup_basic_desc(&desc, opcode, false); + + shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data; + + shap_cfg_cmd->pg_id = pg_id; + + hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_B, ir_b); + hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_U, ir_u); + hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_S, ir_s); + hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, BS_B, bs_b); + hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, BS_S, bs_s); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev, + enum hclge_shap_bucket bucket, u8 pri_id, + u8 ir_b, u8 ir_u, u8 ir_s, + u8 bs_b, u8 bs_s) +{ + struct hclge_pri_shapping_cmd *shap_cfg_cmd; + enum hclge_opcode_type opcode; + struct hclge_desc desc; + + opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING : + HCLGE_OPC_TM_PRI_C_SHAPPING; + + hclge_cmd_setup_basic_desc(&desc, opcode, false); + + shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data; + + shap_cfg_cmd->pri_id = pri_id; + + hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_B, ir_b); + hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_U, ir_u); + hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_S, ir_s); + hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, BS_B, bs_b); + hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, BS_S, bs_s); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id) +{ + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false); + + if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR) + desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK); + else + desc.data[1] = 0; + + desc.data[0] = cpu_to_le32(pg_id); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id) +{ + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false); + + if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR) + desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK); + else + desc.data[1] = 0; + + desc.data[0] = cpu_to_le32(pri_id); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id) +{ + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false); + + if (hdev->tm_info.tc_info[qs_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR) + desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK); + else + desc.data[1] = 0; + + desc.data[0] = cpu_to_le32(qs_id); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc) +{ + struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd; + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING, + false); + + bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data; + + bp_to_qs_map_cmd->tc_id = tc; + + /* Qset and tc is one by one mapping */ + bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(1 << tc); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static void 
hclge_tm_vport_tc_info_update(struct hclge_vport *vport) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hclge_dev *hdev = vport->back; + u8 i; + + kinfo = &vport->nic.kinfo; + vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit; + kinfo->num_tc = + min_t(u16, kinfo->num_tqps, hdev->tm_info.num_tc); + kinfo->rss_size + = min_t(u16, hdev->rss_size_max, + kinfo->num_tqps / kinfo->num_tc); + vport->qs_offset = hdev->tm_info.num_tc * vport->vport_id; + vport->dwrr = 100; /* 100 percent as init */ + vport->alloc_rss_size = kinfo->rss_size; + + for (i = 0; i < kinfo->num_tc; i++) { + if (hdev->hw_tc_map & BIT(i)) { + kinfo->tc_info[i].enable = true; + kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size; + kinfo->tc_info[i].tqp_count = kinfo->rss_size; + kinfo->tc_info[i].tc = i; + } else { + /* Set to default queue if TC is disable */ + kinfo->tc_info[i].enable = false; + kinfo->tc_info[i].tqp_offset = 0; + kinfo->tc_info[i].tqp_count = 1; + kinfo->tc_info[i].tc = 0; + } + } + + memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc, + FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc)); +} + +static void hclge_tm_vport_info_update(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + u32 i; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + hclge_tm_vport_tc_info_update(vport); + + vport++; + } +} + +static void hclge_tm_tc_info_init(struct hclge_dev *hdev) +{ + u8 i; + + for (i = 0; i < hdev->tm_info.num_tc; i++) { + hdev->tm_info.tc_info[i].tc_id = i; + hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR; + hdev->tm_info.tc_info[i].pgid = 0; + hdev->tm_info.tc_info[i].bw_limit = + hdev->tm_info.pg_info[0].bw_limit; + } + + for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) + hdev->tm_info.prio_tc[i] = + (i >= hdev->tm_info.num_tc) ? 0 : i; + + hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE; +} + +static void hclge_tm_pg_info_init(struct hclge_dev *hdev) +{ + u8 i; + + for (i = 0; i < hdev->tm_info.num_pg; i++) { + int k; + + hdev->tm_info.pg_dwrr[i] = i ? 
0 : 100; + + hdev->tm_info.pg_info[i].pg_id = i; + hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR; + + hdev->tm_info.pg_info[i].bw_limit = HCLGE_ETHER_MAX_RATE; + + if (i != 0) + continue; + + hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map; + for (k = 0; k < hdev->tm_info.num_tc; k++) + hdev->tm_info.pg_info[i].tc_dwrr[k] = 100; + } +} + +static int hclge_tm_schd_info_init(struct hclge_dev *hdev) +{ + if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) && + (hdev->tm_info.num_pg != 1)) + return -EINVAL; + + hclge_tm_pg_info_init(hdev); + + hclge_tm_tc_info_init(hdev); + + hclge_tm_vport_info_update(hdev); + + hdev->tm_info.fc_mode = HCLGE_FC_NONE; + hdev->fc_mode_last_time = hdev->tm_info.fc_mode; + + return 0; +} + +static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev) +{ + int ret; + u32 i; + + if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) + return 0; + + for (i = 0; i < hdev->tm_info.num_pg; i++) { + /* Cfg mapping */ + ret = hclge_tm_pg_to_pri_map_cfg( + hdev, i, hdev->tm_info.pg_info[i].tc_bit_map); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev) +{ + u8 ir_u, ir_b, ir_s; + int ret; + u32 i; + + /* Cfg pg schd */ + if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) + return 0; + + /* Pg to pri */ + for (i = 0; i < hdev->tm_info.num_pg; i++) { + /* Calc shaper para */ + ret = hclge_shaper_para_calc( + hdev->tm_info.pg_info[i].bw_limit, + HCLGE_SHAPER_LVL_PG, + &ir_b, &ir_u, &ir_s); + if (ret) + return ret; + + ret = hclge_tm_pg_shapping_cfg(hdev, + HCLGE_TM_SHAP_C_BUCKET, i, + 0, 0, 0, HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + if (ret) + return ret; + + ret = hclge_tm_pg_shapping_cfg(hdev, + HCLGE_TM_SHAP_P_BUCKET, i, + ir_b, ir_u, ir_s, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev) +{ + int ret; + u32 i; + + /* cfg pg schd */ + if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) + return 0; + + /* pg to prio */ + for (i = 0; i < hdev->tm_info.num_pg; i++) { + /* Cfg dwrr */ + ret = hclge_tm_pg_weight_cfg(hdev, i, + hdev->tm_info.pg_dwrr[i]); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev, + struct hclge_vport *vport) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hnae3_queue **tqp = kinfo->tqp; + struct hnae3_tc_info *v_tc_info; + u32 i, j; + int ret; + + for (i = 0; i < kinfo->num_tc; i++) { + v_tc_info = &kinfo->tc_info[i]; + for (j = 0; j < v_tc_info->tqp_count; j++) { + struct hnae3_queue *q = tqp[v_tc_info->tqp_offset + j]; + + ret = hclge_tm_q_to_qs_map_cfg(hdev, + hclge_get_queue_id(q), + vport->qs_offset + i); + if (ret) + return ret; + } + } + + return 0; +} + +static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + int ret; + u32 i; + + if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) { + /* Cfg qs -> pri mapping, one by one mapping */ + for (i = 0; i < hdev->tm_info.num_tc; i++) { + ret = hclge_tm_qs_to_pri_map_cfg(hdev, i, i); + if (ret) + return ret; + } + } else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) { + int k; + /* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */ + for (k = 0; k < hdev->num_alloc_vport; k++) + for (i = 0; i < HNAE3_MAX_TC; i++) { + ret = hclge_tm_qs_to_pri_map_cfg( + hdev, vport[k].qs_offset + i, k); + if (ret) + return ret; + } + } else { + return -EINVAL; + } + + /* Cfg q -> 
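Editorial aside, not part of the patch: taken together, the cfg helpers in this file wire up a transmit scheduling tree, roughly

	queue (NQ) -> queue set (QS) -> priority -> priority group (PG) -> port

and the two tx_sch_mode branches below only differ in how QS and priority are tied:

	TC-based:    qs[i] -> pri[i] for each of the num_tc queue sets
	VNET-based:  qs[vport.qs_offset + tc] -> pri of that vport, with up to
	             HNAE3_MAX_TC queue sets funnelling into one per-vport priority

The shaper, DWRR and scheduling-mode commands later in this file are then applied per level of that same tree.
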
qs mapping */ + for (i = 0; i < hdev->num_alloc_vport; i++) { + ret = hclge_vport_q_to_qs_map(hdev, vport); + if (ret) + return ret; + + vport++; + } + + return 0; +} + +static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev) +{ + u8 ir_u, ir_b, ir_s; + int ret; + u32 i; + + for (i = 0; i < hdev->tm_info.num_tc; i++) { + ret = hclge_shaper_para_calc( + hdev->tm_info.tc_info[i].bw_limit, + HCLGE_SHAPER_LVL_PRI, + &ir_b, &ir_u, &ir_s); + if (ret) + return ret; + + ret = hclge_tm_pri_shapping_cfg( + hdev, HCLGE_TM_SHAP_C_BUCKET, i, + 0, 0, 0, HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + if (ret) + return ret; + + ret = hclge_tm_pri_shapping_cfg( + hdev, HCLGE_TM_SHAP_P_BUCKET, i, + ir_b, ir_u, ir_s, HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport) +{ + struct hclge_dev *hdev = vport->back; + u8 ir_u, ir_b, ir_s; + int ret; + + ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF, + &ir_b, &ir_u, &ir_s); + if (ret) + return ret; + + ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, + vport->vport_id, + 0, 0, 0, HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + if (ret) + return ret; + + ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, + vport->vport_id, + ir_b, ir_u, ir_s, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + if (ret) + return ret; + + return 0; +} + +static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hclge_dev *hdev = vport->back; + struct hnae3_tc_info *v_tc_info; + u8 ir_u, ir_b, ir_s; + u32 i; + int ret; + + for (i = 0; i < kinfo->num_tc; i++) { + v_tc_info = &kinfo->tc_info[i]; + ret = hclge_shaper_para_calc( + hdev->tm_info.tc_info[i].bw_limit, + HCLGE_SHAPER_LVL_QSET, + &ir_b, &ir_u, &ir_s); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + int ret; + u32 i; + + /* Need config vport shaper */ + for (i = 0; i < hdev->num_alloc_vport; i++) { + ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport); + if (ret) + return ret; + + ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport); + if (ret) + return ret; + + vport++; + } + + return 0; +} + +static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev) +{ + int ret; + + if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) { + ret = hclge_tm_pri_tc_base_shaper_cfg(hdev); + if (ret) + return ret; + } else { + ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev) +{ + struct hclge_pg_info *pg_info; + u8 dwrr; + int ret; + u32 i; + + for (i = 0; i < hdev->tm_info.num_tc; i++) { + pg_info = + &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid]; + dwrr = pg_info->tc_dwrr[i]; + + ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr); + if (ret) + return ret; + + ret = hclge_tm_qs_weight_cfg(hdev, i, dwrr); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hclge_dev *hdev = vport->back; + int ret; + u8 i; + + /* Vf dwrr */ + ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr); + if (ret) + return ret; + + /* Qset dwrr */ + for (i = 0; i < kinfo->num_tc; i++) { + ret = hclge_tm_qs_weight_cfg( + hdev, 
vport->qs_offset + i, + hdev->tm_info.pg_info[0].tc_dwrr[i]); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + int ret; + u32 i; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport); + if (ret) + return ret; + + vport++; + } + + return 0; +} + +static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev) +{ + int ret; + + if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) { + ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev); + if (ret) + return ret; + } else { + ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_map_cfg(struct hclge_dev *hdev) +{ + int ret; + + ret = hclge_tm_pg_to_pri_map(hdev); + if (ret) + return ret; + + return hclge_tm_pri_q_qs_cfg(hdev); +} + +static int hclge_tm_shaper_cfg(struct hclge_dev *hdev) +{ + int ret; + + ret = hclge_tm_pg_shaper_cfg(hdev); + if (ret) + return ret; + + return hclge_tm_pri_shaper_cfg(hdev); +} + +int hclge_tm_dwrr_cfg(struct hclge_dev *hdev) +{ + int ret; + + ret = hclge_tm_pg_dwrr_cfg(hdev); + if (ret) + return ret; + + return hclge_tm_pri_dwrr_cfg(hdev); +} + +static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev) +{ + int ret; + u8 i; + + /* Only being config on TC-Based scheduler mode */ + if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) + return 0; + + for (i = 0; i < hdev->tm_info.num_pg; i++) { + ret = hclge_tm_pg_schd_mode_cfg(hdev, i); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hclge_dev *hdev = vport->back; + int ret; + u8 i; + + ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id); + if (ret) + return ret; + + for (i = 0; i < kinfo->num_tc; i++) { + ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + int ret; + u8 i; + + if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) { + for (i = 0; i < hdev->tm_info.num_tc; i++) { + ret = hclge_tm_pri_schd_mode_cfg(hdev, i); + if (ret) + return ret; + + ret = hclge_tm_qs_schd_mode_cfg(hdev, i); + if (ret) + return ret; + } + } else { + for (i = 0; i < hdev->num_alloc_vport; i++) { + ret = hclge_tm_schd_mode_vnet_base_cfg(vport); + if (ret) + return ret; + + vport++; + } + } + + return 0; +} + +static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev) +{ + int ret; + + ret = hclge_tm_lvl2_schd_mode_cfg(hdev); + if (ret) + return ret; + + return hclge_tm_lvl34_schd_mode_cfg(hdev); +} + +static int hclge_tm_schd_setup_hw(struct hclge_dev *hdev) +{ + int ret; + + /* Cfg tm mapping */ + ret = hclge_tm_map_cfg(hdev); + if (ret) + return ret; + + /* Cfg tm shaper */ + ret = hclge_tm_shaper_cfg(hdev); + if (ret) + return ret; + + /* Cfg dwrr */ + ret = hclge_tm_dwrr_cfg(hdev); + if (ret) + return ret; + + /* Cfg schd mode for each level schd */ + return hclge_tm_schd_mode_hw(hdev); +} + +int hclge_pause_setup_hw(struct hclge_dev *hdev) +{ + bool en = hdev->tm_info.fc_mode != HCLGE_FC_PFC; + int ret; + u8 i; + + ret = hclge_mac_pause_en_cfg(hdev, en, en); + if (ret) + return ret; + + /* Only DCB-supported dev supports qset back pressure setting */ + if (!hnae3_dev_dcb_supported(hdev)) + return 0; + + for (i = 0; i < hdev->tm_info.num_tc; i++) { + ret = 
hclge_tm_qs_bp_cfg(hdev, i); + if (ret) + return ret; + } + + return hclge_up_to_tc_map(hdev); +} + +int hclge_tm_init_hw(struct hclge_dev *hdev) +{ + int ret; + + if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) && + (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE)) + return -ENOTSUPP; + + ret = hclge_tm_schd_setup_hw(hdev); + if (ret) + return ret; + + ret = hclge_pause_setup_hw(hdev); + if (ret) + return ret; + + return 0; +} + +int hclge_tm_schd_init(struct hclge_dev *hdev) +{ + int ret = hclge_tm_schd_info_init(hdev); + + if (ret) + return ret; + + return hclge_tm_init_hw(hdev); +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h new file mode 100644 index 000000000000..85158b0d73fe --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2016~2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __HCLGE_TM_H +#define __HCLGE_TM_H + +#include + +/* MAC Pause */ +#define HCLGE_TX_MAC_PAUSE_EN_MSK BIT(0) +#define HCLGE_RX_MAC_PAUSE_EN_MSK BIT(1) + +#define HCLGE_TM_PORT_BASE_MODE_MSK BIT(0) + +/* SP or DWRR */ +#define HCLGE_TM_TX_SCHD_DWRR_MSK BIT(0) +#define HCLGE_TM_TX_SCHD_SP_MSK (0xFE) + +struct hclge_pg_to_pri_link_cmd { + u8 pg_id; + u8 rsvd1[3]; + u8 pri_bit_map; +}; + +struct hclge_qs_to_pri_link_cmd { + __le16 qs_id; + __le16 rsvd; + u8 priority; +#define HCLGE_TM_QS_PRI_LINK_VLD_MSK BIT(0) + u8 link_vld; +}; + +struct hclge_nq_to_qs_link_cmd { + __le16 nq_id; + __le16 rsvd; +#define HCLGE_TM_Q_QS_LINK_VLD_MSK BIT(10) + __le16 qset_id; +}; + +struct hclge_pg_weight_cmd { + u8 pg_id; + u8 dwrr; +}; + +struct hclge_priority_weight_cmd { + u8 pri_id; + u8 dwrr; +}; + +struct hclge_qs_weight_cmd { + __le16 qs_id; + u8 dwrr; +}; + +#define HCLGE_TM_SHAP_IR_B_MSK GENMASK(7, 0) +#define HCLGE_TM_SHAP_IR_B_LSH 0 +#define HCLGE_TM_SHAP_IR_U_MSK GENMASK(11, 8) +#define HCLGE_TM_SHAP_IR_U_LSH 8 +#define HCLGE_TM_SHAP_IR_S_MSK GENMASK(15, 12) +#define HCLGE_TM_SHAP_IR_S_LSH 12 +#define HCLGE_TM_SHAP_BS_B_MSK GENMASK(20, 16) +#define HCLGE_TM_SHAP_BS_B_LSH 16 +#define HCLGE_TM_SHAP_BS_S_MSK GENMASK(25, 21) +#define HCLGE_TM_SHAP_BS_S_LSH 21 + +enum hclge_shap_bucket { + HCLGE_TM_SHAP_C_BUCKET = 0, + HCLGE_TM_SHAP_P_BUCKET, +}; + +struct hclge_pri_shapping_cmd { + u8 pri_id; + u8 rsvd[3]; + __le32 pri_shapping_para; +}; + +struct hclge_pg_shapping_cmd { + u8 pg_id; + u8 rsvd[3]; + __le32 pg_shapping_para; +}; + +struct hclge_bp_to_qs_map_cmd { + u8 tc_id; + u8 rsvd[2]; + u8 qs_group_id; + __le32 qs_bit_map; + u32 rsvd1; +}; + +#define hclge_tm_set_field(dest, string, val) \ + hnae_set_field((dest), (HCLGE_TM_SHAP_##string##_MSK), \ + (HCLGE_TM_SHAP_##string##_LSH), val) +#define hclge_tm_get_field(src, string) \ + hnae_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \ + (HCLGE_TM_SHAP_##string##_LSH)) + +int hclge_tm_schd_init(struct hclge_dev *hdev); +int hclge_pause_setup_hw(struct hclge_dev *hdev); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c new file mode 100644 index 000000000000..35369e1c8036 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -0,0 +1,2898 @@ +/* + * Copyright (c) 2016~2017 
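Editorial aside, not part of the patch: the GENMASK/LSH pairs in hclge_tm.h above define how hclge_tm_set_field() packs the five shaper parameters into the 32-bit *_shapping_para word (IR_B bits 7:0, IR_U 11:8, IR_S 15:12, BS_B 20:16, BS_S 25:21). A standalone sketch with the default bucket sizes the callers in hclge_tm.c pass (HCLGE_SHAPER_BS_U_DEF = 1 into bs_b, HCLGE_SHAPER_BS_S_DEF = 4 into bs_s):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t para = 0;

	para |= 126u << 0;   // IR_B, bits 7:0
	para |= 0u << 8;     // IR_U, bits 11:8
	para |= 0u << 12;    // IR_S, bits 15:12
	para |= 1u << 16;    // BS_B, bits 20:16 (callers pass HCLGE_SHAPER_BS_U_DEF)
	para |= 4u << 21;    // BS_S, bits 25:21 (callers pass HCLGE_SHAPER_BS_S_DEF)

	printf("0x%08x\n", para);   // prints 0x0081007e
	return 0;
}
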
Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hnae3.h" +#include "hns3_enet.h" + +const char hns3_driver_name[] = "hns3"; +const char hns3_driver_version[] = VERMAGIC_STRING; +static const char hns3_driver_string[] = + "Hisilicon Ethernet Network Driver for Hip08 Family"; +static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation."; +static struct hnae3_client client; + +/* hns3_pci_tbl - PCI Device ID Table + * + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +static const struct pci_device_id hns3_pci_tbl[] = { + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), + HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), + HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), + HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), + HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), + HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, + /* required last entry */ + {0, } +}; +MODULE_DEVICE_TABLE(pci, hns3_pci_tbl); + +static irqreturn_t hns3_irq_handle(int irq, void *dev) +{ + struct hns3_enet_tqp_vector *tqp_vector = dev; + + napi_schedule(&tqp_vector->napi); + + return IRQ_HANDLED; +} + +static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv) +{ + struct hns3_enet_tqp_vector *tqp_vectors; + unsigned int i; + + for (i = 0; i < priv->vector_num; i++) { + tqp_vectors = &priv->tqp_vector[i]; + + if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED) + continue; + + /* release the irq resource */ + free_irq(tqp_vectors->vector_irq, tqp_vectors); + tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED; + } +} + +static int hns3_nic_init_irq(struct hns3_nic_priv *priv) +{ + struct hns3_enet_tqp_vector *tqp_vectors; + int txrx_int_idx = 0; + int rx_int_idx = 0; + int tx_int_idx = 0; + unsigned int i; + int ret; + + for (i = 0; i < priv->vector_num; i++) { + tqp_vectors = &priv->tqp_vector[i]; + + if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED) + continue; + + if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) { + snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1, + "%s-%s-%d", priv->netdev->name, "TxRx", + txrx_int_idx++); + txrx_int_idx++; + } else if (tqp_vectors->rx_group.ring) { + snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1, + "%s-%s-%d", priv->netdev->name, "Rx", + rx_int_idx++); + } else if (tqp_vectors->tx_group.ring) { + snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1, + "%s-%s-%d", priv->netdev->name, "Tx", + tx_int_idx++); + } else { + /* Skip this unused q_vector */ + continue; + } + + tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0'; + + ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0, + tqp_vectors->name, + tqp_vectors); + if (ret) { + netdev_err(priv->netdev, "request irq(%d) fail\n", + tqp_vectors->vector_irq); + return ret; + } + + tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED; + } + + return 0; +} + +static void hns3_mask_vector_irq(struct 
hns3_enet_tqp_vector *tqp_vector, + u32 mask_en) +{ + writel(mask_en, tqp_vector->mask_addr); +} + +static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector) +{ + napi_enable(&tqp_vector->napi); + + /* enable vector */ + hns3_mask_vector_irq(tqp_vector, 1); +} + +static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector) +{ + /* disable vector */ + hns3_mask_vector_irq(tqp_vector, 0); + + disable_irq(tqp_vector->vector_irq); + napi_disable(&tqp_vector->napi); +} + +static void hns3_set_vector_coalesc_gl(struct hns3_enet_tqp_vector *tqp_vector, + u32 gl_value) +{ + /* this defines the configuration for GL (Interrupt Gap Limiter) + * GL defines inter interrupt gap. + * GL and RL(Rate Limiter) are 2 ways to acheive interrupt coalescing + */ + writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET); + writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET); + writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL2_OFFSET); +} + +static void hns3_set_vector_coalesc_rl(struct hns3_enet_tqp_vector *tqp_vector, + u32 rl_value) +{ + /* this defines the configuration for RL (Interrupt Rate Limiter). + * Rl defines rate of interrupts i.e. number of interrupts-per-second + * GL and RL(Rate Limiter) are 2 ways to acheive interrupt coalescing + */ + writel(rl_value, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET); +} + +static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector) +{ + /* initialize the configuration for interrupt coalescing. + * 1. GL (Interrupt Gap Limiter) + * 2. RL (Interrupt Rate Limiter) + */ + + /* Default :enable interrupt coalesce */ + tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K; + tqp_vector->tx_group.int_gl = HNS3_INT_GL_50K; + hns3_set_vector_coalesc_gl(tqp_vector, HNS3_INT_GL_50K); + /* for now we are disabling Interrupt RL - we + * will re-enable later + */ + hns3_set_vector_coalesc_rl(tqp_vector, 0); + tqp_vector->rx_group.flow_level = HNS3_FLOW_LOW; + tqp_vector->tx_group.flow_level = HNS3_FLOW_LOW; +} + +static int hns3_nic_net_up(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + int i, j; + int ret; + + /* get irq resource for all vectors */ + ret = hns3_nic_init_irq(priv); + if (ret) { + netdev_err(netdev, "hns init irq failed! ret=%d\n", ret); + return ret; + } + + /* enable the vectors */ + for (i = 0; i < priv->vector_num; i++) + hns3_vector_enable(&priv->tqp_vector[i]); + + /* start the ae_dev */ + ret = h->ae_algo->ops->start ? 
h->ae_algo->ops->start(h) : 0; + if (ret) + goto out_start_err; + + return 0; + +out_start_err: + for (j = i - 1; j >= 0; j--) + hns3_vector_disable(&priv->tqp_vector[j]); + + hns3_nic_uninit_irq(priv); + + return ret; +} + +static int hns3_nic_net_open(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + int ret; + + netif_carrier_off(netdev); + + ret = netif_set_real_num_tx_queues(netdev, h->kinfo.num_tqps); + if (ret) { + netdev_err(netdev, + "netif_set_real_num_tx_queues fail, ret=%d!\n", + ret); + return ret; + } + + ret = netif_set_real_num_rx_queues(netdev, h->kinfo.num_tqps); + if (ret) { + netdev_err(netdev, + "netif_set_real_num_rx_queues fail, ret=%d!\n", ret); + return ret; + } + + ret = hns3_nic_net_up(netdev); + if (ret) { + netdev_err(netdev, + "hns net up fail, ret=%d!\n", ret); + return ret; + } + + return 0; +} + +static void hns3_nic_net_down(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + const struct hnae3_ae_ops *ops; + int i; + + /* stop ae_dev */ + ops = priv->ae_handle->ae_algo->ops; + if (ops->stop) + ops->stop(priv->ae_handle); + + /* disable vectors */ + for (i = 0; i < priv->vector_num; i++) + hns3_vector_disable(&priv->tqp_vector[i]); + + /* free irq resources */ + hns3_nic_uninit_irq(priv); +} + +static int hns3_nic_net_stop(struct net_device *netdev) +{ + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); + + hns3_nic_net_down(netdev); + + return 0; +} + +void hns3_set_multicast_list(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + struct netdev_hw_addr *ha = NULL; + + if (h->ae_algo->ops->set_mc_addr) { + netdev_for_each_mc_addr(ha, netdev) + if (h->ae_algo->ops->set_mc_addr(h, ha->addr)) + netdev_err(netdev, "set multicast fail\n"); + } +} + +static int hns3_nic_uc_sync(struct net_device *netdev, + const unsigned char *addr) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + if (h->ae_algo->ops->add_uc_addr) + return h->ae_algo->ops->add_uc_addr(h, addr); + + return 0; +} + +static int hns3_nic_uc_unsync(struct net_device *netdev, + const unsigned char *addr) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + if (h->ae_algo->ops->rm_uc_addr) + return h->ae_algo->ops->rm_uc_addr(h, addr); + + return 0; +} + +static int hns3_nic_mc_sync(struct net_device *netdev, + const unsigned char *addr) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + if (h->ae_algo->ops->add_mc_addr) + return h->ae_algo->ops->add_mc_addr(h, addr); + + return 0; +} + +static int hns3_nic_mc_unsync(struct net_device *netdev, + const unsigned char *addr) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + if (h->ae_algo->ops->rm_mc_addr) + return h->ae_algo->ops->rm_mc_addr(h, addr); + + return 0; +} + +void hns3_nic_set_rx_mode(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + if (h->ae_algo->ops->set_promisc_mode) { + if (netdev->flags & IFF_PROMISC) + h->ae_algo->ops->set_promisc_mode(h, 1); + else + h->ae_algo->ops->set_promisc_mode(h, 0); + } + if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync)) + netdev_err(netdev, "sync uc address fail\n"); + if (netdev->flags & IFF_MULTICAST) + if 
(__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync)) + netdev_err(netdev, "sync mc address fail\n"); +} + +static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, + u16 *mss, u32 *type_cs_vlan_tso) +{ + u32 l4_offset, hdr_len; + union l3_hdr_info l3; + union l4_hdr_info l4; + u32 l4_paylen; + int ret; + + if (!skb_is_gso(skb)) + return 0; + + ret = skb_cow_head(skb, 0); + if (ret) + return ret; + + l3.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + /* Software should clear the IPv4's checksum field when tso is + * needed. + */ + if (l3.v4->version == 4) + l3.v4->check = 0; + + /* tunnel packet.*/ + if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | + SKB_GSO_GRE_CSUM | + SKB_GSO_UDP_TUNNEL | + SKB_GSO_UDP_TUNNEL_CSUM)) { + if ((!(skb_shinfo(skb)->gso_type & + SKB_GSO_PARTIAL)) && + (skb_shinfo(skb)->gso_type & + SKB_GSO_UDP_TUNNEL_CSUM)) { + /* Software should clear the udp's checksum + * field when tso is needed. + */ + l4.udp->check = 0; + } + /* reset l3&l4 pointers from outer to inner headers */ + l3.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); + + /* Software should clear the IPv4's checksum field when + * tso is needed. + */ + if (l3.v4->version == 4) + l3.v4->check = 0; + } + + /* normal or tunnel packet*/ + l4_offset = l4.hdr - skb->data; + hdr_len = (l4.tcp->doff * 4) + l4_offset; + + /* remove payload length from inner pseudo checksum when tso*/ + l4_paylen = skb->len - l4_offset; + csum_replace_by_diff(&l4.tcp->check, + (__force __wsum)htonl(l4_paylen)); + + /* find the txbd field values */ + *paylen = skb->len - hdr_len; + hnae_set_bit(*type_cs_vlan_tso, + HNS3_TXD_TSO_B, 1); + + /* get MSS for TSO */ + *mss = skb_shinfo(skb)->gso_size; + + return 0; +} + +static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto, + u8 *il4_proto) +{ + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } l3; + unsigned char *l4_hdr; + unsigned char *exthdr; + u8 l4_proto_tmp; + __be16 frag_off; + + /* find outer header point */ + l3.hdr = skb_network_header(skb); + l4_hdr = skb_inner_transport_header(skb); + + if (skb->protocol == htons(ETH_P_IPV6)) { + exthdr = l3.hdr + sizeof(*l3.v6); + l4_proto_tmp = l3.v6->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto_tmp, &frag_off); + } else if (skb->protocol == htons(ETH_P_IP)) { + l4_proto_tmp = l3.v4->protocol; + } else { + return -EINVAL; + } + + *ol4_proto = l4_proto_tmp; + + /* tunnel packet */ + if (!skb->encapsulation) { + *il4_proto = 0; + return 0; + } + + /* find inner header point */ + l3.hdr = skb_inner_network_header(skb); + l4_hdr = skb_inner_transport_header(skb); + + if (l3.v6->version == 6) { + exthdr = l3.hdr + sizeof(*l3.v6); + l4_proto_tmp = l3.v6->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto_tmp, &frag_off); + } else if (l3.v4->version == 4) { + l4_proto_tmp = l3.v4->protocol; + } + + *il4_proto = l4_proto_tmp; + + return 0; +} + +static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, + u8 il4_proto, u32 *type_cs_vlan_tso, + u32 *ol_type_vlan_len_msec) +{ + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } l3; + union { + struct tcphdr *tcp; + struct udphdr *udp; + struct gre_base_hdr *gre; + unsigned char *hdr; + } l4; + unsigned char *l2_hdr; + u8 l4_proto = ol4_proto; + u32 ol2_len; + u32 ol3_len; + u32 ol4_len; + u32 l2_len; + u32 l3_len; + + l3.hdr = skb_network_header(skb); + l4.hdr = 
skb_transport_header(skb); + + /* compute L2 header size for normal packet, defined in 2 Bytes */ + l2_len = l3.hdr - skb->data; + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, + HNS3_TXD_L2LEN_S, l2_len >> 1); + + /* tunnel packet*/ + if (skb->encapsulation) { + /* compute OL2 header size, defined in 2 Bytes */ + ol2_len = l2_len; + hnae_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_L2LEN_M, + HNS3_TXD_L2LEN_S, ol2_len >> 1); + + /* compute OL3 header size, defined in 4 Bytes */ + ol3_len = l4.hdr - l3.hdr; + hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M, + HNS3_TXD_L3LEN_S, ol3_len >> 2); + + /* MAC in UDP, MAC in GRE (0x6558)*/ + if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) { + /* switch MAC header ptr from outer to inner header.*/ + l2_hdr = skb_inner_mac_header(skb); + + /* compute OL4 header size, defined in 4 Bytes. */ + ol4_len = l2_hdr - l4.hdr; + hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M, + HNS3_TXD_L4LEN_S, ol4_len >> 2); + + /* switch IP header ptr from outer to inner header */ + l3.hdr = skb_inner_network_header(skb); + + /* compute inner l2 header size, defined in 2 Bytes. */ + l2_len = l3.hdr - l2_hdr; + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, + HNS3_TXD_L2LEN_S, l2_len >> 1); + } else { + /* skb packet types not supported by hardware, + * txbd len fild doesn't be filled. + */ + return; + } + + /* switch L4 header pointer from outer to inner */ + l4.hdr = skb_inner_transport_header(skb); + + l4_proto = il4_proto; + } + + /* compute inner(/normal) L3 header size, defined in 4 Bytes */ + l3_len = l4.hdr - l3.hdr; + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M, + HNS3_TXD_L3LEN_S, l3_len >> 2); + + /* compute inner(/normal) L4 header size, defined in 4 Bytes */ + switch (l4_proto) { + case IPPROTO_TCP: + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, + HNS3_TXD_L4LEN_S, l4.tcp->doff); + break; + case IPPROTO_SCTP: + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, + HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2)); + break; + case IPPROTO_UDP: + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, + HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2)); + break; + default: + /* skb packet types not supported by hardware, + * txbd len fild doesn't be filled. 
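Editorial worked example, not part of the patch, of the length-unit encoding used in hns3_set_l2l3l4_len() for a plain IPv4/TCP frame over untagged Ethernet (no tunnel, no options):

	L2LEN = 14 >> 1 = 7      Ethernet header, in 2-byte units
	L3LEN = 20 >> 2 = 5      IPv4 header, in 4-byte units
	L4LEN = doff    = 5      TCP header, doff is already in 4-byte words

which is also why the TCP case writes l4.tcp->doff unshifted while the UDP case writes sizeof(struct udphdr) >> 2 == 2.
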
+ */ + return; + } +} + +static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, + u8 il4_proto, u32 *type_cs_vlan_tso, + u32 *ol_type_vlan_len_msec) +{ + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } l3; + u32 l4_proto = ol4_proto; + + l3.hdr = skb_network_header(skb); + + /* define OL3 type and tunnel type(OL4).*/ + if (skb->encapsulation) { + /* define outer network header type.*/ + if (skb->protocol == htons(ETH_P_IP)) { + if (skb_is_gso(skb)) + hnae_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV4_CSUM); + else + hnae_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV4_NO_CSUM); + + } else if (skb->protocol == htons(ETH_P_IPV6)) { + hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M, + HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6); + } + + /* define tunnel type(OL4).*/ + switch (l4_proto) { + case IPPROTO_UDP: + hnae_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_TUNTYPE_M, + HNS3_TXD_TUNTYPE_S, + HNS3_TUN_MAC_IN_UDP); + break; + case IPPROTO_GRE: + hnae_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_TUNTYPE_M, + HNS3_TXD_TUNTYPE_S, + HNS3_TUN_NVGRE); + break; + default: + /* drop the skb tunnel packet if hardware don't support, + * because hardware can't calculate csum when TSO. + */ + if (skb_is_gso(skb)) + return -EDOM; + + /* the stack computes the IP header already, + * driver calculate l4 checksum when not TSO. + */ + skb_checksum_help(skb); + return 0; + } + + l3.hdr = skb_inner_network_header(skb); + l4_proto = il4_proto; + } + + if (l3.v4->version == 4) { + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, + HNS3_TXD_L3T_S, HNS3_L3T_IPV4); + + /* the stack computes the IP header already, the only time we + * need the hardware to recompute it is in the case of TSO. + */ + if (skb_is_gso(skb)) + hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1); + + hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); + } else if (l3.v6->version == 6) { + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, + HNS3_TXD_L3T_S, HNS3_L3T_IPV6); + hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); + } + + switch (l4_proto) { + case IPPROTO_TCP: + hnae_set_field(*type_cs_vlan_tso, + HNS3_TXD_L4T_M, + HNS3_TXD_L4T_S, + HNS3_L4T_TCP); + break; + case IPPROTO_UDP: + hnae_set_field(*type_cs_vlan_tso, + HNS3_TXD_L4T_M, + HNS3_TXD_L4T_S, + HNS3_L4T_UDP); + break; + case IPPROTO_SCTP: + hnae_set_field(*type_cs_vlan_tso, + HNS3_TXD_L4T_M, + HNS3_TXD_L4T_S, + HNS3_L4T_SCTP); + break; + default: + /* drop the skb tunnel packet if hardware don't support, + * because hardware can't calculate csum when TSO. + */ + if (skb_is_gso(skb)) + return -EDOM; + + /* the stack computes the IP header already, + * driver calculate l4 checksum when not TSO. 
+ */ + skb_checksum_help(skb); + return 0; + } + + return 0; +} + +static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end) +{ + /* Config bd buffer end */ + hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M, + HNS3_TXD_BDTYPE_M, 0); + hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end); + hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1); + hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 1); +} + +static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, + int size, dma_addr_t dma, int frag_end, + enum hns_desc_type type) +{ + struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; + struct hns3_desc *desc = &ring->desc[ring->next_to_use]; + u32 ol_type_vlan_len_msec = 0; + u16 bdtp_fe_sc_vld_ra_ri = 0; + u32 type_cs_vlan_tso = 0; + struct sk_buff *skb; + u32 paylen = 0; + u16 mss = 0; + __be16 protocol; + u8 ol4_proto; + u8 il4_proto; + int ret; + + /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */ + desc_cb->priv = priv; + desc_cb->length = size; + desc_cb->dma = dma; + desc_cb->type = type; + + /* now, fill the descriptor */ + desc->addr = cpu_to_le64(dma); + desc->tx.send_size = cpu_to_le16((u16)size); + hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end); + desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri); + + if (type == DESC_TYPE_SKB) { + skb = (struct sk_buff *)priv; + paylen = cpu_to_le16(skb->len); + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + skb_reset_mac_len(skb); + protocol = skb->protocol; + + /* vlan packet*/ + if (protocol == htons(ETH_P_8021Q)) { + protocol = vlan_get_protocol(skb); + skb->protocol = protocol; + } + ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); + if (ret) + return ret; + hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto, + &type_cs_vlan_tso, + &ol_type_vlan_len_msec); + ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto, + &type_cs_vlan_tso, + &ol_type_vlan_len_msec); + if (ret) + return ret; + + ret = hns3_set_tso(skb, &paylen, &mss, + &type_cs_vlan_tso); + if (ret) + return ret; + } + + /* Set txbd */ + desc->tx.ol_type_vlan_len_msec = + cpu_to_le32(ol_type_vlan_len_msec); + desc->tx.type_cs_vlan_tso_len = + cpu_to_le32(type_cs_vlan_tso); + desc->tx.paylen = cpu_to_le16(paylen); + desc->tx.mss = cpu_to_le16(mss); + } + + /* move ring pointer to next.*/ + ring_ptr_move_fw(ring, next_to_use); + + return 0; +} + +static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv, + int size, dma_addr_t dma, int frag_end, + enum hns_desc_type type) +{ + unsigned int frag_buf_num; + unsigned int k; + int sizeoflast; + int ret; + + frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; + sizeoflast = size % HNS3_MAX_BD_SIZE; + sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE; + + /* When the frag size is bigger than hardware, split this frag */ + for (k = 0; k < frag_buf_num; k++) { + ret = hns3_fill_desc(ring, priv, + (k == frag_buf_num - 1) ? + sizeoflast : HNS3_MAX_BD_SIZE, + dma + HNS3_MAX_BD_SIZE * k, + frag_end && (k == frag_buf_num - 1) ? 1 : 0, + (type == DESC_TYPE_SKB && !k) ? 
+ DESC_TYPE_SKB : DESC_TYPE_PAGE); + if (ret) + return ret; + } + + return 0; +} + +static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum, + struct hns3_enet_ring *ring) +{ + struct sk_buff *skb = *out_skb; + struct skb_frag_struct *frag; + int bdnum_for_frag; + int frag_num; + int buf_num; + int size; + int i; + + size = skb_headlen(skb); + buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; + + frag_num = skb_shinfo(skb)->nr_frags; + for (i = 0; i < frag_num; i++) { + frag = &skb_shinfo(skb)->frags[i]; + size = skb_frag_size(frag); + bdnum_for_frag = + (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; + if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG) + return -ENOMEM; + + buf_num += bdnum_for_frag; + } + + if (buf_num > ring_space(ring)) + return -EBUSY; + + *bnum = buf_num; + return 0; +} + +static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum, + struct hns3_enet_ring *ring) +{ + struct sk_buff *skb = *out_skb; + int buf_num; + + /* No. of segments (plus a header) */ + buf_num = skb_shinfo(skb)->nr_frags + 1; + + if (buf_num > ring_space(ring)) + return -EBUSY; + + *bnum = buf_num; + + return 0; +} + +static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig) +{ + struct device *dev = ring_to_dev(ring); + unsigned int i; + + for (i = 0; i < ring->desc_num; i++) { + /* check if this is where we started */ + if (ring->next_to_use == next_to_use_orig) + break; + + /* unmap the descriptor dma address */ + if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB) + dma_unmap_single(dev, + ring->desc_cb[ring->next_to_use].dma, + ring->desc_cb[ring->next_to_use].length, + DMA_TO_DEVICE); + else + dma_unmap_page(dev, + ring->desc_cb[ring->next_to_use].dma, + ring->desc_cb[ring->next_to_use].length, + DMA_TO_DEVICE); + + /* rollback one */ + ring_ptr_move_bw(ring, next_to_use); + } +} + +static netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, + struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hns3_nic_ring_data *ring_data = + &tx_ring_data(priv, skb->queue_mapping); + struct hns3_enet_ring *ring = ring_data->ring; + struct device *dev = priv->dev; + struct netdev_queue *dev_queue; + struct skb_frag_struct *frag; + int next_to_use_head; + int next_to_use_frag; + dma_addr_t dma; + int buf_num; + int seg_num; + int size; + int ret; + int i; + + /* Prefetch the data used later */ + prefetch(skb->data); + + switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) { + case -EBUSY: + u64_stats_update_begin(&ring->syncp); + ring->stats.tx_busy++; + u64_stats_update_end(&ring->syncp); + + goto out_net_tx_busy; + case -ENOMEM: + u64_stats_update_begin(&ring->syncp); + ring->stats.sw_err_cnt++; + u64_stats_update_end(&ring->syncp); + netdev_err(netdev, "no memory to xmit!\n"); + + goto out_err_tx_ok; + default: + break; + } + + /* No. of segments (plus a header) */ + seg_num = skb_shinfo(skb)->nr_frags + 1; + /* Fill the first part */ + size = skb_headlen(skb); + + next_to_use_head = ring->next_to_use; + + dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma)) { + netdev_err(netdev, "TX head DMA map failed\n"); + ring->stats.sw_err_cnt++; + goto out_err_tx_ok; + } + + ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 
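Editorial aside, not part of the patch: hns3_fill_desc_tso() and hns3_nic_maybe_stop_tso() above share the same splitting arithmetic - an oversized buffer is cut into HNS3_MAX_BD_SIZE chunks plus a remainder. A standalone sketch of that arithmetic; 65535 is only a stand-in here, the real HNS3_MAX_BD_SIZE is defined outside this hunk:

#include <stdio.h>

#define MAX_BD_SIZE 65535U	/* stand-in for HNS3_MAX_BD_SIZE (defined elsewhere) */

int main(void)
{
	unsigned int size = 150000;	/* one oversized buffer */
	unsigned int num = (size + MAX_BD_SIZE - 1) / MAX_BD_SIZE;
	unsigned int last = size % MAX_BD_SIZE ? size % MAX_BD_SIZE : MAX_BD_SIZE;

	/* 3 BDs of 65535 + 65535 + 18930 bytes; when the linear head is being
	 * filled, only its first chunk keeps DESC_TYPE_SKB, every other BD is
	 * DESC_TYPE_PAGE.
	 */
	printf("%u BDs, last carries %u bytes\n", num, last);
	return 0;
}
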
1 : 0, + DESC_TYPE_SKB); + if (ret) + goto head_dma_map_err; + + next_to_use_frag = ring->next_to_use; + /* Fill the fragments */ + for (i = 1; i < seg_num; i++) { + frag = &skb_shinfo(skb)->frags[i - 1]; + size = skb_frag_size(frag); + dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma)) { + netdev_err(netdev, "TX frag(%d) DMA map failed\n", i); + ring->stats.sw_err_cnt++; + goto frag_dma_map_err; + } + ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma, + seg_num - 1 == i ? 1 : 0, + DESC_TYPE_PAGE); + + if (ret) + goto frag_dma_map_err; + } + + /* Complete translate all packets */ + dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index); + netdev_tx_sent_queue(dev_queue, skb->len); + + wmb(); /* Commit all data before submit */ + + hnae_queue_xmit(ring->tqp, buf_num); + + return NETDEV_TX_OK; + +frag_dma_map_err: + hns_nic_dma_unmap(ring, next_to_use_frag); + +head_dma_map_err: + hns_nic_dma_unmap(ring, next_to_use_head); + +out_err_tx_ok: + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + +out_net_tx_busy: + netif_stop_subqueue(netdev, ring_data->queue_index); + smp_mb(); /* Commit all data before submit */ + + return NETDEV_TX_BUSY; +} + +static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + struct sockaddr *mac_addr = p; + int ret; + + if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data)) + return -EADDRNOTAVAIL; + + ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data); + if (ret) { + netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret); + return ret; + } + + ether_addr_copy(netdev->dev_addr, mac_addr->sa_data); + + return 0; +} + +static int hns3_nic_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + + if (features & (NETIF_F_TSO | NETIF_F_TSO6)) { + priv->ops.fill_desc = hns3_fill_desc_tso; + priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso; + } else { + priv->ops.fill_desc = hns3_fill_desc; + priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx; + } + + netdev->features = features; + return 0; +} + +static void +hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + int queue_num = priv->ae_handle->kinfo.num_tqps; + struct hns3_enet_ring *ring; + unsigned int start; + unsigned int idx; + u64 tx_bytes = 0; + u64 rx_bytes = 0; + u64 tx_pkts = 0; + u64 rx_pkts = 0; + + for (idx = 0; idx < queue_num; idx++) { + /* fetch the tx stats */ + ring = priv->ring_data[idx].ring; + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + tx_bytes += ring->stats.tx_bytes; + tx_pkts += ring->stats.tx_pkts; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + + /* fetch the rx stats */ + ring = priv->ring_data[idx + queue_num].ring; + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + rx_bytes += ring->stats.rx_bytes; + rx_pkts += ring->stats.rx_pkts; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + } + + stats->tx_bytes = tx_bytes; + stats->tx_packets = tx_pkts; + stats->rx_bytes = rx_bytes; + stats->rx_packets = rx_pkts; + + stats->rx_errors = netdev->stats.rx_errors; + stats->multicast = netdev->stats.multicast; + stats->rx_length_errors = netdev->stats.rx_length_errors; + stats->rx_crc_errors = netdev->stats.rx_crc_errors; + stats->rx_missed_errors = netdev->stats.rx_missed_errors; + + stats->tx_errors 
= netdev->stats.tx_errors; + stats->rx_dropped = netdev->stats.rx_dropped; + stats->tx_dropped = netdev->stats.tx_dropped; + stats->collisions = netdev->stats.collisions; + stats->rx_over_errors = netdev->stats.rx_over_errors; + stats->rx_frame_errors = netdev->stats.rx_frame_errors; + stats->rx_fifo_errors = netdev->stats.rx_fifo_errors; + stats->tx_aborted_errors = netdev->stats.tx_aborted_errors; + stats->tx_carrier_errors = netdev->stats.tx_carrier_errors; + stats->tx_fifo_errors = netdev->stats.tx_fifo_errors; + stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors; + stats->tx_window_errors = netdev->stats.tx_window_errors; + stats->rx_compressed = netdev->stats.rx_compressed; + stats->tx_compressed = netdev->stats.tx_compressed; +} + +static void hns3_add_tunnel_port(struct net_device *netdev, u16 port, + enum hns3_udp_tnl_type type) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type]; + struct hnae3_handle *h = priv->ae_handle; + + if (udp_tnl->used && udp_tnl->dst_port == port) { + udp_tnl->used++; + return; + } + + if (udp_tnl->used) { + netdev_warn(netdev, + "UDP tunnel [%d], port [%d] offload\n", type, port); + return; + } + + udp_tnl->dst_port = port; + udp_tnl->used = 1; + /* TBD send command to hardware to add port */ + if (h->ae_algo->ops->add_tunnel_udp) + h->ae_algo->ops->add_tunnel_udp(h, port); +} + +static void hns3_del_tunnel_port(struct net_device *netdev, u16 port, + enum hns3_udp_tnl_type type) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type]; + struct hnae3_handle *h = priv->ae_handle; + + if (!udp_tnl->used || udp_tnl->dst_port != port) { + netdev_warn(netdev, + "Invalid UDP tunnel port %d\n", port); + return; + } + + udp_tnl->used--; + if (udp_tnl->used) + return; + + udp_tnl->dst_port = 0; + /* TBD send command to hardware to del port */ + if (h->ae_algo->ops->del_tunnel_udp) + h->ae_algo->ops->del_tunnel_udp(h, port); +} + +/* hns3_nic_udp_tunnel_add - Get notifiacetion about UDP tunnel ports + * @netdev: This physical ports's netdev + * @ti: Tunnel information + */ +static void hns3_nic_udp_tunnel_add(struct net_device *netdev, + struct udp_tunnel_info *ti) +{ + u16 port_n = ntohs(ti->port); + + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN); + break; + case UDP_TUNNEL_TYPE_GENEVE: + hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE); + break; + default: + netdev_err(netdev, "unsupported tunnel type %d\n", ti->type); + break; + } +} + +static void hns3_nic_udp_tunnel_del(struct net_device *netdev, + struct udp_tunnel_info *ti) +{ + u16 port_n = ntohs(ti->port); + + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN); + break; + case UDP_TUNNEL_TYPE_GENEVE: + hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE); + break; + default: + break; + } +} + +static int hns3_setup_tc(struct net_device *netdev, u8 tc) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + struct hnae3_knic_private_info *kinfo = &h->kinfo; + unsigned int i; + int ret; + + if (tc > HNAE3_MAX_TC) + return -EINVAL; + + if (kinfo->num_tc == tc) + return 0; + + if (!netdev) + return -EINVAL; + + if (!tc) { + netdev_reset_tc(netdev); + return 0; + } + + /* Set num_tc for netdev */ + ret = netdev_set_num_tc(netdev, tc); + if (ret) + return ret; + + /* Set per TC queues for the VSI */ + for 
(i = 0; i < HNAE3_MAX_TC; i++) { + if (kinfo->tc_info[i].enable) + netdev_set_tc_queue(netdev, + kinfo->tc_info[i].tc, + kinfo->tc_info[i].tqp_count, + kinfo->tc_info[i].tqp_offset); + } + + return 0; +} + +static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + struct tc_mqprio_qopt *mqprio = type_data; + + if (type != TC_SETUP_MQPRIO) + return -EOPNOTSUPP; + + return hns3_setup_tc(dev, mqprio->num_tc); +} + +static int hns3_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + int ret = -EIO; + + if (h->ae_algo->ops->set_vlan_filter) + ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false); + + return ret; +} + +static int hns3_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + int ret = -EIO; + + if (h->ae_algo->ops->set_vlan_filter) + ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true); + + return ret; +} + +static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, + u8 qos, __be16 vlan_proto) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + int ret = -EIO; + + if (h->ae_algo->ops->set_vf_vlan_filter) + ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan, + qos, vlan_proto); + + return ret; +} + +static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + bool if_running = netif_running(netdev); + int ret; + + if (!h->ae_algo->ops->set_mtu) + return -EOPNOTSUPP; + + /* if this was called with netdev up then bring netdevice down */ + if (if_running) { + (void)hns3_nic_net_stop(netdev); + msleep(100); + } + + ret = h->ae_algo->ops->set_mtu(h, new_mtu); + if (ret) { + netdev_err(netdev, "failed to change MTU in hardware %d\n", + ret); + return ret; + } + + /* if the netdev was running earlier, bring it up again */ + if (if_running && hns3_nic_net_open(netdev)) + ret = -EINVAL; + + return ret; +} + +static const struct net_device_ops hns3_nic_netdev_ops = { + .ndo_open = hns3_nic_net_open, + .ndo_stop = hns3_nic_net_stop, + .ndo_start_xmit = hns3_nic_net_xmit, + .ndo_set_mac_address = hns3_nic_net_set_mac_address, + .ndo_change_mtu = hns3_nic_change_mtu, + .ndo_set_features = hns3_nic_set_features, + .ndo_get_stats64 = hns3_nic_get_stats64, + .ndo_setup_tc = hns3_nic_setup_tc, + .ndo_set_rx_mode = hns3_nic_set_rx_mode, + .ndo_udp_tunnel_add = hns3_nic_udp_tunnel_add, + .ndo_udp_tunnel_del = hns3_nic_udp_tunnel_del, + .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid, + .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan, +}; + +/* hns3_probe - Device initialization routine + * @pdev: PCI device information struct + * @ent: entry in hns3_pci_tbl + * + * hns3_probe initializes a PF identified by a pci_dev structure. + * The OS initialization, configuring of the PF private structure, + * and a hardware reset occur. 
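+ * Note that most of that work is driven from hnae3_register_ae_dev():
+ * this routine itself only allocates the hnae3_ae_dev, records the PCI
+ * device and driver data, and hands the structure to the HNAE3 framework.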
+ * + * Returns 0 on success, negative on failure + */ +static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct hnae3_ae_dev *ae_dev; + int ret; + + ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), + GFP_KERNEL); + if (!ae_dev) { + ret = -ENOMEM; + return ret; + } + + ae_dev->pdev = pdev; + ae_dev->flag = ent->driver_data; + ae_dev->dev_type = HNAE3_DEV_KNIC; + pci_set_drvdata(pdev, ae_dev); + + return hnae3_register_ae_dev(ae_dev); +} + +/* hns3_remove - Device removal routine + * @pdev: PCI device information struct + */ +static void hns3_remove(struct pci_dev *pdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + + hnae3_unregister_ae_dev(ae_dev); + + devm_kfree(&pdev->dev, ae_dev); + + pci_set_drvdata(pdev, NULL); +} + +static struct pci_driver hns3_driver = { + .name = hns3_driver_name, + .id_table = hns3_pci_tbl, + .probe = hns3_probe, + .remove = hns3_remove, +}; + +/* set default feature to hns3 */ +static void hns3_set_default_feature(struct net_device *netdev) +{ + netdev->priv_flags |= IFF_UNICAST_FLT; + + netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | + NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | + NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; + + netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; + + netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; + + netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | + NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | + NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; + + netdev->vlan_features |= + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | + NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | + NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | + NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; + + netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | + NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | + NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; +} + +static int hns3_alloc_buffer(struct hns3_enet_ring *ring, + struct hns3_desc_cb *cb) +{ + unsigned int order = hnae_page_order(ring); + struct page *p; + + p = dev_alloc_pages(order); + if (!p) + return -ENOMEM; + + cb->priv = p; + cb->page_offset = 0; + cb->reuse_flag = 0; + cb->buf = page_address(p); + cb->length = hnae_page_size(ring); + cb->type = DESC_TYPE_PAGE; + + memset(cb->buf, 0, cb->length); + + return 0; +} + +static void hns3_free_buffer(struct hns3_enet_ring *ring, + struct hns3_desc_cb *cb) +{ + if (cb->type == DESC_TYPE_SKB) + dev_kfree_skb_any((struct sk_buff *)cb->priv); + else if (!HNAE3_IS_TX_RING(ring)) + put_page((struct page *)cb->priv); + memset(cb, 0, sizeof(*cb)); +} + +static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) +{ + cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0, + cb->length, ring_to_dma_dir(ring)); + + if (dma_mapping_error(ring_to_dev(ring), cb->dma)) + return -EIO; + + return 0; +} + +static void hns3_unmap_buffer(struct hns3_enet_ring *ring, + struct hns3_desc_cb *cb) +{ + if (cb->type == DESC_TYPE_SKB) + dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length, + ring_to_dma_dir(ring)); + else + dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length, + ring_to_dma_dir(ring)); 
+} + +static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i) +{ + hns3_unmap_buffer(ring, &ring->desc_cb[i]); + ring->desc[i].addr = 0; +} + +static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i) +{ + struct hns3_desc_cb *cb = &ring->desc_cb[i]; + + if (!ring->desc_cb[i].dma) + return; + + hns3_buffer_detach(ring, i); + hns3_free_buffer(ring, cb); +} + +static void hns3_free_buffers(struct hns3_enet_ring *ring) +{ + int i; + + for (i = 0; i < ring->desc_num; i++) + hns3_free_buffer_detach(ring, i); +} + +/* free desc along with its attached buffer */ +static void hns3_free_desc(struct hns3_enet_ring *ring) +{ + hns3_free_buffers(ring); + + dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr, + ring->desc_num * sizeof(ring->desc[0]), + DMA_BIDIRECTIONAL); + ring->desc_dma_addr = 0; + kfree(ring->desc); + ring->desc = NULL; +} + +static int hns3_alloc_desc(struct hns3_enet_ring *ring) +{ + int size = ring->desc_num * sizeof(ring->desc[0]); + + ring->desc = kzalloc(size, GFP_KERNEL); + if (!ring->desc) + return -ENOMEM; + + ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc, + size, DMA_BIDIRECTIONAL); + if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) { + ring->desc_dma_addr = 0; + kfree(ring->desc); + ring->desc = NULL; + return -ENOMEM; + } + + return 0; +} + +static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring, + struct hns3_desc_cb *cb) +{ + int ret; + + ret = hns3_alloc_buffer(ring, cb); + if (ret) + goto out; + + ret = hns3_map_buffer(ring, cb); + if (ret) + goto out_with_buf; + + return 0; + +out_with_buf: + hns3_free_buffers(ring); +out: + return ret; +} + +static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i) +{ + int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]); + + if (ret) + return ret; + + ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); + + return 0; +} + +/* Allocate memory for raw pkg, and map with dma */ +static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring) +{ + int i, j, ret; + + for (i = 0; i < ring->desc_num; i++) { + ret = hns3_alloc_buffer_attach(ring, i); + if (ret) + goto out_buffer_fail; + } + + return 0; + +out_buffer_fail: + for (j = i - 1; j >= 0; j--) + hns3_free_buffer_detach(ring, j); + return ret; +} + +/* detach a in-used buffer and replace with a reserved one */ +static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i, + struct hns3_desc_cb *res_cb) +{ + hns3_map_buffer(ring, &ring->desc_cb[i]); + ring->desc_cb[i] = *res_cb; + ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); +} + +static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i) +{ + ring->desc_cb[i].reuse_flag = 0; + ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + + ring->desc_cb[i].page_offset); +} + +static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes, + int *pkts) +{ + struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean]; + + (*pkts) += (desc_cb->type == DESC_TYPE_SKB); + (*bytes) += desc_cb->length; + /* desc_cb will be cleaned, after hnae_free_buffer_detach*/ + hns3_free_buffer_detach(ring, ring->next_to_clean); + + ring_ptr_move_fw(ring, next_to_clean); +} + +static int is_valid_clean_head(struct hns3_enet_ring *ring, int h) +{ + int u = ring->next_to_use; + int c = ring->next_to_clean; + + if (unlikely(h > ring->desc_num)) + return 0; + + return u > c ? 
(h > c && h <= u) : (h > c || h <= u); +} + +int hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) +{ + struct net_device *netdev = ring->tqp->handle->kinfo.netdev; + struct netdev_queue *dev_queue; + int bytes, pkts; + int head; + + head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG); + rmb(); /* Make sure head is ready before touch any data */ + + if (is_ring_empty(ring) || head == ring->next_to_clean) + return 0; /* no data to poll */ + + if (!is_valid_clean_head(ring, head)) { + netdev_err(netdev, "wrong head (%d, %d-%d)\n", head, + ring->next_to_use, ring->next_to_clean); + + u64_stats_update_begin(&ring->syncp); + ring->stats.io_err_cnt++; + u64_stats_update_end(&ring->syncp); + return -EIO; + } + + bytes = 0; + pkts = 0; + while (head != ring->next_to_clean && budget) { + hns3_nic_reclaim_one_desc(ring, &bytes, &pkts); + /* Issue prefetch for next Tx descriptor */ + prefetch(&ring->desc_cb[ring->next_to_clean]); + budget--; + } + + ring->tqp_vector->tx_group.total_bytes += bytes; + ring->tqp_vector->tx_group.total_packets += pkts; + + u64_stats_update_begin(&ring->syncp); + ring->stats.tx_bytes += bytes; + ring->stats.tx_pkts += pkts; + u64_stats_update_end(&ring->syncp); + + dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index); + netdev_tx_completed_queue(dev_queue, pkts, bytes); + + if (unlikely(pkts && netif_carrier_ok(netdev) && + (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. + */ + smp_mb(); + if (netif_tx_queue_stopped(dev_queue)) { + netif_tx_wake_queue(dev_queue); + ring->stats.restart_queue++; + } + } + + return !!budget; +} + +static int hns3_desc_unused(struct hns3_enet_ring *ring) +{ + int ntc = ring->next_to_clean; + int ntu = ring->next_to_use; + + return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu; +} + +static void +hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count) +{ + struct hns3_desc_cb *desc_cb; + struct hns3_desc_cb res_cbs; + int i, ret; + + for (i = 0; i < cleand_count; i++) { + desc_cb = &ring->desc_cb[ring->next_to_use]; + if (desc_cb->reuse_flag) { + u64_stats_update_begin(&ring->syncp); + ring->stats.reuse_pg_cnt++; + u64_stats_update_end(&ring->syncp); + + hns3_reuse_buffer(ring, ring->next_to_use); + } else { + ret = hns3_reserve_buffer_map(ring, &res_cbs); + if (ret) { + u64_stats_update_begin(&ring->syncp); + ring->stats.sw_err_cnt++; + u64_stats_update_end(&ring->syncp); + + netdev_err(ring->tqp->handle->kinfo.netdev, + "hnae reserve buffer map failed.\n"); + break; + } + hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); + } + + ring_ptr_move_fw(ring, next_to_use); + } + + wmb(); /* Make all data has been write before submit */ + writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG); +} + +/* hns3_nic_get_headlen - determine size of header for LRO/GRO + * @data: pointer to the start of the headers + * @max: total length of section to find headers in + * + * This function is meant to determine the length of headers that will + * be recognized by hardware for LRO, GRO, and RSC offloads. The main + * motivation of doing this is to only perform one pull for IPv4 TCP + * packets so that we can do basic things like calculating the gso_size + * based on the average data per packet. 
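+ *
+ * As an illustration, for an untagged IPv4 TCP frame with no IP or TCP
+ * options the walk below stops at ETH_HLEN (14) + 20 (IPv4 header) +
+ * 20 (TCP header) = 54 bytes, which is what the caller then pulls into
+ * the skb linear area.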
+ */ +static unsigned int hns3_nic_get_headlen(unsigned char *data, u32 flag, + unsigned int max_size) +{ + unsigned char *network; + u8 hlen; + + /* This should never happen, but better safe than sorry */ + if (max_size < ETH_HLEN) + return max_size; + + /* Initialize network frame pointer */ + network = data; + + /* Set first protocol and move network header forward */ + network += ETH_HLEN; + + /* Handle any vlan tag if present */ + if (hnae_get_field(flag, HNS3_RXD_VLAN_M, HNS3_RXD_VLAN_S) + == HNS3_RX_FLAG_VLAN_PRESENT) { + if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN)) + return max_size; + + network += VLAN_HLEN; + } + + /* Handle L3 protocols */ + if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S) + == HNS3_RX_FLAG_L3ID_IPV4) { + if ((typeof(max_size))(network - data) > + (max_size - sizeof(struct iphdr))) + return max_size; + + /* Access ihl as a u8 to avoid unaligned access on ia64 */ + hlen = (network[0] & 0x0F) << 2; + + /* Verify hlen meets minimum size requirements */ + if (hlen < sizeof(struct iphdr)) + return network - data; + + /* Record next protocol if header is present */ + } else if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S) + == HNS3_RX_FLAG_L3ID_IPV6) { + if ((typeof(max_size))(network - data) > + (max_size - sizeof(struct ipv6hdr))) + return max_size; + + /* Record next protocol */ + hlen = sizeof(struct ipv6hdr); + } else { + return network - data; + } + + /* Relocate pointer to start of L4 header */ + network += hlen; + + /* Finally sort out TCP/UDP */ + if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S) + == HNS3_RX_FLAG_L4ID_TCP) { + if ((typeof(max_size))(network - data) > + (max_size - sizeof(struct tcphdr))) + return max_size; + + /* Access doff as a u8 to avoid unaligned access on ia64 */ + hlen = (network[12] & 0xF0) >> 2; + + /* Verify hlen meets minimum size requirements */ + if (hlen < sizeof(struct tcphdr)) + return network - data; + + network += hlen; + } else if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S) + == HNS3_RX_FLAG_L4ID_UDP) { + if ((typeof(max_size))(network - data) > + (max_size - sizeof(struct udphdr))) + return max_size; + + network += sizeof(struct udphdr); + } + + /* If everything has gone correctly network should be the + * data section of the packet and will be the end of the header. + * If not then it probably represents the end of the last recognized + * header. 
+ */ + if ((typeof(max_size))(network - data) < max_size) + return network - data; + else + return max_size; +} + +static void hns3_nic_reuse_page(struct sk_buff *skb, int i, + struct hns3_enet_ring *ring, int pull_len, + struct hns3_desc_cb *desc_cb) +{ + struct hns3_desc *desc; + int truesize, size; + int last_offset; + bool twobufs; + + twobufs = ((PAGE_SIZE < 8192) && + hnae_buf_size(ring) == HNS3_BUFFER_SIZE_2048); + + desc = &ring->desc[ring->next_to_clean]; + size = le16_to_cpu(desc->rx.size); + + if (twobufs) { + truesize = hnae_buf_size(ring); + } else { + truesize = ALIGN(size, L1_CACHE_BYTES); + last_offset = hnae_page_size(ring) - hnae_buf_size(ring); + } + + skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len, + size - pull_len, truesize - pull_len); + + /* Avoid re-using remote pages,flag default unreuse */ + if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id())) + return; + + if (twobufs) { + /* If we are only owner of page we can reuse it */ + if (likely(page_count(desc_cb->priv) == 1)) { + /* Flip page offset to other buffer */ + desc_cb->page_offset ^= truesize; + + desc_cb->reuse_flag = 1; + /* bump ref count on page before it is given*/ + get_page(desc_cb->priv); + } + return; + } + + /* Move offset up to the next cache line */ + desc_cb->page_offset += truesize; + + if (desc_cb->page_offset <= last_offset) { + desc_cb->reuse_flag = 1; + /* Bump ref count on page before it is given*/ + get_page(desc_cb->priv); + } +} + +static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, + struct hns3_desc *desc) +{ + struct net_device *netdev = ring->tqp->handle->kinfo.netdev; + int l3_type, l4_type; + u32 bd_base_info; + int ol4_type; + u32 l234info; + + bd_base_info = le32_to_cpu(desc->rx.bd_base_info); + l234info = le32_to_cpu(desc->rx.l234_info); + + skb->ip_summed = CHECKSUM_NONE; + + skb_checksum_none_assert(skb); + + if (!(netdev->features & NETIF_F_RXCSUM)) + return; + + /* check if hardware has done checksum */ + if (!hnae_get_bit(bd_base_info, HNS3_RXD_L3L4P_B)) + return; + + if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L3E_B) || + hnae_get_bit(l234info, HNS3_RXD_L4E_B) || + hnae_get_bit(l234info, HNS3_RXD_OL3E_B) || + hnae_get_bit(l234info, HNS3_RXD_OL4E_B))) { + netdev_err(netdev, "L3/L4 error pkt\n"); + u64_stats_update_begin(&ring->syncp); + ring->stats.l3l4_csum_err++; + u64_stats_update_end(&ring->syncp); + + return; + } + + l3_type = hnae_get_field(l234info, HNS3_RXD_L3ID_M, + HNS3_RXD_L3ID_S); + l4_type = hnae_get_field(l234info, HNS3_RXD_L4ID_M, + HNS3_RXD_L4ID_S); + + ol4_type = hnae_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S); + switch (ol4_type) { + case HNS3_OL4_TYPE_MAC_IN_UDP: + case HNS3_OL4_TYPE_NVGRE: + skb->csum_level = 1; + case HNS3_OL4_TYPE_NO_TUN: + /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */ + if (l3_type == HNS3_L3_TYPE_IPV4 || + (l3_type == HNS3_L3_TYPE_IPV6 && + (l4_type == HNS3_L4_TYPE_UDP || + l4_type == HNS3_L4_TYPE_TCP || + l4_type == HNS3_L4_TYPE_SCTP))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + break; + } +} + +static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, + struct sk_buff **out_skb, int *out_bnum) +{ + struct net_device *netdev = ring->tqp->handle->kinfo.netdev; + struct hns3_desc_cb *desc_cb; + struct hns3_desc *desc; + struct sk_buff *skb; + unsigned char *va; + u32 bd_base_info; + int pull_len; + u32 l234info; + int length; + int bnum; + + desc = &ring->desc[ring->next_to_clean]; + desc_cb = &ring->desc_cb[ring->next_to_clean]; + + prefetch(desc); + + 
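+	/* Pull the fixed fields out of the descriptor first: the packet
+	 * length, the BD base info (valid/end bits) and the L2/L3/L4
+	 * parsing results. The handling below keys off these fields.
+	 */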
length = le16_to_cpu(desc->rx.pkt_len); + bd_base_info = le32_to_cpu(desc->rx.bd_base_info); + l234info = le32_to_cpu(desc->rx.l234_info); + + /* Check valid BD */ + if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B)) + return -EFAULT; + + va = (unsigned char *)desc_cb->buf + desc_cb->page_offset; + + /* Prefetch first cache line of first page + * Idea is to cache few bytes of the header of the packet. Our L1 Cache + * line size is 64B so need to prefetch twice to make it 128B. But in + * actual we can have greater size of caches with 128B Level 1 cache + * lines. In such a case, single fetch would suffice to cache in the + * relevant part of the header. + */ + prefetch(va); +#if L1_CACHE_BYTES < 128 + prefetch(va + L1_CACHE_BYTES); +#endif + + skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi, + HNS3_RX_HEAD_SIZE); + if (unlikely(!skb)) { + netdev_err(netdev, "alloc rx skb fail\n"); + + u64_stats_update_begin(&ring->syncp); + ring->stats.sw_err_cnt++; + u64_stats_update_end(&ring->syncp); + + return -ENOMEM; + } + + prefetchw(skb->data); + + bnum = 1; + if (length <= HNS3_RX_HEAD_SIZE) { + memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long))); + + /* We can reuse buffer as-is, just make sure it is local */ + if (likely(page_to_nid(desc_cb->priv) == numa_node_id())) + desc_cb->reuse_flag = 1; + else /* This page cannot be reused so discard it */ + put_page(desc_cb->priv); + + ring_ptr_move_fw(ring, next_to_clean); + } else { + u64_stats_update_begin(&ring->syncp); + ring->stats.seg_pkt_cnt++; + u64_stats_update_end(&ring->syncp); + + pull_len = hns3_nic_get_headlen(va, l234info, + HNS3_RX_HEAD_SIZE); + memcpy(__skb_put(skb, pull_len), va, + ALIGN(pull_len, sizeof(long))); + + hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb); + ring_ptr_move_fw(ring, next_to_clean); + + while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) { + desc = &ring->desc[ring->next_to_clean]; + desc_cb = &ring->desc_cb[ring->next_to_clean]; + bd_base_info = le32_to_cpu(desc->rx.bd_base_info); + hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb); + ring_ptr_move_fw(ring, next_to_clean); + bnum++; + } + } + + *out_bnum = bnum; + + if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) { + netdev_err(netdev, "no valid bd,%016llx,%016llx\n", + ((u64 *)desc)[0], ((u64 *)desc)[1]); + u64_stats_update_begin(&ring->syncp); + ring->stats.non_vld_descs++; + u64_stats_update_end(&ring->syncp); + + dev_kfree_skb_any(skb); + return -EINVAL; + } + + if (unlikely((!desc->rx.pkt_len) || + hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) { + netdev_err(netdev, "truncated pkt\n"); + u64_stats_update_begin(&ring->syncp); + ring->stats.err_pkt_len++; + u64_stats_update_end(&ring->syncp); + + dev_kfree_skb_any(skb); + return -EFAULT; + } + + if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) { + netdev_err(netdev, "L2 error pkt\n"); + u64_stats_update_begin(&ring->syncp); + ring->stats.l2_err++; + u64_stats_update_end(&ring->syncp); + + dev_kfree_skb_any(skb); + return -EFAULT; + } + + u64_stats_update_begin(&ring->syncp); + ring->stats.rx_pkts++; + ring->stats.rx_bytes += skb->len; + u64_stats_update_end(&ring->syncp); + + ring->tqp_vector->rx_group.total_bytes += skb->len; + + hns3_rx_checksum(ring, skb, desc); + return 0; +} + +static int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget) +{ +#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16 + struct net_device *netdev = ring->tqp->handle->kinfo.netdev; + int recv_pkts, recv_bds, clean_count, err; + int unused_count = hns3_desc_unused(ring); + struct sk_buff *skb = NULL; 
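+	/* num: receive BDs reported ready by the FBDNUM register, later
+	 * reduced by the descriptors we have cleaned but not yet refilled;
+	 * bnum: BDs consumed by the packet currently being assembled.
+	 */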
+ int num, bnum = 0; + + num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG); + rmb(); /* Make sure num taken effect before the other data is touched */ + + recv_pkts = 0, recv_bds = 0, clean_count = 0; + num -= unused_count; + + while (recv_pkts < budget && recv_bds < num) { + /* Reuse or realloc buffers */ + if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) { + hns3_nic_alloc_rx_buffers(ring, + clean_count + unused_count); + clean_count = 0; + unused_count = hns3_desc_unused(ring); + } + + /* Poll one pkt */ + err = hns3_handle_rx_bd(ring, &skb, &bnum); + if (unlikely(!skb)) /* This fault cannot be repaired */ + goto out; + + recv_bds += bnum; + clean_count += bnum; + if (unlikely(err)) { /* Do jump the err */ + recv_pkts++; + continue; + } + + /* Do update ip stack process */ + skb->protocol = eth_type_trans(skb, netdev); + (void)napi_gro_receive(&ring->tqp_vector->napi, skb); + + recv_pkts++; + } + +out: + /* Make all data has been write before submit */ + if (clean_count + unused_count > 0) + hns3_nic_alloc_rx_buffers(ring, + clean_count + unused_count); + + return recv_pkts; +} + +static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group) +{ +#define HNS3_RX_ULTRA_PACKET_RATE 40000 + enum hns3_flow_level_range new_flow_level; + struct hns3_enet_tqp_vector *tqp_vector; + int packets_per_secs; + int bytes_per_usecs; + u16 new_int_gl; + int usecs; + + if (!ring_group->int_gl) + return false; + + if (ring_group->total_packets == 0) { + ring_group->int_gl = HNS3_INT_GL_50K; + ring_group->flow_level = HNS3_FLOW_LOW; + return true; + } + + /* Simple throttlerate management + * 0-10MB/s lower (50000 ints/s) + * 10-20MB/s middle (20000 ints/s) + * 20-1249MB/s high (18000 ints/s) + * > 40000pps ultra (8000 ints/s) + */ + new_flow_level = ring_group->flow_level; + new_int_gl = ring_group->int_gl; + tqp_vector = ring_group->ring->tqp_vector; + usecs = (ring_group->int_gl << 1); + bytes_per_usecs = ring_group->total_bytes / usecs; + /* 1000000 microseconds */ + packets_per_secs = ring_group->total_packets * 1000000 / usecs; + + switch (new_flow_level) { + case HNS3_FLOW_LOW: + if (bytes_per_usecs > 10) + new_flow_level = HNS3_FLOW_MID; + break; + case HNS3_FLOW_MID: + if (bytes_per_usecs > 20) + new_flow_level = HNS3_FLOW_HIGH; + else if (bytes_per_usecs <= 10) + new_flow_level = HNS3_FLOW_LOW; + break; + case HNS3_FLOW_HIGH: + case HNS3_FLOW_ULTRA: + default: + if (bytes_per_usecs <= 20) + new_flow_level = HNS3_FLOW_MID; + break; + } + + if ((packets_per_secs > HNS3_RX_ULTRA_PACKET_RATE) && + (&tqp_vector->rx_group == ring_group)) + new_flow_level = HNS3_FLOW_ULTRA; + + switch (new_flow_level) { + case HNS3_FLOW_LOW: + new_int_gl = HNS3_INT_GL_50K; + break; + case HNS3_FLOW_MID: + new_int_gl = HNS3_INT_GL_20K; + break; + case HNS3_FLOW_HIGH: + new_int_gl = HNS3_INT_GL_18K; + break; + case HNS3_FLOW_ULTRA: + new_int_gl = HNS3_INT_GL_8K; + break; + default: + break; + } + + ring_group->total_bytes = 0; + ring_group->total_packets = 0; + ring_group->flow_level = new_flow_level; + if (new_int_gl != ring_group->int_gl) { + ring_group->int_gl = new_int_gl; + return true; + } + return false; +} + +static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector) +{ + u16 rx_int_gl, tx_int_gl; + bool rx, tx; + + rx = hns3_get_new_int_gl(&tqp_vector->rx_group); + tx = hns3_get_new_int_gl(&tqp_vector->tx_group); + rx_int_gl = tqp_vector->rx_group.int_gl; + tx_int_gl = tqp_vector->tx_group.int_gl; + if (rx && tx) { + if (rx_int_gl > tx_int_gl) { + 
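+			/* Both groups computed a new GL; converge on the
+			 * numerically larger of the two (here the RX value):
+			 * the TX group inherits RX's GL and flow level and the
+			 * vector is reprogrammed with it. The else branch is
+			 * the mirror image when the TX value is larger.
+			 */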
tqp_vector->tx_group.int_gl = rx_int_gl; + tqp_vector->tx_group.flow_level = + tqp_vector->rx_group.flow_level; + hns3_set_vector_coalesc_gl(tqp_vector, rx_int_gl); + } else { + tqp_vector->rx_group.int_gl = tx_int_gl; + tqp_vector->rx_group.flow_level = + tqp_vector->tx_group.flow_level; + hns3_set_vector_coalesc_gl(tqp_vector, tx_int_gl); + } + } +} + +static int hns3_nic_common_poll(struct napi_struct *napi, int budget) +{ + struct hns3_enet_ring *ring; + int rx_pkt_total = 0; + + struct hns3_enet_tqp_vector *tqp_vector = + container_of(napi, struct hns3_enet_tqp_vector, napi); + bool clean_complete = true; + int rx_budget; + + /* Since the actual Tx work is minimal, we can give the Tx a larger + * budget and be more aggressive about cleaning up the Tx descriptors. + */ + hns3_for_each_ring(ring, tqp_vector->tx_group) { + if (!hns3_clean_tx_ring(ring, budget)) + clean_complete = false; + } + + /* make sure rx ring budget not smaller than 1 */ + rx_budget = max(budget / tqp_vector->num_tqps, 1); + + hns3_for_each_ring(ring, tqp_vector->rx_group) { + int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget); + + if (rx_cleaned >= rx_budget) + clean_complete = false; + + rx_pkt_total += rx_cleaned; + } + + tqp_vector->rx_group.total_packets += rx_pkt_total; + + if (!clean_complete) + return budget; + + napi_complete(napi); + hns3_update_new_int_gl(tqp_vector); + hns3_mask_vector_irq(tqp_vector, 1); + + return rx_pkt_total; +} + +static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, + struct hnae3_ring_chain_node *head) +{ + struct pci_dev *pdev = tqp_vector->handle->pdev; + struct hnae3_ring_chain_node *cur_chain = head; + struct hnae3_ring_chain_node *chain; + struct hns3_enet_ring *tx_ring; + struct hns3_enet_ring *rx_ring; + + tx_ring = tqp_vector->tx_group.ring; + if (tx_ring) { + cur_chain->tqp_index = tx_ring->tqp->tqp_index; + hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, + HNAE3_RING_TYPE_TX); + + cur_chain->next = NULL; + + while (tx_ring->next) { + tx_ring = tx_ring->next; + + chain = devm_kzalloc(&pdev->dev, sizeof(*chain), + GFP_KERNEL); + if (!chain) + return -ENOMEM; + + cur_chain->next = chain; + chain->tqp_index = tx_ring->tqp->tqp_index; + hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B, + HNAE3_RING_TYPE_TX); + + cur_chain = chain; + } + } + + rx_ring = tqp_vector->rx_group.ring; + if (!tx_ring && rx_ring) { + cur_chain->next = NULL; + cur_chain->tqp_index = rx_ring->tqp->tqp_index; + hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, + HNAE3_RING_TYPE_RX); + + rx_ring = rx_ring->next; + } + + while (rx_ring) { + chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL); + if (!chain) + return -ENOMEM; + + cur_chain->next = chain; + chain->tqp_index = rx_ring->tqp->tqp_index; + hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B, + HNAE3_RING_TYPE_RX); + cur_chain = chain; + + rx_ring = rx_ring->next; + } + + return 0; +} + +static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, + struct hnae3_ring_chain_node *head) +{ + struct pci_dev *pdev = tqp_vector->handle->pdev; + struct hnae3_ring_chain_node *chain_tmp, *chain; + + chain = head->next; + + while (chain) { + chain_tmp = chain->next; + devm_kfree(&pdev->dev, chain); + chain = chain_tmp; + } +} + +static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group, + struct hns3_enet_ring *ring) +{ + ring->next = group->ring; + group->ring = ring; + + group->count++; +} + +static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) +{ + struct hnae3_ring_chain_node 
vector_ring_chain; + struct hnae3_handle *h = priv->ae_handle; + struct hns3_enet_tqp_vector *tqp_vector; + struct hnae3_vector_info *vector; + struct pci_dev *pdev = h->pdev; + u16 tqp_num = h->kinfo.num_tqps; + u16 vector_num; + int ret = 0; + u16 i; + + /* RSS size, cpu online and vector_num should be the same */ + /* Should consider 2p/4p later */ + vector_num = min_t(u16, num_online_cpus(), tqp_num); + vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector), + GFP_KERNEL); + if (!vector) + return -ENOMEM; + + vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector); + + priv->vector_num = vector_num; + priv->tqp_vector = (struct hns3_enet_tqp_vector *) + devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector), + GFP_KERNEL); + if (!priv->tqp_vector) + return -ENOMEM; + + for (i = 0; i < tqp_num; i++) { + u16 vector_i = i % vector_num; + + tqp_vector = &priv->tqp_vector[vector_i]; + + hns3_add_ring_to_group(&tqp_vector->tx_group, + priv->ring_data[i].ring); + + hns3_add_ring_to_group(&tqp_vector->rx_group, + priv->ring_data[i + tqp_num].ring); + + tqp_vector->idx = vector_i; + tqp_vector->mask_addr = vector[vector_i].io_addr; + tqp_vector->vector_irq = vector[vector_i].vector; + tqp_vector->num_tqps++; + + priv->ring_data[i].ring->tqp_vector = tqp_vector; + priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector; + } + + for (i = 0; i < vector_num; i++) { + tqp_vector = &priv->tqp_vector[i]; + + tqp_vector->rx_group.total_bytes = 0; + tqp_vector->rx_group.total_packets = 0; + tqp_vector->tx_group.total_bytes = 0; + tqp_vector->tx_group.total_packets = 0; + hns3_vector_gl_rl_init(tqp_vector); + tqp_vector->handle = h; + + ret = hns3_get_vector_ring_chain(tqp_vector, + &vector_ring_chain); + if (ret) + goto out; + + ret = h->ae_algo->ops->map_ring_to_vector(h, + tqp_vector->vector_irq, &vector_ring_chain); + if (ret) + goto out; + + hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); + + netif_napi_add(priv->netdev, &tqp_vector->napi, + hns3_nic_common_poll, NAPI_POLL_WEIGHT); + } + +out: + devm_kfree(&pdev->dev, vector); + return ret; +} + +static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) +{ + struct hnae3_ring_chain_node vector_ring_chain; + struct hnae3_handle *h = priv->ae_handle; + struct hns3_enet_tqp_vector *tqp_vector; + struct pci_dev *pdev = h->pdev; + int i, ret; + + for (i = 0; i < priv->vector_num; i++) { + tqp_vector = &priv->tqp_vector[i]; + + ret = hns3_get_vector_ring_chain(tqp_vector, + &vector_ring_chain); + if (ret) + return ret; + + ret = h->ae_algo->ops->unmap_ring_from_vector(h, + tqp_vector->vector_irq, &vector_ring_chain); + if (ret) + return ret; + + hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); + + if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) { + (void)irq_set_affinity_hint( + priv->tqp_vector[i].vector_irq, + NULL); + devm_free_irq(&pdev->dev, + priv->tqp_vector[i].vector_irq, + &priv->tqp_vector[i]); + } + + priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED; + + netif_napi_del(&priv->tqp_vector[i].napi); + } + + devm_kfree(&pdev->dev, priv->tqp_vector); + + return 0; +} + +static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, + int ring_type) +{ + struct hns3_nic_ring_data *ring_data = priv->ring_data; + int queue_num = priv->ae_handle->kinfo.num_tqps; + struct pci_dev *pdev = priv->ae_handle->pdev; + struct hns3_enet_ring *ring; + + ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL); + if (!ring) + return -ENOMEM; + + if (ring_type 
== HNAE3_RING_TYPE_TX) { + ring_data[q->tqp_index].ring = ring; + ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET; + } else { + ring_data[q->tqp_index + queue_num].ring = ring; + ring->io_base = q->io_base; + } + + hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type); + + ring_data[q->tqp_index].queue_index = q->tqp_index; + + ring->tqp = q; + ring->desc = NULL; + ring->desc_cb = NULL; + ring->dev = priv->dev; + ring->desc_dma_addr = 0; + ring->buf_size = q->buf_size; + ring->desc_num = q->desc_num; + ring->next_to_use = 0; + ring->next_to_clean = 0; + + return 0; +} + +static int hns3_queue_to_ring(struct hnae3_queue *tqp, + struct hns3_nic_priv *priv) +{ + int ret; + + ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX); + if (ret) + return ret; + + ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX); + if (ret) + return ret; + + return 0; +} + +static int hns3_get_ring_config(struct hns3_nic_priv *priv) +{ + struct hnae3_handle *h = priv->ae_handle; + struct pci_dev *pdev = h->pdev; + int i, ret; + + priv->ring_data = devm_kzalloc(&pdev->dev, h->kinfo.num_tqps * + sizeof(*priv->ring_data) * 2, + GFP_KERNEL); + if (!priv->ring_data) + return -ENOMEM; + + for (i = 0; i < h->kinfo.num_tqps; i++) { + ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv); + if (ret) + goto err; + } + + return 0; +err: + devm_kfree(&pdev->dev, priv->ring_data); + return ret; +} + +static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring) +{ + int ret; + + if (ring->desc_num <= 0 || ring->buf_size <= 0) + return -EINVAL; + + ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]), + GFP_KERNEL); + if (!ring->desc_cb) { + ret = -ENOMEM; + goto out; + } + + ret = hns3_alloc_desc(ring); + if (ret) + goto out_with_desc_cb; + + if (!HNAE3_IS_TX_RING(ring)) { + ret = hns3_alloc_ring_buffers(ring); + if (ret) + goto out_with_desc; + } + + return 0; + +out_with_desc: + hns3_free_desc(ring); +out_with_desc_cb: + kfree(ring->desc_cb); + ring->desc_cb = NULL; +out: + return ret; +} + +static void hns3_fini_ring(struct hns3_enet_ring *ring) +{ + hns3_free_desc(ring); + kfree(ring->desc_cb); + ring->desc_cb = NULL; + ring->next_to_clean = 0; + ring->next_to_use = 0; +} + +int hns3_buf_size2type(u32 buf_size) +{ + int bd_size_type; + + switch (buf_size) { + case 512: + bd_size_type = HNS3_BD_SIZE_512_TYPE; + break; + case 1024: + bd_size_type = HNS3_BD_SIZE_1024_TYPE; + break; + case 2048: + bd_size_type = HNS3_BD_SIZE_2048_TYPE; + break; + case 4096: + bd_size_type = HNS3_BD_SIZE_4096_TYPE; + break; + default: + bd_size_type = HNS3_BD_SIZE_2048_TYPE; + } + + return bd_size_type; +} + +static void hns3_init_ring_hw(struct hns3_enet_ring *ring) +{ + dma_addr_t dma = ring->desc_dma_addr; + struct hnae3_queue *q = ring->tqp; + + if (!HNAE3_IS_TX_RING(ring)) { + hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, + (u32)dma); + hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG, + (u32)((dma >> 31) >> 1)); + + hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG, + hns3_buf_size2type(ring->buf_size)); + hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG, + ring->desc_num / 8 - 1); + + } else { + hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG, + (u32)dma); + hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG, + (u32)((dma >> 31) >> 1)); + + hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG, + hns3_buf_size2type(ring->buf_size)); + hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG, + ring->desc_num / 8 - 1); + } +} + +static int hns3_init_all_ring(struct hns3_nic_priv *priv) +{ + struct hnae3_handle *h = 
priv->ae_handle; + int ring_num = h->kinfo.num_tqps * 2; + int i, j; + int ret; + + for (i = 0; i < ring_num; i++) { + ret = hns3_alloc_ring_memory(priv->ring_data[i].ring); + if (ret) { + dev_err(priv->dev, + "Alloc ring memory fail! ret=%d\n", ret); + goto out_when_alloc_ring_memory; + } + + hns3_init_ring_hw(priv->ring_data[i].ring); + + u64_stats_init(&priv->ring_data[i].ring->syncp); + } + + return 0; + +out_when_alloc_ring_memory: + for (j = i - 1; j >= 0; j--) + hns3_fini_ring(priv->ring_data[i].ring); + + return -ENOMEM; +} + +static int hns3_uninit_all_ring(struct hns3_nic_priv *priv) +{ + struct hnae3_handle *h = priv->ae_handle; + int i; + + for (i = 0; i < h->kinfo.num_tqps; i++) { + if (h->ae_algo->ops->reset_queue) + h->ae_algo->ops->reset_queue(h, i); + + hns3_fini_ring(priv->ring_data[i].ring); + hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring); + } + + return 0; +} + +/* Set mac addr if it is configured. or leave it to the AE driver */ +static void hns3_init_mac_addr(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + u8 mac_addr_temp[ETH_ALEN]; + + if (h->ae_algo->ops->get_mac_addr) { + h->ae_algo->ops->get_mac_addr(h, mac_addr_temp); + ether_addr_copy(netdev->dev_addr, mac_addr_temp); + } + + /* Check if the MAC address is valid, if not get a random one */ + if (!is_valid_ether_addr(netdev->dev_addr)) { + eth_hw_addr_random(netdev); + dev_warn(priv->dev, "using random MAC address %pM\n", + netdev->dev_addr); + } + + if (h->ae_algo->ops->set_mac_addr) + h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr); + +} + +static void hns3_nic_set_priv_ops(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + + if ((netdev->features & NETIF_F_TSO) || + (netdev->features & NETIF_F_TSO6)) { + priv->ops.fill_desc = hns3_fill_desc_tso; + priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso; + } else { + priv->ops.fill_desc = hns3_fill_desc; + priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx; + } +} + +static int hns3_client_init(struct hnae3_handle *handle) +{ + struct pci_dev *pdev = handle->pdev; + struct hns3_nic_priv *priv; + struct net_device *netdev; + int ret; + + netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), + handle->kinfo.num_tqps); + if (!netdev) + return -ENOMEM; + + priv = netdev_priv(netdev); + priv->dev = &pdev->dev; + priv->netdev = netdev; + priv->ae_handle = handle; + + handle->kinfo.netdev = netdev; + handle->priv = (void *)priv; + + hns3_init_mac_addr(netdev); + + hns3_set_default_feature(netdev); + + netdev->watchdog_timeo = HNS3_TX_TIMEOUT; + netdev->priv_flags |= IFF_UNICAST_FLT; + netdev->netdev_ops = &hns3_nic_netdev_ops; + SET_NETDEV_DEV(netdev, &pdev->dev); + hns3_ethtool_set_ops(netdev); + hns3_nic_set_priv_ops(netdev); + + /* Carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + + ret = hns3_get_ring_config(priv); + if (ret) { + ret = -ENOMEM; + goto out_get_ring_cfg; + } + + ret = hns3_nic_init_vector_data(priv); + if (ret) { + ret = -ENOMEM; + goto out_init_vector_data; + } + + ret = hns3_init_all_ring(priv); + if (ret) { + ret = -ENOMEM; + goto out_init_ring_data; + } + + ret = register_netdev(netdev); + if (ret) { + dev_err(priv->dev, "probe register netdev fail!\n"); + goto out_reg_netdev_fail; + } + + /* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */ + netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); + + return ret; + +out_reg_netdev_fail: +out_init_ring_data: + 
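+	/* Error unwind: the labels fall through, so each later failure
+	 * point reuses the cleanup steps below it.
+	 */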
(void)hns3_nic_uninit_vector_data(priv); + priv->ring_data = NULL; +out_init_vector_data: +out_get_ring_cfg: + priv->ae_handle = NULL; + free_netdev(netdev); + return ret; +} + +static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) +{ + struct net_device *netdev = handle->kinfo.netdev; + struct hns3_nic_priv *priv = netdev_priv(netdev); + int ret; + + if (netdev->reg_state != NETREG_UNINITIALIZED) + unregister_netdev(netdev); + + ret = hns3_nic_uninit_vector_data(priv); + if (ret) + netdev_err(netdev, "uninit vector error\n"); + + ret = hns3_uninit_all_ring(priv); + if (ret) + netdev_err(netdev, "uninit ring error\n"); + + priv->ring_data = NULL; + + free_netdev(netdev); +} + +static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup) +{ + struct net_device *netdev = handle->kinfo.netdev; + + if (!netdev) + return; + + if (linkup) { + netif_carrier_on(netdev); + netif_tx_wake_all_queues(netdev); + netdev_info(netdev, "link up\n"); + } else { + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + netdev_info(netdev, "link down\n"); + } +} + +const struct hnae3_client_ops client_ops = { + .init_instance = hns3_client_init, + .uninit_instance = hns3_client_uninit, + .link_status_change = hns3_link_status_change, +}; + +/* hns3_init_module - Driver registration routine + * hns3_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + */ +static int __init hns3_init_module(void) +{ + int ret; + + pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string); + pr_info("%s: %s\n", hns3_driver_name, hns3_copyright); + + client.type = HNAE3_CLIENT_KNIC; + snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s", + hns3_driver_name); + + client.ops = &client_ops; + + ret = hnae3_register_client(&client); + if (ret) + return ret; + + ret = pci_register_driver(&hns3_driver); + if (ret) + hnae3_unregister_client(&client); + + return ret; +} +module_init(hns3_init_module); + +/* hns3_exit_module - Driver exit cleanup routine + * hns3_exit_module is called just before the driver is removed + * from memory. + */ +static void __exit hns3_exit_module(void) +{ + pci_unregister_driver(&hns3_driver); + hnae3_unregister_client(&client); +} +module_exit(hns3_exit_module); + +MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver"); +MODULE_AUTHOR("Huawei Tech. Co., Ltd."); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("pci:hns-nic"); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h new file mode 100644 index 000000000000..7e8746189747 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h @@ -0,0 +1,593 @@ +/* + * Copyright (c) 2016 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __HNS3_ENET_H +#define __HNS3_ENET_H + +#include "hnae3.h" + +extern const char hns3_driver_version[]; + +enum hns3_nic_state { + HNS3_NIC_STATE_TESTING, + HNS3_NIC_STATE_RESETTING, + HNS3_NIC_STATE_REINITING, + HNS3_NIC_STATE_DOWN, + HNS3_NIC_STATE_DISABLED, + HNS3_NIC_STATE_REMOVING, + HNS3_NIC_STATE_SERVICE_INITED, + HNS3_NIC_STATE_SERVICE_SCHED, + HNS3_NIC_STATE2_RESET_REQUESTED, + HNS3_NIC_STATE_MAX +}; + +#define HNS3_RING_RX_RING_BASEADDR_L_REG 0x00000 +#define HNS3_RING_RX_RING_BASEADDR_H_REG 0x00004 +#define HNS3_RING_RX_RING_BD_NUM_REG 0x00008 +#define HNS3_RING_RX_RING_BD_LEN_REG 0x0000C +#define HNS3_RING_RX_RING_TAIL_REG 0x00018 +#define HNS3_RING_RX_RING_HEAD_REG 0x0001C +#define HNS3_RING_RX_RING_FBDNUM_REG 0x00020 +#define HNS3_RING_RX_RING_PKTNUM_RECORD_REG 0x0002C + +#define HNS3_RING_TX_RING_BASEADDR_L_REG 0x00040 +#define HNS3_RING_TX_RING_BASEADDR_H_REG 0x00044 +#define HNS3_RING_TX_RING_BD_NUM_REG 0x00048 +#define HNS3_RING_TX_RING_BD_LEN_REG 0x0004C +#define HNS3_RING_TX_RING_TAIL_REG 0x00058 +#define HNS3_RING_TX_RING_HEAD_REG 0x0005C +#define HNS3_RING_TX_RING_FBDNUM_REG 0x00060 +#define HNS3_RING_TX_RING_OFFSET_REG 0x00064 +#define HNS3_RING_TX_RING_PKTNUM_RECORD_REG 0x0006C + +#define HNS3_RING_PREFETCH_EN_REG 0x0007C +#define HNS3_RING_CFG_VF_NUM_REG 0x00080 +#define HNS3_RING_ASID_REG 0x0008C +#define HNS3_RING_RX_VM_REG 0x00090 +#define HNS3_RING_T0_BE_RST 0x00094 +#define HNS3_RING_COULD_BE_RST 0x00098 +#define HNS3_RING_WRR_WEIGHT_REG 0x0009c + +#define HNS3_RING_INTMSK_RXWL_REG 0x000A0 +#define HNS3_RING_INTSTS_RX_RING_REG 0x000A4 +#define HNS3_RX_RING_INT_STS_REG 0x000A8 +#define HNS3_RING_INTMSK_TXWL_REG 0x000AC +#define HNS3_RING_INTSTS_TX_RING_REG 0x000B0 +#define HNS3_TX_RING_INT_STS_REG 0x000B4 +#define HNS3_RING_INTMSK_RX_OVERTIME_REG 0x000B8 +#define HNS3_RING_INTSTS_RX_OVERTIME_REG 0x000BC +#define HNS3_RING_INTMSK_TX_OVERTIME_REG 0x000C4 +#define HNS3_RING_INTSTS_TX_OVERTIME_REG 0x000C8 + +#define HNS3_RING_MB_CTRL_REG 0x00100 +#define HNS3_RING_MB_DATA_BASE_REG 0x00200 + +#define HNS3_TX_REG_OFFSET 0x40 + +#define HNS3_RX_HEAD_SIZE 256 + +#define HNS3_TX_TIMEOUT (5 * HZ) +#define HNS3_RING_NAME_LEN 16 +#define HNS3_BUFFER_SIZE_2048 2048 +#define HNS3_RING_MAX_PENDING 32768 +#define HNS3_MAX_MTU 9728 + +#define HNS3_BD_SIZE_512_TYPE 0 +#define HNS3_BD_SIZE_1024_TYPE 1 +#define HNS3_BD_SIZE_2048_TYPE 2 +#define HNS3_BD_SIZE_4096_TYPE 3 + +#define HNS3_RX_FLAG_VLAN_PRESENT 0x1 +#define HNS3_RX_FLAG_L3ID_IPV4 0x0 +#define HNS3_RX_FLAG_L3ID_IPV6 0x1 +#define HNS3_RX_FLAG_L4ID_UDP 0x0 +#define HNS3_RX_FLAG_L4ID_TCP 0x1 + +#define HNS3_RXD_DMAC_S 0 +#define HNS3_RXD_DMAC_M (0x3 << HNS3_RXD_DMAC_S) +#define HNS3_RXD_VLAN_S 2 +#define HNS3_RXD_VLAN_M (0x3 << HNS3_RXD_VLAN_S) +#define HNS3_RXD_L3ID_S 4 +#define HNS3_RXD_L3ID_M (0xf << HNS3_RXD_L3ID_S) +#define HNS3_RXD_L4ID_S 8 +#define HNS3_RXD_L4ID_M (0xf << HNS3_RXD_L4ID_S) +#define HNS3_RXD_FRAG_B 12 +#define HNS3_RXD_L2E_B 16 +#define HNS3_RXD_L3E_B 17 +#define HNS3_RXD_L4E_B 18 +#define HNS3_RXD_TRUNCAT_B 19 +#define HNS3_RXD_HOI_B 20 +#define HNS3_RXD_DOI_B 21 +#define HNS3_RXD_OL3E_B 22 +#define HNS3_RXD_OL4E_B 23 + +#define HNS3_RXD_ODMAC_S 0 +#define HNS3_RXD_ODMAC_M (0x3 << HNS3_RXD_ODMAC_S) +#define HNS3_RXD_OVLAN_S 2 +#define HNS3_RXD_OVLAN_M (0x3 << HNS3_RXD_OVLAN_S) +#define HNS3_RXD_OL3ID_S 4 +#define HNS3_RXD_OL3ID_M (0xf << HNS3_RXD_OL3ID_S) +#define HNS3_RXD_OL4ID_S 8 +#define HNS3_RXD_OL4ID_M (0xf << HNS3_RXD_OL4ID_S) +#define HNS3_RXD_FBHI_S 12 +#define HNS3_RXD_FBHI_M 
(0x3 << HNS3_RXD_FBHI_S) +#define HNS3_RXD_FBLI_S 14 +#define HNS3_RXD_FBLI_M (0x3 << HNS3_RXD_FBLI_S) + +#define HNS3_RXD_BDTYPE_S 0 +#define HNS3_RXD_BDTYPE_M (0xf << HNS3_RXD_BDTYPE_S) +#define HNS3_RXD_VLD_B 4 +#define HNS3_RXD_UDP0_B 5 +#define HNS3_RXD_EXTEND_B 7 +#define HNS3_RXD_FE_B 8 +#define HNS3_RXD_LUM_B 9 +#define HNS3_RXD_CRCP_B 10 +#define HNS3_RXD_L3L4P_B 11 +#define HNS3_RXD_TSIND_S 12 +#define HNS3_RXD_TSIND_M (0x7 << HNS3_RXD_TSIND_S) +#define HNS3_RXD_LKBK_B 15 +#define HNS3_RXD_HDL_S 16 +#define HNS3_RXD_HDL_M (0x7ff << HNS3_RXD_HDL_S) +#define HNS3_RXD_HSIND_B 31 + +#define HNS3_TXD_L3T_S 0 +#define HNS3_TXD_L3T_M (0x3 << HNS3_TXD_L3T_S) +#define HNS3_TXD_L4T_S 2 +#define HNS3_TXD_L4T_M (0x3 << HNS3_TXD_L4T_S) +#define HNS3_TXD_L3CS_B 4 +#define HNS3_TXD_L4CS_B 5 +#define HNS3_TXD_VLAN_B 6 +#define HNS3_TXD_TSO_B 7 + +#define HNS3_TXD_L2LEN_S 8 +#define HNS3_TXD_L2LEN_M (0xff << HNS3_TXD_L2LEN_S) +#define HNS3_TXD_L3LEN_S 16 +#define HNS3_TXD_L3LEN_M (0xff << HNS3_TXD_L3LEN_S) +#define HNS3_TXD_L4LEN_S 24 +#define HNS3_TXD_L4LEN_M (0xff << HNS3_TXD_L4LEN_S) + +#define HNS3_TXD_OL3T_S 0 +#define HNS3_TXD_OL3T_M (0x3 << HNS3_TXD_OL3T_S) +#define HNS3_TXD_OVLAN_B 2 +#define HNS3_TXD_MACSEC_B 3 +#define HNS3_TXD_TUNTYPE_S 4 +#define HNS3_TXD_TUNTYPE_M (0xf << HNS3_TXD_TUNTYPE_S) + +#define HNS3_TXD_BDTYPE_S 0 +#define HNS3_TXD_BDTYPE_M (0xf << HNS3_TXD_BDTYPE_S) +#define HNS3_TXD_FE_B 4 +#define HNS3_TXD_SC_S 5 +#define HNS3_TXD_SC_M (0x3 << HNS3_TXD_SC_S) +#define HNS3_TXD_EXTEND_B 7 +#define HNS3_TXD_VLD_B 8 +#define HNS3_TXD_RI_B 9 +#define HNS3_TXD_RA_B 10 +#define HNS3_TXD_TSYN_B 11 +#define HNS3_TXD_DECTTL_S 12 +#define HNS3_TXD_DECTTL_M (0xf << HNS3_TXD_DECTTL_S) + +#define HNS3_TXD_MSS_S 0 +#define HNS3_TXD_MSS_M (0x3fff << HNS3_TXD_MSS_S) + +#define HNS3_VECTOR_TX_IRQ BIT_ULL(0) +#define HNS3_VECTOR_RX_IRQ BIT_ULL(1) + +#define HNS3_VECTOR_NOT_INITED 0 +#define HNS3_VECTOR_INITED 1 + +#define HNS3_MAX_BD_SIZE 65535 +#define HNS3_MAX_BD_PER_FRAG 8 +#define HNS3_MAX_BD_PER_PKT MAX_SKB_FRAGS + +#define HNS3_VECTOR_GL0_OFFSET 0x100 +#define HNS3_VECTOR_GL1_OFFSET 0x200 +#define HNS3_VECTOR_GL2_OFFSET 0x300 +#define HNS3_VECTOR_RL_OFFSET 0x900 +#define HNS3_VECTOR_RL_EN_B 6 + +enum hns3_pkt_l3t_type { + HNS3_L3T_NONE, + HNS3_L3T_IPV6, + HNS3_L3T_IPV4, + HNS3_L3T_RESERVED +}; + +enum hns3_pkt_l4t_type { + HNS3_L4T_UNKNOWN, + HNS3_L4T_TCP, + HNS3_L4T_UDP, + HNS3_L4T_SCTP +}; + +enum hns3_pkt_ol3t_type { + HNS3_OL3T_NONE, + HNS3_OL3T_IPV6, + HNS3_OL3T_IPV4_NO_CSUM, + HNS3_OL3T_IPV4_CSUM +}; + +enum hns3_pkt_tun_type { + HNS3_TUN_NONE, + HNS3_TUN_MAC_IN_UDP, + HNS3_TUN_NVGRE, + HNS3_TUN_OTHER +}; + +/* hardware spec ring buffer format */ +struct __packed hns3_desc { + __le64 addr; + union { + struct { + __le16 vlan_tag; + __le16 send_size; + union { + __le32 type_cs_vlan_tso_len; + struct { + __u8 type_cs_vlan_tso; + __u8 l2_len; + __u8 l3_len; + __u8 l4_len; + }; + }; + __le16 outer_vlan_tag; + __le16 tv; + + union { + __le32 ol_type_vlan_len_msec; + struct { + __u8 ol_type_vlan_msec; + __u8 ol2_len; + __u8 ol3_len; + __u8 ol4_len; + }; + }; + + __le32 paylen; + __le16 bdtp_fe_sc_vld_ra_ri; + __le16 mss; + } tx; + + struct { + __le32 l234_info; + __le16 pkt_len; + __le16 size; + + __le32 rss_hash; + __le16 fd_id; + __le16 vlan_tag; + + union { + __le32 ol_info; + struct { + __le16 o_dm_vlan_id_fb; + __le16 ot_vlan_tag; + }; + }; + + __le32 bd_base_info; + } rx; + }; +}; + +struct hns3_desc_cb { + dma_addr_t dma; /* dma address of this desc */ + void *buf; /* cpu addr 
for a desc */ + + /* priv data for the desc, e.g. skb when use with ip stack*/ + void *priv; + u16 page_offset; + u16 reuse_flag; + + u16 length; /* length of the buffer */ + + /* desc type, used by the ring user to mark the type of the priv data */ + u16 type; +}; + +enum hns3_pkt_l3type { + HNS3_L3_TYPE_IPV4, + HNS3_L3_TYPE_IPV6, + HNS3_L3_TYPE_ARP, + HNS3_L3_TYPE_RARP, + HNS3_L3_TYPE_IPV4_OPT, + HNS3_L3_TYPE_IPV6_EXT, + HNS3_L3_TYPE_LLDP, + HNS3_L3_TYPE_BPDU, + HNS3_L3_TYPE_MAC_PAUSE, + HNS3_L3_TYPE_PFC_PAUSE,/* 0x9*/ + + /* reserved for 0xA~0xB*/ + + HNS3_L3_TYPE_CNM = 0xc, + + /* reserved for 0xD~0xE*/ + + HNS3_L3_TYPE_PARSE_FAIL = 0xf /* must be last */ +}; + +enum hns3_pkt_l4type { + HNS3_L4_TYPE_UDP, + HNS3_L4_TYPE_TCP, + HNS3_L4_TYPE_GRE, + HNS3_L4_TYPE_SCTP, + HNS3_L4_TYPE_IGMP, + HNS3_L4_TYPE_ICMP, + + /* reserved for 0x6~0xE */ + + HNS3_L4_TYPE_PARSE_FAIL = 0xf /* must be last */ +}; + +enum hns3_pkt_ol3type { + HNS3_OL3_TYPE_IPV4 = 0, + HNS3_OL3_TYPE_IPV6, + /* reserved for 0x2~0x3 */ + HNS3_OL3_TYPE_IPV4_OPT = 4, + HNS3_OL3_TYPE_IPV6_EXT, + + /* reserved for 0x6~0xE*/ + + HNS3_OL3_TYPE_PARSE_FAIL = 0xf /* must be last */ +}; + +enum hns3_pkt_ol4type { + HNS3_OL4_TYPE_NO_TUN, + HNS3_OL4_TYPE_MAC_IN_UDP, + HNS3_OL4_TYPE_NVGRE, + HNS3_OL4_TYPE_UNKNOWN +}; + +struct ring_stats { + u64 io_err_cnt; + u64 sw_err_cnt; + u64 seg_pkt_cnt; + union { + struct { + u64 tx_pkts; + u64 tx_bytes; + u64 tx_err_cnt; + u64 restart_queue; + u64 tx_busy; + }; + struct { + u64 rx_pkts; + u64 rx_bytes; + u64 rx_err_cnt; + u64 reuse_pg_cnt; + u64 err_pkt_len; + u64 non_vld_descs; + u64 err_bd_num; + u64 l2_err; + u64 l3l4_csum_err; + }; + }; +}; + +struct hns3_enet_ring { + u8 __iomem *io_base; /* base io address for the ring */ + struct hns3_desc *desc; /* dma map address space */ + struct hns3_desc_cb *desc_cb; + struct hns3_enet_ring *next; + struct hns3_enet_tqp_vector *tqp_vector; + struct hnae3_queue *tqp; + char ring_name[HNS3_RING_NAME_LEN]; + struct device *dev; /* will be used for DMA mapping of descriptors */ + + /* statistic */ + struct ring_stats stats; + struct u64_stats_sync syncp; + + dma_addr_t desc_dma_addr; + u32 buf_size; /* size for hnae_desc->addr, preset by AE */ + u16 desc_num; /* total number of desc */ + u16 max_desc_num_per_pkt; + u16 max_raw_data_sz_per_desc; + u16 max_pkt_size; + int next_to_use; /* idx of next spare desc */ + + /* idx of lastest sent desc, the ring is empty when equal to + * next_to_use + */ + int next_to_clean; + + u32 flag; /* ring attribute */ + int irq_init_flag; + + int numa_node; + cpumask_t affinity_mask; +}; + +struct hns_queue; + +struct hns3_nic_ring_data { + struct hns3_enet_ring *ring; + struct napi_struct napi; + int queue_index; + int (*poll_one)(struct hns3_nic_ring_data *, int, void *); + void (*ex_process)(struct hns3_nic_ring_data *, struct sk_buff *); + void (*fini_process)(struct hns3_nic_ring_data *); +}; + +struct hns3_nic_ops { + int (*fill_desc)(struct hns3_enet_ring *ring, void *priv, + int size, dma_addr_t dma, int frag_end, + enum hns_desc_type type); + int (*maybe_stop_tx)(struct sk_buff **out_skb, + int *bnum, struct hns3_enet_ring *ring); + void (*get_rxd_bnum)(u32 bnum_flag, int *out_bnum); +}; + +enum hns3_flow_level_range { + HNS3_FLOW_LOW = 0, + HNS3_FLOW_MID = 1, + HNS3_FLOW_HIGH = 2, + HNS3_FLOW_ULTRA = 3, +}; + +enum hns3_link_mode_bits { + HNS3_LM_FIBRE_BIT = BIT(0), + HNS3_LM_AUTONEG_BIT = BIT(1), + HNS3_LM_TP_BIT = BIT(2), + HNS3_LM_PAUSE_BIT = BIT(3), + HNS3_LM_BACKPLANE_BIT = BIT(4), + HNS3_LM_10BASET_HALF_BIT = 
BIT(5), + HNS3_LM_10BASET_FULL_BIT = BIT(6), + HNS3_LM_100BASET_HALF_BIT = BIT(7), + HNS3_LM_100BASET_FULL_BIT = BIT(8), + HNS3_LM_1000BASET_FULL_BIT = BIT(9), + HNS3_LM_10000BASEKR_FULL_BIT = BIT(10), + HNS3_LM_25000BASEKR_FULL_BIT = BIT(11), + HNS3_LM_40000BASELR4_FULL_BIT = BIT(12), + HNS3_LM_50000BASEKR2_FULL_BIT = BIT(13), + HNS3_LM_100000BASEKR4_FULL_BIT = BIT(14), + HNS3_LM_COUNT = 15 +}; + +#define HNS3_INT_GL_50K 0x000A +#define HNS3_INT_GL_20K 0x0019 +#define HNS3_INT_GL_18K 0x001B +#define HNS3_INT_GL_8K 0x003E + +struct hns3_enet_ring_group { + /* array of pointers to rings */ + struct hns3_enet_ring *ring; + u64 total_bytes; /* total bytes processed this group */ + u64 total_packets; /* total packets processed this group */ + u16 count; + enum hns3_flow_level_range flow_level; + u16 int_gl; +}; + +struct hns3_enet_tqp_vector { + struct hnae3_handle *handle; + u8 __iomem *mask_addr; + int vector_irq; + int irq_init_flag; + + u16 idx; /* index in the TQP vector array per handle. */ + + struct napi_struct napi; + + struct hns3_enet_ring_group rx_group; + struct hns3_enet_ring_group tx_group; + + u16 num_tqps; /* total number of tqps in TQP vector */ + + cpumask_t affinity_mask; + char name[HNAE3_INT_NAME_LEN]; + + /* when 0 should adjust interrupt coalesce parameter */ + u8 int_adapt_down; +} ____cacheline_internodealigned_in_smp; + +enum hns3_udp_tnl_type { + HNS3_UDP_TNL_VXLAN, + HNS3_UDP_TNL_GENEVE, + HNS3_UDP_TNL_MAX, +}; + +struct hns3_udp_tunnel { + u16 dst_port; + int used; +}; + +struct hns3_nic_priv { + struct hnae3_handle *ae_handle; + u32 enet_ver; + u32 port_id; + struct net_device *netdev; + struct device *dev; + struct hns3_nic_ops ops; + + /** + * the cb for nic to manage the ring buffer, the first half of the + * array is for tx_ring and vice versa for the second half + */ + struct hns3_nic_ring_data *ring_data; + struct hns3_enet_tqp_vector *tqp_vector; + u16 vector_num; + + /* The most recently read link state */ + int link; + u64 tx_timeout_count; + + unsigned long state; + + struct timer_list service_timer; + + struct work_struct service_task; + + struct notifier_block notifier_block; + /* Vxlan/Geneve information */ + struct hns3_udp_tunnel udp_tnl[HNS3_UDP_TNL_MAX]; +}; + +union l3_hdr_info { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; +}; + +union l4_hdr_info { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; +}; + +/* the distance between [begin, end) in a ring buffer + * note: there is a unuse slot between the begin and the end + */ +static inline int ring_dist(struct hns3_enet_ring *ring, int begin, int end) +{ + return (end - begin + ring->desc_num) % ring->desc_num; +} + +static inline int ring_space(struct hns3_enet_ring *ring) +{ + return ring->desc_num - + ring_dist(ring, ring->next_to_clean, ring->next_to_use) - 1; +} + +static inline int is_ring_empty(struct hns3_enet_ring *ring) +{ + return ring->next_to_use == ring->next_to_clean; +} + +static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value) +{ + u8 __iomem *reg_addr = READ_ONCE(base); + + writel(value, reg_addr + reg); +} + +#define hns3_write_dev(a, reg, value) \ + hns3_write_reg((a)->io_base, (reg), (value)) + +#define hnae_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \ + (tqp)->io_base + HNS3_RING_TX_RING_TAIL_REG) + +#define ring_to_dev(ring) (&(ring)->tqp->handle->pdev->dev) + +#define ring_to_dma_dir(ring) (HNAE3_IS_TX_RING(ring) ? 
\ + DMA_TO_DEVICE : DMA_FROM_DEVICE) + +#define tx_ring_data(priv, idx) ((priv)->ring_data[idx]) + +#define hnae_buf_size(_ring) ((_ring)->buf_size) +#define hnae_page_order(_ring) (get_order(hnae_buf_size(_ring))) +#define hnae_page_size(_ring) (PAGE_SIZE << hnae_page_order(_ring)) + +/* iterator for handling rings in ring group */ +#define hns3_for_each_ring(pos, head) \ + for (pos = (head).ring; pos; pos = pos->next) + +void hns3_ethtool_set_ops(struct net_device *netdev); + +int hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c new file mode 100644 index 000000000000..d636399232fb --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c @@ -0,0 +1,493 @@ +/* + * Copyright (c) 2016~2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include + +#include "hns3_enet.h" + +struct hns3_stats { + char stats_string[ETH_GSTRING_LEN]; + int stats_size; + int stats_offset; +}; + +/* tqp related stats */ +#define HNS3_TQP_STAT(_string, _member) { \ + .stats_string = _string, \ + .stats_size = FIELD_SIZEOF(struct ring_stats, _member), \ + .stats_offset = offsetof(struct hns3_enet_ring, stats) + offsetof(struct ring_stats, _member), \ +} + +static const struct hns3_stats hns3_txq_stats[] = { + /* Tx per-queue statistics */ + HNS3_TQP_STAT("tx_io_err_cnt", io_err_cnt), + HNS3_TQP_STAT("tx_sw_err_cnt", sw_err_cnt), + HNS3_TQP_STAT("tx_seg_pkt_cnt", seg_pkt_cnt), + HNS3_TQP_STAT("tx_pkts", tx_pkts), + HNS3_TQP_STAT("tx_bytes", tx_bytes), + HNS3_TQP_STAT("tx_err_cnt", tx_err_cnt), + HNS3_TQP_STAT("tx_restart_queue", restart_queue), + HNS3_TQP_STAT("tx_busy", tx_busy), +}; + +#define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats) + +static const struct hns3_stats hns3_rxq_stats[] = { + /* Rx per-queue statistics */ + HNS3_TQP_STAT("rx_io_err_cnt", io_err_cnt), + HNS3_TQP_STAT("rx_sw_err_cnt", sw_err_cnt), + HNS3_TQP_STAT("rx_seg_pkt_cnt", seg_pkt_cnt), + HNS3_TQP_STAT("rx_pkts", rx_pkts), + HNS3_TQP_STAT("rx_bytes", rx_bytes), + HNS3_TQP_STAT("rx_err_cnt", rx_err_cnt), + HNS3_TQP_STAT("rx_reuse_pg_cnt", reuse_pg_cnt), + HNS3_TQP_STAT("rx_err_pkt_len", err_pkt_len), + HNS3_TQP_STAT("rx_non_vld_descs", non_vld_descs), + HNS3_TQP_STAT("rx_err_bd_num", err_bd_num), + HNS3_TQP_STAT("rx_l2_err", l2_err), + HNS3_TQP_STAT("rx_l3l4_csum_err", l3l4_csum_err), +}; + +#define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats) + +#define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT) + +struct hns3_link_mode_mapping { + u32 hns3_link_mode; + u32 ethtool_link_mode; +}; + +static const struct hns3_link_mode_mapping hns3_lm_map[] = { + {HNS3_LM_FIBRE_BIT, ETHTOOL_LINK_MODE_FIBRE_BIT}, + {HNS3_LM_AUTONEG_BIT, ETHTOOL_LINK_MODE_Autoneg_BIT}, + {HNS3_LM_TP_BIT, ETHTOOL_LINK_MODE_TP_BIT}, + {HNS3_LM_PAUSE_BIT, ETHTOOL_LINK_MODE_Pause_BIT}, + {HNS3_LM_BACKPLANE_BIT, ETHTOOL_LINK_MODE_Backplane_BIT}, + {HNS3_LM_10BASET_HALF_BIT, ETHTOOL_LINK_MODE_10baseT_Half_BIT}, + {HNS3_LM_10BASET_FULL_BIT, ETHTOOL_LINK_MODE_10baseT_Full_BIT}, + {HNS3_LM_100BASET_HALF_BIT, ETHTOOL_LINK_MODE_100baseT_Half_BIT}, + {HNS3_LM_100BASET_FULL_BIT, ETHTOOL_LINK_MODE_100baseT_Full_BIT}, + {HNS3_LM_1000BASET_FULL_BIT, 
ETHTOOL_LINK_MODE_1000baseT_Full_BIT}, +}; + +static void hns3_driv_to_eth_caps(u32 caps, struct ethtool_link_ksettings *cmd, + bool is_advertised) +{ + int i; + + if (is_advertised) + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + else + ethtool_link_ksettings_zero_link_mode(cmd, supported); + + for (i = 0; i < ARRAY_SIZE(hns3_lm_map); i++) { + if (!(caps & hns3_lm_map[i].hns3_link_mode)) + continue; + + if (is_advertised) + __set_bit(hns3_lm_map[i].ethtool_link_mode, + cmd->link_modes.advertising); + else + __set_bit(hns3_lm_map[i].ethtool_link_mode, + cmd->link_modes.supported); + } +} + +static int hns3_get_sset_count(struct net_device *netdev, int stringset) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + const struct hnae3_ae_ops *ops = h->ae_algo->ops; + + if (!ops->get_sset_count) + return -EOPNOTSUPP; + + switch (stringset) { + case ETH_SS_STATS: + return ((HNS3_TQP_STATS_COUNT * h->kinfo.num_tqps) + + ops->get_sset_count(h, stringset)); + + case ETH_SS_TEST: + return ops->get_sset_count(h, stringset); + } + + return 0; +} + +static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats, + u32 stat_count, u32 num_tqps) +{ +#define MAX_PREFIX_SIZE (8 + 4) + u32 size_left; + u32 i, j; + u32 n1; + + for (i = 0; i < num_tqps; i++) { + for (j = 0; j < stat_count; j++) { + data[ETH_GSTRING_LEN - 1] = '\0'; + + /* first, prepend the prefix string */ + n1 = snprintf(data, MAX_PREFIX_SIZE, "rcb_q%d_", i); + n1 = min_t(uint, n1, MAX_PREFIX_SIZE - 1); + size_left = (ETH_GSTRING_LEN - 1) - n1; + + /* now, concatenate the stats string to it */ + strncat(data, stats[j].stats_string, size_left); + data += ETH_GSTRING_LEN; + } + } + + return data; +} + +static u8 *hns3_get_strings_tqps(struct hnae3_handle *handle, u8 *data) +{ + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + + /* get strings for Tx */ + data = hns3_update_strings(data, hns3_txq_stats, HNS3_TXQ_STATS_COUNT, + kinfo->num_tqps); + + /* get strings for Rx */ + data = hns3_update_strings(data, hns3_rxq_stats, HNS3_RXQ_STATS_COUNT, + kinfo->num_tqps); + + return data; +} + +static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + const struct hnae3_ae_ops *ops = h->ae_algo->ops; + char *buff = (char *)data; + + if (!ops->get_strings) + return; + + switch (stringset) { + case ETH_SS_STATS: + buff = hns3_get_strings_tqps(h, buff); + h->ae_algo->ops->get_strings(h, stringset, (u8 *)buff); + break; + case ETH_SS_TEST: + ops->get_strings(h, stringset, data); + break; + } +} + +static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data) +{ + struct hns3_nic_priv *nic_priv = (struct hns3_nic_priv *)handle->priv; + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct hns3_enet_ring *ring; + u8 *stat; + u32 i, j; + + /* get stats for Tx */ + for (i = 0; i < kinfo->num_tqps; i++) { + ring = nic_priv->ring_data[i].ring; + for (j = 0; j < HNS3_TXQ_STATS_COUNT; j++) { + stat = (u8 *)ring + hns3_txq_stats[j].stats_offset; + *data++ = *(u64 *)stat; + } + } + + /* get stats for Rx */ + for (i = 0; i < kinfo->num_tqps; i++) { + ring = nic_priv->ring_data[i + kinfo->num_tqps].ring; + for (j = 0; j < HNS3_RXQ_STATS_COUNT; j++) { + stat = (u8 *)ring + hns3_rxq_stats[j].stats_offset; + *data++ = *(u64 *)stat; + } + } + + return data; +} + +/* hns3_get_stats - get detailed statistics. + * @netdev: net device + * @stats: statistics info.
+ * @data: statistics data. + */ +void hns3_get_stats(struct net_device *netdev, struct ethtool_stats *stats, + u64 *data) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + u64 *p = data; + + if (!h->ae_algo->ops->get_stats || !h->ae_algo->ops->update_stats) { + netdev_err(netdev, "could not get any statistics\n"); + return; + } + + h->ae_algo->ops->update_stats(h, &netdev->stats); + + /* get per-queue stats */ + p = hns3_get_stats_tqps(h, p); + + /* get MAC & other misc hardware stats */ + h->ae_algo->ops->get_stats(h, p); +} + +static void hns3_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + strncpy(drvinfo->version, hns3_driver_version, + sizeof(drvinfo->version)); + drvinfo->version[sizeof(drvinfo->version) - 1] = '\0'; + + strncpy(drvinfo->driver, h->pdev->driver->name, + sizeof(drvinfo->driver)); + drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0'; + + strncpy(drvinfo->bus_info, pci_name(h->pdev), + sizeof(drvinfo->bus_info)); + drvinfo->bus_info[ETHTOOL_BUSINFO_LEN - 1] = '\0'; + + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x", + priv->ae_handle->ae_algo->ops->get_fw_version(h)); +} + +static u32 hns3_get_link(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h; + + h = priv->ae_handle; + + if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_status) + return h->ae_algo->ops->get_status(h); + else + return 0; +} + +static void hns3_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *param) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + int queue_num = priv->ae_handle->kinfo.num_tqps; + + param->tx_max_pending = HNS3_RING_MAX_PENDING; + param->rx_max_pending = HNS3_RING_MAX_PENDING; + + param->tx_pending = priv->ring_data[0].ring->desc_num; + param->rx_pending = priv->ring_data[queue_num].ring->desc_num; +} + +static void hns3_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *param) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_pauseparam) + h->ae_algo->ops->get_pauseparam(h, ¶m->autoneg, + ¶m->rx_pause, ¶m->tx_pause); +} + +static int hns3_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + u32 supported_caps; + u32 advertised_caps; + u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN; + u8 link_stat; + u8 auto_neg; + u8 duplex; + u32 speed; + + if (!h->ae_algo || !h->ae_algo->ops) + return -EOPNOTSUPP; + + /* 1.auto_neg & speed & duplex from cmd */ + if (h->ae_algo->ops->get_ksettings_an_result) { + h->ae_algo->ops->get_ksettings_an_result(h, &auto_neg, + &speed, &duplex); + cmd->base.autoneg = auto_neg; + cmd->base.speed = speed; + cmd->base.duplex = duplex; + + link_stat = hns3_get_link(netdev); + if (!link_stat) { + cmd->base.speed = (u32)SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + } + + /* 2.media_type get from bios parameter block */ + if (h->ae_algo->ops->get_media_type) { + h->ae_algo->ops->get_media_type(h, &media_type); + + switch (media_type) { + case HNAE3_MEDIA_TYPE_FIBER: + cmd->base.port = PORT_FIBRE; + supported_caps = HNS3_LM_FIBRE_BIT | + HNS3_LM_AUTONEG_BIT | + HNS3_LM_PAUSE_BIT | + HNS3_LM_1000BASET_FULL_BIT; 
+ + advertised_caps = supported_caps; + break; + case HNAE3_MEDIA_TYPE_COPPER: + cmd->base.port = PORT_TP; + supported_caps = HNS3_LM_TP_BIT | + HNS3_LM_AUTONEG_BIT | + HNS3_LM_PAUSE_BIT | + HNS3_LM_1000BASET_FULL_BIT | + HNS3_LM_100BASET_FULL_BIT | + HNS3_LM_100BASET_HALF_BIT | + HNS3_LM_10BASET_FULL_BIT | + HNS3_LM_10BASET_HALF_BIT; + advertised_caps = supported_caps; + break; + case HNAE3_MEDIA_TYPE_BACKPLANE: + cmd->base.port = PORT_NONE; + supported_caps = HNS3_LM_BACKPLANE_BIT | + HNS3_LM_PAUSE_BIT | + HNS3_LM_AUTONEG_BIT | + HNS3_LM_1000BASET_FULL_BIT | + HNS3_LM_100BASET_FULL_BIT | + HNS3_LM_100BASET_HALF_BIT | + HNS3_LM_10BASET_FULL_BIT | + HNS3_LM_10BASET_HALF_BIT; + + advertised_caps = supported_caps; + break; + case HNAE3_MEDIA_TYPE_UNKNOWN: + default: + cmd->base.port = PORT_OTHER; + supported_caps = 0; + advertised_caps = 0; + break; + } + + /* now, map driver link modes to ethtool link modes */ + hns3_driv_to_eth_caps(supported_caps, cmd, false); + hns3_driv_to_eth_caps(advertised_caps, cmd, true); + } + + /* 3.mdix_ctrl&mdix get from phy reg */ + if (h->ae_algo->ops->get_mdix_mode) + h->ae_algo->ops->get_mdix_mode(h, &cmd->base.eth_tp_mdix_ctrl, + &cmd->base.eth_tp_mdix); + /* 4.mdio_support */ + cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22; + + return 0; +} + +static u32 hns3_get_rss_key_size(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + if (!h->ae_algo || !h->ae_algo->ops || + !h->ae_algo->ops->get_rss_key_size) + return -EOPNOTSUPP; + + return h->ae_algo->ops->get_rss_key_size(h); +} + +static u32 hns3_get_rss_indir_size(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + if (!h->ae_algo || !h->ae_algo->ops || + !h->ae_algo->ops->get_rss_indir_size) + return -EOPNOTSUPP; + + return h->ae_algo->ops->get_rss_indir_size(h); +} + +static int hns3_get_rss(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss) + return -EOPNOTSUPP; + + return h->ae_algo->ops->get_rss(h, indir, key, hfunc); +} + +static int hns3_set_rss(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss) + return -EOPNOTSUPP; + + /* currently we only support Toeplitz hash */ + if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && (hfunc != ETH_RSS_HASH_TOP)) { + netdev_err(netdev, + "hash func not supported (only Toeplitz hash)\n"); + return -EOPNOTSUPP; + } + if (!indir) { + netdev_err(netdev, + "set rss failed for indir is empty\n"); + return -EOPNOTSUPP; + } + + return h->ae_algo->ops->set_rss(h, indir, key, hfunc); +} + +static int hns3_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_tc_size) + return -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = h->ae_algo->ops->get_tc_size(h); + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static const struct ethtool_ops hns3_ethtool_ops = { + .get_drvinfo = hns3_get_drvinfo, + .get_link = hns3_get_link, + 
.get_ringparam = hns3_get_ringparam, + .get_pauseparam = hns3_get_pauseparam, + .get_strings = hns3_get_strings, + .get_ethtool_stats = hns3_get_stats, + .get_sset_count = hns3_get_sset_count, + .get_rxnfc = hns3_get_rxnfc, + .get_rxfh_key_size = hns3_get_rss_key_size, + .get_rxfh_indir_size = hns3_get_rss_indir_size, + .get_rxfh = hns3_get_rss, + .set_rxfh = hns3_set_rss, + .get_link_ksettings = hns3_get_link_ksettings, +}; + +void hns3_ethtool_set_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &hns3_ethtool_ops; +} diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c index c6164a98f257..c8c7ad2eff77 100644 --- a/drivers/net/ethernet/hp/hp100.c +++ b/drivers/net/ethernet/hp/hp100.c @@ -194,7 +194,7 @@ static const char *hp100_isa_tbl[] = { }; #endif -static struct eisa_device_id hp100_eisa_tbl[] = { +static const struct eisa_device_id hp100_eisa_tbl[] = { { "HWPF180" }, /* HP J2577 rev A */ { "HWP1920" }, /* HP 27248B */ { "HWP1940" }, /* HP J2577 */ diff --git a/drivers/net/ethernet/huawei/Kconfig b/drivers/net/ethernet/huawei/Kconfig new file mode 100644 index 000000000000..c1a95ae4058b --- /dev/null +++ b/drivers/net/ethernet/huawei/Kconfig @@ -0,0 +1,19 @@ +# +# Huawei driver configuration +# + +config NET_VENDOR_HUAWEI + bool "Huawei devices" + default y + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y. + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Huawei cards. If you say Y, you will be asked + for your specific card in the following questions. + +if NET_VENDOR_HUAWEI + +source "drivers/net/ethernet/huawei/hinic/Kconfig" + +endif # NET_VENDOR_HUAWEI diff --git a/drivers/net/ethernet/huawei/Makefile b/drivers/net/ethernet/huawei/Makefile new file mode 100644 index 000000000000..5c37cc8fc1bc --- /dev/null +++ b/drivers/net/ethernet/huawei/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Huawei device drivers. +# + +obj-$(CONFIG_HINIC) += hinic/ diff --git a/drivers/net/ethernet/huawei/hinic/Kconfig b/drivers/net/ethernet/huawei/hinic/Kconfig new file mode 100644 index 000000000000..08db24954f7e --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/Kconfig @@ -0,0 +1,12 @@ +# +# Huawei driver configuration +# + +config HINIC + tristate "Huawei Intelligent PCIE Network Interface Card" + depends on (PCI_MSI && X86) + ---help--- + This driver supports HiNIC PCIe Ethernet cards. + Choose M to build the driver as a module, which is the default, + or Y to build it into the kernel. + If unsure, choose N.
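A brief usage sketch for the options above (the symbol names come from the two Kconfig files just added and the module name from the hinic/ Makefile that follows; the fragment itself is only an illustration):

    CONFIG_NET_VENDOR_HUAWEI=y
    CONFIG_HINIC=m

With CONFIG_HINIC=m the objects listed in the hinic/ Makefile below are linked into a single hinic.ko module, which can then be loaded with modprobe hinic; choosing Y builds the driver into the kernel image instead.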
diff --git a/drivers/net/ethernet/huawei/hinic/Makefile b/drivers/net/ethernet/huawei/hinic/Makefile new file mode 100644 index 000000000000..289ce88bb2d0 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/Makefile @@ -0,0 +1,6 @@ +obj-$(CONFIG_HINIC) += hinic.o + +hinic-y := hinic_main.o hinic_tx.o hinic_rx.o hinic_port.o hinic_hw_dev.o \ + hinic_hw_io.o hinic_hw_qp.o hinic_hw_cmdq.o hinic_hw_wq.o \ + hinic_hw_mgmt.o hinic_hw_api_cmd.o hinic_hw_eqs.o hinic_hw_if.o \ + hinic_common.o diff --git a/drivers/net/ethernet/huawei/hinic/hinic_common.c b/drivers/net/ethernet/huawei/hinic/hinic_common.c new file mode 100644 index 000000000000..02c74fd8380e --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_common.c @@ -0,0 +1,80 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#include +#include +#include + +#include "hinic_common.h" + +/** + * hinic_cpu_to_be32 - convert data to big endian 32 bit format + * @data: the data to convert + * @len: length of data to convert + **/ +void hinic_cpu_to_be32(void *data, int len) +{ + u32 *mem = data; + int i; + + len = len / sizeof(u32); + + for (i = 0; i < len; i++) { + *mem = cpu_to_be32(*mem); + mem++; + } +} + +/** + * hinic_be32_to_cpu - convert data from big endian 32 bit format + * @data: the data to convert + * @len: length of data to convert + **/ +void hinic_be32_to_cpu(void *data, int len) +{ + u32 *mem = data; + int i; + + len = len / sizeof(u32); + + for (i = 0; i < len; i++) { + *mem = be32_to_cpu(*mem); + mem++; + } +} + +/** + * hinic_set_sge - set dma area in scatter gather entry + * @sge: scatter gather entry + * @addr: dma address + * @len: length of relevant data in the dma address + **/ +void hinic_set_sge(struct hinic_sge *sge, dma_addr_t addr, int len) +{ + sge->hi_addr = upper_32_bits(addr); + sge->lo_addr = lower_32_bits(addr); + sge->len = len; +} + +/** + * hinic_sge_to_dma - get dma address from scatter gather entry + * @sge: scatter gather entry + * + * Return dma address of sg entry + **/ +dma_addr_t hinic_sge_to_dma(struct hinic_sge *sge) +{ + return (dma_addr_t)((((u64)sge->hi_addr) << 32) | sge->lo_addr); +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_common.h b/drivers/net/ethernet/huawei/hinic/hinic_common.h new file mode 100644 index 000000000000..2c06b76e94a1 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_common.h @@ -0,0 +1,38 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#ifndef HINIC_COMMON_H +#define HINIC_COMMON_H + +#include + +#define UPPER_8_BITS(data) (((data) >> 8) & 0xFF) +#define LOWER_8_BITS(data) ((data) & 0xFF) + +struct hinic_sge { + u32 hi_addr; + u32 lo_addr; + u32 len; +}; + +void hinic_cpu_to_be32(void *data, int len); + +void hinic_be32_to_cpu(void *data, int len); + +void hinic_set_sge(struct hinic_sge *sge, dma_addr_t addr, int len); + +dma_addr_t hinic_sge_to_dma(struct hinic_sge *sge); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_dev.h new file mode 100644 index 000000000000..5186cc9023aa --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_dev.h @@ -0,0 +1,64 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef HINIC_DEV_H +#define HINIC_DEV_H + +#include +#include +#include +#include +#include + +#include "hinic_hw_dev.h" +#include "hinic_tx.h" +#include "hinic_rx.h" + +#define HINIC_DRV_NAME "hinic" + +enum hinic_flags { + HINIC_LINK_UP = BIT(0), + HINIC_INTF_UP = BIT(1), +}; + +struct hinic_rx_mode_work { + struct work_struct work; + u32 rx_mode; +}; + +struct hinic_dev { + struct net_device *netdev; + struct hinic_hwdev *hwdev; + + u32 msg_enable; + unsigned int tx_weight; + unsigned int rx_weight; + + unsigned int flags; + + struct semaphore mgmt_lock; + unsigned long *vlan_bitmap; + + struct hinic_rx_mode_work rx_mode_work; + struct workqueue_struct *workq; + + struct hinic_txq *txqs; + struct hinic_rxq *rxqs; + + struct hinic_txq_stats tx_stats; + struct hinic_rxq_stats rx_stats; +}; + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c new file mode 100644 index 000000000000..c40603a183df --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c @@ -0,0 +1,978 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hinic_hw_csr.h" +#include "hinic_hw_if.h" +#include "hinic_hw_api_cmd.h" + +#define API_CHAIN_NUM_CELLS 32 + +#define API_CMD_CELL_SIZE_SHIFT 6 +#define API_CMD_CELL_SIZE_MIN (BIT(API_CMD_CELL_SIZE_SHIFT)) + +#define API_CMD_CELL_SIZE(cell_size) \ + (((cell_size) >= API_CMD_CELL_SIZE_MIN) ? 
\ + (1 << (fls(cell_size - 1))) : API_CMD_CELL_SIZE_MIN) + +#define API_CMD_CELL_SIZE_VAL(size) \ + ilog2((size) >> API_CMD_CELL_SIZE_SHIFT) + +#define API_CMD_BUF_SIZE 2048 + +/* Sizes of the members in hinic_api_cmd_cell */ +#define API_CMD_CELL_DESC_SIZE 8 +#define API_CMD_CELL_DATA_ADDR_SIZE 8 + +#define API_CMD_CELL_ALIGNMENT 8 + +#define API_CMD_TIMEOUT 1000 + +#define MASKED_IDX(chain, idx) ((idx) & ((chain)->num_cells - 1)) + +#define SIZE_8BYTES(size) (ALIGN((size), 8) >> 3) +#define SIZE_4BYTES(size) (ALIGN((size), 4) >> 2) + +#define RD_DMA_ATTR_DEFAULT 0 +#define WR_DMA_ATTR_DEFAULT 0 + +enum api_cmd_data_format { + SGE_DATA = 1, /* cell data is passed by hw address */ +}; + +enum api_cmd_type { + API_CMD_WRITE = 0, +}; + +enum api_cmd_bypass { + NO_BYPASS = 0, + BYPASS = 1, +}; + +enum api_cmd_xor_chk_level { + XOR_CHK_DIS = 0, + + XOR_CHK_ALL = 3, +}; + +static u8 xor_chksum_set(void *data) +{ + int idx; + u8 *val, checksum = 0; + + val = data; + + for (idx = 0; idx < 7; idx++) + checksum ^= val[idx]; + + return checksum; +} + +static void set_prod_idx(struct hinic_api_cmd_chain *chain) +{ + enum hinic_api_cmd_chain_type chain_type = chain->chain_type; + struct hinic_hwif *hwif = chain->hwif; + u32 addr, prod_idx; + + addr = HINIC_CSR_API_CMD_CHAIN_PI_ADDR(chain_type); + prod_idx = hinic_hwif_read_reg(hwif, addr); + + prod_idx = HINIC_API_CMD_PI_CLEAR(prod_idx, IDX); + + prod_idx |= HINIC_API_CMD_PI_SET(chain->prod_idx, IDX); + + hinic_hwif_write_reg(hwif, addr, prod_idx); +} + +static u32 get_hw_cons_idx(struct hinic_api_cmd_chain *chain) +{ + u32 addr, val; + + addr = HINIC_CSR_API_CMD_STATUS_ADDR(chain->chain_type); + val = hinic_hwif_read_reg(chain->hwif, addr); + + return HINIC_API_CMD_STATUS_GET(val, CONS_IDX); +} + +/** + * chain_busy - check if the chain is still processing last requests + * @chain: chain to check + * + * Return 0 - Success, negative - Failure + **/ +static int chain_busy(struct hinic_api_cmd_chain *chain) +{ + struct hinic_hwif *hwif = chain->hwif; + struct pci_dev *pdev = hwif->pdev; + u32 prod_idx; + + switch (chain->chain_type) { + case HINIC_API_CMD_WRITE_TO_MGMT_CPU: + chain->cons_idx = get_hw_cons_idx(chain); + prod_idx = chain->prod_idx; + + /* check for a space for a new command */ + if (chain->cons_idx == MASKED_IDX(chain, prod_idx + 1)) { + dev_err(&pdev->dev, "API CMD chain %d is busy\n", + chain->chain_type); + return -EBUSY; + } + break; + + default: + dev_err(&pdev->dev, "Unknown API CMD Chain type\n"); + break; + } + + return 0; +} + +/** + * get_cell_data_size - get the data size of a specific cell type + * @type: chain type + * + * Return the data(Desc + Address) size in the cell + **/ +static u8 get_cell_data_size(enum hinic_api_cmd_chain_type type) +{ + u8 cell_data_size = 0; + + switch (type) { + case HINIC_API_CMD_WRITE_TO_MGMT_CPU: + cell_data_size = ALIGN(API_CMD_CELL_DESC_SIZE + + API_CMD_CELL_DATA_ADDR_SIZE, + API_CMD_CELL_ALIGNMENT); + break; + default: + break; + } + + return cell_data_size; +} + +/** + * prepare_cell_ctrl - prepare the ctrl of the cell for the command + * @cell_ctrl: the control of the cell to set the control value into it + * @data_size: the size of the data in the cell + **/ +static void prepare_cell_ctrl(u64 *cell_ctrl, u16 data_size) +{ + u8 chksum; + u64 ctrl; + + ctrl = HINIC_API_CMD_CELL_CTRL_SET(SIZE_8BYTES(data_size), DATA_SZ) | + HINIC_API_CMD_CELL_CTRL_SET(RD_DMA_ATTR_DEFAULT, RD_DMA_ATTR) | + HINIC_API_CMD_CELL_CTRL_SET(WR_DMA_ATTR_DEFAULT, WR_DMA_ATTR); + + chksum = xor_chksum_set(&ctrl); + + 
ctrl |= HINIC_API_CMD_CELL_CTRL_SET(chksum, XOR_CHKSUM); + + /* The data in the HW should be in Big Endian Format */ + *cell_ctrl = cpu_to_be64(ctrl); +} + +/** + * prepare_api_cmd - prepare API CMD command + * @chain: chain for the command + * @dest: destination node on the card that will receive the command + * @cmd: command data + * @cmd_size: the command size + **/ +static void prepare_api_cmd(struct hinic_api_cmd_chain *chain, + enum hinic_node_id dest, + void *cmd, u16 cmd_size) +{ + struct hinic_api_cmd_cell *cell = chain->curr_node; + struct hinic_api_cmd_cell_ctxt *cell_ctxt; + struct hinic_hwif *hwif = chain->hwif; + struct pci_dev *pdev = hwif->pdev; + + cell_ctxt = &chain->cell_ctxt[chain->prod_idx]; + + switch (chain->chain_type) { + case HINIC_API_CMD_WRITE_TO_MGMT_CPU: + cell->desc = HINIC_API_CMD_DESC_SET(SGE_DATA, API_TYPE) | + HINIC_API_CMD_DESC_SET(API_CMD_WRITE, RD_WR) | + HINIC_API_CMD_DESC_SET(NO_BYPASS, MGMT_BYPASS); + break; + + default: + dev_err(&pdev->dev, "unknown Chain type\n"); + return; + } + + cell->desc |= HINIC_API_CMD_DESC_SET(dest, DEST) | + HINIC_API_CMD_DESC_SET(SIZE_4BYTES(cmd_size), SIZE); + + cell->desc |= HINIC_API_CMD_DESC_SET(xor_chksum_set(&cell->desc), + XOR_CHKSUM); + + /* The data in the HW should be in Big Endian Format */ + cell->desc = cpu_to_be64(cell->desc); + + memcpy(cell_ctxt->api_cmd_vaddr, cmd, cmd_size); +} + +/** + * prepare_cell - prepare cell ctrl and cmd in the current cell + * @chain: chain for the command + * @dest: destination node on the card that will receive the command + * @cmd: command data + * @cmd_size: the command size + * + * Return 0 - Success, negative - Failure + **/ +static void prepare_cell(struct hinic_api_cmd_chain *chain, + enum hinic_node_id dest, + void *cmd, u16 cmd_size) +{ + struct hinic_api_cmd_cell *curr_node = chain->curr_node; + u16 data_size = get_cell_data_size(chain->chain_type); + + prepare_cell_ctrl(&curr_node->ctrl, data_size); + prepare_api_cmd(chain, dest, cmd, cmd_size); +} + +static inline void cmd_chain_prod_idx_inc(struct hinic_api_cmd_chain *chain) +{ + chain->prod_idx = MASKED_IDX(chain, chain->prod_idx + 1); +} + +/** + * api_cmd_status_update - update the status in the chain struct + * @chain: chain to update + **/ +static void api_cmd_status_update(struct hinic_api_cmd_chain *chain) +{ + enum hinic_api_cmd_chain_type chain_type; + struct hinic_api_cmd_status *wb_status; + struct hinic_hwif *hwif = chain->hwif; + struct pci_dev *pdev = hwif->pdev; + u64 status_header; + u32 status; + + wb_status = chain->wb_status; + status_header = be64_to_cpu(wb_status->header); + + status = be32_to_cpu(wb_status->status); + if (HINIC_API_CMD_STATUS_GET(status, CHKSUM_ERR)) { + dev_err(&pdev->dev, "API CMD status: Xor check error\n"); + return; + } + + chain_type = HINIC_API_CMD_STATUS_HEADER_GET(status_header, CHAIN_ID); + if (chain_type >= HINIC_API_CMD_MAX) { + dev_err(&pdev->dev, "unknown API CMD Chain %d\n", chain_type); + return; + } + + chain->cons_idx = HINIC_API_CMD_STATUS_GET(status, CONS_IDX); +} + +/** + * wait_for_status_poll - wait for write to api cmd command to complete + * @chain: the chain of the command + * + * Return 0 - Success, negative - Failure + **/ +static int wait_for_status_poll(struct hinic_api_cmd_chain *chain) +{ + int err = -ETIMEDOUT; + unsigned long end; + + end = jiffies + msecs_to_jiffies(API_CMD_TIMEOUT); + do { + api_cmd_status_update(chain); + + /* wait for CI to be updated - sign for completion */ + if (chain->cons_idx == chain->prod_idx) { + err = 0; + 
break; + } + + msleep(20); + } while (time_before(jiffies, end)); + + return err; +} + +/** + * wait_for_api_cmd_completion - wait for command to complete + * @chain: chain for the command + * + * Return 0 - Success, negative - Failure + **/ +static int wait_for_api_cmd_completion(struct hinic_api_cmd_chain *chain) +{ + struct hinic_hwif *hwif = chain->hwif; + struct pci_dev *pdev = hwif->pdev; + int err; + + switch (chain->chain_type) { + case HINIC_API_CMD_WRITE_TO_MGMT_CPU: + err = wait_for_status_poll(chain); + if (err) { + dev_err(&pdev->dev, "API CMD Poll status timeout\n"); + break; + } + break; + + default: + dev_err(&pdev->dev, "unknown API CMD Chain type\n"); + err = -EINVAL; + break; + } + + return err; +} + +/** + * api_cmd - API CMD command + * @chain: chain for the command + * @dest: destination node on the card that will receive the command + * @cmd: command data + * @size: the command size + * + * Return 0 - Success, negative - Failure + **/ +static int api_cmd(struct hinic_api_cmd_chain *chain, + enum hinic_node_id dest, u8 *cmd, u16 cmd_size) +{ + struct hinic_api_cmd_cell_ctxt *ctxt; + int err; + + down(&chain->sem); + if (chain_busy(chain)) { + up(&chain->sem); + return -EBUSY; + } + + prepare_cell(chain, dest, cmd, cmd_size); + cmd_chain_prod_idx_inc(chain); + + wmb(); /* inc pi before issue the command */ + + set_prod_idx(chain); /* issue the command */ + + ctxt = &chain->cell_ctxt[chain->prod_idx]; + + chain->curr_node = ctxt->cell_vaddr; + + err = wait_for_api_cmd_completion(chain); + + up(&chain->sem); + return err; +} + +/** + * hinic_api_cmd_write - Write API CMD command + * @chain: chain for write command + * @dest: destination node on the card that will receive the command + * @cmd: command data + * @size: the command size + * + * Return 0 - Success, negative - Failure + **/ +int hinic_api_cmd_write(struct hinic_api_cmd_chain *chain, + enum hinic_node_id dest, u8 *cmd, u16 size) +{ + /* Verify the chain type */ + if (chain->chain_type == HINIC_API_CMD_WRITE_TO_MGMT_CPU) + return api_cmd(chain, dest, cmd, size); + + return -EINVAL; +} + +/** + * api_cmd_hw_restart - restart the chain in the HW + * @chain: the API CMD specific chain to restart + * + * Return 0 - Success, negative - Failure + **/ +static int api_cmd_hw_restart(struct hinic_api_cmd_chain *chain) +{ + struct hinic_hwif *hwif = chain->hwif; + int err = -ETIMEDOUT; + unsigned long end; + u32 reg_addr, val; + + /* Read Modify Write */ + reg_addr = HINIC_CSR_API_CMD_CHAIN_REQ_ADDR(chain->chain_type); + val = hinic_hwif_read_reg(hwif, reg_addr); + + val = HINIC_API_CMD_CHAIN_REQ_CLEAR(val, RESTART); + val |= HINIC_API_CMD_CHAIN_REQ_SET(1, RESTART); + + hinic_hwif_write_reg(hwif, reg_addr, val); + + end = jiffies + msecs_to_jiffies(API_CMD_TIMEOUT); + do { + val = hinic_hwif_read_reg(hwif, reg_addr); + + if (!HINIC_API_CMD_CHAIN_REQ_GET(val, RESTART)) { + err = 0; + break; + } + + msleep(20); + } while (time_before(jiffies, end)); + + return err; +} + +/** + * api_cmd_ctrl_init - set the control register of a chain + * @chain: the API CMD specific chain to set control register for + **/ +static void api_cmd_ctrl_init(struct hinic_api_cmd_chain *chain) +{ + struct hinic_hwif *hwif = chain->hwif; + u32 addr, ctrl; + u16 cell_size; + + /* Read Modify Write */ + addr = HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type); + + cell_size = API_CMD_CELL_SIZE_VAL(chain->cell_size); + + ctrl = hinic_hwif_read_reg(hwif, addr); + + ctrl = HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, RESTART_WB_STAT) & + 
HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_ERR) & + HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) & + HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_CHK_EN) & + HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE); + + ctrl |= HINIC_API_CMD_CHAIN_CTRL_SET(1, XOR_ERR) | + HINIC_API_CMD_CHAIN_CTRL_SET(XOR_CHK_ALL, XOR_CHK_EN) | + HINIC_API_CMD_CHAIN_CTRL_SET(cell_size, CELL_SIZE); + + hinic_hwif_write_reg(hwif, addr, ctrl); +} + +/** + * api_cmd_set_status_addr - set the status address of a chain in the HW + * @chain: the API CMD specific chain to set in HW status address for + **/ +static void api_cmd_set_status_addr(struct hinic_api_cmd_chain *chain) +{ + struct hinic_hwif *hwif = chain->hwif; + u32 addr, val; + + addr = HINIC_CSR_API_CMD_STATUS_HI_ADDR(chain->chain_type); + val = upper_32_bits(chain->wb_status_paddr); + hinic_hwif_write_reg(hwif, addr, val); + + addr = HINIC_CSR_API_CMD_STATUS_LO_ADDR(chain->chain_type); + val = lower_32_bits(chain->wb_status_paddr); + hinic_hwif_write_reg(hwif, addr, val); +} + +/** + * api_cmd_set_num_cells - set the number cells of a chain in the HW + * @chain: the API CMD specific chain to set in HW the number of cells for + **/ +static void api_cmd_set_num_cells(struct hinic_api_cmd_chain *chain) +{ + struct hinic_hwif *hwif = chain->hwif; + u32 addr, val; + + addr = HINIC_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(chain->chain_type); + val = chain->num_cells; + hinic_hwif_write_reg(hwif, addr, val); +} + +/** + * api_cmd_head_init - set the head of a chain in the HW + * @chain: the API CMD specific chain to set in HW the head for + **/ +static void api_cmd_head_init(struct hinic_api_cmd_chain *chain) +{ + struct hinic_hwif *hwif = chain->hwif; + u32 addr, val; + + addr = HINIC_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(chain->chain_type); + val = upper_32_bits(chain->head_cell_paddr); + hinic_hwif_write_reg(hwif, addr, val); + + addr = HINIC_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(chain->chain_type); + val = lower_32_bits(chain->head_cell_paddr); + hinic_hwif_write_reg(hwif, addr, val); +} + +/** + * api_cmd_chain_hw_clean - clean the HW + * @chain: the API CMD specific chain + **/ +static void api_cmd_chain_hw_clean(struct hinic_api_cmd_chain *chain) +{ + struct hinic_hwif *hwif = chain->hwif; + u32 addr, ctrl; + + addr = HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type); + + ctrl = hinic_hwif_read_reg(hwif, addr); + ctrl = HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, RESTART_WB_STAT) & + HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_ERR) & + HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) & + HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_CHK_EN) & + HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE); + + hinic_hwif_write_reg(hwif, addr, ctrl); +} + +/** + * api_cmd_chain_hw_init - initialize the chain in the HW + * @chain: the API CMD specific chain to initialize in HW + * + * Return 0 - Success, negative - Failure + **/ +static int api_cmd_chain_hw_init(struct hinic_api_cmd_chain *chain) +{ + struct hinic_hwif *hwif = chain->hwif; + struct pci_dev *pdev = hwif->pdev; + int err; + + api_cmd_chain_hw_clean(chain); + + api_cmd_set_status_addr(chain); + + err = api_cmd_hw_restart(chain); + if (err) { + dev_err(&pdev->dev, "Failed to restart API CMD HW\n"); + return err; + } + + api_cmd_ctrl_init(chain); + api_cmd_set_num_cells(chain); + api_cmd_head_init(chain); + return 0; +} + +/** + * free_cmd_buf - free the dma buffer of API CMD command + * @chain: the API CMD specific chain of the cmd + * @cell_idx: the cell index of the cmd + **/ +static void free_cmd_buf(struct hinic_api_cmd_chain *chain, int cell_idx) +{ 
+ struct hinic_api_cmd_cell_ctxt *cell_ctxt; + struct hinic_hwif *hwif = chain->hwif; + struct pci_dev *pdev = hwif->pdev; + + cell_ctxt = &chain->cell_ctxt[cell_idx]; + + dma_free_coherent(&pdev->dev, API_CMD_BUF_SIZE, + cell_ctxt->api_cmd_vaddr, + cell_ctxt->api_cmd_paddr); +} + +/** + * alloc_cmd_buf - allocate a dma buffer for API CMD command + * @chain: the API CMD specific chain for the cmd + * @cell: the cell in the HW for the cmd + * @cell_idx: the index of the cell + * + * Return 0 - Success, negative - Failure + **/ +static int alloc_cmd_buf(struct hinic_api_cmd_chain *chain, + struct hinic_api_cmd_cell *cell, int cell_idx) +{ + struct hinic_api_cmd_cell_ctxt *cell_ctxt; + struct hinic_hwif *hwif = chain->hwif; + struct pci_dev *pdev = hwif->pdev; + dma_addr_t cmd_paddr; + u8 *cmd_vaddr; + int err = 0; + + cmd_vaddr = dma_zalloc_coherent(&pdev->dev, API_CMD_BUF_SIZE, + &cmd_paddr, GFP_KERNEL); + if (!cmd_vaddr) { + dev_err(&pdev->dev, "Failed to allocate API CMD DMA memory\n"); + return -ENOMEM; + } + + cell_ctxt = &chain->cell_ctxt[cell_idx]; + + cell_ctxt->api_cmd_vaddr = cmd_vaddr; + cell_ctxt->api_cmd_paddr = cmd_paddr; + + /* set the cmd DMA address in the cell */ + switch (chain->chain_type) { + case HINIC_API_CMD_WRITE_TO_MGMT_CPU: + /* The data in the HW should be in Big Endian Format */ + cell->write.hw_cmd_paddr = cpu_to_be64(cmd_paddr); + break; + + default: + dev_err(&pdev->dev, "Unsupported API CMD chain type\n"); + free_cmd_buf(chain, cell_idx); + err = -EINVAL; + break; + } + + return err; +} + +/** + * api_cmd_create_cell - create API CMD cell for specific chain + * @chain: the API CMD specific chain to create its cell + * @cell_idx: the index of the cell to create + * @pre_node: previous cell + * @node_vaddr: the returned virt addr of the cell + * + * Return 0 - Success, negative - Failure + **/ +static int api_cmd_create_cell(struct hinic_api_cmd_chain *chain, + int cell_idx, + struct hinic_api_cmd_cell *pre_node, + struct hinic_api_cmd_cell **node_vaddr) +{ + struct hinic_api_cmd_cell_ctxt *cell_ctxt; + struct hinic_hwif *hwif = chain->hwif; + struct pci_dev *pdev = hwif->pdev; + struct hinic_api_cmd_cell *node; + dma_addr_t node_paddr; + int err; + + node = dma_zalloc_coherent(&pdev->dev, chain->cell_size, + &node_paddr, GFP_KERNEL); + if (!node) { + dev_err(&pdev->dev, "Failed to allocate dma API CMD cell\n"); + return -ENOMEM; + } + + node->read.hw_wb_resp_paddr = 0; + + cell_ctxt = &chain->cell_ctxt[cell_idx]; + cell_ctxt->cell_vaddr = node; + cell_ctxt->cell_paddr = node_paddr; + + if (!pre_node) { + chain->head_cell_paddr = node_paddr; + chain->head_node = node; + } else { + /* The data in the HW should be in Big Endian Format */ + pre_node->next_cell_paddr = cpu_to_be64(node_paddr); + } + + switch (chain->chain_type) { + case HINIC_API_CMD_WRITE_TO_MGMT_CPU: + err = alloc_cmd_buf(chain, node, cell_idx); + if (err) { + dev_err(&pdev->dev, "Failed to allocate cmd buffer\n"); + goto err_alloc_cmd_buf; + } + break; + + default: + dev_err(&pdev->dev, "Unsupported API CMD chain type\n"); + err = -EINVAL; + goto err_alloc_cmd_buf; + } + + *node_vaddr = node; + return 0; + +err_alloc_cmd_buf: + dma_free_coherent(&pdev->dev, chain->cell_size, node, node_paddr); + return err; +} + +/** + * api_cmd_destroy_cell - destroy API CMD cell of specific chain + * @chain: the API CMD specific chain to destroy its cell + * @cell_idx: the cell to destroy + **/ +static void api_cmd_destroy_cell(struct hinic_api_cmd_chain *chain, + int cell_idx) +{ + struct 
hinic_api_cmd_cell_ctxt *cell_ctxt; + struct hinic_hwif *hwif = chain->hwif; + struct pci_dev *pdev = hwif->pdev; + struct hinic_api_cmd_cell *node; + dma_addr_t node_paddr; + size_t node_size; + + cell_ctxt = &chain->cell_ctxt[cell_idx]; + + node = cell_ctxt->cell_vaddr; + node_paddr = cell_ctxt->cell_paddr; + node_size = chain->cell_size; + + if (cell_ctxt->api_cmd_vaddr) { + switch (chain->chain_type) { + case HINIC_API_CMD_WRITE_TO_MGMT_CPU: + free_cmd_buf(chain, cell_idx); + break; + default: + dev_err(&pdev->dev, "Unsupported API CMD chain type\n"); + break; + } + + dma_free_coherent(&pdev->dev, node_size, node, + node_paddr); + } +} + +/** + * api_cmd_destroy_cells - destroy API CMD cells of specific chain + * @chain: the API CMD specific chain to destroy its cells + * @num_cells: number of cells to destroy + **/ +static void api_cmd_destroy_cells(struct hinic_api_cmd_chain *chain, + int num_cells) +{ + int cell_idx; + + for (cell_idx = 0; cell_idx < num_cells; cell_idx++) + api_cmd_destroy_cell(chain, cell_idx); +} + +/** + * api_cmd_create_cells - create API CMD cells for specific chain + * @chain: the API CMD specific chain + * + * Return 0 - Success, negative - Failure + **/ +static int api_cmd_create_cells(struct hinic_api_cmd_chain *chain) +{ + struct hinic_api_cmd_cell *node = NULL, *pre_node = NULL; + struct hinic_hwif *hwif = chain->hwif; + struct pci_dev *pdev = hwif->pdev; + int err, cell_idx; + + for (cell_idx = 0; cell_idx < chain->num_cells; cell_idx++) { + err = api_cmd_create_cell(chain, cell_idx, pre_node, &node); + if (err) { + dev_err(&pdev->dev, "Failed to create API CMD cell\n"); + goto err_create_cell; + } + + pre_node = node; + } + + /* set the Final node to point on the start */ + node->next_cell_paddr = cpu_to_be64(chain->head_cell_paddr); + + /* set the current node to be the head */ + chain->curr_node = chain->head_node; + return 0; + +err_create_cell: + api_cmd_destroy_cells(chain, cell_idx); + return err; +} + +/** + * api_chain_init - initialize API CMD specific chain + * @chain: the API CMD specific chain to initialize + * @attr: attributes to set in the chain + * + * Return 0 - Success, negative - Failure + **/ +static int api_chain_init(struct hinic_api_cmd_chain *chain, + struct hinic_api_cmd_chain_attr *attr) +{ + struct hinic_hwif *hwif = attr->hwif; + struct pci_dev *pdev = hwif->pdev; + size_t cell_ctxt_size; + + chain->hwif = hwif; + chain->chain_type = attr->chain_type; + chain->num_cells = attr->num_cells; + chain->cell_size = attr->cell_size; + + chain->prod_idx = 0; + chain->cons_idx = 0; + + sema_init(&chain->sem, 1); + + cell_ctxt_size = chain->num_cells * sizeof(*chain->cell_ctxt); + chain->cell_ctxt = devm_kzalloc(&pdev->dev, cell_ctxt_size, GFP_KERNEL); + if (!chain->cell_ctxt) + return -ENOMEM; + + chain->wb_status = dma_zalloc_coherent(&pdev->dev, + sizeof(*chain->wb_status), + &chain->wb_status_paddr, + GFP_KERNEL); + if (!chain->wb_status) { + dev_err(&pdev->dev, "Failed to allocate DMA wb status\n"); + return -ENOMEM; + } + + return 0; +} + +/** + * api_chain_free - free API CMD specific chain + * @chain: the API CMD specific chain to free + **/ +static void api_chain_free(struct hinic_api_cmd_chain *chain) +{ + struct hinic_hwif *hwif = chain->hwif; + struct pci_dev *pdev = hwif->pdev; + + dma_free_coherent(&pdev->dev, sizeof(*chain->wb_status), + chain->wb_status, chain->wb_status_paddr); +} + +/** + * api_cmd_create_chain - create API CMD specific chain + * @attr: attributes to set the chain + * + * Return the created chain + 
**/ +static struct hinic_api_cmd_chain * + api_cmd_create_chain(struct hinic_api_cmd_chain_attr *attr) +{ + struct hinic_hwif *hwif = attr->hwif; + struct pci_dev *pdev = hwif->pdev; + struct hinic_api_cmd_chain *chain; + int err; + + if (attr->num_cells & (attr->num_cells - 1)) { + dev_err(&pdev->dev, "Invalid number of cells, must be power of 2\n"); + return ERR_PTR(-EINVAL); + } + + chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL); + if (!chain) + return ERR_PTR(-ENOMEM); + + err = api_chain_init(chain, attr); + if (err) { + dev_err(&pdev->dev, "Failed to initialize chain\n"); + return ERR_PTR(err); + } + + err = api_cmd_create_cells(chain); + if (err) { + dev_err(&pdev->dev, "Failed to create cells for API CMD chain\n"); + goto err_create_cells; + } + + err = api_cmd_chain_hw_init(chain); + if (err) { + dev_err(&pdev->dev, "Failed to initialize chain HW\n"); + goto err_chain_hw_init; + } + + return chain; + +err_chain_hw_init: + api_cmd_destroy_cells(chain, chain->num_cells); + +err_create_cells: + api_chain_free(chain); + return ERR_PTR(err); +} + +/** + * api_cmd_destroy_chain - destroy API CMD specific chain + * @chain: the API CMD specific chain to destroy + **/ +static void api_cmd_destroy_chain(struct hinic_api_cmd_chain *chain) +{ + api_cmd_chain_hw_clean(chain); + api_cmd_destroy_cells(chain, chain->num_cells); + api_chain_free(chain); +} + +/** + * hinic_api_cmd_init - Initialize all the API CMD chains + * @chain: the API CMD chains that are initialized + * @hwif: the hardware interface of a pci function device + * + * Return 0 - Success, negative - Failure + **/ +int hinic_api_cmd_init(struct hinic_api_cmd_chain **chain, + struct hinic_hwif *hwif) +{ + enum hinic_api_cmd_chain_type type, chain_type; + struct hinic_api_cmd_chain_attr attr; + struct pci_dev *pdev = hwif->pdev; + size_t hw_cell_sz; + int err; + + hw_cell_sz = sizeof(struct hinic_api_cmd_cell); + + attr.hwif = hwif; + attr.num_cells = API_CHAIN_NUM_CELLS; + attr.cell_size = API_CMD_CELL_SIZE(hw_cell_sz); + + chain_type = HINIC_API_CMD_WRITE_TO_MGMT_CPU; + for ( ; chain_type < HINIC_API_CMD_MAX; chain_type++) { + attr.chain_type = chain_type; + + if (chain_type != HINIC_API_CMD_WRITE_TO_MGMT_CPU) + continue; + + chain[chain_type] = api_cmd_create_chain(&attr); + if (IS_ERR(chain[chain_type])) { + dev_err(&pdev->dev, "Failed to create chain %d\n", + chain_type); + err = PTR_ERR(chain[chain_type]); + goto err_create_chain; + } + } + + return 0; + +err_create_chain: + type = HINIC_API_CMD_WRITE_TO_MGMT_CPU; + for ( ; type < chain_type; type++) { + if (type != HINIC_API_CMD_WRITE_TO_MGMT_CPU) + continue; + + api_cmd_destroy_chain(chain[type]); + } + + return err; +} + +/** + * hinic_api_cmd_free - free the API CMD chains + * @chain: the API CMD chains that are freed + **/ +void hinic_api_cmd_free(struct hinic_api_cmd_chain **chain) +{ + enum hinic_api_cmd_chain_type chain_type; + + chain_type = HINIC_API_CMD_WRITE_TO_MGMT_CPU; + for ( ; chain_type < HINIC_API_CMD_MAX; chain_type++) { + if (chain_type != HINIC_API_CMD_WRITE_TO_MGMT_CPU) + continue; + + api_cmd_destroy_chain(chain[chain_type]); + } +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h new file mode 100644 index 000000000000..31b94d5d47f7 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h @@ -0,0 +1,208 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can 
redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef HINIC_HW_API_CMD_H +#define HINIC_HW_API_CMD_H + +#include +#include + +#include "hinic_hw_if.h" + +#define HINIC_API_CMD_PI_IDX_SHIFT 0 + +#define HINIC_API_CMD_PI_IDX_MASK 0xFFFFFF + +#define HINIC_API_CMD_PI_SET(val, member) \ + (((u32)(val) & HINIC_API_CMD_PI_##member##_MASK) << \ + HINIC_API_CMD_PI_##member##_SHIFT) + +#define HINIC_API_CMD_PI_CLEAR(val, member) \ + ((val) & (~(HINIC_API_CMD_PI_##member##_MASK \ + << HINIC_API_CMD_PI_##member##_SHIFT))) + +#define HINIC_API_CMD_CHAIN_REQ_RESTART_SHIFT 1 + +#define HINIC_API_CMD_CHAIN_REQ_RESTART_MASK 0x1 + +#define HINIC_API_CMD_CHAIN_REQ_SET(val, member) \ + (((u32)(val) & HINIC_API_CMD_CHAIN_REQ_##member##_MASK) << \ + HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT) + +#define HINIC_API_CMD_CHAIN_REQ_GET(val, member) \ + (((val) >> HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT) & \ + HINIC_API_CMD_CHAIN_REQ_##member##_MASK) + +#define HINIC_API_CMD_CHAIN_REQ_CLEAR(val, member) \ + ((val) & (~(HINIC_API_CMD_CHAIN_REQ_##member##_MASK \ + << HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT))) + +#define HINIC_API_CMD_CHAIN_CTRL_RESTART_WB_STAT_SHIFT 1 +#define HINIC_API_CMD_CHAIN_CTRL_XOR_ERR_SHIFT 2 +#define HINIC_API_CMD_CHAIN_CTRL_AEQE_EN_SHIFT 4 +#define HINIC_API_CMD_CHAIN_CTRL_AEQ_ID_SHIFT 8 +#define HINIC_API_CMD_CHAIN_CTRL_XOR_CHK_EN_SHIFT 28 +#define HINIC_API_CMD_CHAIN_CTRL_CELL_SIZE_SHIFT 30 + +#define HINIC_API_CMD_CHAIN_CTRL_RESTART_WB_STAT_MASK 0x1 +#define HINIC_API_CMD_CHAIN_CTRL_XOR_ERR_MASK 0x1 +#define HINIC_API_CMD_CHAIN_CTRL_AEQE_EN_MASK 0x1 +#define HINIC_API_CMD_CHAIN_CTRL_AEQ_ID_MASK 0x3 +#define HINIC_API_CMD_CHAIN_CTRL_XOR_CHK_EN_MASK 0x3 +#define HINIC_API_CMD_CHAIN_CTRL_CELL_SIZE_MASK 0x3 + +#define HINIC_API_CMD_CHAIN_CTRL_SET(val, member) \ + (((u32)(val) & HINIC_API_CMD_CHAIN_CTRL_##member##_MASK) << \ + HINIC_API_CMD_CHAIN_CTRL_##member##_SHIFT) + +#define HINIC_API_CMD_CHAIN_CTRL_CLEAR(val, member) \ + ((val) & (~(HINIC_API_CMD_CHAIN_CTRL_##member##_MASK \ + << HINIC_API_CMD_CHAIN_CTRL_##member##_SHIFT))) + +#define HINIC_API_CMD_CELL_CTRL_DATA_SZ_SHIFT 0 +#define HINIC_API_CMD_CELL_CTRL_RD_DMA_ATTR_SHIFT 16 +#define HINIC_API_CMD_CELL_CTRL_WR_DMA_ATTR_SHIFT 24 +#define HINIC_API_CMD_CELL_CTRL_XOR_CHKSUM_SHIFT 56 + +#define HINIC_API_CMD_CELL_CTRL_DATA_SZ_MASK 0x3F +#define HINIC_API_CMD_CELL_CTRL_RD_DMA_ATTR_MASK 0x3F +#define HINIC_API_CMD_CELL_CTRL_WR_DMA_ATTR_MASK 0x3F +#define HINIC_API_CMD_CELL_CTRL_XOR_CHKSUM_MASK 0xFF + +#define HINIC_API_CMD_CELL_CTRL_SET(val, member) \ + ((((u64)val) & HINIC_API_CMD_CELL_CTRL_##member##_MASK) << \ + HINIC_API_CMD_CELL_CTRL_##member##_SHIFT) + +#define HINIC_API_CMD_DESC_API_TYPE_SHIFT 0 +#define HINIC_API_CMD_DESC_RD_WR_SHIFT 1 +#define HINIC_API_CMD_DESC_MGMT_BYPASS_SHIFT 2 +#define HINIC_API_CMD_DESC_DEST_SHIFT 32 +#define HINIC_API_CMD_DESC_SIZE_SHIFT 40 +#define HINIC_API_CMD_DESC_XOR_CHKSUM_SHIFT 56 + +#define HINIC_API_CMD_DESC_API_TYPE_MASK 0x1 +#define HINIC_API_CMD_DESC_RD_WR_MASK 0x1 +#define HINIC_API_CMD_DESC_MGMT_BYPASS_MASK 0x1 +#define HINIC_API_CMD_DESC_DEST_MASK 0x1F +#define HINIC_API_CMD_DESC_SIZE_MASK 0x7FF +#define 
HINIC_API_CMD_DESC_XOR_CHKSUM_MASK 0xFF + +#define HINIC_API_CMD_DESC_SET(val, member) \ + ((((u64)val) & HINIC_API_CMD_DESC_##member##_MASK) << \ + HINIC_API_CMD_DESC_##member##_SHIFT) + +#define HINIC_API_CMD_STATUS_HEADER_CHAIN_ID_SHIFT 16 + +#define HINIC_API_CMD_STATUS_HEADER_CHAIN_ID_MASK 0xFF + +#define HINIC_API_CMD_STATUS_HEADER_GET(val, member) \ + (((val) >> HINIC_API_CMD_STATUS_HEADER_##member##_SHIFT) & \ + HINIC_API_CMD_STATUS_HEADER_##member##_MASK) + +#define HINIC_API_CMD_STATUS_CONS_IDX_SHIFT 0 +#define HINIC_API_CMD_STATUS_CHKSUM_ERR_SHIFT 28 + +#define HINIC_API_CMD_STATUS_CONS_IDX_MASK 0xFFFFFF +#define HINIC_API_CMD_STATUS_CHKSUM_ERR_MASK 0x3 + +#define HINIC_API_CMD_STATUS_GET(val, member) \ + (((val) >> HINIC_API_CMD_STATUS_##member##_SHIFT) & \ + HINIC_API_CMD_STATUS_##member##_MASK) + +enum hinic_api_cmd_chain_type { + HINIC_API_CMD_WRITE_TO_MGMT_CPU = 2, + + HINIC_API_CMD_MAX, +}; + +struct hinic_api_cmd_chain_attr { + struct hinic_hwif *hwif; + enum hinic_api_cmd_chain_type chain_type; + + u32 num_cells; + u16 cell_size; +}; + +struct hinic_api_cmd_status { + u64 header; + u32 status; + u32 rsvd0; + u32 rsvd1; + u32 rsvd2; + u64 rsvd3; +}; + +/* HW struct */ +struct hinic_api_cmd_cell { + u64 ctrl; + + /* address is 64 bit in HW struct */ + u64 next_cell_paddr; + + u64 desc; + + /* HW struct */ + union { + struct { + u64 hw_cmd_paddr; + } write; + + struct { + u64 hw_wb_resp_paddr; + u64 hw_cmd_paddr; + } read; + }; +}; + +struct hinic_api_cmd_cell_ctxt { + dma_addr_t cell_paddr; + struct hinic_api_cmd_cell *cell_vaddr; + + dma_addr_t api_cmd_paddr; + u8 *api_cmd_vaddr; +}; + +struct hinic_api_cmd_chain { + struct hinic_hwif *hwif; + enum hinic_api_cmd_chain_type chain_type; + + u32 num_cells; + u16 cell_size; + + /* HW members in 24 bit format */ + u32 prod_idx; + u32 cons_idx; + + struct semaphore sem; + + struct hinic_api_cmd_cell_ctxt *cell_ctxt; + + dma_addr_t wb_status_paddr; + struct hinic_api_cmd_status *wb_status; + + dma_addr_t head_cell_paddr; + struct hinic_api_cmd_cell *head_node; + struct hinic_api_cmd_cell *curr_node; +}; + +int hinic_api_cmd_write(struct hinic_api_cmd_chain *chain, + enum hinic_node_id dest, u8 *cmd, u16 size); + +int hinic_api_cmd_init(struct hinic_api_cmd_chain **chain, + struct hinic_hwif *hwif); + +void hinic_api_cmd_free(struct hinic_api_cmd_chain **chain); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c new file mode 100644 index 000000000000..7d95f0866fb0 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c @@ -0,0 +1,946 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
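/*
 * Illustrative sketch, not part of this patch: how the *_SET/*_CLEAR
 * bit-field helpers defined in hinic_hw_api_cmd.h above are meant to be
 * combined.  The helper name and the field values (cell size code 2,
 * AEQE enabled) are made up for illustration only.
 */
static u32 example_update_chain_ctrl(u32 old_ctrl)
{
	u32 ctrl;

	/* read-modify-write: clear the old field bits first */
	ctrl = HINIC_API_CMD_CHAIN_CTRL_CLEAR(old_ctrl, CELL_SIZE);
	ctrl = HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN);

	/* then OR the new values in at their shifted positions */
	ctrl |= HINIC_API_CMD_CHAIN_CTRL_SET(2, CELL_SIZE) |
		HINIC_API_CMD_CHAIN_CTRL_SET(1, AEQE_EN);

	return ctrl;
}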
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hinic_common.h" +#include "hinic_hw_if.h" +#include "hinic_hw_eqs.h" +#include "hinic_hw_mgmt.h" +#include "hinic_hw_wqe.h" +#include "hinic_hw_wq.h" +#include "hinic_hw_cmdq.h" +#include "hinic_hw_io.h" +#include "hinic_hw_dev.h" + +#define CMDQ_CEQE_TYPE_SHIFT 0 + +#define CMDQ_CEQE_TYPE_MASK 0x7 + +#define CMDQ_CEQE_GET(val, member) \ + (((val) >> CMDQ_CEQE_##member##_SHIFT) \ + & CMDQ_CEQE_##member##_MASK) + +#define CMDQ_WQE_ERRCODE_VAL_SHIFT 20 + +#define CMDQ_WQE_ERRCODE_VAL_MASK 0xF + +#define CMDQ_WQE_ERRCODE_GET(val, member) \ + (((val) >> CMDQ_WQE_ERRCODE_##member##_SHIFT) \ + & CMDQ_WQE_ERRCODE_##member##_MASK) + +#define CMDQ_DB_PI_OFF(pi) (((u16)LOWER_8_BITS(pi)) << 3) + +#define CMDQ_DB_ADDR(db_base, pi) ((db_base) + CMDQ_DB_PI_OFF(pi)) + +#define CMDQ_WQE_HEADER(wqe) ((struct hinic_cmdq_header *)(wqe)) + +#define CMDQ_WQE_COMPLETED(ctrl_info) \ + HINIC_CMDQ_CTRL_GET(ctrl_info, HW_BUSY_BIT) + +#define FIRST_DATA_TO_WRITE_LAST sizeof(u64) + +#define CMDQ_DB_OFF SZ_2K + +#define CMDQ_WQEBB_SIZE 64 +#define CMDQ_WQE_SIZE 64 +#define CMDQ_DEPTH SZ_4K + +#define CMDQ_WQ_PAGE_SIZE SZ_4K + +#define WQE_LCMD_SIZE 64 +#define WQE_SCMD_SIZE 64 + +#define COMPLETE_LEN 3 + +#define CMDQ_TIMEOUT 1000 + +#define CMDQ_PFN(addr, page_size) ((addr) >> (ilog2(page_size))) + +#define cmdq_to_cmdqs(cmdq) container_of((cmdq) - (cmdq)->cmdq_type, \ + struct hinic_cmdqs, cmdq[0]) + +#define cmdqs_to_func_to_io(cmdqs) container_of(cmdqs, \ + struct hinic_func_to_io, \ + cmdqs) + +enum cmdq_wqe_type { + WQE_LCMD_TYPE = 0, + WQE_SCMD_TYPE = 1, +}; + +enum completion_format { + COMPLETE_DIRECT = 0, + COMPLETE_SGE = 1, +}; + +enum data_format { + DATA_SGE = 0, + DATA_DIRECT = 1, +}; + +enum bufdesc_len { + BUFDESC_LCMD_LEN = 2, /* 16 bytes - 2(8 byte unit) */ + BUFDESC_SCMD_LEN = 3, /* 24 bytes - 3(8 byte unit) */ +}; + +enum ctrl_sect_len { + CTRL_SECT_LEN = 1, /* 4 bytes (ctrl) - 1(8 byte unit) */ + CTRL_DIRECT_SECT_LEN = 2, /* 12 bytes (ctrl + rsvd) - 2(8 byte unit) */ +}; + +enum cmdq_scmd_type { + CMDQ_SET_ARM_CMD = 2, +}; + +enum cmdq_cmd_type { + CMDQ_CMD_SYNC_DIRECT_RESP = 0, + CMDQ_CMD_SYNC_SGE_RESP = 1, +}; + +enum completion_request { + NO_CEQ = 0, + CEQ_SET = 1, +}; + +/** + * hinic_alloc_cmdq_buf - alloc buffer for sending command + * @cmdqs: the cmdqs + * @cmdq_buf: the buffer returned in this struct + * + * Return 0 - Success, negative - Failure + **/ +int hinic_alloc_cmdq_buf(struct hinic_cmdqs *cmdqs, + struct hinic_cmdq_buf *cmdq_buf) +{ + struct hinic_hwif *hwif = cmdqs->hwif; + struct pci_dev *pdev = hwif->pdev; + + cmdq_buf->buf = pci_pool_alloc(cmdqs->cmdq_buf_pool, GFP_KERNEL, + &cmdq_buf->dma_addr); + if (!cmdq_buf->buf) { + dev_err(&pdev->dev, "Failed to allocate cmd from the pool\n"); + return -ENOMEM; + } + + return 0; +} + +/** + * hinic_free_cmdq_buf - free buffer + * @cmdqs: the cmdqs + * @cmdq_buf: the buffer to free that is in this struct + **/ +void hinic_free_cmdq_buf(struct hinic_cmdqs *cmdqs, + struct hinic_cmdq_buf *cmdq_buf) +{ + pci_pool_free(cmdqs->cmdq_buf_pool, cmdq_buf->buf, cmdq_buf->dma_addr); +} + +static unsigned int cmdq_wqe_size_from_bdlen(enum bufdesc_len len) +{ + unsigned int wqe_size = 0; + + switch (len) { + case BUFDESC_LCMD_LEN: + wqe_size = WQE_LCMD_SIZE; + break; + case BUFDESC_SCMD_LEN: + wqe_size = WQE_SCMD_SIZE; + break; + } + + return wqe_size; +} + +static 
void cmdq_set_sge_completion(struct hinic_cmdq_completion *completion, + struct hinic_cmdq_buf *buf_out) +{ + struct hinic_sge_resp *sge_resp = &completion->sge_resp; + + hinic_set_sge(&sge_resp->sge, buf_out->dma_addr, buf_out->size); +} + +static void cmdq_prepare_wqe_ctrl(struct hinic_cmdq_wqe *wqe, int wrapped, + enum hinic_cmd_ack_type ack_type, + enum hinic_mod_type mod, u8 cmd, u16 prod_idx, + enum completion_format complete_format, + enum data_format data_format, + enum bufdesc_len buf_len) +{ + struct hinic_cmdq_wqe_lcmd *wqe_lcmd; + struct hinic_cmdq_wqe_scmd *wqe_scmd; + enum ctrl_sect_len ctrl_len; + struct hinic_ctrl *ctrl; + u32 saved_data; + + if (data_format == DATA_SGE) { + wqe_lcmd = &wqe->wqe_lcmd; + + wqe_lcmd->status.status_info = 0; + ctrl = &wqe_lcmd->ctrl; + ctrl_len = CTRL_SECT_LEN; + } else { + wqe_scmd = &wqe->direct_wqe.wqe_scmd; + + wqe_scmd->status.status_info = 0; + ctrl = &wqe_scmd->ctrl; + ctrl_len = CTRL_DIRECT_SECT_LEN; + } + + ctrl->ctrl_info = HINIC_CMDQ_CTRL_SET(prod_idx, PI) | + HINIC_CMDQ_CTRL_SET(cmd, CMD) | + HINIC_CMDQ_CTRL_SET(mod, MOD) | + HINIC_CMDQ_CTRL_SET(ack_type, ACK_TYPE); + + CMDQ_WQE_HEADER(wqe)->header_info = + HINIC_CMDQ_WQE_HEADER_SET(buf_len, BUFDESC_LEN) | + HINIC_CMDQ_WQE_HEADER_SET(complete_format, COMPLETE_FMT) | + HINIC_CMDQ_WQE_HEADER_SET(data_format, DATA_FMT) | + HINIC_CMDQ_WQE_HEADER_SET(CEQ_SET, COMPLETE_REQ) | + HINIC_CMDQ_WQE_HEADER_SET(COMPLETE_LEN, COMPLETE_SECT_LEN) | + HINIC_CMDQ_WQE_HEADER_SET(ctrl_len, CTRL_LEN) | + HINIC_CMDQ_WQE_HEADER_SET(wrapped, TOGGLED_WRAPPED); + + saved_data = CMDQ_WQE_HEADER(wqe)->saved_data; + saved_data = HINIC_SAVED_DATA_CLEAR(saved_data, ARM); + + if ((cmd == CMDQ_SET_ARM_CMD) && (mod == HINIC_MOD_COMM)) + CMDQ_WQE_HEADER(wqe)->saved_data |= + HINIC_SAVED_DATA_SET(1, ARM); + else + CMDQ_WQE_HEADER(wqe)->saved_data = saved_data; +} + +static void cmdq_set_lcmd_bufdesc(struct hinic_cmdq_wqe_lcmd *wqe_lcmd, + struct hinic_cmdq_buf *buf_in) +{ + hinic_set_sge(&wqe_lcmd->buf_desc.sge, buf_in->dma_addr, buf_in->size); +} + +static void cmdq_set_direct_wqe_data(struct hinic_cmdq_direct_wqe *wqe, + void *buf_in, u32 in_size) +{ + struct hinic_cmdq_wqe_scmd *wqe_scmd = &wqe->wqe_scmd; + + wqe_scmd->buf_desc.buf_len = in_size; + memcpy(wqe_scmd->buf_desc.data, buf_in, in_size); +} + +static void cmdq_set_lcmd_wqe(struct hinic_cmdq_wqe *wqe, + enum cmdq_cmd_type cmd_type, + struct hinic_cmdq_buf *buf_in, + struct hinic_cmdq_buf *buf_out, int wrapped, + enum hinic_cmd_ack_type ack_type, + enum hinic_mod_type mod, u8 cmd, u16 prod_idx) +{ + struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd; + enum completion_format complete_format; + + switch (cmd_type) { + case CMDQ_CMD_SYNC_SGE_RESP: + complete_format = COMPLETE_SGE; + cmdq_set_sge_completion(&wqe_lcmd->completion, buf_out); + break; + case CMDQ_CMD_SYNC_DIRECT_RESP: + complete_format = COMPLETE_DIRECT; + wqe_lcmd->completion.direct_resp = 0; + break; + } + + cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd, + prod_idx, complete_format, DATA_SGE, + BUFDESC_LCMD_LEN); + + cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in); +} + +static void cmdq_set_direct_wqe(struct hinic_cmdq_wqe *wqe, + enum cmdq_cmd_type cmd_type, + void *buf_in, u16 in_size, + struct hinic_cmdq_buf *buf_out, int wrapped, + enum hinic_cmd_ack_type ack_type, + enum hinic_mod_type mod, u8 cmd, u16 prod_idx) +{ + struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe; + enum completion_format complete_format; + struct hinic_cmdq_wqe_scmd *wqe_scmd; + + wqe_scmd = 
&direct_wqe->wqe_scmd; + + switch (cmd_type) { + case CMDQ_CMD_SYNC_SGE_RESP: + complete_format = COMPLETE_SGE; + cmdq_set_sge_completion(&wqe_scmd->completion, buf_out); + break; + case CMDQ_CMD_SYNC_DIRECT_RESP: + complete_format = COMPLETE_DIRECT; + wqe_scmd->completion.direct_resp = 0; + break; + } + + cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd, prod_idx, + complete_format, DATA_DIRECT, BUFDESC_SCMD_LEN); + + cmdq_set_direct_wqe_data(direct_wqe, buf_in, in_size); +} + +static void cmdq_wqe_fill(void *dst, void *src) +{ + memcpy(dst + FIRST_DATA_TO_WRITE_LAST, src + FIRST_DATA_TO_WRITE_LAST, + CMDQ_WQE_SIZE - FIRST_DATA_TO_WRITE_LAST); + + wmb(); /* The first 8 bytes should be written last */ + + *(u64 *)dst = *(u64 *)src; +} + +static void cmdq_fill_db(u32 *db_info, + enum hinic_cmdq_type cmdq_type, u16 prod_idx) +{ + *db_info = HINIC_CMDQ_DB_INFO_SET(UPPER_8_BITS(prod_idx), HI_PROD_IDX) | + HINIC_CMDQ_DB_INFO_SET(HINIC_CTRL_PATH, PATH) | + HINIC_CMDQ_DB_INFO_SET(cmdq_type, CMDQ_TYPE) | + HINIC_CMDQ_DB_INFO_SET(HINIC_DB_CMDQ_TYPE, DB_TYPE); +} + +static void cmdq_set_db(struct hinic_cmdq *cmdq, + enum hinic_cmdq_type cmdq_type, u16 prod_idx) +{ + u32 db_info; + + cmdq_fill_db(&db_info, cmdq_type, prod_idx); + + /* The data that is written to HW should be in Big Endian Format */ + db_info = cpu_to_be32(db_info); + + wmb(); /* write all before the doorbell */ + + writel(db_info, CMDQ_DB_ADDR(cmdq->db_base, prod_idx)); +} + +static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq, + enum hinic_mod_type mod, u8 cmd, + struct hinic_cmdq_buf *buf_in, + u64 *resp) +{ + struct hinic_cmdq_wqe *curr_cmdq_wqe, cmdq_wqe; + u16 curr_prod_idx, next_prod_idx; + int errcode, wrapped, num_wqebbs; + struct hinic_wq *wq = cmdq->wq; + struct hinic_hw_wqe *hw_wqe; + struct completion done; + + /* Keep doorbell index correct. bh - for tasklet(ceq). 
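 * The completion path (cmdq_ceq_handler() -> cmdq_sync_cmd_handler() below)
 * takes this lock from the CEQ tasklet, hence the _bh variant here.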
*/ + spin_lock_bh(&cmdq->cmdq_lock); + + /* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow*/ + hw_wqe = hinic_get_wqe(wq, WQE_LCMD_SIZE, &curr_prod_idx); + if (IS_ERR(hw_wqe)) { + spin_unlock_bh(&cmdq->cmdq_lock); + return -EBUSY; + } + + curr_cmdq_wqe = &hw_wqe->cmdq_wqe; + + wrapped = cmdq->wrapped; + + num_wqebbs = ALIGN(WQE_LCMD_SIZE, wq->wqebb_size) / wq->wqebb_size; + next_prod_idx = curr_prod_idx + num_wqebbs; + if (next_prod_idx >= wq->q_depth) { + cmdq->wrapped = !cmdq->wrapped; + next_prod_idx -= wq->q_depth; + } + + cmdq->errcode[curr_prod_idx] = &errcode; + + init_completion(&done); + cmdq->done[curr_prod_idx] = &done; + + cmdq_set_lcmd_wqe(&cmdq_wqe, CMDQ_CMD_SYNC_DIRECT_RESP, buf_in, NULL, + wrapped, HINIC_CMD_ACK_TYPE_CMDQ, mod, cmd, + curr_prod_idx); + + /* The data that is written to HW should be in Big Endian Format */ + hinic_cpu_to_be32(&cmdq_wqe, WQE_LCMD_SIZE); + + /* CMDQ WQE is not shadow, therefore wqe will be written to wq */ + cmdq_wqe_fill(curr_cmdq_wqe, &cmdq_wqe); + + cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx); + + spin_unlock_bh(&cmdq->cmdq_lock); + + if (!wait_for_completion_timeout(&done, CMDQ_TIMEOUT)) { + spin_lock_bh(&cmdq->cmdq_lock); + + if (cmdq->errcode[curr_prod_idx] == &errcode) + cmdq->errcode[curr_prod_idx] = NULL; + + if (cmdq->done[curr_prod_idx] == &done) + cmdq->done[curr_prod_idx] = NULL; + + spin_unlock_bh(&cmdq->cmdq_lock); + + return -ETIMEDOUT; + } + + smp_rmb(); /* read error code after completion */ + + if (resp) { + struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &curr_cmdq_wqe->wqe_lcmd; + + *resp = cpu_to_be64(wqe_lcmd->completion.direct_resp); + } + + if (errcode != 0) + return -EFAULT; + + return 0; +} + +static int cmdq_set_arm_bit(struct hinic_cmdq *cmdq, void *buf_in, + u16 in_size) +{ + struct hinic_cmdq_wqe *curr_cmdq_wqe, cmdq_wqe; + u16 curr_prod_idx, next_prod_idx; + struct hinic_wq *wq = cmdq->wq; + struct hinic_hw_wqe *hw_wqe; + int wrapped, num_wqebbs; + + /* Keep doorbell index correct */ + spin_lock(&cmdq->cmdq_lock); + + /* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow*/ + hw_wqe = hinic_get_wqe(wq, WQE_SCMD_SIZE, &curr_prod_idx); + if (IS_ERR(hw_wqe)) { + spin_unlock(&cmdq->cmdq_lock); + return -EBUSY; + } + + curr_cmdq_wqe = &hw_wqe->cmdq_wqe; + + wrapped = cmdq->wrapped; + + num_wqebbs = ALIGN(WQE_SCMD_SIZE, wq->wqebb_size) / wq->wqebb_size; + next_prod_idx = curr_prod_idx + num_wqebbs; + if (next_prod_idx >= wq->q_depth) { + cmdq->wrapped = !cmdq->wrapped; + next_prod_idx -= wq->q_depth; + } + + cmdq_set_direct_wqe(&cmdq_wqe, CMDQ_CMD_SYNC_DIRECT_RESP, buf_in, + in_size, NULL, wrapped, HINIC_CMD_ACK_TYPE_CMDQ, + HINIC_MOD_COMM, CMDQ_SET_ARM_CMD, curr_prod_idx); + + /* The data that is written to HW should be in Big Endian Format */ + hinic_cpu_to_be32(&cmdq_wqe, WQE_SCMD_SIZE); + + /* cmdq wqe is not shadow, therefore wqe will be written to wq */ + cmdq_wqe_fill(curr_cmdq_wqe, &cmdq_wqe); + + cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx); + + spin_unlock(&cmdq->cmdq_lock); + return 0; +} + +static int cmdq_params_valid(struct hinic_cmdq_buf *buf_in) +{ + if (buf_in->size > HINIC_CMDQ_MAX_DATA_SIZE) + return -EINVAL; + + return 0; +} + +/** + * hinic_cmdq_direct_resp - send command with direct data as resp + * @cmdqs: the cmdqs + * @mod: module on the card that will handle the command + * @cmd: the command + * @buf_in: the buffer for the command + * @resp: the response to return + * + * Return 0 - Success, negative - Failure + **/ +int hinic_cmdq_direct_resp(struct 
hinic_cmdqs *cmdqs, + enum hinic_mod_type mod, u8 cmd, + struct hinic_cmdq_buf *buf_in, u64 *resp) +{ + struct hinic_hwif *hwif = cmdqs->hwif; + struct pci_dev *pdev = hwif->pdev; + int err; + + err = cmdq_params_valid(buf_in); + if (err) { + dev_err(&pdev->dev, "Invalid CMDQ parameters\n"); + return err; + } + + return cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[HINIC_CMDQ_SYNC], + mod, cmd, buf_in, resp); +} + +/** + * hinic_set_arm_bit - set arm bit for enable interrupt again + * @cmdqs: the cmdqs + * @q_type: type of queue to set the arm bit for + * @q_id: the queue number + * + * Return 0 - Success, negative - Failure + **/ +int hinic_set_arm_bit(struct hinic_cmdqs *cmdqs, + enum hinic_set_arm_qtype q_type, u32 q_id) +{ + struct hinic_cmdq *cmdq = &cmdqs->cmdq[HINIC_CMDQ_SYNC]; + struct hinic_hwif *hwif = cmdqs->hwif; + struct pci_dev *pdev = hwif->pdev; + struct hinic_cmdq_arm_bit arm_bit; + int err; + + arm_bit.q_type = q_type; + arm_bit.q_id = q_id; + + err = cmdq_set_arm_bit(cmdq, &arm_bit, sizeof(arm_bit)); + if (err) { + dev_err(&pdev->dev, "Failed to set arm for qid %d\n", q_id); + return err; + } + + return 0; +} + +static void clear_wqe_complete_bit(struct hinic_cmdq *cmdq, + struct hinic_cmdq_wqe *wqe) +{ + u32 header_info = be32_to_cpu(CMDQ_WQE_HEADER(wqe)->header_info); + unsigned int bufdesc_len, wqe_size; + struct hinic_ctrl *ctrl; + + bufdesc_len = HINIC_CMDQ_WQE_HEADER_GET(header_info, BUFDESC_LEN); + wqe_size = cmdq_wqe_size_from_bdlen(bufdesc_len); + if (wqe_size == WQE_LCMD_SIZE) { + struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd; + + ctrl = &wqe_lcmd->ctrl; + } else { + struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe; + struct hinic_cmdq_wqe_scmd *wqe_scmd; + + wqe_scmd = &direct_wqe->wqe_scmd; + ctrl = &wqe_scmd->ctrl; + } + + /* clear HW busy bit */ + ctrl->ctrl_info = 0; + + wmb(); /* verify wqe is clear */ +} + +/** + * cmdq_arm_ceq_handler - cmdq completion event handler for arm command + * @cmdq: the cmdq of the arm command + * @wqe: the wqe of the arm command + * + * Return 0 - Success, negative - Failure + **/ +static int cmdq_arm_ceq_handler(struct hinic_cmdq *cmdq, + struct hinic_cmdq_wqe *wqe) +{ + struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe; + struct hinic_cmdq_wqe_scmd *wqe_scmd; + struct hinic_ctrl *ctrl; + u32 ctrl_info; + + wqe_scmd = &direct_wqe->wqe_scmd; + ctrl = &wqe_scmd->ctrl; + ctrl_info = be32_to_cpu(ctrl->ctrl_info); + + /* HW should toggle the HW BUSY BIT */ + if (!CMDQ_WQE_COMPLETED(ctrl_info)) + return -EBUSY; + + clear_wqe_complete_bit(cmdq, wqe); + + hinic_put_wqe(cmdq->wq, WQE_SCMD_SIZE); + return 0; +} + +static void cmdq_update_errcode(struct hinic_cmdq *cmdq, u16 prod_idx, + int errcode) +{ + if (cmdq->errcode[prod_idx]) + *cmdq->errcode[prod_idx] = errcode; +} + +/** + * cmdq_arm_ceq_handler - cmdq completion event handler for sync command + * @cmdq: the cmdq of the command + * @cons_idx: the consumer index to update the error code for + * @errcode: the error code + **/ +static void cmdq_sync_cmd_handler(struct hinic_cmdq *cmdq, u16 cons_idx, + int errcode) +{ + u16 prod_idx = cons_idx; + + spin_lock(&cmdq->cmdq_lock); + cmdq_update_errcode(cmdq, prod_idx, errcode); + + wmb(); /* write all before update for the command request */ + + if (cmdq->done[prod_idx]) + complete(cmdq->done[prod_idx]); + spin_unlock(&cmdq->cmdq_lock); +} + +static int cmdq_cmd_ceq_handler(struct hinic_cmdq *cmdq, u16 ci, + struct hinic_cmdq_wqe *cmdq_wqe) +{ + struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &cmdq_wqe->wqe_lcmd; + struct 
hinic_status *status = &wqe_lcmd->status; + struct hinic_ctrl *ctrl = &wqe_lcmd->ctrl; + int errcode; + + if (!CMDQ_WQE_COMPLETED(be32_to_cpu(ctrl->ctrl_info))) + return -EBUSY; + + errcode = CMDQ_WQE_ERRCODE_GET(be32_to_cpu(status->status_info), VAL); + + cmdq_sync_cmd_handler(cmdq, ci, errcode); + + clear_wqe_complete_bit(cmdq, cmdq_wqe); + hinic_put_wqe(cmdq->wq, WQE_LCMD_SIZE); + return 0; +} + +/** + * cmdq_ceq_handler - cmdq completion event handler + * @handle: private data for the handler(cmdqs) + * @ceqe_data: ceq element data + **/ +static void cmdq_ceq_handler(void *handle, u32 ceqe_data) +{ + enum hinic_cmdq_type cmdq_type = CMDQ_CEQE_GET(ceqe_data, TYPE); + struct hinic_cmdqs *cmdqs = (struct hinic_cmdqs *)handle; + struct hinic_cmdq *cmdq = &cmdqs->cmdq[cmdq_type]; + struct hinic_cmdq_header *header; + struct hinic_hw_wqe *hw_wqe; + int err, set_arm = 0; + u32 saved_data; + u16 ci; + + /* Read the smallest wqe size for getting wqe size */ + while ((hw_wqe = hinic_read_wqe(cmdq->wq, WQE_SCMD_SIZE, &ci))) { + if (IS_ERR(hw_wqe)) + break; + + header = CMDQ_WQE_HEADER(&hw_wqe->cmdq_wqe); + saved_data = be32_to_cpu(header->saved_data); + + if (HINIC_SAVED_DATA_GET(saved_data, ARM)) { + /* arm_bit was set until here */ + set_arm = 0; + + if (cmdq_arm_ceq_handler(cmdq, &hw_wqe->cmdq_wqe)) + break; + } else { + set_arm = 1; + + hw_wqe = hinic_read_wqe(cmdq->wq, WQE_LCMD_SIZE, &ci); + if (IS_ERR(hw_wqe)) + break; + + if (cmdq_cmd_ceq_handler(cmdq, ci, &hw_wqe->cmdq_wqe)) + break; + } + } + + if (set_arm) { + struct hinic_hwif *hwif = cmdqs->hwif; + struct pci_dev *pdev = hwif->pdev; + + err = hinic_set_arm_bit(cmdqs, HINIC_SET_ARM_CMDQ, cmdq_type); + if (err) + dev_err(&pdev->dev, "Failed to set arm for CMDQ\n"); + } +} + +/** + * cmdq_init_queue_ctxt - init the queue ctxt of a cmdq + * @cmdq_ctxt: cmdq ctxt to initialize + * @cmdq: the cmdq + * @cmdq_pages: the memory of the queue + **/ +static void cmdq_init_queue_ctxt(struct hinic_cmdq_ctxt *cmdq_ctxt, + struct hinic_cmdq *cmdq, + struct hinic_cmdq_pages *cmdq_pages) +{ + struct hinic_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info; + u64 wq_first_page_paddr, cmdq_first_block_paddr, pfn; + struct hinic_cmdqs *cmdqs = cmdq_to_cmdqs(cmdq); + struct hinic_wq *wq = cmdq->wq; + + /* The data in the HW is in Big Endian Format */ + wq_first_page_paddr = be64_to_cpu(*wq->block_vaddr); + + pfn = CMDQ_PFN(wq_first_page_paddr, wq->wq_page_size); + + ctxt_info->curr_wqe_page_pfn = + HINIC_CMDQ_CTXT_PAGE_INFO_SET(pfn, CURR_WQE_PAGE_PFN) | + HINIC_CMDQ_CTXT_PAGE_INFO_SET(HINIC_CEQ_ID_CMDQ, EQ_ID) | + HINIC_CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_ARM) | + HINIC_CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_EN) | + HINIC_CMDQ_CTXT_PAGE_INFO_SET(cmdq->wrapped, WRAPPED); + + /* block PFN - Read Modify Write */ + cmdq_first_block_paddr = cmdq_pages->page_paddr; + + pfn = CMDQ_PFN(cmdq_first_block_paddr, wq->wq_page_size); + + ctxt_info->wq_block_pfn = + HINIC_CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN) | + HINIC_CMDQ_CTXT_BLOCK_INFO_SET(atomic_read(&wq->cons_idx), CI); + + cmdq_ctxt->func_idx = HINIC_HWIF_FUNC_IDX(cmdqs->hwif); + cmdq_ctxt->cmdq_type = cmdq->cmdq_type; +} + +/** + * init_cmdq - initialize cmdq + * @cmdq: the cmdq + * @wq: the wq attaced to the cmdq + * @q_type: the cmdq type of the cmdq + * @db_area: doorbell area for the cmdq + * + * Return 0 - Success, negative - Failure + **/ +static int init_cmdq(struct hinic_cmdq *cmdq, struct hinic_wq *wq, + enum hinic_cmdq_type q_type, void __iomem *db_area) +{ + int err; + + cmdq->wq = wq; + cmdq->cmdq_type = 
q_type; + cmdq->wrapped = 1; + + spin_lock_init(&cmdq->cmdq_lock); + + cmdq->done = vzalloc(wq->q_depth * sizeof(*cmdq->done)); + if (!cmdq->done) + return -ENOMEM; + + cmdq->errcode = vzalloc(wq->q_depth * sizeof(*cmdq->errcode)); + if (!cmdq->errcode) { + err = -ENOMEM; + goto err_errcode; + } + + cmdq->db_base = db_area + CMDQ_DB_OFF; + return 0; + +err_errcode: + vfree(cmdq->done); + return err; +} + +/** + * free_cmdq - Free cmdq + * @cmdq: the cmdq to free + **/ +static void free_cmdq(struct hinic_cmdq *cmdq) +{ + vfree(cmdq->errcode); + vfree(cmdq->done); +} + +/** + * init_cmdqs_ctxt - write the cmdq ctxt to HW after init all cmdq + * @hwdev: the NIC HW device + * @cmdqs: cmdqs to write the ctxts for + * &db_area: db_area for all the cmdqs + * + * Return 0 - Success, negative - Failure + **/ +static int init_cmdqs_ctxt(struct hinic_hwdev *hwdev, + struct hinic_cmdqs *cmdqs, void __iomem **db_area) +{ + struct hinic_hwif *hwif = hwdev->hwif; + enum hinic_cmdq_type type, cmdq_type; + struct hinic_cmdq_ctxt *cmdq_ctxts; + struct pci_dev *pdev = hwif->pdev; + struct hinic_pfhwdev *pfhwdev; + size_t cmdq_ctxts_size; + int err; + + if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { + dev_err(&pdev->dev, "Unsupported PCI function type\n"); + return -EINVAL; + } + + cmdq_ctxts_size = HINIC_MAX_CMDQ_TYPES * sizeof(*cmdq_ctxts); + cmdq_ctxts = devm_kzalloc(&pdev->dev, cmdq_ctxts_size, GFP_KERNEL); + if (!cmdq_ctxts) + return -ENOMEM; + + pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); + + cmdq_type = HINIC_CMDQ_SYNC; + for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) { + err = init_cmdq(&cmdqs->cmdq[cmdq_type], + &cmdqs->saved_wqs[cmdq_type], cmdq_type, + db_area[cmdq_type]); + if (err) { + dev_err(&pdev->dev, "Failed to initialize cmdq\n"); + goto err_init_cmdq; + } + + cmdq_init_queue_ctxt(&cmdq_ctxts[cmdq_type], + &cmdqs->cmdq[cmdq_type], + &cmdqs->cmdq_pages); + } + + /* Write the CMDQ ctxts */ + cmdq_type = HINIC_CMDQ_SYNC; + for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) { + err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, + HINIC_COMM_CMD_CMDQ_CTXT_SET, + &cmdq_ctxts[cmdq_type], + sizeof(cmdq_ctxts[cmdq_type]), + NULL, NULL, HINIC_MGMT_MSG_SYNC); + if (err) { + dev_err(&pdev->dev, "Failed to set CMDQ CTXT type = %d\n", + cmdq_type); + goto err_write_cmdq_ctxt; + } + } + + devm_kfree(&pdev->dev, cmdq_ctxts); + return 0; + +err_write_cmdq_ctxt: + cmdq_type = HINIC_MAX_CMDQ_TYPES; + +err_init_cmdq: + for (type = HINIC_CMDQ_SYNC; type < cmdq_type; type++) + free_cmdq(&cmdqs->cmdq[type]); + + devm_kfree(&pdev->dev, cmdq_ctxts); + return err; +} + +/** + * hinic_init_cmdqs - init all cmdqs + * @cmdqs: cmdqs to init + * @hwif: HW interface for accessing cmdqs + * @db_area: doorbell areas for all the cmdqs + * + * Return 0 - Success, negative - Failure + **/ +int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif, + void __iomem **db_area) +{ + struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs); + struct pci_dev *pdev = hwif->pdev; + struct hinic_hwdev *hwdev; + size_t saved_wqs_size; + u16 max_wqe_size; + int err; + + cmdqs->hwif = hwif; + cmdqs->cmdq_buf_pool = pci_pool_create("hinic_cmdq", pdev, + HINIC_CMDQ_BUF_SIZE, + HINIC_CMDQ_BUF_SIZE, 0); + if (!cmdqs->cmdq_buf_pool) + return -ENOMEM; + + saved_wqs_size = HINIC_MAX_CMDQ_TYPES * sizeof(struct hinic_wq); + cmdqs->saved_wqs = devm_kzalloc(&pdev->dev, saved_wqs_size, GFP_KERNEL); + if (!cmdqs->saved_wqs) { + err = -ENOMEM; + goto err_saved_wqs; + } + + max_wqe_size = 
WQE_LCMD_SIZE; + err = hinic_wqs_cmdq_alloc(&cmdqs->cmdq_pages, cmdqs->saved_wqs, hwif, + HINIC_MAX_CMDQ_TYPES, CMDQ_WQEBB_SIZE, + CMDQ_WQ_PAGE_SIZE, CMDQ_DEPTH, max_wqe_size); + if (err) { + dev_err(&pdev->dev, "Failed to allocate CMDQ wqs\n"); + goto err_cmdq_wqs; + } + + hwdev = container_of(func_to_io, struct hinic_hwdev, func_to_io); + err = init_cmdqs_ctxt(hwdev, cmdqs, db_area); + if (err) { + dev_err(&pdev->dev, "Failed to write cmdq ctxt\n"); + goto err_cmdq_ctxt; + } + + hinic_ceq_register_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ, cmdqs, + cmdq_ceq_handler); + return 0; + +err_cmdq_ctxt: + hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs, + HINIC_MAX_CMDQ_TYPES); + +err_cmdq_wqs: + devm_kfree(&pdev->dev, cmdqs->saved_wqs); + +err_saved_wqs: + pci_pool_destroy(cmdqs->cmdq_buf_pool); + return err; +} + +/** + * hinic_free_cmdqs - free all cmdqs + * @cmdqs: cmdqs to free + **/ +void hinic_free_cmdqs(struct hinic_cmdqs *cmdqs) +{ + struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs); + struct hinic_hwif *hwif = cmdqs->hwif; + struct pci_dev *pdev = hwif->pdev; + enum hinic_cmdq_type cmdq_type; + + hinic_ceq_unregister_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ); + + cmdq_type = HINIC_CMDQ_SYNC; + for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) + free_cmdq(&cmdqs->cmdq[cmdq_type]); + + hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs, + HINIC_MAX_CMDQ_TYPES); + + devm_kfree(&pdev->dev, cmdqs->saved_wqs); + + pci_pool_destroy(cmdqs->cmdq_buf_pool); +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h new file mode 100644 index 000000000000..b35583400cb6 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h @@ -0,0 +1,187 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
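/*
 * Illustrative sketch, not part of this patch: the calling sequence a user
 * of the cmdq layer above is expected to follow for a synchronous command.
 * The helper name, the module (HINIC_MOD_COMM) and the command id (0) are
 * placeholders for illustration only.
 */
static int example_cmdq_send(struct hinic_cmdqs *cmdqs, void *data, u16 len)
{
	struct hinic_cmdq_buf buf;
	u64 resp;
	int err;

	if (len > HINIC_CMDQ_MAX_DATA_SIZE)
		return -EINVAL;

	/* buffer comes from the cmdq_buf_pool created in hinic_init_cmdqs() */
	err = hinic_alloc_cmdq_buf(cmdqs, &buf);
	if (err)
		return err;

	memcpy(buf.buf, data, len);
	buf.size = len;

	err = hinic_cmdq_direct_resp(cmdqs, HINIC_MOD_COMM, 0, &buf, &resp);

	hinic_free_cmdq_buf(cmdqs, &buf);
	return err;
}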
+ * + */ + +#ifndef HINIC_CMDQ_H +#define HINIC_CMDQ_H + +#include +#include +#include +#include + +#include "hinic_hw_if.h" +#include "hinic_hw_wq.h" + +#define HINIC_CMDQ_CTXT_CURR_WQE_PAGE_PFN_SHIFT 0 +#define HINIC_CMDQ_CTXT_EQ_ID_SHIFT 56 +#define HINIC_CMDQ_CTXT_CEQ_ARM_SHIFT 61 +#define HINIC_CMDQ_CTXT_CEQ_EN_SHIFT 62 +#define HINIC_CMDQ_CTXT_WRAPPED_SHIFT 63 + +#define HINIC_CMDQ_CTXT_CURR_WQE_PAGE_PFN_MASK 0xFFFFFFFFFFFFF +#define HINIC_CMDQ_CTXT_EQ_ID_MASK 0x1F +#define HINIC_CMDQ_CTXT_CEQ_ARM_MASK 0x1 +#define HINIC_CMDQ_CTXT_CEQ_EN_MASK 0x1 +#define HINIC_CMDQ_CTXT_WRAPPED_MASK 0x1 + +#define HINIC_CMDQ_CTXT_PAGE_INFO_SET(val, member) \ + (((u64)(val) & HINIC_CMDQ_CTXT_##member##_MASK) \ + << HINIC_CMDQ_CTXT_##member##_SHIFT) + +#define HINIC_CMDQ_CTXT_PAGE_INFO_CLEAR(val, member) \ + ((val) & (~((u64)HINIC_CMDQ_CTXT_##member##_MASK \ + << HINIC_CMDQ_CTXT_##member##_SHIFT))) + +#define HINIC_CMDQ_CTXT_WQ_BLOCK_PFN_SHIFT 0 +#define HINIC_CMDQ_CTXT_CI_SHIFT 52 + +#define HINIC_CMDQ_CTXT_WQ_BLOCK_PFN_MASK 0xFFFFFFFFFFFFF +#define HINIC_CMDQ_CTXT_CI_MASK 0xFFF + +#define HINIC_CMDQ_CTXT_BLOCK_INFO_SET(val, member) \ + (((u64)(val) & HINIC_CMDQ_CTXT_##member##_MASK) \ + << HINIC_CMDQ_CTXT_##member##_SHIFT) + +#define HINIC_CMDQ_CTXT_BLOCK_INFO_CLEAR(val, member) \ + ((val) & (~((u64)HINIC_CMDQ_CTXT_##member##_MASK \ + << HINIC_CMDQ_CTXT_##member##_SHIFT))) + +#define HINIC_SAVED_DATA_ARM_SHIFT 31 + +#define HINIC_SAVED_DATA_ARM_MASK 0x1 + +#define HINIC_SAVED_DATA_SET(val, member) \ + (((u32)(val) & HINIC_SAVED_DATA_##member##_MASK) \ + << HINIC_SAVED_DATA_##member##_SHIFT) + +#define HINIC_SAVED_DATA_GET(val, member) \ + (((val) >> HINIC_SAVED_DATA_##member##_SHIFT) \ + & HINIC_SAVED_DATA_##member##_MASK) + +#define HINIC_SAVED_DATA_CLEAR(val, member) \ + ((val) & (~(HINIC_SAVED_DATA_##member##_MASK \ + << HINIC_SAVED_DATA_##member##_SHIFT))) + +#define HINIC_CMDQ_DB_INFO_HI_PROD_IDX_SHIFT 0 +#define HINIC_CMDQ_DB_INFO_PATH_SHIFT 23 +#define HINIC_CMDQ_DB_INFO_CMDQ_TYPE_SHIFT 24 +#define HINIC_CMDQ_DB_INFO_DB_TYPE_SHIFT 27 + +#define HINIC_CMDQ_DB_INFO_HI_PROD_IDX_MASK 0xFF +#define HINIC_CMDQ_DB_INFO_PATH_MASK 0x1 +#define HINIC_CMDQ_DB_INFO_CMDQ_TYPE_MASK 0x7 +#define HINIC_CMDQ_DB_INFO_DB_TYPE_MASK 0x1F + +#define HINIC_CMDQ_DB_INFO_SET(val, member) \ + (((u32)(val) & HINIC_CMDQ_DB_INFO_##member##_MASK) \ + << HINIC_CMDQ_DB_INFO_##member##_SHIFT) + +#define HINIC_CMDQ_BUF_SIZE 2048 + +#define HINIC_CMDQ_BUF_HW_RSVD 8 +#define HINIC_CMDQ_MAX_DATA_SIZE (HINIC_CMDQ_BUF_SIZE - \ + HINIC_CMDQ_BUF_HW_RSVD) + +enum hinic_cmdq_type { + HINIC_CMDQ_SYNC, + + HINIC_MAX_CMDQ_TYPES, +}; + +enum hinic_set_arm_qtype { + HINIC_SET_ARM_CMDQ, +}; + +enum hinic_cmd_ack_type { + HINIC_CMD_ACK_TYPE_CMDQ, +}; + +struct hinic_cmdq_buf { + void *buf; + dma_addr_t dma_addr; + size_t size; +}; + +struct hinic_cmdq_arm_bit { + u32 q_type; + u32 q_id; +}; + +struct hinic_cmdq_ctxt_info { + u64 curr_wqe_page_pfn; + u64 wq_block_pfn; +}; + +struct hinic_cmdq_ctxt { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u8 cmdq_type; + u8 rsvd1[1]; + + u8 rsvd2[4]; + + struct hinic_cmdq_ctxt_info ctxt_info; +}; + +struct hinic_cmdq { + struct hinic_wq *wq; + + enum hinic_cmdq_type cmdq_type; + int wrapped; + + /* Lock for keeping the doorbell order */ + spinlock_t cmdq_lock; + + struct completion **done; + int **errcode; + + /* doorbell area */ + void __iomem *db_base; +}; + +struct hinic_cmdqs { + struct hinic_hwif *hwif; + + struct pci_pool *cmdq_buf_pool; + + struct hinic_wq *saved_wqs; + + struct 
hinic_cmdq_pages cmdq_pages; + + struct hinic_cmdq cmdq[HINIC_MAX_CMDQ_TYPES]; +}; + +int hinic_alloc_cmdq_buf(struct hinic_cmdqs *cmdqs, + struct hinic_cmdq_buf *cmdq_buf); + +void hinic_free_cmdq_buf(struct hinic_cmdqs *cmdqs, + struct hinic_cmdq_buf *cmdq_buf); + +int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs, + enum hinic_mod_type mod, u8 cmd, + struct hinic_cmdq_buf *buf_in, u64 *out_param); + +int hinic_set_arm_bit(struct hinic_cmdqs *cmdqs, + enum hinic_set_arm_qtype q_type, u32 q_id); + +int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif, + void __iomem **db_area); + +void hinic_free_cmdqs(struct hinic_cmdqs *cmdqs); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h new file mode 100644 index 000000000000..f39b184f674d --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h @@ -0,0 +1,149 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef HINIC_HW_CSR_H +#define HINIC_HW_CSR_H + +/* HW interface registers */ +#define HINIC_CSR_FUNC_ATTR0_ADDR 0x0 +#define HINIC_CSR_FUNC_ATTR1_ADDR 0x4 + +#define HINIC_CSR_FUNC_ATTR4_ADDR 0x10 +#define HINIC_CSR_FUNC_ATTR5_ADDR 0x14 + +#define HINIC_DMA_ATTR_BASE 0xC80 +#define HINIC_ELECTION_BASE 0x4200 + +#define HINIC_DMA_ATTR_STRIDE 0x4 +#define HINIC_CSR_DMA_ATTR_ADDR(idx) \ + (HINIC_DMA_ATTR_BASE + (idx) * HINIC_DMA_ATTR_STRIDE) + +#define HINIC_PPF_ELECTION_STRIDE 0x4 +#define HINIC_CSR_MAX_PORTS 4 + +#define HINIC_CSR_PPF_ELECTION_ADDR(idx) \ + (HINIC_ELECTION_BASE + (idx) * HINIC_PPF_ELECTION_STRIDE) + +/* API CMD registers */ +#define HINIC_CSR_API_CMD_BASE 0xF000 + +#define HINIC_CSR_API_CMD_STRIDE 0x100 + +#define HINIC_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0x0 + (idx) * HINIC_CSR_API_CMD_STRIDE) + +#define HINIC_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0x4 + (idx) * HINIC_CSR_API_CMD_STRIDE) + +#define HINIC_CSR_API_CMD_STATUS_HI_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0x8 + (idx) * HINIC_CSR_API_CMD_STRIDE) + +#define HINIC_CSR_API_CMD_STATUS_LO_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0xC + (idx) * HINIC_CSR_API_CMD_STRIDE) + +#define HINIC_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0x10 + (idx) * HINIC_CSR_API_CMD_STRIDE) + +#define HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0x14 + (idx) * HINIC_CSR_API_CMD_STRIDE) + +#define HINIC_CSR_API_CMD_CHAIN_PI_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0x1C + (idx) * HINIC_CSR_API_CMD_STRIDE) + +#define HINIC_CSR_API_CMD_CHAIN_REQ_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0x20 + (idx) * HINIC_CSR_API_CMD_STRIDE) + +#define HINIC_CSR_API_CMD_STATUS_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0x30 + (idx) * HINIC_CSR_API_CMD_STRIDE) + +/* MSI-X registers */ +#define HINIC_CSR_MSIX_CTRL_BASE 0x2000 +#define HINIC_CSR_MSIX_CNT_BASE 0x2004 + +#define HINIC_CSR_MSIX_STRIDE 0x8 + +#define HINIC_CSR_MSIX_CTRL_ADDR(idx) \ + (HINIC_CSR_MSIX_CTRL_BASE + (idx) * 
HINIC_CSR_MSIX_STRIDE) + +#define HINIC_CSR_MSIX_CNT_ADDR(idx) \ + (HINIC_CSR_MSIX_CNT_BASE + (idx) * HINIC_CSR_MSIX_STRIDE) + +/* EQ registers */ +#define HINIC_AEQ_MTT_OFF_BASE_ADDR 0x200 +#define HINIC_CEQ_MTT_OFF_BASE_ADDR 0x400 + +#define HINIC_EQ_MTT_OFF_STRIDE 0x40 + +#define HINIC_CSR_AEQ_MTT_OFF(id) \ + (HINIC_AEQ_MTT_OFF_BASE_ADDR + (id) * HINIC_EQ_MTT_OFF_STRIDE) + +#define HINIC_CSR_CEQ_MTT_OFF(id) \ + (HINIC_CEQ_MTT_OFF_BASE_ADDR + (id) * HINIC_EQ_MTT_OFF_STRIDE) + +#define HINIC_CSR_EQ_PAGE_OFF_STRIDE 8 + +#define HINIC_CSR_AEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \ + (HINIC_CSR_AEQ_MTT_OFF(q_id) + \ + (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE) + +#define HINIC_CSR_CEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \ + (HINIC_CSR_CEQ_MTT_OFF(q_id) + \ + (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE) + +#define HINIC_CSR_AEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \ + (HINIC_CSR_AEQ_MTT_OFF(q_id) + \ + (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE + 4) + +#define HINIC_CSR_CEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \ + (HINIC_CSR_CEQ_MTT_OFF(q_id) + \ + (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE + 4) + +#define HINIC_AEQ_CTRL_0_ADDR_BASE 0xE00 +#define HINIC_AEQ_CTRL_1_ADDR_BASE 0xE04 +#define HINIC_AEQ_CONS_IDX_ADDR_BASE 0xE08 +#define HINIC_AEQ_PROD_IDX_ADDR_BASE 0xE0C + +#define HINIC_CEQ_CTRL_0_ADDR_BASE 0x1000 +#define HINIC_CEQ_CTRL_1_ADDR_BASE 0x1004 +#define HINIC_CEQ_CONS_IDX_ADDR_BASE 0x1008 +#define HINIC_CEQ_PROD_IDX_ADDR_BASE 0x100C + +#define HINIC_EQ_OFF_STRIDE 0x80 + +#define HINIC_CSR_AEQ_CTRL_0_ADDR(idx) \ + (HINIC_AEQ_CTRL_0_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) + +#define HINIC_CSR_AEQ_CTRL_1_ADDR(idx) \ + (HINIC_AEQ_CTRL_1_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) + +#define HINIC_CSR_AEQ_CONS_IDX_ADDR(idx) \ + (HINIC_AEQ_CONS_IDX_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) + +#define HINIC_CSR_AEQ_PROD_IDX_ADDR(idx) \ + (HINIC_AEQ_PROD_IDX_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) + +#define HINIC_CSR_CEQ_CTRL_0_ADDR(idx) \ + (HINIC_CEQ_CTRL_0_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) + +#define HINIC_CSR_CEQ_CTRL_1_ADDR(idx) \ + (HINIC_CEQ_CTRL_1_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) + +#define HINIC_CSR_CEQ_CONS_IDX_ADDR(idx) \ + (HINIC_CEQ_CONS_IDX_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) + +#define HINIC_CSR_CEQ_PROD_IDX_ADDR(idx) \ + (HINIC_CEQ_PROD_IDX_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c new file mode 100644 index 000000000000..79b567447084 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c @@ -0,0 +1,1013 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
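/*
 * Illustrative note, not part of this patch: how the per-chain API CMD
 * register macros in hinic_hw_csr.h above expand.  For chain index 2,
 * for example:
 *
 *   HINIC_CSR_API_CMD_CHAIN_PI_ADDR(2)
 *	== HINIC_CSR_API_CMD_BASE + 0x1C + 2 * HINIC_CSR_API_CMD_STRIDE
 *	== 0xF000 + 0x1C + 0x200 == 0xF21C
 *
 * i.e. each chain owns its own 0x100-byte register window above the
 * 0xF000 base.
 */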
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hinic_hw_if.h" +#include "hinic_hw_eqs.h" +#include "hinic_hw_mgmt.h" +#include "hinic_hw_qp_ctxt.h" +#include "hinic_hw_qp.h" +#include "hinic_hw_io.h" +#include "hinic_hw_dev.h" + +#define IO_STATUS_TIMEOUT 100 +#define OUTBOUND_STATE_TIMEOUT 100 +#define DB_STATE_TIMEOUT 100 + +#define MAX_IRQS(max_qps, num_aeqs, num_ceqs) \ + (2 * (max_qps) + (num_aeqs) + (num_ceqs)) + +#define ADDR_IN_4BYTES(addr) ((addr) >> 2) + +enum intr_type { + INTR_MSIX_TYPE, +}; + +enum io_status { + IO_STOPPED = 0, + IO_RUNNING = 1, +}; + +enum hw_ioctxt_set_cmdq_depth { + HW_IOCTXT_SET_CMDQ_DEPTH_DEFAULT, +}; + +/* HW struct */ +struct hinic_dev_cap { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 rsvd1[5]; + u8 intr_type; + u8 rsvd2[66]; + u16 max_sqs; + u16 max_rqs; + u8 rsvd3[208]; +}; + +/** + * get_capability - convert device capabilities to NIC capabilities + * @hwdev: the HW device to set and convert device capabilities for + * @dev_cap: device capabilities from FW + * + * Return 0 - Success, negative - Failure + **/ +static int get_capability(struct hinic_hwdev *hwdev, + struct hinic_dev_cap *dev_cap) +{ + struct hinic_cap *nic_cap = &hwdev->nic_cap; + int num_aeqs, num_ceqs, num_irqs; + + if (!HINIC_IS_PF(hwdev->hwif) && !HINIC_IS_PPF(hwdev->hwif)) + return -EINVAL; + + if (dev_cap->intr_type != INTR_MSIX_TYPE) + return -EFAULT; + + num_aeqs = HINIC_HWIF_NUM_AEQS(hwdev->hwif); + num_ceqs = HINIC_HWIF_NUM_CEQS(hwdev->hwif); + num_irqs = HINIC_HWIF_NUM_IRQS(hwdev->hwif); + + /* Each QP has its own (SQ + RQ) interrupts */ + nic_cap->num_qps = (num_irqs - (num_aeqs + num_ceqs)) / 2; + + if (nic_cap->num_qps > HINIC_Q_CTXT_MAX) + nic_cap->num_qps = HINIC_Q_CTXT_MAX; + + /* num_qps must be power of 2 */ + nic_cap->num_qps = BIT(fls(nic_cap->num_qps) - 1); + + nic_cap->max_qps = dev_cap->max_sqs + 1; + if (nic_cap->max_qps != (dev_cap->max_rqs + 1)) + return -EFAULT; + + if (nic_cap->num_qps > nic_cap->max_qps) + nic_cap->num_qps = nic_cap->max_qps; + + return 0; +} + +/** + * get_cap_from_fw - get device capabilities from FW + * @pfhwdev: the PF HW device to get capabilities for + * + * Return 0 - Success, negative - Failure + **/ +static int get_cap_from_fw(struct hinic_pfhwdev *pfhwdev) +{ + struct hinic_hwdev *hwdev = &pfhwdev->hwdev; + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + struct hinic_dev_cap dev_cap; + u16 in_len, out_len; + int err; + + in_len = 0; + out_len = sizeof(dev_cap); + + err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_CFGM, + HINIC_CFG_NIC_CAP, &dev_cap, in_len, &dev_cap, + &out_len, HINIC_MGMT_MSG_SYNC); + if (err) { + dev_err(&pdev->dev, "Failed to get capability from FW\n"); + return err; + } + + return get_capability(hwdev, &dev_cap); +} + +/** + * get_dev_cap - get device capabilities + * @hwdev: the NIC HW device to get capabilities for + * + * Return 0 - Success, negative - Failure + **/ +static int get_dev_cap(struct hinic_hwdev *hwdev) +{ + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + struct hinic_pfhwdev *pfhwdev; + int err; + + switch (HINIC_FUNC_TYPE(hwif)) { + case HINIC_PPF: + case HINIC_PF: + pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); + + err = get_cap_from_fw(pfhwdev); + if (err) { + dev_err(&pdev->dev, "Failed to get capability from FW\n"); + return err; + } + break; + + default: + dev_err(&pdev->dev, "Unsupported PCI Function type\n"); + 
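+		/* only a PF/PPF has a management channel to query capabilities over */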
return -EINVAL; + } + + return 0; +} + +/** + * init_msix - enable the msix and save the entries + * @hwdev: the NIC HW device + * + * Return 0 - Success, negative - Failure + **/ +static int init_msix(struct hinic_hwdev *hwdev) +{ + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + int nr_irqs, num_aeqs, num_ceqs; + size_t msix_entries_size; + int i, err; + + num_aeqs = HINIC_HWIF_NUM_AEQS(hwif); + num_ceqs = HINIC_HWIF_NUM_CEQS(hwif); + nr_irqs = MAX_IRQS(HINIC_MAX_QPS, num_aeqs, num_ceqs); + if (nr_irqs > HINIC_HWIF_NUM_IRQS(hwif)) + nr_irqs = HINIC_HWIF_NUM_IRQS(hwif); + + msix_entries_size = nr_irqs * sizeof(*hwdev->msix_entries); + hwdev->msix_entries = devm_kzalloc(&pdev->dev, msix_entries_size, + GFP_KERNEL); + if (!hwdev->msix_entries) + return -ENOMEM; + + for (i = 0; i < nr_irqs; i++) + hwdev->msix_entries[i].entry = i; + + err = pci_enable_msix_exact(pdev, hwdev->msix_entries, nr_irqs); + if (err) { + dev_err(&pdev->dev, "Failed to enable pci msix\n"); + return err; + } + + return 0; +} + +/** + * disable_msix - disable the msix + * @hwdev: the NIC HW device + **/ +static void disable_msix(struct hinic_hwdev *hwdev) +{ + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + + pci_disable_msix(pdev); +} + +/** + * hinic_port_msg_cmd - send port msg to mgmt + * @hwdev: the NIC HW device + * @cmd: the port command + * @buf_in: input buffer + * @in_size: input size + * @buf_out: output buffer + * @out_size: returned output size + * + * Return 0 - Success, negative - Failure + **/ +int hinic_port_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_port_cmd cmd, + void *buf_in, u16 in_size, void *buf_out, u16 *out_size) +{ + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + struct hinic_pfhwdev *pfhwdev; + + if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { + dev_err(&pdev->dev, "unsupported PCI Function type\n"); + return -EINVAL; + } + + pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); + + return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC, cmd, + buf_in, in_size, buf_out, out_size, + HINIC_MGMT_MSG_SYNC); +} + +/** + * init_fw_ctxt- Init Firmware tables before network mgmt and io operations + * @hwdev: the NIC HW device + * + * Return 0 - Success, negative - Failure + **/ +static int init_fw_ctxt(struct hinic_hwdev *hwdev) +{ + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + struct hinic_cmd_fw_ctxt fw_ctxt; + struct hinic_pfhwdev *pfhwdev; + u16 out_size; + int err; + + if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { + dev_err(&pdev->dev, "Unsupported PCI Function type\n"); + return -EINVAL; + } + + fw_ctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwif); + fw_ctxt.rx_buf_sz = HINIC_RX_BUF_SZ; + + pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); + + err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_FWCTXT_INIT, + &fw_ctxt, sizeof(fw_ctxt), + &fw_ctxt, &out_size); + if (err || (out_size != sizeof(fw_ctxt)) || fw_ctxt.status) { + dev_err(&pdev->dev, "Failed to init FW ctxt, ret = %d\n", + fw_ctxt.status); + return -EFAULT; + } + + return 0; +} + +/** + * set_hw_ioctxt - set the shape of the IO queues in FW + * @hwdev: the NIC HW device + * @rq_depth: rq depth + * @sq_depth: sq depth + * + * Return 0 - Success, negative - Failure + **/ +static int set_hw_ioctxt(struct hinic_hwdev *hwdev, unsigned int rq_depth, + unsigned int sq_depth) +{ + struct hinic_hwif *hwif = hwdev->hwif; + struct hinic_cmd_hw_ioctxt hw_ioctxt; + struct pci_dev *pdev = hwif->pdev; + 
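+	/* the SQ/RQ depths are programmed into the HW IO context in log2
+	 * form, hence the ilog2() conversions below
+	 */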
struct hinic_pfhwdev *pfhwdev; + + if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { + dev_err(&pdev->dev, "Unsupported PCI Function type\n"); + return -EINVAL; + } + + hw_ioctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwif); + + hw_ioctxt.set_cmdq_depth = HW_IOCTXT_SET_CMDQ_DEPTH_DEFAULT; + hw_ioctxt.cmdq_depth = 0; + + hw_ioctxt.rq_depth = ilog2(rq_depth); + + hw_ioctxt.rx_buf_sz_idx = HINIC_RX_BUF_SZ_IDX; + + hw_ioctxt.sq_depth = ilog2(sq_depth); + + pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); + + return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, + HINIC_COMM_CMD_HWCTXT_SET, + &hw_ioctxt, sizeof(hw_ioctxt), NULL, + NULL, HINIC_MGMT_MSG_SYNC); +} + +static int wait_for_outbound_state(struct hinic_hwdev *hwdev) +{ + enum hinic_outbound_state outbound_state; + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + unsigned long end; + + end = jiffies + msecs_to_jiffies(OUTBOUND_STATE_TIMEOUT); + do { + outbound_state = hinic_outbound_state_get(hwif); + + if (outbound_state == HINIC_OUTBOUND_ENABLE) + return 0; + + msleep(20); + } while (time_before(jiffies, end)); + + dev_err(&pdev->dev, "Wait for OUTBOUND - Timeout\n"); + return -EFAULT; +} + +static int wait_for_db_state(struct hinic_hwdev *hwdev) +{ + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + enum hinic_db_state db_state; + unsigned long end; + + end = jiffies + msecs_to_jiffies(DB_STATE_TIMEOUT); + do { + db_state = hinic_db_state_get(hwif); + + if (db_state == HINIC_DB_ENABLE) + return 0; + + msleep(20); + } while (time_before(jiffies, end)); + + dev_err(&pdev->dev, "Wait for DB - Timeout\n"); + return -EFAULT; +} + +static int wait_for_io_stopped(struct hinic_hwdev *hwdev) +{ + struct hinic_cmd_io_status cmd_io_status; + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + struct hinic_pfhwdev *pfhwdev; + unsigned long end; + u16 out_size; + int err; + + if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { + dev_err(&pdev->dev, "Unsupported PCI Function type\n"); + return -EINVAL; + } + + pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); + + cmd_io_status.func_idx = HINIC_HWIF_FUNC_IDX(hwif); + + end = jiffies + msecs_to_jiffies(IO_STATUS_TIMEOUT); + do { + err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, + HINIC_COMM_CMD_IO_STATUS_GET, + &cmd_io_status, sizeof(cmd_io_status), + &cmd_io_status, &out_size, + HINIC_MGMT_MSG_SYNC); + if ((err) || (out_size != sizeof(cmd_io_status))) { + dev_err(&pdev->dev, "Failed to get IO status, ret = %d\n", + err); + return err; + } + + if (cmd_io_status.status == IO_STOPPED) { + dev_info(&pdev->dev, "IO stopped\n"); + return 0; + } + + msleep(20); + } while (time_before(jiffies, end)); + + dev_err(&pdev->dev, "Wait for IO stopped - Timeout\n"); + return -ETIMEDOUT; +} + +/** + * clear_io_resource - set the IO resources as not active in the NIC + * @hwdev: the NIC HW device + * + * Return 0 - Success, negative - Failure + **/ +static int clear_io_resources(struct hinic_hwdev *hwdev) +{ + struct hinic_cmd_clear_io_res cmd_clear_io_res; + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + struct hinic_pfhwdev *pfhwdev; + int err; + + if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { + dev_err(&pdev->dev, "Unsupported PCI Function type\n"); + return -EINVAL; + } + + err = wait_for_io_stopped(hwdev); + if (err) { + dev_err(&pdev->dev, "IO has not stopped yet\n"); + return err; + } + + cmd_clear_io_res.func_idx = HINIC_HWIF_FUNC_IDX(hwif); + + pfhwdev = 
container_of(hwdev, struct hinic_pfhwdev, hwdev); + + err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, + HINIC_COMM_CMD_IO_RES_CLEAR, &cmd_clear_io_res, + sizeof(cmd_clear_io_res), NULL, NULL, + HINIC_MGMT_MSG_SYNC); + if (err) { + dev_err(&pdev->dev, "Failed to clear IO resources\n"); + return err; + } + + return 0; +} + +/** + * set_resources_state - set the state of the resources in the NIC + * @hwdev: the NIC HW device + * @state: the state to set + * + * Return 0 - Success, negative - Failure + **/ +static int set_resources_state(struct hinic_hwdev *hwdev, + enum hinic_res_state state) +{ + struct hinic_cmd_set_res_state res_state; + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + struct hinic_pfhwdev *pfhwdev; + + if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { + dev_err(&pdev->dev, "Unsupported PCI Function type\n"); + return -EINVAL; + } + + res_state.func_idx = HINIC_HWIF_FUNC_IDX(hwif); + res_state.state = state; + + pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); + + return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, + HINIC_MOD_COMM, + HINIC_COMM_CMD_RES_STATE_SET, + &res_state, sizeof(res_state), NULL, + NULL, HINIC_MGMT_MSG_SYNC); +} + +/** + * get_base_qpn - get the first qp number + * @hwdev: the NIC HW device + * @base_qpn: returned qp number + * + * Return 0 - Success, negative - Failure + **/ +static int get_base_qpn(struct hinic_hwdev *hwdev, u16 *base_qpn) +{ + struct hinic_cmd_base_qpn cmd_base_qpn; + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + u16 out_size; + int err; + + cmd_base_qpn.func_idx = HINIC_HWIF_FUNC_IDX(hwif); + + err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_GLOBAL_QPN, + &cmd_base_qpn, sizeof(cmd_base_qpn), + &cmd_base_qpn, &out_size); + if (err || (out_size != sizeof(cmd_base_qpn)) || cmd_base_qpn.status) { + dev_err(&pdev->dev, "Failed to get base qpn, status = %d\n", + cmd_base_qpn.status); + return -EFAULT; + } + + *base_qpn = cmd_base_qpn.qpn; + return 0; +} + +/** + * hinic_hwdev_ifup - Preparing the HW for passing IO + * @hwdev: the NIC HW device + * + * Return 0 - Success, negative - Failure + **/ +int hinic_hwdev_ifup(struct hinic_hwdev *hwdev) +{ + struct hinic_func_to_io *func_to_io = &hwdev->func_to_io; + struct hinic_cap *nic_cap = &hwdev->nic_cap; + struct hinic_hwif *hwif = hwdev->hwif; + int err, num_aeqs, num_ceqs, num_qps; + struct msix_entry *ceq_msix_entries; + struct msix_entry *sq_msix_entries; + struct msix_entry *rq_msix_entries; + struct pci_dev *pdev = hwif->pdev; + u16 base_qpn; + + err = get_base_qpn(hwdev, &base_qpn); + if (err) { + dev_err(&pdev->dev, "Failed to get global base qp number\n"); + return err; + } + + num_aeqs = HINIC_HWIF_NUM_AEQS(hwif); + num_ceqs = HINIC_HWIF_NUM_CEQS(hwif); + + ceq_msix_entries = &hwdev->msix_entries[num_aeqs]; + + err = hinic_io_init(func_to_io, hwif, nic_cap->max_qps, num_ceqs, + ceq_msix_entries); + if (err) { + dev_err(&pdev->dev, "Failed to init IO channel\n"); + return err; + } + + num_qps = nic_cap->num_qps; + sq_msix_entries = &hwdev->msix_entries[num_aeqs + num_ceqs]; + rq_msix_entries = &hwdev->msix_entries[num_aeqs + num_ceqs + num_qps]; + + err = hinic_io_create_qps(func_to_io, base_qpn, num_qps, + sq_msix_entries, rq_msix_entries); + if (err) { + dev_err(&pdev->dev, "Failed to create QPs\n"); + goto err_create_qps; + } + + err = wait_for_db_state(hwdev); + if (err) { + dev_warn(&pdev->dev, "db - disabled, try again\n"); + hinic_db_state_set(hwif, HINIC_DB_ENABLE); + } + + err = 
set_hw_ioctxt(hwdev, HINIC_SQ_DEPTH, HINIC_RQ_DEPTH); + if (err) { + dev_err(&pdev->dev, "Failed to set HW IO ctxt\n"); + goto err_hw_ioctxt; + } + + return 0; + +err_hw_ioctxt: + hinic_io_destroy_qps(func_to_io, num_qps); + +err_create_qps: + hinic_io_free(func_to_io); + return err; +} + +/** + * hinic_hwdev_ifdown - Closing the HW for passing IO + * @hwdev: the NIC HW device + * + **/ +void hinic_hwdev_ifdown(struct hinic_hwdev *hwdev) +{ + struct hinic_func_to_io *func_to_io = &hwdev->func_to_io; + struct hinic_cap *nic_cap = &hwdev->nic_cap; + + clear_io_resources(hwdev); + + hinic_io_destroy_qps(func_to_io, nic_cap->num_qps); + hinic_io_free(func_to_io); +} + +/** + * hinic_hwdev_cb_register - register callback handler for MGMT events + * @hwdev: the NIC HW device + * @cmd: the mgmt event + * @handle: private data for the handler + * @handler: event handler + **/ +void hinic_hwdev_cb_register(struct hinic_hwdev *hwdev, + enum hinic_mgmt_msg_cmd cmd, void *handle, + void (*handler)(void *handle, void *buf_in, + u16 in_size, void *buf_out, + u16 *out_size)) +{ + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + struct hinic_pfhwdev *pfhwdev; + struct hinic_nic_cb *nic_cb; + u8 cmd_cb; + + if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { + dev_err(&pdev->dev, "unsupported PCI Function type\n"); + return; + } + + pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); + + cmd_cb = cmd - HINIC_MGMT_MSG_CMD_BASE; + nic_cb = &pfhwdev->nic_cb[cmd_cb]; + + nic_cb->handler = handler; + nic_cb->handle = handle; + nic_cb->cb_state = HINIC_CB_ENABLED; +} + +/** + * hinic_hwdev_cb_unregister - unregister callback handler for MGMT events + * @hwdev: the NIC HW device + * @cmd: the mgmt event + **/ +void hinic_hwdev_cb_unregister(struct hinic_hwdev *hwdev, + enum hinic_mgmt_msg_cmd cmd) +{ + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + struct hinic_pfhwdev *pfhwdev; + struct hinic_nic_cb *nic_cb; + u8 cmd_cb; + + if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { + dev_err(&pdev->dev, "unsupported PCI Function type\n"); + return; + } + + pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); + + cmd_cb = cmd - HINIC_MGMT_MSG_CMD_BASE; + nic_cb = &pfhwdev->nic_cb[cmd_cb]; + + nic_cb->cb_state &= ~HINIC_CB_ENABLED; + + while (nic_cb->cb_state & HINIC_CB_RUNNING) + schedule(); + + nic_cb->handler = NULL; +} + +/** + * nic_mgmt_msg_handler - nic mgmt event handler + * @handle: private data for the handler + * @buf_in: input buffer + * @in_size: input size + * @buf_out: output buffer + * @out_size: returned output size + **/ +static void nic_mgmt_msg_handler(void *handle, u8 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + struct hinic_pfhwdev *pfhwdev = handle; + enum hinic_cb_state cb_state; + struct hinic_nic_cb *nic_cb; + struct hinic_hwdev *hwdev; + struct hinic_hwif *hwif; + struct pci_dev *pdev; + u8 cmd_cb; + + hwdev = &pfhwdev->hwdev; + hwif = hwdev->hwif; + pdev = hwif->pdev; + + if ((cmd < HINIC_MGMT_MSG_CMD_BASE) || + (cmd >= HINIC_MGMT_MSG_CMD_MAX)) { + dev_err(&pdev->dev, "unknown L2NIC event, cmd = %d\n", cmd); + return; + } + + cmd_cb = cmd - HINIC_MGMT_MSG_CMD_BASE; + + nic_cb = &pfhwdev->nic_cb[cmd_cb]; + + cb_state = cmpxchg(&nic_cb->cb_state, + HINIC_CB_ENABLED, + HINIC_CB_ENABLED | HINIC_CB_RUNNING); + + if ((cb_state == HINIC_CB_ENABLED) && (nic_cb->handler)) + nic_cb->handler(nic_cb->handle, buf_in, + in_size, buf_out, out_size); + else + dev_err(&pdev->dev, "Unhandled NIC Event %d\n", cmd); + + 
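+	/* drop the RUNNING bit so hinic_hwdev_cb_unregister() can stop waiting */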
nic_cb->cb_state &= ~HINIC_CB_RUNNING; +} + +/** + * init_pfhwdev - Initialize the extended components of PF + * @pfhwdev: the HW device for PF + * + * Return 0 - success, negative - failure + **/ +static int init_pfhwdev(struct hinic_pfhwdev *pfhwdev) +{ + struct hinic_hwdev *hwdev = &pfhwdev->hwdev; + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + int err; + + err = hinic_pf_to_mgmt_init(&pfhwdev->pf_to_mgmt, hwif); + if (err) { + dev_err(&pdev->dev, "Failed to initialize PF to MGMT channel\n"); + return err; + } + + hinic_register_mgmt_msg_cb(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC, + pfhwdev, nic_mgmt_msg_handler); + + hinic_set_pf_action(hwif, HINIC_PF_MGMT_ACTIVE); + return 0; +} + +/** + * free_pfhwdev - Free the extended components of PF + * @pfhwdev: the HW device for PF + **/ +static void free_pfhwdev(struct hinic_pfhwdev *pfhwdev) +{ + struct hinic_hwdev *hwdev = &pfhwdev->hwdev; + + hinic_set_pf_action(hwdev->hwif, HINIC_PF_MGMT_INIT); + + hinic_unregister_mgmt_msg_cb(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC); + + hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt); +} + +/** + * hinic_init_hwdev - Initialize the NIC HW + * @pdev: the NIC pci device + * + * Return initialized NIC HW device + * + * Initialize the NIC HW device and return a pointer to it + **/ +struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev) +{ + struct hinic_pfhwdev *pfhwdev; + struct hinic_hwdev *hwdev; + struct hinic_hwif *hwif; + int err, num_aeqs; + + hwif = devm_kzalloc(&pdev->dev, sizeof(*hwif), GFP_KERNEL); + if (!hwif) + return ERR_PTR(-ENOMEM); + + err = hinic_init_hwif(hwif, pdev); + if (err) { + dev_err(&pdev->dev, "Failed to init HW interface\n"); + return ERR_PTR(err); + } + + if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { + dev_err(&pdev->dev, "Unsupported PCI Function type\n"); + err = -EFAULT; + goto err_func_type; + } + + pfhwdev = devm_kzalloc(&pdev->dev, sizeof(*pfhwdev), GFP_KERNEL); + if (!pfhwdev) { + err = -ENOMEM; + goto err_pfhwdev_alloc; + } + + hwdev = &pfhwdev->hwdev; + hwdev->hwif = hwif; + + err = init_msix(hwdev); + if (err) { + dev_err(&pdev->dev, "Failed to init msix\n"); + goto err_init_msix; + } + + err = wait_for_outbound_state(hwdev); + if (err) { + dev_warn(&pdev->dev, "outbound - disabled, try again\n"); + hinic_outbound_state_set(hwif, HINIC_OUTBOUND_ENABLE); + } + + num_aeqs = HINIC_HWIF_NUM_AEQS(hwif); + + err = hinic_aeqs_init(&hwdev->aeqs, hwif, num_aeqs, + HINIC_DEFAULT_AEQ_LEN, HINIC_EQ_PAGE_SIZE, + hwdev->msix_entries); + if (err) { + dev_err(&pdev->dev, "Failed to init async event queues\n"); + goto err_aeqs_init; + } + + err = init_pfhwdev(pfhwdev); + if (err) { + dev_err(&pdev->dev, "Failed to init PF HW device\n"); + goto err_init_pfhwdev; + } + + err = get_dev_cap(hwdev); + if (err) { + dev_err(&pdev->dev, "Failed to get device capabilities\n"); + goto err_dev_cap; + } + + err = init_fw_ctxt(hwdev); + if (err) { + dev_err(&pdev->dev, "Failed to init function table\n"); + goto err_init_fw_ctxt; + } + + err = set_resources_state(hwdev, HINIC_RES_ACTIVE); + if (err) { + dev_err(&pdev->dev, "Failed to set resources state\n"); + goto err_resources_state; + } + + return hwdev; + +err_resources_state: +err_init_fw_ctxt: +err_dev_cap: + free_pfhwdev(pfhwdev); + +err_init_pfhwdev: + hinic_aeqs_free(&hwdev->aeqs); + +err_aeqs_init: + disable_msix(hwdev); + +err_init_msix: +err_pfhwdev_alloc: +err_func_type: + hinic_free_hwif(hwif); + return ERR_PTR(err); +} + +/** + * hinic_free_hwdev - Free the NIC HW device + * @hwdev: the NIC HW device + 
**/ +void hinic_free_hwdev(struct hinic_hwdev *hwdev) +{ + struct hinic_pfhwdev *pfhwdev = container_of(hwdev, + struct hinic_pfhwdev, + hwdev); + + set_resources_state(hwdev, HINIC_RES_CLEAN); + + free_pfhwdev(pfhwdev); + + hinic_aeqs_free(&hwdev->aeqs); + + disable_msix(hwdev); + + hinic_free_hwif(hwdev->hwif); +} + +/** + * hinic_hwdev_num_qps - return the number of QPs available for use + * @hwdev: the NIC HW device + * + * Return number of QPs available for use + **/ +int hinic_hwdev_num_qps(struct hinic_hwdev *hwdev) +{ + struct hinic_cap *nic_cap = &hwdev->nic_cap; + + return nic_cap->num_qps; +} + +/** + * hinic_hwdev_get_sq - get SQ + * @hwdev: the NIC HW device + * @i: the position of the SQ + * + * Return: the SQ in the i position + **/ +struct hinic_sq *hinic_hwdev_get_sq(struct hinic_hwdev *hwdev, int i) +{ + struct hinic_func_to_io *func_to_io = &hwdev->func_to_io; + struct hinic_qp *qp = &func_to_io->qps[i]; + + if (i >= hinic_hwdev_num_qps(hwdev)) + return NULL; + + return &qp->sq; +} + +/** + * hinic_hwdev_get_rq - get RQ + * @hwdev: the NIC HW device + * @i: the position of the RQ + * + * Return: the RQ in the i position + **/ +struct hinic_rq *hinic_hwdev_get_rq(struct hinic_hwdev *hwdev, int i) +{ + struct hinic_func_to_io *func_to_io = &hwdev->func_to_io; + struct hinic_qp *qp = &func_to_io->qps[i]; + + if (i >= hinic_hwdev_num_qps(hwdev)) + return NULL; + + return &qp->rq; +} + +/** + * hinic_hwdev_msix_cnt_set - clear message attribute counters for msix entry + * @hwdev: the NIC HW device + * @msix_index: msix_index + * + * Return 0 - Success, negative - Failure + **/ +int hinic_hwdev_msix_cnt_set(struct hinic_hwdev *hwdev, u16 msix_index) +{ + return hinic_msix_attr_cnt_clear(hwdev->hwif, msix_index); +} + +/** + * hinic_hwdev_msix_set - set message attribute for msix entry + * @hwdev: the NIC HW device + * @msix_index: msix_index + * @pending_limit: the maximum pending interrupt events (unit 8) + * @coalesc_timer: coalesc period for interrupt (unit 8 us) + * @lli_timer_cfg: replenishing period for low latency credit (unit 8 us) + * @lli_credit_limit: maximum credits for low latency msix messages (unit 8) + * @resend_timer: maximum wait for resending msix (unit coalesc period) + * + * Return 0 - Success, negative - Failure + **/ +int hinic_hwdev_msix_set(struct hinic_hwdev *hwdev, u16 msix_index, + u8 pending_limit, u8 coalesc_timer, + u8 lli_timer_cfg, u8 lli_credit_limit, + u8 resend_timer) +{ + return hinic_msix_attr_set(hwdev->hwif, msix_index, + pending_limit, coalesc_timer, + lli_timer_cfg, lli_credit_limit, + resend_timer); +} + +/** + * hinic_hwdev_hw_ci_addr_set - set cons idx addr and attributes in HW for sq + * @hwdev: the NIC HW device + * @sq: send queue + * @pending_limit: the maximum pending update ci events (unit 8) + * @coalesc_timer: coalesc period for update ci (unit 8 us) + * + * Return 0 - Success, negative - Failure + **/ +int hinic_hwdev_hw_ci_addr_set(struct hinic_hwdev *hwdev, struct hinic_sq *sq, + u8 pending_limit, u8 coalesc_timer) +{ + struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq); + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + struct hinic_pfhwdev *pfhwdev; + struct hinic_cmd_hw_ci hw_ci; + + if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { + dev_err(&pdev->dev, "Unsupported PCI Function type\n"); + return -EINVAL; + } + + hw_ci.dma_attr_off = 0; + hw_ci.pending_limit = pending_limit; + hw_ci.coalesc_timer = coalesc_timer; + + hw_ci.msix_en = 1; + hw_ci.msix_entry_idx = sq->msix_entry; + +
hw_ci.func_idx = HINIC_HWIF_FUNC_IDX(hwif); + + hw_ci.sq_id = qp->q_id; + + hw_ci.ci_addr = ADDR_IN_4BYTES(sq->hw_ci_dma_addr); + + pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); + return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, + HINIC_MOD_COMM, + HINIC_COMM_CMD_SQ_HI_CI_SET, + &hw_ci, sizeof(hw_ci), NULL, + NULL, HINIC_MGMT_MSG_SYNC); +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h new file mode 100644 index 000000000000..0f5563f3b779 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h @@ -0,0 +1,239 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef HINIC_HW_DEV_H +#define HINIC_HW_DEV_H + +#include +#include +#include + +#include "hinic_hw_if.h" +#include "hinic_hw_eqs.h" +#include "hinic_hw_mgmt.h" +#include "hinic_hw_qp.h" +#include "hinic_hw_io.h" + +#define HINIC_MAX_QPS 32 + +#define HINIC_MGMT_NUM_MSG_CMD (HINIC_MGMT_MSG_CMD_MAX - \ + HINIC_MGMT_MSG_CMD_BASE) + +struct hinic_cap { + u16 max_qps; + u16 num_qps; +}; + +enum hinic_port_cmd { + HINIC_PORT_CMD_CHANGE_MTU = 2, + + HINIC_PORT_CMD_ADD_VLAN = 3, + HINIC_PORT_CMD_DEL_VLAN = 4, + + HINIC_PORT_CMD_SET_MAC = 9, + HINIC_PORT_CMD_GET_MAC = 10, + HINIC_PORT_CMD_DEL_MAC = 11, + + HINIC_PORT_CMD_SET_RX_MODE = 12, + + HINIC_PORT_CMD_GET_LINK_STATE = 24, + + HINIC_PORT_CMD_SET_PORT_STATE = 41, + + HINIC_PORT_CMD_FWCTXT_INIT = 69, + + HINIC_PORT_CMD_SET_FUNC_STATE = 93, + + HINIC_PORT_CMD_GET_GLOBAL_QPN = 102, + + HINIC_PORT_CMD_GET_CAP = 170, +}; + +enum hinic_mgmt_msg_cmd { + HINIC_MGMT_MSG_CMD_BASE = 160, + + HINIC_MGMT_MSG_CMD_LINK_STATUS = 160, + + HINIC_MGMT_MSG_CMD_MAX, +}; + +enum hinic_cb_state { + HINIC_CB_ENABLED = BIT(0), + HINIC_CB_RUNNING = BIT(1), +}; + +enum hinic_res_state { + HINIC_RES_CLEAN = 0, + HINIC_RES_ACTIVE = 1, +}; + +struct hinic_cmd_fw_ctxt { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u16 rx_buf_sz; + + u32 rsvd1; +}; + +struct hinic_cmd_hw_ioctxt { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + + u16 rsvd1; + + u8 set_cmdq_depth; + u8 cmdq_depth; + + u8 rsvd2; + u8 rsvd3; + u8 rsvd4; + u8 rsvd5; + + u16 rq_depth; + u16 rx_buf_sz_idx; + u16 sq_depth; +}; + +struct hinic_cmd_io_status { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u8 rsvd1; + u8 rsvd2; + u32 io_status; +}; + +struct hinic_cmd_clear_io_res { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u8 rsvd1; + u8 rsvd2; +}; + +struct hinic_cmd_set_res_state { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u8 state; + u8 rsvd1; + u32 rsvd2; +}; + +struct hinic_cmd_base_qpn { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u16 qpn; +}; + +struct hinic_cmd_hw_ci { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + + u8 dma_attr_off; + u8 pending_limit; + u8 coalesc_timer; + + u8 msix_en; + u16 msix_entry_idx; + + u32 sq_id; + u32 rsvd1; + u64 ci_addr; +}; + +struct hinic_hwdev { + struct hinic_hwif *hwif; + struct msix_entry 
*msix_entries; + + struct hinic_aeqs aeqs; + struct hinic_func_to_io func_to_io; + + struct hinic_cap nic_cap; +}; + +struct hinic_nic_cb { + void (*handler)(void *handle, void *buf_in, + u16 in_size, void *buf_out, + u16 *out_size); + + void *handle; + unsigned long cb_state; +}; + +struct hinic_pfhwdev { + struct hinic_hwdev hwdev; + + struct hinic_pf_to_mgmt pf_to_mgmt; + + struct hinic_nic_cb nic_cb[HINIC_MGMT_NUM_MSG_CMD]; +}; + +void hinic_hwdev_cb_register(struct hinic_hwdev *hwdev, + enum hinic_mgmt_msg_cmd cmd, void *handle, + void (*handler)(void *handle, void *buf_in, + u16 in_size, void *buf_out, + u16 *out_size)); + +void hinic_hwdev_cb_unregister(struct hinic_hwdev *hwdev, + enum hinic_mgmt_msg_cmd cmd); + +int hinic_port_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_port_cmd cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size); + +int hinic_hwdev_ifup(struct hinic_hwdev *hwdev); + +void hinic_hwdev_ifdown(struct hinic_hwdev *hwdev); + +struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev); + +void hinic_free_hwdev(struct hinic_hwdev *hwdev); + +int hinic_hwdev_num_qps(struct hinic_hwdev *hwdev); + +struct hinic_sq *hinic_hwdev_get_sq(struct hinic_hwdev *hwdev, int i); + +struct hinic_rq *hinic_hwdev_get_rq(struct hinic_hwdev *hwdev, int i); + +int hinic_hwdev_msix_cnt_set(struct hinic_hwdev *hwdev, u16 msix_index); + +int hinic_hwdev_msix_set(struct hinic_hwdev *hwdev, u16 msix_index, + u8 pending_limit, u8 coalesc_timer, + u8 lli_timer_cfg, u8 lli_credit_limit, + u8 resend_timer); + +int hinic_hwdev_hw_ci_addr_set(struct hinic_hwdev *hwdev, struct hinic_sq *sq, + u8 pending_limit, u8 coalesc_timer); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c new file mode 100644 index 000000000000..7cb8b9b94726 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c @@ -0,0 +1,886 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hinic_hw_csr.h" +#include "hinic_hw_if.h" +#include "hinic_hw_eqs.h" + +#define HINIC_EQS_WQ_NAME "hinic_eqs" + +#define GET_EQ_NUM_PAGES(eq, pg_size) \ + (ALIGN((eq)->q_len * (eq)->elem_size, pg_size) / (pg_size)) + +#define GET_EQ_NUM_ELEMS_IN_PG(eq, pg_size) ((pg_size) / (eq)->elem_size) + +#define EQ_CONS_IDX_REG_ADDR(eq) (((eq)->type == HINIC_AEQ) ? \ + HINIC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id) : \ + HINIC_CSR_CEQ_CONS_IDX_ADDR((eq)->q_id)) + +#define EQ_PROD_IDX_REG_ADDR(eq) (((eq)->type == HINIC_AEQ) ? \ + HINIC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id) : \ + HINIC_CSR_CEQ_PROD_IDX_ADDR((eq)->q_id)) + +#define EQ_HI_PHYS_ADDR_REG(eq, pg_num) (((eq)->type == HINIC_AEQ) ? \ + HINIC_CSR_AEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num) : \ + HINIC_CSR_CEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num)) + +#define EQ_LO_PHYS_ADDR_REG(eq, pg_num) (((eq)->type == HINIC_AEQ) ? 
\ + HINIC_CSR_AEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num) : \ + HINIC_CSR_CEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num)) + +#define GET_EQ_ELEMENT(eq, idx) \ + ((eq)->virt_addr[(idx) / (eq)->num_elem_in_pg] + \ + (((idx) & ((eq)->num_elem_in_pg - 1)) * (eq)->elem_size)) + +#define GET_AEQ_ELEM(eq, idx) ((struct hinic_aeq_elem *) \ + GET_EQ_ELEMENT(eq, idx)) + +#define GET_CEQ_ELEM(eq, idx) ((u32 *) \ + GET_EQ_ELEMENT(eq, idx)) + +#define GET_CURR_AEQ_ELEM(eq) GET_AEQ_ELEM(eq, (eq)->cons_idx) + +#define GET_CURR_CEQ_ELEM(eq) GET_CEQ_ELEM(eq, (eq)->cons_idx) + +#define PAGE_IN_4K(page_size) ((page_size) >> 12) +#define EQ_SET_HW_PAGE_SIZE_VAL(eq) (ilog2(PAGE_IN_4K((eq)->page_size))) + +#define ELEMENT_SIZE_IN_32B(eq) (((eq)->elem_size) >> 5) +#define EQ_SET_HW_ELEM_SIZE_VAL(eq) (ilog2(ELEMENT_SIZE_IN_32B(eq))) + +#define EQ_MAX_PAGES 8 + +#define CEQE_TYPE_SHIFT 23 +#define CEQE_TYPE_MASK 0x7 + +#define CEQE_TYPE(ceqe) (((ceqe) >> CEQE_TYPE_SHIFT) & \ + CEQE_TYPE_MASK) + +#define CEQE_DATA_MASK 0x3FFFFFF +#define CEQE_DATA(ceqe) ((ceqe) & CEQE_DATA_MASK) + +#define aeq_to_aeqs(eq) \ + container_of((eq) - (eq)->q_id, struct hinic_aeqs, aeq[0]) + +#define ceq_to_ceqs(eq) \ + container_of((eq) - (eq)->q_id, struct hinic_ceqs, ceq[0]) + +#define work_to_aeq_work(work) \ + container_of(work, struct hinic_eq_work, work) + +#define DMA_ATTR_AEQ_DEFAULT 0 +#define DMA_ATTR_CEQ_DEFAULT 0 + +/* No coalescence */ +#define THRESH_CEQ_DEFAULT 0 + +enum eq_int_mode { + EQ_INT_MODE_ARMED, + EQ_INT_MODE_ALWAYS +}; + +enum eq_arm_state { + EQ_NOT_ARMED, + EQ_ARMED +}; + +/** + * hinic_aeq_register_hw_cb - register an AEQ callback for a specific event + * @aeqs: pointer to Async eqs of the chip + * @event: aeq event to register the callback for + * @handle: private data that will be used by the callback + * @hwe_handler: callback function + **/ +void hinic_aeq_register_hw_cb(struct hinic_aeqs *aeqs, + enum hinic_aeq_type event, void *handle, + void (*hwe_handler)(void *handle, void *data, + u8 size)) +{ + struct hinic_hw_event_cb *hwe_cb = &aeqs->hwe_cb[event]; + + hwe_cb->hwe_handler = hwe_handler; + hwe_cb->handle = handle; + hwe_cb->hwe_state = HINIC_EQE_ENABLED; +} + +/** + * hinic_aeq_unregister_hw_cb - unregister the AEQ callback for a specific event + * @aeqs: pointer to Async eqs of the chip + * @event: aeq event to unregister the callback for + **/ +void hinic_aeq_unregister_hw_cb(struct hinic_aeqs *aeqs, + enum hinic_aeq_type event) +{ + struct hinic_hw_event_cb *hwe_cb = &aeqs->hwe_cb[event]; + + hwe_cb->hwe_state &= ~HINIC_EQE_ENABLED; + + while (hwe_cb->hwe_state & HINIC_EQE_RUNNING) + schedule(); + + hwe_cb->hwe_handler = NULL; +} + +/** + * hinic_ceq_register_cb - register a CEQ callback for a specific event + * @ceqs: pointer to Completion eqs part of the chip + * @event: ceq event to register the callback for + * @handle: private data that will be used by the callback + * @handler: callback function + **/ +void hinic_ceq_register_cb(struct hinic_ceqs *ceqs, + enum hinic_ceq_type event, void *handle, + void (*handler)(void *handle, u32 ceqe_data)) +{ + struct hinic_ceq_cb *ceq_cb = &ceqs->ceq_cb[event]; + + ceq_cb->handler = handler; + ceq_cb->handle = handle; + ceq_cb->ceqe_state = HINIC_EQE_ENABLED; +} + +/** + * hinic_ceq_unregister_cb - unregister the CEQ callback for a specific event + * @ceqs: pointer to Completion eqs part of the chip + * @event: ceq event to unregister the callback for + **/ +void hinic_ceq_unregister_cb(struct hinic_ceqs *ceqs, + enum hinic_ceq_type event) +{ + struct hinic_ceq_cb *ceq_cb = 
&ceqs->ceq_cb[event]; + + ceq_cb->ceqe_state &= ~HINIC_EQE_ENABLED; + + while (ceq_cb->ceqe_state & HINIC_EQE_RUNNING) + schedule(); + + ceq_cb->handler = NULL; +} + +static u8 eq_cons_idx_checksum_set(u32 val) +{ + u8 checksum = 0; + int idx; + + for (idx = 0; idx < 32; idx += 4) + checksum ^= ((val >> idx) & 0xF); + + return (checksum & 0xF); +} + +/** + * eq_update_ci - update the HW cons idx of event queue + * @eq: the event queue to update the cons idx for + **/ +static void eq_update_ci(struct hinic_eq *eq) +{ + u32 val, addr = EQ_CONS_IDX_REG_ADDR(eq); + + /* Read Modify Write */ + val = hinic_hwif_read_reg(eq->hwif, addr); + + val = HINIC_EQ_CI_CLEAR(val, IDX) & + HINIC_EQ_CI_CLEAR(val, WRAPPED) & + HINIC_EQ_CI_CLEAR(val, INT_ARMED) & + HINIC_EQ_CI_CLEAR(val, XOR_CHKSUM); + + val |= HINIC_EQ_CI_SET(eq->cons_idx, IDX) | + HINIC_EQ_CI_SET(eq->wrapped, WRAPPED) | + HINIC_EQ_CI_SET(EQ_ARMED, INT_ARMED); + + val |= HINIC_EQ_CI_SET(eq_cons_idx_checksum_set(val), XOR_CHKSUM); + + hinic_hwif_write_reg(eq->hwif, addr, val); +} + +/** + * aeq_irq_handler - handler for the AEQ event + * @eq: the Async Event Queue that received the event + **/ +static void aeq_irq_handler(struct hinic_eq *eq) +{ + struct hinic_aeqs *aeqs = aeq_to_aeqs(eq); + struct hinic_hwif *hwif = aeqs->hwif; + struct pci_dev *pdev = hwif->pdev; + struct hinic_aeq_elem *aeqe_curr; + struct hinic_hw_event_cb *hwe_cb; + enum hinic_aeq_type event; + unsigned long eqe_state; + u32 aeqe_desc; + int i, size; + + for (i = 0; i < eq->q_len; i++) { + aeqe_curr = GET_CURR_AEQ_ELEM(eq); + + /* Data in HW is in Big endian Format */ + aeqe_desc = be32_to_cpu(aeqe_curr->desc); + + /* HW toggles the wrapped bit, when it adds eq element */ + if (HINIC_EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED) == eq->wrapped) + break; + + event = HINIC_EQ_ELEM_DESC_GET(aeqe_desc, TYPE); + if (event >= HINIC_MAX_AEQ_EVENTS) { + dev_err(&pdev->dev, "Unknown AEQ Event %d\n", event); + return; + } + + if (!HINIC_EQ_ELEM_DESC_GET(aeqe_desc, SRC)) { + hwe_cb = &aeqs->hwe_cb[event]; + + size = HINIC_EQ_ELEM_DESC_GET(aeqe_desc, SIZE); + + eqe_state = cmpxchg(&hwe_cb->hwe_state, + HINIC_EQE_ENABLED, + HINIC_EQE_ENABLED | + HINIC_EQE_RUNNING); + if ((eqe_state == HINIC_EQE_ENABLED) && + (hwe_cb->hwe_handler)) + hwe_cb->hwe_handler(hwe_cb->handle, + aeqe_curr->data, size); + else + dev_err(&pdev->dev, "Unhandled AEQ Event %d\n", + event); + + hwe_cb->hwe_state &= ~HINIC_EQE_RUNNING; + } + + eq->cons_idx++; + + if (eq->cons_idx == eq->q_len) { + eq->cons_idx = 0; + eq->wrapped = !eq->wrapped; + } + } +} + +/** + * ceq_event_handler - handler for the ceq events + * @ceqs: ceqs part of the chip + * @ceqe: ceq element that describes the event + **/ +static void ceq_event_handler(struct hinic_ceqs *ceqs, u32 ceqe) +{ + struct hinic_hwif *hwif = ceqs->hwif; + struct pci_dev *pdev = hwif->pdev; + struct hinic_ceq_cb *ceq_cb; + enum hinic_ceq_type event; + unsigned long eqe_state; + + event = CEQE_TYPE(ceqe); + if (event >= HINIC_MAX_CEQ_EVENTS) { + dev_err(&pdev->dev, "Unknown CEQ event, event = %d\n", event); + return; + } + + ceq_cb = &ceqs->ceq_cb[event]; + + eqe_state = cmpxchg(&ceq_cb->ceqe_state, + HINIC_EQE_ENABLED, + HINIC_EQE_ENABLED | HINIC_EQE_RUNNING); + + if ((eqe_state == HINIC_EQE_ENABLED) && (ceq_cb->handler)) + ceq_cb->handler(ceq_cb->handle, CEQE_DATA(ceqe)); + else + dev_err(&pdev->dev, "Unhandled CEQ Event %d\n", event); + + ceq_cb->ceqe_state &= ~HINIC_EQE_RUNNING; +} + +/** + * ceq_irq_handler - handler for the CEQ event + * @eq: the Completion Event Queue 
that received the event + **/ +static void ceq_irq_handler(struct hinic_eq *eq) +{ + struct hinic_ceqs *ceqs = ceq_to_ceqs(eq); + u32 ceqe; + int i; + + for (i = 0; i < eq->q_len; i++) { + ceqe = *(GET_CURR_CEQ_ELEM(eq)); + + /* Data in HW is in Big endian Format */ + ceqe = be32_to_cpu(ceqe); + + /* HW toggles the wrapped bit, when it adds eq element event */ + if (HINIC_EQ_ELEM_DESC_GET(ceqe, WRAPPED) == eq->wrapped) + break; + + ceq_event_handler(ceqs, ceqe); + + eq->cons_idx++; + + if (eq->cons_idx == eq->q_len) { + eq->cons_idx = 0; + eq->wrapped = !eq->wrapped; + } + } +} + +/** + * eq_irq_handler - handler for the EQ event + * @data: the Event Queue that received the event + **/ +static void eq_irq_handler(void *data) +{ + struct hinic_eq *eq = data; + + if (eq->type == HINIC_AEQ) + aeq_irq_handler(eq); + else if (eq->type == HINIC_CEQ) + ceq_irq_handler(eq); + + eq_update_ci(eq); +} + +/** + * eq_irq_work - the work of the EQ that received the event + * @work: the work struct that is associated with the EQ + **/ +static void eq_irq_work(struct work_struct *work) +{ + struct hinic_eq_work *aeq_work = work_to_aeq_work(work); + struct hinic_eq *aeq; + + aeq = aeq_work->data; + eq_irq_handler(aeq); +} + +/** + * ceq_tasklet - the tasklet of the EQ that received the event + * @ceq_data: the eq + **/ +static void ceq_tasklet(unsigned long ceq_data) +{ + struct hinic_eq *ceq = (struct hinic_eq *)ceq_data; + + eq_irq_handler(ceq); +} + +/** + * aeq_interrupt - aeq interrupt handler + * @irq: irq number + * @data: the Async Event Queue that collected the event + **/ +static irqreturn_t aeq_interrupt(int irq, void *data) +{ + struct hinic_eq_work *aeq_work; + struct hinic_eq *aeq = data; + struct hinic_aeqs *aeqs; + + /* clear resend timer cnt register */ + hinic_msix_attr_cnt_clear(aeq->hwif, aeq->msix_entry.entry); + + aeq_work = &aeq->aeq_work; + aeq_work->data = aeq; + + aeqs = aeq_to_aeqs(aeq); + queue_work(aeqs->workq, &aeq_work->work); + + return IRQ_HANDLED; +} + +/** + * ceq_interrupt - ceq interrupt handler + * @irq: irq number + * @data: the Completion Event Queue that collected the event + **/ +static irqreturn_t ceq_interrupt(int irq, void *data) +{ + struct hinic_eq *ceq = data; + + /* clear resend timer cnt register */ + hinic_msix_attr_cnt_clear(ceq->hwif, ceq->msix_entry.entry); + + tasklet_schedule(&ceq->ceq_tasklet); + + return IRQ_HANDLED; +} + +static void set_ctrl0(struct hinic_eq *eq) +{ + struct msix_entry *msix_entry = &eq->msix_entry; + enum hinic_eq_type type = eq->type; + u32 addr, val, ctrl0; + + if (type == HINIC_AEQ) { + /* RMW Ctrl0 */ + addr = HINIC_CSR_AEQ_CTRL_0_ADDR(eq->q_id); + + val = hinic_hwif_read_reg(eq->hwif, addr); + + val = HINIC_AEQ_CTRL_0_CLEAR(val, INT_IDX) & + HINIC_AEQ_CTRL_0_CLEAR(val, DMA_ATTR) & + HINIC_AEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) & + HINIC_AEQ_CTRL_0_CLEAR(val, INT_MODE); + + ctrl0 = HINIC_AEQ_CTRL_0_SET(msix_entry->entry, INT_IDX) | + HINIC_AEQ_CTRL_0_SET(DMA_ATTR_AEQ_DEFAULT, DMA_ATTR) | + HINIC_AEQ_CTRL_0_SET(HINIC_HWIF_PCI_INTF(eq->hwif), + PCI_INTF_IDX) | + HINIC_AEQ_CTRL_0_SET(EQ_INT_MODE_ARMED, INT_MODE); + + val |= ctrl0; + + hinic_hwif_write_reg(eq->hwif, addr, val); + } else if (type == HINIC_CEQ) { + /* RMW Ctrl0 */ + addr = HINIC_CSR_CEQ_CTRL_0_ADDR(eq->q_id); + + val = hinic_hwif_read_reg(eq->hwif, addr); + + val = HINIC_CEQ_CTRL_0_CLEAR(val, INTR_IDX) & + HINIC_CEQ_CTRL_0_CLEAR(val, DMA_ATTR) & + HINIC_CEQ_CTRL_0_CLEAR(val, KICK_THRESH) & + HINIC_CEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) & + HINIC_CEQ_CTRL_0_CLEAR(val, 
INTR_MODE); + + ctrl0 = HINIC_CEQ_CTRL_0_SET(msix_entry->entry, INTR_IDX) | + HINIC_CEQ_CTRL_0_SET(DMA_ATTR_CEQ_DEFAULT, DMA_ATTR) | + HINIC_CEQ_CTRL_0_SET(THRESH_CEQ_DEFAULT, KICK_THRESH) | + HINIC_CEQ_CTRL_0_SET(HINIC_HWIF_PCI_INTF(eq->hwif), + PCI_INTF_IDX) | + HINIC_CEQ_CTRL_0_SET(EQ_INT_MODE_ARMED, INTR_MODE); + + val |= ctrl0; + + hinic_hwif_write_reg(eq->hwif, addr, val); + } +} + +static void set_ctrl1(struct hinic_eq *eq) +{ + enum hinic_eq_type type = eq->type; + u32 page_size_val, elem_size; + u32 addr, val, ctrl1; + + if (type == HINIC_AEQ) { + /* RMW Ctrl1 */ + addr = HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id); + + page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq); + elem_size = EQ_SET_HW_ELEM_SIZE_VAL(eq); + + val = hinic_hwif_read_reg(eq->hwif, addr); + + val = HINIC_AEQ_CTRL_1_CLEAR(val, LEN) & + HINIC_AEQ_CTRL_1_CLEAR(val, ELEM_SIZE) & + HINIC_AEQ_CTRL_1_CLEAR(val, PAGE_SIZE); + + ctrl1 = HINIC_AEQ_CTRL_1_SET(eq->q_len, LEN) | + HINIC_AEQ_CTRL_1_SET(elem_size, ELEM_SIZE) | + HINIC_AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE); + + val |= ctrl1; + + hinic_hwif_write_reg(eq->hwif, addr, val); + } else if (type == HINIC_CEQ) { + /* RMW Ctrl1 */ + addr = HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id); + + page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq); + + val = hinic_hwif_read_reg(eq->hwif, addr); + + val = HINIC_CEQ_CTRL_1_CLEAR(val, LEN) & + HINIC_CEQ_CTRL_1_CLEAR(val, PAGE_SIZE); + + ctrl1 = HINIC_CEQ_CTRL_1_SET(eq->q_len, LEN) | + HINIC_CEQ_CTRL_1_SET(page_size_val, PAGE_SIZE); + + val |= ctrl1; + + hinic_hwif_write_reg(eq->hwif, addr, val); + } +} + +/** + * set_eq_ctrls - set the eq's ctrl registers + * @eq: the Event Queue for setting + **/ +static void set_eq_ctrls(struct hinic_eq *eq) +{ + set_ctrl0(eq); + set_ctrl1(eq); +} + +/** + * aeq_elements_init - initialize all the elements in the aeq + * @eq: the Async Event Queue + * @init_val: value to initialize the elements with + **/ +static void aeq_elements_init(struct hinic_eq *eq, u32 init_val) +{ + struct hinic_aeq_elem *aeqe; + int i; + + for (i = 0; i < eq->q_len; i++) { + aeqe = GET_AEQ_ELEM(eq, i); + aeqe->desc = cpu_to_be32(init_val); + } + + wmb(); /* Write the initialization values */ +} + +/** + * ceq_elements_init - Initialize all the elements in the ceq + * @eq: the event queue + * @init_val: value to initialize the elements with + **/ +static void ceq_elements_init(struct hinic_eq *eq, u32 init_val) +{ + u32 *ceqe; + int i; + + for (i = 0; i < eq->q_len; i++) { + ceqe = GET_CEQ_ELEM(eq, i); + *(ceqe) = cpu_to_be32(init_val); + } + + wmb(); /* Write the initialization values */ +} + +/** + * alloc_eq_pages - allocate the pages for the queue + * @eq: the event queue + * + * Return 0 - Success, Negative - Failure + **/ +static int alloc_eq_pages(struct hinic_eq *eq) +{ + struct hinic_hwif *hwif = eq->hwif; + struct pci_dev *pdev = hwif->pdev; + u32 init_val, addr, val; + size_t addr_size; + int err, pg; + + addr_size = eq->num_pages * sizeof(*eq->dma_addr); + eq->dma_addr = devm_kzalloc(&pdev->dev, addr_size, GFP_KERNEL); + if (!eq->dma_addr) + return -ENOMEM; + + addr_size = eq->num_pages * sizeof(*eq->virt_addr); + eq->virt_addr = devm_kzalloc(&pdev->dev, addr_size, GFP_KERNEL); + if (!eq->virt_addr) { + err = -ENOMEM; + goto err_virt_addr_alloc; + } + + for (pg = 0; pg < eq->num_pages; pg++) { + eq->virt_addr[pg] = dma_zalloc_coherent(&pdev->dev, + eq->page_size, + &eq->dma_addr[pg], + GFP_KERNEL); + if (!eq->virt_addr[pg]) { + err = -ENOMEM; + goto err_dma_alloc; + } + + addr = EQ_HI_PHYS_ADDR_REG(eq, pg); + val = 
upper_32_bits(eq->dma_addr[pg]); + + hinic_hwif_write_reg(hwif, addr, val); + + addr = EQ_LO_PHYS_ADDR_REG(eq, pg); + val = lower_32_bits(eq->dma_addr[pg]); + + hinic_hwif_write_reg(hwif, addr, val); + } + + init_val = HINIC_EQ_ELEM_DESC_SET(eq->wrapped, WRAPPED); + + if (eq->type == HINIC_AEQ) + aeq_elements_init(eq, init_val); + else if (eq->type == HINIC_CEQ) + ceq_elements_init(eq, init_val); + + return 0; + +err_dma_alloc: + while (--pg >= 0) + dma_free_coherent(&pdev->dev, eq->page_size, + eq->virt_addr[pg], + eq->dma_addr[pg]); + + devm_kfree(&pdev->dev, eq->virt_addr); + +err_virt_addr_alloc: + devm_kfree(&pdev->dev, eq->dma_addr); + return err; +} + +/** + * free_eq_pages - free the pages of the queue + * @eq: the Event Queue + **/ +static void free_eq_pages(struct hinic_eq *eq) +{ + struct hinic_hwif *hwif = eq->hwif; + struct pci_dev *pdev = hwif->pdev; + int pg; + + for (pg = 0; pg < eq->num_pages; pg++) + dma_free_coherent(&pdev->dev, eq->page_size, + eq->virt_addr[pg], + eq->dma_addr[pg]); + + devm_kfree(&pdev->dev, eq->virt_addr); + devm_kfree(&pdev->dev, eq->dma_addr); +} + +/** + * init_eq - initialize Event Queue + * @eq: the event queue + * @hwif: the HW interface of a PCI function device + * @type: the type of the event queue, aeq or ceq + * @q_id: Queue id number + * @q_len: the number of EQ elements + * @page_size: the page size of the pages in the event queue + * @entry: msix entry associated with the event queue + * + * Return 0 - Success, Negative - Failure + **/ +static int init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif, + enum hinic_eq_type type, int q_id, u32 q_len, u32 page_size, + struct msix_entry entry) +{ + struct pci_dev *pdev = hwif->pdev; + int err; + + eq->hwif = hwif; + eq->type = type; + eq->q_id = q_id; + eq->q_len = q_len; + eq->page_size = page_size; + + /* Clear PI and CI, also clear the ARM bit */ + hinic_hwif_write_reg(eq->hwif, EQ_CONS_IDX_REG_ADDR(eq), 0); + hinic_hwif_write_reg(eq->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0); + + eq->cons_idx = 0; + eq->wrapped = 0; + + if (type == HINIC_AEQ) { + eq->elem_size = HINIC_AEQE_SIZE; + } else if (type == HINIC_CEQ) { + eq->elem_size = HINIC_CEQE_SIZE; + } else { + dev_err(&pdev->dev, "Invalid EQ type\n"); + return -EINVAL; + } + + eq->num_pages = GET_EQ_NUM_PAGES(eq, page_size); + eq->num_elem_in_pg = GET_EQ_NUM_ELEMS_IN_PG(eq, page_size); + + eq->msix_entry = entry; + + if (eq->num_elem_in_pg & (eq->num_elem_in_pg - 1)) { + dev_err(&pdev->dev, "num elements in eq page != power of 2\n"); + return -EINVAL; + } + + if (eq->num_pages > EQ_MAX_PAGES) { + dev_err(&pdev->dev, "too many pages for eq\n"); + return -EINVAL; + } + + set_eq_ctrls(eq); + eq_update_ci(eq); + + err = alloc_eq_pages(eq); + if (err) { + dev_err(&pdev->dev, "Failed to allocate pages for eq\n"); + return err; + } + + if (type == HINIC_AEQ) { + struct hinic_eq_work *aeq_work = &eq->aeq_work; + + INIT_WORK(&aeq_work->work, eq_irq_work); + } else if (type == HINIC_CEQ) { + tasklet_init(&eq->ceq_tasklet, ceq_tasklet, + (unsigned long)eq); + } + + /* set the attributes of the msix entry */ + hinic_msix_attr_set(eq->hwif, eq->msix_entry.entry, + HINIC_EQ_MSIX_PENDING_LIMIT_DEFAULT, + HINIC_EQ_MSIX_COALESC_TIMER_DEFAULT, + HINIC_EQ_MSIX_LLI_TIMER_DEFAULT, + HINIC_EQ_MSIX_LLI_CREDIT_LIMIT_DEFAULT, + HINIC_EQ_MSIX_RESEND_TIMER_DEFAULT); + + if (type == HINIC_AEQ) + err = request_irq(entry.vector, aeq_interrupt, 0, + "hinic_aeq", eq); + else if (type == HINIC_CEQ) + err = request_irq(entry.vector, ceq_interrupt, 0, + "hinic_ceq", eq); + + if 
(err) { + dev_err(&pdev->dev, "Failed to request irq for the EQ\n"); + goto err_req_irq; + } + + return 0; + +err_req_irq: + free_eq_pages(eq); + return err; +} + +/** + * remove_eq - remove Event Queue + * @eq: the event queue + **/ +static void remove_eq(struct hinic_eq *eq) +{ + struct msix_entry *entry = &eq->msix_entry; + + free_irq(entry->vector, eq); + + if (eq->type == HINIC_AEQ) { + struct hinic_eq_work *aeq_work = &eq->aeq_work; + + cancel_work_sync(&aeq_work->work); + } else if (eq->type == HINIC_CEQ) { + tasklet_kill(&eq->ceq_tasklet); + } + + free_eq_pages(eq); +} + +/** + * hinic_aeqs_init - initialize all the aeqs + * @aeqs: pointer to Async eqs of the chip + * @hwif: the HW interface of a PCI function device + * @num_aeqs: number of AEQs + * @q_len: number of EQ elements + * @page_size: the page size of the pages in the event queue + * @msix_entries: msix entries associated with the event queues + * + * Return 0 - Success, negative - Failure + **/ +int hinic_aeqs_init(struct hinic_aeqs *aeqs, struct hinic_hwif *hwif, + int num_aeqs, u32 q_len, u32 page_size, + struct msix_entry *msix_entries) +{ + struct pci_dev *pdev = hwif->pdev; + int err, i, q_id; + + aeqs->workq = create_singlethread_workqueue(HINIC_EQS_WQ_NAME); + if (!aeqs->workq) + return -ENOMEM; + + aeqs->hwif = hwif; + aeqs->num_aeqs = num_aeqs; + + for (q_id = 0; q_id < num_aeqs; q_id++) { + err = init_eq(&aeqs->aeq[q_id], hwif, HINIC_AEQ, q_id, q_len, + page_size, msix_entries[q_id]); + if (err) { + dev_err(&pdev->dev, "Failed to init aeq %d\n", q_id); + goto err_init_aeq; + } + } + + return 0; + +err_init_aeq: + for (i = 0; i < q_id; i++) + remove_eq(&aeqs->aeq[i]); + + destroy_workqueue(aeqs->workq); + return err; +} + +/** + * hinic_aeqs_free - free all the aeqs + * @aeqs: pointer to Async eqs of the chip + **/ +void hinic_aeqs_free(struct hinic_aeqs *aeqs) +{ + int q_id; + + for (q_id = 0; q_id < aeqs->num_aeqs ; q_id++) + remove_eq(&aeqs->aeq[q_id]); + + destroy_workqueue(aeqs->workq); +} + +/** + * hinic_ceqs_init - init all the ceqs + * @ceqs: ceqs part of the chip + * @hwif: the hardware interface of a pci function device + * @num_ceqs: number of CEQs + * @q_len: number of EQ elements + * @page_size: the page size of the event queue + * @msix_entries: msix entries associated with the event queues + * + * Return 0 - Success, Negative - Failure + **/ +int hinic_ceqs_init(struct hinic_ceqs *ceqs, struct hinic_hwif *hwif, + int num_ceqs, u32 q_len, u32 page_size, + struct msix_entry *msix_entries) +{ + struct pci_dev *pdev = hwif->pdev; + int i, q_id, err; + + ceqs->hwif = hwif; + ceqs->num_ceqs = num_ceqs; + + for (q_id = 0; q_id < num_ceqs; q_id++) { + err = init_eq(&ceqs->ceq[q_id], hwif, HINIC_CEQ, q_id, q_len, + page_size, msix_entries[q_id]); + if (err) { + dev_err(&pdev->dev, "Failed to init ceq %d\n", q_id); + goto err_init_ceq; + } + } + + return 0; + +err_init_ceq: + for (i = 0; i < q_id; i++) + remove_eq(&ceqs->ceq[i]); + + return err; +} + +/** + * hinic_ceqs_free - free all the ceqs + * @ceqs: ceqs part of the chip + **/ +void hinic_ceqs_free(struct hinic_ceqs *ceqs) +{ + int q_id; + + for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) + remove_eq(&ceqs->ceq[q_id]); +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h new file mode 100644 index 000000000000..ecb9c2bc6dc8 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h @@ -0,0 +1,265 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei 
Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef HINIC_HW_EQS_H +#define HINIC_HW_EQS_H + +#include +#include +#include +#include +#include +#include + +#include "hinic_hw_if.h" + +#define HINIC_AEQ_CTRL_0_INT_IDX_SHIFT 0 +#define HINIC_AEQ_CTRL_0_DMA_ATTR_SHIFT 12 +#define HINIC_AEQ_CTRL_0_PCI_INTF_IDX_SHIFT 20 +#define HINIC_AEQ_CTRL_0_INT_MODE_SHIFT 31 + +#define HINIC_AEQ_CTRL_0_INT_IDX_MASK 0x3FF +#define HINIC_AEQ_CTRL_0_DMA_ATTR_MASK 0x3F +#define HINIC_AEQ_CTRL_0_PCI_INTF_IDX_MASK 0x3 +#define HINIC_AEQ_CTRL_0_INT_MODE_MASK 0x1 + +#define HINIC_AEQ_CTRL_0_SET(val, member) \ + (((u32)(val) & HINIC_AEQ_CTRL_0_##member##_MASK) << \ + HINIC_AEQ_CTRL_0_##member##_SHIFT) + +#define HINIC_AEQ_CTRL_0_CLEAR(val, member) \ + ((val) & (~(HINIC_AEQ_CTRL_0_##member##_MASK \ + << HINIC_AEQ_CTRL_0_##member##_SHIFT))) + +#define HINIC_AEQ_CTRL_1_LEN_SHIFT 0 +#define HINIC_AEQ_CTRL_1_ELEM_SIZE_SHIFT 24 +#define HINIC_AEQ_CTRL_1_PAGE_SIZE_SHIFT 28 + +#define HINIC_AEQ_CTRL_1_LEN_MASK 0x1FFFFF +#define HINIC_AEQ_CTRL_1_ELEM_SIZE_MASK 0x3 +#define HINIC_AEQ_CTRL_1_PAGE_SIZE_MASK 0xF + +#define HINIC_AEQ_CTRL_1_SET(val, member) \ + (((u32)(val) & HINIC_AEQ_CTRL_1_##member##_MASK) << \ + HINIC_AEQ_CTRL_1_##member##_SHIFT) + +#define HINIC_AEQ_CTRL_1_CLEAR(val, member) \ + ((val) & (~(HINIC_AEQ_CTRL_1_##member##_MASK \ + << HINIC_AEQ_CTRL_1_##member##_SHIFT))) + +#define HINIC_CEQ_CTRL_0_INTR_IDX_SHIFT 0 +#define HINIC_CEQ_CTRL_0_DMA_ATTR_SHIFT 12 +#define HINIC_CEQ_CTRL_0_KICK_THRESH_SHIFT 20 +#define HINIC_CEQ_CTRL_0_PCI_INTF_IDX_SHIFT 24 +#define HINIC_CEQ_CTRL_0_INTR_MODE_SHIFT 31 + +#define HINIC_CEQ_CTRL_0_INTR_IDX_MASK 0x3FF +#define HINIC_CEQ_CTRL_0_DMA_ATTR_MASK 0x3F +#define HINIC_CEQ_CTRL_0_KICK_THRESH_MASK 0xF +#define HINIC_CEQ_CTRL_0_PCI_INTF_IDX_MASK 0x3 +#define HINIC_CEQ_CTRL_0_INTR_MODE_MASK 0x1 + +#define HINIC_CEQ_CTRL_0_SET(val, member) \ + (((u32)(val) & HINIC_CEQ_CTRL_0_##member##_MASK) << \ + HINIC_CEQ_CTRL_0_##member##_SHIFT) + +#define HINIC_CEQ_CTRL_0_CLEAR(val, member) \ + ((val) & (~(HINIC_CEQ_CTRL_0_##member##_MASK \ + << HINIC_CEQ_CTRL_0_##member##_SHIFT))) + +#define HINIC_CEQ_CTRL_1_LEN_SHIFT 0 +#define HINIC_CEQ_CTRL_1_PAGE_SIZE_SHIFT 28 + +#define HINIC_CEQ_CTRL_1_LEN_MASK 0x1FFFFF +#define HINIC_CEQ_CTRL_1_PAGE_SIZE_MASK 0xF + +#define HINIC_CEQ_CTRL_1_SET(val, member) \ + (((u32)(val) & HINIC_CEQ_CTRL_1_##member##_MASK) << \ + HINIC_CEQ_CTRL_1_##member##_SHIFT) + +#define HINIC_CEQ_CTRL_1_CLEAR(val, member) \ + ((val) & (~(HINIC_CEQ_CTRL_1_##member##_MASK \ + << HINIC_CEQ_CTRL_1_##member##_SHIFT))) + +#define HINIC_EQ_ELEM_DESC_TYPE_SHIFT 0 +#define HINIC_EQ_ELEM_DESC_SRC_SHIFT 7 +#define HINIC_EQ_ELEM_DESC_SIZE_SHIFT 8 +#define HINIC_EQ_ELEM_DESC_WRAPPED_SHIFT 31 + +#define HINIC_EQ_ELEM_DESC_TYPE_MASK 0x7F +#define HINIC_EQ_ELEM_DESC_SRC_MASK 0x1 +#define HINIC_EQ_ELEM_DESC_SIZE_MASK 0xFF +#define HINIC_EQ_ELEM_DESC_WRAPPED_MASK 0x1 + +#define HINIC_EQ_ELEM_DESC_SET(val, member) \ + (((u32)(val) & HINIC_EQ_ELEM_DESC_##member##_MASK) << \ + HINIC_EQ_ELEM_DESC_##member##_SHIFT) + +#define HINIC_EQ_ELEM_DESC_GET(val, member) 
\ + (((val) >> HINIC_EQ_ELEM_DESC_##member##_SHIFT) & \ + HINIC_EQ_ELEM_DESC_##member##_MASK) + +#define HINIC_EQ_CI_IDX_SHIFT 0 +#define HINIC_EQ_CI_WRAPPED_SHIFT 20 +#define HINIC_EQ_CI_XOR_CHKSUM_SHIFT 24 +#define HINIC_EQ_CI_INT_ARMED_SHIFT 31 + +#define HINIC_EQ_CI_IDX_MASK 0xFFFFF +#define HINIC_EQ_CI_WRAPPED_MASK 0x1 +#define HINIC_EQ_CI_XOR_CHKSUM_MASK 0xF +#define HINIC_EQ_CI_INT_ARMED_MASK 0x1 + +#define HINIC_EQ_CI_SET(val, member) \ + (((u32)(val) & HINIC_EQ_CI_##member##_MASK) << \ + HINIC_EQ_CI_##member##_SHIFT) + +#define HINIC_EQ_CI_CLEAR(val, member) \ + ((val) & (~(HINIC_EQ_CI_##member##_MASK \ + << HINIC_EQ_CI_##member##_SHIFT))) + +#define HINIC_MAX_AEQS 4 +#define HINIC_MAX_CEQS 32 + +#define HINIC_AEQE_SIZE 64 +#define HINIC_CEQE_SIZE 4 + +#define HINIC_AEQE_DESC_SIZE 4 +#define HINIC_AEQE_DATA_SIZE \ + (HINIC_AEQE_SIZE - HINIC_AEQE_DESC_SIZE) + +#define HINIC_DEFAULT_AEQ_LEN 64 +#define HINIC_DEFAULT_CEQ_LEN 1024 + +#define HINIC_EQ_PAGE_SIZE SZ_4K + +#define HINIC_CEQ_ID_CMDQ 0 + +enum hinic_eq_type { + HINIC_AEQ, + HINIC_CEQ, +}; + +enum hinic_aeq_type { + HINIC_MSG_FROM_MGMT_CPU = 2, + + HINIC_MAX_AEQ_EVENTS, +}; + +enum hinic_ceq_type { + HINIC_CEQ_CMDQ = 3, + + HINIC_MAX_CEQ_EVENTS, +}; + +enum hinic_eqe_state { + HINIC_EQE_ENABLED = BIT(0), + HINIC_EQE_RUNNING = BIT(1), +}; + +struct hinic_aeq_elem { + u8 data[HINIC_AEQE_DATA_SIZE]; + u32 desc; +}; + +struct hinic_eq_work { + struct work_struct work; + void *data; +}; + +struct hinic_eq { + struct hinic_hwif *hwif; + + enum hinic_eq_type type; + int q_id; + u32 q_len; + u32 page_size; + + u32 cons_idx; + int wrapped; + + size_t elem_size; + int num_pages; + int num_elem_in_pg; + + struct msix_entry msix_entry; + + dma_addr_t *dma_addr; + void **virt_addr; + + struct hinic_eq_work aeq_work; + + struct tasklet_struct ceq_tasklet; +}; + +struct hinic_hw_event_cb { + void (*hwe_handler)(void *handle, void *data, u8 size); + void *handle; + unsigned long hwe_state; +}; + +struct hinic_aeqs { + struct hinic_hwif *hwif; + + struct hinic_eq aeq[HINIC_MAX_AEQS]; + int num_aeqs; + + struct hinic_hw_event_cb hwe_cb[HINIC_MAX_AEQ_EVENTS]; + + struct workqueue_struct *workq; +}; + +struct hinic_ceq_cb { + void (*handler)(void *handle, u32 ceqe_data); + void *handle; + enum hinic_eqe_state ceqe_state; +}; + +struct hinic_ceqs { + struct hinic_hwif *hwif; + + struct hinic_eq ceq[HINIC_MAX_CEQS]; + int num_ceqs; + + struct hinic_ceq_cb ceq_cb[HINIC_MAX_CEQ_EVENTS]; +}; + +void hinic_aeq_register_hw_cb(struct hinic_aeqs *aeqs, + enum hinic_aeq_type event, void *handle, + void (*hwe_handler)(void *handle, void *data, + u8 size)); + +void hinic_aeq_unregister_hw_cb(struct hinic_aeqs *aeqs, + enum hinic_aeq_type event); + +void hinic_ceq_register_cb(struct hinic_ceqs *ceqs, + enum hinic_ceq_type event, void *handle, + void (*ceq_cb)(void *handle, u32 ceqe_data)); + +void hinic_ceq_unregister_cb(struct hinic_ceqs *ceqs, + enum hinic_ceq_type event); + +int hinic_aeqs_init(struct hinic_aeqs *aeqs, struct hinic_hwif *hwif, + int num_aeqs, u32 q_len, u32 page_size, + struct msix_entry *msix_entries); + +void hinic_aeqs_free(struct hinic_aeqs *aeqs); + +int hinic_ceqs_init(struct hinic_ceqs *ceqs, struct hinic_hwif *hwif, + int num_ceqs, u32 q_len, u32 page_size, + struct msix_entry *msix_entries); + +void hinic_ceqs_free(struct hinic_ceqs *ceqs); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c new file mode 100644 index 000000000000..823a17061a97 --- /dev/null 
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c @@ -0,0 +1,351 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#include +#include +#include +#include +#include +#include + +#include "hinic_hw_csr.h" +#include "hinic_hw_if.h" + +#define PCIE_ATTR_ENTRY 0 + +#define VALID_MSIX_IDX(attr, msix_index) ((msix_index) < (attr)->num_irqs) + +/** + * hinic_msix_attr_set - set message attribute for msix entry + * @hwif: the HW interface of a pci function device + * @msix_index: msix_index + * @pending_limit: the maximum pending interrupt events (unit 8) + * @coalesc_timer: coalesc period for interrupt (unit 8 us) + * @lli_timer: replenishing period for low latency credit (unit 8 us) + * @lli_credit_limit: maximum credits for low latency msix messages (unit 8) + * @resend_timer: maximum wait for resending msix (unit coalesc period) + * + * Return 0 - Success, negative - Failure + **/ +int hinic_msix_attr_set(struct hinic_hwif *hwif, u16 msix_index, + u8 pending_limit, u8 coalesc_timer, + u8 lli_timer, u8 lli_credit_limit, + u8 resend_timer) +{ + u32 msix_ctrl, addr; + + if (!VALID_MSIX_IDX(&hwif->attr, msix_index)) + return -EINVAL; + + msix_ctrl = HINIC_MSIX_ATTR_SET(pending_limit, PENDING_LIMIT) | + HINIC_MSIX_ATTR_SET(coalesc_timer, COALESC_TIMER) | + HINIC_MSIX_ATTR_SET(lli_timer, LLI_TIMER) | + HINIC_MSIX_ATTR_SET(lli_credit_limit, LLI_CREDIT) | + HINIC_MSIX_ATTR_SET(resend_timer, RESEND_TIMER); + + addr = HINIC_CSR_MSIX_CTRL_ADDR(msix_index); + + hinic_hwif_write_reg(hwif, addr, msix_ctrl); + return 0; +} + +/** + * hinic_msix_attr_get - get message attribute of msix entry + * @hwif: the HW interface of a pci function device + * @msix_index: msix_index + * @pending_limit: the maximum pending interrupt events (unit 8) + * @coalesc_timer: coalesc period for interrupt (unit 8 us) + * @lli_timer: replenishing period for low latency credit (unit 8 us) + * @lli_credit_limit: maximum credits for low latency msix messages (unit 8) + * @resend_timer: maximum wait for resending msix (unit coalesc period) + * + * Return 0 - Success, negative - Failure + **/ +int hinic_msix_attr_get(struct hinic_hwif *hwif, u16 msix_index, + u8 *pending_limit, u8 *coalesc_timer, + u8 *lli_timer, u8 *lli_credit_limit, + u8 *resend_timer) +{ + u32 addr, val; + + if (!VALID_MSIX_IDX(&hwif->attr, msix_index)) + return -EINVAL; + + addr = HINIC_CSR_MSIX_CTRL_ADDR(msix_index); + val = hinic_hwif_read_reg(hwif, addr); + + *pending_limit = HINIC_MSIX_ATTR_GET(val, PENDING_LIMIT); + *coalesc_timer = HINIC_MSIX_ATTR_GET(val, COALESC_TIMER); + *lli_timer = HINIC_MSIX_ATTR_GET(val, LLI_TIMER); + *lli_credit_limit = HINIC_MSIX_ATTR_GET(val, LLI_CREDIT); + *resend_timer = HINIC_MSIX_ATTR_GET(val, RESEND_TIMER); + return 0; +} + +/** + * hinic_msix_attr_cnt_clear - clear message attribute counters for msix entry + * @hwif: the HW interface of a pci function device + * @msix_index: msix_index + * + * Return 0 - Success, negative - Failure + **/ +int hinic_msix_attr_cnt_clear(struct hinic_hwif *hwif, u16 msix_index) +{ + 
u32 msix_ctrl, addr; + + if (!VALID_MSIX_IDX(&hwif->attr, msix_index)) + return -EINVAL; + + msix_ctrl = HINIC_MSIX_CNT_SET(1, RESEND_TIMER); + addr = HINIC_CSR_MSIX_CNT_ADDR(msix_index); + + hinic_hwif_write_reg(hwif, addr, msix_ctrl); + return 0; +} + +/** + * hinic_set_pf_action - set action on pf channel + * @hwif: the HW interface of a pci function device + * @action: action on pf channel + **/ +void hinic_set_pf_action(struct hinic_hwif *hwif, enum hinic_pf_action action) +{ + u32 attr5 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR5_ADDR); + + attr5 = HINIC_FA5_CLEAR(attr5, PF_ACTION); + attr5 |= HINIC_FA5_SET(action, PF_ACTION); + + hinic_hwif_write_reg(hwif, HINIC_CSR_FUNC_ATTR5_ADDR, attr5); +} + +enum hinic_outbound_state hinic_outbound_state_get(struct hinic_hwif *hwif) +{ + u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR); + + return HINIC_FA4_GET(attr4, OUTBOUND_STATE); +} + +void hinic_outbound_state_set(struct hinic_hwif *hwif, + enum hinic_outbound_state outbound_state) +{ + u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR); + + attr4 = HINIC_FA4_CLEAR(attr4, OUTBOUND_STATE); + attr4 |= HINIC_FA4_SET(outbound_state, OUTBOUND_STATE); + + hinic_hwif_write_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR, attr4); +} + +enum hinic_db_state hinic_db_state_get(struct hinic_hwif *hwif) +{ + u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR); + + return HINIC_FA4_GET(attr4, DB_STATE); +} + +void hinic_db_state_set(struct hinic_hwif *hwif, + enum hinic_db_state db_state) +{ + u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR); + + attr4 = HINIC_FA4_CLEAR(attr4, DB_STATE); + attr4 |= HINIC_FA4_SET(db_state, DB_STATE); + + hinic_hwif_write_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR, attr4); +} + +/** + * hwif_ready - test if the HW is ready for use + * @hwif: the HW interface of a pci function device + * + * Return 0 - Success, negative - Failure + **/ +static int hwif_ready(struct hinic_hwif *hwif) +{ + struct pci_dev *pdev = hwif->pdev; + u32 addr, attr1; + + addr = HINIC_CSR_FUNC_ATTR1_ADDR; + attr1 = hinic_hwif_read_reg(hwif, addr); + + if (!HINIC_FA1_GET(attr1, INIT_STATUS)) { + dev_err(&pdev->dev, "hwif status is not ready\n"); + return -EFAULT; + } + + return 0; +} + +/** + * set_hwif_attr - set the attributes in the relevant members in hwif + * @hwif: the HW interface of a pci function device + * @attr0: the first attribute that was read from the hw + * @attr1: the second attribute that was read from the hw + **/ +static void set_hwif_attr(struct hinic_hwif *hwif, u32 attr0, u32 attr1) +{ + hwif->attr.func_idx = HINIC_FA0_GET(attr0, FUNC_IDX); + hwif->attr.pf_idx = HINIC_FA0_GET(attr0, PF_IDX); + hwif->attr.pci_intf_idx = HINIC_FA0_GET(attr0, PCI_INTF_IDX); + hwif->attr.func_type = HINIC_FA0_GET(attr0, FUNC_TYPE); + + hwif->attr.num_aeqs = BIT(HINIC_FA1_GET(attr1, AEQS_PER_FUNC)); + hwif->attr.num_ceqs = BIT(HINIC_FA1_GET(attr1, CEQS_PER_FUNC)); + hwif->attr.num_irqs = BIT(HINIC_FA1_GET(attr1, IRQS_PER_FUNC)); + hwif->attr.num_dma_attr = BIT(HINIC_FA1_GET(attr1, DMA_ATTR_PER_FUNC)); +} + +/** + * read_hwif_attr - read the attributes and set members in hwif + * @hwif: the HW interface of a pci function device + **/ +static void read_hwif_attr(struct hinic_hwif *hwif) +{ + u32 addr, attr0, attr1; + + addr = HINIC_CSR_FUNC_ATTR0_ADDR; + attr0 = hinic_hwif_read_reg(hwif, addr); + + addr = HINIC_CSR_FUNC_ATTR1_ADDR; + attr1 = hinic_hwif_read_reg(hwif, addr); + + set_hwif_attr(hwif, attr0, 
attr1); +} + +/** + * set_ppf - try to set hwif as ppf and update its type if it was elected + * @hwif: the HW interface of a pci function device + **/ +static void set_ppf(struct hinic_hwif *hwif) +{ + struct hinic_func_attr *attr = &hwif->attr; + u32 addr, val, ppf_election; + + /* Read Modify Write */ + addr = HINIC_CSR_PPF_ELECTION_ADDR(HINIC_HWIF_PCI_INTF(hwif)); + + val = hinic_hwif_read_reg(hwif, addr); + val = HINIC_PPF_ELECTION_CLEAR(val, IDX); + + ppf_election = HINIC_PPF_ELECTION_SET(HINIC_HWIF_FUNC_IDX(hwif), IDX); + + val |= ppf_election; + hinic_hwif_write_reg(hwif, addr, val); + + /* check PPF */ + val = hinic_hwif_read_reg(hwif, addr); + + attr->ppf_idx = HINIC_PPF_ELECTION_GET(val, IDX); + if (attr->ppf_idx == HINIC_HWIF_FUNC_IDX(hwif)) + attr->func_type = HINIC_PPF; +} + +/** + * set_dma_attr - set the dma attributes in the HW + * @hwif: the HW interface of a pci function device + * @entry_idx: the entry index in the dma table + * @st: PCIE TLP steering tag + * @at: PCIE TLP AT field + * @ph: PCIE TLP Processing Hint field + * @no_snooping: PCIE TLP No snooping + * @tph_en: PCIE TLP Processing Hint Enable + **/ +static void set_dma_attr(struct hinic_hwif *hwif, u32 entry_idx, + u8 st, u8 at, u8 ph, + enum hinic_pcie_nosnoop no_snooping, + enum hinic_pcie_tph tph_en) +{ + u32 addr, val, dma_attr_entry; + + /* Read Modify Write */ + addr = HINIC_CSR_DMA_ATTR_ADDR(entry_idx); + + val = hinic_hwif_read_reg(hwif, addr); + val = HINIC_DMA_ATTR_CLEAR(val, ST) & + HINIC_DMA_ATTR_CLEAR(val, AT) & + HINIC_DMA_ATTR_CLEAR(val, PH) & + HINIC_DMA_ATTR_CLEAR(val, NO_SNOOPING) & + HINIC_DMA_ATTR_CLEAR(val, TPH_EN); + + dma_attr_entry = HINIC_DMA_ATTR_SET(st, ST) | + HINIC_DMA_ATTR_SET(at, AT) | + HINIC_DMA_ATTR_SET(ph, PH) | + HINIC_DMA_ATTR_SET(no_snooping, NO_SNOOPING) | + HINIC_DMA_ATTR_SET(tph_en, TPH_EN); + + val |= dma_attr_entry; + hinic_hwif_write_reg(hwif, addr, val); +} + +/** + * dma_attr_init - initialize the default dma attributes + * @hwif: the HW interface of a pci function device + **/ +static void dma_attr_init(struct hinic_hwif *hwif) +{ + set_dma_attr(hwif, PCIE_ATTR_ENTRY, HINIC_PCIE_ST_DISABLE, + HINIC_PCIE_AT_DISABLE, HINIC_PCIE_PH_DISABLE, + HINIC_PCIE_SNOOP, HINIC_PCIE_TPH_DISABLE); +} + +/** + * hinic_init_hwif - initialize the hw interface + * @hwif: the HW interface of a pci function device + * @pdev: the pci device for accessing PCI resources + * + * Return 0 - Success, negative - Failure + **/ +int hinic_init_hwif(struct hinic_hwif *hwif, struct pci_dev *pdev) +{ + int err; + + hwif->pdev = pdev; + + hwif->cfg_regs_bar = pci_ioremap_bar(pdev, HINIC_PCI_CFG_REGS_BAR); + if (!hwif->cfg_regs_bar) { + dev_err(&pdev->dev, "Failed to map configuration regs\n"); + return -ENOMEM; + } + + err = hwif_ready(hwif); + if (err) { + dev_err(&pdev->dev, "HW interface is not ready\n"); + goto err_hwif_ready; + } + + read_hwif_attr(hwif); + + if (HINIC_IS_PF(hwif)) + set_ppf(hwif); + + /* No transactions before DMA is initialized */ + dma_attr_init(hwif); + return 0; + +err_hwif_ready: + iounmap(hwif->cfg_regs_bar); + return err; +} + +/** + * hinic_free_hwif - free the HW interface + * @hwif: the HW interface of a pci function device + **/ +void hinic_free_hwif(struct hinic_hwif *hwif) +{ + iounmap(hwif->cfg_regs_bar); +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h new file mode 100644 index 000000000000..5b4760c0e9f5 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h @@ -0,0 +1,272 
@@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef HINIC_HW_IF_H +#define HINIC_HW_IF_H + +#include +#include +#include +#include + +#define HINIC_DMA_ATTR_ST_SHIFT 0 +#define HINIC_DMA_ATTR_AT_SHIFT 8 +#define HINIC_DMA_ATTR_PH_SHIFT 10 +#define HINIC_DMA_ATTR_NO_SNOOPING_SHIFT 12 +#define HINIC_DMA_ATTR_TPH_EN_SHIFT 13 + +#define HINIC_DMA_ATTR_ST_MASK 0xFF +#define HINIC_DMA_ATTR_AT_MASK 0x3 +#define HINIC_DMA_ATTR_PH_MASK 0x3 +#define HINIC_DMA_ATTR_NO_SNOOPING_MASK 0x1 +#define HINIC_DMA_ATTR_TPH_EN_MASK 0x1 + +#define HINIC_DMA_ATTR_SET(val, member) \ + (((u32)(val) & HINIC_DMA_ATTR_##member##_MASK) << \ + HINIC_DMA_ATTR_##member##_SHIFT) + +#define HINIC_DMA_ATTR_CLEAR(val, member) \ + ((val) & (~(HINIC_DMA_ATTR_##member##_MASK \ + << HINIC_DMA_ATTR_##member##_SHIFT))) + +#define HINIC_FA0_FUNC_IDX_SHIFT 0 +#define HINIC_FA0_PF_IDX_SHIFT 10 +#define HINIC_FA0_PCI_INTF_IDX_SHIFT 14 +/* reserved members - off 16 */ +#define HINIC_FA0_FUNC_TYPE_SHIFT 24 + +#define HINIC_FA0_FUNC_IDX_MASK 0x3FF +#define HINIC_FA0_PF_IDX_MASK 0xF +#define HINIC_FA0_PCI_INTF_IDX_MASK 0x3 +#define HINIC_FA0_FUNC_TYPE_MASK 0x1 + +#define HINIC_FA0_GET(val, member) \ + (((val) >> HINIC_FA0_##member##_SHIFT) & HINIC_FA0_##member##_MASK) + +#define HINIC_FA1_AEQS_PER_FUNC_SHIFT 8 +/* reserved members - off 10 */ +#define HINIC_FA1_CEQS_PER_FUNC_SHIFT 12 +/* reserved members - off 15 */ +#define HINIC_FA1_IRQS_PER_FUNC_SHIFT 20 +#define HINIC_FA1_DMA_ATTR_PER_FUNC_SHIFT 24 +/* reserved members - off 27 */ +#define HINIC_FA1_INIT_STATUS_SHIFT 30 + +#define HINIC_FA1_AEQS_PER_FUNC_MASK 0x3 +#define HINIC_FA1_CEQS_PER_FUNC_MASK 0x7 +#define HINIC_FA1_IRQS_PER_FUNC_MASK 0xF +#define HINIC_FA1_DMA_ATTR_PER_FUNC_MASK 0x7 +#define HINIC_FA1_INIT_STATUS_MASK 0x1 + +#define HINIC_FA1_GET(val, member) \ + (((val) >> HINIC_FA1_##member##_SHIFT) & HINIC_FA1_##member##_MASK) + +#define HINIC_FA4_OUTBOUND_STATE_SHIFT 0 +#define HINIC_FA4_DB_STATE_SHIFT 1 + +#define HINIC_FA4_OUTBOUND_STATE_MASK 0x1 +#define HINIC_FA4_DB_STATE_MASK 0x1 + +#define HINIC_FA4_GET(val, member) \ + (((val) >> HINIC_FA4_##member##_SHIFT) & HINIC_FA4_##member##_MASK) + +#define HINIC_FA4_SET(val, member) \ + ((((u32)val) & HINIC_FA4_##member##_MASK) << HINIC_FA4_##member##_SHIFT) + +#define HINIC_FA4_CLEAR(val, member) \ + ((val) & (~(HINIC_FA4_##member##_MASK << HINIC_FA4_##member##_SHIFT))) + +#define HINIC_FA5_PF_ACTION_SHIFT 0 +#define HINIC_FA5_PF_ACTION_MASK 0xFFFF + +#define HINIC_FA5_SET(val, member) \ + (((u32)(val) & HINIC_FA5_##member##_MASK) << HINIC_FA5_##member##_SHIFT) + +#define HINIC_FA5_CLEAR(val, member) \ + ((val) & (~(HINIC_FA5_##member##_MASK << HINIC_FA5_##member##_SHIFT))) + +#define HINIC_PPF_ELECTION_IDX_SHIFT 0 +#define HINIC_PPF_ELECTION_IDX_MASK 0x1F + +#define HINIC_PPF_ELECTION_SET(val, member) \ + (((u32)(val) & HINIC_PPF_ELECTION_##member##_MASK) << \ + HINIC_PPF_ELECTION_##member##_SHIFT) + +#define HINIC_PPF_ELECTION_GET(val, member) \ + (((val) >> HINIC_PPF_ELECTION_##member##_SHIFT) & \ + 
HINIC_PPF_ELECTION_##member##_MASK) + +#define HINIC_PPF_ELECTION_CLEAR(val, member) \ + ((val) & (~(HINIC_PPF_ELECTION_##member##_MASK \ + << HINIC_PPF_ELECTION_##member##_SHIFT))) + +#define HINIC_MSIX_PENDING_LIMIT_SHIFT 0 +#define HINIC_MSIX_COALESC_TIMER_SHIFT 8 +#define HINIC_MSIX_LLI_TIMER_SHIFT 16 +#define HINIC_MSIX_LLI_CREDIT_SHIFT 24 +#define HINIC_MSIX_RESEND_TIMER_SHIFT 29 + +#define HINIC_MSIX_PENDING_LIMIT_MASK 0xFF +#define HINIC_MSIX_COALESC_TIMER_MASK 0xFF +#define HINIC_MSIX_LLI_TIMER_MASK 0xFF +#define HINIC_MSIX_LLI_CREDIT_MASK 0x1F +#define HINIC_MSIX_RESEND_TIMER_MASK 0x7 + +#define HINIC_MSIX_ATTR_SET(val, member) \ + (((u32)(val) & HINIC_MSIX_##member##_MASK) << \ + HINIC_MSIX_##member##_SHIFT) + +#define HINIC_MSIX_ATTR_GET(val, member) \ + (((val) >> HINIC_MSIX_##member##_SHIFT) & \ + HINIC_MSIX_##member##_MASK) + +#define HINIC_MSIX_CNT_RESEND_TIMER_SHIFT 29 + +#define HINIC_MSIX_CNT_RESEND_TIMER_MASK 0x1 + +#define HINIC_MSIX_CNT_SET(val, member) \ + (((u32)(val) & HINIC_MSIX_CNT_##member##_MASK) << \ + HINIC_MSIX_CNT_##member##_SHIFT) + +#define HINIC_HWIF_NUM_AEQS(hwif) ((hwif)->attr.num_aeqs) +#define HINIC_HWIF_NUM_CEQS(hwif) ((hwif)->attr.num_ceqs) +#define HINIC_HWIF_NUM_IRQS(hwif) ((hwif)->attr.num_irqs) +#define HINIC_HWIF_FUNC_IDX(hwif) ((hwif)->attr.func_idx) +#define HINIC_HWIF_PCI_INTF(hwif) ((hwif)->attr.pci_intf_idx) +#define HINIC_HWIF_PF_IDX(hwif) ((hwif)->attr.pf_idx) + +#define HINIC_FUNC_TYPE(hwif) ((hwif)->attr.func_type) +#define HINIC_IS_PF(hwif) (HINIC_FUNC_TYPE(hwif) == HINIC_PF) +#define HINIC_IS_PPF(hwif) (HINIC_FUNC_TYPE(hwif) == HINIC_PPF) + +#define HINIC_PCI_CFG_REGS_BAR 0 +#define HINIC_PCI_DB_BAR 4 + +#define HINIC_PCIE_ST_DISABLE 0 +#define HINIC_PCIE_AT_DISABLE 0 +#define HINIC_PCIE_PH_DISABLE 0 + +#define HINIC_EQ_MSIX_PENDING_LIMIT_DEFAULT 0 /* Disabled */ +#define HINIC_EQ_MSIX_COALESC_TIMER_DEFAULT 0xFF /* max */ +#define HINIC_EQ_MSIX_LLI_TIMER_DEFAULT 0 /* Disabled */ +#define HINIC_EQ_MSIX_LLI_CREDIT_LIMIT_DEFAULT 0 /* Disabled */ +#define HINIC_EQ_MSIX_RESEND_TIMER_DEFAULT 7 /* max */ + +enum hinic_pcie_nosnoop { + HINIC_PCIE_SNOOP = 0, + HINIC_PCIE_NO_SNOOP = 1, +}; + +enum hinic_pcie_tph { + HINIC_PCIE_TPH_DISABLE = 0, + HINIC_PCIE_TPH_ENABLE = 1, +}; + +enum hinic_func_type { + HINIC_PF = 0, + HINIC_PPF = 2, +}; + +enum hinic_mod_type { + HINIC_MOD_COMM = 0, /* HW communication module */ + HINIC_MOD_L2NIC = 1, /* L2NIC module */ + HINIC_MOD_CFGM = 7, /* Configuration module */ + + HINIC_MOD_MAX = 15 +}; + +enum hinic_node_id { + HINIC_NODE_ID_MGMT = 21, +}; + +enum hinic_pf_action { + HINIC_PF_MGMT_INIT = 0x0, + + HINIC_PF_MGMT_ACTIVE = 0x11, +}; + +enum hinic_outbound_state { + HINIC_OUTBOUND_ENABLE = 0, + HINIC_OUTBOUND_DISABLE = 1, +}; + +enum hinic_db_state { + HINIC_DB_ENABLE = 0, + HINIC_DB_DISABLE = 1, +}; + +struct hinic_func_attr { + u16 func_idx; + u8 pf_idx; + u8 pci_intf_idx; + + enum hinic_func_type func_type; + + u8 ppf_idx; + + u16 num_irqs; + u8 num_aeqs; + u8 num_ceqs; + + u8 num_dma_attr; +}; + +struct hinic_hwif { + struct pci_dev *pdev; + void __iomem *cfg_regs_bar; + + struct hinic_func_attr attr; +}; + +static inline u32 hinic_hwif_read_reg(struct hinic_hwif *hwif, u32 reg) +{ + return be32_to_cpu(readl(hwif->cfg_regs_bar + reg)); +} + +static inline void hinic_hwif_write_reg(struct hinic_hwif *hwif, u32 reg, + u32 val) +{ + writel(cpu_to_be32(val), hwif->cfg_regs_bar + reg); +} + +int hinic_msix_attr_set(struct hinic_hwif *hwif, u16 msix_index, + u8 pending_limit, u8 coalesc_timer, + u8 
lli_timer_cfg, u8 lli_credit_limit, + u8 resend_timer); + +int hinic_msix_attr_get(struct hinic_hwif *hwif, u16 msix_index, + u8 *pending_limit, u8 *coalesc_timer_cfg, + u8 *lli_timer, u8 *lli_credit_limit, + u8 *resend_timer); + +int hinic_msix_attr_cnt_clear(struct hinic_hwif *hwif, u16 msix_index); + +void hinic_set_pf_action(struct hinic_hwif *hwif, enum hinic_pf_action action); + +enum hinic_outbound_state hinic_outbound_state_get(struct hinic_hwif *hwif); + +void hinic_outbound_state_set(struct hinic_hwif *hwif, + enum hinic_outbound_state outbound_state); + +enum hinic_db_state hinic_db_state_get(struct hinic_hwif *hwif); + +void hinic_db_state_set(struct hinic_hwif *hwif, + enum hinic_db_state db_state); + +int hinic_init_hwif(struct hinic_hwif *hwif, struct pci_dev *pdev); + +void hinic_free_hwif(struct hinic_hwif *hwif); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c new file mode 100644 index 000000000000..8e5897669a3a --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c @@ -0,0 +1,533 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hinic_hw_if.h" +#include "hinic_hw_eqs.h" +#include "hinic_hw_wqe.h" +#include "hinic_hw_wq.h" +#include "hinic_hw_cmdq.h" +#include "hinic_hw_qp_ctxt.h" +#include "hinic_hw_qp.h" +#include "hinic_hw_io.h" + +#define CI_Q_ADDR_SIZE sizeof(u32) + +#define CI_ADDR(base_addr, q_id) ((base_addr) + \ + (q_id) * CI_Q_ADDR_SIZE) + +#define CI_TABLE_SIZE(num_qps) ((num_qps) * CI_Q_ADDR_SIZE) + +#define DB_IDX(db, db_base) \ + (((unsigned long)(db) - (unsigned long)(db_base)) / HINIC_DB_PAGE_SIZE) + +enum io_cmd { + IO_CMD_MODIFY_QUEUE_CTXT = 0, +}; + +static void init_db_area_idx(struct hinic_free_db_area *free_db_area) +{ + int i; + + for (i = 0; i < HINIC_DB_MAX_AREAS; i++) + free_db_area->db_idx[i] = i; + + free_db_area->alloc_pos = 0; + free_db_area->return_pos = HINIC_DB_MAX_AREAS; + + free_db_area->num_free = HINIC_DB_MAX_AREAS; + + sema_init(&free_db_area->idx_lock, 1); +} + +static void __iomem *get_db_area(struct hinic_func_to_io *func_to_io) +{ + struct hinic_free_db_area *free_db_area = &func_to_io->free_db_area; + int pos, idx; + + down(&free_db_area->idx_lock); + + free_db_area->num_free--; + + if (free_db_area->num_free < 0) { + free_db_area->num_free++; + up(&free_db_area->idx_lock); + return ERR_PTR(-ENOMEM); + } + + pos = free_db_area->alloc_pos++; + pos &= HINIC_DB_MAX_AREAS - 1; + + idx = free_db_area->db_idx[pos]; + + free_db_area->db_idx[pos] = -1; + + up(&free_db_area->idx_lock); + + return func_to_io->db_base + idx * HINIC_DB_PAGE_SIZE; +} + +static void return_db_area(struct hinic_func_to_io *func_to_io, + void __iomem *db_base) +{ + struct hinic_free_db_area *free_db_area = &func_to_io->free_db_area; + int pos, idx = DB_IDX(db_base, func_to_io->db_base); + + down(&free_db_area->idx_lock); + + pos = 
free_db_area->return_pos++; + pos &= HINIC_DB_MAX_AREAS - 1; + + free_db_area->db_idx[pos] = idx; + + free_db_area->num_free++; + + up(&free_db_area->idx_lock); +} + +static int write_sq_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn, + u16 num_sqs) +{ + struct hinic_hwif *hwif = func_to_io->hwif; + struct hinic_sq_ctxt_block *sq_ctxt_block; + struct pci_dev *pdev = hwif->pdev; + struct hinic_cmdq_buf cmdq_buf; + struct hinic_sq_ctxt *sq_ctxt; + struct hinic_qp *qp; + u64 out_param; + int err, i; + + err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf); + if (err) { + dev_err(&pdev->dev, "Failed to allocate cmdq buf\n"); + return err; + } + + sq_ctxt_block = cmdq_buf.buf; + sq_ctxt = sq_ctxt_block->sq_ctxt; + + hinic_qp_prepare_header(&sq_ctxt_block->hdr, HINIC_QP_CTXT_TYPE_SQ, + num_sqs, func_to_io->max_qps); + for (i = 0; i < num_sqs; i++) { + qp = &func_to_io->qps[i]; + + hinic_sq_prepare_ctxt(&sq_ctxt[i], &qp->sq, + base_qpn + qp->q_id); + } + + cmdq_buf.size = HINIC_SQ_CTXT_SIZE(num_sqs); + + err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC, + IO_CMD_MODIFY_QUEUE_CTXT, &cmdq_buf, + &out_param); + if ((err) || (out_param != 0)) { + dev_err(&pdev->dev, "Failed to set SQ ctxts\n"); + err = -EFAULT; + } + + hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf); + return err; +} + +static int write_rq_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn, + u16 num_rqs) +{ + struct hinic_hwif *hwif = func_to_io->hwif; + struct hinic_rq_ctxt_block *rq_ctxt_block; + struct pci_dev *pdev = hwif->pdev; + struct hinic_cmdq_buf cmdq_buf; + struct hinic_rq_ctxt *rq_ctxt; + struct hinic_qp *qp; + u64 out_param; + int err, i; + + err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf); + if (err) { + dev_err(&pdev->dev, "Failed to allocate cmdq buf\n"); + return err; + } + + rq_ctxt_block = cmdq_buf.buf; + rq_ctxt = rq_ctxt_block->rq_ctxt; + + hinic_qp_prepare_header(&rq_ctxt_block->hdr, HINIC_QP_CTXT_TYPE_RQ, + num_rqs, func_to_io->max_qps); + for (i = 0; i < num_rqs; i++) { + qp = &func_to_io->qps[i]; + + hinic_rq_prepare_ctxt(&rq_ctxt[i], &qp->rq, + base_qpn + qp->q_id); + } + + cmdq_buf.size = HINIC_RQ_CTXT_SIZE(num_rqs); + + err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC, + IO_CMD_MODIFY_QUEUE_CTXT, &cmdq_buf, + &out_param); + if ((err) || (out_param != 0)) { + dev_err(&pdev->dev, "Failed to set RQ ctxts\n"); + err = -EFAULT; + } + + hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf); + return err; +} + +/** + * write_qp_ctxts - write the qp ctxt to HW + * @func_to_io: func to io channel that holds the IO components + * @base_qpn: first qp number + * @num_qps: number of qps to write + * + * Return 0 - Success, negative - Failure + **/ +static int write_qp_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn, + u16 num_qps) +{ + return (write_sq_ctxts(func_to_io, base_qpn, num_qps) || + write_rq_ctxts(func_to_io, base_qpn, num_qps)); +} + +/** + * init_qp - Initialize a Queue Pair + * @func_to_io: func to io channel that holds the IO components + * @qp: pointer to the qp to initialize + * @q_id: the id of the qp + * @sq_msix_entry: msix entry for sq + * @rq_msix_entry: msix entry for rq + * + * Return 0 - Success, negative - Failure + **/ +static int init_qp(struct hinic_func_to_io *func_to_io, + struct hinic_qp *qp, int q_id, + struct msix_entry *sq_msix_entry, + struct msix_entry *rq_msix_entry) +{ + struct hinic_hwif *hwif = func_to_io->hwif; + struct pci_dev *pdev = hwif->pdev; + void __iomem *db_base; + int err; + + qp->q_id = q_id; + + 
err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->sq_wq[q_id], + HINIC_SQ_WQEBB_SIZE, HINIC_SQ_PAGE_SIZE, + HINIC_SQ_DEPTH, HINIC_SQ_WQE_MAX_SIZE); + if (err) { + dev_err(&pdev->dev, "Failed to allocate WQ for SQ\n"); + return err; + } + + err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->rq_wq[q_id], + HINIC_RQ_WQEBB_SIZE, HINIC_RQ_PAGE_SIZE, + HINIC_RQ_DEPTH, HINIC_RQ_WQE_SIZE); + if (err) { + dev_err(&pdev->dev, "Failed to allocate WQ for RQ\n"); + goto err_rq_alloc; + } + + db_base = get_db_area(func_to_io); + if (IS_ERR(db_base)) { + dev_err(&pdev->dev, "Failed to get DB area for SQ\n"); + err = PTR_ERR(db_base); + goto err_get_db; + } + + func_to_io->sq_db[q_id] = db_base; + + err = hinic_init_sq(&qp->sq, hwif, &func_to_io->sq_wq[q_id], + sq_msix_entry, + CI_ADDR(func_to_io->ci_addr_base, q_id), + CI_ADDR(func_to_io->ci_dma_base, q_id), db_base); + if (err) { + dev_err(&pdev->dev, "Failed to init SQ\n"); + goto err_sq_init; + } + + err = hinic_init_rq(&qp->rq, hwif, &func_to_io->rq_wq[q_id], + rq_msix_entry); + if (err) { + dev_err(&pdev->dev, "Failed to init RQ\n"); + goto err_rq_init; + } + + return 0; + +err_rq_init: + hinic_clean_sq(&qp->sq); + +err_sq_init: + return_db_area(func_to_io, db_base); + +err_get_db: + hinic_wq_free(&func_to_io->wqs, &func_to_io->rq_wq[q_id]); + +err_rq_alloc: + hinic_wq_free(&func_to_io->wqs, &func_to_io->sq_wq[q_id]); + return err; +} + +/** + * destroy_qp - Clean the resources of a Queue Pair + * @func_to_io: func to io channel that holds the IO components + * @qp: pointer to the qp to clean + **/ +static void destroy_qp(struct hinic_func_to_io *func_to_io, + struct hinic_qp *qp) +{ + int q_id = qp->q_id; + + hinic_clean_rq(&qp->rq); + hinic_clean_sq(&qp->sq); + + return_db_area(func_to_io, func_to_io->sq_db[q_id]); + + hinic_wq_free(&func_to_io->wqs, &func_to_io->rq_wq[q_id]); + hinic_wq_free(&func_to_io->wqs, &func_to_io->sq_wq[q_id]); +} + +/** + * hinic_io_create_qps - Create Queue Pairs + * @func_to_io: func to io channel that holds the IO components + * @base_qpn: base qp number + * @num_qps: number queue pairs to create + * @sq_msix_entry: msix entries for sq + * @rq_msix_entry: msix entries for rq + * + * Return 0 - Success, negative - Failure + **/ +int hinic_io_create_qps(struct hinic_func_to_io *func_to_io, + u16 base_qpn, int num_qps, + struct msix_entry *sq_msix_entries, + struct msix_entry *rq_msix_entries) +{ + struct hinic_hwif *hwif = func_to_io->hwif; + struct pci_dev *pdev = hwif->pdev; + size_t qps_size, wq_size, db_size; + void *ci_addr_base; + int i, j, err; + + qps_size = num_qps * sizeof(*func_to_io->qps); + func_to_io->qps = devm_kzalloc(&pdev->dev, qps_size, GFP_KERNEL); + if (!func_to_io->qps) + return -ENOMEM; + + wq_size = num_qps * sizeof(*func_to_io->sq_wq); + func_to_io->sq_wq = devm_kzalloc(&pdev->dev, wq_size, GFP_KERNEL); + if (!func_to_io->sq_wq) { + err = -ENOMEM; + goto err_sq_wq; + } + + wq_size = num_qps * sizeof(*func_to_io->rq_wq); + func_to_io->rq_wq = devm_kzalloc(&pdev->dev, wq_size, GFP_KERNEL); + if (!func_to_io->rq_wq) { + err = -ENOMEM; + goto err_rq_wq; + } + + db_size = num_qps * sizeof(*func_to_io->sq_db); + func_to_io->sq_db = devm_kzalloc(&pdev->dev, db_size, GFP_KERNEL); + if (!func_to_io->sq_db) { + err = -ENOMEM; + goto err_sq_db; + } + + ci_addr_base = dma_zalloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps), + &func_to_io->ci_dma_base, + GFP_KERNEL); + if (!ci_addr_base) { + dev_err(&pdev->dev, "Failed to allocate CI area\n"); + err = -ENOMEM; + goto err_ci_base; + } + + 
func_to_io->ci_addr_base = ci_addr_base; + + for (i = 0; i < num_qps; i++) { + err = init_qp(func_to_io, &func_to_io->qps[i], i, + &sq_msix_entries[i], &rq_msix_entries[i]); + if (err) { + dev_err(&pdev->dev, "Failed to create QP %d\n", i); + goto err_init_qp; + } + } + + err = write_qp_ctxts(func_to_io, base_qpn, num_qps); + if (err) { + dev_err(&pdev->dev, "Failed to init QP ctxts\n"); + goto err_write_qp_ctxts; + } + + return 0; + +err_write_qp_ctxts: +err_init_qp: + for (j = 0; j < i; j++) + destroy_qp(func_to_io, &func_to_io->qps[j]); + + dma_free_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps), + func_to_io->ci_addr_base, func_to_io->ci_dma_base); + +err_ci_base: + devm_kfree(&pdev->dev, func_to_io->sq_db); + +err_sq_db: + devm_kfree(&pdev->dev, func_to_io->rq_wq); + +err_rq_wq: + devm_kfree(&pdev->dev, func_to_io->sq_wq); + +err_sq_wq: + devm_kfree(&pdev->dev, func_to_io->qps); + return err; +} + +/** + * hinic_io_destroy_qps - Destroy the IO Queue Pairs + * @func_to_io: func to io channel that holds the IO components + * @num_qps: number queue pairs to destroy + **/ +void hinic_io_destroy_qps(struct hinic_func_to_io *func_to_io, int num_qps) +{ + struct hinic_hwif *hwif = func_to_io->hwif; + struct pci_dev *pdev = hwif->pdev; + size_t ci_table_size; + int i; + + ci_table_size = CI_TABLE_SIZE(num_qps); + + for (i = 0; i < num_qps; i++) + destroy_qp(func_to_io, &func_to_io->qps[i]); + + dma_free_coherent(&pdev->dev, ci_table_size, func_to_io->ci_addr_base, + func_to_io->ci_dma_base); + + devm_kfree(&pdev->dev, func_to_io->sq_db); + + devm_kfree(&pdev->dev, func_to_io->rq_wq); + devm_kfree(&pdev->dev, func_to_io->sq_wq); + + devm_kfree(&pdev->dev, func_to_io->qps); +} + +/** + * hinic_io_init - Initialize the IO components + * @func_to_io: func to io channel that holds the IO components + * @hwif: HW interface for accessing IO + * @max_qps: maximum QPs in HW + * @num_ceqs: number completion event queues + * @ceq_msix_entries: msix entries for ceqs + * + * Return 0 - Success, negative - Failure + **/ +int hinic_io_init(struct hinic_func_to_io *func_to_io, + struct hinic_hwif *hwif, u16 max_qps, int num_ceqs, + struct msix_entry *ceq_msix_entries) +{ + struct pci_dev *pdev = hwif->pdev; + enum hinic_cmdq_type cmdq, type; + void __iomem *db_area; + int err; + + func_to_io->hwif = hwif; + func_to_io->qps = NULL; + func_to_io->max_qps = max_qps; + + err = hinic_ceqs_init(&func_to_io->ceqs, hwif, num_ceqs, + HINIC_DEFAULT_CEQ_LEN, HINIC_EQ_PAGE_SIZE, + ceq_msix_entries); + if (err) { + dev_err(&pdev->dev, "Failed to init CEQs\n"); + return err; + } + + err = hinic_wqs_alloc(&func_to_io->wqs, 2 * max_qps, hwif); + if (err) { + dev_err(&pdev->dev, "Failed to allocate WQS for IO\n"); + goto err_wqs_alloc; + } + + func_to_io->db_base = pci_ioremap_bar(pdev, HINIC_PCI_DB_BAR); + if (!func_to_io->db_base) { + dev_err(&pdev->dev, "Failed to remap IO DB area\n"); + err = -ENOMEM; + goto err_db_ioremap; + } + + init_db_area_idx(&func_to_io->free_db_area); + + for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++) { + db_area = get_db_area(func_to_io); + if (IS_ERR(db_area)) { + dev_err(&pdev->dev, "Failed to get cmdq db area\n"); + err = PTR_ERR(db_area); + goto err_db_area; + } + + func_to_io->cmdq_db_area[cmdq] = db_area; + } + + err = hinic_init_cmdqs(&func_to_io->cmdqs, hwif, + func_to_io->cmdq_db_area); + if (err) { + dev_err(&pdev->dev, "Failed to initialize cmdqs\n"); + goto err_init_cmdqs; + } + + return 0; + +err_init_cmdqs: +err_db_area: + for (type = HINIC_CMDQ_SYNC; type < cmdq; 
type++) + return_db_area(func_to_io, func_to_io->cmdq_db_area[type]); + + iounmap(func_to_io->db_base); + +err_db_ioremap: + hinic_wqs_free(&func_to_io->wqs); + +err_wqs_alloc: + hinic_ceqs_free(&func_to_io->ceqs); + return err; +} + +/** + * hinic_io_free - Free the IO components + * @func_to_io: func to io channel that holds the IO components + **/ +void hinic_io_free(struct hinic_func_to_io *func_to_io) +{ + enum hinic_cmdq_type cmdq; + + hinic_free_cmdqs(&func_to_io->cmdqs); + + for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++) + return_db_area(func_to_io, func_to_io->cmdq_db_area[cmdq]); + + iounmap(func_to_io->db_base); + hinic_wqs_free(&func_to_io->wqs); + hinic_ceqs_free(&func_to_io->ceqs); +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h new file mode 100644 index 000000000000..adb64179d47d --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h @@ -0,0 +1,97 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef HINIC_HW_IO_H +#define HINIC_HW_IO_H + +#include +#include +#include +#include + +#include "hinic_hw_if.h" +#include "hinic_hw_eqs.h" +#include "hinic_hw_wq.h" +#include "hinic_hw_cmdq.h" +#include "hinic_hw_qp.h" + +#define HINIC_DB_PAGE_SIZE SZ_4K +#define HINIC_DB_SIZE SZ_4M + +#define HINIC_DB_MAX_AREAS (HINIC_DB_SIZE / HINIC_DB_PAGE_SIZE) + +enum hinic_db_type { + HINIC_DB_CMDQ_TYPE, + HINIC_DB_SQ_TYPE, +}; + +enum hinic_io_path { + HINIC_CTRL_PATH, + HINIC_DATA_PATH, +}; + +struct hinic_free_db_area { + int db_idx[HINIC_DB_MAX_AREAS]; + + int alloc_pos; + int return_pos; + + int num_free; + + /* Lock for getting db area */ + struct semaphore idx_lock; +}; + +struct hinic_func_to_io { + struct hinic_hwif *hwif; + + struct hinic_ceqs ceqs; + + struct hinic_wqs wqs; + + struct hinic_wq *sq_wq; + struct hinic_wq *rq_wq; + + struct hinic_qp *qps; + u16 max_qps; + + void __iomem **sq_db; + void __iomem *db_base; + + void *ci_addr_base; + dma_addr_t ci_dma_base; + + struct hinic_free_db_area free_db_area; + + void __iomem *cmdq_db_area[HINIC_MAX_CMDQ_TYPES]; + + struct hinic_cmdqs cmdqs; +}; + +int hinic_io_create_qps(struct hinic_func_to_io *func_to_io, + u16 base_qpn, int num_qps, + struct msix_entry *sq_msix_entries, + struct msix_entry *rq_msix_entries); + +void hinic_io_destroy_qps(struct hinic_func_to_io *func_to_io, + int num_qps); + +int hinic_io_init(struct hinic_func_to_io *func_to_io, + struct hinic_hwif *hwif, u16 max_qps, int num_ceqs, + struct msix_entry *ceq_msix_entries); + +void hinic_io_free(struct hinic_func_to_io *func_to_io); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c new file mode 100644 index 000000000000..278dc13f3dae --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c @@ -0,0 +1,597 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it 
and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hinic_hw_if.h" +#include "hinic_hw_eqs.h" +#include "hinic_hw_api_cmd.h" +#include "hinic_hw_mgmt.h" +#include "hinic_hw_dev.h" + +#define SYNC_MSG_ID_MASK 0x1FF + +#define SYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->sync_msg_id) + +#define SYNC_MSG_ID_INC(pf_to_mgmt) (SYNC_MSG_ID(pf_to_mgmt) = \ + ((SYNC_MSG_ID(pf_to_mgmt) + 1) & \ + SYNC_MSG_ID_MASK)) + +#define MSG_SZ_IS_VALID(in_size) ((in_size) <= MAX_MSG_LEN) + +#define MGMT_MSG_LEN_MIN 20 +#define MGMT_MSG_LEN_STEP 16 +#define MGMT_MSG_RSVD_FOR_DEV 8 + +#define SEGMENT_LEN 48 + +#define MAX_PF_MGMT_BUF_SIZE 2048 + +/* Data should be SEG LEN size aligned */ +#define MAX_MSG_LEN 2016 + +#define MSG_NOT_RESP 0xFFFF + +#define MGMT_MSG_TIMEOUT 1000 + +#define mgmt_to_pfhwdev(pf_mgmt) \ + container_of(pf_mgmt, struct hinic_pfhwdev, pf_to_mgmt) + +enum msg_segment_type { + NOT_LAST_SEGMENT = 0, + LAST_SEGMENT = 1, +}; + +enum mgmt_direction_type { + MGMT_DIRECT_SEND = 0, + MGMT_RESP = 1, +}; + +enum msg_ack_type { + MSG_ACK = 0, + MSG_NO_ACK = 1, +}; + +/** + * hinic_register_mgmt_msg_cb - register msg handler for a msg from a module + * @pf_to_mgmt: PF to MGMT channel + * @mod: module in the chip that this handler will handle its messages + * @handle: private data for the callback + * @callback: the handler that will handle messages + **/ +void hinic_register_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt, + enum hinic_mod_type mod, + void *handle, + void (*callback)(void *handle, + u8 cmd, void *buf_in, + u16 in_size, void *buf_out, + u16 *out_size)) +{ + struct hinic_mgmt_cb *mgmt_cb = &pf_to_mgmt->mgmt_cb[mod]; + + mgmt_cb->cb = callback; + mgmt_cb->handle = handle; + mgmt_cb->state = HINIC_MGMT_CB_ENABLED; +} + +/** + * hinic_unregister_mgmt_msg_cb - unregister msg handler for a msg from a module + * @pf_to_mgmt: PF to MGMT channel + * @mod: module in the chip that this handler handles its messages + **/ +void hinic_unregister_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt, + enum hinic_mod_type mod) +{ + struct hinic_mgmt_cb *mgmt_cb = &pf_to_mgmt->mgmt_cb[mod]; + + mgmt_cb->state &= ~HINIC_MGMT_CB_ENABLED; + + while (mgmt_cb->state & HINIC_MGMT_CB_RUNNING) + schedule(); + + mgmt_cb->cb = NULL; +} + +/** + * prepare_header - prepare the header of the message + * @pf_to_mgmt: PF to MGMT channel + * @msg_len: the length of the message + * @mod: module in the chip that will get the message + * @ack_type: ask for response + * @direction: the direction of the message + * @cmd: command of the message + * @msg_id: message id + * + * Return the prepared header value + **/ +static u64 prepare_header(struct hinic_pf_to_mgmt *pf_to_mgmt, + u16 msg_len, enum hinic_mod_type mod, + enum msg_ack_type ack_type, + enum mgmt_direction_type direction, + u16 cmd, u16 msg_id) +{ + struct hinic_hwif *hwif = pf_to_mgmt->hwif; + + return HINIC_MSG_HEADER_SET(msg_len, MSG_LEN) | + HINIC_MSG_HEADER_SET(mod, MODULE) | + HINIC_MSG_HEADER_SET(SEGMENT_LEN, SEG_LEN) | + HINIC_MSG_HEADER_SET(ack_type, NO_ACK) | + HINIC_MSG_HEADER_SET(0, ASYNC_MGMT_TO_PF) | + 
HINIC_MSG_HEADER_SET(0, SEQID) | + HINIC_MSG_HEADER_SET(LAST_SEGMENT, LAST) | + HINIC_MSG_HEADER_SET(direction, DIRECTION) | + HINIC_MSG_HEADER_SET(cmd, CMD) | + HINIC_MSG_HEADER_SET(HINIC_HWIF_PCI_INTF(hwif), PCI_INTF) | + HINIC_MSG_HEADER_SET(HINIC_HWIF_PF_IDX(hwif), PF_IDX) | + HINIC_MSG_HEADER_SET(msg_id, MSG_ID); +} + +/** + * prepare_mgmt_cmd - prepare the mgmt command + * @mgmt_cmd: pointer to the command to prepare + * @header: pointer of the header for the message + * @msg: the data of the message + * @msg_len: the length of the message + **/ +static void prepare_mgmt_cmd(u8 *mgmt_cmd, u64 *header, u8 *msg, u16 msg_len) +{ + memset(mgmt_cmd, 0, MGMT_MSG_RSVD_FOR_DEV); + + mgmt_cmd += MGMT_MSG_RSVD_FOR_DEV; + memcpy(mgmt_cmd, header, sizeof(*header)); + + mgmt_cmd += sizeof(*header); + memcpy(mgmt_cmd, msg, msg_len); +} + +/** + * mgmt_msg_len - calculate the total message length + * @msg_data_len: the length of the message data + * + * Return the total message length + **/ +static u16 mgmt_msg_len(u16 msg_data_len) +{ + /* RSVD + HEADER_SIZE + DATA_LEN */ + u16 msg_len = MGMT_MSG_RSVD_FOR_DEV + sizeof(u64) + msg_data_len; + + if (msg_len > MGMT_MSG_LEN_MIN) + msg_len = MGMT_MSG_LEN_MIN + + ALIGN((msg_len - MGMT_MSG_LEN_MIN), + MGMT_MSG_LEN_STEP); + else + msg_len = MGMT_MSG_LEN_MIN; + + return msg_len; +} + +/** + * send_msg_to_mgmt - send message to mgmt by API CMD + * @pf_to_mgmt: PF to MGMT channel + * @mod: module in the chip that will get the message + * @cmd: command of the message + * @data: the msg data + * @data_len: the msg data length + * @ack_type: ask for response + * @direction: the direction of the original message + * @resp_msg_id: msg id to response for + * + * Return 0 - Success, negative - Failure + **/ +static int send_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt, + enum hinic_mod_type mod, u8 cmd, + u8 *data, u16 data_len, + enum msg_ack_type ack_type, + enum mgmt_direction_type direction, + u16 resp_msg_id) +{ + struct hinic_api_cmd_chain *chain; + u64 header; + u16 msg_id; + + msg_id = SYNC_MSG_ID(pf_to_mgmt); + + if (direction == MGMT_RESP) { + header = prepare_header(pf_to_mgmt, data_len, mod, ack_type, + direction, cmd, resp_msg_id); + } else { + SYNC_MSG_ID_INC(pf_to_mgmt); + header = prepare_header(pf_to_mgmt, data_len, mod, ack_type, + direction, cmd, msg_id); + } + + prepare_mgmt_cmd(pf_to_mgmt->sync_msg_buf, &header, data, data_len); + + chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_WRITE_TO_MGMT_CPU]; + return hinic_api_cmd_write(chain, HINIC_NODE_ID_MGMT, + pf_to_mgmt->sync_msg_buf, + mgmt_msg_len(data_len)); +} + +/** + * msg_to_mgmt_sync - send sync message to mgmt + * @pf_to_mgmt: PF to MGMT channel + * @mod: module in the chip that will get the message + * @cmd: command of the message + * @buf_in: the msg data + * @in_size: the msg data length + * @buf_out: response + * @out_size: response length + * @direction: the direction of the original message + * @resp_msg_id: msg id to response for + * + * Return 0 - Success, negative - Failure + **/ +static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt, + enum hinic_mod_type mod, u8 cmd, + u8 *buf_in, u16 in_size, + u8 *buf_out, u16 *out_size, + enum mgmt_direction_type direction, + u16 resp_msg_id) +{ + struct hinic_hwif *hwif = pf_to_mgmt->hwif; + struct pci_dev *pdev = hwif->pdev; + struct hinic_recv_msg *recv_msg; + struct completion *recv_done; + u16 msg_id; + int err; + + /* Lock the sync_msg_buf */ + down(&pf_to_mgmt->sync_msg_lock); + + recv_msg = 
&pf_to_mgmt->recv_resp_msg_from_mgmt; + recv_done = &recv_msg->recv_done; + + if (resp_msg_id == MSG_NOT_RESP) + msg_id = SYNC_MSG_ID(pf_to_mgmt); + else + msg_id = resp_msg_id; + + init_completion(recv_done); + + err = send_msg_to_mgmt(pf_to_mgmt, mod, cmd, buf_in, in_size, + MSG_ACK, direction, resp_msg_id); + if (err) { + dev_err(&pdev->dev, "Failed to send sync msg to mgmt\n"); + goto unlock_sync_msg; + } + + if (!wait_for_completion_timeout(recv_done, MGMT_MSG_TIMEOUT)) { + dev_err(&pdev->dev, "MGMT timeout, MSG id = %d\n", msg_id); + err = -ETIMEDOUT; + goto unlock_sync_msg; + } + + smp_rmb(); /* verify reading after completion */ + + if (recv_msg->msg_id != msg_id) { + dev_err(&pdev->dev, "incorrect MSG for id = %d\n", msg_id); + err = -EFAULT; + goto unlock_sync_msg; + } + + if ((buf_out) && (recv_msg->msg_len <= MAX_PF_MGMT_BUF_SIZE)) { + memcpy(buf_out, recv_msg->msg, recv_msg->msg_len); + *out_size = recv_msg->msg_len; + } + +unlock_sync_msg: + up(&pf_to_mgmt->sync_msg_lock); + return err; +} + +/** + * msg_to_mgmt_async - send message to mgmt without response + * @pf_to_mgmt: PF to MGMT channel + * @mod: module in the chip that will get the message + * @cmd: command of the message + * @buf_in: the msg data + * @in_size: the msg data length + * @direction: the direction of the original message + * @resp_msg_id: msg id to response for + * + * Return 0 - Success, negative - Failure + **/ +static int msg_to_mgmt_async(struct hinic_pf_to_mgmt *pf_to_mgmt, + enum hinic_mod_type mod, u8 cmd, + u8 *buf_in, u16 in_size, + enum mgmt_direction_type direction, + u16 resp_msg_id) +{ + int err; + + /* Lock the sync_msg_buf */ + down(&pf_to_mgmt->sync_msg_lock); + + err = send_msg_to_mgmt(pf_to_mgmt, mod, cmd, buf_in, in_size, + MSG_NO_ACK, direction, resp_msg_id); + + up(&pf_to_mgmt->sync_msg_lock); + return err; +} + +/** + * hinic_msg_to_mgmt - send message to mgmt + * @pf_to_mgmt: PF to MGMT channel + * @mod: module in the chip that will get the message + * @cmd: command of the message + * @buf_in: the msg data + * @in_size: the msg data length + * @buf_out: response + * @out_size: returned response length + * @sync: sync msg or async msg + * + * Return 0 - Success, negative - Failure + **/ +int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt, + enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, void *buf_out, u16 *out_size, + enum hinic_mgmt_msg_type sync) +{ + struct hinic_hwif *hwif = pf_to_mgmt->hwif; + struct pci_dev *pdev = hwif->pdev; + + if (sync != HINIC_MGMT_MSG_SYNC) { + dev_err(&pdev->dev, "Invalid MGMT msg type\n"); + return -EINVAL; + } + + if (!MSG_SZ_IS_VALID(in_size)) { + dev_err(&pdev->dev, "Invalid MGMT msg buffer size\n"); + return -EINVAL; + } + + return msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size, + buf_out, out_size, MGMT_DIRECT_SEND, + MSG_NOT_RESP); +} + +/** + * mgmt_recv_msg_handler - handler for message from mgmt cpu + * @pf_to_mgmt: PF to MGMT channel + * @recv_msg: received message details + **/ +static void mgmt_recv_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt, + struct hinic_recv_msg *recv_msg) +{ + struct hinic_hwif *hwif = pf_to_mgmt->hwif; + struct pci_dev *pdev = hwif->pdev; + u8 *buf_out = recv_msg->buf_out; + struct hinic_mgmt_cb *mgmt_cb; + unsigned long cb_state; + u16 out_size = 0; + + if (recv_msg->mod >= HINIC_MOD_MAX) { + dev_err(&pdev->dev, "Unknown MGMT MSG module = %d\n", + recv_msg->mod); + return; + } + + mgmt_cb = &pf_to_mgmt->mgmt_cb[recv_msg->mod]; + + cb_state = cmpxchg(&mgmt_cb->state, + 
HINIC_MGMT_CB_ENABLED, + HINIC_MGMT_CB_ENABLED | HINIC_MGMT_CB_RUNNING); + + if ((cb_state == HINIC_MGMT_CB_ENABLED) && (mgmt_cb->cb)) + mgmt_cb->cb(mgmt_cb->handle, recv_msg->cmd, + recv_msg->msg, recv_msg->msg_len, + buf_out, &out_size); + else + dev_err(&pdev->dev, "No MGMT msg handler, mod = %d\n", + recv_msg->mod); + + mgmt_cb->state &= ~HINIC_MGMT_CB_RUNNING; + + if (!recv_msg->async_mgmt_to_pf) + /* MGMT sent sync msg, send the response */ + msg_to_mgmt_async(pf_to_mgmt, recv_msg->mod, recv_msg->cmd, + buf_out, out_size, MGMT_RESP, + recv_msg->msg_id); +} + +/** + * mgmt_resp_msg_handler - handler for a response message from mgmt cpu + * @pf_to_mgmt: PF to MGMT channel + * @recv_msg: received message details + **/ +static void mgmt_resp_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt, + struct hinic_recv_msg *recv_msg) +{ + wmb(); /* verify writing all, before reading */ + + complete(&recv_msg->recv_done); +} + +/** + * recv_mgmt_msg_handler - handler for a message from mgmt cpu + * @pf_to_mgmt: PF to MGMT channel + * @header: the header of the message + * @recv_msg: received message details + **/ +static void recv_mgmt_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt, + u64 *header, struct hinic_recv_msg *recv_msg) +{ + struct hinic_hwif *hwif = pf_to_mgmt->hwif; + struct pci_dev *pdev = hwif->pdev; + int seq_id, seg_len; + u8 *msg_body; + + seq_id = HINIC_MSG_HEADER_GET(*header, SEQID); + seg_len = HINIC_MSG_HEADER_GET(*header, SEG_LEN); + + if (seq_id >= (MAX_MSG_LEN / SEGMENT_LEN)) { + dev_err(&pdev->dev, "recv big mgmt msg\n"); + return; + } + + msg_body = (u8 *)header + sizeof(*header); + memcpy(recv_msg->msg + seq_id * SEGMENT_LEN, msg_body, seg_len); + + if (!HINIC_MSG_HEADER_GET(*header, LAST)) + return; + + recv_msg->cmd = HINIC_MSG_HEADER_GET(*header, CMD); + recv_msg->mod = HINIC_MSG_HEADER_GET(*header, MODULE); + recv_msg->async_mgmt_to_pf = HINIC_MSG_HEADER_GET(*header, + ASYNC_MGMT_TO_PF); + recv_msg->msg_len = HINIC_MSG_HEADER_GET(*header, MSG_LEN); + recv_msg->msg_id = HINIC_MSG_HEADER_GET(*header, MSG_ID); + + if (HINIC_MSG_HEADER_GET(*header, DIRECTION) == MGMT_RESP) + mgmt_resp_msg_handler(pf_to_mgmt, recv_msg); + else + mgmt_recv_msg_handler(pf_to_mgmt, recv_msg); +} + +/** + * mgmt_msg_aeqe_handler - handler for a mgmt message event + * @handle: PF to MGMT channel + * @data: the header of the message + * @size: unused + **/ +static void mgmt_msg_aeqe_handler(void *handle, void *data, u8 size) +{ + struct hinic_pf_to_mgmt *pf_to_mgmt = handle; + struct hinic_recv_msg *recv_msg; + u64 *header = (u64 *)data; + + recv_msg = HINIC_MSG_HEADER_GET(*header, DIRECTION) == + MGMT_DIRECT_SEND ? 
+ &pf_to_mgmt->recv_msg_from_mgmt : + &pf_to_mgmt->recv_resp_msg_from_mgmt; + + recv_mgmt_msg_handler(pf_to_mgmt, header, recv_msg); +} + +/** + * alloc_recv_msg - allocate receive message memory + * @pf_to_mgmt: PF to MGMT channel + * @recv_msg: pointer that will hold the allocated data + * + * Return 0 - Success, negative - Failure + **/ +static int alloc_recv_msg(struct hinic_pf_to_mgmt *pf_to_mgmt, + struct hinic_recv_msg *recv_msg) +{ + struct hinic_hwif *hwif = pf_to_mgmt->hwif; + struct pci_dev *pdev = hwif->pdev; + + recv_msg->msg = devm_kzalloc(&pdev->dev, MAX_PF_MGMT_BUF_SIZE, + GFP_KERNEL); + if (!recv_msg->msg) + return -ENOMEM; + + recv_msg->buf_out = devm_kzalloc(&pdev->dev, MAX_PF_MGMT_BUF_SIZE, + GFP_KERNEL); + if (!recv_msg->buf_out) + return -ENOMEM; + + return 0; +} + +/** + * alloc_msg_buf - allocate all the message buffers of PF to MGMT channel + * @pf_to_mgmt: PF to MGMT channel + * + * Return 0 - Success, negative - Failure + **/ +static int alloc_msg_buf(struct hinic_pf_to_mgmt *pf_to_mgmt) +{ + struct hinic_hwif *hwif = pf_to_mgmt->hwif; + struct pci_dev *pdev = hwif->pdev; + int err; + + err = alloc_recv_msg(pf_to_mgmt, + &pf_to_mgmt->recv_msg_from_mgmt); + if (err) { + dev_err(&pdev->dev, "Failed to allocate recv msg\n"); + return err; + } + + err = alloc_recv_msg(pf_to_mgmt, + &pf_to_mgmt->recv_resp_msg_from_mgmt); + if (err) { + dev_err(&pdev->dev, "Failed to allocate resp recv msg\n"); + return err; + } + + pf_to_mgmt->sync_msg_buf = devm_kzalloc(&pdev->dev, + MAX_PF_MGMT_BUF_SIZE, + GFP_KERNEL); + if (!pf_to_mgmt->sync_msg_buf) + return -ENOMEM; + + return 0; +} + +/** + * hinic_pf_to_mgmt_init - initialize PF to MGMT channel + * @pf_to_mgmt: PF to MGMT channel + * @hwif: HW interface the PF to MGMT will use for accessing HW + * + * Return 0 - Success, negative - Failure + **/ +int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt, + struct hinic_hwif *hwif) +{ + struct hinic_pfhwdev *pfhwdev = mgmt_to_pfhwdev(pf_to_mgmt); + struct hinic_hwdev *hwdev = &pfhwdev->hwdev; + struct pci_dev *pdev = hwif->pdev; + int err; + + pf_to_mgmt->hwif = hwif; + + sema_init(&pf_to_mgmt->sync_msg_lock, 1); + pf_to_mgmt->sync_msg_id = 0; + + err = alloc_msg_buf(pf_to_mgmt); + if (err) { + dev_err(&pdev->dev, "Failed to allocate msg buffers\n"); + return err; + } + + err = hinic_api_cmd_init(pf_to_mgmt->cmd_chain, hwif); + if (err) { + dev_err(&pdev->dev, "Failed to initialize cmd chains\n"); + return err; + } + + hinic_aeq_register_hw_cb(&hwdev->aeqs, HINIC_MSG_FROM_MGMT_CPU, + pf_to_mgmt, + mgmt_msg_aeqe_handler); + return 0; +} + +/** + * hinic_pf_to_mgmt_free - free PF to MGMT channel + * @pf_to_mgmt: PF to MGMT channel + **/ +void hinic_pf_to_mgmt_free(struct hinic_pf_to_mgmt *pf_to_mgmt) +{ + struct hinic_pfhwdev *pfhwdev = mgmt_to_pfhwdev(pf_to_mgmt); + struct hinic_hwdev *hwdev = &pfhwdev->hwdev; + + hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MSG_FROM_MGMT_CPU); + hinic_api_cmd_free(pf_to_mgmt->cmd_chain); +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h new file mode 100644 index 000000000000..320711e8dee6 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h @@ -0,0 +1,153 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software 
Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef HINIC_HW_MGMT_H +#define HINIC_HW_MGMT_H + +#include +#include +#include +#include + +#include "hinic_hw_if.h" +#include "hinic_hw_api_cmd.h" + +#define HINIC_MSG_HEADER_MSG_LEN_SHIFT 0 +#define HINIC_MSG_HEADER_MODULE_SHIFT 11 +#define HINIC_MSG_HEADER_SEG_LEN_SHIFT 16 +#define HINIC_MSG_HEADER_NO_ACK_SHIFT 22 +#define HINIC_MSG_HEADER_ASYNC_MGMT_TO_PF_SHIFT 23 +#define HINIC_MSG_HEADER_SEQID_SHIFT 24 +#define HINIC_MSG_HEADER_LAST_SHIFT 30 +#define HINIC_MSG_HEADER_DIRECTION_SHIFT 31 +#define HINIC_MSG_HEADER_CMD_SHIFT 32 +#define HINIC_MSG_HEADER_ZEROS_SHIFT 40 +#define HINIC_MSG_HEADER_PCI_INTF_SHIFT 48 +#define HINIC_MSG_HEADER_PF_IDX_SHIFT 50 +#define HINIC_MSG_HEADER_MSG_ID_SHIFT 54 + +#define HINIC_MSG_HEADER_MSG_LEN_MASK 0x7FF +#define HINIC_MSG_HEADER_MODULE_MASK 0x1F +#define HINIC_MSG_HEADER_SEG_LEN_MASK 0x3F +#define HINIC_MSG_HEADER_NO_ACK_MASK 0x1 +#define HINIC_MSG_HEADER_ASYNC_MGMT_TO_PF_MASK 0x1 +#define HINIC_MSG_HEADER_SEQID_MASK 0x3F +#define HINIC_MSG_HEADER_LAST_MASK 0x1 +#define HINIC_MSG_HEADER_DIRECTION_MASK 0x1 +#define HINIC_MSG_HEADER_CMD_MASK 0xFF +#define HINIC_MSG_HEADER_ZEROS_MASK 0xFF +#define HINIC_MSG_HEADER_PCI_INTF_MASK 0x3 +#define HINIC_MSG_HEADER_PF_IDX_MASK 0xF +#define HINIC_MSG_HEADER_MSG_ID_MASK 0x3FF + +#define HINIC_MSG_HEADER_SET(val, member) \ + ((u64)((val) & HINIC_MSG_HEADER_##member##_MASK) << \ + HINIC_MSG_HEADER_##member##_SHIFT) + +#define HINIC_MSG_HEADER_GET(val, member) \ + (((val) >> HINIC_MSG_HEADER_##member##_SHIFT) & \ + HINIC_MSG_HEADER_##member##_MASK) + +enum hinic_mgmt_msg_type { + HINIC_MGMT_MSG_SYNC = 1, +}; + +enum hinic_cfg_cmd { + HINIC_CFG_NIC_CAP = 0, +}; + +enum hinic_comm_cmd { + HINIC_COMM_CMD_IO_STATUS_GET = 0x3, + + HINIC_COMM_CMD_CMDQ_CTXT_SET = 0x10, + HINIC_COMM_CMD_CMDQ_CTXT_GET = 0x11, + + HINIC_COMM_CMD_HWCTXT_SET = 0x12, + HINIC_COMM_CMD_HWCTXT_GET = 0x13, + + HINIC_COMM_CMD_SQ_HI_CI_SET = 0x14, + + HINIC_COMM_CMD_RES_STATE_SET = 0x24, + + HINIC_COMM_CMD_IO_RES_CLEAR = 0x29, + + HINIC_COMM_CMD_MAX = 0x32, +}; + +enum hinic_mgmt_cb_state { + HINIC_MGMT_CB_ENABLED = BIT(0), + HINIC_MGMT_CB_RUNNING = BIT(1), +}; + +struct hinic_recv_msg { + u8 *msg; + u8 *buf_out; + + struct completion recv_done; + + u16 cmd; + enum hinic_mod_type mod; + int async_mgmt_to_pf; + + u16 msg_len; + u16 msg_id; +}; + +struct hinic_mgmt_cb { + void (*cb)(void *handle, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + + void *handle; + unsigned long state; +}; + +struct hinic_pf_to_mgmt { + struct hinic_hwif *hwif; + + struct semaphore sync_msg_lock; + u16 sync_msg_id; + u8 *sync_msg_buf; + + struct hinic_recv_msg recv_resp_msg_from_mgmt; + struct hinic_recv_msg recv_msg_from_mgmt; + + struct hinic_api_cmd_chain *cmd_chain[HINIC_API_CMD_MAX]; + + struct hinic_mgmt_cb mgmt_cb[HINIC_MOD_MAX]; +}; + +void hinic_register_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt, + enum hinic_mod_type mod, + void *handle, + void (*callback)(void *handle, + u8 cmd, void *buf_in, + u16 in_size, void *buf_out, + u16 *out_size)); + +void hinic_unregister_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt, + enum hinic_mod_type mod); + +int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt, + enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, 
void *buf_out, u16 *out_size, + enum hinic_mgmt_msg_type sync); + +int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt, + struct hinic_hwif *hwif); + +void hinic_pf_to_mgmt_free(struct hinic_pf_to_mgmt *pf_to_mgmt); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c new file mode 100644 index 000000000000..b9db6d649743 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c @@ -0,0 +1,887 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hinic_common.h" +#include "hinic_hw_if.h" +#include "hinic_hw_wqe.h" +#include "hinic_hw_wq.h" +#include "hinic_hw_qp_ctxt.h" +#include "hinic_hw_qp.h" +#include "hinic_hw_io.h" + +#define SQ_DB_OFF SZ_2K + +/* The number of cache line to prefetch Until threshold state */ +#define WQ_PREFETCH_MAX 2 +/* The number of cache line to prefetch After threshold state */ +#define WQ_PREFETCH_MIN 1 +/* Threshold state */ +#define WQ_PREFETCH_THRESHOLD 256 + +/* sizes of the SQ/RQ ctxt */ +#define Q_CTXT_SIZE 48 +#define CTXT_RSVD 240 + +#define SQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \ + (((max_rqs) + (max_sqs)) * CTXT_RSVD + (q_id) * Q_CTXT_SIZE) + +#define RQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \ + (((max_rqs) + (max_sqs)) * CTXT_RSVD + \ + (max_sqs + (q_id)) * Q_CTXT_SIZE) + +#define SIZE_16BYTES(size) (ALIGN(size, 16) >> 4) +#define SIZE_8BYTES(size) (ALIGN(size, 8) >> 3) +#define SECT_SIZE_FROM_8BYTES(size) ((size) << 3) + +#define SQ_DB_PI_HI_SHIFT 8 +#define SQ_DB_PI_HI(prod_idx) ((prod_idx) >> SQ_DB_PI_HI_SHIFT) + +#define SQ_DB_PI_LOW_MASK 0xFF +#define SQ_DB_PI_LOW(prod_idx) ((prod_idx) & SQ_DB_PI_LOW_MASK) + +#define SQ_DB_ADDR(sq, pi) ((u64 *)((sq)->db_base) + SQ_DB_PI_LOW(pi)) + +#define SQ_MASKED_IDX(sq, idx) ((idx) & (sq)->wq->mask) +#define RQ_MASKED_IDX(rq, idx) ((idx) & (rq)->wq->mask) + +#define TX_MAX_MSS_DEFAULT 0x3E00 + +enum sq_wqe_type { + SQ_NORMAL_WQE = 0, +}; + +enum rq_completion_fmt { + RQ_COMPLETE_SGE = 1 +}; + +void hinic_qp_prepare_header(struct hinic_qp_ctxt_header *qp_ctxt_hdr, + enum hinic_qp_ctxt_type ctxt_type, + u16 num_queues, u16 max_queues) +{ + u16 max_sqs = max_queues; + u16 max_rqs = max_queues; + + qp_ctxt_hdr->num_queues = num_queues; + qp_ctxt_hdr->queue_type = ctxt_type; + + if (ctxt_type == HINIC_QP_CTXT_TYPE_SQ) + qp_ctxt_hdr->addr_offset = SQ_CTXT_OFFSET(max_sqs, max_rqs, 0); + else + qp_ctxt_hdr->addr_offset = RQ_CTXT_OFFSET(max_sqs, max_rqs, 0); + + qp_ctxt_hdr->addr_offset = SIZE_16BYTES(qp_ctxt_hdr->addr_offset); + + hinic_cpu_to_be32(qp_ctxt_hdr, sizeof(*qp_ctxt_hdr)); +} + +void hinic_sq_prepare_ctxt(struct hinic_sq_ctxt *sq_ctxt, + struct hinic_sq *sq, u16 global_qid) +{ + u32 wq_page_pfn_hi, wq_page_pfn_lo, wq_block_pfn_hi, wq_block_pfn_lo; + u64 wq_page_addr, wq_page_pfn, wq_block_pfn; + u16 pi_start, ci_start; + struct hinic_wq *wq; + + wq = sq->wq; + ci_start = 
atomic_read(&wq->cons_idx); + pi_start = atomic_read(&wq->prod_idx); + + /* Read the first page paddr from the WQ page paddr ptrs */ + wq_page_addr = be64_to_cpu(*wq->block_vaddr); + + wq_page_pfn = HINIC_WQ_PAGE_PFN(wq_page_addr); + wq_page_pfn_hi = upper_32_bits(wq_page_pfn); + wq_page_pfn_lo = lower_32_bits(wq_page_pfn); + + wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq->block_paddr); + wq_block_pfn_hi = upper_32_bits(wq_block_pfn); + wq_block_pfn_lo = lower_32_bits(wq_block_pfn); + + sq_ctxt->ceq_attr = HINIC_SQ_CTXT_CEQ_ATTR_SET(global_qid, + GLOBAL_SQ_ID) | + HINIC_SQ_CTXT_CEQ_ATTR_SET(0, EN); + + sq_ctxt->ci_wrapped = HINIC_SQ_CTXT_CI_SET(ci_start, IDX) | + HINIC_SQ_CTXT_CI_SET(1, WRAPPED); + + sq_ctxt->wq_hi_pfn_pi = + HINIC_SQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) | + HINIC_SQ_CTXT_WQ_PAGE_SET(pi_start, PI); + + sq_ctxt->wq_lo_pfn = wq_page_pfn_lo; + + sq_ctxt->pref_cache = + HINIC_SQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) | + HINIC_SQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) | + HINIC_SQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD); + + sq_ctxt->pref_wrapped = 1; + + sq_ctxt->pref_wq_hi_pfn_ci = + HINIC_SQ_CTXT_PREF_SET(ci_start, CI) | + HINIC_SQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_HI_PFN); + + sq_ctxt->pref_wq_lo_pfn = wq_page_pfn_lo; + + sq_ctxt->wq_block_hi_pfn = + HINIC_SQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, HI_PFN); + + sq_ctxt->wq_block_lo_pfn = wq_block_pfn_lo; + + hinic_cpu_to_be32(sq_ctxt, sizeof(*sq_ctxt)); +} + +void hinic_rq_prepare_ctxt(struct hinic_rq_ctxt *rq_ctxt, + struct hinic_rq *rq, u16 global_qid) +{ + u32 wq_page_pfn_hi, wq_page_pfn_lo, wq_block_pfn_hi, wq_block_pfn_lo; + u64 wq_page_addr, wq_page_pfn, wq_block_pfn; + u16 pi_start, ci_start; + struct hinic_wq *wq; + + wq = rq->wq; + ci_start = atomic_read(&wq->cons_idx); + pi_start = atomic_read(&wq->prod_idx); + + /* Read the first page paddr from the WQ page paddr ptrs */ + wq_page_addr = be64_to_cpu(*wq->block_vaddr); + + wq_page_pfn = HINIC_WQ_PAGE_PFN(wq_page_addr); + wq_page_pfn_hi = upper_32_bits(wq_page_pfn); + wq_page_pfn_lo = lower_32_bits(wq_page_pfn); + + wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq->block_paddr); + wq_block_pfn_hi = upper_32_bits(wq_block_pfn); + wq_block_pfn_lo = lower_32_bits(wq_block_pfn); + + rq_ctxt->ceq_attr = HINIC_RQ_CTXT_CEQ_ATTR_SET(0, EN) | + HINIC_RQ_CTXT_CEQ_ATTR_SET(1, WRAPPED); + + rq_ctxt->pi_intr_attr = HINIC_RQ_CTXT_PI_SET(pi_start, IDX) | + HINIC_RQ_CTXT_PI_SET(rq->msix_entry, INTR); + + rq_ctxt->wq_hi_pfn_ci = HINIC_RQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, + HI_PFN) | + HINIC_RQ_CTXT_WQ_PAGE_SET(ci_start, CI); + + rq_ctxt->wq_lo_pfn = wq_page_pfn_lo; + + rq_ctxt->pref_cache = + HINIC_RQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) | + HINIC_RQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) | + HINIC_RQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD); + + rq_ctxt->pref_wrapped = 1; + + rq_ctxt->pref_wq_hi_pfn_ci = + HINIC_RQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_HI_PFN) | + HINIC_RQ_CTXT_PREF_SET(ci_start, CI); + + rq_ctxt->pref_wq_lo_pfn = wq_page_pfn_lo; + + rq_ctxt->pi_paddr_hi = upper_32_bits(rq->pi_dma_addr); + rq_ctxt->pi_paddr_lo = lower_32_bits(rq->pi_dma_addr); + + rq_ctxt->wq_block_hi_pfn = + HINIC_RQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, HI_PFN); + + rq_ctxt->wq_block_lo_pfn = wq_block_pfn_lo; + + hinic_cpu_to_be32(rq_ctxt, sizeof(*rq_ctxt)); +} + +/** + * alloc_sq_skb_arr - allocate sq array for saved skb + * @sq: HW Send Queue + * + * Return 0 - Success, negative - Failure + **/ +static int alloc_sq_skb_arr(struct hinic_sq *sq) +{ + struct hinic_wq *wq = sq->wq; + 
size_t skb_arr_size; + + skb_arr_size = wq->q_depth * sizeof(*sq->saved_skb); + sq->saved_skb = vzalloc(skb_arr_size); + if (!sq->saved_skb) + return -ENOMEM; + + return 0; +} + +/** + * free_sq_skb_arr - free sq array for saved skb + * @sq: HW Send Queue + **/ +static void free_sq_skb_arr(struct hinic_sq *sq) +{ + vfree(sq->saved_skb); +} + +/** + * alloc_rq_skb_arr - allocate rq array for saved skb + * @rq: HW Receive Queue + * + * Return 0 - Success, negative - Failure + **/ +static int alloc_rq_skb_arr(struct hinic_rq *rq) +{ + struct hinic_wq *wq = rq->wq; + size_t skb_arr_size; + + skb_arr_size = wq->q_depth * sizeof(*rq->saved_skb); + rq->saved_skb = vzalloc(skb_arr_size); + if (!rq->saved_skb) + return -ENOMEM; + + return 0; +} + +/** + * free_rq_skb_arr - free rq array for saved skb + * @rq: HW Receive Queue + **/ +static void free_rq_skb_arr(struct hinic_rq *rq) +{ + vfree(rq->saved_skb); +} + +/** + * hinic_init_sq - Initialize HW Send Queue + * @sq: HW Send Queue + * @hwif: HW Interface for accessing HW + * @wq: Work Queue for the data of the SQ + * @entry: msix entry for sq + * @ci_addr: address for reading the current HW consumer index + * @ci_dma_addr: dma address for reading the current HW consumer index + * @db_base: doorbell base address + * + * Return 0 - Success, negative - Failure + **/ +int hinic_init_sq(struct hinic_sq *sq, struct hinic_hwif *hwif, + struct hinic_wq *wq, struct msix_entry *entry, + void *ci_addr, dma_addr_t ci_dma_addr, + void __iomem *db_base) +{ + sq->hwif = hwif; + + sq->wq = wq; + + sq->irq = entry->vector; + sq->msix_entry = entry->entry; + + sq->hw_ci_addr = ci_addr; + sq->hw_ci_dma_addr = ci_dma_addr; + + sq->db_base = db_base + SQ_DB_OFF; + + return alloc_sq_skb_arr(sq); +} + +/** + * hinic_clean_sq - Clean HW Send Queue's Resources + * @sq: Send Queue + **/ +void hinic_clean_sq(struct hinic_sq *sq) +{ + free_sq_skb_arr(sq); +} + +/** + * alloc_rq_cqe - allocate rq completion queue elements + * @rq: HW Receive Queue + * + * Return 0 - Success, negative - Failure + **/ +static int alloc_rq_cqe(struct hinic_rq *rq) +{ + struct hinic_hwif *hwif = rq->hwif; + struct pci_dev *pdev = hwif->pdev; + size_t cqe_dma_size, cqe_size; + struct hinic_wq *wq = rq->wq; + int j, i; + + cqe_size = wq->q_depth * sizeof(*rq->cqe); + rq->cqe = vzalloc(cqe_size); + if (!rq->cqe) + return -ENOMEM; + + cqe_dma_size = wq->q_depth * sizeof(*rq->cqe_dma); + rq->cqe_dma = vzalloc(cqe_dma_size); + if (!rq->cqe_dma) + goto err_cqe_dma_arr_alloc; + + for (i = 0; i < wq->q_depth; i++) { + rq->cqe[i] = dma_zalloc_coherent(&pdev->dev, + sizeof(*rq->cqe[i]), + &rq->cqe_dma[i], GFP_KERNEL); + if (!rq->cqe[i]) + goto err_cqe_alloc; + } + + return 0; + +err_cqe_alloc: + for (j = 0; j < i; j++) + dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[j]), rq->cqe[j], + rq->cqe_dma[j]); + + vfree(rq->cqe_dma); + +err_cqe_dma_arr_alloc: + vfree(rq->cqe); + return -ENOMEM; +} + +/** + * free_rq_cqe - free rq completion queue elements + * @rq: HW Receive Queue + **/ +static void free_rq_cqe(struct hinic_rq *rq) +{ + struct hinic_hwif *hwif = rq->hwif; + struct pci_dev *pdev = hwif->pdev; + struct hinic_wq *wq = rq->wq; + int i; + + for (i = 0; i < wq->q_depth; i++) + dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[i]), rq->cqe[i], + rq->cqe_dma[i]); + + vfree(rq->cqe_dma); + vfree(rq->cqe); +} + +/** + * hinic_init_rq - Initialize HW Receive Queue + * @rq: HW Receive Queue + * @hwif: HW Interface for accessing HW + * @wq: Work Queue for the data of the RQ + * @entry: msix entry for rq + * + * 
Return 0 - Success, negative - Failure + **/ +int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif, + struct hinic_wq *wq, struct msix_entry *entry) +{ + struct pci_dev *pdev = hwif->pdev; + size_t pi_size; + int err; + + rq->hwif = hwif; + + rq->wq = wq; + + rq->irq = entry->vector; + rq->msix_entry = entry->entry; + + rq->buf_sz = HINIC_RX_BUF_SZ; + + err = alloc_rq_skb_arr(rq); + if (err) { + dev_err(&pdev->dev, "Failed to allocate rq priv data\n"); + return err; + } + + err = alloc_rq_cqe(rq); + if (err) { + dev_err(&pdev->dev, "Failed to allocate rq cqe\n"); + goto err_alloc_rq_cqe; + } + + /* HW requirements: Must be at least 32 bit */ + pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32)); + rq->pi_virt_addr = dma_zalloc_coherent(&pdev->dev, pi_size, + &rq->pi_dma_addr, GFP_KERNEL); + if (!rq->pi_virt_addr) { + dev_err(&pdev->dev, "Failed to allocate PI address\n"); + err = -ENOMEM; + goto err_pi_virt; + } + + return 0; + +err_pi_virt: + free_rq_cqe(rq); + +err_alloc_rq_cqe: + free_rq_skb_arr(rq); + return err; +} + +/** + * hinic_clean_rq - Clean HW Receive Queue's Resources + * @rq: HW Receive Queue + **/ +void hinic_clean_rq(struct hinic_rq *rq) +{ + struct hinic_hwif *hwif = rq->hwif; + struct pci_dev *pdev = hwif->pdev; + size_t pi_size; + + pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32)); + dma_free_coherent(&pdev->dev, pi_size, rq->pi_virt_addr, + rq->pi_dma_addr); + + free_rq_cqe(rq); + free_rq_skb_arr(rq); +} + +/** + * hinic_get_sq_free_wqebbs - return number of free wqebbs for use + * @sq: send queue + * + * Return number of free wqebbs + **/ +int hinic_get_sq_free_wqebbs(struct hinic_sq *sq) +{ + struct hinic_wq *wq = sq->wq; + + return atomic_read(&wq->delta) - 1; +} + +/** + * hinic_get_rq_free_wqebbs - return number of free wqebbs for use + * @rq: recv queue + * + * Return number of free wqebbs + **/ +int hinic_get_rq_free_wqebbs(struct hinic_rq *rq) +{ + struct hinic_wq *wq = rq->wq; + + return atomic_read(&wq->delta) - 1; +} + +static void sq_prepare_ctrl(struct hinic_sq_ctrl *ctrl, u16 prod_idx, + int nr_descs) +{ + u32 ctrl_size, task_size, bufdesc_size; + + ctrl_size = SIZE_8BYTES(sizeof(struct hinic_sq_ctrl)); + task_size = SIZE_8BYTES(sizeof(struct hinic_sq_task)); + bufdesc_size = nr_descs * sizeof(struct hinic_sq_bufdesc); + bufdesc_size = SIZE_8BYTES(bufdesc_size); + + ctrl->ctrl_info = HINIC_SQ_CTRL_SET(bufdesc_size, BUFDESC_SECT_LEN) | + HINIC_SQ_CTRL_SET(task_size, TASKSECT_LEN) | + HINIC_SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) | + HINIC_SQ_CTRL_SET(ctrl_size, LEN); + + ctrl->queue_info = HINIC_SQ_CTRL_SET(TX_MAX_MSS_DEFAULT, + QUEUE_INFO_MSS); +} + +static void sq_prepare_task(struct hinic_sq_task *task) +{ + task->pkt_info0 = + HINIC_SQ_TASK_INFO0_SET(0, L2HDR_LEN) | + HINIC_SQ_TASK_INFO0_SET(HINIC_L4_OFF_DISABLE, L4_OFFLOAD) | + HINIC_SQ_TASK_INFO0_SET(HINIC_OUTER_L3TYPE_UNKNOWN, + INNER_L3TYPE) | + HINIC_SQ_TASK_INFO0_SET(HINIC_VLAN_OFF_DISABLE, + VLAN_OFFLOAD) | + HINIC_SQ_TASK_INFO0_SET(HINIC_PKT_NOT_PARSED, PARSE_FLAG); + + task->pkt_info1 = + HINIC_SQ_TASK_INFO1_SET(HINIC_MEDIA_UNKNOWN, MEDIA_TYPE) | + HINIC_SQ_TASK_INFO1_SET(0, INNER_L4_LEN) | + HINIC_SQ_TASK_INFO1_SET(0, INNER_L3_LEN); + + task->pkt_info2 = + HINIC_SQ_TASK_INFO2_SET(0, TUNNEL_L4_LEN) | + HINIC_SQ_TASK_INFO2_SET(0, OUTER_L3_LEN) | + HINIC_SQ_TASK_INFO2_SET(HINIC_TUNNEL_L4TYPE_UNKNOWN, + TUNNEL_L4TYPE) | + HINIC_SQ_TASK_INFO2_SET(HINIC_OUTER_L3TYPE_UNKNOWN, + OUTER_L3TYPE); + + task->ufo_v6_identify = 0; + + task->pkt_info4 = 
HINIC_SQ_TASK_INFO4_SET(HINIC_L2TYPE_ETH, L2TYPE); + + task->zero_pad = 0; +} + +/** + * hinic_sq_prepare_wqe - prepare wqe before insert to the queue + * @sq: send queue + * @prod_idx: pi value + * @sq_wqe: wqe to prepare + * @sges: sges for use by the wqe for send for buf addresses + * @nr_sges: number of sges + **/ +void hinic_sq_prepare_wqe(struct hinic_sq *sq, u16 prod_idx, + struct hinic_sq_wqe *sq_wqe, struct hinic_sge *sges, + int nr_sges) +{ + int i; + + sq_prepare_ctrl(&sq_wqe->ctrl, prod_idx, nr_sges); + + sq_prepare_task(&sq_wqe->task); + + for (i = 0; i < nr_sges; i++) + sq_wqe->buf_descs[i].sge = sges[i]; +} + +/** + * sq_prepare_db - prepare doorbell to write + * @sq: send queue + * @prod_idx: pi value for the doorbell + * @cos: cos of the doorbell + * + * Return db value + **/ +static u32 sq_prepare_db(struct hinic_sq *sq, u16 prod_idx, unsigned int cos) +{ + struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq); + u8 hi_prod_idx = SQ_DB_PI_HI(SQ_MASKED_IDX(sq, prod_idx)); + + /* Data should be written to HW in Big Endian Format */ + return cpu_to_be32(HINIC_SQ_DB_INFO_SET(hi_prod_idx, PI_HI) | + HINIC_SQ_DB_INFO_SET(HINIC_DB_SQ_TYPE, TYPE) | + HINIC_SQ_DB_INFO_SET(HINIC_DATA_PATH, PATH) | + HINIC_SQ_DB_INFO_SET(cos, COS) | + HINIC_SQ_DB_INFO_SET(qp->q_id, QID)); +} + +/** + * hinic_sq_write_db- write doorbell + * @sq: send queue + * @prod_idx: pi value for the doorbell + * @wqe_size: wqe size + * @cos: cos of the wqe + **/ +void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size, + unsigned int cos) +{ + struct hinic_wq *wq = sq->wq; + + /* increment prod_idx to the next */ + prod_idx += ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size; + + wmb(); /* Write all before the doorbell */ + + writel(sq_prepare_db(sq, prod_idx, cos), SQ_DB_ADDR(sq, prod_idx)); +} + +/** + * hinic_sq_get_wqe - get wqe ptr in the current pi and update the pi + * @sq: sq to get wqe from + * @wqe_size: wqe size + * @prod_idx: returned pi + * + * Return wqe pointer + **/ +struct hinic_sq_wqe *hinic_sq_get_wqe(struct hinic_sq *sq, + unsigned int wqe_size, u16 *prod_idx) +{ + struct hinic_hw_wqe *hw_wqe = hinic_get_wqe(sq->wq, wqe_size, + prod_idx); + + if (IS_ERR(hw_wqe)) + return NULL; + + return &hw_wqe->sq_wqe; +} + +/** + * hinic_sq_write_wqe - write the wqe to the sq + * @sq: send queue + * @prod_idx: pi of the wqe + * @sq_wqe: the wqe to write + * @skb: skb to save + * @wqe_size: the size of the wqe + **/ +void hinic_sq_write_wqe(struct hinic_sq *sq, u16 prod_idx, + struct hinic_sq_wqe *sq_wqe, + struct sk_buff *skb, unsigned int wqe_size) +{ + struct hinic_hw_wqe *hw_wqe = (struct hinic_hw_wqe *)sq_wqe; + + sq->saved_skb[prod_idx] = skb; + + /* The data in the HW should be in Big Endian Format */ + hinic_cpu_to_be32(sq_wqe, wqe_size); + + hinic_write_wqe(sq->wq, hw_wqe, wqe_size); +} + +/** + * hinic_sq_read_wqe - read wqe ptr in the current ci and update the ci + * @sq: send queue + * @skb: return skb that was saved + * @wqe_size: the size of the wqe + * @cons_idx: consumer index of the wqe + * + * Return wqe in ci position + **/ +struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq, + struct sk_buff **skb, + unsigned int *wqe_size, u16 *cons_idx) +{ + struct hinic_hw_wqe *hw_wqe; + struct hinic_sq_wqe *sq_wqe; + struct hinic_sq_ctrl *ctrl; + unsigned int buf_sect_len; + u32 ctrl_info; + + /* read the ctrl section for getting wqe size */ + hw_wqe = hinic_read_wqe(sq->wq, sizeof(*ctrl), cons_idx); + if (IS_ERR(hw_wqe)) + return NULL; + + sq_wqe = 
&hw_wqe->sq_wqe; + ctrl = &sq_wqe->ctrl; + ctrl_info = be32_to_cpu(ctrl->ctrl_info); + buf_sect_len = HINIC_SQ_CTRL_GET(ctrl_info, BUFDESC_SECT_LEN); + + *wqe_size = sizeof(*ctrl) + sizeof(sq_wqe->task); + *wqe_size += SECT_SIZE_FROM_8BYTES(buf_sect_len); + + *skb = sq->saved_skb[*cons_idx]; + + /* using the real wqe size to read wqe again */ + hw_wqe = hinic_read_wqe(sq->wq, *wqe_size, cons_idx); + + return &hw_wqe->sq_wqe; +} + +/** + * hinic_sq_put_wqe - release the ci for new wqes + * @sq: send queue + * @wqe_size: the size of the wqe + **/ +void hinic_sq_put_wqe(struct hinic_sq *sq, unsigned int wqe_size) +{ + hinic_put_wqe(sq->wq, wqe_size); +} + +/** + * hinic_sq_get_sges - get sges from the wqe + * @sq_wqe: wqe to get the sges from its buffer addresses + * @sges: returned sges + * @nr_sges: number sges to return + **/ +void hinic_sq_get_sges(struct hinic_sq_wqe *sq_wqe, struct hinic_sge *sges, + int nr_sges) +{ + int i; + + for (i = 0; i < nr_sges && i < HINIC_MAX_SQ_BUFDESCS; i++) { + sges[i] = sq_wqe->buf_descs[i].sge; + hinic_be32_to_cpu(&sges[i], sizeof(sges[i])); + } +} + +/** + * hinic_rq_get_wqe - get wqe ptr in the current pi and update the pi + * @rq: rq to get wqe from + * @wqe_size: wqe size + * @prod_idx: returned pi + * + * Return wqe pointer + **/ +struct hinic_rq_wqe *hinic_rq_get_wqe(struct hinic_rq *rq, + unsigned int wqe_size, u16 *prod_idx) +{ + struct hinic_hw_wqe *hw_wqe = hinic_get_wqe(rq->wq, wqe_size, + prod_idx); + + if (IS_ERR(hw_wqe)) + return NULL; + + return &hw_wqe->rq_wqe; +} + +/** + * hinic_rq_write_wqe - write the wqe to the rq + * @rq: recv queue + * @prod_idx: pi of the wqe + * @rq_wqe: the wqe to write + * @skb: skb to save + **/ +void hinic_rq_write_wqe(struct hinic_rq *rq, u16 prod_idx, + struct hinic_rq_wqe *rq_wqe, struct sk_buff *skb) +{ + struct hinic_hw_wqe *hw_wqe = (struct hinic_hw_wqe *)rq_wqe; + + rq->saved_skb[prod_idx] = skb; + + /* The data in the HW should be in Big Endian Format */ + hinic_cpu_to_be32(rq_wqe, sizeof(*rq_wqe)); + + hinic_write_wqe(rq->wq, hw_wqe, sizeof(*rq_wqe)); +} + +/** + * hinic_rq_read_wqe - read wqe ptr in the current ci and update the ci + * @rq: recv queue + * @wqe_size: the size of the wqe + * @skb: return saved skb + * @cons_idx: consumer index of the wqe + * + * Return wqe in ci position + **/ +struct hinic_rq_wqe *hinic_rq_read_wqe(struct hinic_rq *rq, + unsigned int wqe_size, + struct sk_buff **skb, u16 *cons_idx) +{ + struct hinic_hw_wqe *hw_wqe; + struct hinic_rq_cqe *cqe; + int rx_done; + u32 status; + + hw_wqe = hinic_read_wqe(rq->wq, wqe_size, cons_idx); + if (IS_ERR(hw_wqe)) + return NULL; + + cqe = rq->cqe[*cons_idx]; + + status = be32_to_cpu(cqe->status); + + rx_done = HINIC_RQ_CQE_STATUS_GET(status, RXDONE); + if (!rx_done) + return NULL; + + *skb = rq->saved_skb[*cons_idx]; + + return &hw_wqe->rq_wqe; +} + +/** + * hinic_rq_read_next_wqe - increment ci and read the wqe in ci position + * @rq: recv queue + * @wqe_size: the size of the wqe + * @skb: return saved skb + * @cons_idx: consumer index in the wq + * + * Return wqe in incremented ci position + **/ +struct hinic_rq_wqe *hinic_rq_read_next_wqe(struct hinic_rq *rq, + unsigned int wqe_size, + struct sk_buff **skb, + u16 *cons_idx) +{ + struct hinic_wq *wq = rq->wq; + struct hinic_hw_wqe *hw_wqe; + unsigned int num_wqebbs; + + wqe_size = ALIGN(wqe_size, wq->wqebb_size); + num_wqebbs = wqe_size / wq->wqebb_size; + + *cons_idx = RQ_MASKED_IDX(rq, *cons_idx + num_wqebbs); + + *skb = rq->saved_skb[*cons_idx]; + + hw_wqe = 
hinic_read_wqe_direct(wq, *cons_idx); + + return &hw_wqe->rq_wqe; +} + +/** + * hinic_put_wqe - release the ci for new wqes + * @rq: recv queue + * @cons_idx: consumer index of the wqe + * @wqe_size: the size of the wqe + **/ +void hinic_rq_put_wqe(struct hinic_rq *rq, u16 cons_idx, + unsigned int wqe_size) +{ + struct hinic_rq_cqe *cqe = rq->cqe[cons_idx]; + u32 status = be32_to_cpu(cqe->status); + + status = HINIC_RQ_CQE_STATUS_CLEAR(status, RXDONE); + + /* Rx WQE size is 1 WQEBB, no wq shadow*/ + cqe->status = cpu_to_be32(status); + + wmb(); /* clear done flag */ + + hinic_put_wqe(rq->wq, wqe_size); +} + +/** + * hinic_rq_get_sge - get sge from the wqe + * @rq: recv queue + * @rq_wqe: wqe to get the sge from its buf address + * @cons_idx: consumer index + * @sge: returned sge + **/ +void hinic_rq_get_sge(struct hinic_rq *rq, struct hinic_rq_wqe *rq_wqe, + u16 cons_idx, struct hinic_sge *sge) +{ + struct hinic_rq_cqe *cqe = rq->cqe[cons_idx]; + u32 len = be32_to_cpu(cqe->len); + + sge->hi_addr = be32_to_cpu(rq_wqe->buf_desc.hi_addr); + sge->lo_addr = be32_to_cpu(rq_wqe->buf_desc.lo_addr); + sge->len = HINIC_RQ_CQE_SGE_GET(len, LEN); +} + +/** + * hinic_rq_prepare_wqe - prepare wqe before insert to the queue + * @rq: recv queue + * @prod_idx: pi value + * @rq_wqe: the wqe + * @sge: sge for use by the wqe for recv buf address + **/ +void hinic_rq_prepare_wqe(struct hinic_rq *rq, u16 prod_idx, + struct hinic_rq_wqe *rq_wqe, struct hinic_sge *sge) +{ + struct hinic_rq_cqe_sect *cqe_sect = &rq_wqe->cqe_sect; + struct hinic_rq_bufdesc *buf_desc = &rq_wqe->buf_desc; + struct hinic_rq_cqe *cqe = rq->cqe[prod_idx]; + struct hinic_rq_ctrl *ctrl = &rq_wqe->ctrl; + dma_addr_t cqe_dma = rq->cqe_dma[prod_idx]; + + ctrl->ctrl_info = + HINIC_RQ_CTRL_SET(SIZE_8BYTES(sizeof(*ctrl)), LEN) | + HINIC_RQ_CTRL_SET(SIZE_8BYTES(sizeof(*cqe_sect)), + COMPLETE_LEN) | + HINIC_RQ_CTRL_SET(SIZE_8BYTES(sizeof(*buf_desc)), + BUFDESC_SECT_LEN) | + HINIC_RQ_CTRL_SET(RQ_COMPLETE_SGE, COMPLETE_FORMAT); + + hinic_set_sge(&cqe_sect->sge, cqe_dma, sizeof(*cqe)); + + buf_desc->hi_addr = sge->hi_addr; + buf_desc->lo_addr = sge->lo_addr; +} + +/** + * hinic_rq_update - update pi of the rq + * @rq: recv queue + * @prod_idx: pi value + **/ +void hinic_rq_update(struct hinic_rq *rq, u16 prod_idx) +{ + *rq->pi_virt_addr = cpu_to_be16(RQ_MASKED_IDX(rq, prod_idx + 1)); +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h new file mode 100644 index 000000000000..df729a1587e9 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h @@ -0,0 +1,201 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#ifndef HINIC_HW_QP_H +#define HINIC_HW_QP_H + +#include +#include +#include +#include +#include + +#include "hinic_common.h" +#include "hinic_hw_if.h" +#include "hinic_hw_wqe.h" +#include "hinic_hw_wq.h" +#include "hinic_hw_qp_ctxt.h" + +#define HINIC_SQ_DB_INFO_PI_HI_SHIFT 0 +#define HINIC_SQ_DB_INFO_QID_SHIFT 8 +#define HINIC_SQ_DB_INFO_PATH_SHIFT 23 +#define HINIC_SQ_DB_INFO_COS_SHIFT 24 +#define HINIC_SQ_DB_INFO_TYPE_SHIFT 27 + +#define HINIC_SQ_DB_INFO_PI_HI_MASK 0xFF +#define HINIC_SQ_DB_INFO_QID_MASK 0x3FF +#define HINIC_SQ_DB_INFO_PATH_MASK 0x1 +#define HINIC_SQ_DB_INFO_COS_MASK 0x7 +#define HINIC_SQ_DB_INFO_TYPE_MASK 0x1F + +#define HINIC_SQ_DB_INFO_SET(val, member) \ + (((u32)(val) & HINIC_SQ_DB_INFO_##member##_MASK) \ + << HINIC_SQ_DB_INFO_##member##_SHIFT) + +#define HINIC_SQ_WQEBB_SIZE 64 +#define HINIC_RQ_WQEBB_SIZE 32 + +#define HINIC_SQ_PAGE_SIZE SZ_4K +#define HINIC_RQ_PAGE_SIZE SZ_4K + +#define HINIC_SQ_DEPTH SZ_4K +#define HINIC_RQ_DEPTH SZ_4K + +/* In any change to HINIC_RX_BUF_SZ, HINIC_RX_BUF_SZ_IDX must be changed */ +#define HINIC_RX_BUF_SZ 2048 +#define HINIC_RX_BUF_SZ_IDX HINIC_RX_BUF_SZ_2048_IDX + +#define HINIC_MIN_TX_WQE_SIZE(wq) \ + ALIGN(HINIC_SQ_WQE_SIZE(1), (wq)->wqebb_size) + +#define HINIC_MIN_TX_NUM_WQEBBS(sq) \ + (HINIC_MIN_TX_WQE_SIZE((sq)->wq) / (sq)->wq->wqebb_size) + +enum hinic_rx_buf_sz_idx { + HINIC_RX_BUF_SZ_32_IDX, + HINIC_RX_BUF_SZ_64_IDX, + HINIC_RX_BUF_SZ_96_IDX, + HINIC_RX_BUF_SZ_128_IDX, + HINIC_RX_BUF_SZ_192_IDX, + HINIC_RX_BUF_SZ_256_IDX, + HINIC_RX_BUF_SZ_384_IDX, + HINIC_RX_BUF_SZ_512_IDX, + HINIC_RX_BUF_SZ_768_IDX, + HINIC_RX_BUF_SZ_1024_IDX, + HINIC_RX_BUF_SZ_1536_IDX, + HINIC_RX_BUF_SZ_2048_IDX, + HINIC_RX_BUF_SZ_3072_IDX, + HINIC_RX_BUF_SZ_4096_IDX, + HINIC_RX_BUF_SZ_8192_IDX, + HINIC_RX_BUF_SZ_16384_IDX, +}; + +struct hinic_sq { + struct hinic_hwif *hwif; + + struct hinic_wq *wq; + + u32 irq; + u16 msix_entry; + + void *hw_ci_addr; + dma_addr_t hw_ci_dma_addr; + + void __iomem *db_base; + + struct sk_buff **saved_skb; +}; + +struct hinic_rq { + struct hinic_hwif *hwif; + + struct hinic_wq *wq; + + u32 irq; + u16 msix_entry; + + size_t buf_sz; + + struct sk_buff **saved_skb; + + struct hinic_rq_cqe **cqe; + dma_addr_t *cqe_dma; + + u16 *pi_virt_addr; + dma_addr_t pi_dma_addr; +}; + +struct hinic_qp { + struct hinic_sq sq; + struct hinic_rq rq; + + u16 q_id; +}; + +void hinic_qp_prepare_header(struct hinic_qp_ctxt_header *qp_ctxt_hdr, + enum hinic_qp_ctxt_type ctxt_type, + u16 num_queues, u16 max_queues); + +void hinic_sq_prepare_ctxt(struct hinic_sq_ctxt *sq_ctxt, + struct hinic_sq *sq, u16 global_qid); + +void hinic_rq_prepare_ctxt(struct hinic_rq_ctxt *rq_ctxt, + struct hinic_rq *rq, u16 global_qid); + +int hinic_init_sq(struct hinic_sq *sq, struct hinic_hwif *hwif, + struct hinic_wq *wq, struct msix_entry *entry, void *ci_addr, + dma_addr_t ci_dma_addr, void __iomem *db_base); + +void hinic_clean_sq(struct hinic_sq *sq); + +int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif, + struct hinic_wq *wq, struct msix_entry *entry); + +void hinic_clean_rq(struct hinic_rq *rq); + +int hinic_get_sq_free_wqebbs(struct hinic_sq *sq); + +int hinic_get_rq_free_wqebbs(struct hinic_rq *rq); + +void hinic_sq_prepare_wqe(struct hinic_sq *sq, u16 prod_idx, + struct hinic_sq_wqe *wqe, struct hinic_sge *sges, + int nr_sges); + +void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size, + unsigned int cos); + +struct hinic_sq_wqe *hinic_sq_get_wqe(struct hinic_sq *sq, + unsigned int wqe_size, u16 
*prod_idx); + +void hinic_sq_write_wqe(struct hinic_sq *sq, u16 prod_idx, + struct hinic_sq_wqe *wqe, struct sk_buff *skb, + unsigned int wqe_size); + +struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq, + struct sk_buff **skb, + unsigned int *wqe_size, u16 *cons_idx); + +void hinic_sq_put_wqe(struct hinic_sq *sq, unsigned int wqe_size); + +void hinic_sq_get_sges(struct hinic_sq_wqe *wqe, struct hinic_sge *sges, + int nr_sges); + +struct hinic_rq_wqe *hinic_rq_get_wqe(struct hinic_rq *rq, + unsigned int wqe_size, u16 *prod_idx); + +void hinic_rq_write_wqe(struct hinic_rq *rq, u16 prod_idx, + struct hinic_rq_wqe *wqe, struct sk_buff *skb); + +struct hinic_rq_wqe *hinic_rq_read_wqe(struct hinic_rq *rq, + unsigned int wqe_size, + struct sk_buff **skb, u16 *cons_idx); + +struct hinic_rq_wqe *hinic_rq_read_next_wqe(struct hinic_rq *rq, + unsigned int wqe_size, + struct sk_buff **skb, + u16 *cons_idx); + +void hinic_rq_put_wqe(struct hinic_rq *rq, u16 cons_idx, + unsigned int wqe_size); + +void hinic_rq_get_sge(struct hinic_rq *rq, struct hinic_rq_wqe *wqe, + u16 cons_idx, struct hinic_sge *sge); + +void hinic_rq_prepare_wqe(struct hinic_rq *rq, u16 prod_idx, + struct hinic_rq_wqe *wqe, struct hinic_sge *sge); + +void hinic_rq_update(struct hinic_rq *rq, u16 prod_idx); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h new file mode 100644 index 000000000000..376abf00762b --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h @@ -0,0 +1,214 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
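/*
 * Illustrative sketch, not part of this patch: the doorbell word rung by
 * hinic_sq_write_db() (declared in hinic_hw_qp.h above) is composed from the
 * HINIC_SQ_DB_INFO_* fields with the SET macro. The field values below are
 * placeholders picked for the example; the values the driver actually
 * programs live in hinic_sq_write_db() itself.
 */
static u32 demo_sq_db_info(u16 prod_idx, u16 qid, unsigned int cos)
{
	return HINIC_SQ_DB_INFO_SET(prod_idx >> 8, PI_HI) |
	       HINIC_SQ_DB_INFO_SET(qid, QID) |
	       HINIC_SQ_DB_INFO_SET(0, PATH) |
	       HINIC_SQ_DB_INFO_SET(cos, COS) |
	       HINIC_SQ_DB_INFO_SET(1, TYPE);
}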
+ * + */ + +#ifndef HINIC_HW_QP_CTXT_H +#define HINIC_HW_QP_CTXT_H + +#include + +#include "hinic_hw_cmdq.h" + +#define HINIC_SQ_CTXT_CEQ_ATTR_GLOBAL_SQ_ID_SHIFT 13 +#define HINIC_SQ_CTXT_CEQ_ATTR_EN_SHIFT 23 + +#define HINIC_SQ_CTXT_CEQ_ATTR_GLOBAL_SQ_ID_MASK 0x3FF +#define HINIC_SQ_CTXT_CEQ_ATTR_EN_MASK 0x1 + +#define HINIC_SQ_CTXT_CEQ_ATTR_SET(val, member) \ + (((u32)(val) & HINIC_SQ_CTXT_CEQ_ATTR_##member##_MASK) \ + << HINIC_SQ_CTXT_CEQ_ATTR_##member##_SHIFT) + +#define HINIC_SQ_CTXT_CI_IDX_SHIFT 11 +#define HINIC_SQ_CTXT_CI_WRAPPED_SHIFT 23 + +#define HINIC_SQ_CTXT_CI_IDX_MASK 0xFFF +#define HINIC_SQ_CTXT_CI_WRAPPED_MASK 0x1 + +#define HINIC_SQ_CTXT_CI_SET(val, member) \ + (((u32)(val) & HINIC_SQ_CTXT_CI_##member##_MASK) \ + << HINIC_SQ_CTXT_CI_##member##_SHIFT) + +#define HINIC_SQ_CTXT_WQ_PAGE_HI_PFN_SHIFT 0 +#define HINIC_SQ_CTXT_WQ_PAGE_PI_SHIFT 20 + +#define HINIC_SQ_CTXT_WQ_PAGE_HI_PFN_MASK 0xFFFFF +#define HINIC_SQ_CTXT_WQ_PAGE_PI_MASK 0xFFF + +#define HINIC_SQ_CTXT_WQ_PAGE_SET(val, member) \ + (((u32)(val) & HINIC_SQ_CTXT_WQ_PAGE_##member##_MASK) \ + << HINIC_SQ_CTXT_WQ_PAGE_##member##_SHIFT) + +#define HINIC_SQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT 0 +#define HINIC_SQ_CTXT_PREF_CACHE_MAX_SHIFT 14 +#define HINIC_SQ_CTXT_PREF_CACHE_MIN_SHIFT 25 + +#define HINIC_SQ_CTXT_PREF_CACHE_THRESHOLD_MASK 0x3FFF +#define HINIC_SQ_CTXT_PREF_CACHE_MAX_MASK 0x7FF +#define HINIC_SQ_CTXT_PREF_CACHE_MIN_MASK 0x7F + +#define HINIC_SQ_CTXT_PREF_WQ_HI_PFN_SHIFT 0 +#define HINIC_SQ_CTXT_PREF_CI_SHIFT 20 + +#define HINIC_SQ_CTXT_PREF_WQ_HI_PFN_MASK 0xFFFFF +#define HINIC_SQ_CTXT_PREF_CI_MASK 0xFFF + +#define HINIC_SQ_CTXT_PREF_SET(val, member) \ + (((u32)(val) & HINIC_SQ_CTXT_PREF_##member##_MASK) \ + << HINIC_SQ_CTXT_PREF_##member##_SHIFT) + +#define HINIC_SQ_CTXT_WQ_BLOCK_HI_PFN_SHIFT 0 + +#define HINIC_SQ_CTXT_WQ_BLOCK_HI_PFN_MASK 0x7FFFFF + +#define HINIC_SQ_CTXT_WQ_BLOCK_SET(val, member) \ + (((u32)(val) & HINIC_SQ_CTXT_WQ_BLOCK_##member##_MASK) \ + << HINIC_SQ_CTXT_WQ_BLOCK_##member##_SHIFT) + +#define HINIC_RQ_CTXT_CEQ_ATTR_EN_SHIFT 0 +#define HINIC_RQ_CTXT_CEQ_ATTR_WRAPPED_SHIFT 1 + +#define HINIC_RQ_CTXT_CEQ_ATTR_EN_MASK 0x1 +#define HINIC_RQ_CTXT_CEQ_ATTR_WRAPPED_MASK 0x1 + +#define HINIC_RQ_CTXT_CEQ_ATTR_SET(val, member) \ + (((u32)(val) & HINIC_RQ_CTXT_CEQ_ATTR_##member##_MASK) \ + << HINIC_RQ_CTXT_CEQ_ATTR_##member##_SHIFT) + +#define HINIC_RQ_CTXT_PI_IDX_SHIFT 0 +#define HINIC_RQ_CTXT_PI_INTR_SHIFT 22 + +#define HINIC_RQ_CTXT_PI_IDX_MASK 0xFFF +#define HINIC_RQ_CTXT_PI_INTR_MASK 0x3FF + +#define HINIC_RQ_CTXT_PI_SET(val, member) \ + (((u32)(val) & HINIC_RQ_CTXT_PI_##member##_MASK) << \ + HINIC_RQ_CTXT_PI_##member##_SHIFT) + +#define HINIC_RQ_CTXT_WQ_PAGE_HI_PFN_SHIFT 0 +#define HINIC_RQ_CTXT_WQ_PAGE_CI_SHIFT 20 + +#define HINIC_RQ_CTXT_WQ_PAGE_HI_PFN_MASK 0xFFFFF +#define HINIC_RQ_CTXT_WQ_PAGE_CI_MASK 0xFFF + +#define HINIC_RQ_CTXT_WQ_PAGE_SET(val, member) \ + (((u32)(val) & HINIC_RQ_CTXT_WQ_PAGE_##member##_MASK) << \ + HINIC_RQ_CTXT_WQ_PAGE_##member##_SHIFT) + +#define HINIC_RQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT 0 +#define HINIC_RQ_CTXT_PREF_CACHE_MAX_SHIFT 14 +#define HINIC_RQ_CTXT_PREF_CACHE_MIN_SHIFT 25 + +#define HINIC_RQ_CTXT_PREF_CACHE_THRESHOLD_MASK 0x3FFF +#define HINIC_RQ_CTXT_PREF_CACHE_MAX_MASK 0x7FF +#define HINIC_RQ_CTXT_PREF_CACHE_MIN_MASK 0x7F + +#define HINIC_RQ_CTXT_PREF_WQ_HI_PFN_SHIFT 0 +#define HINIC_RQ_CTXT_PREF_CI_SHIFT 20 + +#define HINIC_RQ_CTXT_PREF_WQ_HI_PFN_MASK 0xFFFFF +#define HINIC_RQ_CTXT_PREF_CI_MASK 0xFFF + +#define HINIC_RQ_CTXT_PREF_SET(val, member) \ + 
(((u32)(val) & HINIC_RQ_CTXT_PREF_##member##_MASK) << \ + HINIC_RQ_CTXT_PREF_##member##_SHIFT) + +#define HINIC_RQ_CTXT_WQ_BLOCK_HI_PFN_SHIFT 0 + +#define HINIC_RQ_CTXT_WQ_BLOCK_HI_PFN_MASK 0x7FFFFF + +#define HINIC_RQ_CTXT_WQ_BLOCK_SET(val, member) \ + (((u32)(val) & HINIC_RQ_CTXT_WQ_BLOCK_##member##_MASK) << \ + HINIC_RQ_CTXT_WQ_BLOCK_##member##_SHIFT) + +#define HINIC_SQ_CTXT_SIZE(num_sqs) (sizeof(struct hinic_qp_ctxt_header) \ + + (num_sqs) * sizeof(struct hinic_sq_ctxt)) + +#define HINIC_RQ_CTXT_SIZE(num_rqs) (sizeof(struct hinic_qp_ctxt_header) \ + + (num_rqs) * sizeof(struct hinic_rq_ctxt)) + +#define HINIC_WQ_PAGE_PFN_SHIFT 12 +#define HINIC_WQ_BLOCK_PFN_SHIFT 9 + +#define HINIC_WQ_PAGE_PFN(page_addr) ((page_addr) >> HINIC_WQ_PAGE_PFN_SHIFT) +#define HINIC_WQ_BLOCK_PFN(page_addr) ((page_addr) >> \ + HINIC_WQ_BLOCK_PFN_SHIFT) + +#define HINIC_Q_CTXT_MAX \ + ((HINIC_CMDQ_BUF_SIZE - sizeof(struct hinic_qp_ctxt_header)) \ + / sizeof(struct hinic_sq_ctxt)) + +enum hinic_qp_ctxt_type { + HINIC_QP_CTXT_TYPE_SQ, + HINIC_QP_CTXT_TYPE_RQ +}; + +struct hinic_qp_ctxt_header { + u16 num_queues; + u16 queue_type; + u32 addr_offset; +}; + +struct hinic_sq_ctxt { + u32 ceq_attr; + + u32 ci_wrapped; + + u32 wq_hi_pfn_pi; + u32 wq_lo_pfn; + + u32 pref_cache; + u32 pref_wrapped; + u32 pref_wq_hi_pfn_ci; + u32 pref_wq_lo_pfn; + + u32 rsvd0; + u32 rsvd1; + + u32 wq_block_hi_pfn; + u32 wq_block_lo_pfn; +}; + +struct hinic_rq_ctxt { + u32 ceq_attr; + + u32 pi_intr_attr; + + u32 wq_hi_pfn_ci; + u32 wq_lo_pfn; + + u32 pref_cache; + u32 pref_wrapped; + + u32 pref_wq_hi_pfn_ci; + u32 pref_wq_lo_pfn; + + u32 pi_paddr_hi; + u32 pi_paddr_lo; + + u32 wq_block_hi_pfn; + u32 wq_block_lo_pfn; +}; + +struct hinic_sq_ctxt_block { + struct hinic_qp_ctxt_header hdr; + struct hinic_sq_ctxt sq_ctxt[HINIC_Q_CTXT_MAX]; +}; + +struct hinic_rq_ctxt_block { + struct hinic_qp_ctxt_header hdr; + struct hinic_rq_ctxt rq_ctxt[HINIC_Q_CTXT_MAX]; +}; + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c new file mode 100644 index 000000000000..3e3181c089bd --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c @@ -0,0 +1,878 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
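/*
 * Worked example, not part of this patch: HINIC_WQ_PAGE_PFN() and
 * HINIC_WQ_BLOCK_PFN() in hinic_hw_qp_ctxt.h above simply strip the offset
 * bits of a 4 KiB-aligned WQ page address (>> 12) or a 512 B-aligned WQ
 * block address (>> 9). With invented, suitably aligned DMA addresses:
 *
 *   HINIC_WQ_PAGE_PFN(0x12345000)  == 0x12345000 >> 12 == 0x12345
 *   HINIC_WQ_BLOCK_PFN(0x0abcde00) == 0x0abcde00 >>  9 == 0x55e6f
 *
 * These PFNs are what the hi/lo PFN fields of the SQ/RQ context structures
 * above carry to the hardware.
 */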
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hinic_hw_if.h" +#include "hinic_hw_wqe.h" +#include "hinic_hw_wq.h" +#include "hinic_hw_cmdq.h" + +#define WQS_BLOCKS_PER_PAGE 4 + +#define WQ_BLOCK_SIZE 4096 +#define WQS_PAGE_SIZE (WQS_BLOCKS_PER_PAGE * WQ_BLOCK_SIZE) + +#define WQS_MAX_NUM_BLOCKS 128 +#define WQS_FREE_BLOCKS_SIZE(wqs) (WQS_MAX_NUM_BLOCKS * \ + sizeof((wqs)->free_blocks[0])) + +#define WQ_SIZE(wq) ((wq)->q_depth * (wq)->wqebb_size) + +#define WQ_PAGE_ADDR_SIZE sizeof(u64) +#define WQ_MAX_PAGES (WQ_BLOCK_SIZE / WQ_PAGE_ADDR_SIZE) + +#define CMDQ_BLOCK_SIZE 512 +#define CMDQ_PAGE_SIZE 4096 + +#define CMDQ_WQ_MAX_PAGES (CMDQ_BLOCK_SIZE / WQ_PAGE_ADDR_SIZE) + +#define WQ_BASE_VADDR(wqs, wq) \ + ((void *)((wqs)->page_vaddr[(wq)->page_idx]) \ + + (wq)->block_idx * WQ_BLOCK_SIZE) + +#define WQ_BASE_PADDR(wqs, wq) \ + ((wqs)->page_paddr[(wq)->page_idx] \ + + (wq)->block_idx * WQ_BLOCK_SIZE) + +#define WQ_BASE_ADDR(wqs, wq) \ + ((void *)((wqs)->shadow_page_vaddr[(wq)->page_idx]) \ + + (wq)->block_idx * WQ_BLOCK_SIZE) + +#define CMDQ_BASE_VADDR(cmdq_pages, wq) \ + ((void *)((cmdq_pages)->page_vaddr) \ + + (wq)->block_idx * CMDQ_BLOCK_SIZE) + +#define CMDQ_BASE_PADDR(cmdq_pages, wq) \ + ((cmdq_pages)->page_paddr \ + + (wq)->block_idx * CMDQ_BLOCK_SIZE) + +#define CMDQ_BASE_ADDR(cmdq_pages, wq) \ + ((void *)((cmdq_pages)->shadow_page_vaddr) \ + + (wq)->block_idx * CMDQ_BLOCK_SIZE) + +#define WQE_PAGE_OFF(wq, idx) (((idx) & ((wq)->num_wqebbs_per_page - 1)) * \ + (wq)->wqebb_size) + +#define WQE_PAGE_NUM(wq, idx) (((idx) / ((wq)->num_wqebbs_per_page)) \ + & ((wq)->num_q_pages - 1)) + +#define WQ_PAGE_ADDR(wq, idx) \ + ((wq)->shadow_block_vaddr[WQE_PAGE_NUM(wq, idx)]) + +#define MASKED_WQE_IDX(wq, idx) ((idx) & (wq)->mask) + +#define WQE_IN_RANGE(wqe, start, end) \ + (((unsigned long)(wqe) >= (unsigned long)(start)) && \ + ((unsigned long)(wqe) < (unsigned long)(end))) + +#define WQE_SHADOW_PAGE(wq, wqe) \ + (((unsigned long)(wqe) - (unsigned long)(wq)->shadow_wqe) \ + / (wq)->max_wqe_size) + +/** + * queue_alloc_page - allocate page for Queue + * @hwif: HW interface for allocating DMA + * @vaddr: virtual address will be returned in this address + * @paddr: physical address will be returned in this address + * @shadow_vaddr: VM area will be return here for holding WQ page addresses + * @page_sz: page size of each WQ page + * + * Return 0 - Success, negative - Failure + **/ +static int queue_alloc_page(struct hinic_hwif *hwif, u64 **vaddr, u64 *paddr, + void ***shadow_vaddr, size_t page_sz) +{ + struct pci_dev *pdev = hwif->pdev; + dma_addr_t dma_addr; + + *vaddr = dma_zalloc_coherent(&pdev->dev, page_sz, &dma_addr, + GFP_KERNEL); + if (!*vaddr) { + dev_err(&pdev->dev, "Failed to allocate dma for wqs page\n"); + return -ENOMEM; + } + + *paddr = (u64)dma_addr; + + /* use vzalloc for big mem */ + *shadow_vaddr = vzalloc(page_sz); + if (!*shadow_vaddr) + goto err_shadow_vaddr; + + return 0; + +err_shadow_vaddr: + dma_free_coherent(&pdev->dev, page_sz, *vaddr, dma_addr); + return -ENOMEM; +} + +/** + * wqs_allocate_page - allocate page for WQ set + * @wqs: Work Queue Set + * @page_idx: the page index of the page will be allocated + * + * Return 0 - Success, negative - Failure + **/ +static int wqs_allocate_page(struct hinic_wqs *wqs, int page_idx) +{ + return queue_alloc_page(wqs->hwif, &wqs->page_vaddr[page_idx], + &wqs->page_paddr[page_idx], + &wqs->shadow_page_vaddr[page_idx], + WQS_PAGE_SIZE); 
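/*
 * Worked example, not part of this patch: WQE_PAGE_NUM()/WQE_PAGE_OFF()
 * above decompose a WQEBB index into (queue page, byte offset) using pure
 * power-of-two masks. Assuming the SQ defaults from hinic_hw_qp.h (64-byte
 * WQEBBs and 4 KiB WQ pages, so 64 WQEBBs per page) and, say, 64 queue
 * pages:
 *
 *   idx = 130
 *   page   = (130 / 64) & (64 - 1)  == 2
 *   offset = (130 & (64 - 1)) * 64  == 2 * 64 == 128 bytes into that page
 *
 * This is why alloc_wq_pages()/hinic_wq_allocate() below insist that both
 * the per-page WQEBB count and the page count are powers of two.
 */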
+} + +/** + * wqs_free_page - free page of WQ set + * @wqs: Work Queue Set + * @page_idx: the page index of the page will be freed + **/ +static void wqs_free_page(struct hinic_wqs *wqs, int page_idx) +{ + struct hinic_hwif *hwif = wqs->hwif; + struct pci_dev *pdev = hwif->pdev; + + dma_free_coherent(&pdev->dev, WQS_PAGE_SIZE, + wqs->page_vaddr[page_idx], + (dma_addr_t)wqs->page_paddr[page_idx]); + vfree(wqs->shadow_page_vaddr[page_idx]); +} + +/** + * cmdq_allocate_page - allocate page for cmdq + * @cmdq_pages: the pages of the cmdq queue struct to hold the page + * + * Return 0 - Success, negative - Failure + **/ +static int cmdq_allocate_page(struct hinic_cmdq_pages *cmdq_pages) +{ + return queue_alloc_page(cmdq_pages->hwif, &cmdq_pages->page_vaddr, + &cmdq_pages->page_paddr, + &cmdq_pages->shadow_page_vaddr, + CMDQ_PAGE_SIZE); +} + +/** + * cmdq_free_page - free page from cmdq + * @cmdq_pages: the pages of the cmdq queue struct that hold the page + * + * Return 0 - Success, negative - Failure + **/ +static void cmdq_free_page(struct hinic_cmdq_pages *cmdq_pages) +{ + struct hinic_hwif *hwif = cmdq_pages->hwif; + struct pci_dev *pdev = hwif->pdev; + + dma_free_coherent(&pdev->dev, CMDQ_PAGE_SIZE, + cmdq_pages->page_vaddr, + (dma_addr_t)cmdq_pages->page_paddr); + vfree(cmdq_pages->shadow_page_vaddr); +} + +static int alloc_page_arrays(struct hinic_wqs *wqs) +{ + struct hinic_hwif *hwif = wqs->hwif; + struct pci_dev *pdev = hwif->pdev; + size_t size; + + size = wqs->num_pages * sizeof(*wqs->page_paddr); + wqs->page_paddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + if (!wqs->page_paddr) + return -ENOMEM; + + size = wqs->num_pages * sizeof(*wqs->page_vaddr); + wqs->page_vaddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + if (!wqs->page_vaddr) + goto err_page_vaddr; + + size = wqs->num_pages * sizeof(*wqs->shadow_page_vaddr); + wqs->shadow_page_vaddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + if (!wqs->shadow_page_vaddr) + goto err_page_shadow_vaddr; + + return 0; + +err_page_shadow_vaddr: + devm_kfree(&pdev->dev, wqs->page_vaddr); + +err_page_vaddr: + devm_kfree(&pdev->dev, wqs->page_paddr); + return -ENOMEM; +} + +static void free_page_arrays(struct hinic_wqs *wqs) +{ + struct hinic_hwif *hwif = wqs->hwif; + struct pci_dev *pdev = hwif->pdev; + + devm_kfree(&pdev->dev, wqs->shadow_page_vaddr); + devm_kfree(&pdev->dev, wqs->page_vaddr); + devm_kfree(&pdev->dev, wqs->page_paddr); +} + +static int wqs_next_block(struct hinic_wqs *wqs, int *page_idx, + int *block_idx) +{ + int pos; + + down(&wqs->alloc_blocks_lock); + + wqs->num_free_blks--; + + if (wqs->num_free_blks < 0) { + wqs->num_free_blks++; + up(&wqs->alloc_blocks_lock); + return -ENOMEM; + } + + pos = wqs->alloc_blk_pos++; + pos &= WQS_MAX_NUM_BLOCKS - 1; + + *page_idx = wqs->free_blocks[pos].page_idx; + *block_idx = wqs->free_blocks[pos].block_idx; + + wqs->free_blocks[pos].page_idx = -1; + wqs->free_blocks[pos].block_idx = -1; + + up(&wqs->alloc_blocks_lock); + return 0; +} + +static void wqs_return_block(struct hinic_wqs *wqs, int page_idx, + int block_idx) +{ + int pos; + + down(&wqs->alloc_blocks_lock); + + pos = wqs->return_blk_pos++; + pos &= WQS_MAX_NUM_BLOCKS - 1; + + wqs->free_blocks[pos].page_idx = page_idx; + wqs->free_blocks[pos].block_idx = block_idx; + + wqs->num_free_blks++; + + up(&wqs->alloc_blocks_lock); +} + +static void init_wqs_blocks_arr(struct hinic_wqs *wqs) +{ + int page_idx, blk_idx, pos = 0; + + for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) { + for (blk_idx = 0; blk_idx < 
WQS_BLOCKS_PER_PAGE; blk_idx++) { + wqs->free_blocks[pos].page_idx = page_idx; + wqs->free_blocks[pos].block_idx = blk_idx; + pos++; + } + } + + wqs->alloc_blk_pos = 0; + wqs->return_blk_pos = pos; + wqs->num_free_blks = pos; + + sema_init(&wqs->alloc_blocks_lock, 1); +} + +/** + * hinic_wqs_alloc - allocate Work Queues set + * @wqs: Work Queue Set + * @max_wqs: maximum wqs to allocate + * @hwif: HW interface for use for the allocation + * + * Return 0 - Success, negative - Failure + **/ +int hinic_wqs_alloc(struct hinic_wqs *wqs, int max_wqs, + struct hinic_hwif *hwif) +{ + struct pci_dev *pdev = hwif->pdev; + int err, i, page_idx; + + max_wqs = ALIGN(max_wqs, WQS_BLOCKS_PER_PAGE); + if (max_wqs > WQS_MAX_NUM_BLOCKS) { + dev_err(&pdev->dev, "Invalid max_wqs = %d\n", max_wqs); + return -EINVAL; + } + + wqs->hwif = hwif; + wqs->num_pages = max_wqs / WQS_BLOCKS_PER_PAGE; + + if (alloc_page_arrays(wqs)) { + dev_err(&pdev->dev, + "Failed to allocate mem for page addresses\n"); + return -ENOMEM; + } + + for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) { + err = wqs_allocate_page(wqs, page_idx); + if (err) { + dev_err(&pdev->dev, "Failed wq page allocation\n"); + goto err_wq_allocate_page; + } + } + + wqs->free_blocks = devm_kzalloc(&pdev->dev, WQS_FREE_BLOCKS_SIZE(wqs), + GFP_KERNEL); + if (!wqs->free_blocks) { + err = -ENOMEM; + goto err_alloc_blocks; + } + + init_wqs_blocks_arr(wqs); + return 0; + +err_alloc_blocks: +err_wq_allocate_page: + for (i = 0; i < page_idx; i++) + wqs_free_page(wqs, i); + + free_page_arrays(wqs); + return err; +} + +/** + * hinic_wqs_free - free Work Queues set + * @wqs: Work Queue Set + **/ +void hinic_wqs_free(struct hinic_wqs *wqs) +{ + struct hinic_hwif *hwif = wqs->hwif; + struct pci_dev *pdev = hwif->pdev; + int page_idx; + + devm_kfree(&pdev->dev, wqs->free_blocks); + + for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) + wqs_free_page(wqs, page_idx); + + free_page_arrays(wqs); +} + +/** + * alloc_wqes_shadow - allocate WQE shadows for WQ + * @wq: WQ to allocate shadows for + * + * Return 0 - Success, negative - Failure + **/ +static int alloc_wqes_shadow(struct hinic_wq *wq) +{ + struct hinic_hwif *hwif = wq->hwif; + struct pci_dev *pdev = hwif->pdev; + size_t size; + + size = wq->num_q_pages * wq->max_wqe_size; + wq->shadow_wqe = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + if (!wq->shadow_wqe) + return -ENOMEM; + + size = wq->num_q_pages * sizeof(wq->prod_idx); + wq->shadow_idx = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + if (!wq->shadow_idx) + goto err_shadow_idx; + + return 0; + +err_shadow_idx: + devm_kfree(&pdev->dev, wq->shadow_wqe); + return -ENOMEM; +} + +/** + * free_wqes_shadow - free WQE shadows of WQ + * @wq: WQ to free shadows from + **/ +static void free_wqes_shadow(struct hinic_wq *wq) +{ + struct hinic_hwif *hwif = wq->hwif; + struct pci_dev *pdev = hwif->pdev; + + devm_kfree(&pdev->dev, wq->shadow_idx); + devm_kfree(&pdev->dev, wq->shadow_wqe); +} + +/** + * free_wq_pages - free pages of WQ + * @hwif: HW interface for releasing dma addresses + * @wq: WQ to free pages from + * @num_q_pages: number pages to free + **/ +static void free_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif, + int num_q_pages) +{ + struct pci_dev *pdev = hwif->pdev; + int i; + + for (i = 0; i < num_q_pages; i++) { + void **vaddr = &wq->shadow_block_vaddr[i]; + u64 *paddr = &wq->block_vaddr[i]; + dma_addr_t dma_addr; + + dma_addr = (dma_addr_t)be64_to_cpu(*paddr); + dma_free_coherent(&pdev->dev, wq->wq_page_size, *vaddr, + dma_addr); + } + + 
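/*
 * Minimal sketch, not part of this patch: wqs_next_block()/wqs_return_block()
 * above implement a circular free list. Free (page, block) pairs live in a
 * power-of-two array, an allocation cursor and a return cursor chase each
 * other around it, and a count checked under the semaphore rejects requests
 * once no block is left. Stripped of the hinic types, the pattern is:
 */
struct demo_free_block { int page_idx; int block_idx; };

struct demo_block_pool {
	struct demo_free_block blocks[WQS_MAX_NUM_BLOCKS];	/* power of two */
	int alloc_pos;
	int return_pos;
	int num_free;
	struct semaphore lock;
};

static int demo_pool_get(struct demo_block_pool *pool,
			 struct demo_free_block *out)
{
	down(&pool->lock);
	if (pool->num_free <= 0) {
		up(&pool->lock);
		return -ENOMEM;
	}
	pool->num_free--;
	*out = pool->blocks[pool->alloc_pos++ & (WQS_MAX_NUM_BLOCKS - 1)];
	up(&pool->lock);
	return 0;
}

static void demo_pool_put(struct demo_block_pool *pool,
			  struct demo_free_block blk)
{
	down(&pool->lock);
	pool->blocks[pool->return_pos++ & (WQS_MAX_NUM_BLOCKS - 1)] = blk;
	pool->num_free++;
	up(&pool->lock);
}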
free_wqes_shadow(wq); +} + +/** + * alloc_wq_pages - alloc pages for WQ + * @hwif: HW interface for allocating dma addresses + * @wq: WQ to allocate pages for + * @max_pages: maximum pages allowed + * + * Return 0 - Success, negative - Failure + **/ +static int alloc_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif, + int max_pages) +{ + struct pci_dev *pdev = hwif->pdev; + int i, err, num_q_pages; + + num_q_pages = ALIGN(WQ_SIZE(wq), wq->wq_page_size) / wq->wq_page_size; + if (num_q_pages > max_pages) { + dev_err(&pdev->dev, "Number wq pages exceeds the limit\n"); + return -EINVAL; + } + + if (num_q_pages & (num_q_pages - 1)) { + dev_err(&pdev->dev, "Number wq pages must be power of 2\n"); + return -EINVAL; + } + + wq->num_q_pages = num_q_pages; + + err = alloc_wqes_shadow(wq); + if (err) { + dev_err(&pdev->dev, "Failed to allocate wqe shadow\n"); + return err; + } + + for (i = 0; i < num_q_pages; i++) { + void **vaddr = &wq->shadow_block_vaddr[i]; + u64 *paddr = &wq->block_vaddr[i]; + dma_addr_t dma_addr; + + *vaddr = dma_zalloc_coherent(&pdev->dev, wq->wq_page_size, + &dma_addr, GFP_KERNEL); + if (!*vaddr) { + dev_err(&pdev->dev, "Failed to allocate wq page\n"); + goto err_alloc_wq_pages; + } + + /* HW uses Big Endian Format */ + *paddr = cpu_to_be64(dma_addr); + } + + return 0; + +err_alloc_wq_pages: + free_wq_pages(wq, hwif, i); + return -ENOMEM; +} + +/** + * hinic_wq_allocate - Allocate the WQ resources from the WQS + * @wqs: WQ set from which to allocate the WQ resources + * @wq: WQ to allocate resources for it from the WQ set + * @wqebb_size: Work Queue Block Byte Size + * @wq_page_size: the page size in the Work Queue + * @q_depth: number of wqebbs in WQ + * @max_wqe_size: maximum WQE size that will be used in the WQ + * + * Return 0 - Success, negative - Failure + **/ +int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq, + u16 wqebb_size, u16 wq_page_size, u16 q_depth, + u16 max_wqe_size) +{ + struct hinic_hwif *hwif = wqs->hwif; + struct pci_dev *pdev = hwif->pdev; + u16 num_wqebbs_per_page; + int err; + + if (wqebb_size == 0) { + dev_err(&pdev->dev, "wqebb_size must be > 0\n"); + return -EINVAL; + } + + if (wq_page_size == 0) { + dev_err(&pdev->dev, "wq_page_size must be > 0\n"); + return -EINVAL; + } + + if (q_depth & (q_depth - 1)) { + dev_err(&pdev->dev, "WQ q_depth must be power of 2\n"); + return -EINVAL; + } + + num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) / wqebb_size; + + if (num_wqebbs_per_page & (num_wqebbs_per_page - 1)) { + dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n"); + return -EINVAL; + } + + wq->hwif = hwif; + + err = wqs_next_block(wqs, &wq->page_idx, &wq->block_idx); + if (err) { + dev_err(&pdev->dev, "Failed to get free wqs next block\n"); + return err; + } + + wq->wqebb_size = wqebb_size; + wq->wq_page_size = wq_page_size; + wq->q_depth = q_depth; + wq->max_wqe_size = max_wqe_size; + wq->num_wqebbs_per_page = num_wqebbs_per_page; + + wq->block_vaddr = WQ_BASE_VADDR(wqs, wq); + wq->shadow_block_vaddr = WQ_BASE_ADDR(wqs, wq); + wq->block_paddr = WQ_BASE_PADDR(wqs, wq); + + err = alloc_wq_pages(wq, wqs->hwif, WQ_MAX_PAGES); + if (err) { + dev_err(&pdev->dev, "Failed to allocate wq pages\n"); + goto err_alloc_wq_pages; + } + + atomic_set(&wq->cons_idx, 0); + atomic_set(&wq->prod_idx, 0); + atomic_set(&wq->delta, q_depth); + wq->mask = q_depth - 1; + + return 0; + +err_alloc_wq_pages: + wqs_return_block(wqs, wq->page_idx, wq->block_idx); + return err; +} + +/** + * hinic_wq_free - Free the WQ resources to the WQS + * 
@wqs: WQ set to free the WQ resources to it + * @wq: WQ to free its resources to the WQ set resources + **/ +void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq) +{ + free_wq_pages(wq, wqs->hwif, wq->num_q_pages); + + wqs_return_block(wqs, wq->page_idx, wq->block_idx); +} + +/** + * hinic_wqs_cmdq_alloc - Allocate wqs for cmdqs + * @cmdq_pages: will hold the pages of the cmdq + * @wq: returned wqs + * @hwif: HW interface + * @cmdq_blocks: number of cmdq blocks/wq to allocate + * @wqebb_size: Work Queue Block Byte Size + * @wq_page_size: the page size in the Work Queue + * @q_depth: number of wqebbs in WQ + * @max_wqe_size: maximum WQE size that will be used in the WQ + * + * Return 0 - Success, negative - Failure + **/ +int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages, + struct hinic_wq *wq, struct hinic_hwif *hwif, + int cmdq_blocks, u16 wqebb_size, u16 wq_page_size, + u16 q_depth, u16 max_wqe_size) +{ + struct pci_dev *pdev = hwif->pdev; + u16 num_wqebbs_per_page; + int i, j, err = -ENOMEM; + + if (wqebb_size == 0) { + dev_err(&pdev->dev, "wqebb_size must be > 0\n"); + return -EINVAL; + } + + if (wq_page_size == 0) { + dev_err(&pdev->dev, "wq_page_size must be > 0\n"); + return -EINVAL; + } + + if (q_depth & (q_depth - 1)) { + dev_err(&pdev->dev, "WQ q_depth must be power of 2\n"); + return -EINVAL; + } + + num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) / wqebb_size; + + if (num_wqebbs_per_page & (num_wqebbs_per_page - 1)) { + dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n"); + return -EINVAL; + } + + cmdq_pages->hwif = hwif; + + err = cmdq_allocate_page(cmdq_pages); + if (err) { + dev_err(&pdev->dev, "Failed to allocate CMDQ page\n"); + return err; + } + + for (i = 0; i < cmdq_blocks; i++) { + wq[i].hwif = hwif; + wq[i].page_idx = 0; + wq[i].block_idx = i; + + wq[i].wqebb_size = wqebb_size; + wq[i].wq_page_size = wq_page_size; + wq[i].q_depth = q_depth; + wq[i].max_wqe_size = max_wqe_size; + wq[i].num_wqebbs_per_page = num_wqebbs_per_page; + + wq[i].block_vaddr = CMDQ_BASE_VADDR(cmdq_pages, &wq[i]); + wq[i].shadow_block_vaddr = CMDQ_BASE_ADDR(cmdq_pages, &wq[i]); + wq[i].block_paddr = CMDQ_BASE_PADDR(cmdq_pages, &wq[i]); + + err = alloc_wq_pages(&wq[i], cmdq_pages->hwif, + CMDQ_WQ_MAX_PAGES); + if (err) { + dev_err(&pdev->dev, "Failed to alloc CMDQ blocks\n"); + goto err_cmdq_block; + } + + atomic_set(&wq[i].cons_idx, 0); + atomic_set(&wq[i].prod_idx, 0); + atomic_set(&wq[i].delta, q_depth); + wq[i].mask = q_depth - 1; + } + + return 0; + +err_cmdq_block: + for (j = 0; j < i; j++) + free_wq_pages(&wq[j], cmdq_pages->hwif, wq[j].num_q_pages); + + cmdq_free_page(cmdq_pages); + return err; +} + +/** + * hinic_wqs_cmdq_free - Free wqs from cmdqs + * @cmdq_pages: hold the pages of the cmdq + * @wq: wqs to free + * @cmdq_blocks: number of wqs to free + **/ +void hinic_wqs_cmdq_free(struct hinic_cmdq_pages *cmdq_pages, + struct hinic_wq *wq, int cmdq_blocks) +{ + int i; + + for (i = 0; i < cmdq_blocks; i++) + free_wq_pages(&wq[i], cmdq_pages->hwif, wq[i].num_q_pages); + + cmdq_free_page(cmdq_pages); +} + +static void copy_wqe_to_shadow(struct hinic_wq *wq, void *shadow_addr, + int num_wqebbs, u16 idx) +{ + void *wqebb_addr; + int i; + + for (i = 0; i < num_wqebbs; i++, idx++) { + idx = MASKED_WQE_IDX(wq, idx); + wqebb_addr = WQ_PAGE_ADDR(wq, idx) + + WQE_PAGE_OFF(wq, idx); + + memcpy(shadow_addr, wqebb_addr, wq->wqebb_size); + + shadow_addr += wq->wqebb_size; + } +} + +static void copy_wqe_from_shadow(struct hinic_wq *wq, void *shadow_addr, + int 
num_wqebbs, u16 idx) +{ + void *wqebb_addr; + int i; + + for (i = 0; i < num_wqebbs; i++, idx++) { + idx = MASKED_WQE_IDX(wq, idx); + wqebb_addr = WQ_PAGE_ADDR(wq, idx) + + WQE_PAGE_OFF(wq, idx); + + memcpy(wqebb_addr, shadow_addr, wq->wqebb_size); + shadow_addr += wq->wqebb_size; + } +} + +/** + * hinic_get_wqe - get wqe ptr in the current pi and update the pi + * @wq: wq to get wqe from + * @wqe_size: wqe size + * @prod_idx: returned pi + * + * Return wqe pointer + **/ +struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size, + u16 *prod_idx) +{ + int curr_pg, end_pg, num_wqebbs; + u16 curr_prod_idx, end_prod_idx; + + *prod_idx = MASKED_WQE_IDX(wq, atomic_read(&wq->prod_idx)); + + num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size; + + if (atomic_sub_return(num_wqebbs, &wq->delta) <= 0) { + atomic_add(num_wqebbs, &wq->delta); + return ERR_PTR(-EBUSY); + } + + end_prod_idx = atomic_add_return(num_wqebbs, &wq->prod_idx); + + end_prod_idx = MASKED_WQE_IDX(wq, end_prod_idx); + curr_prod_idx = end_prod_idx - num_wqebbs; + curr_prod_idx = MASKED_WQE_IDX(wq, curr_prod_idx); + + /* end prod index points to the next wqebb, therefore minus 1 */ + end_prod_idx = MASKED_WQE_IDX(wq, end_prod_idx - 1); + + curr_pg = WQE_PAGE_NUM(wq, curr_prod_idx); + end_pg = WQE_PAGE_NUM(wq, end_prod_idx); + + *prod_idx = curr_prod_idx; + + if (curr_pg != end_pg) { + void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size]; + + copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *prod_idx); + + wq->shadow_idx[curr_pg] = *prod_idx; + return shadow_addr; + } + + return WQ_PAGE_ADDR(wq, *prod_idx) + WQE_PAGE_OFF(wq, *prod_idx); +} + +/** + * hinic_put_wqe - return the wqe place to use for a new wqe + * @wq: wq to return wqe + * @wqe_size: wqe size + **/ +void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size) +{ + int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size; + + atomic_add(num_wqebbs, &wq->cons_idx); + + atomic_add(num_wqebbs, &wq->delta); +} + +/** + * hinic_read_wqe - read wqe ptr in the current ci + * @wq: wq to get read from + * @wqe_size: wqe size + * @cons_idx: returned ci + * + * Return wqe pointer + **/ +struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size, + u16 *cons_idx) +{ + int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size; + u16 curr_cons_idx, end_cons_idx; + int curr_pg, end_pg; + + if ((atomic_read(&wq->delta) + num_wqebbs) > wq->q_depth) + return ERR_PTR(-EBUSY); + + curr_cons_idx = atomic_read(&wq->cons_idx); + + curr_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx); + end_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx + num_wqebbs - 1); + + curr_pg = WQE_PAGE_NUM(wq, curr_cons_idx); + end_pg = WQE_PAGE_NUM(wq, end_cons_idx); + + *cons_idx = curr_cons_idx; + + if (curr_pg != end_pg) { + void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size]; + + copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *cons_idx); + return shadow_addr; + } + + return WQ_PAGE_ADDR(wq, *cons_idx) + WQE_PAGE_OFF(wq, *cons_idx); +} + +/** + * hinic_read_wqe_direct - read wqe directly from ci position + * @wq: wq + * @cons_idx: ci position + * + * Return wqe + **/ +struct hinic_hw_wqe *hinic_read_wqe_direct(struct hinic_wq *wq, u16 cons_idx) +{ + return WQ_PAGE_ADDR(wq, cons_idx) + WQE_PAGE_OFF(wq, cons_idx); +} + +/** + * wqe_shadow - check if a wqe is shadow + * @wq: wq of the wqe + * @wqe: the wqe for shadow checking + * + * Return true - shadow, false - Not shadow + **/ +static inline bool wqe_shadow(struct hinic_wq *wq, 
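/*
 * Sketch, not part of this patch: the ring-occupancy accounting used by
 * hinic_get_wqe()/hinic_put_wqe() above. wq->delta starts at q_depth (all
 * WQEBBs free); a producer reserves space by atomically subtracting its
 * WQEBB count and rolls the subtraction back if that would leave the ring
 * without any slack, and the consumer returns the space in hinic_put_wqe().
 */
static int demo_reserve_wqebbs(atomic_t *delta, int num_wqebbs)
{
	if (atomic_sub_return(num_wqebbs, delta) <= 0) {
		atomic_add(num_wqebbs, delta);	/* roll back the reservation */
		return -EBUSY;
	}
	return 0;
}

static void demo_release_wqebbs(atomic_t *delta, int num_wqebbs)
{
	atomic_add(num_wqebbs, delta);
}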
struct hinic_hw_wqe *wqe) +{ + size_t wqe_shadow_size = wq->num_q_pages * wq->max_wqe_size; + + return WQE_IN_RANGE(wqe, wq->shadow_wqe, + &wq->shadow_wqe[wqe_shadow_size]); +} + +/** + * hinic_write_wqe - write the wqe to the wq + * @wq: wq to write wqe to + * @wqe: wqe to write + * @wqe_size: wqe size + **/ +void hinic_write_wqe(struct hinic_wq *wq, struct hinic_hw_wqe *wqe, + unsigned int wqe_size) +{ + int curr_pg, num_wqebbs; + void *shadow_addr; + u16 prod_idx; + + if (wqe_shadow(wq, wqe)) { + curr_pg = WQE_SHADOW_PAGE(wq, wqe); + + prod_idx = wq->shadow_idx[curr_pg]; + num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size; + shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size]; + + copy_wqe_from_shadow(wq, shadow_addr, num_wqebbs, prod_idx); + } +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h new file mode 100644 index 000000000000..9c030a0f035e --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h @@ -0,0 +1,117 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef HINIC_HW_WQ_H +#define HINIC_HW_WQ_H + +#include +#include +#include + +#include "hinic_hw_if.h" +#include "hinic_hw_wqe.h" + +struct hinic_free_block { + int page_idx; + int block_idx; +}; + +struct hinic_wq { + struct hinic_hwif *hwif; + + int page_idx; + int block_idx; + + u16 wqebb_size; + u16 wq_page_size; + u16 q_depth; + u16 max_wqe_size; + u16 num_wqebbs_per_page; + + /* The addresses are 64 bit in the HW */ + u64 block_paddr; + void **shadow_block_vaddr; + u64 *block_vaddr; + + int num_q_pages; + u8 *shadow_wqe; + u16 *shadow_idx; + + atomic_t cons_idx; + atomic_t prod_idx; + atomic_t delta; + u16 mask; +}; + +struct hinic_wqs { + struct hinic_hwif *hwif; + int num_pages; + + /* The addresses are 64 bit in the HW */ + u64 *page_paddr; + u64 **page_vaddr; + void ***shadow_page_vaddr; + + struct hinic_free_block *free_blocks; + int alloc_blk_pos; + int return_blk_pos; + int num_free_blks; + + /* Lock for getting a free block from the WQ set */ + struct semaphore alloc_blocks_lock; +}; + +struct hinic_cmdq_pages { + /* The addresses are 64 bit in the HW */ + u64 page_paddr; + u64 *page_vaddr; + void **shadow_page_vaddr; + + struct hinic_hwif *hwif; +}; + +int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages, + struct hinic_wq *wq, struct hinic_hwif *hwif, + int cmdq_blocks, u16 wqebb_size, u16 wq_page_size, + u16 q_depth, u16 max_wqe_size); + +void hinic_wqs_cmdq_free(struct hinic_cmdq_pages *cmdq_pages, + struct hinic_wq *wq, int cmdq_blocks); + +int hinic_wqs_alloc(struct hinic_wqs *wqs, int num_wqs, + struct hinic_hwif *hwif); + +void hinic_wqs_free(struct hinic_wqs *wqs); + +int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq, + u16 wqebb_size, u16 wq_page_size, u16 q_depth, + u16 max_wqe_size); + +void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq); + +struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size, + u16 *prod_idx); + +void 
hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size); + +struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size, + u16 *cons_idx); + +struct hinic_hw_wqe *hinic_read_wqe_direct(struct hinic_wq *wq, u16 cons_idx); + +void hinic_write_wqe(struct hinic_wq *wq, struct hinic_hw_wqe *wqe, + unsigned int wqe_size); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h new file mode 100644 index 000000000000..bc73485483c5 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h @@ -0,0 +1,368 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef HINIC_HW_WQE_H +#define HINIC_HW_WQE_H + +#include "hinic_common.h" + +#define HINIC_CMDQ_CTRL_PI_SHIFT 0 +#define HINIC_CMDQ_CTRL_CMD_SHIFT 16 +#define HINIC_CMDQ_CTRL_MOD_SHIFT 24 +#define HINIC_CMDQ_CTRL_ACK_TYPE_SHIFT 29 +#define HINIC_CMDQ_CTRL_HW_BUSY_BIT_SHIFT 31 + +#define HINIC_CMDQ_CTRL_PI_MASK 0xFFFF +#define HINIC_CMDQ_CTRL_CMD_MASK 0xFF +#define HINIC_CMDQ_CTRL_MOD_MASK 0x1F +#define HINIC_CMDQ_CTRL_ACK_TYPE_MASK 0x3 +#define HINIC_CMDQ_CTRL_HW_BUSY_BIT_MASK 0x1 + +#define HINIC_CMDQ_CTRL_SET(val, member) \ + (((u32)(val) & HINIC_CMDQ_CTRL_##member##_MASK) \ + << HINIC_CMDQ_CTRL_##member##_SHIFT) + +#define HINIC_CMDQ_CTRL_GET(val, member) \ + (((val) >> HINIC_CMDQ_CTRL_##member##_SHIFT) \ + & HINIC_CMDQ_CTRL_##member##_MASK) + +#define HINIC_CMDQ_WQE_HEADER_BUFDESC_LEN_SHIFT 0 +#define HINIC_CMDQ_WQE_HEADER_COMPLETE_FMT_SHIFT 15 +#define HINIC_CMDQ_WQE_HEADER_DATA_FMT_SHIFT 22 +#define HINIC_CMDQ_WQE_HEADER_COMPLETE_REQ_SHIFT 23 +#define HINIC_CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_SHIFT 27 +#define HINIC_CMDQ_WQE_HEADER_CTRL_LEN_SHIFT 29 +#define HINIC_CMDQ_WQE_HEADER_TOGGLED_WRAPPED_SHIFT 31 + +#define HINIC_CMDQ_WQE_HEADER_BUFDESC_LEN_MASK 0xFF +#define HINIC_CMDQ_WQE_HEADER_COMPLETE_FMT_MASK 0x1 +#define HINIC_CMDQ_WQE_HEADER_DATA_FMT_MASK 0x1 +#define HINIC_CMDQ_WQE_HEADER_COMPLETE_REQ_MASK 0x1 +#define HINIC_CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_MASK 0x3 +#define HINIC_CMDQ_WQE_HEADER_CTRL_LEN_MASK 0x3 +#define HINIC_CMDQ_WQE_HEADER_TOGGLED_WRAPPED_MASK 0x1 + +#define HINIC_CMDQ_WQE_HEADER_SET(val, member) \ + (((u32)(val) & HINIC_CMDQ_WQE_HEADER_##member##_MASK) \ + << HINIC_CMDQ_WQE_HEADER_##member##_SHIFT) + +#define HINIC_CMDQ_WQE_HEADER_GET(val, member) \ + (((val) >> HINIC_CMDQ_WQE_HEADER_##member##_SHIFT) \ + & HINIC_CMDQ_WQE_HEADER_##member##_MASK) + +#define HINIC_SQ_CTRL_BUFDESC_SECT_LEN_SHIFT 0 +#define HINIC_SQ_CTRL_TASKSECT_LEN_SHIFT 16 +#define HINIC_SQ_CTRL_DATA_FORMAT_SHIFT 22 +#define HINIC_SQ_CTRL_LEN_SHIFT 29 + +#define HINIC_SQ_CTRL_BUFDESC_SECT_LEN_MASK 0xFF +#define HINIC_SQ_CTRL_TASKSECT_LEN_MASK 0x1F +#define HINIC_SQ_CTRL_DATA_FORMAT_MASK 0x1 +#define HINIC_SQ_CTRL_LEN_MASK 0x3 + +#define HINIC_SQ_CTRL_QUEUE_INFO_MSS_SHIFT 13 + +#define HINIC_SQ_CTRL_QUEUE_INFO_MSS_MASK 0x3FFF + +#define HINIC_SQ_CTRL_SET(val, member) \ + (((u32)(val) & HINIC_SQ_CTRL_##member##_MASK) \ + << 
HINIC_SQ_CTRL_##member##_SHIFT) + +#define HINIC_SQ_CTRL_GET(val, member) \ + (((val) >> HINIC_SQ_CTRL_##member##_SHIFT) \ + & HINIC_SQ_CTRL_##member##_MASK) + +#define HINIC_SQ_TASK_INFO0_L2HDR_LEN_SHIFT 0 +#define HINIC_SQ_TASK_INFO0_L4_OFFLOAD_SHIFT 8 +#define HINIC_SQ_TASK_INFO0_INNER_L3TYPE_SHIFT 10 +#define HINIC_SQ_TASK_INFO0_VLAN_OFFLOAD_SHIFT 12 +#define HINIC_SQ_TASK_INFO0_PARSE_FLAG_SHIFT 13 +/* 1 bit reserved */ +#define HINIC_SQ_TASK_INFO0_TSO_FLAG_SHIFT 15 +#define HINIC_SQ_TASK_INFO0_VLAN_TAG_SHIFT 16 + +#define HINIC_SQ_TASK_INFO0_L2HDR_LEN_MASK 0xFF +#define HINIC_SQ_TASK_INFO0_L4_OFFLOAD_MASK 0x3 +#define HINIC_SQ_TASK_INFO0_INNER_L3TYPE_MASK 0x3 +#define HINIC_SQ_TASK_INFO0_VLAN_OFFLOAD_MASK 0x1 +#define HINIC_SQ_TASK_INFO0_PARSE_FLAG_MASK 0x1 +/* 1 bit reserved */ +#define HINIC_SQ_TASK_INFO0_TSO_FLAG_MASK 0x1 +#define HINIC_SQ_TASK_INFO0_VLAN_TAG_MASK 0xFFFF + +#define HINIC_SQ_TASK_INFO0_SET(val, member) \ + (((u32)(val) & HINIC_SQ_TASK_INFO0_##member##_MASK) << \ + HINIC_SQ_TASK_INFO0_##member##_SHIFT) + +/* 8 bits reserved */ +#define HINIC_SQ_TASK_INFO1_MEDIA_TYPE_SHIFT 8 +#define HINIC_SQ_TASK_INFO1_INNER_L4_LEN_SHIFT 16 +#define HINIC_SQ_TASK_INFO1_INNER_L3_LEN_SHIFT 24 + +/* 8 bits reserved */ +#define HINIC_SQ_TASK_INFO1_MEDIA_TYPE_MASK 0xFF +#define HINIC_SQ_TASK_INFO1_INNER_L4_LEN_MASK 0xFF +#define HINIC_SQ_TASK_INFO1_INNER_L3_LEN_MASK 0xFF + +#define HINIC_SQ_TASK_INFO1_SET(val, member) \ + (((u32)(val) & HINIC_SQ_TASK_INFO1_##member##_MASK) << \ + HINIC_SQ_TASK_INFO1_##member##_SHIFT) + +#define HINIC_SQ_TASK_INFO2_TUNNEL_L4_LEN_SHIFT 0 +#define HINIC_SQ_TASK_INFO2_OUTER_L3_LEN_SHIFT 12 +#define HINIC_SQ_TASK_INFO2_TUNNEL_L4TYPE_SHIFT 19 +/* 1 bit reserved */ +#define HINIC_SQ_TASK_INFO2_OUTER_L3TYPE_SHIFT 22 +/* 8 bits reserved */ + +#define HINIC_SQ_TASK_INFO2_TUNNEL_L4_LEN_MASK 0xFFF +#define HINIC_SQ_TASK_INFO2_OUTER_L3_LEN_MASK 0x7F +#define HINIC_SQ_TASK_INFO2_TUNNEL_L4TYPE_MASK 0x3 +/* 1 bit reserved */ +#define HINIC_SQ_TASK_INFO2_OUTER_L3TYPE_MASK 0x3 +/* 8 bits reserved */ + +#define HINIC_SQ_TASK_INFO2_SET(val, member) \ + (((u32)(val) & HINIC_SQ_TASK_INFO2_##member##_MASK) << \ + HINIC_SQ_TASK_INFO2_##member##_SHIFT) + +/* 31 bits reserved */ +#define HINIC_SQ_TASK_INFO4_L2TYPE_SHIFT 31 + +/* 31 bits reserved */ +#define HINIC_SQ_TASK_INFO4_L2TYPE_MASK 0x1 + +#define HINIC_SQ_TASK_INFO4_SET(val, member) \ + (((u32)(val) & HINIC_SQ_TASK_INFO4_##member##_MASK) << \ + HINIC_SQ_TASK_INFO4_##member##_SHIFT) + +#define HINIC_RQ_CQE_STATUS_RXDONE_SHIFT 31 + +#define HINIC_RQ_CQE_STATUS_RXDONE_MASK 0x1 + +#define HINIC_RQ_CQE_STATUS_GET(val, member) \ + (((val) >> HINIC_RQ_CQE_STATUS_##member##_SHIFT) & \ + HINIC_RQ_CQE_STATUS_##member##_MASK) + +#define HINIC_RQ_CQE_STATUS_CLEAR(val, member) \ + ((val) & (~(HINIC_RQ_CQE_STATUS_##member##_MASK << \ + HINIC_RQ_CQE_STATUS_##member##_SHIFT))) + +#define HINIC_RQ_CQE_SGE_LEN_SHIFT 16 + +#define HINIC_RQ_CQE_SGE_LEN_MASK 0xFFFF + +#define HINIC_RQ_CQE_SGE_GET(val, member) \ + (((val) >> HINIC_RQ_CQE_SGE_##member##_SHIFT) & \ + HINIC_RQ_CQE_SGE_##member##_MASK) + +#define HINIC_RQ_CTRL_BUFDESC_SECT_LEN_SHIFT 0 +#define HINIC_RQ_CTRL_COMPLETE_FORMAT_SHIFT 15 +#define HINIC_RQ_CTRL_COMPLETE_LEN_SHIFT 27 +#define HINIC_RQ_CTRL_LEN_SHIFT 29 + +#define HINIC_RQ_CTRL_BUFDESC_SECT_LEN_MASK 0xFF +#define HINIC_RQ_CTRL_COMPLETE_FORMAT_MASK 0x1 +#define HINIC_RQ_CTRL_COMPLETE_LEN_MASK 0x3 +#define HINIC_RQ_CTRL_LEN_MASK 0x3 + +#define HINIC_RQ_CTRL_SET(val, member) \ + (((u32)(val) & HINIC_RQ_CTRL_##member##_MASK) << 
\ + HINIC_RQ_CTRL_##member##_SHIFT) + +#define HINIC_SQ_WQE_SIZE(nr_sges) \ + (sizeof(struct hinic_sq_ctrl) + \ + sizeof(struct hinic_sq_task) + \ + (nr_sges) * sizeof(struct hinic_sq_bufdesc)) + +#define HINIC_SCMD_DATA_LEN 16 + +#define HINIC_MAX_SQ_BUFDESCS 17 + +#define HINIC_SQ_WQE_MAX_SIZE 320 +#define HINIC_RQ_WQE_SIZE 32 + +enum hinic_l4offload_type { + HINIC_L4_OFF_DISABLE = 0, + HINIC_TCP_OFFLOAD_ENABLE = 1, + HINIC_SCTP_OFFLOAD_ENABLE = 2, + HINIC_UDP_OFFLOAD_ENABLE = 3, +}; + +enum hinic_vlan_offload { + HINIC_VLAN_OFF_DISABLE = 0, + HINIC_VLAN_OFF_ENABLE = 1, +}; + +enum hinic_pkt_parsed { + HINIC_PKT_NOT_PARSED = 0, + HINIC_PKT_PARSED = 1, +}; + +enum hinic_outer_l3type { + HINIC_OUTER_L3TYPE_UNKNOWN = 0, + HINIC_OUTER_L3TYPE_IPV6 = 1, + HINIC_OUTER_L3TYPE_IPV4_NO_CHKSUM = 2, + HINIC_OUTER_L3TYPE_IPV4_CHKSUM = 3, +}; + +enum hinic_media_type { + HINIC_MEDIA_UNKNOWN = 0, +}; + +enum hinic_l2type { + HINIC_L2TYPE_ETH = 0, +}; + +enum hinc_tunnel_l4type { + HINIC_TUNNEL_L4TYPE_UNKNOWN = 0, +}; + +struct hinic_cmdq_header { + u32 header_info; + u32 saved_data; +}; + +struct hinic_status { + u32 status_info; +}; + +struct hinic_ctrl { + u32 ctrl_info; +}; + +struct hinic_sge_resp { + struct hinic_sge sge; + u32 rsvd; +}; + +struct hinic_cmdq_completion { + /* HW Format */ + union { + struct hinic_sge_resp sge_resp; + u64 direct_resp; + }; +}; + +struct hinic_scmd_bufdesc { + u32 buf_len; + u32 rsvd; + u8 data[HINIC_SCMD_DATA_LEN]; +}; + +struct hinic_lcmd_bufdesc { + struct hinic_sge sge; + u32 rsvd1; + u64 rsvd2; + u64 rsvd3; +}; + +struct hinic_cmdq_wqe_scmd { + struct hinic_cmdq_header header; + u64 rsvd; + struct hinic_status status; + struct hinic_ctrl ctrl; + struct hinic_cmdq_completion completion; + struct hinic_scmd_bufdesc buf_desc; +}; + +struct hinic_cmdq_wqe_lcmd { + struct hinic_cmdq_header header; + struct hinic_status status; + struct hinic_ctrl ctrl; + struct hinic_cmdq_completion completion; + struct hinic_lcmd_bufdesc buf_desc; +}; + +struct hinic_cmdq_direct_wqe { + struct hinic_cmdq_wqe_scmd wqe_scmd; +}; + +struct hinic_cmdq_wqe { + /* HW Format */ + union { + struct hinic_cmdq_direct_wqe direct_wqe; + struct hinic_cmdq_wqe_lcmd wqe_lcmd; + }; +}; + +struct hinic_sq_ctrl { + u32 ctrl_info; + u32 queue_info; +}; + +struct hinic_sq_task { + u32 pkt_info0; + u32 pkt_info1; + u32 pkt_info2; + u32 ufo_v6_identify; + u32 pkt_info4; + u32 zero_pad; +}; + +struct hinic_sq_bufdesc { + struct hinic_sge sge; + u32 rsvd; +}; + +struct hinic_sq_wqe { + struct hinic_sq_ctrl ctrl; + struct hinic_sq_task task; + struct hinic_sq_bufdesc buf_descs[HINIC_MAX_SQ_BUFDESCS]; +}; + +struct hinic_rq_cqe { + u32 status; + u32 len; + + u32 rsvd2; + u32 rsvd3; + u32 rsvd4; + u32 rsvd5; + u32 rsvd6; + u32 rsvd7; +}; + +struct hinic_rq_ctrl { + u32 ctrl_info; +}; + +struct hinic_rq_cqe_sect { + struct hinic_sge sge; + u32 rsvd; +}; + +struct hinic_rq_bufdesc { + u32 hi_addr; + u32 lo_addr; +}; + +struct hinic_rq_wqe { + struct hinic_rq_ctrl ctrl; + u32 rsvd; + struct hinic_rq_cqe_sect cqe_sect; + struct hinic_rq_bufdesc buf_desc; +}; + +struct hinic_hw_wqe { + /* HW Format */ + union { + struct hinic_cmdq_wqe cmdq_wqe; + struct hinic_sq_wqe sq_wqe; + struct hinic_rq_wqe rq_wqe; + }; +}; + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c new file mode 100644 index 000000000000..eb53bd93065e --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -0,0 +1,1112 @@ +/* + * Huawei HiNIC PCI Express Linux driver + 
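/*
 * Worked numbers, not stated by the header itself: HINIC_SQ_WQE_SIZE() above
 * grows with the SGE count. Assuming struct hinic_sge (from hinic_common.h)
 * is the three u32 words (hi_addr, lo_addr, len) its users above imply, the
 * largest SQ WQE works out to
 *
 *   sizeof(struct hinic_sq_ctrl)    ==  8
 *   sizeof(struct hinic_sq_task)    == 24
 *   sizeof(struct hinic_sq_bufdesc) == 16
 *   HINIC_SQ_WQE_SIZE(HINIC_MAX_SQ_BUFDESCS) == 8 + 24 + 17 * 16 == 304
 *
 * which fits inside HINIC_SQ_WQE_MAX_SIZE (320). A function-scope check of
 * that bound could read:
 *
 *   BUILD_BUG_ON(HINIC_SQ_WQE_SIZE(HINIC_MAX_SQ_BUFDESCS) >
 *                HINIC_SQ_WQE_MAX_SIZE);
 */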
* Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hinic_hw_qp.h" +#include "hinic_hw_dev.h" +#include "hinic_port.h" +#include "hinic_tx.h" +#include "hinic_rx.h" +#include "hinic_dev.h" + +MODULE_AUTHOR("Huawei Technologies CO., Ltd"); +MODULE_DESCRIPTION("Huawei Intelligent NIC driver"); +MODULE_LICENSE("GPL"); + +static unsigned int tx_weight = 64; +module_param(tx_weight, uint, 0644); +MODULE_PARM_DESC(tx_weight, "Number Tx packets for NAPI budget (default=64)"); + +static unsigned int rx_weight = 64; +module_param(rx_weight, uint, 0644); +MODULE_PARM_DESC(rx_weight, "Number Rx packets for NAPI budget (default=64)"); + +#define PCI_DEVICE_ID_HI1822_PF 0x1822 + +#define HINIC_WQ_NAME "hinic_dev" + +#define MSG_ENABLE_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \ + NETIF_MSG_IFUP | \ + NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR) + +#define VLAN_BITMAP_SIZE(nic_dev) (ALIGN(VLAN_N_VID, 8) / 8) + +#define work_to_rx_mode_work(work) \ + container_of(work, struct hinic_rx_mode_work, work) + +#define rx_mode_work_to_nic_dev(rx_mode_work) \ + container_of(rx_mode_work, struct hinic_dev, rx_mode_work) + +static int change_mac_addr(struct net_device *netdev, const u8 *addr); + +static void set_link_speed(struct ethtool_link_ksettings *link_ksettings, + enum hinic_speed speed) +{ + switch (speed) { + case HINIC_SPEED_10MB_LINK: + link_ksettings->base.speed = SPEED_10; + break; + + case HINIC_SPEED_100MB_LINK: + link_ksettings->base.speed = SPEED_100; + break; + + case HINIC_SPEED_1000MB_LINK: + link_ksettings->base.speed = SPEED_1000; + break; + + case HINIC_SPEED_10GB_LINK: + link_ksettings->base.speed = SPEED_10000; + break; + + case HINIC_SPEED_25GB_LINK: + link_ksettings->base.speed = SPEED_25000; + break; + + case HINIC_SPEED_40GB_LINK: + link_ksettings->base.speed = SPEED_40000; + break; + + case HINIC_SPEED_100GB_LINK: + link_ksettings->base.speed = SPEED_100000; + break; + + default: + link_ksettings->base.speed = SPEED_UNKNOWN; + break; + } +} + +static int hinic_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings + *link_ksettings) +{ + struct hinic_dev *nic_dev = netdev_priv(netdev); + enum hinic_port_link_state link_state; + struct hinic_port_cap port_cap; + int err; + + ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); + ethtool_link_ksettings_add_link_mode(link_ksettings, supported, + Autoneg); + + link_ksettings->base.speed = SPEED_UNKNOWN; + link_ksettings->base.autoneg = AUTONEG_DISABLE; + link_ksettings->base.duplex = DUPLEX_UNKNOWN; + + err = hinic_port_get_cap(nic_dev, &port_cap); + if (err) { + netif_err(nic_dev, drv, netdev, + "Failed to get port capabilities\n"); + return err; + } + + err = hinic_port_link_state(nic_dev, &link_state); + if (err) { + netif_err(nic_dev, drv, netdev, + "Failed to get port link state\n"); + return err; + } + + if (link_state != HINIC_LINK_STATE_UP) { + 
netif_info(nic_dev, drv, netdev, "No link\n"); + return err; + } + + set_link_speed(link_ksettings, port_cap.speed); + + if (!!(port_cap.autoneg_cap & HINIC_AUTONEG_SUPPORTED)) + ethtool_link_ksettings_add_link_mode(link_ksettings, + advertising, Autoneg); + + if (port_cap.autoneg_state == HINIC_AUTONEG_ACTIVE) + link_ksettings->base.autoneg = AUTONEG_ENABLE; + + link_ksettings->base.duplex = (port_cap.duplex == HINIC_DUPLEX_FULL) ? + DUPLEX_FULL : DUPLEX_HALF; + return 0; +} + +static void hinic_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info) +{ + struct hinic_dev *nic_dev = netdev_priv(netdev); + struct hinic_hwdev *hwdev = nic_dev->hwdev; + struct hinic_hwif *hwif = hwdev->hwif; + + strlcpy(info->driver, HINIC_DRV_NAME, sizeof(info->driver)); + strlcpy(info->bus_info, pci_name(hwif->pdev), sizeof(info->bus_info)); +} + +static void hinic_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + ring->rx_max_pending = HINIC_RQ_DEPTH; + ring->tx_max_pending = HINIC_SQ_DEPTH; + ring->rx_pending = HINIC_RQ_DEPTH; + ring->tx_pending = HINIC_SQ_DEPTH; +} + +static void hinic_get_channels(struct net_device *netdev, + struct ethtool_channels *channels) +{ + struct hinic_dev *nic_dev = netdev_priv(netdev); + struct hinic_hwdev *hwdev = nic_dev->hwdev; + + channels->max_rx = hwdev->nic_cap.max_qps; + channels->max_tx = hwdev->nic_cap.max_qps; + channels->max_other = 0; + channels->max_combined = 0; + channels->rx_count = hinic_hwdev_num_qps(hwdev); + channels->tx_count = hinic_hwdev_num_qps(hwdev); + channels->other_count = 0; + channels->combined_count = 0; +} + +static const struct ethtool_ops hinic_ethtool_ops = { + .get_link_ksettings = hinic_get_link_ksettings, + .get_drvinfo = hinic_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_ringparam = hinic_get_ringparam, + .get_channels = hinic_get_channels, +}; + +static void update_rx_stats(struct hinic_dev *nic_dev, struct hinic_rxq *rxq) +{ + struct hinic_rxq_stats *nic_rx_stats = &nic_dev->rx_stats; + struct hinic_rxq_stats rx_stats; + + u64_stats_init(&rx_stats.syncp); + + hinic_rxq_get_stats(rxq, &rx_stats); + + u64_stats_update_begin(&nic_rx_stats->syncp); + nic_rx_stats->bytes += rx_stats.bytes; + nic_rx_stats->pkts += rx_stats.pkts; + u64_stats_update_end(&nic_rx_stats->syncp); + + hinic_rxq_clean_stats(rxq); +} + +static void update_tx_stats(struct hinic_dev *nic_dev, struct hinic_txq *txq) +{ + struct hinic_txq_stats *nic_tx_stats = &nic_dev->tx_stats; + struct hinic_txq_stats tx_stats; + + u64_stats_init(&tx_stats.syncp); + + hinic_txq_get_stats(txq, &tx_stats); + + u64_stats_update_begin(&nic_tx_stats->syncp); + nic_tx_stats->bytes += tx_stats.bytes; + nic_tx_stats->pkts += tx_stats.pkts; + nic_tx_stats->tx_busy += tx_stats.tx_busy; + nic_tx_stats->tx_wake += tx_stats.tx_wake; + nic_tx_stats->tx_dropped += tx_stats.tx_dropped; + u64_stats_update_end(&nic_tx_stats->syncp); + + hinic_txq_clean_stats(txq); +} + +static void update_nic_stats(struct hinic_dev *nic_dev) +{ + int i, num_qps = hinic_hwdev_num_qps(nic_dev->hwdev); + + for (i = 0; i < num_qps; i++) + update_rx_stats(nic_dev, &nic_dev->rxqs[i]); + + for (i = 0; i < num_qps; i++) + update_tx_stats(nic_dev, &nic_dev->txqs[i]); +} + +/** + * create_txqs - Create the Logical Tx Queues of specific NIC device + * @nic_dev: the specific NIC device + * + * Return 0 - Success, negative - Failure + **/ +static int create_txqs(struct hinic_dev *nic_dev) +{ + int err, i, j, num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev); + struct net_device 
*netdev = nic_dev->netdev; + size_t txq_size; + + if (nic_dev->txqs) + return -EINVAL; + + txq_size = num_txqs * sizeof(*nic_dev->txqs); + nic_dev->txqs = devm_kzalloc(&netdev->dev, txq_size, GFP_KERNEL); + if (!nic_dev->txqs) + return -ENOMEM; + + for (i = 0; i < num_txqs; i++) { + struct hinic_sq *sq = hinic_hwdev_get_sq(nic_dev->hwdev, i); + + err = hinic_init_txq(&nic_dev->txqs[i], sq, netdev); + if (err) { + netif_err(nic_dev, drv, netdev, + "Failed to init Txq\n"); + goto err_init_txq; + } + } + + return 0; + +err_init_txq: + for (j = 0; j < i; j++) + hinic_clean_txq(&nic_dev->txqs[j]); + + devm_kfree(&netdev->dev, nic_dev->txqs); + return err; +} + +/** + * free_txqs - Free the Logical Tx Queues of specific NIC device + * @nic_dev: the specific NIC device + **/ +static void free_txqs(struct hinic_dev *nic_dev) +{ + int i, num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev); + struct net_device *netdev = nic_dev->netdev; + + if (!nic_dev->txqs) + return; + + for (i = 0; i < num_txqs; i++) + hinic_clean_txq(&nic_dev->txqs[i]); + + devm_kfree(&netdev->dev, nic_dev->txqs); + nic_dev->txqs = NULL; +} + +/** + * create_txqs - Create the Logical Rx Queues of specific NIC device + * @nic_dev: the specific NIC device + * + * Return 0 - Success, negative - Failure + **/ +static int create_rxqs(struct hinic_dev *nic_dev) +{ + int err, i, j, num_rxqs = hinic_hwdev_num_qps(nic_dev->hwdev); + struct net_device *netdev = nic_dev->netdev; + size_t rxq_size; + + if (nic_dev->rxqs) + return -EINVAL; + + rxq_size = num_rxqs * sizeof(*nic_dev->rxqs); + nic_dev->rxqs = devm_kzalloc(&netdev->dev, rxq_size, GFP_KERNEL); + if (!nic_dev->rxqs) + return -ENOMEM; + + for (i = 0; i < num_rxqs; i++) { + struct hinic_rq *rq = hinic_hwdev_get_rq(nic_dev->hwdev, i); + + err = hinic_init_rxq(&nic_dev->rxqs[i], rq, netdev); + if (err) { + netif_err(nic_dev, drv, netdev, + "Failed to init rxq\n"); + goto err_init_rxq; + } + } + + return 0; + +err_init_rxq: + for (j = 0; j < i; j++) + hinic_clean_rxq(&nic_dev->rxqs[j]); + + devm_kfree(&netdev->dev, nic_dev->rxqs); + return err; +} + +/** + * free_txqs - Free the Logical Rx Queues of specific NIC device + * @nic_dev: the specific NIC device + **/ +static void free_rxqs(struct hinic_dev *nic_dev) +{ + int i, num_rxqs = hinic_hwdev_num_qps(nic_dev->hwdev); + struct net_device *netdev = nic_dev->netdev; + + if (!nic_dev->rxqs) + return; + + for (i = 0; i < num_rxqs; i++) + hinic_clean_rxq(&nic_dev->rxqs[i]); + + devm_kfree(&netdev->dev, nic_dev->rxqs); + nic_dev->rxqs = NULL; +} + +static int hinic_open(struct net_device *netdev) +{ + struct hinic_dev *nic_dev = netdev_priv(netdev); + enum hinic_port_link_state link_state; + int err, ret, num_qps; + + if (!(nic_dev->flags & HINIC_INTF_UP)) { + err = hinic_hwdev_ifup(nic_dev->hwdev); + if (err) { + netif_err(nic_dev, drv, netdev, + "Failed - HW interface up\n"); + return err; + } + } + + err = create_txqs(nic_dev); + if (err) { + netif_err(nic_dev, drv, netdev, + "Failed to create Tx queues\n"); + goto err_create_txqs; + } + + err = create_rxqs(nic_dev); + if (err) { + netif_err(nic_dev, drv, netdev, + "Failed to create Rx queues\n"); + goto err_create_rxqs; + } + + num_qps = hinic_hwdev_num_qps(nic_dev->hwdev); + netif_set_real_num_tx_queues(netdev, num_qps); + netif_set_real_num_rx_queues(netdev, num_qps); + + err = hinic_port_set_state(nic_dev, HINIC_PORT_ENABLE); + if (err) { + netif_err(nic_dev, drv, netdev, + "Failed to set port state\n"); + goto err_port_state; + } + + err = hinic_port_set_func_state(nic_dev, 
HINIC_FUNC_PORT_ENABLE); + if (err) { + netif_err(nic_dev, drv, netdev, + "Failed to set func port state\n"); + goto err_func_port_state; + } + + /* Wait up to 3 sec between port enable to link state */ + msleep(3000); + + down(&nic_dev->mgmt_lock); + + err = hinic_port_link_state(nic_dev, &link_state); + if (err) { + netif_err(nic_dev, drv, netdev, "Failed to get link state\n"); + goto err_port_link; + } + + if (link_state == HINIC_LINK_STATE_UP) + nic_dev->flags |= HINIC_LINK_UP; + + nic_dev->flags |= HINIC_INTF_UP; + + if ((nic_dev->flags & (HINIC_LINK_UP | HINIC_INTF_UP)) == + (HINIC_LINK_UP | HINIC_INTF_UP)) { + netif_info(nic_dev, drv, netdev, "link + intf UP\n"); + netif_carrier_on(netdev); + netif_tx_wake_all_queues(netdev); + } + + up(&nic_dev->mgmt_lock); + + netif_info(nic_dev, drv, netdev, "HINIC_INTF is UP\n"); + return 0; + +err_port_link: + up(&nic_dev->mgmt_lock); + ret = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE); + if (ret) + netif_warn(nic_dev, drv, netdev, + "Failed to revert func port state\n"); + +err_func_port_state: + ret = hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE); + if (ret) + netif_warn(nic_dev, drv, netdev, + "Failed to revert port state\n"); + +err_port_state: + free_rxqs(nic_dev); + +err_create_rxqs: + free_txqs(nic_dev); + +err_create_txqs: + if (!(nic_dev->flags & HINIC_INTF_UP)) + hinic_hwdev_ifdown(nic_dev->hwdev); + return err; +} + +static int hinic_close(struct net_device *netdev) +{ + struct hinic_dev *nic_dev = netdev_priv(netdev); + unsigned int flags; + int err; + + down(&nic_dev->mgmt_lock); + + flags = nic_dev->flags; + nic_dev->flags &= ~HINIC_INTF_UP; + + netif_carrier_off(netdev); + netif_tx_disable(netdev); + + update_nic_stats(nic_dev); + + up(&nic_dev->mgmt_lock); + + err = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE); + if (err) { + netif_err(nic_dev, drv, netdev, + "Failed to set func port state\n"); + nic_dev->flags |= (flags & HINIC_INTF_UP); + return err; + } + + err = hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE); + if (err) { + netif_err(nic_dev, drv, netdev, "Failed to set port state\n"); + nic_dev->flags |= (flags & HINIC_INTF_UP); + return err; + } + + free_rxqs(nic_dev); + free_txqs(nic_dev); + + if (flags & HINIC_INTF_UP) + hinic_hwdev_ifdown(nic_dev->hwdev); + + netif_info(nic_dev, drv, netdev, "HINIC_INTF is DOWN\n"); + return 0; +} + +static int hinic_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct hinic_dev *nic_dev = netdev_priv(netdev); + int err; + + netif_info(nic_dev, drv, netdev, "set_mtu = %d\n", new_mtu); + + err = hinic_port_set_mtu(nic_dev, new_mtu); + if (err) + netif_err(nic_dev, drv, netdev, "Failed to set port mtu\n"); + else + netdev->mtu = new_mtu; + + return err; +} + +/** + * change_mac_addr - change the main mac address of network device + * @netdev: network device + * @addr: mac address to set + * + * Return 0 - Success, negative - Failure + **/ +static int change_mac_addr(struct net_device *netdev, const u8 *addr) +{ + struct hinic_dev *nic_dev = netdev_priv(netdev); + u16 vid = 0; + int err; + + if (!is_valid_ether_addr(addr)) + return -EADDRNOTAVAIL; + + netif_info(nic_dev, drv, netdev, "change mac addr = %02x %02x %02x %02x %02x %02x\n", + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); + + down(&nic_dev->mgmt_lock); + + do { + err = hinic_port_del_mac(nic_dev, netdev->dev_addr, vid); + if (err) { + netif_err(nic_dev, drv, netdev, + "Failed to delete mac\n"); + break; + } + + err = hinic_port_add_mac(nic_dev, addr, vid); + if (err) { + 
netif_err(nic_dev, drv, netdev, "Failed to add mac\n"); + break; + } + + vid = find_next_bit(nic_dev->vlan_bitmap, VLAN_N_VID, vid + 1); + } while (vid != VLAN_N_VID); + + up(&nic_dev->mgmt_lock); + return err; +} + +static int hinic_set_mac_addr(struct net_device *netdev, void *addr) +{ + unsigned char new_mac[ETH_ALEN]; + struct sockaddr *saddr = addr; + int err; + + memcpy(new_mac, saddr->sa_data, ETH_ALEN); + + err = change_mac_addr(netdev, new_mac); + if (!err) + memcpy(netdev->dev_addr, new_mac, ETH_ALEN); + + return err; +} + +/** + * add_mac_addr - add mac address to network device + * @netdev: network device + * @addr: mac address to add + * + * Return 0 - Success, negative - Failure + **/ +static int add_mac_addr(struct net_device *netdev, const u8 *addr) +{ + struct hinic_dev *nic_dev = netdev_priv(netdev); + u16 vid = 0; + int err; + + if (!is_valid_ether_addr(addr)) + return -EADDRNOTAVAIL; + + netif_info(nic_dev, drv, netdev, "set mac addr = %02x %02x %02x %02x %02x %02x\n", + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); + + down(&nic_dev->mgmt_lock); + + do { + err = hinic_port_add_mac(nic_dev, addr, vid); + if (err) { + netif_err(nic_dev, drv, netdev, "Failed to add mac\n"); + break; + } + + vid = find_next_bit(nic_dev->vlan_bitmap, VLAN_N_VID, vid + 1); + } while (vid != VLAN_N_VID); + + up(&nic_dev->mgmt_lock); + return err; +} + +/** + * remove_mac_addr - remove mac address from network device + * @netdev: network device + * @addr: mac address to remove + * + * Return 0 - Success, negative - Failure + **/ +static int remove_mac_addr(struct net_device *netdev, const u8 *addr) +{ + struct hinic_dev *nic_dev = netdev_priv(netdev); + u16 vid = 0; + int err; + + if (!is_valid_ether_addr(addr)) + return -EADDRNOTAVAIL; + + netif_info(nic_dev, drv, netdev, "remove mac addr = %02x %02x %02x %02x %02x %02x\n", + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); + + down(&nic_dev->mgmt_lock); + + do { + err = hinic_port_del_mac(nic_dev, addr, vid); + if (err) { + netif_err(nic_dev, drv, netdev, + "Failed to delete mac\n"); + break; + } + + vid = find_next_bit(nic_dev->vlan_bitmap, VLAN_N_VID, vid + 1); + } while (vid != VLAN_N_VID); + + up(&nic_dev->mgmt_lock); + return err; +} + +static int hinic_vlan_rx_add_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +{ + struct hinic_dev *nic_dev = netdev_priv(netdev); + int ret, err; + + netif_info(nic_dev, drv, netdev, "add vid = %d\n", vid); + + down(&nic_dev->mgmt_lock); + + err = hinic_port_add_vlan(nic_dev, vid); + if (err) { + netif_err(nic_dev, drv, netdev, "Failed to add vlan\n"); + goto err_vlan_add; + } + + err = hinic_port_add_mac(nic_dev, netdev->dev_addr, vid); + if (err) { + netif_err(nic_dev, drv, netdev, "Failed to set mac\n"); + goto err_add_mac; + } + + bitmap_set(nic_dev->vlan_bitmap, vid, 1); + + up(&nic_dev->mgmt_lock); + return 0; + +err_add_mac: + ret = hinic_port_del_vlan(nic_dev, vid); + if (ret) + netif_err(nic_dev, drv, netdev, + "Failed to revert by removing vlan\n"); + +err_vlan_add: + up(&nic_dev->mgmt_lock); + return err; +} + +static int hinic_vlan_rx_kill_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +{ + struct hinic_dev *nic_dev = netdev_priv(netdev); + int err; + + netif_info(nic_dev, drv, netdev, "remove vid = %d\n", vid); + + down(&nic_dev->mgmt_lock); + + err = hinic_port_del_vlan(nic_dev, vid); + if (err) { + netif_err(nic_dev, drv, netdev, "Failed to delete vlan\n"); + goto err_del_vlan; + } + + bitmap_clear(nic_dev->vlan_bitmap, vid, 
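The MAC helpers above program the filter for VLAN 0 and then once for every VLAN whose bit is set in vlan_bitmap, advancing with find_next_bit() until VLAN_N_VID. A stand-alone sketch of that bitmap walk, with a simplified stand-in for find_next_bit() (names are made up):

#include <stdio.h>

#define VLAN_N_VID	4096
#define LONG_BITS	(8 * sizeof(unsigned long))
#define BITMAP_LONGS	(VLAN_N_VID / LONG_BITS)

static unsigned int next_set_bit(const unsigned long *map,
				 unsigned int size, unsigned int from)
{
	for (; from < size; from++)
		if (map[from / LONG_BITS] & (1UL << (from % LONG_BITS)))
			return from;
	return size;
}

static void program_filter(unsigned int vid)
{
	printf("program filter for vlan %u\n", vid);
}

int main(void)
{
	unsigned long vlan_bitmap[BITMAP_LONGS] = { 0 };
	unsigned int vid = 0;

	/* pretend VLANs 10 and 100 are configured */
	vlan_bitmap[10 / LONG_BITS] |= 1UL << (10 % LONG_BITS);
	vlan_bitmap[100 / LONG_BITS] |= 1UL << (100 % LONG_BITS);

	do {
		program_filter(vid);
		vid = next_set_bit(vlan_bitmap, VLAN_N_VID, vid + 1);
	} while (vid != VLAN_N_VID);

	return 0;
}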
1); + + up(&nic_dev->mgmt_lock); + return 0; + +err_del_vlan: + up(&nic_dev->mgmt_lock); + return err; +} + +static void set_rx_mode(struct work_struct *work) +{ + struct hinic_rx_mode_work *rx_mode_work = work_to_rx_mode_work(work); + struct hinic_dev *nic_dev = rx_mode_work_to_nic_dev(rx_mode_work); + + netif_info(nic_dev, drv, nic_dev->netdev, "set rx mode work\n"); + + hinic_port_set_rx_mode(nic_dev, rx_mode_work->rx_mode); + + __dev_uc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr); + __dev_mc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr); +} + +static void hinic_set_rx_mode(struct net_device *netdev) +{ + struct hinic_dev *nic_dev = netdev_priv(netdev); + struct hinic_rx_mode_work *rx_mode_work; + u32 rx_mode; + + rx_mode_work = &nic_dev->rx_mode_work; + + rx_mode = HINIC_RX_MODE_UC | + HINIC_RX_MODE_MC | + HINIC_RX_MODE_BC; + + if (netdev->flags & IFF_PROMISC) + rx_mode |= HINIC_RX_MODE_PROMISC; + else if (netdev->flags & IFF_ALLMULTI) + rx_mode |= HINIC_RX_MODE_MC_ALL; + + rx_mode_work->rx_mode = rx_mode; + + queue_work(nic_dev->workq, &rx_mode_work->work); +} + +static void hinic_tx_timeout(struct net_device *netdev) +{ + struct hinic_dev *nic_dev = netdev_priv(netdev); + + netif_err(nic_dev, drv, netdev, "Tx timeout\n"); +} + +static void hinic_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct hinic_dev *nic_dev = netdev_priv(netdev); + struct hinic_rxq_stats *nic_rx_stats; + struct hinic_txq_stats *nic_tx_stats; + + nic_rx_stats = &nic_dev->rx_stats; + nic_tx_stats = &nic_dev->tx_stats; + + down(&nic_dev->mgmt_lock); + + if (nic_dev->flags & HINIC_INTF_UP) + update_nic_stats(nic_dev); + + up(&nic_dev->mgmt_lock); + + stats->rx_bytes = nic_rx_stats->bytes; + stats->rx_packets = nic_rx_stats->pkts; + + stats->tx_bytes = nic_tx_stats->bytes; + stats->tx_packets = nic_tx_stats->pkts; + stats->tx_errors = nic_tx_stats->tx_dropped; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void hinic_netpoll(struct net_device *netdev) +{ + struct hinic_dev *nic_dev = netdev_priv(netdev); + int i, num_qps; + + num_qps = hinic_hwdev_num_qps(nic_dev->hwdev); + for (i = 0; i < num_qps; i++) { + struct hinic_txq *txq = &nic_dev->txqs[i]; + struct hinic_rxq *rxq = &nic_dev->rxqs[i]; + + napi_schedule(&txq->napi); + napi_schedule(&rxq->napi); + } +} +#endif + +static const struct net_device_ops hinic_netdev_ops = { + .ndo_open = hinic_open, + .ndo_stop = hinic_close, + .ndo_change_mtu = hinic_change_mtu, + .ndo_set_mac_address = hinic_set_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_vlan_rx_add_vid = hinic_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = hinic_vlan_rx_kill_vid, + .ndo_set_rx_mode = hinic_set_rx_mode, + .ndo_start_xmit = hinic_xmit_frame, + .ndo_tx_timeout = hinic_tx_timeout, + .ndo_get_stats64 = hinic_get_stats64, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = hinic_netpoll, +#endif +}; + +static void netdev_features_init(struct net_device *netdev) +{ + netdev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA; + + netdev->vlan_features = netdev->hw_features; + + netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; +} + +/** + * link_status_event_handler - link event handler + * @handle: nic device for the handler + * @buf_in: input buffer + * @in_size: input size + * @buf_in: output buffer + * @out_size: returned output size + * + * Return 0 - Success, negative - Failure + **/ +static void link_status_event_handler(void *handle, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct 
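hinic_set_rx_mode() runs in a context where the driver cannot sleep, so it only composes the receive-mode word and queues set_rx_mode() on the driver workqueue; the worker then issues the management command and syncs the unicast/multicast lists. The small program below shows how the mode word is composed from the interface flags; the bit positions mirror the hinic_rx_mode enum declared later in hinic_port.h, but the flag names here are illustrative, not the kernel's IFF_* values.

#include <stdio.h>

#define MODE_UC		(1u << 0)
#define MODE_MC		(1u << 1)
#define MODE_BC		(1u << 2)
#define MODE_MC_ALL	(1u << 3)
#define MODE_PROMISC	(1u << 4)

#define FLAG_PROMISC	(1u << 0)
#define FLAG_ALLMULTI	(1u << 1)

static unsigned int build_rx_mode(unsigned int if_flags)
{
	unsigned int mode = MODE_UC | MODE_MC | MODE_BC;

	if (if_flags & FLAG_PROMISC)
		mode |= MODE_PROMISC;
	else if (if_flags & FLAG_ALLMULTI)
		mode |= MODE_MC_ALL;

	return mode;
}

int main(void)
{
	printf("promisc mode word:  0x%x\n", build_rx_mode(FLAG_PROMISC));
	printf("allmulti mode word: 0x%x\n", build_rx_mode(FLAG_ALLMULTI));
	return 0;
}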
hinic_port_link_status *link_status, *ret_link_status; + struct hinic_dev *nic_dev = handle; + + link_status = buf_in; + + if (link_status->link == HINIC_LINK_STATE_UP) { + down(&nic_dev->mgmt_lock); + + nic_dev->flags |= HINIC_LINK_UP; + + if ((nic_dev->flags & (HINIC_LINK_UP | HINIC_INTF_UP)) == + (HINIC_LINK_UP | HINIC_INTF_UP)) { + netif_carrier_on(nic_dev->netdev); + netif_tx_wake_all_queues(nic_dev->netdev); + } + + up(&nic_dev->mgmt_lock); + + netif_info(nic_dev, drv, nic_dev->netdev, "HINIC_Link is UP\n"); + } else { + down(&nic_dev->mgmt_lock); + + nic_dev->flags &= ~HINIC_LINK_UP; + + netif_carrier_off(nic_dev->netdev); + netif_tx_disable(nic_dev->netdev); + + up(&nic_dev->mgmt_lock); + + netif_info(nic_dev, drv, nic_dev->netdev, "HINIC_Link is DOWN\n"); + } + + ret_link_status = buf_out; + ret_link_status->status = 0; + + *out_size = sizeof(*ret_link_status); +} + +/** + * nic_dev_init - Initialize the NIC device + * @pdev: the NIC pci device + * + * Return 0 - Success, negative - Failure + **/ +static int nic_dev_init(struct pci_dev *pdev) +{ + struct hinic_rx_mode_work *rx_mode_work; + struct hinic_txq_stats *tx_stats; + struct hinic_rxq_stats *rx_stats; + struct hinic_dev *nic_dev; + struct net_device *netdev; + struct hinic_hwdev *hwdev; + int err, num_qps; + + hwdev = hinic_init_hwdev(pdev); + if (IS_ERR(hwdev)) { + dev_err(&pdev->dev, "Failed to initialize HW device\n"); + return PTR_ERR(hwdev); + } + + num_qps = hinic_hwdev_num_qps(hwdev); + if (num_qps <= 0) { + dev_err(&pdev->dev, "Invalid number of QPS\n"); + err = -EINVAL; + goto err_num_qps; + } + + netdev = alloc_etherdev_mq(sizeof(*nic_dev), num_qps); + if (!netdev) { + dev_err(&pdev->dev, "Failed to allocate Ethernet device\n"); + err = -ENOMEM; + goto err_alloc_etherdev; + } + + netdev->netdev_ops = &hinic_netdev_ops; + netdev->ethtool_ops = &hinic_ethtool_ops; + netdev->max_mtu = ETH_MAX_MTU; + + nic_dev = netdev_priv(netdev); + nic_dev->netdev = netdev; + nic_dev->hwdev = hwdev; + nic_dev->msg_enable = MSG_ENABLE_DEFAULT; + nic_dev->flags = 0; + nic_dev->txqs = NULL; + nic_dev->rxqs = NULL; + nic_dev->tx_weight = tx_weight; + nic_dev->rx_weight = rx_weight; + + sema_init(&nic_dev->mgmt_lock, 1); + + tx_stats = &nic_dev->tx_stats; + rx_stats = &nic_dev->rx_stats; + + u64_stats_init(&tx_stats->syncp); + u64_stats_init(&rx_stats->syncp); + + nic_dev->vlan_bitmap = devm_kzalloc(&pdev->dev, + VLAN_BITMAP_SIZE(nic_dev), + GFP_KERNEL); + if (!nic_dev->vlan_bitmap) { + err = -ENOMEM; + goto err_vlan_bitmap; + } + + nic_dev->workq = create_singlethread_workqueue(HINIC_WQ_NAME); + if (!nic_dev->workq) { + err = -ENOMEM; + goto err_workq; + } + + pci_set_drvdata(pdev, netdev); + + err = hinic_port_get_mac(nic_dev, netdev->dev_addr); + if (err) + dev_warn(&pdev->dev, "Failed to get mac address\n"); + + err = hinic_port_add_mac(nic_dev, netdev->dev_addr, 0); + if (err) { + dev_err(&pdev->dev, "Failed to add mac\n"); + goto err_add_mac; + } + + err = hinic_port_set_mtu(nic_dev, netdev->mtu); + if (err) { + dev_err(&pdev->dev, "Failed to set mtu\n"); + goto err_set_mtu; + } + + rx_mode_work = &nic_dev->rx_mode_work; + INIT_WORK(&rx_mode_work->work, set_rx_mode); + + netdev_features_init(netdev); + + netif_carrier_off(netdev); + + hinic_hwdev_cb_register(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS, + nic_dev, link_status_event_handler); + + err = register_netdev(netdev); + if (err) { + dev_err(&pdev->dev, "Failed to register netdev\n"); + goto err_reg_netdev; + } + + return 0; + +err_reg_netdev: + 
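Both hinic_open() and the link event handler above gate the carrier on two conditions: the interface must be administratively up (HINIC_INTF_UP) and the port must report link (HINIC_LINK_UP). A minimal stand-alone expression of that check, with illustrative flag values:

#include <stdbool.h>
#include <stdio.h>

#define FLAG_INTF_UP	(1u << 0)
#define FLAG_LINK_UP	(1u << 1)

static bool carrier_should_be_on(unsigned int flags)
{
	return (flags & (FLAG_INTF_UP | FLAG_LINK_UP)) ==
	       (FLAG_INTF_UP | FLAG_LINK_UP);
}

int main(void)
{
	printf("intf up only:      %d\n", carrier_should_be_on(FLAG_INTF_UP));
	printf("intf up + link up: %d\n",
	       carrier_should_be_on(FLAG_INTF_UP | FLAG_LINK_UP));
	return 0;
}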
hinic_hwdev_cb_unregister(nic_dev->hwdev, + HINIC_MGMT_MSG_CMD_LINK_STATUS); + cancel_work_sync(&rx_mode_work->work); + +err_set_mtu: +err_add_mac: + pci_set_drvdata(pdev, NULL); + destroy_workqueue(nic_dev->workq); + +err_workq: +err_vlan_bitmap: + free_netdev(netdev); + +err_alloc_etherdev: +err_num_qps: + hinic_free_hwdev(hwdev); + return err; +} + +static int hinic_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + int err = pci_enable_device(pdev); + + if (err) { + dev_err(&pdev->dev, "Failed to enable PCI device\n"); + return err; + } + + err = pci_request_regions(pdev, HINIC_DRV_NAME); + if (err) { + dev_err(&pdev->dev, "Failed to request PCI regions\n"); + goto err_pci_regions; + } + + pci_set_master(pdev); + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + dev_warn(&pdev->dev, "Couldn't set 64-bit DMA mask\n"); + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "Failed to set DMA mask\n"); + goto err_dma_mask; + } + } + + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + dev_warn(&pdev->dev, + "Couldn't set 64-bit consistent DMA mask\n"); + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "Failed to set consistent DMA mask\n"); + goto err_dma_consistent_mask; + } + } + + err = nic_dev_init(pdev); + if (err) { + dev_err(&pdev->dev, "Failed to initialize NIC device\n"); + goto err_nic_dev_init; + } + + dev_info(&pdev->dev, "HiNIC driver - probed\n"); + return 0; + +err_nic_dev_init: +err_dma_consistent_mask: +err_dma_mask: + pci_release_regions(pdev); + +err_pci_regions: + pci_disable_device(pdev); + return err; +} + +static void hinic_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct hinic_dev *nic_dev = netdev_priv(netdev); + struct hinic_rx_mode_work *rx_mode_work; + + unregister_netdev(netdev); + + hinic_hwdev_cb_unregister(nic_dev->hwdev, + HINIC_MGMT_MSG_CMD_LINK_STATUS); + + rx_mode_work = &nic_dev->rx_mode_work; + cancel_work_sync(&rx_mode_work->work); + + pci_set_drvdata(pdev, NULL); + + destroy_workqueue(nic_dev->workq); + + hinic_free_hwdev(nic_dev->hwdev); + + free_netdev(netdev); + + pci_release_regions(pdev); + pci_disable_device(pdev); + + dev_info(&pdev->dev, "HiNIC driver - removed\n"); +} + +static const struct pci_device_id hinic_pci_table[] = { + { PCI_VDEVICE(HUAWEI, PCI_DEVICE_ID_HI1822_PF), 0}, + { 0, 0} +}; +MODULE_DEVICE_TABLE(pci, hinic_pci_table); + +static struct pci_driver hinic_driver = { + .name = HINIC_DRV_NAME, + .id_table = hinic_pci_table, + .probe = hinic_probe, + .remove = hinic_remove, +}; + +module_pci_driver(hinic_driver); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c new file mode 100644 index 000000000000..4d4e3f05fb5f --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_port.c @@ -0,0 +1,379 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
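hinic_probe() falls back from a 64-bit to a 32-bit DMA mask, once for the streaming mask and once for the coherent mask. The sketch below expresses the same fallback with the combined dma_set_mask_and_coherent() helper; this is an illustration under that assumption, not the driver's code, which sets the two masks individually via pci_set_dma_mask()/pci_set_consistent_dma_mask().

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int set_dma_masks(struct pci_dev *pdev)
{
	int err;

	/* prefer 64-bit DMA, fall back to 32-bit if unsupported */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "No 64-bit DMA, trying 32-bit\n");
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	}
	return err;
}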
+ * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "hinic_hw_if.h" +#include "hinic_hw_dev.h" +#include "hinic_port.h" +#include "hinic_dev.h" + +#define HINIC_MIN_MTU_SIZE 256 +#define HINIC_MAX_JUMBO_FRAME_SIZE 15872 + +enum mac_op { + MAC_DEL, + MAC_SET, +}; + +/** + * change_mac - change(add or delete) mac address + * @nic_dev: nic device + * @addr: mac address + * @vlan_id: vlan number to set with the mac + * @op: add or delete the mac + * + * Return 0 - Success, negative - Failure + **/ +static int change_mac(struct hinic_dev *nic_dev, const u8 *addr, + u16 vlan_id, enum mac_op op) +{ + struct net_device *netdev = nic_dev->netdev; + struct hinic_hwdev *hwdev = nic_dev->hwdev; + struct hinic_port_mac_cmd port_mac_cmd; + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + enum hinic_port_cmd cmd; + u16 out_size; + int err; + + if (vlan_id >= VLAN_N_VID) { + netif_err(nic_dev, drv, netdev, "Invalid VLAN number\n"); + return -EINVAL; + } + + if (op == MAC_SET) + cmd = HINIC_PORT_CMD_SET_MAC; + else + cmd = HINIC_PORT_CMD_DEL_MAC; + + port_mac_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif); + port_mac_cmd.vlan_id = vlan_id; + memcpy(port_mac_cmd.mac, addr, ETH_ALEN); + + err = hinic_port_msg_cmd(hwdev, cmd, &port_mac_cmd, + sizeof(port_mac_cmd), + &port_mac_cmd, &out_size); + if (err || (out_size != sizeof(port_mac_cmd)) || port_mac_cmd.status) { + dev_err(&pdev->dev, "Failed to change MAC, ret = %d\n", + port_mac_cmd.status); + return -EFAULT; + } + + return 0; +} + +/** + * hinic_port_add_mac - add mac address + * @nic_dev: nic device + * @addr: mac address + * @vlan_id: vlan number to set with the mac + * + * Return 0 - Success, negative - Failure + **/ +int hinic_port_add_mac(struct hinic_dev *nic_dev, + const u8 *addr, u16 vlan_id) +{ + return change_mac(nic_dev, addr, vlan_id, MAC_SET); +} + +/** + * hinic_port_del_mac - remove mac address + * @nic_dev: nic device + * @addr: mac address + * @vlan_id: vlan number that is connected to the mac + * + * Return 0 - Success, negative - Failure + **/ +int hinic_port_del_mac(struct hinic_dev *nic_dev, const u8 *addr, + u16 vlan_id) +{ + return change_mac(nic_dev, addr, vlan_id, MAC_DEL); +} + +/** + * hinic_port_get_mac - get the mac address of the nic device + * @nic_dev: nic device + * @addr: returned mac address + * + * Return 0 - Success, negative - Failure + **/ +int hinic_port_get_mac(struct hinic_dev *nic_dev, u8 *addr) +{ + struct hinic_hwdev *hwdev = nic_dev->hwdev; + struct hinic_port_mac_cmd port_mac_cmd; + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + u16 out_size; + int err; + + port_mac_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif); + + err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_MAC, + &port_mac_cmd, sizeof(port_mac_cmd), + &port_mac_cmd, &out_size); + if (err || (out_size != sizeof(port_mac_cmd)) || port_mac_cmd.status) { + dev_err(&pdev->dev, "Failed to get mac, ret = %d\n", + port_mac_cmd.status); + return -EFAULT; + } + + memcpy(addr, port_mac_cmd.mac, ETH_ALEN); + return 0; +} + +/** + * hinic_port_set_mtu - set mtu + * @nic_dev: nic device + * @new_mtu: new mtu + * + * Return 0 - Success, negative - Failure + **/ +int hinic_port_set_mtu(struct hinic_dev *nic_dev, int new_mtu) +{ + struct net_device *netdev = nic_dev->netdev; + struct hinic_hwdev *hwdev = nic_dev->hwdev; + struct hinic_port_mtu_cmd port_mtu_cmd; + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + int err, max_frame; + u16 out_size; + 
+ if (new_mtu < HINIC_MIN_MTU_SIZE) { + netif_err(nic_dev, drv, netdev, "mtu < MIN MTU size"); + return -EINVAL; + } + + max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + if (max_frame > HINIC_MAX_JUMBO_FRAME_SIZE) { + netif_err(nic_dev, drv, netdev, "mtu > MAX MTU size"); + return -EINVAL; + } + + port_mtu_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif); + port_mtu_cmd.mtu = new_mtu; + + err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_CHANGE_MTU, + &port_mtu_cmd, sizeof(port_mtu_cmd), + &port_mtu_cmd, &out_size); + if (err || (out_size != sizeof(port_mtu_cmd)) || port_mtu_cmd.status) { + dev_err(&pdev->dev, "Failed to set mtu, ret = %d\n", + port_mtu_cmd.status); + return -EFAULT; + } + + return 0; +} + +/** + * hinic_port_add_vlan - add vlan to the nic device + * @nic_dev: nic device + * @vlan_id: the vlan number to add + * + * Return 0 - Success, negative - Failure + **/ +int hinic_port_add_vlan(struct hinic_dev *nic_dev, u16 vlan_id) +{ + struct hinic_hwdev *hwdev = nic_dev->hwdev; + struct hinic_port_vlan_cmd port_vlan_cmd; + + port_vlan_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwdev->hwif); + port_vlan_cmd.vlan_id = vlan_id; + + return hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_ADD_VLAN, + &port_vlan_cmd, sizeof(port_vlan_cmd), + NULL, NULL); +} + +/** + * hinic_port_del_vlan - delete vlan from the nic device + * @nic_dev: nic device + * @vlan_id: the vlan number to delete + * + * Return 0 - Success, negative - Failure + **/ +int hinic_port_del_vlan(struct hinic_dev *nic_dev, u16 vlan_id) +{ + struct hinic_hwdev *hwdev = nic_dev->hwdev; + struct hinic_port_vlan_cmd port_vlan_cmd; + + port_vlan_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwdev->hwif); + port_vlan_cmd.vlan_id = vlan_id; + + return hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_DEL_VLAN, + &port_vlan_cmd, sizeof(port_vlan_cmd), + NULL, NULL); +} + +/** + * hinic_port_set_rx_mode - set rx mode in the nic device + * @nic_dev: nic device + * @rx_mode: the rx mode to set + * + * Return 0 - Success, negative - Failure + **/ +int hinic_port_set_rx_mode(struct hinic_dev *nic_dev, u32 rx_mode) +{ + struct hinic_hwdev *hwdev = nic_dev->hwdev; + struct hinic_port_rx_mode_cmd rx_mode_cmd; + + rx_mode_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwdev->hwif); + rx_mode_cmd.rx_mode = rx_mode; + + return hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RX_MODE, + &rx_mode_cmd, sizeof(rx_mode_cmd), + NULL, NULL); +} + +/** + * hinic_port_link_state - get the link state + * @nic_dev: nic device + * @link_state: the returned link state + * + * Return 0 - Success, negative - Failure + **/ +int hinic_port_link_state(struct hinic_dev *nic_dev, + enum hinic_port_link_state *link_state) +{ + struct hinic_hwdev *hwdev = nic_dev->hwdev; + struct hinic_hwif *hwif = hwdev->hwif; + struct hinic_port_link_cmd link_cmd; + struct pci_dev *pdev = hwif->pdev; + u16 out_size; + int err; + + if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { + dev_err(&pdev->dev, "unsupported PCI Function type\n"); + return -EINVAL; + } + + link_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif); + + err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_LINK_STATE, + &link_cmd, sizeof(link_cmd), + &link_cmd, &out_size); + if (err || (out_size != sizeof(link_cmd)) || link_cmd.status) { + dev_err(&pdev->dev, "Failed to get link state, ret = %d\n", + link_cmd.status); + return -EINVAL; + } + + *link_state = link_cmd.state; + return 0; +} + +/** + * hinic_port_set_state - set port state + * @nic_dev: nic device + * @state: the state to set + * + * Return 0 - Success, negative - Failure + **/ +int hinic_port_set_state(struct hinic_dev 
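The MTU bounds check above works on the wire-frame size: the frame is the MTU plus the 14-byte Ethernet header and the 4-byte FCS, so with a 15872-byte jumbo-frame limit the largest accepted MTU is 15872 - 14 - 4 = 15854 bytes. A stand-alone illustration of the same arithmetic:

#include <stdio.h>

#define ETH_HLEN		14
#define ETH_FCS_LEN		4
#define MIN_MTU_SIZE		256
#define MAX_JUMBO_FRAME_SIZE	15872

static int mtu_valid(int new_mtu)
{
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	return new_mtu >= MIN_MTU_SIZE && max_frame <= MAX_JUMBO_FRAME_SIZE;
}

int main(void)
{
	printf("1500:  %s\n", mtu_valid(1500) ? "ok" : "rejected");
	printf("15854: %s\n", mtu_valid(15854) ? "ok" : "rejected");
	printf("15855: %s\n", mtu_valid(15855) ? "ok" : "rejected");
	return 0;
}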
*nic_dev, enum hinic_port_state state) +{ + struct hinic_hwdev *hwdev = nic_dev->hwdev; + struct hinic_port_state_cmd port_state; + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + u16 out_size; + int err; + + if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { + dev_err(&pdev->dev, "unsupported PCI Function type\n"); + return -EINVAL; + } + + port_state.state = state; + + err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_PORT_STATE, + &port_state, sizeof(port_state), + &port_state, &out_size); + if (err || (out_size != sizeof(port_state)) || port_state.status) { + dev_err(&pdev->dev, "Failed to set port state, ret = %d\n", + port_state.status); + return -EFAULT; + } + + return 0; +} + +/** + * hinic_port_set_func_state- set func device state + * @nic_dev: nic device + * @state: the state to set + * + * Return 0 - Success, negative - Failure + **/ +int hinic_port_set_func_state(struct hinic_dev *nic_dev, + enum hinic_func_port_state state) +{ + struct hinic_port_func_state_cmd func_state; + struct hinic_hwdev *hwdev = nic_dev->hwdev; + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + u16 out_size; + int err; + + func_state.func_idx = HINIC_HWIF_FUNC_IDX(hwif); + func_state.state = state; + + err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_FUNC_STATE, + &func_state, sizeof(func_state), + &func_state, &out_size); + if (err || (out_size != sizeof(func_state)) || func_state.status) { + dev_err(&pdev->dev, "Failed to set port func state, ret = %d\n", + func_state.status); + return -EFAULT; + } + + return 0; +} + +/** + * hinic_port_get_cap - get port capabilities + * @nic_dev: nic device + * @port_cap: returned port capabilities + * + * Return 0 - Success, negative - Failure + **/ +int hinic_port_get_cap(struct hinic_dev *nic_dev, + struct hinic_port_cap *port_cap) +{ + struct hinic_hwdev *hwdev = nic_dev->hwdev; + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + u16 out_size; + int err; + + port_cap->func_idx = HINIC_HWIF_FUNC_IDX(hwif); + + err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_CAP, + port_cap, sizeof(*port_cap), + port_cap, &out_size); + if (err || (out_size != sizeof(*port_cap)) || port_cap->status) { + dev_err(&pdev->dev, + "Failed to get port capabilities, ret = %d\n", + port_cap->status); + return -EINVAL; + } + + return 0; +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.h b/drivers/net/ethernet/huawei/hinic/hinic_port.h new file mode 100644 index 000000000000..9404365195dd --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_port.h @@ -0,0 +1,198 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#ifndef HINIC_PORT_H +#define HINIC_PORT_H + +#include +#include +#include + +#include "hinic_dev.h" + +enum hinic_rx_mode { + HINIC_RX_MODE_UC = BIT(0), + HINIC_RX_MODE_MC = BIT(1), + HINIC_RX_MODE_BC = BIT(2), + HINIC_RX_MODE_MC_ALL = BIT(3), + HINIC_RX_MODE_PROMISC = BIT(4), +}; + +enum hinic_port_link_state { + HINIC_LINK_STATE_DOWN, + HINIC_LINK_STATE_UP, +}; + +enum hinic_port_state { + HINIC_PORT_DISABLE = 0, + HINIC_PORT_ENABLE = 3, +}; + +enum hinic_func_port_state { + HINIC_FUNC_PORT_DISABLE = 0, + HINIC_FUNC_PORT_ENABLE = 2, +}; + +enum hinic_autoneg_cap { + HINIC_AUTONEG_UNSUPPORTED, + HINIC_AUTONEG_SUPPORTED, +}; + +enum hinic_autoneg_state { + HINIC_AUTONEG_DISABLED, + HINIC_AUTONEG_ACTIVE, +}; + +enum hinic_duplex { + HINIC_DUPLEX_HALF, + HINIC_DUPLEX_FULL, +}; + +enum hinic_speed { + HINIC_SPEED_10MB_LINK = 0, + HINIC_SPEED_100MB_LINK, + HINIC_SPEED_1000MB_LINK, + HINIC_SPEED_10GB_LINK, + HINIC_SPEED_25GB_LINK, + HINIC_SPEED_40GB_LINK, + HINIC_SPEED_100GB_LINK, + + HINIC_SPEED_UNKNOWN = 0xFF, +}; + +struct hinic_port_mac_cmd { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u16 vlan_id; + u16 rsvd1; + unsigned char mac[ETH_ALEN]; +}; + +struct hinic_port_mtu_cmd { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u16 rsvd1; + u32 mtu; +}; + +struct hinic_port_vlan_cmd { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u16 vlan_id; +}; + +struct hinic_port_rx_mode_cmd { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u16 rsvd; + u32 rx_mode; +}; + +struct hinic_port_link_cmd { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u8 state; + u8 rsvd1; +}; + +struct hinic_port_state_cmd { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 state; + u8 rsvd1[3]; +}; + +struct hinic_port_link_status { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 rsvd1; + u8 link; + u8 rsvd2; +}; + +struct hinic_port_func_state_cmd { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u16 rsvd1; + u8 state; + u8 rsvd2[3]; +}; + +struct hinic_port_cap { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u16 rsvd1; + u8 port_type; + u8 autoneg_cap; + u8 autoneg_state; + u8 duplex; + u8 speed; + u8 rsvd2[3]; +}; + +int hinic_port_add_mac(struct hinic_dev *nic_dev, const u8 *addr, + u16 vlan_id); + +int hinic_port_del_mac(struct hinic_dev *nic_dev, const u8 *addr, + u16 vlan_id); + +int hinic_port_get_mac(struct hinic_dev *nic_dev, u8 *addr); + +int hinic_port_set_mtu(struct hinic_dev *nic_dev, int new_mtu); + +int hinic_port_add_vlan(struct hinic_dev *nic_dev, u16 vlan_id); + +int hinic_port_del_vlan(struct hinic_dev *nic_dev, u16 vlan_id); + +int hinic_port_set_rx_mode(struct hinic_dev *nic_dev, u32 rx_mode); + +int hinic_port_link_state(struct hinic_dev *nic_dev, + enum hinic_port_link_state *link_state); + +int hinic_port_set_state(struct hinic_dev *nic_dev, + enum hinic_port_state state); + +int hinic_port_set_func_state(struct hinic_dev *nic_dev, + enum hinic_func_port_state state); + +int hinic_port_get_cap(struct hinic_dev *nic_dev, + struct hinic_port_cap *port_cap); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c new file mode 100644 index 000000000000..1d4f712b15a8 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c @@ -0,0 +1,509 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * 
under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hinic_common.h" +#include "hinic_hw_if.h" +#include "hinic_hw_wqe.h" +#include "hinic_hw_wq.h" +#include "hinic_hw_qp.h" +#include "hinic_hw_dev.h" +#include "hinic_rx.h" +#include "hinic_dev.h" + +#define RX_IRQ_NO_PENDING 0 +#define RX_IRQ_NO_COALESC 0 +#define RX_IRQ_NO_LLI_TIMER 0 +#define RX_IRQ_NO_CREDIT 0 +#define RX_IRQ_NO_RESEND_TIMER 0 + +/** + * hinic_rxq_clean_stats - Clean the statistics of specific queue + * @rxq: Logical Rx Queue + **/ +void hinic_rxq_clean_stats(struct hinic_rxq *rxq) +{ + struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats; + + u64_stats_update_begin(&rxq_stats->syncp); + rxq_stats->pkts = 0; + rxq_stats->bytes = 0; + u64_stats_update_end(&rxq_stats->syncp); +} + +/** + * hinic_rxq_get_stats - get statistics of Rx Queue + * @rxq: Logical Rx Queue + * @stats: return updated stats here + **/ +void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats) +{ + struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats; + unsigned int start; + + u64_stats_update_begin(&stats->syncp); + do { + start = u64_stats_fetch_begin(&rxq_stats->syncp); + stats->pkts = rxq_stats->pkts; + stats->bytes = rxq_stats->bytes; + } while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); + u64_stats_update_end(&stats->syncp); +} + +/** + * rxq_stats_init - Initialize the statistics of specific queue + * @rxq: Logical Rx Queue + **/ +static void rxq_stats_init(struct hinic_rxq *rxq) +{ + struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats; + + u64_stats_init(&rxq_stats->syncp); + hinic_rxq_clean_stats(rxq); +} + +/** + * rx_alloc_skb - allocate skb and map it to dma address + * @rxq: rx queue + * @dma_addr: returned dma address for the skb + * + * Return skb + **/ +static struct sk_buff *rx_alloc_skb(struct hinic_rxq *rxq, + dma_addr_t *dma_addr) +{ + struct hinic_dev *nic_dev = netdev_priv(rxq->netdev); + struct hinic_hwdev *hwdev = nic_dev->hwdev; + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + struct sk_buff *skb; + dma_addr_t addr; + int err; + + skb = netdev_alloc_skb_ip_align(rxq->netdev, rxq->rq->buf_sz); + if (!skb) { + netdev_err(rxq->netdev, "Failed to allocate Rx SKB\n"); + return NULL; + } + + addr = dma_map_single(&pdev->dev, skb->data, rxq->rq->buf_sz, + DMA_FROM_DEVICE); + err = dma_mapping_error(&pdev->dev, addr); + if (err) { + dev_err(&pdev->dev, "Failed to map Rx DMA, err = %d\n", err); + goto err_rx_map; + } + + *dma_addr = addr; + return skb; + +err_rx_map: + dev_kfree_skb_any(skb); + return NULL; +} + +/** + * rx_unmap_skb - unmap the dma address of the skb + * @rxq: rx queue + * @dma_addr: dma address of the skb + **/ +static void rx_unmap_skb(struct hinic_rxq *rxq, dma_addr_t dma_addr) +{ + struct hinic_dev *nic_dev = netdev_priv(rxq->netdev); + struct hinic_hwdev *hwdev = nic_dev->hwdev; + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + + dma_unmap_single(&pdev->dev, dma_addr, rxq->rq->buf_sz, + DMA_FROM_DEVICE); +} + +/** + * rx_free_skb - unmap and free 
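rx_alloc_skb() (and later tx_map_skb()) follows the standard streaming-DMA pattern: every dma_map_single() is checked with dma_mapping_error(), and the buffer is released if the mapping failed. A compact sketch of that pattern with a made-up buffer allocation, not the driver's code:

#include <linux/dma-mapping.h>
#include <linux/slab.h>

static void *map_one_buffer(struct device *dev, size_t len,
			    dma_addr_t *dma_addr)
{
	void *buf = kmalloc(len, GFP_ATOMIC);

	if (!buf)
		return NULL;

	*dma_addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma_addr)) {
		/* never hand an unmapped address to the hardware */
		kfree(buf);
		return NULL;
	}
	return buf;
}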
skb + * @rxq: rx queue + * @skb: skb to free + * @dma_addr: dma address of the skb + **/ +static void rx_free_skb(struct hinic_rxq *rxq, struct sk_buff *skb, + dma_addr_t dma_addr) +{ + rx_unmap_skb(rxq, dma_addr); + dev_kfree_skb_any(skb); +} + +/** + * rx_alloc_pkts - allocate pkts in rx queue + * @rxq: rx queue + * + * Return number of skbs allocated + **/ +static int rx_alloc_pkts(struct hinic_rxq *rxq) +{ + struct hinic_dev *nic_dev = netdev_priv(rxq->netdev); + struct hinic_rq_wqe *rq_wqe; + unsigned int free_wqebbs; + struct hinic_sge sge; + dma_addr_t dma_addr; + struct sk_buff *skb; + int i, alloc_more; + u16 prod_idx; + + free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq); + alloc_more = 0; + + /* Limit the allocation chunks */ + if (free_wqebbs > nic_dev->rx_weight) + free_wqebbs = nic_dev->rx_weight; + + for (i = 0; i < free_wqebbs; i++) { + skb = rx_alloc_skb(rxq, &dma_addr); + if (!skb) { + netdev_err(rxq->netdev, "Failed to alloc Rx skb\n"); + alloc_more = 1; + goto skb_out; + } + + hinic_set_sge(&sge, dma_addr, skb->len); + + rq_wqe = hinic_rq_get_wqe(rxq->rq, HINIC_RQ_WQE_SIZE, + &prod_idx); + if (!rq_wqe) { + rx_free_skb(rxq, skb, dma_addr); + alloc_more = 1; + goto skb_out; + } + + hinic_rq_prepare_wqe(rxq->rq, prod_idx, rq_wqe, &sge); + + hinic_rq_write_wqe(rxq->rq, prod_idx, rq_wqe, skb); + } + +skb_out: + if (i) { + wmb(); /* write all the wqes before update PI */ + + hinic_rq_update(rxq->rq, prod_idx); + } + + if (alloc_more) + tasklet_schedule(&rxq->rx_task); + + return i; +} + +/** + * free_all_rx_skbs - free all skbs in rx queue + * @rxq: rx queue + **/ +static void free_all_rx_skbs(struct hinic_rxq *rxq) +{ + struct hinic_rq *rq = rxq->rq; + struct hinic_hw_wqe *hw_wqe; + struct hinic_sge sge; + u16 ci; + + while ((hw_wqe = hinic_read_wqe(rq->wq, HINIC_RQ_WQE_SIZE, &ci))) { + if (IS_ERR(hw_wqe)) + break; + + hinic_rq_get_sge(rq, &hw_wqe->rq_wqe, ci, &sge); + + hinic_put_wqe(rq->wq, HINIC_RQ_WQE_SIZE); + + rx_free_skb(rxq, rq->saved_skb[ci], hinic_sge_to_dma(&sge)); + } +} + +/** + * rx_alloc_task - tasklet for queue allocation + * @data: rx queue + **/ +static void rx_alloc_task(unsigned long data) +{ + struct hinic_rxq *rxq = (struct hinic_rxq *)data; + + (void)rx_alloc_pkts(rxq); +} + +/** + * rx_recv_jumbo_pkt - Rx handler for jumbo pkt + * @rxq: rx queue + * @head_skb: the first skb in the list + * @left_pkt_len: left size of the pkt exclude head skb + * @ci: consumer index + * + * Return number of wqes that used for the left of the pkt + **/ +static int rx_recv_jumbo_pkt(struct hinic_rxq *rxq, struct sk_buff *head_skb, + unsigned int left_pkt_len, u16 ci) +{ + struct sk_buff *skb, *curr_skb = head_skb; + struct hinic_rq_wqe *rq_wqe; + unsigned int curr_len; + struct hinic_sge sge; + int num_wqes = 0; + + while (left_pkt_len > 0) { + rq_wqe = hinic_rq_read_next_wqe(rxq->rq, HINIC_RQ_WQE_SIZE, + &skb, &ci); + + num_wqes++; + + hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge); + + rx_unmap_skb(rxq, hinic_sge_to_dma(&sge)); + + prefetch(skb->data); + + curr_len = (left_pkt_len > HINIC_RX_BUF_SZ) ? 
HINIC_RX_BUF_SZ : + left_pkt_len; + + left_pkt_len -= curr_len; + + __skb_put(skb, curr_len); + + if (curr_skb == head_skb) + skb_shinfo(head_skb)->frag_list = skb; + else + curr_skb->next = skb; + + head_skb->len += skb->len; + head_skb->data_len += skb->len; + head_skb->truesize += skb->truesize; + + curr_skb = skb; + } + + return num_wqes; +} + +/** + * rxq_recv - Rx handler + * @rxq: rx queue + * @budget: maximum pkts to process + * + * Return number of pkts received + **/ +static int rxq_recv(struct hinic_rxq *rxq, int budget) +{ + struct hinic_qp *qp = container_of(rxq->rq, struct hinic_qp, rq); + u64 pkt_len = 0, rx_bytes = 0; + struct hinic_rq_wqe *rq_wqe; + int num_wqes, pkts = 0; + struct hinic_sge sge; + struct sk_buff *skb; + u16 ci; + + while (pkts < budget) { + num_wqes = 0; + + rq_wqe = hinic_rq_read_wqe(rxq->rq, HINIC_RQ_WQE_SIZE, &skb, + &ci); + if (!rq_wqe) + break; + + hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge); + + rx_unmap_skb(rxq, hinic_sge_to_dma(&sge)); + + prefetch(skb->data); + + pkt_len = sge.len; + + if (pkt_len <= HINIC_RX_BUF_SZ) { + __skb_put(skb, pkt_len); + } else { + __skb_put(skb, HINIC_RX_BUF_SZ); + num_wqes = rx_recv_jumbo_pkt(rxq, skb, pkt_len - + HINIC_RX_BUF_SZ, ci); + } + + hinic_rq_put_wqe(rxq->rq, ci, + (num_wqes + 1) * HINIC_RQ_WQE_SIZE); + + skb_record_rx_queue(skb, qp->q_id); + skb->protocol = eth_type_trans(skb, rxq->netdev); + + napi_gro_receive(&rxq->napi, skb); + + pkts++; + rx_bytes += pkt_len; + } + + if (pkts) + tasklet_schedule(&rxq->rx_task); /* hinic_rx_alloc_pkts */ + + u64_stats_update_begin(&rxq->rxq_stats.syncp); + rxq->rxq_stats.pkts += pkts; + rxq->rxq_stats.bytes += rx_bytes; + u64_stats_update_end(&rxq->rxq_stats.syncp); + + return pkts; +} + +static int rx_poll(struct napi_struct *napi, int budget) +{ + struct hinic_rxq *rxq = container_of(napi, struct hinic_rxq, napi); + struct hinic_rq *rq = rxq->rq; + int pkts; + + pkts = rxq_recv(rxq, budget); + if (pkts >= budget) + return budget; + + napi_complete(napi); + enable_irq(rq->irq); + return pkts; +} + +static void rx_add_napi(struct hinic_rxq *rxq) +{ + struct hinic_dev *nic_dev = netdev_priv(rxq->netdev); + + netif_napi_add(rxq->netdev, &rxq->napi, rx_poll, nic_dev->rx_weight); + napi_enable(&rxq->napi); +} + +static void rx_del_napi(struct hinic_rxq *rxq) +{ + napi_disable(&rxq->napi); + netif_napi_del(&rxq->napi); +} + +static irqreturn_t rx_irq(int irq, void *data) +{ + struct hinic_rxq *rxq = (struct hinic_rxq *)data; + struct hinic_rq *rq = rxq->rq; + struct hinic_dev *nic_dev; + + /* Disable the interrupt until napi will be completed */ + disable_irq_nosync(rq->irq); + + nic_dev = netdev_priv(rxq->netdev); + hinic_hwdev_msix_cnt_set(nic_dev->hwdev, rq->msix_entry); + + napi_schedule(&rxq->napi); + return IRQ_HANDLED; +} + +static int rx_request_irq(struct hinic_rxq *rxq) +{ + struct hinic_dev *nic_dev = netdev_priv(rxq->netdev); + struct hinic_hwdev *hwdev = nic_dev->hwdev; + struct hinic_rq *rq = rxq->rq; + int err; + + rx_add_napi(rxq); + + hinic_hwdev_msix_set(hwdev, rq->msix_entry, + RX_IRQ_NO_PENDING, RX_IRQ_NO_COALESC, + RX_IRQ_NO_LLI_TIMER, RX_IRQ_NO_CREDIT, + RX_IRQ_NO_RESEND_TIMER); + + err = request_irq(rq->irq, rx_irq, 0, rxq->irq_name, rxq); + if (err) { + rx_del_napi(rxq); + return err; + } + + return 0; +} + +static void rx_free_irq(struct hinic_rxq *rxq) +{ + struct hinic_rq *rq = rxq->rq; + + free_irq(rq->irq, rxq); + rx_del_napi(rxq); +} + +/** + * hinic_init_rxq - Initialize the Rx Queue + * @rxq: Logical Rx Queue + * @rq: Hardware Rx Queue to 
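Buffer accounting behind rxq_recv()/rx_recv_jumbo_pkt(): a frame larger than one receive buffer spills into following buffers chained onto the head skb, and the completion then releases (num_wqes + 1) descriptors. The stand-alone sketch below computes that count; BUF_SZ is a placeholder value, the real buffer size comes from the hardware queue setup.

#include <stdio.h>

#define BUF_SZ	2048	/* illustrative receive-buffer size */

static unsigned int buffers_for_frame(unsigned int pkt_len)
{
	unsigned int extra = 0;

	if (pkt_len > BUF_SZ)
		extra = (pkt_len - BUF_SZ + BUF_SZ - 1) / BUF_SZ;

	return extra + 1;	/* head buffer plus continuation buffers */
}

int main(void)
{
	printf("1500-byte frame: %u buffer(s)\n", buffers_for_frame(1500));
	printf("9000-byte frame: %u buffer(s)\n", buffers_for_frame(9000));
	return 0;
}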
connect the Logical queue with + * @netdev: network device to connect the Logical queue with + * + * Return 0 - Success, negative - Failure + **/ +int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq, + struct net_device *netdev) +{ + struct hinic_qp *qp = container_of(rq, struct hinic_qp, rq); + int err, pkts, irqname_len; + + rxq->netdev = netdev; + rxq->rq = rq; + + rxq_stats_init(rxq); + + irqname_len = snprintf(NULL, 0, "hinic_rxq%d", qp->q_id) + 1; + rxq->irq_name = devm_kzalloc(&netdev->dev, irqname_len, GFP_KERNEL); + if (!rxq->irq_name) + return -ENOMEM; + + sprintf(rxq->irq_name, "hinic_rxq%d", qp->q_id); + + tasklet_init(&rxq->rx_task, rx_alloc_task, (unsigned long)rxq); + + pkts = rx_alloc_pkts(rxq); + if (!pkts) { + err = -ENOMEM; + goto err_rx_pkts; + } + + err = rx_request_irq(rxq); + if (err) { + netdev_err(netdev, "Failed to request Rx irq\n"); + goto err_req_rx_irq; + } + + return 0; + +err_req_rx_irq: +err_rx_pkts: + tasklet_kill(&rxq->rx_task); + free_all_rx_skbs(rxq); + devm_kfree(&netdev->dev, rxq->irq_name); + return err; +} + +/** + * hinic_clean_rxq - Clean the Rx Queue + * @rxq: Logical Rx Queue + **/ +void hinic_clean_rxq(struct hinic_rxq *rxq) +{ + struct net_device *netdev = rxq->netdev; + + rx_free_irq(rxq); + + tasklet_kill(&rxq->rx_task); + free_all_rx_skbs(rxq); + devm_kfree(&netdev->dev, rxq->irq_name); +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.h b/drivers/net/ethernet/huawei/hinic/hinic_rx.h new file mode 100644 index 000000000000..27c9af4b1c12 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.h @@ -0,0 +1,55 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef HINIC_RX_H +#define HINIC_RX_H + +#include +#include +#include +#include + +#include "hinic_hw_qp.h" + +struct hinic_rxq_stats { + u64 pkts; + u64 bytes; + + struct u64_stats_sync syncp; +}; + +struct hinic_rxq { + struct net_device *netdev; + struct hinic_rq *rq; + + struct hinic_rxq_stats rxq_stats; + + char *irq_name; + + struct tasklet_struct rx_task; + + struct napi_struct napi; +}; + +void hinic_rxq_clean_stats(struct hinic_rxq *rxq); + +void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats); + +int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq, + struct net_device *netdev); + +void hinic_clean_rxq(struct hinic_rxq *rxq); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c new file mode 100644 index 000000000000..abe3e38cd342 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c @@ -0,0 +1,504 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hinic_common.h" +#include "hinic_hw_if.h" +#include "hinic_hw_wqe.h" +#include "hinic_hw_wq.h" +#include "hinic_hw_qp.h" +#include "hinic_hw_dev.h" +#include "hinic_dev.h" +#include "hinic_tx.h" + +#define TX_IRQ_NO_PENDING 0 +#define TX_IRQ_NO_COALESC 0 +#define TX_IRQ_NO_LLI_TIMER 0 +#define TX_IRQ_NO_CREDIT 0 +#define TX_IRQ_NO_RESEND_TIMER 0 + +#define CI_UPDATE_NO_PENDING 0 +#define CI_UPDATE_NO_COALESC 0 + +#define HW_CONS_IDX(sq) be16_to_cpu(*(u16 *)((sq)->hw_ci_addr)) + +#define MIN_SKB_LEN 64 + +/** + * hinic_txq_clean_stats - Clean the statistics of specific queue + * @txq: Logical Tx Queue + **/ +void hinic_txq_clean_stats(struct hinic_txq *txq) +{ + struct hinic_txq_stats *txq_stats = &txq->txq_stats; + + u64_stats_update_begin(&txq_stats->syncp); + txq_stats->pkts = 0; + txq_stats->bytes = 0; + txq_stats->tx_busy = 0; + txq_stats->tx_wake = 0; + txq_stats->tx_dropped = 0; + u64_stats_update_end(&txq_stats->syncp); +} + +/** + * hinic_txq_get_stats - get statistics of Tx Queue + * @txq: Logical Tx Queue + * @stats: return updated stats here + **/ +void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats) +{ + struct hinic_txq_stats *txq_stats = &txq->txq_stats; + unsigned int start; + + u64_stats_update_begin(&stats->syncp); + do { + start = u64_stats_fetch_begin(&txq_stats->syncp); + stats->pkts = txq_stats->pkts; + stats->bytes = txq_stats->bytes; + stats->tx_busy = txq_stats->tx_busy; + stats->tx_wake = txq_stats->tx_wake; + stats->tx_dropped = txq_stats->tx_dropped; + } while (u64_stats_fetch_retry(&txq_stats->syncp, start)); + u64_stats_update_end(&stats->syncp); +} + +/** + * txq_stats_init - Initialize the statistics of specific queue + * @txq: Logical Tx Queue + **/ +static void txq_stats_init(struct hinic_txq *txq) +{ + struct hinic_txq_stats *txq_stats = &txq->txq_stats; + + u64_stats_init(&txq_stats->syncp); + hinic_txq_clean_stats(txq); +} + +/** + * tx_map_skb - dma mapping for skb and return sges + * @nic_dev: nic device + * @skb: the skb + * @sges: returned sges + * + * Return 0 - Success, negative - Failure + **/ +static int tx_map_skb(struct hinic_dev *nic_dev, struct sk_buff *skb, + struct hinic_sge *sges) +{ + struct hinic_hwdev *hwdev = nic_dev->hwdev; + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + struct skb_frag_struct *frag; + dma_addr_t dma_addr; + int i, j; + + dma_addr = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb), + DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, dma_addr)) { + dev_err(&pdev->dev, "Failed to map Tx skb data\n"); + return -EFAULT; + } + + hinic_set_sge(&sges[0], dma_addr, skb_headlen(skb)); + + for (i = 0 ; i < skb_shinfo(skb)->nr_frags; i++) { + frag = &skb_shinfo(skb)->frags[i]; + + dma_addr = skb_frag_dma_map(&pdev->dev, frag, 0, + skb_frag_size(frag), + DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, dma_addr)) { + dev_err(&pdev->dev, "Failed to map Tx skb frag\n"); + goto err_tx_map; + } + + hinic_set_sge(&sges[i + 1], dma_addr, skb_frag_size(frag)); + } + + return 0; + +err_tx_map: + for (j = 0; j < i; j++) + dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[j + 
1]), + sges[j + 1].len, DMA_TO_DEVICE); + + dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len, + DMA_TO_DEVICE); + return -EFAULT; +} + +/** + * tx_unmap_skb - unmap the dma address of the skb + * @nic_dev: nic device + * @skb: the skb + * @sges: the sges that are connected to the skb + **/ +static void tx_unmap_skb(struct hinic_dev *nic_dev, struct sk_buff *skb, + struct hinic_sge *sges) +{ + struct hinic_hwdev *hwdev = nic_dev->hwdev; + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + int i; + + for (i = 0; i < skb_shinfo(skb)->nr_frags ; i++) + dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[i + 1]), + sges[i + 1].len, DMA_TO_DEVICE); + + dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len, + DMA_TO_DEVICE); +} + +netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct hinic_dev *nic_dev = netdev_priv(netdev); + struct netdev_queue *netdev_txq; + int nr_sges, err = NETDEV_TX_OK; + struct hinic_sq_wqe *sq_wqe; + unsigned int wqe_size; + struct hinic_txq *txq; + struct hinic_qp *qp; + u16 prod_idx; + + txq = &nic_dev->txqs[skb->queue_mapping]; + qp = container_of(txq->sq, struct hinic_qp, sq); + + if (skb->len < MIN_SKB_LEN) { + if (skb_pad(skb, MIN_SKB_LEN - skb->len)) { + netdev_err(netdev, "Failed to pad skb\n"); + goto update_error_stats; + } + + skb->len = MIN_SKB_LEN; + } + + nr_sges = skb_shinfo(skb)->nr_frags + 1; + if (nr_sges > txq->max_sges) { + netdev_err(netdev, "Too many Tx sges\n"); + goto skb_error; + } + + err = tx_map_skb(nic_dev, skb, txq->sges); + if (err) + goto skb_error; + + wqe_size = HINIC_SQ_WQE_SIZE(nr_sges); + + sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx); + if (!sq_wqe) { + tx_unmap_skb(nic_dev, skb, txq->sges); + + netif_stop_subqueue(netdev, qp->q_id); + + u64_stats_update_begin(&txq->txq_stats.syncp); + txq->txq_stats.tx_busy++; + u64_stats_update_end(&txq->txq_stats.syncp); + err = NETDEV_TX_BUSY; + goto flush_skbs; + } + + hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges); + + hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size); + +flush_skbs: + netdev_txq = netdev_get_tx_queue(netdev, skb->queue_mapping); + if ((!skb->xmit_more) || (netif_xmit_stopped(netdev_txq))) + hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0); + + return err; + +skb_error: + dev_kfree_skb_any(skb); + +update_error_stats: + u64_stats_update_begin(&txq->txq_stats.syncp); + txq->txq_stats.tx_dropped++; + u64_stats_update_end(&txq->txq_stats.syncp); + return err; +} + +/** + * tx_free_skb - unmap and free skb + * @nic_dev: nic device + * @skb: the skb + * @sges: the sges that are connected to the skb + **/ +static void tx_free_skb(struct hinic_dev *nic_dev, struct sk_buff *skb, + struct hinic_sge *sges) +{ + tx_unmap_skb(nic_dev, skb, sges); + + dev_kfree_skb_any(skb); +} + +/** + * free_all_rx_skbs - free all skbs in tx queue + * @txq: tx queue + **/ +static void free_all_tx_skbs(struct hinic_txq *txq) +{ + struct hinic_dev *nic_dev = netdev_priv(txq->netdev); + struct hinic_sq *sq = txq->sq; + struct hinic_sq_wqe *sq_wqe; + unsigned int wqe_size; + struct sk_buff *skb; + int nr_sges; + u16 ci; + + while ((sq_wqe = hinic_sq_read_wqe(sq, &skb, &wqe_size, &ci))) { + nr_sges = skb_shinfo(skb)->nr_frags + 1; + + hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges); + + hinic_sq_put_wqe(sq, wqe_size); + + tx_free_skb(nic_dev, skb, txq->free_sges); + } +} + +/** + * free_tx_poll - free finished tx skbs in tx queue that connected to napi + * @napi: napi + 
* @budget: number of tx + * + * Return 0 - Success, negative - Failure + **/ +static int free_tx_poll(struct napi_struct *napi, int budget) +{ + struct hinic_txq *txq = container_of(napi, struct hinic_txq, napi); + struct hinic_qp *qp = container_of(txq->sq, struct hinic_qp, sq); + struct hinic_dev *nic_dev = netdev_priv(txq->netdev); + struct netdev_queue *netdev_txq; + struct hinic_sq *sq = txq->sq; + struct hinic_wq *wq = sq->wq; + struct hinic_sq_wqe *sq_wqe; + unsigned int wqe_size; + int nr_sges, pkts = 0; + struct sk_buff *skb; + u64 tx_bytes = 0; + u16 hw_ci, sw_ci; + + do { + hw_ci = HW_CONS_IDX(sq) & wq->mask; + + sq_wqe = hinic_sq_read_wqe(sq, &skb, &wqe_size, &sw_ci); + if ((!sq_wqe) || + (((hw_ci - sw_ci) & wq->mask) * wq->wqebb_size < wqe_size)) + break; + + tx_bytes += skb->len; + pkts++; + + nr_sges = skb_shinfo(skb)->nr_frags + 1; + + hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges); + + hinic_sq_put_wqe(sq, wqe_size); + + tx_free_skb(nic_dev, skb, txq->free_sges); + } while (pkts < budget); + + if (__netif_subqueue_stopped(nic_dev->netdev, qp->q_id) && + hinic_get_sq_free_wqebbs(sq) >= HINIC_MIN_TX_NUM_WQEBBS(sq)) { + netdev_txq = netdev_get_tx_queue(txq->netdev, qp->q_id); + + __netif_tx_lock(netdev_txq, smp_processor_id()); + + netif_wake_subqueue(nic_dev->netdev, qp->q_id); + + __netif_tx_unlock(netdev_txq); + + u64_stats_update_begin(&txq->txq_stats.syncp); + txq->txq_stats.tx_wake++; + u64_stats_update_end(&txq->txq_stats.syncp); + } + + u64_stats_update_begin(&txq->txq_stats.syncp); + txq->txq_stats.bytes += tx_bytes; + txq->txq_stats.pkts += pkts; + u64_stats_update_end(&txq->txq_stats.syncp); + + if (pkts < budget) { + napi_complete(napi); + enable_irq(sq->irq); + return pkts; + } + + return budget; +} + +static void tx_napi_add(struct hinic_txq *txq, int weight) +{ + netif_napi_add(txq->netdev, &txq->napi, free_tx_poll, weight); + napi_enable(&txq->napi); +} + +static void tx_napi_del(struct hinic_txq *txq) +{ + napi_disable(&txq->napi); + netif_napi_del(&txq->napi); +} + +static irqreturn_t tx_irq(int irq, void *data) +{ + struct hinic_txq *txq = data; + struct hinic_dev *nic_dev; + + nic_dev = netdev_priv(txq->netdev); + + /* Disable the interrupt until napi will be completed */ + disable_irq_nosync(txq->sq->irq); + + hinic_hwdev_msix_cnt_set(nic_dev->hwdev, txq->sq->msix_entry); + + napi_schedule(&txq->napi); + return IRQ_HANDLED; +} + +static int tx_request_irq(struct hinic_txq *txq) +{ + struct hinic_dev *nic_dev = netdev_priv(txq->netdev); + struct hinic_hwdev *hwdev = nic_dev->hwdev; + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + struct hinic_sq *sq = txq->sq; + int err; + + tx_napi_add(txq, nic_dev->tx_weight); + + hinic_hwdev_msix_set(nic_dev->hwdev, sq->msix_entry, + TX_IRQ_NO_PENDING, TX_IRQ_NO_COALESC, + TX_IRQ_NO_LLI_TIMER, TX_IRQ_NO_CREDIT, + TX_IRQ_NO_RESEND_TIMER); + + err = request_irq(sq->irq, tx_irq, 0, txq->irq_name, txq); + if (err) { + dev_err(&pdev->dev, "Failed to request Tx irq\n"); + tx_napi_del(txq); + return err; + } + + return 0; +} + +static void tx_free_irq(struct hinic_txq *txq) +{ + struct hinic_sq *sq = txq->sq; + + free_irq(sq->irq, txq); + tx_napi_del(txq); +} + +/** + * hinic_init_txq - Initialize the Tx Queue + * @txq: Logical Tx Queue + * @sq: Hardware Tx Queue to connect the Logical queue with + * @netdev: network device to connect the Logical queue with + * + * Return 0 - Success, negative - Failure + **/ +int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq, + struct net_device 
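free_tx_poll() reclaims completed send descriptors by measuring how far the hardware consumer index has advanced past the software one with (hw_ci - sw_ci) & wq->mask, which stays correct across index wraparound as long as the ring size is a power of two. A stand-alone illustration of that arithmetic:

#include <stdio.h>

#define RING_SIZE	256			/* must be a power of two */
#define RING_MASK	(RING_SIZE - 1)

static unsigned int completed_entries(unsigned int hw_ci, unsigned int sw_ci)
{
	return (hw_ci - sw_ci) & RING_MASK;
}

int main(void)
{
	/* no wraparound: hardware is 5 entries ahead */
	printf("%u\n", completed_entries(10, 5));
	/* wraparound: sw_ci near the end, hw_ci already wrapped */
	printf("%u\n", completed_entries(3, 250));
	return 0;
}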
*netdev) +{ + struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq); + struct hinic_dev *nic_dev = netdev_priv(netdev); + struct hinic_hwdev *hwdev = nic_dev->hwdev; + int err, irqname_len; + size_t sges_size; + + txq->netdev = netdev; + txq->sq = sq; + + txq_stats_init(txq); + + txq->max_sges = HINIC_MAX_SQ_BUFDESCS; + + sges_size = txq->max_sges * sizeof(*txq->sges); + txq->sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL); + if (!txq->sges) + return -ENOMEM; + + sges_size = txq->max_sges * sizeof(*txq->free_sges); + txq->free_sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL); + if (!txq->free_sges) { + err = -ENOMEM; + goto err_alloc_free_sges; + } + + irqname_len = snprintf(NULL, 0, "hinic_txq%d", qp->q_id) + 1; + txq->irq_name = devm_kzalloc(&netdev->dev, irqname_len, GFP_KERNEL); + if (!txq->irq_name) { + err = -ENOMEM; + goto err_alloc_irqname; + } + + sprintf(txq->irq_name, "hinic_txq%d", qp->q_id); + + err = hinic_hwdev_hw_ci_addr_set(hwdev, sq, CI_UPDATE_NO_PENDING, + CI_UPDATE_NO_COALESC); + if (err) + goto err_hw_ci; + + err = tx_request_irq(txq); + if (err) { + netdev_err(netdev, "Failed to request Tx irq\n"); + goto err_req_tx_irq; + } + + return 0; + +err_req_tx_irq: +err_hw_ci: + devm_kfree(&netdev->dev, txq->irq_name); + +err_alloc_irqname: + devm_kfree(&netdev->dev, txq->free_sges); + +err_alloc_free_sges: + devm_kfree(&netdev->dev, txq->sges); + return err; +} + +/** + * hinic_clean_txq - Clean the Tx Queue + * @txq: Logical Tx Queue + **/ +void hinic_clean_txq(struct hinic_txq *txq) +{ + struct net_device *netdev = txq->netdev; + + tx_free_irq(txq); + + free_all_tx_skbs(txq); + + devm_kfree(&netdev->dev, txq->irq_name); + devm_kfree(&netdev->dev, txq->free_sges); + devm_kfree(&netdev->dev, txq->sges); +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.h b/drivers/net/ethernet/huawei/hinic/hinic_tx.h new file mode 100644 index 000000000000..1fa55dce5aa7 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.h @@ -0,0 +1,62 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#ifndef HINIC_TX_H +#define HINIC_TX_H + +#include +#include +#include +#include + +#include "hinic_common.h" +#include "hinic_hw_qp.h" + +struct hinic_txq_stats { + u64 pkts; + u64 bytes; + u64 tx_busy; + u64 tx_wake; + u64 tx_dropped; + + struct u64_stats_sync syncp; +}; + +struct hinic_txq { + struct net_device *netdev; + struct hinic_sq *sq; + + struct hinic_txq_stats txq_stats; + + int max_sges; + struct hinic_sge *sges; + struct hinic_sge *free_sges; + + char *irq_name; + struct napi_struct napi; +}; + +void hinic_txq_clean_stats(struct hinic_txq *txq); + +void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats); + +netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev); + +int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq, + struct net_device *netdev); + +void hinic_clean_txq(struct hinic_txq *txq); + +#endif diff --git a/drivers/net/ethernet/i825xx/lasi_82596.c b/drivers/net/ethernet/i825xx/lasi_82596.c index aa22e108f09b..b69c622ba8b2 100644 --- a/drivers/net/ethernet/i825xx/lasi_82596.c +++ b/drivers/net/ethernet/i825xx/lasi_82596.c @@ -96,8 +96,6 @@ #define OPT_SWAP_PORT 0x0001 /* Need to wordswp on the MPU port */ -#define DMA_ALLOC dma_alloc_noncoherent -#define DMA_FREE dma_free_noncoherent #define DMA_WBACK(ndev, addr, len) \ do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_TO_DEVICE); } while (0) @@ -200,8 +198,8 @@ static int __exit lan_remove_chip(struct parisc_device *pdev) struct i596_private *lp = netdev_priv(dev); unregister_netdev (dev); - DMA_FREE(&pdev->dev, sizeof(struct i596_private), - (void *)lp->dma, lp->dma_addr); + dma_free_attrs(&pdev->dev, sizeof(struct i596_private), lp->dma, + lp->dma_addr, DMA_ATTR_NON_CONSISTENT); free_netdev (dev); return 0; } diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c index 8449c58f01fd..f00a1dc2128c 100644 --- a/drivers/net/ethernet/i825xx/lib82596.c +++ b/drivers/net/ethernet/i825xx/lib82596.c @@ -1063,8 +1063,9 @@ static int i82596_probe(struct net_device *dev) if (!dev->base_addr || !dev->irq) return -ENODEV; - dma = (struct i596_dma *) DMA_ALLOC(dev->dev.parent, - sizeof(struct i596_dma), &lp->dma_addr, GFP_KERNEL); + dma = dma_alloc_attrs(dev->dev.parent, sizeof(struct i596_dma), + &lp->dma_addr, GFP_KERNEL, + DMA_ATTR_NON_CONSISTENT); if (!dma) { printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__); return -ENOMEM; @@ -1085,8 +1086,8 @@ static int i82596_probe(struct net_device *dev) i = register_netdev(dev); if (i) { - DMA_FREE(dev->dev.parent, sizeof(struct i596_dma), - (void *)dma, lp->dma_addr); + dma_free_attrs(dev->dev.parent, sizeof(struct i596_dma), + dma, lp->dma_addr, DMA_ATTR_NON_CONSISTENT); return i; } diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c index 2af7f77345fb..b2c04a789744 100644 --- a/drivers/net/ethernet/i825xx/sni_82596.c +++ b/drivers/net/ethernet/i825xx/sni_82596.c @@ -23,8 +23,6 @@ static const char sni_82596_string[] = "snirm_82596"; -#define DMA_ALLOC dma_alloc_coherent -#define DMA_FREE dma_free_coherent #define DMA_WBACK(priv, addr, len) do { } while (0) #define DMA_INV(priv, addr, len) do { } while (0) #define DMA_WBACK_INV(priv, addr, len) do { } while (0) @@ -152,8 +150,8 @@ static int sni_82596_driver_remove(struct platform_device *pdev) struct i596_private *lp = netdev_priv(dev); unregister_netdev(dev); - DMA_FREE(dev->dev.parent, sizeof(struct i596_private), - lp->dma, lp->dma_addr); + 
dma_free_attrs(dev->dev.parent, sizeof(struct i596_private), lp->dma, + lp->dma_addr, DMA_ATTR_NON_CONSISTENT); iounmap(lp->ca); iounmap(lp->mpu_port); free_netdev (dev); diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index b9d310f20bcc..4878b7169e0f 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -3102,8 +3102,7 @@ static int ehea_setup_ports(struct ehea_adapter *adapter) dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no", NULL); if (!dn_log_port_id) { - pr_err("bad device node: eth_dn name=%s\n", - eth_dn->full_name); + pr_err("bad device node: eth_dn name=%pOF\n", eth_dn); continue; } @@ -3425,7 +3424,7 @@ static int ehea_probe_adapter(struct platform_device *dev) if (!adapter->handle) { dev_err(&dev->dev, "failed getting handle for adapter" - " '%s'\n", dev->dev.of_node->full_name); + " '%pOF'\n", dev->dev.of_node); ret = -ENODEV; goto out_free_ad; } diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 259e69a52ec5..7feff2450ed6 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -133,8 +133,7 @@ static inline void emac_report_timeout_error(struct emac_instance *dev, EMAC_FTR_440EP_PHY_CLK_FIX)) DBG(dev, "%s" NL, error); else if (net_ratelimit()) - printk(KERN_ERR "%s: %s\n", dev->ofdev->dev.of_node->full_name, - error); + printk(KERN_ERR "%pOF: %s\n", dev->ofdev->dev.of_node, error); } /* EMAC PHY clock workaround: @@ -2258,8 +2257,8 @@ static void emac_ethtool_get_drvinfo(struct net_device *ndev, strlcpy(info->driver, "ibm_emac", sizeof(info->driver)); strlcpy(info->version, DRV_VERSION, sizeof(info->version)); - snprintf(info->bus_info, sizeof(info->bus_info), "PPC 4xx EMAC-%d %s", - dev->cell_index, dev->ofdev->dev.of_node->full_name); + snprintf(info->bus_info, sizeof(info->bus_info), "PPC 4xx EMAC-%d %pOF", + dev->cell_index, dev->ofdev->dev.of_node); } static const struct ethtool_ops emac_ethtool_ops = { @@ -2431,8 +2430,8 @@ static int emac_read_uint_prop(struct device_node *np, const char *name, const u32 *prop = of_get_property(np, name, &len); if (prop == NULL || len < sizeof(u32)) { if (fatal) - printk(KERN_ERR "%s: missing %s property\n", - np->full_name, name); + printk(KERN_ERR "%pOF: missing %s property\n", + np, name); return -ENODEV; } *val = *prop; @@ -2768,7 +2767,7 @@ static int emac_init_phy(struct emac_instance *dev) #endif mutex_unlock(&emac_phy_map_lock); if (i == 0x20) { - printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name); + printk(KERN_WARNING "%pOF: can't find PHY!\n", np); return -ENXIO; } @@ -2894,8 +2893,8 @@ static int emac_init_config(struct emac_instance *dev) #ifdef CONFIG_IBM_EMAC_NO_FLOW_CTRL dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x; #else - printk(KERN_ERR "%s: Flow control not disabled!\n", - np->full_name); + printk(KERN_ERR "%pOF: Flow control not disabled!\n", + np); return -ENXIO; #endif } @@ -2918,8 +2917,7 @@ static int emac_init_config(struct emac_instance *dev) #ifdef CONFIG_IBM_EMAC_TAH dev->features |= EMAC_FTR_HAS_TAH; #else - printk(KERN_ERR "%s: TAH support not enabled !\n", - np->full_name); + printk(KERN_ERR "%pOF: TAH support not enabled !\n", np); return -ENXIO; #endif } @@ -2928,8 +2926,7 @@ static int emac_init_config(struct emac_instance *dev) #ifdef CONFIG_IBM_EMAC_ZMII dev->features |= EMAC_FTR_HAS_ZMII; #else - printk(KERN_ERR "%s: ZMII support not enabled !\n", - np->full_name); + printk(KERN_ERR "%pOF: ZMII 
support not enabled !\n", np); return -ENXIO; #endif } @@ -2938,8 +2935,7 @@ static int emac_init_config(struct emac_instance *dev) #ifdef CONFIG_IBM_EMAC_RGMII dev->features |= EMAC_FTR_HAS_RGMII; #else - printk(KERN_ERR "%s: RGMII support not enabled !\n", - np->full_name); + printk(KERN_ERR "%pOF: RGMII support not enabled !\n", np); return -ENXIO; #endif } @@ -2947,8 +2943,8 @@ static int emac_init_config(struct emac_instance *dev) /* Read MAC-address */ p = of_get_property(np, "local-mac-address", NULL); if (p == NULL) { - printk(KERN_ERR "%s: Can't find local-mac-address property\n", - np->full_name); + printk(KERN_ERR "%pOF: Can't find local-mac-address property\n", + np); return -ENXIO; } memcpy(dev->ndev->dev_addr, p, ETH_ALEN); @@ -3036,30 +3032,24 @@ static int emac_probe(struct platform_device *ofdev) /* Init various config data based on device-tree */ err = emac_init_config(dev); - if (err != 0) + if (err) goto err_free; /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */ dev->emac_irq = irq_of_parse_and_map(np, 0); dev->wol_irq = irq_of_parse_and_map(np, 1); if (!dev->emac_irq) { - printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name); + printk(KERN_ERR "%pOF: Can't map main interrupt\n", np); + err = -ENODEV; goto err_free; } ndev->irq = dev->emac_irq; /* Map EMAC regs */ - if (of_address_to_resource(np, 0, &dev->rsrc_regs)) { - printk(KERN_ERR "%s: Can't get registers address\n", - np->full_name); - goto err_irq_unmap; - } - // TODO : request_mem_region - dev->emacp = ioremap(dev->rsrc_regs.start, - resource_size(&dev->rsrc_regs)); + // TODO : platform_get_resource() and devm_ioremap_resource() + dev->emacp = of_iomap(np, 0); if (dev->emacp == NULL) { - printk(KERN_ERR "%s: Can't map device registers!\n", - np->full_name); + printk(KERN_ERR "%pOF: Can't map device registers!\n", np); err = -ENOMEM; goto err_irq_unmap; } @@ -3068,8 +3058,7 @@ static int emac_probe(struct platform_device *ofdev) err = emac_wait_deps(dev); if (err) { printk(KERN_ERR - "%s: Timeout waiting for dependent devices\n", - np->full_name); + "%pOF: Timeout waiting for dependent devices\n", np); /* display more info about what's missing ? 
*/ goto err_reg_unmap; } @@ -3084,8 +3073,8 @@ static int emac_probe(struct platform_device *ofdev) dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan); err = mal_register_commac(dev->mal, &dev->commac); if (err) { - printk(KERN_ERR "%s: failed to register with mal %s!\n", - np->full_name, dev->mal_dev->dev.of_node->full_name); + printk(KERN_ERR "%pOF: failed to register with mal %pOF!\n", + np, dev->mal_dev->dev.of_node); goto err_rel_deps; } dev->rx_skb_size = emac_rx_skb_size(ndev->mtu); @@ -3161,8 +3150,8 @@ static int emac_probe(struct platform_device *ofdev) err = register_netdev(ndev); if (err) { - printk(KERN_ERR "%s: failed to register net device (%d)!\n", - np->full_name, err); + printk(KERN_ERR "%pOF: failed to register net device (%d)!\n", + np, err); goto err_detach_tah; } @@ -3176,8 +3165,8 @@ static int emac_probe(struct platform_device *ofdev) wake_up_all(&emac_probe_wait); - printk(KERN_INFO "%s: EMAC-%d %s, MAC %pM\n", - ndev->name, dev->cell_index, np->full_name, ndev->dev_addr); + printk(KERN_INFO "%s: EMAC-%d %pOF, MAC %pM\n", + ndev->name, dev->cell_index, np, ndev->dev_addr); if (dev->phy_mode == PHY_MODE_SGMII) printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name); diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h index f10e156641d5..369de2cfb15b 100644 --- a/drivers/net/ethernet/ibm/emac/core.h +++ b/drivers/net/ethernet/ibm/emac/core.h @@ -167,7 +167,6 @@ struct emac_error_stats { struct emac_instance { struct net_device *ndev; - struct resource rsrc_regs; struct emac_regs __iomem *emacp; struct platform_device *ofdev; struct device_node **blist; /* bootlist entry */ diff --git a/drivers/net/ethernet/ibm/emac/debug.h b/drivers/net/ethernet/ibm/emac/debug.h index 5bdfc174a07e..9d06d3be3161 100644 --- a/drivers/net/ethernet/ibm/emac/debug.h +++ b/drivers/net/ethernet/ibm/emac/debug.h @@ -31,7 +31,7 @@ #endif #define EMAC_DBG(d, name, fmt, arg...) \ - printk(KERN_DEBUG #name "%s: " fmt, d->ofdev->dev.of_node->full_name, ## arg) + printk(KERN_DEBUG #name "%pOF: " fmt, d->ofdev->dev.of_node, ## arg) #if DBG_LEVEL > 0 # define DBG(d,f,x...) 
EMAC_DBG(d, emac, f, ##x) diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c index 91b1a558f37d..fff09dcf9e34 100644 --- a/drivers/net/ethernet/ibm/emac/mal.c +++ b/drivers/net/ethernet/ibm/emac/mal.c @@ -402,7 +402,7 @@ static int mal_poll(struct napi_struct *napi, int budget) unsigned long flags; MAL_DBG2(mal, "poll(%d)" NL, budget); - again: + /* Process TX skbs */ list_for_each(l, &mal->poll_list) { struct mal_commac *mc = @@ -451,7 +451,6 @@ static int mal_poll(struct napi_struct *napi, int budget) spin_lock_irqsave(&mal->lock, flags); mal_disable_eob_irq(mal); spin_unlock_irqrestore(&mal->lock, flags); - goto again; } mc->ops->poll_tx(mc->dev); } @@ -579,8 +578,8 @@ static int mal_probe(struct platform_device *ofdev) mal->features |= (MAL_FTR_CLEAR_ICINTSTAT | MAL_FTR_COMMON_ERR_INT); #else - printk(KERN_ERR "%s: Support for 405EZ not enabled!\n", - ofdev->dev.of_node->full_name); + printk(KERN_ERR "%pOF: Support for 405EZ not enabled!\n", + ofdev->dev.of_node); err = -ENODEV; goto fail; #endif @@ -687,8 +686,8 @@ static int mal_probe(struct platform_device *ofdev) mal_enable_eob_irq(mal); printk(KERN_INFO - "MAL v%d %s, %d TX channels, %d RX channels\n", - mal->version, ofdev->dev.of_node->full_name, + "MAL v%d %pOF, %d TX channels, %d RX channels\n", + mal->version, ofdev->dev.of_node, mal->num_tx_chans, mal->num_rx_chans); /* Advertise this instance to the rest of the world */ diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c index 206ccbbae7bb..c4a1ac38bba8 100644 --- a/drivers/net/ethernet/ibm/emac/rgmii.c +++ b/drivers/net/ethernet/ibm/emac/rgmii.c @@ -104,8 +104,8 @@ int rgmii_attach(struct platform_device *ofdev, int input, int mode) /* Check if we need to attach to a RGMII */ if (input < 0 || !rgmii_valid_mode(mode)) { - printk(KERN_ERR "%s: unsupported settings !\n", - ofdev->dev.of_node->full_name); + printk(KERN_ERR "%pOF: unsupported settings !\n", + ofdev->dev.of_node); return -ENODEV; } @@ -114,8 +114,8 @@ int rgmii_attach(struct platform_device *ofdev, int input, int mode) /* Enable this input */ out_be32(&p->fer, in_be32(&p->fer) | rgmii_mode_mask(mode, input)); - printk(KERN_NOTICE "%s: input %d in %s mode\n", - ofdev->dev.of_node->full_name, input, rgmii_mode_name(mode)); + printk(KERN_NOTICE "%pOF: input %d in %s mode\n", + ofdev->dev.of_node, input, rgmii_mode_name(mode)); ++dev->users; @@ -249,8 +249,7 @@ static int rgmii_probe(struct platform_device *ofdev) rc = -ENXIO; if (of_address_to_resource(np, 0, ®s)) { - printk(KERN_ERR "%s: Can't get registers address\n", - np->full_name); + printk(KERN_ERR "%pOF: Can't get registers address\n", np); goto err_free; } @@ -258,8 +257,7 @@ static int rgmii_probe(struct platform_device *ofdev) dev->base = (struct rgmii_regs __iomem *)ioremap(regs.start, sizeof(struct rgmii_regs)); if (dev->base == NULL) { - printk(KERN_ERR "%s: Can't map device registers!\n", - np->full_name); + printk(KERN_ERR "%pOF: Can't map device registers!\n", np); goto err_free; } @@ -278,8 +276,8 @@ static int rgmii_probe(struct platform_device *ofdev) out_be32(&dev->base->fer, 0); printk(KERN_INFO - "RGMII %s initialized with%s MDIO support\n", - ofdev->dev.of_node->full_name, + "RGMII %pOF initialized with%s MDIO support\n", + ofdev->dev.of_node, (dev->flags & EMAC_RGMII_FLAG_HAS_MDIO) ? 
"" : "out"); wmb(); diff --git a/drivers/net/ethernet/ibm/emac/tah.c b/drivers/net/ethernet/ibm/emac/tah.c index 32cb6c9007c5..9912456dca48 100644 --- a/drivers/net/ethernet/ibm/emac/tah.c +++ b/drivers/net/ethernet/ibm/emac/tah.c @@ -58,8 +58,7 @@ void tah_reset(struct platform_device *ofdev) --n; if (unlikely(!n)) - printk(KERN_ERR "%s: reset timeout\n", - ofdev->dev.of_node->full_name); + printk(KERN_ERR "%pOF: reset timeout\n", ofdev->dev.of_node); /* 10KB TAH TX FIFO accommodates the max MTU of 9000 */ out_be32(&p->mr, @@ -105,8 +104,7 @@ static int tah_probe(struct platform_device *ofdev) rc = -ENXIO; if (of_address_to_resource(np, 0, ®s)) { - printk(KERN_ERR "%s: Can't get registers address\n", - np->full_name); + printk(KERN_ERR "%pOF: Can't get registers address\n", np); goto err_free; } @@ -114,8 +112,7 @@ static int tah_probe(struct platform_device *ofdev) dev->base = (struct tah_regs __iomem *)ioremap(regs.start, sizeof(struct tah_regs)); if (dev->base == NULL) { - printk(KERN_ERR "%s: Can't map device registers!\n", - np->full_name); + printk(KERN_ERR "%pOF: Can't map device registers!\n", np); goto err_free; } @@ -124,8 +121,7 @@ static int tah_probe(struct platform_device *ofdev) /* Initialize TAH and enable IPv4 checksum verification, no TSO yet */ tah_reset(ofdev); - printk(KERN_INFO - "TAH %s initialized\n", ofdev->dev.of_node->full_name); + printk(KERN_INFO "TAH %pOF initialized\n", ofdev->dev.of_node); wmb(); return 0; diff --git a/drivers/net/ethernet/ibm/emac/zmii.c b/drivers/net/ethernet/ibm/emac/zmii.c index 8727b865ea02..89c42d362292 100644 --- a/drivers/net/ethernet/ibm/emac/zmii.c +++ b/drivers/net/ethernet/ibm/emac/zmii.c @@ -121,15 +121,15 @@ int zmii_attach(struct platform_device *ofdev, int input, int *mode) } else dev->mode = *mode; - printk(KERN_NOTICE "%s: bridge in %s mode\n", - ofdev->dev.of_node->full_name, + printk(KERN_NOTICE "%pOF: bridge in %s mode\n", + ofdev->dev.of_node, zmii_mode_name(dev->mode)); } else { /* All inputs must use the same mode */ if (*mode != PHY_MODE_NA && *mode != dev->mode) { printk(KERN_ERR - "%s: invalid mode %d specified for input %d\n", - ofdev->dev.of_node->full_name, *mode, input); + "%pOF: invalid mode %d specified for input %d\n", + ofdev->dev.of_node, *mode, input); mutex_unlock(&dev->lock); return -EINVAL; } @@ -250,8 +250,7 @@ static int zmii_probe(struct platform_device *ofdev) rc = -ENXIO; if (of_address_to_resource(np, 0, ®s)) { - printk(KERN_ERR "%s: Can't get registers address\n", - np->full_name); + printk(KERN_ERR "%pOF: Can't get registers address\n", np); goto err_free; } @@ -259,8 +258,7 @@ static int zmii_probe(struct platform_device *ofdev) dev->base = (struct zmii_regs __iomem *)ioremap(regs.start, sizeof(struct zmii_regs)); if (dev->base == NULL) { - printk(KERN_ERR "%s: Can't map device registers!\n", - np->full_name); + printk(KERN_ERR "%pOF: Can't map device registers!\n", np); goto err_free; } @@ -270,8 +268,7 @@ static int zmii_probe(struct platform_device *ofdev) /* Disable all inputs by default */ out_be32(&dev->base->fer, 0); - printk(KERN_INFO - "ZMII %s initialized\n", ofdev->dev.of_node->full_name); + printk(KERN_INFO "ZMII %pOF initialized\n", ofdev->dev.of_node); wmb(); platform_set_drvdata(ofdev, dev); diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index d17c2b03f580..f210398200ec 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1897,7 +1897,7 @@ static int ibmveth_resume(struct device *dev) return 0; } 
-static struct vio_device_id ibmveth_device_table[] = { +static const struct vio_device_id ibmveth_device_table[] = { { "network", "IBM,l-lan"}, { "", "" } }; diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index c45e8e3b82d3..cb8182f4fdfa 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -347,6 +347,31 @@ static void replenish_pools(struct ibmvnic_adapter *adapter) } } +static void release_stats_buffers(struct ibmvnic_adapter *adapter) +{ + kfree(adapter->tx_stats_buffers); + kfree(adapter->rx_stats_buffers); +} + +static int init_stats_buffers(struct ibmvnic_adapter *adapter) +{ + adapter->tx_stats_buffers = + kcalloc(adapter->req_tx_queues, + sizeof(struct ibmvnic_tx_queue_stats), + GFP_KERNEL); + if (!adapter->tx_stats_buffers) + return -ENOMEM; + + adapter->rx_stats_buffers = + kcalloc(adapter->req_rx_queues, + sizeof(struct ibmvnic_rx_queue_stats), + GFP_KERNEL); + if (!adapter->rx_stats_buffers) + return -ENOMEM; + + return 0; +} + static void release_stats_token(struct ibmvnic_adapter *adapter) { struct device *dev = &adapter->vdev->dev; @@ -374,6 +399,7 @@ static int init_stats_token(struct ibmvnic_adapter *adapter) } adapter->stats_token = stok; + netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok); return 0; } @@ -387,6 +413,8 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter) for (i = 0; i < rx_scrqs; i++) { rx_pool = &adapter->rx_pool[i]; + netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i); + rc = reset_long_term_buff(adapter, &rx_pool->long_term_buff); if (rc) return rc; @@ -419,6 +447,8 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter) for (i = 0; i < rx_scrqs; i++) { rx_pool = &adapter->rx_pool[i]; + netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i); + kfree(rx_pool->free_map); free_long_term_buff(adapter, &rx_pool->long_term_buff); @@ -465,7 +495,7 @@ static int init_rx_pools(struct net_device *netdev) rx_pool = &adapter->rx_pool[i]; netdev_dbg(adapter->netdev, - "Initializing rx_pool %d, %lld buffs, %lld bytes each\n", + "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n", i, adapter->req_rx_add_entries_per_subcrq, be64_to_cpu(size_array[i])); @@ -515,6 +545,8 @@ static int reset_tx_pools(struct ibmvnic_adapter *adapter) tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); for (i = 0; i < tx_scrqs; i++) { + netdev_dbg(adapter->netdev, "Re-setting tx_pool[%d]\n", i); + tx_pool = &adapter->tx_pool[i]; rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff); @@ -545,6 +577,7 @@ static void release_tx_pools(struct ibmvnic_adapter *adapter) tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); for (i = 0; i < tx_scrqs; i++) { + netdev_dbg(adapter->netdev, "Releasing tx_pool[%d]\n", i); tx_pool = &adapter->tx_pool[i]; kfree(tx_pool->tx_buff); free_long_term_buff(adapter, &tx_pool->long_term_buff); @@ -571,6 +604,11 @@ static int init_tx_pools(struct net_device *netdev) for (i = 0; i < tx_subcrqs; i++) { tx_pool = &adapter->tx_pool[i]; + + netdev_dbg(adapter->netdev, + "Initializing tx_pool[%d], %lld buffs\n", + i, adapter->req_tx_entries_per_subcrq); + tx_pool->tx_buff = kcalloc(adapter->req_tx_entries_per_subcrq, sizeof(struct ibmvnic_tx_buff), GFP_KERNEL); @@ -641,8 +679,10 @@ static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter) if (!adapter->napi_enabled) return; - for (i = 0; i < adapter->req_rx_queues; i++) + for (i = 0; i < adapter->req_rx_queues; i++) { + 
netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i); napi_disable(&adapter->napi[i]); + } adapter->napi_enabled = false; } @@ -700,12 +740,16 @@ static void release_resources(struct ibmvnic_adapter *adapter) release_rx_pools(adapter); release_stats_token(adapter); + release_stats_buffers(adapter); release_error_buffers(adapter); if (adapter->napi) { for (i = 0; i < adapter->req_rx_queues; i++) { - if (&adapter->napi[i]) + if (&adapter->napi[i]) { + netdev_dbg(adapter->netdev, + "Releasing napi[%d]\n", i); netif_napi_del(&adapter->napi[i]); + } } } } @@ -718,7 +762,8 @@ static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state) bool resend; int rc; - netdev_err(netdev, "setting link state %d\n", link_state); + netdev_dbg(netdev, "setting link state %d\n", link_state); + memset(&crq, 0, sizeof(crq)); crq.logical_link_state.first = IBMVNIC_CRQ_CMD; crq.logical_link_state.cmd = LOGICAL_LINK_STATE; @@ -755,6 +800,9 @@ static int set_real_num_queues(struct net_device *netdev) struct ibmvnic_adapter *adapter = netdev_priv(netdev); int rc; + netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n", + adapter->req_tx_queues, adapter->req_rx_queues); + rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues); if (rc) { netdev_err(netdev, "failed to set the number of tx queues\n"); @@ -777,6 +825,10 @@ static int init_resources(struct ibmvnic_adapter *adapter) if (rc) return rc; + rc = init_stats_buffers(adapter); + if (rc) + return rc; + rc = init_stats_token(adapter); if (rc) return rc; @@ -788,6 +840,7 @@ static int init_resources(struct ibmvnic_adapter *adapter) return -ENOMEM; for (i = 0; i < adapter->req_rx_queues; i++) { + netdev_dbg(netdev, "Adding napi[%d]\n", i); netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll, NAPI_POLL_WEIGHT); } @@ -816,6 +869,7 @@ static int __ibmvnic_open(struct net_device *netdev) * set the logical link state to up */ for (i = 0; i < adapter->req_rx_queues; i++) { + netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i); if (prev_state == VNIC_CLOSED) enable_irq(adapter->rx_scrq[i]->irq); else @@ -823,6 +877,7 @@ static int __ibmvnic_open(struct net_device *netdev) } for (i = 0; i < adapter->req_tx_queues; i++) { + netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i); if (prev_state == VNIC_CLOSED) enable_irq(adapter->tx_scrq[i]->irq); else @@ -896,6 +951,7 @@ static void clean_tx_pools(struct ibmvnic_adapter *adapter) if (!tx_pool) continue; + netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i); for (j = 0; j < tx_entries; j++) { if (tx_pool->tx_buff[j].skb) { dev_kfree_skb_any(tx_pool->tx_buff[j].skb); @@ -923,8 +979,11 @@ static int __ibmvnic_close(struct net_device *netdev) if (adapter->tx_scrq) { for (i = 0; i < adapter->req_tx_queues; i++) - if (adapter->tx_scrq[i]->irq) + if (adapter->tx_scrq[i]->irq) { + netdev_dbg(adapter->netdev, + "Disabling tx_scrq[%d] irq\n", i); disable_irq(adapter->tx_scrq[i]->irq); + } } rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN); @@ -943,8 +1002,11 @@ static int __ibmvnic_close(struct net_device *netdev) break; } - if (adapter->rx_scrq[i]->irq) + if (adapter->rx_scrq[i]->irq) { + netdev_dbg(adapter->netdev, + "Disabling rx_scrq[%d] irq\n", i); disable_irq(adapter->rx_scrq[i]->irq); + } } } @@ -1259,6 +1321,9 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) netdev->stats.tx_packets += tx_packets; adapter->tx_send_failed += tx_send_failed; adapter->tx_map_failed += tx_map_failed; + adapter->tx_stats_buffers[queue_num].packets += tx_packets; + 
adapter->tx_stats_buffers[queue_num].bytes += tx_bytes; + adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped; return ret; } @@ -1334,6 +1399,9 @@ static int do_reset(struct ibmvnic_adapter *adapter, struct net_device *netdev = adapter->netdev; int i, rc; + netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n", + rwi->reset_reason); + netif_carrier_off(netdev); adapter->reset_reason = rwi->reset_reason; @@ -1458,6 +1526,7 @@ static void __ibmvnic_reset(struct work_struct *work) } if (rc) { + netdev_dbg(adapter->netdev, "Reset failed\n"); free_all_rwi(adapter); mutex_unlock(&adapter->reset_lock); return; @@ -1491,7 +1560,7 @@ static void ibmvnic_reset(struct ibmvnic_adapter *adapter, list_for_each(entry, &adapter->rwi_list) { tmp = list_entry(entry, struct ibmvnic_rwi, list); if (tmp->reset_reason == reason) { - netdev_err(netdev, "Matching reset found, skipping\n"); + netdev_dbg(netdev, "Skipping matching reset\n"); mutex_unlock(&adapter->rwi_lock); return; } @@ -1507,6 +1576,8 @@ static void ibmvnic_reset(struct ibmvnic_adapter *adapter, rwi->reset_reason = reason; list_add_tail(&rwi->list, &adapter->rwi_list); mutex_unlock(&adapter->rwi_lock); + + netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason); schedule_work(&adapter->ibmvnic_reset); } @@ -1560,7 +1631,8 @@ static int ibmvnic_poll(struct napi_struct *napi, int budget) rx_comp.correlator); /* do error checking */ if (next->rx_comp.rc) { - netdev_err(netdev, "rx error %x\n", next->rx_comp.rc); + netdev_dbg(netdev, "rx buffer returned with rc %x\n", + be16_to_cpu(next->rx_comp.rc)); /* free the entry */ next->rx_comp.first = 0; remove_buff_from_pool(adapter, rx_buff); @@ -1599,6 +1671,8 @@ static int ibmvnic_poll(struct napi_struct *napi, int budget) napi_gro_receive(napi, skb); /* send it up */ netdev->stats.rx_packets++; netdev->stats.rx_bytes += length; + adapter->rx_stats_buffers[scrq_num].packets++; + adapter->rx_stats_buffers[scrq_num].bytes += length; frames_processed++; } @@ -1708,18 +1782,36 @@ static u32 ibmvnic_get_link(struct net_device *netdev) static void ibmvnic_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) { - ring->rx_max_pending = 0; - ring->tx_max_pending = 0; + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq; + ring->tx_max_pending = adapter->max_tx_entries_per_subcrq; ring->rx_mini_max_pending = 0; ring->rx_jumbo_max_pending = 0; - ring->rx_pending = 0; - ring->tx_pending = 0; + ring->rx_pending = adapter->req_rx_add_entries_per_subcrq; + ring->tx_pending = adapter->req_tx_entries_per_subcrq; ring->rx_mini_pending = 0; ring->rx_jumbo_pending = 0; } +static void ibmvnic_get_channels(struct net_device *netdev, + struct ethtool_channels *channels) +{ + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + + channels->max_rx = adapter->max_rx_queues; + channels->max_tx = adapter->max_tx_queues; + channels->max_other = 0; + channels->max_combined = 0; + channels->rx_count = adapter->req_rx_queues; + channels->tx_count = adapter->req_tx_queues; + channels->other_count = 0; + channels->combined_count = 0; +} + static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data) { + struct ibmvnic_adapter *adapter = netdev_priv(dev); int i; if (stringset != ETH_SS_STATS) @@ -1727,13 +1819,39 @@ static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data) for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN) memcpy(data, 
ibmvnic_stats[i].name, ETH_GSTRING_LEN); + + for (i = 0; i < adapter->req_tx_queues; i++) { + snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i); + data += ETH_GSTRING_LEN; + + snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i); + data += ETH_GSTRING_LEN; + + snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i); + data += ETH_GSTRING_LEN; + } + + for (i = 0; i < adapter->req_rx_queues; i++) { + snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i); + data += ETH_GSTRING_LEN; + + snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i); + data += ETH_GSTRING_LEN; + + snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i); + data += ETH_GSTRING_LEN; + } } static int ibmvnic_get_sset_count(struct net_device *dev, int sset) { + struct ibmvnic_adapter *adapter = netdev_priv(dev); + switch (sset) { case ETH_SS_STATS: - return ARRAY_SIZE(ibmvnic_stats); + return ARRAY_SIZE(ibmvnic_stats) + + adapter->req_tx_queues * NUM_TX_STATS + + adapter->req_rx_queues * NUM_RX_STATS; default: return -EOPNOTSUPP; } @@ -1744,7 +1862,7 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev, { struct ibmvnic_adapter *adapter = netdev_priv(dev); union ibmvnic_crq crq; - int i; + int i, j; memset(&crq, 0, sizeof(crq)); crq.request_statistics.first = IBMVNIC_CRQ_CMD; @@ -1759,7 +1877,26 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev, wait_for_completion(&adapter->stats_done); for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++) - data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset); + data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter, + ibmvnic_stats[i].offset)); + + for (j = 0; j < adapter->req_tx_queues; j++) { + data[i] = adapter->tx_stats_buffers[j].packets; + i++; + data[i] = adapter->tx_stats_buffers[j].bytes; + i++; + data[i] = adapter->tx_stats_buffers[j].dropped_packets; + i++; + } + + for (j = 0; j < adapter->req_rx_queues; j++) { + data[i] = adapter->rx_stats_buffers[j].packets; + i++; + data[i] = adapter->rx_stats_buffers[j].bytes; + i++; + data[i] = adapter->rx_stats_buffers[j].interrupts; + i++; + } } static const struct ethtool_ops ibmvnic_ethtool_ops = { @@ -1768,6 +1905,7 @@ static const struct ethtool_ops ibmvnic_ethtool_ops = { .set_msglevel = ibmvnic_set_msglevel, .get_link = ibmvnic_get_link, .get_ringparam = ibmvnic_get_ringparam, + .get_channels = ibmvnic_get_channels, .get_strings = ibmvnic_get_strings, .get_sset_count = ibmvnic_get_sset_count, .get_ethtool_stats = ibmvnic_get_ethtool_stats, @@ -1800,12 +1938,14 @@ static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter) int i, rc; for (i = 0; i < adapter->req_tx_queues; i++) { + netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i); rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]); if (rc) return rc; } for (i = 0; i < adapter->req_rx_queues; i++) { + netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i); rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]); if (rc) return rc; @@ -1909,6 +2049,8 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter) if (!adapter->tx_scrq[i]) continue; + netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n", + i); if (adapter->tx_scrq[i]->irq) { free_irq(adapter->tx_scrq[i]->irq, adapter->tx_scrq[i]); @@ -1928,6 +2070,8 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter) if (!adapter->rx_scrq[i]) continue; + netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n", + i); if (adapter->rx_scrq[i]->irq) { free_irq(adapter->rx_scrq[i]->irq, adapter->rx_scrq[i]); @@ -2064,6 +2208,8 @@ static irqreturn_t ibmvnic_interrupt_rx(int irq, void 
*instance) struct ibmvnic_sub_crq_queue *scrq = instance; struct ibmvnic_adapter *adapter = scrq->adapter; + adapter->rx_stats_buffers[scrq->scrq_num].interrupts++; + if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) { disable_scrq_irq(adapter, scrq); __napi_schedule(&adapter->napi[scrq->scrq_num]); @@ -2080,6 +2226,8 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter) int rc = 0; for (i = 0; i < adapter->req_tx_queues; i++) { + netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n", + i); scrq = adapter->tx_scrq[i]; scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); @@ -2101,6 +2249,8 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter) } for (i = 0; i < adapter->req_rx_queues; i++) { + netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n", + i); scrq = adapter->rx_scrq[i]; scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); if (!scrq->irq) { @@ -3739,31 +3889,35 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) do { rc = ibmvnic_init(adapter); - if (rc && rc != EAGAIN) { - free_netdev(netdev); - return rc; - } + if (rc && rc != EAGAIN) + goto ibmvnic_init_fail; } while (rc == EAGAIN); netdev->mtu = adapter->req_mtu - ETH_HLEN; rc = device_create_file(&dev->dev, &dev_attr_failover); - if (rc) { - free_netdev(netdev); - return rc; - } + if (rc) + goto ibmvnic_init_fail; rc = register_netdev(netdev); if (rc) { dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc); - device_remove_file(&dev->dev, &dev_attr_failover); - free_netdev(netdev); - return rc; + goto ibmvnic_register_fail; } dev_info(&dev->dev, "ibmvnic registered\n"); adapter->state = VNIC_PROBED; return 0; + +ibmvnic_register_fail: + device_remove_file(&dev->dev, &dev_attr_failover); + +ibmvnic_init_fail: + release_sub_crqs(adapter); + release_crq_queue(adapter); + free_netdev(netdev); + + return rc; } static int ibmvnic_remove(struct vio_dev *dev) @@ -3859,20 +4013,16 @@ static int ibmvnic_resume(struct device *dev) { struct net_device *netdev = dev_get_drvdata(dev); struct ibmvnic_adapter *adapter = netdev_priv(netdev); - int i; if (adapter->state != VNIC_OPEN) return 0; - /* kick the interrupt handlers just in case we lost an interrupt */ - for (i = 0; i < adapter->req_rx_queues; i++) - ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq, - adapter->rx_scrq[i]); + tasklet_schedule(&adapter->tasklet); return 0; } -static struct vio_device_id ibmvnic_device_table[] = { +static const struct vio_device_id ibmvnic_device_table[] = { {"network", "IBM,vnic"}, {"", "" } }; diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 8eff6e15f4bb..d02257ccc377 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -166,6 +166,20 @@ struct ibmvnic_statistics { u8 reserved[72]; } __packed __aligned(8); +#define NUM_TX_STATS 3 +struct ibmvnic_tx_queue_stats { + u64 packets; + u64 bytes; + u64 dropped_packets; +}; + +#define NUM_RX_STATS 3 +struct ibmvnic_rx_queue_stats { + u64 packets; + u64 bytes; + u64 interrupts; +}; + struct ibmvnic_acl_buffer { __be32 len; __be32 version; @@ -956,6 +970,9 @@ struct ibmvnic_adapter { int tx_send_failed; int tx_map_failed; + struct ibmvnic_tx_queue_stats *tx_stats_buffers; + struct ibmvnic_rx_queue_stats *rx_stats_buffers; + int phys_link_state; int logical_link_state; diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index 66bd5060a65b..d803b1a12349 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ 
b/drivers/net/ethernet/intel/e1000e/hw.h @@ -100,6 +100,10 @@ struct e1000_hw; #define E1000_DEV_ID_PCH_CNP_I219_V6 0x15BE #define E1000_DEV_ID_PCH_CNP_I219_LM7 0x15BB #define E1000_DEV_ID_PCH_CNP_I219_V7 0x15BC +#define E1000_DEV_ID_PCH_ICP_I219_LM8 0x15DF +#define E1000_DEV_ID_PCH_ICP_I219_V8 0x15E0 +#define E1000_DEV_ID_PCH_ICP_I219_LM9 0x15E1 +#define E1000_DEV_ID_PCH_ICP_I219_V9 0x15E2 #define E1000_REVISION_4 4 diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 68ea8b4555ab..d6d4ed7acf03 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -2437,6 +2437,8 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw) if (hw->phy.revision < 2) { e1000e_phy_sw_reset(hw); ret_val = e1e_wphy(hw, MII_BMCR, 0x3140); + if (ret_val) + return ret_val; } } diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 2dcb5463d9b8..327dfe5bedc0 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -7544,6 +7544,10 @@ static const struct pci_device_id e1000_pci_tbl[] = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_V6), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_LM7), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_V7), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_LM8), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_V8), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_LM9), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_V9), board_pch_cnp }, { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ }; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 5e37387c7082..e69d49d91d67 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -1265,15 +1265,17 @@ int fm10k_setup_tc(struct net_device *dev, u8 tc) return err; } -static int __fm10k_setup_tc(struct net_device *dev, u32 handle, u32 chain_index, - __be16 proto, struct tc_to_netdev *tc) +static int __fm10k_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) { - if (tc->type != TC_SETUP_MQPRIO) - return -EINVAL; + struct tc_mqprio_qopt *mqprio = type_data; - tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + if (type != TC_SETUP_MQPRIO) + return -EOPNOTSUPP; - return fm10k_setup_tc(dev, tc->mqprio->num_tc); + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + + return fm10k_setup_tc(dev, mqprio->num_tc); } static void fm10k_assign_l2_accel(struct fm10k_intfc *interface, diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index d616f698e155..d0c1bf5441d8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -75,11 +75,11 @@ #define I40E_MIN_VSI_ALLOC 83 /* LAN, ATR, FCOE, 64 VF */ /* max 16 qps */ #define i40e_default_queues_per_vmdq(pf) \ - (((pf)->flags & I40E_FLAG_RSS_AQ_CAPABLE) ? 4 : 1) + (((pf)->hw_features & I40E_HW_RSS_AQ_CAPABLE) ? 4 : 1) #define I40E_DEFAULT_QUEUES_PER_VF 4 #define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */ #define i40e_pf_get_max_q_per_tc(pf) \ - (((pf)->flags & I40E_FLAG_128_QP_RSS_CAPABLE) ? 128 : 64) + (((pf)->hw_features & I40E_HW_128_QP_RSS_CAPABLE) ? 
128 : 64) #define I40E_FDIR_RING 0 #define I40E_FDIR_RING_COUNT 32 #define I40E_MAX_AQ_BUF_SIZE 4096 @@ -401,6 +401,27 @@ struct i40e_pf { struct timer_list service_timer; struct work_struct service_task; + u64 hw_features; +#define I40E_HW_RSS_AQ_CAPABLE BIT_ULL(0) +#define I40E_HW_128_QP_RSS_CAPABLE BIT_ULL(1) +#define I40E_HW_ATR_EVICT_CAPABLE BIT_ULL(2) +#define I40E_HW_WB_ON_ITR_CAPABLE BIT_ULL(3) +#define I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE BIT_ULL(4) +#define I40E_HW_NO_PCI_LINK_CHECK BIT_ULL(5) +#define I40E_HW_100M_SGMII_CAPABLE BIT_ULL(6) +#define I40E_HW_NO_DCB_SUPPORT BIT_ULL(7) +#define I40E_HW_USE_SET_LLDP_MIB BIT_ULL(8) +#define I40E_HW_GENEVE_OFFLOAD_CAPABLE BIT_ULL(9) +#define I40E_HW_PTP_L4_CAPABLE BIT_ULL(10) +#define I40E_HW_WOL_MC_MAGIC_PKT_WAKE BIT_ULL(11) +#define I40E_HW_MPLS_HDR_OFFLOAD_CAPABLE BIT_ULL(12) +#define I40E_HW_HAVE_CRT_RETIMER BIT_ULL(13) +#define I40E_HW_OUTER_UDP_CSUM_CAPABLE BIT_ULL(14) +#define I40E_HW_PHY_CONTROLS_LEDS BIT_ULL(15) +#define I40E_HW_STOP_FW_LLDP BIT_ULL(16) +#define I40E_HW_PORT_ID_VALID BIT_ULL(17) +#define I40E_HW_RESTART_AUTONEG BIT_ULL(18) + u64 flags; #define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(1) #define I40E_FLAG_MSI_ENABLED BIT_ULL(2) @@ -420,33 +441,14 @@ struct i40e_pf { #define I40E_FLAG_PTP BIT_ULL(25) #define I40E_FLAG_MFP_ENABLED BIT_ULL(26) #define I40E_FLAG_UDP_FILTER_SYNC BIT_ULL(27) -#define I40E_FLAG_PORT_ID_VALID BIT_ULL(28) #define I40E_FLAG_DCB_CAPABLE BIT_ULL(29) -#define I40E_FLAG_RSS_AQ_CAPABLE BIT_ULL(31) -#define I40E_FLAG_HW_ATR_EVICT_CAPABLE BIT_ULL(32) -#define I40E_FLAG_OUTER_UDP_CSUM_CAPABLE BIT_ULL(33) -#define I40E_FLAG_128_QP_RSS_CAPABLE BIT_ULL(34) -#define I40E_FLAG_WB_ON_ITR_CAPABLE BIT_ULL(35) #define I40E_FLAG_VEB_STATS_ENABLED BIT_ULL(37) -#define I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE BIT_ULL(38) #define I40E_FLAG_LINK_POLLING_ENABLED BIT_ULL(39) #define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(40) -#define I40E_FLAG_GENEVE_OFFLOAD_CAPABLE BIT_ULL(41) -#define I40E_FLAG_NO_PCI_LINK_CHECK BIT_ULL(42) -#define I40E_FLAG_100M_SGMII_CAPABLE BIT_ULL(43) -#define I40E_FLAG_RESTART_AUTONEG BIT_ULL(44) -#define I40E_FLAG_NO_DCB_SUPPORT BIT_ULL(45) -#define I40E_FLAG_USE_SET_LLDP_MIB BIT_ULL(46) -#define I40E_FLAG_STOP_FW_LLDP BIT_ULL(47) -#define I40E_FLAG_PHY_CONTROLS_LEDS BIT_ULL(48) -#define I40E_FLAG_PF_MAC BIT_ULL(50) #define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT_ULL(51) -#define I40E_FLAG_HAVE_CRT_RETIMER BIT_ULL(52) -#define I40E_FLAG_PTP_L4_CAPABLE BIT_ULL(53) #define I40E_FLAG_CLIENT_RESET BIT_ULL(54) #define I40E_FLAG_TEMP_LINK_POLLING BIT_ULL(55) #define I40E_FLAG_CLIENT_L2_CHANGE BIT_ULL(56) -#define I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE BIT_ULL(57) #define I40E_FLAG_LEGACY_RX BIT_ULL(58) struct i40e_client_instance *cinst; diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 8e082a946411..111426ba5fbc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -328,9 +328,9 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, len = buf_len; /* write the full 16-byte chunks */ if (hw->debug_mask & mask) { - char prefix[20]; + char prefix[27]; - snprintf(prefix, 20, + snprintf(prefix, sizeof(prefix), "i40e %02x:%02x.%x: \t0x", hw->bus.bus_id, hw->bus.device, @@ -2529,6 +2529,10 @@ i40e_status i40e_update_link_info(struct i40e_hw *hw) if (status) return status; + hw->phy.link_info.req_fec_info = + abilities.fec_cfg_curr_mod_ext_info & + (I40E_AQ_REQUEST_FEC_KR | 
I40E_AQ_REQUEST_FEC_RS); + memcpy(hw->phy.link_info.module_type, &abilities.module_type, sizeof(hw->phy.link_info.module_type)); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 9692a5294fa3..05e89864f781 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -271,7 +271,7 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported, *advertising |= ADVERTISED_Autoneg; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) *advertising |= ADVERTISED_1000baseT_Full; - if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) { + if (pf->hw_features & I40E_HW_100M_SGMII_CAPABLE) { *supported |= SUPPORTED_100baseT_Full; *advertising |= ADVERTISED_100baseT_Full; } @@ -340,12 +340,12 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported, *advertising |= ADVERTISED_20000baseKR2_Full; } if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR) { - if (!(pf->flags & I40E_FLAG_HAVE_CRT_RETIMER)) + if (!(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) *supported |= SUPPORTED_10000baseKR_Full | SUPPORTED_Autoneg; *advertising |= ADVERTISED_Autoneg; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) - if (!(pf->flags & I40E_FLAG_HAVE_CRT_RETIMER)) + if (!(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) *advertising |= ADVERTISED_10000baseKR_Full; } if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) { @@ -356,12 +356,12 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported, *advertising |= ADVERTISED_10000baseKX4_Full; } if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX) { - if (!(pf->flags & I40E_FLAG_HAVE_CRT_RETIMER)) + if (!(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) *supported |= SUPPORTED_1000baseKX_Full | SUPPORTED_Autoneg; *advertising |= ADVERTISED_Autoneg; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) - if (!(pf->flags & I40E_FLAG_HAVE_CRT_RETIMER)) + if (!(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) *advertising |= ADVERTISED_1000baseKX_Full; } if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_KR || @@ -474,7 +474,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, SUPPORTED_1000baseT_Full; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) advertising |= ADVERTISED_1000baseT_Full; - if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) { + if (pf->hw_features & I40E_HW_100M_SGMII_CAPABLE) { supported |= SUPPORTED_100baseT_Full; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) @@ -1091,7 +1091,7 @@ static void i40e_get_regs(struct net_device *netdev, struct ethtool_regs *regs, struct i40e_pf *pf = np->vsi->back; struct i40e_hw *hw = &pf->hw; u32 *reg_buf = p; - int i, j, ri; + unsigned int i, j, ri; u32 reg; /* Tell ethtool which driver-version-specific regs output we have. 
@@ -1550,9 +1550,9 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, struct i40e_ring *tx_ring, *rx_ring; struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; + unsigned int j; int i = 0; char *p; - int j; struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi); unsigned int start; @@ -1637,7 +1637,7 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset, struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; char *p = (char *)data; - int i; + unsigned int i; switch (stringset) { case ETH_SS_TEST: @@ -1765,7 +1765,7 @@ static int i40e_get_ts_info(struct net_device *dev, BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ); - if (pf->flags & I40E_FLAG_PTP_L4_CAPABLE) + if (pf->hw_features & I40E_HW_PTP_L4_CAPABLE) info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) | @@ -2005,7 +2005,7 @@ static int i40e_set_phys_id(struct net_device *netdev, switch (state) { case ETHTOOL_ID_ACTIVE: - if (!(pf->flags & I40E_FLAG_PHY_CONTROLS_LEDS)) { + if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) { pf->led_status = i40e_led_get(hw); } else { i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_ALL, NULL); @@ -2015,19 +2015,19 @@ static int i40e_set_phys_id(struct net_device *netdev, } return blink_freq; case ETHTOOL_ID_ON: - if (!(pf->flags & I40E_FLAG_PHY_CONTROLS_LEDS)) + if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) i40e_led_set(hw, 0xf, false); else ret = i40e_led_set_phy(hw, true, pf->led_status, 0); break; case ETHTOOL_ID_OFF: - if (!(pf->flags & I40E_FLAG_PHY_CONTROLS_LEDS)) + if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) i40e_led_set(hw, 0x0, false); else ret = i40e_led_set_phy(hw, false, pf->led_status, 0); break; case ETHTOOL_ID_INACTIVE: - if (!(pf->flags & I40E_FLAG_PHY_CONTROLS_LEDS)) { + if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) { i40e_led_set(hw, pf->led_status, false); } else { ret = i40e_led_set_phy(hw, false, pf->led_status, @@ -2194,14 +2194,29 @@ static int __i40e_set_coalesce(struct net_device *netdev, int queue) { struct i40e_netdev_priv *np = netdev_priv(netdev); + u16 intrl_reg, cur_rx_itr, cur_tx_itr; struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; - u16 intrl_reg; int i; if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq) vsi->work_limit = ec->tx_max_coalesced_frames_irq; + if (queue < 0) { + cur_rx_itr = vsi->rx_rings[0]->rx_itr_setting; + cur_tx_itr = vsi->tx_rings[0]->tx_itr_setting; + } else if (queue < vsi->num_queue_pairs) { + cur_rx_itr = vsi->rx_rings[queue]->rx_itr_setting; + cur_tx_itr = vsi->tx_rings[queue]->tx_itr_setting; + } else { + netif_info(pf, drv, netdev, "Invalid queue value, queue range is 0 - %d\n", + vsi->num_queue_pairs - 1); + return -EINVAL; + } + + cur_tx_itr &= ~I40E_ITR_DYNAMIC; + cur_rx_itr &= ~I40E_ITR_DYNAMIC; + /* tx_coalesce_usecs_high is ignored, use rx-usecs-high instead */ if (ec->tx_coalesce_usecs_high != vsi->int_rate_limit) { netif_info(pf, drv, netdev, "tx-usecs-high is not used, please program rx-usecs-high\n"); @@ -2214,15 +2229,34 @@ static int __i40e_set_coalesce(struct net_device *netdev, return -EINVAL; } - if (ec->rx_coalesce_usecs == 0) { - if (ec->use_adaptive_rx_coalesce) - netif_info(pf, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n"); - } else if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) || - (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1))) { - netif_info(pf, drv, 
netdev, "Invalid value, rx-usecs range is 0-8160\n"); - return -EINVAL; + if (ec->rx_coalesce_usecs != cur_rx_itr && + ec->use_adaptive_rx_coalesce) { + netif_info(pf, drv, netdev, "RX interrupt moderation cannot be changed if adaptive-rx is enabled.\n"); + return -EINVAL; } + if (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1)) { + netif_info(pf, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n"); + return -EINVAL; + } + + if (ec->tx_coalesce_usecs != cur_tx_itr && + ec->use_adaptive_tx_coalesce) { + netif_info(pf, drv, netdev, "TX interrupt moderation cannot be changed if adaptive-tx is enabled.\n"); + return -EINVAL; + } + + if (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1)) { + netif_info(pf, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n"); + return -EINVAL; + } + + if (ec->use_adaptive_rx_coalesce && !cur_rx_itr) + ec->rx_coalesce_usecs = I40E_MIN_ITR << 1; + + if (ec->use_adaptive_tx_coalesce && !cur_tx_itr) + ec->tx_coalesce_usecs = I40E_MIN_ITR << 1; + intrl_reg = i40e_intrl_usec_to_reg(ec->rx_coalesce_usecs_high); vsi->int_rate_limit = INTRL_REG_TO_USEC(intrl_reg); if (vsi->int_rate_limit != ec->rx_coalesce_usecs_high) { @@ -2230,27 +2264,14 @@ static int __i40e_set_coalesce(struct net_device *netdev, vsi->int_rate_limit); } - if (ec->tx_coalesce_usecs == 0) { - if (ec->use_adaptive_tx_coalesce) - netif_info(pf, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n"); - } else if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) || - (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1))) { - netif_info(pf, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n"); - return -EINVAL; - } - /* rx and tx usecs has per queue value. If user doesn't specify the queue, * apply to all queues. */ if (queue < 0) { for (i = 0; i < vsi->num_queue_pairs; i++) i40e_set_itr_per_queue(vsi, ec, i); - } else if (queue < vsi->num_queue_pairs) { - i40e_set_itr_per_queue(vsi, ec, queue); } else { - netif_info(pf, drv, netdev, "Invalid queue value, queue range is 0 - %d\n", - vsi->num_queue_pairs - 1); - return -EINVAL; + i40e_set_itr_per_queue(vsi, ec, queue); } return 0; @@ -2727,22 +2748,22 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) switch (nfc->flow_type) { case TCP_V4_FLOW: flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP; - if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) + if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK); break; case TCP_V6_FLOW: flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_TCP; - if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) + if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK); - if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) + if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK); break; case UDP_V4_FLOW: flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP; - if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) + if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP); @@ -2751,7 +2772,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) break; case UDP_V6_FLOW: flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_UDP; - if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) + if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) hena |= 
BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP); @@ -4069,23 +4090,26 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags) struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; - u64 changed_flags; + u64 orig_flags, new_flags, changed_flags; u32 i, j; - changed_flags = pf->flags; + orig_flags = READ_ONCE(pf->flags); + new_flags = orig_flags; for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) { const struct i40e_priv_flags *priv_flags; priv_flags = &i40e_gstrings_priv_flags[i]; - if (priv_flags->read_only) - continue; - if (flags & BIT(i)) - pf->flags |= priv_flags->flag; + new_flags |= priv_flags->flag; else - pf->flags &= ~(priv_flags->flag); + new_flags &= ~(priv_flags->flag); + + /* If this is a read-only flag, it can't be changed */ + if (priv_flags->read_only && + ((orig_flags ^ new_flags) & ~BIT(i))) + return -EOPNOTSUPP; } if (pf->hw.pf_id != 0) @@ -4096,18 +4120,40 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags) priv_flags = &i40e_gl_gstrings_priv_flags[j]; - if (priv_flags->read_only) - continue; - if (flags & BIT(i + j)) - pf->flags |= priv_flags->flag; + new_flags |= priv_flags->flag; else - pf->flags &= ~(priv_flags->flag); + new_flags &= ~(priv_flags->flag); + + /* If this is a read-only flag, it can't be changed */ + if (priv_flags->read_only && + ((orig_flags ^ new_flags) & ~BIT(i))) + return -EOPNOTSUPP; } flags_complete: - /* check for flags that changed */ - changed_flags ^= pf->flags; + /* Before we finalize any flag changes, we need to perform some + * checks to ensure that the changes are supported and safe. + */ + + /* ATR eviction is not supported on all devices */ + if ((new_flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) && + !(pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)) + return -EOPNOTSUPP; + + /* Compare and exchange the new flags into place. If we failed, that + * is if cmpxchg64 returns anything but the old value, this means that + * something else has modified the flags variable since we copied it + * originally. We'll just punt with an error and log something in the + * message buffer. + */ + if (cmpxchg64(&pf->flags, orig_flags, new_flags) != orig_flags) { + dev_warn(&pf->pdev->dev, + "Unable to update pf->flags as it was modified by another thread...\n"); + return -EAGAIN; + } + + changed_flags = orig_flags ^ new_flags; /* Process any additional changes needed as a result of flag changes. * The changed_flags value reflects the list of bits that were @@ -4121,10 +4167,6 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags) set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state); } - /* Only allow ATR evict on hardware that is capable of handling it */ - if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) - pf->flags &= ~I40E_FLAG_HW_ATR_EVICT_ENABLED; - if (changed_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT) { u16 sw_flags = 0, valid_flags = 0; int ret; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 2db93d3f6d23..6498da8806cb 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -2595,9 +2595,20 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid) { int err; - if (!vid || vsi->info.pvid) + if (vsi->info.pvid) return -EINVAL; + /* The network stack will attempt to add VID=0, with the intention to + * receive priority tagged packets with a VLAN of 0. 
Our HW receives + * these packets by default when configured to receive untagged + * packets, so we don't need to add a filter for this case. + * Additionally, HW interprets adding a VID=0 filter as meaning to + * receive *only* tagged traffic and stops receiving untagged traffic. + * Thus, we do not want to actually add a filter for VID=0 + */ + if (!vid) + return 0; + /* Locked once because all functions invoked below iterates list*/ spin_lock_bh(&vsi->mac_filter_hash_lock); err = i40e_add_vlan_all_mac(vsi, vid); @@ -2674,15 +2685,7 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev, if (vid >= VLAN_N_VID) return -EINVAL; - /* If the network stack called us with vid = 0 then - * it is asking to receive priority tagged packets with - * vlan id 0. Our HW receives them by default when configured - * to receive untagged packets so there is no need to add an - * extra filter for vlan 0 tagged packets. - */ - if (vid) - ret = i40e_vsi_add_vlan(vsi, vid); - + ret = i40e_vsi_add_vlan(vsi, vid); if (!ret) set_bit(vid, vsi->active_vlans); @@ -2713,44 +2716,6 @@ static int i40e_vlan_rx_kill_vid(struct net_device *netdev, return 0; } -/** - * i40e_macaddr_init - explicitly write the mac address filters - * - * @vsi: pointer to the vsi - * @macaddr: the MAC address - * - * This is needed when the macaddr has been obtained by other - * means than the default, e.g., from Open Firmware or IDPROM. - * Returns 0 on success, negative on failure - **/ -static int i40e_macaddr_init(struct i40e_vsi *vsi, u8 *macaddr) -{ - int ret; - struct i40e_aqc_add_macvlan_element_data element; - - ret = i40e_aq_mac_address_write(&vsi->back->hw, - I40E_AQC_WRITE_TYPE_LAA_WOL, - macaddr, NULL); - if (ret) { - dev_info(&vsi->back->pdev->dev, - "Addr change for VSI failed: %d\n", ret); - return -EADDRNOTAVAIL; - } - - memset(&element, 0, sizeof(element)); - ether_addr_copy(element.mac_addr, macaddr); - element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH); - ret = i40e_aq_add_macvlan(&vsi->back->hw, vsi->seid, &element, 1, NULL); - if (ret) { - dev_info(&vsi->back->pdev->dev, - "add filter failed err %s aq_err %s\n", - i40e_stat_str(&vsi->back->hw, ret), - i40e_aq_str(&vsi->back->hw, - vsi->back->hw.aq.asq_last_status)); - } - return ret; -} - /** * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up * @vsi: the vsi being brought back up @@ -2909,22 +2874,15 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi) static void i40e_config_xps_tx_ring(struct i40e_ring *ring) { struct i40e_vsi *vsi = ring->vsi; - cpumask_var_t mask; if (!ring->q_vector || !ring->netdev) return; - /* Single TC mode enable XPS */ - if (vsi->tc_config.numtc <= 1) { - if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) - netif_set_xps_queue(ring->netdev, - &ring->q_vector->affinity_mask, - ring->queue_index); - } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) { - /* Disable XPS to allow selection based on TC */ - bitmap_zero(cpumask_bits(mask), nr_cpumask_bits); - netif_set_xps_queue(ring->netdev, mask, ring->queue_index); - free_cpumask_var(mask); + if ((vsi->tc_config.numtc <= 1) && + !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) { + netif_set_xps_queue(ring->netdev, + get_cpu_mask(ring->q_vector->v_idx), + ring->queue_index); } /* schedule our worker thread which will take care of @@ -3203,19 +3161,8 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) **/ static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi) { - struct i40e_pf *pf = vsi->back; - int err; - if 
(vsi->netdev) i40e_set_rx_mode(vsi->netdev); - - if (!!(pf->flags & I40E_FLAG_PF_MAC)) { - err = i40e_macaddr_init(vsi, pf->hw.mac.addr); - if (err) { - dev_warn(&pf->pdev->dev, - "could not set up macaddr; err %d\n", err); - } - } } /** @@ -3495,7 +3442,7 @@ static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify, struct i40e_q_vector *q_vector = container_of(notify, struct i40e_q_vector, affinity_notify); - q_vector->affinity_mask = *mask; + cpumask_copy(&q_vector->affinity_mask, mask); } /** @@ -3559,8 +3506,10 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename) q_vector->affinity_notify.notify = i40e_irq_affinity_notify; q_vector->affinity_notify.release = i40e_irq_affinity_release; irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); - /* assign the mask for this irq */ - irq_set_affinity_hint(irq_num, &q_vector->affinity_mask); + /* get_cpu_mask returns a static constant mask with + * a permanent lifetime so it's ok to use here. + */ + irq_set_affinity_hint(irq_num, get_cpu_mask(q_vector->v_idx)); } vsi->irqs_ready = true; @@ -4342,7 +4291,7 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi) /* clear the affinity notifier in the IRQ descriptor */ irq_set_affinity_notifier(irq_num, NULL); - /* clear the affinity_mask in the IRQ descriptor */ + /* remove our suggested affinity mask for this IRQ */ irq_set_affinity_hint(irq_num, NULL); synchronize_irq(irq_num); free_irq(irq_num, vsi->q_vectors[i]); @@ -4773,7 +4722,7 @@ static void i40e_detect_recover_hung(struct i40e_pf *pf) { struct net_device *netdev; struct i40e_vsi *vsi; - int i; + unsigned int i; /* Only for LAN VSI */ vsi = pf->vsi[pf->lan_vsi]; @@ -5350,7 +5299,7 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf) int err = 0; /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */ - if (pf->flags & I40E_FLAG_NO_DCB_SUPPORT) + if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT) goto out; /* Get the initial DCB configuration */ @@ -5400,6 +5349,7 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) char *speed = "Unknown"; char *fc = "Unknown"; char *fec = ""; + char *req_fec = ""; char *an = ""; new_speed = vsi->back->hw.phy.link_info.link_speed; @@ -5461,6 +5411,7 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) } if (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) { + req_fec = ", Requested FEC: None"; fec = ", FEC: None"; an = ", Autoneg: False"; @@ -5473,10 +5424,22 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) else if (vsi->back->hw.phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA) fec = ", FEC: CL108 RS-FEC"; + + /* 'CL108 RS-FEC' should be displayed when RS is requested, or + * both RS and FC are requested + */ + if (vsi->back->hw.phy.link_info.req_fec_info & + (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) { + if (vsi->back->hw.phy.link_info.req_fec_info & + I40E_AQ_REQUEST_FEC_RS) + req_fec = ", Requested FEC: CL108 RS-FEC"; + else + req_fec = ", Requested FEC: CL74 FC-FEC/BASE-R"; + } } - netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex%s%s, Flow Control: %s\n", - speed, fec, an, fc); + netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex%s%s%s, Flow Control: %s\n", + speed, req_fec, fec, an, fc); } /** @@ -5656,16 +5619,17 @@ static int i40e_setup_tc(struct net_device *netdev, u8 tc) return ret; } -static int __i40e_setup_tc(struct net_device *netdev, u32 handle, - u32 chain_index, __be16 proto, - struct tc_to_netdev *tc) +static int __i40e_setup_tc(struct net_device 
*netdev, enum tc_setup_type type, + void *type_data) { - if (tc->type != TC_SETUP_MQPRIO) - return -EINVAL; + struct tc_mqprio_qopt *mqprio = type_data; - tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + if (type != TC_SETUP_MQPRIO) + return -EOPNOTSUPP; - return i40e_setup_tc(netdev, tc->mqprio->num_tc); + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + + return i40e_setup_tc(netdev, mqprio->num_tc); } /** @@ -7331,7 +7295,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) wr32(hw, I40E_REG_MSS, val); } - if (pf->flags & I40E_FLAG_RESTART_AUTONEG) { + if (pf->hw_features & I40E_HW_RESTART_AUTONEG) { msleep(75); ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); if (ret) @@ -7520,6 +7484,18 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) i40e_flush(hw); } +static const char *i40e_tunnel_name(struct i40e_udp_port_config *port) +{ + switch (port->type) { + case UDP_TUNNEL_TYPE_VXLAN: + return "vxlan"; + case UDP_TUNNEL_TYPE_GENEVE: + return "geneve"; + default: + return "unknown"; + } +} + /** * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters * @pf: board private structure @@ -7565,14 +7541,14 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf) ret = i40e_aq_del_udp_tunnel(hw, i, NULL); if (ret) { - dev_dbg(&pf->pdev->dev, - "%s %s port %d, index %d failed, err %s aq_err %s\n", - pf->udp_ports[i].type ? "vxlan" : "geneve", - port ? "add" : "delete", - port, i, - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, - pf->hw.aq.asq_last_status)); + dev_info(&pf->pdev->dev, + "%s %s port %d, index %d failed, err %s aq_err %s\n", + i40e_tunnel_name(&pf->udp_ports[i]), + port ? "add" : "delete", + port, i, + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); pf->udp_ports[i].port = 0; } } @@ -7957,7 +7933,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) ring->count = vsi->num_desc; ring->size = 0; ring->dcb_tc = 0; - if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) + if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; ring->tx_itr_setting = pf->tx_itr_default; vsi->tx_rings[i] = ring++; @@ -7974,7 +7950,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) ring->count = vsi->num_desc; ring->size = 0; ring->dcb_tc = 0; - if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) + if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; set_ring_xdp(ring); ring->tx_itr_setting = pf->tx_itr_default; @@ -8261,7 +8237,7 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu) q_vector->vsi = vsi; q_vector->v_idx = v_idx; - cpumask_set_cpu(cpu, &q_vector->affinity_mask); + cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); if (vsi->netdev) netif_napi_add(vsi->netdev, &q_vector->napi, @@ -8510,7 +8486,7 @@ static int i40e_vsi_config_rss(struct i40e_vsi *vsi) u8 *lut; int ret; - if (!(pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)) + if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)) return 0; if (!vsi->rss_size) @@ -8640,7 +8616,7 @@ int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) { struct i40e_pf *pf = vsi->back; - if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) + if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) return i40e_config_rss_aq(vsi, seed, lut, lut_size); else return i40e_config_rss_reg(vsi, seed, lut, lut_size); @@ -8659,7 +8635,7 @@ int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) { struct i40e_pf *pf = vsi->back; - if (pf->flags & 
I40E_FLAG_RSS_AQ_CAPABLE) + if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) return i40e_get_rss_aq(vsi, seed, lut, lut_size); else return i40e_get_rss_reg(vsi, seed, lut, lut_size); @@ -8987,25 +8963,56 @@ static int i40e_sw_init(struct i40e_pf *pf) pf->hw.func_caps.fd_filters_best_effort; } + if (pf->hw.mac.type == I40E_MAC_X722) { + pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE | + I40E_HW_128_QP_RSS_CAPABLE | + I40E_HW_ATR_EVICT_CAPABLE | + I40E_HW_WB_ON_ITR_CAPABLE | + I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE | + I40E_HW_NO_PCI_LINK_CHECK | + I40E_HW_USE_SET_LLDP_MIB | + I40E_HW_GENEVE_OFFLOAD_CAPABLE | + I40E_HW_PTP_L4_CAPABLE | + I40E_HW_WOL_MC_MAGIC_PKT_WAKE | + I40E_HW_OUTER_UDP_CSUM_CAPABLE); + +#define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03 + if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) != + I40E_FDEVICT_PCTYPE_DEFAULT) { + dev_warn(&pf->pdev->dev, + "FD EVICT PCTYPES are not right, disable FD HW EVICT\n"); + pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE; + } + } else if ((pf->hw.aq.api_maj_ver > 1) || + ((pf->hw.aq.api_maj_ver == 1) && + (pf->hw.aq.api_min_ver > 4))) { + /* Supported in FW API version higher than 1.4 */ + pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE; + } + + /* Enable HW ATR eviction if possible */ + if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE) + pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED; + if ((pf->hw.mac.type == I40E_MAC_XL710) && (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || (pf->hw.aq.fw_maj_ver < 4))) { - pf->flags |= I40E_FLAG_RESTART_AUTONEG; + pf->hw_features |= I40E_HW_RESTART_AUTONEG; /* No DCB support for FW < v4.33 */ - pf->flags |= I40E_FLAG_NO_DCB_SUPPORT; + pf->hw_features |= I40E_HW_NO_DCB_SUPPORT; } /* Disable FW LLDP if FW < v4.3 */ if ((pf->hw.mac.type == I40E_MAC_XL710) && (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) || (pf->hw.aq.fw_maj_ver < 4))) - pf->flags |= I40E_FLAG_STOP_FW_LLDP; + pf->hw_features |= I40E_HW_STOP_FW_LLDP; /* Use the FW Set LLDP MIB API if FW > v4.40 */ if ((pf->hw.mac.type == I40E_MAC_XL710) && (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) || (pf->hw.aq.fw_maj_ver >= 5))) - pf->flags |= I40E_FLAG_USE_SET_LLDP_MIB; + pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB; if (pf->hw.func_caps.vmdq) { pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI; @@ -9028,29 +9035,6 @@ static int i40e_sw_init(struct i40e_pf *pf) I40E_MAX_VF_COUNT); } #endif /* CONFIG_PCI_IOV */ - if (pf->hw.mac.type == I40E_MAC_X722) { - pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE - | I40E_FLAG_128_QP_RSS_CAPABLE - | I40E_FLAG_HW_ATR_EVICT_CAPABLE - | I40E_FLAG_OUTER_UDP_CSUM_CAPABLE - | I40E_FLAG_WB_ON_ITR_CAPABLE - | I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE - | I40E_FLAG_NO_PCI_LINK_CHECK - | I40E_FLAG_USE_SET_LLDP_MIB - | I40E_FLAG_GENEVE_OFFLOAD_CAPABLE - | I40E_FLAG_PTP_L4_CAPABLE - | I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE; - } else if ((pf->hw.aq.api_maj_ver > 1) || - ((pf->hw.aq.api_maj_ver == 1) && - (pf->hw.aq.api_min_ver > 4))) { - /* Supported in FW API version higher than 1.4 */ - pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; - } - - /* Enable HW ATR eviction if possible */ - if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) - pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED; - pf->eeprom_version = 0xDEAD; pf->lan_veb = I40E_NO_VEB; pf->lan_vsi = I40E_NO_VSI; @@ -9231,7 +9215,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev, pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN; break; case UDP_TUNNEL_TYPE_GENEVE: - if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE)) + if (!(pf->hw_features & 
I40E_HW_GENEVE_OFFLOAD_CAPABLE)) return; pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE; break; @@ -9298,7 +9282,7 @@ static int i40e_get_phys_port_id(struct net_device *netdev, struct i40e_pf *pf = np->vsi->back; struct i40e_hw *hw = &pf->hw; - if (!(pf->flags & I40E_FLAG_PORT_ID_VALID)) + if (!(pf->hw_features & I40E_HW_PORT_ID_VALID)) return -EOPNOTSUPP; ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id)); @@ -9589,6 +9573,7 @@ static int i40e_xdp(struct net_device *dev, return i40e_xdp_setup(vsi, xdp->prog); case XDP_QUERY_PROG: xdp->prog_attached = i40e_enabled_xdp_vsi(vsi); + xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0; return 0; default: return -EINVAL; @@ -9675,7 +9660,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) NETIF_F_RXCSUM | 0; - if (!(pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE)) + if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE)) netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; @@ -9714,8 +9699,13 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) i40e_add_mac_filter(vsi, mac_addr); spin_unlock_bh(&vsi->mac_filter_hash_lock); } else { - /* relate the VSI_VMDQ name to the VSI_MAIN name */ - snprintf(netdev->name, IFNAMSIZ, "%sv%%d", + /* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we + * are still limited by IFNAMSIZ, but we're adding 'v%d\0' to + * the end, which is 4 bytes long, so force truncation of the + * original name by IFNAMSIZ - 4 + */ + snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d", + IFNAMSIZ - 4, pf->vsi[pf->lan_vsi]->netdev->name); random_ether_addr(mac_addr); @@ -9756,8 +9746,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) /* MTU range: 68 - 9706 */ netdev->min_mtu = ETH_MIN_MTU; - netdev->max_mtu = I40E_MAX_RXBUFFER - - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); + netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD; return 0; } @@ -9890,13 +9879,15 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) */ ret = i40e_vsi_config_tc(vsi, enabled_tc); if (ret) { + /* Single TC condition is not fatal, + * message and continue + */ dev_info(&pf->pdev->dev, "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n", enabled_tc, i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); - ret = -ENOENT; } } break; @@ -10387,17 +10378,6 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, switch (vsi->type) { /* setup the netdev if needed */ case I40E_VSI_MAIN: - /* Apply relevant filters if a platform-specific mac - * address was selected. 
- */ - if (!!(pf->flags & I40E_FLAG_PF_MAC)) { - ret = i40e_macaddr_init(vsi, pf->hw.mac.addr); - if (ret) { - dev_warn(&pf->pdev->dev, - "could not set up macaddr; err %d\n", - ret); - } - } case I40E_VSI_VMDQ2: ret = i40e_config_netdev(vsi); if (ret) @@ -10434,7 +10414,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, break; } - if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) && + if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) && (vsi->type == I40E_VSI_VMDQ2)) { ret = i40e_vsi_config_rss(vsi); } @@ -11443,7 +11423,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * Ignore error return codes because if it was already disabled via * hardware settings this will fail */ - if (pf->flags & I40E_FLAG_STOP_FW_LLDP) { + if (pf->hw_features & I40E_HW_STOP_FW_LLDP) { dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n"); i40e_aq_stop_lldp(hw, true, NULL); } @@ -11460,7 +11440,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ether_addr_copy(hw->mac.perm_addr, hw->mac.addr); i40e_get_port_mac_addr(hw, hw->mac.port_addr); if (is_valid_ether_addr(hw->mac.port_addr)) - pf->flags |= I40E_FLAG_PORT_ID_VALID; + pf->hw_features |= I40E_HW_PORT_ID_VALID; pci_set_drvdata(pdev, pf); pci_save_state(pdev); @@ -11576,7 +11556,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) wr32(hw, I40E_REG_MSS, val); } - if (pf->flags & I40E_FLAG_RESTART_AUTONEG) { + if (pf->hw_features & I40E_HW_RESTART_AUTONEG) { msleep(75); err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); if (err) @@ -11663,7 +11643,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * and will report PCI Gen 1 x 1 by default so don't bother * checking them. */ - if (!(pf->flags & I40E_FLAG_NO_PCI_LINK_CHECK)) { + if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) { char speed[PCI_SPEED_SIZE] = "Unknown"; char width[PCI_WIDTH_SIZE] = "Unknown"; @@ -11734,9 +11714,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) || (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4)) - pf->flags |= I40E_FLAG_PHY_CONTROLS_LEDS; + pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS; if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722) - pf->flags |= I40E_FLAG_HAVE_CRT_RETIMER; + pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER; /* print a string summarizing features */ i40e_print_features(pf); @@ -12048,7 +12028,7 @@ static void i40e_shutdown(struct pci_dev *pdev) */ i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); - if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE)) + if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE)) i40e_enable_mc_magic_wake(pf); i40e_prep_for_reset(pf, false); @@ -12080,7 +12060,7 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state) set_bit(__I40E_SUSPENDED, pf->state); set_bit(__I40E_DOWN, pf->state); - if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE)) + if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE)) i40e_enable_mc_magic_wake(pf); i40e_prep_for_reset(pf, false); @@ -12089,7 +12069,10 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state) wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? 
I40E_PFPM_WUFC_MAG_MASK : 0)); i40e_stop_misc_vector(pf); - + if (pf->msix_entries) { + synchronize_irq(pf->msix_entries[0].vector); + free_irq(pf->msix_entries[0].vector, pf); + } retval = pci_save_state(pdev); if (retval) return retval; @@ -12129,6 +12112,15 @@ static int i40e_resume(struct pci_dev *pdev) /* handling the reset will rebuild the device state */ if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) { clear_bit(__I40E_DOWN, pf->state); + if (pf->msix_entries) { + err = request_irq(pf->msix_entries[0].vector, + i40e_intr, 0, pf->int_name, pf); + if (err) { + dev_err(&pf->pdev->dev, + "request_irq for %s failed: %d\n", + pf->int_name, err); + } + } i40e_reset_and_rebuild(pf, false, false); } @@ -12168,12 +12160,14 @@ static int __init i40e_init_module(void) i40e_driver_string, i40e_driver_version_str); pr_info("%s: %s\n", i40e_driver_name, i40e_copyright); - /* we will see if single thread per module is enough for now, - * it can't be any worse than using the system workqueue which - * was already single threaded + /* There is no need to throttle the number of active tasks because + * each device limits its own task using a state bit for scheduling + * the service task, and the device tasks do not interfere with each + * other, so we don't set a max task limit. We must set WQ_MEM_RECLAIM + * since we need to be able to guarantee forward progress even under + * memory pressure. */ - i40e_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, - i40e_driver_name); + i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name); if (!i40e_wq) { pr_err("%s: Failed to create workqueue\n", i40e_driver_name); return -ENOMEM; diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 800bd55d0159..57505b1df98d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -134,8 +134,25 @@ i40e_status i40e_acquire_nvm(struct i40e_hw *hw, **/ void i40e_release_nvm(struct i40e_hw *hw) { - if (!hw->nvm.blank_nvm_mode) - i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL); + i40e_status ret_code = I40E_SUCCESS; + u32 total_delay = 0; + + if (hw->nvm.blank_nvm_mode) + return; + + ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL); + + /* there are some rare cases when trying to release the resource + * results in an admin Q timeout, so handle them correctly + */ + while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) && + (total_delay < hw->aq.asq_cmd_timeout)) { + usleep_range(1000, 2000); + ret_code = i40e_aq_release_resource(hw, + I40E_NVM_RESOURCE_ID, + 0, NULL); + total_delay++; + } } /** @@ -230,6 +247,7 @@ static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer, struct i40e_asq_cmd_details cmd_details; memset(&cmd_details, 0, sizeof(cmd_details)); + cmd_details.wb_desc = &hw->nvm_wb_desc; /* Here we are checking the SR limit only for the flat memory model. * We cannot do it for the module-based model, as we did not acquire @@ -266,7 +284,7 @@ static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer, * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) * @data: word read from the Shadow RAM * - * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register. 
+ * Reads one 16 bit word from the Shadow RAM using the AdminQ **/ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset, u16 *data) @@ -280,27 +298,49 @@ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset, } /** - * i40e_read_nvm_word - Reads Shadow RAM + * __i40e_read_nvm_word - Reads nvm word, assumes called does the locking * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) * @data: word read from the Shadow RAM * - * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register. + * Reads one 16 bit word from the Shadow RAM. + * + * Do not use this function except in cases where the nvm lock is already + * taken via i40e_acquire_nvm(). + **/ +static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw, + u16 offset, u16 *data) +{ + i40e_status ret_code = 0; + + if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) + ret_code = i40e_read_nvm_word_aq(hw, offset, data); + else + ret_code = i40e_read_nvm_word_srctl(hw, offset, data); + return ret_code; +} + +/** + * i40e_read_nvm_word - Reads nvm word and acquire lock if necessary + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) + * @data: word read from the Shadow RAM + * + * Reads one 16 bit word from the Shadow RAM. **/ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, u16 *data) { - enum i40e_status_code ret_code = 0; + i40e_status ret_code = 0; ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); - if (!ret_code) { - if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) { - ret_code = i40e_read_nvm_word_aq(hw, offset, data); - } else { - ret_code = i40e_read_nvm_word_srctl(hw, offset, data); - } - i40e_release_nvm(hw); - } + if (ret_code) + return ret_code; + + ret_code = __i40e_read_nvm_word(hw, offset, data); + + i40e_release_nvm(hw); + return ret_code; } @@ -393,31 +433,25 @@ static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset, } /** - * i40e_read_nvm_buffer - Reads Shadow RAM buffer + * __i40e_read_nvm_buffer - Reads nvm buffer, caller must acquire lock * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). * @words: (in) number of words to read; (out) number of words actually read * @data: words read from the Shadow RAM * * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd() - * method. The buffer read is preceded by the NVM ownership take - * and followed by the release. + * method. 
**/ -i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, - u16 *words, u16 *data) +static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw, + u16 offset, u16 *words, + u16 *data) { - enum i40e_status_code ret_code = 0; + i40e_status ret_code = 0; - if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) { - ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); - if (!ret_code) { - ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, - data); - i40e_release_nvm(hw); - } - } else { + if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) + ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, data); + else ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data); - } return ret_code; } @@ -499,15 +533,15 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, data = (u16 *)vmem.va; /* read pointer to VPD area */ - ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module); + ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module); if (ret_code) { ret_code = I40E_ERR_NVM_CHECKSUM; goto i40e_calc_nvm_checksum_exit; } /* read pointer to PCIe Alt Auto-load module */ - ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR, - &pcie_alt_module); + ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR, + &pcie_alt_module); if (ret_code) { ret_code = I40E_ERR_NVM_CHECKSUM; goto i40e_calc_nvm_checksum_exit; @@ -521,7 +555,7 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) { u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS; - ret_code = i40e_read_nvm_buffer(hw, i, &words, data); + ret_code = __i40e_read_nvm_buffer(hw, i, &words, data); if (ret_code) { ret_code = I40E_ERR_NVM_CHECKSUM; goto i40e_calc_nvm_checksum_exit; @@ -593,14 +627,19 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw, u16 checksum_sr = 0; u16 checksum_local = 0; - ret_code = i40e_calc_nvm_checksum(hw, &checksum_local); - if (ret_code) - goto i40e_validate_nvm_checksum_exit; - - /* Do not use i40e_read_nvm_word() because we do not want to take - * the synchronization semaphores twice here. + /* We must acquire the NVM lock in order to correctly synchronize the + * NVM accesses across multiple PFs. Without doing so it is possible + * for one of the PFs to read invalid data potentially indicating that + * the checksum is invalid. */ - i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr); + ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (ret_code) + return ret_code; + ret_code = i40e_calc_nvm_checksum(hw, &checksum_local); + __i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr); + i40e_release_nvm(hw); + if (ret_code) + return ret_code; /* Verify read checksum from EEPROM is the same as * calculated checksum @@ -612,7 +651,6 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw, if (checksum) *checksum = checksum_local; -i40e_validate_nvm_checksum_exit: return ret_code; } @@ -736,6 +774,15 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw, hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; } + /* Acquire lock to prevent race condition where adminq_task + * can execute after i40e_nvmupd_nvm_read/write but before state + * variables (nvm_wait_opcode, nvm_release_on_done) are updated. + * + * During NVMUpdate, it is observed that lock could be held for + * ~5ms for most commands. However lock is held for ~60ms for + * NVMUPD_CSUM_LCB command. 
+ */ + mutex_lock(&hw->aq.arq_mutex); switch (hw->nvmupd_state) { case I40E_NVMUPD_STATE_INIT: status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno); @@ -756,7 +803,8 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw, */ if (cmd->offset == 0xffff) { i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode); - return 0; + status = 0; + goto exit; } status = I40E_ERR_NOT_READY; @@ -771,6 +819,8 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw, *perrno = -ESRCH; break; } +exit: + mutex_unlock(&hw->aq.arq_mutex); return status; } @@ -997,6 +1047,7 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw, break; case I40E_NVMUPD_CSUM_CON: + /* Assumes the caller has acquired the nvm */ status = i40e_update_nvm_checksum(hw); if (status) { *perrno = hw->aq.asq_last_status ? @@ -1011,6 +1062,7 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw, break; case I40E_NVMUPD_CSUM_LCB: + /* Assumes the caller has acquired the nvm */ status = i40e_update_nvm_checksum(hw); if (status) { *perrno = hw->aq.asq_last_status ? diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index df613ea40313..a39b13197891 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -311,8 +311,6 @@ i40e_status i40e_acquire_nvm(struct i40e_hw *hw, void i40e_release_nvm(struct i40e_hw *hw); i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, u16 *data); -i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, - u16 *words, u16 *data); i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw); i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw, u16 *checksum); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 1a0be835fa06..d8456c381c99 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -158,13 +158,12 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) { struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); - struct timespec64 now, then; + struct timespec64 now; - then = ns_to_timespec64(delta); mutex_lock(&pf->tmreg_lock); i40e_ptp_read(pf, &now); - now = timespec64_add(now, then); + timespec64_add_ns(&now, delta); i40e_ptp_write(pf, (const struct timespec64 *)&now); mutex_unlock(&pf->tmreg_lock); @@ -570,7 +569,7 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf, case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: - if (!(pf->flags & I40E_FLAG_PTP_L4_CAPABLE)) + if (!(pf->hw_features & I40E_HW_PTP_L4_CAPABLE)) return -ERANGE; pf->ptp_rx = true; tsyntype = I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK | @@ -584,7 +583,7 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf, case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: - if (!(pf->flags & I40E_FLAG_PTP_L4_CAPABLE)) + if (!(pf->hw_features & I40E_HW_PTP_L4_CAPABLE)) return -ERANGE; /* fall through */ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: @@ -593,7 +592,7 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf, pf->ptp_rx = true; tsyntype = I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK | I40E_PRTTSYN_CTL1_TSYNTYPE_V2; - if (pf->flags & I40E_FLAG_PTP_L4_CAPABLE) { + if (pf->hw_features & I40E_HW_PTP_L4_CAPABLE) { tsyntype |= I40E_PRTTSYN_CTL1_UDP_ENA_MASK; 
config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; } else { diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 2194960d5855..1519dfb851d0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -860,7 +860,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, netdev_tx_completed_queue(txring_txq(tx_ring), total_packets, total_bytes); -#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) +#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2)) if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { /* Make sure that anybody stopping the queue after this @@ -959,19 +959,31 @@ void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) { enum i40e_latency_range new_latency_range = rc->latency_range; - struct i40e_q_vector *qv = rc->ring->q_vector; u32 new_itr = rc->itr; int bytes_per_int; - int usecs; + unsigned int usecs, estimated_usecs; if (rc->total_packets == 0 || !rc->itr) return false; + usecs = (rc->itr << 1) * ITR_COUNTDOWN_START; + bytes_per_int = rc->total_bytes / usecs; + + /* The calculations in this algorithm depend on interrupts actually + * firing at the ITR rate. This may not happen if the packet rate is + * really low, or if we've been napi polling. Check to make sure + * that's not the case before we continue. + */ + estimated_usecs = jiffies_to_usecs(jiffies - rc->last_itr_update); + if (estimated_usecs > usecs) { + new_latency_range = I40E_LOW_LATENCY; + goto reset_latency; + } + /* simple throttlerate management * 0-10MB/s lowest (50000 ints/s) * 10-20MB/s low (20000 ints/s) * 20-1249MB/s bulk (18000 ints/s) - * > 40000 Rx packets per second (8000 ints/s) * * The math works out because the divisor is in 10^(-6) which * turns the bytes/us input value into MB/s values, but @@ -979,9 +991,6 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) * are in 2 usec increments in the ITR registers, and make sure * to use the smoothed values that the countdown timer gives us. */ - usecs = (rc->itr << 1) * ITR_COUNTDOWN_START; - bytes_per_int = rc->total_bytes / usecs; - switch (new_latency_range) { case I40E_LOWEST_LATENCY: if (bytes_per_int > 10) @@ -994,24 +1003,13 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) new_latency_range = I40E_LOWEST_LATENCY; break; case I40E_BULK_LATENCY: - case I40E_ULTRA_LATENCY: default: if (bytes_per_int <= 20) new_latency_range = I40E_LOW_LATENCY; break; } - /* this is to adjust RX more aggressively when streaming small - * packets. The value of 40000 was picked as it is just beyond - * what the hardware can receive per second if in low latency - * mode. 
- */ -#define RX_ULTRA_PACKET_RATE 40000 - - if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) && - (&qv->rx == rc)) - new_latency_range = I40E_ULTRA_LATENCY; - +reset_latency: rc->latency_range = new_latency_range; switch (new_latency_range) { @@ -1024,21 +1022,18 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) case I40E_BULK_LATENCY: new_itr = I40E_ITR_18K; break; - case I40E_ULTRA_LATENCY: - new_itr = I40E_ITR_8K; - break; default: break; } rc->total_bytes = 0; rc->total_packets = 0; + rc->last_itr_update = jiffies; if (new_itr != rc->itr) { rc->itr = new_itr; return true; } - return false; } @@ -2065,7 +2060,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); bool failure = false, xdp_xmit = false; - while (likely(total_rx_packets < budget)) { + while (likely(total_rx_packets < (unsigned int)budget)) { struct i40e_rx_buffer *rx_buffer; union i40e_rx_desc *rx_desc; struct xdp_buff xdp; @@ -2198,7 +2193,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) rx_ring->q_vector->rx.total_bytes += total_rx_bytes; /* guarantee a trip back through this routine if there was a failure */ - return failure ? budget : total_rx_packets; + return failure ? budget : (int)total_rx_packets; } static u32 i40e_buildreg_itr(const int type, const u16 itr) @@ -2243,6 +2238,12 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, int idx = q_vector->v_idx; int rx_itr_setting, tx_itr_setting; + /* If we don't have MSIX, then we only need to re-enable icr0 */ + if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) { + i40e_irq_dynamic_enable_icr0(vsi->back, false); + return; + } + vector = (q_vector->v_idx + vsi->base_vector); /* avoid dynamic calculation if in countdown mode OR if @@ -2363,7 +2364,6 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) /* If work not completed, return budget and polling will return */ if (!clean_complete) { - const cpumask_t *aff_mask = &q_vector->affinity_mask; int cpu_id = smp_processor_id(); /* It is possible that the interrupt affinity has changed but, @@ -2373,15 +2373,22 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) * continue to poll, otherwise we must stop polling so the * interrupt can move to the correct cpu. */ - if (likely(cpumask_test_cpu(cpu_id, aff_mask) || - !(vsi->back->flags & I40E_FLAG_MSIX_ENABLED))) { -tx_only: - if (arm_wb) { - q_vector->tx.ring[0].tx_stats.tx_force_wb++; - i40e_enable_wb_on_itr(vsi, q_vector); - } - return budget; + if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) { + /* Tell napi that we are done polling */ + napi_complete_done(napi, work_done); + + /* Force an interrupt */ + i40e_force_wb(vsi, q_vector); + + /* Return budget-1 so that polling stops */ + return budget - 1; } +tx_only: + if (arm_wb) { + q_vector->tx.ring[0].tx_stats.tx_force_wb++; + i40e_enable_wb_on_itr(vsi, q_vector); + } + return budget; } if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR) @@ -2390,16 +2397,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) /* Work is done so exit the polling mode and re-enable the interrupt */ napi_complete_done(napi, work_done); - /* If we're prematurely stopping polling to fix the interrupt - * affinity we want to make sure polling starts back up so we - * issue a call to i40e_force_wb which triggers a SW interrupt. 
- */ - if (!clean_complete) - i40e_force_wb(vsi, q_vector); - else if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) - i40e_irq_dynamic_enable_icr0(vsi->back, false); - else - i40e_update_enable_itr(vsi, q_vector); + i40e_update_enable_itr(vsi, q_vector); return min(work_done, budget - 1); } @@ -2453,9 +2451,15 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, hlen = (hdr.network[0] & 0x0F) << 2; l4_proto = hdr.ipv4->protocol; } else { - hlen = hdr.network - skb->data; - l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL); - hlen -= hdr.network - skb->data; + /* find the start of the innermost ipv6 header */ + unsigned int inner_hlen = hdr.network - skb->data; + unsigned int h_offset = inner_hlen; + + /* this function updates h_offset to the end of the header */ + l4_proto = + ipv6_find_hdr(skb, &h_offset, IPPROTO_TCP, NULL, NULL); + /* hlen will contain our best estimate of the tcp header */ + hlen = h_offset - inner_hlen; } if (l4_proto != IPPROTO_TCP) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index b288d58313a6..2f848bc5e391 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -112,7 +112,7 @@ enum i40e_dyn_idx_t { BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP)) #define i40e_pf_get_default_rss_hena(pf) \ - (((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \ + (((pf)->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \ I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA) /* Supported Rx Buffer Sizes (a multiple of 128) */ @@ -130,6 +130,7 @@ enum i40e_dyn_idx_t { * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab) */ #define I40E_RX_HDR_SIZE I40E_RXBUFFER_256 +#define I40E_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2)) #define i40e_rx_desc i40e_32byte_rx_desc #define I40E_RX_DMA_ATTR \ @@ -453,7 +454,6 @@ enum i40e_latency_range { I40E_LOWEST_LATENCY = 0, I40E_LOW_LATENCY = 1, I40E_BULK_LATENCY = 2, - I40E_ULTRA_LATENCY = 3, }; struct i40e_ring_container { @@ -461,6 +461,7 @@ struct i40e_ring_container { struct i40e_ring *ring; unsigned int total_bytes; /* total bytes processed this int */ unsigned int total_packets; /* total packets processed this int */ + unsigned long last_itr_update; /* jiffies of last ITR update */ u16 count; enum i40e_latency_range latency_range; u16 itr; diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index 3a18ed13edc4..fd4bbdd88b57 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -185,6 +185,7 @@ struct i40e_link_status { enum i40e_aq_link_speed link_speed; u8 link_info; u8 an_info; + u8 req_fec_info; u8 fec_info; u8 ext_info; u8 loopback; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index ecbe40ea8ffe..4d1e670f490e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1528,54 +1528,54 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) VIRTCHNL_VF_OFFLOAD_RSS_REG | VIRTCHNL_VF_OFFLOAD_VLAN; - vfres->vf_offload_flags = VIRTCHNL_VF_OFFLOAD_L2; + vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2; vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi->info.pvid) - vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_VLAN; + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN; if (i40e_vf_client_capable(pf, vf->vf_id) && (vf->driver_caps & 
VIRTCHNL_VF_OFFLOAD_IWARP)) { - vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_IWARP; + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP; set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states); } if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) { - vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF; + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF; } else { - if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) && + if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) && (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)) - vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ; + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ; else - vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG; + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG; } - if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) { + if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) { if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) - vfres->vf_offload_flags |= + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2; } if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP) - vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP; + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP; - if ((pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) && + if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) && (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)) - vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM; + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM; if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) { if (pf->flags & I40E_FLAG_MFP_ENABLED) { dev_err(&pf->pdev->dev, "VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n", vf->vf_id); - ret = I40E_ERR_PARAM; + aq_ret = I40E_ERR_PARAM; goto err; } - vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING; + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING; } - if (pf->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) { + if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) { if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) - vfres->vf_offload_flags |= + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR; } @@ -1741,16 +1741,14 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, NULL); } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) { hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { - aq_ret = 0; - if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) { - aq_ret = - i40e_aq_set_vsi_uc_promisc_on_vlan(hw, - vsi->seid, - alluni, - f->vlan, - NULL); - aq_err = pf->hw.aq.asq_last_status; - } + if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID) + continue; + aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, + vsi->seid, + alluni, + f->vlan, + NULL); + aq_err = pf->hw.aq.asq_last_status; if (aq_ret) dev_err(&pf->pdev->dev, "Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n", @@ -1760,7 +1758,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, } } else { aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, - allmulti, NULL, + alluni, NULL, true); aq_err = pf->hw.aq.asq_last_status; if (aq_ret) { @@ -2531,6 +2529,60 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen) return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret); } +/** + * i40e_vc_enable_vlan_stripping + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * Enable vlan header stripping for the VF + **/ +static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg, + u16 msglen) +{ + struct 
i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; + i40e_status aq_ret = 0; + + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + aq_ret = I40E_ERR_PARAM; + goto err; + } + + i40e_vlan_stripping_enable(vsi); + + /* send the response to the VF */ +err: + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, + aq_ret); +} + +/** + * i40e_vc_disable_vlan_stripping + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * Disable vlan header stripping for the VF + **/ +static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg, + u16 msglen) +{ + struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; + i40e_status aq_ret = 0; + + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + aq_ret = I40E_ERR_PARAM; + goto err; + } + + i40e_vlan_stripping_disable(vsi); + + /* send the response to the VF */ +err: + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, + aq_ret); +} + /** * i40e_vc_process_vf_msg * @pf: pointer to the PF structure @@ -2650,6 +2702,12 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, case VIRTCHNL_OP_SET_RSS_HENA: ret = i40e_vc_set_rss_hena(vf, msg, msglen); break; + case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: + ret = i40e_vc_enable_vlan_stripping(vf, msg, msglen); + break; + case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: + ret = i40e_vc_disable_vlan_stripping(vf, msg, msglen); + break; case VIRTCHNL_OP_UNKNOWN: default: @@ -2764,7 +2822,6 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) spin_unlock_bh(&vsi->mac_filter_hash_lock); - dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id); /* program mac filter */ if (i40e_sync_vsi_filters(vsi)) { dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); @@ -2772,7 +2829,16 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) goto error_param; } ether_addr_copy(vf->default_lan_addr.addr, mac); - vf->pf_set_mac = true; + + if (is_zero_ether_addr(mac)) { + vf->pf_set_mac = false; + dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id); + } else { + vf->pf_set_mac = true; + dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", + mac, vf_id); + } + /* Force the VF driver stop so it has to reload with new MAC address */ i40e_vc_disable_vf(pf, vf); dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n"); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index 1dd1938f594f..8d3a2bfe186a 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -333,9 +333,9 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, len = buf_len; /* write the full 16-byte chunks */ if (hw->debug_mask & mask) { - char prefix[20]; + char prefix[27]; - snprintf(prefix, 20, + snprintf(prefix, sizeof(prefix), "i40evf %02x:%02x.%x: \t0x", hw->bus.bus_id, hw->bus.device, @@ -1104,7 +1104,7 @@ void i40e_vf_parse_hw_config(struct i40e_hw *hw, hw->dev_caps.num_rx_qp = msg->num_queue_pairs; hw->dev_caps.num_tx_qp = msg->num_queue_pairs; hw->dev_caps.num_msix_vectors_vf = msg->max_vectors; - hw->dev_caps.dcb = msg->vf_offload_flags & + hw->dev_caps.dcb = msg->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_L2; hw->dev_caps.fcoe = 0; for (i = 0; i < msg->num_vsis; i++) { diff --git a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h index 5e314fd3c016..a90737786c34 100644 --- 
a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h @@ -54,7 +54,7 @@ struct i40e_dma_mem { void *va; dma_addr_t pa; u32 size; -} __packed; +}; #define i40e_allocate_dma_mem(h, m, unused, s, a) \ i40evf_allocate_dma_mem_d(h, m, s, a) @@ -63,7 +63,7 @@ struct i40e_dma_mem { struct i40e_virt_mem { void *va; u32 size; -} __packed; +}; #define i40e_allocate_virt_mem(h, m, s) i40evf_allocate_virt_mem_d(h, m, s) #define i40e_free_virt_mem(h, m) i40evf_free_virt_mem_d(h, m) diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 12b02e530503..c32c62462c84 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -275,7 +275,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, netdev_tx_completed_queue(txring_txq(tx_ring), total_packets, total_bytes); -#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) +#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2)) if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { /* Make sure that anybody stopping the queue after this @@ -357,19 +357,31 @@ void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) { enum i40e_latency_range new_latency_range = rc->latency_range; - struct i40e_q_vector *qv = rc->ring->q_vector; u32 new_itr = rc->itr; int bytes_per_int; - int usecs; + unsigned int usecs, estimated_usecs; if (rc->total_packets == 0 || !rc->itr) return false; + usecs = (rc->itr << 1) * ITR_COUNTDOWN_START; + bytes_per_int = rc->total_bytes / usecs; + + /* The calculations in this algorithm depend on interrupts actually + * firing at the ITR rate. This may not happen if the packet rate is + * really low, or if we've been napi polling. Check to make sure + * that's not the case before we continue. + */ + estimated_usecs = jiffies_to_usecs(jiffies - rc->last_itr_update); + if (estimated_usecs > usecs) { + new_latency_range = I40E_LOW_LATENCY; + goto reset_latency; + } + /* simple throttlerate management * 0-10MB/s lowest (50000 ints/s) * 10-20MB/s low (20000 ints/s) * 20-1249MB/s bulk (18000 ints/s) - * > 40000 Rx packets per second (8000 ints/s) * * The math works out because the divisor is in 10^(-6) which * turns the bytes/us input value into MB/s values, but @@ -377,9 +389,6 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) * are in 2 usec increments in the ITR registers, and make sure * to use the smoothed values that the countdown timer gives us. */ - usecs = (rc->itr << 1) * ITR_COUNTDOWN_START; - bytes_per_int = rc->total_bytes / usecs; - switch (new_latency_range) { case I40E_LOWEST_LATENCY: if (bytes_per_int > 10) @@ -392,24 +401,13 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) new_latency_range = I40E_LOWEST_LATENCY; break; case I40E_BULK_LATENCY: - case I40E_ULTRA_LATENCY: default: if (bytes_per_int <= 20) new_latency_range = I40E_LOW_LATENCY; break; } - /* this is to adjust RX more aggressively when streaming small - * packets. The value of 40000 was picked as it is just beyond - * what the hardware can receive per second if in low latency - * mode. 
- */ -#define RX_ULTRA_PACKET_RATE 40000 - - if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) && - (&qv->rx == rc)) - new_latency_range = I40E_ULTRA_LATENCY; - +reset_latency: rc->latency_range = new_latency_range; switch (new_latency_range) { @@ -422,21 +420,18 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) case I40E_BULK_LATENCY: new_itr = I40E_ITR_18K; break; - case I40E_ULTRA_LATENCY: - new_itr = I40E_ITR_8K; - break; default: break; } rc->total_bytes = 0; rc->total_packets = 0; + rc->last_itr_update = jiffies; if (new_itr != rc->itr) { rc->itr = new_itr; return true; } - return false; } @@ -1299,7 +1294,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); bool failure = false; - while (likely(total_rx_packets < budget)) { + while (likely(total_rx_packets < (unsigned int)budget)) { struct i40e_rx_buffer *rx_buffer; union i40e_rx_desc *rx_desc; unsigned int size; @@ -1406,7 +1401,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) rx_ring->q_vector->rx.total_bytes += total_rx_bytes; /* guarantee a trip back through this routine if there was a failure */ - return failure ? budget : total_rx_packets; + return failure ? budget : (int)total_rx_packets; } static u32 i40e_buildreg_itr(const int type, const u16 itr) @@ -1575,7 +1570,6 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) /* If work not completed, return budget and polling will return */ if (!clean_complete) { - const cpumask_t *aff_mask = &q_vector->affinity_mask; int cpu_id = smp_processor_id(); /* It is possible that the interrupt affinity has changed but, @@ -1585,14 +1579,22 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) * continue to poll, otherwise we must stop polling so the * interrupt can move to the correct cpu. */ - if (likely(cpumask_test_cpu(cpu_id, aff_mask))) { -tx_only: - if (arm_wb) { - q_vector->tx.ring[0].tx_stats.tx_force_wb++; - i40e_enable_wb_on_itr(vsi, q_vector); - } - return budget; + if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) { + /* Tell napi that we are done polling */ + napi_complete_done(napi, work_done); + + /* Force an interrupt */ + i40evf_force_wb(vsi, q_vector); + + /* Return budget-1 so that polling stops */ + return budget - 1; } +tx_only: + if (arm_wb) { + q_vector->tx.ring[0].tx_stats.tx_force_wb++; + i40e_enable_wb_on_itr(vsi, q_vector); + } + return budget; } if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR) @@ -1601,14 +1603,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) /* Work is done so exit the polling mode and re-enable the interrupt */ napi_complete_done(napi, work_done); - /* If we're prematurely stopping polling to fix the interrupt - * affinity we want to make sure polling starts back up so we - * issue a call to i40evf_force_wb which triggers a SW interrupt. 
- */ - if (!clean_complete) - i40evf_force_wb(vsi, q_vector); - else - i40e_update_enable_itr(vsi, q_vector); + i40e_update_enable_itr(vsi, q_vector); return min(work_done, budget - 1); } diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index 901282c87cf6..0d9f98bc07bd 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -98,10 +98,6 @@ enum i40e_dyn_idx_t { BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP)) -#define i40e_pf_get_default_rss_hena(pf) \ - (((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \ - I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA) - /* Supported Rx Buffer Sizes (a multiple of 128) */ #define I40E_RXBUFFER_256 256 #define I40E_RXBUFFER_1536 1536 /* 128B aligned standard Ethernet frame */ @@ -117,6 +113,7 @@ enum i40e_dyn_idx_t { * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab) */ #define I40E_RX_HDR_SIZE I40E_RXBUFFER_256 +#define I40E_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2)) #define i40e_rx_desc i40e_32byte_rx_desc #define I40E_RX_DMA_ATTR \ @@ -428,7 +425,6 @@ enum i40e_latency_range { I40E_LOWEST_LATENCY = 0, I40E_LOW_LATENCY = 1, I40E_BULK_LATENCY = 2, - I40E_ULTRA_LATENCY = 3, }; struct i40e_ring_container { @@ -436,6 +432,7 @@ struct i40e_ring_container { struct i40e_ring *ring; unsigned int total_bytes; /* total bytes processed this int */ unsigned int total_packets; /* total packets processed this int */ + unsigned long last_itr_update; /* jiffies of last ITR update */ u16 count; enum i40e_latency_range latency_range; u16 itr; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index bde7f24af1c6..2ea919d9cdcf 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -159,6 +159,7 @@ struct i40e_link_status { enum i40e_aq_link_speed link_speed; u8 link_info; u8 an_info; + u8 req_fec_info; u8 fec_info; u8 ext_info; u8 loopback; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index 6cc92089fecb..82f69031e5cd 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -39,6 +39,18 @@ #include #include #include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include #include @@ -109,7 +121,7 @@ struct i40e_q_vector { #define ITR_COUNTDOWN_START 100 u8 itr_countdown; /* when 0 or 1 update ITR */ int v_idx; /* vector index in list */ - char name[IFNAMSIZ + 9]; + char name[IFNAMSIZ + 15]; bool arm_wb_state; cpumask_t affinity_mask; struct irq_affinity_notify affinity_notify; @@ -183,6 +195,7 @@ struct i40evf_adapter { struct work_struct adminq_task; struct delayed_work client_task; struct delayed_work init_task; + wait_queue_head_t down_waitqueue; struct i40e_q_vector *q_vectors; struct list_head vlan_filter_list; char misc_vector_name[IFNAMSIZ + 9]; @@ -225,8 +238,6 @@ struct i40evf_adapter { /* duplicates for common code */ #define I40E_FLAG_DCB_ENABLED 0 #define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED -#define I40E_FLAG_WB_ON_ITR_CAPABLE I40EVF_FLAG_WB_ON_ITR_CAPABLE -#define I40E_FLAG_OUTER_UDP_CSUM_CAPABLE I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE #define I40E_FLAG_LEGACY_RX I40EVF_FLAG_LEGACY_RX /* flags for admin queue service task */ u32 aq_required; @@ -250,6 
+261,8 @@ struct i40evf_adapter { #define I40EVF_FLAG_AQ_RELEASE_PROMISC BIT(16) #define I40EVF_FLAG_AQ_REQUEST_ALLMULTI BIT(17) #define I40EVF_FLAG_AQ_RELEASE_ALLMULTI BIT(18) +#define I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING BIT(19) +#define I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING BIT(20) /* OS defined structs */ struct net_device *netdev; @@ -266,19 +279,19 @@ struct i40evf_adapter { enum virtchnl_link_speed link_speed; enum virtchnl_ops current_op; #define CLIENT_ALLOWED(_a) ((_a)->vf_res ? \ - (_a)->vf_res->vf_offload_flags & \ + (_a)->vf_res->vf_cap_flags & \ VIRTCHNL_VF_OFFLOAD_IWARP : \ 0) #define CLIENT_ENABLED(_a) ((_a)->cinst) /* RSS by the PF should be preferred over RSS via other methods. */ -#define RSS_PF(_a) ((_a)->vf_res->vf_offload_flags & \ +#define RSS_PF(_a) ((_a)->vf_res->vf_cap_flags & \ VIRTCHNL_VF_OFFLOAD_RSS_PF) -#define RSS_AQ(_a) ((_a)->vf_res->vf_offload_flags & \ +#define RSS_AQ(_a) ((_a)->vf_res->vf_cap_flags & \ VIRTCHNL_VF_OFFLOAD_RSS_AQ) -#define RSS_REG(_a) (!((_a)->vf_res->vf_offload_flags & \ +#define RSS_REG(_a) (!((_a)->vf_res->vf_cap_flags & \ (VIRTCHNL_VF_OFFLOAD_RSS_AQ | \ VIRTCHNL_VF_OFFLOAD_RSS_PF))) -#define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_offload_flags & \ +#define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \ VIRTCHNL_VF_OFFLOAD_VLAN) struct virtchnl_vf_resource *vf_res; /* incl. all VSIs */ struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */ @@ -347,6 +360,8 @@ void i40evf_get_hena(struct i40evf_adapter *adapter); void i40evf_set_hena(struct i40evf_adapter *adapter); void i40evf_set_rss_key(struct i40evf_adapter *adapter); void i40evf_set_rss_lut(struct i40evf_adapter *adapter); +void i40evf_enable_vlan_stripping(struct i40evf_adapter *adapter); +void i40evf_disable_vlan_stripping(struct i40evf_adapter *adapter); void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, enum virtchnl_ops v_opcode, i40e_status v_retval, u8 *msg, u16 msglen); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c index 9bb2cc7dd4e4..65874d6b3ab9 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -165,7 +165,7 @@ static void i40evf_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct i40evf_adapter *adapter = netdev_priv(netdev); - int i, j; + unsigned int i, j; char *p; for (i = 0; i < I40EVF_GLOBAL_STATS_LEN; i++) { @@ -197,7 +197,7 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data) int i; if (sset == ETH_SS_STATS) { - for (i = 0; i < I40EVF_GLOBAL_STATS_LEN; i++) { + for (i = 0; i < (int)I40EVF_GLOBAL_STATS_LEN; i++) { memcpy(p, i40evf_gstrings_stats[i].stat_string, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; @@ -258,29 +258,50 @@ static u32 i40evf_get_priv_flags(struct net_device *netdev) static int i40evf_set_priv_flags(struct net_device *netdev, u32 flags) { struct i40evf_adapter *adapter = netdev_priv(netdev); - u64 changed_flags; + u32 orig_flags, new_flags, changed_flags; u32 i; - changed_flags = adapter->flags; + orig_flags = READ_ONCE(adapter->flags); + new_flags = orig_flags; for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) { const struct i40evf_priv_flags *priv_flags; priv_flags = &i40evf_gstrings_priv_flags[i]; - if (priv_flags->read_only) - continue; - if (flags & BIT(i)) - adapter->flags |= priv_flags->flag; + new_flags |= priv_flags->flag; else - adapter->flags &= ~(priv_flags->flag); + new_flags &= ~(priv_flags->flag); + + if 
(priv_flags->read_only && + ((orig_flags ^ new_flags) & ~BIT(i))) + return -EOPNOTSUPP; } - /* check for flags that changed */ - changed_flags ^= adapter->flags; + /* Before we finalize any flag changes, any checks which we need to + * perform to determine if the new flags will be supported should go + * here... + */ - /* Process any additional changes needed as a result of flag changes. */ + /* Compare and exchange the new flags into place. If we failed, that + * is if cmpxchg returns anything but the old value, this means + * something else must have modified the flags variable since we + * copied it. We'll just punt with an error and log something in the + * message buffer. + */ + if (cmpxchg(&adapter->flags, orig_flags, new_flags) != orig_flags) { + dev_warn(&adapter->pdev->dev, + "Unable to update adapter->flags as it was modified by another thread...\n"); + return -EAGAIN; + } + + changed_flags = orig_flags ^ new_flags; + + /* Process any additional changes needed as a result of flag changes. + * The changed_flags value reflects the list of bits that were changed + * in the code above. + */ /* issue a reset to force legacy-rx change to take effect */ if (changed_flags & I40EVF_FLAG_LEGACY_RX) { diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 7c213a347909..1825d956bb00 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -520,7 +520,7 @@ static void i40evf_irq_affinity_notify(struct irq_affinity_notify *notify, struct i40e_q_vector *q_vector = container_of(notify, struct i40e_q_vector, affinity_notify); - q_vector->affinity_mask = *mask; + cpumask_copy(&q_vector->affinity_mask, mask); } /** @@ -543,9 +543,9 @@ static void i40evf_irq_affinity_release(struct kref *ref) {} static int i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename) { - int vector, err, q_vectors; - int rx_int_idx = 0, tx_int_idx = 0; - int irq_num; + unsigned int vector, q_vectors; + unsigned int rx_int_idx = 0, tx_int_idx = 0; + int irq_num, err; i40evf_irq_disable(adapter); /* Decrement for Other and TCP Timer vectors */ @@ -556,18 +556,15 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename) irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; if (q_vector->tx.ring && q_vector->rx.ring) { - snprintf(q_vector->name, sizeof(q_vector->name) - 1, - "i40evf-%s-%s-%d", basename, - "TxRx", rx_int_idx++); + snprintf(q_vector->name, sizeof(q_vector->name), + "i40evf-%s-TxRx-%d", basename, rx_int_idx++); tx_int_idx++; } else if (q_vector->rx.ring) { - snprintf(q_vector->name, sizeof(q_vector->name) - 1, - "i40evf-%s-%s-%d", basename, - "rx", rx_int_idx++); + snprintf(q_vector->name, sizeof(q_vector->name), + "i40evf-%s-rx-%d", basename, rx_int_idx++); } else if (q_vector->tx.ring) { - snprintf(q_vector->name, sizeof(q_vector->name) - 1, - "i40evf-%s-%s-%d", basename, - "tx", tx_int_idx++); + snprintf(q_vector->name, sizeof(q_vector->name), + "i40evf-%s-tx-%d", basename, tx_int_idx++); } else { /* skip this unused q_vector */ continue; @@ -587,8 +584,10 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename) q_vector->affinity_notify.release = i40evf_irq_affinity_release; irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); - /* assign the mask for this irq */ - irq_set_affinity_hint(irq_num, &q_vector->affinity_mask); + /* get_cpu_mask returns a static constant mask with + * a permanent lifetime so it's 
ok to use here. + */ + irq_set_affinity_hint(irq_num, get_cpu_mask(q_vector->v_idx)); } return 0; @@ -1143,6 +1142,7 @@ void i40evf_down(struct i40evf_adapter *adapter) } clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + mod_timer_pending(&adapter->watchdog_timer, jiffies + 1); } /** @@ -1241,7 +1241,7 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter) tx_ring->dev = &adapter->pdev->dev; tx_ring->count = adapter->tx_desc_count; tx_ring->tx_itr_setting = (I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF); - if (adapter->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) + if (adapter->flags & I40EVF_FLAG_WB_ON_ITR_CAPABLE) tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR; rx_ring = &adapter->rx_rings[i]; @@ -1417,7 +1417,7 @@ static int i40evf_init_rss(struct i40evf_adapter *adapter) if (!RSS_PF(adapter)) { /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ - if (adapter->vf_res->vf_offload_flags & + if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) adapter->hena = I40E_DEFAULT_RSS_HENA_EXPANDED; else @@ -1458,6 +1458,7 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter) q_vector->adapter = adapter; q_vector->vsi = &adapter->vsi; q_vector->v_idx = q_idx; + cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); netif_napi_add(adapter->netdev, &q_vector->napi, i40evf_napi_poll, NAPI_POLL_WEIGHT); } @@ -1678,6 +1679,16 @@ static void i40evf_watchdog_task(struct work_struct *work) goto watchdog_done; } + if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) { + i40evf_enable_vlan_stripping(adapter); + goto watchdog_done; + } + + if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) { + i40evf_disable_vlan_stripping(adapter); + goto watchdog_done; + } + if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_QUEUES) { i40evf_configure_queues(adapter); goto watchdog_done; @@ -1794,6 +1805,7 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter) clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; adapter->state = __I40EVF_DOWN; + wake_up(&adapter->down_waitqueue); dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n"); } @@ -1877,7 +1889,7 @@ static void i40evf_reset_task(struct work_struct *work) } continue_reset: - if (netif_running(adapter->netdev)) { + if (netif_running(netdev)) { netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); adapter->link_up = false; @@ -1939,12 +1951,13 @@ static void i40evf_reset_task(struct work_struct *work) i40evf_irq_enable(adapter, true); } else { adapter->state = __I40EVF_DOWN; + wake_up(&adapter->down_waitqueue); } return; reset_err: dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); - i40evf_close(adapter->netdev); + i40evf_close(netdev); } /** @@ -1957,8 +1970,8 @@ static void i40evf_adminq_task(struct work_struct *work) container_of(work, struct i40evf_adapter, adminq_task); struct i40e_hw *hw = &adapter->hw; struct i40e_arq_event_info event; - struct virtchnl_msg *v_msg; - i40e_status ret; + enum virtchnl_ops v_op; + i40e_status ret, v_ret; u32 val, oldval; u16 pending; @@ -1970,15 +1983,15 @@ static void i40evf_adminq_task(struct work_struct *work) if (!event.msg_buf) goto out; - v_msg = (struct virtchnl_msg *)&event.desc; do { ret = i40evf_clean_arq_element(hw, &event, &pending); - if (ret || !v_msg->v_opcode) + v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); + v_ret = (i40e_status)le32_to_cpu(event.desc.cookie_low); + + if (ret || !v_op) break; /* No event to 
process or error cleaning ARQ */ - i40evf_virtchnl_completion(adapter, v_msg->v_opcode, - (i40e_status)v_msg->v_retval, - event.msg_buf, + i40evf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf, event.msg_len); if (pending != 0) memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE); @@ -2238,6 +2251,7 @@ static int i40evf_open(struct net_device *netdev) static int i40evf_close(struct net_device *netdev) { struct i40evf_adapter *adapter = netdev_priv(netdev); + int status; if (adapter->state <= __I40EVF_DOWN_PENDING) return 0; @@ -2255,7 +2269,18 @@ static int i40evf_close(struct net_device *netdev) * still active and can DMA into memory. Resources are cleared in * i40evf_virtchnl_completion() after we get confirmation from the PF * driver that the rings have been stopped. + * + * Also, we wait for state to transition to __I40EVF_DOWN before + * returning. State change occurs in i40evf_virtchnl_completion() after + * VF resources are released (which occurs after PF driver processes and + * responds to admin queue commands). */ + + status = wait_event_timeout(adapter->down_waitqueue, + adapter->state == __I40EVF_DOWN, + msecs_to_jiffies(200)); + if (!status) + netdev_warn(netdev, "Device resources not yet released\n"); return 0; } @@ -2281,6 +2306,28 @@ static int i40evf_change_mtu(struct net_device *netdev, int new_mtu) return 0; } +/** + * i40e_set_features - set the netdev feature flags + * @netdev: ptr to the netdev being adjusted + * @features: the feature set that the stack is suggesting + * Note: expects to be called while under rtnl_lock() + **/ +static int i40evf_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + + if (!VLAN_ALLOWED(adapter)) + return -EINVAL; + + if (features & NETIF_F_HW_VLAN_CTAG_RX) + adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; + else + adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; + + return 0; +} + /** * i40evf_features_check - Validate encapsulated packet conforms to limits * @skb: skb buff @@ -2356,7 +2403,7 @@ static netdev_features_t i40evf_fix_features(struct net_device *netdev, struct i40evf_adapter *adapter = netdev_priv(netdev); features &= ~I40EVF_VLAN_FEATURES; - if (adapter->vf_res->vf_offload_flags & VIRTCHNL_VF_OFFLOAD_VLAN) + if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) features |= I40EVF_VLAN_FEATURES; return features; } @@ -2374,6 +2421,7 @@ static const struct net_device_ops i40evf_netdev_ops = { .ndo_vlan_rx_kill_vid = i40evf_vlan_rx_kill_vid, .ndo_features_check = i40evf_features_check, .ndo_fix_features = i40evf_fix_features, + .ndo_set_features = i40evf_set_features, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = i40evf_netpoll, #endif @@ -2443,7 +2491,7 @@ int i40evf_process_config(struct i40evf_adapter *adapter) /* advertise to stack only if offloads for encapsulated packets is * supported */ - if (vfres->vf_offload_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) { + if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) { hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | @@ -2453,7 +2501,7 @@ int i40evf_process_config(struct i40evf_adapter *adapter) NETIF_F_GSO_PARTIAL | 0; - if (!(vfres->vf_offload_flags & + if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)) netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; @@ -2481,7 +2529,7 @@ int i40evf_process_config(struct i40evf_adapter *adapter) adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK; vsi->netdev = 
adapter->netdev; vsi->qs_handle = adapter->vsi_res->qset_handle; - if (vfres->vf_offload_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { + if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { adapter->rss_key_size = vfres->rss_key_size; adapter->rss_lut_size = vfres->rss_lut_size; } else { @@ -2625,7 +2673,7 @@ static void i40evf_init_task(struct work_struct *work) /* MTU range: 68 - 9710 */ netdev->min_mtu = ETH_MIN_MTU; - netdev->max_mtu = I40E_MAX_RXBUFFER - (ETH_HLEN + ETH_FCS_LEN); + netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD; if (!is_valid_ether_addr(adapter->hw.mac.addr)) { dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n", @@ -2649,7 +2697,7 @@ static void i40evf_init_task(struct work_struct *work) if (err) goto err_sw_init; i40evf_map_rings_to_vectors(adapter); - if (adapter->vf_res->vf_offload_flags & + if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE; @@ -2683,6 +2731,7 @@ static void i40evf_init_task(struct work_struct *work) adapter->state = __I40EVF_DOWN; set_bit(__I40E_VSI_DOWN, adapter->vsi.state); i40evf_misc_irq_enable(adapter); + wake_up(&adapter->down_waitqueue); adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL); adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL); @@ -2844,6 +2893,9 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(5 * (pdev->devfn & 0x07))); + /* Setup the wait queue for indicating transition to down status */ + init_waitqueue_head(&adapter->down_waitqueue); + return 0; err_ioremap: diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index d2bb250a71af..85876f4fb1fb 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -820,6 +820,46 @@ void i40evf_set_rss_lut(struct i40evf_adapter *adapter) kfree(vrl); } +/** + * i40evf_enable_vlan_stripping + * @adapter: adapter structure + * + * Request VLAN header stripping to be enabled + **/ +void i40evf_enable_vlan_stripping(struct i40evf_adapter *adapter) +{ + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot enable stripping, command %d pending\n", + adapter->current_op); + return; + } + adapter->current_op = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING; + adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; + i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, + NULL, 0); +} + +/** + * i40evf_disable_vlan_stripping + * @adapter: adapter structure + * + * Request VLAN header stripping to be disabled + **/ +void i40evf_disable_vlan_stripping(struct i40evf_adapter *adapter) +{ + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot disable stripping, command %d pending\n", + adapter->current_op); + return; + } + adapter->current_op = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING; + adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; + i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, + NULL, 0); +} + /** * i40evf_print_link_message - print link up or down * @adapter: adapter structure @@ -991,8 +1031,10 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, case VIRTCHNL_OP_DISABLE_QUEUES: i40evf_free_all_tx_resources(adapter); 
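/* A minimal, self-contained sketch of the wait-queue handshake the i40evf
 * down_waitqueue changes in this patch rely on: the close path sleeps until
 * the state machine reaches "down", and the completion path wakes it. All
 * names here (demo_adapter, DEMO_DOWN, demo_*) are hypothetical and for
 * illustration only, not part of the driver.
 */
#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

enum demo_state { DEMO_RUNNING, DEMO_DOWN_PENDING, DEMO_DOWN };

struct demo_adapter {
	enum demo_state state;
	wait_queue_head_t down_waitqueue;	/* woken when state == DEMO_DOWN */
};

static void demo_init(struct demo_adapter *ad)
{
	ad->state = DEMO_RUNNING;
	init_waitqueue_head(&ad->down_waitqueue);
}

/* Completion path: called once the queues are confirmed disabled. */
static void demo_queues_disabled(struct demo_adapter *ad)
{
	if (ad->state == DEMO_DOWN_PENDING) {
		ad->state = DEMO_DOWN;
		wake_up(&ad->down_waitqueue);
	}
}

/* Close path: wait up to 200 ms for the down transition, warn-style
 * behaviour is left to the caller; returns -ETIMEDOUT if it never came.
 */
static int demo_close(struct demo_adapter *ad)
{
	long status;

	ad->state = DEMO_DOWN_PENDING;
	/* ... request queue disable from the PF here ... */
	status = wait_event_timeout(ad->down_waitqueue,
				    ad->state == DEMO_DOWN,
				    msecs_to_jiffies(200));
	return status ? 0 : -ETIMEDOUT;
}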
i40evf_free_all_rx_resources(adapter); - if (adapter->state == __I40EVF_DOWN_PENDING) + if (adapter->state == __I40EVF_DOWN_PENDING) { adapter->state = __I40EVF_DOWN; + wake_up(&adapter->down_waitqueue); + } break; case VIRTCHNL_OP_VERSION: case VIRTCHNL_OP_CONFIG_IRQ_MAP: diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index 4a50870e0fa7..c37cc8bccf47 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -340,6 +340,9 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw) phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580; phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; break; + case BCM54616_E_PHY_ID: + phy->type = e1000_phy_bcm54616; + break; default: ret_val = -E1000_ERR_PHY; goto out; @@ -1659,6 +1662,9 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw) case e1000_phy_82580: ret_val = igb_copper_link_setup_82580(hw); break; + case e1000_phy_bcm54616: + ret_val = 0; + break; default: ret_val = -E1000_ERR_PHY; break; diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index d8517779439b..1de82f247312 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -889,6 +889,7 @@ #define I210_I_PHY_ID 0x01410C00 #define M88E1543_E_PHY_ID 0x01410EA0 #define M88E1512_E_PHY_ID 0x01410DD0 +#define BCM54616_E_PHY_ID 0x03625D10 /* M88E1000 Specific Registers */ #define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h index 2fb2213cd562..6c9485ab4b57 100644 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h @@ -128,6 +128,7 @@ enum e1000_phy_type { e1000_phy_ife, e1000_phy_82580, e1000_phy_i210, + e1000_phy_bcm54616, }; enum e1000_bus_type { @@ -491,13 +492,16 @@ struct e1000_fc_info { struct e1000_mbx_operations { s32 (*init_params)(struct e1000_hw *hw); - s32 (*read)(struct e1000_hw *, u32 *, u16, u16); - s32 (*write)(struct e1000_hw *, u32 *, u16, u16); - s32 (*read_posted)(struct e1000_hw *, u32 *, u16, u16); - s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16); - s32 (*check_for_msg)(struct e1000_hw *, u16); - s32 (*check_for_ack)(struct e1000_hw *, u16); - s32 (*check_for_rst)(struct e1000_hw *, u16); + s32 (*read)(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id, + bool unlock); + s32 (*write)(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id); + s32 (*read_posted)(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id); + s32 (*write_posted)(struct e1000_hw *hw, u32 *msg, u16 size, + u16 mbx_id); + s32 (*check_for_msg)(struct e1000_hw *hw, u16 mbx_id); + s32 (*check_for_ack)(struct e1000_hw *hw, u16 mbx_id); + s32 (*check_for_rst)(struct e1000_hw *hw, u16 mbx_id); + s32 (*unlock)(struct e1000_hw *hw, u16 mbx_id); }; struct e1000_mbx_stats { diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c index 00e263f0c030..bffd58f7b2a1 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mbx.c +++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c @@ -32,7 +32,8 @@ * * returns SUCCESS if it successfully read message from buffer **/ -s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id, + bool unlock) { struct e1000_mbx_info *mbx = &hw->mbx; s32 ret_val 
= -E1000_ERR_MBX; @@ -42,7 +43,7 @@ s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) size = mbx->size; if (mbx->ops.read) - ret_val = mbx->ops.read(hw, msg, size, mbx_id); + ret_val = mbx->ops.read(hw, msg, size, mbx_id, unlock); return ret_val; } @@ -124,6 +125,24 @@ s32 igb_check_for_rst(struct e1000_hw *hw, u16 mbx_id) return ret_val; } +/** + * igb_unlock_mbx - unlock the mailbox + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the mailbox was unlocked or else ERR_MBX + **/ +s32 igb_unlock_mbx(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + if (mbx->ops.unlock) + ret_val = mbx->ops.unlock(hw, mbx_id); + + return ret_val; +} + /** * igb_poll_for_msg - Wait for message notification * @hw: pointer to the HW structure @@ -204,7 +223,7 @@ static s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, ret_val = igb_poll_for_msg(hw, mbx_id); if (!ret_val) - ret_val = mbx->ops.read(hw, msg, size, mbx_id); + ret_val = mbx->ops.read(hw, msg, size, mbx_id, true); out: return ret_val; } @@ -340,6 +359,26 @@ static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) return ret_val; } +/** + * igb_release_mbx_lock_pf - release mailbox lock + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * return SUCCESS if we released the mailbox lock + **/ +static s32 igb_release_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) +{ + u32 p2v_mailbox; + + /* drop PF lock of mailbox, if set */ + p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number)); + if (p2v_mailbox & E1000_P2VMAILBOX_PFU) + wr32(E1000_P2VMAILBOX(vf_number), + p2v_mailbox & ~E1000_P2VMAILBOX_PFU); + + return 0; +} + /** * igb_write_mbx_pf - Places a message in the mailbox * @hw: pointer to the HW structure @@ -385,13 +424,14 @@ static s32 igb_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, * @msg: The message buffer * @size: Length of buffer * @vf_number: the VF index + * @unlock: unlock the mailbox when done? * * This function copies a message from the mailbox buffer to the caller's * memory buffer. The presumption is that the caller knows that there was * a message due to a VF request so no polling for message is needed. 
**/ static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, - u16 vf_number) + u16 vf_number, bool unlock) { s32 ret_val; u16 i; @@ -405,8 +445,12 @@ static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, for (i = 0; i < size; i++) msg[i] = array_rd32(E1000_VMBMEM(vf_number), i); - /* Acknowledge the message and release buffer */ - wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK); + /* Acknowledge the message and release mailbox lock (or not) */ + if (unlock) + wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK); + else + wr32(E1000_P2VMAILBOX(vf_number), + E1000_P2VMAILBOX_ACK | E1000_P2VMAILBOX_PFU); /* update stats */ hw->mbx.stats.msgs_rx++; @@ -437,6 +481,7 @@ s32 igb_init_mbx_params_pf(struct e1000_hw *hw) mbx->ops.check_for_msg = igb_check_for_msg_pf; mbx->ops.check_for_ack = igb_check_for_ack_pf; mbx->ops.check_for_rst = igb_check_for_rst_pf; + mbx->ops.unlock = igb_release_mbx_lock_pf; mbx->stats.msgs_tx = 0; mbx->stats.msgs_rx = 0; diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h index 3e7fed73df15..a62b08e1572e 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mbx.h +++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h @@ -67,11 +67,13 @@ #define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ -s32 igb_read_mbx(struct e1000_hw *, u32 *, u16, u16); -s32 igb_write_mbx(struct e1000_hw *, u32 *, u16, u16); -s32 igb_check_for_msg(struct e1000_hw *, u16); -s32 igb_check_for_ack(struct e1000_hw *, u16); -s32 igb_check_for_rst(struct e1000_hw *, u16); -s32 igb_init_mbx_params_pf(struct e1000_hw *); +s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id, + bool unlock); +s32 igb_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id); +s32 igb_check_for_msg(struct e1000_hw *hw, u16 mbx_id); +s32 igb_check_for_ack(struct e1000_hw *hw, u16 mbx_id); +s32 igb_check_for_rst(struct e1000_hw *hw, u16 mbx_id); +s32 igb_unlock_mbx(struct e1000_hw *hw, u16 mbx_id); +s32 igb_init_mbx_params_pf(struct e1000_hw *hw); #endif /* _E1000_MBX_H_ */ diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index ec62410b035a..fd4a46b03cc8 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1791,6 +1791,8 @@ void igb_down(struct igb_adapter *adapter) wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN); /* flush and sleep below */ + igb_nfc_filter_exit(adapter); + netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); @@ -3317,8 +3319,6 @@ static int __igb_close(struct net_device *netdev, bool suspending) igb_down(adapter); igb_free_irq(adapter); - igb_nfc_filter_exit(adapter); - igb_free_all_tx_resources(adapter); igb_free_all_rx_resources(adapter); @@ -5380,7 +5380,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); - if (!test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS, + if (adapter->tstamp_config.tx_type & HWTSTAMP_TX_ON && + !test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state)) { skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; tx_flags |= IGB_TX_FLAGS_TSTAMP; @@ -5745,8 +5746,6 @@ static void igb_tsync_interrupt(struct igb_adapter *adapter) event.type = PTP_CLOCK_PPS; if (adapter->ptp_caps.pps) ptp_clock_event(adapter->ptp_clock, &event); - else - dev_err(&adapter->pdev->dev, "unexpected SYS WRAP"); ack |= TSINTR_SYS_WRAP; } @@ -6676,32 +6675,33 @@ static void 
igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) struct vf_data_storage *vf_data = &adapter->vf_data[vf]; s32 retval; - retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf); + retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf, false); if (retval) { /* if receive failed revoke VF CTS stats and restart init */ dev_err(&pdev->dev, "Error receiving message from VF\n"); vf_data->flags &= ~IGB_VF_FLAG_CTS; if (!time_after(jiffies, vf_data->last_nack + (2 * HZ))) - return; + goto unlock; goto out; } /* this is a message we already processed, do nothing */ if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK)) - return; + goto unlock; /* until the vf completes a reset it should not be * allowed to start any configuration. */ if (msgbuf[0] == E1000_VF_RESET) { + /* unlocks mailbox */ igb_vf_reset_msg(adapter, vf); return; } if (!(vf_data->flags & IGB_VF_FLAG_CTS)) { if (!time_after(jiffies, vf_data->last_nack + (2 * HZ))) - return; + goto unlock; retval = -1; goto out; } @@ -6742,7 +6742,12 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) else msgbuf[0] |= E1000_VT_MSGTYPE_ACK; + /* unlocks mailbox */ igb_write_mbx(hw, msgbuf, 1, vf); + return; + +unlock: + igb_unlock_mbx(hw, vf); } static void igb_msg_task(struct igb_adapter *adapter) diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c index 34faa113a8a0..a127688e83e6 100644 --- a/drivers/net/ethernet/intel/igbvf/ethtool.c +++ b/drivers/net/ethernet/intel/igbvf/ethtool.c @@ -296,8 +296,12 @@ static int igbvf_link_test(struct igbvf_adapter *adapter, u64 *data) struct e1000_hw *hw = &adapter->hw; *data = 0; + spin_lock_bh(&hw->mbx_lock); + hw->mac.ops.check_for_link(hw); + spin_unlock_bh(&hw->mbx_lock); + if (!(er32(STATUS) & E1000_STATUS_LU)) *data = 1; diff --git a/drivers/net/ethernet/intel/igbvf/mbx.c b/drivers/net/ethernet/intel/igbvf/mbx.c index 01752f44ace2..c9a441632e9f 100644 --- a/drivers/net/ethernet/intel/igbvf/mbx.c +++ b/drivers/net/ethernet/intel/igbvf/mbx.c @@ -264,6 +264,8 @@ static s32 e1000_write_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size) s32 err; u16 i; + WARN_ON_ONCE(!spin_is_locked(&hw->mbx_lock)); + /* lock the mailbox to prevent pf/vf race condition */ err = e1000_obtain_mbx_lock_vf(hw); if (err) @@ -300,6 +302,8 @@ static s32 e1000_read_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size) s32 err; u16 i; + WARN_ON_ONCE(!spin_is_locked(&hw->mbx_lock)); + /* lock the mailbox to prevent pf/vf race condition */ err = e1000_obtain_mbx_lock_vf(hw); if (err) diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 1b9cbbe88f6f..1ed556911b14 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -1235,7 +1235,12 @@ static void igbvf_set_rlpml(struct igbvf_adapter *adapter) struct e1000_hw *hw = &adapter->hw; max_frame_size = adapter->max_frame_size + VLAN_TAG_SIZE; + + spin_lock_bh(&hw->mbx_lock); + e1000_rlpml_set_vf(hw, max_frame_size); + + spin_unlock_bh(&hw->mbx_lock); } static int igbvf_vlan_rx_add_vid(struct net_device *netdev, @@ -1244,10 +1249,16 @@ static int igbvf_vlan_rx_add_vid(struct net_device *netdev, struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; + spin_lock_bh(&hw->mbx_lock); + if (hw->mac.ops.set_vfta(hw, vid, true)) { dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid); + spin_unlock_bh(&hw->mbx_lock); return -EINVAL; } + + spin_unlock_bh(&hw->mbx_lock); + 
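/* A minimal sketch of the locking convention the igbvf changes in this patch
 * introduce: every mailbox transaction is wrapped in spin_lock_bh() on a
 * per-device lock, and the low-level accessor asserts the lock is held.
 * demo_hw, demo_mbx_write and demo_set_vlan are hypothetical names; the
 * opcode value is a placeholder.
 */
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/bug.h>

struct demo_hw {
	spinlock_t mbx_lock;	/* serializes PF/VF mailbox transactions */
};

static void demo_hw_init(struct demo_hw *hw)
{
	spin_lock_init(&hw->mbx_lock);
}

/* Low-level accessor: must be called with mbx_lock held. */
static int demo_mbx_write(struct demo_hw *hw, u32 *msg, u16 size)
{
	WARN_ON_ONCE(!spin_is_locked(&hw->mbx_lock));
	/* ... obtain the hardware mailbox and copy msg[0..size-1] ... */
	return 0;
}

/* Caller takes the BH-disabling lock around the whole transaction. */
static int demo_set_vlan(struct demo_hw *hw, u16 vid)
{
	u32 msg[2] = { 0x1 /* hypothetical opcode */, vid };
	int err;

	spin_lock_bh(&hw->mbx_lock);
	err = demo_mbx_write(hw, msg, 2);
	spin_unlock_bh(&hw->mbx_lock);

	return err;
}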
set_bit(vid, adapter->active_vlans); return 0; } @@ -1258,11 +1269,17 @@ static int igbvf_vlan_rx_kill_vid(struct net_device *netdev, struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; + spin_lock_bh(&hw->mbx_lock); + if (hw->mac.ops.set_vfta(hw, vid, false)) { dev_err(&adapter->pdev->dev, "Failed to remove vlan id %d\n", vid); + spin_unlock_bh(&hw->mbx_lock); return -EINVAL; } + + spin_unlock_bh(&hw->mbx_lock); + clear_bit(vid, adapter->active_vlans); return 0; } @@ -1428,7 +1445,11 @@ static void igbvf_set_multi(struct net_device *netdev) netdev_for_each_mc_addr(ha, netdev) memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); + spin_lock_bh(&hw->mbx_lock); + hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0); + + spin_unlock_bh(&hw->mbx_lock); kfree(mta_list); } @@ -1449,16 +1470,24 @@ static int igbvf_set_uni(struct net_device *netdev) return -ENOSPC; } + spin_lock_bh(&hw->mbx_lock); + /* Clear all unicast MAC filters */ hw->mac.ops.set_uc_addr(hw, E1000_VF_MAC_FILTER_CLR, NULL); + spin_unlock_bh(&hw->mbx_lock); + if (!netdev_uc_empty(netdev)) { struct netdev_hw_addr *ha; /* Add MAC filters one by one */ netdev_for_each_uc_addr(ha, netdev) { + spin_lock_bh(&hw->mbx_lock); + hw->mac.ops.set_uc_addr(hw, E1000_VF_MAC_FILTER_ADD, ha->addr); + + spin_unlock_bh(&hw->mbx_lock); udelay(200); } } @@ -1503,12 +1532,16 @@ static void igbvf_reset(struct igbvf_adapter *adapter) struct net_device *netdev = adapter->netdev; struct e1000_hw *hw = &adapter->hw; + spin_lock_bh(&hw->mbx_lock); + /* Allow time for pending master requests to run */ if (mac->ops.reset_hw(hw)) dev_err(&adapter->pdev->dev, "PF still resetting\n"); mac->ops.init_hw(hw); + spin_unlock_bh(&hw->mbx_lock); + if (is_valid_ether_addr(adapter->hw.mac.addr)) { memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); @@ -1643,6 +1676,7 @@ static int igbvf_sw_init(struct igbvf_adapter *adapter) igbvf_irq_disable(adapter); spin_lock_init(&adapter->stats_lock); + spin_lock_init(&adapter->hw.mbx_lock); set_bit(__IGBVF_DOWN, &adapter->state); return 0; @@ -1786,8 +1820,12 @@ static int igbvf_set_mac(struct net_device *netdev, void *p) memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + spin_lock_bh(&hw->mbx_lock); + hw->mac.ops.rar_set(hw, hw->mac.addr, 0); + spin_unlock_bh(&hw->mbx_lock); + if (!ether_addr_equal(addr->sa_data, hw->mac.addr)) return -EADDRNOTAVAIL; @@ -1858,7 +1896,12 @@ static bool igbvf_has_link(struct igbvf_adapter *adapter) if (test_bit(__IGBVF_DOWN, &adapter->state)) return false; + spin_lock_bh(&hw->mbx_lock); + ret_val = hw->mac.ops.check_for_link(hw); + + spin_unlock_bh(&hw->mbx_lock); + link_active = !hw->mac.get_link_status; /* if check for link returns error we will need to reset */ @@ -2808,6 +2851,8 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->min_mtu = ETH_MIN_MTU; netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE; + spin_lock_bh(&hw->mbx_lock); + /*reset the controller to put the device in a known good state */ err = hw->mac.ops.reset_hw(hw); if (err) { @@ -2824,6 +2869,8 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->addr_len); } + spin_unlock_bh(&hw->mbx_lock); + if (!is_valid_ether_addr(netdev->dev_addr)) { dev_info(&pdev->dev, "Assigning random MAC address.\n"); eth_hw_addr_random(netdev); diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c index 528be116184e..9577ccf4b26a 100644 --- a/drivers/net/ethernet/intel/igbvf/vf.c +++ 
b/drivers/net/ethernet/intel/igbvf/vf.c @@ -149,7 +149,7 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw) msgbuf[0] = E1000_VF_RESET; mbx->ops.write_posted(hw, msgbuf, 1); - msleep(10); + mdelay(10); /* set our "perm_addr" based on info provided by PF */ ret_val = mbx->ops.read_posted(hw, msgbuf, 3); @@ -230,6 +230,7 @@ static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u16 *hash_list = (u16 *)&msgbuf[1]; u32 hash_value; u32 cnt, i; + s32 ret_val; /* Each entry in the list uses 1 16 bit word. We have 30 * 16 bit words available in our HW msg buffer (minus 1 for the @@ -250,7 +251,9 @@ static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, mc_addr_list += ETH_ALEN; } - mbx->ops.write_posted(hw, msgbuf, E1000_VFMAILBOX_SIZE); + ret_val = mbx->ops.write_posted(hw, msgbuf, E1000_VFMAILBOX_SIZE); + if (!ret_val) + mbx->ops.read_posted(hw, msgbuf, 1); } /** @@ -293,11 +296,14 @@ void e1000_rlpml_set_vf(struct e1000_hw *hw, u16 max_size) { struct e1000_mbx_info *mbx = &hw->mbx; u32 msgbuf[2]; + s32 ret_val; msgbuf[0] = E1000_VF_SET_LPE; msgbuf[1] = max_size; - mbx->ops.write_posted(hw, msgbuf, 2); + ret_val = mbx->ops.write_posted(hw, msgbuf, 2); + if (!ret_val) + mbx->ops.read_posted(hw, msgbuf, 1); } /** diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h index 4cf78b0dec50..d213eefb6169 100644 --- a/drivers/net/ethernet/intel/igbvf/vf.h +++ b/drivers/net/ethernet/intel/igbvf/vf.h @@ -245,6 +245,7 @@ struct e1000_hw { struct e1000_mac_info mac; struct e1000_mbx_info mbx; + spinlock_t mbx_lock; /* serializes mailbox ops */ union { struct e1000_dev_spec_vf vf; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c index 523f9d05a810..8a32eb7d47b9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c @@ -175,31 +175,9 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) **/ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) { -#ifndef CONFIG_SPARC - u32 regval; - u32 i; -#endif s32 ret_val; ret_val = ixgbe_start_hw_generic(hw); - -#ifndef CONFIG_SPARC - /* Disable relaxed ordering */ - for (i = 0; ((i < hw->mac.max_tx_queues) && - (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { - regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); - regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; - IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval); - } - - for (i = 0; ((i < hw->mac.max_rx_queues) && - (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { - regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); - regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN | - IXGBE_DCA_RXCTRL_HEAD_WRO_EN); - IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); - } -#endif if (ret_val) return ret_val; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 4e35e7017f3d..6e6ab6f6875e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -79,16 +79,28 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) switch (hw->phy.media_type) { case ixgbe_media_type_fiber: - hw->mac.ops.check_link(hw, &speed, &link_up, false); - /* if link is down, assume supported */ - if (link_up) - supported = speed == IXGBE_LINK_SPEED_1GB_FULL ? 
+ /* flow control autoneg black list */ + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_A_SFP: + case IXGBE_DEV_ID_X550EM_A_SFP_N: + supported = false; + break; + default: + hw->mac.ops.check_link(hw, &speed, &link_up, false); + /* if link is down, assume supported */ + if (link_up) + supported = speed == IXGBE_LINK_SPEED_1GB_FULL ? true : false; - else - supported = true; + else + supported = true; + } + break; case ixgbe_media_type_backplane: - supported = true; + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_XFI) + supported = false; + else + supported = true; break; case ixgbe_media_type_copper: /* only some copper devices support flow control autoneg */ @@ -111,6 +123,10 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) break; } + if (!supported) + hw_dbg(hw, "Device %x does not support flow control autoneg\n", + hw->device_id); + return supported; } @@ -350,25 +366,6 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) } IXGBE_WRITE_FLUSH(hw); -#ifndef CONFIG_ARCH_WANT_RELAX_ORDER - /* Disable relaxed ordering */ - for (i = 0; i < hw->mac.max_tx_queues; i++) { - u32 regval; - - regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); - regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; - IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); - } - - for (i = 0; i < hw->mac.max_rx_queues; i++) { - u32 regval; - - regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); - regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN | - IXGBE_DCA_RXCTRL_HEAD_WRO_EN); - IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); - } -#endif return 0; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 72c565712a5f..c3e7a8191128 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1048,7 +1048,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev, { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_ring *temp_ring; - int i, err = 0; + int i, j, err = 0; u32 new_rx_count, new_tx_count; if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) @@ -1085,8 +1085,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev, } /* allocate temporary buffer to store rings in */ - i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues); - i = max_t(int, i, adapter->num_xdp_queues); + i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues, + adapter->num_rx_queues); temp_ring = vmalloc(i * sizeof(struct ixgbe_ring)); if (!temp_ring) { @@ -1118,8 +1118,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev, } } - for (i = 0; i < adapter->num_xdp_queues; i++) { - memcpy(&temp_ring[i], adapter->xdp_ring[i], + for (j = 0; j < adapter->num_xdp_queues; j++, i++) { + memcpy(&temp_ring[i], adapter->xdp_ring[j], sizeof(struct ixgbe_ring)); temp_ring[i].count = new_tx_count; @@ -1139,10 +1139,10 @@ static int ixgbe_set_ringparam(struct net_device *netdev, memcpy(adapter->tx_ring[i], &temp_ring[i], sizeof(struct ixgbe_ring)); } - for (i = 0; i < adapter->num_xdp_queues; i++) { - ixgbe_free_tx_resources(adapter->xdp_ring[i]); + for (j = 0; j < adapter->num_xdp_queues; j++, i++) { + ixgbe_free_tx_resources(adapter->xdp_ring[j]); - memcpy(adapter->xdp_ring[i], &temp_ring[i], + memcpy(adapter->xdp_ring[j], &temp_ring[i], sizeof(struct ixgbe_ring)); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index b45fdc98033d..f1bfae0c41d0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ 
b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -1018,8 +1018,12 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx) struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx]; struct ixgbe_ring *ring; - ixgbe_for_each_ring(ring, q_vector->tx) - adapter->tx_ring[ring->queue_index] = NULL; + ixgbe_for_each_ring(ring, q_vector->tx) { + if (ring_is_xdp(ring)) + adapter->xdp_ring[ring->queue_index] = NULL; + else + adapter->tx_ring[ring->queue_index] = NULL; + } ixgbe_for_each_ring(ring, q_vector->rx) adapter->rx_ring[ring->queue_index] = NULL; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index f1dbdf26d8e1..4d76afd13868 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -386,7 +386,7 @@ u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg) if (ixgbe_removed(reg_addr)) return IXGBE_FAILED_READ_REG; if (unlikely(hw->phy.nw_mng_if_sel & - IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M)) { + IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE)) { struct ixgbe_adapter *adapter; int i; @@ -2214,7 +2214,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, struct ixgbe_ring *rx_ring, struct xdp_buff *xdp) { - int result = IXGBE_XDP_PASS; + int err, result = IXGBE_XDP_PASS; struct bpf_prog *xdp_prog; u32 act; @@ -2231,6 +2231,13 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, case XDP_TX: result = ixgbe_xmit_xdp_ring(adapter, xdp); break; + case XDP_REDIRECT: + err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog); + if (!err) + result = IXGBE_XDP_TX; + else + result = IXGBE_XDP_CONSUMED; + break; default: bpf_warn_invalid_xdp_action(act); /* fallthrough */ @@ -2408,6 +2415,8 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, */ wmb(); writel(ring->next_to_use, ring->tail); + + xdp_do_flush_map(); } u64_stats_update_begin(&rx_ring->syncp); @@ -4872,7 +4881,7 @@ static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask) IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))) return; - vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) && ~mask; + vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) & ~mask; IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl); if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK) @@ -5810,6 +5819,9 @@ void ixgbe_down(struct ixgbe_adapter *adapter) usleep_range(10000, 20000); + /* synchronize_sched() needed for pending XDP buffers to drain */ + if (adapter->xdp_ring[0]) + synchronize_sched(); netif_tx_stop_all_queues(netdev); /* call carrier off first to avoid false dev_watchdog timeouts */ @@ -8517,6 +8529,10 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) return ixgbe_ptp_set_ts_config(adapter, req); case SIOCGHWTSTAMP: return ixgbe_ptp_get_ts_config(adapter, req); + case SIOCGMIIPHY: + if (!adapter->hw.phy.ops.read_reg) + return -EOPNOTSUPP; + /* fall through */ default: return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd); } @@ -8839,7 +8855,6 @@ static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter, } static int ixgbe_configure_clsu32_add_hnode(struct ixgbe_adapter *adapter, - __be16 protocol, struct tc_cls_u32_offload *cls) { u32 uhtid = TC_U32_USERHTID(cls->hnode.handle); @@ -8941,7 +8956,7 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter, LIST_HEAD(actions); int err; - if (tc_no_actions(exts)) + if (!tcf_exts_has_actions(exts)) return -EINVAL; tcf_exts_to_list(exts, &actions); @@ -9025,9 +9040,9 @@ static int ixgbe_clsu32_build_input(struct 
ixgbe_fdir_filter *input, } static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, - __be16 protocol, struct tc_cls_u32_offload *cls) { + __be16 protocol = cls->common.protocol; u32 loc = cls->knode.handle & 0xfffff; struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_mat_field *field_ptr; @@ -9214,41 +9229,49 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, return err; } -static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, u32 chain_index, - __be16 proto, struct tc_to_netdev *tc) +static int ixgbe_setup_tc_cls_u32(struct net_device *dev, + struct tc_cls_u32_offload *cls_u32) { struct ixgbe_adapter *adapter = netdev_priv(dev); - if (chain_index) + if (!is_classid_clsact_ingress(cls_u32->common.classid) || + cls_u32->common.chain_index) return -EOPNOTSUPP; - if (TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS) && - tc->type == TC_SETUP_CLSU32) { - switch (tc->cls_u32->command) { - case TC_CLSU32_NEW_KNODE: - case TC_CLSU32_REPLACE_KNODE: - return ixgbe_configure_clsu32(adapter, - proto, tc->cls_u32); - case TC_CLSU32_DELETE_KNODE: - return ixgbe_delete_clsu32(adapter, tc->cls_u32); - case TC_CLSU32_NEW_HNODE: - case TC_CLSU32_REPLACE_HNODE: - return ixgbe_configure_clsu32_add_hnode(adapter, proto, - tc->cls_u32); - case TC_CLSU32_DELETE_HNODE: - return ixgbe_configure_clsu32_del_hnode(adapter, - tc->cls_u32); - default: - return -EINVAL; - } + switch (cls_u32->command) { + case TC_CLSU32_NEW_KNODE: + case TC_CLSU32_REPLACE_KNODE: + return ixgbe_configure_clsu32(adapter, cls_u32); + case TC_CLSU32_DELETE_KNODE: + return ixgbe_delete_clsu32(adapter, cls_u32); + case TC_CLSU32_NEW_HNODE: + case TC_CLSU32_REPLACE_HNODE: + return ixgbe_configure_clsu32_add_hnode(adapter, cls_u32); + case TC_CLSU32_DELETE_HNODE: + return ixgbe_configure_clsu32_del_hnode(adapter, cls_u32); + default: + return -EOPNOTSUPP; } +} - if (tc->type != TC_SETUP_MQPRIO) - return -EINVAL; +static int ixgbe_setup_tc_mqprio(struct net_device *dev, + struct tc_mqprio_qopt *mqprio) +{ + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + return ixgbe_setup_tc(dev, mqprio->num_tc); +} - tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; - - return ixgbe_setup_tc(dev, tc->mqprio->num_tc); +static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + switch (type) { + case TC_SETUP_CLSU32: + return ixgbe_setup_tc_cls_u32(dev, type_data); + case TC_SETUP_MQPRIO: + return ixgbe_setup_tc_mqprio(dev, type_data); + default: + return -EOPNOTSUPP; + } } #ifdef CONFIG_PCI_IOV @@ -9823,6 +9846,53 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_xdp *xdp) } } +static int ixgbe_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_ring *ring; + int err; + + if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state))) + return -ENETDOWN; + + /* During program transitions its possible adapter->xdp_prog is assigned + * but ring has not been configured yet. In this case simply abort xmit. + */ + ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL; + if (unlikely(!ring)) + return -ENXIO; + + err = ixgbe_xmit_xdp_ring(adapter, xdp); + if (err != IXGBE_XDP_TX) + return -ENOSPC; + + return 0; +} + +static void ixgbe_xdp_flush(struct net_device *dev) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_ring *ring; + + /* Its possible the device went down between xdp xmit and flush so + * we need to ensure device is still up. 
+ */ + if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state))) + return; + + ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL; + if (unlikely(!ring)) + return; + + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. + */ + wmb(); + writel(ring->next_to_use, ring->tail); + + return; +} + static const struct net_device_ops ixgbe_netdev_ops = { .ndo_open = ixgbe_open, .ndo_stop = ixgbe_close, @@ -9869,6 +9939,8 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port, .ndo_features_check = ixgbe_features_check, .ndo_xdp = ixgbe_xdp, + .ndo_xdp_xmit = ixgbe_xdp_xmit, + .ndo_xdp_flush = ixgbe_xdp_flush, }; /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 0760bd7eeb01..112d24c6c9ce 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -679,8 +679,9 @@ static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf) static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, int vf, int index, unsigned char *mac_addr) { - struct list_head *pos; struct vf_macvlans *entry; + struct list_head *pos; + int retval = 0; if (index <= 1) { list_for_each(pos, &adapter->vf_mvs.l) { @@ -721,13 +722,15 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, if (!entry || !entry->free) return -ENOSPC; + retval = ixgbe_add_mac_filter(adapter, mac_addr, vf); + if (retval < 0) + return retval; + entry->free = false; entry->is_macvlan = true; entry->vf = vf; memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN); - ixgbe_add_mac_filter(adapter, mac_addr, vf); - return 0; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 9c2460c5ef1b..ffa0ee5cd0f5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -3778,8 +3778,8 @@ struct ixgbe_info { #define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_1G BIT(19) #define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G BIT(20) #define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10G BIT(21) -#define IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M BIT(23) -#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE BIT(24) +#define IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE BIT(25) +#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE BIT(24) /* X552 only */ #define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT 3 #define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD \ (0x1F << IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 72d84a065e34..19fbb2f28ea4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -1555,9 +1555,14 @@ static s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw) **/ static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed) { + struct ixgbe_mac_info *mac = &hw->mac; s32 status; u32 reg_val; + /* iXFI is only supported with X552 */ + if (mac->type != ixgbe_mac_X550EM_x) + return IXGBE_ERR_LINK_SETUP; + /* Disable AN and force speed to 10G Serial. */ status = ixgbe_read_iosf_sb_reg_x550(hw, IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), @@ -1874,8 +1879,10 @@ static s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw, else force_speed = IXGBE_LINK_SPEED_1GB_FULL; - /* If internal link mode is XFI, then setup XFI internal link. 
*/ - if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { + /* If X552 and internal link mode is XFI, then setup XFI internal link. + */ + if (hw->mac.type == ixgbe_mac_X550EM_x && + !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { status = ixgbe_setup_ixfi_x550em(hw, &force_speed); if (status) @@ -2404,17 +2411,30 @@ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw) status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc); /* Enable link status change alarm */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, - MDIO_MMD_AN, ®); - if (status) - return status; - reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN; + /* Enable the LASI interrupts on X552 devices to receive notifications + * of the link configurations of the external PHY and correspondingly + * support the configuration of the internal iXFI link, since iXFI does + * not support auto-negotiation. This is not required for X553 devices + * having KR support, which performs auto-negotiations and which is used + * as the internal link to the external PHY. Hence adding a check here + * to avoid enabling LASI interrupts for X553 devices. + */ + if (hw->mac.type != ixgbe_mac_x550em_a) { + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, + MDIO_MMD_AN, ®); + if (status) + return status; - status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, - MDIO_MMD_AN, reg); - if (status) - return status; + reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN; + + status = hw->phy.ops.write_reg(hw, + IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, + MDIO_MMD_AN, reg); + if (status) + return status; + } /* Enable high temperature failure and global fault alarms */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK, @@ -2615,7 +2635,8 @@ static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw) if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) return IXGBE_ERR_CONFIG; - if (hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE) { + if (!(hw->mac.type == ixgbe_mac_X550EM_x && + !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE))) { speed = IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL; return ixgbe_setup_kr_speed_x550em(hw, speed); @@ -2822,7 +2843,7 @@ static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw) { bool pause, asm_dir; u32 reg_val; - s32 rc; + s32 rc = 0; /* Validate the requested mode */ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { @@ -2865,32 +2886,37 @@ static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw) return IXGBE_ERR_CONFIG; } - if (hw->device_id != IXGBE_DEV_ID_X550EM_X_KR && - hw->device_id != IXGBE_DEV_ID_X550EM_A_KR && - hw->device_id != IXGBE_DEV_ID_X550EM_A_KR_L) - return 0; + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_A_KR: + case IXGBE_DEV_ID_X550EM_A_KR_L: + rc = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, + ®_val); + if (rc) + return rc; - rc = hw->mac.ops.read_iosf_sb_reg(hw, - IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, - ®_val); - if (rc) - return rc; - - reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | - IXGBE_KRM_AN_CNTL_1_ASM_PAUSE); - if (pause) - reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE; - if (asm_dir) - reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; - rc = hw->mac.ops.write_iosf_sb_reg(hw, - IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, - reg_val); - - /* This device does not fully support AN. 
*/ - hw->fc.disable_fc_autoneg = true; + reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | + IXGBE_KRM_AN_CNTL_1_ASM_PAUSE); + if (pause) + reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE; + if (asm_dir) + reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; + rc = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, + reg_val); + /* This device does not fully support AN. */ + hw->fc.disable_fc_autoneg = true; + break; + case IXGBE_DEV_ID_X550EM_X_XFI: + hw->fc.disable_fc_autoneg = true; + break; + default: + break; + } return rc; } diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 9c94ea9b2b80..81c1fac00d33 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -183,8 +183,6 @@ static char mv643xx_eth_driver_version[] = "1.4"; #define DEFAULT_TX_QUEUE_SIZE 512 #define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES) -#define TSO_HEADER_SIZE 128 - /* Max number of allowed TCP segments for software TSO */ #define MV643XX_MAX_TSO_SEGS 100 #define MV643XX_MAX_SKB_DESCS (MV643XX_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS) @@ -1123,7 +1121,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force) struct sk_buff *skb = __skb_dequeue(&txq->tx_skb); if (!WARN_ON(!skb)) - dev_kfree_skb(skb); + dev_consume_skb_any(skb); } if (cmd_sts & ERROR_SUMMARY) { @@ -2026,7 +2024,7 @@ static void rxq_deinit(struct rx_queue *rxq) for (i = 0; i < rxq->rx_ring_size; i++) { if (rxq->rx_skb[i]) { - dev_kfree_skb(rxq->rx_skb[i]); + dev_consume_skb_any(rxq->rx_skb[i]); rxq->rx_desc_count--; } } diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 0aab74c2a209..64a04975bcf8 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -281,9 +281,6 @@ */ #define MVNETA_RSS_LU_TABLE_SIZE 1 -/* TSO header size */ -#define TSO_HEADER_SIZE 128 - /* Max number of Rx descriptors */ #define MVNETA_MAX_RXD 128 @@ -4332,7 +4329,7 @@ static int mvneta_probe(struct platform_device *pdev) } } - dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; + dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO; dev->hw_features |= dev->features; dev->vlan_features |= dev->features; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 4d598ca8503a..9c86cb7cb988 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -27,12 +28,15 @@ #include #include #include +#include #include #include #include +#include #include #include #include +#include /* RX Fifo Registers */ #define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port)) @@ -120,6 +124,9 @@ #define MVPP2_TXQ_DESC_ADDR_REG 0x2084 #define MVPP2_TXQ_DESC_SIZE_REG 0x2088 #define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0 +#define MVPP2_TXQ_THRESH_REG 0x2094 +#define MVPP2_TXQ_THRESH_OFFSET 16 +#define MVPP2_TXQ_THRESH_MASK 0x3fff #define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090 #define MVPP2_TXQ_INDEX_REG 0x2098 #define MVPP2_TXQ_PREF_BUF_REG 0x209c @@ -183,22 +190,25 @@ #define MVPP22_AXI_CODE_DOMAIN_SYSTEM 3 /* Interrupt Cause and Mask registers */ +#define MVPP2_ISR_TX_THRESHOLD_REG(port) (0x5140 + 4 * (port)) +#define MVPP2_MAX_ISR_TX_THRESHOLD 0xfffff0 + #define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq)) #define 
MVPP2_MAX_ISR_RX_THRESHOLD 0xfffff0 -#define MVPP21_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq)) +#define MVPP21_ISR_RXQ_GROUP_REG(port) (0x5400 + 4 * (port)) -#define MVPP22_ISR_RXQ_GROUP_INDEX_REG 0x5400 +#define MVPP22_ISR_RXQ_GROUP_INDEX_REG 0x5400 #define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf -#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380 -#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET 7 +#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380 +#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET 7 #define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf -#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380 +#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380 -#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG 0x5404 -#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK 0x1f -#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK 0xf00 -#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET 8 +#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG 0x5404 +#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK 0x1f +#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK 0xf00 +#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET 8 #define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port)) #define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff) @@ -206,6 +216,7 @@ #define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port)) #define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff #define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000 +#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET 16 #define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24) #define MVPP2_CAUSE_FCS_ERR_MASK BIT(25) #define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26) @@ -265,7 +276,7 @@ #define MVPP2_BM_VIRT_RLS_REG 0x64c0 #define MVPP22_BM_ADDR_HIGH_RLS_REG 0x64c4 #define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK 0xff -#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00 +#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00 #define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8 /* TX Scheduler registers */ @@ -307,57 +318,87 @@ /* Per-port registers */ #define MVPP2_GMAC_CTRL_0_REG 0x0 -#define MVPP2_GMAC_PORT_EN_MASK BIT(0) -#define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2 -#define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc -#define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15) +#define MVPP2_GMAC_PORT_EN_MASK BIT(0) +#define MVPP2_GMAC_PORT_TYPE_MASK BIT(1) +#define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2 +#define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc +#define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15) #define MVPP2_GMAC_CTRL_1_REG 0x4 -#define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1) -#define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5) -#define MVPP2_GMAC_PCS_LB_EN_BIT 6 -#define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6) -#define MVPP2_GMAC_SA_LOW_OFFS 7 +#define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1) +#define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5) +#define MVPP2_GMAC_PCS_LB_EN_BIT 6 +#define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6) +#define MVPP2_GMAC_SA_LOW_OFFS 7 #define MVPP2_GMAC_CTRL_2_REG 0x8 -#define MVPP2_GMAC_INBAND_AN_MASK BIT(0) -#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3) -#define MVPP2_GMAC_PORT_RGMII_MASK BIT(4) -#define MVPP2_GMAC_PORT_RESET_MASK BIT(6) +#define MVPP2_GMAC_INBAND_AN_MASK BIT(0) +#define MVPP2_GMAC_FLOW_CTRL_MASK GENMASK(2, 1) +#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3) +#define MVPP2_GMAC_INTERNAL_CLK_MASK BIT(4) +#define MVPP2_GMAC_DISABLE_PADDING BIT(5) +#define MVPP2_GMAC_PORT_RESET_MASK BIT(6) #define MVPP2_GMAC_AUTONEG_CONFIG 0xc -#define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0) -#define MVPP2_GMAC_FORCE_LINK_PASS BIT(1) -#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5) -#define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6) -#define MVPP2_GMAC_AN_SPEED_EN BIT(7) -#define MVPP2_GMAC_FC_ADV_EN BIT(9) -#define 
MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12) -#define MVPP2_GMAC_AN_DUPLEX_EN BIT(13) +#define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0) +#define MVPP2_GMAC_FORCE_LINK_PASS BIT(1) +#define MVPP2_GMAC_IN_BAND_AUTONEG BIT(2) +#define MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS BIT(3) +#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5) +#define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6) +#define MVPP2_GMAC_AN_SPEED_EN BIT(7) +#define MVPP2_GMAC_FC_ADV_EN BIT(9) +#define MVPP2_GMAC_FLOW_CTRL_AUTONEG BIT(11) +#define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12) +#define MVPP2_GMAC_AN_DUPLEX_EN BIT(13) +#define MVPP2_GMAC_STATUS0 0x10 +#define MVPP2_GMAC_STATUS0_LINK_UP BIT(0) #define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c -#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6 -#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0 -#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \ +#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6 +#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0 +#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \ MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK) +#define MVPP22_GMAC_INT_STAT 0x20 +#define MVPP22_GMAC_INT_STAT_LINK BIT(1) +#define MVPP22_GMAC_INT_MASK 0x24 +#define MVPP22_GMAC_INT_MASK_LINK_STAT BIT(1) #define MVPP22_GMAC_CTRL_4_REG 0x90 -#define MVPP22_CTRL4_EXT_PIN_GMII_SEL BIT(0) -#define MVPP22_CTRL4_DP_CLK_SEL BIT(5) -#define MVPP22_CTRL4_SYNC_BYPASS BIT(6) -#define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE BIT(7) +#define MVPP22_CTRL4_EXT_PIN_GMII_SEL BIT(0) +#define MVPP22_CTRL4_DP_CLK_SEL BIT(5) +#define MVPP22_CTRL4_SYNC_BYPASS_DIS BIT(6) +#define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE BIT(7) +#define MVPP22_GMAC_INT_SUM_MASK 0xa4 +#define MVPP22_GMAC_INT_SUM_MASK_LINK_STAT BIT(1) /* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0, * relative to port->base. */ #define MVPP22_XLG_CTRL0_REG 0x100 -#define MVPP22_XLG_CTRL0_PORT_EN BIT(0) -#define MVPP22_XLG_CTRL0_MAC_RESET_DIS BIT(1) -#define MVPP22_XLG_CTRL0_MIB_CNT_DIS BIT(14) - +#define MVPP22_XLG_CTRL0_PORT_EN BIT(0) +#define MVPP22_XLG_CTRL0_MAC_RESET_DIS BIT(1) +#define MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN BIT(7) +#define MVPP22_XLG_CTRL0_MIB_CNT_DIS BIT(14) +#define MVPP22_XLG_CTRL1_REG 0x104 +#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS 0 +#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK 0x1fff +#define MVPP22_XLG_STATUS 0x10c +#define MVPP22_XLG_STATUS_LINK_UP BIT(0) +#define MVPP22_XLG_INT_STAT 0x114 +#define MVPP22_XLG_INT_STAT_LINK BIT(1) +#define MVPP22_XLG_INT_MASK 0x118 +#define MVPP22_XLG_INT_MASK_LINK BIT(1) #define MVPP22_XLG_CTRL3_REG 0x11c -#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK (7 << 13) -#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC (0 << 13) -#define MVPP22_XLG_CTRL3_MACMODESELECT_10G (1 << 13) +#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK (7 << 13) +#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC (0 << 13) +#define MVPP22_XLG_CTRL3_MACMODESELECT_10G (1 << 13) +#define MVPP22_XLG_EXT_INT_MASK 0x15c +#define MVPP22_XLG_EXT_INT_MASK_XLG BIT(1) +#define MVPP22_XLG_EXT_INT_MASK_GIG BIT(2) +#define MVPP22_XLG_CTRL4_REG 0x184 +#define MVPP22_XLG_CTRL4_FWD_FC BIT(5) +#define MVPP22_XLG_CTRL4_FWD_PFC BIT(6) +#define MVPP22_XLG_CTRL4_MACMODSELECT_GMAC BIT(12) /* SMI registers. PPv2.2 only, relative to priv->iface_base. */ #define MVPP22_SMI_MISC_CFG_REG 0x1204 -#define MVPP22_SMI_POLLING_EN BIT(10) +#define MVPP22_SMI_POLLING_EN BIT(10) #define MVPP22_GMAC_BASE(port) (0x7000 + (port) * 0x1000 + 0xe00) @@ -367,11 +408,44 @@ #define MVPP2_QUEUE_NEXT_DESC(q, index) \ (((index) < (q)->last_desc) ? ((index) + 1) : 0) +/* XPCS registers. 
PPv2.2 only */ +#define MVPP22_MPCS_BASE(port) (0x7000 + (port) * 0x1000) +#define MVPP22_MPCS_CTRL 0x14 +#define MVPP22_MPCS_CTRL_FWD_ERR_CONN BIT(10) +#define MVPP22_MPCS_CLK_RESET 0x14c +#define MAC_CLK_RESET_SD_TX BIT(0) +#define MAC_CLK_RESET_SD_RX BIT(1) +#define MAC_CLK_RESET_MAC BIT(2) +#define MVPP22_MPCS_CLK_RESET_DIV_RATIO(n) ((n) << 4) +#define MVPP22_MPCS_CLK_RESET_DIV_SET BIT(11) + +/* XPCS registers. PPv2.2 only */ +#define MVPP22_XPCS_BASE(port) (0x7400 + (port) * 0x1000) +#define MVPP22_XPCS_CFG0 0x0 +#define MVPP22_XPCS_CFG0_PCS_MODE(n) ((n) << 3) +#define MVPP22_XPCS_CFG0_ACTIVE_LANE(n) ((n) << 5) + +/* System controller registers. Accessed through a regmap. */ +#define GENCONF_SOFT_RESET1 0x1108 +#define GENCONF_SOFT_RESET1_GOP BIT(6) +#define GENCONF_PORT_CTRL0 0x1110 +#define GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT BIT(1) +#define GENCONF_PORT_CTRL0_RX_DATA_SAMPLE BIT(29) +#define GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR BIT(31) +#define GENCONF_PORT_CTRL1 0x1114 +#define GENCONF_PORT_CTRL1_EN(p) BIT(p) +#define GENCONF_PORT_CTRL1_RESET(p) (BIT(p) << 28) +#define GENCONF_CTRL0 0x1120 +#define GENCONF_CTRL0_PORT0_RGMII BIT(0) +#define GENCONF_CTRL0_PORT1_RGMII_MII BIT(1) +#define GENCONF_CTRL0_PORT1_RGMII BIT(2) + /* Various constants */ /* Coalescing */ #define MVPP2_TXDONE_COAL_PKTS_THRESH 15 #define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL +#define MVPP2_TXDONE_COAL_USEC 1000 #define MVPP2_RX_COAL_PKTS 32 #define MVPP2_RX_COAL_USEC 100 @@ -602,6 +676,7 @@ enum mvpp2_tag_type { #define MVPP2_PRS_RI_L3_MCAST BIT(15) #define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16)) #define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000 +#define MVPP2_PRS_RI_IP_FRAG_TRUE BIT(17) #define MVPP2_PRS_RI_UDF3_MASK 0x300000 #define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21) #define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000 @@ -685,7 +760,8 @@ enum mvpp2_prs_l3_cast { #define MVPP21_ADDR_SPACE_SZ 0 #define MVPP22_ADDR_SPACE_SZ SZ_64K -#define MVPP2_MAX_CPUS 4 +#define MVPP2_MAX_THREADS 8 +#define MVPP2_MAX_QVECS MVPP2_MAX_THREADS enum mvpp2_bm_type { MVPP2_BM_FREE, @@ -701,16 +777,23 @@ struct mvpp2 { void __iomem *lms_base; void __iomem *iface_base; - /* On PPv2.2, each CPU can access the base register through a - * separate address space, each 64 KB apart from each - * other. + /* On PPv2.2, each "software thread" can access the base + * register through a separate address space, each 64 KB apart + * from each other. Typically, such address spaces will be + * used per CPU. */ - void __iomem *cpu_base[MVPP2_MAX_CPUS]; + void __iomem *swth_base[MVPP2_MAX_THREADS]; + + /* On PPv2.2, some port control registers are located into the system + * controller space. These registers are accessible through a regmap. 
+ */ + struct regmap *sysctrl_base; /* Common clocks */ struct clk *pp_clk; struct clk *gop_clk; struct clk *mg_clk; + struct clk *axi_clk; /* List of pointers to port structures */ struct mvpp2_port **port_list; @@ -752,6 +835,18 @@ struct mvpp2_port_pcpu { struct tasklet_struct tx_done_tasklet; }; +struct mvpp2_queue_vector { + int irq; + struct napi_struct napi; + enum { MVPP2_QUEUE_VECTOR_SHARED, MVPP2_QUEUE_VECTOR_PRIVATE } type; + int sw_thread_id; + u16 sw_thread_mask; + int first_rxq; + int nrxqs; + u32 pending_cause_rx; + struct mvpp2_port *port; +}; + struct mvpp2_port { u8 id; @@ -760,7 +855,7 @@ struct mvpp2_port { */ int gop_id; - int irq; + int link_irq; struct mvpp2 *priv; @@ -768,14 +863,13 @@ struct mvpp2_port { void __iomem *base; struct mvpp2_rx_queue **rxqs; + unsigned int nrxqs; struct mvpp2_tx_queue **txqs; + unsigned int ntxqs; struct net_device *dev; int pkt_size; - u32 pending_cause_rx; - struct napi_struct napi; - /* Per-CPU port control */ struct mvpp2_port_pcpu __percpu *pcpu; @@ -788,6 +882,7 @@ struct mvpp2_port { phy_interface_t phy_interface; struct device_node *phy_node; + struct phy *comphy; unsigned int link; unsigned int duplex; unsigned int speed; @@ -797,6 +892,12 @@ struct mvpp2_port { /* Index of first port's physical RXQ */ u8 first_rxq; + + struct mvpp2_queue_vector qvecs[MVPP2_MAX_QVECS]; + unsigned int nqvecs; + bool has_tx_irqs; + + u32 tx_time_coal; }; /* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the @@ -932,6 +1033,10 @@ struct mvpp2_txq_pcpu { /* Index of the TX DMA descriptor to be cleaned up */ int txq_get_index; + + /* DMA buffer for TSO headers */ + char *tso_headers; + dma_addr_t tso_headers_dma; }; struct mvpp2_tx_queue { @@ -1062,12 +1167,14 @@ struct mvpp2_bm_pool { u32 port_map; }; -/* Static declaractions */ +/* Queue modes */ +#define MVPP2_QDIST_SINGLE_MODE 0 +#define MVPP2_QDIST_MULTI_MODE 1 -/* Number of RXQs used by single port */ -static int rxq_number = MVPP2_DEFAULT_RXQ; -/* Number of TXQs used by single port */ -static int txq_number = MVPP2_MAX_TXQ; +static int queue_mode = MVPP2_QDIST_SINGLE_MODE; + +module_param(queue_mode, int, 0444); +MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)"); #define MVPP2_DRIVER_NAME "mvpp2" #define MVPP2_DRIVER_VERSION "1.0" @@ -1076,12 +1183,12 @@ static int txq_number = MVPP2_MAX_TXQ; static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data) { - writel(data, priv->cpu_base[0] + offset); + writel(data, priv->swth_base[0] + offset); } static u32 mvpp2_read(struct mvpp2 *priv, u32 offset) { - return readl(priv->cpu_base[0] + offset); + return readl(priv->swth_base[0] + offset); } /* These accessors should be used to access: @@ -1123,13 +1230,13 @@ static u32 mvpp2_read(struct mvpp2 *priv, u32 offset) static void mvpp2_percpu_write(struct mvpp2 *priv, int cpu, u32 offset, u32 data) { - writel(data, priv->cpu_base[cpu] + offset); + writel(data, priv->swth_base[cpu] + offset); } static u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu, u32 offset) { - return readl(priv->cpu_base[cpu] + offset); + return readl(priv->swth_base[cpu] + offset); } static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port, @@ -2210,7 +2317,7 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto, (proto != IPPROTO_IGMP)) return -EINVAL; - /* Fragmented packet */ + /* Not fragmented packet */ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID); if (tid < 0) @@ -2229,8 +2336,12 @@ static int mvpp2_prs_ip4_proto(struct 
mvpp2 *priv, unsigned short proto, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, MVPP2_PRS_IPV4_DIP_AI_BIT); - mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK, - ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK); + mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK); + + mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, + MVPP2_PRS_TCAM_PROTO_MASK_L); + mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, + MVPP2_PRS_TCAM_PROTO_MASK); mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK); mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT); @@ -2241,7 +2352,7 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto, mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); mvpp2_prs_hw_write(priv, &pe); - /* Not fragmented packet */ + /* Fragmented packet */ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID); if (tid < 0) @@ -2253,8 +2364,11 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto, pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; mvpp2_prs_sram_ri_update(&pe, ri, ri_mask); - mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L); - mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK); + mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE, + ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK); + + mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0); + mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0); /* Update shadow table and hw entry */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); @@ -4070,7 +4184,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) port->pool_long->port_map |= (1 << port->id); - for (rxq = 0; rxq < rxq_number; rxq++) + for (rxq = 0; rxq < port->nrxqs; rxq++) mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id); } @@ -4084,7 +4198,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) port->pool_short->port_map |= (1 << port->id); - for (rxq = 0; rxq < rxq_number; rxq++) + for (rxq = 0; rxq < port->nrxqs; rxq++) mvpp2_rxq_short_pool_set(port, rxq, port->pool_short->id); } @@ -4125,22 +4239,40 @@ static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu) static inline void mvpp2_interrupts_enable(struct mvpp2_port *port) { - int cpu, cpu_mask = 0; + int i, sw_thread_mask = 0; + + for (i = 0; i < port->nqvecs; i++) + sw_thread_mask |= port->qvecs[i].sw_thread_mask; - for_each_present_cpu(cpu) - cpu_mask |= 1 << cpu; mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), - MVPP2_ISR_ENABLE_INTERRUPT(cpu_mask)); + MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask)); } static inline void mvpp2_interrupts_disable(struct mvpp2_port *port) { - int cpu, cpu_mask = 0; + int i, sw_thread_mask = 0; + + for (i = 0; i < port->nqvecs; i++) + sw_thread_mask |= port->qvecs[i].sw_thread_mask; - for_each_present_cpu(cpu) - cpu_mask |= 1 << cpu; mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), - MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask)); + MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask)); +} + +static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec) +{ + struct mvpp2_port *port = qvec->port; + + mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), + MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask)); +} + +static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec) +{ + struct mvpp2_port *port = qvec->port; + + mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), + MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask)); } /* Mask the 
current CPU's Rx/Tx interrupts @@ -4162,15 +4294,345 @@ static void mvpp2_interrupts_mask(void *arg) static void mvpp2_interrupts_unmask(void *arg) { struct mvpp2_port *port = arg; + u32 val; + + val = MVPP2_CAUSE_MISC_SUM_MASK | + MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; + if (port->has_tx_irqs) + val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; mvpp2_percpu_write(port->priv, smp_processor_id(), - MVPP2_ISR_RX_TX_MASK_REG(port->id), - (MVPP2_CAUSE_MISC_SUM_MASK | - MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK)); + MVPP2_ISR_RX_TX_MASK_REG(port->id), val); +} + +static void +mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask) +{ + u32 val; + int i; + + if (port->priv->hw_version != MVPP22) + return; + + if (mask) + val = 0; + else + val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; + + for (i = 0; i < port->nqvecs; i++) { + struct mvpp2_queue_vector *v = port->qvecs + i; + + if (v->type != MVPP2_QUEUE_VECTOR_SHARED) + continue; + + mvpp2_percpu_write(port->priv, v->sw_thread_id, + MVPP2_ISR_RX_TX_MASK_REG(port->id), val); + } } /* Port configuration routines */ +static void mvpp22_gop_init_rgmii(struct mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + u32 val; + + regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val); + val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT; + regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val); + + regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val); + if (port->gop_id == 2) + val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII; + else if (port->gop_id == 3) + val |= GENCONF_CTRL0_PORT1_RGMII_MII; + regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val); +} + +static void mvpp22_gop_init_sgmii(struct mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + u32 val; + + regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val); + val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT | + GENCONF_PORT_CTRL0_RX_DATA_SAMPLE; + regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val); + + if (port->gop_id > 1) { + regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val); + if (port->gop_id == 2) + val &= ~GENCONF_CTRL0_PORT0_RGMII; + else if (port->gop_id == 3) + val &= ~GENCONF_CTRL0_PORT1_RGMII_MII; + regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val); + } +} + +static void mvpp22_gop_init_10gkr(struct mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id); + void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id); + u32 val; + + /* XPCS */ + val = readl(xpcs + MVPP22_XPCS_CFG0); + val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) | + MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3)); + val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2); + writel(val, xpcs + MVPP22_XPCS_CFG0); + + /* MPCS */ + val = readl(mpcs + MVPP22_MPCS_CTRL); + val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN; + writel(val, mpcs + MVPP22_MPCS_CTRL); + + val = readl(mpcs + MVPP22_MPCS_CLK_RESET); + val &= ~(MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7) | MAC_CLK_RESET_MAC | + MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX); + val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1); + writel(val, mpcs + MVPP22_MPCS_CLK_RESET); + + val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET; + val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX; + writel(val, mpcs + MVPP22_MPCS_CLK_RESET); +} + +static int mvpp22_gop_init(struct mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + u32 val; + + if (!priv->sysctrl_base) + return 0; + + switch (port->phy_interface) { + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case 
PHY_INTERFACE_MODE_RGMII_TXID: + if (port->gop_id == 0) + goto invalid_conf; + mvpp22_gop_init_rgmii(port); + break; + case PHY_INTERFACE_MODE_SGMII: + mvpp22_gop_init_sgmii(port); + break; + case PHY_INTERFACE_MODE_10GKR: + if (port->gop_id != 0) + goto invalid_conf; + mvpp22_gop_init_10gkr(port); + break; + default: + goto unsupported_conf; + } + + regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val); + val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) | + GENCONF_PORT_CTRL1_EN(port->gop_id); + regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val); + + regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val); + val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR; + regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val); + + regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val); + val |= GENCONF_SOFT_RESET1_GOP; + regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val); + +unsupported_conf: + return 0; + +invalid_conf: + netdev_err(port->dev, "Invalid port configuration\n"); + return -EINVAL; +} + +static void mvpp22_gop_unmask_irq(struct mvpp2_port *port) +{ + u32 val; + + if (phy_interface_mode_is_rgmii(port->phy_interface) || + port->phy_interface == PHY_INTERFACE_MODE_SGMII) { + /* Enable the GMAC link status irq for this port */ + val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK); + val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT; + writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK); + } + + if (port->gop_id == 0) { + /* Enable the XLG/GIG irqs for this port */ + val = readl(port->base + MVPP22_XLG_EXT_INT_MASK); + if (port->phy_interface == PHY_INTERFACE_MODE_10GKR) + val |= MVPP22_XLG_EXT_INT_MASK_XLG; + else + val |= MVPP22_XLG_EXT_INT_MASK_GIG; + writel(val, port->base + MVPP22_XLG_EXT_INT_MASK); + } +} + +static void mvpp22_gop_mask_irq(struct mvpp2_port *port) +{ + u32 val; + + if (port->gop_id == 0) { + val = readl(port->base + MVPP22_XLG_EXT_INT_MASK); + val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG | + MVPP22_XLG_EXT_INT_MASK_GIG); + writel(val, port->base + MVPP22_XLG_EXT_INT_MASK); + } + + if (phy_interface_mode_is_rgmii(port->phy_interface) || + port->phy_interface == PHY_INTERFACE_MODE_SGMII) { + val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK); + val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT; + writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK); + } +} + +static void mvpp22_gop_setup_irq(struct mvpp2_port *port) +{ + u32 val; + + if (phy_interface_mode_is_rgmii(port->phy_interface) || + port->phy_interface == PHY_INTERFACE_MODE_SGMII) { + val = readl(port->base + MVPP22_GMAC_INT_MASK); + val |= MVPP22_GMAC_INT_MASK_LINK_STAT; + writel(val, port->base + MVPP22_GMAC_INT_MASK); + } + + if (port->gop_id == 0) { + val = readl(port->base + MVPP22_XLG_INT_MASK); + val |= MVPP22_XLG_INT_MASK_LINK; + writel(val, port->base + MVPP22_XLG_INT_MASK); + } + + mvpp22_gop_unmask_irq(port); +} + +static int mvpp22_comphy_init(struct mvpp2_port *port) +{ + enum phy_mode mode; + int ret; + + if (!port->comphy) + return 0; + + switch (port->phy_interface) { + case PHY_INTERFACE_MODE_SGMII: + mode = PHY_MODE_SGMII; + break; + case PHY_INTERFACE_MODE_10GKR: + mode = PHY_MODE_10GKR; + break; + default: + return -EINVAL; + } + + ret = phy_set_mode(port->comphy, mode); + if (ret) + return ret; + + return phy_power_on(port->comphy); +} + +static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port) +{ + u32 val; + + if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) { + val = readl(port->base + MVPP22_GMAC_CTRL_4_REG); + val |= MVPP22_CTRL4_SYNC_BYPASS_DIS | MVPP22_CTRL4_DP_CLK_SEL | + 
MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; + val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL; + writel(val, port->base + MVPP22_GMAC_CTRL_4_REG); + + val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); + val |= MVPP2_GMAC_DISABLE_PADDING; + val &= ~MVPP2_GMAC_FLOW_CTRL_MASK; + writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); + } else if (phy_interface_mode_is_rgmii(port->phy_interface)) { + val = readl(port->base + MVPP22_GMAC_CTRL_4_REG); + val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL | + MVPP22_CTRL4_SYNC_BYPASS_DIS | + MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; + val &= ~MVPP22_CTRL4_DP_CLK_SEL; + writel(val, port->base + MVPP22_GMAC_CTRL_4_REG); + + val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); + val &= ~MVPP2_GMAC_DISABLE_PADDING; + writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); + } + + /* The port is connected to a copper PHY */ + val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); + val &= ~MVPP2_GMAC_PORT_TYPE_MASK; + writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); + + val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); + val |= MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS | + MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG | + MVPP2_GMAC_AN_DUPLEX_EN; + if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) + val |= MVPP2_GMAC_IN_BAND_AUTONEG; + writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); +} + +static void mvpp2_port_mii_gmac_configure(struct mvpp2_port *port) +{ + u32 val; + + /* Force link down */ + val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); + val &= ~MVPP2_GMAC_FORCE_LINK_PASS; + val |= MVPP2_GMAC_FORCE_LINK_DOWN; + writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); + + /* Set the GMAC in a reset state */ + val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); + val |= MVPP2_GMAC_PORT_RESET_MASK; + writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); + + /* Configure the PCS and in-band AN */ + val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); + if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) { + val |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK; + } else if (phy_interface_mode_is_rgmii(port->phy_interface)) { + val &= ~MVPP2_GMAC_PCS_ENABLE_MASK; + } + writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); + + mvpp2_port_mii_gmac_configure_mode(port); + + /* Unset the GMAC reset state */ + val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); + val &= ~MVPP2_GMAC_PORT_RESET_MASK; + writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); + + /* Stop forcing link down */ + val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); + val &= ~MVPP2_GMAC_FORCE_LINK_DOWN; + writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); +} + +static void mvpp2_port_mii_xlg_configure(struct mvpp2_port *port) +{ + u32 val; + + if (port->gop_id != 0) + return; + + val = readl(port->base + MVPP22_XLG_CTRL0_REG); + val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN; + writel(val, port->base + MVPP22_XLG_CTRL0_REG); + + val = readl(port->base + MVPP22_XLG_CTRL4_REG); + val &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC; + val |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC; + writel(val, port->base + MVPP22_XLG_CTRL4_REG); +} + static void mvpp22_port_mii_set(struct mvpp2_port *port) { u32 val; @@ -4188,38 +4650,18 @@ static void mvpp22_port_mii_set(struct mvpp2_port *port) writel(val, port->base + MVPP22_XLG_CTRL3_REG); } - - val = readl(port->base + MVPP22_GMAC_CTRL_4_REG); - if (port->phy_interface == PHY_INTERFACE_MODE_RGMII) - val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL; - else - val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL; - val &= ~MVPP22_CTRL4_DP_CLK_SEL; - val |= MVPP22_CTRL4_SYNC_BYPASS; - val |= MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; - writel(val, 
port->base + MVPP22_GMAC_CTRL_4_REG); } static void mvpp2_port_mii_set(struct mvpp2_port *port) { - u32 val; - if (port->priv->hw_version == MVPP22) mvpp22_port_mii_set(port); - val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); - - switch (port->phy_interface) { - case PHY_INTERFACE_MODE_SGMII: - val |= MVPP2_GMAC_INBAND_AN_MASK; - break; - case PHY_INTERFACE_MODE_RGMII: - val |= MVPP2_GMAC_PORT_RGMII_MASK; - default: - val &= ~MVPP2_GMAC_PCS_ENABLE_MASK; - } - - writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); + if (phy_interface_mode_is_rgmii(port->phy_interface) || + port->phy_interface == PHY_INTERFACE_MODE_SGMII) + mvpp2_port_mii_gmac_configure(port); + else if (port->phy_interface == PHY_INTERFACE_MODE_10GKR) + mvpp2_port_mii_xlg_configure(port); } static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port) @@ -4326,6 +4768,18 @@ static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port) writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); } +/* Change maximum receive size of the port */ +static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port) +{ + u32 val; + + val = readl(port->base + MVPP22_XLG_CTRL1_REG); + val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK; + val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) << + MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS; + writel(val, port->base + MVPP22_XLG_CTRL1_REG); +} + /* Set defaults to the MVPP2 port */ static void mvpp2_defaults_set(struct mvpp2_port *port) { @@ -4376,7 +4830,7 @@ static void mvpp2_defaults_set(struct mvpp2_port *port) MVPP2_RX_LOW_LATENCY_PKT_SIZE(256)); /* Enable Rx cache snoop */ - for (lrxq = 0; lrxq < rxq_number; lrxq++) { + for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { queue = port->rxqs[lrxq]->id; val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); val |= MVPP2_SNOOP_PKT_SIZE_MASK | @@ -4394,7 +4848,7 @@ static void mvpp2_ingress_enable(struct mvpp2_port *port) u32 val; int lrxq, queue; - for (lrxq = 0; lrxq < rxq_number; lrxq++) { + for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { queue = port->rxqs[lrxq]->id; val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); val &= ~MVPP2_RXQ_DISABLE_MASK; @@ -4407,7 +4861,7 @@ static void mvpp2_ingress_disable(struct mvpp2_port *port) u32 val; int lrxq, queue; - for (lrxq = 0; lrxq < rxq_number; lrxq++) { + for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { queue = port->rxqs[lrxq]->id; val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); val |= MVPP2_RXQ_DISABLE_MASK; @@ -4426,7 +4880,7 @@ static void mvpp2_egress_enable(struct mvpp2_port *port) /* Enable all initialized TXs. */ qmap = 0; - for (queue = 0; queue < txq_number; queue++) { + for (queue = 0; queue < port->ntxqs; queue++) { struct mvpp2_tx_queue *txq = port->txqs[queue]; if (txq->descs) @@ -4712,7 +5166,7 @@ static void mvpp2_txq_sent_counter_clear(void *arg) struct mvpp2_port *port = arg; int queue; - for (queue = 0; queue < txq_number; queue++) { + for (queue = 0; queue < port->ntxqs; queue++) { int id = port->txqs[queue]->id; mvpp2_percpu_read(port->priv, smp_processor_id(), @@ -4753,7 +5207,7 @@ static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port) mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val); } - for (txq = 0; txq < txq_number; txq++) { + for (txq = 0; txq < port->ntxqs; txq++) { val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq)); size = val & MVPP2_TXQ_TOKEN_SIZE_MAX; @@ -4787,6 +5241,23 @@ static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, put_cpu(); } +/* For some reason in the LSP this is done on each CPU. Why ? 
*/ +static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port, + struct mvpp2_tx_queue *txq) +{ + int cpu = get_cpu(); + u32 val; + + if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK) + txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK; + + val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET); + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_THRESH_REG, val); + + put_cpu(); +} + static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz) { u64 tmp = (u64)clk_hz * usec; @@ -4823,6 +5294,22 @@ static void mvpp2_rx_time_coal_set(struct mvpp2_port *port, mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val); } +static void mvpp2_tx_time_coal_set(struct mvpp2_port *port) +{ + unsigned long freq = port->priv->tclk; + u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq); + + if (val > MVPP2_MAX_ISR_TX_THRESHOLD) { + port->tx_time_coal = + mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq); + + /* re-evaluate to get actual register value */ + val = mvpp2_usec_to_cycles(port->tx_time_coal, freq); + } + + mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val); +} + /* Free Tx queue skbuffs */ static void mvpp2_txq_bufs_free(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, @@ -4881,7 +5368,8 @@ static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, netif_tx_wake_queue(nq); } -static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause) +static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause, + int cpu) { struct mvpp2_tx_queue *txq; struct mvpp2_txq_pcpu *txq_pcpu; @@ -4892,7 +5380,7 @@ static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause) if (!txq) break; - txq_pcpu = this_cpu_ptr(txq->pcpu); + txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); if (txq_pcpu->count) { mvpp2_txq_done(port, txq, txq_pcpu); @@ -4908,15 +5396,14 @@ static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause) /* Allocate and initialize descriptors for aggr TXQ */ static int mvpp2_aggr_txq_init(struct platform_device *pdev, - struct mvpp2_tx_queue *aggr_txq, - int desc_num, int cpu, + struct mvpp2_tx_queue *aggr_txq, int cpu, struct mvpp2 *priv) { u32 txq_dma; /* Allocate memory for TX descriptors */ aggr_txq->descs = dma_alloc_coherent(&pdev->dev, - desc_num * MVPP2_DESC_ALIGNED_SIZE, + MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, &aggr_txq->descs_dma, GFP_KERNEL); if (!aggr_txq->descs) return -ENOMEM; @@ -4937,7 +5424,8 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev, MVPP22_AGGR_TXQ_DESC_ADDR_OFFS; mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma); - mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num); + mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), + MVPP2_AGGR_TXQ_SIZE); return 0; } @@ -5118,6 +5606,14 @@ static int mvpp2_txq_init(struct mvpp2_port *port, txq_pcpu->reserved_num = 0; txq_pcpu->txq_put_index = 0; txq_pcpu->txq_get_index = 0; + + txq_pcpu->tso_headers = + dma_alloc_coherent(port->dev->dev.parent, + MVPP2_AGGR_TXQ_SIZE * TSO_HEADER_SIZE, + &txq_pcpu->tso_headers_dma, + GFP_KERNEL); + if (!txq_pcpu->tso_headers) + goto cleanup; } return 0; @@ -5125,6 +5621,11 @@ static int mvpp2_txq_init(struct mvpp2_port *port, for_each_present_cpu(cpu) { txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); kfree(txq_pcpu->buffs); + + dma_free_coherent(port->dev->dev.parent, + MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, + txq_pcpu->tso_headers, + txq_pcpu->tso_headers_dma); } dma_free_coherent(port->dev->dev.parent, @@ 
-5144,6 +5645,11 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port, for_each_present_cpu(cpu) { txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); kfree(txq_pcpu->buffs); + + dma_free_coherent(port->dev->dev.parent, + MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, + txq_pcpu->tso_headers, + txq_pcpu->tso_headers_dma); } if (txq->descs) @@ -5229,7 +5735,7 @@ static void mvpp2_cleanup_txqs(struct mvpp2_port *port) val |= MVPP2_TX_PORT_FLUSH_MASK(port->id); mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); - for (queue = 0; queue < txq_number; queue++) { + for (queue = 0; queue < port->ntxqs; queue++) { txq = port->txqs[queue]; mvpp2_txq_clean(port, txq); mvpp2_txq_deinit(port, txq); @@ -5246,7 +5752,7 @@ static void mvpp2_cleanup_rxqs(struct mvpp2_port *port) { int queue; - for (queue = 0; queue < rxq_number; queue++) + for (queue = 0; queue < port->nrxqs; queue++) mvpp2_rxq_deinit(port, port->rxqs[queue]); } @@ -5255,7 +5761,7 @@ static int mvpp2_setup_rxqs(struct mvpp2_port *port) { int queue, err; - for (queue = 0; queue < rxq_number; queue++) { + for (queue = 0; queue < port->nrxqs; queue++) { err = mvpp2_rxq_init(port, port->rxqs[queue]); if (err) goto err_cleanup; @@ -5273,13 +5779,21 @@ static int mvpp2_setup_txqs(struct mvpp2_port *port) struct mvpp2_tx_queue *txq; int queue, err; - for (queue = 0; queue < txq_number; queue++) { + for (queue = 0; queue < port->ntxqs; queue++) { txq = port->txqs[queue]; err = mvpp2_txq_init(port, txq); if (err) goto err_cleanup; } + if (port->has_tx_irqs) { + mvpp2_tx_time_coal_set(port); + for (queue = 0; queue < port->ntxqs; queue++) { + txq = port->txqs[queue]; + mvpp2_tx_pkts_coal_set(port, txq); + } + } + on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); return 0; @@ -5291,72 +5805,170 @@ static int mvpp2_setup_txqs(struct mvpp2_port *port) /* The callback for per-port interrupt */ static irqreturn_t mvpp2_isr(int irq, void *dev_id) { - struct mvpp2_port *port = (struct mvpp2_port *)dev_id; + struct mvpp2_queue_vector *qv = dev_id; - mvpp2_interrupts_disable(port); + mvpp2_qvec_interrupt_disable(qv); - napi_schedule(&port->napi); + napi_schedule(&qv->napi); return IRQ_HANDLED; } +/* Per-port interrupt for link status changes */ +static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id) +{ + struct mvpp2_port *port = (struct mvpp2_port *)dev_id; + struct net_device *dev = port->dev; + bool event = false, link = false; + u32 val; + + mvpp22_gop_mask_irq(port); + + if (port->gop_id == 0 && + port->phy_interface == PHY_INTERFACE_MODE_10GKR) { + val = readl(port->base + MVPP22_XLG_INT_STAT); + if (val & MVPP22_XLG_INT_STAT_LINK) { + event = true; + val = readl(port->base + MVPP22_XLG_STATUS); + if (val & MVPP22_XLG_STATUS_LINK_UP) + link = true; + } + } else if (phy_interface_mode_is_rgmii(port->phy_interface) || + port->phy_interface == PHY_INTERFACE_MODE_SGMII) { + val = readl(port->base + MVPP22_GMAC_INT_STAT); + if (val & MVPP22_GMAC_INT_STAT_LINK) { + event = true; + val = readl(port->base + MVPP2_GMAC_STATUS0); + if (val & MVPP2_GMAC_STATUS0_LINK_UP) + link = true; + } + } + + if (!netif_running(dev) || !event) + goto handled; + + if (link) { + mvpp2_interrupts_enable(port); + + mvpp2_egress_enable(port); + mvpp2_ingress_enable(port); + netif_carrier_on(dev); + netif_tx_wake_all_queues(dev); + } else { + netif_tx_stop_all_queues(dev); + netif_carrier_off(dev); + mvpp2_ingress_disable(port); + mvpp2_egress_disable(port); + + mvpp2_interrupts_disable(port); + } + +handled: + mvpp22_gop_unmask_irq(port); + return IRQ_HANDLED; +} + +static 
void mvpp2_gmac_set_autoneg(struct mvpp2_port *port, + struct phy_device *phydev) +{ + u32 val; + + if (port->phy_interface != PHY_INTERFACE_MODE_RGMII && + port->phy_interface != PHY_INTERFACE_MODE_RGMII_ID && + port->phy_interface != PHY_INTERFACE_MODE_RGMII_RXID && + port->phy_interface != PHY_INTERFACE_MODE_RGMII_TXID && + port->phy_interface != PHY_INTERFACE_MODE_SGMII) + return; + + val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); + val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED | + MVPP2_GMAC_CONFIG_GMII_SPEED | + MVPP2_GMAC_CONFIG_FULL_DUPLEX | + MVPP2_GMAC_AN_SPEED_EN | + MVPP2_GMAC_AN_DUPLEX_EN); + + if (phydev->duplex) + val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX; + + if (phydev->speed == SPEED_1000) + val |= MVPP2_GMAC_CONFIG_GMII_SPEED; + else if (phydev->speed == SPEED_100) + val |= MVPP2_GMAC_CONFIG_MII_SPEED; + + writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); +} + /* Adjust link */ static void mvpp2_link_event(struct net_device *dev) { struct mvpp2_port *port = netdev_priv(dev); struct phy_device *phydev = dev->phydev; - int status_change = 0; + bool link_reconfigured = false; u32 val; if (phydev->link) { + if (port->phy_interface != phydev->interface && port->comphy) { + /* disable current port for reconfiguration */ + mvpp2_interrupts_disable(port); + netif_carrier_off(port->dev); + mvpp2_port_disable(port); + phy_power_off(port->comphy); + + /* comphy reconfiguration */ + port->phy_interface = phydev->interface; + mvpp22_comphy_init(port); + + /* gop/mac reconfiguration */ + mvpp22_gop_init(port); + mvpp2_port_mii_set(port); + + link_reconfigured = true; + } + if ((port->speed != phydev->speed) || (port->duplex != phydev->duplex)) { - u32 val; - - val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); - val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED | - MVPP2_GMAC_CONFIG_GMII_SPEED | - MVPP2_GMAC_CONFIG_FULL_DUPLEX | - MVPP2_GMAC_AN_SPEED_EN | - MVPP2_GMAC_AN_DUPLEX_EN); - - if (phydev->duplex) - val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX; - - if (phydev->speed == SPEED_1000) - val |= MVPP2_GMAC_CONFIG_GMII_SPEED; - else if (phydev->speed == SPEED_100) - val |= MVPP2_GMAC_CONFIG_MII_SPEED; - - writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); + mvpp2_gmac_set_autoneg(port, phydev); port->duplex = phydev->duplex; port->speed = phydev->speed; } } - if (phydev->link != port->link) { - if (!phydev->link) { - port->duplex = -1; - port->speed = 0; - } - + if (phydev->link != port->link || link_reconfigured) { port->link = phydev->link; - status_change = 1; - } - if (status_change) { if (phydev->link) { - val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); - val |= (MVPP2_GMAC_FORCE_LINK_PASS | - MVPP2_GMAC_FORCE_LINK_DOWN); - writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); + if (port->phy_interface == PHY_INTERFACE_MODE_RGMII || + port->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || + port->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID || + port->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID || + port->phy_interface == PHY_INTERFACE_MODE_SGMII) { + val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); + val |= (MVPP2_GMAC_FORCE_LINK_PASS | + MVPP2_GMAC_FORCE_LINK_DOWN); + writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); + } + + mvpp2_interrupts_enable(port); + mvpp2_port_enable(port); + mvpp2_egress_enable(port); mvpp2_ingress_enable(port); + netif_carrier_on(dev); + netif_tx_wake_all_queues(dev); } else { + port->duplex = -1; + port->speed = 0; + + netif_tx_stop_all_queues(dev); + netif_carrier_off(dev); mvpp2_ingress_disable(port); mvpp2_egress_disable(port); + + 
mvpp2_port_disable(port); + mvpp2_interrupts_disable(port); } + phy_print_status(phydev); } } @@ -5385,8 +5997,8 @@ static void mvpp2_tx_proc_cb(unsigned long data) port_pcpu->timer_scheduled = false; /* Process all the Tx queues */ - cause = (1 << txq_number) - 1; - tx_todo = mvpp2_tx_done(port, cause); + cause = (1 << port->ntxqs) - 1; + tx_todo = mvpp2_tx_done(port, cause, smp_processor_id()); /* Set the timer in case not all the packets were processed */ if (tx_todo) @@ -5498,8 +6110,8 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb) } /* Main rx processing */ -static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, - struct mvpp2_rx_queue *rxq) +static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, + int rx_todo, struct mvpp2_rx_queue *rxq) { struct net_device *dev = port->dev; int rx_received; @@ -5577,7 +6189,7 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, skb->protocol = eth_type_trans(skb, dev); mvpp2_rx_csum(port, rx_status, skb); - napi_gro_receive(&port->napi, skb); + napi_gro_receive(napi, skb); } if (rcvd_pkts) { @@ -5665,6 +6277,123 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb, return -ENOMEM; } +static inline void mvpp2_tso_put_hdr(struct sk_buff *skb, + struct net_device *dev, + struct mvpp2_tx_queue *txq, + struct mvpp2_tx_queue *aggr_txq, + struct mvpp2_txq_pcpu *txq_pcpu, + int hdr_sz) +{ + struct mvpp2_port *port = netdev_priv(dev); + struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq); + dma_addr_t addr; + + mvpp2_txdesc_txq_set(port, tx_desc, txq->id); + mvpp2_txdesc_size_set(port, tx_desc, hdr_sz); + + addr = txq_pcpu->tso_headers_dma + + txq_pcpu->txq_put_index * TSO_HEADER_SIZE; + mvpp2_txdesc_offset_set(port, tx_desc, addr & MVPP2_TX_DESC_ALIGN); + mvpp2_txdesc_dma_addr_set(port, tx_desc, addr & ~MVPP2_TX_DESC_ALIGN); + + mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) | + MVPP2_TXD_F_DESC | + MVPP2_TXD_PADDING_DISABLE); + mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); +} + +static inline int mvpp2_tso_put_data(struct sk_buff *skb, + struct net_device *dev, struct tso_t *tso, + struct mvpp2_tx_queue *txq, + struct mvpp2_tx_queue *aggr_txq, + struct mvpp2_txq_pcpu *txq_pcpu, + int sz, bool left, bool last) +{ + struct mvpp2_port *port = netdev_priv(dev); + struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq); + dma_addr_t buf_dma_addr; + + mvpp2_txdesc_txq_set(port, tx_desc, txq->id); + mvpp2_txdesc_size_set(port, tx_desc, sz); + + buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) { + mvpp2_txq_desc_put(txq); + return -ENOMEM; + } + + mvpp2_txdesc_offset_set(port, tx_desc, + buf_dma_addr & MVPP2_TX_DESC_ALIGN); + mvpp2_txdesc_dma_addr_set(port, tx_desc, + buf_dma_addr & ~MVPP2_TX_DESC_ALIGN); + + if (!left) { + mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC); + if (last) { + mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc); + return 0; + } + } else { + mvpp2_txdesc_cmd_set(port, tx_desc, 0); + } + + mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); + return 0; +} + +static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev, + struct mvpp2_tx_queue *txq, + struct mvpp2_tx_queue *aggr_txq, + struct mvpp2_txq_pcpu *txq_pcpu) +{ + struct mvpp2_port *port = netdev_priv(dev); + struct tso_t tso; + int hdr_sz = skb_transport_offset(skb) + tcp_hdrlen(skb); + int i, len, descs = 0; + + /* Check number of available 
descriptors */ + if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, + tso_count_descs(skb)) || + mvpp2_txq_reserved_desc_num_proc(port->priv, txq, txq_pcpu, + tso_count_descs(skb))) + return 0; + + tso_start(skb, &tso); + len = skb->len - hdr_sz; + while (len > 0) { + int left = min_t(int, skb_shinfo(skb)->gso_size, len); + char *hdr = txq_pcpu->tso_headers + + txq_pcpu->txq_put_index * TSO_HEADER_SIZE; + + len -= left; + descs++; + + tso_build_hdr(skb, hdr, &tso, left, len == 0); + mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz); + + while (left > 0) { + int sz = min_t(int, tso.size, left); + left -= sz; + descs++; + + if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq, + txq_pcpu, sz, left, len == 0)) + goto release; + tso_build_data(skb, &tso, sz); + } + } + + return descs; + +release: + for (i = descs - 1; i >= 0; i--) { + struct mvpp2_tx_desc *tx_desc = txq->descs + i; + tx_desc_unmap_put(port, txq, tx_desc); + } + return 0; +} + /* Main tx processing */ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev) { @@ -5682,6 +6411,10 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev) txq_pcpu = this_cpu_ptr(txq->pcpu); aggr_txq = &port->priv->aggr_txqs[smp_processor_id()]; + if (skb_is_gso(skb)) { + frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu); + goto out; + } frags = skb_shinfo(skb)->nr_frags + 1; /* Check number of available descriptors */ @@ -5731,22 +6464,21 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev) } } - txq_pcpu->reserved_num -= frags; - txq_pcpu->count += frags; - aggr_txq->count += frags; - - /* Enable transmit */ - wmb(); - mvpp2_aggr_txq_pend_desc_add(port, frags); - - if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1) { - struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id); - - netif_tx_stop_queue(nq); - } out: if (frags > 0) { struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); + struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id); + + txq_pcpu->reserved_num -= frags; + txq_pcpu->count += frags; + aggr_txq->count += frags; + + /* Enable transmit */ + wmb(); + mvpp2_aggr_txq_pend_desc_add(port, frags); + + if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1) + netif_tx_stop_queue(nq); u64_stats_update_begin(&stats->syncp); stats->tx_packets++; @@ -5762,7 +6494,8 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev) mvpp2_txq_done(port, txq, txq_pcpu); /* Set the timer in case not all frags were processed */ - if (txq_pcpu->count <= frags && txq_pcpu->count > 0) { + if (!port->has_tx_irqs && txq_pcpu->count <= frags && + txq_pcpu->count > 0) { struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu); mvpp2_timer_set(port_pcpu); @@ -5783,11 +6516,14 @@ static inline void mvpp2_cause_error(struct net_device *dev, int cause) static int mvpp2_poll(struct napi_struct *napi, int budget) { - u32 cause_rx_tx, cause_rx, cause_misc; + u32 cause_rx_tx, cause_rx, cause_tx, cause_misc; int rx_done = 0; struct mvpp2_port *port = netdev_priv(napi->dev); + struct mvpp2_queue_vector *qv; int cpu = smp_processor_id(); + qv = container_of(napi, struct mvpp2_queue_vector, napi); + /* Rx/Tx cause register * * Bits 0-15: each bit indicates received packets on the Rx queue @@ -5798,11 +6534,10 @@ static int mvpp2_poll(struct napi_struct *napi, int budget) * * Each CPU has its own Rx/Tx cause register */ - cause_rx_tx = mvpp2_percpu_read(port->priv, cpu, + cause_rx_tx = mvpp2_percpu_read(port->priv, qv->sw_thread_id, MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); - cause_rx_tx &= 
~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; - cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; + cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; if (cause_misc) { mvpp2_cause_error(port->dev, cause_misc); @@ -5813,10 +6548,16 @@ static int mvpp2_poll(struct napi_struct *napi, int budget) cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); } - cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; + cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; + if (cause_tx) { + cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET; + mvpp2_tx_done(port, cause_tx, qv->sw_thread_id); + } /* Process RX packets */ - cause_rx |= port->pending_cause_rx; + cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; + cause_rx <<= qv->first_rxq; + cause_rx |= qv->pending_cause_rx; while (cause_rx && budget > 0) { int count; struct mvpp2_rx_queue *rxq; @@ -5825,7 +6566,7 @@ static int mvpp2_poll(struct napi_struct *napi, int budget) if (!rxq) break; - count = mvpp2_rx(port, budget, rxq); + count = mvpp2_rx(port, napi, budget, rxq); rx_done += count; budget -= count; if (budget > 0) { @@ -5841,9 +6582,9 @@ static int mvpp2_poll(struct napi_struct *napi, int budget) cause_rx = 0; napi_complete_done(napi, rx_done); - mvpp2_interrupts_enable(port); + mvpp2_qvec_interrupt_enable(qv); } - port->pending_cause_rx = cause_rx; + qv->pending_cause_rx = cause_rx; return rx_done; } @@ -5851,17 +6592,32 @@ static int mvpp2_poll(struct napi_struct *napi, int budget) static void mvpp2_start_dev(struct mvpp2_port *port) { struct net_device *ndev = port->dev; + int i; + + if (port->gop_id == 0 && + (port->phy_interface == PHY_INTERFACE_MODE_XAUI || + port->phy_interface == PHY_INTERFACE_MODE_10GKR)) + mvpp2_xlg_max_rx_size_set(port); + else + mvpp2_gmac_max_rx_size_set(port); - mvpp2_gmac_max_rx_size_set(port); mvpp2_txp_max_tx_size_set(port); - napi_enable(&port->napi); + for (i = 0; i < port->nqvecs; i++) + napi_enable(&port->qvecs[i].napi); /* Enable interrupts on all CPUs */ mvpp2_interrupts_enable(port); + if (port->priv->hw_version == MVPP22) { + mvpp22_comphy_init(port); + mvpp22_gop_init(port); + } + + mvpp2_port_mii_set(port); mvpp2_port_enable(port); - phy_start(ndev->phydev); + if (ndev->phydev) + phy_start(ndev->phydev); netif_tx_start_all_queues(port->dev); } @@ -5869,6 +6625,7 @@ static void mvpp2_start_dev(struct mvpp2_port *port) static void mvpp2_stop_dev(struct mvpp2_port *port) { struct net_device *ndev = port->dev; + int i; /* Stop new packets from arriving to RXQs */ mvpp2_ingress_disable(port); @@ -5878,14 +6635,17 @@ static void mvpp2_stop_dev(struct mvpp2_port *port) /* Disable interrupts on all CPUs */ mvpp2_interrupts_disable(port); - napi_disable(&port->napi); + for (i = 0; i < port->nqvecs; i++) + napi_disable(&port->qvecs[i].napi); netif_carrier_off(port->dev); netif_tx_stop_all_queues(port->dev); mvpp2_egress_disable(port); mvpp2_port_disable(port); - phy_stop(ndev->phydev); + if (ndev->phydev) + phy_stop(ndev->phydev); + phy_power_off(port->comphy); } static int mvpp2_check_ringparam_valid(struct net_device *dev, @@ -5941,6 +6701,10 @@ static int mvpp2_phy_connect(struct mvpp2_port *port) { struct phy_device *phy_dev; + /* No PHY is attached */ + if (!port->phy_node) + return 0; + phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0, port->phy_interface); if (!phy_dev) { @@ -5961,12 +6725,56 @@ static void mvpp2_phy_disconnect(struct mvpp2_port *port) { struct net_device *ndev = port->dev; + if (!ndev->phydev) + return; + phy_disconnect(ndev->phydev); } +static 
int mvpp2_irqs_init(struct mvpp2_port *port) +{ + int err, i; + + for (i = 0; i < port->nqvecs; i++) { + struct mvpp2_queue_vector *qv = port->qvecs + i; + + err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv); + if (err) + goto err; + + if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) + irq_set_affinity_hint(qv->irq, + cpumask_of(qv->sw_thread_id)); + } + + return 0; +err: + for (i = 0; i < port->nqvecs; i++) { + struct mvpp2_queue_vector *qv = port->qvecs + i; + + irq_set_affinity_hint(qv->irq, NULL); + free_irq(qv->irq, qv); + } + + return err; +} + +static void mvpp2_irqs_deinit(struct mvpp2_port *port) +{ + int i; + + for (i = 0; i < port->nqvecs; i++) { + struct mvpp2_queue_vector *qv = port->qvecs + i; + + irq_set_affinity_hint(qv->irq, NULL); + free_irq(qv->irq, qv); + } +} + static int mvpp2_open(struct net_device *dev) { struct mvpp2_port *port = netdev_priv(dev); + struct mvpp2 *priv = port->priv; unsigned char mac_bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; int err; @@ -6006,28 +6814,44 @@ static int mvpp2_open(struct net_device *dev) goto err_cleanup_rxqs; } - err = request_irq(port->irq, mvpp2_isr, 0, dev->name, port); + err = mvpp2_irqs_init(port); if (err) { - netdev_err(port->dev, "cannot request IRQ %d\n", port->irq); + netdev_err(port->dev, "cannot init IRQs\n"); goto err_cleanup_txqs; } + if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq) { + err = request_irq(port->link_irq, mvpp2_link_status_isr, 0, + dev->name, port); + if (err) { + netdev_err(port->dev, "cannot request link IRQ %d\n", + port->link_irq); + goto err_free_irq; + } + + mvpp22_gop_setup_irq(port); + } + /* In default link is down */ netif_carrier_off(port->dev); err = mvpp2_phy_connect(port); if (err < 0) - goto err_free_irq; + goto err_free_link_irq; /* Unmask interrupts on all CPUs */ on_each_cpu(mvpp2_interrupts_unmask, port, 1); + mvpp2_shared_interrupt_mask_unmask(port, false); mvpp2_start_dev(port); return 0; +err_free_link_irq: + if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq) + free_irq(port->link_irq, port); err_free_irq: - free_irq(port->irq, port); + mvpp2_irqs_deinit(port); err_cleanup_txqs: mvpp2_cleanup_txqs(port); err_cleanup_rxqs: @@ -6039,6 +6863,7 @@ static int mvpp2_stop(struct net_device *dev) { struct mvpp2_port *port = netdev_priv(dev); struct mvpp2_port_pcpu *port_pcpu; + struct mvpp2 *priv = port->priv; int cpu; mvpp2_stop_dev(port); @@ -6046,14 +6871,20 @@ static int mvpp2_stop(struct net_device *dev) /* Mask interrupts on all CPUs */ on_each_cpu(mvpp2_interrupts_mask, port, 1); + mvpp2_shared_interrupt_mask_unmask(port, true); - free_irq(port->irq, port); - for_each_present_cpu(cpu) { - port_pcpu = per_cpu_ptr(port->pcpu, cpu); + if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq) + free_irq(port->link_irq, port); - hrtimer_cancel(&port_pcpu->tx_done_timer); - port_pcpu->timer_scheduled = false; - tasklet_kill(&port_pcpu->tx_done_tasklet); + mvpp2_irqs_deinit(port); + if (!port->has_tx_irqs) { + for_each_present_cpu(cpu) { + port_pcpu = per_cpu_ptr(port->pcpu, cpu); + + hrtimer_cancel(&port_pcpu->tx_done_timer); + port_pcpu->timer_scheduled = false; + tasklet_kill(&port_pcpu->tx_done_tasklet); + } } mvpp2_cleanup_rxqs(port); mvpp2_cleanup_txqs(port); @@ -6228,7 +7059,7 @@ static int mvpp2_ethtool_set_coalesce(struct net_device *dev, struct mvpp2_port *port = netdev_priv(dev); int queue; - for (queue = 0; queue < rxq_number; queue++) { + for (queue = 0; queue < port->nrxqs; queue++) { struct mvpp2_rx_queue 
*rxq = port->rxqs[queue]; rxq->time_coal = c->rx_coalesce_usecs; @@ -6237,10 +7068,18 @@ static int mvpp2_ethtool_set_coalesce(struct net_device *dev, mvpp2_rx_time_coal_set(port, rxq); } - for (queue = 0; queue < txq_number; queue++) { + if (port->has_tx_irqs) { + port->tx_time_coal = c->tx_coalesce_usecs; + mvpp2_tx_time_coal_set(port); + } + + for (queue = 0; queue < port->ntxqs; queue++) { struct mvpp2_tx_queue *txq = port->txqs[queue]; txq->done_pkts_coal = c->tx_max_coalesced_frames; + + if (port->has_tx_irqs) + mvpp2_tx_pkts_coal_set(port, txq); } return 0; @@ -6365,6 +7204,129 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = { .set_link_ksettings = phy_ethtool_set_link_ksettings, }; +/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that + * had a single IRQ defined per-port. + */ +static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port, + struct device_node *port_node) +{ + struct mvpp2_queue_vector *v = &port->qvecs[0]; + + v->first_rxq = 0; + v->nrxqs = port->nrxqs; + v->type = MVPP2_QUEUE_VECTOR_SHARED; + v->sw_thread_id = 0; + v->sw_thread_mask = *cpumask_bits(cpu_online_mask); + v->port = port; + v->irq = irq_of_parse_and_map(port_node, 0); + if (v->irq <= 0) + return -EINVAL; + netif_napi_add(port->dev, &v->napi, mvpp2_poll, + NAPI_POLL_WEIGHT); + + port->nqvecs = 1; + + return 0; +} + +static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port, + struct device_node *port_node) +{ + struct mvpp2_queue_vector *v; + int i, ret; + + port->nqvecs = num_possible_cpus(); + if (queue_mode == MVPP2_QDIST_SINGLE_MODE) + port->nqvecs += 1; + + for (i = 0; i < port->nqvecs; i++) { + char irqname[16]; + + v = port->qvecs + i; + + v->port = port; + v->type = MVPP2_QUEUE_VECTOR_PRIVATE; + v->sw_thread_id = i; + v->sw_thread_mask = BIT(i); + + snprintf(irqname, sizeof(irqname), "tx-cpu%d", i); + + if (queue_mode == MVPP2_QDIST_MULTI_MODE) { + v->first_rxq = i * MVPP2_DEFAULT_RXQ; + v->nrxqs = MVPP2_DEFAULT_RXQ; + } else if (queue_mode == MVPP2_QDIST_SINGLE_MODE && + i == (port->nqvecs - 1)) { + v->first_rxq = 0; + v->nrxqs = port->nrxqs; + v->type = MVPP2_QUEUE_VECTOR_SHARED; + strncpy(irqname, "rx-shared", sizeof(irqname)); + } + + v->irq = of_irq_get_byname(port_node, irqname); + if (v->irq <= 0) { + ret = -EINVAL; + goto err; + } + + netif_napi_add(port->dev, &v->napi, mvpp2_poll, + NAPI_POLL_WEIGHT); + } + + return 0; + +err: + for (i = 0; i < port->nqvecs; i++) + irq_dispose_mapping(port->qvecs[i].irq); + return ret; +} + +static int mvpp2_queue_vectors_init(struct mvpp2_port *port, + struct device_node *port_node) +{ + if (port->has_tx_irqs) + return mvpp2_multi_queue_vectors_init(port, port_node); + else + return mvpp2_simple_queue_vectors_init(port, port_node); +} + +static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port) +{ + int i; + + for (i = 0; i < port->nqvecs; i++) + irq_dispose_mapping(port->qvecs[i].irq); +} + +/* Configure Rx queue group interrupt for this port */ +static void mvpp2_rx_irqs_setup(struct mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + u32 val; + int i; + + if (priv->hw_version == MVPP21) { + mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id), + port->nrxqs); + return; + } + + /* Handle the more complicated PPv2.2 case */ + for (i = 0; i < port->nqvecs; i++) { + struct mvpp2_queue_vector *qv = port->qvecs + i; + + if (!qv->nrxqs) + continue; + + val = qv->sw_thread_id; + val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET; + mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val); + + val = 
qv->first_rxq; + val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET; + mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val); + } +} + /* Initialize port HW */ static int mvpp2_port_init(struct mvpp2_port *port) { @@ -6373,15 +7335,22 @@ static int mvpp2_port_init(struct mvpp2_port *port) struct mvpp2_txq_pcpu *txq_pcpu; int queue, cpu, err; - if (port->first_rxq + rxq_number > + /* Checks for hardware constraints */ + if (port->first_rxq + port->nrxqs > MVPP2_MAX_PORTS * priv->max_port_rxqs) return -EINVAL; + if (port->nrxqs % 4 || (port->nrxqs > priv->max_port_rxqs) || + (port->ntxqs > MVPP2_MAX_TXQ)) + return -EINVAL; + /* Disable port */ mvpp2_egress_disable(port); mvpp2_port_disable(port); - port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs), + port->tx_time_coal = MVPP2_TXDONE_COAL_USEC; + + port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs), GFP_KERNEL); if (!port->txqs) return -ENOMEM; @@ -6389,7 +7358,7 @@ static int mvpp2_port_init(struct mvpp2_port *port) /* Associate physical Tx queues to this port and initialize. * The mapping is predefined. */ - for (queue = 0; queue < txq_number; queue++) { + for (queue = 0; queue < port->ntxqs; queue++) { int queue_phy_id = mvpp2_txq_phys(port->id, queue); struct mvpp2_tx_queue *txq; @@ -6416,7 +7385,7 @@ static int mvpp2_port_init(struct mvpp2_port *port) port->txqs[queue] = txq; } - port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs), + port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs), GFP_KERNEL); if (!port->rxqs) { err = -ENOMEM; @@ -6424,7 +7393,7 @@ static int mvpp2_port_init(struct mvpp2_port *port) } /* Allocate and initialize Rx queue for this port */ - for (queue = 0; queue < rxq_number; queue++) { + for (queue = 0; queue < port->nrxqs; queue++) { struct mvpp2_rx_queue *rxq; /* Map physical Rx queue to port's logical Rx queue */ @@ -6441,22 +7410,10 @@ static int mvpp2_port_init(struct mvpp2_port *port) port->rxqs[queue] = rxq; } - /* Configure Rx queue group interrupt for this port */ - if (priv->hw_version == MVPP21) { - mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id), - rxq_number); - } else { - u32 val; - - val = (port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET); - mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val); - - val = (rxq_number << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET); - mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val); - } + mvpp2_rx_irqs_setup(port); /* Create Rx descriptor rings */ - for (queue = 0; queue < rxq_number; queue++) { + for (queue = 0; queue < port->nrxqs; queue++) { struct mvpp2_rx_queue *rxq = port->rxqs[queue]; rxq->size = port->rx_ring_size; @@ -6484,7 +7441,7 @@ static int mvpp2_port_init(struct mvpp2_port *port) return 0; err_free_percpu: - for (queue = 0; queue < txq_number; queue++) { + for (queue = 0; queue < port->ntxqs; queue++) { if (!port->txqs[queue]) continue; free_percpu(port->txqs[queue]->pcpu); @@ -6492,35 +7449,93 @@ static int mvpp2_port_init(struct mvpp2_port *port) return err; } +/* Checks if the port DT description has the TX interrupts + * described. On PPv2.1, there are no such interrupts. On PPv2.2, + * there are available, but we need to keep support for old DTs. 
+ */ +static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv, + struct device_node *port_node) +{ + char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", + "tx-cpu2", "tx-cpu3" }; + int ret, i; + + if (priv->hw_version == MVPP21) + return false; + + for (i = 0; i < 5; i++) { + ret = of_property_match_string(port_node, "interrupt-names", + irqs[i]); + if (ret < 0) + return false; + } + + return true; +} + +static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv, + struct device_node *port_node, + char **mac_from) +{ + struct mvpp2_port *port = netdev_priv(dev); + char hw_mac_addr[ETH_ALEN] = {0}; + const char *dt_mac_addr; + + dt_mac_addr = of_get_mac_address(port_node); + if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) { + *mac_from = "device tree"; + ether_addr_copy(dev->dev_addr, dt_mac_addr); + return; + } + + if (priv->hw_version == MVPP21) { + mvpp21_get_mac_address(port, hw_mac_addr); + if (is_valid_ether_addr(hw_mac_addr)) { + *mac_from = "hardware"; + ether_addr_copy(dev->dev_addr, hw_mac_addr); + return; + } + } + + *mac_from = "random"; + eth_hw_addr_random(dev); +} + /* Ports initialization */ static int mvpp2_port_probe(struct platform_device *pdev, struct device_node *port_node, - struct mvpp2 *priv) + struct mvpp2 *priv, int index) { struct device_node *phy_node; + struct phy *comphy; struct mvpp2_port *port; struct mvpp2_port_pcpu *port_pcpu; struct net_device *dev; struct resource *res; - const char *dt_mac_addr; - const char *mac_from; - char hw_mac_addr[ETH_ALEN] = {0}; + char *mac_from = ""; + unsigned int ntxqs, nrxqs; + bool has_tx_irqs; u32 id; int features; int phy_mode; int err, i, cpu; - dev = alloc_etherdev_mqs(sizeof(*port), txq_number, rxq_number); + has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node); + + if (!has_tx_irqs) + queue_mode = MVPP2_QDIST_SINGLE_MODE; + + ntxqs = MVPP2_MAX_TXQ; + if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_MULTI_MODE) + nrxqs = MVPP2_DEFAULT_RXQ * num_possible_cpus(); + else + nrxqs = MVPP2_DEFAULT_RXQ; + + dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs); if (!dev) return -ENOMEM; phy_node = of_parse_phandle(port_node, "phy", 0); - if (!phy_node) { - dev_err(&pdev->dev, "missing phy\n"); - err = -ENODEV; - goto err_free_netdev; - } - phy_mode = of_get_phy_mode(port_node); if (phy_mode < 0) { dev_err(&pdev->dev, "incorrect phy mode\n"); @@ -6528,6 +7543,15 @@ static int mvpp2_port_probe(struct platform_device *pdev, goto err_free_netdev; } + comphy = devm_of_phy_get(&pdev->dev, port_node, NULL); + if (IS_ERR(comphy)) { + if (PTR_ERR(comphy) == -EPROBE_DEFER) { + err = -EPROBE_DEFER; + goto err_free_netdev; + } + comphy = NULL; + } + if (of_property_read_u32(port_node, "port-id", &id)) { err = -EINVAL; dev_err(&pdev->dev, "missing port-id value\n"); @@ -6540,25 +7564,37 @@ static int mvpp2_port_probe(struct platform_device *pdev, dev->ethtool_ops = &mvpp2_eth_tool_ops; port = netdev_priv(dev); + port->dev = dev; + port->ntxqs = ntxqs; + port->nrxqs = nrxqs; + port->priv = priv; + port->has_tx_irqs = has_tx_irqs; - port->irq = irq_of_parse_and_map(port_node, 0); - if (port->irq <= 0) { - err = -EINVAL; + err = mvpp2_queue_vectors_init(port, port_node); + if (err) goto err_free_netdev; + + port->link_irq = of_irq_get_byname(port_node, "link"); + if (port->link_irq == -EPROBE_DEFER) { + err = -EPROBE_DEFER; + goto err_deinit_qvecs; } + if (port->link_irq <= 0) + /* the link irq is optional */ + port->link_irq = 0; if (of_property_read_bool(port_node, "marvell,loopback")) port->flags |= 
MVPP2_F_LOOPBACK; - port->priv = priv; port->id = id; if (priv->hw_version == MVPP21) - port->first_rxq = port->id * rxq_number; + port->first_rxq = port->id * port->nrxqs; else port->first_rxq = port->id * priv->max_port_rxqs; port->phy_node = phy_node; port->phy_interface = phy_mode; + port->comphy = comphy; if (priv->hw_version == MVPP21) { res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id); @@ -6572,7 +7608,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, &port->gop_id)) { err = -EINVAL; dev_err(&pdev->dev, "missing gop-port-id value\n"); - goto err_free_irq; + goto err_deinit_qvecs; } port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id); @@ -6585,25 +7621,10 @@ static int mvpp2_port_probe(struct platform_device *pdev, goto err_free_irq; } - dt_mac_addr = of_get_mac_address(port_node); - if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) { - mac_from = "device tree"; - ether_addr_copy(dev->dev_addr, dt_mac_addr); - } else { - if (priv->hw_version == MVPP21) - mvpp21_get_mac_address(port, hw_mac_addr); - if (is_valid_ether_addr(hw_mac_addr)) { - mac_from = "hardware"; - ether_addr_copy(dev->dev_addr, hw_mac_addr); - } else { - mac_from = "random"; - eth_hw_addr_random(dev); - } - } + mvpp2_port_copy_mac_addr(dev, priv, port_node, &mac_from); port->tx_ring_size = MVPP2_MAX_TXD; port->rx_ring_size = MVPP2_MAX_RXD; - port->dev = dev; SET_NETDEV_DEV(dev, &pdev->dev); err = mvpp2_port_init(port); @@ -6612,7 +7633,6 @@ static int mvpp2_port_probe(struct platform_device *pdev, goto err_free_stats; } - mvpp2_port_mii_set(port); mvpp2_port_periodic_xon_disable(port); if (priv->hw_version == MVPP21) @@ -6626,20 +7646,22 @@ static int mvpp2_port_probe(struct platform_device *pdev, goto err_free_txq_pcpu; } - for_each_present_cpu(cpu) { - port_pcpu = per_cpu_ptr(port->pcpu, cpu); + if (!port->has_tx_irqs) { + for_each_present_cpu(cpu) { + port_pcpu = per_cpu_ptr(port->pcpu, cpu); - hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC, - HRTIMER_MODE_REL_PINNED); - port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb; - port_pcpu->timer_scheduled = false; + hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL_PINNED); + port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb; + port_pcpu->timer_scheduled = false; - tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb, - (unsigned long)dev); + tasklet_init(&port_pcpu->tx_done_tasklet, + mvpp2_tx_proc_cb, + (unsigned long)dev); + } } - netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT); - features = NETIF_F_SG | NETIF_F_IP_CSUM; + features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; dev->features = features | NETIF_F_RXCSUM; dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO; dev->vlan_features |= features; @@ -6656,18 +7678,21 @@ static int mvpp2_port_probe(struct platform_device *pdev, } netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr); - priv->port_list[id] = port; + priv->port_list[index] = port; return 0; err_free_port_pcpu: free_percpu(port->pcpu); err_free_txq_pcpu: - for (i = 0; i < txq_number; i++) + for (i = 0; i < port->ntxqs; i++) free_percpu(port->txqs[i]->pcpu); err_free_stats: free_percpu(port->stats); err_free_irq: - irq_dispose_mapping(port->irq); + if (port->link_irq) + irq_dispose_mapping(port->link_irq); +err_deinit_qvecs: + mvpp2_queue_vectors_deinit(port); err_free_netdev: of_node_put(phy_node); free_netdev(dev); @@ -6683,9 +7708,11 @@ static void mvpp2_port_remove(struct mvpp2_port *port) of_node_put(port->phy_node); 
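/* Illustrative aside, not part of the patch: the mvpp2 probe path above
 * treats the COMPHY and the "link" interrupt as optional resources. Only
 * -EPROBE_DEFER is propagated to the caller; any other failure simply
 * leaves the optional resource unset. Condensed into a hypothetical helper
 * (assumes <linux/of_irq.h>), the idiom looks like this:
 */
static int mvpp2_get_optional_link_irq(struct device_node *port_node)
{
	int irq = of_irq_get_byname(port_node, "link");

	if (irq == -EPROBE_DEFER)
		return irq;		/* let the core retry the probe later */

	/* Any other failure: run without a link interrupt (0 means "none"). */
	return irq > 0 ? irq : 0;
}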
free_percpu(port->pcpu); free_percpu(port->stats); - for (i = 0; i < txq_number; i++) + for (i = 0; i < port->ntxqs; i++) free_percpu(port->txqs[i]->pcpu); - irq_dispose_mapping(port->irq); + mvpp2_queue_vectors_deinit(port); + if (port->link_irq) + irq_dispose_mapping(port->link_irq); free_netdev(port->dev); } @@ -6800,13 +7827,6 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv) int err, i; u32 val; - /* Checks for hardware constraints */ - if (rxq_number % 4 || (rxq_number > priv->max_port_rxqs) || - (txq_number > MVPP2_MAX_TXQ)) { - dev_err(&pdev->dev, "invalid queue size parameter\n"); - return -EINVAL; - } - /* MBUS windows configuration */ dram_target_info = mv_mbus_dram_info(); if (dram_target_info) @@ -6836,8 +7856,7 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv) for_each_present_cpu(i) { priv->aggr_txqs[i].id = i; priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE; - err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], - MVPP2_AGGR_TXQ_SIZE, i, priv); + err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv); if (err < 0) return err; } @@ -6845,23 +7864,6 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv) /* Rx Fifo Init */ mvpp2_rx_fifo_init(priv); - /* Reset Rx queue group interrupt configuration */ - for (i = 0; i < MVPP2_MAX_PORTS; i++) { - if (priv->hw_version == MVPP21) { - mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(i), - rxq_number); - continue; - } else { - u32 val; - - val = (i << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET); - mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val); - - val = (rxq_number << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET); - mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val); - } - } - if (priv->hw_version == MVPP21) writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT, priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG); @@ -6892,7 +7894,7 @@ static int mvpp2_probe(struct platform_device *pdev) struct mvpp2 *priv; struct resource *res; void __iomem *base; - int port_count, cpu; + int port_count, i; int err; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); @@ -6917,14 +7919,25 @@ static int mvpp2_probe(struct platform_device *pdev) priv->iface_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(priv->iface_base)) return PTR_ERR(priv->iface_base); + + priv->sysctrl_base = + syscon_regmap_lookup_by_phandle(pdev->dev.of_node, + "marvell,system-controller"); + if (IS_ERR(priv->sysctrl_base)) + /* The system controller regmap is optional for dt + * compatibility reasons. When not provided, the + * configuration of the GoP relies on the + * firmware/bootloader. + */ + priv->sysctrl_base = NULL; } - for_each_present_cpu(cpu) { + for (i = 0; i < MVPP2_MAX_THREADS; i++) { u32 addr_space_sz; addr_space_sz = (priv->hw_version == MVPP21 ? 
MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ); - priv->cpu_base[cpu] = base + cpu * addr_space_sz; + priv->swth_base[i] = base + i * addr_space_sz; } if (priv->hw_version == MVPP21) @@ -6958,6 +7971,18 @@ static int mvpp2_probe(struct platform_device *pdev) err = clk_prepare_enable(priv->mg_clk); if (err < 0) goto err_gop_clk; + + priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk"); + if (IS_ERR(priv->axi_clk)) { + err = PTR_ERR(priv->axi_clk); + if (err == -EPROBE_DEFER) + goto err_gop_clk; + priv->axi_clk = NULL; + } else { + err = clk_prepare_enable(priv->axi_clk); + if (err < 0) + goto err_gop_clk; + } } /* Get system's tclk rate */ @@ -7000,16 +8025,19 @@ static int mvpp2_probe(struct platform_device *pdev) } /* Initialize ports */ + i = 0; for_each_available_child_of_node(dn, port_node) { - err = mvpp2_port_probe(pdev, port_node, priv); + err = mvpp2_port_probe(pdev, port_node, priv, i); if (err < 0) goto err_mg_clk; + i++; } platform_set_drvdata(pdev, priv); return 0; err_mg_clk: + clk_disable_unprepare(priv->axi_clk); if (priv->hw_version == MVPP22) clk_disable_unprepare(priv->mg_clk); err_gop_clk: @@ -7047,6 +8075,7 @@ static int mvpp2_remove(struct platform_device *pdev) aggr_txq->descs_dma); } + clk_disable_unprepare(priv->axi_clk); clk_disable_unprepare(priv->mg_clk); clk_disable_unprepare(priv->pp_clk); clk_disable_unprepare(priv->gop_clk); diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 5d7d94de4e00..eef35bf3e849 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -3516,7 +3516,7 @@ static const char *skge_board_name(const struct skge_hw *hw) if (skge_chips[i].id == hw->chip_id) return skge_chips[i].name; - snprintf(buf, sizeof buf, "chipid 0x%x", hw->chip_id); + snprintf(buf, sizeof(buf), "chipid 0x%x", hw->chip_id); return buf; } @@ -4193,7 +4193,7 @@ static struct pci_driver skge_driver = { .driver.pm = SKGE_PM_OPS, }; -static struct dmi_system_id skge_32bit_dma_boards[] = { +static const struct dmi_system_id skge_32bit_dma_boards[] = { { .ident = "Gigabyte nForce boards", .matches = { diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig index 698bb89aa901..f9149d2a4694 100644 --- a/drivers/net/ethernet/mediatek/Kconfig +++ b/drivers/net/ethernet/mediatek/Kconfig @@ -7,11 +7,11 @@ config NET_VENDOR_MEDIATEK if NET_VENDOR_MEDIATEK config NET_MEDIATEK_SOC - tristate "MediaTek MT7623 Gigabit ethernet support" - depends on NET_VENDOR_MEDIATEK && (MACH_MT7623 || MACH_MT2701) + tristate "MediaTek SoC Gigabit Ethernet support" + depends on NET_VENDOR_MEDIATEK select PHYLIB ---help--- This driver supports the gigabit ethernet MACs in the - MediaTek MT2701/MT7623 chipset family. + MediaTek SoC family. 
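The mtk_eth_soc rework in the hunks that follow keys per-SoC behaviour off a capability bitmap and a required-clock bitmap attached to the of_device_id match data. Below is a minimal standalone sketch of how the composite capability bits compose; the macro and bit names mirror the mtk_eth_soc.h hunk further down, but the sketch itself is illustrative and not part of the patch.

#include <stdint.h>
#include <stdio.h>

#define BIT(n)			(1u << (n))
#define MTK_HAS_CAPS(caps, _x)	(((caps) & (_x)) == (_x))

#define MTK_TRGMII			BIT(0)
#define MTK_GMAC1_TRGMII		(BIT(1) | MTK_TRGMII)
#define MTK_SGMII			BIT(8)
#define MTK_GMAC1_SGMII			(BIT(9) | MTK_SGMII)
#define MTK_GMAC2_SGMII			(BIT(10) | MTK_SGMII)
#define MTK_DUAL_GMAC_SHARED_SGMII	(BIT(11) | MTK_GMAC1_SGMII | \
					 MTK_GMAC2_SGMII)

int main(void)
{
	uint32_t caps = MTK_DUAL_GMAC_SHARED_SGMII;	/* a composite capability */

	/* A composite capability carries the bits of the capabilities it
	 * implies, so checks against the simpler masks succeed as well.
	 */
	printf("SGMII:        %d\n", MTK_HAS_CAPS(caps, MTK_SGMII));		/* 1 */
	printf("GMAC1_SGMII:  %d\n", MTK_HAS_CAPS(caps, MTK_GMAC1_SGMII));	/* 1 */
	printf("GMAC1_TRGMII: %d\n", MTK_HAS_CAPS(caps, MTK_GMAC1_TRGMII));	/* 0 */
	return 0;
}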
endif #NET_VENDOR_MEDIATEK diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index e588a0cdb074..5e81a7263654 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -53,7 +53,8 @@ static const struct mtk_ethtool_stats { }; static const char * const mtk_clks_source_name[] = { - "ethif", "esw", "gp1", "gp2", "trgpll" + "ethif", "esw", "gp0", "gp1", "gp2", "trgpll", "sgmii_tx250m", + "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb", "sgmii_ck", "eth2pll" }; void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg) @@ -163,6 +164,47 @@ static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed) mtk_w32(eth, val, TRGMII_TCK_CTRL); } +static void mtk_gmac_sgmii_hw_setup(struct mtk_eth *eth, int mac_id) +{ + u32 val; + + /* Setup the link timer and QPHY power up inside SGMIISYS */ + regmap_write(eth->sgmiisys, SGMSYS_PCS_LINK_TIMER, + SGMII_LINK_TIMER_DEFAULT); + + regmap_read(eth->sgmiisys, SGMSYS_SGMII_MODE, &val); + val |= SGMII_REMOTE_FAULT_DIS; + regmap_write(eth->sgmiisys, SGMSYS_SGMII_MODE, val); + + regmap_read(eth->sgmiisys, SGMSYS_PCS_CONTROL_1, &val); + val |= SGMII_AN_RESTART; + regmap_write(eth->sgmiisys, SGMSYS_PCS_CONTROL_1, val); + + regmap_read(eth->sgmiisys, SGMSYS_QPHY_PWR_STATE_CTRL, &val); + val &= ~SGMII_PHYA_PWD; + regmap_write(eth->sgmiisys, SGMSYS_QPHY_PWR_STATE_CTRL, val); + + /* Determine MUX for which GMAC uses the SGMII interface */ + if (MTK_HAS_CAPS(eth->soc->caps, MTK_DUAL_GMAC_SHARED_SGMII)) { + regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); + val &= ~SYSCFG0_SGMII_MASK; + val |= !mac_id ? SYSCFG0_SGMII_GMAC1 : SYSCFG0_SGMII_GMAC2; + regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val); + + dev_info(eth->dev, "setup shared sgmii for gmac=%d\n", + mac_id); + } + + /* Setup the GMAC1 going through SGMII path when SoC also support + * ESW on GMAC1 + */ + if (MTK_HAS_CAPS(eth->soc->caps, MTK_GMAC1_ESW | MTK_GMAC1_SGMII) && + !mac_id) { + mtk_w32(eth, 0, MTK_MAC_MISC); + dev_info(eth->dev, "setup gmac1 going through sgmii"); + } +} + static void mtk_phy_link_adjust(struct net_device *dev) { struct mtk_mac *mac = netdev_priv(dev); @@ -185,7 +227,8 @@ static void mtk_phy_link_adjust(struct net_device *dev) break; }; - if (mac->id == 0 && !mac->trgmii) + if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII) && + !mac->id && !mac->trgmii) mtk_gmac0_rgmii_adjust(mac->hw, dev->phydev->speed); if (dev->phydev->link) @@ -269,6 +312,7 @@ static int mtk_phy_connect(struct net_device *dev) if (!np) return -ENODEV; + mac->ge_mode = 0; switch (of_get_phy_mode(np)) { case PHY_INTERFACE_MODE_TRGMII: mac->trgmii = true; @@ -276,7 +320,10 @@ static int mtk_phy_connect(struct net_device *dev) case PHY_INTERFACE_MODE_RGMII_RXID: case PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII: - mac->ge_mode = 0; + break; + case PHY_INTERFACE_MODE_SGMII: + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) + mtk_gmac_sgmii_hw_setup(eth, mac->id); break; case PHY_INTERFACE_MODE_MII: mac->ge_mode = 1; @@ -1032,7 +1079,6 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget) unsigned int done[MTK_MAX_DEVS]; unsigned int bytes[MTK_MAX_DEVS]; u32 cpu, dma; - static int condition; int total = 0, i; memset(done, 0, sizeof(done)); @@ -1056,10 +1102,8 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget) mac = 1; skb = tx_buf->skb; - if (!skb) { - condition = 1; + if (!skb) break; - } if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) { bytes[mac] += skb->len; @@ -1241,9 +1285,19 @@ static 
void mtk_tx_clean(struct mtk_eth *eth) static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) { - struct mtk_rx_ring *ring = ð->rx_ring[ring_no]; + struct mtk_rx_ring *ring; int rx_data_len, rx_dma_size; int i; + u32 offset = 0; + + if (rx_flag == MTK_RX_FLAGS_QDMA) { + if (ring_no) + return -EINVAL; + ring = ð->rx_ring_qdma; + offset = 0x1000; + } else { + ring = ð->rx_ring[ring_no]; + } if (rx_flag == MTK_RX_FLAGS_HWLRO) { rx_data_len = MTK_MAX_LRO_RX_LENGTH; @@ -1293,17 +1347,16 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) */ wmb(); - mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no)); - mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no)); - mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); - mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX); + mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no) + offset); + mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no) + offset); + mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg + offset); + mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX + offset); return 0; } -static void mtk_rx_clean(struct mtk_eth *eth, int ring_no) +static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring) { - struct mtk_rx_ring *ring = ð->rx_ring[ring_no]; int i; if (ring->data && ring->dma) { @@ -1629,6 +1682,10 @@ static int mtk_dma_init(struct mtk_eth *eth) if (err) return err; + err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA); + if (err) + return err; + err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL); if (err) return err; @@ -1668,12 +1725,13 @@ static void mtk_dma_free(struct mtk_eth *eth) eth->phy_scratch_ring = 0; } mtk_tx_clean(eth); - mtk_rx_clean(eth, 0); + mtk_rx_clean(eth, ð->rx_ring[0]); + mtk_rx_clean(eth, ð->rx_ring_qdma); if (eth->hwlro) { mtk_hwlro_rx_uninit(eth); for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) - mtk_rx_clean(eth, i); + mtk_rx_clean(eth, ð->rx_ring[i]); } kfree(eth->scratch_head); @@ -1740,7 +1798,9 @@ static int mtk_start_dma(struct mtk_eth *eth) mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | - MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO, + MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO | + MTK_RX_DMA_EN | MTK_RX_2B_OFFSET | + MTK_RX_BT_32DWORDS, MTK_QDMA_GLO_CFG); mtk_w32(eth, @@ -1837,9 +1897,36 @@ static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits) mdelay(10); } +static void mtk_clk_disable(struct mtk_eth *eth) +{ + int clk; + + for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--) + clk_disable_unprepare(eth->clks[clk]); +} + +static int mtk_clk_enable(struct mtk_eth *eth) +{ + int clk, ret; + + for (clk = 0; clk < MTK_CLK_MAX ; clk++) { + ret = clk_prepare_enable(eth->clks[clk]); + if (ret) + goto err_disable_clks; + } + + return 0; + +err_disable_clks: + while (--clk >= 0) + clk_disable_unprepare(eth->clks[clk]); + + return ret; +} + static int mtk_hw_init(struct mtk_eth *eth) { - int i, val; + int i, val, ret; if (test_and_set_bit(MTK_HW_INIT, ð->state)) return 0; @@ -1847,10 +1934,10 @@ static int mtk_hw_init(struct mtk_eth *eth) pm_runtime_enable(eth->dev); pm_runtime_get_sync(eth->dev); - clk_prepare_enable(eth->clks[MTK_CLK_ETHIF]); - clk_prepare_enable(eth->clks[MTK_CLK_ESW]); - clk_prepare_enable(eth->clks[MTK_CLK_GP1]); - clk_prepare_enable(eth->clks[MTK_CLK_GP2]); + ret = mtk_clk_enable(eth); + if (ret) + goto err_disable_pm; + ethsys_reset(eth, RSTCTRL_FE); ethsys_reset(eth, RSTCTRL_PPE); @@ -1918,6 +2005,12 @@ static int mtk_hw_init(struct mtk_eth *eth) } return 0; + +err_disable_pm: + pm_runtime_put_sync(eth->dev); + pm_runtime_disable(eth->dev); + 
+ return ret; } static int mtk_hw_deinit(struct mtk_eth *eth) @@ -1925,10 +2018,7 @@ static int mtk_hw_deinit(struct mtk_eth *eth) if (!test_and_clear_bit(MTK_HW_INIT, ð->state)) return 0; - clk_disable_unprepare(eth->clks[MTK_CLK_GP2]); - clk_disable_unprepare(eth->clks[MTK_CLK_GP1]); - clk_disable_unprepare(eth->clks[MTK_CLK_ESW]); - clk_disable_unprepare(eth->clks[MTK_CLK_ETHIF]); + mtk_clk_disable(eth); pm_runtime_put_sync(eth->dev); pm_runtime_disable(eth->dev); @@ -2395,6 +2485,7 @@ static int mtk_get_chip_id(struct mtk_eth *eth, u32 *chip_id) static bool mtk_is_hwlro_supported(struct mtk_eth *eth) { switch (eth->chip_id) { + case MT7622_ETH: case MT7623_ETH: return true; } @@ -2406,6 +2497,7 @@ static int mtk_probe(struct platform_device *pdev) { struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); struct device_node *mac_np; + const struct of_device_id *match; struct mtk_eth *eth; int err; int i; @@ -2414,6 +2506,9 @@ static int mtk_probe(struct platform_device *pdev) if (!eth) return -ENOMEM; + match = of_match_device(of_mtk_match, &pdev->dev); + eth->soc = (struct mtk_soc_data *)match->data; + eth->dev = &pdev->dev; eth->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(eth->base)) @@ -2430,6 +2525,16 @@ static int mtk_probe(struct platform_device *pdev) return PTR_ERR(eth->ethsys); } + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) { + eth->sgmiisys = + syscon_regmap_lookup_by_phandle(pdev->dev.of_node, + "mediatek,sgmiisys"); + if (IS_ERR(eth->sgmiisys)) { + dev_err(&pdev->dev, "no sgmiisys regmap found\n"); + return PTR_ERR(eth->sgmiisys); + } + } + eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "mediatek,pctl"); if (IS_ERR(eth->pctl)) { @@ -2450,7 +2555,12 @@ static int mtk_probe(struct platform_device *pdev) if (IS_ERR(eth->clks[i])) { if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) return -EPROBE_DEFER; - return -ENODEV; + if (eth->soc->required_clks & BIT(i)) { + dev_err(&pdev->dev, "clock %s not found\n", + mtk_clks_source_name[i]); + return -EINVAL; + } + eth->clks[i] = NULL; } } @@ -2553,8 +2663,25 @@ static int mtk_remove(struct platform_device *pdev) return 0; } +static const struct mtk_soc_data mt2701_data = { + .caps = MTK_GMAC1_TRGMII, + .required_clks = MT7623_CLKS_BITMAP +}; + +static const struct mtk_soc_data mt7622_data = { + .caps = MTK_DUAL_GMAC_SHARED_SGMII | MTK_GMAC1_ESW, + .required_clks = MT7622_CLKS_BITMAP +}; + +static const struct mtk_soc_data mt7623_data = { + .caps = MTK_GMAC1_TRGMII, + .required_clks = MT7623_CLKS_BITMAP +}; + const struct of_device_id of_mtk_match[] = { - { .compatible = "mediatek,mt2701-eth" }, + { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data}, + { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data}, + { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data}, {}, }; MODULE_DEVICE_TABLE(of, of_mtk_match); diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 5868a09f623a..3d3c24a28112 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -302,6 +302,9 @@ #define PHY_IAC_REG_SHIFT 25 #define PHY_IAC_TIMEOUT HZ +#define MTK_MAC_MISC 0x1000c +#define MTK_MUX_TO_ESW BIT(0) + /* Mac control registers */ #define MTK_MAC_MCR(x) (0x10100 + (x * 0x100)) #define MAC_MCR_MAX_RX_1536 BIT(24) @@ -357,11 +360,15 @@ #define ETHSYS_CHIPID0_3 0x0 #define ETHSYS_CHIPID4_7 0x4 #define MT7623_ETH 7623 +#define MT7622_ETH 7622 /* ethernet subsystem config register */ #define 
ETHSYS_SYSCFG0 0x14 #define SYSCFG0_GE_MASK 0x3 #define SYSCFG0_GE_MODE(x, y) (x << (12 + (y * 2))) +#define SYSCFG0_SGMII_MASK (3 << 8) +#define SYSCFG0_SGMII_GMAC1 ((2 << 8) & GENMASK(9, 8)) +#define SYSCFG0_SGMII_GMAC2 ((3 << 8) & GENMASK(9, 8)) /* ethernet subsystem clock register */ #define ETHSYS_CLKCFG0 0x2c @@ -372,6 +379,23 @@ #define RSTCTRL_FE BIT(6) #define RSTCTRL_PPE BIT(31) +/* SGMII subsystem config registers */ +/* Register to auto-negotiation restart */ +#define SGMSYS_PCS_CONTROL_1 0x0 +#define SGMII_AN_RESTART BIT(9) + +/* Register to programmable link timer, the unit in 2 * 8ns */ +#define SGMSYS_PCS_LINK_TIMER 0x18 +#define SGMII_LINK_TIMER_DEFAULT (0x186a0 & GENMASK(19, 0)) + +/* Register to control remote fault */ +#define SGMSYS_SGMII_MODE 0x20 +#define SGMII_REMOTE_FAULT_DIS BIT(8) + +/* Register to power up QPHY */ +#define SGMSYS_QPHY_PWR_STATE_CTRL 0xe8 +#define SGMII_PHYA_PWD BIT(4) + struct mtk_rx_dma { unsigned int rxd1; unsigned int rxd2; @@ -437,12 +461,31 @@ enum mtk_tx_flags { enum mtk_clks_map { MTK_CLK_ETHIF, MTK_CLK_ESW, + MTK_CLK_GP0, MTK_CLK_GP1, MTK_CLK_GP2, MTK_CLK_TRGPLL, + MTK_CLK_SGMII_TX_250M, + MTK_CLK_SGMII_RX_250M, + MTK_CLK_SGMII_CDR_REF, + MTK_CLK_SGMII_CDR_FB, + MTK_CLK_SGMII_CK, + MTK_CLK_ETH2PLL, MTK_CLK_MAX }; +#define MT7623_CLKS_BITMAP (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \ + BIT(MTK_CLK_GP1) | BIT(MTK_CLK_GP2) | \ + BIT(MTK_CLK_TRGPLL)) +#define MT7622_CLKS_BITMAP (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \ + BIT(MTK_CLK_GP0) | BIT(MTK_CLK_GP1) | \ + BIT(MTK_CLK_GP2) | \ + BIT(MTK_CLK_SGMII_TX_250M) | \ + BIT(MTK_CLK_SGMII_RX_250M) | \ + BIT(MTK_CLK_SGMII_CDR_REF) | \ + BIT(MTK_CLK_SGMII_CDR_FB) | \ + BIT(MTK_CLK_SGMII_CK) | \ + BIT(MTK_CLK_ETH2PLL)) enum mtk_dev_state { MTK_HW_INIT, MTK_RESETTING @@ -489,6 +532,7 @@ struct mtk_tx_ring { enum mtk_rx_flags { MTK_RX_FLAGS_NORMAL = 0, MTK_RX_FLAGS_HWLRO, + MTK_RX_FLAGS_QDMA, }; /* struct mtk_rx_ring - This struct holds info describing a RX ring @@ -511,6 +555,28 @@ struct mtk_rx_ring { u32 crx_idx_reg; }; +#define MTK_TRGMII BIT(0) +#define MTK_GMAC1_TRGMII (BIT(1) | MTK_TRGMII) +#define MTK_ESW BIT(4) +#define MTK_GMAC1_ESW (BIT(5) | MTK_ESW) +#define MTK_SGMII BIT(8) +#define MTK_GMAC1_SGMII (BIT(9) | MTK_SGMII) +#define MTK_GMAC2_SGMII (BIT(10) | MTK_SGMII) +#define MTK_DUAL_GMAC_SHARED_SGMII (BIT(11) | MTK_GMAC1_SGMII | \ + MTK_GMAC2_SGMII) +#define MTK_HAS_CAPS(caps, _x) (((caps) & (_x)) == (_x)) + +/* struct mtk_eth_data - This is the structure holding all differences + * among various plaforms + * @caps Flags shown the extra capability for the SoC + * @required_clks Flags shown the bitmap for required clocks on + * the target SoC + */ +struct mtk_soc_data { + u32 caps; + u32 required_clks; +}; + /* currently no SoC has more than 2 macs */ #define MTK_MAX_DEVS 2 @@ -529,11 +595,14 @@ struct mtk_rx_ring { * @msg_enable: Ethtool msg level * @ethsys: The register map pointing at the range used to setup * MII modes + * @sgmiisys: The register map pointing at the range used to setup + * SGMII modes * @pctl: The register map pointing at the range used to setup * GMAC port drive/slew values * @dma_refcnt: track how many netdevs are using the DMA engine - * @tx_ring: Pointer to the memore holding info about the TX ring - * @rx_ring: Pointer to the memore holding info about the RX ring + * @tx_ring: Pointer to the memory holding info about the TX ring + * @rx_ring: Pointer to the memory holding info about the RX ring + * @rx_ring_qdma: Pointer to the memory holding info about the QDMA 
RX ring * @tx_napi: The TX NAPI struct * @rx_napi: The RX NAPI struct * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring @@ -542,7 +611,8 @@ struct mtk_rx_ring { * @clks: clock array for all clocks required * @mii_bus: If there is a bus we need to create an instance for it * @pending_work: The workqueue used to reset the dma ring - * @state Initialization and runtime state of the device. + * @state: Initialization and runtime state of the device + * @soc: Holding specific data among vaious SoCs */ struct mtk_eth { @@ -558,12 +628,14 @@ struct mtk_eth { u32 msg_enable; unsigned long sysclk; struct regmap *ethsys; + struct regmap *sgmiisys; struct regmap *pctl; u32 chip_id; bool hwlro; atomic_t dma_refcnt; struct mtk_tx_ring tx_ring; struct mtk_rx_ring rx_ring[MTK_MAX_RX_RING_NUM]; + struct mtk_rx_ring rx_ring_qdma; struct napi_struct tx_napi; struct napi_struct rx_napi; struct mtk_tx_dma *scratch_ring; @@ -574,6 +646,8 @@ struct mtk_eth { struct mii_bus *mii_bus; struct work_struct pending_work; unsigned long state; + + const struct mtk_soc_data *soc; }; /* struct mtk_mac - the structure that holds the info about the MACs of the diff --git a/drivers/net/ethernet/mellanox/Kconfig b/drivers/net/ethernet/mellanox/Kconfig index 84a200764111..872548cd9431 100644 --- a/drivers/net/ethernet/mellanox/Kconfig +++ b/drivers/net/ethernet/mellanox/Kconfig @@ -5,9 +5,10 @@ config NET_VENDOR_MELLANOX bool "Mellanox devices" default y - depends on PCI + depends on PCI || I2C ---help--- - If you have a network (Ethernet) card belonging to this class, say Y. + If you have a network (Ethernet or RDMA) device belonging to this + class, say Y. Note that the answer to this question doesn't directly affect the kernel: saying N will just cause the configurator to skip all diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c index b651c1210555..6dabd983e7e0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c @@ -186,7 +186,7 @@ int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, bitmap->effective_len = bitmap->avail; spin_lock_init(&bitmap->lock); bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) * - sizeof (long), GFP_KERNEL); + sizeof(long), GFP_KERNEL); if (!bitmap->table) return -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 78b89ceb4f46..6a9086dc1e92 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -1958,19 +1958,19 @@ static void mlx4_allocate_port_vpps(struct mlx4_dev *dev, int port) int i; int err; int num_vfs; - u16 availible_vpp; + u16 available_vpp; u8 vpp_param[MLX4_NUM_UP]; struct mlx4_qos_manager *port_qos; struct mlx4_priv *priv = mlx4_priv(dev); - err = mlx4_ALLOCATE_VPP_get(dev, port, &availible_vpp, vpp_param); + err = mlx4_ALLOCATE_VPP_get(dev, port, &available_vpp, vpp_param); if (err) { - mlx4_info(dev, "Failed query availible VPPs\n"); + mlx4_info(dev, "Failed query available VPPs\n"); return; } port_qos = &priv->mfunc.master.qos_ctl[port]; - num_vfs = (availible_vpp / + num_vfs = (available_vpp / bitmap_weight(port_qos->priority_bm, MLX4_NUM_UP)); for (i = 0; i < MLX4_NUM_UP; i++) { @@ -1985,14 +1985,14 @@ static void mlx4_allocate_port_vpps(struct mlx4_dev *dev, int port) } /* Query actual allocated VPP, just to make sure */ - err = mlx4_ALLOCATE_VPP_get(dev, port, &availible_vpp, vpp_param); + err = mlx4_ALLOCATE_VPP_get(dev, port, 
&available_vpp, vpp_param); if (err) { - mlx4_info(dev, "Failed query availible VPPs\n"); + mlx4_info(dev, "Failed query available VPPs\n"); return; } port_qos->num_of_qos_vfs = num_vfs; - mlx4_dbg(dev, "Port %d Availible VPPs %d\n", port, availible_vpp); + mlx4_dbg(dev, "Port %d Available VPPs %d\n", port, available_vpp); for (i = 0; i < MLX4_NUM_UP; i++) mlx4_dbg(dev, "Port %d UP %d Allocated %d VPPs\n", port, i, @@ -2637,7 +2637,7 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev) int err = 0; priv->cmd.context = kmalloc(priv->cmd.max_cmds * - sizeof (struct mlx4_cmd_context), + sizeof(struct mlx4_cmd_context), GFP_KERNEL); if (!priv->cmd.context) return -ENOMEM; @@ -2695,7 +2695,7 @@ struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev) { struct mlx4_cmd_mailbox *mailbox; - mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL); + mailbox = kmalloc(sizeof(*mailbox), GFP_KERNEL); if (!mailbox) return ERR_PTR(-ENOMEM); @@ -2891,7 +2891,7 @@ static int mlx4_set_vport_qos(struct mlx4_priv *priv, int slave, int port, memset(vpp_qos, 0, sizeof(struct mlx4_vport_qos_param) * MLX4_NUM_UP); if (slave > port_qos->num_of_qos_vfs) { - mlx4_info(dev, "No availible VPP resources for this VF\n"); + mlx4_info(dev, "No available VPP resources for this VF\n"); return -EINVAL; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c index f849eec21824..1e487acb4667 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c @@ -209,12 +209,10 @@ int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) cq->moder_cnt, cq->moder_time); } -int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) +void mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) { mlx4_cq_arm(&cq->mcq, MLX4_CQ_DB_REQ_NOT, priv->mdev->uar_map, &priv->mdev->uar_lock); - - return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c index 2b0cbca4beb5..686e18de9a97 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c @@ -147,7 +147,7 @@ void mlx4_en_update_loopback_state(struct net_device *dev, mutex_unlock(&priv->mdev->state_lock); } -static int mlx4_en_get_profile(struct mlx4_en_dev *mdev) +static void mlx4_en_get_profile(struct mlx4_en_dev *mdev) { struct mlx4_en_profile *params = &mdev->profile; int i; @@ -176,8 +176,6 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev) params->prof[i].rss_rings = 0; params->prof[i].inline_thold = inline_thold; } - - return 0; } static void *mlx4_en_get_netdev(struct mlx4_dev *dev, void *ctx, u8 port) @@ -309,10 +307,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev) } /* Build device profile according to supplied module parameters */ - if (mlx4_en_get_profile(mdev)) { - mlx4_err(mdev, "Bad module parameters, aborting\n"); - goto err_mr; - } + mlx4_en_get_profile(mdev); /* Configure which ports to start according to module parameters */ mdev->port_cnt = 0; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index e3e6d9fa69fd..9c218f1cfc6c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -130,19 +130,20 @@ int mlx4_en_alloc_tx_queue_per_tc(struct net_device *dev, u8 tc) return err; } -static int __mlx4_en_setup_tc(struct net_device *dev, u32 handle, - u32 chain_index, __be16 proto, - struct tc_to_netdev *tc) +static int 
__mlx4_en_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) { - if (tc->type != TC_SETUP_MQPRIO) + struct tc_mqprio_qopt *mqprio = type_data; + + if (type != TC_SETUP_MQPRIO) + return -EOPNOTSUPP; + + if (mqprio->num_tc && mqprio->num_tc != MLX4_EN_NUM_UP_HIGH) return -EINVAL; - if (tc->mqprio->num_tc && tc->mqprio->num_tc != MLX4_EN_NUM_UP_HIGH) - return -EINVAL; + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; - tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; - - return mlx4_en_alloc_tx_queue_per_tc(dev, tc->mqprio->num_tc); + return mlx4_en_alloc_tx_queue_per_tc(dev, mqprio->num_tc); } #ifdef CONFIG_RFS_ACCEL @@ -732,6 +733,21 @@ static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn, return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64); } +static void mlx4_en_update_user_mac(struct mlx4_en_priv *priv, + unsigned char new_mac[ETH_ALEN + 2]) +{ + struct mlx4_en_dev *mdev = priv->mdev; + int err; + + if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_USER_MAC_EN)) + return; + + err = mlx4_SET_PORT_user_mac(mdev->dev, priv->port, new_mac); + if (err) + en_err(priv, "Failed to pass user MAC(%pM) to Firmware for port %d, with error %d\n", + new_mac, priv->port, err); +} + static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv, unsigned char new_mac[ETH_ALEN + 2]) { @@ -766,8 +782,12 @@ static int mlx4_en_set_mac(struct net_device *dev, void *addr) mutex_lock(&mdev->state_lock); memcpy(new_mac, saddr->sa_data, ETH_ALEN); err = mlx4_en_do_set_mac(priv, new_mac); - if (!err) - memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN); + if (err) + goto out; + + memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN); + mlx4_en_update_user_mac(priv, new_mac); +out: mutex_unlock(&mdev->state_lock); return err; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c index 86d2d42d658d..5a47f9669621 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c @@ -44,7 +44,7 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, struct mlx4_en_dev *mdev = priv->mdev; struct net_device *dev = priv->dev; - memset(context, 0, sizeof *context); + memset(context, 0, sizeof(*context)); context->flags = cpu_to_be32(7 << 16 | rss << MLX4_RSS_QPC_FLAG_OFFSET); context->pd = cpu_to_be32(mdev->priv_pdn); context->mtu_msgmax = 0xff; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index ec24c4057be6..b97a55c827eb 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -1056,7 +1056,7 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn, } qp->event = mlx4_en_sqp_event; - memset(context, 0, sizeof *context); + memset(context, 0, sizeof(*context)); mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0, qpn, ring->cqn, -1, context); context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index a81db2582555..8a32a8f7f9c0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -644,7 +644,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, void *fragptr) { struct mlx4_wqe_inline_seg *inl = &tx_desc->inl; - int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl; + int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof(*inl); unsigned int hlen = skb_headlen(skb); if 
(skb->len <= spc) { diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 07406cf2eacd..6f57c052053e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -259,7 +259,7 @@ int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port) if (!s_slave->active) return 0; - memset(&eqe, 0, sizeof eqe); + memset(&eqe, 0, sizeof(eqe)); eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT; eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE; @@ -276,7 +276,7 @@ int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port) /*don't send if we don't have the that slave */ if (dev->persist->num_vfs < slave) return 0; - memset(&eqe, 0, sizeof eqe); + memset(&eqe, 0, sizeof(eqe)); eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT; eqe.subtype = MLX4_DEV_PMC_SUBTYPE_GUID_INFO; @@ -295,7 +295,7 @@ int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port, /*don't send if we don't have the that slave */ if (dev->persist->num_vfs < slave) return 0; - memset(&eqe, 0, sizeof eqe); + memset(&eqe, 0, sizeof(eqe)); eqe.type = MLX4_EVENT_TYPE_PORT_CHANGE; eqe.subtype = port_subtype_change; @@ -432,7 +432,7 @@ int mlx4_gen_slaves_port_mgt_ev(struct mlx4_dev *dev, u8 port, int attr) { struct mlx4_eqe eqe; - memset(&eqe, 0, sizeof eqe); + memset(&eqe, 0, sizeof(eqe)); eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT; eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PORT_INFO; @@ -726,7 +726,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) } memcpy(&priv->mfunc.master.comm_arm_bit_vector, eqe->event.comm_channel_arm.bit_vec, - sizeof eqe->event.comm_channel_arm.bit_vec); + sizeof(eqe->event.comm_channel_arm.bit_vec)); queue_work(priv->mfunc.master.comm_wq, &priv->mfunc.master.comm_work); break; @@ -984,15 +984,15 @@ static int mlx4_create_eq(struct mlx4_dev *dev, int nent, */ npages = PAGE_ALIGN(eq->nent * dev->caps.eqe_size) / PAGE_SIZE; - eq->page_list = kmalloc(npages * sizeof *eq->page_list, - GFP_KERNEL); + eq->page_list = kmalloc_array(npages, sizeof(*eq->page_list), + GFP_KERNEL); if (!eq->page_list) goto err_out; for (i = 0; i < npages; ++i) eq->page_list[i].buf = NULL; - dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); + dma_list = kmalloc_array(npages, sizeof(*dma_list), GFP_KERNEL); if (!dma_list) goto err_out_free; @@ -1161,7 +1161,7 @@ int mlx4_alloc_eq_table(struct mlx4_dev *dev) struct mlx4_priv *priv = mlx4_priv(dev); priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs, - sizeof *priv->eq_table.eq, GFP_KERNEL); + sizeof(*priv->eq_table.eq), GFP_KERNEL); if (!priv->eq_table.eq) return -ENOMEM; @@ -1180,7 +1180,7 @@ int mlx4_init_eq_table(struct mlx4_dev *dev) int i; priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev), - sizeof *priv->eq_table.uar_map, + sizeof(*priv->eq_table.uar_map), GFP_KERNEL); if (!priv->eq_table.uar_map) { err = -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 041c0ed65929..16c09949afd5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -36,6 +36,7 @@ #include #include #include +#include #include "fw.h" #include "icm.h" @@ -57,7 +58,7 @@ MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: off)"); do { \ void *__p = (char *) (source) + (offset); \ u64 val; \ - switch (sizeof (dest)) { \ + switch (sizeof(dest)) { \ case 1: (dest) = *(u8 *) __p; break; \ case 2: (dest) = be16_to_cpup(__p); break; \ case 4: (dest) = 
be32_to_cpup(__p); break; \ @@ -162,6 +163,7 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) [35] = "Diag counters per port", [36] = "QinQ VST mode support", [37] = "sl to vl mapping table change event support", + [38] = "user MAC support", }; int i; @@ -679,22 +681,22 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port, if (func_cap->flags1 & QUERY_FUNC_CAP_VF_ENABLE_QP0) { MLX4_GET(qkey, outbox, QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET); - func_cap->qp0_qkey = qkey; + func_cap->spec_qps.qp0_qkey = qkey; } else { - func_cap->qp0_qkey = 0; + func_cap->spec_qps.qp0_qkey = 0; } MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_TUNNEL); - func_cap->qp0_tunnel_qpn = size & 0xFFFFFF; + func_cap->spec_qps.qp0_tunnel = size & 0xFFFFFF; MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_PROXY); - func_cap->qp0_proxy_qpn = size & 0xFFFFFF; + func_cap->spec_qps.qp0_proxy = size & 0xFFFFFF; MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_TUNNEL); - func_cap->qp1_tunnel_qpn = size & 0xFFFFFF; + func_cap->spec_qps.qp1_tunnel = size & 0xFFFFFF; MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_PROXY); - func_cap->qp1_proxy_qpn = size & 0xFFFFFF; + func_cap->spec_qps.qp1_proxy = size & 0xFFFFFF; if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_NIC_INFO) MLX4_GET(func_cap->phys_port_id, outbox, @@ -778,6 +780,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) #define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET 0x52 #define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET 0x55 #define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET 0x56 +#define QUERY_DEV_CAP_USER_MAC_EN_OFFSET 0x5C #define QUERY_DEV_CAP_SVLAN_BY_QP_OFFSET 0x5D #define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET 0x61 #define QUERY_DEV_CAP_RSVD_MCG_OFFSET 0x62 @@ -949,6 +952,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET); dev_cap->max_sq_desc_sz = size; + MLX4_GET(field, outbox, QUERY_DEV_CAP_USER_MAC_EN_OFFSET); + if (field & (1 << 2)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_USER_MAC_EN; MLX4_GET(field, outbox, QUERY_DEV_CAP_SVLAN_BY_QP_OFFSET); if (field & 0x1) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP; @@ -1534,7 +1540,7 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt) for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) { if (virt != -1) { pages[nent * 2] = cpu_to_be64(virt); - virt += 1 << lg; + virt += 1ULL << lg; } pages[nent * 2 + 1] = @@ -2450,14 +2456,14 @@ int mlx4_config_dev_retrieval(struct mlx4_dev *dev, csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET) & CONFIG_DEV_RX_CSUM_MODE_MASK; - if (csum_mask >= sizeof(config_dev_csum_flags)/sizeof(config_dev_csum_flags[0])) + if (csum_mask >= ARRAY_SIZE(config_dev_csum_flags)) return -EINVAL; params->rx_csum_flags_port_1 = config_dev_csum_flags[csum_mask]; csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET) & CONFIG_DEV_RX_CSUM_MODE_MASK; - if (csum_mask >= sizeof(config_dev_csum_flags)/sizeof(config_dev_csum_flags[0])) + if (csum_mask >= ARRAY_SIZE(config_dev_csum_flags)) return -EINVAL; params->rx_csum_flags_port_2 = config_dev_csum_flags[csum_mask]; diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h index b52ba01aa486..cd6399c76bfd 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.h +++ b/drivers/net/ethernet/mellanox/mlx4/fw.h @@ -144,11 +144,7 @@ struct mlx4_func_cap { int max_eq; int reserved_eq; int mcg_quota; - u32 qp0_qkey; - u32 qp0_tunnel_qpn; - u32 qp0_proxy_qpn; 
- u32 qp1_tunnel_qpn; - u32 qp1_proxy_qpn; + struct mlx4_spec_qps spec_qps; u32 reserved_lkey; u8 physical_port; u8 flags0; diff --git a/drivers/net/ethernet/mellanox/mlx4/fw_qos.c b/drivers/net/ethernet/mellanox/mlx4/fw_qos.c index 8f2fde0487c4..3a09d7122d3b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw_qos.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw_qos.c @@ -65,7 +65,7 @@ struct mlx4_set_port_scheduler_context { /* Granular Qos (per VF) section */ struct mlx4_alloc_vpp_param { - __be32 availible_vpp; + __be32 available_vpp; __be32 vpp_p_up[MLX4_NUM_UP]; }; @@ -157,7 +157,7 @@ int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw, EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER); int mlx4_ALLOCATE_VPP_get(struct mlx4_dev *dev, u8 port, - u16 *availible_vpp, u8 *vpp_p_up) + u16 *available_vpp, u8 *vpp_p_up) { int i; int err; @@ -179,7 +179,7 @@ int mlx4_ALLOCATE_VPP_get(struct mlx4_dev *dev, u8 port, goto out; /* Total number of supported VPPs */ - *availible_vpp = (u16)be32_to_cpu(out_param->availible_vpp); + *available_vpp = (u16)be32_to_cpu(out_param->available_vpp); for (i = 0; i < MLX4_NUM_UP; i++) vpp_p_up[i] = (u8)be32_to_cpu(out_param->vpp_p_up[i]); diff --git a/drivers/net/ethernet/mellanox/mlx4/fw_qos.h b/drivers/net/ethernet/mellanox/mlx4/fw_qos.h index ac1f331878e6..582997577a04 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw_qos.h +++ b/drivers/net/ethernet/mellanox/mlx4/fw_qos.h @@ -84,23 +84,23 @@ int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc); int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw, u8 *pg, u16 *ratelimit); /** - * mlx4_ALLOCATE_VPP_get - Query port VPP availible resources and allocation. - * Before distribution of VPPs to priorities, only availible_vpp is returned. + * mlx4_ALLOCATE_VPP_get - Query port VPP available resources and allocation. + * Before distribution of VPPs to priorities, only available_vpp is returned. * After initialization it returns the distribution of VPPs among priorities. * * @dev: mlx4_dev. * @port: Physical port number. - * @availible_vpp: Pointer to variable where number of availible VPPs is stored + * @available_vpp: Pointer to variable where number of available VPPs is stored * @vpp_p_up: Distribution of VPPs to priorities is stored in this array * * Returns 0 on success or a negative mlx4_core errno code. **/ int mlx4_ALLOCATE_VPP_get(struct mlx4_dev *dev, u8 port, - u16 *availible_vpp, u8 *vpp_p_up); + u16 *available_vpp, u8 *vpp_p_up); /** * mlx4_ALLOCATE_VPP_set - Distribution of VPPs among differnt priorities. * The total number of VPPs assigned to all for a port must not exceed - * the value reported by availible_vpp in mlx4_ALLOCATE_VPP_get. + * the value reported by available_vpp in mlx4_ALLOCATE_VPP_get. * VPP allocation is allowed only after the port type has been set, * and while no QPs are open for this port. 
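/* Illustrative usage of the VPP query API documented above, not part of the
 * patch: query how many VPPs are available on a port before distributing
 * them among priorities. The signature and MLX4_NUM_UP are taken from the
 * hunks above; the wrapper name is hypothetical.
 */
static int example_query_vpps(struct mlx4_dev *dev, u8 port)
{
	u16 available_vpp;
	u8 vpp_p_up[MLX4_NUM_UP];
	int err;

	err = mlx4_ALLOCATE_VPP_get(dev, port, &available_vpp, vpp_p_up);
	if (err)
		return err;

	/* available_vpp now holds the total supported VPPs for the port;
	 * vpp_p_up[] holds the current per-priority distribution.
	 */
	return 0;
}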
* diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c index 5a7816e7c7b4..a822f7a56bc5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/icm.c +++ b/drivers/net/ethernet/mellanox/mlx4/icm.c @@ -400,7 +400,7 @@ int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table, obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size; num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk; - table->icm = kcalloc(num_icm, sizeof *table->icm, GFP_KERNEL); + table->icm = kcalloc(num_icm, sizeof(*table->icm), GFP_KERNEL); if (!table->icm) return -ENOMEM; table->virt = virt; diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.h b/drivers/net/ethernet/mellanox/mlx4/icm.h index dee67fa39107..c9169a490557 100644 --- a/drivers/net/ethernet/mellanox/mlx4/icm.h +++ b/drivers/net/ethernet/mellanox/mlx4/icm.h @@ -39,8 +39,8 @@ #include #define MLX4_ICM_CHUNK_LEN \ - ((256 - sizeof (struct list_head) - 2 * sizeof (int)) / \ - (sizeof (struct scatterlist))) + ((256 - sizeof(struct list_head) - 2 * sizeof(int)) / \ + (sizeof(struct scatterlist))) enum { MLX4_ICM_PAGE_SHIFT = 12, diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c index e00f627331cb..2edcce98ab2d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/intf.c +++ b/drivers/net/ethernet/mellanox/mlx4/intf.c @@ -53,7 +53,7 @@ static void mlx4_add_device(struct mlx4_interface *intf, struct mlx4_priv *priv) { struct mlx4_device_context *dev_ctx; - dev_ctx = kmalloc(sizeof *dev_ctx, GFP_KERNEL); + dev_ctx = kmalloc(sizeof(*dev_ctx), GFP_KERNEL); if (!dev_ctx) return; diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index a594bfd9e095..e61c99ef741d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -34,6 +34,7 @@ */ #include +#include #include #include #include @@ -121,7 +122,7 @@ static char mlx4_version[] = DRV_NAME ": Mellanox ConnectX core driver v" DRV_VERSION "\n"; -static struct mlx4_profile default_profile = { +static const struct mlx4_profile default_profile = { .num_qp = 1 << 18, .num_srq = 1 << 16, .rdmarc_per_qp = 1 << 4, @@ -131,7 +132,7 @@ static struct mlx4_profile default_profile = { .num_mtt = 1 << 20, /* It is really num mtt segements */ }; -static struct mlx4_profile low_mem_profile = { +static const struct mlx4_profile low_mem_profile = { .num_qp = 1 << 17, .num_srq = 1 << 6, .rdmarc_per_qp = 1 << 4, @@ -819,38 +820,93 @@ static void slave_adjust_steering_mode(struct mlx4_dev *dev, mlx4_steering_mode_str(dev->caps.steering_mode)); } +static void mlx4_slave_destroy_special_qp_cap(struct mlx4_dev *dev) +{ + kfree(dev->caps.spec_qps); + dev->caps.spec_qps = NULL; +} + +static int mlx4_slave_special_qp_cap(struct mlx4_dev *dev) +{ + struct mlx4_func_cap *func_cap = NULL; + struct mlx4_caps *caps = &dev->caps; + int i, err = 0; + + func_cap = kzalloc(sizeof(*func_cap), GFP_KERNEL); + caps->spec_qps = kcalloc(caps->num_ports, sizeof(*caps->spec_qps), GFP_KERNEL); + + if (!func_cap || !caps->spec_qps) { + mlx4_err(dev, "Failed to allocate memory for special qps cap\n"); + err = -ENOMEM; + goto err_mem; + } + + for (i = 1; i <= caps->num_ports; ++i) { + err = mlx4_QUERY_FUNC_CAP(dev, i, func_cap); + if (err) { + mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n", + i, err); + goto err_mem; + } + caps->spec_qps[i - 1] = func_cap->spec_qps; + caps->port_mask[i] = caps->port_type[i]; + caps->phys_port_id[i] = 
func_cap->phys_port_id; + err = mlx4_get_slave_pkey_gid_tbl_len(dev, i, + &caps->gid_table_len[i], + &caps->pkey_table_len[i]); + if (err) { + mlx4_err(dev, "QUERY_PORT command failed for port %d, aborting (%d)\n", + i, err); + goto err_mem; + } + } + +err_mem: + if (err) + mlx4_slave_destroy_special_qp_cap(dev); + kfree(func_cap); + return err; +} + static int mlx4_slave_cap(struct mlx4_dev *dev) { int err; u32 page_size; - struct mlx4_dev_cap dev_cap; - struct mlx4_func_cap func_cap; - struct mlx4_init_hca_param hca_param; - u8 i; + struct mlx4_dev_cap *dev_cap = NULL; + struct mlx4_func_cap *func_cap = NULL; + struct mlx4_init_hca_param *hca_param = NULL; - memset(&hca_param, 0, sizeof(hca_param)); - err = mlx4_QUERY_HCA(dev, &hca_param); + hca_param = kzalloc(sizeof(*hca_param), GFP_KERNEL); + func_cap = kzalloc(sizeof(*func_cap), GFP_KERNEL); + dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL); + if (!hca_param || !func_cap || !dev_cap) { + mlx4_err(dev, "Failed to allocate memory for slave_cap\n"); + err = -ENOMEM; + goto free_mem; + } + + err = mlx4_QUERY_HCA(dev, hca_param); if (err) { mlx4_err(dev, "QUERY_HCA command failed, aborting\n"); - return err; + goto free_mem; } /* fail if the hca has an unknown global capability * at this time global_caps should be always zeroed */ - if (hca_param.global_caps) { + if (hca_param->global_caps) { mlx4_err(dev, "Unknown hca global capabilities\n"); - return -EINVAL; + err = -EINVAL; + goto free_mem; } - dev->caps.hca_core_clock = hca_param.hca_core_clock; + dev->caps.hca_core_clock = hca_param->hca_core_clock; - memset(&dev_cap, 0, sizeof(dev_cap)); - dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp; - err = mlx4_dev_cap(dev, &dev_cap); + dev->caps.max_qp_dest_rdma = 1 << hca_param->log_rd_per_qp; + err = mlx4_dev_cap(dev, dev_cap); if (err) { mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); - return err; + goto free_mem; } err = mlx4_QUERY_FW(dev); @@ -862,21 +918,23 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) if (page_size > PAGE_SIZE) { mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n", page_size, PAGE_SIZE); - return -ENODEV; + err = -ENODEV; + goto free_mem; } /* Set uar_page_shift for VF */ - dev->uar_page_shift = hca_param.uar_page_sz + 12; + dev->uar_page_shift = hca_param->uar_page_sz + 12; /* Make sure the master uar page size is valid */ if (dev->uar_page_shift > PAGE_SHIFT) { mlx4_err(dev, "Invalid configuration: uar page size is larger than system page size\n"); - return -ENODEV; + err = -ENODEV; + goto free_mem; } /* Set reserved_uars based on the uar_page_shift */ - mlx4_set_num_reserved_uars(dev, &dev_cap); + mlx4_set_num_reserved_uars(dev, dev_cap); /* Although uar page size in FW differs from system page size, * upper software layers (mlx4_ib, mlx4_en and part of mlx4_core) @@ -884,34 +942,35 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) */ dev->caps.uar_page_size = PAGE_SIZE; - memset(&func_cap, 0, sizeof(func_cap)); - err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap); + err = mlx4_QUERY_FUNC_CAP(dev, 0, func_cap); if (err) { mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n", err); - return err; + goto free_mem; } - if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) != + if ((func_cap->pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) != PF_CONTEXT_BEHAVIOUR_MASK) { mlx4_err(dev, "Unknown pf context behaviour %x known flags %x\n", - func_cap.pf_context_behaviour, PF_CONTEXT_BEHAVIOUR_MASK); - return -EINVAL; + 
func_cap->pf_context_behaviour, + PF_CONTEXT_BEHAVIOUR_MASK); + err = -EINVAL; + goto free_mem; } - dev->caps.num_ports = func_cap.num_ports; - dev->quotas.qp = func_cap.qp_quota; - dev->quotas.srq = func_cap.srq_quota; - dev->quotas.cq = func_cap.cq_quota; - dev->quotas.mpt = func_cap.mpt_quota; - dev->quotas.mtt = func_cap.mtt_quota; - dev->caps.num_qps = 1 << hca_param.log_num_qps; - dev->caps.num_srqs = 1 << hca_param.log_num_srqs; - dev->caps.num_cqs = 1 << hca_param.log_num_cqs; - dev->caps.num_mpts = 1 << hca_param.log_mpt_sz; - dev->caps.num_eqs = func_cap.max_eq; - dev->caps.reserved_eqs = func_cap.reserved_eq; - dev->caps.reserved_lkey = func_cap.reserved_lkey; + dev->caps.num_ports = func_cap->num_ports; + dev->quotas.qp = func_cap->qp_quota; + dev->quotas.srq = func_cap->srq_quota; + dev->quotas.cq = func_cap->cq_quota; + dev->quotas.mpt = func_cap->mpt_quota; + dev->quotas.mtt = func_cap->mtt_quota; + dev->caps.num_qps = 1 << hca_param->log_num_qps; + dev->caps.num_srqs = 1 << hca_param->log_num_srqs; + dev->caps.num_cqs = 1 << hca_param->log_num_cqs; + dev->caps.num_mpts = 1 << hca_param->log_mpt_sz; + dev->caps.num_eqs = func_cap->max_eq; + dev->caps.reserved_eqs = func_cap->reserved_eq; + dev->caps.reserved_lkey = func_cap->reserved_lkey; dev->caps.num_pds = MLX4_NUM_PDS; dev->caps.num_mgms = 0; dev->caps.num_amgms = 0; @@ -919,43 +978,16 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) if (dev->caps.num_ports > MLX4_MAX_PORTS) { mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n", dev->caps.num_ports, MLX4_MAX_PORTS); - return -ENODEV; + err = -ENODEV; + goto free_mem; } mlx4_replace_zero_macs(dev); - dev->caps.qp0_qkey = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL); - dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); - dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); - dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); - dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); - - if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy || - !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy || - !dev->caps.qp0_qkey) { - err = -ENOMEM; - goto err_mem; - } - - for (i = 1; i <= dev->caps.num_ports; ++i) { - err = mlx4_QUERY_FUNC_CAP(dev, i, &func_cap); - if (err) { - mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n", - i, err); - goto err_mem; - } - dev->caps.qp0_qkey[i - 1] = func_cap.qp0_qkey; - dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn; - dev->caps.qp0_proxy[i - 1] = func_cap.qp0_proxy_qpn; - dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn; - dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn; - dev->caps.port_mask[i] = dev->caps.port_type[i]; - dev->caps.phys_port_id[i] = func_cap.phys_port_id; - err = mlx4_get_slave_pkey_gid_tbl_len(dev, i, - &dev->caps.gid_table_len[i], - &dev->caps.pkey_table_len[i]); - if (err) - goto err_mem; + err = mlx4_slave_special_qp_cap(dev); + if (err) { + mlx4_err(dev, "Set special QP caps failed. 
aborting\n"); + goto free_mem; } if (dev->caps.uar_page_size * (dev->caps.num_uars - @@ -970,7 +1002,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) goto err_mem; } - if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) { + if (hca_param->dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) { dev->caps.eqe_size = 64; dev->caps.eqe_factor = 1; } else { @@ -978,20 +1010,20 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) dev->caps.eqe_factor = 0; } - if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) { + if (hca_param->dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) { dev->caps.cqe_size = 64; dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; } else { dev->caps.cqe_size = 32; } - if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_EQE_STRIDE_ENABLED) { - dev->caps.eqe_size = hca_param.eqe_size; + if (hca_param->dev_cap_enabled & MLX4_DEV_CAP_EQE_STRIDE_ENABLED) { + dev->caps.eqe_size = hca_param->eqe_size; dev->caps.eqe_factor = 0; } - if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_CQE_STRIDE_ENABLED) { - dev->caps.cqe_size = hca_param.cqe_size; + if (hca_param->dev_cap_enabled & MLX4_DEV_CAP_CQE_STRIDE_ENABLED) { + dev->caps.cqe_size = hca_param->cqe_size; /* User still need to know when CQE > 32B */ dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; } @@ -999,31 +1031,27 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; mlx4_warn(dev, "Timestamping is not supported in slave mode\n"); - slave_adjust_steering_mode(dev, &dev_cap, &hca_param); - mlx4_dbg(dev, "RSS support for IP fragments is %s\n", - hca_param.rss_ip_frags ? "on" : "off"); + dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_USER_MAC_EN; + mlx4_dbg(dev, "User MAC FW update is not supported in slave mode\n"); - if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP && + slave_adjust_steering_mode(dev, dev_cap, hca_param); + mlx4_dbg(dev, "RSS support for IP fragments is %s\n", + hca_param->rss_ip_frags ? 
"on" : "off"); + + if (func_cap->extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP && dev->caps.bf_reg_size) dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_ETH_BF_QP; - if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_A0_RES_QP) + if (func_cap->extra_flags & MLX4_QUERY_FUNC_FLAGS_A0_RES_QP) dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_A0_QP; - return 0; - err_mem: - kfree(dev->caps.qp0_qkey); - kfree(dev->caps.qp0_tunnel); - kfree(dev->caps.qp0_proxy); - kfree(dev->caps.qp1_tunnel); - kfree(dev->caps.qp1_proxy); - dev->caps.qp0_qkey = NULL; - dev->caps.qp0_tunnel = NULL; - dev->caps.qp0_proxy = NULL; - dev->caps.qp1_tunnel = NULL; - dev->caps.qp1_proxy = NULL; - + if (err) + mlx4_slave_destroy_special_qp_cap(dev); +free_mem: + kfree(hca_param); + kfree(func_cap); + kfree(dev_cap); return err; } @@ -2399,7 +2427,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev) dev->caps.rx_checksum_flags_port[2] = params.rx_csum_flags_port_2; } priv->eq_table.inta_pin = adapter.inta_pin; - memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id); + memcpy(dev->board_id, adapter.board_id, sizeof(dev->board_id)); return 0; @@ -2407,13 +2435,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev) unmap_internal_clock(dev); unmap_bf_area(dev); - if (mlx4_is_slave(dev)) { - kfree(dev->caps.qp0_qkey); - kfree(dev->caps.qp0_tunnel); - kfree(dev->caps.qp0_proxy); - kfree(dev->caps.qp1_tunnel); - kfree(dev->caps.qp1_proxy); - } + if (mlx4_is_slave(dev)) + mlx4_slave_destroy_special_qp_cap(dev); err_close: if (mlx4_is_slave(dev)) @@ -2870,7 +2893,7 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) dev->caps.num_eqs - dev->caps.reserved_eqs, MAX_MSIX); - entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); + entries = kcalloc(nreq, sizeof(*entries), GFP_KERNEL); if (!entries) goto no_msi; @@ -3597,13 +3620,8 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, mlx4_multi_func_cleanup(dev); } - if (mlx4_is_slave(dev)) { - kfree(dev->caps.qp0_qkey); - kfree(dev->caps.qp0_tunnel); - kfree(dev->caps.qp0_proxy); - kfree(dev->caps.qp1_tunnel); - kfree(dev->caps.qp1_proxy); - } + if (mlx4_is_slave(dev)) + mlx4_slave_destroy_special_qp_cap(dev); err_close: mlx4_close_hca(dev); @@ -3659,7 +3677,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data, * per port, we must limit the number of VFs to 63 (since their are * 128 MACs) */ - for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc; + for (i = 0; i < ARRAY_SIZE(nvfs) && i < num_vfs_argc; total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) { nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i]; if (nvfs[i] < 0) { @@ -3668,7 +3686,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data, goto err_disable_pdev; } } - for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc; + for (i = 0; i < ARRAY_SIZE(prb_vf) && i < probe_vfs_argc; i++) { prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i]; if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) { @@ -3747,11 +3765,11 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data, if (total_vfs) { unsigned vfs_offset = 0; - for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && + for (i = 0; i < ARRAY_SIZE(nvfs) && vfs_offset + nvfs[i] < extended_func_num(pdev); vfs_offset += nvfs[i], i++) ; - if (i == sizeof(nvfs)/sizeof(nvfs[0])) { + if (i == ARRAY_SIZE(nvfs)) { err = -ENODEV; goto err_release_regions; } @@ -3783,7 +3801,6 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data, err_disable_pdev: mlx4_pci_disable_device(&priv->dev); - 
pci_set_drvdata(pdev, NULL); return err; } @@ -3944,11 +3961,7 @@ static void mlx4_unload_one(struct pci_dev *pdev) if (!mlx4_is_slave(dev)) mlx4_free_ownership(dev); - kfree(dev->caps.qp0_qkey); - kfree(dev->caps.qp0_tunnel); - kfree(dev->caps.qp0_proxy); - kfree(dev->caps.qp1_tunnel); - kfree(dev->caps.qp1_proxy); + mlx4_slave_destroy_special_qp_cap(dev); kfree(dev->dev_vfs); mlx4_clean_dev(dev); @@ -3998,7 +4011,6 @@ static void mlx4_remove_one(struct pci_dev *pdev) devlink_unregister(devlink); kfree(dev->persist); devlink_free(devlink); - pci_set_drvdata(pdev, NULL); } static int restore_current_port_types(struct mlx4_dev *dev, diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index 0710b3677464..4c5306dbcf11 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -162,7 +162,7 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 port, return -EINVAL; s_steer = &mlx4_priv(dev)->steer[port - 1]; - new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL); + new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL); if (!new_entry) return -ENOMEM; @@ -175,7 +175,7 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 port, */ pqp = get_promisc_qp(dev, port, steer, qpn); if (pqp) { - dqp = kmalloc(sizeof *dqp, GFP_KERNEL); + dqp = kmalloc(sizeof(*dqp), GFP_KERNEL); if (!dqp) { err = -ENOMEM; goto out_alloc; @@ -274,7 +274,7 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 port, } /* add the qp as a duplicate on this index */ - dqp = kmalloc(sizeof *dqp, GFP_KERNEL); + dqp = kmalloc(sizeof(*dqp), GFP_KERNEL); if (!dqp) return -ENOMEM; dqp->qpn = qpn; @@ -443,7 +443,7 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 port, goto out_mutex; } - pqp = kmalloc(sizeof *pqp, GFP_KERNEL); + pqp = kmalloc(sizeof(*pqp), GFP_KERNEL); if (!pqp) { err = -ENOMEM; goto out_mutex; @@ -514,7 +514,7 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 port, /* add the new qpn to list of promisc qps */ list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]); /* now need to add all the promisc qps to default entry */ - memset(mgm, 0, sizeof *mgm); + memset(mgm, 0, sizeof(*mgm)); members_count = 0; list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) { if (members_count == dev->caps.num_qp_per_mgm) { @@ -1144,7 +1144,7 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], index += dev->caps.num_mgms; new_entry = 1; - memset(mgm, 0, sizeof *mgm); + memset(mgm, 0, sizeof(*mgm)); memcpy(mgm->gid, gid, 16); } diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 852d00a5b016..c68da1986e51 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -807,6 +807,8 @@ struct mlx4_set_port_general_context { u8 phv_en; u8 reserved6[5]; __be16 user_mtu; + u16 reserved7; + u8 user_mac[6]; }; struct mlx4_set_port_rqp_calc_context { diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index d350b2158104..fdb3ad0cbe54 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -685,7 +685,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, int cq_idx); void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); -int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct 
mlx4_en_cq *cq); +void mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); void mlx4_en_tx_irq(struct mlx4_cq *mcq); u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index 24282cd017d3..c7c0764991c9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -106,9 +106,9 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order) buddy->max_order = max_order; spin_lock_init(&buddy->lock); - buddy->bits = kcalloc(buddy->max_order + 1, sizeof (long *), + buddy->bits = kcalloc(buddy->max_order + 1, sizeof(long *), GFP_KERNEL); - buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free, + buddy->num_free = kcalloc(buddy->max_order + 1, sizeof(*buddy->num_free), GFP_KERNEL); if (!buddy->bits || !buddy->num_free) goto err_out; @@ -703,13 +703,13 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt, return -ENOMEM; dma_sync_single_for_cpu(&dev->persist->pdev->dev, dma_handle, - npages * sizeof (u64), DMA_TO_DEVICE); + npages * sizeof(u64), DMA_TO_DEVICE); for (i = 0; i < npages; ++i) mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); dma_sync_single_for_device(&dev->persist->pdev->dev, dma_handle, - npages * sizeof (u64), DMA_TO_DEVICE); + npages * sizeof(u64), DMA_TO_DEVICE); return 0; } @@ -1052,7 +1052,7 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, return -EINVAL; /* All MTTs must fit in the same page */ - if (max_pages * sizeof *fmr->mtts > PAGE_SIZE) + if (max_pages * sizeof(*fmr->mtts) > PAGE_SIZE) return -EINVAL; fmr->page_shift = page_shift; diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index 4e36e287d605..3ef3406ff4cb 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -52,6 +52,7 @@ #define MLX4_FLAG2_V_IGNORE_FCS_MASK BIT(1) #define MLX4_FLAG2_V_USER_MTU_MASK BIT(5) +#define MLX4_FLAG2_V_USER_MAC_MASK BIT(6) #define MLX4_FLAG_V_MTU_MASK BIT(0) #define MLX4_FLAG_V_PPRX_MASK BIT(1) #define MLX4_FLAG_V_PPTX_MASK BIT(2) @@ -1700,6 +1701,30 @@ int mlx4_SET_PORT_user_mtu(struct mlx4_dev *dev, u8 port, u16 user_mtu) } EXPORT_SYMBOL(mlx4_SET_PORT_user_mtu); +int mlx4_SET_PORT_user_mac(struct mlx4_dev *dev, u8 port, u8 *user_mac) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_set_port_general_context *context; + u32 in_mod; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + context = mailbox->buf; + context->flags2 |= MLX4_FLAG2_V_USER_MAC_MASK; + memcpy(context->user_mac, user_mac, sizeof(context->user_mac)); + + in_mod = MLX4_SET_PORT_GENERAL << 8 | port; + err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE, + MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL(mlx4_SET_PORT_user_mac); + int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port, u8 ignore_fcs_value) { struct mlx4_cmd_mailbox *mailbox; diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 5e5b4475b85e..728a2fb1f5c0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -174,7 +174,7 @@ static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, cpu_to_be16(mlx4_qp_roce_entropy(dev, qp->qpn)); 
*(__be32 *) mailbox->buf = cpu_to_be32(optpar); - memcpy(mailbox->buf + 8, context, sizeof *context); + memcpy(mailbox->buf + 8, context, sizeof(*context)); ((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn = cpu_to_be32(qp->qpn); @@ -845,24 +845,21 @@ int mlx4_init_qp_table(struct mlx4_dev *dev) /* In mfunc, calculate proxy and tunnel qp offsets for the PF here, * since the PF does not call mlx4_slave_caps */ - dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); - dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); - dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); - dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); - - if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy || - !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy) { + dev->caps.spec_qps = kcalloc(dev->caps.num_ports, + sizeof(*dev->caps.spec_qps), + GFP_KERNEL); + if (!dev->caps.spec_qps) { err = -ENOMEM; goto err_mem; } for (k = 0; k < dev->caps.num_ports; k++) { - dev->caps.qp0_proxy[k] = dev->phys_caps.base_proxy_sqpn + + dev->caps.spec_qps[k].qp0_proxy = dev->phys_caps.base_proxy_sqpn + 8 * mlx4_master_func_num(dev) + k; - dev->caps.qp0_tunnel[k] = dev->caps.qp0_proxy[k] + 8 * MLX4_MFUNC_MAX; - dev->caps.qp1_proxy[k] = dev->phys_caps.base_proxy_sqpn + + dev->caps.spec_qps[k].qp0_tunnel = dev->caps.spec_qps[k].qp0_proxy + 8 * MLX4_MFUNC_MAX; + dev->caps.spec_qps[k].qp1_proxy = dev->phys_caps.base_proxy_sqpn + 8 * mlx4_master_func_num(dev) + MLX4_MAX_PORTS + k; - dev->caps.qp1_tunnel[k] = dev->caps.qp1_proxy[k] + 8 * MLX4_MFUNC_MAX; + dev->caps.spec_qps[k].qp1_tunnel = dev->caps.spec_qps[k].qp1_proxy + 8 * MLX4_MFUNC_MAX; } } @@ -874,12 +871,8 @@ int mlx4_init_qp_table(struct mlx4_dev *dev) return err; err_mem: - kfree(dev->caps.qp0_tunnel); - kfree(dev->caps.qp0_proxy); - kfree(dev->caps.qp1_tunnel); - kfree(dev->caps.qp1_proxy); - dev->caps.qp0_tunnel = dev->caps.qp0_proxy = - dev->caps.qp1_tunnel = dev->caps.qp1_proxy = NULL; + kfree(dev->caps.spec_qps); + dev->caps.spec_qps = NULL; mlx4_cleanup_qp_zones(dev); return err; } @@ -908,7 +901,7 @@ int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp, MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); if (!err) - memcpy(context, mailbox->buf + 8, sizeof *context); + memcpy(context, mailbox->buf + 8, sizeof(*context)); mlx4_free_cmd_mailbox(dev, mailbox); return err; diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 215e21c3dc8a..fabb53379727 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -1040,7 +1040,7 @@ static struct res_common *alloc_qp_tr(int id) { struct res_qp *ret; - ret = kzalloc(sizeof *ret, GFP_KERNEL); + ret = kzalloc(sizeof(*ret), GFP_KERNEL); if (!ret) return NULL; @@ -1058,7 +1058,7 @@ static struct res_common *alloc_mtt_tr(int id, int order) { struct res_mtt *ret; - ret = kzalloc(sizeof *ret, GFP_KERNEL); + ret = kzalloc(sizeof(*ret), GFP_KERNEL); if (!ret) return NULL; @@ -1074,7 +1074,7 @@ static struct res_common *alloc_mpt_tr(int id, int key) { struct res_mpt *ret; - ret = kzalloc(sizeof *ret, GFP_KERNEL); + ret = kzalloc(sizeof(*ret), GFP_KERNEL); if (!ret) return NULL; @@ -1089,7 +1089,7 @@ static struct res_common *alloc_eq_tr(int id) { struct res_eq *ret; - ret = kzalloc(sizeof *ret, GFP_KERNEL); + ret = kzalloc(sizeof(*ret), GFP_KERNEL); if (!ret) return NULL; @@ 
-1103,7 +1103,7 @@ static struct res_common *alloc_cq_tr(int id) { struct res_cq *ret; - ret = kzalloc(sizeof *ret, GFP_KERNEL); + ret = kzalloc(sizeof(*ret), GFP_KERNEL); if (!ret) return NULL; @@ -1118,7 +1118,7 @@ static struct res_common *alloc_srq_tr(int id) { struct res_srq *ret; - ret = kzalloc(sizeof *ret, GFP_KERNEL); + ret = kzalloc(sizeof(*ret), GFP_KERNEL); if (!ret) return NULL; @@ -1133,7 +1133,7 @@ static struct res_common *alloc_counter_tr(int id, int port) { struct res_counter *ret; - ret = kzalloc(sizeof *ret, GFP_KERNEL); + ret = kzalloc(sizeof(*ret), GFP_KERNEL); if (!ret) return NULL; @@ -1148,7 +1148,7 @@ static struct res_common *alloc_xrcdn_tr(int id) { struct res_xrcdn *ret; - ret = kzalloc(sizeof *ret, GFP_KERNEL); + ret = kzalloc(sizeof(*ret), GFP_KERNEL); if (!ret) return NULL; @@ -1162,7 +1162,7 @@ static struct res_common *alloc_fs_rule_tr(u64 id, int qpn) { struct res_fs_rule *ret; - ret = kzalloc(sizeof *ret, GFP_KERNEL); + ret = kzalloc(sizeof(*ret), GFP_KERNEL); if (!ret) return NULL; @@ -1274,7 +1274,7 @@ static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count, struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; struct rb_root *root = &tracker->res_tree[type]; - res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL); + res_arr = kcalloc(count, sizeof(*res_arr), GFP_KERNEL); if (!res_arr) return -ENOMEM; @@ -2027,7 +2027,7 @@ static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port)) return -EINVAL; - res = kzalloc(sizeof *res, GFP_KERNEL); + res = kzalloc(sizeof(*res), GFP_KERNEL); if (!res) { mlx4_release_resource(dev, slave, RES_MAC, 1, port); return -ENOMEM; @@ -4020,7 +4020,7 @@ static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp, struct res_gid *res; int err; - res = kzalloc(sizeof *res, GFP_KERNEL); + res = kzalloc(sizeof(*res), GFP_KERNEL); if (!res) return -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 5aee05992f27..fdaef00465d7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -34,6 +34,27 @@ config MLX5_CORE_EN ---help--- Ethernet support in Mellanox Technologies ConnectX-4 NIC. +config MLX5_MPFS + bool "Mellanox Technologies MLX5 MPFS support" + depends on MLX5_CORE_EN + default y + ---help--- + Mellanox Technologies Ethernet Multi-Physical Function Switch (MPFS) + support in ConnectX NIC. MPFs is required for when multi-PF configuration + is enabled to allow passing user configured unicast MAC addresses to the + requesting PF. + +config MLX5_ESWITCH + bool "Mellanox Technologies MLX5 SRIOV E-Switch support" + depends on MLX5_CORE_EN + default y + ---help--- + Mellanox Technologies Ethernet SRIOV E-Switch support in ConnectX NIC. + E-Switch provides internal SRIOV packet steering and switching for the + enabled VFs and PF in two available modes: + Legacy SRIOV mode (L2 mac vlan steering based). + Switchdev mode (eswitch offloads). 
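A note on the two Kconfig symbols introduced above: both are build-time switches, so the rest of the driver must compile whether or not they are set. Below is a minimal sketch of the usual stub pattern, with the helper names taken from the mlx5_mpfs_add_mac()/mlx5_mpfs_del_mac() call sites added to en_fs.c later in this diff; the exact layout of lib/mpfs.h is not shown here and is assumed, not quoted:

	#ifdef CONFIG_MLX5_MPFS
	/* Real implementations (assumed to live in lib/mpfs.c) program the
	 * hardware MAC table shared by the physical functions.
	 */
	int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac);
	int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac);
	#else
	/* MPFS compiled out: callers still build, the helpers simply do nothing. */
	static inline int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; }
	static inline int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; }
	#endif

With a header laid out along these lines, the mlx5_mpfs_add_mac()/mlx5_mpfs_del_mac() calls added to mlx5e_execute_l2_action() further down compile cleanly even when CONFIG_MLX5_MPFS is disabled.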
+ config MLX5_CORE_EN_DCB bool "Data Center Bridging (DCB) Support" default y diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 9d17e4e76d3a..87a3099808f3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -4,17 +4,21 @@ subdir-ccflags-y += -I$(src) mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \ mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \ - fs_counters.o rl.o lag.o dev.o wq.o lib/gid.o + fs_counters.o rl.o lag.o dev.o wq.o lib/gid.o \ + diag/fs_tracepoint.o mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \ fpga/ipsec.o -mlx5_core-$(CONFIG_MLX5_CORE_EN) += eswitch.o eswitch_offloads.o \ - en_main.o en_common.o en_fs.o en_ethtool.o en_tx.o \ - en_rx.o en_rx_am.o en_txrx.o en_clock.o vxlan.o \ - en_tc.o en_arfs.o en_rep.o en_fs_ethtool.o en_selftest.o +mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ + en_tx.o en_rx.o en_rx_am.o en_txrx.o en_clock.o vxlan.o \ + en_arfs.o en_fs_ethtool.o en_selftest.o + +mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o + +mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o en_rep.o en_tc.o mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o @@ -22,3 +26,5 @@ mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \ en_accel/ipsec_stats.o + +CFLAGS_tracepoint.o := -I$(src) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c index 3c95f7f53802..47239bf7bf43 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c @@ -258,6 +258,7 @@ EXPORT_SYMBOL_GPL(mlx5_db_alloc); void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db) { u32 db_per_page = PAGE_SIZE / cache_line_size(); + mutex_lock(&dev->priv.pgdir_mutex); __set_bit(db->index, db->u.pgdir->bitmap); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 1acbb721f38d..1fffdebbc9e8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -802,7 +802,6 @@ static void cmd_work_handler(struct work_struct *work) bool poll_cmd = ent->polling; int alloc_ret; - sem = ent->page_queue ? 
&cmd->pages_sem : &cmd->sem; down(sem); if (!ent->page_queue) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index a62f4b6a21a5..ff60cf7342ca 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -45,11 +45,70 @@ struct mlx5_device_context { unsigned long state; }; +struct mlx5_delayed_event { + struct list_head list; + struct mlx5_core_dev *dev; + enum mlx5_dev_event event; + unsigned long param; +}; + enum { MLX5_INTERFACE_ADDED, MLX5_INTERFACE_ATTACHED, }; +static void add_delayed_event(struct mlx5_priv *priv, + struct mlx5_core_dev *dev, + enum mlx5_dev_event event, + unsigned long param) +{ + struct mlx5_delayed_event *delayed_event; + + delayed_event = kzalloc(sizeof(*delayed_event), GFP_ATOMIC); + if (!delayed_event) { + mlx5_core_err(dev, "event %d is missed\n", event); + return; + } + + mlx5_core_dbg(dev, "Accumulating event %d\n", event); + delayed_event->dev = dev; + delayed_event->event = event; + delayed_event->param = param; + list_add_tail(&delayed_event->list, &priv->waiting_events_list); +} + +static void fire_delayed_event_locked(struct mlx5_device_context *dev_ctx, + struct mlx5_core_dev *dev, + struct mlx5_priv *priv) +{ + struct mlx5_delayed_event *de; + struct mlx5_delayed_event *n; + + /* stop delaying events */ + priv->is_accum_events = false; + + /* fire all accumulated events before new event comes */ + list_for_each_entry_safe(de, n, &priv->waiting_events_list, list) { + dev_ctx->intf->event(dev, dev_ctx->context, de->event, de->param); + list_del(&de->list); + kfree(de); + } +} + +static void cleanup_delayed_evets(struct mlx5_priv *priv) +{ + struct mlx5_delayed_event *de; + struct mlx5_delayed_event *n; + + spin_lock_irq(&priv->ctx_lock); + priv->is_accum_events = false; + list_for_each_entry_safe(de, n, &priv->waiting_events_list, list) { + list_del(&de->list); + kfree(de); + } + spin_unlock_irq(&priv->ctx_lock); +} + void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv) { struct mlx5_device_context *dev_ctx; @@ -63,6 +122,12 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv) return; dev_ctx->intf = intf; + /* accumulating events that can come after mlx5_ib calls to + * ib_register_device, till adding that interface to the events list. 
+ */ + + priv->is_accum_events = true; + dev_ctx->context = intf->add(dev); set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state); if (intf->attach) @@ -71,6 +136,9 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv) if (dev_ctx->context) { spin_lock_irq(&priv->ctx_lock); list_add_tail(&dev_ctx->list, &priv->ctx_list); + + fire_delayed_event_locked(dev_ctx, dev, priv); + #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING if (dev_ctx->intf->pfault) { if (priv->pfault) { @@ -84,6 +152,8 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv) spin_unlock_irq(&priv->ctx_lock); } else { kfree(dev_ctx); + /* delete all accumulated events */ + cleanup_delayed_evets(priv); } } @@ -341,6 +411,9 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, spin_lock_irqsave(&priv->ctx_lock, flags); + if (priv->is_accum_events) + add_delayed_event(priv, dev, event, param); + list_for_each_entry(dev_ctx, &priv->ctx_list, list) if (dev_ctx->intf->event) dev_ctx->intf->event(dev, dev_ctx->context, event, param); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/diag/Makefile new file mode 100644 index 000000000000..d8e17110f25d --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/Makefile @@ -0,0 +1 @@ +subdir-ccflags-y += -I$(src)/.. diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c new file mode 100644 index 000000000000..0be4575b58a2 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c @@ -0,0 +1,261 @@ +/* + * Copyright (c) 2017, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#define CREATE_TRACE_POINTS + +#include "fs_tracepoint.h" +#include + +#define DECLARE_MASK_VAL(type, name) struct {type m; type v; } name +#define MASK_VAL(type, spec, name, mask, val, fld) \ + DECLARE_MASK_VAL(type, name) = \ + {.m = MLX5_GET(spec, mask, fld),\ + .v = MLX5_GET(spec, val, fld)} +#define MASK_VAL_BE(type, spec, name, mask, val, fld) \ + DECLARE_MASK_VAL(type, name) = \ + {.m = MLX5_GET_BE(type, spec, mask, fld),\ + .v = MLX5_GET_BE(type, spec, val, fld)} +#define GET_MASKED_VAL(name) (name.m & name.v) + +#define GET_MASK_VAL(name, type, mask, val, fld) \ + (name.m = MLX5_GET(type, mask, fld), \ + name.v = MLX5_GET(type, val, fld), \ + name.m & name.v) +#define PRINT_MASKED_VAL(name, p, format) { \ + if (name.m) \ + trace_seq_printf(p, __stringify(name) "=" format " ", name.v); \ + } +#define PRINT_MASKED_VALP(name, cast, p, format) { \ + if (name.m) \ + trace_seq_printf(p, __stringify(name) "=" format " ", \ + (cast)&name.v);\ + } + +static void print_lyr_2_4_hdrs(struct trace_seq *p, + const u32 *mask, const u32 *value) +{ +#define MASK_VAL_L2(type, name, fld) \ + MASK_VAL(type, fte_match_set_lyr_2_4, name, mask, value, fld) + DECLARE_MASK_VAL(u64, smac) = { + .m = MLX5_GET(fte_match_set_lyr_2_4, mask, smac_47_16) << 16 | + MLX5_GET(fte_match_set_lyr_2_4, mask, smac_15_0), + .v = MLX5_GET(fte_match_set_lyr_2_4, value, smac_47_16) << 16 | + MLX5_GET(fte_match_set_lyr_2_4, value, smac_15_0)}; + DECLARE_MASK_VAL(u64, dmac) = { + .m = MLX5_GET(fte_match_set_lyr_2_4, mask, dmac_47_16) << 16 | + MLX5_GET(fte_match_set_lyr_2_4, mask, dmac_15_0), + .v = MLX5_GET(fte_match_set_lyr_2_4, value, dmac_47_16) << 16 | + MLX5_GET(fte_match_set_lyr_2_4, value, dmac_15_0)}; + MASK_VAL_L2(u16, ethertype, ethertype); + + PRINT_MASKED_VALP(smac, u8 *, p, "%pM"); + PRINT_MASKED_VALP(dmac, u8 *, p, "%pM"); + PRINT_MASKED_VAL(ethertype, p, "%04x"); + + if (ethertype.m == 0xffff) { + if (ethertype.v == ETH_P_IP) { +#define MASK_VAL_L2_BE(type, name, fld) \ + MASK_VAL_BE(type, fte_match_set_lyr_2_4, name, mask, value, fld) + MASK_VAL_L2_BE(u32, src_ipv4, + src_ipv4_src_ipv6.ipv4_layout.ipv4); + MASK_VAL_L2_BE(u32, dst_ipv4, + dst_ipv4_dst_ipv6.ipv4_layout.ipv4); + + PRINT_MASKED_VALP(src_ipv4, typeof(&src_ipv4.v), p, + "%pI4"); + PRINT_MASKED_VALP(dst_ipv4, typeof(&dst_ipv4.v), p, + "%pI4"); + } else if (ethertype.v == ETH_P_IPV6) { + static const struct in6_addr full_ones = { + .in6_u.u6_addr32 = {htonl(0xffffffff), + htonl(0xffffffff), + htonl(0xffffffff), + htonl(0xffffffff)}, + }; + DECLARE_MASK_VAL(struct in6_addr, src_ipv6); + DECLARE_MASK_VAL(struct in6_addr, dst_ipv6); + + memcpy(src_ipv6.m.in6_u.u6_addr8, + MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask, + src_ipv4_src_ipv6.ipv6_layout.ipv6), + sizeof(src_ipv6.m)); + memcpy(dst_ipv6.m.in6_u.u6_addr8, + MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask, + dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + sizeof(dst_ipv6.m)); + memcpy(src_ipv6.v.in6_u.u6_addr8, + MLX5_ADDR_OF(fte_match_set_lyr_2_4, value, + src_ipv4_src_ipv6.ipv6_layout.ipv6), + sizeof(src_ipv6.v)); + memcpy(dst_ipv6.v.in6_u.u6_addr8, + MLX5_ADDR_OF(fte_match_set_lyr_2_4, value, + dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + sizeof(dst_ipv6.v)); + + if (!memcmp(&src_ipv6.m, &full_ones, sizeof(full_ones))) + trace_seq_printf(p, "src_ipv6=%pI6 ", + &src_ipv6.v); + if (!memcmp(&dst_ipv6.m, &full_ones, sizeof(full_ones))) + trace_seq_printf(p, "dst_ipv6=%pI6 ", + &dst_ipv6.v); + } + } + +#define PRINT_MASKED_VAL_L2(type, name, fld, p, format) {\ + MASK_VAL_L2(type, name, fld); \ + 
PRINT_MASKED_VAL(name, p, format); \ +} + + PRINT_MASKED_VAL_L2(u8, ip_protocol, ip_protocol, p, "%02x"); + PRINT_MASKED_VAL_L2(u16, tcp_flags, tcp_flags, p, "%x"); + PRINT_MASKED_VAL_L2(u16, tcp_sport, tcp_sport, p, "%u"); + PRINT_MASKED_VAL_L2(u16, tcp_dport, tcp_dport, p, "%u"); + PRINT_MASKED_VAL_L2(u16, udp_sport, udp_sport, p, "%u"); + PRINT_MASKED_VAL_L2(u16, udp_dport, udp_dport, p, "%u"); + PRINT_MASKED_VAL_L2(u16, first_vid, first_vid, p, "%04x"); + PRINT_MASKED_VAL_L2(u8, first_prio, first_prio, p, "%x"); + PRINT_MASKED_VAL_L2(u8, first_cfi, first_cfi, p, "%d"); + PRINT_MASKED_VAL_L2(u8, ip_dscp, ip_dscp, p, "%02x"); + PRINT_MASKED_VAL_L2(u8, ip_ecn, ip_ecn, p, "%x"); + PRINT_MASKED_VAL_L2(u8, cvlan_tag, cvlan_tag, p, "%d"); + PRINT_MASKED_VAL_L2(u8, svlan_tag, svlan_tag, p, "%d"); + PRINT_MASKED_VAL_L2(u8, frag, frag, p, "%d"); +} + +static void print_misc_parameters_hdrs(struct trace_seq *p, + const u32 *mask, const u32 *value) +{ +#define MASK_VAL_MISC(type, name, fld) \ + MASK_VAL(type, fte_match_set_misc, name, mask, value, fld) +#define PRINT_MASKED_VAL_MISC(type, name, fld, p, format) {\ + MASK_VAL_MISC(type, name, fld); \ + PRINT_MASKED_VAL(name, p, format); \ +} + DECLARE_MASK_VAL(u64, gre_key) = { + .m = MLX5_GET(fte_match_set_misc, mask, gre_key_h) << 8 | + MLX5_GET(fte_match_set_misc, mask, gre_key_l), + .v = MLX5_GET(fte_match_set_misc, value, gre_key_h) << 8 | + MLX5_GET(fte_match_set_misc, value, gre_key_l)}; + + PRINT_MASKED_VAL(gre_key, p, "%llu"); + PRINT_MASKED_VAL_MISC(u32, source_sqn, source_sqn, p, "%u"); + PRINT_MASKED_VAL_MISC(u16, source_port, source_port, p, "%u"); + PRINT_MASKED_VAL_MISC(u8, outer_second_prio, outer_second_prio, + p, "%u"); + PRINT_MASKED_VAL_MISC(u8, outer_second_cfi, outer_second_cfi, p, "%u"); + PRINT_MASKED_VAL_MISC(u16, outer_second_vid, outer_second_vid, p, "%u"); + PRINT_MASKED_VAL_MISC(u8, inner_second_prio, inner_second_prio, + p, "%u"); + PRINT_MASKED_VAL_MISC(u8, inner_second_cfi, inner_second_cfi, p, "%u"); + PRINT_MASKED_VAL_MISC(u16, inner_second_vid, inner_second_vid, p, "%u"); + + PRINT_MASKED_VAL_MISC(u8, outer_second_cvlan_tag, + outer_second_cvlan_tag, p, "%u"); + PRINT_MASKED_VAL_MISC(u8, inner_second_cvlan_tag, + inner_second_cvlan_tag, p, "%u"); + PRINT_MASKED_VAL_MISC(u8, outer_second_svlan_tag, + outer_second_svlan_tag, p, "%u"); + PRINT_MASKED_VAL_MISC(u8, inner_second_svlan_tag, + inner_second_svlan_tag, p, "%u"); + + PRINT_MASKED_VAL_MISC(u8, gre_protocol, gre_protocol, p, "%u"); + + PRINT_MASKED_VAL_MISC(u32, vxlan_vni, vxlan_vni, p, "%u"); + PRINT_MASKED_VAL_MISC(u32, outer_ipv6_flow_label, outer_ipv6_flow_label, + p, "%x"); + PRINT_MASKED_VAL_MISC(u32, inner_ipv6_flow_label, inner_ipv6_flow_label, + p, "%x"); +} + +const char *parse_fs_hdrs(struct trace_seq *p, + u8 match_criteria_enable, + const u32 *mask_outer, + const u32 *mask_misc, + const u32 *mask_inner, + const u32 *value_outer, + const u32 *value_misc, + const u32 *value_inner) +{ + const char *ret = trace_seq_buffer_ptr(p); + + if (match_criteria_enable & + 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS) { + trace_seq_printf(p, "[outer] "); + print_lyr_2_4_hdrs(p, mask_outer, value_outer); + } + + if (match_criteria_enable & + 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS) { + trace_seq_printf(p, "[misc] "); + print_misc_parameters_hdrs(p, mask_misc, value_misc); + } + if (match_criteria_enable & + 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS) { + trace_seq_printf(p, "[inner] "); + 
print_lyr_2_4_hdrs(p, mask_inner, value_inner); + } + trace_seq_putc(p, 0); + return ret; +} + +const char *parse_fs_dst(struct trace_seq *p, + const struct mlx5_flow_destination *dst, + u32 counter_id) +{ + const char *ret = trace_seq_buffer_ptr(p); + + switch (dst->type) { + case MLX5_FLOW_DESTINATION_TYPE_VPORT: + trace_seq_printf(p, "vport=%u\n", dst->vport_num); + break; + case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE: + trace_seq_printf(p, "ft=%p\n", dst->ft); + break; + case MLX5_FLOW_DESTINATION_TYPE_TIR: + trace_seq_printf(p, "tir=%u\n", dst->tir_num); + break; + case MLX5_FLOW_DESTINATION_TYPE_COUNTER: + trace_seq_printf(p, "counter_id=%u\n", counter_id); + break; + } + + trace_seq_putc(p, 0); + return ret; +} + +EXPORT_TRACEPOINT_SYMBOL(mlx5_fs_add_fg); +EXPORT_TRACEPOINT_SYMBOL(mlx5_fs_del_fg); +EXPORT_TRACEPOINT_SYMBOL(mlx5_fs_set_fte); +EXPORT_TRACEPOINT_SYMBOL(mlx5_fs_del_fte); +EXPORT_TRACEPOINT_SYMBOL(mlx5_fs_add_rule); +EXPORT_TRACEPOINT_SYMBOL(mlx5_fs_del_rule); + diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h new file mode 100644 index 000000000000..80eef4163f52 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h @@ -0,0 +1,282 @@ +/* + * Copyright (c) 2017, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#if !defined(_MLX5_FS_TP_) || defined(TRACE_HEADER_MULTI_READ) +#define _MLX5_FS_TP_ + +#include +#include +#include "../fs_core.h" + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM mlx5 + +#define __parse_fs_hdrs(match_criteria_enable, mouter, mmisc, minner, vouter, \ + vinner, vmisc) \ + parse_fs_hdrs(p, match_criteria_enable, mouter, mmisc, minner, vouter,\ + vinner, vmisc) + +const char *parse_fs_hdrs(struct trace_seq *p, + u8 match_criteria_enable, + const u32 *mask_outer, + const u32 *mask_misc, + const u32 *mask_inner, + const u32 *value_outer, + const u32 *value_misc, + const u32 *value_inner); + +#define __parse_fs_dst(dst, counter_id) \ + parse_fs_dst(p, (const struct mlx5_flow_destination *)dst, counter_id) + +const char *parse_fs_dst(struct trace_seq *p, + const struct mlx5_flow_destination *dst, + u32 counter_id); + +TRACE_EVENT(mlx5_fs_add_fg, + TP_PROTO(const struct mlx5_flow_group *fg), + TP_ARGS(fg), + TP_STRUCT__entry( + __field(const struct mlx5_flow_group *, fg) + __field(const struct mlx5_flow_table *, ft) + __field(u32, start_index) + __field(u32, end_index) + __field(u32, id) + __field(u8, mask_enable) + __array(u32, mask_outer, MLX5_ST_SZ_DW(fte_match_set_lyr_2_4)) + __array(u32, mask_inner, MLX5_ST_SZ_DW(fte_match_set_lyr_2_4)) + __array(u32, mask_misc, MLX5_ST_SZ_DW(fte_match_set_misc)) + ), + TP_fast_assign( + __entry->fg = fg; + fs_get_obj(__entry->ft, fg->node.parent); + __entry->start_index = fg->start_index; + __entry->end_index = fg->start_index + fg->max_ftes; + __entry->id = fg->id; + __entry->mask_enable = fg->mask.match_criteria_enable; + memcpy(__entry->mask_outer, + MLX5_ADDR_OF(fte_match_param, + &fg->mask.match_criteria, + outer_headers), + sizeof(__entry->mask_outer)); + memcpy(__entry->mask_inner, + MLX5_ADDR_OF(fte_match_param, + &fg->mask.match_criteria, + inner_headers), + sizeof(__entry->mask_inner)); + memcpy(__entry->mask_misc, + MLX5_ADDR_OF(fte_match_param, + &fg->mask.match_criteria, + misc_parameters), + sizeof(__entry->mask_misc)); + + ), + TP_printk("fg=%p ft=%p id=%u start=%u end=%u bit_mask=%02x %s\n", + __entry->fg, __entry->ft, __entry->id, + __entry->start_index, __entry->end_index, + __entry->mask_enable, + __parse_fs_hdrs(__entry->mask_enable, + __entry->mask_outer, + __entry->mask_misc, + __entry->mask_inner, + __entry->mask_outer, + __entry->mask_misc, + __entry->mask_inner)) + ); + +TRACE_EVENT(mlx5_fs_del_fg, + TP_PROTO(const struct mlx5_flow_group *fg), + TP_ARGS(fg), + TP_STRUCT__entry( + __field(const struct mlx5_flow_group *, fg) + __field(u32, id) + ), + TP_fast_assign( + __entry->fg = fg; + __entry->id = fg->id; + + ), + TP_printk("fg=%p id=%u\n", + __entry->fg, __entry->id) + ); + +#define ACTION_FLAGS \ + {MLX5_FLOW_CONTEXT_ACTION_ALLOW, "ALLOW"},\ + {MLX5_FLOW_CONTEXT_ACTION_DROP, "DROP"},\ + {MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, "FWD"},\ + {MLX5_FLOW_CONTEXT_ACTION_COUNT, "CNT"},\ + {MLX5_FLOW_CONTEXT_ACTION_ENCAP, "ENCAP"},\ + {MLX5_FLOW_CONTEXT_ACTION_DECAP, "DECAP"},\ + {MLX5_FLOW_CONTEXT_ACTION_MOD_HDR, "MOD_HDR"},\ + {MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO, "NEXT_PRIO"} + +TRACE_EVENT(mlx5_fs_set_fte, + TP_PROTO(const struct fs_fte *fte, int new_fte), + TP_ARGS(fte, new_fte), + TP_STRUCT__entry( + __field(const struct fs_fte *, fte) + __field(const struct mlx5_flow_group *, fg) + __field(u32, group_index) + __field(u32, index) + __field(u32, action) + __field(u32, flow_tag) + __field(u8, mask_enable) + __field(int, new_fte) + __array(u32, mask_outer, MLX5_ST_SZ_DW(fte_match_set_lyr_2_4)) + __array(u32, 
mask_inner, MLX5_ST_SZ_DW(fte_match_set_lyr_2_4)) + __array(u32, mask_misc, MLX5_ST_SZ_DW(fte_match_set_misc)) + __array(u32, value_outer, MLX5_ST_SZ_DW(fte_match_set_lyr_2_4)) + __array(u32, value_inner, MLX5_ST_SZ_DW(fte_match_set_lyr_2_4)) + __array(u32, value_misc, MLX5_ST_SZ_DW(fte_match_set_misc)) + ), + TP_fast_assign( + __entry->fte = fte; + __entry->new_fte = new_fte; + fs_get_obj(__entry->fg, fte->node.parent); + __entry->group_index = __entry->fg->id; + __entry->index = fte->index; + __entry->action = fte->action; + __entry->mask_enable = __entry->fg->mask.match_criteria_enable; + __entry->flow_tag = fte->flow_tag; + memcpy(__entry->mask_outer, + MLX5_ADDR_OF(fte_match_param, + &__entry->fg->mask.match_criteria, + outer_headers), + sizeof(__entry->mask_outer)); + memcpy(__entry->mask_inner, + MLX5_ADDR_OF(fte_match_param, + &__entry->fg->mask.match_criteria, + inner_headers), + sizeof(__entry->mask_inner)); + memcpy(__entry->mask_misc, + MLX5_ADDR_OF(fte_match_param, + &__entry->fg->mask.match_criteria, + misc_parameters), + sizeof(__entry->mask_misc)); + memcpy(__entry->value_outer, + MLX5_ADDR_OF(fte_match_param, + &fte->val, + outer_headers), + sizeof(__entry->value_outer)); + memcpy(__entry->value_inner, + MLX5_ADDR_OF(fte_match_param, + &fte->val, + inner_headers), + sizeof(__entry->value_inner)); + memcpy(__entry->value_misc, + MLX5_ADDR_OF(fte_match_param, + &fte->val, + misc_parameters), + sizeof(__entry->value_misc)); + + ), + TP_printk("op=%s fte=%p fg=%p index=%u group_index=%u action=<%s> flow_tag=%x %s\n", + __entry->new_fte ? "add" : "set", + __entry->fte, __entry->fg, __entry->index, + __entry->group_index, __print_flags(__entry->action, "|", + ACTION_FLAGS), + __entry->flow_tag, + __parse_fs_hdrs(__entry->mask_enable, + __entry->mask_outer, + __entry->mask_misc, + __entry->mask_inner, + __entry->value_outer, + __entry->value_misc, + __entry->value_inner)) + ); + +TRACE_EVENT(mlx5_fs_del_fte, + TP_PROTO(const struct fs_fte *fte), + TP_ARGS(fte), + TP_STRUCT__entry( + __field(const struct fs_fte *, fte) + __field(u32, index) + ), + TP_fast_assign( + __entry->fte = fte; + __entry->index = fte->index; + + ), + TP_printk("fte=%p index=%u\n", + __entry->fte, __entry->index) + ); + +TRACE_EVENT(mlx5_fs_add_rule, + TP_PROTO(const struct mlx5_flow_rule *rule), + TP_ARGS(rule), + TP_STRUCT__entry( + __field(const struct mlx5_flow_rule *, rule) + __field(const struct fs_fte *, fte) + __field(u32, sw_action) + __field(u32, index) + __field(u32, counter_id) + __array(u8, destination, sizeof(struct mlx5_flow_destination)) + ), + TP_fast_assign( + __entry->rule = rule; + fs_get_obj(__entry->fte, rule->node.parent); + __entry->index = __entry->fte->dests_size - 1; + __entry->sw_action = rule->sw_action; + memcpy(__entry->destination, + &rule->dest_attr, + sizeof(__entry->destination)); + if (rule->dest_attr.type & MLX5_FLOW_DESTINATION_TYPE_COUNTER && + rule->dest_attr.counter) + __entry->counter_id = + rule->dest_attr.counter->id; + ), + TP_printk("rule=%p fte=%p index=%u sw_action=<%s> [dst] %s\n", + __entry->rule, __entry->fte, __entry->index, + __print_flags(__entry->sw_action, "|", ACTION_FLAGS), + __parse_fs_dst(__entry->destination, __entry->counter_id)) + ); + +TRACE_EVENT(mlx5_fs_del_rule, + TP_PROTO(const struct mlx5_flow_rule *rule), + TP_ARGS(rule), + TP_STRUCT__entry( + __field(const struct mlx5_flow_rule *, rule) + __field(const struct fs_fte *, fte) + ), + TP_fast_assign( + __entry->rule = rule; + fs_get_obj(__entry->fte, rule->node.parent); + ), + 
TP_printk("rule=%p fte=%p\n", + __entry->rule, __entry->fte) + ); +#endif + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH ./diag +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE fs_tracepoint +#include diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 040d1af310b4..cc13d3dbd366 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -291,10 +291,11 @@ struct mlx5e_tstamp { enum { MLX5E_RQ_STATE_ENABLED, - MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, MLX5E_RQ_STATE_AM, }; +#define MLX5E_TEST_BIT(state, nr) (state & BIT(nr)) + struct mlx5e_cq { /* data path - accessed per cqe */ struct mlx5_cqwq wq; @@ -342,7 +343,6 @@ enum { struct mlx5e_sq_wqe_info { u8 opcode; - u8 num_wqebbs; }; struct mlx5e_txqsq { @@ -418,13 +418,8 @@ struct mlx5e_xdpsq { struct mlx5e_icosq { /* data path */ - /* dirtied @completion */ - u16 cc; - /* dirtied @xmit */ u16 pc ____cacheline_aligned_in_smp; - u32 dma_fifo_pc; - u16 prev_cc; struct mlx5e_cq cq; @@ -438,7 +433,6 @@ struct mlx5e_icosq { void __iomem *uar_map; u32 sqn; u16 edge; - struct device *pdev; __be32 mkey_be; unsigned long state; @@ -507,7 +501,7 @@ struct mlx5e_rx_am { /* Adaptive Moderation */ */ #define MLX5E_CACHE_UNIT (MLX5_MPWRQ_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \ MLX5_MPWRQ_PAGES_PER_WQE : NAPI_POLL_WEIGHT) -#define MLX5E_CACHE_SIZE (2 * roundup_pow_of_two(MLX5E_CACHE_UNIT)) +#define MLX5E_CACHE_SIZE (4 * roundup_pow_of_two(MLX5E_CACHE_UNIT)) struct mlx5e_page_cache { u32 head; u32 tail; @@ -516,7 +510,7 @@ struct mlx5e_page_cache { struct mlx5e_rq; typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*); -typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq*, struct mlx5e_rx_wqe*, u16); +typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq); typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16); struct mlx5e_rq { @@ -527,21 +521,26 @@ struct mlx5e_rq { struct { struct mlx5e_wqe_frag_info *frag_info; u32 frag_sz; /* max possible skb frag_sz */ - bool page_reuse; - bool xdp_xmit; + union { + bool page_reuse; + bool xdp_xmit; + }; } wqe; struct { struct mlx5e_mpw_info *info; void *mtt_no_align; + u16 num_strides; + u8 log_stride_sz; + bool umr_in_progress; } mpwqe; }; struct { + u16 headroom; u8 page_order; - u32 wqe_sz; /* wqe data buffer size */ u8 map_dir; /* dma map direction */ } buff; - __be32 mkey_be; + struct mlx5e_channel *channel; struct device *pdev; struct net_device *netdev; struct mlx5e_tstamp *tstamp; @@ -550,12 +549,11 @@ struct mlx5e_rq { struct mlx5e_page_cache page_cache; mlx5e_fp_handle_rx_cqe handle_rx_cqe; - mlx5e_fp_alloc_wqe alloc_wqe; + mlx5e_fp_post_rx_wqes post_wqes; mlx5e_fp_dealloc_wqe dealloc_wqe; unsigned long state; int ix; - u16 rx_headroom; struct mlx5e_rx_am am; /* Adaptive Moderation */ @@ -565,19 +563,13 @@ struct mlx5e_rq { /* control */ struct mlx5_wq_ctrl wq_ctrl; + __be32 mkey_be; u8 wq_type; - u32 mpwqe_stride_sz; - u32 mpwqe_num_strides; u32 rqn; - struct mlx5e_channel *channel; struct mlx5_core_dev *mdev; struct mlx5_core_mkey umr_mkey; } ____cacheline_aligned_in_smp; -enum channel_flags { - MLX5E_CHANNEL_NAPI_SCHED = 1, -}; - struct mlx5e_channel { /* data path */ struct mlx5e_rq rq; @@ -589,7 +581,9 @@ struct mlx5e_channel { struct net_device *netdev; __be32 mkey_be; u8 num_tc; - unsigned long flags; + + /* data path - accessed per napi poll */ + struct irq_desc *irq_desc; /* control */ struct mlx5e_priv *priv; @@ -620,6 +614,12 @@ enum 
mlx5e_traffic_types { MLX5E_NUM_INDIR_TIRS = MLX5E_TT_ANY, }; +enum mlx5e_tunnel_types { + MLX5E_TT_IPV4_GRE, + MLX5E_TT_IPV6_GRE, + MLX5E_NUM_TUNNEL_TT, +}; + enum { MLX5E_STATE_ASYNC_EVENTS_ENABLED, MLX5E_STATE_OPENED, @@ -679,6 +679,7 @@ struct mlx5e_l2_table { struct mlx5e_ttc_table { struct mlx5e_flow_table ft; struct mlx5_flow_handle *rules[MLX5E_NUM_TT]; + struct mlx5_flow_handle *tunnel_rules[MLX5E_NUM_TUNNEL_TT]; }; #define ARFS_HASH_SHIFT BITS_PER_BYTE @@ -711,6 +712,7 @@ enum { MLX5E_VLAN_FT_LEVEL = 0, MLX5E_L2_FT_LEVEL, MLX5E_TTC_FT_LEVEL, + MLX5E_INNER_TTC_FT_LEVEL, MLX5E_ARFS_FT_LEVEL }; @@ -736,6 +738,7 @@ struct mlx5e_flow_steering { struct mlx5e_vlan_table vlan; struct mlx5e_l2_table l2; struct mlx5e_ttc_table ttc; + struct mlx5e_ttc_table inner_ttc; struct mlx5e_arfs_tables arfs; }; @@ -769,6 +772,7 @@ struct mlx5e_priv { u32 tisn[MLX5E_MAX_NUM_TC]; struct mlx5e_rqt indir_rqt; struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS]; + struct mlx5e_tir inner_indir_tir[MLX5E_NUM_INDIR_TIRS]; struct mlx5e_tir direct_tir[MLX5E_MAX_NUM_CHANNELS]; u32 tx_rates[MLX5E_MAX_NUM_SQS]; int hard_mtu; @@ -839,11 +843,9 @@ void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info, void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq); -int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix); -int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix); +bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq); void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix); void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix); -void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq); void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi); void mlx5e_rx_am(struct mlx5e_rq *rq); @@ -903,7 +905,7 @@ int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, struct mlx5e_redirect_rqt_param rrp); void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params, enum mlx5e_traffic_types tt, - void *tirc); + void *tirc, bool inner); int mlx5e_open_locked(struct net_device *netdev); int mlx5e_close_locked(struct net_device *netdev); @@ -922,8 +924,7 @@ void mlx5e_switch_priv_channels(struct mlx5e_priv *priv, void mlx5e_activate_priv_channels(struct mlx5e_priv *priv); void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv); -void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev, - u32 *indirection_rqt, int len, +void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len, int num_channels); int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); @@ -932,6 +933,12 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params, u8 rq_type); +static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev) +{ + return (MLX5_CAP_ETH(mdev, tunnel_stateless_gre) && + MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version)); +} + static inline struct mlx5e_tx_wqe *mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index f5594014715b..d12e9fc0d76b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -176,7 +176,6 @@ static bool mlx5e_query_global_pause_combined(struct 
mlx5e_priv *priv) int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset) { - switch (sset) { case ETH_SS_STATS: return NUM_SW_COUNTERS + @@ -207,7 +206,7 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset) return mlx5e_ethtool_get_sset_count(priv, sset); } -static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data) +static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, u8 *data) { int i, j, tc, prio, idx = 0; unsigned long pfc_combined; @@ -242,10 +241,22 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data) strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_phy_statistical_stats_desc[i].format); + for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS(priv); i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + pport_eth_ext_stats_desc[i].format); + for (i = 0; i < NUM_PCIE_PERF_COUNTERS(priv); i++) strcpy(data + (idx++) * ETH_GSTRING_LEN, pcie_perf_stats_desc[i].format); + for (i = 0; i < NUM_PCIE_PERF_COUNTERS64(priv); i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + pcie_perf_stats_desc64[i].format); + + for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS(priv); i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + pcie_perf_stall_stats_desc[i].format); + for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++) sprintf(data + (idx++) * ETH_GSTRING_LEN, @@ -297,8 +308,7 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data) priv->channel_tc2txq[i][tc]); } -void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, - uint32_t stringset, uint8_t *data) +void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, u32 stringset, u8 *data) { int i; @@ -320,8 +330,7 @@ void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, } } -static void mlx5e_get_strings(struct net_device *dev, - uint32_t stringset, uint8_t *data) +static void mlx5e_get_strings(struct net_device *dev, u32 stringset, u8 *data) { struct mlx5e_priv *priv = netdev_priv(dev); @@ -373,10 +382,22 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv, data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters, pport_phy_statistical_stats_desc, i); + for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS(priv); i++) + data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters, + pport_eth_ext_stats_desc, i); + for (i = 0; i < NUM_PCIE_PERF_COUNTERS(priv); i++) data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters, pcie_perf_stats_desc, i); + for (i = 0; i < NUM_PCIE_PERF_COUNTERS64(priv); i++) + data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters, + pcie_perf_stats_desc64, i); + + for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS(priv); i++) + data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters, + pcie_perf_stall_stats_desc, i); + for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++) data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio], @@ -642,8 +663,7 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, new_channels.params = priv->channels.params; new_channels.params.num_channels = count; if (!netif_is_rxfh_configured(priv->netdev)) - mlx5e_build_default_indir_rqt(priv->mdev, - new_channels.params.indirection_rqt, + mlx5e_build_default_indir_rqt(new_channels.params.indirection_rqt, MLX5E_INDIR_RQT_SIZE, count); if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { @@ -966,24 +986,27 @@ static u8 get_connector_port(u32 eth_proto, u8 connector_type) if 
(connector_type && connector_type < MLX5E_CONNECTOR_TYPE_NUMBER) return ptys2connector_type[connector_type]; - if (eth_proto & (MLX5E_PROT_MASK(MLX5E_10GBASE_SR) - | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4) - | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4) - | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) { - return PORT_FIBRE; + if (eth_proto & + (MLX5E_PROT_MASK(MLX5E_10GBASE_SR) | + MLX5E_PROT_MASK(MLX5E_40GBASE_SR4) | + MLX5E_PROT_MASK(MLX5E_100GBASE_SR4) | + MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) { + return PORT_FIBRE; } - if (eth_proto & (MLX5E_PROT_MASK(MLX5E_40GBASE_CR4) - | MLX5E_PROT_MASK(MLX5E_10GBASE_CR) - | MLX5E_PROT_MASK(MLX5E_100GBASE_CR4))) { - return PORT_DA; + if (eth_proto & + (MLX5E_PROT_MASK(MLX5E_40GBASE_CR4) | + MLX5E_PROT_MASK(MLX5E_10GBASE_CR) | + MLX5E_PROT_MASK(MLX5E_100GBASE_CR4))) { + return PORT_DA; } - if (eth_proto & (MLX5E_PROT_MASK(MLX5E_10GBASE_KX4) - | MLX5E_PROT_MASK(MLX5E_10GBASE_KR) - | MLX5E_PROT_MASK(MLX5E_40GBASE_KR4) - | MLX5E_PROT_MASK(MLX5E_100GBASE_KR4))) { - return PORT_NONE; + if (eth_proto & + (MLX5E_PROT_MASK(MLX5E_10GBASE_KX4) | + MLX5E_PROT_MASK(MLX5E_10GBASE_KR) | + MLX5E_PROT_MASK(MLX5E_40GBASE_KR4) | + MLX5E_PROT_MASK(MLX5E_100GBASE_KR4))) { + return PORT_NONE; } return PORT_OTHER; @@ -1190,9 +1213,18 @@ static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen) for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { memset(tirc, 0, ctxlen); - mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc); + mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, false); mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen); } + + if (!mlx5e_tunnel_inner_ft_supported(priv->mdev)) + return; + + for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { + memset(tirc, 0, ctxlen); + mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, true); + mlx5_core_modify_tir(mdev, priv->inner_indir_tir[tt].tirn, in, inlen); + } } static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index dfccb5305e9c..850cdc980ab5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -36,6 +36,7 @@ #include #include #include "en.h" +#include "lib/mpfs.h" static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv, struct mlx5e_l2_rule *ai, int type); @@ -65,6 +66,7 @@ struct mlx5e_l2_hash_node { struct hlist_node hlist; u8 action; struct mlx5e_l2_rule ai; + bool mpfs; }; static inline int mlx5e_hash_l2(u8 *addr) @@ -289,7 +291,7 @@ void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv) priv->fs.vlan.filter_disabled = false; if (priv->netdev->flags & IFF_PROMISC) return; - mlx5e_del_any_vid_rules(priv); + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0); } void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv) @@ -300,7 +302,7 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv) priv->fs.vlan.filter_disabled = true; if (priv->netdev->flags & IFF_PROMISC) return; - mlx5e_add_any_vid_rules(priv); + mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0); } int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, @@ -362,17 +364,30 @@ static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv) static void mlx5e_execute_l2_action(struct mlx5e_priv *priv, struct mlx5e_l2_hash_node *hn) { - switch (hn->action) { + u8 action = hn->action; + int l2_err = 0; + + switch (action) { case MLX5E_ACTION_ADD: 
mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH); + if (!is_multicast_ether_addr(hn->ai.addr)) { + l2_err = mlx5_mpfs_add_mac(priv->mdev, hn->ai.addr); + hn->mpfs = !l2_err; + } hn->action = MLX5E_ACTION_NONE; break; case MLX5E_ACTION_DEL: + if (!is_multicast_ether_addr(hn->ai.addr) && hn->mpfs) + l2_err = mlx5_mpfs_del_mac(priv->mdev, hn->ai.addr); mlx5e_del_l2_flow_rule(priv, &hn->ai); mlx5e_del_l2_from_hash(hn); break; } + + if (l2_err) + netdev_warn(priv->netdev, "MPFS, failed to %s mac %pM, err(%d)\n", + action == MLX5E_ACTION_ADD ? "add" : "del", hn->ai.addr, l2_err); } static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv) @@ -593,12 +608,21 @@ static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc) ttc->rules[i] = NULL; } } + + for (i = 0; i < MLX5E_NUM_TUNNEL_TT; i++) { + if (!IS_ERR_OR_NULL(ttc->tunnel_rules[i])) { + mlx5_del_flow_rules(ttc->tunnel_rules[i]); + ttc->tunnel_rules[i] = NULL; + } + } } -static struct { +struct mlx5e_etype_proto { u16 etype; u8 proto; -} ttc_rules[] = { +}; + +static struct mlx5e_etype_proto ttc_rules[] = { [MLX5E_TT_IPV4_TCP] = { .etype = ETH_P_IP, .proto = IPPROTO_TCP, @@ -645,6 +669,28 @@ static struct { }, }; +static struct mlx5e_etype_proto ttc_tunnel_rules[] = { + [MLX5E_TT_IPV4_GRE] = { + .etype = ETH_P_IP, + .proto = IPPROTO_GRE, + }, + [MLX5E_TT_IPV6_GRE] = { + .etype = ETH_P_IPV6, + .proto = IPPROTO_GRE, + }, +}; + +static u8 mlx5e_etype_to_ipv(u16 ethertype) +{ + if (ethertype == ETH_P_IP) + return 4; + + if (ethertype == ETH_P_IPV6) + return 6; + + return 0; +} + static struct mlx5_flow_handle * mlx5e_generate_ttc_rule(struct mlx5e_priv *priv, struct mlx5_flow_table *ft, @@ -652,10 +698,12 @@ mlx5e_generate_ttc_rule(struct mlx5e_priv *priv, u16 etype, u8 proto) { + int match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version); MLX5_DECLARE_FLOW_ACT(flow_act); struct mlx5_flow_handle *rule; struct mlx5_flow_spec *spec; int err = 0; + u8 ipv; spec = kvzalloc(sizeof(*spec), GFP_KERNEL); if (!spec) @@ -666,7 +714,13 @@ mlx5e_generate_ttc_rule(struct mlx5e_priv *priv, MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol); MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, proto); } - if (etype) { + + ipv = mlx5e_etype_to_ipv(etype); + if (match_ipv_outer && ipv) { + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, ipv); + } else if (etype) { spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype); MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype); @@ -708,6 +762,20 @@ static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv) goto del_rules; } + if (!mlx5e_tunnel_inner_ft_supported(priv->mdev)) + return 0; + + rules = ttc->tunnel_rules; + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest.ft = priv->fs.inner_ttc.ft.t; + for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) { + rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest, + ttc_tunnel_rules[tt].etype, + ttc_tunnel_rules[tt].proto); + if (IS_ERR(rules[tt])) + goto del_rules; + } + return 0; del_rules: @@ -718,13 +786,23 @@ static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv) } #define MLX5E_TTC_NUM_GROUPS 3 -#define MLX5E_TTC_GROUP1_SIZE BIT(3) -#define MLX5E_TTC_GROUP2_SIZE BIT(1) -#define 
MLX5E_TTC_GROUP3_SIZE BIT(0) +#define MLX5E_TTC_GROUP1_SIZE (BIT(3) + MLX5E_NUM_TUNNEL_TT) +#define MLX5E_TTC_GROUP2_SIZE BIT(1) +#define MLX5E_TTC_GROUP3_SIZE BIT(0) #define MLX5E_TTC_TABLE_SIZE (MLX5E_TTC_GROUP1_SIZE +\ MLX5E_TTC_GROUP2_SIZE +\ MLX5E_TTC_GROUP3_SIZE) -static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc) + +#define MLX5E_INNER_TTC_NUM_GROUPS 3 +#define MLX5E_INNER_TTC_GROUP1_SIZE BIT(3) +#define MLX5E_INNER_TTC_GROUP2_SIZE BIT(1) +#define MLX5E_INNER_TTC_GROUP3_SIZE BIT(0) +#define MLX5E_INNER_TTC_TABLE_SIZE (MLX5E_INNER_TTC_GROUP1_SIZE +\ + MLX5E_INNER_TTC_GROUP2_SIZE +\ + MLX5E_INNER_TTC_GROUP3_SIZE) + +static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc, + bool use_ipv) { int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); struct mlx5e_flow_table *ft = &ttc->ft; @@ -746,7 +824,10 @@ static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc) /* L4 Group */ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); + if (use_ipv) + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_version); + else + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); MLX5_SET_CFG(in, start_flow_index, ix); ix += MLX5E_TTC_GROUP1_SIZE; @@ -787,6 +868,190 @@ static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc) return err; } +static struct mlx5_flow_handle * +mlx5e_generate_inner_ttc_rule(struct mlx5e_priv *priv, + struct mlx5_flow_table *ft, + struct mlx5_flow_destination *dest, + u16 etype, u8 proto) +{ + MLX5_DECLARE_FLOW_ACT(flow_act); + struct mlx5_flow_handle *rule; + struct mlx5_flow_spec *spec; + int err = 0; + u8 ipv; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return ERR_PTR(-ENOMEM); + + ipv = mlx5e_etype_to_ipv(etype); + if (etype && ipv) { + spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_version); + MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_version, ipv); + } + + if (proto) { + spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_protocol); + MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_protocol, proto); + } + + rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + netdev_err(priv->netdev, "%s: add rule failed\n", __func__); + } + + kvfree(spec); + return err ? 
ERR_PTR(err) : rule; +} + +static int mlx5e_generate_inner_ttc_table_rules(struct mlx5e_priv *priv) +{ + struct mlx5_flow_destination dest; + struct mlx5_flow_handle **rules; + struct mlx5e_ttc_table *ttc; + struct mlx5_flow_table *ft; + int err; + int tt; + + ttc = &priv->fs.inner_ttc; + ft = ttc->ft.t; + rules = ttc->rules; + + dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; + for (tt = 0; tt < MLX5E_NUM_TT; tt++) { + if (tt == MLX5E_TT_ANY) + dest.tir_num = priv->direct_tir[0].tirn; + else + dest.tir_num = priv->inner_indir_tir[tt].tirn; + + rules[tt] = mlx5e_generate_inner_ttc_rule(priv, ft, &dest, + ttc_rules[tt].etype, + ttc_rules[tt].proto); + if (IS_ERR(rules[tt])) + goto del_rules; + } + + return 0; + +del_rules: + err = PTR_ERR(rules[tt]); + rules[tt] = NULL; + mlx5e_cleanup_ttc_rules(ttc); + return err; +} + +static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5e_flow_table *ft = &ttc->ft; + int ix = 0; + u32 *in; + int err; + u8 *mc; + + ft->g = kcalloc(MLX5E_INNER_TTC_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL); + if (!ft->g) + return -ENOMEM; + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) { + kfree(ft->g); + return -ENOMEM; + } + + /* L4 Group */ + mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); + MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol); + MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_version); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_INNER_TTC_GROUP1_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err; + ft->num_groups++; + + /* L3 Group */ + MLX5_SET(fte_match_param, mc, inner_headers.ip_protocol, 0); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_INNER_TTC_GROUP2_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err; + ft->num_groups++; + + /* Any Group */ + memset(in, 0, inlen); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_INNER_TTC_GROUP3_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err; + ft->num_groups++; + + kvfree(in); + return 0; + +err: + err = PTR_ERR(ft->g[ft->num_groups]); + ft->g[ft->num_groups] = NULL; + kvfree(in); + + return err; +} + +static int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv) +{ + struct mlx5e_ttc_table *ttc = &priv->fs.inner_ttc; + struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5e_flow_table *ft = &ttc->ft; + int err; + + if (!mlx5e_tunnel_inner_ft_supported(priv->mdev)) + return 0; + + ft_attr.max_fte = MLX5E_INNER_TTC_TABLE_SIZE; + ft_attr.level = MLX5E_INNER_TTC_FT_LEVEL; + ft_attr.prio = MLX5E_NIC_PRIO; + + ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr); + if (IS_ERR(ft->t)) { + err = PTR_ERR(ft->t); + ft->t = NULL; + return err; + } + + err = mlx5e_create_inner_ttc_table_groups(ttc); + if (err) + goto err; + + err = mlx5e_generate_inner_ttc_table_rules(priv); + if (err) + goto err; + + return 0; + +err: + mlx5e_destroy_flow_table(ft); + return err; +} + +static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv) +{ + struct mlx5e_ttc_table *ttc = &priv->fs.inner_ttc; + + if (!mlx5e_tunnel_inner_ft_supported(priv->mdev)) + return; + + 
mlx5e_cleanup_ttc_rules(ttc); + mlx5e_destroy_flow_table(&ttc->ft); +} + void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv) { struct mlx5e_ttc_table *ttc = &priv->fs.ttc; @@ -797,6 +1062,7 @@ void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv) int mlx5e_create_ttc_table(struct mlx5e_priv *priv) { + bool match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version); struct mlx5e_ttc_table *ttc = &priv->fs.ttc; struct mlx5_flow_table_attr ft_attr = {}; struct mlx5e_flow_table *ft = &ttc->ft; @@ -813,7 +1079,7 @@ int mlx5e_create_ttc_table(struct mlx5e_priv *priv) return err; } - err = mlx5e_create_ttc_table_groups(ttc); + err = mlx5e_create_ttc_table_groups(ttc, match_ipv_outer); if (err) goto err; @@ -1139,11 +1405,18 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv) priv->netdev->hw_features &= ~NETIF_F_NTUPLE; } + err = mlx5e_create_inner_ttc_table(priv); + if (err) { + netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n", + err); + goto err_destroy_arfs_tables; + } + err = mlx5e_create_ttc_table(priv); if (err) { netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n", err); - goto err_destroy_arfs_tables; + goto err_destroy_inner_ttc_table; } err = mlx5e_create_l2_table(priv); @@ -1168,6 +1441,8 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv) mlx5e_destroy_l2_table(priv); err_destroy_ttc_table: mlx5e_destroy_ttc_table(priv); +err_destroy_inner_ttc_table: + mlx5e_destroy_inner_ttc_table(priv); err_destroy_arfs_tables: mlx5e_arfs_destroy_tables(priv); @@ -1179,6 +1454,7 @@ void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv) mlx5e_destroy_vlan_table(priv); mlx5e_destroy_l2_table(priv); mlx5e_destroy_ttc_table(priv); + mlx5e_destroy_inner_ttc_table(priv); mlx5e_arfs_destroy_tables(priv); mlx5e_ethtool_cleanup_steering(priv); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index d75f3099d164..cc11bbbd0309 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -184,7 +184,6 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv) struct mlx5e_sw_stats temp, *s = &temp; struct mlx5e_rq_stats *rq_stats; struct mlx5e_sq_stats *sq_stats; - u64 tx_offload_none = 0; int i, j; memset(s, 0, sizeof(*s)); @@ -199,6 +198,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv) s->rx_lro_bytes += rq_stats->lro_bytes; s->rx_csum_none += rq_stats->csum_none; s->rx_csum_complete += rq_stats->csum_complete; + s->rx_csum_unnecessary += rq_stats->csum_unnecessary; s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner; s->rx_xdp_drop += rq_stats->xdp_drop; s->rx_xdp_tx += rq_stats->xdp_tx; @@ -213,6 +213,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv) s->rx_cache_full += rq_stats->cache_full; s->rx_cache_empty += rq_stats->cache_empty; s->rx_cache_busy += rq_stats->cache_busy; + s->rx_cache_waive += rq_stats->cache_waive; for (j = 0; j < priv->channels.params.num_tc; j++) { sq_stats = &c->sq[j].stats; @@ -228,14 +229,11 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv) s->tx_queue_dropped += sq_stats->dropped; s->tx_xmit_more += sq_stats->xmit_more; s->tx_csum_partial_inner += sq_stats->csum_partial_inner; - tx_offload_none += sq_stats->csum_none; + s->tx_csum_none += sq_stats->csum_none; + s->tx_csum_partial += sq_stats->csum_partial; } } - /* Update calculated offload counters */ - s->tx_csum_partial = s->tx_packets - 
tx_offload_none - s->tx_csum_partial_inner; - s->rx_csum_unnecessary = s->rx_packets - s->rx_csum_none - s->rx_csum_complete; - s->link_down_events_phy = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters, counter_set.phys_layer_cntrs.link_down_events); @@ -293,6 +291,12 @@ static void mlx5e_update_pport_counters(struct mlx5e_priv *priv, bool full) mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); } + if (MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters)) { + out = pstats->eth_ext_counters; + MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP); + mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); + } + MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP); for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { out = pstats->per_prio_counters[prio]; @@ -593,12 +597,12 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, } rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; - rq->rx_headroom = params->rq_headroom; + rq->buff.headroom = params->rq_headroom; switch (rq->wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - rq->alloc_wqe = mlx5e_alloc_rx_mpwqe; + rq->post_wqes = mlx5e_post_rx_mpwqes; rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe; rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe_mpwqe; @@ -615,11 +619,10 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, goto err_rq_wq_destroy; } - rq->mpwqe_stride_sz = BIT(params->mpwqe_log_stride_sz); - rq->mpwqe_num_strides = BIT(params->mpwqe_log_num_strides); + rq->mpwqe.log_stride_sz = params->mpwqe_log_stride_sz; + rq->mpwqe.num_strides = BIT(params->mpwqe_log_num_strides); - rq->buff.wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides; - byte_count = rq->buff.wqe_sz; + byte_count = rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz; err = mlx5e_create_rq_umr_mkey(mdev, rq); if (err) @@ -639,7 +642,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, err = -ENOMEM; goto err_rq_wq_destroy; } - rq->alloc_wqe = mlx5e_alloc_rx_wqe; + rq->post_wqes = mlx5e_post_rx_wqes; rq->dealloc_wqe = mlx5e_dealloc_rx_wqe; #ifdef CONFIG_MLX5_EN_IPSEC @@ -655,18 +658,17 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, goto err_rq_wq_destroy; } - rq->buff.wqe_sz = params->lro_en ? + byte_count = params->lro_en ? 
params->lro_wqe_sz : MLX5E_SW2HW_MTU(c->priv, c->netdev->mtu); #ifdef CONFIG_MLX5_EN_IPSEC if (MLX5_IPSEC_DEV(mdev)) - rq->buff.wqe_sz += MLX5E_METADATA_ETHER_LEN; + byte_count += MLX5E_METADATA_ETHER_LEN; #endif rq->wqe.page_reuse = !params->xdp_prog && !params->lro_en; - byte_count = rq->buff.wqe_sz; /* calc the required page order */ - rq->wqe.frag_sz = MLX5_SKB_FRAG_SZ(rq->rx_headroom + byte_count); + rq->wqe.frag_sz = MLX5_SKB_FRAG_SZ(rq->buff.headroom + byte_count); npages = DIV_ROUND_UP(rq->wqe.frag_sz, PAGE_SIZE); rq->buff.page_order = order_base_2(npages); @@ -677,6 +679,12 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, for (i = 0; i < wq_sz; i++) { struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i); + if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { + u64 dma_offset = (u64)mlx5e_get_wqe_mtt_offset(rq, i) << PAGE_SHIFT; + + wqe->data.addr = cpu_to_be64(dma_offset); + } + wqe->data.byte_count = cpu_to_be32(byte_count); wqe->data.lkey = rq->mkey_be; } @@ -883,7 +891,8 @@ static void mlx5e_free_rx_descs(struct mlx5e_rq *rq) u16 wqe_ix; /* UMR WQE (if in progress) is always at wq->head */ - if (test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state)) + if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ && + rq->mpwqe.umr_in_progress) mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]); while (!mlx5_wq_ll_is_empty(wq)) { @@ -926,7 +935,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c, goto err_destroy_rq; if (params->rx_am_enabled) - set_bit(MLX5E_RQ_STATE_AM, &c->rq.state); + c->rq.state |= BIT(MLX5E_RQ_STATE_AM); return 0; @@ -946,7 +955,6 @@ static void mlx5e_activate_rq(struct mlx5e_rq *rq) set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state); sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP; - sq->db.ico_wqe[pi].num_wqebbs = 1; nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc); mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nopwqe->ctrl); } @@ -1048,7 +1056,6 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c, struct mlx5_core_dev *mdev = c->mdev; int err; - sq->pdev = c->pdev; sq->mkey_be = c->mkey_be; sq->channel = c; sq->uar_map = mdev->mlx5e_res.bfreg.map; @@ -1752,7 +1759,9 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, struct mlx5e_cq_moder icocq_moder = {0, 0}; struct net_device *netdev = priv->netdev; struct mlx5e_channel *c; + unsigned int irq; int err; + int eqn; c = kzalloc_node(sizeof(*c), GFP_KERNEL, mlx5e_get_node(priv, ix)); if (!c) @@ -1768,6 +1777,9 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, c->num_tc = params->num_tc; c->xdp = !!params->xdp_prog; + mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq); + c->irq_desc = irq_to_desc(irq); + netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64); err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->icosq.cq); @@ -2345,9 +2357,10 @@ static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc) void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params, enum mlx5e_traffic_types tt, - void *tirc) + void *tirc, bool inner) { - void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer); + void *hfso = inner ? 
MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner) : + MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer); #define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\ MLX5_HASH_FIELD_SEL_DST_IP) @@ -2496,6 +2509,21 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv) return err; } +static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv, + enum mlx5e_traffic_types tt, + u32 *tirc) +{ + MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn); + + mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc); + + MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); + MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn); + MLX5_SET(tirc, tirc, tunneled_offload_en, 0x1); + + mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, true); +} + static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu) { struct mlx5_core_dev *mdev = priv->mdev; @@ -2583,12 +2611,6 @@ static void mlx5e_build_channels_tx_maps(struct mlx5e_priv *priv) } } -static bool mlx5e_is_eswitch_vport_mngr(struct mlx5_core_dev *mdev) -{ - return (MLX5_CAP_GEN(mdev, vport_group_manager) && - MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH); -} - void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) { int num_txqs = priv->channels.num * priv->channels.params.num_tc; @@ -2602,7 +2624,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) mlx5e_activate_channels(&priv->channels); netif_tx_start_all_queues(priv->netdev); - if (mlx5e_is_eswitch_vport_mngr(priv->mdev)) + if (MLX5_VPORT_MANAGER(priv->mdev)) mlx5e_add_sqs_fwd_rules(priv); mlx5e_wait_channels_min_rx_wqes(&priv->channels); @@ -2613,7 +2635,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv) { mlx5e_redirect_rqts_to_drop(priv); - if (mlx5e_is_eswitch_vport_mngr(priv->mdev)) + if (MLX5_VPORT_MANAGER(priv->mdev)) mlx5e_remove_sqs_fwd_rules(priv); /* FIXME: This is a W/A only for tx timeout watch dog false alarm when @@ -2690,6 +2712,8 @@ int mlx5e_open(struct net_device *netdev) mutex_lock(&priv->state_lock); err = mlx5e_open_locked(netdev); + if (!err) + mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP); mutex_unlock(&priv->state_lock); return err; @@ -2724,6 +2748,7 @@ int mlx5e_close(struct net_device *netdev) return -ENODEV; mutex_lock(&priv->state_lock); + mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_DOWN); err = mlx5e_close_locked(netdev); mutex_unlock(&priv->state_lock); @@ -2864,7 +2889,7 @@ static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn); - mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc); + mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, false); } static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc) @@ -2883,6 +2908,7 @@ int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv) struct mlx5e_tir *tir; void *tirc; int inlen; + int i = 0; int err; u32 *in; int tt; @@ -2898,16 +2924,36 @@ int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv) tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); mlx5e_build_indir_tir_ctx(priv, tt, tirc); err = mlx5e_create_tir(priv->mdev, tir, in, inlen); - if (err) - goto err_destroy_tirs; + if (err) { + mlx5_core_warn(priv->mdev, "create indirect tirs failed, %d\n", err); + goto err_destroy_inner_tirs; + } } + if (!mlx5e_tunnel_inner_ft_supported(priv->mdev)) + goto out; + + for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) { + memset(in, 0, inlen); + tir = 
&priv->inner_indir_tir[i]; + tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); + mlx5e_build_inner_indir_tir_ctx(priv, i, tirc); + err = mlx5e_create_tir(priv->mdev, tir, in, inlen); + if (err) { + mlx5_core_warn(priv->mdev, "create inner indirect tirs failed, %d\n", err); + goto err_destroy_inner_tirs; + } + } + +out: kvfree(in); return 0; -err_destroy_tirs: - mlx5_core_warn(priv->mdev, "create indirect tirs failed, %d\n", err); +err_destroy_inner_tirs: + for (i--; i >= 0; i--) + mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]); + for (tt--; tt >= 0; tt--) mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]); @@ -2961,6 +3007,12 @@ void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv) for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]); + + if (!mlx5e_tunnel_inner_ft_supported(priv->mdev)) + return; + + for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) + mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]); } void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv) @@ -3000,12 +3052,16 @@ static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd) return 0; } -static int mlx5e_setup_tc(struct net_device *netdev, u8 tc) +static int mlx5e_setup_tc_mqprio(struct net_device *netdev, + struct tc_mqprio_qopt *mqprio) { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5e_channels new_channels = {}; + u8 tc = mqprio->num_tc; int err = 0; + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + if (tc && tc != MLX5E_MAX_NUM_TC) return -EINVAL; @@ -3029,39 +3085,42 @@ static int mlx5e_setup_tc(struct net_device *netdev, u8 tc) return err; } -static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle, - u32 chain_index, __be16 proto, - struct tc_to_netdev *tc) +#ifdef CONFIG_MLX5_ESWITCH +static int mlx5e_setup_tc_cls_flower(struct net_device *dev, + struct tc_cls_flower_offload *cls_flower) { struct mlx5e_priv *priv = netdev_priv(dev); - if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS)) - goto mqprio; - - if (chain_index) + if (!is_classid_clsact_ingress(cls_flower->common.classid) || + cls_flower->common.chain_index) return -EOPNOTSUPP; - switch (tc->type) { - case TC_SETUP_CLSFLOWER: - switch (tc->cls_flower->command) { - case TC_CLSFLOWER_REPLACE: - return mlx5e_configure_flower(priv, proto, tc->cls_flower); - case TC_CLSFLOWER_DESTROY: - return mlx5e_delete_flower(priv, tc->cls_flower); - case TC_CLSFLOWER_STATS: - return mlx5e_stats_flower(priv, tc->cls_flower); - } + switch (cls_flower->command) { + case TC_CLSFLOWER_REPLACE: + return mlx5e_configure_flower(priv, cls_flower); + case TC_CLSFLOWER_DESTROY: + return mlx5e_delete_flower(priv, cls_flower); + case TC_CLSFLOWER_STATS: + return mlx5e_stats_flower(priv, cls_flower); default: return -EOPNOTSUPP; } +} +#endif -mqprio: - if (tc->type != TC_SETUP_MQPRIO) - return -EINVAL; - - tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; - - return mlx5e_setup_tc(dev, tc->mqprio->num_tc); +static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + switch (type) { +#ifdef CONFIG_MLX5_ESWITCH + case TC_SETUP_CLSFLOWER: + return mlx5e_setup_tc_cls_flower(dev, type_data); +#endif + case TC_SETUP_MQPRIO: + return mlx5e_setup_tc_mqprio(dev, type_data); + default: + return -EOPNOTSUPP; + } } static void @@ -3271,8 +3330,8 @@ static int mlx5e_handle_feature(struct net_device *netdev, err = feature_handler(netdev, enable); if (err) { - netdev_err(netdev, "%s feature 0x%llx failed err %d\n", - enable ? 
"Enable" : "Disable", feature, err); + netdev_err(netdev, "%s feature %pNF failed, err %d\n", + enable ? "Enable" : "Disable", &feature, err); return err; } @@ -3358,6 +3417,7 @@ static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) } } +#ifdef CONFIG_MLX5_ESWITCH static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac) { struct mlx5e_priv *priv = netdev_priv(dev); @@ -3460,6 +3520,7 @@ static int mlx5e_get_vf_stats(struct net_device *dev, return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1, vf_stats); } +#endif static void mlx5e_add_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti) @@ -3489,13 +3550,13 @@ static void mlx5e_del_vxlan_port(struct net_device *netdev, mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 0); } -static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv, - struct sk_buff *skb, - netdev_features_t features) +static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv, + struct sk_buff *skb, + netdev_features_t features) { struct udphdr *udph; - u16 proto; - u16 port = 0; + u8 proto; + u16 port; switch (vlan_get_protocol(skb)) { case htons(ETH_P_IP): @@ -3508,14 +3569,17 @@ static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv, goto out; } - if (proto == IPPROTO_UDP) { + switch (proto) { + case IPPROTO_GRE: + return features; + case IPPROTO_UDP: udph = udp_hdr(skb); port = be16_to_cpu(udph->dest); - } - /* Verify if UDP port is being offloaded by HW */ - if (port && mlx5e_vxlan_lookup_port(priv, port)) - return features; + /* Verify if UDP port is being offloaded by HW */ + if (mlx5e_vxlan_lookup_port(priv, port)) + return features; + } out: /* Disable CSUM and GSO if the udp dport is not offloaded by HW */ @@ -3539,7 +3603,7 @@ static netdev_features_t mlx5e_features_check(struct sk_buff *skb, /* Validate if the tunneled packet is being offloaded by HW */ if (skb->encapsulation && (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK)) - return mlx5e_vxlan_features_check(priv, skb, features); + return mlx5e_tunnel_features_check(priv, skb, features); return features; } @@ -3636,7 +3700,6 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state); /* napi_schedule in case we have missed anything */ - set_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags); napi_schedule(&c->napi); if (old_prog) @@ -3693,11 +3756,11 @@ static void mlx5e_netpoll(struct net_device *dev) } #endif -static const struct net_device_ops mlx5e_netdev_ops_basic = { +static const struct net_device_ops mlx5e_netdev_ops = { .ndo_open = mlx5e_open, .ndo_stop = mlx5e_close, .ndo_start_xmit = mlx5e_xmit, - .ndo_setup_tc = mlx5e_ndo_setup_tc, + .ndo_setup_tc = mlx5e_setup_tc, .ndo_select_queue = mlx5e_select_queue, .ndo_get_stats64 = mlx5e_get_stats, .ndo_set_rx_mode = mlx5e_set_rx_mode, @@ -3708,6 +3771,9 @@ static const struct net_device_ops mlx5e_netdev_ops_basic = { .ndo_change_mtu = mlx5e_change_mtu, .ndo_do_ioctl = mlx5e_ioctl, .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate, + .ndo_udp_tunnel_add = mlx5e_add_vxlan_port, + .ndo_udp_tunnel_del = mlx5e_del_vxlan_port, + .ndo_features_check = mlx5e_features_check, #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = mlx5e_rx_flow_steer, #endif @@ -3716,29 +3782,8 @@ static const struct net_device_ops mlx5e_netdev_ops_basic = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = mlx5e_netpoll, #endif -}; - -static const struct net_device_ops mlx5e_netdev_ops_sriov = 
{ - .ndo_open = mlx5e_open, - .ndo_stop = mlx5e_close, - .ndo_start_xmit = mlx5e_xmit, - .ndo_setup_tc = mlx5e_ndo_setup_tc, - .ndo_select_queue = mlx5e_select_queue, - .ndo_get_stats64 = mlx5e_get_stats, - .ndo_set_rx_mode = mlx5e_set_rx_mode, - .ndo_set_mac_address = mlx5e_set_mac, - .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid, - .ndo_set_features = mlx5e_set_features, - .ndo_change_mtu = mlx5e_change_mtu, - .ndo_do_ioctl = mlx5e_ioctl, - .ndo_udp_tunnel_add = mlx5e_add_vxlan_port, - .ndo_udp_tunnel_del = mlx5e_del_vxlan_port, - .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate, - .ndo_features_check = mlx5e_features_check, -#ifdef CONFIG_RFS_ACCEL - .ndo_rx_flow_steer = mlx5e_rx_flow_steer, -#endif +#ifdef CONFIG_MLX5_ESWITCH + /* SRIOV E-Switch NDOs */ .ndo_set_vf_mac = mlx5e_set_vf_mac, .ndo_set_vf_vlan = mlx5e_set_vf_vlan, .ndo_set_vf_spoofchk = mlx5e_set_vf_spoofchk, @@ -3747,13 +3792,9 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = { .ndo_get_vf_config = mlx5e_get_vf_config, .ndo_set_vf_link_state = mlx5e_set_vf_link_state, .ndo_get_vf_stats = mlx5e_get_vf_stats, - .ndo_tx_timeout = mlx5e_tx_timeout, - .ndo_xdp = mlx5e_xdp, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = mlx5e_netpoll, -#endif .ndo_has_offload_stats = mlx5e_has_offload_stats, .ndo_get_offload_stats = mlx5e_get_offload_stats, +#endif }; static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) @@ -3790,8 +3831,7 @@ u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev) 2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/; } -void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev, - u32 *indirection_rqt, int len, +void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len, int num_channels) { int i; @@ -3934,7 +3974,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, /* RSS */ params->rss_hfunc = ETH_RSS_HASH_XOR; netdev_rss_key_fill(params->toeplitz_hash_key, sizeof(params->toeplitz_hash_key)); - mlx5e_build_default_indir_rqt(mdev, params->indirection_rqt, + mlx5e_build_default_indir_rqt(params->indirection_rqt, MLX5E_INDIR_RQT_SIZE, max_channels); } @@ -3973,9 +4013,11 @@ static void mlx5e_set_netdev_dev_addr(struct net_device *netdev) } } +#if IS_ENABLED(CONFIG_NET_SWITCHDEV) && IS_ENABLED(CONFIG_MLX5_ESWITCH) static const struct switchdev_ops mlx5e_switchdev_ops = { .switchdev_port_attr_get = mlx5e_attr_get, }; +#endif static void mlx5e_build_nic_netdev(struct net_device *netdev) { @@ -3986,15 +4028,12 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) SET_NETDEV_DEV(netdev, &mdev->pdev->dev); - if (MLX5_CAP_GEN(mdev, vport_group_manager)) { - netdev->netdev_ops = &mlx5e_netdev_ops_sriov; + netdev->netdev_ops = &mlx5e_netdev_ops; + #ifdef CONFIG_MLX5_CORE_EN_DCB - if (MLX5_CAP_GEN(mdev, qos)) - netdev->dcbnl_ops = &mlx5e_dcbnl_ops; + if (MLX5_CAP_GEN(mdev, vport_group_manager) && MLX5_CAP_GEN(mdev, qos)) + netdev->dcbnl_ops = &mlx5e_dcbnl_ops; #endif - } else { - netdev->netdev_ops = &mlx5e_netdev_ops_basic; - } netdev->watchdog_timeo = 15 * HZ; @@ -4017,20 +4056,32 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; - if (mlx5e_vxlan_allowed(mdev)) { - netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM | - NETIF_F_GSO_PARTIAL; + if (mlx5e_vxlan_allowed(mdev) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) { + netdev->hw_features |= NETIF_F_GSO_PARTIAL; 
netdev->hw_enc_features |= NETIF_F_IP_CSUM; netdev->hw_enc_features |= NETIF_F_IPV6_CSUM; netdev->hw_enc_features |= NETIF_F_TSO; netdev->hw_enc_features |= NETIF_F_TSO6; - netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL; - netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM | - NETIF_F_GSO_PARTIAL; + netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL; + } + + if (mlx5e_vxlan_allowed(mdev)) { + netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; + netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM; } + if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) { + netdev->hw_features |= NETIF_F_GSO_GRE | + NETIF_F_GSO_GRE_CSUM; + netdev->hw_enc_features |= NETIF_F_GSO_GRE | + NETIF_F_GSO_GRE_CSUM; + netdev->gso_partial_features |= NETIF_F_GSO_GRE | + NETIF_F_GSO_GRE_CSUM; + } + mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled); if (fcs_supported) @@ -4066,8 +4117,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) mlx5e_set_netdev_dev_addr(netdev); -#ifdef CONFIG_NET_SWITCHDEV - if (MLX5_CAP_GEN(mdev, vport_group_manager)) +#if IS_ENABLED(CONFIG_NET_SWITCHDEV) && IS_ENABLED(CONFIG_MLX5_ESWITCH) + if (MLX5_VPORT_MANAGER(mdev)) netdev->switchdev_ops = &mlx5e_switchdev_ops; #endif @@ -4199,6 +4250,10 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) mlx5e_init_l2_addr(priv); + /* Marking the link as currently not needed by the Driver */ + if (!netif_running(netdev)) + mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN); + /* MTU range: 68 - hw-specific max */ netdev->min_mtu = ETH_MIN_MTU; mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1); @@ -4209,7 +4264,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) mlx5e_enable_async_events(priv); - if (MLX5_CAP_GEN(mdev, vport_group_manager)) + if (MLX5_VPORT_MANAGER(priv->mdev)) mlx5e_register_vport_reps(priv); if (netdev->reg_state != NETREG_REGISTERED) @@ -4243,7 +4298,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv) queue_work(priv->wq, &priv->set_rx_mode_work); - if (MLX5_CAP_GEN(mdev, vport_group_manager)) + if (MLX5_VPORT_MANAGER(priv->mdev)) mlx5e_unregister_vport_reps(priv); mlx5e_disable_async_events(priv); @@ -4416,32 +4471,29 @@ static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv) static void *mlx5e_add(struct mlx5_core_dev *mdev) { - struct mlx5_eswitch *esw = mdev->priv.eswitch; - int total_vfs = MLX5_TOTAL_VPORTS(mdev); - struct mlx5e_rep_priv *rpriv = NULL; - void *priv; - int vport; - int err; struct net_device *netdev; + void *rpriv = NULL; + void *priv; + int err; err = mlx5e_check_required_hca_cap(mdev); if (err) return NULL; - if (MLX5_CAP_GEN(mdev, vport_group_manager)) { - rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL); +#ifdef CONFIG_MLX5_ESWITCH + if (MLX5_VPORT_MANAGER(mdev)) { + rpriv = mlx5e_alloc_nic_rep_priv(mdev); if (!rpriv) { - mlx5_core_warn(mdev, - "Not creating net device, Failed to alloc rep priv data\n"); + mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n"); return NULL; } - rpriv->rep = &esw->offloads.vport_reps[0]; } +#endif netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, rpriv); if (!netdev) { mlx5_core_err(mdev, "mlx5e_create_netdev failed\n"); - goto err_unregister_reps; + goto err_free_rpriv; } priv = netdev_priv(netdev); @@ -4462,14 +4514,9 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev) err_detach: mlx5e_detach(mdev, priv); - err_destroy_netdev: mlx5e_destroy_netdev(priv); - -err_unregister_reps: - for (vport = 1; vport < 
total_vfs; vport++) - mlx5_eswitch_unregister_vport_rep(esw, vport); - +err_free_rpriv: kfree(rpriv); return NULL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 45e60be9c277..45e03c427faf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -613,15 +613,18 @@ static int mlx5e_rep_open(struct net_device *dev) struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; int err; - err = mlx5e_open(dev); + mutex_lock(&priv->state_lock); + err = mlx5e_open_locked(dev); if (err) - return err; + goto unlock; - err = mlx5_eswitch_set_vport_state(esw, rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_UP); - if (!err) + if (!mlx5_eswitch_set_vport_state(esw, rep->vport, + MLX5_ESW_VPORT_ADMIN_STATE_UP)) netif_carrier_on(dev); - return 0; +unlock: + mutex_unlock(&priv->state_lock); + return err; } static int mlx5e_rep_close(struct net_device *dev) @@ -630,10 +633,13 @@ static int mlx5e_rep_close(struct net_device *dev) struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_eswitch_rep *rep = rpriv->rep; struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + int ret; + mutex_lock(&priv->state_lock); (void)mlx5_eswitch_set_vport_state(esw, rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN); - - return mlx5e_close(dev); + ret = mlx5e_close_locked(dev); + mutex_unlock(&priv->state_lock); + return ret; } static int mlx5e_rep_get_phys_port_name(struct net_device *dev, @@ -651,37 +657,42 @@ static int mlx5e_rep_get_phys_port_name(struct net_device *dev, return 0; } -static int mlx5e_rep_ndo_setup_tc(struct net_device *dev, u32 handle, - u32 chain_index, __be16 proto, - struct tc_to_netdev *tc) +static int +mlx5e_rep_setup_tc_cls_flower(struct net_device *dev, + struct tc_cls_flower_offload *cls_flower) { struct mlx5e_priv *priv = netdev_priv(dev); - if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS)) + if (!is_classid_clsact_ingress(cls_flower->common.classid) || + cls_flower->common.chain_index) return -EOPNOTSUPP; - if (tc->egress_dev) { + if (cls_flower->egress_dev) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct net_device *uplink_dev = mlx5_eswitch_get_uplink_netdev(esw); - return uplink_dev->netdev_ops->ndo_setup_tc(uplink_dev, handle, - chain_index, - proto, tc); + dev = mlx5_eswitch_get_uplink_netdev(esw); + return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, + cls_flower); } - if (chain_index) + switch (cls_flower->command) { + case TC_CLSFLOWER_REPLACE: + return mlx5e_configure_flower(priv, cls_flower); + case TC_CLSFLOWER_DESTROY: + return mlx5e_delete_flower(priv, cls_flower); + case TC_CLSFLOWER_STATS: + return mlx5e_stats_flower(priv, cls_flower); + default: return -EOPNOTSUPP; + } +} - switch (tc->type) { +static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + switch (type) { case TC_SETUP_CLSFLOWER: - switch (tc->cls_flower->command) { - case TC_CLSFLOWER_REPLACE: - return mlx5e_configure_flower(priv, proto, tc->cls_flower); - case TC_CLSFLOWER_DESTROY: - return mlx5e_delete_flower(priv, tc->cls_flower); - case TC_CLSFLOWER_STATS: - return mlx5e_stats_flower(priv, tc->cls_flower); - } + return mlx5e_rep_setup_tc_cls_flower(dev, type_data); default: return -EOPNOTSUPP; } @@ -773,7 +784,7 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = { .ndo_stop = mlx5e_rep_close, .ndo_start_xmit = mlx5e_xmit, .ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name, - .ndo_setup_tc = mlx5e_rep_ndo_setup_tc, + 
.ndo_setup_tc = mlx5e_rep_setup_tc, .ndo_get_stats64 = mlx5e_rep_get_stats, .ndo_has_offload_stats = mlx5e_has_offload_stats, .ndo_get_offload_stats = mlx5e_get_offload_stats, @@ -913,7 +924,7 @@ static int mlx5e_get_rep_max_num_channels(struct mlx5_core_dev *mdev) return MLX5E_PORT_REPRESENTOR_NCH; } -static struct mlx5e_profile mlx5e_rep_profile = { +static const struct mlx5e_profile mlx5e_rep_profile = { .init = mlx5e_init_rep, .init_rx = mlx5e_init_rep_rx, .cleanup_rx = mlx5e_cleanup_rep_rx, @@ -1099,3 +1110,16 @@ void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv) mlx5e_rep_unregister_vf_vports(priv); /* VFs vports */ mlx5_eswitch_unregister_vport_rep(esw, 0); /* UPLINK PF*/ } + +void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev) +{ + struct mlx5_eswitch *esw = mdev->priv.eswitch; + struct mlx5e_rep_priv *rpriv; + + rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL); + if (!rpriv) + return NULL; + + rpriv->rep = &esw->offloads.vport_reps[0]; + return rpriv; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h index a0a1a7a1d6c0..5659ed9f51e6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h @@ -38,6 +38,7 @@ #include "eswitch.h" #include "en.h" +#ifdef CONFIG_MLX5_ESWITCH struct mlx5e_neigh_update_table { struct rhashtable neigh_ht; /* Save the neigh hash entries in a list in addition to the hash table @@ -123,6 +124,7 @@ struct mlx5e_encap_entry { int encap_size; }; +void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev); void mlx5e_register_vport_reps(struct mlx5e_priv *priv); void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv); bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv); @@ -141,5 +143,12 @@ void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e); void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv); +#else /* CONFIG_MLX5_ESWITCH */ +static inline void mlx5e_register_vport_reps(struct mlx5e_priv *priv) {} +static inline void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv) {} +static inline bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) { return false; } +static inline int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv) { return 0; } +static inline void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv) {} +#endif #endif /* __MLX5E_REP_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 7344433259fc..15a1687483cc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -163,7 +163,7 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq, static inline bool mlx5e_page_is_reserved(struct page *page) { - return page_is_pfmemalloc(page) || page_to_nid(page) != numa_node_id(); + return page_is_pfmemalloc(page) || page_to_nid(page) != numa_mem_id(); } static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq, @@ -177,8 +177,10 @@ static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq, return false; } - if (unlikely(page_is_pfmemalloc(dma_info->page))) + if (unlikely(mlx5e_page_is_reserved(dma_info->page))) { + rq->stats.cache_waive++; return false; + } cache->page_cache[cache->tail] = *dma_info; cache->tail = tail_next; @@ -252,7 +254,7 @@ static inline bool mlx5e_page_reuse(struct mlx5e_rq *rq, !mlx5e_page_is_reserved(wi->di.page); } -int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix) +static int 
mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix) { struct mlx5e_wqe_frag_info *wi = &rq->wqe.frag_info[ix]; @@ -263,8 +265,7 @@ int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix) wi->offset = 0; } - wqe->data.addr = cpu_to_be64(wi->di.addr + wi->offset + - rq->rx_headroom); + wqe->data.addr = cpu_to_be64(wi->di.addr + wi->offset + rq->buff.headroom); return 0; } @@ -296,7 +297,7 @@ void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix) static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq) { - return rq->mpwqe_num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER; + return rq->mpwqe.num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER; } static inline void mlx5e_add_skb_frag_mpwqe(struct mlx5e_rq *rq, @@ -305,7 +306,7 @@ static inline void mlx5e_add_skb_frag_mpwqe(struct mlx5e_rq *rq, u32 page_idx, u32 frag_offset, u32 len) { - unsigned int truesize = ALIGN(len, rq->mpwqe_stride_sz); + unsigned int truesize = ALIGN(len, BIT(rq->mpwqe.log_stride_sz)); dma_sync_single_for_cpu(rq->pdev, wi->umr.dma_info[page_idx].addr + frag_offset, @@ -358,7 +359,6 @@ static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix) /* fill sq edge with nops to avoid wqe wrap around */ while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) { sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP; - sq->db.ico_wqe[pi].num_wqebbs = 1; mlx5e_post_nop(wq, sq->sqn, &sq->pc); } @@ -369,41 +369,35 @@ static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix) MLX5_OPCODE_UMR); sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR; - sq->db.ico_wqe[pi].num_wqebbs = num_wqebbs; sq->pc += num_wqebbs; mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl); } static int mlx5e_alloc_rx_umr_mpwqe(struct mlx5e_rq *rq, - struct mlx5e_rx_wqe *wqe, u16 ix) { struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix]; - u64 dma_offset = (u64)mlx5e_get_wqe_mtt_offset(rq, ix) << PAGE_SHIFT; int pg_strides = mlx5e_mpwqe_strides_per_page(rq); + struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0]; int err; int i; - for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) { - struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[i]; - + for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) { err = mlx5e_page_alloc_mapped(rq, dma_info); if (unlikely(err)) goto err_unmap; wi->umr.mtt[i] = cpu_to_be64(dma_info->addr | MLX5_EN_WR); page_ref_add(dma_info->page, pg_strides); - wi->skbs_frags[i] = 0; } + memset(wi->skbs_frags, 0, sizeof(*wi->skbs_frags) * MLX5_MPWRQ_PAGES_PER_WQE); wi->consumed_strides = 0; - wqe->data.addr = cpu_to_be64(dma_offset); return 0; err_unmap: while (--i >= 0) { - struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[i]; - + dma_info--; page_ref_sub(dma_info->page, pg_strides); mlx5e_page_release(rq, dma_info, true); } @@ -414,27 +408,21 @@ static int mlx5e_alloc_rx_umr_mpwqe(struct mlx5e_rq *rq, void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi) { int pg_strides = mlx5e_mpwqe_strides_per_page(rq); + struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0]; int i; - for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) { - struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[i]; - + for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) { page_ref_sub(dma_info->page, pg_strides - wi->skbs_frags[i]); mlx5e_page_release(rq, dma_info, true); } } -void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq) +static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq) { struct mlx5_wq_ll *wq = &rq->wq; struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head); - clear_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, 
&rq->state); - - if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) { - mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]); - return; - } + rq->mpwqe.umr_in_progress = false; mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index)); @@ -444,16 +432,18 @@ void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq) mlx5_wq_ll_update_db_record(wq); } -int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix) +static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) { int err; - err = mlx5e_alloc_rx_umr_mpwqe(rq, wqe, ix); - if (unlikely(err)) + err = mlx5e_alloc_rx_umr_mpwqe(rq, ix); + if (unlikely(err)) { + rq->stats.buff_alloc_err++; return err; - set_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state); + } + rq->mpwqe.umr_in_progress = true; mlx5e_post_umr_wqe(rq, ix); - return -EBUSY; + return 0; } void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) @@ -463,94 +453,150 @@ void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) mlx5e_free_rx_mpwqe(rq, wi); } -#define RQ_CANNOT_POST(rq) \ - (!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state) || \ - test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state)) - bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) { struct mlx5_wq_ll *wq = &rq->wq; + int err; - if (unlikely(RQ_CANNOT_POST(rq))) + if (unlikely(!MLX5E_TEST_BIT(rq->state, MLX5E_RQ_STATE_ENABLED))) return false; - while (!mlx5_wq_ll_is_full(wq)) { - struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head); - int err; + if (mlx5_wq_ll_is_full(wq)) + return false; - err = rq->alloc_wqe(rq, wqe, wq->head); - if (err == -EBUSY) - return true; + do { + struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head); + + err = mlx5e_alloc_rx_wqe(rq, wqe, wq->head); if (unlikely(err)) { rq->stats.buff_alloc_err++; break; } mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index)); - } + } while (!mlx5_wq_ll_is_full(wq)); /* ensure wqes are visible to device before updating doorbell record */ dma_wmb(); mlx5_wq_ll_update_db_record(wq); - return !mlx5_wq_ll_is_full(wq); + return !!err; +} + +static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq, + struct mlx5e_icosq *sq, + struct mlx5e_rq *rq, + struct mlx5_cqe64 *cqe) +{ + struct mlx5_wq_cyc *wq = &sq->wq; + u16 ci = be16_to_cpu(cqe->wqe_counter) & wq->sz_m1; + struct mlx5e_sq_wqe_info *icowi = &sq->db.ico_wqe[ci]; + + mlx5_cqwq_pop(&cq->wq); + + if (unlikely((cqe->op_own >> 4) != MLX5_CQE_REQ)) { + WARN_ONCE(true, "mlx5e: Bad OP in ICOSQ CQE: 0x%x\n", + cqe->op_own); + return; + } + + if (likely(icowi->opcode == MLX5_OPCODE_UMR)) { + mlx5e_post_rx_mpwqe(rq); + return; + } + + if (unlikely(icowi->opcode != MLX5_OPCODE_NOP)) + WARN_ONCE(true, + "mlx5e: Bad OPCODE in ICOSQ WQE info: 0x%x\n", + icowi->opcode); +} + +static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq) +{ + struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq); + struct mlx5_cqe64 *cqe; + + if (unlikely(!MLX5E_TEST_BIT(sq->state, MLX5E_SQ_STATE_ENABLED))) + return; + + cqe = mlx5_cqwq_get_cqe(&cq->wq); + if (likely(!cqe)) + return; + + /* by design, there's only a single cqe */ + mlx5e_poll_ico_single_cqe(cq, sq, rq, cqe); + + mlx5_cqwq_update_db_record(&cq->wq); +} + +bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq) +{ + struct mlx5_wq_ll *wq = &rq->wq; + + if (unlikely(!MLX5E_TEST_BIT(rq->state, MLX5E_RQ_STATE_ENABLED))) + return false; + + mlx5e_poll_ico_cq(&rq->channel->icosq.cq, rq); + + if (mlx5_wq_ll_is_full(wq)) + return false; + + if (!rq->mpwqe.umr_in_progress) + mlx5e_alloc_rx_mpwqe(rq, wq->head); + + return 
true; } static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe, u32 cqe_bcnt) { struct ethhdr *eth = (struct ethhdr *)(skb->data); - struct iphdr *ipv4; - struct ipv6hdr *ipv6; struct tcphdr *tcp; int network_depth = 0; __be16 proto; u16 tot_len; + void *ip_p; u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe); - int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) || - (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type)); + u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) || + (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA); skb->mac_len = ETH_HLEN; proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth); - ipv4 = (struct iphdr *)(skb->data + network_depth); - ipv6 = (struct ipv6hdr *)(skb->data + network_depth); tot_len = cqe_bcnt - network_depth; + ip_p = skb->data + network_depth; if (proto == htons(ETH_P_IP)) { - tcp = (struct tcphdr *)(skb->data + network_depth + - sizeof(struct iphdr)); - ipv6 = NULL; + struct iphdr *ipv4 = ip_p; + + tcp = ip_p + sizeof(struct iphdr); skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; - } else { - tcp = (struct tcphdr *)(skb->data + network_depth + - sizeof(struct ipv6hdr)); - ipv4 = NULL; - skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; - } - if (get_cqe_lro_tcppsh(cqe)) - tcp->psh = 1; - - if (tcp_ack) { - tcp->ack = 1; - tcp->ack_seq = cqe->lro_ack_seq_num; - tcp->window = cqe->lro_tcp_win; - } - - if (ipv4) { ipv4->ttl = cqe->lro_min_ttl; ipv4->tot_len = cpu_to_be16(tot_len); ipv4->check = 0; ipv4->check = ip_fast_csum((unsigned char *)ipv4, ipv4->ihl); } else { + struct ipv6hdr *ipv6 = ip_p; + + tcp = ip_p + sizeof(struct ipv6hdr); + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; + ipv6->hop_limit = cqe->lro_min_ttl; ipv6->payload_len = cpu_to_be16(tot_len - sizeof(struct ipv6hdr)); } + + tcp->psh = get_cqe_lro_tcppsh(cqe); + + if (tcp_ack) { + tcp->ack = 1; + tcp->ack_seq = cqe->lro_ack_seq_num; + tcp->window = cqe->lro_tcp_win; + } } static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe, @@ -581,6 +627,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, if (lro) { skb->ip_summed = CHECKSUM_UNNECESSARY; + rq->stats.csum_unnecessary++; return; } @@ -598,7 +645,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, skb->csum_level = 1; skb->encapsulation = 1; rq->stats.csum_unnecessary_inner++; + return; } + rq->stats.csum_unnecessary++; return; } csum_none: @@ -776,9 +825,9 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt) { struct mlx5e_dma_info *di = &wi->di; + u16 rx_headroom = rq->buff.headroom; struct sk_buff *skb; void *va, *data; - u16 rx_headroom = rq->rx_headroom; bool consumed; u32 frag_size; @@ -857,6 +906,7 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) &wqe->next.next_wqe_index); } +#ifdef CONFIG_MLX5_ESWITCH void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) { struct net_device *netdev = rq->netdev; @@ -901,6 +951,7 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) mlx5_wq_ll_pop(&rq->wq, wqe_counter_be, &wqe->next.next_wqe_index); } +#endif static inline void mlx5e_mpwqe_fill_rx_skb(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, @@ -909,7 +960,7 @@ static inline void mlx5e_mpwqe_fill_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb) { u16 stride_ix = mpwrq_get_cqe_stride_index(cqe); - u32 wqe_offset = stride_ix * rq->mpwqe_stride_sz; + u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz; u32 head_offset = wqe_offset & 
(PAGE_SIZE - 1); u32 page_idx = wqe_offset >> PAGE_SHIFT; u32 head_page_idx = page_idx; @@ -977,7 +1028,7 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) napi_gro_receive(rq->cq.napi, skb); mpwrq_cqe_out: - if (likely(wi->consumed_strides < rq->mpwqe_num_strides)) + if (likely(wi->consumed_strides < rq->mpwqe.num_strides)) return; mlx5e_free_rx_mpwqe(rq, wi); @@ -987,21 +1038,23 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) { struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq); - struct mlx5e_xdpsq *xdpsq = &rq->xdpsq; + struct mlx5e_xdpsq *xdpsq; + struct mlx5_cqe64 *cqe; int work_done = 0; - if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) + if (unlikely(!MLX5E_TEST_BIT(rq->state, MLX5E_RQ_STATE_ENABLED))) return 0; if (cq->decmprs_left) work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget); - for (; work_done < budget; work_done++) { - struct mlx5_cqe64 *cqe = mlx5_cqwq_get_cqe(&cq->wq); + cqe = mlx5_cqwq_get_cqe(&cq->wq); + if (!cqe) + return 0; - if (!cqe) - break; + xdpsq = &rq->xdpsq; + do { if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) { work_done += mlx5e_decompress_cqes_start(rq, cq, @@ -1012,7 +1065,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) mlx5_cqwq_pop(&cq->wq); rq->handle_rx_cqe(rq, cqe); - } + } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); if (xdpsq->db.doorbell) { mlx5e_xmit_xdp_doorbell(xdpsq); @@ -1030,13 +1083,18 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq) { struct mlx5e_xdpsq *sq; + struct mlx5_cqe64 *cqe; struct mlx5e_rq *rq; u16 sqcc; int i; sq = container_of(cq, struct mlx5e_xdpsq, cq); - if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) + if (unlikely(!MLX5E_TEST_BIT(sq->state, MLX5E_SQ_STATE_ENABLED))) + return false; + + cqe = mlx5_cqwq_get_cqe(&cq->wq); + if (!cqe) return false; rq = container_of(sq, struct mlx5e_rq, xdpsq); @@ -1046,15 +1104,11 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq) */ sqcc = sq->cc; - for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) { - struct mlx5_cqe64 *cqe; + i = 0; + do { u16 wqe_counter; bool last_wqe; - cqe = mlx5_cqwq_get_cqe(&cq->wq); - if (!cqe) - break; - mlx5_cqwq_pop(&cq->wq); wqe_counter = be16_to_cpu(cqe->wqe_counter); @@ -1072,7 +1126,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq) /* Recycle RX page */ mlx5e_page_release(rq, di, true); } while (!last_wqe); - } + } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); mlx5_cqwq_update_db_record(&cq->wq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index e65517eafc58..f8637213afc0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -47,7 +47,7 @@ struct counter_desc { char format[ETH_GSTRING_LEN]; - int offset; /* Byte offset */ + size_t offset; /* Byte offset */ }; struct mlx5e_sw_stats { @@ -68,6 +68,7 @@ struct mlx5e_sw_stats { u64 rx_xdp_drop; u64 rx_xdp_tx; u64 rx_xdp_tx_full; + u64 tx_csum_none; u64 tx_csum_partial; u64 tx_csum_partial_inner; u64 tx_queue_stopped; @@ -84,6 +85,7 @@ struct mlx5e_sw_stats { u64 rx_cache_full; u64 rx_cache_empty; u64 rx_cache_busy; + u64 rx_cache_waive; /* Special handling counters */ u64 link_down_events_phy; @@ -107,6 +109,7 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct 
mlx5e_sw_stats, rx_xdp_drop) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) }, @@ -123,6 +126,7 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) }, }; @@ -216,6 +220,12 @@ static const struct counter_desc vport_stats_desc[] = { MLX5_GET64(ppcnt_reg, pstats->per_prio_counters[prio], \ counter_set.eth_per_prio_grp_data_layout.c##_high) #define NUM_PPORT_PRIO 8 +#define PPORT_ETH_EXT_OFF(c) \ + MLX5_BYTE_OFF(ppcnt_reg, \ + counter_set.eth_extended_cntrs_grp_data_layout.c##_high) +#define PPORT_ETH_EXT_GET(pstats, c) \ + MLX5_GET64(ppcnt_reg, (pstats)->eth_ext_counters, \ + counter_set.eth_extended_cntrs_grp_data_layout.c##_high) struct mlx5e_pport_stats { __be64 IEEE_802_3_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; @@ -224,6 +234,7 @@ struct mlx5e_pport_stats { __be64 per_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)]; __be64 phy_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; __be64 phy_statistical_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; + __be64 eth_ext_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; }; static const struct counter_desc pport_802_3_stats_desc[] = { @@ -290,12 +301,22 @@ static const struct counter_desc pport_per_prio_pfc_stats_desc[] = { { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) }, }; +static const struct counter_desc pport_eth_ext_stats_desc[] = { + { "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) }, +}; + #define PCIE_PERF_OFF(c) \ MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c) #define PCIE_PERF_GET(pcie_stats, c) \ MLX5_GET(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \ counter_set.pcie_perf_cntrs_grp_data_layout.c) +#define PCIE_PERF_OFF64(c) \ + MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high) +#define PCIE_PERF_GET64(pcie_stats, c) \ + MLX5_GET64(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \ + counter_set.pcie_perf_cntrs_grp_data_layout.c##_high) + struct mlx5e_pcie_stats { __be64 pcie_perf_counters[MLX5_ST_SZ_QW(mpcnt_reg)]; }; @@ -305,10 +326,22 @@ static const struct counter_desc pcie_perf_stats_desc[] = { { "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) }, }; +static const struct counter_desc pcie_perf_stats_desc64[] = { + { "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) }, +}; + +static const struct counter_desc pcie_perf_stall_stats_desc[] = { + { "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) }, + { "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) }, + { "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) }, + { "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) }, +}; + struct mlx5e_rq_stats { u64 packets; u64 bytes; u64 csum_complete; + u64 csum_unnecessary; u64 csum_unnecessary_inner; u64 csum_none; u64 lro_packets; @@ -326,12 +359,14 @@ struct mlx5e_rq_stats { u64 cache_full; u64 cache_empty; u64 cache_busy; + 
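The statistics tables here (MLX5E_DECLARE_STAT and the PPORT_*/PCIE_* _OFF macros) boil down to pairs of a printable name and a byte offset into a stats structure, which is why counter_desc's offset field is widened from int to size_t. A compact standalone illustration of the same pattern using offsetof(); struct sw_stats and dump_stats() are made up for the example:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct sw_stats {
        uint64_t rx_packets;
        uint64_t rx_csum_none;
        uint64_t tx_csum_partial;
    };

    struct counter_desc {
        const char *name;
        size_t offset;          /* byte offset into struct sw_stats */
    };

    static const struct counter_desc sw_desc[] = {
        { "rx_packets",      offsetof(struct sw_stats, rx_packets) },
        { "rx_csum_none",    offsetof(struct sw_stats, rx_csum_none) },
        { "tx_csum_partial", offsetof(struct sw_stats, tx_csum_partial) },
    };

    void dump_stats(const struct sw_stats *s)
    {
        for (size_t i = 0; i < sizeof(sw_desc) / sizeof(sw_desc[0]); i++) {
            const uint64_t *val =
                (const uint64_t *)((const char *)s + sw_desc[i].offset);

            printf("%s: %llu\n", sw_desc[i].name, (unsigned long long)*val);
        }
    }

One array then serves both the ethtool strings dump and the values dump, instead of per-field hand-written code.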
u64 cache_waive; }; static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) }, @@ -349,6 +384,7 @@ static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) }, }; struct mlx5e_sq_stats { @@ -360,6 +396,7 @@ struct mlx5e_sq_stats { u64 tso_bytes; u64 tso_inner_packets; u64 tso_inner_bytes; + u64 csum_partial; u64 csum_partial_inner; u64 nop; /* less likely accessed in data path */ @@ -376,6 +413,7 @@ static const struct counter_desc sq_stats_desc[] = { { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) }, @@ -397,17 +435,29 @@ static const struct counter_desc sq_stats_desc[] = { #define NUM_PCIE_PERF_COUNTERS(priv) \ (ARRAY_SIZE(pcie_perf_stats_desc) * \ MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group)) +#define NUM_PCIE_PERF_COUNTERS64(priv) \ + (ARRAY_SIZE(pcie_perf_stats_desc64) * \ + MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt)) +#define NUM_PCIE_PERF_STALL_COUNTERS(priv) \ + (ARRAY_SIZE(pcie_perf_stall_stats_desc) * \ + MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled)) #define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \ ARRAY_SIZE(pport_per_prio_traffic_stats_desc) #define NUM_PPORT_PER_PRIO_PFC_COUNTERS \ ARRAY_SIZE(pport_per_prio_pfc_stats_desc) +#define NUM_PPORT_ETH_EXT_COUNTERS(priv) \ + (ARRAY_SIZE(pport_eth_ext_stats_desc) * \ + MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters)) #define NUM_PPORT_COUNTERS(priv) (NUM_PPORT_802_3_COUNTERS + \ NUM_PPORT_2863_COUNTERS + \ NUM_PPORT_2819_COUNTERS + \ NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) + \ NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \ - NUM_PPORT_PRIO) -#define NUM_PCIE_COUNTERS(priv) NUM_PCIE_PERF_COUNTERS(priv) + NUM_PPORT_PRIO + \ + NUM_PPORT_ETH_EXT_COUNTERS(priv)) +#define NUM_PCIE_COUNTERS(priv) (NUM_PCIE_PERF_COUNTERS(priv) + \ + NUM_PCIE_PERF_COUNTERS64(priv) +\ + NUM_PCIE_PERF_STALL_COUNTERS(priv)) #define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc) #define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 7f282e8f4e7f..1aa2028ed995 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1317,6 +1317,69 @@ static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 upda return true; } +static bool modify_header_match_supported(struct mlx5_flow_spec *spec, + struct tcf_exts *exts) +{ + const struct tc_action *a; + bool modify_ip_header; + 
LIST_HEAD(actions); + u8 htype, ip_proto; + void *headers_v; + u16 ethertype; + int nkeys, i; + + headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers); + ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype); + + /* for non-IP we only re-write MACs, so we're okay */ + if (ethertype != ETH_P_IP && ethertype != ETH_P_IPV6) + goto out_ok; + + modify_ip_header = false; + tcf_exts_to_list(exts, &actions); + list_for_each_entry(a, &actions, list) { + if (!is_tcf_pedit(a)) + continue; + + nkeys = tcf_pedit_nkeys(a); + for (i = 0; i < nkeys; i++) { + htype = tcf_pedit_htype(a, i); + if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 || + htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) { + modify_ip_header = true; + break; + } + } + } + + ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol); + if (modify_ip_header && ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) { + pr_info("can't offload re-write of ip proto %d\n", ip_proto); + return false; + } + +out_ok: + return true; +} + +static bool actions_match_supported(struct mlx5e_priv *priv, + struct tcf_exts *exts, + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct mlx5e_tc_flow *flow) +{ + u32 actions; + + if (flow->flags & MLX5E_TC_FLOW_ESWITCH) + actions = flow->esw_attr->action; + else + actions = flow->nic_attr->action; + + if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) + return modify_header_match_supported(&parse_attr->spec, exts); + + return true; +} + static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, struct mlx5e_tc_flow_parse_attr *parse_attr, struct mlx5e_tc_flow *flow) @@ -1326,7 +1389,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, LIST_HEAD(actions); int err; - if (tc_no_actions(exts)) + if (!tcf_exts_has_actions(exts)) return -EINVAL; attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; @@ -1378,6 +1441,9 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, return -EINVAL; } + if (!actions_match_supported(priv, exts, parse_attr, flow)) + return -EOPNOTSUPP; + return 0; } @@ -1564,7 +1630,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, break; default: err = -EOPNOTSUPP; - goto out; + goto free_encap; } fl4.flowi4_tos = tun_key->tos; fl4.daddr = tun_key->u.ipv4.dst; @@ -1573,7 +1639,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev, &fl4, &n, &ttl); if (err) - goto out; + goto free_encap; /* used by mlx5e_detach_encap to lookup a neigh hash table * entry in the neigh hash table when a user deletes a rule @@ -1590,7 +1656,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, */ err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e); if (err) - goto out; + goto free_encap; read_lock_bh(&n->lock); nud_state = n->nud_state; @@ -1630,8 +1696,9 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, destroy_neigh_entry: mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e); -out: +free_encap: kfree(encap_header); +out: if (n) neigh_release(n); return err; @@ -1668,7 +1735,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv, break; default: err = -EOPNOTSUPP; - goto out; + goto free_encap; } fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label); @@ -1678,7 +1745,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv, err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev, &fl6, &n, &ttl); if (err) - goto out; + goto 
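modify_header_match_supported() scans the pedit keys of an offloaded flow: MAC-only rewrites are always accepted, but a rewrite that touches IPv4/IPv6 header fields is refused unless the matched ip_proto is TCP or UDP, in which case the rule stays in software. The decision reduces to a small predicate; a standalone sketch with a made-up htype enum and the raw protocol numbers (6 = TCP, 17 = UDP) standing in for the kernel constants:

    #include <stdbool.h>

    enum pedit_htype { HDR_TYPE_ETH, HDR_TYPE_IP4, HDR_TYPE_IP6 };

    /* Sketch: MAC-only rewrites are always fine; IP-header rewrites are only
     * offloadable when the flow is known to be TCP (6) or UDP (17). */
    bool rewrite_offload_ok(const enum pedit_htype *htypes, int nkeys, int ip_proto)
    {
        bool modifies_ip = false;

        for (int i = 0; i < nkeys; i++)
            if (htypes[i] == HDR_TYPE_IP4 || htypes[i] == HDR_TYPE_IP6)
                modifies_ip = true;

        return !modifies_ip || ip_proto == 6 || ip_proto == 17;
    }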
free_encap; /* used by mlx5e_detach_encap to lookup a neigh hash table * entry in the neigh hash table when a user deletes a rule @@ -1695,7 +1762,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv, */ err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e); if (err) - goto out; + goto free_encap; read_lock_bh(&n->lock); nud_state = n->nud_state; @@ -1736,8 +1803,9 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv, destroy_neigh_entry: mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e); -out: +free_encap: kfree(encap_header); +out: if (n) neigh_release(n); return err; @@ -1791,6 +1859,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, } } + /* must verify if encap is valid or not */ if (found) goto attach_flow; @@ -1817,6 +1886,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, *encap_dev = e->out_dev; if (e->flags & MLX5_ENCAP_ENTRY_VALID) attr->encap_id = e->encap_id; + else + err = -EAGAIN; return err; @@ -1837,7 +1908,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, bool encap = false; int err = 0; - if (tc_no_actions(exts)) + if (!tcf_exts_has_actions(exts)) return -EINVAL; memset(attr, 0, sizeof(*attr)); @@ -1934,10 +2005,14 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, return -EINVAL; } + + if (!actions_match_supported(priv, exts, parse_attr, flow)) + return -EOPNOTSUPP; + return err; } -int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol, +int mlx5e_configure_flower(struct mlx5e_priv *priv, struct tc_cls_flower_offload *f) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index ecbe30d808ae..c14c263a739b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -33,12 +33,15 @@ #ifndef __MLX5_EN_TC_H__ #define __MLX5_EN_TC_H__ +#include + #define MLX5E_TC_FLOW_ID_MASK 0x0000ffff +#ifdef CONFIG_MLX5_ESWITCH int mlx5e_tc_init(struct mlx5e_priv *priv); void mlx5e_tc_cleanup(struct mlx5e_priv *priv); -int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol, +int mlx5e_configure_flower(struct mlx5e_priv *priv, struct tc_cls_flower_offload *f); int mlx5e_delete_flower(struct mlx5e_priv *priv, struct tc_cls_flower_offload *f); @@ -60,4 +63,10 @@ static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv) return atomic_read(&priv->fs.tc.ht.nelems); } +#else /* CONFIG_MLX5_ESWITCH */ +static inline int mlx5e_tc_init(struct mlx5e_priv *priv) { return 0; } +static inline void mlx5e_tc_cleanup(struct mlx5e_priv *priv) {} +static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv) { return 0; } +#endif + #endif /* __MLX5_EN_TC_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 31353e5c3c78..1d6925d4369a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -193,6 +193,7 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct sq->stats.csum_partial_inner++; } else { eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM; + sq->stats.csum_partial++; } } else sq->stats.csum_none++; @@ -394,6 +395,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev) bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) { struct mlx5e_txqsq *sq; + struct mlx5_cqe64 *cqe; u32 dma_fifo_cc; u32 nbytes; 
u16 npkts; @@ -402,7 +404,11 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) sq = container_of(cq, struct mlx5e_txqsq, cq); - if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) + if (unlikely(!MLX5E_TEST_BIT(sq->state, MLX5E_SQ_STATE_ENABLED))) + return false; + + cqe = mlx5_cqwq_get_cqe(&cq->wq); + if (!cqe) return false; npkts = 0; @@ -416,15 +422,11 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) /* avoid dirtying sq cache line every cqe */ dma_fifo_cc = sq->dma_fifo_cc; - for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) { - struct mlx5_cqe64 *cqe; + i = 0; + do { u16 wqe_counter; bool last_wqe; - cqe = mlx5_cqwq_get_cqe(&cq->wq); - if (!cqe) - break; - mlx5_cqwq_pop(&cq->wq); wqe_counter = be16_to_cpu(cqe->wqe_counter); @@ -467,7 +469,8 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) sqcc += wi->num_wqebbs; napi_consume_skb(skb, napi_budget); } while (!last_wqe); - } + + } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); mlx5_cqwq_update_db_record(&cq->wq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index 92db28a9ed43..e906b754415c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -30,66 +30,18 @@ * SOFTWARE. */ +#include #include "en.h" -static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq, - struct mlx5e_icosq *sq, - struct mlx5_cqe64 *cqe, - u16 *sqcc) +static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c) { - struct mlx5_wq_cyc *wq = &sq->wq; - u16 ci = be16_to_cpu(cqe->wqe_counter) & wq->sz_m1; - struct mlx5e_sq_wqe_info *icowi = &sq->db.ico_wqe[ci]; - struct mlx5e_rq *rq = &sq->channel->rq; + int current_cpu = smp_processor_id(); + const struct cpumask *aff; + struct irq_data *idata; - prefetch(rq); - mlx5_cqwq_pop(&cq->wq); - *sqcc += icowi->num_wqebbs; - - if (unlikely((cqe->op_own >> 4) != MLX5_CQE_REQ)) { - WARN_ONCE(true, "mlx5e: Bad OP in ICOSQ CQE: 0x%x\n", - cqe->op_own); - return; - } - - if (likely(icowi->opcode == MLX5_OPCODE_UMR)) { - mlx5e_post_rx_mpwqe(rq); - return; - } - - if (unlikely(icowi->opcode != MLX5_OPCODE_NOP)) - WARN_ONCE(true, - "mlx5e: Bad OPCODE in ICOSQ WQE info: 0x%x\n", - icowi->opcode); -} - -static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq) -{ - struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq); - struct mlx5_cqe64 *cqe; - u16 sqcc; - - if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) - return; - - cqe = mlx5_cqwq_get_cqe(&cq->wq); - if (likely(!cqe)) - return; - - /* sq->cc must be updated only after mlx5_cqwq_update_db_record(), - * otherwise a cq overrun may occur - */ - sqcc = sq->cc; - - /* by design, there's only a single cqe */ - mlx5e_poll_ico_single_cqe(cq, sq, cqe, &sqcc); - - mlx5_cqwq_update_db_record(&cq->wq); - - /* ensure cq space is freed before enabling more cqes */ - wmb(); - - sq->cc = sqcc; + idata = irq_desc_get_irq_data(c->irq_desc); + aff = irq_data_get_affinity_mask(idata); + return cpumask_test_cpu(current_cpu, aff); } int mlx5e_napi_poll(struct napi_struct *napi, int budget) @@ -100,8 +52,6 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) int work_done; int i; - clear_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags); - for (i = 0; i < c->num_tc; i++) busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget); @@ -111,25 +61,22 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget); busy |= 
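The new mlx5e_channel_no_affinity_change() helper lets the NAPI poll keep claiming the full budget only while the CPU it runs on is still part of the channel IRQ's affinity mask; once affinity has moved, the poll completes so processing can follow the interrupt. A userspace analogue of the "am I still on an allowed CPU?" test, using the process affinity mask as a stand-in for the IRQ affinity mask:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdbool.h>

    /* True when the CPU we are currently running on is still part of the
     * caller's allowed CPU set (a stand-in for the IRQ affinity mask). */
    bool still_on_allowed_cpu(void)
    {
        cpu_set_t allowed;
        int cpu = sched_getcpu();

        if (cpu < 0 || sched_getaffinity(0, sizeof(allowed), &allowed))
            return true;    /* can't tell; keep the current behaviour */

        return CPU_ISSET(cpu, &allowed);
    }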
work_done == budget; - mlx5e_poll_ico_cq(&c->icosq.cq); + busy |= c->rq.post_wqes(&c->rq); - busy |= mlx5e_post_rx_wqes(&c->rq); - - if (busy) - return budget; - - napi_complete_done(napi, work_done); - - /* avoid losing completion event during/after polling cqs */ - if (test_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags)) { - napi_schedule(napi); - return work_done; + if (busy) { + if (likely(mlx5e_channel_no_affinity_change(c))) + return budget; + if (work_done == budget) + work_done--; } + if (unlikely(!napi_complete_done(napi, work_done))) + return work_done; + for (i = 0; i < c->num_tc; i++) mlx5e_cq_arm(&c->sq[i].cq); - if (test_bit(MLX5E_RQ_STATE_AM, &c->rq.state)) + if (MLX5E_TEST_BIT(c->rq.state, MLX5E_RQ_STATE_AM)) mlx5e_rx_am(&c->rq); mlx5e_cq_arm(&c->rq.cq); @@ -143,7 +90,6 @@ void mlx5e_completion_event(struct mlx5_core_cq *mcq) struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq); cq->event_ctr++; - set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags); napi_schedule(cq->napi); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index edd11ebd392e..fc606bfd1d6e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -36,9 +36,7 @@ #include #include "mlx5_core.h" #include "fpga/core.h" -#ifdef CONFIG_MLX5_CORE_EN #include "eswitch.h" -#endif enum { MLX5_EQE_SIZE = sizeof(struct mlx5_eqe), @@ -193,6 +191,7 @@ static void eq_update_ci(struct mlx5_eq *eq, int arm) { __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2); u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24); + __raw_writel((__force u32)cpu_to_be32(val), addr); /* We still want ordering, just not swabbing, so add a barrier */ mb(); @@ -483,11 +482,9 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr) } break; -#ifdef CONFIG_MLX5_CORE_EN case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE: mlx5_eswitch_vport_event(dev->priv.eswitch, eqe); break; -#endif case MLX5_EVENT_TYPE_PORT_MODULE_EVENT: mlx5_port_module_event(dev, eqe); @@ -702,9 +699,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) u64 async_event_mask = MLX5_ASYNC_EVENT_MASK; int err; - if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH && - MLX5_CAP_GEN(dev, vport_group_manager) && - mlx5_core_is_pf(dev)) + if (MLX5_VPORT_MANAGER(dev)) async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE); if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH && diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 5b41f692acad..c77f4c0c7769 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -46,19 +46,13 @@ enum { MLX5_ACTION_DEL = 2, }; -/* E-Switch UC L2 table hash node */ -struct esw_uc_addr { - struct l2addr_node node; - u32 table_index; - u32 vport; -}; - /* Vport UC/MC hash node */ struct vport_addr { struct l2addr_node node; u8 action; u32 vport; - struct mlx5_flow_handle *flow_rule; /* SRIOV only */ + struct mlx5_flow_handle *flow_rule; + bool mpfs; /* UC MAC was added to MPFs */ /* A flag indicating that mac was added due to mc promiscuous vport */ bool mc_promisc; }; @@ -154,81 +148,6 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport, return modify_esw_vport_context_cmd(dev, vport, in, sizeof(in)); } -/* HW L2 Table (MPFS) management */ -static int set_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index, - u8 *mac, u8 vlan_valid, u16 vlan) -{ - u32 
in[MLX5_ST_SZ_DW(set_l2_table_entry_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(set_l2_table_entry_out)] = {0}; - u8 *in_mac_addr; - - MLX5_SET(set_l2_table_entry_in, in, opcode, - MLX5_CMD_OP_SET_L2_TABLE_ENTRY); - MLX5_SET(set_l2_table_entry_in, in, table_index, index); - MLX5_SET(set_l2_table_entry_in, in, vlan_valid, vlan_valid); - MLX5_SET(set_l2_table_entry_in, in, vlan, vlan); - - in_mac_addr = MLX5_ADDR_OF(set_l2_table_entry_in, in, mac_address); - ether_addr_copy(&in_mac_addr[2], mac); - - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); -} - -static int del_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index) -{ - u32 in[MLX5_ST_SZ_DW(delete_l2_table_entry_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(delete_l2_table_entry_out)] = {0}; - - MLX5_SET(delete_l2_table_entry_in, in, opcode, - MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY); - MLX5_SET(delete_l2_table_entry_in, in, table_index, index); - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); -} - -static int alloc_l2_table_index(struct mlx5_l2_table *l2_table, u32 *ix) -{ - int err = 0; - - *ix = find_first_zero_bit(l2_table->bitmap, l2_table->size); - if (*ix >= l2_table->size) - err = -ENOSPC; - else - __set_bit(*ix, l2_table->bitmap); - - return err; -} - -static void free_l2_table_index(struct mlx5_l2_table *l2_table, u32 ix) -{ - __clear_bit(ix, l2_table->bitmap); -} - -static int set_l2_table_entry(struct mlx5_core_dev *dev, u8 *mac, - u8 vlan_valid, u16 vlan, - u32 *index) -{ - struct mlx5_l2_table *l2_table = &dev->priv.eswitch->l2_table; - int err; - - err = alloc_l2_table_index(l2_table, index); - if (err) - return err; - - err = set_l2_table_entry_cmd(dev, *index, mac, vlan_valid, vlan); - if (err) - free_l2_table_index(l2_table, *index); - - return err; -} - -static void del_l2_table_entry(struct mlx5_core_dev *dev, u32 index) -{ - struct mlx5_l2_table *l2_table = &dev->priv.eswitch->l2_table; - - del_l2_table_entry_cmd(dev, index); - free_l2_table_index(l2_table, index); -} - /* E-Switch FDB */ static struct mlx5_flow_handle * __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule, @@ -455,65 +374,60 @@ typedef int (*vport_addr_action)(struct mlx5_eswitch *esw, static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) { - struct hlist_head *hash = esw->l2_table.l2_hash; - struct esw_uc_addr *esw_uc; u8 *mac = vaddr->node.addr; u32 vport = vaddr->vport; int err; - esw_uc = l2addr_hash_find(hash, mac, struct esw_uc_addr); - if (esw_uc) { + /* Skip mlx5_mpfs_add_mac for PFs, + * it is already done by the PF netdev in mlx5e_execute_l2_action + */ + if (!vport) + goto fdb_add; + + err = mlx5_mpfs_add_mac(esw->dev, mac); + if (err) { esw_warn(esw->dev, - "Failed to set L2 mac(%pM) for vport(%d), mac is already in use by vport(%d)\n", - mac, vport, esw_uc->vport); - return -EEXIST; + "Failed to add L2 table mac(%pM) for vport(%d), err(%d)\n", + mac, vport, err); + return err; } + vaddr->mpfs = true; - esw_uc = l2addr_hash_add(hash, mac, struct esw_uc_addr, GFP_KERNEL); - if (!esw_uc) - return -ENOMEM; - esw_uc->vport = vport; - - err = set_l2_table_entry(esw->dev, mac, 0, 0, &esw_uc->table_index); - if (err) - goto abort; - +fdb_add: /* SRIOV is enabled: Forward UC MAC to vport */ if (esw->fdb_table.fdb && esw->mode == SRIOV_LEGACY) vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport); - esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM index:%d fr(%p)\n", - vport, mac, esw_uc->table_index, vaddr->flow_rule); - return err; -abort: - l2addr_hash_del(esw_uc); - return err; + 
esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n", + vport, mac, vaddr->flow_rule); + + return 0; } static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) { - struct hlist_head *hash = esw->l2_table.l2_hash; - struct esw_uc_addr *esw_uc; u8 *mac = vaddr->node.addr; u32 vport = vaddr->vport; + int err = 0; - esw_uc = l2addr_hash_find(hash, mac, struct esw_uc_addr); - if (!esw_uc || esw_uc->vport != vport) { - esw_debug(esw->dev, - "MAC(%pM) doesn't belong to vport (%d)\n", - mac, vport); - return -EINVAL; - } - esw_debug(esw->dev, "\tDELETE UC MAC: vport[%d] %pM index:%d fr(%p)\n", - vport, mac, esw_uc->table_index, vaddr->flow_rule); + /* Skip mlx5_mpfs_del_mac for PFs, + * it is already done by the PF netdev in mlx5e_execute_l2_action + */ + if (!vport || !vaddr->mpfs) + goto fdb_del; - del_l2_table_entry(esw->dev, esw_uc->table_index); + err = mlx5_mpfs_del_mac(esw->dev, mac); + if (err) + esw_warn(esw->dev, + "Failed to del L2 table mac(%pM) for vport(%d), err(%d)\n", + mac, vport, err); + vaddr->mpfs = false; +fdb_del: if (vaddr->flow_rule) mlx5_del_flow_rules(vaddr->flow_rule); vaddr->flow_rule = NULL; - l2addr_hash_del(esw_uc); return 0; } @@ -1611,13 +1525,14 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num) } /* Public E-Switch API */ +#define ESW_ALLOWED(esw) ((esw) && MLX5_VPORT_MANAGER((esw)->dev)) + int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) { int err; int i, enabled_events; - if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || - MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) + if (!ESW_ALLOWED(esw)) return 0; if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) || @@ -1634,7 +1549,6 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d) mode (%d)\n", nvfs, mode); esw->mode = mode; - esw_disable_vport(esw, 0); if (mode == SRIOV_LEGACY) err = esw_create_legacy_fdb_table(esw, nvfs + 1); @@ -1647,7 +1561,11 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) if (err) esw_warn(esw->dev, "Failed to create eswitch TSAR"); - enabled_events = (mode == SRIOV_LEGACY) ? SRIOV_VPORT_EVENTS : UC_ADDR_CHANGE; + /* Don't enable vport events when in SRIOV_OFFLOADS mode, since: + * 1. L2 table (MPFS) is programmed by PF/VF representors netdevs set_rx_mode + * 2. FDB/Eswitch is programmed by user space tools + */ + enabled_events = (mode == SRIOV_LEGACY) ? 
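esw_add_uc_addr()/esw_del_uc_addr() now delegate the hardware L2 table to the shared mlx5_mpfs_add_mac()/mlx5_mpfs_del_mac() helpers, skip the call for the PF vport (whose entry is already managed by the PF netdev), and record in vaddr->mpfs whether the add actually happened so the delete path stays symmetric. The guard pattern in isolation, with hypothetical table_add()/table_del() helpers:

    #include <stdbool.h>

    struct addr_entry {
        bool in_table;      /* set only when table_add() succeeded */
    };

    extern int table_add(const unsigned char *mac);     /* 0 on success */
    extern int table_del(const unsigned char *mac);

    int addr_attach(struct addr_entry *e, const unsigned char *mac, bool skip)
    {
        int err;

        if (skip)           /* e.g. PF vport: someone else owns the entry */
            return 0;

        err = table_add(mac);
        if (err)
            return err;

        e->in_table = true; /* remembered so detach can be symmetric */
        return 0;
    }

    void addr_detach(struct addr_entry *e, const unsigned char *mac)
    {
        if (!e->in_table)
            return;

        table_del(mac);
        e->in_table = false;
    }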
SRIOV_VPORT_EVENTS : 0; for (i = 0; i <= nvfs; i++) esw_enable_vport(esw, i, enabled_events); @@ -1656,7 +1574,6 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) return 0; abort: - esw_enable_vport(esw, 0, UC_ADDR_CHANGE); esw->mode = SRIOV_NONE; return err; } @@ -1667,9 +1584,7 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) int nvports; int i; - if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || - MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH || - esw->mode == SRIOV_NONE) + if (!ESW_ALLOWED(esw) || esw->mode == SRIOV_NONE) return; esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n", @@ -1692,44 +1607,21 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) esw_offloads_cleanup(esw, nvports); esw->mode = SRIOV_NONE; - /* VPORT 0 (PF) must be enabled back with non-sriov configuration */ - esw_enable_vport(esw, 0, UC_ADDR_CHANGE); -} - -void mlx5_eswitch_attach(struct mlx5_eswitch *esw) -{ - if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || - MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) - return; - - esw_enable_vport(esw, 0, UC_ADDR_CHANGE); - /* VF Vports will be enabled when SRIOV is enabled */ -} - -void mlx5_eswitch_detach(struct mlx5_eswitch *esw) -{ - if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || - MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) - return; - - esw_disable_vport(esw, 0); } int mlx5_eswitch_init(struct mlx5_core_dev *dev) { - int l2_table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table); int total_vports = MLX5_TOTAL_VPORTS(dev); struct mlx5_eswitch *esw; int vport_num; int err; - if (!MLX5_CAP_GEN(dev, vport_group_manager) || - MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) + if (!MLX5_VPORT_MANAGER(dev)) return 0; esw_info(dev, - "Total vports %d, l2 table size(%d), per vport: max uc(%d) max mc(%d)\n", - total_vports, l2_table_size, + "Total vports %d, per vport: max uc(%d) max mc(%d)\n", + total_vports, MLX5_MAX_UC_PER_VPORT(dev), MLX5_MAX_MC_PER_VPORT(dev)); @@ -1739,14 +1631,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) esw->dev = dev; - esw->l2_table.bitmap = kcalloc(BITS_TO_LONGS(l2_table_size), - sizeof(uintptr_t), GFP_KERNEL); - if (!esw->l2_table.bitmap) { - err = -ENOMEM; - goto abort; - } - esw->l2_table.size = l2_table_size; - esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq"); if (!esw->work_queue) { err = -ENOMEM; @@ -1797,7 +1681,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) abort: if (esw->work_queue) destroy_workqueue(esw->work_queue); - kfree(esw->l2_table.bitmap); kfree(esw->vports); kfree(esw->offloads.vport_reps); kfree(esw); @@ -1806,15 +1689,13 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) { - if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || - MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) + if (!esw || !MLX5_VPORT_MANAGER(esw->dev)) return; esw_info(esw->dev, "cleanup\n"); esw->dev->priv.eswitch = NULL; destroy_workqueue(esw->work_queue); - kfree(esw->l2_table.bitmap); kfree(esw->offloads.vport_reps); kfree(esw->vports); kfree(esw); @@ -1838,8 +1719,6 @@ void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe) } /* Vport Administration */ -#define ESW_ALLOWED(esw) \ - (esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev)) #define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports) int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch 
*esw, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 834a33050969..565c8b7a399a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -37,6 +37,15 @@ #include #include #include +#include "lib/mpfs.h" + +enum { + SRIOV_NONE, + SRIOV_LEGACY, + SRIOV_OFFLOADS +}; + +#ifdef CONFIG_MLX5_ESWITCH #define MLX5_MAX_UC_PER_VPORT(dev) \ (1 << MLX5_CAP_GEN(dev, log_max_current_uc_list)) @@ -44,9 +53,6 @@ #define MLX5_MAX_MC_PER_VPORT(dev) \ (1 << MLX5_CAP_GEN(dev, log_max_current_mc_list)) -#define MLX5_L2_ADDR_HASH_SIZE (BIT(BITS_PER_BYTE)) -#define MLX5_L2_ADDR_HASH(addr) (addr[5]) - #define FDB_UPLINK_VPORT 0xffff #define MLX5_MIN_BW_SHARE 1 @@ -54,48 +60,6 @@ #define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \ min_t(u32, max_t(u32, (rate) / (divider), MLX5_MIN_BW_SHARE), limit) -/* L2 -mac address based- hash helpers */ -struct l2addr_node { - struct hlist_node hlist; - u8 addr[ETH_ALEN]; -}; - -#define for_each_l2hash_node(hn, tmp, hash, i) \ - for (i = 0; i < MLX5_L2_ADDR_HASH_SIZE; i++) \ - hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist) - -#define l2addr_hash_find(hash, mac, type) ({ \ - int ix = MLX5_L2_ADDR_HASH(mac); \ - bool found = false; \ - type *ptr = NULL; \ - \ - hlist_for_each_entry(ptr, &hash[ix], node.hlist) \ - if (ether_addr_equal(ptr->node.addr, mac)) {\ - found = true; \ - break; \ - } \ - if (!found) \ - ptr = NULL; \ - ptr; \ -}) - -#define l2addr_hash_add(hash, mac, type, gfp) ({ \ - int ix = MLX5_L2_ADDR_HASH(mac); \ - type *ptr = NULL; \ - \ - ptr = kzalloc(sizeof(type), gfp); \ - if (ptr) { \ - ether_addr_copy(ptr->node.addr, mac); \ - hlist_add_head(&ptr->node.hlist, &hash[ix]);\ - } \ - ptr; \ -}) - -#define l2addr_hash_del(ptr) ({ \ - hlist_del(&ptr->node.hlist); \ - kfree(ptr); \ -}) - struct vport_ingress { struct mlx5_flow_table *acl; struct mlx5_flow_group *allow_untagged_spoofchk_grp; @@ -150,12 +114,6 @@ struct mlx5_vport { u16 enabled_events; }; -struct mlx5_l2_table { - struct hlist_head l2_hash[MLX5_L2_ADDR_HASH_SIZE]; - u32 size; - unsigned long *bitmap; -}; - struct mlx5_eswitch_fdb { void *fdb; union { @@ -175,12 +133,6 @@ struct mlx5_eswitch_fdb { }; }; -enum { - SRIOV_NONE, - SRIOV_LEGACY, - SRIOV_OFFLOADS -}; - struct mlx5_esw_sq { struct mlx5_flow_handle *send_to_vport_rule; struct list_head list; @@ -222,7 +174,6 @@ struct esw_mc_addr { /* SRIOV only */ struct mlx5_eswitch { struct mlx5_core_dev *dev; - struct mlx5_l2_table l2_table; struct mlx5_eswitch_fdb fdb_table; struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE]; struct workqueue_struct *work_queue; @@ -250,8 +201,6 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports); /* E-Switch API */ int mlx5_eswitch_init(struct mlx5_core_dev *dev); void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw); -void mlx5_eswitch_attach(struct mlx5_eswitch *esw); -void mlx5_eswitch_detach(struct mlx5_eswitch *esw); void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe); int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode); void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw); @@ -345,4 +294,13 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, #define esw_debug(dev, format, ...) 
\ mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__) +#else /* CONFIG_MLX5_ESWITCH */ +/* eswitch API stubs */ +static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; } +static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {} +static inline void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe) {} +static inline int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) { return 0; } +static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) {} +#endif /* CONFIG_MLX5_ESWITCH */ + #endif /* __MLX5_ESWITCH_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 5bc0593bd76e..d9fd8570b07c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -433,6 +433,8 @@ static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw) struct mlx5_flow_table *fdb = NULL; int esw_size, err = 0; u32 flags = 0; + u32 max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) | + MLX5_CAP_GEN(dev, max_flow_counter_15_0); root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB); if (!root_ns) { @@ -443,9 +445,9 @@ static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw) esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n", MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size), - MLX5_CAP_GEN(dev, max_flow_counter), ESW_OFFLOADS_NUM_GROUPS); + max_flow_counter, ESW_OFFLOADS_NUM_GROUPS); - esw_size = min_t(int, MLX5_CAP_GEN(dev, max_flow_counter) * ESW_OFFLOADS_NUM_GROUPS, + esw_size = min_t(int, max_flow_counter * ESW_OFFLOADS_NUM_GROUPS, 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c index e37453d838db..c0fd2212e890 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c @@ -71,11 +71,11 @@ int mlx5_fpga_access_reg(struct mlx5_core_dev *dev, u8 size, u64 addr, return 0; } -int mlx5_fpga_caps(struct mlx5_core_dev *dev, u32 *caps) +int mlx5_fpga_caps(struct mlx5_core_dev *dev) { u32 in[MLX5_ST_SZ_DW(fpga_cap)] = {0}; - return mlx5_core_access_reg(dev, in, sizeof(in), caps, + return mlx5_core_access_reg(dev, in, sizeof(in), dev->caps.fpga, MLX5_ST_SZ_BYTES(fpga_cap), MLX5_REG_FPGA_CAP, 0, 0); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h index 94bdfd47c3f0..d05233c9b4f6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h @@ -65,7 +65,7 @@ struct mlx5_fpga_qp_counters { u64 rx_total_drop; }; -int mlx5_fpga_caps(struct mlx5_core_dev *dev, u32 *caps); +int mlx5_fpga_caps(struct mlx5_core_dev *dev); int mlx5_fpga_query(struct mlx5_core_dev *dev, struct mlx5_fpga_query *query); int mlx5_fpga_ctrl_op(struct mlx5_core_dev *dev, u8 op); int mlx5_fpga_access_reg(struct mlx5_core_dev *dev, u8 size, u64 addr, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c index 9034e9960a76..dc8970346521 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c @@ 
-139,8 +139,7 @@ int mlx5_fpga_device_start(struct mlx5_core_dev *mdev) if (err) goto out; - err = mlx5_fpga_caps(fdev->mdev, - fdev->mdev->caps.hca_cur[MLX5_CAP_FPGA]); + err = mlx5_fpga_caps(fdev->mdev); if (err) goto out; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index e750f07793b8..36ecc2b2e187 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -263,7 +263,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, MLX5_SET(flow_context, in_flow_context, modify_header_id, fte->modify_id); in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context, match_value); - memcpy(in_match_value, &fte->val, MLX5_ST_SZ_BYTES(fte_match_param)); + memcpy(in_match_value, &fte->val, sizeof(fte->val)); in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination); if (fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { @@ -293,6 +293,9 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, } if (fte->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { + int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev, + log_max_flow_counter, + ft->type)); int list_size = 0; list_for_each_entry(dst, &fte->node.children, node.list) { @@ -305,12 +308,17 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, in_dests += MLX5_ST_SZ_BYTES(dest_format_struct); list_size++; } + if (list_size > max_list_size) { + err = -EINVAL; + goto err_out; + } MLX5_SET(flow_context, in_flow_context, flow_counter_list_size, list_size); } err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); +err_out: kvfree(in); return err; } @@ -359,7 +367,7 @@ int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev, return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } -int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id) +int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id) { u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0}; u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0}; @@ -374,7 +382,7 @@ int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id) return err; } -int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id) +int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id) { u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {0}; u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)] = {0}; @@ -385,7 +393,7 @@ int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id) return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } -int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id, +int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id, u64 *packets, u64 *bytes) { u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) + @@ -409,14 +417,14 @@ int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id, } struct mlx5_cmd_fc_bulk { - u16 id; + u32 id; int num; int outlen; u32 out[0]; }; struct mlx5_cmd_fc_bulk * -mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u16 id, int num) +mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u32 id, int num) { struct mlx5_cmd_fc_bulk *b; int outlen = @@ -453,7 +461,7 @@ mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b) } void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev, - struct mlx5_cmd_fc_bulk *b, u16 id, + struct mlx5_cmd_fc_bulk *b, u32 id, u64 *packets, u64 *bytes) { int index = id - b->id; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h index 0f98a7cf4877..c6d7bdf255b6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h @@ -74,20 +74,20 @@ int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, u32 underlay_qpn); -int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id); -int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id); -int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id, +int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id); +int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id); +int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id, u64 *packets, u64 *bytes); struct mlx5_cmd_fc_bulk; struct mlx5_cmd_fc_bulk * -mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u16 id, int num); +mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u32 id, int num); void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b); int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b); void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev, - struct mlx5_cmd_fc_bulk *b, u16 id, + struct mlx5_cmd_fc_bulk *b, u32 id, u64 *packets, u64 *bytes); #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index e8690fe46bf2..5a7bea688ec8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -36,6 +36,7 @@ #include "mlx5_core.h" #include "fs_core.h" #include "fs_cmd.h" +#include "diag/fs_tracepoint.h" #define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\ sizeof(struct init_tree_node)) @@ -82,8 +83,8 @@ #define ETHTOOL_PRIO_NUM_LEVELS 1 #define ETHTOOL_NUM_PRIOS 11 #define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS) -/* Vlan, mac, ttc, aRFS */ -#define KERNEL_NIC_PRIO_NUM_LEVELS 4 +/* Vlan, mac, ttc, inner ttc, aRFS */ +#define KERNEL_NIC_PRIO_NUM_LEVELS 5 #define KERNEL_NIC_NUM_PRIOS 1 /* One more level for tc */ #define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1) @@ -150,6 +151,23 @@ enum fs_i_mutex_lock_class { FS_MUTEX_CHILD }; +static const struct rhashtable_params rhash_fte = { + .key_len = FIELD_SIZEOF(struct fs_fte, val), + .key_offset = offsetof(struct fs_fte, val), + .head_offset = offsetof(struct fs_fte, hash), + .automatic_shrinking = true, + .min_size = 1, +}; + +static const struct rhashtable_params rhash_fg = { + .key_len = FIELD_SIZEOF(struct mlx5_flow_group, mask), + .key_offset = offsetof(struct mlx5_flow_group, mask), + .head_offset = offsetof(struct mlx5_flow_group, hash), + .automatic_shrinking = true, + .min_size = 1, + +}; + static void del_rule(struct fs_node *node); static void del_flow_table(struct fs_node *node); static void del_flow_group(struct fs_node *node); @@ -255,71 +273,77 @@ static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns, return NULL; } -static bool masked_memcmp(void *mask, void *val1, void *val2, size_t size) +static bool check_last_reserved(const u32 *match_criteria) { - unsigned int i; + char *match_criteria_reserved = + MLX5_ADDR_OF(fte_match_param, match_criteria, MLX5_FTE_MATCH_PARAM_RESERVED); - for (i = 0; i < size; i++, mask++, val1++, val2++) - if ((*((u8 *)val1) & (*(u8 *)mask)) != - ((*(u8 *)val2) & (*(u8 *)mask))) - return false; - - return true; + return !match_criteria_reserved[0] && + !memcmp(match_criteria_reserved, match_criteria_reserved + 1, + MLX5_FLD_SZ_BYTES(fte_match_param, + MLX5_FTE_MATCH_PARAM_RESERVED) - 1); } -static bool compare_match_value(struct mlx5_flow_group_mask *mask, - void *fte_param1, void *fte_param2) +static bool check_valid_mask(u8 
match_criteria_enable, const u32 *match_criteria) { - if (mask->match_criteria_enable & - 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS) { - void *fte_match1 = MLX5_ADDR_OF(fte_match_param, - fte_param1, outer_headers); - void *fte_match2 = MLX5_ADDR_OF(fte_match_param, - fte_param2, outer_headers); - void *fte_mask = MLX5_ADDR_OF(fte_match_param, - mask->match_criteria, outer_headers); + if (match_criteria_enable & ~( + (1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS) | + (1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS) | + (1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS))) + return false; - if (!masked_memcmp(fte_mask, fte_match1, fte_match2, - MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4))) + if (!(match_criteria_enable & + 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS)) { + char *fg_type_mask = MLX5_ADDR_OF(fte_match_param, + match_criteria, outer_headers); + + if (fg_type_mask[0] || + memcmp(fg_type_mask, fg_type_mask + 1, + MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4) - 1)) return false; } - if (mask->match_criteria_enable & - 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS) { - void *fte_match1 = MLX5_ADDR_OF(fte_match_param, - fte_param1, misc_parameters); - void *fte_match2 = MLX5_ADDR_OF(fte_match_param, - fte_param2, misc_parameters); - void *fte_mask = MLX5_ADDR_OF(fte_match_param, - mask->match_criteria, misc_parameters); + if (!(match_criteria_enable & + 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS)) { + char *fg_type_mask = MLX5_ADDR_OF(fte_match_param, + match_criteria, misc_parameters); - if (!masked_memcmp(fte_mask, fte_match1, fte_match2, - MLX5_ST_SZ_BYTES(fte_match_set_misc))) + if (fg_type_mask[0] || + memcmp(fg_type_mask, fg_type_mask + 1, + MLX5_ST_SZ_BYTES(fte_match_set_misc) - 1)) return false; } - if (mask->match_criteria_enable & - 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS) { - void *fte_match1 = MLX5_ADDR_OF(fte_match_param, - fte_param1, inner_headers); - void *fte_match2 = MLX5_ADDR_OF(fte_match_param, - fte_param2, inner_headers); - void *fte_mask = MLX5_ADDR_OF(fte_match_param, - mask->match_criteria, inner_headers); + if (!(match_criteria_enable & + 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS)) { + char *fg_type_mask = MLX5_ADDR_OF(fte_match_param, + match_criteria, inner_headers); - if (!masked_memcmp(fte_mask, fte_match1, fte_match2, - MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4))) + if (fg_type_mask[0] || + memcmp(fg_type_mask, fg_type_mask + 1, + MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4) - 1)) return false; } - return true; + + return check_last_reserved(match_criteria); } -static bool compare_match_criteria(u8 match_criteria_enable1, - u8 match_criteria_enable2, - void *mask1, void *mask2) +static bool check_valid_spec(const struct mlx5_flow_spec *spec) { - return match_criteria_enable1 == match_criteria_enable2 && - !memcmp(mask1, mask2, MLX5_ST_SZ_BYTES(fte_match_param)); + int i; + + if (!check_valid_mask(spec->match_criteria_enable, spec->match_criteria)) { + pr_warn("mlx5_core: Match criteria given mismatches match_criteria_enable\n"); + return false; + } + + for (i = 0; i < MLX5_ST_SZ_DW_MATCH_PARAM; i++) + if (spec->match_value[i] & ~spec->match_criteria[i]) { + pr_warn("mlx5_core: match_value differs from match_criteria\n"); + return false; + } + + return check_last_reserved(spec->match_value); } static struct mlx5_flow_root_namespace *find_root(struct fs_node *node) 
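check_last_reserved() and check_valid_mask() test that a byte region is entirely zero without an explicit loop: check the first byte, then memcmp() the region against itself shifted by one. If byte 0 is zero and every byte equals its successor, all bytes are zero. The idiom in standalone form (region_is_zero() is just a name for the example):

    #include <stdbool.h>
    #include <stddef.h>
    #include <string.h>

    /* True when all 'len' bytes at 'p' are zero.  Same shifted self-comparison
     * trick the flow-steering mask checks rely on. */
    bool region_is_zero(const unsigned char *p, size_t len)
    {
        if (len == 0)
            return true;

        return p[0] == 0 && memcmp(p, p + 1, len - 1) == 0;
    }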
@@ -360,6 +384,8 @@ static void del_flow_table(struct fs_node *node) err = mlx5_cmd_destroy_flow_table(dev, ft); if (err) mlx5_core_warn(dev, "flow steering can't destroy ft\n"); + ida_destroy(&ft->fte_allocator); + rhltable_destroy(&ft->fgs_hash); fs_get_obj(prio, ft->node.parent); prio->num_ft--; } @@ -370,22 +396,16 @@ static void del_rule(struct fs_node *node) struct mlx5_flow_table *ft; struct mlx5_flow_group *fg; struct fs_fte *fte; - u32 *match_value; int modify_mask; struct mlx5_core_dev *dev = get_dev(node); - int match_len = MLX5_ST_SZ_BYTES(fte_match_param); int err; bool update_fte = false; - match_value = kvzalloc(match_len, GFP_KERNEL); - if (!match_value) - return; - fs_get_obj(rule, node); fs_get_obj(fte, rule->node.parent); fs_get_obj(fg, fte->node.parent); - memcpy(match_value, fte->val, sizeof(fte->val)); fs_get_obj(ft, fg->node.parent); + trace_mlx5_fs_del_rule(rule); list_del(&rule->node.list); if (rule->sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) { mutex_lock(&rule->dest_attr.ft->lock); @@ -414,7 +434,18 @@ static void del_rule(struct fs_node *node) "%s can't del rule fg id=%d fte_index=%d\n", __func__, fg->id, fte->index); } - kvfree(match_value); +} + +static void destroy_fte(struct fs_fte *fte, struct mlx5_flow_group *fg) +{ + struct mlx5_flow_table *ft; + int ret; + + ret = rhashtable_remove_fast(&fg->ftes_hash, &fte->hash, rhash_fte); + WARN_ON(ret); + fte->status = 0; + fs_get_obj(ft, fg->node.parent); + ida_simple_remove(&ft->fte_allocator, fte->index); } static void del_fte(struct fs_node *node) @@ -428,6 +459,7 @@ static void del_fte(struct fs_node *node) fs_get_obj(fte, node); fs_get_obj(fg, fte->node.parent); fs_get_obj(ft, fg->node.parent); + trace_mlx5_fs_del_fte(fte); dev = get_dev(&ft->node); err = mlx5_cmd_delete_fte(dev, ft, @@ -437,8 +469,7 @@ static void del_fte(struct fs_node *node) "flow steering can't delete fte in index %d of flow group id %d\n", fte->index, fg->id); - fte->status = 0; - fg->num_ftes--; + destroy_fte(fte, fg); } static void del_flow_group(struct fs_node *node) @@ -446,14 +477,21 @@ static void del_flow_group(struct fs_node *node) struct mlx5_flow_group *fg; struct mlx5_flow_table *ft; struct mlx5_core_dev *dev; + int err; fs_get_obj(fg, node); fs_get_obj(ft, fg->node.parent); dev = get_dev(&ft->node); + trace_mlx5_fs_del_fg(fg); if (ft->autogroup.active) ft->autogroup.num_groups--; + rhashtable_destroy(&fg->ftes_hash); + err = rhltable_remove(&ft->fgs_hash, + &fg->hash, + rhash_fg); + WARN_ON(err); if (mlx5_cmd_destroy_flow_group(dev, ft, fg->id)) mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n", fg->id, ft->id); @@ -488,10 +526,17 @@ static struct mlx5_flow_group *alloc_flow_group(u32 *create_fg_in) u8 match_criteria_enable = MLX5_GET(create_flow_group_in, create_fg_in, match_criteria_enable); + int ret; + fg = kzalloc(sizeof(*fg), GFP_KERNEL); if (!fg) return ERR_PTR(-ENOMEM); + ret = rhashtable_init(&fg->ftes_hash, &rhash_fte); + if (ret) { + kfree(fg); + return ERR_PTR(ret); + } fg->mask.match_criteria_enable = match_criteria_enable; memcpy(&fg->mask.match_criteria, match_criteria, sizeof(fg->mask.match_criteria)); @@ -509,10 +554,17 @@ static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_ft u32 flags) { struct mlx5_flow_table *ft; + int ret; ft = kzalloc(sizeof(*ft), GFP_KERNEL); if (!ft) - return NULL; + return ERR_PTR(-ENOMEM); + + ret = rhltable_init(&ft->fgs_hash, &rhash_fg); + if (ret) { + kfree(ft); + return ERR_PTR(ret); + } ft->level = level; ft->node.type = 
FS_TYPE_FLOW_TABLE; @@ -523,6 +575,7 @@ static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_ft ft->flags = flags; INIT_LIST_HEAD(&ft->fwd_rules); mutex_init(&ft->lock); + ida_init(&ft->fte_allocator); return ft; } @@ -812,8 +865,8 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa ft_attr->max_fte ? roundup_pow_of_two(ft_attr->max_fte) : 0, root->table_type, op_mod, ft_attr->flags); - if (!ft) { - err = -ENOMEM; + if (IS_ERR(ft)) { + err = PTR_ERR(ft); goto unlock_root; } @@ -839,6 +892,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa destroy_ft: mlx5_cmd_destroy_flow_table(root->dev, ft); free_ft: + ida_destroy(&ft->fte_allocator); kfree(ft); unlock_root: mutex_unlock(&root->chain_lock); @@ -924,11 +978,13 @@ static struct mlx5_flow_group *create_flow_group_common(struct mlx5_flow_table * if (IS_ERR(fg)) return fg; + err = rhltable_insert(&ft->fgs_hash, &fg->hash, rhash_fg); + if (err) + goto err_free_fg; + err = mlx5_cmd_create_flow_group(dev, ft, fg_in, &fg->id); - if (err) { - kfree(fg); - return ERR_PTR(err); - } + if (err) + goto err_remove_fg; if (ft->autogroup.active) ft->autogroup.num_groups++; @@ -938,14 +994,33 @@ static struct mlx5_flow_group *create_flow_group_common(struct mlx5_flow_table * /* Add node to group list */ list_add(&fg->node.list, prev_fg); + trace_mlx5_fs_add_fg(fg); return fg; + +err_remove_fg: + WARN_ON(rhltable_remove(&ft->fgs_hash, + &fg->hash, + rhash_fg)); +err_free_fg: + rhashtable_destroy(&fg->ftes_hash); + kfree(fg); + + return ERR_PTR(err); } struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *fg_in) { + void *match_criteria = MLX5_ADDR_OF(create_flow_group_in, + fg_in, match_criteria); + u8 match_criteria_enable = MLX5_GET(create_flow_group_in, + fg_in, + match_criteria_enable); struct mlx5_flow_group *fg; + if (!check_valid_mask(match_criteria_enable, match_criteria)) + return ERR_PTR(-EINVAL); + if (ft->autogroup.active) return ERR_PTR(-EPERM); @@ -1102,43 +1177,38 @@ add_rule_fte(struct fs_fte *fte, return ERR_PTR(err); } -/* Assumed fg is locked */ -static unsigned int get_free_fte_index(struct mlx5_flow_group *fg, - struct list_head **prev) -{ - struct fs_fte *fte; - unsigned int start = fg->start_index; - - if (prev) - *prev = &fg->node.children; - - /* assumed list is sorted by index */ - fs_for_each_fte(fte, fg) { - if (fte->index != start) - return start; - start++; - if (prev) - *prev = &fte->node.list; - } - - return start; -} - -/* prev is output, prev->next = new_fte */ static struct fs_fte *create_fte(struct mlx5_flow_group *fg, u32 *match_value, - struct mlx5_flow_act *flow_act, - struct list_head **prev) + struct mlx5_flow_act *flow_act) { + struct mlx5_flow_table *ft; struct fs_fte *fte; int index; + int ret; + + fs_get_obj(ft, fg->node.parent); + index = ida_simple_get(&ft->fte_allocator, fg->start_index, + fg->start_index + fg->max_ftes, + GFP_KERNEL); + if (index < 0) + return ERR_PTR(index); - index = get_free_fte_index(fg, prev); fte = alloc_fte(flow_act, match_value, index); - if (IS_ERR(fte)) - return fte; + if (IS_ERR(fte)) { + ret = PTR_ERR(fte); + goto err_alloc; + } + ret = rhashtable_insert_fast(&fg->ftes_hash, &fte->hash, rhash_fte); + if (ret) + goto err_hash; return fte; + +err_hash: + kfree(fte); +err_alloc: + ida_simple_remove(&ft->fte_allocator, index); + return ERR_PTR(ret); } static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft, @@ -1226,79 +1296,104 @@ static struct 
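create_fte() drops the old "walk the sorted FTE list for a hole" logic in favour of ida_simple_get() over the half-open window [fg->start_index, fg->start_index + fg->max_ftes), so each flow group hands out indexes from its own range and a full group simply fails the allocation. A tiny bitmap-based equivalent of that windowed allocator, not the kernel IDA, just the same contract:

    #include <stdbool.h>

    #define WINDOW_SIZE 64

    struct index_window {
        unsigned int start;         /* first index this group may use */
        bool used[WINDOW_SIZE];     /* occupancy within the window */
    };

    /* Returns an unused index in [start, start + WINDOW_SIZE), or -1 when the
     * window is exhausted (the caller treats that as "group is full"). */
    int window_alloc(struct index_window *w)
    {
        for (unsigned int i = 0; i < WINDOW_SIZE; i++) {
            if (!w->used[i]) {
                w->used[i] = true;
                return (int)(w->start + i);
            }
        }
        return -1;
    }

    void window_free(struct index_window *w, unsigned int index)
    {
        w->used[index - w->start] = false;
    }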
mlx5_flow_rule *find_flow_rule(struct fs_fte *fte, return NULL; } +static bool check_conflicting_actions(u32 action1, u32 action2) +{ + u32 xored_actions = action1 ^ action2; + + /* if one rule only wants to count, it's ok */ + if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT || + action2 == MLX5_FLOW_CONTEXT_ACTION_COUNT) + return false; + + if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP | + MLX5_FLOW_CONTEXT_ACTION_ENCAP | + MLX5_FLOW_CONTEXT_ACTION_DECAP)) + return true; + + return false; +} + +static int check_conflicting_ftes(struct fs_fte *fte, const struct mlx5_flow_act *flow_act) +{ + if (check_conflicting_actions(flow_act->action, fte->action)) { + mlx5_core_warn(get_dev(&fte->node), + "Found two FTEs with conflicting actions\n"); + return -EEXIST; + } + + if (fte->flow_tag != flow_act->flow_tag) { + mlx5_core_warn(get_dev(&fte->node), + "FTE flow tag %u already exists with different flow tag %u\n", + fte->flow_tag, + flow_act->flow_tag); + return -EEXIST; + } + + return 0; +} + static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg, u32 *match_value, struct mlx5_flow_act *flow_act, struct mlx5_flow_destination *dest, - int dest_num) + int dest_num, + struct fs_fte *fte) { struct mlx5_flow_handle *handle; struct mlx5_flow_table *ft; - struct list_head *prev; - struct fs_fte *fte; int i; - nested_lock_ref_node(&fg->node, FS_MUTEX_PARENT); - fs_for_each_fte(fte, fg) { + if (fte) { + int old_action; + int ret; + nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD); - if (compare_match_value(&fg->mask, match_value, &fte->val) && - (flow_act->action & fte->action)) { - int old_action = fte->action; - - if (fte->flow_tag != flow_act->flow_tag) { - mlx5_core_warn(get_dev(&fte->node), - "FTE flow tag %u already exists with different flow tag %u\n", - fte->flow_tag, - flow_act->flow_tag); - handle = ERR_PTR(-EEXIST); - goto unlock_fte; - } - - fte->action |= flow_act->action; - handle = add_rule_fte(fte, fg, dest, dest_num, - old_action != flow_act->action); - if (IS_ERR(handle)) { - fte->action = old_action; - goto unlock_fte; - } else { - goto add_rules; - } + ret = check_conflicting_ftes(fte, flow_act); + if (ret) { + handle = ERR_PTR(ret); + goto unlock_fte; + } + + old_action = fte->action; + fte->action |= flow_act->action; + handle = add_rule_fte(fte, fg, dest, dest_num, + old_action != flow_act->action); + if (IS_ERR(handle)) { + fte->action = old_action; + goto unlock_fte; + } else { + trace_mlx5_fs_set_fte(fte, false); + goto add_rules; } - unlock_ref_node(&fte->node); } fs_get_obj(ft, fg->node.parent); - if (fg->num_ftes >= fg->max_ftes) { - handle = ERR_PTR(-ENOSPC); - goto unlock_fg; - } - fte = create_fte(fg, match_value, flow_act, &prev); - if (IS_ERR(fte)) { - handle = (void *)fte; - goto unlock_fg; - } + fte = create_fte(fg, match_value, flow_act); + if (IS_ERR(fte)) + return (void *)fte; tree_init_node(&fte->node, 0, del_fte); nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD); handle = add_rule_fte(fte, fg, dest, dest_num, false); if (IS_ERR(handle)) { unlock_ref_node(&fte->node); + destroy_fte(fte, fg); kfree(fte); - goto unlock_fg; + return handle; } - fg->num_ftes++; - tree_add_node(&fte->node, &fg->node); - list_add(&fte->node.list, prev); + /* fte list isn't sorted */ + list_add_tail(&fte->node.list, &fg->node.children); + trace_mlx5_fs_set_fte(fte, true); add_rules: for (i = 0; i < handle->num_rules; i++) { - if (atomic_read(&handle->rule[i]->node.refcount) == 1) + if (atomic_read(&handle->rule[i]->node.refcount) == 1) { 
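check_conflicting_actions() lets a count-only rule share an FTE with anything, and otherwise reports a conflict when the two action sets disagree on any of the drop/encap/decap bits; XOR-ing the masks isolates exactly the bits on which they differ. The same predicate in standalone form, with illustrative flag values rather than the real MLX5_FLOW_CONTEXT_ACTION_* constants:

    #include <stdbool.h>
    #include <stdint.h>

    #define ACT_FWD   0x1u
    #define ACT_DROP  0x2u
    #define ACT_COUNT 0x4u
    #define ACT_ENCAP 0x8u
    #define ACT_DECAP 0x10u

    bool actions_conflict(uint32_t a, uint32_t b)
    {
        uint32_t disagree = a ^ b;  /* bits set in exactly one of the two */

        /* A rule that only counts never conflicts. */
        if (a == ACT_COUNT || b == ACT_COUNT)
            return false;

        /* Disagreement on drop/encap/decap cannot be merged into one FTE. */
        return (disagree & (ACT_DROP | ACT_ENCAP | ACT_DECAP)) != 0;
    }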
tree_add_node(&handle->rule[i]->node, &fte->node); + trace_mlx5_fs_add_rule(handle->rule[i]); + } } unlock_fte: unlock_ref_node(&fte->node); -unlock_fg: - unlock_ref_node(&fg->node); return handle; } @@ -1346,6 +1441,96 @@ static bool dest_is_valid(struct mlx5_flow_destination *dest, return true; } +static struct mlx5_flow_handle * +try_add_to_existing_fg(struct mlx5_flow_table *ft, + struct mlx5_flow_spec *spec, + struct mlx5_flow_act *flow_act, + struct mlx5_flow_destination *dest, + int dest_num) +{ + struct mlx5_flow_group *g; + struct mlx5_flow_handle *rule = ERR_PTR(-ENOENT); + struct rhlist_head *tmp, *list; + struct match_list { + struct list_head list; + struct mlx5_flow_group *g; + } match_list, *iter; + LIST_HEAD(match_head); + + rcu_read_lock(); + /* Collect all fgs which has a matching match_criteria */ + list = rhltable_lookup(&ft->fgs_hash, spec, rhash_fg); + rhl_for_each_entry_rcu(g, tmp, list, hash) { + struct match_list *curr_match; + + if (likely(list_empty(&match_head))) { + match_list.g = g; + list_add_tail(&match_list.list, &match_head); + continue; + } + curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC); + + if (!curr_match) { + rcu_read_unlock(); + rule = ERR_PTR(-ENOMEM); + goto free_list; + } + curr_match->g = g; + list_add_tail(&curr_match->list, &match_head); + } + rcu_read_unlock(); + + /* Try to find a fg that already contains a matching fte */ + list_for_each_entry(iter, &match_head, list) { + struct fs_fte *fte; + + g = iter->g; + nested_lock_ref_node(&g->node, FS_MUTEX_PARENT); + fte = rhashtable_lookup_fast(&g->ftes_hash, spec->match_value, + rhash_fte); + if (fte) { + rule = add_rule_fg(g, spec->match_value, + flow_act, dest, dest_num, fte); + unlock_ref_node(&g->node); + goto free_list; + } + unlock_ref_node(&g->node); + } + + /* No group with matching fte found. Try to add a new fte to any + * matching fg. + */ + list_for_each_entry(iter, &match_head, list) { + g = iter->g; + + nested_lock_ref_node(&g->node, FS_MUTEX_PARENT); + rule = add_rule_fg(g, spec->match_value, + flow_act, dest, dest_num, NULL); + if (!IS_ERR(rule) || PTR_ERR(rule) != -ENOSPC) { + unlock_ref_node(&g->node); + goto free_list; + } + unlock_ref_node(&g->node); + } + +free_list: + if (!list_empty(&match_head)) { + struct match_list *match_tmp; + + /* The most common case is having one FG. Since we want to + * optimize this case, we save the first on the stack. + * Therefore, no need to free it. 
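/*
 * Editor's illustrative sketch (not part of the patch): the lookup above
 * keeps the first matching flow group in a stack-allocated node and only
 * kmalloc()s nodes for additional matches, since a single matching group
 * is the common case; the free path then skips the stack node. The
 * user-space model below shows the same pattern on a singly linked list
 * of integers; all names here are invented for the example.
 */
#include <stdio.h>
#include <stdlib.h>

struct match_node {
	int value;
	struct match_node *next;
};

static void collect_matches(const int *candidates, int n, int wanted)
{
	struct match_node first;	/* common case: lives on the stack */
	struct match_node *head = NULL, *tail = NULL, *iter, *tmp;
	int i;

	for (i = 0; i < n; i++) {
		struct match_node *node;

		if (candidates[i] != wanted)
			continue;
		if (!head) {
			first.value = candidates[i];
			first.next = NULL;
			head = tail = &first;
			continue;
		}
		node = malloc(sizeof(*node));	/* extra matches go to the heap */
		if (!node)
			break;
		node->value = candidates[i];
		node->next = NULL;
		tail->next = node;
		tail = node;
	}

	for (iter = head; iter; iter = iter->next)
		printf("match: %d\n", iter->value);

	/* free heap nodes only; the first node was never allocated */
	iter = head ? head->next : NULL;
	while (iter) {
		tmp = iter->next;
		free(iter);
		iter = tmp;
	}
}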
+ */ + list_del(&list_first_entry(&match_head, typeof(*iter), list)->list); + list_for_each_entry_safe(iter, match_tmp, &match_head, list) { + list_del(&iter->list); + kfree(iter); + } + } + + return rule; +} + static struct mlx5_flow_handle * _mlx5_add_flow_rules(struct mlx5_flow_table *ft, struct mlx5_flow_spec *spec, @@ -1358,22 +1543,18 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft, struct mlx5_flow_handle *rule; int i; + if (!check_valid_spec(spec)) + return ERR_PTR(-EINVAL); + for (i = 0; i < dest_num; i++) { if (!dest_is_valid(&dest[i], flow_act->action, ft)) return ERR_PTR(-EINVAL); } nested_lock_ref_node(&ft->node, FS_MUTEX_GRANDPARENT); - fs_for_each_fg(g, ft) - if (compare_match_criteria(g->mask.match_criteria_enable, - spec->match_criteria_enable, - g->mask.match_criteria, - spec->match_criteria)) { - rule = add_rule_fg(g, spec->match_value, - flow_act, dest, dest_num); - if (!IS_ERR(rule) || PTR_ERR(rule) != -ENOSPC) - goto unlock; - } + rule = try_add_to_existing_fg(ft, spec, flow_act, dest, dest_num); + if (!IS_ERR(rule)) + goto unlock; g = create_autogroup(ft, spec->match_criteria_enable, spec->match_criteria); @@ -1382,7 +1563,8 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft, goto unlock; } - rule = add_rule_fg(g, spec->match_value, flow_act, dest, dest_num); + rule = add_rule_fg(g, spec->match_value, flow_act, dest, + dest_num, NULL); if (IS_ERR(rule)) { /* Remove assumes refcount > 0 and autogroup creates a group * with a refcount = 0. diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 990acee6fb09..48dd78975062 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -34,6 +34,7 @@ #define _MLX5_FS_CORE_ #include +#include enum fs_node_type { FS_TYPE_NAMESPACE, @@ -51,6 +52,7 @@ enum fs_flow_table_type { FS_FT_FDB = 0X4, FS_FT_SNIFFER_RX = 0X5, FS_FT_SNIFFER_TX = 0X6, + FS_FT_MAX_TYPE = FS_FT_SNIFFER_TX, }; enum fs_flow_table_op_mod { @@ -118,6 +120,8 @@ struct mlx5_flow_table { /* FWD rules that point on this flow table */ struct list_head fwd_rules; u32 flags; + struct ida fte_allocator; + struct rhltable fgs_hash; }; struct mlx5_fc_cache { @@ -136,17 +140,29 @@ struct mlx5_fc { u64 lastpackets; u64 lastbytes; - u16 id; + u32 id; bool deleted; bool aging; struct mlx5_fc_cache cache ____cacheline_aligned_in_smp; }; +#define MLX5_FTE_MATCH_PARAM_RESERVED reserved_at_600 +/* Calculate the fte_match_param length and without the reserved length. + * Make sure the reserved field is the last. 
+ */ +#define MLX5_ST_SZ_DW_MATCH_PARAM \ + ((MLX5_BYTE_OFF(fte_match_param, MLX5_FTE_MATCH_PARAM_RESERVED) / sizeof(u32)) + \ + BUILD_BUG_ON_ZERO(MLX5_ST_SZ_BYTES(fte_match_param) != \ + MLX5_FLD_SZ_BYTES(fte_match_param, \ + MLX5_FTE_MATCH_PARAM_RESERVED) +\ + MLX5_BYTE_OFF(fte_match_param, \ + MLX5_FTE_MATCH_PARAM_RESERVED))) + /* Type of children is mlx5_flow_rule */ struct fs_fte { struct fs_node node; - u32 val[MLX5_ST_SZ_DW(fte_match_param)]; + u32 val[MLX5_ST_SZ_DW_MATCH_PARAM]; u32 dests_size; u32 flow_tag; u32 index; @@ -155,6 +171,7 @@ struct fs_fte { u32 modify_id; enum fs_fte_status status; struct mlx5_fc *counter; + struct rhash_head hash; }; /* Type of children is mlx5_flow_table/namespace */ @@ -174,7 +191,7 @@ struct mlx5_flow_namespace { struct mlx5_flow_group_mask { u8 match_criteria_enable; - u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)]; + u32 match_criteria[MLX5_ST_SZ_DW_MATCH_PARAM]; }; /* Type of children is fs_fte */ @@ -183,8 +200,9 @@ struct mlx5_flow_group { struct mlx5_flow_group_mask mask; u32 start_index; u32 max_ftes; - u32 num_ftes; u32 id; + struct rhashtable ftes_hash; + struct rhlist_head hash; }; struct mlx5_flow_root_namespace { @@ -243,4 +261,14 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev); #define fs_for_each_dst(pos, fte) \ fs_list_for_each_entry(pos, &(fte)->node.children) +#define MLX5_CAP_FLOWTABLE_TYPE(mdev, cap, type) ( \ + (type == FS_FT_NIC_RX) ? MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) : \ + (type == FS_FT_ESW_EGRESS_ACL) ? MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) : \ + (type == FS_FT_ESW_INGRESS_ACL) ? MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) : \ + (type == FS_FT_FDB) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \ + (type == FS_FT_SNIFFER_RX) ? MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) : \ + (type == FS_FT_SNIFFER_TX) ? MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) : \ + (BUILD_BUG_ON_ZERO(FS_FT_SNIFFER_TX != FS_FT_MAX_TYPE))\ + ) + #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c index 6507d8acc54d..89d1f8650033 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c @@ -38,6 +38,8 @@ #include "fs_cmd.h" #define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000) +/* Max number of counters to query in bulk read is 32K */ +#define MLX5_SW_MAX_COUNTERS_BULK BIT(15) /* locking scheme: * @@ -90,16 +92,21 @@ static void mlx5_fc_stats_insert(struct rb_root *root, struct mlx5_fc *counter) rb_insert_color(&counter->node, root); } +/* The function returns the last node that was queried so the caller + * function can continue calling it till all counters are queried. 
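/*
 * Editor's illustrative sketch (not part of the patch): the
 * MLX5_ST_SZ_DW_MATCH_PARAM macro above sizes the match-value arrays in
 * 32-bit words up to the reserved tail of fte_match_param and uses
 * BUILD_BUG_ON_ZERO() to prove at compile time that the reserved field is
 * really the last one. A user-space analogue with an invented struct
 * layout:
 */
#include <stddef.h>
#include <stdint.h>

struct match_param_model {		/* layout is illustrative only */
	uint32_t outer_headers[16];
	uint32_t misc_parameters[16];
	uint32_t inner_headers[16];
	uint8_t  reserved_tail[64];	/* must stay the last member */
};

/* Usable length in dwords: everything before the reserved tail. */
#define MATCH_PARAM_DW \
	(offsetof(struct match_param_model, reserved_tail) / sizeof(uint32_t))

/* Compile-time check that nothing was added after the reserved tail. */
_Static_assert(offsetof(struct match_param_model, reserved_tail) +
	       sizeof(((struct match_param_model *)0)->reserved_tail) ==
	       sizeof(struct match_param_model),
	       "reserved_tail must be the last member");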
+ */ static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev, struct mlx5_fc *first, - u16 last_id) + u32 last_id) { struct mlx5_cmd_fc_bulk *b; struct rb_node *node = NULL; - u16 afirst_id; + u32 afirst_id; int num; int err; - int max_bulk = 1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk); + + int max_bulk = min_t(int, MLX5_SW_MAX_COUNTERS_BULK, + (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk))); /* first id must be aligned to 4 when using bulk query */ afirst_id = first->id & ~0x3; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c index eb04e97d8765..43c126c63955 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c @@ -39,10 +39,11 @@ static void mlx5i_get_drvinfo(struct net_device *dev, struct mlx5e_priv *priv = mlx5i_epriv(dev); mlx5e_ethtool_get_drvinfo(priv, drvinfo); + strlcpy(drvinfo->driver, DRIVER_NAME "[ib_ipoib]", + sizeof(drvinfo->driver)); } -static void mlx5i_get_strings(struct net_device *dev, - uint32_t stringset, uint8_t *data) +static void mlx5i_get_strings(struct net_device *dev, u32 stringset, u8 *data) { struct mlx5e_priv *priv = mlx5i_epriv(dev); @@ -129,17 +130,123 @@ static int mlx5i_flash_device(struct net_device *netdev, return mlx5e_ethtool_flash_device(priv, flash); } -const struct ethtool_ops mlx5i_ethtool_ops = { - .get_drvinfo = mlx5i_get_drvinfo, - .get_strings = mlx5i_get_strings, - .get_sset_count = mlx5i_get_sset_count, - .get_ethtool_stats = mlx5i_get_ethtool_stats, - .get_ringparam = mlx5i_get_ringparam, - .set_ringparam = mlx5i_set_ringparam, - .flash_device = mlx5i_flash_device, - .get_channels = mlx5i_get_channels, - .set_channels = mlx5i_set_channels, - .get_coalesce = mlx5i_get_coalesce, - .set_coalesce = mlx5i_set_coalesce, - .get_ts_info = mlx5i_get_ts_info, +enum mlx5_ptys_width { + MLX5_PTYS_WIDTH_1X = 1 << 0, + MLX5_PTYS_WIDTH_2X = 1 << 1, + MLX5_PTYS_WIDTH_4X = 1 << 2, + MLX5_PTYS_WIDTH_8X = 1 << 3, + MLX5_PTYS_WIDTH_12X = 1 << 4, +}; + +static inline int mlx5_ptys_width_enum_to_int(enum mlx5_ptys_width width) +{ + switch (width) { + case MLX5_PTYS_WIDTH_1X: return 1; + case MLX5_PTYS_WIDTH_2X: return 2; + case MLX5_PTYS_WIDTH_4X: return 4; + case MLX5_PTYS_WIDTH_8X: return 8; + case MLX5_PTYS_WIDTH_12X: return 12; + default: return -1; + } +} + +enum mlx5_ptys_rate { + MLX5_PTYS_RATE_SDR = 1 << 0, + MLX5_PTYS_RATE_DDR = 1 << 1, + MLX5_PTYS_RATE_QDR = 1 << 2, + MLX5_PTYS_RATE_FDR10 = 1 << 3, + MLX5_PTYS_RATE_FDR = 1 << 4, + MLX5_PTYS_RATE_EDR = 1 << 5, + MLX5_PTYS_RATE_HDR = 1 << 6, +}; + +static inline int mlx5_ptys_rate_enum_to_int(enum mlx5_ptys_rate rate) +{ + switch (rate) { + case MLX5_PTYS_RATE_SDR: return 2500; + case MLX5_PTYS_RATE_DDR: return 5000; + case MLX5_PTYS_RATE_QDR: + case MLX5_PTYS_RATE_FDR10: return 10000; + case MLX5_PTYS_RATE_FDR: return 14000; + case MLX5_PTYS_RATE_EDR: return 25000; + case MLX5_PTYS_RATE_HDR: return 50000; + default: return -1; + } +} + +static int mlx5i_get_port_settings(struct net_device *netdev, + u16 *ib_link_width_oper, u16 *ib_proto_oper) +{ + struct mlx5e_priv *priv = mlx5i_epriv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0}; + int ret; + + ret = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_IB, 1); + if (ret) + return ret; + + *ib_link_width_oper = MLX5_GET(ptys_reg, out, ib_link_width_oper); + *ib_proto_oper = MLX5_GET(ptys_reg, out, ib_proto_oper); + + return 0; +} 
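/*
 * Editor's illustrative sketch (not part of the patch): the bulk counter
 * query above clamps the device-advertised bulk size
 * (1 << log_max_flow_counter_bulk) to a 32K software ceiling and aligns
 * the first counter id down to a multiple of 4, as bulk queries require.
 * The helper names below are invented for the example.
 */
#include <stdint.h>

#define SW_MAX_COUNTERS_BULK (1u << 15)	/* 32768 */

static unsigned int bulk_query_len(unsigned int log_max_bulk)
{
	unsigned int hw_max = 1u << log_max_bulk;

	return hw_max < SW_MAX_COUNTERS_BULK ? hw_max : SW_MAX_COUNTERS_BULK;
}

static uint32_t bulk_query_first_id(uint32_t first_id)
{
	return first_id & ~0x3u;	/* first id must be 4-aligned */
}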
+ +static int mlx5i_get_speed_settings(u16 ib_link_width_oper, u16 ib_proto_oper) +{ + int rate, width; + + rate = mlx5_ptys_rate_enum_to_int(ib_proto_oper); + if (rate < 0) + return -EINVAL; + width = mlx5_ptys_width_enum_to_int(ib_link_width_oper); + if (width < 0) + return -EINVAL; + + return rate * width; +} + +static int mlx5i_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *link_ksettings) +{ + u16 ib_link_width_oper; + u16 ib_proto_oper; + int speed, ret; + + ret = mlx5i_get_port_settings(netdev, &ib_link_width_oper, &ib_proto_oper); + if (ret) + return ret; + + ethtool_link_ksettings_zero_link_mode(link_ksettings, supported); + ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); + + speed = mlx5i_get_speed_settings(ib_link_width_oper, ib_proto_oper); + if (speed < 0) + return -EINVAL; + + link_ksettings->base.duplex = DUPLEX_FULL; + link_ksettings->base.port = PORT_OTHER; + + link_ksettings->base.autoneg = AUTONEG_DISABLE; + + link_ksettings->base.speed = speed; + + return 0; +} + +const struct ethtool_ops mlx5i_ethtool_ops = { + .get_drvinfo = mlx5i_get_drvinfo, + .get_strings = mlx5i_get_strings, + .get_sset_count = mlx5i_get_sset_count, + .get_ethtool_stats = mlx5i_get_ethtool_stats, + .get_ringparam = mlx5i_get_ringparam, + .set_ringparam = mlx5i_set_ringparam, + .flash_device = mlx5i_flash_device, + .get_channels = mlx5i_get_channels, + .set_channels = mlx5i_set_channels, + .get_coalesce = mlx5i_get_coalesce, + .set_coalesce = mlx5i_set_coalesce, + .get_ts_info = mlx5i_get_ts_info, + .get_link_ksettings = mlx5i_get_link_ksettings, + .get_link = ethtool_op_get_link, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 85298051a3e4..145e392ab849 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -572,12 +572,13 @@ void mlx5_rdma_netdev_free(struct net_device *netdev) { struct mlx5e_priv *priv = mlx5i_epriv(netdev); const struct mlx5e_profile *profile = priv->profile; + struct mlx5_core_dev *mdev = priv->mdev; mlx5e_detach_netdev(priv); profile->cleanup(priv); destroy_workqueue(priv->wq); free_netdev(netdev); - mlx5e_destroy_mdev_resources(priv->mdev); + mlx5e_destroy_mdev_resources(mdev); } EXPORT_SYMBOL(mlx5_rdma_netdev_free); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c new file mode 100644 index 000000000000..7cb67122e8b5 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c @@ -0,0 +1,201 @@ +/* + * Copyright (c) 2017, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
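/*
 * Editor's illustrative sketch (not part of the patch): the IPoIB ethtool
 * code above reports link speed as (per-lane rate in Mb/s) * (lane count),
 * e.g. EDR at 25000 Mb/s over a 4X link yields 100000 Mb/s. A user-space
 * model of the same arithmetic, with the lookup tables reduced to the two
 * values used in the example:
 */
#include <stdio.h>

int main(void)
{
	int rate_mbps = 25000;	/* EDR, per lane */
	int width = 4;		/* 4X link */

	printf("link speed: %d Mb/s\n", rate_mbps * width);	/* 100000 */
	return 0;
}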
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include "mlx5_core.h" +#include "lib/mpfs.h" + +/* HW L2 Table (MPFS) management */ +static int set_l2table_entry_cmd(struct mlx5_core_dev *dev, u32 index, u8 *mac) +{ + u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(set_l2_table_entry_out)] = {0}; + u8 *in_mac_addr; + + MLX5_SET(set_l2_table_entry_in, in, opcode, MLX5_CMD_OP_SET_L2_TABLE_ENTRY); + MLX5_SET(set_l2_table_entry_in, in, table_index, index); + + in_mac_addr = MLX5_ADDR_OF(set_l2_table_entry_in, in, mac_address); + ether_addr_copy(&in_mac_addr[2], mac); + + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} + +static int del_l2table_entry_cmd(struct mlx5_core_dev *dev, u32 index) +{ + u32 in[MLX5_ST_SZ_DW(delete_l2_table_entry_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(delete_l2_table_entry_out)] = {0}; + + MLX5_SET(delete_l2_table_entry_in, in, opcode, MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY); + MLX5_SET(delete_l2_table_entry_in, in, table_index, index); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} + +/* UC L2 table hash node */ +struct l2table_node { + struct l2addr_node node; + u32 index; /* index in HW l2 table */ +}; + +struct mlx5_mpfs { + struct hlist_head hash[MLX5_L2_ADDR_HASH_SIZE]; + struct mutex lock; /* Synchronize l2 table access */ + u32 size; + unsigned long *bitmap; +}; + +static int alloc_l2table_index(struct mlx5_mpfs *l2table, u32 *ix) +{ + int err = 0; + + *ix = find_first_zero_bit(l2table->bitmap, l2table->size); + if (*ix >= l2table->size) + err = -ENOSPC; + else + __set_bit(*ix, l2table->bitmap); + + return err; +} + +static void free_l2table_index(struct mlx5_mpfs *l2table, u32 ix) +{ + __clear_bit(ix, l2table->bitmap); +} + +int mlx5_mpfs_init(struct mlx5_core_dev *dev) +{ + int l2table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table); + struct mlx5_mpfs *mpfs; + + if (!MLX5_VPORT_MANAGER(dev)) + return 0; + + mpfs = kzalloc(sizeof(*mpfs), GFP_KERNEL); + if (!mpfs) + return -ENOMEM; + + mutex_init(&mpfs->lock); + mpfs->size = l2table_size; + mpfs->bitmap = kcalloc(BITS_TO_LONGS(l2table_size), + sizeof(uintptr_t), GFP_KERNEL); + if (!mpfs->bitmap) { + kfree(mpfs); + return -ENOMEM; + } + + dev->priv.mpfs = mpfs; + return 0; +} + +void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev) +{ + struct mlx5_mpfs *mpfs = dev->priv.mpfs; + + if (!MLX5_VPORT_MANAGER(dev)) + return; + + WARN_ON(!hlist_empty(mpfs->hash)); + kfree(mpfs->bitmap); + kfree(mpfs); +} + +int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) +{ + struct mlx5_mpfs *mpfs = dev->priv.mpfs; + struct l2table_node *l2addr; + u32 index; + int err; + + if (!MLX5_VPORT_MANAGER(dev)) + return 0; + + mutex_lock(&mpfs->lock); + + l2addr = l2addr_hash_find(mpfs->hash, mac, struct l2table_node); + if (l2addr) { + err = -EEXIST; + goto abort; + } + + err = alloc_l2table_index(mpfs, &index); + if (err) + goto abort; + + l2addr = l2addr_hash_add(mpfs->hash, mac, struct l2table_node, GFP_KERNEL); + if (!l2addr) { + free_l2table_index(mpfs, index); + err = -ENOMEM; + goto abort; + } + 
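/*
 * Editor's illustrative sketch (not part of the patch): the
 * SET_L2_TABLE_ENTRY command above places the 6-byte MAC in the low bytes
 * of an 8-byte mac_address field, hence the copy to &in_mac_addr[2]. A
 * user-space model of that packing (the 8-byte buffer layout is an
 * assumption made for the example):
 */
#include <stdint.h>
#include <string.h>

static void pack_l2_entry_mac(uint8_t field[8], const uint8_t mac[6])
{
	memset(field, 0, 8);
	memcpy(&field[2], mac, 6);	/* MAC occupies bytes 2..7 */
}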
+ l2addr->index = index; + err = set_l2table_entry_cmd(dev, index, mac); + if (err) { + l2addr_hash_del(l2addr); + free_l2table_index(mpfs, index); + } + + mlx5_core_dbg(dev, "MPFS mac added %pM, index (%d)\n", mac, index); +abort: + mutex_unlock(&mpfs->lock); + return err; +} + +int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) +{ + struct mlx5_mpfs *mpfs = dev->priv.mpfs; + struct l2table_node *l2addr; + int err = 0; + u32 index; + + if (!MLX5_VPORT_MANAGER(dev)) + return 0; + + mutex_lock(&mpfs->lock); + + l2addr = l2addr_hash_find(mpfs->hash, mac, struct l2table_node); + if (!l2addr) { + err = -ENOENT; + goto unlock; + } + + index = l2addr->index; + del_l2table_entry_cmd(dev, index); + l2addr_hash_del(l2addr); + free_l2table_index(mpfs, index); + mlx5_core_dbg(dev, "MPFS mac deleted %pM, index (%d)\n", mac, index); +unlock: + mutex_unlock(&mpfs->lock); + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h new file mode 100644 index 000000000000..4a7b2c3203a7 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __MLX5_MPFS_H__ +#define __MLX5_MPFS_H__ + +#include +#include + +/* L2 -mac address based- hash helpers */ +#define MLX5_L2_ADDR_HASH_SIZE (BIT(BITS_PER_BYTE)) +#define MLX5_L2_ADDR_HASH(addr) (addr[5]) + +struct l2addr_node { + struct hlist_node hlist; + u8 addr[ETH_ALEN]; +}; + +#define for_each_l2hash_node(hn, tmp, hash, i) \ + for (i = 0; i < MLX5_L2_ADDR_HASH_SIZE; i++) \ + hlist_for_each_entry_safe(hn, tmp, &(hash)[i], hlist) + +#define l2addr_hash_find(hash, mac, type) ({ \ + int ix = MLX5_L2_ADDR_HASH(mac); \ + bool found = false; \ + type *ptr = NULL; \ + \ + hlist_for_each_entry(ptr, &(hash)[ix], node.hlist) \ + if (ether_addr_equal(ptr->node.addr, mac)) {\ + found = true; \ + break; \ + } \ + if (!found) \ + ptr = NULL; \ + ptr; \ +}) + +#define l2addr_hash_add(hash, mac, type, gfp) ({ \ + int ix = MLX5_L2_ADDR_HASH(mac); \ + type *ptr = NULL; \ + \ + ptr = kzalloc(sizeof(type), gfp); \ + if (ptr) { \ + ether_addr_copy(ptr->node.addr, mac); \ + hlist_add_head(&ptr->node.hlist, &(hash)[ix]);\ + } \ + ptr; \ +}) + +#define l2addr_hash_del(ptr) ({ \ + hlist_del(&(ptr)->node.hlist); \ + kfree(ptr); \ +}) + +#ifdef CONFIG_MLX5_MPFS +int mlx5_mpfs_init(struct mlx5_core_dev *dev); +void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev); +int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac); +int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac); +#else /* #ifndef CONFIG_MLX5_MPFS */ +static inline int mlx5_mpfs_init(struct mlx5_core_dev *dev) { return 0; } +static inline void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev) {} +static inline int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; } +static inline int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; } +#endif +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 8c4b45ef539c..0d2c8dcd6eae 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -54,9 +54,8 @@ #include #include "mlx5_core.h" #include "fs_core.h" -#ifdef CONFIG_MLX5_CORE_EN +#include "lib/mpfs.h" #include "eswitch.h" -#endif #include "lib/mlx5.h" #include "fpga/core.h" #include "accel/ipsec.h" @@ -788,7 +787,6 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev) return -EOPNOTSUPP; } - static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv) { struct pci_dev *pdev = dev->pdev; @@ -897,13 +895,17 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv) goto err_tables_cleanup; } -#ifdef CONFIG_MLX5_CORE_EN + err = mlx5_mpfs_init(dev); + if (err) { + dev_err(&pdev->dev, "Failed to init l2 table %d\n", err); + goto err_rl_cleanup; + } + err = mlx5_eswitch_init(dev); if (err) { dev_err(&pdev->dev, "Failed to init eswitch %d\n", err); - goto err_rl_cleanup; + goto err_mpfs_cleanup; } -#endif err = mlx5_sriov_init(dev); if (err) { @@ -922,13 +924,11 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv) err_sriov_cleanup: mlx5_sriov_cleanup(dev); err_eswitch_cleanup: -#ifdef CONFIG_MLX5_CORE_EN mlx5_eswitch_cleanup(dev->priv.eswitch); - +err_mpfs_cleanup: + mlx5_mpfs_cleanup(dev); err_rl_cleanup: -#endif mlx5_cleanup_rl_table(dev); - err_tables_cleanup: mlx5_cleanup_mkey_table(dev); mlx5_cleanup_srq_table(dev); @@ -946,9 +946,8 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev) { mlx5_fpga_cleanup(dev); mlx5_sriov_cleanup(dev); -#ifdef CONFIG_MLX5_CORE_EN mlx5_eswitch_cleanup(dev->priv.eswitch); -#endif + 
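/*
 * Editor's illustrative sketch (not part of the patch): the l2addr helpers
 * above hash unicast MACs into 256 buckets keyed by the last address byte
 * (addr[5]) and walk a single bucket on lookup. A minimal user-space model
 * of the same bucketing, with invented structure names:
 */
#include <stdint.h>
#include <string.h>

#define L2_HASH_SIZE 256

struct l2_entry {
	uint8_t addr[6];
	struct l2_entry *next;
};

static struct l2_entry *l2_buckets[L2_HASH_SIZE];

static struct l2_entry *l2_find(const uint8_t mac[6])
{
	struct l2_entry *e;

	for (e = l2_buckets[mac[5]]; e; e = e->next)	/* bucket = last byte */
		if (!memcmp(e->addr, mac, 6))
			return e;
	return NULL;
}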
mlx5_mpfs_cleanup(dev); mlx5_cleanup_rl_table(dev); mlx5_cleanup_reserved_gids(dev); mlx5_cleanup_mkey_table(dev); @@ -1106,10 +1105,6 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, goto err_fs; } -#ifdef CONFIG_MLX5_CORE_EN - mlx5_eswitch_attach(dev->priv.eswitch); -#endif - err = mlx5_sriov_attach(dev); if (err) { dev_err(&pdev->dev, "sriov init failed %d\n", err); @@ -1152,9 +1147,6 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, mlx5_sriov_detach(dev); err_sriov: -#ifdef CONFIG_MLX5_CORE_EN - mlx5_eswitch_detach(dev->priv.eswitch); -#endif mlx5_cleanup_fs(dev); err_fs: @@ -1225,9 +1217,6 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, mlx5_fpga_device_stop(dev); mlx5_sriov_detach(dev); -#ifdef CONFIG_MLX5_CORE_EN - mlx5_eswitch_detach(dev->priv.eswitch); -#endif mlx5_cleanup_fs(dev); free_comp_eqs(dev); mlx5_stop_eqs(dev); @@ -1258,7 +1247,7 @@ struct mlx5_core_event_handler { }; static const struct devlink_ops mlx5_devlink_ops = { -#ifdef CONFIG_MLX5_CORE_EN +#ifdef CONFIG_MLX5_ESWITCH .eswitch_mode_set = mlx5_devlink_eswitch_mode_set, .eswitch_mode_get = mlx5_devlink_eswitch_mode_get, .eswitch_inline_mode_set = mlx5_devlink_eswitch_inline_mode_set, @@ -1298,6 +1287,9 @@ static int init_one(struct pci_dev *pdev, mutex_init(&dev->pci_status_mutex); mutex_init(&dev->intf_state_mutex); + INIT_LIST_HEAD(&priv->waiting_events_list); + priv->is_accum_events = false; + #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING err = init_srcu_struct(&priv->pfault_srcu); if (err) { @@ -1352,7 +1344,6 @@ static int init_one(struct pci_dev *pdev, cleanup_srcu_struct(&priv->pfault_srcu); clean_dev: #endif - pci_set_drvdata(pdev, NULL); devlink_free(devlink); return err; @@ -1379,7 +1370,6 @@ static void remove_one(struct pci_dev *pdev) #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING cleanup_srcu_struct(&priv->pfault_srcu); #endif - pci_set_drvdata(pdev, NULL); devlink_free(devlink); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 01d637dac533..b7c2900b75f9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -43,6 +43,10 @@ #define DRIVER_VERSION "5.0-0" #define MLX5_TOTAL_VPORTS(mdev) (1 + pci_sriov_get_totalvfs(mdev->pdev)) +#define MLX5_VPORT_MANAGER(mdev) \ + (MLX5_CAP_GEN(mdev, vport_group_manager) && \ + (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \ + mlx5_core_is_pf(mdev)) extern uint mlx5_core_debug_mask; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c index 28d8472b36f1..2a8b529ce6dd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c @@ -34,9 +34,7 @@ #include #include #include "mlx5_core.h" -#ifdef CONFIG_MLX5_CORE_EN #include "eswitch.h" -#endif bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev) { @@ -90,14 +88,12 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs) return -EBUSY; } -#ifdef CONFIG_MLX5_CORE_EN err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY); if (err) { mlx5_core_warn(dev, "failed to enable eswitch SRIOV (%d)\n", err); return err; } -#endif for (vf = 0; vf < num_vfs; vf++) { err = mlx5_core_enable_hca(dev, vf + 1); @@ -113,11 +109,10 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs) mlx5_core_warn(dev, "failed to restore VF 
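/*
 * Editor's illustrative sketch (not part of the patch): the main.c hunks
 * above slot mlx5_mpfs_init() between the rate-limit table and the eswitch
 * and unwind in exact reverse order on failure via the goto labels. The
 * generic shape of that pattern, with invented subsystem names:
 */
#include <stdio.h>

static int init_a(void) { return 0; }
static int init_b(void) { return 0; }
static int init_c(void) { return -1; }	/* pretend the last step fails */
static void cleanup_a(void) { puts("cleanup a"); }
static void cleanup_b(void) { puts("cleanup b"); }

static int init_all(void)
{
	int err;

	err = init_a();
	if (err)
		goto out;
	err = init_b();
	if (err)
		goto err_a;
	err = init_c();
	if (err)
		goto err_b;
	return 0;

err_b:				/* unwind in reverse order of init */
	cleanup_b();
err_a:
	cleanup_a();
out:
	return err;
}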
%d settings, err %d\n", vf, err); - continue; + continue; } } mlx5_core_dbg(dev, "successfully enabled VF* %d\n", vf); - } return 0; @@ -130,11 +125,7 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev) int vf; if (!sriov->enabled_vfs) -#ifdef CONFIG_MLX5_CORE_EN - goto disable_sriov_resources; -#else - return; -#endif + goto out; for (vf = 0; vf < sriov->num_vfs; vf++) { if (!sriov->vfs_ctx[vf].enabled) @@ -148,10 +139,8 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev) sriov->enabled_vfs--; } -#ifdef CONFIG_MLX5_CORE_EN -disable_sriov_resources: +out: mlx5_eswitch_disable_sriov(dev->priv.eswitch); -#endif if (mlx5_wait_for_vf_pages(dev)) mlx5_core_warn(dev, "timeout reclaiming VFs pages\n"); diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig index 695adff89d71..d56eea310509 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig +++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig @@ -75,6 +75,7 @@ config MLXSW_SPECTRUM depends on MLXSW_CORE && MLXSW_PCI && NET_SWITCHDEV && VLAN_8021Q depends on PSAMPLE || PSAMPLE=n depends on BRIDGE || BRIDGE=n + depends on IPV6 || IPV6=n select PARMAN select MLXFW default m diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index 62fc42f396bb..891ff418bb5e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -16,8 +16,9 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ spectrum_switchdev.o spectrum_router.o \ spectrum_kvdl.o spectrum_acl_tcam.o \ spectrum_acl.o spectrum_flower.o \ - spectrum_cnt.o spectrum_dpipe.o \ - spectrum_fid.o + spectrum_cnt.o spectrum_fid.o \ + spectrum_ipip.o mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o +mlxsw_spectrum-$(CONFIG_NET_DEVLINK) += spectrum_dpipe.o obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o mlxsw_minimal-objs := minimal.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index affe84eb4bff..9d5e7cf288be 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -667,7 +667,7 @@ static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core, int err; dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n", - trans->tid, reg->id, mlxsw_reg_id_str(reg->id), + tid, reg->id, mlxsw_reg_id_str(reg->id), mlxsw_core_reg_access_type_str(type)); skb = mlxsw_emad_alloc(mlxsw_core, reg->len); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h index 9807ef814e42..f6963b0b4a55 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h @@ -57,6 +57,9 @@ enum mlxsw_afk_element { MLXSW_AFK_ELEMENT_VID, MLXSW_AFK_ELEMENT_PCP, MLXSW_AFK_ELEMENT_TCP_FLAGS, + MLXSW_AFK_ELEMENT_IP_TTL_, + MLXSW_AFK_ELEMENT_IP_ECN, + MLXSW_AFK_ELEMENT_IP_DSCP, MLXSW_AFK_ELEMENT_MAX, }; @@ -104,6 +107,9 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = { MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12), MLXSW_AFK_ELEMENT_INFO_U32(PCP, 0x10, 20, 3), MLXSW_AFK_ELEMENT_INFO_U32(TCP_FLAGS, 0x10, 23, 9), + MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x14, 0, 8), + MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x14, 9, 2), + MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x14, 11, 6), MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x18, 0, 32), MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 
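/*
 * Editor's illustrative sketch (not part of the patch): the new flex-key
 * elements above (IP_TTL_, IP_ECN, IP_DSCP) are described as
 * (byte offset, bit shift, bit width) tuples inside a 32-bit word, e.g.
 * ECN is 2 bits at shift 9. The helpers below show how such a descriptor
 * is typically applied; they are a generic model (widths below 32 bits),
 * not the mlxsw implementation.
 */
#include <stdint.h>

static uint32_t field_get(uint32_t word, unsigned int shift, unsigned int width)
{
	return (word >> shift) & ((1u << width) - 1);
}

static uint32_t field_set(uint32_t word, unsigned int shift, unsigned int width,
			  uint32_t val)
{
	uint32_t mask = ((1u << width) - 1) << shift;

	return (word & ~mask) | ((val << shift) & mask);
}

/* Example: read ECN from the dword at offset 0x14 -> field_get(word, 9, 2). */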
0x1C, 0, 32), MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x18, 8), diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 1bd34d9a7b9e..cc27c5de5a1d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -5,6 +5,7 @@ * Copyright (c) 2015 Elad Raz * Copyright (c) 2015-2017 Jiri Pirko * Copyright (c) 2016 Yotam Gigi + * Copyright (c) 2017 Petr Machata * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -3679,15 +3680,17 @@ enum mlxsw_reg_htgt_trap_group { MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP, MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP, MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP, - MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4, + MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP, MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF, MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP, - MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS, + MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS, MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP, MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE, MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME, MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP, MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT, + MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD, + MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND, }; /* reg_htgt_trap_group @@ -3952,10 +3955,12 @@ MLXSW_ITEM32(reg, rgcr, pcp_rw, 0x18, 16, 2); */ MLXSW_ITEM32(reg, rgcr, activity_dis, 0x20, 0, 8); -static inline void mlxsw_reg_rgcr_pack(char *payload, bool ipv4_en) +static inline void mlxsw_reg_rgcr_pack(char *payload, bool ipv4_en, + bool ipv6_en) { MLXSW_REG_ZERO(rgcr, payload); mlxsw_reg_rgcr_ipv4_en_set(payload, ipv4_en); + mlxsw_reg_rgcr_ipv6_en_set(payload, ipv6_en); } /* RITR - Router Interface Table Register @@ -3988,16 +3993,18 @@ MLXSW_ITEM32(reg, ritr, ipv4, 0x00, 29, 1); MLXSW_ITEM32(reg, ritr, ipv6, 0x00, 28, 1); enum mlxsw_reg_ritr_if_type { + /* VLAN interface. */ MLXSW_REG_RITR_VLAN_IF, + /* FID interface. */ MLXSW_REG_RITR_FID_IF, + /* Sub-port interface. */ MLXSW_REG_RITR_SP_IF, + /* Loopback Interface. */ + MLXSW_REG_RITR_LOOPBACK_IF, }; /* reg_ritr_type - * Router interface type. - * 0 - VLAN interface. - * 1 - FID interface. - * 2 - Sub-port interface. + * Router interface type as per enum mlxsw_reg_ritr_if_type. * Access: RW */ MLXSW_ITEM32(reg, ritr, type, 0x00, 23, 3); @@ -4125,6 +4132,67 @@ MLXSW_ITEM32(reg, ritr, sp_if_system_port, 0x08, 0, 16); */ MLXSW_ITEM32(reg, ritr, sp_if_vid, 0x18, 0, 12); +/* Loopback Interface */ + +enum mlxsw_reg_ritr_loopback_protocol { + /* IPinIP IPv4 underlay Unicast */ + MLXSW_REG_RITR_LOOPBACK_PROTOCOL_IPIP_IPV4, + /* IPinIP IPv6 underlay Unicast */ + MLXSW_REG_RITR_LOOPBACK_PROTOCOL_IPIP_IPV6, +}; + +/* reg_ritr_loopback_protocol + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, loopback_protocol, 0x08, 28, 4); + +enum mlxsw_reg_ritr_loopback_ipip_type { + /* Tunnel is IPinIP. */ + MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_IP, + /* Tunnel is GRE, no key. */ + MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_IN_IP, + /* Tunnel is GRE, with a key. */ + MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_KEY_IN_IP, +}; + +/* reg_ritr_loopback_ipip_type + * Encapsulation type. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, loopback_ipip_type, 0x10, 24, 4); + +enum mlxsw_reg_ritr_loopback_ipip_options { + /* The key is defined by gre_key. 
*/ + MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET, +}; + +/* reg_ritr_loopback_ipip_options + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, loopback_ipip_options, 0x10, 20, 4); + +/* reg_ritr_loopback_ipip_uvr + * Underlay Virtual Router ID. + * Range is 0..cap_max_virtual_routers-1. + * Reserved for Spectrum-2. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, loopback_ipip_uvr, 0x10, 0, 16); + +/* reg_ritr_loopback_ipip_usip* + * Encapsulation Underlay source IP. + * Access: RW + */ +MLXSW_ITEM_BUF(reg, ritr, loopback_ipip_usip6, 0x18, 16); +MLXSW_ITEM32(reg, ritr, loopback_ipip_usip4, 0x24, 0, 32); + +/* reg_ritr_loopback_ipip_gre_key + * GRE Key. + * Reserved when ipip_type is not IP_IN_GRE_KEY_IN_IP. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, loopback_ipip_gre_key, 0x28, 0, 32); + /* Shared between ingress/egress */ enum mlxsw_reg_ritr_counter_set_type { /* No Count. */ @@ -4195,24 +4263,54 @@ static inline void mlxsw_reg_ritr_sp_if_pack(char *payload, bool lag, static inline void mlxsw_reg_ritr_pack(char *payload, bool enable, enum mlxsw_reg_ritr_if_type type, - u16 rif, u16 vr_id, u16 mtu, - const char *mac) + u16 rif, u16 vr_id, u16 mtu) { bool op = enable ? MLXSW_REG_RITR_RIF_CREATE : MLXSW_REG_RITR_RIF_DEL; MLXSW_REG_ZERO(ritr, payload); mlxsw_reg_ritr_enable_set(payload, enable); mlxsw_reg_ritr_ipv4_set(payload, 1); + mlxsw_reg_ritr_ipv6_set(payload, 1); mlxsw_reg_ritr_type_set(payload, type); mlxsw_reg_ritr_op_set(payload, op); mlxsw_reg_ritr_rif_set(payload, rif); mlxsw_reg_ritr_ipv4_fe_set(payload, 1); + mlxsw_reg_ritr_ipv6_fe_set(payload, 1); mlxsw_reg_ritr_lb_en_set(payload, 1); mlxsw_reg_ritr_virtual_router_set(payload, vr_id); mlxsw_reg_ritr_mtu_set(payload, mtu); +} + +static inline void mlxsw_reg_ritr_mac_pack(char *payload, const char *mac) +{ mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac); } +static inline void +mlxsw_reg_ritr_loopback_ipip_common_pack(char *payload, + enum mlxsw_reg_ritr_loopback_ipip_type ipip_type, + enum mlxsw_reg_ritr_loopback_ipip_options options, + u16 uvr_id, u32 gre_key) +{ + mlxsw_reg_ritr_loopback_ipip_type_set(payload, ipip_type); + mlxsw_reg_ritr_loopback_ipip_options_set(payload, options); + mlxsw_reg_ritr_loopback_ipip_uvr_set(payload, uvr_id); + mlxsw_reg_ritr_loopback_ipip_gre_key_set(payload, gre_key); +} + +static inline void +mlxsw_reg_ritr_loopback_ipip4_pack(char *payload, + enum mlxsw_reg_ritr_loopback_ipip_type ipip_type, + enum mlxsw_reg_ritr_loopback_ipip_options options, + u16 uvr_id, u32 usip, u32 gre_key) +{ + mlxsw_reg_ritr_loopback_protocol_set(payload, + MLXSW_REG_RITR_LOOPBACK_PROTOCOL_IPIP_IPV4); + mlxsw_reg_ritr_loopback_ipip_common_pack(payload, ipip_type, options, + uvr_id, gre_key); + mlxsw_reg_ritr_loopback_ipip_usip4_set(payload, usip); +} + /* RATR - Router Adjacency Table Register * -------------------------------------- * The RATR register is used to configure the Router Adjacency (next-hop) @@ -4268,6 +4366,38 @@ MLXSW_ITEM32(reg, ratr, v, 0x00, 24, 1); */ MLXSW_ITEM32(reg, ratr, a, 0x00, 16, 1); +enum mlxsw_reg_ratr_type { + /* Ethernet */ + MLXSW_REG_RATR_TYPE_ETHERNET, + /* IPoIB Unicast without GRH. + * Reserved for Spectrum. + */ + MLXSW_REG_RATR_TYPE_IPOIB_UC, + /* IPoIB Unicast with GRH. Supported only in table 0 (Ethernet unicast + * adjacency). + * Reserved for Spectrum. + */ + MLXSW_REG_RATR_TYPE_IPOIB_UC_W_GRH, + /* IPoIB Multicast. + * Reserved for Spectrum. + */ + MLXSW_REG_RATR_TYPE_IPOIB_MC, + /* MPLS. + * Reserved for SwitchX/-2. + */ + MLXSW_REG_RATR_TYPE_MPLS, + /* IPinIP Encap. 
+ * Reserved for SwitchX/-2. + */ + MLXSW_REG_RATR_TYPE_IPIP, +}; + +/* reg_ratr_type + * Adjacency entry type. + * Access: RW + */ +MLXSW_ITEM32(reg, ratr, type, 0x04, 28, 4); + /* reg_ratr_adjacency_index_low * Bits 15:0 of index into the adjacency table. * For SwitchX and SwitchX-2, the adjacency table is linear and @@ -4297,17 +4427,17 @@ enum mlxsw_reg_ratr_trap_action { */ MLXSW_ITEM32(reg, ratr, trap_action, 0x0C, 28, 4); -enum mlxsw_reg_ratr_trap_id { - MLXSW_REG_RATR_TRAP_ID_RTR_EGRESS0 = 0, - MLXSW_REG_RATR_TRAP_ID_RTR_EGRESS1 = 1, -}; - /* reg_ratr_adjacency_index_high * Bits 23:16 of the adjacency_index. * Access: Index */ MLXSW_ITEM32(reg, ratr, adjacency_index_high, 0x0C, 16, 8); +enum mlxsw_reg_ratr_trap_id { + MLXSW_REG_RATR_TRAP_ID_RTR_EGRESS0, + MLXSW_REG_RATR_TRAP_ID_RTR_EGRESS1, +}; + /* reg_ratr_trap_id * Trap ID to be reported to CPU. * Trap-ID is RTR_EGRESS0 or RTR_EGRESS1. @@ -4322,14 +4452,44 @@ MLXSW_ITEM32(reg, ratr, trap_id, 0x0C, 0, 8); */ MLXSW_ITEM_BUF(reg, ratr, eth_destination_mac, 0x12, 6); +enum mlxsw_reg_ratr_ipip_type { + /* IPv4, address set by mlxsw_reg_ratr_ipip_ipv4_udip. */ + MLXSW_REG_RATR_IPIP_TYPE_IPV4, + /* IPv6, address set by mlxsw_reg_ratr_ipip_ipv6_ptr. */ + MLXSW_REG_RATR_IPIP_TYPE_IPV6, +}; + +/* reg_ratr_ipip_type + * Underlay destination ip type. + * Note: the type field must match the protocol of the router interface. + * Access: RW + */ +MLXSW_ITEM32(reg, ratr, ipip_type, 0x10, 16, 4); + +/* reg_ratr_ipip_ipv4_udip + * Underlay ipv4 dip. + * Reserved when ipip_type is IPv6. + * Access: RW + */ +MLXSW_ITEM32(reg, ratr, ipip_ipv4_udip, 0x18, 0, 32); + +/* reg_ratr_ipip_ipv6_ptr + * Pointer to IPv6 underlay destination ip address. + * For Spectrum: Pointer to KVD linear space. + * Access: RW + */ +MLXSW_ITEM32(reg, ratr, ipip_ipv6_ptr, 0x1C, 0, 24); + static inline void mlxsw_reg_ratr_pack(char *payload, enum mlxsw_reg_ratr_op op, bool valid, + enum mlxsw_reg_ratr_type type, u32 adjacency_index, u16 egress_rif) { MLXSW_REG_ZERO(ratr, payload); mlxsw_reg_ratr_op_set(payload, op); mlxsw_reg_ratr_v_set(payload, valid); + mlxsw_reg_ratr_type_set(payload, type); mlxsw_reg_ratr_adjacency_index_low_set(payload, adjacency_index); mlxsw_reg_ratr_adjacency_index_high_set(payload, adjacency_index >> 16); mlxsw_reg_ratr_egress_router_interface_set(payload, egress_rif); @@ -4341,6 +4501,12 @@ static inline void mlxsw_reg_ratr_eth_entry_pack(char *payload, mlxsw_reg_ratr_eth_destination_mac_memcpy_to(payload, dest_mac); } +static inline void mlxsw_reg_ratr_ipip4_entry_pack(char *payload, u32 ipv4_udip) +{ + mlxsw_reg_ratr_ipip_type_set(payload, MLXSW_REG_RATR_IPIP_TYPE_IPV4); + mlxsw_reg_ratr_ipip_ipv4_udip_set(payload, ipv4_udip); +} + /* RICNT - Router Interface Counter Register * ----------------------------------------- * The RICNT register retrieves per port performance counters @@ -4712,12 +4878,13 @@ MLXSW_ITEM32(reg, ralue, prefix_len, 0x08, 0, 8); /* reg_ralue_dip* * The prefix of the route or of the marker that the object of the LPM * is compared with. The most significant bits of the dip are the prefix. - * The list significant bits must be '0' if the prefix_len is smaller + * The least significant bits must be '0' if the prefix_len is smaller * than 128 for IPv6 or smaller than 32 for IPv4. * IPv4 address uses bits dip[31:0] and bits dip[127:32] are reserved. 
* Access: Index */ MLXSW_ITEM32(reg, ralue, dip4, 0x18, 0, 32); +MLXSW_ITEM_BUF(reg, ralue, dip6, 0x0C, 16); enum mlxsw_reg_ralue_entry_type { MLXSW_REG_RALUE_ENTRY_TYPE_MARKER_ENTRY = 1, @@ -4806,7 +4973,7 @@ MLXSW_ITEM32(reg, ralue, ecmp_size, 0x28, 0, 13); */ MLXSW_ITEM32(reg, ralue, local_erif, 0x24, 0, 16); -/* reg_ralue_v +/* reg_ralue_ip2me_v * Valid bit for the tunnel_ptr field. * If valid = 0 then trap to CPU as IP2ME trap ID. * If valid = 1 and the packet format allows NVE or IPinIP tunnel @@ -4816,15 +4983,15 @@ MLXSW_ITEM32(reg, ralue, local_erif, 0x24, 0, 16); * Only relevant in case of IP2ME action. * Access: RW */ -MLXSW_ITEM32(reg, ralue, v, 0x24, 31, 1); +MLXSW_ITEM32(reg, ralue, ip2me_v, 0x24, 31, 1); -/* reg_ralue_tunnel_ptr +/* reg_ralue_ip2me_tunnel_ptr * Tunnel Pointer for NVE or IPinIP tunnel decapsulation. * For Spectrum, pointer to KVD Linear. * Only relevant in case of IP2ME action. * Access: RW */ -MLXSW_ITEM32(reg, ralue, tunnel_ptr, 0x24, 0, 24); +MLXSW_ITEM32(reg, ralue, ip2me_tunnel_ptr, 0x24, 0, 24); static inline void mlxsw_reg_ralue_pack(char *payload, enum mlxsw_reg_ralxx_protocol protocol, @@ -4851,6 +5018,16 @@ static inline void mlxsw_reg_ralue_pack4(char *payload, mlxsw_reg_ralue_dip4_set(payload, dip); } +static inline void mlxsw_reg_ralue_pack6(char *payload, + enum mlxsw_reg_ralxx_protocol protocol, + enum mlxsw_reg_ralue_op op, + u16 virtual_router, u8 prefix_len, + const void *dip) +{ + mlxsw_reg_ralue_pack(payload, protocol, op, virtual_router, prefix_len); + mlxsw_reg_ralue_dip6_memcpy_to(payload, dip); +} + static inline void mlxsw_reg_ralue_act_remote_pack(char *payload, enum mlxsw_reg_ralue_trap_action trap_action, @@ -4883,6 +5060,15 @@ mlxsw_reg_ralue_act_ip2me_pack(char *payload) MLXSW_REG_RALUE_ACTION_TYPE_IP2ME); } +static inline void +mlxsw_reg_ralue_act_ip2me_tun_pack(char *payload, u32 tunnel_ptr) +{ + mlxsw_reg_ralue_action_type_set(payload, + MLXSW_REG_RALUE_ACTION_TYPE_IP2ME); + mlxsw_reg_ralue_ip2me_v_set(payload, 1); + mlxsw_reg_ralue_ip2me_tunnel_ptr_set(payload, tunnel_ptr); +} + /* RAUHT - Router Algorithmic LPM Unicast Host Table Register * ---------------------------------------------------------- * The RAUHT register is used to configure and query the Unicast Host table in @@ -4954,6 +5140,7 @@ MLXSW_ITEM32(reg, rauht, rif, 0x00, 0, 16); * Access: Index */ MLXSW_ITEM32(reg, rauht, dip4, 0x1C, 0x0, 32); +MLXSW_ITEM_BUF(reg, rauht, dip6, 0x10, 16); enum mlxsw_reg_rauht_trap_action { MLXSW_REG_RAUHT_TRAP_ACTION_NOP, @@ -4982,6 +5169,15 @@ enum mlxsw_reg_rauht_trap_id { */ MLXSW_ITEM32(reg, rauht, trap_id, 0x60, 0, 9); +enum mlxsw_reg_flow_counter_set_type { + /* No count */ + MLXSW_REG_FLOW_COUNTER_SET_TYPE_NO_COUNT = 0x00, + /* Count packets and bytes */ + MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES = 0x03, + /* Count only packets */ + MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS = 0x05, +}; + /* reg_rauht_counter_set_type * Counter set type for flow counters * Access: RW @@ -5018,6 +5214,23 @@ static inline void mlxsw_reg_rauht_pack4(char *payload, mlxsw_reg_rauht_dip4_set(payload, dip); } +static inline void mlxsw_reg_rauht_pack6(char *payload, + enum mlxsw_reg_rauht_op op, u16 rif, + const char *mac, const char *dip) +{ + mlxsw_reg_rauht_pack(payload, op, rif, mac); + mlxsw_reg_rauht_type_set(payload, MLXSW_REG_RAUHT_TYPE_IPV6); + mlxsw_reg_rauht_dip6_memcpy_to(payload, dip); +} + +static inline void mlxsw_reg_rauht_pack_counter(char *payload, + u64 counter_index) +{ + mlxsw_reg_rauht_counter_index_set(payload, 
counter_index); + mlxsw_reg_rauht_counter_set_type_set(payload, + MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); +} + /* RALEU - Router Algorithmic LPM ECMP Update Register * --------------------------------------------------- * The register enables updating the ECMP section in the action for multiple @@ -5216,6 +5429,30 @@ MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_ent_rif, MLXSW_REG_RAUHTD_BASE_LEN, 0, MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_ent_dip, MLXSW_REG_RAUHTD_BASE_LEN, 0, 32, MLXSW_REG_RAUHTD_IPV4_ENT_LEN, 0x04, false); +#define MLXSW_REG_RAUHTD_IPV6_ENT_LEN 0x20 + +/* reg_rauhtd_ipv6_ent_a + * Activity. Set for new entries. Set if a packet lookup has hit on the + * specific entry. + * Access: RO + */ +MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv6_ent_a, MLXSW_REG_RAUHTD_BASE_LEN, 16, 1, + MLXSW_REG_RAUHTD_IPV6_ENT_LEN, 0x00, false); + +/* reg_rauhtd_ipv6_ent_rif + * Router interface. + * Access: RO + */ +MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv6_ent_rif, MLXSW_REG_RAUHTD_BASE_LEN, 0, + 16, MLXSW_REG_RAUHTD_IPV6_ENT_LEN, 0x00, false); + +/* reg_rauhtd_ipv6_ent_dip + * Destination IPv6 address. + * Access: RO + */ +MLXSW_ITEM_BUF_INDEXED(reg, rauhtd, ipv6_ent_dip, MLXSW_REG_RAUHTD_BASE_LEN, + 16, MLXSW_REG_RAUHTD_IPV6_ENT_LEN, 0x10); + static inline void mlxsw_reg_rauhtd_ent_ipv4_unpack(char *payload, int ent_index, u16 *p_rif, u32 *p_dip) @@ -5224,6 +5461,141 @@ static inline void mlxsw_reg_rauhtd_ent_ipv4_unpack(char *payload, *p_dip = mlxsw_reg_rauhtd_ipv4_ent_dip_get(payload, ent_index); } +static inline void mlxsw_reg_rauhtd_ent_ipv6_unpack(char *payload, + int rec_index, u16 *p_rif, + char *p_dip) +{ + *p_rif = mlxsw_reg_rauhtd_ipv6_ent_rif_get(payload, rec_index); + mlxsw_reg_rauhtd_ipv6_ent_dip_memcpy_from(payload, rec_index, p_dip); +} + +/* RTDP - Routing Tunnel Decap Properties Register + * ----------------------------------------------- + * The RTDP register is used for configuring the tunnel decap properties of NVE + * and IPinIP. + */ +#define MLXSW_REG_RTDP_ID 0x8020 +#define MLXSW_REG_RTDP_LEN 0x44 + +MLXSW_REG_DEFINE(rtdp, MLXSW_REG_RTDP_ID, MLXSW_REG_RTDP_LEN); + +enum mlxsw_reg_rtdp_type { + MLXSW_REG_RTDP_TYPE_NVE, + MLXSW_REG_RTDP_TYPE_IPIP, +}; + +/* reg_rtdp_type + * Type of the RTDP entry as per enum mlxsw_reg_rtdp_type. + * Access: RW + */ +MLXSW_ITEM32(reg, rtdp, type, 0x00, 28, 4); + +/* reg_rtdp_tunnel_index + * Index to the Decap entry. + * For Spectrum, Index to KVD Linear. + * Access: Index + */ +MLXSW_ITEM32(reg, rtdp, tunnel_index, 0x00, 0, 24); + +/* IPinIP */ + +/* reg_rtdp_ipip_irif + * Ingress Router Interface for the overlay router + * Access: RW + */ +MLXSW_ITEM32(reg, rtdp, ipip_irif, 0x04, 16, 16); + +enum mlxsw_reg_rtdp_ipip_sip_check { + /* No sip checks. */ + MLXSW_REG_RTDP_IPIP_SIP_CHECK_NO, + /* Filter packet if underlay is not IPv4 or if underlay SIP does not + * equal ipv4_usip. + */ + MLXSW_REG_RTDP_IPIP_SIP_CHECK_FILTER_IPV4, + /* Filter packet if underlay is not IPv6 or if underlay SIP does not + * equal ipv6_usip. + */ + MLXSW_REG_RTDP_IPIP_SIP_CHECK_FILTER_IPV6 = 3, +}; + +/* reg_rtdp_ipip_sip_check + * SIP check to perform. If decapsulation failed due to these configurations + * then trap_id is IPIP_DECAP_ERROR. + * Access: RW + */ +MLXSW_ITEM32(reg, rtdp, ipip_sip_check, 0x04, 0, 3); + +/* If set, allow decapsulation of IPinIP (without GRE). */ +#define MLXSW_REG_RTDP_IPIP_TYPE_CHECK_ALLOW_IPIP BIT(0) +/* If set, allow decapsulation of IPinGREinIP without a key. 
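/*
 * Editor's illustrative sketch (not part of the patch): the RAUHTD IPv6
 * record fields above are indexed items, i.e. the n-th record's field
 * lives at base + n * record_len + field_offset, with IPv6 records 0x20
 * bytes long and the DIP starting 0x10 bytes into each record. A small
 * model of that address computation (the base length is an illustrative
 * value):
 */
#include <stddef.h>

#define RAUHTD_BASE_LEN     0x20	/* illustrative value */
#define RAUHTD_IPV6_ENT_LEN 0x20

static size_t ipv6_ent_dip_offset(unsigned int rec_index)
{
	return RAUHTD_BASE_LEN + rec_index * RAUHTD_IPV6_ENT_LEN + 0x10;
}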
*/ +#define MLXSW_REG_RTDP_IPIP_TYPE_CHECK_ALLOW_GRE BIT(1) +/* If set, allow decapsulation of IPinGREinIP with a key. */ +#define MLXSW_REG_RTDP_IPIP_TYPE_CHECK_ALLOW_GRE_KEY BIT(2) + +/* reg_rtdp_ipip_type_check + * Flags as per MLXSW_REG_RTDP_IPIP_TYPE_CHECK_*. If decapsulation failed due to + * these configurations then trap_id is IPIP_DECAP_ERROR. + * Access: RW + */ +MLXSW_ITEM32(reg, rtdp, ipip_type_check, 0x08, 24, 3); + +/* reg_rtdp_ipip_gre_key_check + * Whether GRE key should be checked. When check is enabled: + * - A packet received as IPinIP (without GRE) will always pass. + * - A packet received as IPinGREinIP without a key will not pass the check. + * - A packet received as IPinGREinIP with a key will pass the check only if the + * key in the packet is equal to expected_gre_key. + * If decapsulation failed due to GRE key then trap_id is IPIP_DECAP_ERROR. + * Access: RW + */ +MLXSW_ITEM32(reg, rtdp, ipip_gre_key_check, 0x08, 23, 1); + +/* reg_rtdp_ipip_ipv4_usip + * Underlay IPv4 address for ipv4 source address check. + * Reserved when sip_check is not '1'. + * Access: RW + */ +MLXSW_ITEM32(reg, rtdp, ipip_ipv4_usip, 0x0C, 0, 32); + +/* reg_rtdp_ipip_ipv6_usip_ptr + * This field is valid when sip_check is "sipv6 check explicitly". This is a + * pointer to the IPv6 DIP which is configured by RIPS. For Spectrum, the index + * is to the KVD linear. + * Reserved when sip_check is not MLXSW_REG_RTDP_IPIP_SIP_CHECK_FILTER_IPV6. + * Access: RW + */ +MLXSW_ITEM32(reg, rtdp, ipip_ipv6_usip_ptr, 0x10, 0, 24); + +/* reg_rtdp_ipip_expected_gre_key + * GRE key for checking. + * Reserved when gre_key_check is '0'. + * Access: RW + */ +MLXSW_ITEM32(reg, rtdp, ipip_expected_gre_key, 0x14, 0, 32); + +static inline void mlxsw_reg_rtdp_pack(char *payload, + enum mlxsw_reg_rtdp_type type, + u32 tunnel_index) +{ + MLXSW_REG_ZERO(rtdp, payload); + mlxsw_reg_rtdp_type_set(payload, type); + mlxsw_reg_rtdp_tunnel_index_set(payload, tunnel_index); +} + +static inline void +mlxsw_reg_rtdp_ipip4_pack(char *payload, u16 irif, + enum mlxsw_reg_rtdp_ipip_sip_check sip_check, + unsigned int type_check, bool gre_key_check, + u32 ipv4_usip, u32 expected_gre_key) +{ + mlxsw_reg_rtdp_ipip_irif_set(payload, irif); + mlxsw_reg_rtdp_ipip_sip_check_set(payload, sip_check); + mlxsw_reg_rtdp_ipip_type_check_set(payload, type_check); + mlxsw_reg_rtdp_ipip_gre_key_check_set(payload, gre_key_check); + mlxsw_reg_rtdp_ipip_ipv4_usip_set(payload, ipv4_usip); + mlxsw_reg_rtdp_ipip_expected_gre_key_set(payload, expected_gre_key); +} + /* MFCR - Management Fan Control Register * -------------------------------------- * This register controls the settings of the Fan Speed PWM mechanism. @@ -5982,15 +6354,6 @@ static inline void mlxsw_reg_mpsc_pack(char *payload, u8 local_port, bool e, MLXSW_REG_DEFINE(mgpc, MLXSW_REG_MGPC_ID, MLXSW_REG_MGPC_LEN); -enum mlxsw_reg_mgpc_counter_set_type { - /* No count */ - MLXSW_REG_MGPC_COUNTER_SET_TYPE_NO_COUT = 0x00, - /* Count packets and bytes */ - MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES = 0x03, - /* Count only packets */ - MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS = 0x05, -}; - /* reg_mgpc_counter_set_type * Counter set type. 
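/*
 * Editor's illustrative sketch (not part of the patch): the RTDP fields
 * above gate decapsulation by tunnel flavour (plain IPinIP, GRE without a
 * key, GRE with a key) and optionally by GRE key match; failures raise the
 * IPIP_DECAP_ERROR trap. A user-space model of that decision, with
 * invented enum and flag names:
 */
#include <stdbool.h>
#include <stdint.h>

enum pkt_encap { PKT_IPIP, PKT_GRE, PKT_GRE_KEY };

#define ALLOW_IPIP    (1u << 0)
#define ALLOW_GRE     (1u << 1)
#define ALLOW_GRE_KEY (1u << 2)

static bool may_decap(enum pkt_encap encap, unsigned int type_check,
		      bool key_check, uint32_t pkt_key, uint32_t expected_key)
{
	switch (encap) {
	case PKT_IPIP:
		return type_check & ALLOW_IPIP;	/* always passes the key check */
	case PKT_GRE:
		return (type_check & ALLOW_GRE) && !key_check;
	case PKT_GRE_KEY:
		return (type_check & ALLOW_GRE_KEY) &&
		       (!key_check || pkt_key == expected_key);
	}
	return false;
}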
* Access: OP @@ -6030,7 +6393,7 @@ MLXSW_ITEM64(reg, mgpc, packet_counter, 0x10, 0, 64); static inline void mlxsw_reg_mgpc_pack(char *payload, u32 counter_index, enum mlxsw_reg_mgpc_opcode opcode, - enum mlxsw_reg_mgpc_counter_set_type set_type) + enum mlxsw_reg_flow_counter_set_type set_type) { MLXSW_REG_ZERO(mgpc, payload); mlxsw_reg_mgpc_counter_index_set(payload, counter_index); @@ -6494,6 +6857,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(rgcr), MLXSW_REG(ritr), MLXSW_REG(ratr), + MLXSW_REG(rtdp), MLXSW_REG(ricnt), MLXSW_REG(ralta), MLXSW_REG(ralst), diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index c6a3e61b53bd..696b99e65a5a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -58,6 +58,7 @@ #include #include #include +#include #include "spectrum.h" #include "pci.h" @@ -381,12 +382,14 @@ int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp, int err; mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP, - MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES); + MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); if (err) return err; - *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl); - *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl); + if (packets) + *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl); + if (bytes) + *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl); return 0; } @@ -396,7 +399,7 @@ static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp, char mgpc_pl[MLXSW_REG_MGPC_LEN]; mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR, - MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES); + MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); } @@ -572,15 +575,14 @@ static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp, } static struct mlxsw_sp_span_entry * -mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port) +mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port) { - struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; int i; for (i = 0; i < mlxsw_sp->span.entries_count; i++) { struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i]; - if (curr->used && curr->local_port == port->local_port) + if (curr->used && curr->local_port == local_port) return curr; } return NULL; @@ -591,7 +593,8 @@ static struct mlxsw_sp_span_entry { struct mlxsw_sp_span_entry *span_entry; - span_entry = mlxsw_sp_span_entry_find(port); + span_entry = mlxsw_sp_span_entry_find(port->mlxsw_sp, + port->local_port); if (span_entry) { /* Already exists, just take a reference */ span_entry->ref_count++; @@ -780,12 +783,13 @@ static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from, } static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from, - struct mlxsw_sp_port *to, + u8 destination_port, enum mlxsw_sp_span_type type) { struct mlxsw_sp_span_entry *span_entry; - span_entry = mlxsw_sp_span_entry_find(to); + span_entry = mlxsw_sp_span_entry_find(from->mlxsw_sp, + destination_port); if (!span_entry) { netdev_err(from->dev, "no span entry found\n"); return; @@ -1560,14 +1564,12 @@ static void mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_port_mall_mirror_tc_entry *mirror) { - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; enum mlxsw_sp_span_type span_type; - struct mlxsw_sp_port *to_port; - to_port = 
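/*
 * Editor's illustrative sketch (not part of the patch): the flow-counter
 * read hunk above makes both output pointers optional, so callers that
 * only want packets or only want bytes can pass NULL. The general shape of
 * that convention, with invented names and stand-in register values:
 */
#include <stdint.h>
#include <stddef.h>

static int counter_read(uint64_t *packets, uint64_t *bytes)
{
	uint64_t hw_packets = 0, hw_bytes = 0;	/* stand-ins for register reads */

	if (packets)
		*packets = hw_packets;
	if (bytes)
		*bytes = hw_bytes;
	return 0;
}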
mlxsw_sp->ports[mirror->to_local_port]; span_type = mirror->ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; - mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type); + mlxsw_sp_span_mirror_remove(mlxsw_sp_port, mirror->to_local_port, + span_type); } static int @@ -1616,16 +1618,16 @@ mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port) } static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, - __be16 protocol, - struct tc_cls_matchall_offload *cls, + struct tc_cls_matchall_offload *f, bool ingress) { struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; + __be16 protocol = f->common.protocol; const struct tc_action *a; LIST_HEAD(actions); int err; - if (!tc_single_action(cls->exts)) { + if (!tcf_exts_has_one_action(f->exts)) { netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n"); return -EOPNOTSUPP; } @@ -1633,9 +1635,9 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL); if (!mall_tc_entry) return -ENOMEM; - mall_tc_entry->cookie = cls->cookie; + mall_tc_entry->cookie = f->cookie; - tcf_exts_to_list(cls->exts, &actions); + tcf_exts_to_list(f->exts, &actions); a = list_first_entry(&actions, struct tc_action, list); if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) { @@ -1647,7 +1649,7 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, mirror, a, ingress); } else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) { mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE; - err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, cls, + err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f, a, ingress); } else { err = -EOPNOTSUPP; @@ -1665,12 +1667,12 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, } static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, - struct tc_cls_matchall_offload *cls) + struct tc_cls_matchall_offload *f) { struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port, - cls->cookie); + f->cookie); if (!mall_tc_entry) { netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n"); return; @@ -1692,49 +1694,72 @@ static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, kfree(mall_tc_entry); } -static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle, - u32 chain_index, __be16 proto, - struct tc_to_netdev *tc) +static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, + struct tc_cls_matchall_offload *f) { - struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); - bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS); + bool ingress; - if (chain_index) + if (is_classid_clsact_ingress(f->common.classid)) + ingress = true; + else if (is_classid_clsact_egress(f->common.classid)) + ingress = false; + else return -EOPNOTSUPP; - switch (tc->type) { - case TC_SETUP_MATCHALL: - switch (tc->cls_mall->command) { - case TC_CLSMATCHALL_REPLACE: - return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, - proto, - tc->cls_mall, - ingress); - case TC_CLSMATCHALL_DESTROY: - mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, - tc->cls_mall); - return 0; - default: - return -EOPNOTSUPP; - } - case TC_SETUP_CLSFLOWER: - switch (tc->cls_flower->command) { - case TC_CLSFLOWER_REPLACE: - return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress, - proto, tc->cls_flower); - case TC_CLSFLOWER_DESTROY: - 
mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress, - tc->cls_flower); - return 0; - case TC_CLSFLOWER_STATS: - return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress, - tc->cls_flower); - default: - return -EOPNOTSUPP; - } - } + if (f->common.chain_index) + return -EOPNOTSUPP; - return -EOPNOTSUPP; + switch (f->command) { + case TC_CLSMATCHALL_REPLACE: + return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f, + ingress); + case TC_CLSMATCHALL_DESTROY: + mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int +mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_port *mlxsw_sp_port, + struct tc_cls_flower_offload *f) +{ + bool ingress; + + if (is_classid_clsact_ingress(f->common.classid)) + ingress = true; + else if (is_classid_clsact_egress(f->common.classid)) + ingress = false; + else + return -EOPNOTSUPP; + + switch (f->command) { + case TC_CLSFLOWER_REPLACE: + return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress, f); + case TC_CLSFLOWER_DESTROY: + mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress, f); + return 0; + case TC_CLSFLOWER_STATS: + return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress, f); + default: + return -EOPNOTSUPP; + } +} + +static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + + switch (type) { + case TC_SETUP_CLSMATCHALL: + return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data); + case TC_SETUP_CLSFLOWER: + return mlxsw_sp_setup_tc_cls_flower(mlxsw_sp_port, type_data); + default: + return -EOPNOTSUPP; + } } static const struct net_device_ops mlxsw_sp_port_netdev_ops = { @@ -2519,7 +2544,9 @@ static int mlxsw_sp_flash_device(struct net_device *dev, return err; } -#define MLXSW_SP_QSFP_I2C_ADDR 0x50 +#define MLXSW_SP_I2C_ADDR_LOW 0x50 +#define MLXSW_SP_I2C_ADDR_HIGH 0x51 +#define MLXSW_SP_EEPROM_PAGE_LENGTH 256 static int mlxsw_sp_query_module_eeprom(struct mlxsw_sp_port *mlxsw_sp_port, u16 offset, u16 size, void *data, @@ -2528,12 +2555,25 @@ static int mlxsw_sp_query_module_eeprom(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; char eeprom_tmp[MLXSW_SP_REG_MCIA_EEPROM_SIZE]; char mcia_pl[MLXSW_REG_MCIA_LEN]; + u16 i2c_addr; int status; int err; size = min_t(u16, size, MLXSW_SP_REG_MCIA_EEPROM_SIZE); + + if (offset < MLXSW_SP_EEPROM_PAGE_LENGTH && + offset + size > MLXSW_SP_EEPROM_PAGE_LENGTH) + /* Cross pages read, read until offset 256 in low page */ + size = MLXSW_SP_EEPROM_PAGE_LENGTH - offset; + + i2c_addr = MLXSW_SP_I2C_ADDR_LOW; + if (offset >= MLXSW_SP_EEPROM_PAGE_LENGTH) { + i2c_addr = MLXSW_SP_I2C_ADDR_HIGH; + offset -= MLXSW_SP_EEPROM_PAGE_LENGTH; + } + mlxsw_reg_mcia_pack(mcia_pl, mlxsw_sp_port->mapping.module, - 0, 0, offset, size, MLXSW_SP_QSFP_I2C_ADDR); + 0, 0, offset, size, i2c_addr); err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcia), mcia_pl); if (err) @@ -3333,15 +3373,48 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = { MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false), MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false), MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false), + MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD, + false), + MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD, + false), + MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD, + false), + MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD, + false), /* L3 traps */ - 
MLXSW_SP_RXL_NO_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false), - MLXSW_SP_RXL_NO_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false), - MLXSW_SP_RXL_NO_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false), - MLXSW_SP_RXL_MARK(OSPF, TRAP_TO_CPU, OSPF, false), - MLXSW_SP_RXL_NO_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false), - MLXSW_SP_RXL_NO_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false), - MLXSW_SP_RXL_NO_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, ARP_MISS, false), - MLXSW_SP_RXL_NO_MARK(BGP_IPV4, TRAP_TO_CPU, BGP_IPV4, false), + MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false), + MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false), + MLXSW_SP_RXL_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false), + MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false), + MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP, + false), + MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false), + MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false), + MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false), + MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP, + false), + MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false), + MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false), + MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false), + MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false), + MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false), + MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false), + MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND, + false), + MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND, + false), + MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND, + false), + MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND, + false), + MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false), + MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, + false), + MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, HOST_MISS, false), + MLXSW_SP_RXL_MARK(HOST_MISS_IPV6, TRAP_TO_CPU, HOST_MISS, false), + MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false), + MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false), + MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false), /* PKT Sample trap */ MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU, false, SP_IP2ME, DISCARD), @@ -3376,15 +3449,17 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) burst_size = 7; break; case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD: rate = 16 * 1024; burst_size = 10; break; - case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP: case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: - case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS: case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND: rate = 1024; burst_size = 7; break; @@ -3433,21 +3508,23 @@ static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core) priority = 5; tc = 5; break; - case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP: case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: priority = 4; tc = 4; break; case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP: case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD: priority = 3; tc = 3; break; 
case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND: priority = 2; tc = 2; break; - case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS: case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: priority = 1; @@ -3694,7 +3771,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) mlxsw_sp_fids_fini(mlxsw_sp); } -static struct mlxsw_config_profile mlxsw_sp_config_profile = { +static const struct mlxsw_config_profile mlxsw_sp_config_profile = { .used_max_vepa_channels = 1, .max_vepa_channels = 0, .used_max_mid = 1, @@ -4363,6 +4440,10 @@ static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = { .priority = 10, /* Must be called before FIB notifier block */ }; +static struct notifier_block mlxsw_sp_inet6addr_nb __read_mostly = { + .notifier_call = mlxsw_sp_inet6addr_event, +}; + static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = { .notifier_call = mlxsw_sp_router_netevent_event, }; @@ -4383,6 +4464,7 @@ static int __init mlxsw_sp_module_init(void) register_netdevice_notifier(&mlxsw_sp_netdevice_nb); register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); + register_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); register_netevent_notifier(&mlxsw_sp_router_netevent_nb); err = mlxsw_core_driver_register(&mlxsw_sp_driver); @@ -4399,6 +4481,7 @@ static int __init mlxsw_sp_module_init(void) mlxsw_core_driver_unregister(&mlxsw_sp_driver); err_core_driver_register: unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb); + unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb); return err; @@ -4409,6 +4492,7 @@ static void __exit mlxsw_sp_module_exit(void) mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver); mlxsw_core_driver_unregister(&mlxsw_sp_driver); unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb); + unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 5ef98d4d0ab6..84ce83acdc19 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -77,6 +77,7 @@ enum mlxsw_sp_rif_type { MLXSW_SP_RIF_TYPE_SUBPORT, MLXSW_SP_RIF_TYPE_VLAN, MLXSW_SP_RIF_TYPE_FID, + MLXSW_SP_RIF_TYPE_IPIP_LB, /* IP-in-IP loopback. 
*/ MLXSW_SP_RIF_TYPE_MAX, }; @@ -384,6 +385,8 @@ int mlxsw_sp_router_netevent_event(struct notifier_block *unused, int mlxsw_sp_netdevice_router_port_event(struct net_device *dev); int mlxsw_sp_inetaddr_event(struct notifier_block *unused, unsigned long event, void *ptr); +int mlxsw_sp_inet6addr_event(struct notifier_block *unused, + unsigned long event, void *ptr); int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event, struct netdev_notifier_changeupper_info *info); void @@ -415,6 +418,7 @@ struct mlxsw_sp_acl_profile_ops { int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv, struct net_device *dev, bool ingress); void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv); + u16 (*ruleset_group_id)(void *ruleset_priv); size_t rule_priv_size; int (*rule_add)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv, void *rule_priv, @@ -438,11 +442,16 @@ struct mlxsw_sp_acl_ruleset; /* spectrum_acl.c */ struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl); struct mlxsw_sp_acl_ruleset * -mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, - struct net_device *dev, bool ingress, +mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct net_device *dev, + bool ingress, u32 chain_index, + enum mlxsw_sp_acl_profile profile); +struct mlxsw_sp_acl_ruleset * +mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, struct net_device *dev, + bool ingress, u32 chain_index, enum mlxsw_sp_acl_profile profile); void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_ruleset *ruleset); +u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset); struct mlxsw_sp_acl_rule_info * mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl); @@ -506,7 +515,7 @@ extern const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops; /* spectrum_flower.c */ int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, - __be16 protocol, struct tc_cls_flower_offload *f); + struct tc_cls_flower_offload *f); void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, struct tc_cls_flower_offload *f); int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index 01a1501b56ca..4b2455e3e079 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -74,6 +74,7 @@ struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl) struct mlxsw_sp_acl_ruleset_ht_key { struct net_device *dev; /* dev this ruleset is bound to */ bool ingress; + u32 chain_index; const struct mlxsw_sp_acl_profile_ops *ops; }; @@ -163,7 +164,8 @@ static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp, static int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_ruleset *ruleset, - struct net_device *dev, bool ingress) + struct net_device *dev, bool ingress, + u32 chain_index) { const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; struct mlxsw_sp_acl *acl = mlxsw_sp->acl; @@ -171,13 +173,20 @@ static int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp, ruleset->ht_key.dev = dev; ruleset->ht_key.ingress = ingress; + ruleset->ht_key.chain_index = chain_index; err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node, mlxsw_sp_acl_ruleset_ht_params); if (err) return err; - err = ops->ruleset_bind(mlxsw_sp, ruleset->priv, dev, ingress); - if (err) - goto err_ops_ruleset_bind; + if 
(!ruleset->ht_key.chain_index) { + /* We only need ruleset with chain index 0, the implicit one, + * to be directly bound to device. The rest of the rulesets + * are bound by "Goto action set". + */ + err = ops->ruleset_bind(mlxsw_sp, ruleset->priv, dev, ingress); + if (err) + goto err_ops_ruleset_bind; + } return 0; err_ops_ruleset_bind: @@ -192,7 +201,8 @@ static void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; struct mlxsw_sp_acl *acl = mlxsw_sp->acl; - ops->ruleset_unbind(mlxsw_sp, ruleset->priv); + if (!ruleset->ht_key.chain_index) + ops->ruleset_unbind(mlxsw_sp, ruleset->priv); rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node, mlxsw_sp_acl_ruleset_ht_params); } @@ -211,14 +221,48 @@ static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset); } +static struct mlxsw_sp_acl_ruleset * +__mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl, struct net_device *dev, + bool ingress, u32 chain_index, + const struct mlxsw_sp_acl_profile_ops *ops) +{ + struct mlxsw_sp_acl_ruleset_ht_key ht_key; + + memset(&ht_key, 0, sizeof(ht_key)); + ht_key.dev = dev; + ht_key.ingress = ingress; + ht_key.chain_index = chain_index; + ht_key.ops = ops; + return rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key, + mlxsw_sp_acl_ruleset_ht_params); +} + struct mlxsw_sp_acl_ruleset * -mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, - struct net_device *dev, bool ingress, +mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct net_device *dev, + bool ingress, u32 chain_index, + enum mlxsw_sp_acl_profile profile) +{ + const struct mlxsw_sp_acl_profile_ops *ops; + struct mlxsw_sp_acl *acl = mlxsw_sp->acl; + struct mlxsw_sp_acl_ruleset *ruleset; + + ops = acl->ops->profile_ops(mlxsw_sp, profile); + if (!ops) + return ERR_PTR(-EINVAL); + ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, dev, ingress, + chain_index, ops); + if (!ruleset) + return ERR_PTR(-ENOENT); + return ruleset; +} + +struct mlxsw_sp_acl_ruleset * +mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, struct net_device *dev, + bool ingress, u32 chain_index, enum mlxsw_sp_acl_profile profile) { const struct mlxsw_sp_acl_profile_ops *ops; struct mlxsw_sp_acl *acl = mlxsw_sp->acl; - struct mlxsw_sp_acl_ruleset_ht_key ht_key; struct mlxsw_sp_acl_ruleset *ruleset; int err; @@ -226,12 +270,8 @@ mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, if (!ops) return ERR_PTR(-EINVAL); - memset(&ht_key, 0, sizeof(ht_key)); - ht_key.dev = dev; - ht_key.ingress = ingress; - ht_key.ops = ops; - ruleset = rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key, - mlxsw_sp_acl_ruleset_ht_params); + ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, dev, ingress, + chain_index, ops); if (ruleset) { mlxsw_sp_acl_ruleset_ref_inc(ruleset); return ruleset; @@ -239,7 +279,8 @@ mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, ruleset = mlxsw_sp_acl_ruleset_create(mlxsw_sp, ops); if (IS_ERR(ruleset)) return ruleset; - err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, ruleset, dev, ingress); + err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, ruleset, dev, + ingress, chain_index); if (err) goto err_ruleset_bind; return ruleset; @@ -255,6 +296,13 @@ void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset); } +u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset) +{ + const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; + + return ops->ruleset_group_id(ruleset->priv); +} + static 
int mlxsw_sp_acl_rulei_counter_alloc(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule_info *rulei) @@ -369,7 +417,7 @@ int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp, local_port = mlxsw_sp_port->local_port; in_port = false; } else { - /* If out_dev is NULL, the called wants to + /* If out_dev is NULL, the caller wants to * set forward to ingress port. */ local_port = 0; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h index 85d5001a5818..fb8031828454 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h @@ -70,6 +70,9 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = { static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4[] = { MLXSW_AFK_ELEMENT_INST_U32(SRC_IP4, 0x00, 0, 32), + MLXSW_AFK_ELEMENT_INST_U32(IP_ECN, 0x04, 4, 2), + MLXSW_AFK_ELEMENT_INST_U32(IP_TTL_, 0x04, 24, 8), + MLXSW_AFK_ELEMENT_INST_U32(IP_DSCP, 0x08, 0, 6), MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x08, 8, 9), /* TCP_CONTROL+TCP_ECN */ }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c index 61a10f166f97..50b40de1fb91 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c @@ -295,6 +295,12 @@ mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp, mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl); } +static u16 +mlxsw_sp_acl_tcam_group_id(struct mlxsw_sp_acl_tcam_group *group) +{ + return group->id; +} + static unsigned int mlxsw_sp_acl_tcam_region_prio(struct mlxsw_sp_acl_tcam_region *region) { @@ -984,6 +990,9 @@ static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = { MLXSW_AFK_ELEMENT_VID, MLXSW_AFK_ELEMENT_PCP, MLXSW_AFK_ELEMENT_TCP_FLAGS, + MLXSW_AFK_ELEMENT_IP_TTL_, + MLXSW_AFK_ELEMENT_IP_ECN, + MLXSW_AFK_ELEMENT_IP_DSCP, }; static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = { @@ -1060,6 +1069,14 @@ mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group); } +static u16 +mlxsw_sp_acl_tcam_flower_ruleset_group_id(void *ruleset_priv) +{ + struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv; + + return mlxsw_sp_acl_tcam_group_id(&ruleset->group); +} + static int mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv, void *rule_priv, @@ -1096,6 +1113,7 @@ static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = { .ruleset_del = mlxsw_sp_acl_tcam_flower_ruleset_del, .ruleset_bind = mlxsw_sp_acl_tcam_flower_ruleset_bind, .ruleset_unbind = mlxsw_sp_acl_tcam_flower_ruleset_unbind, + .ruleset_group_id = mlxsw_sp_acl_tcam_flower_ruleset_group_id, .rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_rule), .rule_add = mlxsw_sp_acl_tcam_flower_rule_add, .rule_del = mlxsw_sp_acl_tcam_flower_rule_del, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c index af2c65a3fd9f..51e6846da72b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c @@ -74,6 +74,9 @@ static struct devlink_dpipe_header mlxsw_sp_dpipe_header_metadata = { static struct devlink_dpipe_header *mlxsw_dpipe_headers[] = { &mlxsw_sp_dpipe_header_metadata, + 
&devlink_dpipe_header_ethernet, + &devlink_dpipe_header_ipv4, + &devlink_dpipe_header_ipv6, }; static struct devlink_dpipe_headers mlxsw_sp_dpipe_headers = { @@ -114,26 +117,6 @@ static int mlxsw_sp_dpipe_table_erif_matches_dump(void *priv, return devlink_dpipe_match_put(skb, &match); } -static void mlxsw_sp_erif_entry_clear(struct devlink_dpipe_entry *entry) -{ - unsigned int value_count, value_index; - struct devlink_dpipe_value *value; - - value = entry->action_values; - value_count = entry->action_values_count; - for (value_index = 0; value_index < value_count; value_index++) { - kfree(value[value_index].value); - kfree(value[value_index].mask); - } - - value = entry->match_values; - value_count = entry->match_values_count; - for (value_index = 0; value_index < value_count; value_index++) { - kfree(value[value_index].value); - kfree(value[value_index].mask); - } -} - static void mlxsw_sp_erif_match_action_prepare(struct devlink_dpipe_match *match, struct devlink_dpipe_action *action) @@ -215,8 +198,8 @@ static int mlxsw_sp_erif_entry_get(struct mlxsw_sp *mlxsw_sp, } static int -mlxsw_sp_table_erif_entries_dump(void *priv, bool counters_enabled, - struct devlink_dpipe_dump_ctx *dump_ctx) +mlxsw_sp_dpipe_table_erif_entries_dump(void *priv, bool counters_enabled, + struct devlink_dpipe_dump_ctx *dump_ctx) { struct devlink_dpipe_value match_value, action_value; struct devlink_dpipe_action action = {0}; @@ -270,16 +253,16 @@ mlxsw_sp_table_erif_entries_dump(void *priv, bool counters_enabled, goto start_again; rtnl_unlock(); - mlxsw_sp_erif_entry_clear(&entry); + devlink_dpipe_entry_clear(&entry); return 0; err_entry_append: err_entry_get: rtnl_unlock(); - mlxsw_sp_erif_entry_clear(&entry); + devlink_dpipe_entry_clear(&entry); return err; } -static int mlxsw_sp_table_erif_counters_update(void *priv, bool enable) +static int mlxsw_sp_dpipe_table_erif_counters_update(void *priv, bool enable) { struct mlxsw_sp *mlxsw_sp = priv; int i; @@ -301,24 +284,29 @@ static int mlxsw_sp_table_erif_counters_update(void *priv, bool enable) return 0; } +static u64 mlxsw_sp_dpipe_table_erif_size_get(void *priv) +{ + struct mlxsw_sp *mlxsw_sp = priv; + + return MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); +} + static struct devlink_dpipe_table_ops mlxsw_sp_erif_ops = { .matches_dump = mlxsw_sp_dpipe_table_erif_matches_dump, .actions_dump = mlxsw_sp_dpipe_table_erif_actions_dump, - .entries_dump = mlxsw_sp_table_erif_entries_dump, - .counters_set_update = mlxsw_sp_table_erif_counters_update, + .entries_dump = mlxsw_sp_dpipe_table_erif_entries_dump, + .counters_set_update = mlxsw_sp_dpipe_table_erif_counters_update, + .size_get = mlxsw_sp_dpipe_table_erif_size_get, }; static int mlxsw_sp_dpipe_erif_table_init(struct mlxsw_sp *mlxsw_sp) { struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); - u64 table_size; - table_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); return devlink_dpipe_table_register(devlink, MLXSW_SP_DPIPE_TABLE_NAME_ERIF, &mlxsw_sp_erif_ops, - mlxsw_sp, table_size, - false); + mlxsw_sp, false); } static void mlxsw_sp_dpipe_erif_table_fini(struct mlxsw_sp *mlxsw_sp) @@ -328,6 +316,516 @@ static void mlxsw_sp_dpipe_erif_table_fini(struct mlxsw_sp *mlxsw_sp) devlink_dpipe_table_unregister(devlink, MLXSW_SP_DPIPE_TABLE_NAME_ERIF); } +static int mlxsw_sp_dpipe_table_host_matches_dump(struct sk_buff *skb, int type) +{ + struct devlink_dpipe_match match = {0}; + int err; + + match.type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT; + match.header = &mlxsw_sp_dpipe_header_metadata; + match.field_id 
= MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT; + + err = devlink_dpipe_match_put(skb, &match); + if (err) + return err; + + switch (type) { + case AF_INET: + match.type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT; + match.header = &devlink_dpipe_header_ipv4; + match.field_id = DEVLINK_DPIPE_FIELD_IPV4_DST_IP; + break; + case AF_INET6: + match.type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT; + match.header = &devlink_dpipe_header_ipv6; + match.field_id = DEVLINK_DPIPE_FIELD_IPV6_DST_IP; + break; + default: + WARN_ON(1); + return -EINVAL; + } + + return devlink_dpipe_match_put(skb, &match); +} + +static int +mlxsw_sp_dpipe_table_host4_matches_dump(void *priv, struct sk_buff *skb) +{ + return mlxsw_sp_dpipe_table_host_matches_dump(skb, AF_INET); +} + +static int +mlxsw_sp_dpipe_table_host_actions_dump(void *priv, struct sk_buff *skb) +{ + struct devlink_dpipe_action action = {0}; + + action.type = DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY; + action.header = &devlink_dpipe_header_ethernet; + action.field_id = DEVLINK_DPIPE_FIELD_ETHERNET_DST_MAC; + + return devlink_dpipe_action_put(skb, &action); +} + +enum mlxsw_sp_dpipe_table_host_match { + MLXSW_SP_DPIPE_TABLE_HOST_MATCH_RIF, + MLXSW_SP_DPIPE_TABLE_HOST_MATCH_DIP, + MLXSW_SP_DPIPE_TABLE_HOST_MATCH_COUNT, +}; + +static void +mlxsw_sp_dpipe_table_host_match_action_prepare(struct devlink_dpipe_match *matches, + struct devlink_dpipe_action *action, + int type) +{ + struct devlink_dpipe_match *match; + + match = &matches[MLXSW_SP_DPIPE_TABLE_HOST_MATCH_RIF]; + match->type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT; + match->header = &mlxsw_sp_dpipe_header_metadata; + match->field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT; + + match = &matches[MLXSW_SP_DPIPE_TABLE_HOST_MATCH_DIP]; + match->type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT; + switch (type) { + case AF_INET: + match->header = &devlink_dpipe_header_ipv4; + match->field_id = DEVLINK_DPIPE_FIELD_IPV4_DST_IP; + break; + case AF_INET6: + match->header = &devlink_dpipe_header_ipv6; + match->field_id = DEVLINK_DPIPE_FIELD_IPV6_DST_IP; + break; + default: + WARN_ON(1); + return; + } + + action->type = DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY; + action->header = &devlink_dpipe_header_ethernet; + action->field_id = DEVLINK_DPIPE_FIELD_ETHERNET_DST_MAC; +} + +static int +mlxsw_sp_dpipe_table_host_entry_prepare(struct devlink_dpipe_entry *entry, + struct devlink_dpipe_value *match_values, + struct devlink_dpipe_match *matches, + struct devlink_dpipe_value *action_value, + struct devlink_dpipe_action *action, + int type) +{ + struct devlink_dpipe_value *match_value; + struct devlink_dpipe_match *match; + + entry->match_values = match_values; + entry->match_values_count = MLXSW_SP_DPIPE_TABLE_HOST_MATCH_COUNT; + + entry->action_values = action_value; + entry->action_values_count = 1; + + match = &matches[MLXSW_SP_DPIPE_TABLE_HOST_MATCH_RIF]; + match_value = &match_values[MLXSW_SP_DPIPE_TABLE_HOST_MATCH_RIF]; + + match_value->match = match; + match_value->value_size = sizeof(u32); + match_value->value = kmalloc(match_value->value_size, GFP_KERNEL); + if (!match_value->value) + return -ENOMEM; + + match = &matches[MLXSW_SP_DPIPE_TABLE_HOST_MATCH_DIP]; + match_value = &match_values[MLXSW_SP_DPIPE_TABLE_HOST_MATCH_DIP]; + + match_value->match = match; + switch (type) { + case AF_INET: + match_value->value_size = sizeof(u32); + break; + case AF_INET6: + match_value->value_size = sizeof(struct in6_addr); + break; + default: + WARN_ON(1); + return -EINVAL; + } + + match_value->value = kmalloc(match_value->value_size, 
GFP_KERNEL); + if (!match_value->value) + return -ENOMEM; + + action_value->action = action; + action_value->value_size = sizeof(u64); + action_value->value = kmalloc(action_value->value_size, GFP_KERNEL); + if (!action_value->value) + return -ENOMEM; + + return 0; +} + +static void +__mlxsw_sp_dpipe_table_host_entry_fill(struct devlink_dpipe_entry *entry, + struct mlxsw_sp_rif *rif, + unsigned char *ha, void *dip) +{ + struct devlink_dpipe_value *value; + u32 *rif_value; + u8 *ha_value; + + /* Set Match RIF index */ + value = &entry->match_values[MLXSW_SP_DPIPE_TABLE_HOST_MATCH_RIF]; + + rif_value = value->value; + *rif_value = mlxsw_sp_rif_index(rif); + value->mapping_value = mlxsw_sp_rif_dev_ifindex(rif); + value->mapping_valid = true; + + /* Set Match DIP */ + value = &entry->match_values[MLXSW_SP_DPIPE_TABLE_HOST_MATCH_DIP]; + memcpy(value->value, dip, value->value_size); + + /* Set Action DMAC */ + value = entry->action_values; + ha_value = value->value; + ether_addr_copy(ha_value, ha); +} + +static void +mlxsw_sp_dpipe_table_host4_entry_fill(struct devlink_dpipe_entry *entry, + struct mlxsw_sp_neigh_entry *neigh_entry, + struct mlxsw_sp_rif *rif) +{ + unsigned char *ha; + u32 dip; + + ha = mlxsw_sp_neigh_entry_ha(neigh_entry); + dip = mlxsw_sp_neigh4_entry_dip(neigh_entry); + __mlxsw_sp_dpipe_table_host_entry_fill(entry, rif, ha, &dip); +} + +static void +mlxsw_sp_dpipe_table_host6_entry_fill(struct devlink_dpipe_entry *entry, + struct mlxsw_sp_neigh_entry *neigh_entry, + struct mlxsw_sp_rif *rif) +{ + struct in6_addr *dip; + unsigned char *ha; + + ha = mlxsw_sp_neigh_entry_ha(neigh_entry); + dip = mlxsw_sp_neigh6_entry_dip(neigh_entry); + + __mlxsw_sp_dpipe_table_host_entry_fill(entry, rif, ha, dip); +} + +static void +mlxsw_sp_dpipe_table_host_entry_fill(struct mlxsw_sp *mlxsw_sp, + struct devlink_dpipe_entry *entry, + struct mlxsw_sp_neigh_entry *neigh_entry, + struct mlxsw_sp_rif *rif, + int type) +{ + int err; + + switch (type) { + case AF_INET: + mlxsw_sp_dpipe_table_host4_entry_fill(entry, neigh_entry, rif); + break; + case AF_INET6: + mlxsw_sp_dpipe_table_host6_entry_fill(entry, neigh_entry, rif); + break; + default: + WARN_ON(1); + return; + } + + err = mlxsw_sp_neigh_counter_get(mlxsw_sp, neigh_entry, + &entry->counter); + if (!err) + entry->counter_valid = true; +} + +static int +mlxsw_sp_dpipe_table_host_entries_get(struct mlxsw_sp *mlxsw_sp, + struct devlink_dpipe_entry *entry, + bool counters_enabled, + struct devlink_dpipe_dump_ctx *dump_ctx, + int type) +{ + int rif_neigh_count = 0; + int rif_neigh_skip = 0; + int neigh_count = 0; + int rif_count; + int i, j; + int err; + + rtnl_lock(); + i = 0; + rif_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); +start_again: + err = devlink_dpipe_entry_ctx_prepare(dump_ctx); + if (err) + goto err_ctx_prepare; + j = 0; + rif_neigh_skip = rif_neigh_count; + for (; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) { + struct mlxsw_sp_rif *rif = mlxsw_sp_rif_by_index(mlxsw_sp, i); + struct mlxsw_sp_neigh_entry *neigh_entry; + + if (!rif) + continue; + + rif_neigh_count = 0; + mlxsw_sp_rif_neigh_for_each(neigh_entry, rif) { + int neigh_type = mlxsw_sp_neigh_entry_type(neigh_entry); + + if (neigh_type != type) + continue; + + if (neigh_type == AF_INET6 && + mlxsw_sp_neigh_ipv6_ignore(neigh_entry)) + continue; + + if (rif_neigh_count < rif_neigh_skip) + goto skip; + + mlxsw_sp_dpipe_table_host_entry_fill(mlxsw_sp, entry, + neigh_entry, rif, + type); + entry->index = neigh_count; + err = 
devlink_dpipe_entry_ctx_append(dump_ctx, entry); + if (err) { + if (err == -EMSGSIZE) { + if (!j) + goto err_entry_append; + else + goto out; + } + goto err_entry_append; + } + neigh_count++; + j++; +skip: + rif_neigh_count++; + } + rif_neigh_skip = 0; + } +out: + devlink_dpipe_entry_ctx_close(dump_ctx); + if (i != rif_count) + goto start_again; + + rtnl_unlock(); + return 0; + +err_ctx_prepare: +err_entry_append: + rtnl_unlock(); + return err; +} + +static int +mlxsw_sp_dpipe_table_host_entries_dump(struct mlxsw_sp *mlxsw_sp, + bool counters_enabled, + struct devlink_dpipe_dump_ctx *dump_ctx, + int type) +{ + struct devlink_dpipe_value match_values[MLXSW_SP_DPIPE_TABLE_HOST_MATCH_COUNT]; + struct devlink_dpipe_match matches[MLXSW_SP_DPIPE_TABLE_HOST_MATCH_COUNT]; + struct devlink_dpipe_value action_value; + struct devlink_dpipe_action action = {0}; + struct devlink_dpipe_entry entry = {0}; + int err; + + memset(matches, 0, MLXSW_SP_DPIPE_TABLE_HOST_MATCH_COUNT * + sizeof(matches[0])); + memset(match_values, 0, MLXSW_SP_DPIPE_TABLE_HOST_MATCH_COUNT * + sizeof(match_values[0])); + memset(&action_value, 0, sizeof(action_value)); + + mlxsw_sp_dpipe_table_host_match_action_prepare(matches, &action, type); + err = mlxsw_sp_dpipe_table_host_entry_prepare(&entry, match_values, + matches, &action_value, + &action, type); + if (err) + goto out; + + err = mlxsw_sp_dpipe_table_host_entries_get(mlxsw_sp, &entry, + counters_enabled, dump_ctx, + type); +out: + devlink_dpipe_entry_clear(&entry); + return err; +} + +static int +mlxsw_sp_dpipe_table_host4_entries_dump(void *priv, bool counters_enabled, + struct devlink_dpipe_dump_ctx *dump_ctx) +{ + struct mlxsw_sp *mlxsw_sp = priv; + + return mlxsw_sp_dpipe_table_host_entries_dump(mlxsw_sp, + counters_enabled, + dump_ctx, AF_INET); +} + +static void +mlxsw_sp_dpipe_table_host_counters_update(struct mlxsw_sp *mlxsw_sp, + bool enable, int type) +{ + int i; + + rtnl_lock(); + for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) { + struct mlxsw_sp_rif *rif = mlxsw_sp_rif_by_index(mlxsw_sp, i); + struct mlxsw_sp_neigh_entry *neigh_entry; + + if (!rif) + continue; + mlxsw_sp_rif_neigh_for_each(neigh_entry, rif) { + int neigh_type = mlxsw_sp_neigh_entry_type(neigh_entry); + + if (neigh_type != type) + continue; + + if (neigh_type == AF_INET6 && + mlxsw_sp_neigh_ipv6_ignore(neigh_entry)) + continue; + + mlxsw_sp_neigh_entry_counter_update(mlxsw_sp, + neigh_entry, + enable); + } + } + rtnl_unlock(); +} + +static int mlxsw_sp_dpipe_table_host4_counters_update(void *priv, bool enable) +{ + struct mlxsw_sp *mlxsw_sp = priv; + + mlxsw_sp_dpipe_table_host_counters_update(mlxsw_sp, enable, AF_INET); + return 0; +} + +static u64 +mlxsw_sp_dpipe_table_host_size_get(struct mlxsw_sp *mlxsw_sp, int type) +{ + u64 size = 0; + int i; + + rtnl_lock(); + for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) { + struct mlxsw_sp_rif *rif = mlxsw_sp_rif_by_index(mlxsw_sp, i); + struct mlxsw_sp_neigh_entry *neigh_entry; + + if (!rif) + continue; + mlxsw_sp_rif_neigh_for_each(neigh_entry, rif) { + int neigh_type = mlxsw_sp_neigh_entry_type(neigh_entry); + + if (neigh_type != type) + continue; + + if (neigh_type == AF_INET6 && + mlxsw_sp_neigh_ipv6_ignore(neigh_entry)) + continue; + + size++; + } + } + rtnl_unlock(); + + return size; +} + +static u64 mlxsw_sp_dpipe_table_host4_size_get(void *priv) +{ + struct mlxsw_sp *mlxsw_sp = priv; + + return mlxsw_sp_dpipe_table_host_size_get(mlxsw_sp, AF_INET); +} + +static struct devlink_dpipe_table_ops 
mlxsw_sp_host4_ops = { + .matches_dump = mlxsw_sp_dpipe_table_host4_matches_dump, + .actions_dump = mlxsw_sp_dpipe_table_host_actions_dump, + .entries_dump = mlxsw_sp_dpipe_table_host4_entries_dump, + .counters_set_update = mlxsw_sp_dpipe_table_host4_counters_update, + .size_get = mlxsw_sp_dpipe_table_host4_size_get, +}; + +static int mlxsw_sp_dpipe_host4_table_init(struct mlxsw_sp *mlxsw_sp) +{ + struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); + + return devlink_dpipe_table_register(devlink, + MLXSW_SP_DPIPE_TABLE_NAME_HOST4, + &mlxsw_sp_host4_ops, + mlxsw_sp, false); +} + +static void mlxsw_sp_dpipe_host4_table_fini(struct mlxsw_sp *mlxsw_sp) +{ + struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); + + devlink_dpipe_table_unregister(devlink, + MLXSW_SP_DPIPE_TABLE_NAME_HOST4); +} + +static int +mlxsw_sp_dpipe_table_host6_matches_dump(void *priv, struct sk_buff *skb) +{ + return mlxsw_sp_dpipe_table_host_matches_dump(skb, AF_INET6); +} + +static int +mlxsw_sp_dpipe_table_host6_entries_dump(void *priv, bool counters_enabled, + struct devlink_dpipe_dump_ctx *dump_ctx) +{ + struct mlxsw_sp *mlxsw_sp = priv; + + return mlxsw_sp_dpipe_table_host_entries_dump(mlxsw_sp, + counters_enabled, + dump_ctx, AF_INET6); +} + +static int mlxsw_sp_dpipe_table_host6_counters_update(void *priv, bool enable) +{ + struct mlxsw_sp *mlxsw_sp = priv; + + mlxsw_sp_dpipe_table_host_counters_update(mlxsw_sp, enable, AF_INET6); + return 0; +} + +static u64 mlxsw_sp_dpipe_table_host6_size_get(void *priv) +{ + struct mlxsw_sp *mlxsw_sp = priv; + + return mlxsw_sp_dpipe_table_host_size_get(mlxsw_sp, AF_INET6); +} + +static struct devlink_dpipe_table_ops mlxsw_sp_host6_ops = { + .matches_dump = mlxsw_sp_dpipe_table_host6_matches_dump, + .actions_dump = mlxsw_sp_dpipe_table_host_actions_dump, + .entries_dump = mlxsw_sp_dpipe_table_host6_entries_dump, + .counters_set_update = mlxsw_sp_dpipe_table_host6_counters_update, + .size_get = mlxsw_sp_dpipe_table_host6_size_get, +}; + +static int mlxsw_sp_dpipe_host6_table_init(struct mlxsw_sp *mlxsw_sp) +{ + struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); + + return devlink_dpipe_table_register(devlink, + MLXSW_SP_DPIPE_TABLE_NAME_HOST6, + &mlxsw_sp_host6_ops, + mlxsw_sp, false); +} + +static void mlxsw_sp_dpipe_host6_table_fini(struct mlxsw_sp *mlxsw_sp) +{ + struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); + + devlink_dpipe_table_unregister(devlink, + MLXSW_SP_DPIPE_TABLE_NAME_HOST6); +} + int mlxsw_sp_dpipe_init(struct mlxsw_sp *mlxsw_sp) { struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); @@ -339,10 +837,22 @@ int mlxsw_sp_dpipe_init(struct mlxsw_sp *mlxsw_sp) return err; err = mlxsw_sp_dpipe_erif_table_init(mlxsw_sp); if (err) - goto err_erif_register; + goto err_erif_table_init; + + err = mlxsw_sp_dpipe_host4_table_init(mlxsw_sp); + if (err) + goto err_host4_table_init; + + err = mlxsw_sp_dpipe_host6_table_init(mlxsw_sp); + if (err) + goto err_host6_table_init; return 0; -err_erif_register: +err_host6_table_init: + mlxsw_sp_dpipe_host4_table_fini(mlxsw_sp); +err_host4_table_init: + mlxsw_sp_dpipe_erif_table_fini(mlxsw_sp); +err_erif_table_init: devlink_dpipe_headers_unregister(priv_to_devlink(mlxsw_sp->core)); return err; } @@ -351,6 +861,8 @@ void mlxsw_sp_dpipe_fini(struct mlxsw_sp *mlxsw_sp) { struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); + mlxsw_sp_dpipe_host6_table_fini(mlxsw_sp); + mlxsw_sp_dpipe_host4_table_fini(mlxsw_sp); mlxsw_sp_dpipe_erif_table_fini(mlxsw_sp); devlink_dpipe_headers_unregister(devlink); } 
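Note on the spectrum_dpipe.c hunks above: the erif table registration drops its fixed table_size argument in favour of a per-table .size_get callback, and the new mlxsw_host4/mlxsw_host6 tables are registered through the same five-argument devlink_dpipe_table_register() call. The fragment below is only an illustrative sketch of that registration pattern; the table name, ops structure and size callback are made up for the example and are not part of this patch.

	#include <net/devlink.h>

	/* Hypothetical example (not from the patch): a dpipe table whose size
	 * is computed on demand through .size_get rather than being fixed at
	 * registration time.
	 */
	static u64 example_dpipe_table_size_get(void *priv)
	{
		/* In the mlxsw tables above this walks the relevant objects
		 * (RIFs, neighbour entries) and counts them; the sketch just
		 * reports an empty table.
		 */
		return 0;
	}

	static struct devlink_dpipe_table_ops example_dpipe_table_ops = {
		.size_get = example_dpipe_table_size_get,
		/* A real table would also fill in .matches_dump,
		 * .actions_dump, .entries_dump and .counters_set_update,
		 * as the mlxsw_erif/mlxsw_host4/mlxsw_host6 ops do.
		 */
	};

	static int example_dpipe_table_init(struct devlink *devlink, void *priv)
	{
		/* Same five-argument form used above; the former table_size
		 * argument is gone, so the size is always queried via the op.
		 */
		return devlink_dpipe_table_register(devlink, "example_table",
						    &example_dpipe_table_ops,
						    priv, false);
	}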
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h index d2089298cba3..283fde4e6783 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h @@ -35,9 +35,26 @@ #ifndef _MLXSW_PIPELINE_H_ #define _MLXSW_PIPELINE_H_ +#if IS_ENABLED(CONFIG_NET_DEVLINK) + int mlxsw_sp_dpipe_init(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_dpipe_fini(struct mlxsw_sp *mlxsw_sp); +#else + +static inline int mlxsw_sp_dpipe_init(struct mlxsw_sp *mlxsw_sp) +{ + return 0; +} + +static inline void mlxsw_sp_dpipe_fini(struct mlxsw_sp *mlxsw_sp) +{ +} + +#endif + #define MLXSW_SP_DPIPE_TABLE_NAME_ERIF "mlxsw_erif" +#define MLXSW_SP_DPIPE_TABLE_NAME_HOST4 "mlxsw_host4" +#define MLXSW_SP_DPIPE_TABLE_NAME_HOST6 "mlxsw_host6" #endif /* _MLXSW_PIPELINE_H_*/ diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c index 6afbe9ec64e2..bbd238e50f05 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c @@ -109,7 +109,6 @@ static const int mlxsw_sp_sfgc_uc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = { static const int mlxsw_sp_sfgc_bc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = { [MLXSW_REG_SFGC_TYPE_BROADCAST] = 1, - [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6] = 1, [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP] = 1, [MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL] = 1, [MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST] = 1, @@ -117,6 +116,7 @@ static const int mlxsw_sp_sfgc_bc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = { static const int mlxsw_sp_sfgc_mc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = { [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4] = 1, + [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6] = 1, }; static const int *mlxsw_sp_packet_type_sfgc_types[] = { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index 21bb2bf62d3e..8aace9a06a5d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -45,7 +45,7 @@ #include "core_acl_flex_keys.h" static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, - struct net_device *dev, + struct net_device *dev, bool ingress, struct mlxsw_sp_acl_rule_info *rulei, struct tcf_exts *exts) { @@ -53,7 +53,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, LIST_HEAD(actions); int err; - if (tc_no_actions(exts)) + if (!tcf_exts_has_actions(exts)) return 0; /* Count action is inserted first */ @@ -71,6 +71,20 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, err = mlxsw_sp_acl_rulei_act_trap(rulei); if (err) return err; + } else if (is_tcf_gact_goto_chain(a)) { + u32 chain_index = tcf_gact_goto_chain_index(a); + struct mlxsw_sp_acl_ruleset *ruleset; + u16 group_id; + + ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, dev, + ingress, + chain_index, + MLXSW_SP_ACL_PROFILE_FLOWER); + if (IS_ERR(ruleset)) + return PTR_ERR(ruleset); + + group_id = mlxsw_sp_acl_ruleset_group_id(ruleset); + mlxsw_sp_acl_rulei_act_jump(rulei, group_id); } else if (is_tcf_mirred_egress_redirect(a)) { int ifindex = tcf_mirred_ifindex(a); struct net_device *out_dev; @@ -212,11 +226,46 @@ static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp, return 0; } +static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei, + struct tc_cls_flower_offload 
*f, + u16 n_proto) +{ + struct flow_dissector_key_ip *key, *mask; + + if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP)) + return 0; + + if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) { + dev_err(mlxsw_sp->bus_info->dev, "IP keys supported only for IPv4/6\n"); + return -EINVAL; + } + + key = skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_IP, + f->key); + mask = skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_IP, + f->mask); + mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_, + key->ttl, mask->ttl); + + mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_ECN, + key->tos & 0x3, mask->tos & 0x3); + + mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP, + key->tos >> 6, mask->tos >> 6); + + return 0; +} + static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, - struct net_device *dev, + struct net_device *dev, bool ingress, struct mlxsw_sp_acl_rule_info *rulei, struct tc_cls_flower_offload *f) { + u16 n_proto_mask = 0; + u16 n_proto_key = 0; u16 addr_type = 0; u8 ip_proto = 0; int err; @@ -229,12 +278,13 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | BIT(FLOW_DISSECTOR_KEY_PORTS) | BIT(FLOW_DISSECTOR_KEY_TCP) | + BIT(FLOW_DISSECTOR_KEY_IP) | BIT(FLOW_DISSECTOR_KEY_VLAN))) { dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n"); return -EOPNOTSUPP; } - mlxsw_sp_acl_rulei_priority(rulei, f->prio); + mlxsw_sp_acl_rulei_priority(rulei, f->common.prio); if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) { struct flow_dissector_key_control *key = @@ -253,8 +303,8 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, skb_flow_dissector_target(f->dissector, FLOW_DISSECTOR_KEY_BASIC, f->mask); - u16 n_proto_key = ntohs(key->n_proto); - u16 n_proto_mask = ntohs(mask->n_proto); + n_proto_key = ntohs(key->n_proto); + n_proto_mask = ntohs(mask->n_proto); if (n_proto_key == ETH_P_ALL) { n_proto_key = 0; @@ -324,11 +374,16 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, if (err) return err; - return mlxsw_sp_flower_parse_actions(mlxsw_sp, dev, rulei, f->exts); + err = mlxsw_sp_flower_parse_ip(mlxsw_sp, rulei, f, n_proto_key & n_proto_mask); + if (err) + return err; + + return mlxsw_sp_flower_parse_actions(mlxsw_sp, dev, ingress, + rulei, f->exts); } int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, - __be16 protocol, struct tc_cls_flower_offload *f) + struct tc_cls_flower_offload *f) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct net_device *dev = mlxsw_sp_port->dev; @@ -338,6 +393,7 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, int err; ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, dev, ingress, + f->common.chain_index, MLXSW_SP_ACL_PROFILE_FLOWER); if (IS_ERR(ruleset)) return PTR_ERR(ruleset); @@ -349,7 +405,7 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, } rulei = mlxsw_sp_acl_rule_rulei(rule); - err = mlxsw_sp_flower_parse(mlxsw_sp, dev, rulei, f); + err = mlxsw_sp_flower_parse(mlxsw_sp, dev, ingress, rulei, f); if (err) goto err_flower_parse; @@ -381,7 +437,7 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, struct mlxsw_sp_acl_rule *rule; ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev, - ingress, + ingress, f->common.chain_index, MLXSW_SP_ACL_PROFILE_FLOWER); if (IS_ERR(ruleset)) return; @@ -407,7 +463,7 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, 
bool ingress, int err; ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev, - ingress, + ingress, f->common.chain_index, MLXSW_SP_ACL_PROFILE_FLOWER); if (WARN_ON(IS_ERR(ruleset))) return -EINVAL; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c new file mode 100644 index 000000000000..702fe945227c --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c @@ -0,0 +1,214 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Petr Machata + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include + +#include "spectrum_ipip.h" + +static bool +mlxsw_sp_ipip_netdev_has_ikey(const struct net_device *ol_dev) +{ + struct ip_tunnel *tun = netdev_priv(ol_dev); + + return !!(tun->parms.i_flags & TUNNEL_KEY); +} + +static bool +mlxsw_sp_ipip_netdev_has_okey(const struct net_device *ol_dev) +{ + struct ip_tunnel *tun = netdev_priv(ol_dev); + + return !!(tun->parms.o_flags & TUNNEL_KEY); +} + +static u32 mlxsw_sp_ipip_netdev_ikey(const struct net_device *ol_dev) +{ + struct ip_tunnel *tun = netdev_priv(ol_dev); + + return mlxsw_sp_ipip_netdev_has_ikey(ol_dev) ? + be32_to_cpu(tun->parms.i_key) : 0; +} + +static u32 mlxsw_sp_ipip_netdev_okey(const struct net_device *ol_dev) +{ + struct ip_tunnel *tun = netdev_priv(ol_dev); + + return mlxsw_sp_ipip_netdev_has_okey(ol_dev) ? 
+ be32_to_cpu(tun->parms.o_key) : 0; +} + +static int +mlxsw_sp_ipip_nexthop_update_gre4(struct mlxsw_sp *mlxsw_sp, u32 adj_index, + struct mlxsw_sp_ipip_entry *ipip_entry) +{ + u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb); + __be32 daddr4 = mlxsw_sp_ipip_netdev_daddr4(ipip_entry->ol_dev); + char ratr_pl[MLXSW_REG_RATR_LEN]; + + mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, + true, MLXSW_REG_RATR_TYPE_IPIP, + adj_index, rif_index); + mlxsw_reg_ratr_ipip4_entry_pack(ratr_pl, be32_to_cpu(daddr4)); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl); +} + +static int +mlxsw_sp_ipip_fib_entry_op_gre4_rtdp(struct mlxsw_sp *mlxsw_sp, + u32 tunnel_index, + struct mlxsw_sp_ipip_entry *ipip_entry) +{ + bool has_ikey = mlxsw_sp_ipip_netdev_has_ikey(ipip_entry->ol_dev); + u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb); + u32 ikey = mlxsw_sp_ipip_netdev_ikey(ipip_entry->ol_dev); + char rtdp_pl[MLXSW_REG_RTDP_LEN]; + unsigned int type_check; + u32 daddr4; + + mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_IPIP, tunnel_index); + + type_check = has_ikey ? + MLXSW_REG_RTDP_IPIP_TYPE_CHECK_ALLOW_GRE_KEY : + MLXSW_REG_RTDP_IPIP_TYPE_CHECK_ALLOW_GRE; + + /* Linux demuxes tunnels based on packet SIP (which must match tunnel + * remote IP). Thus configure decap so that it filters out packets that + * are not IPv4 or have the wrong SIP. IPIP_DECAP_ERROR trap is + * generated for packets that fail this criterion. Linux then handles + * such packets in slow path and generates ICMP destination unreachable. + */ + daddr4 = be32_to_cpu(mlxsw_sp_ipip_netdev_daddr4(ipip_entry->ol_dev)); + mlxsw_reg_rtdp_ipip4_pack(rtdp_pl, rif_index, + MLXSW_REG_RTDP_IPIP_SIP_CHECK_FILTER_IPV4, + type_check, has_ikey, daddr4, ikey); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtdp), rtdp_pl); +} + +static int +mlxsw_sp_ipip_fib_entry_op_gre4_ralue(struct mlxsw_sp *mlxsw_sp, + u32 dip, u8 prefix_len, u16 ul_vr_id, + enum mlxsw_reg_ralue_op op, + u32 tunnel_index) +{ + char ralue_pl[MLXSW_REG_RALUE_LEN]; + + mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_REG_RALXX_PROTOCOL_IPV4, op, + ul_vr_id, prefix_len, dip); + mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl, tunnel_index); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); +} + +static int mlxsw_sp_ipip_fib_entry_op_gre4(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry, + enum mlxsw_reg_ralue_op op, + u32 tunnel_index) +{ + u16 ul_vr_id = mlxsw_sp_ipip_lb_ul_vr_id(ipip_entry->ol_lb); + __be32 dip; + int err; + + err = mlxsw_sp_ipip_fib_entry_op_gre4_rtdp(mlxsw_sp, tunnel_index, + ipip_entry); + if (err) + return err; + + dip = mlxsw_sp_ipip_netdev_saddr(MLXSW_SP_L3_PROTO_IPV4, + ipip_entry->ol_dev).addr4; + return mlxsw_sp_ipip_fib_entry_op_gre4_ralue(mlxsw_sp, be32_to_cpu(dip), + 32, ul_vr_id, op, + tunnel_index); +} + +static bool mlxsw_sp_ipip_tunnel_complete(enum mlxsw_sp_l3proto proto, + const struct net_device *ol_dev) +{ + union mlxsw_sp_l3addr saddr = mlxsw_sp_ipip_netdev_saddr(proto, ol_dev); + union mlxsw_sp_l3addr daddr = mlxsw_sp_ipip_netdev_daddr(proto, ol_dev); + union mlxsw_sp_l3addr naddr = {0}; + + /* Tunnels with unset local or remote address are valid in Linux and + * used for lightweight tunnels (LWT) and Non-Broadcast Multi-Access + * (NBMA) tunnels. In principle these can be offloaded, but the driver + * currently doesn't support this. So punt. 
+ */ + return memcmp(&saddr, &naddr, sizeof(naddr)) && + memcmp(&daddr, &naddr, sizeof(naddr)); +} + +static bool mlxsw_sp_ipip_can_offload_gre4(const struct mlxsw_sp *mlxsw_sp, + const struct net_device *ol_dev, + enum mlxsw_sp_l3proto ol_proto) +{ + struct ip_tunnel *tunnel = netdev_priv(ol_dev); + __be16 okflags = TUNNEL_KEY; /* We can't offload any other features. */ + bool inherit_ttl = tunnel->parms.iph.ttl == 0; + bool inherit_tos = tunnel->parms.iph.tos & 0x1; + + return (tunnel->parms.i_flags & ~okflags) == 0 && + (tunnel->parms.o_flags & ~okflags) == 0 && + inherit_ttl && inherit_tos && + mlxsw_sp_ipip_tunnel_complete(MLXSW_SP_L3_PROTO_IPV4, ol_dev); +} + +static struct mlxsw_sp_rif_ipip_lb_config +mlxsw_sp_ipip_ol_loopback_config_gre4(struct mlxsw_sp *mlxsw_sp, + const struct net_device *ol_dev) +{ + enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt; + + lb_ipipt = mlxsw_sp_ipip_netdev_has_okey(ol_dev) ? + MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_KEY_IN_IP : + MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_IN_IP; + return (struct mlxsw_sp_rif_ipip_lb_config){ + .lb_ipipt = lb_ipipt, + .okey = mlxsw_sp_ipip_netdev_okey(ol_dev), + .ul_protocol = MLXSW_SP_L3_PROTO_IPV4, + .saddr = mlxsw_sp_ipip_netdev_saddr(MLXSW_SP_L3_PROTO_IPV4, + ol_dev), + }; +} + +static const struct mlxsw_sp_ipip_ops mlxsw_sp_ipip_gre4_ops = { + .dev_type = ARPHRD_IPGRE, + .ul_proto = MLXSW_SP_L3_PROTO_IPV4, + .nexthop_update = mlxsw_sp_ipip_nexthop_update_gre4, + .fib_entry_op = mlxsw_sp_ipip_fib_entry_op_gre4, + .can_offload = mlxsw_sp_ipip_can_offload_gre4, + .ol_loopback_config = mlxsw_sp_ipip_ol_loopback_config_gre4, +}; + +const struct mlxsw_sp_ipip_ops *mlxsw_sp_ipip_ops_arr[] = { + [MLXSW_SP_IPIP_TYPE_GRE4] = &mlxsw_sp_ipip_gre4_ops, +}; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h new file mode 100644 index 000000000000..1c2db831d83b --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h @@ -0,0 +1,79 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Petr Machata + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MLXSW_IPIP_H_ +#define _MLXSW_IPIP_H_ + +#include "spectrum_router.h" +#include + +enum mlxsw_sp_ipip_type { + MLXSW_SP_IPIP_TYPE_GRE4, + MLXSW_SP_IPIP_TYPE_MAX, +}; + +struct mlxsw_sp_ipip_entry { + enum mlxsw_sp_ipip_type ipipt; + struct net_device *ol_dev; /* Overlay. */ + struct mlxsw_sp_rif_ipip_lb *ol_lb; + unsigned int ref_count; /* Number of next hops using the tunnel. */ + struct mlxsw_sp_fib_entry *decap_fib_entry; + struct list_head ipip_list_node; +}; + +struct mlxsw_sp_ipip_ops { + int dev_type; + enum mlxsw_sp_l3proto ul_proto; /* Underlay. */ + + int (*nexthop_update)(struct mlxsw_sp *mlxsw_sp, u32 adj_index, + struct mlxsw_sp_ipip_entry *ipip_entry); + + bool (*can_offload)(const struct mlxsw_sp *mlxsw_sp, + const struct net_device *ol_dev, + enum mlxsw_sp_l3proto ol_proto); + + /* Return a configuration for creating an overlay loopback RIF. */ + struct mlxsw_sp_rif_ipip_lb_config + (*ol_loopback_config)(struct mlxsw_sp *mlxsw_sp, + const struct net_device *ol_dev); + + int (*fib_entry_op)(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry, + enum mlxsw_reg_ralue_op op, + u32 tunnel_index); +}; + +extern const struct mlxsw_sp_ipip_ops *mlxsw_sp_ipip_ops_arr[]; + +#endif /* _MLXSW_IPIP_H_*/ diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 4b2e0fd7d51e..c16718d296d3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1,9 +1,10 @@ /* * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c - * Copyright (c) 2016 Mellanox Technologies. All rights reserved. + * Copyright (c) 2016-2017 Mellanox Technologies. All rights reserved. * Copyright (c) 2016 Jiri Pirko * Copyright (c) 2016 Ido Schimmel * Copyright (c) 2016 Yotam Gigi + * Copyright (c) 2017 Petr Machata * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -43,18 +44,27 @@ #include #include #include +#include +#include #include #include #include #include +#include #include +#include #include +#include +#include +#include +#include #include "spectrum.h" #include "core.h" #include "reg.h" #include "spectrum_cnt.h" #include "spectrum_dpipe.h" +#include "spectrum_ipip.h" #include "spectrum_router.h" struct mlxsw_sp_vr; @@ -79,9 +89,11 @@ struct mlxsw_sp_router { struct delayed_work nexthop_probe_dw; #define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */ struct list_head nexthop_neighs_list; + struct list_head ipip_list; bool aborted; struct notifier_block fib_nb; const struct mlxsw_sp_rif_ops **rif_ops_arr; + const struct mlxsw_sp_ipip_ops **ipip_ops_arr; }; struct mlxsw_sp_rif { @@ -122,6 +134,17 @@ struct mlxsw_sp_rif_subport { bool lag; }; +struct mlxsw_sp_rif_ipip_lb { + struct mlxsw_sp_rif common; + struct mlxsw_sp_rif_ipip_lb_config lb_config; + u16 ul_vr_id; /* Reserved for Spectrum-2. 
*/ +}; + +struct mlxsw_sp_rif_params_ipip_lb { + struct mlxsw_sp_rif_params common; + struct mlxsw_sp_rif_ipip_lb_config lb_config; +}; + struct mlxsw_sp_rif_ops { enum mlxsw_sp_rif_type type; size_t rif_size; @@ -304,7 +327,7 @@ static struct mlxsw_sp_rif * mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp, const struct net_device *dev); -#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE) +#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1) struct mlxsw_sp_prefix_usage { DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT); @@ -313,19 +336,6 @@ struct mlxsw_sp_prefix_usage { #define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \ for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT) -static bool -mlxsw_sp_prefix_usage_subset(struct mlxsw_sp_prefix_usage *prefix_usage1, - struct mlxsw_sp_prefix_usage *prefix_usage2) -{ - unsigned char prefix; - - mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage1) { - if (!test_bit(prefix, prefix_usage2->b)) - return false; - } - return true; -} - static bool mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1, struct mlxsw_sp_prefix_usage *prefix_usage2) @@ -371,6 +381,14 @@ enum mlxsw_sp_fib_entry_type { MLXSW_SP_FIB_ENTRY_TYPE_REMOTE, MLXSW_SP_FIB_ENTRY_TYPE_LOCAL, MLXSW_SP_FIB_ENTRY_TYPE_TRAP, + + /* This is a special case of local delivery, where a packet should be + * decapsulated on reception. Note that there is no corresponding ENCAP, + * because that's a type of next hop, not of FIB entry. (There can be + * several next hops in a REMOTE entry, and some of them may be + * encapsulating entries.) + */ + MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP, }; struct mlxsw_sp_nexthop_group; @@ -384,11 +402,9 @@ struct mlxsw_sp_fib_node { struct mlxsw_sp_fib_key key; }; -struct mlxsw_sp_fib_entry_params { - u32 tb_id; - u32 prio; - u8 tos; - u8 type; +struct mlxsw_sp_fib_entry_decap { + struct mlxsw_sp_ipip_entry *ipip_entry; + u32 tunnel_index; }; struct mlxsw_sp_fib_entry { @@ -397,13 +413,26 @@ struct mlxsw_sp_fib_entry { enum mlxsw_sp_fib_entry_type type; struct list_head nexthop_group_node; struct mlxsw_sp_nexthop_group *nh_group; - struct mlxsw_sp_fib_entry_params params; - bool offloaded; + struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. 
*/ }; -enum mlxsw_sp_l3proto { - MLXSW_SP_L3_PROTO_IPV4, - MLXSW_SP_L3_PROTO_IPV6, +struct mlxsw_sp_fib4_entry { + struct mlxsw_sp_fib_entry common; + u32 tb_id; + u32 prio; + u8 tos; + u8 type; +}; + +struct mlxsw_sp_fib6_entry { + struct mlxsw_sp_fib_entry common; + struct list_head rt6_list; + unsigned int nrt6; +}; + +struct mlxsw_sp_rt6 { + struct list_head list; + struct rt6_info *rt; }; struct mlxsw_sp_lpm_tree { @@ -428,6 +457,7 @@ struct mlxsw_sp_vr { u32 tb_id; /* kernel fib table id */ unsigned int rif_count; struct mlxsw_sp_fib *fib4; + struct mlxsw_sp_fib *fib6; }; static const struct rhashtable_params mlxsw_sp_fib_ht_params; @@ -487,15 +517,15 @@ static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp, return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl); } -static int mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_lpm_tree *lpm_tree) +static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_lpm_tree *lpm_tree) { char ralta_pl[MLXSW_REG_RALTA_LEN]; mlxsw_reg_ralta_pack(ralta_pl, false, (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto, lpm_tree->id); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl); } static int @@ -551,10 +581,10 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp, return ERR_PTR(err); } -static int mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_lpm_tree *lpm_tree) +static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_lpm_tree *lpm_tree) { - return mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree); + mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree); } static struct mlxsw_sp_lpm_tree * @@ -571,24 +601,21 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp, lpm_tree->proto == proto && mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage, prefix_usage)) - goto inc_ref_count; + return lpm_tree; } - lpm_tree = mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, - proto); - if (IS_ERR(lpm_tree)) - return lpm_tree; - -inc_ref_count: - lpm_tree->ref_count++; - return lpm_tree; + return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto); } -static int mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_lpm_tree *lpm_tree) +static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree) +{ + lpm_tree->ref_count++; +} + +static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_lpm_tree *lpm_tree) { if (--lpm_tree->ref_count == 0) - return mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree); - return 0; + mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree); } #define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */ @@ -625,7 +652,7 @@ static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp) static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr) { - return !!vr->fib4; + return !!vr->fib4 || !!vr->fib6; } static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp) @@ -642,13 +669,13 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp) } static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp, - const struct mlxsw_sp_fib *fib) + const struct mlxsw_sp_fib *fib, u8 tree_id) { char raltb_pl[MLXSW_REG_RALTB_LEN]; mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id, (enum mlxsw_reg_ralxx_protocol) fib->proto, - fib->lpm_tree->id); + tree_id); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl); } @@ -694,7 +721,7 @@ static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr, case 
MLXSW_SP_L3_PROTO_IPV4: return vr->fib4; case MLXSW_SP_L3_PROTO_IPV6: - BUG_ON(1); + return vr->fib6; } return NULL; } @@ -703,6 +730,7 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp, u32 tb_id) { struct mlxsw_sp_vr *vr; + int err; vr = mlxsw_sp_vr_find_unused(mlxsw_sp); if (!vr) @@ -710,56 +738,28 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp, vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4); if (IS_ERR(vr->fib4)) return ERR_CAST(vr->fib4); + vr->fib6 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV6); + if (IS_ERR(vr->fib6)) { + err = PTR_ERR(vr->fib6); + goto err_fib6_create; + } vr->tb_id = tb_id; return vr; + +err_fib6_create: + mlxsw_sp_fib_destroy(vr->fib4); + vr->fib4 = NULL; + return ERR_PTR(err); } static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr) { + mlxsw_sp_fib_destroy(vr->fib6); + vr->fib6 = NULL; mlxsw_sp_fib_destroy(vr->fib4); vr->fib4 = NULL; } -static int -mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib *fib, - struct mlxsw_sp_prefix_usage *req_prefix_usage) -{ - struct mlxsw_sp_lpm_tree *lpm_tree = fib->lpm_tree; - struct mlxsw_sp_lpm_tree *new_tree; - int err; - - if (mlxsw_sp_prefix_usage_eq(req_prefix_usage, &lpm_tree->prefix_usage)) - return 0; - - new_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage, - fib->proto); - if (IS_ERR(new_tree)) { - /* We failed to get a tree according to the required - * prefix usage. However, the current tree might be still good - * for us if our requirement is subset of the prefixes used - * in the tree. - */ - if (mlxsw_sp_prefix_usage_subset(req_prefix_usage, - &lpm_tree->prefix_usage)) - return 0; - return PTR_ERR(new_tree); - } - - /* Prevent packet loss by overwriting existing binding */ - fib->lpm_tree = new_tree; - err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib); - if (err) - goto err_tree_bind; - mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree); - - return 0; - -err_tree_bind: - fib->lpm_tree = lpm_tree; - mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree); - return err; -} - static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id) { struct mlxsw_sp_vr *vr; @@ -773,10 +773,105 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id) static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr) { - if (!vr->rif_count && list_empty(&vr->fib4->node_list)) + if (!vr->rif_count && list_empty(&vr->fib4->node_list) && + list_empty(&vr->fib6->node_list)) mlxsw_sp_vr_destroy(vr); } +static bool +mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr, + enum mlxsw_sp_l3proto proto, u8 tree_id) +{ + struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto); + + if (!mlxsw_sp_vr_is_used(vr)) + return false; + if (fib->lpm_tree && fib->lpm_tree->id == tree_id) + return true; + return false; +} + +static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib *fib, + struct mlxsw_sp_lpm_tree *new_tree) +{ + struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree; + int err; + + err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id); + if (err) + return err; + fib->lpm_tree = new_tree; + mlxsw_sp_lpm_tree_hold(new_tree); + mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree); + return 0; +} + +static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib *fib, + struct mlxsw_sp_lpm_tree *new_tree) +{ + struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree; + enum mlxsw_sp_l3proto proto = fib->proto; + u8 old_id, new_id = new_tree->id; + struct mlxsw_sp_vr *vr; + int i, err; + 
+ if (!old_tree) + goto no_replace; + old_id = old_tree->id; + + for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) { + vr = &mlxsw_sp->router->vrs[i]; + if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id)) + continue; + err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp, + mlxsw_sp_vr_fib(vr, proto), + new_tree); + if (err) + goto err_tree_replace; + } + + return 0; + +err_tree_replace: + for (i--; i >= 0; i--) { + if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id)) + continue; + mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp, + mlxsw_sp_vr_fib(vr, proto), + old_tree); + } + return err; + +no_replace: + err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id); + if (err) + return err; + fib->lpm_tree = new_tree; + mlxsw_sp_lpm_tree_hold(new_tree); + return 0; +} + +static void +mlxsw_sp_vrs_prefixes(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_l3proto proto, + struct mlxsw_sp_prefix_usage *req_prefix_usage) +{ + int i; + + for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) { + struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i]; + struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto); + unsigned char prefix; + + if (!mlxsw_sp_vr_is_used(vr)) + continue; + mlxsw_sp_prefix_usage_for_each(prefix, &fib->prefix_usage) + mlxsw_sp_prefix_usage_set(req_prefix_usage, prefix); + } +} + static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp) { struct mlxsw_sp_vr *vr; @@ -816,6 +911,374 @@ static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp) kfree(mlxsw_sp->router->vrs); } +static struct net_device * +__mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev) +{ + struct ip_tunnel *tun = netdev_priv(ol_dev); + struct net *net = dev_net(ol_dev); + + return __dev_get_by_index(net, tun->parms.link); +} + +static u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev) +{ + struct net_device *d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev); + + if (d) + return l3mdev_fib_table(d) ? : RT_TABLE_MAIN; + else + return l3mdev_fib_table(ol_dev) ? 
: RT_TABLE_MAIN; +} + +static struct mlxsw_sp_rif * +mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_rif_params *params); + +static struct mlxsw_sp_rif_ipip_lb * +mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_ipip_type ipipt, + struct net_device *ol_dev) +{ + struct mlxsw_sp_rif_params_ipip_lb lb_params; + const struct mlxsw_sp_ipip_ops *ipip_ops; + struct mlxsw_sp_rif *rif; + + ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt]; + lb_params = (struct mlxsw_sp_rif_params_ipip_lb) { + .common.dev = ol_dev, + .common.lag = false, + .lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev), + }; + + rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common); + if (IS_ERR(rif)) + return ERR_CAST(rif); + return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common); +} + +static struct mlxsw_sp_ipip_entry * +mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_ipip_type ipipt, + struct net_device *ol_dev) +{ + struct mlxsw_sp_ipip_entry *ipip_entry; + struct mlxsw_sp_ipip_entry *ret = NULL; + + ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL); + if (!ipip_entry) + return ERR_PTR(-ENOMEM); + + ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt, + ol_dev); + if (IS_ERR(ipip_entry->ol_lb)) { + ret = ERR_CAST(ipip_entry->ol_lb); + goto err_ol_ipip_lb_create; + } + + ipip_entry->ipipt = ipipt; + ipip_entry->ol_dev = ol_dev; + + return ipip_entry; + +err_ol_ipip_lb_create: + kfree(ipip_entry); + return ret; +} + +static void +mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp_ipip_entry *ipip_entry) +{ + WARN_ON(ipip_entry->ref_count > 0); + mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common); + kfree(ipip_entry); +} + +static __be32 +mlxsw_sp_ipip_netdev_saddr4(const struct net_device *ol_dev) +{ + struct ip_tunnel *tun = netdev_priv(ol_dev); + + return tun->parms.iph.saddr; +} + +union mlxsw_sp_l3addr +mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto, + const struct net_device *ol_dev) +{ + switch (proto) { + case MLXSW_SP_L3_PROTO_IPV4: + return (union mlxsw_sp_l3addr) { + .addr4 = mlxsw_sp_ipip_netdev_saddr4(ol_dev), + }; + case MLXSW_SP_L3_PROTO_IPV6: + break; + }; + + WARN_ON(1); + return (union mlxsw_sp_l3addr) { + .addr4 = 0, + }; +} + +__be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev) +{ + struct ip_tunnel *tun = netdev_priv(ol_dev); + + return tun->parms.iph.daddr; +} + +union mlxsw_sp_l3addr +mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto, + const struct net_device *ol_dev) +{ + switch (proto) { + case MLXSW_SP_L3_PROTO_IPV4: + return (union mlxsw_sp_l3addr) { + .addr4 = mlxsw_sp_ipip_netdev_daddr4(ol_dev), + }; + case MLXSW_SP_L3_PROTO_IPV6: + break; + }; + + WARN_ON(1); + return (union mlxsw_sp_l3addr) { + .addr4 = 0, + }; +} + +static bool mlxsw_sp_l3addr_eq(const union mlxsw_sp_l3addr *addr1, + const union mlxsw_sp_l3addr *addr2) +{ + return !memcmp(addr1, addr2, sizeof(*addr1)); +} + +static bool +mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp, + const enum mlxsw_sp_l3proto ul_proto, + union mlxsw_sp_l3addr saddr, + u32 ul_tb_id, + struct mlxsw_sp_ipip_entry *ipip_entry) +{ + u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev); + enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt; + union mlxsw_sp_l3addr tun_saddr; + + if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto) + return false; + + tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev); + return tun_ul_tb_id == ul_tb_id && + mlxsw_sp_l3addr_eq(&tun_saddr, &saddr); +} + 
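As an aside, mlxsw_sp_ipip_entry_saddr_matches() just above encodes the rule the driver later relies on to reject conflicting tunnels: two tunnels clash for offload purposes when they share the underlay protocol, the underlay routing table and the local (source) address. Purely as an illustration of that predicate — every type and function name below is hypothetical and not part of the driver or the patch — the same check can be sketched as standalone C:

/*
 * Standalone sketch (not part of the patch): two IP-in-IP tunnels conflict
 * for offload purposes when they use the same underlay protocol, the same
 * underlay routing table and the same local (source) address.
 * All names here are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum l3proto { L3_PROTO_IPV4, L3_PROTO_IPV6 };

struct tunnel_key {
	enum l3proto ul_proto;	/* underlay protocol */
	uint32_t ul_tb_id;	/* underlay routing table (VRF) */
	uint8_t saddr[16];	/* local address, IPv4 in the first 4 bytes */
};

static bool tunnels_clash(const struct tunnel_key *a,
			  const struct tunnel_key *b)
{
	return a->ul_proto == b->ul_proto &&
	       a->ul_tb_id == b->ul_tb_id &&
	       !memcmp(a->saddr, b->saddr, sizeof(a->saddr));
}

int main(void)
{
	struct tunnel_key t1 = { L3_PROTO_IPV4, 254, { 192, 0, 2, 1 } };
	struct tunnel_key t2 = { L3_PROTO_IPV4, 254, { 192, 0, 2, 1 } };
	struct tunnel_key t3 = { L3_PROTO_IPV4, 10, { 192, 0, 2, 1 } };

	printf("t1 vs t2: %s\n", tunnels_clash(&t1, &t2) ? "clash" : "ok");
	printf("t1 vs t3: %s\n", tunnels_clash(&t1, &t3) ? "clash" : "ok");
	return 0;
}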
+static int +mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + struct mlxsw_sp_ipip_entry *ipip_entry) +{ + u32 tunnel_index; + int err; + + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &tunnel_index); + if (err) + return err; + + ipip_entry->decap_fib_entry = fib_entry; + fib_entry->decap.ipip_entry = ipip_entry; + fib_entry->decap.tunnel_index = tunnel_index; + return 0; +} + +static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry) +{ + /* Unlink this node from the IPIP entry that it's the decap entry of. */ + fib_entry->decap.ipip_entry->decap_fib_entry = NULL; + fib_entry->decap.ipip_entry = NULL; + mlxsw_sp_kvdl_free(mlxsw_sp, fib_entry->decap.tunnel_index); +} + +static struct mlxsw_sp_fib_node * +mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr, + size_t addr_len, unsigned char prefix_len); +static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry); + +static void +mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry) +{ + struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry; + + mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry); + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP; + + mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry); +} + +static void +mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry, + struct mlxsw_sp_fib_entry *decap_fib_entry) +{ + if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry, + ipip_entry)) + return; + decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP; + + if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry)) + mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry); +} + +/* Given an IPIP entry, find the corresponding decap route. 
*/ +static struct mlxsw_sp_fib_entry * +mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry) +{ + static struct mlxsw_sp_fib_node *fib_node; + const struct mlxsw_sp_ipip_ops *ipip_ops; + struct mlxsw_sp_fib_entry *fib_entry; + unsigned char saddr_prefix_len; + union mlxsw_sp_l3addr saddr; + struct mlxsw_sp_fib *ul_fib; + struct mlxsw_sp_vr *ul_vr; + const void *saddrp; + size_t saddr_len; + u32 ul_tb_id; + u32 saddr4; + + ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]; + + ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev); + ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id); + if (!ul_vr) + return NULL; + + ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto); + saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto, + ipip_entry->ol_dev); + + switch (ipip_ops->ul_proto) { + case MLXSW_SP_L3_PROTO_IPV4: + saddr4 = be32_to_cpu(saddr.addr4); + saddrp = &saddr4; + saddr_len = 4; + saddr_prefix_len = 32; + break; + case MLXSW_SP_L3_PROTO_IPV6: + WARN_ON(1); + return NULL; + } + + fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len, + saddr_prefix_len); + if (!fib_node || list_empty(&fib_node->entry_list)) + return NULL; + + fib_entry = list_first_entry(&fib_node->entry_list, + struct mlxsw_sp_fib_entry, list); + if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP) + return NULL; + + return fib_entry; +} + +static struct mlxsw_sp_ipip_entry * +mlxsw_sp_ipip_entry_get(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_ipip_type ipipt, + struct net_device *ol_dev) +{ + u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev); + struct mlxsw_sp_router *router = mlxsw_sp->router; + struct mlxsw_sp_fib_entry *decap_fib_entry; + struct mlxsw_sp_ipip_entry *ipip_entry; + enum mlxsw_sp_l3proto ul_proto; + union mlxsw_sp_l3addr saddr; + + list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list, + ipip_list_node) { + if (ipip_entry->ol_dev == ol_dev) + goto inc_ref_count; + + /* The configuration where several tunnels have the same local + * address in the same underlay table needs special treatment in + * the HW. That is currently not implemented in the driver. + */ + ul_proto = router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto; + saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev); + if (mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr, + ul_tb_id, ipip_entry)) + return ERR_PTR(-EEXIST); + } + + ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev); + if (IS_ERR(ipip_entry)) + return ipip_entry; + + decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry); + if (decap_fib_entry) + mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry, + decap_fib_entry); + + list_add_tail(&ipip_entry->ipip_list_node, + &mlxsw_sp->router->ipip_list); + +inc_ref_count: + ++ipip_entry->ref_count; + return ipip_entry; +} + +static void +mlxsw_sp_ipip_entry_put(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry) +{ + if (--ipip_entry->ref_count == 0) { + list_del(&ipip_entry->ipip_list_node); + if (ipip_entry->decap_fib_entry) + mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry); + mlxsw_sp_ipip_entry_destroy(ipip_entry); + } +} + +static bool +mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp, + const struct net_device *ul_dev, + enum mlxsw_sp_l3proto ul_proto, + union mlxsw_sp_l3addr ul_dip, + struct mlxsw_sp_ipip_entry *ipip_entry) +{ + u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? 
: RT_TABLE_MAIN; + enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt; + struct net_device *ipip_ul_dev; + + if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto) + return false; + + ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev); + return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip, + ul_tb_id, ipip_entry) && + (!ipip_ul_dev || ipip_ul_dev == ul_dev); +} + +/* Given decap parameters, find the corresponding IPIP entry. */ +static struct mlxsw_sp_ipip_entry * +mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp, + const struct net_device *ul_dev, + enum mlxsw_sp_l3proto ul_proto, + union mlxsw_sp_l3addr ul_dip) +{ + struct mlxsw_sp_ipip_entry *ipip_entry; + + list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list, + ipip_list_node) + if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev, + ul_proto, ul_dip, + ipip_entry)) + return ipip_entry; + + return NULL; +} + struct mlxsw_sp_neigh_key { struct neighbour *n; }; @@ -831,6 +1294,8 @@ struct mlxsw_sp_neigh_entry { * this neigh entry */ struct list_head nexthop_neighs_list_node; + unsigned int counter_index; + bool counter_valid; }; static const struct rhashtable_params mlxsw_sp_neigh_ht_params = { @@ -839,6 +1304,62 @@ static const struct rhashtable_params mlxsw_sp_neigh_ht_params = { .key_len = sizeof(struct mlxsw_sp_neigh_key), }; +struct mlxsw_sp_neigh_entry * +mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif, + struct mlxsw_sp_neigh_entry *neigh_entry) +{ + if (!neigh_entry) { + if (list_empty(&rif->neigh_list)) + return NULL; + else + return list_first_entry(&rif->neigh_list, + typeof(*neigh_entry), + rif_list_node); + } + if (neigh_entry->rif_list_node.next == &rif->neigh_list) + return NULL; + return list_next_entry(neigh_entry, rif_list_node); +} + +int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry) +{ + return neigh_entry->key.n->tbl->family; +} + +unsigned char * +mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry) +{ + return neigh_entry->ha; +} + +u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry) +{ + struct neighbour *n; + + n = neigh_entry->key.n; + return ntohl(*((__be32 *) n->primary_key)); +} + +struct in6_addr * +mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry) +{ + struct neighbour *n; + + n = neigh_entry->key.n; + return (struct in6_addr *) &n->primary_key; +} + +int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_neigh_entry *neigh_entry, + u64 *p_counter) +{ + if (!neigh_entry->counter_valid) + return -EINVAL; + + return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index, + p_counter, NULL); +} + static struct mlxsw_sp_neigh_entry * mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n, u16 rif) @@ -879,6 +1400,53 @@ mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_neigh_ht_params); } +static bool +mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_neigh_entry *neigh_entry) +{ + struct devlink *devlink; + const char *table_name; + + switch (mlxsw_sp_neigh_entry_type(neigh_entry)) { + case AF_INET: + table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4; + break; + case AF_INET6: + table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6; + break; + default: + WARN_ON(1); + return false; + } + + devlink = priv_to_devlink(mlxsw_sp->core); + return devlink_dpipe_table_counter_enabled(devlink, table_name); +} + +static void +mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp, + struct 
mlxsw_sp_neigh_entry *neigh_entry) +{ + if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry)) + return; + + if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index)) + return; + + neigh_entry->counter_valid = true; +} + +static void +mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_neigh_entry *neigh_entry) +{ + if (!neigh_entry->counter_valid) + return; + mlxsw_sp_flow_counter_free(mlxsw_sp, + neigh_entry->counter_index); + neigh_entry->counter_valid = false; +} + static struct mlxsw_sp_neigh_entry * mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n) { @@ -898,6 +1466,7 @@ mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n) if (err) goto err_neigh_entry_insert; + mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry); list_add(&neigh_entry->rif_list_node, &rif->neigh_list); return neigh_entry; @@ -912,6 +1481,7 @@ mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_neigh_entry *neigh_entry) { list_del(&neigh_entry->rif_list_node); + mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry); mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry); mlxsw_sp_neigh_entry_free(neigh_entry); } @@ -929,8 +1499,15 @@ mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n) static void mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp) { - unsigned long interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME); + unsigned long interval; +#if IS_ENABLED(CONFIG_IPV6) + interval = min_t(unsigned long, + NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME), + NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME)); +#else + interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME); +#endif mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval); } @@ -965,6 +1542,44 @@ static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp, neigh_release(n); } +#if IS_ENABLED(CONFIG_IPV6) +static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp, + char *rauhtd_pl, + int rec_index) +{ + struct net_device *dev; + struct neighbour *n; + struct in6_addr dip; + u16 rif; + + mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif, + (char *) &dip); + + if (!mlxsw_sp->router->rifs[rif]) { + dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n"); + return; + } + + dev = mlxsw_sp->router->rifs[rif]->dev; + n = neigh_lookup(&nd_tbl, &dip, dev); + if (!n) { + netdev_err(dev, "Failed to find matching neighbour for IP=%pI6c\n", + &dip); + return; + } + + netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip); + neigh_event_send(n, NULL); + neigh_release(n); +} +#else +static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp, + char *rauhtd_pl, + int rec_index) +{ +} +#endif + static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp, char *rauhtd_pl, int rec_index) @@ -988,6 +1603,15 @@ static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp, } +static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp, + char *rauhtd_pl, + int rec_index) +{ + /* One record contains one entry. 
*/ + mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl, + rec_index); +} + static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp, char *rauhtd_pl, int rec_index) { @@ -997,7 +1621,8 @@ static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp, rec_index); break; case MLXSW_REG_RAUHTD_TYPE_IPV6: - WARN_ON_ONCE(1); + mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl, + rec_index); break; } } @@ -1022,22 +1647,20 @@ static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl) return false; } -static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp) +static int +__mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp, + char *rauhtd_pl, + enum mlxsw_reg_rauhtd_type type) { - char *rauhtd_pl; - u8 num_rec; - int i, err; - - rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL); - if (!rauhtd_pl) - return -ENOMEM; + int i, num_rec; + int err; /* Make sure the neighbour's netdev isn't removed in the * process. */ rtnl_lock(); do { - mlxsw_reg_rauhtd_pack(rauhtd_pl, MLXSW_REG_RAUHTD_TYPE_IPV4); + mlxsw_reg_rauhtd_pack(rauhtd_pl, type); err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd), rauhtd_pl); if (err) { @@ -1051,6 +1674,27 @@ static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp) } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl)); rtnl_unlock(); + return err; +} + +static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp) +{ + enum mlxsw_reg_rauhtd_type type; + char *rauhtd_pl; + int err; + + rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL); + if (!rauhtd_pl) + return -ENOMEM; + + type = MLXSW_REG_RAUHTD_TYPE_IPV4; + err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type); + if (err) + goto out; + + type = MLXSW_REG_RAUHTD_TYPE_IPV6; + err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type); +out: kfree(rauhtd_pl); return err; } @@ -1143,9 +1787,43 @@ mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp, mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha, dip); + if (neigh_entry->counter_valid) + mlxsw_reg_rauht_pack_counter(rauht_pl, + neigh_entry->counter_index); mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl); } +static void +mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_neigh_entry *neigh_entry, + enum mlxsw_reg_rauht_op op) +{ + struct neighbour *n = neigh_entry->key.n; + char rauht_pl[MLXSW_REG_RAUHT_LEN]; + const char *dip = n->primary_key; + + mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha, + dip); + if (neigh_entry->counter_valid) + mlxsw_reg_rauht_pack_counter(rauht_pl, + neigh_entry->counter_index); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl); +} + +bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry) +{ + struct neighbour *n = neigh_entry->key.n; + + /* Packets with a link-local destination address are trapped + * after LPM lookup and never reach the neighbour table, so + * there is no need to program such neighbours to the device. 
+ */ + if (ipv6_addr_type((struct in6_addr *) &n->primary_key) & + IPV6_ADDR_LINKLOCAL) + return true; + return false; +} + static void mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_neigh_entry *neigh_entry, @@ -1154,11 +1832,29 @@ mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp, if (!adding && !neigh_entry->connected) return; neigh_entry->connected = adding; - if (neigh_entry->key.n->tbl == &arp_tbl) + if (neigh_entry->key.n->tbl->family == AF_INET) { mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry, mlxsw_sp_rauht_op(adding)); - else + } else if (neigh_entry->key.n->tbl->family == AF_INET6) { + if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry)) + return; + mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry, + mlxsw_sp_rauht_op(adding)); + } else { WARN_ON_ONCE(1); + } +} + +void +mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_neigh_entry *neigh_entry, + bool adding) +{ + if (adding) + mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry); + else + mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry); + mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true); } struct mlxsw_sp_neigh_event_work { @@ -1227,7 +1923,8 @@ int mlxsw_sp_router_netevent_event(struct notifier_block *unused, p = ptr; /* We don't care about changes in the default table. */ - if (!p->dev || p->tbl != &arp_tbl) + if (!p->dev || (p->tbl->family != AF_INET && + p->tbl->family != AF_INET6)) return NOTIFY_DONE; /* We are in atomic context and can't take RTNL mutex, @@ -1246,7 +1943,7 @@ int mlxsw_sp_router_netevent_event(struct notifier_block *unused, case NETEVENT_NEIGH_UPDATE: n = ptr; - if (n->tbl != &arp_tbl) + if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6) return NOTIFY_DONE; mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev); @@ -1307,27 +2004,23 @@ static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp) rhashtable_destroy(&mlxsw_sp->router->neigh_ht); } -static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp, - const struct mlxsw_sp_rif *rif) -{ - char rauht_pl[MLXSW_REG_RAUHT_LEN]; - - mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL, - rif->rif_index, rif->addr); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl); -} - static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_rif *rif) { struct mlxsw_sp_neigh_entry *neigh_entry, *tmp; - mlxsw_sp_neigh_rif_flush(mlxsw_sp, rif); list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list, - rif_list_node) + rif_list_node) { + mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false); mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry); + } } +enum mlxsw_sp_nexthop_type { + MLXSW_SP_NEXTHOP_TYPE_ETH, + MLXSW_SP_NEXTHOP_TYPE_IPIP, +}; + struct mlxsw_sp_nexthop_key { struct fib_nh *fib_nh; }; @@ -1340,6 +2033,8 @@ struct mlxsw_sp_nexthop { */ struct rhash_head ht_node; struct mlxsw_sp_nexthop_key key; + unsigned char gw_addr[sizeof(struct in6_addr)]; + int ifindex; struct mlxsw_sp_rif *rif; u8 should_offload:1, /* set indicates this neigh is connected and * should be put to KVD linear area of this group. 
@@ -1350,17 +2045,18 @@ struct mlxsw_sp_nexthop { update:1; /* set indicates that MAC of this neigh should be * updated in HW */ - struct mlxsw_sp_neigh_entry *neigh_entry; -}; - -struct mlxsw_sp_nexthop_group_key { - struct fib_info *fi; + enum mlxsw_sp_nexthop_type type; + union { + struct mlxsw_sp_neigh_entry *neigh_entry; + struct mlxsw_sp_ipip_entry *ipip_entry; + }; }; struct mlxsw_sp_nexthop_group { + void *priv; struct rhash_head ht_node; struct list_head fib_list; /* list of fib entries that use this group */ - struct mlxsw_sp_nexthop_group_key key; + struct neigh_table *neigh_tbl; u8 adj_index_valid:1, gateway:1; /* routes using the group use a gateway */ u32 adj_index; @@ -1370,15 +2066,154 @@ struct mlxsw_sp_nexthop_group { #define nh_rif nexthops[0].rif }; +static struct fib_info * +mlxsw_sp_nexthop4_group_fi(const struct mlxsw_sp_nexthop_group *nh_grp) +{ + return nh_grp->priv; +} + +struct mlxsw_sp_nexthop_group_cmp_arg { + enum mlxsw_sp_l3proto proto; + union { + struct fib_info *fi; + struct mlxsw_sp_fib6_entry *fib6_entry; + }; +}; + +static bool +mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp, + const struct in6_addr *gw, int ifindex) +{ + int i; + + for (i = 0; i < nh_grp->count; i++) { + const struct mlxsw_sp_nexthop *nh; + + nh = &nh_grp->nexthops[i]; + if (nh->ifindex == ifindex && + ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr)) + return true; + } + + return false; +} + +static bool +mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp, + const struct mlxsw_sp_fib6_entry *fib6_entry) +{ + struct mlxsw_sp_rt6 *mlxsw_sp_rt6; + + if (nh_grp->count != fib6_entry->nrt6) + return false; + + list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) { + struct in6_addr *gw; + int ifindex; + + ifindex = mlxsw_sp_rt6->rt->dst.dev->ifindex; + gw = &mlxsw_sp_rt6->rt->rt6i_gateway; + if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex)) + return false; + } + + return true; +} + +static int +mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr) +{ + const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key; + const struct mlxsw_sp_nexthop_group *nh_grp = ptr; + + switch (cmp_arg->proto) { + case MLXSW_SP_L3_PROTO_IPV4: + return cmp_arg->fi != mlxsw_sp_nexthop4_group_fi(nh_grp); + case MLXSW_SP_L3_PROTO_IPV6: + return !mlxsw_sp_nexthop6_group_cmp(nh_grp, + cmp_arg->fib6_entry); + default: + WARN_ON(1); + return 1; + } +} + +static int +mlxsw_sp_nexthop_group_type(const struct mlxsw_sp_nexthop_group *nh_grp) +{ + return nh_grp->neigh_tbl->family; +} + +static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed) +{ + const struct mlxsw_sp_nexthop_group *nh_grp = data; + const struct mlxsw_sp_nexthop *nh; + struct fib_info *fi; + unsigned int val; + int i; + + switch (mlxsw_sp_nexthop_group_type(nh_grp)) { + case AF_INET: + fi = mlxsw_sp_nexthop4_group_fi(nh_grp); + return jhash(&fi, sizeof(fi), seed); + case AF_INET6: + val = nh_grp->count; + for (i = 0; i < nh_grp->count; i++) { + nh = &nh_grp->nexthops[i]; + val ^= nh->ifindex; + } + return jhash(&val, sizeof(val), seed); + default: + WARN_ON(1); + return 0; + } +} + +static u32 +mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed) +{ + unsigned int val = fib6_entry->nrt6; + struct mlxsw_sp_rt6 *mlxsw_sp_rt6; + struct net_device *dev; + + list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) { + dev = mlxsw_sp_rt6->rt->dst.dev; + val ^= dev->ifindex; + } + + return 
jhash(&val, sizeof(val), seed); +} + +static u32 +mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed) +{ + const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data; + + switch (cmp_arg->proto) { + case MLXSW_SP_L3_PROTO_IPV4: + return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed); + case MLXSW_SP_L3_PROTO_IPV6: + return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed); + default: + WARN_ON(1); + return 0; + } +} + static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = { - .key_offset = offsetof(struct mlxsw_sp_nexthop_group, key), .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node), - .key_len = sizeof(struct mlxsw_sp_nexthop_group_key), + .hashfn = mlxsw_sp_nexthop_group_hash, + .obj_hashfn = mlxsw_sp_nexthop_group_hash_obj, + .obj_cmpfn = mlxsw_sp_nexthop_group_cmp, }; static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop_group *nh_grp) { + if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 && + !nh_grp->gateway) + return 0; + return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht, &nh_grp->ht_node, mlxsw_sp_nexthop_group_ht_params); @@ -1387,16 +2222,38 @@ static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp, static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop_group *nh_grp) { + if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 && + !nh_grp->gateway) + return; + rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht, &nh_grp->ht_node, mlxsw_sp_nexthop_group_ht_params); } static struct mlxsw_sp_nexthop_group * -mlxsw_sp_nexthop_group_lookup(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_nexthop_group_key key) +mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp, + struct fib_info *fi) { - return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht, &key, + struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg; + + cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV4; + cmp_arg.fi = fi; + return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht, + &cmp_arg, + mlxsw_sp_nexthop_group_ht_params); +} + +static struct mlxsw_sp_nexthop_group * +mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib6_entry *fib6_entry) +{ + struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg; + + cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV6; + cmp_arg.fib6_entry = fib6_entry; + return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht, + &cmp_arg, mlxsw_sp_nexthop_group_ht_params); } @@ -1473,15 +2330,26 @@ static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, char ratr_pl[MLXSW_REG_RATR_LEN]; mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, - true, adj_index, neigh_entry->rif); + true, MLXSW_REG_RATR_TYPE_ETHERNET, + adj_index, neigh_entry->rif); mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl); } +static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp, + u32 adj_index, + struct mlxsw_sp_nexthop *nh) +{ + const struct mlxsw_sp_ipip_ops *ipip_ops; + + ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt]; + return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry); +} + static int -mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_nexthop_group *nh_grp, - bool reallocate) +mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group *nh_grp, + bool reallocate) { u32 adj_index = nh_grp->adj_index; /* base */ struct mlxsw_sp_nexthop 
*nh; @@ -1497,8 +2365,16 @@ mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp, } if (nh->update || reallocate) { - err = mlxsw_sp_nexthop_mac_update(mlxsw_sp, - adj_index, nh); + switch (nh->type) { + case MLXSW_SP_NEXTHOP_TYPE_ETH: + err = mlxsw_sp_nexthop_mac_update + (mlxsw_sp, adj_index, nh); + break; + case MLXSW_SP_NEXTHOP_TYPE_IPIP: + err = mlxsw_sp_nexthop_ipip_update + (mlxsw_sp, adj_index, nh); + break; + } if (err) return err; nh->update = 0; @@ -1509,9 +2385,6 @@ mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp, return 0; } -static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry); - static bool mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node, const struct mlxsw_sp_fib_entry *fib_entry); @@ -1534,6 +2407,24 @@ mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp, return 0; } +static void +mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry, + enum mlxsw_reg_ralue_op op, int err); + +static void +mlxsw_sp_nexthop_fib_entries_refresh(struct mlxsw_sp_nexthop_group *nh_grp) +{ + enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_WRITE; + struct mlxsw_sp_fib_entry *fib_entry; + + list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) { + if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node, + fib_entry)) + continue; + mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0); + } +} + static void mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop_group *nh_grp) @@ -1556,7 +2447,7 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, for (i = 0; i < nh_grp->count; i++) { nh = &nh_grp->nexthops[i]; - if (nh->should_offload ^ nh->offloaded) { + if (nh->should_offload != nh->offloaded) { offload_change = true; if (nh->should_offload) nh->update = 1; @@ -1568,8 +2459,7 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, /* Nothing was added or removed, so no need to reallocate. Just * update MAC on existing adjacency indexes. */ - err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp, - false); + err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, false); if (err) { dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n"); goto set_trap; @@ -1596,7 +2486,7 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, nh_grp->adj_index_valid = 1; nh_grp->adj_index = adj_index; nh_grp->ecmp_size = ecmp_size; - err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp, true); + err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, true); if (err) { dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n"); goto set_trap; @@ -1621,6 +2511,10 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n"); goto set_trap; } + + /* Offload state within the group changed, so update the flags. 
*/ + mlxsw_sp_nexthop_fib_entries_refresh(nh_grp); + return; set_trap: @@ -1640,9 +2534,9 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh, bool removing) { - if (!removing && !nh->should_offload) + if (!removing) nh->should_offload = 1; - else if (removing && nh->offloaded) + else if (nh->offloaded) nh->should_offload = 0; nh->update = 1; } @@ -1684,7 +2578,6 @@ static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop *nh) { struct mlxsw_sp_neigh_entry *neigh_entry; - struct fib_nh *fib_nh = nh->key.fib_nh; struct neighbour *n; u8 nud_state, dead; int err; @@ -1693,13 +2586,14 @@ static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp, return 0; /* Take a reference of neigh here ensuring that neigh would - * not be detructed before the nexthop entry is finished. + * not be destructed before the nexthop entry is finished. * The reference is taken either in neigh_lookup() or * in neigh_create() in case n is not found. */ - n = neigh_lookup(&arp_tbl, &fib_nh->nh_gw, fib_nh->nh_dev); + n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev); if (!n) { - n = neigh_create(&arp_tbl, &fib_nh->nh_gw, fib_nh->nh_dev); + n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr, + nh->rif->dev); if (IS_ERR(n)) return PTR_ERR(n); neigh_event_send(n, NULL); @@ -1761,18 +2655,136 @@ static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp, neigh_release(n); } -static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_nexthop_group *nh_grp, - struct mlxsw_sp_nexthop *nh, - struct fib_nh *fib_nh) +static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp, + const struct net_device *dev, + enum mlxsw_sp_ipip_type *p_type) +{ + struct mlxsw_sp_router *router = mlxsw_sp->router; + const struct mlxsw_sp_ipip_ops *ipip_ops; + enum mlxsw_sp_ipip_type ipipt; + + for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) { + ipip_ops = router->ipip_ops_arr[ipipt]; + if (dev->type == ipip_ops->dev_type) { + if (p_type) + *p_type = ipipt; + return true; + } + } + return false; +} + +static int mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_ipip_type ipipt, + struct mlxsw_sp_nexthop *nh, + struct net_device *ol_dev) +{ + if (!nh->nh_grp->gateway || nh->ipip_entry) + return 0; + + nh->ipip_entry = mlxsw_sp_ipip_entry_get(mlxsw_sp, ipipt, ol_dev); + if (IS_ERR(nh->ipip_entry)) + return PTR_ERR(nh->ipip_entry); + + __mlxsw_sp_nexthop_neigh_update(nh, false); + return 0; +} + +static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop *nh) +{ + struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry; + + if (!ipip_entry) + return; + + __mlxsw_sp_nexthop_neigh_update(nh, true); + mlxsw_sp_ipip_entry_put(mlxsw_sp, ipip_entry); + nh->ipip_entry = NULL; +} + +static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp, + const struct fib_nh *fib_nh, + enum mlxsw_sp_ipip_type *p_ipipt) +{ + struct net_device *dev = fib_nh->nh_dev; + + return dev && + fib_nh->nh_parent->fib_type == RTN_UNICAST && + mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt); +} + +static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop *nh) +{ + switch (nh->type) { + case MLXSW_SP_NEXTHOP_TYPE_ETH: + mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh); + mlxsw_sp_nexthop_rif_fini(nh); + break; + case MLXSW_SP_NEXTHOP_TYPE_IPIP: + mlxsw_sp_nexthop_rif_fini(nh); + 
mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh); + break; + } +} + +static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop *nh, + struct fib_nh *fib_nh) +{ + struct mlxsw_sp_router *router = mlxsw_sp->router; + struct net_device *dev = fib_nh->nh_dev; + enum mlxsw_sp_ipip_type ipipt; + struct mlxsw_sp_rif *rif; + int err; + + if (mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fib_nh, &ipipt) && + router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev, + MLXSW_SP_L3_PROTO_IPV4)) { + nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP; + err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, ipipt, nh, dev); + if (err) + return err; + mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common); + return 0; + } + + nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH; + rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); + if (!rif) + return 0; + + mlxsw_sp_nexthop_rif_init(nh, rif); + err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh); + if (err) + goto err_neigh_init; + + return 0; + +err_neigh_init: + mlxsw_sp_nexthop_rif_fini(nh); + return err; +} + +static void mlxsw_sp_nexthop4_type_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop *nh) +{ + mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh); +} + +static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group *nh_grp, + struct mlxsw_sp_nexthop *nh, + struct fib_nh *fib_nh) { struct net_device *dev = fib_nh->nh_dev; struct in_device *in_dev; - struct mlxsw_sp_rif *rif; int err; nh->nh_grp = nh_grp; nh->key.fib_nh = fib_nh; + memcpy(&nh->gw_addr, &fib_nh->nh_gw, sizeof(fib_nh->nh_gw)); err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh); if (err) return err; @@ -1785,37 +2797,29 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp, fib_nh->nh_flags & RTNH_F_LINKDOWN) return 0; - rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); - if (!rif) - return 0; - mlxsw_sp_nexthop_rif_init(nh, rif); - - err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh); + err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh); if (err) goto err_nexthop_neigh_init; return 0; err_nexthop_neigh_init: - mlxsw_sp_nexthop_rif_fini(nh); mlxsw_sp_nexthop_remove(mlxsw_sp, nh); return err; } -static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_nexthop *nh) +static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop *nh) { - mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh); - mlxsw_sp_nexthop_rif_fini(nh); + mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh); mlxsw_sp_nexthop_remove(mlxsw_sp, nh); } -static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp, - unsigned long event, struct fib_nh *fib_nh) +static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp, + unsigned long event, struct fib_nh *fib_nh) { struct mlxsw_sp_nexthop_key key; struct mlxsw_sp_nexthop *nh; - struct mlxsw_sp_rif *rif; if (mlxsw_sp->router->aborted) return; @@ -1825,18 +2829,12 @@ static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp, if (WARN_ON_ONCE(!nh)) return; - rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fib_nh->nh_dev); - if (!rif) - return; - switch (event) { case FIB_EVENT_NH_ADD: - mlxsw_sp_nexthop_rif_init(nh, rif); - mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh); + mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh); break; case FIB_EVENT_NH_DEL: - mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh); - mlxsw_sp_nexthop_rif_fini(nh); + mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh); break; } @@ -1849,14 +2847,20 @@ static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop *nh, *tmp; list_for_each_entry_safe(nh, tmp, 
&rif->nexthop_list, rif_list_node) { - mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh); - mlxsw_sp_nexthop_rif_fini(nh); + mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh); mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp); } } +static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp, + const struct fib_info *fi) +{ + return fi->fib_nh->nh_scope == RT_SCOPE_LINK || + mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fi->fib_nh, NULL); +} + static struct mlxsw_sp_nexthop_group * -mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi) +mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi) { struct mlxsw_sp_nexthop_group *nh_grp; struct mlxsw_sp_nexthop *nh; @@ -1870,17 +2874,19 @@ mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi) nh_grp = kzalloc(alloc_size, GFP_KERNEL); if (!nh_grp) return ERR_PTR(-ENOMEM); + nh_grp->priv = fi; INIT_LIST_HEAD(&nh_grp->fib_list); - nh_grp->gateway = fi->fib_nh->nh_scope == RT_SCOPE_LINK; + nh_grp->neigh_tbl = &arp_tbl; + + nh_grp->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, fi); nh_grp->count = fi->fib_nhs; - nh_grp->key.fi = fi; fib_info_hold(fi); for (i = 0; i < nh_grp->count; i++) { nh = &nh_grp->nexthops[i]; fib_nh = &fi->fib_nh[i]; - err = mlxsw_sp_nexthop_init(mlxsw_sp, nh_grp, nh, fib_nh); + err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh); if (err) - goto err_nexthop_init; + goto err_nexthop4_init; } err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp); if (err) @@ -1889,19 +2895,19 @@ mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi) return nh_grp; err_nexthop_group_insert: -err_nexthop_init: +err_nexthop4_init: for (i--; i >= 0; i--) { nh = &nh_grp->nexthops[i]; - mlxsw_sp_nexthop_fini(mlxsw_sp, nh); + mlxsw_sp_nexthop4_fini(mlxsw_sp, nh); } - fib_info_put(nh_grp->key.fi); + fib_info_put(fi); kfree(nh_grp); return ERR_PTR(err); } static void -mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_nexthop_group *nh_grp) +mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group *nh_grp) { struct mlxsw_sp_nexthop *nh; int i; @@ -1909,25 +2915,23 @@ mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp); for (i = 0; i < nh_grp->count; i++) { nh = &nh_grp->nexthops[i]; - mlxsw_sp_nexthop_fini(mlxsw_sp, nh); + mlxsw_sp_nexthop4_fini(mlxsw_sp, nh); } mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp); WARN_ON_ONCE(nh_grp->adj_index_valid); - fib_info_put(nh_grp->key.fi); + fib_info_put(mlxsw_sp_nexthop4_group_fi(nh_grp)); kfree(nh_grp); } -static int mlxsw_sp_nexthop_group_get(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry, - struct fib_info *fi) +static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + struct fib_info *fi) { - struct mlxsw_sp_nexthop_group_key key; struct mlxsw_sp_nexthop_group *nh_grp; - key.fi = fi; - nh_grp = mlxsw_sp_nexthop_group_lookup(mlxsw_sp, key); + nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi); if (!nh_grp) { - nh_grp = mlxsw_sp_nexthop_group_create(mlxsw_sp, fi); + nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi); if (IS_ERR(nh_grp)) return PTR_ERR(nh_grp); } @@ -1936,15 +2940,25 @@ static int mlxsw_sp_nexthop_group_get(struct mlxsw_sp *mlxsw_sp, return 0; } -static void mlxsw_sp_nexthop_group_put(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry) +static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp, + struct 
mlxsw_sp_fib_entry *fib_entry) { struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group; list_del(&fib_entry->nexthop_group_node); if (!list_empty(&nh_grp->fib_list)) return; - mlxsw_sp_nexthop_group_destroy(mlxsw_sp, nh_grp); + mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp); +} + +static bool +mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry) +{ + struct mlxsw_sp_fib4_entry *fib4_entry; + + fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry, + common); + return !fib4_entry->tos; } static bool @@ -1952,29 +2966,133 @@ mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry) { struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group; - if (fib_entry->params.tos) - return false; + switch (fib_entry->fib_node->fib->proto) { + case MLXSW_SP_L3_PROTO_IPV4: + if (!mlxsw_sp_fib4_entry_should_offload(fib_entry)) + return false; + break; + case MLXSW_SP_L3_PROTO_IPV6: + break; + } switch (fib_entry->type) { case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE: return !!nh_group->adj_index_valid; case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL: return !!nh_group->nh_rif; + case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP: + return true; default: return false; } } +static struct mlxsw_sp_nexthop * +mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp, + const struct mlxsw_sp_rt6 *mlxsw_sp_rt6) +{ + int i; + + for (i = 0; i < nh_grp->count; i++) { + struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i]; + struct rt6_info *rt = mlxsw_sp_rt6->rt; + + if (nh->rif && nh->rif->dev == rt->dst.dev && + ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr, + &rt->rt6i_gateway)) + return nh; + continue; + } + + return NULL; +} + +static void +mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry) +{ + struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group; + int i; + + if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL || + fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP) { + nh_grp->nexthops->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD; + return; + } + + for (i = 0; i < nh_grp->count; i++) { + struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i]; + + if (nh->offloaded) + nh->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD; + else + nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD; + } +} + +static void +mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry) +{ + struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group; + int i; + + for (i = 0; i < nh_grp->count; i++) { + struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i]; + + nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD; + } +} + +static void +mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry) +{ + struct mlxsw_sp_fib6_entry *fib6_entry; + struct mlxsw_sp_rt6 *mlxsw_sp_rt6; + + fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry, + common); + + if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL) { + list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6, + list)->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD; + return; + } + + list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) { + struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group; + struct mlxsw_sp_nexthop *nh; + + nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6); + if (nh && nh->offloaded) + mlxsw_sp_rt6->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD; + else + mlxsw_sp_rt6->rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD; + } +} + +static void +mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry) +{ + struct mlxsw_sp_fib6_entry *fib6_entry; + struct mlxsw_sp_rt6 *mlxsw_sp_rt6; + + fib6_entry = 
container_of(fib_entry, struct mlxsw_sp_fib6_entry, + common); + list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) { + struct rt6_info *rt = mlxsw_sp_rt6->rt; + + rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD; + } +} + static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry) { - fib_entry->offloaded = true; - switch (fib_entry->fib_node->fib->proto) { case MLXSW_SP_L3_PROTO_IPV4: - fib_info_offload_inc(fib_entry->nh_group->key.fi); + mlxsw_sp_fib4_entry_offload_set(fib_entry); break; case MLXSW_SP_L3_PROTO_IPV6: - WARN_ON_ONCE(1); + mlxsw_sp_fib6_entry_offload_set(fib_entry); + break; } } @@ -1983,13 +3101,12 @@ mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry) { switch (fib_entry->fib_node->fib->proto) { case MLXSW_SP_L3_PROTO_IPV4: - fib_info_offload_dec(fib_entry->nh_group->key.fi); + mlxsw_sp_fib4_entry_offload_unset(fib_entry); break; case MLXSW_SP_L3_PROTO_IPV6: - WARN_ON_ONCE(1); + mlxsw_sp_fib6_entry_offload_unset(fib_entry); + break; } - - fib_entry->offloaded = false; } static void @@ -1998,17 +3115,13 @@ mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry, { switch (op) { case MLXSW_REG_RALUE_OP_WRITE_DELETE: - if (!fib_entry->offloaded) - return; return mlxsw_sp_fib_entry_offload_unset(fib_entry); case MLXSW_REG_RALUE_OP_WRITE_WRITE: if (err) return; - if (mlxsw_sp_fib_entry_should_offload(fib_entry) && - !fib_entry->offloaded) + if (mlxsw_sp_fib_entry_should_offload(fib_entry)) mlxsw_sp_fib_entry_offload_set(fib_entry); - else if (!mlxsw_sp_fib_entry_should_offload(fib_entry) && - fib_entry->offloaded) + else if (!mlxsw_sp_fib_entry_should_offload(fib_entry)) mlxsw_sp_fib_entry_offload_unset(fib_entry); return; default: @@ -2016,13 +3129,37 @@ mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry, } } -static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry, - enum mlxsw_reg_ralue_op op) +static void +mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl, + const struct mlxsw_sp_fib_entry *fib_entry, + enum mlxsw_reg_ralue_op op) +{ + struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib; + enum mlxsw_reg_ralxx_protocol proto; + u32 *p_dip; + + proto = (enum mlxsw_reg_ralxx_protocol) fib->proto; + + switch (fib->proto) { + case MLXSW_SP_L3_PROTO_IPV4: + p_dip = (u32 *) fib_entry->fib_node->key.addr; + mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id, + fib_entry->fib_node->key.prefix_len, + *p_dip); + break; + case MLXSW_SP_L3_PROTO_IPV6: + mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id, + fib_entry->fib_node->key.prefix_len, + fib_entry->fib_node->key.addr); + break; + } +} + +static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + enum mlxsw_reg_ralue_op op) { char ralue_pl[MLXSW_REG_RALUE_LEN]; - struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib; - u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr; enum mlxsw_reg_ralue_trap_action trap_action; u16 trap_id = 0; u32 adjacency_index = 0; @@ -2041,24 +3178,19 @@ static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp, trap_id = MLXSW_TRAP_ID_RTR_INGRESS0; } - mlxsw_reg_ralue_pack4(ralue_pl, - (enum mlxsw_reg_ralxx_protocol) fib->proto, op, - fib->vr->id, fib_entry->fib_node->key.prefix_len, - *p_dip); + mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op); mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id, adjacency_index, ecmp_size); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); } 
-static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry, - enum mlxsw_reg_ralue_op op) +static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + enum mlxsw_reg_ralue_op op) { struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif; - struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib; enum mlxsw_reg_ralue_trap_action trap_action; char ralue_pl[MLXSW_REG_RALUE_LEN]; - u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr; u16 trap_id = 0; u16 rif_index = 0; @@ -2070,42 +3202,53 @@ static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp, trap_id = MLXSW_TRAP_ID_RTR_INGRESS0; } - mlxsw_reg_ralue_pack4(ralue_pl, - (enum mlxsw_reg_ralxx_protocol) fib->proto, op, - fib->vr->id, fib_entry->fib_node->key.prefix_len, - *p_dip); + mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op); mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, rif_index); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); } -static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry, - enum mlxsw_reg_ralue_op op) +static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + enum mlxsw_reg_ralue_op op) { - struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib; char ralue_pl[MLXSW_REG_RALUE_LEN]; - u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr; - mlxsw_reg_ralue_pack4(ralue_pl, - (enum mlxsw_reg_ralxx_protocol) fib->proto, op, - fib->vr->id, fib_entry->fib_node->key.prefix_len, - *p_dip); + mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op); mlxsw_reg_ralue_act_ip2me_pack(ralue_pl); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); } -static int mlxsw_sp_fib_entry_op4(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry, - enum mlxsw_reg_ralue_op op) +static int +mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + enum mlxsw_reg_ralue_op op) +{ + struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry; + const struct mlxsw_sp_ipip_ops *ipip_ops; + + if (WARN_ON(!ipip_entry)) + return -EINVAL; + + ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]; + return ipip_ops->fib_entry_op(mlxsw_sp, ipip_entry, op, + fib_entry->decap.tunnel_index); +} + +static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + enum mlxsw_reg_ralue_op op) { switch (fib_entry->type) { case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE: - return mlxsw_sp_fib_entry_op4_remote(mlxsw_sp, fib_entry, op); + return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op); case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL: - return mlxsw_sp_fib_entry_op4_local(mlxsw_sp, fib_entry, op); + return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op); case MLXSW_SP_FIB_ENTRY_TYPE_TRAP: - return mlxsw_sp_fib_entry_op4_trap(mlxsw_sp, fib_entry, op); + return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op); + case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP: + return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp, + fib_entry, op); } return -EINVAL; } @@ -2114,16 +3257,10 @@ static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_entry *fib_entry, enum mlxsw_reg_ralue_op op) { - int err = -EINVAL; + int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op); - switch (fib_entry->fib_node->fib->proto) { - case MLXSW_SP_L3_PROTO_IPV4: - err = mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op); - break; - case 
MLXSW_SP_L3_PROTO_IPV6: - return err; - } mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err); + return err; } @@ -2146,11 +3283,23 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp, const struct fib_entry_notifier_info *fen_info, struct mlxsw_sp_fib_entry *fib_entry) { + union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) }; + struct net_device *dev = fen_info->fi->fib_dev; + struct mlxsw_sp_ipip_entry *ipip_entry; struct fib_info *fi = fen_info->fi; switch (fen_info->type) { - case RTN_BROADCAST: /* fall through */ case RTN_LOCAL: + ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev, + MLXSW_SP_L3_PROTO_IPV4, dip); + if (ipip_entry) { + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP; + return mlxsw_sp_fib_entry_decap_init(mlxsw_sp, + fib_entry, + ipip_entry); + } + /* fall through */ + case RTN_BROADCAST: fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP; return 0; case RTN_UNREACHABLE: /* fall through */ @@ -2163,82 +3312,87 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp, fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL; return 0; case RTN_UNICAST: - if (fi->fib_nh->nh_scope != RT_SCOPE_LINK) - fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL; - else + if (mlxsw_sp_fi_is_gateway(mlxsw_sp, fi)) fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE; + else + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL; return 0; default: return -EINVAL; } } -static struct mlxsw_sp_fib_entry * +static struct mlxsw_sp_fib4_entry * mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_node *fib_node, const struct fib_entry_notifier_info *fen_info) { + struct mlxsw_sp_fib4_entry *fib4_entry; struct mlxsw_sp_fib_entry *fib_entry; int err; - fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL); - if (!fib_entry) { - err = -ENOMEM; - goto err_fib_entry_alloc; - } + fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL); + if (!fib4_entry) + return ERR_PTR(-ENOMEM); + fib_entry = &fib4_entry->common; err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry); if (err) goto err_fib4_entry_type_set; - err = mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fen_info->fi); + err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi); if (err) - goto err_nexthop_group_get; + goto err_nexthop4_group_get; - fib_entry->params.prio = fen_info->fi->fib_priority; - fib_entry->params.tb_id = fen_info->tb_id; - fib_entry->params.type = fen_info->type; - fib_entry->params.tos = fen_info->tos; + fib4_entry->prio = fen_info->fi->fib_priority; + fib4_entry->tb_id = fen_info->tb_id; + fib4_entry->type = fen_info->type; + fib4_entry->tos = fen_info->tos; fib_entry->fib_node = fib_node; - return fib_entry; + return fib4_entry; -err_nexthop_group_get: +err_nexthop4_group_get: err_fib4_entry_type_set: - kfree(fib_entry); -err_fib_entry_alloc: + kfree(fib4_entry); return ERR_PTR(err); } static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry) + struct mlxsw_sp_fib4_entry *fib4_entry) { - mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry); - kfree(fib_entry); + mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common); + kfree(fib4_entry); } -static struct mlxsw_sp_fib_node * -mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp, - const struct fib_entry_notifier_info *fen_info); - -static struct mlxsw_sp_fib_entry * +static struct mlxsw_sp_fib4_entry * mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp, const struct fib_entry_notifier_info *fen_info) { - struct mlxsw_sp_fib_entry *fib_entry; + struct 
mlxsw_sp_fib4_entry *fib4_entry; struct mlxsw_sp_fib_node *fib_node; + struct mlxsw_sp_fib *fib; + struct mlxsw_sp_vr *vr; - fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info); - if (IS_ERR(fib_node)) + vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id); + if (!vr) + return NULL; + fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4); + + fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst, + sizeof(fen_info->dst), + fen_info->dst_len); + if (!fib_node) return NULL; - list_for_each_entry(fib_entry, &fib_node->entry_list, list) { - if (fib_entry->params.tb_id == fen_info->tb_id && - fib_entry->params.tos == fen_info->tos && - fib_entry->params.type == fen_info->type && - fib_entry->nh_group->key.fi == fen_info->fi) { - return fib_entry; + list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) { + if (fib4_entry->tb_id == fen_info->tb_id && + fib4_entry->tos == fen_info->tos && + fib4_entry->type == fen_info->type && + mlxsw_sp_nexthop4_group_fi(fib4_entry->common.nh_group) == + fen_info->fi) { + return fib4_entry; } } @@ -2311,6 +3465,53 @@ mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node, struct mlxsw_sp_fib_entry, list) == fib_entry; } +static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib *fib, + struct mlxsw_sp_fib_node *fib_node) +{ + struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } }; + struct mlxsw_sp_lpm_tree *lpm_tree; + int err; + + /* Since the tree is shared between all virtual routers we must + * make sure it contains all the required prefix lengths. This + * can be computed by either adding the new prefix length to the + * existing prefix usage of a bound tree, or by aggregating the + * prefix lengths across all virtual routers and adding the new + * one as well. 
+ */ + if (fib->lpm_tree) + mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, + &fib->lpm_tree->prefix_usage); + else + mlxsw_sp_vrs_prefixes(mlxsw_sp, fib->proto, &req_prefix_usage); + mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len); + + lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage, + fib->proto); + if (IS_ERR(lpm_tree)) + return PTR_ERR(lpm_tree); + + if (fib->lpm_tree && fib->lpm_tree->id == lpm_tree->id) + return 0; + + err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree); + if (err) + return err; + + return 0; +} + +static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib *fib) +{ + if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) + return; + mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib); + mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree); + fib->lpm_tree = NULL; +} + static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node) { unsigned char prefix_len = fib_node->key.prefix_len; @@ -2333,8 +3534,6 @@ static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_node *fib_node, struct mlxsw_sp_fib *fib) { - struct mlxsw_sp_prefix_usage req_prefix_usage; - struct mlxsw_sp_lpm_tree *lpm_tree; int err; err = mlxsw_sp_fib_node_insert(fib, fib_node); @@ -2342,33 +3541,15 @@ static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp, return err; fib_node->fib = fib; - mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &fib->prefix_usage); - mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len); - - if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) { - err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib, - &req_prefix_usage); - if (err) - goto err_tree_check; - } else { - lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage, - fib->proto); - if (IS_ERR(lpm_tree)) - return PTR_ERR(lpm_tree); - fib->lpm_tree = lpm_tree; - err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib); - if (err) - goto err_tree_bind; - } + err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib, fib_node); + if (err) + goto err_fib_lpm_tree_link; mlxsw_sp_fib_node_prefix_inc(fib_node); return 0; -err_tree_bind: - fib->lpm_tree = NULL; - mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree); -err_tree_check: +err_fib_lpm_tree_link: fib_node->fib = NULL; mlxsw_sp_fib_node_remove(fib, fib_node); return err; @@ -2377,46 +3558,34 @@ static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp, static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_node *fib_node) { - struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree; struct mlxsw_sp_fib *fib = fib_node->fib; mlxsw_sp_fib_node_prefix_dec(fib_node); - - if (mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) { - mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib); - fib->lpm_tree = NULL; - mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree); - } else { - mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib, &fib->prefix_usage); - } - + mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib); fib_node->fib = NULL; mlxsw_sp_fib_node_remove(fib, fib_node); } static struct mlxsw_sp_fib_node * -mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp, - const struct fib_entry_notifier_info *fen_info) +mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr, + size_t addr_len, unsigned char prefix_len, + enum mlxsw_sp_l3proto proto) { struct mlxsw_sp_fib_node *fib_node; struct mlxsw_sp_fib *fib; struct mlxsw_sp_vr *vr; int err; - vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->tb_id); + vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id); if (IS_ERR(vr)) return ERR_CAST(vr); - fib = 
mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4); + fib = mlxsw_sp_vr_fib(vr, proto); - fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst, - sizeof(fen_info->dst), - fen_info->dst_len); + fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len); if (fib_node) return fib_node; - fib_node = mlxsw_sp_fib_node_create(fib, &fen_info->dst, - sizeof(fen_info->dst), - fen_info->dst_len); + fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len); if (!fib_node) { err = -ENOMEM; goto err_fib_node_create; @@ -2435,8 +3604,8 @@ mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp, return ERR_PTR(err); } -static void mlxsw_sp_fib4_node_put(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_node *fib_node) +static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_node *fib_node) { struct mlxsw_sp_vr *vr = fib_node->fib->vr; @@ -2447,95 +3616,100 @@ static void mlxsw_sp_fib4_node_put(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_vr_put(vr); } -static struct mlxsw_sp_fib_entry * +static struct mlxsw_sp_fib4_entry * mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node, - const struct mlxsw_sp_fib_entry_params *params) + const struct mlxsw_sp_fib4_entry *new4_entry) { - struct mlxsw_sp_fib_entry *fib_entry; + struct mlxsw_sp_fib4_entry *fib4_entry; - list_for_each_entry(fib_entry, &fib_node->entry_list, list) { - if (fib_entry->params.tb_id > params->tb_id) + list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) { + if (fib4_entry->tb_id > new4_entry->tb_id) continue; - if (fib_entry->params.tb_id != params->tb_id) + if (fib4_entry->tb_id != new4_entry->tb_id) break; - if (fib_entry->params.tos > params->tos) + if (fib4_entry->tos > new4_entry->tos) continue; - if (fib_entry->params.prio >= params->prio || - fib_entry->params.tos < params->tos) - return fib_entry; + if (fib4_entry->prio >= new4_entry->prio || + fib4_entry->tos < new4_entry->tos) + return fib4_entry; } return NULL; } -static int mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib_entry *fib_entry, - struct mlxsw_sp_fib_entry *new_entry) +static int +mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib4_entry *fib4_entry, + struct mlxsw_sp_fib4_entry *new4_entry) { struct mlxsw_sp_fib_node *fib_node; - if (WARN_ON(!fib_entry)) + if (WARN_ON(!fib4_entry)) return -EINVAL; - fib_node = fib_entry->fib_node; - list_for_each_entry_from(fib_entry, &fib_node->entry_list, list) { - if (fib_entry->params.tb_id != new_entry->params.tb_id || - fib_entry->params.tos != new_entry->params.tos || - fib_entry->params.prio != new_entry->params.prio) + fib_node = fib4_entry->common.fib_node; + list_for_each_entry_from(fib4_entry, &fib_node->entry_list, + common.list) { + if (fib4_entry->tb_id != new4_entry->tb_id || + fib4_entry->tos != new4_entry->tos || + fib4_entry->prio != new4_entry->prio) break; } - list_add_tail(&new_entry->list, &fib_entry->list); + list_add_tail(&new4_entry->common.list, &fib4_entry->common.list); return 0; } static int -mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib_node *fib_node, - struct mlxsw_sp_fib_entry *new_entry, +mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib4_entry *new4_entry, bool replace, bool append) { - struct mlxsw_sp_fib_entry *fib_entry; + struct mlxsw_sp_fib_node *fib_node = new4_entry->common.fib_node; + struct mlxsw_sp_fib4_entry *fib4_entry; - fib_entry = mlxsw_sp_fib4_node_entry_find(fib_node, &new_entry->params); + fib4_entry = mlxsw_sp_fib4_node_entry_find(fib_node, new4_entry); if (append) - return 
mlxsw_sp_fib4_node_list_append(fib_entry, new_entry); - if (replace && WARN_ON(!fib_entry)) + return mlxsw_sp_fib4_node_list_append(fib4_entry, new4_entry); + if (replace && WARN_ON(!fib4_entry)) return -EINVAL; /* Insert new entry before replaced one, so that we can later * remove the second. */ - if (fib_entry) { - list_add_tail(&new_entry->list, &fib_entry->list); + if (fib4_entry) { + list_add_tail(&new4_entry->common.list, + &fib4_entry->common.list); } else { - struct mlxsw_sp_fib_entry *last; + struct mlxsw_sp_fib4_entry *last; - list_for_each_entry(last, &fib_node->entry_list, list) { - if (new_entry->params.tb_id > last->params.tb_id) + list_for_each_entry(last, &fib_node->entry_list, common.list) { + if (new4_entry->tb_id > last->tb_id) break; - fib_entry = last; + fib4_entry = last; } - if (fib_entry) - list_add(&new_entry->list, &fib_entry->list); + if (fib4_entry) + list_add(&new4_entry->common.list, + &fib4_entry->common.list); else - list_add(&new_entry->list, &fib_node->entry_list); + list_add(&new4_entry->common.list, + &fib_node->entry_list); } return 0; } static void -mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib_entry *fib_entry) +mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib4_entry *fib4_entry) { - list_del(&fib_entry->list); + list_del(&fib4_entry->common.list); } -static int -mlxsw_sp_fib4_node_entry_add(struct mlxsw_sp *mlxsw_sp, - const struct mlxsw_sp_fib_node *fib_node, - struct mlxsw_sp_fib_entry *fib_entry) +static int mlxsw_sp_fib_node_entry_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry) { + struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node; + if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry)) return 0; @@ -2552,11 +3726,11 @@ mlxsw_sp_fib4_node_entry_add(struct mlxsw_sp *mlxsw_sp, return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry); } -static void -mlxsw_sp_fib4_node_entry_del(struct mlxsw_sp *mlxsw_sp, - const struct mlxsw_sp_fib_node *fib_node, - struct mlxsw_sp_fib_entry *fib_entry) +static void mlxsw_sp_fib_node_entry_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry) { + struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node; + if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry)) return; @@ -2574,54 +3748,53 @@ mlxsw_sp_fib4_node_entry_del(struct mlxsw_sp *mlxsw_sp, } static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry, + struct mlxsw_sp_fib4_entry *fib4_entry, bool replace, bool append) { - struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node; int err; - err = mlxsw_sp_fib4_node_list_insert(fib_node, fib_entry, replace, - append); + err = mlxsw_sp_fib4_node_list_insert(fib4_entry, replace, append); if (err) return err; - err = mlxsw_sp_fib4_node_entry_add(mlxsw_sp, fib_node, fib_entry); + err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib4_entry->common); if (err) - goto err_fib4_node_entry_add; + goto err_fib_node_entry_add; return 0; -err_fib4_node_entry_add: - mlxsw_sp_fib4_node_list_remove(fib_entry); +err_fib_node_entry_add: + mlxsw_sp_fib4_node_list_remove(fib4_entry); return err; } static void mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry) + struct mlxsw_sp_fib4_entry *fib4_entry) { - struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node; + mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib4_entry->common); + mlxsw_sp_fib4_node_list_remove(fib4_entry); - mlxsw_sp_fib4_node_entry_del(mlxsw_sp, fib_node, fib_entry); - mlxsw_sp_fib4_node_list_remove(fib_entry); + if 
(fib4_entry->common.type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP) + mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, &fib4_entry->common); } static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry, + struct mlxsw_sp_fib4_entry *fib4_entry, bool replace) { - struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node; - struct mlxsw_sp_fib_entry *replaced; + struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node; + struct mlxsw_sp_fib4_entry *replaced; if (!replace) return; /* We inserted the new entry before replaced one */ - replaced = list_next_entry(fib_entry, list); + replaced = list_next_entry(fib4_entry, common.list); mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced); mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced); - mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node); + mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); } static int @@ -2629,76 +3802,778 @@ mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp, const struct fib_entry_notifier_info *fen_info, bool replace, bool append) { - struct mlxsw_sp_fib_entry *fib_entry; + struct mlxsw_sp_fib4_entry *fib4_entry; struct mlxsw_sp_fib_node *fib_node; int err; if (mlxsw_sp->router->aborted) return 0; - fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info); + fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id, + &fen_info->dst, sizeof(fen_info->dst), + fen_info->dst_len, + MLXSW_SP_L3_PROTO_IPV4); if (IS_ERR(fib_node)) { dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n"); return PTR_ERR(fib_node); } - fib_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info); - if (IS_ERR(fib_entry)) { + fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info); + if (IS_ERR(fib4_entry)) { dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n"); - err = PTR_ERR(fib_entry); + err = PTR_ERR(fib4_entry); goto err_fib4_entry_create; } - err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib_entry, replace, + err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib4_entry, replace, append); if (err) { dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n"); goto err_fib4_node_entry_link; } - mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib_entry, replace); + mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib4_entry, replace); return 0; err_fib4_node_entry_link: - mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry); + mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry); err_fib4_entry_create: - mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node); + mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); return err; } static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp, struct fib_entry_notifier_info *fen_info) { - struct mlxsw_sp_fib_entry *fib_entry; + struct mlxsw_sp_fib4_entry *fib4_entry; struct mlxsw_sp_fib_node *fib_node; if (mlxsw_sp->router->aborted) return; - fib_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info); - if (WARN_ON(!fib_entry)) + fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info); + if (WARN_ON(!fib4_entry)) return; - fib_node = fib_entry->fib_node; + fib_node = fib4_entry->common.fib_node; - mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry); - mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry); - mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node); + mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry); + mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry); + mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); } -static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp) +static bool mlxsw_sp_fib6_rt_should_ignore(const struct rt6_info *rt) +{ + /* Packets with link-local 
destination IP arriving to the router + * are trapped to the CPU, so no need to program specific routes + * for them. + */ + if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LINKLOCAL) + return true; + + /* Multicast routes aren't supported, so ignore them. Neighbour + * Discovery packets are specifically trapped. + */ + if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_MULTICAST) + return true; + + /* Cloned routes are irrelevant in the forwarding path. */ + if (rt->rt6i_flags & RTF_CACHE) + return true; + + return false; +} + +static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct rt6_info *rt) +{ + struct mlxsw_sp_rt6 *mlxsw_sp_rt6; + + mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL); + if (!mlxsw_sp_rt6) + return ERR_PTR(-ENOMEM); + + /* In case of route replace, replaced route is deleted with + * no notification. Take reference to prevent accessing freed + * memory. + */ + mlxsw_sp_rt6->rt = rt; + rt6_hold(rt); + + return mlxsw_sp_rt6; +} + +#if IS_ENABLED(CONFIG_IPV6) +static void mlxsw_sp_rt6_release(struct rt6_info *rt) +{ + rt6_release(rt); +} +#else +static void mlxsw_sp_rt6_release(struct rt6_info *rt) +{ +} +#endif + +static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6) +{ + mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt); + kfree(mlxsw_sp_rt6); +} + +static bool mlxsw_sp_fib6_rt_can_mp(const struct rt6_info *rt) +{ + /* RTF_CACHE routes are ignored */ + return (rt->rt6i_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY; +} + +static struct rt6_info * +mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry) +{ + return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6, + list)->rt; +} + +static struct mlxsw_sp_fib6_entry * +mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node, + const struct rt6_info *nrt, bool replace) +{ + struct mlxsw_sp_fib6_entry *fib6_entry; + + if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace) + return NULL; + + list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) { + struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry); + + /* RT6_TABLE_LOCAL and RT6_TABLE_MAIN share the same + * virtual router. 
+ */ + if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id) + continue; + if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id) + break; + if (rt->rt6i_metric < nrt->rt6i_metric) + continue; + if (rt->rt6i_metric == nrt->rt6i_metric && + mlxsw_sp_fib6_rt_can_mp(rt)) + return fib6_entry; + if (rt->rt6i_metric > nrt->rt6i_metric) + break; + } + + return NULL; +} + +static struct mlxsw_sp_rt6 * +mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry, + const struct rt6_info *rt) +{ + struct mlxsw_sp_rt6 *mlxsw_sp_rt6; + + list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) { + if (mlxsw_sp_rt6->rt == rt) + return mlxsw_sp_rt6; + } + + return NULL; +} + +static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp, + const struct rt6_info *rt, + enum mlxsw_sp_ipip_type *ret) +{ + return rt->dst.dev && + mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->dst.dev, ret); +} + +static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group *nh_grp, + struct mlxsw_sp_nexthop *nh, + const struct rt6_info *rt) +{ + struct mlxsw_sp_router *router = mlxsw_sp->router; + struct net_device *dev = rt->dst.dev; + enum mlxsw_sp_ipip_type ipipt; + struct mlxsw_sp_rif *rif; + int err; + + if (mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, &ipipt) && + router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev, + MLXSW_SP_L3_PROTO_IPV6)) { + nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP; + err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, ipipt, nh, dev); + if (err) + return err; + mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common); + return 0; + } + + nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH; + rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); + if (!rif) + return 0; + mlxsw_sp_nexthop_rif_init(nh, rif); + + err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh); + if (err) + goto err_nexthop_neigh_init; + + return 0; + +err_nexthop_neigh_init: + mlxsw_sp_nexthop_rif_fini(nh); + return err; +} + +static void mlxsw_sp_nexthop6_type_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop *nh) +{ + mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh); +} + +static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group *nh_grp, + struct mlxsw_sp_nexthop *nh, + const struct rt6_info *rt) +{ + struct net_device *dev = rt->dst.dev; + + nh->nh_grp = nh_grp; + memcpy(&nh->gw_addr, &rt->rt6i_gateway, sizeof(nh->gw_addr)); + + if (!dev) + return 0; + nh->ifindex = dev->ifindex; + + return mlxsw_sp_nexthop6_type_init(mlxsw_sp, nh_grp, nh, rt); +} + +static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop *nh) +{ + mlxsw_sp_nexthop6_type_fini(mlxsw_sp, nh); +} + +static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp, + const struct rt6_info *rt) +{ + return rt->rt6i_flags & RTF_GATEWAY || + mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL); +} + +static struct mlxsw_sp_nexthop_group * +mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib6_entry *fib6_entry) +{ + struct mlxsw_sp_nexthop_group *nh_grp; + struct mlxsw_sp_rt6 *mlxsw_sp_rt6; + struct mlxsw_sp_nexthop *nh; + size_t alloc_size; + int i = 0; + int err; + + alloc_size = sizeof(*nh_grp) + + fib6_entry->nrt6 * sizeof(struct mlxsw_sp_nexthop); + nh_grp = kzalloc(alloc_size, GFP_KERNEL); + if (!nh_grp) + return ERR_PTR(-ENOMEM); + INIT_LIST_HEAD(&nh_grp->fib_list); +#if IS_ENABLED(CONFIG_IPV6) + nh_grp->neigh_tbl = &nd_tbl; +#endif + mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list, + struct mlxsw_sp_rt6, list); + nh_grp->gateway = 
mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt); + nh_grp->count = fib6_entry->nrt6; + for (i = 0; i < nh_grp->count; i++) { + struct rt6_info *rt = mlxsw_sp_rt6->rt; + + nh = &nh_grp->nexthops[i]; + err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt); + if (err) + goto err_nexthop6_init; + mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list); + } + + err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp); + if (err) + goto err_nexthop_group_insert; + + mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp); + return nh_grp; + +err_nexthop_group_insert: +err_nexthop6_init: + for (i--; i >= 0; i--) { + nh = &nh_grp->nexthops[i]; + mlxsw_sp_nexthop6_fini(mlxsw_sp, nh); + } + kfree(nh_grp); + return ERR_PTR(err); +} + +static void +mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group *nh_grp) +{ + struct mlxsw_sp_nexthop *nh; + int i = nh_grp->count; + + mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp); + for (i--; i >= 0; i--) { + nh = &nh_grp->nexthops[i]; + mlxsw_sp_nexthop6_fini(mlxsw_sp, nh); + } + mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp); + WARN_ON(nh_grp->adj_index_valid); + kfree(nh_grp); +} + +static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib6_entry *fib6_entry) +{ + struct mlxsw_sp_nexthop_group *nh_grp; + + nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry); + if (!nh_grp) { + nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry); + if (IS_ERR(nh_grp)) + return PTR_ERR(nh_grp); + } + + list_add_tail(&fib6_entry->common.nexthop_group_node, + &nh_grp->fib_list); + fib6_entry->common.nh_group = nh_grp; + + return 0; +} + +static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry) +{ + struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group; + + list_del(&fib_entry->nexthop_group_node); + if (!list_empty(&nh_grp->fib_list)) + return; + mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp); +} + +static int +mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib6_entry *fib6_entry) +{ + struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group; + int err; + + fib6_entry->common.nh_group = NULL; + list_del(&fib6_entry->common.nexthop_group_node); + + err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry); + if (err) + goto err_nexthop6_group_get; + + /* In case this entry is offloaded, then the adjacency index + * currently associated with it in the device's table is that + * of the old group. Start using the new one instead. 
+ */ + err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common); + if (err) + goto err_fib_node_entry_add; + + if (list_empty(&old_nh_grp->fib_list)) + mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp); + + return 0; + +err_fib_node_entry_add: + mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common); +err_nexthop6_group_get: + list_add_tail(&fib6_entry->common.nexthop_group_node, + &old_nh_grp->fib_list); + fib6_entry->common.nh_group = old_nh_grp; + return err; +} + +static int +mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib6_entry *fib6_entry, + struct rt6_info *rt) +{ + struct mlxsw_sp_rt6 *mlxsw_sp_rt6; + int err; + + mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt); + if (IS_ERR(mlxsw_sp_rt6)) + return PTR_ERR(mlxsw_sp_rt6); + + list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list); + fib6_entry->nrt6++; + + err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry); + if (err) + goto err_nexthop6_group_update; + + return 0; + +err_nexthop6_group_update: + fib6_entry->nrt6--; + list_del(&mlxsw_sp_rt6->list); + mlxsw_sp_rt6_destroy(mlxsw_sp_rt6); + return err; +} + +static void +mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib6_entry *fib6_entry, + struct rt6_info *rt) +{ + struct mlxsw_sp_rt6 *mlxsw_sp_rt6; + + mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt); + if (WARN_ON(!mlxsw_sp_rt6)) + return; + + fib6_entry->nrt6--; + list_del(&mlxsw_sp_rt6->list); + mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry); + mlxsw_sp_rt6_destroy(mlxsw_sp_rt6); +} + +static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + const struct rt6_info *rt) +{ + /* Packets hitting RTF_REJECT routes need to be discarded by the + * stack. We can rely on their destination device not having a + * RIF (it's the loopback device) and can thus use action type + * local, which will cause them to be trapped with a lower + * priority than packets that need to be locally received. 
+ */ + if (rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST)) + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP; + else if (rt->rt6i_flags & RTF_REJECT) + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL; + else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt)) + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE; + else + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL; +} + +static void +mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry) +{ + struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp; + + list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list, + list) { + fib6_entry->nrt6--; + list_del(&mlxsw_sp_rt6->list); + mlxsw_sp_rt6_destroy(mlxsw_sp_rt6); + } +} + +static struct mlxsw_sp_fib6_entry * +mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_node *fib_node, + struct rt6_info *rt) +{ + struct mlxsw_sp_fib6_entry *fib6_entry; + struct mlxsw_sp_fib_entry *fib_entry; + struct mlxsw_sp_rt6 *mlxsw_sp_rt6; + int err; + + fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL); + if (!fib6_entry) + return ERR_PTR(-ENOMEM); + fib_entry = &fib6_entry->common; + + mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt); + if (IS_ERR(mlxsw_sp_rt6)) { + err = PTR_ERR(mlxsw_sp_rt6); + goto err_rt6_create; + } + + mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, mlxsw_sp_rt6->rt); + + INIT_LIST_HEAD(&fib6_entry->rt6_list); + list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list); + fib6_entry->nrt6 = 1; + err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry); + if (err) + goto err_nexthop6_group_get; + + fib_entry->fib_node = fib_node; + + return fib6_entry; + +err_nexthop6_group_get: + list_del(&mlxsw_sp_rt6->list); + mlxsw_sp_rt6_destroy(mlxsw_sp_rt6); +err_rt6_create: + kfree(fib6_entry); + return ERR_PTR(err); +} + +static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib6_entry *fib6_entry) +{ + mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common); + mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry); + WARN_ON(fib6_entry->nrt6); + kfree(fib6_entry); +} + +static struct mlxsw_sp_fib6_entry * +mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node, + const struct rt6_info *nrt, bool replace) +{ + struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL; + + list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) { + struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry); + + if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id) + continue; + if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id) + break; + if (replace && rt->rt6i_metric == nrt->rt6i_metric) { + if (mlxsw_sp_fib6_rt_can_mp(rt) == + mlxsw_sp_fib6_rt_can_mp(nrt)) + return fib6_entry; + if (mlxsw_sp_fib6_rt_can_mp(nrt)) + fallback = fallback ?: fib6_entry; + } + if (rt->rt6i_metric > nrt->rt6i_metric) + return fallback ?: fib6_entry; + } + + return fallback; +} + +static int +mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry, + bool replace) +{ + struct mlxsw_sp_fib_node *fib_node = new6_entry->common.fib_node; + struct rt6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry); + struct mlxsw_sp_fib6_entry *fib6_entry; + + fib6_entry = mlxsw_sp_fib6_node_entry_find(fib_node, nrt, replace); + + if (replace && WARN_ON(!fib6_entry)) + return -EINVAL; + + if (fib6_entry) { + list_add_tail(&new6_entry->common.list, + &fib6_entry->common.list); + } else { + struct mlxsw_sp_fib6_entry *last; + + list_for_each_entry(last, &fib_node->entry_list, common.list) { + struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(last); + + if 
(nrt->rt6i_table->tb6_id > rt->rt6i_table->tb6_id) + break; + fib6_entry = last; + } + + if (fib6_entry) + list_add(&new6_entry->common.list, + &fib6_entry->common.list); + else + list_add(&new6_entry->common.list, + &fib_node->entry_list); + } + + return 0; +} + +static void +mlxsw_sp_fib6_node_list_remove(struct mlxsw_sp_fib6_entry *fib6_entry) +{ + list_del(&fib6_entry->common.list); +} + +static int mlxsw_sp_fib6_node_entry_link(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib6_entry *fib6_entry, + bool replace) +{ + int err; + + err = mlxsw_sp_fib6_node_list_insert(fib6_entry, replace); + if (err) + return err; + + err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common); + if (err) + goto err_fib_node_entry_add; + + return 0; + +err_fib_node_entry_add: + mlxsw_sp_fib6_node_list_remove(fib6_entry); + return err; +} + +static void +mlxsw_sp_fib6_node_entry_unlink(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib6_entry *fib6_entry) +{ + mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib6_entry->common); + mlxsw_sp_fib6_node_list_remove(fib6_entry); +} + +static struct mlxsw_sp_fib6_entry * +mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp, + const struct rt6_info *rt) +{ + struct mlxsw_sp_fib6_entry *fib6_entry; + struct mlxsw_sp_fib_node *fib_node; + struct mlxsw_sp_fib *fib; + struct mlxsw_sp_vr *vr; + + vr = mlxsw_sp_vr_find(mlxsw_sp, rt->rt6i_table->tb6_id); + if (!vr) + return NULL; + fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6); + + fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->rt6i_dst.addr, + sizeof(rt->rt6i_dst.addr), + rt->rt6i_dst.plen); + if (!fib_node) + return NULL; + + list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) { + struct rt6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry); + + if (rt->rt6i_table->tb6_id == iter_rt->rt6i_table->tb6_id && + rt->rt6i_metric == iter_rt->rt6i_metric && + mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt)) + return fib6_entry; + } + + return NULL; +} + +static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib6_entry *fib6_entry, + bool replace) +{ + struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node; + struct mlxsw_sp_fib6_entry *replaced; + + if (!replace) + return; + + replaced = list_next_entry(fib6_entry, common.list); + + mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, replaced); + mlxsw_sp_fib6_entry_destroy(mlxsw_sp, replaced); + mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); +} + +static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp, + struct rt6_info *rt, bool replace) +{ + struct mlxsw_sp_fib6_entry *fib6_entry; + struct mlxsw_sp_fib_node *fib_node; + int err; + + if (mlxsw_sp->router->aborted) + return 0; + + if (rt->rt6i_src.plen) + return -EINVAL; + + if (mlxsw_sp_fib6_rt_should_ignore(rt)) + return 0; + + fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->rt6i_table->tb6_id, + &rt->rt6i_dst.addr, + sizeof(rt->rt6i_dst.addr), + rt->rt6i_dst.plen, + MLXSW_SP_L3_PROTO_IPV6); + if (IS_ERR(fib_node)) + return PTR_ERR(fib_node); + + /* Before creating a new entry, try to append route to an existing + * multipath entry. 
+ */ + fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace); + if (fib6_entry) { + err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt); + if (err) + goto err_fib6_entry_nexthop_add; + return 0; + } + + fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt); + if (IS_ERR(fib6_entry)) { + err = PTR_ERR(fib6_entry); + goto err_fib6_entry_create; + } + + err = mlxsw_sp_fib6_node_entry_link(mlxsw_sp, fib6_entry, replace); + if (err) + goto err_fib6_node_entry_link; + + mlxsw_sp_fib6_entry_replace(mlxsw_sp, fib6_entry, replace); + + return 0; + +err_fib6_node_entry_link: + mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry); +err_fib6_entry_create: +err_fib6_entry_nexthop_add: + mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); + return err; +} + +static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp, + struct rt6_info *rt) +{ + struct mlxsw_sp_fib6_entry *fib6_entry; + struct mlxsw_sp_fib_node *fib_node; + + if (mlxsw_sp->router->aborted) + return; + + if (mlxsw_sp_fib6_rt_should_ignore(rt)) + return; + + fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt); + if (WARN_ON(!fib6_entry)) + return; + + /* If route is part of a multipath entry, but not the last one + * removed, then only reduce its nexthop group. + */ + if (!list_is_singular(&fib6_entry->rt6_list)) { + mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt); + return; + } + + fib_node = fib6_entry->common.fib_node; + + mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry); + mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry); + mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); +} + +static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_reg_ralxx_protocol proto, + u8 tree_id) { char ralta_pl[MLXSW_REG_RALTA_LEN]; char ralst_pl[MLXSW_REG_RALST_LEN]; int i, err; - mlxsw_reg_ralta_pack(ralta_pl, true, MLXSW_REG_RALXX_PROTOCOL_IPV4, - MLXSW_SP_LPM_TREE_MIN); + mlxsw_reg_ralta_pack(ralta_pl, true, proto, tree_id); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl); if (err) return err; - mlxsw_reg_ralst_pack(ralst_pl, 0xff, MLXSW_SP_LPM_TREE_MIN); + mlxsw_reg_ralst_pack(ralst_pl, 0xff, tree_id); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl); if (err) return err; @@ -2708,20 +4583,14 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp) char raltb_pl[MLXSW_REG_RALTB_LEN]; char ralue_pl[MLXSW_REG_RALUE_LEN]; - if (!mlxsw_sp_vr_is_used(vr)) - continue; - - mlxsw_reg_raltb_pack(raltb_pl, vr->id, - MLXSW_REG_RALXX_PROTOCOL_IPV4, - MLXSW_SP_LPM_TREE_MIN); + mlxsw_reg_raltb_pack(raltb_pl, vr->id, proto, tree_id); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl); if (err) return err; - mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4, - MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0, - 0); + mlxsw_reg_ralue_pack(ralue_pl, proto, + MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0); mlxsw_reg_ralue_act_ip2me_pack(ralue_pl); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); @@ -2732,17 +4601,33 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp) return 0; } +static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp) +{ + enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4; + int err; + + err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto, + MLXSW_SP_LPM_TREE_MIN); + if (err) + return err; + + proto = MLXSW_REG_RALXX_PROTOCOL_IPV6; + return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto, + MLXSW_SP_LPM_TREE_MIN + 1); +} + static void 
mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_node *fib_node) { - struct mlxsw_sp_fib_entry *fib_entry, *tmp; + struct mlxsw_sp_fib4_entry *fib4_entry, *tmp; - list_for_each_entry_safe(fib_entry, tmp, &fib_node->entry_list, list) { - bool do_break = &tmp->list == &fib_node->entry_list; + list_for_each_entry_safe(fib4_entry, tmp, &fib_node->entry_list, + common.list) { + bool do_break = &tmp->common.list == &fib_node->entry_list; - mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry); - mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry); - mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node); + mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry); + mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry); + mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); /* Break when entry list is empty and node was freed. * Otherwise, we'll access freed memory in the next * iteration. @@ -2752,6 +4637,23 @@ static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp, } } +static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_node *fib_node) +{ + struct mlxsw_sp_fib6_entry *fib6_entry, *tmp; + + list_for_each_entry_safe(fib6_entry, tmp, &fib_node->entry_list, + common.list) { + bool do_break = &tmp->common.list == &fib_node->entry_list; + + mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry); + mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry); + mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); + if (do_break) + break; + } +} + static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_node *fib_node) { @@ -2760,7 +4662,7 @@ static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node); break; case MLXSW_SP_L3_PROTO_IPV6: - WARN_ON_ONCE(1); + mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node); break; } } @@ -2791,10 +4693,17 @@ static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp) if (!mlxsw_sp_vr_is_used(vr)) continue; mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4); + + /* If virtual router was only used for IPv4, then it's no + * longer used. 
+ */ + if (!mlxsw_sp_vr_is_used(vr)) + continue; + mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6); } } -static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp) +static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp) { int err; @@ -2811,6 +4720,7 @@ static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp) struct mlxsw_sp_fib_event_work { struct work_struct work; union { + struct fib6_entry_notifier_info fen6_info; struct fib_entry_notifier_info fen_info; struct fib_rule_notifier_info fr_info; struct fib_nh_notifier_info fnh_info; @@ -2819,7 +4729,7 @@ struct mlxsw_sp_fib_event_work { unsigned long event; }; -static void mlxsw_sp_router_fib_event_work(struct work_struct *work) +static void mlxsw_sp_router_fib4_event_work(struct work_struct *work) { struct mlxsw_sp_fib_event_work *fib_work = container_of(work, struct mlxsw_sp_fib_event_work, work); @@ -2839,7 +4749,7 @@ static void mlxsw_sp_router_fib_event_work(struct work_struct *work) err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info, replace, append); if (err) - mlxsw_sp_router_fib4_abort(mlxsw_sp); + mlxsw_sp_router_fib_abort(mlxsw_sp); fib_info_put(fib_work->fen_info.fi); break; case FIB_EVENT_ENTRY_DEL: @@ -2850,13 +4760,13 @@ static void mlxsw_sp_router_fib_event_work(struct work_struct *work) case FIB_EVENT_RULE_DEL: rule = fib_work->fr_info.rule; if (!fib4_rule_default(rule) && !rule->l3mdev) - mlxsw_sp_router_fib4_abort(mlxsw_sp); + mlxsw_sp_router_fib_abort(mlxsw_sp); fib_rule_put(rule); break; case FIB_EVENT_NH_ADD: /* fall through */ case FIB_EVENT_NH_DEL: - mlxsw_sp_nexthop_event(mlxsw_sp, fib_work->event, - fib_work->fnh_info.fib_nh); + mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event, + fib_work->fnh_info.fib_nh); fib_info_put(fib_work->fnh_info.fib_nh->nh_parent); break; } @@ -2864,6 +4774,87 @@ static void mlxsw_sp_router_fib_event_work(struct work_struct *work) kfree(fib_work); } +static void mlxsw_sp_router_fib6_event_work(struct work_struct *work) +{ + struct mlxsw_sp_fib_event_work *fib_work = + container_of(work, struct mlxsw_sp_fib_event_work, work); + struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp; + struct fib_rule *rule; + bool replace; + int err; + + rtnl_lock(); + switch (fib_work->event) { + case FIB_EVENT_ENTRY_REPLACE: /* fall through */ + case FIB_EVENT_ENTRY_ADD: + replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE; + err = mlxsw_sp_router_fib6_add(mlxsw_sp, + fib_work->fen6_info.rt, replace); + if (err) + mlxsw_sp_router_fib_abort(mlxsw_sp); + mlxsw_sp_rt6_release(fib_work->fen6_info.rt); + break; + case FIB_EVENT_ENTRY_DEL: + mlxsw_sp_router_fib6_del(mlxsw_sp, fib_work->fen6_info.rt); + mlxsw_sp_rt6_release(fib_work->fen6_info.rt); + break; + case FIB_EVENT_RULE_ADD: /* fall through */ + case FIB_EVENT_RULE_DEL: + rule = fib_work->fr_info.rule; + if (!fib6_rule_default(rule) && !rule->l3mdev) + mlxsw_sp_router_fib_abort(mlxsw_sp); + fib_rule_put(rule); + break; + } + rtnl_unlock(); + kfree(fib_work); +} + +static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work, + struct fib_notifier_info *info) +{ + switch (fib_work->event) { + case FIB_EVENT_ENTRY_REPLACE: /* fall through */ + case FIB_EVENT_ENTRY_APPEND: /* fall through */ + case FIB_EVENT_ENTRY_ADD: /* fall through */ + case FIB_EVENT_ENTRY_DEL: + memcpy(&fib_work->fen_info, info, sizeof(fib_work->fen_info)); + /* Take reference on fib_info to prevent it from being + * freed while work is queued. Release it afterwards.
+ */ + fib_info_hold(fib_work->fen_info.fi); + break; + case FIB_EVENT_RULE_ADD: /* fall through */ + case FIB_EVENT_RULE_DEL: + memcpy(&fib_work->fr_info, info, sizeof(fib_work->fr_info)); + fib_rule_get(fib_work->fr_info.rule); + break; + case FIB_EVENT_NH_ADD: /* fall through */ + case FIB_EVENT_NH_DEL: + memcpy(&fib_work->fnh_info, info, sizeof(fib_work->fnh_info)); + fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent); + break; + } +} + +static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work, + struct fib_notifier_info *info) +{ + switch (fib_work->event) { + case FIB_EVENT_ENTRY_REPLACE: /* fall through */ + case FIB_EVENT_ENTRY_ADD: /* fall through */ + case FIB_EVENT_ENTRY_DEL: + memcpy(&fib_work->fen6_info, info, sizeof(fib_work->fen6_info)); + rt6_hold(fib_work->fen6_info.rt); + break; + case FIB_EVENT_RULE_ADD: /* fall through */ + case FIB_EVENT_RULE_DEL: + memcpy(&fib_work->fr_info, info, sizeof(fib_work->fr_info)); + fib_rule_get(fib_work->fr_info.rule); + break; + } +} + /* Called with rcu_read_lock() */ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, unsigned long event, void *ptr) @@ -2872,38 +4863,26 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, struct fib_notifier_info *info = ptr; struct mlxsw_sp_router *router; - if (!net_eq(info->net, &init_net)) + if (!net_eq(info->net, &init_net) || + (info->family != AF_INET && info->family != AF_INET6)) return NOTIFY_DONE; fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC); if (WARN_ON(!fib_work)) return NOTIFY_BAD; - INIT_WORK(&fib_work->work, mlxsw_sp_router_fib_event_work); router = container_of(nb, struct mlxsw_sp_router, fib_nb); fib_work->mlxsw_sp = router->mlxsw_sp; fib_work->event = event; - switch (event) { - case FIB_EVENT_ENTRY_REPLACE: /* fall through */ - case FIB_EVENT_ENTRY_APPEND: /* fall through */ - case FIB_EVENT_ENTRY_ADD: /* fall through */ - case FIB_EVENT_ENTRY_DEL: - memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info)); - /* Take referece on fib_info to prevent it from being - * freed while work is queued. Release it afterwards. 
- */ - fib_info_hold(fib_work->fen_info.fi); + switch (info->family) { + case AF_INET: + INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work); + mlxsw_sp_router_fib4_event(fib_work, info); break; - case FIB_EVENT_RULE_ADD: /* fall through */ - case FIB_EVENT_RULE_DEL: - memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info)); - fib_rule_get(fib_work->fr_info.rule); - break; - case FIB_EVENT_NH_ADD: /* fall through */ - case FIB_EVENT_NH_DEL: - memcpy(&fib_work->fnh_info, ptr, sizeof(fib_work->fnh_info)); - fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent); + case AF_INET6: + INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work); + mlxsw_sp_router_fib6_event(fib_work, info); break; } @@ -2948,17 +4927,28 @@ static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif); } -static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, - const struct in_device *in_dev, - unsigned long event) +static bool +mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev, + unsigned long event) { + struct inet6_dev *inet6_dev; + bool addr_list_empty = true; + struct in_device *idev; + switch (event) { case NETDEV_UP: - if (!rif) - return true; - return false; + return rif == NULL; case NETDEV_DOWN: - if (rif && !in_dev->ifa_list && + idev = __in_dev_get_rtnl(dev); + if (idev && idev->ifa_list) + addr_list_empty = false; + + inet6_dev = __in6_dev_get(dev); + if (addr_list_empty && inet6_dev && + !list_empty(&inet6_dev->addr_list)) + addr_list_empty = false; + + if (rif && addr_list_empty && !netif_is_l3_slave(rif->dev)) return true; /* It is possible we already removed the RIF ourselves @@ -2977,7 +4967,10 @@ mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp, { enum mlxsw_sp_fid_type type; - /* RIF type is derived from the type of the underlying FID */ + if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL)) + return MLXSW_SP_RIF_TYPE_IPIP_LB; + + /* Otherwise RIF type is derived from the type of the underlying FID. */ if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev))) type = MLXSW_SP_FID_TYPE_8021Q; else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev)) @@ -3036,6 +5029,16 @@ u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif) return rif->rif_index; } +u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif) +{ + return lb_rif->common.rif_index; +} + +u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif) +{ + return lb_rif->ul_vr_id; +} + int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif) { return rif->dev->ifindex; @@ -3047,9 +5050,9 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, { u32 tb_id = l3mdev_fib_table(params->dev); const struct mlxsw_sp_rif_ops *ops; + struct mlxsw_sp_fid *fid = NULL; enum mlxsw_sp_rif_type type; struct mlxsw_sp_rif *rif; - struct mlxsw_sp_fid *fid; struct mlxsw_sp_vr *vr; u16 rif_index; int err; @@ -3060,6 +5063,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? 
: RT_TABLE_MAIN); if (IS_ERR(vr)) return ERR_CAST(vr); + vr->rif_count++; err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index); if (err) @@ -3073,12 +5077,14 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, rif->mlxsw_sp = mlxsw_sp; rif->ops = ops; - fid = ops->fid_get(rif); - if (IS_ERR(fid)) { - err = PTR_ERR(fid); - goto err_fid_get; + if (ops->fid_get) { + fid = ops->fid_get(rif); + if (IS_ERR(fid)) { + err = PTR_ERR(fid); + goto err_fid_get; + } + rif->fid = fid; } - rif->fid = fid; if (ops->setup) ops->setup(rif, params); @@ -3087,26 +5093,19 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, if (err) goto err_configure; - err = mlxsw_sp_rif_fdb_op(mlxsw_sp, params->dev->dev_addr, - mlxsw_sp_fid_index(fid), true); - if (err) - goto err_rif_fdb_op; - mlxsw_sp_rif_counters_alloc(rif); - mlxsw_sp_fid_rif_set(fid, rif); mlxsw_sp->router->rifs[rif_index] = rif; - vr->rif_count++; return rif; -err_rif_fdb_op: - ops->deconfigure(rif); err_configure: - mlxsw_sp_fid_put(fid); + if (fid) + mlxsw_sp_fid_put(fid); err_fid_get: kfree(rif); err_rif_alloc: err_rif_index_alloc: + vr->rif_count--; mlxsw_sp_vr_put(vr); return ERR_PTR(err); } @@ -3121,15 +5120,14 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif) mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif); vr = &mlxsw_sp->router->vrs[rif->vr_id]; - vr->rif_count--; mlxsw_sp->router->rifs[rif->rif_index] = NULL; - mlxsw_sp_fid_rif_set(fid, NULL); mlxsw_sp_rif_counters_free(rif); - mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->dev->dev_addr, - mlxsw_sp_fid_index(fid), false); ops->deconfigure(rif); - mlxsw_sp_fid_put(fid); + if (fid) + /* Loopback RIFs are not associated with a FID. */ + mlxsw_sp_fid_put(fid); kfree(rif); + vr->rif_count--; mlxsw_sp_vr_put(vr); } @@ -3356,7 +5354,7 @@ int mlxsw_sp_inetaddr_event(struct notifier_block *unused, goto out; rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); - if (!mlxsw_sp_rif_should_config(rif, ifa->ifa_dev, event)) + if (!mlxsw_sp_rif_should_config(rif, dev, event)) goto out; err = __mlxsw_sp_inetaddr_event(dev, event); @@ -3364,6 +5362,61 @@ int mlxsw_sp_inetaddr_event(struct notifier_block *unused, return notifier_from_errno(err); } +struct mlxsw_sp_inet6addr_event_work { + struct work_struct work; + struct net_device *dev; + unsigned long event; +}; + +static void mlxsw_sp_inet6addr_event_work(struct work_struct *work) +{ + struct mlxsw_sp_inet6addr_event_work *inet6addr_work = + container_of(work, struct mlxsw_sp_inet6addr_event_work, work); + struct net_device *dev = inet6addr_work->dev; + unsigned long event = inet6addr_work->event; + struct mlxsw_sp *mlxsw_sp; + struct mlxsw_sp_rif *rif; + + rtnl_lock(); + mlxsw_sp = mlxsw_sp_lower_get(dev); + if (!mlxsw_sp) + goto out; + + rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); + if (!mlxsw_sp_rif_should_config(rif, dev, event)) + goto out; + + __mlxsw_sp_inetaddr_event(dev, event); +out: + rtnl_unlock(); + dev_put(dev); + kfree(inet6addr_work); +} + +/* Called with rcu_read_lock() */ +int mlxsw_sp_inet6addr_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr; + struct mlxsw_sp_inet6addr_event_work *inet6addr_work; + struct net_device *dev = if6->idev->dev; + + if (!mlxsw_sp_port_dev_lower_find_rcu(dev)) + return NOTIFY_DONE; + + inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC); + if (!inet6addr_work) + return NOTIFY_BAD; + + INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work); + inet6addr_work->dev = dev; + inet6addr_work->event = event; + dev_hold(dev); + 
mlxsw_core_schedule_work(&inet6addr_work->work); + + return NOTIFY_DONE; +} + static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index, const char *mac, int mtu) { @@ -3501,8 +5554,8 @@ static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable) rif_subport = mlxsw_sp_rif_subport_rif(rif); mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF, - rif->rif_index, rif->vr_id, rif->dev->mtu, - rif->dev->dev_addr); + rif->rif_index, rif->vr_id, rif->dev->mtu); + mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr); mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag, rif_subport->lag ? rif_subport->lag_id : rif_subport->system_port, @@ -3513,11 +5566,32 @@ static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable) static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif) { - return mlxsw_sp_rif_subport_op(rif, true); + int err; + + err = mlxsw_sp_rif_subport_op(rif, true); + if (err) + return err; + + err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr, + mlxsw_sp_fid_index(rif->fid), true); + if (err) + goto err_rif_fdb_op; + + mlxsw_sp_fid_rif_set(rif->fid, rif); + return 0; + +err_rif_fdb_op: + mlxsw_sp_rif_subport_op(rif, false); + return err; } static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif) { + struct mlxsw_sp_fid *fid = rif->fid; + + mlxsw_sp_fid_rif_set(fid, NULL); + mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr, + mlxsw_sp_fid_index(fid), false); mlxsw_sp_rif_subport_op(rif, false); } @@ -3544,7 +5618,8 @@ static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif, char ritr_pl[MLXSW_REG_RITR_LEN]; mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id, - rif->dev->mtu, rif->dev->dev_addr); + rif->dev->mtu); + mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr); mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); @@ -3565,25 +5640,48 @@ static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif) if (err) return err; + err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC, + mlxsw_sp_router_port(mlxsw_sp), true); + if (err) + goto err_fid_mc_flood_set; + err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC, mlxsw_sp_router_port(mlxsw_sp), true); if (err) goto err_fid_bc_flood_set; + err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr, + mlxsw_sp_fid_index(rif->fid), true); + if (err) + goto err_rif_fdb_op; + + mlxsw_sp_fid_rif_set(rif->fid, rif); return 0; +err_rif_fdb_op: + mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC, + mlxsw_sp_router_port(mlxsw_sp), false); err_fid_bc_flood_set: + mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC, + mlxsw_sp_router_port(mlxsw_sp), false); +err_fid_mc_flood_set: mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false); return err; } static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif) { - struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid); + struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; + struct mlxsw_sp_fid *fid = rif->fid; + mlxsw_sp_fid_rif_set(fid, NULL); + mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr, + mlxsw_sp_fid_index(fid), false); mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC, mlxsw_sp_router_port(mlxsw_sp), false); + mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC, + mlxsw_sp_router_port(mlxsw_sp), false); mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false); } @@ -3614,25 +5712,48 @@ static int 
mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif) if (err) return err; + err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC, + mlxsw_sp_router_port(mlxsw_sp), true); + if (err) + goto err_fid_mc_flood_set; + err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC, mlxsw_sp_router_port(mlxsw_sp), true); if (err) goto err_fid_bc_flood_set; + err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr, + mlxsw_sp_fid_index(rif->fid), true); + if (err) + goto err_rif_fdb_op; + + mlxsw_sp_fid_rif_set(rif->fid, rif); return 0; +err_rif_fdb_op: + mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC, + mlxsw_sp_router_port(mlxsw_sp), false); err_fid_bc_flood_set: + mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC, + mlxsw_sp_router_port(mlxsw_sp), false); +err_fid_mc_flood_set: mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false); return err; } static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif) { - struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; u16 fid_index = mlxsw_sp_fid_index(rif->fid); + struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; + struct mlxsw_sp_fid *fid = rif->fid; + mlxsw_sp_fid_rif_set(fid, NULL); + mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr, + mlxsw_sp_fid_index(fid), false); mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC, mlxsw_sp_router_port(mlxsw_sp), false); + mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC, + mlxsw_sp_router_port(mlxsw_sp), false); mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false); } @@ -3650,10 +5771,104 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = { .fid_get = mlxsw_sp_rif_fid_fid_get, }; +static struct mlxsw_sp_rif_ipip_lb * +mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif) +{ + return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common); +} + +static void +mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif, + const struct mlxsw_sp_rif_params *params) +{ + struct mlxsw_sp_rif_params_ipip_lb *params_lb; + struct mlxsw_sp_rif_ipip_lb *rif_lb; + + params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb, + common); + rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif); + rif_lb->lb_config = params_lb->lb_config; +} + +static int +mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, + struct mlxsw_sp_vr *ul_vr, bool enable) +{ + struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config; + struct mlxsw_sp_rif *rif = &lb_rif->common; + struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; + char ritr_pl[MLXSW_REG_RITR_LEN]; + u32 saddr4; + + switch (lb_cf.ul_protocol) { + case MLXSW_SP_L3_PROTO_IPV4: + saddr4 = be32_to_cpu(lb_cf.saddr.addr4); + mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF, + rif->rif_index, rif->vr_id, rif->dev->mtu); + mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt, + MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET, + ul_vr->id, saddr4, lb_cf.okey); + break; + + case MLXSW_SP_L3_PROTO_IPV6: + return -EAFNOSUPPORT; + } + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); +} + +static int +mlxsw_sp_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif) +{ + struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif); + u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev); + struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; + struct mlxsw_sp_vr *ul_vr; + int err; + + ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id); + if (IS_ERR(ul_vr)) + return PTR_ERR(ul_vr); + + err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, true); + if (err) + goto err_loopback_op; + + lb_rif->ul_vr_id = ul_vr->id; + 
++ul_vr->rif_count; + return 0; + +err_loopback_op: + mlxsw_sp_vr_put(ul_vr); + return err; +} + +static void mlxsw_sp_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif) +{ + struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif); + struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; + struct mlxsw_sp_vr *ul_vr; + + ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id]; + mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, false); + + --ul_vr->rif_count; + mlxsw_sp_vr_put(ul_vr); +} + +static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_ipip_lb_ops = { + .type = MLXSW_SP_RIF_TYPE_IPIP_LB, + .rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb), + .setup = mlxsw_sp_rif_ipip_lb_setup, + .configure = mlxsw_sp_rif_ipip_lb_configure, + .deconfigure = mlxsw_sp_rif_ipip_lb_deconfigure, +}; + static const struct mlxsw_sp_rif_ops *mlxsw_sp_rif_ops_arr[] = { [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops, [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_ops, [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops, + [MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp_rif_ipip_lb_ops, }; static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp) @@ -3681,6 +5896,18 @@ static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp) kfree(mlxsw_sp->router->rifs); } +static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp) +{ + mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr; + INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list); + return 0; +} + +static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp) +{ + WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list)); +} + static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb) { struct mlxsw_sp_router *router; @@ -3704,7 +5931,7 @@ static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) return -EIO; max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); - mlxsw_reg_rgcr_pack(rgcr_pl, true); + mlxsw_reg_rgcr_pack(rgcr_pl, true, true); mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl); if (err) @@ -3716,7 +5943,7 @@ static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp) { char rgcr_pl[MLXSW_REG_RGCR_LEN]; - mlxsw_reg_rgcr_pack(rgcr_pl, false); + mlxsw_reg_rgcr_pack(rgcr_pl, false, false); mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl); } @@ -3740,6 +5967,10 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) if (err) goto err_rifs_init; + err = mlxsw_sp_ipips_init(mlxsw_sp); + if (err) + goto err_ipips_init; + err = rhashtable_init(&mlxsw_sp->router->nexthop_ht, &mlxsw_sp_nexthop_ht_params); if (err) @@ -3781,6 +6012,8 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) err_nexthop_group_ht_init: rhashtable_destroy(&mlxsw_sp->router->nexthop_ht); err_nexthop_ht_init: + mlxsw_sp_ipips_fini(mlxsw_sp); +err_ipips_init: mlxsw_sp_rifs_fini(mlxsw_sp); err_rifs_init: __mlxsw_sp_router_fini(mlxsw_sp); @@ -3797,6 +6030,7 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp) mlxsw_sp_lpm_fini(mlxsw_sp); rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht); rhashtable_destroy(&mlxsw_sp->router->nexthop_ht); + mlxsw_sp_ipips_fini(mlxsw_sp); mlxsw_sp_rifs_fini(mlxsw_sp); __mlxsw_sp_router_fini(mlxsw_sp); kfree(mlxsw_sp->router); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h index a3e8d2b25148..345fcc4f38e9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h @@ -36,15 +36,38 @@ #define _MLXSW_ROUTER_H_ #include "spectrum.h" +#include "reg.h" + 
+enum mlxsw_sp_l3proto { + MLXSW_SP_L3_PROTO_IPV4, + MLXSW_SP_L3_PROTO_IPV6, +}; + +union mlxsw_sp_l3addr { + __be32 addr4; + struct in6_addr addr6; +}; + +struct mlxsw_sp_rif_ipip_lb; +struct mlxsw_sp_rif_ipip_lb_config { + enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt; + u32 okey; + enum mlxsw_sp_l3proto ul_protocol; /* Underlay. */ + union mlxsw_sp_l3addr saddr; +}; enum mlxsw_sp_rif_counter_dir { MLXSW_SP_RIF_COUNTER_INGRESS, MLXSW_SP_RIF_COUNTER_EGRESS, }; +struct mlxsw_sp_neigh_entry; + struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp, u16 rif_index); u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif); +u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *rif); +u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *rif); int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif); int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_rif *rif, @@ -56,5 +79,33 @@ void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp, int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_rif *rif, enum mlxsw_sp_rif_counter_dir dir); +struct mlxsw_sp_neigh_entry * +mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif, + struct mlxsw_sp_neigh_entry *neigh_entry); +int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry); +unsigned char * +mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry); +u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry); +struct in6_addr * +mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry); + +#define mlxsw_sp_rif_neigh_for_each(neigh_entry, rif) \ + for (neigh_entry = mlxsw_sp_rif_neigh_next(rif, NULL); neigh_entry; \ + neigh_entry = mlxsw_sp_rif_neigh_next(rif, neigh_entry)) +int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_neigh_entry *neigh_entry, + u64 *p_counter); +void +mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_neigh_entry *neigh_entry, + bool adding); +bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry); +union mlxsw_sp_l3addr +mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto, + const struct net_device *ol_dev); +union mlxsw_sp_l3addr +mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto, + const struct net_device *ol_dev); +__be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev); #endif /* _MLXSW_ROUTER_H_*/ diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchib.c b/drivers/net/ethernet/mellanox/mlxsw/switchib.c index 74341fe0eb25..ab7a29846bfa 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchib.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchib.c @@ -497,7 +497,7 @@ static void mlxsw_sib_fini(struct mlxsw_core *mlxsw_core) mlxsw_sib_ports_remove(mlxsw_sib); } -static struct mlxsw_config_profile mlxsw_sib_config_profile = { +static const struct mlxsw_config_profile mlxsw_sib_config_profile = { .used_max_system_port = 1, .max_system_port = 48000, .used_max_ib_mc = 1, diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index 3b0f72455681..f3c29bbf07e2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -1674,7 +1674,7 @@ static void mlxsw_sx_fini(struct mlxsw_core *mlxsw_core) mlxsw_sx_ports_remove(mlxsw_sx); } -static struct mlxsw_config_profile mlxsw_sx_config_profile = { +static const struct mlxsw_config_profile mlxsw_sx_config_profile = { .used_max_vepa_channels = 1, 
.max_vepa_channels = 0, .used_max_mid = 1, diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h index 12b5ed58f3eb..f396a1fef633 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h @@ -61,11 +61,33 @@ enum { MLXSW_TRAP_ID_MTUERROR = 0x52, MLXSW_TRAP_ID_TTLERROR = 0x53, MLXSW_TRAP_ID_LBERROR = 0x54, - MLXSW_TRAP_ID_OSPF = 0x55, + MLXSW_TRAP_ID_IPV4_OSPF = 0x55, MLXSW_TRAP_ID_IP2ME = 0x5F, + MLXSW_TRAP_ID_IPV6_UNSPECIFIED_ADDRESS = 0x60, + MLXSW_TRAP_ID_IPV6_LINK_LOCAL_DEST = 0x61, + MLXSW_TRAP_ID_IPV6_LINK_LOCAL_SRC = 0x62, + MLXSW_TRAP_ID_IPV6_ALL_NODES_LINK = 0x63, + MLXSW_TRAP_ID_IPV6_OSPF = 0x64, + MLXSW_TRAP_ID_IPV6_MLDV12_LISTENER_QUERY = 0x65, + MLXSW_TRAP_ID_IPV6_MLDV1_LISTENER_REPORT = 0x66, + MLXSW_TRAP_ID_IPV6_MLDV1_LISTENER_DONE = 0x67, + MLXSW_TRAP_ID_IPV6_MLDV2_LISTENER_REPORT = 0x68, + MLXSW_TRAP_ID_IPV6_DHCP = 0x69, + MLXSW_TRAP_ID_IPV6_ALL_ROUTERS_LINK = 0x6F, MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70, - MLXSW_TRAP_ID_BGP_IPV4 = 0x88, + MLXSW_TRAP_ID_IPV4_BGP = 0x88, + MLXSW_TRAP_ID_IPV6_BGP = 0x89, + MLXSW_TRAP_ID_L3_IPV6_ROUTER_SOLICITATION = 0x8A, + MLXSW_TRAP_ID_L3_IPV6_ROUTER_ADVERTISMENT = 0x8B, + MLXSW_TRAP_ID_L3_IPV6_NEIGHBOR_SOLICITATION = 0x8C, + MLXSW_TRAP_ID_L3_IPV6_NEIGHBOR_ADVERTISMENT = 0x8D, + MLXSW_TRAP_ID_L3_IPV6_REDIRECTION = 0x8E, MLXSW_TRAP_ID_HOST_MISS_IPV4 = 0x90, + MLXSW_TRAP_ID_IPV6_MC_LINK_LOCAL_DEST = 0x91, + MLXSW_TRAP_ID_HOST_MISS_IPV6 = 0x92, + MLXSW_TRAP_ID_IPIP_DECAP_ERROR = 0xB1, + MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6, + MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7, MLXSW_TRAP_ID_ACL0 = 0x1C0, MLXSW_TRAP_ID_MAX = 0x1FF diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index c0d7d5eec7e7..2e4effa9fe45 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -161,7 +161,7 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev) priv->rx_head = 0; - /* reset the MAC controller TX/RX desciptor base address */ + /* reset the MAC controller TX/RX descriptor base address */ writel(priv->tx_base, priv->base + REG_TXR_BASE_ADDRESS); writel(priv->rx_base, priv->base + REG_RXR_BASE_ADDRESS); } @@ -269,9 +269,8 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget) priv->rx_head = rx_head; } - if (rx < budget) { + if (rx < budget) napi_complete_done(napi, rx); - } priv->reg_imr |= RPKT_FINISH_M; writel(priv->reg_imr, priv->base + REG_INTERRUPT_MASK); @@ -289,8 +288,8 @@ static int moxart_tx_queue_space(struct net_device *ndev) static void moxart_tx_finished(struct net_device *ndev) { struct moxart_mac_priv_t *priv = netdev_priv(ndev); - unsigned tx_head = priv->tx_head; - unsigned tx_tail = priv->tx_tail; + unsigned int tx_head = priv->tx_head; + unsigned int tx_tail = priv->tx_tail; while (tx_tail != tx_head) { dma_unmap_single(&ndev->dev, priv->tx_mapping[tx_tail], @@ -312,7 +311,7 @@ static void moxart_tx_finished(struct net_device *ndev) static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id) { - struct net_device *ndev = (struct net_device *) dev_id; + struct net_device *ndev = (struct net_device *)dev_id; struct moxart_mac_priv_t *priv = netdev_priv(ndev); unsigned int ists = readl(priv->base + REG_INTERRUPT_STATUS); @@ -495,7 +494,7 @@ static int moxart_mac_probe(struct platform_device *pdev) priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE * TX_DESC_NUM, &priv->tx_base, GFP_DMA | GFP_KERNEL); - if (priv->tx_desc_base == 
NULL) { + if (!priv->tx_desc_base) { ret = -ENOMEM; goto init_fail; } @@ -503,7 +502,7 @@ static int moxart_mac_probe(struct platform_device *pdev) priv->rx_desc_base = dma_alloc_coherent(NULL, RX_REG_DESC_SIZE * RX_DESC_NUM, &priv->rx_base, GFP_DMA | GFP_KERNEL); - if (priv->rx_desc_base == NULL) { + if (!priv->rx_desc_base) { ret = -ENOMEM; goto init_fail; } diff --git a/drivers/net/ethernet/moxa/moxart_ether.h b/drivers/net/ethernet/moxa/moxart_ether.h index 686b8957d5cf..bee608b547d1 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.h +++ b/drivers/net/ethernet/moxa/moxart_ether.h @@ -55,17 +55,17 @@ #define RX_DESC2_ADDRESS_VIRT 4 #define TX_DESC_NUM 64 -#define TX_DESC_NUM_MASK (TX_DESC_NUM-1) +#define TX_DESC_NUM_MASK (TX_DESC_NUM - 1) #define TX_NEXT(N) (((N) + 1) & (TX_DESC_NUM_MASK)) #define TX_BUF_SIZE 1600 -#define TX_BUF_SIZE_MAX (TX_DESC1_BUF_SIZE_MASK+1) +#define TX_BUF_SIZE_MAX (TX_DESC1_BUF_SIZE_MASK + 1) #define TX_WAKE_THRESHOLD 16 #define RX_DESC_NUM 64 -#define RX_DESC_NUM_MASK (RX_DESC_NUM-1) +#define RX_DESC_NUM_MASK (RX_DESC_NUM - 1) #define RX_NEXT(N) (((N) + 1) & (RX_DESC_NUM_MASK)) #define RX_BUF_SIZE 1600 -#define RX_BUF_SIZE_MAX (RX_DESC1_BUF_SIZE_MASK+1) +#define RX_BUF_SIZE_MAX (RX_DESC1_BUF_SIZE_MASK + 1) #define REG_INTERRUPT_STATUS 0 #define REG_INTERRUPT_MASK 4 diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index fd2ec36c6fa1..462eda926b1c 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -42,8 +42,6 @@ * aggregated as a single large packet * napi: This parameter used to enable/disable NAPI (polling Rx) * Possible values '1' for enable and '0' for disable. Default is '1' - * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO) - * Possible values '1' for enable and '0' for disable. Default is '0' * vlan_tag_strip: This can be used to enable or disable vlan stripping. * Possible values '1' for enable , '0' for disable. 
* Default is '2' - which means disable in promisc mode @@ -453,7 +451,6 @@ S2IO_PARM_INT(lro_max_pkts, 0xFFFF); S2IO_PARM_INT(indicate_max_pkts, 0); S2IO_PARM_INT(napi, 1); -S2IO_PARM_INT(ufo, 0); S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC); static unsigned int tx_fifo_len[MAX_TX_FIFOS] = @@ -4128,32 +4125,6 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev) } frg_len = skb_headlen(skb); - if (offload_type == SKB_GSO_UDP) { - int ufo_size; - - ufo_size = s2io_udp_mss(skb); - ufo_size &= ~7; - txdp->Control_1 |= TXD_UFO_EN; - txdp->Control_1 |= TXD_UFO_MSS(ufo_size); - txdp->Control_1 |= TXD_BUFFER0_SIZE(8); -#ifdef __BIG_ENDIAN - /* both variants do cpu_to_be64(be32_to_cpu(...)) */ - fifo->ufo_in_band_v[put_off] = - (__force u64)skb_shinfo(skb)->ip6_frag_id; -#else - fifo->ufo_in_band_v[put_off] = - (__force u64)skb_shinfo(skb)->ip6_frag_id << 32; -#endif - txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v; - txdp->Buffer_Pointer = pci_map_single(sp->pdev, - fifo->ufo_in_band_v, - sizeof(u64), - PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer)) - goto pci_map_failed; - txdp++; - } - txdp->Buffer_Pointer = pci_map_single(sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE); if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer)) @@ -4161,8 +4132,6 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev) txdp->Host_Control = (unsigned long)skb; txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len); - if (offload_type == SKB_GSO_UDP) - txdp->Control_1 |= TXD_UFO_EN; frg_cnt = skb_shinfo(skb)->nr_frags; /* For fragmented SKB. */ @@ -4177,14 +4146,9 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev) skb_frag_size(frag), DMA_TO_DEVICE); txdp->Control_1 = TXD_BUFFER0_SIZE(skb_frag_size(frag)); - if (offload_type == SKB_GSO_UDP) - txdp->Control_1 |= TXD_UFO_EN; } txdp->Control_1 |= TXD_GATHER_CODE_LAST; - if (offload_type == SKB_GSO_UDP) - frg_cnt++; /* as Txd0 was used for inband header */ - tx_fifo = mac_control->tx_FIFO_start[queue]; val64 = fifo->list_info[put_off].list_phy_addr; writeq(val64, &tx_fifo->TxDL_Pointer); @@ -7910,11 +7874,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) NETIF_F_RXCSUM | NETIF_F_LRO; dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; - if (sp->device_type & XFRAME_II_DEVICE) { - dev->hw_features |= NETIF_F_UFO; - if (ufo) - dev->features |= NETIF_F_UFO; - } if (sp->high_dma_flag == true) dev->features |= NETIF_F_HIGHDMA; dev->watchdog_timeo = WATCH_DOG_TIMEOUT; @@ -8147,10 +8106,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n", dev->name); - if (ufo) - DBG_PRINT(ERR_DBG, - "%s: UDP Fragmentation Offload(UFO) enabled\n", - dev->name); /* Initialize device name */ snprintf(sp->name, sizeof(sp->name), "%s Neterion %s", dev->name, sp->product_name); diff --git a/drivers/net/ethernet/netronome/Kconfig b/drivers/net/ethernet/netronome/Kconfig index 0e331e2f685a..ae0c46ba7546 100644 --- a/drivers/net/ethernet/netronome/Kconfig +++ b/drivers/net/ethernet/netronome/Kconfig @@ -29,6 +29,7 @@ config NFP_APP_FLOWER bool "NFP4000/NFP6000 TC Flower offload support" depends on NFP depends on NET_SWITCHDEV + default y ---help--- Enable driver support for TC Flower offload on NFP4000 and NFP6000. 
Say Y, if you are planning to make use of TC Flower offload diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile index b8e1358868bd..96e579a15cbe 100644 --- a/drivers/net/ethernet/netronome/nfp/Makefile +++ b/drivers/net/ethernet/netronome/nfp/Makefile @@ -23,6 +23,7 @@ nfp-objs := \ nfp_net_ethtool.o \ nfp_net_main.o \ nfp_net_repr.o \ + nfp_net_sriov.o \ nfp_netvf_main.o \ nfp_port.o \ bpf/main.o \ diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 8e57fda6b8b5..239dfbe8a0a1 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -1238,6 +1238,16 @@ static int jge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true); } +static int jlt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return wrp_cmp_imm(nfp_prog, meta, BR_BHS, false); +} + +static int jle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return wrp_cmp_imm(nfp_prog, meta, BR_BLO, true); +} + static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { const struct bpf_insn *insn = &meta->insn; @@ -1325,6 +1335,16 @@ static int jge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true); } +static int jlt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return wrp_cmp_reg(nfp_prog, meta, BR_BHS, false); +} + +static int jle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return wrp_cmp_reg(nfp_prog, meta, BR_BLO, true); +} + static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE); @@ -1383,11 +1403,15 @@ static const instr_cb_t instr_cb[256] = { [BPF_JMP | BPF_JEQ | BPF_K] = jeq_imm, [BPF_JMP | BPF_JGT | BPF_K] = jgt_imm, [BPF_JMP | BPF_JGE | BPF_K] = jge_imm, + [BPF_JMP | BPF_JLT | BPF_K] = jlt_imm, + [BPF_JMP | BPF_JLE | BPF_K] = jle_imm, [BPF_JMP | BPF_JSET | BPF_K] = jset_imm, [BPF_JMP | BPF_JNE | BPF_K] = jne_imm, [BPF_JMP | BPF_JEQ | BPF_X] = jeq_reg, [BPF_JMP | BPF_JGT | BPF_X] = jgt_reg, [BPF_JMP | BPF_JGE | BPF_X] = jge_reg, + [BPF_JMP | BPF_JLT | BPF_X] = jlt_reg, + [BPF_JMP | BPF_JLE | BPF_X] = jle_reg, [BPF_JMP | BPF_JSET | BPF_X] = jset_reg, [BPF_JMP | BPF_JNE | BPF_X] = jne_reg, [BPF_JMP | BPF_EXIT] = goto_out, diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index afbdf5fd4e4f..be2cf10a2cd7 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -84,7 +84,7 @@ static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn) } static int -nfp_bpf_vnic_init(struct nfp_app *app, struct nfp_net *nn, unsigned int id) +nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id) { struct nfp_net_bpf_priv *priv; int ret; @@ -106,14 +106,14 @@ nfp_bpf_vnic_init(struct nfp_app *app, struct nfp_net *nn, unsigned int id) setup_timer(&priv->rx_filter_stats_timer, nfp_net_filter_stats_timer, (unsigned long)nn); - ret = nfp_app_nic_vnic_init(app, nn, id); + ret = nfp_app_nic_vnic_alloc(app, nn, id); if (ret) kfree(priv); return ret; } -static void nfp_bpf_vnic_clean(struct nfp_app *app, struct nfp_net *nn) +static void nfp_bpf_vnic_free(struct nfp_app *app, struct nfp_net *nn) { if (nn->dp.bpf_offload_xdp) nfp_bpf_xdp_offload(app, nn, NULL); @@ -121,23 +121,21 @@ 
static void nfp_bpf_vnic_clean(struct nfp_app *app, struct nfp_net *nn) } static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev, - u32 handle, __be16 proto, struct tc_to_netdev *tc) + enum tc_setup_type type, void *type_data) { + struct tc_cls_bpf_offload *cls_bpf = type_data; struct nfp_net *nn = netdev_priv(netdev); - if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS)) - return -EOPNOTSUPP; - if (proto != htons(ETH_P_ALL)) + if (type != TC_SETUP_CLSBPF || !nfp_net_ebpf_capable(nn) || + !is_classid_clsact_ingress(cls_bpf->common.classid) || + cls_bpf->common.protocol != htons(ETH_P_ALL) || + cls_bpf->common.chain_index) return -EOPNOTSUPP; - if (tc->type == TC_SETUP_CLSBPF && nfp_net_ebpf_capable(nn)) { - if (!nn->dp.bpf_offload_xdp) - return nfp_net_bpf_offload(nn, tc->cls_bpf); - else - return -EBUSY; - } + if (nn->dp.bpf_offload_xdp) + return -EBUSY; - return -EINVAL; + return nfp_net_bpf_offload(nn, cls_bpf); } static bool nfp_bpf_tc_busy(struct nfp_app *app, struct nfp_net *nn) @@ -151,8 +149,8 @@ const struct nfp_app_type app_bpf = { .extra_cap = nfp_bpf_extra_cap, - .vnic_init = nfp_bpf_vnic_init, - .vnic_clean = nfp_bpf_vnic_clean, + .vnic_alloc = nfp_bpf_vnic_alloc, + .vnic_free = nfp_bpf_vnic_free, .setup_tc = nfp_bpf_setup_tc, .tc_busy = nfp_bpf_tc_busy, diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index 78d80a364edb..a88bb5bc0082 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -115,14 +115,14 @@ nfp_net_bpf_get_act(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf) /* TC direct action */ if (cls_bpf->exts_integrated) { - if (tc_no_actions(cls_bpf->exts)) + if (!tcf_exts_has_actions(cls_bpf->exts)) return NN_ACT_DIRECT; return -EOPNOTSUPP; } /* TC legacy mode */ - if (!tc_single_action(cls_bpf->exts)) + if (!tcf_exts_has_one_action(cls_bpf->exts)) return -EOPNOTSUPP; tcf_exts_to_list(cls_bpf->exts, &actions); diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c index d696ba46f70a..5b783a91b115 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c @@ -79,28 +79,32 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog, const struct bpf_verifier_env *env) { const struct bpf_reg_state *reg0 = &env->cur_state.regs[0]; + u64 imm; if (nfp_prog->act == NN_ACT_XDP) return 0; - if (reg0->type != CONST_IMM) { - pr_info("unsupported exit state: %d, imm: %llx\n", - reg0->type, reg0->imm); + if (!(reg0->type == SCALAR_VALUE && tnum_is_const(reg0->var_off))) { + char tn_buf[48]; + + tnum_strn(tn_buf, sizeof(tn_buf), reg0->var_off); + pr_info("unsupported exit state: %d, var_off: %s\n", + reg0->type, tn_buf); return -EINVAL; } - if (nfp_prog->act != NN_ACT_DIRECT && - reg0->imm != 0 && (reg0->imm & ~0U) != ~0U) { + imm = reg0->var_off.value; + if (nfp_prog->act != NN_ACT_DIRECT && imm != 0 && (imm & ~0U) != ~0U) { pr_info("unsupported exit state: %d, imm: %llx\n", - reg0->type, reg0->imm); + reg0->type, imm); return -EINVAL; } - if (nfp_prog->act == NN_ACT_DIRECT && reg0->imm <= TC_ACT_REDIRECT && - reg0->imm != TC_ACT_SHOT && reg0->imm != TC_ACT_STOLEN && - reg0->imm != TC_ACT_QUEUED) { + if (nfp_prog->act == NN_ACT_DIRECT && imm <= TC_ACT_REDIRECT && + imm != TC_ACT_SHOT && imm != TC_ACT_STOLEN && + imm != TC_ACT_QUEUED) { pr_info("unsupported exit state: %d, imm: %llx\n", - reg0->type, reg0->imm); + reg0->type, 
imm); return -EINVAL; } diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c index b0837b58c3a1..c3ca05d10fe1 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c @@ -34,10 +34,12 @@ #include #include #include +#include #include #include "main.h" #include "../nfpcore/nfp_cpp.h" +#include "../nfp_net.h" #include "../nfp_net_repr.h" #include "./cmsg.h" @@ -75,6 +77,39 @@ nfp_flower_cmsg_alloc(struct nfp_app *app, unsigned int size, return skb; } +struct sk_buff * +nfp_flower_cmsg_mac_repr_start(struct nfp_app *app, unsigned int num_ports) +{ + struct nfp_flower_cmsg_mac_repr *msg; + struct sk_buff *skb; + unsigned int size; + + size = sizeof(*msg) + num_ports * sizeof(msg->ports[0]); + skb = nfp_flower_cmsg_alloc(app, size, NFP_FLOWER_CMSG_TYPE_MAC_REPR); + if (!skb) + return NULL; + + msg = nfp_flower_cmsg_get_data(skb); + memset(msg->reserved, 0, sizeof(msg->reserved)); + msg->num_ports = num_ports; + + return skb; +} + +void +nfp_flower_cmsg_mac_repr_add(struct sk_buff *skb, unsigned int idx, + unsigned int nbi, unsigned int nbi_port, + unsigned int phys_port) +{ + struct nfp_flower_cmsg_mac_repr *msg; + + msg = nfp_flower_cmsg_get_data(skb); + msg->ports[idx].idx = idx; + msg->ports[idx].info = nbi & NFP_FLOWER_CMSG_MAC_REPR_NBI; + msg->ports[idx].nbi_port = nbi_port; + msg->ports[idx].phys_port = phys_port; +} + int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok) { struct nfp_flower_cmsg_portmod *msg; @@ -106,23 +141,33 @@ nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb) msg = nfp_flower_cmsg_get_data(skb); link = msg->info & NFP_FLOWER_CMSG_PORTMOD_INFO_LINK; + rtnl_lock(); rcu_read_lock(); netdev = nfp_app_repr_get(app, be32_to_cpu(msg->portnum)); + rcu_read_unlock(); if (!netdev) { nfp_flower_cmsg_warn(app, "ctrl msg for unknown port 0x%08x\n", be32_to_cpu(msg->portnum)); - rcu_read_unlock(); + rtnl_unlock(); return; } - if (link) + if (link) { + u16 mtu = be16_to_cpu(msg->mtu); + netif_carrier_on(netdev); - else + + /* An MTU of 0 from the firmware should be ignored */ + if (mtu) + dev_set_mtu(netdev, mtu); + } else { netif_carrier_off(netdev); - rcu_read_unlock(); + } + rtnl_unlock(); } -void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb) +static void +nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb) { struct nfp_flower_cmsg_hdr *cmsg_hdr; enum nfp_flower_cmsg_type_port type; @@ -146,8 +191,30 @@ void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb) default: nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n", type); + goto out; } + dev_consume_skb_any(skb); + return; out: dev_kfree_skb_any(skb); } + +void nfp_flower_cmsg_process_rx(struct work_struct *work) +{ + struct nfp_flower_priv *priv; + struct sk_buff *skb; + + priv = container_of(work, struct nfp_flower_priv, cmsg_work); + + while ((skb = skb_dequeue(&priv->cmsg_skbs))) + nfp_flower_cmsg_process_one_rx(priv->app, skb); +} + +void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb) +{ + struct nfp_flower_priv *priv = app->priv; + + skb_queue_tail(&priv->cmsg_skbs, skb); + schedule_work(&priv->cmsg_work); +} diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index cf738de170ab..a2ec60344236 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -247,12 +247,27 @@ 
struct nfp_flower_cmsg_hdr { enum nfp_flower_cmsg_type_port { NFP_FLOWER_CMSG_TYPE_FLOW_ADD = 0, NFP_FLOWER_CMSG_TYPE_FLOW_DEL = 2, + NFP_FLOWER_CMSG_TYPE_MAC_REPR = 7, NFP_FLOWER_CMSG_TYPE_PORT_MOD = 8, NFP_FLOWER_CMSG_TYPE_FLOW_STATS = 15, NFP_FLOWER_CMSG_TYPE_PORT_ECHO = 16, NFP_FLOWER_CMSG_TYPE_MAX = 32, }; +/* NFP_FLOWER_CMSG_TYPE_MAC_REPR */ +struct nfp_flower_cmsg_mac_repr { + u8 reserved[3]; + u8 num_ports; + struct { + u8 idx; + u8 info; + u8 nbi_port; + u8 phys_port; + } ports[0]; +}; + +#define NFP_FLOWER_CMSG_MAC_REPR_NBI GENMASK(1, 0) + /* NFP_FLOWER_CMSG_TYPE_PORT_MOD */ struct nfp_flower_cmsg_portmod { __be32 portnum; @@ -308,7 +323,14 @@ static inline void *nfp_flower_cmsg_get_data(struct sk_buff *skb) return (unsigned char *)skb->data + NFP_FLOWER_CMSG_HLEN; } +struct sk_buff * +nfp_flower_cmsg_mac_repr_start(struct nfp_app *app, unsigned int num_ports); +void +nfp_flower_cmsg_mac_repr_add(struct sk_buff *skb, unsigned int idx, + unsigned int nbi, unsigned int nbi_port, + unsigned int phys_port); int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok); +void nfp_flower_cmsg_process_rx(struct work_struct *work); void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb); struct sk_buff * nfp_flower_cmsg_alloc(struct nfp_app *app, unsigned int size, diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index 6a65c8b33807..91fe03617106 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -127,6 +127,11 @@ nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr) static void nfp_flower_sriov_disable(struct nfp_app *app) { + struct nfp_flower_priv *priv = app->priv; + + if (!priv->nn) + return; + nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_VF); } @@ -159,12 +164,18 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app, goto err_reprs_clean; } + /* For now we only support 1 PF */ + WARN_ON(repr_type == NFP_REPR_TYPE_PF && i); + port = nfp_port_alloc(app, port_type, reprs->reprs[i]); if (repr_type == NFP_REPR_TYPE_PF) { port->pf_id = i; + port->vnic = priv->nn->dp.ctrl_bar; } else { - port->pf_id = 0; /* For now we only support 1 PF */ + port->pf_id = 0; port->vf_id = i; + port->vnic = + app->pf->vf_cfg_mem + i * NFP_NET_CFG_BAR_SZ; } eth_hw_addr_random(reprs->reprs[i]); @@ -197,32 +208,37 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app, static int nfp_flower_sriov_enable(struct nfp_app *app, int num_vfs) { + struct nfp_flower_priv *priv = app->priv; + + if (!priv->nn) + return 0; + return nfp_flower_spawn_vnic_reprs(app, NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF, NFP_REPR_TYPE_VF, num_vfs); } -static void nfp_flower_stop(struct nfp_app *app) -{ - nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF); - nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT); - -} - static int nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) { struct nfp_eth_table *eth_tbl = app->pf->eth_tbl; struct nfp_reprs *reprs, *old_reprs; + struct sk_buff *ctrl_skb; unsigned int i; int err; - reprs = nfp_reprs_alloc(eth_tbl->max_index + 1); - if (!reprs) + ctrl_skb = nfp_flower_cmsg_mac_repr_start(app, eth_tbl->count); + if (!ctrl_skb) return -ENOMEM; + reprs = nfp_reprs_alloc(eth_tbl->max_index + 1); + if (!reprs) { + err = -ENOMEM; + goto err_free_ctrl_skb; + } + for (i = 0; i < eth_tbl->count; i++) { - int phys_port = eth_tbl->ports[i].index; + unsigned int phys_port = eth_tbl->ports[i].index; struct nfp_port 
*port; u32 cmsg_port_id; @@ -255,6 +271,11 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) goto err_reprs_clean; } + nfp_flower_cmsg_mac_repr_add(ctrl_skb, i, + eth_tbl->ports[i].nbi, + eth_tbl->ports[i].base, + phys_port); + nfp_info(app->cpp, "Phys Port %d Representor(%s) created\n", phys_port, reprs->reprs[phys_port]->name); } @@ -265,37 +286,31 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) goto err_reprs_clean; } + /* The MAC_REPR control message should be sent after the MAC + * representors are registered using nfp_app_reprs_set(). This is + * because the firmware may respond with control messages for the + * MAC representors, f.e. to provide the driver with information + * about their state, and without registration the driver will drop + * any such messages. + */ + nfp_ctrl_tx(app->ctrl, ctrl_skb); + return 0; err_reprs_clean: nfp_reprs_clean_and_free(reprs); +err_free_ctrl_skb: + kfree_skb(ctrl_skb); return err; } -static int nfp_flower_start(struct nfp_app *app) +static int nfp_flower_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, + unsigned int id) { - int err; - - err = nfp_flower_spawn_phy_reprs(app, app->priv); - if (err) - return err; - - return nfp_flower_spawn_vnic_reprs(app, - NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF, - NFP_REPR_TYPE_PF, 1); -} - -static int nfp_flower_vnic_init(struct nfp_app *app, struct nfp_net *nn, - unsigned int id) -{ - struct nfp_flower_priv *priv = app->priv; - if (id > 0) { nfp_warn(app->cpp, "FlowerNIC doesn't support more than one data vNIC\n"); goto err_invalid_port; } - priv->nn = nn; - eth_hw_addr_random(nn->dp.netdev); netif_keep_dst(nn->dp.netdev); @@ -306,9 +321,59 @@ static int nfp_flower_vnic_init(struct nfp_app *app, struct nfp_net *nn, return PTR_ERR_OR_ZERO(nn->port); } +static void nfp_flower_vnic_clean(struct nfp_app *app, struct nfp_net *nn) +{ + struct nfp_flower_priv *priv = app->priv; + + if (app->pf->num_vfs) + nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_VF); + nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF); + nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT); + + priv->nn = NULL; +} + +static int nfp_flower_vnic_init(struct nfp_app *app, struct nfp_net *nn) +{ + struct nfp_flower_priv *priv = app->priv; + int err; + + priv->nn = nn; + + err = nfp_flower_spawn_phy_reprs(app, app->priv); + if (err) + goto err_clear_nn; + + err = nfp_flower_spawn_vnic_reprs(app, + NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF, + NFP_REPR_TYPE_PF, 1); + if (err) + goto err_destroy_reprs_phy; + + if (app->pf->num_vfs) { + err = nfp_flower_spawn_vnic_reprs(app, + NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF, + NFP_REPR_TYPE_VF, + app->pf->num_vfs); + if (err) + goto err_destroy_reprs_pf; + } + + return 0; + +err_destroy_reprs_pf: + nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF); +err_destroy_reprs_phy: + nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT); +err_clear_nn: + priv->nn = NULL; + return err; +} + static int nfp_flower_init(struct nfp_app *app) { const struct nfp_pf *pf = app->pf; + struct nfp_flower_priv *app_priv; u64 version; int err; @@ -339,10 +404,15 @@ static int nfp_flower_init(struct nfp_app *app) return -EINVAL; } - app->priv = vzalloc(sizeof(struct nfp_flower_priv)); - if (!app->priv) + app_priv = vzalloc(sizeof(struct nfp_flower_priv)); + if (!app_priv) return -ENOMEM; + app->priv = app_priv; + app_priv->app = app; + skb_queue_head_init(&app_priv->cmsg_skbs); + INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx); + err = 
nfp_flower_metadata_init(app); if (err) goto err_free_app_priv; @@ -356,6 +426,11 @@ static int nfp_flower_init(struct nfp_app *app) static void nfp_flower_clean(struct nfp_app *app) { + struct nfp_flower_priv *app_priv = app->priv; + + skb_queue_purge(&app_priv->cmsg_skbs); + flush_work(&app_priv->cmsg_work); + nfp_flower_metadata_cleanup(app); vfree(app->priv); app->priv = NULL; @@ -371,14 +446,13 @@ const struct nfp_app_type app_flower = { .init = nfp_flower_init, .clean = nfp_flower_clean, + .vnic_alloc = nfp_flower_vnic_alloc, .vnic_init = nfp_flower_vnic_init, + .vnic_clean = nfp_flower_vnic_clean, .repr_open = nfp_flower_repr_netdev_open, .repr_stop = nfp_flower_repr_netdev_stop, - .start = nfp_flower_start, - .stop = nfp_flower_stop, - .ctrl_msg_rx = nfp_flower_cmsg_rx, .sriov_enable = nfp_flower_sriov_enable, diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 9e64c048e83f..c20dd00a1cae 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -38,8 +38,9 @@ #include #include #include +#include +#include -struct tc_to_netdev; struct net_device; struct nfp_app; @@ -71,6 +72,7 @@ struct nfp_fl_stats_id { /** * struct nfp_flower_priv - Flower APP per-vNIC priv data + * @app: Back pointer to app * @nn: Pointer to vNIC * @mask_id_seed: Seed used for mask hash table * @flower_version: HW version of flower @@ -78,8 +80,11 @@ struct nfp_fl_stats_id { * @mask_ids: List of free mask ids * @mask_table: Hash table used to store masks * @flow_table: Hash table used to store flower rules + * @cmsg_work: Workqueue for control messages processing + * @cmsg_skbs: List of skbs for control message processing */ struct nfp_flower_priv { + struct nfp_app *app; struct nfp_net *nn; u32 mask_id_seed; u64 flower_version; @@ -87,6 +92,8 @@ struct nfp_flower_priv { struct nfp_fl_mask_id mask_ids; DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS); DECLARE_HASHTABLE(flow_table, NFP_FLOWER_HASH_BITS); + struct work_struct cmsg_work; + struct sk_buff_head cmsg_skbs; }; struct nfp_fl_key_ls { @@ -135,7 +142,7 @@ int nfp_flower_metadata_init(struct nfp_app *app); void nfp_flower_metadata_cleanup(struct nfp_app *app); int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev, - u32 handle, __be16 proto, struct tc_to_netdev *tc); + enum tc_setup_type type, void *type_data); int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow, struct nfp_fl_key_ls *key_ls, struct net_device *netdev, diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 74a96d6bb05c..a18b4d2b1d3e 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -44,6 +44,16 @@ #include "../nfp_net.h" #include "../nfp_port.h" +#define NFP_FLOWER_WHITELIST_DISSECTOR \ + (BIT(FLOW_DISSECTOR_KEY_CONTROL) | \ + BIT(FLOW_DISSECTOR_KEY_BASIC) | \ + BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \ + BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \ + BIT(FLOW_DISSECTOR_KEY_PORTS) | \ + BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \ + BIT(FLOW_DISSECTOR_KEY_VLAN) | \ + BIT(FLOW_DISSECTOR_KEY_IP)) + static int nfp_flower_xmit_flow(struct net_device *netdev, struct nfp_fl_payload *nfp_flow, u8 mtype) @@ -112,6 +122,9 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls, u8 key_layer; int key_size; + if (flow->dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR) + return 
-EOPNOTSUPP; + if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) { struct flow_dissector_key_control *mask_enc_ctl = @@ -409,16 +422,15 @@ nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev, } int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev, - u32 handle, __be16 proto, struct tc_to_netdev *tc) + enum tc_setup_type type, void *type_data) { - if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS)) + struct tc_cls_flower_offload *cls_flower = type_data; + + if (type != TC_SETUP_CLSFLOWER || + !is_classid_clsact_ingress(cls_flower->common.classid) || + !eth_proto_is_802_3(cls_flower->common.protocol) || + cls_flower->common.chain_index) return -EOPNOTSUPP; - if (!eth_proto_is_802_3(proto)) - return -EOPNOTSUPP; - - if (tc->type != TC_SETUP_CLSFLOWER) - return -EINVAL; - - return nfp_flower_repr_offload(app, netdev, tc->cls_flower); + return nfp_flower_repr_offload(app, netdev, cls_flower); } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c index c704c022574f..82c290763529 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c @@ -38,6 +38,7 @@ #include "nfpcore/nfp_nffw.h" #include "nfp_app.h" #include "nfp_main.h" +#include "nfp_net.h" #include "nfp_net_repr.h" static const struct nfp_app_type *apps[] = { @@ -48,6 +49,25 @@ static const struct nfp_app_type *apps[] = { #endif }; +struct nfp_app *nfp_app_from_netdev(struct net_device *netdev) +{ + if (nfp_netdev_is_nfp_net(netdev)) { + struct nfp_net *nn = netdev_priv(netdev); + + return nn->app; + } + + if (nfp_netdev_is_nfp_repr(netdev)) { + struct nfp_repr *repr = netdev_priv(netdev); + + return repr->app; + } + + WARN(1, "Unknown netdev type for nfp_app\n"); + + return NULL; +} + const char *nfp_app_mip_name(struct nfp_app *app) { if (!app || !app->pf->mip) @@ -105,7 +125,7 @@ struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id) return ERR_PTR(-EINVAL); } - if (WARN_ON(!apps[i]->name || !apps[i]->vnic_init)) + if (WARN_ON(!apps[i]->name || !apps[i]->vnic_alloc)) return ERR_PTR(-EINVAL); app = kzalloc(sizeof(*app), GFP_KERNEL); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h index 5d714e10d9a9..af640b5c2108 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h @@ -42,7 +42,6 @@ struct bpf_prog; struct net_device; struct pci_dev; struct sk_buff; -struct tc_to_netdev; struct sk_buff; struct nfp_app; struct nfp_cpp; @@ -70,8 +69,10 @@ extern const struct nfp_app_type app_flower; * @init: perform basic app checks and init * @clean: clean app state * @extra_cap: extra capabilities string - * @vnic_init: init vNICs (assign port types, etc.) - * @vnic_clean: clean up app's vNIC state + * @vnic_alloc: allocate vNICs (assign port types, etc.) 
+ * @vnic_free: free up app's vNIC state + * @vnic_init: vNIC netdev was registered + * @vnic_clean: vNIC netdev about to be unregistered * @repr_open: representor netdev open callback * @repr_stop: representor netdev stop callback * @start: start application logic @@ -96,8 +97,10 @@ struct nfp_app_type { const char *(*extra_cap)(struct nfp_app *app, struct nfp_net *nn); - int (*vnic_init)(struct nfp_app *app, struct nfp_net *nn, - unsigned int id); + int (*vnic_alloc)(struct nfp_app *app, struct nfp_net *nn, + unsigned int id); + void (*vnic_free)(struct nfp_app *app, struct nfp_net *nn); + int (*vnic_init)(struct nfp_app *app, struct nfp_net *nn); void (*vnic_clean)(struct nfp_app *app, struct nfp_net *nn); int (*repr_open)(struct nfp_app *app, struct nfp_repr *repr); @@ -109,7 +112,7 @@ struct nfp_app_type { void (*ctrl_msg_rx)(struct nfp_app *app, struct sk_buff *skb); int (*setup_tc)(struct nfp_app *app, struct net_device *netdev, - u32 handle, __be16 proto, struct tc_to_netdev *tc); + enum tc_setup_type type, void *type_data); bool (*tc_busy)(struct nfp_app *app, struct nfp_net *nn); int (*xdp_offload)(struct nfp_app *app, struct nfp_net *nn, struct bpf_prog *prog); @@ -158,10 +161,23 @@ static inline void nfp_app_clean(struct nfp_app *app) app->type->clean(app); } -static inline int nfp_app_vnic_init(struct nfp_app *app, struct nfp_net *nn, - unsigned int id) +static inline int nfp_app_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, + unsigned int id) { - return app->type->vnic_init(app, nn, id); + return app->type->vnic_alloc(app, nn, id); +} + +static inline void nfp_app_vnic_free(struct nfp_app *app, struct nfp_net *nn) +{ + if (app->type->vnic_free) + app->type->vnic_free(app, nn); +} + +static inline int nfp_app_vnic_init(struct nfp_app *app, struct nfp_net *nn) +{ + if (!app->type->vnic_init) + return 0; + return app->type->vnic_init(app, nn); } static inline void nfp_app_vnic_clean(struct nfp_app *app, struct nfp_net *nn) @@ -238,12 +254,11 @@ static inline bool nfp_app_tc_busy(struct nfp_app *app, struct nfp_net *nn) static inline int nfp_app_setup_tc(struct nfp_app *app, struct net_device *netdev, - u32 handle, __be16 proto, - struct tc_to_netdev *tc) + enum tc_setup_type type, void *type_data) { if (!app || !app->type->setup_tc) return -EOPNOTSUPP; - return app->type->setup_tc(app, netdev, handle, proto, tc); + return app->type->setup_tc(app, netdev, type, type_data); } static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn, @@ -295,6 +310,8 @@ static inline struct net_device *nfp_app_repr_get(struct nfp_app *app, u32 id) return app->type->repr_get(app, id); } +struct nfp_app *nfp_app_from_netdev(struct net_device *netdev); + struct nfp_reprs * nfp_app_reprs_set(struct nfp_app *app, enum nfp_repr_type type, struct nfp_reprs *reprs); @@ -308,7 +325,7 @@ void nfp_app_free(struct nfp_app *app); /* Callbacks shared between apps */ -int nfp_app_nic_vnic_init(struct nfp_app *app, struct nfp_net *nn, - unsigned int id); +int nfp_app_nic_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, + unsigned int id); #endif diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c b/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c index 4e37c81f9eaf..2a2f2fbc8850 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c @@ -60,8 +60,8 @@ nfp_app_nic_vnic_init_phy_port(struct nfp_pf *pf, struct nfp_app *app, return nn->port->type == NFP_PORT_INVALID; } -int nfp_app_nic_vnic_init(struct nfp_app *app, 
struct nfp_net *nn, - unsigned int id) +int nfp_app_nic_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, + unsigned int id) { int err; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 3f199db2002e..f8fa63b66739 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -74,6 +74,45 @@ static const struct pci_device_id nfp_pci_device_ids[] = { }; MODULE_DEVICE_TABLE(pci, nfp_pci_device_ids); +static bool nfp_board_ready(struct nfp_pf *pf) +{ + const char *cp; + long state; + int err; + + cp = nfp_hwinfo_lookup(pf->hwinfo, "board.state"); + if (!cp) + return false; + + err = kstrtol(cp, 0, &state); + if (err < 0) + return false; + + return state == 15; +} + +static int nfp_pf_board_state_wait(struct nfp_pf *pf) +{ + const unsigned long wait_until = jiffies + 10 * HZ; + + while (!nfp_board_ready(pf)) { + if (time_is_before_eq_jiffies(wait_until)) { + nfp_err(pf->cpp, "NFP board initialization timeout\n"); + return -EINVAL; + } + + nfp_info(pf->cpp, "waiting for board initialization\n"); + if (msleep_interruptible(500)) + return -ERESTARTSYS; + + /* Refresh cached information */ + kfree(pf->hwinfo); + pf->hwinfo = nfp_hwinfo_read(pf->cpp); + } + + return 0; +} + static int nfp_pcie_sriov_read_nfd_limit(struct nfp_pf *pf) { int err; @@ -172,6 +211,21 @@ static int nfp_pcie_sriov_configure(struct pci_dev *pdev, int num_vfs) return nfp_pcie_sriov_enable(pdev, num_vfs); } +static const struct firmware * +nfp_net_fw_request(struct pci_dev *pdev, struct nfp_pf *pf, const char *name) +{ + const struct firmware *fw = NULL; + int err; + + err = request_firmware_direct(&fw, name, &pdev->dev); + nfp_info(pf->cpp, " %s: %s\n", + name, err ? "not found" : "found, loading..."); + if (err) + return NULL; + + return fw; +} + /** * nfp_net_fw_find() - Find the correct firmware image for netdev mode * @pdev: PCI Device structure @@ -182,13 +236,32 @@ static int nfp_pcie_sriov_configure(struct pci_dev *pdev, int num_vfs) static const struct firmware * nfp_net_fw_find(struct pci_dev *pdev, struct nfp_pf *pf) { - const struct firmware *fw = NULL; struct nfp_eth_table_port *port; + const struct firmware *fw; const char *fw_model; char fw_name[256]; - int spc, err = 0; - int i, j; + const u8 *serial; + u16 interface; + int spc, i, j; + nfp_info(pf->cpp, "Looking for firmware file in order of priority:\n"); + + /* First try to find a firmware image specific for this device */ + interface = nfp_cpp_interface(pf->cpp); + nfp_cpp_serial(pf->cpp, &serial); + sprintf(fw_name, "netronome/serial-%pMF-%02hhx-%02hhx.nffw", + serial, interface >> 8, interface & 0xff); + fw = nfp_net_fw_request(pdev, pf, fw_name); + if (fw) + return fw; + + /* Then try the PCI name */ + sprintf(fw_name, "netronome/pci-%s.nffw", pci_name(pdev)); + fw = nfp_net_fw_request(pdev, pf, fw_name); + if (fw) + return fw; + + /* Finally try the card type and media */ if (!pf->eth_tbl) { dev_err(&pdev->dev, "Error: can't identify media config\n"); return NULL; @@ -221,13 +294,7 @@ nfp_net_fw_find(struct pci_dev *pdev, struct nfp_pf *pf) if (spc <= 0) return NULL; - err = request_firmware(&fw, fw_name, &pdev->dev); - if (err) - return NULL; - - dev_info(&pdev->dev, "Loading FW image: %s\n", fw_name); - - return fw; + return nfp_net_fw_request(pdev, pf, fw_name); } /** @@ -284,6 +351,10 @@ static int nfp_nsp_init(struct pci_dev *pdev, struct nfp_pf *pf) struct nfp_nsp *nsp; int err; + err = nfp_resource_wait(pf->cpp, NFP_RESOURCE_NSP, 
30); + if (err) + return err; + nsp = nfp_nsp_open(pf->cpp); if (IS_ERR(nsp)) { err = PTR_ERR(nsp); @@ -397,6 +468,10 @@ static int nfp_pci_probe(struct pci_dev *pdev, nfp_hwinfo_lookup(pf->hwinfo, "assembly.revision"), nfp_hwinfo_lookup(pf->hwinfo, "cpld.version")); + err = nfp_pf_board_state_wait(pf); + if (err) + goto err_hwinfo_free; + err = devlink_register(devlink, &pdev->dev); if (err) goto err_hwinfo_free; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h index 6922410806db..be0ee59f2eb9 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h @@ -73,6 +73,8 @@ struct nfp_rtsym_table; * @mac_stats_mem: Pointer to mapped MAC stats area * @vf_cfg_bar: Pointer to the CPP area for the VF configuration BAR * @vf_cfg_mem: Pointer to mapped VF configuration area + * @vfcfg_tbl2_area: Pointer to the CPP area for the VF config table + * @vfcfg_tbl2: Pointer to mapped VF config table * @irq_entries: Array of MSI-X entries for all vNICs * @limit_vfs: Number of VFs supported by firmware (~0 for PCI limit) * @num_vfs: Number of SR-IOV VFs enabled @@ -107,6 +109,8 @@ struct nfp_pf { u8 __iomem *mac_stats_mem; struct nfp_cpp_area *vf_cfg_bar; u8 __iomem *vf_cfg_mem; + struct nfp_cpp_area *vfcfg_tbl2_area; + u8 __iomem *vfcfg_tbl2; struct msix_entry *irq_entries; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index b1fa77bd708b..d51d8237b984 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -573,7 +573,6 @@ struct nfp_net_dp { * @tx_bar: Pointer to mapped TX queues * @rx_bar: Pointer to mapped FL/RX queues * @debugfs_dir: Device directory in debugfs - * @ethtool_dump_flag: Ethtool dump flag * @vnic_list: Entry on device vNIC list * @pdev: Backpointer to PCI device * @app: APP handle if available @@ -640,7 +639,6 @@ struct nfp_net { u8 __iomem *rx_bar; struct dentry *debugfs_dir; - u32 ethtool_dump_flag; struct list_head vnic_list; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 66a09e490cf5..1c0187f0af51 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -71,6 +71,7 @@ #include "nfp_app.h" #include "nfp_net_ctrl.h" #include "nfp_net.h" +#include "nfp_net_sriov.h" #include "nfp_port.h" /** @@ -990,7 +991,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring) /* check for last gather fragment */ if (fidx == nr_frags - 1) - dev_kfree_skb_any(skb); + dev_consume_skb_any(skb); tx_ring->txbufs[idx].dma_addr = 0; tx_ring->txbufs[idx].skb = NULL; @@ -2659,6 +2660,7 @@ static int nfp_net_netdev_close(struct net_device *netdev) /* Step 2: Tell NFP */ nfp_net_clear_config_and_disable(nn); + nfp_port_configure(netdev, false); /* Step 3: Free resources */ @@ -2776,16 +2778,21 @@ static int nfp_net_netdev_open(struct net_device *netdev) goto err_free_all; /* Step 2: Configure the NFP + * - Ifup the physical interface if it exists * - Enable rings from 0 to tx_rings/rx_rings - 1. 
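The reordered bring-up above is easiest to follow as two mirrored sequences: the PHY is configured before the datapath is handed to the firmware, and the unwind happens in reverse order. A condensed sketch of that ordering, using only the helpers introduced in this patch and eliding the surrounding steps (example_netdev_open is a hypothetical name, not the driver's actual entry point):

static int example_netdev_open(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int err;

	/* ifup the physical interface first, if one is attached */
	err = nfp_port_configure(netdev, true);
	if (err)
		return err;

	/* then hand the datapath configuration to the firmware */
	err = nfp_net_set_config_and_enable(nn);
	if (err)
		goto err_port_disable;

	return 0;

err_port_disable:
	/* unwind in reverse order: take the PHY back down */
	nfp_port_configure(netdev, false);
	return err;
}

The close path mirrors this: the firmware configuration is cleared first, then nfp_port_configure(netdev, false) takes the port down.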
* - Write MAC address (in case it changed) * - Set the MTU * - Set the Freelist buffer size * - Enable the FW */ - err = nfp_net_set_config_and_enable(nn); + err = nfp_port_configure(netdev, true); if (err) goto err_free_all; + err = nfp_net_set_config_and_enable(nn); + if (err) + goto err_port_disable; + /* Step 3: Enable for kernel * - put some freelist descriptors on each RX ring * - enable NAPI on each ring @@ -2796,6 +2803,8 @@ static int nfp_net_netdev_open(struct net_device *netdev) return 0; +err_port_disable: + nfp_port_configure(netdev, false); err_free_all: nfp_net_close_free_all(nn); return err; @@ -3413,6 +3422,11 @@ const struct net_device_ops nfp_net_netdev_ops = { .ndo_get_stats64 = nfp_net_stat64, .ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid, + .ndo_set_vf_mac = nfp_app_set_vf_mac, + .ndo_set_vf_vlan = nfp_app_set_vf_vlan, + .ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk, + .ndo_get_vf_config = nfp_app_get_vf_config, + .ndo_set_vf_link_state = nfp_app_set_vf_link_state, .ndo_setup_tc = nfp_port_setup_tc, .ndo_tx_timeout = nfp_net_tx_timeout, .ndo_set_rx_mode = nfp_net_set_rx_mode, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h index e5e94e0746ec..b0a452ba9039 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h @@ -164,6 +164,7 @@ #define NFP_NET_CFG_UPDATE_BPF (0x1 << 10) /* BPF program load */ #define NFP_NET_CFG_UPDATE_MACADDR (0x1 << 11) /* MAC address change */ #define NFP_NET_CFG_UPDATE_MBOX (0x1 << 12) /* Mailbox update */ +#define NFP_NET_CFG_UPDATE_VF (0x1 << 13) /* VF settings change */ #define NFP_NET_CFG_UPDATE_ERR (0x1 << 31) /* A error occurred */ #define NFP_NET_CFG_TXRS_ENABLE 0x0008 #define NFP_NET_CFG_RXRS_ENABLE 0x0010 diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c index 40217ece5fcb..cf81cf95d1d8 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c @@ -125,7 +125,6 @@ static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data) struct nfp_net_tx_ring *tx_ring; struct nfp_net_tx_desc *txd; int d_rd_p, d_wr_p, txd_cnt; - struct sk_buff *skb; struct nfp_net *nn; int i; @@ -158,13 +157,15 @@ static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data) txd->vals[0], txd->vals[1], txd->vals[2], txd->vals[3]); - skb = READ_ONCE(tx_ring->txbufs[i].skb); - if (skb) { - if (tx_ring == r_vec->tx_ring) + if (tx_ring == r_vec->tx_ring) { + struct sk_buff *skb = READ_ONCE(tx_ring->txbufs[i].skb); + + if (skb) seq_printf(file, " skb->head=%p skb->data=%p", skb->head, skb->data); - else - seq_printf(file, " frag=%p", skb); + } else { + seq_printf(file, " frag=%p", + READ_ONCE(tx_ring->txbufs[i].frag)); } if (tx_ring->txbufs[i].dma_addr) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 6e31355c3567..07969f06df10 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -59,82 +59,129 @@ enum nfp_dump_diag { NFP_DUMP_NSP_DIAG = 0, }; -/* Support for stats. 
Returns netdev, driver, and device stats */ -enum { NETDEV_ET_STATS, NFP_NET_DRV_ET_STATS, NFP_NET_DEV_ET_STATS }; -struct _nfp_net_et_stats { +struct nfp_et_stat { char name[ETH_GSTRING_LEN]; - int type; - int sz; int off; }; -#define NN_ET_NETDEV_STAT(m) NETDEV_ET_STATS, \ - FIELD_SIZEOF(struct net_device_stats, m), \ - offsetof(struct net_device_stats, m) -/* For stats in the control BAR (other than Q stats) */ -#define NN_ET_DEV_STAT(m) NFP_NET_DEV_ET_STATS, \ - sizeof(u64), \ - (m) -static const struct _nfp_net_et_stats nfp_net_et_stats[] = { - /* netdev stats */ - {"rx_packets", NN_ET_NETDEV_STAT(rx_packets)}, - {"tx_packets", NN_ET_NETDEV_STAT(tx_packets)}, - {"rx_bytes", NN_ET_NETDEV_STAT(rx_bytes)}, - {"tx_bytes", NN_ET_NETDEV_STAT(tx_bytes)}, - {"rx_errors", NN_ET_NETDEV_STAT(rx_errors)}, - {"tx_errors", NN_ET_NETDEV_STAT(tx_errors)}, - {"rx_dropped", NN_ET_NETDEV_STAT(rx_dropped)}, - {"tx_dropped", NN_ET_NETDEV_STAT(tx_dropped)}, - {"multicast", NN_ET_NETDEV_STAT(multicast)}, - {"collisions", NN_ET_NETDEV_STAT(collisions)}, - {"rx_over_errors", NN_ET_NETDEV_STAT(rx_over_errors)}, - {"rx_crc_errors", NN_ET_NETDEV_STAT(rx_crc_errors)}, - {"rx_frame_errors", NN_ET_NETDEV_STAT(rx_frame_errors)}, - {"rx_fifo_errors", NN_ET_NETDEV_STAT(rx_fifo_errors)}, - {"rx_missed_errors", NN_ET_NETDEV_STAT(rx_missed_errors)}, - {"tx_aborted_errors", NN_ET_NETDEV_STAT(tx_aborted_errors)}, - {"tx_carrier_errors", NN_ET_NETDEV_STAT(tx_carrier_errors)}, - {"tx_fifo_errors", NN_ET_NETDEV_STAT(tx_fifo_errors)}, +static const struct nfp_et_stat nfp_net_et_stats[] = { /* Stats from the device */ - {"dev_rx_discards", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_DISCARDS)}, - {"dev_rx_errors", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_ERRORS)}, - {"dev_rx_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_OCTETS)}, - {"dev_rx_uc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_UC_OCTETS)}, - {"dev_rx_mc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_MC_OCTETS)}, - {"dev_rx_bc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_BC_OCTETS)}, - {"dev_rx_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_FRAMES)}, - {"dev_rx_mc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_MC_FRAMES)}, - {"dev_rx_bc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_BC_FRAMES)}, + { "dev_rx_discards", NFP_NET_CFG_STATS_RX_DISCARDS }, + { "dev_rx_errors", NFP_NET_CFG_STATS_RX_ERRORS }, + { "dev_rx_bytes", NFP_NET_CFG_STATS_RX_OCTETS }, + { "dev_rx_uc_bytes", NFP_NET_CFG_STATS_RX_UC_OCTETS }, + { "dev_rx_mc_bytes", NFP_NET_CFG_STATS_RX_MC_OCTETS }, + { "dev_rx_bc_bytes", NFP_NET_CFG_STATS_RX_BC_OCTETS }, + { "dev_rx_pkts", NFP_NET_CFG_STATS_RX_FRAMES }, + { "dev_rx_mc_pkts", NFP_NET_CFG_STATS_RX_MC_FRAMES }, + { "dev_rx_bc_pkts", NFP_NET_CFG_STATS_RX_BC_FRAMES }, - {"dev_tx_discards", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_DISCARDS)}, - {"dev_tx_errors", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_ERRORS)}, - {"dev_tx_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_OCTETS)}, - {"dev_tx_uc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_UC_OCTETS)}, - {"dev_tx_mc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_MC_OCTETS)}, - {"dev_tx_bc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_BC_OCTETS)}, - {"dev_tx_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_FRAMES)}, - {"dev_tx_mc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_MC_FRAMES)}, - {"dev_tx_bc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_BC_FRAMES)}, + { "dev_tx_discards", NFP_NET_CFG_STATS_TX_DISCARDS }, + { "dev_tx_errors", NFP_NET_CFG_STATS_TX_ERRORS }, + { "dev_tx_bytes", NFP_NET_CFG_STATS_TX_OCTETS }, + { "dev_tx_uc_bytes", NFP_NET_CFG_STATS_TX_UC_OCTETS }, + { 
"dev_tx_mc_bytes", NFP_NET_CFG_STATS_TX_MC_OCTETS }, + { "dev_tx_bc_bytes", NFP_NET_CFG_STATS_TX_BC_OCTETS }, + { "dev_tx_pkts", NFP_NET_CFG_STATS_TX_FRAMES }, + { "dev_tx_mc_pkts", NFP_NET_CFG_STATS_TX_MC_FRAMES }, + { "dev_tx_bc_pkts", NFP_NET_CFG_STATS_TX_BC_FRAMES }, - {"bpf_pass_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP0_FRAMES)}, - {"bpf_pass_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP0_BYTES)}, + { "bpf_pass_pkts", NFP_NET_CFG_STATS_APP0_FRAMES }, + { "bpf_pass_bytes", NFP_NET_CFG_STATS_APP0_BYTES }, /* see comments in outro functions in nfp_bpf_jit.c to find out * how different BPF modes use app-specific counters */ - {"bpf_app1_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP1_FRAMES)}, - {"bpf_app1_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP1_BYTES)}, - {"bpf_app2_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP2_FRAMES)}, - {"bpf_app2_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP2_BYTES)}, - {"bpf_app3_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP3_FRAMES)}, - {"bpf_app3_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP3_BYTES)}, + { "bpf_app1_pkts", NFP_NET_CFG_STATS_APP1_FRAMES }, + { "bpf_app1_bytes", NFP_NET_CFG_STATS_APP1_BYTES }, + { "bpf_app2_pkts", NFP_NET_CFG_STATS_APP2_FRAMES }, + { "bpf_app2_bytes", NFP_NET_CFG_STATS_APP2_BYTES }, + { "bpf_app3_pkts", NFP_NET_CFG_STATS_APP3_FRAMES }, + { "bpf_app3_bytes", NFP_NET_CFG_STATS_APP3_BYTES }, +}; + +static const struct nfp_et_stat nfp_mac_et_stats[] = { + { "rx_octets", NFP_MAC_STATS_RX_IN_OCTETS, }, + { "rx_frame_too_long_errors", + NFP_MAC_STATS_RX_FRAME_TOO_LONG_ERRORS, }, + { "rx_range_length_errors", NFP_MAC_STATS_RX_RANGE_LENGTH_ERRORS, }, + { "rx_vlan_reveive_ok", NFP_MAC_STATS_RX_VLAN_REVEIVE_OK, }, + { "rx_errors", NFP_MAC_STATS_RX_IN_ERRORS, }, + { "rx_broadcast_pkts", NFP_MAC_STATS_RX_IN_BROADCAST_PKTS, }, + { "rx_drop_events", NFP_MAC_STATS_RX_DROP_EVENTS, }, + { "rx_alignment_errors", NFP_MAC_STATS_RX_ALIGNMENT_ERRORS, }, + { "rx_pause_mac_ctrl_frames", + NFP_MAC_STATS_RX_PAUSE_MAC_CTRL_FRAMES, }, + { "rx_frames_received_ok", NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK, }, + { "rx_frame_check_sequence_errors", + NFP_MAC_STATS_RX_FRAME_CHECK_SEQUENCE_ERRORS, }, + { "rx_unicast_pkts", NFP_MAC_STATS_RX_UNICAST_PKTS, }, + { "rx_multicast_pkts", NFP_MAC_STATS_RX_MULTICAST_PKTS, }, + { "rx_pkts", NFP_MAC_STATS_RX_PKTS, }, + { "rx_undersize_pkts", NFP_MAC_STATS_RX_UNDERSIZE_PKTS, }, + { "rx_pkts_64_octets", NFP_MAC_STATS_RX_PKTS_64_OCTETS, }, + { "rx_pkts_65_to_127_octets", + NFP_MAC_STATS_RX_PKTS_65_TO_127_OCTETS, }, + { "rx_pkts_128_to_255_octets", + NFP_MAC_STATS_RX_PKTS_128_TO_255_OCTETS, }, + { "rx_pkts_256_to_511_octets", + NFP_MAC_STATS_RX_PKTS_256_TO_511_OCTETS, }, + { "rx_pkts_512_to_1023_octets", + NFP_MAC_STATS_RX_PKTS_512_TO_1023_OCTETS, }, + { "rx_pkts_1024_to_1518_octets", + NFP_MAC_STATS_RX_PKTS_1024_TO_1518_OCTETS, }, + { "rx_pkts_1519_to_max_octets", + NFP_MAC_STATS_RX_PKTS_1519_TO_MAX_OCTETS, }, + { "rx_jabbers", NFP_MAC_STATS_RX_JABBERS, }, + { "rx_fragments", NFP_MAC_STATS_RX_FRAGMENTS, }, + { "rx_oversize_pkts", NFP_MAC_STATS_RX_OVERSIZE_PKTS, }, + { "rx_pause_frames_class0", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS0, }, + { "rx_pause_frames_class1", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS1, }, + { "rx_pause_frames_class2", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS2, }, + { "rx_pause_frames_class3", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS3, }, + { "rx_pause_frames_class4", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS4, }, + { "rx_pause_frames_class5", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS5, }, + { "rx_pause_frames_class6", 
NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS6, }, + { "rx_pause_frames_class7", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS7, }, + { "rx_mac_ctrl_frames_received", + NFP_MAC_STATS_RX_MAC_CTRL_FRAMES_RECEIVED, }, + { "rx_mac_head_drop", NFP_MAC_STATS_RX_MAC_HEAD_DROP, }, + { "tx_queue_drop", NFP_MAC_STATS_TX_QUEUE_DROP, }, + { "tx_octets", NFP_MAC_STATS_TX_OUT_OCTETS, }, + { "tx_vlan_transmitted_ok", NFP_MAC_STATS_TX_VLAN_TRANSMITTED_OK, }, + { "tx_errors", NFP_MAC_STATS_TX_OUT_ERRORS, }, + { "tx_broadcast_pkts", NFP_MAC_STATS_TX_BROADCAST_PKTS, }, + { "tx_pause_mac_ctrl_frames", + NFP_MAC_STATS_TX_PAUSE_MAC_CTRL_FRAMES, }, + { "tx_frames_transmitted_ok", + NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK, }, + { "tx_unicast_pkts", NFP_MAC_STATS_TX_UNICAST_PKTS, }, + { "tx_multicast_pkts", NFP_MAC_STATS_TX_MULTICAST_PKTS, }, + { "tx_pkts_64_octets", NFP_MAC_STATS_TX_PKTS_64_OCTETS, }, + { "tx_pkts_65_to_127_octets", + NFP_MAC_STATS_TX_PKTS_65_TO_127_OCTETS, }, + { "tx_pkts_128_to_255_octets", + NFP_MAC_STATS_TX_PKTS_128_TO_255_OCTETS, }, + { "tx_pkts_256_to_511_octets", + NFP_MAC_STATS_TX_PKTS_256_TO_511_OCTETS, }, + { "tx_pkts_512_to_1023_octets", + NFP_MAC_STATS_TX_PKTS_512_TO_1023_OCTETS, }, + { "tx_pkts_1024_to_1518_octets", + NFP_MAC_STATS_TX_PKTS_1024_TO_1518_OCTETS, }, + { "tx_pkts_1519_to_max_octets", + NFP_MAC_STATS_TX_PKTS_1519_TO_MAX_OCTETS, }, + { "tx_pause_frames_class0", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS0, }, + { "tx_pause_frames_class1", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS1, }, + { "tx_pause_frames_class2", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS2, }, + { "tx_pause_frames_class3", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS3, }, + { "tx_pause_frames_class4", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS4, }, + { "tx_pause_frames_class5", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS5, }, + { "tx_pause_frames_class6", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS6, }, + { "tx_pause_frames_class7", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS7, }, }; #define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats) -#define NN_ET_RVEC_STATS_LEN (nn->dp.num_r_vecs * 3) +#define NN_ET_SWITCH_STATS_LEN 9 #define NN_ET_RVEC_GATHER_STATS 7 -#define NN_ET_QUEUE_STATS_LEN ((nn->dp.num_tx_rings + nn->dp.num_rx_rings) * 2) -#define NN_ET_STATS_LEN (NN_ET_GLOBAL_STATS_LEN + NN_ET_RVEC_GATHER_STATS + \ - NN_ET_RVEC_STATS_LEN + NN_ET_QUEUE_STATS_LEN) static void nfp_net_get_nspinfo(struct nfp_app *app, char *version) { @@ -147,34 +194,53 @@ static void nfp_net_get_nspinfo(struct nfp_app *app, char *version) if (IS_ERR(nsp)) return; - snprintf(version, ETHTOOL_FWVERS_LEN, "sp:%hu.%hu", + snprintf(version, ETHTOOL_FWVERS_LEN, "%hu.%hu", nfp_nsp_get_abi_ver_major(nsp), nfp_nsp_get_abi_ver_minor(nsp)); nfp_nsp_close(nsp); } -static void nfp_net_get_drvinfo(struct net_device *netdev, - struct ethtool_drvinfo *drvinfo) +static void +nfp_get_drvinfo(struct nfp_app *app, struct pci_dev *pdev, + const char *vnic_version, struct ethtool_drvinfo *drvinfo) { char nsp_version[ETHTOOL_FWVERS_LEN] = {}; - struct nfp_net *nn = netdev_priv(netdev); - strlcpy(drvinfo->driver, nn->pdev->driver->name, - sizeof(drvinfo->driver)); + strlcpy(drvinfo->driver, pdev->driver->name, sizeof(drvinfo->driver)); strlcpy(drvinfo->version, nfp_driver_version, sizeof(drvinfo->version)); - nfp_net_get_nspinfo(nn->app, nsp_version); + nfp_net_get_nspinfo(app, nsp_version); snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), - "%d.%d.%d.%d %s %s %s", + "%s %s %s %s", vnic_version, nsp_version, + nfp_app_mip_name(app), nfp_app_name(app)); +} + +static void +nfp_net_get_drvinfo(struct net_device 
*netdev, struct ethtool_drvinfo *drvinfo) +{ + char vnic_version[ETHTOOL_FWVERS_LEN] = {}; + struct nfp_net *nn = netdev_priv(netdev); + + snprintf(vnic_version, sizeof(vnic_version), "%d.%d.%d.%d", nn->fw_ver.resv, nn->fw_ver.class, - nn->fw_ver.major, nn->fw_ver.minor, nsp_version, - nfp_app_mip_name(nn->app), nfp_app_name(nn->app)); + nn->fw_ver.major, nn->fw_ver.minor); strlcpy(drvinfo->bus_info, pci_name(nn->pdev), sizeof(drvinfo->bus_info)); - drvinfo->n_stats = NN_ET_STATS_LEN; - drvinfo->regdump_len = NFP_NET_CFG_BAR_SZ; + nfp_get_drvinfo(nn->app, nn->pdev, vnic_version, drvinfo); +} + +static void +nfp_app_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) +{ + struct nfp_app *app; + + app = nfp_app_from_netdev(netdev); + if (!app) + return; + + nfp_get_drvinfo(app, app->pdev, "*", drvinfo); } /** @@ -346,123 +412,214 @@ static int nfp_net_set_ringparam(struct net_device *netdev, return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt); } +static __printf(2, 3) u8 *nfp_pr_et(u8 *data, const char *fmt, ...) +{ + va_list args; + + va_start(args, fmt); + vsnprintf(data, ETH_GSTRING_LEN, fmt, args); + va_end(args); + + return data + ETH_GSTRING_LEN; +} + +static unsigned int nfp_vnic_get_sw_stats_count(struct net_device *netdev) +{ + struct nfp_net *nn = netdev_priv(netdev); + + return NN_ET_RVEC_GATHER_STATS + nn->dp.num_r_vecs * 3; +} + +static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data) +{ + struct nfp_net *nn = netdev_priv(netdev); + int i; + + for (i = 0; i < nn->dp.num_r_vecs; i++) { + data = nfp_pr_et(data, "rvec_%u_rx_pkts", i); + data = nfp_pr_et(data, "rvec_%u_tx_pkts", i); + data = nfp_pr_et(data, "rvec_%u_tx_busy", i); + } + + data = nfp_pr_et(data, "hw_rx_csum_ok"); + data = nfp_pr_et(data, "hw_rx_csum_inner_ok"); + data = nfp_pr_et(data, "hw_rx_csum_err"); + data = nfp_pr_et(data, "hw_tx_csum"); + data = nfp_pr_et(data, "hw_tx_inner_csum"); + data = nfp_pr_et(data, "tx_gather"); + data = nfp_pr_et(data, "tx_lso"); + + return data; +} + +static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data) +{ + u64 gathered_stats[NN_ET_RVEC_GATHER_STATS] = {}; + struct nfp_net *nn = netdev_priv(netdev); + u64 tmp[NN_ET_RVEC_GATHER_STATS]; + unsigned int i, j; + + for (i = 0; i < nn->dp.num_r_vecs; i++) { + unsigned int start; + + do { + start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync); + *data++ = nn->r_vecs[i].rx_pkts; + tmp[0] = nn->r_vecs[i].hw_csum_rx_ok; + tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok; + tmp[2] = nn->r_vecs[i].hw_csum_rx_error; + } while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start)); + + do { + start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync); + *data++ = nn->r_vecs[i].tx_pkts; + *data++ = nn->r_vecs[i].tx_busy; + tmp[3] = nn->r_vecs[i].hw_csum_tx; + tmp[4] = nn->r_vecs[i].hw_csum_tx_inner; + tmp[5] = nn->r_vecs[i].tx_gather; + tmp[6] = nn->r_vecs[i].tx_lso; + } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start)); + + for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++) + gathered_stats[j] += tmp[j]; + } + + for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++) + *data++ = gathered_stats[j]; + + return data; +} + +static unsigned int +nfp_vnic_get_hw_stats_count(unsigned int rx_rings, unsigned int tx_rings) +{ + return NN_ET_GLOBAL_STATS_LEN + (rx_rings + tx_rings) * 2; +} + +static u8 * +nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int rx_rings, + unsigned int tx_rings, bool repr) +{ + int swap_off, i; + + BUILD_BUG_ON(NN_ET_GLOBAL_STATS_LEN < NN_ET_SWITCH_STATS_LEN * 2); + /* If repr is true 
first add SWITCH_STATS_LEN and then subtract it + * effectively swapping the RX and TX statistics (giving us the RX + * and TX from perspective of the switch). + */ + swap_off = repr * NN_ET_SWITCH_STATS_LEN; + + for (i = 0; i < NN_ET_SWITCH_STATS_LEN; i++) + data = nfp_pr_et(data, nfp_net_et_stats[i + swap_off].name); + + for (i = NN_ET_SWITCH_STATS_LEN; i < NN_ET_SWITCH_STATS_LEN * 2; i++) + data = nfp_pr_et(data, nfp_net_et_stats[i - swap_off].name); + + for (i = NN_ET_SWITCH_STATS_LEN * 2; i < NN_ET_GLOBAL_STATS_LEN; i++) + data = nfp_pr_et(data, nfp_net_et_stats[i].name); + + for (i = 0; i < tx_rings; i++) { + data = nfp_pr_et(data, "txq_%u_pkts", i); + data = nfp_pr_et(data, "txq_%u_bytes", i); + } + + for (i = 0; i < rx_rings; i++) { + data = nfp_pr_et(data, "rxq_%u_pkts", i); + data = nfp_pr_et(data, "rxq_%u_bytes", i); + } + + return data; +} + +static u64 * +nfp_vnic_get_hw_stats(u64 *data, u8 __iomem *mem, + unsigned int rx_rings, unsigned int tx_rings) +{ + unsigned int i; + + for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++) + *data++ = readq(mem + nfp_net_et_stats[i].off); + + for (i = 0; i < tx_rings; i++) { + *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i)); + *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8); + } + + for (i = 0; i < rx_rings; i++) { + *data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i)); + *data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i) + 8); + } + + return data; +} + +static unsigned int nfp_mac_get_stats_count(struct net_device *netdev) +{ + struct nfp_port *port; + + port = nfp_port_from_netdev(netdev); + if (!__nfp_port_get_eth_port(port) || !port->eth_stats) + return 0; + + return ARRAY_SIZE(nfp_mac_et_stats); +} + +static u8 *nfp_mac_get_stats_strings(struct net_device *netdev, u8 *data) +{ + struct nfp_port *port; + unsigned int i; + + port = nfp_port_from_netdev(netdev); + if (!__nfp_port_get_eth_port(port) || !port->eth_stats) + return data; + + for (i = 0; i < ARRAY_SIZE(nfp_mac_et_stats); i++) + data = nfp_pr_et(data, "mac.%s", nfp_mac_et_stats[i].name); + + return data; +} + +static u64 *nfp_mac_get_stats(struct net_device *netdev, u64 *data) +{ + struct nfp_port *port; + unsigned int i; + + port = nfp_port_from_netdev(netdev); + if (!__nfp_port_get_eth_port(port) || !port->eth_stats) + return data; + + for (i = 0; i < ARRAY_SIZE(nfp_mac_et_stats); i++) + *data++ = readq(port->eth_stats + nfp_mac_et_stats[i].off); + + return data; +} + static void nfp_net_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { struct nfp_net *nn = netdev_priv(netdev); - u8 *p = data; - int i; switch (stringset) { case ETH_SS_STATS: - for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++) { - memcpy(p, nfp_net_et_stats[i].name, ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } - for (i = 0; i < nn->dp.num_r_vecs; i++) { - sprintf(p, "rvec_%u_rx_pkts", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rvec_%u_tx_pkts", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rvec_%u_tx_busy", i); - p += ETH_GSTRING_LEN; - } - strncpy(p, "hw_rx_csum_ok", ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - strncpy(p, "hw_rx_csum_inner_ok", ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - strncpy(p, "hw_rx_csum_err", ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - strncpy(p, "hw_tx_csum", ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - strncpy(p, "hw_tx_inner_csum", ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - strncpy(p, "tx_gather", ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - strncpy(p, "tx_lso", ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - for (i = 0; i < nn->dp.num_tx_rings; i++) { - sprintf(p, "txq_%u_pkts", i); - p += 
ETH_GSTRING_LEN; - sprintf(p, "txq_%u_bytes", i); - p += ETH_GSTRING_LEN; - } - for (i = 0; i < nn->dp.num_rx_rings; i++) { - sprintf(p, "rxq_%u_pkts", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rxq_%u_bytes", i); - p += ETH_GSTRING_LEN; - } + data = nfp_vnic_get_sw_stats_strings(netdev, data); + data = nfp_vnic_get_hw_stats_strings(data, nn->dp.num_rx_rings, + nn->dp.num_tx_rings, + false); + data = nfp_mac_get_stats_strings(netdev, data); break; } } -static void nfp_net_get_stats(struct net_device *netdev, - struct ethtool_stats *stats, u64 *data) +static void +nfp_net_get_stats(struct net_device *netdev, struct ethtool_stats *stats, + u64 *data) { - u64 gathered_stats[NN_ET_RVEC_GATHER_STATS] = {}; struct nfp_net *nn = netdev_priv(netdev); - struct rtnl_link_stats64 *netdev_stats; - struct rtnl_link_stats64 temp = {}; - u64 tmp[NN_ET_RVEC_GATHER_STATS]; - u8 __iomem *io_p; - int i, j, k; - u8 *p; - netdev_stats = dev_get_stats(netdev, &temp); - - for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++) { - switch (nfp_net_et_stats[i].type) { - case NETDEV_ET_STATS: - p = (char *)netdev_stats + nfp_net_et_stats[i].off; - data[i] = nfp_net_et_stats[i].sz == sizeof(u64) ? - *(u64 *)p : *(u32 *)p; - break; - - case NFP_NET_DEV_ET_STATS: - io_p = nn->dp.ctrl_bar + nfp_net_et_stats[i].off; - data[i] = readq(io_p); - break; - } - } - for (j = 0; j < nn->dp.num_r_vecs; j++) { - unsigned int start; - - do { - start = u64_stats_fetch_begin(&nn->r_vecs[j].rx_sync); - data[i++] = nn->r_vecs[j].rx_pkts; - tmp[0] = nn->r_vecs[j].hw_csum_rx_ok; - tmp[1] = nn->r_vecs[j].hw_csum_rx_inner_ok; - tmp[2] = nn->r_vecs[j].hw_csum_rx_error; - } while (u64_stats_fetch_retry(&nn->r_vecs[j].rx_sync, start)); - - do { - start = u64_stats_fetch_begin(&nn->r_vecs[j].tx_sync); - data[i++] = nn->r_vecs[j].tx_pkts; - data[i++] = nn->r_vecs[j].tx_busy; - tmp[3] = nn->r_vecs[j].hw_csum_tx; - tmp[4] = nn->r_vecs[j].hw_csum_tx_inner; - tmp[5] = nn->r_vecs[j].tx_gather; - tmp[6] = nn->r_vecs[j].tx_lso; - } while (u64_stats_fetch_retry(&nn->r_vecs[j].tx_sync, start)); - - for (k = 0; k < NN_ET_RVEC_GATHER_STATS; k++) - gathered_stats[k] += tmp[k]; - } - for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++) - data[i++] = gathered_stats[j]; - for (j = 0; j < nn->dp.num_tx_rings; j++) { - io_p = nn->dp.ctrl_bar + NFP_NET_CFG_TXR_STATS(j); - data[i++] = readq(io_p); - io_p = nn->dp.ctrl_bar + NFP_NET_CFG_TXR_STATS(j) + 8; - data[i++] = readq(io_p); - } - for (j = 0; j < nn->dp.num_rx_rings; j++) { - io_p = nn->dp.ctrl_bar + NFP_NET_CFG_RXR_STATS(j); - data[i++] = readq(io_p); - io_p = nn->dp.ctrl_bar + NFP_NET_CFG_RXR_STATS(j) + 8; - data[i++] = readq(io_p); - } + data = nfp_vnic_get_sw_stats(netdev, data); + data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar, + nn->dp.num_rx_rings, nn->dp.num_tx_rings); + data = nfp_mac_get_stats(netdev, data); } static int nfp_net_get_sset_count(struct net_device *netdev, int sset) @@ -471,7 +628,54 @@ static int nfp_net_get_sset_count(struct net_device *netdev, int sset) switch (sset) { case ETH_SS_STATS: - return NN_ET_STATS_LEN; + return nfp_vnic_get_sw_stats_count(netdev) + + nfp_vnic_get_hw_stats_count(nn->dp.num_rx_rings, + nn->dp.num_tx_rings) + + nfp_mac_get_stats_count(netdev); + default: + return -EOPNOTSUPP; + } +} + +static void nfp_port_get_strings(struct net_device *netdev, + u32 stringset, u8 *data) +{ + struct nfp_port *port = nfp_port_from_netdev(netdev); + + switch (stringset) { + case ETH_SS_STATS: + if (nfp_port_is_vnic(port)) + data = nfp_vnic_get_hw_stats_strings(data, 0, 0, true); + else + 
data = nfp_mac_get_stats_strings(netdev, data); + break; + } +} + +static void +nfp_port_get_stats(struct net_device *netdev, struct ethtool_stats *stats, + u64 *data) +{ + struct nfp_port *port = nfp_port_from_netdev(netdev); + + if (nfp_port_is_vnic(port)) + data = nfp_vnic_get_hw_stats(data, port->vnic, 0, 0); + else + data = nfp_mac_get_stats(netdev, data); +} + +static int nfp_port_get_sset_count(struct net_device *netdev, int sset) +{ + struct nfp_port *port = nfp_port_from_netdev(netdev); + unsigned int count; + + switch (sset) { + case ETH_SS_STATS: + if (nfp_port_is_vnic(port)) + count = nfp_vnic_get_hw_stats_count(0, 0); + else + count = nfp_mac_get_stats_count(netdev); + return count; default: return -EOPNOTSUPP; } @@ -708,18 +912,18 @@ static int nfp_net_get_coalesce(struct net_device *netdev, /* Other debug dumps */ static int -nfp_dump_nsp_diag(struct nfp_net *nn, struct ethtool_dump *dump, void *buffer) +nfp_dump_nsp_diag(struct nfp_app *app, struct ethtool_dump *dump, void *buffer) { struct nfp_resource *res; int ret; - if (!nn->app) + if (!app) return -EOPNOTSUPP; dump->version = 1; dump->flag = NFP_DUMP_NSP_DIAG; - res = nfp_resource_acquire(nn->app->cpp, NFP_RESOURCE_NSP_DIAG); + res = nfp_resource_acquire(app->cpp, NFP_RESOURCE_NSP_DIAG); if (IS_ERR(res)) return PTR_ERR(res); @@ -729,7 +933,7 @@ nfp_dump_nsp_diag(struct nfp_net *nn, struct ethtool_dump *dump, void *buffer) goto exit_release; } - ret = nfp_cpp_read(nn->app->cpp, nfp_resource_cpp_id(res), + ret = nfp_cpp_read(app->cpp, nfp_resource_cpp_id(res), nfp_resource_address(res), buffer, dump->len); if (ret != dump->len) @@ -746,32 +950,30 @@ nfp_dump_nsp_diag(struct nfp_net *nn, struct ethtool_dump *dump, void *buffer) return ret; } -static int nfp_net_set_dump(struct net_device *netdev, struct ethtool_dump *val) +static int nfp_app_set_dump(struct net_device *netdev, struct ethtool_dump *val) { - struct nfp_net *nn = netdev_priv(netdev); + struct nfp_app *app = nfp_app_from_netdev(netdev); - if (!nn->app) + if (!app) return -EOPNOTSUPP; if (val->flag != NFP_DUMP_NSP_DIAG) return -EINVAL; - nn->ethtool_dump_flag = val->flag; - return 0; } static int -nfp_net_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump) +nfp_app_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump) { - return nfp_dump_nsp_diag(netdev_priv(netdev), dump, NULL); + return nfp_dump_nsp_diag(nfp_app_from_netdev(netdev), dump, NULL); } static int -nfp_net_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump, +nfp_app_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump, void *buffer) { - return nfp_dump_nsp_diag(netdev_priv(netdev), dump, buffer); + return nfp_dump_nsp_diag(nfp_app_from_netdev(netdev), dump, buffer); } static int nfp_net_set_coalesce(struct net_device *netdev, @@ -928,9 +1130,9 @@ static const struct ethtool_ops nfp_net_ethtool_ops = { .set_rxfh = nfp_net_set_rxfh, .get_regs_len = nfp_net_get_regs_len, .get_regs = nfp_net_get_regs, - .set_dump = nfp_net_set_dump, - .get_dump_flag = nfp_net_get_dump_flag, - .get_dump_data = nfp_net_get_dump_data, + .set_dump = nfp_app_set_dump, + .get_dump_flag = nfp_app_get_dump_flag, + .get_dump_data = nfp_app_get_dump_data, .get_coalesce = nfp_net_get_coalesce, .set_coalesce = nfp_net_set_coalesce, .get_channels = nfp_net_get_channels, @@ -939,6 +1141,17 @@ static const struct ethtool_ops nfp_net_ethtool_ops = { .set_link_ksettings = nfp_net_set_link_ksettings, }; +const struct ethtool_ops nfp_port_ethtool_ops = { + .get_drvinfo = 
nfp_app_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_strings = nfp_port_get_strings, + .get_ethtool_stats = nfp_port_get_stats, + .get_sset_count = nfp_port_get_sset_count, + .set_dump = nfp_app_set_dump, + .get_dump_flag = nfp_app_get_dump_flag, + .get_dump_data = nfp_app_get_dump_data, +}; + void nfp_net_set_ethtool_ops(struct net_device *netdev) { netdev->ethtool_ops = &nfp_net_ethtool_ops; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index 34b985384d26..ff373acd28f3 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -57,29 +57,13 @@ #include "nfpcore/nfp6000_pcie.h" #include "nfp_app.h" #include "nfp_net_ctrl.h" +#include "nfp_net_sriov.h" #include "nfp_net.h" #include "nfp_main.h" #include "nfp_port.h" #define NFP_PF_CSR_SLICE_SIZE (32 * 1024) -static int nfp_is_ready(struct nfp_pf *pf) -{ - const char *cp; - long state; - int err; - - cp = nfp_hwinfo_lookup(pf->hwinfo, "board.state"); - if (!cp) - return 0; - - err = kstrtol(cp, 0, &state); - if (err < 0) - return 0; - - return state == 15; -} - /** * nfp_net_get_mac_addr() - Get the MAC address. * @pf: NFP PF handle @@ -160,6 +144,8 @@ nfp_net_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt, static void nfp_net_pf_free_vnic(struct nfp_pf *pf, struct nfp_net *nn) { + if (nfp_net_is_data_vnic(nn)) + nfp_app_vnic_free(pf->app, nn); nfp_port_free(nn->port); list_del(&nn->vnic_list); pf->num_vnics--; @@ -204,7 +190,7 @@ nfp_net_pf_alloc_vnic(struct nfp_pf *pf, bool needs_netdev, nn->stride_tx = stride; if (needs_netdev) { - err = nfp_app_vnic_init(pf->app, nn, id); + err = nfp_app_vnic_alloc(pf->app, nn, id); if (err) { nfp_net_free(nn); return ERR_PTR(err); @@ -242,8 +228,17 @@ nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id) nfp_net_info(nn); + if (nfp_net_is_data_vnic(nn)) { + err = nfp_app_vnic_init(pf->app, nn); + if (err) + goto err_devlink_port_clean; + } + return 0; +err_devlink_port_clean: + if (nn->port) + nfp_devlink_port_unregister(nn->port); err_dfs_clean: nfp_net_debugfs_dir_clean(&nn->debugfs_dir); nfp_net_clean(nn); @@ -287,11 +282,12 @@ nfp_net_pf_alloc_vnics(struct nfp_pf *pf, void __iomem *ctrl_bar, static void nfp_net_pf_clean_vnic(struct nfp_pf *pf, struct nfp_net *nn) { + if (nfp_net_is_data_vnic(nn)) + nfp_app_vnic_clean(pf->app, nn); if (nn->port) nfp_devlink_port_unregister(nn->port); nfp_net_debugfs_dir_clean(&nn->debugfs_dir); nfp_net_clean(nn); - nfp_app_vnic_clean(pf->app, nn); } static int nfp_net_pf_alloc_irqs(struct nfp_pf *pf) @@ -388,7 +384,7 @@ nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride) NFP_PF_CSR_SLICE_SIZE, &pf->ctrl_vnic_bar); if (IS_ERR(ctrl_bar)) { - nfp_err(pf->cpp, "Failed to find data vNIC memory symbol\n"); + nfp_err(pf->cpp, "Failed to find ctrl vNIC memory symbol\n"); err = PTR_ERR(ctrl_bar); goto err_app_clean; } @@ -456,10 +452,14 @@ static int nfp_net_pf_app_start(struct nfp_pf *pf) { int err; - err = nfp_app_start(pf->app, pf->ctrl_vnic); + err = nfp_net_pf_app_start_ctrl(pf); if (err) return err; + err = nfp_app_start(pf->app, pf->ctrl_vnic); + if (err) + goto err_ctrl_stop; + if (pf->num_vfs) { err = nfp_app_sriov_enable(pf->app, pf->num_vfs); if (err) @@ -470,6 +470,8 @@ static int nfp_net_pf_app_start(struct nfp_pf *pf) err_app_stop: nfp_app_stop(pf->app); +err_ctrl_stop: + nfp_net_pf_app_stop_ctrl(pf); return err; } @@ -478,10 +480,13 @@ static void 
nfp_net_pf_app_stop(struct nfp_pf *pf) if (pf->num_vfs) nfp_app_sriov_disable(pf->app); nfp_app_stop(pf->app); + nfp_net_pf_app_stop_ctrl(pf); } static void nfp_net_pci_unmap_mem(struct nfp_pf *pf) { + if (pf->vfcfg_tbl2_area) + nfp_cpp_area_release_free(pf->vfcfg_tbl2_area); if (pf->vf_cfg_bar) nfp_cpp_area_release_free(pf->vf_cfg_bar); if (pf->mac_stats_bar) @@ -497,7 +502,7 @@ static int nfp_net_pci_map_mem(struct nfp_pf *pf) int err; min_size = pf->max_data_vnics * NFP_PF_CSR_SLICE_SIZE; - mem = nfp_net_pf_map_rtsym(pf, "net.ctrl", "_pf%d_net_bar0", + mem = nfp_net_pf_map_rtsym(pf, "net.bar0", "_pf%d_net_bar0", min_size, &pf->data_vnic_bar); if (IS_ERR(mem)) { nfp_err(pf->cpp, "Failed to find data vNIC memory symbol\n"); @@ -528,17 +533,32 @@ static int nfp_net_pci_map_mem(struct nfp_pf *pf) pf->vf_cfg_mem = NULL; } + min_size = NFP_NET_VF_CFG_SZ * pf->limit_vfs + NFP_NET_VF_CFG_MB_SZ; + pf->vfcfg_tbl2 = nfp_net_pf_map_rtsym(pf, "net.vfcfg_tbl2", + "_pf%d_net_vf_cfg2", + min_size, &pf->vfcfg_tbl2_area); + if (IS_ERR(pf->vfcfg_tbl2)) { + if (PTR_ERR(pf->vfcfg_tbl2) != -ENOENT) { + err = PTR_ERR(pf->vfcfg_tbl2); + goto err_unmap_vf_cfg; + } + pf->vfcfg_tbl2 = NULL; + } + mem = nfp_cpp_map_area(pf->cpp, "net.qc", 0, 0, NFP_PCIE_QUEUE(0), NFP_QCP_QUEUE_AREA_SZ, &pf->qc_area); if (IS_ERR(mem)) { nfp_err(pf->cpp, "Failed to map Queue Controller area.\n"); err = PTR_ERR(mem); - goto err_unmap_vf_cfg; + goto err_unmap_vfcfg_tbl2; } return 0; +err_unmap_vfcfg_tbl2: + if (pf->vfcfg_tbl2_area) + nfp_cpp_area_release_free(pf->vfcfg_tbl2_area); err_unmap_vf_cfg: if (pf->vf_cfg_bar) nfp_cpp_area_release_free(pf->vf_cfg_bar); @@ -552,7 +572,7 @@ static int nfp_net_pci_map_mem(struct nfp_pf *pf) static void nfp_net_pci_remove_finish(struct nfp_pf *pf) { - nfp_net_pf_app_stop_ctrl(pf); + nfp_net_pf_app_stop(pf); /* stop app first, to avoid double free of ctrl vNIC's ddir */ nfp_net_debugfs_dir_clean(&pf->ddir); @@ -683,22 +703,15 @@ int nfp_net_pci_probe(struct nfp_pf *pf) { struct nfp_net_fw_version fw_ver; u8 __iomem *ctrl_bar, *qc_bar; - struct nfp_net *nn; int stride; int err; INIT_WORK(&pf->port_refresh_work, nfp_net_refresh_vnics); - /* Verify that the board has completed initialization */ - if (!nfp_is_ready(pf)) { - nfp_err(pf->cpp, "NFP is not ready for NIC operation.\n"); - return -EINVAL; - } - if (!pf->rtbl) { nfp_err(pf->cpp, "No %s, giving up.\n", pf->fw_loaded ? 
"symbol table" : "firmware found"); - return -EPROBE_DEFER; + return -EINVAL; } mutex_lock(&pf->lock); @@ -760,7 +773,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf) if (err) goto err_free_vnics; - err = nfp_net_pf_app_start_ctrl(pf); + err = nfp_net_pf_app_start(pf); if (err) goto err_free_irqs; @@ -768,20 +781,12 @@ int nfp_net_pci_probe(struct nfp_pf *pf) if (err) goto err_stop_app; - err = nfp_net_pf_app_start(pf); - if (err) - goto err_clean_vnics; - mutex_unlock(&pf->lock); return 0; -err_clean_vnics: - list_for_each_entry(nn, &pf->vnics, vnic_list) - if (nfp_net_is_data_vnic(nn)) - nfp_net_pf_clean_vnic(pf, nn); err_stop_app: - nfp_net_pf_app_stop_ctrl(pf); + nfp_net_pf_app_stop(pf); err_free_irqs: nfp_net_pf_free_irqs(pf); err_free_vnics: @@ -805,8 +810,6 @@ void nfp_net_pci_remove(struct nfp_pf *pf) if (list_empty(&pf->vnics)) goto out; - nfp_net_pf_app_stop(pf); - list_for_each_entry(nn, &pf->vnics, vnic_list) if (nfp_net_is_data_vnic(nn)) nfp_net_pf_clean_vnic(pf, nn); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index 8ec5474f4b18..d540a9dc77b3 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -43,6 +43,7 @@ #include "nfp_main.h" #include "nfp_net_ctrl.h" #include "nfp_net_repr.h" +#include "nfp_net_sriov.h" #include "nfp_port.h" static void @@ -78,12 +79,10 @@ void nfp_repr_inc_rx_stats(struct net_device *netdev, unsigned int len) } static void -nfp_repr_phy_port_get_stats64(const struct nfp_app *app, u8 phy_port, +nfp_repr_phy_port_get_stats64(struct nfp_port *port, struct rtnl_link_stats64 *stats) { - u8 __iomem *mem; - - mem = app->pf->mac_stats_mem + phy_port * NFP_MAC_STATS_SIZE; + u8 __iomem *mem = port->eth_stats; /* TX and RX stats are flipped as we are returning the stats as seen * at the switch port corresponding to the phys port. @@ -98,67 +97,38 @@ nfp_repr_phy_port_get_stats64(const struct nfp_app *app, u8 phy_port, } static void -nfp_repr_vf_get_stats64(const struct nfp_app *app, u8 vf, - struct rtnl_link_stats64 *stats) +nfp_repr_vnic_get_stats64(struct nfp_port *port, + struct rtnl_link_stats64 *stats) { - u8 __iomem *mem; - - mem = app->pf->vf_cfg_mem + vf * NFP_NET_CFG_BAR_SZ; - /* TX and RX stats are flipped as we are returning the stats as seen * at the switch port corresponding to the VF. 
*/ - stats->tx_packets = readq(mem + NFP_NET_CFG_STATS_RX_FRAMES); - stats->tx_bytes = readq(mem + NFP_NET_CFG_STATS_RX_OCTETS); - stats->tx_dropped = readq(mem + NFP_NET_CFG_STATS_RX_DISCARDS); + stats->tx_packets = readq(port->vnic + NFP_NET_CFG_STATS_RX_FRAMES); + stats->tx_bytes = readq(port->vnic + NFP_NET_CFG_STATS_RX_OCTETS); + stats->tx_dropped = readq(port->vnic + NFP_NET_CFG_STATS_RX_DISCARDS); - stats->rx_packets = readq(mem + NFP_NET_CFG_STATS_TX_FRAMES); - stats->rx_bytes = readq(mem + NFP_NET_CFG_STATS_TX_OCTETS); - stats->rx_dropped = readq(mem + NFP_NET_CFG_STATS_TX_DISCARDS); -} - -static void -nfp_repr_pf_get_stats64(const struct nfp_app *app, u8 pf, - struct rtnl_link_stats64 *stats) -{ - u8 __iomem *mem; - - if (pf) - return; - - mem = nfp_cpp_area_iomem(app->pf->data_vnic_bar); - - stats->tx_packets = readq(mem + NFP_NET_CFG_STATS_RX_FRAMES); - stats->tx_bytes = readq(mem + NFP_NET_CFG_STATS_RX_OCTETS); - stats->tx_dropped = readq(mem + NFP_NET_CFG_STATS_RX_DISCARDS); - - stats->rx_packets = readq(mem + NFP_NET_CFG_STATS_TX_FRAMES); - stats->rx_bytes = readq(mem + NFP_NET_CFG_STATS_TX_OCTETS); - stats->rx_dropped = readq(mem + NFP_NET_CFG_STATS_TX_DISCARDS); + stats->rx_packets = readq(port->vnic + NFP_NET_CFG_STATS_TX_FRAMES); + stats->rx_bytes = readq(port->vnic + NFP_NET_CFG_STATS_TX_OCTETS); + stats->rx_dropped = readq(port->vnic + NFP_NET_CFG_STATS_TX_DISCARDS); } static void nfp_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct nfp_repr *repr = netdev_priv(netdev); - struct nfp_eth_table_port *eth_port; - struct nfp_app *app = repr->app; if (WARN_ON(!repr->port)) return; switch (repr->port->type) { case NFP_PORT_PHYS_PORT: - eth_port = __nfp_port_get_eth_port(repr->port); - if (!eth_port) + if (!__nfp_port_get_eth_port(repr->port)) break; - nfp_repr_phy_port_get_stats64(app, eth_port->index, stats); + nfp_repr_phy_port_get_stats64(repr->port, stats); break; case NFP_PORT_PF_PORT: - nfp_repr_pf_get_stats64(app, repr->port->pf_id, stats); - break; case NFP_PORT_VF_PORT: - nfp_repr_vf_get_stats64(app, repr->port->vf_id, stats); + nfp_repr_vnic_get_stats64(repr->port, stats); default: break; } @@ -239,15 +209,34 @@ static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev) static int nfp_repr_stop(struct net_device *netdev) { struct nfp_repr *repr = netdev_priv(netdev); + int err; - return nfp_app_repr_stop(repr->app, repr); + err = nfp_app_repr_stop(repr->app, repr); + if (err) + return err; + + nfp_port_configure(netdev, false); + return 0; } static int nfp_repr_open(struct net_device *netdev) { struct nfp_repr *repr = netdev_priv(netdev); + int err; - return nfp_app_repr_open(repr->app, repr); + err = nfp_port_configure(netdev, true); + if (err) + return err; + + err = nfp_app_repr_open(repr->app, repr); + if (err) + goto err_port_disable; + + return 0; + +err_port_disable: + nfp_port_configure(netdev, false); + return err; } const struct net_device_ops nfp_repr_netdev_ops = { @@ -259,6 +248,11 @@ const struct net_device_ops nfp_repr_netdev_ops = { .ndo_get_offload_stats = nfp_repr_get_offload_stats, .ndo_get_phys_port_name = nfp_port_get_phys_port_name, .ndo_setup_tc = nfp_port_setup_tc, + .ndo_set_vf_mac = nfp_app_set_vf_mac, + .ndo_set_vf_vlan = nfp_app_set_vf_vlan, + .ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk, + .ndo_get_vf_config = nfp_app_get_vf_config, + .ndo_set_vf_link_state = nfp_app_set_vf_link_state, }; static void nfp_repr_clean(struct nfp_repr *repr) @@ -301,6 +295,8 @@ int 
nfp_repr_init(struct nfp_app *app, struct net_device *netdev, repr->dst->u.port_info.lower_dev = pf_netdev; netdev->netdev_ops = &nfp_repr_netdev_ops; + netdev->ethtool_ops = &nfp_port_ethtool_ops; + SWITCHDEV_SET_OPS(netdev, &nfp_port_switchdev_ops); if (nfp_app_has_tc(app)) { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c new file mode 100644 index 000000000000..e6d2e06b050c --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c @@ -0,0 +1,243 @@ +/* + * Copyright (C) 2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
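The ndo_set_vf_*() handlers added below all follow the same shape: check that the firmware advertised the capability (the check also bails out with -EOPNOTSUPP when the vfcfg_tbl2 symbol was never mapped), validate the VF index, write the per-VF entry in the config symbol, then kick the firmware mailbox and relay its return code. A hypothetical skeleton of that flow (example_set_vf_attr and the cap/upd/field parameters are placeholders for the per-attribute constants used by the real handlers):

static int
example_set_vf_attr(struct net_device *netdev, int vf, u16 cap, u16 upd,
		    unsigned int field, u8 value)
{
	struct nfp_app *app = nfp_app_from_netdev(netdev);
	unsigned int vf_offset;
	int err;

	err = nfp_net_sriov_check(app, vf, cap, "attr");
	if (err)
		return err;

	/* per-VF entries start right after the 16-byte mailbox */
	vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ + field;
	writeb(value, app->pf->vfcfg_tbl2 + vf_offset);

	/* tell the firmware which VF changed and wait for its verdict */
	return nfp_net_sriov_update(app, vf, upd, "attr");
}

From userspace these handlers are exercised through the usual ip-link interface, e.g. "ip link set <pf_netdev> vf 0 mac 02:11:22:33:44:55" or "ip link set <pf_netdev> vf 0 spoofchk on".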
+ */ + +#include +#include +#include +#include +#include + +#include "nfpcore/nfp_cpp.h" +#include "nfp_app.h" +#include "nfp_main.h" +#include "nfp_net_ctrl.h" +#include "nfp_net.h" +#include "nfp_net_sriov.h" + +static int +nfp_net_sriov_check(struct nfp_app *app, int vf, u16 cap, const char *msg) +{ + u16 cap_vf; + + if (!app || !app->pf->vfcfg_tbl2) + return -EOPNOTSUPP; + + cap_vf = readw(app->pf->vfcfg_tbl2 + NFP_NET_VF_CFG_MB_CAP); + if ((cap_vf & cap) != cap) { + nfp_warn(app->pf->cpp, "ndo_set_vf_%s not supported\n", msg); + return -EOPNOTSUPP; + } + + if (vf < 0 || vf >= app->pf->num_vfs) { + nfp_warn(app->pf->cpp, "invalid VF id %d\n", vf); + return -EINVAL; + } + + return 0; +} + +static int +nfp_net_sriov_update(struct nfp_app *app, int vf, u16 update, const char *msg) +{ + struct nfp_net *nn; + int ret; + + /* Write update info to mailbox in VF config symbol */ + writeb(vf, app->pf->vfcfg_tbl2 + NFP_NET_VF_CFG_MB_VF_NUM); + writew(update, app->pf->vfcfg_tbl2 + NFP_NET_VF_CFG_MB_UPD); + + nn = list_first_entry(&app->pf->vnics, struct nfp_net, vnic_list); + /* Signal VF reconfiguration */ + ret = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_VF); + if (ret) + return ret; + + ret = readw(app->pf->vfcfg_tbl2 + NFP_NET_VF_CFG_MB_RET); + if (ret) + nfp_warn(app->pf->cpp, + "FW refused VF %s update with errno: %d\n", msg, ret); + return -ret; +} + +int nfp_app_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) +{ + struct nfp_app *app = nfp_app_from_netdev(netdev); + unsigned int vf_offset; + int err; + + err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_MAC, "mac"); + if (err) + return err; + + if (is_multicast_ether_addr(mac)) { + nfp_warn(app->pf->cpp, + "invalid Ethernet address %pM for VF id %d\n", + mac, vf); + return -EINVAL; + } + + /* Write MAC to VF entry in VF config symbol */ + vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ; + writel(get_unaligned_be32(mac), app->pf->vfcfg_tbl2 + vf_offset); + writew(get_unaligned_be16(mac + 4), + app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_MAC_LO); + + return nfp_net_sriov_update(app, vf, NFP_NET_VF_CFG_MB_UPD_MAC, "MAC"); +} + +int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, + __be16 vlan_proto) +{ + struct nfp_app *app = nfp_app_from_netdev(netdev); + unsigned int vf_offset; + u16 vlan_tci; + int err; + + err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN, "vlan"); + if (err) + return err; + + if (vlan_proto != htons(ETH_P_8021Q)) + return -EOPNOTSUPP; + + if (vlan > 4095 || qos > 7) { + nfp_warn(app->pf->cpp, + "invalid vlan id or qos for VF id %d\n", vf); + return -EINVAL; + } + + /* Write VLAN tag to VF entry in VF config symbol */ + vlan_tci = FIELD_PREP(NFP_NET_VF_CFG_VLAN_VID, vlan) | + FIELD_PREP(NFP_NET_VF_CFG_VLAN_QOS, qos); + vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ; + writew(vlan_tci, app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_VLAN); + + return nfp_net_sriov_update(app, vf, NFP_NET_VF_CFG_MB_UPD_VLAN, + "vlan"); +} + +int nfp_app_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable) +{ + struct nfp_app *app = nfp_app_from_netdev(netdev); + unsigned int vf_offset; + u8 vf_ctrl; + int err; + + err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_SPOOF, + "spoofchk"); + if (err) + return err; + + /* Write spoof check control bit to VF entry in VF config symbol */ + vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ + + NFP_NET_VF_CFG_CTRL; + vf_ctrl = readb(app->pf->vfcfg_tbl2 + vf_offset); + vf_ctrl &= 
~NFP_NET_VF_CFG_CTRL_SPOOF; + vf_ctrl |= FIELD_PREP(NFP_NET_VF_CFG_CTRL_SPOOF, enable); + writeb(vf_ctrl, app->pf->vfcfg_tbl2 + vf_offset); + + return nfp_net_sriov_update(app, vf, NFP_NET_VF_CFG_MB_UPD_SPOOF, + "spoofchk"); +} + +int nfp_app_set_vf_link_state(struct net_device *netdev, int vf, + int link_state) +{ + struct nfp_app *app = nfp_app_from_netdev(netdev); + unsigned int vf_offset; + u8 vf_ctrl; + int err; + + err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_LINK_STATE, + "link_state"); + if (err) + return err; + + switch (link_state) { + case IFLA_VF_LINK_STATE_AUTO: + case IFLA_VF_LINK_STATE_ENABLE: + case IFLA_VF_LINK_STATE_DISABLE: + break; + default: + return -EINVAL; + } + + /* Write link state to VF entry in VF config symbol */ + vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ + + NFP_NET_VF_CFG_CTRL; + vf_ctrl = readb(app->pf->vfcfg_tbl2 + vf_offset); + vf_ctrl &= ~NFP_NET_VF_CFG_CTRL_LINK_STATE; + vf_ctrl |= FIELD_PREP(NFP_NET_VF_CFG_CTRL_LINK_STATE, link_state); + writeb(vf_ctrl, app->pf->vfcfg_tbl2 + vf_offset); + + return nfp_net_sriov_update(app, vf, NFP_NET_VF_CFG_MB_UPD_LINK_STATE, + "link state"); +} + +int nfp_app_get_vf_config(struct net_device *netdev, int vf, + struct ifla_vf_info *ivi) +{ + struct nfp_app *app = nfp_app_from_netdev(netdev); + unsigned int vf_offset; + u16 vlan_tci; + u32 mac_hi; + u16 mac_lo; + u8 flags; + int err; + + err = nfp_net_sriov_check(app, vf, 0, ""); + if (err) + return err; + + vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ; + + mac_hi = readl(app->pf->vfcfg_tbl2 + vf_offset); + mac_lo = readw(app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_MAC_LO); + + flags = readb(app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_CTRL); + vlan_tci = readw(app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_VLAN); + + memset(ivi, 0, sizeof(*ivi)); + ivi->vf = vf; + + put_unaligned_be32(mac_hi, &ivi->mac[0]); + put_unaligned_be16(mac_lo, &ivi->mac[4]); + + ivi->vlan = FIELD_GET(NFP_NET_VF_CFG_VLAN_VID, vlan_tci); + ivi->qos = FIELD_GET(NFP_NET_VF_CFG_VLAN_QOS, vlan_tci); + + ivi->spoofchk = FIELD_GET(NFP_NET_VF_CFG_CTRL_SPOOF, flags); + ivi->linkstate = FIELD_GET(NFP_NET_VF_CFG_CTRL_LINK_STATE, flags); + + return 0; +} diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h new file mode 100644 index 000000000000..e9df9d1eab8e --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h @@ -0,0 +1,86 @@ +/* + * Copyright (C) 2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
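For reference, the VLAN tag written by nfp_app_set_vf_vlan() above is packed with the VID in bits 11:0 and the QOS/PCP value in bits 15:13, matching the NFP_NET_VF_CFG_VLAN_* masks defined in this header. A minimal sketch of that packing (example_pack_vlan is a hypothetical helper; FIELD_PREP() comes from linux/bitfield.h):

static u16 example_pack_vlan(u16 vid, u8 qos)
{
	return FIELD_PREP(NFP_NET_VF_CFG_VLAN_VID, vid) |	/* bits 11:0 */
	       FIELD_PREP(NFP_NET_VF_CFG_VLAN_QOS, qos);	/* bits 15:13 */
}

nfp_app_get_vf_config() performs the inverse with FIELD_GET() on the same masks when reporting the configuration back to userspace.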
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _NFP_NET_SRIOV_H_ +#define _NFP_NET_SRIOV_H_ + +/** + * SRIOV VF configuration. + * The configuration memory begins with a mailbox region for communication with + * the firmware followed by individual VF entries. + */ +#define NFP_NET_VF_CFG_SZ 16 +#define NFP_NET_VF_CFG_MB_SZ 16 + +/* VF config mailbox */ +#define NFP_NET_VF_CFG_MB 0x0 +#define NFP_NET_VF_CFG_MB_CAP 0x0 +#define NFP_NET_VF_CFG_MB_CAP_MAC (0x1 << 0) +#define NFP_NET_VF_CFG_MB_CAP_VLAN (0x1 << 1) +#define NFP_NET_VF_CFG_MB_CAP_SPOOF (0x1 << 2) +#define NFP_NET_VF_CFG_MB_CAP_LINK_STATE (0x1 << 3) +#define NFP_NET_VF_CFG_MB_RET 0x2 +#define NFP_NET_VF_CFG_MB_UPD 0x4 +#define NFP_NET_VF_CFG_MB_UPD_MAC (0x1 << 0) +#define NFP_NET_VF_CFG_MB_UPD_VLAN (0x1 << 1) +#define NFP_NET_VF_CFG_MB_UPD_SPOOF (0x1 << 2) +#define NFP_NET_VF_CFG_MB_UPD_LINK_STATE (0x1 << 3) +#define NFP_NET_VF_CFG_MB_VF_NUM 0x7 + +/* VF config entry + * MAC_LO is set that the MAC address can be read in a single 6 byte read + * by the NFP + */ +#define NFP_NET_VF_CFG_MAC 0x0 +#define NFP_NET_VF_CFG_MAC_HI 0x0 +#define NFP_NET_VF_CFG_MAC_LO 0x6 +#define NFP_NET_VF_CFG_CTRL 0x4 +#define NFP_NET_VF_CFG_CTRL_SPOOF 0x4 +#define NFP_NET_VF_CFG_CTRL_LINK_STATE 0x3 +#define NFP_NET_VF_CFG_LS_MODE_AUTO 0 +#define NFP_NET_VF_CFG_LS_MODE_ENABLE 1 +#define NFP_NET_VF_CFG_LS_MODE_DISABLE 2 +#define NFP_NET_VF_CFG_VLAN 0x8 +#define NFP_NET_VF_CFG_VLAN_QOS 0xe000 +#define NFP_NET_VF_CFG_VLAN_VID 0x0fff + +int nfp_app_set_vf_mac(struct net_device *netdev, int vf, u8 *mac); +int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, + __be16 vlan_proto); +int nfp_app_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); +int nfp_app_set_vf_link_state(struct net_device *netdev, int vf, + int link_state); +int nfp_app_get_vf_config(struct net_device *netdev, int vf, + struct ifla_vf_info *ivi); + +#endif /* _NFP_NET_SRIOV_H_ */ diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.c b/drivers/net/ethernet/netronome/nfp/nfp_port.c index e42644dbb865..34a6e035fe9a 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.c @@ -88,19 +88,16 @@ const struct switchdev_ops nfp_port_switchdev_ops = { .switchdev_port_attr_get = nfp_port_attr_get, }; -int nfp_port_setup_tc(struct net_device *netdev, u32 handle, u32 chain_index, - __be16 proto, struct tc_to_netdev *tc) +int nfp_port_setup_tc(struct net_device *netdev, enum tc_setup_type type, + void *type_data) { struct nfp_port *port; - if (chain_index) - return -EOPNOTSUPP; - port = nfp_port_from_netdev(netdev); if (!port) return -EOPNOTSUPP; - return nfp_app_setup_tc(port->app, netdev, handle, proto, tc); + return nfp_app_setup_tc(port->app, netdev, type, type_data); } struct nfp_port * @@ -181,6 +178,33 @@ nfp_port_get_phys_port_name(struct net_device *netdev, char *name, size_t len) return 0; } +/** + * nfp_port_configure() - helper to set the interface configured bit + * @netdev: net_device instance + * @configed: Desired state + * + * Helper to 
set the ifup/ifdown state on the PHY only if there is a physical + * interface associated with the netdev. + * + * Return: + * 0 - configuration successful (or no change); + * -ERRNO - configuration failed. + */ +int nfp_port_configure(struct net_device *netdev, bool configed) +{ + struct nfp_eth_table_port *eth_port; + struct nfp_port *port; + int err; + + port = nfp_port_from_netdev(netdev); + eth_port = __nfp_port_get_eth_port(port); + if (!eth_port) + return 0; + + err = nfp_eth_set_configured(port->app->cpp, eth_port->index, configed); + return err < 0 && err != -EOPNOTSUPP ? err : 0; +} + int nfp_port_init_phy_port(struct nfp_pf *pf, struct nfp_app *app, struct nfp_port *port, unsigned int id) { @@ -201,6 +225,9 @@ int nfp_port_init_phy_port(struct nfp_pf *pf, struct nfp_app *app, port->eth_port = &pf->eth_tbl->ports[id]; port->eth_id = pf->eth_tbl->ports[id].index; + if (pf->mac_stats_mem) + port->eth_stats = + pf->mac_stats_mem + port->eth_id * NFP_MAC_STATS_SIZE; return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h index a33d22e18f94..51dcb9c603ee 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h @@ -36,7 +36,6 @@ #include -struct tc_to_netdev; struct net_device; struct nfp_app; struct nfp_pf; @@ -77,8 +76,10 @@ enum nfp_port_flags { * @dl_port: devlink port structure * @eth_id: for %NFP_PORT_PHYS_PORT port ID in NFP enumeration scheme * @eth_port: for %NFP_PORT_PHYS_PORT translated ETH Table port entry + * @eth_stats: for %NFP_PORT_PHYS_PORT MAC stats if available * @pf_id: for %NFP_PORT_PF_PORT, %NFP_PORT_VF_PORT ID of the PCI PF (0-3) * @vf_id: for %NFP_PORT_VF_PORT ID of the PCI VF within @pf_id + * @vnic: for %NFP_PORT_PF_PORT, %NFP_PORT_VF_PORT vNIC ctrl memory * @port_list: entry on pf's list of ports */ struct nfp_port { @@ -96,21 +97,29 @@ struct nfp_port { struct { unsigned int eth_id; struct nfp_eth_table_port *eth_port; + u8 __iomem *eth_stats; }; /* NFP_PORT_PF_PORT, NFP_PORT_VF_PORT */ struct { unsigned int pf_id; unsigned int vf_id; + u8 __iomem *vnic; }; }; struct list_head port_list; }; +extern const struct ethtool_ops nfp_port_ethtool_ops; extern const struct switchdev_ops nfp_port_switchdev_ops; -int nfp_port_setup_tc(struct net_device *netdev, u32 handle, u32 chain_index, - __be16 proto, struct tc_to_netdev *tc); +int nfp_port_setup_tc(struct net_device *netdev, enum tc_setup_type type, + void *type_data); + +static inline bool nfp_port_is_vnic(const struct nfp_port *port) +{ + return port->type == NFP_PORT_PF_PORT || port->type == NFP_PORT_VF_PORT; +} struct nfp_port *nfp_port_from_netdev(struct net_device *netdev); struct nfp_port * @@ -120,6 +129,7 @@ struct nfp_eth_table_port *nfp_port_get_eth_port(struct nfp_port *port); int nfp_port_get_phys_port_name(struct net_device *netdev, char *name, size_t len); +int nfp_port_configure(struct net_device *netdev, bool configed); struct nfp_port * nfp_port_alloc(struct nfp_app *app, enum nfp_port_type type, @@ -144,31 +154,32 @@ void nfp_devlink_port_unregister(struct nfp_port *port); #define NFP_MAC_STATS_SIZE 0x0200 #define NFP_MAC_STATS_RX_IN_OCTETS (NFP_MAC_STATS_BASE + 0x000) + /* unused 0x008 */ #define NFP_MAC_STATS_RX_FRAME_TOO_LONG_ERRORS (NFP_MAC_STATS_BASE + 0x010) #define NFP_MAC_STATS_RX_RANGE_LENGTH_ERRORS (NFP_MAC_STATS_BASE + 0x018) #define NFP_MAC_STATS_RX_VLAN_REVEIVE_OK (NFP_MAC_STATS_BASE + 0x020) #define NFP_MAC_STATS_RX_IN_ERRORS (NFP_MAC_STATS_BASE + 0x028) #define 
NFP_MAC_STATS_RX_IN_BROADCAST_PKTS (NFP_MAC_STATS_BASE + 0x030) -#define NFP_MAC_STATS_RX_STATS_DROP_EVENTS (NFP_MAC_STATS_BASE + 0x038) +#define NFP_MAC_STATS_RX_DROP_EVENTS (NFP_MAC_STATS_BASE + 0x038) #define NFP_MAC_STATS_RX_ALIGNMENT_ERRORS (NFP_MAC_STATS_BASE + 0x040) #define NFP_MAC_STATS_RX_PAUSE_MAC_CTRL_FRAMES (NFP_MAC_STATS_BASE + 0x048) #define NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK (NFP_MAC_STATS_BASE + 0x050) #define NFP_MAC_STATS_RX_FRAME_CHECK_SEQUENCE_ERRORS (NFP_MAC_STATS_BASE + 0x058) #define NFP_MAC_STATS_RX_UNICAST_PKTS (NFP_MAC_STATS_BASE + 0x060) #define NFP_MAC_STATS_RX_MULTICAST_PKTS (NFP_MAC_STATS_BASE + 0x068) -#define NFP_MAC_STATS_RX_STATS_PKTS (NFP_MAC_STATS_BASE + 0x070) -#define NFP_MAC_STATS_RX_STATS_UNDERSIZE_PKTS (NFP_MAC_STATS_BASE + 0x078) -#define NFP_MAC_STATS_RX_STATS_PKTS_64_OCTETS (NFP_MAC_STATS_BASE + 0x080) -#define NFP_MAC_STATS_RX_STATS_PKTS_65_TO_127_OCTETS (NFP_MAC_STATS_BASE + 0x088) -#define NFP_MAC_STATS_RX_STATS_PKTS_512_TO_1023_OCTETS (NFP_MAC_STATS_BASE + 0x090) -#define NFP_MAC_STATS_RX_STATS_PKTS_1024_TO_1518_OCTETS (NFP_MAC_STATS_BASE + 0x098) -#define NFP_MAC_STATS_RX_STATS_JABBERS (NFP_MAC_STATS_BASE + 0x0a0) -#define NFP_MAC_STATS_RX_STATS_FRAGMENTS (NFP_MAC_STATS_BASE + 0x0a8) +#define NFP_MAC_STATS_RX_PKTS (NFP_MAC_STATS_BASE + 0x070) +#define NFP_MAC_STATS_RX_UNDERSIZE_PKTS (NFP_MAC_STATS_BASE + 0x078) +#define NFP_MAC_STATS_RX_PKTS_64_OCTETS (NFP_MAC_STATS_BASE + 0x080) +#define NFP_MAC_STATS_RX_PKTS_65_TO_127_OCTETS (NFP_MAC_STATS_BASE + 0x088) +#define NFP_MAC_STATS_RX_PKTS_512_TO_1023_OCTETS (NFP_MAC_STATS_BASE + 0x090) +#define NFP_MAC_STATS_RX_PKTS_1024_TO_1518_OCTETS (NFP_MAC_STATS_BASE + 0x098) +#define NFP_MAC_STATS_RX_JABBERS (NFP_MAC_STATS_BASE + 0x0a0) +#define NFP_MAC_STATS_RX_FRAGMENTS (NFP_MAC_STATS_BASE + 0x0a8) #define NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS2 (NFP_MAC_STATS_BASE + 0x0b0) #define NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS3 (NFP_MAC_STATS_BASE + 0x0b8) -#define NFP_MAC_STATS_RX_STATS_PKTS_128_TO_255_OCTETS (NFP_MAC_STATS_BASE + 0x0c0) -#define NFP_MAC_STATS_RX_STATS_PKTS_256_TO_511_OCTETS (NFP_MAC_STATS_BASE + 0x0c8) -#define NFP_MAC_STATS_RX_STATS_PKTS_1519_TO_MAX_OCTETS (NFP_MAC_STATS_BASE + 0x0d0) +#define NFP_MAC_STATS_RX_PKTS_128_TO_255_OCTETS (NFP_MAC_STATS_BASE + 0x0c0) +#define NFP_MAC_STATS_RX_PKTS_256_TO_511_OCTETS (NFP_MAC_STATS_BASE + 0x0c8) +#define NFP_MAC_STATS_RX_PKTS_1519_TO_MAX_OCTETS (NFP_MAC_STATS_BASE + 0x0d0) #define NFP_MAC_STATS_RX_OVERSIZE_PKTS (NFP_MAC_STATS_BASE + 0x0d8) #define NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS0 (NFP_MAC_STATS_BASE + 0x0e0) #define NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS1 (NFP_MAC_STATS_BASE + 0x0e8) @@ -178,9 +189,12 @@ void nfp_devlink_port_unregister(struct nfp_port *port); #define NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS7 (NFP_MAC_STATS_BASE + 0x108) #define NFP_MAC_STATS_RX_MAC_CTRL_FRAMES_RECEIVED (NFP_MAC_STATS_BASE + 0x110) #define NFP_MAC_STATS_RX_MAC_HEAD_DROP (NFP_MAC_STATS_BASE + 0x118) - + /* unused 0x120 */ + /* unused 0x128 */ + /* unused 0x130 */ #define NFP_MAC_STATS_TX_QUEUE_DROP (NFP_MAC_STATS_BASE + 0x138) #define NFP_MAC_STATS_TX_OUT_OCTETS (NFP_MAC_STATS_BASE + 0x140) + /* unused 0x148 */ #define NFP_MAC_STATS_TX_VLAN_TRANSMITTED_OK (NFP_MAC_STATS_BASE + 0x150) #define NFP_MAC_STATS_TX_OUT_ERRORS (NFP_MAC_STATS_BASE + 0x158) #define NFP_MAC_STATS_TX_BROADCAST_PKTS (NFP_MAC_STATS_BASE + 0x160) @@ -192,8 +206,16 @@ void nfp_devlink_port_unregister(struct nfp_port *port); #define NFP_MAC_STATS_TX_UNICAST_PKTS (NFP_MAC_STATS_BASE + 0x190) #define 
NFP_MAC_STATS_TX_MULTICAST_PKTS (NFP_MAC_STATS_BASE + 0x198) #define NFP_MAC_STATS_TX_PKTS_65_TO_127_OCTETS (NFP_MAC_STATS_BASE + 0x1a0) -#define NFP_MAC_STATS_TX_PKTS_127_TO_512_OCTETS (NFP_MAC_STATS_BASE + 0x1a8) -#define NFP_MAC_STATS_TX_PKTS_128_TO_1518_OCTETS (NFP_MAC_STATS_BASE + 0x1b0) -#define NFP_MAC_STATS_TX_PKTS_1518_TO_MAX_OCTETS (NFP_MAC_STATS_BASE + 0x1b8) +#define NFP_MAC_STATS_TX_PKTS_128_TO_255_OCTETS (NFP_MAC_STATS_BASE + 0x1a8) +#define NFP_MAC_STATS_TX_PKTS_1024_TO_1518_OCTETS (NFP_MAC_STATS_BASE + 0x1b0) +#define NFP_MAC_STATS_TX_PKTS_1519_TO_MAX_OCTETS (NFP_MAC_STATS_BASE + 0x1b8) +#define NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS0 (NFP_MAC_STATS_BASE + 0x1c0) +#define NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS1 (NFP_MAC_STATS_BASE + 0x1c8) +#define NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS4 (NFP_MAC_STATS_BASE + 0x1d0) +#define NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS5 (NFP_MAC_STATS_BASE + 0x1d8) +#define NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS2 (NFP_MAC_STATS_BASE + 0x1e0) +#define NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS3 (NFP_MAC_STATS_BASE + 0x1e8) +#define NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS6 (NFP_MAC_STATS_BASE + 0x1f0) +#define NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS7 (NFP_MAC_STATS_BASE + 0x1f8) #endif diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h index 1a8d04a1e113..3ce51f03126f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h @@ -97,6 +97,8 @@ nfp_resource_acquire(struct nfp_cpp *cpp, const char *name); void nfp_resource_release(struct nfp_resource *res); +int nfp_resource_wait(struct nfp_cpp *cpp, const char *name, unsigned int secs); + u32 nfp_resource_cpp_id(struct nfp_resource *res); const char *nfp_resource_name(struct nfp_resource *res); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c index c2bc36e8649f..f6f7c085f8e0 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c @@ -391,7 +391,10 @@ int nfp_eth_config_commit_end(struct nfp_nsp *nsp) * Enable or disable PHY module (this usually means setting the TX lanes * disable bits). * - * Return: 0 or -ERRNO. + * Return: + * 0 - configuration successful; + * 1 - no changes were needed; + * -ERRNO - configuration failed. */ int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, bool enable) { @@ -427,7 +430,10 @@ int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, bool enable) * * Set the ifup/ifdown state on the PHY. * - * Return: 0 or -ERRNO. + * Return: + * 0 - configuration successful; + * 1 - no changes were needed; + * -ERRNO - configuration failed. */ int nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx, bool configed) { @@ -439,6 +445,14 @@ int nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx, bool configed) if (IS_ERR(nsp)) return PTR_ERR(nsp); + /* Older ABI versions did support this feature, however this has only + * been reliable since ABI 20. 
+ */ + if (nfp_nsp_get_abi_ver_minor(nsp) < 20) { + nfp_eth_config_cleanup_end(nsp); + return -EOPNOTSUPP; + } + entries = nfp_nsp_config_entries(nsp); /* Check if we are already in requested state */ diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c index 072612263dab..b1dd13ff282b 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c @@ -249,6 +249,51 @@ void nfp_resource_release(struct nfp_resource *res) kfree(res); } +/** + * nfp_resource_wait() - Wait for resource to appear + * @cpp: NFP CPP handle + * @name: Name of the resource + * @secs: Number of seconds to wait + * + * Wait for resource to appear in the resource table, grab and release + * its lock. The wait is jiffies-based, don't expect fine granularity. + * + * Return: 0 on success, errno otherwise. + */ +int nfp_resource_wait(struct nfp_cpp *cpp, const char *name, unsigned int secs) +{ + unsigned long warn_at = jiffies + NFP_MUTEX_WAIT_FIRST_WARN * HZ; + unsigned long err_at = jiffies + secs * HZ; + struct nfp_resource *res; + + while (true) { + res = nfp_resource_acquire(cpp, name); + if (!IS_ERR(res)) { + nfp_resource_release(res); + return 0; + } + + if (PTR_ERR(res) != -ENOENT) { + nfp_err(cpp, "error waiting for resource %s: %ld\n", + name, PTR_ERR(res)); + return PTR_ERR(res); + } + if (time_is_before_eq_jiffies(err_at)) { + nfp_err(cpp, "timeout waiting for resource %s\n", name); + return -ETIMEDOUT; + } + if (time_is_before_eq_jiffies(warn_at)) { + warn_at = jiffies + NFP_MUTEX_WAIT_NEXT_WARN * HZ; + nfp_info(cpp, "waiting for NFP resource %s\n", name); + } + if (msleep_interruptible(10)) { + nfp_err(cpp, "wait for resource %s interrupted\n", + name); + return -ERESTARTSYS; + } + } +} + /** * nfp_resource_cpp_id() - Return the cpp_id of a resource handle * @res: NFP Resource handle diff --git a/drivers/net/ethernet/netronome/nfp/nic/main.c b/drivers/net/ethernet/netronome/nfp/nic/main.c index 520684242b7d..d5b587fccaa3 100644 --- a/drivers/net/ethernet/netronome/nfp/nic/main.c +++ b/drivers/net/ethernet/netronome/nfp/nic/main.c @@ -49,10 +49,22 @@ static int nfp_nic_init(struct nfp_app *app) return 0; } +static int nfp_nic_sriov_enable(struct nfp_app *app, int num_vfs) +{ + return 0; +} + +static void nfp_nic_sriov_disable(struct nfp_app *app) +{ +} + const struct nfp_app_type app_nic = { .id = NFP_APP_CORE_NIC, .name = "nic", .init = nfp_nic_init, - .vnic_init = nfp_app_nic_vnic_init, + .vnic_alloc = nfp_app_nic_vnic_alloc, + + .sriov_enable = nfp_nic_sriov_enable, + .sriov_disable = nfp_nic_sriov_disable, }; diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c index 89ab786da25f..4a67c55aa9f1 100644 --- a/drivers/net/ethernet/nuvoton/w90p910_ether.c +++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c @@ -11,6 +11,7 @@ #include #include +#include #include #include #include diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index aa912f43e15f..994a83a1f0a5 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -5629,9 +5629,8 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) setup_timer(&np->oom_kick, nv_do_rx_refill, (unsigned long)dev); setup_timer(&np->nic_poll, nv_do_nic_poll, (unsigned long)dev); - init_timer_deferrable(&np->stats_poll); - np->stats_poll.data = (unsigned long) dev; - 
np->stats_poll.function = nv_do_stats_poll; /* timer handler */ + setup_deferrable_timer(&np->stats_poll, nv_do_stats_poll, + (unsigned long)dev); err = pci_enable_device(pci_dev); if (err) diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 827de838389f..f2e8de607119 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -2828,7 +2828,7 @@ netxen_show_bridged_mode(struct device *dev, return sprintf(buf, "%d\n", bridged_mode); } -static struct device_attribute dev_attr_bridged_mode = { +static const struct device_attribute dev_attr_bridged_mode = { .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)}, .show = netxen_show_bridged_mode, .store = netxen_store_bridged_mode, @@ -2860,7 +2860,7 @@ netxen_show_diag_mode(struct device *dev, !!(adapter->flags & NETXEN_NIC_DIAG_ENABLED)); } -static struct device_attribute dev_attr_diag_mode = { +static const struct device_attribute dev_attr_diag_mode = { .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)}, .show = netxen_show_diag_mode, .store = netxen_store_diag_mode, diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index eaca4578435d..8f6ccc0c39e5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -1244,7 +1244,6 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn, if (!dcbx_info) return -ENOMEM; - memset(dcbx_info, 0, sizeof(*dcbx_info)); rc = qed_dcbx_query_params(p_hwfn, dcbx_info, QED_DCBX_OPERATIONAL_MIB); if (rc) { kfree(dcbx_info); diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 6c87bed13bd2..58a689fb04db 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -1684,6 +1684,8 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) "Load request was sent. Load code: 0x%x\n", load_code); + qed_mcp_set_capabilities(p_hwfn, p_hwfn->p_main_ptt); + qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt); p_hwfn->first_on_engine = (load_code == @@ -2472,6 +2474,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities; u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg; + struct qed_mcp_link_capabilities *p_caps; struct qed_mcp_link_params *link; /* Read global nvm_cfg address */ @@ -2534,6 +2537,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) /* Read default link configuration */ link = &p_hwfn->mcp_info->link_input; + p_caps = &p_hwfn->mcp_info->link_capabilities; port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]); link_temp = qed_rd(p_hwfn, p_ptt, @@ -2588,10 +2592,45 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX); link->loopback_mode = 0; - DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, - "Read default link: Speed 0x%08x, Adv. 
Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n", - link->speed.forced_speed, link->speed.advertised_speeds, - link->speed.autoneg, link->pause.autoneg); + if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) { + link_temp = qed_rd(p_hwfn, p_ptt, port_cfg_addr + + offsetof(struct nvm_cfg1_port, ext_phy)); + link_temp &= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK; + link_temp >>= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET; + p_caps->default_eee = QED_MCP_EEE_ENABLED; + link->eee.enable = true; + switch (link_temp) { + case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED: + p_caps->default_eee = QED_MCP_EEE_DISABLED; + link->eee.enable = false; + break; + case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED: + p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_BALANCED_TIME; + break; + case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE: + p_caps->eee_lpi_timer = + EEE_TX_TIMER_USEC_AGGRESSIVE_TIME; + break; + case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY: + p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_LATENCY_TIME; + break; + } + + link->eee.tx_lpi_timer = p_caps->eee_lpi_timer; + link->eee.tx_lpi_enable = link->eee.enable; + link->eee.adv_caps = QED_EEE_1G_ADV | QED_EEE_10G_ADV; + } else { + p_caps->default_eee = QED_MCP_EEE_UNSUPPORTED; + } + + DP_VERBOSE(p_hwfn, + NETIF_MSG_LINK, + "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x EEE: %02x [%08x usec]\n", + link->speed.forced_speed, + link->speed.advertised_speeds, + link->speed.autoneg, + link->pause.autoneg, + p_caps->default_eee, p_caps->eee_lpi_timer); /* Read Multi-function information from shmem */ addr = MCP_REG_SCRATCH + nvm_cfg1_offset + @@ -2751,6 +2790,27 @@ static void qed_hw_info_port_num(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) qed_hw_info_port_num_ah(p_hwfn, p_ptt); } +static void qed_get_eee_caps(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_mcp_link_capabilities *p_caps; + u32 eee_status; + + p_caps = &p_hwfn->mcp_info->link_capabilities; + if (p_caps->default_eee == QED_MCP_EEE_UNSUPPORTED) + return; + + p_caps->eee_speed_caps = 0; + eee_status = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, eee_status)); + eee_status = (eee_status & EEE_SUPPORTED_SPEED_MASK) >> + EEE_SUPPORTED_SPEED_OFFSET; + + if (eee_status & EEE_1G_SUPPORTED) + p_caps->eee_speed_caps |= QED_EEE_1G_ADV; + if (eee_status & EEE_10G_ADV) + p_caps->eee_speed_caps |= QED_EEE_10G_ADV; +} + static int qed_get_hw_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -2767,6 +2827,8 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn, qed_hw_info_port_num(p_hwfn, p_ptt); + qed_mcp_get_capabilities(p_hwfn, p_ptt); + qed_hw_get_nvm_info(p_hwfn, p_ptt); rc = qed_int_igu_read_cam(p_hwfn, p_ptt); @@ -2785,6 +2847,8 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn, p_hwfn->mcp_info->func_info.ovlan; qed_mcp_cmd_port_init(p_hwfn, p_ptt); + + qed_get_eee_caps(p_hwfn, p_ptt); } if (qed_mcp_is_init(p_hwfn)) { @@ -3630,7 +3694,7 @@ static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, } p_coal_timeset = p_eth_qzone; - memset(p_coal_timeset, 0, eth_qzone_size); + memset(p_eth_qzone, 0, eth_qzone_size); SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset); SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1); qed_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size); @@ -3638,12 +3702,46 @@ static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, return 0; } -int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct 
qed_ptt *p_ptt, - u16 coalesce, u16 qid, u16 sb_id) +int qed_set_queue_coalesce(u16 rx_coal, u16 tx_coal, void *p_handle) +{ + struct qed_queue_cid *p_cid = p_handle; + struct qed_hwfn *p_hwfn; + struct qed_ptt *p_ptt; + int rc = 0; + + p_hwfn = p_cid->p_owner; + + if (IS_VF(p_hwfn->cdev)) + return qed_vf_pf_set_coalesce(p_hwfn, rx_coal, tx_coal, p_cid); + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EAGAIN; + + if (rx_coal) { + rc = qed_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid); + if (rc) + goto out; + p_hwfn->cdev->rx_coalesce_usecs = rx_coal; + } + + if (tx_coal) { + rc = qed_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, p_cid); + if (rc) + goto out; + p_hwfn->cdev->tx_coalesce_usecs = tx_coal; + } +out: + qed_ptt_release(p_hwfn, p_ptt); + return rc; +} + +int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 coalesce, struct qed_queue_cid *p_cid) { struct ustorm_eth_queue_zone eth_qzone; u8 timeset, timer_res; - u16 fw_qid = 0; u32 address; int rc; @@ -3660,32 +3758,29 @@ int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, } timeset = (u8)(coalesce >> timer_res); - rc = qed_fw_l2_queue(p_hwfn, qid, &fw_qid); - if (rc) - return rc; - - rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, false); + rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, + p_cid->sb_igu_id, false); if (rc) goto out; - address = BAR0_MAP_REG_USDM_RAM + USTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid); + address = BAR0_MAP_REG_USDM_RAM + + USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); rc = qed_set_coalesce(p_hwfn, p_ptt, address, ð_qzone, sizeof(struct ustorm_eth_queue_zone), timeset); if (rc) goto out; - p_hwfn->cdev->rx_coalesce_usecs = coalesce; out: return rc; } -int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u16 coalesce, u16 qid, u16 sb_id) +int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 coalesce, struct qed_queue_cid *p_cid) { struct xstorm_eth_queue_zone eth_qzone; u8 timeset, timer_res; - u16 fw_qid = 0; u32 address; int rc; @@ -3702,22 +3797,16 @@ int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, } timeset = (u8)(coalesce >> timer_res); - rc = qed_fw_l2_queue(p_hwfn, qid, &fw_qid); - if (rc) - return rc; - - rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, true); + rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, + p_cid->sb_igu_id, true); if (rc) goto out; - address = BAR0_MAP_REG_XSDM_RAM + XSTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid); + address = BAR0_MAP_REG_XSDM_RAM + + XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); rc = qed_set_coalesce(p_hwfn, p_ptt, address, ð_qzone, sizeof(struct xstorm_eth_queue_zone), timeset); - if (rc) - goto out; - - p_hwfn->cdev->tx_coalesce_usecs = coalesce; out: return rc; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h index 1f1df1bf127c..defdda1ffaa2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -443,38 +443,35 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 id, bool is_vf); /** - * @brief qed_set_rxq_coalesce - Configure coalesce parameters for an Rx queue - * The fact that we can configure coalescing to up to 511, but on varying - * accuracy [the bigger the value the less accurate] up to a mistake of 3usec - * for the highest values. + * @brief qed_get_queue_coalesce - Retrieve coalesce value for a given queue. 
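
As an illustration of the handle-based interface documented in these kernel-doc blocks, a minimal caller sketch (not part of the patch; it reuses the qede fastpath structures touched later in this series, and the helper name is made up):

static int example_tune_rx_coalesce(struct qede_dev *edev, int queue, u16 usecs)
{
	struct qede_fastpath *fp = &edev->fp_array[queue];
	u16 cur = 0;
	int rc;

	/* The opaque handle is the queue cid stored when the queue is started */
	if (!(fp->type & QEDE_FASTPATH_RX))
		return -EINVAL;

	rc = edev->ops->get_coalesce(edev->cdev, &cur, fp->rxq->handle);
	if (rc)
		return rc;
	if (cur == usecs)
		return 0;

	/* Passing 0 for either direction leaves that direction untouched */
	return edev->ops->common->set_coalesce(edev->cdev, usecs, 0,
					       fp->rxq->handle);
}

Passing the queue cid instead of a (qid, sb_id) pair lets the same call path serve PF and VF queues, which is what the CHANNEL_TLV_COALESCE_UPDATE/CHANNEL_TLV_COALESCE_READ mailbox messages further below rely on.
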
* * @param p_hwfn - * @param p_ptt - * @param coalesce - Coalesce value in micro seconds. - * @param qid - Queue index. - * @param qid - SB Id + * @param p_coal - store coalesce value read from the hardware. + * @param p_handle * * @return int - */ -int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u16 coalesce, u16 qid, u16 sb_id); + **/ +int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *coal, void *handle); /** - * @brief qed_set_txq_coalesce - Configure coalesce parameters for a Tx queue - * While the API allows setting coalescing per-qid, all tx queues sharing a - * SB should be in same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff] - * otherwise configuration would break. + * @brief qed_set_queue_coalesce - Configure coalesce parameters for Rx and + * Tx queue. The fact that we can configure coalescing to up to 511, but on + * varying accuracy [the bigger the value the less accurate] up to a mistake + * of 3usec for the highest values. + * While the API allows setting coalescing per-qid, all queues sharing a SB + * should be in same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff] + * otherwise configuration would break. * - * @param p_hwfn - * @param p_ptt - * @param coalesce - Coalesce value in micro seconds. - * @param qid - Queue index. - * @param qid - SB Id + * + * @param rx_coal - Rx Coalesce value in micro seconds. + * @param tx_coal - TX Coalesce value in micro seconds. + * @param p_handle * * @return int - */ -int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u16 coalesce, u16 qid, u16 sb_id); + **/ +int +qed_set_queue_coalesce(u16 rx_coal, u16 tx_coal, void *p_handle); + const char *qed_hw_get_resc_name(enum qed_resources res_id); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 31fb0bffa098..3427fe7049b5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -10825,6 +10825,17 @@ struct eth_phy_cfg { #define ETH_LOOPBACK_EXT (3) #define ETH_LOOPBACK_MAC (4) + u32 eee_cfg; +#define EEE_CFG_EEE_ENABLED BIT(0) +#define EEE_CFG_TX_LPI BIT(1) +#define EEE_CFG_ADV_SPEED_1G BIT(2) +#define EEE_CFG_ADV_SPEED_10G BIT(3) +#define EEE_TX_TIMER_USEC_MASK (0xfffffff0) +#define EEE_TX_TIMER_USEC_OFFSET 4 +#define EEE_TX_TIMER_USEC_BALANCED_TIME (0xa00) +#define EEE_TX_TIMER_USEC_AGGRESSIVE_TIME (0x100) +#define EEE_TX_TIMER_USEC_LATENCY_TIME (0x6000) + u32 feature_config_flags; #define ETH_EEE_MODE_ADV_LPI (1 << 0) }; @@ -11242,6 +11253,25 @@ struct public_port { u32 wol_pkt_len; u32 wol_pkt_details; struct dcb_dscp_map dcb_dscp_map; + + u32 eee_status; +#define EEE_ACTIVE_BIT BIT(0) +#define EEE_LD_ADV_STATUS_MASK 0x000000f0 +#define EEE_LD_ADV_STATUS_OFFSET 4 +#define EEE_1G_ADV BIT(1) +#define EEE_10G_ADV BIT(2) +#define EEE_LP_ADV_STATUS_MASK 0x00000f00 +#define EEE_LP_ADV_STATUS_OFFSET 8 +#define EEE_SUPPORTED_SPEED_MASK 0x0000f000 +#define EEE_SUPPORTED_SPEED_OFFSET 12 +#define EEE_1G_SUPPORTED BIT(1) +#define EEE_10G_SUPPORTED BIT(2) + + u32 eee_remote; +#define EEE_REMOTE_TW_TX_MASK 0x0000ffff +#define EEE_REMOTE_TW_TX_OFFSET 0 +#define EEE_REMOTE_TW_RX_MASK 0xffff0000 +#define EEE_REMOTE_TW_RX_OFFSET 16 }; struct public_func { @@ -11570,6 +11600,9 @@ struct public_drv_mb { #define DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL 0x002b0000 #define DRV_MSG_CODE_OS_WOL 0x002e0000 +#define DRV_MSG_CODE_FEATURE_SUPPORT 0x00300000 +#define DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT 0x00310000 + #define 
DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff u32 drv_mb_param; @@ -11653,6 +11686,10 @@ struct public_drv_mb { #define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT 8 #define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_MASK 0x0000FF00 +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_MASK 0x0000FFFF +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_OFFSET 0 +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002 + u32 fw_mb_header; #define FW_MSG_CODE_MASK 0xffff0000 #define FW_MSG_CODE_UNSUPPORTED 0x00000000 @@ -11696,6 +11733,9 @@ struct public_drv_mb { #define FW_MB_PARAM_GET_PF_RDMA_IWARP 0x2 #define FW_MB_PARAM_GET_PF_RDMA_BOTH 0x3 +/* get MFW feature support response */ +#define FW_MB_PARAM_FEATURE_SUPPORT_EEE 0x00000002 + #define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR (1 << 0) u32 drv_pulse_mb; @@ -11891,7 +11931,16 @@ struct nvm_cfg1_port { #define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4 u32 phy_cfg; u32 mgmt_traffic; + u32 ext_phy; + /* EEE power saving mode */ +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK 0x00FF0000 +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET 16 +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED 0x0 +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED 0x1 +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE 0x2 +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY 0x3 + u32 mba_cfg1; u32 mba_cfg2; u32 vf_cfg; diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 0ba5ec8a9814..085338990f49 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -2047,6 +2047,106 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, return qed_spq_post(p_hwfn, p_ent, NULL); } +int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_queue_cid *p_cid, u16 *p_rx_coal) +{ + u32 coalesce, address, is_valid; + struct cau_sb_entry sb_entry; + u8 timer_res; + int rc; + + rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY + + p_cid->sb_igu_id * sizeof(u64), + (u64)(uintptr_t)&sb_entry, 2, 0); + if (rc) { + DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc); + return rc; + } + + timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0); + + address = BAR0_MAP_REG_USDM_RAM + + USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); + coalesce = qed_rd(p_hwfn, p_ptt, address); + + is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID); + if (!is_valid) + return -EINVAL; + + coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET); + *p_rx_coal = (u16)(coalesce << timer_res); + + return 0; +} + +int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_queue_cid *p_cid, u16 *p_tx_coal) +{ + u32 coalesce, address, is_valid; + struct cau_sb_entry sb_entry; + u8 timer_res; + int rc; + + rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY + + p_cid->sb_igu_id * sizeof(u64), + (u64)(uintptr_t)&sb_entry, 2, 0); + if (rc) { + DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc); + return rc; + } + + timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1); + + address = BAR0_MAP_REG_XSDM_RAM + + XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); + coalesce = qed_rd(p_hwfn, p_ptt, address); + + is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID); + if (!is_valid) + return -EINVAL; + + coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET); + *p_tx_coal = (u16)(coalesce << timer_res); + + return 0; +} + +int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *p_coal, void *handle) +{ + struct 
qed_queue_cid *p_cid = handle; + struct qed_ptt *p_ptt; + int rc = 0; + + if (IS_VF(p_hwfn->cdev)) { + rc = qed_vf_pf_get_coalesce(p_hwfn, p_coal, p_cid); + if (rc) + DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n"); + + return rc; + } + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EAGAIN; + + if (p_cid->b_is_rx) { + rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, p_coal); + if (rc) + goto out; + } else { + rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, p_coal); + if (rc) + goto out; + } + +out: + qed_ptt_release(p_hwfn, p_ptt); + + return rc; +} + static int qed_fill_eth_dev_info(struct qed_dev *cdev, struct qed_dev_eth_info *info) { @@ -2696,6 +2796,20 @@ static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie, return rc; } +static int qed_get_coalesce(struct qed_dev *cdev, u16 *coal, void *handle) +{ + struct qed_queue_cid *p_cid = handle; + struct qed_hwfn *p_hwfn; + int rc; + + p_hwfn = p_cid->p_owner; + rc = qed_get_queue_coalesce(p_hwfn, coal, handle); + if (rc) + DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n"); + + return rc; +} + static int qed_fp_cqe_completion(struct qed_dev *dev, u8 rss_id, struct eth_slow_path_rx_cqe *cqe) { @@ -2739,6 +2853,7 @@ static const struct qed_eth_ops qed_eth_ops_pass = { .tunn_config = &qed_tunn_configure, .ntuple_filter_config = &qed_ntuple_arfs_filter_config, .configure_arfs_searcher = &qed_configure_arfs_searcher, + .get_coalesce = &qed_get_coalesce, }; const struct qed_eth_ops *qed_get_eth_ops(void) diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index f8f09aadced7..cc1f248551c9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -400,4 +400,20 @@ qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, u8 qed_mcast_bin_from_mac(u8 *mac); -#endif /* _QED_L2_H */ +int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 coalesce, struct qed_queue_cid *p_cid); + +int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 coalesce, struct qed_queue_cid *p_cid); + +int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_queue_cid *p_cid, u16 *p_hw_coal); + +int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_queue_cid *p_cid, u16 *p_hw_coal); + +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index b11399606990..27832885a87f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -954,9 +954,7 @@ static int qed_slowpath_start(struct qed_dev *cdev, struct qed_tunnel_info tunn_info; const u8 *data = NULL; struct qed_hwfn *hwfn; -#ifdef CONFIG_RFS_ACCEL struct qed_ptt *p_ptt; -#endif int rc = -EINVAL; if (qed_iov_wq_start(cdev)) @@ -972,7 +970,6 @@ static int qed_slowpath_start(struct qed_dev *cdev, goto err; } -#ifdef CONFIG_RFS_ACCEL if (cdev->num_hwfns == 1) { p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); if (p_ptt) { @@ -983,7 +980,6 @@ static int qed_slowpath_start(struct qed_dev *cdev, goto err; } } -#endif } cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS; @@ -1091,12 +1087,10 @@ static int qed_slowpath_start(struct qed_dev *cdev, if (IS_PF(cdev)) release_firmware(cdev->firmware); -#ifdef CONFIG_RFS_ACCEL if (IS_PF(cdev) && (cdev->num_hwfns == 1) && QED_LEADING_HWFN(cdev)->p_arfs_ptt) qed_ptt_release(QED_LEADING_HWFN(cdev), QED_LEADING_HWFN(cdev)->p_arfs_ptt); -#endif 
qed_iov_wq_stop(cdev, false); @@ -1111,11 +1105,9 @@ static int qed_slowpath_stop(struct qed_dev *cdev) qed_ll2_dealloc_if(cdev); if (IS_PF(cdev)) { -#ifdef CONFIG_RFS_ACCEL if (cdev->num_hwfns == 1) qed_ptt_release(QED_LEADING_HWFN(cdev), QED_LEADING_HWFN(cdev)->p_arfs_ptt); -#endif qed_free_stream_mem(cdev); if (IS_QED_ETH_IF(cdev)) qed_sriov_disable(cdev, true); @@ -1305,6 +1297,10 @@ static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params) } } + if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG) + memcpy(&link_params->eee, ¶ms->eee, + sizeof(link_params->eee)); + rc = qed_mcp_set_link(hwfn, ptt, params->link_up); qed_ptt_release(hwfn, ptt); @@ -1491,6 +1487,21 @@ static void qed_fill_link(struct qed_hwfn *hwfn, if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE || link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE) if_link->lp_caps |= QED_LM_Asym_Pause_BIT; + + if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) { + if_link->eee_supported = false; + } else { + if_link->eee_supported = true; + if_link->eee_active = link.eee_active; + if_link->sup_caps = link_caps.eee_speed_caps; + /* MFW clears adv_caps on eee disable; use configured value */ + if_link->eee.adv_caps = link.eee_adv_caps ? link.eee_adv_caps : + params.eee.adv_caps; + if_link->eee.lp_adv_caps = link.eee_lp_adv_caps; + if_link->eee.enable = params.eee.enable; + if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable; + if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer; + } } static void qed_get_current_link(struct qed_dev *cdev, @@ -1557,36 +1568,10 @@ static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type, return rc; } -static void qed_get_coalesce(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal) -{ - *rx_coal = cdev->rx_coalesce_usecs; - *tx_coal = cdev->tx_coalesce_usecs; -} - static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal, - u16 qid, u16 sb_id) + void *handle) { - struct qed_hwfn *hwfn; - struct qed_ptt *ptt; - int hwfn_index; - int status = 0; - - hwfn_index = qid % cdev->num_hwfns; - hwfn = &cdev->hwfns[hwfn_index]; - ptt = qed_ptt_acquire(hwfn); - if (!ptt) - return -EAGAIN; - - status = qed_set_rxq_coalesce(hwfn, ptt, rx_coal, - qid / cdev->num_hwfns, sb_id); - if (status) - goto out; - status = qed_set_txq_coalesce(hwfn, ptt, tx_coal, - qid / cdev->num_hwfns, sb_id); -out: - qed_ptt_release(hwfn, ptt); - - return status; + return qed_set_queue_coalesce(rx_coal, tx_coal, handle); } static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode) @@ -1735,7 +1720,6 @@ const struct qed_common_ops qed_common_ops_pass = { .chain_alloc = &qed_chain_alloc, .chain_free = &qed_chain_free, .nvm_get_image = &qed_nvm_get_image, - .get_coalesce = &qed_get_coalesce, .set_coalesce = &qed_set_coalesce, .set_led = &qed_set_led, .update_drv_state = &qed_update_drv_state, diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 3eb241657368..376485d99357 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -1097,6 +1097,31 @@ static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn, DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n"); } +static void qed_mcp_read_eee_config(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_mcp_link_state *p_link) +{ + u32 eee_status, val; + + p_link->eee_adv_caps = 0; + p_link->eee_lp_adv_caps = 0; + eee_status = qed_rd(p_hwfn, + p_ptt, + p_hwfn->mcp_info->port_addr + + 
offsetof(struct public_port, eee_status)); + p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT); + val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET; + if (val & EEE_1G_ADV) + p_link->eee_adv_caps |= QED_EEE_1G_ADV; + if (val & EEE_10G_ADV) + p_link->eee_adv_caps |= QED_EEE_10G_ADV; + val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET; + if (val & EEE_1G_ADV) + p_link->eee_lp_adv_caps |= QED_EEE_1G_ADV; + if (val & EEE_10G_ADV) + p_link->eee_lp_adv_caps |= QED_EEE_10G_ADV; +} + static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_reset) { @@ -1228,6 +1253,9 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT); + if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) + qed_mcp_read_eee_config(p_hwfn, p_ptt, p_link); + qed_link_update(p_hwfn); out: spin_unlock_bh(&p_hwfn->mcp_info->link_lock); @@ -1251,6 +1279,19 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up) phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0; phy_cfg.adv_speed = params->speed.advertised_speeds; phy_cfg.loopback_mode = params->loopback_mode; + if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) { + if (params->eee.enable) + phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED; + if (params->eee.tx_lpi_enable) + phy_cfg.eee_cfg |= EEE_CFG_TX_LPI; + if (params->eee.adv_caps & QED_EEE_1G_ADV) + phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G; + if (params->eee.adv_caps & QED_EEE_10G_ADV) + phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G; + phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer << + EEE_TX_TIMER_USEC_OFFSET) & + EEE_TX_TIMER_USEC_MASK; + } p_hwfn->b_drv_link_init = b_up; @@ -2822,3 +2863,28 @@ void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock, p_unlock->resource = resource; } } + +int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 mcp_resp; + int rc; + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT, + 0, &mcp_resp, &p_hwfn->mcp_info->capabilities); + if (!rc) + DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_PROBE), + "MFW supported features: %08x\n", + p_hwfn->mcp_info->capabilities); + + return rc; +} + +int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 mcp_resp, mcp_param, features; + + features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE; + + return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT, + features, &mcp_resp, &mcp_param); +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index af03b3651411..c7ec2395d1ce 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -53,15 +53,25 @@ struct qed_mcp_link_pause_params { bool forced_tx; }; +enum qed_mcp_eee_mode { + QED_MCP_EEE_DISABLED, + QED_MCP_EEE_ENABLED, + QED_MCP_EEE_UNSUPPORTED +}; + struct qed_mcp_link_params { - struct qed_mcp_link_speed_params speed; - struct qed_mcp_link_pause_params pause; - u32 loopback_mode; + struct qed_mcp_link_speed_params speed; + struct qed_mcp_link_pause_params pause; + u32 loopback_mode; + struct qed_link_eee_params eee; }; struct qed_mcp_link_capabilities { u32 speed_capabilities; bool default_speed_autoneg; + enum qed_mcp_eee_mode default_eee; + u32 eee_lpi_timer; + u8 eee_speed_caps; }; struct qed_mcp_link_state { @@ -102,6 +112,9 @@ struct qed_mcp_link_state { u8 partner_adv_pause; bool sfp_tx_fault; + 
bool eee_active; + u8 eee_adv_caps; + u8 eee_lp_adv_caps; }; struct qed_mcp_function_info { @@ -546,6 +559,9 @@ struct qed_mcp_info { u8 *mfw_mb_shadow; u16 mfw_mb_length; u32 mcp_hist; + + /* Capabilties negotiated with the MFW */ + u32 capabilities; }; struct qed_mcp_mb_params { @@ -925,5 +941,20 @@ void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock, struct qed_resc_unlock_params *p_unlock, enum qed_resc_lock resource, bool b_is_permanent); +/** + * @brief Learn of supported MFW features; To be done during early init + * + * @param p_hwfn + * @param p_ptt + */ +int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); +/** + * @brief Inform MFW of set of features supported by driver. Should be done + * inside the content of the LOAD_REQ. + * + * @param p_hwfn + * @param p_ptt + */ +int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 2cfd3bd9a031..3f40b1de7957 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -3400,6 +3400,157 @@ static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn, length, status); } +static void qed_iov_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *p_vf) +{ + struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; + struct pfvf_read_coal_resp_tlv *p_resp; + struct vfpf_read_coal_req_tlv *req; + u8 status = PFVF_STATUS_FAILURE; + struct qed_vf_queue *p_queue; + struct qed_queue_cid *p_cid; + u16 coal = 0, qid, i; + bool b_is_rx; + int rc = 0; + + mbx->offset = (u8 *)mbx->reply_virt; + req = &mbx->req_virt->read_coal_req; + + qid = req->qid; + b_is_rx = req->is_rx ? 
true : false; + + if (b_is_rx) { + if (!qed_iov_validate_rxq(p_hwfn, p_vf, qid, + QED_IOV_VALIDATE_Q_ENABLE)) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%d]: Invalid Rx queue_id = %d\n", + p_vf->abs_vf_id, qid); + goto send_resp; + } + + p_cid = qed_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]); + rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal); + if (rc) + goto send_resp; + } else { + if (!qed_iov_validate_txq(p_hwfn, p_vf, qid, + QED_IOV_VALIDATE_Q_ENABLE)) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%d]: Invalid Tx queue_id = %d\n", + p_vf->abs_vf_id, qid); + goto send_resp; + } + for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) { + p_queue = &p_vf->vf_queues[qid]; + if ((!p_queue->cids[i].p_cid) || + (!p_queue->cids[i].b_is_tx)) + continue; + + p_cid = p_queue->cids[i].p_cid; + + rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, &coal); + if (rc) + goto send_resp; + break; + } + } + + status = PFVF_STATUS_SUCCESS; + +send_resp: + p_resp = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_COALESCE_READ, + sizeof(*p_resp)); + p_resp->coal = coal; + + qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status); +} + +static void qed_iov_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf) +{ + struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; + struct vfpf_update_coalesce *req; + u8 status = PFVF_STATUS_FAILURE; + struct qed_queue_cid *p_cid; + u16 rx_coal, tx_coal; + int rc = 0, i; + u16 qid; + + req = &mbx->req_virt->update_coalesce; + + rx_coal = req->rx_coal; + tx_coal = req->tx_coal; + qid = req->qid; + + if (!qed_iov_validate_rxq(p_hwfn, vf, qid, + QED_IOV_VALIDATE_Q_ENABLE) && rx_coal) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%d]: Invalid Rx queue_id = %d\n", + vf->abs_vf_id, qid); + goto out; + } + + if (!qed_iov_validate_txq(p_hwfn, vf, qid, + QED_IOV_VALIDATE_Q_ENABLE) && tx_coal) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%d]: Invalid Tx queue_id = %d\n", + vf->abs_vf_id, qid); + goto out; + } + + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n", + vf->abs_vf_id, rx_coal, tx_coal, qid); + + if (rx_coal) { + p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]); + + rc = qed_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid); + if (rc) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%d]: Unable to set rx queue = %d coalesce\n", + vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid); + goto out; + } + vf->rx_coal = rx_coal; + } + + if (tx_coal) { + struct qed_vf_queue *p_queue = &vf->vf_queues[qid]; + + for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) { + if (!p_queue->cids[i].p_cid) + continue; + + if (!p_queue->cids[i].b_is_tx) + continue; + + rc = qed_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, + p_queue->cids[i].p_cid); + + if (rc) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%d]: Unable to set tx queue coalesce\n", + vf->abs_vf_id); + goto out; + } + } + vf->tx_coal = tx_coal; + } + + status = PFVF_STATUS_SUCCESS; +out: + qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE, + sizeof(struct pfvf_def_resp_tlv), status); +} static int qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) @@ -3725,6 +3876,12 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, case CHANNEL_TLV_UPDATE_TUNN_PARAM: qed_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf); break; + case CHANNEL_TLV_COALESCE_UPDATE: + qed_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, 
p_vf); + break; + case CHANNEL_TLV_COALESCE_READ: + qed_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf); + break; } } else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index c2e44bce398c..3955929ba892 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -217,6 +217,9 @@ struct qed_vf_info { u8 num_rxqs; u8 num_txqs; + u16 rx_coal; + u16 tx_coal; + u8 num_sbs; u8 num_mac_filters; diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 1926d1ed439f..91b5e9f02a62 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -1343,6 +1343,81 @@ int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn) return rc; } +int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn, + u16 *p_coal, struct qed_queue_cid *p_cid) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_read_coal_resp_tlv *resp; + struct vfpf_read_coal_req_tlv *req; + int rc; + + /* clear mailbox and prep header tlv */ + req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_READ, sizeof(*req)); + req->qid = p_cid->rel.queue_id; + req->is_rx = p_cid->b_is_rx ? 1 : 0; + + qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + resp = &p_iov->pf2vf_reply->read_coal_resp; + + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + goto exit; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) + goto exit; + + *p_coal = resp->coal; +exit: + qed_vf_pf_req_end(p_hwfn, rc); + + return rc; +} + +int +qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn, + u16 rx_coal, u16 tx_coal, struct qed_queue_cid *p_cid) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_update_coalesce *req; + struct pfvf_def_resp_tlv *resp; + int rc; + + /* clear mailbox and prep header tlv */ + req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_UPDATE, sizeof(*req)); + + req->rx_coal = rx_coal; + req->tx_coal = tx_coal; + req->qid = p_cid->rel.queue_id; + + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "Setting coalesce rx_coal = %d, tx_coal = %d at queue = %d\n", + rx_coal, tx_coal, req->qid); + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + resp = &p_iov->pf2vf_reply->default_resp; + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + goto exit; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) + goto exit; + + if (rx_coal) + p_hwfn->cdev->rx_coalesce_usecs = rx_coal; + + if (tx_coal) + p_hwfn->cdev->tx_coalesce_usecs = tx_coal; + +exit: + qed_vf_pf_req_end(p_hwfn, rc); + return rc; +} + u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h index 34d9b882a780..97d44dfb38ca 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.h +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -497,6 +497,27 @@ struct tlv_buffer_size { u8 tlv_buffer[TLV_BUFFER_SIZE]; }; +struct vfpf_update_coalesce { + struct vfpf_first_tlv first_tlv; + u16 rx_coal; + u16 tx_coal; + u16 qid; + u8 padding[2]; +}; + +struct vfpf_read_coal_req_tlv { + struct vfpf_first_tlv first_tlv; + u16 qid; + u8 is_rx; + u8 padding[5]; +}; + +struct pfvf_read_coal_resp_tlv { + struct pfvf_tlv hdr; + u16 coal; + u8 
padding[6]; +}; + union vfpf_tlvs { struct vfpf_first_tlv first_tlv; struct vfpf_acquire_tlv acquire; @@ -509,7 +530,8 @@ union vfpf_tlvs { struct vfpf_vport_update_tlv vport_update; struct vfpf_ucast_filter_tlv ucast_filter; struct vfpf_update_tunn_param_tlv tunn_param_update; - struct channel_list_end_tlv list_end; + struct vfpf_update_coalesce update_coalesce; + struct vfpf_read_coal_req_tlv read_coal_req; struct tlv_buffer_size tlv_buf_size; }; @@ -519,6 +541,7 @@ union pfvf_tlvs { struct tlv_buffer_size tlv_buf_size; struct pfvf_start_queue_resp_tlv queue_start; struct pfvf_update_tunn_param_tlv tunn_param_resp; + struct pfvf_read_coal_resp_tlv read_coal_resp; }; enum qed_bulletin_bit { @@ -624,8 +647,9 @@ enum { CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN, CHANNEL_TLV_VPORT_UPDATE_SGE_TPA, CHANNEL_TLV_UPDATE_TUNN_PARAM, - CHANNEL_TLV_RESERVED, + CHANNEL_TLV_COALESCE_UPDATE, CHANNEL_TLV_QID, + CHANNEL_TLV_COALESCE_READ, CHANNEL_TLV_MAX, /* Required for iterating over vport-update tlvs. @@ -677,6 +701,31 @@ struct qed_vf_iov { bool b_doorbell_bar; }; +/** + * @brief VF - Set Rx/Tx coalesce per VF's relative queue. + * Coalesce value '0' will omit the configuration. + * + * @param p_hwfn + * @param rx_coal - coalesce value in micro second for rx queue + * @param tx_coal - coalesce value in micro second for tx queue + * @param p_cid - queue cid + * + **/ +int qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn, + u16 rx_coal, + u16 tx_coal, struct qed_queue_cid *p_cid); + +/** + * @brief VF - Get coalesce per VF's relative queue. + * + * @param p_hwfn + * @param p_coal - coalesce value in micro second for VF queues. + * @param p_cid - queue cid + * + **/ +int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn, + u16 *p_coal, struct qed_queue_cid *p_cid); + #ifdef CONFIG_QED_SRIOV /** * @brief Read the VF bulletin and act on it if needed diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 4dfb238221f9..adb700512baa 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -160,6 +160,8 @@ struct qede_rdma_dev { struct qede_ptp; +#define QEDE_RFS_MAX_FLTR 256 + struct qede_dev { struct qed_dev *cdev; struct net_device *ndev; @@ -241,9 +243,7 @@ struct qede_dev { u16 vxlan_dst_port; u16 geneve_dst_port; -#ifdef CONFIG_RFS_ACCEL struct qede_arfs *arfs; -#endif bool wol_enabled; struct qede_rdma_dev rdma_info; @@ -447,16 +447,21 @@ struct qede_fastpath { #ifdef CONFIG_RFS_ACCEL int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, u16 rxq_index, u32 flow_id); +#define QEDE_SP_ARFS_CONFIG 4 +#define QEDE_SP_TASK_POLL_DELAY (5 * HZ) +#endif + void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr); void qede_poll_for_freeing_arfs_filters(struct qede_dev *edev); void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc); void qede_free_arfs(struct qede_dev *edev); int qede_alloc_arfs(struct qede_dev *edev); - -#define QEDE_SP_ARFS_CONFIG 4 -#define QEDE_SP_TASK_POLL_DELAY (5 * HZ) -#define QEDE_RFS_MAX_FLTR 256 -#endif +int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info); +int qede_del_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info); +int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd); +int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info, + u32 *rule_locs); +int qede_get_arfs_filter_count(struct qede_dev *edev); struct qede_reload_args { void (*func)(struct qede_dev *edev, struct qede_reload_args 
*args); diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 6a03d3e66cff..dae741270022 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -702,24 +702,62 @@ static u32 qede_get_link(struct net_device *dev) static int qede_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) { + void *rx_handle = NULL, *tx_handle = NULL; struct qede_dev *edev = netdev_priv(dev); - u16 rxc, txc; + u16 rx_coal, tx_coal, i, rc = 0; + struct qede_fastpath *fp; + + rx_coal = QED_DEFAULT_RX_USECS; + tx_coal = QED_DEFAULT_TX_USECS; memset(coal, 0, sizeof(struct ethtool_coalesce)); - edev->ops->common->get_coalesce(edev->cdev, &rxc, &txc); - coal->rx_coalesce_usecs = rxc; - coal->tx_coalesce_usecs = txc; + __qede_lock(edev); + if (edev->state == QEDE_STATE_OPEN) { + for_each_queue(i) { + fp = &edev->fp_array[i]; - return 0; + if (fp->type & QEDE_FASTPATH_RX) { + rx_handle = fp->rxq->handle; + break; + } + } + + rc = edev->ops->get_coalesce(edev->cdev, &rx_coal, rx_handle); + if (rc) { + DP_INFO(edev, "Read Rx coalesce error\n"); + goto out; + } + + for_each_queue(i) { + fp = &edev->fp_array[i]; + if (fp->type & QEDE_FASTPATH_TX) { + tx_handle = fp->txq->handle; + break; + } + } + + rc = edev->ops->get_coalesce(edev->cdev, &tx_coal, tx_handle); + if (rc) + DP_INFO(edev, "Read Tx coalesce error\n"); + } + +out: + __qede_unlock(edev); + + coal->rx_coalesce_usecs = rx_coal; + coal->tx_coalesce_usecs = tx_coal; + + return rc; } static int qede_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) { struct qede_dev *edev = netdev_priv(dev); + struct qede_fastpath *fp; int i, rc = 0; - u16 rxc, txc, sb_id; + u16 rxc, txc; if (!netif_running(dev)) { DP_INFO(edev, "Interface is down\n"); @@ -730,21 +768,36 @@ static int qede_set_coalesce(struct net_device *dev, coal->tx_coalesce_usecs > QED_COALESCE_MAX) { DP_INFO(edev, "Can't support requested %s coalesce value [max supported value %d]\n", - coal->rx_coalesce_usecs > QED_COALESCE_MAX ? "rx" - : "tx", - QED_COALESCE_MAX); + coal->rx_coalesce_usecs > QED_COALESCE_MAX ? 
"rx" : + "tx", QED_COALESCE_MAX); return -EINVAL; } rxc = (u16)coal->rx_coalesce_usecs; txc = (u16)coal->tx_coalesce_usecs; for_each_queue(i) { - sb_id = edev->fp_array[i].sb_info->igu_sb_id; - rc = edev->ops->common->set_coalesce(edev->cdev, rxc, txc, - (u16)i, sb_id); - if (rc) { - DP_INFO(edev, "Set coalesce error, rc = %d\n", rc); - return rc; + fp = &edev->fp_array[i]; + + if (edev->fp_array[i].type & QEDE_FASTPATH_RX) { + rc = edev->ops->common->set_coalesce(edev->cdev, + rxc, 0, + fp->rxq->handle); + if (rc) { + DP_INFO(edev, + "Set RX coalesce error, rc = %d\n", rc); + return rc; + } + } + + if (edev->fp_array[i].type & QEDE_FASTPATH_TX) { + rc = edev->ops->common->set_coalesce(edev->cdev, + 0, txc, + fp->txq->handle); + if (rc) { + DP_INFO(edev, + "Set TX coalesce error, rc = %d\n", rc); + return rc; + } } } @@ -1045,20 +1098,34 @@ static int qede_get_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info) } static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, - u32 *rules __always_unused) + u32 *rule_locs) { struct qede_dev *edev = netdev_priv(dev); + int rc = 0; switch (info->cmd) { case ETHTOOL_GRXRINGS: info->data = QEDE_RSS_COUNT(edev); - return 0; + break; case ETHTOOL_GRXFH: - return qede_get_rss_flags(edev, info); + rc = qede_get_rss_flags(edev, info); + break; + case ETHTOOL_GRXCLSRLCNT: + info->rule_cnt = qede_get_arfs_filter_count(edev); + info->data = QEDE_RFS_MAX_FLTR; + break; + case ETHTOOL_GRXCLSRULE: + rc = qede_get_cls_rule_entry(edev, info); + break; + case ETHTOOL_GRXCLSRLALL: + rc = qede_get_cls_rule_all(edev, info, rule_locs); + break; default: DP_ERR(edev, "Command parameters not supported\n"); - return -EOPNOTSUPP; + rc = -EOPNOTSUPP; } + + return rc; } static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info) @@ -1168,14 +1235,24 @@ static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info) static int qede_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info) { struct qede_dev *edev = netdev_priv(dev); + int rc; switch (info->cmd) { case ETHTOOL_SRXFH: - return qede_set_rss_flags(edev, info); + rc = qede_set_rss_flags(edev, info); + break; + case ETHTOOL_SRXCLSRLINS: + rc = qede_add_cls_rule(edev, info); + break; + case ETHTOOL_SRXCLSRLDEL: + rc = qede_del_cls_rule(edev, info); + break; default: DP_INFO(edev, "Command parameters not supported\n"); - return -EOPNOTSUPP; + rc = -EOPNOTSUPP; } + + return rc; } static u32 qede_get_rxfh_indir_size(struct net_device *dev) @@ -1607,6 +1684,87 @@ static int qede_get_tunable(struct net_device *dev, return 0; } +static int qede_get_eee(struct net_device *dev, struct ethtool_eee *edata) +{ + struct qede_dev *edev = netdev_priv(dev); + struct qed_link_output current_link; + + memset(¤t_link, 0, sizeof(current_link)); + edev->ops->common->get_link(edev->cdev, ¤t_link); + + if (!current_link.eee_supported) { + DP_INFO(edev, "EEE is not supported\n"); + return -EOPNOTSUPP; + } + + if (current_link.eee.adv_caps & QED_EEE_1G_ADV) + edata->advertised = ADVERTISED_1000baseT_Full; + if (current_link.eee.adv_caps & QED_EEE_10G_ADV) + edata->advertised |= ADVERTISED_10000baseT_Full; + if (current_link.sup_caps & QED_EEE_1G_ADV) + edata->supported = ADVERTISED_1000baseT_Full; + if (current_link.sup_caps & QED_EEE_10G_ADV) + edata->supported |= ADVERTISED_10000baseT_Full; + if (current_link.eee.lp_adv_caps & QED_EEE_1G_ADV) + edata->lp_advertised = ADVERTISED_1000baseT_Full; + if (current_link.eee.lp_adv_caps & QED_EEE_10G_ADV) + 
edata->lp_advertised |= ADVERTISED_10000baseT_Full; + + edata->tx_lpi_timer = current_link.eee.tx_lpi_timer; + edata->eee_enabled = current_link.eee.enable; + edata->tx_lpi_enabled = current_link.eee.tx_lpi_enable; + edata->eee_active = current_link.eee_active; + + return 0; +} + +static int qede_set_eee(struct net_device *dev, struct ethtool_eee *edata) +{ + struct qede_dev *edev = netdev_priv(dev); + struct qed_link_output current_link; + struct qed_link_params params; + + if (!edev->ops->common->can_link_change(edev->cdev)) { + DP_INFO(edev, "Link settings are not allowed to be changed\n"); + return -EOPNOTSUPP; + } + + memset(¤t_link, 0, sizeof(current_link)); + edev->ops->common->get_link(edev->cdev, ¤t_link); + + if (!current_link.eee_supported) { + DP_INFO(edev, "EEE is not supported\n"); + return -EOPNOTSUPP; + } + + memset(¶ms, 0, sizeof(params)); + params.override_flags |= QED_LINK_OVERRIDE_EEE_CONFIG; + + if (!(edata->advertised & (ADVERTISED_1000baseT_Full | + ADVERTISED_10000baseT_Full)) || + ((edata->advertised & (ADVERTISED_1000baseT_Full | + ADVERTISED_10000baseT_Full)) != + edata->advertised)) { + DP_VERBOSE(edev, QED_MSG_DEBUG, + "Invalid advertised capabilities %d\n", + edata->advertised); + return -EINVAL; + } + + if (edata->advertised & ADVERTISED_1000baseT_Full) + params.eee.adv_caps = QED_EEE_1G_ADV; + if (edata->advertised & ADVERTISED_10000baseT_Full) + params.eee.adv_caps |= QED_EEE_10G_ADV; + params.eee.enable = edata->eee_enabled; + params.eee.tx_lpi_enable = edata->tx_lpi_enabled; + params.eee.tx_lpi_timer = edata->tx_lpi_timer; + + params.link_up = true; + edev->ops->common->set_link(edev->cdev, ¶ms); + + return 0; +} + static const struct ethtool_ops qede_ethtool_ops = { .get_link_ksettings = qede_get_link_ksettings, .set_link_ksettings = qede_set_link_ksettings, @@ -1640,6 +1798,9 @@ static const struct ethtool_ops qede_ethtool_ops = { .get_channels = qede_get_channels, .set_channels = qede_set_channels, .self_test = qede_self_test, + .get_eee = qede_get_eee, + .set_eee = qede_set_eee, + .get_tunable = qede_get_tunable, .set_tunable = qede_set_tunable, }; @@ -1650,6 +1811,8 @@ static const struct ethtool_ops qede_vf_ethtool_ops = { .get_msglevel = qede_get_msglevel, .set_msglevel = qede_set_msglevel, .get_link = qede_get_link, + .get_coalesce = qede_get_coalesce, + .set_coalesce = qede_set_coalesce, .get_ringparam = qede_get_ringparam, .set_ringparam = qede_set_ringparam, .get_strings = qede_get_strings, diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index f939db5bac5f..f79e36e4060a 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -38,7 +38,6 @@ #include #include "qede.h" -#ifdef CONFIG_RFS_ACCEL struct qede_arfs_tuple { union { __be32 src_ipv4; @@ -76,10 +75,12 @@ struct qede_arfs_fltr_node { u16 next_rxq_id; bool filter_op; bool used; + u8 fw_rc; struct hlist_node node; }; struct qede_arfs { +#define QEDE_ARFS_BUCKET_HEAD(edev, idx) (&(edev)->arfs->arfs_hl_head[idx]) #define QEDE_ARFS_POLL_COUNT 100 #define QEDE_RFS_FLW_BITSHIFT (4) #define QEDE_RFS_FLW_MASK ((1 << QEDE_RFS_FLW_BITSHIFT) - 1) @@ -121,11 +122,56 @@ qede_free_arfs_filter(struct qede_dev *edev, struct qede_arfs_fltr_node *fltr) kfree(fltr); } +static int +qede_enqueue_fltr_and_config_searcher(struct qede_dev *edev, + struct qede_arfs_fltr_node *fltr, + u16 bucket_idx) +{ + fltr->mapping = dma_map_single(&edev->pdev->dev, fltr->data, + fltr->buf_len, DMA_TO_DEVICE); 
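
The get_eee/set_eee ethtool callbacks added above are reached through the standard SIOCETHTOOL ioctl; the short userspace sketch below (not part of the patch, interface name is a placeholder) issues ETHTOOL_GEEE and prints the fields the driver fills from qed_link_output:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	struct ethtool_eee eee = { .cmd = ETHTOOL_GEEE };
	struct ifreq ifr = { 0 };
	int fd, rc;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;

	/* "eth0" is only a placeholder interface name */
	strncpy(ifr.ifr_name, argc > 1 ? argv[1] : "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&eee;

	rc = ioctl(fd, SIOCETHTOOL, &ifr);
	close(fd);
	if (rc < 0) {
		perror("ETHTOOL_GEEE");
		return 1;
	}

	printf("EEE enabled=%u active=%u tx_lpi_enabled=%u tx_lpi_timer=%u us\n",
	       eee.eee_enabled, eee.eee_active, eee.tx_lpi_enabled,
	       eee.tx_lpi_timer);
	return 0;
}
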
+ if (dma_mapping_error(&edev->pdev->dev, fltr->mapping)) { + DP_NOTICE(edev, "Failed to map DMA memory for rule\n"); + qede_free_arfs_filter(edev, fltr); + return -ENOMEM; + } + + INIT_HLIST_NODE(&fltr->node); + hlist_add_head(&fltr->node, + QEDE_ARFS_BUCKET_HEAD(edev, bucket_idx)); + edev->arfs->filter_count++; + + if (edev->arfs->filter_count == 1 && !edev->arfs->enable) { + edev->ops->configure_arfs_searcher(edev->cdev, true); + edev->arfs->enable = true; + } + + return 0; +} + +static void +qede_dequeue_fltr_and_config_searcher(struct qede_dev *edev, + struct qede_arfs_fltr_node *fltr) +{ + hlist_del(&fltr->node); + dma_unmap_single(&edev->pdev->dev, fltr->mapping, + fltr->buf_len, DMA_TO_DEVICE); + + qede_free_arfs_filter(edev, fltr); + edev->arfs->filter_count--; + + if (!edev->arfs->filter_count && edev->arfs->enable) { + edev->arfs->enable = false; + edev->ops->configure_arfs_searcher(edev->cdev, false); + } +} + void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc) { struct qede_arfs_fltr_node *fltr = filter; struct qede_dev *edev = dev; + fltr->fw_rc = fw_rc; + if (fw_rc) { DP_NOTICE(edev, "Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n", @@ -185,18 +231,17 @@ void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr) if ((!test_bit(QEDE_FLTR_VALID, &fltr->state) && !fltr->used) || free_fltr) { - hlist_del(&fltr->node); - dma_unmap_single(&edev->pdev->dev, - fltr->mapping, - fltr->buf_len, DMA_TO_DEVICE); - qede_free_arfs_filter(edev, fltr); - edev->arfs->filter_count--; + qede_dequeue_fltr_and_config_searcher(edev, + fltr); } else { - if ((rps_may_expire_flow(edev->ndev, - fltr->rxq_id, - fltr->flow_id, - fltr->sw_id) || del) && - !free_fltr) + bool flow_exp = false; +#ifdef CONFIG_RFS_ACCEL + flow_exp = rps_may_expire_flow(edev->ndev, + fltr->rxq_id, + fltr->flow_id, + fltr->sw_id); +#endif + if ((flow_exp || del) && !free_fltr) qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id, false); @@ -213,10 +258,12 @@ void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr) edev->arfs->enable = false; edev->ops->configure_arfs_searcher(edev->cdev, false); } +#ifdef CONFIG_RFS_ACCEL } else { set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags); schedule_delayed_work(&edev->sp_task, QEDE_SP_TASK_POLL_DELAY); +#endif } spin_unlock_bh(&edev->arfs->arfs_list_lock); @@ -258,25 +305,26 @@ int qede_alloc_arfs(struct qede_dev *edev) spin_lock_init(&edev->arfs->arfs_list_lock); for (i = 0; i <= QEDE_RFS_FLW_MASK; i++) - INIT_HLIST_HEAD(&edev->arfs->arfs_hl_head[i]); - - edev->ndev->rx_cpu_rmap = alloc_irq_cpu_rmap(QEDE_RSS_COUNT(edev)); - if (!edev->ndev->rx_cpu_rmap) { - vfree(edev->arfs); - edev->arfs = NULL; - return -ENOMEM; - } + INIT_HLIST_HEAD(QEDE_ARFS_BUCKET_HEAD(edev, i)); edev->arfs->arfs_fltr_bmap = vzalloc(BITS_TO_LONGS(QEDE_RFS_MAX_FLTR) * sizeof(long)); if (!edev->arfs->arfs_fltr_bmap) { - free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap); - edev->ndev->rx_cpu_rmap = NULL; vfree(edev->arfs); edev->arfs = NULL; return -ENOMEM; } +#ifdef CONFIG_RFS_ACCEL + edev->ndev->rx_cpu_rmap = alloc_irq_cpu_rmap(QEDE_RSS_COUNT(edev)); + if (!edev->ndev->rx_cpu_rmap) { + vfree(edev->arfs->arfs_fltr_bmap); + edev->arfs->arfs_fltr_bmap = NULL; + vfree(edev->arfs); + edev->arfs = NULL; + return -ENOMEM; + } +#endif return 0; } @@ -285,16 +333,19 @@ void qede_free_arfs(struct qede_dev *edev) if (!edev->arfs) return; +#ifdef CONFIG_RFS_ACCEL if (edev->ndev->rx_cpu_rmap) free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap); 
edev->ndev->rx_cpu_rmap = NULL; +#endif vfree(edev->arfs->arfs_fltr_bmap); edev->arfs->arfs_fltr_bmap = NULL; vfree(edev->arfs); edev->arfs = NULL; } +#ifdef CONFIG_RFS_ACCEL static bool qede_compare_ip_addr(struct qede_arfs_fltr_node *tpos, const struct sk_buff *skb) { @@ -394,9 +445,8 @@ int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, spin_lock_bh(&edev->arfs->arfs_list_lock); - n = qede_arfs_htbl_key_search(&edev->arfs->arfs_hl_head[tbl_idx], + n = qede_arfs_htbl_key_search(QEDE_ARFS_BUCKET_HEAD(edev, tbl_idx), skb, ports[0], ports[1], ip_proto); - if (n) { /* Filter match */ n->next_rxq_id = rxq_index; @@ -448,23 +498,9 @@ int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, n->tuple.ip_proto = ip_proto; memcpy(n->data + ETH_HLEN, skb->data, skb_headlen(skb)); - n->mapping = dma_map_single(&edev->pdev->dev, n->data, - n->buf_len, DMA_TO_DEVICE); - if (dma_mapping_error(&edev->pdev->dev, n->mapping)) { - DP_NOTICE(edev, "Failed to map DMA memory for arfs\n"); - qede_free_arfs_filter(edev, n); - rc = -ENOMEM; + rc = qede_enqueue_fltr_and_config_searcher(edev, n, tbl_idx); + if (rc) goto ret_unlock; - } - - INIT_HLIST_NODE(&n->node); - hlist_add_head(&n->node, &edev->arfs->arfs_hl_head[tbl_idx]); - edev->arfs->filter_count++; - - if (edev->arfs->filter_count == 1 && !edev->arfs->enable) { - edev->ops->configure_arfs_searcher(edev->cdev, true); - edev->arfs->enable = true; - } qede_configure_arfs_fltr(edev, n, n->rxq_id, true); @@ -472,6 +508,7 @@ int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags); schedule_delayed_work(&edev->sp_task, 0); + return n->sw_id; ret_unlock: @@ -1263,3 +1300,371 @@ void qede_config_rx_mode(struct net_device *ndev) out: kfree(uc_macs); } + +static struct qede_arfs_fltr_node * +qede_get_arfs_fltr_by_loc(struct hlist_head *head, u32 location) +{ + struct qede_arfs_fltr_node *fltr; + + hlist_for_each_entry(fltr, head, node) + if (location == fltr->sw_id) + return fltr; + + return NULL; +} + +static bool +qede_compare_user_flow_ips(struct qede_arfs_fltr_node *tpos, + struct ethtool_rx_flow_spec *fsp, + __be16 proto) +{ + if (proto == htons(ETH_P_IP)) { + struct ethtool_tcpip4_spec *ip; + + ip = &fsp->h_u.tcp_ip4_spec; + + if (tpos->tuple.src_ipv4 == ip->ip4src && + tpos->tuple.dst_ipv4 == ip->ip4dst) + return true; + else + return false; + } else { + struct ethtool_tcpip6_spec *ip6; + struct in6_addr *src; + + ip6 = &fsp->h_u.tcp_ip6_spec; + src = &tpos->tuple.src_ipv6; + + if (!memcmp(src, &ip6->ip6src, sizeof(struct in6_addr)) && + !memcmp(&tpos->tuple.dst_ipv6, &ip6->ip6dst, + sizeof(struct in6_addr))) + return true; + else + return false; + } + return false; +} + +int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info, + u32 *rule_locs) +{ + struct qede_arfs_fltr_node *fltr; + struct hlist_head *head; + int cnt = 0, rc = 0; + + info->data = QEDE_RFS_MAX_FLTR; + + __qede_lock(edev); + + if (!edev->arfs) { + rc = -EPERM; + goto unlock; + } + + head = QEDE_ARFS_BUCKET_HEAD(edev, 0); + + hlist_for_each_entry(fltr, head, node) { + if (cnt == info->rule_cnt) { + rc = -EMSGSIZE; + goto unlock; + } + + rule_locs[cnt] = fltr->sw_id; + cnt++; + } + + info->rule_cnt = cnt; + +unlock: + __qede_unlock(edev); + return rc; +} + +int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = &cmd->fs; + struct qede_arfs_fltr_node *fltr = NULL; + int rc = 0; + + cmd->data = 
QEDE_RFS_MAX_FLTR; + + __qede_lock(edev); + + if (!edev->arfs) { + rc = -EPERM; + goto unlock; + } + + fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0), + fsp->location); + if (!fltr) { + DP_NOTICE(edev, "Rule not found - location=0x%x\n", + fsp->location); + rc = -EINVAL; + goto unlock; + } + + if (fltr->tuple.eth_proto == htons(ETH_P_IP)) { + if (fltr->tuple.ip_proto == IPPROTO_TCP) + fsp->flow_type = TCP_V4_FLOW; + else + fsp->flow_type = UDP_V4_FLOW; + + fsp->h_u.tcp_ip4_spec.psrc = fltr->tuple.src_port; + fsp->h_u.tcp_ip4_spec.pdst = fltr->tuple.dst_port; + fsp->h_u.tcp_ip4_spec.ip4src = fltr->tuple.src_ipv4; + fsp->h_u.tcp_ip4_spec.ip4dst = fltr->tuple.dst_ipv4; + } else { + if (fltr->tuple.ip_proto == IPPROTO_TCP) + fsp->flow_type = TCP_V6_FLOW; + else + fsp->flow_type = UDP_V6_FLOW; + fsp->h_u.tcp_ip6_spec.psrc = fltr->tuple.src_port; + fsp->h_u.tcp_ip6_spec.pdst = fltr->tuple.dst_port; + memcpy(&fsp->h_u.tcp_ip6_spec.ip6src, + &fltr->tuple.src_ipv6, sizeof(struct in6_addr)); + memcpy(&fsp->h_u.tcp_ip6_spec.ip6dst, + &fltr->tuple.dst_ipv6, sizeof(struct in6_addr)); + } + + fsp->ring_cookie = fltr->rxq_id; + +unlock: + __qede_unlock(edev); + return rc; +} + +static int +qede_validate_and_check_flow_exist(struct qede_dev *edev, + struct ethtool_rx_flow_spec *fsp, + int *min_hlen) +{ + __be16 src_port = 0x0, dst_port = 0x0; + struct qede_arfs_fltr_node *fltr; + struct hlist_node *temp; + struct hlist_head *head; + __be16 eth_proto; + u8 ip_proto; + + if (fsp->location >= QEDE_RFS_MAX_FLTR || + fsp->ring_cookie >= QEDE_RSS_COUNT(edev)) + return -EINVAL; + + if (fsp->flow_type == TCP_V4_FLOW) { + *min_hlen += sizeof(struct iphdr) + + sizeof(struct tcphdr); + eth_proto = htons(ETH_P_IP); + ip_proto = IPPROTO_TCP; + } else if (fsp->flow_type == UDP_V4_FLOW) { + *min_hlen += sizeof(struct iphdr) + + sizeof(struct udphdr); + eth_proto = htons(ETH_P_IP); + ip_proto = IPPROTO_UDP; + } else if (fsp->flow_type == TCP_V6_FLOW) { + *min_hlen += sizeof(struct ipv6hdr) + + sizeof(struct tcphdr); + eth_proto = htons(ETH_P_IPV6); + ip_proto = IPPROTO_TCP; + } else if (fsp->flow_type == UDP_V6_FLOW) { + *min_hlen += sizeof(struct ipv6hdr) + + sizeof(struct udphdr); + eth_proto = htons(ETH_P_IPV6); + ip_proto = IPPROTO_UDP; + } else { + DP_NOTICE(edev, "Unsupported flow type = 0x%x\n", + fsp->flow_type); + return -EPROTONOSUPPORT; + } + + if (eth_proto == htons(ETH_P_IP)) { + src_port = fsp->h_u.tcp_ip4_spec.psrc; + dst_port = fsp->h_u.tcp_ip4_spec.pdst; + } else { + src_port = fsp->h_u.tcp_ip6_spec.psrc; + dst_port = fsp->h_u.tcp_ip6_spec.pdst; + } + + head = QEDE_ARFS_BUCKET_HEAD(edev, 0); + hlist_for_each_entry_safe(fltr, temp, head, node) { + if ((fltr->tuple.ip_proto == ip_proto && + fltr->tuple.eth_proto == eth_proto && + qede_compare_user_flow_ips(fltr, fsp, eth_proto) && + fltr->tuple.src_port == src_port && + fltr->tuple.dst_port == dst_port) || + fltr->sw_id == fsp->location) + return -EEXIST; + } + + return 0; +} + +static int +qede_poll_arfs_filter_config(struct qede_dev *edev, + struct qede_arfs_fltr_node *fltr) +{ + int count = QEDE_ARFS_POLL_COUNT; + + while (fltr->used && count) { + msleep(20); + count--; + } + + if (count == 0 || fltr->fw_rc) { + qede_dequeue_fltr_and_config_searcher(edev, fltr); + return -EIO; + } + + return fltr->fw_rc; +} + +int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info) +{ + struct ethtool_rx_flow_spec *fsp = &info->fs; + struct qede_arfs_fltr_node *n; + int min_hlen = ETH_HLEN, rc; + struct ethhdr *eth; + struct iphdr *ip; 
+ __be16 *ports; + + __qede_lock(edev); + + if (!edev->arfs) { + rc = -EPERM; + goto unlock; + } + + rc = qede_validate_and_check_flow_exist(edev, fsp, &min_hlen); + if (rc) + goto unlock; + + n = kzalloc(sizeof(*n), GFP_KERNEL); + if (!n) { + rc = -ENOMEM; + goto unlock; + } + + n->data = kzalloc(min_hlen, GFP_KERNEL); + if (!n->data) { + kfree(n); + rc = -ENOMEM; + goto unlock; + } + + n->sw_id = fsp->location; + set_bit(n->sw_id, edev->arfs->arfs_fltr_bmap); + n->buf_len = min_hlen; + n->rxq_id = fsp->ring_cookie; + n->next_rxq_id = n->rxq_id; + eth = (struct ethhdr *)n->data; + + if (info->fs.flow_type == TCP_V4_FLOW || + info->fs.flow_type == UDP_V4_FLOW) { + ports = (__be16 *)(n->data + ETH_HLEN + + sizeof(struct iphdr)); + eth->h_proto = htons(ETH_P_IP); + n->tuple.eth_proto = htons(ETH_P_IP); + n->tuple.src_ipv4 = info->fs.h_u.tcp_ip4_spec.ip4src; + n->tuple.dst_ipv4 = info->fs.h_u.tcp_ip4_spec.ip4dst; + n->tuple.src_port = info->fs.h_u.tcp_ip4_spec.psrc; + n->tuple.dst_port = info->fs.h_u.tcp_ip4_spec.pdst; + ports[0] = n->tuple.src_port; + ports[1] = n->tuple.dst_port; + ip = (struct iphdr *)(n->data + ETH_HLEN); + ip->saddr = info->fs.h_u.tcp_ip4_spec.ip4src; + ip->daddr = info->fs.h_u.tcp_ip4_spec.ip4dst; + ip->version = 0x4; + ip->ihl = 0x5; + + if (info->fs.flow_type == TCP_V4_FLOW) { + n->tuple.ip_proto = IPPROTO_TCP; + ip->protocol = IPPROTO_TCP; + } else { + n->tuple.ip_proto = IPPROTO_UDP; + ip->protocol = IPPROTO_UDP; + } + ip->tot_len = cpu_to_be16(min_hlen - ETH_HLEN); + } else { + struct ipv6hdr *ip6; + + ip6 = (struct ipv6hdr *)(n->data + ETH_HLEN); + ports = (__be16 *)(n->data + ETH_HLEN + + sizeof(struct ipv6hdr)); + eth->h_proto = htons(ETH_P_IPV6); + n->tuple.eth_proto = htons(ETH_P_IPV6); + memcpy(&n->tuple.src_ipv6, &info->fs.h_u.tcp_ip6_spec.ip6src, + sizeof(struct in6_addr)); + memcpy(&n->tuple.dst_ipv6, &info->fs.h_u.tcp_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + n->tuple.src_port = info->fs.h_u.tcp_ip6_spec.psrc; + n->tuple.dst_port = info->fs.h_u.tcp_ip6_spec.pdst; + ports[0] = n->tuple.src_port; + ports[1] = n->tuple.dst_port; + memcpy(&ip6->saddr, &n->tuple.src_ipv6, + sizeof(struct in6_addr)); + memcpy(&ip6->daddr, &n->tuple.dst_ipv6, + sizeof(struct in6_addr)); + ip6->version = 0x6; + + if (info->fs.flow_type == TCP_V6_FLOW) { + n->tuple.ip_proto = IPPROTO_TCP; + ip6->nexthdr = NEXTHDR_TCP; + ip6->payload_len = cpu_to_be16(sizeof(struct tcphdr)); + } else { + n->tuple.ip_proto = IPPROTO_UDP; + ip6->nexthdr = NEXTHDR_UDP; + ip6->payload_len = cpu_to_be16(sizeof(struct udphdr)); + } + } + + rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0); + if (rc) + goto unlock; + + qede_configure_arfs_fltr(edev, n, n->rxq_id, true); + rc = qede_poll_arfs_filter_config(edev, n); +unlock: + __qede_unlock(edev); + return rc; +} + +int qede_del_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info) +{ + struct ethtool_rx_flow_spec *fsp = &info->fs; + struct qede_arfs_fltr_node *fltr = NULL; + int rc = -EPERM; + + __qede_lock(edev); + if (!edev->arfs) + goto unlock; + + fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0), + fsp->location); + if (!fltr) + goto unlock; + + qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id, false); + + rc = qede_poll_arfs_filter_config(edev, fltr); + if (rc == 0) + qede_dequeue_fltr_and_config_searcher(edev, fltr); + +unlock: + __qede_unlock(edev); + return rc; +} + +int qede_get_arfs_filter_count(struct qede_dev *edev) +{ + int count = 0; + + __qede_lock(edev); + + if (!edev->arfs) + goto unlock; + + count = 
edev->arfs->filter_count; + +unlock: + __qede_unlock(edev); + return count; +} diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 06ca13dd9ddb..e5ee9f274a71 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -873,9 +873,7 @@ static void qede_update_pf_params(struct qed_dev *cdev) */ pf_params.eth_pf_params.num_vf_cons = 48; -#ifdef CONFIG_RFS_ACCEL pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR; -#endif qed_ops->common->update_pf_params(cdev, &pf_params); } @@ -1984,12 +1982,12 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode, qede_vlan_mark_nonconfigured(edev); edev->ops->fastpath_stop(edev->cdev); -#ifdef CONFIG_RFS_ACCEL + if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) { qede_poll_for_freeing_arfs_filters(edev); qede_free_arfs(edev); } -#endif + /* Release the interrupts */ qede_sync_free_irqs(edev); edev->ops->common->set_fp_int(edev->cdev, 0); @@ -2041,13 +2039,12 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode, if (rc) goto err2; -#ifdef CONFIG_RFS_ACCEL if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) { rc = qede_alloc_arfs(edev); if (rc) DP_NOTICE(edev, "aRFS memory allocation failed\n"); } -#endif + qede_napi_add_enable(edev); DP_INFO(edev, "Napi added and enabled\n"); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c index be41e4c77b65..c48a0e2d4d7e 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c @@ -592,13 +592,9 @@ qlcnic_receive_peg_ready(struct qlcnic_adapter *adapter) } while (--retries); - if (!retries) { - dev_err(&adapter->pdev->dev, "Receive Peg initialization not " - "complete, state: 0x%x.\n", val); - return -EIO; - } - - return 0; + dev_err(&adapter->pdev->dev, "Receive Peg initialization not complete, state: 0x%x.\n", + val); + return -EIO; } int diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c index 0844b7c75767..afa10a163da1 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c @@ -1285,7 +1285,7 @@ int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter) int qlcnic_dump_fw(struct qlcnic_adapter *adapter) { struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; - static const struct qlcnic_dump_operations *fw_dump_ops; + const struct qlcnic_dump_operations *fw_dump_ops; struct qlcnic_83xx_dump_template_hdr *hdr_83xx; u32 entry_offset, dump, no_entries, buf_offset = 0; int i, k, ops_cnt, ops_index, dump_size = 0; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index 73027a6c06c7..287d89dd086f 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c @@ -1174,19 +1174,19 @@ static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp, return size; } -static struct device_attribute dev_attr_bridged_mode = { +static const struct device_attribute dev_attr_bridged_mode = { .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)}, .show = qlcnic_show_bridged_mode, .store = qlcnic_store_bridged_mode, }; -static struct device_attribute dev_attr_diag_mode = { +static const struct device_attribute dev_attr_diag_mode = { .attr = {.name = "diag_mode", 
.mode = (S_IRUGO | S_IWUSR)}, .show = qlcnic_show_diag_mode, .store = qlcnic_store_diag_mode, }; -static struct device_attribute dev_attr_beacon = { +static const struct device_attribute dev_attr_beacon = { .attr = {.name = "beacon", .mode = (S_IRUGO | S_IWUSR)}, .show = qlcnic_show_beacon, .store = qlcnic_store_beacon, @@ -1248,7 +1248,7 @@ static const struct bin_attribute bin_attr_pm_config = { .write = qlcnic_sysfs_write_pm_config, }; -static struct bin_attribute bin_attr_flash = { +static const struct bin_attribute bin_attr_flash = { .attr = {.name = "flash", .mode = (S_IRUGO | S_IWUSR)}, .size = 0, .read = qlcnic_83xx_sysfs_flash_read_handler, diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c index e3223f2fe2ff..fe2599b83d09 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c @@ -144,42 +144,23 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev, xaui_direct_valid = xaui_indirect_valid = 1; /* The XAUI needs to be read out per port */ - if (qdev->func & 1) { - /* We are NIC 2 */ - status = ql_read_other_func_serdes_reg(qdev, - XG_SERDES_XAUI_HSS_PCS_START, &temp); - if (status) - temp = XG_SERDES_ADDR_XAUI_PWR_DOWN; - if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) == - XG_SERDES_ADDR_XAUI_PWR_DOWN) - xaui_indirect_valid = 0; + status = ql_read_other_func_serdes_reg(qdev, + XG_SERDES_XAUI_HSS_PCS_START, &temp); + if (status) + temp = XG_SERDES_ADDR_XAUI_PWR_DOWN; - status = ql_read_serdes_reg(qdev, - XG_SERDES_XAUI_HSS_PCS_START, &temp); - if (status) - temp = XG_SERDES_ADDR_XAUI_PWR_DOWN; + if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) == + XG_SERDES_ADDR_XAUI_PWR_DOWN) + xaui_indirect_valid = 0; - if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) == - XG_SERDES_ADDR_XAUI_PWR_DOWN) - xaui_direct_valid = 0; - } else { - /* We are NIC 1 */ - status = ql_read_other_func_serdes_reg(qdev, - XG_SERDES_XAUI_HSS_PCS_START, &temp); - if (status) - temp = XG_SERDES_ADDR_XAUI_PWR_DOWN; - if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) == - XG_SERDES_ADDR_XAUI_PWR_DOWN) - xaui_indirect_valid = 0; + status = ql_read_serdes_reg(qdev, XG_SERDES_XAUI_HSS_PCS_START, &temp); - status = ql_read_serdes_reg(qdev, - XG_SERDES_XAUI_HSS_PCS_START, &temp); - if (status) - temp = XG_SERDES_ADDR_XAUI_PWR_DOWN; - if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) == - XG_SERDES_ADDR_XAUI_PWR_DOWN) - xaui_direct_valid = 0; - } + if (status) + temp = XG_SERDES_ADDR_XAUI_PWR_DOWN; + + if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) == + XG_SERDES_ADDR_XAUI_PWR_DOWN) + xaui_direct_valid = 0; /* * XFI register is shared so only need to read one diff --git a/drivers/net/ethernet/qualcomm/Kconfig b/drivers/net/ethernet/qualcomm/Kconfig index 877675a27b9f..f5200712718d 100644 --- a/drivers/net/ethernet/qualcomm/Kconfig +++ b/drivers/net/ethernet/qualcomm/Kconfig @@ -59,4 +59,6 @@ config QCOM_EMAC low power, Receive-Side Scaling (RSS), and IEEE 1588-2008 Precision Clock Synchronization Protocol. 
+source "drivers/net/ethernet/qualcomm/rmnet/Kconfig" + endif # NET_VENDOR_QUALCOMM diff --git a/drivers/net/ethernet/qualcomm/Makefile b/drivers/net/ethernet/qualcomm/Makefile index 92fa7c4da90a..1847350f48a7 100644 --- a/drivers/net/ethernet/qualcomm/Makefile +++ b/drivers/net/ethernet/qualcomm/Makefile @@ -9,3 +9,5 @@ obj-$(CONFIG_QCA7000_UART) += qcauart.o qcauart-objs := qca_uart.o obj-y += emac/ + +obj-$(CONFIG_RMNET) += rmnet/ diff --git a/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c index bbe24639aa5a..c8c6231b87f3 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c @@ -88,6 +88,8 @@ static void emac_set_msglevel(struct net_device *netdev, u32 data) static int emac_get_sset_count(struct net_device *netdev, int sset) { switch (sset) { + case ETH_SS_PRIV_FLAGS: + return 1; case ETH_SS_STATS: return EMAC_STATS_LEN; default: @@ -100,6 +102,10 @@ static void emac_get_strings(struct net_device *netdev, u32 stringset, u8 *data) unsigned int i; switch (stringset) { + case ETH_SS_PRIV_FLAGS: + strcpy(data, "single-pause-mode"); + break; + case ETH_SS_STATS: for (i = 0; i < EMAC_STATS_LEN; i++) { strlcpy(data, emac_ethtool_stat_strings[i], @@ -230,6 +236,27 @@ static int emac_get_regs_len(struct net_device *netdev) return EMAC_MAX_REG_SIZE * sizeof(u32); } +#define EMAC_PRIV_ENABLE_SINGLE_PAUSE BIT(0) + +static int emac_set_priv_flags(struct net_device *netdev, u32 flags) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + + adpt->single_pause_mode = !!(flags & EMAC_PRIV_ENABLE_SINGLE_PAUSE); + + if (netif_running(netdev)) + return emac_reinit_locked(adpt); + + return 0; +} + +static u32 emac_get_priv_flags(struct net_device *netdev) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + + return adpt->single_pause_mode ? EMAC_PRIV_ENABLE_SINGLE_PAUSE : 0; +} + static const struct ethtool_ops emac_ethtool_ops = { .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, @@ -253,6 +280,9 @@ static const struct ethtool_ops emac_ethtool_ops = { .get_regs_len = emac_get_regs_len, .get_regs = emac_get_regs, + + .set_priv_flags = emac_set_priv_flags, + .get_priv_flags = emac_get_priv_flags, }; void emac_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index bcd4708b3745..3ed9033e56db 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c @@ -551,6 +551,28 @@ static void emac_mac_start(struct emac_adapter *adpt) mac &= ~(HUGEN | VLAN_STRIP | TPAUSE | SIMR | HUGE | MULTI_ALL | DEBUG_MODE | SINGLE_PAUSE_MODE); + /* Enable single-pause-frame mode if requested. + * + * If enabled, the EMAC will send a single pause frame when the RX + * queue is full. This normally leads to packet loss because + * the pause frame disables the remote MAC only for 33ms (the quanta), + * and then the remote MAC continues sending packets even though + * the RX queue is still full. + * + * If disabled, the EMAC sends a pause frame every 31ms until the RX + * queue is no longer full. Normally, this is the preferred + * method of operation. However, when the system is hung (e.g. + * cores are halted), the EMAC interrupt handler is never called + * and so the RX queue fills up quickly and stays full. 
The resulting + * non-stop "flood" of pause frames sometimes has the effect of + * disabling nearby switches. In some cases, other nearby switches + * are also affected, shutting down the entire network. + * + * The user can enable or disable single-pause-frame mode + * via ethtool. + */ + mac |= adpt->single_pause_mode ? SINGLE_PAUSE_MODE : 0; + writel_relaxed(csr1, adpt->csr + EMAC_EMAC_WRAPPER_CSR1); writel_relaxed(mac, adpt->base + EMAC_MAC_CTRL); @@ -876,7 +898,8 @@ static void emac_mac_rx_descs_refill(struct emac_adapter *adpt, curr_rxbuf->dma_addr = dma_map_single(adpt->netdev->dev.parent, skb->data, - curr_rxbuf->length, DMA_FROM_DEVICE); + adpt->rxbuf_size, DMA_FROM_DEVICE); + ret = dma_mapping_error(adpt->netdev->dev.parent, curr_rxbuf->dma_addr); if (ret) { diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index 60850bfa3d32..759543512117 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -443,6 +443,9 @@ static void emac_init_adapter(struct emac_adapter *adpt) /* default to automatic flow control */ adpt->automatic = true; + + /* Disable single-pause-frame mode by default */ + adpt->single_pause_mode = false; } /* Get the clock */ diff --git a/drivers/net/ethernet/qualcomm/emac/emac.h b/drivers/net/ethernet/qualcomm/emac/emac.h index 8ee4ec6aef2e..d7c9f44209d4 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.h +++ b/drivers/net/ethernet/qualcomm/emac/emac.h @@ -363,6 +363,9 @@ struct emac_adapter { bool tx_flow_control; bool rx_flow_control; + /* True == use single-pause-frame mode. */ + bool single_pause_mode; + /* Ring parameter */ u8 tpd_burst; u8 rfd_burst; diff --git a/drivers/net/ethernet/qualcomm/rmnet/Kconfig b/drivers/net/ethernet/qualcomm/rmnet/Kconfig new file mode 100644 index 000000000000..6e2587af47a4 --- /dev/null +++ b/drivers/net/ethernet/qualcomm/rmnet/Kconfig @@ -0,0 +1,12 @@ +# +# RMNET MAP driver +# + +menuconfig RMNET + tristate "RmNet MAP driver" + default n + ---help--- + If you select this, you will enable the RMNET module which is used + for handling data in the multiplexing and aggregation protocol (MAP) + format in the embedded data path. RMNET devices can be attached to + any IP mode physical device. diff --git a/drivers/net/ethernet/qualcomm/rmnet/Makefile b/drivers/net/ethernet/qualcomm/rmnet/Makefile new file mode 100644 index 000000000000..01bddf207cac --- /dev/null +++ b/drivers/net/ethernet/qualcomm/rmnet/Makefile @@ -0,0 +1,10 @@ +# +# Makefile for the RMNET module +# + +rmnet-y := rmnet_config.o +rmnet-y += rmnet_vnd.o +rmnet-y += rmnet_handlers.o +rmnet-y += rmnet_map_data.o +rmnet-y += rmnet_map_command.o +obj-$(CONFIG_RMNET) += rmnet.o diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c new file mode 100644 index 000000000000..1e33aea59f50 --- /dev/null +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c @@ -0,0 +1,353 @@ +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ * + * RMNET configuration engine + * + */ + +#include +#include +#include +#include +#include "rmnet_config.h" +#include "rmnet_handlers.h" +#include "rmnet_vnd.h" +#include "rmnet_private.h" + +/* Locking scheme - + * The shared resource which needs to be protected is realdev->rx_handler_data. + * For the writer path, this is using rtnl_lock(). The writer paths are + * rmnet_newlink(), rmnet_dellink() and rmnet_force_unassociate_device(). These + * paths are already called with rtnl_lock() acquired. There is also an + * ASSERT_RTNL() to ensure that we are calling with rtnl acquired. For + * dereference here, we will need to use rtnl_dereference(). Dev list writing + * needs to happen with rtnl_lock() acquired for netdev_master_upper_dev_link(). + * For the reader path, the real_dev->rx_handler_data is accessed in the TX / RX + * path. We only need rcu_read_lock() for these scenarios. In these cases, + * the rcu_read_lock() is held in __dev_queue_xmit() and + * netif_receive_skb_internal(), so readers need to use rcu_dereference_rtnl() + * to get the relevant information. For dev list reading, we again acquire + * rcu_read_lock() in rmnet_dellink() for netdev_master_upper_dev_get_rcu(). + * We also use unregister_netdevice_many() to free all rmnet devices in + * rmnet_force_unassociate_device() so we don't lose the rtnl_lock() and free in + * the same context. + */ + +/* Local Definitions and Declarations */ + +struct rmnet_walk_data { + struct net_device *real_dev; + struct list_head *head; + struct rmnet_port *port; +}; + +static int rmnet_is_real_dev_registered(const struct net_device *real_dev) +{ + return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler; +} + +/* Needs rtnl lock */ +static struct rmnet_port* +rmnet_get_port_rtnl(const struct net_device *real_dev) +{ + return rtnl_dereference(real_dev->rx_handler_data); +} + +static struct rmnet_endpoint* +rmnet_get_endpoint(struct net_device *dev, int config_id) +{ + struct rmnet_endpoint *ep; + struct rmnet_port *port; + + if (!rmnet_is_real_dev_registered(dev)) { + ep = rmnet_vnd_get_endpoint(dev); + } else { + port = rmnet_get_port_rtnl(dev); + + ep = &port->muxed_ep[config_id]; + } + + return ep; +} + +static int rmnet_unregister_real_device(struct net_device *real_dev, + struct rmnet_port *port) +{ + if (port->nr_rmnet_devs) + return -EINVAL; + + kfree(port); + + netdev_rx_handler_unregister(real_dev); + + /* release reference on real_dev */ + dev_put(real_dev); + + netdev_dbg(real_dev, "Removed from rmnet\n"); + return 0; +} + +static int rmnet_register_real_device(struct net_device *real_dev) +{ + struct rmnet_port *port; + int rc; + + ASSERT_RTNL(); + + if (rmnet_is_real_dev_registered(real_dev)) + return 0; + + port = kzalloc(sizeof(*port), GFP_ATOMIC); + if (!port) + return -ENOMEM; + + port->dev = real_dev; + rc = netdev_rx_handler_register(real_dev, rmnet_rx_handler, port); + if (rc) { + kfree(port); + return -EBUSY; + } + + /* hold on to real dev for MAP data */ + dev_hold(real_dev); + + netdev_dbg(real_dev, "registered with rmnet\n"); + return 0; +} + +static void rmnet_set_endpoint_config(struct net_device *dev, + u8 mux_id, u8 rmnet_mode, + struct net_device *egress_dev) +{ + struct rmnet_endpoint *ep; + + netdev_dbg(dev, "id %d mode %d dev %s\n", + mux_id, rmnet_mode, egress_dev->name); + + ep = rmnet_get_endpoint(dev, mux_id); + /* This config is cleared on every set, so it's ok to not + * clear it on a device delete.
+ */ + memset(ep, 0, sizeof(struct rmnet_endpoint)); + ep->rmnet_mode = rmnet_mode; + ep->egress_dev = egress_dev; + ep->mux_id = mux_id; +} + +static int rmnet_newlink(struct net *src_net, struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + int ingress_format = RMNET_INGRESS_FORMAT_DEMUXING | + RMNET_INGRESS_FORMAT_DEAGGREGATION | + RMNET_INGRESS_FORMAT_MAP; + int egress_format = RMNET_EGRESS_FORMAT_MUXING | + RMNET_EGRESS_FORMAT_MAP; + struct net_device *real_dev; + int mode = RMNET_EPMODE_VND; + struct rmnet_port *port; + int err = 0; + u16 mux_id; + + real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); + if (!real_dev || !dev) + return -ENODEV; + + if (!data[IFLA_VLAN_ID]) + return -EINVAL; + + mux_id = nla_get_u16(data[IFLA_VLAN_ID]); + + err = rmnet_register_real_device(real_dev); + if (err) + goto err0; + + port = rmnet_get_port_rtnl(real_dev); + err = rmnet_vnd_newlink(mux_id, dev, port, real_dev); + if (err) + goto err1; + + err = netdev_master_upper_dev_link(dev, real_dev, NULL, NULL); + if (err) + goto err2; + + netdev_dbg(dev, "data format [ingress 0x%08X] [egress 0x%08X]\n", + ingress_format, egress_format); + port->egress_data_format = egress_format; + port->ingress_data_format = ingress_format; + + rmnet_set_endpoint_config(real_dev, mux_id, mode, dev); + rmnet_set_endpoint_config(dev, mux_id, mode, real_dev); + return 0; + +err2: + rmnet_vnd_dellink(mux_id, port); +err1: + rmnet_unregister_real_device(real_dev, port); +err0: + return err; +} + +static void rmnet_dellink(struct net_device *dev, struct list_head *head) +{ + struct net_device *real_dev; + struct rmnet_port *port; + u8 mux_id; + + rcu_read_lock(); + real_dev = netdev_master_upper_dev_get_rcu(dev); + rcu_read_unlock(); + + if (!real_dev || !rmnet_is_real_dev_registered(real_dev)) + return; + + port = rmnet_get_port_rtnl(real_dev); + + mux_id = rmnet_vnd_get_mux(dev); + rmnet_vnd_dellink(mux_id, port); + netdev_upper_dev_unlink(dev, real_dev); + rmnet_unregister_real_device(real_dev, port); + + unregister_netdevice_queue(dev, head); +} + +static int rmnet_dev_walk_unreg(struct net_device *rmnet_dev, void *data) +{ + struct rmnet_walk_data *d = data; + u8 mux_id; + + mux_id = rmnet_vnd_get_mux(rmnet_dev); + + rmnet_vnd_dellink(mux_id, d->port); + netdev_upper_dev_unlink(rmnet_dev, d->real_dev); + unregister_netdevice_queue(rmnet_dev, d->head); + + return 0; +} + +static void rmnet_force_unassociate_device(struct net_device *dev) +{ + struct net_device *real_dev = dev; + struct rmnet_walk_data d; + struct rmnet_port *port; + LIST_HEAD(list); + + if (!rmnet_is_real_dev_registered(real_dev)) + return; + + ASSERT_RTNL(); + + d.real_dev = real_dev; + d.head = &list; + + port = rmnet_get_port_rtnl(dev); + d.port = port; + + rcu_read_lock(); + netdev_walk_all_lower_dev_rcu(real_dev, rmnet_dev_walk_unreg, &d); + rcu_read_unlock(); + unregister_netdevice_many(&list); + + rmnet_unregister_real_device(real_dev, port); +} + +static int rmnet_config_notify_cb(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct net_device *dev = netdev_notifier_info_to_dev(data); + + if (!dev) + return NOTIFY_DONE; + + switch (event) { + case NETDEV_UNREGISTER: + netdev_dbg(dev, "Kernel unregister\n"); + rmnet_force_unassociate_device(dev); + break; + + default: + break; + } + + return NOTIFY_DONE; +} + +static struct notifier_block rmnet_dev_notifier __read_mostly = { + .notifier_call = rmnet_config_notify_cb, +}; + +static int 
rmnet_rtnl_validate(struct nlattr *tb[], struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + u16 mux_id; + + if (!data || !data[IFLA_VLAN_ID]) + return -EINVAL; + + mux_id = nla_get_u16(data[IFLA_VLAN_ID]); + if (mux_id > (RMNET_MAX_LOGICAL_EP - 1)) + return -ERANGE; + + return 0; +} + +static size_t rmnet_get_size(const struct net_device *dev) +{ + return nla_total_size(2); /* IFLA_VLAN_ID */ +} + +struct rtnl_link_ops rmnet_link_ops __read_mostly = { + .kind = "rmnet", + .maxtype = __IFLA_VLAN_MAX, + .priv_size = sizeof(struct rmnet_priv), + .setup = rmnet_vnd_setup, + .validate = rmnet_rtnl_validate, + .newlink = rmnet_newlink, + .dellink = rmnet_dellink, + .get_size = rmnet_get_size, +}; + +/* Needs either rcu_read_lock() or rtnl lock */ +struct rmnet_port *rmnet_get_port(struct net_device *real_dev) +{ + if (rmnet_is_real_dev_registered(real_dev)) + return rcu_dereference_rtnl(real_dev->rx_handler_data); + else + return NULL; +} + +/* Startup/Shutdown */ + +static int __init rmnet_init(void) +{ + int rc; + + rc = register_netdevice_notifier(&rmnet_dev_notifier); + if (rc != 0) + return rc; + + rc = rtnl_link_register(&rmnet_link_ops); + if (rc != 0) { + unregister_netdevice_notifier(&rmnet_dev_notifier); + return rc; + } + return rc; +} + +static void __exit rmnet_exit(void) +{ + unregister_netdevice_notifier(&rmnet_dev_notifier); + rtnl_link_unregister(&rmnet_link_ops); +} + +module_init(rmnet_init) +module_exit(rmnet_exit) +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h new file mode 100644 index 000000000000..dde4e9f14f4a --- /dev/null +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h @@ -0,0 +1,55 @@ +/* Copyright (c) 2013-2014, 2016-2017 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * RMNET Data configuration engine + * + */ + +#include + +#ifndef _RMNET_CONFIG_H_ +#define _RMNET_CONFIG_H_ + +#define RMNET_MAX_LOGICAL_EP 255 + +/* Information about the next device to deliver the packet to. + * Exact usage of this parameter depends on the rmnet_mode. + */ +struct rmnet_endpoint { + u8 rmnet_mode; + u8 mux_id; + struct net_device *egress_dev; +}; + +/* One instance of this structure is instantiated for each real_dev associated + * with rmnet. 
+ */ +struct rmnet_port { + struct net_device *dev; + struct rmnet_endpoint local_ep; + struct rmnet_endpoint muxed_ep[RMNET_MAX_LOGICAL_EP]; + u32 ingress_data_format; + u32 egress_data_format; + struct net_device *rmnet_devices[RMNET_MAX_LOGICAL_EP]; + u8 nr_rmnet_devs; +}; + +extern struct rtnl_link_ops rmnet_link_ops; + +struct rmnet_priv { + struct rmnet_endpoint local_ep; + u8 mux_id; + struct net_device *real_dev; +}; + +struct rmnet_port *rmnet_get_port(struct net_device *real_dev); + +#endif /* _RMNET_CONFIG_H_ */ diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c new file mode 100644 index 000000000000..540c7622dcb1 --- /dev/null +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c @@ -0,0 +1,271 @@ +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * RMNET Data ingress/egress handler + * + */ + +#include +#include +#include "rmnet_private.h" +#include "rmnet_config.h" +#include "rmnet_vnd.h" +#include "rmnet_map.h" +#include "rmnet_handlers.h" + +#define RMNET_IP_VERSION_4 0x40 +#define RMNET_IP_VERSION_6 0x60 + +/* Helper Functions */ + +static void rmnet_set_skb_proto(struct sk_buff *skb) +{ + switch (skb->data[0] & 0xF0) { + case RMNET_IP_VERSION_4: + skb->protocol = htons(ETH_P_IP); + break; + case RMNET_IP_VERSION_6: + skb->protocol = htons(ETH_P_IPV6); + break; + default: + skb->protocol = htons(ETH_P_MAP); + break; + } +} + +/* Generic handler */ + +static rx_handler_result_t +rmnet_bridge_handler(struct sk_buff *skb, struct rmnet_endpoint *ep) +{ + if (!ep->egress_dev) + kfree_skb(skb); + else + rmnet_egress_handler(skb, ep); + + return RX_HANDLER_CONSUMED; +} + +static rx_handler_result_t +rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_endpoint *ep) +{ + switch (ep->rmnet_mode) { + case RMNET_EPMODE_NONE: + return RX_HANDLER_PASS; + + case RMNET_EPMODE_BRIDGE: + return rmnet_bridge_handler(skb, ep); + + case RMNET_EPMODE_VND: + skb_reset_transport_header(skb); + skb_reset_network_header(skb); + rmnet_vnd_rx_fixup(skb, skb->dev); + + skb->pkt_type = PACKET_HOST; + skb_set_mac_header(skb, 0); + netif_receive_skb(skb); + return RX_HANDLER_CONSUMED; + + default: + kfree_skb(skb); + return RX_HANDLER_CONSUMED; + } +} + +static rx_handler_result_t +rmnet_ingress_deliver_packet(struct sk_buff *skb, + struct rmnet_port *port) +{ + if (!port) { + kfree_skb(skb); + return RX_HANDLER_CONSUMED; + } + + skb->dev = port->local_ep.egress_dev; + + return rmnet_deliver_skb(skb, &port->local_ep); +} + +/* MAP handler */ + +static rx_handler_result_t +__rmnet_map_ingress_handler(struct sk_buff *skb, + struct rmnet_port *port) +{ + struct rmnet_endpoint *ep; + u8 mux_id; + u16 len; + + if (RMNET_MAP_GET_CD_BIT(skb)) { + if (port->ingress_data_format + & RMNET_INGRESS_FORMAT_MAP_COMMANDS) + return rmnet_map_command(skb, port); + + kfree_skb(skb); + return RX_HANDLER_CONSUMED; + } + + mux_id = RMNET_MAP_GET_MUX_ID(skb); + len = RMNET_MAP_GET_LENGTH(skb) - RMNET_MAP_GET_PAD(skb); + + if (mux_id >= RMNET_MAX_LOGICAL_EP) { + 
kfree_skb(skb); + return RX_HANDLER_CONSUMED; + } + + ep = &port->muxed_ep[mux_id]; + + if (port->ingress_data_format & RMNET_INGRESS_FORMAT_DEMUXING) + skb->dev = ep->egress_dev; + + /* Subtract MAP header */ + skb_pull(skb, sizeof(struct rmnet_map_header)); + skb_trim(skb, len); + rmnet_set_skb_proto(skb); + return rmnet_deliver_skb(skb, ep); +} + +static rx_handler_result_t +rmnet_map_ingress_handler(struct sk_buff *skb, + struct rmnet_port *port) +{ + struct sk_buff *skbn; + int rc; + + if (port->ingress_data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) { + while ((skbn = rmnet_map_deaggregate(skb)) != NULL) + __rmnet_map_ingress_handler(skbn, port); + + consume_skb(skb); + rc = RX_HANDLER_CONSUMED; + } else { + rc = __rmnet_map_ingress_handler(skb, port); + } + + return rc; +} + +static int rmnet_map_egress_handler(struct sk_buff *skb, + struct rmnet_port *port, + struct rmnet_endpoint *ep, + struct net_device *orig_dev) +{ + int required_headroom, additional_header_len; + struct rmnet_map_header *map_header; + + additional_header_len = 0; + required_headroom = sizeof(struct rmnet_map_header); + + if (skb_headroom(skb) < required_headroom) { + if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL)) + return RMNET_MAP_CONSUMED; + } + + map_header = rmnet_map_add_map_header(skb, additional_header_len, 0); + if (!map_header) + return RMNET_MAP_CONSUMED; + + if (port->egress_data_format & RMNET_EGRESS_FORMAT_MUXING) { + if (ep->mux_id == 0xff) + map_header->mux_id = 0; + else + map_header->mux_id = ep->mux_id; + } + + skb->protocol = htons(ETH_P_MAP); + + return RMNET_MAP_SUCCESS; +} + +/* Ingress / Egress Entry Points */ + +/* Processes packet as per ingress data format for receiving device. Logical + * endpoint is determined from packet inspection. Packet is then sent to the + * egress device listed in the logical endpoint configuration. + */ +rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb) +{ + struct rmnet_port *port; + struct sk_buff *skb = *pskb; + struct net_device *dev; + int rc; + + if (!skb) + return RX_HANDLER_CONSUMED; + + dev = skb->dev; + port = rmnet_get_port(dev); + + if (port->ingress_data_format & RMNET_INGRESS_FORMAT_MAP) { + rc = rmnet_map_ingress_handler(skb, port); + } else { + switch (ntohs(skb->protocol)) { + case ETH_P_MAP: + if (port->local_ep.rmnet_mode == + RMNET_EPMODE_BRIDGE) { + rc = rmnet_ingress_deliver_packet(skb, port); + } else { + kfree_skb(skb); + rc = RX_HANDLER_CONSUMED; + } + break; + + case ETH_P_IP: + case ETH_P_IPV6: + rc = rmnet_ingress_deliver_packet(skb, port); + break; + + default: + rc = RX_HANDLER_PASS; + } + } + + return rc; +} + +/* Modifies packet as per logical endpoint configuration and egress data format + * for egress device configured in logical endpoint. Packet is then transmitted + * on the egress device. 
+ */ +void rmnet_egress_handler(struct sk_buff *skb, + struct rmnet_endpoint *ep) +{ + struct net_device *orig_dev; + struct rmnet_port *port; + + orig_dev = skb->dev; + skb->dev = ep->egress_dev; + + port = rmnet_get_port(skb->dev); + if (!port) { + kfree_skb(skb); + return; + } + + if (port->egress_data_format & RMNET_EGRESS_FORMAT_MAP) { + switch (rmnet_map_egress_handler(skb, port, ep, orig_dev)) { + case RMNET_MAP_CONSUMED: + return; + + case RMNET_MAP_SUCCESS: + break; + + default: + kfree_skb(skb); + return; + } + } + + if (ep->rmnet_mode == RMNET_EPMODE_VND) + rmnet_vnd_tx_fixup(skb, orig_dev); + + dev_queue_xmit(skb); +} diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h new file mode 100644 index 000000000000..f2638cf5693c --- /dev/null +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h @@ -0,0 +1,26 @@ +/* Copyright (c) 2013, 2016-2017 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * RMNET Data ingress/egress handler + * + */ + +#ifndef _RMNET_HANDLERS_H_ +#define _RMNET_HANDLERS_H_ + +#include "rmnet_config.h" + +void rmnet_egress_handler(struct sk_buff *skb, + struct rmnet_endpoint *ep); + +rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb); + +#endif /* _RMNET_HANDLERS_H_ */ diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h new file mode 100644 index 000000000000..ce2302c25b12 --- /dev/null +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h @@ -0,0 +1,86 @@ +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _RMNET_MAP_H_ +#define _RMNET_MAP_H_ + +struct rmnet_map_control_command { + u8 command_name; + u8 cmd_type:2; + u8 reserved:6; + u16 reserved2; + u32 transaction_id; + union { + struct { + u16 ip_family:2; + u16 reserved:14; + u16 flow_control_seq_num; + u32 qos_id; + } flow_control; + u8 data[0]; + }; +} __aligned(1); + +enum rmnet_map_results { + RMNET_MAP_SUCCESS, + RMNET_MAP_CONSUMED, + RMNET_MAP_GENERAL_FAILURE, + RMNET_MAP_NOT_ENABLED, + RMNET_MAP_FAILED_AGGREGATION, + RMNET_MAP_FAILED_MUX +}; + +enum rmnet_map_commands { + RMNET_MAP_COMMAND_NONE, + RMNET_MAP_COMMAND_FLOW_DISABLE, + RMNET_MAP_COMMAND_FLOW_ENABLE, + /* These should always be the last 2 elements */ + RMNET_MAP_COMMAND_UNKNOWN, + RMNET_MAP_COMMAND_ENUM_LENGTH +}; + +struct rmnet_map_header { + u8 pad_len:6; + u8 reserved_bit:1; + u8 cd_bit:1; + u8 mux_id; + u16 pkt_len; +} __aligned(1); + +#define RMNET_MAP_GET_MUX_ID(Y) (((struct rmnet_map_header *) \ + (Y)->data)->mux_id) +#define RMNET_MAP_GET_CD_BIT(Y) (((struct rmnet_map_header *) \ + (Y)->data)->cd_bit) +#define RMNET_MAP_GET_PAD(Y) (((struct rmnet_map_header *) \ + (Y)->data)->pad_len) +#define RMNET_MAP_GET_CMD_START(Y) ((struct rmnet_map_control_command *) \ + ((Y)->data + \ + sizeof(struct rmnet_map_header))) +#define RMNET_MAP_GET_LENGTH(Y) (ntohs(((struct rmnet_map_header *) \ + (Y)->data)->pkt_len)) + +#define RMNET_MAP_COMMAND_REQUEST 0 +#define RMNET_MAP_COMMAND_ACK 1 +#define RMNET_MAP_COMMAND_UNSUPPORTED 2 +#define RMNET_MAP_COMMAND_INVALID 3 + +#define RMNET_MAP_NO_PAD_BYTES 0 +#define RMNET_MAP_ADD_PAD_BYTES 1 + +u8 rmnet_map_demultiplex(struct sk_buff *skb); +struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb); +struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb, + int hdrlen, int pad); +rx_handler_result_t rmnet_map_command(struct sk_buff *skb, + struct rmnet_port *port); + +#endif /* _RMNET_MAP_H_ */ diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c new file mode 100644 index 000000000000..d1ea5e21b982 --- /dev/null +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c @@ -0,0 +1,106 @@ +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include "rmnet_config.h" +#include "rmnet_map.h" +#include "rmnet_private.h" +#include "rmnet_vnd.h" + +static u8 rmnet_map_do_flow_control(struct sk_buff *skb, + struct rmnet_port *rdinfo, + int enable) +{ + struct rmnet_map_control_command *cmd; + struct rmnet_endpoint *ep; + struct net_device *vnd; + u16 ip_family; + u16 fc_seq; + u32 qos_id; + u8 mux_id; + int r; + + mux_id = RMNET_MAP_GET_MUX_ID(skb); + cmd = RMNET_MAP_GET_CMD_START(skb); + + if (mux_id >= RMNET_MAX_LOGICAL_EP) { + kfree_skb(skb); + return RX_HANDLER_CONSUMED; + } + + ep = &rdinfo->muxed_ep[mux_id]; + vnd = ep->egress_dev; + + ip_family = cmd->flow_control.ip_family; + fc_seq = ntohs(cmd->flow_control.flow_control_seq_num); + qos_id = ntohl(cmd->flow_control.qos_id); + + /* Ignore the ip family and pass the sequence number for both v4 and v6 + * sequence. User space does not support creating dedicated flows for + * the 2 protocols + */ + r = rmnet_vnd_do_flow_control(vnd, enable); + if (r) { + kfree_skb(skb); + return RMNET_MAP_COMMAND_UNSUPPORTED; + } else { + return RMNET_MAP_COMMAND_ACK; + } +} + +static void rmnet_map_send_ack(struct sk_buff *skb, + unsigned char type) +{ + struct rmnet_map_control_command *cmd; + int xmit_status; + + skb->protocol = htons(ETH_P_MAP); + + cmd = RMNET_MAP_GET_CMD_START(skb); + cmd->cmd_type = type & 0x03; + + netif_tx_lock(skb->dev); + xmit_status = skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev); + netif_tx_unlock(skb->dev); +} + +/* Process MAP command frame and send N/ACK message as appropriate. Message cmd + * name is decoded here and appropriate handler is called. + */ +rx_handler_result_t rmnet_map_command(struct sk_buff *skb, + struct rmnet_port *port) +{ + struct rmnet_map_control_command *cmd; + unsigned char command_name; + unsigned char rc = 0; + + cmd = RMNET_MAP_GET_CMD_START(skb); + command_name = cmd->command_name; + + switch (command_name) { + case RMNET_MAP_COMMAND_FLOW_ENABLE: + rc = rmnet_map_do_flow_control(skb, port, 1); + break; + + case RMNET_MAP_COMMAND_FLOW_DISABLE: + rc = rmnet_map_do_flow_control(skb, port, 0); + break; + + default: + rc = RMNET_MAP_COMMAND_UNSUPPORTED; + kfree_skb(skb); + break; + } + if (rc == RMNET_MAP_COMMAND_ACK) + rmnet_map_send_ack(skb, rc); + return RX_HANDLER_CONSUMED; +} diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c new file mode 100644 index 000000000000..86b8c758f94e --- /dev/null +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c @@ -0,0 +1,102 @@ +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * RMNET Data MAP protocol + * + */ + +#include +#include "rmnet_config.h" +#include "rmnet_map.h" +#include "rmnet_private.h" + +#define RMNET_MAP_DEAGGR_SPACING 64 +#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2) + +/* Adds MAP header to front of skb->data + * Padding is calculated and set appropriately in MAP header. Mux ID is + * initialized to 0. 
+ */ +struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb, + int hdrlen, int pad) +{ + struct rmnet_map_header *map_header; + u32 padding, map_datalen; + u8 *padbytes; + + if (skb_headroom(skb) < sizeof(struct rmnet_map_header)) + return NULL; + + map_datalen = skb->len - hdrlen; + map_header = (struct rmnet_map_header *) + skb_push(skb, sizeof(struct rmnet_map_header)); + memset(map_header, 0, sizeof(struct rmnet_map_header)); + + if (pad == RMNET_MAP_NO_PAD_BYTES) { + map_header->pkt_len = htons(map_datalen); + return map_header; + } + + padding = ALIGN(map_datalen, 4) - map_datalen; + + if (padding == 0) + goto done; + + if (skb_tailroom(skb) < padding) + return NULL; + + padbytes = (u8 *)skb_put(skb, padding); + memset(padbytes, 0, padding); + +done: + map_header->pkt_len = htons(map_datalen + padding); + map_header->pad_len = padding & 0x3F; + + return map_header; +} + +/* Deaggregates a single packet + * A whole new buffer is allocated for each portion of an aggregated frame. + * Caller should keep calling deaggregate() on the source skb until 0 is + * returned, indicating that there are no more packets to deaggregate. Caller + * is responsible for freeing the original skb. + */ +struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb) +{ + struct rmnet_map_header *maph; + struct sk_buff *skbn; + u32 packet_len; + + if (skb->len == 0) + return NULL; + + maph = (struct rmnet_map_header *)skb->data; + packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header); + + if (((int)skb->len - (int)packet_len) < 0) + return NULL; + + /* Some hardware can send us empty frames. Catch them */ + if (ntohs(maph->pkt_len) == 0) + return NULL; + + skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC); + if (!skbn) + return NULL; + + skbn->dev = skb->dev; + skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM); + skb_put(skbn, packet_len); + memcpy(skbn->data, skb->data, packet_len); + skb_pull(skb, packet_len); + + return skbn; +} diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h new file mode 100644 index 000000000000..7967198fdd90 --- /dev/null +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h @@ -0,0 +1,44 @@ +/* Copyright (c) 2013-2014, 2016-2017 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _RMNET_PRIVATE_H_ +#define _RMNET_PRIVATE_H_ + +#define RMNET_MAX_PACKET_SIZE 16384 +#define RMNET_DFLT_PACKET_SIZE 1500 +#define RMNET_NEEDED_HEADROOM 16 +#define RMNET_TX_QUEUE_LEN 1000 + +/* Constants */ +#define RMNET_EGRESS_FORMAT__RESERVED__ BIT(0) +#define RMNET_EGRESS_FORMAT_MAP BIT(1) +#define RMNET_EGRESS_FORMAT_AGGREGATION BIT(2) +#define RMNET_EGRESS_FORMAT_MUXING BIT(3) +#define RMNET_EGRESS_FORMAT_MAP_CKSUMV3 BIT(4) +#define RMNET_EGRESS_FORMAT_MAP_CKSUMV4 BIT(5) + +#define RMNET_INGRESS_FIX_ETHERNET BIT(0) +#define RMNET_INGRESS_FORMAT_MAP BIT(1) +#define RMNET_INGRESS_FORMAT_DEAGGREGATION BIT(2) +#define RMNET_INGRESS_FORMAT_DEMUXING BIT(3) +#define RMNET_INGRESS_FORMAT_MAP_COMMANDS BIT(4) +#define RMNET_INGRESS_FORMAT_MAP_CKSUMV3 BIT(5) +#define RMNET_INGRESS_FORMAT_MAP_CKSUMV4 BIT(6) + +/* Pass the frame up the stack with no modifications to skb->dev */ +#define RMNET_EPMODE_NONE (0) +/* Replace skb->dev to a virtual rmnet device and pass up the stack */ +#define RMNET_EPMODE_VND (1) +/* Pass the frame directly to another device with dev_queue_xmit() */ +#define RMNET_EPMODE_BRIDGE (2) + +#endif /* _RMNET_PRIVATE_H_ */ diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c new file mode 100644 index 000000000000..7f90d5587653 --- /dev/null +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c @@ -0,0 +1,174 @@ +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * + * RMNET Data virtual network driver + * + */ + +#include +#include +#include +#include "rmnet_config.h" +#include "rmnet_handlers.h" +#include "rmnet_private.h" +#include "rmnet_map.h" +#include "rmnet_vnd.h" + +/* RX/TX Fixup */ + +void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev) +{ + dev->stats.rx_packets++; + dev->stats.rx_bytes += skb->len; +} + +void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev) +{ + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; +} + +/* Network Device Operations */ + +static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct rmnet_priv *priv; + + priv = netdev_priv(dev); + if (priv->local_ep.egress_dev) { + rmnet_egress_handler(skb, &priv->local_ep); + } else { + dev->stats.tx_dropped++; + kfree_skb(skb); + } + return NETDEV_TX_OK; +} + +static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu) +{ + if (new_mtu < 0 || new_mtu > RMNET_MAX_PACKET_SIZE) + return -EINVAL; + + rmnet_dev->mtu = new_mtu; + return 0; +} + +static int rmnet_vnd_get_iflink(const struct net_device *dev) +{ + struct rmnet_priv *priv = netdev_priv(dev); + + return priv->real_dev->ifindex; +} + +static const struct net_device_ops rmnet_vnd_ops = { + .ndo_start_xmit = rmnet_vnd_start_xmit, + .ndo_change_mtu = rmnet_vnd_change_mtu, + .ndo_get_iflink = rmnet_vnd_get_iflink, +}; + +/* Called by kernel whenever a new rmnet device is created. Sets MTU, + * flags, ARP type, needed headroom, etc... 
+ */ +void rmnet_vnd_setup(struct net_device *rmnet_dev) +{ + rmnet_dev->netdev_ops = &rmnet_vnd_ops; + rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE; + rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM; + random_ether_addr(rmnet_dev->dev_addr); + rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN; + + /* Raw IP mode */ + rmnet_dev->header_ops = NULL; /* No header */ + rmnet_dev->type = ARPHRD_RAWIP; + rmnet_dev->hard_header_len = 0; + rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); + + rmnet_dev->needs_free_netdev = true; +} + +/* Exposed API */ + +int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev, + struct rmnet_port *port, + struct net_device *real_dev) +{ + struct rmnet_priv *priv; + int rc; + + if (port->rmnet_devices[id]) + return -EINVAL; + + rc = register_netdevice(rmnet_dev); + if (!rc) { + port->rmnet_devices[id] = rmnet_dev; + port->nr_rmnet_devs++; + + rmnet_dev->rtnl_link_ops = &rmnet_link_ops; + + priv = netdev_priv(rmnet_dev); + priv->mux_id = id; + priv->real_dev = real_dev; + + netdev_dbg(rmnet_dev, "rmnet dev created\n"); + } + + return rc; +} + +int rmnet_vnd_dellink(u8 id, struct rmnet_port *port) +{ + if (id >= RMNET_MAX_LOGICAL_EP || !port->rmnet_devices[id]) + return -EINVAL; + + port->rmnet_devices[id] = NULL; + port->nr_rmnet_devs--; + return 0; +} + +u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev) +{ + struct rmnet_priv *priv; + + priv = netdev_priv(rmnet_dev); + return priv->mux_id; +} + +/* Gets the logical endpoint configuration for a RmNet virtual network device + * node. Caller should confirm that devices is a RmNet VND before calling. + */ +struct rmnet_endpoint *rmnet_vnd_get_endpoint(struct net_device *rmnet_dev) +{ + struct rmnet_priv *priv; + + if (!rmnet_dev) + return NULL; + + priv = netdev_priv(rmnet_dev); + + return &priv->local_ep; +} + +int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable) +{ + netdev_dbg(rmnet_dev, "Setting VND TX queue state to %d\n", enable); + /* Although we expect similar number of enable/disable + * commands, optimize for the disable. That is more + * latency sensitive than enable + */ + if (unlikely(enable)) + netif_wake_queue(rmnet_dev); + else + netif_stop_queue(rmnet_dev); + + return 0; +} diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h new file mode 100644 index 000000000000..8a4042f0f6bf --- /dev/null +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h @@ -0,0 +1,29 @@ +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * RMNET Data Virtual Network Device APIs + * + */ + +#ifndef _RMNET_VND_H_ +#define _RMNET_VND_H_ + +int rmnet_vnd_do_flow_control(struct net_device *dev, int enable); +struct rmnet_endpoint *rmnet_vnd_get_endpoint(struct net_device *dev); +int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev, + struct rmnet_port *port, + struct net_device *real_dev); +int rmnet_vnd_dellink(u8 id, struct rmnet_port *port); +void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev); +void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev); +u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev); +void rmnet_vnd_setup(struct net_device *dev); +#endif /* _RMNET_VND_H_ */ diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index ca22f2898664..d24b47b8e0b2 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c @@ -2135,11 +2135,12 @@ static int rtl8139_poll(struct napi_struct *napi, int budget) if (likely(RTL_R16(IntrStatus) & RxAckBits)) work_done += rtl8139_rx(dev, tp, budget); - if (work_done < budget && napi_complete_done(napi, work_done)) { + if (work_done < budget) { unsigned long flags; spin_lock_irqsave(&tp->lock, flags); - RTL_W16_F(IntrMask, rtl8139_intr_mask); + if (napi_complete_done(napi, work_done)) + RTL_W16_F(IntrMask, rtl8139_intr_mask); spin_unlock_irqrestore(&tp->lock, flags); } spin_unlock(&tp->rx_lock); diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index 0525bd696d5d..96a27b00c90e 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -991,6 +991,7 @@ struct ravb_private { struct net_device *ndev; struct platform_device *pdev; void __iomem *addr; + struct clk *clk; struct mdiobb_ctrl mdiobb; u32 num_rx_ring[NUM_RX_QUEUE]; u32 num_tx_ring[NUM_TX_QUEUE]; @@ -1033,6 +1034,7 @@ struct ravb_private { unsigned no_avb_link:1; unsigned avb_link_active_low:1; + unsigned wol_enabled:1; }; static inline u32 ravb_read(struct net_device *ndev, enum ravb_reg reg) diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 5931e859876c..fdf30bfa403b 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -680,6 +680,9 @@ static void ravb_emac_interrupt_unlocked(struct net_device *ndev) ecsr = ravb_read(ndev, ECSR); ravb_write(ndev, ecsr, ECSR); /* clear interrupt */ + + if (ecsr & ECSR_MPD) + pm_wakeup_event(&priv->pdev->dev, 0); if (ecsr & ECSR_ICD) ndev->stats.tx_carrier_errors++; if (ecsr & ECSR_LCHNG) { @@ -1330,6 +1333,33 @@ static int ravb_get_ts_info(struct net_device *ndev, return 0; } +static void ravb_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct ravb_private *priv = netdev_priv(ndev); + + wol->supported = 0; + wol->wolopts = 0; + + if (priv->clk) { + wol->supported = WAKE_MAGIC; + wol->wolopts = priv->wol_enabled ? 
WAKE_MAGIC : 0; + } +} + +static int ravb_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct ravb_private *priv = netdev_priv(ndev); + + if (!priv->clk || wol->wolopts & ~WAKE_MAGIC) + return -EOPNOTSUPP; + + priv->wol_enabled = !!(wol->wolopts & WAKE_MAGIC); + + device_set_wakeup_enable(&priv->pdev->dev, priv->wol_enabled); + + return 0; +} + static const struct ethtool_ops ravb_ethtool_ops = { .nway_reset = ravb_nway_reset, .get_msglevel = ravb_get_msglevel, @@ -1343,6 +1373,8 @@ static const struct ethtool_ops ravb_ethtool_ops = { .get_ts_info = ravb_get_ts_info, .get_link_ksettings = ravb_get_link_ksettings, .set_link_ksettings = ravb_set_link_ksettings, + .get_wol = ravb_get_wol, + .set_wol = ravb_set_wol, }; static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler, @@ -2041,6 +2073,11 @@ static int ravb_probe(struct platform_device *pdev) priv->chip_id = chip_id; + /* Get clock, if not found that's OK but Wake-On-Lan is unavailable */ + priv->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(priv->clk)) + priv->clk = NULL; + /* Set function */ ndev->netdev_ops = &ravb_netdev_ops; ndev->ethtool_ops = &ravb_ethtool_ops; @@ -2107,6 +2144,9 @@ static int ravb_probe(struct platform_device *pdev) if (error) goto out_napi_del; + if (priv->clk) + device_set_wakeup_capable(&pdev->dev, 1); + /* Print device information */ netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n", (u32)ndev->base_addr, ndev->dev_addr, ndev->irq); @@ -2160,15 +2200,66 @@ static int ravb_remove(struct platform_device *pdev) return 0; } +static int ravb_wol_setup(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + + /* Disable interrupts by clearing the interrupt masks. */ + ravb_write(ndev, 0, RIC0); + ravb_write(ndev, 0, RIC2); + ravb_write(ndev, 0, TIC); + + /* Only allow ECI interrupts */ + synchronize_irq(priv->emac_irq); + napi_disable(&priv->napi[RAVB_NC]); + napi_disable(&priv->napi[RAVB_BE]); + ravb_write(ndev, ECSIPR_MPDIP, ECSIPR); + + /* Enable MagicPacket */ + ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE); + + /* Increased clock usage so device won't be suspended */ + clk_enable(priv->clk); + + return enable_irq_wake(priv->emac_irq); +} + +static int ravb_wol_restore(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + int ret; + + napi_enable(&priv->napi[RAVB_NC]); + napi_enable(&priv->napi[RAVB_BE]); + + /* Disable MagicPacket */ + ravb_modify(ndev, ECMR, ECMR_MPDE, 0); + + ret = ravb_close(ndev); + if (ret < 0) + return ret; + + /* Restore clock usage count */ + clk_disable(priv->clk); + + return disable_irq_wake(priv->emac_irq); +} + static int __maybe_unused ravb_suspend(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); - int ret = 0; + struct ravb_private *priv = netdev_priv(ndev); + int ret; - if (netif_running(ndev)) { - netif_device_detach(ndev); + if (!netif_running(ndev)) + return 0; + + netif_device_detach(ndev); + + if (priv->wol_enabled) + ret = ravb_wol_setup(ndev); + else ret = ravb_close(ndev); - } return ret; } @@ -2179,6 +2270,33 @@ static int __maybe_unused ravb_resume(struct device *dev) struct ravb_private *priv = netdev_priv(ndev); int ret = 0; + if (priv->wol_enabled) { + /* Reduce the usecount of the clock to zero and then + * restore it to its original value. This is done to force + * the clock to be re-enabled which is a workaround + * for renesas-cpg-mssr driver which do not enable clocks + * when resuming from PSCI suspend/resume. 
+ * + * Without this workaround the driver fails to communicate + * with the hardware if WoL was enabled when the system + * entered PSCI suspend. This is due to that if WoL is enabled + * we explicitly keep the clock from being turned off when + * suspending, but in PSCI sleep power is cut so the clock + * is disabled anyhow, the clock driver is not aware of this + * so the clock is not turned back on when resuming. + * + * TODO: once the renesas-cpg-mssr suspend/resume is working + * this clock dance should be removed. + */ + clk_disable(priv->clk); + clk_disable(priv->clk); + clk_enable(priv->clk); + clk_enable(priv->clk); + + /* Set reset mode to rearm the WoL logic */ + ravb_write(ndev, CCC_OPC_RESET, CCC); + } + /* All register have been reset to default values. * Restore all registers which where setup at probe time and * reopen device if it was running before system suspended. @@ -2202,6 +2320,11 @@ static int __maybe_unused ravb_resume(struct device *dev) ravb_write(ndev, priv->desc_bat_dma, DBAT); if (netif_running(ndev)) { + if (priv->wol_enabled) { + ret = ravb_wol_restore(ndev); + if (ret) + return ret; + } ret = ravb_open(ndev); if (ret < 0) return ret; diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index d2dc0a8ef305..d2e88a30f57b 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -3402,7 +3402,7 @@ static const struct dev_pm_ops sh_eth_dev_pm_ops = { #define SH_ETH_PM_OPS NULL #endif -static struct platform_device_id sh_eth_id_table[] = { +static const struct platform_device_id sh_eth_id_table[] = { { "sh7619-ether", (kernel_ulong_t)&sh7619_data }, { "sh771x-ether", (kernel_ulong_t)&sh771x_data }, { "sh7724-ether", (kernel_ulong_t)&sh7724_data }, diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index b1e5c07099fa..fc8f8bdf6579 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include @@ -2191,6 +2192,10 @@ static int rocker_router_fib_event(struct notifier_block *nb, { struct rocker *rocker = container_of(nb, struct rocker, fib_nb); struct rocker_fib_event_work *fib_work; + struct fib_notifier_info *info = ptr; + + if (info->family != AF_INET) + return NOTIFY_DONE; fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC); if (WARN_ON(!fib_work)) diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c index 600e30e8f0be..0653b70723a3 100644 --- a/drivers/net/ethernet/rocker/rocker_ofdpa.c +++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c @@ -1177,7 +1177,7 @@ static int ofdpa_group_l2_fan_out(struct ofdpa_port *ofdpa_port, entry->group_id = group_id; entry->group_count = group_count; - entry->group_ids = kcalloc(flags, group_count, sizeof(u32)); + entry->group_ids = kcalloc(group_count, sizeof(u32), GFP_KERNEL); if (!entry->group_ids) { kfree(entry); return -ENOMEM; @@ -1456,7 +1456,7 @@ static int ofdpa_port_vlan_flood_group(struct ofdpa_port *ofdpa_port, int err = 0; int i; - group_ids = kcalloc(flags, port_count, sizeof(u32)); + group_ids = kcalloc(port_count, sizeof(u32), GFP_KERNEL); if (!group_ids) return -ENOMEM; @@ -2761,7 +2761,7 @@ static int ofdpa_fib4_add(struct rocker *rocker, fen_info->tb_id, 0); if (err) return err; - fib_info_offload_inc(fen_info->fi); + fen_info->fi->fib_nh->nh_flags |= RTNH_F_OFFLOAD; return 0; } @@ -2776,7 +2776,7 @@ static int ofdpa_fib4_del(struct 
rocker *rocker, ofdpa_port = ofdpa_port_dev_lower_find(fen_info->fi->fib_dev, rocker); if (!ofdpa_port) return 0; - fib_info_offload_dec(fen_info->fi); + fen_info->fi->fib_nh->nh_flags &= ~RTNH_F_OFFLOAD; return ofdpa_port_fib_ipv4(ofdpa_port, htonl(fen_info->dst), fen_info->dst_len, fen_info->fi, fen_info->tb_id, OFDPA_OP_FLAG_REMOVE); @@ -2803,7 +2803,7 @@ static void ofdpa_fib4_abort(struct rocker *rocker) rocker); if (!ofdpa_port) continue; - fib_info_offload_dec(flow_entry->fi); + flow_entry->fi->fib_nh->nh_flags &= ~RTNH_F_OFFLOAD; ofdpa_flow_tbl_del(ofdpa_port, OFDPA_OP_FLAG_REMOVE, flow_entry); } diff --git a/drivers/net/ethernet/rocker/rocker_tlv.h b/drivers/net/ethernet/rocker/rocker_tlv.h index a63ef82e7c72..dfae3c9d57c6 100644 --- a/drivers/net/ethernet/rocker/rocker_tlv.h +++ b/drivers/net/ethernet/rocker/rocker_tlv.h @@ -139,40 +139,52 @@ rocker_tlv_start(struct rocker_desc_info *desc_info) int rocker_tlv_put(struct rocker_desc_info *desc_info, int attrtype, int attrlen, const void *data); -static inline int rocker_tlv_put_u8(struct rocker_desc_info *desc_info, - int attrtype, u8 value) +static inline int +rocker_tlv_put_u8(struct rocker_desc_info *desc_info, int attrtype, u8 value) { - return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value); + u8 tmp = value; /* work around GCC PR81715 */ + + return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &tmp); } -static inline int rocker_tlv_put_u16(struct rocker_desc_info *desc_info, - int attrtype, u16 value) +static inline int +rocker_tlv_put_u16(struct rocker_desc_info *desc_info, int attrtype, u16 value) { - return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value); + u16 tmp = value; + + return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &tmp); } -static inline int rocker_tlv_put_be16(struct rocker_desc_info *desc_info, - int attrtype, __be16 value) +static inline int +rocker_tlv_put_be16(struct rocker_desc_info *desc_info, int attrtype, __be16 value) { - return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value); + __be16 tmp = value; + + return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &tmp); } -static inline int rocker_tlv_put_u32(struct rocker_desc_info *desc_info, - int attrtype, u32 value) +static inline int +rocker_tlv_put_u32(struct rocker_desc_info *desc_info, int attrtype, u32 value) { - return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value); + u32 tmp = value; + + return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &tmp); } -static inline int rocker_tlv_put_be32(struct rocker_desc_info *desc_info, - int attrtype, __be32 value) +static inline int +rocker_tlv_put_be32(struct rocker_desc_info *desc_info, int attrtype, __be32 value) { - return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value); + __be32 tmp = value; + + return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &tmp); } -static inline int rocker_tlv_put_u64(struct rocker_desc_info *desc_info, - int attrtype, u64 value) +static inline int +rocker_tlv_put_u64(struct rocker_desc_info *desc_info, int attrtype, u64 value) { - return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value); + u64 tmp = value; + + return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &tmp); } static inline struct rocker_tlv * diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c index 70347720fdf9..573691bc3b71 100644 --- a/drivers/net/ethernet/seeq/sgiseeq.c +++ b/drivers/net/ethernet/seeq/sgiseeq.c @@ -737,8 +737,8 @@ static int sgiseeq_probe(struct platform_device *pdev) sp = 
netdev_priv(dev); /* Make private data page aligned */ - sr = dma_alloc_noncoherent(&pdev->dev, sizeof(*sp->srings), - &sp->srings_dma, GFP_KERNEL); + sr = dma_alloc_attrs(&pdev->dev, sizeof(*sp->srings), &sp->srings_dma, + GFP_KERNEL, DMA_ATTR_NON_CONSISTENT); if (!sr) { printk(KERN_ERR "Sgiseeq: Page alloc failed, aborting.\n"); err = -ENOMEM; @@ -813,8 +813,8 @@ static int sgiseeq_remove(struct platform_device *pdev) struct sgiseeq_private *sp = netdev_priv(dev); unregister_netdev(dev); - dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings, - sp->srings_dma); + dma_free_attrs(&pdev->dev, sizeof(*sp->srings), sp->srings, + sp->srings_dma, DMA_ATTR_NON_CONSISTENT); free_netdev(dev); return 0; diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index fcea9371ab7f..d407adf59610 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -32,8 +32,8 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev); netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); -int efx_setup_tc(struct net_device *net_dev, u32 handle, u32 chain_index, - __be16 proto, struct tc_to_netdev *tc); +int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type, + void *type_data); unsigned int efx_tx_max_skb_descs(struct efx_nic *efx); extern unsigned int efx_piobuf_size; extern bool efx_separate_tx_channels; diff --git a/drivers/net/ethernet/sfc/falcon/efx.h b/drivers/net/ethernet/sfc/falcon/efx.h index e5a7a40cc8b6..4f3bb30661ea 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.h +++ b/drivers/net/ethernet/sfc/falcon/efx.h @@ -32,8 +32,8 @@ netdev_tx_t ef4_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev); netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb); void ef4_xmit_done(struct ef4_tx_queue *tx_queue, unsigned int index); -int ef4_setup_tc(struct net_device *net_dev, u32 handle, u32 chain_index, - __be16 proto, struct tc_to_netdev *tc); +int ef4_setup_tc(struct net_device *net_dev, enum tc_setup_type type, + void *type_data); unsigned int ef4_tx_max_skb_descs(struct ef4_nic *efx); extern bool ef4_separate_tx_channels; diff --git a/drivers/net/ethernet/sfc/falcon/tx.c b/drivers/net/ethernet/sfc/falcon/tx.c index f1520a404ac6..6a75f4140a4b 100644 --- a/drivers/net/ethernet/sfc/falcon/tx.c +++ b/drivers/net/ethernet/sfc/falcon/tx.c @@ -425,24 +425,25 @@ void ef4_init_tx_queue_core_txq(struct ef4_tx_queue *tx_queue) efx->n_tx_channels : 0)); } -int ef4_setup_tc(struct net_device *net_dev, u32 handle, u32 chain_index, - __be16 proto, struct tc_to_netdev *ntc) +int ef4_setup_tc(struct net_device *net_dev, enum tc_setup_type type, + void *type_data) { struct ef4_nic *efx = netdev_priv(net_dev); + struct tc_mqprio_qopt *mqprio = type_data; struct ef4_channel *channel; struct ef4_tx_queue *tx_queue; unsigned tc, num_tc; int rc; - if (ntc->type != TC_SETUP_MQPRIO) - return -EINVAL; + if (type != TC_SETUP_MQPRIO) + return -EOPNOTSUPP; - num_tc = ntc->mqprio->num_tc; + num_tc = mqprio->num_tc; if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0 || num_tc > EF4_MAX_TX_TC) return -EINVAL; - ntc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; if (num_tc == net_dev->num_tc) return 0; diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c index 990a63d7fcb7..c7407d129c7d 100644 --- a/drivers/net/ethernet/sfc/mcdi_port.c +++ 
b/drivers/net/ethernet/sfc/mcdi_port.c @@ -746,59 +746,171 @@ static const char *efx_mcdi_phy_test_name(struct efx_nic *efx, return NULL; } -#define SFP_PAGE_SIZE 128 -#define SFP_NUM_PAGES 2 -static int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx, - struct ethtool_eeprom *ee, u8 *data) +#define SFP_PAGE_SIZE 128 +#define SFF_DIAG_TYPE_OFFSET 92 +#define SFF_DIAG_ADDR_CHANGE BIT(2) +#define SFF_8079_NUM_PAGES 2 +#define SFF_8472_NUM_PAGES 4 +#define SFF_8436_NUM_PAGES 5 +#define SFF_DMT_LEVEL_OFFSET 94 + +/** efx_mcdi_phy_get_module_eeprom_page() - Get a single page of module eeprom + * @efx: NIC context + * @page: EEPROM page number + * @data: Destination data pointer + * @offset: Offset in page to copy from in to data + * @space: Space available in data + * + * Return: + * >=0 - amount of data copied + * <0 - error + */ +static int efx_mcdi_phy_get_module_eeprom_page(struct efx_nic *efx, + unsigned int page, + u8 *data, ssize_t offset, + ssize_t space) { MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX); MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN); size_t outlen; - int rc; unsigned int payload_len; - unsigned int space_remaining = ee->len; - unsigned int page; - unsigned int page_off; unsigned int to_copy; - u8 *user_data = data; + int rc; - BUILD_BUG_ON(SFP_PAGE_SIZE * SFP_NUM_PAGES != ETH_MODULE_SFF_8079_LEN); + if (offset > SFP_PAGE_SIZE) + return -EINVAL; + + to_copy = min(space, SFP_PAGE_SIZE - offset); + + MCDI_SET_DWORD(inbuf, GET_PHY_MEDIA_INFO_IN_PAGE, page); + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_PHY_MEDIA_INFO, + inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), + &outlen); + + if (rc) + return rc; + + if (outlen < (MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST + + SFP_PAGE_SIZE)) + return -EIO; + + payload_len = MCDI_DWORD(outbuf, GET_PHY_MEDIA_INFO_OUT_DATALEN); + if (payload_len != SFP_PAGE_SIZE) + return -EIO; + + memcpy(data, MCDI_PTR(outbuf, GET_PHY_MEDIA_INFO_OUT_DATA) + offset, + to_copy); + + return to_copy; +} + +static int efx_mcdi_phy_get_module_eeprom_byte(struct efx_nic *efx, + unsigned int page, + u8 byte) +{ + int rc; + u8 data; + + rc = efx_mcdi_phy_get_module_eeprom_page(efx, page, &data, byte, 1); + if (rc == 1) + return data; + + return rc; +} + +static int efx_mcdi_phy_diag_type(struct efx_nic *efx) +{ + /* Page zero of the EEPROM includes the diagnostic type at byte 92. */ + return efx_mcdi_phy_get_module_eeprom_byte(efx, 0, + SFF_DIAG_TYPE_OFFSET); +} + +static int efx_mcdi_phy_sff_8472_level(struct efx_nic *efx) +{ + /* Page zero of the EEPROM includes the DMT level at byte 94. */ + return efx_mcdi_phy_get_module_eeprom_byte(efx, 0, + SFF_DMT_LEVEL_OFFSET); +} + +static u32 efx_mcdi_phy_module_type(struct efx_nic *efx) +{ + struct efx_mcdi_phy_data *phy_data = efx->phy_data; + + if (phy_data->media != MC_CMD_MEDIA_QSFP_PLUS) + return phy_data->media; + + /* A QSFP+ NIC may actually have an SFP+ module attached. + * The ID is page 0, byte 0. + */ + switch (efx_mcdi_phy_get_module_eeprom_byte(efx, 0, 0)) { + case 0x3: + return MC_CMD_MEDIA_SFP_PLUS; + case 0xc: + case 0xd: + return MC_CMD_MEDIA_QSFP_PLUS; + default: + return 0; + } +} + +static int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx, + struct ethtool_eeprom *ee, u8 *data) +{ + int rc; + ssize_t space_remaining = ee->len; + unsigned int page_off; + bool ignore_missing; + int num_pages; + int page; + + switch (efx_mcdi_phy_module_type(efx)) { + case MC_CMD_MEDIA_SFP_PLUS: + num_pages = efx_mcdi_phy_sff_8472_level(efx) > 0 ? 
+ SFF_8472_NUM_PAGES : SFF_8079_NUM_PAGES; + page = 0; + ignore_missing = false; + break; + case MC_CMD_MEDIA_QSFP_PLUS: + num_pages = SFF_8436_NUM_PAGES; + page = -1; /* We obtain the lower page by asking for -1. */ + ignore_missing = true; /* Ignore missing pages after page 0. */ + break; + default: + return -EOPNOTSUPP; + } page_off = ee->offset % SFP_PAGE_SIZE; - page = ee->offset / SFP_PAGE_SIZE; + page += ee->offset / SFP_PAGE_SIZE; - while (space_remaining && (page < SFP_NUM_PAGES)) { - MCDI_SET_DWORD(inbuf, GET_PHY_MEDIA_INFO_IN_PAGE, page); + while (space_remaining && (page < num_pages)) { + rc = efx_mcdi_phy_get_module_eeprom_page(efx, page, + data, page_off, + space_remaining); - rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_MEDIA_INFO, - inbuf, sizeof(inbuf), - outbuf, sizeof(outbuf), - &outlen); - if (rc) + if (rc > 0) { + space_remaining -= rc; + data += rc; + page_off = 0; + page++; + } else if (rc == 0) { + space_remaining = 0; + } else if (ignore_missing && (page > 0)) { + int intended_size = SFP_PAGE_SIZE - page_off; + + space_remaining -= intended_size; + if (space_remaining < 0) { + space_remaining = 0; + } else { + memset(data, 0, intended_size); + data += intended_size; + page_off = 0; + page++; + rc = 0; + } + } else { return rc; - - if (outlen < (MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST + - SFP_PAGE_SIZE)) - return -EIO; - - payload_len = MCDI_DWORD(outbuf, - GET_PHY_MEDIA_INFO_OUT_DATALEN); - if (payload_len != SFP_PAGE_SIZE) - return -EIO; - - /* Copy as much as we can into data */ - payload_len -= page_off; - to_copy = (space_remaining < payload_len) ? - space_remaining : payload_len; - - memcpy(user_data, - MCDI_PTR(outbuf, GET_PHY_MEDIA_INFO_OUT_DATA) + page_off, - to_copy); - - space_remaining -= to_copy; - user_data += to_copy; - page_off = 0; - page++; + } } return 0; @@ -807,16 +919,42 @@ static int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx, static int efx_mcdi_phy_get_module_info(struct efx_nic *efx, struct ethtool_modinfo *modinfo) { - struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + int sff_8472_level; + int diag_type; - switch (phy_cfg->media) { + switch (efx_mcdi_phy_module_type(efx)) { case MC_CMD_MEDIA_SFP_PLUS: - modinfo->type = ETH_MODULE_SFF_8079; - modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; - return 0; + sff_8472_level = efx_mcdi_phy_sff_8472_level(efx); + + /* If we can't read the diagnostics level we have none. */ + if (sff_8472_level < 0) + return -EOPNOTSUPP; + + /* Check if this module requires the (unsupported) address + * change operation. 
+ */ + diag_type = efx_mcdi_phy_diag_type(efx); + + if ((sff_8472_level == 0) || + (diag_type & SFF_DIAG_ADDR_CHANGE)) { + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + } + break; + + case MC_CMD_MEDIA_QSFP_PLUS: + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + break; + default: return -EOPNOTSUPP; } + + return 0; } static const struct efx_phy_operations efx_mcdi_phy_ops = { diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 02d41eb4a8e9..32bf1fecf864 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -653,24 +653,25 @@ void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue) efx->n_tx_channels : 0)); } -int efx_setup_tc(struct net_device *net_dev, u32 handle, u32 chain_index, - __be16 proto, struct tc_to_netdev *ntc) +int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type, + void *type_data) { struct efx_nic *efx = netdev_priv(net_dev); + struct tc_mqprio_qopt *mqprio = type_data; struct efx_channel *channel; struct efx_tx_queue *tx_queue; unsigned tc, num_tc; int rc; - if (ntc->type != TC_SETUP_MQPRIO) - return -EINVAL; + if (type != TC_SETUP_MQPRIO) + return -EOPNOTSUPP; - num_tc = ntc->mqprio->num_tc; + num_tc = mqprio->num_tc; if (num_tc > EFX_MAX_TX_TC) return -EINVAL; - ntc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; if (num_tc == net_dev->num_tc) return 0; diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 0b6a39b003a4..012fb66eed8d 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -2595,6 +2595,11 @@ static int smsc911x_suspend(struct device *dev) struct net_device *ndev = dev_get_drvdata(dev); struct smsc911x_data *pdata = netdev_priv(ndev); + if (netif_running(ndev)) { + netif_stop_queue(ndev); + netif_device_detach(ndev); + } + /* enable wake on LAN, energy detection and the external PME * signal. */ smsc911x_reg_write(pdata, PMT_CTRL, @@ -2628,7 +2633,15 @@ static int smsc911x_resume(struct device *dev) while (!(smsc911x_reg_read(pdata, PMT_CTRL) & PMT_CTRL_READY_) && --to) udelay(1000); - return (to == 0) ? -EIO : 0; + if (to == 0) + return -EIO; + + if (netif_running(ndev)) { + netif_device_attach(ndev); + netif_start_queue(ndev); + } + + return 0; } static const struct dev_pm_ops smsc911x_pm_ops = { diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index 85c0e41f8021..97035766c291 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -45,6 +45,15 @@ config DWMAC_GENERIC platform specific code to function or is using platform data for setup. +config DWMAC_ANARION + tristate "Adaptrum Anarion GMAC support" + default ARC + depends on OF && (ARC || COMPILE_TEST) + help + Support for Adaptrum Anarion GMAC Ethernet controller. + + This selects the Anarion SoC glue layer support for the stmmac driver. 
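For reference, a board that uses this controller enables the new glue driver on top of the common stmmac platform support; a minimal .config fragment is sketched below. CONFIG_STMMAC_ETH and CONFIG_STMMAC_PLATFORM are the usual prerequisites from this Kconfig file and its Makefile and are assumed here rather than taken from the hunk itself.

	CONFIG_STMMAC_ETH=y
	CONFIG_STMMAC_PLATFORM=y
	CONFIG_DWMAC_ANARION=y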
+ config DWMAC_IPQ806X tristate "QCA IPQ806x DWMAC support" default ARCH_QCOM diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index fd4937a7fcab..238307fadcdb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -7,6 +7,7 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \ # Ordering matters. Generic driver must be last. obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o +obj-$(CONFIG_DWMAC_ANARION) += dwmac-anarion.o obj-$(CONFIG_DWMAC_IPQ806X) += dwmac-ipq806x.o obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o dwmac-meson8b.o diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c new file mode 100644 index 000000000000..85ce80c600c7 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c @@ -0,0 +1,152 @@ +/* + * Adaptrum Anarion DWMAC glue layer + * + * Copyright (C) 2017, Adaptrum, Inc. + * (Written by Alexandru Gagniuc for Adaptrum, Inc.) + * Licensed under the GPLv2 or (at your option) any later version. + */ + +#include +#include +#include +#include + +#include "stmmac.h" +#include "stmmac_platform.h" + +#define GMAC_RESET_CONTROL_REG 0 +#define GMAC_SW_CONFIG_REG 4 +#define GMAC_CONFIG_INTF_SEL_MASK (0x7 << 0) +#define GMAC_CONFIG_INTF_RGMII (0x1 << 0) + +struct anarion_gmac { + uintptr_t ctl_block; + uint32_t phy_intf_sel; +}; + +static uint32_t gmac_read_reg(struct anarion_gmac *gmac, uint8_t reg) +{ + return readl((void *)(gmac->ctl_block + reg)); +}; + +static void gmac_write_reg(struct anarion_gmac *gmac, uint8_t reg, uint32_t val) +{ + writel(val, (void *)(gmac->ctl_block + reg)); +} + +static int anarion_gmac_init(struct platform_device *pdev, void *priv) +{ + uint32_t sw_config; + struct anarion_gmac *gmac = priv; + + /* Reset logic, configure interface mode, then release reset. SIMPLE! 
*/ + gmac_write_reg(gmac, GMAC_RESET_CONTROL_REG, 1); + + sw_config = gmac_read_reg(gmac, GMAC_SW_CONFIG_REG); + sw_config &= ~GMAC_CONFIG_INTF_SEL_MASK; + sw_config |= (gmac->phy_intf_sel & GMAC_CONFIG_INTF_SEL_MASK); + gmac_write_reg(gmac, GMAC_SW_CONFIG_REG, sw_config); + + gmac_write_reg(gmac, GMAC_RESET_CONTROL_REG, 0); + + return 0; +} + +static void anarion_gmac_exit(struct platform_device *pdev, void *priv) +{ + struct anarion_gmac *gmac = priv; + + gmac_write_reg(gmac, GMAC_RESET_CONTROL_REG, 1); +} + +static struct anarion_gmac *anarion_config_dt(struct platform_device *pdev) +{ + int phy_mode; + struct resource *res; + void __iomem *ctl_block; + struct anarion_gmac *gmac; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + ctl_block = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(ctl_block)) { + dev_err(&pdev->dev, "Cannot get reset region (%ld)!\n", + PTR_ERR(ctl_block)); + return ctl_block; + } + + gmac = devm_kzalloc(&pdev->dev, sizeof(*gmac), GFP_KERNEL); + if (!gmac) + return ERR_PTR(-ENOMEM); + + gmac->ctl_block = (uintptr_t)ctl_block; + + phy_mode = of_get_phy_mode(pdev->dev.of_node); + switch (phy_mode) { + case PHY_INTERFACE_MODE_RGMII: /* Fall through */ + case PHY_INTERFACE_MODE_RGMII_ID /* Fall through */: + case PHY_INTERFACE_MODE_RGMII_RXID: /* Fall through */ + case PHY_INTERFACE_MODE_RGMII_TXID: + gmac->phy_intf_sel = GMAC_CONFIG_INTF_RGMII; + break; + default: + dev_err(&pdev->dev, "Unsupported phy-mode (%d)\n", + phy_mode); + return ERR_PTR(-ENOTSUPP); + } + + return gmac; +} + +static int anarion_dwmac_probe(struct platform_device *pdev) +{ + int ret; + struct anarion_gmac *gmac; + struct plat_stmmacenet_data *plat_dat; + struct stmmac_resources stmmac_res; + + ret = stmmac_get_platform_resources(pdev, &stmmac_res); + if (ret) + return ret; + + gmac = anarion_config_dt(pdev); + if (IS_ERR(gmac)) + return PTR_ERR(gmac); + + plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); + + plat_dat->init = anarion_gmac_init; + plat_dat->exit = anarion_gmac_exit; + anarion_gmac_init(pdev, gmac); + plat_dat->bsp_priv = gmac; + + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + if (ret) { + stmmac_remove_config_dt(pdev, plat_dat); + return ret; + } + + return 0; +} + +static const struct of_device_id anarion_dwmac_match[] = { + { .compatible = "adaptrum,anarion-gmac" }, + { } +}; +MODULE_DEVICE_TABLE(of, anarion_dwmac_match); + +static struct platform_driver anarion_dwmac_driver = { + .probe = anarion_dwmac_probe, + .remove = stmmac_pltfr_remove, + .driver = { + .name = "anarion-dwmac", + .pm = &stmmac_pltfr_pm_ops, + .of_match_table = anarion_dwmac_match, + }, +}; +module_platform_driver(anarion_dwmac_driver); + +MODULE_DESCRIPTION("Adaptrum Anarion DWMAC specific glue layer"); +MODULE_AUTHOR("Alexandru Gagniuc "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c index dd6a2f9791cc..5efef8001edf 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c @@ -511,6 +511,7 @@ static struct platform_driver dwc_eth_dwmac_driver = { .remove = dwc_eth_dwmac_remove, .driver = { .name = "dwc-eth-dwmac", + .pm = &stmmac_pltfr_pm_ops, .of_match_table = dwc_eth_dwmac_match, }, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c index 9685555932ea..4404650b32c5 
100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c @@ -89,7 +89,7 @@ static int meson8b_init_clk(struct meson8b_dwmac *dwmac) char clk_name[32]; const char *clk_div_parents[1]; const char *mux_parent_names[MUX_CLK_NUM_PARENTS]; - static struct clk_div_table clk_25m_div_table[] = { + static const struct clk_div_table clk_25m_div_table[] = { { .val = 0, .div = 5 }, { .val = 1, .div = 10 }, { /* sentinel */ }, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index f0df5193f047..13133b30b575 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -41,6 +41,7 @@ struct rk_gmac_ops { void (*set_to_rmii)(struct rk_priv_data *bsp_priv); void (*set_rgmii_speed)(struct rk_priv_data *bsp_priv, int speed); void (*set_rmii_speed)(struct rk_priv_data *bsp_priv, int speed); + void (*integrated_phy_powerup)(struct rk_priv_data *bsp_priv); }; struct rk_priv_data { @@ -52,6 +53,7 @@ struct rk_priv_data { bool clk_enabled; bool clock_input; + bool integrated_phy; struct clk *clk_mac; struct clk *gmac_clkin; @@ -61,6 +63,9 @@ struct rk_priv_data { struct clk *clk_mac_refout; struct clk *aclk_mac; struct clk *pclk_mac; + struct clk *clk_phy; + + struct reset_control *phy_reset; int tx_delay; int rx_delay; @@ -78,9 +83,122 @@ struct rk_priv_data { (((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \ ((rx) ? soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE)) +#define RK3128_GRF_MAC_CON0 0x0168 +#define RK3128_GRF_MAC_CON1 0x016c + +/* RK3128_GRF_MAC_CON0 */ +#define RK3128_GMAC_TXCLK_DLY_ENABLE GRF_BIT(14) +#define RK3128_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(14) +#define RK3128_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15) +#define RK3128_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15) +#define RK3128_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7) +#define RK3128_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0) + +/* RK3128_GRF_MAC_CON1 */ +#define RK3128_GMAC_PHY_INTF_SEL_RGMII \ + (GRF_BIT(6) | GRF_CLR_BIT(7) | GRF_CLR_BIT(8)) +#define RK3128_GMAC_PHY_INTF_SEL_RMII \ + (GRF_CLR_BIT(6) | GRF_CLR_BIT(7) | GRF_BIT(8)) +#define RK3128_GMAC_FLOW_CTRL GRF_BIT(9) +#define RK3128_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(9) +#define RK3128_GMAC_SPEED_10M GRF_CLR_BIT(10) +#define RK3128_GMAC_SPEED_100M GRF_BIT(10) +#define RK3128_GMAC_RMII_CLK_25M GRF_BIT(11) +#define RK3128_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(11) +#define RK3128_GMAC_CLK_125M (GRF_CLR_BIT(12) | GRF_CLR_BIT(13)) +#define RK3128_GMAC_CLK_25M (GRF_BIT(12) | GRF_BIT(13)) +#define RK3128_GMAC_CLK_2_5M (GRF_CLR_BIT(12) | GRF_BIT(13)) +#define RK3128_GMAC_RMII_MODE GRF_BIT(14) +#define RK3128_GMAC_RMII_MODE_CLR GRF_CLR_BIT(14) + +static void rk3128_set_to_rgmii(struct rk_priv_data *bsp_priv, + int tx_delay, int rx_delay) +{ + struct device *dev = &bsp_priv->pdev->dev; + + if (IS_ERR(bsp_priv->grf)) { + dev_err(dev, "Missing rockchip,grf property\n"); + return; + } + + regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1, + RK3128_GMAC_PHY_INTF_SEL_RGMII | + RK3128_GMAC_RMII_MODE_CLR); + regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON0, + DELAY_ENABLE(RK3128, tx_delay, rx_delay) | + RK3128_GMAC_CLK_RX_DL_CFG(rx_delay) | + RK3128_GMAC_CLK_TX_DL_CFG(tx_delay)); +} + +static void rk3128_set_to_rmii(struct rk_priv_data *bsp_priv) +{ + struct device *dev = &bsp_priv->pdev->dev; + + if (IS_ERR(bsp_priv->grf)) { + dev_err(dev, "Missing rockchip,grf property\n"); + return; + } + + 
regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1, + RK3128_GMAC_PHY_INTF_SEL_RMII | RK3128_GMAC_RMII_MODE); +} + +static void rk3128_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed) +{ + struct device *dev = &bsp_priv->pdev->dev; + + if (IS_ERR(bsp_priv->grf)) { + dev_err(dev, "Missing rockchip,grf property\n"); + return; + } + + if (speed == 10) + regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1, + RK3128_GMAC_CLK_2_5M); + else if (speed == 100) + regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1, + RK3128_GMAC_CLK_25M); + else if (speed == 1000) + regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1, + RK3128_GMAC_CLK_125M); + else + dev_err(dev, "unknown speed value for RGMII! speed=%d", speed); +} + +static void rk3128_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) +{ + struct device *dev = &bsp_priv->pdev->dev; + + if (IS_ERR(bsp_priv->grf)) { + dev_err(dev, "Missing rockchip,grf property\n"); + return; + } + + if (speed == 10) { + regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1, + RK3128_GMAC_RMII_CLK_2_5M | + RK3128_GMAC_SPEED_10M); + } else if (speed == 100) { + regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1, + RK3128_GMAC_RMII_CLK_25M | + RK3128_GMAC_SPEED_100M); + } else { + dev_err(dev, "unknown speed value for RMII! speed=%d", speed); + } +} + +static const struct rk_gmac_ops rk3128_ops = { + .set_to_rgmii = rk3128_set_to_rgmii, + .set_to_rmii = rk3128_set_to_rmii, + .set_rgmii_speed = rk3128_set_rgmii_speed, + .set_rmii_speed = rk3128_set_rmii_speed, +}; + #define RK3228_GRF_MAC_CON0 0x0900 #define RK3228_GRF_MAC_CON1 0x0904 +#define RK3228_GRF_CON_MUX 0x50 + /* RK3228_GRF_MAC_CON0 */ #define RK3228_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7) #define RK3228_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0) @@ -106,6 +224,9 @@ struct rk_priv_data { #define RK3228_GMAC_RXCLK_DLY_ENABLE GRF_BIT(1) #define RK3228_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(1) +/* RK3228_GRF_COM_MUX */ +#define RK3228_GRF_CON_MUX_GMAC_INTEGRATED_PHY GRF_BIT(15) + static void rk3228_set_to_rgmii(struct rk_priv_data *bsp_priv, int tx_delay, int rx_delay) { @@ -186,11 +307,18 @@ static void rk3228_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) dev_err(dev, "unknown speed value for RMII! 
speed=%d", speed); } +static void rk3228_integrated_phy_powerup(struct rk_priv_data *priv) +{ + regmap_write(priv->grf, RK3228_GRF_CON_MUX, + RK3228_GRF_CON_MUX_GMAC_INTEGRATED_PHY); +} + static const struct rk_gmac_ops rk3228_ops = { .set_to_rgmii = rk3228_set_to_rgmii, .set_to_rmii = rk3228_set_to_rmii, .set_rgmii_speed = rk3228_set_rgmii_speed, .set_rmii_speed = rk3228_set_rmii_speed, + .integrated_phy_powerup = rk3228_integrated_phy_powerup, }; #define RK3288_GRF_SOC_CON1 0x0248 @@ -306,6 +434,8 @@ static const struct rk_gmac_ops rk3288_ops = { #define RK3328_GRF_MAC_CON0 0x0900 #define RK3328_GRF_MAC_CON1 0x0904 +#define RK3328_GRF_MAC_CON2 0x0908 +#define RK3328_GRF_MACPHY_CON1 0xb04 /* RK3328_GRF_MAC_CON0 */ #define RK3328_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7) @@ -332,6 +462,9 @@ static const struct rk_gmac_ops rk3288_ops = { #define RK3328_GMAC_RXCLK_DLY_ENABLE GRF_BIT(1) #define RK3328_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(0) +/* RK3328_GRF_MACPHY_CON1 */ +#define RK3328_MACPHY_RMII_MODE GRF_BIT(9) + static void rk3328_set_to_rgmii(struct rk_priv_data *bsp_priv, int tx_delay, int rx_delay) { @@ -356,18 +489,19 @@ static void rk3328_set_to_rgmii(struct rk_priv_data *bsp_priv, static void rk3328_set_to_rmii(struct rk_priv_data *bsp_priv) { struct device *dev = &bsp_priv->pdev->dev; + unsigned int reg; if (IS_ERR(bsp_priv->grf)) { dev_err(dev, "Missing rockchip,grf property\n"); return; } - regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, + reg = bsp_priv->integrated_phy ? RK3328_GRF_MAC_CON2 : + RK3328_GRF_MAC_CON1; + + regmap_write(bsp_priv->grf, reg, RK3328_GMAC_PHY_INTF_SEL_RMII | RK3328_GMAC_RMII_MODE); - - /* set MAC to RMII mode */ - regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, GRF_BIT(11)); } static void rk3328_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed) @@ -395,29 +529,40 @@ static void rk3328_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed) static void rk3328_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) { struct device *dev = &bsp_priv->pdev->dev; + unsigned int reg; if (IS_ERR(bsp_priv->grf)) { dev_err(dev, "Missing rockchip,grf property\n"); return; } + reg = bsp_priv->integrated_phy ? RK3328_GRF_MAC_CON2 : + RK3328_GRF_MAC_CON1; + if (speed == 10) - regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, + regmap_write(bsp_priv->grf, reg, RK3328_GMAC_RMII_CLK_2_5M | RK3328_GMAC_SPEED_10M); else if (speed == 100) - regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, + regmap_write(bsp_priv->grf, reg, RK3328_GMAC_RMII_CLK_25M | RK3328_GMAC_SPEED_100M); else dev_err(dev, "unknown speed value for RMII! 
speed=%d", speed); } +static void rk3328_integrated_phy_powerup(struct rk_priv_data *priv) +{ + regmap_write(priv->grf, RK3328_GRF_MACPHY_CON1, + RK3328_MACPHY_RMII_MODE); +} + static const struct rk_gmac_ops rk3328_ops = { .set_to_rgmii = rk3328_set_to_rgmii, .set_to_rmii = rk3328_set_to_rmii, .set_rgmii_speed = rk3328_set_rgmii_speed, .set_rmii_speed = rk3328_set_rmii_speed, + .integrated_phy_powerup = rk3328_integrated_phy_powerup, }; #define RK3366_GRF_SOC_CON6 0x0418 @@ -753,10 +898,108 @@ static const struct rk_gmac_ops rk3399_ops = { .set_rmii_speed = rk3399_set_rmii_speed, }; -static int gmac_clk_init(struct rk_priv_data *bsp_priv) +#define RV1108_GRF_GMAC_CON0 0X0900 + +/* RV1108_GRF_GMAC_CON0 */ +#define RV1108_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | \ + GRF_BIT(6)) +#define RV1108_GMAC_FLOW_CTRL GRF_BIT(3) +#define RV1108_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3) +#define RV1108_GMAC_SPEED_10M GRF_CLR_BIT(2) +#define RV1108_GMAC_SPEED_100M GRF_BIT(2) +#define RV1108_GMAC_RMII_CLK_25M GRF_BIT(7) +#define RV1108_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(7) + +static void rv1108_set_to_rmii(struct rk_priv_data *bsp_priv) { struct device *dev = &bsp_priv->pdev->dev; + if (IS_ERR(bsp_priv->grf)) { + dev_err(dev, "%s: Missing rockchip,grf property\n", __func__); + return; + } + + regmap_write(bsp_priv->grf, RV1108_GRF_GMAC_CON0, + RV1108_GMAC_PHY_INTF_SEL_RMII); +} + +static void rv1108_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) +{ + struct device *dev = &bsp_priv->pdev->dev; + + if (IS_ERR(bsp_priv->grf)) { + dev_err(dev, "%s: Missing rockchip,grf property\n", __func__); + return; + } + + if (speed == 10) { + regmap_write(bsp_priv->grf, RV1108_GRF_GMAC_CON0, + RV1108_GMAC_RMII_CLK_2_5M | + RV1108_GMAC_SPEED_10M); + } else if (speed == 100) { + regmap_write(bsp_priv->grf, RV1108_GRF_GMAC_CON0, + RV1108_GMAC_RMII_CLK_25M | + RV1108_GMAC_SPEED_100M); + } else { + dev_err(dev, "unknown speed value for RMII! 
speed=%d", speed); + } +} + +static const struct rk_gmac_ops rv1108_ops = { + .set_to_rmii = rv1108_set_to_rmii, + .set_rmii_speed = rv1108_set_rmii_speed, +}; + +#define RK_GRF_MACPHY_CON0 0xb00 +#define RK_GRF_MACPHY_CON1 0xb04 +#define RK_GRF_MACPHY_CON2 0xb08 +#define RK_GRF_MACPHY_CON3 0xb0c + +#define RK_MACPHY_ENABLE GRF_BIT(0) +#define RK_MACPHY_DISABLE GRF_CLR_BIT(0) +#define RK_MACPHY_CFG_CLK_50M GRF_BIT(14) +#define RK_GMAC2PHY_RMII_MODE (GRF_BIT(6) | GRF_CLR_BIT(7)) +#define RK_GRF_CON2_MACPHY_ID HIWORD_UPDATE(0x1234, 0xffff, 0) +#define RK_GRF_CON3_MACPHY_ID HIWORD_UPDATE(0x35, 0x3f, 0) + +static void rk_gmac_integrated_phy_powerup(struct rk_priv_data *priv) +{ + if (priv->ops->integrated_phy_powerup) + priv->ops->integrated_phy_powerup(priv); + + regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_CFG_CLK_50M); + regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_GMAC2PHY_RMII_MODE); + + regmap_write(priv->grf, RK_GRF_MACPHY_CON2, RK_GRF_CON2_MACPHY_ID); + regmap_write(priv->grf, RK_GRF_MACPHY_CON3, RK_GRF_CON3_MACPHY_ID); + + if (priv->phy_reset) { + /* PHY needs to be disabled before trying to reset it */ + regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_DISABLE); + if (priv->phy_reset) + reset_control_assert(priv->phy_reset); + usleep_range(10, 20); + if (priv->phy_reset) + reset_control_deassert(priv->phy_reset); + usleep_range(10, 20); + regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_ENABLE); + msleep(30); + } +} + +static void rk_gmac_integrated_phy_powerdown(struct rk_priv_data *priv) +{ + regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_DISABLE); + if (priv->phy_reset) + reset_control_assert(priv->phy_reset); +} + +static int rk_gmac_clk_init(struct plat_stmmacenet_data *plat) +{ + struct rk_priv_data *bsp_priv = plat->bsp_priv; + struct device *dev = &bsp_priv->pdev->dev; + int ret; + bsp_priv->clk_enabled = false; bsp_priv->mac_clk_rx = devm_clk_get(dev, "mac_clk_rx"); @@ -806,6 +1049,16 @@ static int gmac_clk_init(struct rk_priv_data *bsp_priv) clk_set_rate(bsp_priv->clk_mac, 50000000); } + if (plat->phy_node && bsp_priv->integrated_phy) { + bsp_priv->clk_phy = of_clk_get(plat->phy_node, 0); + if (IS_ERR(bsp_priv->clk_phy)) { + ret = PTR_ERR(bsp_priv->clk_phy); + dev_err(dev, "Cannot get PHY clock: %d\n", ret); + return -EINVAL; + } + clk_set_rate(bsp_priv->clk_phy, 50000000); + } + return 0; } @@ -829,6 +1082,9 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable) bsp_priv->clk_mac_refout); } + if (!IS_ERR(bsp_priv->clk_phy)) + clk_prepare_enable(bsp_priv->clk_phy); + if (!IS_ERR(bsp_priv->aclk_mac)) clk_prepare_enable(bsp_priv->aclk_mac); @@ -861,6 +1117,9 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable) bsp_priv->clk_mac_refout); } + if (!IS_ERR(bsp_priv->clk_phy)) + clk_disable_unprepare(bsp_priv->clk_phy); + if (!IS_ERR(bsp_priv->aclk_mac)) clk_disable_unprepare(bsp_priv->aclk_mac); @@ -905,6 +1164,7 @@ static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable) } static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev, + struct plat_stmmacenet_data *plat, const struct rk_gmac_ops *ops) { struct rk_priv_data *bsp_priv; @@ -967,9 +1227,22 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev, bsp_priv->grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf"); - bsp_priv->pdev = pdev; - gmac_clk_init(bsp_priv); + if (plat->phy_node) { + bsp_priv->integrated_phy = of_property_read_bool(plat->phy_node, + "phy-is-integrated"); + if 
(bsp_priv->integrated_phy) { + bsp_priv->phy_reset = of_reset_control_get(plat->phy_node, NULL); + if (IS_ERR(bsp_priv->phy_reset)) { + dev_err(&pdev->dev, "No PHY reset control found.\n"); + bsp_priv->phy_reset = NULL; + } + } + } + dev_info(dev, "integrated PHY? (%s).\n", + bsp_priv->integrated_phy ? "yes" : "no"); + + bsp_priv->pdev = pdev; return bsp_priv; } @@ -1017,6 +1290,9 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv) pm_runtime_enable(dev); pm_runtime_get_sync(dev); + if (bsp_priv->integrated_phy) + rk_gmac_integrated_phy_powerup(bsp_priv); + return 0; } @@ -1024,6 +1300,9 @@ static void rk_gmac_powerdown(struct rk_priv_data *gmac) { struct device *dev = &gmac->pdev->dev; + if (gmac->integrated_phy) + rk_gmac_integrated_phy_powerdown(gmac); + pm_runtime_put_sync(dev); pm_runtime_disable(dev); @@ -1075,12 +1354,16 @@ static int rk_gmac_probe(struct platform_device *pdev) plat_dat->has_gmac = true; plat_dat->fix_mac_speed = rk_fix_speed; - plat_dat->bsp_priv = rk_gmac_setup(pdev, data); + plat_dat->bsp_priv = rk_gmac_setup(pdev, plat_dat, data); if (IS_ERR(plat_dat->bsp_priv)) { ret = PTR_ERR(plat_dat->bsp_priv); goto err_remove_config_dt; } + ret = rk_gmac_clk_init(plat_dat); + if (ret) + return ret; + ret = rk_gmac_powerup(plat_dat->bsp_priv); if (ret) goto err_remove_config_dt; @@ -1141,12 +1424,14 @@ static int rk_gmac_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(rk_gmac_pm_ops, rk_gmac_suspend, rk_gmac_resume); static const struct of_device_id rk_gmac_dwmac_match[] = { + { .compatible = "rockchip,rk3128-gmac", .data = &rk3128_ops }, { .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops }, { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops }, { .compatible = "rockchip,rk3328-gmac", .data = &rk3328_ops }, { .compatible = "rockchip,rk3366-gmac", .data = &rk3366_ops }, { .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops }, { .compatible = "rockchip,rk3399-gmac", .data = &rk3399_ops }, + { .compatible = "rockchip,rv1108-gmac", .data = &rv1108_ops }, { } }; MODULE_DEVICE_TABLE(of, rk_gmac_dwmac_match); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index c4407e8e39a3..2f7d7ec59962 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -296,6 +296,7 @@ static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode) { void __iomem *ioaddr = hw->pcsr; unsigned int pmt = 0; + u32 config; if (mode & WAKE_MAGIC) { pr_debug("GMAC: WOL Magic frame\n"); @@ -306,6 +307,12 @@ static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode) pmt |= power_down | global_unicast | wake_up_frame_en; } + if (pmt) { + /* The receiver must be enabled for WOL before powering down */ + config = readl(ioaddr + GMAC_CONFIG); + config |= GMAC_CONFIG_RE; + writel(config, ioaddr + GMAC_CONFIG); + } writel(pmt, ioaddr + GMAC_PMT); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index 72ec711fcba2..f5f37bfa1d58 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -248,9 +248,6 @@ int stmmac_mdio_register(struct net_device *ndev) found = 0; for (addr = 0; addr < PHY_MAX_ADDR; addr++) { struct phy_device *phydev = mdiobus_get_phy(new_bus, addr); - int act = 0; - char irq_num[4]; - char *irq_str; if (!phydev) continue; @@ -273,19 +270,6 @@ int stmmac_mdio_register(struct net_device *ndev) 
if (priv->plat->phy_addr == -1) priv->plat->phy_addr = addr; - act = (priv->plat->phy_addr == addr); - switch (phydev->irq) { - case PHY_POLL: - irq_str = "POLL"; - break; - case PHY_IGNORE_INTERRUPT: - irq_str = "IGNORE"; - break; - default: - sprintf(irq_num, "%d", phydev->irq); - irq_str = irq_num; - break; - } phy_attached_info(phydev); found = 1; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index a366b3747eeb..8a280b48e3a9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -315,6 +315,7 @@ static int stmmac_dt_phy(struct plat_stmmacenet_data *plat, { .compatible = "allwinner,sun8i-h3-emac" }, { .compatible = "allwinner,sun8i-v3s-emac" }, { .compatible = "allwinner,sun50i-a64-emac" }, + {}, }; /* If phy-handle property is passed from DT, use it as the PHY */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index d71bd80c5b5b..e471a903c654 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -152,7 +152,7 @@ static int stmmac_enable(struct ptp_clock_info *ptp, } /* structure describing a PTP hardware clock */ -static struct ptp_clock_info stmmac_ptp_clock_ops = { +static const struct ptp_clock_info stmmac_ptp_clock_ops = { .owner = THIS_MODULE, .name = "stmmac_ptp_clock", .max_adj = 62500000, diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c index 8603e397097e..5b56c24b6ed2 100644 --- a/drivers/net/ethernet/sun/ldmvsw.c +++ b/drivers/net/ethernet/sun/ldmvsw.c @@ -248,7 +248,7 @@ static struct net_device *vsw_alloc_netdev(u8 hwaddr[], dev->ethtool_ops = &vsw_ethtool_ops; dev->watchdog_timeo = VSW_TX_TIMEOUT; - dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG; + dev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG; dev->features = dev->hw_features; /* MTU range: 68 - 65535 */ diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 4bb04aaf9650..6a4e8e1bbd90 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -9221,8 +9221,7 @@ static int niu_get_of_props(struct niu *np) phy_type = of_get_property(dp, "phy-type", &prop_len); if (!phy_type) { - netdev_err(dev, "%s: OF node lacks phy-type property\n", - dp->full_name); + netdev_err(dev, "%pOF: OF node lacks phy-type property\n", dp); return -EINVAL; } @@ -9232,26 +9231,25 @@ static int niu_get_of_props(struct niu *np) strcpy(np->vpd.phy_type, phy_type); if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) { - netdev_err(dev, "%s: Illegal phy string [%s]\n", - dp->full_name, np->vpd.phy_type); + netdev_err(dev, "%pOF: Illegal phy string [%s]\n", + dp, np->vpd.phy_type); return -EINVAL; } mac_addr = of_get_property(dp, "local-mac-address", &prop_len); if (!mac_addr) { - netdev_err(dev, "%s: OF node lacks local-mac-address property\n", - dp->full_name); + netdev_err(dev, "%pOF: OF node lacks local-mac-address property\n", + dp); return -EINVAL; } if (prop_len != dev->addr_len) { - netdev_err(dev, "%s: OF MAC address prop len (%d) is wrong\n", - dp->full_name, prop_len); + netdev_err(dev, "%pOF: OF MAC address prop len (%d) is wrong\n", + dp, prop_len); } memcpy(dev->dev_addr, mac_addr, dev->addr_len); if (!is_valid_ether_addr(&dev->dev_addr[0])) { - netdev_err(dev, "%s: OF MAC address is invalid\n", - dp->full_name); - netdev_err(dev, "%s: [ %pM ]\n", dp->full_name, 
dev->dev_addr); + netdev_err(dev, "%pOF: OF MAC address is invalid\n", dp); + netdev_err(dev, "%pOF: [ %pM ]\n", dp, dev->dev_addr); return -EINVAL; } @@ -10027,8 +10025,8 @@ static int niu_of_probe(struct platform_device *op) reg = of_get_property(op->dev.of_node, "reg", NULL); if (!reg) { - dev_err(&op->dev, "%s: No 'reg' property, aborting\n", - op->dev.of_node->full_name); + dev_err(&op->dev, "%pOF: No 'reg' property, aborting\n", + op->dev.of_node); return -ENODEV; } diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 75b167e3fe98..0b95105f7060 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -312,7 +312,7 @@ static struct vnet *vnet_new(const u64 *local_mac, dev->watchdog_timeo = VNET_TX_TIMEOUT; dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GSO_SOFTWARE | - NETIF_F_IP_CSUM | NETIF_F_SG; + NETIF_F_HW_CSUM | NETIF_F_SG; dev->features = dev->hw_features; /* MTU range: 68 - 65535 */ diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c index 9e86833249d4..ecf456c7b6d1 100644 --- a/drivers/net/ethernet/sun/sunvnet_common.c +++ b/drivers/net/ethernet/sun/sunvnet_common.c @@ -303,7 +303,7 @@ static struct sk_buff *alloc_and_align_skb(struct net_device *dev, return skb; } -static inline void vnet_fullcsum(struct sk_buff *skb) +static inline void vnet_fullcsum_ipv4(struct sk_buff *skb) { struct iphdr *iph = ip_hdr(skb); int offset = skb_transport_offset(skb); @@ -335,6 +335,40 @@ static inline void vnet_fullcsum(struct sk_buff *skb) } } +#if IS_ENABLED(CONFIG_IPV6) +static inline void vnet_fullcsum_ipv6(struct sk_buff *skb) +{ + struct ipv6hdr *ip6h = ipv6_hdr(skb); + int offset = skb_transport_offset(skb); + + if (skb->protocol != htons(ETH_P_IPV6)) + return; + if (ip6h->nexthdr != IPPROTO_TCP && + ip6h->nexthdr != IPPROTO_UDP) + return; + skb->ip_summed = CHECKSUM_NONE; + skb->csum_level = 1; + skb->csum = 0; + if (ip6h->nexthdr == IPPROTO_TCP) { + struct tcphdr *ptcp = tcp_hdr(skb); + + ptcp->check = 0; + skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); + ptcp->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, + skb->len - offset, IPPROTO_TCP, + skb->csum); + } else if (ip6h->nexthdr == IPPROTO_UDP) { + struct udphdr *pudp = udp_hdr(skb); + + pudp->check = 0; + skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); + pudp->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, + skb->len - offset, IPPROTO_UDP, + skb->csum); + } +} +#endif + static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc) { struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port); @@ -394,9 +428,14 @@ static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc) struct iphdr *iph = ip_hdr(skb); int ihl = iph->ihl * 4; - skb_reset_transport_header(skb); skb_set_transport_header(skb, ihl); - vnet_fullcsum(skb); + vnet_fullcsum_ipv4(skb); +#if IS_ENABLED(CONFIG_IPV6) + } else if (skb->protocol == htons(ETH_P_IPV6)) { + skb_set_transport_header(skb, + sizeof(struct ipv6hdr)); + vnet_fullcsum_ipv6(skb); +#endif } } if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM_OK) { @@ -1115,24 +1154,47 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies) if (skb->ip_summed == CHECKSUM_PARTIAL) start = skb_checksum_start_offset(skb); if (start) { - struct iphdr *iph = ip_hdr(nskb); int offset = start + nskb->csum_offset; + /* copy the headers, no csum here */ if (skb_copy_bits(skb, 0, nskb->data, start)) { dev_kfree_skb(nskb); 
dev_kfree_skb(skb); return NULL; } + + /* copy the rest, with csum calculation */ *(__sum16 *)(skb->data + offset) = 0; csum = skb_copy_and_csum_bits(skb, start, nskb->data + start, skb->len - start, 0); - if (iph->protocol == IPPROTO_TCP || - iph->protocol == IPPROTO_UDP) { - csum = csum_tcpudp_magic(iph->saddr, iph->daddr, - skb->len - start, - iph->protocol, csum); + + /* add in the header checksums */ + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(nskb); + + if (iph->protocol == IPPROTO_TCP || + iph->protocol == IPPROTO_UDP) { + csum = csum_tcpudp_magic(iph->saddr, + iph->daddr, + skb->len - start, + iph->protocol, + csum); + } + } else if (skb->protocol == htons(ETH_P_IPV6)) { + struct ipv6hdr *ip6h = ipv6_hdr(nskb); + + if (ip6h->nexthdr == IPPROTO_TCP || + ip6h->nexthdr == IPPROTO_UDP) { + csum = csum_ipv6_magic(&ip6h->saddr, + &ip6h->daddr, + skb->len - start, + ip6h->nexthdr, + csum); + } } + + /* save the final result */ *(__sum16 *)(nskb->data + offset) = csum; nskb->ip_summed = CHECKSUM_NONE; @@ -1318,8 +1380,14 @@ int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev, if (unlikely(!skb)) goto out_dropped; - if (skb->ip_summed == CHECKSUM_PARTIAL) - vnet_fullcsum(skb); + if (skb->ip_summed == CHECKSUM_PARTIAL) { + if (skb->protocol == htons(ETH_P_IP)) + vnet_fullcsum_ipv4(skb); +#if IS_ENABLED(CONFIG_IPV6) + else if (skb->protocol == htons(ETH_P_IPV6)) + vnet_fullcsum_ipv6(skb); +#endif + } dr = &port->vio.drings[VIO_DRIVER_TX_RING]; i = skb_get_queue_mapping(skb); diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c index 3b91257683bc..e1b55b8fb8e0 100644 --- a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c +++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c @@ -17,6 +17,7 @@ #include #include +#include #include "dwc-xlgmac.h" #include "dwc-xlgmac-reg.h" diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index badd0a8caeb9..db8a4bcfc6c7 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1321,8 +1321,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) phy = of_phy_connect(priv->ndev, slave->data->phy_node, &cpsw_adjust_link, 0, slave->data->phy_if); if (!phy) { - dev_err(priv->dev, "phy \"%s\" not found on slave %d\n", - slave->data->phy_node->full_name, + dev_err(priv->dev, "phy \"%pOF\" not found on slave %d\n", + slave->data->phy_node, slave->slave_num); return; } @@ -2670,8 +2670,8 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, parp = of_get_property(slave_node, "phy_id", &lenp); if (slave_data->phy_node) { dev_dbg(&pdev->dev, - "slave[%d] using phy-handle=\"%s\"\n", - i, slave_data->phy_node->full_name); + "slave[%d] using phy-handle=\"%pOF\"\n", + i, slave_data->phy_node); } else if (of_phy_is_fixed_link(slave_node)) { /* In the case of a fixed PHY, the DT node associated * to the PHY is the Ethernet MAC DT node. 
@@ -2827,7 +2827,7 @@ static int cpsw_probe_dual_emac(struct cpsw_priv *priv) #define CPSW_QUIRK_IRQ BIT(0) -static struct platform_device_id cpsw_devtype[] = { +static const struct platform_device_id cpsw_devtype[] = { { /* keep it for existing comaptibles */ .name = "cpsw", diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c index c2121d214f08..e7b76f6b4f67 100644 --- a/drivers/net/ethernet/ti/cpts.c +++ b/drivers/net/ethernet/ti/cpts.c @@ -298,7 +298,7 @@ static long cpts_overflow_check(struct ptp_clock_info *ptp) return (long)delay; } -static struct ptp_clock_info cpts_info = { +static const struct ptp_clock_info cpts_info = { .owner = THIS_MODULE, .name = "CTPS timer", .max_adj = 1000000, diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 64d5527feb2a..4bb561856af5 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -1480,8 +1480,8 @@ static int emac_dev_open(struct net_device *ndev) phydev = of_phy_connect(ndev, priv->phy_node, &emac_adjust_link, 0, 0); if (!phydev) { - dev_err(emac_dev, "could not connect to phy %s\n", - priv->phy_node->full_name); + dev_err(emac_dev, "could not connect to phy %pOF\n", + priv->phy_node); ret = -ENODEV; goto err; } diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index 33df340db1f1..3c33f4504d8e 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -159,8 +159,10 @@ static int davinci_mdio_reset(struct mii_bus *bus) /* dump hardware version info */ ver = __raw_readl(&data->regs->version); - dev_info(data->dev, "davinci mdio revision %d.%d\n", - (ver >> 8) & 0xff, ver & 0xff); + dev_info(data->dev, + "davinci mdio revision %d.%d, bus freq %ld\n", + (ver >> 8) & 0xff, ver & 0xff, + data->pdata.bus_freq); if (data->skip_scan) goto done; @@ -198,8 +200,10 @@ static inline int wait_for_user_access(struct davinci_mdio_data *data) return 0; reg = __raw_readl(®s->control); - if ((reg & CONTROL_IDLE) == 0) + if ((reg & CONTROL_IDLE) == 0) { + usleep_range(100, 200); continue; + } /* * An emac soft_reset may have clobbered the mdio controller's diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 9d52c3a78621..437d36289786 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1877,20 +1877,21 @@ static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb, return 0; } -static int netcp_setup_tc(struct net_device *dev, u32 handle, u32 chain_index, - __be16 proto, struct tc_to_netdev *tc) +static int netcp_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) { + struct tc_mqprio_qopt *mqprio = type_data; u8 num_tc; int i; /* setup tc must be called under rtnl lock */ ASSERT_RTNL(); - if (tc->type != TC_SETUP_MQPRIO) - return -EINVAL; + if (type != TC_SETUP_MQPRIO) + return -EOPNOTSUPP; - tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; - num_tc = tc->mqprio->num_tc; + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + num_tc = mqprio->num_tc; /* Sanity-check the number of traffic classes requested */ if ((dev->real_num_tx_queues <= 1) || @@ -2144,7 +2145,6 @@ static void netcp_delete_interface(struct netcp_device *netcp_device, of_node_put(netcp->node_interface); unregister_netdev(ndev); - netif_napi_del(&netcp->rx_napi); free_netdev(ndev); } diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index 
aec95382ea5c..c00102b8145a 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ b/drivers/net/ethernet/tile/tilegx.c @@ -873,7 +873,7 @@ static int ptp_mpipe_enable(struct ptp_clock_info *ptp, return -EOPNOTSUPP; } -static struct ptp_clock_info ptp_mpipe_caps = { +static const struct ptp_clock_info ptp_mpipe_caps = { .owner = THIS_MODULE, .name = "mPIPE clock", .max_adj = 999999999, diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index acd29d60174a..83e6f76eb965 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -2598,7 +2598,7 @@ static struct platform_driver rhine_driver_platform = { } }; -static struct dmi_system_id rhine_dmi_table[] __initdata = { +static const struct dmi_system_id rhine_dmi_table[] __initconst = { { .ident = "EPIA-M", .matches = { diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index d73da8afe08e..60abc9250f56 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -1089,7 +1089,7 @@ static int temac_of_probe(struct platform_device *op) lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0); if (lp->phy_node) - dev_dbg(lp->dev, "using PHY node %s (%p)\n", np->full_name, np); + dev_dbg(lp->dev, "using PHY node %pOF (%p)\n", np, np); /* Add the device attributes */ rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group); diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h index af27f7d1cbf3..5ef626331f85 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet.h +++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h @@ -389,7 +389,7 @@ struct axidma_bd { * @dma_err_tasklet: Tasklet structure to process Axi DMA errors * @tx_irq: Axidma TX IRQ number * @rx_irq: Axidma RX IRQ number - * @phy_type: Phy type to identify between MII/GMII/RGMII/SGMII/1000 Base-X + * @phy_mode: Phy type to identify between MII/GMII/RGMII/SGMII/1000 Base-X * @options: AxiEthernet option word * @last_link: Phy link state in which the PHY was negotiated earlier * @features: Stores the extended features supported by the axienet hw @@ -432,7 +432,7 @@ struct axienet_local { int tx_irq; int rx_irq; - u32 phy_type; + phy_interface_t phy_mode; u32 options; /* Current options word */ u32 last_link; diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 33c595f4691d..e74e1e897864 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -531,11 +531,11 @@ static void axienet_adjust_link(struct net_device *ndev) link_state = phy->speed | (phy->duplex << 1) | phy->link; if (lp->last_link != link_state) { if ((phy->speed == SPEED_10) || (phy->speed == SPEED_100)) { - if (lp->phy_type == XAE_PHY_TYPE_1000BASE_X) + if (lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) setspeed = 0; } else { if ((phy->speed == SPEED_1000) && - (lp->phy_type == XAE_PHY_TYPE_MII)) + (lp->phy_mode == PHY_INTERFACE_MODE_MII)) setspeed = 0; } @@ -935,15 +935,8 @@ static int axienet_open(struct net_device *ndev) return ret; if (lp->phy_node) { - if (lp->phy_type == XAE_PHY_TYPE_GMII) { - phydev = of_phy_connect(lp->ndev, lp->phy_node, - axienet_adjust_link, 0, - PHY_INTERFACE_MODE_GMII); - } else if (lp->phy_type == XAE_PHY_TYPE_RGMII_2_0) { - phydev = of_phy_connect(lp->ndev, lp->phy_node, - axienet_adjust_link, 0, - PHY_INTERFACE_MODE_RGMII_ID); - } + 
phydev = of_phy_connect(lp->ndev, lp->phy_node, + axienet_adjust_link, 0, lp->phy_mode); if (!phydev) dev_err(lp->dev, "of_phy_connect() failed\n"); @@ -1539,7 +1532,38 @@ static int axienet_probe(struct platform_device *pdev) * the device-tree and accordingly set flags. */ of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem); - of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &lp->phy_type); + + /* Start with the proprietary, and broken phy_type */ + ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value); + if (!ret) { + netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode"); + switch (value) { + case XAE_PHY_TYPE_MII: + lp->phy_mode = PHY_INTERFACE_MODE_MII; + break; + case XAE_PHY_TYPE_GMII: + lp->phy_mode = PHY_INTERFACE_MODE_GMII; + break; + case XAE_PHY_TYPE_RGMII_2_0: + lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID; + break; + case XAE_PHY_TYPE_SGMII: + lp->phy_mode = PHY_INTERFACE_MODE_SGMII; + break; + case XAE_PHY_TYPE_1000BASE_X: + lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX; + break; + default: + ret = -EINVAL; + goto free_netdev; + } + } else { + lp->phy_mode = of_get_phy_mode(pdev->dev.of_node); + if (lp->phy_mode < 0) { + ret = -EINVAL; + goto free_netdev; + } + } /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */ np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0); diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c index f71883264cc0..fd5288ff53b5 100644 --- a/drivers/net/ethernet/xircom/xirc2ps_cs.c +++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c @@ -1781,7 +1781,7 @@ static int __init setup_xirc2ps_cs(char *str) */ int ints[10] = { -1 }; - str = get_options(str, 9, ints); + str = get_options(str, ARRAY_SIZE(ints), ints); #define MAYBE_SET(X,Y) if (ints[0] >= Y && ints[Y] != -1) { X = ints[Y]; } MAYBE_SET(if_port, 3); diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c index f4a816cf012a..61fceee73c1b 100644 --- a/drivers/net/fddi/defxx.c +++ b/drivers/net/fddi/defxx.c @@ -3767,7 +3767,7 @@ static void dfx_pci_unregister(struct pci_dev *pdev) #endif /* CONFIG_PCI */ #ifdef CONFIG_EISA -static struct eisa_device_id dfx_eisa_table[] = { +static const struct eisa_device_id dfx_eisa_table[] = { { "DEC3001", DEFEA_PROD_ID_1 }, { "DEC3002", DEFEA_PROD_ID_2 }, { "DEC3003", DEFEA_PROD_ID_3 }, diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 2bbda71818ad..f6404074b7b0 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -715,6 +715,7 @@ static int geneve_build_skb(struct dst_entry *dst, struct sk_buff *skb, static struct rtable *geneve_get_v4_rt(struct sk_buff *skb, struct net_device *dev, + struct geneve_sock *gs4, struct flowi4 *fl4, const struct ip_tunnel_info *info) { @@ -724,7 +725,7 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb, struct rtable *rt = NULL; __u8 tos; - if (!rcu_dereference(geneve->sock4)) + if (!gs4) return ERR_PTR(-EIO); memset(fl4, 0, sizeof(*fl4)); @@ -764,6 +765,7 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb, #if IS_ENABLED(CONFIG_IPV6) static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb, struct net_device *dev, + struct geneve_sock *gs6, struct flowi6 *fl6, const struct ip_tunnel_info *info) { @@ -771,10 +773,8 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb, struct geneve_dev *geneve = netdev_priv(dev); struct dst_entry *dst = NULL; struct dst_cache *dst_cache; - struct geneve_sock *gs6; __u8 prio; - gs6 = 
rcu_dereference(geneve->sock6); if (!gs6) return ERR_PTR(-EIO); @@ -827,7 +827,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, __be16 df; int err; - rt = geneve_get_v4_rt(skb, dev, &fl4, info); + rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info); if (IS_ERR(rt)) return PTR_ERR(rt); @@ -866,7 +866,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, __be16 sport; int err; - dst = geneve_get_v6_dst(skb, dev, &fl6, info); + dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info); if (IS_ERR(dst)) return PTR_ERR(dst); @@ -951,8 +951,9 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) if (ip_tunnel_info_af(info) == AF_INET) { struct rtable *rt; struct flowi4 fl4; + struct geneve_sock *gs4 = rcu_dereference(geneve->sock4); - rt = geneve_get_v4_rt(skb, dev, &fl4, info); + rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info); if (IS_ERR(rt)) return PTR_ERR(rt); @@ -962,8 +963,9 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) } else if (ip_tunnel_info_af(info) == AF_INET6) { struct dst_entry *dst; struct flowi6 fl6; + struct geneve_sock *gs6 = rcu_dereference(geneve->sock6); - dst = geneve_get_v6_dst(skb, dev, &fl6, info); + dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info); if (IS_ERR(dst)) return PTR_ERR(dst); @@ -1014,16 +1016,22 @@ static struct device_type geneve_type = { * supply the listening GENEVE udp ports. Callers are expected * to implement the ndo_udp_tunnel_add. */ -static void geneve_push_rx_ports(struct net_device *dev) +static void geneve_offload_rx_ports(struct net_device *dev, bool push) { struct net *net = dev_net(dev); struct geneve_net *gn = net_generic(net, geneve_net_id); struct geneve_sock *gs; rcu_read_lock(); - list_for_each_entry_rcu(gs, &gn->sock_list, list) - udp_tunnel_push_rx_port(dev, gs->sock, - UDP_TUNNEL_TYPE_GENEVE); + list_for_each_entry_rcu(gs, &gn->sock_list, list) { + if (push) { + udp_tunnel_push_rx_port(dev, gs->sock, + UDP_TUNNEL_TYPE_GENEVE); + } else { + udp_tunnel_drop_rx_port(dev, gs->sock, + UDP_TUNNEL_TYPE_GENEVE); + } + } rcu_read_unlock(); } @@ -1078,21 +1086,33 @@ static int geneve_validate(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { if (tb[IFLA_ADDRESS]) { - if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) + if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) { + NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_ADDRESS], + "Provided link layer address is not Ethernet"); return -EINVAL; + } - if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) + if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) { + NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_ADDRESS], + "Provided Ethernet address is not unicast"); return -EADDRNOTAVAIL; + } } - if (!data) + if (!data) { + NL_SET_ERR_MSG(extack, + "Not enough attributes provided to perform the operation"); return -EINVAL; + } if (data[IFLA_GENEVE_ID]) { __u32 vni = nla_get_u32(data[IFLA_GENEVE_ID]); - if (vni >= GENEVE_N_VID) + if (vni >= GENEVE_N_VID) { + NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_ID], + "Geneve ID must be lower than 16777216"); return -ERANGE; + } } return 0; @@ -1140,7 +1160,17 @@ static bool is_tnl_info_zero(const struct ip_tunnel_info *info) return true; } +static bool geneve_dst_addr_equal(struct ip_tunnel_info *a, + struct ip_tunnel_info *b) +{ + if (ip_tunnel_info_af(a) == AF_INET) + return a->key.u.ipv4.dst == b->key.u.ipv4.dst; + else + return ipv6_addr_equal(&a->key.u.ipv6.dst, &b->key.u.ipv6.dst); +} + static int geneve_configure(struct net *net, struct net_device 
*dev, + struct netlink_ext_ack *extack, const struct ip_tunnel_info *info, bool metadata, bool ipv6_rx_csum) { @@ -1149,8 +1179,11 @@ static int geneve_configure(struct net *net, struct net_device *dev, bool tun_collect_md, tun_on_same_port; int err, encap_len; - if (metadata && !is_tnl_info_zero(info)) + if (metadata && !is_tnl_info_zero(info)) { + NL_SET_ERR_MSG(extack, + "Device is externally controlled, so attributes (VNI, Port, and so on) must not be specified"); return -EINVAL; + } geneve->net = net; geneve->dev = dev; @@ -1171,11 +1204,17 @@ static int geneve_configure(struct net *net, struct net_device *dev, dev->needed_headroom = encap_len + ETH_HLEN; if (metadata) { - if (tun_on_same_port) + if (tun_on_same_port) { + NL_SET_ERR_MSG(extack, + "There can be only one externally controlled device on a destination port"); return -EPERM; + } } else { - if (tun_collect_md) + if (tun_collect_md) { + NL_SET_ERR_MSG(extack, + "There already exists an externally controlled device on this destination port"); return -EPERM; + } } dst_cache_reset(&geneve->info.dst_cache); @@ -1197,47 +1236,62 @@ static void init_tnl_info(struct ip_tunnel_info *info, __u16 dst_port) info->key.tp_dst = htons(dst_port); } -static int geneve_newlink(struct net *net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], - struct netlink_ext_ack *extack) +static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[], + struct netlink_ext_ack *extack, + struct ip_tunnel_info *info, bool *metadata, + bool *use_udp6_rx_checksums, bool changelink) { - bool use_udp6_rx_checksums = false; - struct ip_tunnel_info info; - bool metadata = false; + int attrtype; - init_tnl_info(&info, GENEVE_UDP_PORT); - - if (data[IFLA_GENEVE_REMOTE] && data[IFLA_GENEVE_REMOTE6]) + if (data[IFLA_GENEVE_REMOTE] && data[IFLA_GENEVE_REMOTE6]) { + NL_SET_ERR_MSG(extack, + "Cannot specify both IPv4 and IPv6 Remote addresses"); return -EINVAL; + } if (data[IFLA_GENEVE_REMOTE]) { - info.key.u.ipv4.dst = + if (changelink && (ip_tunnel_info_af(info) == AF_INET6)) { + attrtype = IFLA_GENEVE_REMOTE; + goto change_notsup; + } + + info->key.u.ipv4.dst = nla_get_in_addr(data[IFLA_GENEVE_REMOTE]); - if (IN_MULTICAST(ntohl(info.key.u.ipv4.dst))) { - netdev_dbg(dev, "multicast remote is unsupported\n"); + if (IN_MULTICAST(ntohl(info->key.u.ipv4.dst))) { + NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE], + "Remote IPv4 address cannot be Multicast"); return -EINVAL; } } if (data[IFLA_GENEVE_REMOTE6]) { #if IS_ENABLED(CONFIG_IPV6) - info.mode = IP_TUNNEL_INFO_IPV6; - info.key.u.ipv6.dst = + if (changelink && (ip_tunnel_info_af(info) == AF_INET)) { + attrtype = IFLA_GENEVE_REMOTE6; + goto change_notsup; + } + + info->mode = IP_TUNNEL_INFO_IPV6; + info->key.u.ipv6.dst = nla_get_in6_addr(data[IFLA_GENEVE_REMOTE6]); - if (ipv6_addr_type(&info.key.u.ipv6.dst) & + if (ipv6_addr_type(&info->key.u.ipv6.dst) & IPV6_ADDR_LINKLOCAL) { - netdev_dbg(dev, "link-local remote is unsupported\n"); + NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE6], + "Remote IPv6 address cannot be link-local"); return -EINVAL; } - if (ipv6_addr_is_multicast(&info.key.u.ipv6.dst)) { - netdev_dbg(dev, "multicast remote is unsupported\n"); + if (ipv6_addr_is_multicast(&info->key.u.ipv6.dst)) { + NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE6], + "Remote IPv6 address cannot be Multicast"); return -EINVAL; } - info.key.tun_flags |= TUNNEL_CSUM; - use_udp6_rx_checksums = true; + info->key.tun_flags |= TUNNEL_CSUM; + *use_udp6_rx_checksums = true; #else + 
NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE6], + "IPv6 support not enabled in the kernel"); return -EPFNOSUPPORT; #endif } @@ -1245,46 +1299,187 @@ static int geneve_newlink(struct net *net, struct net_device *dev, if (data[IFLA_GENEVE_ID]) { __u32 vni; __u8 tvni[3]; + __be64 tunid; vni = nla_get_u32(data[IFLA_GENEVE_ID]); tvni[0] = (vni & 0x00ff0000) >> 16; tvni[1] = (vni & 0x0000ff00) >> 8; tvni[2] = vni & 0x000000ff; - info.key.tun_id = vni_to_tunnel_id(tvni); + tunid = vni_to_tunnel_id(tvni); + if (changelink && (tunid != info->key.tun_id)) { + attrtype = IFLA_GENEVE_ID; + goto change_notsup; + } + info->key.tun_id = tunid; } + if (data[IFLA_GENEVE_TTL]) - info.key.ttl = nla_get_u8(data[IFLA_GENEVE_TTL]); + info->key.ttl = nla_get_u8(data[IFLA_GENEVE_TTL]); if (data[IFLA_GENEVE_TOS]) - info.key.tos = nla_get_u8(data[IFLA_GENEVE_TOS]); + info->key.tos = nla_get_u8(data[IFLA_GENEVE_TOS]); if (data[IFLA_GENEVE_LABEL]) { - info.key.label = nla_get_be32(data[IFLA_GENEVE_LABEL]) & + info->key.label = nla_get_be32(data[IFLA_GENEVE_LABEL]) & IPV6_FLOWLABEL_MASK; - if (info.key.label && (!(info.mode & IP_TUNNEL_INFO_IPV6))) + if (info->key.label && (!(info->mode & IP_TUNNEL_INFO_IPV6))) { + NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_LABEL], + "Label attribute only applies for IPv6 Geneve devices"); return -EINVAL; + } } - if (data[IFLA_GENEVE_PORT]) - info.key.tp_dst = nla_get_be16(data[IFLA_GENEVE_PORT]); + if (data[IFLA_GENEVE_PORT]) { + if (changelink) { + attrtype = IFLA_GENEVE_PORT; + goto change_notsup; + } + info->key.tp_dst = nla_get_be16(data[IFLA_GENEVE_PORT]); + } - if (data[IFLA_GENEVE_COLLECT_METADATA]) - metadata = true; + if (data[IFLA_GENEVE_COLLECT_METADATA]) { + if (changelink) { + attrtype = IFLA_GENEVE_COLLECT_METADATA; + goto change_notsup; + } + *metadata = true; + } - if (data[IFLA_GENEVE_UDP_CSUM] && - nla_get_u8(data[IFLA_GENEVE_UDP_CSUM])) - info.key.tun_flags |= TUNNEL_CSUM; + if (data[IFLA_GENEVE_UDP_CSUM]) { + if (changelink) { + attrtype = IFLA_GENEVE_UDP_CSUM; + goto change_notsup; + } + if (nla_get_u8(data[IFLA_GENEVE_UDP_CSUM])) + info->key.tun_flags |= TUNNEL_CSUM; + } - if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX] && - nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX])) - info.key.tun_flags &= ~TUNNEL_CSUM; + if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]) { + if (changelink) { + attrtype = IFLA_GENEVE_UDP_ZERO_CSUM6_TX; + goto change_notsup; + } + if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX])) + info->key.tun_flags &= ~TUNNEL_CSUM; + } - if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX] && - nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX])) - use_udp6_rx_checksums = false; + if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]) { + if (changelink) { + attrtype = IFLA_GENEVE_UDP_ZERO_CSUM6_RX; + goto change_notsup; + } + if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX])) + *use_udp6_rx_checksums = false; + } - return geneve_configure(net, dev, &info, metadata, use_udp6_rx_checksums); + return 0; +change_notsup: + NL_SET_ERR_MSG_ATTR(extack, data[attrtype], + "Changing VNI, Port, endpoint IP address family, external, and UDP checksum attributes are not supported"); + return -EOPNOTSUPP; +} + +static int geneve_newlink(struct net *net, struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + bool use_udp6_rx_checksums = false; + struct ip_tunnel_info info; + bool metadata = false; + int err; + + init_tnl_info(&info, GENEVE_UDP_PORT); + err = geneve_nl2info(tb, data, extack, &info, &metadata, + &use_udp6_rx_checksums, false); + if 
(err) + return err; + + return geneve_configure(net, dev, extack, &info, metadata, + use_udp6_rx_checksums); +} + +/* Quiesces the geneve device data path for both TX and RX. + * + * On transmit geneve checks for non-NULL geneve_sock before it proceeds. + * So, if we set that socket to NULL under RCU and wait for synchronize_net() + * to complete for the existing set of in-flight packets to be transmitted, + * then we would have quiesced the transmit data path. All the future packets + * will get dropped until we unquiesce the data path. + * + * On receive geneve dereference the geneve_sock stashed in the socket. So, + * if we set that to NULL under RCU and wait for synchronize_net() to + * complete, then we would have quiesced the receive data path. + */ +static void geneve_quiesce(struct geneve_dev *geneve, struct geneve_sock **gs4, + struct geneve_sock **gs6) +{ + *gs4 = rtnl_dereference(geneve->sock4); + rcu_assign_pointer(geneve->sock4, NULL); + if (*gs4) + rcu_assign_sk_user_data((*gs4)->sock->sk, NULL); +#if IS_ENABLED(CONFIG_IPV6) + *gs6 = rtnl_dereference(geneve->sock6); + rcu_assign_pointer(geneve->sock6, NULL); + if (*gs6) + rcu_assign_sk_user_data((*gs6)->sock->sk, NULL); +#else + *gs6 = NULL; +#endif + synchronize_net(); +} + +/* Resumes the geneve device data path for both TX and RX. */ +static void geneve_unquiesce(struct geneve_dev *geneve, struct geneve_sock *gs4, + struct geneve_sock __maybe_unused *gs6) +{ + rcu_assign_pointer(geneve->sock4, gs4); + if (gs4) + rcu_assign_sk_user_data(gs4->sock->sk, gs4); +#if IS_ENABLED(CONFIG_IPV6) + rcu_assign_pointer(geneve->sock6, gs6); + if (gs6) + rcu_assign_sk_user_data(gs6->sock->sk, gs6); +#endif + synchronize_net(); +} + +static int geneve_changelink(struct net_device *dev, struct nlattr *tb[], + struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + struct geneve_dev *geneve = netdev_priv(dev); + struct geneve_sock *gs4, *gs6; + struct ip_tunnel_info info; + bool metadata; + bool use_udp6_rx_checksums; + int err; + + /* If the geneve device is configured for metadata (or externally + * controlled, for example, OVS), then nothing can be changed. + */ + if (geneve->collect_md) + return -EOPNOTSUPP; + + /* Start with the existing info. 
*/ + memcpy(&info, &geneve->info, sizeof(info)); + metadata = geneve->collect_md; + use_udp6_rx_checksums = geneve->use_udp6_rx_checksums; + err = geneve_nl2info(tb, data, extack, &info, &metadata, + &use_udp6_rx_checksums, true); + if (err) + return err; + + if (!geneve_dst_addr_equal(&geneve->info, &info)) + dst_cache_reset(&info.dst_cache); + + geneve_quiesce(geneve, &gs4, &gs6); + geneve->info = info; + geneve->collect_md = metadata; + geneve->use_udp6_rx_checksums = use_udp6_rx_checksums; + geneve_unquiesce(geneve, gs4, gs6); + + return 0; } static void geneve_dellink(struct net_device *dev, struct list_head *head) @@ -1375,6 +1570,7 @@ static struct rtnl_link_ops geneve_link_ops __read_mostly = { .setup = geneve_setup, .validate = geneve_validate, .newlink = geneve_newlink, + .changelink = geneve_changelink, .dellink = geneve_dellink, .get_size = geneve_get_size, .fill_info = geneve_fill_info, @@ -1396,7 +1592,7 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name, return dev; init_tnl_info(&info, dst_port); - err = geneve_configure(net, dev, &info, true, true); + err = geneve_configure(net, dev, NULL, &info, true, true); if (err) { free_netdev(dev); return ERR_PTR(err); @@ -1426,8 +1622,14 @@ static int geneve_netdevice_event(struct notifier_block *unused, { struct net_device *dev = netdev_notifier_info_to_dev(ptr); - if (event == NETDEV_UDP_TUNNEL_PUSH_INFO) - geneve_push_rx_ports(dev); + if (event == NETDEV_UDP_TUNNEL_PUSH_INFO || + event == NETDEV_UDP_TUNNEL_DROP_INFO) { + geneve_offload_rx_ports(dev, event == NETDEV_UDP_TUNNEL_PUSH_INFO); + } else if (event == NETDEV_UNREGISTER) { + geneve_offload_rx_ports(dev, false); + } else if (event == NETDEV_REGISTER) { + geneve_offload_rx_ports(dev, true); + } return NOTIFY_DONE; } diff --git a/drivers/net/hamradio/baycom_par.c b/drivers/net/hamradio/baycom_par.c index 92b13b39f426..e1783832d304 100644 --- a/drivers/net/hamradio/baycom_par.c +++ b/drivers/net/hamradio/baycom_par.c @@ -386,7 +386,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, /* --------------------------------------------------------------------- */ -static struct hdlcdrv_ops par96_ops = { +static const struct hdlcdrv_ops par96_ops = { .drvname = bc_drvname, .drvinfo = bc_drvinfo, .open = par96_open, diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c index d9a646acca20..190f66c88479 100644 --- a/drivers/net/hamradio/baycom_ser_fdx.c +++ b/drivers/net/hamradio/baycom_ser_fdx.c @@ -508,7 +508,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, /* --------------------------------------------------------------------- */ -static struct hdlcdrv_ops ser12_ops = { +static const struct hdlcdrv_ops ser12_ops = { .drvname = bc_drvname, .drvinfo = bc_drvinfo, .open = ser12_open, diff --git a/drivers/net/hamradio/baycom_ser_hdx.c b/drivers/net/hamradio/baycom_ser_hdx.c index f1c8a9ff3891..3c823c648cf5 100644 --- a/drivers/net/hamradio/baycom_ser_hdx.c +++ b/drivers/net/hamradio/baycom_ser_hdx.c @@ -542,7 +542,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, /* --------------------------------------------------------------------- */ -static struct hdlcdrv_ops ser12_ops = { +static const struct hdlcdrv_ops ser12_ops = { .drvname = bc_drvname, .drvinfo = bc_drvinfo, .open = ser12_open, diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c index dec6b76bc0fb..cde41200f40a 100644 --- a/drivers/net/hamradio/dmascc.c +++ 
b/drivers/net/hamradio/dmascc.c @@ -581,7 +581,7 @@ static int __init setup_adapter(int card_base, int type, int n) priv->param.dma = -1; INIT_WORK(&priv->rx_work, rx_bh); dev->ml_priv = priv; - sprintf(dev->name, "dmascc%i", 2 * n + i); + snprintf(dev->name, sizeof(dev->name), "dmascc%i", 2 * n + i); dev->base_addr = card_base; dev->irq = irq; dev->netdev_ops = &scc_netdev_ops; diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 12cc64bfcff8..5176be76ca7d 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -147,8 +147,11 @@ struct hv_netvsc_packet { struct netvsc_device_info { unsigned char mac_adr[ETH_ALEN]; int ring_size; - u32 max_num_vrss_chns; u32 num_chn; + u32 send_sections; + u32 recv_sections; + u32 send_section_size; + u32 recv_section_size; }; enum rndis_device_state { @@ -183,13 +186,16 @@ struct rndis_device { /* Interface */ struct rndis_message; struct netvsc_device; -int netvsc_device_add(struct hv_device *device, - const struct netvsc_device_info *info); +struct net_device_context; + +struct netvsc_device *netvsc_device_add(struct hv_device *device, + const struct netvsc_device_info *info); +int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx); void netvsc_device_remove(struct hv_device *device); -int netvsc_send(struct hv_device *device, +int netvsc_send(struct net_device_context *ndc, struct hv_netvsc_packet *packet, struct rndis_message *rndis_msg, - struct hv_page_buffer **page_buffer, + struct hv_page_buffer *page_buffer, struct sk_buff *skb); void netvsc_linkstatus_callback(struct hv_device *device_obj, struct rndis_message *resp); @@ -200,22 +206,26 @@ int netvsc_recv_callback(struct net_device *net, const struct ndis_pkt_8021q_info *vlan); void netvsc_channel_cb(void *context); int netvsc_poll(struct napi_struct *napi, int budget); + +void rndis_set_subchannel(struct work_struct *w); +bool rndis_filter_opened(const struct netvsc_device *nvdev); int rndis_filter_open(struct netvsc_device *nvdev); int rndis_filter_close(struct netvsc_device *nvdev); -int rndis_filter_device_add(struct hv_device *dev, - struct netvsc_device_info *info); +struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, + struct netvsc_device_info *info); void rndis_filter_update(struct netvsc_device *nvdev); void rndis_filter_device_remove(struct hv_device *dev, struct netvsc_device *nvdev); int rndis_filter_set_rss_param(struct rndis_device *rdev, - const u8 *key, int num_queue); + const u8 *key); int rndis_filter_receive(struct net_device *ndev, struct netvsc_device *net_dev, struct hv_device *dev, struct vmbus_channel *channel, void *data, u32 buflen); -int rndis_filter_set_device_mac(struct net_device *ndev, char *mac); +int rndis_filter_set_device_mac(struct netvsc_device *ndev, + const char *mac); void netvsc_switch_datapath(struct net_device *nv_dev, bool vf); @@ -630,12 +640,12 @@ struct nvsp_message { #define NETVSC_SEND_BUFFER_SIZE (1024 * 1024 * 15) /* 15MB */ #define NETVSC_INVALID_INDEX -1 +#define NETVSC_SEND_SECTION_SIZE 6144 +#define NETVSC_RECV_SECTION_SIZE 1728 #define NETVSC_RECEIVE_BUFFER_ID 0xcafe #define NETVSC_SEND_BUFFER_ID 0 -#define NETVSC_PACKET_SIZE 4096 - #define VRSS_SEND_TAB_SIZE 16 /* must be power of 2 */ #define VRSS_CHANNEL_MAX 64 #define VRSS_CHANNEL_DEFAULT 8 @@ -654,13 +664,10 @@ struct recv_comp_data { u32 status; }; -/* Netvsc Receive Slots Max */ -#define NETVSC_RECVSLOT_MAX (NETVSC_RECEIVE_BUFFER_SIZE / ETH_DATA_LEN + 1) - struct multi_recv_comp { - 
void *buf; /* queued receive completions */ - u32 first; /* first data entry */ - u32 next; /* next entry for writing */ + struct recv_comp_data *slots; + u32 first; /* first data entry */ + u32 next; /* next entry for writing */ }; struct netvsc_stats { @@ -677,6 +684,17 @@ struct netvsc_ethtool_stats { unsigned long tx_no_space; unsigned long tx_too_big; unsigned long tx_busy; + unsigned long tx_send_full; + unsigned long rx_comp_busy; +}; + +struct netvsc_vf_pcpu_stats { + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + struct u64_stats_sync syncp; + u32 tx_dropped; }; struct netvsc_reconfig { @@ -706,24 +724,27 @@ struct net_device_context { u32 tx_send_table[VRSS_SEND_TAB_SIZE]; /* Ethtool settings */ + bool udp4_l4_hash; + bool udp6_l4_hash; u8 duplex; u32 speed; struct netvsc_ethtool_stats eth_stats; /* State to manage the associated VF interface. */ struct net_device __rcu *vf_netdev; + struct netvsc_vf_pcpu_stats __percpu *vf_stats; + struct delayed_work vf_takeover; /* 1: allocated, serial number is valid. 0: not allocated */ u32 vf_alloc; /* Serial number of the VF to team with */ u32 vf_serial; - - bool datapath; /* 0 - synthetic, 1 - VF nic */ }; /* Per channel data */ struct netvsc_channel { struct vmbus_channel *channel; + struct netvsc_device *net_device; const struct vmpacket_descriptor *desc; struct napi_struct napi; struct multi_send_data msd; @@ -743,14 +764,13 @@ struct netvsc_device { /* Receive buffer allocated by us but manages by NetVSP */ void *recv_buf; - u32 recv_buf_size; u32 recv_buf_gpadl_handle; u32 recv_section_cnt; - struct nvsp_1_receive_buffer_section *recv_section; + u32 recv_section_size; + u32 recv_completion_cnt; /* Send buffer allocated by us */ void *send_buf; - u32 send_buf_size; u32 send_buf_gpadl_handle; u32 send_section_cnt; u32 send_section_size; @@ -766,6 +786,7 @@ struct netvsc_device { u32 num_chn; atomic_t open_chn; + struct work_struct subchan_work; wait_queue_head_t subchan_open; struct rndis_device *extension; @@ -775,8 +796,6 @@ struct netvsc_device { u32 max_pkt; /* max number of pkt in one send, e.g. 8 */ u32 pkt_align; /* alignment bytes, e.g. 
8 */ - atomic_t num_outstanding_recvs; - atomic_t open_cnt; struct netvsc_channel chan_table[VRSS_CHANNEL_MAX]; @@ -784,18 +803,6 @@ struct netvsc_device { struct rcu_head rcu; }; -static inline struct netvsc_device * -net_device_to_netvsc_device(struct net_device *ndev) -{ - return ((struct net_device_context *)netdev_priv(ndev))->nvdev; -} - -static inline struct netvsc_device * -hv_device_to_netvsc_device(struct hv_device *device) -{ - return net_device_to_netvsc_device(hv_get_drvdata(device)); -} - /* NdisInitialize message */ struct rndis_initialize_request { u32 req_id; diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index d18c3326a1f7..8d5077fb0492 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -29,6 +29,9 @@ #include #include #include +#include +#include + #include #include "hyperv_net.h" @@ -41,7 +44,7 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf) { struct net_device_context *net_device_ctx = netdev_priv(ndev); struct hv_device *dev = net_device_ctx->device_ctx; - struct netvsc_device *nv_dev = net_device_ctx->nvdev; + struct netvsc_device *nv_dev = rtnl_dereference(net_device_ctx->nvdev); struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt; memset(init_pkt, 0, sizeof(struct nvsp_message)); @@ -57,8 +60,6 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf) sizeof(struct nvsp_message), (unsigned long)init_pkt, VM_PKT_DATA_INBAND, 0); - - net_device_ctx->datapath = vf; } static struct netvsc_device *alloc_net_device(void) @@ -69,16 +70,15 @@ static struct netvsc_device *alloc_net_device(void) if (!net_device) return NULL; - net_device->chan_table[0].mrc.buf - = vzalloc(NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data)); - init_waitqueue_head(&net_device->wait_drain); net_device->destroy = false; atomic_set(&net_device->open_cnt, 0); net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; + init_completion(&net_device->channel_init_wait); init_waitqueue_head(&net_device->subchan_open); + INIT_WORK(&net_device->subchan_work, rndis_set_subchannel); return net_device; } @@ -90,7 +90,7 @@ static void free_netvsc_device(struct rcu_head *head) int i; for (i = 0; i < VRSS_CHANNEL_MAX; i++) - vfree(nvdev->chan_table[i].mrc.buf); + vfree(nvdev->chan_table[i].mrc.slots); kfree(nvdev); } @@ -104,7 +104,8 @@ static void netvsc_destroy_buf(struct hv_device *device) { struct nvsp_message *revoke_packet; struct net_device *ndev = hv_get_drvdata(device); - struct netvsc_device *net_device = net_device_to_netvsc_device(ndev); + struct net_device_context *ndc = netdev_priv(ndev); + struct netvsc_device *net_device = rtnl_dereference(ndc->nvdev); int ret; /* @@ -144,6 +145,7 @@ static void netvsc_destroy_buf(struct hv_device *device) "revoke receive buffer to netvsp\n"); return; } + net_device->recv_section_cnt = 0; } /* Teardown the gpadl on the vsp end */ @@ -168,19 +170,13 @@ static void netvsc_destroy_buf(struct hv_device *device) net_device->recv_buf = NULL; } - if (net_device->recv_section) { - net_device->recv_section_cnt = 0; - kfree(net_device->recv_section); - net_device->recv_section = NULL; - } - /* Deal with the send buffer we may have setup. 
* If we got a send section size, it means we received a * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (ie sent * NVSP_MSG1_TYPE_SEND_SEND_BUF msg) therefore, we need * to send a revoke msg here */ - if (net_device->send_section_size) { + if (net_device->send_section_cnt) { /* Send the revoke receive buffer */ revoke_packet = &net_device->revoke_packet; memset(revoke_packet, 0, sizeof(struct nvsp_message)); @@ -212,6 +208,7 @@ static void netvsc_destroy_buf(struct hv_device *device) "revoke send buffer to netvsp\n"); return; } + net_device->send_section_cnt = 0; } /* Teardown the gpadl on the vsp end */ if (net_device->send_buf_gpadl_handle) { @@ -236,25 +233,40 @@ static void netvsc_destroy_buf(struct hv_device *device) kfree(net_device->send_section_map); } -static int netvsc_init_buf(struct hv_device *device, - struct netvsc_device *net_device) +int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx) { - int ret = 0; + struct netvsc_channel *nvchan = &net_device->chan_table[q_idx]; + int node = cpu_to_node(nvchan->channel->target_cpu); + size_t size; + + size = net_device->recv_completion_cnt * sizeof(struct recv_comp_data); + nvchan->mrc.slots = vzalloc_node(size, node); + if (!nvchan->mrc.slots) + nvchan->mrc.slots = vzalloc(size); + + return nvchan->mrc.slots ? 0 : -ENOMEM; +} + +static int netvsc_init_buf(struct hv_device *device, + struct netvsc_device *net_device, + const struct netvsc_device_info *device_info) +{ + struct nvsp_1_message_send_receive_buffer_complete *resp; + struct net_device *ndev = hv_get_drvdata(device); struct nvsp_message *init_packet; - struct net_device *ndev; + unsigned int buf_size; size_t map_words; - int node; + int ret = 0; - ndev = hv_get_drvdata(device); - - node = cpu_to_node(device->channel->target_cpu); - net_device->recv_buf = vzalloc_node(net_device->recv_buf_size, node); - if (!net_device->recv_buf) - net_device->recv_buf = vzalloc(net_device->recv_buf_size); + /* Get receive buffer area. */ + buf_size = device_info->recv_sections * device_info->recv_section_size; + buf_size = roundup(buf_size, PAGE_SIZE); + net_device->recv_buf = vzalloc(buf_size); if (!net_device->recv_buf) { - netdev_err(ndev, "unable to allocate receive " - "buffer of size %d\n", net_device->recv_buf_size); + netdev_err(ndev, + "unable to allocate receive buffer of size %u\n", + buf_size); ret = -ENOMEM; goto cleanup; } @@ -265,7 +277,7 @@ static int netvsc_init_buf(struct hv_device *device, * than the channel to establish the gpadl handle. */ ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf, - net_device->recv_buf_size, + buf_size, &net_device->recv_buf_gpadl_handle); if (ret != 0) { netdev_err(ndev, @@ -297,49 +309,45 @@ static int netvsc_init_buf(struct hv_device *device, wait_for_completion(&net_device->channel_init_wait); /* Check the response */ - if (init_packet->msg.v1_msg. - send_recv_buf_complete.status != NVSP_STAT_SUCCESS) { - netdev_err(ndev, "Unable to complete receive buffer " - "initialization with NetVsp - status %d\n", - init_packet->msg.v1_msg. 
- send_recv_buf_complete.status); + resp = &init_packet->msg.v1_msg.send_recv_buf_complete; + if (resp->status != NVSP_STAT_SUCCESS) { + netdev_err(ndev, + "Unable to complete receive buffer initialization with NetVsp - status %d\n", + resp->status); ret = -EINVAL; goto cleanup; } /* Parse the response */ + netdev_dbg(ndev, "Receive sections: %u sub_allocs: size %u count: %u\n", + resp->num_sections, resp->sections[0].sub_alloc_size, + resp->sections[0].num_sub_allocs); - net_device->recv_section_cnt = init_packet->msg. - v1_msg.send_recv_buf_complete.num_sections; - - net_device->recv_section = kmemdup( - init_packet->msg.v1_msg.send_recv_buf_complete.sections, - net_device->recv_section_cnt * - sizeof(struct nvsp_1_receive_buffer_section), - GFP_KERNEL); - if (net_device->recv_section == NULL) { + /* There should only be one section for the entire receive buffer */ + if (resp->num_sections != 1 || resp->sections[0].offset != 0) { ret = -EINVAL; goto cleanup; } - /* - * For 1st release, there should only be 1 section that represents the - * entire receive buffer - */ - if (net_device->recv_section_cnt != 1 || - net_device->recv_section->offset != 0) { - ret = -EINVAL; - goto cleanup; - } + net_device->recv_section_size = resp->sections[0].sub_alloc_size; + net_device->recv_section_cnt = resp->sections[0].num_sub_allocs; - /* Now setup the send buffer. - */ - net_device->send_buf = vzalloc_node(net_device->send_buf_size, node); - if (!net_device->send_buf) - net_device->send_buf = vzalloc(net_device->send_buf_size); + /* Setup receive completion ring */ + net_device->recv_completion_cnt + = round_up(net_device->recv_section_cnt + 1, + PAGE_SIZE / sizeof(u64)); + ret = netvsc_alloc_recv_comp_ring(net_device, 0); + if (ret) + goto cleanup; + + /* Now setup the send buffer. */ + buf_size = device_info->send_sections * device_info->send_section_size; + buf_size = round_up(buf_size, PAGE_SIZE); + + net_device->send_buf = vzalloc(buf_size); if (!net_device->send_buf) { - netdev_err(ndev, "unable to allocate send " - "buffer of size %d\n", net_device->send_buf_size); + netdev_err(ndev, "unable to allocate send buffer of size %u\n", + buf_size); ret = -ENOMEM; goto cleanup; } @@ -349,7 +357,7 @@ static int netvsc_init_buf(struct hv_device *device, * than the channel to establish the gpadl handle. */ ret = vmbus_establish_gpadl(device->channel, net_device->send_buf, - net_device->send_buf_size, + buf_size, &net_device->send_buf_gpadl_handle); if (ret != 0) { netdev_err(ndev, @@ -394,10 +402,8 @@ static int netvsc_init_buf(struct hv_device *device, net_device->send_section_size = init_packet->msg. v1_msg.send_send_buf_complete.section_size; - /* Section count is simply the size divided by the section size. - */ - net_device->send_section_cnt = - net_device->send_buf_size / net_device->send_section_size; + /* Section count is simply the size divided by the section size. 
*/ + net_device->send_section_cnt = buf_size / net_device->send_section_size; netdev_dbg(ndev, "Send section size: %d, Section count:%d\n", net_device->send_section_size, net_device->send_section_cnt); @@ -475,7 +481,8 @@ static int negotiate_nvsp_ver(struct hv_device *device, } static int netvsc_connect_vsp(struct hv_device *device, - struct netvsc_device *net_device) + struct netvsc_device *net_device, + const struct netvsc_device_info *device_info) { const u32 ver_list[] = { NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2, @@ -525,14 +532,8 @@ static int netvsc_connect_vsp(struct hv_device *device, if (ret != 0) goto cleanup; - /* Post the big receive buffer to NetVSP */ - if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2) - net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY; - else - net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE; - net_device->send_buf_size = NETVSC_SEND_BUFFER_SIZE; - ret = netvsc_init_buf(device, net_device); + ret = netvsc_init_buf(device, net_device, device_info); cleanup: return ret; @@ -550,9 +551,12 @@ void netvsc_device_remove(struct hv_device *device) { struct net_device *ndev = hv_get_drvdata(device); struct net_device_context *net_device_ctx = netdev_priv(ndev); - struct netvsc_device *net_device = net_device_ctx->nvdev; + struct netvsc_device *net_device + = rtnl_dereference(net_device_ctx->nvdev); int i; + cancel_work_sync(&net_device->subchan_work); + netvsc_disconnect_vsp(device); RCU_INIT_POINTER(net_device_ctx->nvdev, NULL); @@ -693,7 +697,7 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device, u32 pend_size, struct hv_netvsc_packet *packet, struct rndis_message *rndis_msg, - struct hv_page_buffer **pb, + struct hv_page_buffer *pb, struct sk_buff *skb) { char *start = net_device->send_buf; @@ -714,9 +718,9 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device, } for (i = 0; i < page_count; i++) { - char *src = phys_to_virt((*pb)[i].pfn << PAGE_SHIFT); - u32 offset = (*pb)[i].offset; - u32 len = (*pb)[i].len; + char *src = phys_to_virt(pb[i].pfn << PAGE_SHIFT); + u32 offset = pb[i].offset; + u32 len = pb[i].len; memcpy(dest, (src + offset), len); msg_size += len; @@ -735,36 +739,32 @@ static inline int netvsc_send_pkt( struct hv_device *device, struct hv_netvsc_packet *packet, struct netvsc_device *net_device, - struct hv_page_buffer **pb, + struct hv_page_buffer *pb, struct sk_buff *skb) { struct nvsp_message nvmsg; - struct netvsc_channel *nvchan - = &net_device->chan_table[packet->q_idx]; + struct nvsp_1_message_send_rndis_packet * const rpkt = + &nvmsg.msg.v1_msg.send_rndis_pkt; + struct netvsc_channel * const nvchan = + &net_device->chan_table[packet->q_idx]; struct vmbus_channel *out_channel = nvchan->channel; struct net_device *ndev = hv_get_drvdata(device); struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx); u64 req_id; int ret; - struct hv_page_buffer *pgbuf; u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound); nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT; - if (skb != NULL) { - /* 0 is RMC_DATA; */ - nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 0; - } else { - /* 1 is RMC_CONTROL; */ - nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 1; - } - - nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_index = - packet->send_buf_index; - if (packet->send_buf_index == NETVSC_INVALID_INDEX) - nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0; + if (skb) + rpkt->channel_type = 0; /* 0 is RMC_DATA */ else - 
nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size = - packet->total_data_buflen; + rpkt->channel_type = 1; /* 1 is RMC_CONTROL */ + + rpkt->send_buf_section_index = packet->send_buf_index; + if (packet->send_buf_index == NETVSC_INVALID_INDEX) + rpkt->send_buf_section_size = 0; + else + rpkt->send_buf_section_size = packet->total_data_buflen; req_id = (ulong)skb; @@ -772,21 +772,18 @@ static inline int netvsc_send_pkt( return -ENODEV; if (packet->page_buf_cnt) { - pgbuf = packet->cp_partial ? (*pb) + - packet->rmsg_pgcnt : (*pb); - ret = vmbus_sendpacket_pagebuffer_ctl(out_channel, - pgbuf, - packet->page_buf_cnt, - &nvmsg, - sizeof(struct nvsp_message), - req_id, - VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); + if (packet->cp_partial) + pb += packet->rmsg_pgcnt; + + ret = vmbus_sendpacket_pagebuffer(out_channel, + pb, packet->page_buf_cnt, + &nvmsg, sizeof(nvmsg), + req_id); } else { - ret = vmbus_sendpacket_ctl(out_channel, &nvmsg, - sizeof(struct nvsp_message), - req_id, - VM_PKT_DATA_INBAND, - VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); + ret = vmbus_sendpacket(out_channel, + &nvmsg, sizeof(nvmsg), + req_id, VM_PKT_DATA_INBAND, + VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); } if (ret == 0) { @@ -801,8 +798,10 @@ static inline int netvsc_send_pkt( ret = -ENOSPC; } } else { - netdev_err(ndev, "Unable to send packet %p ret %d\n", - packet, ret); + netdev_err(ndev, + "Unable to send packet pages %u len %u, ret %d\n", + packet->page_buf_cnt, packet->total_data_buflen, + ret); } return ret; @@ -820,13 +819,16 @@ static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send, msdp->count = 0; } -int netvsc_send(struct hv_device *device, +/* RCU already held by caller */ +int netvsc_send(struct net_device_context *ndev_ctx, struct hv_netvsc_packet *packet, struct rndis_message *rndis_msg, - struct hv_page_buffer **pb, + struct hv_page_buffer *pb, struct sk_buff *skb) { - struct netvsc_device *net_device = hv_device_to_netvsc_device(device); + struct netvsc_device *net_device + = rcu_dereference_bh(ndev_ctx->nvdev); + struct hv_device *device = ndev_ctx->device_ctx; int ret = 0; struct netvsc_channel *nvchan; u32 pktlen = packet->total_data_buflen, msd_len = 0; @@ -838,7 +840,7 @@ int netvsc_send(struct hv_device *device, bool xmit_more = (skb != NULL) ? skb->xmit_more : false; /* If device is rescinded, return error and packet will get dropped. 
*/ - if (unlikely(net_device->destroy)) + if (unlikely(!net_device || net_device->destroy)) return -ENODEV; /* We may race with netvsc_connect_vsp()/netvsc_init_buf() and get @@ -878,7 +880,9 @@ int netvsc_send(struct hv_device *device, } else if (pktlen + net_device->pkt_align < net_device->send_section_size) { section_index = netvsc_get_next_send_section(net_device); - if (section_index != NETVSC_INVALID_INDEX) { + if (unlikely(section_index == NETVSC_INVALID_INDEX)) { + ++ndev_ctx->eth_stats.tx_send_full; + } else { move_pkt_msd(&msd_send, &msd_skb, msdp); msd_len = 0; } @@ -943,130 +947,99 @@ int netvsc_send(struct hv_device *device, return ret; } -static int netvsc_send_recv_completion(struct vmbus_channel *channel, - u64 transaction_id, u32 status) +/* Send pending recv completions */ +static int send_recv_completions(struct net_device *ndev, + struct netvsc_device *nvdev, + struct netvsc_channel *nvchan) { - struct nvsp_message recvcompMessage; + struct multi_recv_comp *mrc = &nvchan->mrc; + struct recv_comp_msg { + struct nvsp_message_header hdr; + u32 status; + } __packed; + struct recv_comp_msg msg = { + .hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE, + }; int ret; - recvcompMessage.hdr.msg_type = - NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE; + while (mrc->first != mrc->next) { + const struct recv_comp_data *rcd + = mrc->slots + mrc->first; - recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status = status; + msg.status = rcd->status; + ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg), + rcd->tid, VM_PKT_COMP, 0); + if (unlikely(ret)) { + struct net_device_context *ndev_ctx = netdev_priv(ndev); - /* Send the completion */ - ret = vmbus_sendpacket(channel, &recvcompMessage, - sizeof(struct nvsp_message_header) + sizeof(u32), - transaction_id, VM_PKT_COMP, 0); + ++ndev_ctx->eth_stats.rx_comp_busy; + return ret; + } - return ret; + if (++mrc->first == nvdev->recv_completion_cnt) + mrc->first = 0; + } + + /* receive completion ring has been emptied */ + if (unlikely(nvdev->destroy)) + wake_up(&nvdev->wait_drain); + + return 0; } -static inline void count_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx, - u32 *filled, u32 *avail) +/* Count how many receive completions are outstanding */ +static void recv_comp_slot_avail(const struct netvsc_device *nvdev, + const struct multi_recv_comp *mrc, + u32 *filled, u32 *avail) { - struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc; - u32 first = mrc->first; - u32 next = mrc->next; + u32 count = nvdev->recv_completion_cnt; - *filled = (first > next) ? NETVSC_RECVSLOT_MAX - first + next : - next - first; + if (mrc->next >= mrc->first) + *filled = mrc->next - mrc->first; + else + *filled = (count - mrc->first) + mrc->next; - *avail = NETVSC_RECVSLOT_MAX - *filled - 1; + *avail = count - *filled - 1; } -/* Read the first filled slot, no change to index */ -static inline struct recv_comp_data *read_recv_comp_slot(struct netvsc_device - *nvdev, u16 q_idx) +/* Add receive complete to ring to send to host. 
*/ +static void enq_receive_complete(struct net_device *ndev, + struct netvsc_device *nvdev, u16 q_idx, + u64 tid, u32 status) { - struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc; + struct netvsc_channel *nvchan = &nvdev->chan_table[q_idx]; + struct multi_recv_comp *mrc = &nvchan->mrc; + struct recv_comp_data *rcd; u32 filled, avail; - if (unlikely(!mrc->buf)) - return NULL; + recv_comp_slot_avail(nvdev, mrc, &filled, &avail); - count_recv_comp_slot(nvdev, q_idx, &filled, &avail); - if (!filled) - return NULL; - - return mrc->buf + mrc->first * sizeof(struct recv_comp_data); -} - -/* Put the first filled slot back to available pool */ -static inline void put_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx) -{ - struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc; - int num_recv; - - mrc->first = (mrc->first + 1) % NETVSC_RECVSLOT_MAX; - - num_recv = atomic_dec_return(&nvdev->num_outstanding_recvs); - - if (nvdev->destroy && num_recv == 0) - wake_up(&nvdev->wait_drain); -} - -/* Check and send pending recv completions */ -static void netvsc_chk_recv_comp(struct netvsc_device *nvdev, - struct vmbus_channel *channel, u16 q_idx) -{ - struct recv_comp_data *rcd; - int ret; - - while (true) { - rcd = read_recv_comp_slot(nvdev, q_idx); - if (!rcd) - break; - - ret = netvsc_send_recv_completion(channel, rcd->tid, - rcd->status); - if (ret) - break; - - put_recv_comp_slot(nvdev, q_idx); + if (unlikely(filled > NAPI_POLL_WEIGHT)) { + send_recv_completions(ndev, nvdev, nvchan); + recv_comp_slot_avail(nvdev, mrc, &filled, &avail); } -} -#define NETVSC_RCD_WATERMARK 80 + if (unlikely(!avail)) { + netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n", + q_idx, tid); + return; + } -/* Get next available slot */ -static inline struct recv_comp_data *get_recv_comp_slot( - struct netvsc_device *nvdev, struct vmbus_channel *channel, u16 q_idx) -{ - struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc; - u32 filled, avail, next; - struct recv_comp_data *rcd; + rcd = mrc->slots + mrc->next; + rcd->tid = tid; + rcd->status = status; - if (unlikely(!nvdev->recv_section)) - return NULL; - - if (unlikely(!mrc->buf)) - return NULL; - - if (atomic_read(&nvdev->num_outstanding_recvs) > - nvdev->recv_section->num_sub_allocs * NETVSC_RCD_WATERMARK / 100) - netvsc_chk_recv_comp(nvdev, channel, q_idx); - - count_recv_comp_slot(nvdev, q_idx, &filled, &avail); - if (!avail) - return NULL; - - next = mrc->next; - rcd = mrc->buf + next * sizeof(struct recv_comp_data); - mrc->next = (next + 1) % NETVSC_RECVSLOT_MAX; - - atomic_inc(&nvdev->num_outstanding_recvs); - - return rcd; + if (++mrc->next == nvdev->recv_completion_cnt) + mrc->next = 0; } static int netvsc_receive(struct net_device *ndev, - struct netvsc_device *net_device, - struct net_device_context *net_device_ctx, - struct hv_device *device, - struct vmbus_channel *channel, - const struct vmpacket_descriptor *desc, - struct nvsp_message *nvsp) + struct netvsc_device *net_device, + struct net_device_context *net_device_ctx, + struct hv_device *device, + struct vmbus_channel *channel, + const struct vmpacket_descriptor *desc, + struct nvsp_message *nvsp) { const struct vmtransfer_page_packet_header *vmxferpage_packet = container_of(desc, const struct vmtransfer_page_packet_header, d); @@ -1075,7 +1048,6 @@ static int netvsc_receive(struct net_device *ndev, u32 status = NVSP_STAT_SUCCESS; int i; int count = 0; - int ret; /* Make sure this is a valid nvsp packet */ if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) { @@ 
-1106,25 +1078,9 @@ static int netvsc_receive(struct net_device *ndev, channel, data, buflen); } - if (net_device->chan_table[q_idx].mrc.buf) { - struct recv_comp_data *rcd; + enq_receive_complete(ndev, net_device, q_idx, + vmxferpage_packet->d.trans_id, status); - rcd = get_recv_comp_slot(net_device, channel, q_idx); - if (rcd) { - rcd->tid = vmxferpage_packet->d.trans_id; - rcd->status = status; - } else { - netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n", - q_idx, vmxferpage_packet->d.trans_id); - } - } else { - ret = netvsc_send_recv_completion(channel, - vmxferpage_packet->d.trans_id, - status); - if (ret) - netdev_err(ndev, "Recv_comp q:%hd, tid:%llx, err:%d\n", - q_idx, vmxferpage_packet->d.trans_id, ret); - } return count; } @@ -1220,11 +1176,10 @@ int netvsc_poll(struct napi_struct *napi, int budget) { struct netvsc_channel *nvchan = container_of(napi, struct netvsc_channel, napi); + struct netvsc_device *net_device = nvchan->net_device; struct vmbus_channel *channel = nvchan->channel; struct hv_device *device = netvsc_channel_to_device(channel); - u16 q_idx = channel->offermsg.offer.sub_channel_index; struct net_device *ndev = hv_get_drvdata(device); - struct netvsc_device *net_device = net_device_to_netvsc_device(ndev); int work_done = 0; /* If starting a new interval */ @@ -1237,17 +1192,19 @@ int netvsc_poll(struct napi_struct *napi, int budget) nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc); } - /* If receive ring was exhausted * and not doing busy poll + /* If send of pending receive completions succeeded + * and did not exhaust NAPI budget this time + * and not doing busy poll * then re-enable host interrupts - * and reschedule if ring is not empty. + * and reschedule if ring is not empty. */ - if (work_done < budget && + if (send_recv_completions(ndev, net_device, nvchan) == 0 && + work_done < budget && napi_complete_done(napi, work_done) && - hv_end_read(&channel->inbound) != 0) + hv_end_read(&channel->inbound)) { + hv_begin_read(&channel->inbound); napi_reschedule(napi); - - netvsc_chk_recv_comp(net_device, channel, q_idx); + } /* Driver may overshoot since multiple packets per descriptor */ return min(work_done, budget); @@ -1259,10 +1216,15 @@ int netvsc_poll(struct napi_struct *napi, int budget) void netvsc_channel_cb(void *context) { struct netvsc_channel *nvchan = context; + struct vmbus_channel *channel = nvchan->channel; + struct hv_ring_buffer_info *rbi = &channel->inbound; + + /* preload first vmpacket descriptor */ + prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index); if (napi_schedule_prep(&nvchan->napi)) { /* disable interrupts from host */ - hv_begin_read(&nvchan->channel->inbound); + hv_begin_read(rbi); __napi_schedule(&nvchan->napi); } @@ -1272,8 +1234,8 @@ void netvsc_channel_cb(void *context) * netvsc_device_add - Callback when the device belonging to this * driver is added */ -int netvsc_device_add(struct hv_device *device, - const struct netvsc_device_info *device_info) +struct netvsc_device *netvsc_device_add(struct hv_device *device, + const struct netvsc_device_info *device_info) { int i, ret = 0; int ring_size = device_info->ring_size; @@ -1283,7 +1245,7 @@ int netvsc_device_add(struct hv_device *device, net_device = alloc_net_device(); if (!net_device) - return -ENOMEM; + return ERR_PTR(-ENOMEM); net_device->ring_size = ring_size; @@ -1303,6 +1265,7 @@ int netvsc_device_add(struct hv_device *device, struct netvsc_channel *nvchan = &net_device->chan_table[i]; nvchan->channel = device->channel; + nvchan->net_device =
net_device; u64_stats_init(&nvchan->tx_stats.syncp); u64_stats_init(&nvchan->rx_stats.syncp); } @@ -1334,17 +1297,18 @@ int netvsc_device_add(struct hv_device *device, rcu_assign_pointer(net_device_ctx->nvdev, net_device); /* Connect with the NetVsp */ - ret = netvsc_connect_vsp(device, net_device); + ret = netvsc_connect_vsp(device, net_device, device_info); if (ret != 0) { netdev_err(ndev, "unable to connect to NetVSP - %d\n", ret); goto close; } - return ret; + return net_device; close: - netif_napi_del(&net_device->chan_table[0].napi); + RCU_INIT_POINTER(net_device_ctx->nvdev, NULL); + napi_disable(&net_device->chan_table[0].napi); /* Now, we can close the channel safely */ vmbus_close(device->channel); @@ -1352,6 +1316,5 @@ int netvsc_device_add(struct hv_device *device, cleanup: free_netvsc_device(&net_device->rcu); - return ret; - + return ERR_PTR(ret); } diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index d91cbc6c3ca4..a32ae02e1b6c 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -33,6 +33,9 @@ #include #include #include +#include +#include + #include #include #include @@ -42,8 +45,14 @@ #include "hyperv_net.h" -#define RING_SIZE_MIN 64 +#define RING_SIZE_MIN 64 +#define NETVSC_MIN_TX_SECTIONS 10 +#define NETVSC_DEFAULT_TX 192 /* ~1M */ +#define NETVSC_MIN_RX_SECTIONS 10 /* ~64K */ +#define NETVSC_DEFAULT_RX 10485 /* Max ~16M */ + #define LINKCHANGE_INT (2 * HZ) +#define VF_TAKEOVER_INT (HZ / 10) static int ring_size = 128; module_param(ring_size, int, S_IRUGO); @@ -69,7 +78,8 @@ static void netvsc_set_multicast_list(struct net_device *net) static int netvsc_open(struct net_device *net) { struct net_device_context *ndev_ctx = netdev_priv(net); - struct netvsc_device *nvdev = ndev_ctx->nvdev; + struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); + struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev); struct rndis_device *rdev; int ret = 0; @@ -85,22 +95,40 @@ static int netvsc_open(struct net_device *net) netif_tx_wake_all_queues(net); rdev = nvdev->extension; - if (!rdev->link_state && !ndev_ctx->datapath) + + if (!rdev->link_state) netif_carrier_on(net); - return ret; + if (vf_netdev) { + /* Setting synthetic device up transparently sets + * slave as up. If open fails, then slave will be + * still be offline (and not used). 
+ */ + ret = dev_open(vf_netdev); + if (ret) + netdev_warn(net, + "unable to open slave: %s: %d\n", + vf_netdev->name, ret); + } + return 0; } static int netvsc_close(struct net_device *net) { struct net_device_context *net_device_ctx = netdev_priv(net); + struct net_device *vf_netdev + = rtnl_dereference(net_device_ctx->vf_netdev); struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); - int ret; + int ret = 0; u32 aread, i, msec = 10, retry = 0, retry_max = 20; struct vmbus_channel *chn; netif_tx_disable(net); + /* No need to close rndis filter if it is removed already */ + if (!nvdev) + goto out; + ret = rndis_filter_close(nvdev); if (ret != 0) { netdev_err(net, "unable to close device (ret %d).\n", ret); @@ -139,11 +167,15 @@ static int netvsc_close(struct net_device *net) ret = -ETIMEDOUT; } +out: + if (vf_netdev) + dev_close(vf_netdev); + return ret; } static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size, - int pkt_type) + int pkt_type) { struct rndis_packet *rndis_pkt; struct rndis_per_packet_info *ppi; @@ -163,10 +195,12 @@ static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size, return ppi; } -/* Azure hosts don't support non-TCP port numbers in hashing yet. We compute - * hash for non-TCP traffic with only IP numbers. +/* Azure hosts don't support non-TCP port numbers in hashing for fragmented + * packets. We can use ethtool to change UDP hash level when necessary. */ -static inline u32 netvsc_get_hash(struct sk_buff *skb, struct sock *sk) +static inline u32 netvsc_get_hash( + struct sk_buff *skb, + const struct net_device_context *ndc) { struct flow_keys flow; u32 hash; @@ -177,7 +211,11 @@ static inline u32 netvsc_get_hash(struct sk_buff *skb, struct sock *sk) if (!skb_flow_dissect_flow_keys(skb, &flow, 0)) return 0; - if (flow.basic.ip_proto == IPPROTO_TCP) { + if (flow.basic.ip_proto == IPPROTO_TCP || + (flow.basic.ip_proto == IPPROTO_UDP && + ((flow.basic.n_proto == htons(ETH_P_IP) && ndc->udp4_l4_hash) || + (flow.basic.n_proto == htons(ETH_P_IPV6) && + ndc->udp6_l4_hash)))) { return skb_get_hash(skb); } else { if (flow.basic.n_proto == htons(ETH_P_IP)) @@ -200,7 +238,7 @@ static inline int netvsc_get_tx_queue(struct net_device *ndev, struct sock *sk = skb->sk; int q_idx; - q_idx = ndc->tx_send_table[netvsc_get_hash(skb, sk) & + q_idx = ndc->tx_send_table[netvsc_get_hash(skb, ndc) & (VRSS_SEND_TAB_SIZE - 1)]; /* If queue index changed record the new value */ @@ -222,13 +260,11 @@ static inline int netvsc_get_tx_queue(struct net_device *ndev, * * TODO support XPS - but get_xps_queue not exported */ -static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) +static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb) { - unsigned int num_tx_queues = ndev->real_num_tx_queues; int q_idx = sk_tx_queue_get(skb->sk); - if (q_idx < 0 || skb->ooo_okay) { + if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) { /* If forwarding a packet, we use the recorded queue when * available for better cache locality. 
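As an aside on the transmit-queue lookup used just above: the driver indexes a host-supplied send-indirection table with the flow hash, masked to the table size. A minimal userspace sketch of that lookup follows; the table size, its contents, and the sample hashes are invented for illustration and are not the driver's real values.

#include <stdio.h>

#define VRSS_SEND_TAB_SIZE 16	/* power of two, so "& (size - 1)" works as a mask */

/* Map a 32-bit flow hash to a transmit queue through an indirection
 * table, mirroring the tx_send_table lookup in netvsc_get_tx_queue().
 */
static unsigned int pick_tx_queue(const unsigned short *tab, unsigned int hash)
{
	return tab[hash & (VRSS_SEND_TAB_SIZE - 1)];
}

int main(void)
{
	unsigned short tab[VRSS_SEND_TAB_SIZE];
	int i;

	/* hypothetical table spreading flows across 4 queues */
	for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
		tab[i] = i % 4;

	printf("hash 0x12345678 -> queue %u\n", pick_tx_queue(tab, 0x12345678u));
	printf("hash 0xdeadbeef -> queue %u\n", pick_tx_queue(tab, 0xdeadbeefu));
	return 0;
}
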
*/ @@ -238,14 +274,35 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, q_idx = netvsc_get_tx_queue(ndev, skb, q_idx); } - while (unlikely(q_idx >= num_tx_queues)) - q_idx -= num_tx_queues; - return q_idx; } +static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, + void *accel_priv, + select_queue_fallback_t fallback) +{ + struct net_device_context *ndc = netdev_priv(ndev); + struct net_device *vf_netdev; + u16 txq; + + rcu_read_lock(); + vf_netdev = rcu_dereference(ndc->vf_netdev); + if (vf_netdev) { + txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0; + qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping; + } else { + txq = netvsc_pick_tx(ndev, skb); + } + rcu_read_unlock(); + + while (unlikely(txq >= ndev->real_num_tx_queues)) + txq -= ndev->real_num_tx_queues; + + return txq; +} + static u32 fill_pg_buf(struct page *page, u32 offset, u32 len, - struct hv_page_buffer *pb) + struct hv_page_buffer *pb) { int j = 0; @@ -280,9 +337,8 @@ static u32 fill_pg_buf(struct page *page, u32 offset, u32 len, static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb, struct hv_netvsc_packet *packet, - struct hv_page_buffer **page_buf) + struct hv_page_buffer *pb) { - struct hv_page_buffer *pb = *page_buf; u32 slots_used = 0; char *data = skb->data; int frags = skb_shinfo(skb)->nr_frags; @@ -293,10 +349,9 @@ static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb, * 2. skb linear data * 3. skb fragment data */ - if (hdr != NULL) - slots_used += fill_pg_buf(virt_to_page(hdr), - offset_in_page(hdr), - len, &pb[slots_used]); + slots_used += fill_pg_buf(virt_to_page(hdr), + offset_in_page(hdr), + len, &pb[slots_used]); packet->rmsg_size = len; packet->rmsg_pgcnt = slots_used; @@ -359,13 +414,40 @@ static u32 net_checksum_info(struct sk_buff *skb) if (ip6->nexthdr == IPPROTO_TCP) return TRANSPORT_INFO_IPV6_TCP; - else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP) + else if (ip6->nexthdr == IPPROTO_UDP) return TRANSPORT_INFO_IPV6_UDP; } return TRANSPORT_INFO_NOT_IP; } +/* Send skb on the slave VF device. 
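The helper below hands a frame to the accelerated VF path; the transmit entry point uses it only when a VF is registered and running, otherwise the packet stays on the synthetic VMBus path. A small stand-alone sketch of that decision, with a made-up device structure standing in for struct net_device:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the two transmit paths; not the driver's real structures. */
struct fake_netdev {
	const char *name;
	bool registered;	/* a VF has been enslaved */
	bool running;		/* and it is administratively up */
};

/* Use the VF only when it exists and is running, otherwise keep the
 * synthetic VMBus path; this mirrors the check in netvsc_start_xmit().
 */
static const char *xmit_path(const struct fake_netdev *vf)
{
	if (vf && vf->registered && vf->running)
		return vf->name;	/* accelerated SR-IOV path */
	return "synthetic";
}

int main(void)
{
	struct fake_netdev vf = { "vf0", true, false };

	printf("path: %s\n", xmit_path(&vf));	/* synthetic: VF not up yet */
	vf.running = true;
	printf("path: %s\n", xmit_path(&vf));	/* vf0 */
	return 0;
}
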
*/ +static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev, + struct sk_buff *skb) +{ + struct net_device_context *ndev_ctx = netdev_priv(net); + unsigned int len = skb->len; + int rc; + + skb->dev = vf_netdev; + skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping; + + rc = dev_queue_xmit(skb); + if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) { + struct netvsc_vf_pcpu_stats *pcpu_stats + = this_cpu_ptr(ndev_ctx->vf_stats); + + u64_stats_update_begin(&pcpu_stats->syncp); + pcpu_stats->tx_packets++; + pcpu_stats->tx_bytes += len; + u64_stats_update_end(&pcpu_stats->syncp); + } else { + this_cpu_inc(ndev_ctx->vf_stats->tx_dropped); + } + + return rc; +} + static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) { struct net_device_context *net_device_ctx = netdev_priv(net); @@ -374,11 +456,19 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) unsigned int num_data_pgs; struct rndis_message *rndis_msg; struct rndis_packet *rndis_pkt; + struct net_device *vf_netdev; u32 rndis_msg_size; struct rndis_per_packet_info *ppi; u32 hash; - struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT]; - struct hv_page_buffer *pb = page_buf; + struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT]; + + /* if VF is present and up then redirect packets + * already called with rcu_read_lock_bh + */ + vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev); + if (vf_netdev && netif_running(vf_netdev) && + !netpoll_tx_running(net)) + return netvsc_vf_xmit(net, vf_netdev, skb); /* We will atmost need two pages to describe the rndis * header. We can only transmit MAX_PAGE_BUFFER_COUNT number @@ -448,9 +538,9 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) rndis_msg_size += NDIS_VLAN_PPI_SIZE; ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE, - IEEE_8021Q_INFO); - vlan = (struct ndis_pkt_8021q_info *)((void *)ppi + - ppi->ppi_offset); + IEEE_8021Q_INFO); + + vlan = (void *)ppi + ppi->ppi_offset; vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK; vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; @@ -463,8 +553,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE, TCP_LARGESEND_PKTINFO); - lso_info = (struct ndis_tcp_lso_info *)((void *)ppi + - ppi->ppi_offset); + lso_info = (void *)ppi + ppi->ppi_offset; lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE; if (skb->protocol == htons(ETH_P_IP)) { @@ -524,12 +613,12 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) rndis_msg->msg_len += rndis_msg_size; packet->total_data_buflen = rndis_msg->msg_len; packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size, - skb, packet, &pb); + skb, packet, pb); /* timestamp packet in software */ skb_tx_timestamp(skb); - ret = netvsc_send(net_device_ctx->device_ctx, packet, - rndis_msg, &pb, skb); + + ret = netvsc_send(net_device_ctx, packet, rndis_msg, pb, skb); if (likely(ret == 0)) return NETDEV_TX_OK; @@ -551,6 +640,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) ++net_device_ctx->eth_stats.tx_no_memory; goto drop; } + /* * netvsc_linkstatus_callback - Link up/down notification */ @@ -574,8 +664,8 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj, if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) { u32 speed; - speed = *(u32 *)((void *)indicate + indicate-> - status_buf_offset) / 10000; + speed = *(u32 *)((void *)indicate + + 
indicate->status_buf_offset) / 10000; ndev_ctx->speed = speed; return; } @@ -658,29 +748,18 @@ int netvsc_recv_callback(struct net_device *net, struct netvsc_device *net_device; u16 q_idx = channel->offermsg.offer.sub_channel_index; struct netvsc_channel *nvchan; - struct net_device *vf_netdev; struct sk_buff *skb; struct netvsc_stats *rx_stats; if (net->reg_state != NETREG_REGISTERED) return NVSP_STAT_FAIL; - /* - * If necessary, inject this packet into the VF interface. - * On Hyper-V, multicast and brodcast packets are only delivered - * to the synthetic interface (after subjecting these to - * policy filters on the host). Deliver these via the VF - * interface in the guest. - */ rcu_read_lock(); net_device = rcu_dereference(net_device_ctx->nvdev); if (unlikely(!net_device)) goto drop; nvchan = &net_device->chan_table[q_idx]; - vf_netdev = rcu_dereference(net_device_ctx->vf_netdev); - if (vf_netdev && (vf_netdev->flags & IFF_UP)) - net = vf_netdev; /* Allocate a skb - TODO direct I/O to pages? */ skb = netvsc_alloc_recv_skb(net, &nvchan->napi, @@ -692,8 +771,7 @@ int netvsc_recv_callback(struct net_device *net, return NVSP_STAT_FAIL; } - if (net != vf_netdev) - skb_record_rx_queue(skb, q_idx); + skb_record_rx_queue(skb, q_idx); /* * Even if injecting the packet, record the statistics @@ -736,48 +814,22 @@ static void netvsc_get_channels(struct net_device *net, } } -static int netvsc_set_queues(struct net_device *net, struct hv_device *dev, - u32 num_chn) -{ - struct netvsc_device_info device_info; - int ret; - - memset(&device_info, 0, sizeof(device_info)); - device_info.num_chn = num_chn; - device_info.ring_size = ring_size; - device_info.max_num_vrss_chns = num_chn; - - ret = rndis_filter_device_add(dev, &device_info); - if (ret) - return ret; - - ret = netif_set_real_num_tx_queues(net, num_chn); - if (ret) - return ret; - - ret = netif_set_real_num_rx_queues(net, num_chn); - - return ret; -} - static int netvsc_set_channels(struct net_device *net, struct ethtool_channels *channels) { struct net_device_context *net_device_ctx = netdev_priv(net); struct hv_device *dev = net_device_ctx->device_ctx; struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); - unsigned int count = channels->combined_count; - bool was_running; - int ret; + unsigned int orig, count = channels->combined_count; + struct netvsc_device_info device_info; + bool was_opened; + int ret = 0; /* We do not support separate count for rx, tx, or other */ if (count == 0 || channels->rx_count || channels->tx_count || channels->other_count) return -EINVAL; - if (count > net->num_tx_queues || count > VRSS_CHANNEL_MAX) - return -EINVAL; - if (!nvdev || nvdev->destroy) return -ENODEV; @@ -787,25 +839,39 @@ static int netvsc_set_channels(struct net_device *net, if (count > nvdev->max_chn) return -EINVAL; - was_running = netif_running(net); - if (was_running) { - ret = netvsc_close(net); - if (ret) - return ret; - } + orig = nvdev->num_chn; + was_opened = rndis_filter_opened(nvdev); + if (was_opened) + rndis_filter_close(nvdev); + + memset(&device_info, 0, sizeof(device_info)); + device_info.num_chn = count; + device_info.ring_size = ring_size; + device_info.send_sections = nvdev->send_section_cnt; + device_info.send_section_size = nvdev->send_section_size; + device_info.recv_sections = nvdev->recv_section_cnt; + device_info.recv_section_size = nvdev->recv_section_size; rndis_filter_device_remove(dev, nvdev); - ret = netvsc_set_queues(net, dev, count); - if (ret == 0) - nvdev->num_chn = count; - else - 
netvsc_set_queues(net, dev, nvdev->num_chn); + nvdev = rndis_filter_device_add(dev, &device_info); + if (IS_ERR(nvdev)) { + ret = PTR_ERR(nvdev); + device_info.num_chn = orig; + nvdev = rndis_filter_device_add(dev, &device_info); - if (was_running) - ret = netvsc_open(net); + if (IS_ERR(nvdev)) { + netdev_err(net, "restoring channel setting failed: %ld\n", + PTR_ERR(nvdev)); + return ret; + } + } + + if (was_opened) + rndis_filter_open(nvdev); /* We may have missed link change notifications */ + net_device_ctx->last_reconfig = 0; schedule_delayed_work(&net_device_ctx->dwork, 0); return ret; @@ -832,6 +898,9 @@ static void netvsc_init_settings(struct net_device *dev) { struct net_device_context *ndc = netdev_priv(dev); + ndc->udp4_l4_hash = true; + ndc->udp6_l4_hash = true; + ndc->speed = SPEED_UNKNOWN; ndc->duplex = DUPLEX_FULL; } @@ -869,41 +938,63 @@ static int netvsc_set_link_ksettings(struct net_device *dev, static int netvsc_change_mtu(struct net_device *ndev, int mtu) { struct net_device_context *ndevctx = netdev_priv(ndev); + struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev); struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); struct hv_device *hdev = ndevctx->device_ctx; + int orig_mtu = ndev->mtu; struct netvsc_device_info device_info; - bool was_running; + bool was_opened; int ret = 0; if (!nvdev || nvdev->destroy) return -ENODEV; - was_running = netif_running(ndev); - if (was_running) { - ret = netvsc_close(ndev); + /* Change MTU of underlying VF netdev first. */ + if (vf_netdev) { + ret = dev_set_mtu(vf_netdev, mtu); if (ret) return ret; } + netif_device_detach(ndev); + was_opened = rndis_filter_opened(nvdev); + if (was_opened) + rndis_filter_close(nvdev); + memset(&device_info, 0, sizeof(device_info)); device_info.ring_size = ring_size; device_info.num_chn = nvdev->num_chn; - device_info.max_num_vrss_chns = nvdev->num_chn; + device_info.send_sections = nvdev->send_section_cnt; + device_info.send_section_size = nvdev->send_section_size; + device_info.recv_sections = nvdev->recv_section_cnt; + device_info.recv_section_size = nvdev->recv_section_size; rndis_filter_device_remove(hdev, nvdev); - /* 'nvdev' has been freed in rndis_filter_device_remove() -> - * netvsc_device_remove () -> free_netvsc_device(). - * We mustn't access it before it's re-created in - * rndis_filter_device_add() -> netvsc_device_add(). 
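The reworked MTU path that follows removes the RNDIS device and re-adds it with the new settings, restoring the saved values if the re-add fails. A generic userspace sketch of that try-then-rollback shape; apply_config() and its 9000-byte limit are invented for the example:

#include <stdio.h>

struct config {
	int mtu;
};

/* Hypothetical reconfiguration step: pretend anything above 9000 fails. */
static int apply_config(const struct config *c)
{
	return c->mtu <= 9000 ? 0 : -1;
}

/* Try the new setting; on failure restore the saved one, the same
 * remove/re-add/rollback shape used by the reworked netvsc_change_mtu().
 */
static int change_mtu(struct config *cur, int new_mtu)
{
	struct config orig = *cur;

	cur->mtu = new_mtu;
	if (apply_config(cur) == 0)
		return 0;

	*cur = orig;				/* roll back to known-good settings */
	if (apply_config(cur) != 0)
		fprintf(stderr, "rollback failed\n");
	return -1;
}

int main(void)
{
	struct config c = { 1500 };

	printf("set 9216 -> %d (mtu now %d)\n", change_mtu(&c, 9216), c.mtu);
	printf("set 9000 -> %d (mtu now %d)\n", change_mtu(&c, 9000), c.mtu);
	return 0;
}
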
- */ - ndev->mtu = mtu; - rndis_filter_device_add(hdev, &device_info); + nvdev = rndis_filter_device_add(hdev, &device_info); + if (IS_ERR(nvdev)) { + ret = PTR_ERR(nvdev); - if (was_running) - ret = netvsc_open(ndev); + /* Attempt rollback to original MTU */ + ndev->mtu = orig_mtu; + nvdev = rndis_filter_device_add(hdev, &device_info); + + if (vf_netdev) + dev_set_mtu(vf_netdev, orig_mtu); + + if (IS_ERR(nvdev)) { + netdev_err(ndev, "restoring mtu failed: %ld\n", + PTR_ERR(nvdev)); + return ret; + } + } + + if (was_opened) + rndis_filter_open(nvdev); + + netif_device_attach(ndev); /* We may have missed link change notifications */ schedule_delayed_work(&ndevctx->dwork, 0); @@ -911,16 +1002,56 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) return ret; } +static void netvsc_get_vf_stats(struct net_device *net, + struct netvsc_vf_pcpu_stats *tot) +{ + struct net_device_context *ndev_ctx = netdev_priv(net); + int i; + + memset(tot, 0, sizeof(*tot)); + + for_each_possible_cpu(i) { + const struct netvsc_vf_pcpu_stats *stats + = per_cpu_ptr(ndev_ctx->vf_stats, i); + u64 rx_packets, rx_bytes, tx_packets, tx_bytes; + unsigned int start; + + do { + start = u64_stats_fetch_begin_irq(&stats->syncp); + rx_packets = stats->rx_packets; + tx_packets = stats->tx_packets; + rx_bytes = stats->rx_bytes; + tx_bytes = stats->tx_bytes; + } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + + tot->rx_packets += rx_packets; + tot->tx_packets += tx_packets; + tot->rx_bytes += rx_bytes; + tot->tx_bytes += tx_bytes; + tot->tx_dropped += stats->tx_dropped; + } +} + static void netvsc_get_stats64(struct net_device *net, struct rtnl_link_stats64 *t) { struct net_device_context *ndev_ctx = netdev_priv(net); struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev); + struct netvsc_vf_pcpu_stats vf_tot; int i; if (!nvdev) return; + netdev_stats_to_stats64(t, &net->stats); + + netvsc_get_vf_stats(net, &vf_tot); + t->rx_packets += vf_tot.rx_packets; + t->tx_packets += vf_tot.tx_packets; + t->rx_bytes += vf_tot.rx_bytes; + t->tx_bytes += vf_tot.tx_bytes; + t->tx_dropped += vf_tot.tx_dropped; + for (i = 0; i < nvdev->num_chn; i++) { const struct netvsc_channel *nvchan = &nvdev->chan_table[i]; const struct netvsc_stats *stats; @@ -949,33 +1080,36 @@ static void netvsc_get_stats64(struct net_device *net, t->rx_packets += packets; t->multicast += multicast; } - - t->tx_dropped = net->stats.tx_dropped; - t->tx_errors = net->stats.tx_errors; - - t->rx_dropped = net->stats.rx_dropped; - t->rx_errors = net->stats.rx_errors; } static int netvsc_set_mac_addr(struct net_device *ndev, void *p) { + struct net_device_context *ndc = netdev_priv(ndev); + struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev); + struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev); struct sockaddr *addr = p; - char save_adr[ETH_ALEN]; - unsigned char save_aatype; int err; - memcpy(save_adr, ndev->dev_addr, ETH_ALEN); - save_aatype = ndev->addr_assign_type; - - err = eth_mac_addr(ndev, p); - if (err != 0) + err = eth_prepare_mac_addr_change(ndev, p); + if (err) return err; - err = rndis_filter_set_device_mac(ndev, addr->sa_data); - if (err != 0) { - /* roll back to saved MAC */ - memcpy(ndev->dev_addr, save_adr, ETH_ALEN); - ndev->addr_assign_type = save_aatype; + if (!nvdev) + return -ENODEV; + + if (vf_netdev) { + err = dev_set_mac_address(vf_netdev, addr); + if (err) + return err; + } + + err = rndis_filter_set_device_mac(nvdev, addr->sa_data); + if (!err) { + eth_commit_mac_addr_change(ndev, p); + } 
else if (vf_netdev) { + /* rollback change on VF */ + memcpy(addr->sa_data, ndev->dev_addr, ETH_ALEN); + dev_set_mac_address(vf_netdev, addr); } return err; @@ -990,9 +1124,18 @@ static const struct { { "tx_no_space", offsetof(struct netvsc_ethtool_stats, tx_no_space) }, { "tx_too_big", offsetof(struct netvsc_ethtool_stats, tx_too_big) }, { "tx_busy", offsetof(struct netvsc_ethtool_stats, tx_busy) }, + { "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) }, + { "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) }, +}, vf_stats[] = { + { "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) }, + { "vf_rx_bytes", offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) }, + { "vf_tx_packets", offsetof(struct netvsc_vf_pcpu_stats, tx_packets) }, + { "vf_tx_bytes", offsetof(struct netvsc_vf_pcpu_stats, tx_bytes) }, + { "vf_tx_dropped", offsetof(struct netvsc_vf_pcpu_stats, tx_dropped) }, }; #define NETVSC_GLOBAL_STATS_LEN ARRAY_SIZE(netvsc_stats) +#define NETVSC_VF_STATS_LEN ARRAY_SIZE(vf_stats) /* 4 statistics per queue (rx/tx packets/bytes) */ #define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4) @@ -1007,7 +1150,9 @@ static int netvsc_get_sset_count(struct net_device *dev, int string_set) switch (string_set) { case ETH_SS_STATS: - return NETVSC_GLOBAL_STATS_LEN + NETVSC_QUEUE_STATS_LEN(nvdev); + return NETVSC_GLOBAL_STATS_LEN + + NETVSC_VF_STATS_LEN + + NETVSC_QUEUE_STATS_LEN(nvdev); default: return -EINVAL; } @@ -1017,9 +1162,10 @@ static void netvsc_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct net_device_context *ndc = netdev_priv(dev); - struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev); + struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev); const void *nds = &ndc->eth_stats; const struct netvsc_stats *qstats; + struct netvsc_vf_pcpu_stats sum; unsigned int start; u64 packets, bytes; int i, j; @@ -1030,6 +1176,10 @@ static void netvsc_get_ethtool_stats(struct net_device *dev, for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++) data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset); + netvsc_get_vf_stats(dev, &sum); + for (j = 0; j < NETVSC_VF_STATS_LEN; j++) + data[i++] = *(u64 *)((void *)&sum + vf_stats[j].offset); + for (j = 0; j < nvdev->num_chn; j++) { qstats = &nvdev->chan_table[j].tx_stats; @@ -1055,7 +1205,7 @@ static void netvsc_get_ethtool_stats(struct net_device *dev, static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data) { struct net_device_context *ndc = netdev_priv(dev); - struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev); + struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev); u8 *p = data; int i; @@ -1064,11 +1214,16 @@ static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data) switch (stringset) { case ETH_SS_STATS: - for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) - memcpy(p + i * ETH_GSTRING_LEN, - netvsc_stats[i].name, ETH_GSTRING_LEN); + for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) { + memcpy(p, netvsc_stats[i].name, ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + + for (i = 0; i < ARRAY_SIZE(vf_stats); i++) { + memcpy(p, vf_stats[i].name, ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } - p += i * ETH_GSTRING_LEN; for (i = 0; i < nvdev->num_chn; i++) { sprintf(p, "tx_queue_%u_packets", i); p += ETH_GSTRING_LEN; @@ -1085,7 +1240,7 @@ static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data) } static int -netvsc_get_rss_hash_opts(struct netvsc_device *nvdev, +netvsc_get_rss_hash_opts(struct 
net_device_context *ndc, struct ethtool_rxnfc *info) { info->data = RXH_IP_SRC | RXH_IP_DST; @@ -1094,9 +1249,20 @@ netvsc_get_rss_hash_opts(struct netvsc_device *nvdev, case TCP_V4_FLOW: case TCP_V6_FLOW: info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* fallthrough */ + break; + case UDP_V4_FLOW: + if (ndc->udp4_l4_hash) + info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + + break; + case UDP_V6_FLOW: + if (ndc->udp6_l4_hash) + info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + + break; + case IPV4_FLOW: case IPV6_FLOW: break; @@ -1113,7 +1279,7 @@ netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rules) { struct net_device_context *ndc = netdev_priv(dev); - struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev); + struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev); if (!nvdev) return -ENODEV; @@ -1124,11 +1290,51 @@ netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, return 0; case ETHTOOL_GRXFH: - return netvsc_get_rss_hash_opts(nvdev, info); + return netvsc_get_rss_hash_opts(ndc, info); } return -EOPNOTSUPP; } +static int netvsc_set_rss_hash_opts(struct net_device_context *ndc, + struct ethtool_rxnfc *info) +{ + if (info->data == (RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + if (info->flow_type == UDP_V4_FLOW) + ndc->udp4_l4_hash = true; + else if (info->flow_type == UDP_V6_FLOW) + ndc->udp6_l4_hash = true; + else + return -EOPNOTSUPP; + + return 0; + } + + if (info->data == (RXH_IP_SRC | RXH_IP_DST)) { + if (info->flow_type == UDP_V4_FLOW) + ndc->udp4_l4_hash = false; + else if (info->flow_type == UDP_V6_FLOW) + ndc->udp6_l4_hash = false; + else + return -EOPNOTSUPP; + + return 0; + } + + return -EOPNOTSUPP; +} + +static int +netvsc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *info) +{ + struct net_device_context *ndc = netdev_priv(ndev); + + if (info->cmd == ETHTOOL_SRXFH) + return netvsc_set_rss_hash_opts(ndc, info); + + return -EOPNOTSUPP; +} + #ifdef CONFIG_NET_POLL_CONTROLLER static void netvsc_poll_controller(struct net_device *dev) { @@ -1163,7 +1369,7 @@ static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc) { struct net_device_context *ndc = netdev_priv(dev); - struct netvsc_device *ndev = rcu_dereference(ndc->nvdev); + struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev); struct rndis_device *rndis_dev; int i; @@ -1202,7 +1408,7 @@ static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir, rndis_dev = ndev->extension; if (indir) { for (i = 0; i < ITAB_NUM; i++) - if (indir[i] >= VRSS_CHANNEL_MAX) + if (indir[i] >= ndev->num_chn) return -EINVAL; for (i = 0; i < ITAB_NUM; i++) @@ -1216,7 +1422,107 @@ static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir, key = rndis_dev->rss_key; } - return rndis_filter_set_rss_param(rndis_dev, key, ndev->num_chn); + return rndis_filter_set_rss_param(rndis_dev, key); +} + +/* Hyper-V RNDIS protocol does not have ring in the HW sense. + * It does have pre-allocated receive area which is divided into sections. 
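Because the receive and send areas are fixed-size buffers carved into equal sections, the ethtool ring limits reported below are derived by division, and requested counts are clamped into that range. A short sketch of the same arithmetic with made-up buffer and section sizes:

#include <stdio.h>

/* Illustrative numbers only; the real buffer and section sizes come from
 * the NVSP negotiation, not from these constants.
 */
#define RECV_BUFFER_SIZE	(16u * 1024 * 1024)
#define RECV_SECTION_SIZE	1728u
#define MIN_RX_SECTIONS		10u

static unsigned int clamp_uint(unsigned int v, unsigned int lo, unsigned int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	unsigned int rx_max = RECV_BUFFER_SIZE / RECV_SECTION_SIZE;
	unsigned int asked = 4096;
	unsigned int granted = clamp_uint(asked, MIN_RX_SECTIONS, rx_max);

	printf("rx_max_pending = %u\n", rx_max);
	printf("requested %u -> granted %u\n", asked, granted);
	return 0;
}
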
+ */ +static void __netvsc_get_ringparam(struct netvsc_device *nvdev, + struct ethtool_ringparam *ring) +{ + u32 max_buf_size; + + ring->rx_pending = nvdev->recv_section_cnt; + ring->tx_pending = nvdev->send_section_cnt; + + if (nvdev->nvsp_version <= NVSP_PROTOCOL_VERSION_2) + max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY; + else + max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE; + + ring->rx_max_pending = max_buf_size / nvdev->recv_section_size; + ring->tx_max_pending = NETVSC_SEND_BUFFER_SIZE + / nvdev->send_section_size; +} + +static void netvsc_get_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ring) +{ + struct net_device_context *ndevctx = netdev_priv(ndev); + struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); + + if (!nvdev) + return; + + __netvsc_get_ringparam(nvdev, ring); +} + +static int netvsc_set_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ring) +{ + struct net_device_context *ndevctx = netdev_priv(ndev); + struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); + struct hv_device *hdev = ndevctx->device_ctx; + struct netvsc_device_info device_info; + struct ethtool_ringparam orig; + u32 new_tx, new_rx; + bool was_opened; + int ret = 0; + + if (!nvdev || nvdev->destroy) + return -ENODEV; + + memset(&orig, 0, sizeof(orig)); + __netvsc_get_ringparam(nvdev, &orig); + + new_tx = clamp_t(u32, ring->tx_pending, + NETVSC_MIN_TX_SECTIONS, orig.tx_max_pending); + new_rx = clamp_t(u32, ring->rx_pending, + NETVSC_MIN_RX_SECTIONS, orig.rx_max_pending); + + if (new_tx == orig.tx_pending && + new_rx == orig.rx_pending) + return 0; /* no change */ + + memset(&device_info, 0, sizeof(device_info)); + device_info.num_chn = nvdev->num_chn; + device_info.ring_size = ring_size; + device_info.send_sections = new_tx; + device_info.send_section_size = nvdev->send_section_size; + device_info.recv_sections = new_rx; + device_info.recv_section_size = nvdev->recv_section_size; + + netif_device_detach(ndev); + was_opened = rndis_filter_opened(nvdev); + if (was_opened) + rndis_filter_close(nvdev); + + rndis_filter_device_remove(hdev, nvdev); + + nvdev = rndis_filter_device_add(hdev, &device_info); + if (IS_ERR(nvdev)) { + ret = PTR_ERR(nvdev); + + device_info.send_sections = orig.tx_pending; + device_info.recv_sections = orig.rx_pending; + nvdev = rndis_filter_device_add(hdev, &device_info); + if (IS_ERR(nvdev)) { + netdev_err(ndev, "restoring ringparam failed: %ld\n", + PTR_ERR(nvdev)); + return ret; + } + } + + if (was_opened) + rndis_filter_open(nvdev); + netif_device_attach(ndev); + + /* We may have missed link change notifications */ + ndevctx->last_reconfig = 0; + schedule_delayed_work(&ndevctx->dwork, 0); + + return ret; } static const struct ethtool_ops ethtool_ops = { @@ -1229,12 +1535,15 @@ static const struct ethtool_ops ethtool_ops = { .set_channels = netvsc_set_channels, .get_ts_info = ethtool_op_get_ts_info, .get_rxnfc = netvsc_get_rxnfc, + .set_rxnfc = netvsc_set_rxnfc, .get_rxfh_key_size = netvsc_get_rxfh_key_size, .get_rxfh_indir_size = netvsc_rss_indir_size, .get_rxfh = netvsc_get_rxfh, .set_rxfh = netvsc_set_rxfh, .get_link_ksettings = netvsc_get_link_ksettings, .set_link_ksettings = netvsc_set_link_ksettings, + .get_ringparam = netvsc_get_ringparam, + .set_ringparam = netvsc_set_ringparam, }; static const struct net_device_ops device_ops = { @@ -1313,8 +1622,7 @@ static void netvsc_link_change(struct work_struct *w) case RNDIS_STATUS_MEDIA_CONNECT: if (rdev->link_state) { rdev->link_state = false; - if (!ndev_ctx->datapath) - 
netif_carrier_on(net); + netif_carrier_on(net); netif_tx_wake_all_queues(net); } else { notify = true; @@ -1391,7 +1699,7 @@ static struct net_device *get_netvsc_byref(struct net_device *vf_netdev) continue; /* not a netvsc device */ net_device_ctx = netdev_priv(dev); - if (net_device_ctx->nvdev == NULL) + if (!rtnl_dereference(net_device_ctx->nvdev)) continue; /* device is removed */ if (rtnl_dereference(net_device_ctx->vf_netdev) == vf_netdev) @@ -1401,6 +1709,108 @@ static struct net_device *get_netvsc_byref(struct net_device *vf_netdev) return NULL; } +/* Called when VF is injecting data into network stack. + * Change the associated network device from VF to netvsc. + * note: already called with rcu_read_lock + */ +static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb) +{ + struct sk_buff *skb = *pskb; + struct net_device *ndev = rcu_dereference(skb->dev->rx_handler_data); + struct net_device_context *ndev_ctx = netdev_priv(ndev); + struct netvsc_vf_pcpu_stats *pcpu_stats + = this_cpu_ptr(ndev_ctx->vf_stats); + + skb->dev = ndev; + + u64_stats_update_begin(&pcpu_stats->syncp); + pcpu_stats->rx_packets++; + pcpu_stats->rx_bytes += skb->len; + u64_stats_update_end(&pcpu_stats->syncp); + + return RX_HANDLER_ANOTHER; +} + +static int netvsc_vf_join(struct net_device *vf_netdev, + struct net_device *ndev) +{ + struct net_device_context *ndev_ctx = netdev_priv(ndev); + int ret; + + ret = netdev_rx_handler_register(vf_netdev, + netvsc_vf_handle_frame, ndev); + if (ret != 0) { + netdev_err(vf_netdev, + "can not register netvsc VF receive handler (err = %d)\n", + ret); + goto rx_handler_failed; + } + + ret = netdev_upper_dev_link(vf_netdev, ndev); + if (ret != 0) { + netdev_err(vf_netdev, + "can not set master device %s (err = %d)\n", + ndev->name, ret); + goto upper_link_failed; + } + + /* set slave flag before open to prevent IPv6 addrconf */ + vf_netdev->flags |= IFF_SLAVE; + + schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT); + + call_netdevice_notifiers(NETDEV_JOIN, vf_netdev); + + netdev_info(vf_netdev, "joined to %s\n", ndev->name); + return 0; + +upper_link_failed: + netdev_rx_handler_unregister(vf_netdev); +rx_handler_failed: + return ret; +} + +static void __netvsc_vf_setup(struct net_device *ndev, + struct net_device *vf_netdev) +{ + int ret; + + /* Align MTU of VF with master */ + ret = dev_set_mtu(vf_netdev, ndev->mtu); + if (ret) + netdev_warn(vf_netdev, + "unable to change mtu to %u\n", ndev->mtu); + + if (netif_running(ndev)) { + ret = dev_open(vf_netdev); + if (ret) + netdev_warn(vf_netdev, + "unable to open: %d\n", ret); + } +} + +/* Setup VF as slave of the synthetic device. + * Runs in workqueue to avoid recursion in netlink callbacks. 
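The work function that follows takes the RTNL lock with a trylock and, if the lock is busy, simply requeues itself rather than blocking. A userspace sketch of that trylock-or-defer pattern using pthreads; the retry loop and its limit are invented for the example:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

/* Run the setup only if the lock is free; return 0 to ask the caller to
 * requeue the work, mirroring rtnl_trylock()/schedule_delayed_work().
 */
static int try_do_setup(void)
{
	if (pthread_mutex_trylock(&big_lock) != 0)
		return 0;		/* lock busy: defer instead of blocking */

	puts("setup ran under the lock");
	pthread_mutex_unlock(&big_lock);
	return 1;
}

int main(void)
{
	int attempts = 0;

	while (!try_do_setup() && ++attempts < 5)
		puts("lock busy, requeueing");

	return 0;
}
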
+ */ +static void netvsc_vf_setup(struct work_struct *w) +{ + struct net_device_context *ndev_ctx + = container_of(w, struct net_device_context, vf_takeover.work); + struct net_device *ndev = hv_get_drvdata(ndev_ctx->device_ctx); + struct net_device *vf_netdev; + + if (!rtnl_trylock()) { + schedule_delayed_work(&ndev_ctx->vf_takeover, 0); + return; + } + + vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); + if (vf_netdev) + __netvsc_vf_setup(ndev, vf_netdev); + + rtnl_unlock(); +} + static int netvsc_register_vf(struct net_device *vf_netdev) { struct net_device *ndev; @@ -1424,22 +1834,23 @@ static int netvsc_register_vf(struct net_device *vf_netdev) if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev)) return NOTIFY_DONE; + if (netvsc_vf_join(vf_netdev, ndev) != 0) + return NOTIFY_DONE; + netdev_info(ndev, "VF registering: %s\n", vf_netdev->name); - /* - * Take a reference on the module. - */ - try_module_get(THIS_MODULE); dev_hold(vf_netdev); rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev); return NOTIFY_OK; } -static int netvsc_vf_up(struct net_device *vf_netdev) +/* VF up/down change detected, schedule to change data path */ +static int netvsc_vf_changed(struct net_device *vf_netdev) { - struct net_device *ndev; - struct netvsc_device *netvsc_dev; struct net_device_context *net_device_ctx; + struct netvsc_device *netvsc_dev; + struct net_device *ndev; + bool vf_is_up = netif_running(vf_netdev); ndev = get_netvsc_byref(vf_netdev); if (!ndev) @@ -1447,49 +1858,12 @@ static int netvsc_vf_up(struct net_device *vf_netdev) net_device_ctx = netdev_priv(ndev); netvsc_dev = rtnl_dereference(net_device_ctx->nvdev); - - netdev_info(ndev, "VF up: %s\n", vf_netdev->name); - - /* - * Open the device before switching data path. - */ - rndis_filter_open(netvsc_dev); - - /* - * notify the host to switch the data path. - */ - netvsc_switch_datapath(ndev, true); - netdev_info(ndev, "Data path switched to VF: %s\n", vf_netdev->name); - - netif_carrier_off(ndev); - - /* Now notify peers through VF device. */ - call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, vf_netdev); - - return NOTIFY_OK; -} - -static int netvsc_vf_down(struct net_device *vf_netdev) -{ - struct net_device *ndev; - struct netvsc_device *netvsc_dev; - struct net_device_context *net_device_ctx; - - ndev = get_netvsc_byref(vf_netdev); - if (!ndev) + if (!netvsc_dev) return NOTIFY_DONE; - net_device_ctx = netdev_priv(ndev); - netvsc_dev = rtnl_dereference(net_device_ctx->nvdev); - - netdev_info(ndev, "VF down: %s\n", vf_netdev->name); - netvsc_switch_datapath(ndev, false); - netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name); - rndis_filter_close(netvsc_dev); - netif_carrier_on(ndev); - - /* Now notify peers through netvsc device. */ - call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, ndev); + netvsc_switch_datapath(ndev, vf_is_up); + netdev_info(ndev, "Data path switched %s VF: %s\n", + vf_is_up ? 
"to" : "from", vf_netdev->name); return NOTIFY_OK; } @@ -1504,12 +1878,15 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev) return NOTIFY_DONE; net_device_ctx = netdev_priv(ndev); + cancel_delayed_work_sync(&net_device_ctx->vf_takeover); netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name); + netdev_rx_handler_unregister(vf_netdev); + netdev_upper_dev_unlink(vf_netdev, ndev); RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL); dev_put(vf_netdev); - module_put(THIS_MODULE); + return NOTIFY_OK; } @@ -1520,12 +1897,12 @@ static int netvsc_probe(struct hv_device *dev, struct net_device_context *net_device_ctx; struct netvsc_device_info device_info; struct netvsc_device *nvdev; - int ret; + int ret = -ENOMEM; net = alloc_etherdev_mq(sizeof(struct net_device_context), VRSS_CHANNEL_MAX); if (!net) - return -ENOMEM; + goto no_net; netif_carrier_off(net); @@ -1544,6 +1921,12 @@ static int netvsc_probe(struct hv_device *dev, spin_lock_init(&net_device_ctx->lock); INIT_LIST_HEAD(&net_device_ctx->reconfig_events); + INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup); + + net_device_ctx->vf_stats + = netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats); + if (!net_device_ctx->vf_stats) + goto no_stats; net->netdev_ops = &device_ops; net->ethtool_ops = ðtool_ops; @@ -1556,13 +1939,18 @@ static int netvsc_probe(struct hv_device *dev, memset(&device_info, 0, sizeof(device_info)); device_info.ring_size = ring_size; device_info.num_chn = VRSS_CHANNEL_DEFAULT; - ret = rndis_filter_device_add(dev, &device_info); - if (ret != 0) { + device_info.send_sections = NETVSC_DEFAULT_TX; + device_info.send_section_size = NETVSC_SEND_SECTION_SIZE; + device_info.recv_sections = NETVSC_DEFAULT_RX; + device_info.recv_section_size = NETVSC_RECV_SECTION_SIZE; + + nvdev = rndis_filter_device_add(dev, &device_info); + if (IS_ERR(nvdev)) { + ret = PTR_ERR(nvdev); netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); - free_netdev(net); - hv_set_drvdata(dev, NULL); - return ret; + goto rndis_failed; } + memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); /* hw_features computed in rndis_filter_device_add */ @@ -1571,10 +1959,7 @@ static int netvsc_probe(struct hv_device *dev, NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; net->vlan_features = net->features; - /* RCU not necessary here, device not registered */ - nvdev = net_device_ctx->nvdev; - netif_set_real_num_tx_queues(net, nvdev->num_chn); - netif_set_real_num_rx_queues(net, nvdev->num_chn); + netdev_lockdep_set_classes(net); /* MTU range: 68 - 1500 or 65521 */ net->min_mtu = NETVSC_MTU_MIN; @@ -1586,20 +1971,29 @@ static int netvsc_probe(struct hv_device *dev, ret = register_netdev(net); if (ret != 0) { pr_err("Unable to register netdev.\n"); - rndis_filter_device_remove(dev, nvdev); - free_netdev(net); + goto register_failed; } return ret; + +register_failed: + rndis_filter_device_remove(dev, nvdev); +rndis_failed: + free_percpu(net_device_ctx->vf_stats); +no_stats: + hv_set_drvdata(dev, NULL); + free_netdev(net); +no_net: + return ret; } static int netvsc_remove(struct hv_device *dev) { - struct net_device *net; struct net_device_context *ndev_ctx; + struct net_device *vf_netdev; + struct net_device *net; net = hv_get_drvdata(dev); - if (net == NULL) { dev_err(&dev->device, "No net device to remove\n"); return 0; @@ -1616,13 +2010,19 @@ static int netvsc_remove(struct hv_device *dev) * removed. Also blocks mtu and channel changes. 
*/ rtnl_lock(); - rndis_filter_device_remove(dev, ndev_ctx->nvdev); - rtnl_unlock(); + vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); + if (vf_netdev) + netvsc_unregister_vf(vf_netdev); - unregister_netdev(net); + unregister_netdevice(net); + + rndis_filter_device_remove(dev, + rtnl_dereference(ndev_ctx->nvdev)); + rtnl_unlock(); hv_set_drvdata(dev, NULL); + free_percpu(ndev_ctx->vf_stats); free_netdev(net); return 0; } @@ -1677,9 +2077,8 @@ static int netvsc_netdev_event(struct notifier_block *this, case NETDEV_UNREGISTER: return netvsc_unregister_vf(event_dev); case NETDEV_UP: - return netvsc_vf_up(event_dev); case NETDEV_DOWN: - return netvsc_vf_down(event_dev); + return netvsc_vf_changed(event_dev); default: return NOTIFY_DONE; } diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index d6308ffda53e..065b204d8e17 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -28,6 +28,7 @@ #include #include #include +#include #include "hyperv_net.h" @@ -213,11 +214,11 @@ static void dump_rndis_message(struct hv_device *hv_dev, static int rndis_filter_send_request(struct rndis_device *dev, struct rndis_request *req) { - int ret; struct hv_netvsc_packet *packet; struct hv_page_buffer page_buf[2]; struct hv_page_buffer *pb = page_buf; struct net_device_context *net_device_ctx = netdev_priv(dev->ndev); + int ret; /* Setup the packet to send it */ packet = &req->pkt; @@ -243,7 +244,10 @@ static int rndis_filter_send_request(struct rndis_device *dev, pb[0].len; } - ret = netvsc_send(net_device_ctx->device_ctx, packet, NULL, &pb, NULL); + rcu_read_lock_bh(); + ret = netvsc_send(net_device_ctx, packet, NULL, pb, NULL); + rcu_read_unlock_bh(); + return ret; } @@ -443,8 +447,9 @@ int rndis_filter_receive(struct net_device *ndev, return 0; } -static int rndis_filter_query_device(struct rndis_device *dev, u32 oid, - void *result, u32 *result_size) +static int rndis_filter_query_device(struct rndis_device *dev, + struct netvsc_device *nvdev, + u32 oid, void *result, u32 *result_size) { struct rndis_request *request; u32 inresult_size = *result_size; @@ -471,8 +476,6 @@ static int rndis_filter_query_device(struct rndis_device *dev, u32 oid, query->dev_vc_handle = 0; if (oid == OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES) { - struct net_device_context *ndevctx = netdev_priv(dev->ndev); - struct netvsc_device *nvdev = ndevctx->nvdev; struct ndis_offload *hwcaps; u32 nvsp_version = nvdev->nvsp_version; u8 ndis_rev; @@ -541,14 +544,15 @@ static int rndis_filter_query_device(struct rndis_device *dev, u32 oid, /* Get the hardware offload capabilities */ static int -rndis_query_hwcaps(struct rndis_device *dev, struct ndis_offload *caps) +rndis_query_hwcaps(struct rndis_device *dev, struct netvsc_device *net_device, + struct ndis_offload *caps) { u32 caps_len = sizeof(*caps); int ret; memset(caps, 0, sizeof(*caps)); - ret = rndis_filter_query_device(dev, + ret = rndis_filter_query_device(dev, net_device, OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES, caps, &caps_len); if (ret) @@ -577,11 +581,12 @@ rndis_query_hwcaps(struct rndis_device *dev, struct ndis_offload *caps) return 0; } -static int rndis_filter_query_device_mac(struct rndis_device *dev) +static int rndis_filter_query_device_mac(struct rndis_device *dev, + struct netvsc_device *net_device) { u32 size = ETH_ALEN; - return rndis_filter_query_device(dev, + return rndis_filter_query_device(dev, net_device, RNDIS_OID_802_3_PERMANENT_ADDRESS, dev->hw_mac_adr, &size); } @@ -589,9 +594,9 @@ static int 
rndis_filter_query_device_mac(struct rndis_device *dev) #define NWADR_STR "NetworkAddress" #define NWADR_STRLEN 14 -int rndis_filter_set_device_mac(struct net_device *ndev, char *mac) +int rndis_filter_set_device_mac(struct netvsc_device *nvdev, + const char *mac) { - struct netvsc_device *nvdev = net_device_to_netvsc_device(ndev); struct rndis_device *rdev = nvdev->extension; struct rndis_request *request; struct rndis_set_request *set; @@ -645,11 +650,8 @@ int rndis_filter_set_device_mac(struct net_device *ndev, char *mac) wait_for_completion(&request->wait_event); set_complete = &request->response_msg.msg.set_complete; - if (set_complete->status != RNDIS_STATUS_SUCCESS) { - netdev_err(ndev, "Fail to set MAC on host side:0x%x\n", - set_complete->status); - ret = -EINVAL; - } + if (set_complete->status != RNDIS_STATUS_SUCCESS) + ret = -EIO; cleanup: put_rndis_request(rdev, request); @@ -658,9 +660,9 @@ int rndis_filter_set_device_mac(struct net_device *ndev, char *mac) static int rndis_filter_set_offload_params(struct net_device *ndev, + struct netvsc_device *nvdev, struct ndis_offload_params *req_offloads) { - struct netvsc_device *nvdev = net_device_to_netvsc_device(ndev); struct rndis_device *rdev = nvdev->extension; struct rndis_request *request; struct rndis_set_request *set; @@ -715,7 +717,7 @@ rndis_filter_set_offload_params(struct net_device *ndev, } int rndis_filter_set_rss_param(struct rndis_device *rdev, - const u8 *rss_key, int num_queue) + const u8 *rss_key) { struct net_device *ndev = rdev->ndev; struct rndis_request *request; @@ -782,27 +784,27 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev, return ret; } -static int rndis_filter_query_device_link_status(struct rndis_device *dev) +static int rndis_filter_query_device_link_status(struct rndis_device *dev, + struct netvsc_device *net_device) { u32 size = sizeof(u32); u32 link_status; - int ret; - ret = rndis_filter_query_device(dev, - RNDIS_OID_GEN_MEDIA_CONNECT_STATUS, - &link_status, &size); - - return ret; + return rndis_filter_query_device(dev, net_device, + RNDIS_OID_GEN_MEDIA_CONNECT_STATUS, + &link_status, &size); } -static int rndis_filter_query_link_speed(struct rndis_device *dev) +static int rndis_filter_query_link_speed(struct rndis_device *dev, + struct netvsc_device *net_device) { u32 size = sizeof(u32); u32 link_speed; struct net_device_context *ndc; int ret; - ret = rndis_filter_query_device(dev, RNDIS_OID_GEN_LINK_SPEED, + ret = rndis_filter_query_device(dev, net_device, + RNDIS_OID_GEN_LINK_SPEED, &link_speed, &size); if (!ret) { @@ -871,14 +873,14 @@ void rndis_filter_update(struct netvsc_device *nvdev) schedule_work(&rdev->mcast_work); } -static int rndis_filter_init_device(struct rndis_device *dev) +static int rndis_filter_init_device(struct rndis_device *dev, + struct netvsc_device *nvdev) { struct rndis_request *request; struct rndis_initialize_request *init; struct rndis_initialize_complete *init_complete; u32 status; int ret; - struct netvsc_device *nvdev = net_device_to_netvsc_device(dev->ndev); request = get_rndis_request(dev, RNDIS_MSG_INIT, RNDIS_MESSAGE_SIZE(struct rndis_initialize_request)); @@ -926,12 +928,12 @@ static bool netvsc_device_idle(const struct netvsc_device *nvdev) { int i; - if (atomic_read(&nvdev->num_outstanding_recvs) > 0) - return false; - for (i = 0; i < nvdev->num_chn; i++) { const struct netvsc_channel *nvchan = &nvdev->chan_table[i]; + if (nvchan->mrc.first != nvchan->mrc.next) + return false; + if (atomic_read(&nvchan->queue_sends) > 0) return false; } @@ 
-944,7 +946,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev) struct rndis_request *request; struct rndis_halt_request *halt; struct net_device_context *net_device_ctx = netdev_priv(dev->ndev); - struct netvsc_device *nvdev = net_device_ctx->nvdev; + struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); /* Attempt to do a rndis device halt */ request = get_rndis_request(dev, RNDIS_MSG_HALT, @@ -1015,20 +1017,20 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) { struct net_device *ndev = hv_get_drvdata(new_sc->primary_channel->device_obj); - struct netvsc_device *nvscdev = net_device_to_netvsc_device(ndev); + struct net_device_context *ndev_ctx = netdev_priv(ndev); + struct netvsc_device *nvscdev; u16 chn_index = new_sc->offermsg.offer.sub_channel_index; struct netvsc_channel *nvchan; int ret; - if (chn_index >= nvscdev->num_chn) + /* This is safe because this callback only happens when + * new device is being setup and waiting on the channel_init_wait. + */ + nvscdev = rcu_dereference_raw(ndev_ctx->nvdev); + if (!nvscdev || chn_index >= nvscdev->num_chn) return; nvchan = nvscdev->chan_table + chn_index; - nvchan->mrc.buf - = vzalloc(NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data)); - - if (!nvchan->mrc.buf) - return; /* Because the device uses NAPI, all the interrupt batching and * control is done via Net softirq, not the channel handling @@ -1037,8 +1039,6 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) /* Set the channel before opening.*/ nvchan->channel = new_sc; - netif_napi_add(ndev, &nvchan->napi, - netvsc_poll, NAPI_POLL_WEIGHT); ret = vmbus_open(new_sc, nvscdev->ring_size * PAGE_SIZE, nvscdev->ring_size * PAGE_SIZE, NULL, 0, @@ -1046,14 +1046,90 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) if (ret == 0) napi_enable(&nvchan->napi); else - netif_napi_del(&nvchan->napi); + netdev_notice(ndev, "sub channel open failed: %d\n", ret); - atomic_inc(&nvscdev->open_chn); - wake_up(&nvscdev->subchan_open); + if (atomic_inc_return(&nvscdev->open_chn) == nvscdev->num_chn) + wake_up(&nvscdev->subchan_open); } -int rndis_filter_device_add(struct hv_device *dev, - struct netvsc_device_info *device_info) +/* Open sub-channels after completing the handling of the device probe. + * This breaks overlap of processing the host message for the + * new primary channel with the initialization of sub-channels. 
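The new rndis_set_subchannel() below requests the extra channels from the host and then sleeps until every sub-channel open callback has counted in, i.e. until open_chn reaches num_chn. A condensed pthread sketch of that wait-until-N-have-checked-in pattern; the channel count and the worker bodies are invented for the example:

#include <pthread.h>
#include <stdio.h>

#define NUM_CHANNELS 4		/* made-up count for the example */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t all_open = PTHREAD_COND_INITIALIZER;
static int open_chn;

/* Each "sub-channel" checks in, like netvsc_sc_open() bumping open_chn
 * and waking the waiter once the last one arrives.
 */
static void *channel_open(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	if (++open_chn == NUM_CHANNELS)
		pthread_cond_signal(&all_open);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t th[NUM_CHANNELS];
	int i;

	for (i = 0; i < NUM_CHANNELS; i++)
		pthread_create(&th[i], NULL, channel_open, NULL);

	pthread_mutex_lock(&lock);
	while (open_chn != NUM_CHANNELS)	/* like wait_event(subchan_open, ...) */
		pthread_cond_wait(&all_open, &lock);
	pthread_mutex_unlock(&lock);

	printf("all %d channels open\n", NUM_CHANNELS);

	for (i = 0; i < NUM_CHANNELS; i++)
		pthread_join(th[i], NULL);
	return 0;
}
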
+ */ +void rndis_set_subchannel(struct work_struct *w) +{ + struct netvsc_device *nvdev + = container_of(w, struct netvsc_device, subchan_work); + struct nvsp_message *init_packet = &nvdev->channel_init_pkt; + struct net_device_context *ndev_ctx; + struct rndis_device *rdev; + struct net_device *ndev; + struct hv_device *hv_dev; + int i, ret; + + if (!rtnl_trylock()) { + schedule_work(w); + return; + } + + rdev = nvdev->extension; + if (!rdev) + goto unlock; /* device was removed */ + + ndev = rdev->ndev; + ndev_ctx = netdev_priv(ndev); + hv_dev = ndev_ctx->device_ctx; + + memset(init_packet, 0, sizeof(struct nvsp_message)); + init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL; + init_packet->msg.v5_msg.subchn_req.op = NVSP_SUBCHANNEL_ALLOCATE; + init_packet->msg.v5_msg.subchn_req.num_subchannels = + nvdev->num_chn - 1; + ret = vmbus_sendpacket(hv_dev->channel, init_packet, + sizeof(struct nvsp_message), + (unsigned long)init_packet, + VM_PKT_DATA_INBAND, + VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); + if (ret) { + netdev_err(ndev, "sub channel allocate send failed: %d\n", ret); + goto failed; + } + + wait_for_completion(&nvdev->channel_init_wait); + if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) { + netdev_err(ndev, "sub channel request failed\n"); + goto failed; + } + + nvdev->num_chn = 1 + + init_packet->msg.v5_msg.subchn_comp.num_subchannels; + + /* wait for all sub channels to open */ + wait_event(nvdev->subchan_open, + atomic_read(&nvdev->open_chn) == nvdev->num_chn); + + /* ignore failures from setting rss parameters, still have channels */ + rndis_filter_set_rss_param(rdev, netvsc_hash_key); + + netif_set_real_num_tx_queues(ndev, nvdev->num_chn); + netif_set_real_num_rx_queues(ndev, nvdev->num_chn); + + rtnl_unlock(); + return; + +failed: + /* fallback to only primary channel */ + for (i = 1; i < nvdev->num_chn; i++) + netif_napi_del(&nvdev->chan_table[i].napi); + + nvdev->max_chn = 1; + nvdev->num_chn = 1; +unlock: + rtnl_unlock(); +} + +struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, + struct netvsc_device_info *device_info) { struct net_device *net = hv_get_drvdata(dev); struct net_device_context *net_device_ctx = netdev_priv(net); @@ -1061,32 +1137,30 @@ int rndis_filter_device_add(struct hv_device *dev, struct rndis_device *rndis_device; struct ndis_offload hwcaps; struct ndis_offload_params offloads; - struct nvsp_message *init_packet; struct ndis_recv_scale_cap rsscap; u32 rsscap_size = sizeof(struct ndis_recv_scale_cap); unsigned int gso_max_size = GSO_MAX_SIZE; - u32 mtu, size, num_rss_qs; + u32 mtu, size; const struct cpumask *node_cpu_mask; u32 num_possible_rss_qs; int i, ret; rndis_device = get_rndis_device(); if (!rndis_device) - return -ENODEV; + return ERR_PTR(-ENODEV); /* * Let the inner driver handle this first to create the netvsc channel * NOTE!
Once the channel is created, we may get a receive callback * (RndisFilterOnReceive()) before this call is completed */ - ret = netvsc_device_add(dev, device_info); - if (ret != 0) { + net_device = netvsc_device_add(dev, device_info); + if (IS_ERR(net_device)) { kfree(rndis_device); - return ret; + return net_device; } /* Initialize the rndis device */ - net_device = net_device_ctx->nvdev; net_device->max_chn = 1; net_device->num_chn = 1; @@ -1094,35 +1168,29 @@ int rndis_filter_device_add(struct hv_device *dev, rndis_device->ndev = net; /* Send the rndis initialization message */ - ret = rndis_filter_init_device(rndis_device); - if (ret != 0) { - rndis_filter_device_remove(dev, net_device); - return ret; - } + ret = rndis_filter_init_device(rndis_device, net_device); + if (ret != 0) + goto err_dev_remv; /* Get the MTU from the host */ size = sizeof(u32); - ret = rndis_filter_query_device(rndis_device, + ret = rndis_filter_query_device(rndis_device, net_device, RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE, &mtu, &size); if (ret == 0 && size == sizeof(u32) && mtu < net->mtu) net->mtu = mtu; /* Get the mac address */ - ret = rndis_filter_query_device_mac(rndis_device); - if (ret != 0) { - rndis_filter_device_remove(dev, net_device); - return ret; - } + ret = rndis_filter_query_device_mac(rndis_device, net_device); + if (ret != 0) + goto err_dev_remv; memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN); /* Find HW offload capabilities */ - ret = rndis_query_hwcaps(rndis_device, &hwcaps); - if (ret != 0) { - rndis_filter_device_remove(dev, net_device); - return ret; - } + ret = rndis_query_hwcaps(rndis_device, net_device, &hwcaps); + if (ret != 0) + goto err_dev_remv; /* A value of zero means "no change"; now turn on what we want. */ memset(&offloads, 0, sizeof(struct ndis_offload_params)); @@ -1177,24 +1245,24 @@ int rndis_filter_device_add(struct hv_device *dev, netif_set_gso_max_size(net, gso_max_size); - ret = rndis_filter_set_offload_params(net, &offloads); + ret = rndis_filter_set_offload_params(net, net_device, &offloads); if (ret) goto err_dev_remv; - rndis_filter_query_device_link_status(rndis_device); + rndis_filter_query_device_link_status(rndis_device, net_device); netdev_dbg(net, "Device MAC %pM link state %s\n", rndis_device->hw_mac_adr, rndis_device->link_state ? 
"down" : "up"); if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_5) - return 0; + return net_device; - rndis_filter_query_link_speed(rndis_device); + rndis_filter_query_link_speed(rndis_device, net_device); /* vRSS setup */ memset(&rsscap, 0, rsscap_size); - ret = rndis_filter_query_device(rndis_device, + ret = rndis_filter_query_device(rndis_device, net_device, OID_GEN_RECEIVE_SCALE_CAPABILITIES, &rsscap, &rsscap_size); if (ret || rsscap.num_recv_que < 2) @@ -1220,53 +1288,36 @@ int rndis_filter_device_add(struct hv_device *dev, net_device->num_chn); atomic_set(&net_device->open_chn, 1); - num_rss_qs = net_device->num_chn - 1; - if (num_rss_qs == 0) - return 0; - vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open); - init_packet = &net_device->channel_init_pkt; - memset(init_packet, 0, sizeof(struct nvsp_message)); - init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL; - init_packet->msg.v5_msg.subchn_req.op = NVSP_SUBCHANNEL_ALLOCATE; - init_packet->msg.v5_msg.subchn_req.num_subchannels = - net_device->num_chn - 1; - ret = vmbus_sendpacket(dev->channel, init_packet, - sizeof(struct nvsp_message), - (unsigned long)init_packet, - VM_PKT_DATA_INBAND, - VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); - if (ret) - goto out; - - wait_for_completion(&net_device->channel_init_wait); - if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) { - ret = -ENODEV; - goto out; + for (i = 1; i < net_device->num_chn; i++) { + ret = netvsc_alloc_recv_comp_ring(net_device, i); + if (ret) { + while (--i != 0) + vfree(net_device->chan_table[i].mrc.slots); + goto out; + } } - net_device->num_chn = 1 + - init_packet->msg.v5_msg.subchn_comp.num_subchannels; + for (i = 1; i < net_device->num_chn; i++) + netif_napi_add(net, &net_device->chan_table[i].napi, + netvsc_poll, NAPI_POLL_WEIGHT); - /* wait for all sub channels to open */ - wait_event(net_device->subchan_open, - atomic_read(&net_device->open_chn) == net_device->num_chn); + if (net_device->num_chn > 1) + schedule_work(&net_device->subchan_work); - /* ignore failues from setting rss parameters, still have channels */ - rndis_filter_set_rss_param(rndis_device, netvsc_hash_key, - net_device->num_chn); out: + /* if unavailable, just proceed with one queue */ if (ret) { net_device->max_chn = 1; net_device->num_chn = 1; } - return 0; /* return 0 because primary channel can be used alone */ + return net_device; err_dev_remv: rndis_filter_device_remove(dev, net_device); - return ret; + return ERR_PTR(ret); } void rndis_filter_device_remove(struct hv_device *dev, @@ -1277,10 +1328,10 @@ void rndis_filter_device_remove(struct hv_device *dev, /* Halt and release the rndis device */ rndis_filter_halt_device(rndis_dev); - kfree(rndis_dev); net_dev->extension = NULL; netvsc_device_remove(dev); + kfree(rndis_dev); } int rndis_filter_open(struct netvsc_device *nvdev) @@ -1304,3 +1355,8 @@ int rndis_filter_close(struct netvsc_device *nvdev) return rndis_filter_close_device(nvdev->extension); } + +bool rndis_filter_opened(const struct netvsc_device *nvdev) +{ + return atomic_read(&nvdev->open_cnt) > 0; +} diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index a626c539fb17..24a1eabbbc9d 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -66,6 +66,7 @@ #include #include #include +#include #include #include @@ -916,10 +917,7 @@ static int ca8210_spi_transfer( struct cas_control *cas_ctl; if (!spi) { - dev_crit( - &spi->dev, - "NULL spi device passed to ca8210_spi_transfer\n" - ); + 
pr_crit("NULL spi device passed to %s\n", __func__); return -ENODEV; } diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c index 7d334963dc08..ee7084b2d52d 100644 --- a/drivers/net/ieee802154/mrf24j40.c +++ b/drivers/net/ieee802154/mrf24j40.c @@ -1330,7 +1330,8 @@ static int mrf24j40_probe(struct spi_device *spi) if (spi->max_speed_hz > MAX_SPI_SPEED_HZ) { dev_warn(&spi->dev, "spi clock above possible maximum: %d", MAX_SPI_SPEED_HZ); - return -EINVAL; + ret = -EINVAL; + goto err_register_device; } ret = mrf24j40_hw_init(devrec); diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 8dab74a81303..c74893c1e620 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -15,7 +15,7 @@ struct ipvlan_netns { unsigned int ipvl_nf_hook_refcnt; }; -static struct nf_hook_ops ipvl_nfops[] __read_mostly = { +static const struct nf_hook_ops ipvl_nfops[] = { { .hook = ipvlan_nf_input, .pf = NFPROTO_IPV4, @@ -169,7 +169,7 @@ static void ipvlan_port_destroy(struct net_device *dev) #define IPVLAN_FEATURES \ (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ - NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \ + NETIF_F_GSO | NETIF_F_TSO | NETIF_F_GSO_ROBUST | \ NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER) diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c index 22f133ea8d7b..5dea2063dbc8 100644 --- a/drivers/net/ipvlan/ipvtap.c +++ b/drivers/net/ipvlan/ipvtap.c @@ -24,7 +24,7 @@ #include #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \ - NETIF_F_TSO6 | NETIF_F_UFO) + NETIF_F_TSO6) static dev_t ipvtap_major; static struct cdev ipvtap_cdev; diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 0f581ee74fe4..d2aea961e0f4 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -835,13 +835,13 @@ static struct lock_class_key macvlan_netdev_addr_lock_key; #define ALWAYS_ON_OFFLOADS \ (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE | \ - NETIF_F_GSO_ROBUST) + NETIF_F_GSO_ROBUST | NETIF_F_GSO_ENCAP_ALL) #define ALWAYS_ON_FEATURES (ALWAYS_ON_OFFLOADS | NETIF_F_LLTX) #define MACVLAN_FEATURES \ (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ - NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_LRO | \ + NETIF_F_GSO | NETIF_F_TSO | NETIF_F_LRO | \ NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER) @@ -874,6 +874,7 @@ static int macvlan_init(struct net_device *dev) dev->hw_features |= NETIF_F_LRO; dev->vlan_features = lowerdev->vlan_features & MACVLAN_FEATURES; dev->vlan_features |= ALWAYS_ON_OFFLOADS; + dev->hw_enc_features |= dev->features; dev->gso_max_size = lowerdev->gso_max_size; dev->gso_max_segs = lowerdev->gso_max_segs; dev->hard_header_len = lowerdev->hard_header_len; diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 91e7b19bbf86..c2d0ea2fb019 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -49,7 +49,7 @@ static struct class macvtap_class = { static struct cdev macvtap_cdev; #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \ - NETIF_F_TSO6 | NETIF_F_UFO) + NETIF_F_TSO6) static void macvtap_count_tx_dropped(struct tap_dev *tap) { diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 928fd892f167..cd931cf9dcc2 100644 --- a/drivers/net/phy/Kconfig +++ 
b/drivers/net/phy/Kconfig @@ -5,7 +5,7 @@ menuconfig MDIO_DEVICE tristate "MDIO bus device drivers" help - MDIO devices and driver infrastructure code. + MDIO devices and driver infrastructure code. config MDIO_BUS tristate @@ -85,7 +85,7 @@ config MDIO_BUS_MUX_MMIOREG parent bus. Child bus selection is under the control of one of the FPGA's registers. - Currently, only 8-bit registers are supported. + Currently, only 8/16/32 bits registers are supported. config MDIO_CAVIUM tristate @@ -106,12 +106,22 @@ config MDIO_HISI_FEMAC This module provides a driver for the MDIO busses found in the Hisilicon SoC that have an Fast Ethernet MAC. +config MDIO_I2C + tristate + depends on I2C + help + Support I2C based PHYs. This provides a MDIO bus bridged + to I2C to allow PHYs connected in I2C mode to be accessed + using the existing infrastructure. + + This is library mode. + config MDIO_MOXART - tristate "MOXA ART MDIO interface support" - depends on ARCH_MOXART - help - This driver supports the MDIO interface found in the network - interface units of the MOXA ART SoC + tristate "MOXA ART MDIO interface support" + depends on ARCH_MOXART + help + This driver supports the MDIO interface found in the network + interface units of the MOXA ART SoC config MDIO_OCTEON tristate "Octeon and some ThunderX SOCs MDIO buses" @@ -150,6 +160,16 @@ config MDIO_XGENE endif +config PHYLINK + tristate + depends on NETDEVICES + select PHYLIB + select SWPHY + help + PHYlink models the link between the PHY and MAC, allowing fixed + configuration links, PHYs, and Serdes links with MAC level + autonegotiation modes. + menuconfig PHYLIB tristate "PHY Device support and infrastructure" depends on NETDEVICES @@ -172,7 +192,7 @@ config LED_TRIGGER_PHY state change will trigger the events, for consumption by an LED class driver. There are triggers for each link speed currently supported by the phy, and are of the form: - :: + :: Where speed is in the form: Mbps or Gbps @@ -180,15 +200,20 @@ config LED_TRIGGER_PHY comment "MII PHY device drivers" +config SFP + tristate "SFP cage support" + depends on I2C && PHYLINK + select MDIO_I2C + config AMD_PHY tristate "AMD PHYs" ---help--- Currently supports the am79c874 config AQUANTIA_PHY - tristate "Aquantia PHYs" - ---help--- - Currently supports the Aquantia AQ1202, AQ2104, AQR105, AQR405 + tristate "Aquantia PHYs" + ---help--- + Currently supports the Aquantia AQ1202, AQ2104, AQR105, AQR405 config AT803X_PHY tristate "AT803X PHYs" @@ -341,6 +366,11 @@ config REALTEK_PHY ---help--- Supports the Realtek 821x PHY. +config ROCKCHIP_PHY + tristate "Driver for Rockchip Ethernet PHYs" + ---help--- + Currently supports the integrated Ethernet PHY. + config SMSC_PHY tristate "SMSC PHYs" ---help--- @@ -352,21 +382,21 @@ config STE10XP This is the driver for the STe100p and STe101p PHYs. config TERANETICS_PHY - tristate "Teranetics PHYs" - ---help--- - Currently supports the Teranetics TN2020 + tristate "Teranetics PHYs" + ---help--- + Currently supports the Teranetics TN2020 config VITESSE_PHY - tristate "Vitesse PHYs" - ---help--- - Currently supports the vsc8244 + tristate "Vitesse PHYs" + ---help--- + Currently supports the vsc8244 config XILINX_GMII2RGMII - tristate "Xilinx GMII2RGMII converter driver" - ---help--- - This driver support xilinx GMII to RGMII IP core it provides - the Reduced Gigabit Media Independent Interface(RGMII) between - Ethernet physical media devices and the Gigabit Ethernet controller. 
+ tristate "Xilinx GMII2RGMII converter driver" + ---help--- + This driver support xilinx GMII to RGMII IP core it provides + the Reduced Gigabit Media Independent Interface(RGMII) between + Ethernet physical media devices and the Gigabit Ethernet controller. endif # PHYLIB diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 8e9b9f349384..416df92fbf4f 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -18,6 +18,7 @@ endif libphy-$(CONFIG_SWPHY) += swphy.o libphy-$(CONFIG_LED_TRIGGER_PHY) += phy_led_triggers.o +obj-$(CONFIG_PHYLINK) += phylink.o obj-$(CONFIG_PHYLIB) += libphy.o obj-$(CONFIG_MDIO_BCM_IPROC) += mdio-bcm-iproc.o @@ -30,12 +31,17 @@ obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o +obj-$(CONFIG_MDIO_I2C) += mdio-i2c.o obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o obj-$(CONFIG_MDIO_THUNDER) += mdio-thunder.o obj-$(CONFIG_MDIO_XGENE) += mdio-xgene.o +obj-$(CONFIG_SFP) += sfp.o +sfp-obj-$(CONFIG_SFP) += sfp-bus.o +obj-y += $(sfp-obj-y) $(sfp-obj-m) + obj-$(CONFIG_AMD_PHY) += amd.o obj-$(CONFIG_AQUANTIA_PHY) += aquantia.o obj-$(CONFIG_AT803X_PHY) += at803x.o @@ -66,6 +72,7 @@ obj-$(CONFIG_MICROSEMI_PHY) += mscc.o obj-$(CONFIG_NATIONAL_PHY) += national.o obj-$(CONFIG_QSEMI_PHY) += qsemi.o obj-$(CONFIG_REALTEK_PHY) += realtek.o +obj-$(CONFIG_ROCKCHIP_PHY) += rockchip.o obj-$(CONFIG_SMSC_PHY) += smsc.o obj-$(CONFIG_STE10XP) += ste10Xp.o obj-$(CONFIG_TERANETICS_PHY) += teranetics.o diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c index caa9f6e17f34..8b33f688ac8a 100644 --- a/drivers/net/phy/bcm7xxx.c +++ b/drivers/net/phy/bcm7xxx.c @@ -511,7 +511,7 @@ static int bcm7xxx_config_init(struct phy_device *phydev) static int bcm7xxx_suspend(struct phy_device *phydev) { int ret; - const struct bcm7xxx_regs { + static const struct bcm7xxx_regs { int reg; u16 value; } bcm7xxx_suspend_cfg[] = { diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index c3065236ffcc..cbd629822f04 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -874,7 +874,6 @@ static void decode_rxts(struct dp83640_private *dp83640, shhwtstamps = skb_hwtstamps(skb); memset(shhwtstamps, 0, sizeof(*shhwtstamps)); shhwtstamps->hwtstamp = ns_to_ktime(rxts->ns); - netif_rx_ni(skb); list_add(&rxts->list, &dp83640->rxpool); break; } @@ -885,6 +884,9 @@ static void decode_rxts(struct dp83640_private *dp83640, list_add_tail(&rxts->list, &dp83640->rxts); out: spin_unlock_irqrestore(&dp83640->rx_lock, flags); + + if (shhwtstamps) + netif_rx_ni(skb); } static void decode_txts(struct dp83640_private *dp83640, @@ -1425,7 +1427,6 @@ static bool dp83640_rxtstamp(struct phy_device *phydev, shhwtstamps = skb_hwtstamps(skb); memset(shhwtstamps, 0, sizeof(*shhwtstamps)); shhwtstamps->hwtstamp = ns_to_ktime(rxts->ns); - netif_rx_ni(skb); list_del_init(&rxts->list); list_add(&rxts->list, &dp83640->rxpool); break; @@ -1438,6 +1439,8 @@ static bool dp83640_rxtstamp(struct phy_device *phydev, skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT; skb_queue_tail(&dp83640->rx_queue, skb); schedule_delayed_work(&dp83640->ts_work, SKB_TIMESTAMP_TIMEOUT); + } else { + netif_rx_ni(skb); } return true; diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 5d314f143aea..15cbcdba618a 100644 --- a/drivers/net/phy/marvell.c +++ 
b/drivers/net/phy/marvell.c @@ -55,43 +55,35 @@ #define MII_M1011_IMASK_INIT 0x6400 #define MII_M1011_IMASK_CLEAR 0x0000 -#define MII_M1011_PHY_SCR 0x10 -#define MII_M1011_PHY_SCR_MDI 0x0000 -#define MII_M1011_PHY_SCR_MDI_X 0x0020 -#define MII_M1011_PHY_SCR_AUTO_CROSS 0x0060 - -#define MII_M1145_PHY_EXT_SR 0x1b -#define MII_M1145_PHY_EXT_CR 0x14 -#define MII_M1145_RGMII_RX_DELAY 0x0080 -#define MII_M1145_RGMII_TX_DELAY 0x0002 -#define MII_M1145_HWCFG_MODE_SGMII_NO_CLK 0x4 -#define MII_M1145_HWCFG_MODE_MASK 0xf -#define MII_M1145_HWCFG_FIBER_COPPER_AUTO 0x8000 - -#define MII_M1145_HWCFG_MODE_SGMII_NO_CLK 0x4 -#define MII_M1145_HWCFG_MODE_MASK 0xf -#define MII_M1145_HWCFG_FIBER_COPPER_AUTO 0x8000 +#define MII_M1011_PHY_SCR 0x10 +#define MII_M1011_PHY_SCR_DOWNSHIFT_EN BIT(11) +#define MII_M1011_PHY_SCR_DOWNSHIFT_SHIFT 12 +#define MII_M1011_PHY_SRC_DOWNSHIFT_MASK 0x7800 +#define MII_M1011_PHY_SCR_MDI (0x0 << 5) +#define MII_M1011_PHY_SCR_MDI_X (0x1 << 5) +#define MII_M1011_PHY_SCR_AUTO_CROSS (0x3 << 5) #define MII_M1111_PHY_LED_CONTROL 0x18 #define MII_M1111_PHY_LED_DIRECT 0x4100 #define MII_M1111_PHY_LED_COMBINE 0x411c #define MII_M1111_PHY_EXT_CR 0x14 -#define MII_M1111_RX_DELAY 0x80 -#define MII_M1111_TX_DELAY 0x2 +#define MII_M1111_RGMII_RX_DELAY BIT(7) +#define MII_M1111_RGMII_TX_DELAY BIT(1) #define MII_M1111_PHY_EXT_SR 0x1b #define MII_M1111_HWCFG_MODE_MASK 0xf -#define MII_M1111_HWCFG_MODE_COPPER_RGMII 0xb #define MII_M1111_HWCFG_MODE_FIBER_RGMII 0x3 #define MII_M1111_HWCFG_MODE_SGMII_NO_CLK 0x4 +#define MII_M1111_HWCFG_MODE_RTBI 0x7 #define MII_M1111_HWCFG_MODE_COPPER_RTBI 0x9 -#define MII_M1111_HWCFG_FIBER_COPPER_AUTO 0x8000 -#define MII_M1111_HWCFG_FIBER_COPPER_RES 0x2000 +#define MII_M1111_HWCFG_MODE_COPPER_RGMII 0xb +#define MII_M1111_HWCFG_FIBER_COPPER_RES BIT(13) +#define MII_M1111_HWCFG_FIBER_COPPER_AUTO BIT(15) #define MII_88E1121_PHY_MSCR_REG 21 #define MII_88E1121_PHY_MSCR_RX_DELAY BIT(5) #define MII_88E1121_PHY_MSCR_TX_DELAY BIT(4) -#define MII_88E1121_PHY_MSCR_DELAY_MASK (~(0x3 << 4)) +#define MII_88E1121_PHY_MSCR_DELAY_MASK (~(BIT(5) | BIT(4))) #define MII_88E1121_MISC_TEST 0x1a #define MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK 0x1f00 @@ -108,24 +100,24 @@ #define MII_88E1318S_PHY_MSCR1_PAD_ODD BIT(6) /* Copper Specific Interrupt Enable Register */ -#define MII_88E1318S_PHY_CSIER 0x12 +#define MII_88E1318S_PHY_CSIER 0x12 /* WOL Event Interrupt Enable */ -#define MII_88E1318S_PHY_CSIER_WOL_EIE BIT(7) +#define MII_88E1318S_PHY_CSIER_WOL_EIE BIT(7) /* LED Timer Control Register */ -#define MII_88E1318S_PHY_LED_TCR 0x12 -#define MII_88E1318S_PHY_LED_TCR_FORCE_INT BIT(15) -#define MII_88E1318S_PHY_LED_TCR_INTn_ENABLE BIT(7) -#define MII_88E1318S_PHY_LED_TCR_INT_ACTIVE_LOW BIT(11) +#define MII_88E1318S_PHY_LED_TCR 0x12 +#define MII_88E1318S_PHY_LED_TCR_FORCE_INT BIT(15) +#define MII_88E1318S_PHY_LED_TCR_INTn_ENABLE BIT(7) +#define MII_88E1318S_PHY_LED_TCR_INT_ACTIVE_LOW BIT(11) /* Magic Packet MAC address registers */ -#define MII_88E1318S_PHY_MAGIC_PACKET_WORD2 0x17 -#define MII_88E1318S_PHY_MAGIC_PACKET_WORD1 0x18 -#define MII_88E1318S_PHY_MAGIC_PACKET_WORD0 0x19 +#define MII_88E1318S_PHY_MAGIC_PACKET_WORD2 0x17 +#define MII_88E1318S_PHY_MAGIC_PACKET_WORD1 0x18 +#define MII_88E1318S_PHY_MAGIC_PACKET_WORD0 0x19 -#define MII_88E1318S_PHY_WOL_CTRL 0x10 -#define MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS BIT(12) -#define MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE BIT(14) +#define MII_88E1318S_PHY_WOL_CTRL 0x10 +#define MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS BIT(12) 
+#define MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE BIT(14) #define MII_88E1121_PHY_LED_CTRL 16 #define MII_88E1121_PHY_LED_DEF 0x0030 @@ -138,8 +130,6 @@ #define MII_M1011_PHY_STATUS_RESOLVED 0x0800 #define MII_M1011_PHY_STATUS_LINK 0x0400 -#define MII_M1116R_CONTROL_REG_MAC 21 - #define MII_88E3016_PHY_SPEC_CTRL 0x10 #define MII_88E3016_DISABLE_SCRAMBLER 0x0200 #define MII_88E3016_AUTO_MDIX_CROSSOVER 0x0030 @@ -152,7 +142,7 @@ #define LPA_FIBER_1000HALF 0x40 #define LPA_FIBER_1000FULL 0x20 -#define LPA_PAUSE_FIBER 0x180 +#define LPA_PAUSE_FIBER 0x180 #define LPA_PAUSE_ASYM_FIBER 0x100 #define ADVERTISE_FIBER_1000HALF 0x40 @@ -274,6 +264,23 @@ static int marvell_set_polarity(struct phy_device *phydev, int polarity) return 0; } +static int marvell_set_downshift(struct phy_device *phydev, bool enable, + u8 retries) +{ + int reg; + + reg = phy_read(phydev, MII_M1011_PHY_SCR); + if (reg < 0) + return reg; + + reg &= MII_M1011_PHY_SRC_DOWNSHIFT_MASK; + reg |= ((retries - 1) << MII_M1011_PHY_SCR_DOWNSHIFT_SHIFT); + if (enable) + reg |= MII_M1011_PHY_SCR_DOWNSHIFT_EN; + + return phy_write(phydev, MII_M1011_PHY_SCR, reg); +} + static int marvell_config_aneg(struct phy_device *phydev) { int err; @@ -292,17 +299,11 @@ static int marvell_config_aneg(struct phy_device *phydev) return err; if (phydev->autoneg != AUTONEG_ENABLE) { - int bmcr; - /* A write to speed/duplex bits (that is performed by * genphy_config_aneg() call above) must be followed by * a software reset. Otherwise, the write has no effect. */ - bmcr = phy_read(phydev, MII_BMCR); - if (bmcr < 0) - return bmcr; - - err = phy_write(phydev, MII_BMCR, bmcr | BMCR_RESET); + err = genphy_soft_reset(phydev); if (err < 0) return err; } @@ -318,8 +319,7 @@ static int m88e1101_config_aneg(struct phy_device *phydev) * that certain registers get written in order * to restart autonegotiation */ - err = phy_write(phydev, MII_BMCR, BMCR_RESET); - + err = genphy_soft_reset(phydev); if (err < 0) return err; @@ -354,7 +354,7 @@ static int m88e1111_config_aneg(struct phy_device *phydev) * that certain registers get written in order * to restart autonegotiation */ - err = phy_write(phydev, MII_BMCR, BMCR_RESET); + err = genphy_soft_reset(phydev); err = marvell_set_polarity(phydev, phydev->mdix_ctrl); if (err < 0) @@ -370,17 +370,11 @@ static int m88e1111_config_aneg(struct phy_device *phydev) return err; if (phydev->autoneg != AUTONEG_ENABLE) { - int bmcr; - /* A write to speed/duplex bits (that is performed by * genphy_config_aneg() call above) must be followed by * a software reset. Otherwise, the write has no effect. 
*/ - bmcr = phy_read(phydev, MII_BMCR); - if (bmcr < 0) - return bmcr; - - err = phy_write(phydev, MII_BMCR, bmcr | BMCR_RESET); + err = genphy_soft_reset(phydev); if (err < 0) return err; } @@ -466,7 +460,7 @@ static int marvell_of_reg_init(struct phy_device *phydev) } #endif /* CONFIG_OF_MDIO */ -static int m88e1121_config_aneg(struct phy_device *phydev) +static int m88e1121_config_aneg_rgmii_delays(struct phy_device *phydev) { int err, oldpage, mscr; @@ -474,31 +468,45 @@ static int m88e1121_config_aneg(struct phy_device *phydev) if (oldpage < 0) return oldpage; + mscr = phy_read(phydev, MII_88E1121_PHY_MSCR_REG); + if (mscr < 0) { + err = mscr; + goto out; + } + + mscr &= MII_88E1121_PHY_MSCR_DELAY_MASK; + + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) + mscr |= (MII_88E1121_PHY_MSCR_RX_DELAY | + MII_88E1121_PHY_MSCR_TX_DELAY); + else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) + mscr |= MII_88E1121_PHY_MSCR_RX_DELAY; + else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) + mscr |= MII_88E1121_PHY_MSCR_TX_DELAY; + + err = phy_write(phydev, MII_88E1121_PHY_MSCR_REG, mscr); + +out: + marvell_set_page(phydev, oldpage); + + return err; +} + +static int m88e1121_config_aneg(struct phy_device *phydev) +{ + int err = 0; + if (phy_interface_is_rgmii(phydev)) { - mscr = phy_read(phydev, MII_88E1121_PHY_MSCR_REG) & - MII_88E1121_PHY_MSCR_DELAY_MASK; - - if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) - mscr |= (MII_88E1121_PHY_MSCR_RX_DELAY | - MII_88E1121_PHY_MSCR_TX_DELAY); - else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) - mscr |= MII_88E1121_PHY_MSCR_RX_DELAY; - else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) - mscr |= MII_88E1121_PHY_MSCR_TX_DELAY; - - err = phy_write(phydev, MII_88E1121_PHY_MSCR_REG, mscr); - if (err < 0) + err = m88e1121_config_aneg_rgmii_delays(phydev); + if (err) return err; } - marvell_set_page(phydev, oldpage); - - err = phy_write(phydev, MII_BMCR, BMCR_RESET); + err = genphy_soft_reset(phydev); if (err < 0) return err; - err = phy_write(phydev, MII_M1011_PHY_SCR, - MII_M1011_PHY_SCR_AUTO_CROSS); + err = marvell_set_polarity(phydev, phydev->mdix_ctrl); if (err < 0) return err; @@ -596,7 +604,7 @@ static int marvell_config_aneg_fiber(struct phy_device *phydev) if (changed == 0) { /* Advertisement hasn't changed, but maybe aneg was never on to - * begin with? Or maybe phy was isolated? + * begin with? Or maybe phy was isolated? 
*/ int ctl = phy_read(phydev, MII_BMCR); @@ -653,12 +661,9 @@ static int marvell_config_init(struct phy_device *phydev) static int m88e1116r_config_init(struct phy_device *phydev) { - int temp; int err; - temp = phy_read(phydev, MII_BMCR); - temp |= BMCR_RESET; - err = phy_write(phydev, MII_BMCR, temp); + err = genphy_soft_reset(phydev); if (err < 0) return err; @@ -668,34 +673,21 @@ static int m88e1116r_config_init(struct phy_device *phydev) if (err < 0) return err; - temp = phy_read(phydev, MII_M1011_PHY_SCR); - temp |= (7 << 12); /* max number of gigabit attempts */ - temp |= (1 << 11); /* enable downshift */ - temp |= MII_M1011_PHY_SCR_AUTO_CROSS; - err = phy_write(phydev, MII_M1011_PHY_SCR, temp); + err = marvell_set_polarity(phydev, phydev->mdix_ctrl); if (err < 0) return err; - err = marvell_set_page(phydev, MII_MARVELL_MSCR_PAGE); - if (err < 0) - return err; - temp = phy_read(phydev, MII_M1116R_CONTROL_REG_MAC); - temp |= (1 << 5); - temp |= (1 << 4); - err = phy_write(phydev, MII_M1116R_CONTROL_REG_MAC, temp); - if (err < 0) - return err; - err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE); + err = marvell_set_downshift(phydev, true, 8); if (err < 0) return err; - temp = phy_read(phydev, MII_BMCR); - temp |= BMCR_RESET; - err = phy_write(phydev, MII_BMCR, temp); + err = m88e1121_config_aneg_rgmii_delays(phydev); if (err < 0) return err; - mdelay(500); + err = genphy_soft_reset(phydev); + if (err < 0) + return err; return marvell_config_init(phydev); } @@ -719,9 +711,29 @@ static int m88e3016_config_init(struct phy_device *phydev) return marvell_config_init(phydev); } -static int m88e1111_config_init_rgmii(struct phy_device *phydev) +static int m88e1111_config_init_hwcfg_mode(struct phy_device *phydev, + u16 mode, + int fibre_copper_auto) +{ + int temp; + + temp = phy_read(phydev, MII_M1111_PHY_EXT_SR); + if (temp < 0) + return temp; + + temp &= ~(MII_M1111_HWCFG_MODE_MASK | + MII_M1111_HWCFG_FIBER_COPPER_AUTO | + MII_M1111_HWCFG_FIBER_COPPER_RES); + temp |= mode; + + if (fibre_copper_auto) + temp |= MII_M1111_HWCFG_FIBER_COPPER_AUTO; + + return phy_write(phydev, MII_M1111_PHY_EXT_SR, temp); +} + +static int m88e1111_config_init_rgmii_delays(struct phy_device *phydev) { - int err; int temp; temp = phy_read(phydev, MII_M1111_PHY_EXT_CR); @@ -729,16 +741,24 @@ static int m88e1111_config_init_rgmii(struct phy_device *phydev) return temp; if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) { - temp |= (MII_M1111_RX_DELAY | MII_M1111_TX_DELAY); + temp |= (MII_M1111_RGMII_RX_DELAY | MII_M1111_RGMII_TX_DELAY); } else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) { - temp &= ~MII_M1111_TX_DELAY; - temp |= MII_M1111_RX_DELAY; + temp &= ~MII_M1111_RGMII_TX_DELAY; + temp |= MII_M1111_RGMII_RX_DELAY; } else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) { - temp &= ~MII_M1111_RX_DELAY; - temp |= MII_M1111_TX_DELAY; + temp &= ~MII_M1111_RGMII_RX_DELAY; + temp |= MII_M1111_RGMII_TX_DELAY; } - err = phy_write(phydev, MII_M1111_PHY_EXT_CR, temp); + return phy_write(phydev, MII_M1111_PHY_EXT_CR, temp); +} + +static int m88e1111_config_init_rgmii(struct phy_device *phydev) +{ + int temp; + int err; + + err = m88e1111_config_init_rgmii_delays(phydev); if (err < 0) return err; @@ -759,17 +779,11 @@ static int m88e1111_config_init_rgmii(struct phy_device *phydev) static int m88e1111_config_init_sgmii(struct phy_device *phydev) { int err; - int temp; - temp = phy_read(phydev, MII_M1111_PHY_EXT_SR); - if (temp < 0) - return temp; - - temp &= ~(MII_M1111_HWCFG_MODE_MASK); - 
temp |= MII_M1111_HWCFG_MODE_SGMII_NO_CLK; - temp |= MII_M1111_HWCFG_FIBER_COPPER_AUTO; - - err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp); + err = m88e1111_config_init_hwcfg_mode( + phydev, + MII_M1111_HWCFG_MODE_SGMII_NO_CLK, + MII_M1111_HWCFG_FIBER_COPPER_AUTO); if (err < 0) return err; @@ -780,48 +794,27 @@ static int m88e1111_config_init_sgmii(struct phy_device *phydev) static int m88e1111_config_init_rtbi(struct phy_device *phydev) { int err; - int temp; - temp = phy_read(phydev, MII_M1111_PHY_EXT_CR); - if (temp < 0) - return temp; - - temp |= (MII_M1111_RX_DELAY | MII_M1111_TX_DELAY); - err = phy_write(phydev, MII_M1111_PHY_EXT_CR, temp); - if (err < 0) + err = m88e1111_config_init_rgmii_delays(phydev); + if (err) return err; - temp = phy_read(phydev, MII_M1111_PHY_EXT_SR); - if (temp < 0) - return temp; - - temp &= ~(MII_M1111_HWCFG_MODE_MASK | - MII_M1111_HWCFG_FIBER_COPPER_RES); - temp |= 0x7 | MII_M1111_HWCFG_FIBER_COPPER_AUTO; - - err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp); + err = m88e1111_config_init_hwcfg_mode( + phydev, + MII_M1111_HWCFG_MODE_RTBI, + MII_M1111_HWCFG_FIBER_COPPER_AUTO); if (err < 0) return err; /* soft reset */ - err = phy_write(phydev, MII_BMCR, BMCR_RESET); + err = genphy_soft_reset(phydev); if (err < 0) return err; - do - temp = phy_read(phydev, MII_BMCR); - while (temp & BMCR_RESET); - - temp = phy_read(phydev, MII_M1111_PHY_EXT_SR); - if (temp < 0) - return temp; - - temp &= ~(MII_M1111_HWCFG_MODE_MASK | - MII_M1111_HWCFG_FIBER_COPPER_RES); - temp |= MII_M1111_HWCFG_MODE_COPPER_RTBI | - MII_M1111_HWCFG_FIBER_COPPER_AUTO; - - return phy_write(phydev, MII_M1111_PHY_EXT_SR, temp); + return m88e1111_config_init_hwcfg_mode( + phydev, + MII_M1111_HWCFG_MODE_RTBI, + MII_M1111_HWCFG_FIBER_COPPER_AUTO); } static int m88e1111_config_init(struct phy_device *phydev) @@ -850,7 +843,7 @@ static int m88e1111_config_init(struct phy_device *phydev) if (err < 0) return err; - return phy_write(phydev, MII_BMCR, BMCR_RESET); + return genphy_soft_reset(phydev); } static int m88e1121_config_init(struct phy_device *phydev) @@ -912,12 +905,11 @@ static int m88e1118_config_aneg(struct phy_device *phydev) { int err; - err = phy_write(phydev, MII_BMCR, BMCR_RESET); + err = genphy_soft_reset(phydev); if (err < 0) return err; - err = phy_write(phydev, MII_M1011_PHY_SCR, - MII_M1011_PHY_SCR_AUTO_CROSS); + err = marvell_set_polarity(phydev, phydev->mdix_ctrl); if (err < 0) return err; @@ -961,7 +953,7 @@ static int m88e1118_config_init(struct phy_device *phydev) if (err < 0) return err; - return phy_write(phydev, MII_BMCR, BMCR_RESET); + return genphy_soft_reset(phydev); } static int m88e1149_config_init(struct phy_device *phydev) @@ -987,20 +979,15 @@ static int m88e1149_config_init(struct phy_device *phydev) if (err < 0) return err; - return phy_write(phydev, MII_BMCR, BMCR_RESET); + return genphy_soft_reset(phydev); } static int m88e1145_config_init_rgmii(struct phy_device *phydev) { + int temp; int err; - int temp = phy_read(phydev, MII_M1145_PHY_EXT_CR); - if (temp < 0) - return temp; - - temp |= (MII_M1145_RGMII_RX_DELAY | MII_M1145_RGMII_TX_DELAY); - - err = phy_write(phydev, MII_M1145_PHY_EXT_CR, temp); + err = m88e1111_config_init_rgmii_delays(phydev); if (err < 0) return err; @@ -1032,16 +1019,9 @@ static int m88e1145_config_init_rgmii(struct phy_device *phydev) static int m88e1145_config_init_sgmii(struct phy_device *phydev) { - int temp = phy_read(phydev, MII_M1145_PHY_EXT_SR); - - if (temp < 0) - return temp; - - temp &= ~MII_M1145_HWCFG_MODE_MASK; - temp 
|= MII_M1145_HWCFG_MODE_SGMII_NO_CLK; - temp |= MII_M1145_HWCFG_FIBER_COPPER_AUTO; - - return phy_write(phydev, MII_M1145_PHY_EXT_SR, temp); + return m88e1111_config_init_hwcfg_mode( + phydev, MII_M1111_HWCFG_MODE_SGMII_NO_CLK, + MII_M1111_HWCFG_FIBER_COPPER_AUTO); } static int m88e1145_config_init(struct phy_device *phydev) @@ -1515,7 +1495,7 @@ static void marvell_get_strings(struct phy_device *phydev, u8 *data) } #ifndef UINT64_MAX -#define UINT64_MAX (u64)(~((u64)0)) +#define UINT64_MAX (u64)(~((u64)0)) #endif static u64 marvell_get_stat(struct phy_device *phydev, int i) { diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c index 34395230ce70..08e0647b85e2 100644 --- a/drivers/net/phy/mdio-bcm-unimac.c +++ b/drivers/net/phy/mdio-bcm-unimac.c @@ -21,6 +21,8 @@ #include #include +#include + #define MDIO_CMD 0x00 #define MDIO_START_BUSY (1 << 29) #define MDIO_READ_FAIL (1 << 28) @@ -41,46 +43,80 @@ struct unimac_mdio_priv { struct mii_bus *mii_bus; void __iomem *base; + int (*wait_func) (void *wait_func_data); + void *wait_func_data; }; +static inline u32 unimac_mdio_readl(struct unimac_mdio_priv *priv, u32 offset) +{ + /* MIPS chips strapped for BE will automagically configure the + * peripheral registers for CPU-native byte order. + */ + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) + return __raw_readl(priv->base + offset); + else + return readl_relaxed(priv->base + offset); +} + +static inline void unimac_mdio_writel(struct unimac_mdio_priv *priv, u32 val, + u32 offset) +{ + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) + __raw_writel(val, priv->base + offset); + else + writel_relaxed(val, priv->base + offset); +} + static inline void unimac_mdio_start(struct unimac_mdio_priv *priv) { u32 reg; - reg = __raw_readl(priv->base + MDIO_CMD); + reg = unimac_mdio_readl(priv, MDIO_CMD); reg |= MDIO_START_BUSY; - __raw_writel(reg, priv->base + MDIO_CMD); + unimac_mdio_writel(priv, reg, MDIO_CMD); } static inline unsigned int unimac_mdio_busy(struct unimac_mdio_priv *priv) { - return __raw_readl(priv->base + MDIO_CMD) & MDIO_START_BUSY; + return unimac_mdio_readl(priv, MDIO_CMD) & MDIO_START_BUSY; +} + +static int unimac_mdio_poll(void *wait_func_data) +{ + struct unimac_mdio_priv *priv = wait_func_data; + unsigned int timeout = 1000; + + do { + if (!unimac_mdio_busy(priv)) + return 0; + + usleep_range(1000, 2000); + } while (--timeout); + + if (!timeout) + return -ETIMEDOUT; + + return 0; } static int unimac_mdio_read(struct mii_bus *bus, int phy_id, int reg) { struct unimac_mdio_priv *priv = bus->priv; - unsigned int timeout = 1000; + int ret; u32 cmd; /* Prepare the read operation */ cmd = MDIO_RD | (phy_id << MDIO_PMD_SHIFT) | (reg << MDIO_REG_SHIFT); - __raw_writel(cmd, priv->base + MDIO_CMD); + unimac_mdio_writel(priv, cmd, MDIO_CMD); /* Start MDIO transaction */ unimac_mdio_start(priv); - do { - if (!unimac_mdio_busy(priv)) - break; + ret = priv->wait_func(priv->wait_func_data); + if (ret) + return ret; - usleep_range(1000, 2000); - } while (timeout--); - - if (!timeout) - return -ETIMEDOUT; - - cmd = __raw_readl(priv->base + MDIO_CMD); + cmd = unimac_mdio_readl(priv, MDIO_CMD); /* Some broken devices are known not to release the line during * turn-around, e.g: Broadcom BCM53125 external switches, so check for @@ -97,27 +133,16 @@ static int unimac_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val) { struct unimac_mdio_priv *priv = bus->priv; - unsigned int timeout = 1000; u32 cmd; /* Prepare the write 
operation */ cmd = MDIO_WR | (phy_id << MDIO_PMD_SHIFT) | (reg << MDIO_REG_SHIFT) | (0xffff & val); - __raw_writel(cmd, priv->base + MDIO_CMD); + unimac_mdio_writel(priv, cmd, MDIO_CMD); unimac_mdio_start(priv); - do { - if (!unimac_mdio_busy(priv)) - break; - - usleep_range(1000, 2000); - } while (timeout--); - - if (!timeout) - return -ETIMEDOUT; - - return 0; + return priv->wait_func(priv->wait_func_data); } /* Workaround for integrated BCM7xxx Gigabit PHYs which have a problem with @@ -155,8 +180,10 @@ static int unimac_mdio_reset(struct mii_bus *bus) } for (addr = 0; addr < PHY_MAX_ADDR; addr++) { - if (read_mask & 1 << addr) + if (read_mask & 1 << addr) { + dev_dbg(&bus->dev, "Workaround for PHY @ %d\n", addr); mdiobus_read(bus, addr, MII_BMSR); + } } return 0; @@ -164,6 +191,7 @@ static int unimac_mdio_reset(struct mii_bus *bus) static int unimac_mdio_probe(struct platform_device *pdev) { + struct unimac_mdio_pdata *pdata = pdev->dev.platform_data; struct unimac_mdio_priv *priv; struct device_node *np; struct mii_bus *bus; @@ -193,12 +221,21 @@ static int unimac_mdio_probe(struct platform_device *pdev) bus = priv->mii_bus; bus->priv = priv; - bus->name = "unimac MII bus"; + if (pdata) { + bus->name = pdata->bus_name; + priv->wait_func = pdata->wait_func; + priv->wait_func_data = pdata->wait_func_data; + bus->phy_mask = ~pdata->phy_mask; + } else { + bus->name = "unimac MII bus"; + priv->wait_func_data = priv; + priv->wait_func = unimac_mdio_poll; + } bus->parent = &pdev->dev; bus->read = unimac_mdio_read; bus->write = unimac_mdio_write; bus->reset = unimac_mdio_reset; - snprintf(bus->id, MII_BUS_ID_SIZE, "%s", pdev->name); + snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", pdev->name, pdev->id); ret = of_mdiobus_register(bus, np); if (ret) { @@ -240,7 +277,7 @@ MODULE_DEVICE_TABLE(of, unimac_mdio_ids); static struct platform_driver unimac_mdio_driver = { .driver = { - .name = "unimac-mdio", + .name = UNIMAC_MDIO_DRV_NAME, .of_match_table = unimac_mdio_ids, }, .probe = unimac_mdio_probe, @@ -251,4 +288,4 @@ module_platform_driver(unimac_mdio_driver); MODULE_AUTHOR("Broadcom Corporation"); MODULE_DESCRIPTION("Broadcom UniMAC MDIO bus controller"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:unimac-mdio"); +MODULE_ALIAS("platform:" UNIMAC_MDIO_DRV_NAME); diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 7faa79b254ef..4333c6e14742 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -116,7 +116,7 @@ static void mdc_set(struct mdiobb_ctrl *ctrl, int what) gpiod_set_value(bitbang->mdc, what); } -static struct mdiobb_ops mdio_gpio_ops = { +static const struct mdiobb_ops mdio_gpio_ops = { .owner = THIS_MODULE, .set_mdc = mdc_set, .set_mdio_dir = mdio_dir, diff --git a/drivers/net/phy/mdio-i2c.c b/drivers/net/phy/mdio-i2c.c new file mode 100644 index 000000000000..6d24fd13ca86 --- /dev/null +++ b/drivers/net/phy/mdio-i2c.c @@ -0,0 +1,109 @@ +/* + * MDIO I2C bridge + * + * Copyright (C) 2015-2016 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Network PHYs can appear on I2C buses when they are part of SFP module. + * This driver exposes these PHYs to the networking PHY code, allowing + * our PHY drivers access to these PHYs, and so allowing configuration + * of their settings. 
+ */ +#include +#include + +#include "mdio-i2c.h" + +/* + * I2C bus addresses 0x50 and 0x51 are normally an EEPROM, which is + * specified to be present in SFP modules. These correspond with PHY + * addresses 16 and 17. Disallow access to these "phy" addresses. + */ +static bool i2c_mii_valid_phy_id(int phy_id) +{ + return phy_id != 0x10 && phy_id != 0x11; +} + +static unsigned int i2c_mii_phy_addr(int phy_id) +{ + return phy_id + 0x40; +} + +static int i2c_mii_read(struct mii_bus *bus, int phy_id, int reg) +{ + struct i2c_adapter *i2c = bus->priv; + struct i2c_msg msgs[2]; + u8 data[2], dev_addr = reg; + int bus_addr, ret; + + if (!i2c_mii_valid_phy_id(phy_id)) + return 0xffff; + + bus_addr = i2c_mii_phy_addr(phy_id); + msgs[0].addr = bus_addr; + msgs[0].flags = 0; + msgs[0].len = 1; + msgs[0].buf = &dev_addr; + msgs[1].addr = bus_addr; + msgs[1].flags = I2C_M_RD; + msgs[1].len = sizeof(data); + msgs[1].buf = data; + + ret = i2c_transfer(i2c, msgs, ARRAY_SIZE(msgs)); + if (ret != ARRAY_SIZE(msgs)) + return 0xffff; + + return data[0] << 8 | data[1]; +} + +static int i2c_mii_write(struct mii_bus *bus, int phy_id, int reg, u16 val) +{ + struct i2c_adapter *i2c = bus->priv; + struct i2c_msg msg; + int ret; + u8 data[3]; + + if (!i2c_mii_valid_phy_id(phy_id)) + return 0; + + data[0] = reg; + data[1] = val >> 8; + data[2] = val; + + msg.addr = i2c_mii_phy_addr(phy_id); + msg.flags = 0; + msg.len = 3; + msg.buf = data; + + ret = i2c_transfer(i2c, &msg, 1); + + return ret < 0 ? ret : 0; +} + +struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c) +{ + struct mii_bus *mii; + + if (!i2c_check_functionality(i2c, I2C_FUNC_I2C)) + return ERR_PTR(-EINVAL); + + mii = mdiobus_alloc(); + if (!mii) + return ERR_PTR(-ENOMEM); + + snprintf(mii->id, MII_BUS_ID_SIZE, "i2c:%s", dev_name(parent)); + mii->parent = parent; + mii->read = i2c_mii_read; + mii->write = i2c_mii_write; + mii->priv = i2c; + + return mii; +} +EXPORT_SYMBOL_GPL(mdio_i2c_alloc); + +MODULE_AUTHOR("Russell King"); +MODULE_DESCRIPTION("MDIO I2C bridge library"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/phy/mdio-i2c.h b/drivers/net/phy/mdio-i2c.h new file mode 100644 index 000000000000..889ab57d7f3e --- /dev/null +++ b/drivers/net/phy/mdio-i2c.h @@ -0,0 +1,19 @@ +/* + * MDIO I2C bridge + * + * Copyright (C) 2015 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef MDIO_I2C_H +#define MDIO_I2C_H + +struct device; +struct i2c_adapter; +struct mii_bus; + +struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c); + +#endif diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c index 0a5f62e0efcc..0831b7142df7 100644 --- a/drivers/net/phy/mdio-mux-bcm-iproc.c +++ b/drivers/net/phy/mdio-mux-bcm-iproc.c @@ -199,7 +199,7 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev) platform_set_drvdata(pdev, md); - rc = mdio_mux_init(md->dev, mdio_mux_iproc_switch_fn, + rc = mdio_mux_init(md->dev, md->dev->of_node, mdio_mux_iproc_switch_fn, &md->mux_handle, md, md->mii_bus); if (rc) { dev_info(md->dev, "mdiomux initialization failed\n"); diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c index 919949960a10..082ffef0dec4 100644 --- a/drivers/net/phy/mdio-mux-gpio.c +++ b/drivers/net/phy/mdio-mux-gpio.c @@ -54,7 +54,7 @@ static int mdio_mux_gpio_probe(struct platform_device *pdev) if (IS_ERR(s->gpios)) return PTR_ERR(s->gpios); - r = mdio_mux_init(&pdev->dev, + r = mdio_mux_init(&pdev->dev, pdev->dev.of_node, mdio_mux_gpio_switch_fn, &s->mux_handle, s, NULL); if (r != 0) { diff --git a/drivers/net/phy/mdio-mux-mmioreg.c b/drivers/net/phy/mdio-mux-mmioreg.c index 6a33646bdf05..2573ab012f16 100644 --- a/drivers/net/phy/mdio-mux-mmioreg.c +++ b/drivers/net/phy/mdio-mux-mmioreg.c @@ -105,7 +105,7 @@ static int mdio_mux_mmioreg_probe(struct platform_device *pdev) const __be32 *iprop; int len, ret; - dev_dbg(&pdev->dev, "probing node %s\n", np->full_name); + dev_dbg(&pdev->dev, "probing node %pOF\n", np); s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL); if (!s) @@ -113,8 +113,8 @@ static int mdio_mux_mmioreg_probe(struct platform_device *pdev) ret = of_address_to_resource(np, 0, &res); if (ret) { - dev_err(&pdev->dev, "could not obtain memory map for node %s\n", - np->full_name); + dev_err(&pdev->dev, "could not obtain memory map for node %pOF\n", + np); return ret; } s->phys = res.start; @@ -145,25 +145,26 @@ static int mdio_mux_mmioreg_probe(struct platform_device *pdev) for_each_available_child_of_node(np, np2) { iprop = of_get_property(np2, "reg", &len); if (!iprop || len != sizeof(uint32_t)) { - dev_err(&pdev->dev, "mdio-mux child node %s is " - "missing a 'reg' property\n", np2->full_name); + dev_err(&pdev->dev, "mdio-mux child node %pOF is " + "missing a 'reg' property\n", np2); of_node_put(np2); return -ENODEV; } if (be32_to_cpup(iprop) & ~s->mask) { - dev_err(&pdev->dev, "mdio-mux child node %s has " + dev_err(&pdev->dev, "mdio-mux child node %pOF has " "a 'reg' value with unmasked bits\n", - np2->full_name); + np2); of_node_put(np2); return -ENODEV; } } - ret = mdio_mux_init(&pdev->dev, mdio_mux_mmioreg_switch_fn, + ret = mdio_mux_init(&pdev->dev, pdev->dev.of_node, + mdio_mux_mmioreg_switch_fn, &s->mux_handle, s, NULL); if (ret) { - dev_err(&pdev->dev, "failed to register mdio-mux bus %s\n", - np->full_name); + dev_err(&pdev->dev, "failed to register mdio-mux bus %pOF\n", + np); return ret; } diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c index c608e1dfaf09..0a86f1e4c02f 100644 --- a/drivers/net/phy/mdio-mux.c +++ b/drivers/net/phy/mdio-mux.c @@ -13,7 +13,6 @@ #include #include -#define DRV_VERSION "1.0" #define DRV_DESCRIPTION "MDIO bus multiplexer driver" struct mdio_mux_child_bus; @@ -87,6 +86,7 @@ static int mdio_mux_write(struct mii_bus *bus, int phy_id, static int parent_count; int mdio_mux_init(struct device *dev, + 
struct device_node *mux_node, int (*switch_fn)(int cur, int desired, void *data), void **mux_handle, void *data, @@ -99,11 +99,11 @@ int mdio_mux_init(struct device *dev, struct mdio_mux_parent_bus *pb; struct mdio_mux_child_bus *cb; - if (!dev->of_node) + if (!mux_node) return -ENODEV; if (!mux_bus) { - parent_bus_node = of_parse_phandle(dev->of_node, + parent_bus_node = of_parse_phandle(mux_node, "mdio-parent-bus", 0); if (!parent_bus_node) @@ -117,10 +117,11 @@ int mdio_mux_init(struct device *dev, } else { parent_bus_node = NULL; parent_bus = mux_bus; + get_device(&parent_bus->dev); } pb = devm_kzalloc(dev, sizeof(*pb), GFP_KERNEL); - if (pb == NULL) { + if (!pb) { ret_val = -ENOMEM; goto err_pb_kz; } @@ -132,22 +133,19 @@ int mdio_mux_init(struct device *dev, pb->mii_bus = parent_bus; ret_val = -ENODEV; - for_each_available_child_of_node(dev->of_node, child_bus_node) { + for_each_available_child_of_node(mux_node, child_bus_node) { int v; r = of_property_read_u32(child_bus_node, "reg", &v); if (r) { dev_err(dev, - "Error: Failed to find reg for child %s\n", - of_node_full_name(child_bus_node)); + "Error: Failed to find reg for child %pOF\n", + child_bus_node); continue; } cb = devm_kzalloc(dev, sizeof(*cb), GFP_KERNEL); - if (cb == NULL) { - dev_err(dev, - "Error: Failed to allocate memory for child %s\n", - of_node_full_name(child_bus_node)); + if (!cb) { ret_val = -ENOMEM; continue; } @@ -156,9 +154,6 @@ int mdio_mux_init(struct device *dev, cb->mii_bus = mdiobus_alloc(); if (!cb->mii_bus) { - dev_err(dev, - "Error: Failed to allocate MDIO bus for child %s\n", - of_node_full_name(child_bus_node)); ret_val = -ENOMEM; devm_kfree(dev, cb); continue; @@ -174,8 +169,8 @@ int mdio_mux_init(struct device *dev, r = of_mdiobus_register(cb->mii_bus, child_bus_node); if (r) { dev_err(dev, - "Error: Failed to register MDIO bus for child %s\n", - of_node_full_name(child_bus_node)); + "Error: Failed to register MDIO bus for child %pOF\n", + child_bus_node); mdiobus_free(cb->mii_bus); devm_kfree(dev, cb); } else { @@ -185,16 +180,13 @@ int mdio_mux_init(struct device *dev, } if (pb->children) { *mux_handle = pb; - dev_info(dev, "Version " DRV_VERSION "\n"); return 0; } dev_err(dev, "Error: No acceptable child buses found\n"); devm_kfree(dev, pb); err_pb_kz: - /* balance the reference of_mdio_find_bus() took */ - if (!mux_bus) - put_device(&parent_bus->dev); + put_device(&parent_bus->dev); err_parent_bus: of_node_put(parent_bus_node); return ret_val; @@ -212,12 +204,10 @@ void mdio_mux_uninit(void *mux_handle) cb = cb->next; } - /* balance the reference of_mdio_find_bus() in mdio_mux_init() took */ put_device(&pb->mii_bus->dev); } EXPORT_SYMBOL_GPL(mdio_mux_uninit); MODULE_DESCRIPTION(DRV_DESCRIPTION); -MODULE_VERSION(DRV_VERSION); MODULE_AUTHOR("David Daney"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c index 6739b738bbaf..21f75ae244b3 100644 --- a/drivers/net/phy/phy-core.c +++ b/drivers/net/phy/phy-core.c @@ -9,6 +9,186 @@ #include #include +const char *phy_speed_to_str(int speed) +{ + switch (speed) { + case SPEED_10: + return "10Mbps"; + case SPEED_100: + return "100Mbps"; + case SPEED_1000: + return "1Gbps"; + case SPEED_2500: + return "2.5Gbps"; + case SPEED_5000: + return "5Gbps"; + case SPEED_10000: + return "10Gbps"; + case SPEED_14000: + return "14Gbps"; + case SPEED_20000: + return "20Gbps"; + case SPEED_25000: + return "25Gbps"; + case SPEED_40000: + return "40Gbps"; + case SPEED_50000: + return "50Gbps"; + case SPEED_56000: + return 
"56Gbps"; + case SPEED_100000: + return "100Gbps"; + case SPEED_UNKNOWN: + return "Unknown"; + default: + return "Unsupported (update phy-core.c)"; + } +} +EXPORT_SYMBOL_GPL(phy_speed_to_str); + +const char *phy_duplex_to_str(unsigned int duplex) +{ + if (duplex == DUPLEX_HALF) + return "Half"; + if (duplex == DUPLEX_FULL) + return "Full"; + if (duplex == DUPLEX_UNKNOWN) + return "Unknown"; + return "Unsupported (update phy-core.c)"; +} +EXPORT_SYMBOL_GPL(phy_duplex_to_str); + +/* A mapping of all SUPPORTED settings to speed/duplex. This table + * must be grouped by speed and sorted in descending match priority + * - iow, descending speed. */ +static const struct phy_setting settings[] = { + { + .speed = SPEED_10000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, + }, + { + .speed = SPEED_10000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, + }, + { + .speed = SPEED_10000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_10000baseT_Full_BIT, + }, + { + .speed = SPEED_2500, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_2500baseX_Full_BIT, + }, + { + .speed = SPEED_1000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, + }, + { + .speed = SPEED_1000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_1000baseX_Full_BIT, + }, + { + .speed = SPEED_1000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + }, + { + .speed = SPEED_1000, + .duplex = DUPLEX_HALF, + .bit = ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + }, + { + .speed = SPEED_100, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_100baseT_Full_BIT, + }, + { + .speed = SPEED_100, + .duplex = DUPLEX_HALF, + .bit = ETHTOOL_LINK_MODE_100baseT_Half_BIT, + }, + { + .speed = SPEED_10, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_10baseT_Full_BIT, + }, + { + .speed = SPEED_10, + .duplex = DUPLEX_HALF, + .bit = ETHTOOL_LINK_MODE_10baseT_Half_BIT, + }, +}; + +/** + * phy_lookup_setting - lookup a PHY setting + * @speed: speed to match + * @duplex: duplex to match + * @mask: allowed link modes + * @maxbit: bit size of link modes + * @exact: an exact match is required + * + * Search the settings array for a setting that matches the speed and + * duplex, and which is supported. + * + * If @exact is unset, either an exact match or %NULL for no match will + * be returned. + * + * If @exact is set, an exact match, the fastest supported setting at + * or below the specified speed, the slowest supported setting, or if + * they all fail, %NULL will be returned. 
+ */ +const struct phy_setting * +phy_lookup_setting(int speed, int duplex, const unsigned long *mask, + size_t maxbit, bool exact) +{ + const struct phy_setting *p, *match = NULL, *last = NULL; + int i; + + for (i = 0, p = settings; i < ARRAY_SIZE(settings); i++, p++) { + if (p->bit < maxbit && test_bit(p->bit, mask)) { + last = p; + if (p->speed == speed && p->duplex == duplex) { + /* Exact match for speed and duplex */ + match = p; + break; + } else if (!exact) { + if (!match && p->speed <= speed) + /* Candidate */ + match = p; + + if (p->speed < speed) + break; + } + } + } + + if (!match && !exact) + match = last; + + return match; +} +EXPORT_SYMBOL_GPL(phy_lookup_setting); + +size_t phy_speeds(unsigned int *speeds, size_t size, + unsigned long *mask, size_t maxbit) +{ + size_t count; + int i; + + for (i = 0, count = 0; i < ARRAY_SIZE(settings) && count < size; i++) + if (settings[i].bit < maxbit && + test_bit(settings[i].bit, mask) && + (count == 0 || speeds[count - 1] != settings[i].speed)) + speeds[count++] = settings[i].speed; + + return count; +} + static void mmd_phy_indirect(struct mii_bus *bus, int phy_addr, int devad, u16 regnum) { diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index d0626bf5c540..2b1e67bc1e73 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -30,7 +30,6 @@ #include #include #include -#include #include #include #include @@ -39,42 +38,6 @@ #include -static const char *phy_speed_to_str(int speed) -{ - switch (speed) { - case SPEED_10: - return "10Mbps"; - case SPEED_100: - return "100Mbps"; - case SPEED_1000: - return "1Gbps"; - case SPEED_2500: - return "2.5Gbps"; - case SPEED_5000: - return "5Gbps"; - case SPEED_10000: - return "10Gbps"; - case SPEED_14000: - return "14Gbps"; - case SPEED_20000: - return "20Gbps"; - case SPEED_25000: - return "25Gbps"; - case SPEED_40000: - return "40Gbps"; - case SPEED_50000: - return "50Gbps"; - case SPEED_56000: - return "56Gbps"; - case SPEED_100000: - return "100Gbps"; - case SPEED_UNKNOWN: - return "Unknown"; - default: - return "Unsupported (update phy.c)"; - } -} - #define PHY_STATE_STR(_state) \ case PHY_##_state: \ return __stringify(_state); \ @@ -110,7 +73,7 @@ void phy_print_status(struct phy_device *phydev) netdev_info(phydev->attached_dev, "Link is Up - %s/%s - flow control %s\n", phy_speed_to_str(phydev->speed), - DUPLEX_FULL == phydev->duplex ? "Full" : "Half", + phy_duplex_to_str(phydev->duplex), phydev->pause ? "rx/tx" : "off"); } else { netdev_info(phydev->attached_dev, "Link is Down\n"); @@ -194,123 +157,6 @@ int phy_aneg_done(struct phy_device *phydev) } EXPORT_SYMBOL(phy_aneg_done); -/* A structure for mapping a particular speed and duplex - * combination to a particular SUPPORTED and ADVERTISED value - */ -struct phy_setting { - int speed; - int duplex; - u32 setting; -}; - -/* A mapping of all SUPPORTED settings to speed/duplex. This table - * must be grouped by speed and sorted in descending match priority - * - iow, descending speed. 
*/ -static const struct phy_setting settings[] = { - { - .speed = SPEED_10000, - .duplex = DUPLEX_FULL, - .setting = SUPPORTED_10000baseKR_Full, - }, - { - .speed = SPEED_10000, - .duplex = DUPLEX_FULL, - .setting = SUPPORTED_10000baseKX4_Full, - }, - { - .speed = SPEED_10000, - .duplex = DUPLEX_FULL, - .setting = SUPPORTED_10000baseT_Full, - }, - { - .speed = SPEED_2500, - .duplex = DUPLEX_FULL, - .setting = SUPPORTED_2500baseX_Full, - }, - { - .speed = SPEED_1000, - .duplex = DUPLEX_FULL, - .setting = SUPPORTED_1000baseKX_Full, - }, - { - .speed = SPEED_1000, - .duplex = DUPLEX_FULL, - .setting = SUPPORTED_1000baseT_Full, - }, - { - .speed = SPEED_1000, - .duplex = DUPLEX_HALF, - .setting = SUPPORTED_1000baseT_Half, - }, - { - .speed = SPEED_100, - .duplex = DUPLEX_FULL, - .setting = SUPPORTED_100baseT_Full, - }, - { - .speed = SPEED_100, - .duplex = DUPLEX_HALF, - .setting = SUPPORTED_100baseT_Half, - }, - { - .speed = SPEED_10, - .duplex = DUPLEX_FULL, - .setting = SUPPORTED_10baseT_Full, - }, - { - .speed = SPEED_10, - .duplex = DUPLEX_HALF, - .setting = SUPPORTED_10baseT_Half, - }, -}; - -/** - * phy_lookup_setting - lookup a PHY setting - * @speed: speed to match - * @duplex: duplex to match - * @features: allowed link modes - * @exact: an exact match is required - * - * Search the settings array for a setting that matches the speed and - * duplex, and which is supported. - * - * If @exact is unset, either an exact match or %NULL for no match will - * be returned. - * - * If @exact is set, an exact match, the fastest supported setting at - * or below the specified speed, the slowest supported setting, or if - * they all fail, %NULL will be returned. - */ -static const struct phy_setting * -phy_lookup_setting(int speed, int duplex, u32 features, bool exact) -{ - const struct phy_setting *p, *match = NULL, *last = NULL; - int i; - - for (i = 0, p = settings; i < ARRAY_SIZE(settings); i++, p++) { - if (p->setting & features) { - last = p; - if (p->speed == speed && p->duplex == duplex) { - /* Exact match for speed and duplex */ - match = p; - break; - } else if (!exact) { - if (!match && p->speed <= speed) - /* Candidate */ - match = p; - - if (p->speed < speed) - break; - } - } - } - - if (!match && !exact) - match = last; - - return match; -} - /** * phy_find_valid - find a PHY setting that matches the requested parameters * @speed: desired speed @@ -327,7 +173,9 @@ phy_lookup_setting(int speed, int duplex, u32 features, bool exact) static const struct phy_setting * phy_find_valid(int speed, int duplex, u32 supported) { - return phy_lookup_setting(speed, duplex, supported, false); + unsigned long mask = supported; + + return phy_lookup_setting(speed, duplex, &mask, BITS_PER_LONG, false); } /** @@ -344,16 +192,9 @@ unsigned int phy_supported_speeds(struct phy_device *phy, unsigned int *speeds, unsigned int size) { - unsigned int count = 0; - unsigned int idx = 0; + unsigned long supported = phy->supported; - for (idx = 0; idx < ARRAY_SIZE(settings) && count < size; idx++) - /* Assumes settings are grouped by speed */ - if ((settings[idx].setting & phy->supported) && - (count == 0 || speeds[count - 1] != settings[idx].speed)) - speeds[count++] = settings[idx].speed; - - return count; + return phy_speeds(speeds, size, &supported, BITS_PER_LONG); } /** @@ -367,7 +208,9 @@ unsigned int phy_supported_speeds(struct phy_device *phy, */ static inline bool phy_check_valid(int speed, int duplex, u32 features) { - return !!phy_lookup_setting(speed, duplex, features, true); + unsigned long mask = 
features; + + return !!phy_lookup_setting(speed, duplex, &mask, BITS_PER_LONG, true); } /** @@ -530,7 +373,8 @@ void phy_ethtool_ksettings_get(struct phy_device *phydev, cmd->base.port = PORT_BNC; else cmd->base.port = PORT_MII; - + cmd->base.transceiver = phy_is_internal(phydev) ? + XCVR_INTERNAL : XCVR_EXTERNAL; cmd->base.phy_address = phydev->mdio.addr; cmd->base.autoneg = phydev->autoneg; cmd->base.eth_tp_mdix_ctrl = phydev->mdix_ctrl; @@ -705,14 +549,15 @@ EXPORT_SYMBOL(phy_start_aneg); * * Description: The PHY infrastructure can run a state machine * which tracks whether the PHY is starting up, negotiating, - * etc. This function starts the timer which tracks the state - * of the PHY. If you want to maintain your own state machine, + * etc. This function starts the delayed workqueue which tracks + * the state of the PHY. If you want to maintain your own state machine, * do not call this function. */ void phy_start_machine(struct phy_device *phydev) { queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, HZ); } +EXPORT_SYMBOL_GPL(phy_start_machine); /** * phy_trigger_machine - trigger the state machine to run @@ -737,9 +582,9 @@ void phy_trigger_machine(struct phy_device *phydev, bool sync) * phy_stop_machine - stop the PHY state machine tracking * @phydev: target phy_device struct * - * Description: Stops the state machine timer, sets the state to UP - * (unless it wasn't up yet). This function must be called BEFORE - * phy_detach. + * Description: Stops the state machine delayed workqueue, sets the + * state to UP (unless it wasn't up yet). This function must be + * called BEFORE phy_detach. */ void phy_stop_machine(struct phy_device *phydev) { @@ -1019,9 +864,15 @@ void phy_start(struct phy_device *phydev) } EXPORT_SYMBOL(phy_start); -static void phy_adjust_link(struct phy_device *phydev) +static void phy_link_up(struct phy_device *phydev) { - phydev->adjust_link(phydev->attached_dev); + phydev->phy_link_change(phydev, true, true); + phy_led_trigger_change_speed(phydev); +} + +static void phy_link_down(struct phy_device *phydev, bool do_carrier) +{ + phydev->phy_link_change(phydev, false, do_carrier); phy_led_trigger_change_speed(phydev); } @@ -1066,8 +917,7 @@ void phy_state_machine(struct work_struct *work) /* If the link is down, give up on negotiation for now */ if (!phydev->link) { phydev->state = PHY_NOLINK; - netif_carrier_off(phydev->attached_dev); - phy_adjust_link(phydev); + phy_link_down(phydev, true); break; } @@ -1079,9 +929,7 @@ void phy_state_machine(struct work_struct *work) /* If AN is done, we're running */ if (err > 0) { phydev->state = PHY_RUNNING; - netif_carrier_on(phydev->attached_dev); - phy_adjust_link(phydev); - + phy_link_up(phydev); } else if (0 == phydev->link_timeout--) needs_aneg = true; break; @@ -1106,8 +954,7 @@ void phy_state_machine(struct work_struct *work) } } phydev->state = PHY_RUNNING; - netif_carrier_on(phydev->attached_dev); - phy_adjust_link(phydev); + phy_link_up(phydev); } break; case PHY_FORCING: @@ -1117,13 +964,12 @@ void phy_state_machine(struct work_struct *work) if (phydev->link) { phydev->state = PHY_RUNNING; - netif_carrier_on(phydev->attached_dev); + phy_link_up(phydev); } else { if (0 == phydev->link_timeout--) needs_aneg = true; + phy_link_down(phydev, false); } - - phy_adjust_link(phydev); break; case PHY_RUNNING: /* Only register a CHANGE if we are polling and link changed @@ -1155,14 +1001,12 @@ void phy_state_machine(struct work_struct *work) if (phydev->link) { phydev->state = PHY_RUNNING; - 
netif_carrier_on(phydev->attached_dev); + phy_link_up(phydev); } else { phydev->state = PHY_NOLINK; - netif_carrier_off(phydev->attached_dev); + phy_link_down(phydev, true); } - phy_adjust_link(phydev); - if (phy_interrupt_is_valid(phydev)) err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED); @@ -1170,8 +1014,7 @@ void phy_state_machine(struct work_struct *work) case PHY_HALTED: if (phydev->link) { phydev->link = 0; - netif_carrier_off(phydev->attached_dev); - phy_adjust_link(phydev); + phy_link_down(phydev, true); do_suspend = true; } break; @@ -1191,11 +1034,11 @@ void phy_state_machine(struct work_struct *work) if (phydev->link) { phydev->state = PHY_RUNNING; - netif_carrier_on(phydev->attached_dev); + phy_link_up(phydev); } else { phydev->state = PHY_NOLINK; + phy_link_down(phydev, false); } - phy_adjust_link(phydev); } else { phydev->state = PHY_AN; phydev->link_timeout = PHY_AN_TIMEOUT; @@ -1207,11 +1050,11 @@ void phy_state_machine(struct work_struct *work) if (phydev->link) { phydev->state = PHY_RUNNING; - netif_carrier_on(phydev->attached_dev); + phy_link_up(phydev); } else { phydev->state = PHY_NOLINK; + phy_link_down(phydev, false); } - phy_adjust_link(phydev); } break; } @@ -1226,9 +1069,10 @@ void phy_state_machine(struct work_struct *work) if (err < 0) phy_error(phydev); - phydev_dbg(phydev, "PHY state change %s -> %s\n", - phy_state_to_str(old_state), - phy_state_to_str(phydev->state)); + if (old_state != phydev->state) + phydev_dbg(phydev, "PHY state change %s -> %s\n", + phy_state_to_str(old_state), + phy_state_to_str(phydev->state)); /* Only re-schedule a PHY state machine change if we are polling the * PHY, if PHY_IGNORE_INTERRUPT is set, then we will be moving diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 2f742ae5b92e..67f25ac29025 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -688,6 +688,19 @@ struct phy_device *phy_find_first(struct mii_bus *bus) } EXPORT_SYMBOL(phy_find_first); +static void phy_link_change(struct phy_device *phydev, bool up, bool do_carrier) +{ + struct net_device *netdev = phydev->attached_dev; + + if (do_carrier) { + if (up) + netif_carrier_on(netdev); + else + netif_carrier_off(netdev); + } + phydev->adjust_link(netdev); +} + /** * phy_prepare_link - prepares the PHY layer to monitor link status * @phydev: target phy_device struct @@ -861,21 +874,37 @@ void phy_attached_info(struct phy_device *phydev) } EXPORT_SYMBOL(phy_attached_info); -#define ATTACHED_FMT "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)" +#define ATTACHED_FMT "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%s)" void phy_attached_print(struct phy_device *phydev, const char *fmt, ...) { const char *drv_name = phydev->drv ? 
phydev->drv->name : "unbound"; + char *irq_str; + char irq_num[8]; + + switch(phydev->irq) { + case PHY_POLL: + irq_str = "POLL"; + break; + case PHY_IGNORE_INTERRUPT: + irq_str = "IGNORE"; + break; + default: + snprintf(irq_num, sizeof(irq_num), "%d", phydev->irq); + irq_str = irq_num; + break; + } + if (!fmt) { dev_info(&phydev->mdio.dev, ATTACHED_FMT "\n", drv_name, phydev_name(phydev), - phydev->irq); + irq_str); } else { va_list ap; dev_info(&phydev->mdio.dev, ATTACHED_FMT, drv_name, phydev_name(phydev), - phydev->irq); + irq_str); va_start(ap, fmt); vprintk(fmt, ap); @@ -953,6 +982,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, goto error; } + phydev->phy_link_change = phy_link_change; phydev->attached_dev = dev; dev->phydev = phydev; @@ -1072,6 +1102,7 @@ void phy_detach(struct phy_device *phydev) phydev->attached_dev->phydev = NULL; phydev->attached_dev = NULL; phy_suspend(phydev); + phydev->phylink = NULL; phy_led_triggers_unregister(phydev); diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c new file mode 100644 index 000000000000..bcb4755bcd95 --- /dev/null +++ b/drivers/net/phy/phylink.c @@ -0,0 +1,1462 @@ +/* + * phylink models the MAC to optional PHY connection, supporting + * technologies such as SFP cages where the PHY is hot-pluggable. + * + * Copyright (C) 2015 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sfp.h" +#include "swphy.h" + +#define SUPPORTED_INTERFACES \ + (SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_FIBRE | \ + SUPPORTED_BNC | SUPPORTED_AUI | SUPPORTED_Backplane) +#define ADVERTISED_INTERFACES \ + (ADVERTISED_TP | ADVERTISED_MII | ADVERTISED_FIBRE | \ + ADVERTISED_BNC | ADVERTISED_AUI | ADVERTISED_Backplane) + +enum { + PHYLINK_DISABLE_STOPPED, + PHYLINK_DISABLE_LINK, +}; + +struct phylink { + struct net_device *netdev; + const struct phylink_mac_ops *ops; + + unsigned long phylink_disable_state; /* bitmask of disables */ + struct phy_device *phydev; + phy_interface_t link_interface; /* PHY_INTERFACE_xxx */ + u8 link_an_mode; /* MLO_AN_xxx */ + u8 link_port; /* The current non-phy ethtool port */ + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); + + /* The link configuration settings */ + struct phylink_link_state link_config; + struct gpio_desc *link_gpio; + + struct mutex state_mutex; + struct phylink_link_state phy_state; + struct work_struct resolve; + + bool mac_link_dropped; + + struct sfp_bus *sfp_bus; +}; + +static inline void linkmode_zero(unsigned long *dst) +{ + bitmap_zero(dst, __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static inline void linkmode_copy(unsigned long *dst, const unsigned long *src) +{ + bitmap_copy(dst, src, __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static inline void linkmode_and(unsigned long *dst, const unsigned long *a, + const unsigned long *b) +{ + bitmap_and(dst, a, b, __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static inline void linkmode_or(unsigned long *dst, const unsigned long *a, + const unsigned long *b) +{ + bitmap_or(dst, a, b, __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static inline bool linkmode_empty(const unsigned long *src) +{ + return bitmap_empty(src, __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +void phylink_set_port_modes(unsigned long *mask) +{ + phylink_set(mask, TP); + phylink_set(mask, AUI); 
+ phylink_set(mask, MII); + phylink_set(mask, FIBRE); + phylink_set(mask, BNC); + phylink_set(mask, Backplane); +} +EXPORT_SYMBOL_GPL(phylink_set_port_modes); + +static int phylink_is_empty_linkmode(const unsigned long *linkmode) +{ + __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp) = { 0, }; + + phylink_set_port_modes(tmp); + phylink_set(tmp, Autoneg); + phylink_set(tmp, Pause); + phylink_set(tmp, Asym_Pause); + + bitmap_andnot(tmp, linkmode, tmp, __ETHTOOL_LINK_MODE_MASK_NBITS); + + return linkmode_empty(tmp); +} + +static const char *phylink_an_mode_str(unsigned int mode) +{ + static const char *modestr[] = { + [MLO_AN_PHY] = "phy", + [MLO_AN_FIXED] = "fixed", + [MLO_AN_SGMII] = "SGMII", + [MLO_AN_8023Z] = "802.3z", + }; + + return mode < ARRAY_SIZE(modestr) ? modestr[mode] : "unknown"; +} + +static int phylink_validate(struct phylink *pl, unsigned long *supported, + struct phylink_link_state *state) +{ + pl->ops->validate(pl->netdev, supported, state); + + return phylink_is_empty_linkmode(supported) ? -EINVAL : 0; +} + +static int phylink_parse_fixedlink(struct phylink *pl, struct device_node *np) +{ + struct device_node *fixed_node; + const struct phy_setting *s; + struct gpio_desc *desc; + const __be32 *fixed_prop; + u32 speed; + int ret, len; + + fixed_node = of_get_child_by_name(np, "fixed-link"); + if (fixed_node) { + ret = of_property_read_u32(fixed_node, "speed", &speed); + + pl->link_config.speed = speed; + pl->link_config.duplex = DUPLEX_HALF; + + if (of_property_read_bool(fixed_node, "full-duplex")) + pl->link_config.duplex = DUPLEX_FULL; + + /* We treat the "pause" and "asym-pause" terminology as + * defining the link partner's ability. */ + if (of_property_read_bool(fixed_node, "pause")) + pl->link_config.pause |= MLO_PAUSE_SYM; + if (of_property_read_bool(fixed_node, "asym-pause")) + pl->link_config.pause |= MLO_PAUSE_ASYM; + + if (ret == 0) { + desc = fwnode_get_named_gpiod(&fixed_node->fwnode, + "link-gpios", 0, + GPIOD_IN, "?"); + + if (!IS_ERR(desc)) + pl->link_gpio = desc; + else if (desc == ERR_PTR(-EPROBE_DEFER)) + ret = -EPROBE_DEFER; + } + of_node_put(fixed_node); + + if (ret) + return ret; + } else { + fixed_prop = of_get_property(np, "fixed-link", &len); + if (!fixed_prop) { + netdev_err(pl->netdev, "broken fixed-link?\n"); + return -EINVAL; + } + if (len == 5 * sizeof(*fixed_prop)) { + pl->link_config.duplex = be32_to_cpu(fixed_prop[1]) ? + DUPLEX_FULL : DUPLEX_HALF; + pl->link_config.speed = be32_to_cpu(fixed_prop[2]); + if (be32_to_cpu(fixed_prop[3])) + pl->link_config.pause |= MLO_PAUSE_SYM; + if (be32_to_cpu(fixed_prop[4])) + pl->link_config.pause |= MLO_PAUSE_ASYM; + } + } + + if (pl->link_config.speed > SPEED_1000 && + pl->link_config.duplex != DUPLEX_FULL) + netdev_warn(pl->netdev, "fixed link specifies half duplex for %dMbps link?\n", + pl->link_config.speed); + + bitmap_fill(pl->supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_copy(pl->link_config.advertising, pl->supported); + phylink_validate(pl, pl->supported, &pl->link_config); + + s = phy_lookup_setting(pl->link_config.speed, pl->link_config.duplex, + pl->supported, + __ETHTOOL_LINK_MODE_MASK_NBITS, true); + linkmode_zero(pl->supported); + phylink_set(pl->supported, MII); + if (s) { + __set_bit(s->bit, pl->supported); + } else { + netdev_warn(pl->netdev, "fixed link %s duplex %dMbps not recognised\n", + pl->link_config.duplex == DUPLEX_FULL ? 
"full" : "half", + pl->link_config.speed); + } + + linkmode_and(pl->link_config.advertising, pl->link_config.advertising, + pl->supported); + + pl->link_config.link = 1; + pl->link_config.an_complete = 1; + + return 0; +} + +static int phylink_parse_mode(struct phylink *pl, struct device_node *np) +{ + struct device_node *dn; + const char *managed; + + dn = of_get_child_by_name(np, "fixed-link"); + if (dn || of_find_property(np, "fixed-link", NULL)) + pl->link_an_mode = MLO_AN_FIXED; + of_node_put(dn); + + if (of_property_read_string(np, "managed", &managed) == 0 && + strcmp(managed, "in-band-status") == 0) { + if (pl->link_an_mode == MLO_AN_FIXED) { + netdev_err(pl->netdev, + "can't use both fixed-link and in-band-status\n"); + return -EINVAL; + } + + linkmode_zero(pl->supported); + phylink_set(pl->supported, MII); + phylink_set(pl->supported, Autoneg); + phylink_set(pl->supported, Asym_Pause); + phylink_set(pl->supported, Pause); + pl->link_config.an_enabled = true; + + switch (pl->link_config.interface) { + case PHY_INTERFACE_MODE_SGMII: + phylink_set(pl->supported, 10baseT_Half); + phylink_set(pl->supported, 10baseT_Full); + phylink_set(pl->supported, 100baseT_Half); + phylink_set(pl->supported, 100baseT_Full); + phylink_set(pl->supported, 1000baseT_Half); + phylink_set(pl->supported, 1000baseT_Full); + pl->link_an_mode = MLO_AN_SGMII; + break; + + case PHY_INTERFACE_MODE_1000BASEX: + phylink_set(pl->supported, 1000baseX_Full); + pl->link_an_mode = MLO_AN_8023Z; + break; + + case PHY_INTERFACE_MODE_2500BASEX: + phylink_set(pl->supported, 2500baseX_Full); + pl->link_an_mode = MLO_AN_8023Z; + break; + + case PHY_INTERFACE_MODE_10GKR: + phylink_set(pl->supported, 10baseT_Half); + phylink_set(pl->supported, 10baseT_Full); + phylink_set(pl->supported, 100baseT_Half); + phylink_set(pl->supported, 100baseT_Full); + phylink_set(pl->supported, 1000baseT_Half); + phylink_set(pl->supported, 1000baseT_Full); + phylink_set(pl->supported, 1000baseX_Full); + phylink_set(pl->supported, 10000baseKR_Full); + phylink_set(pl->supported, 10000baseCR_Full); + phylink_set(pl->supported, 10000baseSR_Full); + phylink_set(pl->supported, 10000baseLR_Full); + phylink_set(pl->supported, 10000baseLRM_Full); + phylink_set(pl->supported, 10000baseER_Full); + pl->link_an_mode = MLO_AN_SGMII; + break; + + default: + netdev_err(pl->netdev, + "incorrect link mode %s for in-band status\n", + phy_modes(pl->link_config.interface)); + return -EINVAL; + } + + linkmode_copy(pl->link_config.advertising, pl->supported); + + if (phylink_validate(pl, pl->supported, &pl->link_config)) { + netdev_err(pl->netdev, + "failed to validate link configuration for in-band status\n"); + return -EINVAL; + } + } + + return 0; +} + +static void phylink_mac_config(struct phylink *pl, + const struct phylink_link_state *state) +{ + netdev_dbg(pl->netdev, + "%s: mode=%s/%s/%s/%s adv=%*pb pause=%02x link=%u an=%u\n", + __func__, phylink_an_mode_str(pl->link_an_mode), + phy_modes(state->interface), + phy_speed_to_str(state->speed), + phy_duplex_to_str(state->duplex), + __ETHTOOL_LINK_MODE_MASK_NBITS, state->advertising, + state->pause, state->link, state->an_enabled); + + pl->ops->mac_config(pl->netdev, pl->link_an_mode, state); +} + +static void phylink_mac_an_restart(struct phylink *pl) +{ + if (pl->link_config.an_enabled && + (pl->link_config.interface == PHY_INTERFACE_MODE_1000BASEX || + pl->link_config.interface == PHY_INTERFACE_MODE_2500BASEX)) + pl->ops->mac_an_restart(pl->netdev); +} + +static int phylink_get_mac_state(struct phylink *pl, 
struct phylink_link_state *state)
+{
+ struct net_device *ndev = pl->netdev;
+
+ linkmode_copy(state->advertising, pl->link_config.advertising);
+ linkmode_zero(state->lp_advertising);
+ state->interface = pl->link_config.interface;
+ state->an_enabled = pl->link_config.an_enabled;
+ state->link = 1;
+
+ return pl->ops->mac_link_state(ndev, state);
+}
+
+/* The fixed state is... fixed except for the link state,
+ * which may be determined by a GPIO.
+ */
+static void phylink_get_fixed_state(struct phylink *pl, struct phylink_link_state *state)
+{
+ *state = pl->link_config;
+ if (pl->link_gpio)
+ state->link = !!gpiod_get_value(pl->link_gpio);
+}
+
+/* Flow control is resolved according to our and the link partner's
+ * advertisements, using the following table drawn from the 802.3 specs:
+ *    Local device      Link partner
+ *    Pause AsymDir     Pause AsymDir     Result
+ *      1      X          1      X        TX+RX
+ *      0      1          1      1        RX
+ *      1      1          0      1        TX
+ */
+static void phylink_resolve_flow(struct phylink *pl,
+ struct phylink_link_state *state)
+{
+ int new_pause = 0;
+
+ if (pl->link_config.pause & MLO_PAUSE_AN) {
+ int pause = 0;
+
+ if (phylink_test(pl->link_config.advertising, Pause))
+ pause |= MLO_PAUSE_SYM;
+ if (phylink_test(pl->link_config.advertising, Asym_Pause))
+ pause |= MLO_PAUSE_ASYM;
+
+ pause &= state->pause;
+
+ if (pause & MLO_PAUSE_SYM)
+ new_pause = MLO_PAUSE_TX | MLO_PAUSE_RX;
+ else if (pause & MLO_PAUSE_ASYM)
+ new_pause = state->pause & MLO_PAUSE_SYM ?
+ MLO_PAUSE_RX : MLO_PAUSE_TX;
+ } else {
+ new_pause = pl->link_config.pause & MLO_PAUSE_TXRX_MASK;
+ }
+
+ state->pause &= ~MLO_PAUSE_TXRX_MASK;
+ state->pause |= new_pause;
+}
+
+static const char *phylink_pause_to_str(int pause)
+{
+ switch (pause & MLO_PAUSE_TXRX_MASK) {
+ case MLO_PAUSE_TX | MLO_PAUSE_RX:
+ return "rx/tx";
+ case MLO_PAUSE_TX:
+ return "tx";
+ case MLO_PAUSE_RX:
+ return "rx";
+ default:
+ return "off";
+ }
+}
+
+static void phylink_resolve(struct work_struct *w)
+{
+ struct phylink *pl = container_of(w, struct phylink, resolve);
+ struct phylink_link_state link_state;
+ struct net_device *ndev = pl->netdev;
+
+ mutex_lock(&pl->state_mutex);
+ if (pl->phylink_disable_state) {
+ pl->mac_link_dropped = false;
+ link_state.link = false;
+ } else if (pl->mac_link_dropped) {
+ link_state.link = false;
+ } else {
+ switch (pl->link_an_mode) {
+ case MLO_AN_PHY:
+ link_state = pl->phy_state;
+ phylink_resolve_flow(pl, &link_state);
+ phylink_mac_config(pl, &link_state);
+ break;
+
+ case MLO_AN_FIXED:
+ phylink_get_fixed_state(pl, &link_state);
+ phylink_mac_config(pl, &link_state);
+ break;
+
+ case MLO_AN_SGMII:
+ phylink_get_mac_state(pl, &link_state);
+ if (pl->phydev) {
+ bool changed = false;
+
+ link_state.link = link_state.link &&
+ pl->phy_state.link;
+
+ if (pl->phy_state.interface !=
+ link_state.interface) {
+ link_state.interface = pl->phy_state.interface;
+ changed = true;
+ }
+
+ /* Propagate the flow control from the PHY
+ * to the MAC. Also propagate the interface
+ * if changed. 
+ */ + if (pl->phy_state.link || changed) { + link_state.pause |= pl->phy_state.pause; + phylink_resolve_flow(pl, &link_state); + + phylink_mac_config(pl, &link_state); + } + } + break; + + case MLO_AN_8023Z: + phylink_get_mac_state(pl, &link_state); + break; + } + } + + if (link_state.link != netif_carrier_ok(ndev)) { + if (!link_state.link) { + netif_carrier_off(ndev); + pl->ops->mac_link_down(ndev, pl->link_an_mode); + netdev_info(ndev, "Link is Down\n"); + } else { + pl->ops->mac_link_up(ndev, pl->link_an_mode, + pl->phydev); + + netif_carrier_on(ndev); + + netdev_info(ndev, + "Link is Up - %s/%s - flow control %s\n", + phy_speed_to_str(link_state.speed), + phy_duplex_to_str(link_state.duplex), + phylink_pause_to_str(link_state.pause)); + } + } + if (!link_state.link && pl->mac_link_dropped) { + pl->mac_link_dropped = false; + queue_work(system_power_efficient_wq, &pl->resolve); + } + mutex_unlock(&pl->state_mutex); +} + +static void phylink_run_resolve(struct phylink *pl) +{ + if (!pl->phylink_disable_state) + queue_work(system_power_efficient_wq, &pl->resolve); +} + +static const struct sfp_upstream_ops sfp_phylink_ops; + +static int phylink_register_sfp(struct phylink *pl, struct device_node *np) +{ + struct device_node *sfp_np; + + sfp_np = of_parse_phandle(np, "sfp", 0); + if (!sfp_np) + return 0; + + pl->sfp_bus = sfp_register_upstream(sfp_np, pl->netdev, pl, + &sfp_phylink_ops); + if (!pl->sfp_bus) + return -ENOMEM; + + return 0; +} + +struct phylink *phylink_create(struct net_device *ndev, struct device_node *np, + phy_interface_t iface, const struct phylink_mac_ops *ops) +{ + struct phylink *pl; + int ret; + + pl = kzalloc(sizeof(*pl), GFP_KERNEL); + if (!pl) + return ERR_PTR(-ENOMEM); + + mutex_init(&pl->state_mutex); + INIT_WORK(&pl->resolve, phylink_resolve); + pl->netdev = ndev; + pl->phy_state.interface = iface; + pl->link_interface = iface; + pl->link_port = PORT_MII; + pl->link_config.interface = iface; + pl->link_config.pause = MLO_PAUSE_AN; + pl->link_config.speed = SPEED_UNKNOWN; + pl->link_config.duplex = DUPLEX_UNKNOWN; + pl->ops = ops; + __set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); + + bitmap_fill(pl->supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_copy(pl->link_config.advertising, pl->supported); + phylink_validate(pl, pl->supported, &pl->link_config); + + ret = phylink_parse_mode(pl, np); + if (ret < 0) { + kfree(pl); + return ERR_PTR(ret); + } + + if (pl->link_an_mode == MLO_AN_FIXED) { + ret = phylink_parse_fixedlink(pl, np); + if (ret < 0) { + kfree(pl); + return ERR_PTR(ret); + } + } + + ret = phylink_register_sfp(pl, np); + if (ret < 0) { + kfree(pl); + return ERR_PTR(ret); + } + + return pl; +} +EXPORT_SYMBOL_GPL(phylink_create); + +void phylink_destroy(struct phylink *pl) +{ + if (pl->sfp_bus) + sfp_unregister_upstream(pl->sfp_bus); + + cancel_work_sync(&pl->resolve); + kfree(pl); +} +EXPORT_SYMBOL_GPL(phylink_destroy); + +void phylink_phy_change(struct phy_device *phydev, bool up, bool do_carrier) +{ + struct phylink *pl = phydev->phylink; + + mutex_lock(&pl->state_mutex); + pl->phy_state.speed = phydev->speed; + pl->phy_state.duplex = phydev->duplex; + pl->phy_state.pause = MLO_PAUSE_NONE; + if (phydev->pause) + pl->phy_state.pause |= MLO_PAUSE_SYM; + if (phydev->asym_pause) + pl->phy_state.pause |= MLO_PAUSE_ASYM; + pl->phy_state.interface = phydev->interface; + pl->phy_state.link = up; + mutex_unlock(&pl->state_mutex); + + phylink_run_resolve(pl); + + netdev_dbg(pl->netdev, "phy link %s %s/%s/%s\n", up ? 
"up" : "down", + phy_modes(phydev->interface), + phy_speed_to_str(phydev->speed), + phy_duplex_to_str(phydev->duplex)); +} + +static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy) +{ + struct phylink_link_state config; + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); + u32 advertising; + int ret; + + memset(&config, 0, sizeof(config)); + ethtool_convert_legacy_u32_to_link_mode(supported, phy->supported); + ethtool_convert_legacy_u32_to_link_mode(config.advertising, + phy->advertising); + config.interface = pl->link_config.interface; + + /* + * This is the new way of dealing with flow control for PHYs, + * as described by Timur Tabi in commit 529ed1275263 ("net: phy: + * phy drivers should not set SUPPORTED_[Asym_]Pause") except + * using our validate call to the MAC, we rely upon the MAC + * clearing the bits from both supported and advertising fields. + */ + if (phylink_test(supported, Pause)) + phylink_set(config.advertising, Pause); + if (phylink_test(supported, Asym_Pause)) + phylink_set(config.advertising, Asym_Pause); + + ret = phylink_validate(pl, supported, &config); + if (ret) + return ret; + + phy->phylink = pl; + phy->phy_link_change = phylink_phy_change; + + netdev_info(pl->netdev, + "PHY [%s] driver [%s]\n", dev_name(&phy->mdio.dev), + phy->drv->name); + + mutex_lock(&phy->lock); + mutex_lock(&pl->state_mutex); + pl->netdev->phydev = phy; + pl->phydev = phy; + linkmode_copy(pl->supported, supported); + linkmode_copy(pl->link_config.advertising, config.advertising); + + /* Restrict the phy advertisment according to the MAC support. */ + ethtool_convert_link_mode_to_legacy_u32(&advertising, config.advertising); + phy->advertising = advertising; + mutex_unlock(&pl->state_mutex); + mutex_unlock(&phy->lock); + + netdev_dbg(pl->netdev, + "phy: setting supported %*pb advertising 0x%08x\n", + __ETHTOOL_LINK_MODE_MASK_NBITS, pl->supported, + phy->advertising); + + phy_start_machine(phy); + if (phy->irq > 0) + phy_start_interrupts(phy); + + return 0; +} + +int phylink_connect_phy(struct phylink *pl, struct phy_device *phy) +{ + int ret; + + ret = phy_attach_direct(pl->netdev, phy, 0, pl->link_interface); + if (ret) + return ret; + + ret = phylink_bringup_phy(pl, phy); + if (ret) + phy_detach(phy); + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_connect_phy); + +int phylink_of_phy_connect(struct phylink *pl, struct device_node *dn) +{ + struct device_node *phy_node; + struct phy_device *phy_dev; + int ret; + + /* Fixed links are handled without needing a PHY */ + if (pl->link_an_mode == MLO_AN_FIXED) + return 0; + + phy_node = of_parse_phandle(dn, "phy-handle", 0); + if (!phy_node) + phy_node = of_parse_phandle(dn, "phy", 0); + if (!phy_node) + phy_node = of_parse_phandle(dn, "phy-device", 0); + + if (!phy_node) { + if (pl->link_an_mode == MLO_AN_PHY) { + netdev_err(pl->netdev, "unable to find PHY node\n"); + return -ENODEV; + } + return 0; + } + + phy_dev = of_phy_attach(pl->netdev, phy_node, 0, pl->link_interface); + /* We're done with the phy_node handle */ + of_node_put(phy_node); + + if (!phy_dev) + return -ENODEV; + + ret = phylink_bringup_phy(pl, phy_dev); + if (ret) + phy_detach(phy_dev); + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_of_phy_connect); + +void phylink_disconnect_phy(struct phylink *pl) +{ + struct phy_device *phy; + + WARN_ON(!lockdep_rtnl_is_held()); + + phy = pl->phydev; + if (phy) { + mutex_lock(&phy->lock); + mutex_lock(&pl->state_mutex); + pl->netdev->phydev = NULL; + pl->phydev = NULL; + mutex_unlock(&pl->state_mutex); + 
mutex_unlock(&phy->lock); + flush_work(&pl->resolve); + + phy_disconnect(phy); + } +} +EXPORT_SYMBOL_GPL(phylink_disconnect_phy); + +void phylink_mac_change(struct phylink *pl, bool up) +{ + if (!up) + pl->mac_link_dropped = true; + phylink_run_resolve(pl); + netdev_dbg(pl->netdev, "mac link %s\n", up ? "up" : "down"); +} +EXPORT_SYMBOL_GPL(phylink_mac_change); + +void phylink_start(struct phylink *pl) +{ + WARN_ON(!lockdep_rtnl_is_held()); + + netdev_info(pl->netdev, "configuring for %s/%s link mode\n", + phylink_an_mode_str(pl->link_an_mode), + phy_modes(pl->link_config.interface)); + + /* Apply the link configuration to the MAC when starting. This allows + * a fixed-link to start with the correct parameters, and also + * ensures that we set the appropriate advertisment for Serdes links. + */ + phylink_resolve_flow(pl, &pl->link_config); + phylink_mac_config(pl, &pl->link_config); + + clear_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); + phylink_run_resolve(pl); + + if (pl->sfp_bus) + sfp_upstream_start(pl->sfp_bus); + if (pl->phydev) + phy_start(pl->phydev); +} +EXPORT_SYMBOL_GPL(phylink_start); + +void phylink_stop(struct phylink *pl) +{ + WARN_ON(!lockdep_rtnl_is_held()); + + if (pl->phydev) + phy_stop(pl->phydev); + if (pl->sfp_bus) + sfp_upstream_stop(pl->sfp_bus); + + set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); + flush_work(&pl->resolve); +} +EXPORT_SYMBOL_GPL(phylink_stop); + +void phylink_ethtool_get_wol(struct phylink *pl, struct ethtool_wolinfo *wol) +{ + WARN_ON(!lockdep_rtnl_is_held()); + + wol->supported = 0; + wol->wolopts = 0; + + if (pl->phydev) + phy_ethtool_get_wol(pl->phydev, wol); +} +EXPORT_SYMBOL_GPL(phylink_ethtool_get_wol); + +int phylink_ethtool_set_wol(struct phylink *pl, struct ethtool_wolinfo *wol) +{ + int ret = -EOPNOTSUPP; + + WARN_ON(!lockdep_rtnl_is_held()); + + if (pl->phydev) + ret = phy_ethtool_set_wol(pl->phydev, wol); + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_ethtool_set_wol); + +static void phylink_merge_link_mode(unsigned long *dst, const unsigned long *b) +{ + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask); + + linkmode_zero(mask); + phylink_set_port_modes(mask); + + linkmode_and(dst, dst, mask); + linkmode_or(dst, dst, b); +} + +static void phylink_get_ksettings(const struct phylink_link_state *state, + struct ethtool_link_ksettings *kset) +{ + phylink_merge_link_mode(kset->link_modes.advertising, state->advertising); + linkmode_copy(kset->link_modes.lp_advertising, state->lp_advertising); + kset->base.speed = state->speed; + kset->base.duplex = state->duplex; + kset->base.autoneg = state->an_enabled ? AUTONEG_ENABLE : + AUTONEG_DISABLE; +} + +int phylink_ethtool_ksettings_get(struct phylink *pl, + struct ethtool_link_ksettings *kset) +{ + struct phylink_link_state link_state; + + WARN_ON(!lockdep_rtnl_is_held()); + + if (pl->phydev) { + phy_ethtool_ksettings_get(pl->phydev, kset); + } else { + kset->base.port = pl->link_port; + } + + linkmode_copy(kset->link_modes.supported, pl->supported); + + switch (pl->link_an_mode) { + case MLO_AN_FIXED: + /* We are using fixed settings. Report these as the + * current link settings - and note that these also + * represent the supported speeds/duplex/pause modes. + */ + phylink_get_fixed_state(pl, &link_state); + phylink_get_ksettings(&link_state, kset); + break; + + case MLO_AN_SGMII: + /* If there is a phy attached, then use the reported + * settings from the phy with no modification. 
+ */ + if (pl->phydev) + break; + + case MLO_AN_8023Z: + phylink_get_mac_state(pl, &link_state); + + /* The MAC is reporting the link results from its own PCS + * layer via in-band status. Report these as the current + * link settings. + */ + phylink_get_ksettings(&link_state, kset); + break; + } + + return 0; +} +EXPORT_SYMBOL_GPL(phylink_ethtool_ksettings_get); + +int phylink_ethtool_ksettings_set(struct phylink *pl, + const struct ethtool_link_ksettings *kset) +{ + struct ethtool_link_ksettings our_kset; + struct phylink_link_state config; + int ret; + + WARN_ON(!lockdep_rtnl_is_held()); + + if (kset->base.autoneg != AUTONEG_DISABLE && + kset->base.autoneg != AUTONEG_ENABLE) + return -EINVAL; + + config = pl->link_config; + + /* Mask out unsupported advertisments */ + linkmode_and(config.advertising, kset->link_modes.advertising, + pl->supported); + + /* FIXME: should we reject autoneg if phy/mac does not support it? */ + if (kset->base.autoneg == AUTONEG_DISABLE) { + const struct phy_setting *s; + + /* Autonegotiation disabled, select a suitable speed and + * duplex. + */ + s = phy_lookup_setting(kset->base.speed, kset->base.duplex, + pl->supported, + __ETHTOOL_LINK_MODE_MASK_NBITS, false); + if (!s) + return -EINVAL; + + /* If we have a fixed link (as specified by firmware), refuse + * to change link parameters. + */ + if (pl->link_an_mode == MLO_AN_FIXED && + (s->speed != pl->link_config.speed || + s->duplex != pl->link_config.duplex)) + return -EINVAL; + + config.speed = s->speed; + config.duplex = s->duplex; + config.an_enabled = false; + + __clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, config.advertising); + } else { + /* If we have a fixed link, refuse to enable autonegotiation */ + if (pl->link_an_mode == MLO_AN_FIXED) + return -EINVAL; + + config.speed = SPEED_UNKNOWN; + config.duplex = DUPLEX_UNKNOWN; + config.an_enabled = true; + + __set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, config.advertising); + } + + if (phylink_validate(pl, pl->supported, &config)) + return -EINVAL; + + /* If autonegotiation is enabled, we must have an advertisment */ + if (config.an_enabled && phylink_is_empty_linkmode(config.advertising)) + return -EINVAL; + + our_kset = *kset; + linkmode_copy(our_kset.link_modes.advertising, config.advertising); + our_kset.base.speed = config.speed; + our_kset.base.duplex = config.duplex; + + /* If we have a PHY, configure the phy */ + if (pl->phydev) { + ret = phy_ethtool_ksettings_set(pl->phydev, &our_kset); + if (ret) + return ret; + } + + mutex_lock(&pl->state_mutex); + /* Configure the MAC to match the new settings */ + linkmode_copy(pl->link_config.advertising, our_kset.link_modes.advertising); + pl->link_config.speed = our_kset.base.speed; + pl->link_config.duplex = our_kset.base.duplex; + pl->link_config.an_enabled = our_kset.base.autoneg != AUTONEG_DISABLE; + + if (!test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) { + phylink_mac_config(pl, &pl->link_config); + phylink_mac_an_restart(pl); + } + mutex_unlock(&pl->state_mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(phylink_ethtool_ksettings_set); + +int phylink_ethtool_nway_reset(struct phylink *pl) +{ + int ret = 0; + + WARN_ON(!lockdep_rtnl_is_held()); + + if (pl->phydev) + ret = phy_restart_aneg(pl->phydev); + phylink_mac_an_restart(pl); + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_ethtool_nway_reset); + +void phylink_ethtool_get_pauseparam(struct phylink *pl, + struct ethtool_pauseparam *pause) +{ + WARN_ON(!lockdep_rtnl_is_held()); + + pause->autoneg = !!(pl->link_config.pause & MLO_PAUSE_AN); + 
pause->rx_pause = !!(pl->link_config.pause & MLO_PAUSE_RX); + pause->tx_pause = !!(pl->link_config.pause & MLO_PAUSE_TX); +} +EXPORT_SYMBOL_GPL(phylink_ethtool_get_pauseparam); + +int phylink_ethtool_set_pauseparam(struct phylink *pl, + struct ethtool_pauseparam *pause) +{ + struct phylink_link_state *config = &pl->link_config; + + WARN_ON(!lockdep_rtnl_is_held()); + + if (!phylink_test(pl->supported, Pause) && + !phylink_test(pl->supported, Asym_Pause)) + return -EOPNOTSUPP; + + if (!phylink_test(pl->supported, Asym_Pause) && + !pause->autoneg && pause->rx_pause != pause->tx_pause) + return -EINVAL; + + config->pause &= ~(MLO_PAUSE_AN | MLO_PAUSE_TXRX_MASK); + + if (pause->autoneg) + config->pause |= MLO_PAUSE_AN; + if (pause->rx_pause) + config->pause |= MLO_PAUSE_RX; + if (pause->tx_pause) + config->pause |= MLO_PAUSE_TX; + + if (!test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) { + switch (pl->link_an_mode) { + case MLO_AN_PHY: + /* Silently mark the carrier down, and then trigger a resolve */ + netif_carrier_off(pl->netdev); + phylink_run_resolve(pl); + break; + + case MLO_AN_FIXED: + /* Should we allow fixed links to change against the config? */ + phylink_resolve_flow(pl, config); + phylink_mac_config(pl, config); + break; + + case MLO_AN_SGMII: + case MLO_AN_8023Z: + phylink_mac_config(pl, config); + phylink_mac_an_restart(pl); + break; + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(phylink_ethtool_set_pauseparam); + +int phylink_ethtool_get_module_info(struct phylink *pl, + struct ethtool_modinfo *modinfo) +{ + int ret = -EOPNOTSUPP; + + WARN_ON(!lockdep_rtnl_is_held()); + + if (pl->sfp_bus) + ret = sfp_get_module_info(pl->sfp_bus, modinfo); + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_ethtool_get_module_info); + +int phylink_ethtool_get_module_eeprom(struct phylink *pl, + struct ethtool_eeprom *ee, u8 *buf) +{ + int ret = -EOPNOTSUPP; + + WARN_ON(!lockdep_rtnl_is_held()); + + if (pl->sfp_bus) + ret = sfp_get_module_eeprom(pl->sfp_bus, ee, buf); + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_ethtool_get_module_eeprom); + +int phylink_init_eee(struct phylink *pl, bool clk_stop_enable) +{ + int ret = -EPROTONOSUPPORT; + + WARN_ON(!lockdep_rtnl_is_held()); + + if (pl->phydev) + ret = phy_init_eee(pl->phydev, clk_stop_enable); + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_init_eee); + +int phylink_get_eee_err(struct phylink *pl) +{ + int ret = 0; + + WARN_ON(!lockdep_rtnl_is_held()); + + if (pl->phydev) + ret = phy_get_eee_err(pl->phydev); + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_get_eee_err); + +int phylink_ethtool_get_eee(struct phylink *pl, struct ethtool_eee *eee) +{ + int ret = -EOPNOTSUPP; + + WARN_ON(!lockdep_rtnl_is_held()); + + if (pl->phydev) + ret = phy_ethtool_get_eee(pl->phydev, eee); + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_ethtool_get_eee); + +int phylink_ethtool_set_eee(struct phylink *pl, struct ethtool_eee *eee) +{ + int ret = -EOPNOTSUPP; + + WARN_ON(!lockdep_rtnl_is_held()); + + if (pl->phydev) + ret = phy_ethtool_set_eee(pl->phydev, eee); + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_ethtool_set_eee); + +/* This emulates MII registers for a fixed-mode phy operating as per the + * passed in state. "aneg" defines if we report negotiation is possible. + * + * FIXME: should deal with negotiation state too. 
+ */ +static int phylink_mii_emul_read(struct net_device *ndev, unsigned int reg, + struct phylink_link_state *state, bool aneg) +{ + struct fixed_phy_status fs; + int val; + + fs.link = state->link; + fs.speed = state->speed; + fs.duplex = state->duplex; + fs.pause = state->pause & MLO_PAUSE_SYM; + fs.asym_pause = state->pause & MLO_PAUSE_ASYM; + + val = swphy_read_reg(reg, &fs); + if (reg == MII_BMSR) { + if (!state->an_complete) + val &= ~BMSR_ANEGCOMPLETE; + if (!aneg) + val &= ~BMSR_ANEGCAPABLE; + } + return val; +} + +static int phylink_phy_read(struct phylink *pl, unsigned int phy_id, + unsigned int reg) +{ + struct phy_device *phydev = pl->phydev; + int prtad, devad; + + if (mdio_phy_id_is_c45(phy_id)) { + prtad = mdio_phy_id_prtad(phy_id); + devad = mdio_phy_id_devad(phy_id); + devad = MII_ADDR_C45 | devad << 16 | reg; + } else if (phydev->is_c45) { + switch (reg) { + case MII_BMCR: + case MII_BMSR: + case MII_PHYSID1: + case MII_PHYSID2: + devad = __ffs(phydev->c45_ids.devices_in_package); + break; + case MII_ADVERTISE: + case MII_LPA: + if (!(phydev->c45_ids.devices_in_package & MDIO_DEVS_AN)) + return -EINVAL; + devad = MDIO_MMD_AN; + if (reg == MII_ADVERTISE) + reg = MDIO_AN_ADVERTISE; + else + reg = MDIO_AN_LPA; + break; + default: + return -EINVAL; + } + prtad = phy_id; + devad = MII_ADDR_C45 | devad << 16 | reg; + } else { + prtad = phy_id; + devad = reg; + } + return mdiobus_read(pl->phydev->mdio.bus, prtad, devad); +} + +static int phylink_phy_write(struct phylink *pl, unsigned int phy_id, + unsigned int reg, unsigned int val) +{ + struct phy_device *phydev = pl->phydev; + int prtad, devad; + + if (mdio_phy_id_is_c45(phy_id)) { + prtad = mdio_phy_id_prtad(phy_id); + devad = mdio_phy_id_devad(phy_id); + devad = MII_ADDR_C45 | devad << 16 | reg; + } else if (phydev->is_c45) { + switch (reg) { + case MII_BMCR: + case MII_BMSR: + case MII_PHYSID1: + case MII_PHYSID2: + devad = __ffs(phydev->c45_ids.devices_in_package); + break; + case MII_ADVERTISE: + case MII_LPA: + if (!(phydev->c45_ids.devices_in_package & MDIO_DEVS_AN)) + return -EINVAL; + devad = MDIO_MMD_AN; + if (reg == MII_ADVERTISE) + reg = MDIO_AN_ADVERTISE; + else + reg = MDIO_AN_LPA; + break; + default: + return -EINVAL; + } + prtad = phy_id; + devad = MII_ADDR_C45 | devad << 16 | reg; + } else { + prtad = phy_id; + devad = reg; + } + + return mdiobus_write(phydev->mdio.bus, prtad, devad, val); +} + +static int phylink_mii_read(struct phylink *pl, unsigned int phy_id, + unsigned int reg) +{ + struct phylink_link_state state; + int val = 0xffff; + + switch (pl->link_an_mode) { + case MLO_AN_FIXED: + if (phy_id == 0) { + phylink_get_fixed_state(pl, &state); + val = phylink_mii_emul_read(pl->netdev, reg, &state, + true); + } + break; + + case MLO_AN_PHY: + return -EOPNOTSUPP; + + case MLO_AN_SGMII: + /* No phy, fall through to 8023z method */ + case MLO_AN_8023Z: + if (phy_id == 0) { + val = phylink_get_mac_state(pl, &state); + if (val < 0) + return val; + + val = phylink_mii_emul_read(pl->netdev, reg, &state, + true); + } + break; + } + + return val & 0xffff; +} + +static int phylink_mii_write(struct phylink *pl, unsigned int phy_id, + unsigned int reg, unsigned int val) +{ + switch (pl->link_an_mode) { + case MLO_AN_FIXED: + break; + + case MLO_AN_PHY: + return -EOPNOTSUPP; + + case MLO_AN_SGMII: + /* No phy, fall through to 8023z method */ + case MLO_AN_8023Z: + break; + } + + return 0; +} + +int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd) +{ + struct mii_ioctl_data *mii = if_mii(ifr); + int 
ret; + + WARN_ON(!lockdep_rtnl_is_held()); + + if (pl->phydev) { + /* PHYs only exist for MLO_AN_PHY and MLO_AN_SGMII */ + switch (cmd) { + case SIOCGMIIPHY: + mii->phy_id = pl->phydev->mdio.addr; + + case SIOCGMIIREG: + ret = phylink_phy_read(pl, mii->phy_id, mii->reg_num); + if (ret >= 0) { + mii->val_out = ret; + ret = 0; + } + break; + + case SIOCSMIIREG: + ret = phylink_phy_write(pl, mii->phy_id, mii->reg_num, + mii->val_in); + break; + + default: + ret = phy_mii_ioctl(pl->phydev, ifr, cmd); + break; + } + } else { + switch (cmd) { + case SIOCGMIIPHY: + mii->phy_id = 0; + + case SIOCGMIIREG: + ret = phylink_mii_read(pl, mii->phy_id, mii->reg_num); + if (ret >= 0) { + mii->val_out = ret; + ret = 0; + } + break; + + case SIOCSMIIREG: + ret = phylink_mii_write(pl, mii->phy_id, mii->reg_num, + mii->val_in); + break; + + default: + ret = -EOPNOTSUPP; + break; + } + } + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_mii_ioctl); + + + +static int phylink_sfp_module_insert(void *upstream, + const struct sfp_eeprom_id *id) +{ + struct phylink *pl = upstream; + __ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, }; + struct phylink_link_state config; + phy_interface_t iface; + int mode, ret = 0; + bool changed; + u8 port; + + sfp_parse_support(pl->sfp_bus, id, support); + port = sfp_parse_port(pl->sfp_bus, id, support); + iface = sfp_parse_interface(pl->sfp_bus, id); + + WARN_ON(!lockdep_rtnl_is_held()); + + switch (iface) { + case PHY_INTERFACE_MODE_SGMII: + mode = MLO_AN_SGMII; + break; + case PHY_INTERFACE_MODE_1000BASEX: + mode = MLO_AN_8023Z; + break; + default: + return -EINVAL; + } + + memset(&config, 0, sizeof(config)); + linkmode_copy(config.advertising, support); + config.interface = iface; + config.speed = SPEED_UNKNOWN; + config.duplex = DUPLEX_UNKNOWN; + config.pause = MLO_PAUSE_AN; + config.an_enabled = pl->link_config.an_enabled; + + /* Ignore errors if we're expecting a PHY to attach later */ + ret = phylink_validate(pl, support, &config); + if (ret) { + netdev_err(pl->netdev, "validation of %s/%s with support %*pb failed: %d\n", + phylink_an_mode_str(mode), phy_modes(config.interface), + __ETHTOOL_LINK_MODE_MASK_NBITS, support, ret); + return ret; + } + + netdev_dbg(pl->netdev, "requesting link mode %s/%s with support %*pb\n", + phylink_an_mode_str(mode), phy_modes(config.interface), + __ETHTOOL_LINK_MODE_MASK_NBITS, support); + + if (mode == MLO_AN_8023Z && pl->phydev) + return -EINVAL; + + changed = !bitmap_equal(pl->supported, support, + __ETHTOOL_LINK_MODE_MASK_NBITS); + if (changed) { + linkmode_copy(pl->supported, support); + linkmode_copy(pl->link_config.advertising, config.advertising); + } + + if (pl->link_an_mode != mode || + pl->link_config.interface != config.interface) { + pl->link_config.interface = config.interface; + pl->link_an_mode = mode; + + changed = true; + + netdev_info(pl->netdev, "switched to %s/%s link mode\n", + phylink_an_mode_str(mode), + phy_modes(config.interface)); + } + + pl->link_port = port; + + if (changed && !test_bit(PHYLINK_DISABLE_STOPPED, + &pl->phylink_disable_state)) + phylink_mac_config(pl, &pl->link_config); + + return ret; +} + +static void phylink_sfp_link_down(void *upstream) +{ + struct phylink *pl = upstream; + + WARN_ON(!lockdep_rtnl_is_held()); + + set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state); + flush_work(&pl->resolve); + + netif_carrier_off(pl->netdev); +} + +static void phylink_sfp_link_up(void *upstream) +{ + struct phylink *pl = upstream; + + WARN_ON(!lockdep_rtnl_is_held()); + + clear_bit(PHYLINK_DISABLE_LINK, 
&pl->phylink_disable_state);
+ phylink_run_resolve(pl);
+}
+
+static int phylink_sfp_connect_phy(void *upstream, struct phy_device *phy)
+{
+ return phylink_connect_phy(upstream, phy);
+}
+
+static void phylink_sfp_disconnect_phy(void *upstream)
+{
+ phylink_disconnect_phy(upstream);
+}
+
+static const struct sfp_upstream_ops sfp_phylink_ops = {
+ .module_insert = phylink_sfp_module_insert,
+ .link_up = phylink_sfp_link_up,
+ .link_down = phylink_sfp_link_down,
+ .connect_phy = phylink_sfp_connect_phy,
+ .disconnect_phy = phylink_sfp_disconnect_phy,
+};
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/rockchip.c b/drivers/net/phy/rockchip.c
new file mode 100644
index 000000000000..c092af137056
--- /dev/null
+++ b/drivers/net/phy/rockchip.c
@@ -0,0 +1,233 @@
+/**
+ * drivers/net/phy/rockchip.c
+ *
+ * Driver for ROCKCHIP Ethernet PHYs
+ *
+ * Copyright (c) 2017, Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * David Wu
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define INTERNAL_EPHY_ID 0x1234d400
+
+#define MII_INTERNAL_CTRL_STATUS 17
+#define SMI_ADDR_TSTCNTL 20
+#define SMI_ADDR_TSTREAD1 21
+#define SMI_ADDR_TSTREAD2 22
+#define SMI_ADDR_TSTWRITE 23
+#define MII_SPECIAL_CONTROL_STATUS 31
+
+#define MII_AUTO_MDIX_EN BIT(7)
+#define MII_MDIX_EN BIT(6)
+
+#define MII_SPEED_10 BIT(2)
+#define MII_SPEED_100 BIT(3)
+
+#define TSTCNTL_RD (BIT(15) | BIT(10))
+#define TSTCNTL_WR (BIT(14) | BIT(10))
+
+#define TSTMODE_ENABLE 0x400
+#define TSTMODE_DISABLE 0x0
+
+#define WR_ADDR_A7CFG 0x18
+
+static int rockchip_init_tstmode(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Enable access to Analog and DSP register banks */
+ ret = phy_write(phydev, SMI_ADDR_TSTCNTL, TSTMODE_ENABLE);
+ if (ret)
+ return ret;
+
+ ret = phy_write(phydev, SMI_ADDR_TSTCNTL, TSTMODE_DISABLE);
+ if (ret)
+ return ret;
+
+ return phy_write(phydev, SMI_ADDR_TSTCNTL, TSTMODE_ENABLE);
+}
+
+static int rockchip_close_tstmode(struct phy_device *phydev)
+{
+ /* Back to basic register bank */
+ return phy_write(phydev, SMI_ADDR_TSTCNTL, TSTMODE_DISABLE);
+}
+
+static int rockchip_integrated_phy_analog_init(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = rockchip_init_tstmode(phydev);
+ if (ret)
+ return ret;
+
+ /*
+ * Adjust the tx amplitude to make the signal better;
+ * the default value is 0x8.
+ */
+ ret = phy_write(phydev, SMI_ADDR_TSTWRITE, 0xB);
+ if (ret)
+ return ret;
+ ret = phy_write(phydev, SMI_ADDR_TSTCNTL, TSTCNTL_WR | WR_ADDR_A7CFG);
+ if (ret)
+ return ret;
+
+ return rockchip_close_tstmode(phydev);
+}
+
+static int rockchip_integrated_phy_config_init(struct phy_device *phydev)
+{
+ int val, ret;
+
+ /*
+ * Auto MDIX has link problems on some boards, so
+ * disable auto MDIX as a workaround. 
+ */ + val = phy_read(phydev, MII_INTERNAL_CTRL_STATUS); + if (val < 0) + return val; + val &= ~MII_AUTO_MDIX_EN; + ret = phy_write(phydev, MII_INTERNAL_CTRL_STATUS, val); + if (ret) + return ret; + + return rockchip_integrated_phy_analog_init(phydev); +} + +static void rockchip_link_change_notify(struct phy_device *phydev) +{ + int speed = SPEED_10; + + if (phydev->autoneg == AUTONEG_ENABLE) { + int reg = phy_read(phydev, MII_SPECIAL_CONTROL_STATUS); + + if (reg < 0) { + phydev_err(phydev, "phy_read err: %d.\n", reg); + return; + } + + if (reg & MII_SPEED_100) + speed = SPEED_100; + else if (reg & MII_SPEED_10) + speed = SPEED_10; + } else { + int bmcr = phy_read(phydev, MII_BMCR); + + if (bmcr < 0) { + phydev_err(phydev, "phy_read err: %d.\n", bmcr); + return; + } + + if (bmcr & BMCR_SPEED100) + speed = SPEED_100; + else + speed = SPEED_10; + } + + /* + * If mode switch happens from 10BT to 100BT, all DSP/AFE + * registers are set to default values. So any AFE/DSP + * registers have to be re-initialized in this case. + */ + if ((phydev->speed == SPEED_10) && (speed == SPEED_100)) { + int ret = rockchip_integrated_phy_analog_init(phydev); + if (ret) + phydev_err(phydev, "rockchip_integrated_phy_analog_init err: %d.\n", + ret); + } +} + +static int rockchip_set_polarity(struct phy_device *phydev, int polarity) +{ + int reg, err, val; + + /* get the current settings */ + reg = phy_read(phydev, MII_INTERNAL_CTRL_STATUS); + if (reg < 0) + return reg; + + reg &= ~MII_AUTO_MDIX_EN; + val = reg; + switch (polarity) { + case ETH_TP_MDI: + val &= ~MII_MDIX_EN; + break; + case ETH_TP_MDI_X: + val |= MII_MDIX_EN; + break; + case ETH_TP_MDI_AUTO: + case ETH_TP_MDI_INVALID: + default: + return 0; + } + + if (val != reg) { + /* Set the new polarity value in the register */ + err = phy_write(phydev, MII_INTERNAL_CTRL_STATUS, val); + if (err) + return err; + } + + return 0; +} + +static int rockchip_config_aneg(struct phy_device *phydev) +{ + int err; + + err = rockchip_set_polarity(phydev, phydev->mdix); + if (err < 0) + return err; + + return genphy_config_aneg(phydev); +} + +static int rockchip_phy_resume(struct phy_device *phydev) +{ + genphy_resume(phydev); + + return rockchip_integrated_phy_config_init(phydev); +} + +static struct phy_driver rockchip_phy_driver[] = { +{ + .phy_id = INTERNAL_EPHY_ID, + .phy_id_mask = 0xfffffff0, + .name = "Rockchip integrated EPHY", + .features = PHY_BASIC_FEATURES, + .flags = 0, + .link_change_notify = rockchip_link_change_notify, + .soft_reset = genphy_soft_reset, + .config_init = rockchip_integrated_phy_config_init, + .config_aneg = rockchip_config_aneg, + .read_status = genphy_read_status, + .suspend = genphy_suspend, + .resume = rockchip_phy_resume, +}, +}; + +module_phy_driver(rockchip_phy_driver); + +static struct mdio_device_id __maybe_unused rockchip_phy_tbl[] = { + { INTERNAL_EPHY_ID, 0xfffffff0 }, + { } +}; + +MODULE_DEVICE_TABLE(mdio, rockchip_phy_tbl); + +MODULE_AUTHOR("David Wu "); +MODULE_DESCRIPTION("Rockchip Ethernet PHY driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c new file mode 100644 index 000000000000..5cb5384697ea --- /dev/null +++ b/drivers/net/phy/sfp-bus.c @@ -0,0 +1,475 @@ +#include +#include +#include +#include +#include +#include +#include + +#include "sfp.h" + +struct sfp_bus { + struct kref kref; + struct list_head node; + struct device_node *device_node; + + const struct sfp_socket_ops *socket_ops; + struct device *sfp_dev; + struct sfp *sfp; + + const struct sfp_upstream_ops 
*upstream_ops; + void *upstream; + struct net_device *netdev; + struct phy_device *phydev; + + bool registered; + bool started; +}; + + +int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id, + unsigned long *support) +{ + int port; + + /* port is the physical connector, set this from the connector field. */ + switch (id->base.connector) { + case SFP_CONNECTOR_SC: + case SFP_CONNECTOR_FIBERJACK: + case SFP_CONNECTOR_LC: + case SFP_CONNECTOR_MT_RJ: + case SFP_CONNECTOR_MU: + case SFP_CONNECTOR_OPTICAL_PIGTAIL: + if (support) + phylink_set(support, FIBRE); + port = PORT_FIBRE; + break; + + case SFP_CONNECTOR_RJ45: + if (support) + phylink_set(support, TP); + port = PORT_TP; + break; + + case SFP_CONNECTOR_UNSPEC: + if (id->base.e1000_base_t) { + if (support) + phylink_set(support, TP); + port = PORT_TP; + break; + } + /* fallthrough */ + case SFP_CONNECTOR_SG: /* guess */ + case SFP_CONNECTOR_MPO_1X12: + case SFP_CONNECTOR_MPO_2X16: + case SFP_CONNECTOR_HSSDC_II: + case SFP_CONNECTOR_COPPER_PIGTAIL: + case SFP_CONNECTOR_NOSEPARATE: + case SFP_CONNECTOR_MXC_2X16: + port = PORT_OTHER; + break; + default: + dev_warn(bus->sfp_dev, "SFP: unknown connector id 0x%02x\n", + id->base.connector); + port = PORT_OTHER; + break; + } + + return port; +} +EXPORT_SYMBOL_GPL(sfp_parse_port); + +phy_interface_t sfp_parse_interface(struct sfp_bus *bus, + const struct sfp_eeprom_id *id) +{ + phy_interface_t iface; + + /* Setting the serdes link mode is guesswork: there's no field in + * the EEPROM which indicates what mode should be used. + * + * If the module wants 64b66b, then it must be >= 10G. + * + * If it's a gigabit-only fiber module, it probably does not have + * a PHY, so switch to 802.3z negotiation mode. Otherwise, switch + * to SGMII mode (which is required to support non-gigabit speeds). + */ + switch (id->base.encoding) { + case SFP_ENCODING_8472_64B66B: + iface = PHY_INTERFACE_MODE_10GKR; + break; + + case SFP_ENCODING_8B10B: + if (!id->base.e1000_base_t && + !id->base.e100_base_lx && + !id->base.e100_base_fx) + iface = PHY_INTERFACE_MODE_1000BASEX; + else + iface = PHY_INTERFACE_MODE_SGMII; + break; + + default: + iface = PHY_INTERFACE_MODE_NA; + dev_err(bus->sfp_dev, + "SFP module encoding does not support 8b10b nor 64b66b\n"); + break; + } + + return iface; +} +EXPORT_SYMBOL_GPL(sfp_parse_interface); + +void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, + unsigned long *support) +{ + phylink_set(support, Autoneg); + phylink_set(support, Pause); + phylink_set(support, Asym_Pause); + + /* Set ethtool support from the compliance fields. 
*/ + if (id->base.e10g_base_sr) + phylink_set(support, 10000baseSR_Full); + if (id->base.e10g_base_lr) + phylink_set(support, 10000baseLR_Full); + if (id->base.e10g_base_lrm) + phylink_set(support, 10000baseLRM_Full); + if (id->base.e10g_base_er) + phylink_set(support, 10000baseER_Full); + if (id->base.e1000_base_sx || + id->base.e1000_base_lx || + id->base.e1000_base_cx) + phylink_set(support, 1000baseX_Full); + if (id->base.e1000_base_t) { + phylink_set(support, 1000baseT_Half); + phylink_set(support, 1000baseT_Full); + } + + switch (id->base.extended_cc) { + case 0x00: /* Unspecified */ + break; + case 0x02: /* 100Gbase-SR4 or 25Gbase-SR */ + phylink_set(support, 100000baseSR4_Full); + phylink_set(support, 25000baseSR_Full); + break; + case 0x03: /* 100Gbase-LR4 or 25Gbase-LR */ + case 0x04: /* 100Gbase-ER4 or 25Gbase-ER */ + phylink_set(support, 100000baseLR4_ER4_Full); + break; + case 0x0b: /* 100Gbase-CR4 or 25Gbase-CR CA-L */ + case 0x0c: /* 25Gbase-CR CA-S */ + case 0x0d: /* 25Gbase-CR CA-N */ + phylink_set(support, 100000baseCR4_Full); + phylink_set(support, 25000baseCR_Full); + break; + default: + dev_warn(bus->sfp_dev, + "Unknown/unsupported extended compliance code: 0x%02x\n", + id->base.extended_cc); + break; + } + + /* For fibre channel SFP, derive possible BaseX modes */ + if (id->base.fc_speed_100 || + id->base.fc_speed_200 || + id->base.fc_speed_400) { + if (id->base.br_nominal >= 31) + phylink_set(support, 2500baseX_Full); + if (id->base.br_nominal >= 12) + phylink_set(support, 1000baseX_Full); + } + + switch (id->base.connector) { + case SFP_CONNECTOR_SC: + case SFP_CONNECTOR_FIBERJACK: + case SFP_CONNECTOR_LC: + case SFP_CONNECTOR_MT_RJ: + case SFP_CONNECTOR_MU: + case SFP_CONNECTOR_OPTICAL_PIGTAIL: + break; + + case SFP_CONNECTOR_UNSPEC: + if (id->base.e1000_base_t) + break; + + case SFP_CONNECTOR_SG: /* guess */ + case SFP_CONNECTOR_MPO_1X12: + case SFP_CONNECTOR_MPO_2X16: + case SFP_CONNECTOR_HSSDC_II: + case SFP_CONNECTOR_COPPER_PIGTAIL: + case SFP_CONNECTOR_NOSEPARATE: + case SFP_CONNECTOR_MXC_2X16: + default: + /* a guess at the supported link modes */ + dev_warn(bus->sfp_dev, + "Guessing link modes, please report...\n"); + phylink_set(support, 1000baseT_Half); + phylink_set(support, 1000baseT_Full); + break; + } +} +EXPORT_SYMBOL_GPL(sfp_parse_support); + + +static LIST_HEAD(sfp_buses); +static DEFINE_MUTEX(sfp_mutex); + +static const struct sfp_upstream_ops *sfp_get_upstream_ops(struct sfp_bus *bus) +{ + return bus->registered ? 
bus->upstream_ops : NULL; +} + +static struct sfp_bus *sfp_bus_get(struct device_node *np) +{ + struct sfp_bus *sfp, *new, *found = NULL; + + new = kzalloc(sizeof(*new), GFP_KERNEL); + + mutex_lock(&sfp_mutex); + + list_for_each_entry(sfp, &sfp_buses, node) { + if (sfp->device_node == np) { + kref_get(&sfp->kref); + found = sfp; + break; + } + } + + if (!found && new) { + kref_init(&new->kref); + new->device_node = np; + list_add(&new->node, &sfp_buses); + found = new; + new = NULL; + } + + mutex_unlock(&sfp_mutex); + + kfree(new); + + return found; +} + +static void sfp_bus_release(struct kref *kref) __releases(sfp_mutex) +{ + struct sfp_bus *bus = container_of(kref, struct sfp_bus, kref); + + list_del(&bus->node); + mutex_unlock(&sfp_mutex); + kfree(bus); +} + +static void sfp_bus_put(struct sfp_bus *bus) +{ + kref_put_mutex(&bus->kref, sfp_bus_release, &sfp_mutex); +} + +static int sfp_register_bus(struct sfp_bus *bus) +{ + const struct sfp_upstream_ops *ops = bus->upstream_ops; + int ret; + + if (ops) { + if (ops->link_down) + ops->link_down(bus->upstream); + if (ops->connect_phy && bus->phydev) { + ret = ops->connect_phy(bus->upstream, bus->phydev); + if (ret) + return ret; + } + } + if (bus->started) + bus->socket_ops->start(bus->sfp); + bus->registered = true; + return 0; +} + +static void sfp_unregister_bus(struct sfp_bus *bus) +{ + const struct sfp_upstream_ops *ops = bus->upstream_ops; + + if (bus->registered) { + if (bus->started) + bus->socket_ops->stop(bus->sfp); + if (bus->phydev && ops && ops->disconnect_phy) + ops->disconnect_phy(bus->upstream); + } + bus->registered = false; +} + + +int sfp_get_module_info(struct sfp_bus *bus, struct ethtool_modinfo *modinfo) +{ + if (!bus->registered) + return -ENOIOCTLCMD; + return bus->socket_ops->module_info(bus->sfp, modinfo); +} +EXPORT_SYMBOL_GPL(sfp_get_module_info); + +int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee, + u8 *data) +{ + if (!bus->registered) + return -ENOIOCTLCMD; + return bus->socket_ops->module_eeprom(bus->sfp, ee, data); +} +EXPORT_SYMBOL_GPL(sfp_get_module_eeprom); + +void sfp_upstream_start(struct sfp_bus *bus) +{ + if (bus->registered) + bus->socket_ops->start(bus->sfp); + bus->started = true; +} +EXPORT_SYMBOL_GPL(sfp_upstream_start); + +void sfp_upstream_stop(struct sfp_bus *bus) +{ + if (bus->registered) + bus->socket_ops->stop(bus->sfp); + bus->started = false; +} +EXPORT_SYMBOL_GPL(sfp_upstream_stop); + +struct sfp_bus *sfp_register_upstream(struct device_node *np, + struct net_device *ndev, void *upstream, + const struct sfp_upstream_ops *ops) +{ + struct sfp_bus *bus = sfp_bus_get(np); + int ret = 0; + + if (bus) { + rtnl_lock(); + bus->upstream_ops = ops; + bus->upstream = upstream; + bus->netdev = ndev; + + if (bus->sfp) + ret = sfp_register_bus(bus); + rtnl_unlock(); + } + + if (ret) { + sfp_bus_put(bus); + bus = NULL; + } + + return bus; +} +EXPORT_SYMBOL_GPL(sfp_register_upstream); + +void sfp_unregister_upstream(struct sfp_bus *bus) +{ + rtnl_lock(); + sfp_unregister_bus(bus); + bus->upstream = NULL; + bus->netdev = NULL; + rtnl_unlock(); + + sfp_bus_put(bus); +} +EXPORT_SYMBOL_GPL(sfp_unregister_upstream); + + +/* Socket driver entry points */ +int sfp_add_phy(struct sfp_bus *bus, struct phy_device *phydev) +{ + const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus); + int ret = 0; + + if (ops && ops->connect_phy) + ret = ops->connect_phy(bus->upstream, phydev); + + if (ret == 0) + bus->phydev = phydev; + + return ret; +} +EXPORT_SYMBOL_GPL(sfp_add_phy); + +void 
sfp_remove_phy(struct sfp_bus *bus) +{ + const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus); + + if (ops && ops->disconnect_phy) + ops->disconnect_phy(bus->upstream); + bus->phydev = NULL; +} +EXPORT_SYMBOL_GPL(sfp_remove_phy); + + +void sfp_link_up(struct sfp_bus *bus) +{ + const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus); + + if (ops && ops->link_up) + ops->link_up(bus->upstream); +} +EXPORT_SYMBOL_GPL(sfp_link_up); + +void sfp_link_down(struct sfp_bus *bus) +{ + const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus); + + if (ops && ops->link_down) + ops->link_down(bus->upstream); +} +EXPORT_SYMBOL_GPL(sfp_link_down); + +int sfp_module_insert(struct sfp_bus *bus, const struct sfp_eeprom_id *id) +{ + const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus); + int ret = 0; + + if (ops && ops->module_insert) + ret = ops->module_insert(bus->upstream, id); + + return ret; +} +EXPORT_SYMBOL_GPL(sfp_module_insert); + +void sfp_module_remove(struct sfp_bus *bus) +{ + const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus); + + if (ops && ops->module_remove) + ops->module_remove(bus->upstream); +} +EXPORT_SYMBOL_GPL(sfp_module_remove); + +struct sfp_bus *sfp_register_socket(struct device *dev, struct sfp *sfp, + const struct sfp_socket_ops *ops) +{ + struct sfp_bus *bus = sfp_bus_get(dev->of_node); + int ret = 0; + + if (bus) { + rtnl_lock(); + bus->sfp_dev = dev; + bus->sfp = sfp; + bus->socket_ops = ops; + + if (bus->netdev) + ret = sfp_register_bus(bus); + rtnl_unlock(); + } + + if (ret) { + sfp_bus_put(bus); + bus = NULL; + } + + return bus; +} +EXPORT_SYMBOL_GPL(sfp_register_socket); + +void sfp_unregister_socket(struct sfp_bus *bus) +{ + rtnl_lock(); + sfp_unregister_bus(bus); + bus->sfp_dev = NULL; + bus->sfp = NULL; + bus->socket_ops = NULL; + rtnl_unlock(); + + sfp_bus_put(bus); +} +EXPORT_SYMBOL_GPL(sfp_unregister_socket); diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c new file mode 100644 index 000000000000..baee371bf767 --- /dev/null +++ b/drivers/net/phy/sfp.c @@ -0,0 +1,915 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mdio-i2c.h" +#include "sfp.h" +#include "swphy.h" + +enum { + GPIO_MODDEF0, + GPIO_LOS, + GPIO_TX_FAULT, + GPIO_TX_DISABLE, + GPIO_RATE_SELECT, + GPIO_MAX, + + SFP_F_PRESENT = BIT(GPIO_MODDEF0), + SFP_F_LOS = BIT(GPIO_LOS), + SFP_F_TX_FAULT = BIT(GPIO_TX_FAULT), + SFP_F_TX_DISABLE = BIT(GPIO_TX_DISABLE), + SFP_F_RATE_SELECT = BIT(GPIO_RATE_SELECT), + + SFP_E_INSERT = 0, + SFP_E_REMOVE, + SFP_E_DEV_DOWN, + SFP_E_DEV_UP, + SFP_E_TX_FAULT, + SFP_E_TX_CLEAR, + SFP_E_LOS_HIGH, + SFP_E_LOS_LOW, + SFP_E_TIMEOUT, + + SFP_MOD_EMPTY = 0, + SFP_MOD_PROBE, + SFP_MOD_PRESENT, + SFP_MOD_ERROR, + + SFP_DEV_DOWN = 0, + SFP_DEV_UP, + + SFP_S_DOWN = 0, + SFP_S_INIT, + SFP_S_WAIT_LOS, + SFP_S_LINK_UP, + SFP_S_TX_FAULT, + SFP_S_REINIT, + SFP_S_TX_DISABLE, +}; + +static const char *gpio_of_names[] = { + "mod-def0", + "los", + "tx-fault", + "tx-disable", + "rate-select0", +}; + +static const enum gpiod_flags gpio_flags[] = { + GPIOD_IN, + GPIOD_IN, + GPIOD_IN, + GPIOD_ASIS, + GPIOD_ASIS, +}; + +#define T_INIT_JIFFIES msecs_to_jiffies(300) +#define T_RESET_US 10 +#define T_FAULT_RECOVER msecs_to_jiffies(1000) + +/* SFP module presence detection is poor: the three MOD DEF signals are + * the same length on the PCB, which means it's possible for MOD DEF 0 to + * connect before the I2C bus on MOD DEF 1/2. 
+ * + * The SFP MSA specifies 300ms as t_init (the time taken for TX_FAULT to + * be deasserted) but makes no mention of the earliest time before we can + * access the I2C EEPROM. However, Avago modules require 300ms. + */ +#define T_PROBE_INIT msecs_to_jiffies(300) +#define T_PROBE_RETRY msecs_to_jiffies(100) + +/* + * SFP modules appear to always have their PHY configured for bus address + * 0x56 (which with mdio-i2c, translates to a PHY address of 22). + */ +#define SFP_PHY_ADDR 22 + +/* + * Give this long for the PHY to reset. + */ +#define T_PHY_RESET_MS 50 + +static DEFINE_MUTEX(sfp_mutex); + +struct sfp { + struct device *dev; + struct i2c_adapter *i2c; + struct mii_bus *i2c_mii; + struct sfp_bus *sfp_bus; + struct phy_device *mod_phy; + + unsigned int (*get_state)(struct sfp *); + void (*set_state)(struct sfp *, unsigned int); + int (*read)(struct sfp *, bool, u8, void *, size_t); + + struct gpio_desc *gpio[GPIO_MAX]; + + unsigned int state; + struct delayed_work poll; + struct delayed_work timeout; + struct mutex sm_mutex; + unsigned char sm_mod_state; + unsigned char sm_dev_state; + unsigned short sm_state; + unsigned int sm_retries; + + struct sfp_eeprom_id id; +}; + +static unsigned long poll_jiffies; + +static unsigned int sfp_gpio_get_state(struct sfp *sfp) +{ + unsigned int i, state, v; + + for (i = state = 0; i < GPIO_MAX; i++) { + if (gpio_flags[i] != GPIOD_IN || !sfp->gpio[i]) + continue; + + v = gpiod_get_value_cansleep(sfp->gpio[i]); + if (v) + state |= BIT(i); + } + + return state; +} + +static void sfp_gpio_set_state(struct sfp *sfp, unsigned int state) +{ + if (state & SFP_F_PRESENT) { + /* If the module is present, drive the signals */ + if (sfp->gpio[GPIO_TX_DISABLE]) + gpiod_direction_output(sfp->gpio[GPIO_TX_DISABLE], + state & SFP_F_TX_DISABLE); + if (state & SFP_F_RATE_SELECT) + gpiod_direction_output(sfp->gpio[GPIO_RATE_SELECT], + state & SFP_F_RATE_SELECT); + } else { + /* Otherwise, let them float to the pull-ups */ + if (sfp->gpio[GPIO_TX_DISABLE]) + gpiod_direction_input(sfp->gpio[GPIO_TX_DISABLE]); + if (state & SFP_F_RATE_SELECT) + gpiod_direction_input(sfp->gpio[GPIO_RATE_SELECT]); + } +} + +static int sfp__i2c_read(struct i2c_adapter *i2c, u8 bus_addr, u8 dev_addr, + void *buf, size_t len) +{ + struct i2c_msg msgs[2]; + int ret; + + msgs[0].addr = bus_addr; + msgs[0].flags = 0; + msgs[0].len = 1; + msgs[0].buf = &dev_addr; + msgs[1].addr = bus_addr; + msgs[1].flags = I2C_M_RD; + msgs[1].len = len; + msgs[1].buf = buf; + + ret = i2c_transfer(i2c, msgs, ARRAY_SIZE(msgs)); + if (ret < 0) + return ret; + + return ret == ARRAY_SIZE(msgs) ? len : 0; +} + +static int sfp_i2c_read(struct sfp *sfp, bool a2, u8 addr, void *buf, + size_t len) +{ + return sfp__i2c_read(sfp->i2c, a2 ? 
0x51 : 0x50, addr, buf, len); +} + +static int sfp_i2c_configure(struct sfp *sfp, struct i2c_adapter *i2c) +{ + struct mii_bus *i2c_mii; + int ret; + + if (!i2c_check_functionality(i2c, I2C_FUNC_I2C)) + return -EINVAL; + + sfp->i2c = i2c; + sfp->read = sfp_i2c_read; + + i2c_mii = mdio_i2c_alloc(sfp->dev, i2c); + if (IS_ERR(i2c_mii)) + return PTR_ERR(i2c_mii); + + i2c_mii->name = "SFP I2C Bus"; + i2c_mii->phy_mask = ~0; + + ret = mdiobus_register(i2c_mii); + if (ret < 0) { + mdiobus_free(i2c_mii); + return ret; + } + + sfp->i2c_mii = i2c_mii; + + return 0; +} + + +/* Interface */ +static unsigned int sfp_get_state(struct sfp *sfp) +{ + return sfp->get_state(sfp); +} + +static void sfp_set_state(struct sfp *sfp, unsigned int state) +{ + sfp->set_state(sfp, state); +} + +static int sfp_read(struct sfp *sfp, bool a2, u8 addr, void *buf, size_t len) +{ + return sfp->read(sfp, a2, addr, buf, len); +} + +static unsigned int sfp_check(void *buf, size_t len) +{ + u8 *p, check; + + for (p = buf, check = 0; len; p++, len--) + check += *p; + + return check; +} + +/* Helpers */ +static void sfp_module_tx_disable(struct sfp *sfp) +{ + dev_dbg(sfp->dev, "tx disable %u -> %u\n", + sfp->state & SFP_F_TX_DISABLE ? 1 : 0, 1); + sfp->state |= SFP_F_TX_DISABLE; + sfp_set_state(sfp, sfp->state); +} + +static void sfp_module_tx_enable(struct sfp *sfp) +{ + dev_dbg(sfp->dev, "tx disable %u -> %u\n", + sfp->state & SFP_F_TX_DISABLE ? 1 : 0, 0); + sfp->state &= ~SFP_F_TX_DISABLE; + sfp_set_state(sfp, sfp->state); +} + +static void sfp_module_tx_fault_reset(struct sfp *sfp) +{ + unsigned int state = sfp->state; + + if (state & SFP_F_TX_DISABLE) + return; + + sfp_set_state(sfp, state | SFP_F_TX_DISABLE); + + udelay(T_RESET_US); + + sfp_set_state(sfp, state); +} + +/* SFP state machine */ +static void sfp_sm_set_timer(struct sfp *sfp, unsigned int timeout) +{ + if (timeout) + mod_delayed_work(system_power_efficient_wq, &sfp->timeout, + timeout); + else + cancel_delayed_work(&sfp->timeout); +} + +static void sfp_sm_next(struct sfp *sfp, unsigned int state, + unsigned int timeout) +{ + sfp->sm_state = state; + sfp_sm_set_timer(sfp, timeout); +} + +static void sfp_sm_ins_next(struct sfp *sfp, unsigned int state, unsigned int timeout) +{ + sfp->sm_mod_state = state; + sfp_sm_set_timer(sfp, timeout); +} + +static void sfp_sm_phy_detach(struct sfp *sfp) +{ + phy_stop(sfp->mod_phy); + sfp_remove_phy(sfp->sfp_bus); + phy_device_remove(sfp->mod_phy); + phy_device_free(sfp->mod_phy); + sfp->mod_phy = NULL; +} + +static void sfp_sm_probe_phy(struct sfp *sfp) +{ + struct phy_device *phy; + int err; + + msleep(T_PHY_RESET_MS); + + phy = mdiobus_scan(sfp->i2c_mii, SFP_PHY_ADDR); + if (IS_ERR(phy)) { + dev_err(sfp->dev, "mdiobus scan returned %ld\n", PTR_ERR(phy)); + return; + } + if (!phy) { + dev_info(sfp->dev, "no PHY detected\n"); + return; + } + + err = sfp_add_phy(sfp->sfp_bus, phy); + if (err) { + phy_device_remove(phy); + phy_device_free(phy); + dev_err(sfp->dev, "sfp_add_phy failed: %d\n", err); + return; + } + + sfp->mod_phy = phy; + phy_start(phy); +} + +static void sfp_sm_link_up(struct sfp *sfp) +{ + sfp_link_up(sfp->sfp_bus); + sfp_sm_next(sfp, SFP_S_LINK_UP, 0); +} + +static void sfp_sm_link_down(struct sfp *sfp) +{ + sfp_link_down(sfp->sfp_bus); +} + +static void sfp_sm_link_check_los(struct sfp *sfp) +{ + unsigned int los = sfp->state & SFP_F_LOS; + + /* FIXME: what if neither SFP_OPTIONS_LOS_INVERTED nor + * SFP_OPTIONS_LOS_NORMAL are set? For now, we assume + * the same as SFP_OPTIONS_LOS_NORMAL set. 
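+ * Concretely: with SFP_OPTIONS_LOS_INVERTED the LOS input is treated
+ * as active-low (pin high means signal present), otherwise the pin is
+ * treated as active-high (pin high means loss of signal).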
+ */ + if (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED) + los ^= SFP_F_LOS; + + if (los) + sfp_sm_next(sfp, SFP_S_WAIT_LOS, 0); + else + sfp_sm_link_up(sfp); +} + +static void sfp_sm_fault(struct sfp *sfp, bool warn) +{ + if (sfp->sm_retries && !--sfp->sm_retries) { + dev_err(sfp->dev, "module persistently indicates fault, disabling\n"); + sfp_sm_next(sfp, SFP_S_TX_DISABLE, 0); + } else { + if (warn) + dev_err(sfp->dev, "module transmit fault indicated\n"); + + sfp_sm_next(sfp, SFP_S_TX_FAULT, T_FAULT_RECOVER); + } +} + +static void sfp_sm_mod_init(struct sfp *sfp) +{ + sfp_module_tx_enable(sfp); + + /* Wait t_init before indicating that the link is up, provided the + * current state indicates no TX_FAULT. If TX_FAULT clears before + * this time, that's fine too. + */ + sfp_sm_next(sfp, SFP_S_INIT, T_INIT_JIFFIES); + sfp->sm_retries = 5; + + /* Setting the serdes link mode is guesswork: there's no + * field in the EEPROM which indicates what mode should + * be used. + * + * If it's a gigabit-only fiber module, it probably does + * not have a PHY, so switch to 802.3z negotiation mode. + * Otherwise, switch to SGMII mode (which is required to + * support non-gigabit speeds) and probe for a PHY. + */ + if (sfp->id.base.e1000_base_t || + sfp->id.base.e100_base_lx || + sfp->id.base.e100_base_fx) + sfp_sm_probe_phy(sfp); +} + +static int sfp_sm_mod_probe(struct sfp *sfp) +{ + /* SFP module inserted - read I2C data */ + struct sfp_eeprom_id id; + char vendor[17]; + char part[17]; + char sn[17]; + char date[9]; + char rev[5]; + u8 check; + int err; + + err = sfp_read(sfp, false, 0, &id, sizeof(id)); + if (err < 0) { + dev_err(sfp->dev, "failed to read EEPROM: %d\n", err); + return -EAGAIN; + } + + if (err != sizeof(id)) { + dev_err(sfp->dev, "EEPROM short read: %d\n", err); + return -EAGAIN; + } + + /* Validate the checksum over the base structure */ + check = sfp_check(&id.base, sizeof(id.base) - 1); + if (check != id.base.cc_base) { + dev_err(sfp->dev, + "EEPROM base structure checksum failure: 0x%02x\n", + check); + print_hex_dump(KERN_ERR, "sfp EE: ", DUMP_PREFIX_OFFSET, + 16, 1, &id, sizeof(id.base) - 1, true); + return -EINVAL; + } + + check = sfp_check(&id.ext, sizeof(id.ext) - 1); + if (check != id.ext.cc_ext) { + dev_err(sfp->dev, + "EEPROM extended structure checksum failure: 0x%02x\n", + check); + memset(&id.ext, 0, sizeof(id.ext)); + } + + sfp->id = id; + + memcpy(vendor, sfp->id.base.vendor_name, 16); + vendor[16] = '\0'; + memcpy(part, sfp->id.base.vendor_pn, 16); + part[16] = '\0'; + memcpy(rev, sfp->id.base.vendor_rev, 4); + rev[4] = '\0'; + memcpy(sn, sfp->id.ext.vendor_sn, 16); + sn[16] = '\0'; + memcpy(date, sfp->id.ext.datecode, 8); + date[8] = '\0'; + + dev_info(sfp->dev, "module %s %s rev %s sn %s dc %s\n", vendor, part, rev, sn, date); + + /* We only support SFP modules, not the legacy GBIC modules. 
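+ * The identifier and extended identifier bytes at the start of the
+ * EEPROM tell an SFP cage apart from GBIC and other module types, so
+ * anything else is rejected here.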
*/ + if (sfp->id.base.phys_id != SFP_PHYS_ID_SFP || + sfp->id.base.phys_ext_id != SFP_PHYS_EXT_ID_SFP) { + dev_err(sfp->dev, "module is not SFP - phys id 0x%02x 0x%02x\n", + sfp->id.base.phys_id, sfp->id.base.phys_ext_id); + return -EINVAL; + } + + return sfp_module_insert(sfp->sfp_bus, &sfp->id); +} + +static void sfp_sm_mod_remove(struct sfp *sfp) +{ + sfp_module_remove(sfp->sfp_bus); + + if (sfp->mod_phy) + sfp_sm_phy_detach(sfp); + + sfp_module_tx_disable(sfp); + + memset(&sfp->id, 0, sizeof(sfp->id)); + + dev_info(sfp->dev, "module removed\n"); +} + +static void sfp_sm_event(struct sfp *sfp, unsigned int event) +{ + mutex_lock(&sfp->sm_mutex); + + dev_dbg(sfp->dev, "SM: enter %u:%u:%u event %u\n", + sfp->sm_mod_state, sfp->sm_dev_state, sfp->sm_state, event); + + /* This state machine tracks the insert/remove state of + * the module, and handles probing the on-board EEPROM. + */ + switch (sfp->sm_mod_state) { + default: + if (event == SFP_E_INSERT) { + sfp_module_tx_disable(sfp); + sfp_sm_ins_next(sfp, SFP_MOD_PROBE, T_PROBE_INIT); + } + break; + + case SFP_MOD_PROBE: + if (event == SFP_E_REMOVE) { + sfp_sm_ins_next(sfp, SFP_MOD_EMPTY, 0); + } else if (event == SFP_E_TIMEOUT) { + int err = sfp_sm_mod_probe(sfp); + + if (err == 0) + sfp_sm_ins_next(sfp, SFP_MOD_PRESENT, 0); + else if (err == -EAGAIN) + sfp_sm_set_timer(sfp, T_PROBE_RETRY); + else + sfp_sm_ins_next(sfp, SFP_MOD_ERROR, 0); + } + break; + + case SFP_MOD_PRESENT: + case SFP_MOD_ERROR: + if (event == SFP_E_REMOVE) { + sfp_sm_mod_remove(sfp); + sfp_sm_ins_next(sfp, SFP_MOD_EMPTY, 0); + } + break; + } + + /* This state machine tracks the netdev up/down state */ + switch (sfp->sm_dev_state) { + default: + if (event == SFP_E_DEV_UP) + sfp->sm_dev_state = SFP_DEV_UP; + break; + + case SFP_DEV_UP: + if (event == SFP_E_DEV_DOWN) { + /* If the module has a PHY, avoid raising TX disable + * as this resets the PHY. Otherwise, raise it to + * turn the laser off. + */ + if (!sfp->mod_phy) + sfp_module_tx_disable(sfp); + sfp->sm_dev_state = SFP_DEV_DOWN; + } + break; + } + + /* Some events are global */ + if (sfp->sm_state != SFP_S_DOWN && + (sfp->sm_mod_state != SFP_MOD_PRESENT || + sfp->sm_dev_state != SFP_DEV_UP)) { + if (sfp->sm_state == SFP_S_LINK_UP && + sfp->sm_dev_state == SFP_DEV_UP) + sfp_sm_link_down(sfp); + if (sfp->mod_phy) + sfp_sm_phy_detach(sfp); + sfp_sm_next(sfp, SFP_S_DOWN, 0); + mutex_unlock(&sfp->sm_mutex); + return; + } + + /* The main state machine */ + switch (sfp->sm_state) { + case SFP_S_DOWN: + if (sfp->sm_mod_state == SFP_MOD_PRESENT && + sfp->sm_dev_state == SFP_DEV_UP) + sfp_sm_mod_init(sfp); + break; + + case SFP_S_INIT: + if (event == SFP_E_TIMEOUT && sfp->state & SFP_F_TX_FAULT) + sfp_sm_fault(sfp, true); + else if (event == SFP_E_TIMEOUT || event == SFP_E_TX_CLEAR) + sfp_sm_link_check_los(sfp); + break; + + case SFP_S_WAIT_LOS: + if (event == SFP_E_TX_FAULT) + sfp_sm_fault(sfp, true); + else if (event == + (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED ? + SFP_E_LOS_HIGH : SFP_E_LOS_LOW)) + sfp_sm_link_up(sfp); + break; + + case SFP_S_LINK_UP: + if (event == SFP_E_TX_FAULT) { + sfp_sm_link_down(sfp); + sfp_sm_fault(sfp, true); + } else if (event == + (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED ? 
+ SFP_E_LOS_LOW : SFP_E_LOS_HIGH)) { + sfp_sm_link_down(sfp); + sfp_sm_next(sfp, SFP_S_WAIT_LOS, 0); + } + break; + + case SFP_S_TX_FAULT: + if (event == SFP_E_TIMEOUT) { + sfp_module_tx_fault_reset(sfp); + sfp_sm_next(sfp, SFP_S_REINIT, T_INIT_JIFFIES); + } + break; + + case SFP_S_REINIT: + if (event == SFP_E_TIMEOUT && sfp->state & SFP_F_TX_FAULT) { + sfp_sm_fault(sfp, false); + } else if (event == SFP_E_TIMEOUT || event == SFP_E_TX_CLEAR) { + dev_info(sfp->dev, "module transmit fault recovered\n"); + sfp_sm_link_check_los(sfp); + } + break; + + case SFP_S_TX_DISABLE: + break; + } + + dev_dbg(sfp->dev, "SM: exit %u:%u:%u\n", + sfp->sm_mod_state, sfp->sm_dev_state, sfp->sm_state); + + mutex_unlock(&sfp->sm_mutex); +} + +static void sfp_start(struct sfp *sfp) +{ + sfp_sm_event(sfp, SFP_E_DEV_UP); +} + +static void sfp_stop(struct sfp *sfp) +{ + sfp_sm_event(sfp, SFP_E_DEV_DOWN); +} + +static int sfp_module_info(struct sfp *sfp, struct ethtool_modinfo *modinfo) +{ + /* locking... and check module is present */ + + if (sfp->id.ext.sff8472_compliance) { + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } + return 0; +} + +static int sfp_module_eeprom(struct sfp *sfp, struct ethtool_eeprom *ee, + u8 *data) +{ + unsigned int first, last, len; + int ret; + + if (ee->len == 0) + return -EINVAL; + + first = ee->offset; + last = ee->offset + ee->len; + if (first < ETH_MODULE_SFF_8079_LEN) { + len = min_t(unsigned int, last, ETH_MODULE_SFF_8079_LEN); + len -= first; + + ret = sfp->read(sfp, false, first, data, len); + if (ret < 0) + return ret; + + first += len; + data += len; + } + if (first >= ETH_MODULE_SFF_8079_LEN && + first < ETH_MODULE_SFF_8472_LEN) { + len = min_t(unsigned int, last, ETH_MODULE_SFF_8472_LEN); + len -= first; + first -= ETH_MODULE_SFF_8079_LEN; + + ret = sfp->read(sfp, true, first, data, len); + if (ret < 0) + return ret; + } + return 0; +} + +static const struct sfp_socket_ops sfp_module_ops = { + .start = sfp_start, + .stop = sfp_stop, + .module_info = sfp_module_info, + .module_eeprom = sfp_module_eeprom, +}; + +static void sfp_timeout(struct work_struct *work) +{ + struct sfp *sfp = container_of(work, struct sfp, timeout.work); + + rtnl_lock(); + sfp_sm_event(sfp, SFP_E_TIMEOUT); + rtnl_unlock(); +} + +static void sfp_check_state(struct sfp *sfp) +{ + unsigned int state, i, changed; + + state = sfp_get_state(sfp); + changed = state ^ sfp->state; + changed &= SFP_F_PRESENT | SFP_F_LOS | SFP_F_TX_FAULT; + + for (i = 0; i < GPIO_MAX; i++) + if (changed & BIT(i)) + dev_dbg(sfp->dev, "%s %u -> %u\n", gpio_of_names[i], + !!(sfp->state & BIT(i)), !!(state & BIT(i))); + + state |= sfp->state & (SFP_F_TX_DISABLE | SFP_F_RATE_SELECT); + sfp->state = state; + + rtnl_lock(); + if (changed & SFP_F_PRESENT) + sfp_sm_event(sfp, state & SFP_F_PRESENT ? + SFP_E_INSERT : SFP_E_REMOVE); + + if (changed & SFP_F_TX_FAULT) + sfp_sm_event(sfp, state & SFP_F_TX_FAULT ? + SFP_E_TX_FAULT : SFP_E_TX_CLEAR); + + if (changed & SFP_F_LOS) + sfp_sm_event(sfp, state & SFP_F_LOS ? 
+ SFP_E_LOS_HIGH : SFP_E_LOS_LOW); + rtnl_unlock(); +} + +static irqreturn_t sfp_irq(int irq, void *data) +{ + struct sfp *sfp = data; + + sfp_check_state(sfp); + + return IRQ_HANDLED; +} + +static void sfp_poll(struct work_struct *work) +{ + struct sfp *sfp = container_of(work, struct sfp, poll.work); + + sfp_check_state(sfp); + mod_delayed_work(system_wq, &sfp->poll, poll_jiffies); +} + +static struct sfp *sfp_alloc(struct device *dev) +{ + struct sfp *sfp; + + sfp = kzalloc(sizeof(*sfp), GFP_KERNEL); + if (!sfp) + return ERR_PTR(-ENOMEM); + + sfp->dev = dev; + + mutex_init(&sfp->sm_mutex); + INIT_DELAYED_WORK(&sfp->poll, sfp_poll); + INIT_DELAYED_WORK(&sfp->timeout, sfp_timeout); + + return sfp; +} + +static void sfp_cleanup(void *data) +{ + struct sfp *sfp = data; + + cancel_delayed_work_sync(&sfp->poll); + cancel_delayed_work_sync(&sfp->timeout); + if (sfp->i2c_mii) { + mdiobus_unregister(sfp->i2c_mii); + mdiobus_free(sfp->i2c_mii); + } + if (sfp->i2c) + i2c_put_adapter(sfp->i2c); + kfree(sfp); +} + +static int sfp_probe(struct platform_device *pdev) +{ + struct sfp *sfp; + bool poll = false; + int irq, err, i; + + sfp = sfp_alloc(&pdev->dev); + if (IS_ERR(sfp)) + return PTR_ERR(sfp); + + platform_set_drvdata(pdev, sfp); + + err = devm_add_action(sfp->dev, sfp_cleanup, sfp); + if (err < 0) + return err; + + if (pdev->dev.of_node) { + struct device_node *node = pdev->dev.of_node; + struct device_node *np; + + np = of_parse_phandle(node, "i2c-bus", 0); + if (np) { + struct i2c_adapter *i2c; + + i2c = of_find_i2c_adapter_by_node(np); + of_node_put(np); + if (!i2c) + return -EPROBE_DEFER; + + err = sfp_i2c_configure(sfp, i2c); + if (err < 0) { + i2c_put_adapter(i2c); + return err; + } + } + + for (i = 0; i < GPIO_MAX; i++) { + sfp->gpio[i] = devm_gpiod_get_optional(sfp->dev, + gpio_of_names[i], gpio_flags[i]); + if (IS_ERR(sfp->gpio[i])) + return PTR_ERR(sfp->gpio[i]); + } + + sfp->get_state = sfp_gpio_get_state; + sfp->set_state = sfp_gpio_set_state; + } + + sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops); + if (!sfp->sfp_bus) + return -ENOMEM; + + /* Get the initial state, and always signal TX disable, + * since the network interface will not be up. 
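+ * Keeping TX_DISABLE asserted at probe time means the transmitter
+ * stays off until an upstream device attaches and is brought up, at
+ * which point the state machine re-enables it.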
+ */ + sfp->state = sfp_get_state(sfp) | SFP_F_TX_DISABLE; + + if (sfp->gpio[GPIO_RATE_SELECT] && + gpiod_get_value_cansleep(sfp->gpio[GPIO_RATE_SELECT])) + sfp->state |= SFP_F_RATE_SELECT; + sfp_set_state(sfp, sfp->state); + sfp_module_tx_disable(sfp); + rtnl_lock(); + if (sfp->state & SFP_F_PRESENT) + sfp_sm_event(sfp, SFP_E_INSERT); + rtnl_unlock(); + + for (i = 0; i < GPIO_MAX; i++) { + if (gpio_flags[i] != GPIOD_IN || !sfp->gpio[i]) + continue; + + irq = gpiod_to_irq(sfp->gpio[i]); + if (!irq) { + poll = true; + continue; + } + + err = devm_request_threaded_irq(sfp->dev, irq, NULL, sfp_irq, + IRQF_ONESHOT | + IRQF_TRIGGER_RISING | + IRQF_TRIGGER_FALLING, + dev_name(sfp->dev), sfp); + if (err) + poll = true; + } + + if (poll) + mod_delayed_work(system_wq, &sfp->poll, poll_jiffies); + + return 0; +} + +static int sfp_remove(struct platform_device *pdev) +{ + struct sfp *sfp = platform_get_drvdata(pdev); + + sfp_unregister_socket(sfp->sfp_bus); + + return 0; +} + +static const struct of_device_id sfp_of_match[] = { + { .compatible = "sff,sfp", }, + { }, +}; +MODULE_DEVICE_TABLE(of, sfp_of_match); + +static struct platform_driver sfp_driver = { + .probe = sfp_probe, + .remove = sfp_remove, + .driver = { + .name = "sfp", + .of_match_table = sfp_of_match, + }, +}; + +static int sfp_init(void) +{ + poll_jiffies = msecs_to_jiffies(100); + + return platform_driver_register(&sfp_driver); +} +module_init(sfp_init); + +static void sfp_exit(void) +{ + platform_driver_unregister(&sfp_driver); +} +module_exit(sfp_exit); + +MODULE_ALIAS("platform:sfp"); +MODULE_AUTHOR("Russell King"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/phy/sfp.h b/drivers/net/phy/sfp.h new file mode 100644 index 000000000000..31b0acf337e2 --- /dev/null +++ b/drivers/net/phy/sfp.h @@ -0,0 +1,28 @@ +#ifndef SFP_H +#define SFP_H + +#include +#include + +struct sfp; + +struct sfp_socket_ops { + void (*start)(struct sfp *sfp); + void (*stop)(struct sfp *sfp); + int (*module_info)(struct sfp *sfp, struct ethtool_modinfo *modinfo); + int (*module_eeprom)(struct sfp *sfp, struct ethtool_eeprom *ee, + u8 *data); +}; + +int sfp_add_phy(struct sfp_bus *bus, struct phy_device *phydev); +void sfp_remove_phy(struct sfp_bus *bus); +void sfp_link_up(struct sfp_bus *bus); +void sfp_link_down(struct sfp_bus *bus); +int sfp_module_insert(struct sfp_bus *bus, const struct sfp_eeprom_id *id); +void sfp_module_remove(struct sfp_bus *bus); +int sfp_link_configure(struct sfp_bus *bus, const struct sfp_eeprom_id *id); +struct sfp_bus *sfp_register_socket(struct device *dev, struct sfp *sfp, + const struct sfp_socket_ops *ops); +void sfp_unregister_socket(struct sfp_bus *bus); + +#endif diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c index d15dd3938ba8..2e5150b0b8d5 100644 --- a/drivers/net/phy/xilinx_gmii2rgmii.c +++ b/drivers/net/phy/xilinx_gmii2rgmii.c @@ -44,7 +44,7 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev) priv->phy_drv->read_status(phydev); val = mdiobus_read(phydev->mdio.bus, priv->addr, XILINX_GMII2RGMII_REG); - val &= XILINX_GMII2RGMII_SPEED_MASK; + val &= ~XILINX_GMII2RGMII_SPEED_MASK; if (phydev->speed == SPEED_1000) val |= BMCR_SPEED1000; diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index a404552555d4..e365866600ba 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -120,7 +120,7 @@ struct ppp { int n_channels; /* how many channels are attached 54 */ spinlock_t rlock; /* lock for receive side 58 */ spinlock_t 
wlock; /* lock for transmit side 5c */ - int *xmit_recursion __percpu; /* xmit recursion detect */ + int __percpu *xmit_recursion; /* xmit recursion detect */ int mru; /* max receive unit 60 */ unsigned int flags; /* control bits 64 */ unsigned int xstate; /* transmit state bits 68 */ @@ -1339,7 +1339,17 @@ ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64) static int ppp_dev_init(struct net_device *dev) { + struct ppp *ppp; + netdev_lockdep_set_classes(dev); + + ppp = netdev_priv(dev); + /* Let the netdevice take a reference on the ppp file. This ensures + * that ppp_destroy_interface() won't run before the device gets + * unregistered. + */ + atomic_inc(&ppp->file.refcnt); + return 0; } @@ -1362,6 +1372,15 @@ static void ppp_dev_uninit(struct net_device *dev) wake_up_interruptible(&ppp->file.rwait); } +static void ppp_dev_priv_destructor(struct net_device *dev) +{ + struct ppp *ppp; + + ppp = netdev_priv(dev); + if (atomic_dec_and_test(&ppp->file.refcnt)) + ppp_destroy_interface(ppp); +} + static const struct net_device_ops ppp_netdev_ops = { .ndo_init = ppp_dev_init, .ndo_uninit = ppp_dev_uninit, @@ -1387,6 +1406,7 @@ static void ppp_setup(struct net_device *dev) dev->tx_queue_len = 3; dev->type = ARPHRD_PPP; dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; + dev->priv_destructor = ppp_dev_priv_destructor; netif_keep_dst(dev); } diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 3570c7576993..21b71ae947fd 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -943,9 +943,6 @@ static int set_offload(struct tap_queue *q, unsigned long arg) if (arg & TUN_F_TSO6) feature_mask |= NETIF_F_TSO6; } - - if (arg & TUN_F_UFO) - feature_mask |= NETIF_F_UFO; } /* tun/tap driver inverts the usage for TSO offloads, where @@ -956,7 +953,7 @@ static int set_offload(struct tap_queue *q, unsigned long arg) * When user space turns off TSO, we turn off GSO/LRO so that * user-space will not receive TSO frames. 
*/ - if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO)) + if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6)) features |= RX_OFFLOADS; else features &= ~RX_OFFLOADS; @@ -1078,7 +1075,7 @@ static long tap_ioctl(struct file *file, unsigned int cmd, case TUNSETOFFLOAD: /* let the user check for future flags */ if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | - TUN_F_TSO_ECN | TUN_F_UFO)) + TUN_F_TSO_ECN)) return -EINVAL; rtnl_lock(); @@ -1130,7 +1127,7 @@ static long tap_compat_ioctl(struct file *file, unsigned int cmd, } #endif -const struct file_operations tap_fops = { +static const struct file_operations tap_fops = { .owner = THIS_MODULE, .open = tap_open, .release = tap_release, @@ -1218,7 +1215,7 @@ int tap_queue_resize(struct tap_dev *tap) int n = tap->numqueues; int ret, i = 0; - arrays = kmalloc(sizeof *arrays * n, GFP_KERNEL); + arrays = kmalloc_array(n, sizeof(*arrays), GFP_KERNEL); if (!arrays) return -ENOMEM; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 0a2c0a42283f..5ce580f413b9 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -73,6 +73,8 @@ #include #include #include +#include +#include #include @@ -105,6 +107,9 @@ do { \ } while (0) #endif +#define TUN_HEADROOM 256 +#define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD) + /* TUN device flags */ /* IFF_ATTACH_QUEUE is never stored in device flags, @@ -199,7 +204,7 @@ struct tun_struct { struct net_device *dev; netdev_features_t set_features; #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \ - NETIF_F_TSO6|NETIF_F_UFO) + NETIF_F_TSO6) int align; int vnet_hdr_sz; @@ -221,6 +226,7 @@ struct tun_struct { u32 flow_count; u32 rx_batched; struct tun_pcpu_stats __percpu *pcpu_stats; + struct bpf_prog __rcu *xdp_prog; }; #ifdef CONFIG_TUN_VNET_CROSS_LE @@ -585,6 +591,7 @@ static void tun_detach(struct tun_file *tfile, bool clean) static void tun_detach_all(struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); + struct bpf_prog *xdp_prog = rtnl_dereference(tun->xdp_prog); struct tun_file *tfile, *tmp; int i, n = tun->numqueues; @@ -617,6 +624,9 @@ static void tun_detach_all(struct net_device *dev) } BUG_ON(tun->numdisabled != 0); + if (xdp_prog) + bpf_prog_put(xdp_prog); + if (tun->flags & IFF_PERSIST) module_put(THIS_MODULE); } @@ -892,7 +902,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) sk_filter(tfile->socket.sk, skb)) goto drop; - if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC))) + if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) goto drop; skb_tx_timestamp(skb); @@ -1003,6 +1013,46 @@ tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->tx_dropped = tx_dropped; } +static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + struct tun_struct *tun = netdev_priv(dev); + struct bpf_prog *old_prog; + + old_prog = rtnl_dereference(tun->xdp_prog); + rcu_assign_pointer(tun->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); + + return 0; +} + +static u32 tun_xdp_query(struct net_device *dev) +{ + struct tun_struct *tun = netdev_priv(dev); + const struct bpf_prog *xdp_prog; + + xdp_prog = rtnl_dereference(tun->xdp_prog); + if (xdp_prog) + return xdp_prog->aux->id; + + return 0; +} + +static int tun_xdp(struct net_device *dev, struct netdev_xdp *xdp) +{ + switch (xdp->command) { + case XDP_SETUP_PROG: + return tun_xdp_set(dev, xdp->prog, xdp->extack); + case XDP_QUERY_PROG: + xdp->prog_id = tun_xdp_query(dev); + xdp->prog_attached = !!xdp->prog_id; + return 
0; + default: + return -EINVAL; + } +} + static const struct net_device_ops tun_netdev_ops = { .ndo_uninit = tun_net_uninit, .ndo_open = tun_net_open, @@ -1033,6 +1083,7 @@ static const struct net_device_ops tap_netdev_ops = { .ndo_features_check = passthru_features_check, .ndo_set_rx_headroom = tun_set_headroom, .ndo_get_stats64 = tun_net_get_stats64, + .ndo_xdp = tun_xdp, }; static void tun_flow_init(struct tun_struct *tun) @@ -1190,6 +1241,138 @@ static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile, } } +static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile, + int len, int noblock, bool zerocopy) +{ + if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) + return false; + + if (tfile->socket.sk->sk_sndbuf != INT_MAX) + return false; + + if (!noblock) + return false; + + if (zerocopy) + return false; + + if (SKB_DATA_ALIGN(len + TUN_RX_PAD) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE) + return false; + + return true; +} + +static struct sk_buff *tun_build_skb(struct tun_struct *tun, + struct tun_file *tfile, + struct iov_iter *from, + struct virtio_net_hdr *hdr, + int len, int *skb_xdp) +{ + struct page_frag *alloc_frag = ¤t->task_frag; + struct sk_buff *skb; + struct bpf_prog *xdp_prog; + int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + unsigned int delta = 0; + char *buf; + size_t copied; + bool xdp_xmit = false; + int err, pad = TUN_RX_PAD; + + rcu_read_lock(); + xdp_prog = rcu_dereference(tun->xdp_prog); + if (xdp_prog) + pad += TUN_HEADROOM; + buflen += SKB_DATA_ALIGN(len + pad); + rcu_read_unlock(); + + if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL))) + return ERR_PTR(-ENOMEM); + + buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; + copied = copy_page_from_iter(alloc_frag->page, + alloc_frag->offset + pad, + len, from); + if (copied != len) + return ERR_PTR(-EFAULT); + + /* There's a small window that XDP may be set after the check + * of xdp_prog above, this should be rare and for simplicity + * we do XDP on skb in case the headroom is not enough. 
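+ * skb_xdp records which path was taken: 1 means the XDP program (if
+ * any) must still be run on the finished skb via the generic hook,
+ * 0 means it has already been run here on the raw buffer.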
+ */ + if (hdr->gso_type || !xdp_prog) + *skb_xdp = 1; + else + *skb_xdp = 0; + + rcu_read_lock(); + xdp_prog = rcu_dereference(tun->xdp_prog); + if (xdp_prog && !*skb_xdp) { + struct xdp_buff xdp; + void *orig_data; + u32 act; + + xdp.data_hard_start = buf; + xdp.data = buf + pad; + xdp.data_end = xdp.data + len; + orig_data = xdp.data; + act = bpf_prog_run_xdp(xdp_prog, &xdp); + + switch (act) { + case XDP_REDIRECT: + get_page(alloc_frag->page); + alloc_frag->offset += buflen; + err = xdp_do_redirect(tun->dev, &xdp, xdp_prog); + if (err) + goto err_redirect; + return NULL; + case XDP_TX: + xdp_xmit = true; + /* fall through */ + case XDP_PASS: + delta = orig_data - xdp.data; + break; + default: + bpf_warn_invalid_xdp_action(act); + /* fall through */ + case XDP_ABORTED: + trace_xdp_exception(tun->dev, xdp_prog, act); + /* fall through */ + case XDP_DROP: + goto err_xdp; + } + } + + skb = build_skb(buf, buflen); + if (!skb) { + rcu_read_unlock(); + return ERR_PTR(-ENOMEM); + } + + skb_reserve(skb, pad - delta); + skb_put(skb, len + delta); + get_page(alloc_frag->page); + alloc_frag->offset += buflen; + + if (xdp_xmit) { + skb->dev = tun->dev; + generic_xdp_tx(skb, xdp_prog); + rcu_read_lock(); + return NULL; + } + + rcu_read_unlock(); + + return skb; + +err_redirect: + put_page(alloc_frag->page); +err_xdp: + rcu_read_unlock(); + this_cpu_inc(tun->pcpu_stats->rx_dropped); + return NULL; +} + /* Get packet from user space buffer */ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, void *msg_control, struct iov_iter *from, @@ -1206,6 +1389,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, bool zerocopy = false; int err; u32 rxhash; + int skb_xdp = 1; if (!(tun->dev->flags & IFF_UP)) return -EIO; @@ -1263,30 +1447,44 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, zerocopy = true; } - if (!zerocopy) { - copylen = len; - if (tun16_to_cpu(tun, gso.hdr_len) > good_linear) - linear = good_linear; - else - linear = tun16_to_cpu(tun, gso.hdr_len); - } - - skb = tun_alloc_skb(tfile, align, copylen, linear, noblock); - if (IS_ERR(skb)) { - if (PTR_ERR(skb) != -EAGAIN) + if (tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) { + /* For the packet that is not easy to be processed + * (e.g gso or jumbo packet), we will do it at after + * skb was created with generic XDP routine. 
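+ * tun_build_skb() may also consume the frame itself (XDP_TX, redirect
+ * or drop), in which case it returns NULL and we simply report the
+ * whole request as consumed (return total_len).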
+ */ + skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp); + if (IS_ERR(skb)) { this_cpu_inc(tun->pcpu_stats->rx_dropped); - return PTR_ERR(skb); - } + return PTR_ERR(skb); + } + if (!skb) + return total_len; + } else { + if (!zerocopy) { + copylen = len; + if (tun16_to_cpu(tun, gso.hdr_len) > good_linear) + linear = good_linear; + else + linear = tun16_to_cpu(tun, gso.hdr_len); + } - if (zerocopy) - err = zerocopy_sg_from_iter(skb, from); - else - err = skb_copy_datagram_from_iter(skb, 0, from, len); + skb = tun_alloc_skb(tfile, align, copylen, linear, noblock); + if (IS_ERR(skb)) { + if (PTR_ERR(skb) != -EAGAIN) + this_cpu_inc(tun->pcpu_stats->rx_dropped); + return PTR_ERR(skb); + } - if (err) { - this_cpu_inc(tun->pcpu_stats->rx_dropped); - kfree_skb(skb); - return -EFAULT; + if (zerocopy) + err = zerocopy_sg_from_iter(skb, from); + else + err = skb_copy_datagram_from_iter(skb, 0, from, len); + + if (err) { + this_cpu_inc(tun->pcpu_stats->rx_dropped); + kfree_skb(skb); + return -EFAULT; + } } if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) { @@ -1298,11 +1496,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, switch (tun->flags & TUN_TYPE_MASK) { case IFF_TUN: if (tun->flags & IFF_NO_PI) { - switch (skb->data[0] & 0xf0) { - case 0x40: + u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0; + + switch (ip_version) { + case 4: pi.proto = htons(ETH_P_IP); break; - case 0x60: + case 6: pi.proto = htons(ETH_P_IPV6); break; default: @@ -1334,6 +1534,22 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, skb_reset_network_header(skb); skb_probe_transport_header(skb, 0); + if (skb_xdp) { + struct bpf_prog *xdp_prog; + int ret; + + rcu_read_lock(); + xdp_prog = rcu_dereference(tun->xdp_prog); + if (xdp_prog) { + ret = do_xdp_generic(xdp_prog, skb); + if (ret != XDP_PASS) { + rcu_read_unlock(); + return total_len; + } + } + rcu_read_unlock(); + } + rxhash = __skb_get_hash_symmetric(skb); #ifndef CONFIG_4KSTACKS tun_rx_batched(tun, tfile, skb, more); @@ -1924,11 +2140,6 @@ static int set_offload(struct tun_struct *tun, unsigned long arg) features |= NETIF_F_TSO6; arg &= ~(TUN_F_TSO4|TUN_F_TSO6); } - - if (arg & TUN_F_UFO) { - features |= NETIF_F_UFO; - arg &= ~TUN_F_UFO; - } } /* This gives the user a way to test for new features in future by @@ -2540,7 +2751,7 @@ static int tun_queue_resize(struct tun_struct *tun) int n = tun->numqueues + tun->numdisabled; int ret, i; - arrays = kmalloc(sizeof *arrays * n, GFP_KERNEL); + arrays = kmalloc_array(n, sizeof(*arrays), GFP_KERNEL); if (!arrays) return -ENOMEM; diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c index fce92f0e5abd..dbc90313f472 100644 --- a/drivers/net/usb/catc.c +++ b/drivers/net/usb/catc.c @@ -961,7 +961,7 @@ static void catc_disconnect(struct usb_interface *intf) * Module functions and tables. 
*/ -static struct usb_device_id catc_id_table [] = { +static const struct usb_device_id catc_id_table[] = { { USB_DEVICE(0x0423, 0xa) }, /* CATC Netmate, Belkin F5U011 */ { USB_DEVICE(0x0423, 0xc) }, /* CATC Netmate II, Belkin F5U111 */ { USB_DEVICE(0x08d1, 0x1) }, /* smartBridges smartNIC */ diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c index 2952cb570996..288ecd999171 100644 --- a/drivers/net/usb/cdc-phonet.c +++ b/drivers/net/usb/cdc-phonet.c @@ -304,7 +304,7 @@ static void usbpn_setup(struct net_device *dev) /* * USB driver callbacks */ -static struct usb_device_id usbpn_ids[] = { +static const struct usb_device_id usbpn_ids[] = { { .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_CLASS diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 8ab281b478f2..52ea80bcd639 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -54,11 +54,19 @@ static int is_wireless_rndis(struct usb_interface_descriptor *desc) desc->bInterfaceProtocol == 3); } +static int is_novatel_rndis(struct usb_interface_descriptor *desc) +{ + return (desc->bInterfaceClass == USB_CLASS_MISC && + desc->bInterfaceSubClass == 4 && + desc->bInterfaceProtocol == 1); +} + #else #define is_rndis(desc) 0 #define is_activesync(desc) 0 #define is_wireless_rndis(desc) 0 +#define is_novatel_rndis(desc) 0 #endif @@ -150,7 +158,8 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf) */ rndis = (is_rndis(&intf->cur_altsetting->desc) || is_activesync(&intf->cur_altsetting->desc) || - is_wireless_rndis(&intf->cur_altsetting->desc)); + is_wireless_rndis(&intf->cur_altsetting->desc) || + is_novatel_rndis(&intf->cur_altsetting->desc)); memset(info, 0, sizeof(*info)); info->control = intf; @@ -547,9 +556,11 @@ static const struct driver_info wwan_info = { #define REALTEK_VENDOR_ID 0x0bda #define SAMSUNG_VENDOR_ID 0x04e8 #define LENOVO_VENDOR_ID 0x17ef +#define LINKSYS_VENDOR_ID 0x13b1 #define NVIDIA_VENDOR_ID 0x0955 #define HP_VENDOR_ID 0x03f0 #define MICROSOFT_VENDOR_ID 0x045e +#define UBLOX_VENDOR_ID 0x1546 static const struct usb_device_id products[] = { /* BLACKLIST !! 
@@ -737,6 +748,15 @@ static const struct usb_device_id products[] = { .driver_info = 0, }, +#if IS_ENABLED(CONFIG_USB_RTL8152) +/* Linksys USB3GIGV1 Ethernet Adapter */ +{ + USB_DEVICE_AND_INTERFACE_INFO(LINKSYS_VENDOR_ID, 0x0041, USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), + .driver_info = 0, +}, +#endif + /* ThinkPad USB-C Dock (based on Realtek RTL8153) */ { USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3062, USB_CLASS_COMM, @@ -849,6 +869,18 @@ static const struct usb_device_id products[] = { USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&zte_cdc_info, +}, { + /* U-blox TOBY-L2 */ + USB_DEVICE_AND_INTERFACE_INFO(UBLOX_VENDOR_ID, 0x1143, USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, + USB_CDC_PROTO_NONE), + .driver_info = (unsigned long)&wwan_info, +}, { + /* U-blox SARA-U2 */ + USB_DEVICE_AND_INTERFACE_INFO(UBLOX_VENDOR_ID, 0x1104, USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, + USB_CDC_PROTO_NONE), + .driver_info = (unsigned long)&wwan_info, }, { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 9c80e80c5493..47cab1bde065 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -367,7 +367,7 @@ static struct attribute *cdc_ncm_sysfs_attrs[] = { NULL, }; -static struct attribute_group cdc_ncm_sysfs_attr_group = { +static const struct attribute_group cdc_ncm_sysfs_attr_group = { .name = "cdc_ncm", .attrs = cdc_ncm_sysfs_attrs, }; diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c index 0f213ea22c75..d49c7103085e 100644 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c @@ -87,7 +87,7 @@ #define IPHETH_CARRIER_CHECK_TIMEOUT round_jiffies_relative(1 * HZ) #define IPHETH_CARRIER_ON 0x04 -static struct usb_device_id ipheth_table[] = { +static const struct usb_device_id ipheth_table[] = { { USB_DEVICE_AND_INTERFACE_INFO( USB_VENDOR_APPLE, USB_PRODUCT_IPHONE, IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c index 92e4fd29ae44..f1605833c5cf 100644 --- a/drivers/net/usb/kaweth.c +++ b/drivers/net/usb/kaweth.c @@ -125,7 +125,7 @@ static int kaweth_resume(struct usb_interface *intf); /**************************************************************** * usb_device_id ****************************************************************/ -static struct usb_device_id usb_klsi_table[] = { +static const struct usb_device_id usb_klsi_table[] = { { USB_DEVICE(0x03e8, 0x0008) }, /* AOX Endpoints USB Ethernet */ { USB_DEVICE(0x04bb, 0x0901) }, /* I-O DATA USB-ET/T */ { USB_DEVICE(0x0506, 0x03e8) }, /* 3Com 3C19250 */ diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index b99a7fb09f8e..0161f77641fa 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -1265,30 +1265,45 @@ static int lan78xx_ethtool_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee, u8 *data) { struct lan78xx_net *dev = netdev_priv(netdev); + int ret; + + ret = usb_autopm_get_interface(dev->intf); + if (ret) + return ret; ee->magic = LAN78XX_EEPROM_MAGIC; - return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data); + ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data); + + usb_autopm_put_interface(dev->intf); + + return ret; } static int lan78xx_ethtool_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee, u8 *data) { struct lan78xx_net *dev = netdev_priv(netdev); + int ret; - /* Allow entire eeprom update 
only */ - if ((ee->magic == LAN78XX_EEPROM_MAGIC) && - (ee->offset == 0) && - (ee->len == 512) && - (data[0] == EEPROM_INDICATOR)) - return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data); + ret = usb_autopm_get_interface(dev->intf); + if (ret) + return ret; + + /* Invalid EEPROM_INDICATOR at offset zero will result in a failure + * to load data from EEPROM + */ + if (ee->magic == LAN78XX_EEPROM_MAGIC) + ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data); else if ((ee->magic == LAN78XX_OTP_MAGIC) && (ee->offset == 0) && (ee->len == 512) && (data[0] == OTP_INDICATOR_1)) - return lan78xx_write_raw_otp(dev, ee->offset, ee->len, data); + ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data); - return -EINVAL; + usb_autopm_put_interface(dev->intf); + + return ret; } static void lan78xx_get_strings(struct net_device *netdev, u32 stringset, @@ -2434,7 +2449,6 @@ static int lan78xx_reset(struct lan78xx_net *dev) /* LAN7801 only has RGMII mode */ if (dev->chipid == ID_REV_CHIP_ID_7801_) buf &= ~MAC_CR_GMII_EN_; - buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_; ret = lan78xx_write_reg(dev, MAC_CR, buf); ret = lan78xx_read_reg(dev, MAC_TX, &buf); diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 6cfffeff6108..941ece08ba78 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -613,6 +613,7 @@ enum rtl8152_flags { #define VENDOR_ID_MICROSOFT 0x045e #define VENDOR_ID_SAMSUNG 0x04e8 #define VENDOR_ID_LENOVO 0x17ef +#define VENDOR_ID_LINKSYS 0x13b1 #define VENDOR_ID_NVIDIA 0x0955 #define MCU_TYPE_PLA 0x0100 @@ -5303,7 +5304,7 @@ static void rtl8152_disconnect(struct usb_interface *intf) .bInterfaceProtocol = USB_CDC_PROTO_NONE /* table of devices that work with this driver */ -static struct usb_device_id rtl8152_table[] = { +static const struct usb_device_id rtl8152_table[] = { {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8050)}, {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)}, {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)}, @@ -5316,6 +5317,7 @@ static struct usb_device_id rtl8152_table[] = { {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)}, {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x720c)}, {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214)}, + {REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)}, {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)}, {} }; diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index a151f267aebb..b807c91abe1d 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c @@ -632,6 +632,10 @@ static const struct usb_device_id products [] = { /* RNDIS for tethering */ USB_INTERFACE_INFO(USB_CLASS_WIRELESS_CONTROLLER, 1, 3), .driver_info = (unsigned long) &rndis_info, +}, { + /* Novatel Verizon USB730L */ + USB_INTERFACE_INFO(USB_CLASS_MISC, 4, 1), + .driver_info = (unsigned long) &rndis_info, }, { }, // END }; diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index daaa88a66f40..5f565bd574da 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c @@ -112,7 +112,7 @@ #undef EEPROM_WRITE /* table of devices that work with this driver */ -static struct usb_device_id rtl8150_table[] = { +static const struct usb_device_id rtl8150_table[] = { {USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8150)}, {USB_DEVICE(VENDOR_ID_MELCO, PRODUCT_ID_LUAKTX)}, {USB_DEVICE(VENDOR_ID_MICRONET, PRODUCT_ID_SP128AR)}, diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 340c13484e5c..309b88acd3d0 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ 
-526,7 +526,7 @@ static void smsc95xx_set_multicast(struct net_device *netdev) static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex, u16 lcladv, u16 rmtadv) { - u32 flow, afc_cfg = 0; + u32 flow = 0, afc_cfg; int ret = smsc95xx_read_reg(dev, AFC_CFG, &afc_cfg); if (ret < 0) @@ -537,20 +537,19 @@ static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex, if (cap & FLOW_CTRL_RX) flow = 0xFFFF0002; - else - flow = 0; - if (cap & FLOW_CTRL_TX) + if (cap & FLOW_CTRL_TX) { afc_cfg |= 0xF; - else + flow |= 0xFFFF0000; + } else { afc_cfg &= ~0xF; + } netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s\n", cap & FLOW_CTRL_RX ? "enabled" : "disabled", cap & FLOW_CTRL_TX ? "enabled" : "disabled"); } else { netif_dbg(dev, link, dev->net, "half duplex\n"); - flow = 0; afc_cfg |= 0xF; } diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index b06169ea60dc..511f8339fa96 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -57,6 +57,13 @@ DECLARE_EWMA(pkt_len, 0, 64) #define VIRTNET_DRIVER_VERSION "1.0.0" +static const unsigned long guest_offloads[] = { + VIRTIO_NET_F_GUEST_TSO4, + VIRTIO_NET_F_GUEST_TSO6, + VIRTIO_NET_F_GUEST_ECN, + VIRTIO_NET_F_GUEST_UFO +}; + struct virtnet_stats { struct u64_stats_sync tx_syncp; struct u64_stats_sync rx_syncp; @@ -164,10 +171,13 @@ struct virtnet_info { u8 ctrl_promisc; u8 ctrl_allmulti; u16 ctrl_vid; + u64 ctrl_offloads; /* Ethtool settings */ u8 duplex; u32 speed; + + unsigned long guest_offloads; }; struct padded_vnet_hdr { @@ -270,6 +280,23 @@ static void skb_xmit_done(struct virtqueue *vq) netif_wake_subqueue(vi->dev, vq2txq(vq)); } +#define MRG_CTX_HEADER_SHIFT 22 +static void *mergeable_len_to_ctx(unsigned int truesize, + unsigned int headroom) +{ + return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize); +} + +static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx) +{ + return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT; +} + +static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx) +{ + return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1); +} + /* Called from bottom half context */ static struct sk_buff *page_to_skb(struct virtnet_info *vi, struct receive_queue *rq, @@ -292,7 +319,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi, hdr_len = vi->hdr_len; if (vi->mergeable_rx_bufs) - hdr_padded_len = sizeof *hdr; + hdr_padded_len = sizeof(*hdr); else hdr_padded_len = sizeof(struct padded_vnet_hdr); @@ -390,120 +417,28 @@ static unsigned int virtnet_get_headroom(struct virtnet_info *vi) return vi->xdp_queue_pairs ? 
VIRTIO_XDP_HEADROOM : 0; } -static struct sk_buff *receive_small(struct net_device *dev, - struct virtnet_info *vi, - struct receive_queue *rq, - void *buf, unsigned int len) -{ - struct sk_buff *skb; - struct bpf_prog *xdp_prog; - unsigned int xdp_headroom = virtnet_get_headroom(vi); - unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom; - unsigned int headroom = vi->hdr_len + header_offset; - unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) + - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); - unsigned int delta = 0; - len -= vi->hdr_len; - - rcu_read_lock(); - xdp_prog = rcu_dereference(rq->xdp_prog); - if (xdp_prog) { - struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset; - struct xdp_buff xdp; - void *orig_data; - u32 act; - - if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags)) - goto err_xdp; - - xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len; - xdp.data = xdp.data_hard_start + xdp_headroom; - xdp.data_end = xdp.data + len; - orig_data = xdp.data; - act = bpf_prog_run_xdp(xdp_prog, &xdp); - - switch (act) { - case XDP_PASS: - /* Recalculate length in case bpf program changed it */ - delta = orig_data - xdp.data; - break; - case XDP_TX: - if (unlikely(!virtnet_xdp_xmit(vi, rq, &xdp))) - trace_xdp_exception(vi->dev, xdp_prog, act); - rcu_read_unlock(); - goto xdp_xmit; - default: - bpf_warn_invalid_xdp_action(act); - case XDP_ABORTED: - trace_xdp_exception(vi->dev, xdp_prog, act); - case XDP_DROP: - goto err_xdp; - } - } - rcu_read_unlock(); - - skb = build_skb(buf, buflen); - if (!skb) { - put_page(virt_to_head_page(buf)); - goto err; - } - skb_reserve(skb, headroom - delta); - skb_put(skb, len + delta); - if (!delta) { - buf += header_offset; - memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len); - } /* keep zeroed vnet hdr since packet was changed by bpf */ - -err: - return skb; - -err_xdp: - rcu_read_unlock(); - dev->stats.rx_dropped++; - put_page(virt_to_head_page(buf)); -xdp_xmit: - return NULL; -} - -static struct sk_buff *receive_big(struct net_device *dev, - struct virtnet_info *vi, - struct receive_queue *rq, - void *buf, - unsigned int len) -{ - struct page *page = buf; - struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE); - - if (unlikely(!skb)) - goto err; - - return skb; - -err: - dev->stats.rx_dropped++; - give_pages(rq, page); - return NULL; -} - -/* The conditions to enable XDP should preclude the underlying device from - * sending packets across multiple buffers (num_buf > 1). However per spec - * it does not appear to be illegal to do so but rather just against convention. - * So in order to avoid making a system unresponsive the packets are pushed - * into a page and the XDP program is run. This will be extremely slow and we - * push a warning to the user to fix this as soon as possible. Fixing this may - * require resolving the underlying hardware to determine why multiple buffers - * are being received or simply loading the XDP program in the ingress stack - * after the skb is built because there is no advantage to running it here - * anymore. +/* We copy the packet for XDP in the following cases: + * + * 1) Packet is scattered across multiple rx buffers. + * 2) Headroom space is insufficient. + * + * This is inefficient but it's a temporary condition that + * we hit right after XDP is enabled and until queue is refilled + * with large buffers with sufficient headroom - so it should affect + * at most queue size packets. 
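+ * (The copying is done by xdp_linearize_page() below, which collapses
+ * the scattered buffers into one freshly allocated page with the
+ * requested headroom.)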
+ * Afterwards, the conditions to enable + * XDP should preclude the underlying device from sending packets + * across multiple buffers (num_buf > 1), and we make sure buffers + * have enough headroom. */ static struct page *xdp_linearize_page(struct receive_queue *rq, u16 *num_buf, struct page *p, int offset, + int page_off, unsigned int *len) { struct page *page = alloc_page(GFP_ATOMIC); - unsigned int page_off = VIRTIO_XDP_HEADROOM; if (!page) return NULL; @@ -545,6 +480,125 @@ static struct page *xdp_linearize_page(struct receive_queue *rq, return NULL; } +static struct sk_buff *receive_small(struct net_device *dev, + struct virtnet_info *vi, + struct receive_queue *rq, + void *buf, void *ctx, + unsigned int len) +{ + struct sk_buff *skb; + struct bpf_prog *xdp_prog; + unsigned int xdp_headroom = (unsigned long)ctx; + unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom; + unsigned int headroom = vi->hdr_len + header_offset; + unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + struct page *page = virt_to_head_page(buf); + unsigned int delta = 0; + struct page *xdp_page; + len -= vi->hdr_len; + + rcu_read_lock(); + xdp_prog = rcu_dereference(rq->xdp_prog); + if (xdp_prog) { + struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset; + struct xdp_buff xdp; + void *orig_data; + u32 act; + + if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags)) + goto err_xdp; + + if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) { + int offset = buf - page_address(page) + header_offset; + unsigned int tlen = len + vi->hdr_len; + u16 num_buf = 1; + + xdp_headroom = virtnet_get_headroom(vi); + header_offset = VIRTNET_RX_PAD + xdp_headroom; + headroom = vi->hdr_len + header_offset; + buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + xdp_page = xdp_linearize_page(rq, &num_buf, page, + offset, header_offset, + &tlen); + if (!xdp_page) + goto err_xdp; + + buf = page_address(xdp_page); + put_page(page); + page = xdp_page; + } + + xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len; + xdp.data = xdp.data_hard_start + xdp_headroom; + xdp.data_end = xdp.data + len; + orig_data = xdp.data; + act = bpf_prog_run_xdp(xdp_prog, &xdp); + + switch (act) { + case XDP_PASS: + /* Recalculate length in case bpf program changed it */ + delta = orig_data - xdp.data; + break; + case XDP_TX: + if (unlikely(!virtnet_xdp_xmit(vi, rq, &xdp))) + trace_xdp_exception(vi->dev, xdp_prog, act); + rcu_read_unlock(); + goto xdp_xmit; + default: + bpf_warn_invalid_xdp_action(act); + case XDP_ABORTED: + trace_xdp_exception(vi->dev, xdp_prog, act); + case XDP_DROP: + goto err_xdp; + } + } + rcu_read_unlock(); + + skb = build_skb(buf, buflen); + if (!skb) { + put_page(page); + goto err; + } + skb_reserve(skb, headroom - delta); + skb_put(skb, len + delta); + if (!delta) { + buf += header_offset; + memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len); + } /* keep zeroed vnet hdr since packet was changed by bpf */ + +err: + return skb; + +err_xdp: + rcu_read_unlock(); + dev->stats.rx_dropped++; + put_page(page); +xdp_xmit: + return NULL; +} + +static struct sk_buff *receive_big(struct net_device *dev, + struct virtnet_info *vi, + struct receive_queue *rq, + void *buf, + unsigned int len) +{ + struct page *page = buf; + struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE); + + if (unlikely(!skb)) + goto err; + + return skb; + +err: + dev->stats.rx_dropped++; + give_pages(rq, page); + return NULL; +} + 
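+/* For mergeable buffers the per-buffer context packs the truesize into
+ * the low MRG_CTX_HEADER_SHIFT bits and the headroom into the bits
+ * above it; mergeable_len_to_ctx() builds the value and the two
+ * mergeable_ctx_to_*() helpers above unpack it.
+ */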
static struct sk_buff *receive_mergeable(struct net_device *dev, struct virtnet_info *vi, struct receive_queue *rq, @@ -559,6 +613,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, struct sk_buff *head_skb, *curr_skb; struct bpf_prog *xdp_prog; unsigned int truesize; + unsigned int headroom = mergeable_ctx_to_headroom(ctx); head_skb = NULL; @@ -571,10 +626,13 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, u32 act; /* This happens when rx buffer size is underestimated */ - if (unlikely(num_buf > 1)) { + if (unlikely(num_buf > 1 || + headroom < virtnet_get_headroom(vi))) { /* linearize data for XDP */ xdp_page = xdp_linearize_page(rq, &num_buf, - page, offset, &len); + page, offset, + VIRTIO_XDP_HEADROOM, + &len); if (!xdp_page) goto err_xdp; offset = VIRTIO_XDP_HEADROOM; @@ -639,13 +697,14 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, } rcu_read_unlock(); - if (unlikely(len > (unsigned long)ctx)) { + truesize = mergeable_ctx_to_truesize(ctx); + if (unlikely(len > truesize)) { pr_debug("%s: rx error: len %u exceeds truesize %lu\n", dev->name, len, (unsigned long)ctx); dev->stats.rx_length_errors++; goto err_skb; } - truesize = (unsigned long)ctx; + head_skb = page_to_skb(vi, rq, page, offset, len, truesize); curr_skb = head_skb; @@ -665,13 +724,14 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, } page = virt_to_head_page(buf); - if (unlikely(len > (unsigned long)ctx)) { + + truesize = mergeable_ctx_to_truesize(ctx); + if (unlikely(len > truesize)) { pr_debug("%s: rx error: len %u exceeds truesize %lu\n", dev->name, len, (unsigned long)ctx); dev->stats.rx_length_errors++; goto err_skb; } - truesize = (unsigned long)ctx; num_skb_frags = skb_shinfo(curr_skb)->nr_frags; if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) { @@ -754,7 +814,7 @@ static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq, else if (vi->big_packets) skb = receive_big(dev, vi, rq, buf, len); else - skb = receive_small(dev, vi, rq, buf, len); + skb = receive_small(dev, vi, rq, buf, ctx, len); if (unlikely(!skb)) return 0; @@ -787,12 +847,18 @@ static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq, return 0; } +/* Unlike mergeable buffers, all buffers are allocated to the + * same size, except for the headroom. For this reason we do + * not need to use mergeable_len_to_ctx here - it is enough + * to store the headroom as the context ignoring the truesize. 
+ */ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, gfp_t gfp) { struct page_frag *alloc_frag = &rq->alloc_frag; char *buf; unsigned int xdp_headroom = virtnet_get_headroom(vi); + void *ctx = (void *)(unsigned long)xdp_headroom; int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom; int err; @@ -806,10 +872,9 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, alloc_frag->offset += len; sg_init_one(rq->sg, buf + VIRTNET_RX_PAD + xdp_headroom, vi->hdr_len + GOOD_PACKET_LEN); - err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, buf, gfp); + err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); if (err < 0) put_page(virt_to_head_page(buf)); - return err; } @@ -902,7 +967,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, } sg_init_one(rq->sg, buf, len); - ctx = (void *)(unsigned long)len; + ctx = mergeable_len_to_ctx(len, headroom); err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); if (err < 0) put_page(virt_to_head_page(buf)); @@ -1014,7 +1079,7 @@ static int virtnet_receive(struct receive_queue *rq, int budget) void *buf; struct virtnet_stats *stats = this_cpu_ptr(vi->stats); - if (vi->mergeable_rx_bufs) { + if (!vi->big_packets || vi->mergeable_rx_bufs) { void *ctx; while (received < budget && @@ -1813,7 +1878,6 @@ static void virtnet_freeze_down(struct virtio_device *vdev) } static int init_vqs(struct virtnet_info *vi); -static void _remove_vq_common(struct virtnet_info *vi); static int virtnet_restore_up(struct virtio_device *vdev) { @@ -1842,37 +1906,45 @@ static int virtnet_restore_up(struct virtio_device *vdev) return err; } -static int virtnet_reset(struct virtnet_info *vi, int curr_qp, int xdp_qp) +static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads) { - struct virtio_device *dev = vi->vdev; - int ret; + struct scatterlist sg; + vi->ctrl_offloads = cpu_to_virtio64(vi->vdev, offloads); - virtio_config_disable(dev); - dev->failed = dev->config->get_status(dev) & VIRTIO_CONFIG_S_FAILED; - virtnet_freeze_down(dev); - _remove_vq_common(vi); + sg_init_one(&sg, &vi->ctrl_offloads, sizeof(vi->ctrl_offloads)); - virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE); - virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER); + if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS, + VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) { + dev_warn(&vi->dev->dev, "Fail to set guest offload. 
\n"); + return -EINVAL; + } - ret = virtio_finalize_features(dev); - if (ret) - goto err; - - vi->xdp_queue_pairs = xdp_qp; - ret = virtnet_restore_up(dev); - if (ret) - goto err; - ret = _virtnet_set_queues(vi, curr_qp); - if (ret) - goto err; - - virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK); - virtio_config_enable(dev); return 0; -err: - virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED); - return ret; +} + +static int virtnet_clear_guest_offloads(struct virtnet_info *vi) +{ + u64 offloads = 0; + + if (!vi->guest_offloads) + return 0; + + if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM)) + offloads = 1ULL << VIRTIO_NET_F_GUEST_CSUM; + + return virtnet_set_guest_offloads(vi, offloads); +} + +static int virtnet_restore_guest_offloads(struct virtnet_info *vi) +{ + u64 offloads = vi->guest_offloads; + + if (!vi->guest_offloads) + return 0; + if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM)) + offloads |= 1ULL << VIRTIO_NET_F_GUEST_CSUM; + + return virtnet_set_guest_offloads(vi, offloads); } static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, @@ -1884,10 +1956,11 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, u16 xdp_qp = 0, curr_qp; int i, err; - if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || - virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || - virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || - virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO)) { + if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) + && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || + virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || + virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || + virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO))) { NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO, disable LRO first"); return -EOPNOTSUPP; } @@ -1921,35 +1994,35 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, return PTR_ERR(prog); } - /* Changing the headroom in buffers is a disruptive operation because - * existing buffers must be flushed and reallocated. This will happen - * when a xdp program is initially added or xdp is disabled by removing - * the xdp program resulting in number of XDP queues changing. - */ - if (vi->xdp_queue_pairs != xdp_qp) { - err = virtnet_reset(vi, curr_qp + xdp_qp, xdp_qp); - if (err) { - dev_warn(&dev->dev, "XDP reset failure.\n"); - goto virtio_reset_err; - } - } + /* Make sure NAPI is not using any XDP TX queues for RX. */ + for (i = 0; i < vi->max_queue_pairs; i++) + napi_disable(&vi->rq[i].napi); netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); + err = _virtnet_set_queues(vi, curr_qp + xdp_qp); + if (err) + goto err; + vi->xdp_queue_pairs = xdp_qp; for (i = 0; i < vi->max_queue_pairs; i++) { old_prog = rtnl_dereference(vi->rq[i].xdp_prog); rcu_assign_pointer(vi->rq[i].xdp_prog, prog); + if (i == 0) { + if (!old_prog) + virtnet_clear_guest_offloads(vi); + if (!prog) + virtnet_restore_guest_offloads(vi); + } if (old_prog) bpf_prog_put(old_prog); + virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); } return 0; -virtio_reset_err: - /* On reset error do our best to unwind XDP changes inflight and return - * error up to user space for resolution. The underlying reset hung on - * us so not much we can do here. 
- */ +err: + for (i = 0; i < vi->max_queue_pairs; i++) + virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); if (prog) bpf_prog_sub(prog, vi->max_queue_pairs - 1); return err; @@ -2182,7 +2255,7 @@ static int virtnet_find_vqs(struct virtnet_info *vi) names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL); if (!names) goto err_names; - if (vi->mergeable_rx_bufs) { + if (!vi->big_packets || vi->mergeable_rx_bufs) { ctx = kzalloc(total_vqs * sizeof(*ctx), GFP_KERNEL); if (!ctx) goto err_ctx; @@ -2303,7 +2376,7 @@ static int init_vqs(struct virtnet_info *vi) #ifdef CONFIG_SYSFS static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue, - struct rx_queue_attribute *attribute, char *buf) + char *buf) { struct virtnet_info *vi = netdev_priv(queue->dev); unsigned int queue_index = get_netdev_rx_queue_index(queue); @@ -2428,7 +2501,7 @@ static int virtnet_probe(struct virtio_device *vdev) dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { - dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO + dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6; } /* Individual feature bits: what can host handle? */ @@ -2438,13 +2511,11 @@ static int virtnet_probe(struct virtio_device *vdev) dev->hw_features |= NETIF_F_TSO6; if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) dev->hw_features |= NETIF_F_TSO_ECN; - if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO)) - dev->hw_features |= NETIF_F_UFO; dev->features |= NETIF_F_GSO_ROBUST; if (gso) - dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO); + dev->features |= dev->hw_features & NETIF_F_ALL_TSO; /* (!csum && gso) case will be fixed by register_netdev() */ } if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM)) @@ -2577,6 +2648,10 @@ static int virtnet_probe(struct virtio_device *vdev) netif_carrier_on(dev); } + for (i = 0; i < ARRAY_SIZE(guest_offloads); i++) + if (virtio_has_feature(vi->vdev, guest_offloads[i])) + set_bit(guest_offloads[i], &vi->guest_offloads); + pr_debug("virtnet: registered device %s with %d RX and TX vq's\n", dev->name, max_queue_pairs); @@ -2597,15 +2672,6 @@ static int virtnet_probe(struct virtio_device *vdev) return err; } -static void _remove_vq_common(struct virtnet_info *vi) -{ - vi->vdev->config->reset(vi->vdev); - free_unused_bufs(vi); - _free_receive_bufs(vi); - free_receive_page_frags(vi); - virtnet_del_vqs(vi); -} - static void remove_vq_common(struct virtnet_info *vi) { vi->vdev->config->reset(vi->vdev); @@ -2637,8 +2703,7 @@ static void virtnet_remove(struct virtio_device *vdev) free_netdev(vi->dev); } -#ifdef CONFIG_PM_SLEEP -static int virtnet_freeze(struct virtio_device *vdev) +static __maybe_unused int virtnet_freeze(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; @@ -2649,7 +2714,7 @@ static int virtnet_freeze(struct virtio_device *vdev) return 0; } -static int virtnet_restore(struct virtio_device *vdev) +static __maybe_unused int virtnet_restore(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; int err; @@ -2665,7 +2730,6 @@ static int virtnet_restore(struct virtio_device *vdev) return 0; } -#endif static struct virtio_device_id id_table[] = { { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID }, @@ -2682,7 +2746,7 @@ static struct virtio_device_id id_table[] = { VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \ VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \ VIRTIO_NET_F_CTRL_MAC_ADDR, \ - VIRTIO_NET_F_MTU + VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS static unsigned int features[] = { VIRTNET_FEATURES, 
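The virtio_net hunks above rely on mergeable_len_to_ctx(), mergeable_ctx_to_truesize() and mergeable_ctx_to_headroom(), whose definitions fall outside this excerpt. A minimal sketch of how helpers like these could pack both values into the opaque per-buffer context handed to virtqueue_add_inbuf_ctx() is given below; the shift width and field layout are assumptions made for illustration, not necessarily the constants the driver uses.

/* Illustrative sketch only: pack truesize into the low bits and headroom
 * into the bits above it, so a single void *ctx can carry both through
 * virtqueue_add_inbuf_ctx().  EXAMPLE_MRG_CTX_HEADER_SHIFT is an assumed
 * value for this sketch, not the driver's constant.
 */
#define EXAMPLE_MRG_CTX_HEADER_SHIFT	22

static void *example_mergeable_len_to_ctx(unsigned int truesize,
					  unsigned int headroom)
{
	return (void *)(unsigned long)((headroom << EXAMPLE_MRG_CTX_HEADER_SHIFT) |
				       truesize);
}

static unsigned int example_mergeable_ctx_to_truesize(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx & ((1 << EXAMPLE_MRG_CTX_HEADER_SHIFT) - 1);
}

static unsigned int example_mergeable_ctx_to_headroom(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx >> EXAMPLE_MRG_CTX_HEADER_SHIFT;
}

Under such a scheme the receive paths above compare len against mergeable_ctx_to_truesize(ctx) rather than casting ctx to a length, and receive_mergeable() can detect insufficient headroom before running an XDP program.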
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 8a1eaf3c302a..9b243e6f3008 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -47,9 +47,7 @@ static unsigned int vrf_net_id; struct net_vrf { struct rtable __rcu *rth; - struct rtable __rcu *rth_local; struct rt6_info __rcu *rt6; - struct rt6_info __rcu *rt6_local; u32 tb_id; }; @@ -194,42 +192,10 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb, /* if dst.dev is loopback or the VRF device again this is locally * originated traffic destined to a local address. Short circuit - * to Rx path using our local dst + * to Rx path */ - if (dst->dev == net->loopback_dev || dst->dev == dev) { - struct net_vrf *vrf = netdev_priv(dev); - struct rt6_info *rt6_local; - - /* release looked up dst and use cached local dst */ - dst_release(dst); - - rcu_read_lock(); - - rt6_local = rcu_dereference(vrf->rt6_local); - if (unlikely(!rt6_local)) { - rcu_read_unlock(); - goto err; - } - - /* Ordering issue: cached local dst is created on newlink - * before the IPv6 initialization. Using the local dst - * requires rt6i_idev to be set so make sure it is. - */ - if (unlikely(!rt6_local->rt6i_idev)) { - rt6_local->rt6i_idev = in6_dev_get(dev); - if (!rt6_local->rt6i_idev) { - rcu_read_unlock(); - goto err; - } - } - - dst = &rt6_local->dst; - dst_hold(dst); - - rcu_read_unlock(); - - return vrf_local_xmit(skb, dev, &rt6_local->dst); - } + if (dst->dev == dev) + return vrf_local_xmit(skb, dev, dst); skb_dst_set(skb, dst); @@ -296,30 +262,10 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb, /* if dst.dev is loopback or the VRF device again this is locally * originated traffic destined to a local address. Short circuit - * to Rx path using our local dst + * to Rx path */ - if (rt->dst.dev == net->loopback_dev || rt->dst.dev == vrf_dev) { - struct net_vrf *vrf = netdev_priv(vrf_dev); - struct rtable *rth_local; - struct dst_entry *dst = NULL; - - ip_rt_put(rt); - - rcu_read_lock(); - - rth_local = rcu_dereference(vrf->rth_local); - if (likely(rth_local)) { - dst = &rth_local->dst; - dst_hold(dst); - } - - rcu_read_unlock(); - - if (unlikely(!dst)) - goto err; - - return vrf_local_xmit(skb, vrf_dev, dst); - } + if (rt->dst.dev == vrf_dev) + return vrf_local_xmit(skb, vrf_dev, &rt->dst); skb_dst_set(skb, &rt->dst); @@ -528,12 +474,10 @@ static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev, static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf) { struct rt6_info *rt6 = rtnl_dereference(vrf->rt6); - struct rt6_info *rt6_local = rtnl_dereference(vrf->rt6_local); struct net *net = dev_net(dev); struct dst_entry *dst; RCU_INIT_POINTER(vrf->rt6, NULL); - RCU_INIT_POINTER(vrf->rt6_local, NULL); synchronize_rcu(); /* move dev in dst's to loopback so this VRF device can be deleted @@ -546,19 +490,6 @@ static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf) dev_hold(dst->dev); dst_release(dst); } - - if (rt6_local) { - if (rt6_local->rt6i_idev) { - in6_dev_put(rt6_local->rt6i_idev); - rt6_local->rt6i_idev = NULL; - } - - dst = &rt6_local->dst; - dev_put(dst->dev); - dst->dev = net->loopback_dev; - dev_hold(dst->dev); - dst_release(dst); - } } static int vrf_rt6_create(struct net_device *dev) @@ -567,7 +498,7 @@ static int vrf_rt6_create(struct net_device *dev) struct net_vrf *vrf = netdev_priv(dev); struct net *net = dev_net(dev); struct fib6_table *rt6i_table; - struct rt6_info *rt6, *rt6_local; + struct rt6_info *rt6; int rc = -ENOMEM; /* IPv6 can be CONFIG enabled and then disabled 
runtime */ @@ -586,22 +517,7 @@ static int vrf_rt6_create(struct net_device *dev) rt6->rt6i_table = rt6i_table; rt6->dst.output = vrf_output6; - /* create a dst for local routing - packets sent locally - * to local address via the VRF device as a loopback - */ - rt6_local = ip6_dst_alloc(net, dev, flags); - if (!rt6_local) { - dst_release(&rt6->dst); - goto out; - } - - rt6_local->rt6i_idev = in6_dev_get(dev); - rt6_local->rt6i_flags = RTF_UP | RTF_NONEXTHOP | RTF_LOCAL; - rt6_local->rt6i_table = rt6i_table; - rt6_local->dst.input = ip6_input; - rcu_assign_pointer(vrf->rt6, rt6); - rcu_assign_pointer(vrf->rt6_local, rt6_local); rc = 0; out: @@ -788,12 +704,10 @@ static struct sk_buff *vrf_l3_out(struct net_device *vrf_dev, static void vrf_rtable_release(struct net_device *dev, struct net_vrf *vrf) { struct rtable *rth = rtnl_dereference(vrf->rth); - struct rtable *rth_local = rtnl_dereference(vrf->rth_local); struct net *net = dev_net(dev); struct dst_entry *dst; RCU_INIT_POINTER(vrf->rth, NULL); - RCU_INIT_POINTER(vrf->rth_local, NULL); synchronize_rcu(); /* move dev in dst's to loopback so this VRF device can be deleted @@ -806,20 +720,12 @@ static void vrf_rtable_release(struct net_device *dev, struct net_vrf *vrf) dev_hold(dst->dev); dst_release(dst); } - - if (rth_local) { - dst = &rth_local->dst; - dev_put(dst->dev); - dst->dev = net->loopback_dev; - dev_hold(dst->dev); - dst_release(dst); - } } static int vrf_rtable_create(struct net_device *dev) { struct net_vrf *vrf = netdev_priv(dev); - struct rtable *rth, *rth_local; + struct rtable *rth; if (!fib_new_table(dev_net(dev), vrf->tb_id)) return -ENOMEM; @@ -829,22 +735,10 @@ static int vrf_rtable_create(struct net_device *dev) if (!rth) return -ENOMEM; - /* create a dst for local ingress routing - packets sent locally - * to local address via the VRF device as a loopback - */ - rth_local = rt_dst_alloc(dev, RTCF_LOCAL, RTN_LOCAL, 1, 1, 0); - if (!rth_local) { - dst_release(&rth->dst); - return -ENOMEM; - } - rth->dst.output = vrf_output; rth->rt_table_id = vrf->tb_id; - rth_local->rt_table_id = vrf->tb_id; - rcu_assign_pointer(vrf->rth, rth); - rcu_assign_pointer(vrf->rth_local, rth_local); return 0; } @@ -1063,12 +957,12 @@ static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev, { const struct ipv6hdr *iph = ipv6_hdr(skb); struct flowi6 fl6 = { + .flowi6_iif = ifindex, + .flowi6_mark = skb->mark, + .flowi6_proto = iph->nexthdr, .daddr = iph->daddr, .saddr = iph->saddr, .flowlabel = ip6_flowinfo(iph), - .flowi6_mark = skb->mark, - .flowi6_proto = iph->nexthdr, - .flowi6_iif = ifindex, }; struct net *net = dev_net(vrf_dev); struct rt6_info *rt6; @@ -1371,10 +1265,14 @@ static int vrf_validate(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { if (tb[IFLA_ADDRESS]) { - if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) + if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) { + NL_SET_ERR_MSG(extack, "Invalid hardware address"); return -EINVAL; - if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) + } + if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) { + NL_SET_ERR_MSG(extack, "Invalid hardware address"); return -EADDRNOTAVAIL; + } } return 0; } @@ -1399,12 +1297,17 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev, struct net *net; int err; - if (!data || !data[IFLA_VRF_TABLE]) + if (!data || !data[IFLA_VRF_TABLE]) { + NL_SET_ERR_MSG(extack, "VRF table id is missing"); return -EINVAL; + } vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]); - if (vrf->tb_id == RT_TABLE_UNSPEC) + 
if (vrf->tb_id == RT_TABLE_UNSPEC) { + NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VRF_TABLE], + "Invalid VRF table id"); return -EINVAL; + } dev->priv_flags |= IFF_L3MDEV_MASTER; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index e17baac70f43..d7c49cf1d5e9 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #if IS_ENABLED(CONFIG_IPV6) @@ -1261,19 +1262,9 @@ static bool vxlan_parse_gpe_hdr(struct vxlanhdr *unparsed, if (gpe->oam_flag) return false; - switch (gpe->next_protocol) { - case VXLAN_GPE_NP_IPV4: - *protocol = htons(ETH_P_IP); - break; - case VXLAN_GPE_NP_IPV6: - *protocol = htons(ETH_P_IPV6); - break; - case VXLAN_GPE_NP_ETHERNET: - *protocol = htons(ETH_P_TEB); - break; - default: + *protocol = tun_p_to_eth_p(gpe->next_protocol); + if (!*protocol) return false; - } unparsed->vx_flags &= ~VXLAN_GPE_USED_BITS; return true; @@ -1799,19 +1790,10 @@ static int vxlan_build_gpe_hdr(struct vxlanhdr *vxh, u32 vxflags, struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)vxh; gpe->np_applied = 1; - - switch (protocol) { - case htons(ETH_P_IP): - gpe->next_protocol = VXLAN_GPE_NP_IPV4; - return 0; - case htons(ETH_P_IPV6): - gpe->next_protocol = VXLAN_GPE_NP_IPV6; - return 0; - case htons(ETH_P_TEB): - gpe->next_protocol = VXLAN_GPE_NP_ETHERNET; - return 0; - } - return -EPFNOSUPPORT; + gpe->next_protocol = tun_p_from_eth_p(protocol); + if (!gpe->next_protocol) + return -EPFNOSUPPORT; + return 0; } static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst, @@ -2609,7 +2591,7 @@ static struct device_type vxlan_type = { * supply the listening VXLAN udp ports. Callers are expected * to implement the ndo_udp_tunnel_add. */ -static void vxlan_push_rx_ports(struct net_device *dev) +static void vxlan_offload_rx_ports(struct net_device *dev, bool push) { struct vxlan_sock *vs; struct net *net = dev_net(dev); @@ -2618,11 +2600,19 @@ static void vxlan_push_rx_ports(struct net_device *dev) spin_lock(&vn->sock_lock); for (i = 0; i < PORT_HASH_SIZE; ++i) { - hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) - udp_tunnel_push_rx_port(dev, vs->sock, - (vs->flags & VXLAN_F_GPE) ? 
- UDP_TUNNEL_TYPE_VXLAN_GPE : - UDP_TUNNEL_TYPE_VXLAN); + hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) { + unsigned short type; + + if (vs->flags & VXLAN_F_GPE) + type = UDP_TUNNEL_TYPE_VXLAN_GPE; + else + type = UDP_TUNNEL_TYPE_VXLAN; + + if (push) + udp_tunnel_push_rx_port(dev, vs->sock, type); + else + udp_tunnel_drop_rx_port(dev, vs->sock, type); + } } spin_unlock(&vn->sock_lock); } @@ -2721,12 +2711,14 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[], { if (tb[IFLA_ADDRESS]) { if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) { - pr_debug("invalid link address (not ethernet)\n"); + NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_ADDRESS], + "Provided link layer address is not Ethernet"); return -EINVAL; } if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) { - pr_debug("invalid all zero ethernet address\n"); + NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_ADDRESS], + "Provided Ethernet address is not unicast"); return -EADDRNOTAVAIL; } } @@ -2734,18 +2726,27 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[], if (tb[IFLA_MTU]) { u32 mtu = nla_get_u32(tb[IFLA_MTU]); - if (mtu < ETH_MIN_MTU || mtu > ETH_MAX_MTU) + if (mtu < ETH_MIN_MTU || mtu > ETH_MAX_MTU) { + NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_MTU], + "MTU must be between 68 and 65535"); return -EINVAL; + } } - if (!data) + if (!data) { + NL_SET_ERR_MSG(extack, + "Required attributes not provided to perform the operation"); return -EINVAL; + } if (data[IFLA_VXLAN_ID]) { u32 id = nla_get_u32(data[IFLA_VXLAN_ID]); - if (id >= VXLAN_N_VID) + if (id >= VXLAN_N_VID) { + NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_ID], + "VXLAN ID must be lower than 16777216"); return -ERANGE; + } } if (data[IFLA_VXLAN_PORT_RANGE]) { @@ -2753,8 +2754,8 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[], = nla_data(data[IFLA_VXLAN_PORT_RANGE]); if (ntohs(p->high) < ntohs(p->low)) { - pr_debug("port range %u .. 
%u not valid\n", - ntohs(p->low), ntohs(p->high)); + NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_PORT_RANGE], + "Invalid source port range"); return -EINVAL; } } @@ -2911,7 +2912,8 @@ static int vxlan_sock_add(struct vxlan_dev *vxlan) static int vxlan_config_validate(struct net *src_net, struct vxlan_config *conf, struct net_device **lower, - struct vxlan_dev *old) + struct vxlan_dev *old, + struct netlink_ext_ack *extack) { struct vxlan_net *vn = net_generic(src_net, vxlan_net_id); struct vxlan_dev *tmp; @@ -2925,6 +2927,8 @@ static int vxlan_config_validate(struct net *src_net, struct vxlan_config *conf, */ if ((conf->flags & ~VXLAN_F_ALLOWED_GPE) || !(conf->flags & VXLAN_F_COLLECT_METADATA)) { + NL_SET_ERR_MSG(extack, + "VXLAN GPE does not support this combination of attributes"); return -EINVAL; } } @@ -2939,15 +2943,23 @@ static int vxlan_config_validate(struct net *src_net, struct vxlan_config *conf, conf->saddr.sa.sa_family = conf->remote_ip.sa.sa_family; } - if (conf->saddr.sa.sa_family != conf->remote_ip.sa.sa_family) + if (conf->saddr.sa.sa_family != conf->remote_ip.sa.sa_family) { + NL_SET_ERR_MSG(extack, + "Local and remote address must be from the same family"); return -EINVAL; + } - if (vxlan_addr_multicast(&conf->saddr)) + if (vxlan_addr_multicast(&conf->saddr)) { + NL_SET_ERR_MSG(extack, "Local address cannot be multicast"); return -EINVAL; + } if (conf->saddr.sa.sa_family == AF_INET6) { - if (!IS_ENABLED(CONFIG_IPV6)) + if (!IS_ENABLED(CONFIG_IPV6)) { + NL_SET_ERR_MSG(extack, + "IPv6 support not enabled in the kernel"); return -EPFNOSUPPORT; + } use_ipv6 = true; conf->flags |= VXLAN_F_IPV6; @@ -2959,46 +2971,68 @@ static int vxlan_config_validate(struct net *src_net, struct vxlan_config *conf, if (local_type & IPV6_ADDR_LINKLOCAL) { if (!(remote_type & IPV6_ADDR_LINKLOCAL) && - (remote_type != IPV6_ADDR_ANY)) + (remote_type != IPV6_ADDR_ANY)) { + NL_SET_ERR_MSG(extack, + "Invalid combination of local and remote address scopes"); return -EINVAL; + } conf->flags |= VXLAN_F_IPV6_LINKLOCAL; } else { if (remote_type == - (IPV6_ADDR_UNICAST | IPV6_ADDR_LINKLOCAL)) + (IPV6_ADDR_UNICAST | IPV6_ADDR_LINKLOCAL)) { + NL_SET_ERR_MSG(extack, + "Invalid combination of local and remote address scopes"); return -EINVAL; + } conf->flags &= ~VXLAN_F_IPV6_LINKLOCAL; } } } - if (conf->label && !use_ipv6) + if (conf->label && !use_ipv6) { + NL_SET_ERR_MSG(extack, + "Label attribute only applies to IPv6 VXLAN devices"); return -EINVAL; + } if (conf->remote_ifindex) { struct net_device *lowerdev; lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex); - if (!lowerdev) + if (!lowerdev) { + NL_SET_ERR_MSG(extack, + "Invalid local interface, device not found"); return -ENODEV; + } #if IS_ENABLED(CONFIG_IPV6) if (use_ipv6) { struct inet6_dev *idev = __in6_dev_get(lowerdev); - if (idev && idev->cnf.disable_ipv6) + if (idev && idev->cnf.disable_ipv6) { + NL_SET_ERR_MSG(extack, + "IPv6 support disabled by administrator"); return -EPERM; + } } #endif *lower = lowerdev; } else { - if (vxlan_addr_multicast(&conf->remote_ip)) + if (vxlan_addr_multicast(&conf->remote_ip)) { + NL_SET_ERR_MSG(extack, + "Local interface required for multicast remote destination"); + return -EINVAL; + } #if IS_ENABLED(CONFIG_IPV6) - if (conf->flags & VXLAN_F_IPV6_LINKLOCAL) + if (conf->flags & VXLAN_F_IPV6_LINKLOCAL) { + NL_SET_ERR_MSG(extack, + "Local interface required for link-local local/remote addresses"); return -EINVAL; + } #endif *lower = NULL; @@ -3030,6 +3064,8 @@ static int vxlan_config_validate(struct net 
*src_net, struct vxlan_config *conf, tmp->cfg.remote_ifindex != conf->remote_ifindex) continue; + NL_SET_ERR_MSG(extack, + "A VXLAN device with the specified VNI already exists"); return -EEXIST; } @@ -3089,14 +3125,14 @@ static void vxlan_config_apply(struct net_device *dev, } static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, - struct vxlan_config *conf, - bool changelink) + struct vxlan_config *conf, bool changelink, + struct netlink_ext_ack *extack) { struct vxlan_dev *vxlan = netdev_priv(dev); struct net_device *lowerdev; int ret; - ret = vxlan_config_validate(src_net, conf, &lowerdev, vxlan); + ret = vxlan_config_validate(src_net, conf, &lowerdev, vxlan, extack); if (ret) return ret; @@ -3106,13 +3142,14 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, } static int __vxlan_dev_create(struct net *net, struct net_device *dev, - struct vxlan_config *conf) + struct vxlan_config *conf, + struct netlink_ext_ack *extack) { struct vxlan_net *vn = net_generic(net, vxlan_net_id); struct vxlan_dev *vxlan = netdev_priv(dev); int err; - err = vxlan_dev_configure(net, dev, conf, false); + err = vxlan_dev_configure(net, dev, conf, false, extack); if (err) return err; @@ -3358,7 +3395,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev, if (err) return err; - return __vxlan_dev_create(src_net, dev, &conf); + return __vxlan_dev_create(src_net, dev, &conf, extack); } static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], @@ -3378,7 +3415,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], memcpy(&old_dst, dst, sizeof(struct vxlan_rdst)); - err = vxlan_dev_configure(vxlan->net, dev, &conf, true); + err = vxlan_dev_configure(vxlan->net, dev, &conf, true, extack); if (err) return err; @@ -3584,7 +3621,7 @@ struct net_device *vxlan_dev_create(struct net *net, const char *name, if (IS_ERR(dev)) return dev; - err = __vxlan_dev_create(net, dev, conf); + err = __vxlan_dev_create(net, dev, conf, NULL); if (err < 0) { free_netdev(dev); return ERR_PTR(err); @@ -3631,10 +3668,15 @@ static int vxlan_netdevice_event(struct notifier_block *unused, struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); - if (event == NETDEV_UNREGISTER) + if (event == NETDEV_UNREGISTER) { + vxlan_offload_rx_ports(dev, false); vxlan_handle_lowerdev_unregister(vn, dev); - else if (event == NETDEV_UDP_TUNNEL_PUSH_INFO) - vxlan_push_rx_ports(dev); + } else if (event == NETDEV_REGISTER) { + vxlan_offload_rx_ports(dev, true); + } else if (event == NETDEV_UDP_TUNNEL_PUSH_INFO || + event == NETDEV_UDP_TUNNEL_DROP_INFO) { + vxlan_offload_rx_ports(dev, event == NETDEV_UDP_TUNNEL_PUSH_INFO); + } return NOTIFY_DONE; } diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c index 799830ffcae2..a043fb1367bd 100644 --- a/drivers/net/wan/dscc4.c +++ b/drivers/net/wan/dscc4.c @@ -483,20 +483,20 @@ static void dscc4_tx_print(struct net_device *dev, static void dscc4_release_ring(struct dscc4_dev_priv *dpriv) { - struct pci_dev *pdev = dpriv->pci_priv->pdev; + struct device *d = &dpriv->pci_priv->pdev->dev; struct TxFD *tx_fd = dpriv->tx_fd; struct RxFD *rx_fd = dpriv->rx_fd; struct sk_buff **skbuff; int i; - pci_free_consistent(pdev, TX_TOTAL_SIZE, tx_fd, dpriv->tx_fd_dma); - pci_free_consistent(pdev, RX_TOTAL_SIZE, rx_fd, dpriv->rx_fd_dma); + dma_free_coherent(d, TX_TOTAL_SIZE, tx_fd, dpriv->tx_fd_dma); + dma_free_coherent(d, RX_TOTAL_SIZE, rx_fd, 
dpriv->rx_fd_dma); skbuff = dpriv->tx_skbuff; for (i = 0; i < TX_RING_SIZE; i++) { if (*skbuff) { - pci_unmap_single(pdev, le32_to_cpu(tx_fd->data), - (*skbuff)->len, PCI_DMA_TODEVICE); + dma_unmap_single(d, le32_to_cpu(tx_fd->data), + (*skbuff)->len, DMA_TO_DEVICE); dev_kfree_skb(*skbuff); } skbuff++; @@ -506,8 +506,9 @@ static void dscc4_release_ring(struct dscc4_dev_priv *dpriv) skbuff = dpriv->rx_skbuff; for (i = 0; i < RX_RING_SIZE; i++) { if (*skbuff) { - pci_unmap_single(pdev, le32_to_cpu(rx_fd->data), - RX_MAX(HDLC_MAX_MRU), PCI_DMA_FROMDEVICE); + dma_unmap_single(d, le32_to_cpu(rx_fd->data), + RX_MAX(HDLC_MAX_MRU), + DMA_FROM_DEVICE); dev_kfree_skb(*skbuff); } skbuff++; @@ -519,22 +520,30 @@ static inline int try_get_rx_skb(struct dscc4_dev_priv *dpriv, struct net_device *dev) { unsigned int dirty = dpriv->rx_dirty%RX_RING_SIZE; + struct device *d = &dpriv->pci_priv->pdev->dev; struct RxFD *rx_fd = dpriv->rx_fd + dirty; const int len = RX_MAX(HDLC_MAX_MRU); struct sk_buff *skb; - int ret = 0; + dma_addr_t addr; skb = dev_alloc_skb(len); + if (!skb) + goto err_out; + + skb->protocol = hdlc_type_trans(skb, dev); + addr = dma_map_single(d, skb->data, len, DMA_FROM_DEVICE); + if (dma_mapping_error(d, addr)) + goto err_free_skb; + dpriv->rx_skbuff[dirty] = skb; - if (skb) { - skb->protocol = hdlc_type_trans(skb, dev); - rx_fd->data = cpu_to_le32(pci_map_single(dpriv->pci_priv->pdev, - skb->data, len, PCI_DMA_FROMDEVICE)); - } else { - rx_fd->data = 0; - ret = -1; - } - return ret; + rx_fd->data = cpu_to_le32(addr); + return 0; + +err_free_skb: + dev_kfree_skb_any(skb); +err_out: + rx_fd->data = 0; + return -1; } /* @@ -646,7 +655,7 @@ static inline void dscc4_rx_skb(struct dscc4_dev_priv *dpriv, struct net_device *dev) { struct RxFD *rx_fd = dpriv->rx_fd + dpriv->rx_current%RX_RING_SIZE; - struct pci_dev *pdev = dpriv->pci_priv->pdev; + struct device *d = &dpriv->pci_priv->pdev->dev; struct sk_buff *skb; int pkt_len; @@ -656,8 +665,8 @@ static inline void dscc4_rx_skb(struct dscc4_dev_priv *dpriv, goto refill; } pkt_len = TO_SIZE(le32_to_cpu(rx_fd->state2)); - pci_unmap_single(pdev, le32_to_cpu(rx_fd->data), - RX_MAX(HDLC_MAX_MRU), PCI_DMA_FROMDEVICE); + dma_unmap_single(d, le32_to_cpu(rx_fd->data), + RX_MAX(HDLC_MAX_MRU), DMA_FROM_DEVICE); if ((skb->data[--pkt_len] & FrameOk) == FrameOk) { dev->stats.rx_packets++; dev->stats.rx_bytes += pkt_len; @@ -774,8 +783,8 @@ static int dscc4_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) rc = -ENOMEM; - priv->iqcfg = (__le32 *) pci_alloc_consistent(pdev, - IRQ_RING_SIZE*sizeof(__le32), &priv->iqcfg_dma); + priv->iqcfg = (__le32 *)dma_alloc_coherent(&pdev->dev, + IRQ_RING_SIZE*sizeof(__le32), &priv->iqcfg_dma, GFP_KERNEL); if (!priv->iqcfg) goto err_free_irq_5; writel(priv->iqcfg_dma, ioaddr + IQCFG); @@ -786,16 +795,18 @@ static int dscc4_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) */ for (i = 0; i < dev_per_card; i++) { dpriv = priv->root + i; - dpriv->iqtx = (__le32 *) pci_alloc_consistent(pdev, - IRQ_RING_SIZE*sizeof(u32), &dpriv->iqtx_dma); + dpriv->iqtx = (__le32 *)dma_alloc_coherent(&pdev->dev, + IRQ_RING_SIZE*sizeof(u32), &dpriv->iqtx_dma, + GFP_KERNEL); if (!dpriv->iqtx) goto err_free_iqtx_6; writel(dpriv->iqtx_dma, ioaddr + IQTX0 + i*4); } for (i = 0; i < dev_per_card; i++) { dpriv = priv->root + i; - dpriv->iqrx = (__le32 *) pci_alloc_consistent(pdev, - IRQ_RING_SIZE*sizeof(u32), &dpriv->iqrx_dma); + dpriv->iqrx = (__le32 *)dma_alloc_coherent(&pdev->dev, + IRQ_RING_SIZE*sizeof(u32), 
&dpriv->iqrx_dma, + GFP_KERNEL); if (!dpriv->iqrx) goto err_free_iqrx_7; writel(dpriv->iqrx_dma, ioaddr + IQRX0 + i*4); @@ -819,18 +830,18 @@ static int dscc4_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) err_free_iqrx_7: while (--i >= 0) { dpriv = priv->root + i; - pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), - dpriv->iqrx, dpriv->iqrx_dma); + dma_free_coherent(&pdev->dev, IRQ_RING_SIZE*sizeof(u32), + dpriv->iqrx, dpriv->iqrx_dma); } i = dev_per_card; err_free_iqtx_6: while (--i >= 0) { dpriv = priv->root + i; - pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), - dpriv->iqtx, dpriv->iqtx_dma); + dma_free_coherent(&pdev->dev, IRQ_RING_SIZE*sizeof(u32), + dpriv->iqtx, dpriv->iqtx_dma); } - pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), priv->iqcfg, - priv->iqcfg_dma); + dma_free_coherent(&pdev->dev, IRQ_RING_SIZE*sizeof(u32), priv->iqcfg, + priv->iqcfg_dma); err_free_irq_5: free_irq(pdev->irq, priv->root); err_release_4: @@ -1145,16 +1156,23 @@ static netdev_tx_t dscc4_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct dscc4_dev_priv *dpriv = dscc4_priv(dev); - struct dscc4_pci_priv *ppriv = dpriv->pci_priv; + struct device *d = &dpriv->pci_priv->pdev->dev; struct TxFD *tx_fd; + dma_addr_t addr; int next; + addr = dma_map_single(d, skb->data, skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(d, addr)) { + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } + next = dpriv->tx_current%TX_RING_SIZE; dpriv->tx_skbuff[next] = skb; tx_fd = dpriv->tx_fd + next; tx_fd->state = FrameEnd | TO_STATE_TX(skb->len); - tx_fd->data = cpu_to_le32(pci_map_single(ppriv->pdev, skb->data, skb->len, - PCI_DMA_TODEVICE)); + tx_fd->data = cpu_to_le32(addr); tx_fd->complete = 0x00000000; tx_fd->jiffies = jiffies; mb(); @@ -1572,8 +1590,9 @@ static void dscc4_tx_irq(struct dscc4_pci_priv *ppriv, tx_fd = dpriv->tx_fd + cur; skb = dpriv->tx_skbuff[cur]; if (skb) { - pci_unmap_single(ppriv->pdev, le32_to_cpu(tx_fd->data), - skb->len, PCI_DMA_TODEVICE); + dma_unmap_single(&ppriv->pdev->dev, + le32_to_cpu(tx_fd->data), + skb->len, DMA_TO_DEVICE); if (tx_fd->state & FrameEnd) { dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; @@ -1887,16 +1906,22 @@ static struct sk_buff *dscc4_init_dummy_skb(struct dscc4_dev_priv *dpriv) skb = dev_alloc_skb(DUMMY_SKB_SIZE); if (skb) { + struct device *d = &dpriv->pci_priv->pdev->dev; int last = dpriv->tx_dirty%TX_RING_SIZE; struct TxFD *tx_fd = dpriv->tx_fd + last; + dma_addr_t addr; skb->len = DUMMY_SKB_SIZE; skb_copy_to_linear_data(skb, version, strlen(version) % DUMMY_SKB_SIZE); + addr = dma_map_single(d, skb->data, DUMMY_SKB_SIZE, + DMA_TO_DEVICE); + if (dma_mapping_error(d, addr)) { + dev_kfree_skb_any(skb); + return NULL; + } tx_fd->state = FrameEnd | TO_STATE_TX(DUMMY_SKB_SIZE); - tx_fd->data = cpu_to_le32(pci_map_single(dpriv->pci_priv->pdev, - skb->data, DUMMY_SKB_SIZE, - PCI_DMA_TODEVICE)); + tx_fd->data = cpu_to_le32(addr); dpriv->tx_skbuff[last] = skb; } return skb; @@ -1905,18 +1930,20 @@ static struct sk_buff *dscc4_init_dummy_skb(struct dscc4_dev_priv *dpriv) static int dscc4_init_ring(struct net_device *dev) { struct dscc4_dev_priv *dpriv = dscc4_priv(dev); - struct pci_dev *pdev = dpriv->pci_priv->pdev; + struct device *d = &dpriv->pci_priv->pdev->dev; struct TxFD *tx_fd; struct RxFD *rx_fd; void *ring; int i; - ring = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &dpriv->rx_fd_dma); + ring = dma_alloc_coherent(d, RX_TOTAL_SIZE, &dpriv->rx_fd_dma, + GFP_KERNEL); if (!ring) goto err_out; 
dpriv->rx_fd = rx_fd = (struct RxFD *) ring; - ring = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &dpriv->tx_fd_dma); + ring = dma_alloc_coherent(d, TX_TOTAL_SIZE, &dpriv->tx_fd_dma, + GFP_KERNEL); if (!ring) goto err_free_dma_rx; dpriv->tx_fd = tx_fd = (struct TxFD *) ring; @@ -1954,9 +1981,9 @@ static int dscc4_init_ring(struct net_device *dev) return 0; err_free_dma_tx: - pci_free_consistent(pdev, TX_TOTAL_SIZE, ring, dpriv->tx_fd_dma); + dma_free_coherent(d, TX_TOTAL_SIZE, ring, dpriv->tx_fd_dma); err_free_dma_rx: - pci_free_consistent(pdev, RX_TOTAL_SIZE, rx_fd, dpriv->rx_fd_dma); + dma_free_coherent(d, RX_TOTAL_SIZE, rx_fd, dpriv->rx_fd_dma); err_out: return -ENOMEM; } @@ -1976,16 +2003,16 @@ static void dscc4_remove_one(struct pci_dev *pdev) dscc4_pci_reset(pdev, ioaddr); free_irq(pdev->irq, root); - pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), ppriv->iqcfg, - ppriv->iqcfg_dma); + dma_free_coherent(&pdev->dev, IRQ_RING_SIZE*sizeof(u32), ppriv->iqcfg, + ppriv->iqcfg_dma); for (i = 0; i < dev_per_card; i++) { struct dscc4_dev_priv *dpriv = root + i; dscc4_release_ring(dpriv); - pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), - dpriv->iqrx, dpriv->iqrx_dma); - pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), - dpriv->iqtx, dpriv->iqtx_dma); + dma_free_coherent(&pdev->dev, IRQ_RING_SIZE*sizeof(u32), + dpriv->iqrx, dpriv->iqrx_dma); + dma_free_coherent(&pdev->dev, IRQ_RING_SIZE*sizeof(u32), + dpriv->iqtx, dpriv->iqtx_dma); } dscc4_free1(pdev); diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c index 2f0bd6955f33..deea41e96f01 100644 --- a/drivers/net/wan/z85230.c +++ b/drivers/net/wan/z85230.c @@ -483,11 +483,10 @@ static void z8530_status(struct z8530_channel *chan) write_zsctrl(chan, RES_H_IUS); } -struct z8530_irqhandler z8530_sync = -{ - z8530_rx, - z8530_tx, - z8530_status +struct z8530_irqhandler z8530_sync = { + .rx = z8530_rx, + .tx = z8530_tx, + .status = z8530_status, }; EXPORT_SYMBOL(z8530_sync); @@ -605,15 +604,15 @@ static void z8530_dma_status(struct z8530_channel *chan) } static struct z8530_irqhandler z8530_dma_sync = { - z8530_dma_rx, - z8530_dma_tx, - z8530_dma_status + .rx = z8530_dma_rx, + .tx = z8530_dma_tx, + .status = z8530_dma_status, }; static struct z8530_irqhandler z8530_txdma_sync = { - z8530_rx, - z8530_dma_tx, - z8530_dma_status + .rx = z8530_rx, + .tx = z8530_dma_tx, + .status = z8530_dma_status, }; /** @@ -678,11 +677,10 @@ static void z8530_status_clear(struct z8530_channel *chan) write_zsctrl(chan, RES_H_IUS); } -struct z8530_irqhandler z8530_nop= -{ - z8530_rx_clear, - z8530_tx_clear, - z8530_status_clear +struct z8530_irqhandler z8530_nop = { + .rx = z8530_rx_clear, + .tx = z8530_tx_clear, + .status = z8530_status_clear, }; diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c index 106d6f8d471a..68f0463ed8df 100644 --- a/drivers/net/wireless/ath/ar5523/ar5523.c +++ b/drivers/net/wireless/ath/ar5523/ar5523.c @@ -1749,7 +1749,7 @@ static void ar5523_disconnect(struct usb_interface *intf) { USB_DEVICE((vendor), (device) + 1), \ .driver_info = AR5523_FLAG_ABG|AR5523_FLAG_PRE_FIRMWARE } -static struct usb_device_id ar5523_id_table[] = { +static const struct usb_device_id ar5523_id_table[] = { AR5523_DEVICE_UG(0x168c, 0x0001), /* Atheros / AR5523 */ AR5523_DEVICE_UG(0x0cf3, 0x0001), /* Atheros2 / AR5523_1 */ AR5523_DEVICE_UG(0x0cf3, 0x0003), /* Atheros2 / AR5523_2 */ diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig index 
412eb1380dcc..87f56d0e17a6 100644 --- a/drivers/net/wireless/ath/ath10k/Kconfig +++ b/drivers/net/wireless/ath/ath10k/Kconfig @@ -29,6 +29,13 @@ config ATH10K_SDIO This module adds experimental support for SDIO/MMC bus. Currently work in progress and will not fully work. +config ATH10K_USB + tristate "Atheros ath10k USB support (EXPERIMENTAL)" + depends on ATH10K && USB + ---help--- + This module adds experimental support for USB bus. Currently + work in progress and will not fully work. + config ATH10K_DEBUG bool "Atheros ath10k debugging" depends on ATH10K diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile index b0b19a7eb98b..899b9b79f4ce 100644 --- a/drivers/net/wireless/ath/ath10k/Makefile +++ b/drivers/net/wireless/ath/ath10k/Makefile @@ -30,5 +30,8 @@ ath10k_pci-$(CONFIG_ATH10K_AHB) += ahb.o obj-$(CONFIG_ATH10K_SDIO) += ath10k_sdio.o ath10k_sdio-y += sdio.o +obj-$(CONFIG_ATH10K_USB) += ath10k_usb.o +ath10k_usb-y += usb.o + # for tracing framework to find trace.h CFLAGS_trace.o := -I$(src) diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c index da770af83036..ff6815e95684 100644 --- a/drivers/net/wireless/ath/ath10k/ahb.c +++ b/drivers/net/wireless/ath/ath10k/ahb.c @@ -197,35 +197,40 @@ static int ath10k_ahb_rst_ctrl_init(struct ath10k *ar) dev = &ar_ahb->pdev->dev; - ar_ahb->core_cold_rst = devm_reset_control_get(dev, "wifi_core_cold"); + ar_ahb->core_cold_rst = devm_reset_control_get_exclusive(dev, + "wifi_core_cold"); if (IS_ERR(ar_ahb->core_cold_rst)) { ath10k_err(ar, "failed to get core cold rst ctrl: %ld\n", PTR_ERR(ar_ahb->core_cold_rst)); return PTR_ERR(ar_ahb->core_cold_rst); } - ar_ahb->radio_cold_rst = devm_reset_control_get(dev, "wifi_radio_cold"); + ar_ahb->radio_cold_rst = devm_reset_control_get_exclusive(dev, + "wifi_radio_cold"); if (IS_ERR(ar_ahb->radio_cold_rst)) { ath10k_err(ar, "failed to get radio cold rst ctrl: %ld\n", PTR_ERR(ar_ahb->radio_cold_rst)); return PTR_ERR(ar_ahb->radio_cold_rst); } - ar_ahb->radio_warm_rst = devm_reset_control_get(dev, "wifi_radio_warm"); + ar_ahb->radio_warm_rst = devm_reset_control_get_exclusive(dev, + "wifi_radio_warm"); if (IS_ERR(ar_ahb->radio_warm_rst)) { ath10k_err(ar, "failed to get radio warm rst ctrl: %ld\n", PTR_ERR(ar_ahb->radio_warm_rst)); return PTR_ERR(ar_ahb->radio_warm_rst); } - ar_ahb->radio_srif_rst = devm_reset_control_get(dev, "wifi_radio_srif"); + ar_ahb->radio_srif_rst = devm_reset_control_get_exclusive(dev, + "wifi_radio_srif"); if (IS_ERR(ar_ahb->radio_srif_rst)) { ath10k_err(ar, "failed to get radio srif rst ctrl: %ld\n", PTR_ERR(ar_ahb->radio_srif_rst)); return PTR_ERR(ar_ahb->radio_srif_rst); } - ar_ahb->cpu_init_rst = devm_reset_control_get(dev, "wifi_cpu_init"); + ar_ahb->cpu_init_rst = devm_reset_control_get_exclusive(dev, + "wifi_cpu_init"); if (IS_ERR(ar_ahb->cpu_init_rst)) { ath10k_err(ar, "failed to get cpu init rst ctrl: %ld\n", PTR_ERR(ar_ahb->cpu_init_rst)); @@ -787,8 +792,9 @@ static int ath10k_ahb_probe(struct platform_device *pdev) ar_pci->mem = ar_ahb->mem; ar_pci->mem_len = ar_ahb->mem_len; ar_pci->ar = ar; - ar_pci->bus_ops = &ath10k_ahb_bus_ops; + ar_pci->ce.bus_ops = &ath10k_ahb_bus_ops; ar_pci->targ_cpu_to_ce_addr = ath10k_ahb_qca4019_targ_cpu_to_ce_addr; + ar->ce_priv = &ar_pci->ce; ret = ath10k_pci_setup_resource(ar); if (ret) { diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index 08b84c8c3614..a8afd690290f 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ 
b/drivers/net/wireless/ath/ath10k/ce.c @@ -16,7 +16,6 @@ */ #include "hif.h" -#include "pci.h" #include "ce.h" #include "debug.h" @@ -33,7 +32,7 @@ * Each ring consists of a number of descriptors which specify * an address, length, and meta-data. * - * Typically, one side of the PCIe interconnect (Host or Target) + * Typically, one side of the PCIe/AHB/SNOC interconnect (Host or Target) * controls one ring and the other side controls the other ring. * The source side chooses when to initiate a transfer and it * chooses what to send (buffer address, length). The destination @@ -73,57 +72,71 @@ ath10k_get_ring_byte(unsigned int offset, return ((offset & addr_map->mask) >> (addr_map->lsb)); } +static inline u32 ath10k_ce_read32(struct ath10k *ar, u32 offset) +{ + struct ath10k_ce *ce = ath10k_ce_priv(ar); + + return ce->bus_ops->read32(ar, offset); +} + +static inline void ath10k_ce_write32(struct ath10k *ar, u32 offset, u32 value) +{ + struct ath10k_ce *ce = ath10k_ce_priv(ar); + + ce->bus_ops->write32(ar, offset, value); +} + static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - ath10k_pci_write32(ar, ce_ctrl_addr + - ar->hw_ce_regs->dst_wr_index_addr, n); + ath10k_ce_write32(ar, ce_ctrl_addr + + ar->hw_ce_regs->dst_wr_index_addr, n); } static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar, u32 ce_ctrl_addr) { - return ath10k_pci_read32(ar, ce_ctrl_addr + - ar->hw_ce_regs->dst_wr_index_addr); + return ath10k_ce_read32(ar, ce_ctrl_addr + + ar->hw_ce_regs->dst_wr_index_addr); } static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - ath10k_pci_write32(ar, ce_ctrl_addr + - ar->hw_ce_regs->sr_wr_index_addr, n); + ath10k_ce_write32(ar, ce_ctrl_addr + + ar->hw_ce_regs->sr_wr_index_addr, n); } static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar, u32 ce_ctrl_addr) { - return ath10k_pci_read32(ar, ce_ctrl_addr + - ar->hw_ce_regs->sr_wr_index_addr); + return ath10k_ce_read32(ar, ce_ctrl_addr + + ar->hw_ce_regs->sr_wr_index_addr); } static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar, u32 ce_ctrl_addr) { - return ath10k_pci_read32(ar, ce_ctrl_addr + - ar->hw_ce_regs->current_srri_addr); + return ath10k_ce_read32(ar, ce_ctrl_addr + + ar->hw_ce_regs->current_srri_addr); } static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int addr) { - ath10k_pci_write32(ar, ce_ctrl_addr + - ar->hw_ce_regs->sr_base_addr, addr); + ath10k_ce_write32(ar, ce_ctrl_addr + + ar->hw_ce_regs->sr_base_addr, addr); } static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - ath10k_pci_write32(ar, ce_ctrl_addr + - ar->hw_ce_regs->sr_size_addr, n); + ath10k_ce_write32(ar, ce_ctrl_addr + + ar->hw_ce_regs->sr_size_addr, n); } static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar, @@ -131,12 +144,13 @@ static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar, unsigned int n) { struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs; - u32 ctrl1_addr = ath10k_pci_read32(ar, - ce_ctrl_addr + ctrl_regs->addr); - ath10k_pci_write32(ar, ce_ctrl_addr + ctrl_regs->addr, - (ctrl1_addr & ~(ctrl_regs->dmax->mask)) | - ath10k_set_ring_byte(n, ctrl_regs->dmax)); + u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr + + ctrl_regs->addr); + + ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr, + (ctrl1_addr & ~(ctrl_regs->dmax->mask)) | + ath10k_set_ring_byte(n, 
ctrl_regs->dmax)); } static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar, @@ -144,11 +158,13 @@ static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar, unsigned int n) { struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs; - u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + ctrl_regs->addr); - ath10k_pci_write32(ar, ce_ctrl_addr + ctrl_regs->addr, - (ctrl1_addr & ~(ctrl_regs->src_ring->mask)) | - ath10k_set_ring_byte(n, ctrl_regs->src_ring)); + u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr + + ctrl_regs->addr); + + ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr, + (ctrl1_addr & ~(ctrl_regs->src_ring->mask)) | + ath10k_set_ring_byte(n, ctrl_regs->src_ring)); } static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar, @@ -156,34 +172,36 @@ static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar, unsigned int n) { struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs; - u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + ctrl_regs->addr); - ath10k_pci_write32(ar, ce_ctrl_addr + ctrl_regs->addr, - (ctrl1_addr & ~(ctrl_regs->dst_ring->mask)) | - ath10k_set_ring_byte(n, ctrl_regs->dst_ring)); + u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr + + ctrl_regs->addr); + + ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr, + (ctrl1_addr & ~(ctrl_regs->dst_ring->mask)) | + ath10k_set_ring_byte(n, ctrl_regs->dst_ring)); } static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar, u32 ce_ctrl_addr) { - return ath10k_pci_read32(ar, ce_ctrl_addr + - ar->hw_ce_regs->current_drri_addr); + return ath10k_ce_read32(ar, ce_ctrl_addr + + ar->hw_ce_regs->current_drri_addr); } static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar, u32 ce_ctrl_addr, u32 addr) { - ath10k_pci_write32(ar, ce_ctrl_addr + - ar->hw_ce_regs->dr_base_addr, addr); + ath10k_ce_write32(ar, ce_ctrl_addr + + ar->hw_ce_regs->dr_base_addr, addr); } static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - ath10k_pci_write32(ar, ce_ctrl_addr + - ar->hw_ce_regs->dr_size_addr, n); + ath10k_ce_write32(ar, ce_ctrl_addr + + ar->hw_ce_regs->dr_size_addr, n); } static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar, @@ -191,11 +209,11 @@ static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar, unsigned int n) { struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr; - u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + srcr_wm->addr); + u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr); - ath10k_pci_write32(ar, ce_ctrl_addr + srcr_wm->addr, - (addr & ~(srcr_wm->wm_high->mask)) | - (ath10k_set_ring_byte(n, srcr_wm->wm_high))); + ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr, + (addr & ~(srcr_wm->wm_high->mask)) | + (ath10k_set_ring_byte(n, srcr_wm->wm_high))); } static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar, @@ -203,11 +221,11 @@ static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar, unsigned int n) { struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr; - u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + srcr_wm->addr); + u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr); - ath10k_pci_write32(ar, ce_ctrl_addr + srcr_wm->addr, - (addr & ~(srcr_wm->wm_low->mask)) | - (ath10k_set_ring_byte(n, srcr_wm->wm_low))); + ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr, + (addr & ~(srcr_wm->wm_low->mask)) | + (ath10k_set_ring_byte(n, srcr_wm->wm_low))); } 
static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar, @@ -215,11 +233,11 @@ static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar, unsigned int n) { struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr; - u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + dstr_wm->addr); + u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr); - ath10k_pci_write32(ar, ce_ctrl_addr + dstr_wm->addr, - (addr & ~(dstr_wm->wm_high->mask)) | - (ath10k_set_ring_byte(n, dstr_wm->wm_high))); + ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr, + (addr & ~(dstr_wm->wm_high->mask)) | + (ath10k_set_ring_byte(n, dstr_wm->wm_high))); } static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar, @@ -227,66 +245,73 @@ static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar, unsigned int n) { struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr; - u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + dstr_wm->addr); + u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr); - ath10k_pci_write32(ar, ce_ctrl_addr + dstr_wm->addr, - (addr & ~(dstr_wm->wm_low->mask)) | - (ath10k_set_ring_byte(n, dstr_wm->wm_low))); + ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr, + (addr & ~(dstr_wm->wm_low->mask)) | + (ath10k_set_ring_byte(n, dstr_wm->wm_low))); } static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar, u32 ce_ctrl_addr) { struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie; - u32 host_ie_addr = ath10k_pci_read32(ar, ce_ctrl_addr + - ar->hw_ce_regs->host_ie_addr); - ath10k_pci_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr, - host_ie_addr | host_ie->copy_complete->mask); + u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr + + ar->hw_ce_regs->host_ie_addr); + + ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr, + host_ie_addr | host_ie->copy_complete->mask); } static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar, u32 ce_ctrl_addr) { struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie; - u32 host_ie_addr = ath10k_pci_read32(ar, ce_ctrl_addr + - ar->hw_ce_regs->host_ie_addr); - ath10k_pci_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr, - host_ie_addr & ~(host_ie->copy_complete->mask)); + u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr + + ar->hw_ce_regs->host_ie_addr); + + ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr, + host_ie_addr & ~(host_ie->copy_complete->mask)); } static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar, u32 ce_ctrl_addr) { struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs; - u32 host_ie_addr = ath10k_pci_read32(ar, ce_ctrl_addr + - ar->hw_ce_regs->host_ie_addr); - ath10k_pci_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr, - host_ie_addr & ~(wm_regs->wm_mask)); + u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr + + ar->hw_ce_regs->host_ie_addr); + + ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr, + host_ie_addr & ~(wm_regs->wm_mask)); } static inline void ath10k_ce_error_intr_enable(struct ath10k *ar, u32 ce_ctrl_addr) { struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs; - u32 misc_ie_addr = ath10k_pci_read32(ar, ce_ctrl_addr + - ar->hw_ce_regs->misc_ie_addr); - ath10k_pci_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr, - misc_ie_addr | misc_regs->err_mask); + u32 misc_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr + + ar->hw_ce_regs->misc_ie_addr); + + ath10k_ce_write32(ar, + ce_ctrl_addr + 
ar->hw_ce_regs->misc_ie_addr, + misc_ie_addr | misc_regs->err_mask); } static inline void ath10k_ce_error_intr_disable(struct ath10k *ar, u32 ce_ctrl_addr) { struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs; - u32 misc_ie_addr = ath10k_pci_read32(ar, ce_ctrl_addr + - ar->hw_ce_regs->misc_ie_addr); - ath10k_pci_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr, - misc_ie_addr & ~(misc_regs->err_mask)); + u32 misc_ie_addr = ath10k_ce_read32(ar, + ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr); + + ath10k_ce_write32(ar, + ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr, + misc_ie_addr & ~(misc_regs->err_mask)); } static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar, @@ -295,7 +320,7 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar, { struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs; - ath10k_pci_write32(ar, ce_ctrl_addr + wm_regs->addr, mask); + ath10k_ce_write32(ar, ce_ctrl_addr + wm_regs->addr, mask); } /* @@ -362,11 +387,11 @@ int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state, void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe) { struct ath10k *ar = pipe->ar; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); struct ath10k_ce_ring *src_ring = pipe->src_ring; u32 ctrl_addr = pipe->ctrl_addr; - lockdep_assert_held(&ar_pci->ce_lock); + lockdep_assert_held(&ce->ce_lock); /* * This function must be called only if there is an incomplete @@ -394,13 +419,13 @@ int ath10k_ce_send(struct ath10k_ce_pipe *ce_state, unsigned int flags) { struct ath10k *ar = ce_state->ar; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); int ret; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ce->ce_lock); ret = ath10k_ce_send_nolock(ce_state, per_transfer_context, buffer, nbytes, transfer_id, flags); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ce->ce_lock); return ret; } @@ -408,14 +433,14 @@ int ath10k_ce_send(struct ath10k_ce_pipe *ce_state, int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe) { struct ath10k *ar = pipe->ar; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); int delta; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ce->ce_lock); delta = CE_RING_DELTA(pipe->src_ring->nentries_mask, pipe->src_ring->write_index, pipe->src_ring->sw_index - 1); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ce->ce_lock); return delta; } @@ -423,13 +448,13 @@ int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe) int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe) { struct ath10k *ar = pipe->ar; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); struct ath10k_ce_ring *dest_ring = pipe->dest_ring; unsigned int nentries_mask = dest_ring->nentries_mask; unsigned int write_index = dest_ring->write_index; unsigned int sw_index = dest_ring->sw_index; - lockdep_assert_held(&ar_pci->ce_lock); + lockdep_assert_held(&ce->ce_lock); return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1); } @@ -437,7 +462,7 @@ int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe) int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr) { struct ath10k *ar = pipe->ar; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); struct ath10k_ce_ring *dest_ring = pipe->dest_ring; unsigned int nentries_mask = dest_ring->nentries_mask; unsigned int write_index 
= dest_ring->write_index; @@ -446,7 +471,7 @@ int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr) struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index); u32 ctrl_addr = pipe->ctrl_addr; - lockdep_assert_held(&ar_pci->ce_lock); + lockdep_assert_held(&ce->ce_lock); if ((pipe->id != 5) && CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0) @@ -486,12 +511,12 @@ void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries) int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr) { struct ath10k *ar = pipe->ar; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); int ret; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ce->ce_lock); ret = __ath10k_ce_rx_post_buf(pipe, ctx, paddr); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ce->ce_lock); return ret; } @@ -554,14 +579,14 @@ int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state, unsigned int *nbytesp) { struct ath10k *ar = ce_state->ar; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); int ret; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ce->ce_lock); ret = ath10k_ce_completed_recv_next_nolock(ce_state, per_transfer_contextp, nbytesp); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ce->ce_lock); return ret; } @@ -576,7 +601,7 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state, unsigned int write_index; int ret; struct ath10k *ar; - struct ath10k_pci *ar_pci; + struct ath10k_ce *ce; dest_ring = ce_state->dest_ring; @@ -584,9 +609,9 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state, return -EIO; ar = ce_state->ar; - ar_pci = ath10k_pci_priv(ar); + ce = ath10k_ce_priv(ar); - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ce->ce_lock); nentries_mask = dest_ring->nentries_mask; sw_index = dest_ring->sw_index; @@ -614,7 +639,7 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state, ret = -EIO; } - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ce->ce_lock); return ret; } @@ -686,7 +711,7 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state, unsigned int write_index; int ret; struct ath10k *ar; - struct ath10k_pci *ar_pci; + struct ath10k_ce *ce; src_ring = ce_state->src_ring; @@ -694,9 +719,9 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state, return -EIO; ar = ce_state->ar; - ar_pci = ath10k_pci_priv(ar); + ce = ath10k_ce_priv(ar); - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ce->ce_lock); nentries_mask = src_ring->nentries_mask; sw_index = src_ring->sw_index; @@ -727,7 +752,7 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state, ret = -EIO; } - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ce->ce_lock); return ret; } @@ -736,13 +761,13 @@ int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state, void **per_transfer_contextp) { struct ath10k *ar = ce_state->ar; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); int ret; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ce->ce_lock); ret = ath10k_ce_completed_send_next_nolock(ce_state, per_transfer_contextp); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ce->ce_lock); return ret; } @@ -755,17 +780,18 @@ int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state, */ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ath10k_ce_pipe *ce_state = 
&ar_pci->ce_states[ce_id]; + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs; u32 ctrl_addr = ce_state->ctrl_addr; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ce->ce_lock); /* Clear the copy-complete interrupts that will be handled here. */ - ath10k_ce_engine_int_status_clear(ar, ctrl_addr, wm_regs->cc_mask); + ath10k_ce_engine_int_status_clear(ar, ctrl_addr, + wm_regs->cc_mask); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ce->ce_lock); if (ce_state->recv_cb) ce_state->recv_cb(ce_state); @@ -773,7 +799,7 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id) if (ce_state->send_cb) ce_state->send_cb(ce_state); - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ce->ce_lock); /* * Misc CE interrupts are not being handled, but still need @@ -781,7 +807,7 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id) */ ath10k_ce_engine_int_status_clear(ar, ctrl_addr, wm_regs->wm_mask); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ce->ce_lock); } /* @@ -795,7 +821,7 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar) int ce_id; u32 intr_summary; - intr_summary = CE_INTERRUPT_SUMMARY(ar); + intr_summary = ath10k_ce_interrupt_summary(ar); for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) { if (intr_summary & (1 << ce_id)) @@ -847,22 +873,25 @@ int ath10k_ce_disable_interrupts(struct ath10k *ar) void ath10k_ce_enable_interrupts(struct ath10k *ar) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); int ce_id; + struct ath10k_ce_pipe *ce_state; /* Skip the last copy engine, CE7 the diagnostic window, as that * uses polling and isn't initialized for interrupts. 
*/ - for (ce_id = 0; ce_id < CE_COUNT - 1; ce_id++) - ath10k_ce_per_engine_handler_adjust(&ar_pci->ce_states[ce_id]); + for (ce_id = 0; ce_id < CE_COUNT - 1; ce_id++) { + ce_state = &ce->ce_states[ce_id]; + ath10k_ce_per_engine_handler_adjust(ce_state); + } } static int ath10k_ce_init_src_ring(struct ath10k *ar, unsigned int ce_id, const struct ce_attr *attr) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id]; + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; struct ath10k_ce_ring *src_ring = ce_state->src_ring; u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id); @@ -898,8 +927,8 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar, unsigned int ce_id, const struct ce_attr *attr) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id]; + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; struct ath10k_ce_ring *dest_ring = ce_state->dest_ring; u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id); @@ -1081,8 +1110,8 @@ void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id) int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id, const struct ce_attr *attr) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id]; + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; int ret; /* @@ -1138,8 +1167,8 @@ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id, void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id]; + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; if (ce_state->src_ring) { dma_free_coherent(ar->dev, @@ -1168,38 +1197,38 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id) void ath10k_ce_dump_registers(struct ath10k *ar, struct ath10k_fw_crash_data *crash_data) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ath10k_ce_crash_data ce; + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_ce_crash_data ce_data; u32 addr, id; lockdep_assert_held(&ar->data_lock); ath10k_err(ar, "Copy Engine register dump:\n"); - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ce->ce_lock); for (id = 0; id < CE_COUNT; id++) { addr = ath10k_ce_base_address(ar, id); - ce.base_addr = cpu_to_le32(addr); + ce_data.base_addr = cpu_to_le32(addr); - ce.src_wr_idx = + ce_data.src_wr_idx = cpu_to_le32(ath10k_ce_src_ring_write_index_get(ar, addr)); - ce.src_r_idx = + ce_data.src_r_idx = cpu_to_le32(ath10k_ce_src_ring_read_index_get(ar, addr)); - ce.dst_wr_idx = + ce_data.dst_wr_idx = cpu_to_le32(ath10k_ce_dest_ring_write_index_get(ar, addr)); - ce.dst_r_idx = + ce_data.dst_r_idx = cpu_to_le32(ath10k_ce_dest_ring_read_index_get(ar, addr)); if (crash_data) - crash_data->ce_crash_data[id] = ce; + crash_data->ce_crash_data[id] = ce_data; ath10k_err(ar, "[%02d]: 0x%08x %3u %3u %3u %3u", id, - le32_to_cpu(ce.base_addr), - le32_to_cpu(ce.src_wr_idx), - le32_to_cpu(ce.src_r_idx), - le32_to_cpu(ce.dst_wr_idx), - le32_to_cpu(ce.dst_r_idx)); + le32_to_cpu(ce_data.base_addr), + le32_to_cpu(ce_data.src_wr_idx), + le32_to_cpu(ce_data.src_r_idx), + le32_to_cpu(ce_data.dst_wr_idx), + le32_to_cpu(ce_data.dst_r_idx)); } - spin_unlock_bh(&ar_pci->ce_lock); + 
spin_unlock_bh(&ce->ce_lock); } diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h index 95743a57525d..bdec794704d9 100644 --- a/drivers/net/wireless/ath/ath10k/ce.h +++ b/drivers/net/wireless/ath/ath10k/ce.h @@ -122,6 +122,24 @@ struct ath10k_ce_pipe { /* Copy Engine settable attributes */ struct ce_attr; +struct ath10k_bus_ops { + u32 (*read32)(struct ath10k *ar, u32 offset); + void (*write32)(struct ath10k *ar, u32 offset, u32 value); + int (*get_num_banks)(struct ath10k *ar); +}; + +static inline struct ath10k_ce *ath10k_ce_priv(struct ath10k *ar) +{ + return (struct ath10k_ce *)ar->ce_priv; +} + +struct ath10k_ce { + /* protects CE info */ + spinlock_t ce_lock; + const struct ath10k_bus_ops *bus_ops; + struct ath10k_ce_pipe ce_states[CE_COUNT_MAX]; +}; + /*==================Send====================*/ /* ath10k_ce_send flags */ @@ -291,9 +309,13 @@ static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id) CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB) #define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS 0x0000 -#define CE_INTERRUPT_SUMMARY(ar) \ - CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET( \ - ath10k_pci_read32((ar), CE_WRAPPER_BASE_ADDRESS + \ - CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS)) +static inline u32 ath10k_ce_interrupt_summary(struct ath10k *ar) +{ + struct ath10k_ce *ce = ath10k_ce_priv(ar); + + return CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET( + ce->bus_ops->read32((ar), CE_WRAPPER_BASE_ADDRESS + + CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS)); +} #endif /* _CE_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 75c5c903c8a6..a4f635820f35 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -519,7 +519,7 @@ static const struct firmware *ath10k_fetch_fw_file(struct ath10k *ar, dir = "."; snprintf(filename, sizeof(filename), "%s/%s", dir, file); - ret = request_firmware_direct(&fw, filename, ar->dev); + ret = request_firmware(&fw, filename, ar->dev); ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot fw request '%s': %d\n", filename, ret); @@ -1454,6 +1454,7 @@ static void ath10k_core_get_fw_name(struct ath10k *ar, char *fw_name, { switch (ar->hif.bus) { case ATH10K_BUS_SDIO: + case ATH10K_BUS_USB: scnprintf(fw_name, fw_name_len, "%s-%s-%d.bin", ATH10K_FW_FILE_BASE, ath10k_bus_str(ar->hif.bus), fw_api); @@ -1885,6 +1886,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) ar->fw_stats_req_mask = WMI_10_4_STAT_PEER | WMI_10_4_STAT_PEER_EXTD; ar->max_spatial_stream = ar->hw_params.max_spatial_stream; + ar->max_num_tdls_vdevs = TARGET_10_4_NUM_TDLS_VDEVS; if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, fw_file->fw_features)) @@ -2055,6 +2057,12 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode, goto err_wmi_detach; } + /* If firmware indicates Full Rx Reorder support it must be used in a + * slightly different manner. Let HTT code know. 
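/* Aside, not part of the patch: a minimal sketch of how a bus glue layer is
 * expected to hook into the ath10k_ce / ath10k_bus_ops abstraction added in
 * ce.h above. It mirrors what the pci.c hunks later in this patch do with
 * ath10k_pci_bus_ops and ar->ce_priv; the "mybus" names are hypothetical
 * placeholders, not real driver symbols.
 */
struct ath10k_mybus {
        struct ath10k *ar;
        struct ath10k_ce ce;    /* CE state shared with ce.c via ar->ce_priv */
};

static u32 ath10k_mybus_read32(struct ath10k *ar, u32 offset)
{
        /* bus-specific register read would go here */
        return 0;
}

static void ath10k_mybus_write32(struct ath10k *ar, u32 offset, u32 value)
{
        /* bus-specific register write would go here */
}

static int ath10k_mybus_get_num_banks(struct ath10k *ar)
{
        return 1;
}

static const struct ath10k_bus_ops ath10k_mybus_ops = {
        .read32         = ath10k_mybus_read32,
        .write32        = ath10k_mybus_write32,
        .get_num_banks  = ath10k_mybus_get_num_banks,
};

static void ath10k_mybus_ce_attach(struct ath10k *ar, struct ath10k_mybus *bus)
{
        /* Once ar->ce_priv points at the embedded ath10k_ce, helpers such as
         * ath10k_ce_interrupt_summary() resolve register access through
         * bus->ce.bus_ops, and the CE code serializes on bus->ce.ce_lock
         * rather than on a PCI-private lock.
         */
        spin_lock_init(&bus->ce.ce_lock);
        bus->ce.bus_ops = &ath10k_mybus_ops;
        ar->ce_priv = &bus->ce;
}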
+ */ + ar->htt.rx_ring.in_ord_rx = !!(test_bit(WMI_SERVICE_RX_FULL_REORDER, + ar->wmi.svc_map)); + status = ath10k_htt_rx_alloc(&ar->htt); if (status) { ath10k_err(ar, "failed to alloc htt rx: %d\n", status); @@ -2123,6 +2131,14 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode, ar->running_fw->fw_file.fw_features)) val |= WMI_10_4_COEX_GPIO_SUPPORT; + if (test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, + ar->wmi.svc_map)) + val |= WMI_10_4_TDLS_EXPLICIT_MODE_ONLY; + + if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, + ar->wmi.svc_map)) + val |= WMI_10_4_TDLS_UAPSD_BUFFER_STA; + status = ath10k_mac_ext_resource_config(ar, val); if (status) { ath10k_err(ar, @@ -2167,12 +2183,6 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode, } } - /* If firmware indicates Full Rx Reorder support it must be used in a - * slightly different manner. Let HTT code know. - */ - ar->htt.rx_ring.in_ord_rx = !!(test_bit(WMI_SERVICE_RX_FULL_REORDER, - ar->wmi.svc_map)); - status = ath10k_htt_rx_ring_refill(ar); if (status) { ath10k_err(ar, "failed to refill htt rx ring: %d\n", status); @@ -2516,6 +2526,11 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, ar->hw_ce_regs = &qcax_ce_regs; ar->hw_values = &qca4019_values; break; + case ATH10K_HW_WCN3990: + ar->regs = &wcn3990_regs; + ar->hw_ce_regs = &wcn3990_ce_regs; + ar->hw_values = &wcn3990_values; + break; default: ath10k_err(ar, "unsupported core hardware revision %d\n", hw_rev); diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 1aa5cf12fce0..949ebb3e967b 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -92,6 +92,7 @@ enum ath10k_bus { ATH10K_BUS_PCI, ATH10K_BUS_AHB, ATH10K_BUS_SDIO, + ATH10K_BUS_USB, }; static inline const char *ath10k_bus_str(enum ath10k_bus bus) @@ -103,6 +104,8 @@ static inline const char *ath10k_bus_str(enum ath10k_bus bus) return "ahb"; case ATH10K_BUS_SDIO: return "sdio"; + case ATH10K_BUS_USB: + return "usb"; } return "unknown"; @@ -459,7 +462,7 @@ struct ath10k_ce_crash_hdr { struct ath10k_fw_crash_data { bool crashed_since_read; - uuid_le uuid; + guid_t guid; struct timespec timestamp; __le32 registers[REG_DUMP_COUNT_QCA988X]; struct ath10k_ce_crash_data ce_crash_data[CE_COUNT_MAX]; @@ -993,6 +996,10 @@ struct ath10k { u32 reg_ack_cts_timeout_orig; } fw_coverage; + u32 ampdu_reference; + + void *ce_priv; + /* must be last */ u8 drv_priv[0] __aligned(sizeof(void *)); }; diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 389fcb7a9fd0..df514507d3f1 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -70,7 +70,7 @@ struct ath10k_dump_file_data { /* some info we can get from ath10k struct that might help */ - u8 uuid[16]; + guid_t guid; __le32 chip_id; @@ -237,7 +237,7 @@ static ssize_t ath10k_read_wmi_services(struct file *file, { struct ath10k *ar = file->private_data; char *buf; - size_t len = 0, buf_len = 4096; + size_t len = 0, buf_len = 8192; const char *name; ssize_t ret_cnt; bool enabled; @@ -719,7 +719,7 @@ ath10k_debug_get_new_fw_crash_data(struct ath10k *ar) lockdep_assert_held(&ar->data_lock); crash_data->crashed_since_read = true; - uuid_le_gen(&crash_data->uuid); + guid_gen(&crash_data->guid); getnstimeofday(&crash_data->timestamp); return crash_data; @@ -766,7 +766,7 @@ static struct ath10k_dump_file_data *ath10k_build_dump_file(struct ath10k *ar, 
dump_data->version = cpu_to_le32(ATH10K_FW_CRASH_DUMP_VERSION); - memcpy(dump_data->uuid, &crash_data->uuid, sizeof(dump_data->uuid)); + guid_copy(&dump_data->guid, &crash_data->guid); dump_data->chip_id = cpu_to_le32(ar->chip_id); dump_data->bus_type = cpu_to_le32(0); dump_data->target_version = cpu_to_le32(ar->target_version); diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h index 257d10985c6e..548ad5483a4a 100644 --- a/drivers/net/wireless/ath/ath10k/debug.h +++ b/drivers/net/wireless/ath/ath10k/debug.h @@ -40,6 +40,8 @@ enum ath10k_debug_mask { ATH10K_DBG_AHB = 0x00008000, ATH10K_DBG_SDIO = 0x00010000, ATH10K_DBG_SDIO_DUMP = 0x00020000, + ATH10K_DBG_USB = 0x00040000, + ATH10K_DBG_USB_BULK = 0x00080000, ATH10K_DBG_ANY = 0xffffffff, }; diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 398dda978d6e..a3f5dc78353f 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -890,16 +890,26 @@ static void ath10k_htt_rx_h_ppdu(struct ath10k *ar, status->nss = 0; status->encoding = RX_ENC_LEGACY; status->bw = RATE_INFO_BW_20; + status->flag &= ~RX_FLAG_MACTIME_END; status->flag |= RX_FLAG_NO_SIGNAL_VAL; + status->flag &= ~(RX_FLAG_AMPDU_IS_LAST); + status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN; + status->ampdu_reference = ar->ampdu_reference; + ath10k_htt_rx_h_signal(ar, status, rxd); ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id); ath10k_htt_rx_h_rates(ar, status, rxd); } - if (is_last_ppdu) + if (is_last_ppdu) { ath10k_htt_rx_h_mactime(ar, status, rxd); + + /* set ampdu last segment flag */ + status->flag |= RX_FLAG_AMPDU_IS_LAST; + ar->ampdu_reference++; + } } static const char * const tid_to_ac[] = { @@ -1514,7 +1524,7 @@ static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar, */ if (!rx_status->freq) { - ath10k_warn(ar, "no channel configured; ignoring frame(s)!\n"); + ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n"); return false; } @@ -1735,7 +1745,8 @@ static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp) } static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list, - struct sk_buff_head *amsdu) + struct sk_buff_head *amsdu, + int budget_left) { struct sk_buff *msdu; struct htt_rx_desc *rxd; @@ -1746,8 +1757,9 @@ static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list, if (WARN_ON(!skb_queue_empty(amsdu))) return -EINVAL; - while ((msdu = __skb_dequeue(list))) { + while ((msdu = __skb_dequeue(list)) && budget_left) { __skb_queue_tail(amsdu, msdu); + budget_left--; rxd = (void *)msdu->data - sizeof(*rxd); if (rxd->msdu_end.common.info0 & @@ -1838,7 +1850,8 @@ static int ath10k_htt_rx_h_rx_offload(struct ath10k *ar, return num_msdu; } -static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb) +static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb, + int budget_left) { struct ath10k_htt *htt = &ar->htt; struct htt_resp *resp = (void *)skb->data; @@ -1895,9 +1908,9 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb) if (offload) num_msdus = ath10k_htt_rx_h_rx_offload(ar, &list); - while (!skb_queue_empty(&list)) { + while (!skb_queue_empty(&list) && budget_left) { __skb_queue_head_init(&amsdu); - ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu); + ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu, budget_left); switch (ret) { case 0: /* Note: The in-order indication may report interleaved @@ -1907,6 
+1920,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb) * should still give an idea about rx rate to the user. */ num_msdus += skb_queue_len(&amsdu); + budget_left -= skb_queue_len(&amsdu); ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id); ath10k_htt_rx_h_filter(ar, &amsdu, status); ath10k_htt_rx_h_mpdu(ar, &amsdu, status); @@ -2549,7 +2563,8 @@ int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget) } spin_lock_bh(&htt->rx_ring.lock); - num_rx_msdus = ath10k_htt_rx_in_ord_ind(ar, skb); + num_rx_msdus = ath10k_htt_rx_in_ord_ind(ar, skb, + (budget - quota)); spin_unlock_bh(&htt->rx_ring.lock); if (num_rx_msdus < 0) { resched_napi = true; diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c index afb0c01cbb55..a860691d635d 100644 --- a/drivers/net/wireless/ath/ath10k/hw.c +++ b/drivers/net/wireless/ath/ath10k/hw.c @@ -192,6 +192,156 @@ const struct ath10k_hw_values qca4019_values = { .ce_desc_meta_data_lsb = 4, }; +const struct ath10k_hw_regs wcn3990_regs = { + .rtc_soc_base_address = 0x00000000, + .rtc_wmac_base_address = 0x00000000, + .soc_core_base_address = 0x00000000, + .ce_wrapper_base_address = 0x0024C000, + .ce0_base_address = 0x00240000, + .ce1_base_address = 0x00241000, + .ce2_base_address = 0x00242000, + .ce3_base_address = 0x00243000, + .ce4_base_address = 0x00244000, + .ce5_base_address = 0x00245000, + .ce6_base_address = 0x00246000, + .ce7_base_address = 0x00247000, + .ce8_base_address = 0x00248000, + .ce9_base_address = 0x00249000, + .ce10_base_address = 0x0024A000, + .ce11_base_address = 0x0024B000, + .soc_chip_id_address = 0x000000f0, + .soc_reset_control_si0_rst_mask = 0x00000001, + .soc_reset_control_ce_rst_mask = 0x00000100, + .ce_wrap_intr_sum_host_msi_lsb = 0x0000000c, + .ce_wrap_intr_sum_host_msi_mask = 0x00fff000, + .pcie_intr_fw_mask = 0x00100000, +}; + +static struct ath10k_hw_ce_regs_addr_map wcn3990_src_ring = { + .msb = 0x00000010, + .lsb = 0x00000010, + .mask = GENMASK(17, 17), +}; + +static struct ath10k_hw_ce_regs_addr_map wcn3990_dst_ring = { + .msb = 0x00000012, + .lsb = 0x00000012, + .mask = GENMASK(18, 18), +}; + +static struct ath10k_hw_ce_regs_addr_map wcn3990_dmax = { + .msb = 0x00000000, + .lsb = 0x00000000, + .mask = GENMASK(15, 0), +}; + +static struct ath10k_hw_ce_ctrl1 wcn3990_ctrl1 = { + .addr = 0x00000018, + .src_ring = &wcn3990_src_ring, + .dst_ring = &wcn3990_dst_ring, + .dmax = &wcn3990_dmax, +}; + +static struct ath10k_hw_ce_regs_addr_map wcn3990_host_ie_cc = { + .mask = GENMASK(0, 0), +}; + +static struct ath10k_hw_ce_host_ie wcn3990_host_ie = { + .copy_complete = &wcn3990_host_ie_cc, +}; + +static struct ath10k_hw_ce_host_wm_regs wcn3990_wm_reg = { + .dstr_lmask = 0x00000010, + .dstr_hmask = 0x00000008, + .srcr_lmask = 0x00000004, + .srcr_hmask = 0x00000002, + .cc_mask = 0x00000001, + .wm_mask = 0x0000001E, + .addr = 0x00000030, +}; + +static struct ath10k_hw_ce_misc_regs wcn3990_misc_reg = { + .axi_err = 0x00000100, + .dstr_add_err = 0x00000200, + .srcr_len_err = 0x00000100, + .dstr_mlen_vio = 0x00000080, + .dstr_overflow = 0x00000040, + .srcr_overflow = 0x00000020, + .err_mask = 0x000003E0, + .addr = 0x00000038, +}; + +static struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_low = { + .msb = 0x00000000, + .lsb = 0x00000010, + .mask = GENMASK(31, 16), +}; + +static struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_high = { + .msb = 0x0000000f, + .lsb = 0x00000000, + .mask = GENMASK(15, 0), +}; + +static struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_src_ring = { + 
.addr = 0x0000004c, + .low_rst = 0x00000000, + .high_rst = 0x00000000, + .wm_low = &wcn3990_src_wm_low, + .wm_high = &wcn3990_src_wm_high, +}; + +static struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_low = { + .lsb = 0x00000010, + .mask = GENMASK(31, 16), +}; + +static struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_high = { + .msb = 0x0000000f, + .lsb = 0x00000000, + .mask = GENMASK(15, 0), +}; + +static struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_dst_ring = { + .addr = 0x00000050, + .low_rst = 0x00000000, + .high_rst = 0x00000000, + .wm_low = &wcn3990_dst_wm_low, + .wm_high = &wcn3990_dst_wm_high, +}; + +struct ath10k_hw_ce_regs wcn3990_ce_regs = { + .sr_base_addr = 0x00000000, + .sr_size_addr = 0x00000008, + .dr_base_addr = 0x0000000c, + .dr_size_addr = 0x00000014, + .misc_ie_addr = 0x00000034, + .sr_wr_index_addr = 0x0000003c, + .dst_wr_index_addr = 0x00000040, + .current_srri_addr = 0x00000044, + .current_drri_addr = 0x00000048, + .ddr_addr_for_rri_low = 0x00000004, + .ddr_addr_for_rri_high = 0x00000008, + .ce_rri_low = 0x0024C004, + .ce_rri_high = 0x0024C008, + .host_ie_addr = 0x0000002c, + .ctrl1_regs = &wcn3990_ctrl1, + .host_ie = &wcn3990_host_ie, + .wm_regs = &wcn3990_wm_reg, + .misc_regs = &wcn3990_misc_reg, + .wm_srcr = &wcn3990_wm_src_ring, + .wm_dstr = &wcn3990_wm_dst_ring, +}; + +const struct ath10k_hw_values wcn3990_values = { + .rtc_state_val_on = 5, + .ce_count = 12, + .msi_assign_ce_max = 12, + .num_target_ce_config_wlan = 12, + .ce_desc_meta_data_mask = 0xFFF0, + .ce_desc_meta_data_lsb = 4, +}; + static struct ath10k_hw_ce_regs_addr_map qcax_src_ring = { .msb = 0x00000010, .lsb = 0x00000010, diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 97dc1479f44e..0c089f6dd3d9 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -231,6 +231,7 @@ enum ath10k_hw_rev { ATH10K_HW_QCA9377, ATH10K_HW_QCA4019, ATH10K_HW_QCA9887, + ATH10K_HW_WCN3990, }; struct ath10k_hw_regs { @@ -247,6 +248,10 @@ struct ath10k_hw_regs { u32 ce5_base_address; u32 ce6_base_address; u32 ce7_base_address; + u32 ce8_base_address; + u32 ce9_base_address; + u32 ce10_base_address; + u32 ce11_base_address; u32 soc_reset_control_si0_rst_mask; u32 soc_reset_control_ce_rst_mask; u32 soc_chip_id_address; @@ -267,6 +272,7 @@ extern const struct ath10k_hw_regs qca988x_regs; extern const struct ath10k_hw_regs qca6174_regs; extern const struct ath10k_hw_regs qca99x0_regs; extern const struct ath10k_hw_regs qca4019_regs; +extern const struct ath10k_hw_regs wcn3990_regs; struct ath10k_hw_ce_regs_addr_map { u32 msb; @@ -362,6 +368,8 @@ extern const struct ath10k_hw_values qca6174_values; extern const struct ath10k_hw_values qca99x0_values; extern const struct ath10k_hw_values qca9888_values; extern const struct ath10k_hw_values qca4019_values; +extern const struct ath10k_hw_values wcn3990_values; +extern struct ath10k_hw_ce_regs wcn3990_ce_regs; extern struct ath10k_hw_ce_regs qcax_ce_regs; void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey, @@ -375,6 +383,7 @@ void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey, #define QCA_REV_9984(ar) ((ar)->hw_rev == ATH10K_HW_QCA9984) #define QCA_REV_9377(ar) ((ar)->hw_rev == ATH10K_HW_QCA9377) #define QCA_REV_40XX(ar) ((ar)->hw_rev == ATH10K_HW_QCA4019) +#define QCA_REV_WCN3990(ar) ((ar)->hw_rev == ATH10K_HW_WCN3990) /* Known peculiarities: * - raw appears in nwifi decap, raw and nwifi appear in ethernet decap @@ -711,6 +720,11 @@ 
ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw, #define TARGET_10_4_IPHDR_PAD_CONFIG 1 #define TARGET_10_4_QWRAP_CONFIG 0 +/* TDLS config */ +#define TARGET_10_4_NUM_TDLS_VDEVS 1 +#define TARGET_10_4_NUM_TDLS_BUFFER_STA 1 +#define TARGET_10_4_NUM_TDLS_SLEEP_STA 1 + /* Maximum number of Copy Engine's supported */ #define CE_COUNT_MAX 12 diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 55c808f03a84..5683f1a5330e 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -7644,6 +7644,7 @@ static const struct ieee80211_ops ath10k_ops = { #ifdef CONFIG_PM .suspend = ath10k_wow_op_suspend, .resume = ath10k_wow_op_resume, + .set_wakeup = ath10k_wow_op_set_wakeup, #endif #ifdef CONFIG_MAC80211_DEBUGFS .sta_add_debugfs = ath10k_sta_add_debugfs, @@ -8197,8 +8198,11 @@ int ath10k_mac_register(struct ath10k *ar) NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P; } - if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map)) + if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map) || + test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, ar->wmi.svc_map)) { ar->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; + ieee80211_hw_set(ar->hw, TDLS_WIDER_BW); + } ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 7ebfc409018d..195dafb98131 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -672,16 +672,16 @@ static u32 ath10k_bus_pci_read32(struct ath10k *ar, u32 offset) inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); - ar_pci->bus_ops->write32(ar, offset, value); + ce->bus_ops->write32(ar, offset, value); } inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); - return ar_pci->bus_ops->read32(ar, offset); + return ce->bus_ops->read32(ar, offset); } u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr) @@ -761,7 +761,7 @@ static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar) static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe) { struct ath10k *ar = pipe->hif_ce_state; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl; struct sk_buff *skb; dma_addr_t paddr; @@ -784,9 +784,9 @@ static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe) ATH10K_SKB_RXCB(skb)->paddr = paddr; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ce->ce_lock); ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ce->ce_lock); if (ret) { dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); @@ -801,6 +801,7 @@ static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe) { struct ath10k *ar = pipe->hif_ce_state; struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl; int ret, num; @@ -810,9 +811,9 @@ static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe) if (!ce_pipe->dest_ring) return; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ce->ce_lock); num = __ath10k_ce_rx_num_free_bufs(ce_pipe); - spin_unlock_bh(&ar_pci->ce_lock); + 
spin_unlock_bh(&ce->ce_lock); while (num >= 0) { ret = __ath10k_pci_rx_post_buf(pipe); @@ -882,6 +883,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, int nbytes) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); int ret = 0; u32 *buf; unsigned int completed_nbytes, alloc_nbytes, remaining_bytes; @@ -892,7 +894,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, void *data_buf = NULL; int i; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ce->ce_lock); ce_diag = ar_pci->ce_diag; @@ -986,7 +988,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, dma_free_coherent(ar->dev, alloc_nbytes, data_buf, ce_data_base); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ce->ce_lock); return ret; } @@ -1034,6 +1036,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, const void *data, int nbytes) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); int ret = 0; u32 *buf; unsigned int completed_nbytes, orig_nbytes, remaining_bytes; @@ -1043,7 +1046,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, dma_addr_t ce_data_base = 0; int i; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ce->ce_lock); ce_diag = ar_pci->ce_diag; @@ -1147,7 +1150,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n", address, ret); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ce->ce_lock); return ret; } @@ -1342,6 +1345,7 @@ int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, struct ath10k_hif_sg_item *items, int n_items) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id]; struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl; struct ath10k_ce_ring *src_ring = ce_pipe->src_ring; @@ -1350,7 +1354,7 @@ int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, unsigned int write_index; int err, i = 0; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ce->ce_lock); nentries_mask = src_ring->nentries_mask; sw_index = src_ring->sw_index; @@ -1396,14 +1400,14 @@ int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, if (err) goto err; - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ce->ce_lock); return 0; err: for (; i > 0; i--) __ath10k_ce_send_revert(ce_pipe); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ce->ce_lock); return err; } @@ -1459,7 +1463,7 @@ static void ath10k_pci_dump_registers(struct ath10k *ar, static void ath10k_pci_fw_crashed_dump(struct ath10k *ar) { struct ath10k_fw_crash_data *crash_data; - char uuid[50]; + char guid[UUID_STRING_LEN + 1]; spin_lock_bh(&ar->data_lock); @@ -1468,11 +1472,11 @@ static void ath10k_pci_fw_crashed_dump(struct ath10k *ar) crash_data = ath10k_debug_get_new_fw_crash_data(ar); if (crash_data) - scnprintf(uuid, sizeof(uuid), "%pUl", &crash_data->uuid); + scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid); else - scnprintf(uuid, sizeof(uuid), "n/a"); + scnprintf(guid, sizeof(guid), "n/a"); - ath10k_err(ar, "firmware crashed! (uuid %s)\n", uuid); + ath10k_err(ar, "firmware crashed! (guid %s)\n", guid); ath10k_print_driver_info(ar); ath10k_pci_dump_registers(ar, crash_data); ath10k_ce_dump_registers(ar, crash_data); @@ -1593,6 +1597,8 @@ void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar) * to mask irq/MSI. 
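/* Aside, not part of the patch: the uuid_le -> guid_t conversion seen in
 * debug.c and in ath10k_pci_fw_crashed_dump() above follows the generic
 * kernel UUID API from <linux/uuid.h>. A minimal usage sketch under that
 * assumption; the ath10k_example_* name is hypothetical.
 */
#include <linux/kernel.h>
#include <linux/uuid.h>

static void ath10k_example_guid_report(void)
{
        guid_t guid;
        char buf[UUID_STRING_LEN + 1];

        guid_gen(&guid);                /* random GUID, as debug.c now does */
        snprintf(buf, sizeof(buf), "%pUl", &guid); /* same format pci.c prints */
        pr_info("example guid %s\n", buf);
}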
*/ break; + case ATH10K_HW_WCN3990: + break; } } @@ -1619,6 +1625,8 @@ static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar) * to unmask irq/MSI. */ break; + case ATH10K_HW_WCN3990: + break; } } @@ -2000,9 +2008,9 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar) static int ath10k_bus_get_num_banks(struct ath10k *ar) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); - return ar_pci->bus_ops->get_num_banks(ar); + return ce->bus_ops->get_num_banks(ar); } int ath10k_pci_init_config(struct ath10k *ar) @@ -2173,11 +2181,12 @@ int ath10k_pci_alloc_pipes(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_pci_pipe *pipe; + struct ath10k_ce *ce = ath10k_ce_priv(ar); int i, ret; for (i = 0; i < CE_COUNT; i++) { pipe = &ar_pci->pipe_info[i]; - pipe->ce_hdl = &ar_pci->ce_states[i]; + pipe->ce_hdl = &ce->ce_states[i]; pipe->pipe_num = i; pipe->hif_ce_state = ar; @@ -2825,7 +2834,7 @@ static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget) * interrupts safer to check for pending interrupts for * immediate servicing. */ - if (CE_INTERRUPT_SUMMARY(ar)) { + if (ath10k_ce_interrupt_summary(ar)) { napi_reschedule(ctx); goto out; } @@ -3142,9 +3151,10 @@ static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id) int ath10k_pci_setup_resource(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); int ret; - spin_lock_init(&ar_pci->ce_lock); + spin_lock_init(&ce->ce_lock); spin_lock_init(&ar_pci->ps_lock); setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, @@ -3263,10 +3273,11 @@ static int ath10k_pci_probe(struct pci_dev *pdev, ar_pci->ar = ar; ar->dev_id = pci_dev->device; ar_pci->pci_ps = pci_ps; - ar_pci->bus_ops = &ath10k_pci_bus_ops; + ar_pci->ce.bus_ops = &ath10k_pci_bus_ops; ar_pci->pci_soft_reset = pci_soft_reset; ar_pci->pci_hard_reset = pci_hard_reset; ar_pci->targ_cpu_to_ce_addr = targ_cpu_to_ce_addr; + ar->ce_priv = &ar_pci->ce; ar->id.vendor = pdev->vendor; ar->id.device = pdev->device; @@ -3385,11 +3396,50 @@ static void ath10k_pci_remove(struct pci_dev *pdev) MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table); +static __maybe_unused int ath10k_pci_pm_suspend(struct device *dev) +{ + struct ath10k *ar = dev_get_drvdata(dev); + int ret; + + if (test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, + ar->running_fw->fw_file.fw_features)) + return 0; + + ret = ath10k_hif_suspend(ar); + if (ret) + ath10k_warn(ar, "failed to suspend hif: %d\n", ret); + + return ret; +} + +static __maybe_unused int ath10k_pci_pm_resume(struct device *dev) +{ + struct ath10k *ar = dev_get_drvdata(dev); + int ret; + + if (test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, + ar->running_fw->fw_file.fw_features)) + return 0; + + ret = ath10k_hif_resume(ar); + if (ret) + ath10k_warn(ar, "failed to resume hif: %d\n", ret); + + return ret; +} + +static SIMPLE_DEV_PM_OPS(ath10k_pci_pm_ops, + ath10k_pci_pm_suspend, + ath10k_pci_pm_resume); + static struct pci_driver ath10k_pci_driver = { .name = "ath10k_pci", .id_table = ath10k_pci_id_table, .probe = ath10k_pci_probe, .remove = ath10k_pci_remove, +#ifdef CONFIG_PM + .driver.pm = &ath10k_pci_pm_ops, +#endif }; static int __init ath10k_pci_init(void) diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h index c1e08ad63940..424ff323b2dc 100644 --- a/drivers/net/wireless/ath/ath10k/pci.h +++ b/drivers/net/wireless/ath/ath10k/pci.h @@ -150,12 +150,6 @@ struct ath10k_pci_supp_chip { 
u32 rev_id; }; -struct ath10k_bus_ops { - u32 (*read32)(struct ath10k *ar, u32 offset); - void (*write32)(struct ath10k *ar, u32 offset, u32 value); - int (*get_num_banks)(struct ath10k *ar); -}; - enum ath10k_pci_irq_mode { ATH10K_PCI_IRQ_AUTO = 0, ATH10K_PCI_IRQ_LEGACY = 1, @@ -177,11 +171,7 @@ struct ath10k_pci { /* Copy Engine used for Diagnostic Accesses */ struct ath10k_ce_pipe *ce_diag; - /* FIXME: document what this really protects */ - spinlock_t ce_lock; - - /* Map CE id to ce_state */ - struct ath10k_ce_pipe ce_states[CE_COUNT_MAX]; + struct ath10k_ce ce; struct timer_list rx_post_retry; /* Due to HW quirks it is recommended to disable ASPM during device @@ -225,8 +215,6 @@ struct ath10k_pci { */ bool pci_ps; - const struct ath10k_bus_ops *bus_ops; - /* Chip specific pci reset routine used to do a safe reset */ int (*pci_soft_reset)(struct ath10k *ar); diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c index 859ed870bd97..03a69e5b1116 100644 --- a/drivers/net/wireless/ath/ath10k/sdio.c +++ b/drivers/net/wireless/ath/ath10k/sdio.c @@ -683,7 +683,7 @@ static int ath10k_sdio_mbox_rxmsg_pending_handler(struct ath10k *ar, lookaheads[0] = msg_lookahead; timeout = jiffies + SDIO_MBOX_PROCESSING_TIMEOUT_HZ; - while (time_before(jiffies, timeout)) { + do { /* Try to allocate as many HTC RX packets indicated by * n_lookaheads. */ @@ -719,7 +719,7 @@ static int ath10k_sdio_mbox_rxmsg_pending_handler(struct ath10k *ar, * performance in high throughput situations. */ *done = false; - } + } while (time_before(jiffies, timeout)); if (ret && (ret != -ECANCELED)) ath10k_warn(ar, "failed to get pending recv messages: %d\n", @@ -1336,16 +1336,14 @@ static void ath10k_sdio_irq_handler(struct sdio_func *func) sdio_release_host(ar_sdio->func); timeout = jiffies + ATH10K_SDIO_HIF_COMMUNICATION_TIMEOUT_HZ; - while (time_before(jiffies, timeout) && !done) { + do { ret = ath10k_sdio_mbox_proc_pending_irqs(ar, &done); if (ret) break; - } + } while (time_before(jiffies, timeout) && !done); sdio_claim_host(ar_sdio->func); - wake_up(&ar_sdio->irq_wq); - if (ret && ret != -ECANCELED) ath10k_warn(ar, "failed to process pending SDIO interrupts: %d\n", ret); @@ -2000,8 +1998,6 @@ static int ath10k_sdio_probe(struct sdio_func *func, goto err_free_bmi_buf; } - init_waitqueue_head(&ar_sdio->irq_wq); - for (i = 0; i < ATH10K_SDIO_BUS_REQUEST_MAX_NUM; i++) ath10k_sdio_free_bus_req(ar, &ar_sdio->bus_req[i]); diff --git a/drivers/net/wireless/ath/ath10k/sdio.h b/drivers/net/wireless/ath/ath10k/sdio.h index 3f61c67c601d..4ff7b545293b 100644 --- a/drivers/net/wireless/ath/ath10k/sdio.h +++ b/drivers/net/wireless/ath/ath10k/sdio.h @@ -210,8 +210,6 @@ struct ath10k_sdio { /* temporary buffer for BMI requests */ u8 *bmi_buf; - wait_queue_head_t irq_wq; - bool is_disabled; struct workqueue_struct *workqueue; diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c new file mode 100644 index 000000000000..d4803ff5a78a --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/usb.c @@ -0,0 +1,1106 @@ +/* + * Copyright (c) 2007-2011 Atheros Communications Inc. + * Copyright (c) 2011-2012,2017 Qualcomm Atheros, Inc. + * Copyright (c) 2016-2017 Erik Stromdahl + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include + +#include "debug.h" +#include "core.h" +#include "bmi.h" +#include "hif.h" +#include "htc.h" +#include "usb.h" + +static void ath10k_usb_post_recv_transfers(struct ath10k *ar, + struct ath10k_usb_pipe *recv_pipe); + +/* inlined helper functions */ + +static inline enum ath10k_htc_ep_id +eid_from_htc_hdr(struct ath10k_htc_hdr *htc_hdr) +{ + return (enum ath10k_htc_ep_id)htc_hdr->eid; +} + +static inline bool is_trailer_only_msg(struct ath10k_htc_hdr *htc_hdr) +{ + return __le16_to_cpu(htc_hdr->len) == htc_hdr->trailer_len; +} + +/* pipe/urb operations */ +static struct ath10k_urb_context * +ath10k_usb_alloc_urb_from_pipe(struct ath10k_usb_pipe *pipe) +{ + struct ath10k_urb_context *urb_context = NULL; + unsigned long flags; + + spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags); + if (!list_empty(&pipe->urb_list_head)) { + urb_context = list_first_entry(&pipe->urb_list_head, + struct ath10k_urb_context, link); + list_del(&urb_context->link); + pipe->urb_cnt--; + } + spin_unlock_irqrestore(&pipe->ar_usb->cs_lock, flags); + + return urb_context; +} + +static void ath10k_usb_free_urb_to_pipe(struct ath10k_usb_pipe *pipe, + struct ath10k_urb_context *urb_context) +{ + unsigned long flags; + + spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags); + + pipe->urb_cnt++; + list_add(&urb_context->link, &pipe->urb_list_head); + + spin_unlock_irqrestore(&pipe->ar_usb->cs_lock, flags); +} + +static void ath10k_usb_cleanup_recv_urb(struct ath10k_urb_context *urb_context) +{ + dev_kfree_skb(urb_context->skb); + urb_context->skb = NULL; + + ath10k_usb_free_urb_to_pipe(urb_context->pipe, urb_context); +} + +static void ath10k_usb_free_pipe_resources(struct ath10k *ar, + struct ath10k_usb_pipe *pipe) +{ + struct ath10k_urb_context *urb_context; + + if (!pipe->ar_usb) { + /* nothing allocated for this pipe */ + return; + } + + ath10k_dbg(ar, ATH10K_DBG_USB, + "usb free resources lpipe %d hpipe 0x%x urbs %d avail %d\n", + pipe->logical_pipe_num, pipe->usb_pipe_handle, + pipe->urb_alloc, pipe->urb_cnt); + + if (pipe->urb_alloc != pipe->urb_cnt) { + ath10k_dbg(ar, ATH10K_DBG_USB, + "usb urb leak lpipe %d hpipe 0x%x urbs %d avail %d\n", + pipe->logical_pipe_num, pipe->usb_pipe_handle, + pipe->urb_alloc, pipe->urb_cnt); + } + + for (;;) { + urb_context = ath10k_usb_alloc_urb_from_pipe(pipe); + + if (!urb_context) + break; + + kfree(urb_context); + } +} + +static void ath10k_usb_cleanup_pipe_resources(struct ath10k *ar) +{ + struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); + int i; + + for (i = 0; i < ATH10K_USB_PIPE_MAX; i++) + ath10k_usb_free_pipe_resources(ar, &ar_usb->pipes[i]); +} + +/* hif usb rx/tx completion functions */ + +static void ath10k_usb_recv_complete(struct urb *urb) +{ + struct ath10k_urb_context *urb_context = urb->context; + struct ath10k_usb_pipe *pipe = urb_context->pipe; + struct ath10k *ar = pipe->ar_usb->ar; + struct sk_buff *skb; + int status = 0; + + ath10k_dbg(ar, ATH10K_DBG_USB_BULK, + "usb recv pipe %d stat %d len %d urb 0x%pK\n", + 
pipe->logical_pipe_num, urb->status, urb->actual_length, + urb); + + if (urb->status != 0) { + status = -EIO; + switch (urb->status) { + case -ECONNRESET: + case -ENOENT: + case -ESHUTDOWN: + /* no need to spew these errors when device + * removed or urb killed due to driver shutdown + */ + status = -ECANCELED; + break; + default: + ath10k_dbg(ar, ATH10K_DBG_USB_BULK, + "usb recv pipe %d ep 0x%2.2x failed: %d\n", + pipe->logical_pipe_num, + pipe->ep_address, urb->status); + break; + } + goto cleanup_recv_urb; + } + + if (urb->actual_length == 0) + goto cleanup_recv_urb; + + skb = urb_context->skb; + + /* we are going to pass it up */ + urb_context->skb = NULL; + skb_put(skb, urb->actual_length); + + /* note: queue implements a lock */ + skb_queue_tail(&pipe->io_comp_queue, skb); + schedule_work(&pipe->io_complete_work); + +cleanup_recv_urb: + ath10k_usb_cleanup_recv_urb(urb_context); + + if (status == 0 && + pipe->urb_cnt >= pipe->urb_cnt_thresh) { + /* our free urbs are piling up, post more transfers */ + ath10k_usb_post_recv_transfers(ar, pipe); + } +} + +static void ath10k_usb_transmit_complete(struct urb *urb) +{ + struct ath10k_urb_context *urb_context = urb->context; + struct ath10k_usb_pipe *pipe = urb_context->pipe; + struct ath10k *ar = pipe->ar_usb->ar; + struct sk_buff *skb; + + if (urb->status != 0) { + ath10k_dbg(ar, ATH10K_DBG_USB_BULK, + "pipe: %d, failed:%d\n", + pipe->logical_pipe_num, urb->status); + } + + skb = urb_context->skb; + urb_context->skb = NULL; + ath10k_usb_free_urb_to_pipe(urb_context->pipe, urb_context); + + /* note: queue implements a lock */ + skb_queue_tail(&pipe->io_comp_queue, skb); + schedule_work(&pipe->io_complete_work); +} + +/* pipe operations */ +static void ath10k_usb_post_recv_transfers(struct ath10k *ar, + struct ath10k_usb_pipe *recv_pipe) +{ + struct ath10k_urb_context *urb_context; + struct urb *urb; + int usb_status; + + for (;;) { + urb_context = ath10k_usb_alloc_urb_from_pipe(recv_pipe); + if (!urb_context) + break; + + urb_context->skb = dev_alloc_skb(ATH10K_USB_RX_BUFFER_SIZE); + if (!urb_context->skb) + goto err; + + urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!urb) + goto err; + + usb_fill_bulk_urb(urb, + recv_pipe->ar_usb->udev, + recv_pipe->usb_pipe_handle, + urb_context->skb->data, + ATH10K_USB_RX_BUFFER_SIZE, + ath10k_usb_recv_complete, urb_context); + + ath10k_dbg(ar, ATH10K_DBG_USB_BULK, + "usb bulk recv submit %d 0x%x ep 0x%2.2x len %d buf 0x%pK\n", + recv_pipe->logical_pipe_num, + recv_pipe->usb_pipe_handle, recv_pipe->ep_address, + ATH10K_USB_RX_BUFFER_SIZE, urb_context->skb); + + usb_anchor_urb(urb, &recv_pipe->urb_submitted); + usb_status = usb_submit_urb(urb, GFP_ATOMIC); + + if (usb_status) { + ath10k_dbg(ar, ATH10K_DBG_USB_BULK, + "usb bulk recv failed: %d\n", + usb_status); + usb_unanchor_urb(urb); + usb_free_urb(urb); + goto err; + } + usb_free_urb(urb); + } + + return; + +err: + ath10k_usb_cleanup_recv_urb(urb_context); +} + +static void ath10k_usb_flush_all(struct ath10k *ar) +{ + struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); + int i; + + for (i = 0; i < ATH10K_USB_PIPE_MAX; i++) { + if (ar_usb->pipes[i].ar_usb) { + usb_kill_anchored_urbs(&ar_usb->pipes[i].urb_submitted); + cancel_work_sync(&ar_usb->pipes[i].io_complete_work); + } + } +} + +static void ath10k_usb_start_recv_pipes(struct ath10k *ar) +{ + struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); + + ar_usb->pipes[ATH10K_USB_PIPE_RX_DATA].urb_cnt_thresh = 1; + + ath10k_usb_post_recv_transfers(ar, + &ar_usb->pipes[ATH10K_USB_PIPE_RX_DATA]); +} + +static void 
ath10k_usb_tx_complete(struct ath10k *ar, struct sk_buff *skb) +{ + struct ath10k_htc_hdr *htc_hdr; + struct ath10k_htc_ep *ep; + + htc_hdr = (struct ath10k_htc_hdr *)skb->data; + ep = &ar->htc.endpoint[htc_hdr->eid]; + ath10k_htc_notify_tx_completion(ep, skb); + /* The TX complete handler now owns the skb... */ +} + +static void ath10k_usb_rx_complete(struct ath10k *ar, struct sk_buff *skb) +{ + struct ath10k_htc *htc = &ar->htc; + struct ath10k_htc_hdr *htc_hdr; + enum ath10k_htc_ep_id eid; + struct ath10k_htc_ep *ep; + u16 payload_len; + u8 *trailer; + int ret; + + htc_hdr = (struct ath10k_htc_hdr *)skb->data; + eid = eid_from_htc_hdr(htc_hdr); + ep = &ar->htc.endpoint[eid]; + + if (ep->service_id == 0) { + ath10k_warn(ar, "ep %d is not connected\n", eid); + goto out_free_skb; + } + + payload_len = le16_to_cpu(htc_hdr->len); + if (!payload_len) { + ath10k_warn(ar, "zero length frame received, firmware crashed?\n"); + goto out_free_skb; + } + + if (payload_len < htc_hdr->trailer_len) { + ath10k_warn(ar, "malformed frame received, firmware crashed?\n"); + goto out_free_skb; + } + + if (htc_hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT) { + trailer = skb->data + sizeof(*htc_hdr) + payload_len - + htc_hdr->trailer_len; + + ret = ath10k_htc_process_trailer(htc, + trailer, + htc_hdr->trailer_len, + eid, + NULL, + NULL); + if (ret) + goto out_free_skb; + + if (is_trailer_only_msg(htc_hdr)) + goto out_free_skb; + + /* strip off the trailer from the skb since it should not + * be passed on to upper layers + */ + skb_trim(skb, skb->len - htc_hdr->trailer_len); + } + + skb_pull(skb, sizeof(*htc_hdr)); + ep->ep_ops.ep_rx_complete(ar, skb); + /* The RX complete handler now owns the skb... */ + + return; + +out_free_skb: + dev_kfree_skb(skb); +} + +static void ath10k_usb_io_comp_work(struct work_struct *work) +{ + struct ath10k_usb_pipe *pipe = container_of(work, + struct ath10k_usb_pipe, + io_complete_work); + struct ath10k *ar = pipe->ar_usb->ar; + struct sk_buff *skb; + + while ((skb = skb_dequeue(&pipe->io_comp_queue))) { + if (pipe->flags & ATH10K_USB_PIPE_FLAG_TX) + ath10k_usb_tx_complete(ar, skb); + else + ath10k_usb_rx_complete(ar, skb); + } +} + +#define ATH10K_USB_MAX_DIAG_CMD (sizeof(struct ath10k_usb_ctrl_diag_cmd_write)) +#define ATH10K_USB_MAX_DIAG_RESP (sizeof(struct ath10k_usb_ctrl_diag_resp_read)) + +static void ath10k_usb_destroy(struct ath10k *ar) +{ + struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); + + ath10k_usb_flush_all(ar); + ath10k_usb_cleanup_pipe_resources(ar); + usb_set_intfdata(ar_usb->interface, NULL); + + kfree(ar_usb->diag_cmd_buffer); + kfree(ar_usb->diag_resp_buffer); +} + +static int ath10k_usb_hif_start(struct ath10k *ar) +{ + int i; + struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); + + ath10k_usb_start_recv_pipes(ar); + + /* set the TX resource avail threshold for each TX pipe */ + for (i = ATH10K_USB_PIPE_TX_CTRL; + i <= ATH10K_USB_PIPE_TX_DATA_HP; i++) { + ar_usb->pipes[i].urb_cnt_thresh = + ar_usb->pipes[i].urb_alloc / 2; + } + + return 0; +} + +static int ath10k_usb_hif_tx_sg(struct ath10k *ar, u8 pipe_id, + struct ath10k_hif_sg_item *items, int n_items) +{ + struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); + struct ath10k_usb_pipe *pipe = &ar_usb->pipes[pipe_id]; + struct ath10k_urb_context *urb_context; + struct sk_buff *skb; + struct urb *urb; + int ret, i; + + for (i = 0; i < n_items; i++) { + urb_context = ath10k_usb_alloc_urb_from_pipe(pipe); + if (!urb_context) { + ret = -ENOMEM; + goto err; + } + + skb = items[i].transfer_context; + urb_context->skb = 
skb; + + urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!urb) { + ret = -ENOMEM; + goto err_free_urb_to_pipe; + } + + usb_fill_bulk_urb(urb, + ar_usb->udev, + pipe->usb_pipe_handle, + skb->data, + skb->len, + ath10k_usb_transmit_complete, urb_context); + + if (!(skb->len % pipe->max_packet_size)) { + /* hit a max packet boundary on this pipe */ + urb->transfer_flags |= URB_ZERO_PACKET; + } + + usb_anchor_urb(urb, &pipe->urb_submitted); + ret = usb_submit_urb(urb, GFP_ATOMIC); + if (ret) { + ath10k_dbg(ar, ATH10K_DBG_USB_BULK, + "usb bulk transmit failed: %d\n", ret); + usb_unanchor_urb(urb); + ret = -EINVAL; + goto err_free_urb_to_pipe; + } + + usb_free_urb(urb); + } + + return 0; + +err_free_urb_to_pipe: + ath10k_usb_free_urb_to_pipe(urb_context->pipe, urb_context); +err: + return ret; +} + +static void ath10k_usb_hif_stop(struct ath10k *ar) +{ + ath10k_usb_flush_all(ar); +} + +static u16 ath10k_usb_hif_get_free_queue_number(struct ath10k *ar, u8 pipe_id) +{ + struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); + + return ar_usb->pipes[pipe_id].urb_cnt; +} + +static int ath10k_usb_submit_ctrl_out(struct ath10k *ar, + u8 req, u16 value, u16 index, void *data, + u32 size) +{ + struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); + u8 *buf = NULL; + int ret; + + if (size > 0) { + buf = kmemdup(data, size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + } + + /* note: if successful returns number of bytes transferred */ + ret = usb_control_msg(ar_usb->udev, + usb_sndctrlpipe(ar_usb->udev, 0), + req, + USB_DIR_OUT | USB_TYPE_VENDOR | + USB_RECIP_DEVICE, value, index, buf, + size, 1000); + + if (ret < 0) { + ath10k_warn(ar, "Failed to submit usb control message: %d\n", + ret); + kfree(buf); + return ret; + } + + kfree(buf); + + return 0; +} + +static int ath10k_usb_submit_ctrl_in(struct ath10k *ar, + u8 req, u16 value, u16 index, void *data, + u32 size) +{ + struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); + u8 *buf = NULL; + int ret; + + if (size > 0) { + buf = kmalloc(size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + } + + /* note: if successful returns number of bytes transferred */ + ret = usb_control_msg(ar_usb->udev, + usb_rcvctrlpipe(ar_usb->udev, 0), + req, + USB_DIR_IN | USB_TYPE_VENDOR | + USB_RECIP_DEVICE, value, index, buf, + size, 2 * HZ); + + if (ret < 0) { + ath10k_warn(ar, "Failed to read usb control message: %d\n", + ret); + kfree(buf); + return ret; + } + + memcpy((u8 *)data, buf, size); + + kfree(buf); + + return 0; +} + +static int ath10k_usb_ctrl_msg_exchange(struct ath10k *ar, + u8 req_val, u8 *req_buf, u32 req_len, + u8 resp_val, u8 *resp_buf, + u32 *resp_len) +{ + int ret; + + /* send command */ + ret = ath10k_usb_submit_ctrl_out(ar, req_val, 0, 0, + req_buf, req_len); + if (ret) + goto err; + + /* get response */ + if (resp_buf) { + ret = ath10k_usb_submit_ctrl_in(ar, resp_val, 0, 0, + resp_buf, *resp_len); + if (ret) + goto err; + } + + return 0; +err: + return ret; +} + +static int ath10k_usb_hif_diag_read(struct ath10k *ar, u32 address, void *buf, + size_t buf_len) +{ + struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); + struct ath10k_usb_ctrl_diag_cmd_read *cmd; + u32 resp_len; + int ret; + + if (buf_len < sizeof(struct ath10k_usb_ctrl_diag_resp_read)) + return -EINVAL; + + cmd = (struct ath10k_usb_ctrl_diag_cmd_read *)ar_usb->diag_cmd_buffer; + memset(cmd, 0, sizeof(*cmd)); + cmd->cmd = ATH10K_USB_CTRL_DIAG_CC_READ; + cmd->address = cpu_to_le32(address); + resp_len = sizeof(struct ath10k_usb_ctrl_diag_resp_read); + + ret = ath10k_usb_ctrl_msg_exchange(ar, + 
ATH10K_USB_CONTROL_REQ_DIAG_CMD, + (u8 *)cmd, + sizeof(*cmd), + ATH10K_USB_CONTROL_REQ_DIAG_RESP, + ar_usb->diag_resp_buffer, &resp_len); + if (ret) + return ret; + + if (resp_len != sizeof(struct ath10k_usb_ctrl_diag_resp_read)) + return -EMSGSIZE; + + memcpy(buf, ar_usb->diag_resp_buffer, + sizeof(struct ath10k_usb_ctrl_diag_resp_read)); + + return 0; +} + +static int ath10k_usb_hif_diag_write(struct ath10k *ar, u32 address, + const void *data, int nbytes) +{ + struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); + struct ath10k_usb_ctrl_diag_cmd_write *cmd; + int ret; + + if (nbytes != sizeof(cmd->value)) + return -EINVAL; + + cmd = (struct ath10k_usb_ctrl_diag_cmd_write *)ar_usb->diag_cmd_buffer; + memset(cmd, 0, sizeof(*cmd)); + cmd->cmd = cpu_to_le32(ATH10K_USB_CTRL_DIAG_CC_WRITE); + cmd->address = cpu_to_le32(address); + memcpy(&cmd->value, data, nbytes); + + ret = ath10k_usb_ctrl_msg_exchange(ar, + ATH10K_USB_CONTROL_REQ_DIAG_CMD, + (u8 *)cmd, + sizeof(*cmd), + 0, NULL, NULL); + if (ret) + return ret; + + return 0; +} + +static int ath10k_usb_bmi_exchange_msg(struct ath10k *ar, + void *req, u32 req_len, + void *resp, u32 *resp_len) +{ + int ret; + + if (req) { + ret = ath10k_usb_submit_ctrl_out(ar, + ATH10K_USB_CONTROL_REQ_SEND_BMI_CMD, + 0, 0, req, req_len); + if (ret) { + ath10k_warn(ar, + "unable to send the bmi data to the device: %d\n", + ret); + return ret; + } + } + + if (resp) { + ret = ath10k_usb_submit_ctrl_in(ar, + ATH10K_USB_CONTROL_REQ_RECV_BMI_RESP, + 0, 0, resp, *resp_len); + if (ret) { + ath10k_warn(ar, + "Unable to read the bmi data from the device: %d\n", + ret); + return ret; + } + } + + return 0; +} + +static void ath10k_usb_hif_get_default_pipe(struct ath10k *ar, + u8 *ul_pipe, u8 *dl_pipe) +{ + *ul_pipe = ATH10K_USB_PIPE_TX_CTRL; + *dl_pipe = ATH10K_USB_PIPE_RX_CTRL; +} + +static int ath10k_usb_hif_map_service_to_pipe(struct ath10k *ar, u16 svc_id, + u8 *ul_pipe, u8 *dl_pipe) +{ + switch (svc_id) { + case ATH10K_HTC_SVC_ID_RSVD_CTRL: + case ATH10K_HTC_SVC_ID_WMI_CONTROL: + *ul_pipe = ATH10K_USB_PIPE_TX_CTRL; + /* due to large control packets, shift to data pipe */ + *dl_pipe = ATH10K_USB_PIPE_RX_DATA; + break; + case ATH10K_HTC_SVC_ID_HTT_DATA_MSG: + *ul_pipe = ATH10K_USB_PIPE_TX_DATA_LP; + /* Disable rxdata2 directly, it will be enabled + * if FW enable rxdata2 + */ + *dl_pipe = ATH10K_USB_PIPE_RX_DATA; + break; + default: + return -EPERM; + } + + return 0; +} + +/* This op is currently only used by htc_wait_target if the HTC ready + * message times out. It is not applicable for USB since there is nothing + * we can do if the HTC ready message does not arrive in time. + * TODO: Make this op non mandatory by introducing a NULL check in the + * hif op wrapper. 
+ */ +static void ath10k_usb_hif_send_complete_check(struct ath10k *ar, + u8 pipe, int force) +{ +} + +static int ath10k_usb_hif_power_up(struct ath10k *ar) +{ + return 0; +} + +static void ath10k_usb_hif_power_down(struct ath10k *ar) +{ + ath10k_usb_flush_all(ar); +} + +#ifdef CONFIG_PM + +static int ath10k_usb_hif_suspend(struct ath10k *ar) +{ + return -EOPNOTSUPP; +} + +static int ath10k_usb_hif_resume(struct ath10k *ar) +{ + return -EOPNOTSUPP; +} +#endif + +static const struct ath10k_hif_ops ath10k_usb_hif_ops = { + .tx_sg = ath10k_usb_hif_tx_sg, + .diag_read = ath10k_usb_hif_diag_read, + .diag_write = ath10k_usb_hif_diag_write, + .exchange_bmi_msg = ath10k_usb_bmi_exchange_msg, + .start = ath10k_usb_hif_start, + .stop = ath10k_usb_hif_stop, + .map_service_to_pipe = ath10k_usb_hif_map_service_to_pipe, + .get_default_pipe = ath10k_usb_hif_get_default_pipe, + .send_complete_check = ath10k_usb_hif_send_complete_check, + .get_free_queue_number = ath10k_usb_hif_get_free_queue_number, + .power_up = ath10k_usb_hif_power_up, + .power_down = ath10k_usb_hif_power_down, +#ifdef CONFIG_PM + .suspend = ath10k_usb_hif_suspend, + .resume = ath10k_usb_hif_resume, +#endif +}; + +static u8 ath10k_usb_get_logical_pipe_num(u8 ep_address, int *urb_count) +{ + u8 pipe_num = ATH10K_USB_PIPE_INVALID; + + switch (ep_address) { + case ATH10K_USB_EP_ADDR_APP_CTRL_IN: + pipe_num = ATH10K_USB_PIPE_RX_CTRL; + *urb_count = RX_URB_COUNT; + break; + case ATH10K_USB_EP_ADDR_APP_DATA_IN: + pipe_num = ATH10K_USB_PIPE_RX_DATA; + *urb_count = RX_URB_COUNT; + break; + case ATH10K_USB_EP_ADDR_APP_INT_IN: + pipe_num = ATH10K_USB_PIPE_RX_INT; + *urb_count = RX_URB_COUNT; + break; + case ATH10K_USB_EP_ADDR_APP_DATA2_IN: + pipe_num = ATH10K_USB_PIPE_RX_DATA2; + *urb_count = RX_URB_COUNT; + break; + case ATH10K_USB_EP_ADDR_APP_CTRL_OUT: + pipe_num = ATH10K_USB_PIPE_TX_CTRL; + *urb_count = TX_URB_COUNT; + break; + case ATH10K_USB_EP_ADDR_APP_DATA_LP_OUT: + pipe_num = ATH10K_USB_PIPE_TX_DATA_LP; + *urb_count = TX_URB_COUNT; + break; + case ATH10K_USB_EP_ADDR_APP_DATA_MP_OUT: + pipe_num = ATH10K_USB_PIPE_TX_DATA_MP; + *urb_count = TX_URB_COUNT; + break; + case ATH10K_USB_EP_ADDR_APP_DATA_HP_OUT: + pipe_num = ATH10K_USB_PIPE_TX_DATA_HP; + *urb_count = TX_URB_COUNT; + break; + default: + /* note: there may be endpoints not currently used */ + break; + } + + return pipe_num; +} + +static int ath10k_usb_alloc_pipe_resources(struct ath10k *ar, + struct ath10k_usb_pipe *pipe, + int urb_cnt) +{ + struct ath10k_urb_context *urb_context; + int i; + + INIT_LIST_HEAD(&pipe->urb_list_head); + init_usb_anchor(&pipe->urb_submitted); + + for (i = 0; i < urb_cnt; i++) { + urb_context = kzalloc(sizeof(*urb_context), GFP_KERNEL); + if (!urb_context) + return -ENOMEM; + + urb_context->pipe = pipe; + + /* we are only allocate the urb contexts here, the actual URB + * is allocated from the kernel as needed to do a transaction + */ + pipe->urb_alloc++; + ath10k_usb_free_urb_to_pipe(pipe, urb_context); + } + + ath10k_dbg(ar, ATH10K_DBG_USB, + "usb alloc resources lpipe %d hpipe 0x%x urbs %d\n", + pipe->logical_pipe_num, pipe->usb_pipe_handle, + pipe->urb_alloc); + + return 0; +} + +static int ath10k_usb_setup_pipe_resources(struct ath10k *ar, + struct usb_interface *interface) +{ + struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); + struct usb_host_interface *iface_desc = interface->cur_altsetting; + struct usb_endpoint_descriptor *endpoint; + struct ath10k_usb_pipe *pipe; + int ret, i, urbcount; + u8 pipe_num; + + ath10k_dbg(ar, ATH10K_DBG_USB, "usb 
setting up pipes using interface\n"); + + /* walk descriptors and setup pipes */ + for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { + endpoint = &iface_desc->endpoint[i].desc; + + if (ATH10K_USB_IS_BULK_EP(endpoint->bmAttributes)) { + ath10k_dbg(ar, ATH10K_DBG_USB, + "usb %s bulk ep 0x%2.2x maxpktsz %d\n", + ATH10K_USB_IS_DIR_IN + (endpoint->bEndpointAddress) ? + "rx" : "tx", endpoint->bEndpointAddress, + le16_to_cpu(endpoint->wMaxPacketSize)); + } else if (ATH10K_USB_IS_INT_EP(endpoint->bmAttributes)) { + ath10k_dbg(ar, ATH10K_DBG_USB, + "usb %s int ep 0x%2.2x maxpktsz %d interval %d\n", + ATH10K_USB_IS_DIR_IN + (endpoint->bEndpointAddress) ? + "rx" : "tx", endpoint->bEndpointAddress, + le16_to_cpu(endpoint->wMaxPacketSize), + endpoint->bInterval); + } else if (ATH10K_USB_IS_ISOC_EP(endpoint->bmAttributes)) { + /* TODO for ISO */ + ath10k_dbg(ar, ATH10K_DBG_USB, + "usb %s isoc ep 0x%2.2x maxpktsz %d interval %d\n", + ATH10K_USB_IS_DIR_IN + (endpoint->bEndpointAddress) ? + "rx" : "tx", endpoint->bEndpointAddress, + le16_to_cpu(endpoint->wMaxPacketSize), + endpoint->bInterval); + } + urbcount = 0; + + pipe_num = + ath10k_usb_get_logical_pipe_num(endpoint->bEndpointAddress, + &urbcount); + if (pipe_num == ATH10K_USB_PIPE_INVALID) + continue; + + pipe = &ar_usb->pipes[pipe_num]; + if (pipe->ar_usb) + /* hmmm..pipe was already setup */ + continue; + + pipe->ar_usb = ar_usb; + pipe->logical_pipe_num = pipe_num; + pipe->ep_address = endpoint->bEndpointAddress; + pipe->max_packet_size = le16_to_cpu(endpoint->wMaxPacketSize); + + if (ATH10K_USB_IS_BULK_EP(endpoint->bmAttributes)) { + if (ATH10K_USB_IS_DIR_IN(pipe->ep_address)) { + pipe->usb_pipe_handle = + usb_rcvbulkpipe(ar_usb->udev, + pipe->ep_address); + } else { + pipe->usb_pipe_handle = + usb_sndbulkpipe(ar_usb->udev, + pipe->ep_address); + } + } else if (ATH10K_USB_IS_INT_EP(endpoint->bmAttributes)) { + if (ATH10K_USB_IS_DIR_IN(pipe->ep_address)) { + pipe->usb_pipe_handle = + usb_rcvintpipe(ar_usb->udev, + pipe->ep_address); + } else { + pipe->usb_pipe_handle = + usb_sndintpipe(ar_usb->udev, + pipe->ep_address); + } + } else if (ATH10K_USB_IS_ISOC_EP(endpoint->bmAttributes)) { + /* TODO for ISO */ + if (ATH10K_USB_IS_DIR_IN(pipe->ep_address)) { + pipe->usb_pipe_handle = + usb_rcvisocpipe(ar_usb->udev, + pipe->ep_address); + } else { + pipe->usb_pipe_handle = + usb_sndisocpipe(ar_usb->udev, + pipe->ep_address); + } + } + + pipe->ep_desc = endpoint; + + if (!ATH10K_USB_IS_DIR_IN(pipe->ep_address)) + pipe->flags |= ATH10K_USB_PIPE_FLAG_TX; + + ret = ath10k_usb_alloc_pipe_resources(ar, pipe, urbcount); + if (ret) + return ret; + } + + return 0; +} + +static int ath10k_usb_create(struct ath10k *ar, + struct usb_interface *interface) +{ + struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); + struct usb_device *dev = interface_to_usbdev(interface); + struct ath10k_usb_pipe *pipe; + int ret, i; + + usb_set_intfdata(interface, ar_usb); + spin_lock_init(&ar_usb->cs_lock); + ar_usb->udev = dev; + ar_usb->interface = interface; + + for (i = 0; i < ATH10K_USB_PIPE_MAX; i++) { + pipe = &ar_usb->pipes[i]; + INIT_WORK(&pipe->io_complete_work, + ath10k_usb_io_comp_work); + skb_queue_head_init(&pipe->io_comp_queue); + } + + ar_usb->diag_cmd_buffer = kzalloc(ATH10K_USB_MAX_DIAG_CMD, GFP_KERNEL); + if (!ar_usb->diag_cmd_buffer) { + ret = -ENOMEM; + goto err; + } + + ar_usb->diag_resp_buffer = kzalloc(ATH10K_USB_MAX_DIAG_RESP, + GFP_KERNEL); + if (!ar_usb->diag_resp_buffer) { + ret = -ENOMEM; + goto err; + } + + ret = ath10k_usb_setup_pipe_resources(ar, 
interface); + if (ret) + goto err; + + return 0; + +err: + ath10k_usb_destroy(ar); + return ret; +} + +/* ath10k usb driver registered functions */ +static int ath10k_usb_probe(struct usb_interface *interface, + const struct usb_device_id *id) +{ + struct ath10k *ar; + struct ath10k_usb *ar_usb; + struct usb_device *dev = interface_to_usbdev(interface); + int ret, vendor_id, product_id; + enum ath10k_hw_rev hw_rev; + u32 chip_id; + + /* Assumption: All USB based chipsets (so far) are QCA9377 based. + * If there will be newer chipsets that does not use the hw reg + * setup as defined in qca6174_regs and qca6174_values, this + * assumption is no longer valid and hw_rev must be setup differently + * depending on chipset. + */ + hw_rev = ATH10K_HW_QCA9377; + + ar = ath10k_core_create(sizeof(*ar_usb), &dev->dev, ATH10K_BUS_USB, + hw_rev, &ath10k_usb_hif_ops); + if (!ar) { + dev_err(&dev->dev, "failed to allocate core\n"); + return -ENOMEM; + } + + usb_get_dev(dev); + vendor_id = le16_to_cpu(dev->descriptor.idVendor); + product_id = le16_to_cpu(dev->descriptor.idProduct); + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "usb new func vendor 0x%04x product 0x%04x\n", + vendor_id, product_id); + + ar_usb = ath10k_usb_priv(ar); + ret = ath10k_usb_create(ar, interface); + ar_usb->ar = ar; + + ar->dev_id = product_id; + ar->id.vendor = vendor_id; + ar->id.device = product_id; + + /* TODO: don't know yet how to get chip_id with USB */ + chip_id = 0; + ret = ath10k_core_register(ar, chip_id); + if (ret) { + ath10k_warn(ar, "failed to register driver core: %d\n", ret); + goto err; + } + + /* TODO: remove this once USB support is fully implemented */ + ath10k_warn(ar, "WARNING: ath10k USB support is incomplete, don't expect anything to work!\n"); + + return 0; + +err: + ath10k_core_destroy(ar); + + usb_put_dev(dev); + + return ret; +} + +static void ath10k_usb_remove(struct usb_interface *interface) +{ + struct ath10k_usb *ar_usb; + + ar_usb = usb_get_intfdata(interface); + if (!ar_usb) + return; + + ath10k_core_unregister(ar_usb->ar); + ath10k_usb_destroy(ar_usb->ar); + usb_put_dev(interface_to_usbdev(interface)); + ath10k_core_destroy(ar_usb->ar); +} + +#ifdef CONFIG_PM + +static int ath10k_usb_pm_suspend(struct usb_interface *interface, + pm_message_t message) +{ + struct ath10k_usb *ar_usb = usb_get_intfdata(interface); + + ath10k_usb_flush_all(ar_usb->ar); + return 0; +} + +static int ath10k_usb_pm_resume(struct usb_interface *interface) +{ + struct ath10k_usb *ar_usb = usb_get_intfdata(interface); + struct ath10k *ar = ar_usb->ar; + + ath10k_usb_post_recv_transfers(ar, + &ar_usb->pipes[ATH10K_USB_PIPE_RX_DATA]); + + return 0; +} + +#else + +#define ath10k_usb_pm_suspend NULL +#define ath10k_usb_pm_resume NULL + +#endif + +/* table of devices that work with this driver */ +static struct usb_device_id ath10k_usb_ids[] = { + {USB_DEVICE(0x13b1, 0x0042)}, /* Linksys WUSB6100M */ + { /* Terminating entry */ }, +}; + +MODULE_DEVICE_TABLE(usb, ath10k_usb_ids); + +static struct usb_driver ath10k_usb_driver = { + .name = "ath10k_usb", + .probe = ath10k_usb_probe, + .suspend = ath10k_usb_pm_suspend, + .resume = ath10k_usb_pm_resume, + .disconnect = ath10k_usb_remove, + .id_table = ath10k_usb_ids, + .supports_autosuspend = true, + .disable_hub_initiated_lpm = 1, +}; + +module_usb_driver(ath10k_usb_driver); + +MODULE_AUTHOR("Atheros Communications, Inc."); +MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN USB devices"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git 
a/drivers/net/wireless/ath/ath10k/usb.h b/drivers/net/wireless/ath/ath10k/usb.h new file mode 100644 index 000000000000..f60a3cc7d712 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/usb.h @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2004-2011 Atheros Communications Inc. + * Copyright (c) 2011-2012 Qualcomm Atheros, Inc. + * Copyright (c) 2016-2017 Erik Stromdahl + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _USB_H_ +#define _USB_H_ + +/* constants */ +#define TX_URB_COUNT 32 +#define RX_URB_COUNT 32 +#define ATH10K_USB_RX_BUFFER_SIZE 4096 + +#define ATH10K_USB_PIPE_INVALID ATH10K_USB_PIPE_MAX + +/* USB endpoint definitions */ +#define ATH10K_USB_EP_ADDR_APP_CTRL_IN 0x81 +#define ATH10K_USB_EP_ADDR_APP_DATA_IN 0x82 +#define ATH10K_USB_EP_ADDR_APP_DATA2_IN 0x83 +#define ATH10K_USB_EP_ADDR_APP_INT_IN 0x84 + +#define ATH10K_USB_EP_ADDR_APP_CTRL_OUT 0x01 +#define ATH10K_USB_EP_ADDR_APP_DATA_LP_OUT 0x02 +#define ATH10K_USB_EP_ADDR_APP_DATA_MP_OUT 0x03 +#define ATH10K_USB_EP_ADDR_APP_DATA_HP_OUT 0x04 + +/* diagnostic command definitions */ +#define ATH10K_USB_CONTROL_REQ_SEND_BMI_CMD 1 +#define ATH10K_USB_CONTROL_REQ_RECV_BMI_RESP 2 +#define ATH10K_USB_CONTROL_REQ_DIAG_CMD 3 +#define ATH10K_USB_CONTROL_REQ_DIAG_RESP 4 + +#define ATH10K_USB_CTRL_DIAG_CC_READ 0 +#define ATH10K_USB_CTRL_DIAG_CC_WRITE 1 + +#define ATH10K_USB_IS_BULK_EP(attr) (((attr) & 3) == 0x02) +#define ATH10K_USB_IS_INT_EP(attr) (((attr) & 3) == 0x03) +#define ATH10K_USB_IS_ISOC_EP(attr) (((attr) & 3) == 0x01) +#define ATH10K_USB_IS_DIR_IN(addr) ((addr) & 0x80) + +struct ath10k_usb_ctrl_diag_cmd_write { + __le32 cmd; + __le32 address; + __le32 value; + __le32 padding; +} __packed; + +struct ath10k_usb_ctrl_diag_cmd_read { + __le32 cmd; + __le32 address; +} __packed; + +struct ath10k_usb_ctrl_diag_resp_read { + u8 value[4]; +} __packed; + +/* tx/rx pipes for usb */ +enum ath10k_usb_pipe_id { + ATH10K_USB_PIPE_TX_CTRL = 0, + ATH10K_USB_PIPE_TX_DATA_LP, + ATH10K_USB_PIPE_TX_DATA_MP, + ATH10K_USB_PIPE_TX_DATA_HP, + ATH10K_USB_PIPE_RX_CTRL, + ATH10K_USB_PIPE_RX_DATA, + ATH10K_USB_PIPE_RX_DATA2, + ATH10K_USB_PIPE_RX_INT, + ATH10K_USB_PIPE_MAX +}; + +struct ath10k_usb_pipe { + struct list_head urb_list_head; + struct usb_anchor urb_submitted; + u32 urb_alloc; + u32 urb_cnt; + u32 urb_cnt_thresh; + unsigned int usb_pipe_handle; + u32 flags; + u8 ep_address; + u8 logical_pipe_num; + struct ath10k_usb *ar_usb; + u16 max_packet_size; + struct work_struct io_complete_work; + struct sk_buff_head io_comp_queue; + struct usb_endpoint_descriptor *ep_desc; +}; + +#define ATH10K_USB_PIPE_FLAG_TX BIT(0) + +/* usb device object */ +struct ath10k_usb { + /* protects pipe->urb_list_head and pipe->urb_cnt */ + spinlock_t cs_lock; + + struct usb_device *udev; + struct usb_interface *interface; + struct ath10k_usb_pipe pipes[ATH10K_USB_PIPE_MAX]; + u8 
*diag_cmd_buffer; + u8 *diag_resp_buffer; + struct ath10k *ar; +}; + +/* usb urb object */ +struct ath10k_urb_context { + struct list_head link; + struct ath10k_usb_pipe *pipe; + struct sk_buff *skb; + struct ath10k *ar; +}; + +static inline struct ath10k_usb *ath10k_usb_priv(struct ath10k *ar) +{ + return (struct ath10k_usb *)ar->drv_priv; +} + +#endif diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 3efb404b83c0..38a97086708b 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -651,8 +651,6 @@ static struct wmi_cmd_map wmi_10_4_cmd_map = { .gpio_output_cmdid = WMI_10_4_GPIO_OUTPUT_CMDID, .pdev_get_temperature_cmdid = WMI_10_4_PDEV_GET_TEMPERATURE_CMDID, .vdev_set_wmm_params_cmdid = WMI_CMD_UNSUPPORTED, - .tdls_set_state_cmdid = WMI_CMD_UNSUPPORTED, - .tdls_peer_update_cmdid = WMI_CMD_UNSUPPORTED, .adaptive_qcs_cmdid = WMI_CMD_UNSUPPORTED, .scan_update_request_cmdid = WMI_10_4_SCAN_UPDATE_REQUEST_CMDID, .vdev_standby_response_cmdid = WMI_10_4_VDEV_STANDBY_RESPONSE_CMDID, @@ -711,6 +709,33 @@ static struct wmi_cmd_map wmi_10_4_cmd_map = { .pdev_bss_chan_info_request_cmdid = WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID, .ext_resource_cfg_cmdid = WMI_10_4_EXT_RESOURCE_CFG_CMDID, + .vdev_set_ie_cmdid = WMI_10_4_VDEV_SET_IE_CMDID, + .set_lteu_config_cmdid = WMI_10_4_SET_LTEU_CONFIG_CMDID, + .atf_ssid_grouping_request_cmdid = + WMI_10_4_ATF_SSID_GROUPING_REQUEST_CMDID, + .peer_atf_ext_request_cmdid = WMI_10_4_PEER_ATF_EXT_REQUEST_CMDID, + .set_periodic_channel_stats_cfg_cmdid = + WMI_10_4_SET_PERIODIC_CHANNEL_STATS_CONFIG, + .peer_bwf_request_cmdid = WMI_10_4_PEER_BWF_REQUEST_CMDID, + .btcoex_cfg_cmdid = WMI_10_4_BTCOEX_CFG_CMDID, + .peer_tx_mu_txmit_count_cmdid = WMI_10_4_PEER_TX_MU_TXMIT_COUNT_CMDID, + .peer_tx_mu_txmit_rstcnt_cmdid = WMI_10_4_PEER_TX_MU_TXMIT_RSTCNT_CMDID, + .peer_gid_userpos_list_cmdid = WMI_10_4_PEER_GID_USERPOS_LIST_CMDID, + .pdev_check_cal_version_cmdid = WMI_10_4_PDEV_CHECK_CAL_VERSION_CMDID, + .coex_version_cfg_cmid = WMI_10_4_COEX_VERSION_CFG_CMID, + .pdev_get_rx_filter_cmdid = WMI_10_4_PDEV_GET_RX_FILTER_CMDID, + .pdev_extended_nss_cfg_cmdid = WMI_10_4_PDEV_EXTENDED_NSS_CFG_CMDID, + .vdev_set_scan_nac_rssi_cmdid = WMI_10_4_VDEV_SET_SCAN_NAC_RSSI_CMDID, + .prog_gpio_band_select_cmdid = WMI_10_4_PROG_GPIO_BAND_SELECT_CMDID, + .config_smart_logging_cmdid = WMI_10_4_CONFIG_SMART_LOGGING_CMDID, + .debug_fatal_condition_cmdid = WMI_10_4_DEBUG_FATAL_CONDITION_CMDID, + .get_tsf_timer_cmdid = WMI_10_4_GET_TSF_TIMER_CMDID, + .pdev_get_tpc_table_cmdid = WMI_10_4_PDEV_GET_TPC_TABLE_CMDID, + .vdev_sifs_trigger_time_cmdid = WMI_10_4_VDEV_SIFS_TRIGGER_TIME_CMDID, + .pdev_wds_entry_list_cmdid = WMI_10_4_PDEV_WDS_ENTRY_LIST_CMDID, + .tdls_set_state_cmdid = WMI_10_4_TDLS_SET_STATE_CMDID, + .tdls_peer_update_cmdid = WMI_10_4_TDLS_PEER_UPDATE_CMDID, + .tdls_set_offchan_mode_cmdid = WMI_10_4_TDLS_SET_OFFCHAN_MODE_CMDID, }; /* MAIN WMI VDEV param map */ @@ -3305,7 +3330,7 @@ static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif, if (arvif->u.ap.noa_data) if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC)) skb_put_data(bcn, arvif->u.ap.noa_data, - arvif->u.ap.noa_len); + arvif->u.ap.noa_len); } static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb, @@ -6473,6 +6498,7 @@ ath10k_wmi_op_gen_peer_create(struct ath10k *ar, u32 vdev_id, cmd = (struct wmi_peer_create_cmd *)skb->data; cmd->vdev_id = __cpu_to_le32(vdev_id); 
ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); + cmd->peer_type = __cpu_to_le32(peer_type); ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi peer create vdev_id %d peer_addr %pM\n", @@ -7803,14 +7829,28 @@ ath10k_wmi_10_4_ext_resource_config(struct ath10k *ar, { struct wmi_ext_resource_config_10_4_cmd *cmd; struct sk_buff *skb; + u32 num_tdls_sleep_sta = 0; skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return ERR_PTR(-ENOMEM); + if (test_bit(WMI_SERVICE_TDLS_UAPSD_SLEEP_STA, ar->wmi.svc_map)) + num_tdls_sleep_sta = TARGET_10_4_NUM_TDLS_SLEEP_STA; + cmd = (struct wmi_ext_resource_config_10_4_cmd *)skb->data; cmd->host_platform_config = __cpu_to_le32(type); cmd->fw_feature_bitmap = __cpu_to_le32(fw_feature_bitmap); + cmd->wlan_gpio_priority = __cpu_to_le32(-1); + cmd->coex_version = __cpu_to_le32(WMI_NO_COEX_VERSION_SUPPORT); + cmd->coex_gpio_pin1 = __cpu_to_le32(-1); + cmd->coex_gpio_pin2 = __cpu_to_le32(-1); + cmd->coex_gpio_pin3 = __cpu_to_le32(-1); + cmd->num_tdls_vdevs = __cpu_to_le32(TARGET_10_4_NUM_TDLS_VDEVS); + cmd->num_tdls_conn_table_entries = __cpu_to_le32(20); + cmd->max_tdls_concurrent_sleep_sta = __cpu_to_le32(num_tdls_sleep_sta); + cmd->max_tdls_concurrent_buffer_sta = + __cpu_to_le32(TARGET_10_4_NUM_TDLS_BUFFER_STA); ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi ext resource config host type %d firmware feature bitmap %08x\n", @@ -7818,6 +7858,124 @@ ath10k_wmi_10_4_ext_resource_config(struct ath10k *ar, return skb; } +static struct sk_buff * +ath10k_wmi_10_4_gen_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id, + enum wmi_tdls_state state) +{ + struct wmi_10_4_tdls_set_state_cmd *cmd; + struct sk_buff *skb; + u32 options = 0; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + if (test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, ar->wmi.svc_map)) + state = WMI_TDLS_ENABLE_PASSIVE; + + if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map)) + options |= WMI_TDLS_BUFFER_STA_EN; + + cmd = (struct wmi_10_4_tdls_set_state_cmd *)skb->data; + cmd->vdev_id = __cpu_to_le32(vdev_id); + cmd->state = __cpu_to_le32(state); + cmd->notification_interval_ms = __cpu_to_le32(5000); + cmd->tx_discovery_threshold = __cpu_to_le32(100); + cmd->tx_teardown_threshold = __cpu_to_le32(5); + cmd->rssi_teardown_threshold = __cpu_to_le32(-75); + cmd->rssi_delta = __cpu_to_le32(-20); + cmd->tdls_options = __cpu_to_le32(options); + cmd->tdls_peer_traffic_ind_window = __cpu_to_le32(2); + cmd->tdls_peer_traffic_response_timeout_ms = __cpu_to_le32(5000); + cmd->tdls_puapsd_mask = __cpu_to_le32(0xf); + cmd->tdls_puapsd_inactivity_time_ms = __cpu_to_le32(0); + cmd->tdls_puapsd_rx_frame_threshold = __cpu_to_le32(10); + cmd->teardown_notification_ms = __cpu_to_le32(10); + cmd->tdls_peer_kickout_threshold = __cpu_to_le32(96); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi update fw tdls state %d for vdev %i\n", + state, vdev_id); + return skb; +} + +static u32 ath10k_wmi_prepare_peer_qos(u8 uapsd_queues, u8 sp) +{ + u32 peer_qos = 0; + + if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) + peer_qos |= WMI_TDLS_PEER_QOS_AC_VO; + if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) + peer_qos |= WMI_TDLS_PEER_QOS_AC_VI; + if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) + peer_qos |= WMI_TDLS_PEER_QOS_AC_BK; + if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) + peer_qos |= WMI_TDLS_PEER_QOS_AC_BE; + + peer_qos |= SM(sp, WMI_TDLS_PEER_SP); + + return peer_qos; +} + +static struct sk_buff * +ath10k_wmi_10_4_gen_tdls_peer_update(struct ath10k *ar, + const struct 
wmi_tdls_peer_update_cmd_arg *arg, + const struct wmi_tdls_peer_capab_arg *cap, + const struct wmi_channel_arg *chan_arg) +{ + struct wmi_10_4_tdls_peer_update_cmd *cmd; + struct wmi_tdls_peer_capabilities *peer_cap; + struct wmi_channel *chan; + struct sk_buff *skb; + u32 peer_qos; + int len, chan_len; + int i; + + /* tdls peer update cmd has place holder for one channel*/ + chan_len = cap->peer_chan_len ? (cap->peer_chan_len - 1) : 0; + + len = sizeof(*cmd) + chan_len * sizeof(*chan); + + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + memset(skb->data, 0, sizeof(*cmd)); + + cmd = (struct wmi_10_4_tdls_peer_update_cmd *)skb->data; + cmd->vdev_id = __cpu_to_le32(arg->vdev_id); + ether_addr_copy(cmd->peer_macaddr.addr, arg->addr); + cmd->peer_state = __cpu_to_le32(arg->peer_state); + + peer_qos = ath10k_wmi_prepare_peer_qos(cap->peer_uapsd_queues, + cap->peer_max_sp); + + peer_cap = &cmd->peer_capab; + peer_cap->peer_qos = __cpu_to_le32(peer_qos); + peer_cap->buff_sta_support = __cpu_to_le32(cap->buff_sta_support); + peer_cap->off_chan_support = __cpu_to_le32(cap->off_chan_support); + peer_cap->peer_curr_operclass = __cpu_to_le32(cap->peer_curr_operclass); + peer_cap->self_curr_operclass = __cpu_to_le32(cap->self_curr_operclass); + peer_cap->peer_chan_len = __cpu_to_le32(cap->peer_chan_len); + peer_cap->peer_operclass_len = __cpu_to_le32(cap->peer_operclass_len); + + for (i = 0; i < WMI_TDLS_MAX_SUPP_OPER_CLASSES; i++) + peer_cap->peer_operclass[i] = cap->peer_operclass[i]; + + peer_cap->is_peer_responder = __cpu_to_le32(cap->is_peer_responder); + peer_cap->pref_offchan_num = __cpu_to_le32(cap->pref_offchan_num); + peer_cap->pref_offchan_bw = __cpu_to_le32(cap->pref_offchan_bw); + + for (i = 0; i < cap->peer_chan_len; i++) { + chan = (struct wmi_channel *)&peer_cap->peer_chan_list[i]; + ath10k_wmi_put_wmi_channel(chan, &chan_arg[i]); + } + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi tdls peer update vdev %i state %d n_chans %u\n", + arg->vdev_id, arg->peer_state, cap->peer_chan_len); + return skb; +} + static struct sk_buff * ath10k_wmi_op_gen_echo(struct ath10k *ar, u32 value) { @@ -8197,6 +8355,8 @@ static const struct wmi_ops wmi_10_4_ops = { .gen_delba_send = ath10k_wmi_op_gen_delba_send, .fw_stats_fill = ath10k_wmi_10_4_op_fw_stats_fill, .ext_resource_config = ath10k_wmi_10_4_ext_resource_config, + .gen_update_fw_tdls_state = ath10k_wmi_10_4_gen_update_fw_tdls_state, + .gen_tdls_peer_update = ath10k_wmi_10_4_gen_tdls_peer_update, /* shared with 10.2 */ .pull_echo_ev = ath10k_wmi_op_pull_echo_ev, diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index baa38c8f847c..7a3606dde227 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -184,6 +184,17 @@ enum wmi_service { WMI_SERVICE_TX_MODE_PUSH_ONLY, WMI_SERVICE_TX_MODE_PUSH_PULL, WMI_SERVICE_TX_MODE_DYNAMIC, + WMI_SERVICE_VDEV_RX_FILTER, + WMI_SERVICE_BTCOEX, + WMI_SERVICE_CHECK_CAL_VERSION, + WMI_SERVICE_DBGLOG_WARN2, + WMI_SERVICE_BTCOEX_DUTY_CYCLE, + WMI_SERVICE_4_WIRE_COEX_SUPPORT, + WMI_SERVICE_EXTENDED_NSS_SUPPORT, + WMI_SERVICE_PROG_GPIO_BAND_SELECT, + WMI_SERVICE_SMART_LOGGING_SUPPORT, + WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE, + WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, /* keep last */ WMI_SERVICE_MAX, @@ -310,6 +321,21 @@ enum wmi_10_4_service { WMI_10_4_SERVICE_TX_MODE_PUSH_ONLY, WMI_10_4_SERVICE_TX_MODE_PUSH_PULL, WMI_10_4_SERVICE_TX_MODE_DYNAMIC, + WMI_10_4_SERVICE_VDEV_RX_FILTER, + WMI_10_4_SERVICE_BTCOEX, + 
WMI_10_4_SERVICE_CHECK_CAL_VERSION, + WMI_10_4_SERVICE_DBGLOG_WARN2, + WMI_10_4_SERVICE_BTCOEX_DUTY_CYCLE, + WMI_10_4_SERVICE_4_WIRE_COEX_SUPPORT, + WMI_10_4_SERVICE_EXTENDED_NSS_SUPPORT, + WMI_10_4_SERVICE_PROG_GPIO_BAND_SELECT, + WMI_10_4_SERVICE_SMART_LOGGING_SUPPORT, + WMI_10_4_SERVICE_TDLS, + WMI_10_4_SERVICE_TDLS_OFFCHAN, + WMI_10_4_SERVICE_TDLS_UAPSD_BUFFER_STA, + WMI_10_4_SERVICE_TDLS_UAPSD_SLEEP_STA, + WMI_10_4_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE, + WMI_10_4_SERVICE_TDLS_EXPLICIT_MODE_ONLY, }; static inline char *wmi_service_name(int service_id) @@ -408,6 +434,16 @@ static inline char *wmi_service_name(int service_id) SVCSTR(WMI_SERVICE_TX_MODE_PUSH_ONLY); SVCSTR(WMI_SERVICE_TX_MODE_PUSH_PULL); SVCSTR(WMI_SERVICE_TX_MODE_DYNAMIC); + SVCSTR(WMI_SERVICE_VDEV_RX_FILTER); + SVCSTR(WMI_SERVICE_CHECK_CAL_VERSION); + SVCSTR(WMI_SERVICE_DBGLOG_WARN2); + SVCSTR(WMI_SERVICE_BTCOEX_DUTY_CYCLE); + SVCSTR(WMI_SERVICE_4_WIRE_COEX_SUPPORT); + SVCSTR(WMI_SERVICE_EXTENDED_NSS_SUPPORT); + SVCSTR(WMI_SERVICE_PROG_GPIO_BAND_SELECT); + SVCSTR(WMI_SERVICE_SMART_LOGGING_SUPPORT); + SVCSTR(WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE); + SVCSTR(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY); default: return NULL; } @@ -420,9 +456,20 @@ static inline char *wmi_service_name(int service_id) __le32_to_cpu((wmi_svc_bmap)[(svc_id) / (sizeof(u32))]) & \ BIT((svc_id) % (sizeof(u32)))) +/* This extension is required to accommodate new services, current limit + * for wmi_services is 64 as target is using only 4-bits of each 32-bit + * wmi_service word. Extending this to make use of remaining unused bits + * for new services. + */ +#define WMI_EXT_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \ + ((svc_id) >= (len) && \ + __le32_to_cpu((wmi_svc_bmap)[((svc_id) - (len)) / 28]) & \ + BIT(((((svc_id) - (len)) % 28) & 0x1f) + 4)) + #define SVCMAP(x, y, len) \ do { \ - if (WMI_SERVICE_IS_ENABLED((in), (x), (len))) \ + if ((WMI_SERVICE_IS_ENABLED((in), (x), (len))) || \ + (WMI_EXT_SERVICE_IS_ENABLED((in), (x), (len)))) \ __set_bit(y, out); \ } while (0) @@ -663,6 +710,36 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out, WMI_SERVICE_TX_MODE_PUSH_PULL, len); SVCMAP(WMI_10_4_SERVICE_TX_MODE_DYNAMIC, WMI_SERVICE_TX_MODE_DYNAMIC, len); + SVCMAP(WMI_10_4_SERVICE_VDEV_RX_FILTER, + WMI_SERVICE_VDEV_RX_FILTER, len); + SVCMAP(WMI_10_4_SERVICE_BTCOEX, + WMI_SERVICE_BTCOEX, len); + SVCMAP(WMI_10_4_SERVICE_CHECK_CAL_VERSION, + WMI_SERVICE_CHECK_CAL_VERSION, len); + SVCMAP(WMI_10_4_SERVICE_DBGLOG_WARN2, + WMI_SERVICE_DBGLOG_WARN2, len); + SVCMAP(WMI_10_4_SERVICE_BTCOEX_DUTY_CYCLE, + WMI_SERVICE_BTCOEX_DUTY_CYCLE, len); + SVCMAP(WMI_10_4_SERVICE_4_WIRE_COEX_SUPPORT, + WMI_SERVICE_4_WIRE_COEX_SUPPORT, len); + SVCMAP(WMI_10_4_SERVICE_EXTENDED_NSS_SUPPORT, + WMI_SERVICE_EXTENDED_NSS_SUPPORT, len); + SVCMAP(WMI_10_4_SERVICE_PROG_GPIO_BAND_SELECT, + WMI_SERVICE_PROG_GPIO_BAND_SELECT, len); + SVCMAP(WMI_10_4_SERVICE_SMART_LOGGING_SUPPORT, + WMI_SERVICE_SMART_LOGGING_SUPPORT, len); + SVCMAP(WMI_10_4_SERVICE_TDLS, + WMI_SERVICE_TDLS, len); + SVCMAP(WMI_10_4_SERVICE_TDLS_OFFCHAN, + WMI_SERVICE_TDLS_OFFCHAN, len); + SVCMAP(WMI_10_4_SERVICE_TDLS_UAPSD_BUFFER_STA, + WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, len); + SVCMAP(WMI_10_4_SERVICE_TDLS_UAPSD_SLEEP_STA, + WMI_SERVICE_TDLS_UAPSD_SLEEP_STA, len); + SVCMAP(WMI_10_4_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE, + WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE, len); + SVCMAP(WMI_10_4_SERVICE_TDLS_EXPLICIT_MODE_ONLY, + WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, len); } #undef SVCMAP @@ -837,6 +914,29 
@@ struct wmi_cmd_map { u32 pdev_bss_chan_info_request_cmdid; u32 pdev_enable_adaptive_cca_cmdid; u32 ext_resource_cfg_cmdid; + u32 vdev_set_ie_cmdid; + u32 set_lteu_config_cmdid; + u32 atf_ssid_grouping_request_cmdid; + u32 peer_atf_ext_request_cmdid; + u32 set_periodic_channel_stats_cfg_cmdid; + u32 peer_bwf_request_cmdid; + u32 btcoex_cfg_cmdid; + u32 peer_tx_mu_txmit_count_cmdid; + u32 peer_tx_mu_txmit_rstcnt_cmdid; + u32 peer_gid_userpos_list_cmdid; + u32 pdev_check_cal_version_cmdid; + u32 coex_version_cfg_cmid; + u32 pdev_get_rx_filter_cmdid; + u32 pdev_extended_nss_cfg_cmdid; + u32 vdev_set_scan_nac_rssi_cmdid; + u32 prog_gpio_band_select_cmdid; + u32 config_smart_logging_cmdid; + u32 debug_fatal_condition_cmdid; + u32 get_tsf_timer_cmdid; + u32 pdev_get_tpc_table_cmdid; + u32 vdev_sifs_trigger_time_cmdid; + u32 pdev_wds_entry_list_cmdid; + u32 tdls_set_offchan_mode_cmdid; }; /* @@ -1647,6 +1747,29 @@ enum wmi_10_4_cmd_id { WMI_10_4_EXT_RESOURCE_CFG_CMDID, WMI_10_4_VDEV_SET_IE_CMDID, WMI_10_4_SET_LTEU_CONFIG_CMDID, + WMI_10_4_ATF_SSID_GROUPING_REQUEST_CMDID, + WMI_10_4_PEER_ATF_EXT_REQUEST_CMDID, + WMI_10_4_SET_PERIODIC_CHANNEL_STATS_CONFIG, + WMI_10_4_PEER_BWF_REQUEST_CMDID, + WMI_10_4_BTCOEX_CFG_CMDID, + WMI_10_4_PEER_TX_MU_TXMIT_COUNT_CMDID, + WMI_10_4_PEER_TX_MU_TXMIT_RSTCNT_CMDID, + WMI_10_4_PEER_GID_USERPOS_LIST_CMDID, + WMI_10_4_PDEV_CHECK_CAL_VERSION_CMDID, + WMI_10_4_COEX_VERSION_CFG_CMID, + WMI_10_4_PDEV_GET_RX_FILTER_CMDID, + WMI_10_4_PDEV_EXTENDED_NSS_CFG_CMDID, + WMI_10_4_VDEV_SET_SCAN_NAC_RSSI_CMDID, + WMI_10_4_PROG_GPIO_BAND_SELECT_CMDID, + WMI_10_4_CONFIG_SMART_LOGGING_CMDID, + WMI_10_4_DEBUG_FATAL_CONDITION_CMDID, + WMI_10_4_GET_TSF_TIMER_CMDID, + WMI_10_4_PDEV_GET_TPC_TABLE_CMDID, + WMI_10_4_VDEV_SIFS_TRIGGER_TIME_CMDID, + WMI_10_4_PDEV_WDS_ENTRY_LIST_CMDID, + WMI_10_4_TDLS_SET_STATE_CMDID, + WMI_10_4_TDLS_PEER_UPDATE_CMDID, + WMI_10_4_TDLS_SET_OFFCHAN_MODE_CMDID, WMI_10_4_PDEV_UTF_CMDID = WMI_10_4_END_CMDID - 1, }; @@ -1710,6 +1833,18 @@ enum wmi_10_4_event_id { WMI_10_4_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENTID, WMI_10_4_PDEV_BSS_CHAN_INFO_EVENTID, WMI_10_4_MU_REPORT_EVENTID, + WMI_10_4_TX_DATA_TRAFFIC_CTRL_EVENTID, + WMI_10_4_PEER_TX_MU_TXMIT_COUNT_EVENTID, + WMI_10_4_PEER_GID_USERPOS_LIST_EVENTID, + WMI_10_4_PDEV_CHECK_CAL_VERSION_EVENTID, + WMI_10_4_ATF_PEER_STATS_EVENTID, + WMI_10_4_PDEV_GET_RX_FILTER_EVENTID, + WMI_10_4_NAC_RSSI_EVENTID, + WMI_10_4_DEBUG_FATAL_CONDITION_EVENTID, + WMI_10_4_GET_TSF_TIMER_RESP_EVENTID, + WMI_10_4_PDEV_TPC_TABLE_EVENTID, + WMI_10_4_PDEV_WDS_ENTRY_LIST_EVENTID, + WMI_10_4_TDLS_PEER_EVENTID, WMI_10_4_PDEV_UTF_EVENTID = WMI_10_4_END_EVENTID - 1, }; @@ -2718,6 +2853,18 @@ struct wmi_resource_config_10_4 { __le32 qwrap_config; } __packed; +enum wmi_coex_version { + WMI_NO_COEX_VERSION_SUPPORT = 0, + /* 3 wire coex support*/ + WMI_COEX_VERSION_1 = 1, + /* 2.5 wire coex support*/ + WMI_COEX_VERSION_2 = 2, + /* 2.5 wire coex with duty cycle support */ + WMI_COEX_VERSION_3 = 3, + /* 4 wire coex support*/ + WMI_COEX_VERSION_4 = 4, +}; + /** * enum wmi_10_4_feature_mask - WMI 10.4 feature enable/disable flags * @WMI_10_4_LTEU_SUPPORT: LTEU config @@ -2726,6 +2873,14 @@ struct wmi_resource_config_10_4 { * @WMI_10_4_AUX_RADIO_CHAN_LOAD_INTF: AUX Radio Enhancement for chan load scan * @WMI_10_4_BSS_CHANNEL_INFO_64: BSS channel info stats * @WMI_10_4_PEER_STATS: Per station stats + * @WMI_10_4_VDEV_STATS: Per vdev stats + * @WMI_10_4_TDLS: Implicit TDLS support in firmware enable/disable + * @WMI_10_4_TDLS_OFFCHAN: TDLS offchannel support 
enable/disable + * @WMI_10_4_TDLS_UAPSD_BUFFER_STA: TDLS buffer sta support enable/disable + * @WMI_10_4_TDLS_UAPSD_SLEEP_STA: TDLS sleep sta support enable/disable + * @WMI_10_4_TDLS_CONN_TRACKER_IN_HOST_MODE: TDLS connection tracker in host + * enable/disable + * @WMI_10_4_TDLS_EXPLICIT_MODE_ONLY:Explicit TDLS mode enable/disable */ enum wmi_10_4_feature_mask { WMI_10_4_LTEU_SUPPORT = BIT(0), @@ -2734,6 +2889,14 @@ enum wmi_10_4_feature_mask { WMI_10_4_AUX_RADIO_CHAN_LOAD_INTF = BIT(3), WMI_10_4_BSS_CHANNEL_INFO_64 = BIT(4), WMI_10_4_PEER_STATS = BIT(5), + WMI_10_4_VDEV_STATS = BIT(6), + WMI_10_4_TDLS = BIT(7), + WMI_10_4_TDLS_OFFCHAN = BIT(8), + WMI_10_4_TDLS_UAPSD_BUFFER_STA = BIT(9), + WMI_10_4_TDLS_UAPSD_SLEEP_STA = BIT(10), + WMI_10_4_TDLS_CONN_TRACKER_IN_HOST_MODE = BIT(11), + WMI_10_4_TDLS_EXPLICIT_MODE_ONLY = BIT(12), + }; struct wmi_ext_resource_config_10_4_cmd { @@ -2741,6 +2904,22 @@ struct wmi_ext_resource_config_10_4_cmd { __le32 host_platform_config; /* see enum wmi_10_4_feature_mask */ __le32 fw_feature_bitmap; + /* WLAN priority GPIO number */ + __le32 wlan_gpio_priority; + /* see enum wmi_coex_version */ + __le32 coex_version; + /* COEX GPIO config */ + __le32 coex_gpio_pin1; + __le32 coex_gpio_pin2; + __le32 coex_gpio_pin3; + /* number of vdevs allowed to perform tdls */ + __le32 num_tdls_vdevs; + /* number of peers to track per TDLS vdev */ + __le32 num_tdls_conn_table_entries; + /* number of tdls sleep sta supported */ + __le32 max_tdls_concurrent_sleep_sta; + /* number of tdls buffer sta supported */ + __le32 max_tdls_concurrent_buffer_sta; }; /* strucutre describing host memory chunk. */ @@ -5698,6 +5877,7 @@ struct wmi_tbtt_offset_event { struct wmi_peer_create_cmd { __le32 vdev_id; struct wmi_mac_addr peer_macaddr; + __le32 peer_type; } __packed; enum wmi_peer_type { @@ -6556,6 +6736,22 @@ struct wmi_tdls_peer_update_cmd_arg { #define WMI_TDLS_MAX_SUPP_OPER_CLASSES 32 +#define WMI_TDLS_PEER_SP_MASK 0x60 +#define WMI_TDLS_PEER_SP_LSB 5 + +enum wmi_tdls_options { + WMI_TDLS_OFFCHAN_EN = BIT(0), + WMI_TDLS_BUFFER_STA_EN = BIT(1), + WMI_TDLS_SLEEP_STA_EN = BIT(2), +}; + +enum { + WMI_TDLS_PEER_QOS_AC_VO = BIT(0), + WMI_TDLS_PEER_QOS_AC_VI = BIT(1), + WMI_TDLS_PEER_QOS_AC_BK = BIT(2), + WMI_TDLS_PEER_QOS_AC_BE = BIT(3), +}; + struct wmi_tdls_peer_capab_arg { u8 peer_uapsd_queues; u8 peer_max_sp; @@ -6571,6 +6767,79 @@ struct wmi_tdls_peer_capab_arg { u32 pref_offchan_bw; }; +struct wmi_10_4_tdls_set_state_cmd { + __le32 vdev_id; + __le32 state; + __le32 notification_interval_ms; + __le32 tx_discovery_threshold; + __le32 tx_teardown_threshold; + __le32 rssi_teardown_threshold; + __le32 rssi_delta; + __le32 tdls_options; + __le32 tdls_peer_traffic_ind_window; + __le32 tdls_peer_traffic_response_timeout_ms; + __le32 tdls_puapsd_mask; + __le32 tdls_puapsd_inactivity_time_ms; + __le32 tdls_puapsd_rx_frame_threshold; + __le32 teardown_notification_ms; + __le32 tdls_peer_kickout_threshold; +} __packed; + +struct wmi_tdls_peer_capabilities { + __le32 peer_qos; + __le32 buff_sta_support; + __le32 off_chan_support; + __le32 peer_curr_operclass; + __le32 self_curr_operclass; + __le32 peer_chan_len; + __le32 peer_operclass_len; + u8 peer_operclass[WMI_TDLS_MAX_SUPP_OPER_CLASSES]; + __le32 is_peer_responder; + __le32 pref_offchan_num; + __le32 pref_offchan_bw; + struct wmi_channel peer_chan_list[1]; +} __packed; + +struct wmi_10_4_tdls_peer_update_cmd { + __le32 vdev_id; + struct wmi_mac_addr peer_macaddr; + __le32 peer_state; + __le32 reserved[4]; + struct 
wmi_tdls_peer_capabilities peer_capab; +} __packed; + +enum wmi_tdls_peer_reason { + WMI_TDLS_TEARDOWN_REASON_TX, + WMI_TDLS_TEARDOWN_REASON_RSSI, + WMI_TDLS_TEARDOWN_REASON_SCAN, + WMI_TDLS_DISCONNECTED_REASON_PEER_DELETE, + WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT, + WMI_TDLS_TEARDOWN_REASON_BAD_PTR, + WMI_TDLS_TEARDOWN_REASON_NO_RESPONSE, + WMI_TDLS_ENTER_BUF_STA, + WMI_TDLS_EXIT_BUF_STA, + WMI_TDLS_ENTER_BT_BUSY_MODE, + WMI_TDLS_EXIT_BT_BUSY_MODE, + WMI_TDLS_SCAN_STARTED_EVENT, + WMI_TDLS_SCAN_COMPLETED_EVENT, +}; + +enum wmi_tdls_peer_notification { + WMI_TDLS_SHOULD_DISCOVER, + WMI_TDLS_SHOULD_TEARDOWN, + WMI_TDLS_PEER_DISCONNECTED, + WMI_TDLS_CONNECTION_TRACKER_NOTIFICATION, +}; + +struct wmi_tdls_peer_event { + struct wmi_mac_addr peer_macaddr; + /* see enum wmi_tdls_peer_notification*/ + __le32 peer_status; + /* see enum wmi_tdls_peer_reason */ + __le32 peer_reason; + __le32 vdev_id; +} __packed; + enum wmi_txbf_conf { WMI_TXBF_CONF_UNSUPPORTED, WMI_TXBF_CONF_BEFORE_ASSOC, diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c index 77100d42f401..0d46d6dc7578 100644 --- a/drivers/net/wireless/ath/ath10k/wow.c +++ b/drivers/net/wireless/ath/ath10k/wow.c @@ -277,6 +277,18 @@ int ath10k_wow_op_suspend(struct ieee80211_hw *hw, return ret ? 1 : 0; } +void ath10k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled) +{ + struct ath10k *ar = hw->priv; + + mutex_lock(&ar->conf_mutex); + if (test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, + ar->running_fw->fw_file.fw_features)) { + device_set_wakeup_enable(ar->dev, enabled); + } + mutex_unlock(&ar->conf_mutex); +} + int ath10k_wow_op_resume(struct ieee80211_hw *hw) { struct ath10k *ar = hw->priv; @@ -336,5 +348,7 @@ int ath10k_wow_init(struct ath10k *ar) ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns; ar->hw->wiphy->wowlan = &ar->wow.wowlan_support; + device_set_wakeup_capable(ar->dev, true); + return 0; } diff --git a/drivers/net/wireless/ath/ath10k/wow.h b/drivers/net/wireless/ath/ath10k/wow.h index abbb04b6d1bd..9745b9ddc7f5 100644 --- a/drivers/net/wireless/ath/ath10k/wow.h +++ b/drivers/net/wireless/ath/ath10k/wow.h @@ -28,6 +28,7 @@ int ath10k_wow_init(struct ath10k *ar); int ath10k_wow_op_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan); int ath10k_wow_op_resume(struct ieee80211_hw *hw); +void ath10k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled); #else diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c index 9da3594fd010..4defb7a0330f 100644 --- a/drivers/net/wireless/ath/ath6kl/usb.c +++ b/drivers/net/wireless/ath/ath6kl/usb.c @@ -1201,7 +1201,7 @@ static int ath6kl_usb_pm_resume(struct usb_interface *interface) #endif /* table of devices that work with this driver */ -static struct usb_device_id ath6kl_usb_ids[] = { +static const struct usb_device_id ath6kl_usb_ids[] = { {USB_DEVICE(0x0cf3, 0x9375)}, {USB_DEVICE(0x0cf3, 0x9374)}, { /* Terminating entry */ }, diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index 2e64977a8ab6..01fa30117288 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -1452,7 +1452,7 @@ int ath9k_init_debug(struct ath_hw *ah) #endif #ifdef CONFIG_ATH9K_DYNACK - debugfs_create_file("ack_to", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, + debugfs_create_file("ack_to", S_IRUSR, sc->debug.debugfs_phy, sc, &fops_ackto); #endif debugfs_create_file("tpc", S_IRUSR | S_IWUSR, diff --git 
a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index 0d9687a2aa98..c5f4dd808745 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c @@ -20,7 +20,7 @@ MODULE_FIRMWARE(HTC_7010_MODULE_FW); MODULE_FIRMWARE(HTC_9271_MODULE_FW); -static struct usb_device_id ath9k_hif_usb_ids[] = { +static const struct usb_device_id ath9k_hif_usb_ids[] = { { USB_DEVICE(0x0cf3, 0x9271) }, /* Atheros */ { USB_DEVICE(0x0cf3, 0x1006) }, /* Atheros */ { USB_DEVICE(0x0846, 0x9030) }, /* Netgear N150 */ diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index defacc6c9c99..da2164b0cccc 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -71,7 +71,7 @@ static void ath9k_htc_op_ps_restore(struct ath_common *common) ath9k_htc_ps_restore((struct ath9k_htc_priv *) common->priv); } -static struct ath_ps_ops ath9k_htc_ps_ops = { +static const struct ath_ps_ops ath9k_htc_ps_ops = { .wakeup = ath9k_htc_op_ps_wakeup, .restore = ath9k_htc_op_ps_restore, }; diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index fd9a61834c17..bb7936090b91 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -104,7 +104,7 @@ static void ath9k_op_ps_restore(struct ath_common *common) ath9k_ps_restore((struct ath_softc *) common->priv); } -static struct ath_ps_ops ath9k_ps_ops = { +static const struct ath_ps_ops ath9k_ps_ops = { .wakeup = ath9k_op_ps_wakeup, .restore = ath9k_op_ps_restore, }; diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index 7b7627f85d3a..223606311261 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c @@ -388,6 +388,11 @@ static const struct pci_device_id ath_pci_id_table[] = { PCI_VENDOR_ID_DELL, 0x020B), .driver_data = ATH9K_PCI_WOW }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0034, + PCI_VENDOR_ID_DELL, + 0x0300), + .driver_data = ATH9K_PCI_WOW }, /* Killer Wireless (2x2) */ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c index 99ab20334d21..e7c3f3b8457d 100644 --- a/drivers/net/wireless/ath/carl9170/usb.c +++ b/drivers/net/wireless/ath/carl9170/usb.c @@ -64,7 +64,7 @@ MODULE_ALIAS("arusb_lnx"); * http://wireless.kernel.org/en/users/Drivers/ar9170/devices ), * whenever you add a new device. 
*/ -static struct usb_device_id carl9170_usb_ids[] = { +static const struct usb_device_id carl9170_usb_ids[] = { /* Atheros 9170 */ { USB_DEVICE(0x0cf3, 0x9170) }, /* Atheros TG121N */ diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c index 87dfdaf9044c..d5c810a8cc52 100644 --- a/drivers/net/wireless/ath/wcn36xx/dxe.c +++ b/drivers/net/wireless/ath/wcn36xx/dxe.c @@ -289,6 +289,11 @@ static int wcn36xx_dxe_fill_skb(struct device *dev, struct wcn36xx_dxe_ctl *ctl) skb_tail_pointer(skb), WCN36XX_PKT_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, dxe->dst_addr_l)) { + dev_err(dev, "unable to map skb\n"); + kfree_skb(skb); + return -ENOMEM; + } ctl->skb = skb; return 0; diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index 517a315e259b..35bd50bcbbd5 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -372,6 +372,8 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed) wcn36xx_dbg(WCN36XX_DBG_MAC, "mac config changed 0x%08x\n", changed); + mutex_lock(&wcn->conf_mutex); + if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { int ch = WCN36XX_HW_CHANNEL(wcn); wcn36xx_dbg(WCN36XX_DBG_MAC, "wcn36xx_config channel switch=%d\n", @@ -382,6 +384,8 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed) } } + mutex_unlock(&wcn->conf_mutex); + return 0; } @@ -396,6 +400,8 @@ static void wcn36xx_configure_filter(struct ieee80211_hw *hw, wcn36xx_dbg(WCN36XX_DBG_MAC, "mac configure filter\n"); + mutex_lock(&wcn->conf_mutex); + *total &= FIF_ALLMULTI; fp = (void *)(unsigned long)multicast; @@ -408,6 +414,8 @@ static void wcn36xx_configure_filter(struct ieee80211_hw *hw, else if (NL80211_IFTYPE_STATION == vif->type && tmp->sta_assoc) wcn36xx_smd_set_mc_list(wcn, vif, fp); } + + mutex_unlock(&wcn->conf_mutex); kfree(fp); } @@ -471,6 +479,8 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, key_conf->key, key_conf->keylen); + mutex_lock(&wcn->conf_mutex); + switch (key_conf->cipher) { case WLAN_CIPHER_SUITE_WEP40: vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP40; @@ -565,6 +575,8 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, } out: + mutex_unlock(&wcn->conf_mutex); + return ret; } @@ -725,6 +737,8 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw, wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss info changed vif %p changed 0x%08x\n", vif, changed); + mutex_lock(&wcn->conf_mutex); + if (changed & BSS_CHANGED_BEACON_INFO) { wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss changed dtim period %d\n", @@ -787,7 +801,13 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw, bss_conf->aid); vif_priv->sta_assoc = true; - rcu_read_lock(); + + /* + * Holding conf_mutex ensures mutual exclusion with + * wcn36xx_sta_remove() and as such ensures that sta + * won't be freed while we're operating on it. As such + * we do not need to hold the rcu_read_lock(). + */ sta = ieee80211_find_sta(vif, bss_conf->bssid); if (!sta) { wcn36xx_err("sta %pM is not found\n", @@ -811,7 +831,6 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw, * place where AID is available. 
*/ wcn36xx_smd_config_sta(wcn, vif, sta); - rcu_read_unlock(); } else { wcn36xx_dbg(WCN36XX_DBG_MAC, "disassociated bss %pM vif %pM AID=%d\n", @@ -873,6 +892,9 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw, } } out: + + mutex_unlock(&wcn->conf_mutex); + return; } @@ -882,7 +904,10 @@ static int wcn36xx_set_rts_threshold(struct ieee80211_hw *hw, u32 value) struct wcn36xx *wcn = hw->priv; wcn36xx_dbg(WCN36XX_DBG_MAC, "mac set RTS threshold %d\n", value); + mutex_lock(&wcn->conf_mutex); wcn36xx_smd_update_cfg(wcn, WCN36XX_HAL_CFG_RTS_THRESHOLD, value); + mutex_unlock(&wcn->conf_mutex); + return 0; } @@ -893,8 +918,12 @@ static void wcn36xx_remove_interface(struct ieee80211_hw *hw, struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); wcn36xx_dbg(WCN36XX_DBG_MAC, "mac remove interface vif %p\n", vif); + mutex_lock(&wcn->conf_mutex); + list_del(&vif_priv->list); wcn36xx_smd_delete_sta_self(wcn, vif->addr); + + mutex_unlock(&wcn->conf_mutex); } static int wcn36xx_add_interface(struct ieee80211_hw *hw, @@ -915,9 +944,13 @@ static int wcn36xx_add_interface(struct ieee80211_hw *hw, return -EOPNOTSUPP; } + mutex_lock(&wcn->conf_mutex); + list_add(&vif_priv->list, &wcn->vif_list); wcn36xx_smd_add_sta_self(wcn, vif); + mutex_unlock(&wcn->conf_mutex); + return 0; } @@ -930,6 +963,8 @@ static int wcn36xx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta add vif %p sta %pM\n", vif, sta->addr); + mutex_lock(&wcn->conf_mutex); + spin_lock_init(&sta_priv->ampdu_lock); sta_priv->vif = vif_priv; /* @@ -941,6 +976,9 @@ static int wcn36xx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, sta_priv->aid = sta->aid; wcn36xx_smd_config_sta(wcn, vif, sta); } + + mutex_unlock(&wcn->conf_mutex); + return 0; } @@ -954,8 +992,13 @@ static int wcn36xx_sta_remove(struct ieee80211_hw *hw, wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta remove vif %p sta %pM index %d\n", vif, sta->addr, sta_priv->sta_index); + mutex_lock(&wcn->conf_mutex); + wcn36xx_smd_delete_sta(wcn, sta_priv->sta_index); sta_priv->vif = NULL; + + mutex_unlock(&wcn->conf_mutex); + return 0; } @@ -999,6 +1042,8 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw, wcn36xx_dbg(WCN36XX_DBG_MAC, "mac ampdu action action %d tid %d\n", action, tid); + mutex_lock(&wcn->conf_mutex); + switch (action) { case IEEE80211_AMPDU_RX_START: sta_priv->tid = tid; @@ -1038,6 +1083,8 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw, wcn36xx_err("Unknown AMPDU action\n"); } + mutex_unlock(&wcn->conf_mutex); + return 0; } @@ -1216,6 +1263,7 @@ static int wcn36xx_probe(struct platform_device *pdev) wcn = hw->priv; wcn->hw = hw; wcn->dev = &pdev->dev; + mutex_init(&wcn->conf_mutex); mutex_init(&wcn->hal_mutex); mutex_init(&wcn->scan_lock); diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h index b52b4da9a967..6aefba4c0cda 100644 --- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h +++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h @@ -202,6 +202,9 @@ struct wcn36xx { struct qcom_smem_state *tx_rings_empty_state; unsigned tx_rings_empty_state_bit; + /* prevents concurrent FW reconfiguration */ + struct mutex conf_mutex; + /* * smd_buf must be protected with smd_mutex to garantee * that all messages are sent one after another diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig index 6dfedc8bd6a3..b448926b0c0f 100644 --- a/drivers/net/wireless/ath/wil6210/Kconfig +++ b/drivers/net/wireless/ath/wil6210/Kconfig 
@@ -40,3 +40,15 @@ config WIL6210_TRACING option if you are interested in debugging the driver. If unsure, say Y to make it easier to debug problems. + +config WIL6210_DEBUGFS + bool "wil6210 debugfs support" + depends on WIL6210 + depends on DEBUG_FS + default y + ---help--- + Say Y here to enable wil6210 debugfs support, using the + kernel debugfs infrastructure. Select this + option if you are interested in debugging the driver. + + If unsure, say Y to make it easier to debug problems. diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile index 4ae21da78e9e..d27efe83748b 100644 --- a/drivers/net/wireless/ath/wil6210/Makefile +++ b/drivers/net/wireless/ath/wil6210/Makefile @@ -4,7 +4,7 @@ wil6210-y := main.o wil6210-y += netdev.o wil6210-y += cfg80211.o wil6210-y += pcie_bus.o -wil6210-y += debugfs.o +wil6210-$(CONFIG_WIL6210_DEBUGFS) += debugfs.o wil6210-y += wmi.o wil6210-y += interrupt.o wil6210-y += txrx.o diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 0b5383a62d42..85d5c04618eb 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -26,6 +26,12 @@ bool disable_ap_sme; module_param(disable_ap_sme, bool, 0444); MODULE_PARM_DESC(disable_ap_sme, " let user space handle AP mode SME"); +#ifdef CONFIG_PM +static struct wiphy_wowlan_support wil_wowlan_support = { + .flags = WIPHY_WOWLAN_ANY | WIPHY_WOWLAN_DISCONNECT, +}; +#endif + #define CHAN60G(_channel, _flags) { \ .band = NL80211_BAND_60GHZ, \ .center_freq = 56160 + (2160 * (_channel)), \ @@ -273,12 +279,12 @@ int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid, wil_dbg_wmi(wil, "Link status for CID %d: {\n" " MCS %d TSF 0x%016llx\n" - " BF status 0x%08x SNR 0x%08x SQI %d%%\n" + " BF status 0x%08x RSSI %d SQI %d%%\n" " Tx Tpt %d goodput %d Rx goodput %d\n" " Sectors(rx:tx) my %d:%d peer %d:%d\n""}\n", cid, le16_to_cpu(reply.evt.bf_mcs), le64_to_cpu(reply.evt.tsf), reply.evt.status, - le32_to_cpu(reply.evt.snr_val), + reply.evt.rssi, reply.evt.sqi, le32_to_cpu(reply.evt.tx_tpt), le32_to_cpu(reply.evt.tx_goodput), @@ -311,7 +317,11 @@ int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid, if (test_bit(wil_status_fwconnected, wil->status)) { sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); - sinfo->signal = reply.evt.sqi; + if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, + wil->fw_capabilities)) + sinfo->signal = reply.evt.rssi; + else + sinfo->signal = reply.evt.sqi; } return rc; @@ -372,6 +382,34 @@ static int wil_cfg80211_dump_station(struct wiphy *wiphy, return rc; } +static int wil_cfg80211_start_p2p_device(struct wiphy *wiphy, + struct wireless_dev *wdev) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + + wil_dbg_misc(wil, "start_p2p_device: entered\n"); + wil->p2p.p2p_dev_started = 1; + return 0; +} + +static void wil_cfg80211_stop_p2p_device(struct wiphy *wiphy, + struct wireless_dev *wdev) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + struct wil_p2p_info *p2p = &wil->p2p; + + if (!p2p->p2p_dev_started) + return; + + wil_dbg_misc(wil, "stop_p2p_device: entered\n"); + mutex_lock(&wil->mutex); + mutex_lock(&wil->p2p_wdev_mutex); + wil_p2p_stop_radio_operations(wil); + p2p->p2p_dev_started = 0; + mutex_unlock(&wil->p2p_wdev_mutex); + mutex_unlock(&wil->mutex); +} + static struct wireless_dev * wil_cfg80211_add_iface(struct wiphy *wiphy, const char *name, unsigned char name_assign_type, @@ -420,6 +458,7 @@ static int wil_cfg80211_del_iface(struct wiphy 
*wiphy, return -EINVAL; } + wil_cfg80211_stop_p2p_device(wiphy, wdev); wil_p2p_wdev_free(wil); return 0; @@ -801,7 +840,7 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, wil->bss = bss; /* Connect can take lots of time */ mod_timer(&wil->connect_timer, - jiffies + msecs_to_jiffies(2000)); + jiffies + msecs_to_jiffies(5000)); } else { clear_bit(wil_status_fwconnecting, wil->status); } @@ -884,6 +923,9 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, wil_hex_dump_misc("mgmt tx frame ", DUMP_PREFIX_OFFSET, 16, 1, buf, len, true); + if (len < sizeof(struct ieee80211_hdr_3addr)) + return -EINVAL; + cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL); if (!cmd) { rc = -ENOMEM; @@ -1648,34 +1690,6 @@ static int wil_cfg80211_change_bss(struct wiphy *wiphy, return 0; } -static int wil_cfg80211_start_p2p_device(struct wiphy *wiphy, - struct wireless_dev *wdev) -{ - struct wil6210_priv *wil = wiphy_to_wil(wiphy); - - wil_dbg_misc(wil, "start_p2p_device: entered\n"); - wil->p2p.p2p_dev_started = 1; - return 0; -} - -static void wil_cfg80211_stop_p2p_device(struct wiphy *wiphy, - struct wireless_dev *wdev) -{ - struct wil6210_priv *wil = wiphy_to_wil(wiphy); - struct wil_p2p_info *p2p = &wil->p2p; - - if (!p2p->p2p_dev_started) - return; - - wil_dbg_misc(wil, "stop_p2p_device: entered\n"); - mutex_lock(&wil->mutex); - mutex_lock(&wil->p2p_wdev_mutex); - wil_p2p_stop_radio_operations(wil); - p2p->p2p_dev_started = 0; - mutex_unlock(&wil->p2p_wdev_mutex); - mutex_unlock(&wil->mutex); -} - static int wil_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, bool enabled, int timeout) @@ -1791,7 +1805,7 @@ static void wil_wiphy_init(struct wiphy *wiphy) wiphy->bands[NL80211_BAND_60GHZ] = &wil_band_60ghz; - /* TODO: figure this out */ + /* may change after reading FW capabilities */ wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC; wiphy->cipher_suites = wil_cipher_suites; @@ -1801,6 +1815,10 @@ static void wil_wiphy_init(struct wiphy *wiphy) wiphy->n_vendor_commands = ARRAY_SIZE(wil_nl80211_vendor_commands); wiphy->vendor_commands = wil_nl80211_vendor_commands; + +#ifdef CONFIG_PM + wiphy->wowlan = &wil_wowlan_support; +#endif } struct wireless_dev *wil_cfg80211_init(struct device *dev) diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index f82506d276d3..6db00c167d2e 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -20,7 +20,6 @@ #include #include #include - #include "wil6210.h" #include "wmi.h" #include "txrx.h" @@ -30,7 +29,6 @@ static u32 mem_addr; static u32 dbg_txdesc_index; static u32 dbg_vring_index; /* 24+ for Rx, 0..23 for Tx */ -u32 vring_idle_trsh = 16; /* HW fetches up to 16 descriptors at once */ enum dbg_off_type { doff_u32 = 0, @@ -801,6 +799,9 @@ static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf, int rc; void *frame; + if (!len) + return -EINVAL; + frame = memdup_user(buf, len); if (IS_ERR(frame)) return PTR_ERR(frame); @@ -1013,6 +1014,7 @@ static int wil_bf_debugfs_show(struct seq_file *s, void *data) " TSF = 0x%016llx\n" " TxMCS = %2d TxTpt = %4d\n" " SQI = %4d\n" + " RSSI = %4d\n" " Status = 0x%08x %s\n" " Sectors(rx:tx) my %2d:%2d peer %2d:%2d\n" " Goodput(rx:tx) %4d:%4d\n" @@ -1022,6 +1024,7 @@ static int wil_bf_debugfs_show(struct seq_file *s, void *data) le16_to_cpu(reply.evt.bf_mcs), le32_to_cpu(reply.evt.tx_tpt), reply.evt.sqi, + reply.evt.rssi, status, wil_bfstatus_str(status), 
le16_to_cpu(reply.evt.my_rx_sector), le16_to_cpu(reply.evt.my_tx_sector), @@ -1612,6 +1615,8 @@ static ssize_t wil_write_suspend_stats(struct file *file, struct wil6210_priv *wil = file->private_data; memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats)); + wil->suspend_stats.min_suspend_time = ULONG_MAX; + wil->suspend_stats.collection_start = ktime_get(); return len; } @@ -1623,18 +1628,27 @@ static ssize_t wil_read_suspend_stats(struct file *file, struct wil6210_priv *wil = file->private_data; static char text[400]; int n; + unsigned long long stats_collection_time = + ktime_to_us(ktime_sub(ktime_get(), + wil->suspend_stats.collection_start)); n = snprintf(text, sizeof(text), "Suspend statistics:\n" "successful suspends:%ld failed suspends:%ld\n" "successful resumes:%ld failed resumes:%ld\n" - "rejected by host:%ld rejected by device:%ld\n", + "rejected by host:%ld rejected by device:%ld\n" + "total suspend time:%lld min suspend time:%lld\n" + "max suspend time:%lld stats collection time: %lld\n", wil->suspend_stats.successful_suspends, wil->suspend_stats.failed_suspends, wil->suspend_stats.successful_resumes, wil->suspend_stats.failed_resumes, wil->suspend_stats.rejected_by_host, - wil->suspend_stats.rejected_by_device); + wil->suspend_stats.rejected_by_device, + wil->suspend_stats.total_suspend_time, + wil->suspend_stats.min_suspend_time, + wil->suspend_stats.max_suspend_time, + stats_collection_time); n = min_t(int, n, sizeof(text)); @@ -1747,6 +1761,7 @@ static const struct dbg_off dbg_wil_off[] = { WIL_FIELD(chip_revision, 0444, doff_u8), WIL_FIELD(abft_len, 0644, doff_u8), WIL_FIELD(wakeup_trigger, 0644, doff_u8), + WIL_FIELD(vring_idle_trsh, 0644, doff_u32), {}, }; @@ -1762,8 +1777,6 @@ static const struct dbg_off dbg_statics[] = { {"desc_index", 0644, (ulong)&dbg_txdesc_index, doff_u32}, {"vring_index", 0644, (ulong)&dbg_vring_index, doff_u32}, {"mem_addr", 0644, (ulong)&mem_addr, doff_u32}, - {"vring_idle_trsh", 0644, (ulong)&vring_idle_trsh, - doff_u32}, {"led_polarity", 0644, (ulong)&led_polarity, doff_u8}, {}, }; @@ -1790,6 +1803,8 @@ int wil6210_debugfs_init(struct wil6210_priv *wil) wil6210_debugfs_create_ITR_CNT(wil, dbg); + wil->suspend_stats.collection_start = ktime_get(); + return 0; } diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c index cad8a95c4e4e..59def4f3fcf3 100644 --- a/drivers/net/wireless/ath/wil6210/interrupt.c +++ b/drivers/net/wireless/ath/wil6210/interrupt.c @@ -244,7 +244,7 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie) wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr); if (unlikely(!isr)) { - wil_err(wil, "spurious IRQ: RX\n"); + wil_err_ratelimited(wil, "spurious IRQ: RX\n"); return IRQ_NONE; } @@ -269,11 +269,12 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie) need_unmask = false; napi_schedule(&wil->napi_rx); } else { - wil_err(wil, + wil_err_ratelimited( + wil, "Got Rx interrupt while stopping interface\n"); } } else { - wil_err(wil, "Got Rx interrupt while in reset\n"); + wil_err_ratelimited(wil, "Got Rx interrupt while in reset\n"); } } @@ -302,7 +303,7 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie) wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr); if (unlikely(!isr)) { - wil_err(wil, "spurious IRQ: TX\n"); + wil_err_ratelimited(wil, "spurious IRQ: TX\n"); return IRQ_NONE; } @@ -318,12 +319,13 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie) need_unmask = false; napi_schedule(&wil->napi_tx); } else { - wil_err(wil, "Got Tx interrupt while in reset\n"); + 
wil_err_ratelimited(wil, "Got Tx interrupt while in reset\n"); } } if (unlikely(isr)) - wil_err(wil, "un-handled TX ISR bits 0x%08x\n", isr); + wil_err_ratelimited(wil, "un-handled TX ISR bits 0x%08x\n", + isr); /* Tx IRQ will be enabled when NAPI processing finished */ diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index daf944a71901..bac829aa950d 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -394,10 +394,11 @@ static void wil_fw_error_worker(struct work_struct *work) struct wil6210_priv *wil = container_of(work, struct wil6210_priv, fw_error_worker); struct wireless_dev *wdev = wil->wdev; + struct net_device *ndev = wil_to_ndev(wil); wil_dbg_misc(wil, "fw error worker\n"); - if (!netif_running(wil_to_ndev(wil))) { + if (!(ndev->flags & IFF_UP)) { wil_info(wil, "No recovery - interface is down\n"); return; } @@ -578,6 +579,9 @@ int wil_priv_init(struct wil6210_priv *wil) wil->wakeup_trigger = WMI_WAKEUP_TRIGGER_UCAST | WMI_WAKEUP_TRIGGER_BCAST; + memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats)); + wil->suspend_stats.min_suspend_time = ULONG_MAX; + wil->vring_idle_trsh = 16; return 0; @@ -926,6 +930,29 @@ int wil_ps_update(struct wil6210_priv *wil, enum wmi_ps_profile_type ps_profile) return rc; } +static void wil_pre_fw_config(struct wil6210_priv *wil) +{ + /* Mark FW as loaded from host */ + wil_s(wil, RGF_USER_USAGE_6, 1); + + /* clear any interrupts which on-card-firmware + * may have set + */ + wil6210_clear_irq(wil); + /* CAF_ICR - clear and mask */ + /* it is W1C, clear by writing back same value */ + wil_s(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0); + wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0); + /* clear PAL_UNIT_ICR (potential D0->D3 leftover) */ + wil_s(wil, RGF_PAL_UNIT_ICR + offsetof(struct RGF_ICR, ICR), 0); + + if (wil->fw_calib_result > 0) { + __le32 val = cpu_to_le32(wil->fw_calib_result | + (CALIB_RESULT_SIGNATURE << 8)); + wil_w(wil, RGF_USER_FW_CALIB_RESULT, (u32 __force)val); + } +} + /* * We reset all the structures, and we reset the UMAC. 
* After calling this routine, you're expected to reload @@ -1019,18 +1046,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) if (rc) return rc; - /* Mark FW as loaded from host */ - wil_s(wil, RGF_USER_USAGE_6, 1); - - /* clear any interrupts which on-card-firmware - * may have set - */ - wil6210_clear_irq(wil); - /* CAF_ICR - clear and mask */ - /* it is W1C, clear by writing back same value */ - wil_s(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0); - wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0); - + wil_pre_fw_config(wil); wil_release_cpu(wil); } diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c index d571feb2370e..6a3ab4bf916d 100644 --- a/drivers/net/wireless/ath/wil6210/pcie_bus.c +++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c @@ -84,6 +84,9 @@ void wil_set_capabilities(struct wil6210_priv *wil) /* extract FW capabilities from file without loading the FW */ wil_request_firmware(wil, wil->wil_fw_name, false); + + if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, wil->fw_capabilities)) + wil_to_wiphy(wil)->signal_type = CFG80211_SIGNAL_TYPE_MBM; } void wil_disable_irq(struct wil6210_priv *wil) diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c index ce1f384e7f8e..8f5d1b447aaa 100644 --- a/drivers/net/wireless/ath/wil6210/pm.c +++ b/drivers/net/wireless/ath/wil6210/pm.c @@ -21,10 +21,11 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime) { int rc = 0; struct wireless_dev *wdev = wil->wdev; + struct net_device *ndev = wil_to_ndev(wil); wil_dbg_pm(wil, "can_suspend: %s\n", is_runtime ? "runtime" : "system"); - if (!netif_running(wil_to_ndev(wil))) { + if (!(ndev->flags & IFF_UP)) { /* can always sleep when down */ wil_dbg_pm(wil, "Interface is down\n"); goto out; @@ -85,7 +86,9 @@ static int wil_resume_keep_radio_on(struct wil6210_priv *wil) /* Send WMI resume request to the device */ rc = wmi_resume(wil); if (rc) { - wil_err(wil, "device failed to resume (%d), resetting\n", rc); + wil_err(wil, "device failed to resume (%d)\n", rc); + if (no_fw_recovery) + goto out; rc = wil_down(wil); if (rc) { wil_err(wil, "wil_down failed (%d)\n", rc); @@ -298,6 +301,9 @@ int wil_suspend(struct wil6210_priv *wil, bool is_runtime) wil_dbg_pm(wil, "suspend: %s => %d\n", is_runtime ? "runtime" : "system", rc); + if (!rc) + wil->suspend_stats.suspend_start_time = ktime_get(); + return rc; } @@ -307,6 +313,7 @@ int wil_resume(struct wil6210_priv *wil, bool is_runtime) struct net_device *ndev = wil_to_ndev(wil); bool keep_radio_on = ndev->flags & IFF_UP && wil->keep_radio_on_during_sleep; + unsigned long long suspend_time_usec = 0; wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system"); @@ -324,8 +331,20 @@ int wil_resume(struct wil6210_priv *wil, bool is_runtime) else rc = wil_resume_radio_off(wil); + if (rc) + goto out; + + suspend_time_usec = + ktime_to_us(ktime_sub(ktime_get(), + wil->suspend_stats.suspend_start_time)); + wil->suspend_stats.total_suspend_time += suspend_time_usec; + if (suspend_time_usec < wil->suspend_stats.min_suspend_time) + wil->suspend_stats.min_suspend_time = suspend_time_usec; + if (suspend_time_usec > wil->suspend_stats.max_suspend_time) + wil->suspend_stats.max_suspend_time = suspend_time_usec; + out: - wil_dbg_pm(wil, "resume: %s => %d\n", - is_runtime ? "runtime" : "system", rc); + wil_dbg_pm(wil, "resume: %s => %d, suspend time %lld usec\n", + is_runtime ? 
"runtime" : "system", rc, suspend_time_usec); return rc; } diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index ec57bcce9601..389c718cd257 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -1666,7 +1666,7 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring, /* performance monitoring */ used = wil_vring_used_tx(vring); - if (wil_val_in_range(vring_idle_trsh, + if (wil_val_in_range(wil->vring_idle_trsh, used, used + descs_used)) { txdata->idle += get_cycles() - txdata->last_idle; wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n", @@ -1813,7 +1813,7 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, /* performance monitoring */ used = wil_vring_used_tx(vring); - if (wil_val_in_range(vring_idle_trsh, + if (wil_val_in_range(wil->vring_idle_trsh, used, used + nr_frags + 1)) { txdata->idle += get_cycles() - txdata->last_idle; wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n", @@ -2175,7 +2175,7 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid) /* performance monitoring */ used_new = wil_vring_used_tx(vring); - if (wil_val_in_range(vring_idle_trsh, + if (wil_val_in_range(wil->vring_idle_trsh, used_new, used_before_complete)) { wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n", ringid, used_before_complete, used_new); diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index d085ccfc7228..315ec8b59662 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -30,7 +30,6 @@ extern bool no_fw_recovery; extern unsigned int mtu_max; extern unsigned short rx_ring_overflow_thrsh; extern int agg_wsize; -extern u32 vring_idle_trsh; extern bool rx_align_2; extern bool rx_large_buf; extern bool debug_fw; @@ -90,6 +89,11 @@ struct wil_suspend_stats { unsigned long failed_resumes; unsigned long rejected_by_device; unsigned long rejected_by_host; + unsigned long long total_suspend_time; + unsigned long long min_suspend_time; + unsigned long long max_suspend_time; + ktime_t collection_start; + ktime_t suspend_start_time; }; /* Calculate MAC buffer size for the firmware. 
It includes all overhead, @@ -166,6 +170,10 @@ struct RGF_ICR { #define RGF_USER_USER_SCRATCH_PAD (0x8802bc) #define RGF_USER_BL (0x880A3C) /* Boot Loader */ #define RGF_USER_FW_REV_ID (0x880a8c) /* chip revision */ +#define RGF_USER_FW_CALIB_RESULT (0x880a90) /* b0-7:result + * b8-15:signature + */ + #define CALIB_RESULT_SIGNATURE (0x11) #define RGF_USER_CLKS_CTL_0 (0x880abc) #define BIT_USER_CLKS_CAR_AHB_SW_SEL BIT(1) /* ref clk/PLL */ #define BIT_USER_CLKS_RST_PWGD BIT(11) /* reset on "power good" */ @@ -260,6 +268,7 @@ struct RGF_ICR { #define BIT_DMA_PSEUDO_CAUSE_MISC BIT(2) #define RGF_HP_CTRL (0x88265c) +#define RGF_PAL_UNIT_ICR (0x88266c) /* struct RGF_ICR */ #define RGF_PCIE_LOS_COUNTER_CTL (0x882dc4) /* MAC timer, usec, for packet lifetime */ @@ -684,6 +693,7 @@ struct wil6210_priv { u8 vring2cid_tid[WIL6210_MAX_TX_RINGS][2]; /* [0] - CID, [1] - TID */ struct wil_sta_info sta[WIL6210_MAX_CID]; int bcast_vring; + u32 vring_idle_trsh; /* HW fetches up to 16 descriptors at once */ bool use_extended_dma_addr; /* indicates whether we are using 48 bits */ /* scan */ struct cfg80211_scan_request *scan_request; @@ -719,6 +729,8 @@ struct wil6210_priv { enum wmi_ps_profile_type ps_profile; + int fw_calib_result; + #ifdef CONFIG_PM #ifdef CONFIG_PM_SLEEP struct notifier_block pm_notify; @@ -929,8 +941,14 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_mgmt_tx_params *params, u64 *cookie); +#if defined(CONFIG_WIL6210_DEBUGFS) int wil6210_debugfs_init(struct wil6210_priv *wil); void wil6210_debugfs_remove(struct wil6210_priv *wil); +#else +static inline int wil6210_debugfs_init(struct wil6210_priv *wil) { return 0; } +static inline void wil6210_debugfs_remove(struct wil6210_priv *wil) {} +#endif + int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid, struct station_info *sinfo); diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 65ef67321fc0..ffdd2fa401b1 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -344,6 +344,11 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len) strlcpy(wdev->wiphy->fw_version, wil->fw_version, sizeof(wdev->wiphy->fw_version)); + if (len > offsetof(struct wmi_ready_event, rfc_read_calib_result)) { + wil_dbg_wmi(wil, "rfc calibration result %d\n", + evt->rfc_read_calib_result); + wil->fw_calib_result = evt->rfc_read_calib_result; + } wil_set_recovery_state(wil, fw_recovery_idle); set_bit(wil_status_fwready, wil->status); /* let the reset sequence continue */ @@ -381,12 +386,15 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len) ch_no = data->info.channel + 1; freq = ieee80211_channel_to_frequency(ch_no, NL80211_BAND_60GHZ); channel = ieee80211_get_channel(wiphy, freq); - signal = data->info.sqi; + if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, wil->fw_capabilities)) + signal = 100 * data->info.rssi; + else + signal = data->info.sqi; d_status = le16_to_cpu(data->info.status); fc = rx_mgmt_frame->frame_control; - wil_dbg_wmi(wil, "MGMT Rx: channel %d MCS %d SNR %d SQI %d%%\n", - data->info.channel, data->info.mcs, data->info.snr, + wil_dbg_wmi(wil, "MGMT Rx: channel %d MCS %d RSSI %d SQI %d%%\n", + data->info.channel, data->info.mcs, data->info.rssi, data->info.sqi); wil_dbg_wmi(wil, "status 0x%04x len %d fc 0x%04x\n", d_status, d_len, le16_to_cpu(fc)); diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h index 
256f63c57da0..5263ee717a4f 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.h +++ b/drivers/net/wireless/ath/wil6210/wmi.h @@ -36,6 +36,11 @@ #define WMI_PROX_RANGE_NUM (3) #define WMI_MAX_LOSS_DMG_BEACONS (20) #define MAX_NUM_OF_SECTORS (128) +#define WMI_SCHED_MAX_ALLOCS_PER_CMD (4) +#define WMI_RF_DTYPE_LENGTH (3) +#define WMI_RF_ETYPE_LENGTH (3) +#define WMI_RF_RX2TX_LENGTH (3) +#define WMI_RF_ETYPE_VAL_PER_RANGE (5) /* Mailbox interface * used for commands and events @@ -52,14 +57,20 @@ enum wmi_mid { * the host */ enum wmi_fw_capability { - WMI_FW_CAPABILITY_FTM = 0, - WMI_FW_CAPABILITY_PS_CONFIG = 1, - WMI_FW_CAPABILITY_RF_SECTORS = 2, - WMI_FW_CAPABILITY_MGMT_RETRY_LIMIT = 3, - WMI_FW_CAPABILITY_DISABLE_AP_SME = 4, - WMI_FW_CAPABILITY_WMI_ONLY = 5, - WMI_FW_CAPABILITY_THERMAL_THROTTLING = 7, - WMI_FW_CAPABILITY_D3_SUSPEND = 8, + WMI_FW_CAPABILITY_FTM = 0, + WMI_FW_CAPABILITY_PS_CONFIG = 1, + WMI_FW_CAPABILITY_RF_SECTORS = 2, + WMI_FW_CAPABILITY_MGMT_RETRY_LIMIT = 3, + WMI_FW_CAPABILITY_DISABLE_AP_SME = 4, + WMI_FW_CAPABILITY_WMI_ONLY = 5, + WMI_FW_CAPABILITY_THERMAL_THROTTLING = 7, + WMI_FW_CAPABILITY_D3_SUSPEND = 8, + WMI_FW_CAPABILITY_LONG_RANGE = 9, + WMI_FW_CAPABILITY_FIXED_SCHEDULING = 10, + WMI_FW_CAPABILITY_MULTI_DIRECTED_OMNIS = 11, + WMI_FW_CAPABILITY_RSSI_REPORTING = 12, + WMI_FW_CAPABILITY_SET_SILENT_RSSI_TABLE = 13, + WMI_FW_CAPABILITY_LO_POWER_CALIB_FROM_OTP = 14, WMI_FW_CAPABILITY_MAX, }; @@ -79,6 +90,7 @@ enum wmi_command_id { WMI_START_SCAN_CMDID = 0x07, WMI_SET_BSS_FILTER_CMDID = 0x09, WMI_SET_PROBED_SSID_CMDID = 0x0A, + /* deprecated */ WMI_SET_LISTEN_INT_CMDID = 0x0B, WMI_BCON_CTRL_CMDID = 0x0F, WMI_ADD_CIPHER_KEY_CMDID = 0x16, @@ -93,26 +105,28 @@ enum wmi_command_id { WMI_ECHO_CMDID = 0x803, WMI_DEEP_ECHO_CMDID = 0x804, WMI_CONFIG_MAC_CMDID = 0x805, + /* deprecated */ WMI_CONFIG_PHY_DEBUG_CMDID = 0x806, WMI_ADD_DEBUG_TX_PCKT_CMDID = 0x808, WMI_PHY_GET_STATISTICS_CMDID = 0x809, + /* deprecated */ WMI_FS_TUNE_CMDID = 0x80A, + /* deprecated */ WMI_CORR_MEASURE_CMDID = 0x80B, WMI_READ_RSSI_CMDID = 0x80C, WMI_TEMP_SENSE_CMDID = 0x80E, WMI_DC_CALIB_CMDID = 0x80F, + /* deprecated */ WMI_SEND_TONE_CMDID = 0x810, + /* deprecated */ WMI_IQ_TX_CALIB_CMDID = 0x811, + /* deprecated */ WMI_IQ_RX_CALIB_CMDID = 0x812, - WMI_SET_UCODE_IDLE_CMDID = 0x813, WMI_SET_WORK_MODE_CMDID = 0x815, WMI_LO_LEAKAGE_CALIB_CMDID = 0x816, - WMI_MARLON_R_READ_CMDID = 0x818, - WMI_MARLON_R_WRITE_CMDID = 0x819, - WMI_MARLON_R_TXRX_SEL_CMDID = 0x81A, - MAC_IO_STATIC_PARAMS_CMDID = 0x81B, - MAC_IO_DYNAMIC_PARAMS_CMDID = 0x81C, + WMI_LO_POWER_CALIB_FROM_OTP_CMDID = 0x817, WMI_SILENT_RSSI_CALIB_CMDID = 0x81D, + /* deprecated */ WMI_RF_RX_TEST_CMDID = 0x81E, WMI_CFG_RX_CHAIN_CMDID = 0x820, WMI_VRING_CFG_CMDID = 0x821, @@ -126,11 +140,6 @@ enum wmi_command_id { WMI_SET_PCP_CHANNEL_CMDID = 0x829, WMI_GET_PCP_CHANNEL_CMDID = 0x82A, WMI_SW_TX_REQ_CMDID = 0x82B, - WMI_READ_MAC_RXQ_CMDID = 0x830, - WMI_READ_MAC_TXQ_CMDID = 0x831, - WMI_WRITE_MAC_RXQ_CMDID = 0x832, - WMI_WRITE_MAC_TXQ_CMDID = 0x833, - WMI_WRITE_MAC_XQ_FIELD_CMDID = 0x834, WMI_MLME_PUSH_CMDID = 0x835, WMI_BEAMFORMING_MGMT_CMDID = 0x836, WMI_BF_TXSS_MGMT_CMDID = 0x837, @@ -144,9 +153,13 @@ enum wmi_command_id { WMI_MAINTAIN_RESUME_CMDID = 0x851, WMI_RS_MGMT_CMDID = 0x852, WMI_RF_MGMT_CMDID = 0x853, - WMI_OTP_READ_CMDID = 0x856, - WMI_OTP_WRITE_CMDID = 0x857, + WMI_RF_XPM_READ_CMDID = 0x856, + WMI_RF_XPM_WRITE_CMDID = 0x857, WMI_LED_CFG_CMDID = 0x858, + WMI_SET_CONNECT_SNR_THR_CMDID = 0x85B, + WMI_SET_ACTIVE_SILENT_RSSI_TABLE_CMDID = 0x85C, + 
WMI_RF_PWR_ON_DELAY_CMDID = 0x85D, + WMI_SET_HIGH_POWER_TABLE_PARAMS_CMDID = 0x85E, /* Performance monitoring commands */ WMI_BF_CTRL_CMDID = 0x862, WMI_NOTIFY_REQ_CMDID = 0x863, @@ -154,7 +167,6 @@ enum wmi_command_id { WMI_GET_RF_STATUS_CMDID = 0x866, WMI_GET_BASEBAND_TYPE_CMDID = 0x867, WMI_UNIT_TEST_CMDID = 0x900, - WMI_HICCUP_CMDID = 0x901, WMI_FLASH_READ_CMDID = 0x902, WMI_FLASH_WRITE_CMDID = 0x903, /* Power management */ @@ -174,16 +186,6 @@ enum wmi_command_id { WMI_GET_PCP_FACTOR_CMDID = 0x91B, /* Power Save Configuration Commands */ WMI_PS_DEV_PROFILE_CFG_CMDID = 0x91C, - /* Not supported yet */ - WMI_PS_DEV_CFG_CMDID = 0x91D, - /* Not supported yet */ - WMI_PS_DEV_CFG_READ_CMDID = 0x91E, - /* Per MAC Power Save Configuration commands - * Not supported yet - */ - WMI_PS_MID_CFG_CMDID = 0x91F, - /* Not supported yet */ - WMI_PS_MID_CFG_READ_CMDID = 0x920, WMI_RS_CFG_CMDID = 0x921, WMI_GET_DETAILED_RS_RES_CMDID = 0x922, WMI_AOA_MEAS_CMDID = 0x923, @@ -194,13 +196,16 @@ enum wmi_command_id { WMI_DEL_STA_CMDID = 0x936, WMI_SET_THERMAL_THROTTLING_CFG_CMDID = 0x940, WMI_GET_THERMAL_THROTTLING_CFG_CMDID = 0x941, + /* Read Power Save profile type */ + WMI_PS_DEV_PROFILE_CFG_READ_CMDID = 0x942, WMI_TOF_SESSION_START_CMDID = 0x991, WMI_TOF_GET_CAPABILITIES_CMDID = 0x992, WMI_TOF_SET_LCR_CMDID = 0x993, WMI_TOF_SET_LCI_CMDID = 0x994, - WMI_TOF_CHANNEL_INFO_CMDID = 0x995, + WMI_TOF_CFG_RESPONDER_CMDID = 0x996, WMI_TOF_SET_TX_RX_OFFSET_CMDID = 0x997, WMI_TOF_GET_TX_RX_OFFSET_CMDID = 0x998, + WMI_TOF_CHANNEL_INFO_CMDID = 0x999, WMI_GET_RF_SECTOR_PARAMS_CMDID = 0x9A0, WMI_SET_RF_SECTOR_PARAMS_CMDID = 0x9A1, WMI_GET_SELECTED_RF_SECTOR_INDEX_CMDID = 0x9A2, @@ -209,12 +214,20 @@ enum wmi_command_id { WMI_PRIO_TX_SECTORS_ORDER_CMDID = 0x9A5, WMI_PRIO_TX_SECTORS_NUMBER_CMDID = 0x9A6, WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_CMDID = 0x9A7, + WMI_SCHEDULING_SCHEME_CMDID = 0xA01, + WMI_FIXED_SCHEDULING_CONFIG_CMDID = 0xA02, + WMI_ENABLE_FIXED_SCHEDULING_CMDID = 0xA03, + WMI_SET_MULTI_DIRECTED_OMNIS_CONFIG_CMDID = 0xA04, + WMI_SET_LONG_RANGE_CONFIG_CMDID = 0xA05, WMI_SET_MAC_ADDRESS_CMDID = 0xF003, WMI_ABORT_SCAN_CMDID = 0xF007, WMI_SET_PROMISCUOUS_MODE_CMDID = 0xF041, + /* deprecated */ WMI_GET_PMK_CMDID = 0xF048, WMI_SET_PASSPHRASE_CMDID = 0xF049, + /* deprecated */ WMI_SEND_ASSOC_RES_CMDID = 0xF04A, + /* deprecated */ WMI_SET_ASSOC_REQ_RELAY_CMDID = 0xF04B, WMI_MAC_ADDR_REQ_CMDID = 0xF04D, WMI_FW_VER_CMDID = 0xF04E, @@ -440,11 +453,6 @@ struct wmi_rf_mgmt_cmd { __le32 rf_mgmt_type; } __packed; -/* WMI_RF_RX_TEST_CMDID */ -struct wmi_rf_rx_test_cmd { - __le32 sector; -} __packed; - /* WMI_CORR_MEASURE_CMDID */ struct wmi_corr_measure_cmd { __le32 freq_mhz; @@ -657,6 +665,20 @@ struct wmi_bcast_vring_cfg_cmd { struct wmi_bcast_vring_cfg vring_cfg; } __packed; +/* WMI_LO_POWER_CALIB_FROM_OTP_CMDID */ +struct wmi_lo_power_calib_from_otp_cmd { + /* index to read from OTP. 
zero based */ + u8 index; + u8 reserved[3]; +} __packed; + +/* WMI_LO_POWER_CALIB_FROM_OTP_EVENTID */ +struct wmi_lo_power_calib_from_otp_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + /* WMI_VRING_BA_EN_CMDID */ struct wmi_vring_ba_en_cmd { u8 ringid; @@ -692,6 +714,24 @@ enum wmi_sniffer_cfg_mode { WMI_SNIFFER_ON = 0x01, }; +/* WMI_SILENT_RSSI_TABLE */ +enum wmi_silent_rssi_table { + RF_TEMPERATURE_CALIB_DEFAULT_DB = 0x00, + RF_TEMPERATURE_CALIB_HIGH_POWER_DB = 0x01, +}; + +/* WMI_SILENT_RSSI_STATUS */ +enum wmi_silent_rssi_status { + SILENT_RSSI_SUCCESS = 0x00, + SILENT_RSSI_FAILURE = 0x01, +}; + +/* WMI_SET_ACTIVE_SILENT_RSSI_TABLE_CMDID */ +struct wmi_set_active_silent_rssi_table_cmd { + /* enum wmi_silent_rssi_table */ + __le32 table; +} __packed; + enum wmi_sniffer_cfg_phy_info_mode { WMI_SNIFFER_PHY_INFO_DISABLED = 0x00, WMI_SNIFFER_PHY_INFO_ENABLED = 0x01, @@ -835,18 +875,85 @@ struct wmi_echo_cmd { __le32 value; } __packed; -/* WMI_OTP_READ_CMDID */ -struct wmi_otp_read_cmd { - __le32 addr; - __le32 size; - __le32 values; +/* WMI_RF_PWR_ON_DELAY_CMDID + * set FW time parameters used through RF resetting + * RF reset consists of bringing its power down for a period of time, then + * bringing the power up + * Returned event: WMI_RF_PWR_ON_DELAY_RSP_EVENTID + */ +struct wmi_rf_pwr_on_delay_cmd { + /* time in usec the FW waits after bringing the RF PWR down, + * set 0 for default + */ + __le16 down_delay_usec; + /* time in usec the FW waits after bringing the RF PWR up, + * set 0 for default + */ + __le16 up_delay_usec; } __packed; -/* WMI_OTP_WRITE_CMDID */ -struct wmi_otp_write_cmd { - __le32 addr; - __le32 size; - __le32 values; +/* \WMI_SET_HIGH_POWER_TABLE_PARAMS_CMDID + * This API controls the Tx and Rx gain over temperature. + * It controls the Tx D-type, Rx D-type and Rx E-type amplifiers. + * It also controls the Tx gain index, by controlling the Rx to Tx gain index + * offset. + * The control is divided by 3 temperature values to 4 temperature ranges. + * Each parameter uses its own temperature values. + * Returned event: WMI_SET_HIGH_POWER_TABLE_PARAMS_EVENTID + */ +struct wmi_set_high_power_table_params_cmd { + /* Temperature range for Tx D-type parameters */ + u8 tx_dtype_temp[WMI_RF_DTYPE_LENGTH]; + u8 reserved0; + /* Tx D-type values to be used for each temperature range */ + __le32 tx_dtype_conf[WMI_RF_DTYPE_LENGTH + 1]; + /* Temperature range for Rx D-type parameters */ + u8 rx_dtype_temp[WMI_RF_DTYPE_LENGTH]; + u8 reserved1; + /* Rx D-type values to be used for each temperature range */ + __le32 rx_dtype_conf[WMI_RF_DTYPE_LENGTH + 1]; + /* Temperature range for Rx E-type parameters */ + u8 rx_etype_temp[WMI_RF_ETYPE_LENGTH]; + u8 reserved2; + /* Rx E-type values to be used for each temperature range. 
+ * The last 4 values of any range are the first 4 values of the next + * range and so on + */ + __le32 rx_etype_conf[WMI_RF_ETYPE_VAL_PER_RANGE + WMI_RF_ETYPE_LENGTH]; + /* Temperature range for rx_2_tx_offs parameters */ + u8 rx_2_tx_temp[WMI_RF_RX2TX_LENGTH]; + u8 reserved3; + /* Rx to Tx gain index offset */ + s8 rx_2_tx_offs[WMI_RF_RX2TX_LENGTH + 1]; +} __packed; + +/* CMD: WMI_RF_XPM_READ_CMDID */ +struct wmi_rf_xpm_read_cmd { + u8 rf_id; + u8 reserved[3]; + /* XPM bit start address in range [0,8191]bits - rounded by FW to + * multiple of 8bits + */ + __le32 xpm_bit_address; + __le32 num_bytes; +} __packed; + +/* CMD: WMI_RF_XPM_WRITE_CMDID */ +struct wmi_rf_xpm_write_cmd { + u8 rf_id; + u8 reserved0[3]; + /* XPM bit start address in range [0,8191]bits - rounded by FW to + * multiple of 8bits + */ + __le32 xpm_bit_address; + __le32 num_bytes; + /* boolean flag indicating whether FW should verify the write + * operation + */ + u8 verify; + u8 reserved1[3]; + /* actual size=num_bytes */ + u8 data_bytes[0]; } __packed; /* WMI_TEMP_SENSE_CMDID @@ -989,19 +1096,26 @@ struct wmi_ftm_dest_info { */ __le16 burst_period; u8 dst_mac[WMI_MAC_LEN]; - __le16 reserved; + u8 reserved; + u8 num_burst_per_aoa_meas; } __packed; /* WMI_TOF_SESSION_START_CMDID */ struct wmi_tof_session_start_cmd { __le32 session_id; - u8 num_of_aoa_measures; + u8 reserved1; u8 aoa_type; __le16 num_of_dest; u8 reserved[4]; struct wmi_ftm_dest_info ftm_dest_info[0]; } __packed; +/* WMI_TOF_CFG_RESPONDER_CMDID */ +struct wmi_tof_cfg_responder_cmd { + u8 enable; + u8 reserved[3]; +} __packed; + enum wmi_tof_channel_info_report_type { WMI_TOF_CHANNEL_INFO_TYPE_CIR = 0x1, WMI_TOF_CHANNEL_INFO_TYPE_RSSI = 0x2, @@ -1022,7 +1136,99 @@ struct wmi_tof_set_tx_rx_offset_cmd { __le32 tx_offset; /* RX delay offset */ __le32 rx_offset; - __le32 reserved[2]; + /* Mask to define which RFs to configure. 
0 means all RFs */ + __le32 rf_mask; + /* Offset to strongest tap of CIR */ + __le32 precursor; +} __packed; + +/* WMI_TOF_GET_TX_RX_OFFSET_CMDID */ +struct wmi_tof_get_tx_rx_offset_cmd { + /* rf index to read offsets from */ + u8 rf_index; + u8 reserved[3]; +} __packed; + +/* WMI_FIXED_SCHEDULING_CONFIG_CMDID */ +struct wmi_map_mcs_to_schd_params { + u8 mcs; + /* time in usec from start slot to start tx flow - default 15 */ + u8 time_in_usec_before_initiate_tx; + /* RD enable - if yes consider RD according to STA mcs */ + u8 rd_enabled; + u8 reserved; + /* time in usec from start slot to stop vring */ + __le16 time_in_usec_to_stop_vring; + /* timeout to force flush from start of slot */ + __le16 flush_to_in_usec; + /* per mcs the mac buffer limit size in bytes */ + __le32 mac_buff_size_in_bytes; +} __packed; + +/* WMI_FIXED_SCHEDULING_CONFIG_COMPLETE_EVENTID */ +struct wmi_fixed_scheduling_config_complete_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + +#define WMI_NUM_MCS (13) + +/* WMI_FIXED_SCHEDULING_CONFIG_CMDID */ +struct wmi_fixed_scheduling_config_cmd { + /* defaults in the SAS table */ + struct wmi_map_mcs_to_schd_params mcs_to_schd_params_map[WMI_NUM_MCS]; + /* default 150 uSec */ + __le16 max_sta_rd_ppdu_duration_in_usec; + /* default 300 uSec */ + __le16 max_sta_grant_ppdu_duration_in_usec; + /* default 1000 uSec */ + __le16 assoc_slot_duration_in_usec; + /* default 360 uSec */ + __le16 virtual_slot_duration_in_usec; + /* each this field value slots start with grant frame to the station + * - default 2 + */ + u8 number_of_ap_slots_for_initiate_grant; + u8 reserved[3]; +} __packed; + +/* WMI_ENABLE_FIXED_SCHEDULING_CMDID */ +struct wmi_enable_fixed_scheduling_cmd { + __le32 reserved; +} __packed; + +/* WMI_ENABLE_FIXED_SCHEDULING_COMPLETE_EVENTID */ +struct wmi_enable_fixed_scheduling_complete_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + +/* WMI_SET_MULTI_DIRECTED_OMNIS_CONFIG_CMDID */ +struct wmi_set_multi_directed_omnis_config_cmd { + /* number of directed omnis at destination AP */ + u8 dest_ap_num_directed_omnis; + u8 reserved[3]; +} __packed; + +/* WMI_SET_MULTI_DIRECTED_OMNIS_CONFIG_EVENTID */ +struct wmi_set_multi_directed_omnis_config_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + +/* WMI_SET_LONG_RANGE_CONFIG_CMDID */ +struct wmi_set_long_range_config_cmd { + __le32 reserved; +} __packed; + +/* WMI_SET_LONG_RANGE_CONFIG_COMPLETE_EVENTID */ +struct wmi_set_long_range_config_complete_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; } __packed; /* WMI Events @@ -1038,19 +1244,22 @@ enum wmi_event_id { WMI_FW_READY_EVENTID = 0x1801, WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID = 0x200, WMI_ECHO_RSP_EVENTID = 0x1803, + /* deprecated */ WMI_FS_TUNE_DONE_EVENTID = 0x180A, + /* deprecated */ WMI_CORR_MEASURE_EVENTID = 0x180B, WMI_READ_RSSI_EVENTID = 0x180C, WMI_TEMP_SENSE_DONE_EVENTID = 0x180E, WMI_DC_CALIB_DONE_EVENTID = 0x180F, + /* deprecated */ WMI_IQ_TX_CALIB_DONE_EVENTID = 0x1811, + /* deprecated */ WMI_IQ_RX_CALIB_DONE_EVENTID = 0x1812, WMI_SET_WORK_MODE_DONE_EVENTID = 0x1815, WMI_LO_LEAKAGE_CALIB_DONE_EVENTID = 0x1816, - WMI_MARLON_R_READ_DONE_EVENTID = 0x1818, - WMI_MARLON_R_WRITE_DONE_EVENTID = 0x1819, - WMI_MARLON_R_TXRX_SEL_DONE_EVENTID = 0x181A, + WMI_LO_POWER_CALIB_FROM_OTP_EVENTID = 0x1817, WMI_SILENT_RSSI_CALIB_DONE_EVENTID = 0x181D, + /* deprecated */ WMI_RF_RX_TEST_DONE_EVENTID = 0x181E, WMI_CFG_RX_CHAIN_DONE_EVENTID = 0x1820, WMI_VRING_CFG_DONE_EVENTID = 0x1821, @@ 
-1061,11 +1270,6 @@ enum wmi_event_id { WMI_GET_SSID_EVENTID = 0x1828, WMI_GET_PCP_CHANNEL_EVENTID = 0x182A, WMI_SW_TX_COMPLETE_EVENTID = 0x182B, - WMI_READ_MAC_RXQ_EVENTID = 0x1830, - WMI_READ_MAC_TXQ_EVENTID = 0x1831, - WMI_WRITE_MAC_RXQ_EVENTID = 0x1832, - WMI_WRITE_MAC_TXQ_EVENTID = 0x1833, - WMI_WRITE_MAC_XQ_FIELD_EVENTID = 0x1834, WMI_BEAMFORMING_MGMT_DONE_EVENTID = 0x1836, WMI_BF_TXSS_MGMT_DONE_EVENTID = 0x1837, WMI_BF_RXSS_MGMT_DONE_EVENTID = 0x1839, @@ -1076,8 +1280,12 @@ enum wmi_event_id { WMI_TX_MGMT_PACKET_EVENTID = 0x1841, WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENTID = 0x1842, WMI_LINK_MAINTAIN_CFG_READ_DONE_EVENTID = 0x1843, - WMI_OTP_READ_RESULT_EVENTID = 0x1856, + WMI_RF_XPM_READ_RESULT_EVENTID = 0x1856, + WMI_RF_XPM_WRITE_RESULT_EVENTID = 0x1857, WMI_LED_CFG_DONE_EVENTID = 0x1858, + WMI_SET_SILENT_RSSI_TABLE_DONE_EVENTID = 0x185C, + WMI_RF_PWR_ON_DELAY_RSP_EVENTID = 0x185D, + WMI_SET_HIGH_POWER_TABLE_PARAMS_EVENTID = 0x185E, /* Performance monitoring events */ WMI_DATA_PORT_OPEN_EVENTID = 0x1860, WMI_WBE_LINK_DOWN_EVENTID = 0x1861, @@ -1106,14 +1314,6 @@ enum wmi_event_id { WMI_PCP_FACTOR_EVENTID = 0x191A, /* Power Save Configuration Events */ WMI_PS_DEV_PROFILE_CFG_EVENTID = 0x191C, - /* Not supported yet */ - WMI_PS_DEV_CFG_EVENTID = 0x191D, - /* Not supported yet */ - WMI_PS_DEV_CFG_READ_EVENTID = 0x191E, - /* Not supported yet */ - WMI_PS_MID_CFG_EVENTID = 0x191F, - /* Not supported yet */ - WMI_PS_MID_CFG_READ_EVENTID = 0x1920, WMI_RS_CFG_DONE_EVENTID = 0x1921, WMI_GET_DETAILED_RS_RES_EVENTID = 0x1922, WMI_AOA_MEAS_EVENTID = 0x1923, @@ -1122,14 +1322,17 @@ enum wmi_event_id { WMI_GET_MGMT_RETRY_LIMIT_EVENTID = 0x1931, WMI_SET_THERMAL_THROTTLING_CFG_EVENTID = 0x1940, WMI_GET_THERMAL_THROTTLING_CFG_EVENTID = 0x1941, + /* return the Power Save profile */ + WMI_PS_DEV_PROFILE_CFG_READ_EVENTID = 0x1942, WMI_TOF_SESSION_END_EVENTID = 0x1991, WMI_TOF_GET_CAPABILITIES_EVENTID = 0x1992, WMI_TOF_SET_LCR_EVENTID = 0x1993, WMI_TOF_SET_LCI_EVENTID = 0x1994, WMI_TOF_FTM_PER_DEST_RES_EVENTID = 0x1995, - WMI_TOF_CHANNEL_INFO_EVENTID = 0x1996, + WMI_TOF_CFG_RESPONDER_EVENTID = 0x1996, WMI_TOF_SET_TX_RX_OFFSET_EVENTID = 0x1997, WMI_TOF_GET_TX_RX_OFFSET_EVENTID = 0x1998, + WMI_TOF_CHANNEL_INFO_EVENTID = 0x1999, WMI_GET_RF_SECTOR_PARAMS_DONE_EVENTID = 0x19A0, WMI_SET_RF_SECTOR_PARAMS_DONE_EVENTID = 0x19A1, WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID = 0x19A2, @@ -1138,12 +1341,18 @@ enum wmi_event_id { WMI_PRIO_TX_SECTORS_ORDER_EVENTID = 0x19A5, WMI_PRIO_TX_SECTORS_NUMBER_EVENTID = 0x19A6, WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_EVENTID = 0x19A7, + WMI_SCHEDULING_SCHEME_EVENTID = 0x1A01, + WMI_FIXED_SCHEDULING_CONFIG_COMPLETE_EVENTID = 0x1A02, + WMI_ENABLE_FIXED_SCHEDULING_COMPLETE_EVENTID = 0x1A03, + WMI_SET_MULTI_DIRECTED_OMNIS_CONFIG_EVENTID = 0x1A04, + WMI_SET_LONG_RANGE_CONFIG_COMPLETE_EVENTID = 0x1A05, WMI_SET_CHANNEL_EVENTID = 0x9000, WMI_ASSOC_REQ_EVENTID = 0x9001, WMI_EAPOL_RX_EVENTID = 0x9002, WMI_MAC_ADDR_RESP_EVENTID = 0x9003, WMI_FW_VER_EVENTID = 0x9004, WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENTID = 0x9005, + WMI_COMMAND_NOT_SUPPORTED_EVENTID = 0xFFFF, }; /* Events data structures */ @@ -1200,7 +1409,7 @@ struct wmi_fw_ver_event { __le32 bl_minor; __le32 bl_subminor; __le32 bl_build; - /* The number of entries in the FW capabilies array */ + /* The number of entries in the FW capabilities array */ u8 fw_capabilities_len; u8 reserved[3]; /* FW capabilities info @@ -1245,7 +1454,9 @@ struct wmi_get_rf_status_event { __le32 board_file_platform_type; /* board file version */ __le32 
board_file_version; - __le32 reserved[2]; + /* enabled XIFs bit vector */ + __le32 enabled_xif_vector; + __le32 reserved; } __packed; /* WMI_GET_BASEBAND_TYPE_EVENTID */ @@ -1299,6 +1510,9 @@ struct wmi_ready_event { /* enum wmi_phy_capability */ u8 phy_capability; u8 numof_additional_mids; + /* rfc read calibration result. 5..15 */ + u8 rfc_read_calib_result; + u8 reserved[3]; } __packed; /* WMI_NOTIFY_REQ_DONE_EVENTID */ @@ -1306,7 +1520,8 @@ struct wmi_notify_req_done_event { /* beamforming status, 0: fail; 1: OK; 2: retrying */ __le32 status; __le64 tsf; - __le32 snr_val; + s8 rssi; + u8 reserved0[3]; __le32 tx_tpt; __le32 tx_goodput; __le32 rx_goodput; @@ -1576,7 +1791,7 @@ struct wmi_sw_tx_complete_event { u8 reserved[3]; } __packed; -/* WMI_CORR_MEASURE_EVENTID */ +/* WMI_CORR_MEASURE_EVENTID - deprecated */ struct wmi_corr_measure_event { /* signed */ __le32 i; @@ -1602,31 +1817,35 @@ struct wmi_get_ssid_event { /* wmi_rx_mgmt_info */ struct wmi_rx_mgmt_info { u8 mcs; - s8 snr; + s8 rssi; u8 range; u8 sqi; __le16 stype; __le16 status; __le32 len; - /* Not resolved when == 0xFFFFFFFF ==> Broadcast to all MIDS */ + /* Not resolved when == 0xFFFFFFFF == > Broadcast to all MIDS */ u8 qid; - /* Not resolved when == 0xFFFFFFFF ==> Broadcast to all MIDS */ + /* Not resolved when == 0xFFFFFFFF == > Broadcast to all MIDS */ u8 mid; u8 cid; /* From Radio MNGR */ u8 channel; } __packed; -/* wmi_otp_read_write_cmd */ -struct wmi_otp_read_write_cmd { - __le32 addr; - __le32 size; - u8 values[0]; +/* EVENT: WMI_RF_XPM_READ_RESULT_EVENTID */ +struct wmi_rf_xpm_read_result_event { + /* enum wmi_fw_status_e - success=0 or fail=1 */ + u8 status; + u8 reserved[3]; + /* requested num_bytes of data */ + u8 data_bytes[0]; } __packed; -/* WMI_OTP_READ_RESULT_EVENTID */ -struct wmi_otp_read_result_event { - u8 payload[0]; +/* EVENT: WMI_RF_XPM_WRITE_RESULT_EVENTID */ +struct wmi_rf_xpm_write_result_event { + /* enum wmi_fw_status_e - success=0 or fail=1 */ + u8 status; + u8 reserved[3]; } __packed; /* WMI_TX_MGMT_PACKET_EVENTID */ @@ -1645,6 +1864,20 @@ struct wmi_echo_rsp_event { __le32 echoed_value; } __packed; +/* WMI_RF_PWR_ON_DELAY_RSP_EVENTID */ +struct wmi_rf_pwr_on_delay_rsp_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + +/* WMI_SET_HIGH_POWER_TABLE_PARAMS_EVENTID */ +struct wmi_set_high_power_table_params_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + /* WMI_TEMP_SENSE_DONE_EVENTID * * Measure MAC and radio temperatures @@ -1722,14 +1955,22 @@ struct wmi_led_cfg_cmd { u8 reserved; } __packed; +/* \WMI_SET_CONNECT_SNR_THR_CMDID */ +struct wmi_set_connect_snr_thr_cmd { + u8 enable; + u8 reserved; + /* 1/4 Db units */ + __le16 omni_snr_thr; + /* 1/4 Db units */ + __le16 direct_snr_thr; +} __packed; + /* WMI_LED_CFG_DONE_EVENTID */ struct wmi_led_cfg_done_event { /* led config status */ __le32 status; } __packed; -#define WMI_NUM_MCS (13) - /* Rate search parameters configuration per connection */ struct wmi_rs_cfg { /* The maximal allowed PER for each MCS @@ -1754,6 +1995,98 @@ struct wmi_rs_cfg { __le32 mcs_en_vec; } __packed; +/* Slot types */ +enum wmi_sched_scheme_slot_type { + WMI_SCHED_SLOT_SP = 0x0, + WMI_SCHED_SLOT_CBAP = 0x1, + WMI_SCHED_SLOT_IDLE = 0x2, + WMI_SCHED_SLOT_ANNOUNCE_NO_ACK = 0x3, + WMI_SCHED_SLOT_DISCOVERY = 0x4, +}; + +enum wmi_sched_scheme_slot_flags { + WMI_SCHED_SCHEME_SLOT_PERIODIC = 0x1, +}; + +struct wmi_sched_scheme_slot { + /* in microsecond */ + __le32 tbtt_offset; + /* wmi_sched_scheme_slot_flags */ + u8 
flags; + /* wmi_sched_scheme_slot_type */ + u8 type; + /* in microsecond */ + __le16 duration; + /* frame_exchange_sequence_duration */ + __le16 tx_op; + /* time in microseconds between two consecutive slots + * relevant only if flag WMI_SCHED_SCHEME_SLOT_PERIODIC set + */ + __le16 period; + /* relevant only if flag WMI_SCHED_SCHEME_SLOT_PERIODIC set + * number of times to repeat allocation + */ + u8 num_of_blocks; + /* relevant only if flag WMI_SCHED_SCHEME_SLOT_PERIODIC set + * every idle_period allocation will be idle + */ + u8 idle_period; + u8 src_aid; + u8 dest_aid; + __le32 reserved; +} __packed; + +enum wmi_sched_scheme_flags { + /* should not be set when clearing scheduling scheme */ + WMI_SCHED_SCHEME_ENABLE = 0x01, + WMI_SCHED_PROTECTED_SP = 0x02, + /* should be set only on first WMI fragment of scheme */ + WMI_SCHED_FIRST = 0x04, + /* should be set only on last WMI fragment of scheme */ + WMI_SCHED_LAST = 0x08, + WMI_SCHED_IMMEDIATE_START = 0x10, +}; + +enum wmi_sched_scheme_advertisment { + /* ESE is not advertised at all, STA has to be configured with WMI + * also + */ + WMI_ADVERTISE_ESE_DISABLED = 0x0, + WMI_ADVERTISE_ESE_IN_BEACON = 0x1, + WMI_ADVERTISE_ESE_IN_ANNOUNCE_FRAME = 0x2, +}; + +/* WMI_SCHEDULING_SCHEME_CMD */ +struct wmi_scheduling_scheme_cmd { + u8 serial_num; + /* wmi_sched_scheme_advertisment */ + u8 ese_advertisment; + /* wmi_sched_scheme_flags */ + __le16 flags; + u8 num_allocs; + u8 reserved[3]; + __le64 start_tbtt; + /* allocations list */ + struct wmi_sched_scheme_slot allocs[WMI_SCHED_MAX_ALLOCS_PER_CMD]; +} __packed; + +enum wmi_sched_scheme_failure_type { + WMI_SCHED_SCHEME_FAILURE_NO_ERROR = 0x00, + WMI_SCHED_SCHEME_FAILURE_OLD_START_TSF_ERR = 0x01, +}; + +/* WMI_SCHEDULING_SCHEME_EVENTID */ +struct wmi_scheduling_scheme_event { + /* wmi_fw_status_e */ + u8 status; + /* serial number given in command */ + u8 serial_num; + /* wmi_sched_scheme_failure_type */ + u8 failure_type; + /* alignment to 32b */ + u8 reserved[1]; +} __packed; + /* WMI_RS_CFG_CMDID */ struct wmi_rs_cfg_cmd { /* connection id */ @@ -1971,6 +2304,19 @@ enum wmi_ps_profile_type { WMI_PS_PROFILE_TYPE_LOW_LATENCY_PS = 0x03, }; +/* WMI_PS_DEV_PROFILE_CFG_READ_CMDID */ +struct wmi_ps_dev_profile_cfg_read_cmd { + /* reserved */ + __le32 reserved; +} __packed; + +/* WMI_PS_DEV_PROFILE_CFG_READ_EVENTID */ +struct wmi_ps_dev_profile_cfg_read_event { + /* wmi_ps_profile_type_e */ + u8 ps_profile; + u8 reserved[3]; +} __packed; + /* WMI_PS_DEV_PROFILE_CFG_CMDID * * Power save profile to be used by the device @@ -2019,157 +2365,6 @@ enum wmi_ps_d3_resp_policy { WMI_PS_D3_RESP_POLICY_APPROVED = 0x02, }; -/* Device common power save configurations */ -struct wmi_ps_dev_cfg { - /* lowest level of PS allowed while unassociated, enum wmi_ps_level_e - */ - u8 ps_unassoc_min_level; - /* lowest deep sleep clock level while nonassoc, enum - * wmi_ps_deep_sleep_clk_level_e - */ - u8 ps_unassoc_deep_sleep_min_level; - /* lowest level of PS allowed while associated, enum wmi_ps_level_e */ - u8 ps_assoc_min_level; - /* lowest deep sleep clock level while assoc, enum - * wmi_ps_deep_sleep_clk_level_e - */ - u8 ps_assoc_deep_sleep_min_level; - /* enum wmi_ps_deep_sleep_clk_level_e */ - u8 ps_assoc_low_latency_ds_min_level; - /* enum wmi_ps_d3_resp_policy_e */ - u8 ps_D3_response_policy; - /* BOOL */ - u8 ps_D3_pm_pme_enabled; - /* BOOL */ - u8 ps_halp_enable; - u8 ps_deep_sleep_enter_thresh_msec; - /* BOOL */ - u8 ps_voltage_scaling_en; -} __packed; - -/* WMI_PS_DEV_CFG_CMDID - * - * Configure common Power 
Save parameters of the device and all MIDs. - * - * Returned event: - * - WMI_PS_DEV_CFG_EVENTID - */ -struct wmi_ps_dev_cfg_cmd { - /* Device Power Save configuration to be applied */ - struct wmi_ps_dev_cfg ps_dev_cfg; - /* alignment to 32b */ - u8 reserved[2]; -} __packed; - -/* WMI_PS_DEV_CFG_EVENTID */ -struct wmi_ps_dev_cfg_event { - /* wmi_ps_cfg_cmd_status_e */ - __le32 status; -} __packed; - -/* WMI_PS_DEV_CFG_READ_CMDID - * - * request to retrieve device Power Save configuration - * (WMI_PS_DEV_CFG_CMD params) - * - * Returned event: - * - WMI_PS_DEV_CFG_READ_EVENTID - */ -struct wmi_ps_dev_cfg_read_cmd { - __le32 reserved; -} __packed; - -/* WMI_PS_DEV_CFG_READ_EVENTID */ -struct wmi_ps_dev_cfg_read_event { - /* wmi_ps_cfg_cmd_status_e */ - __le32 status; - /* Retrieved device Power Save configuration (WMI_PS_DEV_CFG_CMD - * params) - */ - struct wmi_ps_dev_cfg dev_ps_cfg; - /* alignment to 32b */ - u8 reserved[2]; -} __packed; - -/* Per Mac Power Save configurations */ -struct wmi_ps_mid_cfg { - /* Low power RX in BTI is enabled, BOOL */ - u8 beacon_lprx_enable; - /* Sync to sector ID enabled, BOOL */ - u8 beacon_sync_to_sectorId_enable; - /* Low power RX in DTI is enabled, BOOL */ - u8 frame_exchange_lprx_enable; - /* Sleep Cycle while in scheduled PS, 1-31 */ - u8 scheduled_sleep_cycle_pow2; - /* Stay Awake for k BIs every (sleep_cycle - k) BIs, 1-31 */ - u8 scheduled_num_of_awake_bis; - u8 am_to_traffic_load_thresh_mbp; - u8 traffic_to_am_load_thresh_mbps; - u8 traffic_to_am_num_of_no_traffic_bis; - /* BOOL */ - u8 continuous_traffic_psm; - __le16 no_traffic_to_min_usec; - __le16 no_traffic_to_max_usec; - __le16 snoozing_sleep_interval_milisec; - u8 max_no_data_awake_events; - /* Trigger WEB after k failed beacons */ - u8 num_of_failed_beacons_rx_to_trigger_web; - /* Trigger BF after k failed beacons */ - u8 num_of_failed_beacons_rx_to_trigger_bf; - /* Trigger SOB after k successful beacons */ - u8 num_of_successful_beacons_rx_to_trigger_sob; -} __packed; - -/* WMI_PS_MID_CFG_CMDID - * - * Configure Power Save parameters of a specific MID. - * These parameters are relevant for the specific BSS this MID belongs to. 
- * - * Returned event: - * - WMI_PS_MID_CFG_EVENTID - */ -struct wmi_ps_mid_cfg_cmd { - /* MAC ID */ - u8 mid; - /* mid PS configuration to be applied */ - struct wmi_ps_mid_cfg ps_mid_cfg; -} __packed; - -/* WMI_PS_MID_CFG_EVENTID */ -struct wmi_ps_mid_cfg_event { - /* MAC ID */ - u8 mid; - /* alignment to 32b */ - u8 reserved[3]; - /* wmi_ps_cfg_cmd_status_e */ - __le32 status; -} __packed; - -/* WMI_PS_MID_CFG_READ_CMDID - * - * request to retrieve Power Save configuration of mid - * (WMI_PS_MID_CFG_CMD params) - * - * Returned event: - * - WMI_PS_MID_CFG_READ_EVENTID - */ -struct wmi_ps_mid_cfg_read_cmd { - /* MAC ID */ - u8 mid; - /* alignment to 32b */ - u8 reserved[3]; -} __packed; - -/* WMI_PS_MID_CFG_READ_EVENTID */ -struct wmi_ps_mid_cfg_read_event { - /* MAC ID */ - u8 mid; - /* Retrieved MID Power Save configuration(WMI_PS_MID_CFG_CMD params) */ - struct wmi_ps_mid_cfg mid_ps_cfg; - /* wmi_ps_cfg_cmd_status_e */ - __le32 status; -} __packed; - #define WMI_AOA_MAX_DATA_SIZE (128) enum wmi_aoa_meas_status { @@ -2260,6 +2455,20 @@ struct wmi_tof_session_end_event { u8 reserved[3]; } __packed; +/* WMI_TOF_SET_LCI_EVENTID */ +struct wmi_tof_set_lci_event { + /* enum wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + +/* WMI_TOF_SET_LCR_EVENTID */ +struct wmi_tof_set_lcr_event { + /* enum wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + /* Responder FTM Results */ struct wmi_responder_ftm_res { u8 t1[6]; @@ -2313,10 +2522,19 @@ struct wmi_tof_ftm_per_dest_res_event { __le32 tsf_sync; /* actual received ftm per burst */ u8 actual_ftm_per_burst; - u8 reserved0[7]; + /* Measurments are from RFs, defined by the mask */ + __le32 meas_rf_mask; + u8 reserved0[3]; struct wmi_responder_ftm_res responder_ftm_res[0]; } __packed; +/* WMI_TOF_CFG_RESPONDER_EVENTID */ +struct wmi_tof_cfg_responder_event { + /* enum wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + enum wmi_tof_channel_info_type { WMI_TOF_CHANNEL_INFO_AOA = 0x00, WMI_TOF_CHANNEL_INFO_LCI = 0x01, @@ -2353,12 +2571,15 @@ struct wmi_tof_set_tx_rx_offset_event { struct wmi_tof_get_tx_rx_offset_event { /* enum wmi_fw_status */ u8 status; - u8 reserved1[3]; + /* RF index used to read the offsets */ + u8 rf_index; + u8 reserved1[2]; /* TX delay offset */ __le32 tx_offset; /* RX delay offset */ __le32 rx_offset; - __le32 reserved2[2]; + /* Offset to strongest tap of CIR */ + __le32 precursor; } __packed; /* Result status codes for WMI commands */ @@ -2621,4 +2842,23 @@ struct wmi_prio_tx_sectors_set_default_cfg_event { u8 reserved[3]; } __packed; +/* WMI_SET_SILENT_RSSI_TABLE_DONE_EVENTID */ +struct wmi_set_silent_rssi_table_done_event { + /* enum wmi_silent_rssi_status */ + __le32 status; + /* enum wmi_silent_rssi_table */ + __le32 table; +} __packed; + +/* \WMI_COMMAND_NOT_SUPPORTED_EVENTID */ +struct wmi_command_not_supported_event { + /* device id */ + u8 mid; + u8 reserved0; + __le16 command_id; + /* for UT command only, otherwise reserved */ + __le16 command_subtype; + __le16 reserved1; +} __packed; + #endif /* __WILOCITY_WMI_H__ */ diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c index 09defbcedd5e..94bf01f8b2a8 100644 --- a/drivers/net/wireless/atmel/at76c50x-usb.c +++ b/drivers/net/wireless/atmel/at76c50x-usb.c @@ -130,7 +130,7 @@ MODULE_FIRMWARE("atmel_at76c505amx-rfmd.bin"); #define USB_DEVICE_DATA(__ops) .driver_info = (kernel_ulong_t)(__ops) -static struct usb_device_id dev_table[] = { +static const struct usb_device_id dev_table[] = { /* 
* at76c503-i3861 */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c index 984c1d0560b1..cd587325e286 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c @@ -1105,6 +1105,7 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = { BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43455), BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4354), BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4356), + BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_CYPRESS_4373), { /* end: all zeroes */ } }; MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 7e689c86d565..4157c90ad973 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -980,7 +980,7 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg, eth_broadcast_addr(params_le->bssid); params_le->bss_type = DOT11_BSSTYPE_ANY; - params_le->scan_type = 0; + params_le->scan_type = BRCMF_SCANTYPE_ACTIVE; params_le->channel_num = 0; params_le->nprobes = cpu_to_le32(-1); params_le->active_time = cpu_to_le32(-1); @@ -988,12 +988,9 @@ params_le->home_time = cpu_to_le32(-1); memset(&params_le->ssid_le, 0, sizeof(params_le->ssid_le)); - /* if request is null exit so it will be all channel broadcast scan */ - if (!request) - return; - n_ssids = request->n_ssids; n_channels = request->n_channels; + /* Copy channel array if applicable */ brcmf_dbg(SCAN, "### List of channelspecs to scan ### %d\n", n_channels); @@ -1030,16 +1027,8 @@ ptr += sizeof(ssid_le); } } else { - brcmf_dbg(SCAN, "Broadcast scan %p\n", request->ssids); - if ((request->ssids) && request->ssids->ssid_len) { - brcmf_dbg(SCAN, "SSID %s len=%d\n", - params_le->ssid_le.SSID, - request->ssids->ssid_len); - params_le->ssid_le.SSID_len = - cpu_to_le32(request->ssids->ssid_len); - memcpy(&params_le->ssid_le.SSID, request->ssids->ssid, - request->ssids->ssid_len); - } + brcmf_dbg(SCAN, "Performing passive scan\n"); + params_le->scan_type = BRCMF_SCANTYPE_PASSIVE; } /* Adding mask to channel numbers */ params_le->channel_num = @@ -3162,6 +3151,7 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp, struct brcmf_cfg80211_info *cfg = ifp->drvr->config; s32 status; struct brcmf_escan_result_le *escan_result_le; + u32 escan_buflen; struct brcmf_bss_info_le *bss_info_le; struct brcmf_bss_info_le *bss = NULL; u32 bi_length; @@ -3181,11 +3171,23 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp, if (status == BRCMF_E_STATUS_PARTIAL) { brcmf_dbg(SCAN, "ESCAN Partial result\n"); + if (e->datalen < sizeof(*escan_result_le)) { + brcmf_err("invalid event data length\n"); + goto exit; + } escan_result_le = (struct brcmf_escan_result_le *) data; if (!escan_result_le) { brcmf_err("Invalid escan result (NULL pointer)\n"); goto exit; } + escan_buflen = le32_to_cpu(escan_result_le->buflen); + if (escan_buflen > BRCMF_ESCAN_BUF_SIZE || + escan_buflen > e->datalen || + escan_buflen < sizeof(*escan_result_le)) { + brcmf_err("Invalid escan buffer length: %d\n", + escan_buflen); + goto exit; + } if (le16_to_cpu(escan_result_le->bss_count) != 1) { brcmf_err("Invalid bss_count %d: ignoring\n", escan_result_le->bss_count); @@ -3202,9 +3204,8 @@ 
brcmf_cfg80211_escan_handler(struct brcmf_if *ifp, } bi_length = le32_to_cpu(bss_info_le->length); - if (bi_length != (le32_to_cpu(escan_result_le->buflen) - - WL_ESCAN_RESULTS_FIXED_SIZE)) { - brcmf_err("Invalid bss_info length %d: ignoring\n", + if (bi_length != escan_buflen - WL_ESCAN_RESULTS_FIXED_SIZE) { + brcmf_err("Ignoring invalid bss_info length: %d\n", bi_length); goto exit; } @@ -3940,6 +3941,7 @@ brcmf_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *ndev) static s32 brcmf_configure_opensecurity(struct brcmf_if *ifp) { s32 err; + s32 wpa_val; /* set auth */ err = brcmf_fil_bsscfg_int_set(ifp, "auth", 0); @@ -3954,7 +3956,11 @@ static s32 brcmf_configure_opensecurity(struct brcmf_if *ifp) return err; } /* set upper-layer auth */ - err = brcmf_fil_bsscfg_int_set(ifp, "wpa_auth", WPA_AUTH_NONE); + if (brcmf_is_ibssmode(ifp->vif)) + wpa_val = WPA_AUTH_NONE; + else + wpa_val = WPA_AUTH_DISABLED; + err = brcmf_fil_bsscfg_int_set(ifp, "wpa_auth", wpa_val); if (err < 0) { brcmf_err("wpa_auth error %d\n", err); return err; @@ -5693,10 +5699,13 @@ brcmf_notify_roaming_status(struct brcmf_if *ifp, u32 status = e->status; if (event == BRCMF_E_ROAM && status == BRCMF_E_STATUS_SUCCESS) { - if (test_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state)) + if (test_bit(BRCMF_VIF_STATUS_CONNECTED, + &ifp->vif->sme_state)) { brcmf_bss_roaming_done(cfg, ifp->ndev, e); - else + } else { brcmf_bss_connect_done(cfg, ifp->ndev, e, true); + brcmf_net_setcarrier(ifp, true); + } } return 0; @@ -6456,6 +6465,8 @@ static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp) if (p2p) { if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MCHAN)) combo[c].num_different_channels = 2; + else + combo[c].num_different_channels = 1; wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_P2P_DEVICE); @@ -6465,10 +6476,10 @@ static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp) c0_limits[i++].types = BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO); } else { + combo[c].num_different_channels = 1; c0_limits[i].max = 1; c0_limits[i++].types = BIT(NL80211_IFTYPE_AP); } - combo[c].num_different_channels = 1; combo[c].max_interfaces = i; combo[c].n_limits = i; combo[c].limits = c0_limits; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c index 05f22ff81d60..c5d1a1cbf601 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c @@ -690,6 +690,8 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci) case BRCM_CC_4365_CHIP_ID: case BRCM_CC_4366_CHIP_ID: return 0x200000; + case CY_CC_4373_CHIP_ID: + return 0x160000; default: brcmf_err("unknown chip: %s\n", ci->pub.name); break; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c index 1447a8352383..2d3e5e263a32 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c @@ -78,10 +78,7 @@ int brcmf_debug_attach(struct brcmf_pub *drvr) return -ENODEV; drvr->dbgfs_dir = debugfs_create_dir(dev_name(dev), root_folder); - if (IS_ERR(drvr->dbgfs_dir)) - return PTR_ERR(drvr->dbgfs_dir); - - return 0; + return PTR_ERR_OR_ZERO(drvr->dbgfs_dir); } void brcmf_debug_detach(struct brcmf_pub *drvr) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c 
b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c index f1b60740e020..53ae30259989 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c @@ -159,7 +159,8 @@ void brcmf_feat_attach(struct brcmf_pub *drvr) brcmf_feat_firmware_capabilities(ifp); memset(&gscan_cfg, 0, sizeof(gscan_cfg)); - if (drvr->bus_if->chip != BRCM_CC_43430_CHIP_ID) + if (drvr->bus_if->chip != BRCM_CC_43430_CHIP_ID && + drvr->bus_if->chip != BRCM_CC_4345_CHIP_ID) brcmf_feat_iovar_data_set(ifp, BRCMF_FEAT_GSCAN, "pfn_gscan_cfg", &gscan_cfg, sizeof(gscan_cfg)); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c index d231042f19d6..091b52979e03 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c @@ -601,6 +601,9 @@ int brcmf_fw_map_chip_to_name(u32 chip, u32 chiprev, if ((nvram_name) && (mapping_table[i].nvram)) strlcat(nvram_name, mapping_table[i].nvram, BRCMF_FW_NAME_LEN); + brcmf_info("using %s for chip %#08x(%d) rev %#08x\n", + fw_name, chip, chip, chiprev); + return 0; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h index 8391989b1882..e0d22fedb2b4 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h @@ -45,6 +45,11 @@ #define BRCMF_SCAN_PARAMS_COUNT_MASK 0x0000ffff #define BRCMF_SCAN_PARAMS_NSSID_SHIFT 16 +/* scan type definitions */ +#define BRCMF_SCANTYPE_DEFAULT 0xFF +#define BRCMF_SCANTYPE_ACTIVE 0 +#define BRCMF_SCANTYPE_PASSIVE 1 + #define BRCMF_WSEC_MAX_PSK_LEN 32 #define BRCMF_WSEC_PASSPHRASE BIT(0) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index f878706613e6..e6e9b00b79d7 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -1951,7 +1951,7 @@ static const struct dev_pm_ops brcmf_pciedrvr_pm = { BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\ subvend, subdev, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 } -static struct pci_device_id brcmf_pcie_devid_table[] = { +static const struct pci_device_id brcmf_pcie_devid_table[] = { BRCMF_PCIE_DEVICE(BRCM_PCIE_4350_DEVICE_ID), BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID), BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID), diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index f3556122c6ac..613caca7dc02 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -618,6 +618,7 @@ BRCMF_FW_NVRAM_DEF(43430A1, "brcmfmac43430-sdio.bin", "brcmfmac43430-sdio.txt"); BRCMF_FW_NVRAM_DEF(43455, "brcmfmac43455-sdio.bin", "brcmfmac43455-sdio.txt"); BRCMF_FW_NVRAM_DEF(4354, "brcmfmac4354-sdio.bin", "brcmfmac4354-sdio.txt"); BRCMF_FW_NVRAM_DEF(4356, "brcmfmac4356-sdio.bin", "brcmfmac4356-sdio.txt"); +BRCMF_FW_NVRAM_DEF(4373, "brcmfmac4373-sdio.bin", "brcmfmac4373-sdio.txt"); static struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = { BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43143_CHIP_ID, 0xFFFFFFFF, 43143), @@ -636,7 +637,8 @@ static struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = { BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43430_CHIP_ID, 0xFFFFFFFE, 43430A1), 
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4345_CHIP_ID, 0xFFFFFFC0, 43455), BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, 4354), - BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356) + BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356), + BRCMF_FW_NVRAM_ENTRY(CY_CC_4373_CHIP_ID, 0xFFFFFFFF, 4373) }; static void pkt_align(struct sk_buff *p, int len, int align) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c index 0eea48e73331..11ffaa01599e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c @@ -50,6 +50,7 @@ BRCMF_FW_DEF(43143, "brcmfmac43143.bin"); BRCMF_FW_DEF(43236B, "brcmfmac43236b.bin"); BRCMF_FW_DEF(43242A, "brcmfmac43242a.bin"); BRCMF_FW_DEF(43569, "brcmfmac43569.bin"); +BRCMF_FW_DEF(4373, "brcmfmac4373.bin"); static struct brcmf_firmware_mapping brcmf_usb_fwnames[] = { BRCMF_FW_ENTRY(BRCM_CC_43143_CHIP_ID, 0xFFFFFFFF, 43143), @@ -58,7 +59,8 @@ static struct brcmf_firmware_mapping brcmf_usb_fwnames[] = { BRCMF_FW_ENTRY(BRCM_CC_43238_CHIP_ID, 0x00000008, 43236B), BRCMF_FW_ENTRY(BRCM_CC_43242_CHIP_ID, 0xFFFFFFFF, 43242A), BRCMF_FW_ENTRY(BRCM_CC_43566_CHIP_ID, 0xFFFFFFFF, 43569), - BRCMF_FW_ENTRY(BRCM_CC_43569_CHIP_ID, 0xFFFFFFFF, 43569) + BRCMF_FW_ENTRY(BRCM_CC_43569_CHIP_ID, 0xFFFFFFFF, 43569), + BRCMF_FW_ENTRY(CY_CC_4373_CHIP_ID, 0xFFFFFFFF, 4373) }; #define TRX_MAGIC 0x30524448 /* "HDR0" */ @@ -1463,15 +1465,20 @@ static int brcmf_usb_reset_resume(struct usb_interface *intf) #define LINKSYS_USB_DEVICE(dev_id) \ { USB_DEVICE(BRCM_USB_VENDOR_ID_LINKSYS, dev_id) } -static struct usb_device_id brcmf_usb_devid_table[] = { +#define CYPRESS_USB_DEVICE(dev_id) \ + { USB_DEVICE(CY_USB_VENDOR_ID_CYPRESS, dev_id) } + +static const struct usb_device_id brcmf_usb_devid_table[] = { BRCMF_USB_DEVICE(BRCM_USB_43143_DEVICE_ID), BRCMF_USB_DEVICE(BRCM_USB_43236_DEVICE_ID), BRCMF_USB_DEVICE(BRCM_USB_43242_DEVICE_ID), BRCMF_USB_DEVICE(BRCM_USB_43569_DEVICE_ID), LINKSYS_USB_DEVICE(BRCM_USB_43235_LINKSYS_DEVICE_ID), + CYPRESS_USB_DEVICE(CY_USB_4373_DEVICE_ID), { USB_DEVICE(BRCM_USB_VENDOR_ID_LG, BRCM_USB_43242_LG_DEVICE_ID) }, /* special entry for device with firmware loaded and running */ BRCMF_USB_DEVICE(BRCM_USB_BCMFW_DEVICE_ID), + CYPRESS_USB_DEVICE(BRCM_USB_BCMFW_DEVICE_ID), { /* end: all zeroes */ } }; diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h index f1fb8a3c7a32..57544a3a3ce4 100644 --- a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h +++ b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h @@ -23,6 +23,7 @@ #define BRCM_USB_VENDOR_ID_BROADCOM 0x0a5c #define BRCM_USB_VENDOR_ID_LG 0x043e #define BRCM_USB_VENDOR_ID_LINKSYS 0x13b1 +#define CY_USB_VENDOR_ID_CYPRESS 0x04b4 #define BRCM_PCIE_VENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM /* Chipcommon Core Chip IDs */ @@ -57,6 +58,7 @@ #define BRCM_CC_4365_CHIP_ID 0x4365 #define BRCM_CC_4366_CHIP_ID 0x4366 #define BRCM_CC_4371_CHIP_ID 0x4371 +#define CY_CC_4373_CHIP_ID 0x4373 /* USB Device IDs */ #define BRCM_USB_43143_DEVICE_ID 0xbd1e @@ -66,6 +68,7 @@ #define BRCM_USB_43242_LG_DEVICE_ID 0x3101 #define BRCM_USB_43569_DEVICE_ID 0xbd27 #define BRCM_USB_BCMFW_DEVICE_ID 0x0bdc +#define CY_USB_4373_DEVICE_ID 0xbd29 /* PCIE Device IDs */ #define BRCM_PCIE_4350_DEVICE_ID 0x43a3 diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c index 
84143a02adce..54201c02fdb8 100644 --- a/drivers/net/wireless/cisco/airo.c +++ b/drivers/net/wireless/cisco/airo.c @@ -7837,7 +7837,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) { struct airo_info *ai = dev->ml_priv; int ridcode; int enabled; - static int (* writer)(struct airo_info *, u16 rid, const void *, int, int); + int (*writer)(struct airo_info *, u16 rid, const void *, int, int); unsigned char *iobuf; /* Only super-user can write RIDs */ diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c index aaaca4d08e2b..19c442cb93e4 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c @@ -340,7 +340,7 @@ static int ipw2100_ucode_download(struct ipw2100_priv *priv, struct ipw2100_fw *fw); static void ipw2100_wx_event_work(struct work_struct *work); static struct iw_statistics *ipw2100_wx_wireless_stats(struct net_device *dev); -static struct iw_handler_def ipw2100_wx_handler_def; +static const struct iw_handler_def ipw2100_wx_handler_def; static inline void read_register(struct net_device *dev, u32 reg, u32 * val) { @@ -1724,7 +1724,7 @@ static const struct libipw_geo ipw_geos[] = { static int ipw2100_up(struct ipw2100_priv *priv, int deferred) { unsigned long flags; - int rc = 0; + int err = 0; u32 lock; u32 ord_len = sizeof(lock); @@ -1757,33 +1757,33 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred) if (priv->status & STATUS_POWERED || (priv->status & STATUS_RESET_PENDING)) { /* Power cycle the card ... */ - if (ipw2100_power_cycle_adapter(priv)) { + err = ipw2100_power_cycle_adapter(priv); + if (err) { printk(KERN_WARNING DRV_NAME ": %s: Could not cycle adapter.\n", priv->net_dev->name); - rc = 1; goto exit; } } else priv->status |= STATUS_POWERED; /* Load the firmware, start the clocks, etc. 
*/ - if (ipw2100_start_adapter(priv)) { + err = ipw2100_start_adapter(priv); + if (err) { printk(KERN_ERR DRV_NAME ": %s: Failed to start the firmware.\n", priv->net_dev->name); - rc = 1; goto exit; } ipw2100_initialize_ordinals(priv); /* Determine capabilities of this particular HW configuration */ - if (ipw2100_get_hw_features(priv)) { + err = ipw2100_get_hw_features(priv); + if (err) { printk(KERN_ERR DRV_NAME ": %s: Failed to determine HW features.\n", priv->net_dev->name); - rc = 1; goto exit; } @@ -1792,11 +1792,11 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred) priv->ieee->freq_band = LIBIPW_24GHZ_BAND; lock = LOCK_NONE; - if (ipw2100_set_ordinal(priv, IPW_ORD_PERS_DB_LOCK, &lock, &ord_len)) { + err = ipw2100_set_ordinal(priv, IPW_ORD_PERS_DB_LOCK, &lock, &ord_len); + if (err) { printk(KERN_ERR DRV_NAME ": %s: Failed to clear ordinal lock.\n", priv->net_dev->name); - rc = 1; goto exit; } @@ -1820,21 +1820,21 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred) /* Send all of the commands that must be sent prior to * HOST_COMPLETE */ - if (ipw2100_adapter_setup(priv)) { + err = ipw2100_adapter_setup(priv); + if (err) { printk(KERN_ERR DRV_NAME ": %s: Failed to start the card.\n", priv->net_dev->name); - rc = 1; goto exit; } if (!deferred) { /* Enable the adapter - sends HOST_COMPLETE */ - if (ipw2100_enable_adapter(priv)) { + err = ipw2100_enable_adapter(priv); + if (err) { printk(KERN_ERR DRV_NAME ": " "%s: failed in call to enable adapter.\n", priv->net_dev->name); ipw2100_hw_stop_adapter(priv); - rc = 1; goto exit; } @@ -1844,7 +1844,7 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred) } exit: - return rc; + return err; } static void ipw2100_down(struct ipw2100_priv *priv) @@ -4324,7 +4324,7 @@ static struct attribute *ipw2100_sysfs_entries[] = { NULL, }; -static struct attribute_group ipw2100_attribute_group = { +static const struct attribute_group ipw2100_attribute_group = { .attrs = ipw2100_sysfs_entries, }; @@ -8273,7 +8273,7 @@ static struct iw_statistics *ipw2100_wx_wireless_stats(struct net_device *dev) return (struct iw_statistics *)NULL; } -static struct iw_handler_def ipw2100_wx_handler_def = { +static const struct iw_handler_def ipw2100_wx_handler_def = { .standard = ipw2100_wx_handlers, .num_standard = ARRAY_SIZE(ipw2100_wx_handlers), .num_private = ARRAY_SIZE(ipw2100_private_handler), diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c index 9368abdf18e2..8da87496cb58 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c @@ -3209,7 +3209,7 @@ static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len) struct fw_chunk *chunk; int total_nr = 0; int i; - struct pci_pool *pool; + struct dma_pool *pool; void **virts; dma_addr_t *phys; @@ -3226,9 +3226,10 @@ static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len) kfree(virts); return -ENOMEM; } - pool = pci_pool_create("ipw2200", priv->pci_dev, CB_MAX_LENGTH, 0, 0); + pool = dma_pool_create("ipw2200", &priv->pci_dev->dev, CB_MAX_LENGTH, 0, + 0); if (!pool) { - IPW_ERROR("pci_pool_create failed\n"); + IPW_ERROR("dma_pool_create failed\n"); kfree(phys); kfree(virts); return -ENOMEM; @@ -3253,7 +3254,7 @@ static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len) nr = (chunk_len + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH; for (i = 0; i < nr; i++) { - virts[total_nr] = pci_pool_alloc(pool, GFP_KERNEL, + virts[total_nr] = 
dma_pool_alloc(pool, GFP_KERNEL, &phys[total_nr]); if (!virts[total_nr]) { ret = -ENOMEM; @@ -3297,9 +3298,9 @@ static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len) } out: for (i = 0; i < total_nr; i++) - pci_pool_free(pool, virts[i], phys[i]); + dma_pool_free(pool, virts[i], phys[i]); - pci_pool_destroy(pool); + dma_pool_destroy(pool); kfree(phys); kfree(virts); @@ -10008,7 +10009,7 @@ static iw_handler ipw_priv_handler[] = { #endif }; -static struct iw_handler_def ipw_wx_handler_def = { +static const struct iw_handler_def ipw_wx_handler_def = { .standard = ipw_wx_handlers, .num_standard = ARRAY_SIZE(ipw_wx_handlers), .num_private = ARRAY_SIZE(ipw_priv_handler), @@ -11500,7 +11501,7 @@ static struct attribute *ipw_sysfs_entries[] = { NULL }; -static struct attribute_group ipw_attribute_group = { +static const struct attribute_group ipw_attribute_group = { .name = NULL, /* put in device directory */ .attrs = ipw_sysfs_entries, }; diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c index 38bf403bb1e1..329f3a63dadd 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c @@ -3464,7 +3464,7 @@ static struct attribute *il3945_sysfs_entries[] = { NULL }; -static struct attribute_group il3945_attribute_group = { +static const struct attribute_group il3945_attribute_group = { .name = NULL, /* put in device directory */ .attrs = il3945_sysfs_entries, }; diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c index 5b51fba75595..de9b6522c43f 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c @@ -4654,7 +4654,7 @@ static struct attribute *il_sysfs_entries[] = { NULL }; -static struct attribute_group il_attribute_group = { +static const struct attribute_group il_attribute_group = { .name = NULL, /* put in device directory */ .attrs = il_sysfs_entries, }; diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile index 20bd261223af..35a32a3ec882 100644 --- a/drivers/net/wireless/intel/iwlwifi/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/Makefile @@ -11,6 +11,8 @@ iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/a000.o iwlwifi-objs += iwl-trans.o iwlwifi-objs += fw/notif-wait.o +iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o fw/dbg.o +iwlwifi-$(CONFIG_IWLMVM) += fw/common_rx.o fw/nvm.o iwlwifi-objs += $(iwlwifi-m) diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c index 5081720608af..2e6c52664cee 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c @@ -70,8 +70,8 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL8000_UCODE_API_MAX 33 -#define IWL8265_UCODE_API_MAX 33 +#define IWL8000_UCODE_API_MAX 34 +#define IWL8265_UCODE_API_MAX 34 /* Lowest firmware API version supported */ #define IWL8000_UCODE_API_MIN 22 diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c index 97208ce19f92..2babe0a1f18b 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c @@ -55,7 +55,7 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define 
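For reference, the ipw_load_firmware() change just above is a mechanical pci_pool to dma_pool conversion: the generic pool API takes a struct device instead of a pci_dev and otherwise maps one to one. A self-contained sketch of the round trip (the function and pool name are illustrative only):

#include <linux/dmapool.h>
#include <linux/pci.h>

static int example_dma_pool_roundtrip(struct pci_dev *pdev, size_t block)
{
	struct dma_pool *pool;
	dma_addr_t phys;
	void *virt;

	/* was: pci_pool_create("ipw2200", pdev, block, 0, 0) */
	pool = dma_pool_create("example", &pdev->dev, block, 0, 0);
	if (!pool)
		return -ENOMEM;

	virt = dma_pool_alloc(pool, GFP_KERNEL, &phys);	/* was pci_pool_alloc() */
	if (virt)
		dma_pool_free(pool, virt, phys);	/* was pci_pool_free() */

	dma_pool_destroy(pool);				/* was pci_pool_destroy() */
	return virt ? 0 : -ENOMEM;
}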
IWL9000_UCODE_API_MAX 33 +#define IWL9000_UCODE_API_MAX 34 /* Lowest firmware API version supported */ #define IWL9000_UCODE_API_MIN 30 diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c b/drivers/net/wireless/intel/iwlwifi/cfg/a000.c index 98f24cd1b44f..76ba1f8bc72f 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/a000.c @@ -55,7 +55,7 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL_A000_UCODE_API_MAX 33 +#define IWL_A000_UCODE_API_MAX 34 /* Lowest firmware API version supported */ #define IWL_A000_UCODE_API_MIN 24 @@ -75,11 +75,20 @@ #define IWL_A000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-" #define IWL_A000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-" #define IWL_A000_HR_CDB_FW_PRE "iwlwifi-QuIcp-z0-hrcdb-a0-" +#define IWL_A000_HR_F0_FW_PRE "iwlwifi-QuQnj-f0-hr-a0-" +#define IWL_A000_JF_B0_FW_PRE "iwlwifi-QuQnj-a0-jf-b0-" +#define IWL_A000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-" #define IWL_A000_HR_MODULE_FIRMWARE(api) \ IWL_A000_HR_FW_PRE "-" __stringify(api) ".ucode" #define IWL_A000_JF_MODULE_FIRMWARE(api) \ IWL_A000_JF_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_A000_HR_F0_QNJ_MODULE_FIRMWARE(api) \ + IWL_A000_HR_F0_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_A000_JF_B0_QNJ_MODULE_FIRMWARE(api) \ + IWL_A000_JF_B0_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_A000_HR_A0_QNJ_MODULE_FIRMWARE(api) \ + IWL_A000_HR_A0_FW_PRE "-" __stringify(api) ".ucode" #define NVM_HW_SECTION_NUM_FAMILY_A000 10 @@ -168,5 +177,38 @@ const struct iwl_cfg iwla000_2ax_cfg_hr = { .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, }; +const struct iwl_cfg iwla000_2ax_cfg_qnj_hr_f0 = { + .name = "Intel(R) Dual Band Wireless AX a000", + .fw_name_pre = IWL_A000_HR_F0_FW_PRE, + IWL_DEVICE_A000, + .ht_params = &iwl_a000_ht_params, + .nvm_ver = IWL_A000_NVM_VERSION, + .nvm_calib_ver = IWL_A000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, +}; + +const struct iwl_cfg iwla000_2ax_cfg_qnj_jf_b0 = { + .name = "Intel(R) Dual Band Wireless AX a000", + .fw_name_pre = IWL_A000_JF_B0_FW_PRE, + IWL_DEVICE_A000, + .ht_params = &iwl_a000_ht_params, + .nvm_ver = IWL_A000_NVM_VERSION, + .nvm_calib_ver = IWL_A000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, +}; + +const struct iwl_cfg iwla000_2ax_cfg_qnj_hr_a0 = { + .name = "Intel(R) Dual Band Wireless AX a000", + .fw_name_pre = IWL_A000_HR_A0_FW_PRE, + IWL_DEVICE_A000, + .ht_params = &iwl_a000_ht_params, + .nvm_ver = IWL_A000_NVM_VERSION, + .nvm_calib_ver = IWL_A000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, +}; + MODULE_FIRMWARE(IWL_A000_HR_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_A000_JF_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_A000_HR_F0_QNJ_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_A000_JF_B0_QNJ_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_A000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h index 2ab2773655a8..f89736d60a3d 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h @@ -311,11 +311,6 @@ enum { /** * rate_n_flags Tx antenna masks - * 4965 has 2 transmitters - * 5100 has 1 transmitter B - * 5150 has 1 transmitter A - * 5300 has 3 transmitters - * 5350 has 3 transmitters * bit14:16 */ #define RATE_MCS_ANT_POS 14 @@ -1230,7 
+1225,6 @@ struct iwl_rx_mpdu_res_start { */ /* - * 4965 uCode updates these Tx attempt count values in host DRAM. * Used for managing Tx retries when expecting block-acks. * Driver should set these fields to 0. */ @@ -1437,22 +1431,6 @@ struct agg_tx_status { __le16 sequence; } __packed; -/* - * definitions for initial rate index field - * bits [3:0] initial rate index - * bits [6:4] rate table color, used for the initial rate - * bit-7 invalid rate indication - * i.e. rate was not chosen from rate table - * or rate table color was changed during frame retries - * refer tlc rate info - */ - -#define IWL50_TX_RES_INIT_RATE_INDEX_POS 0 -#define IWL50_TX_RES_INIT_RATE_INDEX_MSK 0x0f -#define IWL50_TX_RES_RATE_TABLE_COLOR_POS 4 -#define IWL50_TX_RES_RATE_TABLE_COLOR_MSK 0x70 -#define IWL50_TX_RES_INV_RATE_INDEX_MSK 0x80 - /* refer to ra_tid */ #define IWLAGN_TX_RES_TID_POS 0 #define IWLAGN_TX_RES_TID_MSK 0x0f @@ -1556,7 +1534,7 @@ struct iwl_link_qual_general_params { /* Best single antenna to use for single stream (legacy, SISO). */ u8 single_stream_ant_msk; /* LINK_QUAL_ANT_* */ - /* Best antennas to use for MIMO (unused for 4965, assumes both). */ + /* Best antennas to use for MIMO */ u8 dual_stream_ant_msk; /* LINK_QUAL_ANT_* */ /* diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h new file mode 100644 index 000000000000..3684a3e180e5 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h @@ -0,0 +1,206 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __iwl_fw_api_alive_h__ +#define __iwl_fw_api_alive_h__ + +/* alive response is_valid values */ +#define ALIVE_RESP_UCODE_OK BIT(0) +#define ALIVE_RESP_RFKILL BIT(1) + +/* alive response ver_type values */ +enum { + FW_TYPE_HW = 0, + FW_TYPE_PROT = 1, + FW_TYPE_AP = 2, + FW_TYPE_WOWLAN = 3, + FW_TYPE_TIMING = 4, + FW_TYPE_WIPAN = 5 +}; + +/* alive response ver_subtype values */ +enum { + FW_SUBTYPE_FULL_FEATURE = 0, + FW_SUBTYPE_BOOTSRAP = 1, /* Not valid */ + FW_SUBTYPE_REDUCED = 2, + FW_SUBTYPE_ALIVE_ONLY = 3, + FW_SUBTYPE_WOWLAN = 4, + FW_SUBTYPE_AP_SUBTYPE = 5, + FW_SUBTYPE_WIPAN = 6, + FW_SUBTYPE_INITIALIZE = 9 +}; + +#define IWL_ALIVE_STATUS_ERR 0xDEAD +#define IWL_ALIVE_STATUS_OK 0xCAFE + +#define IWL_ALIVE_FLG_RFKILL BIT(0) + +struct iwl_lmac_alive { + __le32 ucode_minor; + __le32 ucode_major; + u8 ver_subtype; + u8 ver_type; + u8 mac; + u8 opt; + __le32 timestamp; + __le32 error_event_table_ptr; /* SRAM address for error log */ + __le32 log_event_table_ptr; /* SRAM address for LMAC event log */ + __le32 cpu_register_ptr; + __le32 dbgm_config_ptr; + __le32 alive_counter_ptr; + __le32 scd_base_ptr; /* SRAM address for SCD */ + __le32 st_fwrd_addr; /* pointer to Store and forward */ + __le32 st_fwrd_size; +} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_3 */ + +struct iwl_umac_alive { + __le32 umac_minor; /* UMAC version: minor */ + __le32 umac_major; /* UMAC version: major */ + __le32 error_info_addr; /* SRAM address for UMAC error log */ + __le32 dbg_print_buff_addr; +} __packed; /* UMAC_ALIVE_DATA_API_S_VER_2 */ + +struct mvm_alive_resp_v3 { + __le16 status; + __le16 flags; + struct iwl_lmac_alive lmac_data; + struct iwl_umac_alive umac_data; +} __packed; /* ALIVE_RES_API_S_VER_3 */ + +struct mvm_alive_resp { + __le16 status; + __le16 flags; + struct iwl_lmac_alive lmac_data[2]; + struct iwl_umac_alive umac_data; +} __packed; /* ALIVE_RES_API_S_VER_4 */ + +/** + * enum iwl_extended_cfg_flag - commands driver may send before + * finishing init flow + * @IWL_INIT_DEBUG_CFG: driver is going to send debug config command + * @IWL_INIT_NVM: driver is going to send NVM_ACCESS commands + * @IWL_INIT_PHY: driver is going to send PHY_DB commands + */ +enum iwl_extended_cfg_flags { + IWL_INIT_DEBUG_CFG, + IWL_INIT_NVM, + IWL_INIT_PHY, +}; + +/** + * struct iwl_extended_cfg_cmd - mark what commands ucode should wait for + * before finishing init flows + * @init_flags: values from iwl_extended_cfg_flags + */ +struct 
iwl_init_extended_cfg_cmd { + __le32 init_flags; +} __packed; /* INIT_EXTENDED_CFG_CMD_API_S_VER_1 */ + +/** + * struct iwl_radio_version_notif - information on the radio version + * ( RADIO_VERSION_NOTIFICATION = 0x68 ) + * @radio_flavor: radio flavor + * @radio_step: radio version step + * @radio_dash: radio version dash + */ +struct iwl_radio_version_notif { + __le32 radio_flavor; + __le32 radio_step; + __le32 radio_dash; +} __packed; /* RADIO_VERSION_NOTOFICATION_S_VER_1 */ + +enum iwl_card_state_flags { + CARD_ENABLED = 0x00, + HW_CARD_DISABLED = 0x01, + SW_CARD_DISABLED = 0x02, + CT_KILL_CARD_DISABLED = 0x04, + HALT_CARD_DISABLED = 0x08, + CARD_DISABLED_MSK = 0x0f, + CARD_IS_RX_ON = 0x10, +}; + +/** + * struct iwl_radio_version_notif - information on the card state + * ( CARD_STATE_NOTIFICATION = 0xa1 ) + * @flags: &enum iwl_card_state_flags + */ +struct iwl_card_state_notif { + __le32 flags; +} __packed; /* CARD_STATE_NTFY_API_S_VER_1 */ + +/** + * struct iwl_fseq_ver_mismatch_nty - Notification about version + * + * This notification does not have a direct impact on the init flow. + * It means that another core (not WiFi) has initiated the FSEQ flow + * and updated the FSEQ version. The driver only prints an error when + * this occurs. + * + * @aux_read_fseq_ver: auxiliary read FSEQ version + * @wifi_fseq_ver: FSEQ version (embedded in WiFi) + */ +struct iwl_fseq_ver_mismatch_ntf { + __le32 aux_read_fseq_ver; + __le32 wifi_fseq_ver; +} __packed; /* FSEQ_VER_MISMATCH_NTFY_API_S_VER_1 */ + +#endif /* __iwl_fw_api_alive_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h b/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h new file mode 100644 index 000000000000..d2717fafdf5b --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h @@ -0,0 +1,144 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
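The alive response layouts and magic status words introduced in fw/api/alive.h above are enough to sketch a version-4 handler; the function name and what is done with the error-table pointer are placeholders, not part of the patch:

static bool example_handle_alive_v4(const struct mvm_alive_resp *resp,
				    u32 *lmac_err_table)
{
	if (le16_to_cpu(resp->status) != IWL_ALIVE_STATUS_OK)
		return false;	/* firmware reported IWL_ALIVE_STATUS_ERR (0xDEAD) */

	if (le16_to_cpu(resp->flags) & IWL_ALIVE_FLG_RFKILL)
		pr_info("firmware came up with RF-kill asserted\n");

	/* the second lmac_data entry is only meaningful on CDB devices;
	 * index 0 is always present
	 */
	*lmac_err_table = le32_to_cpu(resp->lmac_data[0].error_event_table_ptr);
	return true;
}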
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __iwl_fw_api_binding_h__ +#define __iwl_fw_api_binding_h__ + +#define MAX_MACS_IN_BINDING (3) +#define MAX_BINDINGS (4) + +/** + * struct iwl_binding_cmd_v1 - configuring bindings + * ( BINDING_CONTEXT_CMD = 0x2b ) + * @id_and_color: ID and color of the relevant Binding, + * &enum iwl_ctxt_id_and_color + * @action: action to perform, one of FW_CTXT_ACTION_* + * @macs: array of MAC id and colors which belong to the binding, + * &enum iwl_ctxt_id_and_color + * @phy: PHY id and color which belongs to the binding, + * &enum iwl_ctxt_id_and_color + */ +struct iwl_binding_cmd_v1 { + /* COMMON_INDEX_HDR_API_S_VER_1 */ + __le32 id_and_color; + __le32 action; + /* BINDING_DATA_API_S_VER_1 */ + __le32 macs[MAX_MACS_IN_BINDING]; + __le32 phy; +} __packed; /* BINDING_CMD_API_S_VER_1 */ + +/** + * struct iwl_binding_cmd - configuring bindings + * ( BINDING_CONTEXT_CMD = 0x2b ) + * @id_and_color: ID and color of the relevant Binding, + * &enum iwl_ctxt_id_and_color + * @action: action to perform, one of FW_CTXT_ACTION_* + * @macs: array of MAC id and colors which belong to the binding + * &enum iwl_ctxt_id_and_color + * @phy: PHY id and color which belongs to the binding + * &enum iwl_ctxt_id_and_color + * @lmac_id: the lmac id the binding belongs to + */ +struct iwl_binding_cmd { + /* COMMON_INDEX_HDR_API_S_VER_1 */ + __le32 id_and_color; + __le32 action; + /* BINDING_DATA_API_S_VER_1 */ + __le32 macs[MAX_MACS_IN_BINDING]; + __le32 phy; + __le32 lmac_id; +} __packed; /* BINDING_CMD_API_S_VER_2 */ + +#define IWL_BINDING_CMD_SIZE_V1 sizeof(struct iwl_binding_cmd_v1) +#define IWL_LMAC_24G_INDEX 0 +#define IWL_LMAC_5G_INDEX 1 + +/* The maximal number of fragments in the FW's schedule session */ +#define IWL_MVM_MAX_QUOTA 128 + +/** + * struct iwl_time_quota_data - configuration of time quota per binding + * @id_and_color: ID and color of the relevant Binding, + * &enum iwl_ctxt_id_and_color + * @quota: absolute time quota in TU. The scheduler will try to divide the + * remainig quota (after Time Events) according to this quota. 
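binding.h keeps the v1 layout next to the CDB-aware v2 precisely so the driver can pick the command size at run time. A sketch of that choice (the builder function and the cdb_supported flag are illustrative; FW_CTXT_ACTION_ADD comes from fw/api/context.h later in this patch):

/* Returns the number of bytes to hand to the host-command layer. */
static u32 example_build_binding(struct iwl_binding_cmd *cmd,
				 bool cdb_supported, bool band_is_5ghz,
				 u32 id_and_color)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->id_and_color = cpu_to_le32(id_and_color);
	cmd->action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	/* real code would also fill cmd->macs[] and cmd->phy here */

	if (cdb_supported)
		cmd->lmac_id = cpu_to_le32(band_is_5ghz ? IWL_LMAC_5G_INDEX
							: IWL_LMAC_24G_INDEX);

	/* non-CDB firmware only understands the shorter v1 layout */
	return cdb_supported ? sizeof(*cmd) : IWL_BINDING_CMD_SIZE_V1;
}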
+ * @max_duration: max uninterrupted context duration in TU + */ +struct iwl_time_quota_data { + __le32 id_and_color; + __le32 quota; + __le32 max_duration; +} __packed; /* TIME_QUOTA_DATA_API_S_VER_1 */ + +/** + * struct iwl_time_quota_cmd - configuration of time quota between bindings + * ( TIME_QUOTA_CMD = 0x2c ) + * @quotas: allocations per binding + * Note: on non-CDB the fourth one is the auxilary mac and is + * essentially zero. + * On CDB the fourth one is a regular binding. + */ +struct iwl_time_quota_cmd { + struct iwl_time_quota_data quotas[MAX_BINDINGS]; +} __packed; /* TIME_QUOTA_ALLOCATION_CMD_API_S_VER_1 */ + +#endif /* __iwl_fw_api_binding_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api.h b/drivers/net/wireless/intel/iwlwifi/fw/api/cmdhdr.h similarity index 82% rename from drivers/net/wireless/intel/iwlwifi/fw/api.h rename to drivers/net/wireless/intel/iwlwifi/fw/api/cmdhdr.h index 0e107f916ce3..ea4a3f04a83a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/cmdhdr.h @@ -59,8 +59,8 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ -#ifndef __iwl_fw_api_h__ -#define __iwl_fw_api_h__ +#ifndef __iwl_fw_api_cmdhdr_h__ +#define __iwl_fw_api_cmdhdr_h__ /** * DOC: Host command section @@ -112,15 +112,24 @@ static inline u32 iwl_cmd_id(u8 opcode, u8 groupid, u8 version) #define IWL_ALWAYS_LONG_GROUP 1 /** - * struct iwl_cmd_header + * struct iwl_cmd_header - (short) command header format * * This header format appears in the beginning of each command sent from the * driver, and each response/notification received from uCode. */ struct iwl_cmd_header { - u8 cmd; /* Command ID: REPLY_RXON, etc. */ + /** + * @cmd: Command ID: REPLY_RXON, etc. + */ + u8 cmd; + /** + * @group_id: group ID, for commands with groups + */ u8 group_id; - /* + /** + * @sequence: + * Sequence number for the command. + * * The driver sets up the sequence number to values of its choosing. * uCode does not use this value, but passes it back to the driver * when sending the response to each driver-originated command, so @@ -150,6 +159,13 @@ struct iwl_cmd_header { * driver, and each response/notification received from uCode. * this is the wide version that contains more information about the command * like length, version and command type + * + * @cmd: command ID, like in &struct iwl_cmd_header + * @group_id: group ID, like in &struct iwl_cmd_header + * @sequence: sequence, like in &struct iwl_cmd_header + * @length: length of the command + * @reserved: reserved + * @version: command version */ struct iwl_cmd_header_wide { u8 cmd; @@ -160,48 +176,6 @@ struct iwl_cmd_header_wide { u8 version; } __packed; -/** - * iwl_tx_queue_cfg_actions - TXQ config options - * @TX_QUEUE_CFG_ENABLE_QUEUE: enable a queue - * @TX_QUEUE_CFG_TFD_SHORT_FORMAT: use short TFD format - */ -enum iwl_tx_queue_cfg_actions { - TX_QUEUE_CFG_ENABLE_QUEUE = BIT(0), - TX_QUEUE_CFG_TFD_SHORT_FORMAT = BIT(1), -}; - -/** - * struct iwl_tx_queue_cfg_cmd - txq hw scheduler config command - * @sta_id: station id - * @tid: tid of the queue - * @flags: see &enum iwl_tx_queue_cfg_actions - * @cb_size: size of TFD cyclic buffer. Value is exponent - 3. 
- * Minimum value 0 (8 TFDs), maximum value 5 (256 TFDs) - * @byte_cnt_addr: address of byte count table - * @tfdq_addr: address of TFD circular buffer - */ -struct iwl_tx_queue_cfg_cmd { - u8 sta_id; - u8 tid; - __le16 flags; - __le32 cb_size; - __le64 byte_cnt_addr; - __le64 tfdq_addr; -} __packed; /* TX_QUEUE_CFG_CMD_API_S_VER_2 */ - -/** - * struct iwl_tx_queue_cfg_rsp - response to txq hw scheduler config - * @queue_number: queue number assigned to this RA -TID - * @flags: set on failure - * @write_pointer: initial value for write pointer - */ -struct iwl_tx_queue_cfg_rsp { - __le16 queue_number; - __le16 flags; - __le16 write_pointer; - __le16 reserved; -} __packed; /* TX_QUEUE_CFG_RSP_API_S_VER_2 */ - /** * struct iwl_calib_res_notif_phy_db - Receive phy db chunk after calibrations * @type: type of the result - mostly ignored @@ -226,4 +200,12 @@ struct iwl_phy_db_cmd { u8 data[]; } __packed; -#endif /* __iwl_fw_api_h__*/ +/** + * struct iwl_cmd_response - generic response struct for most commands + * @status: status of the command asked, changes for each one + */ +struct iwl_cmd_response { + __le32 status; +}; + +#endif /* __iwl_fw_api_cmdhdr_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-coex.h b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h similarity index 84% rename from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-coex.h rename to drivers/net/wireless/intel/iwlwifi/fw/api/coex.h index 8cd06aaa1f54..d09555afe2c5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-coex.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h @@ -18,11 +18,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -64,8 +59,8 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ -#ifndef __fw_api_bt_coex_h__ -#define __fw_api_bt_coex_h__ +#ifndef __iwl_fw_api_coex_h__ +#define __iwl_fw_api_coex_h__ #include #include @@ -81,7 +76,6 @@ enum iwl_bt_coex_lut_type { BT_COEX_INVALID_LUT = 0xff, }; /* BT_COEX_DECISION_LUT_INDEX_API_E_VER_1 */ -#define BT_COEX_CORUN_LUT_SIZE (32) #define BT_REDUCED_TX_POWER_BIT BIT(7) enum iwl_bt_coex_mode { @@ -111,18 +105,6 @@ struct iwl_bt_coex_cmd { __le32 enabled_modules; } __packed; /* BT_COEX_CMD_API_S_VER_6 */ -/** - * struct iwl_bt_coex_corun_lut_update - bt coex update the corun lut - * @corun_lut20: co-running 20 MHz LUT configuration - * @corun_lut40: co-running 40 MHz LUT configuration - * - * The structure is used for the BT_COEX_UPDATE_CORUN_LUT command. 
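With the command header split out into fw/api/cmdhdr.h, commands outside the legacy group are referenced through a composite ID and the wide header carries the group, length and API version explicitly. A minimal illustration (SHARED_MEM_CFG_CMD and SYSTEM_GROUP are defined in fw/api/commands.h later in this patch; the composite encoding is whatever iwl_cmd_id() produces):

static u32 example_wide_cmd(struct iwl_cmd_header_wide *hdr, u16 payload_len)
{
	hdr->cmd = SHARED_MEM_CFG_CMD;		/* opcode within the group */
	hdr->group_id = SYSTEM_GROUP;
	hdr->length = cpu_to_le16(payload_len);
	hdr->version = 0;
	/* hdr->sequence is owned by the transport and filled at submit time */

	return iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
}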
- */ -struct iwl_bt_coex_corun_lut_update_cmd { - __le32 corun_lut20[BT_COEX_CORUN_LUT_SIZE]; - __le32 corun_lut40[BT_COEX_CORUN_LUT_SIZE]; -} __packed; /* BT_COEX_UPDATE_CORUN_LUT_API_S_VER_1 */ - /** * struct iwl_bt_coex_reduced_txp_update_cmd * @reduced_txp: bit BT_REDUCED_TX_POWER_BIT to enable / disable, rest of the @@ -196,6 +178,7 @@ enum iwl_bt_mxbox_dw3 { BT_MBOX(3, ACL_STATE, 3, 1), BT_MBOX(3, MSTR_STATE, 4, 1), BT_MBOX(3, OBX_STATE, 5, 1), + BT_MBOX(3, A2DP_SRC, 6, 1), BT_MBOX(3, OPEN_CON_2, 8, 2), BT_MBOX(3, TRAFFIC_LOAD, 10, 2), BT_MBOX(3, CHL_SEQN_LSB, 12, 1), @@ -205,10 +188,21 @@ enum iwl_bt_mxbox_dw3 { BT_MBOX(3, UPDATE_REQUEST, 21, 1), }; +enum iwl_bt_mxbox_dw4 { + BT_MBOX(4, ATS_BT_INTERVAL, 0, 7), + BT_MBOX(4, ATS_BT_ACTIVE_MAX_TH, 7, 7), +}; + #define BT_MBOX_MSG(_notif, _num, _field) \ ((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\ >> BT_MBOX##_num##_##_field##_POS) +#define BT_MBOX_PRINT(_num, _field, _end) \ + pos += scnprintf(buf + pos, bufsz - pos, \ + "\t%s: %d%s", \ + #_field, \ + BT_MBOX_MSG(notif, _num, _field), \ + true ? "\n" : ", "); enum iwl_bt_activity_grading { BT_OFF = 0, BT_ON_NO_CONNECTION = 1, @@ -225,11 +219,30 @@ enum iwl_bt_ci_compliance { BT_CI_COMPLIANCE_BOTH = 3, }; /* BT_COEX_CI_COMPLIENCE_E_VER_1 */ -#define IWL_COEX_IS_TTC_ON(_ttc_rrc_status, _phy_id) \ - (_ttc_rrc_status & BIT(_phy_id)) +/** + * struct iwl_bt_coex_profile_notif - notification about BT coex + * @mbox_msg: message from BT to WiFi + * @msg_idx: the index of the message + * @bt_ci_compliance: enum %iwl_bt_ci_compliance + * @primary_ch_lut: LUT used for primary channel &enum iwl_bt_coex_lut_type + * @secondary_ch_lut: LUT used for secondary channel &enum iwl_bt_coex_lut_type + * @bt_activity_grading: the activity of BT &enum iwl_bt_activity_grading + * @ttc_status: is TTC enabled - one bit per PHY + * @rrc_status: is RRC enabled - one bit per PHY + * @reserved: reserved + */ +struct iwl_bt_coex_profile_notif { + __le32 mbox_msg[8]; + __le32 msg_idx; + __le32 bt_ci_compliance; -#define IWL_COEX_IS_RRC_ON(_ttc_rrc_status, _phy_id) \ - ((_ttc_rrc_status >> 4) & BIT(_phy_id)) + __le32 primary_ch_lut; + __le32 secondary_ch_lut; + __le32 bt_activity_grading; + u8 ttc_status; + u8 rrc_status; + __le16 reserved; +} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_5 */ /** * struct iwl_bt_coex_profile_notif - notification about BT coex @@ -239,10 +252,11 @@ enum iwl_bt_ci_compliance { * @primary_ch_lut: LUT used for primary channel &enum iwl_bt_coex_lut_type * @secondary_ch_lut: LUT used for secondary channel &enum iwl_bt_coex_lut_type * @bt_activity_grading: the activity of BT &enum iwl_bt_activity_grading - * @ttc_rrc_status: is TTC or RRC enabled - one bit per PHY + * @ttc_status: is TTC enabled - one bit per PHY + * @rrc_status: is RRC enabled - one bit per PHY * @reserved: reserved */ -struct iwl_bt_coex_profile_notif { +struct iwl_bt_coex_profile_notif_v4 { __le32 mbox_msg[4]; __le32 msg_idx; __le32 bt_ci_compliance; @@ -250,8 +264,9 @@ struct iwl_bt_coex_profile_notif { __le32 primary_ch_lut; __le32 secondary_ch_lut; __le32 bt_activity_grading; - u8 ttc_rrc_status; - u8 reserved[3]; + u8 ttc_status; + u8 rrc_status; + __le16 reserved; } __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_4 */ -#endif /* __fw_api_bt_coex_h__ */ +#endif /* __iwl_fw_api_coex_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h new file mode 100644 index 000000000000..074868394427 --- /dev/null +++ 
b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h @@ -0,0 +1,657 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
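The BT mailbox words in fw/api/coex.h above are decoded with the BT_MBOX_MSG() accessor (BT_MBOX_PRINT() does the same into a text buffer). A minimal illustration using two of the dword-3 fields:

static void example_decode_bt_mbox(const struct iwl_bt_coex_profile_notif *notif)
{
	u32 traffic_load = BT_MBOX_MSG(notif, 3, TRAFFIC_LOAD);
	u32 open_con_2 = BT_MBOX_MSG(notif, 3, OPEN_CON_2);

	pr_debug("BT traffic load %u, open connections %u\n",
		 traffic_load, open_con_2);
}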
+ * + *****************************************************************************/ + +#ifndef __iwl_fw_api_commands_h__ +#define __iwl_fw_api_commands_h__ + +/** + * enum iwl_mvm_command_groups - command groups for the firmware + * @LEGACY_GROUP: legacy group, uses command IDs from &enum iwl_legacy_cmds + * @LONG_GROUP: legacy group with long header, also uses command IDs + * from &enum iwl_legacy_cmds + * @SYSTEM_GROUP: system group, uses command IDs from + * &enum iwl_system_subcmd_ids + * @MAC_CONF_GROUP: MAC configuration group, uses command IDs from + * &enum iwl_mac_conf_subcmd_ids + * @PHY_OPS_GROUP: PHY operations group, uses command IDs from + * &enum iwl_phy_ops_subcmd_ids + * @DATA_PATH_GROUP: data path group, uses command IDs from + * &enum iwl_data_path_subcmd_ids + * @NAN_GROUP: NAN group, uses command IDs from &enum iwl_nan_subcmd_ids + * @TOF_GROUP: TOF group, uses command IDs from &enum iwl_tof_subcmd_ids + * @PROT_OFFLOAD_GROUP: protocol offload group, uses command IDs from + * &enum iwl_prot_offload_subcmd_ids + * @REGULATORY_AND_NVM_GROUP: regulatory/NVM group, uses command IDs from + * &enum iwl_regulatory_and_nvm_subcmd_ids + * @DEBUG_GROUP: Debug group, uses command IDs from &enum iwl_debug_cmds + */ +enum iwl_mvm_command_groups { + LEGACY_GROUP = 0x0, + LONG_GROUP = 0x1, + SYSTEM_GROUP = 0x2, + MAC_CONF_GROUP = 0x3, + PHY_OPS_GROUP = 0x4, + DATA_PATH_GROUP = 0x5, + NAN_GROUP = 0x7, + TOF_GROUP = 0x8, + PROT_OFFLOAD_GROUP = 0xb, + REGULATORY_AND_NVM_GROUP = 0xc, + DEBUG_GROUP = 0xf, +}; + +/** + * enum iwl_legacy_cmds - legacy group command IDs + */ +enum iwl_legacy_cmds { + /** + * @MVM_ALIVE: + * Alive data from the firmware, as described in + * &struct mvm_alive_resp_v3 or &struct mvm_alive_resp. + */ + MVM_ALIVE = 0x1, + + /** + * @REPLY_ERROR: Cause an error in the firmware, for testing purposes. + */ + REPLY_ERROR = 0x2, + + /** + * @ECHO_CMD: Send data to the device to have it returned immediately. + */ + ECHO_CMD = 0x3, + + /** + * @INIT_COMPLETE_NOTIF: Notification that initialization is complete. + */ + INIT_COMPLETE_NOTIF = 0x4, + + /** + * @PHY_CONTEXT_CMD: + * Add/modify/remove a PHY context, using &struct iwl_phy_context_cmd. + */ + PHY_CONTEXT_CMD = 0x8, + + /** + * @DBG_CFG: Debug configuration command. + */ + DBG_CFG = 0x9, + + /** + * @SCAN_ITERATION_COMPLETE_UMAC: + * Firmware indicates a scan iteration completed, using + * &struct iwl_umac_scan_iter_complete_notif. + */ + SCAN_ITERATION_COMPLETE_UMAC = 0xb5, + + /** + * @SCAN_CFG_CMD: + * uses &struct iwl_scan_config_v1 or &struct iwl_scan_config + */ + SCAN_CFG_CMD = 0xc, + + /** + * @SCAN_REQ_UMAC: uses &struct iwl_scan_req_umac + */ + SCAN_REQ_UMAC = 0xd, + + /** + * @SCAN_ABORT_UMAC: uses &struct iwl_umac_scan_abort + */ + SCAN_ABORT_UMAC = 0xe, + + /** + * @SCAN_COMPLETE_UMAC: uses &struct iwl_umac_scan_complete + */ + SCAN_COMPLETE_UMAC = 0xf, + + /** + * @BA_WINDOW_STATUS_NOTIFICATION_ID: + * uses &struct iwl_ba_window_status_notif + */ + BA_WINDOW_STATUS_NOTIFICATION_ID = 0x13, + + /** + * @ADD_STA_KEY: + * &struct iwl_mvm_add_sta_key_cmd_v1 or + * &struct iwl_mvm_add_sta_key_cmd. + */ + ADD_STA_KEY = 0x17, + + /** + * @ADD_STA: + * &struct iwl_mvm_add_sta_cmd or &struct iwl_mvm_add_sta_cmd_v7. 
+ */ + ADD_STA = 0x18, + + /** + * @REMOVE_STA: &struct iwl_mvm_rm_sta_cmd + */ + REMOVE_STA = 0x19, + + /** + * @FW_GET_ITEM_CMD: uses &struct iwl_fw_get_item_cmd + */ + FW_GET_ITEM_CMD = 0x1a, + + /** + * @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2, + * response in &struct iwl_mvm_tx_resp or + * &struct iwl_mvm_tx_resp_v3 + */ + TX_CMD = 0x1c, + + /** + * @TXPATH_FLUSH: &struct iwl_tx_path_flush_cmd + */ + TXPATH_FLUSH = 0x1e, + + /** + * @MGMT_MCAST_KEY: + * &struct iwl_mvm_mgmt_mcast_key_cmd or + * &struct iwl_mvm_mgmt_mcast_key_cmd_v1 + */ + MGMT_MCAST_KEY = 0x1f, + + /* scheduler config */ + /** + * @SCD_QUEUE_CFG: &struct iwl_scd_txq_cfg_cmd for older hardware, + * &struct iwl_tx_queue_cfg_cmd with &struct iwl_tx_queue_cfg_rsp + * for newer (A000) hardware. + */ + SCD_QUEUE_CFG = 0x1d, + + /** + * @WEP_KEY: uses &struct iwl_mvm_wep_key_cmd + */ + WEP_KEY = 0x20, + + /** + * @SHARED_MEM_CFG: + * retrieve shared memory configuration - response in + * &struct iwl_shared_mem_cfg + */ + SHARED_MEM_CFG = 0x25, + + /** + * @TDLS_CHANNEL_SWITCH_CMD: uses &struct iwl_tdls_channel_switch_cmd + */ + TDLS_CHANNEL_SWITCH_CMD = 0x27, + + /** + * @TDLS_CHANNEL_SWITCH_NOTIFICATION: + * uses &struct iwl_tdls_channel_switch_notif + */ + TDLS_CHANNEL_SWITCH_NOTIFICATION = 0xaa, + + /** + * @TDLS_CONFIG_CMD: + * &struct iwl_tdls_config_cmd, response in &struct iwl_tdls_config_res + */ + TDLS_CONFIG_CMD = 0xa7, + + /** + * @MAC_CONTEXT_CMD: &struct iwl_mac_ctx_cmd + */ + MAC_CONTEXT_CMD = 0x28, + + /** + * @TIME_EVENT_CMD: + * &struct iwl_time_event_cmd, response in &struct iwl_time_event_resp + */ + TIME_EVENT_CMD = 0x29, /* both CMD and response */ + + /** + * @TIME_EVENT_NOTIFICATION: &struct iwl_time_event_notif + */ + TIME_EVENT_NOTIFICATION = 0x2a, + + /** + * @BINDING_CONTEXT_CMD: + * &struct iwl_binding_cmd or &struct iwl_binding_cmd_v1 + */ + BINDING_CONTEXT_CMD = 0x2b, + + /** + * @TIME_QUOTA_CMD: &struct iwl_time_quota_cmd + */ + TIME_QUOTA_CMD = 0x2c, + + /** + * @NON_QOS_TX_COUNTER_CMD: + * command is &struct iwl_nonqos_seq_query_cmd + */ + NON_QOS_TX_COUNTER_CMD = 0x2d, + + /** + * @LEDS_CMD: command is &struct iwl_led_cmd + */ + LEDS_CMD = 0x48, + + /** + * @LQ_CMD: using &struct iwl_lq_cmd + */ + LQ_CMD = 0x4e, + + /** + * @FW_PAGING_BLOCK_CMD: + * &struct iwl_fw_paging_cmd + */ + FW_PAGING_BLOCK_CMD = 0x4f, + + /** + * @SCAN_OFFLOAD_REQUEST_CMD: uses &struct iwl_scan_req_lmac + */ + SCAN_OFFLOAD_REQUEST_CMD = 0x51, + + /** + * @SCAN_OFFLOAD_ABORT_CMD: abort the scan - no further contents + */ + SCAN_OFFLOAD_ABORT_CMD = 0x52, + + /** + * @HOT_SPOT_CMD: uses &struct iwl_hs20_roc_req + */ + HOT_SPOT_CMD = 0x53, + + /** + * @SCAN_OFFLOAD_COMPLETE: + * notification, &struct iwl_periodic_scan_complete + */ + SCAN_OFFLOAD_COMPLETE = 0x6D, + + /** + * @SCAN_OFFLOAD_UPDATE_PROFILES_CMD: + * update scan offload (scheduled scan) profiles/blacklist/etc. 
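TIME_QUOTA_CMD (0x2c) above carries the iwl_time_quota_cmd from binding.h. A simplified sketch that splits the session evenly across the active bindings; the real quota algorithm weighs bindings differently, and FW_CTXT_INVALID comes from fw/api/context.h later in this patch:

static void example_fill_quota(struct iwl_time_quota_cmd *cmd,
			       const u32 *active_ids, int n_active)
{
	int i;

	memset(cmd, 0, sizeof(*cmd));
	for (i = 0; i < MAX_BINDINGS; i++) {
		if (i < n_active) {
			cmd->quotas[i].id_and_color =
				cpu_to_le32(active_ids[i]);
			cmd->quotas[i].quota =
				cpu_to_le32(IWL_MVM_MAX_QUOTA / n_active);
		} else {
			/* unused slot: invalid binding, zero quota */
			cmd->quotas[i].id_and_color =
				cpu_to_le32(FW_CTXT_INVALID);
		}
	}
}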
+ */ + SCAN_OFFLOAD_UPDATE_PROFILES_CMD = 0x6E, + + /** + * @MATCH_FOUND_NOTIFICATION: scan match found + */ + MATCH_FOUND_NOTIFICATION = 0xd9, + + /** + * @SCAN_ITERATION_COMPLETE: + * uses &struct iwl_lmac_scan_complete_notif + */ + SCAN_ITERATION_COMPLETE = 0xe7, + + /* Phy */ + /** + * @PHY_CONFIGURATION_CMD: &struct iwl_phy_cfg_cmd + */ + PHY_CONFIGURATION_CMD = 0x6a, + + /** + * @CALIB_RES_NOTIF_PHY_DB: &struct iwl_calib_res_notif_phy_db + */ + CALIB_RES_NOTIF_PHY_DB = 0x6b, + + /** + * @PHY_DB_CMD: &struct iwl_phy_db_cmd + */ + PHY_DB_CMD = 0x6c, + + /** + * @TOF_CMD: &struct iwl_tof_config_cmd + */ + TOF_CMD = 0x10, + + /** + * @TOF_NOTIFICATION: &struct iwl_tof_gen_resp_cmd + */ + TOF_NOTIFICATION = 0x11, + + /** + * @POWER_TABLE_CMD: &struct iwl_device_power_cmd + */ + POWER_TABLE_CMD = 0x77, + + /** + * @PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION: + * &struct iwl_uapsd_misbehaving_ap_notif + */ + PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78, + + /** + * @LTR_CONFIG: &struct iwl_ltr_config_cmd + */ + LTR_CONFIG = 0xee, + + /** + * @REPLY_THERMAL_MNG_BACKOFF: + * Thermal throttling command + */ + REPLY_THERMAL_MNG_BACKOFF = 0x7e, + + /** + * @DC2DC_CONFIG_CMD: + * Set/Get DC2DC frequency tune + * Command is &struct iwl_dc2dc_config_cmd, + * response is &struct iwl_dc2dc_config_resp + */ + DC2DC_CONFIG_CMD = 0x83, + + /** + * @NVM_ACCESS_CMD: using &struct iwl_nvm_access_cmd + */ + NVM_ACCESS_CMD = 0x88, + + /** + * @BEACON_NOTIFICATION: &struct iwl_extended_beacon_notif + */ + BEACON_NOTIFICATION = 0x90, + + /** + * @BEACON_TEMPLATE_CMD: + * Uses one of &struct iwl_mac_beacon_cmd_v6, + * &struct iwl_mac_beacon_cmd_v7 or &struct iwl_mac_beacon_cmd + * depending on the device version. + */ + BEACON_TEMPLATE_CMD = 0x91, + /** + * @TX_ANT_CONFIGURATION_CMD: &struct iwl_tx_ant_cfg_cmd + */ + TX_ANT_CONFIGURATION_CMD = 0x98, + + /** + * @STATISTICS_CMD: &struct iwl_statistics_cmd + */ + STATISTICS_CMD = 0x9c, + + /** + * @STATISTICS_NOTIFICATION: + * one of &struct iwl_notif_statistics_v10, + * &struct iwl_notif_statistics_v11, + * &struct iwl_notif_statistics_cdb + */ + STATISTICS_NOTIFICATION = 0x9d, + + /** + * @EOSP_NOTIFICATION: + * Notify that a service period ended, + * &struct iwl_mvm_eosp_notification + */ + EOSP_NOTIFICATION = 0x9e, + + /** + * @REDUCE_TX_POWER_CMD: + * &struct iwl_dev_tx_power_cmd_v3 or &struct iwl_dev_tx_power_cmd + */ + REDUCE_TX_POWER_CMD = 0x9f, + + /** + * @CARD_STATE_NOTIFICATION: + * Card state (RF/CT kill) notification, + * uses &struct iwl_card_state_notif + */ + CARD_STATE_NOTIFICATION = 0xa1, + + /** + * @MISSED_BEACONS_NOTIFICATION: &struct iwl_missed_beacons_notif + */ + MISSED_BEACONS_NOTIFICATION = 0xa2, + + /** + * @MAC_PM_POWER_TABLE: using &struct iwl_mac_power_cmd + */ + MAC_PM_POWER_TABLE = 0xa9, + + /** + * @MFUART_LOAD_NOTIFICATION: &struct iwl_mfuart_load_notif + */ + MFUART_LOAD_NOTIFICATION = 0xb1, + + /** + * @RSS_CONFIG_CMD: &struct iwl_rss_config_cmd + */ + RSS_CONFIG_CMD = 0xb3, + + /** + * @REPLY_RX_PHY_CMD: &struct iwl_rx_phy_info + */ + REPLY_RX_PHY_CMD = 0xc0, + + /** + * @REPLY_RX_MPDU_CMD: + * &struct iwl_rx_mpdu_res_start or &struct iwl_rx_mpdu_desc + */ + REPLY_RX_MPDU_CMD = 0xc1, + + /** + * @FRAME_RELEASE: + * Frame release (reorder helper) notification, uses + * &struct iwl_frame_release + */ + FRAME_RELEASE = 0xc3, + + /** + * @BA_NOTIF: + * BlockAck notification, uses &struct iwl_mvm_compressed_ba_notif + * or &struct iwl_mvm_ba_notif depending on the HW + */ + BA_NOTIF = 0xc5, + + /* Location Aware Regulatory */ + /** + * 
@MCC_UPDATE_CMD: using &struct iwl_mcc_update_cmd + */ + MCC_UPDATE_CMD = 0xc8, + + /** + * @MCC_CHUB_UPDATE_CMD: using &struct iwl_mcc_chub_notif + */ + MCC_CHUB_UPDATE_CMD = 0xc9, + + /** + * @MARKER_CMD: trace marker command, uses &struct iwl_mvm_marker + */ + MARKER_CMD = 0xcb, + + /** + * @BT_PROFILE_NOTIFICATION: &struct iwl_bt_coex_profile_notif + */ + BT_PROFILE_NOTIFICATION = 0xce, + + /** + * @BT_CONFIG: &struct iwl_bt_coex_cmd + */ + BT_CONFIG = 0x9b, + + /** + * @BT_COEX_UPDATE_REDUCED_TXP: + * &struct iwl_bt_coex_reduced_txp_update_cmd + */ + BT_COEX_UPDATE_REDUCED_TXP = 0x5c, + + /** + * @BT_COEX_CI: &struct iwl_bt_coex_ci_cmd + */ + BT_COEX_CI = 0x5d, + + /** + * @REPLY_SF_CFG_CMD: &struct iwl_sf_cfg_cmd + */ + REPLY_SF_CFG_CMD = 0xd1, + /** + * @REPLY_BEACON_FILTERING_CMD: &struct iwl_beacon_filter_cmd + */ + REPLY_BEACON_FILTERING_CMD = 0xd2, + + /** + * @DTS_MEASUREMENT_NOTIFICATION: + * &struct iwl_dts_measurement_notif_v1 or + * &struct iwl_dts_measurement_notif_v2 + */ + DTS_MEASUREMENT_NOTIFICATION = 0xdd, + + /** + * @LDBG_CONFIG_CMD: configure continuous trace recording + */ + LDBG_CONFIG_CMD = 0xf6, + + /** + * @DEBUG_LOG_MSG: Debugging log data from firmware + */ + DEBUG_LOG_MSG = 0xf7, + + /** + * @BCAST_FILTER_CMD: &struct iwl_bcast_filter_cmd + */ + BCAST_FILTER_CMD = 0xcf, + + /** + * @MCAST_FILTER_CMD: &struct iwl_mcast_filter_cmd + */ + MCAST_FILTER_CMD = 0xd0, + + /** + * @D3_CONFIG_CMD: &struct iwl_d3_manager_config + */ + D3_CONFIG_CMD = 0xd3, + + /** + * @PROT_OFFLOAD_CONFIG_CMD: Depending on firmware, uses one of + * &struct iwl_proto_offload_cmd_v1, &struct iwl_proto_offload_cmd_v2, + * &struct iwl_proto_offload_cmd_v3_small, + * &struct iwl_proto_offload_cmd_v3_large + */ + PROT_OFFLOAD_CONFIG_CMD = 0xd4, + + /** + * @OFFLOADS_QUERY_CMD: + * No data in command, response in &struct iwl_wowlan_status + */ + OFFLOADS_QUERY_CMD = 0xd5, + + /** + * @REMOTE_WAKE_CONFIG_CMD: &struct iwl_wowlan_remote_wake_config + */ + REMOTE_WAKE_CONFIG_CMD = 0xd6, + + /** + * @D0I3_END_CMD: End D0i3/D3 state, no command data + */ + D0I3_END_CMD = 0xed, + + /** + * @WOWLAN_PATTERNS: &struct iwl_wowlan_patterns_cmd + */ + WOWLAN_PATTERNS = 0xe0, + + /** + * @WOWLAN_CONFIGURATION: &struct iwl_wowlan_config_cmd + */ + WOWLAN_CONFIGURATION = 0xe1, + + /** + * @WOWLAN_TSC_RSC_PARAM: &struct iwl_wowlan_rsc_tsc_params_cmd + */ + WOWLAN_TSC_RSC_PARAM = 0xe2, + + /** + * @WOWLAN_TKIP_PARAM: &struct iwl_wowlan_tkip_params_cmd + */ + WOWLAN_TKIP_PARAM = 0xe3, + + /** + * @WOWLAN_KEK_KCK_MATERIAL: &struct iwl_wowlan_kek_kck_material_cmd + */ + WOWLAN_KEK_KCK_MATERIAL = 0xe4, + + /** + * @WOWLAN_GET_STATUSES: response in &struct iwl_wowlan_status + */ + WOWLAN_GET_STATUSES = 0xe5, + + /** + * @SCAN_OFFLOAD_PROFILES_QUERY_CMD: + * No command data, response is &struct iwl_scan_offload_profiles_query + */ + SCAN_OFFLOAD_PROFILES_QUERY_CMD = 0x56, +}; + +/** + * enum iwl_system_subcmd_ids - system group command IDs + */ +enum iwl_system_subcmd_ids { + /** + * @SHARED_MEM_CFG_CMD: + * response in &struct iwl_shared_mem_cfg or + * &struct iwl_shared_mem_cfg_v2 + */ + SHARED_MEM_CFG_CMD = 0x0, + + /** + * @INIT_EXTENDED_CFG_CMD: &struct iwl_init_extended_cfg_cmd + */ + INIT_EXTENDED_CFG_CMD = 0x03, + + /** + * @FSEQ_VER_MISMATCH_NTF: Notification about fseq version + * mismatch during init. The format is specified in + * &struct iwl_fseq_ver_mismatch_ntf. 
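INIT_EXTENDED_CFG_CMD (0x03 in SYSTEM_GROUP) above takes the iwl_init_extended_cfg_cmd from fw/api/alive.h. A sketch of filling it; treating the enum values as bit positions (hence the BIT() wrapping) is an assumption based on the "init_flags" naming, and the function name is illustrative:

static void example_fill_init_cfg(struct iwl_init_extended_cfg_cmd *cmd,
				  bool will_send_nvm, bool will_send_phy_db)
{
	u32 flags = 0;

	if (will_send_nvm)
		flags |= BIT(IWL_INIT_NVM);	/* driver will send NVM_ACCESS commands */
	if (will_send_phy_db)
		flags |= BIT(IWL_INIT_PHY);	/* driver will send PHY_DB commands */

	cmd->init_flags = cpu_to_le32(flags);
}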
+ */ + FSEQ_VER_MISMATCH_NTF = 0xFF, +}; + +#endif /* __iwl_fw_api_commands_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/config.h b/drivers/net/wireless/intel/iwlwifi/fw/api/config.h new file mode 100644 index 000000000000..7f645b62804e --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/config.h @@ -0,0 +1,184 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ + +#ifndef __iwl_fw_api_config_h__ +#define __iwl_fw_api_config_h__ + +/* + * struct iwl_dqa_enable_cmd + * @cmd_queue: the TXQ number of the command queue + */ +struct iwl_dqa_enable_cmd { + __le32 cmd_queue; +} __packed; /* DQA_CONTROL_CMD_API_S_VER_1 */ + +/* + * struct iwl_tx_ant_cfg_cmd + * @valid: valid antenna configuration + */ +struct iwl_tx_ant_cfg_cmd { + __le32 valid; +} __packed; + +/** + * struct iwl_calib_ctrl - Calibration control struct. + * Sent as part of the phy configuration command. + * @flow_trigger: bitmap for which calibrations to perform according to + * flow triggers, using &enum iwl_calib_cfg + * @event_trigger: bitmap for which calibrations to perform according to + * event triggers, using &enum iwl_calib_cfg + */ +struct iwl_calib_ctrl { + __le32 flow_trigger; + __le32 event_trigger; +} __packed; + +/* This enum defines the bitmap of various calibrations to enable in both + * init ucode and runtime ucode through CALIBRATION_CFG_CMD. + */ +enum iwl_calib_cfg { + IWL_CALIB_CFG_XTAL_IDX = BIT(0), + IWL_CALIB_CFG_TEMPERATURE_IDX = BIT(1), + IWL_CALIB_CFG_VOLTAGE_READ_IDX = BIT(2), + IWL_CALIB_CFG_PAPD_IDX = BIT(3), + IWL_CALIB_CFG_TX_PWR_IDX = BIT(4), + IWL_CALIB_CFG_DC_IDX = BIT(5), + IWL_CALIB_CFG_BB_FILTER_IDX = BIT(6), + IWL_CALIB_CFG_LO_LEAKAGE_IDX = BIT(7), + IWL_CALIB_CFG_TX_IQ_IDX = BIT(8), + IWL_CALIB_CFG_TX_IQ_SKEW_IDX = BIT(9), + IWL_CALIB_CFG_RX_IQ_IDX = BIT(10), + IWL_CALIB_CFG_RX_IQ_SKEW_IDX = BIT(11), + IWL_CALIB_CFG_SENSITIVITY_IDX = BIT(12), + IWL_CALIB_CFG_CHAIN_NOISE_IDX = BIT(13), + IWL_CALIB_CFG_DISCONNECTED_ANT_IDX = BIT(14), + IWL_CALIB_CFG_ANT_COUPLING_IDX = BIT(15), + IWL_CALIB_CFG_DAC_IDX = BIT(16), + IWL_CALIB_CFG_ABS_IDX = BIT(17), + IWL_CALIB_CFG_AGC_IDX = BIT(18), +}; + +/** + * struct iwl_phy_cfg_cmd - Phy configuration command + * @phy_cfg: PHY configuration value, uses &enum iwl_fw_phy_cfg + * @calib_control: calibration control data + */ +struct iwl_phy_cfg_cmd { + __le32 phy_cfg; + struct iwl_calib_ctrl calib_control; +} __packed; + +#define PHY_CFG_RADIO_TYPE (BIT(0) | BIT(1)) +#define PHY_CFG_RADIO_STEP (BIT(2) | BIT(3)) +#define PHY_CFG_RADIO_DASH (BIT(4) | BIT(5)) +#define PHY_CFG_PRODUCT_NUMBER (BIT(6) | BIT(7)) +#define PHY_CFG_TX_CHAIN_A BIT(8) +#define PHY_CFG_TX_CHAIN_B BIT(9) +#define PHY_CFG_TX_CHAIN_C BIT(10) +#define PHY_CFG_RX_CHAIN_A BIT(12) +#define PHY_CFG_RX_CHAIN_B BIT(13) +#define PHY_CFG_RX_CHAIN_C BIT(14) + +/* + * enum iwl_dc2dc_config_id - flag ids + * + * Ids of dc2dc configuration flags + */ +enum iwl_dc2dc_config_id { + DCDC_LOW_POWER_MODE_MSK_SET = 0x1, /* not used */ + DCDC_FREQ_TUNE_SET = 0x2, +}; /* MARKER_ID_API_E_VER_1 */ + +/** + * struct iwl_dc2dc_config_cmd - configure dc2dc values + * + * (DC2DC_CONFIG_CMD = 0x83) + * + * Set/Get & configure dc2dc values. + * The command always returns the current dc2dc values. + * + * @flags: set/get dc2dc + * @enable_low_power_mode: not used. + * @dc2dc_freq_tune0: frequency divider - digital domain + * @dc2dc_freq_tune1: frequency divider - analog domain + */ +struct iwl_dc2dc_config_cmd { + __le32 flags; + __le32 enable_low_power_mode; /* not used */ + __le32 dc2dc_freq_tune0; + __le32 dc2dc_freq_tune1; +} __packed; /* DC2DC_CONFIG_CMD_API_S_VER_1 */ + +/** + * struct iwl_dc2dc_config_resp - response for iwl_dc2dc_config_cmd + * + * Current dc2dc values returned by the FW. 
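config.h above carries the PHY configuration command (PHY_CONFIGURATION_CMD, 0x6a in the legacy list) with its chain masks and calibration bitmaps. A sketch for a hypothetical 1x2 chain setup; which calibrations the firmware actually wants is device specific, so the two requested below are examples only:

static void example_fill_phy_cfg(struct iwl_phy_cfg_cmd *cmd)
{
	cmd->phy_cfg = cpu_to_le32(PHY_CFG_TX_CHAIN_A |
				   PHY_CFG_RX_CHAIN_A | PHY_CFG_RX_CHAIN_B);
	cmd->calib_control.flow_trigger =
		cpu_to_le32(IWL_CALIB_CFG_XTAL_IDX |
			    IWL_CALIB_CFG_TEMPERATURE_IDX);
	cmd->calib_control.event_trigger = 0;
}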
+ * + * @dc2dc_freq_tune0: frequency divider - digital domain + * @dc2dc_freq_tune1: frequency divider - analog domain + */ +struct iwl_dc2dc_config_resp { + __le32 dc2dc_freq_tune0; + __le32 dc2dc_freq_tune1; +} __packed; /* DC2DC_CONFIG_RESP_API_S_VER_1 */ + +#endif /* __iwl_fw_api_config_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/context.h b/drivers/net/wireless/intel/iwlwifi/fw/api/context.h new file mode 100644 index 000000000000..2f0d7c498b3e --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/context.h @@ -0,0 +1,94 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ + +#ifndef __iwl_fw_api_context_h__ +#define __iwl_fw_api_context_h__ + +/** + * enum iwl_ctxt_id_and_color - ID and color fields in context dword + * @FW_CTXT_ID_POS: position of the ID + * @FW_CTXT_ID_MSK: mask of the ID + * @FW_CTXT_COLOR_POS: position of the color + * @FW_CTXT_COLOR_MSK: mask of the color + * @FW_CTXT_INVALID: value used to indicate unused/invalid + */ +enum iwl_ctxt_id_and_color { + FW_CTXT_ID_POS = 0, + FW_CTXT_ID_MSK = 0xff << FW_CTXT_ID_POS, + FW_CTXT_COLOR_POS = 8, + FW_CTXT_COLOR_MSK = 0xff << FW_CTXT_COLOR_POS, + FW_CTXT_INVALID = 0xffffffff, +}; + +#define FW_CMD_ID_AND_COLOR(_id, _color) (((_id) << FW_CTXT_ID_POS) |\ + ((_color) << FW_CTXT_COLOR_POS)) + +/* Possible actions on PHYs, MACs and Bindings */ +enum iwl_ctxt_action { + FW_CTXT_ACTION_STUB = 0, + FW_CTXT_ACTION_ADD, + FW_CTXT_ACTION_MODIFY, + FW_CTXT_ACTION_REMOVE, + FW_CTXT_ACTION_NUM +}; /* COMMON_CONTEXT_ACTION_API_E_VER_1 */ + +#endif /* __iwl_fw_api_context_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h similarity index 98% rename from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h rename to drivers/net/wireless/intel/iwlwifi/fw/api/d3.h index d4a4c28b7192..57f4bc242023 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h @@ -18,11 +18,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -64,8 +59,8 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ -#ifndef __fw_api_d3_h__ -#define __fw_api_d3_h__ +#ifndef __iwl_fw_api_d3_h__ +#define __iwl_fw_api_d3_h__ /** * enum iwl_d3_wakeup_flags - D3 manager wakeup flags @@ -468,4 +463,4 @@ struct iwl_wowlan_remote_wake_config { /* TODO: NetDetect API */ -#endif /* __fw_api_d3_h__ */ +#endif /* __iwl_fw_api_d3_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h new file mode 100644 index 000000000000..aa76dcc148bd --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h @@ -0,0 +1,127 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
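Every context command in this API family (MAC, PHY, binding) starts with the packed id_and_color dword defined in fw/api/context.h above; FW_CMD_ID_AND_COLOR() packs it and the masks unpack it. For example, ID 1 with color 2 packs to 0x201:

static u32 example_pack_ctxt(u8 id, u8 color)
{
	/* e.g. example_pack_ctxt(1, 2) == 0x201 */
	return FW_CMD_ID_AND_COLOR(id, color);
}

static u8 example_unpack_ctxt_id(u32 id_and_color)
{
	return (id_and_color & FW_CTXT_ID_MSK) >> FW_CTXT_ID_POS;
}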
+ * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __iwl_fw_api_datapath_h__ +#define __iwl_fw_api_datapath_h__ + +/** + * enum iwl_data_path_subcmd_ids - data path group commands + */ +enum iwl_data_path_subcmd_ids { + /** + * @DQA_ENABLE_CMD: &struct iwl_dqa_enable_cmd + */ + DQA_ENABLE_CMD = 0x0, + + /** + * @UPDATE_MU_GROUPS_CMD: &struct iwl_mu_group_mgmt_cmd + */ + UPDATE_MU_GROUPS_CMD = 0x1, + + /** + * @TRIGGER_RX_QUEUES_NOTIF_CMD: &struct iwl_rxq_sync_cmd + */ + TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2, + + /** + * @STA_PM_NOTIF: &struct iwl_mvm_pm_state_notification + */ + STA_PM_NOTIF = 0xFD, + + /** + * @MU_GROUP_MGMT_NOTIF: &struct iwl_mu_group_mgmt_notif + */ + MU_GROUP_MGMT_NOTIF = 0xFE, + + /** + * @RX_QUEUES_NOTIFICATION: &struct iwl_rxq_sync_notification + */ + RX_QUEUES_NOTIFICATION = 0xFF, +}; + +/** + * struct iwl_mu_group_mgmt_cmd - VHT MU-MIMO group configuration + * + * @reserved: reserved + * @membership_status: a bitmap of MU groups + * @user_position:the position of station in a group. If the station is in the + * group then bits (group * 2) is the position -1 + */ +struct iwl_mu_group_mgmt_cmd { + __le32 reserved; + __le32 membership_status[2]; + __le32 user_position[4]; +} __packed; /* MU_GROUP_ID_MNG_TABLE_API_S_VER_1 */ + +/** + * struct iwl_mu_group_mgmt_notif - VHT MU-MIMO group id notification + * + * @membership_status: a bitmap of MU groups + * @user_position: the position of station in a group. 
If the station is in the + * group then bits (group * 2) is the position -1 + */ +struct iwl_mu_group_mgmt_notif { + __le32 membership_status[2]; + __le32 user_position[4]; +} __packed; /* MU_GROUP_MNG_NTFY_API_S_VER_1 */ + +#endif /* __iwl_fw_api_datapath_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h new file mode 100644 index 000000000000..9f88b61536bc --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h @@ -0,0 +1,345 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
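[Editorial usage sketch, not part of the patch] The MU-MIMO group notification above carries one membership bit per group and a two-bit user position per group, starting at bit (group * 2). A minimal sketch of decoding both, assuming kernel context; the helpers are hypothetical, only the structure comes from datapath.h.

#include <linux/kernel.h>
#include "fw/api/datapath.h"

/* illustrative only: is this station a member of VHT MU group @group (0..63)? */
static bool example_mu_group_member(const struct iwl_mu_group_mgmt_notif *notif,
                                    unsigned int group)
{
        u32 word = le32_to_cpu(notif->membership_status[group / 32]);

        return word & (1u << (group % 32));
}

/* illustrative only: two-bit user position of this station within @group */
static u8 example_mu_user_position(const struct iwl_mu_group_mgmt_notif *notif,
                                   unsigned int group)
{
        u32 word = le32_to_cpu(notif->user_position[(group * 2) / 32]);

        return (word >> ((group * 2) % 32)) & 0x3;
}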
+ * + *****************************************************************************/ +#ifndef __iwl_fw_api_debug_h__ +#define __iwl_fw_api_debug_h__ + +/** + * enum iwl_debug_cmds - debug commands + */ +enum iwl_debug_cmds { + /** + * @LMAC_RD_WR: + * LMAC memory read/write, using &struct iwl_dbg_mem_access_cmd and + * &struct iwl_dbg_mem_access_rsp + */ + LMAC_RD_WR = 0x0, + /** + * @UMAC_RD_WR: + * UMAC memory read/write, using &struct iwl_dbg_mem_access_cmd and + * &struct iwl_dbg_mem_access_rsp + */ + UMAC_RD_WR = 0x1, + /** + * @MFU_ASSERT_DUMP_NTF: + * &struct iwl_mfu_assert_dump_notif + */ + MFU_ASSERT_DUMP_NTF = 0xFE, +}; + +/* Error response/notification */ +enum { + FW_ERR_UNKNOWN_CMD = 0x0, + FW_ERR_INVALID_CMD_PARAM = 0x1, + FW_ERR_SERVICE = 0x2, + FW_ERR_ARC_MEMORY = 0x3, + FW_ERR_ARC_CODE = 0x4, + FW_ERR_WATCH_DOG = 0x5, + FW_ERR_WEP_GRP_KEY_INDX = 0x10, + FW_ERR_WEP_KEY_SIZE = 0x11, + FW_ERR_OBSOLETE_FUNC = 0x12, + FW_ERR_UNEXPECTED = 0xFE, + FW_ERR_FATAL = 0xFF +}; + +/** + * struct iwl_error_resp - FW error indication + * ( REPLY_ERROR = 0x2 ) + * @error_type: one of FW_ERR_* + * @cmd_id: the command ID for which the error occurred + * @reserved1: reserved + * @bad_cmd_seq_num: sequence number of the erroneous command + * @error_service: which service created the error, applicable only if + * error_type = 2, otherwise 0 + * @timestamp: TSF in usecs. + */ +struct iwl_error_resp { + __le32 error_type; + u8 cmd_id; + u8 reserved1; + __le16 bad_cmd_seq_num; + __le32 error_service; + __le64 timestamp; +} __packed; + +#define TX_FIFO_MAX_NUM_9000 8 +#define TX_FIFO_MAX_NUM 15 +#define RX_FIFO_MAX_NUM 2 +#define TX_FIFO_INTERNAL_MAX_NUM 6 + +/** + * struct iwl_shared_mem_cfg_v2 - Shared memory configuration information + * + * @shared_mem_addr: shared memory addr (pre 8000 HW set to 0x0 as MARBH is not + * accessible) + * @shared_mem_size: shared memory size + * @sample_buff_addr: internal sample (mon/adc) buff addr (pre 8000 HW set to + * 0x0 as accessible only via DBGM RDAT) + * @sample_buff_size: internal sample buff size + * @txfifo_addr: start addr of TXF0 (excluding the context table 0.5KB), (pre + * 8000 HW set to 0x0 as not accessible) + * @txfifo_size: size of TXF0 ... TXF7 + * @rxfifo_size: RXF1, RXF2 sizes. If there is no RXF2, it'll have a value of 0 + * @page_buff_addr: used by UMAC and performance debug (page miss analysis), + * when paging is not supported this should be 0 + * @page_buff_size: size of %page_buff_addr + * @rxfifo_addr: Start address of rxFifo + * @internal_txfifo_addr: start address of internalFifo + * @internal_txfifo_size: internal fifos' size + * + * NOTE: on firmware that don't have IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG + * set, the last 3 members don't exist. 
+ */ +struct iwl_shared_mem_cfg_v2 { + __le32 shared_mem_addr; + __le32 shared_mem_size; + __le32 sample_buff_addr; + __le32 sample_buff_size; + __le32 txfifo_addr; + __le32 txfifo_size[TX_FIFO_MAX_NUM_9000]; + __le32 rxfifo_size[RX_FIFO_MAX_NUM]; + __le32 page_buff_addr; + __le32 page_buff_size; + __le32 rxfifo_addr; + __le32 internal_txfifo_addr; + __le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM]; +} __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */ + +/** + * struct iwl_shared_mem_lmac_cfg - LMAC shared memory configuration + * + * @txfifo_addr: start addr of TXF0 (excluding the context table 0.5KB) + * @txfifo_size: size of TX FIFOs + * @rxfifo1_addr: RXF1 addr + * @rxfifo1_size: RXF1 size + */ +struct iwl_shared_mem_lmac_cfg { + __le32 txfifo_addr; + __le32 txfifo_size[TX_FIFO_MAX_NUM]; + __le32 rxfifo1_addr; + __le32 rxfifo1_size; + +} __packed; /* SHARED_MEM_ALLOC_LMAC_API_S_VER_1 */ + +/** + * struct iwl_shared_mem_cfg - Shared memory configuration information + * + * @shared_mem_addr: shared memory address + * @shared_mem_size: shared memory size + * @sample_buff_addr: internal sample (mon/adc) buff addr + * @sample_buff_size: internal sample buff size + * @rxfifo2_addr: start addr of RXF2 + * @rxfifo2_size: size of RXF2 + * @page_buff_addr: used by UMAC and performance debug (page miss analysis), + * when paging is not supported this should be 0 + * @page_buff_size: size of %page_buff_addr + * @lmac_num: number of LMACs (1 or 2) + * @lmac_smem: per - LMAC smem data + */ +struct iwl_shared_mem_cfg { + __le32 shared_mem_addr; + __le32 shared_mem_size; + __le32 sample_buff_addr; + __le32 sample_buff_size; + __le32 rxfifo2_addr; + __le32 rxfifo2_size; + __le32 page_buff_addr; + __le32 page_buff_size; + __le32 lmac_num; + struct iwl_shared_mem_lmac_cfg lmac_smem[2]; +} __packed; /* SHARED_MEM_ALLOC_API_S_VER_3 */ + +/** + * struct iwl_mfuart_load_notif - mfuart image version & status + * ( MFUART_LOAD_NOTIFICATION = 0xb1 ) + * @installed_ver: installed image version + * @external_ver: external image version + * @status: MFUART loading status + * @duration: MFUART loading time + * @image_size: MFUART image size in bytes +*/ +struct iwl_mfuart_load_notif { + __le32 installed_ver; + __le32 external_ver; + __le32 status; + __le32 duration; + /* image size valid only in v2 of the command */ + __le32 image_size; +} __packed; /* MFU_LOADER_NTFY_API_S_VER_2 */ + +/** + * struct iwl_mfu_assert_dump_notif - mfuart dump logs + * ( MFU_ASSERT_DUMP_NTF = 0xfe ) + * @assert_id: mfuart assert id that cause the notif + * @curr_reset_num: number of asserts since uptime + * @index_num: current chunk id + * @parts_num: total number of chunks + * @data_size: number of data bytes sent + * @data: data buffer + */ +struct iwl_mfu_assert_dump_notif { + __le32 assert_id; + __le32 curr_reset_num; + __le16 index_num; + __le16 parts_num; + __le32 data_size; + __le32 data[0]; +} __packed; /* MFU_DUMP_ASSERT_API_S_VER_1 */ + +/** + * enum iwl_mvm_marker_id - marker ids + * + * The ids for different type of markers to insert into the usniffer logs + * + * @MARKER_ID_TX_FRAME_LATENCY: TX latency marker + */ +enum iwl_mvm_marker_id { + MARKER_ID_TX_FRAME_LATENCY = 1, +}; /* MARKER_ID_API_E_VER_1 */ + +/** + * struct iwl_mvm_marker - mark info into the usniffer logs + * + * (MARKER_CMD = 0xcb) + * + * Mark the UTC time stamp into the usniffer logs together with additional + * metadata, so the usniffer output can be parsed. + * In the command response the ucode will return the GP2 time. 
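[Editorial usage sketch, not part of the patch] A brief aside on the shared-memory configuration reported above: the TX FIFO sizes arrive as an array of little-endian dwords, so totalling them is a simple loop. A minimal sketch, assuming kernel context; the helper is hypothetical, only the struct and TX_FIFO_MAX_NUM_9000 come from debug.h (and note the last three v2 members are only present when the extended shared-mem capability is set).

#include <linux/kernel.h>
#include "fw/api/debug.h"

/* illustrative only: total bytes allocated to TXF0..TXF7 in the v2 layout */
static u32 example_total_txfifo_size(const struct iwl_shared_mem_cfg_v2 *cfg)
{
        u32 total = 0;
        int i;

        for (i = 0; i < TX_FIFO_MAX_NUM_9000; i++)
                total += le32_to_cpu(cfg->txfifo_size[i]);

        return total;
}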
+ * + * @dw_len: The amount of dwords following this byte including this byte. + * @marker_id: A unique marker id (iwl_mvm_marker_id). + * @reserved: reserved. + * @timestamp: in milliseconds since 1970-01-01 00:00:00 UTC + * @metadata: additional meta data that will be written to the unsiffer log + */ +struct iwl_mvm_marker { + u8 dw_len; + u8 marker_id; + __le16 reserved; + __le64 timestamp; + __le32 metadata[0]; +} __packed; /* MARKER_API_S_VER_1 */ + +/* Operation types for the debug mem access */ +enum { + DEBUG_MEM_OP_READ = 0, + DEBUG_MEM_OP_WRITE = 1, + DEBUG_MEM_OP_WRITE_BYTES = 2, +}; + +#define DEBUG_MEM_MAX_SIZE_DWORDS 32 + +/** + * struct iwl_dbg_mem_access_cmd - Request the device to read/write memory + * @op: DEBUG_MEM_OP_* + * @addr: address to read/write from/to + * @len: in dwords, to read/write + * @data: for write opeations, contains the source buffer + */ +struct iwl_dbg_mem_access_cmd { + __le32 op; + __le32 addr; + __le32 len; + __le32 data[]; +} __packed; /* DEBUG_(U|L)MAC_RD_WR_CMD_API_S_VER_1 */ + +/* Status responses for the debug mem access */ +enum { + DEBUG_MEM_STATUS_SUCCESS = 0x0, + DEBUG_MEM_STATUS_FAILED = 0x1, + DEBUG_MEM_STATUS_LOCKED = 0x2, + DEBUG_MEM_STATUS_HIDDEN = 0x3, + DEBUG_MEM_STATUS_LENGTH = 0x4, +}; + +/** + * struct iwl_dbg_mem_access_rsp - Response to debug mem commands + * @status: DEBUG_MEM_STATUS_* + * @len: read dwords (0 for write operations) + * @data: contains the read DWs + */ +struct iwl_dbg_mem_access_rsp { + __le32 status; + __le32 len; + __le32 data[]; +} __packed; /* DEBUG_(U|L)MAC_RD_WR_RSP_API_S_VER_1 */ + +#define CONT_REC_COMMAND_SIZE 80 +#define ENABLE_CONT_RECORDING 0x15 +#define DISABLE_CONT_RECORDING 0x16 + +/* + * struct iwl_continuous_record_mode - recording mode + */ +struct iwl_continuous_record_mode { + __le16 enable_recording; +} __packed; + +/* + * struct iwl_continuous_record_cmd - enable/disable continuous recording + */ +struct iwl_continuous_record_cmd { + struct iwl_continuous_record_mode record_mode; + u8 pad[CONT_REC_COMMAND_SIZE - + sizeof(struct iwl_continuous_record_mode)]; +} __packed; + +#endif /* __iwl_fw_api_debug_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h b/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h new file mode 100644 index 000000000000..befc3b126041 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h @@ -0,0 +1,183 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
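[Editorial usage sketch, not part of the patch] The debug memory-access command above drives both LMAC_RD_WR and UMAC_RD_WR. A minimal sketch of filling a read request, assuming kernel context; the helper is hypothetical, the opcodes, limit and struct come from debug.h, and actually sending the host command is out of scope.

#include <linux/kernel.h>
#include "fw/api/debug.h"

/* illustrative only: prepare a read of @len_dwords dwords at @addr */
static void example_fill_mem_read(struct iwl_dbg_mem_access_cmd *cmd,
                                  u32 addr, u32 len_dwords)
{
        /* reads carry no payload; @data is only used for write operations */
        cmd->op = cpu_to_le32(DEBUG_MEM_OP_READ);
        cmd->addr = cpu_to_le32(addr);
        cmd->len = cpu_to_le32(min_t(u32, len_dwords,
                                     DEBUG_MEM_MAX_SIZE_DWORDS));
}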
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __iwl_fw_api_filter_h__ +#define __iwl_fw_api_filter_h__ + +#include "fw/api/mac.h" + +#define MAX_PORT_ID_NUM 2 +#define MAX_MCAST_FILTERING_ADDRESSES 256 + +/** + * struct iwl_mcast_filter_cmd - configure multicast filter. + * @filter_own: Set 1 to filter out multicast packets sent by station itself + * @port_id: Multicast MAC addresses array specifier. This is a strange way + * to identify network interface adopted in host-device IF. + * It is used by FW as index in array of addresses. This array has + * MAX_PORT_ID_NUM members. + * @count: Number of MAC addresses in the array + * @pass_all: Set 1 to pass all multicast packets. + * @bssid: current association BSSID. + * @reserved: reserved + * @addr_list: Place holder for array of MAC addresses. + * IMPORTANT: add padding if necessary to ensure DWORD alignment. + */ +struct iwl_mcast_filter_cmd { + u8 filter_own; + u8 port_id; + u8 count; + u8 pass_all; + u8 bssid[6]; + u8 reserved[2]; + u8 addr_list[0]; +} __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */ + +#define MAX_BCAST_FILTERS 8 +#define MAX_BCAST_FILTER_ATTRS 2 + +/** + * enum iwl_mvm_bcast_filter_attr_offset - written by fw for each Rx packet + * @BCAST_FILTER_OFFSET_PAYLOAD_START: offset is from payload start. + * @BCAST_FILTER_OFFSET_IP_END: offset is from ip header end (i.e. + * start of ip payload). + */ +enum iwl_mvm_bcast_filter_attr_offset { + BCAST_FILTER_OFFSET_PAYLOAD_START = 0, + BCAST_FILTER_OFFSET_IP_END = 1, +}; + +/** + * struct iwl_fw_bcast_filter_attr - broadcast filter attribute + * @offset_type: &enum iwl_mvm_bcast_filter_attr_offset. + * @offset: starting offset of this pattern. 
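[Editorial usage sketch, not part of the patch] The multicast filter command above ends in a variable-length addr_list whose total size must stay DWORD aligned, as its kernel-doc requires. A minimal allocation sketch, assuming kernel context; the helper is hypothetical, only the struct comes from filter.h.

#include <linux/slab.h>
#include <linux/etherdevice.h>
#include "fw/api/filter.h"

/* illustrative only: allocate a command with room for @count MAC addresses */
static struct iwl_mcast_filter_cmd *example_alloc_mcast_cmd(u8 count)
{
        size_t len = ALIGN(sizeof(struct iwl_mcast_filter_cmd) +
                           count * ETH_ALEN, 4);
        struct iwl_mcast_filter_cmd *cmd = kzalloc(len, GFP_KERNEL);

        if (!cmd)
                return NULL;

        cmd->count = count;     /* caller fills addr_list[] afterwards */
        cmd->pass_all = 0;      /* filter according to addr_list */
        return cmd;
}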
+ * @reserved1: reserved + * @val: value to match - big endian (MSB is the first + * byte to match from offset pos). + * @mask: mask to match (big endian). + */ +struct iwl_fw_bcast_filter_attr { + u8 offset_type; + u8 offset; + __le16 reserved1; + __be32 val; + __be32 mask; +} __packed; /* BCAST_FILTER_ATT_S_VER_1 */ + +/** + * enum iwl_mvm_bcast_filter_frame_type - filter frame type + * @BCAST_FILTER_FRAME_TYPE_ALL: consider all frames. + * @BCAST_FILTER_FRAME_TYPE_IPV4: consider only ipv4 frames + */ +enum iwl_mvm_bcast_filter_frame_type { + BCAST_FILTER_FRAME_TYPE_ALL = 0, + BCAST_FILTER_FRAME_TYPE_IPV4 = 1, +}; + +/** + * struct iwl_fw_bcast_filter - broadcast filter + * @discard: discard frame (1) or let it pass (0). + * @frame_type: &enum iwl_mvm_bcast_filter_frame_type. + * @reserved1: reserved + * @num_attrs: number of valid attributes in this filter. + * @attrs: attributes of this filter. a filter is considered matched + * only when all its attributes are matched (i.e. AND relationship) + */ +struct iwl_fw_bcast_filter { + u8 discard; + u8 frame_type; + u8 num_attrs; + u8 reserved1; + struct iwl_fw_bcast_filter_attr attrs[MAX_BCAST_FILTER_ATTRS]; +} __packed; /* BCAST_FILTER_S_VER_1 */ + +/** + * struct iwl_fw_bcast_mac - per-mac broadcast filtering configuration. + * @default_discard: default action for this mac (discard (1) / pass (0)). + * @reserved1: reserved + * @attached_filters: bitmap of relevant filters for this mac. + */ +struct iwl_fw_bcast_mac { + u8 default_discard; + u8 reserved1; + __le16 attached_filters; +} __packed; /* BCAST_MAC_CONTEXT_S_VER_1 */ + +/** + * struct iwl_bcast_filter_cmd - broadcast filtering configuration + * @disable: enable (0) / disable (1) + * @max_bcast_filters: max number of filters (MAX_BCAST_FILTERS) + * @max_macs: max number of macs (NUM_MAC_INDEX_DRIVER) + * @reserved1: reserved + * @filters: broadcast filters + * @macs: broadcast filtering configuration per-mac + */ +struct iwl_bcast_filter_cmd { + u8 disable; + u8 max_bcast_filters; + u8 max_macs; + u8 reserved1; + struct iwl_fw_bcast_filter filters[MAX_BCAST_FILTERS]; + struct iwl_fw_bcast_mac macs[NUM_MAC_INDEX_DRIVER]; +} __packed; /* BCAST_FILTERING_HCMD_API_S_VER_1 */ + +#endif /* __iwl_fw_api_filter_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/led.h b/drivers/net/wireless/intel/iwlwifi/fw/api/led.h new file mode 100644 index 000000000000..b30c9d229d6e --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/led.h @@ -0,0 +1,71 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
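[Editorial usage sketch, not part of the patch] Putting the broadcast-filter pieces above together: a filter is a frame type plus up to two pattern attributes, matched with big-endian value/mask pairs. A minimal sketch of one filter that drops IPv4 frames carrying a given 32-bit pattern right after the IP header; the helper is hypothetical, the structures and constants come from filter.h.

#include <linux/kernel.h>
#include "fw/api/filter.h"

/* illustrative only: discard IPv4 broadcasts whose first payload dword matches */
static void example_fill_bcast_filter(struct iwl_fw_bcast_filter *flt,
                                      __be32 pattern)
{
        flt->discard = 1;                               /* drop on match */
        flt->frame_type = BCAST_FILTER_FRAME_TYPE_IPV4;
        flt->num_attrs = 1;

        flt->attrs[0].offset_type = BCAST_FILTER_OFFSET_IP_END;
        flt->attrs[0].offset = 0;                       /* start of IP payload */
        flt->attrs[0].val = pattern;
        flt->attrs[0].mask = cpu_to_be32(0xffffffff);   /* compare all 32 bits */
}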
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __iwl_fw_api_led_h__ +#define __iwl_fw_api_led_h__ + +/** + * struct iwl_led_cmd - LED switching command + * + * @status: LED status (on/off) + */ +struct iwl_led_cmd { + __le32 status; +} __packed; /* LEDS_CMD_API_S_VER_2 */ + +#endif /* __iwl_fw_api_led_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h new file mode 100644 index 000000000000..39c89e85fd2f --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h @@ -0,0 +1,152 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __iwl_fw_api_mac_cfg_h__ +#define __iwl_fw_api_mac_cfg_h__ + +/** + * enum iwl_mac_conf_subcmd_ids - mac configuration command IDs + */ +enum iwl_mac_conf_subcmd_ids { + /** + * @LINK_QUALITY_MEASUREMENT_CMD: &struct iwl_link_qual_msrmnt_cmd + */ + LINK_QUALITY_MEASUREMENT_CMD = 0x1, + + /** + * @LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF: + * &struct iwl_link_qual_msrmnt_notif + */ + LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF = 0xFE, + + /** + * @CHANNEL_SWITCH_NOA_NOTIF: &struct iwl_channel_switch_noa_notif + */ + CHANNEL_SWITCH_NOA_NOTIF = 0xFF, +}; + +#define LQM_NUMBER_OF_STATIONS_IN_REPORT 16 + +enum iwl_lqm_cmd_operatrions { + LQM_CMD_OPERATION_START_MEASUREMENT = 0x01, + LQM_CMD_OPERATION_STOP_MEASUREMENT = 0x02, +}; + +enum iwl_lqm_status { + LQM_STATUS_SUCCESS = 0, + LQM_STATUS_TIMEOUT = 1, + LQM_STATUS_ABORT = 2, +}; + +/** + * struct iwl_link_qual_msrmnt_cmd - Link Quality Measurement command + * @cmd_operation: command operation to be performed (start or stop) + * as defined above. + * @mac_id: MAC ID the measurement applies to. + * @measurement_time: time of the total measurement to be performed, in uSec. + * @timeout: maximum time allowed until a response is sent, in uSec. + */ +struct iwl_link_qual_msrmnt_cmd { + __le32 cmd_operation; + __le32 mac_id; + __le32 measurement_time; + __le32 timeout; +} __packed /* LQM_CMD_API_S_VER_1 */; + +/** + * struct iwl_link_qual_msrmnt_notif - Link Quality Measurement notification + * + * @frequent_stations_air_time: an array containing the total air time + * (in uSec) used by the most frequently transmitting stations. 
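[Editorial usage sketch, not part of the patch] The link quality measurement command above takes its operation code plus times in microseconds. A minimal sketch of filling a "start" request, assuming kernel context; the helper is hypothetical, the enum and struct come from mac-cfg.h.

#include <linux/kernel.h>
#include "fw/api/mac-cfg.h"

/* illustrative only: start an LQM run on @mac_id for @duration_usec */
static void example_fill_lqm_start(struct iwl_link_qual_msrmnt_cmd *cmd,
                                   u32 mac_id, u32 duration_usec,
                                   u32 timeout_usec)
{
        cmd->cmd_operation = cpu_to_le32(LQM_CMD_OPERATION_START_MEASUREMENT);
        cmd->mac_id = cpu_to_le32(mac_id);
        cmd->measurement_time = cpu_to_le32(duration_usec);
        cmd->timeout = cpu_to_le32(timeout_usec);
}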
+ * @number_of_stations: the number of uniqe stations included in the array + * (a number between 0 to 16) + * @total_air_time_other_stations: the total air time (uSec) used by all the + * stations which are not included in the above report. + * @time_in_measurement_window: the total time in uSec in which a measurement + * took place. + * @tx_frame_dropped: the number of TX frames dropped due to retry limit during + * measurement + * @mac_id: MAC ID the measurement applies to. + * @status: return status. may be one of the LQM_STATUS_* defined above. + * @reserved: reserved. + */ +struct iwl_link_qual_msrmnt_notif { + __le32 frequent_stations_air_time[LQM_NUMBER_OF_STATIONS_IN_REPORT]; + __le32 number_of_stations; + __le32 total_air_time_other_stations; + __le32 time_in_measurement_window; + __le32 tx_frame_dropped; + __le32 mac_id; + __le32 status; + u8 reserved[12]; +} __packed; /* LQM_MEASUREMENT_COMPLETE_NTF_API_S_VER1 */ + +/** + * struct iwl_channel_switch_noa_notif - Channel switch NOA notification + * + * @id_and_color: ID and color of the MAC + */ +struct iwl_channel_switch_noa_notif { + __le32 id_and_color; +} __packed; /* CHANNEL_SWITCH_START_NTFY_API_S_VER_1 */ + +#endif /* __iwl_fw_api_mac_cfg_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h similarity index 93% rename from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h rename to drivers/net/wireless/intel/iwlwifi/fw/api/mac.h index 0c3350ad2f2f..f2e31e040a7b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -16,11 +17,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -31,6 +27,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -60,8 +57,8 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ -#ifndef __fw_api_mac_h__ -#define __fw_api_mac_h__ +#ifndef __iwl_fw_api_mac_h__ +#define __iwl_fw_api_mac_h__ /* * The first MAC indices (starting from 0) are available to the driver, @@ -76,8 +73,6 @@ #define IWL_MVM_STATION_COUNT 16 #define IWL_MVM_INVALID_STA 0xFF -#define IWL_MVM_TDLS_STA_COUNT 4 - enum iwl_ac { AC_BK, AC_BE, @@ -393,4 +388,22 @@ struct iwl_nonqos_seq_query_cmd { __le16 reserved; } __packed; /* NON_QOS_TX_COUNTER_GET_SET_API_S_VER_1 */ -#endif /* __fw_api_mac_h__ */ +/** + * struct iwl_missed_beacons_notif - information on missed beacons + * ( MISSED_BEACONS_NOTIFICATION = 0xa2 ) + * @mac_id: interface ID + * @consec_missed_beacons_since_last_rx: number of consecutive missed + * beacons since last RX. 
+ * @consec_missed_beacons: number of consecutive missed beacons + * @num_expected_beacons: number of expected beacons + * @num_recvd_beacons: number of received beacons + */ +struct iwl_missed_beacons_notif { + __le32 mac_id; + __le32 consec_missed_beacons_since_last_rx; + __le32 consec_missed_beacons; + __le32 num_expected_beacons; + __le32 num_recvd_beacons; +} __packed; /* MISSED_BEACON_NTFY_API_S_VER_3 */ + +#endif /* __iwl_fw_api_mac_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h new file mode 100644 index 000000000000..00bc7a25dece --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h @@ -0,0 +1,386 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
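[Editorial usage sketch, not part of the patch] A driver typically compares the counters in the missed-beacons notification above against a threshold before declaring the link lost. A minimal sketch, assuming kernel context; the threshold value and helper are hypothetical, only the structure comes from mac.h.

#include <linux/kernel.h>
#include "fw/api/mac.h"

#define EXAMPLE_MISSED_BEACONS_THRESHOLD 8      /* hypothetical policy value */

/* illustrative only: treat a long run of missed beacons with no RX as a lost link */
static bool example_connection_looks_lost(const struct iwl_missed_beacons_notif *nb)
{
        u32 missed = le32_to_cpu(nb->consec_missed_beacons_since_last_rx);

        return missed > EXAMPLE_MISSED_BEACONS_THRESHOLD;
}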
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __iwl_fw_api_nvm_reg_h__ +#define __iwl_fw_api_nvm_reg_h__ + +/** + * enum iwl_regulatory_and_nvm_subcmd_ids - regulatory/NVM commands + */ +enum iwl_regulatory_and_nvm_subcmd_ids { + /** + * @NVM_ACCESS_COMPLETE: &struct iwl_nvm_access_complete_cmd + */ + NVM_ACCESS_COMPLETE = 0x0, + + /** + * @NVM_GET_INFO: + * Command is &struct iwl_nvm_get_info, + * response is &struct iwl_nvm_get_info_rsp + */ + NVM_GET_INFO = 0x2, +}; + +/** + * enum iwl_nvm_access_op - NVM access opcode + * @IWL_NVM_READ: read NVM + * @IWL_NVM_WRITE: write NVM + */ +enum iwl_nvm_access_op { + IWL_NVM_READ = 0, + IWL_NVM_WRITE = 1, +}; + +/** + * enum iwl_nvm_access_target - target of the NVM_ACCESS_CMD + * @NVM_ACCESS_TARGET_CACHE: access the cache + * @NVM_ACCESS_TARGET_OTP: access the OTP + * @NVM_ACCESS_TARGET_EEPROM: access the EEPROM + */ +enum iwl_nvm_access_target { + NVM_ACCESS_TARGET_CACHE = 0, + NVM_ACCESS_TARGET_OTP = 1, + NVM_ACCESS_TARGET_EEPROM = 2, +}; + +/** + * enum iwl_nvm_section_type - section types for NVM_ACCESS_CMD + * @NVM_SECTION_TYPE_SW: software section + * @NVM_SECTION_TYPE_REGULATORY: regulatory section + * @NVM_SECTION_TYPE_CALIBRATION: calibration section + * @NVM_SECTION_TYPE_PRODUCTION: production section + * @NVM_SECTION_TYPE_MAC_OVERRIDE: MAC override section + * @NVM_SECTION_TYPE_PHY_SKU: PHY SKU section + * @NVM_MAX_NUM_SECTIONS: number of sections + */ +enum iwl_nvm_section_type { + NVM_SECTION_TYPE_SW = 1, + NVM_SECTION_TYPE_REGULATORY = 3, + NVM_SECTION_TYPE_CALIBRATION = 4, + NVM_SECTION_TYPE_PRODUCTION = 5, + NVM_SECTION_TYPE_MAC_OVERRIDE = 11, + NVM_SECTION_TYPE_PHY_SKU = 12, + NVM_MAX_NUM_SECTIONS = 13, +}; + +/** + * struct iwl_nvm_access_cmd - Request the device to send an NVM section + * @op_code: &enum iwl_nvm_access_op + * @target: &enum iwl_nvm_access_target + * @type: &enum iwl_nvm_section_type + * @offset: offset in bytes into the section + * @length: in bytes, to read/write + * @data: if write operation, the data to write. On read its empty + */ +struct iwl_nvm_access_cmd { + u8 op_code; + u8 target; + __le16 type; + __le16 offset; + __le16 length; + u8 data[]; +} __packed; /* NVM_ACCESS_CMD_API_S_VER_2 */ + +/** + * struct iwl_nvm_access_resp_ver2 - response to NVM_ACCESS_CMD + * @offset: offset in bytes into the section + * @length: in bytes, either how much was written or read + * @type: NVM_SECTION_TYPE_* + * @status: 0 for success, fail otherwise + * @data: if read operation, the data returned. Empty on write. 
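[Editorial usage sketch, not part of the patch] The NVM access command above combines an opcode, a target and a section type. A minimal sketch of requesting a chunk of the software section from the OTP, assuming kernel context; the helper is hypothetical, the enums and struct come from nvm-reg.h.

#include <linux/kernel.h>
#include "fw/api/nvm-reg.h"

/* illustrative only: read @length bytes at @offset of the SW NVM section */
static void example_fill_nvm_read(struct iwl_nvm_access_cmd *cmd,
                                  u16 offset, u16 length)
{
        cmd->op_code = IWL_NVM_READ;
        cmd->target = NVM_ACCESS_TARGET_OTP;
        cmd->type = cpu_to_le16(NVM_SECTION_TYPE_SW);
        cmd->offset = cpu_to_le16(offset);
        cmd->length = cpu_to_le16(length);
        /* @data stays empty for reads; it is only filled for writes */
}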
+ */ +struct iwl_nvm_access_resp { + __le16 offset; + __le16 length; + __le16 type; + __le16 status; + u8 data[]; +} __packed; /* NVM_ACCESS_CMD_RESP_API_S_VER_2 */ + +/* + * struct iwl_nvm_get_info - request to get NVM data + */ +struct iwl_nvm_get_info { + __le32 reserved; +} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_S_VER_1 */ + +/** + * enum iwl_nvm_info_general_flags - flags in NVM_GET_INFO resp + * @NVM_GENERAL_FLAGS_EMPTY_OTP: 1 if OTP is empty + */ +enum iwl_nvm_info_general_flags { + NVM_GENERAL_FLAGS_EMPTY_OTP = BIT(0), +}; + +/** + * struct iwl_nvm_get_info_general - general NVM data + * @flags: bit 0: 1 - empty, 0 - non-empty + * @nvm_version: nvm version + * @board_type: board type + * @reserved: reserved + */ +struct iwl_nvm_get_info_general { + __le32 flags; + __le16 nvm_version; + u8 board_type; + u8 reserved; +} __packed; /* GRP_REGULATORY_NVM_GET_INFO_GENERAL_S_VER_1 */ + +/** + * struct iwl_nvm_get_info_sku - mac information + * @enable_24g: band 2.4G enabled + * @enable_5g: band 5G enabled + * @enable_11n: 11n enabled + * @enable_11ac: 11ac enabled + * @mimo_disable: MIMO enabled + * @ext_crypto: Extended crypto enabled + */ +struct iwl_nvm_get_info_sku { + __le32 enable_24g; + __le32 enable_5g; + __le32 enable_11n; + __le32 enable_11ac; + __le32 mimo_disable; + __le32 ext_crypto; +} __packed; /* GRP_REGULATORY_NVM_GET_INFO_MAC_SKU_SECTION_S_VER_1 */ + +/** + * struct iwl_nvm_get_info_phy - phy information + * @tx_chains: BIT 0 chain A, BIT 1 chain B + * @rx_chains: BIT 0 chain A, BIT 1 chain B + */ +struct iwl_nvm_get_info_phy { + __le32 tx_chains; + __le32 rx_chains; +} __packed; /* GRP_REGULATORY_NVM_GET_INFO_PHY_SKU_SECTION_S_VER_1 */ + +#define IWL_NUM_CHANNELS (51) + +/** + * struct iwl_nvm_get_info_regulatory - regulatory information + * @lar_enabled: is LAR enabled + * @channel_profile: regulatory data of this channel + * @reserved: reserved + */ +struct iwl_nvm_get_info_regulatory { + __le32 lar_enabled; + __le16 channel_profile[IWL_NUM_CHANNELS]; + __le16 reserved; +} __packed; /* GRP_REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_1 */ + +/** + * struct iwl_nvm_get_info_rsp - response to get NVM data + * @general: general NVM data + * @mac_sku: data relating to MAC sku + * @phy_sku: data relating to PHY sku + * @regulatory: regulatory data + */ +struct iwl_nvm_get_info_rsp { + struct iwl_nvm_get_info_general general; + struct iwl_nvm_get_info_sku mac_sku; + struct iwl_nvm_get_info_phy phy_sku; + struct iwl_nvm_get_info_regulatory regulatory; +} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_RSP_S_VER_1 */ + +/** + * struct iwl_nvm_access_complete_cmd - NVM_ACCESS commands are completed + * @reserved: reserved + */ +struct iwl_nvm_access_complete_cmd { + __le32 reserved; +} __packed; /* NVM_ACCESS_COMPLETE_CMD_API_S_VER_1 */ + +/** + * struct iwl_mcc_update_cmd_v1 - Request the device to update geographic + * regulatory profile according to the given MCC (Mobile Country Code). + * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain. + * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the + * MCC in the cmd response will be the relevant MCC in the NVM. 
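[Editorial usage sketch, not part of the patch] Once the NVM_GET_INFO response above arrives, the driver can decide whether the reported NVM is worth parsing. A minimal sketch of two such checks, assuming kernel context; the helper is hypothetical, the flag and structs come from nvm-reg.h.

#include <linux/kernel.h>
#include "fw/api/nvm-reg.h"

/* illustrative only: basic sanity checks on the NVM_GET_INFO response */
static bool example_nvm_usable(const struct iwl_nvm_get_info_rsp *rsp)
{
        u32 flags = le32_to_cpu(rsp->general.flags);

        /* an empty OTP means there is no factory-programmed NVM to parse */
        if (flags & NVM_GENERAL_FLAGS_EMPTY_OTP)
                return false;

        /* with both bands disabled there is nothing to register with mac80211 */
        return le32_to_cpu(rsp->mac_sku.enable_24g) ||
               le32_to_cpu(rsp->mac_sku.enable_5g);
}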
+ * @mcc: given mobile country code + * @source_id: the source from where we got the MCC, see iwl_mcc_source + * @reserved: reserved for alignment + */ +struct iwl_mcc_update_cmd_v1 { + __le16 mcc; + u8 source_id; + u8 reserved; +} __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_1 */ + +/** + * struct iwl_mcc_update_cmd - Request the device to update geographic + * regulatory profile according to the given MCC (Mobile Country Code). + * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain. + * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the + * MCC in the cmd response will be the relevant MCC in the NVM. + * @mcc: given mobile country code + * @source_id: the source from where we got the MCC, see iwl_mcc_source + * @reserved: reserved for alignment + * @key: integrity key for MCC API OEM testing + * @reserved2: reserved + */ +struct iwl_mcc_update_cmd { + __le16 mcc; + u8 source_id; + u8 reserved; + __le32 key; + u8 reserved2[20]; +} __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_2 */ + +/** + * struct iwl_mcc_update_resp_v1 - response to MCC_UPDATE_CMD. + * Contains the new channel control profile map, if changed, and the new MCC + * (mobile country code). + * The new MCC may be different than what was requested in MCC_UPDATE_CMD. + * @status: see &enum iwl_mcc_update_status + * @mcc: the new applied MCC + * @cap: capabilities for all channels which matches the MCC + * @source_id: the MCC source, see iwl_mcc_source + * @n_channels: number of channels in @channels_data (may be 14, 39, 50 or 51 + * channels, depending on platform) + * @channels: channel control data map, DWORD for each channel. Only the first + * 16bits are used. + */ +struct iwl_mcc_update_resp_v1 { + __le32 status; + __le16 mcc; + u8 cap; + u8 source_id; + __le32 n_channels; + __le32 channels[0]; +} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_1 */ + +/** + * struct iwl_mcc_update_resp - response to MCC_UPDATE_CMD. + * Contains the new channel control profile map, if changed, and the new MCC + * (mobile country code). + * The new MCC may be different than what was requested in MCC_UPDATE_CMD. + * @status: see &enum iwl_mcc_update_status + * @mcc: the new applied MCC + * @cap: capabilities for all channels which matches the MCC + * @source_id: the MCC source, see iwl_mcc_source + * @time: time elapsed from the MCC test start (in 30 seconds TU) + * @reserved: reserved. + * @n_channels: number of channels in @channels_data (may be 14, 39, 50 or 51 + * channels, depending on platform) + * @channels: channel control data map, DWORD for each channel. Only the first + * 16bits are used. + */ +struct iwl_mcc_update_resp { + __le32 status; + __le16 mcc; + u8 cap; + u8 source_id; + __le16 time; + __le16 reserved; + __le32 n_channels; + __le32 channels[0]; +} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_2 */ + +/** + * struct iwl_mcc_chub_notif - chub notifies of mcc change + * (MCC_CHUB_UPDATE_CMD = 0xc9) + * The Chub (Communication Hub, CommsHUB) is a HW component that connects to + * the cellular and connectivity cores that gets updates of the mcc, and + * notifies the ucode directly of any mcc change. + * The ucode requests the driver to request the device to update geographic + * regulatory profile according to the given MCC (Mobile Country Code). + * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain. + * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the + * MCC in the cmd response will be the relevant MCC in the NVM. 
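[Editorial usage sketch, not part of the patch] The MCC_UPDATE response above is variable sized: one channel-control dword per reported channel. A minimal length check before touching the channel map, assuming kernel context; the helper is hypothetical, the struct comes from nvm-reg.h.

#include <linux/kernel.h>
#include "fw/api/nvm-reg.h"

/* illustrative only: does the packet actually contain n_channels dwords? */
static bool example_mcc_resp_len_ok(const struct iwl_mcc_update_resp *resp,
                                    size_t pkt_len)
{
        u32 n_channels = le32_to_cpu(resp->n_channels);

        return pkt_len >= sizeof(*resp) + n_channels * sizeof(__le32);
}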
+ * @mcc: given mobile country code + * @source_id: identity of the change originator, see iwl_mcc_source + * @reserved1: reserved for alignment + */ +struct iwl_mcc_chub_notif { + __le16 mcc; + u8 source_id; + u8 reserved1; +} __packed; /* LAR_MCC_NOTIFY_S */ + +enum iwl_mcc_update_status { + MCC_RESP_NEW_CHAN_PROFILE, + MCC_RESP_SAME_CHAN_PROFILE, + MCC_RESP_INVALID, + MCC_RESP_NVM_DISABLED, + MCC_RESP_ILLEGAL, + MCC_RESP_LOW_PRIORITY, + MCC_RESP_TEST_MODE_ACTIVE, + MCC_RESP_TEST_MODE_NOT_ACTIVE, + MCC_RESP_TEST_MODE_DENIAL_OF_SERVICE, +}; + +enum iwl_mcc_source { + MCC_SOURCE_OLD_FW = 0, + MCC_SOURCE_ME = 1, + MCC_SOURCE_BIOS = 2, + MCC_SOURCE_3G_LTE_HOST = 3, + MCC_SOURCE_3G_LTE_DEVICE = 4, + MCC_SOURCE_WIFI = 5, + MCC_SOURCE_RESERVED = 6, + MCC_SOURCE_DEFAULT = 7, + MCC_SOURCE_UNINITIALIZED = 8, + MCC_SOURCE_MCC_API = 9, + MCC_SOURCE_GET_CURRENT = 0x10, + MCC_SOURCE_GETTING_MCC_TEST_MODE = 0x11, +}; + +#endif /* __iwl_fw_api_nvm_reg_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h new file mode 100644 index 000000000000..53cab993068f --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h @@ -0,0 +1,101 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
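[Editorial usage sketch, not part of the patch] As the kernel-doc above explains, the MCC travels as two upper-case ASCII letters ('ZZ' reverting to the NVM default). A minimal sketch of building an update command from a country string, assuming kernel context; the helper is hypothetical, the struct and source enum come from nvm-reg.h.

#include <linux/kernel.h>
#include <linux/string.h>
#include "fw/api/nvm-reg.h"

/* illustrative only: request a regulatory update for the given alpha2 code */
static void example_fill_mcc_update(struct iwl_mcc_update_cmd *cmd,
                                    const char *alpha2,
                                    enum iwl_mcc_source source)
{
        memset(cmd, 0, sizeof(*cmd));
        /* two ASCII upper-case letters, first letter in the high byte */
        cmd->mcc = cpu_to_le16(alpha2[0] << 8 | alpha2[1]);
        cmd->source_id = source;
}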
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __iwl_fw_api_offload_h__ +#define __iwl_fw_api_offload_h__ + +/** + * enum iwl_prot_offload_subcmd_ids - protocol offload commands + */ +enum iwl_prot_offload_subcmd_ids { + /** + * @STORED_BEACON_NTF: &struct iwl_stored_beacon_notif + */ + STORED_BEACON_NTF = 0xFF, +}; + +#define MAX_STORED_BEACON_SIZE 600 + +/** + * struct iwl_stored_beacon_notif - Stored beacon notification + * + * @system_time: system time on air rise + * @tsf: TSF on air rise + * @beacon_timestamp: beacon on air rise + * @band: band, matches &RX_RES_PHY_FLAGS_BAND_24 definition + * @channel: channel this beacon was received on + * @rates: rate in ucode internal format + * @byte_count: frame's byte count + * @data: beacon data, length in @byte_count + */ +struct iwl_stored_beacon_notif { + __le32 system_time; + __le64 tsf; + __le32 beacon_timestamp; + __le16 band; + __le16 channel; + __le32 rates; + __le32 byte_count; + u8 data[MAX_STORED_BEACON_SIZE]; +} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_2 */ + +#endif /* __iwl_fw_api_offload_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/paging.h b/drivers/net/wireless/intel/iwlwifi/fw/api/paging.h new file mode 100644 index 000000000000..e76f9cd4473d --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/paging.h @@ -0,0 +1,108 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. 
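[Editorial usage sketch, not part of the patch] The stored-beacon notification above declares the valid part of its fixed 600-byte buffer in @byte_count, so consumers should clamp before copying. A minimal sketch, assuming kernel context; the helper is hypothetical, the struct and limit come from offload.h.

#include <linux/kernel.h>
#include <linux/string.h>
#include "fw/api/offload.h"

/* illustrative only: copy out the stored beacon frame, clamping the length */
static size_t example_copy_stored_beacon(const struct iwl_stored_beacon_notif *sb,
                                         u8 *buf, size_t buf_len)
{
        u32 len = le32_to_cpu(sb->byte_count);

        /* byte_count describes the valid part of @data, never more than the buffer */
        len = min_t(u32, len, MAX_STORED_BEACON_SIZE);
        len = min_t(size_t, len, buf_len);

        memcpy(buf, sb->data, len);
        return len;
}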
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef __iwl_fw_api_paging_h__ +#define __iwl_fw_api_paging_h__ + +#define NUM_OF_FW_PAGING_BLOCKS 33 /* 32 for data and 1 block for CSS */ + +/** + * struct iwl_fw_paging_cmd - paging layout + * + * Send to FW the paging layout in the driver. + * + * @flags: various flags for the command + * @block_size: the block size in powers of 2 + * @block_num: number of blocks specified in the command. + * @device_phy_addr: virtual addresses from device side + */ +struct iwl_fw_paging_cmd { + __le32 flags; + __le32 block_size; + __le32 block_num; + __le32 device_phy_addr[NUM_OF_FW_PAGING_BLOCKS]; +} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_1 */ + +/** + * enum iwl_fw_item_id - FW item IDs + * + * @IWL_FW_ITEM_ID_PAGING: Address of the pages that the FW will upload + * download + */ +enum iwl_fw_item_id { + IWL_FW_ITEM_ID_PAGING = 3, +}; + +/** + * struct iwl_fw_get_item_cmd - get an item from the fw + * @item_id: ID of item to obtain, see &enum iwl_fw_item_id + */ +struct iwl_fw_get_item_cmd { + __le32 item_id; +} __packed; /* FW_GET_ITEM_CMD_API_S_VER_1 */ + +struct iwl_fw_get_item_resp { + __le32 item_id; + __le32 item_byte_cnt; + __le32 item_val; +} __packed; /* FW_GET_ITEM_RSP_S_VER_1 */ + +#endif /* __iwl_fw_api_paging_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h new file mode 100644 index 000000000000..45f61c6af14e --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h @@ -0,0 +1,164 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
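[Editorial usage sketch, not part of the patch] The paging command above describes the layout as a block count, a block size expressed as a power of two, and one address slot per block (32 data blocks plus the CSS block). A minimal fill sketch, assuming kernel context; the helper and flag value are hypothetical, and how the driver scales the per-block addresses into the unit the firmware expects is outside this sketch.

#include <linux/kernel.h>
#include <linux/types.h>
#include "fw/api/paging.h"

/* illustrative only: describe the paging layout to the firmware */
static void example_fill_paging_cmd(struct iwl_fw_paging_cmd *cmd,
                                    const dma_addr_t *blocks, u32 num_blocks,
                                    u32 block_size_order, u32 flags)
{
        u32 i;

        num_blocks = min_t(u32, num_blocks, NUM_OF_FW_PAGING_BLOCKS);

        cmd->flags = cpu_to_le32(flags);
        cmd->block_size = cpu_to_le32(block_size_order);
        cmd->block_num = cpu_to_le32(num_blocks);

        for (i = 0; i < num_blocks; i++)
                /* only 32 bits per slot; a real driver may scale the address */
                cmd->device_phy_addr[i] = cpu_to_le32(lower_32_bits(blocks[i]));
}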
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __iwl_fw_api_phy_ctxt_h__ +#define __iwl_fw_api_phy_ctxt_h__ + +/* Supported bands */ +#define PHY_BAND_5 (0) +#define PHY_BAND_24 (1) + +/* Supported channel width, vary if there is VHT support */ +#define PHY_VHT_CHANNEL_MODE20 (0x0) +#define PHY_VHT_CHANNEL_MODE40 (0x1) +#define PHY_VHT_CHANNEL_MODE80 (0x2) +#define PHY_VHT_CHANNEL_MODE160 (0x3) + +/* + * Control channel position: + * For legacy set bit means upper channel, otherwise lower. + * For VHT - bit-2 marks if the control is lower/upper relative to center-freq + * bits-1:0 mark the distance from the center freq. for 20Mhz, offset is 0. 
+ * center_freq + * | + * 40Mhz |_______|_______| + * 80Mhz |_______|_______|_______|_______| + * 160Mhz |_______|_______|_______|_______|_______|_______|_______|_______| + * code 011 010 001 000 | 100 101 110 111 + */ +#define PHY_VHT_CTRL_POS_1_BELOW (0x0) +#define PHY_VHT_CTRL_POS_2_BELOW (0x1) +#define PHY_VHT_CTRL_POS_3_BELOW (0x2) +#define PHY_VHT_CTRL_POS_4_BELOW (0x3) +#define PHY_VHT_CTRL_POS_1_ABOVE (0x4) +#define PHY_VHT_CTRL_POS_2_ABOVE (0x5) +#define PHY_VHT_CTRL_POS_3_ABOVE (0x6) +#define PHY_VHT_CTRL_POS_4_ABOVE (0x7) + +/* + * @band: PHY_BAND_* + * @channel: channel number + * @width: PHY_[VHT|LEGACY]_CHANNEL_* + * @ctrl channel: PHY_[VHT|LEGACY]_CTRL_* + */ +struct iwl_fw_channel_info { + u8 band; + u8 channel; + u8 width; + u8 ctrl_pos; +} __packed; + +#define PHY_RX_CHAIN_DRIVER_FORCE_POS (0) +#define PHY_RX_CHAIN_DRIVER_FORCE_MSK \ + (0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS) +#define PHY_RX_CHAIN_VALID_POS (1) +#define PHY_RX_CHAIN_VALID_MSK \ + (0x7 << PHY_RX_CHAIN_VALID_POS) +#define PHY_RX_CHAIN_FORCE_SEL_POS (4) +#define PHY_RX_CHAIN_FORCE_SEL_MSK \ + (0x7 << PHY_RX_CHAIN_FORCE_SEL_POS) +#define PHY_RX_CHAIN_FORCE_MIMO_SEL_POS (7) +#define PHY_RX_CHAIN_FORCE_MIMO_SEL_MSK \ + (0x7 << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS) +#define PHY_RX_CHAIN_CNT_POS (10) +#define PHY_RX_CHAIN_CNT_MSK \ + (0x3 << PHY_RX_CHAIN_CNT_POS) +#define PHY_RX_CHAIN_MIMO_CNT_POS (12) +#define PHY_RX_CHAIN_MIMO_CNT_MSK \ + (0x3 << PHY_RX_CHAIN_MIMO_CNT_POS) +#define PHY_RX_CHAIN_MIMO_FORCE_POS (14) +#define PHY_RX_CHAIN_MIMO_FORCE_MSK \ + (0x1 << PHY_RX_CHAIN_MIMO_FORCE_POS) + +/* TODO: fix the value, make it depend on firmware at runtime? */ +#define NUM_PHY_CTX 3 + +/* TODO: complete missing documentation */ +/** + * struct iwl_phy_context_cmd - config of the PHY context + * ( PHY_CONTEXT_CMD = 0x8 ) + * @id_and_color: ID and color of the relevant Binding + * @action: action to perform, one of FW_CTXT_ACTION_* + * @apply_time: 0 means immediate apply and context switch. + * other value means apply new params after X usecs + * @tx_param_color: ??? + * @ci: channel info + * @txchain_info: ??? + * @rxchain_info: ??? + * @acquisition_data: ??? + * @dsp_cfg_flags: set to 0 + */ +struct iwl_phy_context_cmd { + /* COMMON_INDEX_HDR_API_S_VER_1 */ + __le32 id_and_color; + __le32 action; + /* PHY_CONTEXT_DATA_API_S_VER_1 */ + __le32 apply_time; + __le32 tx_param_color; + struct iwl_fw_channel_info ci; + __le32 txchain_info; + __le32 rxchain_info; + __le32 acquisition_data; + __le32 dsp_cfg_flags; +} __packed; /* PHY_CONTEXT_CMD_API_VER_1 */ + +#endif /* __iwl_fw_api_phy_ctxt_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h new file mode 100644 index 000000000000..9cc59e00bd95 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h @@ -0,0 +1,258 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
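
/*
 * Editorial illustration (not part of the patch): hypothetical helpers
 * showing how the channel-info fields and RX-chain bit positions defined in
 * phy-ctxt.h above combine for a 40 MHz channel on 2.4 GHz with the control
 * channel below the center frequency. Helper names and chosen values are
 * assumptions for illustration only.
 */
#include <linux/types.h>
#include "fw/api/phy-ctxt.h"

static void example_fill_channel_info(struct iwl_fw_channel_info *ci)
{
	ci->band = PHY_BAND_24;			/* 2.4 GHz band */
	ci->channel = 6;			/* primary channel number (example) */
	ci->width = PHY_VHT_CHANNEL_MODE40;	/* 40 MHz wide */
	ci->ctrl_pos = PHY_VHT_CTRL_POS_1_BELOW; /* control channel just below center */
}

static __le32 example_rxchain(u8 valid_chains, u8 active_cnt, u8 mimo_cnt)
{
	u32 v = 0;

	v |= valid_chains << PHY_RX_CHAIN_VALID_POS;	/* bitmap of usable chains */
	v |= active_cnt << PHY_RX_CHAIN_CNT_POS;	/* number of active chains */
	v |= mimo_cnt << PHY_RX_CHAIN_MIMO_CNT_POS;	/* chains used for MIMO */
	return cpu_to_le32(v);
}
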
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ + +#ifndef __iwl_fw_api_phy_h__ +#define __iwl_fw_api_phy_h__ + +/** + * enum iwl_phy_ops_subcmd_ids - PHY group commands + */ +enum iwl_phy_ops_subcmd_ids { + /** + * @CMD_DTS_MEASUREMENT_TRIGGER_WIDE: + * Uses either &struct iwl_dts_measurement_cmd or + * &struct iwl_ext_dts_measurement_cmd + */ + CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0, + + /** + * @CTDP_CONFIG_CMD: &struct iwl_mvm_ctdp_cmd + */ + CTDP_CONFIG_CMD = 0x03, + + /** + * @TEMP_REPORTING_THRESHOLDS_CMD: &struct temp_report_ths_cmd + */ + TEMP_REPORTING_THRESHOLDS_CMD = 0x04, + + /** + * @GEO_TX_POWER_LIMIT: &struct iwl_geo_tx_power_profiles_cmd + */ + GEO_TX_POWER_LIMIT = 0x05, + + /** + * @CT_KILL_NOTIFICATION: &struct ct_kill_notif + */ + CT_KILL_NOTIFICATION = 0xFE, + + /** + * @DTS_MEASUREMENT_NOTIF_WIDE: + * &struct iwl_dts_measurement_notif_v1 or + * &struct iwl_dts_measurement_notif_v2 + */ + DTS_MEASUREMENT_NOTIF_WIDE = 0xFF, +}; + +/* DTS measurements */ + +enum iwl_dts_measurement_flags { + DTS_TRIGGER_CMD_FLAGS_TEMP = BIT(0), + DTS_TRIGGER_CMD_FLAGS_VOLT = BIT(1), +}; + +/** + * struct iwl_dts_measurement_cmd - request DTS temp and/or voltage measurements + * + * @flags: indicates which measurements we want as specified in + * &enum iwl_dts_measurement_flags + */ +struct iwl_dts_measurement_cmd { + __le32 flags; +} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_CMD_S */ + +/** +* enum iwl_dts_control_measurement_mode - DTS measurement type +* @DTS_AUTOMATIC: Automatic mode (full SW control). Provide temperature read +* back (latest value. Not waiting for new value). Use automatic +* SW DTS configuration. +* @DTS_REQUEST_READ: Request DTS read. Configure DTS with manual settings, +* trigger DTS reading and provide read back temperature read +* when available. +* @DTS_OVER_WRITE: over-write the DTS temperatures in the SW until next read +* @DTS_DIRECT_WITHOUT_MEASURE: DTS returns its latest temperature result, +* without measurement trigger. +*/ +enum iwl_dts_control_measurement_mode { + DTS_AUTOMATIC = 0, + DTS_REQUEST_READ = 1, + DTS_OVER_WRITE = 2, + DTS_DIRECT_WITHOUT_MEASURE = 3, +}; + +/** +* enum iwl_dts_used - DTS to use or used for measurement in the DTS request +* @DTS_USE_TOP: Top +* @DTS_USE_CHAIN_A: chain A +* @DTS_USE_CHAIN_B: chain B +* @DTS_USE_CHAIN_C: chain C +* @XTAL_TEMPERATURE: read temperature from xtal +*/ +enum iwl_dts_used { + DTS_USE_TOP = 0, + DTS_USE_CHAIN_A = 1, + DTS_USE_CHAIN_B = 2, + DTS_USE_CHAIN_C = 3, + XTAL_TEMPERATURE = 4, +}; + +/** +* enum iwl_dts_bit_mode - bit-mode to use in DTS request read mode +* @DTS_BIT6_MODE: bit 6 mode +* @DTS_BIT8_MODE: bit 8 mode +*/ +enum iwl_dts_bit_mode { + DTS_BIT6_MODE = 0, + DTS_BIT8_MODE = 1, +}; + +/** + * struct iwl_ext_dts_measurement_cmd - request extended DTS temp measurements + * @control_mode: see &enum iwl_dts_control_measurement_mode + * @temperature: used when over write DTS mode is selected + * @sensor: set temperature sensor to use. See &enum iwl_dts_used + * @avg_factor: average factor to DTS in request DTS read mode + * @bit_mode: value defines the DTS bit mode to use. 
See &enum iwl_dts_bit_mode + * @step_duration: step duration for the DTS + */ +struct iwl_ext_dts_measurement_cmd { + __le32 control_mode; + __le32 temperature; + __le32 sensor; + __le32 avg_factor; + __le32 bit_mode; + __le32 step_duration; +} __packed; /* XVT_FW_DTS_CONTROL_MEASUREMENT_REQUEST_API_S */ + +/** + * struct iwl_dts_measurement_notif_v1 - measurements notification + * + * @temp: the measured temperature + * @voltage: the measured voltage + */ +struct iwl_dts_measurement_notif_v1 { + __le32 temp; + __le32 voltage; +} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_1*/ + +/** + * struct iwl_dts_measurement_notif_v2 - measurements notification + * + * @temp: the measured temperature + * @voltage: the measured voltage + * @threshold_idx: the trip index that was crossed + */ +struct iwl_dts_measurement_notif_v2 { + __le32 temp; + __le32 voltage; + __le32 threshold_idx; +} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_2 */ + +/** + * struct ct_kill_notif - CT-kill entry notification + * + * @temperature: the current temperature in celsius + * @reserved: reserved + */ +struct ct_kill_notif { + __le16 temperature; + __le16 reserved; +} __packed; /* GRP_PHY_CT_KILL_NTF */ + +/** +* enum ctdp_cmd_operation - CTDP command operations +* @CTDP_CMD_OPERATION_START: update the current budget +* @CTDP_CMD_OPERATION_STOP: stop ctdp +* @CTDP_CMD_OPERATION_REPORT: get the average budget +*/ +enum iwl_mvm_ctdp_cmd_operation { + CTDP_CMD_OPERATION_START = 0x1, + CTDP_CMD_OPERATION_STOP = 0x2, + CTDP_CMD_OPERATION_REPORT = 0x4, +};/* CTDP_CMD_OPERATION_TYPE_E */ + +/** + * struct iwl_mvm_ctdp_cmd - track and manage the FW power consumption budget + * + * @operation: see &enum iwl_mvm_ctdp_cmd_operation + * @budget: the budget in milliwatt + * @window_size: defined in API but not used + */ +struct iwl_mvm_ctdp_cmd { + __le32 operation; + __le32 budget; + __le32 window_size; +} __packed; + +#define IWL_MAX_DTS_TRIPS 8 + +/** + * struct temp_report_ths_cmd - set temperature thresholds + * + * @num_temps: number of temperature thresholds passed + * @thresholds: array with the thresholds to be configured + */ +struct temp_report_ths_cmd { + __le32 num_temps; + __le16 thresholds[IWL_MAX_DTS_TRIPS]; +} __packed; /* GRP_PHY_TEMP_REPORTING_THRESHOLDS_CMD */ + +#endif /* __iwl_fw_api_phy_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h similarity index 98% rename from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h rename to drivers/net/wireless/intel/iwlwifi/fw/api/power.h index 7da57ef2454e..a06afb5605d2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h @@ -18,11 +18,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. 
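
/*
 * Editorial illustration (not part of the patch): a sketch of building the
 * two simplest thermal commands declared in phy.h above - a one-shot
 * temperature measurement trigger and a threshold-reporting configuration.
 * The threshold values are arbitrary examples (assumed to be in Celsius,
 * matching &struct ct_kill_notif), not recommended settings.
 */
#include <linux/types.h>
#include "fw/api/phy.h"

static void example_thermal_cmds(void)
{
	struct iwl_dts_measurement_cmd trigger = {
		.flags = cpu_to_le32(DTS_TRIGGER_CMD_FLAGS_TEMP),
	};
	struct temp_report_ths_cmd ths = {
		.num_temps = cpu_to_le32(2),
		.thresholds = {
			cpu_to_le16(75),	/* warning threshold (example) */
			cpu_to_le16(110),	/* critical threshold (example) */
		},
	};

	/*
	 * Both would be sent under the PHY group, e.g. as
	 * CMD_DTS_MEASUREMENT_TRIGGER_WIDE and TEMP_REPORTING_THRESHOLDS_CMD.
	 */
	(void)trigger;
	(void)ths;
}
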
* @@ -65,8 +60,8 @@ * *****************************************************************************/ -#ifndef __fw_api_power_h__ -#define __fw_api_power_h__ +#ifndef __iwl_fw_api_power_h__ +#define __iwl_fw_api_power_h__ /* Power Management Commands, Responses, Notifications */ @@ -224,7 +219,7 @@ struct iwl_device_power_cmd { /** * struct iwl_mac_power_cmd - New power command containing uAPSD support * MAC_PM_POWER_TABLE = 0xA9 (command, has simple generic response) - * @id_and_color: MAC contex identifier, &enum iwl_mvm_id_and_color + * @id_and_color: MAC contex identifier, &enum iwl_ctxt_id_and_color * @flags: Power table command flags from POWER_FLAGS_* * @keep_alive_seconds: Keep alive period in seconds. Default - 25 sec. * Minimum allowed:- 3 * DTIM. Keep alive period must be @@ -528,4 +523,4 @@ struct iwl_beacon_filter_cmd { #define IWL_BF_CMD_CONFIG_DEFAULTS IWL_BF_CMD_CONFIG(_DEFAULT) #define IWL_BF_CMD_CONFIG_D0I3 IWL_BF_CMD_CONFIG(_D0I3) -#endif +#endif /* __iwl_fw_api_power_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h similarity index 97% rename from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h rename to drivers/net/wireless/intel/iwlwifi/fw/api/rs.h index bdf1228d050b..a13fd8a1be62 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h @@ -17,11 +17,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -62,10 +57,10 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ -#ifndef __fw_api_rs_h__ -#define __fw_api_rs_h__ +#ifndef __iwl_fw_api_rs_h__ +#define __iwl_fw_api_rs_h__ -#include "fw-api-mac.h" +#include "mac.h" /* * These serve as indexes into @@ -410,4 +405,4 @@ struct iwl_lq_cmd { __le32 ss_params; }; /* LINK_QUALITY_CMD_API_S_VER_1 */ -#endif /* __fw_api_rs_h__ */ +#endif /* __iwl_fw_api_rs_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h similarity index 95% rename from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h rename to drivers/net/wireless/intel/iwlwifi/fw/api/rx.h index 59038ade08d8..e7565f37ece9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h @@ -18,11 +18,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. 
* @@ -65,8 +60,8 @@ * *****************************************************************************/ -#ifndef __fw_api_rx_h__ -#define __fw_api_rx_h__ +#ifndef __iwl_fw_api_rx_h__ +#define __iwl_fw_api_rx_h__ /* API for pre-9000 hardware */ @@ -571,4 +566,24 @@ struct iwl_mvm_pm_state_notification { __le16 reserved; } __packed; /* PEER_PM_NTFY_API_S_VER_1 */ -#endif /* __fw_api_rx_h__ */ +#define BA_WINDOW_STREAMS_MAX 16 +#define BA_WINDOW_STATUS_TID_MSK 0x000F +#define BA_WINDOW_STATUS_STA_ID_POS 4 +#define BA_WINDOW_STATUS_STA_ID_MSK 0x01F0 +#define BA_WINDOW_STATUS_VALID_MSK BIT(9) + +/** + * struct iwl_ba_window_status_notif - reordering window's status notification + * @bitmap: bitmap of received frames [start_seq_num + 0]..[start_seq_num + 63] + * @ra_tid: bit 3:0 - TID, bit 8:4 - STA_ID, bit 9 - valid + * @start_seq_num: the start sequence number of the bitmap + * @mpdu_rx_count: the number of received MPDUs since entering D0i3 + */ +struct iwl_ba_window_status_notif { + __le64 bitmap[BA_WINDOW_STREAMS_MAX]; + __le16 ra_tid[BA_WINDOW_STREAMS_MAX]; + __le32 start_seq_num[BA_WINDOW_STREAMS_MAX]; + __le16 mpdu_rx_count[BA_WINDOW_STREAMS_MAX]; +} __packed; /* BA_WINDOW_STATUS_NTFY_API_S_VER_1 */ + +#endif /* __iwl_fw_api_rx_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h similarity index 98% rename from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h rename to drivers/net/wireless/intel/iwlwifi/fw/api/scan.h index 1cd7cc087936..5a40092febfb 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h @@ -18,11 +18,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -65,8 +60,8 @@ * *****************************************************************************/ -#ifndef __fw_api_scan_h__ -#define __fw_api_scan_h__ +#ifndef __iwl_fw_api_scan_h__ +#define __iwl_fw_api_scan_h__ /* Scan Commands, Responses, Notifications */ @@ -789,4 +784,4 @@ struct iwl_umac_scan_iter_complete_notif { struct iwl_scan_results_notif results[]; } __packed; /* SCAN_ITER_COMPLETE_NTF_UMAC_API_S_VER_2 */ -#endif +#endif /* __iwl_fw_api_scan_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/sf.h b/drivers/net/wireless/intel/iwlwifi/fw/api/sf.h new file mode 100644 index 000000000000..e517b55f1bc6 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/sf.h @@ -0,0 +1,138 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
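
/*
 * Editorial illustration (not part of the patch): the ra_tid words in the
 * reordering-window notification added to rx.h above pack TID, station ID
 * and a valid bit; this hypothetical snippet shows one way to unpack them
 * using the masks defined in the same hunk.
 */
#include <linux/types.h>
#include <linux/printk.h>
#include "fw/api/rx.h"

static void example_ba_window(const struct iwl_ba_window_status_notif *notif)
{
	int i;

	for (i = 0; i < BA_WINDOW_STREAMS_MAX; i++) {
		u16 ra_tid = le16_to_cpu(notif->ra_tid[i]);

		if (!(ra_tid & BA_WINDOW_STATUS_VALID_MSK))
			continue;		/* stream not in use */

		pr_debug("stream %d: sta %u tid %u ssn %u\n", i,
			 (ra_tid & BA_WINDOW_STATUS_STA_ID_MSK) >>
				BA_WINDOW_STATUS_STA_ID_POS,
			 ra_tid & BA_WINDOW_STATUS_TID_MSK,
			 le32_to_cpu(notif->start_seq_num[i]));
	}
}
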
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ + +#ifndef __iwl_fw_api_sf_h__ +#define __iwl_fw_api_sf_h__ + +/* Smart Fifo state */ +enum iwl_sf_state { + SF_LONG_DELAY_ON = 0, /* should never be called by driver */ + SF_FULL_ON, + SF_UNINIT, + SF_INIT_OFF, + SF_HW_NUM_STATES +}; + +/* Smart Fifo possible scenario */ +enum iwl_sf_scenario { + SF_SCENARIO_SINGLE_UNICAST, + SF_SCENARIO_AGG_UNICAST, + SF_SCENARIO_MULTICAST, + SF_SCENARIO_BA_RESP, + SF_SCENARIO_TX_RESP, + SF_NUM_SCENARIO +}; + +#define SF_TRANSIENT_STATES_NUMBER 2 /* SF_LONG_DELAY_ON and SF_FULL_ON */ +#define SF_NUM_TIMEOUT_TYPES 2 /* Aging timer and Idle timer */ + +/* smart FIFO default values */ +#define SF_W_MARK_SISO 6144 +#define SF_W_MARK_MIMO2 8192 +#define SF_W_MARK_MIMO3 6144 +#define SF_W_MARK_LEGACY 4096 +#define SF_W_MARK_SCAN 4096 + +/* SF Scenarios timers for default configuration (aligned to 32 uSec) */ +#define SF_SINGLE_UNICAST_IDLE_TIMER_DEF 160 /* 150 uSec */ +#define SF_SINGLE_UNICAST_AGING_TIMER_DEF 400 /* 0.4 mSec */ +#define SF_AGG_UNICAST_IDLE_TIMER_DEF 160 /* 150 uSec */ +#define SF_AGG_UNICAST_AGING_TIMER_DEF 400 /* 0.4 mSec */ +#define SF_MCAST_IDLE_TIMER_DEF 160 /* 150 mSec */ +#define SF_MCAST_AGING_TIMER_DEF 400 /* 0.4 mSec */ +#define SF_BA_IDLE_TIMER_DEF 160 /* 150 uSec */ +#define SF_BA_AGING_TIMER_DEF 400 /* 0.4 mSec */ +#define SF_TX_RE_IDLE_TIMER_DEF 160 /* 150 uSec */ +#define SF_TX_RE_AGING_TIMER_DEF 400 /* 0.4 mSec */ + +/* SF Scenarios timers for BSS MAC configuration (aligned to 32 uSec) */ +#define SF_SINGLE_UNICAST_IDLE_TIMER 320 /* 300 uSec */ +#define SF_SINGLE_UNICAST_AGING_TIMER 2016 /* 2 mSec */ +#define SF_AGG_UNICAST_IDLE_TIMER 320 /* 300 uSec */ +#define SF_AGG_UNICAST_AGING_TIMER 2016 /* 2 mSec */ +#define SF_MCAST_IDLE_TIMER 2016 /* 2 mSec */ +#define SF_MCAST_AGING_TIMER 10016 /* 10 mSec */ +#define SF_BA_IDLE_TIMER 320 /* 300 uSec */ +#define SF_BA_AGING_TIMER 2016 /* 2 mSec */ +#define SF_TX_RE_IDLE_TIMER 320 /* 300 uSec */ +#define SF_TX_RE_AGING_TIMER 2016 /* 2 mSec */ + +#define SF_LONG_DELAY_AGING_TIMER 1000000 /* 1 Sec */ + +#define SF_CFG_DUMMY_NOTIF_OFF BIT(16) + +/** + * struct iwl_sf_cfg_cmd - Smart Fifo configuration command. + * @state: smart fifo state, types listed in &enum iwl_sf_state. + * @watermark: Minimum allowed available free space in RXF for transient state. + * @long_delay_timeouts: aging and idle timer values for each scenario + * in long delay state. + * @full_on_timeouts: timer values for each scenario in full on state. + */ +struct iwl_sf_cfg_cmd { + __le32 state; + __le32 watermark[SF_TRANSIENT_STATES_NUMBER]; + __le32 long_delay_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES]; + __le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES]; +} __packed; /* SF_CFG_API_S_VER_2 */ + +#endif /* __iwl_fw_api_sf_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h similarity index 98% rename from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h rename to drivers/net/wireless/intel/iwlwifi/fw/api/sta.h index 81f0a3463bac..af369eba3795 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h @@ -18,11 +18,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
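
/*
 * Editorial illustration (not part of the patch): a sketch of populating the
 * Smart Fifo configuration command declared in sf.h above with some of the
 * default watermarks and timers from the same header. The chosen state, the
 * helper name, and reusing one timer pair for every scenario are
 * simplifications to keep the sketch short; index 0 is assumed to be the
 * aging timer and index 1 the idle timer, per the comment above.
 */
#include <linux/types.h>
#include "fw/api/sf.h"

static void example_fill_sf_cfg(struct iwl_sf_cfg_cmd *cmd)
{
	int i;

	cmd->state = cpu_to_le32(SF_FULL_ON);

	/* one watermark per transient state; reuse the SISO default here */
	for (i = 0; i < SF_TRANSIENT_STATES_NUMBER; i++)
		cmd->watermark[i] = cpu_to_le32(SF_W_MARK_SISO);

	for (i = 0; i < SF_NUM_SCENARIO; i++) {
		cmd->long_delay_timeouts[i][0] =
			cpu_to_le32(SF_LONG_DELAY_AGING_TIMER);
		cmd->long_delay_timeouts[i][1] =
			cpu_to_le32(SF_AGG_UNICAST_IDLE_TIMER_DEF);
		cmd->full_on_timeouts[i][0] =
			cpu_to_le32(SF_AGG_UNICAST_AGING_TIMER);
		cmd->full_on_timeouts[i][1] =
			cpu_to_le32(SF_AGG_UNICAST_IDLE_TIMER);
	}
}
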
* - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -64,8 +59,8 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ -#ifndef __fw_api_sta_h__ -#define __fw_api_sta_h__ +#ifndef __iwl_fw_api_sta_h__ +#define __iwl_fw_api_sta_h__ /** * enum iwl_sta_flags - flags for the ADD_STA host command @@ -291,7 +286,7 @@ struct iwl_mvm_keyinfo { * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable * AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field. * @mac_id_n_color: the Mac context this station belongs to, - * see &enum iwl_mvm_id_and_color + * see &enum iwl_ctxt_id_and_color * @addr: station's MAC address * @reserved2: reserved * @sta_id: index of station in uCode's station table @@ -372,7 +367,7 @@ enum iwl_sta_type { * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable * AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field. * @mac_id_n_color: the Mac context this station belongs to, - * see &enum iwl_mvm_id_and_color + * see &enum iwl_ctxt_id_and_color * @addr: station's MAC address * @reserved2: reserved * @sta_id: index of station in uCode's station table @@ -575,4 +570,4 @@ struct iwl_mvm_eosp_notification { __le32 sta_id; } __packed; /* UAPSD_EOSP_NTFY_API_S_VER_1 */ -#endif /* __fw_api_sta_h__ */ +#endif /* __iwl_fw_api_sta_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h similarity index 97% rename from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h rename to drivers/net/wireless/intel/iwlwifi/fw/api/stats.h index c7531da508fd..53cb622aa9ab 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h @@ -18,11 +18,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -64,9 +59,9 @@ * *****************************************************************************/ -#ifndef __fw_api_stats_h__ -#define __fw_api_stats_h__ -#include "fw-api-mac.h" +#ifndef __iwl_fw_api_stats_h__ +#define __iwl_fw_api_stats_h__ +#include "mac.h" struct mvm_statistics_dbg { __le32 burst_check; @@ -476,4 +471,4 @@ struct iwl_statistics_cmd { __le32 flags; } __packed; /* STATISTICS_CMD_API_S_VER_1 */ -#endif /* __fw_api_stats_h__ */ +#endif /* __iwl_fw_api_stats_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h new file mode 100644 index 000000000000..7c6c2462d0e8 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h @@ -0,0 +1,208 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. 
+ * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __iwl_fw_api_tdls_h__ +#define __iwl_fw_api_tdls_h__ + +#include "fw/api/tx.h" +#include "fw/api/phy-ctxt.h" + +#define IWL_MVM_TDLS_STA_COUNT 4 + +/* Type of TDLS request */ +enum iwl_tdls_channel_switch_type { + TDLS_SEND_CHAN_SW_REQ = 0, + TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH, + TDLS_MOVE_CH, +}; /* TDLS_STA_CHANNEL_SWITCH_CMD_TYPE_API_E_VER_1 */ + +/** + * struct iwl_tdls_channel_switch_timing - Switch timing in TDLS channel-switch + * @frame_timestamp: GP2 timestamp of channel-switch request/response packet + * received from peer + * @max_offchan_duration: What amount of microseconds out of a DTIM is given + * to the TDLS off-channel communication. 
For instance if the DTIM is + * 200TU and the TDLS peer is to be given 25% of the time, the value + * given will be 50TU, or 50 * 1024 if translated into microseconds. + * @switch_time: switch time the peer sent in its channel switch timing IE + * @switch_timeout: switch timeout the peer sent in its channel switch timing IE + */ +struct iwl_tdls_channel_switch_timing { + __le32 frame_timestamp; /* GP2 time of peer packet Rx */ + __le32 max_offchan_duration; /* given in micro-seconds */ + __le32 switch_time; /* given in micro-seconds */ + __le32 switch_timeout; /* given in micro-seconds */ +} __packed; /* TDLS_STA_CHANNEL_SWITCH_TIMING_DATA_API_S_VER_1 */ + +#define IWL_TDLS_CH_SW_FRAME_MAX_SIZE 200 + +/** + * struct iwl_tdls_channel_switch_frame - TDLS channel switch frame template + * + * A template representing a TDLS channel-switch request or response frame + * + * @switch_time_offset: offset to the channel switch timing IE in the template + * @tx_cmd: Tx parameters for the frame + * @data: frame data + */ +struct iwl_tdls_channel_switch_frame { + __le32 switch_time_offset; + struct iwl_tx_cmd tx_cmd; + u8 data[IWL_TDLS_CH_SW_FRAME_MAX_SIZE]; +} __packed; /* TDLS_STA_CHANNEL_SWITCH_FRAME_API_S_VER_1 */ + +/** + * struct iwl_tdls_channel_switch_cmd - TDLS channel switch command + * + * The command is sent to initiate a channel switch and also in response to + * incoming TDLS channel-switch request/response packets from remote peers. + * + * @switch_type: see &enum iwl_tdls_channel_switch_type + * @peer_sta_id: station id of TDLS peer + * @ci: channel we switch to + * @timing: timing related data for command + * @frame: channel-switch request/response template, depending to switch_type + */ +struct iwl_tdls_channel_switch_cmd { + u8 switch_type; + __le32 peer_sta_id; + struct iwl_fw_channel_info ci; + struct iwl_tdls_channel_switch_timing timing; + struct iwl_tdls_channel_switch_frame frame; +} __packed; /* TDLS_STA_CHANNEL_SWITCH_CMD_API_S_VER_1 */ + +/** + * struct iwl_tdls_channel_switch_notif - TDLS channel switch start notification + * + * @status: non-zero on success + * @offchannel_duration: duration given in microseconds + * @sta_id: peer currently performing the channel-switch with + */ +struct iwl_tdls_channel_switch_notif { + __le32 status; + __le32 offchannel_duration; + __le32 sta_id; +} __packed; /* TDLS_STA_CHANNEL_SWITCH_NTFY_API_S_VER_1 */ + +/** + * struct iwl_tdls_sta_info - TDLS station info + * + * @sta_id: station id of the TDLS peer + * @tx_to_peer_tid: TID reserved vs. the peer for FW based Tx + * @tx_to_peer_ssn: initial SSN the FW should use for Tx on its TID vs the peer + * @is_initiator: 1 if the peer is the TDLS link initiator, 0 otherwise + */ +struct iwl_tdls_sta_info { + u8 sta_id; + u8 tx_to_peer_tid; + __le16 tx_to_peer_ssn; + __le32 is_initiator; +} __packed; /* TDLS_STA_INFO_VER_1 */ + +/** + * struct iwl_tdls_config_cmd - TDLS basic config command + * + * @id_and_color: MAC id and color being configured + * @tdls_peer_count: amount of currently connected TDLS peers + * @tx_to_ap_tid: TID reverved vs. the AP for FW based Tx + * @tx_to_ap_ssn: initial SSN the FW should use for Tx on its TID vs. the AP + * @sta_info: per-station info. 
Only the first tdls_peer_count entries are set + * @pti_req_data_offset: offset of network-level data for the PTI template + * @pti_req_tx_cmd: Tx parameters for PTI request template + * @pti_req_template: PTI request template data + */ +struct iwl_tdls_config_cmd { + __le32 id_and_color; /* mac id and color */ + u8 tdls_peer_count; + u8 tx_to_ap_tid; + __le16 tx_to_ap_ssn; + struct iwl_tdls_sta_info sta_info[IWL_MVM_TDLS_STA_COUNT]; + + __le32 pti_req_data_offset; + struct iwl_tx_cmd pti_req_tx_cmd; + u8 pti_req_template[0]; +} __packed; /* TDLS_CONFIG_CMD_API_S_VER_1 */ + +/** + * struct iwl_tdls_config_sta_info_res - TDLS per-station config information + * + * @sta_id: station id of the TDLS peer + * @tx_to_peer_last_seq: last sequence number used by FW during FW-based Tx to + * the peer + */ +struct iwl_tdls_config_sta_info_res { + __le16 sta_id; + __le16 tx_to_peer_last_seq; +} __packed; /* TDLS_STA_INFO_RSP_VER_1 */ + +/** + * struct iwl_tdls_config_res - TDLS config information from FW + * + * @tx_to_ap_last_seq: last sequence number used by FW during FW-based Tx to AP + * @sta_info: per-station TDLS config information + */ +struct iwl_tdls_config_res { + __le32 tx_to_ap_last_seq; + struct iwl_tdls_config_sta_info_res sta_info[IWL_MVM_TDLS_STA_COUNT]; +} __packed; /* TDLS_CONFIG_RSP_API_S_VER_1 */ + +#endif /* __iwl_fw_api_tdls_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h new file mode 100644 index 000000000000..3721a3ed358b --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h @@ -0,0 +1,386 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
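
/*
 * Editorial illustration (not part of the patch): a hypothetical sketch of
 * filling the per-peer array of the TDLS configuration command declared in
 * tdls.h above. The peer parameters (station index, TID, SSN) and the helper
 * name are placeholders for illustration only.
 */
#include <linux/types.h>
#include "fw/api/tdls.h"

static void example_fill_tdls_cfg(struct iwl_tdls_config_cmd *cmd,
				  u32 mac_id_and_color)
{
	cmd->id_and_color = cpu_to_le32(mac_id_and_color);
	cmd->tdls_peer_count = 1;		/* a single connected TDLS peer */
	cmd->tx_to_ap_tid = 4;			/* TID reserved for FW Tx to the AP (example) */
	cmd->tx_to_ap_ssn = cpu_to_le16(0);

	cmd->sta_info[0].sta_id = 2;		/* placeholder station index */
	cmd->sta_info[0].tx_to_peer_tid = 4;
	cmd->sta_info[0].tx_to_peer_ssn = cpu_to_le16(0);
	cmd->sta_info[0].is_initiator = cpu_to_le32(1);
}
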
+ * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __iwl_fw_api_time_event_h__ +#define __iwl_fw_api_time_event_h__ + +#include "fw/api/phy-ctxt.h" + +/* Time Event types, according to MAC type */ +enum iwl_time_event_type { + /* BSS Station Events */ + TE_BSS_STA_AGGRESSIVE_ASSOC, + TE_BSS_STA_ASSOC, + TE_BSS_EAP_DHCP_PROT, + TE_BSS_QUIET_PERIOD, + + /* P2P Device Events */ + TE_P2P_DEVICE_DISCOVERABLE, + TE_P2P_DEVICE_LISTEN, + TE_P2P_DEVICE_ACTION_SCAN, + TE_P2P_DEVICE_FULL_SCAN, + + /* P2P Client Events */ + TE_P2P_CLIENT_AGGRESSIVE_ASSOC, + TE_P2P_CLIENT_ASSOC, + TE_P2P_CLIENT_QUIET_PERIOD, + + /* P2P GO Events */ + TE_P2P_GO_ASSOC_PROT, + TE_P2P_GO_REPETITIVET_NOA, + TE_P2P_GO_CT_WINDOW, + + /* WiDi Sync Events */ + TE_WIDI_TX_SYNC, + + /* Channel Switch NoA */ + TE_CHANNEL_SWITCH_PERIOD, + + TE_MAX +}; /* MAC_EVENT_TYPE_API_E_VER_1 */ + +/* Time event - defines for command API v1 */ + +/* + * @TE_V1_FRAG_NONE: fragmentation of the time event is NOT allowed. + * @TE_V1_FRAG_SINGLE: fragmentation of the time event is allowed, but only + * the first fragment is scheduled. + * @TE_V1_FRAG_DUAL: fragmentation of the time event is allowed, but only + * the first 2 fragments are scheduled. + * @TE_V1_FRAG_ENDLESS: fragmentation of the time event is allowed, and any + * number of fragments are valid. + * + * Other than the constant defined above, specifying a fragmentation value 'x' + * means that the event can be fragmented but only the first 'x' will be + * scheduled. + */ +enum { + TE_V1_FRAG_NONE = 0, + TE_V1_FRAG_SINGLE = 1, + TE_V1_FRAG_DUAL = 2, + TE_V1_FRAG_ENDLESS = 0xffffffff +}; + +/* If a Time Event can be fragmented, this is the max number of fragments */ +#define TE_V1_FRAG_MAX_MSK 0x0fffffff +/* Repeat the time event endlessly (until removed) */ +#define TE_V1_REPEAT_ENDLESS 0xffffffff +/* If a Time Event has bounded repetitions, this is the maximal value */ +#define TE_V1_REPEAT_MAX_MSK_V1 0x0fffffff + +/* Time Event dependencies: none, on another TE, or in a specific time */ +enum { + TE_V1_INDEPENDENT = 0, + TE_V1_DEP_OTHER = BIT(0), + TE_V1_DEP_TSF = BIT(1), + TE_V1_EVENT_SOCIOPATHIC = BIT(2), +}; /* MAC_EVENT_DEPENDENCY_POLICY_API_E_VER_2 */ + +/* + * @TE_V1_NOTIF_NONE: no notifications + * @TE_V1_NOTIF_HOST_EVENT_START: request/receive notification on event start + * @TE_V1_NOTIF_HOST_EVENT_END:request/receive notification on event end + * @TE_V1_NOTIF_INTERNAL_EVENT_START: internal FW use + * @TE_V1_NOTIF_INTERNAL_EVENT_END: internal FW use. 
+ * @TE_V1_NOTIF_HOST_FRAG_START: request/receive notification on frag start + * @TE_V1_NOTIF_HOST_FRAG_END:request/receive notification on frag end + * @TE_V1_NOTIF_INTERNAL_FRAG_START: internal FW use. + * @TE_V1_NOTIF_INTERNAL_FRAG_END: internal FW use. + * + * Supported Time event notifications configuration. + * A notification (both event and fragment) includes a status indicating weather + * the FW was able to schedule the event or not. For fragment start/end + * notification the status is always success. There is no start/end fragment + * notification for monolithic events. + */ +enum { + TE_V1_NOTIF_NONE = 0, + TE_V1_NOTIF_HOST_EVENT_START = BIT(0), + TE_V1_NOTIF_HOST_EVENT_END = BIT(1), + TE_V1_NOTIF_INTERNAL_EVENT_START = BIT(2), + TE_V1_NOTIF_INTERNAL_EVENT_END = BIT(3), + TE_V1_NOTIF_HOST_FRAG_START = BIT(4), + TE_V1_NOTIF_HOST_FRAG_END = BIT(5), + TE_V1_NOTIF_INTERNAL_FRAG_START = BIT(6), + TE_V1_NOTIF_INTERNAL_FRAG_END = BIT(7), +}; /* MAC_EVENT_ACTION_API_E_VER_2 */ + +/* Time event - defines for command API */ + +/* + * @TE_V2_FRAG_NONE: fragmentation of the time event is NOT allowed. + * @TE_V2_FRAG_SINGLE: fragmentation of the time event is allowed, but only + * the first fragment is scheduled. + * @TE_V2_FRAG_DUAL: fragmentation of the time event is allowed, but only + * the first 2 fragments are scheduled. + * @TE_V2_FRAG_ENDLESS: fragmentation of the time event is allowed, and any + * number of fragments are valid. + * + * Other than the constant defined above, specifying a fragmentation value 'x' + * means that the event can be fragmented but only the first 'x' will be + * scheduled. + */ +enum { + TE_V2_FRAG_NONE = 0, + TE_V2_FRAG_SINGLE = 1, + TE_V2_FRAG_DUAL = 2, + TE_V2_FRAG_MAX = 0xfe, + TE_V2_FRAG_ENDLESS = 0xff +}; + +/* Repeat the time event endlessly (until removed) */ +#define TE_V2_REPEAT_ENDLESS 0xff +/* If a Time Event has bounded repetitions, this is the maximal value */ +#define TE_V2_REPEAT_MAX 0xfe + +#define TE_V2_PLACEMENT_POS 12 +#define TE_V2_ABSENCE_POS 15 + +/** + * enum iwl_time_event_policy - Time event policy values + * A notification (both event and fragment) includes a status indicating weather + * the FW was able to schedule the event or not. For fragment start/end + * notification the status is always success. There is no start/end fragment + * notification for monolithic events. + * + * @TE_V2_DEFAULT_POLICY: independent, social, present, unoticable + * @TE_V2_NOTIF_HOST_EVENT_START: request/receive notification on event start + * @TE_V2_NOTIF_HOST_EVENT_END:request/receive notification on event end + * @TE_V2_NOTIF_INTERNAL_EVENT_START: internal FW use + * @TE_V2_NOTIF_INTERNAL_EVENT_END: internal FW use. + * @TE_V2_NOTIF_HOST_FRAG_START: request/receive notification on frag start + * @TE_V2_NOTIF_HOST_FRAG_END:request/receive notification on frag end + * @TE_V2_NOTIF_INTERNAL_FRAG_START: internal FW use. + * @TE_V2_NOTIF_INTERNAL_FRAG_END: internal FW use. + * @T2_V2_START_IMMEDIATELY: start time event immediately + * @TE_V2_DEP_OTHER: depends on another time event + * @TE_V2_DEP_TSF: depends on a specific time + * @TE_V2_EVENT_SOCIOPATHIC: can't co-exist with other events of tha same MAC + * @TE_V2_ABSENCE: are we present or absent during the Time Event. 
+ */ +enum iwl_time_event_policy { + TE_V2_DEFAULT_POLICY = 0x0, + + /* notifications (event start/stop, fragment start/stop) */ + TE_V2_NOTIF_HOST_EVENT_START = BIT(0), + TE_V2_NOTIF_HOST_EVENT_END = BIT(1), + TE_V2_NOTIF_INTERNAL_EVENT_START = BIT(2), + TE_V2_NOTIF_INTERNAL_EVENT_END = BIT(3), + + TE_V2_NOTIF_HOST_FRAG_START = BIT(4), + TE_V2_NOTIF_HOST_FRAG_END = BIT(5), + TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6), + TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7), + T2_V2_START_IMMEDIATELY = BIT(11), + + /* placement characteristics */ + TE_V2_DEP_OTHER = BIT(TE_V2_PLACEMENT_POS), + TE_V2_DEP_TSF = BIT(TE_V2_PLACEMENT_POS + 1), + TE_V2_EVENT_SOCIOPATHIC = BIT(TE_V2_PLACEMENT_POS + 2), + + /* are we present or absent during the Time Event. */ + TE_V2_ABSENCE = BIT(TE_V2_ABSENCE_POS), +}; + +/** + * struct iwl_time_event_cmd - configuring Time Events + * with struct MAC_TIME_EVENT_DATA_API_S_VER_2 (see also + * with version 1. determined by IWL_UCODE_TLV_FLAGS) + * ( TIME_EVENT_CMD = 0x29 ) + * @id_and_color: ID and color of the relevant MAC, + * &enum iwl_ctxt_id_and_color + * @action: action to perform, one of &enum iwl_ctxt_action + * @id: this field has two meanings, depending on the action: + * If the action is ADD, then it means the type of event to add. + * For all other actions it is the unique event ID assigned when the + * event was added by the FW. + * @apply_time: When to start the Time Event (in GP2) + * @max_delay: maximum delay to event's start (apply time), in TU + * @depends_on: the unique ID of the event we depend on (if any) + * @interval: interval between repetitions, in TU + * @duration: duration of event in TU + * @repeat: how many repetitions to do, can be TE_REPEAT_ENDLESS + * @max_frags: maximal number of fragments the Time Event can be divided to + * @policy: defines whether uCode shall notify the host or other uCode modules + * on event and/or fragment start and/or end + * using one of TE_INDEPENDENT, TE_DEP_OTHER, TE_DEP_TSF + * TE_EVENT_SOCIOPATHIC + * using TE_ABSENCE and using TE_NOTIF_*, + * &enum iwl_time_event_policy + */ +struct iwl_time_event_cmd { + /* COMMON_INDEX_HDR_API_S_VER_1 */ + __le32 id_and_color; + __le32 action; + __le32 id; + /* MAC_TIME_EVENT_DATA_API_S_VER_2 */ + __le32 apply_time; + __le32 max_delay; + __le32 depends_on; + __le32 interval; + __le32 duration; + u8 repeat; + u8 max_frags; + __le16 policy; +} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_2 */ + +/** + * struct iwl_time_event_resp - response structure to iwl_time_event_cmd + * @status: bit 0 indicates success, all others specify errors + * @id: the Time Event type + * @unique_id: the unique ID assigned (in ADD) or given (others) to the TE + * @id_and_color: ID and color of the relevant MAC, + * &enum iwl_ctxt_id_and_color + */ +struct iwl_time_event_resp { + __le32 status; + __le32 id; + __le32 unique_id; + __le32 id_and_color; +} __packed; /* MAC_TIME_EVENT_RSP_API_S_VER_1 */ + +/** + * struct iwl_time_event_notif - notifications of time event start/stop + * ( TIME_EVENT_NOTIFICATION = 0x2a ) + * @timestamp: action timestamp in GP2 + * @session_id: session's unique id + * @unique_id: unique id of the Time Event itself + * @id_and_color: ID and color of the relevant MAC + * @action: &enum iwl_time_event_policy + * @status: true if scheduled, false otherwise (not executed) + */ +struct iwl_time_event_notif { + __le32 timestamp; + __le32 session_id; + __le32 unique_id; + __le32 id_and_color; + __le32 action; + __le32 status; +} __packed; /* MAC_TIME_EVENT_NTFY_API_S_VER_1 */ + +/* + * Aux 
ROC command + * + * Command requests the firmware to create a time event for a certain duration + * and remain on the given channel. This is done by using the Aux framework in + * the FW. + * The command was first used for Hot Spot issues - but can be used regardless + * to Hot Spot. + * + * ( HOT_SPOT_CMD 0x53 ) + * + * @id_and_color: ID and color of the MAC + * @action: action to perform, one of FW_CTXT_ACTION_* + * @event_unique_id: If the action FW_CTXT_ACTION_REMOVE then the + * event_unique_id should be the id of the time event assigned by ucode. + * Otherwise ignore the event_unique_id. + * @sta_id_and_color: station id and color, resumed during "Remain On Channel" + * activity. + * @channel_info: channel info + * @node_addr: Our MAC Address + * @reserved: reserved for alignment + * @apply_time: GP2 value to start (should always be the current GP2 value) + * @apply_time_max_delay: Maximum apply time delay value in TU. Defines max + * time by which start of the event is allowed to be postponed. + * @duration: event duration in TU To calculate event duration: + * timeEventDuration = min(duration, remainingQuota) + */ +struct iwl_hs20_roc_req { + /* COMMON_INDEX_HDR_API_S_VER_1 hdr */ + __le32 id_and_color; + __le32 action; + __le32 event_unique_id; + __le32 sta_id_and_color; + struct iwl_fw_channel_info channel_info; + u8 node_addr[ETH_ALEN]; + __le16 reserved; + __le32 apply_time; + __le32 apply_time_max_delay; + __le32 duration; +} __packed; /* HOT_SPOT_CMD_API_S_VER_1 */ + +/* + * values for AUX ROC result values + */ +enum iwl_mvm_hot_spot { + HOT_SPOT_RSP_STATUS_OK, + HOT_SPOT_RSP_STATUS_TOO_MANY_EVENTS, + HOT_SPOT_MAX_NUM_OF_SESSIONS, +}; + +/* + * Aux ROC command response + * + * In response to iwl_hs20_roc_req the FW sends this command to notify the + * driver the uid of the timevent. + * + * ( HOT_SPOT_CMD 0x53 ) + * + * @event_unique_id: Unique ID of time event assigned by ucode + * @status: Return status 0 is success, all the rest used for specific errors + */ +struct iwl_hs20_roc_res { + __le32 event_unique_id; + __le32 status; +} __packed; /* HOT_SPOT_RSP_API_S_VER_1 */ + +#endif /* __iwl_fw_api_time_event_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tof.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tof.h similarity index 98% rename from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tof.h rename to drivers/net/wireless/intel/iwlwifi/fw/api/tof.h index 8658a983c463..7328a1606146 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tof.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tof.h @@ -16,11 +16,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -60,8 +55,8 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
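
/*
 * Editorial illustration (not part of the patch): a sketch of requesting a
 * time event with the command declared in time-event.h above - here an
 * association-protection event that starts immediately and notifies the host
 * on start and end. The duration, delay and MAC id/color values are
 * placeholders, and the ADD action value (1) is an assumption since
 * &enum iwl_ctxt_action is defined elsewhere in the firmware API.
 */
#include <linux/types.h>
#include "fw/api/time-event.h"

static void example_fill_time_event(struct iwl_time_event_cmd *cmd,
				    u32 mac_id_and_color, u32 apply_time_gp2)
{
	cmd->id_and_color = cpu_to_le32(mac_id_and_color);
	cmd->action = cpu_to_le32(1);		/* FW_CTXT_ACTION_ADD (value assumed) */
	cmd->id = cpu_to_le32(TE_BSS_STA_ASSOC);

	cmd->apply_time = cpu_to_le32(apply_time_gp2);
	cmd->max_delay = cpu_to_le32(500);	/* TU (example) */
	cmd->depends_on = 0;			/* no dependency on another event */
	cmd->interval = cpu_to_le32(1);
	cmd->duration = cpu_to_le32(600);	/* TU (example) */
	cmd->repeat = 1;
	cmd->max_frags = TE_V2_FRAG_ENDLESS;
	cmd->policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
				  TE_V2_NOTIF_HOST_EVENT_END |
				  T2_V2_START_IMMEDIATELY);
}
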
* *****************************************************************************/ -#ifndef __fw_api_tof_h__ -#define __fw_api_tof_h__ +#ifndef __iwl_fw_api_tof_h__ +#define __iwl_fw_api_tof_h__ /* ToF sub-group command IDs */ enum iwl_mvm_tof_sub_grp_ids { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h similarity index 96% rename from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h rename to drivers/net/wireless/intel/iwlwifi/fw/api/tx.h index 97d7eed32622..14ad9fb895f9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h @@ -17,11 +17,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -62,8 +57,8 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ -#ifndef __fw_api_tx_h__ -#define __fw_api_tx_h__ +#ifndef __iwl_fw_api_tx_h__ +#define __iwl_fw_api_tx_h__ /** * enum iwl_tx_flags - bitmasks for tx_flags in TX command @@ -414,7 +409,8 @@ enum iwl_tx_status { * @AGG_TX_STATE_BT_PRIO: * @AGG_TX_STATE_FEW_BYTES: * @AGG_TX_STATE_ABORT: - * @AGG_TX_STATE_LAST_SENT_TTL: + * @AGG_TX_STATE_TX_ON_AIR_DROP: TX_ON_AIR signal drop without underrun or + * BT detection * @AGG_TX_STATE_LAST_SENT_TRY_CNT: * @AGG_TX_STATE_LAST_SENT_BT_KILL: * @AGG_TX_STATE_SCD_QUERY: @@ -426,7 +422,7 @@ enum iwl_tx_status { * occur if tx failed for this frame when it was a member of a previous * aggregation block). If rate scaling is used, retry count indicates the * rate table entry used for all frames in the new agg. - *@ AGG_TX_STATE_SEQ_NUM_MSK: Command ID and sequence number of Tx command for + * @AGG_TX_STATE_SEQ_NUM_MSK: Command ID and sequence number of Tx command for * this frame * * TODO: complete documentation @@ -438,7 +434,7 @@ enum iwl_tx_agg_status { AGG_TX_STATE_BT_PRIO = 0x002, AGG_TX_STATE_FEW_BYTES = 0x004, AGG_TX_STATE_ABORT = 0x008, - AGG_TX_STATE_LAST_SENT_TTL = 0x010, + AGG_TX_STATE_TX_ON_AIR_DROP = 0x010, AGG_TX_STATE_LAST_SENT_TRY_CNT = 0x020, AGG_TX_STATE_LAST_SENT_BT_KILL = 0x040, AGG_TX_STATE_SCD_QUERY = 0x080, @@ -450,10 +446,6 @@ enum iwl_tx_agg_status { AGG_TX_STATE_TRY_CNT_MSK = 0xf << AGG_TX_STATE_TRY_CNT_POS, }; -#define AGG_TX_STATE_LAST_SENT_MSK (AGG_TX_STATE_LAST_SENT_TTL| \ - AGG_TX_STATE_LAST_SENT_TRY_CNT| \ - AGG_TX_STATE_LAST_SENT_BT_KILL) - /* * The mask below describes a status where we are absolutely sure that the MPDU * wasn't sent. For BA/Underrun we cannot be that sure. All we know that we've @@ -771,7 +763,8 @@ struct iwl_mac_beacon_cmd_v6 { } __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_6 */ /** - * struct iwl_mac_beacon_cmd_data - data of beacon template with offloaded CSA + * struct iwl_mac_beacon_cmd_v7 - beacon template command with offloaded CSA + * @tx: the tx commands associated with the beacon frame * @template_id: currently equal to the mac context id of the coresponding * mac. 
* @tim_idx: the offset of the tim IE in the beacon @@ -780,38 +773,47 @@ struct iwl_mac_beacon_cmd_v6 { * @csa_offset: offset to the CSA IE if present * @frame: the template of the beacon frame */ -struct iwl_mac_beacon_cmd_data { +struct iwl_mac_beacon_cmd_v7 { + struct iwl_tx_cmd tx; __le32 template_id; __le32 tim_idx; __le32 tim_size; __le32 ecsa_offset; __le32 csa_offset; struct ieee80211_hdr frame[0]; +} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_7 */ + +enum iwl_mac_beacon_flags { + IWL_MAC_BEACON_CCK = BIT(8), + IWL_MAC_BEACON_ANT_A = BIT(9), + IWL_MAC_BEACON_ANT_B = BIT(10), + IWL_MAC_BEACON_ANT_C = BIT(11), }; -/** - * struct iwl_mac_beacon_cmd_v7 - beacon template command with offloaded CSA - * @tx: the tx commands associated with the beacon frame - * @data: see &iwl_mac_beacon_cmd_data - */ -struct iwl_mac_beacon_cmd_v7 { - struct iwl_tx_cmd tx; - struct iwl_mac_beacon_cmd_data data; -} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_7 */ - /** * struct iwl_mac_beacon_cmd - beacon template command with offloaded CSA - * @byte_cnt: byte count of the beacon frame - * @flags: for future use + * @byte_cnt: byte count of the beacon frame. + * @flags: least significant byte for rate code. The most significant byte + * is &enum iwl_mac_beacon_flags. * @reserved: reserved - * @data: see &iwl_mac_beacon_cmd_data + * @template_id: currently equal to the mac context id of the coresponding mac. + * @tim_idx: the offset of the tim IE in the beacon + * @tim_size: the length of the tim IE + * @ecsa_offset: offset to the ECSA IE if present + * @csa_offset: offset to the CSA IE if present + * @frame: the template of the beacon frame */ struct iwl_mac_beacon_cmd { __le16 byte_cnt; __le16 flags; __le64 reserved; - struct iwl_mac_beacon_cmd_data data; -} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_8 */ + __le32 template_id; + __le32 tim_idx; + __le32 tim_size; + __le32 ecsa_offset; + __le32 csa_offset; + struct ieee80211_hdr frame[0]; +} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_9 */ struct iwl_beacon_notif { struct iwl_mvm_tx_resp beacon_notify_hdr; @@ -914,4 +916,4 @@ struct iwl_scd_txq_cfg_rsp { u8 scd_queue; } __packed; /* SCD_QUEUE_CFG_RSP_API_S_VER_1 */ -#endif /* __fw_api_tx_h__ */ +#endif /* __iwl_fw_api_tx_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h new file mode 100644 index 000000000000..87b4434224a1 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h @@ -0,0 +1,163 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. 
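A sketch, not part of the patch, of how the flattened iwl_mac_beacon_cmd (BEACON_TEMPLATE_CMD_API_S_VER_9) above might be filled: the rate code goes in the low byte of @flags and the antenna/CCK bits from enum iwl_mac_beacon_flags occupy the high byte. The helper name and its inputs are hypothetical.

static void example_fill_beacon_cmd(struct iwl_mac_beacon_cmd *cmd,
				    u8 rate_code, bool cck, u32 mac_ctxt_id,
				    u16 beacon_len)
{
	u16 flags = rate_code;		/* low byte: rate code */

	if (cck)
		flags |= IWL_MAC_BEACON_CCK;
	flags |= IWL_MAC_BEACON_ANT_A;	/* high byte: antenna selection */

	cmd->byte_cnt = cpu_to_le16(beacon_len);
	cmd->flags = cpu_to_le16(flags);
	cmd->template_id = cpu_to_le32(mac_ctxt_id);
	/*
	 * tim_idx, tim_size and the (e)CSA offsets would be derived from the
	 * beacon contents, which are copied into cmd->frame after the command.
	 */
}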
+ * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef __iwl_fw_api_txq_h__ +#define __iwl_fw_api_txq_h__ + +/* + * DQA queue numbers + * + * @IWL_MVM_DQA_CMD_QUEUE: a queue reserved for sending HCMDs to the FW + * @IWL_MVM_DQA_AUX_QUEUE: a queue reserved for aux frames + * @IWL_MVM_DQA_P2P_DEVICE_QUEUE: a queue reserved for P2P device frames + * @IWL_MVM_DQA_GCAST_QUEUE: a queue reserved for P2P GO/SoftAP GCAST frames + * @IWL_MVM_DQA_BSS_CLIENT_QUEUE: a queue reserved for BSS activity, to ensure + * that we are never left without the possibility to connect to an AP. + * @IWL_MVM_DQA_MIN_MGMT_QUEUE: first TXQ in pool for MGMT and non-QOS frames. + * Each MGMT queue is mapped to a single STA + * MGMT frames are frames that return true on ieee80211_is_mgmt() + * @IWL_MVM_DQA_MAX_MGMT_QUEUE: last TXQ in pool for MGMT frames + * @IWL_MVM_DQA_AP_PROBE_RESP_QUEUE: a queue reserved for P2P GO/SoftAP probe + * responses + * @IWL_MVM_DQA_MIN_DATA_QUEUE: first TXQ in pool for DATA frames. 
+ * DATA frames are intended for !ieee80211_is_mgmt() frames, but if + * the MGMT TXQ pool is exhausted, mgmt frames can be sent on DATA queues + * as well + * @IWL_MVM_DQA_MAX_DATA_QUEUE: last TXQ in pool for DATA frames + */ +enum iwl_mvm_dqa_txq { + IWL_MVM_DQA_CMD_QUEUE = 0, + IWL_MVM_DQA_AUX_QUEUE = 1, + IWL_MVM_DQA_P2P_DEVICE_QUEUE = 2, + IWL_MVM_DQA_GCAST_QUEUE = 3, + IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4, + IWL_MVM_DQA_MIN_MGMT_QUEUE = 5, + IWL_MVM_DQA_MAX_MGMT_QUEUE = 8, + IWL_MVM_DQA_AP_PROBE_RESP_QUEUE = 9, + IWL_MVM_DQA_MIN_DATA_QUEUE = 10, + IWL_MVM_DQA_MAX_DATA_QUEUE = 31, +}; + +enum iwl_mvm_tx_fifo { + IWL_MVM_TX_FIFO_BK = 0, + IWL_MVM_TX_FIFO_BE, + IWL_MVM_TX_FIFO_VI, + IWL_MVM_TX_FIFO_VO, + IWL_MVM_TX_FIFO_MCAST = 5, + IWL_MVM_TX_FIFO_CMD = 7, +}; + +enum iwl_gen2_tx_fifo { + IWL_GEN2_TX_FIFO_CMD = 0, + IWL_GEN2_EDCA_TX_FIFO_BK, + IWL_GEN2_EDCA_TX_FIFO_BE, + IWL_GEN2_EDCA_TX_FIFO_VI, + IWL_GEN2_EDCA_TX_FIFO_VO, + IWL_GEN2_TRIG_TX_FIFO_BK, + IWL_GEN2_TRIG_TX_FIFO_BE, + IWL_GEN2_TRIG_TX_FIFO_VI, + IWL_GEN2_TRIG_TX_FIFO_VO, +}; + +/** + * enum iwl_tx_queue_cfg_actions - TXQ config options + * @TX_QUEUE_CFG_ENABLE_QUEUE: enable a queue + * @TX_QUEUE_CFG_TFD_SHORT_FORMAT: use short TFD format + */ +enum iwl_tx_queue_cfg_actions { + TX_QUEUE_CFG_ENABLE_QUEUE = BIT(0), + TX_QUEUE_CFG_TFD_SHORT_FORMAT = BIT(1), +}; + +/** + * struct iwl_tx_queue_cfg_cmd - txq hw scheduler config command + * @sta_id: station id + * @tid: tid of the queue + * @flags: see &enum iwl_tx_queue_cfg_actions + * @cb_size: size of TFD cyclic buffer. Value is exponent - 3. + * Minimum value 0 (8 TFDs), maximum value 5 (256 TFDs) + * @byte_cnt_addr: address of byte count table + * @tfdq_addr: address of TFD circular buffer + */ +struct iwl_tx_queue_cfg_cmd { + u8 sta_id; + u8 tid; + __le16 flags; + __le32 cb_size; + __le64 byte_cnt_addr; + __le64 tfdq_addr; +} __packed; /* TX_QUEUE_CFG_CMD_API_S_VER_2 */ + +/** + * struct iwl_tx_queue_cfg_rsp - response to txq hw scheduler config + * @queue_number: queue number assigned to this RA -TID + * @flags: set on failure + * @write_pointer: initial value for write pointer + * @reserved: reserved + */ +struct iwl_tx_queue_cfg_rsp { + __le16 queue_number; + __le16 flags; + __le16 write_pointer; + __le16 reserved; +} __packed; /* TX_QUEUE_CFG_RSP_API_S_VER_2 */ + +#endif /* __iwl_fw_api_txq_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/common_rx.c b/drivers/net/wireless/intel/iwlwifi/fw/common_rx.c new file mode 100644 index 000000000000..6f75985eea66 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/common_rx.c @@ -0,0 +1,88 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. 
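As a side note, the cb_size encoding described in the iwl_tx_queue_cfg_cmd comment above (the value is the exponent minus 3, covering 8 to 256 TFDs) can be computed as in the sketch below. This is illustrative only; the helper names are not part of the patch.

/* n_tfds is assumed to be a power of two in the 8..256 range */
static u32 example_encode_cb_size(unsigned int n_tfds)
{
	return ilog2(n_tfds) - 3;
}

static void example_fill_txq_cfg(struct iwl_tx_queue_cfg_cmd *cmd,
				 u8 sta_id, u8 tid, unsigned int n_tfds,
				 dma_addr_t byte_cnt_addr, dma_addr_t tfdq_addr)
{
	cmd->sta_id = sta_id;
	cmd->tid = tid;
	cmd->flags = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE);
	cmd->cb_size = cpu_to_le32(example_encode_cb_size(n_tfds));
	cmd->byte_cnt_addr = cpu_to_le64(byte_cnt_addr);
	cmd->tfdq_addr = cpu_to_le64(tfdq_addr);
}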
+ * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#include "iwl-drv.h" +#include "runtime.h" +#include "fw/api/commands.h" +#include "fw/api/alive.h" + +static void iwl_fwrt_fseq_ver_mismatch(struct iwl_fw_runtime *fwrt, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_fseq_ver_mismatch_ntf *fseq = (void *)pkt->data; + + IWL_ERR(fwrt, "FSEQ version mismatch (aux: %d, wifi: %d)\n", + __le32_to_cpu(fseq->aux_read_fseq_ver), + __le32_to_cpu(fseq->wifi_fseq_ver)); +} + +void iwl_fwrt_handle_notification(struct iwl_fw_runtime *fwrt, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + u32 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); + + switch (cmd) { + case WIDE_ID(SYSTEM_GROUP, FSEQ_VER_MISMATCH_NTF): + iwl_fwrt_fseq_ver_mismatch(fwrt, rxb); + break; + default: + break; + } +} +IWL_EXPORT_SYMBOL(iwl_fwrt_handle_notification); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c similarity index 67% rename from drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c rename to drivers/net/wireless/intel/iwlwifi/fw/dbg.c index 1602b360353c..6afc7a799892 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -63,22 +63,37 @@ * *****************************************************************************/ #include - -#include "fw-dbg.h" +#include "iwl-drv.h" +#include "runtime.h" +#include "dbg.h" #include "iwl-io.h" -#include "mvm.h" #include "iwl-prph.h" #include "iwl-csr.h" +/** + * struct iwl_fw_dump_ptrs - set of pointers needed for the fw-error-dump + * + * @fwrt_ptr: pointer to the buffer coming from fwrt + * @trans_ptr: pointer to struct %iwl_trans_dump_data which 
contains the + * transport's data. + * @trans_len: length of the valid data in trans_ptr + * @fwrt_len: length of the valid data in fwrt_ptr + */ +struct iwl_fw_dump_ptrs { + struct iwl_trans_dump_data *trans_ptr; + void *fwrt_ptr; + u32 fwrt_len; +}; + #define RADIO_REG_MAX_READ 0x2ad -static void iwl_mvm_read_radio_reg(struct iwl_mvm *mvm, - struct iwl_fw_error_dump_data **dump_data) +static void iwl_read_radio_regs(struct iwl_fw_runtime *fwrt, + struct iwl_fw_error_dump_data **dump_data) { u8 *pos = (void *)(*dump_data)->data; unsigned long flags; int i; - if (!iwl_trans_grab_nic_access(mvm->trans, &flags)) + if (!iwl_trans_grab_nic_access(fwrt->trans, &flags)) return; (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RADIO_REG); @@ -88,20 +103,20 @@ static void iwl_mvm_read_radio_reg(struct iwl_mvm *mvm, u32 rd_cmd = RADIO_RSP_RD_CMD; rd_cmd |= i << RADIO_RSP_ADDR_POS; - iwl_write_prph_no_grab(mvm->trans, RSP_RADIO_CMD, rd_cmd); - *pos = (u8)iwl_read_prph_no_grab(mvm->trans, RSP_RADIO_RDDAT); + iwl_write_prph_no_grab(fwrt->trans, RSP_RADIO_CMD, rd_cmd); + *pos = (u8)iwl_read_prph_no_grab(fwrt->trans, RSP_RADIO_RDDAT); pos++; } *dump_data = iwl_fw_error_next_data(*dump_data); - iwl_trans_release_nic_access(mvm->trans, &flags); + iwl_trans_release_nic_access(fwrt->trans, &flags); } -static void iwl_mvm_dump_rxf(struct iwl_mvm *mvm, - struct iwl_fw_error_dump_data **dump_data, - int size, u32 offset, int fifo_num) +static void iwl_fwrt_dump_rxf(struct iwl_fw_runtime *fwrt, + struct iwl_fw_error_dump_data **dump_data, + int size, u32 offset, int fifo_num) { struct iwl_fw_error_dump_fifo *fifo_hdr; u32 *fifo_data; @@ -122,41 +137,41 @@ static void iwl_mvm_dump_rxf(struct iwl_mvm *mvm, fifo_hdr->fifo_num = cpu_to_le32(fifo_num); fifo_hdr->available_bytes = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, RXF_RD_D_SPACE + offset)); fifo_hdr->wr_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, RXF_RD_WR_PTR + offset)); fifo_hdr->rd_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, RXF_RD_RD_PTR + offset)); fifo_hdr->fence_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, RXF_RD_FENCE_PTR + offset)); fifo_hdr->fence_mode = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, RXF_SET_FENCE_MODE + offset)); /* Lock fence */ - iwl_trans_write_prph(mvm->trans, RXF_SET_FENCE_MODE + offset, 0x1); + iwl_trans_write_prph(fwrt->trans, RXF_SET_FENCE_MODE + offset, 0x1); /* Set fence pointer to the same place like WR pointer */ - iwl_trans_write_prph(mvm->trans, RXF_LD_WR2FENCE + offset, 0x1); + iwl_trans_write_prph(fwrt->trans, RXF_LD_WR2FENCE + offset, 0x1); /* Set fence offset */ - iwl_trans_write_prph(mvm->trans, + iwl_trans_write_prph(fwrt->trans, RXF_LD_FENCE_OFFSET_ADDR + offset, 0x0); /* Read FIFO */ fifo_len /= sizeof(u32); /* Size in DWORDS */ for (i = 0; i < fifo_len; i++) - fifo_data[i] = iwl_trans_read_prph(mvm->trans, + fifo_data[i] = iwl_trans_read_prph(fwrt->trans, RXF_FIFO_RD_FENCE_INC + offset); *dump_data = iwl_fw_error_next_data(*dump_data); } -static void iwl_mvm_dump_txf(struct iwl_mvm *mvm, - struct iwl_fw_error_dump_data **dump_data, - int size, u32 offset, int fifo_num) +static void iwl_fwrt_dump_txf(struct iwl_fw_runtime *fwrt, + struct iwl_fw_error_dump_data **dump_data, + int size, u32 offset, int fifo_num) { struct iwl_fw_error_dump_fifo *fifo_hdr; 
u32 *fifo_data; @@ -177,91 +192,91 @@ static void iwl_mvm_dump_txf(struct iwl_mvm *mvm, fifo_hdr->fifo_num = cpu_to_le32(fifo_num); fifo_hdr->available_bytes = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_FIFO_ITEM_CNT + offset)); fifo_hdr->wr_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_WR_PTR + offset)); fifo_hdr->rd_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_RD_PTR + offset)); fifo_hdr->fence_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_FENCE_PTR + offset)); fifo_hdr->fence_mode = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_LOCK_FENCE + offset)); /* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */ - iwl_trans_write_prph(mvm->trans, TXF_READ_MODIFY_ADDR + offset, + iwl_trans_write_prph(fwrt->trans, TXF_READ_MODIFY_ADDR + offset, TXF_WR_PTR + offset); /* Dummy-read to advance the read pointer to the head */ - iwl_trans_read_prph(mvm->trans, TXF_READ_MODIFY_DATA + offset); + iwl_trans_read_prph(fwrt->trans, TXF_READ_MODIFY_DATA + offset); /* Read FIFO */ fifo_len /= sizeof(u32); /* Size in DWORDS */ for (i = 0; i < fifo_len; i++) - fifo_data[i] = iwl_trans_read_prph(mvm->trans, + fifo_data[i] = iwl_trans_read_prph(fwrt->trans, TXF_READ_MODIFY_DATA + offset); *dump_data = iwl_fw_error_next_data(*dump_data); } -static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm, - struct iwl_fw_error_dump_data **dump_data) +static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt, + struct iwl_fw_error_dump_data **dump_data) { struct iwl_fw_error_dump_fifo *fifo_hdr; - struct iwl_mvm_shared_mem_cfg *cfg = &mvm->smem_cfg; + struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg; u32 *fifo_data; u32 fifo_len; unsigned long flags; int i, j; - if (!iwl_trans_grab_nic_access(mvm->trans, &flags)) + if (!iwl_trans_grab_nic_access(fwrt->trans, &flags)) return; /* Pull RXF1 */ - iwl_mvm_dump_rxf(mvm, dump_data, cfg->lmac[0].rxfifo1_size, 0, 0); + iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[0].rxfifo1_size, 0, 0); /* Pull RXF2 */ - iwl_mvm_dump_rxf(mvm, dump_data, cfg->rxfifo2_size, - RXF_DIFF_FROM_PREV, 1); + iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size, + RXF_DIFF_FROM_PREV, 1); /* Pull LMAC2 RXF1 */ - if (mvm->smem_cfg.num_lmacs > 1) - iwl_mvm_dump_rxf(mvm, dump_data, cfg->lmac[1].rxfifo1_size, - LMAC2_PRPH_OFFSET, 2); + if (fwrt->smem_cfg.num_lmacs > 1) + iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[1].rxfifo1_size, + LMAC2_PRPH_OFFSET, 2); /* Pull TXF data from LMAC1 */ - for (i = 0; i < mvm->smem_cfg.num_txfifo_entries; i++) { + for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) { /* Mark the number of TXF we're pulling now */ - iwl_trans_write_prph(mvm->trans, TXF_LARC_NUM, i); - iwl_mvm_dump_txf(mvm, dump_data, cfg->lmac[0].txfifo_size[i], - 0, i); + iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM, i); + iwl_fwrt_dump_txf(fwrt, dump_data, cfg->lmac[0].txfifo_size[i], + 0, i); } /* Pull TXF data from LMAC2 */ - if (mvm->smem_cfg.num_lmacs > 1) { - for (i = 0; i < mvm->smem_cfg.num_txfifo_entries; i++) { + if (fwrt->smem_cfg.num_lmacs > 1) { + for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) { /* Mark the number of TXF we're pulling now */ - iwl_trans_write_prph(mvm->trans, + iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM + LMAC2_PRPH_OFFSET, i); - iwl_mvm_dump_txf(mvm, dump_data, - cfg->lmac[1].txfifo_size[i], - 
LMAC2_PRPH_OFFSET, - i + cfg->num_txfifo_entries); + iwl_fwrt_dump_txf(fwrt, dump_data, + cfg->lmac[1].txfifo_size[i], + LMAC2_PRPH_OFFSET, + i + cfg->num_txfifo_entries); } } - if (fw_has_capa(&mvm->fw->ucode_capa, + if (fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { /* Pull UMAC internal TXF data from all TXFs */ for (i = 0; - i < ARRAY_SIZE(mvm->smem_cfg.internal_txfifo_size); + i < ARRAY_SIZE(fwrt->smem_cfg.internal_txfifo_size); i++) { fifo_hdr = (void *)(*dump_data)->data; fifo_data = (void *)fifo_hdr->data; - fifo_len = mvm->smem_cfg.internal_txfifo_size[i]; + fifo_len = fwrt->smem_cfg.internal_txfifo_size[i]; /* No need to try to read the data if the length is 0 */ if (fifo_len == 0) @@ -276,52 +291,45 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm, fifo_hdr->fifo_num = cpu_to_le32(i); /* Mark the number of TXF we're pulling now */ - iwl_trans_write_prph(mvm->trans, TXF_CPU2_NUM, i + - mvm->smem_cfg.num_txfifo_entries); + iwl_trans_write_prph(fwrt->trans, TXF_CPU2_NUM, i + + fwrt->smem_cfg.num_txfifo_entries); fifo_hdr->available_bytes = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_CPU2_FIFO_ITEM_CNT)); fifo_hdr->wr_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_CPU2_WR_PTR)); fifo_hdr->rd_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_CPU2_RD_PTR)); fifo_hdr->fence_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_CPU2_FENCE_PTR)); fifo_hdr->fence_mode = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_CPU2_LOCK_FENCE)); /* Set TXF_CPU2_READ_MODIFY_ADDR to TXF_CPU2_WR_PTR */ - iwl_trans_write_prph(mvm->trans, + iwl_trans_write_prph(fwrt->trans, TXF_CPU2_READ_MODIFY_ADDR, TXF_CPU2_WR_PTR); /* Dummy-read to advance the read pointer to head */ - iwl_trans_read_prph(mvm->trans, + iwl_trans_read_prph(fwrt->trans, TXF_CPU2_READ_MODIFY_DATA); /* Read FIFO */ fifo_len /= sizeof(u32); /* Size in DWORDS */ for (j = 0; j < fifo_len; j++) fifo_data[j] = - iwl_trans_read_prph(mvm->trans, + iwl_trans_read_prph(fwrt->trans, TXF_CPU2_READ_MODIFY_DATA); *dump_data = iwl_fw_error_next_data(*dump_data); } } - iwl_trans_release_nic_access(mvm->trans, &flags); -} - -void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm) -{ - if (mvm->fw_dump_desc != &iwl_mvm_dump_desc_assert) - kfree(mvm->fw_dump_desc); - mvm->fw_dump_desc = NULL; + iwl_trans_release_nic_access(fwrt->trans, &flags); } #define IWL8260_ICCM_OFFSET 0x44000 /* Only for B-step */ @@ -531,37 +539,34 @@ static struct scatterlist *alloc_sgtable(int size) return table; } -void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) +void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) { struct iwl_fw_error_dump_file *dump_file; struct iwl_fw_error_dump_data *dump_data; struct iwl_fw_error_dump_info *dump_info; struct iwl_fw_error_dump_mem *dump_mem; + struct iwl_fw_error_dump_smem_cfg *dump_smem_cfg; struct iwl_fw_error_dump_trigger_desc *dump_trig; - struct iwl_mvm_dump_ptrs *fw_error_dump; + struct iwl_fw_dump_ptrs *fw_error_dump; struct scatterlist *sg_dump_data; u32 sram_len, sram_ofs; - const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem = mvm->fw->dbg_mem_tlv; + const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem = fwrt->fw->dbg_mem_tlv; + struct iwl_fwrt_shared_mem_cfg *mem_cfg = &fwrt->smem_cfg; u32 file_len, fifo_data_len = 0, prph_len = 0, radio_len = 0; - 
u32 smem_len = mvm->fw->n_dbg_mem_tlv ? 0 : mvm->cfg->smem_len; - u32 sram2_len = mvm->fw->n_dbg_mem_tlv ? 0 : mvm->cfg->dccm2_len; + u32 smem_len = fwrt->fw->n_dbg_mem_tlv ? 0 : fwrt->trans->cfg->smem_len; + u32 sram2_len = fwrt->fw->n_dbg_mem_tlv ? + 0 : fwrt->trans->cfg->dccm2_len; bool monitor_dump_only = false; int i; - if (!IWL_MVM_COLLECT_FW_ERR_DUMP && - !mvm->trans->dbg_dest_tlv) - return; - - lockdep_assert_held(&mvm->mutex); - /* there's no point in fw dump if the bus is dead */ - if (test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) { - IWL_ERR(mvm, "Skip fw error dump since bus is dead\n"); + if (test_bit(STATUS_TRANS_DEAD, &fwrt->trans->status)) { + IWL_ERR(fwrt, "Skip fw error dump since bus is dead\n"); goto out; } - if (mvm->fw_dump_trig && - mvm->fw_dump_trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY) + if (fwrt->dump.trig && + fwrt->dump.trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY) monitor_dump_only = true; fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL); @@ -569,21 +574,19 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) goto out; /* SRAM - include stack CCM if driver knows the values for it */ - if (!mvm->cfg->dccm_offset || !mvm->cfg->dccm_len) { + if (!fwrt->trans->cfg->dccm_offset || !fwrt->trans->cfg->dccm_len) { const struct fw_img *img; - img = &mvm->fw->img[mvm->cur_ucode]; + img = &fwrt->fw->img[fwrt->cur_fw_img]; sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset; sram_len = img->sec[IWL_UCODE_SECTION_DATA].len; } else { - sram_ofs = mvm->cfg->dccm_offset; - sram_len = mvm->cfg->dccm_len; + sram_ofs = fwrt->trans->cfg->dccm_offset; + sram_len = fwrt->trans->cfg->dccm_len; } /* reading RXF/TXF sizes */ - if (test_bit(STATUS_FW_ERROR, &mvm->trans->status)) { - struct iwl_mvm_shared_mem_cfg *mem_cfg = &mvm->smem_cfg; - + if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) { fifo_data_len = 0; /* Count RXF2 size */ @@ -621,7 +624,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) } } - if (fw_has_capa(&mvm->fw->ucode_capa, + if (fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { for (i = 0; i < ARRAY_SIZE(mem_cfg->internal_txfifo_size); @@ -638,7 +641,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) } /* Make room for PRPH registers */ - if (!mvm->trans->cfg->gen2) { + if (!fwrt->trans->cfg->gen2) { for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm); i++) { /* The range includes both boundaries */ @@ -652,7 +655,8 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) } } - if (!mvm->trans->cfg->gen2 && mvm->cfg->mq_rx_supported) { + if (!fwrt->trans->cfg->gen2 && + fwrt->trans->cfg->mq_rx_supported) { for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) { /* The range includes both boundaries */ @@ -666,12 +670,13 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) } } - if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) + if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ; } file_len = sizeof(*dump_file) + - sizeof(*dump_data) * 2 + + sizeof(*dump_data) * 3 + + sizeof(*dump_smem_cfg) + fifo_data_len + prph_len + radio_len + @@ -686,31 +691,31 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len; /* Make room for MEM segments */ - for (i = 0; i < mvm->fw->n_dbg_mem_tlv; i++) { + for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) { file_len += sizeof(*dump_data) + sizeof(*dump_mem) + le32_to_cpu(fw_dbg_mem[i].len); } /* Make room for fw's virtual image pages, if it exists */ - if 
(!mvm->trans->cfg->gen2 && - mvm->fw->img[mvm->cur_ucode].paging_mem_size && - mvm->fw_paging_db[0].fw_paging_block) - file_len += mvm->num_of_paging_blk * + if (!fwrt->trans->cfg->gen2 && + fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size && + fwrt->fw_paging_db[0].fw_paging_block) + file_len += fwrt->num_of_paging_blk * (sizeof(*dump_data) + sizeof(struct iwl_fw_error_dump_paging) + PAGING_BLOCK_SIZE); /* If we only want a monitor dump, reset the file length */ if (monitor_dump_only) { - file_len = sizeof(*dump_file) + sizeof(*dump_data) + - sizeof(*dump_info); + file_len = sizeof(*dump_file) + sizeof(*dump_data) * 2 + + sizeof(*dump_info) + sizeof(*dump_smem_cfg); } - if (mvm->fw_dump_desc) + if (fwrt->dump.desc) file_len += sizeof(*dump_data) + sizeof(*dump_trig) + - mvm->fw_dump_desc->len; + fwrt->dump.desc->len; - if (!mvm->fw->n_dbg_mem_tlv) + if (!fwrt->fw->n_dbg_mem_tlv) file_len += sram_len + sizeof(*dump_mem); dump_file = vzalloc(file_len); @@ -719,7 +724,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) goto out; } - fw_error_dump->op_mode_ptr = dump_file; + fw_error_dump->fwrt_ptr = dump_file; dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER); dump_data = (void *)dump_file->data; @@ -728,32 +733,59 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) dump_data->len = cpu_to_le32(sizeof(*dump_info)); dump_info = (void *)dump_data->data; dump_info->device_family = - mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000 ? + fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 ? cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) : cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8); - dump_info->hw_step = cpu_to_le32(CSR_HW_REV_STEP(mvm->trans->hw_rev)); - memcpy(dump_info->fw_human_readable, mvm->fw->human_readable, + dump_info->hw_step = cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev)); + memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable, sizeof(dump_info->fw_human_readable)); - strncpy(dump_info->dev_human_readable, mvm->cfg->name, + strncpy(dump_info->dev_human_readable, fwrt->trans->cfg->name, sizeof(dump_info->dev_human_readable)); - strncpy(dump_info->bus_human_readable, mvm->dev->bus->name, + strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name, sizeof(dump_info->bus_human_readable)); dump_data = iwl_fw_error_next_data(dump_data); - /* We only dump the FIFOs if the FW is in error state */ - if (test_bit(STATUS_FW_ERROR, &mvm->trans->status)) { - iwl_mvm_dump_fifos(mvm, &dump_data); - if (radio_len) - iwl_mvm_read_radio_reg(mvm, &dump_data); + + /* Dump shared memory configuration */ + dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG); + dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg)); + dump_smem_cfg = (void *)dump_data->data; + dump_smem_cfg->num_lmacs = cpu_to_le32(mem_cfg->num_lmacs); + dump_smem_cfg->num_txfifo_entries = + cpu_to_le32(mem_cfg->num_txfifo_entries); + for (i = 0; i < MAX_NUM_LMAC; i++) { + int j; + + for (j = 0; j < TX_FIFO_MAX_NUM; j++) + dump_smem_cfg->lmac[i].txfifo_size[j] = + cpu_to_le32(mem_cfg->lmac[i].txfifo_size[j]); + dump_smem_cfg->lmac[i].rxfifo1_size = + cpu_to_le32(mem_cfg->lmac[i].rxfifo1_size); + } + dump_smem_cfg->rxfifo2_size = cpu_to_le32(mem_cfg->rxfifo2_size); + dump_smem_cfg->internal_txfifo_addr = + cpu_to_le32(mem_cfg->internal_txfifo_addr); + for (i = 0; i < TX_FIFO_INTERNAL_MAX_NUM; i++) { + dump_smem_cfg->internal_txfifo_size[i] = + cpu_to_le32(mem_cfg->internal_txfifo_size[i]); } - if (mvm->fw_dump_desc) { + dump_data = iwl_fw_error_next_data(dump_data); + + /* We only dump the FIFOs if the FW is in error 
state */ + if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) { + iwl_fw_dump_fifos(fwrt, &dump_data); + if (radio_len) + iwl_read_radio_regs(fwrt, &dump_data); + } + + if (fwrt->dump.desc) { dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO); dump_data->len = cpu_to_le32(sizeof(*dump_trig) + - mvm->fw_dump_desc->len); + fwrt->dump.desc->len); dump_trig = (void *)dump_data->data; - memcpy(dump_trig, &mvm->fw_dump_desc->trig_desc, - sizeof(*dump_trig) + mvm->fw_dump_desc->len); + memcpy(dump_trig, &fwrt->dump.desc->trig_desc, + sizeof(*dump_trig) + fwrt->dump.desc->len); dump_data = iwl_fw_error_next_data(dump_data); } @@ -762,18 +794,18 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) if (monitor_dump_only) goto dump_trans_data; - if (!mvm->fw->n_dbg_mem_tlv) { + if (!fwrt->fw->n_dbg_mem_tlv) { dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem)); dump_mem = (void *)dump_data->data; dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM); dump_mem->offset = cpu_to_le32(sram_ofs); - iwl_trans_read_mem_bytes(mvm->trans, sram_ofs, dump_mem->data, + iwl_trans_read_mem_bytes(fwrt->trans, sram_ofs, dump_mem->data, sram_len); dump_data = iwl_fw_error_next_data(dump_data); } - for (i = 0; i < mvm->fw->n_dbg_mem_tlv; i++) { + for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) { u32 len = le32_to_cpu(fw_dbg_mem[i].len); u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs); bool success; @@ -786,13 +818,13 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) switch (dump_mem->type & cpu_to_le32(FW_DBG_MEM_TYPE_MASK)) { case cpu_to_le32(FW_DBG_MEM_TYPE_REGULAR): - iwl_trans_read_mem_bytes(mvm->trans, ofs, + iwl_trans_read_mem_bytes(fwrt->trans, ofs, dump_mem->data, len); success = true; break; case cpu_to_le32(FW_DBG_MEM_TYPE_PRPH): - success = iwl_read_prph_block(mvm->trans, ofs, len, + success = iwl_read_prph_block(fwrt->trans, ofs, len, (void *)dump_mem->data); break; default: @@ -813,8 +845,9 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem)); dump_mem = (void *)dump_data->data; dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SMEM); - dump_mem->offset = cpu_to_le32(mvm->cfg->smem_offset); - iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->smem_offset, + dump_mem->offset = cpu_to_le32(fwrt->trans->cfg->smem_offset); + iwl_trans_read_mem_bytes(fwrt->trans, + fwrt->trans->cfg->smem_offset, dump_mem->data, smem_len); dump_data = iwl_fw_error_next_data(dump_data); } @@ -824,28 +857,29 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem)); dump_mem = (void *)dump_data->data; dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM); - dump_mem->offset = cpu_to_le32(mvm->cfg->dccm2_offset); - iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->dccm2_offset, + dump_mem->offset = cpu_to_le32(fwrt->trans->cfg->dccm2_offset); + iwl_trans_read_mem_bytes(fwrt->trans, + fwrt->trans->cfg->dccm2_offset, dump_mem->data, sram2_len); dump_data = iwl_fw_error_next_data(dump_data); } /* Dump fw's virtual image */ - if (!mvm->trans->cfg->gen2 && - mvm->fw->img[mvm->cur_ucode].paging_mem_size && - mvm->fw_paging_db[0].fw_paging_block) { - for (i = 1; i < mvm->num_of_paging_blk + 1; i++) { + if (!fwrt->trans->cfg->gen2 && + fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size && + fwrt->fw_paging_db[0].fw_paging_block) { + for (i = 1; i < fwrt->num_of_paging_blk + 1; i++) { struct iwl_fw_error_dump_paging *paging; struct page *pages = - 
mvm->fw_paging_db[i].fw_paging_block; - dma_addr_t addr = mvm->fw_paging_db[i].fw_paging_phys; + fwrt->fw_paging_db[i].fw_paging_block; + dma_addr_t addr = fwrt->fw_paging_db[i].fw_paging_phys; dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING); dump_data->len = cpu_to_le32(sizeof(*paging) + PAGING_BLOCK_SIZE); paging = (void *)dump_data->data; paging->index = cpu_to_le32(i); - dma_sync_single_for_cpu(mvm->trans->dev, addr, + dma_sync_single_for_cpu(fwrt->trans->dev, addr, PAGING_BLOCK_SIZE, DMA_BIDIRECTIONAL); memcpy(paging->data, page_address(pages), @@ -855,20 +889,20 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) } if (prph_len) { - iwl_dump_prph(mvm->trans, &dump_data, + iwl_dump_prph(fwrt->trans, &dump_data, iwl_prph_dump_addr_comm, ARRAY_SIZE(iwl_prph_dump_addr_comm)); - if (mvm->cfg->mq_rx_supported) - iwl_dump_prph(mvm->trans, &dump_data, + if (fwrt->trans->cfg->mq_rx_supported) + iwl_dump_prph(fwrt->trans, &dump_data, iwl_prph_dump_addr_9000, ARRAY_SIZE(iwl_prph_dump_addr_9000)); } dump_trans_data: - fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans, - mvm->fw_dump_trig); - fw_error_dump->op_mode_len = file_len; + fw_error_dump->trans_ptr = iwl_trans_dump_data(fwrt->trans, + fwrt->dump.trig); + fw_error_dump->fwrt_len = file_len; if (fw_error_dump->trans_ptr) file_len += fw_error_dump->trans_ptr->len; dump_file->file_len = cpu_to_le32(file_len); @@ -877,68 +911,72 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) if (sg_dump_data) { sg_pcopy_from_buffer(sg_dump_data, sg_nents(sg_dump_data), - fw_error_dump->op_mode_ptr, - fw_error_dump->op_mode_len, 0); + fw_error_dump->fwrt_ptr, + fw_error_dump->fwrt_len, 0); if (fw_error_dump->trans_ptr) sg_pcopy_from_buffer(sg_dump_data, sg_nents(sg_dump_data), fw_error_dump->trans_ptr->data, fw_error_dump->trans_ptr->len, - fw_error_dump->op_mode_len); - dev_coredumpsg(mvm->trans->dev, sg_dump_data, file_len, + fw_error_dump->fwrt_len); + dev_coredumpsg(fwrt->trans->dev, sg_dump_data, file_len, GFP_KERNEL); } - vfree(fw_error_dump->op_mode_ptr); + vfree(fw_error_dump->fwrt_ptr); vfree(fw_error_dump->trans_ptr); kfree(fw_error_dump); out: - iwl_mvm_free_fw_dump_desc(mvm); - mvm->fw_dump_trig = NULL; - clear_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status); + iwl_fw_free_dump_desc(fwrt); + fwrt->dump.trig = NULL; + clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status); } +IWL_EXPORT_SYMBOL(iwl_fw_error_dump); -const struct iwl_mvm_dump_desc iwl_mvm_dump_desc_assert = { +const struct iwl_fw_dump_desc iwl_dump_desc_assert = { .trig_desc = { .type = cpu_to_le32(FW_DBG_TRIGGER_FW_ASSERT), }, }; +IWL_EXPORT_SYMBOL(iwl_dump_desc_assert); -int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm, - const struct iwl_mvm_dump_desc *desc, - const struct iwl_fw_dbg_trigger_tlv *trigger) +int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt, + const struct iwl_fw_dump_desc *desc, + const struct iwl_fw_dbg_trigger_tlv *trigger) { unsigned int delay = 0; if (trigger) delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay)); - if (WARN(mvm->trans->state == IWL_TRANS_NO_FW, + if (WARN(fwrt->trans->state == IWL_TRANS_NO_FW, "Can't collect dbg data when FW isn't alive\n")) return -EIO; - if (test_and_set_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status)) + if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status)) return -EBUSY; - if (WARN_ON(mvm->fw_dump_desc)) - iwl_mvm_free_fw_dump_desc(mvm); + if (WARN_ON(fwrt->dump.desc)) + iwl_fw_free_dump_desc(fwrt); - IWL_WARN(mvm, "Collecting data: trigger %d fired.\n", + IWL_WARN(fwrt, "Collecting data: 
trigger %d fired.\n", le32_to_cpu(desc->trig_desc.type)); - mvm->fw_dump_desc = desc; - mvm->fw_dump_trig = trigger; + fwrt->dump.desc = desc; + fwrt->dump.trig = trigger; - schedule_delayed_work(&mvm->fw_dump_wk, delay); + schedule_delayed_work(&fwrt->dump.wk, delay); return 0; } +IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_desc); -int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig, - const char *str, size_t len, - const struct iwl_fw_dbg_trigger_tlv *trigger) +int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, + enum iwl_fw_dbg_trigger trig, + const char *str, size_t len, + const struct iwl_fw_dbg_trigger_tlv *trigger) { - struct iwl_mvm_dump_desc *desc; + struct iwl_fw_dump_desc *desc; desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC); if (!desc) @@ -948,12 +986,13 @@ int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig, desc->trig_desc.type = cpu_to_le32(trig); memcpy(desc->trig_desc.data, str, len); - return iwl_mvm_fw_dbg_collect_desc(mvm, desc, trigger); + return iwl_fw_dbg_collect_desc(fwrt, desc, trigger); } +IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect); -int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm, - struct iwl_fw_dbg_trigger_tlv *trigger, - const char *fmt, ...) +int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt, + struct iwl_fw_dbg_trigger_tlv *trigger, + const char *fmt, ...) { u16 occurrences = le16_to_cpu(trigger->occurrences); int ret, len = 0; @@ -978,8 +1017,8 @@ int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm, len = strlen(buf) + 1; } - ret = iwl_mvm_fw_dbg_collect(mvm, le32_to_cpu(trigger->id), buf, len, - trigger); + ret = iwl_fw_dbg_collect(fwrt, le32_to_cpu(trigger->id), buf, len, + trigger); if (ret) return ret; @@ -987,37 +1026,42 @@ int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm, trigger->occurrences = cpu_to_le16(occurrences - 1); return 0; } +IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_trig); -int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id) +int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id) { u8 *ptr; int ret; int i; - if (WARN_ONCE(conf_id >= ARRAY_SIZE(mvm->fw->dbg_conf_tlv), + if (WARN_ONCE(conf_id >= ARRAY_SIZE(fwrt->fw->dbg_conf_tlv), "Invalid configuration %d\n", conf_id)) return -EINVAL; /* EARLY START - firmware's configuration is hard coded */ - if ((!mvm->fw->dbg_conf_tlv[conf_id] || - !mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) && + if ((!fwrt->fw->dbg_conf_tlv[conf_id] || + !fwrt->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) && conf_id == FW_DBG_START_FROM_ALIVE) return 0; - if (!mvm->fw->dbg_conf_tlv[conf_id]) + if (!fwrt->fw->dbg_conf_tlv[conf_id]) return -EINVAL; - if (mvm->fw_dbg_conf != FW_DBG_INVALID) - IWL_WARN(mvm, "FW already configured (%d) - re-configuring\n", - mvm->fw_dbg_conf); + if (fwrt->dump.conf != FW_DBG_INVALID) + IWL_WARN(fwrt, "FW already configured (%d) - re-configuring\n", + fwrt->dump.conf); /* Send all HCMDs for configuring the FW debug */ - ptr = (void *)&mvm->fw->dbg_conf_tlv[conf_id]->hcmd; - for (i = 0; i < mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds; i++) { + ptr = (void *)&fwrt->fw->dbg_conf_tlv[conf_id]->hcmd; + for (i = 0; i < fwrt->fw->dbg_conf_tlv[conf_id]->num_of_hcmds; i++) { struct iwl_fw_dbg_conf_hcmd *cmd = (void *)ptr; + struct iwl_host_cmd hcmd = { + .id = cmd->id, + .len = { le16_to_cpu(cmd->len), }, + .data = { cmd->data, }, + }; - ret = iwl_mvm_send_cmd_pdu(mvm, cmd->id, 0, - le16_to_cpu(cmd->len), cmd->data); + ret = iwl_trans_send_cmd(fwrt->trans, &hcmd); if (ret) return ret; @@ -1025,7 +1069,59 @@ int 
iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id) ptr += le16_to_cpu(cmd->len); } - mvm->fw_dbg_conf = conf_id; + fwrt->dump.conf = conf_id; return 0; } +IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf); + +void iwl_fw_error_dump_wk(struct work_struct *work) +{ + struct iwl_fw_runtime *fwrt = + container_of(work, struct iwl_fw_runtime, dump.wk.work); + + if (fwrt->ops && fwrt->ops->dump_start && + fwrt->ops->dump_start(fwrt->ops_ctx)) + return; + + if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) { + /* stop recording */ + iwl_set_bits_prph(fwrt->trans, MON_BUFF_SAMPLE_CTL, 0x100); + + iwl_fw_error_dump(fwrt); + + /* start recording again if the firmware is not crashed */ + if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) && + fwrt->fw->dbg_dest_tlv) { + iwl_clear_bits_prph(fwrt->trans, + MON_BUFF_SAMPLE_CTL, 0x100); + iwl_clear_bits_prph(fwrt->trans, + MON_BUFF_SAMPLE_CTL, 0x1); + iwl_set_bits_prph(fwrt->trans, + MON_BUFF_SAMPLE_CTL, 0x1); + } + } else { + u32 in_sample = iwl_read_prph(fwrt->trans, DBGC_IN_SAMPLE); + u32 out_ctrl = iwl_read_prph(fwrt->trans, DBGC_OUT_CTRL); + + /* stop recording */ + iwl_write_prph(fwrt->trans, DBGC_IN_SAMPLE, 0); + udelay(100); + iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, 0); + /* wait before we collect the data till the DBGC stop */ + udelay(500); + + iwl_fw_error_dump(fwrt); + + /* start recording again if the firmware is not crashed */ + if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) && + fwrt->fw->dbg_dest_tlv) { + iwl_write_prph(fwrt->trans, DBGC_IN_SAMPLE, in_sample); + iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, out_ctrl); + } + } + + if (fwrt->ops && fwrt->ops->dump_end) + fwrt->ops->dump_end(fwrt->ops_ctx); +} + diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h similarity index 59% rename from drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h rename to drivers/net/wireless/intel/iwlwifi/fw/dbg.h index 4a5287a0c617..0f810ea89d31 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h @@ -7,7 +7,7 @@ * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +32,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * All rights reserved. 
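For context, an op-mode that detects a problem can now ask the shared runtime to collect a dump through the entry points above. The sketch below is illustrative and not taken from the patch; the trigger choice, message and helper name are invented.

static void example_collect_on_error(struct iwl_fw_runtime *fwrt)
{
	const char *msg = "example: driver detected an error";

	/* NULL trigger: no stop delay, no occurrence bookkeeping */
	iwl_fw_dbg_collect(fwrt, FW_DBG_TRIGGER_FW_ASSERT,
			   msg, strlen(msg) + 1, NULL);
}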
* * Redistribution and use in source and binary forms, with or without @@ -63,24 +63,46 @@ * *****************************************************************************/ -#ifndef __mvm_fw_dbg_h__ -#define __mvm_fw_dbg_h__ -#include "fw/file.h" -#include "fw/error-dump.h" -#include "mvm.h" +#ifndef __iwl_fw_dbg_h__ +#define __iwl_fw_dbg_h__ +#include +#include +#include "runtime.h" +#include "file.h" +#include "error-dump.h" -void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm); -void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm); -int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm, - const struct iwl_mvm_dump_desc *desc, - const struct iwl_fw_dbg_trigger_tlv *trigger); -int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig, - const char *str, size_t len, - const struct iwl_fw_dbg_trigger_tlv *trigger); -int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm, - struct iwl_fw_dbg_trigger_tlv *trigger, - const char *fmt, ...) __printf(3, 4); -int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 id); +/** + * struct iwl_fw_dump_desc - describes the dump + * @len: length of trig_desc->data + * @trig_desc: the description of the dump + */ +struct iwl_fw_dump_desc { + size_t len; + /* must be last */ + struct iwl_fw_error_dump_trigger_desc trig_desc; +}; + +extern const struct iwl_fw_dump_desc iwl_dump_desc_assert; + +static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt) +{ + if (fwrt->dump.desc != &iwl_dump_desc_assert) + kfree(fwrt->dump.desc); + fwrt->dump.desc = NULL; +} + +void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt); +int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt, + const struct iwl_fw_dump_desc *desc, + const struct iwl_fw_dbg_trigger_tlv *trigger); +int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, + enum iwl_fw_dbg_trigger trig, + const char *str, size_t len, + const struct iwl_fw_dbg_trigger_tlv *trigger); +int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt, + struct iwl_fw_dbg_trigger_tlv *trigger, + const char *fmt, ...) 
__printf(3, 4); +int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 id); #define iwl_fw_dbg_trigger_enabled(fw, id) ({ \ void *__dbg_trigger = (fw)->dbg_trigger_tlv[(id)]; \ @@ -101,25 +123,25 @@ _iwl_fw_dbg_get_trigger(const struct iwl_fw *fw, enum iwl_fw_dbg_trigger id) static inline bool iwl_fw_dbg_trigger_vif_match(struct iwl_fw_dbg_trigger_tlv *trig, - struct ieee80211_vif *vif) + struct wireless_dev *wdev) { u32 trig_vif = le32_to_cpu(trig->vif_type); return trig_vif == IWL_FW_DBG_CONF_VIF_ANY || - ieee80211_vif_type_p2p(vif) == trig_vif; + wdev->iftype == trig_vif; } static inline bool -iwl_fw_dbg_trigger_stop_conf_match(struct iwl_mvm *mvm, +iwl_fw_dbg_trigger_stop_conf_match(struct iwl_fw_runtime *fwrt, struct iwl_fw_dbg_trigger_tlv *trig) { return ((trig->mode & IWL_FW_DBG_TRIGGER_STOP) && - (mvm->fw_dbg_conf == FW_DBG_INVALID || - (BIT(mvm->fw_dbg_conf) & le32_to_cpu(trig->stop_conf_ids)))); + (fwrt->dump.conf == FW_DBG_INVALID || + (BIT(fwrt->dump.conf) & le32_to_cpu(trig->stop_conf_ids)))); } static inline bool -iwl_fw_dbg_no_trig_window(struct iwl_mvm *mvm, +iwl_fw_dbg_no_trig_window(struct iwl_fw_runtime *fwrt, struct iwl_fw_dbg_trigger_tlv *trig) { unsigned long wind_jiff = @@ -127,49 +149,66 @@ iwl_fw_dbg_no_trig_window(struct iwl_mvm *mvm, u32 id = le32_to_cpu(trig->id); /* If this is the first event checked, jump to update start ts */ - if (mvm->fw_dbg_non_collect_ts_start[id] && - (time_after(mvm->fw_dbg_non_collect_ts_start[id] + wind_jiff, + if (fwrt->dump.non_collect_ts_start[id] && + (time_after(fwrt->dump.non_collect_ts_start[id] + wind_jiff, jiffies))) return true; - mvm->fw_dbg_non_collect_ts_start[id] = jiffies; + fwrt->dump.non_collect_ts_start[id] = jiffies; return false; } static inline bool -iwl_fw_dbg_trigger_check_stop(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, +iwl_fw_dbg_trigger_check_stop(struct iwl_fw_runtime *fwrt, + struct wireless_dev *wdev, struct iwl_fw_dbg_trigger_tlv *trig) { - if (vif && !iwl_fw_dbg_trigger_vif_match(trig, vif)) + if (wdev && !iwl_fw_dbg_trigger_vif_match(trig, wdev)) return false; - if (iwl_fw_dbg_no_trig_window(mvm, trig)) { - IWL_WARN(mvm, "Trigger %d occurred while no-collect window.\n", + if (iwl_fw_dbg_no_trig_window(fwrt, trig)) { + IWL_WARN(fwrt, "Trigger %d occurred while no-collect window.\n", trig->id); return false; } - return iwl_fw_dbg_trigger_stop_conf_match(mvm, trig); + return iwl_fw_dbg_trigger_stop_conf_match(fwrt, trig); } static inline void -_iwl_fw_dbg_trigger_simple_stop(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, +_iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt, + struct wireless_dev *wdev, struct iwl_fw_dbg_trigger_tlv *trigger) { if (!trigger) return; - if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trigger)) + if (!iwl_fw_dbg_trigger_check_stop(fwrt, wdev, trigger)) return; - iwl_mvm_fw_dbg_collect_trig(mvm, trigger, NULL); + iwl_fw_dbg_collect_trig(fwrt, trigger, NULL); } -#define iwl_fw_dbg_trigger_simple_stop(mvm, vif, trig) \ - _iwl_fw_dbg_trigger_simple_stop((mvm), (vif), \ - iwl_fw_dbg_get_trigger((mvm)->fw,\ +#define iwl_fw_dbg_trigger_simple_stop(fwrt, wdev, trig) \ + _iwl_fw_dbg_trigger_simple_stop((fwrt), (wdev), \ + iwl_fw_dbg_get_trigger((fwrt)->fw,\ (trig))) -#endif /* __mvm_fw_dbg_h__ */ +static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt) +{ + fwrt->dump.conf = FW_DBG_INVALID; +} + +void iwl_fw_error_dump_wk(struct work_struct *work); + +static inline void iwl_fw_flush_dump(struct iwl_fw_runtime *fwrt) +{ + 
flush_delayed_work(&fwrt->dump.wk); +} + +static inline void iwl_fw_cancel_dump(struct iwl_fw_runtime *fwrt) +{ + cancel_delayed_work_sync(&fwrt->dump.wk); +} + +#endif /* __iwl_fw_dbg_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h index cfebde68a391..ed7beca8817e 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h @@ -7,6 +7,7 @@ * * Copyright(c) 2014 Intel Corporation. All rights reserved. * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * * Copyright(c) 2014 Intel Corporation. All rights reserved. * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -92,6 +94,9 @@ * @IWL_FW_ERROR_DUMP_EXTERNAL: used only by external code utilities, and * for that reason is not in use in any other place in the Linux Wi-Fi * stack. + * @IWL_FW_ERROR_DUMP_MEM_CFG: the addresses and sizes of fifos in the smem, + * which we get from the fw after ALIVE. The content is structured as + * &struct iwl_fw_error_dump_smem_cfg. */ enum iwl_fw_error_dump_type { /* 0 is deprecated */ @@ -110,6 +115,7 @@ enum iwl_fw_error_dump_type { IWL_FW_ERROR_DUMP_RADIO_REG = 13, IWL_FW_ERROR_DUMP_INTERNAL_TXF = 14, IWL_FW_ERROR_DUMP_EXTERNAL = 15, /* Do not move */ + IWL_FW_ERROR_DUMP_MEM_CFG = 16, IWL_FW_ERROR_DUMP_MAX, }; @@ -208,6 +214,30 @@ struct iwl_fw_error_dump_fw_mon { u8 data[]; } __packed; +#define MAX_NUM_LMAC 2 +#define TX_FIFO_INTERNAL_MAX_NUM 6 +#define TX_FIFO_MAX_NUM 15 +/** + * struct iwl_fw_error_dump_smem_cfg - Dump SMEM configuration + * This must follow &struct iwl_fwrt_shared_mem_cfg. + * @num_lmacs: number of lmacs + * @num_txfifo_entries: number of tx fifos + * @lmac: sizes of lmacs txfifos and rxfifo1 + * @rxfifo2_size: size of rxfifo2 + * @internal_txfifo_addr: address of internal tx fifo + * @internal_txfifo_size: size of internal tx fifo + */ +struct iwl_fw_error_dump_smem_cfg { + __le32 num_lmacs; + __le32 num_txfifo_entries; + struct { + __le32 txfifo_size[TX_FIFO_MAX_NUM]; + __le32 rxfifo1_size; + } lmac[MAX_NUM_LMAC]; + __le32 rxfifo2_size; + __le32 internal_txfifo_addr; + __le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM]; +} __packed; /** * struct iwl_fw_error_dump_prph - periphery registers data * @prph_start: address of the first register in this chunk diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h index c73a6438ce8f..279248cd9cfb 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h @@ -246,6 +246,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t; * @IWL_UCODE_TLV_API_STA_TYPE: This ucode supports station type assignement. * @IWL_UCODE_TLV_API_NAN2_VER2: This ucode supports NAN API version 2 * @IWL_UCODE_TLV_API_NEW_RX_STATS: should new RX STATISTICS API be used + * @IWL_UCODE_TLV_API_COEX_ATS_EXTERNAL: the coex notification is enlarged to + * include information about ACL time sharing. 
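Not part of the patch: a sketch of how a dump consumer might recognize the new IWL_FW_ERROR_DUMP_MEM_CFG section and print part of it, assuming only the type/len/data layout of struct iwl_fw_error_dump_data used throughout this file.

static void example_print_smem_cfg(const struct iwl_fw_error_dump_data *data)
{
	const struct iwl_fw_error_dump_smem_cfg *cfg = (const void *)data->data;
	u32 num_lmacs, i;

	if (le32_to_cpu(data->type) != IWL_FW_ERROR_DUMP_MEM_CFG)
		return;

	num_lmacs = le32_to_cpu(cfg->num_lmacs);
	for (i = 0; i < num_lmacs && i < MAX_NUM_LMAC; i++)
		pr_info("lmac %u: rxfifo1 %u bytes\n", i,
			le32_to_cpu(cfg->lmac[i].rxfifo1_size));
	pr_info("rxfifo2: %u bytes\n", le32_to_cpu(cfg->rxfifo2_size));
}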
* * @NUM_IWL_UCODE_TLV_API: number of bits used */ @@ -260,7 +262,9 @@ enum iwl_ucode_tlv_api { IWL_UCODE_TLV_API_STA_TYPE = (__force iwl_ucode_tlv_api_t)30, IWL_UCODE_TLV_API_NAN2_VER2 = (__force iwl_ucode_tlv_api_t)31, /* API Set 1 */ + IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE = (__force iwl_ucode_tlv_api_t)34, IWL_UCODE_TLV_API_NEW_RX_STATS = (__force iwl_ucode_tlv_api_t)35, + IWL_UCODE_TLV_API_COEX_ATS_EXTERNAL = (__force iwl_ucode_tlv_api_t)37, NUM_IWL_UCODE_TLV_API #ifdef __CHECKER__ @@ -374,6 +378,7 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG = (__force iwl_ucode_tlv_capa_t)80, IWL_UCODE_TLV_CAPA_LQM_SUPPORT = (__force iwl_ucode_tlv_capa_t)81, IWL_UCODE_TLV_CAPA_TX_POWER_ACK = (__force iwl_ucode_tlv_capa_t)84, + IWL_UCODE_TLV_CAPA_LED_CMD_SUPPORT = (__force iwl_ucode_tlv_capa_t)86, IWL_UCODE_TLV_CAPA_MLME_OFFLOAD = (__force iwl_ucode_tlv_capa_t)96, NUM_IWL_UCODE_TLV_CAPA diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c new file mode 100644 index 000000000000..bfe5316bbb6a --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c @@ -0,0 +1,75 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#include "iwl-drv.h" +#include "runtime.h" +#include "dbg.h" + +void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans, + const struct iwl_fw *fw, + const struct iwl_fw_runtime_ops *ops, void *ops_ctx) +{ + memset(fwrt, 0, sizeof(*fwrt)); + fwrt->trans = trans; + fwrt->fw = fw; + fwrt->dev = trans->dev; + fwrt->dump.conf = FW_DBG_INVALID; + fwrt->ops = ops; + fwrt->ops_ctx = ops_ctx; + INIT_DELAYED_WORK(&fwrt->dump.wk, iwl_fw_error_dump_wk); +} +IWL_EXPORT_SYMBOL(iwl_fw_runtime_init); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c b/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c index 29bb92e3df59..1096c945a68b 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * All rights reserved. 
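To show how the runtime is wired up, here is a hypothetical op-mode registration sketch, not part of the patch. The dump_start/dump_end members are taken from their use in iwl_fw_error_dump_wk() above; their exact prototypes in struct iwl_fw_runtime_ops are assumed, and the mvm pointer in the commented call is invented.

static int example_dump_start(void *ctx)
{
	/* e.g. take the op-mode's mutex so the dump sees consistent state */
	return 0;
}

static void example_dump_end(void *ctx)
{
	/* release whatever example_dump_start() acquired */
}

static const struct iwl_fw_runtime_ops example_fwrt_ops = {
	.dump_start = example_dump_start,
	.dump_end = example_dump_end,
};

/* at op-mode start (op-mode object and fwrt member are hypothetical): */
/* iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &example_fwrt_ops, mvm); */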
* * Redistribution and use in source and binary forms, with or without @@ -161,6 +162,15 @@ iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait, } IWL_EXPORT_SYMBOL(iwl_init_notification_wait); +void iwl_remove_notification(struct iwl_notif_wait_data *notif_wait, + struct iwl_notification_wait *wait_entry) +{ + spin_lock_bh(¬if_wait->notif_wait_lock); + list_del(&wait_entry->list); + spin_unlock_bh(¬if_wait->notif_wait_lock); +} +IWL_EXPORT_SYMBOL(iwl_remove_notification); + int iwl_wait_notification(struct iwl_notif_wait_data *notif_wait, struct iwl_notification_wait *wait_entry, unsigned long timeout) @@ -171,9 +181,7 @@ int iwl_wait_notification(struct iwl_notif_wait_data *notif_wait, wait_entry->triggered || wait_entry->aborted, timeout); - spin_lock_bh(¬if_wait->notif_wait_lock); - list_del(&wait_entry->list); - spin_unlock_bh(¬if_wait->notif_wait_lock); + iwl_remove_notification(notif_wait, wait_entry); if (wait_entry->aborted) return -EIO; @@ -184,12 +192,3 @@ int iwl_wait_notification(struct iwl_notif_wait_data *notif_wait, return 0; } IWL_EXPORT_SYMBOL(iwl_wait_notification); - -void iwl_remove_notification(struct iwl_notif_wait_data *notif_wait, - struct iwl_notification_wait *wait_entry) -{ - spin_lock_bh(¬if_wait->notif_wait_lock); - list_del(&wait_entry->list); - spin_unlock_bh(¬if_wait->notif_wait_lock); -} -IWL_EXPORT_SYMBOL(iwl_remove_notification); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/nvm.c b/drivers/net/wireless/intel/iwlwifi/fw/nvm.c new file mode 100644 index 000000000000..bd2e1fb43f5a --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/nvm.c @@ -0,0 +1,162 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#include "iwl-drv.h" +#include "runtime.h" +#include "fw/api/nvm-reg.h" +#include "fw/api/commands.h" +#include "iwl-nvm-parse.h" + +struct iwl_nvm_data *iwl_fw_get_nvm(struct iwl_fw_runtime *fwrt) +{ + struct iwl_nvm_get_info cmd = {}; + struct iwl_nvm_get_info_rsp *rsp; + struct iwl_trans *trans = fwrt->trans; + struct iwl_nvm_data *nvm; + struct iwl_host_cmd hcmd = { + .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL, + .data = { &cmd, }, + .len = { sizeof(cmd) }, + .id = WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_GET_INFO) + }; + int ret; + bool lar_fw_supported = !iwlwifi_mod_params.lar_disable && + fw_has_capa(&fwrt->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_LAR_SUPPORT); + + ret = iwl_trans_send_cmd(trans, &hcmd); + if (ret) + return ERR_PTR(ret); + + if (WARN(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp), + "Invalid payload len in NVM response from FW %d", + iwl_rx_packet_payload_len(hcmd.resp_pkt))) { + ret = -EINVAL; + goto out; + } + + rsp = (void *)hcmd.resp_pkt->data; + if (le32_to_cpu(rsp->general.flags) & NVM_GENERAL_FLAGS_EMPTY_OTP) + IWL_INFO(fwrt, "OTP is empty\n"); + + nvm = kzalloc(sizeof(*nvm) + + sizeof(struct ieee80211_channel) * IWL_NUM_CHANNELS, + GFP_KERNEL); + if (!nvm) { + ret = -ENOMEM; + goto out; + } + + iwl_set_hw_address_from_csr(trans, nvm); + /* TODO: if platform NVM has MAC address - override it here */ + + if (!is_valid_ether_addr(nvm->hw_addr)) { + IWL_ERR(fwrt, "no valid mac address was found\n"); + ret = -EINVAL; + goto err_free; + } + + IWL_INFO(trans, "base HW address: %pM\n", nvm->hw_addr); + + /* Initialize general data */ + nvm->nvm_version = le16_to_cpu(rsp->general.nvm_version); + + /* Initialize MAC sku data */ + nvm->sku_cap_11ac_enable = + le32_to_cpu(rsp->mac_sku.enable_11ac); + nvm->sku_cap_11n_enable = + le32_to_cpu(rsp->mac_sku.enable_11n); + nvm->sku_cap_band_24GHz_enable = + le32_to_cpu(rsp->mac_sku.enable_24g); + nvm->sku_cap_band_52GHz_enable = + le32_to_cpu(rsp->mac_sku.enable_5g); + nvm->sku_cap_mimo_disabled = + le32_to_cpu(rsp->mac_sku.mimo_disable); + + /* Initialize PHY sku data */ + nvm->valid_tx_ant = (u8)le32_to_cpu(rsp->phy_sku.tx_chains); + nvm->valid_rx_ant = (u8)le32_to_cpu(rsp->phy_sku.rx_chains); + + /* Initialize regulatory data */ + 
nvm->lar_enabled = + le32_to_cpu(rsp->regulatory.lar_enabled) && lar_fw_supported; + + iwl_init_sbands(trans->dev, trans->cfg, nvm, + rsp->regulatory.channel_profile, + nvm->valid_tx_ant & fwrt->fw->valid_tx_ant, + nvm->valid_rx_ant & fwrt->fw->valid_rx_ant, + nvm->lar_enabled, false); + + iwl_free_resp(&hcmd); + return nvm; + +err_free: + kfree(nvm); +out: + iwl_free_resp(&hcmd); + return ERR_PTR(ret); +} +IWL_EXPORT_SYMBOL(iwl_fw_get_nvm); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/paging.c b/drivers/net/wireless/intel/iwlwifi/fw/paging.c new file mode 100644 index 000000000000..1610722b8099 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/paging.c @@ -0,0 +1,414 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
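iwl_fw_get_nvm() above hands back either a freshly allocated iwl_nvm_data or an ERR_PTR(); a hedged sketch of the expected caller pattern is below (the "mvm" owner and its nvm_data field are placeholders).

	/* Illustrative sketch; hypothetical caller, not from the driver. */
	struct iwl_nvm_data *nvm = iwl_fw_get_nvm(&mvm->fwrt);

	if (IS_ERR(nvm))
		return PTR_ERR(nvm);	/* command, response or MAC address was bad */

	mvm->nvm_data = nvm;		/* owner must kfree() it on teardown */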
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#include "iwl-drv.h" +#include "runtime.h" +#include "fw/api/commands.h" + +void iwl_free_fw_paging(struct iwl_fw_runtime *fwrt) +{ + int i; + + if (!fwrt->fw_paging_db[0].fw_paging_block) + return; + + for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) { + struct iwl_fw_paging *paging = &fwrt->fw_paging_db[i]; + + if (!paging->fw_paging_block) { + IWL_DEBUG_FW(fwrt, + "Paging: block %d already freed, continue to next page\n", + i); + + continue; + } + dma_unmap_page(fwrt->trans->dev, paging->fw_paging_phys, + paging->fw_paging_size, DMA_BIDIRECTIONAL); + + __free_pages(paging->fw_paging_block, + get_order(paging->fw_paging_size)); + paging->fw_paging_block = NULL; + } + kfree(fwrt->trans->paging_download_buf); + fwrt->trans->paging_download_buf = NULL; + fwrt->trans->paging_db = NULL; + + memset(fwrt->fw_paging_db, 0, sizeof(fwrt->fw_paging_db)); +} +IWL_EXPORT_SYMBOL(iwl_free_fw_paging); + +static int iwl_alloc_fw_paging_mem(struct iwl_fw_runtime *fwrt, + const struct fw_img *image) +{ + struct page *block; + dma_addr_t phys = 0; + int blk_idx, order, num_of_pages, size, dma_enabled; + + if (fwrt->fw_paging_db[0].fw_paging_block) + return 0; + + dma_enabled = is_device_dma_capable(fwrt->trans->dev); + + /* ensure BLOCK_2_EXP_SIZE is power of 2 of PAGING_BLOCK_SIZE */ + BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE); + + num_of_pages = image->paging_mem_size / FW_PAGING_SIZE; + fwrt->num_of_paging_blk = + DIV_ROUND_UP(num_of_pages, NUM_OF_PAGE_PER_GROUP); + fwrt->num_of_pages_in_last_blk = + num_of_pages - + NUM_OF_PAGE_PER_GROUP * (fwrt->num_of_paging_blk - 1); + + IWL_DEBUG_FW(fwrt, + "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n", + fwrt->num_of_paging_blk, + fwrt->num_of_pages_in_last_blk); + + /* + * Allocate CSS and paging blocks in dram. + */ + for (blk_idx = 0; blk_idx < fwrt->num_of_paging_blk + 1; blk_idx++) { + /* For CSS allocate 4KB, for others PAGING_BLOCK_SIZE (32K) */ + size = blk_idx ? PAGING_BLOCK_SIZE : FW_PAGING_SIZE; + order = get_order(size); + block = alloc_pages(GFP_KERNEL, order); + if (!block) { + /* free all the previous pages since we failed */ + iwl_free_fw_paging(fwrt); + return -ENOMEM; + } + + fwrt->fw_paging_db[blk_idx].fw_paging_block = block; + fwrt->fw_paging_db[blk_idx].fw_paging_size = size; + + if (dma_enabled) { + phys = dma_map_page(fwrt->trans->dev, block, 0, + PAGE_SIZE << order, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(fwrt->trans->dev, phys)) { + /* + * free the previous pages and the current one + * since we failed to map_page. 
+ */ + iwl_free_fw_paging(fwrt); + return -ENOMEM; + } + fwrt->fw_paging_db[blk_idx].fw_paging_phys = phys; + } else { + fwrt->fw_paging_db[blk_idx].fw_paging_phys = + PAGING_ADDR_SIG | + blk_idx << BLOCK_2_EXP_SIZE; + } + + if (!blk_idx) + IWL_DEBUG_FW(fwrt, + "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n", + order); + else + IWL_DEBUG_FW(fwrt, + "Paging: allocated 32K bytes (order %d) for firmware paging.\n", + order); + } + + return 0; +} + +static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt, + const struct fw_img *image) +{ + int sec_idx, idx; + u32 offset = 0; + + /* + * find where is the paging image start point: + * if CPU2 exist and it's in paging format, then the image looks like: + * CPU1 sections (2 or more) + * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between CPU1 to CPU2 + * CPU2 sections (not paged) + * PAGING_SEPARATOR_SECTION delimiter - separate between CPU2 + * non paged to CPU2 paging sec + * CPU2 paging CSS + * CPU2 paging image (including instruction and data) + */ + for (sec_idx = 0; sec_idx < image->num_sec; sec_idx++) { + if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) { + sec_idx++; + break; + } + } + + /* + * If paging is enabled there should be at least 2 more sections left + * (one for CSS and one for Paging data) + */ + if (sec_idx >= image->num_sec - 1) { + IWL_ERR(fwrt, "Paging: Missing CSS and/or paging sections\n"); + iwl_free_fw_paging(fwrt); + return -EINVAL; + } + + /* copy the CSS block to the dram */ + IWL_DEBUG_FW(fwrt, "Paging: load paging CSS to FW, sec = %d\n", + sec_idx); + + memcpy(page_address(fwrt->fw_paging_db[0].fw_paging_block), + image->sec[sec_idx].data, + fwrt->fw_paging_db[0].fw_paging_size); + dma_sync_single_for_device(fwrt->trans->dev, + fwrt->fw_paging_db[0].fw_paging_phys, + fwrt->fw_paging_db[0].fw_paging_size, + DMA_BIDIRECTIONAL); + + IWL_DEBUG_FW(fwrt, + "Paging: copied %d CSS bytes to first block\n", + fwrt->fw_paging_db[0].fw_paging_size); + + sec_idx++; + + /* + * copy the paging blocks to the dram + * loop index start from 1 since that CSS block already copied to dram + * and CSS index is 0. + * loop stop at num_of_paging_blk since that last block is not full. 
+ */ + for (idx = 1; idx < fwrt->num_of_paging_blk; idx++) { + struct iwl_fw_paging *block = &fwrt->fw_paging_db[idx]; + + memcpy(page_address(block->fw_paging_block), + image->sec[sec_idx].data + offset, + block->fw_paging_size); + dma_sync_single_for_device(fwrt->trans->dev, + block->fw_paging_phys, + block->fw_paging_size, + DMA_BIDIRECTIONAL); + + IWL_DEBUG_FW(fwrt, + "Paging: copied %d paging bytes to block %d\n", + fwrt->fw_paging_db[idx].fw_paging_size, + idx); + + offset += fwrt->fw_paging_db[idx].fw_paging_size; + } + + /* copy the last paging block */ + if (fwrt->num_of_pages_in_last_blk > 0) { + struct iwl_fw_paging *block = &fwrt->fw_paging_db[idx]; + + memcpy(page_address(block->fw_paging_block), + image->sec[sec_idx].data + offset, + FW_PAGING_SIZE * fwrt->num_of_pages_in_last_blk); + dma_sync_single_for_device(fwrt->trans->dev, + block->fw_paging_phys, + block->fw_paging_size, + DMA_BIDIRECTIONAL); + + IWL_DEBUG_FW(fwrt, + "Paging: copied %d pages in the last block %d\n", + fwrt->num_of_pages_in_last_blk, idx); + } + + return 0; +} + +static int iwl_save_fw_paging(struct iwl_fw_runtime *fwrt, + const struct fw_img *fw) +{ + int ret; + + ret = iwl_alloc_fw_paging_mem(fwrt, fw); + if (ret) + return ret; + + return iwl_fill_paging_mem(fwrt, fw); +} + +/* send paging cmd to FW in case CPU2 has paging image */ +static int iwl_send_paging_cmd(struct iwl_fw_runtime *fwrt, + const struct fw_img *fw) +{ + struct iwl_fw_paging_cmd paging_cmd = { + .flags = cpu_to_le32(PAGING_CMD_IS_SECURED | + PAGING_CMD_IS_ENABLED | + (fwrt->num_of_pages_in_last_blk << + PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)), + .block_size = cpu_to_le32(BLOCK_2_EXP_SIZE), + .block_num = cpu_to_le32(fwrt->num_of_paging_blk), + }; + struct iwl_host_cmd hcmd = { + .id = iwl_cmd_id(FW_PAGING_BLOCK_CMD, IWL_ALWAYS_LONG_GROUP, 0), + .len = { sizeof(paging_cmd), }, + .data = { &paging_cmd, }, + }; + int blk_idx; + + /* loop for for all paging blocks + CSS block */ + for (blk_idx = 0; blk_idx < fwrt->num_of_paging_blk + 1; blk_idx++) { + dma_addr_t addr = fwrt->fw_paging_db[blk_idx].fw_paging_phys; + __le32 phy_addr; + + addr = addr >> PAGE_2_EXP_SIZE; + phy_addr = cpu_to_le32(addr); + paging_cmd.device_phy_addr[blk_idx] = phy_addr; + } + + return iwl_trans_send_cmd(fwrt->trans, &hcmd); +} + +/* + * Send paging item cmd to FW in case CPU2 has paging image + */ +static int iwl_trans_get_paging_item(struct iwl_fw_runtime *fwrt) +{ + int ret; + struct iwl_fw_get_item_cmd fw_get_item_cmd = { + .item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING), + }; + struct iwl_fw_get_item_resp *item_resp; + struct iwl_host_cmd cmd = { + .id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0), + .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL, + .data = { &fw_get_item_cmd, }, + .len = { sizeof(fw_get_item_cmd), }, + }; + + ret = iwl_trans_send_cmd(fwrt->trans, &cmd); + if (ret) { + IWL_ERR(fwrt, + "Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n", + ret); + return ret; + } + + item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data; + if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) { + IWL_ERR(fwrt, + "Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n", + le32_to_cpu(item_resp->item_id)); + ret = -EIO; + goto exit; + } + + /* Add an extra page for headers */ + fwrt->trans->paging_download_buf = kzalloc(PAGING_BLOCK_SIZE + + FW_PAGING_SIZE, + GFP_KERNEL); + if (!fwrt->trans->paging_download_buf) { + ret = -ENOMEM; + goto exit; + } + fwrt->trans->paging_req_addr = le32_to_cpu(item_resp->item_val); + 
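The block accounting used by iwl_alloc_fw_paging_mem() and iwl_fill_paging_mem() above reduces to simple arithmetic: 4 KiB firmware pages are grouped eight to a 32 KiB block, plus one 4 KiB CSS block, with the last data block possibly partial. A standalone sketch with a hypothetical image size:

/* Illustrative sketch; hypothetical helper, not from the driver. */
static void example_paging_layout(u32 paging_mem_size)
{
	u32 pages = paging_mem_size / FW_PAGING_SIZE;	/* 4 KiB pages */
	u32 blocks = DIV_ROUND_UP(pages, NUM_OF_PAGE_PER_GROUP);
	u32 last = pages - NUM_OF_PAGE_PER_GROUP * (blocks - 1);

	/* e.g. a 180 KiB image -> 45 pages -> 6 blocks, 5 pages in the last */
	pr_debug("paging: %u pages, %u blocks, %u pages in last block\n",
		 pages, blocks, last);
}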
fwrt->trans->paging_db = fwrt->fw_paging_db; + IWL_DEBUG_FW(fwrt, + "Paging: got paging request address (paging_req_addr 0x%08x)\n", + fwrt->trans->paging_req_addr); + +exit: + iwl_free_resp(&cmd); + + return ret; +} + +int iwl_init_paging(struct iwl_fw_runtime *fwrt, enum iwl_ucode_type type) +{ + const struct fw_img *fw = &fwrt->fw->img[type]; + int ret; + + if (fwrt->trans->cfg->gen2) + return 0; + + /* + * Configure and operate fw paging mechanism. + * The driver configures the paging flow only once. + * The CPU2 paging image is included in the IWL_UCODE_INIT image. + */ + if (!fw->paging_mem_size) + return 0; + + /* + * When dma is not enabled, the driver needs to copy / write + * the downloaded / uploaded page to / from the smem. + * This gets the location of the place were the pages are + * stored. + */ + if (!is_device_dma_capable(fwrt->trans->dev)) { + ret = iwl_trans_get_paging_item(fwrt); + if (ret) { + IWL_ERR(fwrt, "failed to get FW paging item\n"); + return ret; + } + } + + ret = iwl_save_fw_paging(fwrt, fw); + if (ret) { + IWL_ERR(fwrt, "failed to save the FW paging image\n"); + return ret; + } + + ret = iwl_send_paging_cmd(fwrt, fw); + if (ret) { + IWL_ERR(fwrt, "failed to send the paging cmd\n"); + iwl_free_fw_paging(fwrt); + return ret; + } + + return 0; +} +IWL_EXPORT_SYMBOL(iwl_init_paging); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h new file mode 100644 index 000000000000..50cfb6d795a5 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h @@ -0,0 +1,158 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef __iwl_fw_runtime_h__ +#define __iwl_fw_runtime_h__ + +#include "iwl-config.h" +#include "iwl-trans.h" +#include "img.h" +#include "fw/api/debug.h" +#include "fw/api/paging.h" +#include "iwl-eeprom-parse.h" + +struct iwl_fw_runtime_ops { + int (*dump_start)(void *ctx); + void (*dump_end)(void *ctx); +}; + +#define MAX_NUM_LMAC 2 +struct iwl_fwrt_shared_mem_cfg { + int num_lmacs; + int num_txfifo_entries; + struct { + u32 txfifo_size[TX_FIFO_MAX_NUM]; + u32 rxfifo1_size; + } lmac[MAX_NUM_LMAC]; + u32 rxfifo2_size; + u32 internal_txfifo_addr; + u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM]; +}; + +enum iwl_fw_runtime_status { + IWL_FWRT_STATUS_DUMPING = 0, +}; + +/** + * struct iwl_fw_runtime - runtime data for firmware + * @fw: firmware image + * @cfg: NIC configuration + * @dev: device pointer + * @ops: user ops + * @ops_ctx: user ops context + * @status: status flags + * @fw_paging_db: paging database + * @num_of_paging_blk: number of paging blocks + * @num_of_pages_in_last_blk: number of pages in the last block + * @smem_cfg: saved firmware SMEM configuration + * @cur_fw_img: current firmware image, must be maintained by + * the driver by calling &iwl_fw_set_current_image() + * @dump: debug dump data + */ +struct iwl_fw_runtime { + struct iwl_trans *trans; + const struct iwl_fw *fw; + struct device *dev; + + const struct iwl_fw_runtime_ops *ops; + void *ops_ctx; + + unsigned long status; + + /* Paging */ + struct iwl_fw_paging fw_paging_db[NUM_OF_FW_PAGING_BLOCKS]; + u16 num_of_paging_blk; + u16 num_of_pages_in_last_blk; + + enum iwl_ucode_type cur_fw_img; + + /* memory configuration */ + struct iwl_fwrt_shared_mem_cfg smem_cfg; + + /* debug */ + struct { + const struct iwl_fw_dump_desc *desc; + const struct iwl_fw_dbg_trigger_tlv *trig; + struct delayed_work wk; + + u8 conf; + + /* ts of the beginning of a non-collect fw dbg data period */ + unsigned long non_collect_ts_start[FW_DBG_TRIGGER_MAX - 1]; + } dump; +}; + +void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans, + const struct iwl_fw *fw, + const struct iwl_fw_runtime_ops *ops, void *ops_ctx); + +static inline void iwl_fw_set_current_image(struct iwl_fw_runtime *fwrt, + enum iwl_ucode_type cur_fw_img) +{ + fwrt->cur_fw_img = cur_fw_img; +} + +int iwl_init_paging(struct iwl_fw_runtime *fwrt, enum iwl_ucode_type type); +void iwl_free_fw_paging(struct iwl_fw_runtime *fwrt); + +void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt); + +void iwl_fwrt_handle_notification(struct iwl_fw_runtime *fwrt, + struct iwl_rx_cmd_buffer *rxb); +struct iwl_nvm_data *iwl_fw_get_nvm(struct iwl_fw_runtime *fwrt); + +#endif /* 
__iwl_fw_runtime_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/smem.c b/drivers/net/wireless/intel/iwlwifi/fw/smem.c new file mode 100644 index 000000000000..76675736ba4f --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/smem.c @@ -0,0 +1,155 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
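The iwl_fw_runtime_ops / iwl_fw_runtime_init() interface declared in runtime.h above is meant to be wired up by the op-mode; the sketch below shows the intended shape (the example_* callbacks, their bodies and the "priv" owner are invented).

/* Illustrative sketch; hypothetical op-mode glue, not from the driver. */
static int example_dump_start(void *ctx)
{
	/* e.g. take the op-mode mutex before a firmware debug dump */
	return 0;
}

static void example_dump_end(void *ctx)
{
	/* undo whatever example_dump_start() did */
}

static const struct iwl_fw_runtime_ops example_fwrt_ops = {
	.dump_start = example_dump_start,
	.dump_end = example_dump_end,
};

	/* in the op-mode start path, with "priv" as its private struct: */
	iwl_fw_runtime_init(&priv->fwrt, trans, fw, &example_fwrt_ops, priv);
	iwl_fw_set_current_image(&priv->fwrt, IWL_UCODE_REGULAR);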
+ * + *****************************************************************************/ +#include "iwl-drv.h" +#include "runtime.h" +#include "fw/api/commands.h" + +static void iwl_parse_shared_mem_a000(struct iwl_fw_runtime *fwrt, + struct iwl_rx_packet *pkt) +{ + struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data; + int i, lmac; + int lmac_num = le32_to_cpu(mem_cfg->lmac_num); + + if (WARN_ON(lmac_num > ARRAY_SIZE(mem_cfg->lmac_smem))) + return; + + fwrt->smem_cfg.num_lmacs = lmac_num; + fwrt->smem_cfg.num_txfifo_entries = + ARRAY_SIZE(mem_cfg->lmac_smem[0].txfifo_size); + fwrt->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo2_size); + + for (lmac = 0; lmac < lmac_num; lmac++) { + struct iwl_shared_mem_lmac_cfg *lmac_cfg = + &mem_cfg->lmac_smem[lmac]; + + for (i = 0; i < ARRAY_SIZE(lmac_cfg->txfifo_size); i++) + fwrt->smem_cfg.lmac[lmac].txfifo_size[i] = + le32_to_cpu(lmac_cfg->txfifo_size[i]); + fwrt->smem_cfg.lmac[lmac].rxfifo1_size = + le32_to_cpu(lmac_cfg->rxfifo1_size); + } +} + +static void iwl_parse_shared_mem(struct iwl_fw_runtime *fwrt, + struct iwl_rx_packet *pkt) +{ + struct iwl_shared_mem_cfg_v2 *mem_cfg = (void *)pkt->data; + int i; + + fwrt->smem_cfg.num_lmacs = 1; + + fwrt->smem_cfg.num_txfifo_entries = ARRAY_SIZE(mem_cfg->txfifo_size); + for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++) + fwrt->smem_cfg.lmac[0].txfifo_size[i] = + le32_to_cpu(mem_cfg->txfifo_size[i]); + + fwrt->smem_cfg.lmac[0].rxfifo1_size = + le32_to_cpu(mem_cfg->rxfifo_size[0]); + fwrt->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo_size[1]); + + /* new API has more data, from rxfifo_addr field and on */ + if (fw_has_capa(&fwrt->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { + BUILD_BUG_ON(sizeof(fwrt->smem_cfg.internal_txfifo_size) != + sizeof(mem_cfg->internal_txfifo_size)); + + fwrt->smem_cfg.internal_txfifo_addr = + le32_to_cpu(mem_cfg->internal_txfifo_addr); + + for (i = 0; + i < ARRAY_SIZE(fwrt->smem_cfg.internal_txfifo_size); + i++) + fwrt->smem_cfg.internal_txfifo_size[i] = + le32_to_cpu(mem_cfg->internal_txfifo_size[i]); + } +} + +void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt) +{ + struct iwl_host_cmd cmd = { + .flags = CMD_WANT_SKB, + .data = { NULL, }, + .len = { 0, }, + }; + struct iwl_rx_packet *pkt; + + if (fw_has_capa(&fwrt->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) + cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0); + else + cmd.id = SHARED_MEM_CFG; + + if (WARN_ON(iwl_trans_send_cmd(fwrt->trans, &cmd))) + return; + + pkt = cmd.resp_pkt; + if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_A000) + iwl_parse_shared_mem_a000(fwrt, pkt); + else + iwl_parse_shared_mem(fwrt, pkt); + + IWL_DEBUG_INFO(fwrt, "SHARED MEM CFG: got memory offsets/sizes\n"); + + iwl_free_resp(&cmd); +} +IWL_EXPORT_SYMBOL(iwl_get_shared_mem_conf); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index d19c74827fbb..3e057b539d5b 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -463,6 +463,9 @@ extern const struct iwl_cfg iwla000_2ac_cfg_hr; extern const struct iwl_cfg iwla000_2ac_cfg_hr_cdb; extern const struct iwl_cfg iwla000_2ac_cfg_jf; extern const struct iwl_cfg iwla000_2ax_cfg_hr; +extern const struct iwl_cfg iwla000_2ax_cfg_qnj_hr_f0; +extern const struct iwl_cfg iwla000_2ax_cfg_qnj_jf_b0; +extern const struct iwl_cfg iwla000_2ax_cfg_qnj_hr_a0; #endif /* CONFIG_IWLMVM */ #endif /* __IWL_CONFIG_H__ */ diff 
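Once iwl_get_shared_mem_conf() above has filled fwrt->smem_cfg, consumers simply walk the parsed layout; a hypothetical example that prints every per-LMAC TX FIFO size:

/* Illustrative sketch; hypothetical consumer, not from the driver. */
static void example_walk_txfifos(struct iwl_fw_runtime *fwrt)
{
	int lmac, i;

	for (lmac = 0; lmac < fwrt->smem_cfg.num_lmacs; lmac++)
		for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++)
			IWL_DEBUG_FW(fwrt, "lmac %d fifo %d: %u bytes\n",
				     lmac, i,
				     fwrt->smem_cfg.lmac[lmac].txfifo_size[i]);
}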
--git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index c6c1876c1ad4..b03e0f975b5a 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -169,7 +169,7 @@ /* * CSR Hardware Revision Workaround Register. Indicates hardware rev; - * "step" determines CCK backoff for txpower calculation. Used for 4965 only. + * "step" determines CCK backoff for txpower calculation. * See also CSR_HW_REV register. * Bit fields: * 3-2: 0 = A, 1 = B, 2 = C, 3 = D step @@ -354,11 +354,16 @@ enum { #define CSR_HW_REV_TYPE_135 (0x0000120) #define CSR_HW_REV_TYPE_7265D (0x0000210) #define CSR_HW_REV_TYPE_NONE (0x00001F0) +#define CSR_HW_REV_TYPE_QNJ (0x0000360) +#define CSR_HW_REV_TYPE_HR_CDB (0x0000340) /* RF_ID value */ -#define CSR_HW_RF_ID_TYPE_JF (0x00105000) +#define CSR_HW_RF_ID_TYPE_JF (0x00105100) #define CSR_HW_RF_ID_TYPE_HR (0x0010A000) -#define CSR_HW_RF_ID_TYPE_HRCDB (0x00109000) +#define CSR_HW_RF_ID_TYPE_HRCDB (0x00109F00) + +/* HW_RF CHIP ID */ +#define CSR_HW_RF_ID_TYPE_CHIP_ID(_val) (((_val) >> 12) & 0xFFF) /* EEPROM REG */ #define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 4e0f86fe0a6f..99676d6c4713 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -479,8 +479,8 @@ static int iwl_set_default_calib(struct iwl_drv *drv, const u8 *data) return 0; } -static int iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data, - struct iwl_ucode_capabilities *capa) +static void iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data, + struct iwl_ucode_capabilities *capa) { const struct iwl_ucode_api *ucode_api = (void *)data; u32 api_index = le32_to_cpu(ucode_api->api_index); @@ -488,23 +488,20 @@ static int iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data, int i; if (api_index >= DIV_ROUND_UP(NUM_IWL_UCODE_TLV_API, 32)) { - IWL_ERR(drv, - "api flags index %d larger than supported by driver\n", - api_index); - /* don't return an error so we can load FW that has more bits */ - return 0; + IWL_WARN(drv, + "api flags index %d larger than supported by driver\n", + api_index); + return; } for (i = 0; i < 32; i++) { if (api_flags & BIT(i)) __set_bit(i + 32 * api_index, capa->_api); } - - return 0; } -static int iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data, - struct iwl_ucode_capabilities *capa) +static void iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data, + struct iwl_ucode_capabilities *capa) { const struct iwl_ucode_capa *ucode_capa = (void *)data; u32 api_index = le32_to_cpu(ucode_capa->api_index); @@ -512,19 +509,16 @@ static int iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data, int i; if (api_index >= DIV_ROUND_UP(NUM_IWL_UCODE_TLV_CAPA, 32)) { - IWL_ERR(drv, - "capa flags index %d larger than supported by driver\n", - api_index); - /* don't return an error so we can load FW that has more bits */ - return 0; + IWL_WARN(drv, + "capa flags index %d larger than supported by driver\n", + api_index); + return; } for (i = 0; i < 32; i++) { if (api_flags & BIT(i)) __set_bit(i + 32 * api_index, capa->_capa); } - - return 0; } static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv, @@ -766,14 +760,12 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, case IWL_UCODE_TLV_API_CHANGES_SET: if (tlv_len != sizeof(struct iwl_ucode_api)) goto invalid_tlv_len; - if 
(iwl_set_ucode_api_flags(drv, tlv_data, capa)) - goto tlv_error; + iwl_set_ucode_api_flags(drv, tlv_data, capa); break; case IWL_UCODE_TLV_ENABLED_CAPABILITIES: if (tlv_len != sizeof(struct iwl_ucode_capa)) goto invalid_tlv_len; - if (iwl_set_ucode_capabilities(drv, tlv_data, capa)) - goto tlv_error; + iwl_set_ucode_capabilities(drv, tlv_data, capa); break; case IWL_UCODE_TLV_INIT_EVTLOG_PTR: if (tlv_len != sizeof(u32)) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c index c527b8c10370..efb1998dcabd 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c @@ -241,20 +241,12 @@ IWL_EXPORT_SYMBOL(iwl_clear_bits_prph); void iwl_force_nmi(struct iwl_trans *trans) { - if (trans->cfg->device_family < IWL_DEVICE_FAMILY_8000) { + if (trans->cfg->device_family < IWL_DEVICE_FAMILY_9000) iwl_write_prph(trans, DEVICE_SET_NMI_REG, DEVICE_SET_NMI_VAL_DRV); - iwl_write_prph(trans, DEVICE_SET_NMI_REG, - DEVICE_SET_NMI_VAL_HW); - } else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_A000) { + else iwl_write_prph(trans, UREG_NIC_SET_NMI_DRIVER, - DEVICE_SET_NMI_8000_VAL); - } else { - iwl_write_prph(trans, DEVICE_SET_NMI_8000_REG, - DEVICE_SET_NMI_8000_VAL); - iwl_write_prph(trans, DEVICE_SET_NMI_REG, - DEVICE_SET_NMI_VAL_DRV); - } + UREG_NIC_SET_NMI_DRIVER_NMI_FROM_DRIVER_MSK); } IWL_EXPORT_SYMBOL(iwl_force_nmi); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 3ee6767392b6..3014beef4873 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -79,6 +79,7 @@ /* NVM offsets (in words) definitions */ enum wkp_nvm_offsets { /* NVM HW-Section offset (in words) definitions */ + SUBSYSTEM_ID = 0x0A, HW_ADDR = 0x15, /* NVM SW-Section offset (in words) definitions */ @@ -183,22 +184,26 @@ static struct ieee80211_rate iwl_cfg80211_rates[] = { * @NVM_CHANNEL_INDOOR_ONLY: only indoor use is allowed * @NVM_CHANNEL_GO_CONCURRENT: GO operation is allowed when connected to BSS * on same channel on 2.4 or same UNII band on 5.2 - * @NVM_CHANNEL_WIDE: 20 MHz channel okay (?) - * @NVM_CHANNEL_40MHZ: 40 MHz channel okay (?) - * @NVM_CHANNEL_80MHZ: 80 MHz channel okay (?) - * @NVM_CHANNEL_160MHZ: 160 MHz channel okay (?) + * @NVM_CHANNEL_UNIFORM: uniform spreading required + * @NVM_CHANNEL_20MHZ: 20 MHz channel okay + * @NVM_CHANNEL_40MHZ: 40 MHz channel okay + * @NVM_CHANNEL_80MHZ: 80 MHz channel okay + * @NVM_CHANNEL_160MHZ: 160 MHz channel okay + * @NVM_CHANNEL_DC_HIGH: DC HIGH required/allowed (?) 
*/ enum iwl_nvm_channel_flags { - NVM_CHANNEL_VALID = BIT(0), - NVM_CHANNEL_IBSS = BIT(1), - NVM_CHANNEL_ACTIVE = BIT(3), - NVM_CHANNEL_RADAR = BIT(4), - NVM_CHANNEL_INDOOR_ONLY = BIT(5), - NVM_CHANNEL_GO_CONCURRENT = BIT(6), - NVM_CHANNEL_WIDE = BIT(8), - NVM_CHANNEL_40MHZ = BIT(9), - NVM_CHANNEL_80MHZ = BIT(10), - NVM_CHANNEL_160MHZ = BIT(11), + NVM_CHANNEL_VALID = BIT(0), + NVM_CHANNEL_IBSS = BIT(1), + NVM_CHANNEL_ACTIVE = BIT(3), + NVM_CHANNEL_RADAR = BIT(4), + NVM_CHANNEL_INDOOR_ONLY = BIT(5), + NVM_CHANNEL_GO_CONCURRENT = BIT(6), + NVM_CHANNEL_UNIFORM = BIT(7), + NVM_CHANNEL_20MHZ = BIT(8), + NVM_CHANNEL_40MHZ = BIT(9), + NVM_CHANNEL_80MHZ = BIT(10), + NVM_CHANNEL_160MHZ = BIT(11), + NVM_CHANNEL_DC_HIGH = BIT(12), }; #define CHECK_AND_PRINT_I(x) \ @@ -254,13 +259,12 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz, static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, struct iwl_nvm_data *data, const __le16 * const nvm_ch_flags, - bool lar_supported) + bool lar_supported, bool no_wide_in_5ghz) { int ch_idx; int n_channels = 0; struct ieee80211_channel *channel; u16 ch_flags; - bool is_5ghz; int num_of_ch, num_2ghz_channels; const u8 *nvm_chan; @@ -275,12 +279,20 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, } for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) { + bool is_5ghz = (ch_idx >= num_2ghz_channels); + ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx); - if (ch_idx >= num_2ghz_channels && - !data->sku_cap_band_52GHz_enable) + if (is_5ghz && !data->sku_cap_band_52GHz_enable) continue; + /* workaround to disable wide channels in 5GHz */ + if (no_wide_in_5ghz && is_5ghz) { + ch_flags &= ~(NVM_CHANNEL_40MHZ | + NVM_CHANNEL_80MHZ | + NVM_CHANNEL_160MHZ); + } + if (ch_flags & NVM_CHANNEL_160MHZ) data->vht160_supported = true; @@ -303,8 +315,8 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, n_channels++; channel->hw_value = nvm_chan[ch_idx]; - channel->band = (ch_idx < num_2ghz_channels) ? - NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; + channel->band = is_5ghz ? + NL80211_BAND_5GHZ : NL80211_BAND_2GHZ; channel->center_freq = ieee80211_channel_to_frequency( channel->hw_value, channel->band); @@ -316,7 +328,6 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, * is not used in mvm, and is used for backwards compatibility */ channel->max_power = IWL_DEFAULT_MAX_TX_POWER; - is_5ghz = channel->band == NL80211_BAND_5GHZ; /* don't put limitations in case we're using LAR */ if (!lar_supported) @@ -327,7 +338,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, channel->flags = 0; IWL_DEBUG_EEPROM(dev, - "Ch. %d [%sGHz] flags 0x%x %s%s%s%s%s%s%s%s%s%s(%ddBm): Ad-Hoc %ssupported\n", + "Ch. %d [%sGHz] flags 0x%x %s%s%s%s%s%s%s%s%s%s%s%s(%ddBm): Ad-Hoc %ssupported\n", channel->hw_value, is_5ghz ? 
"5.2" : "2.4", ch_flags, @@ -337,10 +348,12 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, CHECK_AND_PRINT_I(RADAR), CHECK_AND_PRINT_I(INDOOR_ONLY), CHECK_AND_PRINT_I(GO_CONCURRENT), - CHECK_AND_PRINT_I(WIDE), + CHECK_AND_PRINT_I(UNIFORM), + CHECK_AND_PRINT_I(20MHZ), CHECK_AND_PRINT_I(40MHZ), CHECK_AND_PRINT_I(80MHZ), CHECK_AND_PRINT_I(160MHZ), + CHECK_AND_PRINT_I(DC_HIGH), channel->max_power, ((ch_flags & NVM_CHANNEL_IBSS) && !(ch_flags & NVM_CHANNEL_RADAR)) @@ -432,14 +445,15 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg, void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, struct iwl_nvm_data *data, const __le16 *nvm_ch_flags, - u8 tx_chains, u8 rx_chains, bool lar_supported) + u8 tx_chains, u8 rx_chains, bool lar_supported, + bool no_wide_in_5ghz) { int n_channels; int n_used = 0; struct ieee80211_supported_band *sband; n_channels = iwl_init_channel_map(dev, cfg, data, nvm_ch_flags, - lar_supported); + lar_supported, no_wide_in_5ghz); sband = &data->bands[NL80211_BAND_2GHZ]; sband->band = NL80211_BAND_2GHZ; sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS]; @@ -568,7 +582,7 @@ static void iwl_set_hw_address_family_8000(struct iwl_trans *trans, const struct iwl_cfg *cfg, struct iwl_nvm_data *data, const __le16 *mac_override, - const __le16 *nvm_hw) + const __be16 *nvm_hw) { const u8 *hw_addr; @@ -615,7 +629,7 @@ static void iwl_set_hw_address_family_8000(struct iwl_trans *trans, static int iwl_set_hw_address(struct iwl_trans *trans, const struct iwl_cfg *cfg, - struct iwl_nvm_data *data, const __le16 *nvm_hw, + struct iwl_nvm_data *data, const __be16 *nvm_hw, const __le16 *mac_override) { if (cfg->mac_addr_from_csr) { @@ -645,9 +659,41 @@ static int iwl_set_hw_address(struct iwl_trans *trans, return 0; } +static bool +iwl_nvm_no_wide_in_5ghz(struct device *dev, const struct iwl_cfg *cfg, + const __be16 *nvm_hw) +{ + /* + * Workaround a bug in Indonesia SKUs where the regulatory in + * some 7000-family OTPs erroneously allow wide channels in + * 5GHz. To check for Indonesia, we take the SKU value from + * bits 1-4 in the subsystem ID and check if it is either 5 or + * 9. In those cases, we need to force-disable wide channels + * in 5GHz otherwise the FW will throw a sysassert when we try + * to use them. + */ + if (cfg->device_family == IWL_DEVICE_FAMILY_7000) { + /* + * Unlike the other sections in the NVM, the hw + * section uses big-endian. 
+ */ + u16 subsystem_id = be16_to_cpup(nvm_hw + SUBSYSTEM_ID); + u8 sku = (subsystem_id & 0x1e) >> 1; + + if (sku == 5 || sku == 9) { + IWL_DEBUG_EEPROM(dev, + "disabling wide channels in 5GHz (0x%0x %d)\n", + subsystem_id, sku); + return true; + } + } + + return false; +} + struct iwl_nvm_data * iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, - const __le16 *nvm_hw, const __le16 *nvm_sw, + const __be16 *nvm_hw, const __le16 *nvm_sw, const __le16 *nvm_calib, const __le16 *regulatory, const __le16 *mac_override, const __le16 *phy_sku, u8 tx_chains, u8 rx_chains, bool lar_fw_supported) @@ -655,6 +701,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, struct device *dev = trans->dev; struct iwl_nvm_data *data; bool lar_enabled; + bool no_wide_in_5ghz = iwl_nvm_no_wide_in_5ghz(dev, cfg, nvm_hw); u32 sku, radio_cfg; u16 lar_config; const __le16 *ch_section; @@ -725,7 +772,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, } iwl_init_sbands(dev, cfg, data, ch_section, tx_chains, rx_chains, - lar_fw_supported && lar_enabled); + lar_fw_supported && lar_enabled, no_wide_in_5ghz); data->calib_version = 255; return data; @@ -868,19 +915,27 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, prev_reg_rule_flags = reg_rule_flags; IWL_DEBUG_DEV(dev, IWL_DL_LAR, - "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s(0x%02x) reg_flags 0x%x: %s\n", + "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s%s%s%s(0x%02x)\n", center_freq, band == NL80211_BAND_5GHZ ? "5.2" : "2.4", CHECK_AND_PRINT_I(VALID), + CHECK_AND_PRINT_I(IBSS), CHECK_AND_PRINT_I(ACTIVE), CHECK_AND_PRINT_I(RADAR), - CHECK_AND_PRINT_I(WIDE), + CHECK_AND_PRINT_I(INDOOR_ONLY), + CHECK_AND_PRINT_I(GO_CONCURRENT), + CHECK_AND_PRINT_I(UNIFORM), + CHECK_AND_PRINT_I(20MHZ), CHECK_AND_PRINT_I(40MHZ), CHECK_AND_PRINT_I(80MHZ), CHECK_AND_PRINT_I(160MHZ), - CHECK_AND_PRINT_I(INDOOR_ONLY), - CHECK_AND_PRINT_I(GO_CONCURRENT), - ch_flags, reg_rule_flags, + CHECK_AND_PRINT_I(DC_HIGH), + ch_flags); + IWL_DEBUG_DEV(dev, IWL_DL_LAR, + "Ch. %d [%sGHz] reg_flags 0x%x: %s\n", + center_freq, + band == NL80211_BAND_5GHZ ? "5.2" : "2.4", + reg_rule_flags, ((ch_flags & NVM_CHANNEL_ACTIVE) && !(ch_flags & NVM_CHANNEL_RADAR)) ? 
"Ad-Hoc" : ""); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h index 3fd6506a02ab..2d1a24dd8410 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h @@ -77,7 +77,7 @@ */ struct iwl_nvm_data * iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, - const __le16 *nvm_hw, const __le16 *nvm_sw, + const __be16 *nvm_hw, const __le16 *nvm_sw, const __le16 *nvm_calib, const __le16 *regulatory, const __le16 *mac_override, const __le16 *phy_sku, u8 tx_chains, u8 rx_chains, bool lar_fw_supported); @@ -93,7 +93,8 @@ void iwl_set_hw_address_from_csr(struct iwl_trans *trans, */ void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, struct iwl_nvm_data *data, const __le16 *nvm_ch_flags, - u8 tx_chains, u8 rx_chains, bool lar_supported); + u8 tx_chains, u8 rx_chains, bool lar_supported, + bool no_wide_in_5ghz); /** * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h index 6772c59b7764..421a869633a3 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h @@ -109,13 +109,12 @@ /* Device system time */ #define DEVICE_SYSTEM_TIME_REG 0xA0206C -/* Device NMI register */ +/* Device NMI register and value for 8000 family and lower hw's */ #define DEVICE_SET_NMI_REG 0x00a01c30 -#define DEVICE_SET_NMI_VAL_HW BIT(0) #define DEVICE_SET_NMI_VAL_DRV BIT(7) -#define DEVICE_SET_NMI_8000_REG 0x00a01c24 -#define DEVICE_SET_NMI_8000_VAL 0x1000000 +/* Device NMI register and value for 9000 family and above hw's */ #define UREG_NIC_SET_NMI_DRIVER 0x00a05c10 +#define UREG_NIC_SET_NMI_DRIVER_NMI_FROM_DRIVER_MSK 0xff000000 /* Shared registers (0x0..0x3ff, via target indirect or periphery */ #define SHR_BASE 0x00a10000 @@ -404,6 +403,12 @@ enum aux_misc_master1_en { #define SB_CPU_2_STATUS 0xA01E34 #define UMAG_SB_CPU_1_STATUS 0xA038C0 #define UMAG_SB_CPU_2_STATUS 0xA038C4 +#define UMAG_GEN_HW_STATUS 0xA038C8 + +/* For UMAG_GEN_HW_STATUS reg check */ +enum { + UMAG_GEN_HW_IS_FPGA = BIT(1), +}; /* FW chicken bits */ #define LMPM_CHICK 0xA01FF8 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index eb6842abb4c7..e90abbfba718 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -76,7 +76,8 @@ #include "iwl-config.h" #include "fw/img.h" #include "iwl-op-mode.h" -#include "fw/api.h" +#include "fw/api/cmdhdr.h" +#include "fw/api/txq.h" /** * DOC: Transport layer - what is it ? 
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile index 83ac807e547d..00e6737dda72 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile @@ -6,7 +6,7 @@ iwlmvm-y += power.o coex.o iwlmvm-y += tt.o offloading.o tdls.o iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o -iwlmvm-y += tof.o fw-dbg.o +iwlmvm-y += tof.o iwlmvm-$(CONFIG_PM) += d3.o ccflags-y += -I$(src)/../ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c index 34dd5c40ce77..79c80f181f7d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c @@ -7,6 +7,7 @@ * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -67,7 +69,7 @@ #include #include -#include "fw-api-coex.h" +#include "fw/api/coex.h" #include "iwl-modparams.h" #include "mvm.h" #include "iwl-debug.h" @@ -148,215 +150,6 @@ static const __le64 iwl_ci_mask[][3] = { }, }; -struct corunning_block_luts { - u8 range; - __le32 lut20[BT_COEX_CORUN_LUT_SIZE]; -}; - -/* - * Ranges for the antenna coupling calibration / co-running block LUT: - * LUT0: [ 0, 12[ - * LUT1: [12, 20[ - * LUT2: [20, 21[ - * LUT3: [21, 23[ - * LUT4: [23, 27[ - * LUT5: [27, 30[ - * LUT6: [30, 32[ - * LUT7: [32, 33[ - * LUT8: [33, - [ - */ -static const struct corunning_block_luts antenna_coupling_ranges[] = { - { - .range = 0, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 12, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), 
cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 20, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 21, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 23, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 27, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), 
cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 30, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 32, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 33, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, -}; - static enum iwl_bt_coex_lut_type iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif) { @@ -437,9 +230,6 @@ int iwl_mvm_send_bt_init_conf(struct iwl_mvm *mvm) bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_SYNC2SCO_ENABLED); - if (iwl_mvm_bt_is_plcr_supported(mvm)) - bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_CORUN_ENABLED); - if (iwl_mvm_is_mplut_supported(mvm)) bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_MPLUT_ENABLED); @@ -560,8 +350,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, smps_mode = IEEE80211_SMPS_AUTOMATIC; if (mvmvif->phy_ctxt && - IWL_COEX_IS_RRC_ON(mvm->last_bt_notif.ttc_rrc_status, - mvmvif->phy_ctxt->id)) + (mvm->last_bt_notif.rrc_status & BIT(mvmvif->phy_ctxt->id))) 
smps_mode = IEEE80211_SMPS_AUTOMATIC; IWL_DEBUG_COEX(data->mvm, @@ -725,17 +514,36 @@ void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data; - IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n"); - IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance); - IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n", - le32_to_cpu(notif->primary_ch_lut)); - IWL_DEBUG_COEX(mvm, "\tBT secondary_ch_lut %d\n", - le32_to_cpu(notif->secondary_ch_lut)); - IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n", - le32_to_cpu(notif->bt_activity_grading)); + if (!iwl_mvm_has_new_ats_coex_api(mvm)) { + struct iwl_bt_coex_profile_notif_v4 *v4 = (void *)pkt->data; + + mvm->last_bt_notif.mbox_msg[0] = v4->mbox_msg[0]; + mvm->last_bt_notif.mbox_msg[1] = v4->mbox_msg[1]; + mvm->last_bt_notif.mbox_msg[2] = v4->mbox_msg[2]; + mvm->last_bt_notif.mbox_msg[3] = v4->mbox_msg[3]; + mvm->last_bt_notif.msg_idx = v4->msg_idx; + mvm->last_bt_notif.bt_ci_compliance = v4->bt_ci_compliance; + mvm->last_bt_notif.primary_ch_lut = v4->primary_ch_lut; + mvm->last_bt_notif.secondary_ch_lut = v4->secondary_ch_lut; + mvm->last_bt_notif.bt_activity_grading = + v4->bt_activity_grading; + mvm->last_bt_notif.ttc_status = v4->ttc_status; + mvm->last_bt_notif.rrc_status = v4->rrc_status; + } else { + /* save this notification for future use: rssi fluctuations */ + memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif)); + } + + IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n"); + IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", + mvm->last_bt_notif.bt_ci_compliance); + IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n", + le32_to_cpu(mvm->last_bt_notif.primary_ch_lut)); + IWL_DEBUG_COEX(mvm, "\tBT secondary_ch_lut %d\n", + le32_to_cpu(mvm->last_bt_notif.secondary_ch_lut)); + IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n", + le32_to_cpu(mvm->last_bt_notif.bt_activity_grading)); - /* remember this notification for future use: rssi fluctuations */ - memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif)); iwl_mvm_bt_coex_notif_handle(mvm); } @@ -792,7 +600,7 @@ u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt; enum iwl_bt_coex_lut_type lut_type; - if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id)) + if (mvm->last_bt_notif.ttc_status & BIT(phy_ctxt->id)) return LINK_QUAL_AGG_TIME_LIMIT_DEF; if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < @@ -816,7 +624,7 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt; enum iwl_bt_coex_lut_type lut_type; - if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id)) + if (mvm->last_bt_notif.ttc_status & BIT(phy_ctxt->id)) return true; if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < @@ -909,59 +717,3 @@ void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm) { iwl_mvm_bt_coex_notif_handle(mvm); } - -void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_mvm_antenna_coupling_notif *notif = (void *)pkt->data; - u32 ant_isolation = le32_to_cpu(notif->isolation); - struct iwl_bt_coex_corun_lut_update_cmd cmd = {}; - u8 __maybe_unused lower_bound, upper_bound; - u8 lut; - - if (!iwl_mvm_bt_is_plcr_supported(mvm)) - return; - - lockdep_assert_held(&mvm->mutex); - - /* Ignore updates if we are in force mode */ - if 
(unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) - return; - - if (ant_isolation == mvm->last_ant_isol) - return; - - for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++) - if (ant_isolation < antenna_coupling_ranges[lut + 1].range) - break; - - lower_bound = antenna_coupling_ranges[lut].range; - - if (lut < ARRAY_SIZE(antenna_coupling_ranges) - 1) - upper_bound = antenna_coupling_ranges[lut + 1].range; - else - upper_bound = antenna_coupling_ranges[lut].range; - - IWL_DEBUG_COEX(mvm, "Antenna isolation=%d in range [%d,%d[, lut=%d\n", - ant_isolation, lower_bound, upper_bound, lut); - - mvm->last_ant_isol = ant_isolation; - - if (mvm->last_corun_lut == lut) - return; - - mvm->last_corun_lut = lut; - - /* For the moment, use the same LUT for 20GHz and 40GHz */ - memcpy(&cmd.corun_lut20, antenna_coupling_ranges[lut].lut20, - sizeof(cmd.corun_lut20)); - - memcpy(&cmd.corun_lut40, antenna_coupling_ranges[lut].lut20, - sizeof(cmd.corun_lut40)); - - if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_CORUN_LUT, 0, - sizeof(cmd), &cmd)) - IWL_ERR(mvm, - "failed to send BT_COEX_UPDATE_CORUN_LUT command\n"); -} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h index 6fda8627b726..976640fed334 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h @@ -95,7 +95,6 @@ #define IWL_MVM_BT_COEX_EN_RED_TXP_THRESH 62 #define IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH 65 #define IWL_MVM_BT_COEX_SYNC2SCO 1 -#define IWL_MVM_BT_COEX_CORUNNING 0 #define IWL_MVM_BT_COEX_MPLUT 1 #define IWL_MVM_BT_COEX_RRC 1 #define IWL_MVM_BT_COEX_TTC 1 @@ -111,7 +110,6 @@ #define IWL_MVM_SW_TX_CSUM_OFFLOAD 0 #define IWL_MVM_HW_CSUM_DISABLE 0 #define IWL_MVM_PARSE_NVM 0 -#define IWL_MVM_COLLECT_FW_ERR_DUMP 1 #define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW 1 @@ -138,8 +136,10 @@ #define IWL_MVM_RS_SR_NO_DECREASE 85 /* percent */ #define IWL_MVM_RS_AGG_TIME_LIMIT 4000 /* 4 msecs. valid 100-8000 */ #define IWL_MVM_RS_AGG_DISABLE_START 3 +#define IWL_MVM_RS_AGG_START_THRESHOLD 10 /* num frames per second */ #define IWL_MVM_RS_TPC_SR_FORCE_INCREASE 75 /* percent */ #define IWL_MVM_RS_TPC_SR_NO_INCREASE 85 /* percent */ #define IWL_MVM_RS_TPC_TX_POWER_STEP 3 +#define IWL_MVM_ENABLE_EBS 1 #endif /* __MVM_CONSTANTS_H */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 5de19ea10575..b205a7bfb828 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -2167,7 +2167,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) * 1. We are not using a unified image * 2. 
We are using a unified image but had an error while exiting D3 */ - set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); + set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status); set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status); /* * When switching images we return 1, which causes mac80211 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c index a7ac281e5cde..71a01df96f8b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c @@ -65,7 +65,7 @@ * *****************************************************************************/ #include "mvm.h" -#include "fw-api-tof.h" +#include "fw/api/tof.h" #include "debugfs.h" static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index c1c9c489edc9..e97904c2c4d4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -69,7 +69,6 @@ #include #include "mvm.h" -#include "fw-dbg.h" #include "sta.h" #include "iwl-io.h" #include "debugfs.h" @@ -83,8 +82,11 @@ static ssize_t iwl_dbgfs_ctdp_budget_read(struct file *file, char buf[16]; int pos, budget; + if (!iwl_mvm_is_ctdp_supported(mvm)) + return -EOPNOTSUPP; + if (!iwl_mvm_firmware_running(mvm) || - mvm->cur_ucode != IWL_UCODE_REGULAR) + mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) return -EIO; mutex_lock(&mvm->mutex); @@ -104,8 +106,11 @@ static ssize_t iwl_dbgfs_stop_ctdp_write(struct iwl_mvm *mvm, char *buf, { int ret; + if (!iwl_mvm_is_ctdp_supported(mvm)) + return -EOPNOTSUPP; + if (!iwl_mvm_firmware_running(mvm) || - mvm->cur_ucode != IWL_UCODE_REGULAR) + mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) return -EIO; mutex_lock(&mvm->mutex); @@ -115,6 +120,18 @@ static ssize_t iwl_dbgfs_stop_ctdp_write(struct iwl_mvm *mvm, char *buf, return ret ?: count; } +static ssize_t iwl_dbgfs_force_ctkill_write(struct iwl_mvm *mvm, char *buf, + size_t count, loff_t *ppos) +{ + if (!iwl_mvm_firmware_running(mvm) || + mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) + return -EIO; + + iwl_mvm_enter_ctkill(mvm); + + return count; +} + static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { @@ -122,7 +139,7 @@ static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf, u32 flush_arg; if (!iwl_mvm_firmware_running(mvm) || - mvm->cur_ucode != IWL_UCODE_REGULAR) + mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) return -EIO; if (kstrtou32(buf, 0, &flush_arg)) @@ -155,7 +172,7 @@ static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf, int sta_id, drain, ret; if (!iwl_mvm_firmware_running(mvm) || - mvm->cur_ucode != IWL_UCODE_REGULAR) + mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) return -EIO; if (sscanf(buf, "%d %d", &sta_id, &drain) != 2) @@ -192,7 +209,7 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf, return -EINVAL; /* default is to dump the entire data segment */ - img = &mvm->fw->img[mvm->cur_ucode]; + img = &mvm->fw->img[mvm->fwrt.cur_fw_img]; ofs = img->sec[IWL_UCODE_SECTION_DATA].offset; len = img->sec[IWL_UCODE_SECTION_DATA].len; @@ -224,7 +241,7 @@ static ssize_t iwl_dbgfs_sram_write(struct iwl_mvm *mvm, char *buf, if (!iwl_mvm_firmware_running(mvm)) return -EINVAL; - img = &mvm->fw->img[mvm->cur_ucode]; + img = &mvm->fw->img[mvm->fwrt.cur_fw_img]; img_offset = img->sec[IWL_UCODE_SECTION_DATA].offset; img_len = 
img->sec[IWL_UCODE_SECTION_DATA].len; @@ -452,20 +469,9 @@ static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, char *buf, return ret ?: count; } -#define BT_MBOX_MSG(_notif, _num, _field) \ - ((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\ - >> BT_MBOX##_num##_##_field##_POS) - - -#define BT_MBOX_PRINT(_num, _field, _end) \ - pos += scnprintf(buf + pos, bufsz - pos, \ - "\t%s: %d%s", \ - #_field, \ - BT_MBOX_MSG(notif, _num, _field), \ - true ? "\n" : ", "); - static -int iwl_mvm_coex_dump_mbox(struct iwl_bt_coex_profile_notif *notif, char *buf, +int iwl_mvm_coex_dump_mbox(struct iwl_mvm *mvm, + struct iwl_bt_coex_profile_notif *notif, char *buf, int pos, int bufsz) { pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n"); @@ -509,6 +515,7 @@ int iwl_mvm_coex_dump_mbox(struct iwl_bt_coex_profile_notif *notif, char *buf, BT_MBOX_PRINT(3, SCO_STATE, false); BT_MBOX_PRINT(3, SNIFF_STATE, false); BT_MBOX_PRINT(3, A2DP_STATE, false); + BT_MBOX_PRINT(3, A2DP_SRC, false); BT_MBOX_PRINT(3, ACL_STATE, false); BT_MBOX_PRINT(3, MSTR_STATE, false); BT_MBOX_PRINT(3, OBX_STATE, false); @@ -518,7 +525,12 @@ int iwl_mvm_coex_dump_mbox(struct iwl_bt_coex_profile_notif *notif, char *buf, BT_MBOX_PRINT(3, INBAND_P, false); BT_MBOX_PRINT(3, MSG_TYPE_2, false); BT_MBOX_PRINT(3, SSN_2, false); - BT_MBOX_PRINT(3, UPDATE_REQUEST, true); + BT_MBOX_PRINT(3, UPDATE_REQUEST, !iwl_mvm_has_new_ats_coex_api(mvm)); + + if (iwl_mvm_has_new_ats_coex_api(mvm)) { + BT_MBOX_PRINT(4, ATS_BT_INTERVAL, false); + BT_MBOX_PRINT(4, ATS_BT_ACTIVE_MAX_TH, true); + } return pos; } @@ -537,7 +549,7 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf, mutex_lock(&mvm->mutex); - pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz); + pos += iwl_mvm_coex_dump_mbox(mvm, notif, buf, pos, bufsz); pos += scnprintf(buf + pos, bufsz - pos, "bt_ci_compliance = %d\n", notif->bt_ci_compliance); @@ -548,20 +560,15 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf, pos += scnprintf(buf + pos, bufsz - pos, "bt_activity_grading = %d\n", le32_to_cpu(notif->bt_activity_grading)); - pos += scnprintf(buf + pos, bufsz - pos, - "antenna isolation = %d CORUN LUT index = %d\n", - mvm->last_ant_isol, mvm->last_corun_lut); pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n", - (notif->ttc_rrc_status >> 4) & 0xF); + notif->rrc_status & 0xF); pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n", - notif->ttc_rrc_status & 0xF); + notif->ttc_status & 0xF); pos += scnprintf(buf + pos, bufsz - pos, "sync_sco = %d\n", IWL_MVM_BT_COEX_SYNC2SCO); pos += scnprintf(buf + pos, bufsz - pos, "mplut = %d\n", IWL_MVM_BT_COEX_MPLUT); - pos += scnprintf(buf + pos, bufsz - pos, "corunning = %d\n", - IWL_MVM_BT_COEX_CORUNNING); mutex_unlock(&mvm->mutex); @@ -1123,7 +1130,7 @@ static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file *file, int pos = 0; mutex_lock(&mvm->mutex); - conf = mvm->fw_dbg_conf; + conf = mvm->fwrt.dump.conf; mutex_unlock(&mvm->mutex); pos += scnprintf(buf + pos, bufsz - pos, "%d\n", conf); @@ -1190,7 +1197,7 @@ static ssize_t iwl_dbgfs_fw_dbg_conf_write(struct iwl_mvm *mvm, return -EINVAL; mutex_lock(&mvm->mutex); - ret = iwl_mvm_start_fw_dbg_conf(mvm, conf_id); + ret = iwl_fw_start_dbg_conf(&mvm->fwrt, conf_id); mutex_unlock(&mvm->mutex); return ret ?: count; @@ -1211,8 +1218,8 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm, if (count == 0) return 0; - iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, buf, - (count - 1), NULL); 
+ iwl_fw_dbg_collect(&mvm->fwrt, FW_DBG_TRIGGER_USER, buf, + (count - 1), NULL); iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE); @@ -1642,6 +1649,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64); /* Device wide debugfs entries */ MVM_DEBUGFS_READ_FILE_OPS(ctdp_budget); MVM_DEBUGFS_WRITE_FILE_OPS(stop_ctdp, 8); +MVM_DEBUGFS_WRITE_FILE_OPS(force_ctkill, 8); MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16); MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain, 8); MVM_DEBUGFS_WRITE_FILE_OPS(send_echo_cmd, 8); @@ -1829,6 +1837,7 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) MVM_DEBUGFS_ADD_FILE(nic_temp, dbgfs_dir, S_IRUSR); MVM_DEBUGFS_ADD_FILE(ctdp_budget, dbgfs_dir, S_IRUSR); MVM_DEBUGFS_ADD_FILE(stop_ctdp, dbgfs_dir, S_IWUSR); + MVM_DEBUGFS_ADD_FILE(force_ctkill, dbgfs_dir, S_IWUSR); MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, S_IRUSR); MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR); MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, S_IRUSR); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h index aad265dcfaf5..e8e74dd558f7 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h @@ -68,2823 +68,33 @@ #ifndef __fw_api_h__ #define __fw_api_h__ -#include "fw-api-rs.h" -#include "fw-api-rx.h" -#include "fw-api-tx.h" -#include "fw-api-sta.h" -#include "fw-api-mac.h" -#include "fw-api-power.h" -#include "fw-api-d3.h" -#include "fw-api-coex.h" -#include "fw-api-scan.h" -#include "fw-api-stats.h" -#include "fw-api-tof.h" - -/* Tx queue numbers for non-DQA mode */ -enum { - IWL_MVM_OFFCHANNEL_QUEUE = 8, - IWL_MVM_CMD_QUEUE = 9, -}; - -/* - * DQA queue numbers - * - * @IWL_MVM_DQA_CMD_QUEUE: a queue reserved for sending HCMDs to the FW - * @IWL_MVM_DQA_AUX_QUEUE: a queue reserved for aux frames - * @IWL_MVM_DQA_P2P_DEVICE_QUEUE: a queue reserved for P2P device frames - * @IWL_MVM_DQA_GCAST_QUEUE: a queue reserved for P2P GO/SoftAP GCAST frames - * @IWL_MVM_DQA_BSS_CLIENT_QUEUE: a queue reserved for BSS activity, to ensure - * that we are never left without the possibility to connect to an AP. - * @IWL_MVM_DQA_MIN_MGMT_QUEUE: first TXQ in pool for MGMT and non-QOS frames. - * Each MGMT queue is mapped to a single STA - * MGMT frames are frames that return true on ieee80211_is_mgmt() - * @IWL_MVM_DQA_MAX_MGMT_QUEUE: last TXQ in pool for MGMT frames - * @IWL_MVM_DQA_AP_PROBE_RESP_QUEUE: a queue reserved for P2P GO/SoftAP probe - * responses - * @IWL_MVM_DQA_MIN_DATA_QUEUE: first TXQ in pool for DATA frames. - * DATA frames are intended for !ieee80211_is_mgmt() frames, but if - * the MGMT TXQ pool is exhausted, mgmt frames can be sent on DATA queues - * as well - * @IWL_MVM_DQA_MAX_DATA_QUEUE: last TXQ in pool for DATA frames - */ -enum iwl_mvm_dqa_txq { - IWL_MVM_DQA_CMD_QUEUE = 0, - IWL_MVM_DQA_AUX_QUEUE = 1, - IWL_MVM_DQA_P2P_DEVICE_QUEUE = 2, - IWL_MVM_DQA_GCAST_QUEUE = 3, - IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4, - IWL_MVM_DQA_MIN_MGMT_QUEUE = 5, - IWL_MVM_DQA_MAX_MGMT_QUEUE = 8, - IWL_MVM_DQA_AP_PROBE_RESP_QUEUE = 9, - IWL_MVM_DQA_MIN_DATA_QUEUE = 10, - IWL_MVM_DQA_MAX_DATA_QUEUE = 31, -}; - -enum iwl_mvm_tx_fifo { - IWL_MVM_TX_FIFO_BK = 0, - IWL_MVM_TX_FIFO_BE, - IWL_MVM_TX_FIFO_VI, - IWL_MVM_TX_FIFO_VO, - IWL_MVM_TX_FIFO_MCAST = 5, - IWL_MVM_TX_FIFO_CMD = 7, -}; - - -/** - * enum iwl_legacy_cmds - legacy group command IDs - */ -enum iwl_legacy_cmds { - /** - * @MVM_ALIVE: - * Alive data from the firmware, as described in - * &struct mvm_alive_resp_v3 or &struct mvm_alive_resp. 
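Aside: the hunk above documents the fixed TX FIFO numbering (IWL_MVM_TX_FIFO_BK = 0, BE, VI, VO, plus MCAST and CMD) alongside the DQA queue layout. The standalone sketch below only illustrates how 802.11 access categories could be mapped onto that FIFO numbering; the EX_-prefixed names and the AC ordering (VO=0, VI=1, BE=2, BK=3, mirroring mac80211) are assumptions made for this example and are not taken from the patch.

/* Illustrative only: map access categories to the IWL_MVM_TX_FIFO_* numbering
 * documented above (BK=0, BE=1, VI=2, VO=3). The AC ordering used here is an
 * assumption for this standalone example.
 */
#include <stdio.h>

enum example_tx_fifo { EX_TX_FIFO_BK = 0, EX_TX_FIFO_BE, EX_TX_FIFO_VI, EX_TX_FIFO_VO };
enum example_ac { EX_AC_VO = 0, EX_AC_VI, EX_AC_BE, EX_AC_BK };

static const enum example_tx_fifo ac_to_fifo[] = {
	[EX_AC_VO] = EX_TX_FIFO_VO,
	[EX_AC_VI] = EX_TX_FIFO_VI,
	[EX_AC_BE] = EX_TX_FIFO_BE,
	[EX_AC_BK] = EX_TX_FIFO_BK,
};

int main(void)
{
	for (int ac = EX_AC_VO; ac <= EX_AC_BK; ac++)
		printf("AC %d -> TX FIFO %d\n", ac, ac_to_fifo[ac]);
	return 0;
}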
- */ - MVM_ALIVE = 0x1, - - /** - * @REPLY_ERROR: Cause an error in the firmware, for testing purposes. - */ - REPLY_ERROR = 0x2, - - /** - * @ECHO_CMD: Send data to the device to have it returned immediately. - */ - ECHO_CMD = 0x3, - - /** - * @INIT_COMPLETE_NOTIF: Notification that initialization is complete. - */ - INIT_COMPLETE_NOTIF = 0x4, - - /** - * @PHY_CONTEXT_CMD: - * Add/modify/remove a PHY context, using &struct iwl_phy_context_cmd. - */ - PHY_CONTEXT_CMD = 0x8, - - /** - * @DBG_CFG: Debug configuration command. - */ - DBG_CFG = 0x9, - - /** - * @ANTENNA_COUPLING_NOTIFICATION: - * Antenna coupling data, &struct iwl_mvm_antenna_coupling_notif - */ - ANTENNA_COUPLING_NOTIFICATION = 0xa, - - /** - * @SCAN_ITERATION_COMPLETE_UMAC: - * Firmware indicates a scan iteration completed, using - * &struct iwl_umac_scan_iter_complete_notif. - */ - SCAN_ITERATION_COMPLETE_UMAC = 0xb5, - - /** - * @SCAN_CFG_CMD: - * uses &struct iwl_scan_config_v1 or &struct iwl_scan_config - */ - SCAN_CFG_CMD = 0xc, - - /** - * @SCAN_REQ_UMAC: uses &struct iwl_scan_req_umac - */ - SCAN_REQ_UMAC = 0xd, - - /** - * @SCAN_ABORT_UMAC: uses &struct iwl_umac_scan_abort - */ - SCAN_ABORT_UMAC = 0xe, - - /** - * @SCAN_COMPLETE_UMAC: uses &struct iwl_umac_scan_complete - */ - SCAN_COMPLETE_UMAC = 0xf, - - /** - * @BA_WINDOW_STATUS_NOTIFICATION_ID: - * uses &struct iwl_ba_window_status_notif - */ - BA_WINDOW_STATUS_NOTIFICATION_ID = 0x13, - - /** - * @ADD_STA_KEY: - * &struct iwl_mvm_add_sta_key_cmd_v1 or - * &struct iwl_mvm_add_sta_key_cmd. - */ - ADD_STA_KEY = 0x17, - - /** - * @ADD_STA: - * &struct iwl_mvm_add_sta_cmd or &struct iwl_mvm_add_sta_cmd_v7. - */ - ADD_STA = 0x18, - - /** - * @REMOVE_STA: &struct iwl_mvm_rm_sta_cmd - */ - REMOVE_STA = 0x19, - - /** - * @FW_GET_ITEM_CMD: uses &struct iwl_fw_get_item_cmd - */ - FW_GET_ITEM_CMD = 0x1a, - - /** - * @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2, - * response in &struct iwl_mvm_tx_resp or - * &struct iwl_mvm_tx_resp_v3 - */ - TX_CMD = 0x1c, - - /** - * @TXPATH_FLUSH: &struct iwl_tx_path_flush_cmd - */ - TXPATH_FLUSH = 0x1e, - - /** - * @MGMT_MCAST_KEY: - * &struct iwl_mvm_mgmt_mcast_key_cmd or - * &struct iwl_mvm_mgmt_mcast_key_cmd_v1 - */ - MGMT_MCAST_KEY = 0x1f, - - /* scheduler config */ - /** - * @SCD_QUEUE_CFG: &struct iwl_scd_txq_cfg_cmd for older hardware, - * &struct iwl_tx_queue_cfg_cmd with &struct iwl_tx_queue_cfg_rsp - * for newer (A000) hardware. 
- */ - SCD_QUEUE_CFG = 0x1d, - - /** - * @WEP_KEY: uses &struct iwl_mvm_wep_key_cmd - */ - WEP_KEY = 0x20, - - /** - * @SHARED_MEM_CFG: - * retrieve shared memory configuration - response in - * &struct iwl_shared_mem_cfg - */ - SHARED_MEM_CFG = 0x25, - - /** - * @TDLS_CHANNEL_SWITCH_CMD: uses &struct iwl_tdls_channel_switch_cmd - */ - TDLS_CHANNEL_SWITCH_CMD = 0x27, - - /** - * @TDLS_CHANNEL_SWITCH_NOTIFICATION: - * uses &struct iwl_tdls_channel_switch_notif - */ - TDLS_CHANNEL_SWITCH_NOTIFICATION = 0xaa, - - /** - * @TDLS_CONFIG_CMD: - * &struct iwl_tdls_config_cmd, response in &struct iwl_tdls_config_res - */ - TDLS_CONFIG_CMD = 0xa7, - - /** - * @MAC_CONTEXT_CMD: &struct iwl_mac_ctx_cmd - */ - MAC_CONTEXT_CMD = 0x28, - - /** - * @TIME_EVENT_CMD: - * &struct iwl_time_event_cmd, response in &struct iwl_time_event_resp - */ - TIME_EVENT_CMD = 0x29, /* both CMD and response */ - - /** - * @TIME_EVENT_NOTIFICATION: &struct iwl_time_event_notif - */ - TIME_EVENT_NOTIFICATION = 0x2a, - - /** - * @BINDING_CONTEXT_CMD: - * &struct iwl_binding_cmd or &struct iwl_binding_cmd_v1 - */ - BINDING_CONTEXT_CMD = 0x2b, - - /** - * @TIME_QUOTA_CMD: &struct iwl_time_quota_cmd - */ - TIME_QUOTA_CMD = 0x2c, - - /** - * @NON_QOS_TX_COUNTER_CMD: - * command is &struct iwl_nonqos_seq_query_cmd - */ - NON_QOS_TX_COUNTER_CMD = 0x2d, - - /** - * @LQ_CMD: using &struct iwl_lq_cmd - */ - LQ_CMD = 0x4e, - - /** - * @FW_PAGING_BLOCK_CMD: - * &struct iwl_fw_paging_cmd - */ - FW_PAGING_BLOCK_CMD = 0x4f, - - /** - * @SCAN_OFFLOAD_REQUEST_CMD: uses &struct iwl_scan_req_lmac - */ - SCAN_OFFLOAD_REQUEST_CMD = 0x51, - - /** - * @SCAN_OFFLOAD_ABORT_CMD: abort the scan - no further contents - */ - SCAN_OFFLOAD_ABORT_CMD = 0x52, - - /** - * @HOT_SPOT_CMD: uses &struct iwl_hs20_roc_req - */ - HOT_SPOT_CMD = 0x53, - - /** - * @SCAN_OFFLOAD_COMPLETE: - * notification, &struct iwl_periodic_scan_complete - */ - SCAN_OFFLOAD_COMPLETE = 0x6D, - - /** - * @SCAN_OFFLOAD_UPDATE_PROFILES_CMD: - * update scan offload (scheduled scan) profiles/blacklist/etc. 
- */ - SCAN_OFFLOAD_UPDATE_PROFILES_CMD = 0x6E, - - /** - * @MATCH_FOUND_NOTIFICATION: scan match found - */ - MATCH_FOUND_NOTIFICATION = 0xd9, - - /** - * @SCAN_ITERATION_COMPLETE: - * uses &struct iwl_lmac_scan_complete_notif - */ - SCAN_ITERATION_COMPLETE = 0xe7, - - /* Phy */ - /** - * @PHY_CONFIGURATION_CMD: &struct iwl_phy_cfg_cmd - */ - PHY_CONFIGURATION_CMD = 0x6a, - - /** - * @CALIB_RES_NOTIF_PHY_DB: &struct iwl_calib_res_notif_phy_db - */ - CALIB_RES_NOTIF_PHY_DB = 0x6b, - - /** - * @PHY_DB_CMD: &struct iwl_phy_db_cmd - */ - PHY_DB_CMD = 0x6c, - - /** - * @TOF_CMD: &struct iwl_tof_config_cmd - */ - TOF_CMD = 0x10, - - /** - * @TOF_NOTIFICATION: &struct iwl_tof_gen_resp_cmd - */ - TOF_NOTIFICATION = 0x11, - - /** - * @POWER_TABLE_CMD: &struct iwl_device_power_cmd - */ - POWER_TABLE_CMD = 0x77, - - /** - * @PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION: - * &struct iwl_uapsd_misbehaving_ap_notif - */ - PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78, - - /** - * @LTR_CONFIG: &struct iwl_ltr_config_cmd - */ - LTR_CONFIG = 0xee, - - /** - * @REPLY_THERMAL_MNG_BACKOFF: - * Thermal throttling command - */ - REPLY_THERMAL_MNG_BACKOFF = 0x7e, - - /** - * @DC2DC_CONFIG_CMD: - * Set/Get DC2DC frequency tune - * Command is &struct iwl_dc2dc_config_cmd, - * response is &struct iwl_dc2dc_config_resp - */ - DC2DC_CONFIG_CMD = 0x83, - - /** - * @NVM_ACCESS_CMD: using &struct iwl_nvm_access_cmd - */ - NVM_ACCESS_CMD = 0x88, - - /** - * @BEACON_NOTIFICATION: &struct iwl_extended_beacon_notif - */ - BEACON_NOTIFICATION = 0x90, - - /** - * @BEACON_TEMPLATE_CMD: - * Uses one of &struct iwl_mac_beacon_cmd_v6, - * &struct iwl_mac_beacon_cmd_v7 or &struct iwl_mac_beacon_cmd - * depending on the device version. - */ - BEACON_TEMPLATE_CMD = 0x91, - /** - * @TX_ANT_CONFIGURATION_CMD: &struct iwl_tx_ant_cfg_cmd - */ - TX_ANT_CONFIGURATION_CMD = 0x98, - - /** - * @STATISTICS_CMD: - * one of &struct iwl_statistics_cmd, - * &struct iwl_notif_statistics_v11, - * &struct iwl_notif_statistics_v10, - * &struct iwl_notif_statistics_cdb - */ - STATISTICS_CMD = 0x9c, - - /** - * @STATISTICS_NOTIFICATION: - * one of &struct iwl_notif_statistics_v10, - * &struct iwl_notif_statistics_v11, - * &struct iwl_notif_statistics_cdb - */ - STATISTICS_NOTIFICATION = 0x9d, - - /** - * @EOSP_NOTIFICATION: - * Notify that a service period ended, - * &struct iwl_mvm_eosp_notification - */ - EOSP_NOTIFICATION = 0x9e, - - /** - * @REDUCE_TX_POWER_CMD: - * &struct iwl_dev_tx_power_cmd_v3 or &struct iwl_dev_tx_power_cmd - */ - REDUCE_TX_POWER_CMD = 0x9f, - - /** - * @CARD_STATE_NOTIFICATION: - * Card state (RF/CT kill) notification, - * uses &struct iwl_card_state_notif - */ - CARD_STATE_NOTIFICATION = 0xa1, - - /** - * @MISSED_BEACONS_NOTIFICATION: &struct iwl_missed_beacons_notif - */ - MISSED_BEACONS_NOTIFICATION = 0xa2, - - /** - * @MAC_PM_POWER_TABLE: using &struct iwl_mac_power_cmd - */ - MAC_PM_POWER_TABLE = 0xa9, - - /** - * @MFUART_LOAD_NOTIFICATION: &struct iwl_mfuart_load_notif - */ - MFUART_LOAD_NOTIFICATION = 0xb1, - - /** - * @RSS_CONFIG_CMD: &struct iwl_rss_config_cmd - */ - RSS_CONFIG_CMD = 0xb3, - - /** - * @REPLY_RX_PHY_CMD: &struct iwl_rx_phy_info - */ - REPLY_RX_PHY_CMD = 0xc0, - - /** - * @REPLY_RX_MPDU_CMD: - * &struct iwl_rx_mpdu_res_start or &struct iwl_rx_mpdu_desc - */ - REPLY_RX_MPDU_CMD = 0xc1, - - /** - * @FRAME_RELEASE: - * Frame release (reorder helper) notification, uses - * &struct iwl_frame_release - */ - FRAME_RELEASE = 0xc3, - - /** - * @BA_NOTIF: - * BlockAck notification, uses &struct 
iwl_mvm_compressed_ba_notif - * or &struct iwl_mvm_ba_notif depending on the HW - */ - BA_NOTIF = 0xc5, - - /* Location Aware Regulatory */ - /** - * @MCC_UPDATE_CMD: using &struct iwl_mcc_update_cmd - */ - MCC_UPDATE_CMD = 0xc8, - - /** - * @MCC_CHUB_UPDATE_CMD: using &struct iwl_mcc_chub_notif - */ - MCC_CHUB_UPDATE_CMD = 0xc9, - - /** - * @MARKER_CMD: trace marker command, uses &struct iwl_mvm_marker - */ - MARKER_CMD = 0xcb, - - /** - * @BT_PROFILE_NOTIFICATION: &struct iwl_bt_coex_profile_notif - */ - BT_PROFILE_NOTIFICATION = 0xce, - - /** - * @BT_CONFIG: &struct iwl_bt_coex_cmd - */ - BT_CONFIG = 0x9b, - - /** - * @BT_COEX_UPDATE_CORUN_LUT: - * &struct iwl_bt_coex_corun_lut_update_cmd - */ - BT_COEX_UPDATE_CORUN_LUT = 0x5b, - - /** - * @BT_COEX_UPDATE_REDUCED_TXP: - * &struct iwl_bt_coex_reduced_txp_update_cmd - */ - BT_COEX_UPDATE_REDUCED_TXP = 0x5c, - - /** - * @BT_COEX_CI: &struct iwl_bt_coex_ci_cmd - */ - BT_COEX_CI = 0x5d, - - /** - * @REPLY_SF_CFG_CMD: &struct iwl_sf_cfg_cmd - */ - REPLY_SF_CFG_CMD = 0xd1, - /** - * @REPLY_BEACON_FILTERING_CMD: &struct iwl_beacon_filter_cmd - */ - REPLY_BEACON_FILTERING_CMD = 0xd2, - - /** - * @DTS_MEASUREMENT_NOTIFICATION: - * &struct iwl_dts_measurement_notif_v1 or - * &struct iwl_dts_measurement_notif_v2 - */ - DTS_MEASUREMENT_NOTIFICATION = 0xdd, - - /** - * @LDBG_CONFIG_CMD: configure continuous trace recording - */ - LDBG_CONFIG_CMD = 0xf6, - - /** - * @DEBUG_LOG_MSG: Debugging log data from firmware - */ - DEBUG_LOG_MSG = 0xf7, - - /** - * @BCAST_FILTER_CMD: &struct iwl_bcast_filter_cmd - */ - BCAST_FILTER_CMD = 0xcf, - - /** - * @MCAST_FILTER_CMD: &struct iwl_mcast_filter_cmd - */ - MCAST_FILTER_CMD = 0xd0, - - /** - * @D3_CONFIG_CMD: &struct iwl_d3_manager_config - */ - D3_CONFIG_CMD = 0xd3, - - /** - * @PROT_OFFLOAD_CONFIG_CMD: Depending on firmware, uses one of - * &struct iwl_proto_offload_cmd_v1, &struct iwl_proto_offload_cmd_v2, - * &struct iwl_proto_offload_cmd_v3_small, - * &struct iwl_proto_offload_cmd_v3_large - */ - PROT_OFFLOAD_CONFIG_CMD = 0xd4, - - /** - * @OFFLOADS_QUERY_CMD: - * No data in command, response in &struct iwl_wowlan_status - */ - OFFLOADS_QUERY_CMD = 0xd5, - - /** - * @REMOTE_WAKE_CONFIG_CMD: &struct iwl_wowlan_remote_wake_config - */ - REMOTE_WAKE_CONFIG_CMD = 0xd6, - - /** - * @D0I3_END_CMD: End D0i3/D3 state, no command data - */ - D0I3_END_CMD = 0xed, - - /** - * @WOWLAN_PATTERNS: &struct iwl_wowlan_patterns_cmd - */ - WOWLAN_PATTERNS = 0xe0, - - /** - * @WOWLAN_CONFIGURATION: &struct iwl_wowlan_config_cmd - */ - WOWLAN_CONFIGURATION = 0xe1, - - /** - * @WOWLAN_TSC_RSC_PARAM: &struct iwl_wowlan_rsc_tsc_params_cmd - */ - WOWLAN_TSC_RSC_PARAM = 0xe2, - - /** - * @WOWLAN_TKIP_PARAM: &struct iwl_wowlan_tkip_params_cmd - */ - WOWLAN_TKIP_PARAM = 0xe3, - - /** - * @WOWLAN_KEK_KCK_MATERIAL: &struct iwl_wowlan_kek_kck_material_cmd - */ - WOWLAN_KEK_KCK_MATERIAL = 0xe4, - - /** - * @WOWLAN_GET_STATUSES: response in &struct iwl_wowlan_status - */ - WOWLAN_GET_STATUSES = 0xe5, - - /** - * @SCAN_OFFLOAD_PROFILES_QUERY_CMD: - * No command data, response is &struct iwl_scan_offload_profiles_query - */ - SCAN_OFFLOAD_PROFILES_QUERY_CMD = 0x56, -}; - -/* Please keep this enum *SORTED* by hex value. - * Needed for binary search, otherwise a warning will be triggered. 
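Aside: the comment just above explains that the per-group subcommand enums (such as the one that follows) must stay sorted by hex value because a binary search is run over the IDs. A minimal userspace illustration of why sorted order matters is sketched below; the ID values and helper are invented for the example and are not the driver's or firmware's lookup code.

/* Binary search only works on a sorted array, hence the "keep sorted" rule. */
#include <stdio.h>
#include <stdlib.h>

static int cmp_u16(const void *a, const void *b)
{
	unsigned short x = *(const unsigned short *)a;
	unsigned short y = *(const unsigned short *)b;

	return (x > y) - (x < y);
}

int main(void)
{
	/* example subcommand IDs, kept in ascending order */
	unsigned short ids[] = { 0x00, 0x03, 0xFE, 0xFF };
	unsigned short key = 0xFE;
	unsigned short *hit = bsearch(&key, ids, sizeof(ids) / sizeof(ids[0]),
				      sizeof(ids[0]), cmp_u16);

	printf("0x%02X %s\n", key, hit ? "found" : "not found");
	return 0;
}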
- */ -enum iwl_mac_conf_subcmd_ids { - LINK_QUALITY_MEASUREMENT_CMD = 0x1, - LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF = 0xFE, - CHANNEL_SWITCH_NOA_NOTIF = 0xFF, -}; - -/** - * enum iwl_phy_ops_subcmd_ids - PHY group commands - */ -enum iwl_phy_ops_subcmd_ids { - /** - * @CMD_DTS_MEASUREMENT_TRIGGER_WIDE: - * Uses either &struct iwl_dts_measurement_cmd or - * &struct iwl_ext_dts_measurement_cmd - */ - CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0, - - /** - * @CTDP_CONFIG_CMD: &struct iwl_mvm_ctdp_cmd - */ - CTDP_CONFIG_CMD = 0x03, - - /** - * @TEMP_REPORTING_THRESHOLDS_CMD: &struct temp_report_ths_cmd - */ - TEMP_REPORTING_THRESHOLDS_CMD = 0x04, - - /** - * @GEO_TX_POWER_LIMIT: &struct iwl_geo_tx_power_profiles_cmd - */ - GEO_TX_POWER_LIMIT = 0x05, - - /** - * @CT_KILL_NOTIFICATION: &struct ct_kill_notif - */ - CT_KILL_NOTIFICATION = 0xFE, - - /** - * @DTS_MEASUREMENT_NOTIF_WIDE: - * &struct iwl_dts_measurement_notif_v1 or - * &struct iwl_dts_measurement_notif_v2 - */ - DTS_MEASUREMENT_NOTIF_WIDE = 0xFF, -}; - -/** - * enum iwl_system_subcmd_ids - system group command IDs - */ -enum iwl_system_subcmd_ids { - /** - * @SHARED_MEM_CFG_CMD: - * response in &struct iwl_shared_mem_cfg or - * &struct iwl_shared_mem_cfg_v2 - */ - SHARED_MEM_CFG_CMD = 0x0, - - /** - * @INIT_EXTENDED_CFG_CMD: &struct iwl_init_extended_cfg_cmd - */ - INIT_EXTENDED_CFG_CMD = 0x03, -}; - -/** - * enum iwl_data_path_subcmd_ids - data path group commands - */ -enum iwl_data_path_subcmd_ids { - /** - * @DQA_ENABLE_CMD: &struct iwl_dqa_enable_cmd - */ - DQA_ENABLE_CMD = 0x0, - - /** - * @UPDATE_MU_GROUPS_CMD: &struct iwl_mu_group_mgmt_cmd - */ - UPDATE_MU_GROUPS_CMD = 0x1, - - /** - * @TRIGGER_RX_QUEUES_NOTIF_CMD: &struct iwl_rxq_sync_cmd - */ - TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2, - - /** - * @STA_PM_NOTIF: &struct iwl_mvm_pm_state_notification - */ - STA_PM_NOTIF = 0xFD, - - /** - * @MU_GROUP_MGMT_NOTIF: &struct iwl_mu_group_mgmt_notif - */ - MU_GROUP_MGMT_NOTIF = 0xFE, - - /** - * @RX_QUEUES_NOTIFICATION: &struct iwl_rxq_sync_notification - */ - RX_QUEUES_NOTIFICATION = 0xFF, -}; - -/** - * enum iwl_prot_offload_subcmd_ids - protocol offload commands - */ -enum iwl_prot_offload_subcmd_ids { - /** - * @STORED_BEACON_NTF: &struct iwl_stored_beacon_notif - */ - STORED_BEACON_NTF = 0xFF, -}; - -/** - * enum iwl_regulatory_and_nvm_subcmd_ids - regulatory/NVM commands - */ -enum iwl_regulatory_and_nvm_subcmd_ids { - /** - * @NVM_ACCESS_COMPLETE: &struct iwl_nvm_access_complete_cmd - */ - NVM_ACCESS_COMPLETE = 0x0, - - /** - * @NVM_GET_INFO: - * Command is &struct iwl_nvm_get_info, - * response is &struct iwl_nvm_get_info_rsp - */ - NVM_GET_INFO = 0x2, -}; - -/** - * enum iwl_debug_cmds - debug commands - */ -enum iwl_debug_cmds { - /** - * @LMAC_RD_WR: - * LMAC memory read/write, using &struct iwl_dbg_mem_access_cmd and - * &struct iwl_dbg_mem_access_rsp - */ - LMAC_RD_WR = 0x0, - /** - * @UMAC_RD_WR: - * UMAC memory read/write, using &struct iwl_dbg_mem_access_cmd and - * &struct iwl_dbg_mem_access_rsp - */ - UMAC_RD_WR = 0x1, - /** - * @MFU_ASSERT_DUMP_NTF: - * &struct iwl_mfu_assert_dump_notif - */ - MFU_ASSERT_DUMP_NTF = 0xFE, -}; - -/** - * enum iwl_mvm_command_groups - command groups for the firmware - * @LEGACY_GROUP: legacy group, uses command IDs from &enum iwl_legacy_cmds - * @LONG_GROUP: legacy group with long header, also uses command IDs - * from &enum iwl_legacy_cmds - * @SYSTEM_GROUP: system group, uses command IDs from - * &enum iwl_system_subcmd_ids - * @MAC_CONF_GROUP: MAC configuration group, uses command IDs from - 
* &enum iwl_mac_conf_subcmd_ids - * @PHY_OPS_GROUP: PHY operations group, uses command IDs from - * &enum iwl_phy_ops_subcmd_ids - * @DATA_PATH_GROUP: data path group, uses command IDs from - * &enum iwl_data_path_subcmd_ids - * @NAN_GROUP: NAN group, uses command IDs from &enum iwl_nan_subcmd_ids - * @TOF_GROUP: TOF group, uses command IDs from &enum iwl_tof_subcmd_ids - * @PROT_OFFLOAD_GROUP: protocol offload group, uses command IDs from - * &enum iwl_prot_offload_subcmd_ids - * @REGULATORY_AND_NVM_GROUP: regulatory/NVM group, uses command IDs from - * &enum iwl_regulatory_and_nvm_subcmd_ids - * @DEBUG_GROUP: Debug group, uses command IDs from &enum iwl_debug_cmds - */ -enum iwl_mvm_command_groups { - LEGACY_GROUP = 0x0, - LONG_GROUP = 0x1, - SYSTEM_GROUP = 0x2, - MAC_CONF_GROUP = 0x3, - PHY_OPS_GROUP = 0x4, - DATA_PATH_GROUP = 0x5, - PROT_OFFLOAD_GROUP = 0xb, - REGULATORY_AND_NVM_GROUP = 0xc, - DEBUG_GROUP = 0xf, -}; - -/** - * struct iwl_cmd_response - generic response struct for most commands - * @status: status of the command asked, changes for each one - */ -struct iwl_cmd_response { - __le32 status; -}; - -/* - * struct iwl_dqa_enable_cmd - * @cmd_queue: the TXQ number of the command queue - */ -struct iwl_dqa_enable_cmd { - __le32 cmd_queue; -} __packed; /* DQA_CONTROL_CMD_API_S_VER_1 */ - -/* - * struct iwl_tx_ant_cfg_cmd - * @valid: valid antenna configuration - */ -struct iwl_tx_ant_cfg_cmd { - __le32 valid; -} __packed; - -/** - * struct iwl_calib_ctrl - Calibration control struct. - * Sent as part of the phy configuration command. - * @flow_trigger: bitmap for which calibrations to perform according to - * flow triggers, using &enum iwl_calib_cfg - * @event_trigger: bitmap for which calibrations to perform according to - * event triggers, using &enum iwl_calib_cfg - */ -struct iwl_calib_ctrl { - __le32 flow_trigger; - __le32 event_trigger; -} __packed; - -/* This enum defines the bitmap of various calibrations to enable in both - * init ucode and runtime ucode through CALIBRATION_CFG_CMD. 
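Aside: commands outside the legacy group are addressed by a group from enum iwl_mvm_command_groups plus an 8-bit opcode from that group's subcommand enum. The sketch below shows one way the two can be packed into a single wide identifier; the driver has its own macro for this elsewhere, so the exact packing (group in bits 15:8, opcode in bits 7:0) and the EX_ names are assumptions for this standalone example.

#include <stdio.h>
#include <stdint.h>

#define EX_PHY_OPS_GROUP   0x4		/* from enum iwl_mvm_command_groups above */
#define EX_CTDP_CONFIG_CMD 0x03	/* from enum iwl_phy_ops_subcmd_ids above */

static uint16_t ex_wide_id(uint8_t group, uint8_t opcode)
{
	/* assumed packing: group in the upper byte, opcode in the lower byte */
	return ((uint16_t)group << 8) | opcode;
}

int main(void)
{
	printf("PHY_OPS_GROUP/CTDP_CONFIG_CMD -> wide id 0x%04X\n",
	       ex_wide_id(EX_PHY_OPS_GROUP, EX_CTDP_CONFIG_CMD));
	return 0;
}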
- */ -enum iwl_calib_cfg { - IWL_CALIB_CFG_XTAL_IDX = BIT(0), - IWL_CALIB_CFG_TEMPERATURE_IDX = BIT(1), - IWL_CALIB_CFG_VOLTAGE_READ_IDX = BIT(2), - IWL_CALIB_CFG_PAPD_IDX = BIT(3), - IWL_CALIB_CFG_TX_PWR_IDX = BIT(4), - IWL_CALIB_CFG_DC_IDX = BIT(5), - IWL_CALIB_CFG_BB_FILTER_IDX = BIT(6), - IWL_CALIB_CFG_LO_LEAKAGE_IDX = BIT(7), - IWL_CALIB_CFG_TX_IQ_IDX = BIT(8), - IWL_CALIB_CFG_TX_IQ_SKEW_IDX = BIT(9), - IWL_CALIB_CFG_RX_IQ_IDX = BIT(10), - IWL_CALIB_CFG_RX_IQ_SKEW_IDX = BIT(11), - IWL_CALIB_CFG_SENSITIVITY_IDX = BIT(12), - IWL_CALIB_CFG_CHAIN_NOISE_IDX = BIT(13), - IWL_CALIB_CFG_DISCONNECTED_ANT_IDX = BIT(14), - IWL_CALIB_CFG_ANT_COUPLING_IDX = BIT(15), - IWL_CALIB_CFG_DAC_IDX = BIT(16), - IWL_CALIB_CFG_ABS_IDX = BIT(17), - IWL_CALIB_CFG_AGC_IDX = BIT(18), -}; - -/** - * struct iwl_phy_cfg_cmd - Phy configuration command - * @phy_cfg: PHY configuration value, uses &enum iwl_fw_phy_cfg - * @calib_control: calibration control data - */ -struct iwl_phy_cfg_cmd { - __le32 phy_cfg; - struct iwl_calib_ctrl calib_control; -} __packed; - -#define PHY_CFG_RADIO_TYPE (BIT(0) | BIT(1)) -#define PHY_CFG_RADIO_STEP (BIT(2) | BIT(3)) -#define PHY_CFG_RADIO_DASH (BIT(4) | BIT(5)) -#define PHY_CFG_PRODUCT_NUMBER (BIT(6) | BIT(7)) -#define PHY_CFG_TX_CHAIN_A BIT(8) -#define PHY_CFG_TX_CHAIN_B BIT(9) -#define PHY_CFG_TX_CHAIN_C BIT(10) -#define PHY_CFG_RX_CHAIN_A BIT(12) -#define PHY_CFG_RX_CHAIN_B BIT(13) -#define PHY_CFG_RX_CHAIN_C BIT(14) - - -/** - * enum iwl_nvm_access_op - NVM access opcode - * @IWL_NVM_READ: read NVM - * @IWL_NVM_WRITE: write NVM - */ -enum iwl_nvm_access_op { - IWL_NVM_READ = 0, - IWL_NVM_WRITE = 1, -}; - -/** - * enum iwl_nvm_access_target - target of the NVM_ACCESS_CMD - * @NVM_ACCESS_TARGET_CACHE: access the cache - * @NVM_ACCESS_TARGET_OTP: access the OTP - * @NVM_ACCESS_TARGET_EEPROM: access the EEPROM - */ -enum iwl_nvm_access_target { - NVM_ACCESS_TARGET_CACHE = 0, - NVM_ACCESS_TARGET_OTP = 1, - NVM_ACCESS_TARGET_EEPROM = 2, -}; - -/** - * enum iwl_nvm_section_type - section types for NVM_ACCESS_CMD - * @NVM_SECTION_TYPE_SW: software section - * @NVM_SECTION_TYPE_REGULATORY: regulatory section - * @NVM_SECTION_TYPE_CALIBRATION: calibration section - * @NVM_SECTION_TYPE_PRODUCTION: production section - * @NVM_SECTION_TYPE_MAC_OVERRIDE: MAC override section - * @NVM_SECTION_TYPE_PHY_SKU: PHY SKU section - * @NVM_MAX_NUM_SECTIONS: number of sections - */ -enum iwl_nvm_section_type { - NVM_SECTION_TYPE_SW = 1, - NVM_SECTION_TYPE_REGULATORY = 3, - NVM_SECTION_TYPE_CALIBRATION = 4, - NVM_SECTION_TYPE_PRODUCTION = 5, - NVM_SECTION_TYPE_MAC_OVERRIDE = 11, - NVM_SECTION_TYPE_PHY_SKU = 12, - NVM_MAX_NUM_SECTIONS = 13, -}; - -/** - * struct iwl_nvm_access_cmd - Request the device to send an NVM section - * @op_code: &enum iwl_nvm_access_op - * @target: &enum iwl_nvm_access_target - * @type: &enum iwl_nvm_section_type - * @offset: offset in bytes into the section - * @length: in bytes, to read/write - * @data: if write operation, the data to write. On read its empty - */ -struct iwl_nvm_access_cmd { - u8 op_code; - u8 target; - __le16 type; - __le16 offset; - __le16 length; - u8 data[]; -} __packed; /* NVM_ACCESS_CMD_API_S_VER_2 */ - -#define NUM_OF_FW_PAGING_BLOCKS 33 /* 32 for data and 1 block for CSS */ - -/** - * struct iwl_fw_paging_cmd - paging layout - * - * (FW_PAGING_BLOCK_CMD = 0x4f) - * - * Send to FW the paging layout in the driver. 
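Aside: struct iwl_nvm_access_cmd above carries an opcode, a target, a section type, an offset and a length. The userspace-style sketch below fills such a request for a 16-byte read of the software section; the mirror struct uses plain integer types with htole16() standing in for the kernel's cpu_to_le16(), and is for illustration only.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <endian.h>

struct ex_nvm_access_cmd {
	uint8_t op_code;	/* IWL_NVM_READ / IWL_NVM_WRITE */
	uint8_t target;		/* NVM_ACCESS_TARGET_* */
	uint16_t type;		/* NVM_SECTION_TYPE_*, little endian */
	uint16_t offset;	/* byte offset into the section, little endian */
	uint16_t length;	/* bytes to read or write, little endian */
} __attribute__((packed));

int main(void)
{
	struct ex_nvm_access_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_code = 0;		/* IWL_NVM_READ */
	cmd.target = 0;			/* NVM_ACCESS_TARGET_CACHE */
	cmd.type = htole16(1);		/* NVM_SECTION_TYPE_SW */
	cmd.offset = htole16(0);
	cmd.length = htole16(16);

	printf("NVM read request: section %u, offset %u, length %u\n",
	       le16toh(cmd.type), le16toh(cmd.offset), le16toh(cmd.length));
	return 0;
}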
- * - * @flags: various flags for the command - * @block_size: the block size in powers of 2 - * @block_num: number of blocks specified in the command. - * @device_phy_addr: virtual addresses from device side - */ -struct iwl_fw_paging_cmd { - __le32 flags; - __le32 block_size; - __le32 block_num; - __le32 device_phy_addr[NUM_OF_FW_PAGING_BLOCKS]; -} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_1 */ - -/* - * Fw items ID's - * - * @IWL_FW_ITEM_ID_PAGING: Address of the pages that the FW will upload - * download - */ -enum iwl_fw_item_id { - IWL_FW_ITEM_ID_PAGING = 3, -}; - -/* - * struct iwl_fw_get_item_cmd - get an item from the fw - */ -struct iwl_fw_get_item_cmd { - __le32 item_id; -} __packed; /* FW_GET_ITEM_CMD_API_S_VER_1 */ - -#define CONT_REC_COMMAND_SIZE 80 -#define ENABLE_CONT_RECORDING 0x15 -#define DISABLE_CONT_RECORDING 0x16 - -/* - * struct iwl_continuous_record_mode - recording mode - */ -struct iwl_continuous_record_mode { - __le16 enable_recording; -} __packed; - -/* - * struct iwl_continuous_record_cmd - enable/disable continuous recording - */ -struct iwl_continuous_record_cmd { - struct iwl_continuous_record_mode record_mode; - u8 pad[CONT_REC_COMMAND_SIZE - - sizeof(struct iwl_continuous_record_mode)]; -} __packed; - -struct iwl_fw_get_item_resp { - __le32 item_id; - __le32 item_byte_cnt; - __le32 item_val; -} __packed; /* FW_GET_ITEM_RSP_S_VER_1 */ - -/** - * struct iwl_nvm_access_resp_ver2 - response to NVM_ACCESS_CMD - * @offset: offset in bytes into the section - * @length: in bytes, either how much was written or read - * @type: NVM_SECTION_TYPE_* - * @status: 0 for success, fail otherwise - * @data: if read operation, the data returned. Empty on write. - */ -struct iwl_nvm_access_resp { - __le16 offset; - __le16 length; - __le16 type; - __le16 status; - u8 data[]; -} __packed; /* NVM_ACCESS_CMD_RESP_API_S_VER_2 */ - -/* MVM_ALIVE 0x1 */ - -/* alive response is_valid values */ -#define ALIVE_RESP_UCODE_OK BIT(0) -#define ALIVE_RESP_RFKILL BIT(1) - -/* alive response ver_type values */ -enum { - FW_TYPE_HW = 0, - FW_TYPE_PROT = 1, - FW_TYPE_AP = 2, - FW_TYPE_WOWLAN = 3, - FW_TYPE_TIMING = 4, - FW_TYPE_WIPAN = 5 -}; - -/* alive response ver_subtype values */ -enum { - FW_SUBTYPE_FULL_FEATURE = 0, - FW_SUBTYPE_BOOTSRAP = 1, /* Not valid */ - FW_SUBTYPE_REDUCED = 2, - FW_SUBTYPE_ALIVE_ONLY = 3, - FW_SUBTYPE_WOWLAN = 4, - FW_SUBTYPE_AP_SUBTYPE = 5, - FW_SUBTYPE_WIPAN = 6, - FW_SUBTYPE_INITIALIZE = 9 -}; - -#define IWL_ALIVE_STATUS_ERR 0xDEAD -#define IWL_ALIVE_STATUS_OK 0xCAFE - -#define IWL_ALIVE_FLG_RFKILL BIT(0) - -struct iwl_lmac_alive { - __le32 ucode_minor; - __le32 ucode_major; - u8 ver_subtype; - u8 ver_type; - u8 mac; - u8 opt; - __le32 timestamp; - __le32 error_event_table_ptr; /* SRAM address for error log */ - __le32 log_event_table_ptr; /* SRAM address for LMAC event log */ - __le32 cpu_register_ptr; - __le32 dbgm_config_ptr; - __le32 alive_counter_ptr; - __le32 scd_base_ptr; /* SRAM address for SCD */ - __le32 st_fwrd_addr; /* pointer to Store and forward */ - __le32 st_fwrd_size; -} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_3 */ - -struct iwl_umac_alive { - __le32 umac_minor; /* UMAC version: minor */ - __le32 umac_major; /* UMAC version: major */ - __le32 error_info_addr; /* SRAM address for UMAC error log */ - __le32 dbg_print_buff_addr; -} __packed; /* UMAC_ALIVE_DATA_API_S_VER_2 */ - -struct mvm_alive_resp_v3 { - __le16 status; - __le16 flags; - struct iwl_lmac_alive lmac_data; - struct iwl_umac_alive umac_data; -} __packed; /* ALIVE_RES_API_S_VER_3 */ - 
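Aside: the alive response defined above reports a 16-bit status word (IWL_ALIVE_STATUS_OK = 0xCAFE, IWL_ALIVE_STATUS_ERR = 0xDEAD) and a flags word with an RF-kill bit. The standalone sketch below shows how those two fields would be interpreted; le16toh() stands in for the kernel's endian helpers and the EX_ names are local to the example.

#include <stdio.h>
#include <stdint.h>
#include <endian.h>

#define EX_ALIVE_STATUS_OK  0xCAFE
#define EX_ALIVE_STATUS_ERR 0xDEAD
#define EX_ALIVE_FLG_RFKILL (1 << 0)

static void ex_check_alive(uint16_t status_le, uint16_t flags_le)
{
	uint16_t status = le16toh(status_le);
	uint16_t flags = le16toh(flags_le);

	printf("firmware alive: %s, rfkill %s\n",
	       status == EX_ALIVE_STATUS_OK ? "ok" : "error",
	       (flags & EX_ALIVE_FLG_RFKILL) ? "asserted" : "clear");
}

int main(void)
{
	ex_check_alive(htole16(EX_ALIVE_STATUS_OK), htole16(0));
	ex_check_alive(htole16(EX_ALIVE_STATUS_ERR), htole16(EX_ALIVE_FLG_RFKILL));
	return 0;
}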
-struct mvm_alive_resp { - __le16 status; - __le16 flags; - struct iwl_lmac_alive lmac_data[2]; - struct iwl_umac_alive umac_data; -} __packed; /* ALIVE_RES_API_S_VER_4 */ - -/* Error response/notification */ -enum { - FW_ERR_UNKNOWN_CMD = 0x0, - FW_ERR_INVALID_CMD_PARAM = 0x1, - FW_ERR_SERVICE = 0x2, - FW_ERR_ARC_MEMORY = 0x3, - FW_ERR_ARC_CODE = 0x4, - FW_ERR_WATCH_DOG = 0x5, - FW_ERR_WEP_GRP_KEY_INDX = 0x10, - FW_ERR_WEP_KEY_SIZE = 0x11, - FW_ERR_OBSOLETE_FUNC = 0x12, - FW_ERR_UNEXPECTED = 0xFE, - FW_ERR_FATAL = 0xFF -}; - -/** - * struct iwl_error_resp - FW error indication - * ( REPLY_ERROR = 0x2 ) - * @error_type: one of FW_ERR_* - * @cmd_id: the command ID for which the error occured - * @reserved1: reserved - * @bad_cmd_seq_num: sequence number of the erroneous command - * @error_service: which service created the error, applicable only if - * error_type = 2, otherwise 0 - * @timestamp: TSF in usecs. - */ -struct iwl_error_resp { - __le32 error_type; - u8 cmd_id; - u8 reserved1; - __le16 bad_cmd_seq_num; - __le32 error_service; - __le64 timestamp; -} __packed; - - -/* Common PHY, MAC and Bindings definitions */ -#define MAX_MACS_IN_BINDING (3) -#define MAX_BINDINGS (4) - -/** - * enum iwl_mvm_id_and_color - ID and color fields in context dword - * @FW_CTXT_ID_POS: position of the ID - * @FW_CTXT_ID_MSK: mask of the ID - * @FW_CTXT_COLOR_POS: position of the color - * @FW_CTXT_COLOR_MSK: mask of the color - * @FW_CTXT_INVALID: value used to indicate unused/invalid - */ -enum iwl_mvm_id_and_color { - FW_CTXT_ID_POS = 0, - FW_CTXT_ID_MSK = 0xff << FW_CTXT_ID_POS, - FW_CTXT_COLOR_POS = 8, - FW_CTXT_COLOR_MSK = 0xff << FW_CTXT_COLOR_POS, - FW_CTXT_INVALID = 0xffffffff, -}; - -#define FW_CMD_ID_AND_COLOR(_id, _color) ((_id << FW_CTXT_ID_POS) |\ - (_color << FW_CTXT_COLOR_POS)) - -/* Possible actions on PHYs, MACs and Bindings */ -enum iwl_phy_ctxt_action { - FW_CTXT_ACTION_STUB = 0, - FW_CTXT_ACTION_ADD, - FW_CTXT_ACTION_MODIFY, - FW_CTXT_ACTION_REMOVE, - FW_CTXT_ACTION_NUM -}; /* COMMON_CONTEXT_ACTION_API_E_VER_1 */ - -/* Time Events */ - -/* Time Event types, according to MAC type */ -enum iwl_time_event_type { - /* BSS Station Events */ - TE_BSS_STA_AGGRESSIVE_ASSOC, - TE_BSS_STA_ASSOC, - TE_BSS_EAP_DHCP_PROT, - TE_BSS_QUIET_PERIOD, - - /* P2P Device Events */ - TE_P2P_DEVICE_DISCOVERABLE, - TE_P2P_DEVICE_LISTEN, - TE_P2P_DEVICE_ACTION_SCAN, - TE_P2P_DEVICE_FULL_SCAN, - - /* P2P Client Events */ - TE_P2P_CLIENT_AGGRESSIVE_ASSOC, - TE_P2P_CLIENT_ASSOC, - TE_P2P_CLIENT_QUIET_PERIOD, - - /* P2P GO Events */ - TE_P2P_GO_ASSOC_PROT, - TE_P2P_GO_REPETITIVET_NOA, - TE_P2P_GO_CT_WINDOW, - - /* WiDi Sync Events */ - TE_WIDI_TX_SYNC, - - /* Channel Switch NoA */ - TE_CHANNEL_SWITCH_PERIOD, - - TE_MAX -}; /* MAC_EVENT_TYPE_API_E_VER_1 */ - - - -/* Time event - defines for command API v1 */ - -/* - * @TE_V1_FRAG_NONE: fragmentation of the time event is NOT allowed. - * @TE_V1_FRAG_SINGLE: fragmentation of the time event is allowed, but only - * the first fragment is scheduled. - * @TE_V1_FRAG_DUAL: fragmentation of the time event is allowed, but only - * the first 2 fragments are scheduled. - * @TE_V1_FRAG_ENDLESS: fragmentation of the time event is allowed, and any - * number of fragments are valid. - * - * Other than the constant defined above, specifying a fragmentation value 'x' - * means that the event can be fragmented but only the first 'x' will be - * scheduled. 
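Aside: the id_and_color word used throughout the MAC/PHY/binding commands packs the context ID into bits 7:0 and the color into bits 15:8, exactly as the FW_CMD_ID_AND_COLOR() macro above describes. Below is a runnable restatement of that packing and the corresponding unpacking; the EX_ names mirror the definitions in the hunk and exist only for this example.

#include <stdio.h>

#define EX_CTXT_ID_POS    0
#define EX_CTXT_ID_MSK    (0xff << EX_CTXT_ID_POS)
#define EX_CTXT_COLOR_POS 8
#define EX_CTXT_COLOR_MSK (0xff << EX_CTXT_COLOR_POS)

#define EX_CMD_ID_AND_COLOR(id, color) \
	(((id) << EX_CTXT_ID_POS) | ((color) << EX_CTXT_COLOR_POS))

int main(void)
{
	unsigned int word = EX_CMD_ID_AND_COLOR(2, 5);

	printf("id_and_color=0x%04x -> id=%u color=%u\n", word,
	       (word & EX_CTXT_ID_MSK) >> EX_CTXT_ID_POS,
	       (word & EX_CTXT_COLOR_MSK) >> EX_CTXT_COLOR_POS);
	return 0;
}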
- */ -enum { - TE_V1_FRAG_NONE = 0, - TE_V1_FRAG_SINGLE = 1, - TE_V1_FRAG_DUAL = 2, - TE_V1_FRAG_ENDLESS = 0xffffffff -}; - -/* If a Time Event can be fragmented, this is the max number of fragments */ -#define TE_V1_FRAG_MAX_MSK 0x0fffffff -/* Repeat the time event endlessly (until removed) */ -#define TE_V1_REPEAT_ENDLESS 0xffffffff -/* If a Time Event has bounded repetitions, this is the maximal value */ -#define TE_V1_REPEAT_MAX_MSK_V1 0x0fffffff - -/* Time Event dependencies: none, on another TE, or in a specific time */ -enum { - TE_V1_INDEPENDENT = 0, - TE_V1_DEP_OTHER = BIT(0), - TE_V1_DEP_TSF = BIT(1), - TE_V1_EVENT_SOCIOPATHIC = BIT(2), -}; /* MAC_EVENT_DEPENDENCY_POLICY_API_E_VER_2 */ - -/* - * @TE_V1_NOTIF_NONE: no notifications - * @TE_V1_NOTIF_HOST_EVENT_START: request/receive notification on event start - * @TE_V1_NOTIF_HOST_EVENT_END:request/receive notification on event end - * @TE_V1_NOTIF_INTERNAL_EVENT_START: internal FW use - * @TE_V1_NOTIF_INTERNAL_EVENT_END: internal FW use. - * @TE_V1_NOTIF_HOST_FRAG_START: request/receive notification on frag start - * @TE_V1_NOTIF_HOST_FRAG_END:request/receive notification on frag end - * @TE_V1_NOTIF_INTERNAL_FRAG_START: internal FW use. - * @TE_V1_NOTIF_INTERNAL_FRAG_END: internal FW use. - * - * Supported Time event notifications configuration. - * A notification (both event and fragment) includes a status indicating weather - * the FW was able to schedule the event or not. For fragment start/end - * notification the status is always success. There is no start/end fragment - * notification for monolithic events. - */ -enum { - TE_V1_NOTIF_NONE = 0, - TE_V1_NOTIF_HOST_EVENT_START = BIT(0), - TE_V1_NOTIF_HOST_EVENT_END = BIT(1), - TE_V1_NOTIF_INTERNAL_EVENT_START = BIT(2), - TE_V1_NOTIF_INTERNAL_EVENT_END = BIT(3), - TE_V1_NOTIF_HOST_FRAG_START = BIT(4), - TE_V1_NOTIF_HOST_FRAG_END = BIT(5), - TE_V1_NOTIF_INTERNAL_FRAG_START = BIT(6), - TE_V1_NOTIF_INTERNAL_FRAG_END = BIT(7), -}; /* MAC_EVENT_ACTION_API_E_VER_2 */ - -/* Time event - defines for command API */ - -/* - * @TE_V2_FRAG_NONE: fragmentation of the time event is NOT allowed. - * @TE_V2_FRAG_SINGLE: fragmentation of the time event is allowed, but only - * the first fragment is scheduled. - * @TE_V2_FRAG_DUAL: fragmentation of the time event is allowed, but only - * the first 2 fragments are scheduled. - * @TE_V2_FRAG_ENDLESS: fragmentation of the time event is allowed, and any - * number of fragments are valid. - * - * Other than the constant defined above, specifying a fragmentation value 'x' - * means that the event can be fragmented but only the first 'x' will be - * scheduled. - */ -enum { - TE_V2_FRAG_NONE = 0, - TE_V2_FRAG_SINGLE = 1, - TE_V2_FRAG_DUAL = 2, - TE_V2_FRAG_MAX = 0xfe, - TE_V2_FRAG_ENDLESS = 0xff -}; - -/* Repeat the time event endlessly (until removed) */ -#define TE_V2_REPEAT_ENDLESS 0xff -/* If a Time Event has bounded repetitions, this is the maximal value */ -#define TE_V2_REPEAT_MAX 0xfe - -#define TE_V2_PLACEMENT_POS 12 -#define TE_V2_ABSENCE_POS 15 - -/** - * enum iwl_time_event_policy - Time event policy values - * A notification (both event and fragment) includes a status indicating weather - * the FW was able to schedule the event or not. For fragment start/end - * notification the status is always success. There is no start/end fragment - * notification for monolithic events. 
- * - * @TE_V2_DEFAULT_POLICY: independent, social, present, unoticable - * @TE_V2_NOTIF_HOST_EVENT_START: request/receive notification on event start - * @TE_V2_NOTIF_HOST_EVENT_END:request/receive notification on event end - * @TE_V2_NOTIF_INTERNAL_EVENT_START: internal FW use - * @TE_V2_NOTIF_INTERNAL_EVENT_END: internal FW use. - * @TE_V2_NOTIF_HOST_FRAG_START: request/receive notification on frag start - * @TE_V2_NOTIF_HOST_FRAG_END:request/receive notification on frag end - * @TE_V2_NOTIF_INTERNAL_FRAG_START: internal FW use. - * @TE_V2_NOTIF_INTERNAL_FRAG_END: internal FW use. - * @T2_V2_START_IMMEDIATELY: start time event immediately - * @TE_V2_DEP_OTHER: depends on another time event - * @TE_V2_DEP_TSF: depends on a specific time - * @TE_V2_EVENT_SOCIOPATHIC: can't co-exist with other events of tha same MAC - * @TE_V2_ABSENCE: are we present or absent during the Time Event. - */ -enum iwl_time_event_policy { - TE_V2_DEFAULT_POLICY = 0x0, - - /* notifications (event start/stop, fragment start/stop) */ - TE_V2_NOTIF_HOST_EVENT_START = BIT(0), - TE_V2_NOTIF_HOST_EVENT_END = BIT(1), - TE_V2_NOTIF_INTERNAL_EVENT_START = BIT(2), - TE_V2_NOTIF_INTERNAL_EVENT_END = BIT(3), - - TE_V2_NOTIF_HOST_FRAG_START = BIT(4), - TE_V2_NOTIF_HOST_FRAG_END = BIT(5), - TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6), - TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7), - T2_V2_START_IMMEDIATELY = BIT(11), - - /* placement characteristics */ - TE_V2_DEP_OTHER = BIT(TE_V2_PLACEMENT_POS), - TE_V2_DEP_TSF = BIT(TE_V2_PLACEMENT_POS + 1), - TE_V2_EVENT_SOCIOPATHIC = BIT(TE_V2_PLACEMENT_POS + 2), - - /* are we present or absent during the Time Event. */ - TE_V2_ABSENCE = BIT(TE_V2_ABSENCE_POS), -}; - -/** - * struct iwl_time_event_cmd - configuring Time Events - * with struct MAC_TIME_EVENT_DATA_API_S_VER_2 (see also - * with version 1. determined by IWL_UCODE_TLV_FLAGS) - * ( TIME_EVENT_CMD = 0x29 ) - * @id_and_color: ID and color of the relevant MAC, - * &enum iwl_mvm_id_and_color - * @action: action to perform, one of &enum iwl_phy_ctxt_action - * @id: this field has two meanings, depending on the action: - * If the action is ADD, then it means the type of event to add. - * For all other actions it is the unique event ID assigned when the - * event was added by the FW. 
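Aside: the policy field of a time event is built by OR-ing together the iwl_time_event_policy bits listed above. The sketch below composes a policy that requests host notifications on event start and end and schedules the event immediately; the bit values repeat the ones documented in the hunk, and the EX_ names are local to this standalone example.

#include <stdio.h>

#define EX_TE_V2_NOTIF_HOST_EVENT_START (1 << 0)
#define EX_TE_V2_NOTIF_HOST_EVENT_END   (1 << 1)
#define EX_T2_V2_START_IMMEDIATELY      (1 << 11)

int main(void)
{
	unsigned int policy = EX_TE_V2_NOTIF_HOST_EVENT_START |
			      EX_TE_V2_NOTIF_HOST_EVENT_END |
			      EX_T2_V2_START_IMMEDIATELY;

	printf("time event policy = 0x%04x (host start/end notifications, start immediately)\n",
	       policy);
	return 0;
}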
- * @apply_time: When to start the Time Event (in GP2) - * @max_delay: maximum delay to event's start (apply time), in TU - * @depends_on: the unique ID of the event we depend on (if any) - * @interval: interval between repetitions, in TU - * @duration: duration of event in TU - * @repeat: how many repetitions to do, can be TE_REPEAT_ENDLESS - * @max_frags: maximal number of fragments the Time Event can be divided to - * @policy: defines whether uCode shall notify the host or other uCode modules - * on event and/or fragment start and/or end - * using one of TE_INDEPENDENT, TE_DEP_OTHER, TE_DEP_TSF - * TE_EVENT_SOCIOPATHIC - * using TE_ABSENCE and using TE_NOTIF_*, - * &enum iwl_time_event_policy - */ -struct iwl_time_event_cmd { - /* COMMON_INDEX_HDR_API_S_VER_1 */ - __le32 id_and_color; - __le32 action; - __le32 id; - /* MAC_TIME_EVENT_DATA_API_S_VER_2 */ - __le32 apply_time; - __le32 max_delay; - __le32 depends_on; - __le32 interval; - __le32 duration; - u8 repeat; - u8 max_frags; - __le16 policy; -} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_2 */ - -/** - * struct iwl_time_event_resp - response structure to iwl_time_event_cmd - * @status: bit 0 indicates success, all others specify errors - * @id: the Time Event type - * @unique_id: the unique ID assigned (in ADD) or given (others) to the TE - * @id_and_color: ID and color of the relevant MAC, - * &enum iwl_mvm_id_and_color - */ -struct iwl_time_event_resp { - __le32 status; - __le32 id; - __le32 unique_id; - __le32 id_and_color; -} __packed; /* MAC_TIME_EVENT_RSP_API_S_VER_1 */ - -/** - * struct iwl_time_event_notif - notifications of time event start/stop - * ( TIME_EVENT_NOTIFICATION = 0x2a ) - * @timestamp: action timestamp in GP2 - * @session_id: session's unique id - * @unique_id: unique id of the Time Event itself - * @id_and_color: ID and color of the relevant MAC - * @action: &enum iwl_time_event_policy - * @status: true if scheduled, false otherwise (not executed) - */ -struct iwl_time_event_notif { - __le32 timestamp; - __le32 session_id; - __le32 unique_id; - __le32 id_and_color; - __le32 action; - __le32 status; -} __packed; /* MAC_TIME_EVENT_NTFY_API_S_VER_1 */ - - -/* Bindings and Time Quota */ - -/** - * struct iwl_binding_cmd_v1 - configuring bindings - * ( BINDING_CONTEXT_CMD = 0x2b ) - * @id_and_color: ID and color of the relevant Binding, - * &enum iwl_mvm_id_and_color - * @action: action to perform, one of FW_CTXT_ACTION_* - * @macs: array of MAC id and colors which belong to the binding, - * &enum iwl_mvm_id_and_color - * @phy: PHY id and color which belongs to the binding, - * &enum iwl_mvm_id_and_color - */ -struct iwl_binding_cmd_v1 { - /* COMMON_INDEX_HDR_API_S_VER_1 */ - __le32 id_and_color; - __le32 action; - /* BINDING_DATA_API_S_VER_1 */ - __le32 macs[MAX_MACS_IN_BINDING]; - __le32 phy; -} __packed; /* BINDING_CMD_API_S_VER_1 */ - -/** - * struct iwl_binding_cmd - configuring bindings - * ( BINDING_CONTEXT_CMD = 0x2b ) - * @id_and_color: ID and color of the relevant Binding, - * &enum iwl_mvm_id_and_color - * @action: action to perform, one of FW_CTXT_ACTION_* - * @macs: array of MAC id and colors which belong to the binding - * &enum iwl_mvm_id_and_color - * @phy: PHY id and color which belongs to the binding - * &enum iwl_mvm_id_and_color - * @lmac_id: the lmac id the binding belongs to - */ -struct iwl_binding_cmd { - /* COMMON_INDEX_HDR_API_S_VER_1 */ - __le32 id_and_color; - __le32 action; - /* BINDING_DATA_API_S_VER_1 */ - __le32 macs[MAX_MACS_IN_BINDING]; - __le32 phy; - __le32 lmac_id; -} __packed; 
/* BINDING_CMD_API_S_VER_2 */ - -#define IWL_BINDING_CMD_SIZE_V1 sizeof(struct iwl_binding_cmd_v1) -#define IWL_LMAC_24G_INDEX 0 -#define IWL_LMAC_5G_INDEX 1 - -/* The maximal number of fragments in the FW's schedule session */ -#define IWL_MVM_MAX_QUOTA 128 - -/** - * struct iwl_time_quota_data - configuration of time quota per binding - * @id_and_color: ID and color of the relevant Binding, - * &enum iwl_mvm_id_and_color - * @quota: absolute time quota in TU. The scheduler will try to divide the - * remainig quota (after Time Events) according to this quota. - * @max_duration: max uninterrupted context duration in TU - */ -struct iwl_time_quota_data { - __le32 id_and_color; - __le32 quota; - __le32 max_duration; -} __packed; /* TIME_QUOTA_DATA_API_S_VER_1 */ - -/** - * struct iwl_time_quota_cmd - configuration of time quota between bindings - * ( TIME_QUOTA_CMD = 0x2c ) - * @quotas: allocations per binding - * Note: on non-CDB the fourth one is the auxilary mac and is - * essentially zero. - * On CDB the fourth one is a regular binding. - */ -struct iwl_time_quota_cmd { - struct iwl_time_quota_data quotas[MAX_BINDINGS]; -} __packed; /* TIME_QUOTA_ALLOCATION_CMD_API_S_VER_1 */ - - -/* PHY context */ - -/* Supported bands */ -#define PHY_BAND_5 (0) -#define PHY_BAND_24 (1) - -/* Supported channel width, vary if there is VHT support */ -#define PHY_VHT_CHANNEL_MODE20 (0x0) -#define PHY_VHT_CHANNEL_MODE40 (0x1) -#define PHY_VHT_CHANNEL_MODE80 (0x2) -#define PHY_VHT_CHANNEL_MODE160 (0x3) - -/* - * Control channel position: - * For legacy set bit means upper channel, otherwise lower. - * For VHT - bit-2 marks if the control is lower/upper relative to center-freq - * bits-1:0 mark the distance from the center freq. for 20Mhz, offset is 0. - * center_freq - * | - * 40Mhz |_______|_______| - * 80Mhz |_______|_______|_______|_______| - * 160Mhz |_______|_______|_______|_______|_______|_______|_______|_______| - * code 011 010 001 000 | 100 101 110 111 - */ -#define PHY_VHT_CTRL_POS_1_BELOW (0x0) -#define PHY_VHT_CTRL_POS_2_BELOW (0x1) -#define PHY_VHT_CTRL_POS_3_BELOW (0x2) -#define PHY_VHT_CTRL_POS_4_BELOW (0x3) -#define PHY_VHT_CTRL_POS_1_ABOVE (0x4) -#define PHY_VHT_CTRL_POS_2_ABOVE (0x5) -#define PHY_VHT_CTRL_POS_3_ABOVE (0x6) -#define PHY_VHT_CTRL_POS_4_ABOVE (0x7) - -/* - * @band: PHY_BAND_* - * @channel: channel number - * @width: PHY_[VHT|LEGACY]_CHANNEL_* - * @ctrl channel: PHY_[VHT|LEGACY]_CTRL_* - */ -struct iwl_fw_channel_info { - u8 band; - u8 channel; - u8 width; - u8 ctrl_pos; -} __packed; - -#define PHY_RX_CHAIN_DRIVER_FORCE_POS (0) -#define PHY_RX_CHAIN_DRIVER_FORCE_MSK \ - (0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS) -#define PHY_RX_CHAIN_VALID_POS (1) -#define PHY_RX_CHAIN_VALID_MSK \ - (0x7 << PHY_RX_CHAIN_VALID_POS) -#define PHY_RX_CHAIN_FORCE_SEL_POS (4) -#define PHY_RX_CHAIN_FORCE_SEL_MSK \ - (0x7 << PHY_RX_CHAIN_FORCE_SEL_POS) -#define PHY_RX_CHAIN_FORCE_MIMO_SEL_POS (7) -#define PHY_RX_CHAIN_FORCE_MIMO_SEL_MSK \ - (0x7 << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS) -#define PHY_RX_CHAIN_CNT_POS (10) -#define PHY_RX_CHAIN_CNT_MSK \ - (0x3 << PHY_RX_CHAIN_CNT_POS) -#define PHY_RX_CHAIN_MIMO_CNT_POS (12) -#define PHY_RX_CHAIN_MIMO_CNT_MSK \ - (0x3 << PHY_RX_CHAIN_MIMO_CNT_POS) -#define PHY_RX_CHAIN_MIMO_FORCE_POS (14) -#define PHY_RX_CHAIN_MIMO_FORCE_MSK \ - (0x1 << PHY_RX_CHAIN_MIMO_FORCE_POS) - -/* TODO: fix the value, make it depend on firmware at runtime? 
*/ -#define NUM_PHY_CTX 3 - -/* TODO: complete missing documentation */ -/** - * struct iwl_phy_context_cmd - config of the PHY context - * ( PHY_CONTEXT_CMD = 0x8 ) - * @id_and_color: ID and color of the relevant Binding - * @action: action to perform, one of FW_CTXT_ACTION_* - * @apply_time: 0 means immediate apply and context switch. - * other value means apply new params after X usecs - * @tx_param_color: ??? - * @ci: channel info - * @txchain_info: ??? - * @rxchain_info: ??? - * @acquisition_data: ??? - * @dsp_cfg_flags: set to 0 - */ -struct iwl_phy_context_cmd { - /* COMMON_INDEX_HDR_API_S_VER_1 */ - __le32 id_and_color; - __le32 action; - /* PHY_CONTEXT_DATA_API_S_VER_1 */ - __le32 apply_time; - __le32 tx_param_color; - struct iwl_fw_channel_info ci; - __le32 txchain_info; - __le32 rxchain_info; - __le32 acquisition_data; - __le32 dsp_cfg_flags; -} __packed; /* PHY_CONTEXT_CMD_API_VER_1 */ - -/* - * Aux ROC command - * - * Command requests the firmware to create a time event for a certain duration - * and remain on the given channel. This is done by using the Aux framework in - * the FW. - * The command was first used for Hot Spot issues - but can be used regardless - * to Hot Spot. - * - * ( HOT_SPOT_CMD 0x53 ) - * - * @id_and_color: ID and color of the MAC - * @action: action to perform, one of FW_CTXT_ACTION_* - * @event_unique_id: If the action FW_CTXT_ACTION_REMOVE then the - * event_unique_id should be the id of the time event assigned by ucode. - * Otherwise ignore the event_unique_id. - * @sta_id_and_color: station id and color, resumed during "Remain On Channel" - * activity. - * @channel_info: channel info - * @node_addr: Our MAC Address - * @reserved: reserved for alignment - * @apply_time: GP2 value to start (should always be the current GP2 value) - * @apply_time_max_delay: Maximum apply time delay value in TU. Defines max - * time by which start of the event is allowed to be postponed. - * @duration: event duration in TU To calculate event duration: - * timeEventDuration = min(duration, remainingQuota) - */ -struct iwl_hs20_roc_req { - /* COMMON_INDEX_HDR_API_S_VER_1 hdr */ - __le32 id_and_color; - __le32 action; - __le32 event_unique_id; - __le32 sta_id_and_color; - struct iwl_fw_channel_info channel_info; - u8 node_addr[ETH_ALEN]; - __le16 reserved; - __le32 apply_time; - __le32 apply_time_max_delay; - __le32 duration; -} __packed; /* HOT_SPOT_CMD_API_S_VER_1 */ - -/* - * values for AUX ROC result values - */ -enum iwl_mvm_hot_spot { - HOT_SPOT_RSP_STATUS_OK, - HOT_SPOT_RSP_STATUS_TOO_MANY_EVENTS, - HOT_SPOT_MAX_NUM_OF_SESSIONS, -}; - -/* - * Aux ROC command response - * - * In response to iwl_hs20_roc_req the FW sends this command to notify the - * driver the uid of the timevent. 
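The control-channel position encoding above is easiest to read with a concrete value. A minimal sketch for an 80 MHz channel centred on channel 42 whose primary 20 MHz channel is 36, i.e. the second sub-channel below the centre frequency per the diagram; whether @channel carries the control or the centre channel number is not spelled out in this hunk, so treat that as an assumption.

/*
 * Illustrative sketch only: 5 GHz, 80 MHz wide, centre channel 42,
 * primary (control) channel 36 -> second 20 MHz sub-channel below the
 * centre frequency, i.e. PHY_VHT_CTRL_POS_2_BELOW in the table above.
 */
static void example_fill_channel_info(struct iwl_fw_channel_info *ci)
{
	ci->band = PHY_BAND_5;
	ci->channel = 36;	/* assumed: control channel number */
	ci->width = PHY_VHT_CHANNEL_MODE80;
	ci->ctrl_pos = PHY_VHT_CTRL_POS_2_BELOW;
}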
- * - * ( HOT_SPOT_CMD 0x53 ) - * - * @event_unique_id: Unique ID of time event assigned by ucode - * @status: Return status 0 is success, all the rest used for specific errors - */ -struct iwl_hs20_roc_res { - __le32 event_unique_id; - __le32 status; -} __packed; /* HOT_SPOT_RSP_API_S_VER_1 */ - -/** - * struct iwl_radio_version_notif - information on the radio version - * ( RADIO_VERSION_NOTIFICATION = 0x68 ) - * @radio_flavor: radio flavor - * @radio_step: radio version step - * @radio_dash: radio version dash - */ -struct iwl_radio_version_notif { - __le32 radio_flavor; - __le32 radio_step; - __le32 radio_dash; -} __packed; /* RADIO_VERSION_NOTOFICATION_S_VER_1 */ - -enum iwl_card_state_flags { - CARD_ENABLED = 0x00, - HW_CARD_DISABLED = 0x01, - SW_CARD_DISABLED = 0x02, - CT_KILL_CARD_DISABLED = 0x04, - HALT_CARD_DISABLED = 0x08, - CARD_DISABLED_MSK = 0x0f, - CARD_IS_RX_ON = 0x10, -}; - -/** - * struct iwl_radio_version_notif - information on the radio version - * ( CARD_STATE_NOTIFICATION = 0xa1 ) - * @flags: %iwl_card_state_flags - */ -struct iwl_card_state_notif { - __le32 flags; -} __packed; /* CARD_STATE_NTFY_API_S_VER_1 */ - -/** - * struct iwl_missed_beacons_notif - information on missed beacons - * ( MISSED_BEACONS_NOTIFICATION = 0xa2 ) - * @mac_id: interface ID - * @consec_missed_beacons_since_last_rx: number of consecutive missed - * beacons since last RX. - * @consec_missed_beacons: number of consecutive missed beacons - * @num_expected_beacons: number of expected beacons - * @num_recvd_beacons: number of received beacons - */ -struct iwl_missed_beacons_notif { - __le32 mac_id; - __le32 consec_missed_beacons_since_last_rx; - __le32 consec_missed_beacons; - __le32 num_expected_beacons; - __le32 num_recvd_beacons; -} __packed; /* MISSED_BEACON_NTFY_API_S_VER_3 */ - -/** - * struct iwl_mfuart_load_notif - mfuart image version & status - * ( MFUART_LOAD_NOTIFICATION = 0xb1 ) - * @installed_ver: installed image version - * @external_ver: external image version - * @status: MFUART loading status - * @duration: MFUART loading time - * @image_size: MFUART image size in bytes -*/ -struct iwl_mfuart_load_notif { - __le32 installed_ver; - __le32 external_ver; - __le32 status; - __le32 duration; - /* image size valid only in v2 of the command */ - __le32 image_size; -} __packed; /*MFU_LOADER_NTFY_API_S_VER_2*/ - -/** - * struct iwl_mfu_assert_dump_notif - mfuart dump logs - * ( MFU_ASSERT_DUMP_NTF = 0xfe ) - * @assert_id: mfuart assert id that cause the notif - * @curr_reset_num: number of asserts since uptime - * @index_num: current chunk id - * @parts_num: total number of chunks - * @data_size: number of data bytes sent - * @data: data buffer - */ -struct iwl_mfu_assert_dump_notif { - __le32 assert_id; - __le32 curr_reset_num; - __le16 index_num; - __le16 parts_num; - __le32 data_size; - __le32 data[0]; -} __packed; /*MFU_DUMP_ASSERT_API_S_VER_1*/ - -#define MAX_PORT_ID_NUM 2 -#define MAX_MCAST_FILTERING_ADDRESSES 256 - -/** - * struct iwl_mcast_filter_cmd - configure multicast filter. - * @filter_own: Set 1 to filter out multicast packets sent by station itself - * @port_id: Multicast MAC addresses array specifier. This is a strange way - * to identify network interface adopted in host-device IF. - * It is used by FW as index in array of addresses. This array has - * MAX_PORT_ID_NUM members. - * @count: Number of MAC addresses in the array - * @pass_all: Set 1 to pass all multicast packets. - * @bssid: current association BSSID. 
- * @reserved: reserved - * @addr_list: Place holder for array of MAC addresses. - * IMPORTANT: add padding if necessary to ensure DWORD alignment. - */ -struct iwl_mcast_filter_cmd { - u8 filter_own; - u8 port_id; - u8 count; - u8 pass_all; - u8 bssid[6]; - u8 reserved[2]; - u8 addr_list[0]; -} __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */ - -#define MAX_BCAST_FILTERS 8 -#define MAX_BCAST_FILTER_ATTRS 2 - -/** - * enum iwl_mvm_bcast_filter_attr_offset - written by fw for each Rx packet - * @BCAST_FILTER_OFFSET_PAYLOAD_START: offset is from payload start. - * @BCAST_FILTER_OFFSET_IP_END: offset is from ip header end (i.e. - * start of ip payload). - */ -enum iwl_mvm_bcast_filter_attr_offset { - BCAST_FILTER_OFFSET_PAYLOAD_START = 0, - BCAST_FILTER_OFFSET_IP_END = 1, -}; - -/** - * struct iwl_fw_bcast_filter_attr - broadcast filter attribute - * @offset_type: &enum iwl_mvm_bcast_filter_attr_offset. - * @offset: starting offset of this pattern. - * @reserved1: reserved - * @val: value to match - big endian (MSB is the first - * byte to match from offset pos). - * @mask: mask to match (big endian). - */ -struct iwl_fw_bcast_filter_attr { - u8 offset_type; - u8 offset; - __le16 reserved1; - __be32 val; - __be32 mask; -} __packed; /* BCAST_FILTER_ATT_S_VER_1 */ - -/** - * enum iwl_mvm_bcast_filter_frame_type - filter frame type - * @BCAST_FILTER_FRAME_TYPE_ALL: consider all frames. - * @BCAST_FILTER_FRAME_TYPE_IPV4: consider only ipv4 frames - */ -enum iwl_mvm_bcast_filter_frame_type { - BCAST_FILTER_FRAME_TYPE_ALL = 0, - BCAST_FILTER_FRAME_TYPE_IPV4 = 1, -}; - -/** - * struct iwl_fw_bcast_filter - broadcast filter - * @discard: discard frame (1) or let it pass (0). - * @frame_type: &enum iwl_mvm_bcast_filter_frame_type. - * @reserved1: reserved - * @num_attrs: number of valid attributes in this filter. - * @attrs: attributes of this filter. a filter is considered matched - * only when all its attributes are matched (i.e. AND relationship) - */ -struct iwl_fw_bcast_filter { - u8 discard; - u8 frame_type; - u8 num_attrs; - u8 reserved1; - struct iwl_fw_bcast_filter_attr attrs[MAX_BCAST_FILTER_ATTRS]; -} __packed; /* BCAST_FILTER_S_VER_1 */ - -#define BA_WINDOW_STREAMS_MAX 16 -#define BA_WINDOW_STATUS_TID_MSK 0x000F -#define BA_WINDOW_STATUS_STA_ID_POS 4 -#define BA_WINDOW_STATUS_STA_ID_MSK 0x01F0 -#define BA_WINDOW_STATUS_VALID_MSK BIT(9) - -/** - * struct iwl_ba_window_status_notif - reordering window's status notification - * @bitmap: bitmap of received frames [start_seq_num + 0]..[start_seq_num + 63] - * @ra_tid: bit 3:0 - TID, bit 8:4 - STA_ID, bit 9 - valid - * @start_seq_num: the start sequence number of the bitmap - * @mpdu_rx_count: the number of received MPDUs since entering D0i3 - */ -struct iwl_ba_window_status_notif { - __le64 bitmap[BA_WINDOW_STREAMS_MAX]; - __le16 ra_tid[BA_WINDOW_STREAMS_MAX]; - __le32 start_seq_num[BA_WINDOW_STREAMS_MAX]; - __le16 mpdu_rx_count[BA_WINDOW_STREAMS_MAX]; -} __packed; /* BA_WINDOW_STATUS_NTFY_API_S_VER_1 */ - -/** - * struct iwl_fw_bcast_mac - per-mac broadcast filtering configuration. - * @default_discard: default action for this mac (discard (1) / pass (0)). - * @reserved1: reserved - * @attached_filters: bitmap of relevant filters for this mac. 
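As a worked example of the attribute matching described above, a minimal sketch of a filter that lets broadcast DHCP replies through by matching the UDP destination port in the first four bytes of the IP payload (assuming a UDP payload; the val/mask comparison is big endian as documented).

/*
 * Illustrative sketch only: pass IPv4 broadcast frames whose UDP
 * destination port is 68 (DHCP client). Bytes 0-3 of the IP payload are
 * the UDP source and destination ports, so masking the low 16 bits of
 * the big-endian dword selects the destination port.
 */
static void example_fill_dhcp_bcast_filter(struct iwl_fw_bcast_filter *f)
{
	memset(f, 0, sizeof(*f));
	f->discard = 0;		/* matching frames are passed, not dropped */
	f->frame_type = BCAST_FILTER_FRAME_TYPE_IPV4;
	f->num_attrs = 1;
	f->attrs[0].offset_type = BCAST_FILTER_OFFSET_IP_END;
	f->attrs[0].offset = 0;
	f->attrs[0].mask = cpu_to_be32(0x0000ffff);
	f->attrs[0].val = cpu_to_be32(68);
}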
- */ -struct iwl_fw_bcast_mac { - u8 default_discard; - u8 reserved1; - __le16 attached_filters; -} __packed; /* BCAST_MAC_CONTEXT_S_VER_1 */ - -/** - * struct iwl_bcast_filter_cmd - broadcast filtering configuration - * @disable: enable (0) / disable (1) - * @max_bcast_filters: max number of filters (MAX_BCAST_FILTERS) - * @max_macs: max number of macs (NUM_MAC_INDEX_DRIVER) - * @reserved1: reserved - * @filters: broadcast filters - * @macs: broadcast filtering configuration per-mac - */ -struct iwl_bcast_filter_cmd { - u8 disable; - u8 max_bcast_filters; - u8 max_macs; - u8 reserved1; - struct iwl_fw_bcast_filter filters[MAX_BCAST_FILTERS]; - struct iwl_fw_bcast_mac macs[NUM_MAC_INDEX_DRIVER]; -} __packed; /* BCAST_FILTERING_HCMD_API_S_VER_1 */ - -/* - * enum iwl_mvm_marker_id - maker ids - * - * The ids for different type of markers to insert into the usniffer logs - */ -enum iwl_mvm_marker_id { - MARKER_ID_TX_FRAME_LATENCY = 1, -}; /* MARKER_ID_API_E_VER_1 */ - -/** - * struct iwl_mvm_marker - mark info into the usniffer logs - * - * (MARKER_CMD = 0xcb) - * - * Mark the UTC time stamp into the usniffer logs together with additional - * metadata, so the usniffer output can be parsed. - * In the command response the ucode will return the GP2 time. - * - * @dw_len: The amount of dwords following this byte including this byte. - * @marker_id: A unique marker id (iwl_mvm_marker_id). - * @reserved: reserved. - * @timestamp: in milliseconds since 1970-01-01 00:00:00 UTC - * @metadata: additional meta data that will be written to the unsiffer log - */ -struct iwl_mvm_marker { - u8 dw_len; - u8 marker_id; - __le16 reserved; - __le64 timestamp; - __le32 metadata[0]; -} __packed; /* MARKER_API_S_VER_1 */ - -/* - * enum iwl_dc2dc_config_id - flag ids - * - * Ids of dc2dc configuration flags - */ -enum iwl_dc2dc_config_id { - DCDC_LOW_POWER_MODE_MSK_SET = 0x1, /* not used */ - DCDC_FREQ_TUNE_SET = 0x2, -}; /* MARKER_ID_API_E_VER_1 */ - -/** - * struct iwl_dc2dc_config_cmd - configure dc2dc values - * - * (DC2DC_CONFIG_CMD = 0x83) - * - * Set/Get & configure dc2dc values. - * The command always returns the current dc2dc values. - * - * @flags: set/get dc2dc - * @enable_low_power_mode: not used. - * @dc2dc_freq_tune0: frequency divider - digital domain - * @dc2dc_freq_tune1: frequency divider - analog domain - */ -struct iwl_dc2dc_config_cmd { - __le32 flags; - __le32 enable_low_power_mode; /* not used */ - __le32 dc2dc_freq_tune0; - __le32 dc2dc_freq_tune1; -} __packed; /* DC2DC_CONFIG_CMD_API_S_VER_1 */ - -/** - * struct iwl_dc2dc_config_resp - response for iwl_dc2dc_config_cmd - * - * Current dc2dc values returned by the FW. 
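A concrete reading of @dw_len in the marker command above: the fixed part of struct iwl_mvm_marker is 12 bytes, i.e. three dwords, so a marker carrying n metadata dwords is sent with dw_len = 3 + n. A minimal sketch follows; the caller is assumed to have allocated room for the metadata behind the struct.

/*
 * Illustrative sketch only: a TX-latency marker with one metadata dword.
 * The fixed header (dw_len/marker_id/reserved/timestamp) is 3 dwords.
 */
static void example_fill_marker(struct iwl_mvm_marker *mark, u32 meta0)
{
	mark->dw_len = 3 + 1;
	mark->marker_id = MARKER_ID_TX_FRAME_LATENCY;
	mark->reserved = 0;
	/* milliseconds since the Unix epoch, as documented above */
	mark->timestamp = cpu_to_le64(ktime_to_ms(ktime_get_real()));
	mark->metadata[0] = cpu_to_le32(meta0);
}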
- * - * @dc2dc_freq_tune0: frequency divider - digital domain - * @dc2dc_freq_tune1: frequency divider - analog domain - */ -struct iwl_dc2dc_config_resp { - __le32 dc2dc_freq_tune0; - __le32 dc2dc_freq_tune1; -} __packed; /* DC2DC_CONFIG_RESP_API_S_VER_1 */ - -/*********************************** - * Smart Fifo API - ***********************************/ -/* Smart Fifo state */ -enum iwl_sf_state { - SF_LONG_DELAY_ON = 0, /* should never be called by driver */ - SF_FULL_ON, - SF_UNINIT, - SF_INIT_OFF, - SF_HW_NUM_STATES -}; - -/* Smart Fifo possible scenario */ -enum iwl_sf_scenario { - SF_SCENARIO_SINGLE_UNICAST, - SF_SCENARIO_AGG_UNICAST, - SF_SCENARIO_MULTICAST, - SF_SCENARIO_BA_RESP, - SF_SCENARIO_TX_RESP, - SF_NUM_SCENARIO -}; - -#define SF_TRANSIENT_STATES_NUMBER 2 /* SF_LONG_DELAY_ON and SF_FULL_ON */ -#define SF_NUM_TIMEOUT_TYPES 2 /* Aging timer and Idle timer */ - -/* smart FIFO default values */ -#define SF_W_MARK_SISO 6144 -#define SF_W_MARK_MIMO2 8192 -#define SF_W_MARK_MIMO3 6144 -#define SF_W_MARK_LEGACY 4096 -#define SF_W_MARK_SCAN 4096 - -/* SF Scenarios timers for default configuration (aligned to 32 uSec) */ -#define SF_SINGLE_UNICAST_IDLE_TIMER_DEF 160 /* 150 uSec */ -#define SF_SINGLE_UNICAST_AGING_TIMER_DEF 400 /* 0.4 mSec */ -#define SF_AGG_UNICAST_IDLE_TIMER_DEF 160 /* 150 uSec */ -#define SF_AGG_UNICAST_AGING_TIMER_DEF 400 /* 0.4 mSec */ -#define SF_MCAST_IDLE_TIMER_DEF 160 /* 150 mSec */ -#define SF_MCAST_AGING_TIMER_DEF 400 /* 0.4 mSec */ -#define SF_BA_IDLE_TIMER_DEF 160 /* 150 uSec */ -#define SF_BA_AGING_TIMER_DEF 400 /* 0.4 mSec */ -#define SF_TX_RE_IDLE_TIMER_DEF 160 /* 150 uSec */ -#define SF_TX_RE_AGING_TIMER_DEF 400 /* 0.4 mSec */ - -/* SF Scenarios timers for BSS MAC configuration (aligned to 32 uSec) */ -#define SF_SINGLE_UNICAST_IDLE_TIMER 320 /* 300 uSec */ -#define SF_SINGLE_UNICAST_AGING_TIMER 2016 /* 2 mSec */ -#define SF_AGG_UNICAST_IDLE_TIMER 320 /* 300 uSec */ -#define SF_AGG_UNICAST_AGING_TIMER 2016 /* 2 mSec */ -#define SF_MCAST_IDLE_TIMER 2016 /* 2 mSec */ -#define SF_MCAST_AGING_TIMER 10016 /* 10 mSec */ -#define SF_BA_IDLE_TIMER 320 /* 300 uSec */ -#define SF_BA_AGING_TIMER 2016 /* 2 mSec */ -#define SF_TX_RE_IDLE_TIMER 320 /* 300 uSec */ -#define SF_TX_RE_AGING_TIMER 2016 /* 2 mSec */ - -#define SF_LONG_DELAY_AGING_TIMER 1000000 /* 1 Sec */ - -#define SF_CFG_DUMMY_NOTIF_OFF BIT(16) - -/** - * struct iwl_sf_cfg_cmd - Smart Fifo configuration command. - * @state: smart fifo state, types listed in &enum iwl_sf_state. - * @watermark: Minimum allowed availabe free space in RXF for transient state. - * @long_delay_timeouts: aging and idle timer values for each scenario - * in long delay state. - * @full_on_timeouts: timer values for each scenario in full on state. - */ -struct iwl_sf_cfg_cmd { - __le32 state; - __le32 watermark[SF_TRANSIENT_STATES_NUMBER]; - __le32 long_delay_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES]; - __le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES]; -} __packed; /* SF_CFG_API_S_VER_2 */ - -/*********************************** - * Location Aware Regulatory (LAR) API - MCC updates - ***********************************/ - -/** - * struct iwl_mcc_update_cmd_v1 - Request the device to update geographic - * regulatory profile according to the given MCC (Mobile Country Code). - * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain. - * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the - * MCC in the cmd response will be the relevant MCC in the NVM. 
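To make the Smart Fifo tables above concrete, a minimal sketch of a SF_FULL_ON configuration built from the BSS-MAC timer values. Which element of each timeout pair is the aging timer and which the idle timer is not stated in this hunk, so the ordering here is an assumption, as is indexing @watermark by state.

/*
 * Illustrative sketch only: full-on Smart Fifo configuration using the
 * BSS MAC timer values defined above. The [aging, idle] ordering of each
 * timeout pair and the use of the state as the watermark index are
 * assumptions, not spelled out in this hunk.
 */
static void example_fill_sf_cfg(struct iwl_sf_cfg_cmd *cmd)
{
	static const __le32 full_on[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = {
		{ cpu_to_le32(SF_SINGLE_UNICAST_AGING_TIMER),
		  cpu_to_le32(SF_SINGLE_UNICAST_IDLE_TIMER) },
		{ cpu_to_le32(SF_AGG_UNICAST_AGING_TIMER),
		  cpu_to_le32(SF_AGG_UNICAST_IDLE_TIMER) },
		{ cpu_to_le32(SF_MCAST_AGING_TIMER),
		  cpu_to_le32(SF_MCAST_IDLE_TIMER) },
		{ cpu_to_le32(SF_BA_AGING_TIMER),
		  cpu_to_le32(SF_BA_IDLE_TIMER) },
		{ cpu_to_le32(SF_TX_RE_AGING_TIMER),
		  cpu_to_le32(SF_TX_RE_IDLE_TIMER) },
	};

	memset(cmd, 0, sizeof(*cmd));
	cmd->state = cpu_to_le32(SF_FULL_ON);
	cmd->watermark[SF_FULL_ON] = cpu_to_le32(SF_W_MARK_LEGACY);
	memcpy(cmd->full_on_timeouts, full_on, sizeof(full_on));
}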
- * @mcc: given mobile country code - * @source_id: the source from where we got the MCC, see iwl_mcc_source - * @reserved: reserved for alignment - */ -struct iwl_mcc_update_cmd_v1 { - __le16 mcc; - u8 source_id; - u8 reserved; -} __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_1 */ - -/** - * struct iwl_mcc_update_cmd - Request the device to update geographic - * regulatory profile according to the given MCC (Mobile Country Code). - * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain. - * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the - * MCC in the cmd response will be the relevant MCC in the NVM. - * @mcc: given mobile country code - * @source_id: the source from where we got the MCC, see iwl_mcc_source - * @reserved: reserved for alignment - * @key: integrity key for MCC API OEM testing - * @reserved2: reserved - */ -struct iwl_mcc_update_cmd { - __le16 mcc; - u8 source_id; - u8 reserved; - __le32 key; - u8 reserved2[20]; -} __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_2 */ - -/** - * struct iwl_mcc_update_resp_v1 - response to MCC_UPDATE_CMD. - * Contains the new channel control profile map, if changed, and the new MCC - * (mobile country code). - * The new MCC may be different than what was requested in MCC_UPDATE_CMD. - * @status: see &enum iwl_mcc_update_status - * @mcc: the new applied MCC - * @cap: capabilities for all channels which matches the MCC - * @source_id: the MCC source, see iwl_mcc_source - * @n_channels: number of channels in @channels_data (may be 14, 39, 50 or 51 - * channels, depending on platform) - * @channels: channel control data map, DWORD for each channel. Only the first - * 16bits are used. - */ -struct iwl_mcc_update_resp_v1 { - __le32 status; - __le16 mcc; - u8 cap; - u8 source_id; - __le32 n_channels; - __le32 channels[0]; -} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_1 */ - -/** - * struct iwl_mcc_update_resp - response to MCC_UPDATE_CMD. - * Contains the new channel control profile map, if changed, and the new MCC - * (mobile country code). - * The new MCC may be different than what was requested in MCC_UPDATE_CMD. - * @status: see &enum iwl_mcc_update_status - * @mcc: the new applied MCC - * @cap: capabilities for all channels which matches the MCC - * @source_id: the MCC source, see iwl_mcc_source - * @time: time elapsed from the MCC test start (in 30 seconds TU) - * @reserved: reserved. - * @n_channels: number of channels in @channels_data (may be 14, 39, 50 or 51 - * channels, depending on platform) - * @channels: channel control data map, DWORD for each channel. Only the first - * 16bits are used. - */ -struct iwl_mcc_update_resp { - __le32 status; - __le16 mcc; - u8 cap; - u8 source_id; - __le16 time; - __le16 reserved; - __le32 n_channels; - __le32 channels[0]; -} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_2 */ - -/** - * struct iwl_mcc_chub_notif - chub notifies of mcc change - * (MCC_CHUB_UPDATE_CMD = 0xc9) - * The Chub (Communication Hub, CommsHUB) is a HW component that connects to - * the cellular and connectivity cores that gets updates of the mcc, and - * notifies the ucode directly of any mcc change. - * The ucode requests the driver to request the device to update geographic - * regulatory profile according to the given MCC (Mobile Country Code). - * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain. - * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the - * MCC in the cmd response will be the relevant MCC in the NVM. 
- * @mcc: given mobile country code - * @source_id: identity of the change originator, see iwl_mcc_source - * @reserved1: reserved for alignment - */ -struct iwl_mcc_chub_notif { - __le16 mcc; - u8 source_id; - u8 reserved1; -} __packed; /* LAR_MCC_NOTIFY_S */ - -enum iwl_mcc_update_status { - MCC_RESP_NEW_CHAN_PROFILE, - MCC_RESP_SAME_CHAN_PROFILE, - MCC_RESP_INVALID, - MCC_RESP_NVM_DISABLED, - MCC_RESP_ILLEGAL, - MCC_RESP_LOW_PRIORITY, - MCC_RESP_TEST_MODE_ACTIVE, - MCC_RESP_TEST_MODE_NOT_ACTIVE, - MCC_RESP_TEST_MODE_DENIAL_OF_SERVICE, -}; - -enum iwl_mcc_source { - MCC_SOURCE_OLD_FW = 0, - MCC_SOURCE_ME = 1, - MCC_SOURCE_BIOS = 2, - MCC_SOURCE_3G_LTE_HOST = 3, - MCC_SOURCE_3G_LTE_DEVICE = 4, - MCC_SOURCE_WIFI = 5, - MCC_SOURCE_RESERVED = 6, - MCC_SOURCE_DEFAULT = 7, - MCC_SOURCE_UNINITIALIZED = 8, - MCC_SOURCE_MCC_API = 9, - MCC_SOURCE_GET_CURRENT = 0x10, - MCC_SOURCE_GETTING_MCC_TEST_MODE = 0x11, -}; - -/* DTS measurements */ - -enum iwl_dts_measurement_flags { - DTS_TRIGGER_CMD_FLAGS_TEMP = BIT(0), - DTS_TRIGGER_CMD_FLAGS_VOLT = BIT(1), -}; - -/** - * struct iwl_dts_measurement_cmd - request DTS temp and/or voltage measurements - * - * @flags: indicates which measurements we want as specified in - * &enum iwl_dts_measurement_flags - */ -struct iwl_dts_measurement_cmd { - __le32 flags; -} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_CMD_S */ - -/** -* enum iwl_dts_control_measurement_mode - DTS measurement type -* @DTS_AUTOMATIC: Automatic mode (full SW control). Provide temperature read -* back (latest value. Not waiting for new value). Use automatic -* SW DTS configuration. -* @DTS_REQUEST_READ: Request DTS read. Configure DTS with manual settings, -* trigger DTS reading and provide read back temperature read -* when available. -* @DTS_OVER_WRITE: over-write the DTS temperatures in the SW until next read -* @DTS_DIRECT_WITHOUT_MEASURE: DTS returns its latest temperature result, -* without measurement trigger. -*/ -enum iwl_dts_control_measurement_mode { - DTS_AUTOMATIC = 0, - DTS_REQUEST_READ = 1, - DTS_OVER_WRITE = 2, - DTS_DIRECT_WITHOUT_MEASURE = 3, -}; - -/** -* enum iwl_dts_used - DTS to use or used for measurement in the DTS request -* @DTS_USE_TOP: Top -* @DTS_USE_CHAIN_A: chain A -* @DTS_USE_CHAIN_B: chain B -* @DTS_USE_CHAIN_C: chain C -* @XTAL_TEMPERATURE: read temperature from xtal -*/ -enum iwl_dts_used { - DTS_USE_TOP = 0, - DTS_USE_CHAIN_A = 1, - DTS_USE_CHAIN_B = 2, - DTS_USE_CHAIN_C = 3, - XTAL_TEMPERATURE = 4, -}; - -/** -* enum iwl_dts_bit_mode - bit-mode to use in DTS request read mode -* @DTS_BIT6_MODE: bit 6 mode -* @DTS_BIT8_MODE: bit 8 mode -*/ -enum iwl_dts_bit_mode { - DTS_BIT6_MODE = 0, - DTS_BIT8_MODE = 1, -}; - -/** - * struct iwl_ext_dts_measurement_cmd - request extended DTS temp measurements - * @control_mode: see &enum iwl_dts_control_measurement_mode - * @temperature: used when over write DTS mode is selected - * @sensor: set temperature sensor to use. See &enum iwl_dts_used - * @avg_factor: average factor to DTS in request DTS read mode - * @bit_mode: value defines the DTS bit mode to use. 
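A minimal sketch of building the MCC update command described above, for example with "ZZ" to fall back to the NVM default profile. Packing the two ASCII letters as (first << 8 | second) is an assumption; the hunk only says the MCC is a two-letter upper-case code.

/*
 * Illustrative sketch only: request a regulatory update for @alpha2
 * ("ZZ" reverts to the NVM default profile, as documented above). The
 * byte order of the packed two-letter code is an assumption.
 */
static void example_fill_mcc_update(struct iwl_mcc_update_cmd *cmd,
				    const char *alpha2)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->mcc = cpu_to_le16((alpha2[0] << 8) | alpha2[1]);
	cmd->source_id = MCC_SOURCE_WIFI;	/* change originated in the WiFi stack */
}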
See &enum iwl_dts_bit_mode - * @step_duration: step duration for the DTS - */ -struct iwl_ext_dts_measurement_cmd { - __le32 control_mode; - __le32 temperature; - __le32 sensor; - __le32 avg_factor; - __le32 bit_mode; - __le32 step_duration; -} __packed; /* XVT_FW_DTS_CONTROL_MEASUREMENT_REQUEST_API_S */ - -/** - * struct iwl_dts_measurement_notif_v1 - measurements notification - * - * @temp: the measured temperature - * @voltage: the measured voltage - */ -struct iwl_dts_measurement_notif_v1 { - __le32 temp; - __le32 voltage; -} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_1*/ - -/** - * struct iwl_dts_measurement_notif_v2 - measurements notification - * - * @temp: the measured temperature - * @voltage: the measured voltage - * @threshold_idx: the trip index that was crossed - */ -struct iwl_dts_measurement_notif_v2 { - __le32 temp; - __le32 voltage; - __le32 threshold_idx; -} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_2 */ - -/** - * struct ct_kill_notif - CT-kill entry notification - * - * @temperature: the current temperature in celsius - * @reserved: reserved - */ -struct ct_kill_notif { - __le16 temperature; - __le16 reserved; -} __packed; /* GRP_PHY_CT_KILL_NTF */ - -/** -* enum ctdp_cmd_operation - CTDP command operations -* @CTDP_CMD_OPERATION_START: update the current budget -* @CTDP_CMD_OPERATION_STOP: stop ctdp -* @CTDP_CMD_OPERATION_REPORT: get the average budget -*/ -enum iwl_mvm_ctdp_cmd_operation { - CTDP_CMD_OPERATION_START = 0x1, - CTDP_CMD_OPERATION_STOP = 0x2, - CTDP_CMD_OPERATION_REPORT = 0x4, -};/* CTDP_CMD_OPERATION_TYPE_E */ - -/** - * struct iwl_mvm_ctdp_cmd - track and manage the FW power consumption budget - * - * @operation: see &enum iwl_mvm_ctdp_cmd_operation - * @budget: the budget in milliwatt - * @window_size: defined in API but not used - */ -struct iwl_mvm_ctdp_cmd { - __le32 operation; - __le32 budget; - __le32 window_size; -} __packed; - -#define IWL_MAX_DTS_TRIPS 8 - -/** - * struct temp_report_ths_cmd - set temperature thresholds - * - * @num_temps: number of temperature thresholds passed - * @thresholds: array with the thresholds to be configured - */ -struct temp_report_ths_cmd { - __le32 num_temps; - __le16 thresholds[IWL_MAX_DTS_TRIPS]; -} __packed; /* GRP_PHY_TEMP_REPORTING_THRESHOLDS_CMD */ - -/*********************************** - * TDLS API - ***********************************/ - -/* Type of TDLS request */ -enum iwl_tdls_channel_switch_type { - TDLS_SEND_CHAN_SW_REQ = 0, - TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH, - TDLS_MOVE_CH, -}; /* TDLS_STA_CHANNEL_SWITCH_CMD_TYPE_API_E_VER_1 */ - -/** - * struct iwl_tdls_channel_switch_timing - Switch timing in TDLS channel-switch - * @frame_timestamp: GP2 timestamp of channel-switch request/response packet - * received from peer - * @max_offchan_duration: What amount of microseconds out of a DTIM is given - * to the TDLS off-channel communication. For instance if the DTIM is - * 200TU and the TDLS peer is to be given 25% of the time, the value - * given will be 50TU, or 50 * 1024 if translated into microseconds. 
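For the thermal structures above, a minimal sketch configuring two temperature-reporting thresholds; the unit (degrees Celsius) and the ascending ordering are assumptions based on the surrounding kernel-doc.

/*
 * Illustrative sketch only: report when the temperature crosses 75 or
 * 110 degrees. Celsius and ascending order are assumptions.
 */
static void example_fill_temp_thresholds(struct temp_report_ths_cmd *cmd)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->num_temps = cpu_to_le32(2);
	cmd->thresholds[0] = cpu_to_le16(75);
	cmd->thresholds[1] = cpu_to_le16(110);
}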
- * @switch_time: switch time the peer sent in its channel switch timing IE - * @switch_timeout: switch timeout the peer sent in its channel switch timing IE - */ -struct iwl_tdls_channel_switch_timing { - __le32 frame_timestamp; /* GP2 time of peer packet Rx */ - __le32 max_offchan_duration; /* given in micro-seconds */ - __le32 switch_time; /* given in micro-seconds */ - __le32 switch_timeout; /* given in micro-seconds */ -} __packed; /* TDLS_STA_CHANNEL_SWITCH_TIMING_DATA_API_S_VER_1 */ - -#define IWL_TDLS_CH_SW_FRAME_MAX_SIZE 200 - -/** - * struct iwl_tdls_channel_switch_frame - TDLS channel switch frame template - * - * A template representing a TDLS channel-switch request or response frame - * - * @switch_time_offset: offset to the channel switch timing IE in the template - * @tx_cmd: Tx parameters for the frame - * @data: frame data - */ -struct iwl_tdls_channel_switch_frame { - __le32 switch_time_offset; - struct iwl_tx_cmd tx_cmd; - u8 data[IWL_TDLS_CH_SW_FRAME_MAX_SIZE]; -} __packed; /* TDLS_STA_CHANNEL_SWITCH_FRAME_API_S_VER_1 */ - -/** - * struct iwl_tdls_channel_switch_cmd - TDLS channel switch command - * - * The command is sent to initiate a channel switch and also in response to - * incoming TDLS channel-switch request/response packets from remote peers. - * - * @switch_type: see &enum iwl_tdls_channel_switch_type - * @peer_sta_id: station id of TDLS peer - * @ci: channel we switch to - * @timing: timing related data for command - * @frame: channel-switch request/response template, depending to switch_type - */ -struct iwl_tdls_channel_switch_cmd { - u8 switch_type; - __le32 peer_sta_id; - struct iwl_fw_channel_info ci; - struct iwl_tdls_channel_switch_timing timing; - struct iwl_tdls_channel_switch_frame frame; -} __packed; /* TDLS_STA_CHANNEL_SWITCH_CMD_API_S_VER_1 */ - -/** - * struct iwl_tdls_channel_switch_notif - TDLS channel switch start notification - * - * @status: non-zero on success - * @offchannel_duration: duration given in microseconds - * @sta_id: peer currently performing the channel-switch with - */ -struct iwl_tdls_channel_switch_notif { - __le32 status; - __le32 offchannel_duration; - __le32 sta_id; -} __packed; /* TDLS_STA_CHANNEL_SWITCH_NTFY_API_S_VER_1 */ - -/** - * struct iwl_tdls_sta_info - TDLS station info - * - * @sta_id: station id of the TDLS peer - * @tx_to_peer_tid: TID reserved vs. the peer for FW based Tx - * @tx_to_peer_ssn: initial SSN the FW should use for Tx on its TID vs the peer - * @is_initiator: 1 if the peer is the TDLS link initiator, 0 otherwise - */ -struct iwl_tdls_sta_info { - u8 sta_id; - u8 tx_to_peer_tid; - __le16 tx_to_peer_ssn; - __le32 is_initiator; -} __packed; /* TDLS_STA_INFO_VER_1 */ - -/** - * struct iwl_tdls_config_cmd - TDLS basic config command - * - * @id_and_color: MAC id and color being configured - * @tdls_peer_count: amount of currently connected TDLS peers - * @tx_to_ap_tid: TID reverved vs. the AP for FW based Tx - * @tx_to_ap_ssn: initial SSN the FW should use for Tx on its TID vs. the AP - * @sta_info: per-station info. 
Only the first tdls_peer_count entries are set - * @pti_req_data_offset: offset of network-level data for the PTI template - * @pti_req_tx_cmd: Tx parameters for PTI request template - * @pti_req_template: PTI request template data - */ -struct iwl_tdls_config_cmd { - __le32 id_and_color; /* mac id and color */ - u8 tdls_peer_count; - u8 tx_to_ap_tid; - __le16 tx_to_ap_ssn; - struct iwl_tdls_sta_info sta_info[IWL_MVM_TDLS_STA_COUNT]; - - __le32 pti_req_data_offset; - struct iwl_tx_cmd pti_req_tx_cmd; - u8 pti_req_template[0]; -} __packed; /* TDLS_CONFIG_CMD_API_S_VER_1 */ - -/** - * struct iwl_tdls_config_sta_info_res - TDLS per-station config information - * - * @sta_id: station id of the TDLS peer - * @tx_to_peer_last_seq: last sequence number used by FW during FW-based Tx to - * the peer - */ -struct iwl_tdls_config_sta_info_res { - __le16 sta_id; - __le16 tx_to_peer_last_seq; -} __packed; /* TDLS_STA_INFO_RSP_VER_1 */ - -/** - * struct iwl_tdls_config_res - TDLS config information from FW - * - * @tx_to_ap_last_seq: last sequence number used by FW during FW-based Tx to AP - * @sta_info: per-station TDLS config information - */ -struct iwl_tdls_config_res { - __le32 tx_to_ap_last_seq; - struct iwl_tdls_config_sta_info_res sta_info[IWL_MVM_TDLS_STA_COUNT]; -} __packed; /* TDLS_CONFIG_RSP_API_S_VER_1 */ - -#define TX_FIFO_MAX_NUM_9000 8 -#define TX_FIFO_MAX_NUM 15 -#define RX_FIFO_MAX_NUM 2 -#define TX_FIFO_INTERNAL_MAX_NUM 6 - -/** - * struct iwl_shared_mem_cfg_v2 - Shared memory configuration information - * - * @shared_mem_addr: shared memory addr (pre 8000 HW set to 0x0 as MARBH is not - * accessible) - * @shared_mem_size: shared memory size - * @sample_buff_addr: internal sample (mon/adc) buff addr (pre 8000 HW set to - * 0x0 as accessible only via DBGM RDAT) - * @sample_buff_size: internal sample buff size - * @txfifo_addr: start addr of TXF0 (excluding the context table 0.5KB), (pre - * 8000 HW set to 0x0 as not accessible) - * @txfifo_size: size of TXF0 ... TXF7 - * @rxfifo_size: RXF1, RXF2 sizes. If there is no RXF2, it'll have a value of 0 - * @page_buff_addr: used by UMAC and performance debug (page miss analysis), - * when paging is not supported this should be 0 - * @page_buff_size: size of %page_buff_addr - * @rxfifo_addr: Start address of rxFifo - * @internal_txfifo_addr: start address of internalFifo - * @internal_txfifo_size: internal fifos' size - * - * NOTE: on firmware that don't have IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG - * set, the last 3 members don't exist. 
- */ -struct iwl_shared_mem_cfg_v2 { - __le32 shared_mem_addr; - __le32 shared_mem_size; - __le32 sample_buff_addr; - __le32 sample_buff_size; - __le32 txfifo_addr; - __le32 txfifo_size[TX_FIFO_MAX_NUM_9000]; - __le32 rxfifo_size[RX_FIFO_MAX_NUM]; - __le32 page_buff_addr; - __le32 page_buff_size; - __le32 rxfifo_addr; - __le32 internal_txfifo_addr; - __le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM]; -} __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */ - -/** - * struct iwl_shared_mem_lmac_cfg - LMAC shared memory configuration - * - * @txfifo_addr: start addr of TXF0 (excluding the context table 0.5KB) - * @txfifo_size: size of TX FIFOs - * @rxfifo1_addr: RXF1 addr - * @rxfifo1_size: RXF1 size - */ -struct iwl_shared_mem_lmac_cfg { - __le32 txfifo_addr; - __le32 txfifo_size[TX_FIFO_MAX_NUM]; - __le32 rxfifo1_addr; - __le32 rxfifo1_size; - -} __packed; /* SHARED_MEM_ALLOC_LMAC_API_S_VER_1 */ - -/** - * struct iwl_shared_mem_cfg - Shared memory configuration information - * - * @shared_mem_addr: shared memory address - * @shared_mem_size: shared memory size - * @sample_buff_addr: internal sample (mon/adc) buff addr - * @sample_buff_size: internal sample buff size - * @rxfifo2_addr: start addr of RXF2 - * @rxfifo2_size: size of RXF2 - * @page_buff_addr: used by UMAC and performance debug (page miss analysis), - * when paging is not supported this should be 0 - * @page_buff_size: size of %page_buff_addr - * @lmac_num: number of LMACs (1 or 2) - * @lmac_smem: per - LMAC smem data - */ -struct iwl_shared_mem_cfg { - __le32 shared_mem_addr; - __le32 shared_mem_size; - __le32 sample_buff_addr; - __le32 sample_buff_size; - __le32 rxfifo2_addr; - __le32 rxfifo2_size; - __le32 page_buff_addr; - __le32 page_buff_size; - __le32 lmac_num; - struct iwl_shared_mem_lmac_cfg lmac_smem[2]; -} __packed; /* SHARED_MEM_ALLOC_API_S_VER_3 */ - -/** - * struct iwl_mu_group_mgmt_cmd - VHT MU-MIMO group configuration - * - * @reserved: reserved - * @membership_status: a bitmap of MU groups - * @user_position:the position of station in a group. If the station is in the - * group then bits (group * 2) is the position -1 - */ -struct iwl_mu_group_mgmt_cmd { - __le32 reserved; - __le32 membership_status[2]; - __le32 user_position[4]; -} __packed; /* MU_GROUP_ID_MNG_TABLE_API_S_VER_1 */ - -/** - * struct iwl_mu_group_mgmt_notif - VHT MU-MIMO group id notification - * - * @membership_status: a bitmap of MU groups - * @user_position: the position of station in a group. 
If the station is in the - * group then bits (group * 2) is the position -1 - */ -struct iwl_mu_group_mgmt_notif { - __le32 membership_status[2]; - __le32 user_position[4]; -} __packed; /* MU_GROUP_MNG_NTFY_API_S_VER_1 */ - -#define MAX_STORED_BEACON_SIZE 600 - -/** - * struct iwl_stored_beacon_notif - Stored beacon notification - * - * @system_time: system time on air rise - * @tsf: TSF on air rise - * @beacon_timestamp: beacon on air rise - * @band: band, matches &RX_RES_PHY_FLAGS_BAND_24 definition - * @channel: channel this beacon was received on - * @rates: rate in ucode internal format - * @byte_count: frame's byte count - * @data: beacon data, length in @byte_count - */ -struct iwl_stored_beacon_notif { - __le32 system_time; - __le64 tsf; - __le32 beacon_timestamp; - __le16 band; - __le16 channel; - __le32 rates; - __le32 byte_count; - u8 data[MAX_STORED_BEACON_SIZE]; -} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_2 */ - -#define LQM_NUMBER_OF_STATIONS_IN_REPORT 16 - -enum iwl_lqm_cmd_operatrions { - LQM_CMD_OPERATION_START_MEASUREMENT = 0x01, - LQM_CMD_OPERATION_STOP_MEASUREMENT = 0x02, -}; - -enum iwl_lqm_status { - LQM_STATUS_SUCCESS = 0, - LQM_STATUS_TIMEOUT = 1, - LQM_STATUS_ABORT = 2, -}; - -/** - * struct iwl_link_qual_msrmnt_cmd - Link Quality Measurement command - * @cmd_operation: command operation to be performed (start or stop) - * as defined above. - * @mac_id: MAC ID the measurement applies to. - * @measurement_time: time of the total measurement to be performed, in uSec. - * @timeout: maximum time allowed until a response is sent, in uSec. - */ -struct iwl_link_qual_msrmnt_cmd { - __le32 cmd_operation; - __le32 mac_id; - __le32 measurement_time; - __le32 timeout; -} __packed /* LQM_CMD_API_S_VER_1 */; - -/** - * struct iwl_link_qual_msrmnt_notif - Link Quality Measurement notification - * - * @frequent_stations_air_time: an array containing the total air time - * (in uSec) used by the most frequently transmitting stations. - * @number_of_stations: the number of uniqe stations included in the array - * (a number between 0 to 16) - * @total_air_time_other_stations: the total air time (uSec) used by all the - * stations which are not included in the above report. - * @time_in_measurement_window: the total time in uSec in which a measurement - * took place. - * @tx_frame_dropped: the number of TX frames dropped due to retry limit during - * measurement - * @mac_id: MAC ID the measurement applies to. - * @status: return status. may be one of the LQM_STATUS_* defined above. - * @reserved: reserved. 
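The membership/user-position layout in the MU-MIMO structures above can be read as one membership bit per group and two user-position bits per group. A minimal sketch of decoding a group from the notification; the exact bit layout is an interpretation of the kernel-doc rather than something this hunk states explicitly.

/*
 * Illustrative sketch only: decode VHT MU-MIMO group @grp (0..63) from
 * the notification above - one membership bit per group, two position
 * bits per group. The bit layout is an interpretation of the kernel-doc.
 */
static bool example_mu_group_member(const struct iwl_mu_group_mgmt_notif *notif,
				    unsigned int grp, u8 *user_pos)
{
	u32 membership = le32_to_cpu(notif->membership_status[grp / 32]);
	u32 positions = le32_to_cpu(notif->user_position[(grp * 2) / 32]);

	if (!(membership & BIT(grp % 32)))
		return false;

	*user_pos = (positions >> ((grp * 2) % 32)) & 0x3;
	return true;
}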
- */ -struct iwl_link_qual_msrmnt_notif { - __le32 frequent_stations_air_time[LQM_NUMBER_OF_STATIONS_IN_REPORT]; - __le32 number_of_stations; - __le32 total_air_time_other_stations; - __le32 time_in_measurement_window; - __le32 tx_frame_dropped; - __le32 mac_id; - __le32 status; - u8 reserved[12]; -} __packed; /* LQM_MEASUREMENT_COMPLETE_NTF_API_S_VER1 */ - -/** - * struct iwl_channel_switch_noa_notif - Channel switch NOA notification - * - * @id_and_color: ID and color of the MAC - */ -struct iwl_channel_switch_noa_notif { - __le32 id_and_color; -} __packed; /* CHANNEL_SWITCH_START_NTFY_API_S_VER_1 */ - -/* Operation types for the debug mem access */ -enum { - DEBUG_MEM_OP_READ = 0, - DEBUG_MEM_OP_WRITE = 1, - DEBUG_MEM_OP_WRITE_BYTES = 2, -}; - -#define DEBUG_MEM_MAX_SIZE_DWORDS 32 - -/** - * struct iwl_dbg_mem_access_cmd - Request the device to read/write memory - * @op: DEBUG_MEM_OP_* - * @addr: address to read/write from/to - * @len: in dwords, to read/write - * @data: for write opeations, contains the source buffer - */ -struct iwl_dbg_mem_access_cmd { - __le32 op; - __le32 addr; - __le32 len; - __le32 data[]; -} __packed; /* DEBUG_(U|L)MAC_RD_WR_CMD_API_S_VER_1 */ - -/* Status responses for the debug mem access */ -enum { - DEBUG_MEM_STATUS_SUCCESS = 0x0, - DEBUG_MEM_STATUS_FAILED = 0x1, - DEBUG_MEM_STATUS_LOCKED = 0x2, - DEBUG_MEM_STATUS_HIDDEN = 0x3, - DEBUG_MEM_STATUS_LENGTH = 0x4, -}; - -/** - * struct iwl_dbg_mem_access_rsp - Response to debug mem commands - * @status: DEBUG_MEM_STATUS_* - * @len: read dwords (0 for write operations) - * @data: contains the read DWs - */ -struct iwl_dbg_mem_access_rsp { - __le32 status; - __le32 len; - __le32 data[]; -} __packed; /* DEBUG_(U|L)MAC_RD_WR_RSP_API_S_VER_1 */ - -/** - * struct iwl_nvm_access_complete_cmd - NVM_ACCESS commands are completed - * @reserved: reserved - */ -struct iwl_nvm_access_complete_cmd { - __le32 reserved; -} __packed; /* NVM_ACCESS_COMPLETE_CMD_API_S_VER_1 */ - -/** - * enum iwl_extended_cfg_flag - commands driver may send before - * finishing init flow - * @IWL_INIT_DEBUG_CFG: driver is going to send debug config command - * @IWL_INIT_NVM: driver is going to send NVM_ACCESS commands - * @IWL_INIT_PHY: driver is going to send PHY_DB commands - */ -enum iwl_extended_cfg_flags { - IWL_INIT_DEBUG_CFG, - IWL_INIT_NVM, - IWL_INIT_PHY, -}; - -/** - * struct iwl_extended_cfg_cmd - mark what commands ucode should wait for - * before finishing init flows - * @init_flags: values from iwl_extended_cfg_flags - */ -struct iwl_init_extended_cfg_cmd { - __le32 init_flags; -} __packed; /* INIT_EXTENDED_CFG_CMD_API_S_VER_1 */ - -/* - * struct iwl_nvm_get_info - request to get NVM data - */ -struct iwl_nvm_get_info { - __le32 reserved; -} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_S_VER_1 */ - -/** - * struct iwl_nvm_get_info_general - general NVM data - * @flags: 1 - empty, 0 - valid - * @nvm_version: nvm version - * @board_type: board type - * @reserved: reserved - */ -struct iwl_nvm_get_info_general { - __le32 flags; - __le16 nvm_version; - u8 board_type; - u8 reserved; -} __packed; /* GRP_REGULATORY_NVM_GET_INFO_GENERAL_S_VER_1 */ - -/** - * struct iwl_nvm_get_info_sku - mac information - * @enable_24g: band 2.4G enabled - * @enable_5g: band 5G enabled - * @enable_11n: 11n enabled - * @enable_11ac: 11ac enabled - * @mimo_disable: MIMO enabled - * @ext_crypto: Extended crypto enabled - */ -struct iwl_nvm_get_info_sku { - __le32 enable_24g; - __le32 enable_5g; - __le32 enable_11n; - __le32 enable_11ac; - __le32 mimo_disable; - 
__le32 ext_crypto; -} __packed; /* GRP_REGULATORY_NVM_GET_INFO_MAC_SKU_SECTION_S_VER_1 */ - -/** - * struct iwl_nvm_get_info_phy - phy information - * @tx_chains: BIT 0 chain A, BIT 1 chain B - * @rx_chains: BIT 0 chain A, BIT 1 chain B - */ -struct iwl_nvm_get_info_phy { - __le32 tx_chains; - __le32 rx_chains; -} __packed; /* GRP_REGULATORY_NVM_GET_INFO_PHY_SKU_SECTION_S_VER_1 */ - -#define IWL_NUM_CHANNELS (51) - -/** - * struct iwl_nvm_get_info_regulatory - regulatory information - * @lar_enabled: is LAR enabled - * @channel_profile: regulatory data of this channel - * @reserved: reserved - */ -struct iwl_nvm_get_info_regulatory { - __le32 lar_enabled; - __le16 channel_profile[IWL_NUM_CHANNELS]; - __le16 reserved; -} __packed; /* GRP_REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_1 */ - -/** - * struct iwl_nvm_get_info_rsp - response to get NVM data - * @general: general NVM data - * @mac_sku: data relating to MAC sku - * @phy_sku: data relating to PHY sku - * @regulatory: regulatory data - */ -struct iwl_nvm_get_info_rsp { - struct iwl_nvm_get_info_general general; - struct iwl_nvm_get_info_sku mac_sku; - struct iwl_nvm_get_info_phy phy_sku; - struct iwl_nvm_get_info_regulatory regulatory; -} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_RSP_S_VER_1 */ - -/** - * struct iwl_mvm_antenna_coupling_notif - antenna coupling notification - * @isolation: antenna isolation value - */ -struct iwl_mvm_antenna_coupling_notif { - __le32 isolation; -} __packed; +#include "fw/api/tdls.h" +#include "fw/api/mac-cfg.h" +#include "fw/api/offload.h" +#include "fw/api/context.h" +#include "fw/api/time-event.h" +#include "fw/api/datapath.h" +#include "fw/api/phy.h" +#include "fw/api/config.h" +#include "fw/api/alive.h" +#include "fw/api/binding.h" +#include "fw/api/cmdhdr.h" +#include "fw/api/coex.h" +#include "fw/api/commands.h" +#include "fw/api/d3.h" +#include "fw/api/filter.h" +#include "fw/api/led.h" +#include "fw/api/mac.h" +#include "fw/api/nvm-reg.h" +#include "fw/api/phy-ctxt.h" +#include "fw/api/power.h" +#include "fw/api/rs.h" +#include "fw/api/rx.h" +#include "fw/api/scan.h" +#include "fw/api/sf.h" +#include "fw/api/sta.h" +#include "fw/api/stats.h" +#include "fw/api/tof.h" +#include "fw/api/tx.h" #endif /* __fw_api_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 82863e9273eb..83485493a79a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -78,7 +78,7 @@ #include "iwl-eeprom-parse.h" #include "mvm.h" -#include "fw-dbg.h" +#include "fw/dbg.h" #include "iwl-phy-db.h" #define MVM_UCODE_ALIVE_TIMEOUT HZ @@ -144,134 +144,6 @@ static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm) return ret; } -void iwl_free_fw_paging(struct iwl_mvm *mvm) -{ - int i; - - if (!mvm->fw_paging_db[0].fw_paging_block) - return; - - for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) { - struct iwl_fw_paging *paging = &mvm->fw_paging_db[i]; - - if (!paging->fw_paging_block) { - IWL_DEBUG_FW(mvm, - "Paging: block %d already freed, continue to next page\n", - i); - - continue; - } - dma_unmap_page(mvm->trans->dev, paging->fw_paging_phys, - paging->fw_paging_size, DMA_BIDIRECTIONAL); - - __free_pages(paging->fw_paging_block, - get_order(paging->fw_paging_size)); - paging->fw_paging_block = NULL; - } - kfree(mvm->trans->paging_download_buf); - mvm->trans->paging_download_buf = NULL; - mvm->trans->paging_db = NULL; - - memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db)); -} - -static int 
iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image) -{ - int sec_idx, idx; - u32 offset = 0; - - /* - * find where is the paging image start point: - * if CPU2 exist and it's in paging format, then the image looks like: - * CPU1 sections (2 or more) - * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between CPU1 to CPU2 - * CPU2 sections (not paged) - * PAGING_SEPARATOR_SECTION delimiter - separate between CPU2 - * non paged to CPU2 paging sec - * CPU2 paging CSS - * CPU2 paging image (including instruction and data) - */ - for (sec_idx = 0; sec_idx < image->num_sec; sec_idx++) { - if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) { - sec_idx++; - break; - } - } - - /* - * If paging is enabled there should be at least 2 more sections left - * (one for CSS and one for Paging data) - */ - if (sec_idx >= image->num_sec - 1) { - IWL_ERR(mvm, "Paging: Missing CSS and/or paging sections\n"); - iwl_free_fw_paging(mvm); - return -EINVAL; - } - - /* copy the CSS block to the dram */ - IWL_DEBUG_FW(mvm, "Paging: load paging CSS to FW, sec = %d\n", - sec_idx); - - memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block), - image->sec[sec_idx].data, - mvm->fw_paging_db[0].fw_paging_size); - dma_sync_single_for_device(mvm->trans->dev, - mvm->fw_paging_db[0].fw_paging_phys, - mvm->fw_paging_db[0].fw_paging_size, - DMA_BIDIRECTIONAL); - - IWL_DEBUG_FW(mvm, - "Paging: copied %d CSS bytes to first block\n", - mvm->fw_paging_db[0].fw_paging_size); - - sec_idx++; - - /* - * copy the paging blocks to the dram - * loop index start from 1 since that CSS block already copied to dram - * and CSS index is 0. - * loop stop at num_of_paging_blk since that last block is not full. - */ - for (idx = 1; idx < mvm->num_of_paging_blk; idx++) { - struct iwl_fw_paging *block = &mvm->fw_paging_db[idx]; - - memcpy(page_address(block->fw_paging_block), - image->sec[sec_idx].data + offset, - block->fw_paging_size); - dma_sync_single_for_device(mvm->trans->dev, - block->fw_paging_phys, - block->fw_paging_size, - DMA_BIDIRECTIONAL); - - - IWL_DEBUG_FW(mvm, - "Paging: copied %d paging bytes to block %d\n", - mvm->fw_paging_db[idx].fw_paging_size, - idx); - - offset += mvm->fw_paging_db[idx].fw_paging_size; - } - - /* copy the last paging block */ - if (mvm->num_of_pages_in_last_blk > 0) { - struct iwl_fw_paging *block = &mvm->fw_paging_db[idx]; - - memcpy(page_address(block->fw_paging_block), - image->sec[sec_idx].data + offset, - FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk); - dma_sync_single_for_device(mvm->trans->dev, - block->fw_paging_phys, - block->fw_paging_size, - DMA_BIDIRECTIONAL); - - IWL_DEBUG_FW(mvm, - "Paging: copied %d pages in the last block %d\n", - mvm->num_of_pages_in_last_blk, idx); - } - - return 0; -} - void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { @@ -293,178 +165,6 @@ void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm, le32_to_cpu(dump_data[i])); } -static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm, - const struct fw_img *image) -{ - struct page *block; - dma_addr_t phys = 0; - int blk_idx, order, num_of_pages, size, dma_enabled; - - if (mvm->fw_paging_db[0].fw_paging_block) - return 0; - - dma_enabled = is_device_dma_capable(mvm->trans->dev); - - /* ensure BLOCK_2_EXP_SIZE is power of 2 of PAGING_BLOCK_SIZE */ - BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE); - - num_of_pages = image->paging_mem_size / FW_PAGING_SIZE; - mvm->num_of_paging_blk = - DIV_ROUND_UP(num_of_pages, NUM_OF_PAGE_PER_GROUP); - 
mvm->num_of_pages_in_last_blk = - num_of_pages - - NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1); - - IWL_DEBUG_FW(mvm, - "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n", - mvm->num_of_paging_blk, - mvm->num_of_pages_in_last_blk); - - /* - * Allocate CSS and paging blocks in dram. - */ - for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) { - /* For CSS allocate 4KB, for others PAGING_BLOCK_SIZE (32K) */ - size = blk_idx ? PAGING_BLOCK_SIZE : FW_PAGING_SIZE; - order = get_order(size); - block = alloc_pages(GFP_KERNEL, order); - if (!block) { - /* free all the previous pages since we failed */ - iwl_free_fw_paging(mvm); - return -ENOMEM; - } - - mvm->fw_paging_db[blk_idx].fw_paging_block = block; - mvm->fw_paging_db[blk_idx].fw_paging_size = size; - - if (dma_enabled) { - phys = dma_map_page(mvm->trans->dev, block, 0, - PAGE_SIZE << order, - DMA_BIDIRECTIONAL); - if (dma_mapping_error(mvm->trans->dev, phys)) { - /* - * free the previous pages and the current one - * since we failed to map_page. - */ - iwl_free_fw_paging(mvm); - return -ENOMEM; - } - mvm->fw_paging_db[blk_idx].fw_paging_phys = phys; - } else { - mvm->fw_paging_db[blk_idx].fw_paging_phys = - PAGING_ADDR_SIG | - blk_idx << BLOCK_2_EXP_SIZE; - } - - if (!blk_idx) - IWL_DEBUG_FW(mvm, - "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n", - order); - else - IWL_DEBUG_FW(mvm, - "Paging: allocated 32K bytes (order %d) for firmware paging.\n", - order); - } - - return 0; -} - -static int iwl_save_fw_paging(struct iwl_mvm *mvm, - const struct fw_img *fw) -{ - int ret; - - ret = iwl_alloc_fw_paging_mem(mvm, fw); - if (ret) - return ret; - - return iwl_fill_paging_mem(mvm, fw); -} - -/* send paging cmd to FW in case CPU2 has paging image */ -static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw) -{ - struct iwl_fw_paging_cmd paging_cmd = { - .flags = cpu_to_le32(PAGING_CMD_IS_SECURED | - PAGING_CMD_IS_ENABLED | - (mvm->num_of_pages_in_last_blk << - PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)), - .block_size = cpu_to_le32(BLOCK_2_EXP_SIZE), - .block_num = cpu_to_le32(mvm->num_of_paging_blk), - }; - int blk_idx; - - /* loop for for all paging blocks + CSS block */ - for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) { - dma_addr_t addr = mvm->fw_paging_db[blk_idx].fw_paging_phys; - __le32 phy_addr; - - addr = addr >> PAGE_2_EXP_SIZE; - phy_addr = cpu_to_le32(addr); - paging_cmd.device_phy_addr[blk_idx] = phy_addr; - } - - return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD, - IWL_ALWAYS_LONG_GROUP, 0), - 0, sizeof(paging_cmd), &paging_cmd); -} - -/* - * Send paging item cmd to FW in case CPU2 has paging image - */ -static int iwl_trans_get_paging_item(struct iwl_mvm *mvm) -{ - int ret; - struct iwl_fw_get_item_cmd fw_get_item_cmd = { - .item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING), - }; - - struct iwl_fw_get_item_resp *item_resp; - struct iwl_host_cmd cmd = { - .id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0), - .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL, - .data = { &fw_get_item_cmd, }, - }; - - cmd.len[0] = sizeof(struct iwl_fw_get_item_cmd); - - ret = iwl_mvm_send_cmd(mvm, &cmd); - if (ret) { - IWL_ERR(mvm, - "Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n", - ret); - return ret; - } - - item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data; - if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) { - IWL_ERR(mvm, - "Paging: got wrong item in FW_GET_ITEM_CMD 
resp (item_id = %u)\n", - le32_to_cpu(item_resp->item_id)); - ret = -EIO; - goto exit; - } - - /* Add an extra page for headers */ - mvm->trans->paging_download_buf = kzalloc(PAGING_BLOCK_SIZE + - FW_PAGING_SIZE, - GFP_KERNEL); - if (!mvm->trans->paging_download_buf) { - ret = -ENOMEM; - goto exit; - } - mvm->trans->paging_req_addr = le32_to_cpu(item_resp->item_val); - mvm->trans->paging_db = mvm->fw_paging_db; - IWL_DEBUG_FW(mvm, - "Paging: got paging request address (paging_req_addr 0x%08x)\n", - mvm->trans->paging_req_addr); - -exit: - iwl_free_resp(&cmd); - - return ret; -} - static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt, void *data) { @@ -544,48 +244,6 @@ static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait, return false; } -static int iwl_mvm_init_paging(struct iwl_mvm *mvm) -{ - const struct fw_img *fw = &mvm->fw->img[mvm->cur_ucode]; - int ret; - - /* - * Configure and operate fw paging mechanism. - * The driver configures the paging flow only once. - * The CPU2 paging image is included in the IWL_UCODE_INIT image. - */ - if (!fw->paging_mem_size) - return 0; - - /* - * When dma is not enabled, the driver needs to copy / write - * the downloaded / uploaded page to / from the smem. - * This gets the location of the place were the pages are - * stored. - */ - if (!is_device_dma_capable(mvm->trans->dev)) { - ret = iwl_trans_get_paging_item(mvm); - if (ret) { - IWL_ERR(mvm, "failed to get FW paging item\n"); - return ret; - } - } - - ret = iwl_save_fw_paging(mvm, fw); - if (ret) { - IWL_ERR(mvm, "failed to save the FW paging image\n"); - return ret; - } - - ret = iwl_send_paging_cmd(mvm, fw); - if (ret) { - IWL_ERR(mvm, "failed to send the paging cmd\n"); - iwl_free_fw_paging(mvm); - return ret; - } - - return 0; -} static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, enum iwl_ucode_type ucode_type) { @@ -593,7 +251,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, struct iwl_mvm_alive_data alive_data; const struct fw_img *fw; int ret, i; - enum iwl_ucode_type old_type = mvm->cur_ucode; + enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img; static const u16 alive_cmd[] = { MVM_ALIVE }; struct iwl_sf_region st_fwrd_space; @@ -606,7 +264,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, fw = iwl_get_ucode_image(mvm->fw, ucode_type); if (WARN_ON(!fw)) return -EINVAL; - mvm->cur_ucode = ucode_type; + iwl_fw_set_current_image(&mvm->fwrt, ucode_type); clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); iwl_init_notification_wait(&mvm->notif_wait, &alive_wait, @@ -615,7 +273,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT); if (ret) { - mvm->cur_ucode = old_type; + iwl_fw_set_current_image(&mvm->fwrt, old_type); iwl_remove_notification(&mvm->notif_wait, &alive_wait); return ret; } @@ -639,13 +297,13 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n", iwl_read_prph(trans, SB_CPU_1_STATUS), iwl_read_prph(trans, SB_CPU_2_STATUS)); - mvm->cur_ucode = old_type; + iwl_fw_set_current_image(&mvm->fwrt, old_type); return ret; } if (!alive_data.valid) { IWL_ERR(mvm, "Loaded ucode is not valid!\n"); - mvm->cur_ucode = old_type; + iwl_fw_set_current_image(&mvm->fwrt, old_type); return -EIO; } @@ -673,10 +331,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, */ memset(&mvm->queue_info, 0, sizeof(mvm->queue_info)); - if 
(iwl_mvm_is_dqa_supported(mvm)) - mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1; - else - mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1; + mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1; for (i = 0; i < IEEE80211_MAX_QUEUES; i++) atomic_set(&mvm->mac80211_queue_stop_count[i], 0); @@ -733,7 +388,7 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) } if (IWL_MVM_PARSE_NVM && read_nvm) { - ret = iwl_nvm_init(mvm, true); + ret = iwl_nvm_init(mvm); if (ret) { IWL_ERR(mvm, "Failed to read NVM: %d\n", ret); goto error; @@ -757,8 +412,10 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) /* Read the NVM only at driver load time, no need to do this twice */ if (!IWL_MVM_PARSE_NVM && read_nvm) { - ret = iwl_mvm_nvm_get_from_fw(mvm); - if (ret) { + mvm->nvm_data = iwl_fw_get_nvm(&mvm->fwrt); + if (IS_ERR(mvm->nvm_data)) { + ret = PTR_ERR(mvm->nvm_data); + mvm->nvm_data = NULL; IWL_ERR(mvm, "Failed to read NVM: %d\n", ret); return ret; } @@ -774,7 +431,7 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) { struct iwl_phy_cfg_cmd phy_cfg_cmd; - enum iwl_ucode_type ucode_type = mvm->cur_ucode; + enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img; /* Set parameters */ phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm)); @@ -799,7 +456,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) }; int ret; - if (iwl_mvm_has_new_tx_api(mvm)) + if (iwl_mvm_has_unified_ucode(mvm)) return iwl_run_unified_mvm_ucode(mvm, true); lockdep_assert_held(&mvm->mutex); @@ -818,22 +475,21 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT); if (ret) { IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret); - goto error; + goto remove_notif; } if (mvm->cfg->device_family < IWL_DEVICE_FAMILY_8000) { ret = iwl_mvm_send_bt_init_conf(mvm); if (ret) - goto error; + goto remove_notif; } /* Read the NVM only at driver load time, no need to do this twice */ if (read_nvm) { - /* Read nvm */ - ret = iwl_nvm_init(mvm, true); + ret = iwl_nvm_init(mvm); if (ret) { IWL_ERR(mvm, "Failed to read NVM: %d\n", ret); - goto error; + goto remove_notif; } } @@ -841,8 +497,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) if (mvm->nvm_file_name) iwl_mvm_load_nvm_to_nic(mvm); - ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans); - WARN_ON(ret); + WARN_ON(iwl_nvm_check_version(mvm->nvm_data, mvm->trans)); /* * abort after reading the nvm in case RF Kill is on, we will complete @@ -851,9 +506,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) if (iwl_mvm_is_radio_hw_killed(mvm)) { IWL_DEBUG_RF_KILL(mvm, "jump over all phy activities due to RF kill\n"); - iwl_remove_notification(&mvm->notif_wait, &calib_wait); - ret = 1; - goto out; + goto remove_notif; } mvm->calibrating = true; @@ -861,17 +514,13 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) /* Send TX valid antennas before triggering calibrations */ ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm)); if (ret) - goto error; + goto remove_notif; - /* - * Send phy configurations command to init uCode - * to start the 16.0 uCode init image internal calibrations. 
- */ ret = iwl_send_phy_cfg_cmd(mvm); if (ret) { IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n", ret); - goto error; + goto remove_notif; } /* @@ -879,15 +528,21 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) * just wait for the calibration complete notification. */ ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait, - MVM_UCODE_CALIB_TIMEOUT); + MVM_UCODE_CALIB_TIMEOUT); + if (!ret) + goto out; - if (ret && iwl_mvm_is_radio_hw_killed(mvm)) { + if (iwl_mvm_is_radio_hw_killed(mvm)) { IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n"); - ret = 1; + ret = 0; + } else { + IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n", + ret); } + goto out; -error: +remove_notif: iwl_remove_notification(&mvm->notif_wait, &calib_wait); out: mvm->calibrating = false; @@ -910,95 +565,6 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) return ret; } -static void iwl_mvm_parse_shared_mem_a000(struct iwl_mvm *mvm, - struct iwl_rx_packet *pkt) -{ - struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data; - int i, lmac; - int lmac_num = le32_to_cpu(mem_cfg->lmac_num); - - if (WARN_ON(lmac_num > ARRAY_SIZE(mem_cfg->lmac_smem))) - return; - - mvm->smem_cfg.num_lmacs = lmac_num; - mvm->smem_cfg.num_txfifo_entries = - ARRAY_SIZE(mem_cfg->lmac_smem[0].txfifo_size); - mvm->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo2_size); - - for (lmac = 0; lmac < lmac_num; lmac++) { - struct iwl_shared_mem_lmac_cfg *lmac_cfg = - &mem_cfg->lmac_smem[lmac]; - - for (i = 0; i < ARRAY_SIZE(lmac_cfg->txfifo_size); i++) - mvm->smem_cfg.lmac[lmac].txfifo_size[i] = - le32_to_cpu(lmac_cfg->txfifo_size[i]); - mvm->smem_cfg.lmac[lmac].rxfifo1_size = - le32_to_cpu(lmac_cfg->rxfifo1_size); - } -} - -static void iwl_mvm_parse_shared_mem(struct iwl_mvm *mvm, - struct iwl_rx_packet *pkt) -{ - struct iwl_shared_mem_cfg_v2 *mem_cfg = (void *)pkt->data; - int i; - - mvm->smem_cfg.num_lmacs = 1; - - mvm->smem_cfg.num_txfifo_entries = ARRAY_SIZE(mem_cfg->txfifo_size); - for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++) - mvm->smem_cfg.lmac[0].txfifo_size[i] = - le32_to_cpu(mem_cfg->txfifo_size[i]); - - mvm->smem_cfg.lmac[0].rxfifo1_size = - le32_to_cpu(mem_cfg->rxfifo_size[0]); - mvm->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo_size[1]); - - /* new API has more data, from rxfifo_addr field and on */ - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { - BUILD_BUG_ON(sizeof(mvm->smem_cfg.internal_txfifo_size) != - sizeof(mem_cfg->internal_txfifo_size)); - - for (i = 0; - i < ARRAY_SIZE(mvm->smem_cfg.internal_txfifo_size); - i++) - mvm->smem_cfg.internal_txfifo_size[i] = - le32_to_cpu(mem_cfg->internal_txfifo_size[i]); - } -} - -static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm) -{ - struct iwl_host_cmd cmd = { - .flags = CMD_WANT_SKB, - .data = { NULL, }, - .len = { 0, }, - }; - struct iwl_rx_packet *pkt; - - lockdep_assert_held(&mvm->mutex); - - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) - cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0); - else - cmd.id = SHARED_MEM_CFG; - - if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd))) - return; - - pkt = cmd.resp_pkt; - if (iwl_mvm_has_new_tx_api(mvm)) - iwl_mvm_parse_shared_mem_a000(mvm, pkt); - else - iwl_mvm_parse_shared_mem(mvm, pkt); - - IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n"); - - iwl_free_resp(&cmd); -} - static int iwl_mvm_config_ltr(struct iwl_mvm *mvm) { struct iwl_ltr_config_cmd cmd = { @@ -1048,8 +614,8 @@ static union 
acpi_object *iwl_mvm_sar_find_wifi_pkg(struct iwl_mvm *mvm, union acpi_object *data, int data_size) { + union acpi_object *wifi_pkg = NULL; int i; - union acpi_object *wifi_pkg; /* * We need at least two packages, one for the revision and one @@ -1431,6 +997,17 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) { return 0; } + +int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, + int prof_b) +{ + return -ENOENT; +} + +int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm) +{ + return -ENOENT; +} #endif /* CONFIG_ACPI */ static int iwl_mvm_sar_init(struct iwl_mvm *mvm) @@ -1467,7 +1044,7 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm) { int ret; - if (iwl_mvm_has_new_tx_api(mvm)) + if (iwl_mvm_has_unified_ucode(mvm)) return iwl_run_unified_mvm_ucode(mvm, false); ret = iwl_run_init_mvm_ucode(mvm, false); @@ -1477,9 +1054,6 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm) if (ret) { IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret); - /* this can't happen */ - if (WARN_ON(ret > 0)) - ret = -ERFKILL; return ret; } @@ -1497,7 +1071,7 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm) if (ret) return ret; - return iwl_mvm_init_paging(mvm); + return iwl_init_paging(&mvm->fwrt, mvm->fwrt.cur_fw_img); } int iwl_mvm_up(struct iwl_mvm *mvm) @@ -1518,24 +1092,24 @@ int iwl_mvm_up(struct iwl_mvm *mvm) goto error; } - iwl_mvm_get_shared_mem_conf(mvm); + iwl_get_shared_mem_conf(&mvm->fwrt); ret = iwl_mvm_sf_update(mvm, NULL, false); if (ret) IWL_ERR(mvm, "Failed to initialize Smart Fifo\n"); - mvm->fw_dbg_conf = FW_DBG_INVALID; + mvm->fwrt.dump.conf = FW_DBG_INVALID; /* if we have a destination, assume EARLY START */ if (mvm->fw->dbg_dest_tlv) - mvm->fw_dbg_conf = FW_DBG_START_FROM_ALIVE; - iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_START_FROM_ALIVE); + mvm->fwrt.dump.conf = FW_DBG_START_FROM_ALIVE; + iwl_fw_start_dbg_conf(&mvm->fwrt, FW_DBG_START_FROM_ALIVE); ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm)); if (ret) goto error; - /* Send phy db control command and then phy db calibration*/ - if (!iwl_mvm_has_new_tx_api(mvm)) { + if (!iwl_mvm_has_unified_ucode(mvm)) { + /* Send phy db control command and then phy db calibration */ ret = iwl_send_phy_db_data(mvm->phy_db); if (ret) goto error; @@ -1551,7 +1125,8 @@ int iwl_mvm_up(struct iwl_mvm *mvm) /* Init RSS configuration */ /* TODO - remove a000 disablement when we have RXQ config API */ - if (iwl_mvm_has_new_rx_api(mvm) && !iwl_mvm_has_new_tx_api(mvm)) { + if (iwl_mvm_has_new_rx_api(mvm) && + mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_A000) { ret = iwl_send_rss_cfg_cmd(mvm); if (ret) { IWL_ERR(mvm, "Failed to configure RSS queues: %d\n", @@ -1569,14 +1144,9 @@ int iwl_mvm_up(struct iwl_mvm *mvm) /* reset quota debouncing buffer - 0xff will yield invalid data */ memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd)); - /* Enable DQA-mode if required */ - if (iwl_mvm_is_dqa_supported(mvm)) { - ret = iwl_mvm_send_dqa_cmd(mvm); - if (ret) - goto error; - } else { - IWL_DEBUG_FW(mvm, "Working in non-DQA mode\n"); - } + ret = iwl_mvm_send_dqa_cmd(mvm); + if (ret) + goto error; /* Add auxiliary station for scanning */ ret = iwl_mvm_add_aux_sta(mvm); @@ -1611,7 +1181,12 @@ int iwl_mvm_up(struct iwl_mvm *mvm) } /* TODO: read the budget from BIOS / Platform NVM */ - if (iwl_mvm_is_ctdp_supported(mvm) && mvm->cooling_dev.cur_state > 0) { + + /* + * In case there is no budget from BIOS / Platform NVM the default + * budget should be 2000mW (cooling state 0). 
+ */ + if (iwl_mvm_is_ctdp_supported(mvm)) { ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START, mvm->cooling_dev.cur_state); if (ret) @@ -1657,6 +1232,8 @@ int iwl_mvm_up(struct iwl_mvm *mvm) if (ret) goto error; + iwl_mvm_leds_sync(mvm); + IWL_DEBUG_INFO(mvm, "RT uCode started.\n"); return 0; error: diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/led.c b/drivers/net/wireless/intel/iwlwifi/mvm/led.c index 3cac4278a5fd..b27269504a62 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/led.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/led.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -66,26 +68,46 @@ #include "iwl-csr.h" #include "mvm.h" -/* Set led register on */ -static void iwl_mvm_led_enable(struct iwl_mvm *mvm) +static void iwl_mvm_send_led_fw_cmd(struct iwl_mvm *mvm, bool on) { - iwl_write32(mvm->trans, CSR_LED_REG, CSR_LED_REG_TURN_ON); + struct iwl_led_cmd led_cmd = { + .status = cpu_to_le32(on), + }; + struct iwl_host_cmd cmd = { + .id = WIDE_ID(LONG_GROUP, LEDS_CMD), + .len = { sizeof(led_cmd), }, + .data = { &led_cmd, }, + .flags = CMD_ASYNC, + }; + int err; + + if (!iwl_mvm_firmware_running(mvm)) + return; + + err = iwl_mvm_send_cmd(mvm, &cmd); + + if (err) + IWL_WARN(mvm, "LED command failed: %d\n", err); } -/* Set led register off */ -static void iwl_mvm_led_disable(struct iwl_mvm *mvm) +static void iwl_mvm_led_set(struct iwl_mvm *mvm, bool on) { - iwl_write32(mvm->trans, CSR_LED_REG, CSR_LED_REG_TURN_OFF); + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_LED_CMD_SUPPORT)) { + iwl_mvm_send_led_fw_cmd(mvm, on); + return; + } + + iwl_write32(mvm->trans, CSR_LED_REG, + on ? 
CSR_LED_REG_TURN_ON : CSR_LED_REG_TURN_OFF); } static void iwl_led_brightness_set(struct led_classdev *led_cdev, enum led_brightness brightness) { struct iwl_mvm *mvm = container_of(led_cdev, struct iwl_mvm, led); - if (brightness > 0) - iwl_mvm_led_enable(mvm); - else - iwl_mvm_led_disable(mvm); + + iwl_mvm_led_set(mvm, brightness > 0); } int iwl_mvm_leds_init(struct iwl_mvm *mvm) @@ -127,10 +149,24 @@ int iwl_mvm_leds_init(struct iwl_mvm *mvm) return 0; } +void iwl_mvm_leds_sync(struct iwl_mvm *mvm) +{ + if (!(mvm->init_status & IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE)) + return; + + /* + * if we control through the register, we're doing it + * even when the firmware isn't up, so no need to sync + */ + if (mvm->cfg->device_family < IWL_DEVICE_FAMILY_8000) + return; + + iwl_mvm_led_set(mvm, mvm->led.brightness > 0); +} + void iwl_mvm_leds_exit(struct iwl_mvm *mvm) { - if (iwlwifi_mod_params.led_mode == IWL_LED_DISABLE || - !(mvm->init_status & IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE)) + if (!(mvm->init_status & IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE)) return; led_classdev_unregister(&mvm->led); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index dc631b23e189..a2bf530eeae4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2016 Intel Deutschland GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,7 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2016 Intel Deutschland GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -72,7 +72,6 @@ #include "fw-api.h" #include "mvm.h" #include "time-event.h" -#include "fw-dbg.h" const u8 iwl_mvm_ac_to_tx_fifo[] = { IWL_MVM_TX_FIFO_VO, @@ -81,6 +80,13 @@ const u8 iwl_mvm_ac_to_tx_fifo[] = { IWL_MVM_TX_FIFO_BK, }; +const u8 iwl_mvm_ac_to_gen2_tx_fifo[] = { + IWL_GEN2_EDCA_TX_FIFO_VO, + IWL_GEN2_EDCA_TX_FIFO_VI, + IWL_GEN2_EDCA_TX_FIFO_BE, + IWL_GEN2_EDCA_TX_FIFO_BK, +}; + struct iwl_mvm_mac_iface_iterator_data { struct iwl_mvm *mvm; struct ieee80211_vif *vif; @@ -235,32 +241,17 @@ static void iwl_mvm_iface_hw_queues_iter(void *_data, u8 *mac, data->used_hw_queues |= iwl_mvm_mac_get_queues_mask(vif); } -static void iwl_mvm_mac_sta_hw_queues_iter(void *_data, - struct ieee80211_sta *sta) -{ - struct iwl_mvm_hw_queues_iface_iterator_data *data = _data; - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - - /* Mark the queues used by the sta */ - data->used_hw_queues |= mvmsta->tfd_queue_msk; -} - unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm, struct ieee80211_vif *exclude_vif) { - u8 sta_id; struct iwl_mvm_hw_queues_iface_iterator_data data = { .exclude_vif = exclude_vif, .used_hw_queues = BIT(IWL_MVM_OFFCHANNEL_QUEUE) | - BIT(mvm->aux_queue), + BIT(mvm->aux_queue) | + BIT(IWL_MVM_DQA_GCAST_QUEUE), }; - if (iwl_mvm_is_dqa_supported(mvm)) - data.used_hw_queues |= BIT(IWL_MVM_DQA_GCAST_QUEUE); - else - data.used_hw_queues |= BIT(IWL_MVM_CMD_QUEUE); - lockdep_assert_held(&mvm->mutex); /* mark all VIF used hw queues */ @@ -268,26 +259,6 @@ unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm, mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL, iwl_mvm_iface_hw_queues_iter, &data); - /* - * for DQA, the hw_queue in mac80211 is never really used for - * real traffic (only the few queue IDs covered above), so - * we can reuse the real HW queue IDs the stations use - */ - if (iwl_mvm_is_dqa_supported(mvm)) - return data.used_hw_queues; - - /* don't assign the same hw queues as TDLS stations */ - ieee80211_iterate_stations_atomic(mvm->hw, - iwl_mvm_mac_sta_hw_queues_iter, - &data); - - /* - * Some TDLS stations may be removed but are in the process of being - * drained. Don't touch their queues. - */ - for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) - data.used_hw_queues |= mvm->tfd_drained[sta_id]; - return data.used_hw_queues; } @@ -338,8 +309,7 @@ void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm, NUM_TSF_IDS); } -static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) +int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_mac_iface_iterator_data data = { @@ -355,6 +325,8 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm, int ret, i, queue_limit; unsigned long used_hw_queues; + lockdep_assert_held(&mvm->mutex); + /* * Allocate a MAC ID and a TSF for this MAC, along with the queues * and other resources. 
@@ -438,19 +410,14 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm, return 0; } - if (iwl_mvm_is_dqa_supported(mvm)) { - /* - * queues in mac80211 almost entirely independent of - * the ones here - no real limit - */ - queue_limit = IEEE80211_MAX_QUEUES; - BUILD_BUG_ON(IEEE80211_MAX_QUEUES > - BITS_PER_BYTE * - sizeof(mvm->hw_queue_to_mac80211[0])); - } else { - /* need to not use too many in this case */ - queue_limit = mvm->first_agg_queue; - } + /* + * queues in mac80211 almost entirely independent of + * the ones here - no real limit + */ + queue_limit = IEEE80211_MAX_QUEUES; + BUILD_BUG_ON(IEEE80211_MAX_QUEUES > + BITS_PER_BYTE * + sizeof(mvm->hw_queue_to_mac80211[0])); /* * Find available queues, and allocate them to the ACs. When in @@ -472,27 +439,12 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm, /* Allocate the CAB queue for softAP and GO interfaces */ if (vif->type == NL80211_IFTYPE_AP) { - u8 queue; - - if (!iwl_mvm_is_dqa_supported(mvm)) { - queue = find_first_zero_bit(&used_hw_queues, - mvm->first_agg_queue); - - if (queue >= mvm->first_agg_queue) { - IWL_ERR(mvm, "Failed to allocate cab queue\n"); - ret = -EIO; - goto exit_fail; - } - } else { - queue = IWL_MVM_DQA_GCAST_QUEUE; - } - /* * For TVQM this will be overwritten later with the FW assigned * queue value (when queue is enabled). */ - mvmvif->cab_queue = queue; - vif->cab_queue = queue; + mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; + vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; } else { vif->cab_queue = IEEE80211_INVAL_HW_QUEUE; } @@ -513,78 +465,6 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm, return ret; } -int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) -{ - unsigned int wdg_timeout = - iwl_mvm_get_wd_timeout(mvm, vif, false, false); - u32 ac; - int ret; - - lockdep_assert_held(&mvm->mutex); - - ret = iwl_mvm_mac_ctxt_allocate_resources(mvm, vif); - if (ret) - return ret; - - /* If DQA is supported - queues will be enabled when needed */ - if (iwl_mvm_is_dqa_supported(mvm)) - return 0; - - switch (vif->type) { - case NL80211_IFTYPE_P2P_DEVICE: - iwl_mvm_enable_ac_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE, - IWL_MVM_OFFCHANNEL_QUEUE, - IWL_MVM_TX_FIFO_VO, 0, wdg_timeout); - break; - case NL80211_IFTYPE_AP: - iwl_mvm_enable_ac_txq(mvm, vif->cab_queue, vif->cab_queue, - IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout); - /* fall through */ - default: - for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) - iwl_mvm_enable_ac_txq(mvm, vif->hw_queue[ac], - vif->hw_queue[ac], - iwl_mvm_ac_to_tx_fifo[ac], 0, - wdg_timeout); - break; - } - - return 0; -} - -void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif) -{ - int ac; - - lockdep_assert_held(&mvm->mutex); - - /* - * If DQA is supported - queues were already disabled, since in - * DQA-mode the queues are a property of the STA and not of the - * vif, and at this point the STA was already deleted - */ - if (iwl_mvm_is_dqa_supported(mvm)) - return; - - switch (vif->type) { - case NL80211_IFTYPE_P2P_DEVICE: - iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE, - IWL_MVM_OFFCHANNEL_QUEUE, - IWL_MAX_TID_COUNT, 0); - - break; - case NL80211_IFTYPE_AP: - iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue, - IWL_MAX_TID_COUNT, 0); - /* fall through */ - default: - for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) - iwl_mvm_disable_txq(mvm, vif->hw_queue[ac], - vif->hw_queue[ac], - IWL_MAX_TID_COUNT, 0); - } -} - static void iwl_mvm_ack_rates(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum 
nl80211_band band, @@ -775,7 +655,7 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm, cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP); for (i = 0; i < IEEE80211_NUM_ACS; i++) { - u8 txf = iwl_mvm_ac_to_tx_fifo[i]; + u8 txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, i); cmd->ac[txf].cw_min = cpu_to_le16(mvmvif->queue_params[i].cw_min); @@ -908,18 +788,12 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm, { struct iwl_mac_ctx_cmd cmd = {}; u32 tfd_queue_msk = 0; - int ret, i; + int ret; WARN_ON(vif->type != NL80211_IFTYPE_MONITOR); iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); - if (!iwl_mvm_is_dqa_supported(mvm)) { - for (i = 0; i < IEEE80211_NUM_ACS; i++) - if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE) - tfd_queue_msk |= BIT(vif->hw_queue[i]); - } - cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROMISC | MAC_FILTER_IN_CONTROL_AND_MGMT | MAC_FILTER_IN_BEACON | @@ -1049,83 +923,40 @@ static u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size) return ie - beacon; } -static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct sk_buff *beacon) +static u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct ieee80211_tx_info *info, + struct ieee80211_vif *vif) +{ + u8 rate; + + if (info->band == NL80211_BAND_5GHZ || vif->p2p) + rate = IWL_FIRST_OFDM_RATE; + else + rate = IWL_FIRST_CCK_RATE; + + return rate; +} + +static void iwl_mvm_mac_ctxt_set_tx(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct sk_buff *beacon, + struct iwl_tx_cmd *tx) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_host_cmd cmd = { - .id = BEACON_TEMPLATE_CMD, - .flags = CMD_ASYNC, - }; - union { - struct iwl_mac_beacon_cmd_v6 beacon_cmd_v6; - struct iwl_mac_beacon_cmd_v7 beacon_cmd; - } u = {}; - struct iwl_mac_beacon_cmd beacon_cmd = {}; struct ieee80211_tx_info *info; - u32 beacon_skb_len; - u32 rate, tx_flags; + u8 rate; + u32 tx_flags; - if (WARN_ON(!beacon)) - return -EINVAL; - - beacon_skb_len = beacon->len; - - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD)) { - u32 csa_offset, ecsa_offset; - - csa_offset = iwl_mvm_find_ie_offset(beacon->data, - WLAN_EID_CHANNEL_SWITCH, - beacon_skb_len); - ecsa_offset = - iwl_mvm_find_ie_offset(beacon->data, - WLAN_EID_EXT_CHANSWITCH_ANN, - beacon_skb_len); - - if (iwl_mvm_has_new_tx_api(mvm)) { - beacon_cmd.data.template_id = - cpu_to_le32((u32)mvmvif->id); - beacon_cmd.data.ecsa_offset = cpu_to_le32(ecsa_offset); - beacon_cmd.data.csa_offset = cpu_to_le32(csa_offset); - beacon_cmd.byte_cnt = cpu_to_le16((u16)beacon_skb_len); - if (vif->type == NL80211_IFTYPE_AP) - iwl_mvm_mac_ctxt_set_tim(mvm, - &beacon_cmd.data.tim_idx, - &beacon_cmd.data.tim_size, - beacon->data, - beacon_skb_len); - cmd.len[0] = sizeof(beacon_cmd); - cmd.data[0] = &beacon_cmd; - goto send; - - } else { - u.beacon_cmd.data.ecsa_offset = - cpu_to_le32(ecsa_offset); - u.beacon_cmd.data.csa_offset = cpu_to_le32(csa_offset); - cmd.len[0] = sizeof(u.beacon_cmd); - cmd.data[0] = &u; - } - } else { - cmd.len[0] = sizeof(u.beacon_cmd_v6); - cmd.data[0] = &u; - } - - /* TODO: for now the beacon template id is set to be the mac context id. - * Might be better to handle it as another resource ... 
*/ - u.beacon_cmd_v6.template_id = cpu_to_le32((u32)mvmvif->id); info = IEEE80211_SKB_CB(beacon); /* Set up TX command fields */ - u.beacon_cmd_v6.tx.len = cpu_to_le16((u16)beacon_skb_len); - u.beacon_cmd_v6.tx.sta_id = mvmvif->bcast_sta.sta_id; - u.beacon_cmd_v6.tx.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); + tx->len = cpu_to_le16((u16)beacon->len); + tx->sta_id = mvmvif->bcast_sta.sta_id; + tx->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); tx_flags = TX_CMD_FLG_SEQ_CTL | TX_CMD_FLG_TSF; tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, (void *)beacon->data, info, 0) << TX_CMD_FLG_BT_PRIO_POS; - u.beacon_cmd_v6.tx.tx_flags = cpu_to_le32(tx_flags); + tx->tx_flags = cpu_to_le32(tx_flags); if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION)) { @@ -1134,37 +965,141 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, mvm->mgmt_last_antenna_idx); } - u.beacon_cmd_v6.tx.rate_n_flags = + tx->rate_n_flags = cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS); - if (info->band == NL80211_BAND_5GHZ || vif->p2p) { - rate = IWL_FIRST_OFDM_RATE; - } else { - rate = IWL_FIRST_CCK_RATE; - u.beacon_cmd_v6.tx.rate_n_flags |= - cpu_to_le32(RATE_MCS_CCK_MSK); - } - u.beacon_cmd_v6.tx.rate_n_flags |= - cpu_to_le32(iwl_mvm_mac80211_idx_to_hwrate(rate)); + rate = iwl_mvm_mac_ctxt_get_lowest_rate(info, vif); - /* Set up TX beacon command fields */ - if (vif->type == NL80211_IFTYPE_AP) - iwl_mvm_mac_ctxt_set_tim(mvm, &u.beacon_cmd_v6.tim_idx, - &u.beacon_cmd_v6.tim_size, - beacon->data, - beacon_skb_len); + tx->rate_n_flags |= cpu_to_le32(iwl_mvm_mac80211_idx_to_hwrate(rate)); + if (rate == IWL_FIRST_CCK_RATE) + tx->rate_n_flags |= cpu_to_le32(RATE_MCS_CCK_MSK); -send: - /* Submit command */ +} + +static int iwl_mvm_mac_ctxt_send_beacon_cmd(struct iwl_mvm *mvm, + struct sk_buff *beacon, + void *data, int len) +{ + struct iwl_host_cmd cmd = { + .id = BEACON_TEMPLATE_CMD, + .flags = CMD_ASYNC, + }; + + cmd.len[0] = len; + cmd.data[0] = data; cmd.dataflags[0] = 0; - cmd.len[1] = beacon_skb_len; + cmd.len[1] = beacon->len; cmd.data[1] = beacon->data; cmd.dataflags[1] = IWL_HCMD_DFL_DUP; return iwl_mvm_send_cmd(mvm, &cmd); } +static int iwl_mvm_mac_ctxt_send_beacon_v6(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct sk_buff *beacon) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mac_beacon_cmd_v6 beacon_cmd = {}; + + iwl_mvm_mac_ctxt_set_tx(mvm, vif, beacon, &beacon_cmd.tx); + + beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id); + + if (vif->type == NL80211_IFTYPE_AP) + iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx, + &beacon_cmd.tim_size, + beacon->data, beacon->len); + + return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, + sizeof(beacon_cmd)); +} + +static int iwl_mvm_mac_ctxt_send_beacon_v7(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct sk_buff *beacon) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mac_beacon_cmd_v7 beacon_cmd = {}; + + iwl_mvm_mac_ctxt_set_tx(mvm, vif, beacon, &beacon_cmd.tx); + + beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id); + + if (vif->type == NL80211_IFTYPE_AP) + iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx, + &beacon_cmd.tim_size, + beacon->data, beacon->len); + + beacon_cmd.csa_offset = + cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, + WLAN_EID_CHANNEL_SWITCH, + beacon->len)); + beacon_cmd.ecsa_offset = + cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, + WLAN_EID_EXT_CHANSWITCH_ANN, + beacon->len)); + + 
return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, + sizeof(beacon_cmd)); +} + +static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct sk_buff *beacon) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(beacon); + struct iwl_mac_beacon_cmd beacon_cmd = {}; + u8 rate = iwl_mvm_mac_ctxt_get_lowest_rate(info, vif); + u16 flags; + + flags = iwl_mvm_mac80211_idx_to_hwrate(rate); + + if (rate == IWL_FIRST_CCK_RATE) + flags |= IWL_MAC_BEACON_CCK; + + beacon_cmd.flags = cpu_to_le16(flags); + beacon_cmd.byte_cnt = cpu_to_le16((u16)beacon->len); + beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id); + + if (vif->type == NL80211_IFTYPE_AP) + iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx, + &beacon_cmd.tim_size, + beacon->data, beacon->len); + + beacon_cmd.csa_offset = + cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, + WLAN_EID_CHANNEL_SWITCH, + beacon->len)); + beacon_cmd.ecsa_offset = + cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, + WLAN_EID_EXT_CHANSWITCH_ANN, + beacon->len)); + + return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, + sizeof(beacon_cmd)); +} + +static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct sk_buff *beacon) +{ + if (WARN_ON(!beacon)) + return -EINVAL; + + if (!fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD)) + return iwl_mvm_mac_ctxt_send_beacon_v6(mvm, vif, beacon); + + if (fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE)) + return iwl_mvm_mac_ctxt_send_beacon_v9(mvm, vif, beacon); + + return iwl_mvm_mac_ctxt_send_beacon_v7(mvm, vif, beacon); +} + /* The beacon template for the AP/GO/IBSS has changed and needs update */ int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif) @@ -1559,12 +1494,14 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac, /* TODO: implement start trigger */ - if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trigger)) + if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, + ieee80211_vif_to_wdev(vif), + trigger)) return; if (rx_missed_bcon_since_rx >= stop_trig_missed_bcon_since_rx || rx_missed_bcon >= stop_trig_missed_bcon) - iwl_mvm_fw_dbg_collect_trig(mvm, trigger, NULL); + iwl_fw_dbg_collect_trig(&mvm->fwrt, trigger, NULL); } void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index ce901be5fba8..3bcaa82f59b2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -87,7 +87,6 @@ #include "fw/error-dump.h" #include "iwl-prph.h" #include "iwl-nvm-parse.h" -#include "fw-dbg.h" static const struct ieee80211_iface_limit iwl_mvm_limits[] = { { @@ -446,8 +445,18 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR); if (iwl_mvm_has_new_rx_api(mvm)) ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER); - if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_STA_PM_NOTIF)) + + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_STA_PM_NOTIF)) { ieee80211_hw_set(hw, AP_LINK_PS); + } else if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) { + /* + * we absolutely need this for the new TX API since that comes + * with many more queues than the current code can deal with + * for station powersave + */ + return -EINVAL; + } if (mvm->trans->num_rx_queues > 
1) ieee80211_hw_set(hw, USES_RSS); @@ -455,10 +464,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) if (mvm->trans->max_skb_frags) hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG; - if (!iwl_mvm_is_dqa_supported(mvm)) - hw->queues = mvm->first_agg_queue; - else - hw->queues = IEEE80211_MAX_QUEUES; + hw->queues = IEEE80211_MAX_QUEUES; hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE; hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC | IEEE80211_RADIOTAP_MCS_HAVE_STBC; @@ -799,17 +805,16 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, goto drop; } - if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE && + if (info->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE && !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) && !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) goto drop; - /* treat non-bufferable MMPDUs as broadcast if sta is sleeping */ - if (unlikely(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER && - ieee80211_is_mgmt(hdr->frame_control) && - !ieee80211_is_deauth(hdr->frame_control) && - !ieee80211_is_disassoc(hdr->frame_control) && - !ieee80211_is_action(hdr->frame_control))) + /* treat non-bufferable MMPDUs on AP interfaces as broadcast */ + if ((info->control.vif->type == NL80211_IFTYPE_AP || + info->control.vif->type == NL80211_IFTYPE_ADHOC) && + ieee80211_is_mgmt(hdr->frame_control) && + !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) sta = NULL; if (sta) { @@ -845,11 +850,11 @@ static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg) return true; } -#define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) \ - do { \ - if (!(le16_to_cpu(_tid_bm) & BIT(_tid))) \ - break; \ - iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt); \ +#define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) 
\ + do { \ + if (!(le16_to_cpu(_tid_bm) & BIT(_tid))) \ + break; \ + iwl_fw_dbg_collect_trig(&(_mvm)->fwrt, _trig, _fmt); \ } while (0) static void @@ -866,7 +871,8 @@ iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif, trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA); ba_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) + if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, + ieee80211_vif_to_wdev(vif), trig)) return; switch (action) { @@ -1029,8 +1035,8 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) * on D3->D0 transition */ if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) { - mvm->fw_dump_desc = &iwl_mvm_dump_desc_assert; - iwl_mvm_fw_error_dump(mvm); + mvm->fwrt.dump.desc = &iwl_dump_desc_assert; + iwl_fw_error_dump(&mvm->fwrt); } /* cleanup all stale references (scan, roc), but keep the @@ -1059,9 +1065,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) iwl_mvm_reset_phy_ctxts(mvm); memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table)); - memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained)); memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames)); - memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained)); memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif)); memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd)); @@ -1072,7 +1076,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) mvm->vif_count = 0; mvm->rx_ba_sessions = 0; - mvm->fw_dbg_conf = FW_DBG_INVALID; + mvm->fwrt.dump.conf = FW_DBG_INVALID; /* keep statistics ticking */ iwl_mvm_accu_radio_stats(mvm); @@ -1255,16 +1259,16 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw) * Lock and clear the firmware running bit here already, so that * new commands coming in elsewhere, e.g. from debugfs, will not * be able to proceed. This is important here because one of those - * debugfs files causes the fw_dump_wk to be triggered, and if we + * debugfs files causes the firmware dump to be triggered, and if we * don't stop debugfs accesses before canceling that it could be * retriggered after we flush it but before we've cleared the bit. 
*/ clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); - cancel_delayed_work_sync(&mvm->fw_dump_wk); + iwl_fw_cancel_dump(&mvm->fwrt); cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork); cancel_delayed_work_sync(&mvm->scan_timeout_dwork); - iwl_mvm_free_fw_dump_desc(mvm); + iwl_fw_free_dump_desc(&mvm->fwrt); mutex_lock(&mvm->mutex); __iwl_mvm_mac_stop(mvm); @@ -1370,17 +1374,15 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, goto out_release; } - if (iwl_mvm_is_dqa_supported(mvm)) { - /* - * Only queue for this station is the mcast queue, - * which shouldn't be in TFD mask anyway - */ - ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->mcast_sta, - 0, vif->type, - IWL_STA_MULTICAST); - if (ret) - goto out_release; - } + /* + * Only queue for this station is the mcast queue, + * which shouldn't be in TFD mask anyway + */ + ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->mcast_sta, + 0, vif->type, + IWL_STA_MULTICAST); + if (ret) + goto out_release; iwl_mvm_vif_dbgfs_register(mvm, vif); goto out_unlock; @@ -1426,7 +1428,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, if (ret) goto out_unref_phy; - ret = iwl_mvm_add_bcast_sta(mvm, vif); + ret = iwl_mvm_add_p2p_bcast_sta(mvm, vif); if (ret) goto out_unbind; @@ -1454,8 +1456,6 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, out_release: if (vif->type != NL80211_IFTYPE_P2P_DEVICE) mvm->vif_count--; - - iwl_mvm_mac_ctxt_release(mvm, vif); out_unlock: mutex_unlock(&mvm->mutex); @@ -1467,40 +1467,6 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { - u32 tfd_msk = iwl_mvm_mac_get_queues_mask(vif); - - if (tfd_msk && !iwl_mvm_is_dqa_supported(mvm)) { - /* - * mac80211 first removes all the stations of the vif and - * then removes the vif. When it removes a station it also - * flushes the AMPDU session. So by now, all the AMPDU sessions - * of all the stations of this vif are closed, and the queues - * of these AMPDU sessions are properly closed. - * We still need to take care of the shared queues of the vif. - * Flush them here. - * For DQA mode there is no need - broacast and multicast queue - * are flushed separately. - */ - mutex_lock(&mvm->mutex); - iwl_mvm_flush_tx_path(mvm, tfd_msk, 0); - mutex_unlock(&mvm->mutex); - - /* - * There are transports that buffer a few frames in the host. - * For these, the flush above isn't enough since while we were - * flushing, the transport might have sent more frames to the - * device. To solve this, wait here until the transport is - * empty. Technically, this could have replaced the flush - * above, but flush is much faster than draining. So flush - * first, and drain to make sure we have no frames in the - * transport anymore. - * If a station still had frames on the shared queues, it is - * already marked as draining, so to complete the draining, we - * just need to wait until the transport is empty. - */ - iwl_trans_wait_tx_queues_empty(mvm->trans, tfd_msk); - } - if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { /* * Flush the ROC worker which will flush the OFFCHANNEL queue. @@ -1508,14 +1474,6 @@ static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm, * queue are sent in ROC session. */ flush_work(&mvm->roc_done_wk); - } else { - /* - * By now, all the AC queues are empty. The AGG queues are - * empty too. We already got all the Tx responses for all the - * packets in the queues. The drain work can have been - * triggered. Flush it. 
- */ - flush_work(&mvm->sta_drained_wk); } } @@ -1556,7 +1514,7 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { mvm->p2p_device_vif = NULL; - iwl_mvm_rm_bcast_sta(mvm, vif); + iwl_mvm_rm_p2p_bcast_sta(mvm, vif); iwl_mvm_binding_remove_vif(mvm, vif); iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); mvmvif->phy_ctxt = NULL; @@ -1569,7 +1527,6 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, iwl_mvm_mac_ctxt_remove(mvm, vif); out_release: - iwl_mvm_mac_ctxt_release(mvm, vif); mutex_unlock(&mvm->mutex); } @@ -1589,6 +1546,11 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac, struct iwl_mvm_mc_iter_data *data = _data; struct iwl_mvm *mvm = data->mvm; struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd; + struct iwl_host_cmd hcmd = { + .id = MCAST_FILTER_CMD, + .flags = CMD_ASYNC, + .dataflags[0] = IWL_HCMD_DFL_NOCOPY, + }; int ret, len; /* if we don't have free ports, mcast frames will be dropped */ @@ -1603,7 +1565,10 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac, memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN); len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4); - ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd); + hcmd.len[0] = len; + hcmd.data[0] = cmd; + + ret = iwl_mvm_send_cmd(mvm, &hcmd); if (ret) IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret); } @@ -1678,6 +1643,12 @@ static void iwl_mvm_configure_filter(struct ieee80211_hw *hw, if (!cmd) goto out; + if (changed_flags & FIF_ALLMULTI) + cmd->pass_all = !!(*total_flags & FIF_ALLMULTI); + + if (cmd->pass_all) + cmd->count = 0; + iwl_mvm_recalc_multicast(mvm); out: mutex_unlock(&mvm->mutex); @@ -2067,8 +2038,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, * We received a beacon from the associated AP so * remove the session protection. */ - iwl_mvm_remove_time_event(mvm, mvmvif, - &mvmvif->time_event_data); + iwl_mvm_stop_session_protection(mvm, vif); iwl_mvm_sf_update(mvm, vif, false); WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); @@ -2405,15 +2375,18 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, unsigned long txqs = 0, tids = 0; int tid; + /* + * If we have TVQM then we get too high queue numbers - luckily + * we really shouldn't get here with that because such hardware + * should have firmware supporting buffer station offload. 
+ */ + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) + return; + spin_lock_bh(&mvmsta->lock); for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; - if (!iwl_mvm_is_dqa_supported(mvm) && - tid_data->state != IWL_AGG_ON && - tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA) - continue; - if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE) continue; @@ -2427,9 +2400,6 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, switch (cmd) { case STA_NOTIFY_SLEEP: - if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0) - ieee80211_sta_block_awake(hw, sta, true); - for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT) ieee80211_sta_set_buffered(sta, tid, true); @@ -2572,7 +2542,8 @@ iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm, trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TDLS); tdls_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) + if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, + ieee80211_vif_to_wdev(vif), trig)) return; if (!(tdls_trig->action_bitmap & BIT(action))) @@ -2582,9 +2553,9 @@ iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm, memcmp(tdls_trig->peer, peer_addr, ETH_ALEN) != 0) return; - iwl_mvm_fw_dbg_collect_trig(mvm, trig, - "TDLS event occurred, peer %pM, action %d", - peer_addr, action); + iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, + "TDLS event occurred, peer %pM, action %d", + peer_addr, action); } static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm, @@ -2606,7 +2577,7 @@ static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm, * queues, so we should never get a second deferred * frame for the RA/TID. */ - iwl_mvm_start_mac_queues(mvm, info->hw_queue); + iwl_mvm_start_mac_queues(mvm, BIT(info->hw_queue)); ieee80211_free_txskb(mvm->hw, skb); } } @@ -2631,9 +2602,6 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, if (WARN_ON_ONCE(!mvmvif->phy_ctxt)) return -EINVAL; - /* if a STA is being removed, reuse its ID */ - flush_work(&mvm->sta_drained_wk); - /* * If we are in a STA removal flow and in DQA mode: * @@ -2648,8 +2616,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, * make sure the worker is no longer handling frames for this STA. */ if (old_state == IEEE80211_STA_NONE && - new_state == IEEE80211_STA_NOTEXIST && - iwl_mvm_is_dqa_supported(mvm)) { + new_state == IEEE80211_STA_NOTEXIST) { iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta); flush_work(&mvm->add_stream_wk); @@ -3892,7 +3859,9 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n", chsw->chandef.center_freq1); - iwl_fw_dbg_trigger_simple_stop(mvm, vif, FW_DBG_TRIGGER_CHANNEL_SWITCH); + iwl_fw_dbg_trigger_simple_stop(&mvm->fwrt, + ieee80211_vif_to_wdev(vif), + FW_DBG_TRIGGER_CHANNEL_SWITCH); switch (vif->type) { case NL80211_IFTYPE_AP: @@ -3931,11 +3900,16 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, /* Schedule the time event to a bit before beacon 1, * to make sure we're in the new channel when the - * GO/AP arrives. + * GO/AP arrives. In case count <= 1 immediately schedule the + * TE (this might result with some packet loss or connection + * loss). 
*/ - apply_time = chsw->device_timestamp + - ((vif->bss_conf.beacon_int * (chsw->count - 1) - - IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024); + if (chsw->count <= 1) + apply_time = 0; + else + apply_time = chsw->device_timestamp + + ((vif->bss_conf.beacon_int * (chsw->count - 1) - + IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024); if (chsw->block_tx) iwl_mvm_csa_client_absent(mvm, vif); @@ -4015,6 +3989,43 @@ static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, return ret; } +static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop) +{ + if (drop) { + if (iwl_mvm_has_new_tx_api(mvm)) + /* TODO new tx api */ + WARN_ONCE(1, + "Need to implement flush TX queue\n"); + else + iwl_mvm_flush_tx_path(mvm, + iwl_mvm_flushable_queues(mvm) & queues, + 0); + } else { + if (iwl_mvm_has_new_tx_api(mvm)) { + struct ieee80211_sta *sta; + int i; + + mutex_lock(&mvm->mutex); + + for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) { + sta = rcu_dereference_protected( + mvm->fw_id_to_mac_id[i], + lockdep_is_held(&mvm->mutex)); + if (IS_ERR_OR_NULL(sta)) + continue; + + iwl_mvm_wait_sta_queues_empty(mvm, + iwl_mvm_sta_from_mac80211(sta)); + } + + mutex_unlock(&mvm->mutex); + } else { + iwl_trans_wait_tx_queues_empty(mvm->trans, + queues); + } + } +} + static void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, bool drop) { @@ -4025,12 +4036,16 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw, int i; u32 msk = 0; - if (!vif || vif->type != NL80211_IFTYPE_STATION) + if (!vif) { + iwl_mvm_flush_no_vif(mvm, queues, drop); + return; + } + + if (vif->type != NL80211_IFTYPE_STATION) return; /* Make sure we're done with the deferred traffic before flushing */ - if (iwl_mvm_is_dqa_supported(mvm)) - flush_work(&mvm->add_stream_wk); + flush_work(&mvm->add_stream_wk); mutex_lock(&mvm->mutex); mvmvif = iwl_mvm_vif_from_mac80211(vif); @@ -4167,11 +4182,11 @@ static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const struct ieee80211_event *event) { -#define CHECK_MLME_TRIGGER(_cnt, _fmt...) \ - do { \ - if ((trig_mlme->_cnt) && --(trig_mlme->_cnt)) \ - break; \ - iwl_mvm_fw_dbg_collect_trig(mvm, trig, _fmt); \ +#define CHECK_MLME_TRIGGER(_cnt, _fmt...) 
\ + do { \ + if ((trig_mlme->_cnt) && --(trig_mlme->_cnt)) \ + break; \ + iwl_fw_dbg_collect_trig(&(mvm)->fwrt, trig, _fmt); \ } while (0) struct iwl_fw_dbg_trigger_tlv *trig; @@ -4182,7 +4197,8 @@ static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm, trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME); trig_mlme = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) + if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, + ieee80211_vif_to_wdev(vif), trig)) return; if (event->u.mlme.data == ASSOC_EVENT) { @@ -4223,16 +4239,17 @@ static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm, trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA); ba_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) + if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, + ieee80211_vif_to_wdev(vif), trig)) return; if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid))) return; - iwl_mvm_fw_dbg_collect_trig(mvm, trig, - "BAR received from %pM, tid %d, ssn %d", - event->u.ba.sta->addr, event->u.ba.tid, - event->u.ba.ssn); + iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, + "BAR received from %pM, tid %d, ssn %d", + event->u.ba.sta->addr, event->u.ba.tid, + event->u.ba.ssn); } static void @@ -4248,15 +4265,16 @@ iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm, trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA); ba_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) + if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, + ieee80211_vif_to_wdev(vif), trig)) return; if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(event->u.ba.tid))) return; - iwl_mvm_fw_dbg_collect_trig(mvm, trig, - "Frame from %pM timed out, tid %d", - event->u.ba.sta->addr, event->u.ba.tid); + iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, + "Frame from %pM timed out, tid %d", + event->u.ba.sta->addr, event->u.ba.tid); } static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw, @@ -4290,7 +4308,8 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); /* TODO - remove a000 disablement when we have RXQ config API */ - if (!iwl_mvm_has_new_rx_api(mvm) || iwl_mvm_has_new_tx_api(mvm)) + if (!iwl_mvm_has_new_rx_api(mvm) || + mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_A000) return; notif->cookie = mvm->queue_sync_cookie; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index ddd8719f27b8..83303bac0e4b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -87,6 +87,8 @@ #include "fw-api.h" #include "constants.h" #include "tof.h" +#include "fw/runtime.h" +#include "fw/dbg.h" #define IWL_MVM_MAX_ADDRESSES 5 /* RSSI offset for WkP */ @@ -119,6 +121,9 @@ */ #define IWL_MVM_CS_UNBLOCK_TX_TIMEOUT 3 +/* offchannel queue towards mac80211 */ +#define IWL_MVM_OFFCHANNEL_QUEUE 0 + extern const struct ieee80211_ops iwl_mvm_hw_ops; /** @@ -137,34 +142,6 @@ struct iwl_mvm_mod_params { }; extern struct iwl_mvm_mod_params iwlmvm_mod_params; -/** - * struct iwl_mvm_dump_ptrs - set of pointers needed for the fw-error-dump - * - * @op_mode_ptr: pointer to the buffer coming from the mvm op_mode - * @trans_ptr: pointer to struct %iwl_trans_dump_data which contains the - * transport's data. 
- * @trans_len: length of the valid data in trans_ptr - * @op_mode_len: length of the valid data in op_mode_ptr - */ -struct iwl_mvm_dump_ptrs { - struct iwl_trans_dump_data *trans_ptr; - void *op_mode_ptr; - u32 op_mode_len; -}; - -/** - * struct iwl_mvm_dump_desc - describes the dump - * @len: length of trig_desc->data - * @trig_desc: the description of the dump - */ -struct iwl_mvm_dump_desc { - size_t len; - /* must be last */ - struct iwl_fw_error_dump_trigger_desc trig_desc; -}; - -extern const struct iwl_mvm_dump_desc iwl_mvm_dump_desc_assert; - struct iwl_mvm_phy_ctxt { u16 id; u16 color; @@ -606,19 +583,6 @@ enum iwl_mvm_tdls_cs_state { IWL_MVM_TDLS_SW_ACTIVE, }; -#define MAX_NUM_LMAC 2 -struct iwl_mvm_shared_mem_cfg { - int num_lmacs; - int num_txfifo_entries; - struct { - u32 txfifo_size[TX_FIFO_MAX_NUM]; - u32 rxfifo1_size; - } lmac[MAX_NUM_LMAC]; - u32 rxfifo2_size; - u32 internal_txfifo_addr; - u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM]; -}; - /** * struct iwl_mvm_reorder_buffer - per ra/tid/queue reorder buffer * @head_sn: reorder window head sn @@ -766,7 +730,6 @@ struct iwl_mvm { */ struct iwl_mvm_vif *bf_allowed_vif; - enum iwl_ucode_type cur_ucode; bool hw_registered; bool calibrating; u32 error_event_table[2]; @@ -815,10 +778,7 @@ struct iwl_mvm { /* NVM sections */ struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS]; - /* Paging section */ - struct iwl_fw_paging fw_paging_db[NUM_OF_FW_PAGING_BLOCKS]; - u16 num_of_paging_blk; - u16 num_of_pages_in_last_blk; + struct iwl_fw_runtime fwrt; /* EEPROM MAC addresses */ struct mac_address addresses[IWL_MVM_MAX_ADDRESSES]; @@ -826,11 +786,7 @@ struct iwl_mvm { /* data related to data path */ struct iwl_rx_phy_info last_phy_info; struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT]; - struct work_struct sta_drained_wk; unsigned long sta_deferred_frames[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)]; - unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)]; - atomic_t pending_frames[IWL_MVM_STATION_COUNT]; - u32 tfd_drained[IWL_MVM_STATION_COUNT]; u8 rx_ba_sessions; /* configured by mac80211 */ @@ -847,9 +803,6 @@ struct iwl_mvm { /* max number of simultaneous scans the FW supports */ unsigned int max_scans; - /* ts of the beginning of a non-collect fw dbg data period */ - unsigned long fw_dbg_non_collect_ts_start[FW_DBG_TRIGGER_MAX - 1]; - /* UMAC scan tracking */ u32 scan_uid_status[IWL_MVM_MAX_UMAC_SCANS]; @@ -925,10 +878,6 @@ struct iwl_mvm { /* -1 for always, 0 for never, >0 for that many times */ s8 fw_restart; - u8 fw_dbg_conf; - struct delayed_work fw_dump_wk; - const struct iwl_mvm_dump_desc *fw_dump_desc; - const struct iwl_fw_dbg_trigger_tlv *fw_dump_trig; #ifdef CONFIG_IWLWIFI_LEDS struct led_classdev led; @@ -975,8 +924,6 @@ struct iwl_mvm { struct iwl_bt_coex_profile_notif last_bt_notif; struct iwl_bt_coex_ci_cmd last_bt_ci_cmd; - u32 last_ant_isol; - u8 last_corun_lut; u8 bt_tx_prio; enum iwl_bt_force_ant_mode bt_force_ant_mode; @@ -1010,9 +957,6 @@ struct iwl_mvm { u16 probe_queue; u16 p2p_dev_queue; - u8 first_agg_queue; - u8 last_agg_queue; - /* Indicate if device power save is allowed */ u8 ps_disabled; /* u8 instead of bool to ease debugfs_create_* usage */ unsigned int max_amsdu_len; /* used for debugfs only */ @@ -1055,7 +999,6 @@ struct iwl_mvm { } peer; } tdls_cs; - struct iwl_mvm_shared_mem_cfg smem_cfg; u32 ciphers[IWL_MVM_NUM_CIPHERS]; struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS]; @@ -1095,7 +1038,6 @@ struct iwl_mvm { * @IWL_MVM_STATUS_IN_D0I3: NIC is in D0i3 * 
@IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running * @IWL_MVM_STATUS_D3_RECONFIG: D3 reconfiguration is being done - * @IWL_MVM_STATUS_DUMPING_FW_LOG: FW log is being dumped * @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running */ enum iwl_mvm_status { @@ -1107,7 +1049,6 @@ enum iwl_mvm_status { IWL_MVM_STATUS_IN_D0I3, IWL_MVM_STATUS_ROC_AUX_RUNNING, IWL_MVM_STATUS_D3_RECONFIG, - IWL_MVM_STATUS_DUMPING_FW_LOG, IWL_MVM_STATUS_FIRMWARE_RUNNING, }; @@ -1180,12 +1121,6 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm) IWL_UCODE_TLV_CAPA_D0I3_SUPPORT); } -static inline bool iwl_mvm_is_dqa_supported(struct iwl_mvm *mvm) -{ - return fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_DQA_SUPPORT); -} - static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm) { /* For now we only use this mode to differentiate between @@ -1238,13 +1173,6 @@ static inline bool iwl_mvm_is_wifi_mcc_supported(struct iwl_mvm *mvm) IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC); } -static inline bool iwl_mvm_bt_is_plcr_supported(struct iwl_mvm *mvm) -{ - return fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_BT_COEX_PLCR) && - IWL_MVM_BT_COEX_CORUNNING; -} - static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm) { return fw_has_capa(&mvm->fw->ucode_capa, @@ -1287,6 +1215,12 @@ static inline bool iwl_mvm_has_new_tx_api(struct iwl_mvm *mvm) return mvm->trans->cfg->use_tfh; } +static inline bool iwl_mvm_has_unified_ucode(struct iwl_mvm *mvm) +{ + /* TODO - better define this */ + return mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_A000; +} + static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm) { /* @@ -1308,6 +1242,12 @@ static inline bool iwl_mvm_has_new_rx_stats_api(struct iwl_mvm *mvm) IWL_UCODE_TLV_API_NEW_RX_STATS); } +static inline bool iwl_mvm_has_new_ats_coex_api(struct iwl_mvm *mvm) +{ + return fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_COEX_ATS_EXTERNAL); +} + static inline struct agg_tx_status * iwl_mvm_get_agg_status(struct iwl_mvm *mvm, void *tx_resp) { @@ -1340,6 +1280,14 @@ static inline bool iwl_mvm_is_ctdp_supported(struct iwl_mvm *mvm) } extern const u8 iwl_mvm_ac_to_tx_fifo[]; +extern const u8 iwl_mvm_ac_to_gen2_tx_fifo[]; + +static inline u8 iwl_mvm_mac_ac_to_tx_fifo(struct iwl_mvm *mvm, + enum ieee80211_ac_numbers ac) +{ + return iwl_mvm_has_new_tx_api(mvm) ? + iwl_mvm_ac_to_gen2_tx_fifo[ac] : iwl_mvm_ac_to_tx_fifo[ac]; +} struct iwl_rate_info { u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. 
*/ @@ -1425,8 +1373,7 @@ int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear); void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm); /* NVM */ -int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic); -int iwl_mvm_nvm_get_from_fw(struct iwl_mvm *mvm); +int iwl_nvm_init(struct iwl_mvm *mvm); int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm); int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm); @@ -1510,7 +1457,6 @@ u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef); /* MAC (virtual interface) programming */ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif); -void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool force_assoc_off, const u8 *bssid_override); @@ -1573,9 +1519,6 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); -/* Paging */ -void iwl_free_fw_paging(struct iwl_mvm *mvm); - /* MVM debugfs */ #ifdef CONFIG_IWLWIFI_DEBUGFS int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir); @@ -1619,6 +1562,7 @@ void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm, #ifdef CONFIG_IWLWIFI_LEDS int iwl_mvm_leds_init(struct iwl_mvm *mvm); void iwl_mvm_leds_exit(struct iwl_mvm *mvm); +void iwl_mvm_leds_sync(struct iwl_mvm *mvm); #else static inline int iwl_mvm_leds_init(struct iwl_mvm *mvm) { @@ -1627,6 +1571,9 @@ static inline int iwl_mvm_leds_init(struct iwl_mvm *mvm) static inline void iwl_mvm_leds_exit(struct iwl_mvm *mvm) { } +static inline void iwl_mvm_leds_sync(struct iwl_mvm *mvm) +{ +} #endif /* D3 (WoWLAN, NetDetect) */ @@ -1764,10 +1711,6 @@ bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue, u8 sta_id, u8 tid, unsigned int timeout); -/* - * Disable a TXQ. - * Note that in non-DQA mode the %mac80211_queue and %tid params are ignored. - */ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, u8 tid, u8 flags); int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq); @@ -1777,33 +1720,15 @@ int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq); */ static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm) { - u32 cmd_queue = iwl_mvm_is_dqa_supported(mvm) ? 
IWL_MVM_DQA_CMD_QUEUE : - IWL_MVM_CMD_QUEUE; - return ((BIT(mvm->cfg->base_params->num_of_queues) - 1) & - ~BIT(cmd_queue)); -} - -static inline -void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, - u8 fifo, u16 ssn, unsigned int wdg_timeout) -{ - struct iwl_trans_txq_scd_cfg cfg = { - .fifo = fifo, - .tid = IWL_MAX_TID_COUNT, - .aggregate = false, - .frame_limit = IWL_FRAME_LIMIT, - }; - - iwl_mvm_enable_txq(mvm, queue, mac80211_queue, ssn, &cfg, wdg_timeout); + ~BIT(IWL_MVM_DQA_CMD_QUEUE)); } static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm) { - if (!iwl_mvm_has_new_tx_api(mvm)) - iwl_free_fw_paging(mvm); + iwl_free_fw_paging(&mvm->fwrt); clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); - mvm->fw_dbg_conf = FW_DBG_INVALID; + iwl_fw_dump_conf_clear(&mvm->fwrt); iwl_trans_stop_device(mvm->trans); } @@ -1826,6 +1751,7 @@ void iwl_mvm_thermal_exit(struct iwl_mvm *mvm); void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state); int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp); void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm); int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm); int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 budget); @@ -1899,21 +1825,7 @@ int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif, u32 duration, u32 timeout); bool iwl_mvm_lqm_active(struct iwl_mvm *mvm); -#ifdef CONFIG_ACPI int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b); int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm); -#else -static inline -int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b) -{ - return -ENOENT; -} - -static inline -int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm) -{ - return -ENOENT; -} -#endif /* CONFIG_ACPI */ #endif /* __IWL_MVM_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index dac7e542a190..422aa6be9932 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -292,7 +292,8 @@ static struct iwl_nvm_data * iwl_parse_nvm_sections(struct iwl_mvm *mvm) { struct iwl_nvm_section *sections = mvm->nvm_sections; - const __le16 *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku; + const __be16 *hw; + const __le16 *sw, *calib, *regulatory, *mac_override, *phy_sku; bool lar_enabled; /* Checking for required sections */ @@ -326,10 +327,7 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm) } } - if (WARN_ON(!mvm->cfg)) - return NULL; - - hw = (const __le16 *)sections[mvm->cfg->nvm_hw_section_num].data; + hw = (const __be16 *)sections[mvm->cfg->nvm_hw_section_num].data; sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data; calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data; regulatory = (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data; @@ -546,101 +544,7 @@ int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm) return ret; } -int iwl_mvm_nvm_get_from_fw(struct iwl_mvm *mvm) -{ - struct iwl_nvm_get_info cmd = {}; - struct iwl_nvm_get_info_rsp *rsp; - struct iwl_trans *trans = mvm->trans; - struct iwl_host_cmd hcmd = { - .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL, - .data = { &cmd, }, - .len = { sizeof(cmd) }, - .id = WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_GET_INFO) - }; - int ret; - bool lar_fw_supported = !iwlwifi_mod_params.lar_disable && - fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_LAR_SUPPORT); - - lockdep_assert_held(&mvm->mutex); - - ret = 
iwl_mvm_send_cmd(mvm, &hcmd); - if (ret) - return ret; - - if (WARN(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp), - "Invalid payload len in NVM response from FW %d", - iwl_rx_packet_payload_len(hcmd.resp_pkt))) { - ret = -EINVAL; - goto out; - } - - rsp = (void *)hcmd.resp_pkt->data; - if (le32_to_cpu(rsp->general.flags)) { - IWL_ERR(mvm, "Invalid NVM data from FW\n"); - ret = -EINVAL; - goto out; - } - - mvm->nvm_data = kzalloc(sizeof(*mvm->nvm_data) + - sizeof(struct ieee80211_channel) * - IWL_NUM_CHANNELS, GFP_KERNEL); - if (!mvm->nvm_data) { - ret = -ENOMEM; - goto out; - } - - iwl_set_hw_address_from_csr(trans, mvm->nvm_data); - /* TODO: if platform NVM has MAC address - override it here */ - - if (!is_valid_ether_addr(mvm->nvm_data->hw_addr)) { - IWL_ERR(trans, "no valid mac address was found\n"); - ret = -EINVAL; - goto err_free; - } - - IWL_INFO(trans, "base HW address: %pM\n", mvm->nvm_data->hw_addr); - - /* Initialize general data */ - mvm->nvm_data->nvm_version = le16_to_cpu(rsp->general.nvm_version); - - /* Initialize MAC sku data */ - mvm->nvm_data->sku_cap_11ac_enable = - le32_to_cpu(rsp->mac_sku.enable_11ac); - mvm->nvm_data->sku_cap_11n_enable = - le32_to_cpu(rsp->mac_sku.enable_11n); - mvm->nvm_data->sku_cap_band_24GHz_enable = - le32_to_cpu(rsp->mac_sku.enable_24g); - mvm->nvm_data->sku_cap_band_52GHz_enable = - le32_to_cpu(rsp->mac_sku.enable_5g); - mvm->nvm_data->sku_cap_mimo_disabled = - le32_to_cpu(rsp->mac_sku.mimo_disable); - - /* Initialize PHY sku data */ - mvm->nvm_data->valid_tx_ant = (u8)le32_to_cpu(rsp->phy_sku.tx_chains); - mvm->nvm_data->valid_rx_ant = (u8)le32_to_cpu(rsp->phy_sku.rx_chains); - - /* Initialize regulatory data */ - mvm->nvm_data->lar_enabled = - le32_to_cpu(rsp->regulatory.lar_enabled) && lar_fw_supported; - - iwl_init_sbands(trans->dev, trans->cfg, mvm->nvm_data, - rsp->regulatory.channel_profile, - mvm->nvm_data->valid_tx_ant & mvm->fw->valid_tx_ant, - mvm->nvm_data->valid_rx_ant & mvm->fw->valid_rx_ant, - rsp->regulatory.lar_enabled && lar_fw_supported); - - iwl_free_resp(&hcmd); - return 0; - -err_free: - kfree(mvm->nvm_data); -out: - iwl_free_resp(&hcmd); - return ret; -} - -int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic) +int iwl_nvm_init(struct iwl_mvm *mvm) { int ret, section; u32 size_read = 0; @@ -651,63 +555,61 @@ int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic) return -EINVAL; /* load NVM values from nic */ - if (read_nvm_from_nic) { - /* Read From FW NVM */ - IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n"); + /* Read From FW NVM */ + IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n"); - nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size, - GFP_KERNEL); - if (!nvm_buffer) - return -ENOMEM; - for (section = 0; section < NVM_MAX_NUM_SECTIONS; section++) { - /* we override the constness for initial read */ - ret = iwl_nvm_read_section(mvm, section, nvm_buffer, - size_read); - if (ret < 0) - continue; - size_read += ret; - temp = kmemdup(nvm_buffer, ret, GFP_KERNEL); - if (!temp) { - ret = -ENOMEM; - break; - } + nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size, + GFP_KERNEL); + if (!nvm_buffer) + return -ENOMEM; + for (section = 0; section < NVM_MAX_NUM_SECTIONS; section++) { + /* we override the constness for initial read */ + ret = iwl_nvm_read_section(mvm, section, nvm_buffer, + size_read); + if (ret < 0) + continue; + size_read += ret; + temp = kmemdup(nvm_buffer, ret, GFP_KERNEL); + if (!temp) { + ret = -ENOMEM; + break; + } - iwl_mvm_nvm_fixups(mvm, section, temp, ret); 
+ iwl_mvm_nvm_fixups(mvm, section, temp, ret); - mvm->nvm_sections[section].data = temp; - mvm->nvm_sections[section].length = ret; + mvm->nvm_sections[section].data = temp; + mvm->nvm_sections[section].length = ret; #ifdef CONFIG_IWLWIFI_DEBUGFS - switch (section) { - case NVM_SECTION_TYPE_SW: - mvm->nvm_sw_blob.data = temp; - mvm->nvm_sw_blob.size = ret; + switch (section) { + case NVM_SECTION_TYPE_SW: + mvm->nvm_sw_blob.data = temp; + mvm->nvm_sw_blob.size = ret; + break; + case NVM_SECTION_TYPE_CALIBRATION: + mvm->nvm_calib_blob.data = temp; + mvm->nvm_calib_blob.size = ret; + break; + case NVM_SECTION_TYPE_PRODUCTION: + mvm->nvm_prod_blob.data = temp; + mvm->nvm_prod_blob.size = ret; + break; + case NVM_SECTION_TYPE_PHY_SKU: + mvm->nvm_phy_sku_blob.data = temp; + mvm->nvm_phy_sku_blob.size = ret; + break; + default: + if (section == mvm->cfg->nvm_hw_section_num) { + mvm->nvm_hw_blob.data = temp; + mvm->nvm_hw_blob.size = ret; break; - case NVM_SECTION_TYPE_CALIBRATION: - mvm->nvm_calib_blob.data = temp; - mvm->nvm_calib_blob.size = ret; - break; - case NVM_SECTION_TYPE_PRODUCTION: - mvm->nvm_prod_blob.data = temp; - mvm->nvm_prod_blob.size = ret; - break; - case NVM_SECTION_TYPE_PHY_SKU: - mvm->nvm_phy_sku_blob.data = temp; - mvm->nvm_phy_sku_blob.size = ret; - break; - default: - if (section == mvm->cfg->nvm_hw_section_num) { - mvm->nvm_hw_blob.data = temp; - mvm->nvm_hw_blob.size = ret; - break; - } } -#endif } - if (!size_read) - IWL_ERR(mvm, "OTP is blank\n"); - kfree(nvm_buffer); +#endif } + if (!size_read) + IWL_ERR(mvm, "OTP is blank\n"); + kfree(nvm_buffer); /* Only if PNVM selected in the mod param - load external NVM */ if (mvm->nvm_file_name) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 9c175d5e9d67..231878969332 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -82,11 +82,10 @@ #include "iwl-io.h" #include "iwl-prph.h" #include "rs.h" -#include "fw-api-scan.h" +#include "fw/api/scan.h" #include "time-event.h" -#include "fw-dbg.h" #include "fw-api.h" -#include "fw-api-scan.h" +#include "fw/api/scan.h" #define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux" MODULE_DESCRIPTION(DRV_DESCRIPTION); @@ -257,8 +256,6 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { RX_HANDLER_ASYNC_LOCKED), RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics, RX_HANDLER_ASYNC_LOCKED), - RX_HANDLER(ANTENNA_COUPLING_NOTIFICATION, - iwl_mvm_rx_ant_coupling_notif, RX_HANDLER_ASYNC_LOCKED), RX_HANDLER(BA_WINDOW_STATUS_NOTIFICATION_ID, iwl_mvm_window_status_notif, RX_HANDLER_SYNC), @@ -326,7 +323,6 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { HCMD_NAME(INIT_COMPLETE_NOTIF), HCMD_NAME(PHY_CONTEXT_CMD), HCMD_NAME(DBG_CFG), - HCMD_NAME(ANTENNA_COUPLING_NOTIFICATION), HCMD_NAME(SCAN_CFG_CMD), HCMD_NAME(SCAN_REQ_UMAC), HCMD_NAME(SCAN_ABORT_UMAC), @@ -351,13 +347,13 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { HCMD_NAME(BINDING_CONTEXT_CMD), HCMD_NAME(TIME_QUOTA_CMD), HCMD_NAME(NON_QOS_TX_COUNTER_CMD), + HCMD_NAME(LEDS_CMD), HCMD_NAME(LQ_CMD), HCMD_NAME(FW_PAGING_BLOCK_CMD), HCMD_NAME(SCAN_OFFLOAD_REQUEST_CMD), HCMD_NAME(SCAN_OFFLOAD_ABORT_CMD), HCMD_NAME(HOT_SPOT_CMD), HCMD_NAME(SCAN_OFFLOAD_PROFILES_QUERY_CMD), - HCMD_NAME(BT_COEX_UPDATE_CORUN_LUT), HCMD_NAME(BT_COEX_UPDATE_REDUCED_TXP), HCMD_NAME(BT_COEX_CI), HCMD_NAME(PHY_CONFIGURATION_CMD), @@ -388,6 +384,7 @@ static const struct iwl_hcmd_names 
iwl_mvm_legacy_names[] = { HCMD_NAME(SCAN_ITERATION_COMPLETE_UMAC), HCMD_NAME(REPLY_RX_PHY_CMD), HCMD_NAME(REPLY_RX_MPDU_CMD), + HCMD_NAME(FRAME_RELEASE), HCMD_NAME(BA_NOTIF), HCMD_NAME(MCC_UPDATE_CMD), HCMD_NAME(MCC_CHUB_UPDATE_CMD), @@ -510,8 +507,6 @@ static u32 calc_min_backoff(struct iwl_trans *trans, const struct iwl_cfg *cfg) return 0; } -static void iwl_mvm_fw_error_dump_wk(struct work_struct *work); - static void iwl_mvm_tx_unblock_dwork(struct work_struct *work) { struct iwl_mvm *mvm = @@ -535,6 +530,34 @@ static void iwl_mvm_tx_unblock_dwork(struct work_struct *work) mutex_unlock(&mvm->mutex); } +static int iwl_mvm_fwrt_dump_start(void *ctx) +{ + struct iwl_mvm *mvm = ctx; + int ret; + + ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_FW_DBG_COLLECT); + if (ret) + return ret; + + mutex_lock(&mvm->mutex); + + return 0; +} + +static void iwl_mvm_fwrt_dump_end(void *ctx) +{ + struct iwl_mvm *mvm = ctx; + + mutex_unlock(&mvm->mutex); + + iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT); +} + +static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = { + .dump_start = iwl_mvm_fwrt_dump_start, + .dump_end = iwl_mvm_fwrt_dump_end, +}; + static struct iwl_op_mode * iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, const struct iwl_fw *fw, struct dentry *dbgfs_dir) @@ -580,6 +603,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mvm->fw = fw; mvm->hw = hw; + iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm); + mvm->init_status = 0; if (iwl_mvm_has_new_rx_api(mvm)) { @@ -596,32 +621,15 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0; - if (!iwl_mvm_is_dqa_supported(mvm)) { - mvm->last_agg_queue = mvm->cfg->base_params->num_of_queues - 1; + mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE; + mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; + mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE; - if (mvm->cfg->base_params->num_of_queues == 16) { - mvm->aux_queue = 11; - mvm->first_agg_queue = 12; - BUILD_BUG_ON(BITS_PER_BYTE * - sizeof(mvm->hw_queue_to_mac80211[0]) < 12); - } else { - mvm->aux_queue = 15; - mvm->first_agg_queue = 16; - BUILD_BUG_ON(BITS_PER_BYTE * - sizeof(mvm->hw_queue_to_mac80211[0]) < 16); - } - } else { - mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE; - mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; - mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE; - mvm->first_agg_queue = IWL_MVM_DQA_MIN_DATA_QUEUE; - mvm->last_agg_queue = IWL_MVM_DQA_MAX_DATA_QUEUE; - } mvm->sf_state = SF_UNINIT; - if (iwl_mvm_has_new_tx_api(mvm)) - mvm->cur_ucode = IWL_UCODE_REGULAR; + if (iwl_mvm_has_unified_ucode(mvm)) + iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_REGULAR); else - mvm->cur_ucode = IWL_UCODE_INIT; + iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_INIT); mvm->drop_bcn_ap_mode = true; mutex_init(&mvm->mutex); @@ -635,9 +643,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk); INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk); - INIT_WORK(&mvm->sta_drained_wk, iwl_mvm_sta_drained_wk); INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work); - INIT_DELAYED_WORK(&mvm->fw_dump_wk, iwl_mvm_fw_error_dump_wk); INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work); INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk); INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk); @@ -688,10 +694,7 @@ iwl_op_mode_mvm_start(struct iwl_trans 
*trans, const struct iwl_cfg *cfg, trans_cfg.command_groups = iwl_mvm_groups; trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups); - if (iwl_mvm_is_dqa_supported(mvm)) - trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE; - else - trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE; + trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE; trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD; trans_cfg.scd_set_active = true; @@ -749,7 +752,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, iwl_mvm_stop_device(mvm); iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE); mutex_unlock(&mvm->mutex); - /* returns 0 if successful, 1 if success but in rfkill */ if (err < 0) { IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err); goto out_free; @@ -800,7 +802,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, iwl_mvm_leds_exit(mvm); iwl_mvm_thermal_exit(mvm); out_free: - flush_delayed_work(&mvm->fw_dump_wk); + iwl_fw_flush_dump(&mvm->fwrt); if (iwlmvm_mod_params.init_dbg) return op_mode; @@ -920,7 +922,7 @@ static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm, trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF); cmds_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig)) + if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig)) return; for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) { @@ -932,9 +934,9 @@ static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm, cmds_trig->cmds[i].group_id != pkt->hdr.group_id) continue; - iwl_mvm_fw_dbg_collect_trig(mvm, trig, - "CMD 0x%02x.%02x received", - pkt->hdr.group_id, pkt->hdr.cmd); + iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, + "CMD 0x%02x.%02x received", + pkt->hdr.group_id, pkt->hdr.cmd); break; } } @@ -980,8 +982,10 @@ static void iwl_mvm_rx_common(struct iwl_mvm *mvm, list_add_tail(&entry->list, &mvm->async_handlers_list); spin_unlock(&mvm->async_handlers_lock); schedule_work(&mvm->async_handlers_wk); - break; + return; } + + iwl_fwrt_handle_notification(&mvm->fwrt, rxb); } static void iwl_mvm_rx(struct iwl_op_mode *op_mode, @@ -1131,7 +1135,7 @@ static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) * Stop the device if we run OPERATIONAL firmware or if we are in the * middle of the calibrations. 
*/ - return state && (mvm->cur_ucode != IWL_UCODE_INIT || calibrating); + return state && (mvm->fwrt.cur_fw_img != IWL_UCODE_INIT || calibrating); } static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) @@ -1160,57 +1164,6 @@ static void iwl_mvm_reprobe_wk(struct work_struct *wk) module_put(THIS_MODULE); } -static void iwl_mvm_fw_error_dump_wk(struct work_struct *work) -{ - struct iwl_mvm *mvm = - container_of(work, struct iwl_mvm, fw_dump_wk.work); - - if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_FW_DBG_COLLECT)) - return; - - mutex_lock(&mvm->mutex); - - if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) { - /* stop recording */ - iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100); - - iwl_mvm_fw_error_dump(mvm); - - /* start recording again if the firmware is not crashed */ - if (!test_bit(STATUS_FW_ERROR, &mvm->trans->status) && - mvm->fw->dbg_dest_tlv) { - iwl_clear_bits_prph(mvm->trans, - MON_BUFF_SAMPLE_CTL, 0x100); - iwl_clear_bits_prph(mvm->trans, - MON_BUFF_SAMPLE_CTL, 0x1); - iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x1); - } - } else { - u32 in_sample = iwl_read_prph(mvm->trans, DBGC_IN_SAMPLE); - u32 out_ctrl = iwl_read_prph(mvm->trans, DBGC_OUT_CTRL); - - /* stop recording */ - iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0); - udelay(100); - iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, 0); - /* wait before we collect the data till the DBGC stop */ - udelay(500); - - iwl_mvm_fw_error_dump(mvm); - - /* start recording again if the firmware is not crashed */ - if (!test_bit(STATUS_FW_ERROR, &mvm->trans->status) && - mvm->fw->dbg_dest_tlv) { - iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, in_sample); - iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, out_ctrl); - } - } - - mutex_unlock(&mvm->mutex); - - iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT); -} - void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) { iwl_abort_notification_waits(&mvm->notif_wait); @@ -1234,7 +1187,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) * can't recover this since we're already half suspended. 
*/ if (!mvm->fw_restart && fw_error) { - iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert, + iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert, NULL); } else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { struct iwl_mvm_reprobe *reprobe; @@ -1260,7 +1213,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) reprobe->dev = mvm->trans->dev; INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk); schedule_work(&reprobe->work); - } else if (mvm->cur_ucode == IWL_UCODE_REGULAR && + } else if (mvm->fwrt.cur_fw_img == IWL_UCODE_REGULAR && mvm->hw_registered) { /* don't let the transport/FW power down */ iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); @@ -1439,7 +1392,7 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode) IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n"); - if (WARN_ON_ONCE(mvm->cur_ucode != IWL_UCODE_REGULAR)) + if (WARN_ON_ONCE(mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)) return -EINVAL; set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status); @@ -1665,7 +1618,7 @@ int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm) IWL_DEBUG_RPM(mvm, "MVM exiting D0i3\n"); - if (WARN_ON_ONCE(mvm->cur_ucode != IWL_UCODE_REGULAR)) + if (WARN_ON_ONCE(mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)) return -EINVAL; mutex_lock(&mvm->d0i3_suspend_mutex); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c index fb9eaf003ea5..7ee8e9077baf 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c @@ -251,7 +251,7 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, struct cfg80211_chan_def *chandef, u8 chains_static, u8 chains_dynamic) { - enum iwl_phy_ctxt_action action = FW_CTXT_ACTION_MODIFY; + enum iwl_ctxt_action action = FW_CTXT_ACTION_MODIFY; lockdep_assert_held(&mvm->mutex); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c index e684811f8e8b..c11fe2621d51 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2016 Intel Deutschland GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,7 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2016 Intel Deutschland GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -75,7 +75,7 @@ #include "iwl-debug.h" #include "mvm.h" #include "iwl-modparams.h" -#include "fw-api-power.h" +#include "fw/api/power.h" #define POWER_KEEP_ALIVE_PERIOD_SEC 25 @@ -186,7 +186,7 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm, if (!mvmvif->queue_params[ac].uapsd) continue; - if (mvm->cur_ucode != IWL_UCODE_WOWLAN) + if (mvm->fwrt.cur_fw_img != IWL_UCODE_WOWLAN) cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK); @@ -220,14 +220,15 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm, BIT(IEEE80211_AC_BK))) { cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK); cmd->snooze_interval = cpu_to_le16(IWL_MVM_PS_SNOOZE_INTERVAL); - cmd->snooze_window = (mvm->cur_ucode == IWL_UCODE_WOWLAN) ? - cpu_to_le16(IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW) : - cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW); + cmd->snooze_window = + (mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN) ? + cpu_to_le16(IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW) : + cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW); } cmd->uapsd_max_sp = mvm->hw->uapsd_max_sp_len; - if (mvm->cur_ucode == IWL_UCODE_WOWLAN || cmd->flags & + if (mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN || cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) { cmd->rx_data_timeout_uapsd = cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT); @@ -502,7 +503,7 @@ static int iwl_mvm_power_send_cmd(struct iwl_mvm *mvm, struct iwl_mac_power_cmd cmd = {}; iwl_mvm_power_build_cmd(mvm, vif, &cmd, - mvm->cur_ucode != IWL_UCODE_WOWLAN); + mvm->fwrt.cur_fw_img != IWL_UCODE_WOWLAN); iwl_mvm_power_log(mvm, &cmd); #ifdef CONFIG_IWLWIFI_DEBUGFS memcpy(&iwl_mvm_vif_from_mac80211(vif)->mac_pwr_cmd, &cmd, sizeof(cmd)); @@ -525,8 +526,8 @@ int iwl_mvm_power_update_device(struct iwl_mvm *mvm) cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK); #ifdef CONFIG_IWLWIFI_DEBUGFS - if ((mvm->cur_ucode == IWL_UCODE_WOWLAN) ? mvm->disable_power_off_d3 : - mvm->disable_power_off) + if ((mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN) ? 
+ mvm->disable_power_off_d3 : mvm->disable_power_off) cmd.flags &= cpu_to_le16(~DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK); #endif @@ -933,7 +934,7 @@ static int iwl_mvm_power_set_ba(struct iwl_mvm *mvm, if (!mvmvif->bf_data.bf_enabled) return 0; - if (mvm->cur_ucode == IWL_UCODE_WOWLAN) + if (mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN) cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3); mvmvif->bf_data.ba_enabled = !(!mvmvif->pm_enabled || diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 8999a1199d60..0fe723ca844e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -622,7 +622,9 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm, IWL_DEBUG_HT(mvm, "Starting Tx agg: STA: %pM tid: %d\n", sta->addr, tid); - ret = ieee80211_start_tx_ba_session(sta, tid, 5000); + + /* start BA session until the peer sends del BA */ + ret = ieee80211_start_tx_ba_session(sta, tid, 0); if (ret == -EAGAIN) { /* * driver and mac80211 is out of sync @@ -636,15 +638,32 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm, return ret; } -static void rs_tl_turn_on_agg(struct iwl_mvm *mvm, u8 tid, - struct iwl_lq_sta *lq_data, +static void rs_tl_turn_on_agg(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, + u8 tid, struct iwl_lq_sta *lq_sta, struct ieee80211_sta *sta) { - if (tid < IWL_MAX_TID_COUNT) - rs_tl_turn_on_agg_for_tid(mvm, lq_data, tid, sta); - else + struct iwl_mvm_tid_data *tid_data; + + /* + * In AP mode, tid can be equal to IWL_MAX_TID_COUNT + * when the frame is not QoS + */ + if (WARN_ON_ONCE(tid > IWL_MAX_TID_COUNT)) { IWL_ERR(mvm, "tid exceeds max TID count: %d/%d\n", tid, IWL_MAX_TID_COUNT); + return; + } else if (tid == IWL_MAX_TID_COUNT) { + return; + } + + tid_data = &mvmsta->tid_data[tid]; + if ((tid_data->state == IWL_AGG_OFF) && + (lq_sta->tx_agg_tid_en & BIT(tid)) && + (tid_data->tx_count_last >= IWL_MVM_RS_AGG_START_THRESHOLD)) { + IWL_DEBUG_RATE(mvm, "try to aggregate tid %d\n", tid); + if (rs_tl_turn_on_agg_for_tid(mvm, lq_sta, tid, sta) == 0) + tid_data->state = IWL_AGG_QUEUED; + } } static inline int get_num_of_ant_from_rate(u32 rate_n_flags) @@ -753,8 +772,38 @@ static int rs_collect_tpc_data(struct iwl_mvm *mvm, window); } +static void rs_update_tid_tpt_stats(struct iwl_mvm *mvm, + struct iwl_mvm_sta *mvmsta, + u8 tid, int successes) +{ + struct iwl_mvm_tid_data *tid_data; + + if (tid >= IWL_MAX_TID_COUNT) + return; + + tid_data = &mvmsta->tid_data[tid]; + + /* + * Measure if there're enough successful transmits per second. + * These statistics are used only to decide if we can start a + * BA session, so it should be updated only when A-MPDU is + * off. 
+ */ + if (tid_data->state != IWL_AGG_OFF) + return; + + if (time_is_before_jiffies(tid_data->tpt_meas_start + HZ) || + (tid_data->tx_count >= IWL_MVM_RS_AGG_START_THRESHOLD)) { + tid_data->tx_count_last = tid_data->tx_count; + tid_data->tx_count = 0; + tid_data->tpt_meas_start = jiffies; + } else { + tid_data->tx_count += successes; + } +} + static int rs_collect_tlc_data(struct iwl_mvm *mvm, - struct iwl_lq_sta *lq_sta, + struct iwl_mvm_sta *mvmsta, u8 tid, struct iwl_scale_tbl_info *tbl, int scale_index, int attempts, int successes) { @@ -764,12 +813,14 @@ static int rs_collect_tlc_data(struct iwl_mvm *mvm, return -EINVAL; if (tbl->column != RS_COLUMN_INVALID) { - struct lq_sta_pers *pers = &lq_sta->pers; + struct lq_sta_pers *pers = &mvmsta->lq_sta.pers; pers->tx_stats[tbl->column][scale_index].total += attempts; pers->tx_stats[tbl->column][scale_index].success += successes; } + rs_update_tid_tpt_stats(mvm, mvmsta, tid, successes); + /* Select window for current tx bit rate */ window = &(tbl->win[scale_index]); return _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes, @@ -1211,12 +1262,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, if (time_after(jiffies, (unsigned long)(lq_sta->last_tx + (IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) { - int t; - IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n"); - for (t = 0; t < IWL_MAX_TID_COUNT; t++) - ieee80211_stop_tx_ba_session(sta, t); - iwl_mvm_rs_rate_init(mvm, sta, info->band, false); return; } @@ -1312,7 +1358,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, if (info->status.ampdu_ack_len == 0) info->status.ampdu_len = 1; - rs_collect_tlc_data(mvm, lq_sta, curr_tbl, tx_resp_rate.index, + rs_collect_tlc_data(mvm, mvmsta, tid, curr_tbl, tx_resp_rate.index, info->status.ampdu_len, info->status.ampdu_ack_len); @@ -1351,7 +1397,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, tx_resp_rate.index, 1, i < retries ? 0 : legacy_success, reduced_txp); - rs_collect_tlc_data(mvm, lq_sta, tmp_tbl, + rs_collect_tlc_data(mvm, mvmsta, tid, tmp_tbl, tx_resp_rate.index, 1, i < retries ? 
0 : legacy_success); } @@ -1673,14 +1719,14 @@ static void rs_set_amsdu_len(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_scale_tbl_info *tbl, enum rs_action scale_action) { - struct iwl_mvm_sta *sta_priv = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); if ((!is_vht(&tbl->rate) && !is_ht(&tbl->rate)) || tbl->rate.index < IWL_RATE_MCS_5_INDEX || scale_action == RS_ACTION_DOWNSCALE) - sta_priv->tlc_amsdu = false; + mvmsta->tlc_amsdu = false; else - sta_priv->tlc_amsdu = true; + mvmsta->tlc_amsdu = true; } /* @@ -2228,11 +2274,10 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm, u16 high_low; s32 sr; u8 prev_agg = lq_sta->is_agg; - struct iwl_mvm_sta *sta_priv = iwl_mvm_sta_from_mac80211(sta); - struct iwl_mvm_tid_data *tid_data; + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct rs_rate *rate; - lq_sta->is_agg = !!sta_priv->agg_tids; + lq_sta->is_agg = !!mvmsta->agg_tids; /* * Select rate-scale / modulation-mode table to work with in @@ -2480,44 +2525,12 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm, } } - if (done_search && lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_ENDED) { - /* If the "active" (non-search) mode was legacy, - * and we've tried switching antennas, - * but we haven't been able to try HT modes (not available), - * stay with best antenna legacy modulation for a while - * before next round of mode comparisons. */ - tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]); - if (is_legacy(&tbl1->rate)) { - IWL_DEBUG_RATE(mvm, "LQ: STAY in legacy table\n"); + if (!ndp) + rs_tl_turn_on_agg(mvm, mvmsta, tid, lq_sta, sta); - if (tid != IWL_MAX_TID_COUNT) { - tid_data = &sta_priv->tid_data[tid]; - if (tid_data->state != IWL_AGG_OFF) { - IWL_DEBUG_RATE(mvm, - "Stop aggregation on tid %d\n", - tid); - ieee80211_stop_tx_ba_session(sta, tid); - } - } - rs_set_stay_in_table(mvm, 1, lq_sta); - } else { - /* If we're in an HT mode, and all 3 mode switch actions - * have been tried and compared, stay in this best modulation - * mode for a while before next round of mode comparisons. 
*/ - if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) && - (lq_sta->tx_agg_tid_en & (1 << tid)) && - (tid != IWL_MAX_TID_COUNT)) { - tid_data = &sta_priv->tid_data[tid]; - if (tid_data->state == IWL_AGG_OFF && !ndp) { - IWL_DEBUG_RATE(mvm, - "try to aggregate tid %d\n", - tid); - rs_tl_turn_on_agg(mvm, tid, - lq_sta, sta); - } - } - rs_set_stay_in_table(mvm, 0, lq_sta); - } + if (done_search && lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_ENDED) { + tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]); + rs_set_stay_in_table(mvm, is_legacy(&tbl1->rate), lq_sta); } } @@ -2900,10 +2913,10 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta, static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta, gfp_t gfp) { - struct iwl_mvm_sta *sta_priv = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_op_mode *op_mode = (struct iwl_op_mode *)mvm_rate; struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - struct iwl_lq_sta *lq_sta = &sta_priv->lq_sta; + struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta; IWL_DEBUG_RATE(mvm, "create station rate scale window\n"); @@ -2917,7 +2930,7 @@ static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta, memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal)); lq_sta->pers.last_rssi = S8_MIN; - return &sta_priv->lq_sta; + return &mvmsta->lq_sta; } static int rs_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap, @@ -3109,8 +3122,8 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct ieee80211_hw *hw = mvm->hw; struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; - struct iwl_mvm_sta *sta_priv = iwl_mvm_sta_from_mac80211(sta); - struct iwl_lq_sta *lq_sta = &sta_priv->lq_sta; + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta; struct ieee80211_supported_band *sband; unsigned long supp; /* must be unsigned long for for_each_set_bit */ @@ -3119,8 +3132,8 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, sband = hw->wiphy->bands[band]; - lq_sta->lq.sta_id = sta_priv->sta_id; - sta_priv->tlc_amsdu = false; + lq_sta->lq.sta_id = mvmsta->sta_id; + mvmsta->tlc_amsdu = false; for (j = 0; j < LQ_SIZE; j++) rs_rate_scale_clear_tbl_windows(mvm, &lq_sta->lq_info[j]); @@ -3130,7 +3143,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, IWL_DEBUG_RATE(mvm, "LQ: *** rate scale station global init for station %d ***\n", - sta_priv->sta_id); + mvmsta->sta_id); /* TODO: what is a good starting rate for STA? About middle? Maybe not * the lowest or the highest rate.. Could consider using RSSI from * previous packets? 
Need to have IEEE 802.1X auth succeed immediately diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index 622d543abb70..184c749766f2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -67,7 +67,6 @@ #include "iwl-trans.h" #include "mvm.h" #include "fw-api.h" -#include "fw-dbg.h" /* * iwl_mvm_rx_rx_phy_cmd - REPLY_RX_PHY_CMD handler @@ -397,10 +396,12 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, rssi = le32_to_cpu(rssi_trig->rssi); trig_check = - iwl_fw_dbg_trigger_check_stop(mvm, mvmsta->vif, + iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, + ieee80211_vif_to_wdev(mvmsta->vif), trig); if (trig_check && rx_status->signal < rssi) - iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL); + iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, + NULL); } if (ieee80211_is_data(hdr->frame_control)) @@ -624,7 +625,7 @@ iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_STATS); trig_stats = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig)) + if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig)) return; trig_offset = le32_to_cpu(trig_stats->stop_offset); @@ -636,7 +637,7 @@ iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) if (le32_to_cpup((__le32 *) (pkt->data + trig_offset)) < trig_thold) return; - iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL); + iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, NULL); } void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 71c8b800ffa9..77f77bc5d083 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -63,7 +63,6 @@ #include "iwl-trans.h" #include "mvm.h" #include "fw-api.h" -#include "fw-dbg.h" static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb, int queue, struct ieee80211_sta *sta) @@ -673,11 +672,12 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, * If there was a significant jump in the nssn - adjust. * If the SN is smaller than the NSSN it might need to first go into * the reorder buffer, in which case we just release up to it and the - * rest of the function will take of storing it and releasing up to the - * nssn + * rest of the function will take care of storing it and releasing up to + * the nssn */ if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size, - buffer->buf_size)) { + buffer->buf_size) || + !ieee80211_sn_less(sn, buffer->head_sn + buffer->buf_size)) { u16 min_sn = ieee80211_sn_less(sn, nssn) ? 
sn : nssn; iwl_mvm_release_frames(mvm, sta, napi, buffer, min_sn); @@ -854,7 +854,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rcu_read_lock(); - if (le16_to_cpu(desc->status) & IWL_RX_MPDU_STATUS_SRC_STA_FOUND) { + if (desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) { u8 id = desc->sta_id_flags & IWL_RX_MPDU_SIF_STA_ID_MASK; if (!WARN_ON_ONCE(id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) { @@ -908,10 +908,12 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rssi = le32_to_cpu(rssi_trig->rssi); trig_check = - iwl_fw_dbg_trigger_check_stop(mvm, mvmsta->vif, + iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, + ieee80211_vif_to_wdev(mvmsta->vif), trig); if (trig_check && rx_status->signal < rssi) - iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL); + iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, + NULL); } if (ieee80211_is_data(hdr->frame_control)) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index 35e813bdfbe5..774122fed454 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -69,7 +69,7 @@ #include #include "mvm.h" -#include "fw-api-scan.h" +#include "fw/api/scan.h" #include "iwl-io.h" #define IWL_DENSE_EBS_SCAN_RATIO 5 @@ -555,7 +555,7 @@ static int iwl_mvm_lmac_scan_abort(struct iwl_mvm *mvm) struct iwl_host_cmd cmd = { .id = SCAN_OFFLOAD_ABORT_CMD, }; - u32 status; + u32 status = CAN_ABORT_STATUS; ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status); if (ret) @@ -743,7 +743,7 @@ static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm, * 4. it's not a p2p find operation. */ return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) && - mvm->last_ebs_successful && + mvm->last_ebs_successful && IWL_MVM_ENABLE_EBS && vif->type != NL80211_IFTYPE_P2P_DEVICE); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 027ee5e72172..c4a343534c5e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -297,60 +297,6 @@ static void iwl_mvm_rx_agg_session_expired(unsigned long data) rcu_read_unlock(); } -static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm, - struct ieee80211_sta *sta) -{ - unsigned long used_hw_queues; - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - unsigned int wdg_timeout = - iwl_mvm_get_wd_timeout(mvm, NULL, true, false); - u32 ac; - - lockdep_assert_held(&mvm->mutex); - - used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, NULL); - - /* Find available queues, and allocate them to the ACs */ - for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { - u8 queue = find_first_zero_bit(&used_hw_queues, - mvm->first_agg_queue); - - if (queue >= mvm->first_agg_queue) { - IWL_ERR(mvm, "Failed to allocate STA queue\n"); - return -EBUSY; - } - - __set_bit(queue, &used_hw_queues); - mvmsta->hw_queue[ac] = queue; - } - - /* Found a place for all queues - enable them */ - for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { - iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac], - mvmsta->hw_queue[ac], - iwl_mvm_ac_to_tx_fifo[ac], 0, - wdg_timeout); - mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]); - } - - return 0; -} - -static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm, - struct ieee80211_sta *sta) -{ - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - unsigned long sta_msk; - int i; - - lockdep_assert_held(&mvm->mutex); - - /* disable the TDLS STA-specific queues */ - sta_msk = mvmsta->tfd_queue_msk; 
- for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE) - iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0); -} - /* Disable aggregations for a bitmap of TIDs for a given station */ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue, unsigned long disable_agg_tids, @@ -758,7 +704,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_trans_txq_scd_cfg cfg = { - .fifo = iwl_mvm_ac_to_tx_fifo[ac], + .fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac), .sta_id = mvmsta->sta_id, .tid = tid, .frame_limit = IWL_FRAME_LIMIT, @@ -1316,7 +1262,7 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm, u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number); cfg.tid = i; - cfg.fifo = iwl_mvm_ac_to_tx_fifo[ac]; + cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac); cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE || txq_id == IWL_MVM_DQA_BSS_CLIENT_QUEUE); @@ -1330,8 +1276,50 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm, mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY; } } +} - atomic_set(&mvm->pending_frames[mvm_sta->sta_id], 0); +static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm, + struct iwl_mvm_int_sta *sta, + const u8 *addr, + u16 mac_id, u16 color) +{ + struct iwl_mvm_add_sta_cmd cmd; + int ret; + u32 status = ADD_STA_SUCCESS; + + lockdep_assert_held(&mvm->mutex); + + memset(&cmd, 0, sizeof(cmd)); + cmd.sta_id = sta->sta_id; + cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id, + color)); + if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) + cmd.station_type = sta->type; + + if (!iwl_mvm_has_new_tx_api(mvm)) + cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk); + cmd.tid_disable_tx = cpu_to_le16(0xffff); + + if (addr) + memcpy(cmd.addr, addr, ETH_ALEN); + + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, + iwl_mvm_add_sta_cmd_size(mvm), + &cmd, &status); + if (ret) + return ret; + + switch (status & IWL_ADD_STA_STATUS_MASK) { + case ADD_STA_SUCCESS: + IWL_DEBUG_INFO(mvm, "Internal station added.\n"); + return 0; + default: + ret = -EIO; + IWL_ERR(mvm, "Add internal station failed, status=0x%x\n", + status); + break; + } + return ret; } int iwl_mvm_add_sta(struct iwl_mvm *mvm, @@ -1342,6 +1330,8 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_rxq_dup_data *dup_data; int i, ret, sta_id; + bool sta_update = false; + unsigned int sta_flags = 0; lockdep_assert_held(&mvm->mutex); @@ -1356,10 +1346,25 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, spin_lock_init(&mvm_sta->lock); - /* In DQA mode, if this is a HW restart, re-alloc existing queues */ - if (iwl_mvm_is_dqa_supported(mvm) && - test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { + /* if this is a HW restart re-alloc existing queues */ + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { + struct iwl_mvm_int_sta tmp_sta = { + .sta_id = sta_id, + .type = mvm_sta->sta_type, + }; + + /* + * First add an empty station since allocating + * a queue requires a valid station + */ + ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr, + mvmvif->id, mvmvif->color); + if (ret) + goto err; + iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta); + sta_update = true; + sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES; goto update_fw; } @@ -1376,33 +1381,15 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, mvm_sta->sta_type = sta->tdls ? 
IWL_STA_TDLS_LINK : IWL_STA_LINK; /* HW restart, don't assume the memory has been zeroed */ - atomic_set(&mvm->pending_frames[sta_id], 0); mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */ mvm_sta->tfd_queue_msk = 0; - /* - * Allocate new queues for a TDLS station, unless we're in DQA mode, - * and then they'll be allocated dynamically - */ - if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls) { - ret = iwl_mvm_tdls_sta_init(mvm, sta); - if (ret) - return ret; - } else if (!iwl_mvm_is_dqa_supported(mvm)) { - for (i = 0; i < IEEE80211_NUM_ACS; i++) - if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE) - mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]); - } - /* for HW restart - reset everything but the sequence number */ for (i = 0; i <= IWL_MAX_TID_COUNT; i++) { u16 seq = mvm_sta->tid_data[i].seq_number; memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i])); mvm_sta->tid_data[i].seq_number = seq; - if (!iwl_mvm_is_dqa_supported(mvm)) - continue; - /* * Mark all queues for this STA as unallocated and defer TX * frames until the queue is allocated @@ -1436,7 +1423,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, mvm_sta->dup_data = dup_data; } - if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) { + if (!iwl_mvm_has_new_tx_api(mvm)) { ret = iwl_mvm_reserve_sta_stream(mvm, sta, ieee80211_vif_type_p2p(vif)); if (ret) @@ -1444,7 +1431,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, } update_fw: - ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0); + ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags); if (ret) goto err; @@ -1462,8 +1449,6 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, return 0; err: - if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls) - iwl_mvm_tdls_sta_deinit(mvm, sta); return ret; } @@ -1536,79 +1521,6 @@ static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id) return 0; } -void iwl_mvm_sta_drained_wk(struct work_struct *wk) -{ - struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, sta_drained_wk); - u8 sta_id; - - /* - * The mutex is needed because of the SYNC cmd, but not only: if the - * work would run concurrently with iwl_mvm_rm_sta, it would run before - * iwl_mvm_rm_sta sets the station as busy, and exit. Then - * iwl_mvm_rm_sta would set the station as busy, and nobody will clean - * that later. - */ - mutex_lock(&mvm->mutex); - - for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) { - int ret; - struct ieee80211_sta *sta = - rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], - lockdep_is_held(&mvm->mutex)); - - /* - * This station is in use or RCU-removed; the latter happens in - * managed mode, where mac80211 removes the station before we - * can remove it from firmware (we can only do that after the - * MAC is marked unassociated), and possibly while the deauth - * frame to disconnect from the AP is still queued. Then, the - * station pointer is -ENOENT when the last skb is reclaimed. - */ - if (!IS_ERR(sta) || PTR_ERR(sta) == -ENOENT) - continue; - - if (PTR_ERR(sta) == -EINVAL) { - IWL_ERR(mvm, "Drained sta %d, but it is internal?\n", - sta_id); - continue; - } - - if (!sta) { - IWL_ERR(mvm, "Drained sta %d, but it was NULL?\n", - sta_id); - continue; - } - - WARN_ON(PTR_ERR(sta) != -EBUSY); - /* This station was removed and we waited until it got drained, - * we can now proceed and remove it. 
- */ - ret = iwl_mvm_rm_sta_common(mvm, sta_id); - if (ret) { - IWL_ERR(mvm, - "Couldn't remove sta %d after it was drained\n", - sta_id); - continue; - } - RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL); - clear_bit(sta_id, mvm->sta_drained); - - if (mvm->tfd_drained[sta_id]) { - unsigned long i, msk = mvm->tfd_drained[sta_id]; - - for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE) - iwl_mvm_disable_txq(mvm, i, i, - IWL_MAX_TID_COUNT, 0); - - mvm->tfd_drained[sta_id] = 0; - IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n", - sta_id, msk); - } - } - - mutex_unlock(&mvm->mutex); -} - static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_sta *mvm_sta) @@ -1632,10 +1544,11 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm, int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta) { - int i, ret; + int i; for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) { u16 txq_id; + int ret; spin_lock_bh(&mvm_sta->lock); txq_id = mvm_sta->tid_data[i].txq_id; @@ -1646,10 +1559,10 @@ int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm, ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id); if (ret) - break; + return ret; } - return ret; + return 0; } int iwl_mvm_rm_sta(struct iwl_mvm *mvm, @@ -1666,79 +1579,65 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, if (iwl_mvm_has_new_rx_api(mvm)) kfree(mvm_sta->dup_data); - if ((vif->type == NL80211_IFTYPE_STATION && - mvmvif->ap_sta_id == sta_id) || - iwl_mvm_is_dqa_supported(mvm)){ - ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); - if (ret) - return ret; - /* flush its queues here since we are freeing mvm_sta */ - ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0); - if (ret) - return ret; - if (iwl_mvm_has_new_tx_api(mvm)) { - ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta); - } else { - u32 q_mask = mvm_sta->tfd_queue_msk; + ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); + if (ret) + return ret; - ret = iwl_trans_wait_tx_queues_empty(mvm->trans, - q_mask); - } - if (ret) - return ret; - ret = iwl_mvm_drain_sta(mvm, mvm_sta, false); + /* flush its queues here since we are freeing mvm_sta */ + ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0); + if (ret) + return ret; + if (iwl_mvm_has_new_tx_api(mvm)) { + ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta); + } else { + u32 q_mask = mvm_sta->tfd_queue_msk; - /* If DQA is supported - the queues can be disabled now */ - if (iwl_mvm_is_dqa_supported(mvm)) { - iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta); - /* - * If pending_frames is set at this point - it must be - * driver internal logic error, since queues are empty - * and removed successuly. 
- * warn on it but set it to 0 anyway to avoid station - * not being removed later in the function - */ - WARN_ON(atomic_xchg(&mvm->pending_frames[sta_id], 0)); - } + ret = iwl_trans_wait_tx_queues_empty(mvm->trans, + q_mask); + } + if (ret) + return ret; - /* If there is a TXQ still marked as reserved - free it */ - if (iwl_mvm_is_dqa_supported(mvm) && - mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) { - u8 reserved_txq = mvm_sta->reserved_queue; - enum iwl_mvm_queue_status *status; + ret = iwl_mvm_drain_sta(mvm, mvm_sta, false); - /* - * If no traffic has gone through the reserved TXQ - it - * is still marked as IWL_MVM_QUEUE_RESERVED, and - * should be manually marked as free again - */ - spin_lock_bh(&mvm->queue_info_lock); - status = &mvm->queue_info[reserved_txq].status; - if (WARN((*status != IWL_MVM_QUEUE_RESERVED) && - (*status != IWL_MVM_QUEUE_FREE), - "sta_id %d reserved txq %d status %d", - sta_id, reserved_txq, *status)) { - spin_unlock_bh(&mvm->queue_info_lock); - return -EINVAL; - } + iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta); - *status = IWL_MVM_QUEUE_FREE; + /* If there is a TXQ still marked as reserved - free it */ + if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) { + u8 reserved_txq = mvm_sta->reserved_queue; + enum iwl_mvm_queue_status *status; + + /* + * If no traffic has gone through the reserved TXQ - it + * is still marked as IWL_MVM_QUEUE_RESERVED, and + * should be manually marked as free again + */ + spin_lock_bh(&mvm->queue_info_lock); + status = &mvm->queue_info[reserved_txq].status; + if (WARN((*status != IWL_MVM_QUEUE_RESERVED) && + (*status != IWL_MVM_QUEUE_FREE), + "sta_id %d reserved txq %d status %d", + sta_id, reserved_txq, *status)) { spin_unlock_bh(&mvm->queue_info_lock); + return -EINVAL; } - if (vif->type == NL80211_IFTYPE_STATION && - mvmvif->ap_sta_id == sta_id) { - /* if associated - we can't remove the AP STA now */ - if (vif->bss_conf.assoc) - return ret; + *status = IWL_MVM_QUEUE_FREE; + spin_unlock_bh(&mvm->queue_info_lock); + } - /* unassoc - go ahead - remove the AP STA now */ - mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; + if (vif->type == NL80211_IFTYPE_STATION && + mvmvif->ap_sta_id == sta_id) { + /* if associated - we can't remove the AP STA now */ + if (vif->bss_conf.assoc) + return ret; - /* clear d0i3_ap_sta_id if no longer relevant */ - if (mvm->d0i3_ap_sta_id == sta_id) - mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA; - } + /* unassoc - go ahead - remove the AP STA now */ + mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; + + /* clear d0i3_ap_sta_id if no longer relevant */ + if (mvm->d0i3_ap_sta_id == sta_id) + mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA; } /* @@ -1755,32 +1654,10 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, * calls the drain worker. */ spin_lock_bh(&mvm_sta->lock); + spin_unlock_bh(&mvm_sta->lock); - /* - * There are frames pending on the AC queues for this station. - * We need to wait until all the frames are drained... 
- */ - if (atomic_read(&mvm->pending_frames[sta_id])) { - rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], - ERR_PTR(-EBUSY)); - spin_unlock_bh(&mvm_sta->lock); - - /* disable TDLS sta queues on drain complete */ - if (sta->tdls) { - mvm->tfd_drained[sta_id] = mvm_sta->tfd_queue_msk; - IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n", sta_id); - } - - ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); - } else { - spin_unlock_bh(&mvm_sta->lock); - - if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls) - iwl_mvm_tdls_sta_deinit(mvm, sta); - - ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id); - RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL); - } + ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id); + RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL); return ret; } @@ -1823,50 +1700,6 @@ void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta) sta->sta_id = IWL_MVM_INVALID_STA; } -static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm, - struct iwl_mvm_int_sta *sta, - const u8 *addr, - u16 mac_id, u16 color) -{ - struct iwl_mvm_add_sta_cmd cmd; - int ret; - u32 status; - - lockdep_assert_held(&mvm->mutex); - - memset(&cmd, 0, sizeof(cmd)); - cmd.sta_id = sta->sta_id; - cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id, - color)); - if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) - cmd.station_type = sta->type; - - if (!iwl_mvm_has_new_tx_api(mvm)) - cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk); - cmd.tid_disable_tx = cpu_to_le16(0xffff); - - if (addr) - memcpy(cmd.addr, addr, ETH_ALEN); - - ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, - iwl_mvm_add_sta_cmd_size(mvm), - &cmd, &status); - if (ret) - return ret; - - switch (status & IWL_ADD_STA_STATUS_MASK) { - case ADD_STA_SUCCESS: - IWL_DEBUG_INFO(mvm, "Internal station added.\n"); - return 0; - default: - ret = -EIO; - IWL_ERR(mvm, "Add internal station failed, status=0x%x\n", - status); - break; - } - return ret; -} - static void iwl_mvm_enable_aux_queue(struct iwl_mvm *mvm) { unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ? 
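The sta.c hunks above drop the old pending_frames accounting and the drained-station worker, so station teardown no longer parks an ERR_PTR(-EBUSY) in fw_id_to_mac_id and waits for a drain notification before removing the station. Below is a condensed, illustrative sketch of the resulting iwl_mvm_rm_sta() order of operations; it reuses only identifiers visible in the hunks above and omits the error handling and reserved-queue cleanup of the real function, so treat it as orientation, not as the actual code.

/*
 * Condensed sketch of the post-change teardown order: drain in fw,
 * flush and wait for the TX queues, free the DQA queues, then remove
 * the station from fw and clear fw_id_to_mac_id.
 */
static int sketch_rm_sta_flow(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                              struct iwl_mvm_sta *mvm_sta)
{
        int ret;

        ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);    /* fw drops new frames */
        if (ret)
                return ret;

        ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0); /* flush queued frames */
        if (ret)
                return ret;

        if (iwl_mvm_has_new_tx_api(mvm))
                ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
        else
                ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
                                                     mvm_sta->tfd_queue_msk);
        if (ret)
                return ret;

        iwl_mvm_drain_sta(mvm, mvm_sta, false);
        iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);  /* DQA queues freed here */

        ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
        RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
        return ret;
}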
@@ -1879,7 +1712,7 @@ static void iwl_mvm_enable_aux_queue(struct iwl_mvm *mvm) IWL_MAX_TID_COUNT, wdg_timeout); mvm->aux_queue = queue; - } else if (iwl_mvm_is_dqa_supported(mvm)) { + } else { struct iwl_trans_txq_scd_cfg cfg = { .fifo = IWL_MVM_TX_FIFO_MCAST, .sta_id = mvm->aux_sta.sta_id, @@ -1890,9 +1723,6 @@ static void iwl_mvm_enable_aux_queue(struct iwl_mvm *mvm) iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg, wdg_timeout); - } else { - iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue, - IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout); } } @@ -1992,7 +1822,7 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) lockdep_assert_held(&mvm->mutex); - if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) { + if (!iwl_mvm_has_new_tx_api(mvm)) { if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) queue = mvm->probe_queue; @@ -2079,8 +1909,7 @@ int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) lockdep_assert_held(&mvm->mutex); - if (iwl_mvm_is_dqa_supported(mvm)) - iwl_mvm_free_bcast_sta_queues(mvm, vif); + iwl_mvm_free_bcast_sta_queues(mvm, vif); ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id); if (ret) @@ -2091,23 +1920,10 @@ int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - u32 qmask = 0; lockdep_assert_held(&mvm->mutex); - if (!iwl_mvm_is_dqa_supported(mvm)) { - qmask = iwl_mvm_mac_get_queues_mask(vif); - - /* - * The firmware defines the TFD queue mask to only be relevant - * for *unicast* queues, so the multicast (CAB) queue shouldn't - * be included. This only happens in NL80211_IFTYPE_AP vif type, - * so the next line will only have an effect there. - */ - qmask &= ~BIT(vif->cab_queue); - } - - return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask, + return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0, ieee80211_vif_type_p2p(vif), IWL_STA_GENERAL_PURPOSE); } @@ -2119,7 +1935,7 @@ int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) * @mvm: the mvm component * @vif: the interface to which the broadcast station is added * @bsta: the broadcast station to add. */ -int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta; @@ -2150,7 +1966,7 @@ void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) * Send the FW a request to remove the station from it's internal data * structures, and in addition remove it from the local data structure. 
*/ -int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { int ret; @@ -2189,9 +2005,6 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) lockdep_assert_held(&mvm->mutex); - if (!iwl_mvm_is_dqa_supported(mvm)) - return 0; - if (WARN_ON(vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_ADHOC)) return -ENOTSUPP; @@ -2256,9 +2069,6 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) lockdep_assert_held(&mvm->mutex); - if (!iwl_mvm_is_dqa_supported(mvm)) - return 0; - iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0); iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue, @@ -2508,8 +2318,6 @@ int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, mvm_sta->tid_disable_agg &= ~BIT(tid); } else { /* In DQA-mode the queue isn't removed on agg termination */ - if (!iwl_mvm_is_dqa_supported(mvm)) - mvm_sta->tfd_queue_msk &= ~BIT(queue); mvm_sta->tid_disable_agg |= BIT(tid); } @@ -2577,8 +2385,10 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) return -EINVAL; - if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) { - IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_OFF %d!\n", + if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED && + mvmsta->tid_data[tid].state != IWL_AGG_OFF) { + IWL_ERR(mvm, + "Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n", mvmsta->tid_data[tid].state); return -ENXIO; } @@ -2612,19 +2422,17 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, ret = -ENXIO; goto release_locks; } - } else if (iwl_mvm_is_dqa_supported(mvm) && - unlikely(mvm->queue_info[txq_id].status == + } else if (unlikely(mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_SHARED)) { ret = -ENXIO; IWL_DEBUG_TX_QUEUES(mvm, "Can't start tid %d agg on shared queue!\n", tid); goto release_locks; - } else if (!iwl_mvm_is_dqa_supported(mvm) || - mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) { + } else if (mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) { txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, - mvm->first_agg_queue, - mvm->last_agg_queue); + IWL_MVM_DQA_MIN_DATA_QUEUE, + IWL_MVM_DQA_MAX_DATA_QUEUE); if (txq_id < 0) { ret = txq_id; IWL_ERR(mvm, "Failed to allocate agg queue\n"); @@ -2742,37 +2550,34 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, queue_status = mvm->queue_info[queue].status; spin_unlock_bh(&mvm->queue_info_lock); - /* In DQA mode, the existing queue might need to be reconfigured */ - if (iwl_mvm_is_dqa_supported(mvm)) { - /* Maybe there is no need to even alloc a queue... */ - if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY) - alloc_queue = false; + /* Maybe there is no need to even alloc a queue... 
*/ + if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY) + alloc_queue = false; + /* + * Only reconfig the SCD for the queue if the window size has + * changed from current (become smaller) + */ + if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) { /* - * Only reconfig the SCD for the queue if the window size has - * changed from current (become smaller) + * If reconfiguring an existing queue, it first must be + * drained */ - if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) { - /* - * If reconfiguring an existing queue, it first must be - * drained - */ - ret = iwl_trans_wait_tx_queues_empty(mvm->trans, - BIT(queue)); - if (ret) { - IWL_ERR(mvm, - "Error draining queue before reconfig\n"); - return ret; - } + ret = iwl_trans_wait_tx_queues_empty(mvm->trans, + BIT(queue)); + if (ret) { + IWL_ERR(mvm, + "Error draining queue before reconfig\n"); + return ret; + } - ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo, - mvmsta->sta_id, tid, - buf_size, ssn); - if (ret) { - IWL_ERR(mvm, - "Error reconfiguring TXQ #%d\n", queue); - return ret; - } + ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo, + mvmsta->sta_id, tid, + buf_size, ssn); + if (ret) { + IWL_ERR(mvm, + "Error reconfiguring TXQ #%d\n", queue); + return ret; } } @@ -2868,18 +2673,6 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, "ssn = %d, next_recl = %d\n", tid_data->ssn, tid_data->next_reclaimed); - /* - * There are still packets for this RA / TID in the HW. - * Not relevant for DQA mode, since there is no need to disable - * the queue. - */ - if (!iwl_mvm_is_dqa_supported(mvm) && - tid_data->ssn != tid_data->next_reclaimed) { - tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA; - err = 0; - break; - } - tid_data->ssn = 0xffff; tid_data->state = IWL_AGG_OFF; spin_unlock_bh(&mvmsta->lock); @@ -2887,12 +2680,6 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); - - if (!iwl_mvm_is_dqa_supported(mvm)) { - int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]]; - - iwl_mvm_disable_txq(mvm, txq_id, mac_queue, tid, 0); - } return 0; case IWL_AGG_STARTING: case IWL_EMPTYING_HW_QUEUE_ADDBA: @@ -2962,13 +2749,6 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif, iwl_mvm_drain_sta(mvm, mvmsta, false); iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); - - if (!iwl_mvm_is_dqa_supported(mvm)) { - int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]]; - - iwl_mvm_disable_txq(mvm, tid_data->txq_id, mac_queue, - tid, 0); - } } return 0; @@ -3587,15 +3367,6 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, u16 n_queued; tid_data = &mvmsta->tid_data[tid]; - if (WARN(!iwl_mvm_is_dqa_supported(mvm) && - tid_data->state != IWL_AGG_ON && - tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA, - "TID %d state is %d\n", - tid, tid_data->state)) { - spin_unlock_bh(&mvmsta->lock); - ieee80211_sta_eosp(sta); - return; - } n_queued = iwl_mvm_tid_queued(mvm, tid_data); if (n_queued > remaining) { @@ -3689,13 +3460,8 @@ void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm, mvm_sta->disable_tx = disable; - /* - * Tell mac80211 to start/stop queuing tx for this station, - * but don't stop queuing if there are still pending frames - * for this station. 
- */ - if (disable || !atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) - ieee80211_sta_block_awake(mvm->hw, sta, disable); + /* Tell mac80211 to start/stop queuing tx for this station */ + ieee80211_sta_block_awake(mvm->hw, sta, disable); iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 05fecbe87da4..aedabe101cf0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -222,16 +222,7 @@ struct iwl_mvm_vif; * we remove the STA of the AP. The flush can be done synchronously against the * fw. * Drain means that the fw will drop all the frames sent to a specific station. - * This is useful when a client (if we are IBSS / GO or AP) disassociates. In - * that case, we need to drain all the frames for that client from the AC queues - * that are shared with the other clients. Only then, we can remove the STA in - * the fw. In order to do so, we track the non-AMPDU packets for each station. - * If mac80211 removes a STA and if it still has non-AMPDU packets pending in - * the queues, we mark this station as %EBUSY in %fw_id_to_mac_id, and drop all - * the frames for this STA (%iwl_mvm_rm_sta). When the last frame is dropped - * (we know about it with its Tx response), we remove the station in fw and set - * it as %NULL in %fw_id_to_mac_id: this is the purpose of - * %iwl_mvm_sta_drained_wk. + * This is useful when a client (if we are IBSS / GO or AP) disassociates. */ /** @@ -290,6 +281,7 @@ struct iwl_mvm_vif; * These states relate to a specific RA / TID. * * @IWL_AGG_OFF: aggregation is not used + * @IWL_AGG_QUEUED: aggregation start work has been queued * @IWL_AGG_STARTING: aggregation are starting (between start and oper) * @IWL_AGG_ON: aggregation session is up * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the @@ -299,6 +291,7 @@ struct iwl_mvm_vif; */ enum iwl_mvm_agg_state { IWL_AGG_OFF = 0, + IWL_AGG_QUEUED, IWL_AGG_STARTING, IWL_AGG_ON, IWL_EMPTYING_HW_QUEUE_ADDBA, @@ -325,6 +318,10 @@ enum iwl_mvm_agg_state { * @is_tid_active: has this TID sent traffic in the last * %IWL_MVM_DQA_QUEUE_TIMEOUT time period. If %txq_id is invalid, this * field should be ignored. + * @tpt_meas_start: time of the throughput measurements start, is reset every HZ + * @tx_count_last: number of frames transmitted during the last second + * @tx_count: counts the number of frames transmitted since the last reset of + * tpt_meas_start */ struct iwl_mvm_tid_data { struct sk_buff_head deferred_tx_frames; @@ -339,6 +336,9 @@ struct iwl_mvm_tid_data { u16 ssn; u16 tx_time; bool is_tid_active; + unsigned long tpt_meas_start; + u32 tx_count_last; + u32 tx_count; }; struct iwl_mvm_key_pn { @@ -371,7 +371,6 @@ struct iwl_mvm_rxq_dup_data { * struct iwl_mvm_sta - representation of a station in the driver * @sta_id: the index of the station in the fw (will be replaced by id_n_color) * @tfd_queue_msk: the tfd queues used by the station - * @hw_queue: per-AC mapping of the TFD queues used by station * @mac_id_n_color: the MAC context this station is linked to * @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for * tid. 
@@ -409,7 +408,6 @@ struct iwl_mvm_rxq_dup_data { struct iwl_mvm_sta { u32 sta_id; u32 tfd_queue_msk; - u8 hw_queue[IEEE80211_NUM_ACS]; u32 mac_id_n_color; u16 tid_disable_agg; u8 max_agg_bufsize; @@ -533,9 +531,9 @@ void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm); int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); -int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); -int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, @@ -548,7 +546,6 @@ int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm); -void iwl_mvm_sta_drained_wk(struct work_struct *wk); void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm, struct ieee80211_sta *sta); void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index 5a682722adce..4d0314912e94 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -73,7 +75,6 @@ #include "mvm.h" #include "iwl-io.h" #include "iwl-prph.h" -#include "fw-dbg.h" /* * For the high priority TE use a time event type that has similar priority to @@ -130,10 +131,7 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk) * issue as it will have to complete before the next command is * executed, and a new time event means a new command. 
*/ - if (iwl_mvm_is_dqa_supported(mvm)) - iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true, CMD_ASYNC); - else - iwl_mvm_flush_tx_path(mvm, queues, CMD_ASYNC); + iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true, CMD_ASYNC); } static void iwl_mvm_roc_finished(struct iwl_mvm *mvm) @@ -248,7 +246,9 @@ static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm, trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT); te_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(mvm, te_data->vif, trig)) + if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, + ieee80211_vif_to_wdev(te_data->vif), + trig)) return; for (i = 0; i < ARRAY_SIZE(te_trig->time_events); i++) { @@ -263,11 +263,11 @@ static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm, !(trig_status_bitmap & BIT(le32_to_cpu(notif->status)))) continue; - iwl_mvm_fw_dbg_collect_trig(mvm, trig, - "Time event %d Action 0x%x received status: %d", - te_data->id, - le32_to_cpu(notif->action), - le32_to_cpu(notif->status)); + iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, + "Time event %d Action 0x%x received status: %d", + te_data->id, + le32_to_cpu(notif->action), + le32_to_cpu(notif->status)); break; } } @@ -728,8 +728,21 @@ void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm, { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; + u32 id; lockdep_assert_held(&mvm->mutex); + + spin_lock_bh(&mvm->time_event_lock); + id = te_data->id; + spin_unlock_bh(&mvm->time_event_lock); + + if (id != TE_BSS_STA_AGGRESSIVE_ASSOC) { + IWL_DEBUG_TE(mvm, + "don't remove TE with id=%u (not session protection)\n", + id); + return; + } + iwl_mvm_remove_time_event(mvm, mvmvif, te_data); } @@ -861,8 +874,23 @@ int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); if (te_data->running) { - IWL_DEBUG_TE(mvm, "CS period is already scheduled\n"); - return -EBUSY; + u32 id; + + spin_lock_bh(&mvm->time_event_lock); + id = te_data->id; + spin_unlock_bh(&mvm->time_event_lock); + + if (id == TE_CHANNEL_SWITCH_PERIOD) { + IWL_DEBUG_TE(mvm, "CS period is already scheduled\n"); + return -EBUSY; + } + + /* + * Remove the session protection time event to allow the + * channel switch. If we got here, we just heard a beacon so + * the session protection is not needed anymore anyway. 
+ */ + iwl_mvm_remove_time_event(mvm, mvmvif, te_data); } time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tof.c b/drivers/net/wireless/intel/iwlwifi/mvm/tof.c index 634175b2480c..2d0b8a391308 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tof.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tof.c @@ -61,7 +61,7 @@ * *****************************************************************************/ #include "mvm.h" -#include "fw-api-tof.h" +#include "fw/api/tof.h" #define IWL_MVM_TOF_RANGE_REQ_MAX_ID 256 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tof.h b/drivers/net/wireless/intel/iwlwifi/mvm/tof.h index 8c3421c9991d..2ff560aa1a82 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tof.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tof.h @@ -63,7 +63,7 @@ #ifndef __tof_h__ #define __tof_h__ -#include "fw-api-tof.h" +#include "fw/api/tof.h" struct iwl_mvm_tof_data { struct iwl_tof_config_cmd tof_cfg; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index 453a785a3ea5..4d907f60bce9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c @@ -71,7 +71,7 @@ #define IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT HZ -static void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm) +void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm) { struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle; u32 duration = tt->params.ct_kill_duration; @@ -529,6 +529,7 @@ int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 state) lockdep_assert_held(&mvm->mutex); + status = 0; ret = iwl_mvm_send_cmd_pdu_status(mvm, WIDE_ID(PHY_OPS_GROUP, CTDP_CONFIG_CMD), sizeof(cmd), &cmd, &status); @@ -629,7 +630,7 @@ static int iwl_mvm_tzone_get_temp(struct thermal_zone_device *device, mutex_lock(&mvm->mutex); if (!iwl_mvm_firmware_running(mvm) || - mvm->cur_ucode != IWL_UCODE_REGULAR) { + mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) { ret = -EIO; goto out; } @@ -680,7 +681,7 @@ static int iwl_mvm_tzone_set_trip_temp(struct thermal_zone_device *device, mutex_lock(&mvm->mutex); if (!iwl_mvm_firmware_running(mvm) || - mvm->cur_ucode != IWL_UCODE_REGULAR) { + mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) { ret = -EIO; goto out; } @@ -795,7 +796,7 @@ static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev, mutex_lock(&mvm->mutex); if (!iwl_mvm_firmware_running(mvm) || - mvm->cur_ucode != IWL_UCODE_REGULAR) { + mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) { ret = -EIO; goto unlock; } @@ -813,7 +814,7 @@ static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev, return ret; } -static struct thermal_cooling_device_ops tcooling_ops = { +static const struct thermal_cooling_device_ops tcooling_ops = { .get_max_state = iwl_mvm_tcool_get_max_state, .get_cur_state = iwl_mvm_tcool_get_cur_state, .set_cur_state = iwl_mvm_tcool_set_cur_state, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 5fcc9dd6be56..6f2e2af23219 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -74,7 +74,6 @@ #include "iwl-eeprom-parse.h" #include "mvm.h" #include "sta.h" -#include "fw-dbg.h" static void iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr, @@ -89,15 +88,15 @@ iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr, trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA); ba_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig)) + 
if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig)) return; if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid))) return; - iwl_mvm_fw_dbg_collect_trig(mvm, trig, - "BAR sent to %pM, tid %d, ssn %d", - addr, tid, ssn); + iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, + "BAR sent to %pM, tid %d, ssn %d", + addr, tid, ssn); } #define OPT_HDR(type, skb, off) \ @@ -559,17 +558,14 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm, { struct iwl_mvm_vif *mvmvif; - if (!iwl_mvm_is_dqa_supported(mvm)) - return info->hw_queue; - mvmvif = iwl_mvm_vif_from_mac80211(info->control.vif); switch (info->control.vif->type) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_ADHOC: /* - * Handle legacy hostapd as well, where station will be added - * only just before sending the association response. + * Non-bufferable frames use the broadcast station, thus they + * use the probe queue. * Also take care of the case where we send a deauth to a * station that we don't have, or similarly an association * response (with non-success status) for a station we can't @@ -577,9 +573,9 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm, * Also, disassociate frames might happen, particular with * reason 7 ("Class 3 frame received from nonassociated STA"). */ - if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc) || - ieee80211_is_deauth(fc) || ieee80211_is_assoc_resp(fc) || - ieee80211_is_disassoc(fc)) + if (ieee80211_is_mgmt(fc) && + (!ieee80211_is_bufferable_mmpdu(fc) || + ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc))) return mvm->probe_queue; if (info->hw_queue == info->control.vif->cab_queue) return mvmvif->cab_queue; @@ -660,8 +656,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) if (ap_sta_id != IWL_MVM_INVALID_STA) sta_id = ap_sta_id; - } else if (iwl_mvm_is_dqa_supported(mvm) && - info.control.vif->type == NL80211_IFTYPE_MONITOR) { + } else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) { queue = mvm->aux_queue; } } @@ -680,17 +675,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) return -1; } - /* - * Increase the pending frames counter, so that later when a reply comes - * in and the counter is decreased - we don't start getting negative - * values. - * Note that we don't need to make sure it isn't agg'd, since we're - * TXing non-sta - * For DQA mode - we shouldn't increase it though - */ - if (!iwl_mvm_is_dqa_supported(mvm)) - atomic_inc(&mvm->pending_frames[sta_id]); - return 0; } @@ -758,7 +742,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, max_amsdu_len = sta->max_amsdu_len; /* the Tx FIFO to which this A-MSDU will be routed */ - txf = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; + txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, tid_to_mac80211_ac[tid]); /* * Don't send an AMSDU that will be longer than the TXF. @@ -767,7 +751,8 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, * fifo to be able to send bursts. */ max_amsdu_len = min_t(unsigned int, max_amsdu_len, - mvm->smem_cfg.lmac[0].txfifo_size[txf] - 256); + mvm->fwrt.smem_cfg.lmac[0].txfifo_size[txf] - + 256); if (unlikely(dbg_max_amsdu_len)) max_amsdu_len = min_t(unsigned int, max_amsdu_len, @@ -1000,22 +985,13 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, } } - if (iwl_mvm_is_dqa_supported(mvm) || is_ampdu) - txq_id = mvmsta->tid_data[tid].txq_id; - - if (sta->tdls && !iwl_mvm_is_dqa_supported(mvm)) { - /* default to TID 0 for non-QoS packets */ - u8 tdls_tid = tid == IWL_MAX_TID_COUNT ? 
0 : tid; - - txq_id = mvmsta->hw_queue[tid_to_mac80211_ac[tdls_tid]]; - } + txq_id = mvmsta->tid_data[tid].txq_id; WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM); /* Check if TXQ needs to be allocated or re-activated */ if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE || - !mvmsta->tid_data[tid].is_tid_active) && - iwl_mvm_is_dqa_supported(mvm)) { + !mvmsta->tid_data[tid].is_tid_active)) { /* If TXQ needs to be allocated... */ if (txq_id == IWL_MVM_INVALID_QUEUE) { iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb); @@ -1042,7 +1018,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, txq_id); } - if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) { + if (!iwl_mvm_has_new_tx_api(mvm)) { /* Keep track of the time of the last frame for this RA/TID */ mvm->queue_info[txq_id].last_frame_time[tid] = jiffies; @@ -1076,10 +1052,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, spin_unlock(&mvmsta->lock); - /* Increase pending frames count if this isn't AMPDU or DQA queue */ - if (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu) - atomic_inc(&mvm->pending_frames[mvmsta->sta_id]); - return 0; drop_unlock_sta: @@ -1148,8 +1120,7 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm, lockdep_assert_held(&mvmsta->lock); if ((tid_data->state == IWL_AGG_ON || - tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA || - iwl_mvm_is_dqa_supported(mvm)) && + tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) && iwl_mvm_tid_queued(mvm, tid_data) == 0) { /* * Now that this aggregation or DQA queue is empty tell @@ -1183,13 +1154,6 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm, IWL_DEBUG_TX_QUEUES(mvm, "Can continue DELBA flow ssn = next_recl = %d\n", tid_data->next_reclaimed); - if (!iwl_mvm_is_dqa_supported(mvm)) { - u8 mac80211_ac = tid_to_mac80211_ac[tid]; - - iwl_mvm_disable_txq(mvm, tid_data->txq_id, - vif->hw_queue[mac80211_ac], tid, - CMD_ASYNC); - } tid_data->state = IWL_AGG_OFF; ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; @@ -1301,7 +1265,7 @@ static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm, trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TX_STATUS); status_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig)) + if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig)) return; for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) { @@ -1312,9 +1276,9 @@ static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm, if (status_trig->statuses[i].status != (status & TX_STATUS_MSK)) continue; - iwl_mvm_fw_dbg_collect_trig(mvm, trig, - "Tx status %d was received", - status & TX_STATUS_MSK); + iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, + "Tx status %d was received", + status & TX_STATUS_MSK); break; } } @@ -1373,6 +1337,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, while (!skb_queue_empty(&skbs)) { struct sk_buff *skb = __skb_dequeue(&skbs); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + bool flushed = false; skb_freed++; @@ -1386,11 +1351,15 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, case TX_STATUS_DIRECT_DONE: info->flags |= IEEE80211_TX_STAT_ACK; break; + case TX_STATUS_FAIL_FIFO_FLUSHED: + case TX_STATUS_FAIL_DRAIN_FLOW: + flushed = true; + break; case TX_STATUS_FAIL_DEST_PS: - /* In DQA, the FW should have stopped the queue and not + /* the FW should have stopped the queue and not * return this status */ - WARN_ON(iwl_mvm_is_dqa_supported(mvm)); + WARN_ON(1); info->flags |= IEEE80211_TX_STAT_TX_FILTERED; break; 
default: @@ -1408,7 +1377,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, /* Single frame failure in an AMPDU queue => send BAR */ if (info->flags & IEEE80211_TX_CTL_AMPDU && !(info->flags & IEEE80211_TX_STAT_ACK) && - !(info->flags & IEEE80211_TX_STAT_TX_FILTERED)) + !(info->flags & IEEE80211_TX_STAT_TX_FILTERED) && !flushed) info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; info->flags &= ~IEEE80211_TX_CTL_AMPDU; @@ -1446,26 +1415,21 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, ieee80211_tx_status(mvm->hw, skb); } - if (iwl_mvm_is_dqa_supported(mvm) || txq_id >= mvm->first_agg_queue) { - /* If this is an aggregation queue, we use the ssn since: - * ssn = wifi seq_num % 256. - * The seq_ctl is the sequence control of the packet to which - * this Tx response relates. But if there is a hole in the - * bitmap of the BA we received, this Tx response may allow to - * reclaim the hole and all the subsequent packets that were - * already acked. In that case, seq_ctl != ssn, and the next - * packet to be reclaimed will be ssn and not seq_ctl. In that - * case, several packets will be reclaimed even if - * frame_count = 1. - * - * The ssn is the index (% 256) of the latest packet that has - * treated (acked / dropped) + 1. - */ - next_reclaimed = ssn; - } else { - /* The next packet to be reclaimed is the one after this one */ - next_reclaimed = IEEE80211_SEQ_TO_SN(seq_ctl + 0x10); - } + /* This is an aggregation queue or might become one, so we use + * the ssn since: ssn = wifi seq_num % 256. + * The seq_ctl is the sequence control of the packet to which + * this Tx response relates. But if there is a hole in the + * bitmap of the BA we received, this Tx response may allow to + * reclaim the hole and all the subsequent packets that were + * already acked. In that case, seq_ctl != ssn, and the next + * packet to be reclaimed will be ssn and not seq_ctl. In that + * case, several packets will be reclaimed even if + * frame_count = 1. + * + * The ssn is the index (% 256) of the latest packet that has + * treated (acked / dropped) + 1. + */ + next_reclaimed = ssn; IWL_DEBUG_TX_REPLY(mvm, "TXQ %d status %s (0x%08x)\n", @@ -1548,49 +1512,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, mvmsta = NULL; } - /* - * If the txq is not an AMPDU queue, there is no chance we freed - * several skbs. Check that out... - */ - if (iwl_mvm_is_dqa_supported(mvm) || txq_id >= mvm->first_agg_queue) - goto out; - - /* We can't free more than one frame at once on a shared queue */ - WARN_ON(skb_freed > 1); - - /* If we have still frames for this STA nothing to do here */ - if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id])) - goto out; - - if (mvmsta && mvmsta->vif->type == NL80211_IFTYPE_AP) { - - /* - * If there are no pending frames for this STA and - * the tx to this station is not disabled, notify - * mac80211 that this station can now wake up in its - * STA table. - * If mvmsta is not NULL, sta is valid. - */ - - spin_lock_bh(&mvmsta->lock); - - if (!mvmsta->disable_tx) - ieee80211_sta_block_awake(mvm->hw, sta, false); - - spin_unlock_bh(&mvmsta->lock); - } - - if (PTR_ERR(sta) == -EBUSY || PTR_ERR(sta) == -ENOENT) { - /* - * We are draining and this was the last packet - pre_rcu_remove - * has been called already. We might be after the - * synchronize_net already. - * Don't rely on iwl_mvm_rm_sta to see the empty Tx queues. 
- */ - set_bit(sta_id, mvm->sta_drained); - schedule_work(&mvm->sta_drained_wk); - } - out: rcu_read_unlock(); } @@ -1605,7 +1526,7 @@ static const char *iwl_get_agg_tx_status(u16 status) AGG_TX_STATE_(BT_PRIO); AGG_TX_STATE_(FEW_BYTES); AGG_TX_STATE_(ABORT); - AGG_TX_STATE_(LAST_SENT_TTL); + AGG_TX_STATE_(TX_ON_AIR_DROP); AGG_TX_STATE_(LAST_SENT_TRY_CNT); AGG_TX_STATE_(LAST_SENT_BT_KILL); AGG_TX_STATE_(SCD_QUERY); @@ -1654,9 +1575,8 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta; int queue = SEQ_TO_QUEUE(sequence); - if (WARN_ON_ONCE(queue < mvm->first_agg_queue && - (!iwl_mvm_is_dqa_supported(mvm) || - (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)))) + if (WARN_ON_ONCE(queue < IWL_MVM_DQA_MIN_DATA_QUEUE && + (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE))) return; if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS)) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index fc5a490880d0..2ea74abad73d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -70,9 +70,8 @@ #include "iwl-io.h" #include "iwl-prph.h" #include "iwl-csr.h" -#include "fw-dbg.h" #include "mvm.h" -#include "fw-api-rs.h" +#include "fw/api/rs.h" /* * Will return 0 even if the cmd failed when RFKILL is asserted unless @@ -464,8 +463,8 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm) IWL_ERR(mvm, "Not valid error log pointer 0x%08X for %s uCode\n", base, - (mvm->cur_ucode == IWL_UCODE_INIT) - ? "Init" : "RT"); + (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) + ? "Init" : "RT"); return; } @@ -500,7 +499,7 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base) struct iwl_error_event_table table; u32 val; - if (mvm->cur_ucode == IWL_UCODE_INIT) { + if (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) { if (!base) base = mvm->fw->init_errlog_ptr; } else { @@ -512,8 +511,8 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base) IWL_ERR(mvm, "Not valid error log pointer 0x%08X for %s uCode\n", base, - (mvm->cur_ucode == IWL_UCODE_INIT) - ? "Init" : "RT"); + (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) + ? 
"Init" : "RT"); return; } @@ -1190,14 +1189,15 @@ void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif, trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME); trig_mlme = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) + if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, + ieee80211_vif_to_wdev(vif), trig)) goto out; if (trig_mlme->stop_connection_loss && --trig_mlme->stop_connection_loss) goto out; - iwl_mvm_fw_dbg_collect_trig(mvm, trig, "%s", errmsg); + iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "%s", errmsg); out: ieee80211_connection_loss(vif); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c index eddaca76d514..3fc4343581ee 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c @@ -244,7 +244,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, ctxt_info->hcmd_cfg.cmd_queue_addr = cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr); ctxt_info->hcmd_cfg.cmd_queue_size = - TFD_QUEUE_CB_SIZE(TFD_QUEUE_SIZE_MAX); + TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS); /* allocate ucode sections in dram and set addresses */ ret = iwl_pcie_ctxt_info_init_fw_sec(trans, fw, ctxt_info); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 84f4ba01e14f..858765fed8f8 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -430,6 +430,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x095B, 0x520A, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9000, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9400, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9E10, iwl7265_2ac_cfg)}, /* 8000 Series */ {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)}, @@ -710,12 +711,23 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) iwl_trans->cfg = cfg_7265d; } - if (iwl_trans->cfg->rf_id && cfg == &iwla000_2ac_cfg_hr_cdb) { - if (iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_JF) - cfg = &iwla000_2ac_cfg_jf; - else if (iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_HR) - cfg = &iwla000_2ac_cfg_hr; + if (iwl_trans->cfg->rf_id && cfg == &iwla000_2ac_cfg_hr_cdb && + iwl_trans->hw_rev != CSR_HW_REV_TYPE_HR_CDB) { + u32 rf_id_chp = CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id); + u32 jf_chp_id = CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF); + u32 hr_chp_id = CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR); + if (rf_id_chp == jf_chp_id) { + if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ) + cfg = &iwla000_2ax_cfg_qnj_jf_b0; + else + cfg = &iwla000_2ac_cfg_jf; + } else if (rf_id_chp == hr_chp_id) { + if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ) + cfg = &iwla000_2ax_cfg_qnj_hr_a0; + else + cfg = &iwla000_2ac_cfg_hr; + } iwl_trans->cfg = cfg; } #endif @@ -825,11 +837,11 @@ static int iwl_pci_resume(struct device *device) /* * Enable rfkill interrupt (in order to keep track of the rfkill * status). Must be locked to avoid processing a possible rfkill - * interrupt while in iwl_trans_check_hw_rf_kill(). + * interrupt while in iwl_pcie_check_hw_rf_kill(). 
*/ mutex_lock(&trans_pcie->mutex); iwl_enable_rfkill_int(trans); - iwl_trans_check_hw_rf_kill(trans); + iwl_pcie_check_hw_rf_kill(trans); mutex_unlock(&trans_pcie->mutex); return 0; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index a1ea9ef97ed9..4fb7647995c3 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -661,10 +661,16 @@ static inline void iwl_pcie_sw_reset(struct iwl_trans *trans) usleep_range(5000, 6000); } +static inline u8 iwl_pcie_get_cmd_index(struct iwl_txq *q, u32 index) +{ + return index & (q->n_window - 1); +} + static inline void *iwl_pcie_get_tfd(struct iwl_trans_pcie *trans_pcie, struct iwl_txq *txq, int idx) { - return txq->tfds + trans_pcie->tfd_size * idx; + return txq->tfds + trans_pcie->tfd_size * iwl_pcie_get_cmd_index(txq, + idx); } static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) @@ -726,11 +732,6 @@ static inline bool iwl_queue_used(const struct iwl_txq *q, int i) !(i < q->read_ptr && i >= q->write_ptr); } -static inline u8 get_cmd_index(struct iwl_txq *q, u32 index) -{ - return index & (q->n_window - 1); -} - static inline bool iwl_is_rfkill_set(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -793,7 +794,7 @@ void iwl_pcie_rx_allocator_work(struct work_struct *data); void iwl_pcie_apm_config(struct iwl_trans *trans); int iwl_pcie_prepare_card_hw(struct iwl_trans *trans); void iwl_pcie_synchronize_irqs(struct iwl_trans *trans); -bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans); +bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans); void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans, bool was_in_rfkill); void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq); @@ -808,6 +809,8 @@ int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr, size_t size); void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr); void iwl_pcie_apply_destination(struct iwl_trans *trans); +void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie, + struct sk_buff *skb); #ifdef CONFIG_INET struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len); #endif diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 942736d3fa75..a06b6612b658 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -1168,7 +1168,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, sequence = le16_to_cpu(pkt->hdr.sequence); index = SEQ_TO_INDEX(sequence); - cmd_index = get_cmd_index(txq, index); + cmd_index = iwl_pcie_get_cmd_index(txq, index); if (rxq->id == 0) iwl_op_mode_rx(trans->op_mode, &rxq->napi, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index b84b78293e7b..c59f4581e972 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -307,7 +307,7 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, mutex_lock(&trans_pcie->mutex); /* If platform's RF_KILL switch is NOT set to KILL */ - hw_rfkill = iwl_trans_check_hw_rf_kill(trans); + hw_rfkill = iwl_pcie_check_hw_rf_kill(trans); if (hw_rfkill && !run_in_rfkill) { ret = -ERFKILL; goto out; @@ -340,7 +340,7 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, goto out; /* 
re-check RF-Kill state since we may have missed the interrupt */ - hw_rfkill = iwl_trans_check_hw_rf_kill(trans); + hw_rfkill = iwl_pcie_check_hw_rf_kill(trans); if (hw_rfkill && !run_in_rfkill) ret = -ERFKILL; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 3927bbf04f72..2e3e013ec95a 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -986,7 +986,7 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans, &first_ucode_section); } -bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans) +bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool hw_rfkill = iwl_is_rfkill_set(trans); @@ -1252,7 +1252,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, mutex_lock(&trans_pcie->mutex); /* If platform's RF_KILL switch is NOT set to KILL */ - hw_rfkill = iwl_trans_check_hw_rf_kill(trans); + hw_rfkill = iwl_pcie_check_hw_rf_kill(trans); if (hw_rfkill && !run_in_rfkill) { ret = -ERFKILL; goto out; @@ -1300,7 +1300,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, ret = iwl_pcie_load_given_ucode(trans, fw); /* re-check RF-Kill state since we may have missed the interrupt */ - hw_rfkill = iwl_trans_check_hw_rf_kill(trans); + hw_rfkill = iwl_pcie_check_hw_rf_kill(trans); if (hw_rfkill && !run_in_rfkill) ret = -ERFKILL; @@ -1663,7 +1663,7 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) trans_pcie->is_down = false; /* ...rfkill can call stop_device and set it false if needed */ - iwl_trans_check_hw_rf_kill(trans); + iwl_pcie_check_hw_rf_kill(trans); /* Make sure we sync here, because we'll need full access later */ if (low_power) @@ -1847,8 +1847,8 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, * These bits say the device is running, and should keep running for * at least a short while (at least as long as MAC_ACCESS_REQ stays 1), * but they do not indicate that embedded SRAM is restored yet; - * 3945 and 4965 have volatile SRAM, and must save/restore contents - * to/from host DRAM when sleeping/waking for power-saving. + * HW with volatile SRAM must save/restore contents to/from + * host DRAM when sleeping/waking for power-saving. * Each direction takes approximately 1/4 millisecond; with this * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a * series of register accesses are expected (e.g. reading Event Log), @@ -1856,8 +1856,9 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, * * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that * SRAM is okay/restored. We don't check that here because this call - * is just for hardware register access; but GP1 MAC_SLEEP check is a - * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log). + * is just for hardware register access; but GP1 MAC_SLEEP + * check is a good idea before accessing the SRAM of HW with + * volatile SRAM (e.g. reading Event Log). * * 5000 series and later (including 1000 series) have non-volatile SRAM, * and do not save/restore SRAM when power cycling. 
@@ -2839,7 +2840,7 @@ static struct iwl_trans_dump_data spin_lock_bh(&cmdq->lock); ptr = cmdq->write_ptr; for (i = 0; i < cmdq->n_window; i++) { - u8 idx = get_cmd_index(cmdq, ptr); + u8 idx = iwl_pcie_get_cmd_index(cmdq, ptr); u32 caplen, cmdlen; cmdlen = iwl_trans_pcie_get_cmdlen(trans, cmdq->tfds + @@ -3142,7 +3143,18 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, iwl_set_bit(trans, CSR_HOST_CHICKEN, CSR_HOST_CHICKEN_PM_IDLE_SRC_DIS_SB_PME); +#if IS_ENABLED(CONFIG_IWLMVM) trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID); + if (trans->hw_rf_id == CSR_HW_RF_ID_TYPE_HR) { + u32 hw_status; + + hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS); + if (hw_status & UMAG_GEN_HW_IS_FPGA) + trans->cfg = &iwla000_2ax_cfg_qnj_hr_f0; + else + trans->cfg = &iwla000_2ac_cfg_hr; + } +#endif iwl_pcie_set_interrupt_capa(pdev, trans); trans->hw_id = (pdev->device << 16) + pdev->subsystem_device; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index a3795ba0d7b9..d74613fcb756 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -55,7 +55,7 @@ #include "iwl-csr.h" #include "iwl-io.h" #include "internal.h" -#include "mvm/fw-api.h" +#include "fw/api/tx.h" /* * iwl_pcie_gen2_tx_stop - Stop all Tx DMA channels @@ -88,14 +88,14 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_txq *txq, u16 byte_cnt, int num_tbs) { struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr; - int write_ptr = txq->write_ptr; + int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); u8 filled_tfd_size, num_fetch_chunks; u16 len = byte_cnt; __le16 bc_ent; len = DIV_ROUND_UP(len, 4); - if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX)) + if (WARN_ON(len > 0xFFF || idx >= txq->n_window)) return; filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) + @@ -111,7 +111,7 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_txq *txq, u16 byte_cnt, num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1; bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12)); - scd_bc_tbl->tfd_offset[write_ptr] = bc_ent; + scd_bc_tbl->tfd_offset[idx] = bc_ent; } /* @@ -176,16 +176,12 @@ static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and * idx is bounded by n_window */ - int rd_ptr = txq->read_ptr; - int idx = get_cmd_index(txq, rd_ptr); + int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr); lockdep_assert_held(&txq->lock); - /* We have only q->n_window txq->entries, but we use - * TFD_QUEUE_SIZE_MAX tfds - */ iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta, - iwl_pcie_get_tfd(trans_pcie, txq, rd_ptr)); + iwl_pcie_get_tfd(trans_pcie, txq, idx)); /* free SKB */ if (txq->entries) { @@ -373,8 +369,9 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans, { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); struct iwl_tfh_tfd *tfd = - iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr); + iwl_pcie_get_tfd(trans_pcie, txq, idx); dma_addr_t tb_phys; bool amsdu; int i, len, tb1_len, tb2_len, hdr_len; @@ -386,10 +383,10 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans, (*ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_A_MSDU_PRESENT); - tb_phys = iwl_pcie_get_first_tb_dma(txq, txq->write_ptr); + tb_phys = iwl_pcie_get_first_tb_dma(txq, idx); /* 
The first TB points to bi-directional DMA data */ if (!amsdu) - memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr, + memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE); iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); @@ -422,16 +419,16 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans, hdr_len = ieee80211_hdrlen(hdr->frame_control); if (amsdu) { - if (!iwl_pcie_gen2_build_amsdu(trans, skb, tfd, - tb1_len + IWL_FIRST_TB_SIZE, - hdr_len, dev_cmd)) + if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd, + tb1_len + IWL_FIRST_TB_SIZE, + hdr_len, dev_cmd)) goto out_err; /* * building the A-MSDU might have changed this data, so memcpy * it now */ - memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr, + memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE); return tfd; } @@ -484,6 +481,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload; struct iwl_cmd_meta *out_meta; struct iwl_txq *txq = trans_pcie->txq[txq_id]; + int idx; void *tfd; if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used), @@ -497,16 +495,18 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, spin_lock(&txq->lock); + idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); + /* Set up driver data for this TFD */ - txq->entries[txq->write_ptr].skb = skb; - txq->entries[txq->write_ptr].cmd = dev_cmd; + txq->entries[idx].skb = skb; + txq->entries[idx].cmd = dev_cmd; dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | - INDEX_TO_SEQ(txq->write_ptr))); + INDEX_TO_SEQ(idx))); /* Set up first empty entry in queue's array of Tx/cmd buffers */ - out_meta = &txq->entries[txq->write_ptr].meta; + out_meta = &txq->entries[idx].meta; out_meta->flags = 0; tfd = iwl_pcie_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta); @@ -562,7 +562,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, unsigned long flags; void *dup_buf = NULL; dma_addr_t phys_addr; - int idx, i, cmd_pos; + int i, cmd_pos, idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); u16 copy_size, cmd_size, tb0_size; bool had_nocopy = false; u8 group_id = iwl_cmd_groupid(cmd->id); @@ -651,7 +651,6 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, goto free_dup_buf; } - idx = get_cmd_index(txq, txq->write_ptr); out_cmd = txq->entries[idx].cmd; out_meta = &txq->entries[idx].meta; @@ -937,6 +936,15 @@ void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id) IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", txq_id, txq->read_ptr); + if (txq_id != trans_pcie->cmd_queue) { + int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr); + struct sk_buff *skb = txq->entries[idx].skb; + + if (WARN_ON_ONCE(!skb)) + continue; + + iwl_pcie_free_tso_page(trans_pcie, skb); + } iwl_pcie_gen2_free_tfd(trans, txq); txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr); @@ -1033,6 +1041,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, .flags = CMD_WANT_SKB, }; int ret, qid; + u32 wr_ptr; txq = kzalloc(sizeof(*txq), GFP_KERNEL); if (!txq) @@ -1060,7 +1069,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, cmd->tfdq_addr = cpu_to_le64(txq->dma_addr); cmd->byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma); - cmd->cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(TFD_QUEUE_SIZE_MAX)); + cmd->cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(TFD_TX_CMD_SLOTS)); ret = iwl_trans_send_cmd(trans, &hcmd); if (ret) @@ -1073,6 +1082,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, rsp = (void 
*)hcmd.resp_pkt->data; qid = le16_to_cpu(rsp->queue_number); + wr_ptr = le16_to_cpu(rsp->write_pointer); if (qid >= ARRAY_SIZE(trans_pcie->txq)) { WARN_ONCE(1, "queue index %d unsupported", qid); @@ -1088,10 +1098,11 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, txq->id = qid; trans_pcie->txq[qid] = txq; + wr_ptr &= (TFD_QUEUE_SIZE_MAX - 1); /* Place first TFD at index corresponding to start sequence number */ - txq->read_ptr = le16_to_cpu(rsp->write_pointer); - txq->write_ptr = le16_to_cpu(rsp->write_pointer); + txq->read_ptr = wr_ptr; + txq->write_ptr = wr_ptr; iwl_write_direct32(trans, HBUS_TARG_WRPTR, (txq->write_ptr) | (qid << 16)); IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 034bdb4a0b06..c645d10d3707 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -43,8 +43,7 @@ #include "iwl-scd.h" #include "iwl-op-mode.h" #include "internal.h" -/* FIXME: need to abstract out TX command (once we know what it looks like) */ -#include "dvm/commands.h" +#include "fw/api/tx.h" #define IWL_TX_CRC_SIZE 4 #define IWL_TX_DELIMITER_SIZE 4 @@ -107,7 +106,7 @@ static int iwl_queue_init(struct iwl_txq *q, int slots_num) q->n_window = slots_num; /* slots_num must be power-of-two size, otherwise - * get_cmd_index is broken. */ + * iwl_pcie_get_cmd_index is broken. */ if (WARN_ON(!is_power_of_2(slots_num))) return -EINVAL; @@ -429,7 +428,7 @@ void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) * idx is bounded by n_window */ int rd_ptr = txq->read_ptr; - int idx = get_cmd_index(txq, rd_ptr); + int idx = iwl_pcie_get_cmd_index(txq, rd_ptr); lockdep_assert_held(&txq->lock); @@ -578,8 +577,8 @@ int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, return 0; } -static void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie, - struct sk_buff *skb) +void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie, + struct sk_buff *skb) { struct page **page_ptr; @@ -1101,7 +1100,8 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, for (; txq->read_ptr != tfd_num; txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) { - struct sk_buff *skb = txq->entries[txq->read_ptr].skb; + int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr); + struct sk_buff *skb = txq->entries[idx].skb; if (WARN_ON_ONCE(!skb)) continue; @@ -1110,7 +1110,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, __skb_queue_tail(skbs, skb); - txq->entries[txq->read_ptr].skb = NULL; + txq->entries[idx].skb = NULL; if (!trans->cfg->use_tfh) iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq); @@ -1560,7 +1560,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, goto free_dup_buf; } - idx = get_cmd_index(txq, txq->write_ptr); + idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); out_cmd = txq->entries[idx].cmd; out_meta = &txq->entries[idx].meta; @@ -1752,7 +1752,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans, spin_lock_bh(&txq->lock); - cmd_index = get_cmd_index(txq, index); + cmd_index = iwl_pcie_get_cmd_index(txq, index); cmd = txq->entries[cmd_index].cmd; meta = &txq->entries[cmd_index].meta; group_id = cmd->hdr.group_id; @@ -2370,7 +2370,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, tb1_len = ALIGN(len, 4); /* Tell NIC about any 2-byte padding after MAC header */ if (tb1_len != len) - tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; + 
tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_MH_PAD); } else { tb1_len = len; } diff --git a/drivers/net/wireless/intersil/hostap/hostap_main.c b/drivers/net/wireless/intersil/hostap/hostap_main.c index a3c066f90afc..012930d35434 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_main.c +++ b/drivers/net/wireless/intersil/hostap/hostap_main.c @@ -125,8 +125,8 @@ void hostap_remove_interface(struct net_device *dev, int rtnl_locked, else unregister_netdev(dev); - /* dev->destructor = free_netdev() will free the device data, including - * private data, when removing the device */ + /* 'dev->needs_free_netdev = true' implies device data, including + * private data, will be freed when the device is removed */ } diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c index c84fd8490601..56f6e3b71f48 100644 --- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c +++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c @@ -210,7 +210,7 @@ struct ezusb_packet { } __packed; /* Table of devices that work or may work with this driver */ -static struct usb_device_id ezusb_table[] = { +static const struct usb_device_id ezusb_table[] = { {USB_DEVICE(USB_COMPAQ_VENDOR_ID, USB_COMPAQ_WL215_ID)}, {USB_DEVICE(USB_COMPAQ_VENDOR_ID, USB_HP_WL215_ID)}, {USB_DEVICE(USB_COMPAQ_VENDOR_ID, USB_COMPAQ_W200_ID)}, diff --git a/drivers/net/wireless/intersil/p54/p54usb.c b/drivers/net/wireless/intersil/p54/p54usb.c index 043bd1c23c19..b0b86f701061 100644 --- a/drivers/net/wireless/intersil/p54/p54usb.c +++ b/drivers/net/wireless/intersil/p54/p54usb.c @@ -41,7 +41,7 @@ MODULE_FIRMWARE("isl3887usb"); * whenever you add a new device. */ -static struct usb_device_id p54u_table[] = { +static const struct usb_device_id p54u_table[] = { /* Version 1 devices (pci chip + net2280) */ {USB_DEVICE(0x0411, 0x0050)}, /* Buffalo WLI2-USB2-G54 */ {USB_DEVICE(0x045e, 0x00c2)}, /* Microsoft MN-710 */ diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index c8852acc1462..6467ffac9811 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -1362,8 +1362,6 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, txi->control.rates, ARRAY_SIZE(txi->control.rates)); - txi->rate_driver_data[0] = channel; - if (skb->len >= 24 + 8 && ieee80211_is_probe_resp(hdr->frame_control)) { /* fake header transmission time */ diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c index e53025ea6689..16e54c757dd0 100644 --- a/drivers/net/wireless/marvell/libertas/if_usb.c +++ b/drivers/net/wireless/marvell/libertas/if_usb.c @@ -52,7 +52,7 @@ static const struct lbs_fw_table fw_table[] = { { MODEL_8682, "libertas/usb8682.bin", NULL } }; -static struct usb_device_id if_usb_table[] = { +static const struct usb_device_id if_usb_table[] = { /* Enter the device signature inside */ { USB_DEVICE(0x1286, 0x2001), .driver_info = MODEL_8388 }, { USB_DEVICE(0x05a3, 0x8388), .driver_info = MODEL_8388 }, diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c index e0ade40d9497..e9104eca327b 100644 --- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c +++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c @@ -31,7 +31,7 @@ module_param_named(fw_name, lbtf_fw_name, charp, 0644); MODULE_FIRMWARE("lbtf_usb.bin"); -static struct usb_device_id if_usb_table[] = { +static const struct usb_device_id if_usb_table[] 
= { /* Enter the device signature inside */ { USB_DEVICE(0x1286, 0x2001) }, { USB_DEVICE(0x05a3, 0x8388) }, diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c index 16c77c27f1b6..725206914911 100644 --- a/drivers/net/wireless/marvell/mwifiex/11n.c +++ b/drivers/net/wireless/marvell/mwifiex/11n.c @@ -572,6 +572,8 @@ int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac) mwifiex_dbg(priv->adapter, CMD, "cmd: %s: tid %d\n", __func__, tid); + memset(&add_ba_req, 0, sizeof(add_ba_req)); + if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) && ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) && priv->adapter->is_hw_11ac_capable && diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 06ad2d50f9b0..32c5074da84c 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -889,23 +889,15 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv, switch (type) { case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_ADHOC: - priv->bss_num = mwifiex_get_unused_bss_num(adapter, - MWIFIEX_BSS_TYPE_STA); priv->bss_role = MWIFIEX_BSS_ROLE_STA; break; case NL80211_IFTYPE_P2P_CLIENT: - priv->bss_num = mwifiex_get_unused_bss_num(adapter, - MWIFIEX_BSS_TYPE_P2P); priv->bss_role = MWIFIEX_BSS_ROLE_STA; break; case NL80211_IFTYPE_P2P_GO: - priv->bss_num = mwifiex_get_unused_bss_num(adapter, - MWIFIEX_BSS_TYPE_P2P); priv->bss_role = MWIFIEX_BSS_ROLE_UAP; break; case NL80211_IFTYPE_AP: - priv->bss_num = mwifiex_get_unused_bss_num(adapter, - MWIFIEX_BSS_TYPE_UAP); priv->bss_role = MWIFIEX_BSS_ROLE_UAP; break; default: @@ -923,6 +915,8 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv, adapter->rx_locked = false; spin_unlock_irqrestore(&adapter->rx_proc_lock, flags); + mwifiex_set_mac_address(priv, dev); + return 0; } @@ -2012,6 +2006,8 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy, priv->state_11h.is_11h_active = false; } + mwifiex_config_uap_11d(priv, ¶ms->beacon); + if (mwifiex_config_start_uap(priv, bss_cfg)) { mwifiex_dbg(priv->adapter, ERROR, "Failed to start AP\n"); @@ -2963,6 +2959,8 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, } mwifiex_init_priv_params(priv, dev); + mwifiex_set_mac_address(priv, dev); + priv->netdev = dev; ret = mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE, @@ -2990,7 +2988,6 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, dev_net_set(dev, wiphy_net(wiphy)); dev->ieee80211_ptr = &priv->wdev; dev->ieee80211_ptr->iftype = priv->bss_mode; - memcpy(dev->dev_addr, wiphy->perm_addr, ETH_ALEN); SET_NETDEV_DEV(dev, wiphy_dev(wiphy)); dev->flags |= IFF_BROADCAST | IFF_MULTICAST; @@ -3123,11 +3120,7 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev) priv->dfs_chan_sw_workqueue = NULL; } /* Clear the priv in adapter */ - priv->netdev->ieee80211_ptr = NULL; priv->netdev = NULL; - priv->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED; - - priv->media_connected = false; switch (priv->bss_mode) { case NL80211_IFTYPE_UNSPECIFIED: @@ -3395,11 +3388,8 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy, for (i = 0; i < adapter->priv_num; i++) { priv = adapter->priv[i]; - if (priv && priv->netdev) { - mwifiex_stop_net_dev_queue(priv->netdev, adapter); - if (netif_carrier_ok(priv->netdev)) - netif_carrier_off(priv->netdev); - } + if (priv && priv->netdev) + netif_device_detach(priv->netdev); } for (i = 0; i < retry_num; 
i++) { @@ -3470,11 +3460,8 @@ static int mwifiex_cfg80211_resume(struct wiphy *wiphy) for (i = 0; i < adapter->priv_num; i++) { priv = adapter->priv[i]; - if (priv && priv->netdev) { - if (!netif_carrier_ok(priv->netdev)) - netif_carrier_on(priv->netdev); - mwifiex_wake_up_net_dev_queue(priv->netdev, adapter); - } + if (priv && priv->netdev) + netif_device_attach(priv->netdev); } if (!wiphy->wowlan_config) @@ -4215,7 +4202,7 @@ int mwifiex_init_channel_scan_gap(struct mwifiex_adapter *adapter) if (adapter->config_bands & BAND_A) n_channels_a = mwifiex_band_5ghz.n_channels; - adapter->num_in_chan_stats = max_t(u32, n_channels_bg, n_channels_a); + adapter->num_in_chan_stats = n_channels_bg + n_channels_a; adapter->chan_stats = vmalloc(sizeof(*adapter->chan_stats) * adapter->num_in_chan_stats); diff --git a/drivers/net/wireless/marvell/mwifiex/cfp.c b/drivers/net/wireless/marvell/mwifiex/cfp.c index 6e2994308526..bfe84e55df77 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfp.c +++ b/drivers/net/wireless/marvell/mwifiex/cfp.c @@ -180,11 +180,9 @@ static struct region_code_mapping region_code_mapping_t[] = { u8 *mwifiex_11d_code_2_region(u8 code) { u8 i; - u8 size = sizeof(region_code_mapping_t)/ - sizeof(struct region_code_mapping); /* Look for code in mapping table */ - for (i = 0; i < size; i++) + for (i = 0; i < ARRAY_SIZE(region_code_mapping_t); i++) if (region_code_mapping_t[i].code == code) return region_code_mapping_t[i].region; diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c index 8dad52886034..0edc5d621304 100644 --- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c +++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c @@ -26,6 +26,8 @@ #include "11n.h" #include "11ac.h" +static void mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter); + /* * This function initializes a command node. * @@ -427,7 +429,7 @@ int mwifiex_alloc_cmd_buffer(struct mwifiex_adapter *adapter) * The function calls the completion callback for all the command * buffers that still have response buffers associated with them. 
*/ -int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter) +void mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter) { struct cmd_ctrl_node *cmd_array; u32 i; @@ -436,7 +438,7 @@ int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter) if (!adapter->cmd_pool) { mwifiex_dbg(adapter, FATAL, "info: FREE_CMD_BUF: cmd_pool is null\n"); - return 0; + return; } cmd_array = adapter->cmd_pool; @@ -464,8 +466,6 @@ int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter) kfree(adapter->cmd_pool); adapter->cmd_pool = NULL; } - - return 0; } /* @@ -666,7 +666,7 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no, cmd_no == HostCmd_CMD_802_11_SCAN_EXT) { mwifiex_queue_scan_cmd(priv, cmd_node); } else { - mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true); + mwifiex_insert_cmd_to_pending_q(adapter, cmd_node); queue_work(adapter->workqueue, &adapter->main_work); if (cmd_node->wait_q_enabled) ret = mwifiex_wait_queue_complete(adapter, cmd_node); @@ -684,11 +684,12 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no, */ void mwifiex_insert_cmd_to_pending_q(struct mwifiex_adapter *adapter, - struct cmd_ctrl_node *cmd_node, u32 add_tail) + struct cmd_ctrl_node *cmd_node) { struct host_cmd_ds_command *host_cmd = NULL; u16 command; unsigned long flags; + bool add_tail = true; host_cmd = (struct host_cmd_ds_command *) (cmd_node->cmd_skb->data); if (!host_cmd) { @@ -1075,7 +1076,7 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter) * In case of scan commands, all pending commands in scan pending queue * are cancelled. */ -void +static void mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter) { struct cmd_ctrl_node *cmd_node = NULL; diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c index f6f105a7d3ff..6f4239be609d 100644 --- a/drivers/net/wireless/marvell/mwifiex/debugfs.c +++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c @@ -940,8 +940,6 @@ mwifiex_reset_write(struct file *file, if (adapter->if_ops.card_reset) { dev_info(adapter->dev, "Resetting per request\n"); - adapter->hw_status = MWIFIEX_HW_STATUS_RESET; - mwifiex_cancel_all_pending_cmd(adapter); adapter->if_ops.card_reset(adapter); } diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c index 3ecb59f7405b..e11919db7818 100644 --- a/drivers/net/wireless/marvell/mwifiex/init.c +++ b/drivers/net/wireless/marvell/mwifiex/init.c @@ -337,17 +337,9 @@ void mwifiex_wake_up_net_dev_queue(struct net_device *netdev, struct mwifiex_adapter *adapter) { unsigned long dev_queue_flags; - unsigned int i; spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags); - - for (i = 0; i < netdev->num_tx_queues; i++) { - struct netdev_queue *txq = netdev_get_tx_queue(netdev, i); - - if (netif_tx_queue_stopped(txq)) - netif_tx_wake_queue(txq); - } - + netif_tx_wake_all_queues(netdev); spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags); } @@ -358,30 +350,20 @@ void mwifiex_stop_net_dev_queue(struct net_device *netdev, struct mwifiex_adapter *adapter) { unsigned long dev_queue_flags; - unsigned int i; spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags); - - for (i = 0; i < netdev->num_tx_queues; i++) { - struct netdev_queue *txq = netdev_get_tx_queue(netdev, i); - - if (!netif_tx_queue_stopped(txq)) - netif_tx_stop_queue(txq); - } - + netif_tx_stop_all_queues(netdev); spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags); } /* - * This function releases the lock variables and 
frees the locks and - * associated locks. + * This function invalidates the list heads. */ -static void mwifiex_free_lock_list(struct mwifiex_adapter *adapter) +static void mwifiex_invalidate_lists(struct mwifiex_adapter *adapter) { struct mwifiex_private *priv; s32 i, j; - /* Free lists */ list_del(&adapter->cmd_free_q); list_del(&adapter->cmd_pending_q); list_del(&adapter->scan_pending_q); @@ -418,9 +400,11 @@ mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter) mwifiex_cancel_all_pending_cmd(adapter); wake_up_interruptible(&adapter->cmd_wait_q.wait); wake_up_interruptible(&adapter->hs_activate_wait_q); +} - /* Free lock variables */ - mwifiex_free_lock_list(adapter); +void mwifiex_free_cmd_buffers(struct mwifiex_adapter *adapter) +{ + mwifiex_invalidate_lists(adapter); /* Free command buffer */ mwifiex_dbg(adapter, INFO, "info: free cmd buffer\n"); diff --git a/drivers/net/wireless/marvell/mwifiex/join.c b/drivers/net/wireless/marvell/mwifiex/join.c index b89596c18b41..d87aeff70cef 100644 --- a/drivers/net/wireless/marvell/mwifiex/join.c +++ b/drivers/net/wireless/marvell/mwifiex/join.c @@ -253,7 +253,7 @@ mwifiex_cmd_append_wps_ie(struct mwifiex_private *priv, u8 **buffer) priv->wps_ie_len, *buffer); /* Wrap the generic IE buffer with a pass through TLV type */ - ie_header.type = cpu_to_le16(TLV_TYPE_MGMT_IE); + ie_header.type = cpu_to_le16(TLV_TYPE_PASSTHROUGH); ie_header.len = cpu_to_le16(priv->wps_ie_len); memcpy(*buffer, &ie_header, sizeof(ie_header)); *buffer += sizeof(ie_header); diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index f2600b827e81..ee40b739b289 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -46,7 +46,7 @@ MODULE_PARM_DESC(mfg_mode, "manufacturing mode enable:1, disable:0"); bool aggr_ctrl; module_param(aggr_ctrl, bool, 0000); -MODULE_PARM_DESC(aggr_ctrl, "usb tx aggreataon enable:1, disable:0"); +MODULE_PARM_DESC(aggr_ctrl, "usb tx aggregation enable:1, disable:0"); /* * This function registers the device and performs all the necessary @@ -588,7 +588,7 @@ static int _mwifiex_fw_dpc(const struct firmware *firmware, void *context) if (mwifiex_init_channel_scan_gap(adapter)) { mwifiex_dbg(adapter, ERROR, "could not init channel stats table\n"); - goto err_init_fw; + goto err_init_chan_scan; } if (driver_mode) { @@ -636,6 +636,7 @@ static int _mwifiex_fw_dpc(const struct firmware *firmware, void *context) err_add_intf: vfree(adapter->chan_stats); +err_init_chan_scan: wiphy_unregister(adapter->wiphy); wiphy_free(adapter->wiphy); err_init_fw: @@ -653,6 +654,7 @@ static int _mwifiex_fw_dpc(const struct firmware *firmware, void *context) if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) { pr_debug("info: %s: shutdown mwifiex\n", __func__); mwifiex_shutdown_drv(adapter); + mwifiex_free_cmd_buffers(adapter); } init_failed = true; @@ -665,8 +667,11 @@ static int _mwifiex_fw_dpc(const struct firmware *firmware, void *context) release_firmware(adapter->firmware); adapter->firmware = NULL; } - if (init_failed) + if (init_failed) { + if (adapter->irq_wakeup >= 0) + device_init_wakeup(adapter->dev, false); mwifiex_free_adapter(adapter); + } /* Tell all current and future waiters we're finished */ complete_all(fw_done); @@ -935,31 +940,44 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) return 0; } -/* - * CFG802.11 network device handler for setting MAC address. 
- */ -static int -mwifiex_set_mac_address(struct net_device *dev, void *addr) +int mwifiex_set_mac_address(struct mwifiex_private *priv, + struct net_device *dev) { - struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); - struct sockaddr *hw_addr = addr; int ret; + u64 mac_addr; - memcpy(priv->curr_addr, hw_addr->sa_data, ETH_ALEN); + if (priv->bss_type != MWIFIEX_BSS_TYPE_P2P) + goto done; + + mac_addr = ether_addr_to_u64(priv->curr_addr); + mac_addr |= BIT_ULL(MWIFIEX_MAC_LOCAL_ADMIN_BIT); + u64_to_ether_addr(mac_addr, priv->curr_addr); /* Send request to firmware */ ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_MAC_ADDRESS, HostCmd_ACT_GEN_SET, 0, NULL, true); - if (!ret) - memcpy(priv->netdev->dev_addr, priv->curr_addr, ETH_ALEN); - else + if (ret) { mwifiex_dbg(priv->adapter, ERROR, "set mac address failed: ret=%d\n", ret); + return ret; + } +done: memcpy(dev->dev_addr, priv->curr_addr, ETH_ALEN); + return 0; +} - return ret; +/* CFG802.11 network device handler for setting MAC address. + */ +static int +mwifiex_ndo_set_mac_address(struct net_device *dev, void *addr) +{ + struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); + struct sockaddr *hw_addr = addr; + + memcpy(priv->curr_addr, hw_addr->sa_data, ETH_ALEN); + return mwifiex_set_mac_address(priv, dev); } /* @@ -1252,7 +1270,7 @@ static const struct net_device_ops mwifiex_netdev_ops = { .ndo_open = mwifiex_open, .ndo_stop = mwifiex_close, .ndo_start_xmit = mwifiex_hard_start_xmit, - .ndo_set_mac_address = mwifiex_set_mac_address, + .ndo_set_mac_address = mwifiex_ndo_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = mwifiex_tx_timeout, .ndo_get_stats = mwifiex_get_stats, @@ -1296,7 +1314,6 @@ void mwifiex_init_priv_params(struct mwifiex_private *priv, priv->gen_idx = MWIFIEX_AUTO_IDX_MASK; priv->num_tx_timeout = 0; ether_addr_copy(priv->curr_addr, priv->adapter->perm_addr); - memcpy(dev->dev_addr, priv->curr_addr, ETH_ALEN); if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA || GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) { @@ -1352,26 +1369,12 @@ static void mwifiex_main_work_queue(struct work_struct *work) mwifiex_main_process(adapter); } -/* - * This function gets called during PCIe function level reset. Required - * code is extracted from mwifiex_remove_card() - */ -int -mwifiex_shutdown_sw(struct mwifiex_adapter *adapter) +/* Common teardown code used for both device removal and reset */ +static void mwifiex_uninit_sw(struct mwifiex_adapter *adapter) { struct mwifiex_private *priv; int i; - if (!adapter) - goto exit_return; - - wait_for_completion(adapter->fw_done); - /* Caller should ensure we aren't suspending while this happens */ - reinit_completion(adapter->fw_done); - - priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); - mwifiex_deauthenticate(priv, NULL); - /* We can no longer handle interrupts once we start doing the teardown * below. 
*/ @@ -1380,6 +1383,7 @@ mwifiex_shutdown_sw(struct mwifiex_adapter *adapter) adapter->surprise_removed = true; mwifiex_terminate_workqueue(adapter); + adapter->int_status = 0; /* Stop data */ for (i = 0; i < adapter->priv_num; i++) { @@ -1393,12 +1397,9 @@ mwifiex_shutdown_sw(struct mwifiex_adapter *adapter) } mwifiex_dbg(adapter, CMD, "cmd: calling mwifiex_shutdown_drv...\n"); - mwifiex_shutdown_drv(adapter); - if (adapter->if_ops.down_dev) - adapter->if_ops.down_dev(adapter); - mwifiex_dbg(adapter, CMD, "cmd: mwifiex_shutdown_drv done\n"); + if (atomic_read(&adapter->rx_pending) || atomic_read(&adapter->tx_pending) || atomic_read(&adapter->cmd_pending)) { @@ -1420,10 +1421,37 @@ mwifiex_shutdown_sw(struct mwifiex_adapter *adapter) mwifiex_del_virtual_intf(adapter->wiphy, &priv->wdev); rtnl_unlock(); } - vfree(adapter->chan_stats); - mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__); -exit_return: + wiphy_unregister(adapter->wiphy); + wiphy_free(adapter->wiphy); + adapter->wiphy = NULL; + + vfree(adapter->chan_stats); + mwifiex_free_cmd_buffers(adapter); +} + +/* + * This function gets called during PCIe function level reset. + */ +int mwifiex_shutdown_sw(struct mwifiex_adapter *adapter) +{ + struct mwifiex_private *priv; + + if (!adapter) + return 0; + + wait_for_completion(adapter->fw_done); + /* Caller should ensure we aren't suspending while this happens */ + reinit_completion(adapter->fw_done); + + priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); + mwifiex_deauthenticate(priv, NULL); + + mwifiex_uninit_sw(adapter); + + if (adapter->if_ops.down_dev) + adapter->if_ops.down_dev(adapter); + return 0; } EXPORT_SYMBOL_GPL(mwifiex_shutdown_sw); @@ -1506,6 +1534,7 @@ mwifiex_reinit_sw(struct mwifiex_adapter *adapter) mwifiex_dbg(adapter, ERROR, "info: %s: shutdown mwifiex\n", __func__); mwifiex_shutdown_drv(adapter); + mwifiex_free_cmd_buffers(adapter); } complete_all(adapter->fw_done); @@ -1605,10 +1634,8 @@ mwifiex_add_card(void *card, struct completion *fw_done, adapter->cmd_wait_q.status = 0; adapter->scan_wait_q_woken = false; - if ((num_possible_cpus() > 1) || adapter->iface_type == MWIFIEX_USB) { + if ((num_possible_cpus() > 1) || adapter->iface_type == MWIFIEX_USB) adapter->rx_work_enabled = true; - pr_notice("rx work enabled, cpus %d\n", num_possible_cpus()); - } adapter->workqueue = alloc_workqueue("MWIFIEX_WORK_QUEUE", @@ -1653,8 +1680,11 @@ mwifiex_add_card(void *card, struct completion *fw_done, if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) { pr_debug("info: %s: shutdown mwifiex\n", __func__); mwifiex_shutdown_drv(adapter); + mwifiex_free_cmd_buffers(adapter); } err_kmalloc: + if (adapter->irq_wakeup >= 0) + device_init_wakeup(adapter->dev, false); mwifiex_free_adapter(adapter); err_init_sw: @@ -1676,64 +1706,10 @@ EXPORT_SYMBOL_GPL(mwifiex_add_card); */ int mwifiex_remove_card(struct mwifiex_adapter *adapter) { - struct mwifiex_private *priv = NULL; - int i; - if (!adapter) - goto exit_remove; + return 0; - /* We can no longer handle interrupts once we start doing the teardown - * below. 
*/ - if (adapter->if_ops.disable_int) - adapter->if_ops.disable_int(adapter); - - adapter->surprise_removed = true; - - mwifiex_terminate_workqueue(adapter); - - /* Stop data */ - for (i = 0; i < adapter->priv_num; i++) { - priv = adapter->priv[i]; - if (priv && priv->netdev) { - mwifiex_stop_net_dev_queue(priv->netdev, adapter); - if (netif_carrier_ok(priv->netdev)) - netif_carrier_off(priv->netdev); - } - } - - mwifiex_dbg(adapter, CMD, - "cmd: calling mwifiex_shutdown_drv...\n"); - - mwifiex_shutdown_drv(adapter); - mwifiex_dbg(adapter, CMD, - "cmd: mwifiex_shutdown_drv done\n"); - if (atomic_read(&adapter->rx_pending) || - atomic_read(&adapter->tx_pending) || - atomic_read(&adapter->cmd_pending)) { - mwifiex_dbg(adapter, ERROR, - "rx_pending=%d, tx_pending=%d,\t" - "cmd_pending=%d\n", - atomic_read(&adapter->rx_pending), - atomic_read(&adapter->tx_pending), - atomic_read(&adapter->cmd_pending)); - } - - for (i = 0; i < adapter->priv_num; i++) { - priv = adapter->priv[i]; - - if (!priv) - continue; - - rtnl_lock(); - if (priv->netdev && - priv->wdev.iftype != NL80211_IFTYPE_UNSPECIFIED) - mwifiex_del_virtual_intf(adapter->wiphy, &priv->wdev); - rtnl_unlock(); - } - vfree(adapter->chan_stats); - - wiphy_unregister(adapter->wiphy); - wiphy_free(adapter->wiphy); + mwifiex_uninit_sw(adapter); if (adapter->irq_wakeup >= 0) device_init_wakeup(adapter->dev, false); @@ -1748,7 +1724,6 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter) "info: free adapter\n"); mwifiex_free_adapter(adapter); -exit_remove: return 0; } EXPORT_SYMBOL_GPL(mwifiex_remove_card); diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index f8cf3079ac7d..a76bd797e454 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -165,6 +165,8 @@ enum { /* Address alignment */ #define MWIFIEX_ALIGN_ADDR(p, a) (((long)(p) + (a) - 1) & ~((a) - 1)) +#define MWIFIEX_MAC_LOCAL_ADMIN_BIT 41 + /** *enum mwifiex_debug_level - marvell wifi debug level */ @@ -1077,9 +1079,9 @@ int mwifiex_get_debug_info(struct mwifiex_private *, struct mwifiex_debug_info *); int mwifiex_alloc_cmd_buffer(struct mwifiex_adapter *adapter); -int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter); +void mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter); +void mwifiex_free_cmd_buffers(struct mwifiex_adapter *adapter); void mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter); -void mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter); void mwifiex_cancel_pending_scan_cmd(struct mwifiex_adapter *adapter); void mwifiex_cancel_scan(struct mwifiex_adapter *adapter); @@ -1087,8 +1089,7 @@ void mwifiex_recycle_cmd_node(struct mwifiex_adapter *adapter, struct cmd_ctrl_node *cmd_node); void mwifiex_insert_cmd_to_pending_q(struct mwifiex_adapter *adapter, - struct cmd_ctrl_node *cmd_node, - u32 addtail); + struct cmd_ctrl_node *cmd_node); int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter); int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter); @@ -1563,6 +1564,9 @@ int mwifiex_config_start_uap(struct mwifiex_private *priv, void mwifiex_uap_del_sta_data(struct mwifiex_private *priv, struct mwifiex_sta_node *node); +void mwifiex_config_uap_11d(struct mwifiex_private *priv, + struct cfg80211_beacon_data *beacon_data); + void mwifiex_init_11h_params(struct mwifiex_private *priv); int mwifiex_is_11h_active(struct mwifiex_private *priv); int mwifiex_11h_activate(struct mwifiex_private *priv, bool flag); @@ -1672,6 
+1676,8 @@ void mwifiex_process_tx_pause_event(struct mwifiex_private *priv, void mwifiex_process_multi_chan_event(struct mwifiex_private *priv, struct sk_buff *event_skb); void mwifiex_multi_chan_resync(struct mwifiex_adapter *adapter); +int mwifiex_set_mac_address(struct mwifiex_private *priv, + struct net_device *dev); #ifdef CONFIG_DEBUG_FS void mwifiex_debugfs_init(void); diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index 21f2201405d1..cd314946452c 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -1043,12 +1043,14 @@ static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter) mwifiex_unmap_pci_memory(adapter, card->cmdrsp_buf, PCI_DMA_FROMDEVICE); dev_kfree_skb_any(card->cmdrsp_buf); + card->cmdrsp_buf = NULL; } if (card && card->cmd_buf) { mwifiex_unmap_pci_memory(adapter, card->cmd_buf, PCI_DMA_TODEVICE); dev_kfree_skb_any(card->cmd_buf); + card->cmd_buf = NULL; } return 0; } @@ -1983,7 +1985,8 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter, * (3) wifi image. * * This function bypass the header and bluetooth part, return - * the offset of tail wifi-only part. + * the offset of tail wifi-only part. If the image is already wifi-only, + * that is start with CMD1, return 0. */ static int mwifiex_extract_wifi_fw(struct mwifiex_adapter *adapter, @@ -1991,7 +1994,7 @@ static int mwifiex_extract_wifi_fw(struct mwifiex_adapter *adapter, const struct mwifiex_fw_data *fwdata; u32 offset = 0, data_len, dnld_cmd; int ret = 0; - bool cmd7_before = false; + bool cmd7_before = false, first_cmd = false; while (1) { /* Check for integer and buffer overflow */ @@ -2012,20 +2015,29 @@ static int mwifiex_extract_wifi_fw(struct mwifiex_adapter *adapter, switch (dnld_cmd) { case MWIFIEX_FW_DNLD_CMD_1: + if (offset + data_len < data_len) { + mwifiex_dbg(adapter, ERROR, "bad FW parse\n"); + ret = -1; + goto done; + } + + /* Image start with cmd1, already wifi-only firmware */ + if (!first_cmd) { + mwifiex_dbg(adapter, MSG, + "input wifi-only firmware\n"); + return 0; + } + if (!cmd7_before) { mwifiex_dbg(adapter, ERROR, "no cmd7 before cmd1!\n"); ret = -1; goto done; } - if (offset + data_len < data_len) { - mwifiex_dbg(adapter, ERROR, "bad FW parse\n"); - ret = -1; - goto done; - } offset += data_len; break; case MWIFIEX_FW_DNLD_CMD_5: + first_cmd = true; /* Check for integer overflow */ if (offset + data_len < data_len) { mwifiex_dbg(adapter, ERROR, "bad FW parse\n"); @@ -2035,6 +2047,7 @@ static int mwifiex_extract_wifi_fw(struct mwifiex_adapter *adapter, offset += data_len; break; case MWIFIEX_FW_DNLD_CMD_6: + first_cmd = true; /* Check for integer overflow */ if (offset + data_len < data_len) { mwifiex_dbg(adapter, ERROR, "bad FW parse\n"); @@ -2051,6 +2064,7 @@ static int mwifiex_extract_wifi_fw(struct mwifiex_adapter *adapter, } goto done; case MWIFIEX_FW_DNLD_CMD_7: + first_cmd = true; cmd7_before = true; break; default: @@ -2428,7 +2442,7 @@ static irqreturn_t mwifiex_pcie_interrupt(int irq, void *context) * In case of Rx packets received, the packets are uploaded from card to * host and processed accordingly. 
*/ -static int mwifiex_process_pcie_int(struct mwifiex_adapter *adapter) +static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) { int ret; u32 pcie_ireg = 0; @@ -2471,28 +2485,24 @@ static int mwifiex_process_pcie_int(struct mwifiex_adapter *adapter) } if (pcie_ireg & HOST_INTR_DNLD_DONE) { - pcie_ireg &= ~HOST_INTR_DNLD_DONE; mwifiex_dbg(adapter, INTR, "info: TX DNLD Done\n"); ret = mwifiex_pcie_send_data_complete(adapter); if (ret) return ret; } if (pcie_ireg & HOST_INTR_UPLD_RDY) { - pcie_ireg &= ~HOST_INTR_UPLD_RDY; mwifiex_dbg(adapter, INTR, "info: Rx DATA\n"); ret = mwifiex_pcie_process_recv_data(adapter); if (ret) return ret; } if (pcie_ireg & HOST_INTR_EVENT_RDY) { - pcie_ireg &= ~HOST_INTR_EVENT_RDY; mwifiex_dbg(adapter, INTR, "info: Rx EVENT\n"); ret = mwifiex_pcie_process_event_ready(adapter); if (ret) return ret; } if (pcie_ireg & HOST_INTR_CMD_DONE) { - pcie_ireg &= ~HOST_INTR_CMD_DONE; if (adapter->cmd_sent) { mwifiex_dbg(adapter, INTR, "info: CMD sent Interrupt\n"); @@ -2507,75 +2517,13 @@ static int mwifiex_process_pcie_int(struct mwifiex_adapter *adapter) mwifiex_dbg(adapter, INTR, "info: cmd_sent=%d data_sent=%d\n", adapter->cmd_sent, adapter->data_sent); - if (!card->msi_enable && adapter->ps_state != PS_STATE_SLEEP) + if (!card->msi_enable && !card->msix_enable && + adapter->ps_state != PS_STATE_SLEEP) mwifiex_pcie_enable_host_int(adapter); return 0; } -static int mwifiex_process_msix_int(struct mwifiex_adapter *adapter) -{ - int ret; - u32 pcie_ireg; - unsigned long flags; - - spin_lock_irqsave(&adapter->int_lock, flags); - /* Clear out unused interrupts */ - pcie_ireg = adapter->int_status; - adapter->int_status = 0; - spin_unlock_irqrestore(&adapter->int_lock, flags); - - if (pcie_ireg & HOST_INTR_DNLD_DONE) { - mwifiex_dbg(adapter, INTR, - "info: TX DNLD Done\n"); - ret = mwifiex_pcie_send_data_complete(adapter); - if (ret) - return ret; - } - if (pcie_ireg & HOST_INTR_UPLD_RDY) { - mwifiex_dbg(adapter, INTR, - "info: Rx DATA\n"); - ret = mwifiex_pcie_process_recv_data(adapter); - if (ret) - return ret; - } - if (pcie_ireg & HOST_INTR_EVENT_RDY) { - mwifiex_dbg(adapter, INTR, - "info: Rx EVENT\n"); - ret = mwifiex_pcie_process_event_ready(adapter); - if (ret) - return ret; - } - - if (pcie_ireg & HOST_INTR_CMD_DONE) { - if (adapter->cmd_sent) { - mwifiex_dbg(adapter, INTR, - "info: CMD sent Interrupt\n"); - adapter->cmd_sent = false; - } - /* Handle command response */ - ret = mwifiex_pcie_process_cmd_complete(adapter); - if (ret) - return ret; - } - - mwifiex_dbg(adapter, INTR, - "info: cmd_sent=%d data_sent=%d\n", - adapter->cmd_sent, adapter->data_sent); - - return 0; -} - -static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) -{ - struct pcie_service_card *card = adapter->card; - - if (card->msix_enable) - return mwifiex_process_msix_int(adapter); - else - return mwifiex_process_pcie_int(adapter); -} - /* * This function downloads data from driver to card. 
* @@ -2934,7 +2882,6 @@ static void mwifiex_pcie_free_buffers(struct mwifiex_adapter *adapter) mwifiex_pcie_delete_evtbd_ring(adapter); mwifiex_pcie_delete_rxbd_ring(adapter); mwifiex_pcie_delete_txbd_ring(adapter); - card->cmdrsp_buf = NULL; } /* @@ -3036,15 +2983,14 @@ static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter) "Failed to write driver not-ready signature\n"); } - mwifiex_pcie_free_buffers(adapter); + pci_disable_device(pdev); - if (pdev) { - pci_iounmap(pdev, card->pci_mmap); - pci_iounmap(pdev, card->pci_mmap1); - pci_disable_device(pdev); - pci_release_region(pdev, 2); - pci_release_region(pdev, 0); - } + pci_iounmap(pdev, card->pci_mmap); + pci_iounmap(pdev, card->pci_mmap1); + pci_release_region(pdev, 2); + pci_release_region(pdev, 0); + + mwifiex_pcie_free_buffers(adapter); } static int mwifiex_pcie_request_irq(struct mwifiex_adapter *adapter) @@ -3220,7 +3166,6 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter) static void mwifiex_pcie_up_dev(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; - int ret; struct pci_dev *pdev = card->dev; /* tx_buf_size might be changed to 3584 by firmware during @@ -3228,11 +3173,9 @@ static void mwifiex_pcie_up_dev(struct mwifiex_adapter *adapter) */ adapter->tx_buf_size = card->pcie.tx_buf_size; - ret = mwifiex_pcie_alloc_buffers(adapter); - if (!ret) - return; + mwifiex_pcie_alloc_buffers(adapter); - pci_iounmap(pdev, card->pci_mmap1); + pci_set_master(pdev); } /* This function cleans up the PCI-E host memory space. */ @@ -3240,10 +3183,13 @@ static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; + struct pci_dev *pdev = card->dev; if (mwifiex_write_reg(adapter, reg->drv_rdy, 0x00000000)) mwifiex_dbg(adapter, ERROR, "Failed to write driver not-ready signature\n"); + pci_clear_master(pdev); + adapter->seq_num = 0; mwifiex_pcie_free_buffers(adapter); diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c index ae9630b49342..c9d41ed77fc7 100644 --- a/drivers/net/wireless/marvell/mwifiex/scan.c +++ b/drivers/net/wireless/marvell/mwifiex/scan.c @@ -1534,8 +1534,7 @@ int mwifiex_scan_networks(struct mwifiex_private *priv, list_del(&cmd_node->list); spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); - mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, - true); + mwifiex_insert_cmd_to_pending_q(adapter, cmd_node); queue_work(adapter->workqueue, &adapter->main_work); /* Perform internal scan synchronously */ @@ -1948,7 +1947,8 @@ mwifiex_active_scan_req_for_passive_chan(struct mwifiex_private *priv) } adapter->active_scan_triggered = true; - ether_addr_copy(user_scan_cfg->random_mac, priv->random_mac); + if (priv->scan_request->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) + ether_addr_copy(user_scan_cfg->random_mac, priv->random_mac); user_scan_cfg->num_ssids = priv->scan_request->n_ssids; user_scan_cfg->ssid_list = priv->scan_request->ssids; @@ -2033,7 +2033,7 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv) struct cmd_ctrl_node, list); list_del(&cmd_node->list); spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); - mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true); + mwifiex_insert_cmd_to_pending_q(adapter, cmd_node); } return; @@ -2492,6 +2492,12 @@ mwifiex_update_chan_statistics(struct mwifiex_private *priv, sizeof(struct mwifiex_chan_stats); for (i = 0 ; i < 
num_chan; i++) { + if (adapter->survey_idx >= adapter->num_in_chan_stats) { + mwifiex_dbg(adapter, WARN, + "FW reported too many channel results (max %d)\n", + adapter->num_in_chan_stats); + return; + } chan_stats.chan_num = fw_chan_stats->chan_num; chan_stats.bandcfg = fw_chan_stats->bandcfg; chan_stats.flags = fw_chan_stats->flags; @@ -2785,7 +2791,6 @@ static int mwifiex_scan_specific_ssid(struct mwifiex_private *priv, if (!scan_cfg) return -ENOMEM; - ether_addr_copy(scan_cfg->random_mac, priv->random_mac); scan_cfg->ssid_list = req_ssid; scan_cfg->num_ssids = 1; diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index f81a006668f3..fd5183c10c4e 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -390,7 +390,8 @@ mwifiex_sdio_remove(struct sdio_func *func) mwifiex_dbg(adapter, INFO, "info: SDIO func num=%d\n", func->num); ret = mwifiex_sdio_read_fw_status(adapter, &firmware_stat); - if (firmware_stat == FIRMWARE_READY_SDIO && !adapter->mfg_mode) { + if (!ret && firmware_stat == FIRMWARE_READY_SDIO && + !adapter->mfg_mode) { mwifiex_deauthenticate_all(adapter); priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c index 534d94a206a5..fb090144a6d8 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c @@ -189,9 +189,7 @@ static int mwifiex_cmd_tx_rate_cfg(struct mwifiex_private *priv, if (pbitmap_rates != NULL) { rate_scope->hr_dsss_rate_bitmap = cpu_to_le16(pbitmap_rates[0]); rate_scope->ofdm_rate_bitmap = cpu_to_le16(pbitmap_rates[1]); - for (i = 0; - i < sizeof(rate_scope->ht_mcs_rate_bitmap) / sizeof(u16); - i++) + for (i = 0; i < ARRAY_SIZE(rate_scope->ht_mcs_rate_bitmap); i++) rate_scope->ht_mcs_rate_bitmap[i] = cpu_to_le16(pbitmap_rates[2 + i]); if (priv->adapter->fw_api_ver == MWIFIEX_FW_V15) { @@ -206,9 +204,7 @@ static int mwifiex_cmd_tx_rate_cfg(struct mwifiex_private *priv, cpu_to_le16(priv->bitmap_rates[0]); rate_scope->ofdm_rate_bitmap = cpu_to_le16(priv->bitmap_rates[1]); - for (i = 0; - i < sizeof(rate_scope->ht_mcs_rate_bitmap) / sizeof(u16); - i++) + for (i = 0; i < ARRAY_SIZE(rate_scope->ht_mcs_rate_bitmap); i++) rate_scope->ht_mcs_rate_bitmap[i] = cpu_to_le16(priv->bitmap_rates[2 + i]); if (priv->adapter->fw_api_ver == MWIFIEX_FW_V15) { @@ -1755,7 +1751,7 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv, struct mwifiex_ie_types_vhtcap *vht_capab; struct mwifiex_ie_types_aid *aid; struct mwifiex_ie_types_tdls_idle_timeout *timeout; - u8 *pos, qos_info; + u8 *pos; u16 config_len = 0; struct station_parameters *params = priv->sta_params; @@ -1789,12 +1785,11 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv, put_unaligned_le16(params->capability, pos); config_len += sizeof(params->capability); - qos_info = params->uapsd_queues | (params->max_sp << 5); - wmm_qos_info = (struct mwifiex_ie_types_qos_info *)(pos + - config_len); + wmm_qos_info = (void *)(pos + config_len); wmm_qos_info->header.type = cpu_to_le16(WLAN_EID_QOS_CAPA); - wmm_qos_info->header.len = cpu_to_le16(sizeof(qos_info)); - wmm_qos_info->qos_info = qos_info; + wmm_qos_info->header.len = + cpu_to_le16(sizeof(wmm_qos_info->qos_info)); + wmm_qos_info->qos_info = 0; config_len += sizeof(struct mwifiex_ie_types_qos_info); if (params->ht_capa) { diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c 
b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c index 2945775e83c5..0fba5b10ef2d 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c @@ -298,9 +298,8 @@ static int mwifiex_ret_tx_rate_cfg(struct mwifiex_private *priv, priv->bitmap_rates[1] = le16_to_cpu(rate_scope->ofdm_rate_bitmap); for (i = 0; - i < - sizeof(rate_scope->ht_mcs_rate_bitmap) / - sizeof(u16); i++) + i < ARRAY_SIZE(rate_scope->ht_mcs_rate_bitmap); + i++) priv->bitmap_rates[2 + i] = le16_to_cpu(rate_scope-> ht_mcs_rate_bitmap[i]); diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c index 42997e05d90f..a6077ab3efc3 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c @@ -654,9 +654,9 @@ int mwifiex_get_bss_info(struct mwifiex_private *priv, */ int mwifiex_disable_auto_ds(struct mwifiex_private *priv) { - struct mwifiex_ds_auto_ds auto_ds; - - auto_ds.auto_ds = DEEP_SLEEP_OFF; + struct mwifiex_ds_auto_ds auto_ds = { + .auto_ds = DEEP_SLEEP_OFF, + }; return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_PS_MODE_ENH, DIS_AUTO_PS, BITMAP_AUTO_DS, &auto_ds, true); @@ -811,8 +811,8 @@ int mwifiex_drv_set_power(struct mwifiex_private *priv, u32 *ps_mode) * is checked to determine WPA version. If buffer length is zero, the existing * WPA IE is reset. */ -static int mwifiex_set_wpa_ie_helper(struct mwifiex_private *priv, - u8 *ie_data_ptr, u16 ie_len) +static int mwifiex_set_wpa_ie(struct mwifiex_private *priv, + u8 *ie_data_ptr, u16 ie_len) { if (ie_len) { if (ie_len > sizeof(priv->wpa_ie)) { @@ -1351,101 +1351,96 @@ static int mwifiex_set_gen_ie_helper(struct mwifiex_private *priv, u8 *ie_data_ptr, u16 ie_len) { - int ret = 0; struct ieee_types_vendor_header *pvendor_ie; const u8 wpa_oui[] = { 0x00, 0x50, 0xf2, 0x01 }; const u8 wps_oui[] = { 0x00, 0x50, 0xf2, 0x04 }; - u16 unparsed_len = ie_len; - int find_wpa_ie = 0; + u16 unparsed_len = ie_len, cur_ie_len; /* If the passed length is zero, reset the buffer */ if (!ie_len) { priv->gen_ie_buf_len = 0; priv->wps.session_enable = false; - return 0; - } else if (!ie_data_ptr) { + } else if (!ie_data_ptr || + ie_len <= sizeof(struct ieee_types_header)) { return -1; } pvendor_ie = (struct ieee_types_vendor_header *) ie_data_ptr; while (pvendor_ie) { - if (pvendor_ie->element_id == WLAN_EID_VENDOR_SPECIFIC) { - /* Test to see if it is a WPA IE, if not, then it is a - * gen IE - */ - if (!memcmp(pvendor_ie->oui, wpa_oui, - sizeof(wpa_oui))) { - find_wpa_ie = 1; - break; - } - - /* Test to see if it is a WPS IE, if so, enable - * wps session flag - */ - if (!memcmp(pvendor_ie->oui, wps_oui, - sizeof(wps_oui))) { - priv->wps.session_enable = true; - mwifiex_dbg(priv->adapter, MSG, - "info: WPS Session Enabled.\n"); - ret = mwifiex_set_wps_ie(priv, - (u8 *)pvendor_ie, - unparsed_len); - } - } + cur_ie_len = pvendor_ie->len + sizeof(struct ieee_types_header); if (pvendor_ie->element_id == WLAN_EID_RSN) { - find_wpa_ie = 1; - break; + /* IE is a WPA/WPA2 IE so call set_wpa function */ + mwifiex_set_wpa_ie(priv, (u8 *)pvendor_ie, cur_ie_len); + priv->wps.session_enable = false; + goto next_ie; } if (pvendor_ie->element_id == WLAN_EID_BSS_AC_ACCESS_DELAY) { - /* IE is a WAPI IE so call set_wapi function */ - ret = mwifiex_set_wapi_ie(priv, (u8 *)pvendor_ie, - unparsed_len); - return ret; + /* IE is a WAPI IE so call set_wapi function */ + mwifiex_set_wapi_ie(priv, (u8 *)pvendor_ie, + cur_ie_len); + goto 
next_ie; } - unparsed_len -= (pvendor_ie->len + - sizeof(struct ieee_types_header)); + if (pvendor_ie->element_id == WLAN_EID_VENDOR_SPECIFIC) { + /* Test to see if it is a WPA IE, if not, then + * it is a gen IE + */ + if (!memcmp(pvendor_ie->oui, wpa_oui, + sizeof(wpa_oui))) { + /* IE is a WPA/WPA2 IE so call set_wpa function + */ + mwifiex_set_wpa_ie(priv, (u8 *)pvendor_ie, + cur_ie_len); + priv->wps.session_enable = false; + goto next_ie; + } + + if (!memcmp(pvendor_ie->oui, wps_oui, + sizeof(wps_oui))) { + /* Test to see if it is a WPS IE, + * if so, enable wps session flag + */ + priv->wps.session_enable = true; + mwifiex_dbg(priv->adapter, MSG, + "WPS Session Enabled.\n"); + mwifiex_set_wps_ie(priv, (u8 *)pvendor_ie, + cur_ie_len); + goto next_ie; + } + } + + /* Saved in gen_ie, such as P2P IE.etc.*/ + + /* Verify that the passed length is not larger than the + * available space remaining in the buffer + */ + if (cur_ie_len < + (sizeof(priv->gen_ie_buf) - priv->gen_ie_buf_len)) { + /* Append the passed data to the end + * of the genIeBuffer + */ + memcpy(priv->gen_ie_buf + priv->gen_ie_buf_len, + (u8 *)pvendor_ie, cur_ie_len); + /* Increment the stored buffer length by the + * size passed + */ + priv->gen_ie_buf_len += cur_ie_len; + } + +next_ie: + unparsed_len -= cur_ie_len; if (unparsed_len <= sizeof(struct ieee_types_header)) pvendor_ie = NULL; else pvendor_ie = (struct ieee_types_vendor_header *) - (((u8 *)pvendor_ie) + pvendor_ie->len + - sizeof(struct ieee_types_header)); + (((u8 *)pvendor_ie) + cur_ie_len); } - if (find_wpa_ie) { - /* IE is a WPA/WPA2 IE so call set_wpa function */ - ret = mwifiex_set_wpa_ie_helper(priv, (u8 *)pvendor_ie, - unparsed_len); - priv->wps.session_enable = false; - return ret; - } - - /* - * Verify that the passed length is not larger than the - * available space remaining in the buffer - */ - if (ie_len < (sizeof(priv->gen_ie_buf) - priv->gen_ie_buf_len)) { - - /* Append the passed data to the end of the - genIeBuffer */ - memcpy(priv->gen_ie_buf + priv->gen_ie_buf_len, ie_data_ptr, - ie_len); - /* Increment the stored buffer length by the - size passed */ - priv->gen_ie_buf_len += ie_len; - } else { - /* Passed data does not fit in the remaining - buffer space */ - ret = -1; - } - - /* Return 0, or -1 for error case */ - return ret; + return 0; } /* diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c index 39cd677d4159..e76af2866a19 100644 --- a/drivers/net/wireless/marvell/mwifiex/tdls.c +++ b/drivers/net/wireless/marvell/mwifiex/tdls.c @@ -130,7 +130,7 @@ mwifiex_tdls_append_rates_ie(struct mwifiex_private *priv, if (skb_tailroom(skb) < rates_size + 4) { mwifiex_dbg(priv->adapter, ERROR, - "Insuffient space while adding rates\n"); + "Insufficient space while adding rates\n"); return -ENOMEM; } diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c index 477c29c9f5d9..18f7d9bf30b2 100644 --- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c +++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c @@ -444,6 +444,28 @@ mwifiex_uap_bss_wep(u8 **tlv_buf, void *cmd_buf, u16 *param_size) return; } +/* This function enable 11D if userspace set the country IE. 
+ */ +void mwifiex_config_uap_11d(struct mwifiex_private *priv, + struct cfg80211_beacon_data *beacon_data) +{ + enum state_11d_t state_11d; + const u8 *country_ie; + + country_ie = cfg80211_find_ie(WLAN_EID_COUNTRY, beacon_data->tail, + beacon_data->tail_len); + if (country_ie) { + /* Send cmd to FW to enable 11D function */ + state_11d = ENABLE_11D; + if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB, + HostCmd_ACT_GEN_SET, DOT11D_I, + &state_11d, true)) { + mwifiex_dbg(priv->adapter, ERROR, + "11D: failed to enable 11D\n"); + } + } +} + /* This function parses BSS related parameters from structure * and prepares TLVs. These TLVs are appended to command buffer. */ @@ -848,8 +870,6 @@ void mwifiex_uap_set_channel(struct mwifiex_private *priv, int mwifiex_config_start_uap(struct mwifiex_private *priv, struct mwifiex_uap_bss_param *bss_cfg) { - enum state_11d_t state_11d; - if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG, HostCmd_ACT_GEN_SET, UAP_BSS_PARAMS_I, bss_cfg, true)) { @@ -858,16 +878,6 @@ int mwifiex_config_start_uap(struct mwifiex_private *priv, return -1; } - /* Send cmd to FW to enable 11D function */ - state_11d = ENABLE_11D; - if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB, - HostCmd_ACT_GEN_SET, DOT11D_I, - &state_11d, true)) { - mwifiex_dbg(priv->adapter, ERROR, - "11D: failed to enable 11D\n"); - return -1; - } - if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_START, HostCmd_ACT_GEN_SET, 0, NULL, true)) { mwifiex_dbg(priv->adapter, ERROR, diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c index cb1753e43ef4..f4f2b9b27e32 100644 --- a/drivers/net/wireless/marvell/mwifiex/usb.c +++ b/drivers/net/wireless/marvell/mwifiex/usb.c @@ -24,7 +24,7 @@ static struct mwifiex_if_ops usb_ops; -static struct usb_device_id mwifiex_usb_table[] = { +static const struct usb_device_id mwifiex_usb_table[] = { /* 8766 */ {USB_DEVICE(USB8XXX_VID, USB8766_PID_1)}, {USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8766_PID_2, @@ -1112,7 +1112,7 @@ static void mwifiex_usb_tx_aggr_tmo(unsigned long context) if (err) { mwifiex_dbg(adapter, ERROR, "prepare tx aggr skb failed, err=%d\n", err); - return; + goto unlock; } if (atomic_read(&port->tx_data_urb_pending) >= @@ -1133,6 +1133,7 @@ static void mwifiex_usb_tx_aggr_tmo(unsigned long context) done: if (err == -1) mwifiex_write_data_complete(adapter, skb_send, 0, -1); +unlock: spin_unlock_irqrestore(&port->tx_aggr_lock, flags); } diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c index 660267b359e4..7f3e3983b781 100644 --- a/drivers/net/wireless/mediatek/mt7601u/dma.c +++ b/drivers/net/wireless/mediatek/mt7601u/dma.c @@ -457,6 +457,9 @@ static void mt7601u_free_tx(struct mt7601u_dev *dev) { int i; + if (!dev->tx_q) + return; + for (i = 0; i < __MT_EP_OUT_MAX; i++) mt7601u_free_tx_queue(&dev->tx_q[i]); } @@ -484,6 +487,8 @@ static int mt7601u_alloc_tx(struct mt7601u_dev *dev) dev->tx_q = devm_kcalloc(dev->dev, __MT_EP_OUT_MAX, sizeof(*dev->tx_q), GFP_KERNEL); + if (!dev->tx_q) + return -ENOMEM; for (i = 0; i < __MT_EP_OUT_MAX; i++) if (mt7601u_alloc_tx_queue(dev, &dev->tx_q[i])) diff --git a/drivers/net/wireless/mediatek/mt7601u/usb.c b/drivers/net/wireless/mediatek/mt7601u/usb.c index 416c6045ff31..b9e4f6793138 100644 --- a/drivers/net/wireless/mediatek/mt7601u/usb.c +++ b/drivers/net/wireless/mediatek/mt7601u/usb.c @@ -19,7 +19,7 @@ #include "usb.h" #include "trace.h" -static struct usb_device_id mt7601u_device_table[] = { +static 
const struct usb_device_id mt7601u_device_table[] = { { USB_DEVICE(0x0b05, 0x17d3) }, { USB_DEVICE(0x0e8d, 0x760a) }, { USB_DEVICE(0x0e8d, 0x760b) }, diff --git a/drivers/net/wireless/quantenna/qtnfmac/Makefile b/drivers/net/wireless/quantenna/qtnfmac/Makefile index 0d618e5e5f5b..f236b7dc2be3 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/Makefile +++ b/drivers/net/wireless/quantenna/qtnfmac/Makefile @@ -25,7 +25,3 @@ qtnfmac_pearl_pcie-objs += \ pearl/pcie.o qtnfmac_pearl_pcie-$(CONFIG_DEBUG_FS) += debug.o - -# - -ccflags-y += -D__CHECK_ENDIAN diff --git a/drivers/net/wireless/quantenna/qtnfmac/bus.h b/drivers/net/wireless/quantenna/qtnfmac/bus.h index dda05003d522..56e5fed92a2a 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/bus.h +++ b/drivers/net/wireless/quantenna/qtnfmac/bus.h @@ -130,7 +130,6 @@ static __always_inline void qtnf_bus_unlock(struct qtnf_bus *bus) /* interface functions from common layer */ -void qtnf_rx_frame(struct device *dev, struct sk_buff *rxp); int qtnf_core_attach(struct qtnf_bus *bus); void qtnf_core_detach(struct qtnf_bus *bus); void qtnf_txflowblock(struct device *dev, bool state); diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c index e3c090008125..a450bc6bc774 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c @@ -115,6 +115,8 @@ int qtnf_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev) vif = qtnf_netdev_get_priv(wdev->netdev); + qtnf_scan_done(vif->mac, true); + if (qtnf_cmd_send_del_intf(vif)) pr_err("VIF%u.%u: failed to delete VIF\n", vif->mac->macid, vif->vifid); @@ -266,11 +268,19 @@ static int qtnf_start_ap(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_ap_settings *settings) { struct qtnf_vif *vif = qtnf_netdev_get_priv(dev); + struct qtnf_wmac *mac = wiphy_priv(wiphy); struct qtnf_bss_config *bss_cfg; int ret; - bss_cfg = &vif->bss_cfg; + if (!cfg80211_chandef_identical(&mac->chandef, &settings->chandef)) { + memcpy(&mac->chandef, &settings->chandef, sizeof(mac->chandef)); + if (vif->vifid != 0) + pr_warn("%s: unexpected chan %u (%u MHz)\n", dev->name, + settings->chandef.chan->hw_value, + settings->chandef.chan->center_freq); + } + bss_cfg = &vif->bss_cfg; memset(bss_cfg, 0, sizeof(*bss_cfg)); bss_cfg->bcn_period = settings->beacon_interval; @@ -281,8 +291,6 @@ static int qtnf_start_ap(struct wiphy *wiphy, struct net_device *dev, bss_cfg->ssid_len = settings->ssid_len; memcpy(&bss_cfg->ssid, settings->ssid, bss_cfg->ssid_len); - memcpy(&bss_cfg->chandef, &settings->chandef, - sizeof(struct cfg80211_chan_def)); memcpy(&bss_cfg->crypto, &settings->crypto, sizeof(struct cfg80211_crypto_settings)); @@ -329,6 +337,8 @@ static int qtnf_stop_ap(struct wiphy *wiphy, struct net_device *dev) struct qtnf_vif *vif = qtnf_netdev_get_priv(dev); int ret; + qtnf_scan_done(vif->mac, true); + ret = qtnf_cmd_send_stop_ap(vif); if (ret) { pr_err("VIF%u.%u: failed to stop AP operation in FW\n", @@ -564,8 +574,6 @@ qtnf_del_station(struct wiphy *wiphy, struct net_device *dev, !qtnf_sta_list_lookup(&vif->sta_list, params->mac)) return 0; - qtnf_scan_done(vif->mac, true); - ret = qtnf_cmd_send_del_sta(vif, params); if (ret) pr_err("VIF%u.%u: failed to delete STA %pM\n", @@ -573,19 +581,33 @@ qtnf_del_station(struct wiphy *wiphy, struct net_device *dev, return ret; } +static void qtnf_scan_timeout(unsigned long data) +{ + struct qtnf_wmac *mac = (struct qtnf_wmac *)data; + + pr_warn("mac%d scan timed 
out\n", mac->macid); + qtnf_scan_done(mac, true); +} + static int qtnf_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request) { struct qtnf_wmac *mac = wiphy_priv(wiphy); - int ret; mac->scan_req = request; - ret = qtnf_cmd_send_scan(mac); - if (ret) + if (qtnf_cmd_send_scan(mac)) { pr_err("MAC%u: failed to start scan\n", mac->macid); + mac->scan_req = NULL; + return -EFAULT; + } - return ret; + mac->scan_timeout.data = (unsigned long)mac; + mac->scan_timeout.function = qtnf_scan_timeout; + mod_timer(&mac->scan_timeout, + jiffies + QTNF_SCAN_TIMEOUT_SEC * HZ); + + return 0; } static int @@ -593,6 +615,8 @@ qtnf_connect(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_connect_params *sme) { struct qtnf_vif *vif = qtnf_netdev_get_priv(dev); + struct qtnf_wmac *mac = wiphy_priv(wiphy); + struct cfg80211_chan_def chandef; struct qtnf_bss_config *bss_cfg; int ret; @@ -605,9 +629,20 @@ qtnf_connect(struct wiphy *wiphy, struct net_device *dev, bss_cfg = &vif->bss_cfg; memset(bss_cfg, 0, sizeof(*bss_cfg)); + if (sme->channel) { + /* FIXME: need to set proper nl80211_channel_type value */ + cfg80211_chandef_create(&chandef, sme->channel, + NL80211_CHAN_HT20); + /* fall-back to minimal safe chandef description */ + if (!cfg80211_chandef_valid(&chandef)) + cfg80211_chandef_create(&chandef, sme->channel, + NL80211_CHAN_HT20); + + memcpy(&mac->chandef, &chandef, sizeof(mac->chandef)); + } + bss_cfg->ssid_len = sme->ssid_len; memcpy(&bss_cfg->ssid, sme->ssid, bss_cfg->ssid_len); - bss_cfg->chandef.chan = sme->channel; bss_cfg->auth_type = sme->auth_type; bss_cfg->privacy = sme->privacy; bss_cfg->mfp = sme->mfp; @@ -677,6 +712,175 @@ qtnf_disconnect(struct wiphy *wiphy, struct net_device *dev, return 0; } +static int +qtnf_dump_survey(struct wiphy *wiphy, struct net_device *dev, + int idx, struct survey_info *survey) +{ + struct qtnf_wmac *mac = wiphy_priv(wiphy); + struct ieee80211_supported_band *sband; + struct cfg80211_chan_def *chandef; + struct ieee80211_channel *chan; + struct qtnf_chan_stats stats; + struct qtnf_vif *vif; + int ret; + + vif = qtnf_netdev_get_priv(dev); + chandef = &mac->chandef; + + sband = wiphy->bands[NL80211_BAND_2GHZ]; + if (sband && idx >= sband->n_channels) { + idx -= sband->n_channels; + sband = NULL; + } + + if (!sband) + sband = wiphy->bands[NL80211_BAND_5GHZ]; + + if (!sband || idx >= sband->n_channels) + return -ENOENT; + + chan = &sband->channels[idx]; + memset(&stats, 0, sizeof(stats)); + + survey->channel = chan; + survey->filled = 0x0; + + if (chandef->chan) { + if (chan->hw_value == chandef->chan->hw_value) + survey->filled = SURVEY_INFO_IN_USE; + } + + ret = qtnf_cmd_get_chan_stats(mac, chan->hw_value, &stats); + switch (ret) { + case 0: + if (unlikely(stats.chan_num != chan->hw_value)) { + pr_err("received stats for channel %d instead of %d\n", + stats.chan_num, chan->hw_value); + ret = -EINVAL; + break; + } + + survey->filled |= SURVEY_INFO_TIME | + SURVEY_INFO_TIME_SCAN | + SURVEY_INFO_TIME_BUSY | + SURVEY_INFO_TIME_RX | + SURVEY_INFO_TIME_TX | + SURVEY_INFO_NOISE_DBM; + + survey->time_scan = stats.cca_try; + survey->time = stats.cca_try; + survey->time_tx = stats.cca_tx; + survey->time_rx = stats.cca_rx; + survey->time_busy = stats.cca_busy; + survey->noise = stats.chan_noise; + break; + case -ENOENT: + pr_debug("no stats for channel %u\n", chan->hw_value); + ret = 0; + break; + default: + pr_debug("failed to get chan(%d) stats from card\n", + chan->hw_value); + ret = -EINVAL; + break; + } + + return ret; +} + +static int 
+qtnf_get_channel(struct wiphy *wiphy, struct wireless_dev *wdev, + struct cfg80211_chan_def *chandef) +{ + struct qtnf_wmac *mac = wiphy_priv(wiphy); + struct net_device *ndev = wdev->netdev; + struct qtnf_vif *vif; + + if (!ndev) + return -ENODEV; + + vif = qtnf_netdev_get_priv(wdev->netdev); + + switch (vif->wdev.iftype) { + case NL80211_IFTYPE_STATION: + if (vif->sta_state == QTNF_STA_DISCONNECTED) { + pr_warn("%s: STA disconnected\n", ndev->name); + return -ENODATA; + } + break; + case NL80211_IFTYPE_AP: + if (!(vif->bss_status & QTNF_STATE_AP_START)) { + pr_warn("%s: AP not started\n", ndev->name); + return -ENODATA; + } + break; + default: + pr_err("unsupported vif type (%d)\n", vif->wdev.iftype); + return -ENODATA; + } + + if (!cfg80211_chandef_valid(&mac->chandef)) { + pr_err("invalid channel settings on %s\n", ndev->name); + return -ENODATA; + } + + memcpy(chandef, &mac->chandef, sizeof(*chandef)); + return 0; +} + +static int qtnf_channel_switch(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_csa_settings *params) +{ + struct qtnf_wmac *mac = wiphy_priv(wiphy); + struct qtnf_vif *vif = qtnf_netdev_get_priv(dev); + int ret; + + pr_debug("%s: chan(%u) count(%u) radar(%u) block_tx(%u)\n", dev->name, + params->chandef.chan->hw_value, params->count, + params->radar_required, params->block_tx); + + switch (vif->wdev.iftype) { + case NL80211_IFTYPE_AP: + if (!(vif->bss_status & QTNF_STATE_AP_START)) { + pr_warn("AP not started on %s\n", dev->name); + return -ENOTCONN; + } + break; + default: + pr_err("unsupported vif type (%d) on %s\n", + vif->wdev.iftype, dev->name); + return -EOPNOTSUPP; + } + + if (vif->vifid != 0) { + if (!(mac->status & QTNF_MAC_CSA_ACTIVE)) + return -EOPNOTSUPP; + + if (!cfg80211_chandef_identical(¶ms->chandef, + &mac->csa_chandef)) + return -EINVAL; + + return 0; + } + + if (!cfg80211_chandef_valid(¶ms->chandef)) { + pr_err("%s: invalid channel\n", dev->name); + return -EINVAL; + } + + if (cfg80211_chandef_identical(¶ms->chandef, &mac->chandef)) { + pr_err("%s: switch request to the same channel\n", dev->name); + return -EALREADY; + } + + ret = qtnf_cmd_send_chan_switch(mac, params); + if (ret) + pr_warn("%s: failed to switch to channel (%u)\n", + dev->name, params->chandef.chan->hw_value); + + return ret; +} + static struct cfg80211_ops qtn_cfg80211_ops = { .add_virtual_intf = qtnf_add_virtual_intf, .change_virtual_intf = qtnf_change_virtual_intf, @@ -697,69 +901,49 @@ static struct cfg80211_ops qtn_cfg80211_ops = { .set_default_mgmt_key = qtnf_set_default_mgmt_key, .scan = qtnf_scan, .connect = qtnf_connect, - .disconnect = qtnf_disconnect + .disconnect = qtnf_disconnect, + .dump_survey = qtnf_dump_survey, + .get_channel = qtnf_get_channel, + .channel_switch = qtnf_channel_switch }; -static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy, +static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy_in, struct regulatory_request *req) { - struct qtnf_wmac *mac = wiphy_priv(wiphy); - struct qtnf_bus *bus; - struct qtnf_vif *vif; - struct qtnf_wmac *chan_mac; - int i; + struct qtnf_wmac *mac = wiphy_priv(wiphy_in); + struct qtnf_bus *bus = mac->bus; + struct wiphy *wiphy; + unsigned int mac_idx; enum nl80211_band band; - - bus = mac->bus; + int ret; pr_debug("MAC%u: initiator=%d alpha=%c%c\n", mac->macid, req->initiator, req->alpha2[0], req->alpha2[1]); - vif = qtnf_mac_get_base_vif(mac); - if (!vif) { - pr_err("MAC%u: primary VIF is not configured\n", mac->macid); + ret = qtnf_cmd_reg_notify(bus, req); + if (ret) { + if (ret != -EOPNOTSUPP && 
ret != -EALREADY) + pr_err("failed to update reg domain to %c%c\n", + req->alpha2[0], req->alpha2[1]); return; } - /* ignore non-ISO3166 country codes */ - for (i = 0; i < sizeof(req->alpha2); i++) { - if (req->alpha2[i] < 'A' || req->alpha2[i] > 'Z') { - pr_err("MAC%u: not an ISO3166 code\n", mac->macid); - return; - } - } - if (!strncasecmp(req->alpha2, bus->hw_info.alpha2_code, - sizeof(req->alpha2))) { - pr_warn("MAC%u: unchanged country code\n", mac->macid); - return; - } - - if (qtnf_cmd_send_regulatory_config(mac, req->alpha2)) { - pr_err("MAC%u: failed to configure regulatory\n", mac->macid); - return; - } - - for (i = 0; i < bus->hw_info.num_mac; i++) { - chan_mac = bus->mac[i]; - - if (!chan_mac) + for (mac_idx = 0; mac_idx < QTNF_MAX_MAC; ++mac_idx) { + if (!(bus->hw_info.mac_bitmap & (1 << mac_idx))) continue; - if (!(bus->hw_info.mac_bitmap & BIT(i))) - continue; + mac = bus->mac[mac_idx]; + wiphy = priv_to_wiphy(mac); for (band = 0; band < NUM_NL80211_BANDS; ++band) { if (!wiphy->bands[band]) continue; - if (qtnf_cmd_get_mac_chan_info(chan_mac, - wiphy->bands[band])) { - pr_err("MAC%u: can't get channel info\n", - chan_mac->macid); - qtnf_core_detach(bus); - - return; - } + ret = qtnf_cmd_get_mac_chan_info(mac, + wiphy->bands[band]); + if (ret) + pr_err("failed to get chan info for mac %u band %u\n", + mac_idx, band); } } } @@ -844,10 +1028,8 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac) } iface_comb = kzalloc(sizeof(*iface_comb), GFP_KERNEL); - if (!iface_comb) { - ret = -ENOMEM; - goto out; - } + if (!iface_comb) + return -ENOMEM; ret = qtnf_wiphy_setup_if_comb(wiphy, iface_comb, &mac->macinfo); if (ret) @@ -869,6 +1051,7 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac) wiphy->iface_combinations = iface_comb; wiphy->n_iface_combinations = 1; + wiphy->max_num_csa_counters = 2; /* Initialize cipher suits */ wiphy->cipher_suites = qtnf_cipher_suites; @@ -876,7 +1059,8 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac) wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME | WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD | - WIPHY_FLAG_AP_UAPSD; + WIPHY_FLAG_AP_UAPSD | + WIPHY_FLAG_HAS_CHANNEL_SWITCH; wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2; @@ -889,21 +1073,26 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac) ether_addr_copy(wiphy->perm_addr, mac->macaddr); if (hw_info->hw_capab & QLINK_HW_SUPPORTS_REG_UPDATE) { - pr_debug("device supports REG_UPDATE\n"); + wiphy->regulatory_flags |= REGULATORY_STRICT_REG | + REGULATORY_CUSTOM_REG; wiphy->reg_notifier = qtnf_cfg80211_reg_notifier; - pr_debug("hint regulatory about EP region: %c%c\n", - hw_info->alpha2_code[0], - hw_info->alpha2_code[1]); - regulatory_hint(wiphy, hw_info->alpha2_code); + wiphy_apply_custom_regulatory(wiphy, hw_info->rd); } else { - pr_debug("device doesn't support REG_UPDATE\n"); wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED; } ret = wiphy_register(wiphy); + if (ret < 0) + goto out; + + if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) + ret = regulatory_set_wiphy_regd(wiphy, hw_info->rd); + else if (isalpha(hw_info->rd->alpha2[0]) && + isalpha(hw_info->rd->alpha2[1])) + ret = regulatory_hint(wiphy, hw_info->rd->alpha2); out: - if (ret < 0) { + if (ret) { kfree(iface_comb); return ret; } @@ -947,8 +1136,9 @@ void qtnf_virtual_intf_cleanup(struct net_device *ndev) } vif->sta_state = 
QTNF_STA_DISCONNECTED; - qtnf_scan_done(mac, true); } + + qtnf_scan_done(mac, true); } void qtnf_cfg80211_vif_reset(struct qtnf_vif *vif) diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h index 5bd33124a7c8..66db26613b1f 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h @@ -34,10 +34,17 @@ static inline void qtnf_scan_done(struct qtnf_wmac *mac, bool aborted) .aborted = aborted, }; + if (timer_pending(&mac->scan_timeout)) + del_timer_sync(&mac->scan_timeout); + + mutex_lock(&mac->mac_lock); + if (mac->scan_req) { cfg80211_scan_done(mac->scan_req, &info); mac->scan_req = NULL; } + + mutex_unlock(&mac->mac_lock); } #endif /* _QTN_FMAC_CFG80211_H_ */ diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c index b39dbc3d3c1f..4206886b110c 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.c +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c @@ -181,43 +181,11 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif) return ret; } -int qtnf_cmd_send_regulatory_config(struct qtnf_wmac *mac, const char *alpha2) -{ - struct sk_buff *cmd_skb; - u16 res_code = QLINK_CMD_RESULT_OK; - int ret; - - cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD, - QLINK_CMD_REG_REGION, - sizeof(struct qlink_cmd)); - if (unlikely(!cmd_skb)) - return -ENOMEM; - - qtnf_cmd_skb_put_tlv_arr(cmd_skb, WLAN_EID_COUNTRY, alpha2, - QTNF_MAX_ALPHA_LEN); - - ret = qtnf_cmd_send(mac->bus, cmd_skb, &res_code); - - if (unlikely(ret)) - goto out; - - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("MAC%u: CMD failed: %u\n", mac->macid, res_code); - ret = -EFAULT; - goto out; - } - - memcpy(mac->bus->hw_info.alpha2_code, alpha2, - sizeof(mac->bus->hw_info.alpha2_code)); -out: - return ret; -} - int qtnf_cmd_send_config_ap(struct qtnf_vif *vif) { struct sk_buff *cmd_skb; struct qtnf_bss_config *bss_cfg = &vif->bss_cfg; - struct cfg80211_chan_def *chandef = &bss_cfg->chandef; + struct cfg80211_chan_def *chandef = &vif->mac->chandef; struct qlink_tlv_channel *qchan; struct qlink_auth_encr aen; u16 res_code = QLINK_CMD_RESULT_OK; @@ -848,25 +816,168 @@ int qtnf_cmd_send_del_intf(struct qtnf_vif *vif) return ret; } +static u32 qtnf_cmd_resp_reg_rule_flags_parse(u32 qflags) +{ + u32 flags = 0; + + if (qflags & QLINK_RRF_NO_OFDM) + flags |= NL80211_RRF_NO_OFDM; + + if (qflags & QLINK_RRF_NO_CCK) + flags |= NL80211_RRF_NO_CCK; + + if (qflags & QLINK_RRF_NO_INDOOR) + flags |= NL80211_RRF_NO_INDOOR; + + if (qflags & QLINK_RRF_NO_OUTDOOR) + flags |= NL80211_RRF_NO_OUTDOOR; + + if (qflags & QLINK_RRF_DFS) + flags |= NL80211_RRF_DFS; + + if (qflags & QLINK_RRF_PTP_ONLY) + flags |= NL80211_RRF_PTP_ONLY; + + if (qflags & QLINK_RRF_PTMP_ONLY) + flags |= NL80211_RRF_PTMP_ONLY; + + if (qflags & QLINK_RRF_NO_IR) + flags |= NL80211_RRF_NO_IR; + + if (qflags & QLINK_RRF_AUTO_BW) + flags |= NL80211_RRF_AUTO_BW; + + if (qflags & QLINK_RRF_IR_CONCURRENT) + flags |= NL80211_RRF_IR_CONCURRENT; + + if (qflags & QLINK_RRF_NO_HT40MINUS) + flags |= NL80211_RRF_NO_HT40MINUS; + + if (qflags & QLINK_RRF_NO_HT40PLUS) + flags |= NL80211_RRF_NO_HT40PLUS; + + if (qflags & QLINK_RRF_NO_80MHZ) + flags |= NL80211_RRF_NO_80MHZ; + + if (qflags & QLINK_RRF_NO_160MHZ) + flags |= NL80211_RRF_NO_160MHZ; + + return flags; +} + static int qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus, - const struct qlink_resp_get_hw_info *resp) + const struct 
qlink_resp_get_hw_info *resp, + size_t info_len) { struct qtnf_hw_info *hwinfo = &bus->hw_info; + const struct qlink_tlv_hdr *tlv; + const struct qlink_tlv_reg_rule *tlv_rule; + struct ieee80211_reg_rule *rule; + u16 tlv_type; + u16 tlv_value_len; + unsigned int rule_idx = 0; + + if (WARN_ON(resp->n_reg_rules > NL80211_MAX_SUPP_REG_RULES)) + return -E2BIG; + + hwinfo->rd = kzalloc(sizeof(*hwinfo->rd) + + sizeof(struct ieee80211_reg_rule) + * resp->n_reg_rules, GFP_KERNEL); + + if (!hwinfo->rd) + return -ENOMEM; hwinfo->num_mac = resp->num_mac; hwinfo->mac_bitmap = resp->mac_bitmap; hwinfo->fw_ver = le32_to_cpu(resp->fw_ver); hwinfo->ql_proto_ver = le16_to_cpu(resp->ql_proto_ver); - memcpy(hwinfo->alpha2_code, resp->alpha2_code, - sizeof(hwinfo->alpha2_code)); hwinfo->total_tx_chain = resp->total_tx_chain; hwinfo->total_rx_chain = resp->total_rx_chain; hwinfo->hw_capab = le32_to_cpu(resp->hw_capab); + hwinfo->rd->n_reg_rules = resp->n_reg_rules; + hwinfo->rd->alpha2[0] = resp->alpha2[0]; + hwinfo->rd->alpha2[1] = resp->alpha2[1]; + + switch (resp->dfs_region) { + case QLINK_DFS_FCC: + hwinfo->rd->dfs_region = NL80211_DFS_FCC; + break; + case QLINK_DFS_ETSI: + hwinfo->rd->dfs_region = NL80211_DFS_ETSI; + break; + case QLINK_DFS_JP: + hwinfo->rd->dfs_region = NL80211_DFS_JP; + break; + case QLINK_DFS_UNSET: + default: + hwinfo->rd->dfs_region = NL80211_DFS_UNSET; + break; + } + + tlv = (const struct qlink_tlv_hdr *)resp->info; + + while (info_len >= sizeof(*tlv)) { + tlv_type = le16_to_cpu(tlv->type); + tlv_value_len = le16_to_cpu(tlv->len); + + if (tlv_value_len + sizeof(*tlv) > info_len) { + pr_warn("malformed TLV 0x%.2X; LEN: %u\n", + tlv_type, tlv_value_len); + return -EINVAL; + } + + switch (tlv_type) { + case QTN_TLV_ID_REG_RULE: + if (rule_idx >= resp->n_reg_rules) { + pr_warn("unexpected number of rules: %u\n", + resp->n_reg_rules); + return -EINVAL; + } + + if (tlv_value_len != sizeof(*tlv_rule) - sizeof(*tlv)) { + pr_warn("malformed TLV 0x%.2X; LEN: %u\n", + tlv_type, tlv_value_len); + return -EINVAL; + } + + tlv_rule = (const struct qlink_tlv_reg_rule *)tlv; + rule = &hwinfo->rd->reg_rules[rule_idx++]; + + rule->freq_range.start_freq_khz = + le32_to_cpu(tlv_rule->start_freq_khz); + rule->freq_range.end_freq_khz = + le32_to_cpu(tlv_rule->end_freq_khz); + rule->freq_range.max_bandwidth_khz = + le32_to_cpu(tlv_rule->max_bandwidth_khz); + rule->power_rule.max_antenna_gain = + le32_to_cpu(tlv_rule->max_antenna_gain); + rule->power_rule.max_eirp = + le32_to_cpu(tlv_rule->max_eirp); + rule->dfs_cac_ms = + le32_to_cpu(tlv_rule->dfs_cac_ms); + rule->flags = qtnf_cmd_resp_reg_rule_flags_parse( + le32_to_cpu(tlv_rule->flags)); + break; + default: + break; + } + + info_len -= tlv_value_len + sizeof(*tlv); + tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len); + } + + if (rule_idx != resp->n_reg_rules) { + pr_warn("unexpected number of rules: expected %u got %u\n", + resp->n_reg_rules, rule_idx); + kfree(hwinfo->rd); + hwinfo->rd = NULL; + return -EINVAL; + } pr_info("fw_version=%d, MACs map %#x, alpha2=\"%c%c\", chains Tx=%u Rx=%u\n", hwinfo->fw_ver, hwinfo->mac_bitmap, - hwinfo->alpha2_code[0], hwinfo->alpha2_code[1], + hwinfo->rd->alpha2[0], hwinfo->rd->alpha2[1], hwinfo->total_tx_chain, hwinfo->total_rx_chain); return 0; @@ -878,7 +989,7 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac, struct ieee80211_iface_limit *limits = NULL; const struct qlink_iface_limit *limit_record; size_t record_count = 0, rec = 0; - u16 tlv_type, tlv_value_len, mask; + u16 tlv_type, 
tlv_value_len; struct qlink_iface_comb_num *comb; size_t tlv_full_len; const struct qlink_tlv_hdr *tlv; @@ -931,10 +1042,12 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac, limit_record = (void *)tlv->val; limits[rec].max = le16_to_cpu(limit_record->max_num); - mask = le16_to_cpu(limit_record->type_mask); - limits[rec].types = qlink_iface_type_mask_to_nl(mask); - /* only AP and STA modes are supported */ + limits[rec].types = qlink_iface_type_to_nl_mask( + le16_to_cpu(limit_record->type)); + + /* supported modes: STA, AP */ limits[rec].types &= BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_AP_VLAN) | BIT(NL80211_IFTYPE_STATION); pr_debug("MAC%u: MAX: %u; TYPES: %.4X\n", mac->macid, @@ -946,6 +1059,7 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac, default: break; } + tlv_buf_size -= tlv_full_len; tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len); } @@ -1013,14 +1127,24 @@ qtnf_cmd_resp_fill_channels_info(struct ieee80211_supported_band *band, unsigned int chidx = 0; u32 qflags; - kfree(band->channels); - band->channels = NULL; + if (band->channels) { + if (band->n_channels == resp->num_chans) { + memset(band->channels, 0, + sizeof(*band->channels) * band->n_channels); + } else { + kfree(band->channels); + band->n_channels = 0; + band->channels = NULL; + } + } band->n_channels = resp->num_chans; if (band->n_channels == 0) return 0; - band->channels = kcalloc(band->n_channels, sizeof(*chan), GFP_KERNEL); + if (!band->channels) + band->channels = kcalloc(band->n_channels, sizeof(*chan), + GFP_KERNEL); if (!band->channels) { band->n_channels = 0; return -ENOMEM; @@ -1212,6 +1336,62 @@ static int qtnf_cmd_resp_proc_phy_params(struct qtnf_wmac *mac, return 0; } +static int +qtnf_cmd_resp_proc_chan_stat_info(struct qtnf_chan_stats *stats, + const u8 *payload, size_t payload_len) +{ + struct qlink_chan_stats *qlink_stats; + const struct qlink_tlv_hdr *tlv; + size_t tlv_full_len; + u16 tlv_value_len; + u16 tlv_type; + + tlv = (struct qlink_tlv_hdr *)payload; + while (payload_len >= sizeof(struct qlink_tlv_hdr)) { + tlv_type = le16_to_cpu(tlv->type); + tlv_value_len = le16_to_cpu(tlv->len); + tlv_full_len = tlv_value_len + sizeof(struct qlink_tlv_hdr); + if (tlv_full_len > payload_len) { + pr_warn("malformed TLV 0x%.2X; LEN: %u\n", + tlv_type, tlv_value_len); + return -EINVAL; + } + switch (tlv_type) { + case QTN_TLV_ID_CHANNEL_STATS: + if (unlikely(tlv_value_len != sizeof(*qlink_stats))) { + pr_err("invalid CHANNEL_STATS entry size\n"); + return -EINVAL; + } + + qlink_stats = (void *)tlv->val; + + stats->chan_num = le32_to_cpu(qlink_stats->chan_num); + stats->cca_tx = le32_to_cpu(qlink_stats->cca_tx); + stats->cca_rx = le32_to_cpu(qlink_stats->cca_rx); + stats->cca_busy = le32_to_cpu(qlink_stats->cca_busy); + stats->cca_try = le32_to_cpu(qlink_stats->cca_try); + stats->chan_noise = qlink_stats->chan_noise; + + pr_debug("chan(%u) try(%u) busy(%u) noise(%d)\n", + stats->chan_num, stats->cca_try, + stats->cca_busy, stats->chan_noise); + break; + default: + pr_warn("Unknown TLV type: %#x\n", + le16_to_cpu(tlv->type)); + } + payload_len -= tlv_full_len; + tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len); + } + + if (payload_len) { + pr_warn("malformed TLV buf; bytes left: %zu\n", payload_len); + return -EINVAL; + } + + return 0; +} + int qtnf_cmd_get_mac_info(struct qtnf_wmac *mac) { struct sk_buff *cmd_skb, *resp_skb = NULL; @@ -1256,6 +1436,7 @@ int qtnf_cmd_get_hw_info(struct qtnf_bus *bus) const struct qlink_resp_get_hw_info *resp; u16 res_code = 
QLINK_CMD_RESULT_OK; int ret = 0; + size_t info_len; cmd_skb = qtnf_cmd_alloc_new_cmdskb(QLINK_MACID_RSVD, QLINK_VIFID_RSVD, QLINK_CMD_GET_HW_INFO, @@ -1266,7 +1447,7 @@ int qtnf_cmd_get_hw_info(struct qtnf_bus *bus) qtnf_bus_lock(bus); ret = qtnf_cmd_send_with_reply(bus, cmd_skb, &resp_skb, &res_code, - sizeof(*resp), NULL); + sizeof(*resp), &info_len); if (unlikely(ret)) goto out; @@ -1278,7 +1459,7 @@ int qtnf_cmd_get_hw_info(struct qtnf_bus *bus) } resp = (const struct qlink_resp_get_hw_info *)resp_skb->data; - ret = qtnf_cmd_resp_proc_hw_info(bus, resp); + ret = qtnf_cmd_resp_proc_hw_info(bus, resp, info_len); out: qtnf_bus_unlock(bus); @@ -1320,6 +1501,9 @@ int qtnf_cmd_get_mac_chan_info(struct qtnf_wmac *mac, cmd = (struct qlink_cmd_chans_info_get *)cmd_skb->data; cmd->band = qband; + + qtnf_bus_lock(mac->bus); + ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb, &res_code, sizeof(*resp), &info_len); @@ -1343,6 +1527,7 @@ int qtnf_cmd_get_mac_chan_info(struct qtnf_wmac *mac, ret = qtnf_cmd_resp_fill_channels_info(band, resp, info_len); out: + qtnf_bus_unlock(mac->bus); consume_skb(resp_skb); return ret; @@ -1676,10 +1861,27 @@ int qtnf_cmd_send_change_sta(struct qtnf_vif *vif, const u8 *mac, cmd = (struct qlink_cmd_change_sta *)cmd_skb->data; ether_addr_copy(cmd->sta_addr, mac); - cmd->sta_flags_mask = cpu_to_le32(qtnf_encode_sta_flags( - params->sta_flags_mask)); - cmd->sta_flags_set = cpu_to_le32(qtnf_encode_sta_flags( - params->sta_flags_set)); + + switch (vif->wdev.iftype) { + case NL80211_IFTYPE_AP: + cmd->if_type = cpu_to_le16(QLINK_IFTYPE_AP); + cmd->sta_flags_mask = cpu_to_le32(qtnf_encode_sta_flags( + params->sta_flags_mask)); + cmd->sta_flags_set = cpu_to_le32(qtnf_encode_sta_flags( + params->sta_flags_set)); + break; + case NL80211_IFTYPE_STATION: + cmd->if_type = cpu_to_le16(QLINK_IFTYPE_STATION); + cmd->sta_flags_mask = cpu_to_le32(qtnf_encode_sta_flags( + params->sta_flags_mask)); + cmd->sta_flags_set = cpu_to_le32(qtnf_encode_sta_flags( + params->sta_flags_set)); + break; + default: + pr_err("unsupported iftype %d\n", vif->wdev.iftype); + ret = -EINVAL; + goto out; + } ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code); if (unlikely(ret)) @@ -1853,8 +2055,8 @@ int qtnf_cmd_send_connect(struct qtnf_vif *vif, ether_addr_copy(cmd->bssid, bss_cfg->bssid); - if (bss_cfg->chandef.chan) - cmd->freq = cpu_to_le16(bss_cfg->chandef.chan->center_freq); + if (vif->mac->chandef.chan) + cmd->channel = cpu_to_le16(vif->mac->chandef.chan->hw_value); cmd->bg_scan_period = cpu_to_le16(bss_cfg->bg_scan_period); @@ -1976,3 +2178,183 @@ int qtnf_cmd_send_updown_intf(struct qtnf_vif *vif, bool up) qtnf_bus_unlock(vif->mac->bus); return ret; } + +int qtnf_cmd_reg_notify(struct qtnf_bus *bus, struct regulatory_request *req) +{ + struct sk_buff *cmd_skb; + int ret; + u16 res_code; + struct qlink_cmd_reg_notify *cmd; + + cmd_skb = qtnf_cmd_alloc_new_cmdskb(QLINK_MACID_RSVD, QLINK_VIFID_RSVD, + QLINK_CMD_REG_NOTIFY, + sizeof(*cmd)); + if (!cmd_skb) + return -ENOMEM; + + cmd = (struct qlink_cmd_reg_notify *)cmd_skb->data; + cmd->alpha2[0] = req->alpha2[0]; + cmd->alpha2[1] = req->alpha2[1]; + + switch (req->initiator) { + case NL80211_REGDOM_SET_BY_CORE: + cmd->initiator = QLINK_REGDOM_SET_BY_CORE; + break; + case NL80211_REGDOM_SET_BY_USER: + cmd->initiator = QLINK_REGDOM_SET_BY_USER; + break; + case NL80211_REGDOM_SET_BY_DRIVER: + cmd->initiator = QLINK_REGDOM_SET_BY_DRIVER; + break; + case NL80211_REGDOM_SET_BY_COUNTRY_IE: + cmd->initiator = QLINK_REGDOM_SET_BY_COUNTRY_IE; + 
break; + } + + switch (req->user_reg_hint_type) { + case NL80211_USER_REG_HINT_USER: + cmd->user_reg_hint_type = QLINK_USER_REG_HINT_USER; + break; + case NL80211_USER_REG_HINT_CELL_BASE: + cmd->user_reg_hint_type = QLINK_USER_REG_HINT_CELL_BASE; + break; + case NL80211_USER_REG_HINT_INDOOR: + cmd->user_reg_hint_type = QLINK_USER_REG_HINT_INDOOR; + break; + } + + qtnf_bus_lock(bus); + + ret = qtnf_cmd_send(bus, cmd_skb, &res_code); + if (ret) + goto out; + + switch (res_code) { + case QLINK_CMD_RESULT_ENOTSUPP: + pr_warn("reg update not supported\n"); + ret = -EOPNOTSUPP; + break; + case QLINK_CMD_RESULT_EALREADY: + pr_info("regulatory domain is already set to %c%c", + req->alpha2[0], req->alpha2[1]); + ret = -EALREADY; + break; + case QLINK_CMD_RESULT_OK: + ret = 0; + break; + default: + ret = -EFAULT; + break; + } + +out: + qtnf_bus_unlock(bus); + + return ret; +} + +int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel, + struct qtnf_chan_stats *stats) +{ + struct sk_buff *cmd_skb, *resp_skb = NULL; + struct qlink_cmd_get_chan_stats *cmd; + struct qlink_resp_get_chan_stats *resp; + size_t var_data_len; + u16 res_code = QLINK_CMD_RESULT_OK; + int ret = 0; + + cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD, + QLINK_CMD_CHAN_STATS, + sizeof(*cmd)); + if (!cmd_skb) + return -ENOMEM; + + qtnf_bus_lock(mac->bus); + + cmd = (struct qlink_cmd_get_chan_stats *)cmd_skb->data; + cmd->channel = cpu_to_le16(channel); + + ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb, &res_code, + sizeof(*resp), &var_data_len); + if (unlikely(ret)) { + qtnf_bus_unlock(mac->bus); + return ret; + } + + if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { + switch (res_code) { + case QLINK_CMD_RESULT_ENOTFOUND: + ret = -ENOENT; + break; + default: + pr_err("cmd exec failed: 0x%.4X\n", res_code); + ret = -EFAULT; + break; + } + goto out; + } + + resp = (struct qlink_resp_get_chan_stats *)resp_skb->data; + ret = qtnf_cmd_resp_proc_chan_stat_info(stats, resp->info, + var_data_len); + +out: + qtnf_bus_unlock(mac->bus); + consume_skb(resp_skb); + return ret; +} + +int qtnf_cmd_send_chan_switch(struct qtnf_wmac *mac, + struct cfg80211_csa_settings *params) +{ + struct qlink_cmd_chan_switch *cmd; + struct sk_buff *cmd_skb; + u16 res_code = QLINK_CMD_RESULT_OK; + int ret; + + cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, 0x0, + QLINK_CMD_CHAN_SWITCH, + sizeof(*cmd)); + + if (unlikely(!cmd_skb)) + return -ENOMEM; + + qtnf_bus_lock(mac->bus); + + cmd = (struct qlink_cmd_chan_switch *)cmd_skb->data; + cmd->channel = cpu_to_le16(params->chandef.chan->hw_value); + cmd->radar_required = params->radar_required; + cmd->block_tx = params->block_tx; + cmd->beacon_count = params->count; + + ret = qtnf_cmd_send(mac->bus, cmd_skb, &res_code); + + if (unlikely(ret)) + goto out; + + switch (res_code) { + case QLINK_CMD_RESULT_OK: + memcpy(&mac->csa_chandef, ¶ms->chandef, + sizeof(mac->csa_chandef)); + mac->status |= QTNF_MAC_CSA_ACTIVE; + ret = 0; + break; + case QLINK_CMD_RESULT_ENOTFOUND: + ret = -ENOENT; + break; + case QLINK_CMD_RESULT_ENOTSUPP: + ret = -EOPNOTSUPP; + break; + case QLINK_CMD_RESULT_EALREADY: + ret = -EALREADY; + break; + case QLINK_CMD_RESULT_INVALID: + default: + ret = -EFAULT; + break; + } + +out: + qtnf_bus_unlock(mac->bus); + return ret; +} diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.h b/drivers/net/wireless/quantenna/qtnfmac/commands.h index 6c51854ef5e7..783b20364296 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.h +++ 
b/drivers/net/wireless/quantenna/qtnfmac/commands.h @@ -70,5 +70,10 @@ int qtnf_cmd_send_disconnect(struct qtnf_vif *vif, u16 reason_code); int qtnf_cmd_send_updown_intf(struct qtnf_vif *vif, bool up); +int qtnf_cmd_reg_notify(struct qtnf_bus *bus, struct regulatory_request *req); +int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel, + struct qtnf_chan_stats *stats); +int qtnf_cmd_send_chan_switch(struct qtnf_wmac *mac, + struct cfg80211_csa_settings *params); #endif /* QLINK_COMMANDS_H_ */ diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c index f053532c0e87..5e60180482d1 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/core.c +++ b/drivers/net/wireless/quantenna/qtnfmac/core.c @@ -288,6 +288,8 @@ static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus, mac->iflist[i].mac = mac; mac->iflist[i].vifid = i; qtnf_sta_list_init(&mac->iflist[i].sta_list); + mutex_init(&mac->mac_lock); + init_timer(&mac->scan_timeout); } qtnf_mac_init_primary_intf(mac); @@ -549,6 +551,9 @@ void qtnf_core_detach(struct qtnf_bus *bus) destroy_workqueue(bus->workqueue); } + kfree(bus->hw_info.rd); + bus->hw_info.rd = NULL; + qtnf_trans_free(bus); } EXPORT_SYMBOL_GPL(qtnf_core_detach); diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h index a616434281cf..066fcd1095a0 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/core.h +++ b/drivers/net/wireless/quantenna/qtnfmac/core.h @@ -42,11 +42,11 @@ #define QTNF_MAX_SSID_LIST_LENGTH 2 #define QTNF_MAX_VSIE_LEN 255 -#define QTNF_MAX_ALPHA_LEN 2 #define QTNF_MAX_INTF 8 #define QTNF_MAX_EVENT_QUEUE_LEN 255 #define QTNF_DEFAULT_BG_SCAN_PERIOD 300 #define QTNF_MAX_BG_SCAN_PERIOD 0xffff +#define QTNF_SCAN_TIMEOUT_SEC 15 #define QTNF_DEF_BSS_PRIORITY 0 #define QTNF_DEF_WDOG_TIMEOUT 5 @@ -68,7 +68,6 @@ struct qtnf_bss_config { u16 auth_type; bool privacy; enum nl80211_mfp mfp; - struct cfg80211_chan_def chandef; struct cfg80211_crypto_settings crypto; u16 bg_scan_period; u32 connect_flags; @@ -90,6 +89,10 @@ enum qtnf_sta_state { QTNF_STA_CONNECTED }; +enum qtnf_mac_status { + QTNF_MAC_CSA_ACTIVE = BIT(0) +}; + struct qtnf_vif { struct wireless_dev wdev; u8 vifid; @@ -125,25 +128,39 @@ struct qtnf_mac_info { size_t n_limits; }; +struct qtnf_chan_stats { + u32 chan_num; + u32 cca_tx; + u32 cca_rx; + u32 cca_busy; + u32 cca_try; + s8 chan_noise; +}; + struct qtnf_wmac { u8 macid; u8 wiphy_registered; u8 macaddr[ETH_ALEN]; + u32 status; struct qtnf_bus *bus; struct qtnf_mac_info macinfo; struct qtnf_vif iflist[QTNF_MAX_INTF]; struct cfg80211_scan_request *scan_req; + struct cfg80211_chan_def chandef; + struct cfg80211_chan_def csa_chandef; + struct mutex mac_lock; /* lock during wmac specific ops */ + struct timer_list scan_timeout; }; struct qtnf_hw_info { + u16 ql_proto_ver; u8 num_mac; u8 mac_bitmap; - u8 alpha2_code[QTNF_MAX_ALPHA_LEN]; u32 fw_ver; - u16 ql_proto_ver; + u32 hw_capab; + struct ieee80211_regdomain *rd; u8 total_tx_chain; u8 total_rx_chain; - u32 hw_capab; }; struct qtnf_vif *qtnf_mac_get_free_vif(struct qtnf_wmac *mac); diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c index 9b61e9a83670..43d2e7fd6e02 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/event.c +++ b/drivers/net/wireless/quantenna/qtnfmac/event.c @@ -211,8 +211,8 @@ qtnf_event_handle_bss_leave(struct qtnf_vif *vif, pr_debug("VIF%u.%u: disconnected\n", vif->mac->macid, vif->vifid); -
cfg80211_disconnected(vif->netdev, leave_info->reason, NULL, 0, 0, - GFP_KERNEL); + cfg80211_disconnected(vif->netdev, le16_to_cpu(leave_info->reason), + NULL, 0, 0, GFP_KERNEL); vif->sta_state = QTNF_STA_DISCONNECTED; netif_carrier_off(vif->netdev); @@ -350,6 +350,63 @@ qtnf_event_handle_scan_complete(struct qtnf_wmac *mac, return 0; } +static int +qtnf_event_handle_freq_change(struct qtnf_wmac *mac, + const struct qlink_event_freq_change *data, + u16 len) +{ + struct wiphy *wiphy = priv_to_wiphy(mac); + struct cfg80211_chan_def chandef; + struct ieee80211_channel *chan; + struct qtnf_vif *vif; + int freq; + int i; + + if (len < sizeof(*data)) { + pr_err("payload is too short\n"); + return -EINVAL; + } + + freq = le32_to_cpu(data->freq); + chan = ieee80211_get_channel(wiphy, freq); + if (!chan) { + pr_err("channel at %d MHz not found\n", freq); + return -EINVAL; + } + + pr_debug("MAC%d switch to new channel %u MHz\n", mac->macid, freq); + + if (mac->status & QTNF_MAC_CSA_ACTIVE) { + mac->status &= ~QTNF_MAC_CSA_ACTIVE; + if (chan->hw_value != mac->csa_chandef.chan->hw_value) + pr_warn("unexpected switch to %u during CSA to %u\n", + chan->hw_value, + mac->csa_chandef.chan->hw_value); + } + + /* FIXME: need to figure out proper nl80211_channel_type value */ + cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_HT20); + /* fall-back to minimal safe chandef description */ + if (!cfg80211_chandef_valid(&chandef)) + cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_HT20); + + memcpy(&mac->chandef, &chandef, sizeof(mac->chandef)); + + for (i = 0; i < QTNF_MAX_INTF; i++) { + vif = &mac->iflist[i]; + if (vif->wdev.iftype == NL80211_IFTYPE_UNSPECIFIED) + continue; + + if (vif->netdev) { + mutex_lock(&vif->wdev.mtx); + cfg80211_ch_switch_notify(vif->netdev, &chandef); + mutex_unlock(&vif->wdev.mtx); + } + } + + return 0; +} + static int qtnf_event_parse(struct qtnf_wmac *mac, const struct sk_buff *event_skb) { @@ -400,6 +457,10 @@ static int qtnf_event_parse(struct qtnf_wmac *mac, ret = qtnf_event_handle_bss_leave(vif, (const void *)event, event_len); break; + case QLINK_EVENT_FREQ_CHANGE: + ret = qtnf_event_handle_freq_change(mac, (const void *)event, + event_len); + break; default: pr_warn("unknown event type: %x\n", event_id); break; diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c index 7fc4f0d6a9ad..69131965a298 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c @@ -25,6 +25,8 @@ #include #include #include +#include +#include #include "qtn_hw_ids.h" #include "pcie_bus_priv.h" @@ -36,17 +38,13 @@ static bool use_msi = true; module_param(use_msi, bool, 0644); MODULE_PARM_DESC(use_msi, "set 0 to use legacy interrupt"); -static unsigned int tx_bd_size_param = 256; +static unsigned int tx_bd_size_param = 32; module_param(tx_bd_size_param, uint, 0644); -MODULE_PARM_DESC(tx_bd_size_param, "Tx descriptors queue size"); +MODULE_PARM_DESC(tx_bd_size_param, "Tx descriptors queue size, power of two"); static unsigned int rx_bd_size_param = 256; module_param(rx_bd_size_param, uint, 0644); -MODULE_PARM_DESC(rx_bd_size_param, "Rx descriptors queue size"); - -static unsigned int rx_bd_reserved_param = 16; -module_param(rx_bd_reserved_param, uint, 0644); -MODULE_PARM_DESC(rx_bd_reserved_param, "Reserved RX descriptors"); +MODULE_PARM_DESC(rx_bd_size_param, "Rx descriptors queue size, power of two"); static u8 flashboot = 1; module_param(flashboot, byte, 0644); @@ 
-186,8 +184,10 @@ static void __iomem *qtnf_map_bar(struct qtnf_pcie_bus_priv *priv, u8 index) return IOMEM_ERR_PTR(ret); busaddr = pci_resource_start(priv->pdev, index); - vaddr = pcim_iomap_table(priv->pdev)[index]; len = pci_resource_len(priv->pdev, index); + vaddr = pcim_iomap_table(priv->pdev)[index]; + if (!vaddr) + return IOMEM_ERR_PTR(-ENOMEM); pr_debug("BAR%u vaddr=0x%p busaddr=%pad len=%u\n", index, vaddr, &busaddr, (int)len); @@ -250,19 +250,19 @@ static int qtnf_pcie_init_memory(struct qtnf_pcie_bus_priv *priv) int ret = -ENOMEM; priv->sysctl_bar = qtnf_map_bar(priv, QTN_SYSCTL_BAR); - if (IS_ERR_OR_NULL(priv->sysctl_bar)) { + if (IS_ERR(priv->sysctl_bar)) { pr_err("failed to map BAR%u\n", QTN_SYSCTL_BAR); return ret; } priv->dmareg_bar = qtnf_map_bar(priv, QTN_DMA_BAR); - if (IS_ERR_OR_NULL(priv->dmareg_bar)) { + if (IS_ERR(priv->dmareg_bar)) { pr_err("failed to map BAR%u\n", QTN_DMA_BAR); return ret; } priv->epmem_bar = qtnf_map_bar(priv, QTN_SHMEM_BAR); - if (IS_ERR_OR_NULL(priv->epmem_bar)) { + if (IS_ERR(priv->epmem_bar)) { pr_err("failed to map BAR%u\n", QTN_SHMEM_BAR); return ret; } @@ -274,32 +274,6 @@ static int qtnf_pcie_init_memory(struct qtnf_pcie_bus_priv *priv) return 0; } -static int -qtnf_pcie_init_dma_mask(struct qtnf_pcie_bus_priv *priv, u64 dma_mask) -{ - int ret; - - ret = dma_supported(&priv->pdev->dev, dma_mask); - if (!ret) { - pr_err("DMA mask %llu not supported\n", dma_mask); - return ret; - } - - ret = pci_set_dma_mask(priv->pdev, dma_mask); - if (ret) { - pr_err("failed to set DMA mask %llu\n", dma_mask); - return ret; - } - - ret = pci_set_consistent_dma_mask(priv->pdev, dma_mask); - if (ret) { - pr_err("failed to set consistent DMA mask %llu\n", dma_mask); - return ret; - } - - return ret; -} - static void qtnf_tune_pcie_mps(struct qtnf_pcie_bus_priv *priv) { struct pci_dev *pdev = priv->pdev; @@ -418,9 +392,8 @@ static int alloc_bd_table(struct qtnf_pcie_bus_priv *priv) pr_debug("TX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr); - priv->tx_bd_reclaim_start = 0; - priv->tx_bd_index = 0; - priv->tx_queue_len = 0; + priv->tx_bd_r_index = 0; + priv->tx_bd_w_index = 0; /* rx bd */ @@ -430,43 +403,34 @@ static int alloc_bd_table(struct qtnf_pcie_bus_priv *priv) priv->rx_bd_vbase = vaddr; priv->rx_bd_pbase = paddr; - writel(QTN_HOST_LO32(paddr), - PCIE_HDP_TX_HOST_Q_BASE_L(priv->pcie_reg_base)); +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT writel(QTN_HOST_HI32(paddr), PCIE_HDP_TX_HOST_Q_BASE_H(priv->pcie_reg_base)); +#endif + writel(QTN_HOST_LO32(paddr), + PCIE_HDP_TX_HOST_Q_BASE_L(priv->pcie_reg_base)); writel(priv->rx_bd_num | (sizeof(struct qtnf_rx_bd)) << 16, PCIE_HDP_TX_HOST_Q_SZ_CTRL(priv->pcie_reg_base)); - priv->hw_txproc_wr_ptr = priv->rx_bd_num - rx_bd_reserved_param; - - writel(priv->hw_txproc_wr_ptr, - PCIE_HDP_TX_HOST_Q_WR_PTR(priv->pcie_reg_base)); - pr_debug("RX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr); - priv->rx_bd_index = 0; - return 0; } -static int skb2rbd_attach(struct qtnf_pcie_bus_priv *priv, u16 rx_bd_index) +static int skb2rbd_attach(struct qtnf_pcie_bus_priv *priv, u16 index) { struct qtnf_rx_bd *rxbd; struct sk_buff *skb; dma_addr_t paddr; - skb = __dev_alloc_skb(SKB_BUF_SIZE + NET_IP_ALIGN, - GFP_ATOMIC); + skb = __netdev_alloc_skb_ip_align(NULL, SKB_BUF_SIZE, GFP_ATOMIC); if (!skb) { - priv->rx_skb[rx_bd_index] = NULL; + priv->rx_skb[index] = NULL; return -ENOMEM; } - priv->rx_skb[rx_bd_index] = skb; - - skb_reserve(skb, NET_IP_ALIGN); - - rxbd = &priv->rx_bd_vbase[rx_bd_index]; + priv->rx_skb[index] = 
skb; + rxbd = &priv->rx_bd_vbase[index]; paddr = pci_map_single(priv->pdev, skb->data, SKB_BUF_SIZE, PCI_DMA_FROMDEVICE); @@ -475,17 +439,24 @@ static int skb2rbd_attach(struct qtnf_pcie_bus_priv *priv, u16 rx_bd_index) return -ENOMEM; } - writel(QTN_HOST_LO32(paddr), - PCIE_HDP_HHBM_BUF_PTR(priv->pcie_reg_base)); - writel(QTN_HOST_HI32(paddr), - PCIE_HDP_HHBM_BUF_PTR_H(priv->pcie_reg_base)); - /* keep rx skb paddrs in rx buffer descriptors for cleanup purposes */ rxbd->addr = cpu_to_le32(QTN_HOST_LO32(paddr)); rxbd->addr_h = cpu_to_le32(QTN_HOST_HI32(paddr)); - rxbd->info = 0x0; + priv->rx_bd_w_index = index; + + /* sync up all descriptor updates */ + wmb(); + +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + writel(QTN_HOST_HI32(paddr), + PCIE_HDP_HHBM_BUF_PTR_H(priv->pcie_reg_base)); +#endif + writel(QTN_HOST_LO32(paddr), + PCIE_HDP_HHBM_BUF_PTR(priv->pcie_reg_base)); + + writel(index, PCIE_HDP_TX_HOST_Q_WR_PTR(priv->pcie_reg_base)); return 0; } @@ -516,7 +487,7 @@ static void free_xfer_buffers(void *data) /* free rx buffers */ for (i = 0; i < priv->rx_bd_num; i++) { - if (priv->rx_skb[i]) { + if (priv->rx_skb && priv->rx_skb[i]) { rxbd = &priv->rx_bd_vbase[i]; paddr = QTN_HOST_ADDR(le32_to_cpu(rxbd->addr_h), le32_to_cpu(rxbd->addr)); @@ -529,19 +500,72 @@ static void free_xfer_buffers(void *data) /* free tx buffers */ for (i = 0; i < priv->tx_bd_num; i++) { - if (priv->tx_skb[i]) { + if (priv->tx_skb && priv->tx_skb[i]) { dev_kfree_skb_any(priv->tx_skb[i]); priv->tx_skb[i] = NULL; } } } +static int qtnf_hhbm_init(struct qtnf_pcie_bus_priv *priv) +{ + u32 val; + + val = readl(PCIE_HHBM_CONFIG(priv->pcie_reg_base)); + val |= HHBM_CONFIG_SOFT_RESET; + writel(val, PCIE_HHBM_CONFIG(priv->pcie_reg_base)); + usleep_range(50, 100); + val &= ~HHBM_CONFIG_SOFT_RESET; +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + val |= HHBM_64BIT; +#endif + writel(val, PCIE_HHBM_CONFIG(priv->pcie_reg_base)); + writel(priv->rx_bd_num, PCIE_HHBM_Q_LIMIT_REG(priv->pcie_reg_base)); + + return 0; +} + static int qtnf_pcie_init_xfer(struct qtnf_pcie_bus_priv *priv) { int ret; + u32 val; priv->tx_bd_num = tx_bd_size_param; priv->rx_bd_num = rx_bd_size_param; + priv->rx_bd_w_index = 0; + priv->rx_bd_r_index = 0; + + if (!priv->tx_bd_num || !is_power_of_2(priv->tx_bd_num)) { + pr_err("tx_bd_size_param %u is not power of two\n", + priv->tx_bd_num); + return -EINVAL; + } + + val = priv->tx_bd_num * sizeof(struct qtnf_tx_bd); + if (val > PCIE_HHBM_MAX_SIZE) { + pr_err("tx_bd_size_param %u is too large\n", + priv->tx_bd_num); + return -EINVAL; + } + + if (!priv->rx_bd_num || !is_power_of_2(priv->rx_bd_num)) { + pr_err("rx_bd_size_param %u is not power of two\n", + priv->rx_bd_num); + return -EINVAL; + } + + val = priv->rx_bd_num * sizeof(dma_addr_t); + if (val > PCIE_HHBM_MAX_SIZE) { + pr_err("rx_bd_size_param %u is too large\n", + priv->rx_bd_num); + return -EINVAL; + } + + ret = qtnf_hhbm_init(priv); + if (ret) { + pr_err("failed to init h/w queues\n"); + return ret; + } ret = alloc_skb_array(priv); if (ret) { @@ -564,67 +588,72 @@ static int qtnf_pcie_init_xfer(struct qtnf_pcie_bus_priv *priv) return ret; } -static int qtnf_pcie_data_tx_reclaim(struct qtnf_pcie_bus_priv *priv) +static void qtnf_pcie_data_tx_reclaim(struct qtnf_pcie_bus_priv *priv) { struct qtnf_tx_bd *txbd; struct sk_buff *skb; + unsigned long flags; dma_addr_t paddr; - int last_sent; - int count; + u32 tx_done_index; + int count = 0; int i; - last_sent = readl(PCIE_HDP_RX0DMA_CNT(priv->pcie_reg_base)) - % priv->tx_bd_num; - i = priv->tx_bd_reclaim_start; - count = 0; + 
spin_lock_irqsave(&priv->tx_reclaim_lock, flags); - while (i != last_sent) { + tx_done_index = readl(PCIE_HDP_RX0DMA_CNT(priv->pcie_reg_base)) + & (priv->tx_bd_num - 1); + + i = priv->tx_bd_r_index; + + while (CIRC_CNT(tx_done_index, i, priv->tx_bd_num)) { skb = priv->tx_skb[i]; - if (!skb) - break; + if (likely(skb)) { + txbd = &priv->tx_bd_vbase[i]; + paddr = QTN_HOST_ADDR(le32_to_cpu(txbd->addr_h), + le32_to_cpu(txbd->addr)); + pci_unmap_single(priv->pdev, paddr, skb->len, + PCI_DMA_TODEVICE); - txbd = &priv->tx_bd_vbase[i]; - paddr = QTN_HOST_ADDR(le32_to_cpu(txbd->addr_h), - le32_to_cpu(txbd->addr)); - pci_unmap_single(priv->pdev, paddr, skb->len, PCI_DMA_TODEVICE); + if (skb->dev) { + skb->dev->stats.tx_packets++; + skb->dev->stats.tx_bytes += skb->len; - if (skb->dev) { - skb->dev->stats.tx_packets++; - skb->dev->stats.tx_bytes += skb->len; + if (netif_queue_stopped(skb->dev)) + netif_wake_queue(skb->dev); + } - if (netif_queue_stopped(skb->dev)) - netif_wake_queue(skb->dev); + dev_kfree_skb_any(skb); } - dev_kfree_skb_any(skb); priv->tx_skb[i] = NULL; - priv->tx_queue_len--; count++; if (++i >= priv->tx_bd_num) i = 0; } - priv->tx_bd_reclaim_start = i; priv->tx_reclaim_done += count; priv->tx_reclaim_req++; + priv->tx_bd_r_index = i; - return count; + spin_unlock_irqrestore(&priv->tx_reclaim_lock, flags); } -static bool qtnf_tx_queue_ready(struct qtnf_pcie_bus_priv *priv) +static int qtnf_tx_queue_ready(struct qtnf_pcie_bus_priv *priv) { - if (priv->tx_queue_len >= priv->tx_bd_num - 1) { + if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index, + priv->tx_bd_num)) { pr_err_ratelimited("reclaim full Tx queue\n"); qtnf_pcie_data_tx_reclaim(priv); - if (priv->tx_queue_len >= priv->tx_bd_num - 1) { + if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index, + priv->tx_bd_num)) { priv->tx_full_count++; - return false; + return 0; } } - return true; + return 1; } static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb) @@ -637,19 +666,17 @@ static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb) u32 info; int ret = 0; - spin_lock_irqsave(&priv->tx_lock, flags); - - priv->tx_done_count++; + spin_lock_irqsave(&priv->tx0_lock, flags); if (!qtnf_tx_queue_ready(priv)) { if (skb->dev) netif_stop_queue(skb->dev); - spin_unlock_irqrestore(&priv->tx_lock, flags); + spin_unlock_irqrestore(&priv->tx0_lock, flags); return NETDEV_TX_BUSY; } - i = priv->tx_bd_index; + i = priv->tx_bd_w_index; priv->tx_skb[i] = skb; len = skb->len; @@ -673,16 +700,18 @@ static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb) /* write new TX descriptor to PCIE_RX_FIFO on EP */ txbd_paddr = priv->tx_bd_pbase + i * sizeof(struct qtnf_tx_bd); - writel(QTN_HOST_LO32(txbd_paddr), - PCIE_HDP_HOST_WR_DESC0(priv->pcie_reg_base)); + +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT writel(QTN_HOST_HI32(txbd_paddr), PCIE_HDP_HOST_WR_DESC0_H(priv->pcie_reg_base)); +#endif + writel(QTN_HOST_LO32(txbd_paddr), + PCIE_HDP_HOST_WR_DESC0(priv->pcie_reg_base)); if (++i >= priv->tx_bd_num) i = 0; - priv->tx_bd_index = i; - priv->tx_queue_len++; + priv->tx_bd_w_index = i; tx_done: if (ret && skb) { @@ -692,7 +721,10 @@ static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb) dev_kfree_skb_any(skb); } - spin_unlock_irqrestore(&priv->tx_lock, flags); + priv->tx_done_count++; + spin_unlock_irqrestore(&priv->tx0_lock, flags); + + qtnf_pcie_data_tx_reclaim(priv); return NETDEV_TX_OK; } @@ -719,14 +751,21 @@ static irqreturn_t qtnf_interrupt(int irq, void *data) if (!(status & 
priv->pcie_irq_mask)) goto irq_done; - if (status & PCIE_HDP_INT_RX_BITS) { + if (status & PCIE_HDP_INT_RX_BITS) priv->pcie_irq_rx_count++; + + if (status & PCIE_HDP_INT_TX_BITS) + priv->pcie_irq_tx_count++; + + if (status & PCIE_HDP_INT_HHBM_UF) + priv->pcie_irq_uf_count++; + + if (status & PCIE_HDP_INT_RX_BITS) { qtnf_dis_rxdone_irq(priv); napi_schedule(&bus->mux_napi); } if (status & PCIE_HDP_INT_TX_BITS) { - priv->pcie_irq_tx_count++; qtnf_dis_txdone_irq(priv); tasklet_hi_schedule(&priv->reclaim_tq); } @@ -741,16 +780,19 @@ static irqreturn_t qtnf_interrupt(int irq, void *data) return IRQ_HANDLED; } -static inline void hw_txproc_wr_ptr_inc(struct qtnf_pcie_bus_priv *priv) +static int qtnf_rx_data_ready(struct qtnf_pcie_bus_priv *priv) { - u32 index; + u16 index = priv->rx_bd_r_index; + struct qtnf_rx_bd *rxbd; + u32 descw; - index = priv->hw_txproc_wr_ptr; + rxbd = &priv->rx_bd_vbase[index]; + descw = le32_to_cpu(rxbd->info); - if (++index >= priv->rx_bd_num) - index = 0; + if (descw & QTN_TXDONE_MASK) + return 1; - priv->hw_txproc_wr_ptr = index; + return 0; } static int qtnf_rx_poll(struct napi_struct *napi, int budget) @@ -762,64 +804,96 @@ static int qtnf_rx_poll(struct napi_struct *napi, int budget) int processed = 0; struct qtnf_rx_bd *rxbd; dma_addr_t skb_paddr; + int consume; u32 descw; - u16 index; + u32 psize; + u16 r_idx; + u16 w_idx; int ret; - index = priv->rx_bd_index; - rxbd = &priv->rx_bd_vbase[index]; + while (processed < budget) { - descw = le32_to_cpu(rxbd->info); - while ((descw & QTN_TXDONE_MASK) && (processed < budget)) { - skb = priv->rx_skb[index]; + if (!qtnf_rx_data_ready(priv)) + goto rx_out; - if (likely(skb)) { - skb_put(skb, QTN_GET_LEN(descw)); + r_idx = priv->rx_bd_r_index; + rxbd = &priv->rx_bd_vbase[r_idx]; + descw = le32_to_cpu(rxbd->info); + skb = priv->rx_skb[r_idx]; + psize = QTN_GET_LEN(descw); + consume = 1; + + if (!(descw & QTN_TXDONE_MASK)) { + pr_warn("skip invalid rxbd[%d]\n", r_idx); + consume = 0; + } + + if (!skb) { + pr_warn("skip missing rx_skb[%d]\n", r_idx); + consume = 0; + } + + if (skb && (skb_tailroom(skb) < psize)) { + pr_err("skip packet with invalid length: %u > %u\n", + psize, skb_tailroom(skb)); + consume = 0; + } + + if (skb) { skb_paddr = QTN_HOST_ADDR(le32_to_cpu(rxbd->addr_h), le32_to_cpu(rxbd->addr)); pci_unmap_single(priv->pdev, skb_paddr, SKB_BUF_SIZE, PCI_DMA_FROMDEVICE); + } + if (consume) { + skb_put(skb, psize); ndev = qtnf_classify_skb(bus, skb); if (likely(ndev)) { ndev->stats.rx_packets++; ndev->stats.rx_bytes += skb->len; skb->protocol = eth_type_trans(skb, ndev); - netif_receive_skb(skb); + napi_gro_receive(napi, skb); } else { pr_debug("drop untagged skb\n"); bus->mux_dev.stats.rx_dropped++; dev_kfree_skb_any(skb); } - - processed++; } else { - pr_err("missing rx_skb[%d]\n", index); + if (skb) { + bus->mux_dev.stats.rx_dropped++; + dev_kfree_skb_any(skb); + } } - /* attached rx buffer is passed upstream: map a new one */ - ret = skb2rbd_attach(priv, index); - if (likely(!ret)) { - if (++index >= priv->rx_bd_num) - index = 0; + priv->rx_skb[r_idx] = NULL; + if (++r_idx >= priv->rx_bd_num) + r_idx = 0; - priv->rx_bd_index = index; - hw_txproc_wr_ptr_inc(priv); + priv->rx_bd_r_index = r_idx; - rxbd = &priv->rx_bd_vbase[index]; - descw = le32_to_cpu(rxbd->info); - } else { - pr_err("failed to allocate new rx_skb[%d]\n", index); - break; + /* replace processed buffer by a new one */ + w_idx = priv->rx_bd_w_index; + while (CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index, + priv->rx_bd_num) > 0) { + if (++w_idx 
>= priv->rx_bd_num) + w_idx = 0; + + ret = skb2rbd_attach(priv, w_idx); + if (ret) { + pr_err("failed to allocate new rx_skb[%d]\n", + w_idx); + break; + } } - writel(priv->hw_txproc_wr_ptr, - PCIE_HDP_TX_HOST_Q_WR_PTR(priv->pcie_reg_base)); + processed++; } +rx_out: if (processed < budget) { napi_complete(napi); qtnf_en_rxdone_irq(priv); @@ -1058,11 +1132,8 @@ static int qtnf_bringup_fw(struct qtnf_bus *bus) static void qtnf_reclaim_tasklet_fn(unsigned long data) { struct qtnf_pcie_bus_priv *priv = (void *)data; - unsigned long flags; - spin_lock_irqsave(&priv->tx_lock, flags); qtnf_pcie_data_tx_reclaim(priv); - spin_unlock_irqrestore(&priv->tx_lock, flags); qtnf_en_txdone_irq(priv); } @@ -1090,10 +1161,22 @@ static int qtnf_dbg_irq_stats(struct seq_file *s, void *data) { struct qtnf_bus *bus = dev_get_drvdata(s->private); struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus); + u32 reg = readl(PCIE_HDP_INT_EN(priv->pcie_reg_base)); + u32 status; seq_printf(s, "pcie_irq_count(%u)\n", priv->pcie_irq_count); seq_printf(s, "pcie_irq_tx_count(%u)\n", priv->pcie_irq_tx_count); + status = reg & PCIE_HDP_INT_TX_BITS; + seq_printf(s, "pcie_irq_tx_status(%s)\n", + (status == PCIE_HDP_INT_TX_BITS) ? "EN" : "DIS"); seq_printf(s, "pcie_irq_rx_count(%u)\n", priv->pcie_irq_rx_count); + status = reg & PCIE_HDP_INT_RX_BITS; + seq_printf(s, "pcie_irq_rx_status(%s)\n", + (status == PCIE_HDP_INT_RX_BITS) ? "EN" : "DIS"); + seq_printf(s, "pcie_irq_uf_count(%u)\n", priv->pcie_irq_uf_count); + status = reg & PCIE_HDP_INT_HHBM_UF; + seq_printf(s, "pcie_irq_hhbm_uf_status(%s)\n", + (status == PCIE_HDP_INT_HHBM_UF) ? "EN" : "DIS"); return 0; } @@ -1107,10 +1190,24 @@ static int qtnf_dbg_hdp_stats(struct seq_file *s, void *data) seq_printf(s, "tx_done_count(%u)\n", priv->tx_done_count); seq_printf(s, "tx_reclaim_done(%u)\n", priv->tx_reclaim_done); seq_printf(s, "tx_reclaim_req(%u)\n", priv->tx_reclaim_req); - seq_printf(s, "tx_bd_reclaim_start(%u)\n", priv->tx_bd_reclaim_start); - seq_printf(s, "tx_bd_index(%u)\n", priv->tx_bd_index); - seq_printf(s, "rx_bd_index(%u)\n", priv->rx_bd_index); - seq_printf(s, "tx_queue_len(%u)\n", priv->tx_queue_len); + + seq_printf(s, "tx_bd_r_index(%u)\n", priv->tx_bd_r_index); + seq_printf(s, "tx_bd_p_index(%u)\n", + readl(PCIE_HDP_RX0DMA_CNT(priv->pcie_reg_base)) + & (priv->tx_bd_num - 1)); + seq_printf(s, "tx_bd_w_index(%u)\n", priv->tx_bd_w_index); + seq_printf(s, "tx queue len(%u)\n", + CIRC_CNT(priv->tx_bd_w_index, priv->tx_bd_r_index, + priv->tx_bd_num)); + + seq_printf(s, "rx_bd_r_index(%u)\n", priv->rx_bd_r_index); + seq_printf(s, "rx_bd_p_index(%u)\n", + readl(PCIE_HDP_TX0DMA_CNT(priv->pcie_reg_base)) + & (priv->rx_bd_num - 1)); + seq_printf(s, "rx_bd_w_index(%u)\n", priv->rx_bd_w_index); + seq_printf(s, "rx alloc queue len(%u)\n", + CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index, + priv->rx_bd_num)); return 0; } @@ -1156,8 +1253,9 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) strcpy(bus->fwname, QTN_PCI_PEARL_FW_NAME); init_completion(&bus->request_firmware_complete); mutex_init(&bus->bus_lock); + spin_lock_init(&pcie_priv->tx0_lock); spin_lock_init(&pcie_priv->irq_lock); - spin_lock_init(&pcie_priv->tx_lock); + spin_lock_init(&pcie_priv->tx_reclaim_lock); /* init stats */ pcie_priv->tx_full_count = 0; @@ -1165,6 +1263,7 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) pcie_priv->pcie_irq_count = 0; pcie_priv->pcie_irq_rx_count = 0; pcie_priv->pcie_irq_tx_count = 0; + 
pcie_priv->pcie_irq_uf_count = 0; pcie_priv->tx_reclaim_done = 0; pcie_priv->tx_reclaim_req = 0; @@ -1191,6 +1290,16 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) pr_debug("successful init of PCI device %x\n", pdev->device); } +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); +#else + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); +#endif + if (ret) { + pr_err("PCIE DMA coherent mask init failed\n"); + goto err_base; + } + pcim_pin_device(pdev); pci_set_master(pdev); @@ -1212,12 +1321,6 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_base; } - ret = qtnf_pcie_init_dma_mask(pcie_priv, DMA_BIT_MASK(32)); - if (ret) { - pr_err("PCIE DMA mask init failed\n"); - goto err_base; - } - ret = devm_add_action(&pdev->dev, free_xfer_buffers, (void *)pcie_priv); if (ret) { pr_err("custom release callback init failed\n"); @@ -1336,7 +1439,7 @@ static SIMPLE_DEV_PM_OPS(qtnf_pcie_pm_ops, qtnf_pcie_suspend, qtnf_pcie_resume); #endif -static struct pci_device_id qtnf_pcie_devid_table[] = { +static const struct pci_device_id qtnf_pcie_devid_table[] = { { PCIE_VENDOR_ID_QUANTENNA, PCIE_DEVICE_ID_QTN_PEARL, PCI_ANY_ID, PCI_ANY_ID, 0, 0, diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h index 2a897db2bd79..86ac1ccedb52 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h +++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h @@ -32,8 +32,10 @@ struct qtnf_pcie_bus_priv { /* lock for irq configuration changes */ spinlock_t irq_lock; - /* lock for tx operations */ - spinlock_t tx_lock; + /* lock for tx reclaim operations */ + spinlock_t tx_reclaim_lock; + /* lock for tx0 operations */ + spinlock_t tx0_lock; u8 msi_enabled; int mps; @@ -66,13 +68,11 @@ struct qtnf_pcie_bus_priv { void *bd_table_vaddr; u32 bd_table_len; - u32 hw_txproc_wr_ptr; + u32 rx_bd_w_index; + u32 rx_bd_r_index; - u16 tx_bd_reclaim_start; - u16 tx_bd_index; - u32 tx_queue_len; - - u16 rx_bd_index; + u32 tx_bd_w_index; + u32 tx_bd_r_index; u32 pcie_irq_mask; @@ -80,6 +80,7 @@ struct qtnf_pcie_bus_priv { u32 pcie_irq_count; u32 pcie_irq_rx_count; u32 pcie_irq_tx_count; + u32 pcie_irq_uf_count; u32 tx_full_count; u32 tx_done_count; u32 tx_reclaim_done; diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_ipc.h b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_ipc.h index e00d508fbcf0..c5a4e46d26ef 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_ipc.h +++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_ipc.h @@ -50,22 +50,21 @@ #define PCIE_HDP_INT_RX_BITS (0 \ | PCIE_HDP_INT_EP_TXDMA \ | PCIE_HDP_INT_EP_TXEMPTY \ + | PCIE_HDP_INT_HHBM_UF \ ) #define PCIE_HDP_INT_TX_BITS (0 \ | PCIE_HDP_INT_EP_RXDMA \ ) -#if BITS_PER_LONG == 64 +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT #define QTN_HOST_HI32(a) ((u32)(((u64)a) >> 32)) #define QTN_HOST_LO32(a) ((u32)(((u64)a) & 0xffffffffUL)) #define QTN_HOST_ADDR(h, l) ((((u64)h) << 32) | ((u64)l)) -#elif BITS_PER_LONG == 32 +#else #define QTN_HOST_HI32(a) 0 #define QTN_HOST_LO32(a) ((u32)(((u32)a) & 0xffffffffUL)) #define QTN_HOST_ADDR(h, l) ((u32)l) -#else -#error Unexpected BITS_PER_LONG value #endif #define QTN_SYSCTL_BAR 0 @@ -75,7 +74,7 @@ #define QTN_PCIE_BDA_VERSION 0x1002 #define PCIE_BDA_NAMELEN 32 -#define PCIE_HHBM_MAX_SIZE 512 +#define PCIE_HHBM_MAX_SIZE 2048 #define SKB_BUF_SIZE 2048 @@ -112,7 +111,7 @@ 
struct qtnf_pcie_bda { __le32 bda_flashsz; u8 bda_boardname[PCIE_BDA_NAMELEN]; __le32 bda_rc_msi_enabled; - __le32 bda_hhbm_list[PCIE_HHBM_MAX_SIZE]; + u8 bda_hhbm_list[PCIE_HHBM_MAX_SIZE]; __le32 bda_dsbw_start_index; __le32 bda_dsbw_end_index; __le32 bda_dsbw_total_bytes; diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_regs_pearl.h b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_regs_pearl.h index 78715b8a8ef9..5b48b425fa7f 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_regs_pearl.h +++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_regs_pearl.h @@ -109,6 +109,7 @@ #define HHBM_WR_REQ (BIT(0)) #define HHBM_RD_REQ (BIT(1)) #define HHBM_DONE (BIT(31)) +#define HHBM_64BIT (BIT(10)) /* offsets for dual PCIE */ #define PCIE_PORT_LINK_CTL(base) ((base) + 0x0710) @@ -333,6 +334,7 @@ #define PCIE_HDP_INT_RX_LEN_ERR (BIT(2)) #define PCIE_HDP_INT_RX_HDR_LEN_ERR (BIT(3)) #define PCIE_HDP_INT_EP_TXDMA (BIT(12)) +#define PCIE_HDP_INT_HHBM_UF (BIT(13)) #define PCIE_HDP_INT_EP_TXEMPTY (BIT(15)) #define PCIE_HDP_INT_IPC (BIT(29)) diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h index 6eafc15e0065..a8242f678496 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h @@ -19,7 +19,7 @@ #include -#define QLINK_PROTO_VER 3 +#define QLINK_PROTO_VER 5 #define QLINK_MACID_RSVD 0xFF #define QLINK_VIFID_RSVD 0xFF @@ -77,6 +77,7 @@ enum qlink_iface_type { QLINK_IFTYPE_ADHOC = 3, QLINK_IFTYPE_MONITOR = 4, QLINK_IFTYPE_WDS = 5, + QLINK_IFTYPE_AP_VLAN = 6, }; /** @@ -85,12 +86,12 @@ enum qlink_iface_type { * Data describing a single virtual interface. * * @if_type: Mode of interface operation, one of &enum qlink_iface_type - * @flags: interface flagsmap. + * @vlanid: VLAN ID for AP_VLAN interface type * @mac_addr: MAC address of virtual interface. */ struct qlink_intf_info { __le16 if_type; - __le16 flags; + __le16 vlanid; u8 mac_addr[ETH_ALEN]; u8 rsvd[2]; } __packed; @@ -133,6 +134,9 @@ enum qlink_channel_width { * number of operational channels and information on each of the channel. * This command is generic to a specified MAC, interface index must be set * to QLINK_VIFID_RSVD in command header. + * @QLINK_CMD_REG_NOTIFY: notify device about regulatory domain change. This + * command is supported only if device reports QLINK_HW_SUPPORTS_REG_UPDATE + * capability. */ enum qlink_cmd_type { QLINK_CMD_FW_INIT = 0x0001, @@ -148,8 +152,9 @@ enum qlink_cmd_type { QLINK_CMD_DEL_INTF = 0x0016, QLINK_CMD_CHANGE_INTF = 0x0017, QLINK_CMD_UPDOWN_INTF = 0x0018, - QLINK_CMD_REG_REGION = 0x0019, + QLINK_CMD_REG_NOTIFY = 0x0019, QLINK_CMD_CHANS_INFO_GET = 0x001A, + QLINK_CMD_CHAN_SWITCH = 0x001B, QLINK_CMD_CONFIG_AP = 0x0020, QLINK_CMD_START_AP = 0x0021, QLINK_CMD_STOP_AP = 0x0022, @@ -161,6 +166,7 @@ enum qlink_cmd_type { QLINK_CMD_CHANGE_STA = 0x0051, QLINK_CMD_DEL_STA = 0x0052, QLINK_CMD_SCAN = 0x0053, + QLINK_CMD_CHAN_STATS = 0x0054, QLINK_CMD_CONNECT = 0x0060, QLINK_CMD_DISCONNECT = 0x0061, }; @@ -287,6 +293,7 @@ struct qlink_cmd_get_sta_info { * @pairwise: whether to use pairwise key. * @addr: MAC address of a STA key is being installed to. * @cipher: cipher suite. + * @vlanid: VLAN ID for AP_VLAN interface type * @key_data: key data itself. 
*/ struct qlink_cmd_add_key { @@ -295,6 +302,7 @@ struct qlink_cmd_add_key { u8 pairwise; u8 addr[ETH_ALEN]; __le32 cipher; + __le16 vlanid; u8 key_data[0]; } __packed; @@ -341,12 +349,16 @@ struct qlink_cmd_set_def_mgmt_key { * * @sta_flags_mask: STA flags mask, bitmap of &enum qlink_sta_flags * @sta_flags_set: STA flags values, bitmap of &enum qlink_sta_flags + * @if_type: Mode of interface operation, one of &enum qlink_iface_type + * @vlanid: VLAN ID to assign to specific STA * @sta_addr: address of the STA for which parameters are set. */ struct qlink_cmd_change_sta { struct qlink_cmd chdr; __le32 sta_flags_mask; __le32 sta_flags_set; + __le16 if_type; + __le16 vlanid; u8 sta_addr[ETH_ALEN]; } __packed; @@ -380,7 +392,7 @@ enum qlink_sta_connect_flags { struct qlink_cmd_connect { struct qlink_cmd chdr; __le32 flags; - __le16 freq; + __le16 channel; __le16 bg_scan_period; u8 bssid[ETH_ALEN]; u8 payload[0]; @@ -430,6 +442,70 @@ struct qlink_cmd_chans_info_get { u8 band; } __packed; +/** + * struct qlink_cmd_get_chan_stats - data for QLINK_CMD_CHAN_STATS command + * + * @channel: channel number according to 802.11 17.3.8.3.2 and Annex J + */ +struct qlink_cmd_get_chan_stats { + struct qlink_cmd chdr; + __le16 channel; +} __packed; + +/** + * enum qlink_reg_initiator - Indicates the initiator of a reg domain request + * + * See &enum nl80211_reg_initiator for more info. + */ +enum qlink_reg_initiator { + QLINK_REGDOM_SET_BY_CORE, + QLINK_REGDOM_SET_BY_USER, + QLINK_REGDOM_SET_BY_DRIVER, + QLINK_REGDOM_SET_BY_COUNTRY_IE, +}; + +/** + * enum qlink_user_reg_hint_type - type of user regulatory hint + * + * See &enum nl80211_user_reg_hint_type for more info. + */ +enum qlink_user_reg_hint_type { + QLINK_USER_REG_HINT_USER = 0, + QLINK_USER_REG_HINT_CELL_BASE = 1, + QLINK_USER_REG_HINT_INDOOR = 2, +}; + +/** + * struct qlink_cmd_reg_notify - data for QLINK_CMD_REG_NOTIFY command + * + * @alpha2: the ISO / IEC 3166 alpha2 country code. + * @initiator: which entity sent the request, one of &enum qlink_reg_initiator. + * @user_reg_hint_type: type of hint for QLINK_REGDOM_SET_BY_USER request, one + * of &enum qlink_user_reg_hint_type. + */ +struct qlink_cmd_reg_notify { + struct qlink_cmd chdr; + u8 alpha2[2]; + u8 initiator; + u8 user_reg_hint_type; +} __packed; + +/** + * struct qlink_cmd_chan_switch - data for QLINK_CMD_CHAN_SWITCH command + * + * @channel: channel number according to 802.11 17.3.8.3.2 and Annex J + * @radar_required: whether radar detection is required on the new channel + * @block_tx: whether transmissions should be blocked while changing + * @beacon_count: number of beacons until switch + */ +struct qlink_cmd_chan_switch { + struct qlink_cmd chdr; + __le16 channel; + u8 radar_required; + u8 block_tx; + u8 beacon_count; +} __packed; + /* QLINK Command Responses messages related definitions */ @@ -438,6 +514,7 @@ enum qlink_cmd_result { QLINK_CMD_RESULT_INVALID, QLINK_CMD_RESULT_ENOTSUPP, QLINK_CMD_RESULT_ENOTFOUND, + QLINK_CMD_RESULT_EALREADY, }; /** @@ -496,6 +573,18 @@ struct qlink_resp_get_mac_info { u8 var_info[0]; } __packed; +/** + * enum qlink_dfs_regions - regulatory DFS regions + * + * Corresponds to &enum nl80211_dfs_regions. + */ +enum qlink_dfs_regions { + QLINK_DFS_UNSET = 0, + QLINK_DFS_FCC = 1, + QLINK_DFS_ETSI = 2, + QLINK_DFS_JP = 3, +}; + /** * struct qlink_resp_get_hw_info - response for QLINK_CMD_GET_HW_INFO command * @@ -504,22 +593,29 @@ struct qlink_resp_get_mac_info { * @fw_ver: wireless hardware firmware version. 
* @hw_capab: Bitmap of capabilities supported by firmware. * @ql_proto_ver: Version of QLINK protocol used by firmware. - * @country_code: country code ID firmware is configured to. * @num_mac: Number of separate physical radio devices provided by hardware. * @mac_bitmap: Bitmap of MAC IDs that are active and can be used in firmware. * @total_tx_chains: total number of transmit chains used by device. * @total_rx_chains: total number of receive chains. + * @alpha2: country code ID firmware is configured to. + * @n_reg_rules: number of regulatory rules TLVs in variable portion of the + * message. + * @dfs_region: regulatory DFS region, one of @enum qlink_dfs_region. + * @info: variable-length HW info, can contain QTN_TLV_ID_REG_RULE. */ struct qlink_resp_get_hw_info { struct qlink_resp rhdr; __le32 fw_ver; __le32 hw_capab; __le16 ql_proto_ver; - u8 alpha2_code[2]; u8 num_mac; u8 mac_bitmap; u8 total_tx_chain; u8 total_rx_chain; + u8 alpha2[2]; + u8 n_reg_rules; + u8 dfs_region; + u8 info[0]; } __packed; /** @@ -574,6 +670,16 @@ struct qlink_resp_phy_params { u8 info[0]; } __packed; +/** + * struct qlink_resp_get_chan_stats - response for QLINK_CMD_CHAN_STATS cmd + * + * @info: variable-length channel info. + */ +struct qlink_resp_get_chan_stats { + struct qlink_cmd rhdr; + u8 info[0]; +} __packed; + /* QLINK Events messages related definitions */ @@ -585,6 +691,7 @@ enum qlink_event_type { QLINK_EVENT_SCAN_COMPLETE = 0x0025, QLINK_EVENT_BSS_JOIN = 0x0026, QLINK_EVENT_BSS_LEAVE = 0x0027, + QLINK_EVENT_FREQ_CHANGE = 0x0028, }; /** @@ -651,7 +758,17 @@ struct qlink_event_bss_join { */ struct qlink_event_bss_leave { struct qlink_event ehdr; - u16 reason; + __le16 reason; +} __packed; + +/** + * struct qlink_event_freq_change - data for QLINK_EVENT_FREQ_CHANGE event + * + * @freq: new operating frequency in MHz + */ +struct qlink_event_freq_change { + struct qlink_event ehdr; + __le32 freq; } __packed; enum qlink_rxmgmt_flags { @@ -741,10 +858,12 @@ enum qlink_tlv_id { QTN_TLV_ID_LRETRY_LIMIT = 0x0204, QTN_TLV_ID_BCN_PERIOD = 0x0205, QTN_TLV_ID_DTIM = 0x0206, + QTN_TLV_ID_REG_RULE = 0x0207, QTN_TLV_ID_CHANNEL = 0x020F, QTN_TLV_ID_COVERAGE_CLASS = 0x0213, QTN_TLV_ID_IFACE_LIMIT = 0x0214, QTN_TLV_ID_NUM_IFACE_COMB = 0x0215, + QTN_TLV_ID_CHANNEL_STATS = 0x0216, QTN_TLV_ID_STA_BASIC_COUNTERS = 0x0300, QTN_TLV_ID_STA_GENERIC_INFO = 0x0301, QTN_TLV_ID_KEY = 0x0302, @@ -761,7 +880,7 @@ struct qlink_tlv_hdr { struct qlink_iface_limit { __le16 max_num; - __le16 type_mask; + __le16 type; } __packed; struct qlink_iface_comb_num { @@ -844,12 +963,54 @@ struct qlink_tlv_cclass { u8 cclass; } __packed; -enum qlink_dfs_state { - QLINK_DFS_USABLE, - QLINK_DFS_UNAVAILABLE, - QLINK_DFS_AVAILABLE, +/** + * enum qlink_reg_rule_flags - regulatory rule flags + * + * See description of &enum nl80211_reg_rule_flags + */ +enum qlink_reg_rule_flags { + QLINK_RRF_NO_OFDM = BIT(0), + QLINK_RRF_NO_CCK = BIT(1), + QLINK_RRF_NO_INDOOR = BIT(2), + QLINK_RRF_NO_OUTDOOR = BIT(3), + QLINK_RRF_DFS = BIT(4), + QLINK_RRF_PTP_ONLY = BIT(5), + QLINK_RRF_PTMP_ONLY = BIT(6), + QLINK_RRF_NO_IR = BIT(7), + QLINK_RRF_AUTO_BW = BIT(8), + QLINK_RRF_IR_CONCURRENT = BIT(9), + QLINK_RRF_NO_HT40MINUS = BIT(10), + QLINK_RRF_NO_HT40PLUS = BIT(11), + QLINK_RRF_NO_80MHZ = BIT(12), + QLINK_RRF_NO_160MHZ = BIT(13), }; +/** + * struct qlink_tlv_reg_rule - data for QTN_TLV_ID_REG_RULE TLV + * + * Regulatory rule description. + * + * @start_freq_khz: start frequency of the range the rule is attributed to. 
+ * @end_freq_khz: end frequency of the range the rule is attributed to. + * @max_bandwidth_khz: max bandwidth that channels in specified range can be + * configured to. + * @max_antenna_gain: max antenna gain that can be used in the specified + * frequency range, dBi. + * @max_eirp: maximum EIRP. + * @flags: regulatory rule flags in &enum qlink_reg_rule_flags. + * @dfs_cac_ms: DFS CAC period. + */ +struct qlink_tlv_reg_rule { + struct qlink_tlv_hdr hdr; + __le32 start_freq_khz; + __le32 end_freq_khz; + __le32 max_bandwidth_khz; + __le32 max_antenna_gain; + __le32 max_eirp; + __le32 flags; + __le32 dfs_cac_ms; +} __packed; + enum qlink_channel_flags { QLINK_CHAN_DISABLED = BIT(0), QLINK_CHAN_NO_IR = BIT(1), @@ -865,6 +1026,12 @@ enum qlink_channel_flags { QLINK_CHAN_NO_10MHZ = BIT(12), }; +enum qlink_dfs_state { + QLINK_DFS_USABLE, + QLINK_DFS_UNAVAILABLE, + QLINK_DFS_AVAILABLE, +}; + struct qlink_tlv_channel { struct qlink_tlv_hdr hdr; __le16 hw_value; @@ -898,4 +1065,13 @@ struct qlink_auth_encr { u8 control_port_no_encrypt; } __packed; +struct qlink_chan_stats { + __le32 chan_num; + __le32 cca_tx; + __le32 cca_rx; + __le32 cca_busy; + __le32 cca_try; + s8 chan_noise; +} __packed; + #endif /* _QTN_QLINK_H_ */ diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c index 49ae652ad9a3..cf024c995fd6 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c @@ -17,24 +17,30 @@ #include "qlink_util.h" -u16 qlink_iface_type_mask_to_nl(u16 qlink_mask) +u16 qlink_iface_type_to_nl_mask(u16 qlink_type) { u16 result = 0; - if (qlink_mask & QLINK_IFTYPE_AP) + switch (qlink_type) { + case QLINK_IFTYPE_AP: result |= BIT(NL80211_IFTYPE_AP); - - if (qlink_mask & QLINK_IFTYPE_STATION) + break; + case QLINK_IFTYPE_STATION: result |= BIT(NL80211_IFTYPE_STATION); - - if (qlink_mask & QLINK_IFTYPE_ADHOC) + break; + case QLINK_IFTYPE_ADHOC: result |= BIT(NL80211_IFTYPE_ADHOC); - - if (qlink_mask & QLINK_IFTYPE_MONITOR) + break; + case QLINK_IFTYPE_MONITOR: result |= BIT(NL80211_IFTYPE_MONITOR); - - if (qlink_mask & QLINK_IFTYPE_WDS) + break; + case QLINK_IFTYPE_WDS: result |= BIT(NL80211_IFTYPE_WDS); + break; + case QLINK_IFTYPE_AP_VLAN: + result |= BIT(NL80211_IFTYPE_AP_VLAN); + break; + } return result; } diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h index 90d7d09a6c63..de06c1e20b5b 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h @@ -22,14 +22,6 @@ #include "qlink.h" -static inline void qtnf_cmd_skb_put_action(struct sk_buff *skb, u16 action) -{ - __le16 *buf_ptr; - - buf_ptr = skb_put(skb, sizeof(action)); - *buf_ptr = cpu_to_le16(action); -} - static inline void qtnf_cmd_skb_put_buffer(struct sk_buff *skb, const u8 *buf_src, size_t len) { @@ -68,7 +60,7 @@ static inline void qtnf_cmd_skb_put_tlv_u16(struct sk_buff *skb, memcpy(hdr->val, &tmp, sizeof(tmp)); } -u16 qlink_iface_type_mask_to_nl(u16 qlink_mask); +u16 qlink_iface_type_to_nl_mask(u16 qlink_type); u8 qlink_chan_width_mask_to_nl(u16 qlink_mask); #endif /* _QTN_FMAC_QLINK_UTIL_H_ */ diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c index 529e05999abb..f4b48b77c491 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c @@ -1911,7 +1911,7 @@ static const struct 
rt2x00_ops rt2500usb_ops = { /* * rt2500usb module information. */ -static struct usb_device_id rt2500usb_device_table[] = { +static const struct usb_device_id rt2500usb_device_table[] = { /* ASUS */ { USB_DEVICE(0x0b05, 0x1706) }, { USB_DEVICE(0x0b05, 0x1707) }, diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index 0b75def39c6c..d2c289446c00 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -3702,7 +3702,10 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, if (rt2x00_rt(rt2x00dev, RT3572)) rt2800_rfcsr_write(rt2x00dev, 8, 0); - tx_pin = rt2800_register_read(rt2x00dev, TX_PIN_CFG); + if (rt2x00_rt(rt2x00dev, RT6352)) + tx_pin = rt2800_register_read(rt2x00dev, TX_PIN_CFG); + else + tx_pin = 0; switch (rt2x00dev->default_ant.tx_chain_num) { case 3: diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c index ee5276e233fa..1123e2bed803 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c @@ -136,10 +136,19 @@ void rt2800mmio_fill_rxdone(struct queue_entry *entry, */ rxdesc->flags |= RX_FLAG_MMIC_STRIPPED; - if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) + if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) { rxdesc->flags |= RX_FLAG_DECRYPTED; - else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) + } else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) { + /* + * In order to check the Michael Mic, the packet must have + * been decrypted. Mac80211 doesn't check the MMIC failure + * flag to initiate MMIC countermeasures if the decoded flag + * has not been set. + */ + rxdesc->flags |= RX_FLAG_DECRYPTED; + rxdesc->flags |= RX_FLAG_MMIC_ERROR; + } } if (rt2x00_get_field32(word, RXD_W3_MY_BSS)) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c index 685b8e0cd67d..24fc6d2045ef 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c @@ -697,11 +697,20 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry, * stripped it from the frame. Signal this to mac80211. */ rxdesc->flags |= RX_FLAG_MMIC_STRIPPED; - + if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) { rxdesc->flags |= RX_FLAG_DECRYPTED; - else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) + } else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) { + /* + * In order to check the Michael Mic, the packet must have + * been decrypted. Mac80211 doesn't check the MMIC failure + * flag to initiate MMIC countermeasures if the decoded flag + * has not been set. + */ + rxdesc->flags |= RX_FLAG_DECRYPTED; + rxdesc->flags |= RX_FLAG_MMIC_ERROR; + } } if (rt2x00_get_field32(word, RXD_W0_MY_BSS)) @@ -915,7 +924,7 @@ static const struct rt2x00_ops rt2800usb_ops = { /* * rt2800usb module information. 
*/ -static struct usb_device_id rt2800usb_device_table[] = { +static const struct usb_device_id rt2800usb_device_table[] = { /* Abocom */ { USB_DEVICE(0x07b8, 0x2870) }, { USB_DEVICE(0x07b8, 0x2770) }, diff --git a/drivers/net/wireless/ralink/rt2x00/rt73usb.c b/drivers/net/wireless/ralink/rt2x00/rt73usb.c index fd913222abd1..9a212823f42c 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt73usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt73usb.c @@ -2408,7 +2408,7 @@ static const struct rt2x00_ops rt73usb_ops = { /* * rt73usb module information. */ -static struct usb_device_id rt73usb_device_table[] = { +static const struct usb_device_id rt73usb_device_table[] = { /* AboCom */ { USB_DEVICE(0x07b8, 0xb21b) }, { USB_DEVICE(0x07b8, 0xb21c) }, diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c index 55198ac2b755..121b94f09714 100644 --- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c +++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c @@ -43,7 +43,7 @@ MODULE_AUTHOR("Larry Finger "); MODULE_DESCRIPTION("RTL8187/RTL8187B USB wireless driver"); MODULE_LICENSE("GPL"); -static struct usb_device_id rtl8187_table[] = { +static const struct usb_device_id rtl8187_table[] = { /* Asus */ {USB_DEVICE(0x0b05, 0x171d), .driver_info = DEVICE_RTL8187}, /* Belkin */ diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index 21e5ef021260..7806a4d2b1fc 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -6190,7 +6190,7 @@ static void rtl8xxxu_disconnect(struct usb_interface *interface) ieee80211_free_hw(hw); } -static struct usb_device_id dev_table[] = { +static const struct usb_device_id dev_table[] = { {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8724, 0xff, 0xff, 0xff), .driver_info = (unsigned long)&rtl8723au_fops}, {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x1724, 0xff, 0xff, 0xff), diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index e36ee592c660..ea18aa7afecb 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -426,9 +426,8 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw) hw->extra_tx_headroom = RTL_TX_HEADER_SIZE; /* TODO: Correct this value for our hw */ - /* TODO: define these hard code value */ - hw->max_listen_interval = 10; - hw->max_rate_tries = 4; + hw->max_listen_interval = MAX_LISTEN_INTERVAL; + hw->max_rate_tries = MAX_RATE_TRIES; /* hw->max_rates = 1; */ hw->sta_data_size = sizeof(struct rtl_sta_info); @@ -1166,9 +1165,9 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw, } } - if (is_multicast_ether_addr(ieee80211_get_DA(hdr))) + if (is_multicast_ether_addr(hdr->addr1)) tcb_desc->multicast = 1; - else if (is_broadcast_ether_addr(ieee80211_get_DA(hdr))) + else if (is_broadcast_ether_addr(hdr->addr1)) tcb_desc->broadcast = 1; _rtl_txrate_selectmode(hw, sta, tcb_desc); @@ -1408,6 +1407,11 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx, return true; } else if (ETH_P_PAE == ether_type) { + /* EAPOL is seen as in-4way */ + rtlpriv->btcoexist.btc_info.in_4way = true; + rtlpriv->btcoexist.btc_info.in_4way_ts = jiffies; + rtlpriv->btcoexist.btc_info.in_4way_ts = jiffies; + RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG, "802.1X %s EAPOL pkt!!\n", (is_tx) ? 
"Tx" : "Rx"); @@ -1735,12 +1739,12 @@ void rtl_scan_list_expire(struct ieee80211_hw *hw) continue; list_del(&entry->list); - kfree(entry); rtlpriv->scan_list.num--; RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD, "BSSID=%pM is expire in scan list (total=%d)\n", entry->bssid, rtlpriv->scan_list.num); + kfree(entry); } spin_unlock_irqrestore(&rtlpriv->locks.scan_list_lock, flags); @@ -1959,6 +1963,12 @@ void rtl_watchdog_wq_callback(void *data) if (rtlpriv->cfg->ops->get_btc_status()) rtlpriv->btcoexist.btc_ops->btc_periodical(rtlpriv); + if (rtlpriv->btcoexist.btc_info.in_4way) { + if (time_after(jiffies, rtlpriv->btcoexist.btc_info.in_4way_ts + + msecs_to_jiffies(IN_4WAY_TIMEOUT_TIME))) + rtlpriv->btcoexist.btc_info.in_4way = false; + } + rtlpriv->link_info.bcn_rx_inperiod = 0; /* <6> scan list */ diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h index ab7d81904d25..b56d1b7f5567 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.h +++ b/drivers/net/wireless/realtek/rtlwifi/base.h @@ -65,6 +65,8 @@ enum ap_peer { #define FRAME_OFFSET_ADDRESS3 16 #define FRAME_OFFSET_SEQUENCE 22 #define FRAME_OFFSET_ADDRESS4 24 +#define MAX_LISTEN_INTERVAL 10 +#define MAX_RATE_TRIES 4 #define SET_80211_HDR_FRAME_CONTROL(_hdr, _val) \ WRITEEF2BYTE(_hdr, _val) diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbt_precomp.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbt_precomp.h index 2ac989a4b2bb..02dff4c3f664 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbt_precomp.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbt_precomp.h @@ -43,22 +43,6 @@ #define RT_SDIO_INTERFACE 3 #define DEV_BUS_TYPE RT_PCI_INTERFACE -/* IC type */ -#define RTL_HW_TYPE(adapter) (rtl_hal((struct rtl_priv *)adapter)->hw_type) - -#define IS_NEW_GENERATION_IC(adapter) \ - (RTL_HW_TYPE(adapter) >= HARDWARE_TYPE_RTL8192EE) -#define IS_HARDWARE_TYPE_8812(adapter) \ - (RTL_HW_TYPE(adapter) == HARDWARE_TYPE_RTL8812AE) -#define IS_HARDWARE_TYPE_8821(adapter) \ - (RTL_HW_TYPE(adapter) == HARDWARE_TYPE_RTL8821AE) -#define IS_HARDWARE_TYPE_8723A(adapter) \ - (RTL_HW_TYPE(adapter) == HARDWARE_TYPE_RTL8723AE) -#define IS_HARDWARE_TYPE_8723B(adapter) \ - (RTL_HW_TYPE(adapter) == HARDWARE_TYPE_RTL8723BE) -#define IS_HARDWARE_TYPE_8192E(adapter) \ - (RTL_HW_TYPE(adapter) == HARDWARE_TYPE_RTL8192EE) - #include "halbtc8192e2ant.h" #include "halbtc8723b1ant.h" #include "halbtc8723b2ant.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c index 03998d2e9eb8..c04425236ce4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c @@ -600,14 +600,8 @@ static void halbtc8723b1ant_coex_table_with_type(struct btc_coexist *btcoexist, 0xffffff, 0x3); break; case 5: - if ((coex_sta->cck_ever_lock) && (coex_sta->scan_ap_num <= 5)) - halbtc8723b1ant_coex_table(btcoexist, force_exec, - 0x5a5a5a5a, 0x5aaa5a5a, - 0xffffff, 0x3); - else - halbtc8723b1ant_coex_table(btcoexist, force_exec, - 0x5a5a5a5a, 0x5aaa5a5a, - 0xffffff, 0x3); + halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a, + 0x5aaa5a5a, 0xffffff, 0x3); break; case 6: halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x55555555, diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c index 31965f0ef69d..e8f07573aed9 100644 --- 
a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c @@ -1183,7 +1183,10 @@ static void btc8723b2ant_set_ant_path(struct btc_coexist *btcoexist, } /* fixed internal switch S1->WiFi, S0->BT */ - btcoexist->btc_write_4byte(btcoexist, 0x948, 0x0); + if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) + btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0); + else + btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280); switch (antpos_type) { case BTC_ANT_WIFI_AT_MAIN: diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c index e6024b013ca5..b5e9877d935c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c @@ -173,6 +173,16 @@ static u8 halbtc_get_wifi_central_chnl(struct btc_coexist *btcoexist) u8 rtl_get_hwpg_single_ant_path(struct rtl_priv *rtlpriv) { + struct rtl_mod_params *mod_params = rtlpriv->cfg->mod_params; + + /* override ant_num / ant_path */ + if (mod_params->ant_sel) { + rtlpriv->btcoexist.btc_info.ant_num = + (mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1); + + rtlpriv->btcoexist.btc_info.single_ant_path = + (mod_params->ant_sel == 1 ? 0 : 1); + } return rtlpriv->btcoexist.btc_info.single_ant_path; } @@ -183,6 +193,7 @@ u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv) u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv) { + struct rtl_mod_params *mod_params = rtlpriv->cfg->mod_params; u8 num; if (rtlpriv->btcoexist.btc_info.ant_num == ANT_X2) @@ -190,6 +201,10 @@ u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv) else num = 1; + /* override ant_num / ant_path */ + if (mod_params->ant_sel) + num = (mod_params->ant_sel == 1 ? 
ANT_X2 : ANT_X1) + 1; + return num; } @@ -327,7 +342,22 @@ static void halbtc_aggregation_check(struct btc_coexist *btcoexist) static u32 halbtc_get_bt_patch_version(struct btc_coexist *btcoexist) { - return 0; + struct rtl_priv *rtlpriv = btcoexist->adapter; + u8 cmd_buffer[4] = {0}; + u8 oper_ver = 0; + u8 req_num = 0x0E; + + if (btcoexist->bt_info.bt_real_fw_ver) + goto label_done; + + cmd_buffer[0] |= (oper_ver & 0x0f); /* Set OperVer */ + cmd_buffer[0] |= ((req_num << 4) & 0xf0); /* Set ReqNum */ + cmd_buffer[1] = 0; /* BT_OP_GET_BT_VERSION = 0 */ + rtlpriv->cfg->ops->fill_h2c_cmd(rtlpriv->mac80211.hw, 0x67, 4, + &cmd_buffer[0]); + +label_done: + return btcoexist->bt_info.bt_real_fw_ver; } u32 halbtc_get_wifi_link_status(struct btc_coexist *btcoexist) @@ -861,7 +891,7 @@ bool exhalbtc_bind_bt_coex_withadapter(void *adapter) { struct btc_coexist *btcoexist = &gl_bt_coexist; struct rtl_priv *rtlpriv = adapter; - u8 ant_num = 2, chip_type, single_ant_path = 0; + u8 ant_num = 2, chip_type; if (btcoexist->binded) return false; @@ -896,12 +926,6 @@ bool exhalbtc_bind_bt_coex_withadapter(void *adapter) ant_num = rtl_get_hwpg_ant_num(rtlpriv); exhalbtc_set_ant_num(rtlpriv, BT_COEX_ANT_TYPE_PG, ant_num); - /* set default antenna position to main port */ - btcoexist->board_info.btdm_ant_pos = BTC_ANTENNA_AT_MAIN_PORT; - - single_ant_path = rtl_get_hwpg_single_ant_path(rtlpriv); - exhalbtc_set_single_ant_path(single_ant_path); - if (rtl_get_hwpg_package_type(rtlpriv) == 0) btcoexist->board_info.tfbga_package = false; else if (rtl_get_hwpg_package_type(rtlpriv) == 1) diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c index 4366c9817e1e..7d296a401b6f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c @@ -41,6 +41,7 @@ static struct rtl_btc_ops rtl_btc_operation = { .btc_periodical = rtl_btc_periodical, .btc_halt_notify = rtl_btc_halt_notify, .btc_btinfo_notify = rtl_btc_btinfo_notify, + .btc_btmpinfo_notify = rtl_btc_btmpinfo_notify, .btc_is_limited_dig = rtl_btc_is_limited_dig, .btc_is_disable_edca_turbo = rtl_btc_is_disable_edca_turbo, .btc_is_bt_disabled = rtl_btc_is_bt_disabled, @@ -165,6 +166,33 @@ void rtl_btc_btinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length) exhalbtc_bt_info_notify(&gl_bt_coexist, tmp_buf, length); } +void rtl_btc_btmpinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length) +{ + u8 extid, seq, len; + u16 bt_real_fw_ver; + u8 bt_fw_ver; + + if ((length < 4) || (!tmp_buf)) + return; + + extid = tmp_buf[0]; + /* not response from BT FW then exit*/ + if (extid != 1) /* C2H_TRIG_BY_BT_FW = 1 */ + return; + + len = tmp_buf[1] >> 4; + seq = tmp_buf[2] >> 4; + + /* BT Firmware version response */ + if (seq == 0x0E) { + bt_real_fw_ver = tmp_buf[3] | (tmp_buf[4] << 8); + bt_fw_ver = tmp_buf[5]; + + gl_bt_coexist.bt_info.bt_real_fw_ver = bt_real_fw_ver; + gl_bt_coexist.bt_info.bt_fw_ver = bt_fw_ver; + } +} + bool rtl_btc_is_limited_dig(struct rtl_priv *rtlpriv) { return gl_bt_coexist.bt_info.limited_dig; diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h index 6fe521cbe7f0..ac1253c46f44 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h @@ -39,6 +39,7 @@ void rtl_btc_mediastatus_notify(struct rtl_priv *rtlpriv, void rtl_btc_periodical(struct rtl_priv 
*rtlpriv); void rtl_btc_halt_notify(void); void rtl_btc_btinfo_notify(struct rtl_priv *rtlpriv, u8 *tmpbuf, u8 length); +void rtl_btc_btmpinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length); bool rtl_btc_is_limited_dig(struct rtl_priv *rtlpriv); bool rtl_btc_is_disable_edca_turbo(struct rtl_priv *rtlpriv); bool rtl_btc_is_bt_disabled(struct rtl_priv *rtlpriv); diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c index b0ad061048c5..c53cbf3d52bd 100644 --- a/drivers/net/wireless/realtek/rtlwifi/core.c +++ b/drivers/net/wireless/realtek/rtlwifi/core.c @@ -1505,6 +1505,8 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, u8 mac_addr[ETH_ALEN]; u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + rtlpriv->btcoexist.btc_info.in_4way = false; + if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) { RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "not open hw encryption\n"); diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index 032b6317690d..08dc8919ef60 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.c +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c @@ -2257,7 +2257,7 @@ int rtl_pci_probe(struct pci_dev *pdev, /* find adapter */ if (!_rtl_pci_find_adapter(pdev, hw)) { err = -ENODEV; - goto fail3; + goto fail2; } /* Init IO handler */ @@ -2318,10 +2318,10 @@ int rtl_pci_probe(struct pci_dev *pdev, pci_set_drvdata(pdev, NULL); rtl_deinit_core(hw); +fail2: if (rtlpriv->io.pci_mem_start != 0) pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start); -fail2: pci_release_regions(pdev); complete(&rtlpriv->firmware_loading_complete); diff --git a/drivers/net/wireless/realtek/rtlwifi/rc.c b/drivers/net/wireless/realtek/rtlwifi/rc.c index 951d257cd4c0..02811eda57cd 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rc.c +++ b/drivers/net/wireless/realtek/rtlwifi/rc.c @@ -283,7 +283,7 @@ static void rtl_rate_free_sta(void *rtlpriv, kfree(rate_priv); } -static struct rate_control_ops rtl_rate_ops = { +static const struct rate_control_ops rtl_rate_ops = { .name = "rtl_rc", .alloc = rtl_rate_alloc, .free = rtl_rate_free, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c index 774e72058d24..57e5d5c1d24b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c @@ -175,6 +175,8 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw) rtl_fw_cb); if (err) { pr_info("Failed to request firmware!\n"); + vfree(rtlpriv->rtlhal.pfirmware); + rtlpriv->rtlhal.pfirmware = NULL; return 1; } @@ -376,7 +378,7 @@ static const struct rtl_hal_cfg rtl88ee_hal_cfg = { .maps[RTL_RC_HT_RATEMCS15] = DESC92C_RATEMCS15, }; -static struct pci_device_id rtl88ee_pci_ids[] = { +static const struct pci_device_id rtl88ee_pci_ids[] = { {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8179, rtl88ee_hal_cfg)}, {}, }; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c index bcbb0c60f1f1..38f85bfdf0c7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c @@ -176,6 +176,8 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw) rtl_fw_cb); if (err) { pr_err("Failed to request firmware!\n"); + vfree(rtlpriv->rtlhal.pfirmware); + rtlpriv->rtlhal.pfirmware = NULL; return 1; } diff --git 
a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c index f95a64507f17..530e80f0ef0b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c @@ -777,10 +777,6 @@ static void _rtl92cu_init_queue_priority(struct ieee80211_hw *hw, queue_sel); } -static void _rtl92cu_init_usb_aggregation(struct ieee80211_hw *hw) -{ -} - static void _rtl92cu_init_wmac_setting(struct ieee80211_hw *hw) { u16 value16; @@ -870,7 +866,6 @@ static int _rtl92cu_init_mac(struct ieee80211_hw *hw) rtl92c_init_edca(hw); rtl92c_init_rate_fallback(hw); rtl92c_init_retry_function(hw); - _rtl92cu_init_usb_aggregation(hw); rtlpriv->cfg->ops->set_bw_mode(hw, NL80211_CHAN_HT20); rtl92c_set_min_space(hw, IS_92C_SERIAL(rtlhal->version)); _rtl92cu_init_beacon_parameters(hw); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c index 1b124eade846..5657b1e34ad0 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c @@ -352,11 +352,10 @@ u32 rtl92c_get_txdma_status(struct ieee80211_hw *hw) void rtl92c_enable_interrupt(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); - if (IS_HARDWARE_TYPE_8192CE(rtlhal)) { + if (IS_HARDWARE_TYPE_8192CE(rtlpriv)) { rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF); rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c index 96c923b3feb4..43e021b49260 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c @@ -85,6 +85,10 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw) err = request_firmware_nowait(THIS_MODULE, 1, fw_name, rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); + if (err) { + vfree(rtlpriv->rtlhal.pfirmware); + rtlpriv->rtlhal.pfirmware = NULL; + } return err; } @@ -173,7 +177,7 @@ static struct rtl_hal_usbint_cfg rtl92cu_interface_cfg = { .rx_urb_num = RTL92C_NUM_RX_URBS, .rx_max_size = RTL92C_SIZE_MAX_RX_BUFFER, .usb_rx_hdl = rtl8192cu_rx_hdl, - .usb_rx_segregate_hdl = NULL, /* rtl8192c_rx_segregate_hdl; */ + .usb_rx_segregate_hdl = NULL, /* tx */ .usb_tx_cleanup = rtl8192c_tx_cleanup, .usb_tx_post_hdl = rtl8192c_tx_post_hdl, @@ -275,7 +279,7 @@ static struct rtl_hal_cfg rtl92cu_hal_cfg = { #define USB_VENDER_ID_REALTEK 0x0bda /* 2010-10-19 DID_USB_V3.4 */ -static struct usb_device_id rtl8192c_usb_ids[] = { +static const struct usb_device_id rtl8192c_usb_ids[] = { /*=== Realtek demoboard ===*/ /* Default ID */ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c index de6c3428f7c6..ac4a82de40c7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c @@ -436,13 +436,6 @@ void rtl8192cu_rx_hdl(struct ieee80211_hw *hw, struct sk_buff * skb) _rtl_rx_process(hw, skb); } -void rtl8192c_rx_segregate_hdl( - struct ieee80211_hw *hw, - struct sk_buff *skb, - struct sk_buff_head *skb_list) -{ -} - /*---------------------------------------------------------------------- * * Tx handler @@ -675,8 +668,3 @@ void 
rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw, RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, "H2C Tx Cmd Content", pdesc, RTL_TX_DESC_SIZE); } - -bool rtl92cu_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb) -{ - return true; -} diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h index 487eec89bc29..15a66c547287 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h @@ -385,8 +385,6 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw, struct ieee80211_rx_status *rx_status, u8 *p_desc, struct sk_buff *skb); void rtl8192cu_rx_hdl(struct ieee80211_hw *hw, struct sk_buff * skb); -void rtl8192c_rx_segregate_hdl(struct ieee80211_hw *, struct sk_buff *, - struct sk_buff_head *); void rtl8192c_tx_cleanup(struct ieee80211_hw *hw, struct sk_buff *skb); int rtl8192c_tx_post_hdl(struct ieee80211_hw *hw, struct urb *urb, struct sk_buff *skb); @@ -404,6 +402,5 @@ void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 * pDesc, void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool b_firstseg, bool b_lastseg, struct sk_buff *skb); -bool rtl92cu_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c index 16132c66e5e1..a6549f5f6c59 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c @@ -183,6 +183,8 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw) rtl_fw_cb); if (err) { pr_err("Failed to request firmware!\n"); + vfree(rtlpriv->rtlhal.pfirmware); + rtlpriv->rtlhal.pfirmware = NULL; return 1; } @@ -347,7 +349,7 @@ static const struct rtl_hal_cfg rtl92de_hal_cfg = { .maps[RTL_RC_HT_RATEMCS15] = DESC_RATEMCS15, }; -static struct pci_device_id rtl92de_pci_ids[] = { +static const struct pci_device_id rtl92de_pci_ids[] = { {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8193, rtl92de_hal_cfg)}, {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x002B, rtl92de_hal_cfg)}, {}, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c index f5d4df985c37..7eae27f8e173 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c @@ -887,6 +887,7 @@ void rtl92ee_c2h_content_parsing(struct ieee80211_hw *hw, u8 c2h_cmd_id, u8 c2h_cmd_len, u8 *tmp_buf) { struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops; switch (c2h_cmd_id) { case C2H_8192E_DBG: @@ -905,12 +906,16 @@ void rtl92ee_c2h_content_parsing(struct ieee80211_hw *hw, u8 c2h_cmd_id, case C2H_8192E_BT_INFO: RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "[C2H], C2H_8723BE_BT_INFO!!\n"); - rtlpriv->btcoexist.btc_ops->btc_btinfo_notify(rtlpriv, tmp_buf, - c2h_cmd_len); + if (rtlpriv->cfg->ops->get_btc_status()) + btc_ops->btc_btinfo_notify(rtlpriv, tmp_buf, + c2h_cmd_len); break; case C2H_8192E_BT_MP: RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "[C2H], C2H_8723BE_BT_MP!!\n"); + if (rtlpriv->cfg->ops->get_btc_status()) + btc_ops->btc_btmpinfo_notify(rtlpriv, tmp_buf, + c2h_cmd_len); break; case C2H_8192E_RA_RPT: _rtl92ee_c2h_ra_report_handler(hw, tmp_buf, c2h_cmd_len); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c index d84ac7adfd82..ef9394be7016 100644 --- 
a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c @@ -2133,7 +2133,12 @@ static void _rtl92ee_read_adapter_info(struct ieee80211_hw *hw) if ((*(u8 *)&hwinfo[EEPROM_RF_BOARD_OPTION_92E]) == 0xFF) rtlefuse->board_type = 0; + if (rtlpriv->btcoexist.btc_info.btcoexist == 1) + rtlefuse->board_type |= BIT(2); /* ODM_BOARD_BT */ + rtlhal->board_type = rtlefuse->board_type; + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "board_type = 0x%x\n", rtlefuse->board_type); /*parse xtal*/ rtlefuse->crystalcap = hwinfo[EEPROM_XTAL_92E]; if (hwinfo[EEPROM_XTAL_92E] == 0xFF) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c index eaa503b7c4b4..a3490080d066 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c @@ -177,6 +177,8 @@ int rtl92ee_init_sw_vars(struct ieee80211_hw *hw) rtl_fw_cb); if (err) { pr_err("Failed to request firmware!\n"); + vfree(rtlpriv->rtlhal.pfirmware); + rtlpriv->rtlhal.pfirmware = NULL; return 1; } @@ -354,7 +356,7 @@ static const struct rtl_hal_cfg rtl92ee_hal_cfg = { .maps[RTL_RC_HT_RATEMCS15] = DESC92C_RATEMCS15, }; -static struct pci_device_id rtl92ee_pci_ids[] = { +static const struct pci_device_id rtl92ee_pci_ids[] = { {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x818B, rtl92ee_hal_cfg)}, {}, }; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c index 55f238a2a310..c58393eab6a1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c @@ -478,7 +478,6 @@ u16 rtl92ee_rx_desc_buff_remained_cnt(struct ieee80211_hw *hw, u8 queue_index) struct rtl_priv *rtlpriv = rtl_priv(hw); u16 read_point = 0, write_point = 0, remind_cnt = 0; u32 tmp_4byte = 0; - static u16 last_read_point; static bool start_rx; tmp_4byte = rtl_read_dword(rtlpriv, REG_RXQ_TXBD_IDX); @@ -506,7 +505,6 @@ u16 rtl92ee_rx_desc_buff_remained_cnt(struct ieee80211_hw *hw, u8 queue_index) rtlpci->rx_ring[queue_index].next_rx_rp = write_point; - last_read_point = read_point; return remind_cnt; } @@ -917,7 +915,6 @@ void rtl92ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, struct rtl_priv *rtlpriv = rtl_priv(hw); u16 cur_tx_rp = 0; u16 cur_tx_wp = 0; - static u16 last_txw_point; static bool over_run; u32 tmp = 0; u8 q_idx = *val; @@ -951,9 +948,6 @@ void rtl92ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, rtl_write_word(rtlpriv, get_desc_addr_fr_q_idx(q_idx), ring->cur_tx_wp); - - if (q_idx == 1) - last_txw_point = cur_tx_wp; } if (ring->avl_desc < (max_tx_desc - 15)) { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c index 2006b09ea74f..d7945b9db493 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c @@ -216,6 +216,8 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw) rtl92se_fw_cb); if (err) { pr_err("Failed to request firmware!\n"); + vfree(rtlpriv->rtlhal.pfirmware); + rtlpriv->rtlhal.pfirmware = NULL; return 1; } @@ -396,7 +398,7 @@ static const struct rtl_hal_cfg rtl92se_hal_cfg = { .maps[RTL_RC_HT_RATEMCS15] = DESC_RATEMCS15, }; -static struct pci_device_id rtl92se_pci_ids[] = { +static const struct pci_device_id rtl92se_pci_ids[] = { {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8192, rtl92se_hal_cfg)}, 
{RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8171, rtl92se_hal_cfg)}, {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8172, rtl92se_hal_cfg)}, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c index 7bf9f2557920..97b8bd294aa8 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c @@ -184,6 +184,8 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw) rtl_fw_cb); if (err) { pr_err("Failed to request firmware!\n"); + vfree(rtlpriv->rtlhal.pfirmware); + rtlpriv->rtlhal.pfirmware = NULL; return 1; } return 0; @@ -367,7 +369,7 @@ static const struct rtl_hal_cfg rtl8723e_hal_cfg = { .maps[RTL_RC_HT_RATEMCS15] = DESC92C_RATEMCS15, }; -static struct pci_device_id rtl8723e_pci_ids[] = { +static const struct pci_device_id rtl8723e_pci_ids[] = { {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8723, rtl8723e_hal_cfg)}, {}, }; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c index 131c0d1d633e..15c117e95a99 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c @@ -883,12 +883,8 @@ static void rtl8723be_dm_txpower_tracking_callback_thermalmeter( if ((rtldm->power_index_offset[RF90_PATH_A] != 0) && (rtldm->txpower_track_control)) { rtldm->done_txpower = true; - if (thermalvalue > rtlefuse->eeprom_thermalmeter) - rtl8723be_dm_tx_power_track_set_power(hw, BBSWING, 0, - index_for_channel); - else - rtl8723be_dm_tx_power_track_set_power(hw, BBSWING, 0, - index_for_channel); + rtl8723be_dm_tx_power_track_set_power(hw, BBSWING, 0, + index_for_channel); rtldm->swing_idx_cck_base = rtldm->swing_idx_cck; rtldm->swing_idx_ofdm_base[RF90_PATH_A] = diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c index dd6f95cfaec9..4b963fd27d64 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c @@ -709,6 +709,7 @@ void rtl8723be_c2h_content_parsing(struct ieee80211_hw *hw, u8 c2h_cmd_len, u8 *tmp_buf) { struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops; switch (c2h_cmd_id) { case C2H_8723B_DBG: @@ -723,12 +724,16 @@ void rtl8723be_c2h_content_parsing(struct ieee80211_hw *hw, case C2H_8723B_BT_INFO: RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "[C2H], C2H_8723BE_BT_INFO!!\n"); - rtlpriv->btcoexist.btc_ops->btc_btinfo_notify(rtlpriv, tmp_buf, - c2h_cmd_len); + if (rtlpriv->cfg->ops->get_btc_status()) + btc_ops->btc_btinfo_notify(rtlpriv, tmp_buf, + c2h_cmd_len); break; case C2H_8723B_BT_MP: RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "[C2H], C2H_8723BE_BT_MP!!\n"); + if (rtlpriv->cfg->ops->get_btc_status()) + btc_ops->btc_btmpinfo_notify(rtlpriv, tmp_buf, + c2h_cmd_len); break; default: RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c index cd5dc6dcb19f..4d47b97adfed 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c @@ -2111,6 +2111,13 @@ static void _rtl8723be_read_adapter_info(struct ieee80211_hw *hw, rtlefuse->autoload_failflag, hwinfo); + if (rtlpriv->btcoexist.btc_info.btcoexist == 1) + rtlefuse->board_type |= BIT(2); /* ODM_BOARD_BT */ + + rtlhal->board_type = rtlefuse->board_type; + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, 
+ "board_type = 0x%x\n", rtlefuse->board_type); + rtlhal->package_type = _rtl8723be_read_package_type(hw); /* set channel plan from efuse */ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c index 9752175cc466..9606641519e7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c @@ -152,33 +152,86 @@ bool rtl8723be_phy_rf_config(struct ieee80211_hw *hw) return rtl8723be_phy_rf6052_config(hw); } -static bool _rtl8723be_check_condition(struct ieee80211_hw *hw, - const u32 condition) +static bool _rtl8723be_check_positive(struct ieee80211_hw *hw, + const u32 condition1, + const u32 condition2) { - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); - u32 _board = rtlefuse->board_type; /*need efuse define*/ - u32 _interface = rtlhal->interface; - u32 _platform = 0x08;/*SupportPlatform */ - u32 cond = condition; + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + u32 cut_ver = ((rtlhal->version & CHIP_VER_RTL_MASK) + >> CHIP_VER_RTL_SHIFT); + u32 intf = (rtlhal->interface == INTF_USB ? BIT(1) : BIT(0)); - if (condition == 0xCDCDCDCD) - return true; + u8 board_type = ((rtlhal->board_type & BIT(4)) >> 4) << 0 | /* _GLNA */ + ((rtlhal->board_type & BIT(3)) >> 3) << 1 | /* _GPA */ + ((rtlhal->board_type & BIT(7)) >> 7) << 2 | /* _ALNA */ + ((rtlhal->board_type & BIT(6)) >> 6) << 3 | /* _APA */ + ((rtlhal->board_type & BIT(2)) >> 2) << 4; /* _BT */ - cond = condition & 0xFF; - if ((_board & cond) == 0 && cond != 0x1F) + u32 cond1 = condition1, cond2 = condition2; + u32 driver1 = cut_ver << 24 | /* CUT ver */ + 0 << 20 | /* interface 2/2 */ + 0x04 << 16 | /* platform */ + rtlhal->package_type << 12 | + intf << 8 | /* interface 1/2 */ + board_type; + + u32 driver2 = rtlhal->type_glna << 0 | + rtlhal->type_gpa << 8 | + rtlhal->type_alna << 16 | + rtlhal->type_apa << 24; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "===> [8812A] CheckPositive (cond1, cond2) = (0x%X 0x%X)\n", + cond1, cond2); + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "===> [8812A] CheckPositive (driver1, driver2) = (0x%X 0x%X)\n", + driver1, driver2); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + " (Platform, Interface) = (0x%X, 0x%X)\n", 0x04, intf); + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + " (Board, Package) = (0x%X, 0x%X)\n", + rtlhal->board_type, rtlhal->package_type); + + /*============== Value Defined Check ===============*/ + /*QFN Type [15:12] and Cut Version [27:24] need to do value check*/ + + if (((cond1 & 0x0000F000) != 0) && ((cond1 & 0x0000F000) != + (driver1 & 0x0000F000))) + return false; + if (((cond1 & 0x0F000000) != 0) && ((cond1 & 0x0F000000) != + (driver1 & 0x0F000000))) return false; - cond = condition & 0xFF00; - cond = cond >> 8; - if ((_interface & cond) == 0 && cond != 0x07) - return false; + /*=============== Bit Defined Check ================*/ + /* We don't care [31:28] */ - cond = condition & 0xFF0000; - cond = cond >> 16; - if ((_platform & cond) == 0 && cond != 0x0F) - return false; - return true; + cond1 &= 0x00FF0FFF; + driver1 &= 0x00FF0FFF; + + if ((cond1 & driver1) == cond1) { + u32 mask = 0; + + if ((cond1 & 0x0F) == 0) /* BoardType is DONTCARE*/ + return true; + + if ((cond1 & BIT(0)) != 0) /*GLNA*/ + mask |= 0x000000FF; + if ((cond1 & BIT(1)) != 0) /*GPA*/ + mask |= 0x0000FF00; + if ((cond1 & BIT(2)) != 0) /*ALNA*/ + mask |= 0x00FF0000; + if ((cond1 & BIT(3)) != 0) 
/*APA*/ + mask |= 0xFF000000; + + /* BoardType of each RF path is matched*/ + if ((cond2 & mask) == (driver2 & mask)) + return true; + else + return false; + } + return false; } static void _rtl8723be_config_rf_reg(struct ieee80211_hw *hw, u32 addr, @@ -464,6 +517,16 @@ static bool _rtl8723be_phy_bb8723b_config_parafile(struct ieee80211_hw *hw) struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); bool rtstatus; + /* switch ant to BT */ + if (rtlpriv->rtlhal.interface == INTF_USB) { + rtl_write_dword(rtlpriv, 0x948, 0x0); + } else { + if (rtlpriv->btcoexist.btc_info.single_ant_path == 0) + rtl_write_dword(rtlpriv, 0x948, 0x280); + else + rtl_write_dword(rtlpriv, 0x948, 0x0); + } + rtstatus = _rtl8723be_phy_config_bb_with_headerfile(hw, BASEBAND_CONFIG_PHY_REG); if (!rtstatus) { @@ -493,142 +556,84 @@ static bool _rtl8723be_phy_bb8723b_config_parafile(struct ieee80211_hw *hw) return true; } +static bool rtl8723be_phy_config_with_headerfile(struct ieee80211_hw *hw, + u32 *array_table, + u16 arraylen, + void (*set_reg)(struct ieee80211_hw *hw, u32 regaddr, u32 data)) +{ + #define COND_ELSE 2 + #define COND_ENDIF 3 + + int i = 0; + u8 cond; + bool matched = true, skipped = false; + + while ((i + 1) < arraylen) { + u32 v1 = array_table[i]; + u32 v2 = array_table[i + 1]; + + if (v1 & (BIT(31) | BIT(30))) {/*positive & negative condition*/ + if (v1 & BIT(31)) {/* positive condition*/ + cond = (u8)((v1 & (BIT(29) | BIT(28))) >> 28); + if (cond == COND_ENDIF) { /*end*/ + matched = true; + skipped = false; + } else if (cond == COND_ELSE) { /*else*/ + matched = skipped ? false : true; + } else {/*if , else if*/ + if (skipped) { + matched = false; + } else { + if (_rtl8723be_check_positive( + hw, v1, v2)) { + matched = true; + skipped = true; + } else { + matched = false; + skipped = false; + } + } + } + } else if (v1 & BIT(30)) { /*negative condition*/ + /*do nothing*/ + } + } else { + if (matched) + set_reg(hw, v1, v2); + } + i = i + 2; + } + + return true; +} + static bool _rtl8723be_phy_config_mac_with_headerfile(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 i; - u32 arraylength; - u32 *ptrarray; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read rtl8723beMACPHY_Array\n"); - arraylength = RTL8723BEMAC_1T_ARRAYLEN; - ptrarray = RTL8723BEMAC_1T_ARRAY; - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "Img:RTL8723bEMAC_1T_ARRAY LEN %d\n", arraylength); - for (i = 0; i < arraylength; i = i + 2) - rtl_write_byte(rtlpriv, ptrarray[i], (u8)ptrarray[i + 1]); - return true; + + return rtl8723be_phy_config_with_headerfile(hw, + RTL8723BEMAC_1T_ARRAY, RTL8723BEMAC_1T_ARRAYLEN, + rtl_write_byte_with_val32); } static bool _rtl8723be_phy_config_bb_with_headerfile(struct ieee80211_hw *hw, u8 configtype) { - #define READ_NEXT_PAIR(v1, v2, i) \ - do { \ - i += 2; \ - v1 = array_table[i];\ - v2 = array_table[i+1]; \ - } while (0) - int i; - u32 *array_table; - u16 arraylen; - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 v1 = 0, v2 = 0; + if (configtype == BASEBAND_CONFIG_PHY_REG) + return rtl8723be_phy_config_with_headerfile(hw, + RTL8723BEPHY_REG_1TARRAY, + RTL8723BEPHY_REG_1TARRAYLEN, + _rtl8723be_config_bb_reg); + else if (configtype == BASEBAND_CONFIG_AGC_TAB) + return rtl8723be_phy_config_with_headerfile(hw, + RTL8723BEAGCTAB_1TARRAY, + RTL8723BEAGCTAB_1TARRAYLEN, + rtl_set_bbreg_with_dwmask); - if (configtype == BASEBAND_CONFIG_PHY_REG) { - arraylen = RTL8723BEPHY_REG_1TARRAYLEN; - array_table = RTL8723BEPHY_REG_1TARRAY; - - for (i = 0; i < arraylen; i = i + 2) { - v1 = array_table[i]; - v2 
= array_table[i+1]; - if (v1 < 0xcdcdcdcd) { - _rtl8723be_config_bb_reg(hw, v1, v2); - } else {/*This line is the start line of branch.*/ - /* to protect READ_NEXT_PAIR not overrun */ - if (i >= arraylen - 2) - break; - - if (!_rtl8723be_check_condition(hw, - array_table[i])) { - /*Discard the following - *(offset, data) pairs - */ - READ_NEXT_PAIR(v1, v2, i); - while (v2 != 0xDEAD && - v2 != 0xCDEF && - v2 != 0xCDCD && - i < arraylen - 2) { - READ_NEXT_PAIR(v1, v2, i); - } - i -= 2; /* prevent from for-loop += 2*/ - /*Configure matched pairs and - *skip to end of if-else. - */ - } else { - READ_NEXT_PAIR(v1, v2, i); - while (v2 != 0xDEAD && - v2 != 0xCDEF && - v2 != 0xCDCD && - i < arraylen - 2) { - _rtl8723be_config_bb_reg(hw, - v1, v2); - READ_NEXT_PAIR(v1, v2, i); - } - - while (v2 != 0xDEAD && i < arraylen - 2) - READ_NEXT_PAIR(v1, v2, i); - } - } - } - } else if (configtype == BASEBAND_CONFIG_AGC_TAB) { - arraylen = RTL8723BEAGCTAB_1TARRAYLEN; - array_table = RTL8723BEAGCTAB_1TARRAY; - - for (i = 0; i < arraylen; i = i + 2) { - v1 = array_table[i]; - v2 = array_table[i+1]; - if (v1 < 0xCDCDCDCD) { - rtl_set_bbreg(hw, array_table[i], - MASKDWORD, - array_table[i + 1]); - udelay(1); - continue; - } else {/*This line is the start line of branch.*/ - /* to protect READ_NEXT_PAIR not overrun */ - if (i >= arraylen - 2) - break; - - if (!_rtl8723be_check_condition(hw, - array_table[i])) { - /*Discard the following - *(offset, data) pairs - */ - READ_NEXT_PAIR(v1, v2, i); - while (v2 != 0xDEAD && - v2 != 0xCDEF && - v2 != 0xCDCD && - i < arraylen - 2) { - READ_NEXT_PAIR(v1, v2, i); - } - i -= 2; /* prevent from for-loop += 2*/ - /*Configure matched pairs and - *skip to end of if-else. - */ - } else { - READ_NEXT_PAIR(v1, v2, i); - while (v2 != 0xDEAD && - v2 != 0xCDEF && - v2 != 0xCDCD && - i < arraylen - 2) { - rtl_set_bbreg(hw, array_table[i], - MASKDWORD, - array_table[i + 1]); - udelay(1); - READ_NEXT_PAIR(v1, v2, i); - } - - while (v2 != 0xDEAD && i < arraylen - 2) - READ_NEXT_PAIR(v1, v2, i); - } - } - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "The agctab_array_table[0] is %x Rtl818EEPHY_REGArray[1] is %x\n", - array_table[i], array_table[i + 1]); - } - } - return true; + return false; } static u8 _rtl8723be_get_rate_section_index(u32 regaddr) @@ -761,73 +766,17 @@ static bool _rtl8723be_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw, bool rtl8723be_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, enum radio_path rfpath) { - #define READ_NEXT_RF_PAIR(v1, v2, i) \ - do { \ - i += 2; \ - v1 = radioa_array_table[i]; \ - v2 = radioa_array_table[i+1]; \ - } while (0) - - int i; - bool rtstatus = true; - u32 *radioa_array_table; - u16 radioa_arraylen; struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - u32 v1 = 0, v2 = 0; + bool ret = true; - radioa_arraylen = RTL8723BE_RADIOA_1TARRAYLEN; - radioa_array_table = RTL8723BE_RADIOA_1TARRAY; - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "Radio_A:RTL8723BE_RADIOA_1TARRAY %d\n", radioa_arraylen); RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath); - rtstatus = true; switch (rfpath) { case RF90_PATH_A: - for (i = 0; i < radioa_arraylen; i = i + 2) { - v1 = radioa_array_table[i]; - v2 = radioa_array_table[i+1]; - if (v1 < 0xcdcdcdcd) { - _rtl8723be_config_rf_radio_a(hw, v1, v2); - } else {/*This line is the start line of branch.*/ - /* to protect READ_NEXT_PAIR not overrun */ - if (i >= radioa_arraylen - 2) - break; - - if (!_rtl8723be_check_condition(hw, - radioa_array_table[i])) { - 
/*Discard the following - *(offset, data) pairs - */ - READ_NEXT_RF_PAIR(v1, v2, i); - while (v2 != 0xDEAD && - v2 != 0xCDEF && - v2 != 0xCDCD && - i < radioa_arraylen - 2) { - READ_NEXT_RF_PAIR(v1, v2, i); - } - i -= 2; /* prevent from for-loop += 2*/ - } else { - /*Configure matched pairs - *and skip to end of if-else. - */ - READ_NEXT_RF_PAIR(v1, v2, i); - while (v2 != 0xDEAD && - v2 != 0xCDEF && - v2 != 0xCDCD && - i < radioa_arraylen - 2) { - _rtl8723be_config_rf_radio_a(hw, - v1, v2); - READ_NEXT_RF_PAIR(v1, v2, i); - } - - while (v2 != 0xDEAD && - i < radioa_arraylen - 2) { - READ_NEXT_RF_PAIR(v1, v2, i); - } - } - } - } + ret = rtl8723be_phy_config_with_headerfile(hw, + RTL8723BE_RADIOA_1TARRAY, + RTL8723BE_RADIOA_1TARRAYLEN, + _rtl8723be_config_rf_radio_a); if (rtlhal->oem_id == RT_CID_819X_HP) _rtl8723be_config_rf_radio_a(hw, 0x52, 0x7E4BD); @@ -840,7 +789,7 @@ bool rtl8723be_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, "switch case %#x not processed\n", rfpath); break; } - return true; + return ret; } void rtl8723be_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw) @@ -1350,7 +1299,7 @@ void rtl8723be_phy_sw_chnl_callback(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_phy *rtlphy = &rtlpriv->phy; - u32 delay; + u32 delay = 0; RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "switch to channel%d\n", rtlphy->current_channel); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c index f9d10f1e7cf8..2b16a1467e78 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c @@ -187,16 +187,10 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); if (err) { - /* Failed to get firmware. 
Check if old version available */ - fw_name = "rtlwifi/rtl8723befw.bin"; - pr_info("Using firmware %s\n", fw_name); - err = request_firmware_nowait(THIS_MODULE, 1, fw_name, - rtlpriv->io.dev, GFP_KERNEL, hw, - rtl_fw_cb); - if (err) { - pr_err("Failed to request firmware!\n"); - return 1; - } + pr_err("Failed to request firmware!\n"); + vfree(rtlpriv->rtlhal.pfirmware); + rtlpriv->rtlhal.pfirmware = NULL; + return 1; } return 0; } @@ -287,6 +281,7 @@ static const struct rtl_hal_cfg rtl8723be_hal_cfg = { .bar_id = 2, .write_readback = true, .name = "rtl8723be_pci", + .alt_fw_name = "rtlwifi/rtl8723befw.bin", .ops = &rtl8723be_hal_ops, .mod_params = &rtl8723be_mod_params, .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL, @@ -380,7 +375,7 @@ static const struct rtl_hal_cfg rtl8723be_hal_cfg = { .maps[RTL_RC_HT_RATEMCS15] = DESC92C_RATEMCS15, }; -static struct pci_device_id rtl8723be_pci_ids[] = { +static const struct pci_device_id rtl8723be_pci_ids[] = { {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xB723, rtl8723be_hal_cfg)}, {}, }; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c index a180761e8810..381c16b9b3a9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c @@ -26,6 +26,7 @@ *****************************************************************************/ #include "table.h" + u32 RTL8723BEPHY_REG_1TARRAY[] = { 0x800, 0x80040000, 0x804, 0x00000003, @@ -36,7 +37,7 @@ u32 RTL8723BEPHY_REG_1TARRAY[] = { 0x818, 0x02200385, 0x81C, 0x00000000, 0x820, 0x01000100, - 0x824, 0x00390204, + 0x824, 0x00190204, 0x828, 0x00000000, 0x82C, 0x00000000, 0x830, 0x00000000, @@ -73,9 +74,8 @@ u32 RTL8723BEPHY_REG_1TARRAY[] = { 0x90C, 0x81121111, 0x910, 0x00000002, 0x914, 0x00000201, - 0x948, 0x00000280, 0xA00, 0x00D047C8, - 0xA04, 0x80FF000C, + 0xA04, 0x80FF800C, 0xA08, 0x8C838300, 0xA0C, 0x2E7F120F, 0xA10, 0x9500BB78, @@ -114,7 +114,7 @@ u32 RTL8723BEPHY_REG_1TARRAY[] = { 0xC4C, 0x007F037F, 0xC50, 0x69553420, 0xC54, 0x43BC0094, - 0xC58, 0x00023169, + 0xC58, 0x00013147, 0xC5C, 0x00250492, 0xC60, 0x00000000, 0xC64, 0x7112848B, @@ -125,7 +125,7 @@ u32 RTL8723BEPHY_REG_1TARRAY[] = { 0xC78, 0x0000001F, 0xC7C, 0x00B91612, 0xC80, 0x390000E4, - 0xC84, 0x20F60000, + 0xC84, 0x21F60000, 0xC88, 0x40000100, 0xC8C, 0x20200000, 0xC90, 0x00020E1A, @@ -224,15 +224,21 @@ u32 RTL8723BEPHY_REG_1TARRAY[] = { }; +u32 RTL8723BEPHY_REG_1TARRAYLEN = + sizeof(RTL8723BEPHY_REG_1TARRAY) / sizeof(u32); + u32 RTL8723BEPHY_REG_ARRAY_PG[] = { - 0, 0, 0, 0x00000e08, 0x0000ff00, 0x00004000, - 0, 0, 0, 0x0000086c, 0xffffff00, 0x34363800, - 0, 0, 0, 0x00000e00, 0xffffffff, 0x42444646, - 0, 0, 0, 0x00000e04, 0xffffffff, 0x30343840, + 0, 0, 0, 0x00000e08, 0x0000ff00, 0x00003800, + 0, 0, 0, 0x0000086c, 0xffffff00, 0x32343600, + 0, 0, 0, 0x00000e00, 0xffffffff, 0x40424444, + 0, 0, 0, 0x00000e04, 0xffffffff, 0x28323638, 0, 0, 0, 0x00000e10, 0xffffffff, 0x38404244, 0, 0, 0, 0x00000e14, 0xffffffff, 0x26303436 }; +u32 RTL8723BEPHY_REG_ARRAY_PGLEN = + sizeof(RTL8723BEPHY_REG_ARRAY_PG) / sizeof(u32); + u32 RTL8723BE_RADIOA_1TARRAY[] = { 0x000, 0x00010000, 0x0B0, 0x000DFFE0, @@ -257,15 +263,37 @@ u32 RTL8723BE_RADIOA_1TARRAY[] = { 0x01E, 0x00000000, 0x0DF, 0x00000780, 0x050, 0x00067435, + 0x80002000, 0x00000000, 0x40000000, 0x00000000, + 0x051, 0x0006F10E, + 0x052, 0x000007D3, + 0x90003000, 0x00000000, 0x40000000, 0x00000000, + 0x051, 0x0006F10E, + 0x052, 0x000007D3, + 0x90004000, 0x00000000, 0x40000000, 0x00000000, + 
0x051, 0x0006F10E, + 0x052, 0x000007D3, + 0xA0000000, 0x00000000, 0x051, 0x0006B04E, 0x052, 0x000007D2, + 0xB0000000, 0x00000000, 0x053, 0x00000000, 0x054, 0x00050400, 0x055, 0x0004026E, 0x0DD, 0x0000004C, 0x070, 0x00067435, + 0x80002000, 0x00000000, 0x40000000, 0x00000000, + 0x071, 0x0006F10E, + 0x072, 0x000007D3, + 0x90003000, 0x00000000, 0x40000000, 0x00000000, + 0x071, 0x0006F10E, + 0x072, 0x000007D3, + 0x90004000, 0x00000000, 0x40000000, 0x00000000, + 0x071, 0x0006F10E, + 0x072, 0x000007D3, + 0xA0000000, 0x00000000, 0x071, 0x0006B04E, 0x072, 0x000007D2, + 0xB0000000, 0x00000000, 0x073, 0x00000000, 0x074, 0x00050400, 0x075, 0x0004026E, @@ -308,6 +336,7 @@ u32 RTL8723BE_RADIOA_1TARRAY[] = { 0x044, 0x00000051, 0x0EF, 0x00000000, 0x0ED, 0x00000000, + 0x07F, 0x00020080, 0x0EF, 0x00002000, 0x03B, 0x000380EF, 0x03B, 0x000302FE, @@ -336,14 +365,24 @@ u32 RTL8723BE_RADIOA_1TARRAY[] = { 0x0A3, 0x00008000, 0x0A4, 0x00048D80, 0x0A5, 0x00068000, - 0x000, 0x00033D80, + 0x0ED, 0x00000002, + 0x0EF, 0x00000002, + 0x056, 0x00000032, + 0x076, 0x00000032, + 0x001, 0x00000780, }; +u32 RTL8723BE_RADIOA_1TARRAYLEN = + sizeof(RTL8723BE_RADIOA_1TARRAY) / sizeof(u32); + u32 RTL8723BEMAC_1T_ARRAY[] = { 0x02F, 0x00000030, 0x035, 0x00000000, + 0x039, 0x00000008, + 0x064, 0x00000000, 0x067, 0x00000020, + 0x421, 0x0000000F, 0x428, 0x0000000A, 0x429, 0x00000010, 0x430, 0x00000000, @@ -439,9 +478,13 @@ u32 RTL8723BEMAC_1T_ARRAY[] = { 0x709, 0x00000043, 0x70A, 0x00000065, 0x70B, 0x00000087, + 0x765, 0x00000018, + 0x76E, 0x00000004, }; +u32 RTL8723BEMAC_1T_ARRAYLEN = sizeof(RTL8723BEMAC_1T_ARRAY) / sizeof(u32); + u32 RTL8723BEAGCTAB_1TARRAY[] = { 0xC78, 0xFD000001, 0xC78, 0xFC010001, @@ -466,21 +509,21 @@ u32 RTL8723BEAGCTAB_1TARRAY[] = { 0xC78, 0xE9140001, 0xC78, 0xE8150001, 0xC78, 0xE7160001, - 0xC78, 0xAA170001, - 0xC78, 0xA9180001, - 0xC78, 0xA8190001, - 0xC78, 0xA71A0001, - 0xC78, 0xA61B0001, - 0xC78, 0xA51C0001, - 0xC78, 0xA41D0001, - 0xC78, 0xA31E0001, - 0xC78, 0x671F0001, - 0xC78, 0x66200001, - 0xC78, 0x65210001, - 0xC78, 0x64220001, - 0xC78, 0x63230001, - 0xC78, 0x62240001, - 0xC78, 0x61250001, + 0xC78, 0xE6170001, + 0xC78, 0xE5180001, + 0xC78, 0xE4190001, + 0xC78, 0xE31A0001, + 0xC78, 0xA51B0001, + 0xC78, 0xA41C0001, + 0xC78, 0xA31D0001, + 0xC78, 0x671E0001, + 0xC78, 0x661F0001, + 0xC78, 0x65200001, + 0xC78, 0x64210001, + 0xC78, 0x63220001, + 0xC78, 0x4A230001, + 0xC78, 0x49240001, + 0xC78, 0x48250001, 0xC78, 0x47260001, 0xC78, 0x46270001, 0xC78, 0x45280001, @@ -491,22 +534,22 @@ u32 RTL8723BEAGCTAB_1TARRAY[] = { 0xC78, 0x282D0001, 0xC78, 0x272E0001, 0xC78, 0x262F0001, - 0xC78, 0x25300001, - 0xC78, 0x24310001, - 0xC78, 0x09320001, - 0xC78, 0x08330001, - 0xC78, 0x07340001, - 0xC78, 0x06350001, - 0xC78, 0x05360001, - 0xC78, 0x04370001, - 0xC78, 0x03380001, - 0xC78, 0x02390001, + 0xC78, 0x0A300001, + 0xC78, 0x09310001, + 0xC78, 0x08320001, + 0xC78, 0x07330001, + 0xC78, 0x06340001, + 0xC78, 0x05350001, + 0xC78, 0x04360001, + 0xC78, 0x03370001, + 0xC78, 0x02380001, + 0xC78, 0x01390001, 0xC78, 0x013A0001, - 0xC78, 0x003B0001, - 0xC78, 0x003C0001, - 0xC78, 0x003D0001, - 0xC78, 0x003E0001, - 0xC78, 0x003F0001, + 0xC78, 0x013B0001, + 0xC78, 0x013C0001, + 0xC78, 0x013D0001, + 0xC78, 0x013E0001, + 0xC78, 0x013F0001, 0xC78, 0xFC400001, 0xC78, 0xFB410001, 0xC78, 0xFA420001, @@ -531,47 +574,50 @@ u32 RTL8723BEAGCTAB_1TARRAY[] = { 0xC78, 0xE7550001, 0xC78, 0xE6560001, 0xC78, 0xE5570001, - 0xC78, 0xAA580001, - 0xC78, 0xA9590001, - 0xC78, 0xA85A0001, - 0xC78, 0xA75B0001, - 0xC78, 0xA65C0001, - 0xC78, 0xA55D0001, - 0xC78, 
0xA45E0001, - 0xC78, 0x675F0001, - 0xC78, 0x66600001, - 0xC78, 0x65610001, - 0xC78, 0x64620001, - 0xC78, 0x63630001, - 0xC78, 0x62640001, - 0xC78, 0x61650001, + 0xC78, 0xE4580001, + 0xC78, 0xE3590001, + 0xC78, 0xA65A0001, + 0xC78, 0xA55B0001, + 0xC78, 0xA45C0001, + 0xC78, 0xA35D0001, + 0xC78, 0x675E0001, + 0xC78, 0x665F0001, + 0xC78, 0x65600001, + 0xC78, 0x64610001, + 0xC78, 0x63620001, + 0xC78, 0x62630001, + 0xC78, 0x61640001, + 0xC78, 0x48650001, 0xC78, 0x47660001, 0xC78, 0x46670001, 0xC78, 0x45680001, 0xC78, 0x44690001, 0xC78, 0x436A0001, 0xC78, 0x426B0001, - 0xC78, 0x296C0001, - 0xC78, 0x286D0001, - 0xC78, 0x276E0001, - 0xC78, 0x266F0001, - 0xC78, 0x25700001, - 0xC78, 0x24710001, - 0xC78, 0x09720001, - 0xC78, 0x08730001, - 0xC78, 0x07740001, - 0xC78, 0x06750001, - 0xC78, 0x05760001, - 0xC78, 0x04770001, - 0xC78, 0x03780001, - 0xC78, 0x02790001, + 0xC78, 0x286C0001, + 0xC78, 0x276D0001, + 0xC78, 0x266E0001, + 0xC78, 0x256F0001, + 0xC78, 0x24700001, + 0xC78, 0x09710001, + 0xC78, 0x08720001, + 0xC78, 0x07730001, + 0xC78, 0x06740001, + 0xC78, 0x05750001, + 0xC78, 0x04760001, + 0xC78, 0x03770001, + 0xC78, 0x02780001, + 0xC78, 0x01790001, 0xC78, 0x017A0001, - 0xC78, 0x007B0001, - 0xC78, 0x007C0001, - 0xC78, 0x007D0001, - 0xC78, 0x007E0001, - 0xC78, 0x007F0001, + 0xC78, 0x017B0001, + 0xC78, 0x017C0001, + 0xC78, 0x017D0001, + 0xC78, 0x017E0001, + 0xC78, 0x017F0001, 0xC50, 0x69553422, 0xC50, 0x69553420, + 0x824, 0x00390204, }; + +u32 RTL8723BEAGCTAB_1TARRAYLEN = sizeof(RTL8723BEAGCTAB_1TARRAY) / sizeof(u32); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.h index dc17001632f7..1deaffe22251 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.h @@ -29,15 +29,15 @@ #define __RTL8723BE_TABLE__H_ #include -#define RTL8723BEPHY_REG_1TARRAYLEN 388 +extern u32 RTL8723BEPHY_REG_1TARRAYLEN; extern u32 RTL8723BEPHY_REG_1TARRAY[]; -#define RTL8723BEPHY_REG_ARRAY_PGLEN 36 +extern u32 RTL8723BEPHY_REG_ARRAY_PGLEN; extern u32 RTL8723BEPHY_REG_ARRAY_PG[]; -#define RTL8723BE_RADIOA_1TARRAYLEN 206 +extern u32 RTL8723BE_RADIOA_1TARRAYLEN; extern u32 RTL8723BE_RADIOA_1TARRAY[]; -#define RTL8723BEMAC_1T_ARRAYLEN 196 +extern u32 RTL8723BEMAC_1T_ARRAYLEN; extern u32 RTL8723BEMAC_1T_ARRAY[]; -#define RTL8723BEAGCTAB_1TARRAYLEN 260 +extern u32 RTL8723BEAGCTAB_1TARRAYLEN; extern u32 RTL8723BEAGCTAB_1TARRAY[]; #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c index 03259aa150fd..f2b2c549e5b2 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c @@ -98,7 +98,7 @@ static int _rtl8821ae_fw_free_to_go(struct ieee80211_hw *hw) if (counter >= FW_8821AE_POLLING_TIMEOUT_COUNT) { RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, - "chksum report faill ! REG_MCUFWDL:0x%08x .\n", + "chksum report fail! 
REG_MCUFWDL:0x%08x .\n", value32); goto exit; } @@ -1923,6 +1923,7 @@ void rtl8821ae_c2h_content_parsing(struct ieee80211_hw *hw, u8 *tmp_buf) { struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops; switch (c2h_cmd_id) { case C2H_8812_DBG: @@ -1938,9 +1939,15 @@ void rtl8821ae_c2h_content_parsing(struct ieee80211_hw *hw, RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "[C2H], C2H_8812_BT_INFO!!\n"); if (rtlpriv->cfg->ops->get_btc_status()) - rtlpriv->btcoexist.btc_ops->btc_btinfo_notify(rtlpriv, - tmp_buf, - c2h_cmd_len); + btc_ops->btc_btinfo_notify(rtlpriv, tmp_buf, + c2h_cmd_len); + break; + case C2H_8812_BT_MP: + RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, + "[C2H], C2H_8812_BT_MP!!\n"); + if (rtlpriv->cfg->ops->get_btc_status()) + btc_ops->btc_btmpinfo_notify(rtlpriv, tmp_buf, + c2h_cmd_len); break; default: break; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c index 2bc6bace069c..4f73012978e9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c @@ -779,7 +779,7 @@ void rtl8821ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) _rtl8821ae_resume_tx_beacon(hw); break; } case HW_VAR_NAV_UPPER: { - u32 us_nav_upper = ((u32)*val); + u32 us_nav_upper = *(u32 *)val; if (us_nav_upper > HAL_92C_NAV_UPPER_UNIT * 0xFF) { RT_TRACE(rtlpriv, COMP_INIT , DBG_WARNING, @@ -2966,6 +2966,44 @@ static void _rtl8812ae_read_pa_type(struct ieee80211_hw *hw, u8 *hwinfo, } } +static void _rtl8812ae_read_amplifier_type(struct ieee80211_hw *hw, u8 *hwinfo, + bool autoload_fail) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + + u8 ext_type_pa_2g_a = (hwinfo[0xBD] & BIT(2)) >> 2; /* 0xBD[2] */ + u8 ext_type_pa_2g_b = (hwinfo[0xBD] & BIT(6)) >> 6; /* 0xBD[6] */ + u8 ext_type_pa_5g_a = (hwinfo[0xBF] & BIT(2)) >> 2; /* 0xBF[2] */ + u8 ext_type_pa_5g_b = (hwinfo[0xBF] & BIT(6)) >> 6; /* 0xBF[6] */ + /* 0xBD[1:0] */ + u8 ext_type_lna_2g_a = (hwinfo[0xBD] & (BIT(1) | BIT(0))) >> 0; + /* 0xBD[5:4] */ + u8 ext_type_lna_2g_b = (hwinfo[0xBD] & (BIT(5) | BIT(4))) >> 4; + /* 0xBF[1:0] */ + u8 ext_type_lna_5g_a = (hwinfo[0xBF] & (BIT(1) | BIT(0))) >> 0; + /* 0xBF[5:4] */ + u8 ext_type_lna_5g_b = (hwinfo[0xBF] & (BIT(5) | BIT(4))) >> 4; + + _rtl8812ae_read_pa_type(hw, hwinfo, autoload_fail); + + /* [2.4G] Path A and B are both extPA */ + if ((rtlhal->pa_type_2g & (BIT(5) | BIT(4))) == (BIT(5) | BIT(4))) + rtlhal->type_gpa = ext_type_pa_2g_b << 2 | ext_type_pa_2g_a; + + /* [5G] Path A and B are both extPA */ + if ((rtlhal->pa_type_5g & (BIT(1) | BIT(0))) == (BIT(1) | BIT(0))) + rtlhal->type_apa = ext_type_pa_5g_b << 2 | ext_type_pa_5g_a; + + /* [2.4G] Path A and B are both extLNA */ + if ((rtlhal->lna_type_2g & (BIT(7) | BIT(3))) == (BIT(7) | BIT(3))) + rtlhal->type_glna = ext_type_lna_2g_b << 2 | ext_type_lna_2g_a; + + /* [5G] Path A and B are both extLNA */ + if ((rtlhal->lna_type_5g & (BIT(7) | BIT(3))) == (BIT(7) | BIT(3))) + rtlhal->type_alna = ext_type_lna_5g_b << 2 | ext_type_lna_5g_a; +} + static void _rtl8821ae_read_pa_type(struct ieee80211_hw *hw, u8 *hwinfo, bool autoload_fail) { @@ -3114,7 +3152,8 @@ static void _rtl8821ae_read_adapter_info(struct ieee80211_hw *hw, bool b_pseudo_ hwinfo); if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { - _rtl8812ae_read_pa_type(hw, hwinfo, rtlefuse->autoload_failflag); + _rtl8812ae_read_amplifier_type(hw, hwinfo, + rtlefuse->autoload_failflag); 
_rtl8812ae_read_bt_coexist_info_from_hwpg(hw, rtlefuse->autoload_failflag, hwinfo); } else { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c index aa3ccc740521..176deb2b5386 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c @@ -3773,10 +3773,11 @@ static void _rtl8821ae_iqk_tx(struct ieee80211_hw *hw, enum radio_path path) u32 tx_fail, rx_fail, delay_count, iqk_ready, cal_retry, cal = 0, temp_reg65; int tx_x = 0, tx_y = 0, rx_x = 0, rx_y = 0, tx_average = 0, rx_average = 0; int tx_x0[cal_num], tx_y0[cal_num], tx_x0_rxk[cal_num], - tx_y0_rxk[cal_num], rx_x0[cal_num], rx_y0[cal_num]; + tx_y0_rxk[cal_num], rx_x0[cal_num], rx_y0[cal_num], + tx_dt[cal_num], rx_dt[cal_num]; bool tx0iqkok = false, rx0iqkok = false; bool vdf_enable = false; - int i, k, vdf_y[3], vdf_x[3], tx_dt[3], rx_dt[3], + int i, k, vdf_y[3], vdf_x[3], ii, dx = 0, dy = 0, tx_finish = 0, rx_finish = 0; RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c index d71d2776ca03..0894ef48ab87 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c @@ -196,6 +196,8 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->rtlhal.wowlan_firmware = vzalloc(0x8000); if (!rtlpriv->rtlhal.wowlan_firmware) { pr_err("Can't alloc buffer for wowlan fw.\n"); + vfree(rtlpriv->rtlhal.pfirmware); + rtlpriv->rtlhal.pfirmware = NULL; return 1; } @@ -214,16 +216,10 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); if (err) { - /* Failed to get firmware. 
Check if old version available */ - fw_name = "rtlwifi/rtl8821aefw.bin"; - pr_info("Using firmware %s\n", fw_name); - err = request_firmware_nowait(THIS_MODULE, 1, fw_name, - rtlpriv->io.dev, GFP_KERNEL, hw, - rtl_fw_cb); - if (err) { - pr_err("Failed to request normal firmware!\n"); - return 1; - } + pr_err("Failed to request normal firmware!\n"); + vfree(rtlpriv->rtlhal.wowlan_firmware); + vfree(rtlpriv->rtlhal.pfirmware); + return 1; } /*load wowlan firmware*/ pr_info("Using firmware %s\n", wowlan_fw_name); @@ -233,6 +229,8 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw) rtl_wowlan_fw_cb); if (err) { pr_err("Failed to request wowlan firmware!\n"); + vfree(rtlpriv->rtlhal.wowlan_firmware); + vfree(rtlpriv->rtlhal.pfirmware); return 1; } return 0; @@ -325,6 +323,7 @@ static const struct rtl_hal_cfg rtl8821ae_hal_cfg = { .bar_id = 2, .write_readback = true, .name = "rtl8821ae_pci", + .alt_fw_name = "rtlwifi/rtl8821aefw.bin", .ops = &rtl8821ae_hal_ops, .mod_params = &rtl8821ae_mod_params, .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL, @@ -424,7 +423,7 @@ static const struct rtl_hal_cfg rtl8821ae_hal_cfg = { .maps[RTL_RC_VHT_RATE_2SS_MCS9] = DESC_RATEVHT2SS_MCS9, }; -static struct pci_device_id rtl8821ae_pci_ids[] = { +static const struct pci_device_id rtl8821ae_pci_ids[] = { {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8812, rtl8821ae_hal_cfg)}, {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8821, rtl8821ae_hal_cfg)}, {}, diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index 70723e67b7d7..1ab1024330fb 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -314,35 +314,29 @@ enum hardware_type { HARDWARE_TYPE_RTL8192EE, HARDWARE_TYPE_RTL8821AE, HARDWARE_TYPE_RTL8812AE, + HARDWARE_TYPE_RTL8822BE, /* keep it last */ HARDWARE_TYPE_NUM }; -#define IS_HARDWARE_TYPE_8192SU(rtlhal) \ - (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SU) -#define IS_HARDWARE_TYPE_8192SE(rtlhal) \ - (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) -#define IS_HARDWARE_TYPE_8192CE(rtlhal) \ - (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CE) -#define IS_HARDWARE_TYPE_8192CU(rtlhal) \ - (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CU) -#define IS_HARDWARE_TYPE_8192DE(rtlhal) \ - (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE) -#define IS_HARDWARE_TYPE_8192DU(rtlhal) \ - (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DU) -#define IS_HARDWARE_TYPE_8723E(rtlhal) \ - (rtlhal->hw_type == HARDWARE_TYPE_RTL8723E) -#define IS_HARDWARE_TYPE_8723U(rtlhal) \ - (rtlhal->hw_type == HARDWARE_TYPE_RTL8723U) -#define IS_HARDWARE_TYPE_8192S(rtlhal) \ -(IS_HARDWARE_TYPE_8192SE(rtlhal) || IS_HARDWARE_TYPE_8192SU(rtlhal)) -#define IS_HARDWARE_TYPE_8192C(rtlhal) \ -(IS_HARDWARE_TYPE_8192CE(rtlhal) || IS_HARDWARE_TYPE_8192CU(rtlhal)) -#define IS_HARDWARE_TYPE_8192D(rtlhal) \ -(IS_HARDWARE_TYPE_8192DE(rtlhal) || IS_HARDWARE_TYPE_8192DU(rtlhal)) -#define IS_HARDWARE_TYPE_8723(rtlhal) \ -(IS_HARDWARE_TYPE_8723E(rtlhal) || IS_HARDWARE_TYPE_8723U(rtlhal)) +#define RTL_HW_TYPE(rtlpriv) (rtl_hal((struct rtl_priv *)rtlpriv)->hw_type) +#define IS_NEW_GENERATION_IC(rtlpriv) \ + (RTL_HW_TYPE(rtlpriv) >= HARDWARE_TYPE_RTL8192EE) +#define IS_HARDWARE_TYPE_8192CE(rtlpriv) \ + (RTL_HW_TYPE(rtlpriv) == HARDWARE_TYPE_RTL8192CE) +#define IS_HARDWARE_TYPE_8812(rtlpriv) \ + (RTL_HW_TYPE(rtlpriv) == HARDWARE_TYPE_RTL8812AE) +#define IS_HARDWARE_TYPE_8821(rtlpriv) \ + (RTL_HW_TYPE(rtlpriv) == HARDWARE_TYPE_RTL8821AE) +#define IS_HARDWARE_TYPE_8723A(rtlpriv) \ + (RTL_HW_TYPE(rtlpriv) == 
HARDWARE_TYPE_RTL8723AE) +#define IS_HARDWARE_TYPE_8723B(rtlpriv) \ + (RTL_HW_TYPE(rtlpriv) == HARDWARE_TYPE_RTL8723BE) +#define IS_HARDWARE_TYPE_8192E(rtlpriv) \ + (RTL_HW_TYPE(rtlpriv) == HARDWARE_TYPE_RTL8192EE) +#define IS_HARDWARE_TYPE_8822B(rtlpriv) \ + (RTL_HW_TYPE(rtlpriv) == HARDWARE_TYPE_RTL8822BE) #define RX_HAL_IS_CCK_RATE(rxmcs) \ ((rxmcs) == DESC_RATE1M || \ @@ -592,7 +586,7 @@ enum rtl_hal_state { _HAL_STATE_START = 1, }; -enum rtl_desc92_rate { +enum rtl_desc_rate { DESC_RATE1M = 0x00, DESC_RATE2M = 0x01, DESC_RATE5_5M = 0x02, @@ -2477,6 +2471,8 @@ struct rtl_global_var { spinlock_t glb_list_lock; }; +#define IN_4WAY_TIMEOUT_TIME (30 * MSEC_PER_SEC) /* 30 seconds */ + struct rtl_btc_info { u8 bt_type; u8 btcoexist; @@ -2485,6 +2481,7 @@ struct rtl_btc_info { u8 ap_num; bool in_4way; + unsigned long in_4way_ts; }; struct bt_coexist_info { @@ -2558,6 +2555,8 @@ struct rtl_btc_ops { void (*btc_halt_notify) (void); void (*btc_btinfo_notify) (struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length); + void (*btc_btmpinfo_notify)(struct rtl_priv *rtlpriv, + u8 *tmp_buf, u8 length); bool (*btc_is_limited_dig) (struct rtl_priv *rtlpriv); bool (*btc_is_disable_edca_turbo) (struct rtl_priv *rtlpriv); bool (*btc_is_bt_disabled) (struct rtl_priv *rtlpriv); diff --git a/drivers/net/wireless/rsi/Makefile b/drivers/net/wireless/rsi/Makefile index a475c813674a..ebb89965997a 100644 --- a/drivers/net/wireless/rsi/Makefile +++ b/drivers/net/wireless/rsi/Makefile @@ -3,6 +3,7 @@ rsi_91x-y += rsi_91x_core.o rsi_91x-y += rsi_91x_mac80211.o rsi_91x-y += rsi_91x_mgmt.o rsi_91x-y += rsi_91x_hal.o +rsi_91x-y += rsi_91x_ps.o rsi_91x-$(CONFIG_RSI_DEBUGFS) += rsi_91x_debugfs.o rsi_usb-y += rsi_91x_usb.o rsi_91x_usb_ops.o diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c index 68f04a76769e..2b0516d2f63d 100644 --- a/drivers/net/wireless/rsi/rsi_91x_core.c +++ b/drivers/net/wireless/rsi/rsi_91x_core.c @@ -16,6 +16,7 @@ #include "rsi_mgmt.h" #include "rsi_common.h" +#include "rsi_hal.h" /** * rsi_determine_min_weight_queue() - This function determines the queue with @@ -136,6 +137,10 @@ static u8 rsi_core_determine_hal_queue(struct rsi_common *common) u8 q_num = INVALID_QUEUE; u8 ii = 0; + if (skb_queue_len(&common->tx_queue[MGMT_BEACON_Q])) { + q_num = MGMT_BEACON_Q; + return q_num; + } if (skb_queue_len(&common->tx_queue[MGMT_SOFT_Q])) { if (!common->mgmt_q_block) q_num = MGMT_SOFT_Q; @@ -268,11 +273,11 @@ void rsi_core_qos_processor(struct rsi_common *common) break; } - mutex_lock(&common->tx_rxlock); + mutex_lock(&common->tx_lock); status = adapter->check_hw_queue_status(adapter, q_num); if ((status <= 0)) { - mutex_unlock(&common->tx_rxlock); + mutex_unlock(&common->tx_lock); break; } @@ -287,30 +292,48 @@ void rsi_core_qos_processor(struct rsi_common *common) skb = rsi_core_dequeue_pkt(common, q_num); if (skb == NULL) { rsi_dbg(ERR_ZONE, "skb null\n"); - mutex_unlock(&common->tx_rxlock); + mutex_unlock(&common->tx_lock); break; } - if (q_num == MGMT_SOFT_Q) + if (q_num == MGMT_SOFT_Q) { status = rsi_send_mgmt_pkt(common, skb); - else + } else if (q_num == MGMT_BEACON_Q) { + status = rsi_send_pkt_to_bus(common, skb); + dev_kfree_skb(skb); + } else { status = rsi_send_data_pkt(common, skb); + } if (status) { - mutex_unlock(&common->tx_rxlock); + mutex_unlock(&common->tx_lock); break; } common->tx_stats.total_tx_pkt_send[q_num]++; tstamp_2 = jiffies; - mutex_unlock(&common->tx_rxlock); + mutex_unlock(&common->tx_lock); if (time_after(tstamp_2, tstamp_1 + (300 * HZ) 
/ 1000)) schedule(); } } +struct rsi_sta *rsi_find_sta(struct rsi_common *common, u8 *mac_addr) +{ + int i; + + for (i = 0; i < common->max_stations; i++) { + if (!common->stations[i].sta) + continue; + if (!(memcmp(common->stations[i].sta->addr, + mac_addr, ETH_ALEN))) + return &common->stations[i]; + } + return NULL; +} + /** * rsi_core_xmit() - This function transmits the packets received from mac80211 * @common: Pointer to the driver private structure. @@ -323,42 +346,63 @@ void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb) struct rsi_hw *adapter = common->priv; struct ieee80211_tx_info *info; struct skb_info *tx_params; - struct ieee80211_hdr *tmp_hdr = NULL; + struct ieee80211_hdr *wh; + struct ieee80211_vif *vif = adapter->vifs[0]; u8 q_num, tid = 0; + struct rsi_sta *rsta = NULL; if ((!skb) || (!skb->len)) { rsi_dbg(ERR_ZONE, "%s: Null skb/zero Length packet\n", __func__); goto xmit_fail; } - info = IEEE80211_SKB_CB(skb); - tx_params = (struct skb_info *)info->driver_data; - tmp_hdr = (struct ieee80211_hdr *)&skb->data[0]; - if (common->fsm_state != FSM_MAC_INIT_DONE) { rsi_dbg(ERR_ZONE, "%s: FSM state not open\n", __func__); goto xmit_fail; } - if ((ieee80211_is_mgmt(tmp_hdr->frame_control)) || - (ieee80211_is_ctl(tmp_hdr->frame_control)) || - (ieee80211_is_qos_nullfunc(tmp_hdr->frame_control))) { + info = IEEE80211_SKB_CB(skb); + tx_params = (struct skb_info *)info->driver_data; + wh = (struct ieee80211_hdr *)&skb->data[0]; + tx_params->sta_id = 0; + + if ((ieee80211_is_mgmt(wh->frame_control)) || + (ieee80211_is_ctl(wh->frame_control)) || + (ieee80211_is_qos_nullfunc(wh->frame_control))) { q_num = MGMT_SOFT_Q; skb->priority = q_num; } else { - if (ieee80211_is_data_qos(tmp_hdr->frame_control)) { + if (ieee80211_is_data_qos(wh->frame_control)) { tid = (skb->data[24] & IEEE80211_QOS_TID); skb->priority = TID_TO_WME_AC(tid); } else { tid = IEEE80211_NONQOS_TID; skb->priority = BE_Q; } + q_num = skb->priority; tx_params->tid = tid; - tx_params->sta_id = 0; + + if ((vif->type == NL80211_IFTYPE_AP) && + (!is_broadcast_ether_addr(wh->addr1)) && + (!is_multicast_ether_addr(wh->addr1))) { + rsta = rsi_find_sta(common, wh->addr1); + if (!rsta) + goto xmit_fail; + tx_params->sta_id = rsta->sta_id; + } + + if (rsta) { + /* Start aggregation if not done for this tid */ + if (!rsta->start_tx_aggr[tid]) { + rsta->start_tx_aggr[tid] = true; + ieee80211_start_tx_ba_session(rsta->sta, + tid, 0); + } + } } - if ((q_num != MGMT_SOFT_Q) && + if ((q_num < MGMT_SOFT_Q) && ((skb_queue_len(&common->tx_queue[q_num]) + 1) >= DATA_QUEUE_WATER_MARK)) { rsi_dbg(ERR_ZONE, "%s: sw queue full\n", __func__); diff --git a/drivers/net/wireless/rsi/rsi_91x_debugfs.c b/drivers/net/wireless/rsi/rsi_91x_debugfs.c index 4c0a493bd44e..e98eb55c26cc 100644 --- a/drivers/net/wireless/rsi/rsi_91x_debugfs.c +++ b/drivers/net/wireless/rsi/rsi_91x_debugfs.c @@ -130,6 +130,7 @@ static int rsi_stats_read(struct seq_file *seq, void *data) "FSM_COMMON_DEV_PARAMS_SENT", "FSM_BOOT_PARAMS_SENT", "FSM_EEPROM_READ_MAC_ADDR", + "FSM_EEPROM_READ_RF_TYPE", "FSM_RESET_MAC_SENT", "FSM_RADIO_CAPS_SENT", "FSM_BB_RF_PROG_SENT", @@ -138,6 +139,8 @@ static int rsi_stats_read(struct seq_file *seq, void *data) seq_puts(seq, "==> RSI STA DRIVER STATUS <==\n"); seq_puts(seq, "DRIVER_FSM_STATE: "); + BUILD_BUG_ON(ARRAY_SIZE(fsm_state) != NUM_FSM_STATES); + if (common->fsm_state <= FSM_MAC_INIT_DONE) seq_printf(seq, "%s", fsm_state[common->fsm_state]); diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c 
b/drivers/net/wireless/rsi/rsi_91x_hal.c index c2303599c12e..070dfd68bb83 100644 --- a/drivers/net/wireless/rsi/rsi_91x_hal.c +++ b/drivers/net/wireless/rsi/rsi_91x_hal.c @@ -18,6 +18,7 @@ #include "rsi_mgmt.h" #include "rsi_hal.h" #include "rsi_sdio.h" +#include "rsi_common.h" /* FLASH Firmware */ static struct ta_metadata metadata_flash_content[] = { @@ -25,99 +26,268 @@ static struct ta_metadata metadata_flash_content[] = { {"rsi/rs9113_wlan_qspi.rps", 0x00010000}, }; -/** - * rsi_send_data_pkt() - This function sends the recieved data packet from - * driver to device. - * @common: Pointer to the driver private structure. - * @skb: Pointer to the socket buffer structure. - * - * Return: status: 0 on success, -1 on failure. - */ -int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb) +int rsi_send_pkt_to_bus(struct rsi_common *common, struct sk_buff *skb) { struct rsi_hw *adapter = common->priv; - struct ieee80211_hdr *tmp_hdr; + int status; + + status = adapter->host_intf_ops->write_pkt(common->priv, + skb->data, skb->len); + return status; +} + +static int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb) +{ + struct rsi_hw *adapter = common->priv; + struct ieee80211_hdr *wh = NULL; + struct ieee80211_tx_info *info; + struct ieee80211_conf *conf = &adapter->hw->conf; + struct ieee80211_vif *vif = adapter->vifs[0]; + struct rsi_mgmt_desc *mgmt_desc; + struct skb_info *tx_params; + struct ieee80211_bss_conf *bss = NULL; + struct xtended_desc *xtend_desc = NULL; + u8 header_size; + u32 dword_align_bytes = 0; + + if (skb->len > MAX_MGMT_PKT_SIZE) { + rsi_dbg(INFO_ZONE, "%s: Dropping mgmt pkt > 512\n", __func__); + return -EINVAL; + } + + info = IEEE80211_SKB_CB(skb); + tx_params = (struct skb_info *)info->driver_data; + + /* Update header size */ + header_size = FRAME_DESC_SZ + sizeof(struct xtended_desc); + if (header_size > skb_headroom(skb)) { + rsi_dbg(ERR_ZONE, + "%s: Failed to add extended descriptor\n", + __func__); + return -ENOSPC; + } + skb_push(skb, header_size); + dword_align_bytes = ((unsigned long)skb->data & 0x3f); + if (dword_align_bytes > skb_headroom(skb)) { + rsi_dbg(ERR_ZONE, + "%s: Failed to add dword align\n", __func__); + return -ENOSPC; + } + skb_push(skb, dword_align_bytes); + header_size += dword_align_bytes; + + tx_params->internal_hdr_size = header_size; + memset(&skb->data[0], 0, header_size); + bss = &info->control.vif->bss_conf; + wh = (struct ieee80211_hdr *)&skb->data[header_size]; + + mgmt_desc = (struct rsi_mgmt_desc *)skb->data; + xtend_desc = (struct xtended_desc *)&skb->data[FRAME_DESC_SZ]; + + rsi_set_len_qno(&mgmt_desc->len_qno, (skb->len - FRAME_DESC_SZ), + RSI_WIFI_MGMT_Q); + mgmt_desc->frame_type = TX_DOT11_MGMT; + mgmt_desc->header_len = MIN_802_11_HDR_LEN; + mgmt_desc->xtend_desc_size = header_size - FRAME_DESC_SZ; + mgmt_desc->frame_info |= cpu_to_le16(RATE_INFO_ENABLE); + if (is_broadcast_ether_addr(wh->addr1)) + mgmt_desc->frame_info |= cpu_to_le16(RSI_BROADCAST_PKT); + + mgmt_desc->seq_ctrl = + cpu_to_le16(IEEE80211_SEQ_TO_SN(le16_to_cpu(wh->seq_ctrl))); + if (common->band == NL80211_BAND_2GHZ) + mgmt_desc->rate_info = RSI_RATE_1; + else + mgmt_desc->rate_info = RSI_RATE_6; + + if (conf_is_ht40(conf)) + mgmt_desc->bbp_info = cpu_to_le16(FULL40M_ENABLE); + + if (ieee80211_is_probe_req(wh->frame_control)) { + if (!bss->assoc) { + rsi_dbg(INFO_ZONE, + "%s: blocking mgmt queue\n", __func__); + mgmt_desc->misc_flags = RSI_DESC_REQUIRE_CFM_TO_HOST; + xtend_desc->confirm_frame_type = PROBEREQ_CONFIRM; + 
common->mgmt_q_block = true; + rsi_dbg(INFO_ZONE, "Mgmt queue blocked\n"); + } + } + + if (ieee80211_is_probe_resp(wh->frame_control)) { + mgmt_desc->misc_flags |= (RSI_ADD_DELTA_TSF_VAP_ID | + RSI_FETCH_RETRY_CNT_FRM_HST); +#define PROBE_RESP_RETRY_CNT 3 + xtend_desc->retry_cnt = PROBE_RESP_RETRY_CNT; + } + + if ((vif->type == NL80211_IFTYPE_AP) && + (ieee80211_is_action(wh->frame_control))) { + struct rsi_sta *rsta = rsi_find_sta(common, wh->addr1); + + if (rsta) + mgmt_desc->sta_id = tx_params->sta_id; + else + return -EINVAL; + } + return 0; +} + +/* This function prepares descriptor for given data packet */ +static int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb) +{ + struct rsi_hw *adapter = common->priv; + struct ieee80211_vif *vif; + struct ieee80211_hdr *wh = NULL; struct ieee80211_tx_info *info; struct skb_info *tx_params; struct ieee80211_bss_conf *bss; - int status; + struct rsi_data_desc *data_desc; + struct xtended_desc *xtend_desc; u8 ieee80211_size = MIN_802_11_HDR_LEN; - u8 extnd_size; - __le16 *frame_desc; + u8 header_size; + u8 vap_id = 0; + u8 dword_align_bytes; u16 seq_num; info = IEEE80211_SKB_CB(skb); bss = &info->control.vif->bss_conf; tx_params = (struct skb_info *)info->driver_data; - if (!bss->assoc) { - status = -EINVAL; - goto err; - } - - tmp_hdr = (struct ieee80211_hdr *)&skb->data[0]; - seq_num = (le16_to_cpu(tmp_hdr->seq_ctrl) >> 4); - - extnd_size = ((uintptr_t)skb->data & 0x3); - - if ((FRAME_DESC_SZ + extnd_size) > skb_headroom(skb)) { + header_size = FRAME_DESC_SZ + sizeof(struct xtended_desc); + if (header_size > skb_headroom(skb)) { rsi_dbg(ERR_ZONE, "%s: Unable to send pkt\n", __func__); - status = -ENOSPC; - goto err; + return -ENOSPC; } + skb_push(skb, header_size); + dword_align_bytes = ((unsigned long)skb->data & 0x3f); + if (header_size > skb_headroom(skb)) { + rsi_dbg(ERR_ZONE, "%s: Not enough headroom\n", __func__); + return -ENOSPC; + } + skb_push(skb, dword_align_bytes); + header_size += dword_align_bytes; - skb_push(skb, (FRAME_DESC_SZ + extnd_size)); - frame_desc = (__le16 *)&skb->data[0]; - memset((u8 *)frame_desc, 0, FRAME_DESC_SZ); + tx_params->internal_hdr_size = header_size; + data_desc = (struct rsi_data_desc *)skb->data; + memset(data_desc, 0, header_size); - if (ieee80211_is_data_qos(tmp_hdr->frame_control)) { + xtend_desc = (struct xtended_desc *)&skb->data[FRAME_DESC_SZ]; + wh = (struct ieee80211_hdr *)&skb->data[header_size]; + seq_num = IEEE80211_SEQ_TO_SN(le16_to_cpu(wh->seq_ctrl)); + vif = adapter->vifs[0]; + + data_desc->xtend_desc_size = header_size - FRAME_DESC_SZ; + + if (ieee80211_is_data_qos(wh->frame_control)) { ieee80211_size += 2; - frame_desc[6] |= cpu_to_le16(BIT(12)); + data_desc->mac_flags |= cpu_to_le16(RSI_QOS_ENABLE); } + if ((vif->type == NL80211_IFTYPE_STATION) && + (adapter->ps_state == PS_ENABLED)) + wh->frame_control |= cpu_to_le16(RSI_SET_PS_ENABLE); + if ((!(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) && (common->secinfo.security_enable)) { if (rsi_is_cipher_wep(common)) ieee80211_size += 4; else ieee80211_size += 8; - frame_desc[6] |= cpu_to_le16(BIT(15)); + data_desc->mac_flags |= cpu_to_le16(RSI_ENCRYPT_PKT); } + rsi_set_len_qno(&data_desc->len_qno, (skb->len - FRAME_DESC_SZ), + RSI_WIFI_DATA_Q); + data_desc->header_len = ieee80211_size; - frame_desc[0] = cpu_to_le16((skb->len - FRAME_DESC_SZ) | - (RSI_WIFI_DATA_Q << 12)); - frame_desc[2] = cpu_to_le16((extnd_size) | (ieee80211_size) << 8); - - if (common->min_rate != 0xffff) { + if (common->min_rate != RSI_RATE_AUTO) { 
/* Send fixed rate */ - frame_desc[3] = cpu_to_le16(RATE_INFO_ENABLE); - frame_desc[4] = cpu_to_le16(common->min_rate); + data_desc->frame_info = cpu_to_le16(RATE_INFO_ENABLE); + data_desc->rate_info = cpu_to_le16(common->min_rate); if (conf_is_ht40(&common->priv->hw->conf)) - frame_desc[5] = cpu_to_le16(FULL40M_ENABLE); + data_desc->bbp_info = cpu_to_le16(FULL40M_ENABLE); - if (common->vif_info[0].sgi) { - if (common->min_rate & 0x100) /* Only MCS rates */ - frame_desc[4] |= - cpu_to_le16(ENABLE_SHORTGI_RATE); + if ((common->vif_info[0].sgi) && (common->min_rate & 0x100)) { + /* Only MCS rates */ + data_desc->rate_info |= + cpu_to_le16(ENABLE_SHORTGI_RATE); } - } - frame_desc[6] |= cpu_to_le16(seq_num & 0xfff); - frame_desc[7] = cpu_to_le16(((tx_params->tid & 0xf) << 4) | - (skb->priority & 0xf) | - (tx_params->sta_id << 8)); + if (skb->protocol == cpu_to_be16(ETH_P_PAE)) { + rsi_dbg(INFO_ZONE, "*** Tx EAPOL ***\n"); + + data_desc->frame_info = cpu_to_le16(RATE_INFO_ENABLE); + if (common->band == NL80211_BAND_5GHZ) + data_desc->rate_info = cpu_to_le16(RSI_RATE_6); + else + data_desc->rate_info = cpu_to_le16(RSI_RATE_1); + data_desc->mac_flags |= cpu_to_le16(RSI_REKEY_PURPOSE); + data_desc->misc_flags |= RSI_FETCH_RETRY_CNT_FRM_HST; +#define EAPOL_RETRY_CNT 15 + xtend_desc->retry_cnt = EAPOL_RETRY_CNT; + } + + data_desc->mac_flags = cpu_to_le16(seq_num & 0xfff); + data_desc->qid_tid = ((skb->priority & 0xf) | + ((tx_params->tid & 0xf) << 4)); + data_desc->sta_id = tx_params->sta_id; + + if ((is_broadcast_ether_addr(wh->addr1)) || + (is_multicast_ether_addr(wh->addr1))) { + data_desc->frame_info = cpu_to_le16(RATE_INFO_ENABLE); + data_desc->frame_info |= cpu_to_le16(RSI_BROADCAST_PKT); + data_desc->sta_id = vap_id; + + if (vif->type == NL80211_IFTYPE_AP) { + if (common->band == NL80211_BAND_5GHZ) + data_desc->rate_info = cpu_to_le16(RSI_RATE_6); + else + data_desc->rate_info = cpu_to_le16(RSI_RATE_1); + } + } + if ((vif->type == NL80211_IFTYPE_AP) && + (ieee80211_has_moredata(wh->frame_control))) + data_desc->frame_info |= cpu_to_le16(MORE_DATA_PRESENT); + + return 0; +} + +/* This function sends received data packet from driver to device */ +int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb) +{ + struct rsi_hw *adapter = common->priv; + struct ieee80211_vif *vif = adapter->vifs[0]; + struct ieee80211_tx_info *info; + struct ieee80211_bss_conf *bss; + int status = -EINVAL; + + if (!skb) + return 0; + if (common->iface_down) + goto err; + + info = IEEE80211_SKB_CB(skb); + if (!info->control.vif) + goto err; + bss = &info->control.vif->bss_conf; + + if ((vif->type == NL80211_IFTYPE_STATION) && (!bss->assoc)) + goto err; + + status = rsi_prepare_data_desc(common, skb); + if (status) + goto err; status = adapter->host_intf_ops->write_pkt(common->priv, skb->data, skb->len); if (status) - rsi_dbg(ERR_ZONE, "%s: Failed to write pkt\n", - __func__); + rsi_dbg(ERR_ZONE, "%s: Failed to write pkt\n", __func__); err: ++common->tx_stats.total_tx_pkt_freed[skb->priority]; - rsi_indicate_tx_status(common->priv, skb, status); + rsi_indicate_tx_status(adapter, skb, status); return status; } @@ -133,22 +303,17 @@ int rsi_send_mgmt_pkt(struct rsi_common *common, struct sk_buff *skb) { struct rsi_hw *adapter = common->priv; - struct ieee80211_hdr *wh; struct ieee80211_tx_info *info; - struct ieee80211_bss_conf *bss; - struct ieee80211_hw *hw = adapter->hw; - struct ieee80211_conf *conf = &hw->conf; struct skb_info *tx_params; int status = -E2BIG; - __le16 *msg; u8 extnd_size; - u8 vap_id = 
0; info = IEEE80211_SKB_CB(skb); tx_params = (struct skb_info *)info->driver_data; extnd_size = ((uintptr_t)skb->data & 0x3); if (tx_params->flags & INTERNAL_MGMT_PKT) { + skb->data[1] |= BIT(7); /* Immediate Wakeup bit*/ if ((extnd_size) > skb_headroom(skb)) { rsi_dbg(ERR_ZONE, "%s: Unable to send pkt\n", __func__); dev_kfree_skb(skb); @@ -167,52 +332,12 @@ int rsi_send_mgmt_pkt(struct rsi_common *common, return status; } - bss = &info->control.vif->bss_conf; - wh = (struct ieee80211_hdr *)&skb->data[0]; - if (FRAME_DESC_SZ > skb_headroom(skb)) goto err; - skb_push(skb, FRAME_DESC_SZ); - memset(skb->data, 0, FRAME_DESC_SZ); - msg = (__le16 *)skb->data; - - if (skb->len > MAX_MGMT_PKT_SIZE) { - rsi_dbg(INFO_ZONE, "%s: Dropping mgmt pkt > 512\n", __func__); - goto err; - } - - msg[0] = cpu_to_le16((skb->len - FRAME_DESC_SZ) | - (RSI_WIFI_MGMT_Q << 12)); - msg[1] = cpu_to_le16(TX_DOT11_MGMT); - msg[2] = cpu_to_le16(MIN_802_11_HDR_LEN << 8); - msg[3] = cpu_to_le16(RATE_INFO_ENABLE); - msg[6] = cpu_to_le16(le16_to_cpu(wh->seq_ctrl) >> 4); - - if (wh->addr1[0] & BIT(0)) - msg[3] |= cpu_to_le16(RSI_BROADCAST_PKT); - - if (common->band == NL80211_BAND_2GHZ) - msg[4] = cpu_to_le16(RSI_11B_MODE); - else - msg[4] = cpu_to_le16((RSI_RATE_6 & 0x0f) | RSI_11G_MODE); - - if (conf_is_ht40(conf)) { - msg[4] = cpu_to_le16(0xB | RSI_11G_MODE); - msg[5] = cpu_to_le16(0x6); - } - - /* Indicate to firmware to give cfm */ - if ((skb->data[16] == IEEE80211_STYPE_PROBE_REQ) && (!bss->assoc)) { - msg[1] |= cpu_to_le16(BIT(10)); - msg[7] = cpu_to_le16(PROBEREQ_CONFIRM); - common->mgmt_q_block = true; - } - - msg[7] |= cpu_to_le16(vap_id << 8); - - status = adapter->host_intf_ops->write_pkt(common->priv, (u8 *)msg, - skb->len); + rsi_prepare_mgmt_desc(common, skb); + status = adapter->host_intf_ops->write_pkt(common->priv, + (u8 *)skb->data, skb->len); if (status) rsi_dbg(ERR_ZONE, "%s: Failed to write the packet\n", __func__); @@ -221,6 +346,61 @@ int rsi_send_mgmt_pkt(struct rsi_common *common, return status; } +int rsi_prepare_beacon(struct rsi_common *common, struct sk_buff *skb) +{ + struct rsi_hw *adapter = (struct rsi_hw *)common->priv; + struct rsi_data_desc *bcn_frm; + struct ieee80211_hw *hw = common->priv->hw; + struct ieee80211_conf *conf = &hw->conf; + struct sk_buff *mac_bcn; + u8 vap_id = 0; + u16 tim_offset; + + mac_bcn = ieee80211_beacon_get_tim(adapter->hw, + adapter->vifs[adapter->sc_nvifs - 1], + &tim_offset, NULL); + if (!mac_bcn) { + rsi_dbg(ERR_ZONE, "Failed to get beacon from mac80211\n"); + return -EINVAL; + } + + common->beacon_cnt++; + bcn_frm = (struct rsi_data_desc *)skb->data; + rsi_set_len_qno(&bcn_frm->len_qno, mac_bcn->len, RSI_WIFI_DATA_Q); + bcn_frm->header_len = MIN_802_11_HDR_LEN; + bcn_frm->frame_info = cpu_to_le16(RSI_DATA_DESC_MAC_BBP_INFO | + RSI_DATA_DESC_NO_ACK_IND | + RSI_DATA_DESC_BEACON_FRAME | + RSI_DATA_DESC_INSERT_TSF | + RSI_DATA_DESC_INSERT_SEQ_NO | + RATE_INFO_ENABLE); + bcn_frm->rate_info = cpu_to_le16(vap_id << 14); + bcn_frm->qid_tid = BEACON_HW_Q; + + if (conf_is_ht40_plus(conf)) { + bcn_frm->bbp_info = cpu_to_le16(LOWER_20_ENABLE); + bcn_frm->bbp_info |= cpu_to_le16(LOWER_20_ENABLE >> 12); + } else if (conf_is_ht40_minus(conf)) { + bcn_frm->bbp_info = cpu_to_le16(UPPER_20_ENABLE); + bcn_frm->bbp_info |= cpu_to_le16(UPPER_20_ENABLE >> 12); + } + + if (common->band == NL80211_BAND_2GHZ) + bcn_frm->bbp_info |= cpu_to_le16(RSI_RATE_1); + else + bcn_frm->bbp_info |= cpu_to_le16(RSI_RATE_6); + + if (mac_bcn->data[tim_offset + 2] == 0) + bcn_frm->frame_info |= 
cpu_to_le16(RSI_DATA_DESC_DTIM_BEACON); + + memcpy(&skb->data[FRAME_DESC_SZ], mac_bcn->data, mac_bcn->len); + skb_put(skb, mac_bcn->len + FRAME_DESC_SZ); + + dev_kfree_skb(mac_bcn); + + return 0; +} + static void bl_cmd_timeout(unsigned long priv) { struct rsi_hw *adapter = (struct rsi_hw *)priv; diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index 021e5ac5f107..fa12c05d9e23 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -18,6 +18,7 @@ #include "rsi_debugfs.h" #include "rsi_mgmt.h" #include "rsi_common.h" +#include "rsi_ps.h" static const struct ieee80211_channel rsi_2ghz_channels[] = { { .band = NL80211_BAND_2GHZ, .center_freq = 2412, @@ -121,6 +122,23 @@ const u16 rsi_mcsrates[8] = { RSI_RATE_MCS4, RSI_RATE_MCS5, RSI_RATE_MCS6, RSI_RATE_MCS7 }; +static const u32 rsi_max_ap_stas[16] = { + 32, /* 1 - Wi-Fi alone */ + 0, /* 2 */ + 0, /* 3 */ + 0, /* 4 - BT EDR alone */ + 4, /* 5 - STA + BT EDR */ + 32, /* 6 - AP + BT EDR */ + 0, /* 7 */ + 0, /* 8 - BT LE alone */ + 4, /* 9 - STA + BE LE */ + 0, /* 10 */ + 0, /* 11 */ + 0, /* 12 */ + 1, /* 13 - STA + BT Dual */ + 4, /* 14 - AP + BT Dual */ +}; + /** * rsi_is_cipher_wep() - This function determines if the cipher is WEP or not. * @common: Pointer to the driver private structure. @@ -229,12 +247,20 @@ void rsi_indicate_tx_status(struct rsi_hw *adapter, int status) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct skb_info *tx_params; - memset(info->driver_data, 0, IEEE80211_TX_INFO_DRIVER_DATA_SIZE); + if (!adapter->hw) { + rsi_dbg(ERR_ZONE, "##### No MAC #####\n"); + return; + } if (!status) info->flags |= IEEE80211_TX_STAT_ACK; + tx_params = (struct skb_info *)info->driver_data; + skb_pull(skb, tx_params->internal_hdr_size); + memset(info->driver_data, 0, IEEE80211_TX_INFO_DRIVER_DATA_SIZE); + ieee80211_tx_status_irqsafe(adapter->hw, skb); } @@ -271,11 +297,12 @@ static int rsi_mac80211_start(struct ieee80211_hw *hw) struct rsi_hw *adapter = hw->priv; struct rsi_common *common = adapter->priv; + rsi_dbg(ERR_ZONE, "===> Interface UP <===\n"); mutex_lock(&common->mutex); common->iface_down = false; - mutex_unlock(&common->mutex); - + wiphy_rfkill_start_polling(hw->wiphy); rsi_send_rx_filter_frame(common, 0); + mutex_unlock(&common->mutex); return 0; } @@ -291,8 +318,14 @@ static void rsi_mac80211_stop(struct ieee80211_hw *hw) struct rsi_hw *adapter = hw->priv; struct rsi_common *common = adapter->priv; + rsi_dbg(ERR_ZONE, "===> Interface DOWN <===\n"); mutex_lock(&common->mutex); common->iface_down = true; + wiphy_rfkill_stop_polling(hw->wiphy); + + /* Block all rx frames */ + rsi_send_rx_filter_frame(common, 0xffff); + mutex_unlock(&common->mutex); } @@ -309,24 +342,51 @@ static int rsi_mac80211_add_interface(struct ieee80211_hw *hw, { struct rsi_hw *adapter = hw->priv; struct rsi_common *common = adapter->priv; + enum opmode intf_mode; int ret = -EOPNOTSUPP; + vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; mutex_lock(&common->mutex); + + if (adapter->sc_nvifs > 1) { + mutex_unlock(&common->mutex); + return -EOPNOTSUPP; + } + switch (vif->type) { case NL80211_IFTYPE_STATION: - if (!adapter->sc_nvifs) { - ++adapter->sc_nvifs; - adapter->vifs[0] = vif; - ret = rsi_set_vap_capabilities(common, - STA_OPMODE, - VAP_ADD); - } + rsi_dbg(INFO_ZONE, "Station Mode"); + intf_mode = STA_OPMODE; + break; + case NL80211_IFTYPE_AP: + rsi_dbg(INFO_ZONE, "AP Mode"); + intf_mode = AP_OPMODE; break; default: 
rsi_dbg(ERR_ZONE, "%s: Interface type %d not supported\n", __func__, vif->type); + goto out; } + + adapter->vifs[adapter->sc_nvifs++] = vif; + ret = rsi_set_vap_capabilities(common, intf_mode, common->mac_addr, + 0, VAP_ADD); + if (ret) { + rsi_dbg(ERR_ZONE, "Failed to set VAP capabilities\n"); + goto out; + } + + if (vif->type == NL80211_IFTYPE_AP) { + int i; + + rsi_send_rx_filter_frame(common, DISALLOW_BEACONS); + common->min_rate = RSI_RATE_AUTO; + for (i = 0; i < common->max_stations; i++) + common->stations[i].sta = NULL; + } + +out: mutex_unlock(&common->mutex); return ret; @@ -345,13 +405,32 @@ static void rsi_mac80211_remove_interface(struct ieee80211_hw *hw, { struct rsi_hw *adapter = hw->priv; struct rsi_common *common = adapter->priv; + enum opmode opmode; + + rsi_dbg(INFO_ZONE, "Remove Interface Called\n"); mutex_lock(&common->mutex); - if (vif->type == NL80211_IFTYPE_STATION) { - adapter->sc_nvifs--; - rsi_set_vap_capabilities(common, STA_OPMODE, VAP_DELETE); + + if (adapter->sc_nvifs <= 0) { + mutex_unlock(&common->mutex); + return; } + switch (vif->type) { + case NL80211_IFTYPE_STATION: + opmode = STA_OPMODE; + break; + case NL80211_IFTYPE_AP: + opmode = AP_OPMODE; + break; + default: + mutex_unlock(&common->mutex); + return; + } + rsi_set_vap_capabilities(common, opmode, vif->addr, + 0, VAP_DELETE); + adapter->sc_nvifs--; + if (!memcmp(adapter->vifs[0], vif, sizeof(struct ieee80211_vif))) adapter->vifs[0] = NULL; mutex_unlock(&common->mutex); @@ -452,6 +531,8 @@ static int rsi_mac80211_config(struct ieee80211_hw *hw, { struct rsi_hw *adapter = hw->priv; struct rsi_common *common = adapter->priv; + struct ieee80211_vif *vif = adapter->vifs[0]; + struct ieee80211_conf *conf = &hw->conf; int status = -EOPNOTSUPP; mutex_lock(&common->mutex); @@ -465,6 +546,28 @@ static int rsi_mac80211_config(struct ieee80211_hw *hw, status = rsi_config_power(hw); } + /* Power save parameters */ + if ((changed & IEEE80211_CONF_CHANGE_PS) && + (vif->type == NL80211_IFTYPE_STATION)) { + unsigned long flags; + + spin_lock_irqsave(&adapter->ps_lock, flags); + if (conf->flags & IEEE80211_CONF_PS) + rsi_enable_ps(adapter); + else + rsi_disable_ps(adapter); + spin_unlock_irqrestore(&adapter->ps_lock, flags); + } + + /* RTS threshold */ + if (changed & WIPHY_PARAM_RTS_THRESHOLD) { + rsi_dbg(INFO_ZONE, "RTS threshold\n"); + if ((common->rts_threshold) <= IEEE80211_MAX_RTS_THRESHOLD) { + rsi_dbg(INFO_ZONE, + "%s: Sending vap updates....\n", __func__); + status = rsi_send_vap_dynamic_update(common); + } + } mutex_unlock(&common->mutex); return status; @@ -507,6 +610,8 @@ static void rsi_mac80211_bss_info_changed(struct ieee80211_hw *hw, { struct rsi_hw *adapter = hw->priv; struct rsi_common *common = adapter->priv; + struct ieee80211_bss_conf *bss = &vif->bss_conf; + struct ieee80211_conf *conf = &hw->conf; u16 rx_filter_word = 0; mutex_lock(&common->mutex); @@ -521,10 +626,24 @@ static void rsi_mac80211_bss_info_changed(struct ieee80211_hw *hw, rsi_send_rx_filter_frame(common, rx_filter_word); } rsi_inform_bss_status(common, + STA_OPMODE, bss_conf->assoc, bss_conf->bssid, bss_conf->qos, - bss_conf->aid); + bss_conf->aid, + NULL, 0); + adapter->ps_info.dtim_interval_duration = bss->dtim_period; + adapter->ps_info.listen_interval = conf->listen_interval; + + /* If U-APSD is updated, send ps parameters to firmware */ + if (bss->assoc) { + if (common->uapsd_bitmap) { + rsi_dbg(INFO_ZONE, "Configuring UAPSD\n"); + rsi_conf_uapsd(adapter); + } + } else { + common->uapsd_bitmap = 0; + } } if (changed & 
BSS_CHANGED_CQM) { @@ -535,6 +654,18 @@ static void rsi_mac80211_bss_info_changed(struct ieee80211_hw *hw, common->cqm_info.rssi_thold, common->cqm_info.rssi_hyst); } + + if ((changed & BSS_CHANGED_BEACON_ENABLED) && + (vif->type == NL80211_IFTYPE_AP)) { + if (bss->enable_beacon) { + rsi_dbg(INFO_ZONE, "===> BEACON ENABLED <===\n"); + common->beacon_enabled = 1; + } else { + rsi_dbg(INFO_ZONE, "===> BEACON DISABLED <===\n"); + common->beacon_enabled = 0; + } + } + mutex_unlock(&common->mutex); } @@ -606,6 +737,12 @@ static int rsi_mac80211_conf_tx(struct ieee80211_hw *hw, memcpy(&common->edca_params[idx], params, sizeof(struct ieee80211_tx_queue_params)); + + if (params->uapsd) + common->uapsd_bitmap |= idx; + else + common->uapsd_bitmap &= (~idx); + mutex_unlock(&common->mutex); return 0; @@ -617,15 +754,18 @@ static int rsi_mac80211_conf_tx(struct ieee80211_hw *hw, * @vif: Pointer to the ieee80211_vif structure. * @key: Pointer to the ieee80211_key_conf structure. * - * Return: status: 0 on success, -1 on failure. + * Return: status: 0 on success, negative error codes on failure. */ static int rsi_hal_key_config(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_key_conf *key) + struct ieee80211_key_conf *key, + struct ieee80211_sta *sta) { struct rsi_hw *adapter = hw->priv; + struct rsi_sta *rsta = NULL; int status; u8 key_type; + s16 sta_id = 0; if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) key_type = RSI_PAIRWISE_KEY; @@ -635,23 +775,35 @@ static int rsi_hal_key_config(struct ieee80211_hw *hw, rsi_dbg(ERR_ZONE, "%s: Cipher 0x%x key_type: %d key_len: %d\n", __func__, key->cipher, key_type, key->keylen); - if ((key->cipher == WLAN_CIPHER_SUITE_WEP104) || - (key->cipher == WLAN_CIPHER_SUITE_WEP40)) { - status = rsi_hal_load_key(adapter->priv, - key->key, - key->keylen, - RSI_PAIRWISE_KEY, - key->keyidx, - key->cipher); - if (status) - return status; + if (vif->type == NL80211_IFTYPE_AP) { + if (sta) { + rsta = rsi_find_sta(adapter->priv, sta->addr); + if (rsta) + sta_id = rsta->sta_id; + } + adapter->priv->key = key; + } else { + if ((key->cipher == WLAN_CIPHER_SUITE_WEP104) || + (key->cipher == WLAN_CIPHER_SUITE_WEP40)) { + status = rsi_hal_load_key(adapter->priv, + key->key, + key->keylen, + RSI_PAIRWISE_KEY, + key->keyidx, + key->cipher, + sta_id); + if (status) + return status; + } } + return rsi_hal_load_key(adapter->priv, key->key, key->keylen, key_type, key->keyidx, - key->cipher); + key->cipher, + sta_id); } /** @@ -679,7 +831,7 @@ static int rsi_mac80211_set_key(struct ieee80211_hw *hw, switch (cmd) { case SET_KEY: secinfo->security_enable = true; - status = rsi_hal_key_config(hw, vif, key); + status = rsi_hal_key_config(hw, vif, key, sta); if (status) { mutex_unlock(&common->mutex); return status; @@ -697,10 +849,11 @@ static int rsi_mac80211_set_key(struct ieee80211_hw *hw, break; case DISABLE_KEY: - secinfo->security_enable = false; + if (vif->type == NL80211_IFTYPE_STATION) + secinfo->security_enable = false; rsi_dbg(ERR_ZONE, "%s: RSI del key\n", __func__); memset(key, 0, sizeof(struct ieee80211_key_conf)); - status = rsi_hal_key_config(hw, vif, key); + status = rsi_hal_key_config(hw, vif, key, sta); break; default: @@ -729,9 +882,11 @@ static int rsi_mac80211_ampdu_action(struct ieee80211_hw *hw, int status = -EOPNOTSUPP; struct rsi_hw *adapter = hw->priv; struct rsi_common *common = adapter->priv; - u16 seq_no = 0; + struct rsi_sta *rsta = NULL; + u16 seq_no = 0, seq_start = 0; u8 ii = 0; struct ieee80211_sta *sta = params->sta; + u8 sta_id = 0; enum 
ieee80211_ampdu_mlme_action action = params->action; u16 tid = params->tid; u16 *ssn = &params->ssn; @@ -743,17 +898,32 @@ static int rsi_mac80211_ampdu_action(struct ieee80211_hw *hw, } mutex_lock(&common->mutex); - rsi_dbg(INFO_ZONE, "%s: AMPDU action %d called\n", __func__, action); + if (ssn != NULL) seq_no = *ssn; + if (vif->type == NL80211_IFTYPE_AP) { + rsta = rsi_find_sta(common, sta->addr); + if (!rsta) { + rsi_dbg(ERR_ZONE, "No station mapped\n"); + status = 0; + goto unlock; + } + sta_id = rsta->sta_id; + } + + rsi_dbg(INFO_ZONE, + "%s: AMPDU action tid=%d ssn=0x%x, buf_size=%d sta_id=%d\n", + __func__, tid, seq_no, buf_size, sta_id); + switch (action) { case IEEE80211_AMPDU_RX_START: status = rsi_send_aggregation_params_frame(common, tid, seq_no, buf_size, - STA_RX_ADDBA_DONE); + STA_RX_ADDBA_DONE, + sta_id); break; case IEEE80211_AMPDU_RX_STOP: @@ -761,11 +931,15 @@ static int rsi_mac80211_ampdu_action(struct ieee80211_hw *hw, tid, 0, buf_size, - STA_RX_DELBA); + STA_RX_DELBA, + sta_id); break; case IEEE80211_AMPDU_TX_START: - common->vif_info[ii].seq_start = seq_no; + if (vif->type == NL80211_IFTYPE_STATION) + common->vif_info[ii].seq_start = seq_no; + else if (vif->type == NL80211_IFTYPE_AP) + rsta->seq_start[tid] = seq_no; ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); status = 0; break; @@ -777,18 +951,23 @@ static int rsi_mac80211_ampdu_action(struct ieee80211_hw *hw, tid, seq_no, buf_size, - STA_TX_DELBA); + STA_TX_DELBA, + sta_id); if (!status) ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; case IEEE80211_AMPDU_TX_OPERATIONAL: + if (vif->type == NL80211_IFTYPE_STATION) + seq_start = common->vif_info[ii].seq_start; + else if (vif->type == NL80211_IFTYPE_AP) + seq_start = rsta->seq_start[tid]; status = rsi_send_aggregation_params_frame(common, tid, - common->vif_info[ii] - .seq_start, + seq_start, buf_size, - STA_TX_ADDBA_DONE); + STA_TX_ADDBA_DONE, + sta_id); break; default: @@ -796,6 +975,7 @@ static int rsi_mac80211_ampdu_action(struct ieee80211_hw *hw, break; } +unlock: mutex_unlock(&common->mutex); return status; } @@ -1014,7 +1194,7 @@ static void rsi_set_min_rate(struct ieee80211_hw *hw, * @vif: Pointer to the ieee80211_vif structure. * @sta: Pointer to the ieee80211_sta structure. * - * Return: 0 on success, -1 on failure. + * Return: 0 on success, negative error codes on failure. 
*/ static int rsi_mac80211_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, @@ -1022,22 +1202,101 @@ static int rsi_mac80211_sta_add(struct ieee80211_hw *hw, { struct rsi_hw *adapter = hw->priv; struct rsi_common *common = adapter->priv; + bool sta_exist = false; + struct rsi_sta *rsta; + int status = 0; + + rsi_dbg(INFO_ZONE, "Station Add: %pM\n", sta->addr); mutex_lock(&common->mutex); - rsi_set_min_rate(hw, sta, common); + if (vif->type == NL80211_IFTYPE_AP) { + u8 cnt; + int sta_idx = -1; + int free_index = -1; - if ((sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) || - (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)) { - common->vif_info[0].sgi = true; + /* Check if max stations reached */ + if (common->num_stations >= common->max_stations) { + rsi_dbg(ERR_ZONE, "Reject: Max Stations exists\n"); + status = -EOPNOTSUPP; + goto unlock; + } + for (cnt = 0; cnt < common->max_stations; cnt++) { + rsta = &common->stations[cnt]; + + if (!rsta->sta) { + if (free_index < 0) + free_index = cnt; + continue; + } + if (!memcmp(rsta->sta->addr, sta->addr, ETH_ALEN)) { + rsi_dbg(INFO_ZONE, "Station exists\n"); + sta_idx = cnt; + sta_exist = true; + break; + } + } + if (!sta_exist) { + if (free_index >= 0) + sta_idx = free_index; + } + if (sta_idx < 0) { + rsi_dbg(ERR_ZONE, + "%s: Some problem reaching here...\n", + __func__); + status = -EINVAL; + goto unlock; + } + rsta = &common->stations[sta_idx]; + rsta->sta = sta; + rsta->sta_id = sta_idx; + for (cnt = 0; cnt < IEEE80211_NUM_TIDS; cnt++) + rsta->start_tx_aggr[cnt] = false; + for (cnt = 0; cnt < IEEE80211_NUM_TIDS; cnt++) + rsta->seq_start[cnt] = 0; + if (!sta_exist) { + rsi_dbg(INFO_ZONE, "New Station\n"); + + /* Send peer notify to device */ + rsi_dbg(INFO_ZONE, "Indicate bss status to device\n"); + rsi_inform_bss_status(common, AP_OPMODE, 1, sta->addr, + sta->wme, sta->aid, sta, sta_idx); + + if (common->key) { + struct ieee80211_key_conf *key = common->key; + + if ((key->cipher == WLAN_CIPHER_SUITE_WEP104) || + (key->cipher == WLAN_CIPHER_SUITE_WEP40)) + rsi_hal_load_key(adapter->priv, + key->key, + key->keylen, + RSI_PAIRWISE_KEY, + key->keyidx, + key->cipher, + sta_idx); + } + + common->num_stations++; + } } - if (sta->ht_cap.ht_supported) - ieee80211_start_tx_ba_session(sta, 0, 0); + if (vif->type == NL80211_IFTYPE_STATION) { + rsi_set_min_rate(hw, sta, common); + if (sta->ht_cap.ht_supported) { + common->vif_info[0].is_ht = true; + common->bitrate_mask[NL80211_BAND_2GHZ] = + sta->supp_rates[NL80211_BAND_2GHZ]; + if ((sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) || + (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)) + common->vif_info[0].sgi = true; + ieee80211_start_tx_ba_session(sta, 0, 0); + } + } +unlock: mutex_unlock(&common->mutex); - return 0; + return status; } /** @@ -1047,7 +1306,7 @@ static int rsi_mac80211_sta_add(struct ieee80211_hw *hw, * @vif: Pointer to the ieee80211_vif structure. * @sta: Pointer to the ieee80211_sta structure. * - * Return: 0 on success, -1 on failure. + * Return: 0 on success, negative error codes on failure. 
*/ static int rsi_mac80211_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, @@ -1055,21 +1314,55 @@ static int rsi_mac80211_sta_remove(struct ieee80211_hw *hw, { struct rsi_hw *adapter = hw->priv; struct rsi_common *common = adapter->priv; + struct ieee80211_bss_conf *bss = &vif->bss_conf; + struct rsi_sta *rsta; + + rsi_dbg(INFO_ZONE, "Station Remove: %pM\n", sta->addr); mutex_lock(&common->mutex); - /* Resetting all the fields to default values */ - common->bitrate_mask[NL80211_BAND_2GHZ] = 0; - common->bitrate_mask[NL80211_BAND_5GHZ] = 0; - common->min_rate = 0xffff; - common->vif_info[0].is_ht = false; - common->vif_info[0].sgi = false; - common->vif_info[0].seq_start = 0; - common->secinfo.ptk_cipher = 0; - common->secinfo.gtk_cipher = 0; + if (vif->type == NL80211_IFTYPE_AP) { + u8 sta_idx, cnt; - rsi_send_rx_filter_frame(common, 0); - + /* Send peer notify to device */ + rsi_dbg(INFO_ZONE, "Indicate bss status to device\n"); + for (sta_idx = 0; sta_idx < common->max_stations; sta_idx++) { + rsta = &common->stations[sta_idx]; + + if (!rsta->sta) + continue; + if (!memcmp(rsta->sta->addr, sta->addr, ETH_ALEN)) { + rsi_inform_bss_status(common, AP_OPMODE, 0, + sta->addr, sta->wme, + sta->aid, sta, sta_idx); + rsta->sta = NULL; + rsta->sta_id = -1; + for (cnt = 0; cnt < IEEE80211_NUM_TIDS; cnt++) + rsta->start_tx_aggr[cnt] = false; + if (common->num_stations > 0) + common->num_stations--; + break; + } + } + if (sta_idx >= common->max_stations) + rsi_dbg(ERR_ZONE, "%s: No station found\n", __func__); + } + + if (vif->type == NL80211_IFTYPE_STATION) { + /* Resetting all the fields to default values */ + memcpy((u8 *)bss->bssid, (u8 *)sta->addr, ETH_ALEN); + bss->qos = sta->wme; + common->bitrate_mask[NL80211_BAND_2GHZ] = 0; + common->bitrate_mask[NL80211_BAND_5GHZ] = 0; + common->min_rate = 0xffff; + common->vif_info[0].is_ht = false; + common->vif_info[0].sgi = false; + common->vif_info[0].seq_start = 0; + common->secinfo.ptk_cipher = 0; + common->secinfo.gtk_cipher = 0; + if (!common->iface_down) + rsi_send_rx_filter_frame(common, 0); + } mutex_unlock(&common->mutex); return 0; @@ -1133,7 +1426,7 @@ static int rsi_mac80211_set_antenna(struct ieee80211_hw *hw, * @tx_ant: Bitmap for tx antenna * @rx_ant: Bitmap for rx antenna * - * Return: 0 on success, -1 on failure. + * Return: 0 on success, negative error codes on failure. 
*/ static int rsi_mac80211_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) @@ -1151,6 +1444,21 @@ static int rsi_mac80211_get_antenna(struct ieee80211_hw *hw, return 0; } +static int rsi_map_region_code(enum nl80211_dfs_regions region_code) +{ + switch (region_code) { + case NL80211_DFS_FCC: + return RSI_REGION_FCC; + case NL80211_DFS_ETSI: + return RSI_REGION_ETSI; + case NL80211_DFS_JP: + return RSI_REGION_TELEC; + case NL80211_DFS_UNSET: + return RSI_REGION_WORLD; + } + return RSI_REGION_WORLD; +} + static void rsi_reg_notify(struct wiphy *wiphy, struct regulatory_request *request) { @@ -1158,26 +1466,49 @@ static void rsi_reg_notify(struct wiphy *wiphy, struct ieee80211_channel *ch; struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct rsi_hw * adapter = hw->priv; + struct rsi_common *common = adapter->priv; int i; - - sband = wiphy->bands[NL80211_BAND_5GHZ]; - for (i = 0; i < sband->n_channels; i++) { - ch = &sband->channels[i]; - if (ch->flags & IEEE80211_CHAN_DISABLED) - continue; + mutex_lock(&common->mutex); - if (ch->flags & IEEE80211_CHAN_RADAR) - ch->flags |= IEEE80211_CHAN_NO_IR; - } - - rsi_dbg(INFO_ZONE, - "country = %s dfs_region = %d\n", + rsi_dbg(INFO_ZONE, "country = %s dfs_region = %d\n", request->alpha2, request->dfs_region); - adapter->dfs_region = request->dfs_region; + + if (common->num_supp_bands > 1) { + sband = wiphy->bands[NL80211_BAND_5GHZ]; + + for (i = 0; i < sband->n_channels; i++) { + ch = &sband->channels[i]; + if (ch->flags & IEEE80211_CHAN_DISABLED) + continue; + + if (ch->flags & IEEE80211_CHAN_RADAR) + ch->flags |= IEEE80211_CHAN_NO_IR; + } + } + adapter->dfs_region = rsi_map_region_code(request->dfs_region); + rsi_dbg(INFO_ZONE, "RSI region code = %d\n", adapter->dfs_region); + + adapter->country[0] = request->alpha2[0]; + adapter->country[1] = request->alpha2[1]; + + mutex_unlock(&common->mutex); } -static struct ieee80211_ops mac80211_ops = { +static void rsi_mac80211_rfkill_poll(struct ieee80211_hw *hw) +{ + struct rsi_hw *adapter = hw->priv; + struct rsi_common *common = adapter->priv; + + mutex_lock(&common->mutex); + if (common->fsm_state != FSM_MAC_INIT_DONE) + wiphy_rfkill_set_hw_state(hw->wiphy, true); + else + wiphy_rfkill_set_hw_state(hw->wiphy, false); + mutex_unlock(&common->mutex); +} + +static const struct ieee80211_ops mac80211_ops = { .tx = rsi_mac80211_tx, .start = rsi_mac80211_start, .stop = rsi_mac80211_stop, @@ -1195,13 +1526,14 @@ static struct ieee80211_ops mac80211_ops = { .sta_remove = rsi_mac80211_sta_remove, .set_antenna = rsi_mac80211_set_antenna, .get_antenna = rsi_mac80211_get_antenna, + .rfkill_poll = rsi_mac80211_rfkill_poll, }; /** * rsi_mac80211_attach() - This function is used to initialize Mac80211 stack. * @common: Pointer to the driver private structure. * - * Return: 0 on success, -1 on failure. + * Return: 0 on success, negative error codes on failure. 
*/ int rsi_mac80211_attach(struct rsi_common *common) { @@ -1229,12 +1561,16 @@ int rsi_mac80211_attach(struct rsi_common *common) ieee80211_hw_set(hw, SIGNAL_DBM); ieee80211_hw_set(hw, HAS_RATE_CONTROL); ieee80211_hw_set(hw, AMPDU_AGGREGATION); + ieee80211_hw_set(hw, SUPPORTS_PS); + ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS); hw->queues = MAX_HW_QUEUES; hw->extra_tx_headroom = RSI_NEEDED_HEADROOM; hw->max_rates = 1; hw->max_rate_tries = MAX_RETRIES; + hw->uapsd_queues = RSI_IEEE80211_UAPSD_QUEUES; + hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL; hw->max_tx_aggregation_subframes = 6; rsi_register_rates_channels(adapter, NL80211_BAND_2GHZ); @@ -1244,7 +1580,8 @@ int rsi_mac80211_attach(struct rsi_common *common) SET_IEEE80211_PERM_ADDR(hw, common->mac_addr); ether_addr_copy(hw->wiphy->addr_mask, addr_mask); - wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); + wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_AP); wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; wiphy->retry_short = RETRY_SHORT; wiphy->retry_long = RETRY_LONG; @@ -1259,6 +1596,14 @@ int rsi_mac80211_attach(struct rsi_common *common) wiphy->bands[NL80211_BAND_5GHZ] = &adapter->sbands[NL80211_BAND_5GHZ]; + /* AP Parameters */ + wiphy->max_ap_assoc_sta = rsi_max_ap_stas[common->oper_mode - 1]; + common->max_stations = wiphy->max_ap_assoc_sta; + rsi_dbg(ERR_ZONE, "Max Stations Allowed = %d\n", common->max_stations); + hw->sta_data_size = sizeof(struct rsi_sta); + wiphy->flags = WIPHY_FLAG_REPORTS_OBSS; + wiphy->flags |= WIPHY_FLAG_AP_UAPSD; + wiphy->features |= NL80211_FEATURE_INACTIVITY_TIMER; wiphy->reg_notifier = rsi_reg_notify; wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c index f1cde0ca81f9..3e1e80888d98 100644 --- a/drivers/net/wireless/rsi/rsi_91x_main.c +++ b/drivers/net/wireless/rsi/rsi_91x_main.c @@ -220,7 +220,8 @@ struct rsi_hw *rsi_91x_init(void) rsi_init_event(&common->tx_thread.event); mutex_init(&common->mutex); - mutex_init(&common->tx_rxlock); + mutex_init(&common->tx_lock); + mutex_init(&common->rx_lock); if (rsi_create_kthread(common, &common->tx_thread, @@ -230,6 +231,8 @@ struct rsi_hw *rsi_91x_init(void) goto err; } + rsi_default_ps_params(adapter); + spin_lock_init(&adapter->ps_lock); common->init_done = true; return adapter; diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index d4d365b5d2d6..f7b550f900c4 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -17,6 +17,8 @@ #include <linux/etherdevice.h> #include "rsi_mgmt.h" #include "rsi_common.h" +#include "rsi_ps.h" +#include "rsi_hal.h" static struct bootup_params boot_params_20 = { .magic_number = cpu_to_le16(0x5aa5), @@ -230,6 +232,8 @@ static void rsi_set_default_parameters(struct rsi_common *common) common->rf_power_val = 0; /* Default 1.9V */ common->wlan_rf_power_mode = 0; common->obm_ant_sel_val = 2; + common->beacon_interval = RSI_BEACON_INTERVAL; + common->dtim_cnt = RSI_DTIM_COUNT; } /** @@ -266,11 +270,15 @@ static int rsi_send_internal_mgmt_frame(struct rsi_common *common, struct sk_buff *skb) { struct skb_info *tx_params; + struct rsi_cmd_desc *desc; if (skb == NULL) { rsi_dbg(ERR_ZONE, "%s: Unable to allocate skb\n", __func__); return -ENOMEM; } + desc = (struct rsi_cmd_desc *)skb->data; + desc->desc_dword0.len_qno |= cpu_to_le16(DESC_IMMEDIATE_WAKEUP); + skb->priority = MGMT_SOFT_Q; tx_params = (struct skb_info
*)&IEEE80211_SKB_CB(skb)->driver_data; tx_params->flags |= INTERNAL_MGMT_PKT; skb_queue_tail(&common->tx_queue[MGMT_SOFT_Q], skb); @@ -298,10 +306,11 @@ static int rsi_load_radio_caps(struct rsi_common *common) 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0}; struct sk_buff *skb; + u16 frame_len = sizeof(struct rsi_radio_caps); rsi_dbg(INFO_ZONE, "%s: Sending rate symbol req frame\n", __func__); - skb = dev_alloc_skb(sizeof(struct rsi_radio_caps)); + skb = dev_alloc_skb(frame_len); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", @@ -309,37 +318,40 @@ static int rsi_load_radio_caps(struct rsi_common *common) return -ENOMEM; } - memset(skb->data, 0, sizeof(struct rsi_radio_caps)); + memset(skb->data, 0, frame_len); radio_caps = (struct rsi_radio_caps *)skb->data; - radio_caps->desc_word[1] = cpu_to_le16(RADIO_CAPABILITIES); - radio_caps->desc_word[4] = cpu_to_le16(RSI_RF_TYPE << 8); + radio_caps->desc_dword0.frame_type = RADIO_CAPABILITIES; + radio_caps->channel_num = common->channel; + radio_caps->rf_model = RSI_RF_TYPE; if (common->channel_width == BW_40MHZ) { - radio_caps->desc_word[7] |= cpu_to_le16(RSI_LMAC_CLOCK_80MHZ); - radio_caps->desc_word[7] |= cpu_to_le16(RSI_ENABLE_40MHZ); + radio_caps->radio_cfg_info = RSI_LMAC_CLOCK_80MHZ; + radio_caps->radio_cfg_info |= RSI_ENABLE_40MHZ; if (common->fsm_state == FSM_MAC_INIT_DONE) { struct ieee80211_hw *hw = adapter->hw; struct ieee80211_conf *conf = &hw->conf; + if (conf_is_ht40_plus(conf)) { - radio_caps->desc_word[5] = - cpu_to_le16(LOWER_20_ENABLE); - radio_caps->desc_word[5] |= - cpu_to_le16(LOWER_20_ENABLE >> 12); + radio_caps->radio_cfg_info = + RSI_CMDDESC_LOWER_20_ENABLE; + radio_caps->radio_info = + RSI_CMDDESC_LOWER_20_ENABLE; } else if (conf_is_ht40_minus(conf)) { - radio_caps->desc_word[5] = - cpu_to_le16(UPPER_20_ENABLE); - radio_caps->desc_word[5] |= - cpu_to_le16(UPPER_20_ENABLE >> 12); + radio_caps->radio_cfg_info = + RSI_CMDDESC_UPPER_20_ENABLE; + radio_caps->radio_info = + RSI_CMDDESC_UPPER_20_ENABLE; } else { - radio_caps->desc_word[5] = - cpu_to_le16(BW_40MHZ << 12); - radio_caps->desc_word[5] |= - cpu_to_le16(FULL40M_ENABLE); + radio_caps->radio_cfg_info = + RSI_CMDDESC_40MHZ; + radio_caps->radio_info = + RSI_CMDDESC_FULL_40_ENABLE; } } } + radio_caps->radio_info |= radio_id; radio_caps->sifs_tx_11n = cpu_to_le16(SIFS_TX_11N_VALUE); radio_caps->sifs_tx_11b = cpu_to_le16(SIFS_TX_11B_VALUE); @@ -348,8 +360,6 @@ static int rsi_load_radio_caps(struct rsi_common *common) radio_caps->cck_ack_tout = cpu_to_le16(CCK_ACK_TOUT_VALUE); radio_caps->preamble_type = cpu_to_le16(LONG_PREAMBLE); - radio_caps->desc_word[7] |= cpu_to_le16(radio_id << 8); - for (ii = 0; ii < MAX_HW_QUEUES; ii++) { radio_caps->qos_params[ii].cont_win_min_q = cpu_to_le16(3); radio_caps->qos_params[ii].cont_win_max_q = cpu_to_le16(0x3f); @@ -357,7 +367,7 @@ static int rsi_load_radio_caps(struct rsi_common *common) radio_caps->qos_params[ii].txop_q = 0; } - for (ii = 0; ii < MAX_HW_QUEUES - 4; ii++) { + for (ii = 0; ii < NUM_EDCA_QUEUES; ii++) { radio_caps->qos_params[ii].cont_win_min_q = cpu_to_le16(common->edca_params[ii].cw_min); radio_caps->qos_params[ii].cont_win_max_q = @@ -368,17 +378,19 @@ static int rsi_load_radio_caps(struct rsi_common *common) cpu_to_le16(common->edca_params[ii].txop); } + radio_caps->qos_params[BROADCAST_HW_Q].txop_q = cpu_to_le16(0xffff); + radio_caps->qos_params[MGMT_HW_Q].txop_q = 0; + radio_caps->qos_params[BEACON_HW_Q].txop_q = cpu_to_le16(0xffff); + memcpy(&common->rate_pwr[0], &gc[0], 40); for (ii = 0; ii 
< 20; ii++) radio_caps->gcpd_per_rate[inx++] = cpu_to_le16(common->rate_pwr[ii] & 0x00FF); - radio_caps->desc_word[0] = cpu_to_le16((sizeof(struct rsi_radio_caps) - - FRAME_DESC_SZ) | - (RSI_WIFI_MGMT_Q << 12)); + rsi_set_len_qno(&radio_caps->desc_dword0.len_qno, + (frame_len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q); - - skb_put(skb, (sizeof(struct rsi_radio_caps))); + skb_put(skb, frame_len); return rsi_send_internal_mgmt_frame(common, skb); } @@ -394,8 +406,7 @@ static int rsi_load_radio_caps(struct rsi_common *common) */ static int rsi_mgmt_pkt_to_core(struct rsi_common *common, u8 *msg, - s32 msg_len, - u8 type) + s32 msg_len) { struct rsi_hw *adapter = common->priv; struct ieee80211_tx_info *info; @@ -403,38 +414,31 @@ static int rsi_mgmt_pkt_to_core(struct rsi_common *common, u8 pad_bytes = msg[4]; struct sk_buff *skb; - if (type == RX_DOT11_MGMT) { - if (!adapter->sc_nvifs) - return -ENOLINK; + if (!adapter->sc_nvifs) + return -ENOLINK; - msg_len -= pad_bytes; - if (msg_len <= 0) { - rsi_dbg(MGMT_RX_ZONE, - "%s: Invalid rx msg of len = %d\n", - __func__, msg_len); - return -EINVAL; - } - - skb = dev_alloc_skb(msg_len); - if (!skb) { - rsi_dbg(ERR_ZONE, "%s: Failed to allocate skb\n", - __func__); - return -ENOMEM; - } - - skb_put_data(skb, - (u8 *)(msg + FRAME_DESC_SZ + pad_bytes), - msg_len); - - info = IEEE80211_SKB_CB(skb); - rx_params = (struct skb_info *)info->driver_data; - rx_params->rssi = rsi_get_rssi(msg); - rx_params->channel = rsi_get_channel(msg); - rsi_indicate_pkt_to_os(common, skb); - } else { - rsi_dbg(MGMT_TX_ZONE, "%s: Internal Packet\n", __func__); + msg_len -= pad_bytes; + if (msg_len <= 0) { + rsi_dbg(MGMT_RX_ZONE, + "%s: Invalid rx msg of len = %d\n", + __func__, msg_len); + return -EINVAL; } + skb = dev_alloc_skb(msg_len); + if (!skb) + return -ENOMEM; + + skb_put_data(skb, + (u8 *)(msg + FRAME_DESC_SZ + pad_bytes), + msg_len); + + info = IEEE80211_SKB_CB(skb); + rx_params = (struct skb_info *)info->driver_data; + rx_params->rssi = rsi_get_rssi(msg); + rx_params->channel = rsi_get_channel(msg); + rsi_indicate_pkt_to_os(common, skb); + return 0; } @@ -451,20 +455,23 @@ static int rsi_mgmt_pkt_to_core(struct rsi_common *common, * Return: status: 0 on success, corresponding negative error code on failure. 
*/ static int rsi_hal_send_sta_notify_frame(struct rsi_common *common, - u8 opmode, + enum opmode opmode, u8 notify_event, const unsigned char *bssid, u8 qos_enable, - u16 aid) + u16 aid, + u16 sta_id) { + struct ieee80211_vif *vif = common->priv->vifs[0]; struct sk_buff *skb = NULL; struct rsi_peer_notify *peer_notify; u16 vap_id = 0; int status; + u16 frame_len = sizeof(struct rsi_peer_notify); rsi_dbg(MGMT_TX_ZONE, "%s: Sending sta notify frame\n", __func__); - skb = dev_alloc_skb(sizeof(struct rsi_peer_notify)); + skb = dev_alloc_skb(frame_len); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", @@ -472,10 +479,13 @@ static int rsi_hal_send_sta_notify_frame(struct rsi_common *common, return -ENOMEM; } - memset(skb->data, 0, sizeof(struct rsi_peer_notify)); + memset(skb->data, 0, frame_len); peer_notify = (struct rsi_peer_notify *)skb->data; - peer_notify->command = cpu_to_le16(opmode << 1); + if (opmode == STA_OPMODE) + peer_notify->command = cpu_to_le16(PEER_TYPE_AP << 1); + else if (opmode == AP_OPMODE) + peer_notify->command = cpu_to_le16(PEER_TYPE_STA << 1); switch (notify_event) { case STA_CONNECTED: @@ -490,20 +500,22 @@ static int rsi_hal_send_sta_notify_frame(struct rsi_common *common, peer_notify->command |= cpu_to_le16((aid & 0xfff) << 4); ether_addr_copy(peer_notify->mac_addr, bssid); - + peer_notify->mpdu_density = cpu_to_le16(RSI_MPDU_DENSITY); peer_notify->sta_flags = cpu_to_le32((qos_enable) ? 1 : 0); - peer_notify->desc_word[0] = - cpu_to_le16((sizeof(struct rsi_peer_notify) - FRAME_DESC_SZ) | - (RSI_WIFI_MGMT_Q << 12)); - peer_notify->desc_word[1] = cpu_to_le16(PEER_NOTIFY); - peer_notify->desc_word[7] |= cpu_to_le16(vap_id << 8); + rsi_set_len_qno(&peer_notify->desc.desc_dword0.len_qno, + (frame_len - FRAME_DESC_SZ), + RSI_WIFI_MGMT_Q); + peer_notify->desc.desc_dword0.frame_type = PEER_NOTIFY; + peer_notify->desc.desc_dword3.qid_tid = sta_id; + peer_notify->desc.desc_dword3.sta_id = vap_id; - skb_put(skb, sizeof(struct rsi_peer_notify)); + skb_put(skb, frame_len); status = rsi_send_internal_mgmt_frame(common, skb); - if (!status && qos_enable) { + if ((vif->type == NL80211_IFTYPE_STATION) && + (!status && qos_enable)) { rsi_set_contention_vals(common); status = rsi_load_radio_caps(common); } @@ -525,13 +537,14 @@ int rsi_send_aggregation_params_frame(struct rsi_common *common, u16 tid, u16 ssn, u8 buf_size, - u8 event) + u8 event, + u8 sta_id) { struct sk_buff *skb = NULL; - struct rsi_mac_frame *mgmt_frame; - u8 peer_id = 0; + struct rsi_aggr_params *aggr_params; + u16 frame_len = sizeof(struct rsi_aggr_params); - skb = dev_alloc_skb(FRAME_DESC_SZ); + skb = dev_alloc_skb(frame_len); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", @@ -539,37 +552,29 @@ int rsi_send_aggregation_params_frame(struct rsi_common *common, return -ENOMEM; } - memset(skb->data, 0, FRAME_DESC_SZ); - mgmt_frame = (struct rsi_mac_frame *)skb->data; + memset(skb->data, 0, frame_len); + aggr_params = (struct rsi_aggr_params *)skb->data; rsi_dbg(MGMT_TX_ZONE, "%s: Sending AMPDU indication frame\n", __func__); - mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12); - mgmt_frame->desc_word[1] = cpu_to_le16(AMPDU_IND); + rsi_set_len_qno(&aggr_params->desc_dword0.len_qno, 0, RSI_WIFI_MGMT_Q); + aggr_params->desc_dword0.frame_type = AMPDU_IND; + aggr_params->aggr_params = tid & RSI_AGGR_PARAMS_TID_MASK; + aggr_params->peer_id = sta_id; if (event == STA_TX_ADDBA_DONE) { - mgmt_frame->desc_word[4] = cpu_to_le16(ssn); - mgmt_frame->desc_word[5] = 
cpu_to_le16(buf_size); - mgmt_frame->desc_word[7] = - cpu_to_le16((tid | (START_AMPDU_AGGR << 4) | (peer_id << 8))); + aggr_params->seq_start = cpu_to_le16(ssn); + aggr_params->baw_size = cpu_to_le16(buf_size); + aggr_params->aggr_params |= RSI_AGGR_PARAMS_START; } else if (event == STA_RX_ADDBA_DONE) { - mgmt_frame->desc_word[4] = cpu_to_le16(ssn); - mgmt_frame->desc_word[7] = cpu_to_le16(tid | - (START_AMPDU_AGGR << 4) | - (RX_BA_INDICATION << 5) | - (peer_id << 8)); - } else if (event == STA_TX_DELBA) { - mgmt_frame->desc_word[7] = cpu_to_le16(tid | - (STOP_AMPDU_AGGR << 4) | - (peer_id << 8)); + aggr_params->seq_start = cpu_to_le16(ssn); + aggr_params->aggr_params |= (RSI_AGGR_PARAMS_START | + RSI_AGGR_PARAMS_RX_AGGR); } else if (event == STA_RX_DELBA) { - mgmt_frame->desc_word[7] = cpu_to_le16(tid | - (STOP_AMPDU_AGGR << 4) | - (RX_BA_INDICATION << 5) | - (peer_id << 8)); + aggr_params->aggr_params |= RSI_AGGR_PARAMS_RX_AGGR; } - skb_put(skb, FRAME_DESC_SZ); + skb_put(skb, frame_len); return rsi_send_internal_mgmt_frame(common, skb); } @@ -584,34 +589,36 @@ int rsi_send_aggregation_params_frame(struct rsi_common *common, static int rsi_program_bb_rf(struct rsi_common *common) { struct sk_buff *skb; - struct rsi_mac_frame *mgmt_frame; + struct rsi_bb_rf_prog *bb_rf_prog; + u16 frame_len = sizeof(struct rsi_bb_rf_prog); rsi_dbg(MGMT_TX_ZONE, "%s: Sending program BB/RF frame\n", __func__); - skb = dev_alloc_skb(FRAME_DESC_SZ); + skb = dev_alloc_skb(frame_len); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", __func__); return -ENOMEM; } - memset(skb->data, 0, FRAME_DESC_SZ); - mgmt_frame = (struct rsi_mac_frame *)skb->data; + memset(skb->data, 0, frame_len); + bb_rf_prog = (struct rsi_bb_rf_prog *)skb->data; - mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12); - mgmt_frame->desc_word[1] = cpu_to_le16(BBP_PROG_IN_TA); - mgmt_frame->desc_word[4] = cpu_to_le16(common->endpoint); + rsi_set_len_qno(&bb_rf_prog->desc_dword0.len_qno, 0, RSI_WIFI_MGMT_Q); + bb_rf_prog->desc_dword0.frame_type = BBP_PROG_IN_TA; + bb_rf_prog->endpoint = common->endpoint; + bb_rf_prog->rf_power_mode = common->wlan_rf_power_mode; if (common->rf_reset) { - mgmt_frame->desc_word[7] = cpu_to_le16(RF_RESET_ENABLE); + bb_rf_prog->flags = cpu_to_le16(RF_RESET_ENABLE); rsi_dbg(MGMT_TX_ZONE, "%s: ===> RF RESET REQUEST SENT <===\n", __func__); common->rf_reset = 0; } common->bb_rf_prog_count = 1; - mgmt_frame->desc_word[7] |= cpu_to_le16(PUT_BBP_RESET | - BBP_REG_WRITE | (RSI_RF_TYPE << 4)); - skb_put(skb, FRAME_DESC_SZ); + bb_rf_prog->flags |= cpu_to_le16(PUT_BBP_RESET | BBP_REG_WRITE | + (RSI_RF_TYPE << 4)); + skb_put(skb, frame_len); return rsi_send_internal_mgmt_frame(common, skb); } @@ -625,6 +632,8 @@ static int rsi_program_bb_rf(struct rsi_common *common) */ int rsi_set_vap_capabilities(struct rsi_common *common, enum opmode mode, + u8 *mac_addr, + u8 vap_id, u8 vap_status) { struct sk_buff *skb = NULL; @@ -632,59 +641,60 @@ int rsi_set_vap_capabilities(struct rsi_common *common, struct rsi_hw *adapter = common->priv; struct ieee80211_hw *hw = adapter->hw; struct ieee80211_conf *conf = &hw->conf; - u16 vap_id = 0; + u16 frame_len = sizeof(struct rsi_vap_caps); rsi_dbg(MGMT_TX_ZONE, "%s: Sending VAP capabilities frame\n", __func__); - skb = dev_alloc_skb(sizeof(struct rsi_vap_caps)); + skb = dev_alloc_skb(frame_len); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", __func__); return -ENOMEM; } - memset(skb->data, 0, sizeof(struct rsi_vap_caps)); + memset(skb->data, 0, 
frame_len); vap_caps = (struct rsi_vap_caps *)skb->data; - vap_caps->desc_word[0] = cpu_to_le16((sizeof(struct rsi_vap_caps) - - FRAME_DESC_SZ) | - (RSI_WIFI_MGMT_Q << 12)); - vap_caps->desc_word[1] = cpu_to_le16(VAP_CAPABILITIES); - vap_caps->desc_word[2] = cpu_to_le16(vap_status << 8); - vap_caps->desc_word[4] = cpu_to_le16(mode | - (common->channel_width << 8)); - vap_caps->desc_word[7] = cpu_to_le16((vap_id << 8) | - (common->mac_id << 4) | - common->radio_id); + rsi_set_len_qno(&vap_caps->desc_dword0.len_qno, + (frame_len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q); + vap_caps->desc_dword0.frame_type = VAP_CAPABILITIES; + vap_caps->status = vap_status; + vap_caps->vif_type = mode; + vap_caps->channel_bw = common->channel_width; + vap_caps->vap_id = vap_id; + vap_caps->radioid_macid = ((common->mac_id & 0xf) << 4) | + (common->radio_id & 0xf); - memcpy(vap_caps->mac_addr, common->mac_addr, IEEE80211_ADDR_LEN); + memcpy(vap_caps->mac_addr, mac_addr, IEEE80211_ADDR_LEN); vap_caps->keep_alive_period = cpu_to_le16(90); vap_caps->frag_threshold = cpu_to_le16(IEEE80211_MAX_FRAG_THRESHOLD); vap_caps->rts_threshold = cpu_to_le16(common->rts_threshold); - vap_caps->default_mgmt_rate = cpu_to_le32(RSI_RATE_6); if (common->band == NL80211_BAND_5GHZ) { - vap_caps->default_ctrl_rate = cpu_to_le32(RSI_RATE_6); - if (conf_is_ht40(&common->priv->hw->conf)) { - vap_caps->default_ctrl_rate |= - cpu_to_le32(FULL40M_ENABLE << 16); - } + vap_caps->default_ctrl_rate = cpu_to_le16(RSI_RATE_6); + vap_caps->default_mgmt_rate = cpu_to_le32(RSI_RATE_6); } else { - vap_caps->default_ctrl_rate = cpu_to_le32(RSI_RATE_1); + vap_caps->default_ctrl_rate = cpu_to_le16(RSI_RATE_1); + vap_caps->default_mgmt_rate = cpu_to_le32(RSI_RATE_1); + } + if (conf_is_ht40(conf)) { if (conf_is_ht40_minus(conf)) - vap_caps->default_ctrl_rate |= - cpu_to_le32(UPPER_20_ENABLE << 16); + vap_caps->ctrl_rate_flags = + cpu_to_le16(UPPER_20_ENABLE); else if (conf_is_ht40_plus(conf)) - vap_caps->default_ctrl_rate |= - cpu_to_le32(LOWER_20_ENABLE << 16); + vap_caps->ctrl_rate_flags = + cpu_to_le16(LOWER_20_ENABLE); + else + vap_caps->ctrl_rate_flags = + cpu_to_le16(FULL40M_ENABLE); } vap_caps->default_data_rate = 0; - vap_caps->beacon_interval = cpu_to_le16(200); - vap_caps->dtim_period = cpu_to_le16(4); + vap_caps->beacon_interval = cpu_to_le16(common->beacon_interval); + vap_caps->dtim_period = cpu_to_le16(common->dtim_cnt); - skb_put(skb, sizeof(*vap_caps)); + skb_put(skb, frame_len); return rsi_send_internal_mgmt_frame(common, skb); } @@ -705,58 +715,66 @@ int rsi_hal_load_key(struct rsi_common *common, u16 key_len, u8 key_type, u8 key_id, - u32 cipher) + u32 cipher, + s16 sta_id) { + struct ieee80211_vif *vif = common->priv->vifs[0]; struct sk_buff *skb = NULL; struct rsi_set_key *set_key; u16 key_descriptor = 0; + u16 frame_len = sizeof(struct rsi_set_key); rsi_dbg(MGMT_TX_ZONE, "%s: Sending load key frame\n", __func__); - skb = dev_alloc_skb(sizeof(struct rsi_set_key)); + skb = dev_alloc_skb(frame_len); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", __func__); return -ENOMEM; } - memset(skb->data, 0, sizeof(struct rsi_set_key)); + memset(skb->data, 0, frame_len); set_key = (struct rsi_set_key *)skb->data; + if (key_type == RSI_GROUP_KEY) { + key_descriptor = RSI_KEY_TYPE_BROADCAST; + if (vif->type == NL80211_IFTYPE_AP) + key_descriptor |= RSI_KEY_MODE_AP; + } if ((cipher == WLAN_CIPHER_SUITE_WEP40) || (cipher == WLAN_CIPHER_SUITE_WEP104)) { - key_len += 1; - key_descriptor |= BIT(2); + key_id = 0; + key_descriptor |= 
RSI_WEP_KEY; if (key_len >= 13) - key_descriptor |= BIT(3); + key_descriptor |= RSI_WEP_KEY_104; } else if (cipher != KEY_TYPE_CLEAR) { - key_descriptor |= BIT(4); - if (key_type == RSI_PAIRWISE_KEY) - key_id = 0; + key_descriptor |= RSI_CIPHER_WPA; if (cipher == WLAN_CIPHER_SUITE_TKIP) - key_descriptor |= BIT(5); + key_descriptor |= RSI_CIPHER_TKIP; } - key_descriptor |= (key_type | BIT(13) | (key_id << 14)); + key_descriptor |= RSI_PROTECT_DATA_FRAMES; + key_descriptor |= ((key_id << RSI_KEY_ID_OFFSET) & RSI_KEY_ID_MASK); - set_key->desc_word[0] = cpu_to_le16((sizeof(struct rsi_set_key) - - FRAME_DESC_SZ) | - (RSI_WIFI_MGMT_Q << 12)); - set_key->desc_word[1] = cpu_to_le16(SET_KEY_REQ); - set_key->desc_word[4] = cpu_to_le16(key_descriptor); + rsi_set_len_qno(&set_key->desc_dword0.len_qno, + (frame_len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q); + set_key->desc_dword0.frame_type = SET_KEY_REQ; + set_key->key_desc = cpu_to_le16(key_descriptor); + set_key->sta_id = sta_id; - if ((cipher == WLAN_CIPHER_SUITE_WEP40) || - (cipher == WLAN_CIPHER_SUITE_WEP104)) { - memcpy(&set_key->key[key_id][1], - data, - key_len * 2); + if (data) { + if ((cipher == WLAN_CIPHER_SUITE_WEP40) || + (cipher == WLAN_CIPHER_SUITE_WEP104)) { + memcpy(&set_key->key[key_id][1], data, key_len * 2); + } else { + memcpy(&set_key->key[0][0], data, key_len); + } + memcpy(set_key->tx_mic_key, &data[16], 8); + memcpy(set_key->rx_mic_key, &data[24], 8); } else { - memcpy(&set_key->key[0][0], data, key_len); + memset(&set_key[FRAME_DESC_SZ], 0, frame_len - FRAME_DESC_SZ); } - memcpy(set_key->tx_mic_key, &data[16], 8); - memcpy(set_key->rx_mic_key, &data[24], 8); - - skb_put(skb, sizeof(struct rsi_set_key)); + skb_put(skb, frame_len); return rsi_send_internal_mgmt_frame(common, skb); } @@ -970,12 +988,13 @@ int rsi_set_channel(struct rsi_common *common, struct ieee80211_channel *channel) { struct sk_buff *skb = NULL; - struct rsi_mac_frame *mgmt_frame; + struct rsi_chan_config *chan_cfg; + u16 frame_len = sizeof(struct rsi_chan_config); rsi_dbg(MGMT_TX_ZONE, "%s: Sending scan req frame\n", __func__); - skb = dev_alloc_skb(FRAME_DESC_SZ); + skb = dev_alloc_skb(frame_len); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", __func__); @@ -986,37 +1005,33 @@ int rsi_set_channel(struct rsi_common *common, dev_kfree_skb(skb); return 0; } - memset(skb->data, 0, FRAME_DESC_SZ); - mgmt_frame = (struct rsi_mac_frame *)skb->data; + memset(skb->data, 0, frame_len); + chan_cfg = (struct rsi_chan_config *)skb->data; - mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12); - mgmt_frame->desc_word[1] = cpu_to_le16(SCAN_REQUEST); - mgmt_frame->desc_word[4] = cpu_to_le16(channel->hw_value); + rsi_set_len_qno(&chan_cfg->desc_dword0.len_qno, 0, RSI_WIFI_MGMT_Q); + chan_cfg->desc_dword0.frame_type = SCAN_REQUEST; + chan_cfg->channel_number = channel->hw_value; + chan_cfg->antenna_gain_offset_2g = channel->max_antenna_gain; + chan_cfg->antenna_gain_offset_5g = channel->max_antenna_gain; + chan_cfg->region_rftype = (RSI_RF_TYPE & 0xf) << 4; - mgmt_frame->desc_word[4] |= - cpu_to_le16(((char)(channel->max_antenna_gain)) << 8); - mgmt_frame->desc_word[5] = - cpu_to_le16((char)(channel->max_antenna_gain)); - - mgmt_frame->desc_word[7] = cpu_to_le16(PUT_BBP_RESET | - BBP_REG_WRITE | - (RSI_RF_TYPE << 4)); - - if (!(channel->flags & IEEE80211_CHAN_NO_IR) && - !(channel->flags & IEEE80211_CHAN_RADAR)) { + if ((channel->flags & IEEE80211_CHAN_NO_IR) || + (channel->flags & IEEE80211_CHAN_RADAR)) { + chan_cfg->antenna_gain_offset_2g |= 
RSI_CHAN_RADAR; + } else { if (common->tx_power < channel->max_power) - mgmt_frame->desc_word[6] = cpu_to_le16(common->tx_power); + chan_cfg->tx_power = cpu_to_le16(common->tx_power); else - mgmt_frame->desc_word[6] = cpu_to_le16(channel->max_power); + chan_cfg->tx_power = cpu_to_le16(channel->max_power); } - mgmt_frame->desc_word[7] = cpu_to_le16(common->priv->dfs_region); + chan_cfg->region_rftype |= (common->priv->dfs_region & 0xf); if (common->channel_width == BW_40MHZ) - mgmt_frame->desc_word[5] |= cpu_to_le16(0x1 << 8); + chan_cfg->channel_width = 0x1; common->channel = channel->hw_value; - skb_put(skb, FRAME_DESC_SZ); + skb_put(skb, frame_len); return rsi_send_internal_mgmt_frame(common, skb); } @@ -1058,6 +1073,37 @@ int rsi_send_radio_params_update(struct rsi_common *common) return rsi_send_internal_mgmt_frame(common, skb); } +/* This function programs the threshold. */ +int rsi_send_vap_dynamic_update(struct rsi_common *common) +{ + struct sk_buff *skb; + struct rsi_dynamic_s *dynamic_frame; + + rsi_dbg(MGMT_TX_ZONE, + "%s: Sending vap update indication frame\n", __func__); + + skb = dev_alloc_skb(sizeof(struct rsi_dynamic_s)); + if (!skb) + return -ENOMEM; + + memset(skb->data, 0, sizeof(struct rsi_dynamic_s)); + dynamic_frame = (struct rsi_dynamic_s *)skb->data; + rsi_set_len_qno(&dynamic_frame->desc_dword0.len_qno, + sizeof(dynamic_frame->frame_body), RSI_WIFI_MGMT_Q); + + dynamic_frame->desc_dword0.frame_type = VAP_DYNAMIC_UPDATE; + dynamic_frame->desc_dword2.pkt_info = + cpu_to_le32(common->rts_threshold); + /* Beacon miss threshold */ + dynamic_frame->frame_body.keep_alive_period = + cpu_to_le16(RSI_DEF_KEEPALIVE); + dynamic_frame->desc_dword3.sta_id = 0; /* vap id */ + + skb_put(skb, sizeof(struct rsi_dynamic_s)); + + return rsi_send_internal_mgmt_frame(common, skb); +} + /** * rsi_compare() - This function is used to compare two integers * @a: pointer to the first integer @@ -1112,8 +1158,11 @@ static bool rsi_map_rates(u16 rate, int *offset) * * Return: 0 on success, corresponding error code on failure. 
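 * In STA mode the rate bitmap and the HT/short-GI capabilities are taken
 * from the configured bitrate mask and vif_info; in AP mode they come from
 * the station entry passed in. Legacy rates for the current band are loaded
 * first, followed by the MCS rates in the bottom half of the table; each MCS
 * entry is duplicated, with BIT(9) (the short-GI flag) set when SGI or HT40
 * is in use. The resulting table is programmed for the given sta_id.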
*/ -static int rsi_send_auto_rate_request(struct rsi_common *common) +static int rsi_send_auto_rate_request(struct rsi_common *common, + struct ieee80211_sta *sta, + u16 sta_id) { + struct ieee80211_vif *vif = common->priv->vifs[0]; struct sk_buff *skb; struct rsi_auto_rate *auto_rate; int ii = 0, jj = 0, kk = 0; @@ -1121,11 +1170,15 @@ static int rsi_send_auto_rate_request(struct rsi_common *common) u8 band = hw->conf.chandef.chan->band; u8 num_supported_rates = 0; u8 rate_table_offset, rate_offset = 0; - u32 rate_bitmap = common->bitrate_mask[band]; - + u32 rate_bitmap; u16 *selected_rates, min_rate; + bool is_ht = false, is_sgi = false; + u16 frame_len = sizeof(struct rsi_auto_rate); - skb = dev_alloc_skb(sizeof(struct rsi_auto_rate)); + rsi_dbg(MGMT_TX_ZONE, + "%s: Sending auto rate request frame\n", __func__); + + skb = dev_alloc_skb(frame_len); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", __func__); @@ -1140,8 +1193,6 @@ static int rsi_send_auto_rate_request(struct rsi_common *common) return -ENOMEM; } - memset(skb->data, 0, sizeof(struct rsi_auto_rate)); - auto_rate = (struct rsi_auto_rate *)skb->data; auto_rate->aarf_rssi = cpu_to_le16(((u16)3 << 6) | (u16)(18 & 0x3f)); @@ -1150,16 +1201,35 @@ static int rsi_send_auto_rate_request(struct rsi_common *common) auto_rate->initial_boundary = cpu_to_le16(3); auto_rate->max_threshold_limt = cpu_to_le16(27); - auto_rate->desc_word[1] = cpu_to_le16(AUTO_RATE_IND); + auto_rate->desc.desc_dword0.frame_type = AUTO_RATE_IND; if (common->channel_width == BW_40MHZ) - auto_rate->desc_word[7] |= cpu_to_le16(1); + auto_rate->desc.desc_dword3.qid_tid = BW_40MHZ; + auto_rate->desc.desc_dword3.sta_id = sta_id; + + if (vif->type == NL80211_IFTYPE_STATION) { + rate_bitmap = common->bitrate_mask[band]; + is_ht = common->vif_info[0].is_ht; + is_sgi = common->vif_info[0].sgi; + } else { + rate_bitmap = sta->supp_rates[band]; + is_ht = sta->ht_cap.ht_supported; + if ((sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) || + (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)) + is_sgi = true; + } if (band == NL80211_BAND_2GHZ) { - min_rate = RSI_RATE_1; + if ((rate_bitmap == 0) && (is_ht)) + min_rate = RSI_RATE_MCS0; + else + min_rate = RSI_RATE_1; rate_table_offset = 0; } else { - min_rate = RSI_RATE_6; + if ((rate_bitmap == 0) && (is_ht)) + min_rate = RSI_RATE_MCS0; + else + min_rate = RSI_RATE_6; rate_table_offset = 4; } @@ -1173,7 +1243,7 @@ static int rsi_send_auto_rate_request(struct rsi_common *common) } num_supported_rates = jj; - if (common->vif_info[0].is_ht) { + if (is_ht) { for (ii = 0; ii < ARRAY_SIZE(mcs); ii++) selected_rates[jj++] = mcs[ii]; num_supported_rates += ARRAY_SIZE(mcs); @@ -1194,13 +1264,15 @@ static int rsi_send_auto_rate_request(struct rsi_common *common) } /* loading HT rates in the bottom half of the auto rate table */ - if (common->vif_info[0].is_ht) { + if (is_ht) { for (ii = rate_offset, kk = ARRAY_SIZE(rsi_mcsrates) - 1; ii < rate_offset + 2 * ARRAY_SIZE(rsi_mcsrates); ii++) { - if (common->vif_info[0].sgi || - conf_is_ht40(&common->priv->hw->conf)) + if (is_sgi || conf_is_ht40(&common->priv->hw->conf)) auto_rate->supported_rates[ii++] = cpu_to_le16(rsi_mcsrates[kk] | BIT(9)); + else + auto_rate->supported_rates[ii++] = + cpu_to_le16(rsi_mcsrates[kk]); auto_rate->supported_rates[ii] = cpu_to_le16(rsi_mcsrates[kk--]); } @@ -1216,15 +1288,12 @@ static int rsi_send_auto_rate_request(struct rsi_common *common) auto_rate->num_supported_rates = cpu_to_le16(num_supported_rates * 2); auto_rate->moderate_rate_inx = 
cpu_to_le16(num_supported_rates / 2); - auto_rate->desc_word[7] |= cpu_to_le16(0 << 8); num_supported_rates *= 2; - auto_rate->desc_word[0] = cpu_to_le16((sizeof(*auto_rate) - - FRAME_DESC_SZ) | - (RSI_WIFI_MGMT_Q << 12)); + rsi_set_len_qno(&auto_rate->desc.desc_dword0.len_qno, + (frame_len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q); - skb_put(skb, - sizeof(struct rsi_auto_rate)); + skb_put(skb, frame_len); kfree(selected_rates); return rsi_send_internal_mgmt_frame(common, skb); @@ -1243,27 +1312,40 @@ static int rsi_send_auto_rate_request(struct rsi_common *common) * Return: None. */ void rsi_inform_bss_status(struct rsi_common *common, + enum opmode opmode, u8 status, - const unsigned char *bssid, + const u8 *addr, u8 qos_enable, - u16 aid) + u16 aid, + struct ieee80211_sta *sta, + u16 sta_id) { if (status) { + if (opmode == STA_OPMODE) + common->hw_data_qs_blocked = true; rsi_hal_send_sta_notify_frame(common, - RSI_IFTYPE_STATION, + opmode, STA_CONNECTED, - bssid, + addr, qos_enable, - aid); + aid, sta_id); if (common->min_rate == 0xffff) - rsi_send_auto_rate_request(common); + rsi_send_auto_rate_request(common, sta, sta_id); + if (opmode == STA_OPMODE) { + if (!rsi_send_block_unblock_frame(common, false)) + common->hw_data_qs_blocked = false; + } } else { + if (opmode == STA_OPMODE) + common->hw_data_qs_blocked = true; rsi_hal_send_sta_notify_frame(common, - RSI_IFTYPE_STATION, + opmode, STA_DISCONNECTED, - bssid, + addr, qos_enable, - aid); + aid, sta_id); + if (opmode == STA_OPMODE) + rsi_send_block_unblock_frame(common, true); } } @@ -1276,7 +1358,8 @@ void rsi_inform_bss_status(struct rsi_common *common, */ static int rsi_eeprom_read(struct rsi_common *common) { - struct rsi_mac_frame *mgmt_frame; + struct rsi_eeprom_read_frame *mgmt_frame; + struct rsi_hw *adapter = common->priv; struct sk_buff *skb; rsi_dbg(MGMT_TX_ZONE, "%s: Sending EEPROM read req frame\n", __func__); @@ -1289,18 +1372,21 @@ static int rsi_eeprom_read(struct rsi_common *common) } memset(skb->data, 0, FRAME_DESC_SZ); - mgmt_frame = (struct rsi_mac_frame *)skb->data; + mgmt_frame = (struct rsi_eeprom_read_frame *)skb->data; /* FrameType */ - mgmt_frame->desc_word[1] = cpu_to_le16(EEPROM_READ_TYPE); - mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12); + rsi_set_len_qno(&mgmt_frame->len_qno, 0, RSI_WIFI_MGMT_Q); + mgmt_frame->pkt_type = EEPROM_READ; + /* Number of bytes to read */ - mgmt_frame->desc_word[3] = cpu_to_le16(ETH_ALEN + - WLAN_MAC_MAGIC_WORD_LEN + - WLAN_HOST_MODE_LEN + - WLAN_FW_VERSION_LEN); + mgmt_frame->pkt_info = + cpu_to_le32((adapter->eeprom.length << RSI_EEPROM_LEN_OFFSET) & + RSI_EEPROM_LEN_MASK); + mgmt_frame->pkt_info |= cpu_to_le32((3 << RSI_EEPROM_HDR_SIZE_OFFSET) & + RSI_EEPROM_HDR_SIZE_MASK); + /* Address to read */ - mgmt_frame->desc_word[4] = cpu_to_le16(WLAN_MAC_EEPROM_ADDR); + mgmt_frame->eeprom_offset = cpu_to_le32(adapter->eeprom.offset); skb_put(skb, FRAME_DESC_SZ); @@ -1317,7 +1403,7 @@ static int rsi_eeprom_read(struct rsi_common *common) */ int rsi_send_block_unblock_frame(struct rsi_common *common, bool block_event) { - struct rsi_mac_frame *mgmt_frame; + struct rsi_block_unblock_data *mgmt_frame; struct sk_buff *skb; rsi_dbg(MGMT_TX_ZONE, "%s: Sending block/unblock frame\n", __func__); @@ -1330,23 +1416,25 @@ int rsi_send_block_unblock_frame(struct rsi_common *common, bool block_event) } memset(skb->data, 0, FRAME_DESC_SZ); - mgmt_frame = (struct rsi_mac_frame *)skb->data; + mgmt_frame = (struct rsi_block_unblock_data *)skb->data; - mgmt_frame->desc_word[0] = 
cpu_to_le16(RSI_WIFI_MGMT_Q << 12); - mgmt_frame->desc_word[1] = cpu_to_le16(BLOCK_HW_QUEUE); + rsi_set_len_qno(&mgmt_frame->desc_dword0.len_qno, 0, RSI_WIFI_MGMT_Q); + mgmt_frame->desc_dword0.frame_type = BLOCK_HW_QUEUE; + mgmt_frame->host_quiet_info = QUIET_INFO_VALID; if (block_event) { rsi_dbg(INFO_ZONE, "blocking the data qs\n"); - mgmt_frame->desc_word[4] = cpu_to_le16(0xf); + mgmt_frame->block_q_bitmap = cpu_to_le16(0xf); + mgmt_frame->block_q_bitmap |= cpu_to_le16(0xf << 4); } else { rsi_dbg(INFO_ZONE, "unblocking the data qs\n"); - mgmt_frame->desc_word[5] = cpu_to_le16(0xf); + mgmt_frame->unblock_q_bitmap = cpu_to_le16(0xf); + mgmt_frame->unblock_q_bitmap |= cpu_to_le16(0xf << 4); } skb_put(skb, FRAME_DESC_SZ); return rsi_send_internal_mgmt_frame(common, skb); - } /** @@ -1383,6 +1471,61 @@ int rsi_send_rx_filter_frame(struct rsi_common *common, u16 rx_filter_word) return rsi_send_internal_mgmt_frame(common, skb); } +int rsi_send_ps_request(struct rsi_hw *adapter, bool enable) +{ + struct rsi_common *common = adapter->priv; + struct ieee80211_bss_conf *bss = &adapter->vifs[0]->bss_conf; + struct rsi_request_ps *ps; + struct rsi_ps_info *ps_info; + struct sk_buff *skb; + int frame_len = sizeof(*ps); + + skb = dev_alloc_skb(frame_len); + if (!skb) + return -ENOMEM; + memset(skb->data, 0, frame_len); + + ps = (struct rsi_request_ps *)skb->data; + ps_info = &adapter->ps_info; + + rsi_set_len_qno(&ps->desc.desc_dword0.len_qno, + (frame_len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q); + ps->desc.desc_dword0.frame_type = WAKEUP_SLEEP_REQUEST; + if (enable) { + ps->ps_sleep.enable = RSI_PS_ENABLE; + ps->desc.desc_dword3.token = cpu_to_le16(RSI_SLEEP_REQUEST); + } else { + ps->ps_sleep.enable = RSI_PS_DISABLE; + ps->desc.desc_dword0.len_qno |= cpu_to_le16(RSI_PS_DISABLE_IND); + ps->desc.desc_dword3.token = cpu_to_le16(RSI_WAKEUP_REQUEST); + } + + ps->ps_uapsd_acs = common->uapsd_bitmap; + + ps->ps_sleep.sleep_type = ps_info->sleep_type; + ps->ps_sleep.num_bcns_per_lis_int = + cpu_to_le16(ps_info->num_bcns_per_lis_int); + ps->ps_sleep.sleep_duration = + cpu_to_le32(ps_info->deep_sleep_wakeup_period); + + if (bss->assoc) + ps->ps_sleep.connected_sleep = RSI_CONNECTED_SLEEP; + else + ps->ps_sleep.connected_sleep = RSI_DEEP_SLEEP; + + ps->ps_listen_interval = cpu_to_le32(ps_info->listen_interval); + ps->ps_dtim_interval_duration = + cpu_to_le32(ps_info->dtim_interval_duration); + + if (ps_info->listen_interval > ps_info->dtim_interval_duration) + ps->ps_listen_interval = cpu_to_le32(RSI_PS_DISABLE); + + ps->ps_num_dtim_intervals = cpu_to_le16(ps_info->num_dtims_per_sleep); + skb_put(skb, frame_len); + + return rsi_send_internal_mgmt_frame(common, skb); +} + /** * rsi_set_antenna() - This fuction send antenna configuration request * to device @@ -1394,7 +1537,7 @@ int rsi_send_rx_filter_frame(struct rsi_common *common, u16 rx_filter_word) */ int rsi_set_antenna(struct rsi_common *common, u8 antenna) { - struct rsi_mac_frame *cmd_frame; + struct rsi_ant_sel_frame *ant_sel_frame; struct sk_buff *skb; skb = dev_alloc_skb(FRAME_DESC_SZ); @@ -1405,17 +1548,43 @@ int rsi_set_antenna(struct rsi_common *common, u8 antenna) } memset(skb->data, 0, FRAME_DESC_SZ); - cmd_frame = (struct rsi_mac_frame *)skb->data; - - cmd_frame->desc_word[1] = cpu_to_le16(ANT_SEL_FRAME); - cmd_frame->desc_word[3] = cpu_to_le16(antenna & 0x00ff); - cmd_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12); + ant_sel_frame = (struct rsi_ant_sel_frame *)skb->data; + ant_sel_frame->desc_dword0.frame_type = ANT_SEL_FRAME; + 
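+	/* sub frame type ANTENNA_SEL_TYPE requests antenna selection; the
+	 * antenna value is masked to the low byte (ANTENNA_MASK_VALUE).
+	 */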
ant_sel_frame->sub_frame_type = ANTENNA_SEL_TYPE; + ant_sel_frame->ant_value = cpu_to_le16(antenna & ANTENNA_MASK_VALUE); + rsi_set_len_qno(&ant_sel_frame->desc_dword0.len_qno, + 0, RSI_WIFI_MGMT_Q); skb_put(skb, FRAME_DESC_SZ); return rsi_send_internal_mgmt_frame(common, skb); } +static int rsi_send_beacon(struct rsi_common *common) +{ + struct sk_buff *skb = NULL; + u8 dword_align_bytes = 0; + + skb = dev_alloc_skb(MAX_MGMT_PKT_SIZE); + if (!skb) + return -ENOMEM; + + memset(skb->data, 0, MAX_MGMT_PKT_SIZE); + + dword_align_bytes = ((unsigned long)skb->data & 0x3f); + if (dword_align_bytes) + skb_pull(skb, (64 - dword_align_bytes)); + if (rsi_prepare_beacon(common, skb)) { + rsi_dbg(ERR_ZONE, "Failed to prepare beacon\n"); + return -EINVAL; + } + skb_queue_tail(&common->tx_queue[MGMT_BEACON_Q], skb); + rsi_set_event(&common->tx_thread.event); + rsi_dbg(DATA_TX_ZONE, "%s: Added to beacon queue\n", __func__); + + return 0; +} + /** * rsi_handle_ta_confirm_type() - This function handles the confirm frames. * @common: Pointer to the driver private structure. @@ -1426,19 +1595,25 @@ int rsi_set_antenna(struct rsi_common *common, u8 antenna) static int rsi_handle_ta_confirm_type(struct rsi_common *common, u8 *msg) { + struct rsi_hw *adapter = common->priv; u8 sub_type = (msg[15] & 0xff); + u16 msg_len = ((u16 *)msg)[0] & 0xfff; + u8 offset; switch (sub_type) { case BOOTUP_PARAMS_REQUEST: rsi_dbg(FSM_ZONE, "%s: Boot up params confirm received\n", __func__); if (common->fsm_state == FSM_BOOT_PARAMS_SENT) { + adapter->eeprom.length = (IEEE80211_ADDR_LEN + + WLAN_MAC_MAGIC_WORD_LEN + + WLAN_HOST_MODE_LEN); + adapter->eeprom.offset = WLAN_MAC_EEPROM_ADDR; if (rsi_eeprom_read(common)) { common->fsm_state = FSM_CARD_NOT_READY; goto out; - } else { - common->fsm_state = FSM_EEPROM_READ_MAC_ADDR; } + common->fsm_state = FSM_EEPROM_READ_MAC_ADDR; } else { rsi_dbg(INFO_ZONE, "%s: Received bootup params cfm in %d state\n", @@ -1447,30 +1622,52 @@ static int rsi_handle_ta_confirm_type(struct rsi_common *common, } break; - case EEPROM_READ_TYPE: + case EEPROM_READ: + rsi_dbg(FSM_ZONE, "EEPROM READ confirm received\n"); + if (msg_len <= 0) { + rsi_dbg(FSM_ZONE, + "%s: [EEPROM_READ] Invalid len %d\n", + __func__, msg_len); + goto out; + } + if (msg[16] != MAGIC_WORD) { + rsi_dbg(FSM_ZONE, + "%s: [EEPROM_READ] Invalid token\n", __func__); + common->fsm_state = FSM_CARD_NOT_READY; + goto out; + } if (common->fsm_state == FSM_EEPROM_READ_MAC_ADDR) { - if (msg[16] == MAGIC_WORD) { - u8 offset = (FRAME_DESC_SZ + WLAN_HOST_MODE_LEN - + WLAN_MAC_MAGIC_WORD_LEN); - memcpy(common->mac_addr, - &msg[offset], - ETH_ALEN); - memcpy(&common->fw_ver, - &msg[offset + ETH_ALEN], - sizeof(struct version_info)); - - } else { + offset = (FRAME_DESC_SZ + WLAN_HOST_MODE_LEN + + WLAN_MAC_MAGIC_WORD_LEN); + memcpy(common->mac_addr, &msg[offset], ETH_ALEN); + adapter->eeprom.length = + ((WLAN_MAC_MAGIC_WORD_LEN + 3) & (~3)); + adapter->eeprom.offset = WLAN_EEPROM_RFTYPE_ADDR; + if (rsi_eeprom_read(common)) { + rsi_dbg(ERR_ZONE, + "%s: Failed reading RF band\n", + __func__); common->fsm_state = FSM_CARD_NOT_READY; - break; + goto out; + } + common->fsm_state = FSM_EEPROM_READ_RF_TYPE; + } else if (common->fsm_state == FSM_EEPROM_READ_RF_TYPE) { + if ((msg[17] & 0x3) == 0x3) { + rsi_dbg(INIT_ZONE, "Dual band supported\n"); + common->band = NL80211_BAND_5GHZ; + common->num_supp_bands = 2; + } else if ((msg[17] & 0x3) == 0x1) { + rsi_dbg(INIT_ZONE, + "Only 2.4Ghz band supported\n"); + common->band = NL80211_BAND_2GHZ; + 
common->num_supp_bands = 1; } if (rsi_send_reset_mac(common)) goto out; - else - common->fsm_state = FSM_RESET_MAC_SENT; + common->fsm_state = FSM_RESET_MAC_SENT; } else { - rsi_dbg(ERR_ZONE, - "%s: Received eeprom mac addr in %d state\n", - __func__, common->fsm_state); + rsi_dbg(ERR_ZONE, "%s: Invalid EEPROM read type\n", + __func__); return 0; } break; @@ -1527,7 +1724,9 @@ static int rsi_handle_ta_confirm_type(struct rsi_common *common, return 0; } break; - + case WAKEUP_SLEEP_REQUEST: + rsi_dbg(INFO_ZONE, "Wakeup/Sleep confirmation.\n"); + return rsi_handle_ps_confirm(adapter, msg); default: rsi_dbg(INFO_ZONE, "%s: Invalid TA confirm pkt received\n", __func__); @@ -1590,20 +1789,34 @@ int rsi_mgmt_pkt_recv(struct rsi_common *common, u8 *msg) rsi_dbg(FSM_ZONE, "%s: Msg Len: %d, Msg Type: %4x\n", __func__, msg_len, msg_type); - if (msg_type == TA_CONFIRM_TYPE) { + switch (msg_type) { + case TA_CONFIRM_TYPE: return rsi_handle_ta_confirm_type(common, msg); - } else if (msg_type == CARD_READY_IND) { + case CARD_READY_IND: rsi_dbg(FSM_ZONE, "%s: Card ready indication received\n", __func__); return rsi_handle_card_ready(common, msg); - } else if (msg_type == TX_STATUS_IND) { + case TX_STATUS_IND: if (msg[15] == PROBEREQ_CONFIRM) { common->mgmt_q_block = false; rsi_dbg(FSM_ZONE, "%s: Probe confirm received\n", __func__); } - } else { - return rsi_mgmt_pkt_to_core(common, msg, msg_len, msg_type); + break; + case BEACON_EVENT_IND: + rsi_dbg(INFO_ZONE, "Beacon event\n"); + if (common->fsm_state != FSM_MAC_INIT_DONE) + return -1; + if (common->iface_down) + return -1; + if (!common->beacon_enabled) + return -1; + rsi_send_beacon(common); + break; + case RX_DOT11_MGMT: + return rsi_mgmt_pkt_to_core(common, msg, msg_len); + default: + rsi_dbg(INFO_ZONE, "Received packet type: 0x%x\n", msg_type); } return 0; } diff --git a/drivers/net/wireless/rsi/rsi_91x_ps.c b/drivers/net/wireless/rsi/rsi_91x_ps.c new file mode 100644 index 000000000000..48c79f035c59 --- /dev/null +++ b/drivers/net/wireless/rsi/rsi_91x_ps.c @@ -0,0 +1,146 @@ +/** + * Copyright (c) 2014 Redpine Signals Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include +#include +#include +#include "rsi_debugfs.h" +#include "rsi_mgmt.h" +#include "rsi_common.h" +#include "rsi_ps.h" + +char *str_psstate(enum ps_state state) +{ + switch (state) { + case PS_NONE: + return "PS_NONE"; + case PS_DISABLE_REQ_SENT: + return "PS_DISABLE_REQ_SENT"; + case PS_ENABLE_REQ_SENT: + return "PS_ENABLE_REQ_SENT"; + case PS_ENABLED: + return "PS_ENABLED"; + default: + return "INVALID_STATE"; + } + return "INVALID_STATE"; +} + +static inline void rsi_modify_ps_state(struct rsi_hw *adapter, + enum ps_state nstate) +{ + rsi_dbg(INFO_ZONE, "PS state changed %s => %s\n", + str_psstate(adapter->ps_state), + str_psstate(nstate)); + + adapter->ps_state = nstate; +} + +void rsi_default_ps_params(struct rsi_hw *adapter) +{ + struct rsi_ps_info *ps_info = &adapter->ps_info; + + ps_info->enabled = true; + ps_info->sleep_type = RSI_SLEEP_TYPE_LP; + ps_info->tx_threshold = 0; + ps_info->rx_threshold = 0; + ps_info->tx_hysterisis = 0; + ps_info->rx_hysterisis = 0; + ps_info->monitor_interval = 0; + ps_info->listen_interval = RSI_DEF_LISTEN_INTERVAL; + ps_info->num_bcns_per_lis_int = 0; + ps_info->dtim_interval_duration = 0; + ps_info->num_dtims_per_sleep = 0; + ps_info->deep_sleep_wakeup_period = RSI_DEF_DS_WAKEUP_PERIOD; +} + +void rsi_enable_ps(struct rsi_hw *adapter) +{ + if (adapter->ps_state != PS_NONE) { + rsi_dbg(ERR_ZONE, + "%s: Cannot accept enable PS in %s state\n", + __func__, str_psstate(adapter->ps_state)); + return; + } + + if (rsi_send_ps_request(adapter, true)) { + rsi_dbg(ERR_ZONE, + "%s: Failed to send PS request to device\n", + __func__); + return; + } + + rsi_modify_ps_state(adapter, PS_ENABLE_REQ_SENT); +} + +void rsi_disable_ps(struct rsi_hw *adapter) +{ + if (adapter->ps_state != PS_ENABLED) { + rsi_dbg(ERR_ZONE, + "%s: Cannot accept disable PS in %s state\n", + __func__, str_psstate(adapter->ps_state)); + return; + } + + if (rsi_send_ps_request(adapter, false)) { + rsi_dbg(ERR_ZONE, + "%s: Failed to send PS request to device\n", + __func__); + return; + } + + rsi_modify_ps_state(adapter, PS_DISABLE_REQ_SENT); +} + +void rsi_conf_uapsd(struct rsi_hw *adapter) +{ + int ret; + + if (adapter->ps_state != PS_ENABLED) + return; + + ret = rsi_send_ps_request(adapter, false); + if (!ret) + ret = rsi_send_ps_request(adapter, true); + if (ret) + rsi_dbg(ERR_ZONE, + "%s: Failed to send PS request to device\n", + __func__); +} + +int rsi_handle_ps_confirm(struct rsi_hw *adapter, u8 *msg) +{ + u16 cfm_type = get_unaligned_le16(msg + PS_CONFIRM_INDEX); + + switch (cfm_type) { + case RSI_SLEEP_REQUEST: + if (adapter->ps_state == PS_ENABLE_REQ_SENT) + rsi_modify_ps_state(adapter, PS_ENABLED); + break; + case RSI_WAKEUP_REQUEST: + if (adapter->ps_state == PS_DISABLE_REQ_SENT) + rsi_modify_ps_state(adapter, PS_NONE); + break; + default: + rsi_dbg(ERR_ZONE, + "Invalid PS confirm type %x in state %s\n", + cfm_type, str_psstate(adapter->ps_state)); + return -1; + } + + return 0; +} + diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c index e5ea99bb2dd8..8d3a4839b6ef 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c @@ -138,12 +138,15 @@ static int rsi_issue_sdiocommand(struct sdio_func *func, static void rsi_handle_interrupt(struct sdio_func *function) { struct rsi_hw *adapter = sdio_get_drvdata(function); + struct rsi_91x_sdiodev *dev = + (struct rsi_91x_sdiodev *)adapter->rsi_dev; if (adapter->priv->fsm_state == FSM_FW_NOT_LOADED) return; - sdio_release_host(function); + + 
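+	/*
+	 * Remember the task running this ISR: the SDIO host is already
+	 * claimed in this context, so register accesses issued from it
+	 * skip sdio_claim_host()/sdio_release_host() and cannot deadlock
+	 * on the host lock.
+	 */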
dev->sdio_irq_task = current; rsi_interrupt_handler(adapter); - sdio_claim_host(function); + dev->sdio_irq_task = NULL; } /** @@ -219,26 +222,18 @@ static void rsi_reset_card(struct sdio_func *pfunction) if (err) rsi_dbg(ERR_ZONE, "%s: CMD0 failed : %d\n", __func__, err); - if (!host->ocr_avail) { - /* Issue CMD5, arg = 0 */ - err = rsi_issue_sdiocommand(pfunction, - SD_IO_SEND_OP_COND, - 0, - (MMC_RSP_R4 | MMC_CMD_BCR), - &resp); - if (err) - rsi_dbg(ERR_ZONE, "%s: CMD5 failed : %d\n", - __func__, err); - host->ocr_avail = resp; - } + /* Issue CMD5, arg = 0 */ + err = rsi_issue_sdiocommand(pfunction, SD_IO_SEND_OP_COND, 0, + (MMC_RSP_R4 | MMC_CMD_BCR), &resp); + if (err) + rsi_dbg(ERR_ZONE, "%s: CMD5 failed : %d\n", __func__, err); + card->ocr = resp; /* Issue CMD5, arg = ocr. Wait till card is ready */ for (i = 0; i < 100; i++) { - err = rsi_issue_sdiocommand(pfunction, - SD_IO_SEND_OP_COND, - host->ocr_avail, - (MMC_RSP_R4 | MMC_CMD_BCR), - &resp); + err = rsi_issue_sdiocommand(pfunction, SD_IO_SEND_OP_COND, + card->ocr, + (MMC_RSP_R4 | MMC_CMD_BCR), &resp); if (err) { rsi_dbg(ERR_ZONE, "%s: CMD5 failed : %d\n", __func__, err); @@ -415,14 +410,16 @@ int rsi_sdio_read_register(struct rsi_hw *adapter, u8 fun_num = 0; int status; - sdio_claim_host(dev->pfunction); + if (likely(dev->sdio_irq_task != current)) + sdio_claim_host(dev->pfunction); if (fun_num == 0) *data = sdio_f0_readb(dev->pfunction, addr, &status); else *data = sdio_readb(dev->pfunction, addr, &status); - sdio_release_host(dev->pfunction); + if (likely(dev->sdio_irq_task != current)) + sdio_release_host(dev->pfunction); return status; } @@ -446,14 +443,16 @@ int rsi_sdio_write_register(struct rsi_hw *adapter, (struct rsi_91x_sdiodev *)adapter->rsi_dev; int status = 0; - sdio_claim_host(dev->pfunction); + if (likely(dev->sdio_irq_task != current)) + sdio_claim_host(dev->pfunction); if (function == 0) sdio_f0_writeb(dev->pfunction, *data, addr, &status); else sdio_writeb(dev->pfunction, *data, addr, &status); - sdio_release_host(dev->pfunction); + if (likely(dev->sdio_irq_task != current)) + sdio_release_host(dev->pfunction); return status; } @@ -498,11 +497,13 @@ static int rsi_sdio_read_register_multiple(struct rsi_hw *adapter, (struct rsi_91x_sdiodev *)adapter->rsi_dev; u32 status; - sdio_claim_host(dev->pfunction); + if (likely(dev->sdio_irq_task != current)) + sdio_claim_host(dev->pfunction); status = sdio_readsb(dev->pfunction, data, addr, count); - sdio_release_host(dev->pfunction); + if (likely(dev->sdio_irq_task != current)) + sdio_release_host(dev->pfunction); if (status != 0) rsi_dbg(ERR_ZONE, "%s: Synch Cmd53 read failed\n", __func__); @@ -540,11 +541,13 @@ int rsi_sdio_write_register_multiple(struct rsi_hw *adapter, dev->write_fail++; } - sdio_claim_host(dev->pfunction); + if (likely(dev->sdio_irq_task != current)) + sdio_claim_host(dev->pfunction); status = sdio_writesb(dev->pfunction, addr, data, count); - sdio_release_host(dev->pfunction); + if (likely(dev->sdio_irq_task != current)) + sdio_release_host(dev->pfunction); if (status) { rsi_dbg(ERR_ZONE, "%s: Synch Cmd53 write failed %d\n", @@ -581,7 +584,6 @@ static int rsi_sdio_load_data_master_write(struct rsi_hw *adapter, } for (offset = 0, i = 0; i < num_blocks; i++, offset += block_size) { - memset(temp_buf, 0, block_size); memcpy(temp_buf, ta_firmware + offset, block_size); lsb_address = (u16)base_address; status = rsi_sdio_write_register_multiple @@ -857,7 +859,7 @@ static int rsi_init_sdio_interface(struct rsi_hw *adapter, sdio_release_host(pfunction); 
adapter->determine_event_timeout = rsi_sdio_determine_event_timeout; - adapter->check_hw_queue_status = rsi_sdio_read_buffer_status_register; + adapter->check_hw_queue_status = rsi_sdio_check_buffer_status; #ifdef CONFIG_RSI_DEBUGFS adapter->num_debugfs_entries = MAX_DEBUGFS_ENTRIES; @@ -941,6 +943,84 @@ static int rsi_probe(struct sdio_func *pfunction, return 1; } +static void ulp_read_write(struct rsi_hw *adapter, u16 addr, u32 data, + u16 len_in_bits) +{ + rsi_sdio_master_reg_write(adapter, RSI_GSPI_DATA_REG1, + ((addr << 6) | ((data >> 16) & 0xffff)), 2); + rsi_sdio_master_reg_write(adapter, RSI_GSPI_DATA_REG0, + (data & 0xffff), 2); + rsi_sdio_master_reg_write(adapter, RSI_GSPI_CTRL_REG0, + RSI_GSPI_CTRL_REG0_VALUE, 2); + rsi_sdio_master_reg_write(adapter, RSI_GSPI_CTRL_REG1, + ((len_in_bits - 1) | RSI_GSPI_TRIG), 2); + msleep(20); +} + +/*This function resets and re-initializes the chip.*/ +static void rsi_reset_chip(struct rsi_hw *adapter) +{ + __le32 data; + u8 sdio_interrupt_status = 0; + u8 request = 1; + int ret; + + rsi_dbg(INFO_ZONE, "Writing disable to wakeup register\n"); + ret = rsi_sdio_write_register(adapter, 0, SDIO_WAKEUP_REG, &request); + if (ret < 0) { + rsi_dbg(ERR_ZONE, + "%s: Failed to write SDIO wakeup register\n", __func__); + return; + } + msleep(20); + ret = rsi_sdio_read_register(adapter, RSI_FN1_INT_REGISTER, + &sdio_interrupt_status); + if (ret < 0) { + rsi_dbg(ERR_ZONE, "%s: Failed to Read Intr Status Register\n", + __func__); + return; + } + rsi_dbg(INFO_ZONE, "%s: Intr Status Register value = %d\n", + __func__, sdio_interrupt_status); + + /* Put Thread-Arch processor on hold */ + if (rsi_sdio_master_access_msword(adapter, TA_BASE_ADDR)) { + rsi_dbg(ERR_ZONE, + "%s: Unable to set ms word to common reg\n", + __func__); + return; + } + + data = TA_HOLD_THREAD_VALUE; + if (rsi_sdio_write_register_multiple(adapter, TA_HOLD_THREAD_REG | + RSI_SD_REQUEST_MASTER, + (u8 *)&data, 4)) { + rsi_dbg(ERR_ZONE, + "%s: Unable to hold Thread-Arch processor threads\n", + __func__); + return; + } + + /* This msleep will ensure Thread-Arch processor to go to hold + * and any pending dma transfers to rf spi in device to finish. + */ + msleep(100); + + ulp_read_write(adapter, RSI_ULP_RESET_REG, RSI_ULP_WRITE_0, 32); + ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_1, RSI_ULP_WRITE_2, 32); + ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_2, RSI_ULP_WRITE_0, 32); + ulp_read_write(adapter, RSI_WATCH_DOG_DELAY_TIMER_1, RSI_ULP_WRITE_50, + 32); + ulp_read_write(adapter, RSI_WATCH_DOG_DELAY_TIMER_2, RSI_ULP_WRITE_0, + 32); + ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_ENABLE, + RSI_ULP_TIMER_ENABLE, 32); + /* This msleep will be sufficient for the ulp + * read write operations to complete for chip reset. + */ + msleep(500); +} + /** * rsi_disconnect() - This function performs the reverse of the probe function. * @pfunction: Pointer to the sdio_func structure. 
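 * The teardown now releases the SDIO IRQ first, detaches mac80211, resets
 * the chip through the ULP/watchdog registers, and only then resets the
 * card and disables the SDIO function before deinitializing the driver
 * state, so that a re-load of the driver starts from a clean card.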
@@ -956,17 +1036,26 @@ static void rsi_disconnect(struct sdio_func *pfunction) return; dev = (struct rsi_91x_sdiodev *)adapter->rsi_dev; - - dev->write_fail = 2; - rsi_mac80211_detach(adapter); - sdio_claim_host(pfunction); sdio_release_irq(pfunction); - sdio_disable_func(pfunction); - rsi_91x_deinit(adapter); - /* Resetting to take care of the case, where-in driver is re-loaded */ - rsi_reset_card(pfunction); sdio_release_host(pfunction); + mdelay(10); + + rsi_mac80211_detach(adapter); + mdelay(10); + + /* Reset Chip */ + rsi_reset_chip(adapter); + + /* Resetting to take care of the case, where-in driver is re-loaded */ + sdio_claim_host(pfunction); + rsi_reset_card(pfunction); + sdio_disable_func(pfunction); + sdio_release_host(pfunction); + dev->write_fail = 2; + rsi_91x_deinit(adapter); + rsi_dbg(ERR_ZONE, "##### RSI SDIO device disconnected #####\n"); + } #ifdef CONFIG_PM diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c index df2a63b1f15c..8e2a95c486b0 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c @@ -69,20 +69,37 @@ int rsi_sdio_master_access_msword(struct rsi_hw *adapter, u16 ms_word) static int rsi_process_pkt(struct rsi_common *common) { struct rsi_hw *adapter = common->priv; + struct rsi_91x_sdiodev *dev = + (struct rsi_91x_sdiodev *)adapter->rsi_dev; u8 num_blks = 0; u32 rcv_pkt_len = 0; int status = 0; + u8 value = 0; - status = rsi_sdio_read_register(adapter, - SDIO_RX_NUM_BLOCKS_REG, - &num_blks); + num_blks = ((adapter->interrupt_status & 1) | + ((adapter->interrupt_status >> RECV_NUM_BLOCKS) << 1)); - if (status) { - rsi_dbg(ERR_ZONE, - "%s: Failed to read pkt length from the card:\n", - __func__); - return status; + if (!num_blks) { + status = rsi_sdio_read_register(adapter, + SDIO_RX_NUM_BLOCKS_REG, + &value); + if (status) { + rsi_dbg(ERR_ZONE, + "%s: Failed to read pkt length from the card:\n", + __func__); + return status; + } + num_blks = value & 0x1f; } + + if (dev->write_fail == 2) + rsi_sdio_ack_intr(common->priv, (1 << MSDU_PKT_PENDING)); + + if (unlikely(!num_blks)) { + dev->write_fail = 2; + return -1; + } + rcv_pkt_len = (num_blks * 256); common->rx_data_pkt = kmalloc(rcv_pkt_len, GFP_KERNEL); @@ -213,7 +230,7 @@ void rsi_interrupt_handler(struct rsi_hw *adapter) dev->rx_info.sdio_int_counter++; do { - mutex_lock(&common->tx_rxlock); + mutex_lock(&common->rx_lock); status = rsi_sdio_read_register(common->priv, RSI_FN1_INT_REGISTER, &isr_status); @@ -221,14 +238,15 @@ void rsi_interrupt_handler(struct rsi_hw *adapter) rsi_dbg(ERR_ZONE, "%s: Failed to Read Intr Status Register\n", __func__); - mutex_unlock(&common->tx_rxlock); + mutex_unlock(&common->rx_lock); return; } + adapter->interrupt_status = isr_status; if (isr_status == 0) { rsi_set_event(&common->tx_thread.event); dev->rx_info.sdio_intr_status_zero++; - mutex_unlock(&common->tx_rxlock); + mutex_unlock(&common->rx_lock); return; } @@ -241,10 +259,12 @@ void rsi_interrupt_handler(struct rsi_hw *adapter) switch (isr_type) { case BUFFER_AVAILABLE: - dev->rx_info.watch_bufferfull_count = 0; - dev->rx_info.buffer_full = false; - dev->rx_info.semi_buffer_full = false; - dev->rx_info.mgmt_buffer_full = false; + status = rsi_sdio_check_buffer_status(adapter, + 0); + if (status < 0) + rsi_dbg(ERR_ZONE, + "%s: Failed to check buffer status\n", + __func__); rsi_sdio_ack_intr(common->priv, (1 << PKT_BUFF_AVAILABLE)); rsi_set_event(&common->tx_thread.event); @@ -252,7 +272,7 @@ void 
rsi_interrupt_handler(struct rsi_hw *adapter) rsi_dbg(ISR_ZONE, "%s: ==> BUFFER_AVAILABLE <==\n", __func__); - dev->rx_info.buf_available_counter++; + dev->buff_status_updated = true; break; case FIRMWARE_ASSERT_IND: @@ -286,7 +306,7 @@ void rsi_interrupt_handler(struct rsi_hw *adapter) rsi_dbg(ERR_ZONE, "%s: Failed to read pkt\n", __func__); - mutex_unlock(&common->tx_rxlock); + mutex_unlock(&common->rx_lock); return; } break; @@ -301,28 +321,28 @@ void rsi_interrupt_handler(struct rsi_hw *adapter) } isr_status ^= BIT(isr_type - 1); } while (isr_status); - mutex_unlock(&common->tx_rxlock); + mutex_unlock(&common->rx_lock); } while (1); } -/** - * rsi_sdio_read_buffer_status_register() - This function is used to the read - * buffer status register and set - * relevant fields in - * rsi_91x_sdiodev struct. - * @adapter: Pointer to the driver hw structure. - * @q_num: The Q number whose status is to be found. - * - * Return: status: -1 on failure or else queue full/stop is indicated. +/* This function is used to read buffer status register and + * set relevant fields in rsi_91x_sdiodev struct. */ -int rsi_sdio_read_buffer_status_register(struct rsi_hw *adapter, u8 q_num) +int rsi_sdio_check_buffer_status(struct rsi_hw *adapter, u8 q_num) { struct rsi_common *common = adapter->priv; struct rsi_91x_sdiodev *dev = (struct rsi_91x_sdiodev *)adapter->rsi_dev; u8 buf_status = 0; int status = 0; + static int counter = 4; + if (!dev->buff_status_updated && counter) { + counter--; + goto out; + } + + dev->buff_status_updated = false; status = rsi_sdio_read_register(common->priv, RSI_DEVICE_BUFFER_STATUS_REGISTER, &buf_status); @@ -357,10 +377,16 @@ int rsi_sdio_read_buffer_status_register(struct rsi_hw *adapter, u8 q_num) dev->rx_info.semi_buffer_full = false; } + if (dev->rx_info.mgmt_buffer_full || dev->rx_info.buf_full_counter) + counter = 1; + else + counter = 4; + +out: if ((q_num == MGMT_SOFT_Q) && (dev->rx_info.mgmt_buffer_full)) return QUEUE_FULL; - if (dev->rx_info.buffer_full) + if ((q_num < MGMT_SOFT_Q) && (dev->rx_info.buffer_full)) return QUEUE_FULL; return QUEUE_NOT_FULL; diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c index bcd7f454ef30..81df09dd2636 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c @@ -29,19 +29,24 @@ * Return: status: 0 on success, a negative error code on failure. 
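 * The payload is copied into the interface's pre-allocated tx_buffer behind
 * RSI_USB_TX_HEAD_ROOM bytes of zeroed headroom before the bulk transfer is
 * issued on the selected bulk-out endpoint.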
*/ static int rsi_usb_card_write(struct rsi_hw *adapter, - void *buf, + u8 *buf, u16 len, u8 endpoint) { struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev; int status; - s32 transfer; + u8 *seg = dev->tx_buffer; + int transfer; + int ep = dev->bulkout_endpoint_addr[endpoint - 1]; + memset(seg, 0, len + RSI_USB_TX_HEAD_ROOM); + memcpy(seg + RSI_USB_TX_HEAD_ROOM, buf, len); + len += RSI_USB_TX_HEAD_ROOM; + transfer = len; status = usb_bulk_msg(dev->usbdev, - usb_sndbulkpipe(dev->usbdev, - dev->bulkout_endpoint_addr[endpoint - 1]), - buf, - len, + usb_sndbulkpipe(dev->usbdev, ep), + (void *)seg, + (int)len, &transfer, HZ * 5); @@ -68,23 +73,19 @@ static int rsi_write_multiple(struct rsi_hw *adapter, u8 *data, u32 count) { - struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev; - u8 *seg = dev->tx_buffer; + struct rsi_91x_usbdev *dev = + (struct rsi_91x_usbdev *)adapter->rsi_dev; + + if (!adapter) + return -ENODEV; + + if (endpoint == 0) + return -EINVAL; if (dev->write_fail) - return 0; + return -ENETDOWN; - if (endpoint == MGMT_EP) { - memset(seg, 0, RSI_USB_TX_HEAD_ROOM); - memcpy(seg + RSI_USB_TX_HEAD_ROOM, data, count); - } else { - seg = ((u8 *)data - RSI_USB_TX_HEAD_ROOM); - } - - return rsi_usb_card_write(adapter, - seg, - count + RSI_USB_TX_HEAD_ROOM, - endpoint); + return rsi_usb_card_write(adapter, data, count, endpoint); } /** @@ -161,10 +162,13 @@ static int rsi_usb_reg_read(struct usb_device *usbdev, u8 *buf; int status = -ENOMEM; - buf = kmalloc(0x04, GFP_KERNEL); + buf = kmalloc(RSI_USB_CTRL_BUF_SIZE, GFP_KERNEL); if (!buf) return status; + if (len > RSI_USB_CTRL_BUF_SIZE) + return -EINVAL; + status = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), USB_VENDOR_REGISTER_READ, @@ -203,10 +207,13 @@ static int rsi_usb_reg_write(struct usb_device *usbdev, u8 *usb_reg_buf; int status = -ENOMEM; - usb_reg_buf = kmalloc(0x04, GFP_KERNEL); + usb_reg_buf = kmalloc(RSI_USB_CTRL_BUF_SIZE, GFP_KERNEL); if (!usb_reg_buf) return status; + if (len > RSI_USB_CTRL_BUF_SIZE) + return -EINVAL; + usb_reg_buf[0] = (value & 0x00ff); usb_reg_buf[1] = (value & 0xff00) >> 8; usb_reg_buf[2] = 0x0; @@ -380,10 +387,11 @@ static int rsi_usb_host_intf_write_pkt(struct rsi_hw *adapter, u8 *pkt, u32 len) { - u32 queueno = ((pkt[1] >> 4) & 0xf); + u32 queueno = ((pkt[1] >> 4) & 0x7); u8 endpoint; - endpoint = ((queueno == RSI_WIFI_MGMT_Q) ? MGMT_EP : DATA_EP); + endpoint = ((queueno == RSI_WIFI_MGMT_Q || queueno == RSI_WIFI_DATA_Q || + queueno == RSI_COEX_Q) ? 
WLAN_EP : BT_EP); return rsi_write_multiple(adapter, endpoint, @@ -396,8 +404,15 @@ static int rsi_usb_master_reg_read(struct rsi_hw *adapter, u32 reg, { struct usb_device *usbdev = ((struct rsi_91x_usbdev *)adapter->rsi_dev)->usbdev; + u16 temp; + int ret; - return rsi_usb_reg_read(usbdev, reg, (u16 *)value, len); + ret = rsi_usb_reg_read(usbdev, reg, &temp, len); + if (ret < 0) + return ret; + *value = temp; + + return 0; } static int rsi_usb_master_reg_write(struct rsi_hw *adapter, @@ -424,7 +439,6 @@ static int rsi_usb_load_data_master_write(struct rsi_hw *adapter, rsi_dbg(INFO_ZONE, "num_blocks: %d\n", num_blocks); for (cur_indx = 0, i = 0; i < num_blocks; i++, cur_indx += block_size) { - memset(temp_buf, 0, block_size); memcpy(temp_buf, ta_firmware + cur_indx, block_size); status = rsi_usb_write_register_multiple(adapter, base_address, (u8 *)(temp_buf), @@ -558,6 +572,77 @@ static int rsi_init_usb_interface(struct rsi_hw *adapter, return status; } +static int usb_ulp_read_write(struct rsi_hw *adapter, u16 addr, u32 data, + u16 len_in_bits) +{ + int ret; + + ret = rsi_usb_master_reg_write + (adapter, RSI_GSPI_DATA_REG1, + ((addr << 6) | ((data >> 16) & 0xffff)), 2); + if (ret < 0) + return ret; + + ret = rsi_usb_master_reg_write(adapter, RSI_GSPI_DATA_REG0, + (data & 0xffff), 2); + if (ret < 0) + return ret; + + /* Initializing GSPI for ULP read/writes */ + rsi_usb_master_reg_write(adapter, RSI_GSPI_CTRL_REG0, + RSI_GSPI_CTRL_REG0_VALUE, 2); + + ret = rsi_usb_master_reg_write(adapter, RSI_GSPI_CTRL_REG1, + ((len_in_bits - 1) | RSI_GSPI_TRIG), 2); + if (ret < 0) + return ret; + + msleep(20); + + return 0; +} + +static int rsi_reset_card(struct rsi_hw *adapter) +{ + int ret; + + rsi_dbg(INFO_ZONE, "Resetting Card...\n"); + rsi_usb_master_reg_write(adapter, RSI_TA_HOLD_REG, 0xE, 4); + + /* This msleep will ensure Thread-Arch processor to go to hold + * and any pending dma transfers to rf in device to finish. + */ + msleep(100); + + ret = usb_ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_1, + RSI_ULP_WRITE_2, 32); + if (ret < 0) + goto fail; + ret = usb_ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_2, + RSI_ULP_WRITE_0, 32); + if (ret < 0) + goto fail; + ret = usb_ulp_read_write(adapter, RSI_WATCH_DOG_DELAY_TIMER_1, + RSI_ULP_WRITE_50, 32); + if (ret < 0) + goto fail; + ret = usb_ulp_read_write(adapter, RSI_WATCH_DOG_DELAY_TIMER_2, + RSI_ULP_WRITE_0, 32); + if (ret < 0) + goto fail; + ret = usb_ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_ENABLE, + RSI_ULP_TIMER_ENABLE, 32); + if (ret < 0) + goto fail; + + rsi_dbg(INFO_ZONE, "Reset card done\n"); + return ret; + +fail: + rsi_dbg(ERR_ZONE, "Reset card failed\n"); + return ret; +} + /** * rsi_probe() - This function is called by kernel when the driver provided * Vendor and device IDs are matched. 
All the initialization @@ -641,6 +726,7 @@ static void rsi_disconnect(struct usb_interface *pfunction) return; rsi_mac80211_detach(adapter); + rsi_reset_card(adapter); rsi_deinit_usb_interface(adapter); rsi_91x_deinit(adapter); diff --git a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c index d3e0a07604a6..465692b3c351 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c @@ -37,14 +37,14 @@ void rsi_usb_rx_thread(struct rsi_common *common) if (atomic_read(&dev->rx_thread.thread_done)) goto out; - mutex_lock(&common->tx_rxlock); + mutex_lock(&common->rx_lock); status = rsi_read_pkt(common, 0); if (status) { rsi_dbg(ERR_ZONE, "%s: Failed To read data", __func__); - mutex_unlock(&common->tx_rxlock); + mutex_unlock(&common->rx_lock); return; } - mutex_unlock(&common->tx_rxlock); + mutex_unlock(&common->rx_lock); rsi_reset_event(&dev->rx_thread.event); if (adapter->rx_urb_submit(adapter)) { rsi_dbg(ERR_ZONE, diff --git a/drivers/net/wireless/rsi/rsi_common.h b/drivers/net/wireless/rsi/rsi_common.h index 44349696f5de..e579d694d13c 100644 --- a/drivers/net/wireless/rsi/rsi_common.h +++ b/drivers/net/wireless/rsi/rsi_common.h @@ -83,4 +83,5 @@ u16 rsi_get_connected_channel(struct rsi_hw *adapter); struct rsi_hw *rsi_91x_init(void); void rsi_91x_deinit(struct rsi_hw *adapter); int rsi_read_pkt(struct rsi_common *common, s32 rcv_pkt_len); +struct rsi_sta *rsi_find_sta(struct rsi_common *common, u8 *mac_addr); #endif diff --git a/drivers/net/wireless/rsi/rsi_hal.h b/drivers/net/wireless/rsi/rsi_hal.h index 902dc540849c..7c145053da6d 100644 --- a/drivers/net/wireless/rsi/rsi_hal.h +++ b/drivers/net/wireless/rsi/rsi_hal.h @@ -52,6 +52,39 @@ #define FW_LOADING_SUCCESSFUL 'S' #define LOADING_INITIATED '1' +#define RSI_ULP_RESET_REG 0x161 +#define RSI_WATCH_DOG_TIMER_1 0x16c +#define RSI_WATCH_DOG_TIMER_2 0x16d +#define RSI_WATCH_DOG_DELAY_TIMER_1 0x16e +#define RSI_WATCH_DOG_DELAY_TIMER_2 0x16f +#define RSI_WATCH_DOG_TIMER_ENABLE 0x170 + +#define RSI_ULP_WRITE_0 00 +#define RSI_ULP_WRITE_2 02 +#define RSI_ULP_WRITE_50 50 + +#define RSI_RESTART_WDT BIT(11) +#define RSI_BYPASS_ULP_ON_WDT BIT(1) + +#define RSI_ULP_TIMER_ENABLE ((0xaa000) | RSI_RESTART_WDT | \ + RSI_BYPASS_ULP_ON_WDT) +#define RSI_RF_SPI_PROG_REG_BASE_ADDR 0x40080000 + +#define RSI_GSPI_CTRL_REG0 (RSI_RF_SPI_PROG_REG_BASE_ADDR) +#define RSI_GSPI_CTRL_REG1 (RSI_RF_SPI_PROG_REG_BASE_ADDR + 0x2) +#define RSI_GSPI_DATA_REG0 (RSI_RF_SPI_PROG_REG_BASE_ADDR + 0x4) +#define RSI_GSPI_DATA_REG1 (RSI_RF_SPI_PROG_REG_BASE_ADDR + 0x6) +#define RSI_GSPI_DATA_REG2 (RSI_RF_SPI_PROG_REG_BASE_ADDR + 0x8) + +#define RSI_GSPI_CTRL_REG0_VALUE 0x340 + +#define RSI_GSPI_DMA_MODE BIT(13) + +#define RSI_GSPI_2_ULP BIT(12) +#define RSI_GSPI_TRIG BIT(7) +#define RSI_GSPI_READ BIT(6) +#define RSI_GSPI_RF_SPI_ACTIVE BIT(8) + /* Boot loader commands */ #define SEND_RPS_FILE '2' @@ -66,6 +99,8 @@ #define RSI_DEV_OPMODE_WIFI_ALONE 1 #define RSI_DEV_COEX_MODE_WIFI_ALONE 1 +#define BBP_INFO_40MHZ 0x6 + struct bl_header { __le32 flags; __le32 image_no; @@ -79,6 +114,37 @@ struct ta_metadata { unsigned int address; }; +struct rsi_mgmt_desc { + __le16 len_qno; + u8 frame_type; + u8 misc_flags; + u8 xtend_desc_size; + u8 header_len; + __le16 frame_info; + u8 rate_info; + u8 reserved1; + __le16 bbp_info; + __le16 seq_ctrl; + u8 reserved2; + u8 sta_id; +} __packed; + +struct rsi_data_desc { + __le16 len_qno; + u8 cfm_frame_type; + u8 misc_flags; + u8 xtend_desc_size; + u8 header_len; + 
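+	/* together with the fields above, the remaining members pack into
+	 * the 16-byte (FRAME_DESC_SZ) host descriptor
+	 */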
__le16 frame_info; + __le16 rate_info; + __le16 bbp_info; + __le16 mac_flags; + u8 qid_tid; + u8 sta_id; +} __packed; + int rsi_hal_device_init(struct rsi_hw *adapter); +int rsi_prepare_beacon(struct rsi_common *common, struct sk_buff *skb); +int rsi_send_pkt_to_bus(struct rsi_common *common, struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h index f3985250b593..2c18dde633ea 100644 --- a/drivers/net/wireless/rsi/rsi_main.h +++ b/drivers/net/wireless/rsi/rsi_main.h @@ -21,6 +21,17 @@ #include #include +struct rsi_sta { + struct ieee80211_sta *sta; + s16 sta_id; + u16 seq_start[IEEE80211_NUM_TIDS]; + bool start_tx_aggr[IEEE80211_NUM_TIDS]; +}; + +struct rsi_hw; + +#include "rsi_ps.h" + #define ERR_ZONE BIT(0) /* For Error Msgs */ #define INFO_ZONE BIT(1) /* For General Status Msgs */ #define INIT_ZONE BIT(2) /* For Driver Init Seq Msgs */ @@ -37,10 +48,13 @@ enum RSI_FSM_STATES { FSM_COMMON_DEV_PARAMS_SENT, FSM_BOOT_PARAMS_SENT, FSM_EEPROM_READ_MAC_ADDR, + FSM_EEPROM_READ_RF_TYPE, FSM_RESET_MAC_SENT, FSM_RADIO_CAPS_SENT, FSM_BB_RF_PROG_SENT, - FSM_MAC_INIT_DONE + FSM_MAC_INIT_DONE, + + NUM_FSM_STATES }; extern u32 rsi_zone_enabled; @@ -51,18 +65,24 @@ extern __printf(2, 3) void rsi_dbg(u32 zone, const char *fmt, ...); #define IEEE80211_ADDR_LEN 6 #define FRAME_DESC_SZ 16 #define MIN_802_11_HDR_LEN 24 +#define RSI_DEF_KEEPALIVE 90 #define DATA_QUEUE_WATER_MARK 400 #define MIN_DATA_QUEUE_WATER_MARK 300 #define MULTICAST_WATER_MARK 200 #define MAC_80211_HDR_FRAME_CONTROL 0 #define WME_NUM_AC 4 -#define NUM_SOFT_QUEUES 5 -#define MAX_HW_QUEUES 8 +#define NUM_SOFT_QUEUES 6 +#define MAX_HW_QUEUES 12 #define INVALID_QUEUE 0xff #define MAX_CONTINUOUS_VO_PKTS 8 #define MAX_CONTINUOUS_VI_PKTS 4 +/* Hardware queue info */ +#define BROADCAST_HW_Q 9 +#define MGMT_HW_Q 10 +#define BEACON_HW_Q 11 + /* Queue information */ #define RSI_COEX_Q 0x0 #define RSI_WIFI_MGMT_Q 0x4 @@ -70,6 +90,7 @@ extern __printf(2, 3) void rsi_dbg(u32 zone, const char *fmt, ...); #define IEEE80211_MGMT_FRAME 0x00 #define IEEE80211_CTL_FRAME 0x04 +#define RSI_MAX_ASSOC_STAS 32 #define IEEE80211_QOS_TID 0x0f #define IEEE80211_NONQOS_TID 16 @@ -102,6 +123,7 @@ struct skb_info { u16 channel; s8 tid; s8 sta_id; + u8 internal_hdr_size; }; enum edca_queue { @@ -109,7 +131,8 @@ enum edca_queue { BE_Q, VI_Q, VO_Q, - MGMT_SOFT_Q + MGMT_SOFT_Q, + MGMT_BEACON_Q }; struct security_info { @@ -126,8 +149,8 @@ struct wmm_qinfo { }; struct transmit_q_stats { - u32 total_tx_pkt_send[NUM_EDCA_QUEUES + 1]; - u32 total_tx_pkt_freed[NUM_EDCA_QUEUES + 1]; + u32 total_tx_pkt_send[NUM_EDCA_QUEUES + 2]; + u32 total_tx_pkt_freed[NUM_EDCA_QUEUES + 2]; }; struct vif_priv { @@ -155,7 +178,18 @@ struct cqm_info { u32 rssi_hyst; }; -struct rsi_hw; +struct xtended_desc { + u8 confirm_frame_type; + u8 retry_cnt; + u16 reserved; +}; + +enum rsi_dfs_regions { + RSI_REGION_FCC = 0, + RSI_REGION_ETSI, + RSI_REGION_TELEC, + RSI_REGION_WORLD +}; struct rsi_common { struct rsi_hw *priv; @@ -166,15 +200,18 @@ struct rsi_common { struct version_info fw_ver; struct rsi_thread tx_thread; - struct sk_buff_head tx_queue[NUM_EDCA_QUEUES + 1]; + struct sk_buff_head tx_queue[NUM_EDCA_QUEUES + 2]; /* Mutex declaration */ struct mutex mutex; - /* Mutex used between tx/rx threads */ - struct mutex tx_rxlock; + /* Mutex used for tx thread */ + struct mutex tx_lock; + /* Mutex used for rx thread */ + struct mutex rx_lock; u8 endpoint; /* Channel/band related */ u8 band; + u8 num_supp_bands; u8 channel_width; u16 
rts_threshold; @@ -216,11 +253,23 @@ struct rsi_common { u16 oper_mode; u8 lp_ps_handshake_mode; u8 ulp_ps_handshake_mode; + u8 uapsd_bitmap; u8 rf_power_val; u8 wlan_rf_power_mode; u8 obm_ant_sel_val; int tx_power; u8 ant_in_use; + + u16 beacon_interval; + u8 dtim_cnt; + + /* AP mode parameters */ + u8 beacon_enabled; + u16 beacon_cnt; + struct rsi_sta stations[RSI_MAX_ASSOC_STAS + 1]; + int num_stations; + int max_stations; + struct ieee80211_key_conf *key; }; enum host_intf { @@ -228,6 +277,19 @@ enum host_intf { RSI_HOST_INTF_USB }; +struct eepromrw_info { + u32 offset; + u32 length; + u8 write; + u16 eeprom_erase; + u8 data[480]; +}; + +struct eeprom_read { + u16 length; + u16 off_set; +}; + struct rsi_hw { struct rsi_common *priv; u8 device_model; @@ -241,6 +303,9 @@ struct rsi_hw { enum host_intf rsi_host_intf; u16 block_size; + enum ps_state ps_state; + struct rsi_ps_info ps_info; + spinlock_t ps_lock; /*To protect power save config*/ u32 usb_buffer_status_reg; #ifdef CONFIG_RSI_DEBUGFS struct rsi_debugfs *dfsentry; @@ -250,7 +315,10 @@ struct rsi_hw { struct timer_list bl_cmd_timer; bool blcmd_timer_expired; u32 flash_capacity; + struct eepromrw_info eeprom; + u32 interrupt_status; u8 dfs_region; + char country[2]; void *rsi_dev; struct rsi_host_intf_ops *host_intf_ops; int (*check_hw_queue_status)(struct rsi_hw *adapter, u8 q_num); diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h index dcb6db728cbd..c6e1fa669a27 100644 --- a/drivers/net/wireless/rsi/rsi_mgmt.h +++ b/drivers/net/wireless/rsi/rsi_mgmt.h @@ -43,11 +43,13 @@ #define WLAN_HOST_MODE_LEN 0x04 #define WLAN_FW_VERSION_LEN 0x08 #define MAGIC_WORD 0x5A +#define WLAN_EEPROM_RFTYPE_ADDR 424 /* Receive Frame Types */ #define TA_CONFIRM_TYPE 0x01 #define RX_DOT11_MGMT 0x02 #define TX_STATUS_IND 0x04 +#define BEACON_EVENT_IND 0x08 #define PROBEREQ_CONFIRM 2 #define CARD_READY_IND 0x00 @@ -61,8 +63,20 @@ #define BBP_REG_WRITE 0 #define RF_RESET_ENABLE BIT(3) #define RATE_INFO_ENABLE BIT(0) +#define MORE_DATA_PRESENT BIT(1) #define RSI_BROADCAST_PKT BIT(9) +#define RSI_DESC_REQUIRE_CFM_TO_HOST BIT(2) +#define RSI_ADD_DELTA_TSF_VAP_ID BIT(3) +#define RSI_FETCH_RETRY_CNT_FRM_HST BIT(4) +#define RSI_QOS_ENABLE BIT(12) +#define RSI_REKEY_PURPOSE BIT(13) +#define RSI_ENCRYPT_PKT BIT(15) +#define RSI_SET_PS_ENABLE BIT(12) +#define RSI_CMDDESC_40MHZ BIT(4) +#define RSI_CMDDESC_UPPER_20_ENABLE BIT(5) +#define RSI_CMDDESC_LOWER_20_ENABLE BIT(6) +#define RSI_CMDDESC_FULL_40_ENABLE (BIT(5) | BIT(6)) #define UPPER_20_ENABLE (0x2 << 12) #define LOWER_20_ENABLE (0x4 << 12) #define FULL40M_ENABLE 0x6 @@ -120,6 +134,7 @@ #define RSI_RATE_MCS6 0x106 #define RSI_RATE_MCS7 0x107 #define RSI_RATE_MCS7_SG 0x307 +#define RSI_RATE_AUTO 0xffff #define BW_20MHZ 0 #define BW_40MHZ 1 @@ -143,6 +158,8 @@ #define ANTENNA_SEL_INT 0x02 /* RF_OUT_2 / Integerated */ #define ANTENNA_SEL_UFL 0x03 /* RF_OUT_1 / U.FL */ +#define ANTENNA_MASK_VALUE 0x00ff +#define ANTENNA_SEL_TYPE 1 /* Rx filter word definitions */ #define PROMISCOUS_MODE BIT(0) @@ -153,9 +170,38 @@ #define ALLOW_CONN_PEER_MGMT_WHILE_BUF_FULL BIT(5) #define DISALLOW_BROADCAST_DATA BIT(6) +#define RSI_MPDU_DENSITY 0x8 +#define RSI_CHAN_RADAR BIT(7) +#define RSI_BEACON_INTERVAL 200 +#define RSI_DTIM_COUNT 2 + +#define RSI_PS_DISABLE_IND BIT(15) +#define RSI_PS_ENABLE 1 +#define RSI_PS_DISABLE 0 +#define RSI_DEEP_SLEEP 1 +#define RSI_CONNECTED_SLEEP 2 +#define RSI_SLEEP_REQUEST 1 +#define RSI_WAKEUP_REQUEST 2 + +#define RSI_IEEE80211_UAPSD_QUEUES \ + 
(IEEE80211_WMM_IE_STA_QOSINFO_AC_VO | \ + IEEE80211_WMM_IE_STA_QOSINFO_AC_VI | \ + IEEE80211_WMM_IE_STA_QOSINFO_AC_BE | \ + IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) + +#define RSI_DATA_DESC_MAC_BBP_INFO BIT(0) +#define RSI_DATA_DESC_NO_ACK_IND BIT(9) +#define RSI_DATA_DESC_QOS_EN BIT(12) +#define RSI_DATA_DESC_NORMAL_FRAME 0x00 +#define RSI_DATA_DESC_DTIM_BEACON_GATED_FRAME BIT(10) +#define RSI_DATA_DESC_BEACON_FRAME BIT(11) +#define RSI_DATA_DESC_DTIM_BEACON (BIT(10) | BIT(11)) +#define RSI_DATA_DESC_INSERT_TSF BIT(15) +#define RSI_DATA_DESC_INSERT_SEQ_NO BIT(2) + enum opmode { - STA_OPMODE = 1, - AP_OPMODE = 2 + AP_OPMODE = 0, + STA_OPMODE, }; enum vap_status { @@ -164,6 +210,10 @@ enum vap_status { VAP_UPDATE = 3 }; +enum peer_type { + PEER_TYPE_AP, + PEER_TYPE_STA, +}; extern struct ieee80211_rate rsi_rates[12]; extern const u16 rsi_mcsrates[8]; @@ -192,7 +242,7 @@ enum cmd_frame_type { AUTO_RATE_IND, BOOTUP_PARAMS_REQUEST, VAP_CAPABILITIES, - EEPROM_READ_TYPE , + EEPROM_READ, EEPROM_WRITE, GPIO_PIN_CONFIG , SET_RX_FILTER, @@ -205,6 +255,7 @@ enum cmd_frame_type { CW_MODE_REQ, PER_CMD_PKT, ANT_SEL_FRAME = 0x20, + VAP_DYNAMIC_UPDATE = 0x27, COMMON_DEV_CONFIG = 0x28, RADIO_PARAMS_UPDATE = 0x29 }; @@ -213,13 +264,52 @@ struct rsi_mac_frame { __le16 desc_word[8]; } __packed; +#define PWR_SAVE_WAKEUP_IND BIT(0) +#define TCP_CHECK_SUM_OFFLOAD BIT(1) +#define CONFIRM_REQUIRED_TO_HOST BIT(2) +#define ADD_DELTA_TSF BIT(3) +#define FETCH_RETRY_CNT_FROM_HOST_DESC BIT(4) +#define EOSP_INDICATION BIT(5) +#define REQUIRE_TSF_SYNC_CONFIRM BIT(6) +#define ENCAP_MGMT_PKT BIT(7) +#define DESC_IMMEDIATE_WAKEUP BIT(15) + +struct rsi_cmd_desc_dword0 { + __le16 len_qno; + u8 frame_type; + u8 misc_flags; +}; + +struct rsi_cmd_desc_dword1 { + u8 xtend_desc_size; + u8 reserved1; + __le16 reserved2; +}; + +struct rsi_cmd_desc_dword2 { + __le32 pkt_info; /* Packet specific data */ +}; + +struct rsi_cmd_desc_dword3 { + __le16 token; + u8 qid_tid; + u8 sta_id; +}; + +struct rsi_cmd_desc { + struct rsi_cmd_desc_dword0 desc_dword0; + struct rsi_cmd_desc_dword1 desc_dword1; + struct rsi_cmd_desc_dword2 desc_dword2; + struct rsi_cmd_desc_dword3 desc_dword3; +}; + struct rsi_boot_params { __le16 desc_word[8]; struct bootup_params bootup_params; } __packed; struct rsi_peer_notify { - __le16 desc_word[8]; + struct rsi_cmd_desc desc; u8 mac_addr[6]; __le16 command; __le16 mpdu_density; @@ -227,31 +317,116 @@ struct rsi_peer_notify { __le32 sta_flags; } __packed; +/* Aggregation params flags */ +#define RSI_AGGR_PARAMS_TID_MASK 0xf +#define RSI_AGGR_PARAMS_START BIT(4) +#define RSI_AGGR_PARAMS_RX_AGGR BIT(5) +struct rsi_aggr_params { + struct rsi_cmd_desc_dword0 desc_dword0; + struct rsi_cmd_desc_dword0 desc_dword1; + __le16 seq_start; + __le16 baw_size; + __le16 token; + u8 aggr_params; + u8 peer_id; +} __packed; + +struct rsi_bb_rf_prog { + struct rsi_cmd_desc_dword0 desc_dword0; + __le16 reserved1; + u8 rf_power_mode; + u8 reserved2; + u8 endpoint; + u8 reserved3; + __le16 reserved4; + __le16 reserved5; + __le16 flags; +} __packed; + +struct rsi_chan_config { + struct rsi_cmd_desc_dword0 desc_dword0; + struct rsi_cmd_desc_dword1 desc_dword1; + u8 channel_number; + u8 antenna_gain_offset_2g; + u8 antenna_gain_offset_5g; + u8 channel_width; + __le16 tx_power; + u8 region_rftype; + u8 flags; +} __packed; + struct rsi_vap_caps { - __le16 desc_word[8]; + struct rsi_cmd_desc_dword0 desc_dword0; + u8 reserved1; + u8 status; + __le16 reserved2; + u8 vif_type; + u8 channel_bw; + __le16 antenna_info; + u8 radioid_macid; + u8 vap_id; + 
__le16 reserved3; u8 mac_addr[6]; __le16 keep_alive_period; u8 bssid[6]; - __le16 reserved; + __le16 reserved4; __le32 flags; __le16 frag_threshold; __le16 rts_threshold; __le32 default_mgmt_rate; - __le32 default_ctrl_rate; + __le16 default_ctrl_rate; + __le16 ctrl_rate_flags; __le32 default_data_rate; __le16 beacon_interval; __le16 dtim_period; + __le16 beacon_miss_threshold; } __packed; +struct rsi_ant_sel_frame { + struct rsi_cmd_desc_dword0 desc_dword0; + u8 reserved; + u8 sub_frame_type; + __le16 ant_value; + __le32 reserved1; + __le32 reserved2; +} __packed; + +struct rsi_dynamic_s { + struct rsi_cmd_desc_dword0 desc_dword0; + struct rsi_cmd_desc_dword1 desc_dword1; + struct rsi_cmd_desc_dword2 desc_dword2; + struct rsi_cmd_desc_dword3 desc_dword3; + struct framebody { + __le16 data_rate; + __le16 mgmt_rate; + __le16 keep_alive_period; + } frame_body; +} __packed; + +/* Key descriptor flags */ +#define RSI_KEY_TYPE_BROADCAST BIT(1) +#define RSI_WEP_KEY BIT(2) +#define RSI_WEP_KEY_104 BIT(3) +#define RSI_CIPHER_WPA BIT(4) +#define RSI_CIPHER_TKIP BIT(5) +#define RSI_KEY_MODE_AP BIT(7) +#define RSI_PROTECT_DATA_FRAMES BIT(13) +#define RSI_KEY_ID_MASK 0xC0 +#define RSI_KEY_ID_OFFSET 14 struct rsi_set_key { - __le16 desc_word[8]; + struct rsi_cmd_desc_dword0 desc_dword0; + struct rsi_cmd_desc_dword1 desc_dword1; + __le16 key_desc; + __le32 bpn; + u8 sta_id; + u8 vap_id; u8 key[4][32]; u8 tx_mic_key[8]; u8 rx_mic_key[8]; } __packed; struct rsi_auto_rate { - __le16 desc_word[8]; + struct rsi_cmd_desc desc; __le16 failure_limit; __le16 initial_boundary; __le16 max_threshold_limt; @@ -262,6 +437,19 @@ struct rsi_auto_rate { __le16 supported_rates[40]; } __packed; +#define QUIET_INFO_VALID BIT(0) +#define QUIET_ENABLE BIT(1) +struct rsi_block_unblock_data { + struct rsi_cmd_desc_dword0 desc_dword0; + u8 xtend_desc_size; + u8 host_quiet_info; + __le16 reserved; + __le16 block_q_bitmap; + __le16 unblock_q_bitmap; + __le16 token; + __le16 flush_q_bitmap; +} __packed; + struct qos_params { __le16 cont_win_min_q; __le16 cont_win_max_q; @@ -270,7 +458,14 @@ struct qos_params { } __packed; struct rsi_radio_caps { - __le16 desc_word[8]; + struct rsi_cmd_desc_dword0 desc_dword0; + struct rsi_cmd_desc_dword0 desc_dword1; + u8 channel_num; + u8 rf_model; + __le16 ppe_ack_rate; + __le16 mode_11j; + u8 radio_cfg_info; + u8 radio_info; struct qos_params qos_params[MAX_HW_QUEUES]; u8 num_11n_rates; u8 num_11ac_rates; @@ -353,6 +548,34 @@ struct rsi_config_vals { u8 reserved2[16]; } __packed; +/* Packet info flags */ +#define RSI_EEPROM_HDR_SIZE_OFFSET 8 +#define RSI_EEPROM_HDR_SIZE_MASK 0x300 +#define RSI_EEPROM_LEN_OFFSET 20 +#define RSI_EEPROM_LEN_MASK 0xFFF00000 + +struct rsi_eeprom_read_frame { + __le16 len_qno; + u8 pkt_type; + u8 misc_flags; + __le32 pkt_info; + __le32 eeprom_offset; + __le16 delay_ms; + __le16 reserved3; +} __packed; + +struct rsi_request_ps { + struct rsi_cmd_desc desc; + struct ps_sleep_params ps_sleep; + u8 ps_mimic_support; + u8 ps_uapsd_acs; + u8 ps_uapsd_wakeup_period; + u8 reserved; + __le32 ps_listen_interval; + __le32 ps_dtim_interval_duration; + __le16 ps_num_dtim_intervals; +} __packed; + static inline u32 rsi_get_queueno(u8 *addr, u16 offset) { return (le16_to_cpu(*(__le16 *)&addr[offset]) & 0x7000) >> 12; @@ -385,16 +608,19 @@ static inline void rsi_set_len_qno(__le16 *addr, u16 len, u8 qno) int rsi_mgmt_pkt_recv(struct rsi_common *common, u8 *msg); int rsi_set_vap_capabilities(struct rsi_common *common, enum opmode mode, - u8 vap_status); + u8 *mac_addr, u8 vap_id, u8 
vap_status); int rsi_send_aggregation_params_frame(struct rsi_common *common, u16 tid, - u16 ssn, u8 buf_size, u8 event); + u16 ssn, u8 buf_size, u8 event, + u8 sta_id); int rsi_hal_load_key(struct rsi_common *common, u8 *data, u16 key_len, - u8 key_type, u8 key_id, u32 cipher); + u8 key_type, u8 key_id, u32 cipher, s16 sta_id); int rsi_set_channel(struct rsi_common *common, struct ieee80211_channel *channel); +int rsi_send_vap_dynamic_update(struct rsi_common *common); int rsi_send_block_unblock_frame(struct rsi_common *common, bool event); -void rsi_inform_bss_status(struct rsi_common *common, u8 status, - const u8 *bssid, u8 qos_enable, u16 aid); +void rsi_inform_bss_status(struct rsi_common *common, enum opmode opmode, + u8 status, const u8 *addr, u8 qos_enable, u16 aid, + struct ieee80211_sta *sta, u16 sta_id); void rsi_indicate_pkt_to_os(struct rsi_common *common, struct sk_buff *skb); int rsi_mac80211_attach(struct rsi_common *common); void rsi_indicate_tx_status(struct rsi_hw *common, struct sk_buff *skb, diff --git a/drivers/net/wireless/rsi/rsi_ps.h b/drivers/net/wireless/rsi/rsi_ps.h new file mode 100644 index 000000000000..d8475873df36 --- /dev/null +++ b/drivers/net/wireless/rsi/rsi_ps.h @@ -0,0 +1,64 @@ +/** + * Copyright (c) 2017 Redpine Signals Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef __RSI_PS_H__ +#define __RSI_PS_H__ + +#define PS_CONFIRM_INDEX 12 +#define RSI_DEF_DS_WAKEUP_PERIOD 200 +#define RSI_DEF_LISTEN_INTERVAL 200 +#define RSI_SLEEP_TYPE_LP 1 + +enum ps_state { + PS_NONE = 0, + PS_ENABLE_REQ_SENT = 1, + PS_DISABLE_REQ_SENT = 2, + PS_ENABLED = 3 +}; + +struct ps_sleep_params { + u8 enable; + u8 sleep_type; + u8 connected_sleep; + u8 reserved1; + __le16 num_bcns_per_lis_int; + __le16 wakeup_type; + __le32 sleep_duration; +} __packed; + +struct rsi_ps_info { + u8 enabled; + u8 sleep_type; + u8 tx_threshold; + u8 rx_threshold; + u8 tx_hysterisis; + u8 rx_hysterisis; + u16 monitor_interval; + u32 listen_interval; + u16 num_bcns_per_lis_int; + u32 dtim_interval_duration; + u16 num_dtims_per_sleep; + u32 deep_sleep_wakeup_period; +} __packed; + +char *str_psstate(enum ps_state state); +void rsi_enable_ps(struct rsi_hw *adapter); +void rsi_disable_ps(struct rsi_hw *adapter); +int rsi_handle_ps_confirm(struct rsi_hw *adapter, u8 *msg); +void rsi_default_ps_params(struct rsi_hw *hw); +int rsi_send_ps_request(struct rsi_hw *adapter, bool enable); +void rsi_conf_uapsd(struct rsi_hw *adapter); +#endif diff --git a/drivers/net/wireless/rsi/rsi_sdio.h b/drivers/net/wireless/rsi/rsi_sdio.h index 9fb73f68282a..95e4bed57baf 100644 --- a/drivers/net/wireless/rsi/rsi_sdio.h +++ b/drivers/net/wireless/rsi/rsi_sdio.h @@ -41,6 +41,7 @@ enum sdio_interrupt_type { #define PKT_BUFF_FULL 1 #define PKT_MGMT_BUFF_FULL 2 #define MSDU_PKT_PENDING 3 +#define RECV_NUM_BLOCKS 4 /* Interrupt Bit Related Macros */ #define PKT_BUFF_AVAILABLE 1 #define FW_ASSERT_IND 2 @@ -58,6 +59,7 @@ enum sdio_interrupt_type { #define SDIO_READ_START_LVL 0x000FC #define SDIO_READ_FIFO_CTL 0x000FD #define SDIO_WRITE_FIFO_CTL 0x000FE +#define SDIO_WAKEUP_REG 0x000FF #define SDIO_FUN1_INTR_CLR_REG 0x0008 #define SDIO_REG_HIGH_SPEED 0x0013 @@ -103,7 +105,7 @@ struct receive_info { struct rsi_91x_sdiodev { struct sdio_func *pfunction; - struct task_struct *in_sdio_litefi_irq; + struct task_struct *sdio_irq_task; struct receive_info rx_info; u32 next_read_delay; u32 sdio_high_speed_enable; @@ -112,6 +114,7 @@ struct rsi_91x_sdiodev { u8 prev_desc[16]; u16 tx_blk_size; u8 write_fail; + bool buff_status_updated; }; void rsi_interrupt_handler(struct rsi_hw *adapter); @@ -125,5 +128,5 @@ int rsi_sdio_write_register_multiple(struct rsi_hw *adapter, u32 addr, int rsi_sdio_master_access_msword(struct rsi_hw *adapter, u16 ms_word); void rsi_sdio_ack_intr(struct rsi_hw *adapter, u8 int_bit); int rsi_sdio_determine_event_timeout(struct rsi_hw *adapter); -int rsi_sdio_read_buffer_status_register(struct rsi_hw *adapter, u8 q_num); +int rsi_sdio_check_buffer_status(struct rsi_hw *adapter, u8 q_num); #endif diff --git a/drivers/net/wireless/rsi/rsi_usb.h b/drivers/net/wireless/rsi/rsi_usb.h index 59513ac61fb3..891daea2d932 100644 --- a/drivers/net/wireless/rsi/rsi_usb.h +++ b/drivers/net/wireless/rsi/rsi_usb.h @@ -25,6 +25,7 @@ #define USB_INTERNAL_REG_1 0x25000 #define RSI_USB_READY_MAGIC_NUM 0xab #define FW_STATUS_REG 0x41050012 +#define RSI_TA_HOLD_REG 0x22000844 #define USB_VENDOR_REGISTER_READ 0x15 #define USB_VENDOR_REGISTER_WRITE 0x16 @@ -32,10 +33,11 @@ #define MAX_RX_URBS 1 #define MAX_BULK_EP 8 -#define MGMT_EP 1 -#define DATA_EP 2 +#define WLAN_EP 1 +#define BT_EP 2 #define RSI_USB_BUF_SIZE 4096 +#define RSI_USB_CTRL_BUF_SIZE 0x04 struct rsi_91x_usbdev { struct rsi_thread rx_thread; diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 60aaa850fbd1..c346c021b999 
100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -6016,6 +6016,8 @@ static int wl1271_register_hw(struct wl1271 *wl) { int ret; u32 oui_addr = 0, nic_addr = 0; + struct platform_device *pdev = wl->pdev; + struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev); if (wl->mac80211_registered) return 0; @@ -6040,6 +6042,27 @@ static int wl1271_register_hw(struct wl1271 *wl) nic_addr = wl->fuse_nic_addr + 1; } + if (oui_addr == 0xdeadbe && nic_addr == 0xef0000) { + wl1271_warning("Detected unconfigured mac address in nvs, derive from fuse instead.\n"); + if (!strcmp(pdev_data->family->name, "wl18xx")) { + wl1271_warning("This default nvs file can be removed from the file system\n"); + } else { + wl1271_warning("Your device performance is not optimized.\n"); + wl1271_warning("Please use the calibrator tool to configure your device.\n"); + } + + if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) { + wl1271_warning("Fuse mac address is zero. using random mac\n"); + /* Use TI oui and a random nic */ + oui_addr = WLCORE_TI_OUI_ADDRESS; + nic_addr = get_random_int(); + } else { + oui_addr = wl->fuse_oui_addr; + /* fuse has the BD_ADDR, the WLAN addresses are the next two */ + nic_addr = wl->fuse_nic_addr + 1; + } + } + wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr); ret = ieee80211_register_hw(wl->hw); diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c index 2fb38717346f..f8a1fea64e25 100644 --- a/drivers/net/wireless/ti/wlcore/sdio.c +++ b/drivers/net/wireless/ti/wlcore/sdio.c @@ -230,6 +230,7 @@ static const struct wilink_family_data wl128x_data = { static const struct wilink_family_data wl18xx_data = { .name = "wl18xx", .cfg_name = "ti-connectivity/wl18xx-conf.bin", + .nvs_name = "ti-connectivity/wl1271-nvs.bin", }; static const struct of_device_id wlcore_sdio_of_match_table[] = { diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c index fdabb9242cca..62ce54a949e9 100644 --- a/drivers/net/wireless/ti/wlcore/spi.c +++ b/drivers/net/wireless/ti/wlcore/spi.c @@ -92,6 +92,7 @@ static const struct wilink_family_data wl128x_data = { static const struct wilink_family_data wl18xx_data = { .name = "wl18xx", .cfg_name = "ti-connectivity/wl18xx-conf.bin", + .nvs_name = "ti-connectivity/wl1271-nvs.bin", }; struct wl12xx_spi_glue { diff --git a/drivers/net/wireless/ti/wlcore/sysfs.c b/drivers/net/wireless/ti/wlcore/sysfs.c index a9218e5b0efc..b72e2101488b 100644 --- a/drivers/net/wireless/ti/wlcore/sysfs.c +++ b/drivers/net/wireless/ti/wlcore/sysfs.c @@ -138,7 +138,7 @@ static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj, return len; } -static struct bin_attribute fwlog_attr = { +static const struct bin_attribute fwlog_attr = { .attr = {.name = "fwlog", .mode = S_IRUSR}, .read = wl1271_sysfs_read_fwlog, }; diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h index 1827546ba807..95fbedc8ea34 100644 --- a/drivers/net/wireless/ti/wlcore/wlcore.h +++ b/drivers/net/wireless/ti/wlcore/wlcore.h @@ -40,6 +40,9 @@ /* wl12xx/wl18xx maximum transmission power (in dBm) */ #define WLCORE_MAX_TXPWR 25 +/* Texas Instruments pre assigned OUI */ +#define WLCORE_TI_OUI_ADDRESS 0x080028 + /* forward declaration */ struct wl1271_tx_hw_descr; enum wl_rx_buf_align; diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c index acec0d9ec422..da62220b9c01 100644 --- 
a/drivers/net/wireless/wl3501_cs.c +++ b/drivers/net/wireless/wl3501_cs.c @@ -965,7 +965,7 @@ static inline void wl3501_md_ind_interrupt(struct net_device *dev, &addr4, sizeof(addr4)); if (!(addr4[0] == 0xAA && addr4[1] == 0xAA && addr4[2] == 0x03 && addr4[4] == 0x00)) { - printk(KERN_INFO "Insupported packet type!\n"); + printk(KERN_INFO "Unsupported packet type!\n"); return; } pkt_len = sig.size + 12 - 24 - 4 - 6; diff --git a/drivers/net/wireless/zydas/zd1201.c b/drivers/net/wireless/zydas/zd1201.c index 7f586d76cf17..581e8577a221 100644 --- a/drivers/net/wireless/zydas/zd1201.c +++ b/drivers/net/wireless/zydas/zd1201.c @@ -25,7 +25,7 @@ #include #include "zd1201.h" -static struct usb_device_id zd1201_table[] = { +static const struct usb_device_id zd1201_table[] = { {USB_DEVICE(0x0586, 0x3400)}, /* Peabird Wireless USB Adapter */ {USB_DEVICE(0x0ace, 0x1201)}, /* ZyDAS ZD1201 Wireless USB Adapter */ {USB_DEVICE(0x050d, 0x6051)}, /* Belkin F5D6051 usb adapter */ diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_rf_rf2959.c b/drivers/net/wireless/zydas/zd1211rw/zd_rf_rf2959.c index a93f657a41c7..d4e512f50945 100644 --- a/drivers/net/wireless/zydas/zd1211rw/zd_rf_rf2959.c +++ b/drivers/net/wireless/zydas/zd1211rw/zd_rf_rf2959.c @@ -61,7 +61,7 @@ static void dump_regwrite(u32 rw) switch (reg) { case 0: - PDEBUG("reg0 CFG1 ref_sel %d hybernate %d rf_vco_reg_en %d" + PDEBUG("reg0 CFG1 ref_sel %d hibernate %d rf_vco_reg_en %d" " if_vco_reg_en %d if_vga_en %d", bits(rw, 14, 15), bit(rw, 3), bit(rw, 2), bit(rw, 1), bit(rw, 0)); diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c index 01ca1d57b3d9..c30bf118c67d 100644 --- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c +++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c @@ -35,7 +35,7 @@ #include "zd_mac.h" #include "zd_usb.h" -static struct usb_device_id usb_ids[] = { +static const struct usb_device_id usb_ids[] = { /* ZD1211 */ { USB_DEVICE(0x0105, 0x145f), .driver_info = DEVICE_ZD1211 }, { USB_DEVICE(0x0586, 0x3401), .driver_info = DEVICE_ZD1211 }, diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index e322a862ddfe..ee8ed9da00ad 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -551,8 +551,8 @@ int xenvif_init_queue(struct xenvif_queue *queue) for (i = 0; i < MAX_PENDING_REQS; i++) { queue->pending_tx_info[i].callback_struct = (struct ubuf_info) { .callback = xenvif_zerocopy_callback, - .ctx = NULL, - .desc = i }; + { { .ctx = NULL, + .desc = i } } }; queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE; } diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 7b61adb6270c..523387e71a80 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -611,7 +611,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) nskb = skb_copy(skb, GFP_ATOMIC); if (!nskb) goto drop; - dev_kfree_skb_any(skb); + dev_consume_skb_any(skb); skb = nskb; page = virt_to_page(skb->data); offset = offset_in_page(skb->data); diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index 14323faf8bd9..d5612bd1cc81 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c @@ -31,6 +31,16 @@ enum log_ent_request { LOG_OLD_ENT }; +static struct device *to_dev(struct arena_info *arena) +{ + return &arena->nd_btt->dev; +} + +static u64 adjust_initial_offset(struct nd_btt *nd_btt, u64 offset) +{ + return offset + nd_btt->initial_offset; +} + static int arena_read_bytes(struct 
arena_info *arena, resource_size_t offset, void *buf, size_t n, unsigned long flags) { @@ -38,7 +48,7 @@ static int arena_read_bytes(struct arena_info *arena, resource_size_t offset, struct nd_namespace_common *ndns = nd_btt->ndns; /* arena offsets may be shifted from the base of the device */ - offset += arena->nd_btt->initial_offset; + offset = adjust_initial_offset(nd_btt, offset); return nvdimm_read_bytes(ndns, offset, buf, n, flags); } @@ -49,7 +59,7 @@ static int arena_write_bytes(struct arena_info *arena, resource_size_t offset, struct nd_namespace_common *ndns = nd_btt->ndns; /* arena offsets may be shifted from the base of the device */ - offset += arena->nd_btt->initial_offset; + offset = adjust_initial_offset(nd_btt, offset); return nvdimm_write_bytes(ndns, offset, buf, n, flags); } @@ -62,8 +72,10 @@ static int btt_info_write(struct arena_info *arena, struct btt_sb *super) * We rely on that to make sure rw_bytes does error clearing * correctly, so make sure that is the case. */ - WARN_ON_ONCE(!IS_ALIGNED(arena->infooff, 512)); - WARN_ON_ONCE(!IS_ALIGNED(arena->info2off, 512)); + dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->infooff, 512), + "arena->infooff: %#llx is unaligned\n", arena->infooff); + dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->info2off, 512), + "arena->info2off: %#llx is unaligned\n", arena->info2off); ret = arena_write_bytes(arena, arena->info2off, super, sizeof(struct btt_sb), 0); @@ -76,7 +88,6 @@ static int btt_info_write(struct arena_info *arena, struct btt_sb *super) static int btt_info_read(struct arena_info *arena, struct btt_sb *super) { - WARN_ON(!super); return arena_read_bytes(arena, arena->infooff, super, sizeof(struct btt_sb), 0); } @@ -92,7 +103,10 @@ static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping, { u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE); - WARN_ON(lba >= arena->external_nlba); + if (unlikely(lba >= arena->external_nlba)) + dev_err_ratelimited(to_dev(arena), + "%s: lba %#x out of range (max: %#x)\n", + __func__, lba, arena->external_nlba); return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE, flags); } @@ -106,7 +120,7 @@ static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping, * This 'mapping' is supposed to be just the LBA mapping, without * any flags set, so strip the flag bits. 
*/ - mapping &= MAP_LBA_MASK; + mapping = ent_lba(mapping); ze = (z_flag << 1) + e_flag; switch (ze) { @@ -131,7 +145,8 @@ static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping, * construed as a valid 'normal' case, but we decide not to, * to avoid confusion */ - WARN_ONCE(1, "Invalid use of Z and E flags\n"); + dev_err_ratelimited(to_dev(arena), + "Invalid use of Z and E flags\n"); return -EIO; } @@ -147,7 +162,10 @@ static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping, u32 raw_mapping, postmap, ze, z_flag, e_flag; u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE); - WARN_ON(lba >= arena->external_nlba); + if (unlikely(lba >= arena->external_nlba)) + dev_err_ratelimited(to_dev(arena), + "%s: lba %#x out of range (max: %#x)\n", + __func__, lba, arena->external_nlba); ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE, rwb_flags); if (ret) @@ -155,10 +173,10 @@ static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping, raw_mapping = le32_to_cpu(in); - z_flag = (raw_mapping & MAP_TRIM_MASK) >> MAP_TRIM_SHIFT; - e_flag = (raw_mapping & MAP_ERR_MASK) >> MAP_ERR_SHIFT; + z_flag = ent_z_flag(raw_mapping); + e_flag = ent_e_flag(raw_mapping); ze = (z_flag << 1) + e_flag; - postmap = raw_mapping & MAP_LBA_MASK; + postmap = ent_lba(raw_mapping); /* Reuse the {z,e}_flag variables for *trim and *error */ z_flag = 0; @@ -195,7 +213,6 @@ static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping, static int btt_log_read_pair(struct arena_info *arena, u32 lane, struct log_entry *ent) { - WARN_ON(!ent); return arena_read_bytes(arena, arena->logoff + (2 * lane * LOG_ENT_SIZE), ent, 2 * LOG_ENT_SIZE, 0); @@ -299,11 +316,6 @@ static int btt_log_get_old(struct log_entry *ent) return old; } -static struct device *to_dev(struct arena_info *arena) -{ - return &arena->nd_btt->dev; -} - /* * This function copies the desired (old/new) log entry into ent if * it is not NULL. It returns the sub-slot number (0 or 1) @@ -381,7 +393,9 @@ static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub, arena->freelist[lane].sub = 1 - arena->freelist[lane].sub; if (++(arena->freelist[lane].seq) == 4) arena->freelist[lane].seq = 1; - arena->freelist[lane].block = le32_to_cpu(ent->old_map); + if (ent_e_flag(ent->old_map)) + arena->freelist[lane].has_err = 1; + arena->freelist[lane].block = le32_to_cpu(ent_lba(ent->old_map)); return ret; } @@ -407,12 +421,14 @@ static int btt_map_init(struct arena_info *arena) * make sure rw_bytes does error clearing correctly, so make sure that * is the case. */ - WARN_ON_ONCE(!IS_ALIGNED(arena->mapoff, 512)); + dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->mapoff, 512), + "arena->mapoff: %#llx is unaligned\n", arena->mapoff); while (mapsize) { size_t size = min(mapsize, chunk_size); - WARN_ON_ONCE(size < 512); + dev_WARN_ONCE(to_dev(arena), size < 512, + "chunk size: %#zx is unaligned\n", size); ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf, size, 0); if (ret) @@ -449,12 +465,14 @@ static int btt_log_init(struct arena_info *arena) * make sure rw_bytes does error clearing correctly, so make sure that * is the case. 
*/ - WARN_ON_ONCE(!IS_ALIGNED(arena->logoff, 512)); + dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->logoff, 512), + "arena->logoff: %#llx is unaligned\n", arena->logoff); while (logsize) { size_t size = min(logsize, chunk_size); - WARN_ON_ONCE(size < 512); + dev_WARN_ONCE(to_dev(arena), size < 512, + "chunk size: %#zx is unaligned\n", size); ret = arena_write_bytes(arena, arena->logoff + offset, zerobuf, size, 0); if (ret) @@ -480,6 +498,40 @@ static int btt_log_init(struct arena_info *arena) return ret; } +static u64 to_namespace_offset(struct arena_info *arena, u64 lba) +{ + return arena->dataoff + ((u64)lba * arena->internal_lbasize); +} + +static int arena_clear_freelist_error(struct arena_info *arena, u32 lane) +{ + int ret = 0; + + if (arena->freelist[lane].has_err) { + void *zero_page = page_address(ZERO_PAGE(0)); + u32 lba = arena->freelist[lane].block; + u64 nsoff = to_namespace_offset(arena, lba); + unsigned long len = arena->sector_size; + + mutex_lock(&arena->err_lock); + + while (len) { + unsigned long chunk = min(len, PAGE_SIZE); + + ret = arena_write_bytes(arena, nsoff, zero_page, + chunk, 0); + if (ret) + break; + len -= chunk; + nsoff += chunk; + if (len == 0) + arena->freelist[lane].has_err = 0; + } + mutex_unlock(&arena->err_lock); + } + return ret; +} + static int btt_freelist_init(struct arena_info *arena) { int old, new, ret; @@ -505,6 +557,17 @@ static int btt_freelist_init(struct arena_info *arena) arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq)); arena->freelist[i].block = le32_to_cpu(log_new.old_map); + /* + * FIXME: if error clearing fails during init, we want to make + * the BTT read-only + */ + if (ent_e_flag(log_new.old_map)) { + ret = arena_clear_freelist_error(arena, i); + if (ret) + dev_err_ratelimited(to_dev(arena), + "Unable to clear known errors\n"); + } + /* This implies a newly created or untouched flog entry */ if (log_new.old_map == log_new.new_map) continue; @@ -525,7 +588,6 @@ static int btt_freelist_init(struct arena_info *arena) if (ret) return ret; } - } return 0; @@ -566,6 +628,7 @@ static struct arena_info *alloc_arena(struct btt *btt, size_t size, if (!arena) return NULL; arena->nd_btt = btt->nd_btt; + arena->sector_size = btt->sector_size; if (!size) return arena; @@ -694,6 +757,7 @@ static int discover_arenas(struct btt *btt) arena->external_lba_start = cur_nlba; parse_arena_meta(arena, super, cur_off); + mutex_init(&arena->err_lock); ret = btt_freelist_init(arena); if (ret) goto out; @@ -904,11 +968,6 @@ static void unlock_map(struct arena_info *arena, u32 premap) spin_unlock(&arena->map_locks[idx].lock); } -static u64 to_namespace_offset(struct arena_info *arena, u64 lba) -{ - return arena->dataoff + ((u64)lba * arena->internal_lbasize); -} - static int btt_data_read(struct arena_info *arena, struct page *page, unsigned int off, u32 lba, u32 len) { @@ -1032,6 +1091,7 @@ static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip, */ while (1) { u32 new_map; + int new_t, new_e; if (t_flag) { zero_fill_data(page, off, cur_len); @@ -1050,20 +1110,29 @@ static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip, */ barrier(); - ret = btt_map_read(arena, premap, &new_map, &t_flag, - &e_flag, NVDIMM_IO_ATOMIC); + ret = btt_map_read(arena, premap, &new_map, &new_t, + &new_e, NVDIMM_IO_ATOMIC); if (ret) goto out_rtt; - if (postmap == new_map) + if ((postmap == new_map) && (t_flag == new_t) && + (e_flag == new_e)) break; postmap = new_map; + t_flag = new_t; + e_flag = new_e; } ret = btt_data_read(arena, 
page, off, postmap, cur_len); - if (ret) + if (ret) { + int rc; + + /* Media error - set the e_flag */ + rc = btt_map_write(arena, premap, postmap, 0, 1, + NVDIMM_IO_ATOMIC); goto out_rtt; + } if (bip) { ret = btt_rw_integrity(btt, bip, arena, postmap, READ); @@ -1088,6 +1157,21 @@ static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip, return ret; } +/* + * Normally, arena_{read,write}_bytes will take care of the initial offset + * adjustment, but in the case of btt_is_badblock, where we query is_bad_pmem, + * we need the final, raw namespace offset here + */ +static bool btt_is_badblock(struct btt *btt, struct arena_info *arena, + u32 postmap) +{ + u64 nsoff = adjust_initial_offset(arena->nd_btt, + to_namespace_offset(arena, postmap)); + sector_t phys_sector = nsoff >> 9; + + return is_bad_pmem(btt->phys_bb, phys_sector, arena->internal_lbasize); +} + static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip, sector_t sector, struct page *page, unsigned int off, unsigned int len) @@ -1100,7 +1184,9 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip, while (len) { u32 cur_len; + int e_flag; + retry: lane = nd_region_acquire_lane(btt->nd_region); ret = lba_to_arena(btt, sector, &premap, &arena); @@ -1113,6 +1199,21 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip, goto out_lane; } + if (btt_is_badblock(btt, arena, arena->freelist[lane].block)) + arena->freelist[lane].has_err = 1; + + if (mutex_is_locked(&arena->err_lock) + || arena->freelist[lane].has_err) { + nd_region_release_lane(btt->nd_region, lane); + + ret = arena_clear_freelist_error(arena, lane); + if (ret) + return ret; + + /* OK to acquire a different lane/free block */ + goto retry; + } + new_postmap = arena->freelist[lane].block; /* Wait if the new block is being read from */ @@ -1138,7 +1239,7 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip, } lock_map(arena, premap); - ret = btt_map_read(arena, premap, &old_postmap, NULL, NULL, + ret = btt_map_read(arena, premap, &old_postmap, NULL, &e_flag, NVDIMM_IO_ATOMIC); if (ret) goto out_map; @@ -1146,6 +1247,8 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip, ret = -EIO; goto out_map; } + if (e_flag) + set_e_flag(old_postmap); log.lba = cpu_to_le32(premap); log.old_map = cpu_to_le32(old_postmap); @@ -1156,13 +1259,20 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip, if (ret) goto out_map; - ret = btt_map_write(arena, premap, new_postmap, 0, 0, 0); + ret = btt_map_write(arena, premap, new_postmap, 0, 0, + NVDIMM_IO_ATOMIC); if (ret) goto out_map; unlock_map(arena, premap); nd_region_release_lane(btt->nd_region, lane); + if (e_flag) { + ret = arena_clear_freelist_error(arena, lane); + if (ret) + return ret; + } + len -= cur_len; off += cur_len; sector += btt->sector_size >> SECTOR_SHIFT; @@ -1211,11 +1321,13 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio) bio_for_each_segment(bvec, bio, iter) { unsigned int len = bvec.bv_len; - BUG_ON(len > PAGE_SIZE); - /* Make sure len is in multiples of sector size. */ - /* XXX is this right? 
*/ - BUG_ON(len < btt->sector_size); - BUG_ON(len % btt->sector_size); + if (len > PAGE_SIZE || len < btt->sector_size || + len % btt->sector_size) { + dev_err_ratelimited(&btt->nd_btt->dev, + "unaligned bio segment (len: %d)\n", len); + bio->bi_status = BLK_STS_IOERR; + break; + } err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset, op_is_write(bio_op(bio)), iter.bi_sector); @@ -1241,8 +1353,10 @@ static int btt_rw_page(struct block_device *bdev, sector_t sector, { struct btt *btt = bdev->bd_disk->private_data; int rc; + unsigned int len; - rc = btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, is_write, sector); + len = hpage_nr_pages(page) * PAGE_SIZE; + rc = btt_do_bvec(btt, NULL, page, len, 0, is_write, sector); if (rc == 0) page_endio(page, is_write, 0); @@ -1343,6 +1457,7 @@ static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize, { int ret; struct btt *btt; + struct nd_namespace_io *nsio; struct device *dev = &nd_btt->dev; btt = devm_kzalloc(dev, sizeof(struct btt), GFP_KERNEL); @@ -1356,6 +1471,8 @@ static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize, INIT_LIST_HEAD(&btt->arena_list); mutex_init(&btt->init_lock); btt->nd_region = nd_region; + nsio = to_nd_namespace_io(&nd_btt->ndns->dev); + btt->phys_bb = &nsio->bb; ret = discover_arenas(btt); if (ret) { @@ -1429,6 +1546,8 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns) } btt_sb = devm_kzalloc(&nd_btt->dev, sizeof(*btt_sb), GFP_KERNEL); + if (!btt_sb) + return -ENOMEM; /* * If this returns < 0, that is ok as it just means there wasn't diff --git a/drivers/nvdimm/btt.h b/drivers/nvdimm/btt.h index 888e862907a0..578c2057524d 100644 --- a/drivers/nvdimm/btt.h +++ b/drivers/nvdimm/btt.h @@ -15,6 +15,7 @@ #ifndef _LINUX_BTT_H #define _LINUX_BTT_H +#include #include #define BTT_SIG_LEN 16 @@ -38,6 +39,11 @@ #define IB_FLAG_ERROR 0x00000001 #define IB_FLAG_ERROR_MASK 0x00000001 +#define ent_lba(ent) (ent & MAP_LBA_MASK) +#define ent_e_flag(ent) (!!(ent & MAP_ERR_MASK)) +#define ent_z_flag(ent) (!!(ent & MAP_TRIM_MASK)) +#define set_e_flag(ent) (ent |= MAP_ERR_MASK) + enum btt_init_state { INIT_UNCHECKED = 0, INIT_NOTFOUND, @@ -78,6 +84,7 @@ struct free_entry { u32 block; u8 sub; u8 seq; + u8 has_err; }; struct aligned_lock { @@ -104,6 +111,7 @@ struct aligned_lock { * handle incoming writes. * @version_major: Metadata layout version major. * @version_minor: Metadata layout version minor. + * @sector_size: The Linux sector size - 512 or 4096 * @nextoff: Offset in bytes to the start of the next arena. * @infooff: Offset in bytes to the info block of this arena. * @dataoff: Offset in bytes to the data area of this arena. 
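The btt.h hunk above replaces open-coded mask/shift handling of BTT map entries with the ent_lba(), ent_e_flag(), ent_z_flag() and set_e_flag() helpers, which btt.c now uses in btt_map_read(), btt_map_write() and btt_flog_write(). A minimal standalone sketch of the same encoding pattern, assuming an illustrative layout (low bits carry the postmap LBA, the two top bits carry the Z and E flags; the mask values here are assumptions, not copied from btt.h):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative layout only: the authoritative MAP_* masks live in
     * drivers/nvdimm/btt.h. */
    #define MAP_ERR_MASK   (1u << 30)  /* E: media error recorded */
    #define MAP_TRIM_MASK  (1u << 31)  /* Z: block zeroed/trimmed */
    #define MAP_LBA_MASK   (~(MAP_ERR_MASK | MAP_TRIM_MASK))

    #define ent_lba(ent)    ((ent) & MAP_LBA_MASK)
    #define ent_e_flag(ent) (!!((ent) & MAP_ERR_MASK))
    #define ent_z_flag(ent) (!!((ent) & MAP_TRIM_MASK))
    #define set_e_flag(ent) ((ent) |= MAP_ERR_MASK)

    int main(void)
    {
        /* A "normal" entry: both flags set, postmap LBA 0x1234. */
        uint32_t raw = MAP_TRIM_MASK | MAP_ERR_MASK | 0x1234;

        /* Same decomposition btt_map_read() performs via the helpers. */
        printf("lba=%#x z=%d e=%d\n",
               ent_lba(raw), ent_z_flag(raw), ent_e_flag(raw));

        set_e_flag(raw);  /* mark a media error, cf. set_e_flag(old_postmap) above */
        printf("e after set_e_flag: %d\n", ent_e_flag(raw));
        return 0;
    }

This is only a sketch of the encoding convention; the kernel helpers operate on __le32 map entries read and written through arena_read_bytes()/arena_write_bytes().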
@@ -131,6 +139,7 @@ struct arena_info { u32 nfree; u16 version_major; u16 version_minor; + u32 sector_size; /* Byte offsets to the different on-media structures */ u64 nextoff; u64 infooff; @@ -147,6 +156,7 @@ struct arena_info { struct dentry *debugfs_dir; /* Arena flags */ u32 flags; + struct mutex err_lock; }; /** @@ -181,6 +191,7 @@ struct btt { struct mutex init_lock; int init_state; int num_arenas; + struct badblocks *phys_bb; }; bool nd_btt_arena_is_valid(struct nd_btt *nd_btt, struct btt_sb *super); diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c index 3e359d282f8e..d58925295aa7 100644 --- a/drivers/nvdimm/btt_devs.c +++ b/drivers/nvdimm/btt_devs.c @@ -61,7 +61,7 @@ static ssize_t sector_size_show(struct device *dev, { struct nd_btt *nd_btt = to_nd_btt(dev); - return nd_sector_size_show(nd_btt->lbasize, btt_lbasize_supported, buf); + return nd_size_select_show(nd_btt->lbasize, btt_lbasize_supported, buf); } static ssize_t sector_size_store(struct device *dev, @@ -72,7 +72,7 @@ static ssize_t sector_size_store(struct device *dev, device_lock(dev); nvdimm_bus_lock(dev); - rc = nd_sector_size_store(dev, buf, &nd_btt->lbasize, + rc = nd_size_select_store(dev, buf, &nd_btt->lbasize, btt_lbasize_supported); dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, rc, buf, buf[len - 1] == '\n' ? "" : "\n"); diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c index 937fafa1886a..baf283986a7e 100644 --- a/drivers/nvdimm/bus.c +++ b/drivers/nvdimm/bus.c @@ -11,6 +11,7 @@ * General Public License for more details. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include #include #include #include @@ -234,6 +235,7 @@ long nvdimm_clear_poison(struct device *dev, phys_addr_t phys, struct nd_cmd_clear_error clear_err; struct nd_cmd_ars_cap ars_cap; u32 clear_err_unit, mask; + unsigned int noio_flag; int cmd_rc, rc; if (!nvdimm_bus) @@ -250,8 +252,10 @@ long nvdimm_clear_poison(struct device *dev, phys_addr_t phys, memset(&ars_cap, 0, sizeof(ars_cap)); ars_cap.address = phys; ars_cap.length = len; + noio_flag = memalloc_noio_save(); rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, &ars_cap, sizeof(ars_cap), &cmd_rc); + memalloc_noio_restore(noio_flag); if (rc < 0) return rc; if (cmd_rc < 0) @@ -266,8 +270,10 @@ long nvdimm_clear_poison(struct device *dev, phys_addr_t phys, memset(&clear_err, 0, sizeof(clear_err)); clear_err.address = phys; clear_err.length = len; + noio_flag = memalloc_noio_save(); rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CLEAR_ERROR, &clear_err, sizeof(clear_err), &cmd_rc); + memalloc_noio_restore(noio_flag); if (rc < 0) return rc; if (cmd_rc < 0) @@ -905,19 +911,20 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, int read_only, unsigned int ioctl_cmd, unsigned long arg) { struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc; - size_t buf_len = 0, in_len = 0, out_len = 0; static char out_env[ND_CMD_MAX_ENVELOPE]; static char in_env[ND_CMD_MAX_ENVELOPE]; const struct nd_cmd_desc *desc = NULL; unsigned int cmd = _IOC_NR(ioctl_cmd); - unsigned int func = cmd; - void __user *p = (void __user *) arg; struct device *dev = &nvdimm_bus->dev; - struct nd_cmd_pkg pkg; + void __user *p = (void __user *) arg; const char *cmd_name, *dimm_name; + u32 in_len = 0, out_len = 0; + unsigned int func = cmd; unsigned long cmd_mask; - void *buf; + struct nd_cmd_pkg pkg; int rc, i, cmd_rc; + u64 buf_len = 0; + void *buf; if (nvdimm) { desc = nd_cmd_dimm_desc(cmd); @@ -977,13 +984,9 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, 
struct nvdimm *nvdimm, if (cmd == ND_CMD_CALL) { func = pkg.nd_command; - dev_dbg(dev, "%s:%s, idx: %llu, in: %zu, out: %zu, len %zu\n", + dev_dbg(dev, "%s:%s, idx: %llu, in: %u, out: %u, len %llu\n", __func__, dimm_name, pkg.nd_command, in_len, out_len, buf_len); - - for (i = 0; i < ARRAY_SIZE(pkg.nd_reserved2); i++) - if (pkg.nd_reserved2[i]) - return -EINVAL; } /* process an output envelope */ @@ -1007,9 +1010,9 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, out_len += out_size; } - buf_len = out_len + in_len; + buf_len = (u64) out_len + (u64) in_len; if (buf_len > ND_IOCTL_MAX_BUFLEN) { - dev_dbg(dev, "%s:%s cmd: %s buf_len: %zu > %d\n", __func__, + dev_dbg(dev, "%s:%s cmd: %s buf_len: %llu > %d\n", __func__, dimm_name, cmd_name, buf_len, ND_IOCTL_MAX_BUFLEN); return -EINVAL; diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c index 47770460f3d3..b2fc29b8279b 100644 --- a/drivers/nvdimm/claim.c +++ b/drivers/nvdimm/claim.c @@ -280,18 +280,11 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns, } if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) { - /* - * FIXME: nsio_rw_bytes() may be called from atomic - * context in the btt case and the ACPI DSM path for - * clearing the error takes sleeping locks and allocates - * memory. An explicit error clearing path, and support - * for tracking badblocks in BTT metadata is needed to - * work around this collision. - */ if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512) && !(flags & NVDIMM_IO_ATOMIC)) { long cleared; + might_sleep(); cleared = nvdimm_clear_poison(&ndns->dev, nsio->res.start + offset, size); if (cleared < size) diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c index 75bc08c6838c..bb71f0cf8f5d 100644 --- a/drivers/nvdimm/core.c +++ b/drivers/nvdimm/core.c @@ -277,14 +277,14 @@ int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf, return 0; } -ssize_t nd_sector_size_show(unsigned long current_lbasize, +ssize_t nd_size_select_show(unsigned long current_size, const unsigned long *supported, char *buf) { ssize_t len = 0; int i; for (i = 0; supported[i]; i++) - if (current_lbasize == supported[i]) + if (current_size == supported[i]) len += sprintf(buf + len, "[%ld] ", supported[i]); else len += sprintf(buf + len, "%ld ", supported[i]); @@ -292,8 +292,8 @@ ssize_t nd_sector_size_show(unsigned long current_lbasize, return len; } -ssize_t nd_sector_size_store(struct device *dev, const char *buf, - unsigned long *current_lbasize, const unsigned long *supported) +ssize_t nd_size_select_store(struct device *dev, const char *buf, + unsigned long *current_size, const unsigned long *supported) { unsigned long lbasize; int rc, i; @@ -310,7 +310,7 @@ ssize_t nd_sector_size_store(struct device *dev, const char *buf, break; if (supported[i]) { - *current_lbasize = lbasize; + *current_size = lbasize; return 0; } else { return -EINVAL; diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c index 87796f840777..9c5f108910e3 100644 --- a/drivers/nvdimm/label.c +++ b/drivers/nvdimm/label.c @@ -45,12 +45,14 @@ unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd) return ndd->nslabel_size; } +int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd) +{ + return ndd->nsarea.config_size / (sizeof_namespace_label(ndd) + 1); +} + size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd) { - u32 index_span; - - if (ndd->nsindex_size) - return ndd->nsindex_size; + u32 nslot, space, size; /* * The minimum index space is 512 bytes, with that amount of @@ -60,16 +62,16 @@ 
size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd) * starts to waste space at larger config_sizes, but it's * unlikely we'll ever see anything but 128K. */ - index_span = ndd->nsarea.config_size / (sizeof_namespace_label(ndd) + 1); - index_span /= NSINDEX_ALIGN * 2; - ndd->nsindex_size = index_span * NSINDEX_ALIGN; + nslot = nvdimm_num_label_slots(ndd); + space = ndd->nsarea.config_size - nslot * sizeof_namespace_label(ndd); + size = ALIGN(sizeof(struct nd_namespace_index) + DIV_ROUND_UP(nslot, 8), + NSINDEX_ALIGN) * 2; + if (size <= space) + return size / 2; - return ndd->nsindex_size; -} - -int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd) -{ - return ndd->nsarea.config_size / (sizeof_namespace_label(ndd) + 1); + dev_err(ndd->dev, "label area (%d) too small to host (%d byte) labels\n", + ndd->nsarea.config_size, sizeof_namespace_label(ndd)); + return 0; } static int __nd_label_validate(struct nvdimm_drvdata *ndd) diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index 5f1c6756e57c..3e4d1e7998da 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -1313,14 +1313,14 @@ static ssize_t sector_size_show(struct device *dev, if (is_namespace_blk(dev)) { struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); - return nd_sector_size_show(nsblk->lbasize, + return nd_size_select_show(nsblk->lbasize, blk_lbasize_supported, buf); } if (is_namespace_pmem(dev)) { struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); - return nd_sector_size_show(nspm->lbasize, + return nd_size_select_show(nspm->lbasize, pmem_lbasize_supported, buf); } return -ENXIO; @@ -1352,7 +1352,7 @@ static ssize_t sector_size_store(struct device *dev, if (to_ndns(dev)->claim) rc = -EBUSY; if (rc >= 0) - rc = nd_sector_size_store(dev, buf, lbasize, supported); + rc = nd_size_select_store(dev, buf, lbasize, supported); if (rc >= 0) rc = nd_namespace_label_update(nd_region, dev); dev_dbg(dev, "%s: result: %zd %s: %s%s", __func__, @@ -1417,6 +1417,15 @@ static int btt_claim_class(struct device *dev) struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); struct nd_namespace_index *nsindex; + /* + * If any of the DIMMs do not support labels the only + * possible BTT format is v1. 
+ */ + if (!ndd) { + loop_bitmask = 0; + break; + } + nsindex = to_namespace_index(ndd, ndd->ns_current); if (nsindex == NULL) loop_bitmask |= 1; diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index e1b5715bd91f..9c758a91372b 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h @@ -42,7 +42,7 @@ struct nd_poison { struct nvdimm_drvdata { struct device *dev; - int nsindex_size, nslabel_size; + int nslabel_size; struct nd_cmd_get_config_size nsarea; void *data; int ns_current, ns_next; @@ -134,6 +134,7 @@ struct nd_mapping { struct nvdimm *nvdimm; u64 start; u64 size; + int position; struct list_head labels; struct mutex lock; /* @@ -233,10 +234,10 @@ void nd_device_unregister(struct device *dev, enum nd_async_mode mode); void nd_device_notify(struct device *dev, enum nvdimm_event event); int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf, size_t len); -ssize_t nd_sector_size_show(unsigned long current_lbasize, +ssize_t nd_size_select_show(unsigned long current_size, const unsigned long *supported, char *buf); -ssize_t nd_sector_size_store(struct device *dev, const char *buf, - unsigned long *current_lbasize, const unsigned long *supported); +ssize_t nd_size_select_store(struct device *dev, const char *buf, + unsigned long *current_size, const unsigned long *supported); int __init nvdimm_init(void); int __init nd_region_init(void); int __init nd_label_init(void); @@ -285,6 +286,13 @@ static inline struct device *nd_btt_create(struct nd_region *nd_region) struct nd_pfn *to_nd_pfn(struct device *dev); #if IS_ENABLED(CONFIG_NVDIMM_PFN) + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#define PFN_DEFAULT_ALIGNMENT HPAGE_PMD_SIZE +#else +#define PFN_DEFAULT_ALIGNMENT PAGE_SIZE +#endif + int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns); bool is_nd_pfn(struct device *dev); struct device *nd_pfn_create(struct nd_region *nd_region); @@ -390,21 +398,22 @@ int nd_region_activate(struct nd_region *nd_region); void __nd_iostat_start(struct bio *bio, unsigned long *start); static inline bool nd_iostat_start(struct bio *bio, unsigned long *start) { - struct gendisk *disk = bio->bi_bdev->bd_disk; + struct gendisk *disk = bio->bi_disk; if (!blk_queue_io_stat(disk->queue)) return false; *start = jiffies; - generic_start_io_acct(bio_data_dir(bio), + generic_start_io_acct(disk->queue, bio_data_dir(bio), bio_sectors(bio), &disk->part0); return true; } static inline void nd_iostat_end(struct bio *bio, unsigned long start) { - struct gendisk *disk = bio->bi_bdev->bd_disk; + struct gendisk *disk = bio->bi_disk; - generic_end_io_acct(bio_data_dir(bio), &disk->part0, start); + generic_end_io_acct(disk->queue, bio_data_dir(bio), &disk->part0, + start); } static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector, unsigned int len) diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c index 5fcb6f5b22a2..9576c444f0ab 100644 --- a/drivers/nvdimm/pfn_devs.c +++ b/drivers/nvdimm/pfn_devs.c @@ -111,24 +111,27 @@ static ssize_t align_show(struct device *dev, return sprintf(buf, "%ld\n", nd_pfn->align); } -static ssize_t __align_store(struct nd_pfn *nd_pfn, const char *buf) +static const unsigned long *nd_pfn_supported_alignments(void) { - unsigned long val; - int rc; + /* + * This needs to be a non-static variable because the *_SIZE + * macros aren't always constants. 
+ */ + const unsigned long supported_alignments[] = { + PAGE_SIZE, +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + HPAGE_PMD_SIZE, +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD + HPAGE_PUD_SIZE, +#endif +#endif + 0, + }; + static unsigned long data[ARRAY_SIZE(supported_alignments)]; - rc = kstrtoul(buf, 0, &val); - if (rc) - return rc; + memcpy(data, supported_alignments, sizeof(data)); - if (!is_power_of_2(val) || val < PAGE_SIZE || val > SZ_1G) - return -EINVAL; - - if (nd_pfn->dev.driver) - return -EBUSY; - else - nd_pfn->align = val; - - return 0; + return data; } static ssize_t align_store(struct device *dev, @@ -139,7 +142,8 @@ static ssize_t align_store(struct device *dev, device_lock(dev); nvdimm_bus_lock(dev); - rc = __align_store(nd_pfn, buf); + rc = nd_size_select_store(dev, buf, &nd_pfn->align, + nd_pfn_supported_alignments()); dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, rc, buf, buf[len - 1] == '\n' ? "" : "\n"); nvdimm_bus_unlock(dev); @@ -260,6 +264,13 @@ static ssize_t size_show(struct device *dev, } static DEVICE_ATTR_RO(size); +static ssize_t supported_alignments_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return nd_size_select_show(0, nd_pfn_supported_alignments(), buf); +} +static DEVICE_ATTR_RO(supported_alignments); + static struct attribute *nd_pfn_attributes[] = { &dev_attr_mode.attr, &dev_attr_namespace.attr, @@ -267,6 +278,7 @@ static struct attribute *nd_pfn_attributes[] = { &dev_attr_align.attr, &dev_attr_resource.attr, &dev_attr_size.attr, + &dev_attr_supported_alignments.attr, NULL, }; @@ -290,7 +302,7 @@ struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn, return NULL; nd_pfn->mode = PFN_MODE_NONE; - nd_pfn->align = HPAGE_SIZE; + nd_pfn->align = PFN_DEFAULT_ALIGNMENT; dev = &nd_pfn->dev; device_initialize(&nd_pfn->dev); if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) { @@ -638,11 +650,12 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn) / PAGE_SIZE); if (nd_pfn->mode == PFN_MODE_PMEM) { /* - * vmemmap_populate_hugepages() allocates the memmap array in - * HPAGE_SIZE chunks. + * The altmap should be padded out to the block size used + * when populating the vmemmap. This *should* be equal to + * PMD_SIZE for most architectures. 
*/ offset = ALIGN(start + SZ_8K + 64 * npfns + dax_label_reserve, - max(nd_pfn->align, HPAGE_SIZE)) - start; + max(nd_pfn->align, PMD_SIZE)) - start; } else if (nd_pfn->mode == PFN_MODE_RAM) offset = ALIGN(start + SZ_8K + dax_label_reserve, nd_pfn->align) - start; diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index f7099adaabc0..39dfd7affa31 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -80,22 +80,40 @@ static blk_status_t pmem_clear_poison(struct pmem_device *pmem, static void write_pmem(void *pmem_addr, struct page *page, unsigned int off, unsigned int len) { - void *mem = kmap_atomic(page); + unsigned int chunk; + void *mem; - memcpy_flushcache(pmem_addr, mem + off, len); - kunmap_atomic(mem); + while (len) { + mem = kmap_atomic(page); + chunk = min_t(unsigned int, len, PAGE_SIZE); + memcpy_flushcache(pmem_addr, mem + off, chunk); + kunmap_atomic(mem); + len -= chunk; + off = 0; + page++; + pmem_addr += PAGE_SIZE; + } } static blk_status_t read_pmem(struct page *page, unsigned int off, void *pmem_addr, unsigned int len) { + unsigned int chunk; int rc; - void *mem = kmap_atomic(page); + void *mem; - rc = memcpy_mcsafe(mem + off, pmem_addr, len); - kunmap_atomic(mem); - if (rc) - return BLK_STS_IOERR; + while (len) { + mem = kmap_atomic(page); + chunk = min_t(unsigned int, len, PAGE_SIZE); + rc = memcpy_mcsafe(mem + off, pmem_addr, chunk); + kunmap_atomic(mem); + if (rc) + return BLK_STS_IOERR; + len -= chunk; + off = 0; + page++; + pmem_addr += PAGE_SIZE; + } return BLK_STS_OK; } @@ -188,7 +206,8 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector, struct pmem_device *pmem = bdev->bd_queue->queuedata; blk_status_t rc; - rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, is_write, sector); + rc = pmem_do_bvec(pmem, page, hpage_nr_pages(page) * PAGE_SIZE, + 0, is_write, sector); /* * The ->rw_page interface is subtle and tricky. 
The core @@ -243,16 +262,9 @@ static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, return copy_from_iter_flushcache(addr, bytes, i); } -static void pmem_dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, - void *addr, size_t size) -{ - arch_wb_cache_pmem(addr, size); -} - static const struct dax_operations pmem_dax_ops = { .direct_access = pmem_dax_direct_access, .copy_from_iter = pmem_copy_from_iter, - .flush = pmem_dax_flush, }; static const struct attribute_group *pmem_attribute_groups[] = { diff --git a/drivers/nvdimm/pmem.h b/drivers/nvdimm/pmem.h index 5434321cad67..c5917f040fa7 100644 --- a/drivers/nvdimm/pmem.h +++ b/drivers/nvdimm/pmem.h @@ -5,20 +5,6 @@ #include #include -#ifdef CONFIG_ARCH_HAS_PMEM_API -#define ARCH_MEMREMAP_PMEM MEMREMAP_WB -void arch_wb_cache_pmem(void *addr, size_t size); -void arch_invalidate_pmem(void *addr, size_t size); -#else -#define ARCH_MEMREMAP_PMEM MEMREMAP_WT -static inline void arch_wb_cache_pmem(void *addr, size_t size) -{ -} -static inline void arch_invalidate_pmem(void *addr, size_t size) -{ -} -#endif - /* this definition is in it's own header for tools/testing/nvdimm to consume */ struct pmem_device { /* One contiguous memory region per device */ diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index 5954cfbea3fc..829d760f651c 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -723,8 +723,9 @@ static ssize_t mappingN(struct device *dev, char *buf, int n) nd_mapping = &nd_region->mapping[n]; nvdimm = nd_mapping->nvdimm; - return sprintf(buf, "%s,%llu,%llu\n", dev_name(&nvdimm->dev), - nd_mapping->start, nd_mapping->size); + return sprintf(buf, "%s,%llu,%llu,%d\n", dev_name(&nvdimm->dev), + nd_mapping->start, nd_mapping->size, + nd_mapping->position); } #define REGION_MAPPING(idx) \ @@ -965,6 +966,7 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus, nd_region->mapping[i].nvdimm = nvdimm; nd_region->mapping[i].start = mapping->start; nd_region->mapping[i].size = mapping->size; + nd_region->mapping[i].position = mapping->position; INIT_LIST_HEAD(&nd_region->mapping[i].labels); mutex_init(&nd_region->mapping[i].lock); diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 37046ac2c441..5a14cc7f28ee 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -76,6 +76,11 @@ static DEFINE_SPINLOCK(dev_list_lock); static struct class *nvme_class; +static __le32 nvme_get_log_dw10(u8 lid, size_t size) +{ + return cpu_to_le32((((size / 4) - 1) << 16) | lid); +} + int nvme_reset_ctrl(struct nvme_ctrl *ctrl) { if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) @@ -108,7 +113,16 @@ static blk_status_t nvme_error_status(struct request *req) case NVME_SC_WRITE_FAULT: case NVME_SC_READ_ERROR: case NVME_SC_UNWRITTEN_BLOCK: + case NVME_SC_ACCESS_DENIED: + case NVME_SC_READ_ONLY: return BLK_STS_MEDIUM; + case NVME_SC_GUARD_CHECK: + case NVME_SC_APPTAG_CHECK: + case NVME_SC_REFTAG_CHECK: + case NVME_SC_INVALID_PI: + return BLK_STS_PROTECTION; + case NVME_SC_RESERVATION_CONFLICT: + return BLK_STS_NEXUS; default: return BLK_STS_IOERR; } @@ -120,8 +134,6 @@ static inline bool nvme_req_needs_retry(struct request *req) return false; if (nvme_req(req)->status & NVME_SC_DNR) return false; - if (jiffies - req->start_time >= req->timeout) - return false; if (nvme_req(req)->retries >= nvme_max_retries) return false; return true; @@ -162,9 +174,10 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, enum nvme_ctrl_state new_state) { enum 
nvme_ctrl_state old_state; + unsigned long flags; bool changed = false; - spin_lock_irq(&ctrl->lock); + spin_lock_irqsave(&ctrl->lock, flags); old_state = ctrl->state; switch (new_state) { @@ -225,7 +238,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, if (changed) ctrl->state = new_state; - spin_unlock_irq(&ctrl->lock); + spin_unlock_irqrestore(&ctrl->lock, flags); return changed; } @@ -307,7 +320,7 @@ static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable) memset(&c, 0, sizeof(c)); c.directive.opcode = nvme_admin_directive_send; - c.directive.nsid = cpu_to_le32(0xffffffff); + c.directive.nsid = cpu_to_le32(NVME_NSID_ALL); c.directive.doper = NVME_DIR_SND_ID_OP_ENABLE; c.directive.dtype = NVME_DIR_IDENTIFY; c.directive.tdtype = NVME_DIR_STREAMS; @@ -357,7 +370,7 @@ static int nvme_configure_directives(struct nvme_ctrl *ctrl) if (ret) return ret; - ret = nvme_get_stream_params(ctrl, &s, 0xffffffff); + ret = nvme_get_stream_params(ctrl, &s, NVME_NSID_ALL); if (ret) return ret; @@ -585,10 +598,44 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, } EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd); -int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd, - void __user *ubuffer, unsigned bufflen, - void __user *meta_buffer, unsigned meta_len, u32 meta_seed, - u32 *result, unsigned timeout) +static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf, + unsigned len, u32 seed, bool write) +{ + struct bio_integrity_payload *bip; + int ret = -ENOMEM; + void *buf; + + buf = kmalloc(len, GFP_KERNEL); + if (!buf) + goto out; + + ret = -EFAULT; + if (write && copy_from_user(buf, ubuf, len)) + goto out_free_meta; + + bip = bio_integrity_alloc(bio, GFP_KERNEL, 1); + if (IS_ERR(bip)) { + ret = PTR_ERR(bip); + goto out_free_meta; + } + + bip->bip_iter.bi_size = len; + bip->bip_iter.bi_sector = seed; + ret = bio_integrity_add_page(bio, virt_to_page(buf), len, + offset_in_page(buf)); + if (ret == len) + return buf; + ret = -ENOMEM; +out_free_meta: + kfree(buf); +out: + return ERR_PTR(ret); +} + +static int nvme_submit_user_cmd(struct request_queue *q, + struct nvme_command *cmd, void __user *ubuffer, + unsigned bufflen, void __user *meta_buffer, unsigned meta_len, + u32 meta_seed, u32 *result, unsigned timeout) { bool write = nvme_is_write(cmd); struct nvme_ns *ns = q->queuedata; @@ -610,50 +657,17 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd, if (ret) goto out; bio = req->bio; - - if (!disk) - goto submit; - bio->bi_bdev = bdget_disk(disk, 0); - if (!bio->bi_bdev) { - ret = -ENODEV; - goto out_unmap; - } - - if (meta_buffer && meta_len) { - struct bio_integrity_payload *bip; - - meta = kmalloc(meta_len, GFP_KERNEL); - if (!meta) { - ret = -ENOMEM; + bio->bi_disk = disk; + if (disk && meta_buffer && meta_len) { + meta = nvme_add_user_metadata(bio, meta_buffer, meta_len, + meta_seed, write); + if (IS_ERR(meta)) { + ret = PTR_ERR(meta); goto out_unmap; } - - if (write) { - if (copy_from_user(meta, meta_buffer, - meta_len)) { - ret = -EFAULT; - goto out_free_meta; - } - } - - bip = bio_integrity_alloc(bio, GFP_KERNEL, 1); - if (IS_ERR(bip)) { - ret = PTR_ERR(bip); - goto out_free_meta; - } - - bip->bip_iter.bi_size = meta_len; - bip->bip_iter.bi_sector = meta_seed; - - ret = bio_integrity_add_page(bio, virt_to_page(meta), - meta_len, offset_in_page(meta)); - if (ret != meta_len) { - ret = -ENOMEM; - goto out_free_meta; - } } } - submit: + blk_execute_rq(req->q, disk, req, 0); if (nvme_req(req)->flags & 
NVME_REQ_CANCELLED) ret = -EINTR; @@ -665,27 +679,15 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd, if (copy_to_user(meta_buffer, meta, meta_len)) ret = -EFAULT; } - out_free_meta: kfree(meta); out_unmap: - if (bio) { - if (disk && bio->bi_bdev) - bdput(bio->bi_bdev); + if (bio) blk_rq_unmap_user(bio); - } out: blk_mq_free_request(req); return ret; } -int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd, - void __user *ubuffer, unsigned bufflen, u32 *result, - unsigned timeout) -{ - return __nvme_submit_user_cmd(q, cmd, ubuffer, bufflen, NULL, 0, 0, - result, timeout); -} - static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status) { struct nvme_ctrl *ctrl = rq->end_io_data; @@ -775,7 +777,8 @@ static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id) return error; } -static int nvme_identify_ns_descs(struct nvme_ns *ns, unsigned nsid) +static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid, + u8 *eui64, u8 *nguid, uuid_t *uuid) { struct nvme_command c = { }; int status; @@ -791,7 +794,7 @@ static int nvme_identify_ns_descs(struct nvme_ns *ns, unsigned nsid) if (!data) return -ENOMEM; - status = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, data, + status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data, NVME_IDENTIFY_DATA_SIZE); if (status) goto free_data; @@ -805,33 +808,33 @@ static int nvme_identify_ns_descs(struct nvme_ns *ns, unsigned nsid) switch (cur->nidt) { case NVME_NIDT_EUI64: if (cur->nidl != NVME_NIDT_EUI64_LEN) { - dev_warn(ns->ctrl->device, + dev_warn(ctrl->device, "ctrl returned bogus length: %d for NVME_NIDT_EUI64\n", cur->nidl); goto free_data; } len = NVME_NIDT_EUI64_LEN; - memcpy(ns->eui, data + pos + sizeof(*cur), len); + memcpy(eui64, data + pos + sizeof(*cur), len); break; case NVME_NIDT_NGUID: if (cur->nidl != NVME_NIDT_NGUID_LEN) { - dev_warn(ns->ctrl->device, + dev_warn(ctrl->device, "ctrl returned bogus length: %d for NVME_NIDT_NGUID\n", cur->nidl); goto free_data; } len = NVME_NIDT_NGUID_LEN; - memcpy(ns->nguid, data + pos + sizeof(*cur), len); + memcpy(nguid, data + pos + sizeof(*cur), len); break; case NVME_NIDT_UUID: if (cur->nidl != NVME_NIDT_UUID_LEN) { - dev_warn(ns->ctrl->device, + dev_warn(ctrl->device, "ctrl returned bogus length: %d for NVME_NIDT_UUID\n", cur->nidl); goto free_data; } len = NVME_NIDT_UUID_LEN; - uuid_copy(&ns->uuid, data + pos + sizeof(*cur)); + uuid_copy(uuid, data + pos + sizeof(*cur)); break; default: /* Skip unnkown types */ @@ -856,9 +859,10 @@ static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *n return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list, 0x1000); } -static int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid, - struct nvme_id_ns **id) +static struct nvme_id_ns *nvme_identify_ns(struct nvme_ctrl *ctrl, + unsigned nsid) { + struct nvme_id_ns *id; struct nvme_command c = { }; int error; @@ -867,15 +871,18 @@ static int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid, c.identify.nsid = cpu_to_le32(nsid); c.identify.cns = NVME_ID_CNS_NS; - *id = kmalloc(sizeof(struct nvme_id_ns), GFP_KERNEL); - if (!*id) - return -ENOMEM; + id = kmalloc(sizeof(*id), GFP_KERNEL); + if (!id) + return NULL; - error = nvme_submit_sync_cmd(dev->admin_q, &c, *id, - sizeof(struct nvme_id_ns)); - if (error) - kfree(*id); - return error; + error = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id)); + if (error) { + dev_warn(ctrl->device, "Identify namespace failed\n"); + kfree(id); + return NULL; + 
} + + return id; } static int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11, @@ -970,7 +977,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) c.rw.apptag = cpu_to_le16(io.apptag); c.rw.appmask = cpu_to_le16(io.appmask); - return __nvme_submit_user_cmd(ns->queue, &c, + return nvme_submit_user_cmd(ns->queue, &c, (void __user *)(uintptr_t)io.addr, length, metadata, meta_len, io.slba, NULL, 0); } @@ -1008,7 +1015,8 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns, status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c, (void __user *)(uintptr_t)cmd.addr, cmd.data_len, - &cmd.result, timeout); + (void __user *)(uintptr_t)cmd.metadata, cmd.metadata, + 0, &cmd.result, timeout); if (status >= 0) { if (put_user(cmd.result, &ucmd->result)) return -EFAULT; @@ -1166,32 +1174,21 @@ static void nvme_config_discard(struct nvme_ns *ns) blk_queue_max_write_zeroes_sectors(ns->queue, UINT_MAX); } -static int nvme_revalidate_ns(struct nvme_ns *ns, struct nvme_id_ns **id) +static void nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid, + struct nvme_id_ns *id, u8 *eui64, u8 *nguid, uuid_t *uuid) { - if (nvme_identify_ns(ns->ctrl, ns->ns_id, id)) { - dev_warn(ns->ctrl->dev, "%s: Identify failure\n", __func__); - return -ENODEV; - } - - if ((*id)->ncap == 0) { - kfree(*id); - return -ENODEV; - } - - if (ns->ctrl->vs >= NVME_VS(1, 1, 0)) - memcpy(ns->eui, (*id)->eui64, sizeof(ns->eui)); - if (ns->ctrl->vs >= NVME_VS(1, 2, 0)) - memcpy(ns->nguid, (*id)->nguid, sizeof(ns->nguid)); - if (ns->ctrl->vs >= NVME_VS(1, 3, 0)) { + if (ctrl->vs >= NVME_VS(1, 1, 0)) + memcpy(eui64, id->eui64, sizeof(id->eui64)); + if (ctrl->vs >= NVME_VS(1, 2, 0)) + memcpy(nguid, id->nguid, sizeof(id->nguid)); + if (ctrl->vs >= NVME_VS(1, 3, 0)) { /* Don't treat error as fatal we potentially * already have a NGUID or EUI-64 */ - if (nvme_identify_ns_descs(ns, ns->ns_id)) - dev_warn(ns->ctrl->device, + if (nvme_identify_ns_descs(ctrl, nsid, eui64, nguid, uuid)) + dev_warn(ctrl->device, "%s: Identify Descriptors failed\n", __func__); } - - return 0; } static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) @@ -1232,22 +1229,38 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) static int nvme_revalidate_disk(struct gendisk *disk) { struct nvme_ns *ns = disk->private_data; - struct nvme_id_ns *id = NULL; - int ret; + struct nvme_ctrl *ctrl = ns->ctrl; + struct nvme_id_ns *id; + u8 eui64[8] = { 0 }, nguid[16] = { 0 }; + uuid_t uuid = uuid_null; + int ret = 0; if (test_bit(NVME_NS_DEAD, &ns->flags)) { set_capacity(disk, 0); return -ENODEV; } - ret = nvme_revalidate_ns(ns, &id); - if (ret) - return ret; + id = nvme_identify_ns(ctrl, ns->ns_id); + if (!id) + return -ENODEV; - __nvme_revalidate_disk(disk, id); + if (id->ncap == 0) { + ret = -ENODEV; + goto out; + } + + nvme_report_ns_ids(ctrl, ns->ns_id, id, eui64, nguid, &uuid); + if (!uuid_equal(&ns->uuid, &uuid) || + memcmp(&ns->nguid, &nguid, sizeof(ns->nguid)) || + memcmp(&ns->eui, &eui64, sizeof(ns->eui))) { + dev_err(ctrl->device, + "identifiers changed for nsid %d\n", ns->ns_id); + ret = -ENODEV; + } + +out: kfree(id); - - return 0; + return ret; } static char nvme_pr_type(enum pr_type type) @@ -1447,7 +1460,7 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap) ctrl->ctrl_config = NVME_CC_CSS_NVM; ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT; - ctrl->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE; + 
ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE; ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES; ctrl->ctrl_config |= NVME_CC_ENABLE; @@ -1460,7 +1473,7 @@ EXPORT_SYMBOL_GPL(nvme_enable_ctrl); int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl) { - unsigned long timeout = jiffies + (shutdown_timeout * HZ); + unsigned long timeout = jiffies + (ctrl->shutdown_timeout * HZ); u32 csts; int ret; @@ -1509,6 +1522,23 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl, blk_queue_write_cache(q, vwc, vwc); } +static int nvme_configure_timestamp(struct nvme_ctrl *ctrl) +{ + __le64 ts; + int ret; + + if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP)) + return 0; + + ts = cpu_to_le64(ktime_to_ms(ktime_get_real())); + ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts), + NULL); + if (ret) + dev_warn_once(ctrl->device, + "could not set timestamp (%d)\n", ret); + return ret; +} + static int nvme_configure_apst(struct nvme_ctrl *ctrl) { /* @@ -1811,6 +1841,20 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) ctrl->sgls = le32_to_cpu(id->sgls); ctrl->kas = le16_to_cpu(id->kas); + if (id->rtd3e) { + /* us -> s */ + u32 transition_time = le32_to_cpu(id->rtd3e) / 1000000; + + ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time, + shutdown_timeout, 60); + + if (ctrl->shutdown_timeout != shutdown_timeout) + dev_warn(ctrl->device, + "Shutdown timeout set to %u seconds\n", + ctrl->shutdown_timeout); + } else + ctrl->shutdown_timeout = shutdown_timeout; + ctrl->npss = id->npss; ctrl->apsta = id->apsta; prev_apst_enabled = ctrl->apst_enabled; @@ -1851,6 +1895,8 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) ctrl->cntlid = le16_to_cpu(id->cntlid); ctrl->hmpre = le32_to_cpu(id->hmpre); ctrl->hmmin = le32_to_cpu(id->hmmin); + ctrl->hmminds = le32_to_cpu(id->hmminds); + ctrl->hmmaxd = le16_to_cpu(id->hmmaxd); } kfree(id); @@ -1863,6 +1909,10 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) ret = nvme_configure_apst(ctrl); if (ret < 0) return ret; + + ret = nvme_configure_timestamp(ctrl); + if (ret < 0) + return ret; ret = nvme_configure_directives(ctrl); if (ret < 0) @@ -2086,7 +2136,7 @@ static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj, struct nvme_ns *ns = nvme_get_ns_from_dev(dev); if (a == &dev_attr_uuid.attr) { - if (uuid_is_null(&ns->uuid) || + if (uuid_is_null(&ns->uuid) && !memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) return 0; } @@ -2318,13 +2368,20 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->instance); - if (nvme_revalidate_ns(ns, &id)) + id = nvme_identify_ns(ctrl, nsid); + if (!id) goto out_free_queue; - if (nvme_nvm_ns_supported(ns, id) && - nvme_nvm_register(ns, disk_name, node)) { - dev_warn(ctrl->device, "%s: LightNVM init failure\n", __func__); + if (id->ncap == 0) goto out_free_id; + + nvme_report_ns_ids(ctrl, ns->ns_id, id, ns->eui, ns->nguid, &ns->uuid); + + if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) { + if (nvme_nvm_register(ns, disk_name, node)) { + dev_warn(ctrl->device, "LightNVM init failure\n"); + goto out_free_id; + } } disk = alloc_disk_node(0, node); @@ -2531,7 +2588,7 @@ static void nvme_async_event_work(struct work_struct *work) container_of(work, struct nvme_ctrl, async_event_work); spin_lock_irq(&ctrl->lock); - while (ctrl->event_limit > 0) { + while (ctrl->state == NVME_CTRL_LIVE && ctrl->event_limit > 0) { int aer_idx = --ctrl->event_limit; spin_unlock_irq(&ctrl->lock); @@ -2541,6 +2598,71 @@ static void nvme_async_event_work(struct 
work_struct *work) spin_unlock_irq(&ctrl->lock); } +static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl) +{ + + u32 csts; + + if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) + return false; + + if (csts == ~0) + return false; + + return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP)); +} + +static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl) +{ + struct nvme_command c = { }; + struct nvme_fw_slot_info_log *log; + + log = kmalloc(sizeof(*log), GFP_KERNEL); + if (!log) + return; + + c.common.opcode = nvme_admin_get_log_page; + c.common.nsid = cpu_to_le32(NVME_NSID_ALL); + c.common.cdw10[0] = nvme_get_log_dw10(NVME_LOG_FW_SLOT, sizeof(*log)); + + if (!nvme_submit_sync_cmd(ctrl->admin_q, &c, log, sizeof(*log))) + dev_warn(ctrl->device, + "Get FW SLOT INFO log error\n"); + kfree(log); +} + +static void nvme_fw_act_work(struct work_struct *work) +{ + struct nvme_ctrl *ctrl = container_of(work, + struct nvme_ctrl, fw_act_work); + unsigned long fw_act_timeout; + + if (ctrl->mtfa) + fw_act_timeout = jiffies + + msecs_to_jiffies(ctrl->mtfa * 100); + else + fw_act_timeout = jiffies + + msecs_to_jiffies(admin_timeout * 1000); + + nvme_stop_queues(ctrl); + while (nvme_ctrl_pp_status(ctrl)) { + if (time_after(jiffies, fw_act_timeout)) { + dev_warn(ctrl->device, + "Fw activation timeout, reset controller\n"); + nvme_reset_ctrl(ctrl); + break; + } + msleep(100); + } + + if (ctrl->state != NVME_CTRL_LIVE) + return; + + nvme_start_queues(ctrl); + /* read FW slot informationi to clear the AER*/ + nvme_get_fw_slot_info(ctrl); +} + void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status, union nvme_result *res) { @@ -2553,7 +2675,8 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status, /*FALLTHRU*/ case NVME_SC_ABORT_REQ: ++ctrl->event_limit; - queue_work(nvme_wq, &ctrl->async_event_work); + if (ctrl->state == NVME_CTRL_LIVE) + queue_work(nvme_wq, &ctrl->async_event_work); break; default: break; @@ -2567,6 +2690,9 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status, dev_info(ctrl->device, "rescanning\n"); nvme_queue_scan(ctrl); break; + case NVME_AER_NOTICE_FW_ACT_STARTING: + queue_work(nvme_wq, &ctrl->fw_act_work); + break; default: dev_warn(ctrl->device, "async event result %08x\n", result); } @@ -2614,6 +2740,7 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl) nvme_stop_keep_alive(ctrl); flush_work(&ctrl->async_event_work); flush_work(&ctrl->scan_work); + cancel_work_sync(&ctrl->fw_act_work); } EXPORT_SYMBOL_GPL(nvme_stop_ctrl); @@ -2677,6 +2804,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, ctrl->quirks = quirks; INIT_WORK(&ctrl->scan_work, nvme_scan_work); INIT_WORK(&ctrl->async_event_work, nvme_async_event_work); + INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work); ret = nvme_set_instance(ctrl); if (ret) diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index 5f5cd306f76d..555c976cc2ee 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -22,7 +22,7 @@ #include "fabrics.h" static LIST_HEAD(nvmf_transports); -static DEFINE_MUTEX(nvmf_transports_mutex); +static DECLARE_RWSEM(nvmf_transports_rwsem); static LIST_HEAD(nvmf_hosts); static DEFINE_MUTEX(nvmf_hosts_mutex); @@ -75,7 +75,7 @@ static struct nvmf_host *nvmf_host_default(void) kref_init(&host->ref); snprintf(host->nqn, NVMF_NQN_SIZE, - "nqn.2014-08.org.nvmexpress:NVMf:uuid:%pUb", &host->id); + "nqn.2014-08.org.nvmexpress:uuid:%pUb", &host->id); mutex_lock(&nvmf_hosts_mutex); list_add_tail(&host->list, 
&nvmf_hosts); @@ -495,9 +495,9 @@ int nvmf_register_transport(struct nvmf_transport_ops *ops) if (!ops->create_ctrl) return -EINVAL; - mutex_lock(&nvmf_transports_mutex); + down_write(&nvmf_transports_rwsem); list_add_tail(&ops->entry, &nvmf_transports); - mutex_unlock(&nvmf_transports_mutex); + up_write(&nvmf_transports_rwsem); return 0; } @@ -514,9 +514,9 @@ EXPORT_SYMBOL_GPL(nvmf_register_transport); */ void nvmf_unregister_transport(struct nvmf_transport_ops *ops) { - mutex_lock(&nvmf_transports_mutex); + down_write(&nvmf_transports_rwsem); list_del(&ops->entry); - mutex_unlock(&nvmf_transports_mutex); + up_write(&nvmf_transports_rwsem); } EXPORT_SYMBOL_GPL(nvmf_unregister_transport); @@ -525,7 +525,7 @@ static struct nvmf_transport_ops *nvmf_lookup_transport( { struct nvmf_transport_ops *ops; - lockdep_assert_held(&nvmf_transports_mutex); + lockdep_assert_held(&nvmf_transports_rwsem); list_for_each_entry(ops, &nvmf_transports, entry) { if (strcmp(ops->name, opts->transport) == 0) @@ -565,6 +565,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, opts->queue_size = NVMF_DEF_QUEUE_SIZE; opts->nr_io_queues = num_online_cpus(); opts->reconnect_delay = NVMF_DEF_RECONNECT_DELAY; + opts->kato = NVME_DEFAULT_KATO; options = o = kstrdup(buf, GFP_KERNEL); if (!options) @@ -655,21 +656,22 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, goto out; } - if (opts->discovery_nqn) { - pr_err("Discovery controllers cannot accept keep_alive_tmo != 0\n"); - ret = -EINVAL; - goto out; - } - if (token < 0) { pr_err("Invalid keep_alive_tmo %d\n", token); ret = -EINVAL; goto out; - } else if (token == 0) { + } else if (token == 0 && !opts->discovery_nqn) { /* Allowed for debug */ pr_warn("keep_alive_tmo 0 won't execute keep alives!!!\n"); } opts->kato = token; + + if (opts->discovery_nqn && opts->kato) { + pr_err("Discovery controllers cannot accept KATO != 0\n"); + ret = -EINVAL; + goto out; + } + break; case NVMF_OPT_CTRL_LOSS_TMO: if (match_int(args, &token)) { @@ -735,6 +737,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, goto out; } if (uuid_parse(p, &hostid)) { + pr_err("Invalid hostid %s\n", p); ret = -EINVAL; goto out; } @@ -761,8 +764,6 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, uuid_copy(&opts->host->id, &hostid); out: - if (!opts->discovery_nqn && !opts->kato) - opts->kato = NVME_DEFAULT_KATO; kfree(options); return ret; } @@ -850,7 +851,7 @@ nvmf_create_ctrl(struct device *dev, const char *buf, size_t count) goto out_free_opts; opts->mask &= ~NVMF_REQUIRED_OPTS; - mutex_lock(&nvmf_transports_mutex); + down_read(&nvmf_transports_rwsem); ops = nvmf_lookup_transport(opts); if (!ops) { pr_info("no handler found for transport %s.\n", @@ -877,16 +878,16 @@ nvmf_create_ctrl(struct device *dev, const char *buf, size_t count) dev_warn(ctrl->device, "controller returned incorrect NQN: \"%s\".\n", ctrl->subnqn); - mutex_unlock(&nvmf_transports_mutex); + up_read(&nvmf_transports_rwsem); ctrl->ops->delete_ctrl(ctrl); return ERR_PTR(-EINVAL); } - mutex_unlock(&nvmf_transports_mutex); + up_read(&nvmf_transports_rwsem); return ctrl; out_unlock: - mutex_unlock(&nvmf_transports_mutex); + up_read(&nvmf_transports_rwsem); out_free_opts: nvmf_free_options(opts); return ERR_PTR(ret); diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index 5c2a08ef08ba..af075e998944 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -220,6 +220,90 @@ static int __nvme_fc_del_ctrl(struct nvme_fc_ctrl *); static void 
__nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *, struct nvme_fc_queue *, unsigned int); +static void +nvme_fc_free_lport(struct kref *ref) +{ + struct nvme_fc_lport *lport = + container_of(ref, struct nvme_fc_lport, ref); + unsigned long flags; + + WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED); + WARN_ON(!list_empty(&lport->endp_list)); + + /* remove from transport list */ + spin_lock_irqsave(&nvme_fc_lock, flags); + list_del(&lport->port_list); + spin_unlock_irqrestore(&nvme_fc_lock, flags); + + /* let the LLDD know we've finished tearing it down */ + lport->ops->localport_delete(&lport->localport); + + ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num); + ida_destroy(&lport->endp_cnt); + + put_device(lport->dev); + + kfree(lport); +} + +static void +nvme_fc_lport_put(struct nvme_fc_lport *lport) +{ + kref_put(&lport->ref, nvme_fc_free_lport); +} + +static int +nvme_fc_lport_get(struct nvme_fc_lport *lport) +{ + return kref_get_unless_zero(&lport->ref); +} + + +static struct nvme_fc_lport * +nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo) +{ + struct nvme_fc_lport *lport; + unsigned long flags; + + spin_lock_irqsave(&nvme_fc_lock, flags); + + list_for_each_entry(lport, &nvme_fc_lport_list, port_list) { + if (lport->localport.node_name != pinfo->node_name || + lport->localport.port_name != pinfo->port_name) + continue; + + if (lport->localport.port_state != FC_OBJSTATE_DELETED) { + lport = ERR_PTR(-EEXIST); + goto out_done; + } + + if (!nvme_fc_lport_get(lport)) { + /* + * fails if ref cnt already 0. If so, + * act as if lport already deleted + */ + lport = NULL; + goto out_done; + } + + /* resume the lport */ + + lport->localport.port_role = pinfo->port_role; + lport->localport.port_id = pinfo->port_id; + lport->localport.port_state = FC_OBJSTATE_ONLINE; + + spin_unlock_irqrestore(&nvme_fc_lock, flags); + + return lport; + } + + lport = NULL; + +out_done: + spin_unlock_irqrestore(&nvme_fc_lock, flags); + + return lport; +} /** * nvme_fc_register_localport - transport entry point called by an @@ -257,6 +341,28 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo, goto out_reghost_failed; } + /* + * look to see if there is already a localport that had been + * deregistered and in the process of waiting for all the + * references to fully be removed. If the references haven't + * expired, we can simply re-enable the localport. Remoteports + * and controller reconnections should resume naturally. 
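
The new nvme_fc_attach_to_unreg_lport() above revives a localport only while its reference count has not yet dropped to zero, relying on kref_get_unless_zero(). A rough user-space analogue of that resurrection pattern follows; the struct and function names are hypothetical and not part of the patch:

/*
 * Illustrative sketch only: get_unless_zero() stands in for
 * kref_get_unless_zero(); "struct port" is a made-up object.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct port {
	atomic_int refcount;	/* 0 means the object is already dead */
	bool deleted;		/* owner has unregistered it */
};

/* Take a reference only if at least one reference is still held. */
static bool get_unless_zero(struct port *p)
{
	int old = atomic_load(&p->refcount);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&p->refcount, &old, old + 1))
			return true;	/* reference taken, object revived */
	}
	return false;			/* last reference already gone */
}

int main(void)
{
	struct port p = { .refcount = 1, .deleted = true };

	if (p.deleted && get_unless_zero(&p))
		printf("re-attached, refcount now %d\n", atomic_load(&p.refcount));
	else
		printf("object already torn down, allocate a new one\n");
	return 0;
}

The compare-and-swap loop is what keeps the revival race-free: if the final put wins, the attach path falls back to allocating a fresh localport, which is exactly how the hunk above is structured.
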
+ */ + newrec = nvme_fc_attach_to_unreg_lport(pinfo); + + /* found an lport, but something about its state is bad */ + if (IS_ERR(newrec)) { + ret = PTR_ERR(newrec); + goto out_reghost_failed; + + /* found existing lport, which was resumed */ + } else if (newrec) { + *portptr = &newrec->localport; + return 0; + } + + /* nothing found - allocate a new localport struct */ + newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz), GFP_KERNEL); if (!newrec) { @@ -310,44 +416,6 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo, } EXPORT_SYMBOL_GPL(nvme_fc_register_localport); -static void -nvme_fc_free_lport(struct kref *ref) -{ - struct nvme_fc_lport *lport = - container_of(ref, struct nvme_fc_lport, ref); - unsigned long flags; - - WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED); - WARN_ON(!list_empty(&lport->endp_list)); - - /* remove from transport list */ - spin_lock_irqsave(&nvme_fc_lock, flags); - list_del(&lport->port_list); - spin_unlock_irqrestore(&nvme_fc_lock, flags); - - /* let the LLDD know we've finished tearing it down */ - lport->ops->localport_delete(&lport->localport); - - ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num); - ida_destroy(&lport->endp_cnt); - - put_device(lport->dev); - - kfree(lport); -} - -static void -nvme_fc_lport_put(struct nvme_fc_lport *lport) -{ - kref_put(&lport->ref, nvme_fc_free_lport); -} - -static int -nvme_fc_lport_get(struct nvme_fc_lport *lport) -{ - return kref_get_unless_zero(&lport->ref); -} - /** * nvme_fc_unregister_localport - transport entry point called by an * LLDD to deregister/remove a previously @@ -1308,7 +1376,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) if (atomic_read(&op->state) == FCPOP_STATE_ABORTED) status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1); else if (freq->status) - status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1); + status = cpu_to_le16(NVME_SC_INTERNAL << 1); /* * For the linux implementation, if we have an unsuccesful @@ -1336,7 +1404,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) */ if (freq->transferred_length != be32_to_cpu(op->cmd_iu.data_len)) { - status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1); + status = cpu_to_le16(NVME_SC_INTERNAL << 1); goto done; } result.u64 = 0; @@ -1353,7 +1421,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) freq->transferred_length || op->rsp_iu.status_code || sqe->common.command_id != cqe->command_id)) { - status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1); + status = cpu_to_le16(NVME_SC_INTERNAL << 1); goto done; } result = cqe->result; @@ -1361,7 +1429,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) break; default: - status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1); + status = cpu_to_le16(NVME_SC_INTERNAL << 1); goto done; } @@ -1921,16 +1989,17 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, * as well as those by FC-NVME spec. */ WARN_ON_ONCE(sqe->common.metadata); - WARN_ON_ONCE(sqe->common.dptr.prp1); - WARN_ON_ONCE(sqe->common.dptr.prp2); sqe->common.flags |= NVME_CMD_SGL_METABUF; /* - * format SQE DPTR field per FC-NVME rules - * type=data block descr; subtype=offset; - * offset is currently 0. 
+ * format SQE DPTR field per FC-NVME rules: + * type=0x5 Transport SGL Data Block Descriptor + * subtype=0xA Transport-specific value + * address=0 + * length=length of the data series */ - sqe->rw.dptr.sgl.type = NVME_SGL_FMT_OFFSET; + sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | + NVME_SGL_FMT_TRANSPORT_A; sqe->rw.dptr.sgl.length = cpu_to_le32(data_len); sqe->rw.dptr.sgl.addr = 0; @@ -2168,7 +2237,6 @@ static const struct blk_mq_ops nvme_fc_mq_ops = { .complete = nvme_fc_complete_rq, .init_request = nvme_fc_init_request, .exit_request = nvme_fc_exit_request, - .reinit_request = nvme_fc_reinit_request, .init_hctx = nvme_fc_init_hctx, .poll = nvme_fc_poll, .timeout = nvme_fc_timeout, @@ -2269,7 +2337,7 @@ nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl) nvme_fc_init_io_queues(ctrl); - ret = blk_mq_reinit_tagset(&ctrl->tag_set); + ret = blk_mq_reinit_tagset(&ctrl->tag_set, nvme_fc_reinit_request); if (ret) goto out_free_io_queues; @@ -2655,7 +2723,6 @@ static const struct blk_mq_ops nvme_fc_admin_mq_ops = { .complete = nvme_fc_complete_rq, .init_request = nvme_fc_init_request, .exit_request = nvme_fc_exit_request, - .reinit_request = nvme_fc_reinit_request, .init_hctx = nvme_fc_init_admin_hctx, .timeout = nvme_fc_timeout, }; @@ -2733,6 +2800,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set); if (ret) goto out_free_queues; + ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set; ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set); if (IS_ERR(ctrl->ctrl.admin_q)) { diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index be8541335e31..1f79e3f141e6 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c @@ -643,17 +643,9 @@ static int nvme_nvm_submit_user_cmd(struct request_queue *q, vcmd->ph_rw.metadata = cpu_to_le64(metadata_dma); } - if (!disk) - goto submit; - - bio->bi_bdev = bdget_disk(disk, 0); - if (!bio->bi_bdev) { - ret = -ENODEV; - goto err_meta; - } + bio->bi_disk = disk; } -submit: blk_execute_rq(q, NULL, rq, 0); if (nvme_req(rq)->flags & NVME_REQ_CANCELLED) @@ -673,11 +665,8 @@ static int nvme_nvm_submit_user_cmd(struct request_queue *q, if (meta_buf && meta_len) dma_pool_free(dev->dma_pool, metadata, metadata_dma); err_map: - if (bio) { - if (disk && bio->bi_bdev) - bdput(bio->bi_bdev); + if (bio) blk_rq_unmap_user(bio); - } err_ppa: if (ppa_buf && ppa_len) dma_pool_free(dev->dma_pool, ppa_list, ppa_dma); @@ -966,29 +955,3 @@ void nvme_nvm_unregister_sysfs(struct nvme_ns *ns) sysfs_remove_group(&disk_to_dev(ns->disk)->kobj, &nvm_dev_attr_group); } - -/* move to shared place when used in multiple places. */ -#define PCI_VENDOR_ID_CNEX 0x1d1d -#define PCI_DEVICE_ID_CNEX_WL 0x2807 -#define PCI_DEVICE_ID_CNEX_QEMU 0x1f1f - -int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id) -{ - struct nvme_ctrl *ctrl = ns->ctrl; - /* XXX: this is poking into PCI structures from generic code! 
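
The dptr change above packs the descriptor type into the upper nibble and the transport-specific subtype into the lower nibble of sqe->rw.dptr.sgl.type. A small standalone illustration of that arithmetic, with the two values copied from the patch comment (type 0x5, subtype 0xA) and the macros redefined locally so the snippet compiles on its own:

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins mirroring the kernel constants named in the hunk above. */
#define NVME_TRANSPORT_SGL_DATA_DESC	0x5	/* Transport SGL Data Block */
#define NVME_SGL_FMT_TRANSPORT_A	0xA	/* transport-specific subtype */

int main(void)
{
	uint8_t type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | NVME_SGL_FMT_TRANSPORT_A;

	/* prints sgl.type = 0x5a: type nibble 5, subtype nibble a */
	printf("sgl.type = 0x%02x\n", type);
	return 0;
}
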
*/ - struct pci_dev *pdev = to_pci_dev(ctrl->dev); - - /* QEMU NVMe simulator - PCI ID + Vendor specific bit */ - if (pdev->vendor == PCI_VENDOR_ID_CNEX && - pdev->device == PCI_DEVICE_ID_CNEX_QEMU && - id->vs[0] == 0x1) - return 1; - - /* CNEX Labs - PCI ID + Vendor specific bit */ - if (pdev->vendor == PCI_VENDOR_ID_CNEX && - pdev->device == PCI_DEVICE_ID_CNEX_WL && - id->vs[0] == 0x1) - return 1; - - return 0; -} diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 8f2a168ddc01..d3f3c4447515 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -75,6 +75,11 @@ enum nvme_quirks { * The deepest sleep state should not be used. */ NVME_QUIRK_NO_DEEPEST_PS = (1 << 5), + + /* + * Supports the LighNVM command set if indicated in vs[1]. + */ + NVME_QUIRK_LIGHTNVM = (1 << 6), }; /* @@ -125,6 +130,7 @@ struct nvme_ctrl { struct kref kref; int instance; struct blk_mq_tag_set *tagset; + struct blk_mq_tag_set *admin_tagset; struct list_head namespaces; struct mutex namespaces_mutex; struct device *device; /* char device */ @@ -142,6 +148,7 @@ struct nvme_ctrl { u16 cntlid; u32 ctrl_config; + u16 mtfa; u32 queue_count; u64 cap; @@ -160,6 +167,7 @@ struct nvme_ctrl { u16 kas; u8 npss; u8 apsta; + unsigned int shutdown_timeout; unsigned int kato; bool subsystem; unsigned long quirks; @@ -167,13 +175,17 @@ struct nvme_ctrl { struct work_struct scan_work; struct work_struct async_event_work; struct delayed_work ka_work; + struct work_struct fw_act_work; /* Power saving configuration */ u64 ps_max_latency_us; bool apst_enabled; + /* PCIe only: */ u32 hmpre; u32 hmmin; + u32 hmminds; + u16 hmmaxd; /* Fabrics only */ u16 sqsize; @@ -207,13 +219,9 @@ struct nvme_ns { bool ext; u8 pi_type; unsigned long flags; - u16 noiob; - #define NVME_NS_REMOVING 0 #define NVME_NS_DEAD 1 - - u64 mode_select_num_blocks; - u32 mode_select_block_len; + u16 noiob; }; struct nvme_ctrl_ops { @@ -314,20 +322,12 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, union nvme_result *result, void *buffer, unsigned bufflen, unsigned timeout, int qid, int at_head, int flags); -int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd, - void __user *ubuffer, unsigned bufflen, u32 *result, - unsigned timeout); -int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd, - void __user *ubuffer, unsigned bufflen, - void __user *meta_buffer, unsigned meta_len, u32 meta_seed, - u32 *result, unsigned timeout); int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count); void nvme_start_keep_alive(struct nvme_ctrl *ctrl); void nvme_stop_keep_alive(struct nvme_ctrl *ctrl); int nvme_reset_ctrl(struct nvme_ctrl *ctrl); #ifdef CONFIG_NVM -int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id); int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node); void nvme_nvm_unregister(struct nvme_ns *ns); int nvme_nvm_register_sysfs(struct nvme_ns *ns); @@ -346,10 +346,6 @@ static inline int nvme_nvm_register_sysfs(struct nvme_ns *ns) return 0; } static inline void nvme_nvm_unregister_sysfs(struct nvme_ns *ns) {}; -static inline int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id) -{ - return 0; -} static inline int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg) { diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index ea892e732268..3f5a04c586ce 100644 --- a/drivers/nvme/host/pci.c +++ 
b/drivers/nvme/host/pci.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -93,7 +94,7 @@ struct nvme_dev { struct mutex shutdown_lock; bool subsystem; void __iomem *cmb; - dma_addr_t cmb_dma_addr; + pci_bus_addr_t cmb_bus_addr; u64 cmb_size; u32 cmbsz; u32 cmbloc; @@ -540,6 +541,20 @@ static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi) } #endif +static void nvme_print_sgl(struct scatterlist *sgl, int nents) +{ + int i; + struct scatterlist *sg; + + for_each_sg(sgl, sg, nents, i) { + dma_addr_t phys = sg_phys(sg); + pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d " + "dma_address:%pad dma_length:%d\n", + i, &phys, sg->offset, sg->length, &sg_dma_address(sg), + sg_dma_len(sg)); + } +} + static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); @@ -556,8 +571,10 @@ static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req) int nprps, i; length -= (page_size - offset); - if (length <= 0) + if (length <= 0) { + iod->first_dma = 0; return BLK_STS_OK; + } dma_len -= (page_size - offset); if (dma_len) { @@ -620,19 +637,10 @@ static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req) return BLK_STS_OK; bad_sgl: - if (WARN_ONCE(1, "Invalid SGL for payload:%d nents:%d\n", - blk_rq_payload_bytes(req), iod->nents)) { - for_each_sg(iod->sg, sg, iod->nents, i) { - dma_addr_t phys = sg_phys(sg); - pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d " - "dma_address:%pad dma_length:%d\n", i, &phys, - sg->offset, sg->length, - &sg_dma_address(sg), - sg_dma_len(sg)); - } - } + WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents), + "Invalid SGL for payload:%d nents:%d\n", + blk_rq_payload_bytes(req), iod->nents); return BLK_STS_IOERR; - } static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, @@ -667,7 +675,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, if (blk_rq_map_integrity_sg(q, req->bio, &iod->meta_sg) != 1) goto out_unmap; - if (rq_data_dir(req)) + if (req_op(req) == REQ_OP_WRITE) nvme_dif_remap(req, nvme_dif_prep); if (!dma_map_sg(dev->dev, &iod->meta_sg, 1, dma_dir)) @@ -695,7 +703,7 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req) if (iod->nents) { dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir); if (blk_integrity_rq(req)) { - if (!rq_data_dir(req)) + if (req_op(req) == REQ_OP_READ) nvme_dif_remap(req, nvme_dif_complete); dma_unmap_sg(dev->dev, &iod->meta_sg, 1, dma_dir); } @@ -1218,7 +1226,7 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq, if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) { unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth), dev->ctrl.page_size); - nvmeq->sq_dma_addr = dev->cmb_dma_addr + offset; + nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset; nvmeq->sq_cmds_io = dev->cmb + offset; } else { nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth), @@ -1311,11 +1319,11 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid) if (result < 0) goto release_cq; + nvme_init_queue(nvmeq, qid); result = queue_request_irq(nvmeq); if (result < 0) goto release_sq; - nvme_init_queue(nvmeq, qid); return result; release_sq: @@ -1377,6 +1385,7 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev) if (blk_mq_alloc_tag_set(&dev->admin_tagset)) return -ENOMEM; + dev->ctrl.admin_tagset = &dev->admin_tagset; dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset); if 
(IS_ERR(dev->ctrl.admin_q)) { @@ -1461,6 +1470,7 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev) return result; nvmeq->cq_vector = 0; + nvme_init_queue(nvmeq, 0); result = queue_request_irq(nvmeq); if (result) { nvmeq->cq_vector = -1; @@ -1517,7 +1527,7 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev) resource_size_t bar_size; struct pci_dev *pdev = to_pci_dev(dev->dev); void __iomem *cmb; - dma_addr_t dma_addr; + int bar; dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ); if (!(NVME_CMB_SZ(dev->cmbsz))) @@ -1530,7 +1540,8 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev) szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz)); size = szu * NVME_CMB_SZ(dev->cmbsz); offset = szu * NVME_CMB_OFST(dev->cmbloc); - bar_size = pci_resource_len(pdev, NVME_CMB_BIR(dev->cmbloc)); + bar = NVME_CMB_BIR(dev->cmbloc); + bar_size = pci_resource_len(pdev, bar); if (offset > bar_size) return NULL; @@ -1543,12 +1554,11 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev) if (size > bar_size - offset) size = bar_size - offset; - dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(dev->cmbloc)) + offset; - cmb = ioremap_wc(dma_addr, size); + cmb = ioremap_wc(pci_resource_start(pdev, bar) + offset, size); if (!cmb) return NULL; - dev->cmb_dma_addr = dma_addr; + dev->cmb_bus_addr = pci_bus_address(pdev, bar) + offset; dev->cmb_size = size; return cmb; } @@ -1609,21 +1619,23 @@ static void nvme_free_host_mem(struct nvme_dev *dev) dev->host_mem_descs = NULL; } -static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred) +static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred, + u32 chunk_size) { struct nvme_host_mem_buf_desc *descs; - u32 chunk_size, max_entries, len; + u32 max_entries, len; dma_addr_t descs_dma; int i = 0; void **bufs; u64 size = 0, tmp; - /* start big and work our way down */ - chunk_size = min(preferred, (u64)PAGE_SIZE << MAX_ORDER); -retry: tmp = (preferred + chunk_size - 1); do_div(tmp, chunk_size); max_entries = tmp; + + if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries) + max_entries = dev->ctrl.hmmaxd; + descs = dma_zalloc_coherent(dev->dev, max_entries * sizeof(*descs), &descs_dma, GFP_KERNEL); if (!descs) @@ -1647,15 +1659,9 @@ static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred) i++; } - if (!size || (min && size < min)) { - dev_warn(dev->ctrl.device, - "failed to allocate host memory buffer.\n"); + if (!size) goto out_free_bufs; - } - dev_info(dev->ctrl.device, - "allocated %lld MiB host memory buffer.\n", - size >> ilog2(SZ_1M)); dev->nr_host_mem_descs = i; dev->host_mem_size = size; dev->host_mem_descs = descs; @@ -1676,21 +1682,35 @@ static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred) dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs, descs_dma); out: - /* try a smaller chunk size if we failed early */ - if (chunk_size >= PAGE_SIZE * 2 && (i == 0 || size < min)) { - chunk_size /= 2; - goto retry; - } dev->host_mem_descs = NULL; return -ENOMEM; } -static void nvme_setup_host_mem(struct nvme_dev *dev) +static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred) +{ + u32 chunk_size; + + /* start big and work our way down */ + for (chunk_size = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES); + chunk_size >= max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2); + chunk_size /= 2) { + if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) { + if (!min || dev->host_mem_size >= min) + return 0; + nvme_free_host_mem(dev); + } + } + + 
return -ENOMEM; +} + +static int nvme_setup_host_mem(struct nvme_dev *dev) { u64 max = (u64)max_host_mem_size_mb * SZ_1M; u64 preferred = (u64)dev->ctrl.hmpre * 4096; u64 min = (u64)dev->ctrl.hmmin * 4096; u32 enable_bits = NVME_HOST_MEM_ENABLE; + int ret = 0; preferred = min(preferred, max); if (min > max) { @@ -1698,7 +1718,7 @@ static void nvme_setup_host_mem(struct nvme_dev *dev) "min host memory (%lld MiB) above limit (%d MiB).\n", min >> ilog2(SZ_1M), max_host_mem_size_mb); nvme_free_host_mem(dev); - return; + return 0; } /* @@ -1712,12 +1732,21 @@ static void nvme_setup_host_mem(struct nvme_dev *dev) } if (!dev->host_mem_descs) { - if (nvme_alloc_host_mem(dev, min, preferred)) - return; + if (nvme_alloc_host_mem(dev, min, preferred)) { + dev_warn(dev->ctrl.device, + "failed to allocate host memory buffer.\n"); + return 0; /* controller must work without HMB */ + } + + dev_info(dev->ctrl.device, + "allocated %lld MiB host memory buffer.\n", + dev->host_mem_size >> ilog2(SZ_1M)); } - if (nvme_set_host_mem(dev, enable_bits)) + ret = nvme_set_host_mem(dev, enable_bits); + if (ret) nvme_free_host_mem(dev); + return ret; } static int nvme_setup_io_queues(struct nvme_dev *dev) @@ -2134,7 +2163,6 @@ static void nvme_reset_work(struct work_struct *work) if (result) goto out; - nvme_init_queue(dev->queues[0], 0); result = nvme_alloc_admin_tags(dev); if (result) goto out; @@ -2161,8 +2189,11 @@ static void nvme_reset_work(struct work_struct *work) "unable to allocate dma for dbbuf\n"); } - if (dev->ctrl.hmpre) - nvme_setup_host_mem(dev); + if (dev->ctrl.hmpre) { + result = nvme_setup_host_mem(dev); + if (result < 0) + goto out; + } result = nvme_setup_io_queues(dev); if (result) @@ -2494,6 +2525,10 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, { PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, + { PCI_DEVICE(0x1d1d, 0x1f1f), /* LighNVM qemu device */ + .driver_data = NVME_QUIRK_LIGHTNVM, }, + { PCI_DEVICE(0x1d1d, 0x2807), /* CNEX WL */ + .driver_data = NVME_QUIRK_LIGHTNVM, }, { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) }, { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) }, diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index a7f7d0ae3331..92a03ff5fb4d 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -37,8 +37,6 @@ #define NVME_RDMA_CONNECT_TIMEOUT_MS 3000 /* 3 second */ -#define NVME_RDMA_MAX_SEGMENT_SIZE 0xffffff /* 24-bit SGL field */ - #define NVME_RDMA_MAX_SEGMENTS 256 #define NVME_RDMA_MAX_INLINE_SEGMENTS 1 @@ -152,6 +150,9 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id, struct rdma_cm_event *event); static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc); +static const struct blk_mq_ops nvme_rdma_mq_ops; +static const struct blk_mq_ops nvme_rdma_admin_mq_ops; + /* XXX: really should move to a generic header sooner or later.. 
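
nvme_alloc_host_mem() above replaces the old goto-based retry with a loop that starts at the largest chunk the kernel can allocate and halves the chunk size until the controller's minimum granularity (hmminds) or two pages is reached, accepting the buffer only if it satisfies the controller's minimum size. A rough user-space sketch of that descending loop; try_alloc() and the constants below are made-up stand-ins for __nvme_alloc_host_mem() and the real limits:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096ULL
#define MAX_CHUNK	(PAGE_SIZE * 1024)	/* stand-in for the MAX_ORDER limit */

/* Pretend only chunks of 64 KiB or smaller can actually be allocated. */
static bool try_alloc(uint64_t preferred, uint64_t chunk, uint64_t *got)
{
	if (chunk > 64 * 1024)
		return false;
	*got = preferred;	/* pretend the whole buffer was built */
	return true;
}

static int alloc_host_mem(uint64_t min, uint64_t preferred, uint64_t hmminds)
{
	uint64_t floor_sz = hmminds * 4096 > PAGE_SIZE * 2 ?
			    hmminds * 4096 : PAGE_SIZE * 2;
	uint64_t chunk, got;

	/* start big and work our way down, as the hunk above does */
	for (chunk = preferred < MAX_CHUNK ? preferred : MAX_CHUNK;
	     chunk >= floor_sz; chunk /= 2) {
		if (try_alloc(preferred, chunk, &got) && (!min || got >= min)) {
			printf("built %llu bytes with %llu-byte chunks\n",
			       (unsigned long long)got, (unsigned long long)chunk);
			return 0;
		}
	}
	return -1;	/* could not satisfy the controller's minimum */
}

int main(void)
{
	return alloc_host_mem(1 << 20, 8 << 20, 0) ? 1 : 0;
}
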
*/ static inline void put_unaligned_le24(u32 val, u8 *p) { @@ -500,7 +501,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue) return ret; } -static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl, +static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl, int idx, size_t queue_size) { struct nvme_rdma_queue *queue; @@ -558,22 +559,20 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl, static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue) { + if (!test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags)) + return; + rdma_disconnect(queue->cm_id); ib_drain_qp(queue->qp); } static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue) -{ - nvme_rdma_destroy_queue_ib(queue); - rdma_destroy_id(queue->cm_id); -} - -static void nvme_rdma_stop_and_free_queue(struct nvme_rdma_queue *queue) { if (test_and_set_bit(NVME_RDMA_Q_DELETING, &queue->flags)) return; - nvme_rdma_stop_queue(queue); - nvme_rdma_free_queue(queue); + + nvme_rdma_destroy_queue_ib(queue); + rdma_destroy_id(queue->cm_id); } static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl) @@ -581,31 +580,53 @@ static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl) int i; for (i = 1; i < ctrl->ctrl.queue_count; i++) - nvme_rdma_stop_and_free_queue(&ctrl->queues[i]); + nvme_rdma_free_queue(&ctrl->queues[i]); } -static int nvme_rdma_connect_io_queues(struct nvme_rdma_ctrl *ctrl) +static void nvme_rdma_stop_io_queues(struct nvme_rdma_ctrl *ctrl) +{ + int i; + + for (i = 1; i < ctrl->ctrl.queue_count; i++) + nvme_rdma_stop_queue(&ctrl->queues[i]); +} + +static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx) +{ + int ret; + + if (idx) + ret = nvmf_connect_io_queue(&ctrl->ctrl, idx); + else + ret = nvmf_connect_admin_queue(&ctrl->ctrl); + + if (!ret) + set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[idx].flags); + else + dev_info(ctrl->ctrl.device, + "failed to connect queue: %d ret=%d\n", idx, ret); + return ret; +} + +static int nvme_rdma_start_io_queues(struct nvme_rdma_ctrl *ctrl) { int i, ret = 0; for (i = 1; i < ctrl->ctrl.queue_count; i++) { - ret = nvmf_connect_io_queue(&ctrl->ctrl, i); - if (ret) { - dev_info(ctrl->ctrl.device, - "failed to connect i/o queue: %d\n", ret); - goto out_free_queues; - } - set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags); + ret = nvme_rdma_start_queue(ctrl, i); + if (ret) + goto out_stop_queues; } return 0; -out_free_queues: - nvme_rdma_free_io_queues(ctrl); +out_stop_queues: + for (i--; i >= 1; i--) + nvme_rdma_stop_queue(&ctrl->queues[i]); return ret; } -static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl) +static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl) { struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; struct ib_device *ibdev = ctrl->device->dev; @@ -634,32 +655,230 @@ static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl) "creating %d I/O queues.\n", nr_io_queues); for (i = 1; i < ctrl->ctrl.queue_count; i++) { - ret = nvme_rdma_init_queue(ctrl, i, - ctrl->ctrl.opts->queue_size); - if (ret) { - dev_info(ctrl->ctrl.device, - "failed to initialize i/o queue: %d\n", ret); + ret = nvme_rdma_alloc_queue(ctrl, i, + ctrl->ctrl.sqsize + 1); + if (ret) goto out_free_queues; - } } return 0; out_free_queues: for (i--; i >= 1; i--) - nvme_rdma_stop_and_free_queue(&ctrl->queues[i]); + nvme_rdma_free_queue(&ctrl->queues[i]); return ret; } -static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl) +static void nvme_rdma_free_tagset(struct nvme_ctrl *nctrl, bool admin) +{ + struct 
nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl); + struct blk_mq_tag_set *set = admin ? + &ctrl->admin_tag_set : &ctrl->tag_set; + + blk_mq_free_tag_set(set); + nvme_rdma_dev_put(ctrl->device); +} + +static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl, + bool admin) +{ + struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl); + struct blk_mq_tag_set *set; + int ret; + + if (admin) { + set = &ctrl->admin_tag_set; + memset(set, 0, sizeof(*set)); + set->ops = &nvme_rdma_admin_mq_ops; + set->queue_depth = NVME_RDMA_AQ_BLKMQ_DEPTH; + set->reserved_tags = 2; /* connect + keep-alive */ + set->numa_node = NUMA_NO_NODE; + set->cmd_size = sizeof(struct nvme_rdma_request) + + SG_CHUNK_SIZE * sizeof(struct scatterlist); + set->driver_data = ctrl; + set->nr_hw_queues = 1; + set->timeout = ADMIN_TIMEOUT; + } else { + set = &ctrl->tag_set; + memset(set, 0, sizeof(*set)); + set->ops = &nvme_rdma_mq_ops; + set->queue_depth = nctrl->opts->queue_size; + set->reserved_tags = 1; /* fabric connect */ + set->numa_node = NUMA_NO_NODE; + set->flags = BLK_MQ_F_SHOULD_MERGE; + set->cmd_size = sizeof(struct nvme_rdma_request) + + SG_CHUNK_SIZE * sizeof(struct scatterlist); + set->driver_data = ctrl; + set->nr_hw_queues = nctrl->queue_count - 1; + set->timeout = NVME_IO_TIMEOUT; + } + + ret = blk_mq_alloc_tag_set(set); + if (ret) + goto out; + + /* + * We need a reference on the device as long as the tag_set is alive, + * as the MRs in the request structures need a valid ib_device. + */ + ret = nvme_rdma_dev_get(ctrl->device); + if (!ret) { + ret = -EINVAL; + goto out_free_tagset; + } + + return set; + +out_free_tagset: + blk_mq_free_tag_set(set); +out: + return ERR_PTR(ret); +} + +static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl, + bool remove) { nvme_rdma_free_qe(ctrl->queues[0].device->dev, &ctrl->async_event_sqe, sizeof(struct nvme_command), DMA_TO_DEVICE); - nvme_rdma_stop_and_free_queue(&ctrl->queues[0]); - blk_cleanup_queue(ctrl->ctrl.admin_q); - blk_mq_free_tag_set(&ctrl->admin_tag_set); - nvme_rdma_dev_put(ctrl->device); + nvme_rdma_stop_queue(&ctrl->queues[0]); + if (remove) { + blk_cleanup_queue(ctrl->ctrl.admin_q); + nvme_rdma_free_tagset(&ctrl->ctrl, true); + } + nvme_rdma_free_queue(&ctrl->queues[0]); +} + +static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl, + bool new) +{ + int error; + + error = nvme_rdma_alloc_queue(ctrl, 0, NVME_AQ_DEPTH); + if (error) + return error; + + ctrl->device = ctrl->queues[0].device; + + ctrl->max_fr_pages = min_t(u32, NVME_RDMA_MAX_SEGMENTS, + ctrl->device->dev->attrs.max_fast_reg_page_list_len); + + if (new) { + ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true); + if (IS_ERR(ctrl->ctrl.admin_tagset)) + goto out_free_queue; + + ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set); + if (IS_ERR(ctrl->ctrl.admin_q)) { + error = PTR_ERR(ctrl->ctrl.admin_q); + goto out_free_tagset; + } + } else { + error = blk_mq_reinit_tagset(&ctrl->admin_tag_set, + nvme_rdma_reinit_request); + if (error) + goto out_free_queue; + } + + error = nvme_rdma_start_queue(ctrl, 0); + if (error) + goto out_cleanup_queue; + + error = ctrl->ctrl.ops->reg_read64(&ctrl->ctrl, NVME_REG_CAP, + &ctrl->ctrl.cap); + if (error) { + dev_err(ctrl->ctrl.device, + "prop_get NVME_REG_CAP failed\n"); + goto out_cleanup_queue; + } + + ctrl->ctrl.sqsize = + min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize); + + error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap); + if (error) + goto out_cleanup_queue; + + 
ctrl->ctrl.max_hw_sectors = + (ctrl->max_fr_pages - 1) << (ilog2(SZ_4K) - 9); + + error = nvme_init_identify(&ctrl->ctrl); + if (error) + goto out_cleanup_queue; + + error = nvme_rdma_alloc_qe(ctrl->queues[0].device->dev, + &ctrl->async_event_sqe, sizeof(struct nvme_command), + DMA_TO_DEVICE); + if (error) + goto out_cleanup_queue; + + return 0; + +out_cleanup_queue: + if (new) + blk_cleanup_queue(ctrl->ctrl.admin_q); +out_free_tagset: + if (new) + nvme_rdma_free_tagset(&ctrl->ctrl, true); +out_free_queue: + nvme_rdma_free_queue(&ctrl->queues[0]); + return error; +} + +static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl, + bool remove) +{ + nvme_rdma_stop_io_queues(ctrl); + if (remove) { + blk_cleanup_queue(ctrl->ctrl.connect_q); + nvme_rdma_free_tagset(&ctrl->ctrl, false); + } + nvme_rdma_free_io_queues(ctrl); +} + +static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new) +{ + int ret; + + ret = nvme_rdma_alloc_io_queues(ctrl); + if (ret) + return ret; + + if (new) { + ctrl->ctrl.tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, false); + if (IS_ERR(ctrl->ctrl.tagset)) + goto out_free_io_queues; + + ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set); + if (IS_ERR(ctrl->ctrl.connect_q)) { + ret = PTR_ERR(ctrl->ctrl.connect_q); + goto out_free_tag_set; + } + } else { + ret = blk_mq_reinit_tagset(&ctrl->tag_set, + nvme_rdma_reinit_request); + if (ret) + goto out_free_io_queues; + + blk_mq_update_nr_hw_queues(&ctrl->tag_set, + ctrl->ctrl.queue_count - 1); + } + + ret = nvme_rdma_start_io_queues(ctrl); + if (ret) + goto out_cleanup_connect_q; + + return 0; + +out_cleanup_connect_q: + if (new) + blk_cleanup_queue(ctrl->ctrl.connect_q); +out_free_tag_set: + if (new) + nvme_rdma_free_tagset(&ctrl->ctrl, false); +out_free_io_queues: + nvme_rdma_free_io_queues(ctrl); + return ret; } static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl) @@ -708,49 +927,27 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work) ++ctrl->ctrl.nr_reconnects; - if (ctrl->ctrl.queue_count > 1) { - nvme_rdma_free_io_queues(ctrl); + if (ctrl->ctrl.queue_count > 1) + nvme_rdma_destroy_io_queues(ctrl, false); - ret = blk_mq_reinit_tagset(&ctrl->tag_set); - if (ret) - goto requeue; - } - - nvme_rdma_stop_and_free_queue(&ctrl->queues[0]); - - ret = blk_mq_reinit_tagset(&ctrl->admin_tag_set); - if (ret) - goto requeue; - - ret = nvme_rdma_init_queue(ctrl, 0, NVME_AQ_DEPTH); - if (ret) - goto requeue; - - ret = nvmf_connect_admin_queue(&ctrl->ctrl); - if (ret) - goto requeue; - - set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags); - - ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap); + nvme_rdma_destroy_admin_queue(ctrl, false); + ret = nvme_rdma_configure_admin_queue(ctrl, false); if (ret) goto requeue; if (ctrl->ctrl.queue_count > 1) { - ret = nvme_rdma_init_io_queues(ctrl); + ret = nvme_rdma_configure_io_queues(ctrl, false); if (ret) goto requeue; - - ret = nvme_rdma_connect_io_queues(ctrl); - if (ret) - goto requeue; - - blk_mq_update_nr_hw_queues(&ctrl->tag_set, - ctrl->ctrl.queue_count - 1); } changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); - WARN_ON_ONCE(!changed); + if (!changed) { + /* state change failure is ok if we're in DELETING state */ + WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING); + return; + } + ctrl->ctrl.nr_reconnects = 0; nvme_start_ctrl(&ctrl->ctrl); @@ -769,16 +966,15 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work) { struct nvme_rdma_ctrl *ctrl = container_of(work, struct nvme_rdma_ctrl, err_work); - 
int i; - nvme_stop_ctrl(&ctrl->ctrl); + nvme_stop_keep_alive(&ctrl->ctrl); - for (i = 0; i < ctrl->ctrl.queue_count; i++) - clear_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags); - - if (ctrl->ctrl.queue_count > 1) + if (ctrl->ctrl.queue_count > 1) { nvme_stop_queues(&ctrl->ctrl); + nvme_rdma_stop_io_queues(ctrl); + } blk_mq_quiesce_queue(ctrl->ctrl.admin_q); + nvme_rdma_stop_queue(&ctrl->queues[0]); /* We must take care of fastfail/requeue all our inflight requests */ if (ctrl->ctrl.queue_count > 1) @@ -863,7 +1059,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue, if (req->mr->need_inval) { res = nvme_rdma_inv_rkey(queue, req); - if (res < 0) { + if (unlikely(res < 0)) { dev_err(ctrl->ctrl.device, "Queueing INV WR for rkey %#x failed (%d)\n", req->mr->rkey, res); @@ -932,7 +1128,7 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue, * the block virtual boundary. */ nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, SZ_4K); - if (nr < count) { + if (unlikely(nr < count)) { if (nr < 0) return nr; return -EINVAL; @@ -1068,7 +1264,7 @@ static int nvme_rdma_post_send(struct nvme_rdma_queue *queue, first = ≀ ret = ib_post_send(queue->qp, first, &bad_wr); - if (ret) { + if (unlikely(ret)) { dev_err(queue->ctrl->ctrl.device, "%s failed with error code %d\n", __func__, ret); } @@ -1094,7 +1290,7 @@ static int nvme_rdma_post_recv(struct nvme_rdma_queue *queue, wr.num_sge = 1; ret = ib_post_recv(queue->qp, &wr, &bad_wr); - if (ret) { + if (unlikely(ret)) { dev_err(queue->ctrl->ctrl.device, "%s failed with error code %d\n", __func__, ret); } @@ -1454,7 +1650,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, blk_mq_start_request(rq); err = nvme_rdma_map_data(queue, rq, c); - if (err < 0) { + if (unlikely(err < 0)) { dev_err(queue->ctrl->ctrl.device, "Failed to map data (%d)\n", err); nvme_cleanup_cmd(rq); @@ -1468,7 +1664,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, flush = true; err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge, req->mr->need_inval ? &req->reg_wr.wr : NULL, flush); - if (err) { + if (unlikely(err)) { nvme_rdma_unmap_data(queue, rq); goto err; } @@ -1521,7 +1717,6 @@ static const struct blk_mq_ops nvme_rdma_mq_ops = { .complete = nvme_rdma_complete_rq, .init_request = nvme_rdma_init_request, .exit_request = nvme_rdma_exit_request, - .reinit_request = nvme_rdma_reinit_request, .init_hctx = nvme_rdma_init_hctx, .poll = nvme_rdma_poll, .timeout = nvme_rdma_timeout, @@ -1533,103 +1728,11 @@ static const struct blk_mq_ops nvme_rdma_admin_mq_ops = { .complete = nvme_rdma_complete_rq, .init_request = nvme_rdma_init_request, .exit_request = nvme_rdma_exit_request, - .reinit_request = nvme_rdma_reinit_request, .init_hctx = nvme_rdma_init_admin_hctx, .timeout = nvme_rdma_timeout, }; -static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl) -{ - int error; - - error = nvme_rdma_init_queue(ctrl, 0, NVME_AQ_DEPTH); - if (error) - return error; - - ctrl->device = ctrl->queues[0].device; - - /* - * We need a reference on the device as long as the tag_set is alive, - * as the MRs in the request structures need a valid ib_device. 
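
Several hunks above wrap rarely-taken error branches in unlikely(), which in the kernel expands to the GCC/Clang __builtin_expect() hint. A self-contained equivalent outside the kernel, with the macros redefined locally and a hypothetical post_send() standing in for ib_post_send():

#include <stdio.h>

#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

static int post_send(int fail)
{
	return fail ? -5 : 0;	/* stand-in for ib_post_send() */
}

int main(void)
{
	int ret = post_send(0);

	if (unlikely(ret)) {	/* hints the compiler to keep this path out of line */
		fprintf(stderr, "post failed: %d\n", ret);
		return 1;
	}
	printf("posted\n");
	return 0;
}
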
- */ - error = -EINVAL; - if (!nvme_rdma_dev_get(ctrl->device)) - goto out_free_queue; - - ctrl->max_fr_pages = min_t(u32, NVME_RDMA_MAX_SEGMENTS, - ctrl->device->dev->attrs.max_fast_reg_page_list_len); - - memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set)); - ctrl->admin_tag_set.ops = &nvme_rdma_admin_mq_ops; - ctrl->admin_tag_set.queue_depth = NVME_RDMA_AQ_BLKMQ_DEPTH; - ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */ - ctrl->admin_tag_set.numa_node = NUMA_NO_NODE; - ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_rdma_request) + - SG_CHUNK_SIZE * sizeof(struct scatterlist); - ctrl->admin_tag_set.driver_data = ctrl; - ctrl->admin_tag_set.nr_hw_queues = 1; - ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT; - - error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set); - if (error) - goto out_put_dev; - - ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set); - if (IS_ERR(ctrl->ctrl.admin_q)) { - error = PTR_ERR(ctrl->ctrl.admin_q); - goto out_free_tagset; - } - - error = nvmf_connect_admin_queue(&ctrl->ctrl); - if (error) - goto out_cleanup_queue; - - set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags); - - error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, - &ctrl->ctrl.cap); - if (error) { - dev_err(ctrl->ctrl.device, - "prop_get NVME_REG_CAP failed\n"); - goto out_cleanup_queue; - } - - ctrl->ctrl.sqsize = - min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize); - - error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap); - if (error) - goto out_cleanup_queue; - - ctrl->ctrl.max_hw_sectors = - (ctrl->max_fr_pages - 1) << (ilog2(SZ_4K) - 9); - - error = nvme_init_identify(&ctrl->ctrl); - if (error) - goto out_cleanup_queue; - - error = nvme_rdma_alloc_qe(ctrl->queues[0].device->dev, - &ctrl->async_event_sqe, sizeof(struct nvme_command), - DMA_TO_DEVICE); - if (error) - goto out_cleanup_queue; - - return 0; - -out_cleanup_queue: - blk_cleanup_queue(ctrl->ctrl.admin_q); -out_free_tagset: - /* disconnect and drain the queue before freeing the tagset */ - nvme_rdma_stop_queue(&ctrl->queues[0]); - blk_mq_free_tag_set(&ctrl->admin_tag_set); -out_put_dev: - nvme_rdma_dev_put(ctrl->device); -out_free_queue: - nvme_rdma_free_queue(&ctrl->queues[0]); - return error; -} - -static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl) +static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown) { cancel_work_sync(&ctrl->err_work); cancel_delayed_work_sync(&ctrl->reconnect_work); @@ -1638,33 +1741,26 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl) nvme_stop_queues(&ctrl->ctrl); blk_mq_tagset_busy_iter(&ctrl->tag_set, nvme_cancel_request, &ctrl->ctrl); - nvme_rdma_free_io_queues(ctrl); + nvme_rdma_destroy_io_queues(ctrl, shutdown); } - if (test_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags)) + if (shutdown) nvme_shutdown_ctrl(&ctrl->ctrl); + else + nvme_disable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap); blk_mq_quiesce_queue(ctrl->ctrl.admin_q); blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, nvme_cancel_request, &ctrl->ctrl); blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); - nvme_rdma_destroy_admin_queue(ctrl); + nvme_rdma_destroy_admin_queue(ctrl, shutdown); } -static void __nvme_rdma_remove_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown) +static void nvme_rdma_remove_ctrl(struct nvme_rdma_ctrl *ctrl) { - nvme_stop_ctrl(&ctrl->ctrl); nvme_remove_namespaces(&ctrl->ctrl); - if (shutdown) - nvme_rdma_shutdown_ctrl(ctrl); - + nvme_rdma_shutdown_ctrl(ctrl, true); nvme_uninit_ctrl(&ctrl->ctrl); - if (ctrl->ctrl.tagset) { - 
blk_cleanup_queue(ctrl->ctrl.connect_q); - blk_mq_free_tag_set(&ctrl->tag_set); - nvme_rdma_dev_put(ctrl->device); - } - nvme_put_ctrl(&ctrl->ctrl); } @@ -1673,7 +1769,8 @@ static void nvme_rdma_del_ctrl_work(struct work_struct *work) struct nvme_rdma_ctrl *ctrl = container_of(work, struct nvme_rdma_ctrl, delete_work); - __nvme_rdma_remove_ctrl(ctrl, true); + nvme_stop_ctrl(&ctrl->ctrl); + nvme_rdma_remove_ctrl(ctrl); } static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl) @@ -1705,14 +1802,6 @@ static int nvme_rdma_del_ctrl(struct nvme_ctrl *nctrl) return ret; } -static void nvme_rdma_remove_ctrl_work(struct work_struct *work) -{ - struct nvme_rdma_ctrl *ctrl = container_of(work, - struct nvme_rdma_ctrl, delete_work); - - __nvme_rdma_remove_ctrl(ctrl, false); -} - static void nvme_rdma_reset_ctrl_work(struct work_struct *work) { struct nvme_rdma_ctrl *ctrl = @@ -1721,30 +1810,16 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work) bool changed; nvme_stop_ctrl(&ctrl->ctrl); - nvme_rdma_shutdown_ctrl(ctrl); + nvme_rdma_shutdown_ctrl(ctrl, false); - ret = nvme_rdma_configure_admin_queue(ctrl); - if (ret) { - /* ctrl is already shutdown, just remove the ctrl */ - INIT_WORK(&ctrl->delete_work, nvme_rdma_remove_ctrl_work); - goto del_dead_ctrl; - } + ret = nvme_rdma_configure_admin_queue(ctrl, false); + if (ret) + goto out_fail; if (ctrl->ctrl.queue_count > 1) { - ret = blk_mq_reinit_tagset(&ctrl->tag_set); + ret = nvme_rdma_configure_io_queues(ctrl, false); if (ret) - goto del_dead_ctrl; - - ret = nvme_rdma_init_io_queues(ctrl); - if (ret) - goto del_dead_ctrl; - - ret = nvme_rdma_connect_io_queues(ctrl); - if (ret) - goto del_dead_ctrl; - - blk_mq_update_nr_hw_queues(&ctrl->tag_set, - ctrl->ctrl.queue_count - 1); + goto out_fail; } changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); @@ -1754,10 +1829,9 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work) return; -del_dead_ctrl: - /* Deleting this dead controller... */ +out_fail: dev_warn(ctrl->ctrl.device, "Removing after reset failure\n"); - WARN_ON(!queue_work(nvme_wq, &ctrl->delete_work)); + nvme_rdma_remove_ctrl(ctrl); } static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = { @@ -1773,62 +1847,6 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = { .get_address = nvmf_get_address, }; -static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl) -{ - int ret; - - ret = nvme_rdma_init_io_queues(ctrl); - if (ret) - return ret; - - /* - * We need a reference on the device as long as the tag_set is alive, - * as the MRs in the request structures need a valid ib_device. 
- */ - ret = -EINVAL; - if (!nvme_rdma_dev_get(ctrl->device)) - goto out_free_io_queues; - - memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); - ctrl->tag_set.ops = &nvme_rdma_mq_ops; - ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size; - ctrl->tag_set.reserved_tags = 1; /* fabric connect */ - ctrl->tag_set.numa_node = NUMA_NO_NODE; - ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; - ctrl->tag_set.cmd_size = sizeof(struct nvme_rdma_request) + - SG_CHUNK_SIZE * sizeof(struct scatterlist); - ctrl->tag_set.driver_data = ctrl; - ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1; - ctrl->tag_set.timeout = NVME_IO_TIMEOUT; - - ret = blk_mq_alloc_tag_set(&ctrl->tag_set); - if (ret) - goto out_put_dev; - ctrl->ctrl.tagset = &ctrl->tag_set; - - ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set); - if (IS_ERR(ctrl->ctrl.connect_q)) { - ret = PTR_ERR(ctrl->ctrl.connect_q); - goto out_free_tag_set; - } - - ret = nvme_rdma_connect_io_queues(ctrl); - if (ret) - goto out_cleanup_connect_q; - - return 0; - -out_cleanup_connect_q: - blk_cleanup_queue(ctrl->ctrl.connect_q); -out_free_tag_set: - blk_mq_free_tag_set(&ctrl->tag_set); -out_put_dev: - nvme_rdma_dev_put(ctrl->device); -out_free_io_queues: - nvme_rdma_free_io_queues(ctrl); - return ret; -} - static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts) { @@ -1886,7 +1904,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, if (!ctrl->queues) goto out_uninit_ctrl; - ret = nvme_rdma_configure_admin_queue(ctrl); + ret = nvme_rdma_configure_admin_queue(ctrl, true); if (ret) goto out_kfree_queues; @@ -1921,7 +1939,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, } if (opts->nr_io_queues) { - ret = nvme_rdma_create_io_queues(ctrl); + ret = nvme_rdma_configure_io_queues(ctrl, true); if (ret) goto out_remove_admin_queue; } @@ -1943,7 +1961,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, return &ctrl->ctrl; out_remove_admin_queue: - nvme_rdma_destroy_admin_queue(ctrl); + nvme_rdma_destroy_admin_queue(ctrl, true); out_kfree_queues: kfree(ctrl->queues); out_uninit_ctrl: diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index a53bb6635b83..c4a0bf36e752 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -100,7 +100,7 @@ static u16 nvmet_get_smart_log(struct nvmet_req *req, u16 status; WARN_ON(req == NULL || slog == NULL); - if (req->cmd->get_log_page.nsid == cpu_to_le32(0xFFFFFFFF)) + if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL)) status = nvmet_get_smart_log_all(req, slog); else status = nvmet_get_smart_log_nsid(req, slog); @@ -168,15 +168,6 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req) nvmet_req_complete(req, status); } -static void copy_and_pad(char *dst, int dst_len, const char *src, int src_len) -{ - int len = min(src_len, dst_len); - - memcpy(dst, src, len); - if (dst_len > len) - memset(dst + len, ' ', dst_len - len); -} - static void nvmet_execute_identify_ctrl(struct nvmet_req *req) { struct nvmet_ctrl *ctrl = req->sq->ctrl; @@ -196,8 +187,9 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req) bin2hex(id->sn, &ctrl->subsys->serial, min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2)); - copy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1); - copy_and_pad(id->fr, sizeof(id->fr), UTS_RELEASE, strlen(UTS_RELEASE)); + memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' '); + memcpy_and_pad(id->fr, 
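/*
 * A minimal sketch of the memcpy_and_pad() helper the two calls above
 * switch to (it lives in <linux/string.h>): copy at most dest_len bytes
 * of src and fill any remainder with the pad byte.  For id->mn/id->fr
 * this yields the space-padded, fixed-width ASCII fields the NVMe spec
 * expects.  Roughly equivalent open-coded form:
 */
static inline void memcpy_and_pad_sketch(void *dest, size_t dest_len,
					 const void *src, size_t count,
					 int pad)
{
	if (dest_len > count) {
		memcpy(dest, src, count);
		memset((char *)dest + count, pad, dest_len - count);
	} else {
		memcpy(dest, src, dest_len);
	}
}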
sizeof(id->fr), + UTS_RELEASE, strlen(UTS_RELEASE), ' '); id->rab = 6; @@ -451,7 +443,7 @@ static void nvmet_execute_set_features(struct nvmet_req *req) u32 val32; u16 status = 0; - switch (cdw10 & 0xf) { + switch (cdw10 & 0xff) { case NVME_FEAT_NUM_QUEUES: nvmet_set_result(req, (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16)); @@ -461,6 +453,9 @@ static void nvmet_execute_set_features(struct nvmet_req *req) req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000); nvmet_set_result(req, req->sq->ctrl->kato); break; + case NVME_FEAT_HOST_ID: + status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR; + break; default: status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; break; @@ -475,7 +470,7 @@ static void nvmet_execute_get_features(struct nvmet_req *req) u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]); u16 status = 0; - switch (cdw10 & 0xf) { + switch (cdw10 & 0xff) { /* * These features are mandatory in the spec, but we don't * have a useful way to implement them. We'll eventually @@ -509,6 +504,16 @@ static void nvmet_execute_get_features(struct nvmet_req *req) case NVME_FEAT_KATO: nvmet_set_result(req, req->sq->ctrl->kato * 1000); break; + case NVME_FEAT_HOST_ID: + /* need 128-bit host identifier flag */ + if (!(req->cmd->common.cdw10[1] & cpu_to_le32(1 << 0))) { + status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + break; + } + + status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid, + sizeof(req->sq->ctrl->hostid)); + break; default: status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; break; diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c index 0a0067e771f5..b6aeb1d70951 100644 --- a/drivers/nvme/target/configfs.c +++ b/drivers/nvme/target/configfs.c @@ -444,7 +444,7 @@ static struct config_group *nvmet_ns_make(struct config_group *group, goto out; ret = -EINVAL; - if (nsid == 0 || nsid == 0xffffffff) + if (nsid == 0 || nsid == NVME_NSID_ALL) goto out; ret = -ENOMEM; diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index f4b02bb4a1a8..1b208beeef50 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -390,10 +390,10 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status) if (status) nvmet_set_status(req, status); - /* XXX: need to fill in something useful for sq_head */ - req->rsp->sq_head = 0; - if (likely(req->sq)) /* may happen during early failure */ - req->rsp->sq_id = cpu_to_le16(req->sq->qid); + if (req->sq->size) + req->sq->sqhd = (req->sq->sqhd + 1) % req->sq->size; + req->rsp->sq_head = cpu_to_le16(req->sq->sqhd); + req->rsp->sq_id = cpu_to_le16(req->sq->qid); req->rsp->command_id = req->cmd->common.command_id; if (req->ns) @@ -420,6 +420,7 @@ void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid, u16 size) { + sq->sqhd = 0; sq->qid = qid; sq->size = size; @@ -538,37 +539,37 @@ EXPORT_SYMBOL_GPL(nvmet_req_uninit); static inline bool nvmet_cc_en(u32 cc) { - return cc & 0x1; + return (cc >> NVME_CC_EN_SHIFT) & 0x1; } static inline u8 nvmet_cc_css(u32 cc) { - return (cc >> 4) & 0x7; + return (cc >> NVME_CC_CSS_SHIFT) & 0x7; } static inline u8 nvmet_cc_mps(u32 cc) { - return (cc >> 7) & 0xf; + return (cc >> NVME_CC_MPS_SHIFT) & 0xf; } static inline u8 nvmet_cc_ams(u32 cc) { - return (cc >> 11) & 0x7; + return (cc >> NVME_CC_AMS_SHIFT) & 0x7; } static inline u8 nvmet_cc_shn(u32 cc) { - return (cc >> 14) & 0x3; + return (cc >> NVME_CC_SHN_SHIFT) & 0x3; } static inline u8 nvmet_cc_iosqes(u32 cc) { - return (cc >> 16) & 0xf; + return 
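/*
 * Illustrative sketch (not part of the patch): with the named
 * NVME_CC_*_SHIFT constants used above, decoding the controller
 * configuration register no longer needs magic numbers.  Per the NVMe
 * spec, MPS encodes a page size of 2^(12 + MPS) bytes and
 * IOSQES/IOCQES are log2 of the queue entry sizes.
 */
static void nvmet_cc_decode_sketch(u32 cc)
{
	u8 mps = (cc >> NVME_CC_MPS_SHIFT) & 0xf;
	u8 iosqes = (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;

	pr_debug("CC: host page size %lu bytes, SQ entry size %u bytes\n",
		 1UL << (12 + mps), 1U << iosqes);
}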
(cc >> NVME_CC_IOSQES_SHIFT) & 0xf; } static inline u8 nvmet_cc_iocqes(u32 cc) { - return (cc >> 20) & 0xf; + return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf; } static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl) @@ -749,6 +750,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, hostnqn, subsysnqn); req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn); up_read(&nvmet_config_sem); + status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR; goto out_put_subsystem; } up_read(&nvmet_config_sem); diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c index 3cc17269504b..db3bf6b8bf9e 100644 --- a/drivers/nvme/target/fabrics-cmd.c +++ b/drivers/nvme/target/fabrics-cmd.c @@ -109,9 +109,14 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req) pr_warn("queue already connected!\n"); return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR; } + if (!sqsize) { + pr_warn("queue size zero!\n"); + return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; + } - nvmet_cq_setup(ctrl, req->cq, qid, sqsize); - nvmet_sq_setup(ctrl, req->sq, qid, sqsize); + /* note: convert queue size from 0's-based value to 1's-based value */ + nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1); + nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1); return 0; } @@ -154,6 +159,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req) le32_to_cpu(c->kato), &ctrl); if (status) goto out; + uuid_copy(&ctrl->hostid, &d->hostid); status = nvmet_install_queue(ctrl, req); if (status) { diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index 309c84aa7595..58e010bdda3e 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c @@ -58,7 +58,8 @@ struct nvmet_fc_ls_iod { struct work_struct work; } __aligned(sizeof(unsigned long long)); -#define NVMET_FC_MAX_KB_PER_XFR 256 +#define NVMET_FC_MAX_SEQ_LENGTH (256 * 1024) +#define NVMET_FC_MAX_XFR_SGENTS (NVMET_FC_MAX_SEQ_LENGTH / PAGE_SIZE) enum nvmet_fcp_datadir { NVMET_FCP_NODATA, @@ -74,9 +75,7 @@ struct nvmet_fc_fcp_iod { struct nvme_fc_ersp_iu rspiubuf; dma_addr_t rspdma; struct scatterlist *data_sg; - struct scatterlist *next_sg; int data_sg_cnt; - u32 next_sg_offset; u32 total_length; u32 offset; enum nvmet_fcp_datadir io_dir; @@ -112,6 +111,7 @@ struct nvmet_fc_tgtport { struct ida assoc_cnt; struct nvmet_port *port; struct kref ref; + u32 max_sg_cnt; }; struct nvmet_fc_defer_fcp_req { @@ -148,7 +148,7 @@ struct nvmet_fc_tgt_assoc { u32 a_id; struct nvmet_fc_tgtport *tgtport; struct list_head a_list; - struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES]; + struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1]; struct kref ref; }; @@ -608,7 +608,7 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc, unsigned long flags; int ret; - if (qid >= NVMET_NR_QUEUES) + if (qid > NVMET_NR_QUEUES) return NULL; queue = kzalloc((sizeof(*queue) + @@ -783,6 +783,9 @@ nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport, u16 qid = nvmet_fc_getqueueid(connection_id); unsigned long flags; + if (qid > NVMET_NR_QUEUES) + return NULL; + spin_lock_irqsave(&tgtport->lock, flags); list_for_each_entry(assoc, &tgtport->assoc_list, a_list) { if (association_id == assoc->association_id) { @@ -888,7 +891,7 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc) int i; spin_lock_irqsave(&tgtport->lock, flags); - for (i = NVMET_NR_QUEUES - 1; i >= 0; i--) { + for (i = NVMET_NR_QUEUES; i >= 0; i--) { queue = assoc->queues[i]; if (queue) { if (!nvmet_fc_tgt_q_get(queue)) @@ -994,6 +997,8 @@ 
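/*
 * A minimal sketch of the transfer sizing the fc.c hunks below move to:
 * the target builds data_sg from page-sized entries and every chunk
 * except the last is a whole number of pages, so the next chunk is
 * simply an index into the original scatterlist -- replacing the
 * per-element copy loop that is removed further down.
 */
static struct scatterlist *next_chunk_sketch(struct scatterlist *data_sg,
					     u32 total_len, u32 offset,
					     u32 max_sg_cnt, u32 *sg_cnt)
{
	u32 len = min_t(u32, max_sg_cnt * PAGE_SIZE, total_len - offset);

	*sg_cnt = DIV_ROUND_UP(len, PAGE_SIZE);
	/* offset is always a multiple of PAGE_SIZE when this runs */
	return &data_sg[offset / PAGE_SIZE];
}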
nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo, INIT_LIST_HEAD(&newrec->assoc_list); kref_init(&newrec->ref); ida_init(&newrec->assoc_cnt); + newrec->max_sg_cnt = min_t(u32, NVMET_FC_MAX_XFR_SGENTS, + template->max_sgl_segments); ret = nvmet_fc_alloc_ls_iodlist(newrec); if (ret) { @@ -1866,51 +1871,23 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_fcp_iod *fod, u8 op) { struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; - struct scatterlist *sg, *datasg; unsigned long flags; - u32 tlen, sg_off; + u32 tlen; int ret; fcpreq->op = op; fcpreq->offset = fod->offset; fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC; - tlen = min_t(u32, (NVMET_FC_MAX_KB_PER_XFR * 1024), + + tlen = min_t(u32, tgtport->max_sg_cnt * PAGE_SIZE, (fod->total_length - fod->offset)); - tlen = min_t(u32, tlen, NVME_FC_MAX_SEGMENTS * PAGE_SIZE); - tlen = min_t(u32, tlen, fod->tgtport->ops->max_sgl_segments - * PAGE_SIZE); fcpreq->transfer_length = tlen; fcpreq->transferred_length = 0; fcpreq->fcp_error = 0; fcpreq->rsplen = 0; - fcpreq->sg_cnt = 0; - - datasg = fod->next_sg; - sg_off = fod->next_sg_offset; - - for (sg = fcpreq->sg ; tlen; sg++) { - *sg = *datasg; - if (sg_off) { - sg->offset += sg_off; - sg->length -= sg_off; - sg->dma_address += sg_off; - sg_off = 0; - } - if (tlen < sg->length) { - sg->length = tlen; - fod->next_sg = datasg; - fod->next_sg_offset += tlen; - } else if (tlen == sg->length) { - fod->next_sg_offset = 0; - fod->next_sg = sg_next(datasg); - } else { - fod->next_sg_offset = 0; - datasg = sg_next(datasg); - } - tlen -= sg->length; - fcpreq->sg_cnt++; - } + fcpreq->sg = &fod->data_sg[fod->offset / PAGE_SIZE]; + fcpreq->sg_cnt = DIV_ROUND_UP(tlen, PAGE_SIZE); /* * If the last READDATA request: check if LLDD supports @@ -1936,8 +1913,7 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport, spin_lock_irqsave(&fod->flock, flags); fod->writedataactive = false; spin_unlock_irqrestore(&fod->flock, flags); - nvmet_req_complete(&fod->req, - NVME_SC_FC_TRANSPORT_ERROR); + nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); } else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ { fcpreq->fcp_error = ret; fcpreq->transferred_length = 0; @@ -1955,8 +1931,7 @@ __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort) /* if in the middle of an io and we need to tear down */ if (abort) { if (fcpreq->op == NVMET_FCOP_WRITEDATA) { - nvmet_req_complete(&fod->req, - NVME_SC_FC_TRANSPORT_ERROR); + nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); return true; } @@ -1994,8 +1969,7 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod) fod->abort = true; spin_unlock(&fod->flock); - nvmet_req_complete(&fod->req, - NVME_SC_FC_TRANSPORT_ERROR); + nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); return; } @@ -2225,8 +2199,6 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, fod->req.sg = fod->data_sg; fod->req.sg_cnt = fod->data_sg_cnt; fod->offset = 0; - fod->next_sg = fod->data_sg; - fod->next_sg_offset = 0; if (fod->io_dir == NVMET_FCP_WRITE) { /* pull the data over before invoking nvmet layer */ @@ -2561,13 +2533,17 @@ nvmet_fc_remove_port(struct nvmet_port *port) { struct nvmet_fc_tgtport *tgtport = port->priv; unsigned long flags; + bool matched = false; spin_lock_irqsave(&nvmet_fc_tgtlock, flags); if (tgtport->port == port) { - nvmet_fc_tgtport_put(tgtport); + matched = true; tgtport->port = NULL; } spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); + + if (matched) + nvmet_fc_tgtport_put(tgtport); } static struct nvmet_fabrics_ops 
nvmet_fc_tgt_fcp_ops = { diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c index 1bb9d5b311b1..7b75d9de55ab 100644 --- a/drivers/nvme/target/fcloop.c +++ b/drivers/nvme/target/fcloop.c @@ -193,9 +193,6 @@ fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname, #define TGTPORT_OPTS (NVMF_OPT_WWNN | NVMF_OPT_WWPN) -#define ALL_OPTS (NVMF_OPT_WWNN | NVMF_OPT_WWPN | NVMF_OPT_ROLES | \ - NVMF_OPT_FCADDR | NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN) - static DEFINE_SPINLOCK(fcloop_lock); static LIST_HEAD(fcloop_lports); @@ -227,8 +224,6 @@ struct fcloop_nport { struct fcloop_lport *lport; struct list_head nport_list; struct kref ref; - struct completion rport_unreg_done; - struct completion tport_unreg_done; u64 node_name; u64 port_name; u32 port_role; @@ -579,7 +574,7 @@ fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport, tfcp_req->aborted = true; spin_unlock(&tfcp_req->reqlock); - tfcp_req->status = NVME_SC_FC_TRANSPORT_ABORTED; + tfcp_req->status = NVME_SC_INTERNAL; /* * nothing more to do. If io wasn't active, the transport should @@ -633,6 +628,32 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport, schedule_work(&inireq->iniwork); } +static void +fcloop_nport_free(struct kref *ref) +{ + struct fcloop_nport *nport = + container_of(ref, struct fcloop_nport, ref); + unsigned long flags; + + spin_lock_irqsave(&fcloop_lock, flags); + list_del(&nport->nport_list); + spin_unlock_irqrestore(&fcloop_lock, flags); + + kfree(nport); +} + +static void +fcloop_nport_put(struct fcloop_nport *nport) +{ + kref_put(&nport->ref, fcloop_nport_free); +} + +static int +fcloop_nport_get(struct fcloop_nport *nport) +{ + return kref_get_unless_zero(&nport->ref); +} + static void fcloop_localport_delete(struct nvme_fc_local_port *localport) { @@ -647,8 +668,7 @@ fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport) { struct fcloop_rport *rport = remoteport->private; - /* release any threads waiting for the unreg to complete */ - complete(&rport->nport->rport_unreg_done); + fcloop_nport_put(rport->nport); } static void @@ -656,8 +676,7 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport) { struct fcloop_tport *tport = targetport->private; - /* release any threads waiting for the unreg to complete */ - complete(&tport->nport->tport_unreg_done); + fcloop_nport_put(tport->nport); } #define FCLOOP_HW_QUEUES 4 @@ -725,6 +744,7 @@ fcloop_create_local_port(struct device *dev, struct device_attribute *attr, goto out_free_opts; } + memset(&pinfo, 0, sizeof(pinfo)); pinfo.node_name = opts->wwnn; pinfo.port_name = opts->wwpn; pinfo.port_role = opts->roles; @@ -807,32 +827,6 @@ fcloop_delete_local_port(struct device *dev, struct device_attribute *attr, return ret ? 
ret : count; } -static void -fcloop_nport_free(struct kref *ref) -{ - struct fcloop_nport *nport = - container_of(ref, struct fcloop_nport, ref); - unsigned long flags; - - spin_lock_irqsave(&fcloop_lock, flags); - list_del(&nport->nport_list); - spin_unlock_irqrestore(&fcloop_lock, flags); - - kfree(nport); -} - -static void -fcloop_nport_put(struct fcloop_nport *nport) -{ - kref_put(&nport->ref, fcloop_nport_free); -} - -static int -fcloop_nport_get(struct fcloop_nport *nport) -{ - return kref_get_unless_zero(&nport->ref); -} - static struct fcloop_nport * fcloop_alloc_nport(const char *buf, size_t count, bool remoteport) { @@ -941,6 +935,7 @@ fcloop_create_remote_port(struct device *dev, struct device_attribute *attr, if (!nport) return -EIO; + memset(&pinfo, 0, sizeof(pinfo)); pinfo.node_name = nport->node_name; pinfo.port_name = nport->port_name; pinfo.port_role = nport->port_role; @@ -982,24 +977,12 @@ __unlink_remote_port(struct fcloop_nport *nport) } static int -__wait_remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport) +__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport) { - int ret; - if (!rport) return -EALREADY; - init_completion(&nport->rport_unreg_done); - - ret = nvme_fc_unregister_remoteport(rport->remoteport); - if (ret) - return ret; - - wait_for_completion(&nport->rport_unreg_done); - - fcloop_nport_put(nport); - - return ret; + return nvme_fc_unregister_remoteport(rport->remoteport); } static ssize_t @@ -1032,7 +1015,7 @@ fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr, if (!nport) return -ENOENT; - ret = __wait_remoteport_unreg(nport, rport); + ret = __remoteport_unreg(nport, rport); return ret ? ret : count; } @@ -1089,24 +1072,12 @@ __unlink_target_port(struct fcloop_nport *nport) } static int -__wait_targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport) +__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport) { - int ret; - if (!tport) return -EALREADY; - init_completion(&nport->tport_unreg_done); - - ret = nvmet_fc_unregister_targetport(tport->targetport); - if (ret) - return ret; - - wait_for_completion(&nport->tport_unreg_done); - - fcloop_nport_put(nport); - - return ret; + return nvmet_fc_unregister_targetport(tport->targetport); } static ssize_t @@ -1139,7 +1110,7 @@ fcloop_delete_target_port(struct device *dev, struct device_attribute *attr, if (!nport) return -ENOENT; - ret = __wait_targetport_unreg(nport, tport); + ret = __targetport_unreg(nport, tport); return ret ? 
ret : count; } @@ -1226,11 +1197,11 @@ static void __exit fcloop_exit(void) spin_unlock_irqrestore(&fcloop_lock, flags); - ret = __wait_targetport_unreg(nport, tport); + ret = __targetport_unreg(nport, tport); if (ret) pr_warn("%s: Failed deleting target port\n", __func__); - ret = __wait_remoteport_unreg(nport, rport); + ret = __remoteport_unreg(nport, rport); if (ret) pr_warn("%s: Failed deleting remote port\n", __func__); diff --git a/drivers/nvme/target/io-cmd.c b/drivers/nvme/target/io-cmd.c index 3b4d47a6abdb..0d4c23dc4532 100644 --- a/drivers/nvme/target/io-cmd.c +++ b/drivers/nvme/target/io-cmd.c @@ -68,7 +68,7 @@ static void nvmet_execute_rw(struct nvmet_req *req) nvmet_inline_bio_init(req); bio = &req->inline_bio; - bio->bi_bdev = req->ns->bdev; + bio_set_dev(bio, req->ns->bdev); bio->bi_iter.bi_sector = sector; bio->bi_private = req; bio->bi_end_io = nvmet_bio_done; @@ -80,7 +80,7 @@ static void nvmet_execute_rw(struct nvmet_req *req) struct bio *prev = bio; bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES)); - bio->bi_bdev = req->ns->bdev; + bio_set_dev(bio, req->ns->bdev); bio->bi_iter.bi_sector = sector; bio_set_op_attrs(bio, op, op_flags); @@ -104,7 +104,7 @@ static void nvmet_execute_flush(struct nvmet_req *req) nvmet_inline_bio_init(req); bio = &req->inline_bio; - bio->bi_bdev = req->ns->bdev; + bio_set_dev(bio, req->ns->bdev); bio->bi_private = req; bio->bi_end_io = nvmet_bio_done; bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index 717ed7ddb2f6..92628c432926 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -375,6 +375,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set); if (error) goto out_free_sq; + ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set; ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set); if (IS_ERR(ctrl->ctrl.admin_q)) { diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index e3b244c7e443..7b8e20adf760 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -74,6 +74,7 @@ struct nvmet_sq { struct percpu_ref ref; u16 qid; u16 size; + u16 sqhd; struct completion free_done; struct completion confirm_done; }; @@ -115,6 +116,7 @@ struct nvmet_ctrl { u32 cc; u32 csts; + uuid_t hostid; u16 cntlid; u32 kato; diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index de54c7f5048a..d12e5de78e70 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -135,7 +135,7 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj, /* Stop the user from writing */ if (pos >= nvmem->size) - return 0; + return -EFBIG; if (count < nvmem->word_size) return -EINVAL; @@ -789,6 +789,7 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, return ERR_PTR(-EINVAL); nvmem = __nvmem_device_get(nvmem_np, NULL, NULL); + of_node_put(nvmem_np); if (IS_ERR(nvmem)) return ERR_CAST(nvmem); diff --git a/drivers/of/address.c b/drivers/of/address.c index 580bbf6ca2b1..792722e7d458 100644 --- a/drivers/of/address.c +++ b/drivers/of/address.c @@ -274,10 +274,9 @@ struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser, /* Now consume following elements while they are contiguous */ while (parser->range + parser->np <= parser->end) { - u32 flags, pci_space; + u32 flags; u64 pci_addr, cpu_addr, size; - pci_space = be32_to_cpup(parser->range); flags = of_bus_pci_get_flags(parser->range); pci_addr = 
of_read_number(parser->range + 1, ns); cpu_addr = of_translate_address(parser->node, @@ -559,7 +558,7 @@ static u64 __of_translate_address(struct device_node *dev, int na, ns, pna, pns; u64 result = OF_BAD_ADDR; - pr_debug("** translation for device %s **\n", of_node_full_name(dev)); + pr_debug("** translation for device %pOF **\n", dev); /* Increase refcount at current level */ of_node_get(dev); @@ -573,13 +572,13 @@ static u64 __of_translate_address(struct device_node *dev, /* Count address cells & copy address locally */ bus->count_cells(dev, &na, &ns); if (!OF_CHECK_COUNTS(na, ns)) { - pr_debug("Bad cell count for %s\n", of_node_full_name(dev)); + pr_debug("Bad cell count for %pOF\n", dev); goto bail; } memcpy(addr, in_addr, na * 4); - pr_debug("bus is %s (na=%d, ns=%d) on %s\n", - bus->name, na, ns, of_node_full_name(parent)); + pr_debug("bus is %s (na=%d, ns=%d) on %pOF\n", + bus->name, na, ns, parent); of_dump_addr("translating address:", addr, na); /* Translate */ @@ -600,13 +599,12 @@ static u64 __of_translate_address(struct device_node *dev, pbus = of_match_bus(parent); pbus->count_cells(dev, &pna, &pns); if (!OF_CHECK_COUNTS(pna, pns)) { - pr_err("Bad cell count for %s\n", - of_node_full_name(dev)); + pr_err("Bad cell count for %pOF\n", dev); break; } - pr_debug("parent bus is %s (na=%d, ns=%d) on %s\n", - pbus->name, pna, pns, of_node_full_name(parent)); + pr_debug("parent bus is %s (na=%d, ns=%d) on %pOF\n", + pbus->name, pna, pns, parent); /* Apply bus translation */ if (of_translate_one(dev, bus, pbus, addr, na, ns, pna, rprop)) @@ -855,7 +853,7 @@ int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *siz } if (!ranges) { - pr_debug("no dma-ranges found for node(%s)\n", np->full_name); + pr_debug("no dma-ranges found for node(%pOF)\n", np); ret = -ENODEV; goto out; } @@ -872,8 +870,8 @@ int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *siz dmaaddr = of_read_number(ranges, naddr); *paddr = of_translate_dma_address(np, ranges); if (*paddr == OF_BAD_ADDR) { - pr_err("translation of DMA address(%pad) to CPU address failed node(%s)\n", - dma_addr, np->full_name); + pr_err("translation of DMA address(%pad) to CPU address failed node(%pOF)\n", + dma_addr, np); ret = -EINVAL; goto out; } diff --git a/drivers/of/base.c b/drivers/of/base.c index 686628d1dfa6..260d33c0f26c 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -60,14 +60,13 @@ DEFINE_RAW_SPINLOCK(devtree_lock); int of_n_addr_cells(struct device_node *np) { - const __be32 *ip; + u32 cells; do { if (np->parent) np = np->parent; - ip = of_get_property(np, "#address-cells", NULL); - if (ip) - return be32_to_cpup(ip); + if (!of_property_read_u32(np, "#address-cells", &cells)) + return cells; } while (np->parent); /* No #address-cells property for the root node */ return OF_ROOT_NODE_ADDR_CELLS_DEFAULT; @@ -76,14 +75,13 @@ EXPORT_SYMBOL(of_n_addr_cells); int of_n_size_cells(struct device_node *np) { - const __be32 *ip; + u32 cells; do { if (np->parent) np = np->parent; - ip = of_get_property(np, "#size-cells", NULL); - if (ip) - return be32_to_cpup(ip); + if (!of_property_read_u32(np, "#size-cells", &cells)) + return cells; } while (np->parent); /* No #size-cells property for the root node */ return OF_ROOT_NODE_SIZE_CELLS_DEFAULT; @@ -160,7 +158,7 @@ int __of_add_property_sysfs(struct device_node *np, struct property *pp) pp->attr.read = of_node_property_read; rc = sysfs_create_bin_file(&np->kobj, &pp->attr); - WARN(rc, "error adding attribute %s to node %s\n", pp->name, 
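/*
 * The of_n_addr_cells()/of_n_size_cells() rewrite above shows a pattern
 * repeated throughout the drivers/of changes in this series:
 * of_property_read_u32() replaces the open-coded of_get_property() +
 * be32_to_cpup() pair and folds the missing-property check into a
 * single call.  Minimal sketch (read_cells_sketch is illustrative only):
 */
static u32 read_cells_sketch(struct device_node *np, const char *name,
			     u32 def)
{
	u32 cells;

	if (of_property_read_u32(np, name, &cells))
		return def;	/* property absent or malformed */
	return cells;
}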
np->full_name); + WARN(rc, "error adding attribute %s to node %pOF\n", pp->name, np); return rc; } @@ -1122,7 +1120,7 @@ EXPORT_SYMBOL(of_find_node_by_phandle); void of_print_phandle_args(const char *msg, const struct of_phandle_args *args) { int i; - printk("%s %s", msg, of_node_full_name(args->np)); + printk("%s %pOF", msg, args->np); for (i = 0; i < args->args_count; i++) { const char delim = i ? ',' : ':'; @@ -1184,17 +1182,17 @@ int of_phandle_iterator_next(struct of_phandle_iterator *it) if (it->cells_name) { if (!it->node) { - pr_err("%s: could not find phandle\n", - it->parent->full_name); + pr_err("%pOF: could not find phandle\n", + it->parent); goto err; } if (of_property_read_u32(it->node, it->cells_name, &count)) { - pr_err("%s: could not get %s for %s\n", - it->parent->full_name, + pr_err("%pOF: could not get %s for %pOF\n", + it->parent, it->cells_name, - it->node->full_name); + it->node); goto err; } } else { @@ -1206,8 +1204,8 @@ int of_phandle_iterator_next(struct of_phandle_iterator *it) * property data length */ if (it->cur + count > it->list_end) { - pr_err("%s: arguments longer than property\n", - it->parent->full_name); + pr_err("%pOF: arguments longer than property\n", + it->parent); goto err; } } @@ -1639,8 +1637,8 @@ static void of_alias_add(struct alias_prop *ap, struct device_node *np, strncpy(ap->stem, stem, stem_len); ap->stem[stem_len] = 0; list_add_tail(&ap->link, &aliases_lookup); - pr_debug("adding DT alias:%s: stem=%s id=%i node=%s\n", - ap->alias, ap->stem, ap->id, of_node_full_name(np)); + pr_debug("adding DT alias:%s: stem=%s id=%i node=%pOF\n", + ap->alias, ap->stem, ap->id, np); } /** @@ -1664,11 +1662,13 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align)) if (of_chosen) { /* linux,stdout-path and /aliases/stdout are for legacy compatibility */ - const char *name = of_get_property(of_chosen, "stdout-path", NULL); - if (!name) - name = of_get_property(of_chosen, "linux,stdout-path", NULL); + const char *name = NULL; + + if (of_property_read_string(of_chosen, "stdout-path", &name)) + of_property_read_string(of_chosen, "linux,stdout-path", + &name); if (IS_ENABLED(CONFIG_PPC) && !name) - name = of_get_property(of_aliases, "stdout", NULL); + of_property_read_string(of_aliases, "stdout", &name); if (name) of_stdout = of_find_node_opts_by_path(name, &of_stdout_options); } diff --git a/drivers/of/device.c b/drivers/of/device.c index e0a28ea341fe..64b710265d39 100644 --- a/drivers/of/device.c +++ b/drivers/of/device.c @@ -9,6 +9,9 @@ #include #include #include +#include +#include +#include #include #include "of_private.h" @@ -58,7 +61,7 @@ int of_device_add(struct platform_device *ofdev) /* name and id have to be set so that the platform bus doesn't get * confused on matching */ ofdev->name = dev_name(&ofdev->dev); - ofdev->id = -1; + ofdev->id = PLATFORM_DEVID_NONE; /* * If this device has not binding numa node in devicetree, that is @@ -84,31 +87,28 @@ int of_device_add(struct platform_device *ofdev) */ int of_dma_configure(struct device *dev, struct device_node *np) { - u64 dma_addr, paddr, size; + u64 dma_addr, paddr, size = 0; int ret; bool coherent; unsigned long offset; const struct iommu_ops *iommu; u64 mask; - /* - * Set default coherent_dma_mask to 32 bit. Drivers are expected to - * setup the correct supported mask. - */ - if (!dev->coherent_dma_mask) - dev->coherent_dma_mask = DMA_BIT_MASK(32); - - /* - * Set it to coherent_dma_mask by default if the architecture - * code has not set it. 
- */ - if (!dev->dma_mask) - dev->dma_mask = &dev->coherent_dma_mask; - ret = of_dma_get_range(np, &dma_addr, &paddr, &size); if (ret < 0) { + /* + * For legacy reasons, we have to assume some devices need + * DMA configuration regardless of whether "dma-ranges" is + * correctly specified or not. + */ + if (!dev_is_pci(dev) && +#ifdef CONFIG_ARM_AMBA + dev->bus != &amba_bustype && +#endif + dev->bus != &platform_bus_type) + return ret == -ENODEV ? 0 : ret; + dma_addr = offset = 0; - size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1); } else { offset = PFN_DOWN(paddr - dma_addr); @@ -129,6 +129,22 @@ int of_dma_configure(struct device *dev, struct device_node *np) dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", offset); } + /* + * Set default coherent_dma_mask to 32 bit. Drivers are expected to + * setup the correct supported mask. + */ + if (!dev->coherent_dma_mask) + dev->coherent_dma_mask = DMA_BIT_MASK(32); + /* + * Set it to coherent_dma_mask by default if the architecture + * code has not set it. + */ + if (!dev->dma_mask) + dev->dma_mask = &dev->coherent_dma_mask; + + if (!size) + size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1); + dev->dma_pfn_offset = offset; /* @@ -196,8 +212,10 @@ EXPORT_SYMBOL(of_device_get_match_data); static ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len) { const char *compat; - int cplen, i; - ssize_t tsize, csize, repend; + char *c; + struct property *p; + ssize_t csize; + ssize_t tsize; if ((!dev) || (!dev->of_node)) return -ENODEV; @@ -205,42 +223,28 @@ static ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len /* Name & Type */ csize = snprintf(str, len, "of:N%sT%s", dev->of_node->name, dev->of_node->type); + tsize = csize; + len -= csize; + if (str) + str += csize; - /* Get compatible property if any */ - compat = of_get_property(dev->of_node, "compatible", &cplen); - if (!compat) - return csize; + of_property_for_each_string(dev->of_node, "compatible", p, compat) { + csize = strlen(compat) + 1; + tsize += csize; + if (csize > len) + continue; - /* Find true end (we tolerate multiple \0 at the end */ - for (i = (cplen - 1); i >= 0 && !compat[i]; i--) - cplen--; - if (!cplen) - return csize; - cplen++; - - /* Check space (need cplen+1 chars including final \0) */ - tsize = csize + cplen; - repend = tsize; - - if (csize >= len) /* @ the limit, all is already filled */ - return tsize; - - if (tsize >= len) { /* limit compat list */ - cplen = len - csize - 1; - repend = len; + csize = snprintf(str, len, "C%s", compat); + for (c = str; c; ) { + c = strchr(c, ' '); + if (c) + *c++ = '_'; + } + len -= csize; + str += csize; } - /* Copy and do char replacement */ - memcpy(&str[csize + 1], compat, cplen); - for (i = csize; i < repend; i++) { - char c = str[i]; - if (c == '\0') - str[i] = 'C'; - else if (c == ' ') - str[i] = '_'; - } - - return repend; + return tsize; } int of_device_request_module(struct device *dev) @@ -274,6 +278,8 @@ ssize_t of_device_modalias(struct device *dev, char *str, ssize_t len) ssize_t sl = of_device_get_modalias(dev, str, len - 2); if (sl < 0) return sl; + if (sl > len - 2) + return -ENOMEM; str[sl++] = '\n'; str[sl] = 0; @@ -288,25 +294,22 @@ void of_device_uevent(struct device *dev, struct kobj_uevent_env *env) { const char *compat; struct alias_prop *app; - int seen = 0, cplen, sl; + struct property *p; + int seen = 0; if ((!dev) || (!dev->of_node)) return; add_uevent_var(env, "OF_NAME=%s", dev->of_node->name); - add_uevent_var(env, "OF_FULLNAME=%s", 
dev->of_node->full_name); + add_uevent_var(env, "OF_FULLNAME=%pOF", dev->of_node); if (dev->of_node->type && strcmp("", dev->of_node->type) != 0) add_uevent_var(env, "OF_TYPE=%s", dev->of_node->type); /* Since the compatible field can contain pretty much anything * it's not really legal to split it out with commas. We split it * up using a number of environment variables instead. */ - compat = of_get_property(dev->of_node, "compatible", &cplen); - while (compat && *compat && cplen > 0) { + of_property_for_each_string(dev->of_node, "compatible", p, compat) { add_uevent_var(env, "OF_COMPATIBLE_%d=%s", seen, compat); - sl = strlen(compat) + 1; - compat += sl; - cplen -= sl; seen++; } add_uevent_var(env, "OF_COMPATIBLE_N=%d", seen); diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c index 0542cf8b6e3d..301b6db2b48d 100644 --- a/drivers/of/dynamic.c +++ b/drivers/of/dynamic.c @@ -98,14 +98,14 @@ int of_reconfig_notify(unsigned long action, struct of_reconfig_data *p) switch (action) { case OF_RECONFIG_ATTACH_NODE: case OF_RECONFIG_DETACH_NODE: - pr_debug("notify %-15s %s\n", action_names[action], - pr->dn->full_name); + pr_debug("notify %-15s %pOF\n", action_names[action], + pr->dn); break; case OF_RECONFIG_ADD_PROPERTY: case OF_RECONFIG_REMOVE_PROPERTY: case OF_RECONFIG_UPDATE_PROPERTY: - pr_debug("notify %-15s %s:%s\n", action_names[action], - pr->dn->full_name, pr->prop->name); + pr_debug("notify %-15s %pOF:%s\n", action_names[action], + pr->dn, pr->prop->name); break; } @@ -328,11 +328,10 @@ void of_node_release(struct kobject *kobj) /* We should never be releasing nodes that haven't been detached. */ if (!of_node_check_flag(node, OF_DETACHED)) { - pr_err("ERROR: Bad of_node_put() on %s\n", node->full_name); + pr_err("ERROR: Bad of_node_put() on %pOF\n", node); dump_stack(); return; } - if (!of_node_check_flag(node, OF_DYNAMIC)) return; @@ -462,13 +461,13 @@ static void __of_changeset_entry_dump(struct of_changeset_entry *ce) case OF_RECONFIG_ADD_PROPERTY: case OF_RECONFIG_REMOVE_PROPERTY: case OF_RECONFIG_UPDATE_PROPERTY: - pr_debug("cset<%p> %-15s %s/%s\n", ce, action_names[ce->action], - ce->np->full_name, ce->prop->name); + pr_debug("cset<%p> %-15s %pOF/%s\n", ce, action_names[ce->action], + ce->np, ce->prop->name); break; case OF_RECONFIG_ATTACH_NODE: case OF_RECONFIG_DETACH_NODE: - pr_debug("cset<%p> %-15s %s\n", ce, action_names[ce->action], - ce->np->full_name); + pr_debug("cset<%p> %-15s %pOF\n", ce, action_names[ce->action], + ce->np); break; } } @@ -539,7 +538,7 @@ static void __of_changeset_entry_notify(struct of_changeset_entry *ce, bool reve } if (ret) - pr_err("changeset notifier error @%s\n", ce->np->full_name); + pr_err("changeset notifier error @%pOF\n", ce->np); } static int __of_changeset_entry_apply(struct of_changeset_entry *ce) @@ -570,8 +569,8 @@ static int __of_changeset_entry_apply(struct of_changeset_entry *ce) ret = __of_add_property(ce->np, ce->prop); if (ret) { - pr_err("changeset: add_property failed @%s/%s\n", - ce->np->full_name, + pr_err("changeset: add_property failed @%pOF/%s\n", + ce->np, ce->prop->name); break; } @@ -579,8 +578,8 @@ static int __of_changeset_entry_apply(struct of_changeset_entry *ce) case OF_RECONFIG_REMOVE_PROPERTY: ret = __of_remove_property(ce->np, ce->prop); if (ret) { - pr_err("changeset: remove_property failed @%s/%s\n", - ce->np->full_name, + pr_err("changeset: remove_property failed @%pOF/%s\n", + ce->np, ce->prop->name); break; } @@ -598,8 +597,8 @@ static int __of_changeset_entry_apply(struct of_changeset_entry *ce) ret 
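/*
 * The printk conversions above and below rely on the %pOF format
 * specifier: vsprintf prints a device-tree node's full path directly
 * from the struct device_node pointer, so the ->full_name /
 * of_node_full_name() plumbing becomes unnecessary.  Usage sketch:
 *
 *	pr_info("probing %pOF\n", np);
 *
 * which prints the node path, e.g. "probing /soc/i2c@40005400" (path
 * shown only for illustration).
 */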
= __of_update_property(ce->np, ce->prop, &old_prop); if (ret) { - pr_err("changeset: update_property failed @%s/%s\n", - ce->np->full_name, + pr_err("changeset: update_property failed @%pOF/%s\n", + ce->np, ce->prop->name); break; } diff --git a/drivers/of/irq.c b/drivers/of/irq.c index ab21c846eb27..ec00ae7384c2 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c @@ -59,20 +59,19 @@ EXPORT_SYMBOL_GPL(irq_of_parse_and_map); struct device_node *of_irq_find_parent(struct device_node *child) { struct device_node *p; - const __be32 *parp; + phandle parent; if (!of_node_get(child)) return NULL; do { - parp = of_get_property(child, "interrupt-parent", NULL); - if (parp == NULL) + if (of_property_read_u32(child, "interrupt-parent", &parent)) { p = of_get_parent(child); - else { + } else { if (of_irq_workarounds & OF_IMAP_NO_PHANDLE) p = of_node_get(of_irq_dflt_pic); else - p = of_find_node_by_phandle(be32_to_cpup(parp)); + p = of_find_node_by_phandle(parent); } of_node_put(child); child = p; @@ -117,11 +116,8 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq) * is none, we are nice and just walk up the tree */ do { - tmp = of_get_property(ipar, "#interrupt-cells", NULL); - if (tmp != NULL) { - intsize = be32_to_cpu(*tmp); + if (!of_property_read_u32(ipar, "#interrupt-cells", &intsize)) break; - } tnode = ipar; ipar = of_irq_find_parent(ipar); of_node_put(tnode); @@ -131,7 +127,7 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq) goto fail; } - pr_debug("of_irq_parse_raw: ipar=%s, size=%d\n", of_node_full_name(ipar), intsize); + pr_debug("of_irq_parse_raw: ipar=%pOF, size=%d\n", ipar, intsize); if (out_irq->args_count != intsize) goto fail; @@ -169,8 +165,7 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq) /* Now check if cursor is an interrupt-controller and if it is * then we are done */ - if (of_get_property(ipar, "interrupt-controller", NULL) != - NULL) { + if (of_property_read_bool(ipar, "interrupt-controller")) { pr_debug(" -> got it !\n"); return 0; } @@ -229,14 +224,14 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq) /* Get #interrupt-cells and #address-cells of new * parent */ - tmp = of_get_property(newpar, "#interrupt-cells", NULL); - if (tmp == NULL) { + if (of_property_read_u32(newpar, "#interrupt-cells", + &newintsize)) { pr_debug(" -> parent lacks #interrupt-cells!\n"); goto fail; } - newintsize = be32_to_cpu(*tmp); - tmp = of_get_property(newpar, "#address-cells", NULL); - newaddrsize = (tmp == NULL) ? 
0 : be32_to_cpu(*tmp); + if (of_property_read_u32(newpar, "#address-cells", + &newaddrsize)) + newaddrsize = 0; pr_debug(" -> newintsize=%d, newaddrsize=%d\n", newintsize, newaddrsize); @@ -269,7 +264,7 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq) skiplevel: /* Iterate again with new parent */ out_irq->np = newpar; - pr_debug(" -> new parent: %s\n", of_node_full_name(newpar)); + pr_debug(" -> new parent: %pOF\n", newpar); of_node_put(ipar); ipar = newpar; newpar = NULL; @@ -297,11 +292,11 @@ EXPORT_SYMBOL_GPL(of_irq_parse_raw); int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_args *out_irq) { struct device_node *p; - const __be32 *intspec, *tmp, *addr; - u32 intsize, intlen; + const __be32 *addr; + u32 intsize; int i, res; - pr_debug("of_irq_parse_one: dev=%s, index=%d\n", of_node_full_name(device), index); + pr_debug("of_irq_parse_one: dev=%pOF, index=%d\n", device, index); /* OldWorld mac stuff is "special", handle out of line */ if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC) @@ -316,42 +311,32 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar if (!res) return of_irq_parse_raw(addr, out_irq); - /* Get the interrupts property */ - intspec = of_get_property(device, "interrupts", &intlen); - if (intspec == NULL) - return -EINVAL; - - intlen /= sizeof(*intspec); - - pr_debug(" intspec=%d intlen=%d\n", be32_to_cpup(intspec), intlen); - /* Look for the interrupt parent. */ p = of_irq_find_parent(device); if (p == NULL) return -EINVAL; /* Get size of interrupt specifier */ - tmp = of_get_property(p, "#interrupt-cells", NULL); - if (tmp == NULL) { + if (of_property_read_u32(p, "#interrupt-cells", &intsize)) { res = -EINVAL; goto out; } - intsize = be32_to_cpu(*tmp); - pr_debug(" intsize=%d intlen=%d\n", intsize, intlen); - - /* Check index */ - if ((index + 1) * intsize > intlen) { - res = -EINVAL; - goto out; - } + pr_debug(" parent=%pOF, intsize=%d\n", p, intsize); /* Copy intspec into irq structure */ - intspec += index * intsize; out_irq->np = p; out_irq->args_count = intsize; - for (i = 0; i < intsize; i++) - out_irq->args[i] = be32_to_cpup(intspec++); + for (i = 0; i < intsize; i++) { + res = of_property_read_u32_index(device, "interrupts", + (index * intsize) + i, + out_irq->args + i); + if (res) + goto out; + } + + pr_debug(" intspec=%d\n", *out_irq->args); + /* Check if there are any interrupt-map translations to process */ res = of_irq_parse_raw(addr, out_irq); @@ -508,7 +493,7 @@ void __init of_irq_init(const struct of_device_id *matches) INIT_LIST_HEAD(&intc_parent_list); for_each_matching_node_and_match(np, matches, &match) { - if (!of_find_property(np, "interrupt-controller", NULL) || + if (!of_property_read_bool(np, "interrupt-controller") || !of_device_is_available(np)) continue; @@ -555,8 +540,8 @@ void __init of_irq_init(const struct of_device_id *matches) of_node_set_flag(desc->dev, OF_POPULATED); - pr_debug("of_irq_init: init %s (%p), parent %p\n", - desc->dev->full_name, + pr_debug("of_irq_init: init %pOF (%p), parent %p\n", + desc->dev, desc->dev, desc->interrupt_parent); ret = desc->irq_init_cb(desc->dev, desc->interrupt_parent); diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index e0dbd6e48a98..d94dd8b77abd 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -166,8 +166,8 @@ static bool of_mdiobus_child_is_phy(struct device_node *child) if (of_match_node(whitelist_phys, child)) { pr_warn(FW_WARN - "%s: Whitelisted compatible string. 
Please remove\n", - child->full_name); + "%pOF: Whitelisted compatible string. Please remove\n", + child); return true; } @@ -421,19 +421,14 @@ int of_phy_register_fixed_link(struct device_node *np) { struct fixed_phy_status status = {}; struct device_node *fixed_link_node; - const __be32 *fixed_link_prop; - int link_gpio; - int len, err; - struct phy_device *phy; + u32 fixed_link_prop[5]; const char *managed; + int link_gpio = -1; - err = of_property_read_string(np, "managed", &managed); - if (err == 0) { - if (strcmp(managed, "in-band-status") == 0) { - /* status is zeroed, namely its .link member */ - phy = fixed_phy_register(PHY_POLL, &status, -1, np); - return PTR_ERR_OR_ZERO(phy); - } + if (of_property_read_string(np, "managed", &managed) == 0 && + strcmp(managed, "in-band-status") == 0) { + /* status is zeroed, namely its .link member */ + goto register_phy; } /* New binding */ @@ -456,23 +451,25 @@ int of_phy_register_fixed_link(struct device_node *np) if (link_gpio == -EPROBE_DEFER) return -EPROBE_DEFER; - phy = fixed_phy_register(PHY_POLL, &status, link_gpio, np); - return PTR_ERR_OR_ZERO(phy); + goto register_phy; } /* Old binding */ - fixed_link_prop = of_get_property(np, "fixed-link", &len); - if (fixed_link_prop && len == (5 * sizeof(__be32))) { + if (of_property_read_u32_array(np, "fixed-link", fixed_link_prop, + ARRAY_SIZE(fixed_link_prop)) == 0) { status.link = 1; - status.duplex = be32_to_cpu(fixed_link_prop[1]); - status.speed = be32_to_cpu(fixed_link_prop[2]); - status.pause = be32_to_cpu(fixed_link_prop[3]); - status.asym_pause = be32_to_cpu(fixed_link_prop[4]); - phy = fixed_phy_register(PHY_POLL, &status, -1, np); - return PTR_ERR_OR_ZERO(phy); + status.duplex = fixed_link_prop[1]; + status.speed = fixed_link_prop[2]; + status.pause = fixed_link_prop[3]; + status.asym_pause = fixed_link_prop[4]; + goto register_phy; } return -ENODEV; + +register_phy: + return PTR_ERR_OR_ZERO(fixed_phy_register(PHY_POLL, &status, link_gpio, + np)); } EXPORT_SYMBOL(of_phy_register_fixed_link); diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c index c9d4d3a7b0fe..e9ec931f5b9a 100644 --- a/drivers/of/of_pci.c +++ b/drivers/of/of_pci.c @@ -57,15 +57,14 @@ EXPORT_SYMBOL_GPL(of_pci_find_child_device); */ int of_pci_get_devfn(struct device_node *np) { - unsigned int size; - const __be32 *reg; + u32 reg[5]; + int error; - reg = of_get_property(np, "reg", &size); + error = of_property_read_u32_array(np, "reg", reg, ARRAY_SIZE(reg)); + if (error) + return error; - if (!reg || size < 5 * sizeof(__be32)) - return -EINVAL; - - return (be32_to_cpup(reg) >> 8) & 0xff; + return (reg[0] >> 8) & 0xff; } EXPORT_SYMBOL_GPL(of_pci_get_devfn); @@ -78,16 +77,17 @@ EXPORT_SYMBOL_GPL(of_pci_get_devfn); */ int of_pci_parse_bus_range(struct device_node *node, struct resource *res) { - const __be32 *values; - int len; + u32 bus_range[2]; + int error; - values = of_get_property(node, "bus-range", &len); - if (!values || len < sizeof(*values) * 2) - return -EINVAL; + error = of_property_read_u32_array(node, "bus-range", bus_range, + ARRAY_SIZE(bus_range)); + if (error) + return error; res->name = node->name; - res->start = be32_to_cpup(values++); - res->end = be32_to_cpup(values); + res->start = bus_range[0]; + res->end = bus_range[1]; res->flags = IORESOURCE_BUS; return 0; @@ -105,17 +105,14 @@ EXPORT_SYMBOL_GPL(of_pci_parse_bus_range); */ int of_get_pci_domain_nr(struct device_node *node) { - const __be32 *value; - int len; - u16 domain; + u32 domain; + int error; - value = of_get_property(node, 
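/*
 * The of_pci_get_devfn(), of_pci_parse_bus_range() and fixed-link
 * changes just above share one idea: of_property_read_u32_array()
 * length-checks and byte-swaps a fixed-size cell array in a single
 * call.  Minimal sketch for a two-cell, bus-range-style property
 * (read_u32_pair_sketch is illustrative only):
 */
static int read_u32_pair_sketch(struct device_node *np, const char *name,
				u32 *lo, u32 *hi)
{
	u32 pair[2];
	int ret;

	ret = of_property_read_u32_array(np, name, pair, ARRAY_SIZE(pair));
	if (ret)
		return ret;
	*lo = pair[0];
	*hi = pair[1];
	return 0;
}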
"linux,pci-domain", &len); - if (!value || len < sizeof(*value)) - return -EINVAL; + error = of_property_read_u32(node, "linux,pci-domain", &domain); + if (error) + return error; - domain = (u16)be32_to_cpup(value); - - return domain; + return (u16)domain; } EXPORT_SYMBOL_GPL(of_get_pci_domain_nr); @@ -204,15 +201,15 @@ int of_pci_get_host_bridge_resources(struct device_node *dev, if (!bus_range) return -ENOMEM; - pr_info("host bridge %s ranges:\n", dev->full_name); + pr_info("host bridge %pOF ranges:\n", dev); err = of_pci_parse_bus_range(dev, bus_range); if (err) { bus_range->start = busno; bus_range->end = bus_max; bus_range->flags = IORESOURCE_BUS; - pr_info(" No bus range found for %s, using %pR\n", - dev->full_name, bus_range); + pr_info(" No bus range found for %pOF, using %pR\n", + dev, bus_range); } else { if (bus_range->end > bus_range->start + bus_max) bus_range->end = bus_range->start + bus_max; @@ -258,14 +255,14 @@ int of_pci_get_host_bridge_resources(struct device_node *dev, if (resource_type(res) == IORESOURCE_IO) { if (!io_base) { - pr_err("I/O range found for %s. Please provide an io_base pointer to save CPU base address\n", - dev->full_name); + pr_err("I/O range found for %pOF. Please provide an io_base pointer to save CPU base address\n", + dev); err = -EINVAL; goto conversion_failed; } if (*io_base != (resource_size_t)OF_BAD_ADDR) - pr_warn("More than one I/O resource converted for %s. CPU base address for old range lost!\n", - dev->full_name); + pr_warn("More than one I/O resource converted for %pOF. CPU base address for old range lost!\n", + dev); *io_base = range.cpu_addr; } @@ -325,7 +322,7 @@ int of_pci_map_rid(struct device_node *np, u32 rid, } if (!map_len || map_len % (4 * sizeof(*map))) { - pr_err("%s: Error: Bad %s length: %d\n", np->full_name, + pr_err("%pOF: Error: Bad %s length: %d\n", np, map_name, map_len); return -EINVAL; } @@ -349,8 +346,8 @@ int of_pci_map_rid(struct device_node *np, u32 rid, u32 rid_len = be32_to_cpup(map + 3); if (rid_base & ~map_mask) { - pr_err("%s: Invalid %s translation - %s-mask (0x%x) ignores rid-base (0x%x)\n", - np->full_name, map_name, map_name, + pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores rid-base (0x%x)\n", + np, map_name, map_name, map_mask, rid_base); return -EFAULT; } @@ -375,14 +372,13 @@ int of_pci_map_rid(struct device_node *np, u32 rid, if (id_out) *id_out = masked_rid - rid_base + out_base; - pr_debug("%s: %s, using mask %08x, rid-base: %08x, out-base: %08x, length: %08x, rid: %08x -> %08x\n", - np->full_name, map_name, map_mask, rid_base, out_base, + pr_debug("%pOF: %s, using mask %08x, rid-base: %08x, out-base: %08x, length: %08x, rid: %08x -> %08x\n", + np, map_name, map_mask, rid_base, out_base, rid_len, rid, *id_out); return 0; } - pr_err("%s: Invalid %s translation - no match for rid 0x%x on %s\n", - np->full_name, map_name, rid, - target && *target ? (*target)->full_name : "any target"); + pr_err("%pOF: Invalid %s translation - no match for rid 0x%x on %pOF\n", + np, map_name, rid, target && *target ? 
*target : NULL); return -EFAULT; } diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c index c0e4ee1cd1ba..8ecfee31ab6d 100644 --- a/drivers/of/overlay.c +++ b/drivers/of/overlay.c @@ -35,6 +35,7 @@ struct of_overlay_info { struct device_node *target; struct device_node *overlay; + bool is_symbols_node; }; /** @@ -55,7 +56,8 @@ struct of_overlay { }; static int of_overlay_apply_one(struct of_overlay *ov, - struct device_node *target, const struct device_node *overlay); + struct device_node *target, const struct device_node *overlay, + bool is_symbols_node); static BLOCKING_NOTIFIER_HEAD(of_overlay_chain); @@ -92,10 +94,74 @@ static int of_overlay_notify(struct of_overlay *ov, return 0; } -static int of_overlay_apply_single_property(struct of_overlay *ov, - struct device_node *target, struct property *prop) +static struct property *dup_and_fixup_symbol_prop(struct of_overlay *ov, + const struct property *prop) { - struct property *propn, *tprop; + struct of_overlay_info *ovinfo; + struct property *new; + const char *overlay_name; + char *label_path; + char *symbol_path; + const char *target_path; + int k; + int label_path_len; + int overlay_name_len; + int target_path_len; + + if (!prop->value) + return NULL; + symbol_path = prop->value; + + new = kzalloc(sizeof(*new), GFP_KERNEL); + if (!new) + return NULL; + + for (k = 0; k < ov->count; k++) { + ovinfo = &ov->ovinfo_tab[k]; + overlay_name = ovinfo->overlay->full_name; + overlay_name_len = strlen(overlay_name); + if (!strncasecmp(symbol_path, overlay_name, overlay_name_len)) + break; + } + + if (k >= ov->count) + goto err_free; + + target_path = ovinfo->target->full_name; + target_path_len = strlen(target_path); + + label_path = symbol_path + overlay_name_len; + label_path_len = strlen(label_path); + + new->name = kstrdup(prop->name, GFP_KERNEL); + new->length = target_path_len + label_path_len + 1; + new->value = kzalloc(new->length, GFP_KERNEL); + + if (!new->name || !new->value) + goto err_free; + + strcpy(new->value, target_path); + strcpy(new->value + target_path_len, label_path); + + /* mark the property as dynamic */ + of_property_set_flag(new, OF_DYNAMIC); + + return new; + + err_free: + kfree(new->name); + kfree(new->value); + kfree(new); + return NULL; + + +} + +static int of_overlay_apply_single_property(struct of_overlay *ov, + struct device_node *target, struct property *prop, + bool is_symbols_node) +{ + struct property *propn = NULL, *tprop; /* NOTE: Multiple changes of single properties not supported */ tprop = of_find_property(target, prop->name, NULL); @@ -106,7 +172,15 @@ static int of_overlay_apply_single_property(struct of_overlay *ov, of_prop_cmp(prop->name, "linux,phandle") == 0) return 0; - propn = __of_prop_dup(prop, GFP_KERNEL); + if (is_symbols_node) { + /* changing a property in __symbols__ node not allowed */ + if (tprop) + return -EINVAL; + propn = dup_and_fixup_symbol_prop(ov, prop); + } else { + propn = __of_prop_dup(prop, GFP_KERNEL); + } + if (propn == NULL) return -ENOMEM; @@ -130,18 +204,21 @@ static int of_overlay_apply_single_device_node(struct of_overlay *ov, return -ENOMEM; /* NOTE: Multiple mods of created nodes not supported */ - tchild = of_get_child_by_name(target, cname); + for_each_child_of_node(target, tchild) + if (!of_node_cmp(cname, kbasename(tchild->full_name))) + break; + if (tchild != NULL) { /* new overlay phandle value conflicts with existing value */ if (child->phandle) return -EINVAL; /* apply overlay recursively */ - ret = of_overlay_apply_one(ov, tchild, child); + ret = 
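/*
 * Worked example for the dup_and_fixup_symbol_prop() path rewriting
 * above (paths are illustrative): a __symbols__ entry recorded in the
 * overlay as
 *	"/fragment@0/__overlay__/led0"
 * has the "/fragment@0/__overlay__" prefix (the overlay node's
 * full_name) replaced with the fragment's target path, giving e.g.
 *	"/soc/leds/led0"
 * so the copy placed in the live tree's /__symbols__ node points at the
 * node's final location after the overlay is applied.
 */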
of_overlay_apply_one(ov, tchild, child, 0); of_node_put(tchild); } else { /* create empty tree as a target */ - tchild = __of_node_dup(child, "%s/%s", target->full_name, cname); + tchild = __of_node_dup(child, "%pOF/%s", target, cname); if (!tchild) return -ENOMEM; @@ -152,7 +229,7 @@ static int of_overlay_apply_single_device_node(struct of_overlay *ov, if (ret) return ret; - ret = of_overlay_apply_one(ov, tchild, child); + ret = of_overlay_apply_one(ov, tchild, child, 0); if (ret) return ret; } @@ -168,26 +245,32 @@ static int of_overlay_apply_single_device_node(struct of_overlay *ov, * by using the changeset. */ static int of_overlay_apply_one(struct of_overlay *ov, - struct device_node *target, const struct device_node *overlay) + struct device_node *target, const struct device_node *overlay, + bool is_symbols_node) { struct device_node *child; struct property *prop; int ret; for_each_property_of_node(overlay, prop) { - ret = of_overlay_apply_single_property(ov, target, prop); + ret = of_overlay_apply_single_property(ov, target, prop, + is_symbols_node); if (ret) { - pr_err("Failed to apply prop @%s/%s\n", - target->full_name, prop->name); + pr_err("Failed to apply prop @%pOF/%s\n", + target, prop->name); return ret; } } + /* do not allow symbols node to have any children */ + if (is_symbols_node) + return 0; + for_each_child_of_node(overlay, child) { ret = of_overlay_apply_single_device_node(ov, target, child); if (ret != 0) { - pr_err("Failed to apply single node @%s/%s\n", - target->full_name, child->name); + pr_err("Failed to apply single node @%pOF/%s\n", + target, child->name); of_node_put(child); return ret; } @@ -213,9 +296,10 @@ static int of_overlay_apply(struct of_overlay *ov) for (i = 0; i < ov->count; i++) { struct of_overlay_info *ovinfo = &ov->ovinfo_tab[i]; - err = of_overlay_apply_one(ov, ovinfo->target, ovinfo->overlay); + err = of_overlay_apply_one(ov, ovinfo->target, ovinfo->overlay, + ovinfo->is_symbols_node); if (err != 0) { - pr_err("apply failed '%s'\n", ovinfo->target->full_name); + pr_err("apply failed '%pOF'\n", ovinfo->target); return err; } } @@ -311,6 +395,9 @@ static int of_build_overlay_info(struct of_overlay *ov, for_each_child_of_node(tree, node) cnt++; + if (of_get_child_by_name(tree, "__symbols__")) + cnt++; + ovinfo = kcalloc(cnt, sizeof(*ovinfo), GFP_KERNEL); if (ovinfo == NULL) return -ENOMEM; @@ -322,6 +409,20 @@ static int of_build_overlay_info(struct of_overlay *ov, cnt++; } + node = of_get_child_by_name(tree, "__symbols__"); + if (node) { + ovinfo[cnt].overlay = node; + ovinfo[cnt].target = of_find_node_by_path("/__symbols__"); + ovinfo[cnt].is_symbols_node = 1; + + if (!ovinfo[cnt].target) { + pr_err("no symbols in root of device tree.\n"); + return -EINVAL; + } + + cnt++; + } + /* if nothing filled, return error */ if (cnt == 0) { kfree(ovinfo); @@ -400,8 +501,8 @@ int of_overlay_create(struct device_node *tree) /* build the overlay info structures */ err = of_build_overlay_info(ov, tree); if (err) { - pr_err("of_build_overlay_info() failed for tree@%s\n", - tree->full_name); + pr_err("of_build_overlay_info() failed for tree@%pOF\n", + tree); goto err_free_idr; } @@ -480,9 +581,8 @@ static int overlay_is_topmost(struct of_overlay *ov, struct device_node *dn) /* check against each subtree affected by this overlay */ list_for_each_entry(ce, &ovt->cset.entries, node) { if (overlay_subtree_check(ce->np, dn)) { - pr_err("%s: #%d clashes #%d @%s\n", - __func__, ov->id, ovt->id, - dn->full_name); + pr_err("%s: #%d clashes #%d @%pOF\n", + __func__, 
ov->id, ovt->id, dn); return 0; } } diff --git a/drivers/of/platform.c b/drivers/of/platform.c index b19524623498..ac15d0e3d27d 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c @@ -118,7 +118,7 @@ struct platform_device *of_device_alloc(struct device_node *np, int rc, i, num_reg = 0, num_irq; struct resource *res, temp_res; - dev = platform_device_alloc("", -1); + dev = platform_device_alloc("", PLATFORM_DEVID_NONE); if (!dev) return NULL; @@ -228,7 +228,7 @@ static struct amba_device *of_amba_device_create(struct device_node *node, const void *prop; int i, ret; - pr_debug("Creating amba device %s\n", node->full_name); + pr_debug("Creating amba device %pOF\n", node); if (!of_device_is_available(node) || of_node_test_and_set_flag(node, OF_POPULATED)) @@ -259,15 +259,15 @@ static struct amba_device *of_amba_device_create(struct device_node *node, ret = of_address_to_resource(node, 0, &dev->res); if (ret) { - pr_err("amba: of_address_to_resource() failed (%d) for %s\n", - ret, node->full_name); + pr_err("amba: of_address_to_resource() failed (%d) for %pOF\n", + ret, node); goto err_free; } ret = amba_device_add(dev, &iomem_resource); if (ret) { - pr_err("amba_device_add() failed (%d) for %s\n", - ret, node->full_name); + pr_err("amba_device_add() failed (%d) for %pOF\n", + ret, node); goto err_free; } @@ -310,7 +310,7 @@ static const struct of_dev_auxdata *of_dev_lookup(const struct of_dev_auxdata *l if (!of_address_to_resource(np, 0, &res)) if (res.start != auxdata->phys_addr) continue; - pr_debug("%s: devname=%s\n", np->full_name, auxdata->name); + pr_debug("%pOF: devname=%s\n", np, auxdata->name); return auxdata; } @@ -323,7 +323,7 @@ static const struct of_dev_auxdata *of_dev_lookup(const struct of_dev_auxdata *l if (!of_device_is_compatible(np, auxdata->compatible)) continue; if (!auxdata->phys_addr && !auxdata->name) { - pr_debug("%s: compatible match\n", np->full_name); + pr_debug("%pOF: compatible match\n", np); return auxdata; } } @@ -356,14 +356,14 @@ static int of_platform_bus_create(struct device_node *bus, /* Make sure it has a compatible property */ if (strict && (!of_get_property(bus, "compatible", NULL))) { - pr_debug("%s() - skipping %s, no compatible prop\n", - __func__, bus->full_name); + pr_debug("%s() - skipping %pOF, no compatible prop\n", + __func__, bus); return 0; } if (of_node_check_flag(bus, OF_POPULATED_BUS)) { - pr_debug("%s() - skipping %s, already populated\n", - __func__, bus->full_name); + pr_debug("%s() - skipping %pOF, already populated\n", + __func__, bus); return 0; } @@ -387,7 +387,7 @@ static int of_platform_bus_create(struct device_node *bus, return 0; for_each_child_of_node(bus, child) { - pr_debug(" create child: %s\n", child->full_name); + pr_debug(" create child: %pOF\n", child); rc = of_platform_bus_create(child, matches, lookup, &dev->dev, strict); if (rc) { of_node_put(child); @@ -419,7 +419,7 @@ int of_platform_bus_probe(struct device_node *root, return -EINVAL; pr_debug("%s()\n", __func__); - pr_debug(" starting at: %s\n", root->full_name); + pr_debug(" starting at: %pOF\n", root); /* Do a self check of bus type, if there's a match, create children */ if (of_match_node(matches, root)) { @@ -471,7 +471,7 @@ int of_platform_populate(struct device_node *root, return -EINVAL; pr_debug("%s()\n", __func__); - pr_debug(" starting at: %s\n", root->full_name); + pr_debug(" starting at: %pOF\n", root); for_each_child_of_node(root, child) { rc = of_platform_bus_create(child, matches, lookup, parent, true); @@ -660,8 +660,8 @@ static int 
of_platform_notify(struct notifier_block *nb, of_dev_put(pdev_parent); if (pdev == NULL) { - pr_err("%s: failed to create for '%s'\n", - __func__, rd->dn->full_name); + pr_err("%s: failed to create for '%pOF'\n", + __func__, rd->dn); /* of_platform_device_create tosses the error code */ return notifier_from_errno(-EINVAL); } diff --git a/drivers/of/property.c b/drivers/of/property.c index 067f9fab7b77..fbb72116e9d4 100644 --- a/drivers/of/property.c +++ b/drivers/of/property.c @@ -55,8 +55,8 @@ int of_property_count_elems_of_size(const struct device_node *np, return -ENODATA; if (prop->length % elem_size != 0) { - pr_err("size of %s in node %s is not a multiple of %d\n", - propname, np->full_name, elem_size); + pr_err("size of %s in node %pOF is not a multiple of %d\n", + propname, np, elem_size); return -EINVAL; } @@ -537,8 +537,8 @@ int of_graph_parse_endpoint(const struct device_node *node, { struct device_node *port_node = of_get_parent(node); - WARN_ONCE(!port_node, "%s(): endpoint %s has no parent node\n", - __func__, node->full_name); + WARN_ONCE(!port_node, "%s(): endpoint %pOF has no parent node\n", + __func__, node); memset(endpoint, 0, sizeof(*endpoint)); @@ -621,14 +621,13 @@ struct device_node *of_graph_get_next_endpoint(const struct device_node *parent, of_node_put(node); if (!port) { - pr_err("graph: no port node found in %s\n", - parent->full_name); + pr_err("graph: no port node found in %pOF\n", parent); return NULL; } } else { port = of_get_parent(prev); - if (WARN_ONCE(!port, "%s(): endpoint %s has no parent node\n", - __func__, prev->full_name)) + if (WARN_ONCE(!port, "%s(): endpoint %pOF has no parent node\n", + __func__, prev)) return NULL; } @@ -797,8 +796,8 @@ struct device_node *of_graph_get_remote_node(const struct device_node *node, endpoint_node = of_graph_get_endpoint_by_regs(node, port, endpoint); if (!endpoint_node) { - pr_debug("no valid endpoint (%d, %d) for node %s\n", - port, endpoint, node->full_name); + pr_debug("no valid endpoint (%d, %d) for node %pOF\n", + port, endpoint, node); return NULL; } @@ -828,23 +827,23 @@ static void of_fwnode_put(struct fwnode_handle *fwnode) of_node_put(to_of_node(fwnode)); } -static bool of_fwnode_device_is_available(struct fwnode_handle *fwnode) +static bool of_fwnode_device_is_available(const struct fwnode_handle *fwnode) { return of_device_is_available(to_of_node(fwnode)); } -static bool of_fwnode_property_present(struct fwnode_handle *fwnode, +static bool of_fwnode_property_present(const struct fwnode_handle *fwnode, const char *propname) { return of_property_read_bool(to_of_node(fwnode), propname); } -static int of_fwnode_property_read_int_array(struct fwnode_handle *fwnode, +static int of_fwnode_property_read_int_array(const struct fwnode_handle *fwnode, const char *propname, unsigned int elem_size, void *val, size_t nval) { - struct device_node *node = to_of_node(fwnode); + const struct device_node *node = to_of_node(fwnode); if (!val) return of_property_count_elems_of_size(node, propname, @@ -864,24 +863,26 @@ static int of_fwnode_property_read_int_array(struct fwnode_handle *fwnode, return -ENXIO; } -static int of_fwnode_property_read_string_array(struct fwnode_handle *fwnode, - const char *propname, - const char **val, size_t nval) +static int +of_fwnode_property_read_string_array(const struct fwnode_handle *fwnode, + const char *propname, const char **val, + size_t nval) { - struct device_node *node = to_of_node(fwnode); + const struct device_node *node = to_of_node(fwnode); return val ? 
of_property_read_string_array(node, propname, val, nval) : of_property_count_strings(node, propname); } -static struct fwnode_handle *of_fwnode_get_parent(struct fwnode_handle *fwnode) +static struct fwnode_handle * +of_fwnode_get_parent(const struct fwnode_handle *fwnode) { return of_fwnode_handle(of_get_parent(to_of_node(fwnode))); } static struct fwnode_handle * -of_fwnode_get_next_child_node(struct fwnode_handle *fwnode, +of_fwnode_get_next_child_node(const struct fwnode_handle *fwnode, struct fwnode_handle *child) { return of_fwnode_handle(of_get_next_available_child(to_of_node(fwnode), @@ -889,10 +890,10 @@ of_fwnode_get_next_child_node(struct fwnode_handle *fwnode, } static struct fwnode_handle * -of_fwnode_get_named_child_node(struct fwnode_handle *fwnode, +of_fwnode_get_named_child_node(const struct fwnode_handle *fwnode, const char *childname) { - struct device_node *node = to_of_node(fwnode); + const struct device_node *node = to_of_node(fwnode); struct device_node *child; for_each_available_child_of_node(node, child) @@ -902,8 +903,38 @@ of_fwnode_get_named_child_node(struct fwnode_handle *fwnode, return NULL; } +static int +of_fwnode_get_reference_args(const struct fwnode_handle *fwnode, + const char *prop, const char *nargs_prop, + unsigned int nargs, unsigned int index, + struct fwnode_reference_args *args) +{ + struct of_phandle_args of_args; + unsigned int i; + int ret; + + if (nargs_prop) + ret = of_parse_phandle_with_args(to_of_node(fwnode), prop, + nargs_prop, index, &of_args); + else + ret = of_parse_phandle_with_fixed_args(to_of_node(fwnode), prop, + nargs, index, &of_args); + if (ret < 0) + return ret; + if (!args) + return 0; + + args->nargs = of_args.args_count; + args->fwnode = of_fwnode_handle(of_args.np); + + for (i = 0; i < NR_FWNODE_REFERENCE_ARGS; i++) + args->args[i] = i < of_args.args_count ? 
of_args.args[i] : 0; + + return 0; +} + static struct fwnode_handle * -of_fwnode_graph_get_next_endpoint(struct fwnode_handle *fwnode, +of_fwnode_graph_get_next_endpoint(const struct fwnode_handle *fwnode, struct fwnode_handle *prev) { return of_fwnode_handle(of_graph_get_next_endpoint(to_of_node(fwnode), @@ -911,10 +942,10 @@ of_fwnode_graph_get_next_endpoint(struct fwnode_handle *fwnode, } static struct fwnode_handle * -of_fwnode_graph_get_remote_endpoint(struct fwnode_handle *fwnode) +of_fwnode_graph_get_remote_endpoint(const struct fwnode_handle *fwnode) { - return of_fwnode_handle(of_parse_phandle(to_of_node(fwnode), - "remote-endpoint", 0)); + return of_fwnode_handle( + of_graph_get_remote_endpoint(to_of_node(fwnode))); } static struct fwnode_handle * @@ -934,10 +965,10 @@ of_fwnode_graph_get_port_parent(struct fwnode_handle *fwnode) return of_fwnode_handle(of_get_next_parent(np)); } -static int of_fwnode_graph_parse_endpoint(struct fwnode_handle *fwnode, +static int of_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode, struct fwnode_endpoint *endpoint) { - struct device_node *node = to_of_node(fwnode); + const struct device_node *node = to_of_node(fwnode); struct device_node *port_node = of_get_parent(node); endpoint->local_fwnode = fwnode; @@ -960,8 +991,10 @@ const struct fwnode_operations of_fwnode_ops = { .get_parent = of_fwnode_get_parent, .get_next_child_node = of_fwnode_get_next_child_node, .get_named_child_node = of_fwnode_get_named_child_node, + .get_reference_args = of_fwnode_get_reference_args, .graph_get_next_endpoint = of_fwnode_graph_get_next_endpoint, .graph_get_remote_endpoint = of_fwnode_graph_get_remote_endpoint, .graph_get_port_parent = of_fwnode_graph_get_port_parent, .graph_parse_endpoint = of_fwnode_graph_parse_endpoint, }; +EXPORT_SYMBOL_GPL(of_fwnode_ops); diff --git a/drivers/of/unittest-data/Makefile b/drivers/of/unittest-data/Makefile index 6e00a9c69e58..2d135fba94c1 100644 --- a/drivers/of/unittest-data/Makefile +++ b/drivers/of/unittest-data/Makefile @@ -1,18 +1,27 @@ obj-y += testcases.dtb.o -obj-y += overlay.dtb.o -obj-y += overlay_bad_phandle.dtb.o -obj-y += overlay_base.dtb.o targets += testcases.dtb testcases.dtb.S + +ifdef CONFIG_OF_OVERLAY + +obj-y += overlay.dtb.o +obj-y += overlay_bad_phandle.dtb.o +obj-y += overlay_bad_symbol.dtb.o +obj-y += overlay_base.dtb.o + targets += overlay.dtb overlay.dtb.S targets += overlay_bad_phandle.dtb overlay_bad_phandle.dtb.S +targets += overlay_bad_symbol.dtb overlay_bad_symbol.dtb.S targets += overlay_base.dtb overlay_base.dtb.S -.PRECIOUS: \ - $(obj)/%.dtb.S \ - $(obj)/%.dtb - # enable creation of __symbols__ node DTC_FLAGS_overlay := -@ DTC_FLAGS_overlay_bad_phandle := -@ +DTC_FLAGS_overlay_bad_symbol := -@ DTC_FLAGS_overlay_base := -@ + +endif + +.PRECIOUS: \ + $(obj)/%.dtb.S \ + $(obj)/%.dtb diff --git a/drivers/of/unittest-data/overlay.dts b/drivers/of/unittest-data/overlay.dts index 6cd7e6a0c13e..9e791fcf05dd 100644 --- a/drivers/of/unittest-data/overlay.dts +++ b/drivers/of/unittest-data/overlay.dts @@ -7,7 +7,7 @@ fragment@0 { target = <&electric_1>; __overlay__ { - status = "ok"; + status = "okay"; hvac_2: hvac-large-1 { compatible = "ot,hvac-large"; @@ -23,9 +23,24 @@ fragment@1 { __overlay__ { #address-cells = <1>; #size-cells = <1>; - status = "ok"; + status = "okay"; - ride@200 { + ride@100 { + #address-cells = <1>; + #size-cells = <1>; + + track@30 { + incline-up = < 48 32 16 >; + }; + + track@40 { + incline-up = < 47 31 15 >; + }; + }; + + ride_200: ride@200 { + #address-cells = 
<1>; + #size-cells = <1>; compatible = "ot,ferris-wheel"; reg = < 0x00000200 0x100 >; hvac-provider = < &hvac_2 >; @@ -36,6 +51,14 @@ ride@200 { spin-rph = < 30 >; gondolas = < 16 >; gondola-capacity = < 6 >; + + ride_200_left: track@10 { + reg = < 0x00000010 0x10 >; + }; + + ride_200_right: track@20 { + reg = < 0x00000020 0x10 >; + }; }; }; }; @@ -44,7 +67,7 @@ fragment@2 { target = <&lights_2>; __overlay__ { - status = "ok"; + status = "okay"; color = "purple", "white", "red", "green"; rate = < 3 256 >; }; diff --git a/drivers/of/unittest-data/overlay_bad_symbol.dts b/drivers/of/unittest-data/overlay_bad_symbol.dts new file mode 100644 index 000000000000..09261cb9a67e --- /dev/null +++ b/drivers/of/unittest-data/overlay_bad_symbol.dts @@ -0,0 +1,22 @@ +/dts-v1/; +/plugin/; + +/ { + + fragment@0 { + target = <&electric_1>; + + __overlay__ { + + // This label should cause an error when the overlay + // is applied. There is already a symbol hvac_1 + // in the base tree + hvac_1: hvac-medium-2 { + compatible = "ot,hvac-medium"; + heat-range = < 50 75 >; + cool-range = < 60 80 >; + }; + + }; + }; +}; diff --git a/drivers/of/unittest-data/overlay_base.dts b/drivers/of/unittest-data/overlay_base.dts index 5566b27fb61a..453d0bd83320 100644 --- a/drivers/of/unittest-data/overlay_base.dts +++ b/drivers/of/unittest-data/overlay_base.dts @@ -44,6 +44,8 @@ rides_1: fairway-1 { orientation = < 127 >; ride@100 { + #address-cells = <1>; + #size-cells = <1>; compatible = "ot,roller-coaster"; reg = < 0x00000100 0x100 >; hvac-provider = < &hvac_1 >; @@ -53,6 +55,15 @@ ride@100 { spin-controller = < &spin_ctrl_2 5 &spin_ctrl_2 7 >; spin-controller-names = "track_1", "track_2"; queues = < 2 >; + + track@30 { + reg = < 0x00000030 0x10 >; + }; + + track@40 { + reg = < 0x00000040 0x10 >; + }; + }; }; diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index 0107fc680335..29a35cb1da64 100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c @@ -46,46 +46,54 @@ static struct unittest_results { static void __init of_unittest_find_node_by_name(void) { struct device_node *np; - const char *options; + const char *options, *name; np = of_find_node_by_path("/testcase-data"); - unittest(np && !strcmp("/testcase-data", np->full_name), + name = kasprintf(GFP_KERNEL, "%pOF", np); + unittest(np && !strcmp("/testcase-data", name), "find /testcase-data failed\n"); of_node_put(np); + kfree(name); /* Test if trailing '/' works */ np = of_find_node_by_path("/testcase-data/"); unittest(!np, "trailing '/' on /testcase-data/ should fail\n"); np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-a"); - unittest(np && !strcmp("/testcase-data/phandle-tests/consumer-a", np->full_name), + name = kasprintf(GFP_KERNEL, "%pOF", np); + unittest(np && !strcmp("/testcase-data/phandle-tests/consumer-a", name), "find /testcase-data/phandle-tests/consumer-a failed\n"); of_node_put(np); + kfree(name); np = of_find_node_by_path("testcase-alias"); - unittest(np && !strcmp("/testcase-data", np->full_name), + name = kasprintf(GFP_KERNEL, "%pOF", np); + unittest(np && !strcmp("/testcase-data", name), "find testcase-alias failed\n"); of_node_put(np); + kfree(name); /* Test if trailing '/' works on aliases */ np = of_find_node_by_path("testcase-alias/"); unittest(!np, "trailing '/' on testcase-alias/ should fail\n"); np = of_find_node_by_path("testcase-alias/phandle-tests/consumer-a"); - unittest(np && !strcmp("/testcase-data/phandle-tests/consumer-a", np->full_name), + name = kasprintf(GFP_KERNEL, "%pOF", np); + unittest(np && 
!strcmp("/testcase-data/phandle-tests/consumer-a", name), "find testcase-alias/phandle-tests/consumer-a failed\n"); of_node_put(np); + kfree(name); np = of_find_node_by_path("/testcase-data/missing-path"); - unittest(!np, "non-existent path returned node %s\n", np->full_name); + unittest(!np, "non-existent path returned node %pOF\n", np); of_node_put(np); np = of_find_node_by_path("missing-alias"); - unittest(!np, "non-existent alias returned node %s\n", np->full_name); + unittest(!np, "non-existent alias returned node %pOF\n", np); of_node_put(np); np = of_find_node_by_path("testcase-alias/missing-path"); - unittest(!np, "non-existent alias with relative path returned node %s\n", np->full_name); + unittest(!np, "non-existent alias with relative path returned node %pOF\n", np); of_node_put(np); np = of_find_node_opts_by_path("/testcase-data:testoption", &options); @@ -315,8 +323,8 @@ static void __init of_unittest_check_phandles(void) hash_for_each_possible(phandle_ht, nh, node, np->phandle) { if (nh->np->phandle == np->phandle) { - pr_info("Duplicate phandle! %i used by %s and %s\n", - np->phandle, nh->np->full_name, np->full_name); + pr_info("Duplicate phandle! %i used by %pOF and %pOF\n", + np->phandle, nh->np, np); dup_count++; break; } @@ -406,8 +414,8 @@ static void __init of_unittest_parse_phandle_with_args(void) passed = false; } - unittest(passed, "index %i - data error on node %s rc=%i\n", - i, args.np->full_name, rc); + unittest(passed, "index %i - data error on node %pOF rc=%i\n", + i, args.np, rc); } /* Check for missing list property */ @@ -590,7 +598,7 @@ static void __init of_unittest_changeset(void) /* Make sure node names are constructed correctly */ unittest((np = of_find_node_by_path("/testcase-data/changeset/n2/n21")), - "'%s' not added\n", n21->full_name); + "'%pOF' not added\n", n21); of_node_put(np); unittest(!of_changeset_revert(&chgset), "revert failed\n"); @@ -621,8 +629,8 @@ static void __init of_unittest_parse_interrupts(void) passed &= (args.args_count == 1); passed &= (args.args[0] == (i + 1)); - unittest(passed, "index %i - data error on node %s rc=%i\n", - i, args.np->full_name, rc); + unittest(passed, "index %i - data error on node %pOF rc=%i\n", + i, args.np, rc); } of_node_put(np); @@ -667,8 +675,8 @@ static void __init of_unittest_parse_interrupts(void) default: passed = false; } - unittest(passed, "index %i - data error on node %s rc=%i\n", - i, args.np->full_name, rc); + unittest(passed, "index %i - data error on node %pOF rc=%i\n", + i, args.np, rc); } of_node_put(np); } @@ -737,8 +745,8 @@ static void __init of_unittest_parse_interrupts_extended(void) passed = false; } - unittest(passed, "index %i - data error on node %s rc=%i\n", - i, args.np->full_name, rc); + unittest(passed, "index %i - data error on node %pOF rc=%i\n", + i, args.np, rc); } of_node_put(np); } @@ -917,8 +925,11 @@ static int attach_node_and_children(struct device_node *np) { struct device_node *next, *dup, *child; unsigned long flags; + const char *full_name; - dup = of_find_node_by_path(np->full_name); + full_name = kasprintf(GFP_KERNEL, "%pOF", np); + dup = of_find_node_by_path(full_name); + kfree(full_name); if (dup) { update_node_properties(np, dup); return 0; @@ -1023,7 +1034,7 @@ static int unittest_probe(struct platform_device *pdev) } - dev_dbg(dev, "%s for node @%s\n", __func__, np->full_name); + dev_dbg(dev, "%s for node @%pOF\n", __func__, np); of_platform_populate(np, NULL, NULL, &pdev->dev); @@ -1035,7 +1046,7 @@ static int unittest_remove(struct platform_device 
*pdev) struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; - dev_dbg(dev, "%s for node @%s\n", __func__, np->full_name); + dev_dbg(dev, "%s for node @%pOF\n", __func__, np); return 0; } @@ -1649,7 +1660,7 @@ static int unittest_i2c_bus_probe(struct platform_device *pdev) } - dev_dbg(dev, "%s for node @%s\n", __func__, np->full_name); + dev_dbg(dev, "%s for node @%pOF\n", __func__, np); std = devm_kzalloc(dev, sizeof(*std), GFP_KERNEL); if (!std) { @@ -1687,7 +1698,7 @@ static int unittest_i2c_bus_remove(struct platform_device *pdev) struct device_node *np = dev->of_node; struct unittest_i2c_bus_data *std = platform_get_drvdata(pdev); - dev_dbg(dev, "%s for node @%s\n", __func__, np->full_name); + dev_dbg(dev, "%s for node @%pOF\n", __func__, np); i2c_del_adapter(&std->adap); return 0; @@ -1718,7 +1729,7 @@ static int unittest_i2c_dev_probe(struct i2c_client *client, return -EINVAL; } - dev_dbg(dev, "%s for node @%s\n", __func__, np->full_name); + dev_dbg(dev, "%s for node @%pOF\n", __func__, np); return 0; }; @@ -1728,7 +1739,7 @@ static int unittest_i2c_dev_remove(struct i2c_client *client) struct device *dev = &client->dev; struct device_node *np = client->dev.of_node; - dev_dbg(dev, "%s for node @%s\n", __func__, np->full_name); + dev_dbg(dev, "%s for node @%pOF\n", __func__, np); return 0; } @@ -1763,7 +1774,7 @@ static int unittest_i2c_mux_probe(struct i2c_client *client, struct i2c_mux_core *muxc; u32 reg, max_reg; - dev_dbg(dev, "%s for node @%s\n", __func__, np->full_name); + dev_dbg(dev, "%s for node @%pOF\n", __func__, np); if (!np) { dev_err(dev, "No OF node\n"); @@ -1808,7 +1819,7 @@ static int unittest_i2c_mux_remove(struct i2c_client *client) struct device_node *np = client->dev.of_node; struct i2c_mux_core *muxc = i2c_get_clientdata(client); - dev_dbg(dev, "%s for node @%s\n", __func__, np->full_name); + dev_dbg(dev, "%s for node @%pOF\n", __func__, np); i2c_mux_del_adapters(muxc); return 0; } @@ -1983,6 +1994,8 @@ static void __init of_unittest_overlay(void) static inline void __init of_unittest_overlay(void) { } #endif +#ifdef CONFIG_OF_OVERLAY + /* * __dtb_ot_begin[] and __dtb_ot_end[] are created by cmd_dt_S_dtb * in scripts/Makefile.lib @@ -2010,14 +2023,14 @@ struct overlay_info { OVERLAY_INFO_EXTERN(overlay_base); OVERLAY_INFO_EXTERN(overlay); OVERLAY_INFO_EXTERN(overlay_bad_phandle); - -#ifdef CONFIG_OF_OVERLAY +OVERLAY_INFO_EXTERN(overlay_bad_symbol); /* order of entries is hard-coded into users of overlays[] */ static struct overlay_info overlays[] = { OVERLAY_INFO(overlay_base, -9999), OVERLAY_INFO(overlay, 0), OVERLAY_INFO(overlay_bad_phandle, -EINVAL), + OVERLAY_INFO(overlay_bad_symbol, -EINVAL), {} }; @@ -2289,6 +2302,10 @@ static __init void of_unittest_overlay_high_level(void) unittest(overlay_data_add(2), "Adding overlay 'overlay_bad_phandle' failed\n"); + + unittest(overlay_data_add(3), + "Adding overlay 'overlay_bad_symbol' failed\n"); + return; err_unlock: diff --git a/drivers/pci/dwc/Kconfig b/drivers/pci/dwc/Kconfig index d275aadc47ee..22ec82fcdea2 100644 --- a/drivers/pci/dwc/Kconfig +++ b/drivers/pci/dwc/Kconfig @@ -25,7 +25,7 @@ config PCI_DRA7XX work either as EP or RC. In order to enable host-specific features PCI_DRA7XX_HOST must be selected and in order to enable device- specific features PCI_DRA7XX_EP must be selected. This uses - the Designware core. + the DesignWare core. if PCI_DRA7XX @@ -97,8 +97,8 @@ config PCI_KEYSTONE select PCIE_DW_HOST help Say Y here if you want to enable PCI controller support on Keystone - SoCs. 
The PCI controller on Keystone is based on Designware hardware - and therefore the driver re-uses the Designware core functions to + SoCs. The PCI controller on Keystone is based on DesignWare hardware + and therefore the driver re-uses the DesignWare core functions to implement the driver. config PCI_LAYERSCAPE @@ -132,7 +132,7 @@ config PCIE_QCOM select PCIE_DW_HOST help Say Y here to enable PCIe controller support on Qualcomm SoCs. The - PCIe controller uses the Designware core plus Qualcomm-specific + PCIe controller uses the DesignWare core plus Qualcomm-specific hardware wrappers. config PCIE_ARMADA_8K @@ -145,8 +145,8 @@ config PCIE_ARMADA_8K help Say Y here if you want to enable PCIe controller support on Armada-8K SoCs. The PCIe controller on Armada-8K is based on - Designware hardware and therefore the driver re-uses the - Designware core functions to implement the driver. + DesignWare hardware and therefore the driver re-uses the + DesignWare core functions to implement the driver. config PCIE_ARTPEC6 bool "Axis ARTPEC-6 PCIe controller" diff --git a/drivers/pci/dwc/pci-dra7xx.c b/drivers/pci/dwc/pci-dra7xx.c index f2fc5f47064e..34427a6a15af 100644 --- a/drivers/pci/dwc/pci-dra7xx.c +++ b/drivers/pci/dwc/pci-dra7xx.c @@ -195,7 +195,7 @@ static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx) dra7xx_pcie_enable_msi_interrupts(dra7xx); } -static void dra7xx_pcie_host_init(struct pcie_port *pp) +static int dra7xx_pcie_host_init(struct pcie_port *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); @@ -206,6 +206,8 @@ static void dra7xx_pcie_host_init(struct pcie_port *pp) dw_pcie_wait_for_link(pci); dw_pcie_msi_init(pp); dra7xx_pcie_enable_interrupts(dra7xx); + + return 0; } static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = { @@ -238,7 +240,7 @@ static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp) return -ENODEV; } - dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, 4, + dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, &intx_domain_ops, pp); if (!dra7xx->irq_domain) { dev_err(dev, "Failed to get a INTx IRQ domain\n"); @@ -275,7 +277,6 @@ static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg) return IRQ_HANDLED; } - static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg) { struct dra7xx_pcie *dra7xx = arg; @@ -335,10 +336,23 @@ static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg) return IRQ_HANDLED; } +static void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar) +{ + u32 reg; + + reg = PCI_BASE_ADDRESS_0 + (4 * bar); + dw_pcie_writel_dbi2(pci, reg, 0x0); + dw_pcie_writel_dbi(pci, reg, 0x0); +} + static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); + enum pci_barno bar; + + for (bar = BAR_0; bar <= BAR_5; bar++) + dw_pcie_ep_reset_bar(pci, bar); dra7xx_pcie_enable_wrapper_interrupts(dra7xx); } @@ -435,7 +449,7 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx, pp->irq = platform_get_irq(pdev, 1); if (pp->irq < 0) { dev_err(dev, "missing IRQ resource\n"); - return -EINVAL; + return pp->irq; } ret = devm_request_irq(dev, pp->irq, dra7xx_pcie_msi_irq_handler, @@ -616,8 +630,8 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) { - dev_err(dev, "missing IRQ resource\n"); - return -EINVAL; + dev_err(dev, "missing IRQ resource: %d\n", irq); + return irq; } 
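/*
 * Editorial sketch, not part of the patch: the surrounding hunks replace
 * hard-coded -EINVAL/-ENODEV returns with the negative errno reported by
 * platform_get_irq(), so callers can tell -EPROBE_DEFER apart from a
 * genuinely missing IRQ resource.  A minimal illustration of the idiom,
 * using a hypothetical helper name and assuming the usual
 * <linux/platform_device.h> context:
 */
static int example_pcie_get_irq(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0) {
		dev_err(dev, "missing IRQ resource: %d\n", irq);
		return irq;	/* propagate the errno, e.g. -EPROBE_DEFER */
	}

	return irq;
}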
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf"); diff --git a/drivers/pci/dwc/pci-exynos.c b/drivers/pci/dwc/pci-exynos.c index c78c06552590..5596fdedbb94 100644 --- a/drivers/pci/dwc/pci-exynos.c +++ b/drivers/pci/dwc/pci-exynos.c @@ -581,13 +581,15 @@ static int exynos_pcie_link_up(struct dw_pcie *pci) return 0; } -static void exynos_pcie_host_init(struct pcie_port *pp) +static int exynos_pcie_host_init(struct pcie_port *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct exynos_pcie *ep = to_exynos_pcie(pci); exynos_pcie_establish_link(ep); exynos_pcie_enable_interrupts(ep); + + return 0; } static const struct dw_pcie_host_ops exynos_pcie_host_ops = { @@ -605,9 +607,9 @@ static int __init exynos_add_pcie_port(struct exynos_pcie *ep, int ret; pp->irq = platform_get_irq(pdev, 1); - if (!pp->irq) { + if (pp->irq < 0) { dev_err(dev, "failed to get irq\n"); - return -ENODEV; + return pp->irq; } ret = devm_request_irq(dev, pp->irq, exynos_pcie_irq_handler, IRQF_SHARED, "exynos-pcie", ep); @@ -618,9 +620,9 @@ static int __init exynos_add_pcie_port(struct exynos_pcie *ep, if (IS_ENABLED(CONFIG_PCI_MSI)) { pp->msi_irq = platform_get_irq(pdev, 0); - if (!pp->msi_irq) { + if (pp->msi_irq < 0) { dev_err(dev, "failed to get msi irq\n"); - return -ENODEV; + return pp->msi_irq; } ret = devm_request_irq(dev, pp->msi_irq, diff --git a/drivers/pci/dwc/pci-imx6.c b/drivers/pci/dwc/pci-imx6.c index bf5c3616e344..b73483534a5b 100644 --- a/drivers/pci/dwc/pci-imx6.c +++ b/drivers/pci/dwc/pci-imx6.c @@ -636,7 +636,7 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie) return ret; } -static void imx6_pcie_host_init(struct pcie_port *pp) +static int imx6_pcie_host_init(struct pcie_port *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci); @@ -649,6 +649,8 @@ static void imx6_pcie_host_init(struct pcie_port *pp) if (IS_ENABLED(CONFIG_PCI_MSI)) dw_pcie_msi_init(pp); + + return 0; } static int imx6_pcie_link_up(struct dw_pcie *pci) @@ -778,14 +780,15 @@ static int imx6_pcie_probe(struct platform_device *pdev) } break; case IMX7D: - imx6_pcie->pciephy_reset = devm_reset_control_get(dev, - "pciephy"); + imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev, + "pciephy"); if (IS_ERR(imx6_pcie->pciephy_reset)) { dev_err(dev, "Failed to get PCIEPHY reset control\n"); return PTR_ERR(imx6_pcie->pciephy_reset); } - imx6_pcie->apps_reset = devm_reset_control_get(dev, "apps"); + imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev, + "apps"); if (IS_ERR(imx6_pcie->apps_reset)) { dev_err(dev, "Failed to get PCIE APPS reset control\n"); return PTR_ERR(imx6_pcie->apps_reset); diff --git a/drivers/pci/dwc/pci-keystone-dw.c b/drivers/pci/dwc/pci-keystone-dw.c index 8bc626e640c8..2fb20b887d2a 100644 --- a/drivers/pci/dwc/pci-keystone-dw.c +++ b/drivers/pci/dwc/pci-keystone-dw.c @@ -1,5 +1,5 @@ /* - * Designware application register space functions for Keystone PCI controller + * DesignWare application register space functions for Keystone PCI controller * * Copyright (C) 2013-2014 Texas Instruments., Ltd. 
* http://www.ti.com @@ -168,16 +168,12 @@ void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq) static void ks_dw_pcie_msi_irq_mask(struct irq_data *d) { - struct keystone_pcie *ks_pcie; struct msi_desc *msi; struct pcie_port *pp; - struct dw_pcie *pci; u32 offset; msi = irq_data_get_msi_desc(d); pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi); - pci = to_dw_pcie_from_pp(pp); - ks_pcie = to_keystone_pcie(pci); offset = d->irq - irq_linear_revmap(pp->irq_domain, 0); /* Mask the end point if PVM implemented */ @@ -191,16 +187,12 @@ static void ks_dw_pcie_msi_irq_mask(struct irq_data *d) static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d) { - struct keystone_pcie *ks_pcie; struct msi_desc *msi; struct pcie_port *pp; - struct dw_pcie *pci; u32 offset; msi = irq_data_get_msi_desc(d); pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi); - pci = to_dw_pcie_from_pp(pp); - ks_pcie = to_keystone_pcie(pci); offset = d->irq - irq_linear_revmap(pp->irq_domain, 0); /* Mask the end point if PVM implemented */ @@ -259,7 +251,7 @@ void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie) { int i; - for (i = 0; i < MAX_LEGACY_IRQS; i++) + for (i = 0; i < PCI_NUM_INTX; i++) ks_dw_app_writel(ks_pcie, IRQ_ENABLE_SET + (i << 4), 0x1); } @@ -565,7 +557,7 @@ int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie, /* Create legacy IRQ domain */ ks_pcie->legacy_irq_domain = irq_domain_add_linear(ks_pcie->legacy_intc_np, - MAX_LEGACY_IRQS, + PCI_NUM_INTX, &ks_dw_pcie_legacy_irq_domain_ops, NULL); if (!ks_pcie->legacy_irq_domain) { diff --git a/drivers/pci/dwc/pci-keystone.c b/drivers/pci/dwc/pci-keystone.c index 4783cec1f78d..5bee3af47588 100644 --- a/drivers/pci/dwc/pci-keystone.c +++ b/drivers/pci/dwc/pci-keystone.c @@ -32,10 +32,6 @@ #define DRIVER_NAME "keystone-pcie" -/* driver specific constants */ -#define MAX_MSI_HOST_IRQS 8 -#define MAX_LEGACY_HOST_IRQS 4 - /* DEV_STAT_CTRL */ #define PCIE_CAP_BASE 0x70 @@ -173,7 +169,7 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie, if (legacy) { np_temp = &ks_pcie->legacy_intc_np; - max_host_irqs = MAX_LEGACY_HOST_IRQS; + max_host_irqs = PCI_NUM_INTX; host_irqs = &ks_pcie->legacy_host_irqs[0]; } else { np_temp = &ks_pcie->msi_intc_np; @@ -261,7 +257,7 @@ static int keystone_pcie_fault(unsigned long addr, unsigned int fsr, return 0; } -static void __init ks_pcie_host_init(struct pcie_port *pp) +static int __init ks_pcie_host_init(struct pcie_port *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); @@ -289,6 +285,8 @@ static void __init ks_pcie_host_init(struct pcie_port *pp) */ hook_fault_code(17, keystone_pcie_fault, SIGBUS, 0, "Asynchronous external abort"); + + return 0; } static const struct dw_pcie_host_ops keystone_pcie_host_ops = { diff --git a/drivers/pci/dwc/pci-keystone.h b/drivers/pci/dwc/pci-keystone.h index 74c5825882df..30b7bc2ac380 100644 --- a/drivers/pci/dwc/pci-keystone.h +++ b/drivers/pci/dwc/pci-keystone.h @@ -12,9 +12,7 @@ * published by the Free Software Foundation. 
*/ -#define MAX_LEGACY_IRQS 4 #define MAX_MSI_HOST_IRQS 8 -#define MAX_LEGACY_HOST_IRQS 4 struct keystone_pcie { struct dw_pcie *pci; @@ -22,7 +20,7 @@ struct keystone_pcie { /* PCI Device ID */ u32 device_id; int num_legacy_host_irqs; - int legacy_host_irqs[MAX_LEGACY_HOST_IRQS]; + int legacy_host_irqs[PCI_NUM_INTX]; struct device_node *legacy_intc_np; int num_msi_host_irqs; diff --git a/drivers/pci/dwc/pci-layerscape.c b/drivers/pci/dwc/pci-layerscape.c index fd861289ad8b..87fa486bee2c 100644 --- a/drivers/pci/dwc/pci-layerscape.c +++ b/drivers/pci/dwc/pci-layerscape.c @@ -33,7 +33,8 @@ /* PEX Internal Configuration Registers */ #define PCIE_STRFMR1 0x71c /* Symbol Timer & Filter Mask Register1 */ -#define PCIE_DBI_RO_WR_EN 0x8bc /* DBI Read-Only Write Enable Register */ + +#define PCIE_IATU_NUM 6 struct ls_pcie_drvdata { u32 lut_offset; @@ -72,14 +73,6 @@ static void ls_pcie_clear_multifunction(struct ls_pcie *pcie) iowrite8(PCI_HEADER_TYPE_BRIDGE, pci->dbi_base + PCI_HEADER_TYPE); } -/* Fix class value */ -static void ls_pcie_fix_class(struct ls_pcie *pcie) -{ - struct dw_pcie *pci = pcie->pci; - - iowrite16(PCI_CLASS_BRIDGE_PCI, pci->dbi_base + PCI_CLASS_DEVICE); -} - /* Drop MSG TLP except for Vendor MSG */ static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie) { @@ -91,6 +84,14 @@ static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie) iowrite32(val, pci->dbi_base + PCIE_STRFMR1); } +static void ls_pcie_disable_outbound_atus(struct ls_pcie *pcie) +{ + int i; + + for (i = 0; i < PCIE_IATU_NUM; i++) + dw_pcie_disable_atu(pcie->pci, DW_PCIE_REGION_OUTBOUND, i); +} + static int ls1021_pcie_link_up(struct dw_pcie *pci) { u32 state; @@ -108,33 +109,6 @@ static int ls1021_pcie_link_up(struct dw_pcie *pci) return 1; } -static void ls1021_pcie_host_init(struct pcie_port *pp) -{ - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct ls_pcie *pcie = to_ls_pcie(pci); - struct device *dev = pci->dev; - u32 index[2]; - - pcie->scfg = syscon_regmap_lookup_by_phandle(dev->of_node, - "fsl,pcie-scfg"); - if (IS_ERR(pcie->scfg)) { - dev_err(dev, "No syscfg phandle specified\n"); - pcie->scfg = NULL; - return; - } - - if (of_property_read_u32_array(dev->of_node, - "fsl,pcie-scfg", index, 2)) { - pcie->scfg = NULL; - return; - } - pcie->index = index[1]; - - dw_pcie_setup_rc(pp); - - ls_pcie_drop_msg_tlp(pcie); -} - static int ls_pcie_link_up(struct dw_pcie *pci) { struct ls_pcie *pcie = to_ls_pcie(pci); @@ -150,16 +124,54 @@ static int ls_pcie_link_up(struct dw_pcie *pci) return 1; } -static void ls_pcie_host_init(struct pcie_port *pp) +static int ls_pcie_host_init(struct pcie_port *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct ls_pcie *pcie = to_ls_pcie(pci); - iowrite32(1, pci->dbi_base + PCIE_DBI_RO_WR_EN); - ls_pcie_fix_class(pcie); + /* + * Disable outbound windows configured by the bootloader to avoid + * one transaction hitting multiple outbound windows. + * dw_pcie_setup_rc() will reconfigure the outbound windows. 
+ */ + ls_pcie_disable_outbound_atus(pcie); + + dw_pcie_dbi_ro_wr_en(pci); ls_pcie_clear_multifunction(pcie); + dw_pcie_dbi_ro_wr_dis(pci); + ls_pcie_drop_msg_tlp(pcie); - iowrite32(0, pci->dbi_base + PCIE_DBI_RO_WR_EN); + + dw_pcie_setup_rc(pp); + + return 0; +} + +static int ls1021_pcie_host_init(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct ls_pcie *pcie = to_ls_pcie(pci); + struct device *dev = pci->dev; + u32 index[2]; + int ret; + + pcie->scfg = syscon_regmap_lookup_by_phandle(dev->of_node, + "fsl,pcie-scfg"); + if (IS_ERR(pcie->scfg)) { + ret = PTR_ERR(pcie->scfg); + dev_err(dev, "No syscfg phandle specified\n"); + pcie->scfg = NULL; + return ret; + } + + if (of_property_read_u32_array(dev->of_node, + "fsl,pcie-scfg", index, 2)) { + pcie->scfg = NULL; + return -EINVAL; + } + pcie->index = index[1]; + + return ls_pcie_host_init(pp); } static int ls_pcie_msi_host_init(struct pcie_port *pp, @@ -232,12 +244,22 @@ static struct ls_pcie_drvdata ls2080_drvdata = { .dw_pcie_ops = &dw_ls_pcie_ops, }; +static struct ls_pcie_drvdata ls2088_drvdata = { + .lut_offset = 0x80000, + .ltssm_shift = 0, + .lut_dbg = 0x407fc, + .ops = &ls_pcie_host_ops, + .dw_pcie_ops = &dw_ls_pcie_ops, +}; + static const struct of_device_id ls_pcie_of_match[] = { { .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata }, { .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata }, { .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata }, { .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata }, { .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata }, + { .compatible = "fsl,ls2088a-pcie", .data = &ls2088_drvdata }, + { .compatible = "fsl,ls1088a-pcie", .data = &ls2088_drvdata }, { }, }; diff --git a/drivers/pci/dwc/pcie-armada8k.c b/drivers/pci/dwc/pcie-armada8k.c index ea8f34af6a85..370d057c0046 100644 --- a/drivers/pci/dwc/pcie-armada8k.c +++ b/drivers/pci/dwc/pcie-armada8k.c @@ -134,13 +134,15 @@ static void armada8k_pcie_establish_link(struct armada8k_pcie *pcie) dev_err(pci->dev, "Link not up after reconfiguration\n"); } -static void armada8k_pcie_host_init(struct pcie_port *pp) +static int armada8k_pcie_host_init(struct pcie_port *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct armada8k_pcie *pcie = to_armada8k_pcie(pci); dw_pcie_setup_rc(pp); armada8k_pcie_establish_link(pcie); + + return 0; } static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg) @@ -176,9 +178,9 @@ static int armada8k_add_pcie_port(struct armada8k_pcie *pcie, pp->ops = &armada8k_pcie_host_ops; pp->irq = platform_get_irq(pdev, 0); - if (!pp->irq) { + if (pp->irq < 0) { dev_err(dev, "failed to get irq for port\n"); - return -ENODEV; + return pp->irq; } ret = devm_request_irq(dev, pp->irq, armada8k_pcie_irq_handler, @@ -226,7 +228,9 @@ static int armada8k_pcie_probe(struct platform_device *pdev) if (IS_ERR(pcie->clk)) return PTR_ERR(pcie->clk); - clk_prepare_enable(pcie->clk); + ret = clk_prepare_enable(pcie->clk); + if (ret) + return ret; /* Get the dw-pcie unit configuration/control registers base. */ base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl"); diff --git a/drivers/pci/dwc/pcie-artpec6.c b/drivers/pci/dwc/pcie-artpec6.c index 01c6f7823672..6653619db6a1 100644 --- a/drivers/pci/dwc/pcie-artpec6.c +++ b/drivers/pci/dwc/pcie-artpec6.c @@ -141,12 +141,6 @@ static int artpec6_pcie_establish_link(struct artpec6_pcie *artpec6_pcie) artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); usleep_range(100, 200); - /* - * Enable writing to config regs. 
This is required as the Synopsys - * driver changes the class code. That register needs DBI write enable. - */ - dw_pcie_writel_dbi(pci, MISC_CONTROL_1_OFF, DBI_RO_WR_EN); - /* setup root complex */ dw_pcie_setup_rc(pp); @@ -175,13 +169,15 @@ static void artpec6_pcie_enable_interrupts(struct artpec6_pcie *artpec6_pcie) dw_pcie_msi_init(pp); } -static void artpec6_pcie_host_init(struct pcie_port *pp) +static int artpec6_pcie_host_init(struct pcie_port *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci); artpec6_pcie_establish_link(artpec6_pcie); artpec6_pcie_enable_interrupts(artpec6_pcie); + + return 0; } static const struct dw_pcie_host_ops artpec6_pcie_host_ops = { @@ -207,9 +203,9 @@ static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie, if (IS_ENABLED(CONFIG_PCI_MSI)) { pp->msi_irq = platform_get_irq_byname(pdev, "msi"); - if (pp->msi_irq <= 0) { + if (pp->msi_irq < 0) { dev_err(dev, "failed to get MSI irq\n"); - return -ENODEV; + return pp->msi_irq; } ret = devm_request_irq(dev, pp->msi_irq, diff --git a/drivers/pci/dwc/pcie-designware-ep.c b/drivers/pci/dwc/pcie-designware-ep.c index 398406393f37..d53d5f168363 100644 --- a/drivers/pci/dwc/pcie-designware-ep.c +++ b/drivers/pci/dwc/pcie-designware-ep.c @@ -1,5 +1,5 @@ /** - * Synopsys Designware PCIe Endpoint controller driver + * Synopsys DesignWare PCIe Endpoint controller driver * * Copyright (C) 2017 Texas Instruments * Author: Kishon Vijay Abraham I @@ -283,7 +283,6 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep) { int ret; void *addr; - enum pci_barno bar; struct pci_epc *epc; struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct device *dev = pci->dev; @@ -312,9 +311,6 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep) return -ENOMEM; ep->outbound_addr = addr; - for (bar = BAR_0; bar <= BAR_5; bar++) - dw_pcie_ep_reset_bar(pci, bar); - if (ep->ops->ep_init) ep->ops->ep_init(ep); @@ -328,7 +324,8 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep) if (ret < 0) epc->max_functions = 1; - ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size); + ret = __pci_epc_mem_init(epc, ep->phys_base, ep->addr_size, + ep->page_size); if (ret < 0) { dev_err(dev, "Failed to initialize address space\n"); return ret; diff --git a/drivers/pci/dwc/pcie-designware-host.c b/drivers/pci/dwc/pcie-designware-host.c index d29c020da082..81e2157a7cfb 100644 --- a/drivers/pci/dwc/pcie-designware-host.c +++ b/drivers/pci/dwc/pcie-designware-host.c @@ -1,5 +1,5 @@ /* - * Synopsys Designware PCIe host controller driver + * Synopsys DesignWare PCIe host controller driver * * Copyright (C) 2013 Samsung Electronics Co., Ltd. 
* http://www.samsung.com @@ -71,9 +71,9 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp) while ((pos = find_next_bit((unsigned long *) &val, 32, pos)) != 32) { irq = irq_find_mapping(pp->irq_domain, i * 32 + pos); + generic_handle_irq(irq); dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4, 1 << pos); - generic_handle_irq(irq); pos++; } } @@ -401,8 +401,11 @@ int dw_pcie_host_init(struct pcie_port *pp) } } - if (pp->ops->host_init) - pp->ops->host_init(pp); + if (pp->ops->host_init) { + ret = pp->ops->host_init(pp); + if (ret) + goto error; + } pp->root_bus_nr = pp->busn->start; @@ -594,10 +597,12 @@ void dw_pcie_setup_rc(struct pcie_port *pp) dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000); /* setup interrupt pins */ + dw_pcie_dbi_ro_wr_en(pci); val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE); val &= 0xffff00ff; val |= 0x00000100; dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val); + dw_pcie_dbi_ro_wr_dis(pci); /* setup bus numbers */ val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS); @@ -634,8 +639,12 @@ void dw_pcie_setup_rc(struct pcie_port *pp) dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0); + /* Enable write permission for the DBI read-only register */ + dw_pcie_dbi_ro_wr_en(pci); /* program correct class for RC */ dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI); + /* Better disable write permission right after the update */ + dw_pcie_dbi_ro_wr_dis(pci); dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val); val |= PORT_LOGIC_SPEED_CHANGE; diff --git a/drivers/pci/dwc/pcie-designware-plat.c b/drivers/pci/dwc/pcie-designware-plat.c index 091b4e7ad059..168e2380f493 100644 --- a/drivers/pci/dwc/pcie-designware-plat.c +++ b/drivers/pci/dwc/pcie-designware-plat.c @@ -35,7 +35,7 @@ static irqreturn_t dw_plat_pcie_msi_irq_handler(int irq, void *arg) return dw_handle_msi_irq(pp); } -static void dw_plat_pcie_host_init(struct pcie_port *pp) +static int dw_plat_pcie_host_init(struct pcie_port *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); @@ -44,6 +44,8 @@ static void dw_plat_pcie_host_init(struct pcie_port *pp) if (IS_ENABLED(CONFIG_PCI_MSI)) dw_pcie_msi_init(pp); + + return 0; } static const struct dw_pcie_host_ops dw_plat_pcie_host_ops = { diff --git a/drivers/pci/dwc/pcie-designware.c b/drivers/pci/dwc/pcie-designware.c index 0e03af279259..88abdddee2ad 100644 --- a/drivers/pci/dwc/pcie-designware.c +++ b/drivers/pci/dwc/pcie-designware.c @@ -1,5 +1,5 @@ /* - * Synopsys Designware PCIe host controller driver + * Synopsys DesignWare PCIe host controller driver * * Copyright (C) 2013 Samsung Electronics Co., Ltd. 
* http://www.samsung.com @@ -107,8 +107,9 @@ static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg, dw_pcie_writel_dbi(pci, offset + reg, val); } -void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index, int type, - u64 cpu_addr, u64 pci_addr, u32 size) +static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index, + int type, u64 cpu_addr, + u64 pci_addr, u32 size) { u32 retries, val; @@ -177,7 +178,7 @@ void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type, */ for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2); - if (val == PCIE_ATU_ENABLE) + if (val & PCIE_ATU_ENABLE) return; usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); @@ -200,8 +201,9 @@ static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg, dw_pcie_writel_dbi(pci, offset + reg, val); } -int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index, int bar, - u64 cpu_addr, enum dw_pcie_as_type as_type) +static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index, + int bar, u64 cpu_addr, + enum dw_pcie_as_type as_type) { int type; u32 retries, val; diff --git a/drivers/pci/dwc/pcie-designware.h b/drivers/pci/dwc/pcie-designware.h index b4d2a89f8e58..e5d9d77b778e 100644 --- a/drivers/pci/dwc/pcie-designware.h +++ b/drivers/pci/dwc/pcie-designware.h @@ -1,5 +1,5 @@ /* - * Synopsys Designware PCIe host controller driver + * Synopsys DesignWare PCIe host controller driver * * Copyright (C) 2013 Samsung Electronics Co., Ltd. * http://www.samsung.com @@ -76,6 +76,9 @@ #define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16) #define PCIE_ATU_UPPER_TARGET 0x91C +#define PCIE_MISC_CONTROL_1_OFF 0x8BC +#define PCIE_DBI_RO_WR_EN (0x1 << 0) + /* * iATU Unroll-specific register definitions * From 4.80 core version the address translation will be made by unroll @@ -134,7 +137,7 @@ struct dw_pcie_host_ops { unsigned int devfn, int where, int size, u32 *val); int (*wr_other_conf)(struct pcie_port *pp, struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val); - void (*host_init)(struct pcie_port *pp); + int (*host_init)(struct pcie_port *pp); void (*msi_set_irq)(struct pcie_port *pp, int irq); void (*msi_clear_irq)(struct pcie_port *pp, int irq); phys_addr_t (*get_msi_addr)(struct pcie_port *pp); @@ -186,6 +189,7 @@ struct dw_pcie_ep { struct dw_pcie_ep_ops *ops; phys_addr_t phys_base; size_t addr_size; + size_t page_size; u8 bar_to_atu[6]; phys_addr_t *outbound_addr; unsigned long ib_window_map; @@ -279,6 +283,28 @@ static inline u32 dw_pcie_readl_dbi2(struct dw_pcie *pci, u32 reg) return __dw_pcie_read_dbi(pci, pci->dbi_base2, reg, 0x4); } +static inline void dw_pcie_dbi_ro_wr_en(struct dw_pcie *pci) +{ + u32 reg; + u32 val; + + reg = PCIE_MISC_CONTROL_1_OFF; + val = dw_pcie_readl_dbi(pci, reg); + val |= PCIE_DBI_RO_WR_EN; + dw_pcie_writel_dbi(pci, reg, val); +} + +static inline void dw_pcie_dbi_ro_wr_dis(struct dw_pcie *pci) +{ + u32 reg; + u32 val; + + reg = PCIE_MISC_CONTROL_1_OFF; + val = dw_pcie_readl_dbi(pci, reg); + val &= ~PCIE_DBI_RO_WR_EN; + dw_pcie_writel_dbi(pci, reg, val); +} + #ifdef CONFIG_PCIE_DW_HOST irqreturn_t dw_handle_msi_irq(struct pcie_port *pp); void dw_pcie_msi_init(struct pcie_port *pp); diff --git a/drivers/pci/dwc/pcie-hisi.c b/drivers/pci/dwc/pcie-hisi.c index e51acee0ddf3..a20179169e06 100644 --- a/drivers/pci/dwc/pcie-hisi.c +++ b/drivers/pci/dwc/pcie-hisi.c @@ -223,7 +223,7 @@ static int hisi_pcie_link_up(struct dw_pcie *pci) return 
hisi_pcie->soc_ops->hisi_pcie_link_up(hisi_pcie); } -static struct dw_pcie_host_ops hisi_pcie_host_ops = { +static const struct dw_pcie_host_ops hisi_pcie_host_ops = { .rd_own_conf = hisi_pcie_cfg_read, .wr_own_conf = hisi_pcie_cfg_write, }; @@ -268,7 +268,6 @@ static int hisi_pcie_probe(struct platform_device *pdev) struct dw_pcie *pci; struct hisi_pcie *hisi_pcie; struct resource *reg; - struct device_driver *driver; int ret; hisi_pcie = devm_kzalloc(dev, sizeof(*hisi_pcie), GFP_KERNEL); @@ -282,8 +281,6 @@ static int hisi_pcie_probe(struct platform_device *pdev) pci->dev = dev; pci->ops = &dw_pcie_ops; - driver = dev->driver; - hisi_pcie->pci = pci; hisi_pcie->soc_ops = of_device_get_match_data(dev); diff --git a/drivers/pci/dwc/pcie-kirin.c b/drivers/pci/dwc/pcie-kirin.c index 33fddb9f6739..dc3033cf3c19 100644 --- a/drivers/pci/dwc/pcie-kirin.c +++ b/drivers/pci/dwc/pcie-kirin.c @@ -430,9 +430,11 @@ static int kirin_pcie_establish_link(struct pcie_port *pp) return 0; } -static void kirin_pcie_host_init(struct pcie_port *pp) +static int kirin_pcie_host_init(struct pcie_port *pp) { kirin_pcie_establish_link(pp); + + return 0; } static struct dw_pcie_ops kirin_dw_pcie_ops = { @@ -441,7 +443,7 @@ static struct dw_pcie_ops kirin_dw_pcie_ops = { .link_up = kirin_pcie_link_up, }; -static struct dw_pcie_host_ops kirin_pcie_host_ops = { +static const struct dw_pcie_host_ops kirin_pcie_host_ops = { .rd_own_conf = kirin_pcie_rd_own_conf, .wr_own_conf = kirin_pcie_wr_own_conf, .host_init = kirin_pcie_host_init, diff --git a/drivers/pci/dwc/pcie-qcom.c b/drivers/pci/dwc/pcie-qcom.c index 68c5f2ab5bc8..ce7ba5b7552a 100644 --- a/drivers/pci/dwc/pcie-qcom.c +++ b/drivers/pci/dwc/pcie-qcom.c @@ -37,6 +37,20 @@ #include "pcie-designware.h" #define PCIE20_PARF_SYS_CTRL 0x00 +#define MST_WAKEUP_EN BIT(13) +#define SLV_WAKEUP_EN BIT(12) +#define MSTR_ACLK_CGC_DIS BIT(10) +#define SLV_ACLK_CGC_DIS BIT(9) +#define CORE_CLK_CGC_DIS BIT(6) +#define AUX_PWR_DET BIT(4) +#define L23_CLK_RMV_DIS BIT(2) +#define L1_CLK_RMV_DIS BIT(1) + +#define PCIE20_COMMAND_STATUS 0x04 +#define CMD_BME_VAL 0x4 +#define PCIE20_DEVICE_CONTROL2_STATUS2 0x98 +#define PCIE_CAP_CPL_TIMEOUT_DISABLE 0x10 + #define PCIE20_PARF_PHY_CTRL 0x40 #define PCIE20_PARF_PHY_REFCLK 0x4C #define PCIE20_PARF_DBI_BASE_ADDR 0x168 @@ -58,10 +72,22 @@ #define CFG_BRIDGE_SB_INIT BIT(0) #define PCIE20_CAP 0x70 +#define PCIE20_CAP_LINK_CAPABILITIES (PCIE20_CAP + 0xC) +#define PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT (BIT(10) | BIT(11)) +#define PCIE20_CAP_LINK_1 (PCIE20_CAP + 0x14) +#define PCIE_CAP_LINK1_VAL 0x2FD7F + +#define PCIE20_PARF_Q2A_FLUSH 0x1AC + +#define PCIE20_MISC_CONTROL_1_REG 0x8BC +#define DBI_RO_WR_EN 1 #define PERST_DELAY_US 1000 -struct qcom_pcie_resources_v0 { +#define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE 0x358 +#define SLV_ADDR_SPACE_SZ 0x10000000 + +struct qcom_pcie_resources_2_1_0 { struct clk *iface_clk; struct clk *core_clk; struct clk *phy_clk; @@ -75,7 +101,7 @@ struct qcom_pcie_resources_v0 { struct regulator *vdda_refclk; }; -struct qcom_pcie_resources_v1 { +struct qcom_pcie_resources_1_0_0 { struct clk *iface; struct clk *aux; struct clk *master_bus; @@ -84,7 +110,7 @@ struct qcom_pcie_resources_v1 { struct regulator *vdda; }; -struct qcom_pcie_resources_v2 { +struct qcom_pcie_resources_2_3_2 { struct clk *aux_clk; struct clk *master_clk; struct clk *slave_clk; @@ -92,7 +118,7 @@ struct qcom_pcie_resources_v2 { struct clk *pipe_clk; }; -struct qcom_pcie_resources_v3 { +struct qcom_pcie_resources_2_4_0 { struct clk *aux_clk; struct 
clk *master_clk; struct clk *slave_clk; @@ -110,11 +136,21 @@ struct qcom_pcie_resources_v3 { struct reset_control *phy_ahb_reset; }; +struct qcom_pcie_resources_2_3_3 { + struct clk *iface; + struct clk *axi_m_clk; + struct clk *axi_s_clk; + struct clk *ahb_clk; + struct clk *aux_clk; + struct reset_control *rst[7]; +}; + union qcom_pcie_resources { - struct qcom_pcie_resources_v0 v0; - struct qcom_pcie_resources_v1 v1; - struct qcom_pcie_resources_v2 v2; - struct qcom_pcie_resources_v3 v3; + struct qcom_pcie_resources_1_0_0 v1_0_0; + struct qcom_pcie_resources_2_1_0 v2_1_0; + struct qcom_pcie_resources_2_3_2 v2_3_2; + struct qcom_pcie_resources_2_3_3 v2_3_3; + struct qcom_pcie_resources_2_4_0 v2_4_0; }; struct qcom_pcie; @@ -124,6 +160,7 @@ struct qcom_pcie_ops { int (*init)(struct qcom_pcie *pcie); int (*post_init)(struct qcom_pcie *pcie); void (*deinit)(struct qcom_pcie *pcie); + void (*post_deinit)(struct qcom_pcie *pcie); void (*ltssm_enable)(struct qcom_pcie *pcie); }; @@ -141,13 +178,13 @@ struct qcom_pcie { static void qcom_ep_reset_assert(struct qcom_pcie *pcie) { - gpiod_set_value(pcie->reset, 1); + gpiod_set_value_cansleep(pcie->reset, 1); usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500); } static void qcom_ep_reset_deassert(struct qcom_pcie *pcie) { - gpiod_set_value(pcie->reset, 0); + gpiod_set_value_cansleep(pcie->reset, 0); usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500); } @@ -172,7 +209,7 @@ static int qcom_pcie_establish_link(struct qcom_pcie *pcie) return dw_pcie_wait_for_link(pci); } -static void qcom_pcie_v0_v1_ltssm_enable(struct qcom_pcie *pcie) +static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie) { u32 val; @@ -182,9 +219,9 @@ static void qcom_pcie_v0_v1_ltssm_enable(struct qcom_pcie *pcie) writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL); } -static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie) +static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie) { - struct qcom_pcie_resources_v0 *res = &pcie->res.v0; + struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; struct dw_pcie *pci = pcie->pci; struct device *dev = pci->dev; @@ -212,29 +249,29 @@ static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie) if (IS_ERR(res->phy_clk)) return PTR_ERR(res->phy_clk); - res->pci_reset = devm_reset_control_get(dev, "pci"); + res->pci_reset = devm_reset_control_get_exclusive(dev, "pci"); if (IS_ERR(res->pci_reset)) return PTR_ERR(res->pci_reset); - res->axi_reset = devm_reset_control_get(dev, "axi"); + res->axi_reset = devm_reset_control_get_exclusive(dev, "axi"); if (IS_ERR(res->axi_reset)) return PTR_ERR(res->axi_reset); - res->ahb_reset = devm_reset_control_get(dev, "ahb"); + res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb"); if (IS_ERR(res->ahb_reset)) return PTR_ERR(res->ahb_reset); - res->por_reset = devm_reset_control_get(dev, "por"); + res->por_reset = devm_reset_control_get_exclusive(dev, "por"); if (IS_ERR(res->por_reset)) return PTR_ERR(res->por_reset); - res->phy_reset = devm_reset_control_get(dev, "phy"); + res->phy_reset = devm_reset_control_get_exclusive(dev, "phy"); return PTR_ERR_OR_ZERO(res->phy_reset); } -static void qcom_pcie_deinit_v0(struct qcom_pcie *pcie) +static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie) { - struct qcom_pcie_resources_v0 *res = &pcie->res.v0; + struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; reset_control_assert(res->pci_reset); reset_control_assert(res->axi_reset); @@ -249,9 +286,9 @@ static void qcom_pcie_deinit_v0(struct qcom_pcie *pcie) 
regulator_disable(res->vdda_refclk); } -static int qcom_pcie_init_v0(struct qcom_pcie *pcie) +static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie) { - struct qcom_pcie_resources_v0 *res = &pcie->res.v0; + struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; struct dw_pcie *pci = pcie->pci; struct device *dev = pci->dev; u32 val; @@ -367,9 +404,9 @@ static int qcom_pcie_init_v0(struct qcom_pcie *pcie) return ret; } -static int qcom_pcie_get_resources_v1(struct qcom_pcie *pcie) +static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie) { - struct qcom_pcie_resources_v1 *res = &pcie->res.v1; + struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0; struct dw_pcie *pci = pcie->pci; struct device *dev = pci->dev; @@ -393,13 +430,13 @@ static int qcom_pcie_get_resources_v1(struct qcom_pcie *pcie) if (IS_ERR(res->slave_bus)) return PTR_ERR(res->slave_bus); - res->core = devm_reset_control_get(dev, "core"); + res->core = devm_reset_control_get_exclusive(dev, "core"); return PTR_ERR_OR_ZERO(res->core); } -static void qcom_pcie_deinit_v1(struct qcom_pcie *pcie) +static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie) { - struct qcom_pcie_resources_v1 *res = &pcie->res.v1; + struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0; reset_control_assert(res->core); clk_disable_unprepare(res->slave_bus); @@ -409,9 +446,9 @@ static void qcom_pcie_deinit_v1(struct qcom_pcie *pcie) regulator_disable(res->vdda); } -static int qcom_pcie_init_v1(struct qcom_pcie *pcie) +static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie) { - struct qcom_pcie_resources_v1 *res = &pcie->res.v1; + struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0; struct dw_pcie *pci = pcie->pci; struct device *dev = pci->dev; int ret; @@ -477,7 +514,7 @@ static int qcom_pcie_init_v1(struct qcom_pcie *pcie) return ret; } -static void qcom_pcie_v2_ltssm_enable(struct qcom_pcie *pcie) +static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie) { u32 val; @@ -487,9 +524,9 @@ static void qcom_pcie_v2_ltssm_enable(struct qcom_pcie *pcie) writel(val, pcie->parf + PCIE20_PARF_LTSSM); } -static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie) +static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie) { - struct qcom_pcie_resources_v2 *res = &pcie->res.v2; + struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; struct dw_pcie *pci = pcie->pci; struct device *dev = pci->dev; @@ -513,20 +550,26 @@ static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie) return PTR_ERR_OR_ZERO(res->pipe_clk); } -static void qcom_pcie_deinit_v2(struct qcom_pcie *pcie) +static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie) { - struct qcom_pcie_resources_v2 *res = &pcie->res.v2; + struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; - clk_disable_unprepare(res->pipe_clk); clk_disable_unprepare(res->slave_clk); clk_disable_unprepare(res->master_clk); clk_disable_unprepare(res->cfg_clk); clk_disable_unprepare(res->aux_clk); } -static int qcom_pcie_init_v2(struct qcom_pcie *pcie) +static void qcom_pcie_post_deinit_2_3_2(struct qcom_pcie *pcie) { - struct qcom_pcie_resources_v2 *res = &pcie->res.v2; + struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; + + clk_disable_unprepare(res->pipe_clk); +} + +static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; struct dw_pcie *pci = pcie->pci; struct device *dev = pci->dev; u32 val; @@ -589,9 +632,9 @@ static int qcom_pcie_init_v2(struct qcom_pcie *pcie) return ret; } -static int 
qcom_pcie_post_init_v2(struct qcom_pcie *pcie) +static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie) { - struct qcom_pcie_resources_v2 *res = &pcie->res.v2; + struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; struct dw_pcie *pci = pcie->pci; struct device *dev = pci->dev; int ret; @@ -605,9 +648,9 @@ static int qcom_pcie_post_init_v2(struct qcom_pcie *pcie) return 0; } -static int qcom_pcie_get_resources_v3(struct qcom_pcie *pcie) +static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie) { - struct qcom_pcie_resources_v3 *res = &pcie->res.v3; + struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; struct dw_pcie *pci = pcie->pci; struct device *dev = pci->dev; @@ -623,60 +666,64 @@ static int qcom_pcie_get_resources_v3(struct qcom_pcie *pcie) if (IS_ERR(res->slave_clk)) return PTR_ERR(res->slave_clk); - res->axi_m_reset = devm_reset_control_get(dev, "axi_m"); + res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m"); if (IS_ERR(res->axi_m_reset)) return PTR_ERR(res->axi_m_reset); - res->axi_s_reset = devm_reset_control_get(dev, "axi_s"); + res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s"); if (IS_ERR(res->axi_s_reset)) return PTR_ERR(res->axi_s_reset); - res->pipe_reset = devm_reset_control_get(dev, "pipe"); + res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe"); if (IS_ERR(res->pipe_reset)) return PTR_ERR(res->pipe_reset); - res->axi_m_vmid_reset = devm_reset_control_get(dev, "axi_m_vmid"); + res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev, + "axi_m_vmid"); if (IS_ERR(res->axi_m_vmid_reset)) return PTR_ERR(res->axi_m_vmid_reset); - res->axi_s_xpu_reset = devm_reset_control_get(dev, "axi_s_xpu"); + res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev, + "axi_s_xpu"); if (IS_ERR(res->axi_s_xpu_reset)) return PTR_ERR(res->axi_s_xpu_reset); - res->parf_reset = devm_reset_control_get(dev, "parf"); + res->parf_reset = devm_reset_control_get_exclusive(dev, "parf"); if (IS_ERR(res->parf_reset)) return PTR_ERR(res->parf_reset); - res->phy_reset = devm_reset_control_get(dev, "phy"); + res->phy_reset = devm_reset_control_get_exclusive(dev, "phy"); if (IS_ERR(res->phy_reset)) return PTR_ERR(res->phy_reset); - res->axi_m_sticky_reset = devm_reset_control_get(dev, "axi_m_sticky"); + res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev, + "axi_m_sticky"); if (IS_ERR(res->axi_m_sticky_reset)) return PTR_ERR(res->axi_m_sticky_reset); - res->pipe_sticky_reset = devm_reset_control_get(dev, "pipe_sticky"); + res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev, + "pipe_sticky"); if (IS_ERR(res->pipe_sticky_reset)) return PTR_ERR(res->pipe_sticky_reset); - res->pwr_reset = devm_reset_control_get(dev, "pwr"); + res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr"); if (IS_ERR(res->pwr_reset)) return PTR_ERR(res->pwr_reset); - res->ahb_reset = devm_reset_control_get(dev, "ahb"); + res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb"); if (IS_ERR(res->ahb_reset)) return PTR_ERR(res->ahb_reset); - res->phy_ahb_reset = devm_reset_control_get(dev, "phy_ahb"); + res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb"); if (IS_ERR(res->phy_ahb_reset)) return PTR_ERR(res->phy_ahb_reset); return 0; } -static void qcom_pcie_deinit_v3(struct qcom_pcie *pcie) +static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie) { - struct qcom_pcie_resources_v3 *res = &pcie->res.v3; + struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; reset_control_assert(res->axi_m_reset); 
reset_control_assert(res->axi_s_reset); @@ -692,9 +739,9 @@ static void qcom_pcie_deinit_v3(struct qcom_pcie *pcie) clk_disable_unprepare(res->slave_clk); } -static int qcom_pcie_init_v3(struct qcom_pcie *pcie) +static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie) { - struct qcom_pcie_resources_v3 *res = &pcie->res.v3; + struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; struct dw_pcie *pci = pcie->pci; struct device *dev = pci->dev; u32 val; @@ -884,6 +931,166 @@ static int qcom_pcie_init_v3(struct qcom_pcie *pcie) return ret; } +static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; + struct dw_pcie *pci = pcie->pci; + struct device *dev = pci->dev; + int i; + const char *rst_names[] = { "axi_m", "axi_s", "pipe", + "axi_m_sticky", "sticky", + "ahb", "sleep", }; + + res->iface = devm_clk_get(dev, "iface"); + if (IS_ERR(res->iface)) + return PTR_ERR(res->iface); + + res->axi_m_clk = devm_clk_get(dev, "axi_m"); + if (IS_ERR(res->axi_m_clk)) + return PTR_ERR(res->axi_m_clk); + + res->axi_s_clk = devm_clk_get(dev, "axi_s"); + if (IS_ERR(res->axi_s_clk)) + return PTR_ERR(res->axi_s_clk); + + res->ahb_clk = devm_clk_get(dev, "ahb"); + if (IS_ERR(res->ahb_clk)) + return PTR_ERR(res->ahb_clk); + + res->aux_clk = devm_clk_get(dev, "aux"); + if (IS_ERR(res->aux_clk)) + return PTR_ERR(res->aux_clk); + + for (i = 0; i < ARRAY_SIZE(rst_names); i++) { + res->rst[i] = devm_reset_control_get(dev, rst_names[i]); + if (IS_ERR(res->rst[i])) + return PTR_ERR(res->rst[i]); + } + + return 0; +} + +static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; + + clk_disable_unprepare(res->iface); + clk_disable_unprepare(res->axi_m_clk); + clk_disable_unprepare(res->axi_s_clk); + clk_disable_unprepare(res->ahb_clk); + clk_disable_unprepare(res->aux_clk); +} + +static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; + struct dw_pcie *pci = pcie->pci; + struct device *dev = pci->dev; + int i, ret; + u32 val; + + for (i = 0; i < ARRAY_SIZE(res->rst); i++) { + ret = reset_control_assert(res->rst[i]); + if (ret) { + dev_err(dev, "reset #%d assert failed (%d)\n", i, ret); + return ret; + } + } + + usleep_range(2000, 2500); + + for (i = 0; i < ARRAY_SIZE(res->rst); i++) { + ret = reset_control_deassert(res->rst[i]); + if (ret) { + dev_err(dev, "reset #%d deassert failed (%d)\n", i, + ret); + return ret; + } + } + + /* + * Don't have a way to see if the reset has completed. + * Wait for some time. 
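+	 * The 2 ms delay below matches the settling window already used
+	 * above, right after asserting the resets.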
+ */ + usleep_range(2000, 2500); + + ret = clk_prepare_enable(res->iface); + if (ret) { + dev_err(dev, "cannot prepare/enable core clock\n"); + goto err_clk_iface; + } + + ret = clk_prepare_enable(res->axi_m_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable core clock\n"); + goto err_clk_axi_m; + } + + ret = clk_prepare_enable(res->axi_s_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable axi slave clock\n"); + goto err_clk_axi_s; + } + + ret = clk_prepare_enable(res->ahb_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable ahb clock\n"); + goto err_clk_ahb; + } + + ret = clk_prepare_enable(res->aux_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable aux clock\n"); + goto err_clk_aux; + } + + writel(SLV_ADDR_SPACE_SZ, + pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE); + + val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); + val &= ~BIT(0); + writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); + + writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); + + writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS + | SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS | + AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS, + pcie->parf + PCIE20_PARF_SYS_CTRL); + writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH); + + writel(CMD_BME_VAL, pci->dbi_base + PCIE20_COMMAND_STATUS); + writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG); + writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + PCIE20_CAP_LINK_1); + + val = readl(pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES); + val &= ~PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT; + writel(val, pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES); + + writel(PCIE_CAP_CPL_TIMEOUT_DISABLE, pci->dbi_base + + PCIE20_DEVICE_CONTROL2_STATUS2); + + return 0; + +err_clk_aux: + clk_disable_unprepare(res->ahb_clk); +err_clk_ahb: + clk_disable_unprepare(res->axi_s_clk); +err_clk_axi_s: + clk_disable_unprepare(res->axi_m_clk); +err_clk_axi_m: + clk_disable_unprepare(res->iface); +err_clk_iface: + /* + * Not checking for failure, will anyway return + * the original failure in 'ret'. 
+ */ + for (i = 0; i < ARRAY_SIZE(res->rst); i++) + reset_control_assert(res->rst[i]); + + return ret; +} + static int qcom_pcie_link_up(struct dw_pcie *pci) { u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA); @@ -891,7 +1098,7 @@ static int qcom_pcie_link_up(struct dw_pcie *pci) return !!(val & PCI_EXP_LNKSTA_DLLLA); } -static void qcom_pcie_host_init(struct pcie_port *pp) +static int qcom_pcie_host_init(struct pcie_port *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct qcom_pcie *pcie = to_qcom_pcie(pci); @@ -901,14 +1108,17 @@ static void qcom_pcie_host_init(struct pcie_port *pp) ret = pcie->ops->init(pcie); if (ret) - goto err_deinit; + return ret; ret = phy_power_on(pcie->phy); if (ret) goto err_deinit; - if (pcie->ops->post_init) - pcie->ops->post_init(pcie); + if (pcie->ops->post_init) { + ret = pcie->ops->post_init(pcie); + if (ret) + goto err_disable_phy; + } dw_pcie_setup_rc(pp); @@ -921,12 +1131,17 @@ static void qcom_pcie_host_init(struct pcie_port *pp) if (ret) goto err; - return; + return 0; err: qcom_ep_reset_assert(pcie); + if (pcie->ops->post_deinit) + pcie->ops->post_deinit(pcie); +err_disable_phy: phy_power_off(pcie->phy); err_deinit: pcie->ops->deinit(pcie); + + return ret; } static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, @@ -950,39 +1165,52 @@ static const struct dw_pcie_host_ops qcom_pcie_dw_ops = { .rd_own_conf = qcom_pcie_rd_own_conf, }; -static const struct qcom_pcie_ops ops_v0 = { - .get_resources = qcom_pcie_get_resources_v0, - .init = qcom_pcie_init_v0, - .deinit = qcom_pcie_deinit_v0, - .ltssm_enable = qcom_pcie_v0_v1_ltssm_enable, +/* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */ +static const struct qcom_pcie_ops ops_2_1_0 = { + .get_resources = qcom_pcie_get_resources_2_1_0, + .init = qcom_pcie_init_2_1_0, + .deinit = qcom_pcie_deinit_2_1_0, + .ltssm_enable = qcom_pcie_2_1_0_ltssm_enable, }; -static const struct qcom_pcie_ops ops_v1 = { - .get_resources = qcom_pcie_get_resources_v1, - .init = qcom_pcie_init_v1, - .deinit = qcom_pcie_deinit_v1, - .ltssm_enable = qcom_pcie_v0_v1_ltssm_enable, +/* Qcom IP rev.: 1.0.0 Synopsys IP rev.: 4.11a */ +static const struct qcom_pcie_ops ops_1_0_0 = { + .get_resources = qcom_pcie_get_resources_1_0_0, + .init = qcom_pcie_init_1_0_0, + .deinit = qcom_pcie_deinit_1_0_0, + .ltssm_enable = qcom_pcie_2_1_0_ltssm_enable, }; -static const struct qcom_pcie_ops ops_v2 = { - .get_resources = qcom_pcie_get_resources_v2, - .init = qcom_pcie_init_v2, - .post_init = qcom_pcie_post_init_v2, - .deinit = qcom_pcie_deinit_v2, - .ltssm_enable = qcom_pcie_v2_ltssm_enable, +/* Qcom IP rev.: 2.3.2 Synopsys IP rev.: 4.21a */ +static const struct qcom_pcie_ops ops_2_3_2 = { + .get_resources = qcom_pcie_get_resources_2_3_2, + .init = qcom_pcie_init_2_3_2, + .post_init = qcom_pcie_post_init_2_3_2, + .deinit = qcom_pcie_deinit_2_3_2, + .post_deinit = qcom_pcie_post_deinit_2_3_2, + .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, +}; + +/* Qcom IP rev.: 2.4.0 Synopsys IP rev.: 4.20a */ +static const struct qcom_pcie_ops ops_2_4_0 = { + .get_resources = qcom_pcie_get_resources_2_4_0, + .init = qcom_pcie_init_2_4_0, + .deinit = qcom_pcie_deinit_2_4_0, + .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, +}; + +/* Qcom IP rev.: 2.3.3 Synopsys IP rev.: 4.30a */ +static const struct qcom_pcie_ops ops_2_3_3 = { + .get_resources = qcom_pcie_get_resources_2_3_3, + .init = qcom_pcie_init_2_3_3, + .deinit = qcom_pcie_deinit_2_3_3, + .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, }; static const struct dw_pcie_ops 
dw_pcie_ops = { .link_up = qcom_pcie_link_up, }; -static const struct qcom_pcie_ops ops_v3 = { - .get_resources = qcom_pcie_get_resources_v3, - .init = qcom_pcie_init_v3, - .deinit = qcom_pcie_deinit_v3, - .ltssm_enable = qcom_pcie_v2_ltssm_enable, -}; - static int qcom_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -1069,11 +1297,12 @@ static int qcom_pcie_probe(struct platform_device *pdev) } static const struct of_device_id qcom_pcie_match[] = { - { .compatible = "qcom,pcie-ipq8064", .data = &ops_v0 }, - { .compatible = "qcom,pcie-apq8064", .data = &ops_v0 }, - { .compatible = "qcom,pcie-apq8084", .data = &ops_v1 }, - { .compatible = "qcom,pcie-msm8996", .data = &ops_v2 }, - { .compatible = "qcom,pcie-ipq4019", .data = &ops_v3 }, + { .compatible = "qcom,pcie-apq8084", .data = &ops_1_0_0 }, + { .compatible = "qcom,pcie-ipq8064", .data = &ops_2_1_0 }, + { .compatible = "qcom,pcie-apq8064", .data = &ops_2_1_0 }, + { .compatible = "qcom,pcie-msm8996", .data = &ops_2_3_2 }, + { .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 }, + { .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 }, { } }; diff --git a/drivers/pci/dwc/pcie-spear13xx.c b/drivers/pci/dwc/pcie-spear13xx.c index 80897291e0fb..709189d23b31 100644 --- a/drivers/pci/dwc/pcie-spear13xx.c +++ b/drivers/pci/dwc/pcie-spear13xx.c @@ -177,13 +177,15 @@ static int spear13xx_pcie_link_up(struct dw_pcie *pci) return 0; } -static void spear13xx_pcie_host_init(struct pcie_port *pp) +static int spear13xx_pcie_host_init(struct pcie_port *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci); spear13xx_pcie_establish_link(spear13xx_pcie); spear13xx_pcie_enable_interrupts(spear13xx_pcie); + + return 0; } static const struct dw_pcie_host_ops spear13xx_pcie_host_ops = { @@ -199,9 +201,9 @@ static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie, int ret; pp->irq = platform_get_irq(pdev, 0); - if (!pp->irq) { + if (pp->irq < 0) { dev_err(dev, "failed to get irq\n"); - return -ENODEV; + return pp->irq; } ret = devm_request_irq(dev, pp->irq, spear13xx_pcie_irq_handler, IRQF_SHARED | IRQF_NO_THREAD, diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c index 53fff8030337..f9308c2f22e6 100644 --- a/drivers/pci/endpoint/functions/pci-epf-test.c +++ b/drivers/pci/endpoint/functions/pci-epf-test.c @@ -54,6 +54,8 @@ static struct workqueue_struct *kpcitest_workqueue; struct pci_epf_test { void *reg[6]; struct pci_epf *epf; + enum pci_barno test_reg_bar; + bool linkup_notifier; struct delayed_work cmd_handler; }; @@ -74,7 +76,12 @@ static struct pci_epf_header test_header = { .interrupt_pin = PCI_INTERRUPT_INTA, }; -static int bar_size[] = { 512, 1024, 16384, 131072, 1048576 }; +struct pci_epf_test_data { + enum pci_barno test_reg_bar; + bool linkup_notifier; +}; + +static int bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 }; static int pci_epf_test_copy(struct pci_epf_test *epf_test) { @@ -86,7 +93,8 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test) struct pci_epf *epf = epf_test->epf; struct device *dev = &epf->dev; struct pci_epc *epc = epf->epc; - struct pci_epf_test_reg *reg = epf_test->reg[0]; + enum pci_barno test_reg_bar = epf_test->test_reg_bar; + struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar]; src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size); if (!src_addr) { @@ -145,7 +153,8 @@ static int pci_epf_test_read(struct pci_epf_test 
*epf_test) struct pci_epf *epf = epf_test->epf; struct device *dev = &epf->dev; struct pci_epc *epc = epf->epc; - struct pci_epf_test_reg *reg = epf_test->reg[0]; + enum pci_barno test_reg_bar = epf_test->test_reg_bar; + struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar]; src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size); if (!src_addr) { @@ -195,7 +204,8 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test) struct pci_epf *epf = epf_test->epf; struct device *dev = &epf->dev; struct pci_epc *epc = epf->epc; - struct pci_epf_test_reg *reg = epf_test->reg[0]; + enum pci_barno test_reg_bar = epf_test->test_reg_bar; + struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar]; dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size); if (!dst_addr) { @@ -241,17 +251,16 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test) return ret; } -static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test) +static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq) { - u8 irq; u8 msi_count; struct pci_epf *epf = epf_test->epf; struct pci_epc *epc = epf->epc; - struct pci_epf_test_reg *reg = epf_test->reg[0]; + enum pci_barno test_reg_bar = epf_test->test_reg_bar; + struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar]; reg->status |= STATUS_IRQ_RAISED; msi_count = pci_epc_get_msi(epc); - irq = (reg->command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT; if (irq > msi_count || msi_count <= 0) pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0); else @@ -263,54 +272,61 @@ static void pci_epf_test_cmd_handler(struct work_struct *work) int ret; u8 irq; u8 msi_count; + u32 command; struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test, cmd_handler.work); struct pci_epf *epf = epf_test->epf; struct pci_epc *epc = epf->epc; - struct pci_epf_test_reg *reg = epf_test->reg[0]; + enum pci_barno test_reg_bar = epf_test->test_reg_bar; + struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar]; - if (!reg->command) + command = reg->command; + if (!command) goto reset_handler; - if (reg->command & COMMAND_RAISE_LEGACY_IRQ) { + reg->command = 0; + reg->status = 0; + + irq = (command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT; + + if (command & COMMAND_RAISE_LEGACY_IRQ) { reg->status = STATUS_IRQ_RAISED; pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0); goto reset_handler; } - if (reg->command & COMMAND_WRITE) { + if (command & COMMAND_WRITE) { ret = pci_epf_test_write(epf_test); if (ret) reg->status |= STATUS_WRITE_FAIL; else reg->status |= STATUS_WRITE_SUCCESS; - pci_epf_test_raise_irq(epf_test); + pci_epf_test_raise_irq(epf_test, irq); goto reset_handler; } - if (reg->command & COMMAND_READ) { + if (command & COMMAND_READ) { ret = pci_epf_test_read(epf_test); if (!ret) reg->status |= STATUS_READ_SUCCESS; else reg->status |= STATUS_READ_FAIL; - pci_epf_test_raise_irq(epf_test); + pci_epf_test_raise_irq(epf_test, irq); goto reset_handler; } - if (reg->command & COMMAND_COPY) { + if (command & COMMAND_COPY) { ret = pci_epf_test_copy(epf_test); if (!ret) reg->status |= STATUS_COPY_SUCCESS; else reg->status |= STATUS_COPY_FAIL; - pci_epf_test_raise_irq(epf_test); + pci_epf_test_raise_irq(epf_test, irq); goto reset_handler; } - if (reg->command & COMMAND_RAISE_MSI_IRQ) { + if (command & COMMAND_RAISE_MSI_IRQ) { msi_count = pci_epc_get_msi(epc); - irq = (reg->command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT; if (irq > msi_count || msi_count <= 0) goto reset_handler; reg->status = STATUS_IRQ_RAISED; @@ -319,8 +335,6 @@ static void 
pci_epf_test_cmd_handler(struct work_struct *work) } reset_handler: - reg->command = 0; - queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler, msecs_to_jiffies(1)); } @@ -358,6 +372,7 @@ static int pci_epf_test_set_bar(struct pci_epf *epf) struct pci_epc *epc = epf->epc; struct device *dev = &epf->dev; struct pci_epf_test *epf_test = epf_get_drvdata(epf); + enum pci_barno test_reg_bar = epf_test->test_reg_bar; flags = PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_32; if (sizeof(dma_addr_t) == 0x8) @@ -370,7 +385,7 @@ static int pci_epf_test_set_bar(struct pci_epf *epf) if (ret) { pci_epf_free_space(epf, epf_test->reg[bar], bar); dev_err(dev, "failed to set BAR%d\n", bar); - if (bar == BAR_0) + if (bar == test_reg_bar) return ret; } } @@ -384,17 +399,20 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf) struct device *dev = &epf->dev; void *base; int bar; + enum pci_barno test_reg_bar = epf_test->test_reg_bar; base = pci_epf_alloc_space(epf, sizeof(struct pci_epf_test_reg), - BAR_0); + test_reg_bar); if (!base) { dev_err(dev, "failed to allocated register space\n"); return -ENOMEM; } - epf_test->reg[0] = base; + epf_test->reg[test_reg_bar] = base; - for (bar = BAR_1; bar <= BAR_5; bar++) { - base = pci_epf_alloc_space(epf, bar_size[bar - 1], bar); + for (bar = BAR_0; bar <= BAR_5; bar++) { + if (bar == test_reg_bar) + continue; + base = pci_epf_alloc_space(epf, bar_size[bar], bar); if (!base) dev_err(dev, "failed to allocate space for BAR%d\n", bar); @@ -407,6 +425,7 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf) static int pci_epf_test_bind(struct pci_epf *epf) { int ret; + struct pci_epf_test *epf_test = epf_get_drvdata(epf); struct pci_epf_header *header = epf->header; struct pci_epc *epc = epf->epc; struct device *dev = &epf->dev; @@ -432,13 +451,34 @@ static int pci_epf_test_bind(struct pci_epf *epf) if (ret) return ret; + if (!epf_test->linkup_notifier) + queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work); + return 0; } +static const struct pci_epf_device_id pci_epf_test_ids[] = { + { + .name = "pci_epf_test", + }, + {}, +}; + static int pci_epf_test_probe(struct pci_epf *epf) { struct pci_epf_test *epf_test; struct device *dev = &epf->dev; + const struct pci_epf_device_id *match; + struct pci_epf_test_data *data; + enum pci_barno test_reg_bar = BAR_0; + bool linkup_notifier = true; + + match = pci_epf_match_device(pci_epf_test_ids, epf); + data = (struct pci_epf_test_data *)match->driver_data; + if (data) { + test_reg_bar = data->test_reg_bar; + linkup_notifier = data->linkup_notifier; + } epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL); if (!epf_test) @@ -446,6 +486,8 @@ static int pci_epf_test_probe(struct pci_epf *epf) epf->header = &test_header; epf_test->epf = epf; + epf_test->test_reg_bar = test_reg_bar; + epf_test->linkup_notifier = linkup_notifier; INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler); @@ -453,31 +495,15 @@ static int pci_epf_test_probe(struct pci_epf *epf) return 0; } -static int pci_epf_test_remove(struct pci_epf *epf) -{ - struct pci_epf_test *epf_test = epf_get_drvdata(epf); - - kfree(epf_test); - return 0; -} - static struct pci_epf_ops ops = { .unbind = pci_epf_test_unbind, .bind = pci_epf_test_bind, .linkup = pci_epf_test_linkup, }; -static const struct pci_epf_device_id pci_epf_test_ids[] = { - { - .name = "pci_epf_test", - }, - {}, -}; - static struct pci_epf_driver test_driver = { .driver.name = "pci_epf_test", .probe = pci_epf_test_probe, - .remove = pci_epf_test_remove, 
.id_table = pci_epf_test_ids, .ops = &ops, .owner = THIS_MODULE, diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c index caa7be10e473..42c2a1156325 100644 --- a/drivers/pci/endpoint/pci-epc-core.c +++ b/drivers/pci/endpoint/pci-epc-core.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -370,6 +371,7 @@ EXPORT_SYMBOL_GPL(pci_epc_write_header); int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf) { unsigned long flags; + struct device *dev = epc->dev.parent; if (epf->epc) return -EBUSY; @@ -381,8 +383,12 @@ int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf) return -EINVAL; epf->epc = epc; - dma_set_coherent_mask(&epf->dev, epc->dev.coherent_dma_mask); - epf->dev.dma_mask = epc->dev.dma_mask; + if (dev->of_node) { + of_dma_configure(&epf->dev, dev->of_node); + } else { + dma_set_coherent_mask(&epf->dev, epc->dev.coherent_dma_mask); + epf->dev.dma_mask = epc->dev.dma_mask; + } spin_lock_irqsave(&epc->lock, flags); list_add_tail(&epf->list, &epc->pci_epf); @@ -500,6 +506,7 @@ __pci_epc_create(struct device *dev, const struct pci_epc_ops *ops, dma_set_coherent_mask(&epc->dev, dev->coherent_dma_mask); epc->dev.class = pci_epc_class; epc->dev.dma_mask = dev->dma_mask; + epc->dev.parent = dev; epc->ops = ops; ret = dev_set_name(&epc->dev, "%s", dev_name(dev)); diff --git a/drivers/pci/endpoint/pci-epc-mem.c b/drivers/pci/endpoint/pci-epc-mem.c index 3a94cc1caf22..83b7d5d3fc3e 100644 --- a/drivers/pci/endpoint/pci-epc-mem.c +++ b/drivers/pci/endpoint/pci-epc-mem.c @@ -24,21 +24,54 @@ #include /** - * pci_epc_mem_init() - initialize the pci_epc_mem structure + * pci_epc_mem_get_order() - determine the allocation order of a memory size + * @mem: address space of the endpoint controller + * @size: the size for which to get the order + * + * Reimplement get_order() for mem->page_size since the generic get_order + * always gets order with a constant PAGE_SIZE. + */ +static int pci_epc_mem_get_order(struct pci_epc_mem *mem, size_t size) +{ + int order; + unsigned int page_shift = ilog2(mem->page_size); + + size--; + size >>= page_shift; +#if BITS_PER_LONG == 32 + order = fls(size); +#else + order = fls64(size); +#endif + return order; +} + +/** + * __pci_epc_mem_init() - initialize the pci_epc_mem structure * @epc: the EPC device that invoked pci_epc_mem_init * @phys_base: the physical address of the base * @size: the size of the address space + * @page_size: size of each page * * Invoke to initialize the pci_epc_mem structure used by the * endpoint functions to allocate mapped PCI address. 
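+ * If @page_size is smaller than PAGE_SIZE it is rounded up to PAGE_SIZE.
+ * pci_epc_mem_alloc_addr() then hands out regions rounded up to a
+ * power-of-two number of @page_size pages.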
*/ -int pci_epc_mem_init(struct pci_epc *epc, phys_addr_t phys_base, size_t size) +int __pci_epc_mem_init(struct pci_epc *epc, phys_addr_t phys_base, size_t size, + size_t page_size) { int ret; struct pci_epc_mem *mem; unsigned long *bitmap; - int pages = size >> PAGE_SHIFT; - int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long); + unsigned int page_shift; + int pages; + int bitmap_size; + + if (page_size < PAGE_SIZE) + page_size = PAGE_SIZE; + + page_shift = ilog2(page_size); + pages = size >> page_shift; + bitmap_size = BITS_TO_LONGS(pages) * sizeof(long); mem = kzalloc(sizeof(*mem), GFP_KERNEL); if (!mem) { @@ -54,6 +87,7 @@ int pci_epc_mem_init(struct pci_epc *epc, phys_addr_t phys_base, size_t size) mem->bitmap = bitmap; mem->phys_base = phys_base; + mem->page_size = page_size; mem->pages = pages; mem->size = size; @@ -67,7 +101,7 @@ int pci_epc_mem_init(struct pci_epc *epc, phys_addr_t phys_base, size_t size) err: return ret; } -EXPORT_SYMBOL_GPL(pci_epc_mem_init); +EXPORT_SYMBOL_GPL(__pci_epc_mem_init); /** * pci_epc_mem_exit() - cleanup the pci_epc_mem structure @@ -101,13 +135,17 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc, int pageno; void __iomem *virt_addr; struct pci_epc_mem *mem = epc->mem; - int order = get_order(size); + unsigned int page_shift = ilog2(mem->page_size); + int order; + + size = ALIGN(size, mem->page_size); + order = pci_epc_mem_get_order(mem, size); pageno = bitmap_find_free_region(mem->bitmap, mem->pages, order); if (pageno < 0) return NULL; - *phys_addr = mem->phys_base + (pageno << PAGE_SHIFT); + *phys_addr = mem->phys_base + (pageno << page_shift); virt_addr = ioremap(*phys_addr, size); if (!virt_addr) bitmap_release_region(mem->bitmap, pageno, order); @@ -129,11 +167,14 @@ void pci_epc_mem_free_addr(struct pci_epc *epc, phys_addr_t phys_addr, void __iomem *virt_addr, size_t size) { int pageno; - int order = get_order(size); struct pci_epc_mem *mem = epc->mem; + unsigned int page_shift = ilog2(mem->page_size); + int order; iounmap(virt_addr); - pageno = (phys_addr - mem->phys_base) >> PAGE_SHIFT; + pageno = (phys_addr - mem->phys_base) >> page_shift; + size = ALIGN(size, mem->page_size); + order = pci_epc_mem_get_order(mem, size); bitmap_release_region(mem->bitmap, pageno, order); } EXPORT_SYMBOL_GPL(pci_epc_mem_free_addr); diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c index 6877d6a5bcc9..ae1611a62808 100644 --- a/drivers/pci/endpoint/pci-epf-core.c +++ b/drivers/pci/endpoint/pci-epf-core.c @@ -27,7 +27,7 @@ #include static struct bus_type pci_epf_bus_type; -static struct device_type pci_epf_type; +static const struct device_type pci_epf_type; /** * pci_epf_linkup() - Notify the function driver that EPC device has @@ -267,6 +267,22 @@ struct pci_epf *pci_epf_create(const char *name) } EXPORT_SYMBOL_GPL(pci_epf_create); +const struct pci_epf_device_id * +pci_epf_match_device(const struct pci_epf_device_id *id, struct pci_epf *epf) +{ + if (!id || !epf) + return NULL; + + while (*id->name) { + if (strcmp(epf->name, id->name) == 0) + return id; + id++; + } + + return NULL; +} +EXPORT_SYMBOL_GPL(pci_epf_match_device); + static void pci_epf_dev_release(struct device *dev) { struct pci_epf *epf = to_pci_epf(dev); @@ -275,7 +291,7 @@ static void pci_epf_dev_release(struct device *dev) kfree(epf); } -static struct device_type pci_epf_type = { +static const struct device_type pci_epf_type = { .release = pci_epf_dev_release, }; @@ -317,11 +333,12 @@ static int pci_epf_device_probe(struct device *dev) static 
int pci_epf_device_remove(struct device *dev) { - int ret; + int ret = 0; struct pci_epf *epf = to_pci_epf(dev); struct pci_epf_driver *driver = to_pci_epf_driver(dev->driver); - ret = driver->remove(epf); + if (driver->remove) + ret = driver->remove(epf); epf->driver = NULL; return ret; diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig index 89d61c2cbfaa..b868803792d8 100644 --- a/drivers/pci/host/Kconfig +++ b/drivers/pci/host/Kconfig @@ -71,7 +71,7 @@ config PCI_HOST_GENERIC config PCIE_XILINX bool "Xilinx AXI PCIe host bridge support" - depends on ARCH_ZYNQ || MICROBLAZE + depends on ARCH_ZYNQ || MICROBLAZE || (MIPS && PCI_DRIVERS_GENERIC) help Say 'Y' here if you want kernel to support the Xilinx AXI PCIe Host Bridge driver. @@ -182,14 +182,13 @@ config PCIE_ROCKCHIP config PCIE_MEDIATEK bool "MediaTek PCIe controller" - depends on ARM && (ARCH_MEDIATEK || COMPILE_TEST) + depends on (ARM || ARM64) && (ARCH_MEDIATEK || COMPILE_TEST) depends on OF depends on PCI select PCIEPORTBUS help Say Y here if you want to enable PCIe controller support on - MT7623 series SoCs. There is one single root complex with 3 root - ports available. Each port supports Gen2 lane x1. + MediaTek SoCs. config PCIE_TANGO_SMP8759 bool "Tango SMP8759 PCIe controller (DANGEROUS)" diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c index 5fb9b620ac78..89f4e3d072d7 100644 --- a/drivers/pci/host/pci-aardvark.c +++ b/drivers/pci/host/pci-aardvark.c @@ -191,7 +191,6 @@ #define LINK_WAIT_USLEEP_MIN 90000 #define LINK_WAIT_USLEEP_MAX 100000 -#define LEGACY_IRQ_NUM 4 #define MSI_IRQ_NUM 32 struct advk_pcie { @@ -729,7 +728,7 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie) irq_chip->irq_unmask = advk_pcie_irq_unmask; pcie->irq_domain = - irq_domain_add_linear(pcie_intc_node, LEGACY_IRQ_NUM, + irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, &advk_pcie_irq_domain_ops, pcie); if (!pcie->irq_domain) { dev_err(dev, "Failed to get a INTx IRQ domain\n"); @@ -786,7 +785,7 @@ static void advk_pcie_handle_int(struct advk_pcie *pcie) advk_pcie_handle_msi(pcie); /* Process legacy interrupts */ - for (i = 0; i < LEGACY_IRQ_NUM; i++) { + for (i = 0; i < PCI_NUM_INTX; i++) { if (!(status & PCIE_ISR0_INTX_ASSERT(i))) continue; diff --git a/drivers/pci/host/pci-ftpci100.c b/drivers/pci/host/pci-ftpci100.c index 5162dffc102b..96028f01bc90 100644 --- a/drivers/pci/host/pci-ftpci100.c +++ b/drivers/pci/host/pci-ftpci100.c @@ -350,12 +350,12 @@ static int faraday_pci_setup_cascaded_irq(struct faraday_pci *p) /* All PCI IRQs cascade off this one */ irq = of_irq_get(intc, 0); - if (!irq) { + if (irq <= 0) { dev_err(p->dev, "failed to get parent IRQ\n"); - return -EINVAL; + return irq ?: -EINVAL; } - p->irqdomain = irq_domain_add_linear(intc, 4, + p->irqdomain = irq_domain_add_linear(intc, PCI_NUM_INTX, &faraday_pci_irqdomain_ops, p); if (!p->irqdomain) { dev_err(p->dev, "failed to create Gemini PCI IRQ domain\n"); diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c index 415dcc69a502..0fe3ea164ee5 100644 --- a/drivers/pci/host/pci-hyperv.c +++ b/drivers/pci/host/pci-hyperv.c @@ -50,6 +50,7 @@ #include #include #include +#include #include #include #include @@ -562,52 +563,6 @@ static void put_pcichild(struct hv_pci_dev *hv_pcidev, static void get_hvpcibus(struct hv_pcibus_device *hv_pcibus); static void put_hvpcibus(struct hv_pcibus_device *hv_pcibus); - -/* - * Temporary CPU to vCPU mapping to address transitioning - * vmbus_cpu_number_to_vp_number() being 
migrated to - * hv_cpu_number_to_vp_number() in a separate patch. Once that patch - * has been picked up in the main line, remove this code here and use - * the official code. - */ -static struct hv_tmpcpumap -{ - bool initialized; - u32 vp_index[NR_CPUS]; -} hv_tmpcpumap; - -static void hv_tmpcpumap_init_cpu(void *_unused) -{ - int cpu = smp_processor_id(); - u64 vp_index; - - hv_get_vp_index(vp_index); - - hv_tmpcpumap.vp_index[cpu] = vp_index; -} - -static void hv_tmpcpumap_init(void) -{ - if (hv_tmpcpumap.initialized) - return; - - memset(hv_tmpcpumap.vp_index, -1, sizeof(hv_tmpcpumap.vp_index)); - on_each_cpu(hv_tmpcpumap_init_cpu, NULL, true); - hv_tmpcpumap.initialized = true; -} - -/** - * hv_tmp_cpu_nr_to_vp_nr() - Convert Linux CPU nr to Hyper-V vCPU nr - * - * Remove once vmbus_cpu_number_to_vp_number() has been converted to - * hv_cpu_number_to_vp_number() and replace callers appropriately. - */ -static u32 hv_tmp_cpu_nr_to_vp_nr(int cpu) -{ - return hv_tmpcpumap.vp_index[cpu]; -} - - /** * devfn_to_wslot() - Convert from Linux PCI slot to Windows * @devfn: The Linux representation of PCI slot @@ -971,7 +926,7 @@ static void hv_irq_unmask(struct irq_data *data) var_size = 1 + HV_VP_SET_BANK_COUNT_MAX; for_each_cpu_and(cpu, dest, cpu_online_mask) { - cpu_vmbus = hv_tmp_cpu_nr_to_vp_nr(cpu); + cpu_vmbus = hv_cpu_number_to_vp_number(cpu); if (cpu_vmbus >= HV_VP_SET_BANK_COUNT_MAX * 64) { dev_err(&hbus->hdev->device, @@ -986,7 +941,7 @@ static void hv_irq_unmask(struct irq_data *data) } else { for_each_cpu_and(cpu, dest, cpu_online_mask) { params->int_target.vp_mask |= - (1ULL << hv_tmp_cpu_nr_to_vp_nr(cpu)); + (1ULL << hv_cpu_number_to_vp_number(cpu)); } } @@ -1063,7 +1018,7 @@ static u32 hv_compose_msi_req_v2( */ cpu = cpumask_first_and(affinity, cpu_online_mask); int_pkt->int_desc.processor_array[0] = - hv_tmp_cpu_nr_to_vp_nr(cpu); + hv_cpu_number_to_vp_number(cpu); int_pkt->int_desc.processor_count = 1; return sizeof(*int_pkt); @@ -1159,7 +1114,12 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) goto free_int_desc; } - wait_for_completion(&comp.comp_pkt.host_event); + /* + * Since this function is called with IRQ locks held, can't + * do normal wait for completion; instead poll. + */ + while (!try_wait_for_completion(&comp.comp_pkt.host_event)) + udelay(100); if (comp.comp_pkt.completion_status < 0) { dev_err(&hbus->hdev->device, @@ -2490,8 +2450,6 @@ static int hv_pci_probe(struct hv_device *hdev, return -ENOMEM; hbus->state = hv_pcibus_init; - hv_tmpcpumap_init(); - /* * The PCI bus "domain" is what is called "segment" in ACPI and * other specs. 
Pull it from the instance ID, to get something diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c index f353a6eb2f01..8d88f19dc171 100644 --- a/drivers/pci/host/pci-mvebu.c +++ b/drivers/pci/host/pci-mvebu.c @@ -1054,8 +1054,8 @@ static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie, port->pcie = pcie; if (of_property_read_u32(child, "marvell,pcie-port", &port->port)) { - dev_warn(dev, "ignoring %s, missing pcie-port property\n", - of_node_full_name(child)); + dev_warn(dev, "ignoring %pOF, missing pcie-port property\n", + child); goto skip; } @@ -1106,8 +1106,8 @@ static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie, } if (flags & OF_GPIO_ACTIVE_LOW) { - dev_info(dev, "%s: reset gpio is active low\n", - of_node_full_name(child)); + dev_info(dev, "%pOF: reset gpio is active low\n", + child); gpio_flags = GPIOF_ACTIVE_LOW | GPIOF_OUT_INIT_LOW; } else { @@ -1186,8 +1186,7 @@ static int mvebu_pcie_powerup(struct mvebu_pcie_port *port) */ static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port) { - if (port->reset_gpio) - gpiod_set_value_cansleep(port->reset_gpio, 1); + gpiod_set_value_cansleep(port->reset_gpio, 1); clk_disable_unprepare(port->clk); } diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c index b3722b7709df..9c40da54f88a 100644 --- a/drivers/pci/host/pci-tegra.c +++ b/drivers/pci/host/pci-tegra.c @@ -1147,15 +1147,15 @@ static int tegra_pcie_resets_get(struct tegra_pcie *pcie) { struct device *dev = pcie->dev; - pcie->pex_rst = devm_reset_control_get(dev, "pex"); + pcie->pex_rst = devm_reset_control_get_exclusive(dev, "pex"); if (IS_ERR(pcie->pex_rst)) return PTR_ERR(pcie->pex_rst); - pcie->afi_rst = devm_reset_control_get(dev, "afi"); + pcie->afi_rst = devm_reset_control_get_exclusive(dev, "afi"); if (IS_ERR(pcie->afi_rst)) return PTR_ERR(pcie->afi_rst); - pcie->pcie_xrst = devm_reset_control_get(dev, "pcie_x"); + pcie->pcie_xrst = devm_reset_control_get_exclusive(dev, "pcie_x"); if (IS_ERR(pcie->pcie_xrst)) return PTR_ERR(pcie->pcie_xrst); @@ -1703,8 +1703,7 @@ static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie) pcie->num_supplies = 2; if (pcie->num_supplies == 0) { - dev_err(dev, "device %s not supported in legacy mode\n", - np->full_name); + dev_err(dev, "device %pOF not supported in legacy mode\n", np); return -ENODEV; } diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c index f1b633bce525..1f42a202b021 100644 --- a/drivers/pci/host/pci-xgene-msi.c +++ b/drivers/pci/host/pci-xgene-msi.c @@ -489,7 +489,7 @@ static int xgene_msi_probe(struct platform_device *pdev) if (virt_msir < 0) { dev_err(&pdev->dev, "Cannot translate IRQ index %d\n", irq_index); - rc = -EINVAL; + rc = virt_msir; goto error; } xgene_msi->msi_groups[irq_index].gic_irq = virt_msir; diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c index bd897479a215..087645116ecb 100644 --- a/drivers/pci/host/pci-xgene.c +++ b/drivers/pci/host/pci-xgene.c @@ -61,7 +61,7 @@ #define SZ_1T (SZ_1G*1024ULL) #define PIPE_PHY_RATE_RD(src) ((0xc000 & (u32)(src)) >> 0xe) -#define ROOT_CAP_AND_CTRL 0x5C +#define XGENE_V1_PCI_EXP_CAP 0x40 /* PCIe IP version */ #define XGENE_PCIE_IP_VER_UNKN 0 @@ -160,7 +160,7 @@ static bool xgene_pcie_hide_rc_bars(struct pci_bus *bus, int offset) } static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, - int offset) + int offset) { if ((pci_is_root_bus(bus) && devfn != 0) || xgene_pcie_hide_rc_bars(bus, offset)) @@ -189,7 +189,7 @@ static int 
xgene_pcie_config_read32(struct pci_bus *bus, unsigned int devfn, * Avoid this by not claiming to support CRS. */ if (pci_is_root_bus(bus) && (port->version == XGENE_PCIE_IP_VER_1) && - ((where & ~0x3) == ROOT_CAP_AND_CTRL)) + ((where & ~0x3) == XGENE_V1_PCI_EXP_CAP + PCI_EXP_RTCTL)) *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16); if (size <= 2) @@ -265,12 +265,12 @@ static int xgene_v1_pcie_ecam_init(struct pci_config_window *cfg) } struct pci_ecam_ops xgene_v1_pcie_ecam_ops = { - .bus_shift = 16, - .init = xgene_v1_pcie_ecam_init, - .pci_ops = { - .map_bus = xgene_pcie_map_bus, - .read = xgene_pcie_config_read32, - .write = pci_generic_config_write, + .bus_shift = 16, + .init = xgene_v1_pcie_ecam_init, + .pci_ops = { + .map_bus = xgene_pcie_map_bus, + .read = xgene_pcie_config_read32, + .write = pci_generic_config_write, } }; @@ -280,12 +280,12 @@ static int xgene_v2_pcie_ecam_init(struct pci_config_window *cfg) } struct pci_ecam_ops xgene_v2_pcie_ecam_ops = { - .bus_shift = 16, - .init = xgene_v2_pcie_ecam_init, - .pci_ops = { - .map_bus = xgene_pcie_map_bus, - .read = xgene_pcie_config_read32, - .write = pci_generic_config_write, + .bus_shift = 16, + .init = xgene_v2_pcie_ecam_init, + .pci_ops = { + .map_bus = xgene_pcie_map_bus, + .read = xgene_pcie_config_read32, + .write = pci_generic_config_write, } }; #endif @@ -318,7 +318,7 @@ static u64 xgene_pcie_set_ib_mask(struct xgene_pcie_port *port, u32 addr, } static void xgene_pcie_linkup(struct xgene_pcie_port *port, - u32 *lanes, u32 *speed) + u32 *lanes, u32 *speed) { u32 val32; @@ -593,8 +593,7 @@ static void xgene_pcie_clear_config(struct xgene_pcie_port *port) xgene_pcie_writel(port, i, 0); } -static int xgene_pcie_setup(struct xgene_pcie_port *port, - struct list_head *res, +static int xgene_pcie_setup(struct xgene_pcie_port *port, struct list_head *res, resource_size_t io_base) { struct device *dev = port->dev; @@ -706,9 +705,9 @@ static const struct of_device_id xgene_pcie_match_table[] = { static struct platform_driver xgene_pcie_driver = { .driver = { - .name = "xgene-pcie", - .of_match_table = of_match_ptr(xgene_pcie_match_table), - .suppress_bind_attrs = true, + .name = "xgene-pcie", + .of_match_table = of_match_ptr(xgene_pcie_match_table), + .suppress_bind_attrs = true, }, .probe = xgene_pcie_probe_bridge, }; diff --git a/drivers/pci/host/pcie-altera-msi.c b/drivers/pci/host/pcie-altera-msi.c index 4e5d628e8cd4..d8141f4865de 100644 --- a/drivers/pci/host/pcie-altera-msi.c +++ b/drivers/pci/host/pcie-altera-msi.c @@ -64,13 +64,11 @@ static void altera_msi_isr(struct irq_desc *desc) struct irq_chip *chip = irq_desc_get_chip(desc); struct altera_msi *msi; unsigned long status; - u32 num_of_vectors; u32 bit; u32 virq; chained_irq_enter(chip, desc); msi = irq_desc_get_handler_data(desc); - num_of_vectors = msi->num_of_vectors; while ((status = msi_readl(msi, MSI_STATUS)) != 0) { for_each_set_bit(bit, &status, msi->num_of_vectors) { @@ -267,9 +265,9 @@ static int altera_msi_probe(struct platform_device *pdev) return ret; msi->irq = platform_get_irq(pdev, 0); - if (msi->irq <= 0) { + if (msi->irq < 0) { dev_err(&pdev->dev, "failed to map IRQ: %d\n", msi->irq); - ret = -ENODEV; + ret = msi->irq; goto err; } diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c index 4ea4f8f5dc77..b468b8cccf8d 100644 --- a/drivers/pci/host/pcie-altera.c +++ b/drivers/pci/host/pcie-altera.c @@ -76,8 +76,6 @@ #define LINK_UP_TIMEOUT HZ #define LINK_RETRAIN_TIMEOUT HZ -#define INTX_NUM 4 - #define DWORD_MASK 3 struct altera_pcie { @@ 
-464,6 +462,7 @@ static int altera_pcie_intx_map(struct irq_domain *domain, unsigned int irq, static const struct irq_domain_ops intx_domain_ops = { .map = altera_pcie_intx_map, + .xlate = pci_irqd_intx_xlate, }; static void altera_pcie_isr(struct irq_desc *desc) @@ -481,11 +480,11 @@ static void altera_pcie_isr(struct irq_desc *desc) while ((status = cra_readl(pcie, P2A_INT_STATUS) & P2A_INT_STS_ALL) != 0) { - for_each_set_bit(bit, &status, INTX_NUM) { + for_each_set_bit(bit, &status, PCI_NUM_INTX) { /* clear interrupts */ cra_writel(pcie, 1 << bit, P2A_INT_STATUS); - virq = irq_find_mapping(pcie->irq_domain, bit + 1); + virq = irq_find_mapping(pcie->irq_domain, bit); if (virq) generic_handle_irq(virq); else @@ -536,7 +535,7 @@ static int altera_pcie_init_irq_domain(struct altera_pcie *pcie) struct device_node *node = dev->of_node; /* Setup INTx */ - pcie->irq_domain = irq_domain_add_linear(node, INTX_NUM + 1, + pcie->irq_domain = irq_domain_add_linear(node, PCI_NUM_INTX, &intx_domain_ops, pcie); if (!pcie->irq_domain) { dev_err(dev, "Failed to get a INTx IRQ domain\n"); @@ -559,9 +558,9 @@ static int altera_pcie_parse_dt(struct altera_pcie *pcie) /* setup IRQ */ pcie->irq = platform_get_irq(pdev, 0); - if (pcie->irq <= 0) { + if (pcie->irq < 0) { dev_err(dev, "failed to get IRQ: %d\n", pcie->irq); - return -EINVAL; + return pcie->irq; } irq_set_chained_handler_and_data(pcie->irq, altera_pcie_isr, pcie); diff --git a/drivers/pci/host/pcie-iproc-msi.c b/drivers/pci/host/pcie-iproc-msi.c index 9fad7915f82a..2d0f535a2f69 100644 --- a/drivers/pci/host/pcie-iproc-msi.c +++ b/drivers/pci/host/pcie-iproc-msi.c @@ -317,7 +317,6 @@ static void iproc_msi_handler(struct irq_desc *desc) struct irq_chip *chip = irq_desc_get_chip(desc); struct iproc_msi_grp *grp; struct iproc_msi *msi; - struct iproc_pcie *pcie; u32 eq, head, tail, nr_events; unsigned long hwirq; int virq; @@ -326,7 +325,6 @@ static void iproc_msi_handler(struct irq_desc *desc) grp = irq_desc_get_handler_data(desc); msi = grp->msi; - pcie = msi->pcie; eq = grp->eq; /* diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c index 22531190bc40..a5073a921a04 100644 --- a/drivers/pci/host/pcie-iproc-platform.c +++ b/drivers/pci/host/pcie-iproc-platform.c @@ -134,6 +134,13 @@ static int iproc_pcie_pltfm_remove(struct platform_device *pdev) return iproc_pcie_remove(pcie); } +static void iproc_pcie_pltfm_shutdown(struct platform_device *pdev) +{ + struct iproc_pcie *pcie = platform_get_drvdata(pdev); + + iproc_pcie_shutdown(pcie); +} + static struct platform_driver iproc_pcie_pltfm_driver = { .driver = { .name = "iproc-pcie", @@ -141,6 +148,7 @@ static struct platform_driver iproc_pcie_pltfm_driver = { }, .probe = iproc_pcie_pltfm_probe, .remove = iproc_pcie_pltfm_remove, + .shutdown = iproc_pcie_pltfm_shutdown, }; module_platform_driver(iproc_pcie_pltfm_driver); diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c index c57486348856..3a8b9d20ee57 100644 --- a/drivers/pci/host/pcie-iproc.c +++ b/drivers/pci/host/pcie-iproc.c @@ -31,68 +31,71 @@ #include "pcie-iproc.h" -#define EP_PERST_SOURCE_SELECT_SHIFT 2 -#define EP_PERST_SOURCE_SELECT BIT(EP_PERST_SOURCE_SELECT_SHIFT) -#define EP_MODE_SURVIVE_PERST_SHIFT 1 -#define EP_MODE_SURVIVE_PERST BIT(EP_MODE_SURVIVE_PERST_SHIFT) -#define RC_PCIE_RST_OUTPUT_SHIFT 0 -#define RC_PCIE_RST_OUTPUT BIT(RC_PCIE_RST_OUTPUT_SHIFT) -#define PAXC_RESET_MASK 0x7f +#define EP_PERST_SOURCE_SELECT_SHIFT 2 +#define EP_PERST_SOURCE_SELECT 
BIT(EP_PERST_SOURCE_SELECT_SHIFT) +#define EP_MODE_SURVIVE_PERST_SHIFT 1 +#define EP_MODE_SURVIVE_PERST BIT(EP_MODE_SURVIVE_PERST_SHIFT) +#define RC_PCIE_RST_OUTPUT_SHIFT 0 +#define RC_PCIE_RST_OUTPUT BIT(RC_PCIE_RST_OUTPUT_SHIFT) +#define PAXC_RESET_MASK 0x7f -#define GIC_V3_CFG_SHIFT 0 -#define GIC_V3_CFG BIT(GIC_V3_CFG_SHIFT) +#define GIC_V3_CFG_SHIFT 0 +#define GIC_V3_CFG BIT(GIC_V3_CFG_SHIFT) -#define MSI_ENABLE_CFG_SHIFT 0 -#define MSI_ENABLE_CFG BIT(MSI_ENABLE_CFG_SHIFT) +#define MSI_ENABLE_CFG_SHIFT 0 +#define MSI_ENABLE_CFG BIT(MSI_ENABLE_CFG_SHIFT) -#define CFG_IND_ADDR_MASK 0x00001ffc +#define CFG_IND_ADDR_MASK 0x00001ffc -#define CFG_ADDR_BUS_NUM_SHIFT 20 -#define CFG_ADDR_BUS_NUM_MASK 0x0ff00000 -#define CFG_ADDR_DEV_NUM_SHIFT 15 -#define CFG_ADDR_DEV_NUM_MASK 0x000f8000 -#define CFG_ADDR_FUNC_NUM_SHIFT 12 -#define CFG_ADDR_FUNC_NUM_MASK 0x00007000 -#define CFG_ADDR_REG_NUM_SHIFT 2 -#define CFG_ADDR_REG_NUM_MASK 0x00000ffc -#define CFG_ADDR_CFG_TYPE_SHIFT 0 -#define CFG_ADDR_CFG_TYPE_MASK 0x00000003 +#define CFG_ADDR_BUS_NUM_SHIFT 20 +#define CFG_ADDR_BUS_NUM_MASK 0x0ff00000 +#define CFG_ADDR_DEV_NUM_SHIFT 15 +#define CFG_ADDR_DEV_NUM_MASK 0x000f8000 +#define CFG_ADDR_FUNC_NUM_SHIFT 12 +#define CFG_ADDR_FUNC_NUM_MASK 0x00007000 +#define CFG_ADDR_REG_NUM_SHIFT 2 +#define CFG_ADDR_REG_NUM_MASK 0x00000ffc +#define CFG_ADDR_CFG_TYPE_SHIFT 0 +#define CFG_ADDR_CFG_TYPE_MASK 0x00000003 -#define SYS_RC_INTX_MASK 0xf +#define SYS_RC_INTX_MASK 0xf -#define PCIE_PHYLINKUP_SHIFT 3 -#define PCIE_PHYLINKUP BIT(PCIE_PHYLINKUP_SHIFT) -#define PCIE_DL_ACTIVE_SHIFT 2 -#define PCIE_DL_ACTIVE BIT(PCIE_DL_ACTIVE_SHIFT) +#define PCIE_PHYLINKUP_SHIFT 3 +#define PCIE_PHYLINKUP BIT(PCIE_PHYLINKUP_SHIFT) +#define PCIE_DL_ACTIVE_SHIFT 2 +#define PCIE_DL_ACTIVE BIT(PCIE_DL_ACTIVE_SHIFT) -#define APB_ERR_EN_SHIFT 0 -#define APB_ERR_EN BIT(APB_ERR_EN_SHIFT) +#define APB_ERR_EN_SHIFT 0 +#define APB_ERR_EN BIT(APB_ERR_EN_SHIFT) + +#define CFG_RETRY_STATUS 0xffff0001 +#define CFG_RETRY_STATUS_TIMEOUT_US 500000 /* 500 milliseconds */ /* derive the enum index of the outbound/inbound mapping registers */ -#define MAP_REG(base_reg, index) ((base_reg) + (index) * 2) +#define MAP_REG(base_reg, index) ((base_reg) + (index) * 2) /* * Maximum number of outbound mapping window sizes that can be supported by any * OARR/OMAP mapping pair */ -#define MAX_NUM_OB_WINDOW_SIZES 4 +#define MAX_NUM_OB_WINDOW_SIZES 4 -#define OARR_VALID_SHIFT 0 -#define OARR_VALID BIT(OARR_VALID_SHIFT) -#define OARR_SIZE_CFG_SHIFT 1 +#define OARR_VALID_SHIFT 0 +#define OARR_VALID BIT(OARR_VALID_SHIFT) +#define OARR_SIZE_CFG_SHIFT 1 /* * Maximum number of inbound mapping region sizes that can be supported by an * IARR */ -#define MAX_NUM_IB_REGION_SIZES 9 +#define MAX_NUM_IB_REGION_SIZES 9 -#define IMAP_VALID_SHIFT 0 -#define IMAP_VALID BIT(IMAP_VALID_SHIFT) +#define IMAP_VALID_SHIFT 0 +#define IMAP_VALID BIT(IMAP_VALID_SHIFT) -#define PCI_EXP_CAP 0xac +#define IPROC_PCI_EXP_CAP 0xac -#define IPROC_PCIE_REG_INVALID 0xffff +#define IPROC_PCIE_REG_INVALID 0xffff /** * iProc PCIe outbound mapping controller specific parameters @@ -304,80 +307,80 @@ enum iproc_pcie_reg { /* iProc PCIe PAXB BCMA registers */ static const u16 iproc_pcie_reg_paxb_bcma[] = { - [IPROC_PCIE_CLK_CTRL] = 0x000, - [IPROC_PCIE_CFG_IND_ADDR] = 0x120, - [IPROC_PCIE_CFG_IND_DATA] = 0x124, - [IPROC_PCIE_CFG_ADDR] = 0x1f8, - [IPROC_PCIE_CFG_DATA] = 0x1fc, - [IPROC_PCIE_INTX_EN] = 0x330, - [IPROC_PCIE_LINK_STATUS] = 0xf0c, + [IPROC_PCIE_CLK_CTRL] = 0x000, + [IPROC_PCIE_CFG_IND_ADDR] 
= 0x120, + [IPROC_PCIE_CFG_IND_DATA] = 0x124, + [IPROC_PCIE_CFG_ADDR] = 0x1f8, + [IPROC_PCIE_CFG_DATA] = 0x1fc, + [IPROC_PCIE_INTX_EN] = 0x330, + [IPROC_PCIE_LINK_STATUS] = 0xf0c, }; /* iProc PCIe PAXB registers */ static const u16 iproc_pcie_reg_paxb[] = { - [IPROC_PCIE_CLK_CTRL] = 0x000, - [IPROC_PCIE_CFG_IND_ADDR] = 0x120, - [IPROC_PCIE_CFG_IND_DATA] = 0x124, - [IPROC_PCIE_CFG_ADDR] = 0x1f8, - [IPROC_PCIE_CFG_DATA] = 0x1fc, - [IPROC_PCIE_INTX_EN] = 0x330, - [IPROC_PCIE_OARR0] = 0xd20, - [IPROC_PCIE_OMAP0] = 0xd40, - [IPROC_PCIE_OARR1] = 0xd28, - [IPROC_PCIE_OMAP1] = 0xd48, - [IPROC_PCIE_LINK_STATUS] = 0xf0c, - [IPROC_PCIE_APB_ERR_EN] = 0xf40, + [IPROC_PCIE_CLK_CTRL] = 0x000, + [IPROC_PCIE_CFG_IND_ADDR] = 0x120, + [IPROC_PCIE_CFG_IND_DATA] = 0x124, + [IPROC_PCIE_CFG_ADDR] = 0x1f8, + [IPROC_PCIE_CFG_DATA] = 0x1fc, + [IPROC_PCIE_INTX_EN] = 0x330, + [IPROC_PCIE_OARR0] = 0xd20, + [IPROC_PCIE_OMAP0] = 0xd40, + [IPROC_PCIE_OARR1] = 0xd28, + [IPROC_PCIE_OMAP1] = 0xd48, + [IPROC_PCIE_LINK_STATUS] = 0xf0c, + [IPROC_PCIE_APB_ERR_EN] = 0xf40, }; /* iProc PCIe PAXB v2 registers */ static const u16 iproc_pcie_reg_paxb_v2[] = { - [IPROC_PCIE_CLK_CTRL] = 0x000, - [IPROC_PCIE_CFG_IND_ADDR] = 0x120, - [IPROC_PCIE_CFG_IND_DATA] = 0x124, - [IPROC_PCIE_CFG_ADDR] = 0x1f8, - [IPROC_PCIE_CFG_DATA] = 0x1fc, - [IPROC_PCIE_INTX_EN] = 0x330, - [IPROC_PCIE_OARR0] = 0xd20, - [IPROC_PCIE_OMAP0] = 0xd40, - [IPROC_PCIE_OARR1] = 0xd28, - [IPROC_PCIE_OMAP1] = 0xd48, - [IPROC_PCIE_OARR2] = 0xd60, - [IPROC_PCIE_OMAP2] = 0xd68, - [IPROC_PCIE_OARR3] = 0xdf0, - [IPROC_PCIE_OMAP3] = 0xdf8, - [IPROC_PCIE_IARR0] = 0xd00, - [IPROC_PCIE_IMAP0] = 0xc00, - [IPROC_PCIE_IARR2] = 0xd10, - [IPROC_PCIE_IMAP2] = 0xcc0, - [IPROC_PCIE_IARR3] = 0xe00, - [IPROC_PCIE_IMAP3] = 0xe08, - [IPROC_PCIE_IARR4] = 0xe68, - [IPROC_PCIE_IMAP4] = 0xe70, - [IPROC_PCIE_LINK_STATUS] = 0xf0c, - [IPROC_PCIE_APB_ERR_EN] = 0xf40, + [IPROC_PCIE_CLK_CTRL] = 0x000, + [IPROC_PCIE_CFG_IND_ADDR] = 0x120, + [IPROC_PCIE_CFG_IND_DATA] = 0x124, + [IPROC_PCIE_CFG_ADDR] = 0x1f8, + [IPROC_PCIE_CFG_DATA] = 0x1fc, + [IPROC_PCIE_INTX_EN] = 0x330, + [IPROC_PCIE_OARR0] = 0xd20, + [IPROC_PCIE_OMAP0] = 0xd40, + [IPROC_PCIE_OARR1] = 0xd28, + [IPROC_PCIE_OMAP1] = 0xd48, + [IPROC_PCIE_OARR2] = 0xd60, + [IPROC_PCIE_OMAP2] = 0xd68, + [IPROC_PCIE_OARR3] = 0xdf0, + [IPROC_PCIE_OMAP3] = 0xdf8, + [IPROC_PCIE_IARR0] = 0xd00, + [IPROC_PCIE_IMAP0] = 0xc00, + [IPROC_PCIE_IARR2] = 0xd10, + [IPROC_PCIE_IMAP2] = 0xcc0, + [IPROC_PCIE_IARR3] = 0xe00, + [IPROC_PCIE_IMAP3] = 0xe08, + [IPROC_PCIE_IARR4] = 0xe68, + [IPROC_PCIE_IMAP4] = 0xe70, + [IPROC_PCIE_LINK_STATUS] = 0xf0c, + [IPROC_PCIE_APB_ERR_EN] = 0xf40, }; /* iProc PCIe PAXC v1 registers */ static const u16 iproc_pcie_reg_paxc[] = { - [IPROC_PCIE_CLK_CTRL] = 0x000, - [IPROC_PCIE_CFG_IND_ADDR] = 0x1f0, - [IPROC_PCIE_CFG_IND_DATA] = 0x1f4, - [IPROC_PCIE_CFG_ADDR] = 0x1f8, - [IPROC_PCIE_CFG_DATA] = 0x1fc, + [IPROC_PCIE_CLK_CTRL] = 0x000, + [IPROC_PCIE_CFG_IND_ADDR] = 0x1f0, + [IPROC_PCIE_CFG_IND_DATA] = 0x1f4, + [IPROC_PCIE_CFG_ADDR] = 0x1f8, + [IPROC_PCIE_CFG_DATA] = 0x1fc, }; /* iProc PCIe PAXC v2 registers */ static const u16 iproc_pcie_reg_paxc_v2[] = { - [IPROC_PCIE_MSI_GIC_MODE] = 0x050, - [IPROC_PCIE_MSI_BASE_ADDR] = 0x074, - [IPROC_PCIE_MSI_WINDOW_SIZE] = 0x078, - [IPROC_PCIE_MSI_ADDR_LO] = 0x07c, - [IPROC_PCIE_MSI_ADDR_HI] = 0x080, - [IPROC_PCIE_MSI_EN_CFG] = 0x09c, - [IPROC_PCIE_CFG_IND_ADDR] = 0x1f0, - [IPROC_PCIE_CFG_IND_DATA] = 0x1f4, - [IPROC_PCIE_CFG_ADDR] = 0x1f8, - [IPROC_PCIE_CFG_DATA] = 0x1fc, + [IPROC_PCIE_MSI_GIC_MODE] = 
0x050, + [IPROC_PCIE_MSI_BASE_ADDR] = 0x074, + [IPROC_PCIE_MSI_WINDOW_SIZE] = 0x078, + [IPROC_PCIE_MSI_ADDR_LO] = 0x07c, + [IPROC_PCIE_MSI_ADDR_HI] = 0x080, + [IPROC_PCIE_MSI_EN_CFG] = 0x09c, + [IPROC_PCIE_CFG_IND_ADDR] = 0x1f0, + [IPROC_PCIE_CFG_IND_DATA] = 0x1f4, + [IPROC_PCIE_CFG_ADDR] = 0x1f8, + [IPROC_PCIE_CFG_DATA] = 0x1fc, }; static inline struct iproc_pcie *iproc_data(struct pci_bus *bus) @@ -448,18 +451,112 @@ static inline void iproc_pcie_apb_err_disable(struct pci_bus *bus, } } +static void __iomem *iproc_pcie_map_ep_cfg_reg(struct iproc_pcie *pcie, + unsigned int busno, + unsigned int slot, + unsigned int fn, + int where) +{ + u16 offset; + u32 val; + + /* EP device access */ + val = (busno << CFG_ADDR_BUS_NUM_SHIFT) | + (slot << CFG_ADDR_DEV_NUM_SHIFT) | + (fn << CFG_ADDR_FUNC_NUM_SHIFT) | + (where & CFG_ADDR_REG_NUM_MASK) | + (1 & CFG_ADDR_CFG_TYPE_MASK); + + iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_ADDR, val); + offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_DATA); + + if (iproc_pcie_reg_is_invalid(offset)) + return NULL; + + return (pcie->base + offset); +} + +static unsigned int iproc_pcie_cfg_retry(void __iomem *cfg_data_p) +{ + int timeout = CFG_RETRY_STATUS_TIMEOUT_US; + unsigned int data; + + /* + * As per PCIe spec r3.1, sec 2.3.2, CRS Software Visibility only + * affects config reads of the Vendor ID. For config writes or any + * other config reads, the Root may automatically reissue the + * configuration request again as a new request. + * + * For config reads, this hardware returns CFG_RETRY_STATUS data + * when it receives a CRS completion, regardless of the address of + * the read or the CRS Software Visibility Enable bit. As a + * partial workaround for this, we retry in software any read that + * returns CFG_RETRY_STATUS. + * + * Note that a non-Vendor ID config register may have a value of + * CFG_RETRY_STATUS. If we read that, we can't distinguish it from + * a CRS completion, so we will incorrectly retry the read and + * eventually return the wrong data (0xffffffff). 
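+	 *
+	 * The loop below polls once per microsecond and gives up after
+	 * CFG_RETRY_STATUS_TIMEOUT_US attempts (500 ms), at which point the
+	 * read completes with all ones, as a failed config read would.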
+ */ + data = readl(cfg_data_p); + while (data == CFG_RETRY_STATUS && timeout--) { + udelay(1); + data = readl(cfg_data_p); + } + + if (data == CFG_RETRY_STATUS) + data = 0xffffffff; + + return data; +} + +static int iproc_pcie_config_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + struct iproc_pcie *pcie = iproc_data(bus); + unsigned int slot = PCI_SLOT(devfn); + unsigned int fn = PCI_FUNC(devfn); + unsigned int busno = bus->number; + void __iomem *cfg_data_p; + unsigned int data; + int ret; + + /* root complex access */ + if (busno == 0) { + ret = pci_generic_config_read32(bus, devfn, where, size, val); + if (ret != PCIBIOS_SUCCESSFUL) + return ret; + + /* Don't advertise CRS SV support */ + if ((where & ~0x3) == IPROC_PCI_EXP_CAP + PCI_EXP_RTCTL) + *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16); + return PCIBIOS_SUCCESSFUL; + } + + cfg_data_p = iproc_pcie_map_ep_cfg_reg(pcie, busno, slot, fn, where); + + if (!cfg_data_p) + return PCIBIOS_DEVICE_NOT_FOUND; + + data = iproc_pcie_cfg_retry(cfg_data_p); + + *val = data; + if (size <= 2) + *val = (data >> (8 * (where & 3))) & ((1 << (size * 8)) - 1); + + return PCIBIOS_SUCCESSFUL; +} + /** * Note access to the configuration registers are protected at the higher layer * by 'pci_lock' in drivers/pci/access.c */ static void __iomem *iproc_pcie_map_cfg_bus(struct iproc_pcie *pcie, - int busno, - unsigned int devfn, + int busno, unsigned int devfn, int where) { unsigned slot = PCI_SLOT(devfn); unsigned fn = PCI_FUNC(devfn); - u32 val; u16 offset; /* root complex access */ @@ -484,18 +581,7 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct iproc_pcie *pcie, if (slot > 0) return NULL; - /* EP device access */ - val = (busno << CFG_ADDR_BUS_NUM_SHIFT) | - (slot << CFG_ADDR_DEV_NUM_SHIFT) | - (fn << CFG_ADDR_FUNC_NUM_SHIFT) | - (where & CFG_ADDR_REG_NUM_MASK) | - (1 & CFG_ADDR_CFG_TYPE_MASK); - iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_ADDR, val); - offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_DATA); - if (iproc_pcie_reg_is_invalid(offset)) - return NULL; - else - return (pcie->base + offset); + return iproc_pcie_map_ep_cfg_reg(pcie, busno, slot, fn, where); } static void __iomem *iproc_pcie_bus_map_cfg_bus(struct pci_bus *bus, @@ -554,9 +640,13 @@ static int iproc_pcie_config_read32(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { int ret; + struct iproc_pcie *pcie = iproc_data(bus); iproc_pcie_apb_err_disable(bus, true); - ret = pci_generic_config_read32(bus, devfn, where, size, val); + if (pcie->type == IPROC_PCIE_PAXB_V2) + ret = iproc_pcie_config_read(bus, devfn, where, size, val); + else + ret = pci_generic_config_read32(bus, devfn, where, size, val); iproc_pcie_apb_err_disable(bus, false); return ret; @@ -580,7 +670,7 @@ static struct pci_ops iproc_pcie_ops = { .write = iproc_pcie_config_write32, }; -static void iproc_pcie_reset(struct iproc_pcie *pcie) +static void iproc_pcie_perst_ctrl(struct iproc_pcie *pcie, bool assert) { u32 val; @@ -592,26 +682,33 @@ static void iproc_pcie_reset(struct iproc_pcie *pcie) if (pcie->ep_is_internal) return; - /* - * Select perst_b signal as reset source. 
Put the device into reset, - * and then bring it out of reset - */ - val = iproc_pcie_read_reg(pcie, IPROC_PCIE_CLK_CTRL); - val &= ~EP_PERST_SOURCE_SELECT & ~EP_MODE_SURVIVE_PERST & - ~RC_PCIE_RST_OUTPUT; - iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val); - udelay(250); - - val |= RC_PCIE_RST_OUTPUT; - iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val); - msleep(100); + if (assert) { + val = iproc_pcie_read_reg(pcie, IPROC_PCIE_CLK_CTRL); + val &= ~EP_PERST_SOURCE_SELECT & ~EP_MODE_SURVIVE_PERST & + ~RC_PCIE_RST_OUTPUT; + iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val); + udelay(250); + } else { + val = iproc_pcie_read_reg(pcie, IPROC_PCIE_CLK_CTRL); + val |= RC_PCIE_RST_OUTPUT; + iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val); + msleep(100); + } } +int iproc_pcie_shutdown(struct iproc_pcie *pcie) +{ + iproc_pcie_perst_ctrl(pcie, true); + msleep(500); + + return 0; +} +EXPORT_SYMBOL_GPL(iproc_pcie_shutdown); + static int iproc_pcie_check_link(struct iproc_pcie *pcie) { struct device *dev = pcie->dev; u32 hdr_type, link_ctrl, link_status, class, val; - u16 pos = PCI_EXP_CAP; bool link_is_active = false; /* @@ -628,16 +725,16 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie) } /* make sure we are not in EP mode */ - iproc_pci_raw_config_read32(pcie, 0, PCI_HEADER_TYPE, 1, &hdr_type); + iproc_pci_raw_config_read32(pcie, 0, PCI_HEADER_TYPE, 1, &hdr_type); if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE) { dev_err(dev, "in EP mode, hdr=%#02x\n", hdr_type); return -EFAULT; } /* force class to PCI_CLASS_BRIDGE_PCI (0x0604) */ -#define PCI_BRIDGE_CTRL_REG_OFFSET 0x43c -#define PCI_CLASS_BRIDGE_MASK 0xffff00 -#define PCI_CLASS_BRIDGE_SHIFT 8 +#define PCI_BRIDGE_CTRL_REG_OFFSET 0x43c +#define PCI_CLASS_BRIDGE_MASK 0xffff00 +#define PCI_CLASS_BRIDGE_SHIFT 8 iproc_pci_raw_config_read32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET, 4, &class); class &= ~PCI_CLASS_BRIDGE_MASK; @@ -646,31 +743,31 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie) 4, class); /* check link status to see if link is active */ - iproc_pci_raw_config_read32(pcie, 0, pos + PCI_EXP_LNKSTA, + iproc_pci_raw_config_read32(pcie, 0, IPROC_PCI_EXP_CAP + PCI_EXP_LNKSTA, 2, &link_status); if (link_status & PCI_EXP_LNKSTA_NLW) link_is_active = true; if (!link_is_active) { /* try GEN 1 link speed */ -#define PCI_TARGET_LINK_SPEED_MASK 0xf -#define PCI_TARGET_LINK_SPEED_GEN2 0x2 -#define PCI_TARGET_LINK_SPEED_GEN1 0x1 +#define PCI_TARGET_LINK_SPEED_MASK 0xf +#define PCI_TARGET_LINK_SPEED_GEN2 0x2 +#define PCI_TARGET_LINK_SPEED_GEN1 0x1 iproc_pci_raw_config_read32(pcie, 0, - pos + PCI_EXP_LNKCTL2, 4, - &link_ctrl); + IPROC_PCI_EXP_CAP + PCI_EXP_LNKCTL2, + 4, &link_ctrl); if ((link_ctrl & PCI_TARGET_LINK_SPEED_MASK) == PCI_TARGET_LINK_SPEED_GEN2) { link_ctrl &= ~PCI_TARGET_LINK_SPEED_MASK; link_ctrl |= PCI_TARGET_LINK_SPEED_GEN1; iproc_pci_raw_config_write32(pcie, 0, - pos + PCI_EXP_LNKCTL2, - 4, link_ctrl); + IPROC_PCI_EXP_CAP + PCI_EXP_LNKCTL2, + 4, link_ctrl); msleep(100); iproc_pci_raw_config_read32(pcie, 0, - pos + PCI_EXP_LNKSTA, - 2, &link_status); + IPROC_PCI_EXP_CAP + PCI_EXP_LNKSTA, + 2, &link_status); if (link_status & PCI_EXP_LNKSTA_NLW) link_is_active = true; } @@ -1223,6 +1320,8 @@ static int iproc_pcie_rev_init(struct iproc_pcie *pcie) pcie->ib.nr_regions = ARRAY_SIZE(paxb_v2_ib_map); pcie->ib_map = paxb_v2_ib_map; pcie->need_msi_steer = true; + dev_warn(dev, "reads of config registers that contain %#x return incorrect data\n", + CFG_RETRY_STATUS); break; case IPROC_PCIE_PAXC: regs = 
iproc_pcie_reg_paxc; @@ -1286,7 +1385,8 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res) goto err_exit_phy; } - iproc_pcie_reset(pcie); + iproc_pcie_perst_ctrl(pcie, true); + iproc_pcie_perst_ctrl(pcie, false); if (pcie->need_ob_cfg) { ret = iproc_pcie_map_ranges(pcie, res); diff --git a/drivers/pci/host/pcie-iproc.h b/drivers/pci/host/pcie-iproc.h index 0bbe2ea44f3e..a6b55cec9a66 100644 --- a/drivers/pci/host/pcie-iproc.h +++ b/drivers/pci/host/pcie-iproc.h @@ -110,6 +110,7 @@ struct iproc_pcie { int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res); int iproc_pcie_remove(struct iproc_pcie *pcie); +int iproc_pcie_shutdown(struct iproc_pcie *pcie); #ifdef CONFIG_PCIE_IPROC_MSI int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node); diff --git a/drivers/pci/host/pcie-mediatek.c b/drivers/pci/host/pcie-mediatek.c index 5a9d8589ea0b..db93efdf1d63 100644 --- a/drivers/pci/host/pcie-mediatek.c +++ b/drivers/pci/host/pcie-mediatek.c @@ -3,6 +3,7 @@ * * Copyright (c) 2017 MediaTek Inc. * Author: Ryder Lee + * Honghui Zhang * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -16,6 +17,9 @@ #include #include +#include +#include +#include #include #include #include @@ -63,16 +67,104 @@ #define PCIE_FC_CREDIT_MASK (GENMASK(31, 31) | GENMASK(28, 16)) #define PCIE_FC_CREDIT_VAL(x) ((x) << 16) +/* PCIe V2 share registers */ +#define PCIE_SYS_CFG_V2 0x0 +#define PCIE_CSR_LTSSM_EN(x) BIT(0 + (x) * 8) +#define PCIE_CSR_ASPM_L1_EN(x) BIT(1 + (x) * 8) + +/* PCIe V2 per-port registers */ +#define PCIE_MSI_VECTOR 0x0c0 +#define PCIE_INT_MASK 0x420 +#define INTX_MASK GENMASK(19, 16) +#define INTX_SHIFT 16 +#define PCIE_INT_STATUS 0x424 +#define MSI_STATUS BIT(23) +#define PCIE_IMSI_STATUS 0x42c +#define PCIE_IMSI_ADDR 0x430 +#define MSI_MASK BIT(23) +#define MTK_MSI_IRQS_NUM 32 + +#define PCIE_AHB_TRANS_BASE0_L 0x438 +#define PCIE_AHB_TRANS_BASE0_H 0x43c +#define AHB2PCIE_SIZE(x) ((x) & GENMASK(4, 0)) +#define PCIE_AXI_WINDOW0 0x448 +#define WIN_ENABLE BIT(7) + +/* PCIe V2 configuration transaction header */ +#define PCIE_CFG_HEADER0 0x460 +#define PCIE_CFG_HEADER1 0x464 +#define PCIE_CFG_HEADER2 0x468 +#define PCIE_CFG_WDATA 0x470 +#define PCIE_APP_TLP_REQ 0x488 +#define PCIE_CFG_RDATA 0x48c +#define APP_CFG_REQ BIT(0) +#define APP_CPL_STATUS GENMASK(7, 5) + +#define CFG_WRRD_TYPE_0 4 +#define CFG_WR_FMT 2 +#define CFG_RD_FMT 0 + +#define CFG_DW0_LENGTH(length) ((length) & GENMASK(9, 0)) +#define CFG_DW0_TYPE(type) (((type) << 24) & GENMASK(28, 24)) +#define CFG_DW0_FMT(fmt) (((fmt) << 29) & GENMASK(31, 29)) +#define CFG_DW2_REGN(regn) ((regn) & GENMASK(11, 2)) +#define CFG_DW2_FUN(fun) (((fun) << 16) & GENMASK(18, 16)) +#define CFG_DW2_DEV(dev) (((dev) << 19) & GENMASK(23, 19)) +#define CFG_DW2_BUS(bus) (((bus) << 24) & GENMASK(31, 24)) +#define CFG_HEADER_DW0(type, fmt) \ + (CFG_DW0_LENGTH(1) | CFG_DW0_TYPE(type) | CFG_DW0_FMT(fmt)) +#define CFG_HEADER_DW1(where, size) \ + (GENMASK(((size) - 1), 0) << ((where) & 0x3)) +#define CFG_HEADER_DW2(regn, fun, dev, bus) \ + (CFG_DW2_REGN(regn) | CFG_DW2_FUN(fun) | \ + CFG_DW2_DEV(dev) | CFG_DW2_BUS(bus)) + +#define PCIE_RST_CTRL 0x510 +#define PCIE_PHY_RSTB BIT(0) +#define PCIE_PIPE_SRSTB BIT(1) +#define PCIE_MAC_SRSTB BIT(2) +#define PCIE_CRSTB BIT(3) +#define PCIE_PERSTB BIT(8) +#define PCIE_LINKDOWN_RST_EN GENMASK(15, 13) +#define PCIE_LINK_STATUS_V2 0x804 +#define PCIE_PORT_LINKUP_V2 BIT(10) + +struct mtk_pcie_port; + 
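
The CFG_HEADER_DW0/DW1/DW2 macros above hand-build the three header dwords of a Type 0 configuration TLP, which mtk_pcie_hw_rd_cfg()/mtk_pcie_hw_wr_cfg() later write into PCIE_CFG_HEADER0..2 for the hardware to transmit. Below is a minimal userspace sketch of the same bit packing, useful for checking a header value by eye; the GENMASK() helper is re-derived locally for 32-bit fields and the bus/device/function/offset values are made-up examples, so this illustrates the field layout only and is not driver code.

#include <stdio.h>

/* Local stand-in for the kernel's GENMASK(), 32-bit fields only. */
#define GENMASK(h, l)		((0xffffffffu >> (31 - (h))) & (0xffffffffu << (l)))

#define CFG_WRRD_TYPE_0		4
#define CFG_RD_FMT		0

#define CFG_DW0_LENGTH(length)	((length) & GENMASK(9, 0))
#define CFG_DW0_TYPE(type)	(((type) << 24) & GENMASK(28, 24))
#define CFG_DW0_FMT(fmt)	(((fmt) << 29) & GENMASK(31, 29))
#define CFG_DW2_REGN(regn)	((regn) & GENMASK(11, 2))
#define CFG_DW2_FUN(fun)	(((fun) << 16) & GENMASK(18, 16))
#define CFG_DW2_DEV(dev)	(((dev) << 19) & GENMASK(23, 19))
#define CFG_DW2_BUS(bus)	(((bus) << 24) & GENMASK(31, 24))

int main(void)
{
	/* Example only: 4-byte config read of offset 0x10 on bus 1, dev 0, fn 0. */
	unsigned int bus = 1, dev = 0, fn = 0, where = 0x10, size = 4;

	unsigned int dw0 = CFG_DW0_LENGTH(1) | CFG_DW0_TYPE(CFG_WRRD_TYPE_0) |
			   CFG_DW0_FMT(CFG_RD_FMT);
	/* DW1 carries the first-dword byte enables for the access. */
	unsigned int dw1 = GENMASK(size - 1, 0) << (where & 0x3);
	unsigned int dw2 = CFG_DW2_REGN(where) | CFG_DW2_FUN(fn) |
			   CFG_DW2_DEV(dev) | CFG_DW2_BUS(bus);

	printf("DW0=%#010x DW1=%#010x DW2=%#010x\n", dw0, dw1, dw2);
	return 0;
}
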
+/** + * struct mtk_pcie_soc - differentiate between host generations + * @has_msi: whether this host supports MSI interrupts or not + * @ops: pointer to configuration access functions + * @startup: pointer to controller setting functions + * @setup_irq: pointer to initialize IRQ functions + */ +struct mtk_pcie_soc { + bool has_msi; + struct pci_ops *ops; + int (*startup)(struct mtk_pcie_port *port); + int (*setup_irq)(struct mtk_pcie_port *port, struct device_node *node); +}; + /** * struct mtk_pcie_port - PCIe port information * @base: IO mapped register base * @list: port list * @pcie: pointer to PCIe host info * @reset: pointer to port reset control - * @sys_ck: pointer to bus clock - * @phy: pointer to phy control block + * @sys_ck: pointer to transaction/data link layer clock + * @ahb_ck: pointer to AHB slave interface operating clock for CSR access + * and RC initiated MMIO access + * @axi_ck: pointer to application layer MMIO channel operating clock + * @aux_ck: pointer to pe2_mac_bridge and pe2_mac_core operating clock + * when pcie_mac_ck/pcie_pipe_ck is turned off + * @obff_ck: pointer to OBFF functional block operating clock + * @pipe_ck: pointer to LTSSM and PHY/MAC layer operating clock + * @phy: pointer to PHY control block * @lane: lane count - * @index: port index + * @slot: port slot + * @irq_domain: legacy INTx IRQ domain + * @msi_domain: MSI IRQ domain + * @msi_irq_in_use: bit map for assigned MSI IRQ */ struct mtk_pcie_port { void __iomem *base; @@ -80,9 +172,17 @@ struct mtk_pcie_port { struct mtk_pcie *pcie; struct reset_control *reset; struct clk *sys_ck; + struct clk *ahb_ck; + struct clk *axi_ck; + struct clk *aux_ck; + struct clk *obff_ck; + struct clk *pipe_ck; struct phy *phy; u32 lane; - u32 index; + u32 slot; + struct irq_domain *irq_domain; + struct irq_domain *msi_domain; + DECLARE_BITMAP(msi_irq_in_use, MTK_MSI_IRQS_NUM); }; /** @@ -96,6 +196,7 @@ struct mtk_pcie_port { * @busn: bus range * @offset: IO / Memory offset * @ports: pointer to PCIe port information + * @soc: pointer to SoC-dependent operations */ struct mtk_pcie { struct device *dev; @@ -111,13 +212,9 @@ struct mtk_pcie { resource_size_t io; } offset; struct list_head ports; + const struct mtk_pcie_soc *soc; }; -static inline bool mtk_pcie_link_up(struct mtk_pcie_port *port) -{ - return !!(readl(port->base + PCIE_LINK_STATUS) & PCIE_PORT_LINKUP); -} - static void mtk_pcie_subsys_powerdown(struct mtk_pcie *pcie) { struct device *dev = pcie->dev; @@ -146,6 +243,12 @@ static void mtk_pcie_put_resources(struct mtk_pcie *pcie) list_for_each_entry_safe(port, tmp, &pcie->ports, list) { phy_power_off(port->phy); + phy_exit(port->phy); + clk_disable_unprepare(port->pipe_ck); + clk_disable_unprepare(port->obff_ck); + clk_disable_unprepare(port->axi_ck); + clk_disable_unprepare(port->aux_ck); + clk_disable_unprepare(port->ahb_ck); clk_disable_unprepare(port->sys_ck); mtk_pcie_port_free(port); } @@ -153,11 +256,412 @@ static void mtk_pcie_put_resources(struct mtk_pcie *pcie) mtk_pcie_subsys_powerdown(pcie); } +static int mtk_pcie_check_cfg_cpld(struct mtk_pcie_port *port) +{ + u32 val; + int err; + + err = readl_poll_timeout_atomic(port->base + PCIE_APP_TLP_REQ, val, + !(val & APP_CFG_REQ), 10, + 100 * USEC_PER_MSEC); + if (err) + return PCIBIOS_SET_FAILED; + + if (readl(port->base + PCIE_APP_TLP_REQ) & APP_CPL_STATUS) + return PCIBIOS_SET_FAILED; + + return PCIBIOS_SUCCESSFUL; +} + +static int mtk_pcie_hw_rd_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn, + int where, int size, u32 *val) +{ + u32 tmp; 
+ + /* Write PCIe configuration transaction header for Cfgrd */ + writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_RD_FMT), + port->base + PCIE_CFG_HEADER0); + writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1); + writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus), + port->base + PCIE_CFG_HEADER2); + + /* Trigger h/w to transmit Cfgrd TLP */ + tmp = readl(port->base + PCIE_APP_TLP_REQ); + tmp |= APP_CFG_REQ; + writel(tmp, port->base + PCIE_APP_TLP_REQ); + + /* Check completion status */ + if (mtk_pcie_check_cfg_cpld(port)) + return PCIBIOS_SET_FAILED; + + /* Read cpld payload of Cfgrd */ + *val = readl(port->base + PCIE_CFG_RDATA); + + if (size == 1) + *val = (*val >> (8 * (where & 3))) & 0xff; + else if (size == 2) + *val = (*val >> (8 * (where & 3))) & 0xffff; + + return PCIBIOS_SUCCESSFUL; +} + +static int mtk_pcie_hw_wr_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn, + int where, int size, u32 val) +{ + /* Write PCIe configuration transaction header for Cfgwr */ + writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_WR_FMT), + port->base + PCIE_CFG_HEADER0); + writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1); + writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus), + port->base + PCIE_CFG_HEADER2); + + /* Write Cfgwr data */ + val = val << 8 * (where & 3); + writel(val, port->base + PCIE_CFG_WDATA); + + /* Trigger h/w to transmit Cfgwr TLP */ + val = readl(port->base + PCIE_APP_TLP_REQ); + val |= APP_CFG_REQ; + writel(val, port->base + PCIE_APP_TLP_REQ); + + /* Check completion status */ + return mtk_pcie_check_cfg_cpld(port); +} + +static struct mtk_pcie_port *mtk_pcie_find_port(struct pci_bus *bus, + unsigned int devfn) +{ + struct mtk_pcie *pcie = bus->sysdata; + struct mtk_pcie_port *port; + + list_for_each_entry(port, &pcie->ports, list) + if (port->slot == PCI_SLOT(devfn)) + return port; + + return NULL; +} + +static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + struct mtk_pcie_port *port; + u32 bn = bus->number; + int ret; + + port = mtk_pcie_find_port(bus, devfn); + if (!port) { + *val = ~0; + return PCIBIOS_DEVICE_NOT_FOUND; + } + + ret = mtk_pcie_hw_rd_cfg(port, bn, devfn, where, size, val); + if (ret) + *val = ~0; + + return ret; +} + +static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + struct mtk_pcie_port *port; + u32 bn = bus->number; + + port = mtk_pcie_find_port(bus, devfn); + if (!port) + return PCIBIOS_DEVICE_NOT_FOUND; + + return mtk_pcie_hw_wr_cfg(port, bn, devfn, where, size, val); +} + +static struct pci_ops mtk_pcie_ops_v2 = { + .read = mtk_pcie_config_read, + .write = mtk_pcie_config_write, +}; + +static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port) +{ + struct mtk_pcie *pcie = port->pcie; + struct resource *mem = &pcie->mem; + u32 val; + size_t size; + int err; + + /* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */ + if (pcie->base) { + val = readl(pcie->base + PCIE_SYS_CFG_V2); + val |= PCIE_CSR_LTSSM_EN(port->slot) | + PCIE_CSR_ASPM_L1_EN(port->slot); + writel(val, pcie->base + PCIE_SYS_CFG_V2); + } + + /* Assert all reset signals */ + writel(0, port->base + PCIE_RST_CTRL); + + /* + * Enable PCIe link down reset, if link status changed from link up to + * link down, this will reset MAC control registers and configuration + * space. 
+ */ + writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL); + + /* De-assert PHY, PE, PIPE, MAC and configuration reset */ + val = readl(port->base + PCIE_RST_CTRL); + val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB | + PCIE_MAC_SRSTB | PCIE_CRSTB; + writel(val, port->base + PCIE_RST_CTRL); + + /* 100ms timeout value should be enough for Gen1/2 training */ + err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val, + !!(val & PCIE_PORT_LINKUP_V2), 20, + 100 * USEC_PER_MSEC); + if (err) + return -ETIMEDOUT; + + /* Set INTx mask */ + val = readl(port->base + PCIE_INT_MASK); + val &= ~INTX_MASK; + writel(val, port->base + PCIE_INT_MASK); + + /* Set AHB to PCIe translation windows */ + size = mem->end - mem->start; + val = lower_32_bits(mem->start) | AHB2PCIE_SIZE(fls(size)); + writel(val, port->base + PCIE_AHB_TRANS_BASE0_L); + + val = upper_32_bits(mem->start); + writel(val, port->base + PCIE_AHB_TRANS_BASE0_H); + + /* Set PCIe to AXI translation memory space.*/ + val = fls(0xffffffff) | WIN_ENABLE; + writel(val, port->base + PCIE_AXI_WINDOW0); + + return 0; +} + +static int mtk_pcie_msi_alloc(struct mtk_pcie_port *port) +{ + int msi; + + msi = find_first_zero_bit(port->msi_irq_in_use, MTK_MSI_IRQS_NUM); + if (msi < MTK_MSI_IRQS_NUM) + set_bit(msi, port->msi_irq_in_use); + else + return -ENOSPC; + + return msi; +} + +static void mtk_pcie_msi_free(struct mtk_pcie_port *port, unsigned long hwirq) +{ + clear_bit(hwirq, port->msi_irq_in_use); +} + +static int mtk_pcie_msi_setup_irq(struct msi_controller *chip, + struct pci_dev *pdev, struct msi_desc *desc) +{ + struct mtk_pcie_port *port; + struct msi_msg msg; + unsigned int irq; + int hwirq; + phys_addr_t msg_addr; + + port = mtk_pcie_find_port(pdev->bus, pdev->devfn); + if (!port) + return -EINVAL; + + hwirq = mtk_pcie_msi_alloc(port); + if (hwirq < 0) + return hwirq; + + irq = irq_create_mapping(port->msi_domain, hwirq); + if (!irq) { + mtk_pcie_msi_free(port, hwirq); + return -EINVAL; + } + + chip->dev = &pdev->dev; + + irq_set_msi_desc(irq, desc); + + /* MT2712/MT7622 only support 32-bit MSI addresses */ + msg_addr = virt_to_phys(port->base + PCIE_MSI_VECTOR); + msg.address_hi = 0; + msg.address_lo = lower_32_bits(msg_addr); + msg.data = hwirq; + + pci_write_msi_msg(irq, &msg); + + return 0; +} + +static void mtk_msi_teardown_irq(struct msi_controller *chip, unsigned int irq) +{ + struct pci_dev *pdev = to_pci_dev(chip->dev); + struct irq_data *d = irq_get_irq_data(irq); + irq_hw_number_t hwirq = irqd_to_hwirq(d); + struct mtk_pcie_port *port; + + port = mtk_pcie_find_port(pdev->bus, pdev->devfn); + if (!port) + return; + + irq_dispose_mapping(irq); + mtk_pcie_msi_free(port, hwirq); +} + +static struct msi_controller mtk_pcie_msi_chip = { + .setup_irq = mtk_pcie_msi_setup_irq, + .teardown_irq = mtk_msi_teardown_irq, +}; + +static struct irq_chip mtk_msi_irq_chip = { + .name = "MTK PCIe MSI", + .irq_enable = pci_msi_unmask_irq, + .irq_disable = pci_msi_mask_irq, + .irq_mask = pci_msi_mask_irq, + .irq_unmask = pci_msi_unmask_irq, +}; + +static int mtk_pcie_msi_map(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &mtk_msi_irq_chip, handle_simple_irq); + irq_set_chip_data(irq, domain->host_data); + + return 0; +} + +static const struct irq_domain_ops msi_domain_ops = { + .map = mtk_pcie_msi_map, +}; + +static void mtk_pcie_enable_msi(struct mtk_pcie_port *port) +{ + u32 val; + phys_addr_t msg_addr; + + msg_addr = virt_to_phys(port->base + PCIE_MSI_VECTOR); + val = 
lower_32_bits(msg_addr); + writel(val, port->base + PCIE_IMSI_ADDR); + + val = readl(port->base + PCIE_INT_MASK); + val &= ~MSI_MASK; + writel(val, port->base + PCIE_INT_MASK); +} + +static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); + irq_set_chip_data(irq, domain->host_data); + + return 0; +} + +static const struct irq_domain_ops intx_domain_ops = { + .map = mtk_pcie_intx_map, +}; + +static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port, + struct device_node *node) +{ + struct device *dev = port->pcie->dev; + struct device_node *pcie_intc_node; + + /* Setup INTx */ + pcie_intc_node = of_get_next_child(node, NULL); + if (!pcie_intc_node) { + dev_err(dev, "no PCIe Intc node found\n"); + return -ENODEV; + } + + port->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, + &intx_domain_ops, port); + if (!port->irq_domain) { + dev_err(dev, "failed to get INTx IRQ domain\n"); + return -ENODEV; + } + + if (IS_ENABLED(CONFIG_PCI_MSI)) { + port->msi_domain = irq_domain_add_linear(node, MTK_MSI_IRQS_NUM, + &msi_domain_ops, + &mtk_pcie_msi_chip); + if (!port->msi_domain) { + dev_err(dev, "failed to create MSI IRQ domain\n"); + return -ENODEV; + } + mtk_pcie_enable_msi(port); + } + + return 0; +} + +static irqreturn_t mtk_pcie_intr_handler(int irq, void *data) +{ + struct mtk_pcie_port *port = (struct mtk_pcie_port *)data; + unsigned long status; + u32 virq; + u32 bit = INTX_SHIFT; + + while ((status = readl(port->base + PCIE_INT_STATUS)) & INTX_MASK) { + for_each_set_bit_from(bit, &status, PCI_NUM_INTX + INTX_SHIFT) { + /* Clear the INTx */ + writel(1 << bit, port->base + PCIE_INT_STATUS); + virq = irq_find_mapping(port->irq_domain, + bit - INTX_SHIFT); + generic_handle_irq(virq); + } + } + + if (IS_ENABLED(CONFIG_PCI_MSI)) { + while ((status = readl(port->base + PCIE_INT_STATUS)) & MSI_STATUS) { + unsigned long imsi_status; + + while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) { + for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM) { + /* Clear the MSI */ + writel(1 << bit, port->base + PCIE_IMSI_STATUS); + virq = irq_find_mapping(port->msi_domain, bit); + generic_handle_irq(virq); + } + } + /* Clear MSI interrupt status */ + writel(MSI_STATUS, port->base + PCIE_INT_STATUS); + } + } + + return IRQ_HANDLED; +} + +static int mtk_pcie_setup_irq(struct mtk_pcie_port *port, + struct device_node *node) +{ + struct mtk_pcie *pcie = port->pcie; + struct device *dev = pcie->dev; + struct platform_device *pdev = to_platform_device(dev); + int err, irq; + + irq = platform_get_irq(pdev, port->slot); + err = devm_request_irq(dev, irq, mtk_pcie_intr_handler, + IRQF_SHARED, "mtk-pcie", port); + if (err) { + dev_err(dev, "unable to request IRQ %d\n", irq); + return err; + } + + err = mtk_pcie_init_irq_domain(port, node); + if (err) { + dev_err(dev, "failed to init PCIe IRQ domain\n"); + return err; + } + + return 0; +} + static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, int where) { - struct pci_host_bridge *host = pci_find_host_bridge(bus); - struct mtk_pcie *pcie = pci_host_bridge_priv(host); + struct mtk_pcie *pcie = bus->sysdata; writel(PCIE_CONF_ADDR(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus->number), pcie->base + PCIE_CFG_ADDR); @@ -171,16 +675,34 @@ static struct pci_ops mtk_pcie_ops = { .write = pci_generic_config_write, }; -static void mtk_pcie_configure_rc(struct mtk_pcie_port *port) +static int 
mtk_pcie_startup_port(struct mtk_pcie_port *port) { struct mtk_pcie *pcie = port->pcie; - u32 func = PCI_FUNC(port->index << 3); - u32 slot = PCI_SLOT(port->index << 3); + u32 func = PCI_FUNC(port->slot << 3); + u32 slot = PCI_SLOT(port->slot << 3); u32 val; + int err; + + /* assert port PERST_N */ + val = readl(pcie->base + PCIE_SYS_CFG); + val |= PCIE_PORT_PERST(port->slot); + writel(val, pcie->base + PCIE_SYS_CFG); + + /* de-assert port PERST_N */ + val = readl(pcie->base + PCIE_SYS_CFG); + val &= ~PCIE_PORT_PERST(port->slot); + writel(val, pcie->base + PCIE_SYS_CFG); + + /* 100ms timeout value should be enough for Gen1/2 training */ + err = readl_poll_timeout(port->base + PCIE_LINK_STATUS, val, + !!(val & PCIE_PORT_LINKUP), 20, + 100 * USEC_PER_MSEC); + if (err) + return -ETIMEDOUT; /* enable interrupt */ val = readl(pcie->base + PCIE_INT_ENABLE); - val |= PCIE_PORT_INT_EN(port->index); + val |= PCIE_PORT_INT_EN(port->slot); writel(val, pcie->base + PCIE_INT_ENABLE); /* map to all DDR region. We need to set it before cfg operation. */ @@ -209,67 +731,94 @@ static void mtk_pcie_configure_rc(struct mtk_pcie_port *port) writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0), pcie->base + PCIE_CFG_ADDR); writel(val, pcie->base + PCIE_CFG_DATA); + + return 0; } -static void mtk_pcie_assert_ports(struct mtk_pcie_port *port) +static void mtk_pcie_enable_port(struct mtk_pcie_port *port) { struct mtk_pcie *pcie = port->pcie; - u32 val; - - /* assert port PERST_N */ - val = readl(pcie->base + PCIE_SYS_CFG); - val |= PCIE_PORT_PERST(port->index); - writel(val, pcie->base + PCIE_SYS_CFG); - - /* de-assert port PERST_N */ - val = readl(pcie->base + PCIE_SYS_CFG); - val &= ~PCIE_PORT_PERST(port->index); - writel(val, pcie->base + PCIE_SYS_CFG); - - /* PCIe v2.0 need at least 100ms delay to train from Gen1 to Gen2 */ - msleep(100); -} - -static void mtk_pcie_enable_ports(struct mtk_pcie_port *port) -{ - struct device *dev = port->pcie->dev; + struct device *dev = pcie->dev; int err; err = clk_prepare_enable(port->sys_ck); if (err) { - dev_err(dev, "failed to enable port%d clock\n", port->index); + dev_err(dev, "failed to enable sys_ck%d clock\n", port->slot); goto err_sys_clk; } + err = clk_prepare_enable(port->ahb_ck); + if (err) { + dev_err(dev, "failed to enable ahb_ck%d\n", port->slot); + goto err_ahb_clk; + } + + err = clk_prepare_enable(port->aux_ck); + if (err) { + dev_err(dev, "failed to enable aux_ck%d\n", port->slot); + goto err_aux_clk; + } + + err = clk_prepare_enable(port->axi_ck); + if (err) { + dev_err(dev, "failed to enable axi_ck%d\n", port->slot); + goto err_axi_clk; + } + + err = clk_prepare_enable(port->obff_ck); + if (err) { + dev_err(dev, "failed to enable obff_ck%d\n", port->slot); + goto err_obff_clk; + } + + err = clk_prepare_enable(port->pipe_ck); + if (err) { + dev_err(dev, "failed to enable pipe_ck%d\n", port->slot); + goto err_pipe_clk; + } + reset_control_assert(port->reset); reset_control_deassert(port->reset); + err = phy_init(port->phy); + if (err) { + dev_err(dev, "failed to initialize port%d phy\n", port->slot); + goto err_phy_init; + } + err = phy_power_on(port->phy); if (err) { - dev_err(dev, "failed to power on port%d phy\n", port->index); + dev_err(dev, "failed to power on port%d phy\n", port->slot); goto err_phy_on; } - mtk_pcie_assert_ports(port); - - /* if link up, then setup root port configuration space */ - if (mtk_pcie_link_up(port)) { - mtk_pcie_configure_rc(port); + if (!pcie->soc->startup(port)) return; - } - dev_info(dev, "Port%d link down\n", 
port->index); + dev_info(dev, "Port%d link down\n", port->slot); phy_power_off(port->phy); err_phy_on: + phy_exit(port->phy); +err_phy_init: + clk_disable_unprepare(port->pipe_ck); +err_pipe_clk: + clk_disable_unprepare(port->obff_ck); +err_obff_clk: + clk_disable_unprepare(port->axi_ck); +err_axi_clk: + clk_disable_unprepare(port->aux_ck); +err_aux_clk: + clk_disable_unprepare(port->ahb_ck); +err_ahb_clk: clk_disable_unprepare(port->sys_ck); err_sys_clk: mtk_pcie_port_free(port); } -static int mtk_pcie_parse_ports(struct mtk_pcie *pcie, - struct device_node *node, - int index) +static int mtk_pcie_parse_port(struct mtk_pcie *pcie, + struct device_node *node, + int slot) { struct mtk_pcie_port *port; struct resource *regs; @@ -288,34 +837,87 @@ static int mtk_pcie_parse_ports(struct mtk_pcie *pcie, return err; } - regs = platform_get_resource(pdev, IORESOURCE_MEM, index + 1); + snprintf(name, sizeof(name), "port%d", slot); + regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); port->base = devm_ioremap_resource(dev, regs); if (IS_ERR(port->base)) { - dev_err(dev, "failed to map port%d base\n", index); + dev_err(dev, "failed to map port%d base\n", slot); return PTR_ERR(port->base); } - snprintf(name, sizeof(name), "sys_ck%d", index); + snprintf(name, sizeof(name), "sys_ck%d", slot); port->sys_ck = devm_clk_get(dev, name); if (IS_ERR(port->sys_ck)) { - dev_err(dev, "failed to get port%d clock\n", index); + dev_err(dev, "failed to get sys_ck%d clock\n", slot); return PTR_ERR(port->sys_ck); } - snprintf(name, sizeof(name), "pcie-rst%d", index); - port->reset = devm_reset_control_get_optional(dev, name); + /* sys_ck might be divided into the following parts in some chips */ + snprintf(name, sizeof(name), "ahb_ck%d", slot); + port->ahb_ck = devm_clk_get(dev, name); + if (IS_ERR(port->ahb_ck)) { + if (PTR_ERR(port->ahb_ck) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + port->ahb_ck = NULL; + } + + snprintf(name, sizeof(name), "axi_ck%d", slot); + port->axi_ck = devm_clk_get(dev, name); + if (IS_ERR(port->axi_ck)) { + if (PTR_ERR(port->axi_ck) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + port->axi_ck = NULL; + } + + snprintf(name, sizeof(name), "aux_ck%d", slot); + port->aux_ck = devm_clk_get(dev, name); + if (IS_ERR(port->aux_ck)) { + if (PTR_ERR(port->aux_ck) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + port->aux_ck = NULL; + } + + snprintf(name, sizeof(name), "obff_ck%d", slot); + port->obff_ck = devm_clk_get(dev, name); + if (IS_ERR(port->obff_ck)) { + if (PTR_ERR(port->obff_ck) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + port->obff_ck = NULL; + } + + snprintf(name, sizeof(name), "pipe_ck%d", slot); + port->pipe_ck = devm_clk_get(dev, name); + if (IS_ERR(port->pipe_ck)) { + if (PTR_ERR(port->pipe_ck) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + port->pipe_ck = NULL; + } + + snprintf(name, sizeof(name), "pcie-rst%d", slot); + port->reset = devm_reset_control_get_optional_exclusive(dev, name); if (PTR_ERR(port->reset) == -EPROBE_DEFER) return PTR_ERR(port->reset); /* some platforms may use default PHY setting */ - snprintf(name, sizeof(name), "pcie-phy%d", index); + snprintf(name, sizeof(name), "pcie-phy%d", slot); port->phy = devm_phy_optional_get(dev, name); if (IS_ERR(port->phy)) return PTR_ERR(port->phy); - port->index = index; + port->slot = slot; port->pcie = pcie; + if (pcie->soc->setup_irq) { + err = pcie->soc->setup_irq(port, node); + if (err) + return err; + } + INIT_LIST_HEAD(&port->list); list_add_tail(&port->list, &pcie->ports); @@ -329,12 +931,14 @@ static int 
mtk_pcie_subsys_powerup(struct mtk_pcie *pcie) struct resource *regs; int err; - /* get shared registers */ - regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - pcie->base = devm_ioremap_resource(dev, regs); - if (IS_ERR(pcie->base)) { - dev_err(dev, "failed to map shared register\n"); - return PTR_ERR(pcie->base); + /* get shared registers, which are optional */ + regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "subsys"); + if (regs) { + pcie->base = devm_ioremap_resource(dev, regs); + if (IS_ERR(pcie->base)) { + dev_err(dev, "failed to map shared register\n"); + return PTR_ERR(pcie->base); + } } pcie->free_ck = devm_clk_get(dev, "free_ck"); @@ -422,7 +1026,7 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie) } for_each_available_child_of_node(node, child) { - int index; + int slot; err = of_pci_get_devfn(child); if (err < 0) { @@ -430,9 +1034,9 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie) return err; } - index = PCI_SLOT(err); + slot = PCI_SLOT(err); - err = mtk_pcie_parse_ports(pcie, child, index); + err = mtk_pcie_parse_port(pcie, child, slot); if (err) return err; } @@ -443,7 +1047,7 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie) /* enable each port, and then check link status */ list_for_each_entry_safe(port, tmp, &pcie->ports, list) - mtk_pcie_enable_ports(port); + mtk_pcie_enable_port(port); /* power down PCIe subsys if slots are all empty (link down) */ if (list_empty(&pcie->ports)) @@ -480,9 +1084,12 @@ static int mtk_pcie_register_host(struct pci_host_bridge *host) host->busnr = pcie->busn.start; host->dev.parent = pcie->dev; - host->ops = &mtk_pcie_ops; + host->ops = pcie->soc->ops; host->map_irq = of_irq_parse_and_map_pci; host->swizzle_irq = pci_common_swizzle; + host->sysdata = pcie; + if (IS_ENABLED(CONFIG_PCI_MSI) && pcie->soc->has_msi) + host->msi = &mtk_pcie_msi_chip; err = pci_scan_root_bus_bridge(host); if (err < 0) @@ -513,6 +1120,7 @@ static int mtk_pcie_probe(struct platform_device *pdev) pcie = pci_host_bridge_priv(host); pcie->dev = dev; + pcie->soc = of_device_get_match_data(dev); platform_set_drvdata(pdev, pcie); INIT_LIST_HEAD(&pcie->ports); @@ -537,9 +1145,23 @@ static int mtk_pcie_probe(struct platform_device *pdev) return err; } +static const struct mtk_pcie_soc mtk_pcie_soc_v1 = { + .ops = &mtk_pcie_ops, + .startup = mtk_pcie_startup_port, +}; + +static const struct mtk_pcie_soc mtk_pcie_soc_v2 = { + .has_msi = true, + .ops = &mtk_pcie_ops_v2, + .startup = mtk_pcie_startup_port_v2, + .setup_irq = mtk_pcie_setup_irq, +}; + static const struct of_device_id mtk_pcie_ids[] = { - { .compatible = "mediatek,mt7623-pcie"}, - { .compatible = "mediatek,mt2701-pcie"}, + { .compatible = "mediatek,mt2701-pcie", .data = &mtk_pcie_soc_v1 }, + { .compatible = "mediatek,mt7623-pcie", .data = &mtk_pcie_soc_v1 }, + { .compatible = "mediatek,mt2712-pcie", .data = &mtk_pcie_soc_v2 }, + { .compatible = "mediatek,mt7622-pcie", .data = &mtk_pcie_soc_v2 }, {}, }; diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c index 246d485b24c6..4e0b25d09b0c 100644 --- a/drivers/pci/host/pcie-rcar.c +++ b/drivers/pci/host/pcie-rcar.c @@ -471,10 +471,8 @@ static int rcar_pcie_enable(struct rcar_pcie *pcie) bridge->msi = &pcie->msi.chip; ret = pci_scan_root_bus_bridge(bridge); - if (ret < 0) { - kfree(bridge); + if (ret < 0) return ret; - } bus = bridge->bus; @@ -1190,14 +1188,16 @@ static int rcar_pcie_probe(struct platform_device *pdev) return 0; -err_free_bridge: - pci_free_host_bridge(bridge); - err_pm_put: pm_runtime_put(dev); 
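
The new mtk_pcie_ids[] entries above attach a per-SoC mtk_pcie_soc operations table through the .data field, and probe retrieves it with of_device_get_match_data(); config accessors, port startup and IRQ setup are then reached only through that table. The following is a stripped-down sketch of that match-data pattern under the stated assumption that the foo_* names and "vendor,foo-*" compatible strings are placeholders, not the MediaTek driver's actual symbols.

#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct foo_soc {
	bool has_msi;
	int (*startup)(struct platform_device *pdev);
};

static int foo_startup_v1(struct platform_device *pdev) { return 0; }
static int foo_startup_v2(struct platform_device *pdev) { return 0; }

static const struct foo_soc foo_soc_v1 = { .startup = foo_startup_v1 };
static const struct foo_soc foo_soc_v2 = { .has_msi = true, .startup = foo_startup_v2 };

static const struct of_device_id foo_ids[] = {
	{ .compatible = "vendor,foo-v1", .data = &foo_soc_v1 },
	{ .compatible = "vendor,foo-v2", .data = &foo_soc_v2 },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, foo_ids);

static int foo_probe(struct platform_device *pdev)
{
	/* Per-SoC data selected by the matching compatible string. */
	const struct foo_soc *soc = of_device_get_match_data(&pdev->dev);

	if (!soc)
		return -EINVAL;

	/* All generation-specific behaviour goes through the ops table. */
	return soc->startup(pdev);
}

static struct platform_driver foo_driver = {
	.probe = foo_probe,
	.driver = {
		.name = "foo-pcie",
		.of_match_table = foo_ids,
	},
};
module_platform_driver(foo_driver);
MODULE_LICENSE("GPL v2");

Keeping every generation-specific hook behind one const ops structure is what lets the V2 MSI and config-access paths coexist with the V1 ones in a single driver.
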
err_pm_disable: pm_runtime_disable(dev); + +err_free_bridge: + pci_free_host_bridge(bridge); + pci_free_resource_list(&pcie->resources); + return err; } diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c index 7bb9870f6d8c..9051c6c8fea4 100644 --- a/drivers/pci/host/pcie-rockchip.c +++ b/drivers/pci/host/pcie-rockchip.c @@ -6,7 +6,7 @@ * Author: Shawn Lin * Wenrui Li * - * Bits taken from Synopsys Designware Host controller driver and + * Bits taken from Synopsys DesignWare Host controller driver and * ARM PCI Host generic driver. * * This program is free software: you can redistribute it and/or modify @@ -15,6 +15,7 @@ * (at your option) any later version. */ +#include #include #include #include @@ -47,6 +48,7 @@ #define HIWORD_UPDATE_BIT(val) HIWORD_UPDATE(val, val) #define ENCODE_LANES(x) ((((x) >> 1) & 3) << 4) +#define MAX_LANE_NUM 4 #define PCIE_CLIENT_BASE 0x0 #define PCIE_CLIENT_CONFIG (PCIE_CLIENT_BASE + 0x00) @@ -111,6 +113,9 @@ #define PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT 16 #define PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(x) \ (((x) >> 3) << PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT) +#define PCIE_CORE_LANE_MAP (PCIE_CORE_CTRL_MGMT_BASE + 0x200) +#define PCIE_CORE_LANE_MAP_MASK 0x0000000f +#define PCIE_CORE_LANE_MAP_REVERSE BIT(16) #define PCIE_CORE_INT_STATUS (PCIE_CORE_CTRL_MGMT_BASE + 0x20c) #define PCIE_CORE_INT_PRFPE BIT(0) #define PCIE_CORE_INT_CRFPE BIT(1) @@ -210,7 +215,8 @@ struct rockchip_pcie { void __iomem *reg_base; /* DT axi-base */ void __iomem *apb_base; /* DT apb-base */ - struct phy *phy; + bool legacy_phy; + struct phy *phys[MAX_LANE_NUM]; struct reset_control *core_rst; struct reset_control *mgmt_rst; struct reset_control *mgmt_sticky_rst; @@ -222,11 +228,13 @@ struct rockchip_pcie { struct clk *aclk_perf_pcie; struct clk *hclk_pcie; struct clk *clk_pcie_pm; + struct regulator *vpcie12v; /* 12V power supply */ struct regulator *vpcie3v3; /* 3.3V power supply */ struct regulator *vpcie1v8; /* 1.8V power supply */ struct regulator *vpcie0v9; /* 0.9V power supply */ struct gpio_desc *ep_gpio; u32 lanes; + u8 lanes_map; u8 root_bus_nr; int link_gen; struct device *dev; @@ -299,6 +307,24 @@ static int rockchip_pcie_valid_device(struct rockchip_pcie *rockchip, return 1; } +static u8 rockchip_pcie_lane_map(struct rockchip_pcie *rockchip) +{ + u32 val; + u8 map; + + if (rockchip->legacy_phy) + return GENMASK(MAX_LANE_NUM - 1, 0); + + val = rockchip_pcie_read(rockchip, PCIE_CORE_LANE_MAP); + map = val & PCIE_CORE_LANE_MAP_MASK; + + /* The link may be using a reverse-indexed mapping. 
*/ + if (val & PCIE_CORE_LANE_MAP_REVERSE) + map = bitrev8(map) >> 4; + + return map; +} + static int rockchip_pcie_rd_own_conf(struct rockchip_pcie *rockchip, int where, int size, u32 *val) { @@ -514,10 +540,10 @@ static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip) static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip) { struct device *dev = rockchip->dev; - int err; + int err, i; u32 status; - gpiod_set_value(rockchip->ep_gpio, 0); + gpiod_set_value_cansleep(rockchip->ep_gpio, 0); err = reset_control_assert(rockchip->aclk_rst); if (err) { @@ -537,34 +563,36 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip) return err; } - err = phy_init(rockchip->phy); - if (err < 0) { - dev_err(dev, "fail to init phy, err %d\n", err); - return err; + for (i = 0; i < MAX_LANE_NUM; i++) { + err = phy_init(rockchip->phys[i]); + if (err) { + dev_err(dev, "init phy%d err %d\n", i, err); + goto err_exit_phy; + } } err = reset_control_assert(rockchip->core_rst); if (err) { dev_err(dev, "assert core_rst err %d\n", err); - return err; + goto err_exit_phy; } err = reset_control_assert(rockchip->mgmt_rst); if (err) { dev_err(dev, "assert mgmt_rst err %d\n", err); - return err; + goto err_exit_phy; } err = reset_control_assert(rockchip->mgmt_sticky_rst); if (err) { dev_err(dev, "assert mgmt_sticky_rst err %d\n", err); - return err; + goto err_exit_phy; } err = reset_control_assert(rockchip->pipe_rst); if (err) { dev_err(dev, "assert pipe_rst err %d\n", err); - return err; + goto err_exit_phy; } udelay(10); @@ -572,19 +600,19 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip) err = reset_control_deassert(rockchip->pm_rst); if (err) { dev_err(dev, "deassert pm_rst err %d\n", err); - return err; + goto err_exit_phy; } err = reset_control_deassert(rockchip->aclk_rst); if (err) { dev_err(dev, "deassert aclk_rst err %d\n", err); - return err; + goto err_exit_phy; } err = reset_control_deassert(rockchip->pclk_rst); if (err) { dev_err(dev, "deassert pclk_rst err %d\n", err); - return err; + goto err_exit_phy; } if (rockchip->link_gen == 2) @@ -602,10 +630,12 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip) PCIE_CLIENT_MODE_RC, PCIE_CLIENT_CONFIG); - err = phy_power_on(rockchip->phy); - if (err) { - dev_err(dev, "fail to power on phy, err %d\n", err); - return err; + for (i = 0; i < MAX_LANE_NUM; i++) { + err = phy_power_on(rockchip->phys[i]); + if (err) { + dev_err(dev, "power on phy%d err %d\n", i, err); + goto err_power_off_phy; + } } /* @@ -615,25 +645,25 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip) err = reset_control_deassert(rockchip->mgmt_sticky_rst); if (err) { dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err); - return err; + goto err_power_off_phy; } err = reset_control_deassert(rockchip->core_rst); if (err) { dev_err(dev, "deassert core_rst err %d\n", err); - return err; + goto err_power_off_phy; } err = reset_control_deassert(rockchip->mgmt_rst); if (err) { dev_err(dev, "deassert mgmt_rst err %d\n", err); - return err; + goto err_power_off_phy; } err = reset_control_deassert(rockchip->pipe_rst); if (err) { dev_err(dev, "deassert pipe_rst err %d\n", err); - return err; + goto err_power_off_phy; } /* Fix the transmitted FTS count desired to exit from L0s. 
*/ @@ -658,7 +688,7 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip) rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE, PCIE_CLIENT_CONFIG); - gpiod_set_value(rockchip->ep_gpio, 1); + gpiod_set_value_cansleep(rockchip->ep_gpio, 1); /* 500ms timeout value should be enough for Gen1/2 training */ err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1, @@ -666,7 +696,7 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip) 500 * USEC_PER_MSEC); if (err) { dev_err(dev, "PCIe link training gen1 timeout!\n"); - return -ETIMEDOUT; + goto err_power_off_phy; } if (rockchip->link_gen == 2) { @@ -691,6 +721,15 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip) PCIE_CORE_PL_CONF_LANE_SHIFT); dev_dbg(dev, "current link width is x%d\n", status); + /* Power off unused lane(s) */ + rockchip->lanes_map = rockchip_pcie_lane_map(rockchip); + for (i = 0; i < MAX_LANE_NUM; i++) { + if (!(rockchip->lanes_map & BIT(i))) { + dev_dbg(dev, "idling lane %d\n", i); + phy_power_off(rockchip->phys[i]); + } + } + rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID, PCIE_CORE_CONFIG_VENDOR); rockchip_pcie_write(rockchip, @@ -715,6 +754,26 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip) rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCSR); return 0; +err_power_off_phy: + while (i--) + phy_power_off(rockchip->phys[i]); + i = MAX_LANE_NUM; +err_exit_phy: + while (i--) + phy_exit(rockchip->phys[i]); + return err; +} + +static void rockchip_pcie_deinit_phys(struct rockchip_pcie *rockchip) +{ + int i; + + for (i = 0; i < MAX_LANE_NUM; i++) { + /* inactive lanes are already powered off */ + if (rockchip->lanes_map & BIT(i)) + phy_power_off(rockchip->phys[i]); + phy_exit(rockchip->phys[i]); + } } static irqreturn_t rockchip_pcie_subsys_irq_handler(int irq, void *arg) @@ -853,6 +912,91 @@ static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc) chained_irq_exit(chip, desc); } +static int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip) +{ + struct device *dev = rockchip->dev; + struct phy *phy; + char *name; + u32 i; + + phy = devm_phy_get(dev, "pcie-phy"); + if (!IS_ERR(phy)) { + rockchip->legacy_phy = true; + rockchip->phys[0] = phy; + dev_warn(dev, "legacy phy model is deprecated!\n"); + return 0; + } + + if (PTR_ERR(phy) == -EPROBE_DEFER) + return PTR_ERR(phy); + + dev_dbg(dev, "missing legacy phy; search for per-lane PHY\n"); + + for (i = 0; i < MAX_LANE_NUM; i++) { + name = kasprintf(GFP_KERNEL, "pcie-phy-%u", i); + if (!name) + return -ENOMEM; + + phy = devm_of_phy_get(dev, dev->of_node, name); + kfree(name); + + if (IS_ERR(phy)) { + if (PTR_ERR(phy) != -EPROBE_DEFER) + dev_err(dev, "missing phy for lane %d: %ld\n", + i, PTR_ERR(phy)); + return PTR_ERR(phy); + } + + rockchip->phys[i] = phy; + } + + return 0; +} + +static int rockchip_pcie_setup_irq(struct rockchip_pcie *rockchip) +{ + int irq, err; + struct device *dev = rockchip->dev; + struct platform_device *pdev = to_platform_device(dev); + + irq = platform_get_irq_byname(pdev, "sys"); + if (irq < 0) { + dev_err(dev, "missing sys IRQ resource\n"); + return irq; + } + + err = devm_request_irq(dev, irq, rockchip_pcie_subsys_irq_handler, + IRQF_SHARED, "pcie-sys", rockchip); + if (err) { + dev_err(dev, "failed to request PCIe subsystem IRQ\n"); + return err; + } + + irq = platform_get_irq_byname(pdev, "legacy"); + if (irq < 0) { + dev_err(dev, "missing legacy IRQ resource\n"); + return irq; + } + + irq_set_chained_handler_and_data(irq, + 
rockchip_pcie_legacy_int_handler, + rockchip); + + irq = platform_get_irq_byname(pdev, "client"); + if (irq < 0) { + dev_err(dev, "missing client IRQ resource\n"); + return irq; + } + + err = devm_request_irq(dev, irq, rockchip_pcie_client_irq_handler, + IRQF_SHARED, "pcie-client", rockchip); + if (err) { + dev_err(dev, "failed to request PCIe client IRQ\n"); + return err; + } + + return 0; +} /** * rockchip_pcie_parse_dt - Parse Device Tree @@ -866,7 +1010,6 @@ static int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip) struct platform_device *pdev = to_platform_device(dev); struct device_node *node = dev->of_node; struct resource *regs; - int irq; int err; regs = platform_get_resource_byname(pdev, @@ -883,12 +1026,9 @@ static int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip) if (IS_ERR(rockchip->apb_base)) return PTR_ERR(rockchip->apb_base); - rockchip->phy = devm_phy_get(dev, "pcie-phy"); - if (IS_ERR(rockchip->phy)) { - if (PTR_ERR(rockchip->phy) != -EPROBE_DEFER) - dev_err(dev, "missing phy\n"); - return PTR_ERR(rockchip->phy); - } + err = rockchip_pcie_get_phys(rockchip); + if (err) + return err; rockchip->lanes = 1; err = of_property_read_u32(node, "num-lanes", &rockchip->lanes); @@ -903,49 +1043,50 @@ static int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip) if (rockchip->link_gen < 0 || rockchip->link_gen > 2) rockchip->link_gen = 2; - rockchip->core_rst = devm_reset_control_get(dev, "core"); + rockchip->core_rst = devm_reset_control_get_exclusive(dev, "core"); if (IS_ERR(rockchip->core_rst)) { if (PTR_ERR(rockchip->core_rst) != -EPROBE_DEFER) dev_err(dev, "missing core reset property in node\n"); return PTR_ERR(rockchip->core_rst); } - rockchip->mgmt_rst = devm_reset_control_get(dev, "mgmt"); + rockchip->mgmt_rst = devm_reset_control_get_exclusive(dev, "mgmt"); if (IS_ERR(rockchip->mgmt_rst)) { if (PTR_ERR(rockchip->mgmt_rst) != -EPROBE_DEFER) dev_err(dev, "missing mgmt reset property in node\n"); return PTR_ERR(rockchip->mgmt_rst); } - rockchip->mgmt_sticky_rst = devm_reset_control_get(dev, "mgmt-sticky"); + rockchip->mgmt_sticky_rst = devm_reset_control_get_exclusive(dev, + "mgmt-sticky"); if (IS_ERR(rockchip->mgmt_sticky_rst)) { if (PTR_ERR(rockchip->mgmt_sticky_rst) != -EPROBE_DEFER) dev_err(dev, "missing mgmt-sticky reset property in node\n"); return PTR_ERR(rockchip->mgmt_sticky_rst); } - rockchip->pipe_rst = devm_reset_control_get(dev, "pipe"); + rockchip->pipe_rst = devm_reset_control_get_exclusive(dev, "pipe"); if (IS_ERR(rockchip->pipe_rst)) { if (PTR_ERR(rockchip->pipe_rst) != -EPROBE_DEFER) dev_err(dev, "missing pipe reset property in node\n"); return PTR_ERR(rockchip->pipe_rst); } - rockchip->pm_rst = devm_reset_control_get(dev, "pm"); + rockchip->pm_rst = devm_reset_control_get_exclusive(dev, "pm"); if (IS_ERR(rockchip->pm_rst)) { if (PTR_ERR(rockchip->pm_rst) != -EPROBE_DEFER) dev_err(dev, "missing pm reset property in node\n"); return PTR_ERR(rockchip->pm_rst); } - rockchip->pclk_rst = devm_reset_control_get(dev, "pclk"); + rockchip->pclk_rst = devm_reset_control_get_exclusive(dev, "pclk"); if (IS_ERR(rockchip->pclk_rst)) { if (PTR_ERR(rockchip->pclk_rst) != -EPROBE_DEFER) dev_err(dev, "missing pclk reset property in node\n"); return PTR_ERR(rockchip->pclk_rst); } - rockchip->aclk_rst = devm_reset_control_get(dev, "aclk"); + rockchip->aclk_rst = devm_reset_control_get_exclusive(dev, "aclk"); if (IS_ERR(rockchip->aclk_rst)) { if (PTR_ERR(rockchip->aclk_rst) != -EPROBE_DEFER) dev_err(dev, "missing aclk reset property in node\n"); @@ -982,40 
+1123,15 @@ static int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip) return PTR_ERR(rockchip->clk_pcie_pm); } - irq = platform_get_irq_byname(pdev, "sys"); - if (irq < 0) { - dev_err(dev, "missing sys IRQ resource\n"); - return -EINVAL; - } - - err = devm_request_irq(dev, irq, rockchip_pcie_subsys_irq_handler, - IRQF_SHARED, "pcie-sys", rockchip); - if (err) { - dev_err(dev, "failed to request PCIe subsystem IRQ\n"); + err = rockchip_pcie_setup_irq(rockchip); + if (err) return err; - } - irq = platform_get_irq_byname(pdev, "legacy"); - if (irq < 0) { - dev_err(dev, "missing legacy IRQ resource\n"); - return -EINVAL; - } - - irq_set_chained_handler_and_data(irq, - rockchip_pcie_legacy_int_handler, - rockchip); - - irq = platform_get_irq_byname(pdev, "client"); - if (irq < 0) { - dev_err(dev, "missing client IRQ resource\n"); - return -EINVAL; - } - - err = devm_request_irq(dev, irq, rockchip_pcie_client_irq_handler, - IRQF_SHARED, "pcie-client", rockchip); - if (err) { - dev_err(dev, "failed to request PCIe client IRQ\n"); - return err; + rockchip->vpcie12v = devm_regulator_get_optional(dev, "vpcie12v"); + if (IS_ERR(rockchip->vpcie12v)) { + if (PTR_ERR(rockchip->vpcie12v) == -EPROBE_DEFER) + return -EPROBE_DEFER; + dev_info(dev, "no vpcie12v regulator found\n"); } rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3"); @@ -1047,11 +1163,19 @@ static int rockchip_pcie_set_vpcie(struct rockchip_pcie *rockchip) struct device *dev = rockchip->dev; int err; + if (!IS_ERR(rockchip->vpcie12v)) { + err = regulator_enable(rockchip->vpcie12v); + if (err) { + dev_err(dev, "fail to enable vpcie12v regulator\n"); + goto err_out; + } + } + if (!IS_ERR(rockchip->vpcie3v3)) { err = regulator_enable(rockchip->vpcie3v3); if (err) { dev_err(dev, "fail to enable vpcie3v3 regulator\n"); - goto err_out; + goto err_disable_12v; } } @@ -1079,6 +1203,9 @@ static int rockchip_pcie_set_vpcie(struct rockchip_pcie *rockchip) err_disable_3v3: if (!IS_ERR(rockchip->vpcie3v3)) regulator_disable(rockchip->vpcie3v3); +err_disable_12v: + if (!IS_ERR(rockchip->vpcie12v)) + regulator_disable(rockchip->vpcie12v); err_out: return err; } @@ -1116,7 +1243,7 @@ static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip) return -EINVAL; } - rockchip->irq_domain = irq_domain_add_linear(intc, 4, + rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX, &intx_domain_ops, rockchip); if (!rockchip->irq_domain) { dev_err(dev, "failed to get a INTx IRQ domain\n"); @@ -1270,6 +1397,56 @@ static int rockchip_pcie_wait_l2(struct rockchip_pcie *rockchip) return 0; } +static int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip) +{ + struct device *dev = rockchip->dev; + int err; + + err = clk_prepare_enable(rockchip->aclk_pcie); + if (err) { + dev_err(dev, "unable to enable aclk_pcie clock\n"); + return err; + } + + err = clk_prepare_enable(rockchip->aclk_perf_pcie); + if (err) { + dev_err(dev, "unable to enable aclk_perf_pcie clock\n"); + goto err_aclk_perf_pcie; + } + + err = clk_prepare_enable(rockchip->hclk_pcie); + if (err) { + dev_err(dev, "unable to enable hclk_pcie clock\n"); + goto err_hclk_pcie; + } + + err = clk_prepare_enable(rockchip->clk_pcie_pm); + if (err) { + dev_err(dev, "unable to enable clk_pcie_pm clock\n"); + goto err_clk_pcie_pm; + } + + return 0; + +err_clk_pcie_pm: + clk_disable_unprepare(rockchip->hclk_pcie); +err_hclk_pcie: + clk_disable_unprepare(rockchip->aclk_perf_pcie); +err_aclk_perf_pcie: + clk_disable_unprepare(rockchip->aclk_pcie); + return err; +} + +static 
void rockchip_pcie_disable_clocks(void *data) +{ + struct rockchip_pcie *rockchip = data; + + clk_disable_unprepare(rockchip->clk_pcie_pm); + clk_disable_unprepare(rockchip->hclk_pcie); + clk_disable_unprepare(rockchip->aclk_perf_pcie); + clk_disable_unprepare(rockchip->aclk_pcie); +} + static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev) { struct rockchip_pcie *rockchip = dev_get_drvdata(dev); @@ -1286,13 +1463,9 @@ static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev) return ret; } - phy_power_off(rockchip->phy); - phy_exit(rockchip->phy); + rockchip_pcie_deinit_phys(rockchip); - clk_disable_unprepare(rockchip->clk_pcie_pm); - clk_disable_unprepare(rockchip->hclk_pcie); - clk_disable_unprepare(rockchip->aclk_perf_pcie); - clk_disable_unprepare(rockchip->aclk_pcie); + rockchip_pcie_disable_clocks(rockchip); if (!IS_ERR(rockchip->vpcie0v9)) regulator_disable(rockchip->vpcie0v9); @@ -1313,21 +1486,9 @@ static int __maybe_unused rockchip_pcie_resume_noirq(struct device *dev) } } - err = clk_prepare_enable(rockchip->clk_pcie_pm); + err = rockchip_pcie_enable_clocks(rockchip); if (err) - goto err_pcie_pm; - - err = clk_prepare_enable(rockchip->hclk_pcie); - if (err) - goto err_hclk_pcie; - - err = clk_prepare_enable(rockchip->aclk_perf_pcie); - if (err) - goto err_aclk_perf_pcie; - - err = clk_prepare_enable(rockchip->aclk_pcie); - if (err) - goto err_aclk_pcie; + goto err_disable_0v9; err = rockchip_pcie_init_port(rockchip); if (err) @@ -1335,7 +1496,7 @@ static int __maybe_unused rockchip_pcie_resume_noirq(struct device *dev) err = rockchip_pcie_cfg_atu(rockchip); if (err) - goto err_pcie_resume; + goto err_err_deinit_port; /* Need this to enter L1 again */ rockchip_pcie_update_txcredit_mui(rockchip); @@ -1343,15 +1504,13 @@ static int __maybe_unused rockchip_pcie_resume_noirq(struct device *dev) return 0; +err_err_deinit_port: + rockchip_pcie_deinit_phys(rockchip); err_pcie_resume: - clk_disable_unprepare(rockchip->aclk_pcie); -err_aclk_pcie: - clk_disable_unprepare(rockchip->aclk_perf_pcie); -err_aclk_perf_pcie: - clk_disable_unprepare(rockchip->hclk_pcie); -err_hclk_pcie: - clk_disable_unprepare(rockchip->clk_pcie_pm); -err_pcie_pm: + rockchip_pcie_disable_clocks(rockchip); +err_disable_0v9: + if (!IS_ERR(rockchip->vpcie0v9)) + regulator_disable(rockchip->vpcie0v9); return err; } @@ -1385,29 +1544,9 @@ static int rockchip_pcie_probe(struct platform_device *pdev) if (err) return err; - err = clk_prepare_enable(rockchip->aclk_pcie); - if (err) { - dev_err(dev, "unable to enable aclk_pcie clock\n"); - goto err_aclk_pcie; - } - - err = clk_prepare_enable(rockchip->aclk_perf_pcie); - if (err) { - dev_err(dev, "unable to enable aclk_perf_pcie clock\n"); - goto err_aclk_perf_pcie; - } - - err = clk_prepare_enable(rockchip->hclk_pcie); - if (err) { - dev_err(dev, "unable to enable hclk_pcie clock\n"); - goto err_hclk_pcie; - } - - err = clk_prepare_enable(rockchip->clk_pcie_pm); - if (err) { - dev_err(dev, "unable to enable hclk_pcie clock\n"); - goto err_pcie_pm; - } + err = rockchip_pcie_enable_clocks(rockchip); + if (err) + return err; err = rockchip_pcie_set_vpcie(rockchip); if (err) { @@ -1423,12 +1562,12 @@ static int rockchip_pcie_probe(struct platform_device *pdev) err = rockchip_pcie_init_irq_domain(rockchip); if (err < 0) - goto err_vpcie; + goto err_deinit_port; err = of_pci_get_host_bridge_resources(dev->of_node, 0, 0xff, &res, &io_base); if (err) - goto err_vpcie; + goto err_remove_irq_domain; err = devm_request_pci_bus_resources(dev, &res); 
if (err) @@ -1466,12 +1605,12 @@ static int rockchip_pcie_probe(struct platform_device *pdev) err = rockchip_pcie_cfg_atu(rockchip); if (err) - goto err_free_res; + goto err_unmap_iospace; rockchip->msg_region = devm_ioremap(dev, rockchip->msg_bus_addr, SZ_1M); if (!rockchip->msg_region) { err = -ENOMEM; - goto err_free_res; + goto err_unmap_iospace; } list_splice_init(&res, &bridge->windows); @@ -1484,7 +1623,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev) err = pci_scan_root_bus_bridge(bridge); if (err < 0) - goto err_free_res; + goto err_unmap_iospace; bus = bridge->bus; @@ -1498,9 +1637,17 @@ static int rockchip_pcie_probe(struct platform_device *pdev) pci_bus_add_devices(bus); return 0; +err_unmap_iospace: + pci_unmap_iospace(rockchip->io); err_free_res: pci_free_resource_list(&res); +err_remove_irq_domain: + irq_domain_remove(rockchip->irq_domain); +err_deinit_port: + rockchip_pcie_deinit_phys(rockchip); err_vpcie: + if (!IS_ERR(rockchip->vpcie12v)) + regulator_disable(rockchip->vpcie12v); if (!IS_ERR(rockchip->vpcie3v3)) regulator_disable(rockchip->vpcie3v3); if (!IS_ERR(rockchip->vpcie1v8)) @@ -1508,14 +1655,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev) if (!IS_ERR(rockchip->vpcie0v9)) regulator_disable(rockchip->vpcie0v9); err_set_vpcie: - clk_disable_unprepare(rockchip->clk_pcie_pm); -err_pcie_pm: - clk_disable_unprepare(rockchip->hclk_pcie); -err_hclk_pcie: - clk_disable_unprepare(rockchip->aclk_perf_pcie); -err_aclk_perf_pcie: - clk_disable_unprepare(rockchip->aclk_pcie); -err_aclk_pcie: + rockchip_pcie_disable_clocks(rockchip); return err; } @@ -1529,14 +1669,12 @@ static int rockchip_pcie_remove(struct platform_device *pdev) pci_unmap_iospace(rockchip->io); irq_domain_remove(rockchip->irq_domain); - phy_power_off(rockchip->phy); - phy_exit(rockchip->phy); + rockchip_pcie_deinit_phys(rockchip); - clk_disable_unprepare(rockchip->clk_pcie_pm); - clk_disable_unprepare(rockchip->hclk_pcie); - clk_disable_unprepare(rockchip->aclk_perf_pcie); - clk_disable_unprepare(rockchip->aclk_pcie); + rockchip_pcie_disable_clocks(rockchip); + if (!IS_ERR(rockchip->vpcie12v)) + regulator_disable(rockchip->vpcie12v); if (!IS_ERR(rockchip->vpcie3v3)) regulator_disable(rockchip->vpcie3v3); if (!IS_ERR(rockchip->vpcie1v8)) diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c index eec641a34fc5..65dea98b2643 100644 --- a/drivers/pci/host/pcie-xilinx-nwl.c +++ b/drivers/pci/host/pcie-xilinx-nwl.c @@ -133,7 +133,6 @@ #define CFG_DMA_REG_BAR GENMASK(2, 0) #define INT_PCI_MSI_NR (2 * 32) -#define INTX_NUM 4 /* Readin the PS_LINKUP */ #define PS_LINKUP_OFFSET 0x00000238 @@ -334,9 +333,8 @@ static void nwl_pcie_leg_handler(struct irq_desc *desc) while ((status = nwl_bridge_readl(pcie, MSGF_LEG_STATUS) & MSGF_LEG_SR_MASKALL) != 0) { - for_each_set_bit(bit, &status, INTX_NUM) { - virq = irq_find_mapping(pcie->legacy_irq_domain, - bit + 1); + for_each_set_bit(bit, &status, PCI_NUM_INTX) { + virq = irq_find_mapping(pcie->legacy_irq_domain, bit); if (virq) generic_handle_irq(virq); } @@ -436,6 +434,7 @@ static int nwl_legacy_map(struct irq_domain *domain, unsigned int irq, static const struct irq_domain_ops legacy_domain_ops = { .map = nwl_legacy_map, + .xlate = pci_irqd_intx_xlate, }; #ifdef CONFIG_PCI_MSI @@ -559,7 +558,7 @@ static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie) } pcie->legacy_irq_domain = irq_domain_add_linear(legacy_intc_node, - INTX_NUM, + PCI_NUM_INTX, &legacy_domain_ops, pcie); @@ -813,7 +812,7 @@ static int 
nwl_pcie_parse_dt(struct nwl_pcie *pcie, pcie->irq_intx = platform_get_irq_byname(pdev, "intx"); if (pcie->irq_intx < 0) { dev_err(dev, "failed to get intx IRQ %d\n", pcie->irq_intx); - return -EINVAL; + return pcie->irq_intx; } irq_set_chained_handler_and_data(pcie->irq_intx, diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c index f63fa5e0278c..94e13cb8608f 100644 --- a/drivers/pci/host/pcie-xilinx.c +++ b/drivers/pci/host/pcie-xilinx.c @@ -5,7 +5,7 @@ * * Based on the Tegra PCIe driver * - * Bits taken from Synopsys Designware Host controller driver and + * Bits taken from Synopsys DesignWare Host controller driver and * ARM PCI Host generic driver. * * This program is free software: you can redistribute it and/or modify @@ -60,6 +60,7 @@ #define XILINX_PCIE_INTR_MST_SLVERR BIT(27) #define XILINX_PCIE_INTR_MST_ERRP BIT(28) #define XILINX_PCIE_IMR_ALL_MASK 0x1FF30FED +#define XILINX_PCIE_IMR_ENABLE_MASK 0x1FF30F0D #define XILINX_PCIE_IDR_ALL_MASK 0xFFFFFFFF /* Root Port Error FIFO Read Register definitions */ @@ -369,6 +370,7 @@ static int xilinx_pcie_intx_map(struct irq_domain *domain, unsigned int irq, /* INTx IRQ Domain operations */ static const struct irq_domain_ops intx_domain_ops = { .map = xilinx_pcie_intx_map, + .xlate = pci_irqd_intx_xlate, }; /* PCIe HW Functions */ @@ -384,7 +386,7 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data) { struct xilinx_pcie_port *port = (struct xilinx_pcie_port *)data; struct device *dev = port->dev; - u32 val, mask, status, msi_data; + u32 val, mask, status; /* Read interrupt decode and mask registers */ val = pcie_read(port, XILINX_PCIE_REG_IDR); @@ -424,8 +426,7 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data) xilinx_pcie_clear_err_interrupts(port); } - if (status & XILINX_PCIE_INTR_INTX) { - /* INTx interrupt received */ + if (status & (XILINX_PCIE_INTR_INTX | XILINX_PCIE_INTR_MSI)) { val = pcie_read(port, XILINX_PCIE_REG_RPIFR1); /* Check whether interrupt valid */ @@ -434,41 +435,24 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data) goto error; } - if (!(val & XILINX_PCIE_RPIFR1_MSI_INTR)) { - /* Clear interrupt FIFO register 1 */ - pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK, - XILINX_PCIE_REG_RPIFR1); - - /* Handle INTx Interrupt */ - val = ((val & XILINX_PCIE_RPIFR1_INTR_MASK) >> - XILINX_PCIE_RPIFR1_INTR_SHIFT) + 1; - generic_handle_irq(irq_find_mapping(port->leg_domain, - val)); - } - } - - if (status & XILINX_PCIE_INTR_MSI) { - /* MSI Interrupt */ - val = pcie_read(port, XILINX_PCIE_REG_RPIFR1); - - if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) { - dev_warn(dev, "RP Intr FIFO1 read error\n"); - goto error; - } - + /* Decode the IRQ number */ if (val & XILINX_PCIE_RPIFR1_MSI_INTR) { - msi_data = pcie_read(port, XILINX_PCIE_REG_RPIFR2) & - XILINX_PCIE_RPIFR2_MSG_DATA; - - /* Clear interrupt FIFO register 1 */ - pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK, - XILINX_PCIE_REG_RPIFR1); - - if (IS_ENABLED(CONFIG_PCI_MSI)) { - /* Handle MSI Interrupt */ - generic_handle_irq(msi_data); - } + val = pcie_read(port, XILINX_PCIE_REG_RPIFR2) & + XILINX_PCIE_RPIFR2_MSG_DATA; + } else { + val = (val & XILINX_PCIE_RPIFR1_INTR_MASK) >> + XILINX_PCIE_RPIFR1_INTR_SHIFT; + val = irq_find_mapping(port->leg_domain, val); } + + /* Clear interrupt FIFO register 1 */ + pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK, + XILINX_PCIE_REG_RPIFR1); + + /* Handle the interrupt */ + if (IS_ENABLED(CONFIG_PCI_MSI) || + !(val & XILINX_PCIE_RPIFR1_MSI_INTR)) + generic_handle_irq(val); } if 
(status & XILINX_PCIE_INTR_SLV_UNSUPP) @@ -524,7 +508,7 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port) return -ENODEV; } - port->leg_domain = irq_domain_add_linear(pcie_intc_node, 4, + port->leg_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, &intx_domain_ops, port); if (!port->leg_domain) { @@ -571,8 +555,8 @@ static void xilinx_pcie_init_port(struct xilinx_pcie_port *port) XILINX_PCIE_IMR_ALL_MASK, XILINX_PCIE_REG_IDR); - /* Enable all interrupts */ - pcie_write(port, XILINX_PCIE_IMR_ALL_MASK, XILINX_PCIE_REG_IMR); + /* Enable all interrupts we handle */ + pcie_write(port, XILINX_PCIE_IMR_ENABLE_MASK, XILINX_PCIE_REG_IMR); /* Enable the Bridge enable bit */ pcie_write(port, pcie_read(port, XILINX_PCIE_REG_RPSC) | diff --git a/drivers/pci/host/vmd.c b/drivers/pci/host/vmd.c index 6088c3083194..509893bc3e63 100644 --- a/drivers/pci/host/vmd.c +++ b/drivers/pci/host/vmd.c @@ -183,7 +183,7 @@ static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *d int i, best = 1; unsigned long flags; - if (!desc->msi_attrib.is_msix || vmd->msix_count == 1) + if (pci_is_bridge(msi_desc_to_pci_dev(desc)) || vmd->msix_count == 1) return &vmd->irqs[0]; raw_spin_lock_irqsave(&list_lock, flags); @@ -697,7 +697,7 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id) return -ENODEV; vmd->msix_count = pci_alloc_irq_vectors(dev, 1, vmd->msix_count, - PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); + PCI_IRQ_MSIX); if (vmd->msix_count < 0) return vmd->msix_count; @@ -755,6 +755,11 @@ static void vmd_remove(struct pci_dev *dev) static int vmd_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); + struct vmd_dev *vmd = pci_get_drvdata(pdev); + int i; + + for (i = 0; i < vmd->msix_count; i++) + devm_free_irq(dev, pci_irq_vector(pdev, i), &vmd->irqs[i]); pci_save_state(pdev); return 0; @@ -763,6 +768,16 @@ static int vmd_suspend(struct device *dev) static int vmd_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); + struct vmd_dev *vmd = pci_get_drvdata(pdev); + int err, i; + + for (i = 0; i < vmd->msix_count; i++) { + err = devm_request_irq(dev, pci_irq_vector(pdev, i), + vmd_irq, IRQF_NO_THREAD, + "vmd", &vmd->irqs[i]); + if (err) + return err; + } pci_restore_state(pdev); return 0; diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c index 5f49c3fd736a..2f8659a148f5 100644 --- a/drivers/pci/hotplug/cpcihp_zt5550.c +++ b/drivers/pci/hotplug/cpcihp_zt5550.c @@ -280,7 +280,7 @@ static void zt5550_hc_remove_one(struct pci_dev *pdev) } -static struct pci_device_id zt5550_hc_pci_tbl[] = { +static const struct pci_device_id zt5550_hc_pci_tbl[] = { { PCI_VENDOR_ID_ZIATECH, PCI_DEVICE_ID_ZIATECH_5550_HC, PCI_ANY_ID, PCI_ANY_ID, }, { 0, } }; diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c index 33d300d12411..4d06b8461255 100644 --- a/drivers/pci/hotplug/cpqphp_core.c +++ b/drivers/pci/hotplug/cpqphp_core.c @@ -1417,7 +1417,7 @@ static void __exit unload_cpqphpd(void) iounmap(smbios_start); } -static struct pci_device_id hpcd_pci_tbl[] = { +static const struct pci_device_id hpcd_pci_tbl[] = { { /* handle any PCI Hotplug controller */ .class = ((PCI_CLASS_SYSTEM_PCI_HOTPLUG << 8) | 0x00), diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c index 5efd01d84498..73cf84645c82 100644 --- a/drivers/pci/hotplug/ibmphp_core.c +++ b/drivers/pci/hotplug/ibmphp_core.c @@ -852,7 +852,7 @@ static int set_bus(struct slot *slot_cur) u8 speed; 
u8 cmd = 0x0; int retval; - static struct pci_device_id ciobx[] = { + static const struct pci_device_id ciobx[] = { { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, 0x0101) }, { }, }; diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c index 43e345ac296b..a6a4dac798e5 100644 --- a/drivers/pci/hotplug/ibmphp_ebda.c +++ b/drivers/pci/hotplug/ibmphp_ebda.c @@ -1153,7 +1153,7 @@ void ibmphp_free_ebda_pci_rsrc_queue(void) } } -static struct pci_device_id id_table[] = { +static const struct pci_device_id id_table[] = { { .vendor = PCI_VENDOR_ID_IBM, .device = HPC_DEVICE_ID, diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 026830a138ae..e5d5ce9e3010 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -586,6 +586,14 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id) events = status & (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD | PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC); + + /* + * If we've already reported a power fault, don't report it again + * until we've done something to handle it. + */ + if (ctrl->power_fault_detected) + events &= ~PCI_EXP_SLTSTA_PFD; + if (!events) return IRQ_NONE; diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c index 7c203198b582..74f6a17e4614 100644 --- a/drivers/pci/hotplug/pnv_php.c +++ b/drivers/pci/hotplug/pnv_php.c @@ -163,8 +163,8 @@ static void pnv_php_detach_device_nodes(struct device_node *parent) of_node_put(dn); refcount = kref_read(&dn->kobj.kref); if (refcount != 1) - pr_warn("Invalid refcount %d on <%s>\n", - refcount, of_node_full_name(dn)); + pr_warn("Invalid refcount %d on <%pOF>\n", + refcount, dn); of_detach_node(dn); } diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c index 3f93a4e79595..a3449d717a99 100644 --- a/drivers/pci/hotplug/rpadlpar_core.c +++ b/drivers/pci/hotplug/rpadlpar_core.c @@ -150,8 +150,8 @@ static void dlpar_pci_add_bus(struct device_node *dn) /* Add EADS device to PHB bus, adding new entry to bus->devices */ dev = of_create_pci_dev(dn, phb->bus, pdn->devfn); if (!dev) { - printk(KERN_ERR "%s: failed to create pci dev for %s\n", - __func__, dn->full_name); + printk(KERN_ERR "%s: failed to create pci dev for %pOF\n", + __func__, dn); return; } diff --git a/drivers/pci/hotplug/rpadlpar_sysfs.c b/drivers/pci/hotplug/rpadlpar_sysfs.c index a796301ea03f..edb5d8a53020 100644 --- a/drivers/pci/hotplug/rpadlpar_sysfs.c +++ b/drivers/pci/hotplug/rpadlpar_sysfs.c @@ -102,7 +102,7 @@ static struct attribute *default_attrs[] = { NULL, }; -static struct attribute_group dlpar_attr_group = { +static const struct attribute_group dlpar_attr_group = { .attrs = default_attrs, }; diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c index 8d132024f06e..1e29abaaea08 100644 --- a/drivers/pci/hotplug/rpaphp_core.c +++ b/drivers/pci/hotplug/rpaphp_core.c @@ -318,7 +318,7 @@ int rpaphp_add_slot(struct device_node *dn) if (!is_php_dn(dn, &indexes, &names, &types, &power_domains)) return 0; - dbg("Entry %s: dn->full_name=%s\n", __func__, dn->full_name); + dbg("Entry %s: dn=%pOF\n", __func__, dn); /* register PCI devices */ name = (char *) &names[1]; diff --git a/drivers/pci/hotplug/rpaphp_pci.c b/drivers/pci/hotplug/rpaphp_pci.c index ea41ea1d3c00..32aabc533be8 100644 --- a/drivers/pci/hotplug/rpaphp_pci.c +++ b/drivers/pci/hotplug/rpaphp_pci.c @@ -95,7 +95,7 @@ int rpaphp_enable_slot(struct slot *slot) bus = pci_find_bus_by_node(slot->dn); if (!bus) 
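The pciehp_isr() hunk above latches power-fault reporting: once ctrl->power_fault_detected is set, further PCI_EXP_SLTSTA_PFD bits are masked out of the event set until the fault has been handled, so a stuck power-fault line no longer floods the log. A minimal user-space sketch of that masking, with the Slot Status bit values copied from pci_regs.h and a hypothetical status word:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Slot Status bits as in the kernel headers (PCI_EXP_SLTSTA_*). */
#define SLTSTA_ABP	0x0001	/* Attention Button Pressed */
#define SLTSTA_PFD	0x0002	/* Power Fault Detected */
#define SLTSTA_PDC	0x0008	/* Presence Detect Changed */
#define SLTSTA_CC	0x0010	/* Command Completed */
#define SLTSTA_DLLSC	0x0100	/* Data Link Layer State Changed */

/*
 * Model of the pciehp_isr() hunk: once a power fault has been reported,
 * further PFD events are masked until it has been handled.
 */
static uint16_t interesting_events(uint16_t status, bool power_fault_detected)
{
	uint16_t events = status & (SLTSTA_ABP | SLTSTA_PFD | SLTSTA_PDC |
				    SLTSTA_CC | SLTSTA_DLLSC);

	if (power_fault_detected)
		events &= ~SLTSTA_PFD;

	return events;
}

int main(void)
{
	uint16_t status = SLTSTA_PFD | SLTSTA_PDC;	/* hypothetical reading */

	printf("first interrupt : events 0x%04x\n",
	       (unsigned)interesting_events(status, false));
	printf("repeat interrupt: events 0x%04x\n",
	       (unsigned)interesting_events(status, true));
	return 0;
}

The second call drops the PFD bit, which is exactly the "don't report it again until we've done something to handle it" behaviour the comment in the hunk describes.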
{ - err("%s: no pci_bus for dn %s\n", __func__, slot->dn->full_name); + err("%s: no pci_bus for dn %pOF\n", __func__, slot->dn); return -EINVAL; } @@ -125,7 +125,7 @@ int rpaphp_enable_slot(struct slot *slot) if (rpaphp_debug) { struct pci_dev *dev; - dbg("%s: pci_devs of slot[%s]\n", __func__, slot->dn->full_name); + dbg("%s: pci_devs of slot[%pOF]\n", __func__, slot->dn); list_for_each_entry(dev, &bus->devices, bus_list) dbg("\t%s\n", pci_name(dev)); } diff --git a/drivers/pci/hotplug/rpaphp_slot.c b/drivers/pci/hotplug/rpaphp_slot.c index 388c4d8fcdd1..489862360f2c 100644 --- a/drivers/pci/hotplug/rpaphp_slot.c +++ b/drivers/pci/hotplug/rpaphp_slot.c @@ -122,8 +122,8 @@ int rpaphp_register_slot(struct slot *slot) int retval; int slotno = -1; - dbg("%s registering slot:path[%s] index[%x], name[%s] pdomain[%x] type[%d]\n", - __func__, slot->dn->full_name, slot->index, slot->name, + dbg("%s registering slot:path[%pOF] index[%x], name[%s] pdomain[%x] type[%d]\n", + __func__, slot->dn, slot->index, slot->name, slot->power_domain, slot->type); /* should not try to register the same slot twice */ diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c index 3454dc7385f1..7bfb87bd2b7e 100644 --- a/drivers/pci/hotplug/shpchp_core.c +++ b/drivers/pci/hotplug/shpchp_core.c @@ -351,7 +351,7 @@ static void shpc_remove(struct pci_dev *dev) kfree(ctrl); } -static struct pci_device_id shpcd_pci_tbl[] = { +static const struct pci_device_id shpcd_pci_tbl[] = { {PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_PCI << 8) | 0x00), ~0)}, { /* end: all zeroes */ } }; diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c index de0ea474fb73..e5824c7b7b6b 100644 --- a/drivers/pci/hotplug/shpchp_hpc.c +++ b/drivers/pci/hotplug/shpchp_hpc.c @@ -1062,6 +1062,8 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev) if (rc) { ctrl_info(ctrl, "Can't get msi for the hotplug controller\n"); ctrl_info(ctrl, "Use INTx for the hotplug controller\n"); + } else { + pci_set_master(pdev); } rc = request_irq(ctrl->pci_dev->irq, shpc_isr, IRQF_SHARED, diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index 120485d6f352..ac41c8be9200 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c @@ -331,7 +331,6 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn) while (i--) pci_iov_remove_virtfn(dev, i, 0); - pcibios_sriov_disable(dev); err_pcibios: iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE); pci_cfg_access_lock(dev); @@ -339,6 +338,8 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn) ssleep(1); pci_cfg_access_unlock(dev); + pcibios_sriov_disable(dev); + if (iov->link != dev->devfn) sysfs_remove_link(&dev->dev.kobj, "dep_link"); @@ -357,14 +358,14 @@ static void sriov_disable(struct pci_dev *dev) for (i = 0; i < iov->num_VFs; i++) pci_iov_remove_virtfn(dev, i, 0); - pcibios_sriov_disable(dev); - iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE); pci_cfg_access_lock(dev); pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl); ssleep(1); pci_cfg_access_unlock(dev); + pcibios_sriov_disable(dev); + if (iov->link != dev->devfn) sysfs_remove_link(&dev->dev.kobj, "dep_link"); diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 2225afc1cbbb..496ed9130600 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -1451,13 +1451,30 @@ struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode, } EXPORT_SYMBOL_GPL(pci_msi_create_irq_domain); +/* + * Users of the generic MSI infrastructure expect a device to have a single ID, + 
* so with DMA aliases we have to pick the least-worst compromise. Devices with + * DMA phantom functions tend to still emit MSIs from the real function number, + * so we ignore those and only consider topological aliases where either the + * alias device or RID appears on a different bus number. We also make the + * reasonable assumption that bridges are walked in an upstream direction (so + * the last one seen wins), and the much braver assumption that the most likely + * case is that of PCI->PCIe so we should always use the alias RID. This echoes + * the logic from intel_irq_remapping's set_msi_sid(), which presumably works + * well enough in practice; in the face of the horrible PCIe<->PCI-X conditions + * for taking ownership all we can really do is close our eyes and hope... + */ static int get_msi_id_cb(struct pci_dev *pdev, u16 alias, void *data) { u32 *pa = data; + u8 bus = PCI_BUS_NUM(*pa); + + if (pdev->bus->number != bus || PCI_BUS_NUM(alias) != bus) + *pa = alias; - *pa = alias; return 0; } + /** * pci_msi_domain_get_msi_rid - Get the MSI requester id (RID) * @domain: The interrupt domain @@ -1471,7 +1488,7 @@ static int get_msi_id_cb(struct pci_dev *pdev, u16 alias, void *data) u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev) { struct device_node *of_node; - u32 rid = 0; + u32 rid = PCI_DEVID(pdev->bus->number, pdev->devfn); pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid); @@ -1487,14 +1504,14 @@ u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev) * @pdev: The PCI device * * Use the firmware data to find a device-specific MSI domain - * (i.e. not one that is ste as a default). + * (i.e. not one that is set as a default). * - * Returns: The coresponding MSI domain or NULL if none has been found. + * Returns: The corresponding MSI domain or NULL if none has been found. */ struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev) { struct irq_domain *dom; - u32 rid = 0; + u32 rid = PCI_DEVID(pdev->bus->number, pdev->devfn); pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid); dom = of_msi_map_get_device_domain(&pdev->dev, rid); diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index e70c1c7ba1bf..a8da543b3814 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -573,7 +573,7 @@ static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable) { while (bus->parent) { if (acpi_pm_device_can_wakeup(&bus->self->dev)) - return acpi_pm_set_device_wakeup(&bus->self->dev, enable); + return acpi_pm_set_bridge_wakeup(&bus->self->dev, enable); bus = bus->parent; } @@ -581,7 +581,7 @@ static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable) /* We have reached the root bus. 
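The alias-selection rule that get_msi_id_cb() now applies is easy to exercise in isolation: starting from the device's own requester ID, an alias only replaces the current value when either the walked device or the alias itself sits on a different bus number, so same-bus phantom functions are skipped and the last cross-bus (most upstream) alias wins. A minimal stand-alone sketch of that rule; the helper macros are local copies of the kernel ones, the topology and alias sequence are made up, and the walked device's bus number is passed in explicitly instead of coming from a struct pci_dev:

#include <stdio.h>
#include <stdint.h>

/* Local stand-ins for the kernel's PCI_DEVFN/PCI_DEVID/PCI_BUS_NUM helpers. */
#define PCI_DEVFN(slot, func)	((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define PCI_DEVID(bus, devfn)	((((uint16_t)(bus)) << 8) | (devfn))
#define PCI_BUS_NUM(rid)	(((rid) >> 8) & 0xff)

/* Mirror of get_msi_id_cb(): only take an alias that crosses a bus boundary. */
static void update_rid(uint8_t walked_dev_bus, uint16_t alias, uint32_t *rid)
{
	uint8_t bus = PCI_BUS_NUM(*rid);

	if (walked_dev_bus != bus || PCI_BUS_NUM(alias) != bus)
		*rid = alias;
}

int main(void)
{
	/*
	 * Hypothetical topology: endpoint 04:00.0 behind a PCIe-to-PCI
	 * bridge at 03:00.0.  Start from the endpoint's own RID, as the
	 * patched pci_msi_domain_get_msi_rid() now does.
	 */
	uint32_t rid = PCI_DEVID(0x04, PCI_DEVFN(0, 0));

	/* Phantom function reported on the endpoint's own bus: ignored. */
	update_rid(0x04, PCI_DEVID(0x04, PCI_DEVFN(0, 1)), &rid);

	/* Alias taken at the bridge, which sits on bus 0x03: wins. */
	update_rid(0x03, PCI_DEVID(0x03, PCI_DEVFN(0, 0)), &rid);

	printf("MSI requester ID used: %04x\n", (unsigned int)rid);
	return 0;
}

Run as ordinary C this prints 0300, the bridge's RID, which is the outcome the comment argues for in the PCIe-to-PCI case.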
*/ if (bus->bridge) { if (acpi_pm_device_can_wakeup(bus->bridge)) - return acpi_pm_set_device_wakeup(bus->bridge, enable); + return acpi_pm_set_bridge_wakeup(bus->bridge, enable); } return 0; } diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 4450feaf5c00..11bd267fc137 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -647,9 +647,7 @@ static int pci_legacy_resume(struct device *dev) static void pci_pm_default_resume(struct pci_dev *pci_dev) { pci_fixup_device(pci_fixup_resume, pci_dev); - - if (!pci_has_subordinate(pci_dev)) - pci_enable_wake(pci_dev, PCI_D0, false); + pci_enable_wake(pci_dev, PCI_D0, false); } static void pci_pm_default_suspend(struct pci_dev *pci_dev) diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c index a7a41d9c29df..7e9e79575d93 100644 --- a/drivers/pci/pci-label.c +++ b/drivers/pci/pci-label.c @@ -123,7 +123,7 @@ static struct attribute *smbios_attributes[] = { NULL, }; -static struct attribute_group smbios_attr_group = { +static const struct attribute_group smbios_attr_group = { .attrs = smbios_attributes, .is_visible = smbios_instance_string_exist, }; @@ -260,7 +260,7 @@ static struct attribute *acpi_attributes[] = { NULL, }; -static struct attribute_group acpi_attr_group = { +static const struct attribute_group acpi_attr_group = { .attrs = acpi_attributes, .is_visible = acpi_index_string_exist, }; diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 2f3780b50723..8e075ea2743e 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -556,9 +556,9 @@ static ssize_t devspec_show(struct device *dev, struct pci_dev *pdev = to_pci_dev(dev); struct device_node *np = pci_device_to_OF_node(pdev); - if (np == NULL || np->full_name == NULL) + if (np == NULL) return 0; - return sprintf(buf, "%s", np->full_name); + return sprintf(buf, "%pOF", np); } static DEVICE_ATTR_RO(devspec); #endif @@ -686,7 +686,7 @@ static ssize_t driver_override_store(struct device *dev, const char *buf, size_t count) { struct pci_dev *pdev = to_pci_dev(dev); - char *driver_override, *old = pdev->driver_override, *cp; + char *driver_override, *old, *cp; /* We need to keep extra room for a newline */ if (count >= (PAGE_SIZE - 1)) @@ -700,12 +700,15 @@ static ssize_t driver_override_store(struct device *dev, if (cp) *cp = '\0'; + device_lock(dev); + old = pdev->driver_override; if (strlen(driver_override)) { pdev->driver_override = driver_override; } else { kfree(driver_override); pdev->driver_override = NULL; } + device_unlock(dev); kfree(old); @@ -716,8 +719,12 @@ static ssize_t driver_override_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pdev = to_pci_dev(dev); + ssize_t len; - return snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override); + device_lock(dev); + len = snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override); + device_unlock(dev); + return len; } static DEVICE_ATTR_RW(driver_override); @@ -1211,11 +1218,8 @@ static ssize_t pci_resource_io(struct file *filp, struct kobject *kobj, { struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj)); int bar = (unsigned long)attr->private; - struct resource *res; unsigned long port = off; - res = &pdev->resource[bar]; - port += pci_resource_start(pdev, bar); if (port > pci_resource_end(pdev, bar)) @@ -1431,7 +1435,7 @@ static ssize_t pci_read_rom(struct file *filp, struct kobject *kobj, return count; } -static struct bin_attribute pci_config_attr = { +static const struct bin_attribute pci_config_attr = { .attr = { .name = 
"config", .mode = S_IRUGO | S_IWUSR, @@ -1441,7 +1445,7 @@ static struct bin_attribute pci_config_attr = { .write = pci_write_config, }; -static struct bin_attribute pcie_config_attr = { +static const struct bin_attribute pcie_config_attr = { .attr = { .name = "config", .mode = S_IRUGO | S_IWUSR, @@ -1735,7 +1739,7 @@ const struct attribute_group *pcie_dev_groups[] = { NULL, }; -static struct attribute_group pci_dev_hp_attr_group = { +static const struct attribute_group pci_dev_hp_attr_group = { .attrs = pci_dev_hp_attrs, .is_visible = pci_dev_hp_attrs_are_visible, }; @@ -1759,23 +1763,23 @@ static umode_t sriov_attrs_are_visible(struct kobject *kobj, return a->mode; } -static struct attribute_group sriov_dev_attr_group = { +static const struct attribute_group sriov_dev_attr_group = { .attrs = sriov_dev_attrs, .is_visible = sriov_attrs_are_visible, }; #endif /* CONFIG_PCI_IOV */ -static struct attribute_group pci_dev_attr_group = { +static const struct attribute_group pci_dev_attr_group = { .attrs = pci_dev_dev_attrs, .is_visible = pci_dev_attrs_are_visible, }; -static struct attribute_group pci_bridge_attr_group = { +static const struct attribute_group pci_bridge_attr_group = { .attrs = pci_bridge_attrs, .is_visible = pci_bridge_attrs_are_visible, }; -static struct attribute_group pcie_dev_attr_group = { +static const struct attribute_group pcie_dev_attr_group = { .attrs = pcie_dev_attrs, .is_visible = pcie_dev_attrs_are_visible, }; diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index fdf65a6c13f6..6078dfc11b11 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -892,7 +892,9 @@ EXPORT_SYMBOL_GPL(__pci_complete_power_transition); * -EINVAL if the requested state is invalid. * -EIO if device does not support PCI PM or its PM capabilities register has a * wrong version, or device doesn't support the requested state. + * 0 if the transition is to D1 or D2 but D1 and D2 are not supported. * 0 if device already is in the requested state. + * 0 if the transition is to D3 but D3 is not supported. * 0 if device's power state has been successfully changed. */ int pci_set_power_state(struct pci_dev *dev, pci_power_t state) @@ -1912,6 +1914,13 @@ int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable) { int ret = 0; + /* + * Bridges can only signal wakeup on behalf of subordinate devices, + * but that is set up elsewhere, so skip them. + */ + if (pci_has_subordinate(dev)) + return 0; + /* Don't do the same thing twice in a row for one device. */ if (!!enable == !!dev->wakeup_prepared) return 0; @@ -3811,27 +3820,49 @@ int pci_wait_for_pending_transaction(struct pci_dev *dev) } EXPORT_SYMBOL(pci_wait_for_pending_transaction); -/* - * We should only need to wait 100ms after FLR, but some devices take longer. - * Wait for up to 1000ms for config space to return something other than -1. - * Intel IGD requires this when an LCD panel is attached. We read the 2nd - * dword because VFs don't implement the 1st dword. - */ static void pci_flr_wait(struct pci_dev *dev) { - int i = 0; + int delay = 1, timeout = 60000; u32 id; - do { - msleep(100); - pci_read_config_dword(dev, PCI_COMMAND, &id); - } while (i++ < 10 && id == ~0); + /* + * Per PCIe r3.1, sec 6.6.2, a device must complete an FLR within + * 100ms, but may silently discard requests while the FLR is in + * progress. Wait 100ms before trying to access the device. 
+ */ + msleep(100); - if (id == ~0) - dev_warn(&dev->dev, "Failed to return from FLR\n"); - else if (i > 1) - dev_info(&dev->dev, "Required additional %dms to return from FLR\n", - (i - 1) * 100); + /* + * After 100ms, the device should not silently discard config + * requests, but it may still indicate that it needs more time by + * responding to them with CRS completions. The Root Port will + * generally synthesize ~0 data to complete the read (except when + * CRS SV is enabled and the read was for the Vendor ID; in that + * case it synthesizes 0x0001 data). + * + * Wait for the device to return a non-CRS completion. Read the + * Command register instead of Vendor ID so we don't have to + * contend with the CRS SV value. + */ + pci_read_config_dword(dev, PCI_COMMAND, &id); + while (id == ~0) { + if (delay > timeout) { + dev_warn(&dev->dev, "not ready %dms after FLR; giving up\n", + 100 + delay - 1); + return; + } + + if (delay > 1000) + dev_info(&dev->dev, "not ready %dms after FLR; waiting\n", + 100 + delay - 1); + + msleep(delay); + delay *= 2; + pci_read_config_dword(dev, PCI_COMMAND, &id); + } + + if (delay > 1000) + dev_info(&dev->dev, "ready %dms after FLR\n", 100 + delay - 1); } /** @@ -5398,8 +5429,8 @@ static int of_pci_bus_find_domain_nr(struct device *parent) use_dt_domains = 0; domain = pci_get_new_domain_nr(); } else { - dev_err(parent, "Node %s has inconsistent \"linux,pci-domain\" property in DT\n", - parent->of_node->full_name); + dev_err(parent, "Node %pOF has inconsistent \"linux,pci-domain\" property in DT\n", + parent->of_node); domain = -1; } diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 22e061738c6f..a6560c9baa52 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -235,6 +235,7 @@ enum pci_bar_type { pci_bar_mem64, /* A 64-bit memory BAR */ }; +int pci_configure_extended_tags(struct pci_dev *dev, void *ign); bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl, int crs_timeout); int pci_setup_device(struct pci_dev *dev); diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c index dea186a9d6b6..6ff5f5b4f5e6 100644 --- a/drivers/pci/pcie/aer/aerdrv.c +++ b/drivers/pci/pcie/aer/aerdrv.c @@ -32,16 +32,9 @@ static int aer_probe(struct pcie_device *dev); static void aer_remove(struct pcie_device *dev); -static pci_ers_result_t aer_error_detected(struct pci_dev *dev, - enum pci_channel_state error); static void aer_error_resume(struct pci_dev *dev); static pci_ers_result_t aer_root_reset(struct pci_dev *dev); -static const struct pci_error_handlers aer_error_handlers = { - .error_detected = aer_error_detected, - .resume = aer_error_resume, -}; - static struct pcie_port_service_driver aerdriver = { .name = "aer", .port_type = PCI_EXP_TYPE_ROOT_PORT, @@ -49,9 +42,7 @@ static struct pcie_port_service_driver aerdriver = { .probe = aer_probe, .remove = aer_remove, - - .err_handler = &aer_error_handlers, - + .error_resume = aer_error_resume, .reset_link = aer_root_reset, }; @@ -349,20 +340,6 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev) return PCI_ERS_RESULT_RECOVERED; } -/** - * aer_error_detected - update severity status - * @dev: pointer to Root Port's pci_dev data structure - * @error: error severity being notified by port bus - * - * Invoked by Port Bus driver during error recovery. - */ -static pci_ers_result_t aer_error_detected(struct pci_dev *dev, - enum pci_channel_state error) -{ - /* Root Port has no impact. Always recovers. 
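The rewritten pci_flr_wait() above swaps a fixed ten-iteration poll for a delay that doubles from 1 ms up to a 60 s budget after the mandatory 100 ms wait. The timing can be checked outside the kernel; in the sketch below read_command_reg() is a stand-in for pci_read_config_dword() against a hypothetical device that keeps returning ~0 for 700 ms, and the loop preserves the invariant that 100 + delay - 1 equals the elapsed time the patch reports in its messages:

#include <stdio.h>
#include <stdint.h>

/*
 * Stand-in for pci_read_config_dword(): the hypothetical device answers
 * every config read with ~0 until 'ready_after_ms' has passed since FLR.
 */
static uint32_t read_command_reg(int elapsed_ms, int ready_after_ms)
{
	return elapsed_ms >= ready_after_ms ? 0x00100007 : ~0u;
}

int main(void)
{
	int delay = 1, timeout = 60000;		/* same bounds as the patch */
	int ready_after_ms = 700;		/* hypothetical slow endpoint */
	int elapsed = 100;			/* mandatory post-FLR 100 ms */
	uint32_t id = read_command_reg(elapsed, ready_after_ms);

	while (id == ~0u) {
		/* At this point: elapsed == 100 + delay - 1. */
		if (delay > timeout) {
			printf("not ready %dms after FLR; giving up\n",
			       100 + delay - 1);
			return 1;
		}
		elapsed += delay;		/* msleep(delay) in the kernel */
		delay *= 2;
		id = read_command_reg(elapsed, ready_after_ms);
	}

	printf("ready %dms after FLR\n", 100 + delay - 1);
	return 0;
}

With the 700 ms example the sketch reports readiness at 1123 ms, showing how the doubling delay trades a little extra latency for a much larger worst-case budget than the old 1 s cap.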
*/ - return PCI_ERS_RESULT_CAN_RECOVER; -} - /** * aer_error_resume - clean up corresponding error status bits * @dev: pointer to Root Port's pci_dev data structure diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c index b1303b32053f..890efcc574cb 100644 --- a/drivers/pci/pcie/aer/aerdrv_core.c +++ b/drivers/pci/pcie/aer/aerdrv_core.c @@ -5,10 +5,10 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * This file implements the core part of PCI-Express AER. When an pci-express + * This file implements the core part of PCIe AER. When a PCIe * error is delivered, an error message will be collected and printed to * console, then, an error recovery procedure will be executed by following - * the pci error recovery rules. + * the PCI error recovery rules. * * Copyright (C) 2006 Intel Corp. * Tom Long Nguyen (tom.l.nguyen@intel.com) diff --git a/drivers/pci/pcie/pcie-dpc.c b/drivers/pci/pcie/pcie-dpc.c index c39f32e42b4d..2d976a623ddc 100644 --- a/drivers/pci/pcie/pcie-dpc.c +++ b/drivers/pci/pcie/pcie-dpc.c @@ -16,17 +16,62 @@ #include #include "../pci.h" +struct rp_pio_header_log_regs { + u32 dw0; + u32 dw1; + u32 dw2; + u32 dw3; +}; + +struct dpc_rp_pio_regs { + u32 status; + u32 mask; + u32 severity; + u32 syserror; + u32 exception; + + struct rp_pio_header_log_regs header_log; + u32 impspec_log; + u32 tlp_prefix_log[4]; + u32 log_size; + u16 first_error; +}; + struct dpc_dev { struct pcie_device *dev; struct work_struct work; int cap_pos; bool rp; + u32 rp_pio_status; +}; + +static const char * const rp_pio_error_string[] = { + "Configuration Request received UR Completion", /* Bit Position 0 */ + "Configuration Request received CA Completion", /* Bit Position 1 */ + "Configuration Request Completion Timeout", /* Bit Position 2 */ + NULL, + NULL, + NULL, + NULL, + NULL, + "I/O Request received UR Completion", /* Bit Position 8 */ + "I/O Request received CA Completion", /* Bit Position 9 */ + "I/O Request Completion Timeout", /* Bit Position 10 */ + NULL, + NULL, + NULL, + NULL, + NULL, + "Memory Request received UR Completion", /* Bit Position 16 */ + "Memory Request received CA Completion", /* Bit Position 17 */ + "Memory Request Completion Timeout", /* Bit Position 18 */ }; static int dpc_wait_rp_inactive(struct dpc_dev *dpc) { unsigned long timeout = jiffies + HZ; struct pci_dev *pdev = dpc->dev->port; + struct device *dev = &dpc->dev->device; u16 status; pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_STATUS, &status); @@ -36,15 +81,17 @@ static int dpc_wait_rp_inactive(struct dpc_dev *dpc) pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_STATUS, &status); } if (status & PCI_EXP_DPC_RP_BUSY) { - dev_warn(&pdev->dev, "DPC root port still busy\n"); + dev_warn(dev, "DPC root port still busy\n"); return -EBUSY; } return 0; } -static void dpc_wait_link_inactive(struct pci_dev *pdev) +static void dpc_wait_link_inactive(struct dpc_dev *dpc) { unsigned long timeout = jiffies + HZ; + struct pci_dev *pdev = dpc->dev->port; + struct device *dev = &dpc->dev->device; u16 lnk_status; pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status); @@ -54,7 +101,7 @@ static void dpc_wait_link_inactive(struct pci_dev *pdev) pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status); } if (lnk_status & PCI_EXP_LNKSTA_DLLLA) - dev_warn(&pdev->dev, "Link state not disabled for DPC event\n"); + dev_warn(dev, "Link state not disabled for DPC event\n"); } static void interrupt_event_handler(struct work_struct *work) @@ 
-76,17 +123,132 @@ static void interrupt_event_handler(struct work_struct *work) } pci_unlock_rescan_remove(); - dpc_wait_link_inactive(pdev); + dpc_wait_link_inactive(dpc); if (dpc->rp && dpc_wait_rp_inactive(dpc)) return; + if (dpc->rp && dpc->rp_pio_status) { + pci_write_config_dword(pdev, + dpc->cap_pos + PCI_EXP_DPC_RP_PIO_STATUS, + dpc->rp_pio_status); + dpc->rp_pio_status = 0; + } + pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_STATUS, PCI_EXP_DPC_STATUS_TRIGGER | PCI_EXP_DPC_STATUS_INTERRUPT); } +static void dpc_rp_pio_print_tlp_header(struct device *dev, + struct rp_pio_header_log_regs *t) +{ + dev_err(dev, "TLP Header: %#010x %#010x %#010x %#010x\n", + t->dw0, t->dw1, t->dw2, t->dw3); +} + +static void dpc_rp_pio_print_error(struct dpc_dev *dpc, + struct dpc_rp_pio_regs *rp_pio) +{ + struct device *dev = &dpc->dev->device; + int i; + u32 status; + + dev_err(dev, "rp_pio_status: %#010x, rp_pio_mask: %#010x\n", + rp_pio->status, rp_pio->mask); + + dev_err(dev, "RP PIO severity=%#010x, syserror=%#010x, exception=%#010x\n", + rp_pio->severity, rp_pio->syserror, rp_pio->exception); + + status = (rp_pio->status & ~rp_pio->mask); + + for (i = 0; i < ARRAY_SIZE(rp_pio_error_string); i++) { + if (!(status & (1 << i))) + continue; + + dev_err(dev, "[%2d] %s%s\n", i, rp_pio_error_string[i], + rp_pio->first_error == i ? " (First)" : ""); + } + + dpc_rp_pio_print_tlp_header(dev, &rp_pio->header_log); + if (rp_pio->log_size == 4) + return; + dev_err(dev, "RP PIO ImpSpec Log %#010x\n", rp_pio->impspec_log); + + for (i = 0; i < rp_pio->log_size - 5; i++) + dev_err(dev, "TLP Prefix Header: dw%d, %#010x\n", i, + rp_pio->tlp_prefix_log[i]); +} + +static void dpc_rp_pio_get_info(struct dpc_dev *dpc, + struct dpc_rp_pio_regs *rp_pio) +{ + struct pci_dev *pdev = dpc->dev->port; + struct device *dev = &dpc->dev->device; + int i; + u16 cap; + u16 status; + + pci_read_config_dword(pdev, dpc->cap_pos + PCI_EXP_DPC_RP_PIO_STATUS, + &rp_pio->status); + pci_read_config_dword(pdev, dpc->cap_pos + PCI_EXP_DPC_RP_PIO_MASK, + &rp_pio->mask); + + pci_read_config_dword(pdev, dpc->cap_pos + PCI_EXP_DPC_RP_PIO_SEVERITY, + &rp_pio->severity); + pci_read_config_dword(pdev, dpc->cap_pos + PCI_EXP_DPC_RP_PIO_SYSERROR, + &rp_pio->syserror); + pci_read_config_dword(pdev, dpc->cap_pos + PCI_EXP_DPC_RP_PIO_EXCEPTION, + &rp_pio->exception); + + /* Get First Error Pointer */ + pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_STATUS, &status); + rp_pio->first_error = (status & 0x1f00) >> 8; + + pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CAP, &cap); + rp_pio->log_size = (cap & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8; + if (rp_pio->log_size < 4 || rp_pio->log_size > 9) { + dev_err(dev, "RP PIO log size %u is invalid\n", + rp_pio->log_size); + return; + } + + pci_read_config_dword(pdev, + dpc->cap_pos + PCI_EXP_DPC_RP_PIO_HEADER_LOG, + &rp_pio->header_log.dw0); + pci_read_config_dword(pdev, + dpc->cap_pos + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 4, + &rp_pio->header_log.dw1); + pci_read_config_dword(pdev, + dpc->cap_pos + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 8, + &rp_pio->header_log.dw2); + pci_read_config_dword(pdev, + dpc->cap_pos + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 12, + &rp_pio->header_log.dw3); + if (rp_pio->log_size == 4) + return; + + pci_read_config_dword(pdev, + dpc->cap_pos + PCI_EXP_DPC_RP_PIO_IMPSPEC_LOG, + &rp_pio->impspec_log); + for (i = 0; i < rp_pio->log_size - 5; i++) + pci_read_config_dword(pdev, + dpc->cap_pos + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG, + &rp_pio->tlp_prefix_log[i]); +} + +static void 
dpc_process_rp_pio_error(struct dpc_dev *dpc) +{ + struct dpc_rp_pio_regs rp_pio_regs; + + dpc_rp_pio_get_info(dpc, &rp_pio_regs); + dpc_rp_pio_print_error(dpc, &rp_pio_regs); + + dpc->rp_pio_status = rp_pio_regs.status; +} + static irqreturn_t dpc_irq(int irq, void *context) { struct dpc_dev *dpc = (struct dpc_dev *)context; struct pci_dev *pdev = dpc->dev->port; + struct device *dev = &dpc->dev->device; u16 status, source; pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_STATUS, &status); @@ -95,20 +257,24 @@ static irqreturn_t dpc_irq(int irq, void *context) if (!status || status == (u16)(~0)) return IRQ_NONE; - dev_info(&dpc->dev->device, "DPC containment event, status:%#06x source:%#06x\n", + dev_info(dev, "DPC containment event, status:%#06x source:%#06x\n", status, source); if (status & PCI_EXP_DPC_STATUS_TRIGGER) { u16 reason = (status >> 1) & 0x3; u16 ext_reason = (status >> 5) & 0x3; - dev_warn(&dpc->dev->device, "DPC %s detected, remove downstream devices\n", + dev_warn(dev, "DPC %s detected, remove downstream devices\n", (reason == 0) ? "unmasked uncorrectable error" : (reason == 1) ? "ERR_NONFATAL" : (reason == 2) ? "ERR_FATAL" : (ext_reason == 0) ? "RP PIO error" : (ext_reason == 1) ? "software trigger" : "reserved error"); + /* show RP PIO error detail information */ + if (reason == 3 && ext_reason == 0) + dpc_process_rp_pio_error(dpc); + schedule_work(&dpc->work); } return IRQ_HANDLED; @@ -119,10 +285,11 @@ static int dpc_probe(struct pcie_device *dev) { struct dpc_dev *dpc; struct pci_dev *pdev = dev->port; + struct device *device = &dev->device; int status; u16 ctl, cap; - dpc = devm_kzalloc(&dev->device, sizeof(*dpc), GFP_KERNEL); + dpc = devm_kzalloc(device, sizeof(*dpc), GFP_KERNEL); if (!dpc) return -ENOMEM; @@ -131,10 +298,10 @@ static int dpc_probe(struct pcie_device *dev) INIT_WORK(&dpc->work, interrupt_event_handler); set_service_data(dev, dpc); - status = devm_request_irq(&dev->device, dev->irq, dpc_irq, IRQF_SHARED, + status = devm_request_irq(device, dev->irq, dpc_irq, IRQF_SHARED, "pcie-dpc", dpc); if (status) { - dev_warn(&dev->device, "request IRQ%d failed: %d\n", dev->irq, + dev_warn(device, "request IRQ%d failed: %d\n", dev->irq, status); return status; } @@ -147,7 +314,7 @@ static int dpc_probe(struct pcie_device *dev) ctl = (ctl & 0xfff4) | PCI_EXP_DPC_CTL_EN_NONFATAL | PCI_EXP_DPC_CTL_INT_EN; pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl); - dev_info(&dev->device, "DPC error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n", + dev_info(device, "DPC error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n", cap & 0xf, FLAG(cap, PCI_EXP_DPC_CAP_RP_EXT), FLAG(cap, PCI_EXP_DPC_CAP_POISONED_TLP), FLAG(cap, PCI_EXP_DPC_CAP_SW_TRIGGER), (cap >> 8) & 0xf, diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 8aa3f14bc87d..083276e03c38 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c @@ -21,7 +21,6 @@ #include "../pci.h" #include "portdrv.h" -#include "aer/aerdrv.h" /* If this switch is set, PCIe port native services should not be enabled. 
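The reason strings printed by dpc_irq() above come from two small fields of the DPC Status register: bits 2:1 select the trigger reason and, when that field reads 3, bits 6:5 select the extension reason. A tiny stand-alone decoder with the same mapping, fed a few hypothetical status values (field positions taken from the code above rather than from the spec text):

#include <stdio.h>
#include <stdint.h>

/* Decode the DPC Status trigger reason the same way dpc_irq() does. */
static const char *dpc_reason(uint16_t status)
{
	uint16_t reason = (status >> 1) & 0x3;
	uint16_t ext_reason = (status >> 5) & 0x3;

	if (reason == 0)
		return "unmasked uncorrectable error";
	if (reason == 1)
		return "ERR_NONFATAL";
	if (reason == 2)
		return "ERR_FATAL";
	/* reason == 3: the extension field selects the detail. */
	if (ext_reason == 0)
		return "RP PIO error";
	if (ext_reason == 1)
		return "software trigger";
	return "reserved error";
}

int main(void)
{
	/* Hypothetical status words: reason in bits 2:1, extension in 6:5. */
	uint16_t samples[] = { 0x0002, 0x0004, 0x0006, 0x0026 };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("status %#06x -> %s\n",
		       (unsigned)samples[i], dpc_reason(samples[i]));
	return 0;
}

The last sample decodes to "software trigger", and the "RP PIO error" case (0x0006 here) is the one that now leads into dpc_process_rp_pio_error() for the detailed log dump.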
*/ bool pcie_ports_disabled; @@ -177,108 +176,20 @@ static void pcie_portdrv_remove(struct pci_dev *dev) pcie_port_device_remove(dev); } -static int error_detected_iter(struct device *device, void *data) -{ - struct pcie_device *pcie_device; - struct pcie_port_service_driver *driver; - struct aer_broadcast_data *result_data; - pci_ers_result_t status; - - result_data = (struct aer_broadcast_data *) data; - - if (device->bus == &pcie_port_bus_type && device->driver) { - driver = to_service_driver(device->driver); - if (!driver || - !driver->err_handler || - !driver->err_handler->error_detected) - return 0; - - pcie_device = to_pcie_device(device); - - /* Forward error detected message to service drivers */ - status = driver->err_handler->error_detected( - pcie_device->port, - result_data->state); - result_data->result = - merge_result(result_data->result, status); - } - - return 0; -} - static pci_ers_result_t pcie_portdrv_error_detected(struct pci_dev *dev, enum pci_channel_state error) { - struct aer_broadcast_data data = {error, PCI_ERS_RESULT_CAN_RECOVER}; - - /* get true return value from &data */ - device_for_each_child(&dev->dev, &data, error_detected_iter); - return data.result; -} - -static int mmio_enabled_iter(struct device *device, void *data) -{ - struct pcie_device *pcie_device; - struct pcie_port_service_driver *driver; - pci_ers_result_t status, *result; - - result = (pci_ers_result_t *) data; - - if (device->bus == &pcie_port_bus_type && device->driver) { - driver = to_service_driver(device->driver); - if (driver && - driver->err_handler && - driver->err_handler->mmio_enabled) { - pcie_device = to_pcie_device(device); - - /* Forward error message to service drivers */ - status = driver->err_handler->mmio_enabled( - pcie_device->port); - *result = merge_result(*result, status); - } - } - - return 0; + /* Root Port has no impact. Always recovers. 
*/ + return PCI_ERS_RESULT_CAN_RECOVER; } static pci_ers_result_t pcie_portdrv_mmio_enabled(struct pci_dev *dev) { - pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED; - - /* get true return value from &status */ - device_for_each_child(&dev->dev, &status, mmio_enabled_iter); - return status; -} - -static int slot_reset_iter(struct device *device, void *data) -{ - struct pcie_device *pcie_device; - struct pcie_port_service_driver *driver; - pci_ers_result_t status, *result; - - result = (pci_ers_result_t *) data; - - if (device->bus == &pcie_port_bus_type && device->driver) { - driver = to_service_driver(device->driver); - if (driver && - driver->err_handler && - driver->err_handler->slot_reset) { - pcie_device = to_pcie_device(device); - - /* Forward error message to service drivers */ - status = driver->err_handler->slot_reset( - pcie_device->port); - *result = merge_result(*result, status); - } - } - - return 0; + return PCI_ERS_RESULT_RECOVERED; } static pci_ers_result_t pcie_portdrv_slot_reset(struct pci_dev *dev) { - pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED; - /* If fatal, restore cfg space for possible link reset at upstream */ if (dev->error_state == pci_channel_io_frozen) { dev->state_saved = true; @@ -287,9 +198,7 @@ static pci_ers_result_t pcie_portdrv_slot_reset(struct pci_dev *dev) pci_enable_pcie_error_reporting(dev); } - /* get true return value from &status */ - device_for_each_child(&dev->dev, &status, slot_reset_iter); - return status; + return PCI_ERS_RESULT_RECOVERED; } static int resume_iter(struct device *device, void *data) @@ -299,13 +208,11 @@ static int resume_iter(struct device *device, void *data) if (device->bus == &pcie_port_bus_type && device->driver) { driver = to_service_driver(device->driver); - if (driver && - driver->err_handler && - driver->err_handler->resume) { + if (driver && driver->error_resume) { pcie_device = to_pcie_device(device); /* Forward error message to service drivers */ - driver->err_handler->resume(pcie_device->port); + driver->error_resume(pcie_device->port); } } @@ -353,7 +260,7 @@ static int __init dmi_pcie_pme_disable_msi(const struct dmi_system_id *d) return 0; } -static struct dmi_system_id __initdata pcie_portdrv_dmi_table[] = { +static const struct dmi_system_id pcie_portdrv_dmi_table[] __initconst = { /* * Boxes that should not use MSI for PCIe PME signaling. */ diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index e6a917b4acd3..ff94b69738a8 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -1745,21 +1745,50 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp) */ } -static void pci_configure_extended_tags(struct pci_dev *dev) +int pci_configure_extended_tags(struct pci_dev *dev, void *ign) { - u32 dev_cap; + struct pci_host_bridge *host; + u32 cap; + u16 ctl; int ret; if (!pci_is_pcie(dev)) - return; + return 0; - ret = pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &dev_cap); + ret = pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap); if (ret) - return; + return 0; - if (dev_cap & PCI_EXP_DEVCAP_EXT_TAG) + if (!(cap & PCI_EXP_DEVCAP_EXT_TAG)) + return 0; + + ret = pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl); + if (ret) + return 0; + + host = pci_find_host_bridge(dev->bus); + if (!host) + return 0; + + /* + * If some device in the hierarchy doesn't handle Extended Tags + * correctly, make sure they're disabled. 
+ */ + if (host->no_ext_tags) { + if (ctl & PCI_EXP_DEVCTL_EXT_TAG) { + dev_info(&dev->dev, "disabling Extended Tags\n"); + pcie_capability_clear_word(dev, PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_EXT_TAG); + } + return 0; + } + + if (!(ctl & PCI_EXP_DEVCTL_EXT_TAG)) { + dev_info(&dev->dev, "enabling Extended Tags\n"); pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_EXT_TAG); + } + return 0; } /** @@ -1810,7 +1839,7 @@ static void pci_configure_device(struct pci_dev *dev) int ret; pci_configure_mps(dev); - pci_configure_extended_tags(dev); + pci_configure_extended_tags(dev, NULL); pci_configure_relaxed_ordering(dev); memset(&hpp, 0, sizeof(hpp)); @@ -1867,11 +1896,58 @@ struct pci_dev *pci_alloc_dev(struct pci_bus *bus) } EXPORT_SYMBOL(pci_alloc_dev); -bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l, - int crs_timeout) +static bool pci_bus_crs_vendor_id(u32 l) +{ + return (l & 0xffff) == 0x0001; +} + +static bool pci_bus_wait_crs(struct pci_bus *bus, int devfn, u32 *l, + int timeout) { int delay = 1; + if (!pci_bus_crs_vendor_id(*l)) + return true; /* not a CRS completion */ + + if (!timeout) + return false; /* CRS, but caller doesn't want to wait */ + + /* + * We got the reserved Vendor ID that indicates a completion with + * Configuration Request Retry Status (CRS). Retry until we get a + * valid Vendor ID or we time out. + */ + while (pci_bus_crs_vendor_id(*l)) { + if (delay > timeout) { + pr_warn("pci %04x:%02x:%02x.%d: not ready after %dms; giving up\n", + pci_domain_nr(bus), bus->number, + PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1); + + return false; + } + if (delay >= 1000) + pr_info("pci %04x:%02x:%02x.%d: not ready after %dms; waiting\n", + pci_domain_nr(bus), bus->number, + PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1); + + msleep(delay); + delay *= 2; + + if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l)) + return false; + } + + if (delay >= 1000) + pr_info("pci %04x:%02x:%02x.%d: ready after %dms\n", + pci_domain_nr(bus), bus->number, + PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1); + + return true; +} + +bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l, + int timeout) +{ if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l)) return false; @@ -1880,28 +1956,8 @@ bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l, *l == 0x0000ffff || *l == 0xffff0000) return false; - /* - * Configuration Request Retry Status. Some root ports return the - * actual device ID instead of the synthetic ID (0xFFFF) required - * by the PCIe spec. Ignore the device ID and only check for - * (vendor id == 1). - */ - while ((*l & 0xffff) == 0x0001) { - if (!crs_timeout) - return false; - - msleep(delay); - delay *= 2; - if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l)) - return false; - /* Card hasn't responded in 60 seconds? Must be stuck. */ - if (delay > crs_timeout) { - printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not responding\n", - pci_domain_nr(bus), bus->number, PCI_SLOT(devfn), - PCI_FUNC(devfn)); - return false; - } - } + if (pci_bus_crs_vendor_id(*l)) + return pci_bus_wait_crs(bus, devfn, l, timeout); return true; } @@ -2331,6 +2387,15 @@ void pcie_bus_configure_settings(struct pci_bus *bus) } EXPORT_SYMBOL_GPL(pcie_bus_configure_settings); +/* + * Called after each bus is probed, but before its children are examined. This + * is marked as __weak because multiple architectures define it. 
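The CRS handling factored out above rests on one detail: a completion with Configuration Request Retry Status shows up as the reserved Vendor ID 0x0001 in the low 16 bits of the dword read, which has to be told apart from both a valid ID and the all-ones/all-zeros patterns that mean no device is present. A short sketch of that classification with a few made-up sample values:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Same test the patch adds as pci_bus_crs_vendor_id(). */
static bool crs_vendor_id(uint32_t l)
{
	return (l & 0xffff) == 0x0001;
}

/* Classify a Vendor/Device ID dword the way pci_bus_read_dev_vendor_id() does. */
static const char *classify(uint32_t l)
{
	if (l == 0xffffffff || l == 0x00000000 ||
	    l == 0x0000ffff || l == 0xffff0000)
		return "no device";
	if (crs_vendor_id(l))
		return "CRS - retry with backoff";
	return "valid vendor/device ID";
}

int main(void)
{
	uint32_t samples[] = {
		0xffffffff,	/* nothing behind this devfn */
		0xffff0001,	/* CRS: reserved vendor ID 0x0001 */
		0x07b015ad,	/* ordinary device: vendor 0x15ad, device 0x07b0 */
	};

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%#010x: %s\n", (unsigned)samples[i], classify(samples[i]));
	return 0;
}

Only the middle case enters pci_bus_wait_crs(), which then applies the same doubling delay as the FLR wait until either a real ID appears or the caller's timeout is exhausted.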
+ */ +void __weak pcibios_fixup_bus(struct pci_bus *bus) +{ + /* nothing to do, expected to be removed in the future */ +} + unsigned int pci_scan_child_bus(struct pci_bus *bus) { unsigned int devfn, pass, max = bus->busn_res.start; diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 140760403f36..a4d33619a7bb 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -25,6 +25,7 @@ #include #include #include +#include #include /* isa_dma_bridge_buggy */ #include "pci.h" @@ -1706,7 +1707,7 @@ static int dmi_disable_ioapicreroute(const struct dmi_system_id *d) return 0; } -static struct dmi_system_id boot_interrupt_dmi_table[] = { +static const struct dmi_system_id boot_interrupt_dmi_table[] = { /* * Systems to exclude from boot interrupt reroute quirks */ @@ -2061,7 +2062,7 @@ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, /* * The 82575 and 82598 may experience data corruption issues when transitioning - * out of L0S. To prevent this we need to disable L0S on the pci-e link + * out of L0S. To prevent this we need to disable L0S on the PCIe link. */ static void quirk_disable_aspm_l0s(struct pci_dev *dev) { @@ -3447,7 +3448,7 @@ static void quirk_apple_poweroff_thunderbolt(struct pci_dev *dev) { acpi_handle bridge, SXIO, SXFP, SXLV; - if (!dmi_match(DMI_BOARD_VENDOR, "Apple Inc.")) + if (!x86_apple_machine) return; if (pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM) return; @@ -3492,7 +3493,7 @@ static void quirk_apple_wait_for_thunderbolt(struct pci_dev *dev) struct pci_dev *sibling = NULL; struct pci_dev *nhi = NULL; - if (!dmi_match(DMI_BOARD_VENDOR, "Apple Inc.")) + if (!x86_apple_machine) return; if (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM) return; @@ -4226,6 +4227,18 @@ static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags) return acs_flags ? 0 : 1; } +static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags) +{ + /* + * X-Gene root matching this quirk do not allow peer-to-peer + * transactions with others, allowing masking out these bits as if they + * were unimplemented in the ACS capability. + */ + acs_flags &= ~(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); + + return acs_flags ? 0 : 1; +} + /* * Many Intel PCH root ports do provide ACS-like features to disable peer * transactions and validate bus numbers in requests, but do not provide an @@ -4474,6 +4487,8 @@ static const struct pci_dev_acs_enabled { { 0x10df, 0x720, pci_quirk_mf_endpoint_acs }, /* Emulex Skyhawk-R */ /* Cavium ThunderX */ { PCI_VENDOR_ID_CAVIUM, PCI_ANY_ID, pci_quirk_cavium_acs }, + /* APM X-Gene */ + { PCI_VENDOR_ID_AMCC, 0xE004, pci_quirk_xgene_acs }, { 0 } }; @@ -4746,23 +4761,6 @@ static void quirk_intel_qat_vf_cap(struct pci_dev *pdev) } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap); -/* - * VMD-enabled root ports will change the source ID for all messages - * to the VMD device. Rather than doing device matching with the source - * ID, the AER driver should traverse the child device tree, reading - * AER registers to find the faulting device. 
- */ -static void quirk_no_aersid(struct pci_dev *pdev) -{ - /* VMD Domain */ - if (pdev->bus->sysdata && pci_domain_nr(pdev->bus) >= 0x10000) - pdev->bus->bus_flags |= PCI_BUS_FLAGS_NO_AERSID; -} -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2030, quirk_no_aersid); -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2031, quirk_no_aersid); -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2032, quirk_no_aersid); -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2033, quirk_no_aersid); - /* FLR may cause some 82579 devices to hang. */ static void quirk_intel_no_flr(struct pci_dev *dev) { @@ -4770,3 +4768,34 @@ static void quirk_intel_no_flr(struct pci_dev *dev) } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_intel_no_flr); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_intel_no_flr); + +static void quirk_no_ext_tags(struct pci_dev *pdev) +{ + struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus); + + if (!bridge) + return; + + bridge->no_ext_tags = 1; + dev_info(&pdev->dev, "disabling Extended Tags (this device can't handle them)\n"); + + pci_walk_bus(bridge->bus, pci_configure_extended_tags, NULL); +} +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0140, quirk_no_ext_tags); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0142, quirk_no_ext_tags); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0144, quirk_no_ext_tags); + +#ifdef CONFIG_PCI_ATS +/* + * Some devices have a broken ATS implementation causing IOMMU stalls. + * Don't use ATS for those devices. + */ +static void quirk_no_ats(struct pci_dev *pdev) +{ + dev_info(&pdev->dev, "disabling ATS (broken on this device)\n"); + pdev->ats_cap = 0; +} + +/* AMD Stoney platform GPU */ +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_no_ats); +#endif /* CONFIG_PCI_ATS */ diff --git a/drivers/pci/setup-irq.c b/drivers/pci/setup-irq.c index 81eda3d93a5d..86106c44ce94 100644 --- a/drivers/pci/setup-irq.c +++ b/drivers/pci/setup-irq.c @@ -17,12 +17,6 @@ #include #include "pci.h" -void __weak pcibios_update_irq(struct pci_dev *dev, int irq) -{ - dev_dbg(&dev->dev, "assigning IRQ %02d\n", irq); - pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq); -} - void pci_assign_irq(struct pci_dev *dev) { u8 pin; @@ -65,29 +59,5 @@ void pci_assign_irq(struct pci_dev *dev) /* Always tell the device, so the driver knows what is the real IRQ to use; the device does not use it. */ - pcibios_update_irq(dev, irq); + pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq); } - -void pci_fixup_irqs(u8 (*swizzle)(struct pci_dev *, u8 *), - int (*map_irq)(const struct pci_dev *, u8, u8)) -{ - /* - * Implement pci_fixup_irqs() through pci_assign_irq(). - * This code should be remove eventually, it is a wrapper - * around pci_assign_irq() interface to keep current - * pci_fixup_irqs() behaviour unchanged on architecture - * code still relying on its interface. - */ - struct pci_dev *dev = NULL; - struct pci_host_bridge *hbrg = NULL; - - for_each_pci_dev(dev) { - hbrg = pci_find_host_bridge(dev->bus); - hbrg->swizzle_irq = swizzle; - hbrg->map_irq = map_irq; - pci_assign_irq(dev); - hbrg->swizzle_irq = NULL; - hbrg->map_irq = NULL; - } -} -EXPORT_SYMBOL_GPL(pci_fixup_irqs); diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index 85774b7a316a..e576e1a8d978 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c @@ -234,6 +234,19 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev, return 0; } +/* + * We don't have to worry about legacy ISA devices, so nothing to do here. 
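Taken together, pci_configure_extended_tags() earlier in this patch and quirk_no_ext_tags() above reduce the per-device Extended Tags decision to a small rule: an incapable device is left alone, a device under a quirked host bridge gets PCI_EXP_DEVCTL_EXT_TAG cleared, and everything else gets it set. A stand-alone sketch of that rule, using the register bit values from pci_regs.h and hypothetical inputs:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Bit values as in the kernel's pci_regs.h. */
#define DEVCAP_EXT_TAG	0x0020	/* PCI_EXP_DEVCAP_EXT_TAG */
#define DEVCTL_EXT_TAG	0x0100	/* PCI_EXP_DEVCTL_EXT_TAG */

/*
 * Reduction of pci_configure_extended_tags(): compute the Device Control
 * value a device should end up with, given its Device Capabilities and
 * the host bridge's no_ext_tags flag.
 */
static uint16_t configure_ext_tags(uint32_t devcap, uint16_t devctl,
				   bool host_no_ext_tags)
{
	if (!(devcap & DEVCAP_EXT_TAG))
		return devctl;				/* not capable */
	if (host_no_ext_tags)
		return devctl & ~DEVCTL_EXT_TAG;	/* quirked hierarchy */
	return devctl | DEVCTL_EXT_TAG;			/* default: enable */
}

int main(void)
{
	printf("capable, clean hierarchy : 0x%04x\n",
	       (unsigned)configure_ext_tags(DEVCAP_EXT_TAG, 0x0000, false));
	printf("capable, quirked bridge  : 0x%04x\n",
	       (unsigned)configure_ext_tags(DEVCAP_EXT_TAG, DEVCTL_EXT_TAG, true));
	printf("not capable              : 0x%04x\n",
	       (unsigned)configure_ext_tags(0, 0x0000, true));
	return 0;
}

The quirk only flips the host bridge flag and re-walks the bus; the per-device register write itself always goes through the same decision shown here.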
+ * This is marked as __weak because multiple architectures define it; it should + * eventually go away. + */ +resource_size_t __weak pcibios_align_resource(void *data, + const struct resource *res, + resource_size_t size, + resource_size_t align) +{ + return res->start; +} + static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev, int resno, resource_size_t size, resource_size_t align) { diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c index 944674ee3464..19e17829f515 100644 --- a/drivers/pcmcia/db1xxx_ss.c +++ b/drivers/pcmcia/db1xxx_ss.c @@ -131,22 +131,27 @@ static irqreturn_t db1000_pcmcia_stschgirq(int irq, void *data) return IRQ_HANDLED; } +/* Db/Pb1200 have separate per-socket insertion and ejection + * interrupts which stay asserted as long as the card is + * inserted/missing. The one which caused us to be called + * needs to be disabled and the other one enabled. + */ static irqreturn_t db1200_pcmcia_cdirq(int irq, void *data) +{ + disable_irq_nosync(irq); + return IRQ_WAKE_THREAD; +} + +static irqreturn_t db1200_pcmcia_cdirq_fn(int irq, void *data) { struct db1x_pcmcia_sock *sock = data; - /* Db/Pb1200 have separate per-socket insertion and ejection - * interrupts which stay asserted as long as the card is - * inserted/missing. The one which caused us to be called - * needs to be disabled and the other one enabled. - */ - if (irq == sock->insert_irq) { - disable_irq_nosync(sock->insert_irq); + /* Wait a bit for the signals to stop bouncing. */ + msleep(100); + if (irq == sock->insert_irq) enable_irq(sock->eject_irq); - } else { - disable_irq_nosync(sock->eject_irq); + else enable_irq(sock->insert_irq); - } pcmcia_parse_events(&sock->socket, SS_DETECT); @@ -172,13 +177,13 @@ static int db1x_pcmcia_setup_irqs(struct db1x_pcmcia_sock *sock) */ if ((sock->board_type == BOARD_TYPE_DB1200) || (sock->board_type == BOARD_TYPE_DB1300)) { - ret = request_irq(sock->insert_irq, db1200_pcmcia_cdirq, - 0, "pcmcia_insert", sock); + ret = request_threaded_irq(sock->insert_irq, db1200_pcmcia_cdirq, + db1200_pcmcia_cdirq_fn, 0, "pcmcia_insert", sock); if (ret) goto out1; - ret = request_irq(sock->eject_irq, db1200_pcmcia_cdirq, - 0, "pcmcia_eject", sock); + ret = request_threaded_irq(sock->eject_irq, db1200_pcmcia_cdirq, + db1200_pcmcia_cdirq_fn, 0, "pcmcia_eject", sock); if (ret) { free_irq(sock->insert_irq, sock); goto out1; diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c index 0a9b78705ee8..3303dd8d8eb5 100644 --- a/drivers/perf/arm_pmu_acpi.c +++ b/drivers/perf/arm_pmu_acpi.c @@ -235,6 +235,7 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn) ret = armpmu_register(pmu); if (ret) { pr_warn("Failed to register PMU for CPU%d\n", cpu); + kfree(pmu->name); return ret; } } diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index 441912c10b82..5c8d452e35e2 100644 --- a/drivers/phy/Kconfig +++ b/drivers/phy/Kconfig @@ -44,6 +44,7 @@ source "drivers/phy/allwinner/Kconfig" source "drivers/phy/amlogic/Kconfig" source "drivers/phy/broadcom/Kconfig" source "drivers/phy/hisilicon/Kconfig" +source "drivers/phy/lantiq/Kconfig" source "drivers/phy/marvell/Kconfig" source "drivers/phy/mediatek/Kconfig" source "drivers/phy/motorola/Kconfig" diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile index 06f3c500030d..3a52dcb09566 100644 --- a/drivers/phy/Makefile +++ b/drivers/phy/Makefile @@ -6,9 +6,9 @@ obj-$(CONFIG_GENERIC_PHY) += phy-core.o obj-$(CONFIG_PHY_LPC18XX_USB_OTG) += phy-lpc18xx-usb-otg.o obj-$(CONFIG_PHY_XGENE) += phy-xgene.o 
obj-$(CONFIG_PHY_PISTACHIO_USB) += phy-pistachio-usb.o - obj-$(CONFIG_ARCH_SUNXI) += allwinner/ obj-$(CONFIG_ARCH_MESON) += amlogic/ +obj-$(CONFIG_LANTIQ) += lantiq/ obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/ obj-$(CONFIG_ARCH_RENESAS) += renesas/ obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/ diff --git a/drivers/phy/lantiq/Kconfig b/drivers/phy/lantiq/Kconfig new file mode 100644 index 000000000000..326d88a6417d --- /dev/null +++ b/drivers/phy/lantiq/Kconfig @@ -0,0 +1,9 @@ +# +# Phy drivers for Lantiq / Intel platforms +# +config PHY_LANTIQ_RCU_USB2 + tristate "Lantiq XWAY SoC RCU based USB PHY" + depends on OF && (SOC_TYPE_XWAY || COMPILE_TEST) + select GENERIC_PHY + help + Support for the USB PHY(s) on the Lantiq / Intel XWAY family SoCs. diff --git a/drivers/phy/lantiq/Makefile b/drivers/phy/lantiq/Makefile new file mode 100644 index 000000000000..f73eb56a5416 --- /dev/null +++ b/drivers/phy/lantiq/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_PHY_LANTIQ_RCU_USB2) += phy-lantiq-rcu-usb2.o diff --git a/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c b/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c new file mode 100644 index 000000000000..986224fca9e9 --- /dev/null +++ b/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c @@ -0,0 +1,254 @@ +/* + * Lantiq XWAY SoC RCU module based USB 1.1/2.0 PHY driver + * + * Copyright (C) 2016 Martin Blumenstingl + * Copyright (C) 2017 Hauke Mehrtens + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Transmitter HS Pre-Emphasis Enable */ +#define RCU_CFG1_TX_PEE BIT(0) +/* Disconnect Threshold */ +#define RCU_CFG1_DIS_THR_MASK 0x00038000 +#define RCU_CFG1_DIS_THR_SHIFT 15 + +struct ltq_rcu_usb2_bits { + u8 hostmode; + u8 slave_endianness; + u8 host_endianness; + bool have_ana_cfg; +}; + +struct ltq_rcu_usb2_priv { + struct regmap *regmap; + unsigned int phy_reg_offset; + unsigned int ana_cfg1_reg_offset; + const struct ltq_rcu_usb2_bits *reg_bits; + struct device *dev; + struct phy *phy; + struct clk *phy_gate_clk; + struct reset_control *ctrl_reset; + struct reset_control *phy_reset; +}; + +static const struct ltq_rcu_usb2_bits xway_rcu_usb2_reg_bits = { + .hostmode = 11, + .slave_endianness = 9, + .host_endianness = 10, + .have_ana_cfg = false, +}; + +static const struct ltq_rcu_usb2_bits xrx100_rcu_usb2_reg_bits = { + .hostmode = 11, + .slave_endianness = 17, + .host_endianness = 10, + .have_ana_cfg = false, +}; + +static const struct ltq_rcu_usb2_bits xrx200_rcu_usb2_reg_bits = { + .hostmode = 11, + .slave_endianness = 9, + .host_endianness = 10, + .have_ana_cfg = true, +}; + +static const struct of_device_id ltq_rcu_usb2_phy_of_match[] = { + { + .compatible = "lantiq,ase-usb2-phy", + .data = &xway_rcu_usb2_reg_bits, + }, + { + .compatible = "lantiq,danube-usb2-phy", + .data = &xway_rcu_usb2_reg_bits, + }, + { + .compatible = "lantiq,xrx100-usb2-phy", + .data = &xrx100_rcu_usb2_reg_bits, + }, + { + .compatible = "lantiq,xrx200-usb2-phy", + .data = &xrx200_rcu_usb2_reg_bits, + }, + { + .compatible = "lantiq,xrx300-usb2-phy", + .data = &xrx200_rcu_usb2_reg_bits, + }, + { }, +}; +MODULE_DEVICE_TABLE(of, ltq_rcu_usb2_phy_of_match); + +static int ltq_rcu_usb2_phy_init(struct phy *phy) +{ + struct ltq_rcu_usb2_priv *priv = phy_get_drvdata(phy); + + if (priv->reg_bits->have_ana_cfg) { + regmap_update_bits(priv->regmap, 
priv->ana_cfg1_reg_offset, + RCU_CFG1_TX_PEE, RCU_CFG1_TX_PEE); + regmap_update_bits(priv->regmap, priv->ana_cfg1_reg_offset, + RCU_CFG1_DIS_THR_MASK, 7 << RCU_CFG1_DIS_THR_SHIFT); + } + + /* Configure core to host mode */ + regmap_update_bits(priv->regmap, priv->phy_reg_offset, + BIT(priv->reg_bits->hostmode), 0); + + /* Select DMA endianness (Host-endian: big-endian) */ + regmap_update_bits(priv->regmap, priv->phy_reg_offset, + BIT(priv->reg_bits->slave_endianness), 0); + regmap_update_bits(priv->regmap, priv->phy_reg_offset, + BIT(priv->reg_bits->host_endianness), + BIT(priv->reg_bits->host_endianness)); + + return 0; +} + +static int ltq_rcu_usb2_phy_power_on(struct phy *phy) +{ + struct ltq_rcu_usb2_priv *priv = phy_get_drvdata(phy); + struct device *dev = priv->dev; + int ret; + + reset_control_deassert(priv->phy_reset); + + ret = clk_prepare_enable(priv->phy_gate_clk); + if (ret) + dev_err(dev, "failed to enable PHY gate\n"); + + return ret; +} + +static int ltq_rcu_usb2_phy_power_off(struct phy *phy) +{ + struct ltq_rcu_usb2_priv *priv = phy_get_drvdata(phy); + + reset_control_assert(priv->phy_reset); + + clk_disable_unprepare(priv->phy_gate_clk); + + return 0; +} + +static struct phy_ops ltq_rcu_usb2_phy_ops = { + .init = ltq_rcu_usb2_phy_init, + .power_on = ltq_rcu_usb2_phy_power_on, + .power_off = ltq_rcu_usb2_phy_power_off, + .owner = THIS_MODULE, +}; + +static int ltq_rcu_usb2_of_parse(struct ltq_rcu_usb2_priv *priv, + struct platform_device *pdev) +{ + struct device *dev = priv->dev; + const __be32 *offset; + int ret; + + priv->reg_bits = of_device_get_match_data(dev); + + priv->regmap = syscon_node_to_regmap(dev->of_node->parent); + if (IS_ERR(priv->regmap)) { + dev_err(dev, "Failed to lookup RCU regmap\n"); + return PTR_ERR(priv->regmap); + } + + offset = of_get_address(dev->of_node, 0, NULL, NULL); + if (!offset) { + dev_err(dev, "Failed to get RCU PHY reg offset\n"); + return -ENOENT; + } + priv->phy_reg_offset = __be32_to_cpu(*offset); + + if (priv->reg_bits->have_ana_cfg) { + offset = of_get_address(dev->of_node, 1, NULL, NULL); + if (!offset) { + dev_err(dev, "Failed to get RCU ANA CFG1 reg offset\n"); + return -ENOENT; + } + priv->ana_cfg1_reg_offset = __be32_to_cpu(*offset); + } + + priv->phy_gate_clk = devm_clk_get(dev, "phy"); + if (IS_ERR(priv->phy_gate_clk)) { + dev_err(dev, "Unable to get USB phy gate clk\n"); + return PTR_ERR(priv->phy_gate_clk); + } + + priv->ctrl_reset = devm_reset_control_get_shared(dev, "ctrl"); + if (IS_ERR(priv->ctrl_reset)) { + if (PTR_ERR(priv->ctrl_reset) != -EPROBE_DEFER) + dev_err(dev, "failed to get 'ctrl' reset\n"); + return PTR_ERR(priv->ctrl_reset); + } + + priv->phy_reset = devm_reset_control_get_optional(dev, "phy"); + if (IS_ERR(priv->phy_reset)) + return PTR_ERR(priv->phy_reset); + + return 0; +} + +static int ltq_rcu_usb2_phy_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct ltq_rcu_usb2_priv *priv; + struct phy_provider *provider; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->dev = dev; + + ret = ltq_rcu_usb2_of_parse(priv, pdev); + if (ret) + return ret; + + /* Reset USB core through reset controller */ + reset_control_deassert(priv->ctrl_reset); + + reset_control_assert(priv->phy_reset); + + priv->phy = devm_phy_create(dev, dev->of_node, <q_rcu_usb2_phy_ops); + if (IS_ERR(priv->phy)) { + dev_err(dev, "failed to create PHY\n"); + return PTR_ERR(priv->phy); + } + + phy_set_drvdata(priv->phy, priv); + + provider = 
devm_of_phy_provider_register(dev, of_phy_simple_xlate); + if (IS_ERR(provider)) + return PTR_ERR(provider); + + dev_set_drvdata(priv->dev, priv); + return 0; +} + +static struct platform_driver ltq_rcu_usb2_phy_driver = { + .probe = ltq_rcu_usb2_phy_probe, + .driver = { + .name = "lantiq-rcu-usb2-phy", + .of_match_table = ltq_rcu_usb2_phy_of_match, + } +}; +module_platform_driver(ltq_rcu_usb2_phy_driver); + +MODULE_AUTHOR("Martin Blumenstingl "); +MODULE_DESCRIPTION("Lantiq XWAY USB2 PHY driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/phy/marvell/Kconfig b/drivers/phy/marvell/Kconfig index 048d8893bc2e..68e321225400 100644 --- a/drivers/phy/marvell/Kconfig +++ b/drivers/phy/marvell/Kconfig @@ -21,6 +21,17 @@ config PHY_BERLIN_USB help Enable this to support the USB PHY on Marvell Berlin SoCs. +config PHY_MVEBU_CP110_COMPHY + tristate "Marvell CP110 comphy driver" + depends on ARCH_MVEBU || COMPILE_TEST + depends on OF + select GENERIC_PHY + help + This driver allows controlling the comphy, a hardware block providing + shared serdes PHYs on Marvell Armada 7k/8k (in the CP110). Its serdes + lanes can be used by various controllers (Ethernet, SATA, USB, + PCIe...). + config PHY_MVEBU_SATA def_bool y depends on ARCH_DOVE || MACH_DOVE || MACH_KIRKWOOD diff --git a/drivers/phy/marvell/Makefile b/drivers/phy/marvell/Makefile index 3fc188f59118..0cf6a7cbaf9f 100644 --- a/drivers/phy/marvell/Makefile +++ b/drivers/phy/marvell/Makefile @@ -1,6 +1,7 @@ obj-$(CONFIG_ARMADA375_USBCLUSTER_PHY) += phy-armada375-usb2.o obj-$(CONFIG_PHY_BERLIN_SATA) += phy-berlin-sata.o obj-$(CONFIG_PHY_BERLIN_USB) += phy-berlin-usb.o +obj-$(CONFIG_PHY_MVEBU_CP110_COMPHY) += phy-mvebu-cp110-comphy.o obj-$(CONFIG_PHY_MVEBU_SATA) += phy-mvebu-sata.o obj-$(CONFIG_PHY_PXA_28NM_HSIC) += phy-pxa-28nm-hsic.o obj-$(CONFIG_PHY_PXA_28NM_USB2) += phy-pxa-28nm-usb2.o diff --git a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c new file mode 100644 index 000000000000..73ebad6634a7 --- /dev/null +++ b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c @@ -0,0 +1,644 @@ +/* + * Copyright (C) 2017 Marvell + * + * Antoine Tenart + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied.
+ */ + +#include +#include +#include +#include +#include +#include +#include + +/* Relative to priv->base */ +#define MVEBU_COMPHY_SERDES_CFG0(n) (0x0 + (n) * 0x1000) +#define MVEBU_COMPHY_SERDES_CFG0_PU_PLL BIT(1) +#define MVEBU_COMPHY_SERDES_CFG0_GEN_RX(n) ((n) << 3) +#define MVEBU_COMPHY_SERDES_CFG0_GEN_TX(n) ((n) << 7) +#define MVEBU_COMPHY_SERDES_CFG0_PU_RX BIT(11) +#define MVEBU_COMPHY_SERDES_CFG0_PU_TX BIT(12) +#define MVEBU_COMPHY_SERDES_CFG0_HALF_BUS BIT(14) +#define MVEBU_COMPHY_SERDES_CFG1(n) (0x4 + (n) * 0x1000) +#define MVEBU_COMPHY_SERDES_CFG1_RESET BIT(3) +#define MVEBU_COMPHY_SERDES_CFG1_RX_INIT BIT(4) +#define MVEBU_COMPHY_SERDES_CFG1_CORE_RESET BIT(5) +#define MVEBU_COMPHY_SERDES_CFG1_RF_RESET BIT(6) +#define MVEBU_COMPHY_SERDES_CFG2(n) (0x8 + (n) * 0x1000) +#define MVEBU_COMPHY_SERDES_CFG2_DFE_EN BIT(4) +#define MVEBU_COMPHY_SERDES_STATUS0(n) (0x18 + (n) * 0x1000) +#define MVEBU_COMPHY_SERDES_STATUS0_TX_PLL_RDY BIT(2) +#define MVEBU_COMPHY_SERDES_STATUS0_RX_PLL_RDY BIT(3) +#define MVEBU_COMPHY_SERDES_STATUS0_RX_INIT BIT(4) +#define MVEBU_COMPHY_PWRPLL_CTRL(n) (0x804 + (n) * 0x1000) +#define MVEBU_COMPHY_PWRPLL_CTRL_RFREQ(n) ((n) << 0) +#define MVEBU_COMPHY_PWRPLL_PHY_MODE(n) ((n) << 5) +#define MVEBU_COMPHY_IMP_CAL(n) (0x80c + (n) * 0x1000) +#define MVEBU_COMPHY_IMP_CAL_TX_EXT(n) ((n) << 10) +#define MVEBU_COMPHY_IMP_CAL_TX_EXT_EN BIT(15) +#define MVEBU_COMPHY_DFE_RES(n) (0x81c + (n) * 0x1000) +#define MVEBU_COMPHY_DFE_RES_FORCE_GEN_TBL BIT(15) +#define MVEBU_COMPHY_COEF(n) (0x828 + (n) * 0x1000) +#define MVEBU_COMPHY_COEF_DFE_EN BIT(14) +#define MVEBU_COMPHY_COEF_DFE_CTRL BIT(15) +#define MVEBU_COMPHY_GEN1_S0(n) (0x834 + (n) * 0x1000) +#define MVEBU_COMPHY_GEN1_S0_TX_AMP(n) ((n) << 1) +#define MVEBU_COMPHY_GEN1_S0_TX_EMPH(n) ((n) << 7) +#define MVEBU_COMPHY_GEN1_S1(n) (0x838 + (n) * 0x1000) +#define MVEBU_COMPHY_GEN1_S1_RX_MUL_PI(n) ((n) << 0) +#define MVEBU_COMPHY_GEN1_S1_RX_MUL_PF(n) ((n) << 3) +#define MVEBU_COMPHY_GEN1_S1_RX_MUL_FI(n) ((n) << 6) +#define MVEBU_COMPHY_GEN1_S1_RX_MUL_FF(n) ((n) << 8) +#define MVEBU_COMPHY_GEN1_S1_RX_DFE_EN BIT(10) +#define MVEBU_COMPHY_GEN1_S1_RX_DIV(n) ((n) << 11) +#define MVEBU_COMPHY_GEN1_S2(n) (0x8f4 + (n) * 0x1000) +#define MVEBU_COMPHY_GEN1_S2_TX_EMPH(n) ((n) << 0) +#define MVEBU_COMPHY_GEN1_S2_TX_EMPH_EN BIT(4) +#define MVEBU_COMPHY_LOOPBACK(n) (0x88c + (n) * 0x1000) +#define MVEBU_COMPHY_LOOPBACK_DBUS_WIDTH(n) ((n) << 1) +#define MVEBU_COMPHY_VDD_CAL0(n) (0x908 + (n) * 0x1000) +#define MVEBU_COMPHY_VDD_CAL0_CONT_MODE BIT(15) +#define MVEBU_COMPHY_EXT_SELV(n) (0x914 + (n) * 0x1000) +#define MVEBU_COMPHY_EXT_SELV_RX_SAMPL(n) ((n) << 5) +#define MVEBU_COMPHY_MISC_CTRL0(n) (0x93c + (n) * 0x1000) +#define MVEBU_COMPHY_MISC_CTRL0_ICP_FORCE BIT(5) +#define MVEBU_COMPHY_MISC_CTRL0_REFCLK_SEL BIT(10) +#define MVEBU_COMPHY_RX_CTRL1(n) (0x940 + (n) * 0x1000) +#define MVEBU_COMPHY_RX_CTRL1_RXCLK2X_SEL BIT(11) +#define MVEBU_COMPHY_RX_CTRL1_CLK8T_EN BIT(12) +#define MVEBU_COMPHY_SPEED_DIV(n) (0x954 + (n) * 0x1000) +#define MVEBU_COMPHY_SPEED_DIV_TX_FORCE BIT(7) +#define MVEBU_SP_CALIB(n) (0x96c + (n) * 0x1000) +#define MVEBU_SP_CALIB_SAMPLER(n) ((n) << 8) +#define MVEBU_SP_CALIB_SAMPLER_EN BIT(12) +#define MVEBU_COMPHY_TX_SLEW_RATE(n) (0x974 + (n) * 0x1000) +#define MVEBU_COMPHY_TX_SLEW_RATE_EMPH(n) ((n) << 5) +#define MVEBU_COMPHY_TX_SLEW_RATE_SLC(n) ((n) << 10) +#define MVEBU_COMPHY_DLT_CTRL(n) (0x984 + (n) * 0x1000) +#define MVEBU_COMPHY_DLT_CTRL_DTL_FLOOP_EN BIT(2) +#define MVEBU_COMPHY_FRAME_DETECT0(n) (0xa14 + (n) * 0x1000) +#define 
MVEBU_COMPHY_FRAME_DETECT0_PATN(n) ((n) << 7) +#define MVEBU_COMPHY_FRAME_DETECT3(n) (0xa20 + (n) * 0x1000) +#define MVEBU_COMPHY_FRAME_DETECT3_LOST_TIMEOUT_EN BIT(12) +#define MVEBU_COMPHY_DME(n) (0xa28 + (n) * 0x1000) +#define MVEBU_COMPHY_DME_ETH_MODE BIT(7) +#define MVEBU_COMPHY_TRAINING0(n) (0xa68 + (n) * 0x1000) +#define MVEBU_COMPHY_TRAINING0_P2P_HOLD BIT(15) +#define MVEBU_COMPHY_TRAINING5(n) (0xaa4 + (n) * 0x1000) +#define MVEBU_COMPHY_TRAINING5_RX_TIMER(n) ((n) << 0) +#define MVEBU_COMPHY_TX_TRAIN_PRESET(n) (0xb1c + (n) * 0x1000) +#define MVEBU_COMPHY_TX_TRAIN_PRESET_16B_AUTO_EN BIT(8) +#define MVEBU_COMPHY_TX_TRAIN_PRESET_PRBS11 BIT(9) +#define MVEBU_COMPHY_GEN1_S3(n) (0xc40 + (n) * 0x1000) +#define MVEBU_COMPHY_GEN1_S3_FBCK_SEL BIT(9) +#define MVEBU_COMPHY_GEN1_S4(n) (0xc44 + (n) * 0x1000) +#define MVEBU_COMPHY_GEN1_S4_DFE_RES(n) ((n) << 8) +#define MVEBU_COMPHY_TX_PRESET(n) (0xc68 + (n) * 0x1000) +#define MVEBU_COMPHY_TX_PRESET_INDEX(n) ((n) << 0) +#define MVEBU_COMPHY_GEN1_S5(n) (0xd38 + (n) * 0x1000) +#define MVEBU_COMPHY_GEN1_S5_ICP(n) ((n) << 0) + +/* Relative to priv->regmap */ +#define MVEBU_COMPHY_CONF1(n) (0x1000 + (n) * 0x28) +#define MVEBU_COMPHY_CONF1_PWRUP BIT(1) +#define MVEBU_COMPHY_CONF1_USB_PCIE BIT(2) /* 0: Ethernet/SATA */ +#define MVEBU_COMPHY_CONF6(n) (0x1014 + (n) * 0x28) +#define MVEBU_COMPHY_CONF6_40B BIT(18) +#define MVEBU_COMPHY_SELECTOR 0x1140 +#define MVEBU_COMPHY_SELECTOR_PHY(n) ((n) * 0x4) + +#define MVEBU_COMPHY_LANES 6 +#define MVEBU_COMPHY_PORTS 3 + +struct mvebu_comhy_conf { + enum phy_mode mode; + unsigned lane; + unsigned port; + u32 mux; +}; + +#define MVEBU_COMPHY_CONF(_lane, _port, _mode, _mux) \ + { \ + .lane = _lane, \ + .port = _port, \ + .mode = _mode, \ + .mux = _mux, \ + } + +static const struct mvebu_comhy_conf mvebu_comphy_cp110_modes[] = { + /* lane 0 */ + MVEBU_COMPHY_CONF(0, 1, PHY_MODE_SGMII, 0x1), + /* lane 1 */ + MVEBU_COMPHY_CONF(1, 2, PHY_MODE_SGMII, 0x1), + /* lane 2 */ + MVEBU_COMPHY_CONF(2, 0, PHY_MODE_SGMII, 0x1), + MVEBU_COMPHY_CONF(2, 0, PHY_MODE_10GKR, 0x1), + /* lane 3 */ + MVEBU_COMPHY_CONF(3, 1, PHY_MODE_SGMII, 0x2), + /* lane 4 */ + MVEBU_COMPHY_CONF(4, 0, PHY_MODE_SGMII, 0x2), + MVEBU_COMPHY_CONF(4, 0, PHY_MODE_10GKR, 0x2), + MVEBU_COMPHY_CONF(4, 1, PHY_MODE_SGMII, 0x1), + /* lane 5 */ + MVEBU_COMPHY_CONF(5, 2, PHY_MODE_SGMII, 0x1), +}; + +struct mvebu_comphy_priv { + void __iomem *base; + struct regmap *regmap; + struct device *dev; + int modes[MVEBU_COMPHY_LANES]; +}; + +struct mvebu_comphy_lane { + struct mvebu_comphy_priv *priv; + unsigned id; + enum phy_mode mode; + int port; +}; + +static int mvebu_comphy_get_mux(int lane, int port, enum phy_mode mode) +{ + int i, n = ARRAY_SIZE(mvebu_comphy_cp110_modes); + + /* Unused PHY mux value is 0x0 */ + if (mode == PHY_MODE_INVALID) + return 0; + + for (i = 0; i < n; i++) { + if (mvebu_comphy_cp110_modes[i].lane == lane && + mvebu_comphy_cp110_modes[i].port == port && + mvebu_comphy_cp110_modes[i].mode == mode) + break; + } + + if (i == n) + return -EINVAL; + + return mvebu_comphy_cp110_modes[i].mux; +} + +static void mvebu_comphy_ethernet_init_reset(struct mvebu_comphy_lane *lane, + enum phy_mode mode) +{ + struct mvebu_comphy_priv *priv = lane->priv; + u32 val; + + regmap_read(priv->regmap, MVEBU_COMPHY_CONF1(lane->id), &val); + val &= ~MVEBU_COMPHY_CONF1_USB_PCIE; + val |= MVEBU_COMPHY_CONF1_PWRUP; + regmap_write(priv->regmap, MVEBU_COMPHY_CONF1(lane->id), val); + + /* Select baud rates and PLLs */ + val = readl(priv->base + MVEBU_COMPHY_SERDES_CFG0(lane->id)); 
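+ /* The GEN_RX/GEN_TX fields written below select the serdes generation, which effectively picks the lane bit rate: 0xe is used for 10GBase-KR and 0x6 together with the half bus width bit for SGMII. The PU_* power-up bits cleared here are set again later in mvebu_comphy_init_plls(). */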
+ val &= ~(MVEBU_COMPHY_SERDES_CFG0_PU_PLL | + MVEBU_COMPHY_SERDES_CFG0_PU_RX | + MVEBU_COMPHY_SERDES_CFG0_PU_TX | + MVEBU_COMPHY_SERDES_CFG0_HALF_BUS | + MVEBU_COMPHY_SERDES_CFG0_GEN_RX(0xf) | + MVEBU_COMPHY_SERDES_CFG0_GEN_TX(0xf)); + if (mode == PHY_MODE_10GKR) + val |= MVEBU_COMPHY_SERDES_CFG0_GEN_RX(0xe) | + MVEBU_COMPHY_SERDES_CFG0_GEN_TX(0xe); + else if (mode == PHY_MODE_SGMII) + val |= MVEBU_COMPHY_SERDES_CFG0_GEN_RX(0x6) | + MVEBU_COMPHY_SERDES_CFG0_GEN_TX(0x6) | + MVEBU_COMPHY_SERDES_CFG0_HALF_BUS; + writel(val, priv->base + MVEBU_COMPHY_SERDES_CFG0(lane->id)); + + /* reset */ + val = readl(priv->base + MVEBU_COMPHY_SERDES_CFG1(lane->id)); + val &= ~(MVEBU_COMPHY_SERDES_CFG1_RESET | + MVEBU_COMPHY_SERDES_CFG1_CORE_RESET | + MVEBU_COMPHY_SERDES_CFG1_RF_RESET); + writel(val, priv->base + MVEBU_COMPHY_SERDES_CFG1(lane->id)); + + /* de-assert reset */ + val = readl(priv->base + MVEBU_COMPHY_SERDES_CFG1(lane->id)); + val |= MVEBU_COMPHY_SERDES_CFG1_RESET | + MVEBU_COMPHY_SERDES_CFG1_CORE_RESET; + writel(val, priv->base + MVEBU_COMPHY_SERDES_CFG1(lane->id)); + + /* wait until clocks are ready */ + mdelay(1); + + /* explicitly disable 40B, the bit isn't cleared on reset */ + regmap_read(priv->regmap, MVEBU_COMPHY_CONF6(lane->id), &val); + val &= ~MVEBU_COMPHY_CONF6_40B; + regmap_write(priv->regmap, MVEBU_COMPHY_CONF6(lane->id), val); + + /* refclk selection */ + val = readl(priv->base + MVEBU_COMPHY_MISC_CTRL0(lane->id)); + val &= ~MVEBU_COMPHY_MISC_CTRL0_REFCLK_SEL; + if (mode == PHY_MODE_10GKR) + val |= MVEBU_COMPHY_MISC_CTRL0_ICP_FORCE; + writel(val, priv->base + MVEBU_COMPHY_MISC_CTRL0(lane->id)); + + /* power and pll selection */ + val = readl(priv->base + MVEBU_COMPHY_PWRPLL_CTRL(lane->id)); + val &= ~(MVEBU_COMPHY_PWRPLL_CTRL_RFREQ(0x1f) | + MVEBU_COMPHY_PWRPLL_PHY_MODE(0x7)); + val |= MVEBU_COMPHY_PWRPLL_CTRL_RFREQ(0x1) | + MVEBU_COMPHY_PWRPLL_PHY_MODE(0x4); + writel(val, priv->base + MVEBU_COMPHY_PWRPLL_CTRL(lane->id)); + + val = readl(priv->base + MVEBU_COMPHY_LOOPBACK(lane->id)); + val &= ~MVEBU_COMPHY_LOOPBACK_DBUS_WIDTH(0x7); + val |= MVEBU_COMPHY_LOOPBACK_DBUS_WIDTH(0x1); + writel(val, priv->base + MVEBU_COMPHY_LOOPBACK(lane->id)); +} + +static int mvebu_comphy_init_plls(struct mvebu_comphy_lane *lane, + enum phy_mode mode) +{ + struct mvebu_comphy_priv *priv = lane->priv; + u32 val; + + /* SERDES external config */ + val = readl(priv->base + MVEBU_COMPHY_SERDES_CFG0(lane->id)); + val |= MVEBU_COMPHY_SERDES_CFG0_PU_PLL | + MVEBU_COMPHY_SERDES_CFG0_PU_RX | + MVEBU_COMPHY_SERDES_CFG0_PU_TX; + writel(val, priv->base + MVEBU_COMPHY_SERDES_CFG0(lane->id)); + + /* check rx/tx pll */ + readl_poll_timeout(priv->base + MVEBU_COMPHY_SERDES_STATUS0(lane->id), + val, + val & (MVEBU_COMPHY_SERDES_STATUS0_RX_PLL_RDY | + MVEBU_COMPHY_SERDES_STATUS0_TX_PLL_RDY), + 1000, 150000); + if (!(val & (MVEBU_COMPHY_SERDES_STATUS0_RX_PLL_RDY | + MVEBU_COMPHY_SERDES_STATUS0_TX_PLL_RDY))) + return -ETIMEDOUT; + + /* rx init */ + val = readl(priv->base + MVEBU_COMPHY_SERDES_CFG1(lane->id)); + val |= MVEBU_COMPHY_SERDES_CFG1_RX_INIT; + writel(val, priv->base + MVEBU_COMPHY_SERDES_CFG1(lane->id)); + + /* check rx */ + readl_poll_timeout(priv->base + MVEBU_COMPHY_SERDES_STATUS0(lane->id), + val, val & MVEBU_COMPHY_SERDES_STATUS0_RX_INIT, + 1000, 10000); + if (!(val & MVEBU_COMPHY_SERDES_STATUS0_RX_INIT)) + return -ETIMEDOUT; + + val = readl(priv->base + MVEBU_COMPHY_SERDES_CFG1(lane->id)); + val &= ~MVEBU_COMPHY_SERDES_CFG1_RX_INIT; + writel(val, priv->base + MVEBU_COMPHY_SERDES_CFG1(lane->id)); + + return
0; +} + +static int mvebu_comphy_set_mode_sgmii(struct phy *phy) +{ + struct mvebu_comphy_lane *lane = phy_get_drvdata(phy); + struct mvebu_comphy_priv *priv = lane->priv; + u32 val; + + mvebu_comphy_ethernet_init_reset(lane, PHY_MODE_SGMII); + + val = readl(priv->base + MVEBU_COMPHY_RX_CTRL1(lane->id)); + val &= ~MVEBU_COMPHY_RX_CTRL1_CLK8T_EN; + val |= MVEBU_COMPHY_RX_CTRL1_RXCLK2X_SEL; + writel(val, priv->base + MVEBU_COMPHY_RX_CTRL1(lane->id)); + + val = readl(priv->base + MVEBU_COMPHY_DLT_CTRL(lane->id)); + val &= ~MVEBU_COMPHY_DLT_CTRL_DTL_FLOOP_EN; + writel(val, priv->base + MVEBU_COMPHY_DLT_CTRL(lane->id)); + + regmap_read(priv->regmap, MVEBU_COMPHY_CONF1(lane->id), &val); + val &= ~MVEBU_COMPHY_CONF1_USB_PCIE; + val |= MVEBU_COMPHY_CONF1_PWRUP; + regmap_write(priv->regmap, MVEBU_COMPHY_CONF1(lane->id), val); + + val = readl(priv->base + MVEBU_COMPHY_GEN1_S0(lane->id)); + val &= ~MVEBU_COMPHY_GEN1_S0_TX_EMPH(0xf); + val |= MVEBU_COMPHY_GEN1_S0_TX_EMPH(0x1); + writel(val, priv->base + MVEBU_COMPHY_GEN1_S0(lane->id)); + + return mvebu_comphy_init_plls(lane, PHY_MODE_SGMII); +} + +static int mvebu_comphy_set_mode_10gkr(struct phy *phy) +{ + struct mvebu_comphy_lane *lane = phy_get_drvdata(phy); + struct mvebu_comphy_priv *priv = lane->priv; + u32 val; + + mvebu_comphy_ethernet_init_reset(lane, PHY_MODE_10GKR); + + val = readl(priv->base + MVEBU_COMPHY_RX_CTRL1(lane->id)); + val |= MVEBU_COMPHY_RX_CTRL1_RXCLK2X_SEL | + MVEBU_COMPHY_RX_CTRL1_CLK8T_EN; + writel(val, priv->base + MVEBU_COMPHY_RX_CTRL1(lane->id)); + + val = readl(priv->base + MVEBU_COMPHY_DLT_CTRL(lane->id)); + val |= MVEBU_COMPHY_DLT_CTRL_DTL_FLOOP_EN; + writel(val, priv->base + MVEBU_COMPHY_DLT_CTRL(lane->id)); + + /* Speed divider */ + val = readl(priv->base + MVEBU_COMPHY_SPEED_DIV(lane->id)); + val |= MVEBU_COMPHY_SPEED_DIV_TX_FORCE; + writel(val, priv->base + MVEBU_COMPHY_SPEED_DIV(lane->id)); + + val = readl(priv->base + MVEBU_COMPHY_SERDES_CFG2(lane->id)); + val |= MVEBU_COMPHY_SERDES_CFG2_DFE_EN; + writel(val, priv->base + MVEBU_COMPHY_SERDES_CFG2(lane->id)); + + /* DFE resolution */ + val = readl(priv->base + MVEBU_COMPHY_DFE_RES(lane->id)); + val |= MVEBU_COMPHY_DFE_RES_FORCE_GEN_TBL; + writel(val, priv->base + MVEBU_COMPHY_DFE_RES(lane->id)); + + val = readl(priv->base + MVEBU_COMPHY_GEN1_S0(lane->id)); + val &= ~(MVEBU_COMPHY_GEN1_S0_TX_AMP(0x1f) | + MVEBU_COMPHY_GEN1_S0_TX_EMPH(0xf)); + val |= MVEBU_COMPHY_GEN1_S0_TX_AMP(0x1c) | + MVEBU_COMPHY_GEN1_S0_TX_EMPH(0xe); + writel(val, priv->base + MVEBU_COMPHY_GEN1_S0(lane->id)); + + val = readl(priv->base + MVEBU_COMPHY_GEN1_S2(lane->id)); + val &= ~MVEBU_COMPHY_GEN1_S2_TX_EMPH(0xf); + val |= MVEBU_COMPHY_GEN1_S2_TX_EMPH_EN; + writel(val, priv->base + MVEBU_COMPHY_GEN1_S2(lane->id)); + + val = readl(priv->base + MVEBU_COMPHY_TX_SLEW_RATE(lane->id)); + val |= MVEBU_COMPHY_TX_SLEW_RATE_EMPH(0x3) | + MVEBU_COMPHY_TX_SLEW_RATE_SLC(0x3f); + writel(val, priv->base + MVEBU_COMPHY_TX_SLEW_RATE(lane->id)); + + /* Impedance calibration */ + val = readl(priv->base + MVEBU_COMPHY_IMP_CAL(lane->id)); + val &= ~MVEBU_COMPHY_IMP_CAL_TX_EXT(0x1f); + val |= MVEBU_COMPHY_IMP_CAL_TX_EXT(0xe) | + MVEBU_COMPHY_IMP_CAL_TX_EXT_EN; + writel(val, priv->base + MVEBU_COMPHY_IMP_CAL(lane->id)); + + val = readl(priv->base + MVEBU_COMPHY_GEN1_S5(lane->id)); + val &= ~MVEBU_COMPHY_GEN1_S5_ICP(0xf); + writel(val, priv->base + MVEBU_COMPHY_GEN1_S5(lane->id)); + + val = readl(priv->base + MVEBU_COMPHY_GEN1_S1(lane->id)); + val &= ~(MVEBU_COMPHY_GEN1_S1_RX_MUL_PI(0x7) | + 
MVEBU_COMPHY_GEN1_S1_RX_MUL_PF(0x7) | + MVEBU_COMPHY_GEN1_S1_RX_MUL_FI(0x3) | + MVEBU_COMPHY_GEN1_S1_RX_MUL_FF(0x3)); + val |= MVEBU_COMPHY_GEN1_S1_RX_DFE_EN | + MVEBU_COMPHY_GEN1_S1_RX_MUL_PI(0x2) | + MVEBU_COMPHY_GEN1_S1_RX_MUL_PF(0x2) | + MVEBU_COMPHY_GEN1_S1_RX_MUL_FF(0x1) | + MVEBU_COMPHY_GEN1_S1_RX_DIV(0x3); + writel(val, priv->base + MVEBU_COMPHY_GEN1_S1(lane->id)); + + val = readl(priv->base + MVEBU_COMPHY_COEF(lane->id)); + val &= ~(MVEBU_COMPHY_COEF_DFE_EN | MVEBU_COMPHY_COEF_DFE_CTRL); + writel(val, priv->base + MVEBU_COMPHY_COEF(lane->id)); + + val = readl(priv->base + MVEBU_COMPHY_GEN1_S4(lane->id)); + val &= ~MVEBU_COMPHY_GEN1_S4_DFE_RES(0x3); + val |= MVEBU_COMPHY_GEN1_S4_DFE_RES(0x1); + writel(val, priv->base + MVEBU_COMPHY_GEN1_S4(lane->id)); + + val = readl(priv->base + MVEBU_COMPHY_GEN1_S3(lane->id)); + val |= MVEBU_COMPHY_GEN1_S3_FBCK_SEL; + writel(val, priv->base + MVEBU_COMPHY_GEN1_S3(lane->id)); + + /* rx training timer */ + val = readl(priv->base + MVEBU_COMPHY_TRAINING5(lane->id)); + val &= ~MVEBU_COMPHY_TRAINING5_RX_TIMER(0x3ff); + val |= MVEBU_COMPHY_TRAINING5_RX_TIMER(0x13); + writel(val, priv->base + MVEBU_COMPHY_TRAINING5(lane->id)); + + /* tx train peak to peak hold */ + val = readl(priv->base + MVEBU_COMPHY_TRAINING0(lane->id)); + val |= MVEBU_COMPHY_TRAINING0_P2P_HOLD; + writel(val, priv->base + MVEBU_COMPHY_TRAINING0(lane->id)); + + val = readl(priv->base + MVEBU_COMPHY_TX_PRESET(lane->id)); + val &= ~MVEBU_COMPHY_TX_PRESET_INDEX(0xf); + val |= MVEBU_COMPHY_TX_PRESET_INDEX(0x2); /* preset coeff */ + writel(val, priv->base + MVEBU_COMPHY_TX_PRESET(lane->id)); + + val = readl(priv->base + MVEBU_COMPHY_FRAME_DETECT3(lane->id)); + val &= ~MVEBU_COMPHY_FRAME_DETECT3_LOST_TIMEOUT_EN; + writel(val, priv->base + MVEBU_COMPHY_FRAME_DETECT3(lane->id)); + + val = readl(priv->base + MVEBU_COMPHY_TX_TRAIN_PRESET(lane->id)); + val |= MVEBU_COMPHY_TX_TRAIN_PRESET_16B_AUTO_EN | + MVEBU_COMPHY_TX_TRAIN_PRESET_PRBS11; + writel(val, priv->base + MVEBU_COMPHY_TX_TRAIN_PRESET(lane->id)); + + val = readl(priv->base + MVEBU_COMPHY_FRAME_DETECT0(lane->id)); + val &= ~MVEBU_COMPHY_FRAME_DETECT0_PATN(0x1ff); + val |= MVEBU_COMPHY_FRAME_DETECT0_PATN(0x88); + writel(val, priv->base + MVEBU_COMPHY_FRAME_DETECT0(lane->id)); + + val = readl(priv->base + MVEBU_COMPHY_DME(lane->id)); + val |= MVEBU_COMPHY_DME_ETH_MODE; + writel(val, priv->base + MVEBU_COMPHY_DME(lane->id)); + + val = readl(priv->base + MVEBU_COMPHY_VDD_CAL0(lane->id)); + val |= MVEBU_COMPHY_VDD_CAL0_CONT_MODE; + writel(val, priv->base + MVEBU_COMPHY_VDD_CAL0(lane->id)); + + val = readl(priv->base + MVEBU_SP_CALIB(lane->id)); + val &= ~MVEBU_SP_CALIB_SAMPLER(0x3); + val |= MVEBU_SP_CALIB_SAMPLER(0x3) | + MVEBU_SP_CALIB_SAMPLER_EN; + writel(val, priv->base + MVEBU_SP_CALIB(lane->id)); + val &= ~MVEBU_SP_CALIB_SAMPLER_EN; + writel(val, priv->base + MVEBU_SP_CALIB(lane->id)); + + /* External rx regulator */ + val = readl(priv->base + MVEBU_COMPHY_EXT_SELV(lane->id)); + val &= ~MVEBU_COMPHY_EXT_SELV_RX_SAMPL(0x1f); + val |= MVEBU_COMPHY_EXT_SELV_RX_SAMPL(0x1a); + writel(val, priv->base + MVEBU_COMPHY_EXT_SELV(lane->id)); + + return mvebu_comphy_init_plls(lane, PHY_MODE_10GKR); +} + +static int mvebu_comphy_power_on(struct phy *phy) +{ + struct mvebu_comphy_lane *lane = phy_get_drvdata(phy); + struct mvebu_comphy_priv *priv = lane->priv; + int ret, mux; + u32 val; + + /* mux is signed so that an error from mvebu_comphy_get_mux() is detected */ + mux = mvebu_comphy_get_mux(lane->id, lane->port, lane->mode); + if (mux < 0) + return -ENOTSUPP; + + regmap_read(priv->regmap, MVEBU_COMPHY_SELECTOR, &val); + val
&= ~(0xf << MVEBU_COMPHY_SELECTOR_PHY(lane->id)); + val |= mux << MVEBU_COMPHY_SELECTOR_PHY(lane->id); + regmap_write(priv->regmap, MVEBU_COMPHY_SELECTOR, val); + + switch (lane->mode) { + case PHY_MODE_SGMII: + ret = mvebu_comphy_set_mode_sgmii(phy); + break; + case PHY_MODE_10GKR: + ret = mvebu_comphy_set_mode_10gkr(phy); + break; + default: + return -ENOTSUPP; + } + + /* digital reset */ + val = readl(priv->base + MVEBU_COMPHY_SERDES_CFG1(lane->id)); + val |= MVEBU_COMPHY_SERDES_CFG1_RF_RESET; + writel(val, priv->base + MVEBU_COMPHY_SERDES_CFG1(lane->id)); + + return ret; +} + +static int mvebu_comphy_set_mode(struct phy *phy, enum phy_mode mode) +{ + struct mvebu_comphy_lane *lane = phy_get_drvdata(phy); + + if (mvebu_comphy_get_mux(lane->id, lane->port, mode) < 0) + return -EINVAL; + + lane->mode = mode; + return 0; +} + +static int mvebu_comphy_power_off(struct phy *phy) +{ + struct mvebu_comphy_lane *lane = phy_get_drvdata(phy); + struct mvebu_comphy_priv *priv = lane->priv; + u32 val; + + val = readl(priv->base + MVEBU_COMPHY_SERDES_CFG1(lane->id)); + val &= ~(MVEBU_COMPHY_SERDES_CFG1_RESET | + MVEBU_COMPHY_SERDES_CFG1_CORE_RESET | + MVEBU_COMPHY_SERDES_CFG1_RF_RESET); + writel(val, priv->base + MVEBU_COMPHY_SERDES_CFG1(lane->id)); + + regmap_read(priv->regmap, MVEBU_COMPHY_SELECTOR, &val); + val &= ~(0xf << MVEBU_COMPHY_SELECTOR_PHY(lane->id)); + regmap_write(priv->regmap, MVEBU_COMPHY_SELECTOR, val); + + return 0; +} + +static const struct phy_ops mvebu_comphy_ops = { + .power_on = mvebu_comphy_power_on, + .power_off = mvebu_comphy_power_off, + .set_mode = mvebu_comphy_set_mode, + .owner = THIS_MODULE, +}; + +static struct phy *mvebu_comphy_xlate(struct device *dev, + struct of_phandle_args *args) +{ + struct mvebu_comphy_lane *lane; + struct phy *phy; + + if (WARN_ON(args->args[0] >= MVEBU_COMPHY_PORTS)) + return ERR_PTR(-EINVAL); + + phy = of_phy_simple_xlate(dev, args); + if (IS_ERR(phy)) + return phy; + + lane = phy_get_drvdata(phy); + if (lane->port >= 0) + return ERR_PTR(-EBUSY); + lane->port = args->args[0]; + + return phy; +} + +static int mvebu_comphy_probe(struct platform_device *pdev) +{ + struct mvebu_comphy_priv *priv; + struct phy_provider *provider; + struct device_node *child; + struct resource *res; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->dev = &pdev->dev; + priv->regmap = + syscon_regmap_lookup_by_phandle(pdev->dev.of_node, + "marvell,system-controller"); + if (IS_ERR(priv->regmap)) + return PTR_ERR(priv->regmap); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->base = devm_ioremap_resource(&pdev->dev, res); + if (!priv->base) + return -ENOMEM; + + for_each_available_child_of_node(pdev->dev.of_node, child) { + struct mvebu_comphy_lane *lane; + struct phy *phy; + int ret; + u32 val; + + ret = of_property_read_u32(child, "reg", &val); + if (ret < 0) { + dev_err(&pdev->dev, "missing 'reg' property (%d)\n", + ret); + continue; + } + + if (val >= MVEBU_COMPHY_LANES) { + dev_err(&pdev->dev, "invalid 'reg' property\n"); + continue; + } + + lane = devm_kzalloc(&pdev->dev, sizeof(*lane), GFP_KERNEL); + if (!lane) + return -ENOMEM; + + phy = devm_phy_create(&pdev->dev, child, &mvebu_comphy_ops); + if (IS_ERR(phy)) + return PTR_ERR(phy); + + lane->priv = priv; + lane->mode = PHY_MODE_INVALID; + lane->id = val; + lane->port = -1; + phy_set_drvdata(phy, lane); + + /* + * Once all modes are supported in this driver we should call + * mvebu_comphy_power_off(phy) here to avoid relying on the + * 
bootloader/firmware configuration. + */ + } + + dev_set_drvdata(&pdev->dev, priv); + provider = devm_of_phy_provider_register(&pdev->dev, + mvebu_comphy_xlate); + return PTR_ERR_OR_ZERO(provider); +} + +static const struct of_device_id mvebu_comphy_of_match_table[] = { + { .compatible = "marvell,comphy-cp110" }, + { }, +}; +MODULE_DEVICE_TABLE(of, mvebu_comphy_of_match_table); + +static struct platform_driver mvebu_comphy_driver = { + .probe = mvebu_comphy_probe, + .driver = { + .name = "mvebu-comphy", + .of_match_table = mvebu_comphy_of_match_table, + }, +}; +module_platform_driver(mvebu_comphy_driver); + +MODULE_AUTHOR("Antoine Tenart "); +MODULE_DESCRIPTION("Common PHY driver for mvebu SoCs"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/phy/rockchip/phy-rockchip-pcie.c b/drivers/phy/rockchip/phy-rockchip-pcie.c index 6904633cad68..7cbdde029c0a 100644 --- a/drivers/phy/rockchip/phy-rockchip-pcie.c +++ b/drivers/phy/rockchip/phy-rockchip-pcie.c @@ -73,10 +73,38 @@ struct rockchip_pcie_data { struct rockchip_pcie_phy { struct rockchip_pcie_data *phy_data; struct regmap *reg_base; + struct phy_pcie_instance { + struct phy *phy; + u32 index; + } phys[PHY_MAX_LANE_NUM]; + struct mutex pcie_mutex; struct reset_control *phy_rst; struct clk *clk_pciephy_ref; + int pwr_cnt; + int init_cnt; }; +static struct rockchip_pcie_phy *to_pcie_phy(struct phy_pcie_instance *inst) +{ + return container_of(inst, struct rockchip_pcie_phy, + phys[inst->index]); +} + +static struct phy *rockchip_pcie_phy_of_xlate(struct device *dev, + struct of_phandle_args *args) +{ + struct rockchip_pcie_phy *rk_phy = dev_get_drvdata(dev); + + if (args->args_count == 0) + return rk_phy->phys[0].phy; + + if (WARN_ON(args->args[0] >= PHY_MAX_LANE_NUM)) + return ERR_PTR(-ENODEV); + + return rk_phy->phys[args->args[0]].phy; +} + + static inline void phy_wr_cfg(struct rockchip_pcie_phy *rk_phy, u32 addr, u32 data) { @@ -116,29 +144,59 @@ static inline u32 phy_rd_cfg(struct rockchip_pcie_phy *rk_phy, static int rockchip_pcie_phy_power_off(struct phy *phy) { - struct rockchip_pcie_phy *rk_phy = phy_get_drvdata(phy); + struct phy_pcie_instance *inst = phy_get_drvdata(phy); + struct rockchip_pcie_phy *rk_phy = to_pcie_phy(inst); int err = 0; + mutex_lock(&rk_phy->pcie_mutex); + + regmap_write(rk_phy->reg_base, + rk_phy->phy_data->pcie_laneoff, + HIWORD_UPDATE(PHY_LANE_IDLE_OFF, + PHY_LANE_IDLE_MASK, + PHY_LANE_IDLE_A_SHIFT + inst->index)); + + if (--rk_phy->pwr_cnt) + goto err_out; + err = reset_control_assert(rk_phy->phy_rst); if (err) { dev_err(&phy->dev, "assert phy_rst err %d\n", err); - return err; + goto err_restore; } +err_out: + mutex_unlock(&rk_phy->pcie_mutex); return 0; + +err_restore: + rk_phy->pwr_cnt++; + regmap_write(rk_phy->reg_base, + rk_phy->phy_data->pcie_laneoff, + HIWORD_UPDATE(!PHY_LANE_IDLE_OFF, + PHY_LANE_IDLE_MASK, + PHY_LANE_IDLE_A_SHIFT + inst->index)); + mutex_unlock(&rk_phy->pcie_mutex); + return err; } static int rockchip_pcie_phy_power_on(struct phy *phy) { - struct rockchip_pcie_phy *rk_phy = phy_get_drvdata(phy); + struct phy_pcie_instance *inst = phy_get_drvdata(phy); + struct rockchip_pcie_phy *rk_phy = to_pcie_phy(inst); int err = 0; u32 status; unsigned long timeout; + mutex_lock(&rk_phy->pcie_mutex); + + if (rk_phy->pwr_cnt++) + goto err_out; + err = reset_control_deassert(rk_phy->phy_rst); if (err) { dev_err(&phy->dev, "deassert phy_rst err %d\n", err); - return err; + goto err_pwr_cnt; } regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf, @@ -146,6 +204,12 @@ static int 
rockchip_pcie_phy_power_on(struct phy *phy) PHY_CFG_ADDR_MASK, PHY_CFG_ADDR_SHIFT)); + regmap_write(rk_phy->reg_base, + rk_phy->phy_data->pcie_laneoff, + HIWORD_UPDATE(!PHY_LANE_IDLE_OFF, + PHY_LANE_IDLE_MASK, + PHY_LANE_IDLE_A_SHIFT + inst->index)); + /* * No documented timeout value for phy operation below, * so we make it large enough here. And we use loop-break @@ -214,18 +278,29 @@ static int rockchip_pcie_phy_power_on(struct phy *phy) goto err_pll_lock; } +err_out: + mutex_unlock(&rk_phy->pcie_mutex); return 0; err_pll_lock: reset_control_assert(rk_phy->phy_rst); +err_pwr_cnt: + rk_phy->pwr_cnt--; + mutex_unlock(&rk_phy->pcie_mutex); return err; } static int rockchip_pcie_phy_init(struct phy *phy) { - struct rockchip_pcie_phy *rk_phy = phy_get_drvdata(phy); + struct phy_pcie_instance *inst = phy_get_drvdata(phy); + struct rockchip_pcie_phy *rk_phy = to_pcie_phy(inst); int err = 0; + mutex_lock(&rk_phy->pcie_mutex); + + if (rk_phy->init_cnt++) + goto err_out; + err = clk_prepare_enable(rk_phy->clk_pciephy_ref); if (err) { dev_err(&phy->dev, "Fail to enable pcie ref clock.\n"); @@ -238,20 +313,33 @@ static int rockchip_pcie_phy_init(struct phy *phy) goto err_reset; } - return err; +err_out: + mutex_unlock(&rk_phy->pcie_mutex); + return 0; err_reset: + clk_disable_unprepare(rk_phy->clk_pciephy_ref); err_refclk: + rk_phy->init_cnt--; + mutex_unlock(&rk_phy->pcie_mutex); return err; } static int rockchip_pcie_phy_exit(struct phy *phy) { - struct rockchip_pcie_phy *rk_phy = phy_get_drvdata(phy); + struct phy_pcie_instance *inst = phy_get_drvdata(phy); + struct rockchip_pcie_phy *rk_phy = to_pcie_phy(inst); + + mutex_lock(&rk_phy->pcie_mutex); + + if (--rk_phy->init_cnt) + goto err_init_cnt; clk_disable_unprepare(rk_phy->clk_pciephy_ref); +err_init_cnt: + mutex_unlock(&rk_phy->pcie_mutex); return 0; } @@ -283,10 +371,11 @@ static int rockchip_pcie_phy_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct rockchip_pcie_phy *rk_phy; - struct phy *generic_phy; struct phy_provider *phy_provider; struct regmap *grf; const struct of_device_id *of_id; + int i; + u32 phy_num; grf = syscon_node_to_regmap(dev->parent->of_node); if (IS_ERR(grf)) { @@ -305,6 +394,8 @@ static int rockchip_pcie_phy_probe(struct platform_device *pdev) rk_phy->phy_data = (struct rockchip_pcie_data *)of_id->data; rk_phy->reg_base = grf; + mutex_init(&rk_phy->pcie_mutex); + rk_phy->phy_rst = devm_reset_control_get(dev, "phy"); if (IS_ERR(rk_phy->phy_rst)) { if (PTR_ERR(rk_phy->phy_rst) != -EPROBE_DEFER) @@ -319,14 +410,26 @@ static int rockchip_pcie_phy_probe(struct platform_device *pdev) return PTR_ERR(rk_phy->clk_pciephy_ref); } - generic_phy = devm_phy_create(dev, dev->of_node, &ops); - if (IS_ERR(generic_phy)) { - dev_err(dev, "failed to create PHY\n"); - return PTR_ERR(generic_phy); + /* parse #phy-cells to see if it's legacy PHY model */ + if (of_property_read_u32(dev->of_node, "#phy-cells", &phy_num)) + return -ENOENT; + + phy_num = (phy_num == 0) ? 
1 : PHY_MAX_LANE_NUM; + dev_dbg(dev, "phy number is %d\n", phy_num); + + for (i = 0; i < phy_num; i++) { + rk_phy->phys[i].phy = devm_phy_create(dev, dev->of_node, &ops); + if (IS_ERR(rk_phy->phys[i].phy)) { + dev_err(dev, "failed to create PHY%d\n", i); + return PTR_ERR(rk_phy->phys[i].phy); + } + rk_phy->phys[i].index = i; + phy_set_drvdata(rk_phy->phys[i].phy, &rk_phy->phys[i]); } - phy_set_drvdata(generic_phy, rk_phy); - phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + platform_set_drvdata(pdev, rk_phy); + phy_provider = devm_of_phy_provider_register(dev, + rockchip_pcie_phy_of_xlate); return PTR_ERR_OR_ZERO(phy_provider); } diff --git a/drivers/phy/ti/phy-twl4030-usb.c b/drivers/phy/ti/phy-twl4030-usb.c index 0e9013868188..a44680d64f9b 100644 --- a/drivers/phy/ti/phy-twl4030-usb.c +++ b/drivers/phy/ti/phy-twl4030-usb.c @@ -36,7 +36,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index 34ad10873452..82cd8b08d71f 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig @@ -100,6 +100,7 @@ config PINCTRL_AMD tristate "AMD GPIO pin control" depends on GPIOLIB select GPIOLIB_IRQCHIP + select PINMUX select PINCONF select GENERIC_PINCONF help @@ -338,6 +339,15 @@ config PINCTRL_INGENIC select GENERIC_PINMUX_FUNCTIONS select REGMAP_MMIO +config PINCTRL_RK805 + tristate "Pinctrl and GPIO driver for RK805 PMIC" + depends on MFD_RK808 + select GPIOLIB + select PINMUX + select GENERIC_PINCONF + help + This selects the pinctrl driver for RK805. + source "drivers/pinctrl/aspeed/Kconfig" source "drivers/pinctrl/bcm/Kconfig" source "drivers/pinctrl/berlin/Kconfig" diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile index 4c44703ac97f..c16e27900dbb 100644 --- a/drivers/pinctrl/Makefile +++ b/drivers/pinctrl/Makefile @@ -43,6 +43,7 @@ obj-$(CONFIG_PINCTRL_TB10X) += pinctrl-tb10x.o obj-$(CONFIG_PINCTRL_ST) += pinctrl-st.o obj-$(CONFIG_PINCTRL_ZYNQ) += pinctrl-zynq.o obj-$(CONFIG_PINCTRL_INGENIC) += pinctrl-ingenic.o +obj-$(CONFIG_PINCTRL_RK805) += pinctrl-rk805.o obj-$(CONFIG_ARCH_ASPEED) += aspeed/ obj-y += bcm/ diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c index 0944310225db..ff782445dfb7 100644 --- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c +++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c @@ -373,16 +373,12 @@ static void bcm2835_gpio_irq_handle_bank(struct bcm2835_pinctrl *pc, unsigned long events; unsigned offset; unsigned gpio; - unsigned int type; events = bcm2835_gpio_rd(pc, GPEDS0 + bank * 4); events &= mask; events &= pc->enabled_irq_map[bank]; for_each_set_bit(offset, &events, 32) { gpio = (32 * bank) + offset; - /* FIXME: no clue why the code looks up the type here */ - type = pc->irq_type[gpio]; - generic_handle_irq(irq_linear_revmap(pc->gpio_chip.irqdomain, gpio)); } diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index 04e929fd0ffe..fadbca907c7c 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c @@ -1577,6 +1577,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) struct gpio_chip *chip = &pctrl->chip; bool need_valid_mask = !dmi_check_system(chv_no_valid_mask); int ret, i, offset; + int irq_base; *chip = chv_gpio_chip; @@ -1622,7 +1623,18 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) /* Clear all interrupts */ chv_writel(0xffff, pctrl->regs + CHV_INTSTAT); - ret = 
gpiochip_irqchip_add(chip, &chv_gpio_irqchip, 0, + if (!need_valid_mask) { + irq_base = devm_irq_alloc_descs(pctrl->dev, -1, 0, + chip->ngpio, NUMA_NO_NODE); + if (irq_base < 0) { + dev_err(pctrl->dev, "Failed to allocate IRQ numbers\n"); + return irq_base; + } + } else { + irq_base = 0; + } + + ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, irq_base, handle_bad_irq, IRQ_TYPE_NONE); if (ret) { dev_err(pctrl->dev, "failed to add IRQ chip\n"); diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c index b8b6ab072cd0..71b944748304 100644 --- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c +++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c @@ -550,9 +550,9 @@ static int armada_37xx_irq_set_wake(struct irq_data *d, unsigned int on) spin_lock_irqsave(&info->irq_lock, flags); val = readl(info->base + reg); if (on) - val |= d->mask; + val |= (BIT(d->hwirq % GPIO_PER_REG)); else - val &= ~d->mask; + val &= ~(BIT(d->hwirq % GPIO_PER_REG)); writel(val, info->base + reg); spin_unlock_irqrestore(&info->irq_lock, flags); @@ -571,10 +571,10 @@ static int armada_37xx_irq_set_type(struct irq_data *d, unsigned int type) val = readl(info->base + reg); switch (type) { case IRQ_TYPE_EDGE_RISING: - val &= ~d->mask; + val &= ~(BIT(d->hwirq % GPIO_PER_REG)); break; case IRQ_TYPE_EDGE_FALLING: - val |= d->mask; + val |= (BIT(d->hwirq % GPIO_PER_REG)); break; default: spin_unlock_irqrestore(&info->irq_lock, flags); @@ -624,11 +624,27 @@ static void armada_37xx_irq_handler(struct irq_desc *desc) chained_irq_exit(chip, desc); } +static unsigned int armada_37xx_irq_startup(struct irq_data *d) +{ + struct gpio_chip *chip = irq_data_get_irq_chip_data(d); + int irq = d->hwirq - chip->irq_base; + /* + * The mask field is a "precomputed bitmask for accessing the + * chip registers" which was introduced for the generic + * irqchip framework. As we don't use this framework, we can + * reuse this field for our own usage. + */ + d->mask = BIT(irq % GPIO_PER_REG); + + armada_37xx_irq_unmask(d); + + return 0; +} + static int armada_37xx_irqchip_register(struct platform_device *pdev, struct armada_37xx_pinctrl *info) { struct device_node *np = info->dev->of_node; - int nrirqs = info->data->nr_pins; struct gpio_chip *gc = &info->gpio_chip; struct irq_chip *irqchip = &info->irq_chip; struct resource res; @@ -666,8 +682,8 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev, irqchip->irq_unmask = armada_37xx_irq_unmask; irqchip->irq_set_wake = armada_37xx_irq_set_wake; irqchip->irq_set_type = armada_37xx_irq_set_type; + irqchip->irq_startup = armada_37xx_irq_startup; irqchip->name = info->data->name; - ret = gpiochip_irqchip_add(gc, irqchip, 0, handle_edge_irq, IRQ_TYPE_NONE); if (ret) { @@ -680,19 +696,6 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev, * controller. But we do not take advantage of this and use * the chained irq with all of them. */ - for (i = 0; i < nrirqs; i++) { - struct irq_data *d = irq_get_irq_data(gc->irq_base + i); - - /* - * The mask field is a "precomputed bitmask for - * accessing the chip registers" which was introduced - * for the generic irqchip framework. As we don't use - * this framework, we can reuse this field for our own - * usage. 
- */ - d->mask = BIT(i % GPIO_PER_REG); - } - for (i = 0; i < nr_irq_parent; i++) { int irq = irq_of_parse_and_map(np, i); diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index 38af1ec2df0c..3f6b34febbf1 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c @@ -36,6 +36,7 @@ #include #include +#include "core.h" #include "pinctrl-utils.h" #include "pinctrl-amd.h" @@ -725,6 +726,69 @@ static const struct pinconf_ops amd_pinconf_ops = { .pin_config_group_set = amd_pinconf_group_set, }; +#ifdef CONFIG_PM_SLEEP +static bool amd_gpio_should_save(struct amd_gpio *gpio_dev, unsigned int pin) +{ + const struct pin_desc *pd = pin_desc_get(gpio_dev->pctrl, pin); + + if (!pd) + return false; + + /* + * Only restore the pin if it is actually in use by the kernel (or + * by userspace). + */ + if (pd->mux_owner || pd->gpio_owner || + gpiochip_line_is_irq(&gpio_dev->gc, pin)) + return true; + + return false; +} + +int amd_gpio_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct amd_gpio *gpio_dev = platform_get_drvdata(pdev); + struct pinctrl_desc *desc = gpio_dev->pctrl->desc; + int i; + + for (i = 0; i < desc->npins; i++) { + int pin = desc->pins[i].number; + + if (!amd_gpio_should_save(gpio_dev, pin)) + continue; + + gpio_dev->saved_regs[i] = readl(gpio_dev->base + pin*4); + } + + return 0; +} + +int amd_gpio_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct amd_gpio *gpio_dev = platform_get_drvdata(pdev); + struct pinctrl_desc *desc = gpio_dev->pctrl->desc; + int i; + + for (i = 0; i < desc->npins; i++) { + int pin = desc->pins[i].number; + + if (!amd_gpio_should_save(gpio_dev, pin)) + continue; + + writel(gpio_dev->saved_regs[i], gpio_dev->base + pin*4); + } + + return 0; +} + +static const struct dev_pm_ops amd_gpio_pm_ops = { + SET_LATE_SYSTEM_SLEEP_PM_OPS(amd_gpio_suspend, + amd_gpio_resume) +}; +#endif + static struct pinctrl_desc amd_pinctrl_desc = { .pins = kerncz_pins, .npins = ARRAY_SIZE(kerncz_pins), @@ -764,6 +828,14 @@ static int amd_gpio_probe(struct platform_device *pdev) return irq_base; } +#ifdef CONFIG_PM_SLEEP + gpio_dev->saved_regs = devm_kcalloc(&pdev->dev, amd_pinctrl_desc.npins, + sizeof(*gpio_dev->saved_regs), + GFP_KERNEL); + if (!gpio_dev->saved_regs) + return -ENOMEM; +#endif + gpio_dev->pdev = pdev; gpio_dev->gc.direction_input = amd_gpio_direction_input; gpio_dev->gc.direction_output = amd_gpio_direction_output; @@ -853,6 +925,9 @@ static struct platform_driver amd_gpio_driver = { .driver = { .name = "amd_gpio", .acpi_match_table = ACPI_PTR(amd_gpio_acpi_match), +#ifdef CONFIG_PM_SLEEP + .pm = &amd_gpio_pm_ops, +#endif }, .probe = amd_gpio_probe, .remove = amd_gpio_remove, diff --git a/drivers/pinctrl/pinctrl-amd.h b/drivers/pinctrl/pinctrl-amd.h index 5b1cb965c767..8fa453a59da5 100644 --- a/drivers/pinctrl/pinctrl-amd.h +++ b/drivers/pinctrl/pinctrl-amd.h @@ -97,6 +97,7 @@ struct amd_gpio { unsigned int hwbank_num; struct resource *res; struct platform_device *pdev; + u32 *saved_regs; }; /* KERNCZ configuration*/ diff --git a/drivers/pinctrl/pinctrl-rk805.c b/drivers/pinctrl/pinctrl-rk805.c new file mode 100644 index 000000000000..b0bfd3082a1b --- /dev/null +++ b/drivers/pinctrl/pinctrl-rk805.c @@ -0,0 +1,493 @@ +/* + * Pinctrl driver for Rockchip RK805 PMIC + * + * Copyright (c) 2017, Fuzhou Rockchip Electronics Co., Ltd + * + * Author: Joseph Chen + * + * This program is free software; you can redistribute it and/or modify it + 
* under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * Based on the pinctrl-as3722 driver + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "core.h" +#include "pinconf.h" +#include "pinctrl-utils.h" + +struct rk805_pin_function { + const char *name; + const char *const *groups; + unsigned int ngroups; + int mux_option; +}; + +struct rk805_pin_group { + const char *name; + const unsigned int pins[1]; + unsigned int npins; +}; + +/* + * @reg: gpio setting register; + * @fun_mask: functions select mask value, when set is gpio; + * @dir_mask: input or output mask value, when set is output, otherwise input; + * @val_mask: gpio set value, when set is level high, otherwise low; + * + * Different PMIC has different pin features, belowing 3 mask members are not + * all necessary for every PMIC. For example, RK805 has 2 pins that can be used + * as output only GPIOs, so func_mask and dir_mask are not needed. RK816 has 1 + * pin that can be used as TS/GPIO, so fun_mask, dir_mask and val_mask are all + * necessary. + */ +struct rk805_pin_config { + u8 reg; + u8 fun_msk; + u8 dir_msk; + u8 val_msk; +}; + +struct rk805_pctrl_info { + struct rk808 *rk808; + struct device *dev; + struct pinctrl_dev *pctl; + struct gpio_chip gpio_chip; + struct pinctrl_desc pinctrl_desc; + const struct rk805_pin_function *functions; + unsigned int num_functions; + const struct rk805_pin_group *groups; + int num_pin_groups; + const struct pinctrl_pin_desc *pins; + unsigned int num_pins; + struct rk805_pin_config *pin_cfg; +}; + +enum rk805_pinmux_option { + RK805_PINMUX_GPIO, +}; + +enum { + RK805_GPIO0, + RK805_GPIO1, +}; + +static const char *const rk805_gpio_groups[] = { + "gpio0", + "gpio1", +}; + +/* RK805: 2 output only GPIOs */ +static const struct pinctrl_pin_desc rk805_pins_desc[] = { + PINCTRL_PIN(RK805_GPIO0, "gpio0"), + PINCTRL_PIN(RK805_GPIO1, "gpio1"), +}; + +static const struct rk805_pin_function rk805_pin_functions[] = { + { + .name = "gpio", + .groups = rk805_gpio_groups, + .ngroups = ARRAY_SIZE(rk805_gpio_groups), + .mux_option = RK805_PINMUX_GPIO, + }, +}; + +static const struct rk805_pin_group rk805_pin_groups[] = { + { + .name = "gpio0", + .pins = { RK805_GPIO0 }, + .npins = 1, + }, + { + .name = "gpio1", + .pins = { RK805_GPIO1 }, + .npins = 1, + }, +}; + +#define RK805_GPIO0_VAL_MSK BIT(0) +#define RK805_GPIO1_VAL_MSK BIT(1) + +static struct rk805_pin_config rk805_gpio_cfgs[] = { + { + .reg = RK805_OUT_REG, + .val_msk = RK805_GPIO0_VAL_MSK, + }, + { + .reg = RK805_OUT_REG, + .val_msk = RK805_GPIO1_VAL_MSK, + }, +}; + +/* generic gpio chip */ +static int rk805_gpio_get(struct gpio_chip *chip, unsigned int offset) +{ + struct rk805_pctrl_info *pci = gpiochip_get_data(chip); + int ret, val; + + ret = regmap_read(pci->rk808->regmap, pci->pin_cfg[offset].reg, &val); + if (ret) { + dev_err(pci->dev, "get gpio%d value failed\n", offset); + return ret; + } + + return !!(val & pci->pin_cfg[offset].val_msk); +} + +static void rk805_gpio_set(struct gpio_chip *chip, + unsigned int offset, + int value) +{ + struct rk805_pctrl_info *pci = gpiochip_get_data(chip); + int ret; + + ret = regmap_update_bits(pci->rk808->regmap, + pci->pin_cfg[offset].reg, + pci->pin_cfg[offset].val_msk, + value ? 
pci->pin_cfg[offset].val_msk : 0); + if (ret) + dev_err(pci->dev, "set gpio%d value %d failed\n", + offset, value); +} + +static int rk805_gpio_direction_input(struct gpio_chip *chip, + unsigned int offset) +{ + return pinctrl_gpio_direction_input(chip->base + offset); +} + +static int rk805_gpio_direction_output(struct gpio_chip *chip, + unsigned int offset, int value) +{ + rk805_gpio_set(chip, offset, value); + return pinctrl_gpio_direction_output(chip->base + offset); +} + +static int rk805_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) +{ + struct rk805_pctrl_info *pci = gpiochip_get_data(chip); + unsigned int val; + int ret; + + /* default output*/ + if (!pci->pin_cfg[offset].dir_msk) + return 0; + + ret = regmap_read(pci->rk808->regmap, + pci->pin_cfg[offset].reg, + &val); + if (ret) { + dev_err(pci->dev, "get gpio%d direction failed\n", offset); + return ret; + } + + return !(val & pci->pin_cfg[offset].dir_msk); +} + +static struct gpio_chip rk805_gpio_chip = { + .label = "rk805-gpio", + .request = gpiochip_generic_request, + .free = gpiochip_generic_free, + .get_direction = rk805_gpio_get_direction, + .get = rk805_gpio_get, + .set = rk805_gpio_set, + .direction_input = rk805_gpio_direction_input, + .direction_output = rk805_gpio_direction_output, + .can_sleep = true, + .base = -1, + .owner = THIS_MODULE, +}; + +/* generic pinctrl */ +static int rk805_pinctrl_get_groups_count(struct pinctrl_dev *pctldev) +{ + struct rk805_pctrl_info *pci = pinctrl_dev_get_drvdata(pctldev); + + return pci->num_pin_groups; +} + +static const char *rk805_pinctrl_get_group_name(struct pinctrl_dev *pctldev, + unsigned int group) +{ + struct rk805_pctrl_info *pci = pinctrl_dev_get_drvdata(pctldev); + + return pci->groups[group].name; +} + +static int rk805_pinctrl_get_group_pins(struct pinctrl_dev *pctldev, + unsigned int group, + const unsigned int **pins, + unsigned int *num_pins) +{ + struct rk805_pctrl_info *pci = pinctrl_dev_get_drvdata(pctldev); + + *pins = pci->groups[group].pins; + *num_pins = pci->groups[group].npins; + + return 0; +} + +static const struct pinctrl_ops rk805_pinctrl_ops = { + .get_groups_count = rk805_pinctrl_get_groups_count, + .get_group_name = rk805_pinctrl_get_group_name, + .get_group_pins = rk805_pinctrl_get_group_pins, + .dt_node_to_map = pinconf_generic_dt_node_to_map_pin, + .dt_free_map = pinctrl_utils_free_map, +}; + +static int rk805_pinctrl_get_funcs_count(struct pinctrl_dev *pctldev) +{ + struct rk805_pctrl_info *pci = pinctrl_dev_get_drvdata(pctldev); + + return pci->num_functions; +} + +static const char *rk805_pinctrl_get_func_name(struct pinctrl_dev *pctldev, + unsigned int function) +{ + struct rk805_pctrl_info *pci = pinctrl_dev_get_drvdata(pctldev); + + return pci->functions[function].name; +} + +static int rk805_pinctrl_get_func_groups(struct pinctrl_dev *pctldev, + unsigned int function, + const char *const **groups, + unsigned int *const num_groups) +{ + struct rk805_pctrl_info *pci = pinctrl_dev_get_drvdata(pctldev); + + *groups = pci->functions[function].groups; + *num_groups = pci->functions[function].ngroups; + + return 0; +} + +static int _rk805_pinctrl_set_mux(struct pinctrl_dev *pctldev, + unsigned int offset, + int mux) +{ + struct rk805_pctrl_info *pci = pinctrl_dev_get_drvdata(pctldev); + int ret; + + if (!pci->pin_cfg[offset].fun_msk) + return 0; + + if (mux == RK805_PINMUX_GPIO) { + ret = regmap_update_bits(pci->rk808->regmap, + pci->pin_cfg[offset].reg, + pci->pin_cfg[offset].fun_msk, + pci->pin_cfg[offset].fun_msk); + if (ret) 
{ + dev_err(pci->dev, "set gpio%d GPIO failed\n", offset); + return ret; + } + } else { + dev_err(pci->dev, "Couldn't find function mux %d\n", mux); + return -EINVAL; + } + + return 0; +} + +static int rk805_pinctrl_set_mux(struct pinctrl_dev *pctldev, + unsigned int function, + unsigned int group) +{ + struct rk805_pctrl_info *pci = pinctrl_dev_get_drvdata(pctldev); + int mux = pci->functions[function].mux_option; + int offset = group; + + return _rk805_pinctrl_set_mux(pctldev, offset, mux); +} + +static int rk805_pmx_gpio_set_direction(struct pinctrl_dev *pctldev, + struct pinctrl_gpio_range *range, + unsigned int offset, bool input) +{ + struct rk805_pctrl_info *pci = pinctrl_dev_get_drvdata(pctldev); + int ret; + + /* switch to gpio function */ + ret = _rk805_pinctrl_set_mux(pctldev, offset, RK805_PINMUX_GPIO); + if (ret) { + dev_err(pci->dev, "set gpio%d mux failed\n", offset); + return ret; + } + + /* set direction */ + if (!pci->pin_cfg[offset].dir_msk) + return 0; + + ret = regmap_update_bits(pci->rk808->regmap, + pci->pin_cfg[offset].reg, + pci->pin_cfg[offset].dir_msk, + input ? 0 : pci->pin_cfg[offset].dir_msk); + if (ret) { + dev_err(pci->dev, "set gpio%d direction failed\n", offset); + return ret; + } + + return ret; +} + +static const struct pinmux_ops rk805_pinmux_ops = { + .get_functions_count = rk805_pinctrl_get_funcs_count, + .get_function_name = rk805_pinctrl_get_func_name, + .get_function_groups = rk805_pinctrl_get_func_groups, + .set_mux = rk805_pinctrl_set_mux, + .gpio_set_direction = rk805_pmx_gpio_set_direction, +}; + +static int rk805_pinconf_get(struct pinctrl_dev *pctldev, + unsigned int pin, unsigned long *config) +{ + struct rk805_pctrl_info *pci = pinctrl_dev_get_drvdata(pctldev); + enum pin_config_param param = pinconf_to_config_param(*config); + u32 arg = 0; + + switch (param) { + case PIN_CONFIG_OUTPUT: + arg = rk805_gpio_get(&pci->gpio_chip, pin); + break; + default: + dev_err(pci->dev, "Properties not supported\n"); + return -ENOTSUPP; + } + + *config = pinconf_to_config_packed(param, (u16)arg); + + return 0; +} + +static int rk805_pinconf_set(struct pinctrl_dev *pctldev, + unsigned int pin, unsigned long *configs, + unsigned int num_configs) +{ + struct rk805_pctrl_info *pci = pinctrl_dev_get_drvdata(pctldev); + enum pin_config_param param; + u32 i, arg = 0; + + for (i = 0; i < num_configs; i++) { + param = pinconf_to_config_param(configs[i]); + arg = pinconf_to_config_argument(configs[i]); + + switch (param) { + case PIN_CONFIG_OUTPUT: + rk805_gpio_set(&pci->gpio_chip, pin, arg); + rk805_pmx_gpio_set_direction(pctldev, NULL, pin, false); + break; + default: + dev_err(pci->dev, "Properties not supported\n"); + return -ENOTSUPP; + } + } + + return 0; +} + +static const struct pinconf_ops rk805_pinconf_ops = { + .pin_config_get = rk805_pinconf_get, + .pin_config_set = rk805_pinconf_set, +}; + +static struct pinctrl_desc rk805_pinctrl_desc = { + .name = "rk805-pinctrl", + .pctlops = &rk805_pinctrl_ops, + .pmxops = &rk805_pinmux_ops, + .confops = &rk805_pinconf_ops, + .owner = THIS_MODULE, +}; + +static int rk805_pinctrl_probe(struct platform_device *pdev) +{ + struct rk805_pctrl_info *pci; + int ret; + + pci = devm_kzalloc(&pdev->dev, sizeof(*pci), GFP_KERNEL); + if (!pci) + return -ENOMEM; + + pci->dev = &pdev->dev; + pci->dev->of_node = pdev->dev.parent->of_node; + pci->rk808 = dev_get_drvdata(pdev->dev.parent); + + pci->pinctrl_desc = rk805_pinctrl_desc; + pci->gpio_chip = rk805_gpio_chip; + pci->gpio_chip.parent = &pdev->dev; + pci->gpio_chip.of_node = 
pdev->dev.parent->of_node; + + platform_set_drvdata(pdev, pci); + + switch (pci->rk808->variant) { + case RK805_ID: + pci->pins = rk805_pins_desc; + pci->num_pins = ARRAY_SIZE(rk805_pins_desc); + pci->functions = rk805_pin_functions; + pci->num_functions = ARRAY_SIZE(rk805_pin_functions); + pci->groups = rk805_pin_groups; + pci->num_pin_groups = ARRAY_SIZE(rk805_pin_groups); + pci->pinctrl_desc.pins = rk805_pins_desc; + pci->pinctrl_desc.npins = ARRAY_SIZE(rk805_pins_desc); + pci->pin_cfg = rk805_gpio_cfgs; + pci->gpio_chip.ngpio = ARRAY_SIZE(rk805_gpio_cfgs); + break; + default: + dev_err(&pdev->dev, "unsupported RK805 ID %lu\n", + pci->rk808->variant); + return -EINVAL; + } + + /* Add gpio chip */ + ret = devm_gpiochip_add_data(&pdev->dev, &pci->gpio_chip, pci); + if (ret < 0) { + dev_err(&pdev->dev, "Couldn't add gpiochip\n"); + return ret; + } + + /* Add pinctrl */ + pci->pctl = devm_pinctrl_register(&pdev->dev, &pci->pinctrl_desc, pci); + if (IS_ERR(pci->pctl)) { + dev_err(&pdev->dev, "Couldn't add pinctrl\n"); + return PTR_ERR(pci->pctl); + } + + /* Add pin range */ + ret = gpiochip_add_pin_range(&pci->gpio_chip, dev_name(&pdev->dev), + 0, 0, pci->gpio_chip.ngpio); + if (ret < 0) { + dev_err(&pdev->dev, "Couldn't add gpiochip pin range\n"); + return ret; + } + + return 0; +} + +static struct platform_driver rk805_pinctrl_driver = { + .probe = rk805_pinctrl_probe, + .driver = { + .name = "rk805-pinctrl", + }, +}; +module_platform_driver(rk805_pinctrl_driver); + +MODULE_DESCRIPTION("RK805 pin control and GPIO driver"); +MODULE_AUTHOR("Joseph Chen "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/sprd/Kconfig b/drivers/pinctrl/sprd/Kconfig index 6f4a7f9ac6fd..bc7f3fab22f1 100644 --- a/drivers/pinctrl/sprd/Kconfig +++ b/drivers/pinctrl/sprd/Kconfig @@ -4,6 +4,8 @@ config PINCTRL_SPRD bool "Spreadtrum pinctrl driver" + depends on OF + depends on ARCH_SPRD || COMPILE_TEST select PINMUX select PINCONF select GENERIC_PINCONF @@ -13,5 +15,6 @@ config PINCTRL_SPRD config PINCTRL_SPRD_SC9860 bool "Spreadtrum SC9860 pinctrl driver" + depends on PINCTRL_SPRD help Say Y here to enable Spreadtrum SC9860 pinctrl driver diff --git a/drivers/pinctrl/sprd/pinctrl-sprd.c b/drivers/pinctrl/sprd/pinctrl-sprd.c index 7e7b9ac7e836..63529911445c 100644 --- a/drivers/pinctrl/sprd/pinctrl-sprd.c +++ b/drivers/pinctrl/sprd/pinctrl-sprd.c @@ -353,13 +353,13 @@ static const struct pinctrl_ops sprd_pctrl_ops = { .dt_free_map = pinctrl_utils_free_map, }; -int sprd_pmx_get_function_count(struct pinctrl_dev *pctldev) +static int sprd_pmx_get_function_count(struct pinctrl_dev *pctldev) { return PIN_FUNC_MAX; } -const char *sprd_pmx_get_function_name(struct pinctrl_dev *pctldev, - unsigned int selector) +static const char *sprd_pmx_get_function_name(struct pinctrl_dev *pctldev, + unsigned int selector) { switch (selector) { case PIN_FUNC_1: @@ -375,10 +375,10 @@ const char *sprd_pmx_get_function_name(struct pinctrl_dev *pctldev, } } -int sprd_pmx_get_function_groups(struct pinctrl_dev *pctldev, - unsigned int selector, - const char * const **groups, - unsigned int * const num_groups) +static int sprd_pmx_get_function_groups(struct pinctrl_dev *pctldev, + unsigned int selector, + const char * const **groups, + unsigned int * const num_groups) { struct sprd_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); struct sprd_pinctrl_soc_info *info = pctl->info; @@ -400,7 +400,7 @@ static int sprd_pmx_set_mux(struct pinctrl_dev *pctldev, unsigned long reg; unsigned int val = 0; - if (group_selector > info->ngroups) + 
if (group_selector >= info->ngroups) return -EINVAL; switch (func_selector) { @@ -734,7 +734,7 @@ static int sprd_pinconf_group_get(struct pinctrl_dev *pctldev, struct sprd_pin_group *grp; unsigned int pin_id; - if (selector > info->ngroups) + if (selector >= info->ngroups) return -EINVAL; grp = &info->groups[selector]; @@ -753,7 +753,7 @@ static int sprd_pinconf_group_set(struct pinctrl_dev *pctldev, struct sprd_pin_group *grp; int ret, i; - if (selector > info->ngroups) + if (selector >= info->ngroups) return -EINVAL; grp = &info->groups[selector]; @@ -813,7 +813,7 @@ static void sprd_pinconf_group_dbg_show(struct pinctrl_dev *pctldev, const char *name; int i, ret; - if (selector > info->ngroups) + if (selector >= info->ngroups) return; grp = &info->groups[selector]; @@ -1100,12 +1100,16 @@ int sprd_pinctrl_remove(struct platform_device *pdev) void sprd_pinctrl_shutdown(struct platform_device *pdev) { - struct pinctrl *pinctl = devm_pinctrl_get(&pdev->dev); + struct pinctrl *pinctl; struct pinctrl_state *state; + pinctl = devm_pinctrl_get(&pdev->dev); + if (IS_ERR(pinctl)) + return; state = pinctrl_lookup_state(pinctl, "shutdown"); - if (!IS_ERR(state)) - pinctrl_select_state(pinctl, state); + if (IS_ERR(state)) + return; + pinctrl_select_state(pinctl, state); } MODULE_DESCRIPTION("SPREADTRUM Pin Controller Driver"); diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier.h b/drivers/pinctrl/uniphier/pinctrl-uniphier.h index c075ecb8e5db..0a3d2ac27503 100644 --- a/drivers/pinctrl/uniphier/pinctrl-uniphier.h +++ b/drivers/pinctrl/uniphier/pinctrl-uniphier.h @@ -17,7 +17,7 @@ #define __PINCTRL_UNIPHIER_H__ #include -#include +#include #include #include diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c index e8a44a9bc916..d8599736a41a 100644 --- a/drivers/platform/chrome/chromeos_laptop.c +++ b/drivers/platform/chrome/chromeos_laptop.c @@ -518,7 +518,7 @@ static struct chromeos_laptop cr48 = { .callback = chromeos_laptop_dmi_matched, \ .driver_data = (void *)&board_ -static struct dmi_system_id chromeos_laptop_dmi_table[] __initdata = { +static const struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = { { .ident = "Samsung Series 5 550", .matches = { diff --git a/drivers/platform/chrome/chromeos_pstore.c b/drivers/platform/chrome/chromeos_pstore.c index 308a853ac4f1..b0693fdec8c6 100644 --- a/drivers/platform/chrome/chromeos_pstore.c +++ b/drivers/platform/chrome/chromeos_pstore.c @@ -14,7 +14,7 @@ #include #include -static struct dmi_system_id chromeos_pstore_dmi_table[] __initdata = { +static const struct dmi_system_id chromeos_pstore_dmi_table[] __initconst = { { /* * Today all Chromebooks/boxes ship with Google_* as version and diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c index 2b6436d1b6a4..1baf720faf69 100644 --- a/drivers/platform/chrome/cros_ec_lpc.c +++ b/drivers/platform/chrome/cros_ec_lpc.c @@ -329,7 +329,7 @@ static const struct acpi_device_id cros_ec_lpc_acpi_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, cros_ec_lpc_acpi_device_ids); -static struct dmi_system_id cros_ec_lpc_dmi_table[] __initdata = { +static const struct dmi_system_id cros_ec_lpc_dmi_table[] __initconst = { { /* * Today all Chromebooks/boxes ship with Google_* as version and diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c index 0831b428c217..4eb8e1a472b2 100644 --- a/drivers/platform/x86/alienware-wmi.c +++ b/drivers/platform/x86/alienware-wmi.c @@ -255,12 +255,13 @@ 
static int parse_rgb(const char *buf, struct platform_zone *zone) static struct platform_zone *match_zone(struct device_attribute *attr) { - int i; - for (i = 0; i < quirks->num_zones; i++) { - if ((struct device_attribute *)zone_data[i].attr == attr) { + u8 zone; + + for (zone = 0; zone < quirks->num_zones; zone++) { + if ((struct device_attribute *)zone_data[zone].attr == attr) { pr_debug("alienware-wmi: matched zone location: %d\n", - zone_data[i].location); - return &zone_data[i]; + zone_data[zone].location); + return &zone_data[zone]; } } return NULL; @@ -420,7 +421,7 @@ static DEVICE_ATTR(lighting_control_state, 0644, show_control_state, static int alienware_zone_init(struct platform_device *dev) { - int i; + u8 zone; char buffer[10]; char *name; @@ -457,19 +458,19 @@ static int alienware_zone_init(struct platform_device *dev) if (!zone_data) return -ENOMEM; - for (i = 0; i < quirks->num_zones; i++) { - sprintf(buffer, "zone%02X", i); + for (zone = 0; zone < quirks->num_zones; zone++) { + sprintf(buffer, "zone%02hhX", zone); name = kstrdup(buffer, GFP_KERNEL); if (name == NULL) return 1; - sysfs_attr_init(&zone_dev_attrs[i].attr); - zone_dev_attrs[i].attr.name = name; - zone_dev_attrs[i].attr.mode = 0644; - zone_dev_attrs[i].show = zone_show; - zone_dev_attrs[i].store = zone_set; - zone_data[i].location = i; - zone_attrs[i] = &zone_dev_attrs[i].attr; - zone_data[i].attr = &zone_dev_attrs[i]; + sysfs_attr_init(&zone_dev_attrs[zone].attr); + zone_dev_attrs[zone].attr.name = name; + zone_dev_attrs[zone].attr.mode = 0644; + zone_dev_attrs[zone].show = zone_show; + zone_dev_attrs[zone].store = zone_set; + zone_data[zone].location = zone; + zone_attrs[zone] = &zone_dev_attrs[zone].attr; + zone_data[zone].attr = &zone_dev_attrs[zone]; } zone_attrs[quirks->num_zones] = &dev_attr_lighting_control_state.attr; zone_attribute_group.attrs = zone_attrs; @@ -481,12 +482,13 @@ static int alienware_zone_init(struct platform_device *dev) static void alienware_zone_exit(struct platform_device *dev) { + u8 zone; + sysfs_remove_group(&dev->dev.kobj, &zone_attribute_group); led_classdev_unregister(&global_led); if (zone_dev_attrs) { - int i; - for (i = 0; i < quirks->num_zones; i++) - kfree(zone_dev_attrs[i].attr.name); + for (zone = 0; zone < quirks->num_zones; zone++) + kfree(zone_dev_attrs[zone].attr.name); } kfree(zone_dev_attrs); kfree(zone_data); diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index 709e3a67391a..48e1541dc8d4 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -299,7 +299,7 @@ static int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1, union acpi_object *obj; u32 tmp = 0; - status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID, 1, method_id, + status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID, 0, method_id, &input, &output); if (ACPI_FAILURE(status)) @@ -1946,7 +1946,7 @@ static int show_call(struct seq_file *m, void *data) acpi_status status; status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID, - 1, asus->debug.method_id, + 0, asus->debug.method_id, &input, &output); if (ACPI_FAILURE(status)) diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c index a8e4a539e704..6bcb750e1865 100644 --- a/drivers/platform/x86/compal-laptop.c +++ b/drivers/platform/x86/compal-laptop.c @@ -805,7 +805,7 @@ static int dmi_check_cb_extra(const struct dmi_system_id *id) return 1; } -static struct dmi_system_id __initdata compal_dmi_table[] = { +static const struct dmi_system_id compal_dmi_table[] 
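A small aside on the alienware-wmi hunk above: once the loop index becomes a u8, the matching printf length modifier is hh, hence the switch from "%02X" to "%02hhX". For values 0..255 the output is identical either way (the argument is promoted to int in any case); the modifier mainly keeps the format string in step with the variable's type. Tiny stand-alone check:

#include <stdio.h>

int main(void)
{
        unsigned char zone;
        char buf[16];

        for (zone = 0; zone < 3; zone++) {
                /* same text as "%02X" would give for a u8-range value */
                snprintf(buf, sizeof(buf), "zone%02hhX", zone);
                printf("%s\n", buf);
        }
        return 0;
}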
__initconst = { { .ident = "FL90/IFL90", .matches = { diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c index dad8f4afa17c..28d9f8696081 100644 --- a/drivers/platform/x86/dell-wmi.c +++ b/drivers/platform/x86/dell-wmi.c @@ -48,7 +48,6 @@ MODULE_LICENSE("GPL"); #define DELL_EVENT_GUID "9DBB5994-A997-11DA-B012-B622A1EF5492" #define DELL_DESCRIPTOR_GUID "8D9DDCBC-A997-11DA-B012-B622A1EF5492" -static u32 dell_wmi_interface_version; static bool wmi_requires_smbios_request; MODULE_ALIAS("wmi:"DELL_EVENT_GUID); @@ -56,6 +55,7 @@ MODULE_ALIAS("wmi:"DELL_DESCRIPTOR_GUID); struct dell_wmi_priv { struct input_dev *input_dev; + u32 interface_version; }; static int __init dmi_matched(const struct dmi_system_id *dmi) @@ -348,6 +348,7 @@ static void dell_wmi_process_key(struct wmi_device *wdev, int type, int code) static void dell_wmi_notify(struct wmi_device *wdev, union acpi_object *obj) { + struct dell_wmi_priv *priv = dev_get_drvdata(&wdev->dev); u16 *buffer_entry, *buffer_end; acpi_size buffer_size; int len, i; @@ -376,7 +377,7 @@ static void dell_wmi_notify(struct wmi_device *wdev, * So to prevent reading garbage from buffer we will process only first * one event on devices with WMI interface version 0. */ - if (dell_wmi_interface_version == 0 && buffer_entry < buffer_end) + if (priv->interface_version == 0 && buffer_entry < buffer_end) if (buffer_end > buffer_entry + buffer_entry[0] + 1) buffer_end = buffer_entry + buffer_entry[0] + 1; @@ -626,61 +627,67 @@ static void dell_wmi_input_destroy(struct wmi_device *wdev) * WMI Interface Version 8 4 * WMI buffer length 12 4 4096 */ -static int dell_wmi_check_descriptor_buffer(void) +static int dell_wmi_check_descriptor_buffer(struct wmi_device *wdev) { - struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL }; - union acpi_object *obj; - acpi_status status; + struct dell_wmi_priv *priv = dev_get_drvdata(&wdev->dev); + union acpi_object *obj = NULL; + struct wmi_device *desc_dev; u32 *buffer; + int ret; - status = wmi_query_block(DELL_DESCRIPTOR_GUID, 0, &out); - if (ACPI_FAILURE(status)) { - pr_err("Cannot read Dell descriptor buffer - %d\n", status); - return status; + desc_dev = wmidev_get_other_guid(wdev, DELL_DESCRIPTOR_GUID); + if (!desc_dev) { + dev_err(&wdev->dev, "Dell WMI descriptor does not exist\n"); + return -ENODEV; } - obj = (union acpi_object *)out.pointer; + obj = wmidev_block_query(desc_dev, 0); if (!obj) { - pr_err("Dell descriptor buffer is empty\n"); - return -EINVAL; + dev_err(&wdev->dev, "failed to read Dell WMI descriptor\n"); + ret = -EIO; + goto out; } if (obj->type != ACPI_TYPE_BUFFER) { - pr_err("Cannot read Dell descriptor buffer\n"); - kfree(obj); - return -EINVAL; + dev_err(&wdev->dev, "Dell descriptor has wrong type\n"); + ret = -EINVAL; + goto out; } if (obj->buffer.length != 128) { - pr_err("Dell descriptor buffer has invalid length (%d)\n", + dev_err(&wdev->dev, + "Dell descriptor buffer has invalid length (%d)\n", obj->buffer.length); if (obj->buffer.length < 16) { - kfree(obj); - return -EINVAL; + ret = -EINVAL; + goto out; } } buffer = (u32 *)obj->buffer.pointer; if (buffer[0] != 0x4C4C4544 && buffer[1] != 0x494D5720) - pr_warn("Dell descriptor buffer has invalid signature (%*ph)\n", + dev_warn(&wdev->dev, "Dell descriptor buffer has invalid signature (%*ph)\n", 8, buffer); if (buffer[2] != 0 && buffer[2] != 1) - pr_warn("Dell descriptor buffer has unknown version (%d)\n", + dev_warn(&wdev->dev, "Dell descriptor buffer has unknown version (%d)\n", buffer[2]); if (buffer[3] != 4096) - 
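To make the layout being checked in dell_wmi_check_descriptor_buffer() above easier to follow, here is a rough stand-alone parser of the same 128-byte descriptor: four little-endian u32 words holding the "DELL"/" WMI" signature, the interface version (0 or 1) and the expected 4096-byte buffer length. parse_dell_descriptor() and the sample buffer are invented for the sketch, and it assumes a little-endian host as on x86.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static int parse_dell_descriptor(const uint8_t *buf, size_t len, uint32_t *version)
{
        uint32_t w[4];

        if (len < 16)
                return -1;              /* too short for the header */
        memcpy(w, buf, sizeof(w));      /* avoid unaligned access */

        if (w[0] != 0x4C4C4544 || w[1] != 0x494D5720)   /* "DELL" " WMI" */
                printf("warning: unexpected signature %08X %08X\n",
                       (unsigned)w[0], (unsigned)w[1]);
        if (w[2] != 0 && w[2] != 1)
                printf("warning: unknown interface version %u\n", (unsigned)w[2]);
        if (w[3] != 4096)
                printf("warning: unexpected buffer length %u\n", (unsigned)w[3]);

        *version = w[2];
        return 0;
}

int main(void)
{
        uint8_t buf[128] = {
                'D', 'E', 'L', 'L', ' ', 'W', 'M', 'I',
                1, 0, 0, 0,             /* interface version 1 */
                0x00, 0x10, 0, 0,       /* buffer length 4096 */
        };
        uint32_t version;

        if (parse_dell_descriptor(buf, sizeof(buf), &version) == 0)
                printf("descriptor ok, interface version %u\n", (unsigned)version);
        return 0;
}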
pr_warn("Dell descriptor buffer has invalid buffer length (%d)\n", + dev_warn(&wdev->dev, "Dell descriptor buffer has invalid buffer length (%d)\n", buffer[3]); - dell_wmi_interface_version = buffer[2]; + priv->interface_version = buffer[2]; + ret = 0; - pr_info("Detected Dell WMI interface version %u\n", - dell_wmi_interface_version); + dev_info(&wdev->dev, "Detected Dell WMI interface version %u\n", + priv->interface_version); +out: kfree(obj); - return 0; + put_device(&desc_dev->dev); + return ret; } /* @@ -717,17 +724,19 @@ static int dell_wmi_events_set_enabled(bool enable) static int dell_wmi_probe(struct wmi_device *wdev) { + struct dell_wmi_priv *priv; int err; - struct dell_wmi_priv *priv = devm_kzalloc( + priv = devm_kzalloc( &wdev->dev, sizeof(struct dell_wmi_priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + dev_set_drvdata(&wdev->dev, priv); - err = dell_wmi_check_descriptor_buffer(); + err = dell_wmi_check_descriptor_buffer(wdev); if (err) return err; - dev_set_drvdata(&wdev->dev, priv); - return dell_wmi_input_setup(wdev); } diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c index 85de30f93a9c..56a8195096a2 100644 --- a/drivers/platform/x86/fujitsu-laptop.c +++ b/drivers/platform/x86/fujitsu-laptop.c @@ -254,10 +254,12 @@ static int bl_update_status(struct backlight_device *b) { struct acpi_device *device = bl_get_data(b); - if (b->props.power == FB_BLANK_POWERDOWN) - call_fext_func(fext, FUNC_BACKLIGHT, 0x1, 0x4, 0x3); - else - call_fext_func(fext, FUNC_BACKLIGHT, 0x1, 0x4, 0x0); + if (fext) { + if (b->props.power == FB_BLANK_POWERDOWN) + call_fext_func(fext, FUNC_BACKLIGHT, 0x1, 0x4, 0x3); + else + call_fext_func(fext, FUNC_BACKLIGHT, 0x1, 0x4, 0x0); + } return set_lcd_level(device, b->props.brightness); } diff --git a/drivers/platform/x86/hdaps.c b/drivers/platform/x86/hdaps.c index 458e6c948c11..c26baf77938e 100644 --- a/drivers/platform/x86/hdaps.c +++ b/drivers/platform/x86/hdaps.c @@ -514,7 +514,7 @@ static int __init hdaps_dmi_match_invert(const struct dmi_system_id *id) "ThinkPad T42p", so the order of the entries matters. If your ThinkPad is not recognized, please update to latest BIOS. This is especially the case for some R52 ThinkPads. */ -static struct dmi_system_id __initdata hdaps_whitelist[] = { +static const struct dmi_system_id hdaps_whitelist[] __initconst = { HDAPS_DMI_MATCH_INVERT("IBM", "ThinkPad R50p", HDAPS_BOTH_AXES), HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R50"), HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R51"), diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index 0df4209648d1..b4ed3dc983d5 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -107,13 +107,6 @@ enum hp_wmi_hardware_mask { HPWMI_TABLET_MASK = 0x04, }; -#define BIOS_ARGS_INIT(write, ctype, size) \ - (struct bios_args) { .signature = 0x55434553, \ - .command = (write) ? 
0x2 : 0x1, \ - .commandtype = (ctype), \ - .datasize = (size), \ - .data = 0 } - struct bios_return { u32 sigpass; u32 return_code; @@ -188,6 +181,22 @@ struct rfkill2_device { static int rfkill2_count; static struct rfkill2_device rfkill2[HPWMI_MAX_RFKILL2_DEVICES]; +/* map output size to the corresponding WMI method id */ +static inline int encode_outsize_for_pvsz(int outsize) +{ + if (outsize > 4096) + return -EINVAL; + if (outsize > 1024) + return 5; + if (outsize > 128) + return 4; + if (outsize > 4) + return 3; + if (outsize > 0) + return 2; + return 1; +} + /* * hp_wmi_perform_query * @@ -211,6 +220,7 @@ static struct rfkill2_device rfkill2[HPWMI_MAX_RFKILL2_DEVICES]; static int hp_wmi_perform_query(int query, enum hp_wmi_command command, void *buffer, int insize, int outsize) { + int mid; struct bios_return *bios_return; int actual_outsize; union acpi_object *obj; @@ -225,11 +235,15 @@ static int hp_wmi_perform_query(int query, enum hp_wmi_command command, struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; int ret = 0; + mid = encode_outsize_for_pvsz(outsize); + if (WARN_ON(mid < 0)) + return mid; + if (WARN_ON(insize > sizeof(args.data))) return -EINVAL; memcpy(&args.data, buffer, insize); - wmi_evaluate_method(HPWMI_BIOS_GUID, 0, 0x3, &input, &output); + wmi_evaluate_method(HPWMI_BIOS_GUID, 0, mid, &input, &output); obj = output.pointer; diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c index c62e5e11ca4b..18d55cee5bcd 100644 --- a/drivers/platform/x86/ibm_rtl.c +++ b/drivers/platform/x86/ibm_rtl.c @@ -103,7 +103,7 @@ static void rtl_port_unmap(void __iomem *addr) static int ibm_rtl_write(u8 value) { int ret = 0, count = 0; - static u32 cmd_port_val; + u32 cmd_port_val; RTL_DEBUG("%s(%d)\n", __func__, value); @@ -227,7 +227,7 @@ static void rtl_teardown_sysfs(void) { } -static struct dmi_system_id __initdata ibm_rtl_dmi_table[] = { +static const struct dmi_system_id ibm_rtl_dmi_table[] __initconst = { { \ .matches = { \ DMI_MATCH(DMI_SYS_VENDOR, "IBM"), \ diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index 603fc6050971..fe98d4ac0df3 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -42,6 +42,8 @@ #define IDEAPAD_RFKILL_DEV_NUM (3) +#define BM_CONSERVATION_BIT (5) + #define CFG_BT_BIT (16) #define CFG_3G_BIT (17) #define CFG_WIFI_BIT (18) @@ -54,6 +56,11 @@ static const char *const ideapad_wmi_fnesc_events[] = { }; #endif +enum { + BMCMD_CONSERVATION_ON = 3, + BMCMD_CONSERVATION_OFF = 5, +}; + enum { VPCCMD_R_VPC1 = 0x10, VPCCMD_R_BL_MAX, @@ -123,6 +130,23 @@ static int read_method_int(acpi_handle handle, const char *method, int *val) } } +static int method_gbmd(acpi_handle handle, unsigned long *ret) +{ + int result, val; + + result = read_method_int(handle, "GBMD", &val); + *ret = val; + return result; +} + +static int method_sbmc(acpi_handle handle, int cmd) +{ + acpi_status status; + + status = acpi_execute_simple_method(handle, "SBMC", cmd); + return ACPI_FAILURE(status) ? -1 : 0; +} + static int method_vpcr(acpi_handle handle, int cmd, int *ret) { acpi_status status; @@ -250,6 +274,13 @@ static int debugfs_status_show(struct seq_file *s, void *data) if (!read_ec_data(priv->adev->handle, VPCCMD_R_CAMERA, &value)) seq_printf(s, "Camera status:\t%s(%lu)\n", value ? 
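A quick way to sanity-check the hp-wmi mapping introduced above is to run the same thresholds outside the kernel; the function body below is copied from the hunk (with -EINVAL shown as -1) and main() simply prints the method id picked for a few representative buffer sizes.

#include <stdio.h>

static int encode_outsize_for_pvsz(int outsize)
{
        if (outsize > 4096)
                return -1;      /* -EINVAL in the driver */
        if (outsize > 1024)
                return 5;
        if (outsize > 128)
                return 4;
        if (outsize > 4)
                return 3;
        if (outsize > 0)
                return 2;
        return 1;
}

int main(void)
{
        const int sizes[] = { 0, 4, 5, 128, 129, 1024, 1025, 4096, 4097 };
        size_t i;

        for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                printf("outsize %4d -> method id %d\n",
                       sizes[i], encode_outsize_for_pvsz(sizes[i]));
        return 0;
}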
"On" : "Off", value); + seq_puts(s, "=====================\n"); + + if (!method_gbmd(priv->adev->handle, &value)) { + seq_printf(s, "Conservation mode:\t%s(%lu)\n", + test_bit(BM_CONSERVATION_BIT, &value) ? "On" : "Off", + value); + } return 0; } @@ -456,10 +487,45 @@ static ssize_t __maybe_unused touchpad_store(struct device *dev, static DEVICE_ATTR_RO(touchpad); +static ssize_t conservation_mode_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ideapad_private *priv = dev_get_drvdata(dev); + unsigned long result; + + if (method_gbmd(priv->adev->handle, &result)) + return sprintf(buf, "-1\n"); + return sprintf(buf, "%u\n", test_bit(BM_CONSERVATION_BIT, &result)); +} + +static ssize_t conservation_mode_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ideapad_private *priv = dev_get_drvdata(dev); + bool state; + int ret; + + ret = kstrtobool(buf, &state); + if (ret) + return ret; + + ret = method_sbmc(priv->adev->handle, state ? + BMCMD_CONSERVATION_ON : + BMCMD_CONSERVATION_OFF); + if (ret < 0) + return -EIO; + return count; +} + +static DEVICE_ATTR_RW(conservation_mode); + static struct attribute *ideapad_attributes[] = { &dev_attr_camera_power.attr, &dev_attr_fan_mode.attr, &dev_attr_touchpad.attr, + &dev_attr_conservation_mode.attr, NULL }; @@ -477,6 +543,9 @@ static umode_t ideapad_is_visible(struct kobject *kobj, unsigned long value; supported = !read_ec_data(priv->adev->handle, VPCCMD_R_FAN, &value); + } else if (attr == &dev_attr_conservation_mode.attr) { + supported = acpi_has_method(priv->adev->handle, "GBMD") && + acpi_has_method(priv->adev->handle, "SBMC"); } else supported = true; diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c index 8519e0f97bdd..e34fd70b67af 100644 --- a/drivers/platform/x86/intel-hid.c +++ b/drivers/platform/x86/intel-hid.c @@ -203,15 +203,26 @@ static void notify_handler(acpi_handle handle, u32 event, void *context) acpi_status status; if (priv->wakeup_mode) { + /* + * Needed for wakeup from suspend-to-idle to work on some + * platforms that don't expose the 5-button array, but still + * send notifies with the power button event code to this + * device object on power button actions while suspended. + */ + if (event == 0xce) + goto wakeup; + /* Wake up on 5-button array events only. 
*/ if (event == 0xc0 || !priv->array) return; - if (sparse_keymap_entry_from_scancode(priv->array, event)) - pm_wakeup_hard_event(&device->dev); - else + if (!sparse_keymap_entry_from_scancode(priv->array, event)) { dev_info(&device->dev, "unknown event 0x%x\n", event); + return; + } +wakeup: + pm_wakeup_hard_event(&device->dev); return; } @@ -219,7 +230,7 @@ static void notify_handler(acpi_handle handle, u32 event, void *context) if (event != 0xc0) { if (!priv->array || !sparse_keymap_report_event(priv->array, event, 1, true)) - dev_info(&device->dev, "unknown event 0x%x\n", event); + dev_dbg(&device->dev, "unknown event 0x%x\n", event); return; } @@ -230,7 +241,7 @@ static void notify_handler(acpi_handle handle, u32 event, void *context) } if (!sparse_keymap_report_event(priv->input_dev, ev_index, 1, true)) - dev_info(&device->dev, "unknown event index 0x%llx\n", + dev_dbg(&device->dev, "unknown event index 0x%llx\n", ev_index); } diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c index 480926786cb8..58c5ff36523a 100644 --- a/drivers/platform/x86/intel-vbtn.c +++ b/drivers/platform/x86/intel-vbtn.c @@ -83,7 +83,7 @@ static void notify_handler(acpi_handle handle, u32 event, void *context) } else if (sparse_keymap_report_event(priv->input_dev, event, 1, true)) { return; } - dev_info(&device->dev, "unknown event index 0x%x\n", event); + dev_dbg(&device->dev, "unknown event index 0x%x\n", event); } static int intel_vbtn_probe(struct platform_device *device) diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c index 871cfa682519..d79fbf924b13 100644 --- a/drivers/platform/x86/intel_mid_powerbtn.c +++ b/drivers/platform/x86/intel_mid_powerbtn.c @@ -108,13 +108,13 @@ static irqreturn_t mid_pb_isr(int irq, void *dev_id) return IRQ_HANDLED; } -static struct mid_pb_ddata mfld_ddata = { +static const struct mid_pb_ddata mfld_ddata = { .mirqlvl1_addr = INTEL_MSIC_IRQLVL1MSK, .pbstat_addr = INTEL_MSIC_PBSTATUS, .pbstat_mask = MSIC_PB_LEVEL, }; -static struct mid_pb_ddata mrfld_ddata = { +static const struct mid_pb_ddata mrfld_ddata = { .mirqlvl1_addr = BCOVE_IRQLVL1MSK, .pbstat_addr = BCOVE_PBSTATUS, .pbstat_mask = BCOVE_PB_LEVEL, @@ -142,8 +142,10 @@ static int mid_pb_probe(struct platform_device *pdev) if (!id) return -ENODEV; - if (irq < 0) - return -EINVAL; + if (irq < 0) { + dev_err(&pdev->dev, "Failed to get IRQ: %d\n", irq); + return irq; + } input = devm_input_allocate_device(&pdev->dev); if (!input) diff --git a/drivers/platform/x86/intel_oaktrail.c b/drivers/platform/x86/intel_oaktrail.c index 6aa33c4a809f..5747f63c8d9f 100644 --- a/drivers/platform/x86/intel_oaktrail.c +++ b/drivers/platform/x86/intel_oaktrail.c @@ -299,7 +299,7 @@ static int dmi_check_cb(const struct dmi_system_id *id) return 0; } -static struct dmi_system_id __initdata oaktrail_dmi_table[] = { +static const struct dmi_system_id oaktrail_dmi_table[] __initconst = { { .ident = "OakTrail platform", .matches = { diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c index 914bcd2edbde..17e08b42b0a9 100644 --- a/drivers/platform/x86/intel_pmc_core.c +++ b/drivers/platform/x86/intel_pmc_core.c @@ -110,6 +110,13 @@ static const struct pmc_reg_map spt_reg_map = { .pfear_sts = spt_pfear_map, .mphy_sts = spt_mphy_map, .pll_sts = spt_pll_map, + .slp_s0_offset = SPT_PMC_SLP_S0_RES_COUNTER_OFFSET, + .ltr_ignore_offset = SPT_PMC_LTR_IGNORE_OFFSET, + .regmap_length = SPT_PMC_MMIO_REG_LEN, + .ppfear0_offset = 
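Condensing the intel-hid wakeup path above into a single decision helper may make the control flow easier to see: 0xce (described in the new comment as the power-button notify) always wakes, 0xc0 is ignored, and anything else wakes only if it is a known 5-button-array scancode. wakeup_action(), has_array and known_scancode are invented stand-ins for priv->array and the sparse-keymap lookup.

#include <stdio.h>
#include <stdbool.h>

enum action { IGNORE, WAKE, LOG_UNKNOWN };

static enum action wakeup_action(unsigned int event, bool has_array,
                                 bool known_scancode)
{
        if (event == 0xce)
                return WAKE;            /* power button while suspended */
        if (event == 0xc0 || !has_array)
                return IGNORE;
        if (!known_scancode)
                return LOG_UNKNOWN;
        return WAKE;                    /* 5-button array event */
}

int main(void)
{
        static const char *names[] = { "ignore", "wake", "log-unknown" };

        printf("0xce, no array      -> %s\n", names[wakeup_action(0xce, false, false)]);
        printf("0xc0, array present -> %s\n", names[wakeup_action(0xc0, true, true)]);
        printf("0xc4, known key     -> %s\n", names[wakeup_action(0xc4, true, true)]);
        printf("0xff, unknown key   -> %s\n", names[wakeup_action(0xff, true, false)]);
        return 0;
}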
SPT_PMC_XRAM_PPFEAR0A, + .ppfear_buckets = SPT_PPFEAR_NUM_ENTRIES, + .pm_cfg_offset = SPT_PMC_PM_CFG_OFFSET, + .pm_read_disable_bit = SPT_PMC_READ_DISABLE_BIT, }; static const struct pci_device_id pmc_pci_ids[] = { @@ -157,12 +164,13 @@ static inline u32 pmc_core_adjust_slp_s0_step(u32 value) int intel_pmc_slp_s0_counter_read(u32 *data) { struct pmc_dev *pmcdev = &pmc; + const struct pmc_reg_map *map = pmcdev->map; u32 value; if (!pmcdev->has_slp_s0_res) return -EACCES; - value = pmc_core_reg_read(pmcdev, SPT_PMC_SLP_S0_RES_COUNTER_OFFSET); + value = pmc_core_reg_read(pmcdev, map->slp_s0_offset); *data = pmc_core_adjust_slp_s0_step(value); return 0; @@ -172,9 +180,10 @@ EXPORT_SYMBOL_GPL(intel_pmc_slp_s0_counter_read); static int pmc_core_dev_state_get(void *data, u64 *val) { struct pmc_dev *pmcdev = data; + const struct pmc_reg_map *map = pmcdev->map; u32 value; - value = pmc_core_reg_read(pmcdev, SPT_PMC_SLP_S0_RES_COUNTER_OFFSET); + value = pmc_core_reg_read(pmcdev, map->slp_s0_offset); *val = pmc_core_adjust_slp_s0_step(value); return 0; @@ -187,8 +196,8 @@ static int pmc_core_check_read_lock_bit(void) struct pmc_dev *pmcdev = &pmc; u32 value; - value = pmc_core_reg_read(pmcdev, SPT_PMC_PM_CFG_OFFSET); - return value & BIT(SPT_PMC_READ_DISABLE_BIT); + value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_cfg_offset); + return value & BIT(pmcdev->map->pm_read_disable_bit); } #if IS_ENABLED(CONFIG_DEBUG_FS) @@ -204,12 +213,13 @@ static int pmc_core_ppfear_sts_show(struct seq_file *s, void *unused) { struct pmc_dev *pmcdev = s->private; const struct pmc_bit_map *map = pmcdev->map->pfear_sts; - u8 pf_regs[NUM_ENTRIES]; + u8 pf_regs[PPFEAR_MAX_NUM_ENTRIES]; int index, iter; - iter = SPT_PMC_XRAM_PPFEAR0A; + iter = pmcdev->map->ppfear0_offset; - for (index = 0; index < NUM_ENTRIES; index++, iter++) + for (index = 0; index < pmcdev->map->ppfear_buckets && + index < PPFEAR_MAX_NUM_ENTRIES; index++, iter++) pf_regs[index] = pmc_core_reg_read_byte(pmcdev, iter); for (index = 0; map[index].name; index++) @@ -376,6 +386,7 @@ static ssize_t pmc_core_ltr_ignore_write(struct file *file, const char __user *userbuf, size_t count, loff_t *ppos) { struct pmc_dev *pmcdev = &pmc; + const struct pmc_reg_map *map = pmcdev->map; u32 val, buf_size, fd; int err = 0; @@ -392,9 +403,9 @@ static ssize_t pmc_core_ltr_ignore_write(struct file *file, const char __user goto out_unlock; } - fd = pmc_core_reg_read(pmcdev, SPT_PMC_LTR_IGNORE_OFFSET); + fd = pmc_core_reg_read(pmcdev, map->ltr_ignore_offset); fd |= (1U << val); - pmc_core_reg_write(pmcdev, SPT_PMC_LTR_IGNORE_OFFSET, fd); + pmc_core_reg_write(pmcdev, map->ltr_ignore_offset, fd); out_unlock: mutex_unlock(&pmcdev->lock); @@ -530,8 +541,8 @@ static int pmc_core_probe(struct pci_dev *dev, const struct pci_device_id *id) } mutex_init(&pmcdev->lock); - pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit(); pmcdev->map = map; + pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit(); err = pmc_core_dbgfs_register(pmcdev); if (err < 0) diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h index 5a48e7728479..3d225a9cc09f 100644 --- a/drivers/platform/x86/intel_pmc_core.h +++ b/drivers/platform/x86/intel_pmc_core.h @@ -38,7 +38,8 @@ #define SPT_PMC_SLP_S0_RES_COUNTER_STEP 0x64 #define PMC_BASE_ADDR_MASK ~(SPT_PMC_MMIO_REG_LEN - 1) #define MTPMC_MASK 0xffff0000 -#define NUM_ENTRIES 5 +#define PPFEAR_MAX_NUM_ENTRIES 5 +#define SPT_PPFEAR_NUM_ENTRIES 5 #define SPT_PMC_READ_DISABLE_BIT 0x16 #define SPT_PMC_MSG_FULL_STS_BIT 0x18 
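The pmc_core changes above replace hard-coded SPT_* constants with per-PCH offsets read from a pmc_reg_map. The pattern, reduced to a stand-alone toy (struct fields, offsets and values below are invented; the "MMIO" is just an array):

#include <stdio.h>
#include <stdint.h>

struct demo_reg_map {
        uint32_t slp_s0_offset;
        int      ppfear_buckets;
};

static const struct demo_reg_map spt_map   = { 0x13c, 5 };
static const struct demo_reg_map other_map = { 0x160, 8 };

static uint32_t demo_reg_read(const uint32_t *mmio, uint32_t offset)
{
        return mmio[offset / 4];        /* 32-bit registers, byte offsets */
}

int main(void)
{
        uint32_t mmio[0x100] = { 0 };
        const struct demo_reg_map *map = &spt_map;      /* picked per PCI id */

        mmio[map->slp_s0_offset / 4] = 1234;
        printf("SLP_S0 raw = %u (ppfear buckets = %d)\n",
               (unsigned)demo_reg_read(mmio, map->slp_s0_offset),
               map->ppfear_buckets);

        map = &other_map;               /* another PCH: same code path */
        mmio[map->slp_s0_offset / 4] = 5678;
        printf("SLP_S0 raw = %u (ppfear buckets = %d)\n",
               (unsigned)demo_reg_read(mmio, map->slp_s0_offset),
               map->ppfear_buckets);
        return 0;
}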
#define NUM_RETRIES 100 @@ -126,10 +127,37 @@ struct pmc_bit_map { u32 bit_mask; }; +/** + * struct pmc_reg_map - Structure used to define parameter unique to a + PCH family + * @pfear_sts: Maps name of IP block to PPFEAR* bit + * @mphy_sts: Maps name of MPHY lane to MPHY status lane status bit + * @pll_sts: Maps name of PLL to corresponding bit status + * @slp_s0_offset: PWRMBASE offset to read SLP_S0 residency + * @ltr_ignore_offset: PWRMBASE offset to read/write LTR ignore bit + * @base_address: Base address of PWRMBASE defined in BIOS writer guide + * @regmap_length: Length of memory to map from PWRMBASE address to access + * @ppfear0_offset: PWRMBASE offset to to read PPFEAR* + * @ppfear_buckets: Number of 8 bits blocks to read all IP blocks from + * PPFEAR + * @pm_cfg_offset: PWRMBASE offset to PM_CFG register + * @pm_read_disable_bit: Bit index to read PMC_READ_DISABLE + * + * Each PCH has unique set of register offsets and bit indexes. This structure + * captures them to have a common implementation. + */ struct pmc_reg_map { const struct pmc_bit_map *pfear_sts; const struct pmc_bit_map *mphy_sts; const struct pmc_bit_map *pll_sts; + const u32 slp_s0_offset; + const u32 ltr_ignore_offset; + const u32 base_address; + const int regmap_length; + const u32 ppfear0_offset; + const int ppfear_buckets; + const u32 pm_cfg_offset; + const int pm_read_disable_bit; }; /** diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c index f7cf981502cd..2c85f75e32b0 100644 --- a/drivers/platform/x86/intel_scu_ipc.c +++ b/drivers/platform/x86/intel_scu_ipc.c @@ -72,20 +72,20 @@ struct intel_scu_ipc_pdata_t { u8 irq_mode; }; -static struct intel_scu_ipc_pdata_t intel_scu_ipc_lincroft_pdata = { +static const struct intel_scu_ipc_pdata_t intel_scu_ipc_lincroft_pdata = { .i2c_base = 0xff12b000, .i2c_len = 0x10, .irq_mode = 0, }; /* Penwell and Cloverview */ -static struct intel_scu_ipc_pdata_t intel_scu_ipc_penwell_pdata = { +static const struct intel_scu_ipc_pdata_t intel_scu_ipc_penwell_pdata = { .i2c_base = 0xff12b000, .i2c_len = 0x10, .irq_mode = 1, }; -static struct intel_scu_ipc_pdata_t intel_scu_ipc_tangier_pdata = { +static const struct intel_scu_ipc_pdata_t intel_scu_ipc_tangier_pdata = { .i2c_base = 0xff00d000, .i2c_len = 0x10, .irq_mode = 0, diff --git a/drivers/platform/x86/intel_telemetry_debugfs.c b/drivers/platform/x86/intel_telemetry_debugfs.c index cd21df982abd..d4fc42b4cbeb 100644 --- a/drivers/platform/x86/intel_telemetry_debugfs.c +++ b/drivers/platform/x86/intel_telemetry_debugfs.c @@ -331,6 +331,7 @@ static struct telemetry_debugfs_conf telem_apl_debugfs_conf = { static const struct x86_cpu_id telemetry_debugfs_cpu_ids[] = { TELEM_DEBUGFS_CPU(INTEL_FAM6_ATOM_GOLDMONT, telem_apl_debugfs_conf), + TELEM_DEBUGFS_CPU(INTEL_FAM6_ATOM_GEMINI_LAKE, telem_apl_debugfs_conf), {} }; diff --git a/drivers/platform/x86/intel_telemetry_pltdrv.c b/drivers/platform/x86/intel_telemetry_pltdrv.c index 6ebdbd2b04fc..e0424d5a795a 100644 --- a/drivers/platform/x86/intel_telemetry_pltdrv.c +++ b/drivers/platform/x86/intel_telemetry_pltdrv.c @@ -46,7 +46,6 @@ #define TELEM_SAMPLING_DEFAULT_PERIOD 0xD #define TELEM_MAX_EVENTS_SRAM 28 -#define TELEM_MAX_OS_ALLOCATED_EVENTS 20 #define TELEM_SSRAM_STARTTIME_OFFSET 8 #define TELEM_SSRAM_EVTLOG_OFFSET 16 @@ -153,6 +152,30 @@ static struct telemetry_evtmap {"PC2_AND_MEM_SHALLOW_IDLE_RES", 0x1D40}, }; +static struct telemetry_evtmap + telemetry_glk_pss_default_events[TELEM_MAX_OS_ALLOCATED_EVENTS] = { + {"IA_CORE0_C6_RES", 0x0400}, 
+ {"IA_CORE0_C6_CTR", 0x0000}, + {"IA_MODULE0_C7_RES", 0x0410}, + {"IA_MODULE0_C7_CTR", 0x000C}, + {"IA_C0_RES", 0x0805}, + {"PCS_LTR", 0x2801}, + {"PSTATES", 0x2802}, + {"SOC_S0I3_RES", 0x0407}, + {"SOC_S0I3_CTR", 0x0008}, + {"PCS_S0I3_CTR", 0x0007}, + {"PCS_C1E_RES", 0x0414}, + {"PCS_IDLE_STATUS", 0x2806}, + {"IA_PERF_LIMITS", 0x280B}, + {"GT_PERF_LIMITS", 0x280C}, + {"PCS_WAKEUP_S0IX_CTR", 0x0025}, + {"PCS_IDLE_BLOCKED", 0x2C00}, + {"PCS_S0IX_BLOCKED", 0x2C01}, + {"PCS_S0IX_WAKE_REASONS", 0x2C02}, + {"PCS_LTR_BLOCKING", 0x2C03}, + {"PC2_AND_MEM_SHALLOW_IDLE_RES", 0x1D40}, +}; + /* APL specific Data */ static struct telemetry_plt_config telem_apl_config = { .pss_config = { @@ -163,8 +186,19 @@ static struct telemetry_plt_config telem_apl_config = { }, }; +/* GLK specific Data */ +static struct telemetry_plt_config telem_glk_config = { + .pss_config = { + .telem_evts = telemetry_glk_pss_default_events, + }, + .ioss_config = { + .telem_evts = telemetry_apl_ioss_default_events, + }, +}; + static const struct x86_cpu_id telemetry_cpu_ids[] = { TELEM_CPU(INTEL_FAM6_ATOM_GOLDMONT, telem_apl_config), + TELEM_CPU(INTEL_FAM6_ATOM_GEMINI_LAKE, telem_glk_config), {} }; diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c index 8f98c211b440..4f3de2a8c4df 100644 --- a/drivers/platform/x86/mlx-platform.c +++ b/drivers/platform/x86/mlx-platform.c @@ -247,7 +247,7 @@ static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi) return 1; }; -static struct dmi_system_id mlxplat_dmi_table[] __initdata = { +static const struct dmi_system_id mlxplat_dmi_table[] __initconst = { { .callback = mlxplat_dmi_default_matched, .matches = { diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c index 61b9014d2610..d5bfcc602090 100644 --- a/drivers/platform/x86/msi-laptop.c +++ b/drivers/platform/x86/msi-laptop.c @@ -605,7 +605,7 @@ static int dmi_check_cb(const struct dmi_system_id *dmi) return 1; } -static struct dmi_system_id __initdata msi_dmi_table[] = { +static const struct dmi_system_id msi_dmi_table[] __initconst = { { .ident = "MSI S270", .matches = { diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c index f6209b739ec0..620138236c89 100644 --- a/drivers/platform/x86/msi-wmi.c +++ b/drivers/platform/x86/msi-wmi.c @@ -184,7 +184,7 @@ static const struct backlight_ops msi_backlight_ops = { static void msi_wmi_notify(u32 value, void *context) { struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; - static struct key_entry *key; + struct key_entry *key; union acpi_object *obj; acpi_status status; diff --git a/drivers/platform/x86/mxm-wmi.c b/drivers/platform/x86/mxm-wmi.c index f4bad83053a9..35d8b9a939f9 100644 --- a/drivers/platform/x86/mxm-wmi.c +++ b/drivers/platform/x86/mxm-wmi.c @@ -53,7 +53,7 @@ int mxm_wmi_call_mxds(int adapter) printk("calling mux switch %d\n", adapter); - status = wmi_evaluate_method(MXM_WMMX_GUID, 0x1, adapter, &input, + status = wmi_evaluate_method(MXM_WMMX_GUID, 0x0, adapter, &input, &output); if (ACPI_FAILURE(status)) @@ -78,7 +78,7 @@ int mxm_wmi_call_mxmx(int adapter) printk("calling mux switch %d\n", adapter); - status = wmi_evaluate_method(MXM_WMMX_GUID, 0x1, adapter, &input, + status = wmi_evaluate_method(MXM_WMMX_GUID, 0x0, adapter, &input, &output); if (ACPI_FAILURE(status)) diff --git a/drivers/platform/x86/peaq-wmi.c b/drivers/platform/x86/peaq-wmi.c index 77d1f90b0794..bc98ef95514a 100644 --- a/drivers/platform/x86/peaq-wmi.c +++ 
b/drivers/platform/x86/peaq-wmi.c @@ -39,7 +39,7 @@ static void peaq_wmi_poll(struct input_polled_dev *dev) struct acpi_buffer input = { sizeof(dummy), &dummy }; struct acpi_buffer output = { sizeof(obj), &obj }; - status = wmi_evaluate_method(PEAQ_DOLBY_BUTTON_GUID, 1, + status = wmi_evaluate_method(PEAQ_DOLBY_BUTTON_GUID, 0, PEAQ_DOLBY_BUTTON_METHOD_ID, &input, &output); if (ACPI_FAILURE(status)) @@ -51,7 +51,7 @@ static void peaq_wmi_poll(struct input_polled_dev *dev) return; } - if (peaq_ignore_events_counter && --peaq_ignore_events_counter >= 0) + if (peaq_ignore_events_counter && peaq_ignore_events_counter--) return; if (obj.integer.value) { diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c index 0c703feaeb88..d3cb26f6df73 100644 --- a/drivers/platform/x86/samsung-laptop.c +++ b/drivers/platform/x86/samsung-laptop.c @@ -1567,7 +1567,7 @@ static int __init samsung_dmi_matched(const struct dmi_system_id *d) return 0; } -static struct dmi_system_id __initdata samsung_dmi_table[] = { +static const struct dmi_system_id samsung_dmi_table[] __initconst = { { .matches = { DMI_MATCH(DMI_SYS_VENDOR, diff --git a/drivers/platform/x86/samsung-q10.c b/drivers/platform/x86/samsung-q10.c index e6aac725a0af..a2fb7fbc3273 100644 --- a/drivers/platform/x86/samsung-q10.c +++ b/drivers/platform/x86/samsung-q10.c @@ -95,7 +95,7 @@ static int __init dmi_check_callback(const struct dmi_system_id *id) return 1; } -static struct dmi_system_id __initdata samsungq10_dmi_table[] = { +static const struct dmi_system_id samsungq10_dmi_table[] __initconst = { { .ident = "Samsung Q10", .matches = { diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index bfae79534f44..a16cea2be9c3 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c @@ -4880,7 +4880,7 @@ static struct acpi_driver sony_pic_driver = { .drv.pm = &sony_pic_pm, }; -static struct dmi_system_id __initdata sonypi_dmi_table[] = { +static const struct dmi_system_id sonypi_dmi_table[] __initconst = { { .ident = "Sony Vaio", .matches = { diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index b22573131e53..2242d6035d9e 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -24,7 +24,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define TPACPI_VERSION "0.25" -#define TPACPI_SYSFS_VERSION 0x020700 +#define TPACPI_SYSFS_VERSION 0x030000 /* * Changelog: @@ -6342,7 +6342,7 @@ static int __init thermal_init(struct ibm_init_struct *iibm) switch (thermal_read_mode) { case TPACPI_THERMAL_TPEC_16: - res = sysfs_create_group(&tpacpi_sensors_pdev->dev.kobj, + res = sysfs_create_group(&tpacpi_hwmon->kobj, &thermal_temp_input16_group); if (res) return res; @@ -6350,7 +6350,7 @@ static int __init thermal_init(struct ibm_init_struct *iibm) case TPACPI_THERMAL_TPEC_8: case TPACPI_THERMAL_ACPI_TMP07: case TPACPI_THERMAL_ACPI_UPDT: - res = sysfs_create_group(&tpacpi_sensors_pdev->dev.kobj, + res = sysfs_create_group(&tpacpi_hwmon->kobj, &thermal_temp_input8_group); if (res) return res; @@ -6367,13 +6367,13 @@ static void thermal_exit(void) { switch (thermal_read_mode) { case TPACPI_THERMAL_TPEC_16: - sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj, + sysfs_remove_group(&tpacpi_hwmon->kobj, &thermal_temp_input16_group); break; case TPACPI_THERMAL_TPEC_8: case TPACPI_THERMAL_ACPI_TMP07: case TPACPI_THERMAL_ACPI_UPDT: - sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj, + 
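The dmi_system_id constifications scattered through this area (chromeos, compal, hdaps, mlx-platform, msi, samsung, sony, toshiba, pnpbios) all touch the same construct: a sentinel-terminated match table walked once at init, optionally firing a callback on the first hit. A rough userspace stand-in of that walk, with entirely invented entries:

#include <stdio.h>
#include <string.h>

struct demo_dmi_id {
        const char *ident;
        const char *vendor;
        const char *product;
        int (*callback)(const struct demo_dmi_id *id);
};

static int demo_matched(const struct demo_dmi_id *id)
{
        printf("matched: %s\n", id->ident);
        return 1;
}

static const struct demo_dmi_id demo_table[] = {
        { "Vendor A laptop", "VENDOR_A", "Model One", demo_matched },
        { "Vendor B box",    "VENDOR_B", "Model Two", demo_matched },
        { NULL }        /* terminating entry, like the empty dmi_system_id */
};

static int demo_dmi_check(const char *vendor, const char *product)
{
        const struct demo_dmi_id *id;

        for (id = demo_table; id->ident; id++)
                if (!strcmp(id->vendor, vendor) && !strcmp(id->product, product))
                        return id->callback(id);
        return 0;
}

int main(void)
{
        printf("hit: %d\n", demo_dmi_check("VENDOR_A", "Model One"));
        printf("hit: %d\n", demo_dmi_check("VENDOR_C", "Other"));
        return 0;
}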
sysfs_remove_group(&tpacpi_hwmon->kobj, &thermal_temp_input8_group); break; case TPACPI_THERMAL_NONE: @@ -8696,7 +8696,7 @@ static int __init fan_init(struct ibm_init_struct *iibm) fan_attributes[ARRAY_SIZE(fan_attributes)-2] = &dev_attr_fan2_input.attr; } - rc = sysfs_create_group(&tpacpi_sensors_pdev->dev.kobj, + rc = sysfs_create_group(&tpacpi_hwmon->kobj, &fan_attr_group); if (rc < 0) return rc; @@ -8704,7 +8704,7 @@ static int __init fan_init(struct ibm_init_struct *iibm) rc = driver_create_file(&tpacpi_hwmon_pdriver.driver, &driver_attr_fan_watchdog); if (rc < 0) { - sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj, + sysfs_remove_group(&tpacpi_hwmon->kobj, &fan_attr_group); return rc; } @@ -8719,7 +8719,7 @@ static void fan_exit(void) "cancelling any pending fan watchdog tasks\n"); /* FIXME: can we really do this unconditionally? */ - sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj, &fan_attr_group); + sysfs_remove_group(&tpacpi_hwmon->kobj, &fan_attr_group); driver_remove_file(&tpacpi_hwmon_pdriver.driver, &driver_attr_fan_watchdog); @@ -9149,16 +9149,6 @@ static void hotkey_driver_event(const unsigned int scancode) tpacpi_driver_event(TP_HKEY_EV_HOTKEY_BASE + scancode); } -/* sysfs name ---------------------------------------------------------- */ -static ssize_t thinkpad_acpi_pdev_name_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - return snprintf(buf, PAGE_SIZE, "%s\n", TPACPI_NAME); -} - -static DEVICE_ATTR(name, S_IRUGO, thinkpad_acpi_pdev_name_show, NULL); - /* --------------------------------------------------------------------- */ /* /proc support */ @@ -9696,8 +9686,6 @@ static void thinkpad_acpi_module_exit(void) if (tpacpi_hwmon) hwmon_device_unregister(tpacpi_hwmon); - if (tp_features.sensors_pdev_attrs_registered) - device_remove_file(&tpacpi_sensors_pdev->dev, &dev_attr_name); if (tpacpi_sensors_pdev) platform_device_unregister(tpacpi_sensors_pdev); if (tpacpi_pdev) @@ -9818,14 +9806,10 @@ static int __init thinkpad_acpi_module_init(void) thinkpad_acpi_module_exit(); return ret; } - ret = device_create_file(&tpacpi_sensors_pdev->dev, &dev_attr_name); - if (ret) { - pr_err("unable to create sysfs hwmon device attributes\n"); - thinkpad_acpi_module_exit(); - return ret; - } tp_features.sensors_pdev_attrs_registered = 1; - tpacpi_hwmon = hwmon_device_register(&tpacpi_sensors_pdev->dev); + tpacpi_hwmon = hwmon_device_register_with_groups( + &tpacpi_sensors_pdev->dev, TPACPI_NAME, NULL, NULL); + if (IS_ERR(tpacpi_hwmon)) { ret = PTR_ERR(tpacpi_hwmon); tpacpi_hwmon = NULL; diff --git a/drivers/platform/x86/toshiba-wmi.c b/drivers/platform/x86/toshiba-wmi.c index 440528676170..03d7620cd6d7 100644 --- a/drivers/platform/x86/toshiba-wmi.c +++ b/drivers/platform/x86/toshiba-wmi.c @@ -64,7 +64,7 @@ static void toshiba_wmi_notify(u32 value, void *context) kfree(response.pointer); } -static struct dmi_system_id toshiba_wmi_dmi_table[] __initdata = { +static const struct dmi_system_id toshiba_wmi_dmi_table[] __initconst = { { .ident = "Toshiba laptop", .matches = { diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c index e32ba575e8d9..0765b1797d4c 100644 --- a/drivers/platform/x86/wmi.c +++ b/drivers/platform/x86/wmi.c @@ -218,7 +218,7 @@ u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out) if (!(block->flags & ACPI_WMI_METHOD)) return AE_BAD_DATA; - if (block->instance_count < instance) + if (block->instance_count <= instance) return AE_BAD_PARAMETER; input.count = 2; @@ -265,7 +265,7 @@ static acpi_status 
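The three wmi.c hunks above fix the same off-by-one: a GUID whose instance_count is N exposes instances 0..N-1, so N itself has to be rejected, hence "<" becoming "<=". This is also consistent with the asus-wmi, mxm-wmi and peaq-wmi hunks earlier in this series passing instance 0 rather than 1, since a single-instance GUID only has instance 0. Minimal stand-alone check (invented helper name):

#include <stdio.h>
#include <stdbool.h>

static bool demo_instance_valid(unsigned char instance_count, unsigned char instance)
{
        return instance < instance_count;       /* reject instance_count <= instance */
}

int main(void)
{
        unsigned char count = 1;        /* typical single-instance GUID */
        unsigned char inst;

        for (inst = 0; inst < 3; inst++)
                printf("instance_count=%u instance=%u -> %s\n", count, inst,
                       demo_instance_valid(count, inst) ? "ok" : "AE_BAD_PARAMETER");
        return 0;
}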
__query_block(struct wmi_block *wblock, u8 instance, block = &wblock->gblock; handle = wblock->acpi_device->handle; - if (block->instance_count < instance) + if (block->instance_count <= instance) return AE_BAD_PARAMETER; /* Check GUID is a data block */ @@ -392,7 +392,7 @@ acpi_status wmi_set_block(const char *guid_string, u8 instance, block = &wblock->gblock; handle = wblock->acpi_device->handle; - if (block->instance_count < instance) + if (block->instance_count <= instance) return AE_BAD_PARAMETER; /* Check GUID is a data block */ diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c index 0ced908e7aa8..e681140b85d8 100644 --- a/drivers/pnp/pnpbios/core.c +++ b/drivers/pnp/pnpbios/core.c @@ -495,7 +495,7 @@ static int __init exploding_pnp_bios(const struct dmi_system_id *d) return 0; } -static struct dmi_system_id pnpbios_dmi_table[] __initdata = { +static const struct dmi_system_id pnpbios_dmi_table[] __initconst = { { /* PnPBIOS GPF on boot */ .callback = exploding_pnp_bios, .ident = "Higraded P14H", diff --git a/drivers/power/avs/rockchip-io-domain.c b/drivers/power/avs/rockchip-io-domain.c index 031a34372191..75f63e38a8d1 100644 --- a/drivers/power/avs/rockchip-io-domain.c +++ b/drivers/power/avs/rockchip-io-domain.c @@ -349,6 +349,36 @@ static const struct rockchip_iodomain_soc_data soc_data_rk3399_pmu = { .init = rk3399_pmu_iodomain_init, }; +static const struct rockchip_iodomain_soc_data soc_data_rv1108 = { + .grf_offset = 0x404, + .supply_names = { + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + "vccio1", + "vccio2", + "vccio3", + "vccio5", + "vccio6", + }, + +}; + +static const struct rockchip_iodomain_soc_data soc_data_rv1108_pmu = { + .grf_offset = 0x104, + .supply_names = { + "pmu", + }, +}; + static const struct of_device_id rockchip_iodomain_match[] = { { .compatible = "rockchip,rk3188-io-voltage-domain", @@ -382,6 +412,14 @@ static const struct of_device_id rockchip_iodomain_match[] = { .compatible = "rockchip,rk3399-pmu-io-voltage-domain", .data = (void *)&soc_data_rk3399_pmu }, + { + .compatible = "rockchip,rv1108-io-voltage-domain", + .data = (void *)&soc_data_rv1108 + }, + { + .compatible = "rockchip,rv1108-pmu-io-voltage-domain", + .data = (void *)&soc_data_rv1108_pmu + }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, rockchip_iodomain_match); diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c index 55fce8b75245..31080c254124 100644 --- a/drivers/power/reset/at91-sama5d2_shdwc.c +++ b/drivers/power/reset/at91-sama5d2_shdwc.c @@ -171,8 +171,8 @@ static u32 at91_shdwc_get_wakeup_input(struct platform_device *pdev, for_each_child_of_node(np, cnp) { if (of_property_read_u32(cnp, "reg", &wk_input)) { - dev_warn(&pdev->dev, "reg property is missing for %s\n", - cnp->full_name); + dev_warn(&pdev->dev, "reg property is missing for %pOF\n", + cnp); continue; } diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig index 969f5005669c..5ab90c1f3f7c 100644 --- a/drivers/power/supply/Kconfig +++ b/drivers/power/supply/Kconfig @@ -198,6 +198,15 @@ config BATTERY_BQ27XXX_I2C Say Y here to enable support for batteries with BQ27xxx chips connected over an I2C bus. +config BATTERY_BQ27XXX_HDQ + tristate "BQ27xxx HDQ support" + depends on BATTERY_BQ27XXX + depends on W1 + default y + help + Say Y here to enable support for batteries with BQ27xxx chips + connected over an HDQ bus. 
+ config BATTERY_BQ27XXX_DT_UPDATES_NVM bool "BQ27xxx support for update of NVM/flash data memory" depends on BATTERY_BQ27XXX_I2C @@ -313,6 +322,19 @@ config BATTERY_MAX17042 with MAX17042. This driver also supports max17047/50 chips which are improved version of max17042. +config BATTERY_MAX1721X + tristate "MAX17211/MAX17215 standalone gas-gauge" + depends on W1 + select REGMAP_W1 + help + MAX1721x is fuel-gauge systems for lithium-ion (Li+) batteries + in handheld and portable equipment. MAX17211 used with single cell + battery. MAX17215 designed for muticell battery. Both them have + OneWire (W1) host interface. + + Say Y here to enable support for the MAX17211/MAX17215 standalone + battery gas-gauge. + config BATTERY_Z2 tristate "Z2 battery driver" depends on I2C && MACH_ZIPIT2 @@ -365,6 +387,7 @@ config BATTERY_RX51 config CHARGER_CPCAP tristate "CPCAP PMIC Charger Driver" depends on MFD_CPCAP && IIO + depends on OMAP_USB2 || (!OMAP_USB2 && COMPILE_TEST) default MFD_CPCAP help Say Y to enable support for CPCAP PMIC charger driver for Motorola diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile index a41f40957847..621a19058fec 100644 --- a/drivers/power/supply/Makefile +++ b/drivers/power/supply/Makefile @@ -38,12 +38,14 @@ obj-$(CONFIG_BATTERY_SBS) += sbs-battery.o obj-$(CONFIG_CHARGER_SBS) += sbs-charger.o obj-$(CONFIG_BATTERY_BQ27XXX) += bq27xxx_battery.o obj-$(CONFIG_BATTERY_BQ27XXX_I2C) += bq27xxx_battery_i2c.o +obj-$(CONFIG_BATTERY_BQ27XXX_HDQ) += bq27xxx_battery_hdq.o obj-$(CONFIG_BATTERY_DA9030) += da9030_battery.o obj-$(CONFIG_BATTERY_DA9052) += da9052-battery.o obj-$(CONFIG_CHARGER_DA9150) += da9150-charger.o obj-$(CONFIG_BATTERY_DA9150) += da9150-fg.o obj-$(CONFIG_BATTERY_MAX17040) += max17040_battery.o obj-$(CONFIG_BATTERY_MAX17042) += max17042_battery.o +obj-$(CONFIG_BATTERY_MAX1721X) += max1721x_battery.o obj-$(CONFIG_BATTERY_Z2) += z2_battery.o obj-$(CONFIG_BATTERY_RT5033) += rt5033_battery.o obj-$(CONFIG_CHARGER_RT9455) += rt9455_charger.o diff --git a/drivers/power/supply/act8945a_charger.c b/drivers/power/supply/act8945a_charger.c index d1eb2e359532..8e117b31ba79 100644 --- a/drivers/power/supply/act8945a_charger.c +++ b/drivers/power/supply/act8945a_charger.c @@ -596,9 +596,9 @@ static int act8945a_charger_probe(struct platform_device *pdev) return ret; irq = of_irq_get(pdev->dev.of_node, 0); - if (irq == -EPROBE_DEFER) { + if (irq <= 0) { dev_err(&pdev->dev, "failed to find IRQ number\n"); - return -EPROBE_DEFER; + return irq ?: -ENXIO; } ret = devm_request_irq(&pdev->dev, irq, act8945a_status_changed, diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c index d5a707e14526..35ff406aca48 100644 --- a/drivers/power/supply/bq24190_charger.c +++ b/drivers/power/supply/bq24190_charger.c @@ -16,6 +16,9 @@ #include #include #include +#include +#include +#include #include #include #include @@ -43,6 +46,8 @@ #define BQ24190_REG_POC_CHG_CONFIG_OTG 0x2 #define BQ24190_REG_POC_SYS_MIN_MASK (BIT(3) | BIT(2) | BIT(1)) #define BQ24190_REG_POC_SYS_MIN_SHIFT 1 +#define BQ24190_REG_POC_SYS_MIN_MIN 3000 +#define BQ24190_REG_POC_SYS_MIN_MAX 3700 #define BQ24190_REG_POC_BOOST_LIM_MASK BIT(0) #define BQ24190_REG_POC_BOOST_LIM_SHIFT 0 @@ -57,9 +62,13 @@ #define BQ24190_REG_PCTCC_IPRECHG_MASK (BIT(7) | BIT(6) | BIT(5) | \ BIT(4)) #define BQ24190_REG_PCTCC_IPRECHG_SHIFT 4 +#define BQ24190_REG_PCTCC_IPRECHG_MIN 128 +#define BQ24190_REG_PCTCC_IPRECHG_MAX 2048 #define BQ24190_REG_PCTCC_ITERM_MASK (BIT(3) | BIT(2) | BIT(1) | \ 
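The act8945a hunk above also shows a compact error-propagation idiom: of_irq_get() can return a negative errno or 0, and "return irq ?: -ENXIO" (the GNU ?: extension with the middle operand omitted) forwards a negative errno as-is while turning 0 into -ENXIO. A tiny demo of just that mapping (GNU C, so build with gcc or clang):

#include <stdio.h>
#include <errno.h>

static int map_irq_result(int irq)
{
        if (irq <= 0)
                return irq ?: -ENXIO;   /* keep the errno, or -ENXIO for 0 */
        return irq;
}

int main(void)
{
        printf("errno  -EINVAL (%d) -> %d\n", -EINVAL, map_irq_result(-EINVAL));
        printf("no IRQ (0)          -> %d\n", map_irq_result(0));
        printf("valid IRQ 42        -> %d\n", map_irq_result(42));
        return 0;
}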
BIT(0)) #define BQ24190_REG_PCTCC_ITERM_SHIFT 0 +#define BQ24190_REG_PCTCC_ITERM_MIN 128 +#define BQ24190_REG_PCTCC_ITERM_MAX 2048 #define BQ24190_REG_CVC 0x04 /* Charge Voltage Control */ #define BQ24190_REG_CVC_VREG_MASK (BIT(7) | BIT(6) | BIT(5) | \ @@ -156,9 +165,13 @@ struct bq24190_dev_info { struct extcon_dev *extcon; struct notifier_block extcon_nb; struct delayed_work extcon_work; + struct delayed_work input_current_limit_work; char model_name[I2C_NAME_SIZE]; bool initialized; bool irq_event; + u16 sys_min; + u16 iprechg; + u16 iterm; struct mutex f_reg_lock; u8 f_reg; u8 ss_reg; @@ -504,15 +517,112 @@ static int bq24190_sysfs_create_group(struct bq24190_dev_info *bdi) static inline void bq24190_sysfs_remove_group(struct bq24190_dev_info *bdi) {} #endif -/* - * According to the "Host Mode and default Mode" section of the - * manual, a write to any register causes the bq24190 to switch - * from default mode to host mode. It will switch back to default - * mode after a WDT timeout unless the WDT is turned off as well. - * So, by simply turning off the WDT, we accomplish both with the - * same write. - */ -static int bq24190_set_mode_host(struct bq24190_dev_info *bdi) +#ifdef CONFIG_REGULATOR +static int bq24190_set_charge_mode(struct regulator_dev *dev, u8 val) +{ + struct bq24190_dev_info *bdi = rdev_get_drvdata(dev); + int ret; + + ret = pm_runtime_get_sync(bdi->dev); + if (ret < 0) { + dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", ret); + pm_runtime_put_noidle(bdi->dev); + return ret; + } + + ret = bq24190_write_mask(bdi, BQ24190_REG_POC, + BQ24190_REG_POC_CHG_CONFIG_MASK, + BQ24190_REG_POC_CHG_CONFIG_SHIFT, val); + + pm_runtime_mark_last_busy(bdi->dev); + pm_runtime_put_autosuspend(bdi->dev); + + return ret; +} + +static int bq24190_vbus_enable(struct regulator_dev *dev) +{ + return bq24190_set_charge_mode(dev, BQ24190_REG_POC_CHG_CONFIG_OTG); +} + +static int bq24190_vbus_disable(struct regulator_dev *dev) +{ + return bq24190_set_charge_mode(dev, BQ24190_REG_POC_CHG_CONFIG_CHARGE); +} + +static int bq24190_vbus_is_enabled(struct regulator_dev *dev) +{ + struct bq24190_dev_info *bdi = rdev_get_drvdata(dev); + int ret; + u8 val; + + ret = pm_runtime_get_sync(bdi->dev); + if (ret < 0) { + dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", ret); + pm_runtime_put_noidle(bdi->dev); + return ret; + } + + ret = bq24190_read_mask(bdi, BQ24190_REG_POC, + BQ24190_REG_POC_CHG_CONFIG_MASK, + BQ24190_REG_POC_CHG_CONFIG_SHIFT, &val); + + pm_runtime_mark_last_busy(bdi->dev); + pm_runtime_put_autosuspend(bdi->dev); + + return ret ? 
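The bq24190_read_mask()/bq24190_write_mask() calls in the regulator hooks above are the usual mask-and-shift pattern for a multi-bit register field. A stand-alone version operating on a plain byte, assuming (not shown in this hunk) that the CHG_CONFIG field sits in bits 5:4 of the POC register, which is consistent with the visible SYS_MIN (bits 3:1) and BOOST_LIM (bit 0) defines; the starting register value is arbitrary.

#include <stdio.h>
#include <stdint.h>

#define POC_CHG_CONFIG_MASK     (0x3u << 4)     /* assumed bits 5:4 */
#define POC_CHG_CONFIG_SHIFT    4
#define POC_CHG_CONFIG_CHARGE   0x1
#define POC_CHG_CONFIG_OTG      0x2

static uint8_t field_read(uint8_t reg, uint8_t mask, uint8_t shift)
{
        return (reg & mask) >> shift;
}

static uint8_t field_write(uint8_t reg, uint8_t mask, uint8_t shift, uint8_t val)
{
        return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
        uint8_t poc = 0x1b;     /* arbitrary starting register contents */

        poc = field_write(poc, POC_CHG_CONFIG_MASK, POC_CHG_CONFIG_SHIFT,
                          POC_CHG_CONFIG_OTG);
        printf("POC=0x%02x, CHG_CONFIG=%u (OTG enabled: %s)\n", poc,
               field_read(poc, POC_CHG_CONFIG_MASK, POC_CHG_CONFIG_SHIFT),
               field_read(poc, POC_CHG_CONFIG_MASK, POC_CHG_CONFIG_SHIFT) ==
               POC_CHG_CONFIG_OTG ? "yes" : "no");
        return 0;
}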
ret : val == BQ24190_REG_POC_CHG_CONFIG_OTG; +} + +static const struct regulator_ops bq24190_vbus_ops = { + .enable = bq24190_vbus_enable, + .disable = bq24190_vbus_disable, + .is_enabled = bq24190_vbus_is_enabled, +}; + +static const struct regulator_desc bq24190_vbus_desc = { + .name = "usb_otg_vbus", + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .ops = &bq24190_vbus_ops, + .fixed_uV = 5000000, + .n_voltages = 1, +}; + +static const struct regulator_init_data bq24190_vbus_init_data = { + .constraints = { + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, +}; + +static int bq24190_register_vbus_regulator(struct bq24190_dev_info *bdi) +{ + struct bq24190_platform_data *pdata = bdi->dev->platform_data; + struct regulator_config cfg = { }; + struct regulator_dev *reg; + int ret = 0; + + cfg.dev = bdi->dev; + if (pdata && pdata->regulator_init_data) + cfg.init_data = pdata->regulator_init_data; + else + cfg.init_data = &bq24190_vbus_init_data; + cfg.driver_data = bdi; + reg = devm_regulator_register(bdi->dev, &bq24190_vbus_desc, &cfg); + if (IS_ERR(reg)) { + ret = PTR_ERR(reg); + dev_err(bdi->dev, "Can't register regulator: %d\n", ret); + } + + return ret; +} +#else +static int bq24190_register_vbus_regulator(struct bq24190_dev_info *bdi) +{ + return 0; +} +#endif + +static int bq24190_set_config(struct bq24190_dev_info *bdi) { int ret; u8 v; @@ -523,9 +633,52 @@ static int bq24190_set_mode_host(struct bq24190_dev_info *bdi) bdi->watchdog = ((v & BQ24190_REG_CTTC_WATCHDOG_MASK) >> BQ24190_REG_CTTC_WATCHDOG_SHIFT); + + /* + * According to the "Host Mode and default Mode" section of the + * manual, a write to any register causes the bq24190 to switch + * from default mode to host mode. It will switch back to default + * mode after a WDT timeout unless the WDT is turned off as well. + * So, by simply turning off the WDT, we accomplish both with the + * same write. 
+ */ v &= ~BQ24190_REG_CTTC_WATCHDOG_MASK; - return bq24190_write(bdi, BQ24190_REG_CTTC, v); + ret = bq24190_write(bdi, BQ24190_REG_CTTC, v); + if (ret < 0) + return ret; + + if (bdi->sys_min) { + v = bdi->sys_min / 100 - 30; // manual section 9.5.1.2, table 9 + ret = bq24190_write_mask(bdi, BQ24190_REG_POC, + BQ24190_REG_POC_SYS_MIN_MASK, + BQ24190_REG_POC_SYS_MIN_SHIFT, + v); + if (ret < 0) + return ret; + } + + if (bdi->iprechg) { + v = bdi->iprechg / 128 - 1; // manual section 9.5.1.4, table 11 + ret = bq24190_write_mask(bdi, BQ24190_REG_PCTCC, + BQ24190_REG_PCTCC_IPRECHG_MASK, + BQ24190_REG_PCTCC_IPRECHG_SHIFT, + v); + if (ret < 0) + return ret; + } + + if (bdi->iterm) { + v = bdi->iterm / 128 - 1; // manual section 9.5.1.4, table 11 + ret = bq24190_write_mask(bdi, BQ24190_REG_PCTCC, + BQ24190_REG_PCTCC_ITERM_MASK, + BQ24190_REG_PCTCC_ITERM_SHIFT, + v); + if (ret < 0) + return ret; + } + + return 0; } static int bq24190_register_reset(struct bq24190_dev_info *bdi) @@ -773,6 +926,38 @@ static int bq24190_charger_set_temp_alert_max(struct bq24190_dev_info *bdi, return bq24190_battery_set_temp_alert_max(bdi, val); } +static int bq24190_charger_get_precharge(struct bq24190_dev_info *bdi, + union power_supply_propval *val) +{ + u8 v; + int ret; + + ret = bq24190_read_mask(bdi, BQ24190_REG_PCTCC, + BQ24190_REG_PCTCC_IPRECHG_MASK, + BQ24190_REG_PCTCC_IPRECHG_SHIFT, &v); + if (ret < 0) + return ret; + + val->intval = ++v * 128 * 1000; + return 0; +} + +static int bq24190_charger_get_charge_term(struct bq24190_dev_info *bdi, + union power_supply_propval *val) +{ + u8 v; + int ret; + + ret = bq24190_read_mask(bdi, BQ24190_REG_PCTCC, + BQ24190_REG_PCTCC_ITERM_MASK, + BQ24190_REG_PCTCC_ITERM_SHIFT, &v); + if (ret < 0) + return ret; + + val->intval = ++v * 128 * 1000; + return 0; +} + static int bq24190_charger_get_current(struct bq24190_dev_info *bdi, union power_supply_propval *val) { @@ -865,6 +1050,33 @@ static int bq24190_charger_set_voltage(struct bq24190_dev_info *bdi, ARRAY_SIZE(bq24190_cvc_vreg_values), val->intval); } +static int bq24190_charger_get_iinlimit(struct bq24190_dev_info *bdi, + union power_supply_propval *val) +{ + int iinlimit, ret; + + ret = bq24190_get_field_val(bdi, BQ24190_REG_ISC, + BQ24190_REG_ISC_IINLIM_MASK, + BQ24190_REG_ISC_IINLIM_SHIFT, + bq24190_isc_iinlim_values, + ARRAY_SIZE(bq24190_isc_iinlim_values), &iinlimit); + if (ret < 0) + return ret; + + val->intval = iinlimit; + return 0; +} + +static int bq24190_charger_set_iinlimit(struct bq24190_dev_info *bdi, + const union power_supply_propval *val) +{ + return bq24190_set_field_val(bdi, BQ24190_REG_ISC, + BQ24190_REG_ISC_IINLIM_MASK, + BQ24190_REG_ISC_IINLIM_SHIFT, + bq24190_isc_iinlim_values, + ARRAY_SIZE(bq24190_isc_iinlim_values), val->intval); +} + static int bq24190_charger_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { @@ -893,6 +1105,12 @@ static int bq24190_charger_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_TEMP_ALERT_MAX: ret = bq24190_charger_get_temp_alert_max(bdi, val); break; + case POWER_SUPPLY_PROP_PRECHARGE_CURRENT: + ret = bq24190_charger_get_precharge(bdi, val); + break; + case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT: + ret = bq24190_charger_get_charge_term(bdi, val); + break; case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT: ret = bq24190_charger_get_current(bdi, val); break; @@ -905,6 +1123,9 @@ static int bq24190_charger_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX: ret = 
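The precharge/termination encoding used just above is easy to round-trip outside the driver: the register field stores current_mA / 128 - 1, and the corresponding get path reports (field + 1) * 128 mA in microamps, over the 128..2048 mA range given by the new BQ24190_REG_PCTCC_* limits. Pure arithmetic, no hardware involved:

#include <stdio.h>

static unsigned int encode_ma(unsigned int ma)
{
        return ma / 128 - 1;            /* caller guarantees 128 <= ma <= 2048 */
}

static unsigned int decode_ua(unsigned int field)
{
        return (field + 1) * 128 * 1000;
}

int main(void)
{
        const unsigned int samples_ma[] = { 128, 256, 512, 2048 };
        size_t i;

        for (i = 0; i < sizeof(samples_ma) / sizeof(samples_ma[0]); i++) {
                unsigned int field = encode_ma(samples_ma[i]);

                printf("%4u mA -> field %2u -> %7u uA\n",
                       samples_ma[i], field, decode_ua(field));
        }
        return 0;
}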
bq24190_charger_get_voltage_max(bdi, val); break; + case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT: + ret = bq24190_charger_get_iinlimit(bdi, val); + break; case POWER_SUPPLY_PROP_SCOPE: val->intval = POWER_SUPPLY_SCOPE_SYSTEM; ret = 0; @@ -956,6 +1177,9 @@ static int bq24190_charger_set_property(struct power_supply *psy, case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE: ret = bq24190_charger_set_voltage(bdi, val); break; + case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT: + ret = bq24190_charger_set_iinlimit(bdi, val); + break; default: ret = -EINVAL; } @@ -977,6 +1201,7 @@ static int bq24190_charger_property_is_writeable(struct power_supply *psy, case POWER_SUPPLY_PROP_CHARGE_TYPE: case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT: case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE: + case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT: ret = 1; break; default: @@ -986,16 +1211,45 @@ static int bq24190_charger_property_is_writeable(struct power_supply *psy, return ret; } +static void bq24190_input_current_limit_work(struct work_struct *work) +{ + struct bq24190_dev_info *bdi = + container_of(work, struct bq24190_dev_info, + input_current_limit_work.work); + + power_supply_set_input_current_limit_from_supplier(bdi->charger); +} + +/* Sync the input-current-limit with our parent supply (if we have one) */ +static void bq24190_charger_external_power_changed(struct power_supply *psy) +{ + struct bq24190_dev_info *bdi = power_supply_get_drvdata(psy); + + /* + * The Power-Good detection may take up to 220ms, sometimes + * the external charger detection is quicker, and the bq24190 will + * reset to iinlim based on its own charger detection (which is not + * hooked up when using external charger detection) resulting in a + * too low default 500mA iinlim. Delay setting the input-current-limit + * for 300ms to avoid this. 
+ */ + queue_delayed_work(system_wq, &bdi->input_current_limit_work, + msecs_to_jiffies(300)); +} + static enum power_supply_property bq24190_charger_properties[] = { POWER_SUPPLY_PROP_CHARGE_TYPE, POWER_SUPPLY_PROP_HEALTH, POWER_SUPPLY_PROP_ONLINE, POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_TEMP_ALERT_MAX, + POWER_SUPPLY_PROP_PRECHARGE_CURRENT, + POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT, POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT, POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE, POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX, + POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT, POWER_SUPPLY_PROP_SCOPE, POWER_SUPPLY_PROP_MODEL_NAME, POWER_SUPPLY_PROP_MANUFACTURER, @@ -1013,6 +1267,7 @@ static const struct power_supply_desc bq24190_charger_desc = { .get_property = bq24190_charger_get_property, .set_property = bq24190_charger_set_property, .property_is_writeable = bq24190_charger_property_is_writeable, + .external_power_changed = bq24190_charger_external_power_changed, }; /* Battery power supply property routines */ @@ -1460,13 +1715,50 @@ static int bq24190_hw_init(struct bq24190_dev_info *bdi) if (ret < 0) return ret; - ret = bq24190_set_mode_host(bdi); + ret = bq24190_set_config(bdi); if (ret < 0) return ret; return bq24190_read(bdi, BQ24190_REG_SS, &bdi->ss_reg); } +static int bq24190_get_config(struct bq24190_dev_info *bdi) +{ + const char * const s = "ti,system-minimum-microvolt"; + struct power_supply_battery_info info = {}; + int v; + + if (device_property_read_u32(bdi->dev, s, &v) == 0) { + v /= 1000; + if (v >= BQ24190_REG_POC_SYS_MIN_MIN + && v <= BQ24190_REG_POC_SYS_MIN_MAX) + bdi->sys_min = v; + else + dev_warn(bdi->dev, "invalid value for %s: %u\n", s, v); + } + + if (bdi->dev->of_node && + !power_supply_get_battery_info(bdi->charger, &info)) { + v = info.precharge_current_ua / 1000; + if (v >= BQ24190_REG_PCTCC_IPRECHG_MIN + && v <= BQ24190_REG_PCTCC_IPRECHG_MAX) + bdi->iprechg = v; + else + dev_warn(bdi->dev, "invalid value for battery:precharge-current-microamp: %d\n", + v); + + v = info.charge_term_current_ua / 1000; + if (v >= BQ24190_REG_PCTCC_ITERM_MIN + && v <= BQ24190_REG_PCTCC_ITERM_MAX) + bdi->iterm = v; + else + dev_warn(bdi->dev, "invalid value for battery:charge-term-current-microamp: %d\n", + v); + } + + return 0; +} + static int bq24190_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -1494,10 +1786,12 @@ static int bq24190_probe(struct i2c_client *client, mutex_init(&bdi->f_reg_lock); bdi->f_reg = 0; bdi->ss_reg = BQ24190_REG_SS_VBUS_STAT_MASK; /* impossible state */ + INIT_DELAYED_WORK(&bdi->input_current_limit_work, + bq24190_input_current_limit_work); i2c_set_clientdata(client, bdi); - if (!client->irq) { + if (client->irq <= 0) { dev_err(dev, "Can't get irq info\n"); return -EINVAL; } @@ -1530,13 +1824,8 @@ static int bq24190_probe(struct i2c_client *client, goto out_pmrt; } - ret = bq24190_hw_init(bdi); - if (ret < 0) { - dev_err(dev, "Hardware init failed\n"); - goto out_pmrt; - } - charger_cfg.drv_data = bdi; + charger_cfg.of_node = dev->of_node; charger_cfg.supplied_to = bq24190_charger_supplied_to; charger_cfg.num_supplicants = ARRAY_SIZE(bq24190_charger_supplied_to), bdi->charger = power_supply_register(dev, &bq24190_charger_desc, @@ -1560,8 +1849,20 @@ static int bq24190_probe(struct i2c_client *client, } } + ret = bq24190_get_config(bdi); + if (ret < 0) { + dev_err(dev, "Can't get devicetree config\n"); + goto out_charger; + } + + ret = bq24190_hw_init(bdi); + if (ret < 0) { + dev_err(dev, "Hardware init 
failed\n"); + goto out_charger; + } + ret = bq24190_sysfs_create_group(bdi); - if (ret) { + if (ret < 0) { dev_err(dev, "Can't create sysfs entries\n"); goto out_charger; } @@ -1577,6 +1878,10 @@ static int bq24190_probe(struct i2c_client *client, goto out_sysfs; } + ret = bq24190_register_vbus_regulator(bdi); + if (ret < 0) + goto out_sysfs; + if (bdi->extcon) { INIT_DELAYED_WORK(&bdi->extcon_work, bq24190_extcon_work); bdi->extcon_nb.notifier_call = bq24190_extcon_event; @@ -1704,7 +2009,7 @@ static __maybe_unused int bq24190_pm_resume(struct device *dev) } bq24190_register_reset(bdi); - bq24190_set_mode_host(bdi); + bq24190_set_config(bdi); bq24190_read(bdi, BQ24190_REG_SS, &bdi->ss_reg); if (error >= 0) { @@ -1736,6 +2041,7 @@ MODULE_DEVICE_TABLE(i2c, bq24190_i2c_ids); #ifdef CONFIG_OF static const struct of_device_id bq24190_of_match[] = { { .compatible = "ti,bq24190", }, + { .compatible = "ti,bq24192i", }, { }, }; MODULE_DEVICE_TABLE(of, bq24190_of_match); diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c index ed44439d0112..51f0961ecf3e 100644 --- a/drivers/power/supply/bq27xxx_battery.c +++ b/drivers/power/supply/bq27xxx_battery.c @@ -58,8 +58,6 @@ #include -#define DRIVER_VERSION "1.2.0" - #define BQ27XXX_MANUFACTURER "Texas Instruments" /* BQ27XXX Flags */ @@ -132,8 +130,8 @@ enum bq27xxx_reg_index { [BQ27XXX_DM_CKSUM] = 0x60 /* Register mappings */ -static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = { - [BQ27000] = { +static u8 + bq27000_regs[BQ27XXX_REG_MAX] = { [BQ27XXX_REG_CTRL] = 0x00, [BQ27XXX_REG_TEMP] = 0x06, [BQ27XXX_REG_INT_TEMP] = INVALID_REG_ADDR, @@ -157,7 +155,7 @@ static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = { [BQ27XXX_DM_DATA] = INVALID_REG_ADDR, [BQ27XXX_DM_CKSUM] = INVALID_REG_ADDR, }, - [BQ27010] = { + bq27010_regs[BQ27XXX_REG_MAX] = { [BQ27XXX_REG_CTRL] = 0x00, [BQ27XXX_REG_TEMP] = 0x06, [BQ27XXX_REG_INT_TEMP] = INVALID_REG_ADDR, @@ -181,7 +179,7 @@ static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = { [BQ27XXX_DM_DATA] = INVALID_REG_ADDR, [BQ27XXX_DM_CKSUM] = INVALID_REG_ADDR, }, - [BQ2750X] = { + bq2750x_regs[BQ27XXX_REG_MAX] = { [BQ27XXX_REG_CTRL] = 0x00, [BQ27XXX_REG_TEMP] = 0x06, [BQ27XXX_REG_INT_TEMP] = 0x28, @@ -201,7 +199,31 @@ static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = { [BQ27XXX_REG_AP] = INVALID_REG_ADDR, BQ27XXX_DM_REG_ROWS, }, - [BQ2751X] = { +#define bq2751x_regs bq27510g3_regs +#define bq2752x_regs bq27510g3_regs + bq27500_regs[BQ27XXX_REG_MAX] = { + [BQ27XXX_REG_CTRL] = 0x00, + [BQ27XXX_REG_TEMP] = 0x06, + [BQ27XXX_REG_INT_TEMP] = INVALID_REG_ADDR, + [BQ27XXX_REG_VOLT] = 0x08, + [BQ27XXX_REG_AI] = 0x14, + [BQ27XXX_REG_FLAGS] = 0x0a, + [BQ27XXX_REG_TTE] = 0x16, + [BQ27XXX_REG_TTF] = 0x18, + [BQ27XXX_REG_TTES] = 0x1c, + [BQ27XXX_REG_TTECP] = 0x26, + [BQ27XXX_REG_NAC] = 0x0c, + [BQ27XXX_REG_FCC] = 0x12, + [BQ27XXX_REG_CYCT] = 0x2a, + [BQ27XXX_REG_AE] = 0x22, + [BQ27XXX_REG_SOC] = 0x2c, + [BQ27XXX_REG_DCAP] = 0x3c, + [BQ27XXX_REG_AP] = 0x24, + BQ27XXX_DM_REG_ROWS, + }, +#define bq27510g1_regs bq27500_regs +#define bq27510g2_regs bq27500_regs + bq27510g3_regs[BQ27XXX_REG_MAX] = { [BQ27XXX_REG_CTRL] = 0x00, [BQ27XXX_REG_TEMP] = 0x06, [BQ27XXX_REG_INT_TEMP] = 0x28, @@ -221,87 +243,7 @@ static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = { [BQ27XXX_REG_AP] = INVALID_REG_ADDR, BQ27XXX_DM_REG_ROWS, }, - [BQ27500] = { - [BQ27XXX_REG_CTRL] = 0x00, - [BQ27XXX_REG_TEMP] = 0x06, - [BQ27XXX_REG_INT_TEMP] = INVALID_REG_ADDR, - [BQ27XXX_REG_VOLT] = 0x08, - [BQ27XXX_REG_AI] = 0x14, - [BQ27XXX_REG_FLAGS] = 0x0a, - [BQ27XXX_REG_TTE] = 0x16, - 
[BQ27XXX_REG_TTF] = 0x18, - [BQ27XXX_REG_TTES] = 0x1c, - [BQ27XXX_REG_TTECP] = 0x26, - [BQ27XXX_REG_NAC] = 0x0c, - [BQ27XXX_REG_FCC] = 0x12, - [BQ27XXX_REG_CYCT] = 0x2a, - [BQ27XXX_REG_AE] = 0x22, - [BQ27XXX_REG_SOC] = 0x2c, - [BQ27XXX_REG_DCAP] = 0x3c, - [BQ27XXX_REG_AP] = 0x24, - BQ27XXX_DM_REG_ROWS, - }, - [BQ27510G1] = { - [BQ27XXX_REG_CTRL] = 0x00, - [BQ27XXX_REG_TEMP] = 0x06, - [BQ27XXX_REG_INT_TEMP] = INVALID_REG_ADDR, - [BQ27XXX_REG_VOLT] = 0x08, - [BQ27XXX_REG_AI] = 0x14, - [BQ27XXX_REG_FLAGS] = 0x0a, - [BQ27XXX_REG_TTE] = 0x16, - [BQ27XXX_REG_TTF] = 0x18, - [BQ27XXX_REG_TTES] = 0x1c, - [BQ27XXX_REG_TTECP] = 0x26, - [BQ27XXX_REG_NAC] = 0x0c, - [BQ27XXX_REG_FCC] = 0x12, - [BQ27XXX_REG_CYCT] = 0x2a, - [BQ27XXX_REG_AE] = 0x22, - [BQ27XXX_REG_SOC] = 0x2c, - [BQ27XXX_REG_DCAP] = 0x3c, - [BQ27XXX_REG_AP] = 0x24, - BQ27XXX_DM_REG_ROWS, - }, - [BQ27510G2] = { - [BQ27XXX_REG_CTRL] = 0x00, - [BQ27XXX_REG_TEMP] = 0x06, - [BQ27XXX_REG_INT_TEMP] = INVALID_REG_ADDR, - [BQ27XXX_REG_VOLT] = 0x08, - [BQ27XXX_REG_AI] = 0x14, - [BQ27XXX_REG_FLAGS] = 0x0a, - [BQ27XXX_REG_TTE] = 0x16, - [BQ27XXX_REG_TTF] = 0x18, - [BQ27XXX_REG_TTES] = 0x1c, - [BQ27XXX_REG_TTECP] = 0x26, - [BQ27XXX_REG_NAC] = 0x0c, - [BQ27XXX_REG_FCC] = 0x12, - [BQ27XXX_REG_CYCT] = 0x2a, - [BQ27XXX_REG_AE] = 0x22, - [BQ27XXX_REG_SOC] = 0x2c, - [BQ27XXX_REG_DCAP] = 0x3c, - [BQ27XXX_REG_AP] = 0x24, - BQ27XXX_DM_REG_ROWS, - }, - [BQ27510G3] = { - [BQ27XXX_REG_CTRL] = 0x00, - [BQ27XXX_REG_TEMP] = 0x06, - [BQ27XXX_REG_INT_TEMP] = 0x28, - [BQ27XXX_REG_VOLT] = 0x08, - [BQ27XXX_REG_AI] = 0x14, - [BQ27XXX_REG_FLAGS] = 0x0a, - [BQ27XXX_REG_TTE] = 0x16, - [BQ27XXX_REG_TTF] = INVALID_REG_ADDR, - [BQ27XXX_REG_TTES] = 0x1a, - [BQ27XXX_REG_TTECP] = INVALID_REG_ADDR, - [BQ27XXX_REG_NAC] = 0x0c, - [BQ27XXX_REG_FCC] = 0x12, - [BQ27XXX_REG_CYCT] = 0x1e, - [BQ27XXX_REG_AE] = INVALID_REG_ADDR, - [BQ27XXX_REG_SOC] = 0x20, - [BQ27XXX_REG_DCAP] = 0x2e, - [BQ27XXX_REG_AP] = INVALID_REG_ADDR, - BQ27XXX_DM_REG_ROWS, - }, - [BQ27520G1] = { + bq27520g1_regs[BQ27XXX_REG_MAX] = { [BQ27XXX_REG_CTRL] = 0x00, [BQ27XXX_REG_TEMP] = 0x06, [BQ27XXX_REG_INT_TEMP] = INVALID_REG_ADDR, @@ -321,7 +263,7 @@ static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = { [BQ27XXX_REG_AP] = 0x24, BQ27XXX_DM_REG_ROWS, }, - [BQ27520G2] = { + bq27520g2_regs[BQ27XXX_REG_MAX] = { [BQ27XXX_REG_CTRL] = 0x00, [BQ27XXX_REG_TEMP] = 0x06, [BQ27XXX_REG_INT_TEMP] = 0x36, @@ -341,7 +283,7 @@ static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = { [BQ27XXX_REG_AP] = 0x24, BQ27XXX_DM_REG_ROWS, }, - [BQ27520G3] = { + bq27520g3_regs[BQ27XXX_REG_MAX] = { [BQ27XXX_REG_CTRL] = 0x00, [BQ27XXX_REG_TEMP] = 0x06, [BQ27XXX_REG_INT_TEMP] = 0x36, @@ -361,7 +303,7 @@ static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = { [BQ27XXX_REG_AP] = 0x24, BQ27XXX_DM_REG_ROWS, }, - [BQ27520G4] = { + bq27520g4_regs[BQ27XXX_REG_MAX] = { [BQ27XXX_REG_CTRL] = 0x00, [BQ27XXX_REG_TEMP] = 0x06, [BQ27XXX_REG_INT_TEMP] = 0x28, @@ -381,7 +323,7 @@ static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = { [BQ27XXX_REG_AP] = INVALID_REG_ADDR, BQ27XXX_DM_REG_ROWS, }, - [BQ27530] = { + bq27530_regs[BQ27XXX_REG_MAX] = { [BQ27XXX_REG_CTRL] = 0x00, [BQ27XXX_REG_TEMP] = 0x06, [BQ27XXX_REG_INT_TEMP] = 0x32, @@ -401,7 +343,8 @@ static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = { [BQ27XXX_REG_AP] = 0x24, BQ27XXX_DM_REG_ROWS, }, - [BQ27541] = { +#define bq27531_regs bq27530_regs + bq27541_regs[BQ27XXX_REG_MAX] = { [BQ27XXX_REG_CTRL] = 0x00, [BQ27XXX_REG_TEMP] = 0x06, [BQ27XXX_REG_INT_TEMP] = 0x28, @@ -421,7 +364,10 @@ static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = { [BQ27XXX_REG_AP] = 0x24, 
BQ27XXX_DM_REG_ROWS, }, - [BQ27545] = { +#define bq27542_regs bq27541_regs +#define bq27546_regs bq27541_regs +#define bq27742_regs bq27541_regs + bq27545_regs[BQ27XXX_REG_MAX] = { [BQ27XXX_REG_CTRL] = 0x00, [BQ27XXX_REG_TEMP] = 0x06, [BQ27XXX_REG_INT_TEMP] = 0x28, @@ -441,7 +387,7 @@ static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = { [BQ27XXX_REG_AP] = 0x24, BQ27XXX_DM_REG_ROWS, }, - [BQ27421] = { + bq27421_regs[BQ27XXX_REG_MAX] = { [BQ27XXX_REG_CTRL] = 0x00, [BQ27XXX_REG_TEMP] = 0x02, [BQ27XXX_REG_INT_TEMP] = 0x1e, @@ -460,10 +406,12 @@ static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = { [BQ27XXX_REG_DCAP] = 0x3c, [BQ27XXX_REG_AP] = 0x18, BQ27XXX_DM_REG_ROWS, - }, -}; + }; +#define bq27425_regs bq27421_regs +#define bq27441_regs bq27421_regs +#define bq27621_regs bq27421_regs -static enum power_supply_property bq27000_battery_props[] = { +static enum power_supply_property bq27000_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_VOLTAGE_NOW, @@ -485,7 +433,7 @@ static enum power_supply_property bq27000_battery_props[] = { POWER_SUPPLY_PROP_MANUFACTURER, }; -static enum power_supply_property bq27010_battery_props[] = { +static enum power_supply_property bq27010_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_VOLTAGE_NOW, @@ -505,43 +453,11 @@ static enum power_supply_property bq27010_battery_props[] = { POWER_SUPPLY_PROP_MANUFACTURER, }; -static enum power_supply_property bq2750x_battery_props[] = { - POWER_SUPPLY_PROP_STATUS, - POWER_SUPPLY_PROP_PRESENT, - POWER_SUPPLY_PROP_VOLTAGE_NOW, - POWER_SUPPLY_PROP_CURRENT_NOW, - POWER_SUPPLY_PROP_CAPACITY, - POWER_SUPPLY_PROP_CAPACITY_LEVEL, - POWER_SUPPLY_PROP_TEMP, - POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW, - POWER_SUPPLY_PROP_TECHNOLOGY, - POWER_SUPPLY_PROP_CHARGE_FULL, - POWER_SUPPLY_PROP_CHARGE_NOW, - POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, - POWER_SUPPLY_PROP_CYCLE_COUNT, - POWER_SUPPLY_PROP_HEALTH, - POWER_SUPPLY_PROP_MANUFACTURER, -}; +#define bq2750x_props bq27510g3_props +#define bq2751x_props bq27510g3_props +#define bq2752x_props bq27510g3_props -static enum power_supply_property bq2751x_battery_props[] = { - POWER_SUPPLY_PROP_STATUS, - POWER_SUPPLY_PROP_PRESENT, - POWER_SUPPLY_PROP_VOLTAGE_NOW, - POWER_SUPPLY_PROP_CURRENT_NOW, - POWER_SUPPLY_PROP_CAPACITY, - POWER_SUPPLY_PROP_CAPACITY_LEVEL, - POWER_SUPPLY_PROP_TEMP, - POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW, - POWER_SUPPLY_PROP_TECHNOLOGY, - POWER_SUPPLY_PROP_CHARGE_FULL, - POWER_SUPPLY_PROP_CHARGE_NOW, - POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, - POWER_SUPPLY_PROP_CYCLE_COUNT, - POWER_SUPPLY_PROP_HEALTH, - POWER_SUPPLY_PROP_MANUFACTURER, -}; - -static enum power_supply_property bq27500_battery_props[] = { +static enum power_supply_property bq27500_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_VOLTAGE_NOW, @@ -561,50 +477,10 @@ static enum power_supply_property bq27500_battery_props[] = { POWER_SUPPLY_PROP_HEALTH, POWER_SUPPLY_PROP_MANUFACTURER, }; +#define bq27510g1_props bq27500_props +#define bq27510g2_props bq27500_props -static enum power_supply_property bq27510g1_battery_props[] = { - POWER_SUPPLY_PROP_STATUS, - POWER_SUPPLY_PROP_PRESENT, - POWER_SUPPLY_PROP_VOLTAGE_NOW, - POWER_SUPPLY_PROP_CURRENT_NOW, - POWER_SUPPLY_PROP_CAPACITY, - POWER_SUPPLY_PROP_CAPACITY_LEVEL, - POWER_SUPPLY_PROP_TEMP, - POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW, - POWER_SUPPLY_PROP_TIME_TO_FULL_NOW, - POWER_SUPPLY_PROP_TECHNOLOGY, - POWER_SUPPLY_PROP_CHARGE_FULL, - POWER_SUPPLY_PROP_CHARGE_NOW, - 
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, - POWER_SUPPLY_PROP_CYCLE_COUNT, - POWER_SUPPLY_PROP_ENERGY_NOW, - POWER_SUPPLY_PROP_POWER_AVG, - POWER_SUPPLY_PROP_HEALTH, - POWER_SUPPLY_PROP_MANUFACTURER, -}; - -static enum power_supply_property bq27510g2_battery_props[] = { - POWER_SUPPLY_PROP_STATUS, - POWER_SUPPLY_PROP_PRESENT, - POWER_SUPPLY_PROP_VOLTAGE_NOW, - POWER_SUPPLY_PROP_CURRENT_NOW, - POWER_SUPPLY_PROP_CAPACITY, - POWER_SUPPLY_PROP_CAPACITY_LEVEL, - POWER_SUPPLY_PROP_TEMP, - POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW, - POWER_SUPPLY_PROP_TIME_TO_FULL_NOW, - POWER_SUPPLY_PROP_TECHNOLOGY, - POWER_SUPPLY_PROP_CHARGE_FULL, - POWER_SUPPLY_PROP_CHARGE_NOW, - POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, - POWER_SUPPLY_PROP_CYCLE_COUNT, - POWER_SUPPLY_PROP_ENERGY_NOW, - POWER_SUPPLY_PROP_POWER_AVG, - POWER_SUPPLY_PROP_HEALTH, - POWER_SUPPLY_PROP_MANUFACTURER, -}; - -static enum power_supply_property bq27510g3_battery_props[] = { +static enum power_supply_property bq27510g3_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_VOLTAGE_NOW, @@ -622,7 +498,7 @@ static enum power_supply_property bq27510g3_battery_props[] = { POWER_SUPPLY_PROP_MANUFACTURER, }; -static enum power_supply_property bq27520g1_battery_props[] = { +static enum power_supply_property bq27520g1_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_VOLTAGE_NOW, @@ -642,28 +518,9 @@ static enum power_supply_property bq27520g1_battery_props[] = { POWER_SUPPLY_PROP_MANUFACTURER, }; -static enum power_supply_property bq27520g2_battery_props[] = { - POWER_SUPPLY_PROP_STATUS, - POWER_SUPPLY_PROP_PRESENT, - POWER_SUPPLY_PROP_VOLTAGE_NOW, - POWER_SUPPLY_PROP_CURRENT_NOW, - POWER_SUPPLY_PROP_CAPACITY, - POWER_SUPPLY_PROP_CAPACITY_LEVEL, - POWER_SUPPLY_PROP_TEMP, - POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW, - POWER_SUPPLY_PROP_TIME_TO_FULL_NOW, - POWER_SUPPLY_PROP_TECHNOLOGY, - POWER_SUPPLY_PROP_CHARGE_FULL, - POWER_SUPPLY_PROP_CHARGE_NOW, - POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, - POWER_SUPPLY_PROP_CYCLE_COUNT, - POWER_SUPPLY_PROP_ENERGY_NOW, - POWER_SUPPLY_PROP_POWER_AVG, - POWER_SUPPLY_PROP_HEALTH, - POWER_SUPPLY_PROP_MANUFACTURER, -}; +#define bq27520g2_props bq27500_props -static enum power_supply_property bq27520g3_battery_props[] = { +static enum power_supply_property bq27520g3_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_VOLTAGE_NOW, @@ -683,7 +540,7 @@ static enum power_supply_property bq27520g3_battery_props[] = { POWER_SUPPLY_PROP_MANUFACTURER, }; -static enum power_supply_property bq27520g4_battery_props[] = { +static enum power_supply_property bq27520g4_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_VOLTAGE_NOW, @@ -700,7 +557,7 @@ static enum power_supply_property bq27520g4_battery_props[] = { POWER_SUPPLY_PROP_MANUFACTURER, }; -static enum power_supply_property bq27530_battery_props[] = { +static enum power_supply_property bq27530_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_VOLTAGE_NOW, @@ -717,8 +574,9 @@ static enum power_supply_property bq27530_battery_props[] = { POWER_SUPPLY_PROP_CYCLE_COUNT, POWER_SUPPLY_PROP_MANUFACTURER, }; +#define bq27531_props bq27530_props -static enum power_supply_property bq27541_battery_props[] = { +static enum power_supply_property bq27541_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_VOLTAGE_NOW, @@ -736,8 +594,11 @@ static enum power_supply_property bq27541_battery_props[] = { POWER_SUPPLY_PROP_HEALTH, 
POWER_SUPPLY_PROP_MANUFACTURER, }; +#define bq27542_props bq27541_props +#define bq27546_props bq27541_props +#define bq27742_props bq27541_props -static enum power_supply_property bq27545_battery_props[] = { +static enum power_supply_property bq27545_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_VOLTAGE_NOW, @@ -755,7 +616,7 @@ static enum power_supply_property bq27545_battery_props[] = { POWER_SUPPLY_PROP_MANUFACTURER, }; -static enum power_supply_property bq27421_battery_props[] = { +static enum power_supply_property bq27421_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_VOLTAGE_NOW, @@ -769,33 +630,138 @@ static enum power_supply_property bq27421_battery_props[] = { POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, POWER_SUPPLY_PROP_MANUFACTURER, }; +#define bq27425_props bq27421_props +#define bq27441_props bq27421_props +#define bq27621_props bq27421_props -#define BQ27XXX_PROP(_id, _prop) \ - [_id] = { \ - .props = _prop, \ - .size = ARRAY_SIZE(_prop), \ - } +struct bq27xxx_dm_reg { + u8 subclass_id; + u8 offset; + u8 bytes; + u16 min, max; +}; + +enum bq27xxx_dm_reg_id { + BQ27XXX_DM_DESIGN_CAPACITY = 0, + BQ27XXX_DM_DESIGN_ENERGY, + BQ27XXX_DM_TERMINATE_VOLTAGE, +}; + +#define bq27000_dm_regs 0 +#define bq27010_dm_regs 0 +#define bq2750x_dm_regs 0 +#define bq2751x_dm_regs 0 +#define bq2752x_dm_regs 0 + +#if 0 /* not yet tested */ +static struct bq27xxx_dm_reg bq27500_dm_regs[] = { + [BQ27XXX_DM_DESIGN_CAPACITY] = { 48, 10, 2, 0, 65535 }, + [BQ27XXX_DM_DESIGN_ENERGY] = { }, /* missing on chip */ + [BQ27XXX_DM_TERMINATE_VOLTAGE] = { 80, 48, 2, 1000, 32767 }, +}; +#else +#define bq27500_dm_regs 0 +#endif + +/* todo create data memory definitions from datasheets and test on chips */ +#define bq27510g1_dm_regs 0 +#define bq27510g2_dm_regs 0 +#define bq27510g3_dm_regs 0 +#define bq27520g1_dm_regs 0 +#define bq27520g2_dm_regs 0 +#define bq27520g3_dm_regs 0 +#define bq27520g4_dm_regs 0 +#define bq27530_dm_regs 0 +#define bq27531_dm_regs 0 +#define bq27541_dm_regs 0 +#define bq27542_dm_regs 0 +#define bq27546_dm_regs 0 +#define bq27742_dm_regs 0 + +#if 0 /* not yet tested */ +static struct bq27xxx_dm_reg bq27545_dm_regs[] = { + [BQ27XXX_DM_DESIGN_CAPACITY] = { 48, 23, 2, 0, 32767 }, + [BQ27XXX_DM_DESIGN_ENERGY] = { 48, 25, 2, 0, 32767 }, + [BQ27XXX_DM_TERMINATE_VOLTAGE] = { 80, 67, 2, 2800, 3700 }, +}; +#else +#define bq27545_dm_regs 0 +#endif + +static struct bq27xxx_dm_reg bq27421_dm_regs[] = { + [BQ27XXX_DM_DESIGN_CAPACITY] = { 82, 10, 2, 0, 8000 }, + [BQ27XXX_DM_DESIGN_ENERGY] = { 82, 12, 2, 0, 32767 }, + [BQ27XXX_DM_TERMINATE_VOLTAGE] = { 82, 16, 2, 2500, 3700 }, +}; + +static struct bq27xxx_dm_reg bq27425_dm_regs[] = { + [BQ27XXX_DM_DESIGN_CAPACITY] = { 82, 12, 2, 0, 32767 }, + [BQ27XXX_DM_DESIGN_ENERGY] = { 82, 14, 2, 0, 32767 }, + [BQ27XXX_DM_TERMINATE_VOLTAGE] = { 82, 18, 2, 2800, 3700 }, +}; + +#if 0 /* not yet tested */ +#define bq27441_dm_regs bq27421_dm_regs +#else +#define bq27441_dm_regs 0 +#endif + +#if 0 /* not yet tested */ +static struct bq27xxx_dm_reg bq27621_dm_regs[] = { + [BQ27XXX_DM_DESIGN_CAPACITY] = { 82, 3, 2, 0, 8000 }, + [BQ27XXX_DM_DESIGN_ENERGY] = { 82, 5, 2, 0, 32767 }, + [BQ27XXX_DM_TERMINATE_VOLTAGE] = { 82, 9, 2, 2500, 3700 }, +}; +#else +#define bq27621_dm_regs 0 +#endif + +#define BQ27XXX_O_ZERO 0x00000001 +#define BQ27XXX_O_OTDC 0x00000002 +#define BQ27XXX_O_UTOT 0x00000004 +#define BQ27XXX_O_CFGUP 0x00000008 +#define BQ27XXX_O_RAM 0x00000010 + +#define BQ27XXX_DATA(ref, key, opt) { \ + .opts 
= (opt), \ + .unseal_key = key, \ + .regs = ref##_regs, \ + .dm_regs = ref##_dm_regs, \ + .props = ref##_props, \ + .props_size = ARRAY_SIZE(ref##_props) } static struct { + u32 opts; + u32 unseal_key; + u8 *regs; + struct bq27xxx_dm_reg *dm_regs; enum power_supply_property *props; - size_t size; -} bq27xxx_battery_props[] = { - BQ27XXX_PROP(BQ27000, bq27000_battery_props), - BQ27XXX_PROP(BQ27010, bq27010_battery_props), - BQ27XXX_PROP(BQ2750X, bq2750x_battery_props), - BQ27XXX_PROP(BQ2751X, bq2751x_battery_props), - BQ27XXX_PROP(BQ27500, bq27500_battery_props), - BQ27XXX_PROP(BQ27510G1, bq27510g1_battery_props), - BQ27XXX_PROP(BQ27510G2, bq27510g2_battery_props), - BQ27XXX_PROP(BQ27510G3, bq27510g3_battery_props), - BQ27XXX_PROP(BQ27520G1, bq27520g1_battery_props), - BQ27XXX_PROP(BQ27520G2, bq27520g2_battery_props), - BQ27XXX_PROP(BQ27520G3, bq27520g3_battery_props), - BQ27XXX_PROP(BQ27520G4, bq27520g4_battery_props), - BQ27XXX_PROP(BQ27530, bq27530_battery_props), - BQ27XXX_PROP(BQ27541, bq27541_battery_props), - BQ27XXX_PROP(BQ27545, bq27545_battery_props), - BQ27XXX_PROP(BQ27421, bq27421_battery_props), + size_t props_size; +} bq27xxx_chip_data[] = { + [BQ27000] = BQ27XXX_DATA(bq27000, 0 , BQ27XXX_O_ZERO), + [BQ27010] = BQ27XXX_DATA(bq27010, 0 , BQ27XXX_O_ZERO), + [BQ2750X] = BQ27XXX_DATA(bq2750x, 0 , BQ27XXX_O_OTDC), + [BQ2751X] = BQ27XXX_DATA(bq2751x, 0 , BQ27XXX_O_OTDC), + [BQ2752X] = BQ27XXX_DATA(bq2752x, 0 , BQ27XXX_O_OTDC), + [BQ27500] = BQ27XXX_DATA(bq27500, 0x04143672, BQ27XXX_O_OTDC), + [BQ27510G1] = BQ27XXX_DATA(bq27510g1, 0 , BQ27XXX_O_OTDC), + [BQ27510G2] = BQ27XXX_DATA(bq27510g2, 0 , BQ27XXX_O_OTDC), + [BQ27510G3] = BQ27XXX_DATA(bq27510g3, 0 , BQ27XXX_O_OTDC), + [BQ27520G1] = BQ27XXX_DATA(bq27520g1, 0 , BQ27XXX_O_OTDC), + [BQ27520G2] = BQ27XXX_DATA(bq27520g2, 0 , BQ27XXX_O_OTDC), + [BQ27520G3] = BQ27XXX_DATA(bq27520g3, 0 , BQ27XXX_O_OTDC), + [BQ27520G4] = BQ27XXX_DATA(bq27520g4, 0 , BQ27XXX_O_OTDC), + [BQ27530] = BQ27XXX_DATA(bq27530, 0 , BQ27XXX_O_UTOT), + [BQ27531] = BQ27XXX_DATA(bq27531, 0 , BQ27XXX_O_UTOT), + [BQ27541] = BQ27XXX_DATA(bq27541, 0 , BQ27XXX_O_OTDC), + [BQ27542] = BQ27XXX_DATA(bq27542, 0 , BQ27XXX_O_OTDC), + [BQ27546] = BQ27XXX_DATA(bq27546, 0 , BQ27XXX_O_OTDC), + [BQ27742] = BQ27XXX_DATA(bq27742, 0 , BQ27XXX_O_OTDC), + [BQ27545] = BQ27XXX_DATA(bq27545, 0x04143672, BQ27XXX_O_OTDC), + [BQ27421] = BQ27XXX_DATA(bq27421, 0x80008000, BQ27XXX_O_UTOT | BQ27XXX_O_CFGUP | BQ27XXX_O_RAM), + [BQ27425] = BQ27XXX_DATA(bq27425, 0x04143672, BQ27XXX_O_UTOT | BQ27XXX_O_CFGUP), + [BQ27441] = BQ27XXX_DATA(bq27441, 0x80008000, BQ27XXX_O_UTOT | BQ27XXX_O_CFGUP | BQ27XXX_O_RAM), + [BQ27621] = BQ27XXX_DATA(bq27621, 0x80008000, BQ27XXX_O_UTOT | BQ27XXX_O_CFGUP | BQ27XXX_O_RAM), }; static DEFINE_MUTEX(bq27xxx_list_lock); @@ -805,13 +771,6 @@ static LIST_HEAD(bq27xxx_battery_devices); #define BQ27XXX_DM_SZ 32 -struct bq27xxx_dm_reg { - u8 subclass_id; - u8 offset; - u8 bytes; - u16 min, max; -}; - /** * struct bq27xxx_dm_buf - chip data memory buffer * @class: data memory subclass_id @@ -844,12 +803,6 @@ static inline u16 *bq27xxx_dm_reg_ptr(struct bq27xxx_dm_buf *buf, return NULL; } -enum bq27xxx_dm_reg_id { - BQ27XXX_DM_DESIGN_CAPACITY = 0, - BQ27XXX_DM_DESIGN_ENERGY, - BQ27XXX_DM_TERMINATE_VOLTAGE, -}; - static const char * const bq27xxx_dm_reg_name[] = { [BQ27XXX_DM_DESIGN_CAPACITY] = "design-capacity", [BQ27XXX_DM_DESIGN_ENERGY] = "design-energy", @@ -1092,9 +1045,9 @@ static void bq27xxx_battery_update_dm_block(struct bq27xxx_device_info *di, } #ifdef 
CONFIG_BATTERY_BQ27XXX_DT_UPDATES_NVM - if (!di->ram_chip && !bq27xxx_dt_to_nvm) { + if (!(di->opts & BQ27XXX_O_RAM) && !bq27xxx_dt_to_nvm) { #else - if (!di->ram_chip) { + if (!(di->opts & BQ27XXX_O_RAM)) { #endif /* devicetree and NVM differ; defer to NVM */ dev_warn(di->dev, "%s has %u; update to %u disallowed " @@ -1130,7 +1083,7 @@ static int bq27xxx_battery_cfgupdate_priv(struct bq27xxx_device_info *di, bool a return ret; } while (!!(ret & BQ27XXX_FLAG_CFGUP) != active && --try); - if (!try) { + if (!try && di->chip != BQ27425) { // 425 has a bug dev_err(di->dev, "timed out waiting for cfgupdate flag %d\n", active); return -EINVAL; } @@ -1162,7 +1115,7 @@ static inline int bq27xxx_battery_soft_reset(struct bq27xxx_device_info *di) static int bq27xxx_battery_write_dm_block(struct bq27xxx_device_info *di, struct bq27xxx_dm_buf *buf) { - bool cfgup = di->chip == BQ27421; /* assume related chips need cfgupdate */ + bool cfgup = di->opts & BQ27XXX_O_CFGUP; int ret; if (!buf->dirty) @@ -1261,7 +1214,7 @@ static void bq27xxx_battery_set_config(struct bq27xxx_device_info *di, bq27xxx_battery_seal(di); - if (updated && di->chip != BQ27421) { /* not a cfgupdate chip, so reset */ + if (updated && !(di->opts & BQ27XXX_O_CFGUP)) { bq27xxx_write(di, BQ27XXX_REG_CTRL, BQ27XXX_RESET, false); BQ27XXX_MSLEEP(300); /* reset time is not documented */ } @@ -1328,7 +1281,7 @@ static int bq27xxx_battery_read_soc(struct bq27xxx_device_info *di) { int soc; - if (di->chip == BQ27000 || di->chip == BQ27010) + if (di->opts & BQ27XXX_O_ZERO) soc = bq27xxx_read(di, BQ27XXX_REG_SOC, true); else soc = bq27xxx_read(di, BQ27XXX_REG_SOC, false); @@ -1354,7 +1307,7 @@ static int bq27xxx_battery_read_charge(struct bq27xxx_device_info *di, u8 reg) return charge; } - if (di->chip == BQ27000 || di->chip == BQ27010) + if (di->opts & BQ27XXX_O_ZERO) charge *= BQ27XXX_CURRENT_CONSTANT / BQ27XXX_RS; else charge *= 1000; @@ -1370,7 +1323,7 @@ static inline int bq27xxx_battery_read_nac(struct bq27xxx_device_info *di) { int flags; - if (di->chip == BQ27000 || di->chip == BQ27010) { + if (di->opts & BQ27XXX_O_ZERO) { flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, true); if (flags >= 0 && (flags & BQ27000_FLAG_CI)) return -ENODATA; @@ -1396,7 +1349,7 @@ static int bq27xxx_battery_read_dcap(struct bq27xxx_device_info *di) { int dcap; - if (di->chip == BQ27000 || di->chip == BQ27010) + if (di->opts & BQ27XXX_O_ZERO) dcap = bq27xxx_read(di, BQ27XXX_REG_DCAP, true); else dcap = bq27xxx_read(di, BQ27XXX_REG_DCAP, false); @@ -1406,7 +1359,7 @@ static int bq27xxx_battery_read_dcap(struct bq27xxx_device_info *di) return dcap; } - if (di->chip == BQ27000 || di->chip == BQ27010) + if (di->opts & BQ27XXX_O_ZERO) dcap = (dcap << 8) * BQ27XXX_CURRENT_CONSTANT / BQ27XXX_RS; else dcap *= 1000; @@ -1428,7 +1381,7 @@ static int bq27xxx_battery_read_energy(struct bq27xxx_device_info *di) return ae; } - if (di->chip == BQ27000 || di->chip == BQ27010) + if (di->opts & BQ27XXX_O_ZERO) ae *= BQ27XXX_POWER_CONSTANT / BQ27XXX_RS; else ae *= 1000; @@ -1450,7 +1403,7 @@ static int bq27xxx_battery_read_temperature(struct bq27xxx_device_info *di) return temp; } - if (di->chip == BQ27000 || di->chip == BQ27010) + if (di->opts & BQ27XXX_O_ZERO) temp = 5 * temp / 2; return temp; @@ -1507,7 +1460,7 @@ static int bq27xxx_battery_read_pwr_avg(struct bq27xxx_device_info *di) return tval; } - if (di->chip == BQ27000 || di->chip == BQ27010) + if (di->opts & BQ27XXX_O_ZERO) return (tval * BQ27XXX_POWER_CONSTANT) / BQ27XXX_RS; else return tval; @@ -1518,26 +1471,12 @@ 
static int bq27xxx_battery_read_pwr_avg(struct bq27xxx_device_info *di) */ static bool bq27xxx_battery_overtemp(struct bq27xxx_device_info *di, u16 flags) { - switch (di->chip) { - case BQ2750X: - case BQ2751X: - case BQ27500: - case BQ27510G1: - case BQ27510G2: - case BQ27510G3: - case BQ27520G1: - case BQ27520G2: - case BQ27520G3: - case BQ27520G4: - case BQ27541: - case BQ27545: + if (di->opts & BQ27XXX_O_OTDC) return flags & (BQ27XXX_FLAG_OTC | BQ27XXX_FLAG_OTD); - case BQ27530: - case BQ27421: + if (di->opts & BQ27XXX_O_UTOT) return flags & BQ27XXX_FLAG_OT; - default: - return false; - } + + return false; } /* @@ -1545,7 +1484,7 @@ static bool bq27xxx_battery_overtemp(struct bq27xxx_device_info *di, u16 flags) */ static bool bq27xxx_battery_undertemp(struct bq27xxx_device_info *di, u16 flags) { - if (di->chip == BQ27530 || di->chip == BQ27421) + if (di->opts & BQ27XXX_O_UTOT) return flags & BQ27XXX_FLAG_UT; return false; @@ -1556,7 +1495,7 @@ static bool bq27xxx_battery_undertemp(struct bq27xxx_device_info *di, u16 flags) */ static bool bq27xxx_battery_dead(struct bq27xxx_device_info *di, u16 flags) { - if (di->chip == BQ27000 || di->chip == BQ27010) + if (di->opts & BQ27XXX_O_ZERO) return flags & (BQ27000_FLAG_EDV1 | BQ27000_FLAG_EDVF); else return flags & (BQ27XXX_FLAG_SOC1 | BQ27XXX_FLAG_SOCF); @@ -1569,7 +1508,7 @@ static bool bq27xxx_battery_dead(struct bq27xxx_device_info *di, u16 flags) static int bq27xxx_battery_read_health(struct bq27xxx_device_info *di) { int flags; - bool has_singe_flag = di->chip == BQ27000 || di->chip == BQ27010; + bool has_singe_flag = di->opts & BQ27XXX_O_ZERO; flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, has_singe_flag); if (flags < 0) { @@ -1591,8 +1530,8 @@ static int bq27xxx_battery_read_health(struct bq27xxx_device_info *di) void bq27xxx_battery_update(struct bq27xxx_device_info *di) { struct bq27xxx_reg_cache cache = {0, }; - bool has_ci_flag = di->chip == BQ27000 || di->chip == BQ27010; - bool has_singe_flag = di->chip == BQ27000 || di->chip == BQ27010; + bool has_ci_flag = di->opts & BQ27XXX_O_ZERO; + bool has_singe_flag = di->opts & BQ27XXX_O_ZERO; cache.flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, has_singe_flag); if ((cache.flags & 0xff) == 0xff) @@ -1670,7 +1609,7 @@ static int bq27xxx_battery_current(struct bq27xxx_device_info *di, return curr; } - if (di->chip == BQ27000 || di->chip == BQ27010) { + if (di->opts & BQ27XXX_O_ZERO) { flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, true); if (flags & BQ27000_FLAG_CHGS) { dev_dbg(di->dev, "negative current!\n"); @@ -1691,7 +1630,7 @@ static int bq27xxx_battery_status(struct bq27xxx_device_info *di, { int status; - if (di->chip == BQ27000 || di->chip == BQ27010) { + if (di->opts & BQ27XXX_O_ZERO) { if (di->cache.flags & BQ27000_FLAG_FC) status = POWER_SUPPLY_STATUS_FULL; else if (di->cache.flags & BQ27000_FLAG_CHGS) @@ -1719,7 +1658,7 @@ static int bq27xxx_battery_capacity_level(struct bq27xxx_device_info *di, { int level; - if (di->chip == BQ27000 || di->chip == BQ27010) { + if (di->opts & BQ27XXX_O_ZERO) { if (di->cache.flags & BQ27000_FLAG_FC) level = POWER_SUPPLY_CAPACITY_LEVEL_FULL; else if (di->cache.flags & BQ27000_FLAG_EDV1) @@ -1884,7 +1823,11 @@ int bq27xxx_battery_setup(struct bq27xxx_device_info *di) INIT_DELAYED_WORK(&di->work, bq27xxx_battery_poll); mutex_init(&di->lock); - di->regs = bq27xxx_regs[di->chip]; + + di->regs = bq27xxx_chip_data[di->chip].regs; + di->unseal_key = bq27xxx_chip_data[di->chip].unseal_key; + di->dm_regs = bq27xxx_chip_data[di->chip].dm_regs; + di->opts = 
bq27xxx_chip_data[di->chip].opts; psy_desc = devm_kzalloc(di->dev, sizeof(*psy_desc), GFP_KERNEL); if (!psy_desc) @@ -1892,8 +1835,8 @@ int bq27xxx_battery_setup(struct bq27xxx_device_info *di) psy_desc->name = di->name; psy_desc->type = POWER_SUPPLY_TYPE_BATTERY; - psy_desc->properties = bq27xxx_battery_props[di->chip].props; - psy_desc->num_properties = bq27xxx_battery_props[di->chip].size; + psy_desc->properties = bq27xxx_chip_data[di->chip].props; + psy_desc->num_properties = bq27xxx_chip_data[di->chip].props_size; psy_desc->get_property = bq27xxx_battery_get_property; psy_desc->external_power_changed = bq27xxx_external_power_changed; @@ -1903,8 +1846,6 @@ int bq27xxx_battery_setup(struct bq27xxx_device_info *di) return PTR_ERR(di->bat); } - dev_info(di->dev, "support ver. %s enabled\n", DRIVER_VERSION); - bq27xxx_battery_settings(di); bq27xxx_battery_update(di); @@ -1938,110 +1879,6 @@ void bq27xxx_battery_teardown(struct bq27xxx_device_info *di) } EXPORT_SYMBOL_GPL(bq27xxx_battery_teardown); -static int bq27xxx_battery_platform_read(struct bq27xxx_device_info *di, u8 reg, - bool single) -{ - struct device *dev = di->dev; - struct bq27xxx_platform_data *pdata = dev->platform_data; - unsigned int timeout = 3; - int upper, lower; - int temp; - - if (!single) { - /* Make sure the value has not changed in between reading the - * lower and the upper part */ - upper = pdata->read(dev, reg + 1); - do { - temp = upper; - if (upper < 0) - return upper; - - lower = pdata->read(dev, reg); - if (lower < 0) - return lower; - - upper = pdata->read(dev, reg + 1); - } while (temp != upper && --timeout); - - if (timeout == 0) - return -EIO; - - return (upper << 8) | lower; - } - - return pdata->read(dev, reg); -} - -static int bq27xxx_battery_platform_probe(struct platform_device *pdev) -{ - struct bq27xxx_device_info *di; - struct bq27xxx_platform_data *pdata = pdev->dev.platform_data; - - if (!pdata) { - dev_err(&pdev->dev, "no platform_data supplied\n"); - return -EINVAL; - } - - if (!pdata->read) { - dev_err(&pdev->dev, "no hdq read callback supplied\n"); - return -EINVAL; - } - - if (!pdata->chip) { - dev_err(&pdev->dev, "no device supplied\n"); - return -EINVAL; - } - - di = devm_kzalloc(&pdev->dev, sizeof(*di), GFP_KERNEL); - if (!di) - return -ENOMEM; - - platform_set_drvdata(pdev, di); - - di->dev = &pdev->dev; - di->chip = pdata->chip; - di->name = pdata->name ?: dev_name(&pdev->dev); - di->bus.read = bq27xxx_battery_platform_read; - - return bq27xxx_battery_setup(di); -} - -static int bq27xxx_battery_platform_remove(struct platform_device *pdev) -{ - struct bq27xxx_device_info *di = platform_get_drvdata(pdev); - - bq27xxx_battery_teardown(di); - - return 0; -} - -static const struct platform_device_id bq27xxx_battery_platform_id_table[] = { - { "bq27000-battery", }, - { /* sentinel */ } -}; -MODULE_DEVICE_TABLE(platform, bq27xxx_battery_platform_id_table); - -#ifdef CONFIG_OF -static const struct of_device_id bq27xxx_battery_platform_of_match_table[] = { - { .compatible = "ti,bq27000" }, - {}, -}; -MODULE_DEVICE_TABLE(of, bq27xxx_battery_platform_of_match_table); -#endif - -static struct platform_driver bq27xxx_battery_platform_driver = { - .probe = bq27xxx_battery_platform_probe, - .remove = bq27xxx_battery_platform_remove, - .driver = { - .name = "bq27000-battery", - .of_match_table = of_match_ptr(bq27xxx_battery_platform_of_match_table), - }, - .id_table = bq27xxx_battery_platform_id_table, -}; -module_platform_driver(bq27xxx_battery_platform_driver); - 
-MODULE_ALIAS("platform:bq27000-battery"); - MODULE_AUTHOR("Rodolfo Giometti "); MODULE_DESCRIPTION("BQ27xxx battery monitor driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/power/supply/bq27xxx_battery_hdq.c b/drivers/power/supply/bq27xxx_battery_hdq.c new file mode 100644 index 000000000000..9aff896c9802 --- /dev/null +++ b/drivers/power/supply/bq27xxx_battery_hdq.c @@ -0,0 +1,135 @@ +/* + * BQ27xxx battery monitor HDQ/1-wire driver + * + * Copyright (C) 2007-2017 Texas Instruments Incorporated - http://www.ti.com/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#define W1_FAMILY_BQ27000 0x01 + +#define HDQ_CMD_READ (0 << 7) +#define HDQ_CMD_WRITE (1 << 7) + +static int F_ID; +module_param(F_ID, int, S_IRUSR); +MODULE_PARM_DESC(F_ID, "1-wire slave FID for BQ27xxx device"); + +static int w1_bq27000_read(struct w1_slave *sl, unsigned int reg) +{ + u8 val; + + mutex_lock(&sl->master->bus_mutex); + w1_write_8(sl->master, HDQ_CMD_READ | reg); + val = w1_read_8(sl->master); + mutex_unlock(&sl->master->bus_mutex); + + return val; +} + +static int bq27xxx_battery_hdq_read(struct bq27xxx_device_info *di, u8 reg, + bool single) +{ + struct w1_slave *sl = dev_to_w1_slave(di->dev); + unsigned int timeout = 3; + int upper, lower; + int temp; + + if (!single) { + /* + * Make sure the value has not changed in between reading the + * lower and the upper part + */ + upper = w1_bq27000_read(sl, reg + 1); + do { + temp = upper; + if (upper < 0) + return upper; + + lower = w1_bq27000_read(sl, reg); + if (lower < 0) + return lower; + + upper = w1_bq27000_read(sl, reg + 1); + } while (temp != upper && --timeout); + + if (timeout == 0) + return -EIO; + + return (upper << 8) | lower; + } + + return w1_bq27000_read(sl, reg); +} + +static int bq27xxx_battery_hdq_add_slave(struct w1_slave *sl) +{ + struct bq27xxx_device_info *di; + + di = devm_kzalloc(&sl->dev, sizeof(*di), GFP_KERNEL); + if (!di) + return -ENOMEM; + + dev_set_drvdata(&sl->dev, di); + + di->dev = &sl->dev; + di->chip = BQ27000; + di->name = "bq27000-battery"; + di->bus.read = bq27xxx_battery_hdq_read; + + return bq27xxx_battery_setup(di); +} + +static void bq27xxx_battery_hdq_remove_slave(struct w1_slave *sl) +{ + struct bq27xxx_device_info *di = dev_get_drvdata(&sl->dev); + + bq27xxx_battery_teardown(di); +} + +static struct w1_family_ops bq27xxx_battery_hdq_fops = { + .add_slave = bq27xxx_battery_hdq_add_slave, + .remove_slave = bq27xxx_battery_hdq_remove_slave, +}; + +static struct w1_family bq27xxx_battery_hdq_family = { + .fid = W1_FAMILY_BQ27000, + .fops = &bq27xxx_battery_hdq_fops, +}; + +static int __init bq27xxx_battery_hdq_init(void) +{ + if (F_ID) + bq27xxx_battery_hdq_family.fid = F_ID; + + return w1_register_family(&bq27xxx_battery_hdq_family); +} +module_init(bq27xxx_battery_hdq_init); + +static void __exit bq27xxx_battery_hdq_exit(void) +{ + w1_unregister_family(&bq27xxx_battery_hdq_family); +} +module_exit(bq27xxx_battery_hdq_exit); + +MODULE_AUTHOR("Texas Instruments Ltd"); +MODULE_DESCRIPTION("BQ27xxx battery monitor HDQ/1-wire 
driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_BQ27000)); diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c index a5972214f074..0b11ed472f33 100644 --- a/drivers/power/supply/bq27xxx_battery_i2c.c +++ b/drivers/power/supply/bq27xxx_battery_i2c.c @@ -230,7 +230,7 @@ static const struct i2c_device_id bq27xxx_i2c_id_table[] = { { "bq27210", BQ27010 }, { "bq27500", BQ2750X }, { "bq27510", BQ2751X }, - { "bq27520", BQ2751X }, + { "bq27520", BQ2752X }, { "bq27500-1", BQ27500 }, { "bq27510g1", BQ27510G1 }, { "bq27510g2", BQ27510G2 }, @@ -240,16 +240,16 @@ static const struct i2c_device_id bq27xxx_i2c_id_table[] = { { "bq27520g3", BQ27520G3 }, { "bq27520g4", BQ27520G4 }, { "bq27530", BQ27530 }, - { "bq27531", BQ27530 }, + { "bq27531", BQ27531 }, { "bq27541", BQ27541 }, - { "bq27542", BQ27541 }, - { "bq27546", BQ27541 }, - { "bq27742", BQ27541 }, + { "bq27542", BQ27542 }, + { "bq27546", BQ27546 }, + { "bq27742", BQ27742 }, { "bq27545", BQ27545 }, { "bq27421", BQ27421 }, - { "bq27425", BQ27421 }, - { "bq27441", BQ27421 }, - { "bq27621", BQ27421 }, + { "bq27425", BQ27425 }, + { "bq27441", BQ27441 }, + { "bq27621", BQ27621 }, {}, }; MODULE_DEVICE_TABLE(i2c, bq27xxx_i2c_id_table); diff --git a/drivers/power/supply/charger-manager.c b/drivers/power/supply/charger-manager.c index adc3761831e1..6502fa7c2106 100644 --- a/drivers/power/supply/charger-manager.c +++ b/drivers/power/supply/charger-manager.c @@ -1632,8 +1632,7 @@ static int charger_manager_probe(struct platform_device *pdev) return -ENODEV; } - cm = devm_kzalloc(&pdev->dev, - sizeof(struct charger_manager), GFP_KERNEL); + cm = devm_kzalloc(&pdev->dev, sizeof(*cm), GFP_KERNEL); if (!cm) return -ENOMEM; @@ -1645,12 +1644,14 @@ static int charger_manager_probe(struct platform_device *pdev) /* Initialize alarm timer */ if (alarmtimer_get_rtcdev()) { cm_timer = devm_kzalloc(cm->dev, sizeof(*cm_timer), GFP_KERNEL); + if (!cm_timer) + return -ENOMEM; alarm_init(cm_timer, ALARM_BOOTTIME, cm_timer_func); } /* - * The following two do not need to be errors. - * Users may intentionally ignore those two features. + * Some of the following do not need to be errors. + * Users may intentionally ignore those features. 
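+	 * A zero fullbatt_uV, for example, only means the full-battery
+	 * voltage threshold is ignored by the check right below; the probe
+	 * itself still succeeds.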
*/ if (desc->fullbatt_uV == 0) { dev_info(&pdev->dev, "Ignoring full-battery voltage threshold as it is not supplied\n"); diff --git a/drivers/power/supply/ds2780_battery.c b/drivers/power/supply/ds2780_battery.c index 8edd4aa5f475..e5d81b493c45 100644 --- a/drivers/power/supply/ds2780_battery.c +++ b/drivers/power/supply/ds2780_battery.c @@ -663,7 +663,7 @@ static ssize_t ds2780_write_param_eeprom_bin(struct file *filp, return count; } -static struct bin_attribute ds2780_param_eeprom_bin_attr = { +static const struct bin_attribute ds2780_param_eeprom_bin_attr = { .attr = { .name = "param_eeprom", .mode = S_IRUGO | S_IWUSR, @@ -708,7 +708,7 @@ static ssize_t ds2780_write_user_eeprom_bin(struct file *filp, return count; } -static struct bin_attribute ds2780_user_eeprom_bin_attr = { +static const struct bin_attribute ds2780_user_eeprom_bin_attr = { .attr = { .name = "user_eeprom", .mode = S_IRUGO | S_IWUSR, diff --git a/drivers/power/supply/ds2781_battery.c b/drivers/power/supply/ds2781_battery.c index 4400402f9ec5..efe83ef8670c 100644 --- a/drivers/power/supply/ds2781_battery.c +++ b/drivers/power/supply/ds2781_battery.c @@ -665,7 +665,7 @@ static ssize_t ds2781_write_param_eeprom_bin(struct file *filp, return count; } -static struct bin_attribute ds2781_param_eeprom_bin_attr = { +static const struct bin_attribute ds2781_param_eeprom_bin_attr = { .attr = { .name = "param_eeprom", .mode = S_IRUGO | S_IWUSR, @@ -711,7 +711,7 @@ static ssize_t ds2781_write_user_eeprom_bin(struct file *filp, return count; } -static struct bin_attribute ds2781_user_eeprom_bin_attr = { +static const struct bin_attribute ds2781_user_eeprom_bin_attr = { .attr = { .name = "user_eeprom", .mode = S_IRUGO | S_IWUSR, diff --git a/drivers/power/supply/lp8788-charger.c b/drivers/power/supply/lp8788-charger.c index 677f7c40b25a..0f3432795f3c 100644 --- a/drivers/power/supply/lp8788-charger.c +++ b/drivers/power/supply/lp8788-charger.c @@ -626,7 +626,7 @@ static ssize_t lp8788_show_charger_status(struct device *dev, { struct lp8788_charger *pchg = dev_get_drvdata(dev); enum lp8788_charging_state state; - char *desc[LP8788_MAX_CHG_STATE] = { + static const char * const desc[LP8788_MAX_CHG_STATE] = { [LP8788_OFF] = "CHARGER OFF", [LP8788_WARM_UP] = "WARM UP", [LP8788_LOW_INPUT] = "LOW INPUT STATE", @@ -650,8 +650,10 @@ static ssize_t lp8788_show_eoc_time(struct device *dev, struct device_attribute *attr, char *buf) { struct lp8788_charger *pchg = dev_get_drvdata(dev); - char *stime[] = { "400ms", "5min", "10min", "15min", - "20min", "25min", "30min", "No timeout" }; + static const char * const stime[] = { + "400ms", "5min", "10min", "15min", + "20min", "25min", "30min", "No timeout" + }; u8 val; lp8788_read_byte(pchg->lp, LP8788_CHG_EOC, &val); @@ -665,9 +667,13 @@ static ssize_t lp8788_show_eoc_level(struct device *dev, struct device_attribute *attr, char *buf) { struct lp8788_charger *pchg = dev_get_drvdata(dev); - char *abs_level[] = { "25mA", "49mA", "75mA", "98mA" }; - char *relative_level[] = { "5%", "10%", "15%", "20%" }; - char *level; + static const char * const abs_level[] = { + "25mA", "49mA", "75mA", "98mA" + }; + static const char * const relative_level[] = { + "5%", "10%", "15%", "20%" + }; + const char *level; u8 val; u8 mode; diff --git a/drivers/power/supply/ltc2941-battery-gauge.c b/drivers/power/supply/ltc2941-battery-gauge.c index 7efb908f4451..08e4fd9ee607 100644 --- a/drivers/power/supply/ltc2941-battery-gauge.c +++ b/drivers/power/supply/ltc2941-battery-gauge.c @@ -1,6 +1,6 @@ /* - * I2C client/driver 
for the Linear Technology LTC2941 and LTC2943 - * Battery Gas Gauge IC + * I2C client/driver for the Linear Technology LTC2941, LTC2942, LTC2943 + * and LTC2944 Battery Gas Gauge IC * * Copyright (C) 2014 Topic Embedded Systems * @@ -34,35 +34,39 @@ enum ltc294x_reg { LTC294X_REG_CONTROL = 0x01, LTC294X_REG_ACC_CHARGE_MSB = 0x02, LTC294X_REG_ACC_CHARGE_LSB = 0x03, - LTC294X_REG_THRESH_HIGH_MSB = 0x04, - LTC294X_REG_THRESH_HIGH_LSB = 0x05, - LTC294X_REG_THRESH_LOW_MSB = 0x06, - LTC294X_REG_THRESH_LOW_LSB = 0x07, - LTC294X_REG_VOLTAGE_MSB = 0x08, - LTC294X_REG_VOLTAGE_LSB = 0x09, - LTC294X_REG_CURRENT_MSB = 0x0E, - LTC294X_REG_CURRENT_LSB = 0x0F, - LTC294X_REG_TEMPERATURE_MSB = 0x14, - LTC294X_REG_TEMPERATURE_LSB = 0x15, + LTC294X_REG_VOLTAGE_MSB = 0x08, + LTC294X_REG_VOLTAGE_LSB = 0x09, + LTC2942_REG_TEMPERATURE_MSB = 0x0C, + LTC2942_REG_TEMPERATURE_LSB = 0x0D, + LTC2943_REG_CURRENT_MSB = 0x0E, + LTC2943_REG_CURRENT_LSB = 0x0F, + LTC2943_REG_TEMPERATURE_MSB = 0x14, + LTC2943_REG_TEMPERATURE_LSB = 0x15, }; -#define LTC2943_REG_CONTROL_MODE_MASK (BIT(7) | BIT(6)) -#define LTC2943_REG_CONTROL_MODE_SCAN BIT(7) +enum ltc294x_id { + LTC2941_ID, + LTC2942_ID, + LTC2943_ID, + LTC2944_ID, +}; + +#define LTC2941_REG_STATUS_CHIP_ID BIT(7) + +#define LTC2942_REG_CONTROL_MODE_SCAN (BIT(7) | BIT(6)) +#define LTC2943_REG_CONTROL_MODE_SCAN BIT(7) #define LTC294X_REG_CONTROL_PRESCALER_MASK (BIT(5) | BIT(4) | BIT(3)) #define LTC294X_REG_CONTROL_SHUTDOWN_MASK (BIT(0)) #define LTC294X_REG_CONTROL_PRESCALER_SET(x) \ ((x << 3) & LTC294X_REG_CONTROL_PRESCALER_MASK) #define LTC294X_REG_CONTROL_ALCC_CONFIG_DISABLED 0 -#define LTC2941_NUM_REGS 0x08 -#define LTC2943_NUM_REGS 0x18 - struct ltc294x_info { struct i2c_client *client; /* I2C Client pointer */ struct power_supply *supply; /* Supply pointer */ struct power_supply_desc supply_desc; /* Supply description */ struct delayed_work work; /* Work scheduler */ - unsigned long num_regs; /* Number of registers (chip type) */ + enum ltc294x_id id; /* Chip type */ int charge; /* Last charge register content */ int r_sense; /* mOhm */ int Qlsb; /* nAh */ @@ -145,9 +149,18 @@ static int ltc294x_reset(const struct ltc294x_info *info, int prescaler_exp) control = LTC294X_REG_CONTROL_PRESCALER_SET(prescaler_exp) | LTC294X_REG_CONTROL_ALCC_CONFIG_DISABLED; - /* Put the 2943 into "monitor" mode, so it measures every 10 sec */ - if (info->num_regs == LTC2943_NUM_REGS) + /* Put device into "monitor" mode */ + switch (info->id) { + case LTC2942_ID: /* 2942 measures every 2 sec */ + control |= LTC2942_REG_CONTROL_MODE_SCAN; + break; + case LTC2943_ID: + case LTC2944_ID: /* 2943 and 2944 measure every 10 sec */ control |= LTC2943_REG_CONTROL_MODE_SCAN; + break; + default: + break; + } if (value != control) { ret = ltc294x_write_regs(info->client, @@ -252,7 +265,24 @@ static int ltc294x_get_voltage(const struct ltc294x_info *info, int *val) ret = ltc294x_read_regs(info->client, LTC294X_REG_VOLTAGE_MSB, &datar[0], 2); value = (datar[0] << 8) | datar[1]; - *val = ((value * 23600) / 0xFFFF) * 1000; /* in uV */ + switch (info->id) { + case LTC2943_ID: + value *= 23600 * 2; + value /= 0xFFFF; + value *= 1000 / 2; + break; + case LTC2944_ID: + value *= 70800 / 5*4; + value /= 0xFFFF; + value *= 1000 * 5/4; + break; + default: + value *= 6000 * 10; + value /= 0xFFFF; + value *= 1000 / 10; + break; + } + *val = value; return ret; } @@ -263,27 +293,38 @@ static int ltc294x_get_current(const struct ltc294x_info *info, int *val) s32 value; ret = ltc294x_read_regs(info->client, - 
LTC294X_REG_CURRENT_MSB, &datar[0], 2); + LTC2943_REG_CURRENT_MSB, &datar[0], 2); value = (datar[0] << 8) | datar[1]; value -= 0x7FFF; + if (info->id == LTC2944_ID) + value *= 64000; + else + value *= 60000; /* Value is in range -32k..+32k, r_sense is usually 10..50 mOhm, * the formula below keeps everything in s32 range while preserving * enough digits */ - *val = 1000 * ((60000 * value) / (info->r_sense * 0x7FFF)); /* in uA */ + *val = 1000 * (value / (info->r_sense * 0x7FFF)); /* in uA */ return ret; } static int ltc294x_get_temperature(const struct ltc294x_info *info, int *val) { + enum ltc294x_reg reg; int ret; u8 datar[2]; u32 value; - ret = ltc294x_read_regs(info->client, - LTC294X_REG_TEMPERATURE_MSB, &datar[0], 2); - value = (datar[0] << 8) | datar[1]; - /* Full-scale is 510 Kelvin, convert to centidegrees */ - *val = (((51000 * value) / 0xFFFF) - 27215); + if (info->id == LTC2942_ID) { + reg = LTC2942_REG_TEMPERATURE_MSB; + value = 60000; /* Full-scale is 600 Kelvin */ + } else { + reg = LTC2943_REG_TEMPERATURE_MSB; + value = 51000; /* Full-scale is 510 Kelvin */ + } + ret = ltc294x_read_regs(info->client, reg, &datar[0], 2); + value *= (datar[0] << 8) | datar[1]; + /* Convert to centidegrees */ + *val = value / 0xFFFF - 27215; return ret; } @@ -357,8 +398,8 @@ static enum power_supply_property ltc294x_properties[] = { POWER_SUPPLY_PROP_CHARGE_COUNTER, POWER_SUPPLY_PROP_CHARGE_NOW, POWER_SUPPLY_PROP_VOLTAGE_NOW, - POWER_SUPPLY_PROP_CURRENT_NOW, POWER_SUPPLY_PROP_TEMP, + POWER_SUPPLY_PROP_CURRENT_NOW, }; static int ltc294x_i2c_remove(struct i2c_client *client) @@ -375,10 +416,11 @@ static int ltc294x_i2c_probe(struct i2c_client *client, { struct power_supply_config psy_cfg = {}; struct ltc294x_info *info; + struct device_node *np; int ret; u32 prescaler_exp; s32 r_sense; - struct device_node *np; + u8 status; info = devm_kzalloc(&client->dev, sizeof(*info), GFP_KERNEL); if (info == NULL) @@ -388,7 +430,7 @@ static int ltc294x_i2c_probe(struct i2c_client *client, np = of_node_get(client->dev.of_node); - info->num_regs = (unsigned long)of_device_get_match_data(&client->dev); + info->id = (enum ltc294x_id)of_device_get_match_data(&client->dev); info->supply_desc.name = np->name; /* r_sense can be negative, when sense+ is connected to the battery @@ -409,7 +451,7 @@ static int ltc294x_i2c_probe(struct i2c_client *client, prescaler_exp = LTC2941_MAX_PRESCALER_EXP; } - if (info->num_regs == LTC2943_NUM_REGS) { + if (info->id == LTC2943_ID) { if (prescaler_exp > LTC2943_MAX_PRESCALER_EXP) prescaler_exp = LTC2943_MAX_PRESCALER_EXP; info->Qlsb = ((340 * 50000) / r_sense) / @@ -421,21 +463,39 @@ static int ltc294x_i2c_probe(struct i2c_client *client, (128 / (1 << prescaler_exp)); } + /* Read status register to check for LTC2942 */ + if (info->id == LTC2941_ID || info->id == LTC2942_ID) { + ret = ltc294x_read_regs(client, LTC294X_REG_STATUS, &status, 1); + if (ret < 0) { + dev_err(&client->dev, + "Could not read status register\n"); + return ret; + } + if (status & LTC2941_REG_STATUS_CHIP_ID) + info->id = LTC2941_ID; + else + info->id = LTC2942_ID; + } + info->client = client; info->supply_desc.type = POWER_SUPPLY_TYPE_BATTERY; info->supply_desc.properties = ltc294x_properties; - if (info->num_regs >= LTC294X_REG_TEMPERATURE_LSB) + switch (info->id) { + case LTC2944_ID: + case LTC2943_ID: info->supply_desc.num_properties = ARRAY_SIZE(ltc294x_properties); - else if (info->num_regs >= LTC294X_REG_CURRENT_LSB) + break; + case LTC2942_ID: info->supply_desc.num_properties = 
ARRAY_SIZE(ltc294x_properties) - 1; - else if (info->num_regs >= LTC294X_REG_VOLTAGE_LSB) - info->supply_desc.num_properties = - ARRAY_SIZE(ltc294x_properties) - 2; - else + break; + case LTC2941_ID: + default: info->supply_desc.num_properties = ARRAY_SIZE(ltc294x_properties) - 3; + break; + } info->supply_desc.get_property = ltc294x_get_property; info->supply_desc.set_property = ltc294x_set_property; info->supply_desc.property_is_writeable = ltc294x_property_is_writeable; @@ -492,8 +552,10 @@ static SIMPLE_DEV_PM_OPS(ltc294x_pm_ops, ltc294x_suspend, ltc294x_resume); static const struct i2c_device_id ltc294x_i2c_id[] = { - {"ltc2941", LTC2941_NUM_REGS}, - {"ltc2943", LTC2943_NUM_REGS}, + { "ltc2941", LTC2941_ID, }, + { "ltc2942", LTC2942_ID, }, + { "ltc2943", LTC2943_ID, }, + { "ltc2944", LTC2944_ID, }, { }, }; MODULE_DEVICE_TABLE(i2c, ltc294x_i2c_id); @@ -501,11 +563,19 @@ MODULE_DEVICE_TABLE(i2c, ltc294x_i2c_id); static const struct of_device_id ltc294x_i2c_of_match[] = { { .compatible = "lltc,ltc2941", - .data = (void *)LTC2941_NUM_REGS + .data = (void *)LTC2941_ID, + }, + { + .compatible = "lltc,ltc2942", + .data = (void *)LTC2942_ID, }, { .compatible = "lltc,ltc2943", - .data = (void *)LTC2943_NUM_REGS + .data = (void *)LTC2943_ID, + }, + { + .compatible = "lltc,ltc2944", + .data = (void *)LTC2944_ID, }, { }, }; @@ -525,5 +595,5 @@ module_i2c_driver(ltc294x_driver); MODULE_AUTHOR("Auryn Verwegen, Topic Embedded Systems"); MODULE_AUTHOR("Mike Looijmans, Topic Embedded Products"); -MODULE_DESCRIPTION("LTC2941/LTC2943 Battery Gas Gauge IC driver"); +MODULE_DESCRIPTION("LTC2941/LTC2942/LTC2943/LTC2944 Battery Gas Gauge IC driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c index aecaaa2b0586..5b556a13f517 100644 --- a/drivers/power/supply/max17042_battery.c +++ b/drivers/power/supply/max17042_battery.c @@ -22,6 +22,7 @@ * This driver is based on max17040_battery.c */ +#include #include #include #include @@ -982,6 +983,8 @@ static int max17042_probe(struct i2c_client *client, struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); const struct power_supply_desc *max17042_desc = &max17042_psy_desc; struct power_supply_config psy_cfg = {}; + const struct acpi_device_id *acpi_id = NULL; + struct device *dev = &client->dev; struct max17042_chip *chip; int ret; int i; @@ -995,7 +998,15 @@ static int max17042_probe(struct i2c_client *client, return -ENOMEM; chip->client = client; - chip->chip_type = id->driver_data; + if (id) { + chip->chip_type = id->driver_data; + } else { + acpi_id = acpi_match_device(dev->driver->acpi_match_table, dev); + if (!acpi_id) + return -ENODEV; + + chip->chip_type = acpi_id->driver_data; + } chip->regmap = devm_regmap_init_i2c(client, &max17042_regmap_config); if (IS_ERR(chip->regmap)) { dev_err(&client->dev, "Failed to initialize regmap\n"); @@ -1039,11 +1050,18 @@ static int max17042_probe(struct i2c_client *client, } if (client->irq) { + unsigned int flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT; + + /* + * On ACPI systems the IRQ may be handled by ACPI-event code, + * so we need to share (if the ACPI code is willing to share). 
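+		 * IRQF_PROBE_SHARED also keeps the core from logging a
+		 * flags-mismatch error if the other side refuses to share;
+		 * that refusal comes back as -EBUSY and is tolerated below.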
+ */ + if (acpi_id) + flags |= IRQF_SHARED | IRQF_PROBE_SHARED; + ret = devm_request_threaded_irq(&client->dev, client->irq, NULL, - max17042_thread_handler, - IRQF_TRIGGER_FALLING | - IRQF_ONESHOT, + max17042_thread_handler, flags, chip->battery->desc->name, chip); if (!ret) { @@ -1053,10 +1071,13 @@ static int max17042_probe(struct i2c_client *client, max17042_set_soc_threshold(chip, 1); } else { client->irq = 0; - dev_err(&client->dev, "%s(): cannot get IRQ\n", - __func__); + if (ret != -EBUSY) + dev_err(&client->dev, "Failed to get IRQ\n"); } } + /* Not able to update the charge threshold when exceeded? -> disable */ + if (!client->irq) + regmap_write(chip->regmap, MAX17042_SALRT_Th, 0xff00); regmap_read(chip->regmap, MAX17042_STATUS, &val); if (val & STATUS_POR_BIT) { @@ -1104,6 +1125,14 @@ static int max17042_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(max17042_pm_ops, max17042_suspend, max17042_resume); +#ifdef CONFIG_ACPI +static const struct acpi_device_id max17042_acpi_match[] = { + { "MAX17047", MAXIM_DEVICE_TYPE_MAX17047 }, + { } +}; +MODULE_DEVICE_TABLE(acpi, max17042_acpi_match); +#endif + #ifdef CONFIG_OF static const struct of_device_id max17042_dt_match[] = { { .compatible = "maxim,max17042" }, @@ -1125,6 +1154,7 @@ MODULE_DEVICE_TABLE(i2c, max17042_id); static struct i2c_driver max17042_i2c_driver = { .driver = { .name = "max17042", + .acpi_match_table = ACPI_PTR(max17042_acpi_match), .of_match_table = of_match_ptr(max17042_dt_match), .pm = &max17042_pm_ops, }, diff --git a/drivers/power/supply/max1721x_battery.c b/drivers/power/supply/max1721x_battery.c new file mode 100644 index 000000000000..9ee601a03d9b --- /dev/null +++ b/drivers/power/supply/max1721x_battery.c @@ -0,0 +1,448 @@ +/* + * 1-Wire implementation for Maxim Semiconductor + * MAX7211/MAX17215 stanalone fuel gauge chip + * + * Copyright (C) 2017 Radioavionica Corporation + * Author: Alex A. Mihaylov + * + * Use consistent with the GNU GPL is permitted, + * provided that this copyright notice is + * preserved in its entirety in all copies and derived works. 
+ * + */ + +#include +#include +#include +#include +#include + +#define W1_MAX1721X_FAMILY_ID 0x26 +#define DEF_DEV_NAME_MAX17211 "MAX17211" +#define DEF_DEV_NAME_MAX17215 "MAX17215" +#define DEF_DEV_NAME_UNKNOWN "UNKNOWN" +#define DEF_MFG_NAME "MAXIM" + +#define PSY_MAX_NAME_LEN 32 + +/* Number of valid register addresses in W1 mode */ +#define MAX1721X_MAX_REG_NR 0x1EF + +/* Factory settings (nonvilatile registers) (W1 specific) */ +#define MAX1721X_REG_NRSENSE 0x1CF /* RSense in 10^-5 Ohm */ +/* Strings */ +#define MAX1721X_REG_MFG_STR 0x1CC +#define MAX1721X_REG_MFG_NUMB 3 +#define MAX1721X_REG_DEV_STR 0x1DB +#define MAX1721X_REG_DEV_NUMB 5 +/* HEX Strings */ +#define MAX1721X_REG_SER_HEX 0x1D8 + +/* MAX172XX Output Registers for W1 chips */ +#define MAX172XX_REG_STATUS 0x000 /* status reg */ +#define MAX172XX_BAT_PRESENT (1<<4) /* battery connected bit */ +#define MAX172XX_REG_DEVNAME 0x021 /* chip config */ +#define MAX172XX_DEV_MASK 0x000F /* chip type mask */ +#define MAX172X1_DEV 0x0001 +#define MAX172X5_DEV 0x0005 +#define MAX172XX_REG_TEMP 0x008 /* Temperature */ +#define MAX172XX_REG_BATT 0x0DA /* Battery voltage */ +#define MAX172XX_REG_CURRENT 0x00A /* Actual current */ +#define MAX172XX_REG_AVGCURRENT 0x00B /* Average current */ +#define MAX172XX_REG_REPSOC 0x006 /* Percentage of charge */ +#define MAX172XX_REG_DESIGNCAP 0x018 /* Design capacity */ +#define MAX172XX_REG_REPCAP 0x005 /* Average capacity */ +#define MAX172XX_REG_TTE 0x011 /* Time to empty */ +#define MAX172XX_REG_TTF 0x020 /* Time to full */ + +struct max17211_device_info { + char name[PSY_MAX_NAME_LEN]; + struct power_supply *bat; + struct power_supply_desc bat_desc; + struct device *w1_dev; + struct regmap *regmap; + /* battery design format */ + unsigned int rsense; /* in tenths uOhm */ + char DeviceName[2 * MAX1721X_REG_DEV_NUMB + 1]; + char ManufacturerName[2 * MAX1721X_REG_MFG_NUMB + 1]; + char SerialNumber[13]; /* see get_sn_str() later for comment */ +}; + +/* Convert regs value to power_supply units */ + +static inline int max172xx_time_to_ps(unsigned int reg) +{ + return reg * 5625 / 1000; /* in sec. */ +} + +static inline int max172xx_percent_to_ps(unsigned int reg) +{ + return reg / 256; /* in percent from 0 to 100 */ +} + +static inline int max172xx_voltage_to_ps(unsigned int reg) +{ + return reg * 1250; /* in uV */ +} + +static inline int max172xx_capacity_to_ps(unsigned int reg) +{ + return reg * 500; /* in uAh */ +} + +/* + * Current and temperature is signed values, so unsigned regs + * value must be converted to signed type + */ + +static inline int max172xx_temperature_to_ps(unsigned int reg) +{ + int val = (int16_t)(reg); + + return val * 10 / 256; /* in tenths of deg. C */ +} + +/* + * Calculating current registers resolution: + * + * RSense stored in 10^-5 Ohm, so mesaurment voltage must be + * in 10^-11 Volts for get current in uA. + * 16 bit current reg fullscale +/-51.2mV is 102400 uV. 
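+ * One LSB across the sense resistor is 102400 uV / 65535 ~= 1.5625 uV;
+ * e.g. a raw reading of 1600 over a 10 mOhm shunt (NRSENSE = 1000) is
+ * about 2.5 mV, i.e. roughly 250 mA.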
+ * So: 102400 / 65535 * 10^5 = 156252 + */ +static inline int max172xx_current_to_voltage(unsigned int reg) +{ + int val = (int16_t)(reg); + + return val * 156252; +} + + +static inline struct max17211_device_info * +to_device_info(struct power_supply *psy) +{ + return power_supply_get_drvdata(psy); +} + +static int max1721x_battery_get_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + struct max17211_device_info *info = to_device_info(psy); + unsigned int reg = 0; + int ret = 0; + + switch (psp) { + case POWER_SUPPLY_PROP_PRESENT: + /* + * POWER_SUPPLY_PROP_PRESENT will always readable via + * sysfs interface. Value return 0 if battery not + * present or unaccesable via W1. + */ + val->intval = + regmap_read(info->regmap, MAX172XX_REG_STATUS, + ®) ? 0 : !(reg & MAX172XX_BAT_PRESENT); + break; + case POWER_SUPPLY_PROP_CAPACITY: + ret = regmap_read(info->regmap, MAX172XX_REG_REPSOC, ®); + val->intval = max172xx_percent_to_ps(reg); + break; + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + ret = regmap_read(info->regmap, MAX172XX_REG_BATT, ®); + val->intval = max172xx_voltage_to_ps(reg); + break; + case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: + ret = regmap_read(info->regmap, MAX172XX_REG_DESIGNCAP, ®); + val->intval = max172xx_capacity_to_ps(reg); + break; + case POWER_SUPPLY_PROP_CHARGE_AVG: + ret = regmap_read(info->regmap, MAX172XX_REG_REPCAP, ®); + val->intval = max172xx_capacity_to_ps(reg); + break; + case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG: + ret = regmap_read(info->regmap, MAX172XX_REG_TTE, ®); + val->intval = max172xx_time_to_ps(reg); + break; + case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG: + ret = regmap_read(info->regmap, MAX172XX_REG_TTF, ®); + val->intval = max172xx_time_to_ps(reg); + break; + case POWER_SUPPLY_PROP_TEMP: + ret = regmap_read(info->regmap, MAX172XX_REG_TEMP, ®); + val->intval = max172xx_temperature_to_ps(reg); + break; + /* We need signed current, so must cast info->rsense to signed type */ + case POWER_SUPPLY_PROP_CURRENT_NOW: + ret = regmap_read(info->regmap, MAX172XX_REG_CURRENT, ®); + val->intval = + max172xx_current_to_voltage(reg) / (int)info->rsense; + break; + case POWER_SUPPLY_PROP_CURRENT_AVG: + ret = regmap_read(info->regmap, MAX172XX_REG_AVGCURRENT, ®); + val->intval = + max172xx_current_to_voltage(reg) / (int)info->rsense; + break; + /* + * Strings already received and inited by probe. + * We do dummy read for check battery still available. 
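+ * The dummy read's return code still ends up in ret, so a battery that
+ * has gone away is reported as an error even though strval keeps
+ * pointing at the string cached at probe time.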
+ */
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ ret = regmap_read(info->regmap, MAX1721X_REG_DEV_STR, &reg);
+ val->strval = info->DeviceName;
+ break;
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ ret = regmap_read(info->regmap, MAX1721X_REG_MFG_STR, &reg);
+ val->strval = info->ManufacturerName;
+ break;
+ case POWER_SUPPLY_PROP_SERIAL_NUMBER:
+ ret = regmap_read(info->regmap, MAX1721X_REG_SER_HEX, &reg);
+ val->strval = info->SerialNumber;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static enum power_supply_property max1721x_battery_props[] = {
+ /* int */
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_AVG,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
+ POWER_SUPPLY_PROP_TIME_TO_FULL_AVG,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CURRENT_AVG,
+ /* strings */
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_SERIAL_NUMBER,
+};
+
+static int get_string(struct max17211_device_info *info,
+ uint16_t reg, uint8_t nr, char *str)
+{
+ unsigned int val;
+
+ if (!str || !(reg == MAX1721X_REG_MFG_STR ||
+ reg == MAX1721X_REG_DEV_STR))
+ return -EFAULT;
+
+ while (nr--) {
+ if (regmap_read(info->regmap, reg++, &val))
+ return -EFAULT;
+ *str++ = val>>8 & 0x00FF;
+ *str++ = val & 0x00FF;
+ }
+ return 0;
+}
+
+/* Maxim says: the serial number is a hex string of up to 12 hex characters */
+static int get_sn_string(struct max17211_device_info *info, char *str)
+{
+ unsigned int val[3];
+
+ if (!str)
+ return -EFAULT;
+
+ if (regmap_read(info->regmap, MAX1721X_REG_SER_HEX, &val[0]))
+ return -EFAULT;
+ if (regmap_read(info->regmap, MAX1721X_REG_SER_HEX + 1, &val[1]))
+ return -EFAULT;
+ if (regmap_read(info->regmap, MAX1721X_REG_SER_HEX + 2, &val[2]))
+ return -EFAULT;
+
+ snprintf(str, 13, "%04X%04X%04X", val[0], val[1], val[2]);
+ return 0;
+}
+
+/*
+ * MAX1721x registers description for w1-regmap
+ */
+static const struct regmap_range max1721x_allow_range[] = {
+ regmap_reg_range(0, 0xDF), /* volatile data */
+ regmap_reg_range(0x180, 0x1DF), /* non-volatile memory */
+ regmap_reg_range(0x1E0, 0x1EF), /* non-volatile history (unused) */
+};
+
+static const struct regmap_range max1721x_deny_range[] = {
+ /* volatile data unused registers */
+ regmap_reg_range(0x24, 0x26),
+ regmap_reg_range(0x30, 0x31),
+ regmap_reg_range(0x33, 0x34),
+ regmap_reg_range(0x37, 0x37),
+ regmap_reg_range(0x3B, 0x3C),
+ regmap_reg_range(0x40, 0x41),
+ regmap_reg_range(0x43, 0x44),
+ regmap_reg_range(0x47, 0x49),
+ regmap_reg_range(0x4B, 0x4C),
+ regmap_reg_range(0x4E, 0xAF),
+ regmap_reg_range(0xB1, 0xB3),
+ regmap_reg_range(0xB5, 0xB7),
+ regmap_reg_range(0xBF, 0xD0),
+ regmap_reg_range(0xDB, 0xDB),
+ /* hole between volatile and non-volatile registers */
+ regmap_reg_range(0xE0, 0x17F),
+};
+
+static const struct regmap_access_table max1721x_regs = {
+ .yes_ranges = max1721x_allow_range,
+ .n_yes_ranges = ARRAY_SIZE(max1721x_allow_range),
+ .no_ranges = max1721x_deny_range,
+ .n_no_ranges = ARRAY_SIZE(max1721x_deny_range),
+};
+
+/*
+ * Model Gauge M5 Algorithm output register
+ * Volatile data (must not be cached)
+ */
+static const struct regmap_range max1721x_volatile_allow[] = {
+ regmap_reg_range(0, 0xDF),
+};
+
+static const struct regmap_access_table max1721x_volatile_regs = {
+ .yes_ranges = max1721x_volatile_allow,
+ .n_yes_ranges = ARRAY_SIZE(max1721x_volatile_allow),
+};
+
+/*
+ * W1-regmap config
+ */
+static const struct regmap_config
max1721x_regmap_w1_config = {
+ .reg_bits = 16,
+ .val_bits = 16,
+ .rd_table = &max1721x_regs,
+ .volatile_table = &max1721x_volatile_regs,
+ .max_register = MAX1721X_MAX_REG_NR,
+};
+
+static int devm_w1_max1721x_add_device(struct w1_slave *sl)
+{
+ struct power_supply_config psy_cfg = {};
+ struct max17211_device_info *info;
+
+ info = devm_kzalloc(&sl->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ sl->family_data = (void *)info;
+ info->w1_dev = &sl->dev;
+
+ /*
+ * The power_supply class battery name is translated from the W1 slave
+ * device unique ID (which looks like 26-0123456789AB) into
+ * "max1721x-0123456789AB\0", where 26 (the device family) corresponds
+ * to max1721x devices. The device name stays unique for any number of
+ * connected devices.
+ */
+ snprintf(info->name, sizeof(info->name),
+ "max1721x-%012X", (unsigned int)sl->reg_num.id);
+ info->bat_desc.name = info->name;
+
+ /*
+ * FixMe: the battery device name exceeds the maximum length allowed
+ * for a thermal_zone device name, so translation to a thermal_zone
+ * must be disabled.
+ */
+ info->bat_desc.no_thermal = true;
+ info->bat_desc.type = POWER_SUPPLY_TYPE_BATTERY;
+ info->bat_desc.properties = max1721x_battery_props;
+ info->bat_desc.num_properties = ARRAY_SIZE(max1721x_battery_props);
+ info->bat_desc.get_property = max1721x_battery_get_property;
+ psy_cfg.drv_data = info;
+
+ /* regmap init */
+ info->regmap = devm_regmap_init_w1(info->w1_dev,
+ &max1721x_regmap_w1_config);
+ if (IS_ERR(info->regmap)) {
+ int err = PTR_ERR(info->regmap);
+
+ dev_err(info->w1_dev, "Failed to allocate register map: %d\n",
+ err);
+ return err;
+ }
+
+ /* rsense init */
+ info->rsense = 0;
+ if (regmap_read(info->regmap, MAX1721X_REG_NRSENSE, &info->rsense)) {
+ dev_err(info->w1_dev, "Can't read RSense. Hardware error.\n");
+ return -ENODEV;
+ }
+
+ if (!info->rsense) {
+ dev_warn(info->w1_dev, "RSense not calibrated, set 10 mOhms!\n");
+ info->rsense = 1000; /* in units of 10^-5 Ohm */
+ }
+ dev_info(info->w1_dev, "RSense: %d mOhms.\n", info->rsense / 100);
+
+ if (get_string(info, MAX1721X_REG_MFG_STR,
+ MAX1721X_REG_MFG_NUMB, info->ManufacturerName)) {
+ dev_err(info->w1_dev, "Can't read manufacturer. Hardware error.\n");
+ return -ENODEV;
+ }
+
+ if (!info->ManufacturerName[0])
+ strncpy(info->ManufacturerName, DEF_MFG_NAME,
+ 2 * MAX1721X_REG_MFG_NUMB);
+
+ if (get_string(info, MAX1721X_REG_DEV_STR,
+ MAX1721X_REG_DEV_NUMB, info->DeviceName)) {
+ dev_err(info->w1_dev, "Can't read device. Hardware error.\n");
+ return -ENODEV;
+ }
+ if (!info->DeviceName[0]) {
+ unsigned int dev_name;
+
+ if (regmap_read(info->regmap,
+ MAX172XX_REG_DEVNAME, &dev_name)) {
+ dev_err(info->w1_dev, "Can't read device name reg.\n");
+ return -ENODEV;
+ }
+
+ switch (dev_name & MAX172XX_DEV_MASK) {
+ case MAX172X1_DEV:
+ strncpy(info->DeviceName, DEF_DEV_NAME_MAX17211,
+ 2 * MAX1721X_REG_DEV_NUMB);
+ break;
+ case MAX172X5_DEV:
+ strncpy(info->DeviceName, DEF_DEV_NAME_MAX17215,
+ 2 * MAX1721X_REG_DEV_NUMB);
+ break;
+ default:
+ strncpy(info->DeviceName, DEF_DEV_NAME_UNKNOWN,
+ 2 * MAX1721X_REG_DEV_NUMB);
+ }
+ }
+
+ if (get_sn_string(info, info->SerialNumber)) {
+ dev_err(info->w1_dev, "Can't read serial. 
Hardware error.\n"); + return -ENODEV; + } + + info->bat = devm_power_supply_register(&sl->dev, &info->bat_desc, + &psy_cfg); + if (IS_ERR(info->bat)) { + dev_err(info->w1_dev, "failed to register battery\n"); + return PTR_ERR(info->bat); + } + + return 0; +} + +static struct w1_family_ops w1_max1721x_fops = { + .add_slave = devm_w1_max1721x_add_device, +}; + +static struct w1_family w1_max1721x_family = { + .fid = W1_MAX1721X_FAMILY_ID, + .fops = &w1_max1721x_fops, +}; + +module_w1_family(w1_max1721x_family); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Alex A. Mihaylov "); +MODULE_DESCRIPTION("Maxim MAX17211/MAX17215 Fuel Gauage IC driver"); +MODULE_ALIAS("w1-family-" __stringify(W1_MAX1721X_FAMILY_ID)); diff --git a/drivers/power/supply/olpc_battery.c b/drivers/power/supply/olpc_battery.c index 9e29b1321648..3bc2eea7b3b7 100644 --- a/drivers/power/supply/olpc_battery.c +++ b/drivers/power/supply/olpc_battery.c @@ -535,7 +535,7 @@ static ssize_t olpc_bat_eeprom_read(struct file *filp, struct kobject *kobj, return count; } -static struct bin_attribute olpc_bat_eeprom = { +static const struct bin_attribute olpc_bat_eeprom = { .attr = { .name = "eeprom", .mode = S_IRUGO, @@ -559,7 +559,7 @@ static ssize_t olpc_bat_error_read(struct device *dev, return sprintf(buf, "%d\n", ec_byte); } -static struct device_attribute olpc_bat_error = { +static const struct device_attribute olpc_bat_error = { .attr = { .name = "error", .mode = S_IRUGO, diff --git a/drivers/power/supply/pcf50633-charger.c b/drivers/power/supply/pcf50633-charger.c index b3c1873ad84d..1ad7ccce6075 100644 --- a/drivers/power/supply/pcf50633-charger.c +++ b/drivers/power/supply/pcf50633-charger.c @@ -254,7 +254,7 @@ static struct attribute *pcf50633_mbc_sysfs_entries[] = { NULL, }; -static struct attribute_group mbc_attr_group = { +static const struct attribute_group mbc_attr_group = { .name = NULL, /* put in device directory */ .attrs = pcf50633_mbc_sysfs_entries, }; diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c index 540d3e0aa011..02c6340ae36f 100644 --- a/drivers/power/supply/power_supply_core.c +++ b/drivers/power/supply/power_supply_core.c @@ -259,18 +259,14 @@ static int power_supply_check_supplies(struct power_supply *psy) /* All supplies found, allocate char ** array for filling */ psy->supplied_from = devm_kzalloc(&psy->dev, sizeof(psy->supplied_from), GFP_KERNEL); - if (!psy->supplied_from) { - dev_err(&psy->dev, "Couldn't allocate memory for supply list\n"); + if (!psy->supplied_from) return -ENOMEM; - } *psy->supplied_from = devm_kzalloc(&psy->dev, sizeof(char *) * (cnt - 1), GFP_KERNEL); - if (!*psy->supplied_from) { - dev_err(&psy->dev, "Couldn't allocate memory for supply list\n"); + if (!*psy->supplied_from) return -ENOMEM; - } return power_supply_populate_supplied_from(psy); } @@ -314,11 +310,12 @@ static int __power_supply_am_i_supplied(struct device *dev, void *_data) struct power_supply *epsy = dev_get_drvdata(dev); struct psy_am_i_supplied_data *data = _data; - data->count++; - if (__power_supply_is_supplied_by(epsy, data->psy)) + if (__power_supply_is_supplied_by(epsy, data->psy)) { + data->count++; if (!epsy->desc->get_property(epsy, POWER_SUPPLY_PROP_ONLINE, &ret)) return ret.intval; + } return 0; } @@ -374,6 +371,47 @@ int power_supply_is_system_supplied(void) } EXPORT_SYMBOL_GPL(power_supply_is_system_supplied); +static int __power_supply_get_supplier_max_current(struct device *dev, + void *data) +{ + union power_supply_propval ret = {0,}; + struct 
power_supply *epsy = dev_get_drvdata(dev); + struct power_supply *psy = data; + + if (__power_supply_is_supplied_by(epsy, psy)) + if (!epsy->desc->get_property(epsy, + POWER_SUPPLY_PROP_CURRENT_MAX, + &ret)) + return ret.intval; + + return 0; +} + +int power_supply_set_input_current_limit_from_supplier(struct power_supply *psy) +{ + union power_supply_propval val = {0,}; + int curr; + + if (!psy->desc->set_property) + return -EINVAL; + + /* + * This function is not intended for use with a supply with multiple + * suppliers, we simply pick the first supply to report a non 0 + * max-current. + */ + curr = class_for_each_device(power_supply_class, NULL, psy, + __power_supply_get_supplier_max_current); + if (curr <= 0) + return (curr == 0) ? -ENODEV : curr; + + val.intval = curr; + + return psy->desc->set_property(psy, + POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT, &val); +} +EXPORT_SYMBOL_GPL(power_supply_set_input_current_limit_from_supplier); + int power_supply_set_battery_charged(struct power_supply *psy) { if (atomic_read(&psy->use_cnt) >= 0 && diff --git a/drivers/power/supply/sbs-battery.c b/drivers/power/supply/sbs-battery.c index f7059459f0fb..b19a73176910 100644 --- a/drivers/power/supply/sbs-battery.c +++ b/drivers/power/supply/sbs-battery.c @@ -12,25 +12,21 @@ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ -#include -#include -#include +#include #include -#include -#include -#include -#include #include +#include +#include +#include +#include +#include #include -#include - #include +#include +#include +#include enum { REG_MANUFACTURER_DATA, @@ -60,8 +56,8 @@ enum { #define BATTERY_MODE_OFFSET 0x03 #define BATTERY_MODE_MASK 0x8000 enum sbs_battery_mode { - BATTERY_MODE_AMPS, - BATTERY_MODE_WATTS + BATTERY_MODE_AMPS = 0, + BATTERY_MODE_WATTS = 0x8000 }; /* manufacturer access defines */ @@ -532,6 +528,8 @@ static enum sbs_battery_mode sbs_set_battery_mode(struct i2c_client *client, if (ret < 0) return ret; + usleep_range(1000, 2000); + return original_val & BATTERY_MODE_MASK; } diff --git a/drivers/power/supply/twl4030_charger.c b/drivers/power/supply/twl4030_charger.c index 9dff1b4b85fc..a5915f498eea 100644 --- a/drivers/power/supply/twl4030_charger.c +++ b/drivers/power/supply/twl4030_charger.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/pps/Kconfig b/drivers/pps/Kconfig index 4b29a7182d7b..c6008f296605 100644 --- a/drivers/pps/Kconfig +++ b/drivers/pps/Kconfig @@ -19,9 +19,10 @@ menuconfig PPS To compile this driver as a module, choose M here: the module will be called pps_core.ko. +if PPS + config PPS_DEBUG bool "PPS debugging messages" - depends on PPS help Say Y here if you want the PPS support to produce a bunch of debug messages to the system log. Select this if you are having a @@ -29,7 +30,7 @@ config PPS_DEBUG config NTP_PPS bool "PPS kernel consumer support" - depends on PPS && !NO_HZ_COMMON + depends on !NO_HZ_COMMON help This option adds support for direct in-kernel time synchronization using an external PPS signal. 
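Stepping back to the power_supply core hunk above: power_supply_set_input_current_limit_from_supplier() walks the registered supplies, takes the first supplier of the given supply that reports a non-zero POWER_SUPPLY_PROP_CURRENT_MAX, and writes that value back to the calling supply as POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT. A minimal sketch of how a charger driver might call it (the callback and names here are hypothetical, not taken from this patch):

#include <linux/power_supply.h>

/*
 * Hypothetical charger driver hook: "psy" is the charger's own supply,
 * registered with a desc that implements set_property() for
 * POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT.
 */
static void example_charger_external_power_changed(struct power_supply *psy)
{
	int ret;

	/* Adopt the supplier's CURRENT_MAX as our input current limit. */
	ret = power_supply_set_input_current_limit_from_supplier(psy);
	if (ret)
		dev_dbg(&psy->dev, "no usable supplier current limit: %d\n", ret);
}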
@@ -39,3 +40,5 @@ config NTP_PPS source drivers/pps/clients/Kconfig source drivers/pps/generators/Kconfig + +endif # PPS diff --git a/drivers/pps/clients/Kconfig b/drivers/pps/clients/Kconfig index efec021ce662..7f02a9b1a1fd 100644 --- a/drivers/pps/clients/Kconfig +++ b/drivers/pps/clients/Kconfig @@ -3,11 +3,9 @@ # comment "PPS clients support" - depends on PPS config PPS_CLIENT_KTIMER tristate "Kernel timer client (Testing client, use for debug)" - depends on PPS help If you say yes here you get support for a PPS debugging client which uses a kernel timer to generate the PPS signal. @@ -17,21 +15,20 @@ config PPS_CLIENT_KTIMER config PPS_CLIENT_LDISC tristate "PPS line discipline" - depends on PPS && TTY + depends on TTY help If you say yes here you get support for a PPS source connected with the CD (Carrier Detect) pin of your serial port. config PPS_CLIENT_PARPORT tristate "Parallel port PPS client" - depends on PPS && PARPORT + depends on PARPORT help If you say yes here you get support for a PPS source connected with the interrupt pin of your parallel port. config PPS_CLIENT_GPIO tristate "PPS client using GPIO" - depends on PPS help If you say yes here you get support for a PPS source using GPIO. To be useful you must also register a platform device diff --git a/drivers/pps/generators/Kconfig b/drivers/pps/generators/Kconfig index 86b59378e71f..e4c4f3dc0728 100644 --- a/drivers/pps/generators/Kconfig +++ b/drivers/pps/generators/Kconfig @@ -3,11 +3,10 @@ # comment "PPS generators support" - depends on PPS config PPS_GENERATOR_PARPORT tristate "Parallel port PPS signal generator" - depends on PPS && PARPORT && BROKEN + depends on PARPORT && BROKEN help If you say yes here you get support for a PPS signal generator which utilizes STROBE pin of a parallel port to send PPS signals. 
It uses diff --git a/drivers/ptp/ptp_dte.c b/drivers/ptp/ptp_dte.c index faf6f7a83713..6edd3b9c7f01 100644 --- a/drivers/ptp/ptp_dte.c +++ b/drivers/ptp/ptp_dte.c @@ -221,7 +221,7 @@ static int ptp_dte_enable(struct ptp_clock_info *ptp, return -EOPNOTSUPP; } -static struct ptp_clock_info ptp_dte_caps = { +static const struct ptp_clock_info ptp_dte_caps = { .owner = THIS_MODULE, .name = "DTE PTP timer", .max_adj = 50000000, diff --git a/drivers/ptp/ptp_ixp46x.c b/drivers/ptp/ptp_ixp46x.c index 344a3bac210b..1171ffd210b3 100644 --- a/drivers/ptp/ptp_ixp46x.c +++ b/drivers/ptp/ptp_ixp46x.c @@ -236,7 +236,7 @@ static int ptp_ixp_enable(struct ptp_clock_info *ptp, return -EOPNOTSUPP; } -static struct ptp_clock_info ptp_ixp_caps = { +static const struct ptp_clock_info ptp_ixp_caps = { .owner = THIS_MODULE, .name = "IXP46X timer", .max_adj = 66666655, diff --git a/drivers/ptp/ptp_kvm.c b/drivers/ptp/ptp_kvm.c index bb865695d7a6..2b1b212c219e 100644 --- a/drivers/ptp/ptp_kvm.c +++ b/drivers/ptp/ptp_kvm.c @@ -150,7 +150,7 @@ static int ptp_kvm_enable(struct ptp_clock_info *ptp, return -EOPNOTSUPP; } -static struct ptp_clock_info ptp_kvm_caps = { +static const struct ptp_clock_info ptp_kvm_caps = { .owner = THIS_MODULE, .name = "KVM virtual PTP", .max_adj = 0, diff --git a/drivers/ptp/ptp_pch.c b/drivers/ptp/ptp_pch.c index 3aa22ae4d94c..b3285175f20f 100644 --- a/drivers/ptp/ptp_pch.c +++ b/drivers/ptp/ptp_pch.c @@ -509,7 +509,7 @@ static int ptp_pch_enable(struct ptp_clock_info *ptp, return -EOPNOTSUPP; } -static struct ptp_clock_info ptp_pch_caps = { +static const struct ptp_clock_info ptp_pch_caps = { .owner = THIS_MODULE, .name = "PCH timer", .max_adj = 50000000, diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig index 313c10789ca2..763ee50ea57d 100644 --- a/drivers/pwm/Kconfig +++ b/drivers/pwm/Kconfig @@ -300,7 +300,7 @@ config PWM_MEDIATEK Generic PWM framework driver for Mediatek ARM SoC. To compile this driver as a module, choose M here: the module - will be called pwm-mxs. + will be called pwm-mediatek. config PWM_MXS tristate "Freescale MXS PWM support" @@ -417,6 +417,16 @@ config PWM_STM32 To compile this driver as a module, choose M here: the module will be called pwm-stm32. +config PWM_STM32_LP + tristate "STMicroelectronics STM32 PWM LP" + depends on MFD_STM32_LPTIMER || COMPILE_TEST + help + Generic PWM framework driver for STMicroelectronics STM32 SoCs + with Low-Power Timer (LPTIM). + + To compile this driver as a module, choose M here: the module + will be called pwm-stm32-lp. + config PWM_STMPE bool "STMPE expander PWM export" depends on MFD_STMPE @@ -446,7 +456,7 @@ config PWM_TEGRA config PWM_TIECAP tristate "ECAP PWM support" - depends on ARCH_OMAP2PLUS || ARCH_DAVINCI_DA8XX + depends on ARCH_OMAP2PLUS || ARCH_DAVINCI_DA8XX || ARCH_KEYSTONE help PWM driver support for the ECAP APWM controller found on AM33XX TI SOC @@ -500,4 +510,13 @@ config PWM_VT8500 To compile this driver as a module, choose M here: the module will be called pwm-vt8500. +config PWM_ZX + tristate "ZTE ZX PWM support" + depends on ARCH_ZX + help + Generic PWM framework driver for ZTE ZX family SoCs. + + To compile this driver as a module, choose M here: the module + will be called pwm-zx. 
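On the ptp_clock_info const-ification in the ptp_dte/ptp_ixp46x/ptp_kvm/ptp_pch hunks earlier in this run: the capability tables can be const as long as the driver registers a writable copy, which is the pattern those drivers follow. A rough sketch of that pattern (hypothetical names, mandatory clock operation callbacks omitted for brevity):

#include <linux/err.h>
#include <linux/module.h>
#include <linux/ptp_clock_kernel.h>

struct example_ptp_priv {
	struct ptp_clock_info caps;	/* writable copy handed to the core */
	struct ptp_clock *clock;
};

static const struct ptp_clock_info example_ptp_caps = {
	.owner = THIS_MODULE,
	.name = "example PTP",
	.max_adj = 1000000,
	/* .adjfreq/.adjtime/.gettime64/.settime64/.enable omitted here */
};

static int example_ptp_register(struct example_ptp_priv *priv,
				struct device *parent)
{
	priv->caps = example_ptp_caps;	/* copy the const template */
	priv->clock = ptp_clock_register(&priv->caps, parent);

	return PTR_ERR_OR_ZERO(priv->clock);
}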
+ endif diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile index 93da1f79a3b8..ebefba5f528b 100644 --- a/drivers/pwm/Makefile +++ b/drivers/pwm/Makefile @@ -40,6 +40,7 @@ obj-$(CONFIG_PWM_SAMSUNG) += pwm-samsung.o obj-$(CONFIG_PWM_SPEAR) += pwm-spear.o obj-$(CONFIG_PWM_STI) += pwm-sti.o obj-$(CONFIG_PWM_STM32) += pwm-stm32.o +obj-$(CONFIG_PWM_STM32_LP) += pwm-stm32-lp.o obj-$(CONFIG_PWM_STMPE) += pwm-stmpe.o obj-$(CONFIG_PWM_SUN4I) += pwm-sun4i.o obj-$(CONFIG_PWM_TEGRA) += pwm-tegra.o @@ -49,3 +50,4 @@ obj-$(CONFIG_PWM_TIPWMSS) += pwm-tipwmss.o obj-$(CONFIG_PWM_TWL) += pwm-twl.o obj-$(CONFIG_PWM_TWL_LED) += pwm-twl-led.o obj-$(CONFIG_PWM_VT8500) += pwm-vt8500.o +obj-$(CONFIG_PWM_ZX) += pwm-zx.o diff --git a/drivers/pwm/pwm-bcm2835.c b/drivers/pwm/pwm-bcm2835.c index c5dbf16d810b..db001cba937f 100644 --- a/drivers/pwm/pwm-bcm2835.c +++ b/drivers/pwm/pwm-bcm2835.c @@ -167,6 +167,8 @@ static int bcm2835_pwm_probe(struct platform_device *pdev) pc->chip.dev = &pdev->dev; pc->chip.ops = &bcm2835_pwm_ops; pc->chip.npwm = 2; + pc->chip.of_xlate = of_pwm_xlate_with_flags; + pc->chip.of_pwm_n_cells = 3; platform_set_drvdata(pdev, pc); diff --git a/drivers/pwm/pwm-hibvt.c b/drivers/pwm/pwm-hibvt.c index 8dadc58d6cdf..27c107e78d59 100644 --- a/drivers/pwm/pwm-hibvt.c +++ b/drivers/pwm/pwm-hibvt.c @@ -208,7 +208,7 @@ static int hibvt_pwm_probe(struct platform_device *pdev) if (ret < 0) return ret; - pwm_chip->rstc = devm_reset_control_get(&pdev->dev, NULL); + pwm_chip->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); if (IS_ERR(pwm_chip->rstc)) { clk_disable_unprepare(pwm_chip->clk); return PTR_ERR(pwm_chip->rstc); diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c index 5c11bc708a3c..b52f3afb2ba1 100644 --- a/drivers/pwm/pwm-mediatek.c +++ b/drivers/pwm/pwm-mediatek.c @@ -2,6 +2,7 @@ * Mediatek Pulse Width Modulator driver * * Copyright (C) 2015 John Crispin + * Copyright (C) 2017 Zhi Mao * * This file is licensed under the terms of the GNU General Public * License version 2. 
This program is licensed "as is" without any @@ -29,6 +30,8 @@ #define PWMDWIDTH 0x2c #define PWMTHRES 0x30 +#define PWM_CLK_DIV_MAX 7 + enum { MTK_CLK_MAIN = 0, MTK_CLK_TOP, @@ -61,6 +64,42 @@ static inline struct mtk_pwm_chip *to_mtk_pwm_chip(struct pwm_chip *chip) return container_of(chip, struct mtk_pwm_chip, chip); } +static int mtk_pwm_clk_enable(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct mtk_pwm_chip *pc = to_mtk_pwm_chip(chip); + int ret; + + ret = clk_prepare_enable(pc->clks[MTK_CLK_TOP]); + if (ret < 0) + return ret; + + ret = clk_prepare_enable(pc->clks[MTK_CLK_MAIN]); + if (ret < 0) + goto disable_clk_top; + + ret = clk_prepare_enable(pc->clks[MTK_CLK_PWM1 + pwm->hwpwm]); + if (ret < 0) + goto disable_clk_main; + + return 0; + +disable_clk_main: + clk_disable_unprepare(pc->clks[MTK_CLK_MAIN]); +disable_clk_top: + clk_disable_unprepare(pc->clks[MTK_CLK_TOP]); + + return ret; +} + +static void mtk_pwm_clk_disable(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct mtk_pwm_chip *pc = to_mtk_pwm_chip(chip); + + clk_disable_unprepare(pc->clks[MTK_CLK_PWM1 + pwm->hwpwm]); + clk_disable_unprepare(pc->clks[MTK_CLK_MAIN]); + clk_disable_unprepare(pc->clks[MTK_CLK_TOP]); +} + static inline u32 mtk_pwm_readl(struct mtk_pwm_chip *chip, unsigned int num, unsigned int offset) { @@ -80,6 +119,11 @@ static int mtk_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, struct mtk_pwm_chip *pc = to_mtk_pwm_chip(chip); struct clk *clk = pc->clks[MTK_CLK_PWM1 + pwm->hwpwm]; u32 resolution, clkdiv = 0; + int ret; + + ret = mtk_pwm_clk_enable(chip, pwm); + if (ret < 0) + return ret; resolution = NSEC_PER_SEC / clk_get_rate(clk); @@ -88,13 +132,18 @@ static int mtk_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, clkdiv++; } - if (clkdiv > 7) + if (clkdiv > PWM_CLK_DIV_MAX) { + mtk_pwm_clk_disable(chip, pwm); + dev_err(chip->dev, "period %d not supported\n", period_ns); return -EINVAL; + } - mtk_pwm_writel(pc, pwm->hwpwm, PWMCON, BIT(15) | BIT(3) | clkdiv); + mtk_pwm_writel(pc, pwm->hwpwm, PWMCON, BIT(15) | clkdiv); mtk_pwm_writel(pc, pwm->hwpwm, PWMDWIDTH, period_ns / resolution); mtk_pwm_writel(pc, pwm->hwpwm, PWMTHRES, duty_ns / resolution); + mtk_pwm_clk_disable(chip, pwm); + return 0; } @@ -104,7 +153,7 @@ static int mtk_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) u32 value; int ret; - ret = clk_prepare(pc->clks[MTK_CLK_PWM1 + pwm->hwpwm]); + ret = mtk_pwm_clk_enable(chip, pwm); if (ret < 0) return ret; @@ -124,7 +173,7 @@ static void mtk_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) value &= ~BIT(pwm->hwpwm); writel(value, pc->regs); - clk_unprepare(pc->clks[MTK_CLK_PWM1 + pwm->hwpwm]); + mtk_pwm_clk_disable(chip, pwm); } static const struct pwm_ops mtk_pwm_ops = { @@ -156,14 +205,6 @@ static int mtk_pwm_probe(struct platform_device *pdev) return PTR_ERR(pc->clks[i]); } - ret = clk_prepare(pc->clks[MTK_CLK_TOP]); - if (ret < 0) - return ret; - - ret = clk_prepare(pc->clks[MTK_CLK_MAIN]); - if (ret < 0) - goto disable_clk_top; - platform_set_drvdata(pdev, pc); pc->chip.dev = &pdev->dev; @@ -174,26 +215,15 @@ static int mtk_pwm_probe(struct platform_device *pdev) ret = pwmchip_add(&pc->chip); if (ret < 0) { dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret); - goto disable_clk_main; + return ret; } return 0; - -disable_clk_main: - clk_unprepare(pc->clks[MTK_CLK_MAIN]); -disable_clk_top: - clk_unprepare(pc->clks[MTK_CLK_TOP]); - - return ret; } static int mtk_pwm_remove(struct platform_device *pdev) { struct mtk_pwm_chip *pc = 
platform_get_drvdata(pdev); - unsigned int i; - - for (i = 0; i < pc->chip.npwm; i++) - pwm_disable(&pc->chip.pwms[i]); return pwmchip_remove(&pc->chip); } diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c index cb845edfe2b4..d589331d1884 100644 --- a/drivers/pwm/pwm-meson.c +++ b/drivers/pwm/pwm-meson.c @@ -441,7 +441,7 @@ static int meson_pwm_init_channels(struct meson_pwm *meson, for (i = 0; i < meson->chip.npwm; i++) { struct meson_pwm_channel *channel = &channels[i]; - snprintf(name, sizeof(name), "%s#mux%u", np->full_name, i); + snprintf(name, sizeof(name), "%pOF#mux%u", np, i); init.name = name; init.ops = &clk_mux_ops; diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c index 5f55cfab9b1c..a7eaf962a95b 100644 --- a/drivers/pwm/pwm-pca9685.c +++ b/drivers/pwm/pwm-pca9685.c @@ -241,11 +241,11 @@ static inline int pca9685_pwm_gpio_probe(struct pca9685 *pca) } #endif -static void pca9685_set_sleep_mode(struct pca9685 *pca, int sleep) +static void pca9685_set_sleep_mode(struct pca9685 *pca, bool enable) { regmap_update_bits(pca->regmap, PCA9685_MODE1, - MODE1_SLEEP, sleep ? MODE1_SLEEP : 0); - if (!sleep) { + MODE1_SLEEP, enable ? MODE1_SLEEP : 0); + if (!enable) { /* Wait 500us for the oscillator to be back up */ udelay(500); } @@ -272,13 +272,13 @@ static int pca9685_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, * state is guaranteed active here. */ /* Put chip into sleep mode */ - pca9685_set_sleep_mode(pca, 1); + pca9685_set_sleep_mode(pca, true); /* Change the chip-wide output frequency */ regmap_write(pca->regmap, PCA9685_PRESCALE, prescale); /* Wake the chip up */ - pca9685_set_sleep_mode(pca, 0); + pca9685_set_sleep_mode(pca, false); pca->period_ns = period_ns; } else { @@ -534,7 +534,7 @@ static int pca9685_pwm_runtime_suspend(struct device *dev) struct i2c_client *client = to_i2c_client(dev); struct pca9685 *pca = i2c_get_clientdata(client); - pca9685_set_sleep_mode(pca, 1); + pca9685_set_sleep_mode(pca, true); return 0; } @@ -543,7 +543,7 @@ static int pca9685_pwm_runtime_resume(struct device *dev) struct i2c_client *client = to_i2c_client(dev); struct pca9685 *pca = i2c_get_clientdata(client); - pca9685_set_sleep_mode(pca, 0); + pca9685_set_sleep_mode(pca, false); return 0; } #endif diff --git a/drivers/pwm/pwm-renesas-tpu.c b/drivers/pwm/pwm-renesas-tpu.c index 075c1a764ba2..29267d12fb4c 100644 --- a/drivers/pwm/pwm-renesas-tpu.c +++ b/drivers/pwm/pwm-renesas-tpu.c @@ -455,7 +455,6 @@ static const struct of_device_id tpu_of_table[] = { { .compatible = "renesas,tpu-r8a73a4", }, { .compatible = "renesas,tpu-r8a7740", }, { .compatible = "renesas,tpu-r8a7790", }, - { .compatible = "renesas,tpu-sh7372", }, { .compatible = "renesas,tpu", }, { }, }; diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c index 744d56197286..4d99d468df09 100644 --- a/drivers/pwm/pwm-rockchip.c +++ b/drivers/pwm/pwm-rockchip.c @@ -27,12 +27,15 @@ #define PWM_DUTY_NEGATIVE (0 << 3) #define PWM_INACTIVE_NEGATIVE (0 << 4) #define PWM_INACTIVE_POSITIVE (1 << 4) +#define PWM_POLARITY_MASK (PWM_DUTY_POSITIVE | PWM_INACTIVE_POSITIVE) #define PWM_OUTPUT_LEFT (0 << 5) +#define PWM_LOCK_EN (1 << 6) #define PWM_LP_DISABLE (0 << 8) struct rockchip_pwm_chip { struct pwm_chip chip; struct clk *clk; + struct clk *pclk; const struct rockchip_pwm_data *data; void __iomem *base; }; @@ -48,13 +51,8 @@ struct rockchip_pwm_data { struct rockchip_pwm_regs regs; unsigned int prescaler; bool supports_polarity; - const struct pwm_ops *ops; - - void (*set_enable)(struct 
pwm_chip *chip, - struct pwm_device *pwm, bool enable, - enum pwm_polarity polarity); - void (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm, - struct pwm_state *state); + bool supports_lock; + u32 enable_conf; }; static inline struct rockchip_pwm_chip *to_rockchip_pwm_chip(struct pwm_chip *c) @@ -62,90 +60,18 @@ static inline struct rockchip_pwm_chip *to_rockchip_pwm_chip(struct pwm_chip *c) return container_of(c, struct rockchip_pwm_chip, chip); } -static void rockchip_pwm_set_enable_v1(struct pwm_chip *chip, - struct pwm_device *pwm, bool enable, - enum pwm_polarity polarity) -{ - struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip); - u32 enable_conf = PWM_CTRL_OUTPUT_EN | PWM_CTRL_TIMER_EN; - u32 val; - - val = readl_relaxed(pc->base + pc->data->regs.ctrl); - - if (enable) - val |= enable_conf; - else - val &= ~enable_conf; - - writel_relaxed(val, pc->base + pc->data->regs.ctrl); -} - -static void rockchip_pwm_get_state_v1(struct pwm_chip *chip, - struct pwm_device *pwm, - struct pwm_state *state) -{ - struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip); - u32 enable_conf = PWM_CTRL_OUTPUT_EN | PWM_CTRL_TIMER_EN; - u32 val; - - val = readl_relaxed(pc->base + pc->data->regs.ctrl); - if ((val & enable_conf) == enable_conf) - state->enabled = true; -} - -static void rockchip_pwm_set_enable_v2(struct pwm_chip *chip, - struct pwm_device *pwm, bool enable, - enum pwm_polarity polarity) -{ - struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip); - u32 enable_conf = PWM_OUTPUT_LEFT | PWM_LP_DISABLE | PWM_ENABLE | - PWM_CONTINUOUS; - u32 val; - - if (polarity == PWM_POLARITY_INVERSED) - enable_conf |= PWM_DUTY_NEGATIVE | PWM_INACTIVE_POSITIVE; - else - enable_conf |= PWM_DUTY_POSITIVE | PWM_INACTIVE_NEGATIVE; - - val = readl_relaxed(pc->base + pc->data->regs.ctrl); - - if (enable) - val |= enable_conf; - else - val &= ~enable_conf; - - writel_relaxed(val, pc->base + pc->data->regs.ctrl); -} - -static void rockchip_pwm_get_state_v2(struct pwm_chip *chip, - struct pwm_device *pwm, - struct pwm_state *state) -{ - struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip); - u32 enable_conf = PWM_OUTPUT_LEFT | PWM_LP_DISABLE | PWM_ENABLE | - PWM_CONTINUOUS; - u32 val; - - val = readl_relaxed(pc->base + pc->data->regs.ctrl); - if ((val & enable_conf) != enable_conf) - return; - - state->enabled = true; - - if (!(val & PWM_DUTY_POSITIVE)) - state->polarity = PWM_POLARITY_INVERSED; -} - static void rockchip_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm, struct pwm_state *state) { struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip); + u32 enable_conf = pc->data->enable_conf; unsigned long clk_rate; u64 tmp; + u32 val; int ret; - ret = clk_enable(pc->clk); + ret = clk_enable(pc->pclk); if (ret) return; @@ -157,19 +83,31 @@ static void rockchip_pwm_get_state(struct pwm_chip *chip, tmp = readl_relaxed(pc->base + pc->data->regs.duty); tmp *= pc->data->prescaler * NSEC_PER_SEC; - state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, clk_rate); + state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, clk_rate); - pc->data->get_state(chip, pwm, state); + val = readl_relaxed(pc->base + pc->data->regs.ctrl); + if (pc->data->supports_polarity) + state->enabled = ((val & enable_conf) != enable_conf) ? + false : true; + else + state->enabled = ((val & enable_conf) == enable_conf) ? 
+ true : false; - clk_disable(pc->clk); + if (pc->data->supports_polarity) { + if (!(val & PWM_DUTY_POSITIVE)) + state->polarity = PWM_POLARITY_INVERSED; + } + + clk_disable(pc->pclk); } -static int rockchip_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, - int duty_ns, int period_ns) +static void rockchip_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_state *state) { struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip); unsigned long period, duty; u64 clk_rate, div; + u32 ctrl; clk_rate = clk_get_rate(pc->clk); @@ -178,26 +116,53 @@ static int rockchip_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, * bits, every possible input period can be obtained using the * default prescaler value for all practical clock rate values. */ - div = clk_rate * period_ns; + div = clk_rate * state->period; period = DIV_ROUND_CLOSEST_ULL(div, pc->data->prescaler * NSEC_PER_SEC); - div = clk_rate * duty_ns; + div = clk_rate * state->duty_cycle; duty = DIV_ROUND_CLOSEST_ULL(div, pc->data->prescaler * NSEC_PER_SEC); + /* + * Lock the period and duty of previous configuration, then + * change the duty and period, that would not be effective. + */ + ctrl = readl_relaxed(pc->base + pc->data->regs.ctrl); + if (pc->data->supports_lock) { + ctrl |= PWM_LOCK_EN; + writel_relaxed(ctrl, pc->base + pc->data->regs.ctrl); + } + writel(period, pc->base + pc->data->regs.period); writel(duty, pc->base + pc->data->regs.duty); - return 0; + if (pc->data->supports_polarity) { + ctrl &= ~PWM_POLARITY_MASK; + if (state->polarity == PWM_POLARITY_INVERSED) + ctrl |= PWM_DUTY_NEGATIVE | PWM_INACTIVE_POSITIVE; + else + ctrl |= PWM_DUTY_POSITIVE | PWM_INACTIVE_NEGATIVE; + } + + /* + * Unlock and set polarity at the same time, + * the configuration of duty, period and polarity + * would be effective together at next period. 
+ */ + if (pc->data->supports_lock) + ctrl &= ~PWM_LOCK_EN; + + writel(ctrl, pc->base + pc->data->regs.ctrl); } static int rockchip_pwm_enable(struct pwm_chip *chip, - struct pwm_device *pwm, - bool enable, - enum pwm_polarity polarity) + struct pwm_device *pwm, + bool enable) { struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip); + u32 enable_conf = pc->data->enable_conf; int ret; + u32 val; if (enable) { ret = clk_enable(pc->clk); @@ -205,7 +170,14 @@ static int rockchip_pwm_enable(struct pwm_chip *chip, return ret; } - pc->data->set_enable(chip, pwm, enable, polarity); + val = readl_relaxed(pc->base + pc->data->regs.ctrl); + + if (enable) + val |= enable_conf; + else + val &= ~enable_conf; + + writel_relaxed(val, pc->base + pc->data->regs.ctrl); if (!enable) clk_disable(pc->clk); @@ -219,33 +191,26 @@ static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip); struct pwm_state curstate; bool enabled; - int ret; + int ret = 0; + + ret = clk_enable(pc->pclk); + if (ret) + return ret; pwm_get_state(pwm, &curstate); enabled = curstate.enabled; - ret = clk_enable(pc->clk); - if (ret) - return ret; - - if (state->polarity != curstate.polarity && enabled) { - ret = rockchip_pwm_enable(chip, pwm, false, state->polarity); + if (state->polarity != curstate.polarity && enabled && + !pc->data->supports_lock) { + ret = rockchip_pwm_enable(chip, pwm, false); if (ret) goto out; enabled = false; } - ret = rockchip_pwm_config(chip, pwm, state->duty_cycle, state->period); - if (ret) { - if (enabled != curstate.enabled) - rockchip_pwm_enable(chip, pwm, !enabled, - state->polarity); - goto out; - } - + rockchip_pwm_config(chip, pwm, state); if (state->enabled != enabled) { - ret = rockchip_pwm_enable(chip, pwm, state->enabled, - state->polarity); + ret = rockchip_pwm_enable(chip, pwm, state->enabled); if (ret) goto out; } @@ -257,18 +222,12 @@ static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, rockchip_pwm_get_state(chip, pwm, state); out: - clk_disable(pc->clk); + clk_disable(pc->pclk); return ret; } -static const struct pwm_ops rockchip_pwm_ops_v1 = { - .get_state = rockchip_pwm_get_state, - .apply = rockchip_pwm_apply, - .owner = THIS_MODULE, -}; - -static const struct pwm_ops rockchip_pwm_ops_v2 = { +static const struct pwm_ops rockchip_pwm_ops = { .get_state = rockchip_pwm_get_state, .apply = rockchip_pwm_apply, .owner = THIS_MODULE, @@ -282,9 +241,9 @@ static const struct rockchip_pwm_data pwm_data_v1 = { .ctrl = 0x0c, }, .prescaler = 2, - .ops = &rockchip_pwm_ops_v1, - .set_enable = rockchip_pwm_set_enable_v1, - .get_state = rockchip_pwm_get_state_v1, + .supports_polarity = false, + .supports_lock = false, + .enable_conf = PWM_CTRL_OUTPUT_EN | PWM_CTRL_TIMER_EN, }; static const struct rockchip_pwm_data pwm_data_v2 = { @@ -296,9 +255,9 @@ static const struct rockchip_pwm_data pwm_data_v2 = { }, .prescaler = 1, .supports_polarity = true, - .ops = &rockchip_pwm_ops_v2, - .set_enable = rockchip_pwm_set_enable_v2, - .get_state = rockchip_pwm_get_state_v2, + .supports_lock = false, + .enable_conf = PWM_OUTPUT_LEFT | PWM_LP_DISABLE | PWM_ENABLE | + PWM_CONTINUOUS, }; static const struct rockchip_pwm_data pwm_data_vop = { @@ -310,15 +269,30 @@ static const struct rockchip_pwm_data pwm_data_vop = { }, .prescaler = 1, .supports_polarity = true, - .ops = &rockchip_pwm_ops_v2, - .set_enable = rockchip_pwm_set_enable_v2, - .get_state = rockchip_pwm_get_state_v2, + .supports_lock = false, + .enable_conf = 
PWM_OUTPUT_LEFT | PWM_LP_DISABLE | PWM_ENABLE | + PWM_CONTINUOUS, +}; + +static const struct rockchip_pwm_data pwm_data_v3 = { + .regs = { + .duty = 0x08, + .period = 0x04, + .cntr = 0x00, + .ctrl = 0x0c, + }, + .prescaler = 1, + .supports_polarity = true, + .supports_lock = true, + .enable_conf = PWM_OUTPUT_LEFT | PWM_LP_DISABLE | PWM_ENABLE | + PWM_CONTINUOUS, }; static const struct of_device_id rockchip_pwm_dt_ids[] = { { .compatible = "rockchip,rk2928-pwm", .data = &pwm_data_v1}, { .compatible = "rockchip,rk3288-pwm", .data = &pwm_data_v2}, { .compatible = "rockchip,vop-pwm", .data = &pwm_data_vop}, + { .compatible = "rockchip,rk3328-pwm", .data = &pwm_data_v3}, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, rockchip_pwm_dt_ids); @@ -328,7 +302,7 @@ static int rockchip_pwm_probe(struct platform_device *pdev) const struct of_device_id *id; struct rockchip_pwm_chip *pc; struct resource *r; - int ret; + int ret, count; id = of_match_device(rockchip_pwm_dt_ids, &pdev->dev); if (!id) @@ -343,19 +317,49 @@ static int rockchip_pwm_probe(struct platform_device *pdev) if (IS_ERR(pc->base)) return PTR_ERR(pc->base); - pc->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(pc->clk)) - return PTR_ERR(pc->clk); + pc->clk = devm_clk_get(&pdev->dev, "pwm"); + if (IS_ERR(pc->clk)) { + pc->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(pc->clk)) { + ret = PTR_ERR(pc->clk); + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, "Can't get bus clk: %d\n", + ret); + return ret; + } + } + + count = of_count_phandle_with_args(pdev->dev.of_node, + "clocks", "#clock-cells"); + if (count == 2) + pc->pclk = devm_clk_get(&pdev->dev, "pclk"); + else + pc->pclk = pc->clk; + + if (IS_ERR(pc->pclk)) { + ret = PTR_ERR(pc->pclk); + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, "Can't get APB clk: %d\n", ret); + return ret; + } ret = clk_prepare_enable(pc->clk); - if (ret) + if (ret) { + dev_err(&pdev->dev, "Can't prepare enable bus clk: %d\n", ret); return ret; + } + + ret = clk_prepare(pc->pclk); + if (ret) { + dev_err(&pdev->dev, "Can't prepare APB clk: %d\n", ret); + goto err_clk; + } platform_set_drvdata(pdev, pc); pc->data = id->data; pc->chip.dev = &pdev->dev; - pc->chip.ops = pc->data->ops; + pc->chip.ops = &rockchip_pwm_ops; pc->chip.base = -1; pc->chip.npwm = 1; @@ -368,12 +372,20 @@ static int rockchip_pwm_probe(struct platform_device *pdev) if (ret < 0) { clk_unprepare(pc->clk); dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret); + goto err_pclk; } /* Keep the PWM clk enabled if the PWM appears to be up and running. */ if (!pwm_is_enabled(pc->chip.pwms)) clk_disable(pc->clk); + return 0; + +err_pclk: + clk_unprepare(pc->pclk); +err_clk: + clk_disable_unprepare(pc->clk); + return ret; } @@ -395,6 +407,7 @@ static int rockchip_pwm_remove(struct platform_device *pdev) if (pwm_is_enabled(pc->chip.pwms)) clk_disable(pc->clk); + clk_unprepare(pc->pclk); clk_unprepare(pc->clk); return pwmchip_remove(&pc->chip); diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c index f113cda47032..062f2cfc45ec 100644 --- a/drivers/pwm/pwm-samsung.c +++ b/drivers/pwm/pwm-samsung.c @@ -3,6 +3,7 @@ * Copyright (c) 2008 Simtec Electronics * Ben Dooks , * Copyright (c) 2013 Tomasz Figa + * Copyright (c) 2017 Samsung Electronics Co., Ltd. 
* * PWM driver for Samsung SoCs * @@ -74,6 +75,7 @@ struct samsung_pwm_channel { * @chip: generic PWM chip * @variant: local copy of hardware variant data * @inverter_mask: inverter status for all channels - one bit per channel + * @disabled_mask: disabled status for all channels - one bit per channel * @base: base address of mapped PWM registers * @base_clk: base clock used to drive the timers * @tclk0: external clock 0 (can be ERR_PTR if not present) @@ -83,6 +85,7 @@ struct samsung_pwm_chip { struct pwm_chip chip; struct samsung_pwm_variant variant; u8 inverter_mask; + u8 disabled_mask; void __iomem *base; struct clk *base_clk; @@ -257,6 +260,8 @@ static int pwm_samsung_enable(struct pwm_chip *chip, struct pwm_device *pwm) tcon |= TCON_START(tcon_chan) | TCON_AUTORELOAD(tcon_chan); writel(tcon, our_chip->base + REG_TCON); + our_chip->disabled_mask &= ~BIT(pwm->hwpwm); + spin_unlock_irqrestore(&samsung_pwm_lock, flags); return 0; @@ -275,6 +280,8 @@ static void pwm_samsung_disable(struct pwm_chip *chip, struct pwm_device *pwm) tcon &= ~TCON_AUTORELOAD(tcon_chan); writel(tcon, our_chip->base + REG_TCON); + our_chip->disabled_mask |= BIT(pwm->hwpwm); + spin_unlock_irqrestore(&samsung_pwm_lock, flags); } @@ -297,8 +304,8 @@ static void pwm_samsung_manual_update(struct samsung_pwm_chip *chip, spin_unlock_irqrestore(&samsung_pwm_lock, flags); } -static int pwm_samsung_config(struct pwm_chip *chip, struct pwm_device *pwm, - int duty_ns, int period_ns) +static int __pwm_samsung_config(struct pwm_chip *chip, struct pwm_device *pwm, + int duty_ns, int period_ns, bool force_period) { struct samsung_pwm_chip *our_chip = to_samsung_pwm_chip(chip); struct samsung_pwm_channel *chan = pwm_get_chip_data(pwm); @@ -312,9 +319,6 @@ static int pwm_samsung_config(struct pwm_chip *chip, struct pwm_device *pwm, if (period_ns > NSEC_PER_SEC) return -ERANGE; - if (period_ns == chan->period_ns && duty_ns == chan->duty_ns) - return 0; - tcnt = readl(our_chip->base + REG_TCNTB(pwm->hwpwm)); oldtcmp = readl(our_chip->base + REG_TCMPB(pwm->hwpwm)); @@ -322,7 +326,7 @@ static int pwm_samsung_config(struct pwm_chip *chip, struct pwm_device *pwm, ++tcnt; /* Check to see if we are changing the clock rate of the PWM. */ - if (chan->period_ns != period_ns) { + if (chan->period_ns != period_ns || force_period) { unsigned long tin_rate; u32 period; @@ -381,6 +385,12 @@ static int pwm_samsung_config(struct pwm_chip *chip, struct pwm_device *pwm, return 0; } +static int pwm_samsung_config(struct pwm_chip *chip, struct pwm_device *pwm, + int duty_ns, int period_ns) +{ + return __pwm_samsung_config(chip, pwm, duty_ns, period_ns, false); +} + static void pwm_samsung_set_invert(struct samsung_pwm_chip *chip, unsigned int channel, bool invert) { @@ -592,51 +602,41 @@ static int pwm_samsung_remove(struct platform_device *pdev) } #ifdef CONFIG_PM_SLEEP -static int pwm_samsung_suspend(struct device *dev) +static int pwm_samsung_resume(struct device *dev) { - struct samsung_pwm_chip *chip = dev_get_drvdata(dev); + struct samsung_pwm_chip *our_chip = dev_get_drvdata(dev); + struct pwm_chip *chip = &our_chip->chip; unsigned int i; - /* - * No one preserves these values during suspend so reset them. - * Otherwise driver leaves PWM unconfigured if same values are - * passed to pwm_config() next time. 
- */ - for (i = 0; i < SAMSUNG_PWM_NUM; ++i) { - struct pwm_device *pwm = &chip->chip.pwms[i]; + for (i = 0; i < SAMSUNG_PWM_NUM; i++) { + struct pwm_device *pwm = &chip->pwms[i]; struct samsung_pwm_channel *chan = pwm_get_chip_data(pwm); if (!chan) continue; - chan->period_ns = 0; - chan->duty_ns = 0; - } + if (our_chip->variant.output_mask & BIT(i)) + pwm_samsung_set_invert(our_chip, i, + our_chip->inverter_mask & BIT(i)); - return 0; -} + if (chan->period_ns) { + __pwm_samsung_config(chip, pwm, chan->duty_ns, + chan->period_ns, true); + /* needed to make PWM disable work on Odroid-XU3 */ + pwm_samsung_manual_update(our_chip, pwm); + } -static int pwm_samsung_resume(struct device *dev) -{ - struct samsung_pwm_chip *chip = dev_get_drvdata(dev); - unsigned int chan; - - /* - * Inverter setting must be preserved across suspend/resume - * as nobody really seems to configure it more than once. - */ - for (chan = 0; chan < SAMSUNG_PWM_NUM; ++chan) { - if (chip->variant.output_mask & BIT(chan)) - pwm_samsung_set_invert(chip, chan, - chip->inverter_mask & BIT(chan)); + if (our_chip->disabled_mask & BIT(i)) + pwm_samsung_disable(chip, pwm); + else + pwm_samsung_enable(chip, pwm); } return 0; } #endif -static SIMPLE_DEV_PM_OPS(pwm_samsung_pm_ops, pwm_samsung_suspend, - pwm_samsung_resume); +static SIMPLE_DEV_PM_OPS(pwm_samsung_pm_ops, NULL, pwm_samsung_resume); static struct platform_driver pwm_samsung_driver = { .driver = { diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c new file mode 100644 index 000000000000..9793b296108f --- /dev/null +++ b/drivers/pwm/pwm-stm32-lp.c @@ -0,0 +1,246 @@ +/* + * STM32 Low-Power Timer PWM driver + * + * Copyright (C) STMicroelectronics 2017 + * + * Author: Gerald Baeza + * + * License terms: GNU General Public License (GPL), version 2 + * + * Inspired by Gerald Baeza's pwm-stm32 driver + */ + +#include +#include +#include +#include +#include +#include + +struct stm32_pwm_lp { + struct pwm_chip chip; + struct clk *clk; + struct regmap *regmap; +}; + +static inline struct stm32_pwm_lp *to_stm32_pwm_lp(struct pwm_chip *chip) +{ + return container_of(chip, struct stm32_pwm_lp, chip); +} + +/* STM32 Low-Power Timer is preceded by a configurable power-of-2 prescaler */ +#define STM32_LPTIM_MAX_PRESCALER 128 + +static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_state *state) +{ + struct stm32_pwm_lp *priv = to_stm32_pwm_lp(chip); + unsigned long long prd, div, dty; + struct pwm_state cstate; + u32 val, mask, cfgr, presc = 0; + bool reenable; + int ret; + + pwm_get_state(pwm, &cstate); + reenable = !cstate.enabled; + + if (!state->enabled) { + if (cstate.enabled) { + /* Disable LP timer */ + ret = regmap_write(priv->regmap, STM32_LPTIM_CR, 0); + if (ret) + return ret; + /* disable clock to PWM counter */ + clk_disable(priv->clk); + } + return 0; + } + + /* Calculate the period and prescaler value */ + div = (unsigned long long)clk_get_rate(priv->clk) * state->period; + do_div(div, NSEC_PER_SEC); + prd = div; + while (div > STM32_LPTIM_MAX_ARR) { + presc++; + if ((1 << presc) > STM32_LPTIM_MAX_PRESCALER) { + dev_err(priv->chip.dev, "max prescaler exceeded\n"); + return -EINVAL; + } + div = prd >> presc; + } + prd = div; + + /* Calculate the duty cycle */ + dty = prd * state->duty_cycle; + do_div(dty, state->period); + + if (!cstate.enabled) { + /* enable clock to drive PWM counter */ + ret = clk_enable(priv->clk); + if (ret) + return ret; + } + + ret = regmap_read(priv->regmap, STM32_LPTIM_CFGR, &cfgr); + if (ret) 
+ goto err; + + if ((FIELD_GET(STM32_LPTIM_PRESC, cfgr) != presc) || + (FIELD_GET(STM32_LPTIM_WAVPOL, cfgr) != state->polarity)) { + val = FIELD_PREP(STM32_LPTIM_PRESC, presc); + val |= FIELD_PREP(STM32_LPTIM_WAVPOL, state->polarity); + mask = STM32_LPTIM_PRESC | STM32_LPTIM_WAVPOL; + + /* Must disable LP timer to modify CFGR */ + reenable = true; + ret = regmap_write(priv->regmap, STM32_LPTIM_CR, 0); + if (ret) + goto err; + + ret = regmap_update_bits(priv->regmap, STM32_LPTIM_CFGR, mask, + val); + if (ret) + goto err; + } + + if (reenable) { + /* Must (re)enable LP timer to modify CMP & ARR */ + ret = regmap_write(priv->regmap, STM32_LPTIM_CR, + STM32_LPTIM_ENABLE); + if (ret) + goto err; + } + + ret = regmap_write(priv->regmap, STM32_LPTIM_ARR, prd - 1); + if (ret) + goto err; + + ret = regmap_write(priv->regmap, STM32_LPTIM_CMP, prd - (1 + dty)); + if (ret) + goto err; + + /* ensure CMP & ARR registers are properly written */ + ret = regmap_read_poll_timeout(priv->regmap, STM32_LPTIM_ISR, val, + (val & STM32_LPTIM_CMPOK_ARROK), + 100, 1000); + if (ret) { + dev_err(priv->chip.dev, "ARR/CMP registers write issue\n"); + goto err; + } + ret = regmap_write(priv->regmap, STM32_LPTIM_ICR, + STM32_LPTIM_CMPOKCF_ARROKCF); + if (ret) + goto err; + + if (reenable) { + /* Start LP timer in continuous mode */ + ret = regmap_update_bits(priv->regmap, STM32_LPTIM_CR, + STM32_LPTIM_CNTSTRT, + STM32_LPTIM_CNTSTRT); + if (ret) { + regmap_write(priv->regmap, STM32_LPTIM_CR, 0); + goto err; + } + } + + return 0; +err: + if (!cstate.enabled) + clk_disable(priv->clk); + + return ret; +} + +static void stm32_pwm_lp_get_state(struct pwm_chip *chip, + struct pwm_device *pwm, + struct pwm_state *state) +{ + struct stm32_pwm_lp *priv = to_stm32_pwm_lp(chip); + unsigned long rate = clk_get_rate(priv->clk); + u32 val, presc, prd; + u64 tmp; + + regmap_read(priv->regmap, STM32_LPTIM_CR, &val); + state->enabled = !!FIELD_GET(STM32_LPTIM_ENABLE, val); + /* Keep PWM counter clock refcount in sync with PWM initial state */ + if (state->enabled) + clk_enable(priv->clk); + + regmap_read(priv->regmap, STM32_LPTIM_CFGR, &val); + presc = FIELD_GET(STM32_LPTIM_PRESC, val); + state->polarity = FIELD_GET(STM32_LPTIM_WAVPOL, val); + + regmap_read(priv->regmap, STM32_LPTIM_ARR, &prd); + tmp = prd + 1; + tmp = (tmp << presc) * NSEC_PER_SEC; + state->period = DIV_ROUND_CLOSEST_ULL(tmp, rate); + + regmap_read(priv->regmap, STM32_LPTIM_CMP, &val); + tmp = prd - val; + tmp = (tmp << presc) * NSEC_PER_SEC; + state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, rate); +} + +static const struct pwm_ops stm32_pwm_lp_ops = { + .owner = THIS_MODULE, + .apply = stm32_pwm_lp_apply, + .get_state = stm32_pwm_lp_get_state, +}; + +static int stm32_pwm_lp_probe(struct platform_device *pdev) +{ + struct stm32_lptimer *ddata = dev_get_drvdata(pdev->dev.parent); + struct stm32_pwm_lp *priv; + int ret; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->regmap = ddata->regmap; + priv->clk = ddata->clk; + priv->chip.base = -1; + priv->chip.dev = &pdev->dev; + priv->chip.ops = &stm32_pwm_lp_ops; + priv->chip.npwm = 1; + + ret = pwmchip_add(&priv->chip); + if (ret < 0) + return ret; + + platform_set_drvdata(pdev, priv); + + return 0; +} + +static int stm32_pwm_lp_remove(struct platform_device *pdev) +{ + struct stm32_pwm_lp *priv = platform_get_drvdata(pdev); + unsigned int i; + + for (i = 0; i < priv->chip.npwm; i++) + if (pwm_is_enabled(&priv->chip.pwms[i])) + pwm_disable(&priv->chip.pwms[i]); + + return 
pwmchip_remove(&priv->chip); +} + +static const struct of_device_id stm32_pwm_lp_of_match[] = { + { .compatible = "st,stm32-pwm-lp", }, + {}, +}; +MODULE_DEVICE_TABLE(of, stm32_pwm_lp_of_match); + +static struct platform_driver stm32_pwm_lp_driver = { + .probe = stm32_pwm_lp_probe, + .remove = stm32_pwm_lp_remove, + .driver = { + .name = "stm32-pwm-lp", + .of_match_table = of_match_ptr(stm32_pwm_lp_of_match), + }, +}; +module_platform_driver(stm32_pwm_lp_driver); + +MODULE_ALIAS("platform:stm32-pwm-lp"); +MODULE_DESCRIPTION("STMicroelectronics STM32 PWM LP driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c index e9b33f09ff09..f8ebbece57b7 100644 --- a/drivers/pwm/pwm-tegra.c +++ b/drivers/pwm/pwm-tegra.c @@ -218,7 +218,7 @@ static int tegra_pwm_probe(struct platform_device *pdev) */ pwm->clk_rate = clk_get_rate(pwm->clk); - pwm->rst = devm_reset_control_get(&pdev->dev, "pwm"); + pwm->rst = devm_reset_control_get_exclusive(&pdev->dev, "pwm"); if (IS_ERR(pwm->rst)) { ret = PTR_ERR(pwm->rst); dev_err(&pdev->dev, "Reset control is not found: %d\n", ret); diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c index 6ec342dd3eea..34b228626bd5 100644 --- a/drivers/pwm/pwm-tiecap.c +++ b/drivers/pwm/pwm-tiecap.c @@ -39,15 +39,15 @@ #define ECCTL2_TSCTR_FREERUN BIT(4) struct ecap_context { - u32 cap3; - u32 cap4; - u16 ecctl2; + u32 cap3; + u32 cap4; + u16 ecctl2; }; struct ecap_pwm_chip { - struct pwm_chip chip; - unsigned int clk_rate; - void __iomem *mmio_base; + struct pwm_chip chip; + unsigned int clk_rate; + void __iomem *mmio_base; struct ecap_context ctx; }; @@ -64,9 +64,9 @@ static int ecap_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, int duty_ns, int period_ns) { struct ecap_pwm_chip *pc = to_ecap_pwm_chip(chip); + u32 period_cycles, duty_cycles; unsigned long long c; - unsigned long period_cycles, duty_cycles; - unsigned int reg_val; + u16 value; if (period_ns > NSEC_PER_SEC) return -ERANGE; @@ -74,7 +74,7 @@ static int ecap_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, c = pc->clk_rate; c = c * period_ns; do_div(c, NSEC_PER_SEC); - period_cycles = (unsigned long)c; + period_cycles = (u32)c; if (period_cycles < 1) { period_cycles = 1; @@ -83,17 +83,17 @@ static int ecap_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, c = pc->clk_rate; c = c * duty_ns; do_div(c, NSEC_PER_SEC); - duty_cycles = (unsigned long)c; + duty_cycles = (u32)c; } pm_runtime_get_sync(pc->chip.dev); - reg_val = readw(pc->mmio_base + ECCTL2); + value = readw(pc->mmio_base + ECCTL2); /* Configure APWM mode & disable sync option */ - reg_val |= ECCTL2_APWM_MODE | ECCTL2_SYNC_SEL_DISA; + value |= ECCTL2_APWM_MODE | ECCTL2_SYNC_SEL_DISA; - writew(reg_val, pc->mmio_base + ECCTL2); + writew(value, pc->mmio_base + ECCTL2); if (!pwm_is_enabled(pwm)) { /* Update active registers if not running */ @@ -110,40 +110,45 @@ static int ecap_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, } if (!pwm_is_enabled(pwm)) { - reg_val = readw(pc->mmio_base + ECCTL2); + value = readw(pc->mmio_base + ECCTL2); /* Disable APWM mode to put APWM output Low */ - reg_val &= ~ECCTL2_APWM_MODE; - writew(reg_val, pc->mmio_base + ECCTL2); + value &= ~ECCTL2_APWM_MODE; + writew(value, pc->mmio_base + ECCTL2); } pm_runtime_put_sync(pc->chip.dev); + return 0; } static int ecap_pwm_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm, - enum pwm_polarity polarity) + enum pwm_polarity polarity) { struct ecap_pwm_chip *pc = to_ecap_pwm_chip(chip); - 
unsigned short reg_val; + u16 value; pm_runtime_get_sync(pc->chip.dev); - reg_val = readw(pc->mmio_base + ECCTL2); + + value = readw(pc->mmio_base + ECCTL2); + if (polarity == PWM_POLARITY_INVERSED) /* Duty cycle defines LOW period of PWM */ - reg_val |= ECCTL2_APWM_POL_LOW; + value |= ECCTL2_APWM_POL_LOW; else /* Duty cycle defines HIGH period of PWM */ - reg_val &= ~ECCTL2_APWM_POL_LOW; + value &= ~ECCTL2_APWM_POL_LOW; + + writew(value, pc->mmio_base + ECCTL2); - writew(reg_val, pc->mmio_base + ECCTL2); pm_runtime_put_sync(pc->chip.dev); + return 0; } static int ecap_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) { struct ecap_pwm_chip *pc = to_ecap_pwm_chip(chip); - unsigned int reg_val; + u16 value; /* Leave clock enabled on enabling PWM */ pm_runtime_get_sync(pc->chip.dev); @@ -152,24 +157,25 @@ static int ecap_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) * Enable 'Free run Time stamp counter mode' to start counter * and 'APWM mode' to enable APWM output */ - reg_val = readw(pc->mmio_base + ECCTL2); - reg_val |= ECCTL2_TSCTR_FREERUN | ECCTL2_APWM_MODE; - writew(reg_val, pc->mmio_base + ECCTL2); + value = readw(pc->mmio_base + ECCTL2); + value |= ECCTL2_TSCTR_FREERUN | ECCTL2_APWM_MODE; + writew(value, pc->mmio_base + ECCTL2); + return 0; } static void ecap_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) { struct ecap_pwm_chip *pc = to_ecap_pwm_chip(chip); - unsigned int reg_val; + u16 value; /* * Disable 'Free run Time stamp counter mode' to stop counter * and 'APWM mode' to put APWM output to low */ - reg_val = readw(pc->mmio_base + ECCTL2); - reg_val &= ~(ECCTL2_TSCTR_FREERUN | ECCTL2_APWM_MODE); - writew(reg_val, pc->mmio_base + ECCTL2); + value = readw(pc->mmio_base + ECCTL2); + value &= ~(ECCTL2_TSCTR_FREERUN | ECCTL2_APWM_MODE); + writew(value, pc->mmio_base + ECCTL2); /* Disable clock on PWM disable */ pm_runtime_put_sync(pc->chip.dev); @@ -184,12 +190,12 @@ static void ecap_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm) } static const struct pwm_ops ecap_pwm_ops = { - .free = ecap_pwm_free, - .config = ecap_pwm_config, - .set_polarity = ecap_pwm_set_polarity, - .enable = ecap_pwm_enable, - .disable = ecap_pwm_disable, - .owner = THIS_MODULE, + .free = ecap_pwm_free, + .config = ecap_pwm_config, + .set_polarity = ecap_pwm_set_polarity, + .enable = ecap_pwm_enable, + .disable = ecap_pwm_disable, + .owner = THIS_MODULE, }; static const struct of_device_id ecap_of_match[] = { @@ -202,10 +208,10 @@ MODULE_DEVICE_TABLE(of, ecap_of_match); static int ecap_pwm_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; - int ret; + struct ecap_pwm_chip *pc; struct resource *r; struct clk *clk; - struct ecap_pwm_chip *pc; + int ret; pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL); if (!pc) @@ -248,9 +254,9 @@ static int ecap_pwm_probe(struct platform_device *pdev) return ret; } + platform_set_drvdata(pdev, pc); pm_runtime_enable(&pdev->dev); - platform_set_drvdata(pdev, pc); return 0; } @@ -259,6 +265,7 @@ static int ecap_pwm_remove(struct platform_device *pdev) struct ecap_pwm_chip *pc = platform_get_drvdata(pdev); pm_runtime_disable(&pdev->dev); + return pwmchip_remove(&pc->chip); } @@ -311,14 +318,13 @@ static SIMPLE_DEV_PM_OPS(ecap_pwm_pm_ops, ecap_pwm_suspend, ecap_pwm_resume); static struct platform_driver ecap_pwm_driver = { .driver = { - .name = "ecap", + .name = "ecap", .of_match_table = ecap_of_match, - .pm = &ecap_pwm_pm_ops, + .pm = &ecap_pwm_pm_ops, }, .probe = ecap_pwm_probe, .remove = 
ecap_pwm_remove, }; - module_platform_driver(ecap_pwm_driver); MODULE_DESCRIPTION("ECAP PWM driver"); diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c index b5c6b0636893..4c22cb395040 100644 --- a/drivers/pwm/pwm-tiehrpwm.c +++ b/drivers/pwm/pwm-tiehrpwm.c @@ -122,12 +122,12 @@ struct ehrpwm_context { }; struct ehrpwm_pwm_chip { - struct pwm_chip chip; - unsigned int clk_rate; - void __iomem *mmio_base; + struct pwm_chip chip; + unsigned long clk_rate; + void __iomem *mmio_base; unsigned long period_cycles[NUM_PWM_CHANNEL]; enum pwm_polarity polarity[NUM_PWM_CHANNEL]; - struct clk *tbclk; + struct clk *tbclk; struct ehrpwm_context ctx; }; @@ -136,25 +136,26 @@ static inline struct ehrpwm_pwm_chip *to_ehrpwm_pwm_chip(struct pwm_chip *chip) return container_of(chip, struct ehrpwm_pwm_chip, chip); } -static inline u16 ehrpwm_read(void __iomem *base, int offset) +static inline u16 ehrpwm_read(void __iomem *base, unsigned int offset) { return readw(base + offset); } -static inline void ehrpwm_write(void __iomem *base, int offset, unsigned int val) +static inline void ehrpwm_write(void __iomem *base, unsigned int offset, + u16 value) { - writew(val & 0xFFFF, base + offset); + writew(value, base + offset); } -static void ehrpwm_modify(void __iomem *base, int offset, - unsigned short mask, unsigned short val) +static void ehrpwm_modify(void __iomem *base, unsigned int offset, u16 mask, + u16 value) { - unsigned short regval; + unsigned short val; - regval = readw(base + offset); - regval &= ~mask; - regval |= val & mask; - writew(regval, base + offset); + val = readw(base + offset); + val &= ~mask; + val |= value & mask; + writew(val, base + offset); } /** @@ -163,14 +164,13 @@ static void ehrpwm_modify(void __iomem *base, int offset, * @prescale_div: prescaler value set * @tb_clk_div: Time Base Control prescaler bits */ -static int set_prescale_div(unsigned long rqst_prescaler, - unsigned short *prescale_div, unsigned short *tb_clk_div) +static int set_prescale_div(unsigned long rqst_prescaler, u16 *prescale_div, + u16 *tb_clk_div) { unsigned int clkdiv, hspclkdiv; for (clkdiv = 0; clkdiv <= CLKDIV_MAX; clkdiv++) { for (hspclkdiv = 0; hspclkdiv <= HSPCLKDIV_MAX; hspclkdiv++) { - /* * calculations for prescaler value : * prescale_div = HSPCLKDIVIDER * CLKDIVIDER. 
@@ -191,13 +191,14 @@ static int set_prescale_div(unsigned long rqst_prescaler, } } } + return 1; } static void configure_polarity(struct ehrpwm_pwm_chip *pc, int chan) { - int aqctl_reg; - unsigned short aqctl_val, aqctl_mask; + u16 aqctl_val, aqctl_mask; + unsigned int aqctl_reg; /* * Configure PWM output to HIGH/LOW level on counter @@ -232,13 +233,13 @@ static void configure_polarity(struct ehrpwm_pwm_chip *pc, int chan) * duty_ns = 10^9 * (ps_divval * duty_cycles) / PWM_CLK_RATE */ static int ehrpwm_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, - int duty_ns, int period_ns) + int duty_ns, int period_ns) { struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip); + u32 period_cycles, duty_cycles; + u16 ps_divval, tb_divval; + unsigned int i, cmp_reg; unsigned long long c; - unsigned long period_cycles, duty_cycles; - unsigned short ps_divval, tb_divval; - int i, cmp_reg; if (period_ns > NSEC_PER_SEC) return -ERANGE; @@ -272,8 +273,9 @@ static int ehrpwm_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, if (i == pwm->hwpwm) continue; - dev_err(chip->dev, "Period value conflicts with channel %d\n", - i); + dev_err(chip->dev, + "period value conflicts with channel %u\n", + i); return -EINVAL; } } @@ -282,7 +284,7 @@ static int ehrpwm_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, /* Configure clock prescaler to support Low frequency PWM wave */ if (set_prescale_div(period_cycles/PERIOD_MAX, &ps_divval, - &tb_divval)) { + &tb_divval)) { dev_err(chip->dev, "Unsupported values\n"); return -EINVAL; } @@ -303,7 +305,7 @@ static int ehrpwm_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, /* Configure ehrpwm counter for up-count mode */ ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_CTRMODE_MASK, - TBCTL_CTRMODE_UP); + TBCTL_CTRMODE_UP); if (pwm->hwpwm == 1) /* Channel 1 configured with compare B register */ @@ -315,23 +317,26 @@ static int ehrpwm_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, ehrpwm_write(pc->mmio_base, cmp_reg, duty_cycles); pm_runtime_put_sync(chip->dev); + return 0; } static int ehrpwm_pwm_set_polarity(struct pwm_chip *chip, - struct pwm_device *pwm, enum pwm_polarity polarity) + struct pwm_device *pwm, + enum pwm_polarity polarity) { struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip); /* Configuration of polarity in hardware delayed, do at enable */ pc->polarity[pwm->hwpwm] = polarity; + return 0; } static int ehrpwm_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) { struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip); - unsigned short aqcsfrc_val, aqcsfrc_mask; + u16 aqcsfrc_val, aqcsfrc_mask; int ret; /* Leave clock enabled on enabling PWM */ @@ -348,7 +353,7 @@ static int ehrpwm_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) /* Changes to shadow mode */ ehrpwm_modify(pc->mmio_base, AQSFRC, AQSFRC_RLDCSF_MASK, - AQSFRC_RLDCSF_ZRO); + AQSFRC_RLDCSF_ZRO); ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val); @@ -358,20 +363,21 @@ static int ehrpwm_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) /* Enable TBCLK before enabling PWM device */ ret = clk_enable(pc->tbclk); if (ret) { - dev_err(chip->dev, "Failed to enable TBCLK for %s\n", - dev_name(pc->chip.dev)); + dev_err(chip->dev, "Failed to enable TBCLK for %s: %d\n", + dev_name(pc->chip.dev), ret); return ret; } /* Enable time counter for free_run */ ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_RUN_MASK, TBCTL_FREE_RUN); + return 0; } static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) { struct ehrpwm_pwm_chip 
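[Editor's note] The ehrpwm_pwm_config() hunk above rejects a period that conflicts with the other channel, because both channels share one period (time-base) register. A generic sketch of that kind of validation, with hypothetical names, assuming unused channels are recorded as 0:

#include <linux/errno.h>

/* Channels sharing one period register must all request the same period. */
static int check_shared_period(const unsigned long *period_cycles, int nchan,
			       int chan, unsigned long requested)
{
	int i;

	for (i = 0; i < nchan; i++) {
		if (i == chan || !period_cycles[i])
			continue;		/* idle channels impose no constraint */
		if (period_cycles[i] != requested)
			return -EINVAL;		/* would reprogram the shared register */
	}

	return 0;
}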
*pc = to_ehrpwm_pwm_chip(chip); - unsigned short aqcsfrc_val, aqcsfrc_mask; + u16 aqcsfrc_val, aqcsfrc_mask; /* Action Qualifier puts PWM output low forcefully */ if (pwm->hwpwm) { @@ -387,7 +393,7 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) * Action Qualifier control on PWM output from next TBCLK */ ehrpwm_modify(pc->mmio_base, AQSFRC, AQSFRC_RLDCSF_MASK, - AQSFRC_RLDCSF_IMDT); + AQSFRC_RLDCSF_IMDT); ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val); @@ -415,17 +421,17 @@ static void ehrpwm_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm) } static const struct pwm_ops ehrpwm_pwm_ops = { - .free = ehrpwm_pwm_free, - .config = ehrpwm_pwm_config, - .set_polarity = ehrpwm_pwm_set_polarity, - .enable = ehrpwm_pwm_enable, - .disable = ehrpwm_pwm_disable, - .owner = THIS_MODULE, + .free = ehrpwm_pwm_free, + .config = ehrpwm_pwm_config, + .set_polarity = ehrpwm_pwm_set_polarity, + .enable = ehrpwm_pwm_enable, + .disable = ehrpwm_pwm_disable, + .owner = THIS_MODULE, }; static const struct of_device_id ehrpwm_of_match[] = { - { .compatible = "ti,am3352-ehrpwm" }, - { .compatible = "ti,am33xx-ehrpwm" }, + { .compatible = "ti,am3352-ehrpwm" }, + { .compatible = "ti,am33xx-ehrpwm" }, {}, }; MODULE_DEVICE_TABLE(of, ehrpwm_of_match); @@ -433,10 +439,10 @@ MODULE_DEVICE_TABLE(of, ehrpwm_of_match); static int ehrpwm_pwm_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; - int ret; + struct ehrpwm_pwm_chip *pc; struct resource *r; struct clk *clk; - struct ehrpwm_pwm_chip *pc; + int ret; pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL); if (!pc) @@ -489,13 +495,18 @@ static int ehrpwm_pwm_probe(struct platform_device *pdev) ret = pwmchip_add(&pc->chip); if (ret < 0) { dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret); - return ret; + goto err_clk_unprepare; } + platform_set_drvdata(pdev, pc); pm_runtime_enable(&pdev->dev); - platform_set_drvdata(pdev, pc); return 0; + +err_clk_unprepare: + clk_unprepare(pc->tbclk); + + return ret; } static int ehrpwm_pwm_remove(struct platform_device *pdev) @@ -504,8 +515,8 @@ static int ehrpwm_pwm_remove(struct platform_device *pdev) clk_unprepare(pc->tbclk); - pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); + return pwmchip_remove(&pc->chip); } @@ -513,6 +524,7 @@ static int ehrpwm_pwm_remove(struct platform_device *pdev) static void ehrpwm_pwm_save_context(struct ehrpwm_pwm_chip *pc) { pm_runtime_get_sync(pc->chip.dev); + pc->ctx.tbctl = ehrpwm_read(pc->mmio_base, TBCTL); pc->ctx.tbprd = ehrpwm_read(pc->mmio_base, TBPRD); pc->ctx.cmpa = ehrpwm_read(pc->mmio_base, CMPA); @@ -521,6 +533,7 @@ static void ehrpwm_pwm_save_context(struct ehrpwm_pwm_chip *pc) pc->ctx.aqctlb = ehrpwm_read(pc->mmio_base, AQCTLB); pc->ctx.aqsfrc = ehrpwm_read(pc->mmio_base, AQSFRC); pc->ctx.aqcsfrc = ehrpwm_read(pc->mmio_base, AQCSFRC); + pm_runtime_put_sync(pc->chip.dev); } @@ -539,9 +552,10 @@ static void ehrpwm_pwm_restore_context(struct ehrpwm_pwm_chip *pc) static int ehrpwm_pwm_suspend(struct device *dev) { struct ehrpwm_pwm_chip *pc = dev_get_drvdata(dev); - int i; + unsigned int i; ehrpwm_pwm_save_context(pc); + for (i = 0; i < pc->chip.npwm; i++) { struct pwm_device *pwm = &pc->chip.pwms[i]; @@ -551,13 +565,14 @@ static int ehrpwm_pwm_suspend(struct device *dev) /* Disable explicitly if PWM is running */ pm_runtime_put_sync(dev); } + return 0; } static int ehrpwm_pwm_resume(struct device *dev) { struct ehrpwm_pwm_chip *pc = dev_get_drvdata(dev); - int i; + unsigned int i; 
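[Editor's note] The ehrpwm_pwm_probe() hunk above adds an err_clk_unprepare label so a failed pwmchip_add() no longer leaks the prepared TBCLK. A sketch of the general goto-unwind idiom it adopts; the registration step is a hypothetical placeholder:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

extern int example_register_chip(struct platform_device *pdev);	/* hypothetical */

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare(clk);		/* not devm-managed: must be undone by hand */
	if (ret)
		return ret;

	ret = example_register_chip(pdev);
	if (ret)
		goto err_clk_unprepare;	/* unwind in reverse order of acquisition */

	return 0;

err_clk_unprepare:
	clk_unprepare(clk);
	return ret;
}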
for (i = 0; i < pc->chip.npwm; i++) { struct pwm_device *pwm = &pc->chip.pwms[i]; @@ -568,24 +583,25 @@ static int ehrpwm_pwm_resume(struct device *dev) /* Enable explicitly if PWM was running */ pm_runtime_get_sync(dev); } + ehrpwm_pwm_restore_context(pc); + return 0; } #endif static SIMPLE_DEV_PM_OPS(ehrpwm_pwm_pm_ops, ehrpwm_pwm_suspend, - ehrpwm_pwm_resume); + ehrpwm_pwm_resume); static struct platform_driver ehrpwm_pwm_driver = { .driver = { - .name = "ehrpwm", + .name = "ehrpwm", .of_match_table = ehrpwm_of_match, - .pm = &ehrpwm_pwm_pm_ops, + .pm = &ehrpwm_pwm_pm_ops, }, .probe = ehrpwm_pwm_probe, .remove = ehrpwm_pwm_remove, }; - module_platform_driver(ehrpwm_pwm_driver); MODULE_DESCRIPTION("EHRPWM PWM driver"); diff --git a/drivers/pwm/pwm-twl-led.c b/drivers/pwm/pwm-twl-led.c index 21eff991d0e3..01153622778b 100644 --- a/drivers/pwm/pwm-twl-led.c +++ b/drivers/pwm/pwm-twl-led.c @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include /* diff --git a/drivers/pwm/pwm-twl.c b/drivers/pwm/pwm-twl.c index 9de617b76680..b7a45be99815 100644 --- a/drivers/pwm/pwm-twl.c +++ b/drivers/pwm/pwm-twl.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include /* diff --git a/drivers/pwm/pwm-vt8500.c b/drivers/pwm/pwm-vt8500.c index 8141a4984126..3a78dd09ac81 100644 --- a/drivers/pwm/pwm-vt8500.c +++ b/drivers/pwm/pwm-vt8500.c @@ -241,6 +241,7 @@ static int vt8500_pwm_probe(struct platform_device *pdev) ret = pwmchip_add(&chip->chip); if (ret < 0) { dev_err(&pdev->dev, "failed to add PWM chip\n"); + clk_unprepare(chip->clk); return ret; } diff --git a/drivers/pwm/pwm-zx.c b/drivers/pwm/pwm-zx.c new file mode 100644 index 000000000000..5d27c16edfb1 --- /dev/null +++ b/drivers/pwm/pwm-zx.c @@ -0,0 +1,282 @@ +/* + * Copyright (C) 2017 Sanechips Technology Co., Ltd. + * Copyright 2017 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define ZX_PWM_MODE 0x0 +#define ZX_PWM_CLKDIV_SHIFT 2 +#define ZX_PWM_CLKDIV_MASK GENMASK(11, 2) +#define ZX_PWM_CLKDIV(x) (((x) << ZX_PWM_CLKDIV_SHIFT) & \ + ZX_PWM_CLKDIV_MASK) +#define ZX_PWM_POLAR BIT(1) +#define ZX_PWM_EN BIT(0) +#define ZX_PWM_PERIOD 0x4 +#define ZX_PWM_DUTY 0x8 + +#define ZX_PWM_CLKDIV_MAX 1023 +#define ZX_PWM_PERIOD_MAX 65535 + +struct zx_pwm_chip { + struct pwm_chip chip; + struct clk *pclk; + struct clk *wclk; + void __iomem *base; +}; + +static inline struct zx_pwm_chip *to_zx_pwm_chip(struct pwm_chip *chip) +{ + return container_of(chip, struct zx_pwm_chip, chip); +} + +static inline u32 zx_pwm_readl(struct zx_pwm_chip *zpc, unsigned int hwpwm, + unsigned int offset) +{ + return readl(zpc->base + (hwpwm + 1) * 0x10 + offset); +} + +static inline void zx_pwm_writel(struct zx_pwm_chip *zpc, unsigned int hwpwm, + unsigned int offset, u32 value) +{ + writel(value, zpc->base + (hwpwm + 1) * 0x10 + offset); +} + +static void zx_pwm_set_mask(struct zx_pwm_chip *zpc, unsigned int hwpwm, + unsigned int offset, u32 mask, u32 value) +{ + u32 data; + + data = zx_pwm_readl(zpc, hwpwm, offset); + data &= ~mask; + data |= value & mask; + zx_pwm_writel(zpc, hwpwm, offset, data); +} + +static void zx_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_state *state) +{ + struct zx_pwm_chip *zpc = to_zx_pwm_chip(chip); + unsigned long rate; + unsigned int div; + u32 value; + u64 tmp; + + value = zx_pwm_readl(zpc, pwm->hwpwm, ZX_PWM_MODE); + + if (value & ZX_PWM_POLAR) + state->polarity = PWM_POLARITY_NORMAL; + else + state->polarity = PWM_POLARITY_INVERSED; + + if (value & ZX_PWM_EN) + state->enabled = true; + else + state->enabled = false; + + div = (value & ZX_PWM_CLKDIV_MASK) >> ZX_PWM_CLKDIV_SHIFT; + rate = clk_get_rate(zpc->wclk); + + tmp = zx_pwm_readl(zpc, pwm->hwpwm, ZX_PWM_PERIOD); + tmp *= div * NSEC_PER_SEC; + state->period = DIV_ROUND_CLOSEST_ULL(tmp, rate); + + tmp = zx_pwm_readl(zpc, pwm->hwpwm, ZX_PWM_DUTY); + tmp *= div * NSEC_PER_SEC; + state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, rate); +} + +static int zx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, + unsigned int duty_ns, unsigned int period_ns) +{ + struct zx_pwm_chip *zpc = to_zx_pwm_chip(chip); + unsigned int period_cycles, duty_cycles; + unsigned long long c; + unsigned int div = 1; + unsigned long rate; + + /* Find out the best divider */ + rate = clk_get_rate(zpc->wclk); + + while (1) { + c = rate / div; + c = c * period_ns; + do_div(c, NSEC_PER_SEC); + + if (c < ZX_PWM_PERIOD_MAX) + break; + + div++; + + if (div > ZX_PWM_CLKDIV_MAX) + return -ERANGE; + } + + /* Calculate duty cycles */ + period_cycles = c; + c *= duty_ns; + do_div(c, period_ns); + duty_cycles = c; + + /* + * If the PWM is being enabled, we have to temporarily disable it + * before configuring the registers. 
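[Editor's note] The zx_pwm_config() loop above searches for the smallest clock divider for which the period, expressed in counter ticks, still fits the 16-bit PERIOD register. A standalone sketch of that search; the names and the exact boundary check are illustrative:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/time64.h>

#define EXAMPLE_PERIOD_MAX	65535	/* 16-bit period counter */
#define EXAMPLE_CLKDIV_MAX	1023	/* 10-bit divider field */

static int pick_divider(unsigned long rate, unsigned int period_ns,
			unsigned int *div, unsigned int *period_cycles)
{
	unsigned long long c;
	unsigned int d;

	for (d = 1; d <= EXAMPLE_CLKDIV_MAX; d++) {
		c = rate / d;			/* effective counter clock */
		c *= period_ns;
		do_div(c, NSEC_PER_SEC);	/* period in counter ticks */

		if (c <= EXAMPLE_PERIOD_MAX) {
			*div = d;
			*period_cycles = c;
			return 0;
		}
	}

	return -ERANGE;		/* too long even with the largest divider */
}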
+ */ + if (pwm_is_enabled(pwm)) + zx_pwm_set_mask(zpc, pwm->hwpwm, ZX_PWM_MODE, ZX_PWM_EN, 0); + + /* Set up registers */ + zx_pwm_set_mask(zpc, pwm->hwpwm, ZX_PWM_MODE, ZX_PWM_CLKDIV_MASK, + ZX_PWM_CLKDIV(div)); + zx_pwm_writel(zpc, pwm->hwpwm, ZX_PWM_PERIOD, period_cycles); + zx_pwm_writel(zpc, pwm->hwpwm, ZX_PWM_DUTY, duty_cycles); + + /* Re-enable the PWM if needed */ + if (pwm_is_enabled(pwm)) + zx_pwm_set_mask(zpc, pwm->hwpwm, ZX_PWM_MODE, + ZX_PWM_EN, ZX_PWM_EN); + + return 0; +} + +static int zx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_state *state) +{ + struct zx_pwm_chip *zpc = to_zx_pwm_chip(chip); + struct pwm_state cstate; + int ret; + + pwm_get_state(pwm, &cstate); + + if (state->polarity != cstate.polarity) + zx_pwm_set_mask(zpc, pwm->hwpwm, ZX_PWM_MODE, ZX_PWM_POLAR, + (state->polarity == PWM_POLARITY_INVERSED) ? + 0 : ZX_PWM_POLAR); + + if (state->period != cstate.period || + state->duty_cycle != cstate.duty_cycle) { + ret = zx_pwm_config(chip, pwm, state->duty_cycle, + state->period); + if (ret) + return ret; + } + + if (state->enabled != cstate.enabled) { + if (state->enabled) { + ret = clk_prepare_enable(zpc->wclk); + if (ret) + return ret; + + zx_pwm_set_mask(zpc, pwm->hwpwm, ZX_PWM_MODE, + ZX_PWM_EN, ZX_PWM_EN); + } else { + zx_pwm_set_mask(zpc, pwm->hwpwm, ZX_PWM_MODE, + ZX_PWM_EN, 0); + clk_disable_unprepare(zpc->wclk); + } + } + + return 0; +} + +static const struct pwm_ops zx_pwm_ops = { + .apply = zx_pwm_apply, + .get_state = zx_pwm_get_state, + .owner = THIS_MODULE, +}; + +static int zx_pwm_probe(struct platform_device *pdev) +{ + struct zx_pwm_chip *zpc; + struct resource *res; + unsigned int i; + int ret; + + zpc = devm_kzalloc(&pdev->dev, sizeof(*zpc), GFP_KERNEL); + if (!zpc) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + zpc->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(zpc->base)) + return PTR_ERR(zpc->base); + + zpc->pclk = devm_clk_get(&pdev->dev, "pclk"); + if (IS_ERR(zpc->pclk)) + return PTR_ERR(zpc->pclk); + + zpc->wclk = devm_clk_get(&pdev->dev, "wclk"); + if (IS_ERR(zpc->wclk)) + return PTR_ERR(zpc->wclk); + + ret = clk_prepare_enable(zpc->pclk); + if (ret) + return ret; + + zpc->chip.dev = &pdev->dev; + zpc->chip.ops = &zx_pwm_ops; + zpc->chip.base = -1; + zpc->chip.npwm = 4; + zpc->chip.of_xlate = of_pwm_xlate_with_flags; + zpc->chip.of_pwm_n_cells = 3; + + /* + * PWM devices may be enabled by firmware, and let's disable all of + * them initially to save power. 
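[Editor's note] The zx driver above implements the atomic PWM callbacks (->apply() and ->get_state()) rather than the legacy config/enable split. For context, a consumer-side sketch of driving such a chip through the atomic API; the 1 ms / 50% values are only an example:

#include <linux/pwm.h>

static int example_set_50_percent(struct pwm_device *pwm)
{
	struct pwm_state state;

	pwm_get_state(pwm, &state);	/* start from the current state */

	state.period = 1000000;			/* 1 ms */
	state.duty_cycle = state.period / 2;	/* 50% duty */
	state.polarity = PWM_POLARITY_NORMAL;
	state.enabled = true;

	return pwm_apply_state(pwm, &state);	/* one atomic update */
}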
+ */ + for (i = 0; i < zpc->chip.npwm; i++) + zx_pwm_set_mask(zpc, i, ZX_PWM_MODE, ZX_PWM_EN, 0); + + ret = pwmchip_add(&zpc->chip); + if (ret < 0) { + dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret); + return ret; + } + + platform_set_drvdata(pdev, zpc); + + return 0; +} + +static int zx_pwm_remove(struct platform_device *pdev) +{ + struct zx_pwm_chip *zpc = platform_get_drvdata(pdev); + int ret; + + ret = pwmchip_remove(&zpc->chip); + clk_disable_unprepare(zpc->pclk); + + return ret; +} + +static const struct of_device_id zx_pwm_dt_ids[] = { + { .compatible = "zte,zx296718-pwm", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, zx_pwm_dt_ids); + +static struct platform_driver zx_pwm_driver = { + .driver = { + .name = "zx-pwm", + .of_match_table = zx_pwm_dt_ids, + }, + .probe = zx_pwm_probe, + .remove = zx_pwm_remove, +}; +module_platform_driver(zx_pwm_driver); + +MODULE_ALIAS("platform:zx-pwm"); +MODULE_AUTHOR("Shawn Guo "); +MODULE_DESCRIPTION("ZTE ZX PWM Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c index 315a4be8dc1e..9a68914100ad 100644 --- a/drivers/rapidio/devices/tsi721.c +++ b/drivers/rapidio/devices/tsi721.c @@ -51,6 +51,8 @@ module_param(mbox_sel, byte, S_IRUGO); MODULE_PARM_DESC(mbox_sel, "RIO Messaging MBOX Selection Mask (default: 0x0f = all)"); +static DEFINE_SPINLOCK(tsi721_maint_lock); + static void tsi721_omsg_handler(struct tsi721_device *priv, int ch); static void tsi721_imsg_handler(struct tsi721_device *priv, int ch); @@ -124,12 +126,15 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size, void __iomem *regs = priv->regs + TSI721_DMAC_BASE(priv->mdma.ch_id); struct tsi721_dma_desc *bd_ptr; u32 rd_count, swr_ptr, ch_stat; + unsigned long flags; int i, err = 0; u32 op = do_wr ? MAINT_WR : MAINT_RD; if (offset > (RIO_MAINT_SPACE_SZ - len) || (len != sizeof(u32))) return -EINVAL; + spin_lock_irqsave(&tsi721_maint_lock, flags); + bd_ptr = priv->mdma.bd_base; rd_count = ioread32(regs + TSI721_DMAC_DRDCNT); @@ -197,7 +202,9 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size, */ swr_ptr = ioread32(regs + TSI721_DMAC_DSWP); iowrite32(swr_ptr, regs + TSI721_DMAC_DSRP); + err_out: + spin_unlock_irqrestore(&tsi721_maint_lock, flags); return err; } diff --git a/drivers/rapidio/rio-access.c b/drivers/rapidio/rio-access.c index a3824baca2e5..3ee9af83b638 100644 --- a/drivers/rapidio/rio-access.c +++ b/drivers/rapidio/rio-access.c @@ -13,17 +13,9 @@ #include #include -/* - * These interrupt-safe spinlocks protect all accesses to RIO - * configuration space and doorbell access. - */ -static DEFINE_SPINLOCK(rio_config_lock); -static DEFINE_SPINLOCK(rio_doorbell_lock); - /* * Wrappers for all RIO configuration access functions. They just check - * alignment, do locking and call the low-level functions pointed to - * by rio_mport->ops. + * alignment and call the low-level functions pointed to by rio_mport->ops. 
*/ #define RIO_8_BAD 0 @@ -44,13 +36,10 @@ int __rio_local_read_config_##size \ (struct rio_mport *mport, u32 offset, type *value) \ { \ int res; \ - unsigned long flags; \ u32 data = 0; \ if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ - spin_lock_irqsave(&rio_config_lock, flags); \ res = mport->ops->lcread(mport, mport->id, offset, len, &data); \ *value = (type)data; \ - spin_unlock_irqrestore(&rio_config_lock, flags); \ return res; \ } @@ -67,13 +56,8 @@ int __rio_local_read_config_##size \ int __rio_local_write_config_##size \ (struct rio_mport *mport, u32 offset, type value) \ { \ - int res; \ - unsigned long flags; \ if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ - spin_lock_irqsave(&rio_config_lock, flags); \ - res = mport->ops->lcwrite(mport, mport->id, offset, len, value);\ - spin_unlock_irqrestore(&rio_config_lock, flags); \ - return res; \ + return mport->ops->lcwrite(mport, mport->id, offset, len, value);\ } RIO_LOP_READ(8, u8, 1) @@ -104,13 +88,10 @@ int rio_mport_read_config_##size \ (struct rio_mport *mport, u16 destid, u8 hopcount, u32 offset, type *value) \ { \ int res; \ - unsigned long flags; \ u32 data = 0; \ if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ - spin_lock_irqsave(&rio_config_lock, flags); \ res = mport->ops->cread(mport, mport->id, destid, hopcount, offset, len, &data); \ *value = (type)data; \ - spin_unlock_irqrestore(&rio_config_lock, flags); \ return res; \ } @@ -127,13 +108,9 @@ int rio_mport_read_config_##size \ int rio_mport_write_config_##size \ (struct rio_mport *mport, u16 destid, u8 hopcount, u32 offset, type value) \ { \ - int res; \ - unsigned long flags; \ if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ - spin_lock_irqsave(&rio_config_lock, flags); \ - res = mport->ops->cwrite(mport, mport->id, destid, hopcount, offset, len, value); \ - spin_unlock_irqrestore(&rio_config_lock, flags); \ - return res; \ + return mport->ops->cwrite(mport, mport->id, destid, hopcount, \ + offset, len, value); \ } RIO_OP_READ(8, u8, 1) @@ -162,14 +139,7 @@ EXPORT_SYMBOL_GPL(rio_mport_write_config_32); */ int rio_mport_send_doorbell(struct rio_mport *mport, u16 destid, u16 data) { - int res; - unsigned long flags; - - spin_lock_irqsave(&rio_doorbell_lock, flags); - res = mport->ops->dsend(mport, mport->id, destid, data); - spin_unlock_irqrestore(&rio_doorbell_lock, flags); - - return res; + return mport->ops->dsend(mport, mport->id, destid, data); } EXPORT_SYMBOL_GPL(rio_mport_send_doorbell); diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig index e740a66cb1d6..0fd6195601ba 100644 --- a/drivers/regulator/Kconfig +++ b/drivers/regulator/Kconfig @@ -696,11 +696,11 @@ config REGULATOR_RC5T583 outputs which can be controlled by i2c communication. config REGULATOR_RK808 - tristate "Rockchip RK808/RK818 Power regulators" + tristate "Rockchip RK805/RK808/RK818 Power regulators" depends on MFD_RK808 help Select this option to enable the power regulator of ROCKCHIP - PMIC RK808 and RK818. + PMIC RK805,RK808 and RK818. This driver supports the control of different power rails of device through regulator interface. The device supports multiple DCDC/LDO outputs which can be controlled by i2c communication. 
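[Editor's note] The rio-access.c hunks above drop the global config-space spinlocks from the macro-generated accessors, leaving serialization to the mport drivers. A generic sketch of the macro-generation technique itself, in its new lock-free form; struct example_bus and bus_read_##size are hypothetical:

#include <linux/types.h>

struct example_bus {
	int (*read)(struct example_bus *bus, u32 offset, int len, u32 *data);
};

/* One macro stamps out a family of sized read accessors. */
#define DEFINE_BUS_READ(size, type, len)				\
static inline int bus_read_##size(struct example_bus *bus, u32 offset,	\
				  type *value)				\
{									\
	u32 data = 0;							\
	int res = bus->read(bus, offset, len, &data);			\
	*value = (type)data;						\
	return res;							\
}

DEFINE_BUS_READ(8, u8, 1)
DEFINE_BUS_READ(16, u16, 2)
DEFINE_BUS_READ(32, u32, 4)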
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c index 9dd44dd4cdf6..14637a01ba2d 100644 --- a/drivers/regulator/of_regulator.c +++ b/drivers/regulator/of_regulator.c @@ -150,7 +150,7 @@ static void of_get_regulation_constraints(struct device_node *np, suspend_state = &constraints->state_disk; break; case PM_SUSPEND_ON: - case PM_SUSPEND_FREEZE: + case PM_SUSPEND_TO_IDLE: case PM_SUSPEND_STANDBY: default: continue; diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c index a16d81420612..213b68743cc8 100644 --- a/drivers/regulator/rk808-regulator.c +++ b/drivers/regulator/rk808-regulator.c @@ -65,6 +65,27 @@ /* max steps for increase voltage of Buck1/2, equal 100mv*/ #define MAX_STEPS_ONE_TIME 8 +#define RK805_DESC(_id, _match, _supply, _min, _max, _step, _vreg, \ + _vmask, _ereg, _emask, _etime) \ + [_id] = { \ + .name = (_match), \ + .supply_name = (_supply), \ + .of_match = of_match_ptr(_match), \ + .regulators_node = of_match_ptr("regulators"), \ + .type = REGULATOR_VOLTAGE, \ + .id = (_id), \ + .n_voltages = (((_max) - (_min)) / (_step) + 1), \ + .owner = THIS_MODULE, \ + .min_uV = (_min) * 1000, \ + .uV_step = (_step) * 1000, \ + .vsel_reg = (_vreg), \ + .vsel_mask = (_vmask), \ + .enable_reg = (_ereg), \ + .enable_mask = (_emask), \ + .enable_time = (_etime), \ + .ops = &rk805_reg_ops, \ + } + #define RK8XX_DESC(_id, _match, _supply, _min, _max, _step, _vreg, \ _vmask, _ereg, _emask, _etime) \ [_id] = { \ @@ -298,6 +319,28 @@ static int rk808_set_suspend_voltage_range(struct regulator_dev *rdev, int uv) sel); } +static int rk805_set_suspend_enable(struct regulator_dev *rdev) +{ + unsigned int reg; + + reg = rdev->desc->enable_reg + RK808_SLP_SET_OFF_REG_OFFSET; + + return regmap_update_bits(rdev->regmap, reg, + rdev->desc->enable_mask, + rdev->desc->enable_mask); +} + +static int rk805_set_suspend_disable(struct regulator_dev *rdev) +{ + unsigned int reg; + + reg = rdev->desc->enable_reg + RK808_SLP_SET_OFF_REG_OFFSET; + + return regmap_update_bits(rdev->regmap, reg, + rdev->desc->enable_mask, + 0); +} + static int rk808_set_suspend_enable(struct regulator_dev *rdev) { unsigned int reg; @@ -320,6 +363,27 @@ static int rk808_set_suspend_disable(struct regulator_dev *rdev) rdev->desc->enable_mask); } +static struct regulator_ops rk805_reg_ops = { + .list_voltage = regulator_list_voltage_linear, + .map_voltage = regulator_map_voltage_linear, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .set_suspend_voltage = rk808_set_suspend_voltage, + .set_suspend_enable = rk805_set_suspend_enable, + .set_suspend_disable = rk805_set_suspend_disable, +}; + +static struct regulator_ops rk805_switch_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .set_suspend_enable = rk805_set_suspend_enable, + .set_suspend_disable = rk805_set_suspend_disable, +}; + static struct regulator_ops rk808_buck1_2_ops = { .list_voltage = regulator_list_voltage_linear, .map_voltage = regulator_map_voltage_linear, @@ -369,6 +433,68 @@ static struct regulator_ops rk808_switch_ops = { .set_suspend_disable = rk808_set_suspend_disable, }; +static const struct regulator_desc rk805_reg[] = { + { + .name = "DCDC_REG1", + .supply_name = "vcc1", + .of_match = of_match_ptr("DCDC_REG1"), + .regulators_node = 
of_match_ptr("regulators"), + .id = RK805_ID_DCDC1, + .ops = &rk805_reg_ops, + .type = REGULATOR_VOLTAGE, + .min_uV = 712500, + .uV_step = 12500, + .n_voltages = 64, + .vsel_reg = RK805_BUCK1_ON_VSEL_REG, + .vsel_mask = RK818_BUCK_VSEL_MASK, + .enable_reg = RK805_DCDC_EN_REG, + .enable_mask = BIT(0), + .owner = THIS_MODULE, + }, { + .name = "DCDC_REG2", + .supply_name = "vcc2", + .of_match = of_match_ptr("DCDC_REG2"), + .regulators_node = of_match_ptr("regulators"), + .id = RK805_ID_DCDC2, + .ops = &rk805_reg_ops, + .type = REGULATOR_VOLTAGE, + .min_uV = 712500, + .uV_step = 12500, + .n_voltages = 64, + .vsel_reg = RK805_BUCK2_ON_VSEL_REG, + .vsel_mask = RK818_BUCK_VSEL_MASK, + .enable_reg = RK805_DCDC_EN_REG, + .enable_mask = BIT(1), + .owner = THIS_MODULE, + }, { + .name = "DCDC_REG3", + .supply_name = "vcc3", + .of_match = of_match_ptr("DCDC_REG3"), + .regulators_node = of_match_ptr("regulators"), + .id = RK805_ID_DCDC3, + .ops = &rk805_switch_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = 1, + .enable_reg = RK805_DCDC_EN_REG, + .enable_mask = BIT(2), + .owner = THIS_MODULE, + }, + + RK805_DESC(RK805_ID_DCDC4, "DCDC_REG4", "vcc4", 800, 3400, 100, + RK805_BUCK4_ON_VSEL_REG, RK818_BUCK4_VSEL_MASK, + RK805_DCDC_EN_REG, BIT(3), 0), + + RK805_DESC(RK805_ID_LDO1, "LDO_REG1", "vcc5", 800, 3400, 100, + RK805_LDO1_ON_VSEL_REG, RK818_LDO_VSEL_MASK, RK805_LDO_EN_REG, + BIT(0), 400), + RK805_DESC(RK805_ID_LDO2, "LDO_REG2", "vcc5", 800, 3400, 100, + RK805_LDO2_ON_VSEL_REG, RK818_LDO_VSEL_MASK, RK805_LDO_EN_REG, + BIT(1), 400), + RK805_DESC(RK805_ID_LDO3, "LDO_REG3", "vcc6", 800, 3400, 100, + RK805_LDO3_ON_VSEL_REG, RK818_LDO_VSEL_MASK, RK805_LDO_EN_REG, + BIT(2), 400), +}; + static const struct regulator_desc rk808_reg[] = { { .name = "DCDC_REG1", @@ -625,6 +751,10 @@ static int rk808_regulator_probe(struct platform_device *pdev) platform_set_drvdata(pdev, pdata); switch (rk808->variant) { + case RK805_ID: + regulators = rk805_reg; + nregulators = RK805_NUM_REGULATORS; + break; case RK808_ID: regulators = rk808_reg; nregulators = RK808_NUM_REGULATORS; diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c index 6c9ec84121bd..a4456db5849d 100644 --- a/drivers/regulator/twl-regulator.c +++ b/drivers/regulator/twl-regulator.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include /* diff --git a/drivers/regulator/twl6030-regulator.c b/drivers/regulator/twl6030-regulator.c index 56aada387887..219cbd910dbf 100644 --- a/drivers/regulator/twl6030-regulator.c +++ b/drivers/regulator/twl6030-regulator.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include struct twlreg_info { diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig index 8891a8e50f12..bf04479456a0 100644 --- a/drivers/remoteproc/Kconfig +++ b/drivers/remoteproc/Kconfig @@ -12,6 +12,15 @@ config REMOTEPROC if REMOTEPROC +config IMX_REMOTEPROC + tristate "IMX6/7 remoteproc support" + depends on SOC_IMX6SX || SOC_IMX7D + help + Say y here to support iMX's remote processors (Cortex M4 + on iMX7D) via the remote processor framework. + + It's safe to say N here. 
+ config OMAP_REMOTEPROC tristate "OMAP remoteproc support" depends on HAS_DMA @@ -83,6 +92,7 @@ config QCOM_ADSP_PIL depends on OF && ARCH_QCOM depends on QCOM_SMEM depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n) + depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n select MFD_SYSCON select QCOM_MDT_LOADER select QCOM_RPROC_COMMON @@ -99,6 +109,7 @@ config QCOM_Q6V5_PIL depends on OF && ARCH_QCOM depends on QCOM_SMEM depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n) + depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n select MFD_SYSCON select QCOM_RPROC_COMMON select QCOM_SCM @@ -110,6 +121,7 @@ config QCOM_WCNSS_PIL tristate "Qualcomm WCNSS Peripheral Image Loader" depends on OF && ARCH_QCOM depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n) + depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n depends on QCOM_SMEM select QCOM_MDT_LOADER select QCOM_RPROC_COMMON diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile index f1ce5fc8a2f3..1a0b3dd44b8c 100644 --- a/drivers/remoteproc/Makefile +++ b/drivers/remoteproc/Makefile @@ -8,6 +8,7 @@ remoteproc-y += remoteproc_debugfs.o remoteproc-y += remoteproc_sysfs.o remoteproc-y += remoteproc_virtio.o remoteproc-y += remoteproc_elf_loader.o +obj-$(CONFIG_IMX_REMOTEPROC) += imx_rproc.o obj-$(CONFIG_OMAP_REMOTEPROC) += omap_remoteproc.o obj-$(CONFIG_WKUP_M3_RPROC) += wkup_m3_rproc.o obj-$(CONFIG_DA8XX_REMOTEPROC) += da8xx_remoteproc.o diff --git a/drivers/remoteproc/da8xx_remoteproc.c b/drivers/remoteproc/da8xx_remoteproc.c index 99539cec1329..bf3b9034c319 100644 --- a/drivers/remoteproc/da8xx_remoteproc.c +++ b/drivers/remoteproc/da8xx_remoteproc.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -38,9 +39,27 @@ MODULE_PARM_DESC(da8xx_fw_name, #define SYSCFG_CHIPSIG3 BIT(3) #define SYSCFG_CHIPSIG4 BIT(4) +#define DA8XX_RPROC_LOCAL_ADDRESS_MASK (SZ_16M - 1) + +/** + * struct da8xx_rproc_mem - internal memory structure + * @cpu_addr: MPU virtual address of the memory region + * @bus_addr: Bus address used to access the memory region + * @dev_addr: Device address of the memory region from DSP view + * @size: Size of the memory region + */ +struct da8xx_rproc_mem { + void __iomem *cpu_addr; + phys_addr_t bus_addr; + u32 dev_addr; + size_t size; +}; + /** * struct da8xx_rproc - da8xx remote processor instance state * @rproc: rproc handle + * @mem: internal memory regions data + * @num_mems: number of internal memory regions * @dsp_clk: placeholder for platform's DSP clk * @ack_fxn: chip-specific ack function for ack'ing irq * @irq_data: ack_fxn function parameter @@ -50,6 +69,8 @@ MODULE_PARM_DESC(da8xx_fw_name, */ struct da8xx_rproc { struct rproc *rproc; + struct da8xx_rproc_mem *mem; + int num_mems; struct clk *dsp_clk; void (*ack_fxn)(struct irq_data *data); struct irq_data *irq_data; @@ -158,6 +179,44 @@ static const struct rproc_ops da8xx_rproc_ops = { .kick = da8xx_rproc_kick, }; +static int da8xx_rproc_get_internal_memories(struct platform_device *pdev, + struct da8xx_rproc *drproc) +{ + static const char * const mem_names[] = {"l2sram", "l1pram", "l1dram"}; + int num_mems = ARRAY_SIZE(mem_names); + struct device *dev = &pdev->dev; + struct resource *res; + int i; + + drproc->mem = devm_kcalloc(dev, num_mems, sizeof(*drproc->mem), + GFP_KERNEL); + if (!drproc->mem) + return -ENOMEM; + + for (i = 0; i < num_mems; i++) { + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + mem_names[i]); + drproc->mem[i].cpu_addr = 
devm_ioremap_resource(dev, res); + if (IS_ERR(drproc->mem[i].cpu_addr)) { + dev_err(dev, "failed to parse and map %s memory\n", + mem_names[i]); + return PTR_ERR(drproc->mem[i].cpu_addr); + } + drproc->mem[i].bus_addr = res->start; + drproc->mem[i].dev_addr = + res->start & DA8XX_RPROC_LOCAL_ADDRESS_MASK; + drproc->mem[i].size = resource_size(res); + + dev_dbg(dev, "memory %8s: bus addr %pa size 0x%x va %p da 0x%x\n", + mem_names[i], &drproc->mem[i].bus_addr, + drproc->mem[i].size, drproc->mem[i].cpu_addr, + drproc->mem[i].dev_addr); + } + drproc->num_mems = num_mems; + + return 0; +} + static int da8xx_rproc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -184,12 +243,14 @@ static int da8xx_rproc_probe(struct platform_device *pdev) return -EINVAL; } - bootreg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + bootreg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "host1cfg"); bootreg = devm_ioremap_resource(dev, bootreg_res); if (IS_ERR(bootreg)) return PTR_ERR(bootreg); - chipsig_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + chipsig_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "chipsig"); chipsig = devm_ioremap_resource(dev, chipsig_res); if (IS_ERR(chipsig)) return PTR_ERR(chipsig); @@ -201,16 +262,31 @@ static int da8xx_rproc_probe(struct platform_device *pdev) return PTR_ERR(dsp_clk); } + if (dev->of_node) { + ret = of_reserved_mem_device_init(dev); + if (ret) { + dev_err(dev, "device does not have specific CMA pool: %d\n", + ret); + return ret; + } + } + rproc = rproc_alloc(dev, "dsp", &da8xx_rproc_ops, da8xx_fw_name, sizeof(*drproc)); - if (!rproc) - return -ENOMEM; + if (!rproc) { + ret = -ENOMEM; + goto free_mem; + } drproc = rproc->priv; drproc->rproc = rproc; drproc->dsp_clk = dsp_clk; rproc->has_iommu = false; + ret = da8xx_rproc_get_internal_memories(pdev, drproc); + if (ret) + goto free_rproc; + platform_set_drvdata(pdev, rproc); /* everything the ISR needs is now setup, so hook it up */ @@ -247,7 +323,9 @@ static int da8xx_rproc_probe(struct platform_device *pdev) free_rproc: rproc_free(rproc); - +free_mem: + if (dev->of_node) + of_reserved_mem_device_release(dev); return ret; } @@ -255,6 +333,7 @@ static int da8xx_rproc_remove(struct platform_device *pdev) { struct rproc *rproc = platform_get_drvdata(pdev); struct da8xx_rproc *drproc = (struct da8xx_rproc *)rproc->priv; + struct device *dev = &pdev->dev; /* * The devm subsystem might end up releasing things before @@ -265,15 +344,24 @@ static int da8xx_rproc_remove(struct platform_device *pdev) rproc_del(rproc); rproc_free(rproc); + if (dev->of_node) + of_reserved_mem_device_release(dev); return 0; } +static const struct of_device_id davinci_rproc_of_match[] __maybe_unused = { + { .compatible = "ti,da850-dsp", }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, davinci_rproc_of_match); + static struct platform_driver da8xx_rproc_driver = { .probe = da8xx_rproc_probe, .remove = da8xx_rproc_remove, .driver = { .name = "davinci-rproc", + .of_match_table = of_match_ptr(davinci_rproc_of_match), }, }; diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c new file mode 100644 index 000000000000..633268e9d550 --- /dev/null +++ b/drivers/remoteproc/imx_rproc.c @@ -0,0 +1,425 @@ +/* + * Copyright (c) 2017 Pengutronix, Oleksij Rempel + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. 
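[Editor's note] The da8xx_rproc_probe() hunk above attaches the device's reserved-memory (CMA) pool via of_reserved_mem_device_init() and releases it on every error path and on remove. A sketch of that pairing in isolation; the helper names are illustrative:

#include <linux/device.h>
#include <linux/of_reserved_mem.h>

static int example_attach_pool(struct device *dev)
{
	int ret;

	if (!dev->of_node)
		return 0;	/* only DT-booted devices carry memory-region */

	ret = of_reserved_mem_device_init(dev);
	if (ret)
		dev_err(dev, "no dedicated reserved-memory pool: %d\n", ret);

	return ret;
}

static void example_detach_pool(struct device *dev)
{
	if (dev->of_node)
		of_reserved_mem_device_release(dev);	/* mirror on error/remove */
}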
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define IMX7D_SRC_SCR 0x0C +#define IMX7D_ENABLE_M4 BIT(3) +#define IMX7D_SW_M4P_RST BIT(2) +#define IMX7D_SW_M4C_RST BIT(1) +#define IMX7D_SW_M4C_NON_SCLR_RST BIT(0) + +#define IMX7D_M4_RST_MASK (IMX7D_ENABLE_M4 | IMX7D_SW_M4P_RST \ + | IMX7D_SW_M4C_RST \ + | IMX7D_SW_M4C_NON_SCLR_RST) + +#define IMX7D_M4_START (IMX7D_ENABLE_M4 | IMX7D_SW_M4P_RST \ + | IMX7D_SW_M4C_RST) +#define IMX7D_M4_STOP IMX7D_SW_M4C_NON_SCLR_RST + +/* Address: 0x020D8000 */ +#define IMX6SX_SRC_SCR 0x00 +#define IMX6SX_ENABLE_M4 BIT(22) +#define IMX6SX_SW_M4P_RST BIT(12) +#define IMX6SX_SW_M4C_NON_SCLR_RST BIT(4) +#define IMX6SX_SW_M4C_RST BIT(3) + +#define IMX6SX_M4_START (IMX6SX_ENABLE_M4 | IMX6SX_SW_M4P_RST \ + | IMX6SX_SW_M4C_RST) +#define IMX6SX_M4_STOP IMX6SX_SW_M4C_NON_SCLR_RST +#define IMX6SX_M4_RST_MASK (IMX6SX_ENABLE_M4 | IMX6SX_SW_M4P_RST \ + | IMX6SX_SW_M4C_NON_SCLR_RST \ + | IMX6SX_SW_M4C_RST) + +#define IMX7D_RPROC_MEM_MAX 8 + +/** + * struct imx_rproc_mem - slim internal memory structure + * @cpu_addr: MPU virtual address of the memory region + * @sys_addr: Bus address used to access the memory region + * @size: Size of the memory region + */ +struct imx_rproc_mem { + void __iomem *cpu_addr; + phys_addr_t sys_addr; + size_t size; +}; + +/* att flags */ +/* M4 own area. Can be mapped at probe */ +#define ATT_OWN BIT(1) + +/* address translation table */ +struct imx_rproc_att { + u32 da; /* device address (From Cortex M4 view)*/ + u32 sa; /* system bus address */ + u32 size; /* size of reg range */ + int flags; +}; + +struct imx_rproc_dcfg { + u32 src_reg; + u32 src_mask; + u32 src_start; + u32 src_stop; + const struct imx_rproc_att *att; + size_t att_size; +}; + +struct imx_rproc { + struct device *dev; + struct regmap *regmap; + struct rproc *rproc; + const struct imx_rproc_dcfg *dcfg; + struct imx_rproc_mem mem[IMX7D_RPROC_MEM_MAX]; + struct clk *clk; +}; + +static const struct imx_rproc_att imx_rproc_att_imx7d[] = { + /* dev addr , sys addr , size , flags */ + /* OCRAM_S (M4 Boot code) - alias */ + { 0x00000000, 0x00180000, 0x00008000, 0 }, + /* OCRAM_S (Code) */ + { 0x00180000, 0x00180000, 0x00008000, ATT_OWN }, + /* OCRAM (Code) - alias */ + { 0x00900000, 0x00900000, 0x00020000, 0 }, + /* OCRAM_EPDC (Code) - alias */ + { 0x00920000, 0x00920000, 0x00020000, 0 }, + /* OCRAM_PXP (Code) - alias */ + { 0x00940000, 0x00940000, 0x00008000, 0 }, + /* TCML (Code) */ + { 0x1FFF8000, 0x007F8000, 0x00008000, ATT_OWN }, + /* DDR (Code) - alias, first part of DDR (Data) */ + { 0x10000000, 0x80000000, 0x0FFF0000, 0 }, + + /* TCMU (Data) */ + { 0x20000000, 0x00800000, 0x00008000, ATT_OWN }, + /* OCRAM (Data) */ + { 0x20200000, 0x00900000, 0x00020000, 0 }, + /* OCRAM_EPDC (Data) */ + { 0x20220000, 0x00920000, 0x00020000, 0 }, + /* OCRAM_PXP (Data) */ + { 0x20240000, 0x00940000, 0x00008000, 0 }, + /* DDR (Data) */ + { 0x80000000, 0x80000000, 0x60000000, 0 }, +}; + +static const struct imx_rproc_att imx_rproc_att_imx6sx[] = { + /* dev addr , sys addr , size , flags */ + /* TCML (M4 Boot Code) - alias */ + { 0x00000000, 0x007F8000, 0x00008000, 0 }, + /* OCRAM_S (Code) */ + { 0x00180000, 0x008F8000, 0x00004000, 0 }, + /* OCRAM_S (Code) - alias */ + { 0x00180000, 0x008FC000, 0x00004000, 0 }, + /* TCML (Code) */ + { 0x1FFF8000, 0x007F8000, 0x00008000, ATT_OWN }, + /* DDR (Code) - alias, first part of DDR (Data) */ + { 0x10000000, 0x80000000, 0x0FFF8000, 0 }, + + /* TCMU (Data) */ + { 0x20000000, 
0x00800000, 0x00008000, ATT_OWN }, + /* OCRAM_S (Data) - alias? */ + { 0x208F8000, 0x008F8000, 0x00004000, 0 }, + /* DDR (Data) */ + { 0x80000000, 0x80000000, 0x60000000, 0 }, +}; + +static const struct imx_rproc_dcfg imx_rproc_cfg_imx7d = { + .src_reg = IMX7D_SRC_SCR, + .src_mask = IMX7D_M4_RST_MASK, + .src_start = IMX7D_M4_START, + .src_stop = IMX7D_M4_STOP, + .att = imx_rproc_att_imx7d, + .att_size = ARRAY_SIZE(imx_rproc_att_imx7d), +}; + +static const struct imx_rproc_dcfg imx_rproc_cfg_imx6sx = { + .src_reg = IMX6SX_SRC_SCR, + .src_mask = IMX6SX_M4_RST_MASK, + .src_start = IMX6SX_M4_START, + .src_stop = IMX6SX_M4_STOP, + .att = imx_rproc_att_imx6sx, + .att_size = ARRAY_SIZE(imx_rproc_att_imx6sx), +}; + +static int imx_rproc_start(struct rproc *rproc) +{ + struct imx_rproc *priv = rproc->priv; + const struct imx_rproc_dcfg *dcfg = priv->dcfg; + struct device *dev = priv->dev; + int ret; + + ret = regmap_update_bits(priv->regmap, dcfg->src_reg, + dcfg->src_mask, dcfg->src_start); + if (ret) + dev_err(dev, "Filed to enable M4!\n"); + + return ret; +} + +static int imx_rproc_stop(struct rproc *rproc) +{ + struct imx_rproc *priv = rproc->priv; + const struct imx_rproc_dcfg *dcfg = priv->dcfg; + struct device *dev = priv->dev; + int ret; + + ret = regmap_update_bits(priv->regmap, dcfg->src_reg, + dcfg->src_mask, dcfg->src_stop); + if (ret) + dev_err(dev, "Filed to stop M4!\n"); + + return ret; +} + +static int imx_rproc_da_to_sys(struct imx_rproc *priv, u64 da, + int len, u64 *sys) +{ + const struct imx_rproc_dcfg *dcfg = priv->dcfg; + int i; + + /* parse address translation table */ + for (i = 0; i < dcfg->att_size; i++) { + const struct imx_rproc_att *att = &dcfg->att[i]; + + if (da >= att->da && da + len < att->da + att->size) { + unsigned int offset = da - att->da; + + *sys = att->sa + offset; + return 0; + } + } + + dev_warn(priv->dev, "Translation filed: da = 0x%llx len = 0x%x\n", + da, len); + return -ENOENT; +} + +static void *imx_rproc_da_to_va(struct rproc *rproc, u64 da, int len) +{ + struct imx_rproc *priv = rproc->priv; + void *va = NULL; + u64 sys; + int i; + + if (len <= 0) + return NULL; + + /* + * On device side we have many aliases, so we need to convert device + * address (M4) to system bus address first. 
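[Editor's note] imx_rproc_da_to_sys() above walks the per-SoC address translation table to find the window containing a Cortex-M4 address and rebases it onto the system bus. A generic sketch of that table walk; the types are illustrative and the boundary check is the inclusive variant:

#include <linux/errno.h>
#include <linux/types.h>

struct example_att {
	u32 da;		/* device (Cortex-M4 view) address */
	u32 sa;		/* system bus address */
	u32 size;	/* window size */
};

static int example_da_to_sys(const struct example_att *att, int att_size,
			     u64 da, int len, u64 *sys)
{
	int i;

	for (i = 0; i < att_size; i++) {
		if (da >= att[i].da && da + len <= att[i].da + att[i].size) {
			*sys = att[i].sa + (da - att[i].da);	/* rebase */
			return 0;
		}
	}

	return -ENOENT;		/* address not covered by any window */
}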
+ */ + if (imx_rproc_da_to_sys(priv, da, len, &sys)) + return NULL; + + for (i = 0; i < IMX7D_RPROC_MEM_MAX; i++) { + if (sys >= priv->mem[i].sys_addr && sys + len < + priv->mem[i].sys_addr + priv->mem[i].size) { + unsigned int offset = sys - priv->mem[i].sys_addr; + /* __force to make sparse happy with type conversion */ + va = (__force void *)(priv->mem[i].cpu_addr + offset); + break; + } + } + + dev_dbg(&rproc->dev, "da = 0x%llx len = 0x%x va = 0x%p\n", da, len, va); + + return va; +} + +static const struct rproc_ops imx_rproc_ops = { + .start = imx_rproc_start, + .stop = imx_rproc_stop, + .da_to_va = imx_rproc_da_to_va, +}; + +static int imx_rproc_addr_init(struct imx_rproc *priv, + struct platform_device *pdev) +{ + const struct imx_rproc_dcfg *dcfg = priv->dcfg; + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + int a, b = 0, err, nph; + + /* remap required addresses */ + for (a = 0; a < dcfg->att_size; a++) { + const struct imx_rproc_att *att = &dcfg->att[a]; + + if (!(att->flags & ATT_OWN)) + continue; + + if (b >= IMX7D_RPROC_MEM_MAX) + break; + + priv->mem[b].cpu_addr = devm_ioremap(&pdev->dev, + att->sa, att->size); + if (!priv->mem[b].cpu_addr) { + dev_err(dev, "devm_ioremap_resource failed\n"); + return -ENOMEM; + } + priv->mem[b].sys_addr = att->sa; + priv->mem[b].size = att->size; + b++; + } + + /* memory-region is optional property */ + nph = of_count_phandle_with_args(np, "memory-region", NULL); + if (nph <= 0) + return 0; + + /* remap optional addresses */ + for (a = 0; a < nph; a++) { + struct device_node *node; + struct resource res; + + node = of_parse_phandle(np, "memory-region", a); + err = of_address_to_resource(node, 0, &res); + if (err) { + dev_err(dev, "unable to resolve memory region\n"); + return err; + } + + if (b >= IMX7D_RPROC_MEM_MAX) + break; + + priv->mem[b].cpu_addr = devm_ioremap_resource(&pdev->dev, &res); + if (IS_ERR(priv->mem[b].cpu_addr)) { + dev_err(dev, "devm_ioremap_resource failed\n"); + err = PTR_ERR(priv->mem[b].cpu_addr); + return err; + } + priv->mem[b].sys_addr = res.start; + priv->mem[b].size = resource_size(&res); + b++; + } + + return 0; +} + +static int imx_rproc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct imx_rproc *priv; + struct rproc *rproc; + struct regmap_config config = { .name = "imx-rproc" }; + const struct imx_rproc_dcfg *dcfg; + struct regmap *regmap; + int ret; + + regmap = syscon_regmap_lookup_by_phandle(np, "syscon"); + if (IS_ERR(regmap)) { + dev_err(dev, "failed to find syscon\n"); + return PTR_ERR(regmap); + } + regmap_attach_dev(dev, regmap, &config); + + /* set some other name then imx */ + rproc = rproc_alloc(dev, "imx-rproc", &imx_rproc_ops, + NULL, sizeof(*priv)); + if (!rproc) { + ret = -ENOMEM; + goto err; + } + + dcfg = of_device_get_match_data(dev); + if (!dcfg) + return -EINVAL; + + priv = rproc->priv; + priv->rproc = rproc; + priv->regmap = regmap; + priv->dcfg = dcfg; + priv->dev = dev; + + dev_set_drvdata(dev, rproc); + + ret = imx_rproc_addr_init(priv, pdev); + if (ret) { + dev_err(dev, "filed on imx_rproc_addr_init\n"); + goto err_put_rproc; + } + + priv->clk = devm_clk_get(dev, NULL); + if (IS_ERR(priv->clk)) { + dev_err(dev, "Failed to get clock\n"); + rproc_free(rproc); + return PTR_ERR(priv->clk); + } + + /* + * clk for M4 block including memory. Should be + * enabled before .start for FW transfer. 
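[Editor's note] imx_rproc_addr_init() above treats memory-region as an optional property: each phandle is resolved to a resource and mapped. A sketch of that pattern; it additionally drops the node reference with of_node_put(), which is an assumption of good practice on top of the hunk, not a claim about the driver:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>

static int example_map_memory_regions(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	int i, nph, err;

	nph = of_count_phandle_with_args(np, "memory-region", NULL);
	if (nph <= 0)
		return 0;	/* the property is optional */

	for (i = 0; i < nph; i++) {
		struct device_node *node;
		struct resource res;
		void __iomem *va;

		node = of_parse_phandle(np, "memory-region", i);
		err = of_address_to_resource(node, 0, &res);
		of_node_put(node);	/* drop the reference from of_parse_phandle */
		if (err)
			return err;

		va = devm_ioremap_resource(&pdev->dev, &res);
		if (IS_ERR(va))
			return PTR_ERR(va);

		/* record va, res.start and resource_size(&res) as needed */
	}

	return 0;
}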
+ */ + ret = clk_prepare_enable(priv->clk); + if (ret) { + dev_err(&rproc->dev, "Failed to enable clock\n"); + rproc_free(rproc); + return ret; + } + + ret = rproc_add(rproc); + if (ret) { + dev_err(dev, "rproc_add failed\n"); + goto err_put_clk; + } + + return ret; + +err_put_clk: + clk_disable_unprepare(priv->clk); +err_put_rproc: + rproc_free(rproc); +err: + return ret; +} + +static int imx_rproc_remove(struct platform_device *pdev) +{ + struct rproc *rproc = platform_get_drvdata(pdev); + struct imx_rproc *priv = rproc->priv; + + clk_disable_unprepare(priv->clk); + rproc_del(rproc); + rproc_free(rproc); + + return 0; +} + +static const struct of_device_id imx_rproc_of_match[] = { + { .compatible = "fsl,imx7d-cm4", .data = &imx_rproc_cfg_imx7d }, + { .compatible = "fsl,imx6sx-cm4", .data = &imx_rproc_cfg_imx6sx }, + {}, +}; +MODULE_DEVICE_TABLE(of, imx_rproc_of_match); + +static struct platform_driver imx_rproc_driver = { + .probe = imx_rproc_probe, + .remove = imx_rproc_remove, + .driver = { + .name = "imx-rproc", + .of_match_table = imx_rproc_of_match, + }, +}; + +module_platform_driver(imx_rproc_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("IMX6SX/7D remote processor control driver"); +MODULE_AUTHOR("Oleksij Rempel "); diff --git a/drivers/remoteproc/keystone_remoteproc.c b/drivers/remoteproc/keystone_remoteproc.c index 5f776bfd674a..aaac31134e39 100644 --- a/drivers/remoteproc/keystone_remoteproc.c +++ b/drivers/remoteproc/keystone_remoteproc.c @@ -410,7 +410,7 @@ static int keystone_rproc_probe(struct platform_device *pdev) if (ret) goto free_rproc; - ksproc->reset = devm_reset_control_get(dev, NULL); + ksproc->reset = devm_reset_control_get_exclusive(dev, NULL); if (IS_ERR(ksproc->reset)) { ret = PTR_ERR(ksproc->reset); goto free_rproc; @@ -505,6 +505,7 @@ static const struct of_device_id keystone_rproc_of_match[] = { { .compatible = "ti,k2hk-dsp", }, { .compatible = "ti,k2l-dsp", }, { .compatible = "ti,k2e-dsp", }, + { .compatible = "ti,k2g-dsp", }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, keystone_rproc_of_match); diff --git a/drivers/remoteproc/qcom_adsp_pil.c b/drivers/remoteproc/qcom_adsp_pil.c index 49fe2f807e1d..3f6af54dbc96 100644 --- a/drivers/remoteproc/qcom_adsp_pil.c +++ b/drivers/remoteproc/qcom_adsp_pil.c @@ -38,6 +38,7 @@ struct adsp_data { const char *firmware_name; int pas_id; bool has_aggre2_clk; + const char *ssr_name; }; struct qcom_adsp { @@ -71,7 +72,9 @@ struct qcom_adsp { void *mem_region; size_t mem_size; + struct qcom_rproc_glink glink_subdev; struct qcom_rproc_subdev smd_subdev; + struct qcom_rproc_ssr ssr_subdev; }; static int adsp_load(struct rproc *rproc, const struct firmware *fw) @@ -266,10 +269,7 @@ static int adsp_init_regulator(struct qcom_adsp *adsp) regulator_set_load(adsp->cx_supply, 100000); adsp->px_supply = devm_regulator_get(adsp->dev, "px"); - if (IS_ERR(adsp->px_supply)) - return PTR_ERR(adsp->px_supply); - - return 0; + return PTR_ERR_OR_ZERO(adsp->px_supply); } static int adsp_request_irq(struct qcom_adsp *adsp, @@ -401,7 +401,9 @@ static int adsp_probe(struct platform_device *pdev) goto free_rproc; } + qcom_add_glink_subdev(rproc, &adsp->glink_subdev); qcom_add_smd_subdev(rproc, &adsp->smd_subdev); + qcom_add_ssr_subdev(rproc, &adsp->ssr_subdev, desc->ssr_name); ret = rproc_add(rproc); if (ret) @@ -422,7 +424,9 @@ static int adsp_remove(struct platform_device *pdev) qcom_smem_state_put(adsp->state); rproc_del(adsp->rproc); + qcom_remove_glink_subdev(adsp->rproc, &adsp->glink_subdev); 
qcom_remove_smd_subdev(adsp->rproc, &adsp->smd_subdev); + qcom_remove_ssr_subdev(adsp->rproc, &adsp->ssr_subdev); rproc_free(adsp->rproc); return 0; @@ -433,6 +437,7 @@ static const struct adsp_data adsp_resource_init = { .firmware_name = "adsp.mdt", .pas_id = 1, .has_aggre2_clk = false, + .ssr_name = "lpass", }; static const struct adsp_data slpi_resource_init = { @@ -440,6 +445,7 @@ static const struct adsp_data slpi_resource_init = { .firmware_name = "slpi.mdt", .pas_id = 12, .has_aggre2_clk = true, + .ssr_name = "dsps", }; static const struct of_device_id adsp_of_match[] = { diff --git a/drivers/remoteproc/qcom_common.c b/drivers/remoteproc/qcom_common.c index bb90481215c6..d487040b528b 100644 --- a/drivers/remoteproc/qcom_common.c +++ b/drivers/remoteproc/qcom_common.c @@ -18,13 +18,19 @@ #include #include #include +#include #include +#include #include #include "remoteproc_internal.h" #include "qcom_common.h" +#define to_glink_subdev(d) container_of(d, struct qcom_rproc_glink, subdev) #define to_smd_subdev(d) container_of(d, struct qcom_rproc_subdev, subdev) +#define to_ssr_subdev(d) container_of(d, struct qcom_rproc_ssr, subdev) + +static BLOCKING_NOTIFIER_HEAD(ssr_notifiers); /** * qcom_mdt_find_rsc_table() - provide dummy resource table for remoteproc @@ -45,13 +51,60 @@ struct resource_table *qcom_mdt_find_rsc_table(struct rproc *rproc, } EXPORT_SYMBOL_GPL(qcom_mdt_find_rsc_table); +static int glink_subdev_probe(struct rproc_subdev *subdev) +{ + struct qcom_rproc_glink *glink = to_glink_subdev(subdev); + + glink->edge = qcom_glink_smem_register(glink->dev, glink->node); + + return IS_ERR(glink->edge) ? PTR_ERR(glink->edge) : 0; +} + +static void glink_subdev_remove(struct rproc_subdev *subdev) +{ + struct qcom_rproc_glink *glink = to_glink_subdev(subdev); + + qcom_glink_smem_unregister(glink->edge); + glink->edge = NULL; +} + +/** + * qcom_add_glink_subdev() - try to add a GLINK subdevice to rproc + * @rproc: rproc handle to parent the subdevice + * @glink: reference to a GLINK subdev context + */ +void qcom_add_glink_subdev(struct rproc *rproc, struct qcom_rproc_glink *glink) +{ + struct device *dev = &rproc->dev; + + glink->node = of_get_child_by_name(dev->parent->of_node, "glink-edge"); + if (!glink->node) + return; + + glink->dev = dev; + rproc_add_subdev(rproc, &glink->subdev, glink_subdev_probe, glink_subdev_remove); +} +EXPORT_SYMBOL_GPL(qcom_add_glink_subdev); + +/** + * qcom_remove_glink_subdev() - remove a GLINK subdevice from rproc + * @rproc: rproc handle + * @glink: reference to a GLINK subdev context + */ +void qcom_remove_glink_subdev(struct rproc *rproc, struct qcom_rproc_glink *glink) +{ + rproc_remove_subdev(rproc, &glink->subdev); + of_node_put(glink->node); +} +EXPORT_SYMBOL_GPL(qcom_remove_glink_subdev); + static int smd_subdev_probe(struct rproc_subdev *subdev) { struct qcom_rproc_subdev *smd = to_smd_subdev(subdev); smd->edge = qcom_smd_register_edge(smd->dev, smd->node); - return IS_ERR(smd->edge) ? PTR_ERR(smd->edge) : 0; + return PTR_ERR_OR_ZERO(smd->edge); } static void smd_subdev_remove(struct rproc_subdev *subdev) @@ -92,5 +145,72 @@ void qcom_remove_smd_subdev(struct rproc *rproc, struct qcom_rproc_subdev *smd) } EXPORT_SYMBOL_GPL(qcom_remove_smd_subdev); +/** + * qcom_register_ssr_notifier() - register SSR notification handler + * @nb: notifier_block to notify for restart notifications + * + * Returns 0 on success, negative errno on failure. + * + * This register the @notify function as handler for restart notifications. 
As + * remote processors are stopped this function will be called, with the SSR + * name passed as a parameter. + */ +int qcom_register_ssr_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&ssr_notifiers, nb); +} +EXPORT_SYMBOL_GPL(qcom_register_ssr_notifier); + +/** + * qcom_unregister_ssr_notifier() - unregister SSR notification handler + * @nb: notifier_block to unregister + */ +void qcom_unregister_ssr_notifier(struct notifier_block *nb) +{ + blocking_notifier_chain_unregister(&ssr_notifiers, nb); +} +EXPORT_SYMBOL_GPL(qcom_unregister_ssr_notifier); + +static int ssr_notify_start(struct rproc_subdev *subdev) +{ + return 0; +} + +static void ssr_notify_stop(struct rproc_subdev *subdev) +{ + struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev); + + blocking_notifier_call_chain(&ssr_notifiers, 0, (void *)ssr->name); +} + +/** + * qcom_add_ssr_subdev() - register subdevice as restart notification source + * @rproc: rproc handle + * @ssr: SSR subdevice handle + * @ssr_name: identifier to use for notifications originating from @rproc + * + * As the @ssr is registered with the @rproc SSR events will be sent to all + * registered listeners in the system as the remoteproc is shut down. + */ +void qcom_add_ssr_subdev(struct rproc *rproc, struct qcom_rproc_ssr *ssr, + const char *ssr_name) +{ + ssr->name = ssr_name; + + rproc_add_subdev(rproc, &ssr->subdev, ssr_notify_start, ssr_notify_stop); +} +EXPORT_SYMBOL_GPL(qcom_add_ssr_subdev); + +/** + * qcom_remove_ssr_subdev() - remove subdevice as restart notification source + * @rproc: rproc handle + * @ssr: SSR subdevice handle + */ +void qcom_remove_ssr_subdev(struct rproc *rproc, struct qcom_rproc_ssr *ssr) +{ + rproc_remove_subdev(rproc, &ssr->subdev); +} +EXPORT_SYMBOL_GPL(qcom_remove_ssr_subdev); + MODULE_DESCRIPTION("Qualcomm Remoteproc helper driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/remoteproc/qcom_common.h b/drivers/remoteproc/qcom_common.h index db5c826d5cd4..4f8bc168473c 100644 --- a/drivers/remoteproc/qcom_common.h +++ b/drivers/remoteproc/qcom_common.h @@ -4,6 +4,14 @@ #include #include "remoteproc_internal.h" +struct qcom_rproc_glink { + struct rproc_subdev subdev; + + struct device *dev; + struct device_node *node; + struct qcom_glink *edge; +}; + struct qcom_rproc_subdev { struct rproc_subdev subdev; @@ -12,11 +20,24 @@ struct qcom_rproc_subdev { struct qcom_smd_edge *edge; }; +struct qcom_rproc_ssr { + struct rproc_subdev subdev; + + const char *name; +}; + struct resource_table *qcom_mdt_find_rsc_table(struct rproc *rproc, const struct firmware *fw, int *tablesz); +void qcom_add_glink_subdev(struct rproc *rproc, struct qcom_rproc_glink *glink); +void qcom_remove_glink_subdev(struct rproc *rproc, struct qcom_rproc_glink *glink); + void qcom_add_smd_subdev(struct rproc *rproc, struct qcom_rproc_subdev *smd); void qcom_remove_smd_subdev(struct rproc *rproc, struct qcom_rproc_subdev *smd); +void qcom_add_ssr_subdev(struct rproc *rproc, struct qcom_rproc_ssr *ssr, + const char *ssr_name); +void qcom_remove_ssr_subdev(struct rproc *rproc, struct qcom_rproc_ssr *ssr); + #endif diff --git a/drivers/remoteproc/qcom_q6v5_pil.c b/drivers/remoteproc/qcom_q6v5_pil.c index 8fd697a3cf8f..2d3d5ac92c06 100644 --- a/drivers/remoteproc/qcom_q6v5_pil.c +++ b/drivers/remoteproc/qcom_q6v5_pil.c @@ -153,6 +153,7 @@ struct q6v5 { size_t mpss_size; struct qcom_rproc_subdev smd_subdev; + struct qcom_rproc_ssr ssr_subdev; }; static int q6v5_regulator_init(struct device *dev, struct reg_info *regs, @@ -867,7 +868,8 
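[Editor's note] qcom_register_ssr_notifier() above exposes the new blocking notifier chain; ssr_notify_stop() passes the SSR name as the callback data. A sketch of a client, assuming the prototype comes from the Qualcomm remoteproc header added by this series; the callback and variable names are illustrative:

#include <linux/notifier.h>
#include <linux/printk.h>

/* qcom_register_ssr_notifier()/qcom_unregister_ssr_notifier() are
 * declared in the Qualcomm remoteproc header introduced by this series. */

static int example_ssr_cb(struct notifier_block *nb, unsigned long event,
			  void *data)
{
	const char *ssr_name = data;	/* name passed by ssr_notify_stop() */

	pr_info("remoteproc %s has been stopped\n", ssr_name);

	return NOTIFY_OK;
}

static struct notifier_block example_ssr_nb = {
	.notifier_call = example_ssr_cb,
};

static int example_register(void)
{
	return qcom_register_ssr_notifier(&example_ssr_nb);
	/* later: qcom_unregister_ssr_notifier(&example_ssr_nb); */
}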
@@ static int q6v5_init_clocks(struct device *dev, struct clk **clks, static int q6v5_init_reset(struct q6v5 *qproc) { - qproc->mss_restart = devm_reset_control_get(qproc->dev, NULL); + qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev, + NULL); if (IS_ERR(qproc->mss_restart)) { dev_err(qproc->dev, "failed to acquire mss restart\n"); return PTR_ERR(qproc->mss_restart); @@ -1038,6 +1040,7 @@ static int q6v5_probe(struct platform_device *pdev) } qcom_add_smd_subdev(rproc, &qproc->smd_subdev); + qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss"); ret = rproc_add(rproc); if (ret) @@ -1058,6 +1061,7 @@ static int q6v5_remove(struct platform_device *pdev) rproc_del(qproc->rproc); qcom_remove_smd_subdev(qproc->rproc, &qproc->smd_subdev); + qcom_remove_ssr_subdev(qproc->rproc, &qproc->ssr_subdev); rproc_free(qproc->rproc); return 0; diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 564061dcc019..eab14b414bf0 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -794,7 +794,7 @@ static void rproc_remove_subdevices(struct rproc *rproc) { struct rproc_subdev *subdev; - list_for_each_entry(subdev, &rproc->subdevs, node) + list_for_each_entry_reverse(subdev, &rproc->subdevs, node) subdev->remove(subdev); } @@ -1119,7 +1119,7 @@ static void rproc_crash_handler_work(struct work_struct *work) } /** - * __rproc_boot() - boot a remote processor + * rproc_boot() - boot a remote processor * @rproc: handle of a remote processor * * Boot a remote processor (i.e. load its firmware, power it on, ...). @@ -1129,7 +1129,7 @@ static void rproc_crash_handler_work(struct work_struct *work) * * Returns 0 on success, and an appropriate error value otherwise. */ -static int __rproc_boot(struct rproc *rproc) +int rproc_boot(struct rproc *rproc) { const struct firmware *firmware_p; struct device *dev; @@ -1180,15 +1180,6 @@ static int __rproc_boot(struct rproc *rproc) mutex_unlock(&rproc->lock); return ret; } - -/** - * rproc_boot() - boot a remote processor - * @rproc: handle of a remote processor - */ -int rproc_boot(struct rproc *rproc) -{ - return __rproc_boot(rproc); -} EXPORT_SYMBOL(rproc_boot); /** @@ -1369,7 +1360,7 @@ static void rproc_type_release(struct device *dev) kfree(rproc); } -static struct device_type rproc_type = { +static const struct device_type rproc_type = { .name = "remoteproc", .release = rproc_type_release, }; @@ -1440,6 +1431,7 @@ struct rproc *rproc_alloc(struct device *dev, const char *name, rproc->dev.parent = dev; rproc->dev.type = &rproc_type; rproc->dev.class = &rproc_class; + rproc->dev.driver_data = rproc; /* Assign a unique device index and name */ rproc->index = ida_simple_get(&rproc_dev_index, 0, 0, GFP_KERNEL); @@ -1578,6 +1570,23 @@ void rproc_remove_subdev(struct rproc *rproc, struct rproc_subdev *subdev) } EXPORT_SYMBOL(rproc_remove_subdev); +/** + * rproc_get_by_child() - acquire rproc handle of @dev's ancestor + * @dev: child device to find ancestor of + * + * Returns the ancestor rproc instance, or NULL if not found. 
+ */ +struct rproc *rproc_get_by_child(struct device *dev) +{ + for (dev = dev->parent; dev; dev = dev->parent) { + if (dev->type == &rproc_type) + return dev->driver_data; + } + + return NULL; +} +EXPORT_SYMBOL(rproc_get_by_child); + /** * rproc_report_crash() - rproc crash reporter function * @rproc: remote processor diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h index 1e9e5b3f021c..c1077bec5d0b 100644 --- a/drivers/remoteproc/remoteproc_internal.h +++ b/drivers/remoteproc/remoteproc_internal.h @@ -48,7 +48,6 @@ struct rproc_fw_ops { /* from remoteproc_core.c */ void rproc_release(struct kref *kref); irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int vq_id); -int rproc_boot_nowait(struct rproc *rproc); void rproc_vdev_release(struct kref *ref); /* from remoteproc_virtio.c */ diff --git a/drivers/remoteproc/st_remoteproc.c b/drivers/remoteproc/st_remoteproc.c index d534bf23dc56..aacef0ea3b90 100644 --- a/drivers/remoteproc/st_remoteproc.c +++ b/drivers/remoteproc/st_remoteproc.c @@ -212,7 +212,8 @@ static int st_rproc_parse_dt(struct platform_device *pdev) int err; if (ddata->config->sw_reset) { - ddata->sw_reset = devm_reset_control_get(dev, "sw_reset"); + ddata->sw_reset = devm_reset_control_get_exclusive(dev, + "sw_reset"); if (IS_ERR(ddata->sw_reset)) { dev_err(dev, "Failed to get S/W Reset\n"); return PTR_ERR(ddata->sw_reset); @@ -220,7 +221,8 @@ static int st_rproc_parse_dt(struct platform_device *pdev) } if (ddata->config->pwr_reset) { - ddata->pwr_reset = devm_reset_control_get(dev, "pwr_reset"); + ddata->pwr_reset = devm_reset_control_get_exclusive(dev, + "pwr_reset"); if (IS_ERR(ddata->pwr_reset)) { dev_err(dev, "Failed to get Power Reset\n"); return PTR_ERR(ddata->pwr_reset); diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig index 608c071e4bbf..e2baecbb9dd3 100644 --- a/drivers/reset/Kconfig +++ b/drivers/reset/Kconfig @@ -34,12 +34,12 @@ config RESET_BERLIN help This enables the reset controller driver for Marvell Berlin SoCs. -config RESET_GEMINI - bool "Gemini Reset Driver" if COMPILE_TEST - default ARCH_GEMINI - select MFD_SYSCON +config RESET_HSDK + bool "Synopsys HSDK Reset Driver" + depends on HAS_IOMEM + depends on ARC_SOC_HSDK || COMPILE_TEST help - This enables the reset controller driver for Cortina Systems Gemini. + This enables the reset controller driver for HSDK board. config RESET_IMX7 bool "i.MX7 Reset Driver" if COMPILE_TEST @@ -48,6 +48,12 @@ config RESET_IMX7 help This enables the reset controller driver for i.MX7 SoCs. +config RESET_LANTIQ + bool "Lantiq XWAY Reset Driver" if COMPILE_TEST + default SOC_TYPE_XWAY + help + This enables the reset controller driver for Lantiq / Intel XWAY SoCs. 
+ config RESET_LPC18XX bool "LPC18xx/43xx Reset Driver" if COMPILE_TEST default ARCH_LPC18XX diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile index 7081f9da2599..af1c15c330b3 100644 --- a/drivers/reset/Makefile +++ b/drivers/reset/Makefile @@ -5,8 +5,9 @@ obj-$(CONFIG_ARCH_TEGRA) += tegra/ obj-$(CONFIG_RESET_A10SR) += reset-a10sr.o obj-$(CONFIG_RESET_ATH79) += reset-ath79.o obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o -obj-$(CONFIG_RESET_GEMINI) += reset-gemini.o +obj-$(CONFIG_RESET_HSDK) += reset-hsdk.o obj-$(CONFIG_RESET_IMX7) += reset-imx7.o +obj-$(CONFIG_RESET_LANTIQ) += reset-lantiq.o obj-$(CONFIG_RESET_LPC18XX) += reset-lpc18xx.o obj-$(CONFIG_RESET_MESON) += reset-meson.o obj-$(CONFIG_RESET_OXNAS) += reset-oxnas.o diff --git a/drivers/reset/core.c b/drivers/reset/core.c index 0090784ff410..1d21c6f7d56c 100644 --- a/drivers/reset/core.c +++ b/drivers/reset/core.c @@ -43,10 +43,23 @@ struct reset_control { unsigned int id; struct kref refcnt; bool shared; + bool array; atomic_t deassert_count; atomic_t triggered_count; }; +/** + * struct reset_control_array - an array of reset controls + * @base: reset control for compatibility with reset control API functions + * @num_rstcs: number of reset controls + * @rstc: array of reset controls + */ +struct reset_control_array { + struct reset_control base; + unsigned int num_rstcs; + struct reset_control *rstc[]; +}; + /** * of_reset_simple_xlate - translate reset_spec to the reset line number * @rcdev: a pointer to the reset controller device @@ -135,6 +148,65 @@ int devm_reset_controller_register(struct device *dev, } EXPORT_SYMBOL_GPL(devm_reset_controller_register); +static inline struct reset_control_array * +rstc_to_array(struct reset_control *rstc) { + return container_of(rstc, struct reset_control_array, base); +} + +static int reset_control_array_reset(struct reset_control_array *resets) +{ + int ret, i; + + for (i = 0; i < resets->num_rstcs; i++) { + ret = reset_control_reset(resets->rstc[i]); + if (ret) + return ret; + } + + return 0; +} + +static int reset_control_array_assert(struct reset_control_array *resets) +{ + int ret, i; + + for (i = 0; i < resets->num_rstcs; i++) { + ret = reset_control_assert(resets->rstc[i]); + if (ret) + goto err; + } + + return 0; + +err: + while (i--) + reset_control_deassert(resets->rstc[i]); + return ret; +} + +static int reset_control_array_deassert(struct reset_control_array *resets) +{ + int ret, i; + + for (i = 0; i < resets->num_rstcs; i++) { + ret = reset_control_deassert(resets->rstc[i]); + if (ret) + goto err; + } + + return 0; + +err: + while (i--) + reset_control_assert(resets->rstc[i]); + return ret; +} + +static inline bool reset_control_is_array(struct reset_control *rstc) +{ + return rstc->array; +} + /** * reset_control_reset - reset the controlled device * @rstc: reset controller @@ -158,6 +230,9 @@ int reset_control_reset(struct reset_control *rstc) if (WARN_ON(IS_ERR(rstc))) return -EINVAL; + if (reset_control_is_array(rstc)) + return reset_control_array_reset(rstc_to_array(rstc)); + if (!rstc->rcdev->ops->reset) return -ENOTSUPP; @@ -202,8 +277,8 @@ int reset_control_assert(struct reset_control *rstc) if (WARN_ON(IS_ERR(rstc))) return -EINVAL; - if (!rstc->rcdev->ops->assert) - return -ENOTSUPP; + if (reset_control_is_array(rstc)) + return reset_control_array_assert(rstc_to_array(rstc)); if (rstc->shared) { if (WARN_ON(atomic_read(&rstc->triggered_count) != 0)) @@ -214,6 +289,21 @@ int reset_control_assert(struct reset_control *rstc) if 
(atomic_dec_return(&rstc->deassert_count) != 0) return 0; + + /* + * Shared reset controls allow the reset line to be in any state + * after this call, so doing nothing is a valid option. + */ + if (!rstc->rcdev->ops->assert) + return 0; + } else { + /* + * If the reset controller does not implement .assert(), there + * is no way to guarantee that the reset line is asserted after + * this call. + */ + if (!rstc->rcdev->ops->assert) + return -ENOTSUPP; } return rstc->rcdev->ops->assert(rstc->rcdev, rstc->id); @@ -240,8 +330,8 @@ int reset_control_deassert(struct reset_control *rstc) if (WARN_ON(IS_ERR(rstc))) return -EINVAL; - if (!rstc->rcdev->ops->deassert) - return -ENOTSUPP; + if (reset_control_is_array(rstc)) + return reset_control_array_deassert(rstc_to_array(rstc)); if (rstc->shared) { if (WARN_ON(atomic_read(&rstc->triggered_count) != 0)) @@ -251,6 +341,16 @@ int reset_control_deassert(struct reset_control *rstc) return 0; } + /* + * If the reset controller does not implement .deassert(), we assume + * that it handles self-deasserting reset lines via .reset(). In that + * case, the reset lines are deasserted by default. If that is not the + * case, the reset controller driver should implement .deassert() and + * return -ENOTSUPP. + */ + if (!rstc->rcdev->ops->deassert) + return 0; + return rstc->rcdev->ops->deassert(rstc->rcdev, rstc->id); } EXPORT_SYMBOL_GPL(reset_control_deassert); @@ -266,7 +366,7 @@ int reset_control_status(struct reset_control *rstc) if (!rstc) return 0; - if (WARN_ON(IS_ERR(rstc))) + if (WARN_ON(IS_ERR(rstc)) || reset_control_is_array(rstc)) return -EINVAL; if (rstc->rcdev->ops->status) @@ -404,6 +504,16 @@ struct reset_control *__reset_control_get(struct device *dev, const char *id, } EXPORT_SYMBOL_GPL(__reset_control_get); +static void reset_control_array_put(struct reset_control_array *resets) +{ + int i; + + mutex_lock(&reset_list_mutex); + for (i = 0; i < resets->num_rstcs; i++) + __reset_control_put_internal(resets->rstc[i]); + mutex_unlock(&reset_list_mutex); +} + /** * reset_control_put - free the reset controller * @rstc: reset controller @@ -413,6 +523,11 @@ void reset_control_put(struct reset_control *rstc) if (IS_ERR_OR_NULL(rstc)) return; + if (reset_control_is_array(rstc)) { + reset_control_array_put(rstc_to_array(rstc)); + return; + } + mutex_lock(&reset_list_mutex); __reset_control_put_internal(rstc); mutex_unlock(&reset_list_mutex); @@ -472,3 +587,116 @@ int device_reset(struct device *dev) return ret; } EXPORT_SYMBOL_GPL(device_reset); + +/** + * APIs to manage an array of reset controls. + */ +/** + * of_reset_control_get_count - Count number of resets available with a device + * + * @node: device node that contains 'resets'. + * + * Returns positive reset count on success, or error number on failure and + * on count being zero. + */ +static int of_reset_control_get_count(struct device_node *node) +{ + int count; + + if (!node) + return -EINVAL; + + count = of_count_phandle_with_args(node, "resets", "#reset-cells"); + if (count == 0) + count = -ENOENT; + + return count; +} + +/** + * of_reset_control_array_get - Get a list of reset controls using + * device node. 
+ * + * @np: device node for the device that requests the reset controls array + * @shared: whether reset controls are shared or not + * @optional: whether it is optional to get the reset controls + * + * Returns pointer to allocated reset_control_array on success or + * error on failure + */ +struct reset_control * +of_reset_control_array_get(struct device_node *np, bool shared, bool optional) +{ + struct reset_control_array *resets; + struct reset_control *rstc; + int num, i; + + num = of_reset_control_get_count(np); + if (num < 0) + return optional ? NULL : ERR_PTR(num); + + resets = kzalloc(sizeof(*resets) + sizeof(resets->rstc[0]) * num, + GFP_KERNEL); + if (!resets) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < num; i++) { + rstc = __of_reset_control_get(np, NULL, i, shared, optional); + if (IS_ERR(rstc)) + goto err_rst; + resets->rstc[i] = rstc; + } + resets->num_rstcs = num; + resets->base.array = true; + + return &resets->base; + +err_rst: + mutex_lock(&reset_list_mutex); + while (--i >= 0) + __reset_control_put_internal(resets->rstc[i]); + mutex_unlock(&reset_list_mutex); + + kfree(resets); + + return rstc; +} +EXPORT_SYMBOL_GPL(of_reset_control_array_get); + +/** + * devm_reset_control_array_get - Resource managed reset control array get + * + * @dev: device that requests the list of reset controls + * @shared: whether reset controls are shared or not + * @optional: whether it is optional to get the reset controls + * + * The reset control array APIs are intended for a list of resets + * that just have to be asserted or deasserted, without any + * requirements on the order. + * + * Returns pointer to allocated reset_control_array on success or + * error on failure + */ +struct reset_control * +devm_reset_control_array_get(struct device *dev, bool shared, bool optional) +{ + struct reset_control **devres; + struct reset_control *rstc; + + devres = devres_alloc(devm_reset_control_release, sizeof(*devres), + GFP_KERNEL); + if (!devres) + return ERR_PTR(-ENOMEM); + + rstc = of_reset_control_array_get(dev->of_node, shared, optional); + if (IS_ERR(rstc)) { + devres_free(devres); + return rstc; + } + + *devres = rstc; + devres_add(dev, devres); + + return rstc; +} +EXPORT_SYMBOL_GPL(devm_reset_control_array_get); diff --git a/drivers/reset/reset-gemini.c b/drivers/reset/reset-gemini.c deleted file mode 100644 index a2478997c75b..000000000000 --- a/drivers/reset/reset-gemini.c +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Cortina Gemini Reset controller driver - * Copyright (C) 2017 Linus Walleij - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -/** - * struct gemini_reset - gemini reset controller - * @map: regmap to access the containing system controller - * @rcdev: reset controller device - */ -struct gemini_reset { - struct regmap *map; - struct reset_controller_dev rcdev; -}; - -#define GEMINI_GLOBAL_SOFT_RESET 0x0c - -#define to_gemini_reset(p) \ - container_of((p), struct gemini_reset, rcdev) - -/* - * This is a self-deasserting reset controller. 
- */ -static int gemini_reset(struct reset_controller_dev *rcdev, - unsigned long id) -{ - struct gemini_reset *gr = to_gemini_reset(rcdev); - - /* Manual says to always set BIT 30 (CPU1) to 1 */ - return regmap_write(gr->map, - GEMINI_GLOBAL_SOFT_RESET, - BIT(GEMINI_RESET_CPU1) | BIT(id)); -} - -static int gemini_reset_status(struct reset_controller_dev *rcdev, - unsigned long id) -{ - struct gemini_reset *gr = to_gemini_reset(rcdev); - u32 val; - int ret; - - ret = regmap_read(gr->map, GEMINI_GLOBAL_SOFT_RESET, &val); - if (ret) - return ret; - - return !!(val & BIT(id)); -} - -static const struct reset_control_ops gemini_reset_ops = { - .reset = gemini_reset, - .status = gemini_reset_status, -}; - -static int gemini_reset_probe(struct platform_device *pdev) -{ - struct gemini_reset *gr; - struct device *dev = &pdev->dev; - struct device_node *np = dev->of_node; - int ret; - - gr = devm_kzalloc(dev, sizeof(*gr), GFP_KERNEL); - if (!gr) - return -ENOMEM; - - gr->map = syscon_node_to_regmap(np); - if (IS_ERR(gr->map)) { - ret = PTR_ERR(gr->map); - dev_err(dev, "unable to get regmap (%d)", ret); - return ret; - } - gr->rcdev.owner = THIS_MODULE; - gr->rcdev.nr_resets = 32; - gr->rcdev.ops = &gemini_reset_ops; - gr->rcdev.of_node = pdev->dev.of_node; - - ret = devm_reset_controller_register(&pdev->dev, &gr->rcdev); - if (ret) - return ret; - - dev_info(dev, "registered Gemini reset controller\n"); - return 0; -} - -static const struct of_device_id gemini_reset_dt_ids[] = { - { .compatible = "cortina,gemini-syscon", }, - { /* sentinel */ }, -}; - -static struct platform_driver gemini_reset_driver = { - .probe = gemini_reset_probe, - .driver = { - .name = "gemini-reset", - .of_match_table = gemini_reset_dt_ids, - .suppress_bind_attrs = true, - }, -}; -builtin_platform_driver(gemini_reset_driver); diff --git a/drivers/reset/reset-hsdk.c b/drivers/reset/reset-hsdk.c new file mode 100644 index 000000000000..8bce391c6943 --- /dev/null +++ b/drivers/reset/reset-hsdk.c @@ -0,0 +1,137 @@ +/* + * Copyright (C) 2017 Synopsys. + * + * Synopsys HSDK Development platform reset driver. + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define to_hsdk_rst(p) container_of((p), struct hsdk_rst, rcdev) + +struct hsdk_rst { + void __iomem *regs_ctl; + void __iomem *regs_rst; + spinlock_t lock; + struct reset_controller_dev rcdev; +}; + +static const u32 rst_map[] = { + BIT(16), /* APB_RST */ + BIT(17), /* AXI_RST */ + BIT(18), /* ETH_RST */ + BIT(19), /* USB_RST */ + BIT(20), /* SDIO_RST */ + BIT(21), /* HDMI_RST */ + BIT(22), /* GFX_RST */ + BIT(25), /* DMAC_RST */ + BIT(31), /* EBI_RST */ +}; + +#define HSDK_MAX_RESETS ARRAY_SIZE(rst_map) + +#define CGU_SYS_RST_CTRL 0x0 +#define CGU_IP_SW_RESET 0x0 +#define CGU_IP_SW_RESET_DELAY_SHIFT 16 +#define CGU_IP_SW_RESET_DELAY_MASK GENMASK(31, CGU_IP_SW_RESET_DELAY_SHIFT) +#define CGU_IP_SW_RESET_DELAY 0 +#define CGU_IP_SW_RESET_RESET BIT(0) +#define SW_RESET_TIMEOUT 10000 + +static void hsdk_reset_config(struct hsdk_rst *rst, unsigned long id) +{ + writel(rst_map[id], rst->regs_ctl + CGU_SYS_RST_CTRL); +} + +static int hsdk_reset_do(struct hsdk_rst *rst) +{ + u32 reg; + + reg = readl(rst->regs_rst + CGU_IP_SW_RESET); + reg &= ~CGU_IP_SW_RESET_DELAY_MASK; + reg |= CGU_IP_SW_RESET_DELAY << CGU_IP_SW_RESET_DELAY_SHIFT; + reg |= CGU_IP_SW_RESET_RESET; + writel(reg, rst->regs_rst + CGU_IP_SW_RESET); + + /* wait till reset bit is back to 0 */ + return readl_poll_timeout_atomic(rst->regs_rst + CGU_IP_SW_RESET, reg, + !(reg & CGU_IP_SW_RESET_RESET), 5, SW_RESET_TIMEOUT); +} + +static int hsdk_reset_reset(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct hsdk_rst *rst = to_hsdk_rst(rcdev); + unsigned long flags; + int ret; + + spin_lock_irqsave(&rst->lock, flags); + hsdk_reset_config(rst, id); + ret = hsdk_reset_do(rst); + spin_unlock_irqrestore(&rst->lock, flags); + + return ret; +} + +static const struct reset_control_ops hsdk_reset_ops = { + .reset = hsdk_reset_reset, +}; + +static int hsdk_reset_probe(struct platform_device *pdev) +{ + struct hsdk_rst *rst; + struct resource *mem; + + rst = devm_kzalloc(&pdev->dev, sizeof(*rst), GFP_KERNEL); + if (!rst) + return -ENOMEM; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + rst->regs_ctl = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(rst->regs_ctl)) + return PTR_ERR(rst->regs_ctl); + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 1); + rst->regs_rst = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(rst->regs_rst)) + return PTR_ERR(rst->regs_rst); + + spin_lock_init(&rst->lock); + + rst->rcdev.owner = THIS_MODULE; + rst->rcdev.ops = &hsdk_reset_ops; + rst->rcdev.of_node = pdev->dev.of_node; + rst->rcdev.nr_resets = HSDK_MAX_RESETS; + rst->rcdev.of_reset_n_cells = 1; + + return reset_controller_register(&rst->rcdev); +} + +static const struct of_device_id hsdk_reset_dt_match[] = { + { .compatible = "snps,hsdk-reset" }, + { }, +}; + +static struct platform_driver hsdk_reset_driver = { + .probe = hsdk_reset_probe, + .driver = { + .name = "hsdk-reset", + .of_match_table = hsdk_reset_dt_match, + }, +}; +builtin_platform_driver(hsdk_reset_driver); + +MODULE_AUTHOR("Eugeniy Paltsev "); +MODULE_DESCRIPTION("Synopsys HSDK SDP reset driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/reset/reset-lantiq.c b/drivers/reset/reset-lantiq.c new file mode 100644 index 000000000000..11a582e50d30 --- /dev/null +++ b/drivers/reset/reset-lantiq.c @@ -0,0 +1,212 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as 
published + * by the Free Software Foundation. + * + * Copyright (C) 2010 John Crispin + * Copyright (C) 2013-2015 Lantiq Beteiligungs-GmbH & Co.KG + * Copyright (C) 2016 Martin Blumenstingl + * Copyright (C) 2017 Hauke Mehrtens + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define LANTIQ_RCU_RESET_TIMEOUT 10000 + +struct lantiq_rcu_reset_priv { + struct reset_controller_dev rcdev; + struct device *dev; + struct regmap *regmap; + u32 reset_offset; + u32 status_offset; +}; + +static struct lantiq_rcu_reset_priv *to_lantiq_rcu_reset_priv( + struct reset_controller_dev *rcdev) +{ + return container_of(rcdev, struct lantiq_rcu_reset_priv, rcdev); +} + +static int lantiq_rcu_reset_status(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct lantiq_rcu_reset_priv *priv = to_lantiq_rcu_reset_priv(rcdev); + unsigned int status = (id >> 8) & 0x1f; + u32 val; + int ret; + + ret = regmap_read(priv->regmap, priv->status_offset, &val); + if (ret) + return ret; + + return !!(val & BIT(status)); +} + +static int lantiq_rcu_reset_status_timeout(struct reset_controller_dev *rcdev, + unsigned long id, bool assert) +{ + int ret; + int retry = LANTIQ_RCU_RESET_TIMEOUT; + + do { + ret = lantiq_rcu_reset_status(rcdev, id); + if (ret < 0) + return ret; + if (ret == assert) + return 0; + usleep_range(20, 40); + } while (--retry); + + return -ETIMEDOUT; +} + +static int lantiq_rcu_reset_update(struct reset_controller_dev *rcdev, + unsigned long id, bool assert) +{ + struct lantiq_rcu_reset_priv *priv = to_lantiq_rcu_reset_priv(rcdev); + unsigned int set = id & 0x1f; + u32 val = assert ? BIT(set) : 0; + int ret; + + ret = regmap_update_bits(priv->regmap, priv->reset_offset, BIT(set), + val); + if (ret) { + dev_err(priv->dev, "Failed to set reset bit %u\n", set); + return ret; + } + + + ret = lantiq_rcu_reset_status_timeout(rcdev, id, assert); + if (ret) + dev_err(priv->dev, "Failed to %s bit %u\n", + assert ? 
"assert" : "deassert", set); + + return ret; +} + +static int lantiq_rcu_reset_assert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + return lantiq_rcu_reset_update(rcdev, id, true); +} + +static int lantiq_rcu_reset_deassert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + return lantiq_rcu_reset_update(rcdev, id, false); +} + +static int lantiq_rcu_reset_reset(struct reset_controller_dev *rcdev, + unsigned long id) +{ + int ret; + + ret = lantiq_rcu_reset_assert(rcdev, id); + if (ret) + return ret; + + return lantiq_rcu_reset_deassert(rcdev, id); +} + +static const struct reset_control_ops lantiq_rcu_reset_ops = { + .assert = lantiq_rcu_reset_assert, + .deassert = lantiq_rcu_reset_deassert, + .status = lantiq_rcu_reset_status, + .reset = lantiq_rcu_reset_reset, +}; + +static int lantiq_rcu_reset_of_parse(struct platform_device *pdev, + struct lantiq_rcu_reset_priv *priv) +{ + struct device *dev = &pdev->dev; + const __be32 *offset; + + priv->regmap = syscon_node_to_regmap(dev->of_node->parent); + if (IS_ERR(priv->regmap)) { + dev_err(&pdev->dev, "Failed to lookup RCU regmap\n"); + return PTR_ERR(priv->regmap); + } + + offset = of_get_address(dev->of_node, 0, NULL, NULL); + if (!offset) { + dev_err(&pdev->dev, "Failed to get RCU reset offset\n"); + return -ENOENT; + } + priv->reset_offset = __be32_to_cpu(*offset); + + offset = of_get_address(dev->of_node, 1, NULL, NULL); + if (!offset) { + dev_err(&pdev->dev, "Failed to get RCU status offset\n"); + return -ENOENT; + } + priv->status_offset = __be32_to_cpu(*offset); + + return 0; +} + +static int lantiq_rcu_reset_xlate(struct reset_controller_dev *rcdev, + const struct of_phandle_args *reset_spec) +{ + unsigned int status, set; + + set = reset_spec->args[0]; + status = reset_spec->args[1]; + + if (set >= rcdev->nr_resets || status >= rcdev->nr_resets) + return -EINVAL; + + return (status << 8) | set; +} + +static int lantiq_rcu_reset_probe(struct platform_device *pdev) +{ + struct lantiq_rcu_reset_priv *priv; + int err; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->dev = &pdev->dev; + platform_set_drvdata(pdev, priv); + + err = lantiq_rcu_reset_of_parse(pdev, priv); + if (err) + return err; + + priv->rcdev.ops = &lantiq_rcu_reset_ops; + priv->rcdev.owner = THIS_MODULE; + priv->rcdev.of_node = pdev->dev.of_node; + priv->rcdev.nr_resets = 32; + priv->rcdev.of_xlate = lantiq_rcu_reset_xlate; + priv->rcdev.of_reset_n_cells = 2; + + return reset_controller_register(&priv->rcdev); +} + +static const struct of_device_id lantiq_rcu_reset_dt_ids[] = { + { .compatible = "lantiq,danube-reset", }, + { .compatible = "lantiq,xrx200-reset", }, + { }, +}; +MODULE_DEVICE_TABLE(of, lantiq_rcu_reset_dt_ids); + +static struct platform_driver lantiq_rcu_reset_driver = { + .probe = lantiq_rcu_reset_probe, + .driver = { + .name = "lantiq-reset", + .of_match_table = lantiq_rcu_reset_dt_ids, + }, +}; +module_platform_driver(lantiq_rcu_reset_driver); + +MODULE_AUTHOR("Martin Blumenstingl "); +MODULE_DESCRIPTION("Lantiq XWAY RCU Reset Controller Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/reset/reset-sunxi.c b/drivers/reset/reset-sunxi.c index cd585cd2f04d..2c7dd1fd08df 100644 --- a/drivers/reset/reset-sunxi.c +++ b/drivers/reset/reset-sunxi.c @@ -107,7 +107,7 @@ static int sunxi_reset_init(struct device_node *np) spin_lock_init(&data->lock); data->rcdev.owner = THIS_MODULE; - data->rcdev.nr_resets = size * 32; + data->rcdev.nr_resets = size * 8; data->rcdev.ops = 
&sunxi_reset_ops; data->rcdev.of_node = np; @@ -162,7 +162,7 @@ static int sunxi_reset_probe(struct platform_device *pdev) spin_lock_init(&data->lock); data->rcdev.owner = THIS_MODULE; - data->rcdev.nr_resets = resource_size(res) * 32; + data->rcdev.nr_resets = resource_size(res) * 8; data->rcdev.ops = &sunxi_reset_ops; data->rcdev.of_node = pdev->dev.of_node; diff --git a/drivers/reset/reset-uniphier.c b/drivers/reset/reset-uniphier.c index c4ba89832796..bda2dd196ae5 100644 --- a/drivers/reset/reset-uniphier.c +++ b/drivers/reset/reset-uniphier.c @@ -50,59 +50,35 @@ struct uniphier_reset_data { } /* System reset data */ -#define UNIPHIER_SLD3_SYS_RESET_NAND(id) \ - UNIPHIER_RESETX((id), 0x2004, 2) - -#define UNIPHIER_LD11_SYS_RESET_NAND(id) \ - UNIPHIER_RESETX((id), 0x200c, 0) - -#define UNIPHIER_LD11_SYS_RESET_EMMC(id) \ - UNIPHIER_RESETX((id), 0x200c, 2) - -#define UNIPHIER_SLD3_SYS_RESET_STDMAC(id) \ - UNIPHIER_RESETX((id), 0x2000, 10) - -#define UNIPHIER_LD11_SYS_RESET_STDMAC(id) \ - UNIPHIER_RESETX((id), 0x200c, 8) - -#define UNIPHIER_PRO4_SYS_RESET_GIO(id) \ - UNIPHIER_RESETX((id), 0x2000, 6) - -#define UNIPHIER_LD20_SYS_RESET_GIO(id) \ - UNIPHIER_RESETX((id), 0x200c, 5) - -#define UNIPHIER_PRO4_SYS_RESET_USB3(id, ch) \ - UNIPHIER_RESETX((id), 0x2000 + 0x4 * (ch), 17) - -static const struct uniphier_reset_data uniphier_sld3_sys_reset_data[] = { - UNIPHIER_SLD3_SYS_RESET_NAND(2), - UNIPHIER_SLD3_SYS_RESET_STDMAC(8), /* Ether, HSC, MIO */ +static const struct uniphier_reset_data uniphier_ld4_sys_reset_data[] = { + UNIPHIER_RESETX(2, 0x2000, 2), /* NAND */ + UNIPHIER_RESETX(8, 0x2000, 10), /* STDMAC (Ether, HSC, MIO) */ UNIPHIER_RESET_END, }; static const struct uniphier_reset_data uniphier_pro4_sys_reset_data[] = { - UNIPHIER_SLD3_SYS_RESET_NAND(2), - UNIPHIER_SLD3_SYS_RESET_STDMAC(8), /* HSC, MIO, RLE */ - UNIPHIER_PRO4_SYS_RESET_GIO(12), /* Ether, SATA, USB3 */ - UNIPHIER_PRO4_SYS_RESET_USB3(14, 0), - UNIPHIER_PRO4_SYS_RESET_USB3(15, 1), + UNIPHIER_RESETX(2, 0x2000, 2), /* NAND */ + UNIPHIER_RESETX(8, 0x2000, 10), /* STDMAC (HSC, MIO, RLE) */ + UNIPHIER_RESETX(12, 0x2000, 6), /* GIO (Ether, SATA, USB3) */ + UNIPHIER_RESETX(14, 0x2000, 17), /* USB30 */ + UNIPHIER_RESETX(15, 0x2004, 17), /* USB31 */ UNIPHIER_RESET_END, }; static const struct uniphier_reset_data uniphier_pro5_sys_reset_data[] = { - UNIPHIER_SLD3_SYS_RESET_NAND(2), - UNIPHIER_SLD3_SYS_RESET_STDMAC(8), /* HSC */ - UNIPHIER_PRO4_SYS_RESET_GIO(12), /* PCIe, USB3 */ - UNIPHIER_PRO4_SYS_RESET_USB3(14, 0), - UNIPHIER_PRO4_SYS_RESET_USB3(15, 1), + UNIPHIER_RESETX(2, 0x2000, 2), /* NAND */ + UNIPHIER_RESETX(8, 0x2000, 10), /* STDMAC (HSC) */ + UNIPHIER_RESETX(12, 0x2000, 6), /* GIO (PCIe, USB3) */ + UNIPHIER_RESETX(14, 0x2000, 17), /* USB30 */ + UNIPHIER_RESETX(15, 0x2004, 17), /* USB31 */ UNIPHIER_RESET_END, }; static const struct uniphier_reset_data uniphier_pxs2_sys_reset_data[] = { - UNIPHIER_SLD3_SYS_RESET_NAND(2), - UNIPHIER_SLD3_SYS_RESET_STDMAC(8), /* HSC, RLE */ - UNIPHIER_PRO4_SYS_RESET_USB3(14, 0), - UNIPHIER_PRO4_SYS_RESET_USB3(15, 1), + UNIPHIER_RESETX(2, 0x2000, 2), /* NAND */ + UNIPHIER_RESETX(8, 0x2000, 10), /* STDMAC (HSC, RLE) */ + UNIPHIER_RESETX(14, 0x2000, 17), /* USB30 */ + UNIPHIER_RESETX(15, 0x2004, 17), /* USB31 */ UNIPHIER_RESETX(16, 0x2014, 4), /* USB30-PHY0 */ UNIPHIER_RESETX(17, 0x2014, 0), /* USB30-PHY1 */ UNIPHIER_RESETX(18, 0x2014, 2), /* USB30-PHY2 */ @@ -114,21 +90,27 @@ static const struct uniphier_reset_data uniphier_pxs2_sys_reset_data[] = { }; static const struct uniphier_reset_data 
uniphier_ld11_sys_reset_data[] = { - UNIPHIER_LD11_SYS_RESET_NAND(2), - UNIPHIER_LD11_SYS_RESET_EMMC(4), - UNIPHIER_LD11_SYS_RESET_STDMAC(8), /* HSC, MIO */ + UNIPHIER_RESETX(2, 0x200c, 0), /* NAND */ + UNIPHIER_RESETX(4, 0x200c, 2), /* eMMC */ + UNIPHIER_RESETX(8, 0x200c, 8), /* STDMAC (HSC, MIO) */ + UNIPHIER_RESETX(40, 0x2008, 0), /* AIO */ + UNIPHIER_RESETX(41, 0x2008, 1), /* EVEA */ + UNIPHIER_RESETX(42, 0x2010, 2), /* EXIV */ UNIPHIER_RESET_END, }; static const struct uniphier_reset_data uniphier_ld20_sys_reset_data[] = { - UNIPHIER_LD11_SYS_RESET_NAND(2), - UNIPHIER_LD11_SYS_RESET_EMMC(4), - UNIPHIER_LD11_SYS_RESET_STDMAC(8), /* HSC */ - UNIPHIER_LD20_SYS_RESET_GIO(12), /* PCIe, USB3 */ + UNIPHIER_RESETX(2, 0x200c, 0), /* NAND */ + UNIPHIER_RESETX(4, 0x200c, 2), /* eMMC */ + UNIPHIER_RESETX(8, 0x200c, 8), /* STDMAC (HSC) */ + UNIPHIER_RESETX(12, 0x200c, 5), /* GIO (PCIe, USB3) */ UNIPHIER_RESETX(16, 0x200c, 12), /* USB30-PHY0 */ UNIPHIER_RESETX(17, 0x200c, 13), /* USB30-PHY1 */ UNIPHIER_RESETX(18, 0x200c, 14), /* USB30-PHY2 */ UNIPHIER_RESETX(19, 0x200c, 15), /* USB30-PHY3 */ + UNIPHIER_RESETX(40, 0x2008, 0), /* AIO */ + UNIPHIER_RESETX(41, 0x2008, 1), /* EVEA */ + UNIPHIER_RESETX(42, 0x2010, 2), /* EXIV */ UNIPHIER_RESET_END, }; @@ -151,7 +133,7 @@ static const struct uniphier_reset_data uniphier_ld20_sys_reset_data[] = { #define UNIPHIER_MIO_RESET_DMAC(id) \ UNIPHIER_RESETX((id), 0x110, 17) -static const struct uniphier_reset_data uniphier_sld3_mio_reset_data[] = { +static const struct uniphier_reset_data uniphier_ld4_mio_reset_data[] = { UNIPHIER_MIO_RESET_SD(0, 0), UNIPHIER_MIO_RESET_SD(1, 1), UNIPHIER_MIO_RESET_SD(2, 2), @@ -163,11 +145,9 @@ static const struct uniphier_reset_data uniphier_sld3_mio_reset_data[] = { UNIPHIER_MIO_RESET_USB2(8, 0), UNIPHIER_MIO_RESET_USB2(9, 1), UNIPHIER_MIO_RESET_USB2(10, 2), - UNIPHIER_MIO_RESET_USB2(11, 3), UNIPHIER_MIO_RESET_USB2_BRIDGE(12, 0), UNIPHIER_MIO_RESET_USB2_BRIDGE(13, 1), UNIPHIER_MIO_RESET_USB2_BRIDGE(14, 2), - UNIPHIER_MIO_RESET_USB2_BRIDGE(15, 3), UNIPHIER_RESET_END, }; @@ -216,6 +196,12 @@ static const struct uniphier_reset_data uniphier_pro4_peri_reset_data[] = { UNIPHIER_RESET_END, }; +/* Analog signal amplifiers reset data */ +static const struct uniphier_reset_data uniphier_ld11_adamv_reset_data[] = { + UNIPHIER_RESETX(0, 0x10, 6), /* EVEA */ + UNIPHIER_RESET_END, +}; + /* core implementaton */ struct uniphier_reset_priv { struct reset_controller_dev rcdev; @@ -345,13 +331,9 @@ static int uniphier_reset_probe(struct platform_device *pdev) static const struct of_device_id uniphier_reset_match[] = { /* System reset */ - { - .compatible = "socionext,uniphier-sld3-reset", - .data = uniphier_sld3_sys_reset_data, - }, { .compatible = "socionext,uniphier-ld4-reset", - .data = uniphier_sld3_sys_reset_data, + .data = uniphier_ld4_sys_reset_data, }, { .compatible = "socionext,uniphier-pro4-reset", @@ -359,7 +341,7 @@ static const struct of_device_id uniphier_reset_match[] = { }, { .compatible = "socionext,uniphier-sld8-reset", - .data = uniphier_sld3_sys_reset_data, + .data = uniphier_ld4_sys_reset_data, }, { .compatible = "socionext,uniphier-pro5-reset", @@ -378,21 +360,17 @@ static const struct of_device_id uniphier_reset_match[] = { .data = uniphier_ld20_sys_reset_data, }, /* Media I/O reset, SD reset */ - { - .compatible = "socionext,uniphier-sld3-mio-reset", - .data = uniphier_sld3_mio_reset_data, - }, { .compatible = "socionext,uniphier-ld4-mio-reset", - .data = uniphier_sld3_mio_reset_data, + .data = 
uniphier_ld4_mio_reset_data, }, { .compatible = "socionext,uniphier-pro4-mio-reset", - .data = uniphier_sld3_mio_reset_data, + .data = uniphier_ld4_mio_reset_data, }, { .compatible = "socionext,uniphier-sld8-mio-reset", - .data = uniphier_sld3_mio_reset_data, + .data = uniphier_ld4_mio_reset_data, }, { .compatible = "socionext,uniphier-pro5-sd-reset", @@ -404,7 +382,7 @@ static const struct of_device_id uniphier_reset_match[] = { }, { .compatible = "socionext,uniphier-ld11-mio-reset", - .data = uniphier_sld3_mio_reset_data, + .data = uniphier_ld4_mio_reset_data, }, { .compatible = "socionext,uniphier-ld11-sd-reset", @@ -443,6 +421,15 @@ static const struct of_device_id uniphier_reset_match[] = { .compatible = "socionext,uniphier-ld20-peri-reset", .data = uniphier_pro4_peri_reset_data, }, + /* Analog signal amplifiers reset */ + { + .compatible = "socionext,uniphier-ld11-adamv-reset", + .data = uniphier_ld11_adamv_reset_data, + }, + { + .compatible = "socionext,uniphier-ld20-adamv-reset", + .data = uniphier_ld11_adamv_reset_data, + }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, uniphier_reset_match); diff --git a/drivers/reset/reset-zx2967.c b/drivers/reset/reset-zx2967.c index 4dabb9ec4841..4f319f7753d4 100644 --- a/drivers/reset/reset-zx2967.c +++ b/drivers/reset/reset-zx2967.c @@ -55,7 +55,7 @@ static int zx2967_reset_deassert(struct reset_controller_dev *rcdev, return zx2967_reset_act(rcdev, id, false); } -static struct reset_control_ops zx2967_reset_ops = { +static const struct reset_control_ops zx2967_reset_ops = { .assert = zx2967_reset_assert, .deassert = zx2967_reset_deassert, }; diff --git a/drivers/rpmsg/Kconfig b/drivers/rpmsg/Kconfig index 1323a245763b..0fe6eac46512 100644 --- a/drivers/rpmsg/Kconfig +++ b/drivers/rpmsg/Kconfig @@ -13,9 +13,13 @@ config RPMSG_CHAR in /dev. They make it possible for user-space programs to send and receive rpmsg packets. +config RPMSG_QCOM_GLINK_NATIVE + tristate + select RPMSG + config RPMSG_QCOM_GLINK_RPM tristate "Qualcomm RPM Glink driver" - select RPMSG + select RPMSG_QCOM_GLINK_NATIVE depends on HAS_IOMEM depends on MAILBOX help @@ -23,6 +27,16 @@ config RPMSG_QCOM_GLINK_RPM which serves as a channel for communication with the RPM in GLINK enabled systems. +config RPMSG_QCOM_GLINK_SMEM + tristate "Qualcomm SMEM Glink driver" + select RPMSG_QCOM_GLINK_NATIVE + depends on MAILBOX + depends on QCOM_SMEM + help + Say y here to enable support for the GLINK SMEM communication driver, + which provides support for using the GLINK communication protocol + over SMEM. 
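
Channels brokered by the GLINK SMEM (or RPM) transport enabled by the Kconfig entries above surface as ordinary rpmsg devices via the native GLINK code added below in qcom_glink_native.c, so generic rpmsg clients work unchanged on top of it. A minimal client sketch using only the stock rpmsg driver API is shown here; the channel name "my-glink-channel" and the my_* identifiers are made-up placeholders, not names defined by this series.

/* Minimal rpmsg client sketch for a GLINK-provided channel (illustrative). */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rpmsg.h>

/* Invoked for every message the remote sends on this channel. */
static int my_client_cb(struct rpmsg_device *rpdev, void *data, int len,
			void *priv, u32 src)
{
	dev_info(&rpdev->dev, "received %d bytes from 0x%x\n", len, src);
	return 0;
}

static int my_client_probe(struct rpmsg_device *rpdev)
{
	static const char ping[] = "ping";

	/* Transmit on the primary endpoint created for this channel. */
	return rpmsg_send(rpdev->ept, (void *)ping, sizeof(ping));
}

static const struct rpmsg_device_id my_client_id_table[] = {
	{ .name = "my-glink-channel" },	/* hypothetical channel name */
	{ },
};
MODULE_DEVICE_TABLE(rpmsg, my_client_id_table);

static struct rpmsg_driver my_client_driver = {
	.probe = my_client_probe,
	.callback = my_client_cb,
	.id_table = my_client_id_table,
	.drv = { .name = "my_glink_client" },
};
module_rpmsg_driver(my_client_driver);

MODULE_LICENSE("GPL v2");
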
+ config RPMSG_QCOM_SMD tristate "Qualcomm Shared Memory Driver (SMD)" depends on QCOM_SMEM diff --git a/drivers/rpmsg/Makefile b/drivers/rpmsg/Makefile index 28cc19088cc0..c71f4ab1ae17 100644 --- a/drivers/rpmsg/Makefile +++ b/drivers/rpmsg/Makefile @@ -1,5 +1,7 @@ obj-$(CONFIG_RPMSG) += rpmsg_core.o obj-$(CONFIG_RPMSG_CHAR) += rpmsg_char.o obj-$(CONFIG_RPMSG_QCOM_GLINK_RPM) += qcom_glink_rpm.o +obj-$(CONFIG_RPMSG_QCOM_GLINK_NATIVE) += qcom_glink_native.o +obj-$(CONFIG_RPMSG_QCOM_GLINK_SMEM) += qcom_glink_smem.o obj-$(CONFIG_RPMSG_QCOM_SMD) += qcom_smd.o obj-$(CONFIG_RPMSG_VIRTIO) += virtio_rpmsg_bus.o diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c new file mode 100644 index 000000000000..5dcc9bf1c5bc --- /dev/null +++ b/drivers/rpmsg/qcom_glink_native.c @@ -0,0 +1,1618 @@ +/* + * Copyright (c) 2016-2017, Linaro Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rpmsg_internal.h" +#include "qcom_glink_native.h" + +#define GLINK_NAME_SIZE 32 +#define GLINK_VERSION_1 1 + +#define RPM_GLINK_CID_MIN 1 +#define RPM_GLINK_CID_MAX 65536 + +struct glink_msg { + __le16 cmd; + __le16 param1; + __le32 param2; + u8 data[]; +} __packed; + +/** + * struct glink_defer_cmd - deferred incoming control message + * @node: list node + * @msg: message header + * data: payload of the message + * + * Copy of a received control message, to be added to @rx_queue and processed + * by @rx_work of @qcom_glink. 
+ */ +struct glink_defer_cmd { + struct list_head node; + + struct glink_msg msg; + u8 data[]; +}; + +/** + * struct glink_core_rx_intent - RX intent + * RX intent + * + * data: pointer to the data (may be NULL for zero-copy) + * id: remote or local intent ID + * size: size of the original intent (do not modify) + * reuse: To mark if the intent can be reused after first use + * in_use: To mark if intent is already in use for the channel + * offset: next write offset (initially 0) + */ +struct glink_core_rx_intent { + void *data; + u32 id; + size_t size; + bool reuse; + bool in_use; + u32 offset; + + struct list_head node; +}; + +/** + * struct qcom_glink - driver context, relates to one remote subsystem + * @dev: reference to the associated struct device + * @mbox_client: mailbox client + * @mbox_chan: mailbox channel + * @rx_pipe: pipe object for receive FIFO + * @tx_pipe: pipe object for transmit FIFO + * @irq: IRQ for signaling incoming events + * @rx_work: worker for handling received control messages + * @rx_lock: protects the @rx_queue + * @rx_queue: queue of received control messages to be processed in @rx_work + * @tx_lock: synchronizes operations on the tx fifo + * @idr_lock: synchronizes @lcids and @rcids modifications + * @lcids: idr of all channels with a known local channel id + * @rcids: idr of all channels with a known remote channel id + */ +struct qcom_glink { + struct device *dev; + + struct mbox_client mbox_client; + struct mbox_chan *mbox_chan; + + struct qcom_glink_pipe *rx_pipe; + struct qcom_glink_pipe *tx_pipe; + + int irq; + + struct work_struct rx_work; + spinlock_t rx_lock; + struct list_head rx_queue; + + struct mutex tx_lock; + + spinlock_t idr_lock; + struct idr lcids; + struct idr rcids; + unsigned long features; + + bool intentless; +}; + +enum { + GLINK_STATE_CLOSED, + GLINK_STATE_OPENING, + GLINK_STATE_OPEN, + GLINK_STATE_CLOSING, +}; + +/** + * struct glink_channel - internal representation of a channel + * @rpdev: rpdev reference, only used for primary endpoints + * @ept: rpmsg endpoint this channel is associated with + * @glink: qcom_glink context handle + * @refcount: refcount for the channel object + * @recv_lock: guard for @ept.cb + * @name: unique channel name/identifier + * @lcid: channel id, in local space + * @rcid: channel id, in remote space + * @intent_lock: lock for protection of @liids, @riids + * @liids: idr of all local intents + * @riids: idr of all remote intents + * @intent_work: worker responsible for transmitting rx_done packets + * @done_intents: list of intents that needs to be announced rx_done + * @buf: receive buffer, for gathering fragments + * @buf_offset: write offset in @buf + * @buf_size: size of current @buf + * @open_ack: completed once remote has acked the open-request + * @open_req: completed once open-request has been received + * @intent_req_lock: Synchronises multiple intent requests + * @intent_req_result: Result of intent request + * @intent_req_comp: Completion for intent_req signalling + */ +struct glink_channel { + struct rpmsg_endpoint ept; + + struct rpmsg_device *rpdev; + struct qcom_glink *glink; + + struct kref refcount; + + spinlock_t recv_lock; + + char *name; + unsigned int lcid; + unsigned int rcid; + + spinlock_t intent_lock; + struct idr liids; + struct idr riids; + struct work_struct intent_work; + struct list_head done_intents; + + struct glink_core_rx_intent *buf; + int buf_offset; + int buf_size; + + struct completion open_ack; + struct completion open_req; + + struct mutex intent_req_lock; + bool 
intent_req_result; + struct completion intent_req_comp; +}; + +#define to_glink_channel(_ept) container_of(_ept, struct glink_channel, ept) + +static const struct rpmsg_endpoint_ops glink_endpoint_ops; + +#define RPM_CMD_VERSION 0 +#define RPM_CMD_VERSION_ACK 1 +#define RPM_CMD_OPEN 2 +#define RPM_CMD_CLOSE 3 +#define RPM_CMD_OPEN_ACK 4 +#define RPM_CMD_INTENT 5 +#define RPM_CMD_RX_DONE 6 +#define RPM_CMD_RX_INTENT_REQ 7 +#define RPM_CMD_RX_INTENT_REQ_ACK 8 +#define RPM_CMD_TX_DATA 9 +#define RPM_CMD_CLOSE_ACK 11 +#define RPM_CMD_TX_DATA_CONT 12 +#define RPM_CMD_READ_NOTIF 13 +#define RPM_CMD_RX_DONE_W_REUSE 14 + +#define GLINK_FEATURE_INTENTLESS BIT(1) + +static void qcom_glink_rx_done_work(struct work_struct *work); + +static struct glink_channel *qcom_glink_alloc_channel(struct qcom_glink *glink, + const char *name) +{ + struct glink_channel *channel; + + channel = kzalloc(sizeof(*channel), GFP_KERNEL); + if (!channel) + return ERR_PTR(-ENOMEM); + + /* Setup glink internal glink_channel data */ + spin_lock_init(&channel->recv_lock); + spin_lock_init(&channel->intent_lock); + + channel->glink = glink; + channel->name = kstrdup(name, GFP_KERNEL); + + init_completion(&channel->open_req); + init_completion(&channel->open_ack); + + INIT_LIST_HEAD(&channel->done_intents); + INIT_WORK(&channel->intent_work, qcom_glink_rx_done_work); + + idr_init(&channel->liids); + idr_init(&channel->riids); + kref_init(&channel->refcount); + + return channel; +} + +static void qcom_glink_channel_release(struct kref *ref) +{ + struct glink_channel *channel = container_of(ref, struct glink_channel, + refcount); + unsigned long flags; + + spin_lock_irqsave(&channel->intent_lock, flags); + idr_destroy(&channel->liids); + idr_destroy(&channel->riids); + spin_unlock_irqrestore(&channel->intent_lock, flags); + + kfree(channel->name); + kfree(channel); +} + +static size_t qcom_glink_rx_avail(struct qcom_glink *glink) +{ + return glink->rx_pipe->avail(glink->rx_pipe); +} + +static void qcom_glink_rx_peak(struct qcom_glink *glink, + void *data, unsigned int offset, size_t count) +{ + glink->rx_pipe->peak(glink->rx_pipe, data, offset, count); +} + +static void qcom_glink_rx_advance(struct qcom_glink *glink, size_t count) +{ + glink->rx_pipe->advance(glink->rx_pipe, count); +} + +static size_t qcom_glink_tx_avail(struct qcom_glink *glink) +{ + return glink->tx_pipe->avail(glink->tx_pipe); +} + +static void qcom_glink_tx_write(struct qcom_glink *glink, + const void *hdr, size_t hlen, + const void *data, size_t dlen) +{ + glink->tx_pipe->write(glink->tx_pipe, hdr, hlen, data, dlen); +} + +static int qcom_glink_tx(struct qcom_glink *glink, + const void *hdr, size_t hlen, + const void *data, size_t dlen, bool wait) +{ + unsigned int tlen = hlen + dlen; + int ret; + + /* Reject packets that are too big */ + if (tlen >= glink->tx_pipe->length) + return -EINVAL; + + ret = mutex_lock_interruptible(&glink->tx_lock); + if (ret) + return ret; + + while (qcom_glink_tx_avail(glink) < tlen) { + if (!wait) { + ret = -EAGAIN; + goto out; + } + + usleep_range(10000, 15000); + } + + qcom_glink_tx_write(glink, hdr, hlen, data, dlen); + + mbox_send_message(glink->mbox_chan, NULL); + mbox_client_txdone(glink->mbox_chan, 0); + +out: + mutex_unlock(&glink->tx_lock); + + return ret; +} + +static int qcom_glink_send_version(struct qcom_glink *glink) +{ + struct glink_msg msg; + + msg.cmd = cpu_to_le16(RPM_CMD_VERSION); + msg.param1 = cpu_to_le16(GLINK_VERSION_1); + msg.param2 = cpu_to_le32(glink->features); + + return qcom_glink_tx(glink, &msg, 
sizeof(msg), NULL, 0, true); +} + +static void qcom_glink_send_version_ack(struct qcom_glink *glink) +{ + struct glink_msg msg; + + msg.cmd = cpu_to_le16(RPM_CMD_VERSION_ACK); + msg.param1 = cpu_to_le16(GLINK_VERSION_1); + msg.param2 = cpu_to_le32(glink->features); + + qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true); +} + +static void qcom_glink_send_open_ack(struct qcom_glink *glink, + struct glink_channel *channel) +{ + struct glink_msg msg; + + msg.cmd = cpu_to_le16(RPM_CMD_OPEN_ACK); + msg.param1 = cpu_to_le16(channel->rcid); + msg.param2 = cpu_to_le32(0); + + qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true); +} + +static void qcom_glink_handle_intent_req_ack(struct qcom_glink *glink, + unsigned int cid, bool granted) +{ + struct glink_channel *channel; + unsigned long flags; + + spin_lock_irqsave(&glink->idr_lock, flags); + channel = idr_find(&glink->rcids, cid); + spin_unlock_irqrestore(&glink->idr_lock, flags); + if (!channel) { + dev_err(glink->dev, "unable to find channel\n"); + return; + } + + channel->intent_req_result = granted; + complete(&channel->intent_req_comp); +} + +/** + * qcom_glink_send_open_req() - send a RPM_CMD_OPEN request to the remote + * @glink: Ptr to the glink edge + * @channel: Ptr to the channel that the open req is sent + * + * Allocates a local channel id and sends a RPM_CMD_OPEN message to the remote. + * Will return with refcount held, regardless of outcome. + * + * Returns 0 on success, negative errno otherwise. + */ +static int qcom_glink_send_open_req(struct qcom_glink *glink, + struct glink_channel *channel) +{ + struct { + struct glink_msg msg; + u8 name[GLINK_NAME_SIZE]; + } __packed req; + int name_len = strlen(channel->name) + 1; + int req_len = ALIGN(sizeof(req.msg) + name_len, 8); + int ret; + unsigned long flags; + + kref_get(&channel->refcount); + + spin_lock_irqsave(&glink->idr_lock, flags); + ret = idr_alloc_cyclic(&glink->lcids, channel, + RPM_GLINK_CID_MIN, RPM_GLINK_CID_MAX, + GFP_ATOMIC); + spin_unlock_irqrestore(&glink->idr_lock, flags); + if (ret < 0) + return ret; + + channel->lcid = ret; + + req.msg.cmd = cpu_to_le16(RPM_CMD_OPEN); + req.msg.param1 = cpu_to_le16(channel->lcid); + req.msg.param2 = cpu_to_le32(name_len); + strcpy(req.name, channel->name); + + ret = qcom_glink_tx(glink, &req, req_len, NULL, 0, true); + if (ret) + goto remove_idr; + + return 0; + +remove_idr: + spin_lock_irqsave(&glink->idr_lock, flags); + idr_remove(&glink->lcids, channel->lcid); + channel->lcid = 0; + spin_unlock_irqrestore(&glink->idr_lock, flags); + + return ret; +} + +static void qcom_glink_send_close_req(struct qcom_glink *glink, + struct glink_channel *channel) +{ + struct glink_msg req; + + req.cmd = cpu_to_le16(RPM_CMD_CLOSE); + req.param1 = cpu_to_le16(channel->lcid); + req.param2 = 0; + + qcom_glink_tx(glink, &req, sizeof(req), NULL, 0, true); +} + +static void qcom_glink_send_close_ack(struct qcom_glink *glink, + unsigned int rcid) +{ + struct glink_msg req; + + req.cmd = cpu_to_le16(RPM_CMD_CLOSE_ACK); + req.param1 = cpu_to_le16(rcid); + req.param2 = 0; + + qcom_glink_tx(glink, &req, sizeof(req), NULL, 0, true); +} + +static void qcom_glink_rx_done_work(struct work_struct *work) +{ + struct glink_channel *channel = container_of(work, struct glink_channel, + intent_work); + struct qcom_glink *glink = channel->glink; + struct glink_core_rx_intent *intent, *tmp; + struct { + u16 id; + u16 lcid; + u32 liid; + } __packed cmd; + + unsigned int cid = channel->lcid; + unsigned int iid; + bool reuse; + unsigned long flags; + + 
spin_lock_irqsave(&channel->intent_lock, flags); + list_for_each_entry_safe(intent, tmp, &channel->done_intents, node) { + list_del(&intent->node); + spin_unlock_irqrestore(&channel->intent_lock, flags); + iid = intent->id; + reuse = intent->reuse; + + cmd.id = reuse ? RPM_CMD_RX_DONE_W_REUSE : RPM_CMD_RX_DONE; + cmd.lcid = cid; + cmd.liid = iid; + + qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true); + if (!reuse) { + kfree(intent->data); + kfree(intent); + } + spin_lock_irqsave(&channel->intent_lock, flags); + } + spin_unlock_irqrestore(&channel->intent_lock, flags); +} + +static void qcom_glink_rx_done(struct qcom_glink *glink, + struct glink_channel *channel, + struct glink_core_rx_intent *intent) +{ + /* We don't send RX_DONE to intentless systems */ + if (glink->intentless) { + kfree(intent->data); + kfree(intent); + return; + } + + /* Take it off the tree of receive intents */ + if (!intent->reuse) { + spin_lock(&channel->intent_lock); + idr_remove(&channel->liids, intent->id); + spin_unlock(&channel->intent_lock); + } + + /* Schedule the sending of a rx_done indication */ + spin_lock(&channel->intent_lock); + list_add_tail(&intent->node, &channel->done_intents); + spin_unlock(&channel->intent_lock); + + schedule_work(&channel->intent_work); +} + +/** + * qcom_glink_receive_version() - receive version/features from remote system + * + * @glink: pointer to transport interface + * @r_version: remote version + * @r_features: remote features + * + * This function is called in response to a remote-initiated version/feature + * negotiation sequence. + */ +static void qcom_glink_receive_version(struct qcom_glink *glink, + u32 version, + u32 features) +{ + switch (version) { + case 0: + break; + case GLINK_VERSION_1: + glink->features &= features; + /* FALLTHROUGH */ + default: + qcom_glink_send_version_ack(glink); + break; + } +} + +/** + * qcom_glink_receive_version_ack() - receive negotiation ack from remote system + * + * @glink: pointer to transport interface + * @r_version: remote version response + * @r_features: remote features response + * + * This function is called in response to a local-initiated version/feature + * negotiation sequence and is the counter-offer from the remote side based + * upon the initial version and feature set requested. + */ +static void qcom_glink_receive_version_ack(struct qcom_glink *glink, + u32 version, + u32 features) +{ + switch (version) { + case 0: + /* Version negotiation failed */ + break; + case GLINK_VERSION_1: + if (features == glink->features) + break; + + glink->features &= features; + /* FALLTHROUGH */ + default: + qcom_glink_send_version(glink); + break; + } +} + +/** + * qcom_glink_send_intent_req_ack() - convert an rx intent request ack cmd to + wire format and transmit + * @glink: The transport to transmit on. + * @channel: The glink channel + * @granted: The request response to encode. + * + * Return: 0 on success or standard Linux error code. + */ +static int qcom_glink_send_intent_req_ack(struct qcom_glink *glink, + struct glink_channel *channel, + bool granted) +{ + struct glink_msg msg; + + msg.cmd = cpu_to_le16(RPM_CMD_RX_INTENT_REQ_ACK); + msg.param1 = cpu_to_le16(channel->lcid); + msg.param2 = cpu_to_le32(granted); + + qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true); + + return 0; +} + +/** + * qcom_glink_advertise_intent - convert an rx intent cmd to wire format and + * transmit + * @glink: The transport to transmit on. + * @channel: The local channel + * @size: The intent to pass on to remote. 
+ * + * Return: 0 on success or standard Linux error code. + */ +static int qcom_glink_advertise_intent(struct qcom_glink *glink, + struct glink_channel *channel, + struct glink_core_rx_intent *intent) +{ + struct command { + u16 id; + u16 lcid; + u32 count; + u32 size; + u32 liid; + } __packed; + struct command cmd; + + cmd.id = cpu_to_le16(RPM_CMD_INTENT); + cmd.lcid = cpu_to_le16(channel->lcid); + cmd.count = cpu_to_le32(1); + cmd.size = cpu_to_le32(intent->size); + cmd.liid = cpu_to_le32(intent->id); + + qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true); + + return 0; +} + +static struct glink_core_rx_intent * +qcom_glink_alloc_intent(struct qcom_glink *glink, + struct glink_channel *channel, + size_t size, + bool reuseable) +{ + struct glink_core_rx_intent *intent; + int ret; + unsigned long flags; + + intent = kzalloc(sizeof(*intent), GFP_KERNEL); + if (!intent) + return NULL; + + intent->data = kzalloc(size, GFP_KERNEL); + if (!intent->data) + goto free_intent; + + spin_lock_irqsave(&channel->intent_lock, flags); + ret = idr_alloc_cyclic(&channel->liids, intent, 1, -1, GFP_ATOMIC); + if (ret < 0) { + spin_unlock_irqrestore(&channel->intent_lock, flags); + goto free_data; + } + spin_unlock_irqrestore(&channel->intent_lock, flags); + + intent->id = ret; + intent->size = size; + intent->reuse = reuseable; + + return intent; + +free_data: + kfree(intent->data); +free_intent: + kfree(intent); + return NULL; +} + +static void qcom_glink_handle_rx_done(struct qcom_glink *glink, + u32 cid, uint32_t iid, + bool reuse) +{ + struct glink_core_rx_intent *intent; + struct glink_channel *channel; + unsigned long flags; + + spin_lock_irqsave(&glink->idr_lock, flags); + channel = idr_find(&glink->rcids, cid); + spin_unlock_irqrestore(&glink->idr_lock, flags); + if (!channel) { + dev_err(glink->dev, "invalid channel id received\n"); + return; + } + + spin_lock_irqsave(&channel->intent_lock, flags); + intent = idr_find(&channel->riids, iid); + + if (!intent) { + spin_unlock_irqrestore(&channel->intent_lock, flags); + dev_err(glink->dev, "invalid intent id received\n"); + return; + } + + intent->in_use = false; + + if (!reuse) { + idr_remove(&channel->riids, intent->id); + kfree(intent); + } + spin_unlock_irqrestore(&channel->intent_lock, flags); +} + +/** + * qcom_glink_handle_intent_req() - Receive a request for rx_intent + * from remote side + * if_ptr: Pointer to the transport interface + * rcid: Remote channel ID + * size: size of the intent + * + * The function searches for the local channel to which the request for + * rx_intent has arrived and allocates and notifies the remote back + */ +static void qcom_glink_handle_intent_req(struct qcom_glink *glink, + u32 cid, size_t size) +{ + struct glink_core_rx_intent *intent; + struct glink_channel *channel; + unsigned long flags; + + spin_lock_irqsave(&glink->idr_lock, flags); + channel = idr_find(&glink->rcids, cid); + spin_unlock_irqrestore(&glink->idr_lock, flags); + + if (!channel) { + pr_err("%s channel not found for cid %d\n", __func__, cid); + return; + } + + intent = qcom_glink_alloc_intent(glink, channel, size, false); + if (intent) + qcom_glink_advertise_intent(glink, channel, intent); + + qcom_glink_send_intent_req_ack(glink, channel, !!intent); +} + +static int qcom_glink_rx_defer(struct qcom_glink *glink, size_t extra) +{ + struct glink_defer_cmd *dcmd; + + extra = ALIGN(extra, 8); + + if (qcom_glink_rx_avail(glink) < sizeof(struct glink_msg) + extra) { + dev_dbg(glink->dev, "Insufficient data in rx fifo"); + return -ENXIO; + } + + 
dcmd = kzalloc(sizeof(*dcmd) + extra, GFP_ATOMIC); + if (!dcmd) + return -ENOMEM; + + INIT_LIST_HEAD(&dcmd->node); + + qcom_glink_rx_peak(glink, &dcmd->msg, 0, sizeof(dcmd->msg) + extra); + + spin_lock(&glink->rx_lock); + list_add_tail(&dcmd->node, &glink->rx_queue); + spin_unlock(&glink->rx_lock); + + schedule_work(&glink->rx_work); + qcom_glink_rx_advance(glink, sizeof(dcmd->msg) + extra); + + return 0; +} + +static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail) +{ + struct glink_core_rx_intent *intent; + struct glink_channel *channel; + struct { + struct glink_msg msg; + __le32 chunk_size; + __le32 left_size; + } __packed hdr; + unsigned int chunk_size; + unsigned int left_size; + unsigned int rcid; + unsigned int liid; + int ret = 0; + unsigned long flags; + + if (avail < sizeof(hdr)) { + dev_dbg(glink->dev, "Not enough data in fifo\n"); + return -EAGAIN; + } + + qcom_glink_rx_peak(glink, &hdr, 0, sizeof(hdr)); + chunk_size = le32_to_cpu(hdr.chunk_size); + left_size = le32_to_cpu(hdr.left_size); + + if (avail < sizeof(hdr) + chunk_size) { + dev_dbg(glink->dev, "Payload not yet in fifo\n"); + return -EAGAIN; + } + + if (WARN(chunk_size % 4, "Incoming data must be word aligned\n")) + return -EINVAL; + + rcid = le16_to_cpu(hdr.msg.param1); + spin_lock_irqsave(&glink->idr_lock, flags); + channel = idr_find(&glink->rcids, rcid); + spin_unlock_irqrestore(&glink->idr_lock, flags); + if (!channel) { + dev_dbg(glink->dev, "Data on non-existing channel\n"); + + /* Drop the message */ + goto advance_rx; + } + + if (glink->intentless) { + /* Might have an ongoing, fragmented, message to append */ + if (!channel->buf) { + intent = kzalloc(sizeof(*intent), GFP_ATOMIC); + if (!intent) + return -ENOMEM; + + intent->data = kmalloc(chunk_size + left_size, + GFP_ATOMIC); + if (!intent->data) { + kfree(intent); + return -ENOMEM; + } + + intent->id = 0xbabababa; + intent->size = chunk_size + left_size; + intent->offset = 0; + + channel->buf = intent; + } else { + intent = channel->buf; + } + } else { + liid = le32_to_cpu(hdr.msg.param2); + + spin_lock_irqsave(&channel->intent_lock, flags); + intent = idr_find(&channel->liids, liid); + spin_unlock_irqrestore(&channel->intent_lock, flags); + + if (!intent) { + dev_err(glink->dev, + "no intent found for channel %s intent %d", + channel->name, liid); + goto advance_rx; + } + } + + if (intent->size - intent->offset < chunk_size) { + dev_err(glink->dev, "Insufficient space in intent\n"); + + /* The packet header lied, drop payload */ + goto advance_rx; + } + + qcom_glink_rx_peak(glink, intent->data + intent->offset, + sizeof(hdr), chunk_size); + intent->offset += chunk_size; + + /* Handle message when no fragments remain to be received */ + if (!left_size) { + spin_lock(&channel->recv_lock); + if (channel->ept.cb) { + channel->ept.cb(channel->ept.rpdev, + intent->data, + intent->offset, + channel->ept.priv, + RPMSG_ADDR_ANY); + } + spin_unlock(&channel->recv_lock); + + intent->offset = 0; + channel->buf = NULL; + + qcom_glink_rx_done(glink, channel, intent); + } + +advance_rx: + qcom_glink_rx_advance(glink, ALIGN(sizeof(hdr) + chunk_size, 8)); + + return ret; +} + +static void qcom_glink_handle_intent(struct qcom_glink *glink, + unsigned int cid, + unsigned int count, + size_t avail) +{ + struct glink_core_rx_intent *intent; + struct glink_channel *channel; + struct intent_pair { + __le32 size; + __le32 iid; + }; + + struct { + struct glink_msg msg; + struct intent_pair intents[]; + } __packed * msg; + + const size_t msglen = sizeof(*msg) + 
sizeof(struct intent_pair) * count; + int ret; + int i; + unsigned long flags; + + if (avail < msglen) { + dev_dbg(glink->dev, "Not enough data in fifo\n"); + return; + } + + spin_lock_irqsave(&glink->idr_lock, flags); + channel = idr_find(&glink->rcids, cid); + spin_unlock_irqrestore(&glink->idr_lock, flags); + if (!channel) { + dev_err(glink->dev, "intents for non-existing channel\n"); + return; + } + + msg = kmalloc(msglen, GFP_ATOMIC); + if (!msg) + return; + + qcom_glink_rx_peak(glink, msg, 0, msglen); + + for (i = 0; i < count; ++i) { + intent = kzalloc(sizeof(*intent), GFP_ATOMIC); + if (!intent) + break; + + intent->id = le32_to_cpu(msg->intents[i].iid); + intent->size = le32_to_cpu(msg->intents[i].size); + + spin_lock_irqsave(&channel->intent_lock, flags); + ret = idr_alloc(&channel->riids, intent, + intent->id, intent->id + 1, GFP_ATOMIC); + spin_unlock_irqrestore(&channel->intent_lock, flags); + + if (ret < 0) + dev_err(glink->dev, "failed to store remote intent\n"); + } + + kfree(msg); + qcom_glink_rx_advance(glink, ALIGN(msglen, 8)); +} + +static int qcom_glink_rx_open_ack(struct qcom_glink *glink, unsigned int lcid) +{ + struct glink_channel *channel; + + spin_lock(&glink->idr_lock); + channel = idr_find(&glink->lcids, lcid); + spin_unlock(&glink->idr_lock); + if (!channel) { + dev_err(glink->dev, "Invalid open ack packet\n"); + return -EINVAL; + } + + complete(&channel->open_ack); + + return 0; +} + +static irqreturn_t qcom_glink_native_intr(int irq, void *data) +{ + struct qcom_glink *glink = data; + struct glink_msg msg; + unsigned int param1; + unsigned int param2; + unsigned int avail; + unsigned int cmd; + int ret = 0; + + for (;;) { + avail = qcom_glink_rx_avail(glink); + if (avail < sizeof(msg)) + break; + + qcom_glink_rx_peak(glink, &msg, 0, sizeof(msg)); + + cmd = le16_to_cpu(msg.cmd); + param1 = le16_to_cpu(msg.param1); + param2 = le32_to_cpu(msg.param2); + + switch (cmd) { + case RPM_CMD_VERSION: + case RPM_CMD_VERSION_ACK: + case RPM_CMD_CLOSE: + case RPM_CMD_CLOSE_ACK: + case RPM_CMD_RX_INTENT_REQ: + ret = qcom_glink_rx_defer(glink, 0); + break; + case RPM_CMD_OPEN_ACK: + ret = qcom_glink_rx_open_ack(glink, param1); + qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8)); + break; + case RPM_CMD_OPEN: + ret = qcom_glink_rx_defer(glink, param2); + break; + case RPM_CMD_TX_DATA: + case RPM_CMD_TX_DATA_CONT: + ret = qcom_glink_rx_data(glink, avail); + break; + case RPM_CMD_READ_NOTIF: + qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8)); + + mbox_send_message(glink->mbox_chan, NULL); + mbox_client_txdone(glink->mbox_chan, 0); + break; + case RPM_CMD_INTENT: + qcom_glink_handle_intent(glink, param1, param2, avail); + break; + case RPM_CMD_RX_DONE: + qcom_glink_handle_rx_done(glink, param1, param2, false); + qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8)); + break; + case RPM_CMD_RX_DONE_W_REUSE: + qcom_glink_handle_rx_done(glink, param1, param2, true); + qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8)); + break; + case RPM_CMD_RX_INTENT_REQ_ACK: + qcom_glink_handle_intent_req_ack(glink, param1, param2); + qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8)); + break; + default: + dev_err(glink->dev, "unhandled rx cmd: %d\n", cmd); + ret = -EINVAL; + break; + } + + if (ret) + break; + } + + return IRQ_HANDLED; +} + +/* Locally initiated rpmsg_create_ept */ +static struct glink_channel *qcom_glink_create_local(struct qcom_glink *glink, + const char *name) +{ + struct glink_channel *channel; + int ret; + unsigned long flags; + + channel = 
qcom_glink_alloc_channel(glink, name); + if (IS_ERR(channel)) + return ERR_CAST(channel); + + ret = qcom_glink_send_open_req(glink, channel); + if (ret) + goto release_channel; + + ret = wait_for_completion_timeout(&channel->open_ack, 5 * HZ); + if (!ret) + goto err_timeout; + + ret = wait_for_completion_timeout(&channel->open_req, 5 * HZ); + if (!ret) + goto err_timeout; + + qcom_glink_send_open_ack(glink, channel); + + return channel; + +err_timeout: + /* qcom_glink_send_open_req() did register the channel in lcids*/ + spin_lock_irqsave(&glink->idr_lock, flags); + idr_remove(&glink->lcids, channel->lcid); + spin_unlock_irqrestore(&glink->idr_lock, flags); + +release_channel: + /* Release qcom_glink_send_open_req() reference */ + kref_put(&channel->refcount, qcom_glink_channel_release); + /* Release qcom_glink_alloc_channel() reference */ + kref_put(&channel->refcount, qcom_glink_channel_release); + + return ERR_PTR(-ETIMEDOUT); +} + +/* Remote initiated rpmsg_create_ept */ +static int qcom_glink_create_remote(struct qcom_glink *glink, + struct glink_channel *channel) +{ + int ret; + + qcom_glink_send_open_ack(glink, channel); + + ret = qcom_glink_send_open_req(glink, channel); + if (ret) + goto close_link; + + ret = wait_for_completion_timeout(&channel->open_ack, 5 * HZ); + if (!ret) { + ret = -ETIMEDOUT; + goto close_link; + } + + return 0; + +close_link: + /* + * Send a close request to "undo" our open-ack. The close-ack will + * release the last reference. + */ + qcom_glink_send_close_req(glink, channel); + + /* Release qcom_glink_send_open_req() reference */ + kref_put(&channel->refcount, qcom_glink_channel_release); + + return ret; +} + +static struct rpmsg_endpoint *qcom_glink_create_ept(struct rpmsg_device *rpdev, + rpmsg_rx_cb_t cb, + void *priv, + struct rpmsg_channel_info + chinfo) +{ + struct glink_channel *parent = to_glink_channel(rpdev->ept); + struct glink_channel *channel; + struct qcom_glink *glink = parent->glink; + struct rpmsg_endpoint *ept; + const char *name = chinfo.name; + int cid; + int ret; + unsigned long flags; + + spin_lock_irqsave(&glink->idr_lock, flags); + idr_for_each_entry(&glink->rcids, channel, cid) { + if (!strcmp(channel->name, name)) + break; + } + spin_unlock_irqrestore(&glink->idr_lock, flags); + + if (!channel) { + channel = qcom_glink_create_local(glink, name); + if (IS_ERR(channel)) + return NULL; + } else { + ret = qcom_glink_create_remote(glink, channel); + if (ret) + return NULL; + } + + ept = &channel->ept; + ept->rpdev = rpdev; + ept->cb = cb; + ept->priv = priv; + ept->ops = &glink_endpoint_ops; + + return ept; +} + +static int qcom_glink_announce_create(struct rpmsg_device *rpdev) +{ + struct glink_channel *channel = to_glink_channel(rpdev->ept); + struct glink_core_rx_intent *intent; + struct qcom_glink *glink = channel->glink; + int num_intents = glink->intentless ? 
0 : 5; + + /* Channel is now open, advertise base set of intents */ + while (num_intents--) { + intent = qcom_glink_alloc_intent(glink, channel, SZ_1K, true); + if (!intent) + break; + + qcom_glink_advertise_intent(glink, channel, intent); + } + + return 0; +} + +static void qcom_glink_destroy_ept(struct rpmsg_endpoint *ept) +{ + struct glink_channel *channel = to_glink_channel(ept); + struct qcom_glink *glink = channel->glink; + unsigned long flags; + + spin_lock_irqsave(&channel->recv_lock, flags); + channel->ept.cb = NULL; + spin_unlock_irqrestore(&channel->recv_lock, flags); + + /* Decouple the potential rpdev from the channel */ + channel->rpdev = NULL; + + qcom_glink_send_close_req(glink, channel); +} + +static int qcom_glink_request_intent(struct qcom_glink *glink, + struct glink_channel *channel, + size_t size) +{ + struct { + u16 id; + u16 cid; + u32 size; + } __packed cmd; + + int ret; + + mutex_lock(&channel->intent_req_lock); + + reinit_completion(&channel->intent_req_comp); + + cmd.id = RPM_CMD_RX_INTENT_REQ; + cmd.cid = channel->lcid; + cmd.size = size; + + ret = qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true); + if (ret) + goto unlock; + + ret = wait_for_completion_timeout(&channel->intent_req_comp, 10 * HZ); + if (!ret) { + dev_err(glink->dev, "intent request timed out\n"); + ret = -ETIMEDOUT; + } else { + ret = channel->intent_req_result ? 0 : -ECANCELED; + } + +unlock: + mutex_unlock(&channel->intent_req_lock); + return ret; +} + +static int __qcom_glink_send(struct glink_channel *channel, + void *data, int len, bool wait) +{ + struct qcom_glink *glink = channel->glink; + struct glink_core_rx_intent *intent = NULL; + struct glink_core_rx_intent *tmp; + int iid = 0; + struct { + struct glink_msg msg; + __le32 chunk_size; + __le32 left_size; + } __packed req; + int ret; + unsigned long flags; + + if (!glink->intentless) { + while (!intent) { + spin_lock_irqsave(&channel->intent_lock, flags); + idr_for_each_entry(&channel->riids, tmp, iid) { + if (tmp->size >= len && !tmp->in_use) { + tmp->in_use = true; + intent = tmp; + break; + } + } + spin_unlock_irqrestore(&channel->intent_lock, flags); + + /* We found an available intent */ + if (intent) + break; + + if (!wait) + return -EBUSY; + + ret = qcom_glink_request_intent(glink, channel, len); + if (ret < 0) + return ret; + } + + iid = intent->id; + } + + req.msg.cmd = cpu_to_le16(RPM_CMD_TX_DATA); + req.msg.param1 = cpu_to_le16(channel->lcid); + req.msg.param2 = cpu_to_le32(iid); + req.chunk_size = cpu_to_le32(len); + req.left_size = cpu_to_le32(0); + + ret = qcom_glink_tx(glink, &req, sizeof(req), data, len, wait); + + /* Mark intent available if we failed */ + if (ret && intent) + intent->in_use = false; + + return ret; +} + +static int qcom_glink_send(struct rpmsg_endpoint *ept, void *data, int len) +{ + struct glink_channel *channel = to_glink_channel(ept); + + return __qcom_glink_send(channel, data, len, true); +} + +static int qcom_glink_trysend(struct rpmsg_endpoint *ept, void *data, int len) +{ + struct glink_channel *channel = to_glink_channel(ept); + + return __qcom_glink_send(channel, data, len, false); +} + +/* + * Finds the device_node for the glink child interested in this channel. 
+ */ +static struct device_node *qcom_glink_match_channel(struct device_node *node, + const char *channel) +{ + struct device_node *child; + const char *name; + const char *key; + int ret; + + for_each_available_child_of_node(node, child) { + key = "qcom,glink-channels"; + ret = of_property_read_string(child, key, &name); + if (ret) + continue; + + if (strcmp(name, channel) == 0) + return child; + } + + return NULL; +} + +static const struct rpmsg_device_ops glink_device_ops = { + .create_ept = qcom_glink_create_ept, + .announce_create = qcom_glink_announce_create, +}; + +static const struct rpmsg_endpoint_ops glink_endpoint_ops = { + .destroy_ept = qcom_glink_destroy_ept, + .send = qcom_glink_send, + .trysend = qcom_glink_trysend, +}; + +static void qcom_glink_rpdev_release(struct device *dev) +{ + struct rpmsg_device *rpdev = to_rpmsg_device(dev); + struct glink_channel *channel = to_glink_channel(rpdev->ept); + + channel->rpdev = NULL; + kfree(rpdev); +} + +static int qcom_glink_rx_open(struct qcom_glink *glink, unsigned int rcid, + char *name) +{ + struct glink_channel *channel; + struct rpmsg_device *rpdev; + bool create_device = false; + struct device_node *node; + int lcid; + int ret; + unsigned long flags; + + spin_lock_irqsave(&glink->idr_lock, flags); + idr_for_each_entry(&glink->lcids, channel, lcid) { + if (!strcmp(channel->name, name)) + break; + } + spin_unlock_irqrestore(&glink->idr_lock, flags); + + if (!channel) { + channel = qcom_glink_alloc_channel(glink, name); + if (IS_ERR(channel)) + return PTR_ERR(channel); + + /* The opening dance was initiated by the remote */ + create_device = true; + } + + spin_lock_irqsave(&glink->idr_lock, flags); + ret = idr_alloc(&glink->rcids, channel, rcid, rcid + 1, GFP_ATOMIC); + if (ret < 0) { + dev_err(glink->dev, "Unable to insert channel into rcid list\n"); + spin_unlock_irqrestore(&glink->idr_lock, flags); + goto free_channel; + } + channel->rcid = ret; + spin_unlock_irqrestore(&glink->idr_lock, flags); + + complete(&channel->open_req); + + if (create_device) { + rpdev = kzalloc(sizeof(*rpdev), GFP_KERNEL); + if (!rpdev) { + ret = -ENOMEM; + goto rcid_remove; + } + + rpdev->ept = &channel->ept; + strncpy(rpdev->id.name, name, RPMSG_NAME_SIZE); + rpdev->src = RPMSG_ADDR_ANY; + rpdev->dst = RPMSG_ADDR_ANY; + rpdev->ops = &glink_device_ops; + + node = qcom_glink_match_channel(glink->dev->of_node, name); + rpdev->dev.of_node = node; + rpdev->dev.parent = glink->dev; + rpdev->dev.release = qcom_glink_rpdev_release; + + ret = rpmsg_register_device(rpdev); + if (ret) + goto free_rpdev; + + channel->rpdev = rpdev; + } + + return 0; + +free_rpdev: + kfree(rpdev); +rcid_remove: + spin_lock_irqsave(&glink->idr_lock, flags); + idr_remove(&glink->rcids, channel->rcid); + channel->rcid = 0; + spin_unlock_irqrestore(&glink->idr_lock, flags); +free_channel: + /* Release the reference, iff we took it */ + if (create_device) + kref_put(&channel->refcount, qcom_glink_channel_release); + + return ret; +} + +static void qcom_glink_rx_close(struct qcom_glink *glink, unsigned int rcid) +{ + struct rpmsg_channel_info chinfo; + struct glink_channel *channel; + unsigned long flags; + + spin_lock_irqsave(&glink->idr_lock, flags); + channel = idr_find(&glink->rcids, rcid); + spin_unlock_irqrestore(&glink->idr_lock, flags); + if (WARN(!channel, "close request on unknown channel\n")) + return; + + /* cancel pending rx_done work */ + cancel_work_sync(&channel->intent_work); + + if (channel->rpdev) { + strncpy(chinfo.name, channel->name, sizeof(chinfo.name)); + 
chinfo.src = RPMSG_ADDR_ANY; + chinfo.dst = RPMSG_ADDR_ANY; + + rpmsg_unregister_device(glink->dev, &chinfo); + } + + qcom_glink_send_close_ack(glink, channel->rcid); + + spin_lock_irqsave(&glink->idr_lock, flags); + idr_remove(&glink->rcids, channel->rcid); + channel->rcid = 0; + spin_unlock_irqrestore(&glink->idr_lock, flags); + + kref_put(&channel->refcount, qcom_glink_channel_release); +} + +static void qcom_glink_rx_close_ack(struct qcom_glink *glink, unsigned int lcid) +{ + struct glink_channel *channel; + unsigned long flags; + + spin_lock_irqsave(&glink->idr_lock, flags); + channel = idr_find(&glink->lcids, lcid); + if (WARN(!channel, "close ack on unknown channel\n")) { + spin_unlock_irqrestore(&glink->idr_lock, flags); + return; + } + + idr_remove(&glink->lcids, channel->lcid); + channel->lcid = 0; + spin_unlock_irqrestore(&glink->idr_lock, flags); + + kref_put(&channel->refcount, qcom_glink_channel_release); +} + +static void qcom_glink_work(struct work_struct *work) +{ + struct qcom_glink *glink = container_of(work, struct qcom_glink, + rx_work); + struct glink_defer_cmd *dcmd; + struct glink_msg *msg; + unsigned long flags; + unsigned int param1; + unsigned int param2; + unsigned int cmd; + + for (;;) { + spin_lock_irqsave(&glink->rx_lock, flags); + if (list_empty(&glink->rx_queue)) { + spin_unlock_irqrestore(&glink->rx_lock, flags); + break; + } + dcmd = list_first_entry(&glink->rx_queue, + struct glink_defer_cmd, node); + list_del(&dcmd->node); + spin_unlock_irqrestore(&glink->rx_lock, flags); + + msg = &dcmd->msg; + cmd = le16_to_cpu(msg->cmd); + param1 = le16_to_cpu(msg->param1); + param2 = le32_to_cpu(msg->param2); + + switch (cmd) { + case RPM_CMD_VERSION: + qcom_glink_receive_version(glink, param1, param2); + break; + case RPM_CMD_VERSION_ACK: + qcom_glink_receive_version_ack(glink, param1, param2); + break; + case RPM_CMD_OPEN: + qcom_glink_rx_open(glink, param1, msg->data); + break; + case RPM_CMD_CLOSE: + qcom_glink_rx_close(glink, param1); + break; + case RPM_CMD_CLOSE_ACK: + qcom_glink_rx_close_ack(glink, param1); + break; + case RPM_CMD_RX_INTENT_REQ: + qcom_glink_handle_intent_req(glink, param1, param2); + break; + default: + WARN(1, "Unknown defer object %d\n", cmd); + break; + } + + kfree(dcmd); + } +} + +struct qcom_glink *qcom_glink_native_probe(struct device *dev, + unsigned long features, + struct qcom_glink_pipe *rx, + struct qcom_glink_pipe *tx, + bool intentless) +{ + int irq; + int ret; + struct qcom_glink *glink; + + glink = devm_kzalloc(dev, sizeof(*glink), GFP_KERNEL); + if (!glink) + return ERR_PTR(-ENOMEM); + + glink->dev = dev; + glink->tx_pipe = tx; + glink->rx_pipe = rx; + + glink->features = features; + glink->intentless = intentless; + + mutex_init(&glink->tx_lock); + spin_lock_init(&glink->rx_lock); + INIT_LIST_HEAD(&glink->rx_queue); + INIT_WORK(&glink->rx_work, qcom_glink_work); + + spin_lock_init(&glink->idr_lock); + idr_init(&glink->lcids); + idr_init(&glink->rcids); + + glink->mbox_client.dev = dev; + glink->mbox_chan = mbox_request_channel(&glink->mbox_client, 0); + if (IS_ERR(glink->mbox_chan)) { + if (PTR_ERR(glink->mbox_chan) != -EPROBE_DEFER) + dev_err(dev, "failed to acquire IPC channel\n"); + return ERR_CAST(glink->mbox_chan); + } + + irq = of_irq_get(dev->of_node, 0); + ret = devm_request_irq(dev, irq, + qcom_glink_native_intr, + IRQF_NO_SUSPEND | IRQF_SHARED, + "glink-native", glink); + if (ret) { + dev_err(dev, "failed to request IRQ\n"); + return ERR_PTR(ret); + } + + glink->irq = irq; + + ret = qcom_glink_send_version(glink); 
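qcom_glink_native_probe() is the entry point for transport backends: a backend describes each FIFO with a struct qcom_glink_pipe (declared in qcom_glink_native.h below), fills in the avail/peak/advance/write accessors, and hands both pipes to the native core, which then requests the mailbox and interrupt, negotiates the protocol version and drives the channel state machine. The RPM and SMEM glue further down in this patch follow exactly this pattern. A minimal sketch of such glue code (the my_* names are hypothetical placeholders, not part of this patch) could look like:

/*
 * Hypothetical transport glue for the native core above. The my_*
 * names are illustrative only; dev must provide the mailbox and
 * interrupt that qcom_glink_native_probe() claims for itself.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#include "qcom_glink_native.h"

struct my_pipe {
	struct qcom_glink_pipe native;
	/* transport-specific FIFO state would live here */
};

#define to_my_pipe(p) container_of(p, struct my_pipe, native)

static size_t my_rx_avail(struct qcom_glink_pipe *np)
{
	/* bytes ready to be consumed from the RX FIFO (via to_my_pipe(np)) */
	return 0;
}

static void my_rx_peek(struct qcom_glink_pipe *np, void *data,
		       unsigned int offset, size_t count)
{
	/* copy count bytes at offset out of the RX FIFO, without consuming */
}

static void my_rx_advance(struct qcom_glink_pipe *np, size_t count)
{
	/* consume count bytes from the RX FIFO */
}

static size_t my_tx_avail(struct qcom_glink_pipe *np)
{
	/* free space in the TX FIFO */
	return 0;
}

static void my_tx_write(struct qcom_glink_pipe *np,
			const void *hdr, size_t hlen,
			const void *data, size_t dlen)
{
	/* write header followed by payload into the TX FIFO */
}

static struct qcom_glink *my_transport_register(struct device *dev)
{
	struct my_pipe *rx, *tx;

	rx = devm_kzalloc(dev, sizeof(*rx), GFP_KERNEL);
	tx = devm_kzalloc(dev, sizeof(*tx), GFP_KERNEL);
	if (!rx || !tx)
		return ERR_PTR(-ENOMEM);

	rx->native.avail = my_rx_avail;
	rx->native.peak = my_rx_peek;		/* the op is named "peak" */
	rx->native.advance = my_rx_advance;
	tx->native.avail = my_tx_avail;
	tx->native.write = my_tx_write;

	/* features = 0 and intentless = true mirror the RPM transport */
	return qcom_glink_native_probe(dev, 0, &rx->native, &tx->native, true);
}

Keeping the FIFO accessors behind struct qcom_glink_pipe is what lets the intentless RPM transport and the intent-based SMEM transport in this patch share the native core unchanged.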
+ if (ret) + return ERR_PTR(ret); + + return glink; +} +EXPORT_SYMBOL_GPL(qcom_glink_native_probe); + +static int qcom_glink_remove_device(struct device *dev, void *data) +{ + device_unregister(dev); + + return 0; +} + +void qcom_glink_native_remove(struct qcom_glink *glink) +{ + struct glink_channel *channel; + int cid; + int ret; + unsigned long flags; + + disable_irq(glink->irq); + cancel_work_sync(&glink->rx_work); + + ret = device_for_each_child(glink->dev, NULL, qcom_glink_remove_device); + if (ret) + dev_warn(glink->dev, "Can't remove GLINK devices: %d\n", ret); + + spin_lock_irqsave(&glink->idr_lock, flags); + /* Release any defunct local channels, waiting for close-ack */ + idr_for_each_entry(&glink->lcids, channel, cid) + kref_put(&channel->refcount, qcom_glink_channel_release); + + idr_destroy(&glink->lcids); + idr_destroy(&glink->rcids); + spin_unlock_irqrestore(&glink->idr_lock, flags); + mbox_free_channel(glink->mbox_chan); +} +EXPORT_SYMBOL_GPL(qcom_glink_native_remove); + +void qcom_glink_native_unregister(struct qcom_glink *glink) +{ + device_unregister(glink->dev); +} +EXPORT_SYMBOL_GPL(qcom_glink_native_unregister); diff --git a/drivers/rpmsg/qcom_glink_native.h b/drivers/rpmsg/qcom_glink_native.h new file mode 100644 index 000000000000..0cae8a8199f8 --- /dev/null +++ b/drivers/rpmsg/qcom_glink_native.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2016-2017, Linaro Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __QCOM_GLINK_NATIVE_H__ +#define __QCOM_GLINK_NATIVE_H__ + +#define GLINK_FEATURE_INTENT_REUSE BIT(0) +#define GLINK_FEATURE_MIGRATION BIT(1) +#define GLINK_FEATURE_TRACER_PKT BIT(2) + +struct qcom_glink_pipe { + size_t length; + + size_t (*avail)(struct qcom_glink_pipe *glink_pipe); + + void (*peak)(struct qcom_glink_pipe *glink_pipe, void *data, + unsigned int offset, size_t count); + void (*advance)(struct qcom_glink_pipe *glink_pipe, size_t count); + + void (*write)(struct qcom_glink_pipe *glink_pipe, + const void *hdr, size_t hlen, + const void *data, size_t dlen); +}; + +struct qcom_glink; + +struct qcom_glink *qcom_glink_native_probe(struct device *dev, + unsigned long features, + struct qcom_glink_pipe *rx, + struct qcom_glink_pipe *tx, + bool intentless); +void qcom_glink_native_remove(struct qcom_glink *glink); + +void qcom_glink_native_unregister(struct qcom_glink *glink); +#endif diff --git a/drivers/rpmsg/qcom_glink_rpm.c b/drivers/rpmsg/qcom_glink_rpm.c index 3559a3e84c1e..69b25d157d0f 100644 --- a/drivers/rpmsg/qcom_glink_rpm.c +++ b/drivers/rpmsg/qcom_glink_rpm.c @@ -27,6 +27,7 @@ #include #include "rpmsg_internal.h" +#include "qcom_glink_native.h" #define RPM_TOC_SIZE 256 #define RPM_TOC_MAGIC 0x67727430 /* grt0 */ @@ -36,10 +37,7 @@ #define RPM_TX_FIFO_ID 0x61703272 /* ap2r */ #define RPM_RX_FIFO_ID 0x72326170 /* r2ap */ -#define GLINK_NAME_SIZE 32 - -#define RPM_GLINK_CID_MIN 1 -#define RPM_GLINK_CID_MAX 65536 +#define to_rpm_pipe(p) container_of(p, struct glink_rpm_pipe, native) struct rpm_toc_entry { __le32 id; @@ -54,170 +52,18 @@ struct rpm_toc { struct rpm_toc_entry entries[]; } __packed; -struct glink_msg { - __le16 cmd; - __le16 param1; - __le32 param2; - u8 data[]; -} __packed; - struct glink_rpm_pipe { + struct qcom_glink_pipe native; + void __iomem *tail; void __iomem *head; void __iomem *fifo; - - size_t length; }; -/** - * struct glink_defer_cmd - deferred incoming control message - * @node: list node - * @msg: message header - * data: payload of the message - * - * Copy of a received control message, to be added to @rx_queue and processed - * by @rx_work of @glink_rpm. 
- */ -struct glink_defer_cmd { - struct list_head node; - - struct glink_msg msg; - u8 data[]; -}; - -/** - * struct glink_rpm - driver context, relates to one remote subsystem - * @dev: reference to the associated struct device - * @doorbell: "rpm_hlos" ipc doorbell - * @rx_pipe: pipe object for receive FIFO - * @tx_pipe: pipe object for transmit FIFO - * @irq: IRQ for signaling incoming events - * @rx_work: worker for handling received control messages - * @rx_lock: protects the @rx_queue - * @rx_queue: queue of received control messages to be processed in @rx_work - * @tx_lock: synchronizes operations on the tx fifo - * @idr_lock: synchronizes @lcids and @rcids modifications - * @lcids: idr of all channels with a known local channel id - * @rcids: idr of all channels with a known remote channel id - */ -struct glink_rpm { - struct device *dev; - - struct mbox_client mbox_client; - struct mbox_chan *mbox_chan; - - struct glink_rpm_pipe rx_pipe; - struct glink_rpm_pipe tx_pipe; - - int irq; - - struct work_struct rx_work; - spinlock_t rx_lock; - struct list_head rx_queue; - - struct mutex tx_lock; - - struct mutex idr_lock; - struct idr lcids; - struct idr rcids; -}; - -enum { - GLINK_STATE_CLOSED, - GLINK_STATE_OPENING, - GLINK_STATE_OPEN, - GLINK_STATE_CLOSING, -}; - -/** - * struct glink_channel - internal representation of a channel - * @rpdev: rpdev reference, only used for primary endpoints - * @ept: rpmsg endpoint this channel is associated with - * @glink: glink_rpm context handle - * @refcount: refcount for the channel object - * @recv_lock: guard for @ept.cb - * @name: unique channel name/identifier - * @lcid: channel id, in local space - * @rcid: channel id, in remote space - * @buf: receive buffer, for gathering fragments - * @buf_offset: write offset in @buf - * @buf_size: size of current @buf - * @open_ack: completed once remote has acked the open-request - * @open_req: completed once open-request has been received - */ -struct glink_channel { - struct rpmsg_endpoint ept; - - struct rpmsg_device *rpdev; - struct glink_rpm *glink; - - struct kref refcount; - - spinlock_t recv_lock; - - char *name; - unsigned int lcid; - unsigned int rcid; - - void *buf; - int buf_offset; - int buf_size; - - struct completion open_ack; - struct completion open_req; -}; - -#define to_glink_channel(_ept) container_of(_ept, struct glink_channel, ept) - -static const struct rpmsg_endpoint_ops glink_endpoint_ops; - -#define RPM_CMD_VERSION 0 -#define RPM_CMD_VERSION_ACK 1 -#define RPM_CMD_OPEN 2 -#define RPM_CMD_CLOSE 3 -#define RPM_CMD_OPEN_ACK 4 -#define RPM_CMD_TX_DATA 9 -#define RPM_CMD_CLOSE_ACK 11 -#define RPM_CMD_TX_DATA_CONT 12 -#define RPM_CMD_READ_NOTIF 13 - -#define GLINK_FEATURE_INTENTLESS BIT(1) - -static struct glink_channel *glink_rpm_alloc_channel(struct glink_rpm *glink, - const char *name) +static size_t glink_rpm_rx_avail(struct qcom_glink_pipe *glink_pipe) { - struct glink_channel *channel; - - channel = kzalloc(sizeof(*channel), GFP_KERNEL); - if (!channel) - return ERR_PTR(-ENOMEM); - - /* Setup glink internal glink_channel data */ - spin_lock_init(&channel->recv_lock); - channel->glink = glink; - channel->name = kstrdup(name, GFP_KERNEL); - - init_completion(&channel->open_req); - init_completion(&channel->open_ack); - - kref_init(&channel->refcount); - - return channel; -} - -static void glink_rpm_channel_release(struct kref *ref) -{ - struct glink_channel *channel = container_of(ref, struct glink_channel, - refcount); - - kfree(channel->name); - kfree(channel); -} - -static 
size_t glink_rpm_rx_avail(struct glink_rpm *glink) -{ - struct glink_rpm_pipe *pipe = &glink->rx_pipe; + struct glink_rpm_pipe *pipe = to_rpm_pipe(glink_pipe); unsigned int head; unsigned int tail; @@ -225,21 +71,24 @@ static size_t glink_rpm_rx_avail(struct glink_rpm *glink) tail = readl(pipe->tail); if (head < tail) - return pipe->length - tail + head; + return pipe->native.length - tail + head; else return head - tail; } -static void glink_rpm_rx_peak(struct glink_rpm *glink, - void *data, size_t count) +static void glink_rpm_rx_peak(struct qcom_glink_pipe *glink_pipe, + void *data, unsigned int offset, size_t count) { - struct glink_rpm_pipe *pipe = &glink->rx_pipe; + struct glink_rpm_pipe *pipe = to_rpm_pipe(glink_pipe); unsigned int tail; size_t len; tail = readl(pipe->tail); + tail += offset; + if (tail >= pipe->native.length) + tail -= pipe->native.length; - len = min_t(size_t, count, pipe->length - tail); + len = min_t(size_t, count, pipe->native.length - tail); if (len) { __ioread32_copy(data, pipe->fifo + tail, len / sizeof(u32)); @@ -251,24 +100,24 @@ static void glink_rpm_rx_peak(struct glink_rpm *glink, } } -static void glink_rpm_rx_advance(struct glink_rpm *glink, +static void glink_rpm_rx_advance(struct qcom_glink_pipe *glink_pipe, size_t count) { - struct glink_rpm_pipe *pipe = &glink->rx_pipe; + struct glink_rpm_pipe *pipe = to_rpm_pipe(glink_pipe); unsigned int tail; tail = readl(pipe->tail); tail += count; - if (tail >= pipe->length) - tail -= pipe->length; + if (tail >= pipe->native.length) + tail -= pipe->native.length; writel(tail, pipe->tail); } -static size_t glink_rpm_tx_avail(struct glink_rpm *glink) +static size_t glink_rpm_tx_avail(struct qcom_glink_pipe *glink_pipe) { - struct glink_rpm_pipe *pipe = &glink->tx_pipe; + struct glink_rpm_pipe *pipe = to_rpm_pipe(glink_pipe); unsigned int head; unsigned int tail; @@ -276,19 +125,18 @@ static size_t glink_rpm_tx_avail(struct glink_rpm *glink) tail = readl(pipe->tail); if (tail <= head) - return pipe->length - head + tail; + return pipe->native.length - head + tail; else return tail - head; } -static unsigned int glink_rpm_tx_write(struct glink_rpm *glink, - unsigned int head, - const void *data, size_t count) +static unsigned int glink_rpm_tx_write_one(struct glink_rpm_pipe *pipe, + unsigned int head, + const void *data, size_t count) { - struct glink_rpm_pipe *pipe = &glink->tx_pipe; size_t len; - len = min_t(size_t, count, pipe->length - head); + len = min_t(size_t, count, pipe->native.length - head); if (len) { __iowrite32_copy(pipe->fifo + head, data, len / sizeof(u32)); @@ -300,725 +148,43 @@ static unsigned int glink_rpm_tx_write(struct glink_rpm *glink, } head += count; - if (head >= pipe->length) - head -= pipe->length; + if (head >= pipe->native.length) + head -= pipe->native.length; return head; } -static int glink_rpm_tx(struct glink_rpm *glink, - const void *hdr, size_t hlen, - const void *data, size_t dlen, bool wait) +static void glink_rpm_tx_write(struct qcom_glink_pipe *glink_pipe, + const void *hdr, size_t hlen, + const void *data, size_t dlen) { - struct glink_rpm_pipe *pipe = &glink->tx_pipe; + struct glink_rpm_pipe *pipe = to_rpm_pipe(glink_pipe); + size_t tlen = hlen + dlen; + size_t aligned_dlen; unsigned int head; - unsigned int tlen = hlen + dlen; - int ret; + char padding[8] = {0}; + size_t pad; - /* Reject packets that are too big */ - if (tlen >= glink->tx_pipe.length) - return -EINVAL; + /* Header length comes from glink native and is always 4 byte aligned */ + if (WARN(hlen % 4, "Glink 
Header length must be 4 bytes aligned\n")) + return; - if (WARN(tlen % 8, "Unaligned TX request")) - return -EINVAL; - - ret = mutex_lock_interruptible(&glink->tx_lock); - if (ret) - return ret; - - while (glink_rpm_tx_avail(glink) < tlen) { - if (!wait) { - ret = -ENOMEM; - goto out; - } - - msleep(10); - } + /* + * Move the unaligned tail of the message to the padding chunk, to + * ensure word aligned accesses + */ + aligned_dlen = ALIGN_DOWN(dlen, 4); + if (aligned_dlen != dlen) + memcpy(padding, data + aligned_dlen, dlen - aligned_dlen); head = readl(pipe->head); - head = glink_rpm_tx_write(glink, head, hdr, hlen); - head = glink_rpm_tx_write(glink, head, data, dlen); + head = glink_rpm_tx_write_one(pipe, head, hdr, hlen); + head = glink_rpm_tx_write_one(pipe, head, data, aligned_dlen); + + pad = ALIGN(tlen, 8) - ALIGN_DOWN(tlen, 4); + if (pad) + head = glink_rpm_tx_write_one(pipe, head, padding, pad); writel(head, pipe->head); - - mbox_send_message(glink->mbox_chan, NULL); - mbox_client_txdone(glink->mbox_chan, 0); - -out: - mutex_unlock(&glink->tx_lock); - - return ret; -} - -static int glink_rpm_send_version(struct glink_rpm *glink) -{ - struct glink_msg msg; - - msg.cmd = cpu_to_le16(RPM_CMD_VERSION); - msg.param1 = cpu_to_le16(1); - msg.param2 = cpu_to_le32(GLINK_FEATURE_INTENTLESS); - - return glink_rpm_tx(glink, &msg, sizeof(msg), NULL, 0, true); -} - -static void glink_rpm_send_version_ack(struct glink_rpm *glink) -{ - struct glink_msg msg; - - msg.cmd = cpu_to_le16(RPM_CMD_VERSION_ACK); - msg.param1 = cpu_to_le16(1); - msg.param2 = cpu_to_le32(0); - - glink_rpm_tx(glink, &msg, sizeof(msg), NULL, 0, true); -} - -static void glink_rpm_send_open_ack(struct glink_rpm *glink, - struct glink_channel *channel) -{ - struct glink_msg msg; - - msg.cmd = cpu_to_le16(RPM_CMD_OPEN_ACK); - msg.param1 = cpu_to_le16(channel->rcid); - msg.param2 = cpu_to_le32(0); - - glink_rpm_tx(glink, &msg, sizeof(msg), NULL, 0, true); -} - -/** - * glink_rpm_send_open_req() - send a RPM_CMD_OPEN request to the remote - * @glink: - * @channel: - * - * Allocates a local channel id and sends a RPM_CMD_OPEN message to the remote. - * Will return with refcount held, regardless of outcome. - * - * Returns 0 on success, negative errno otherwise. 
- */ -static int glink_rpm_send_open_req(struct glink_rpm *glink, - struct glink_channel *channel) -{ - struct { - struct glink_msg msg; - u8 name[GLINK_NAME_SIZE]; - } __packed req; - int name_len = strlen(channel->name) + 1; - int req_len = ALIGN(sizeof(req.msg) + name_len, 8); - int ret; - - kref_get(&channel->refcount); - - mutex_lock(&glink->idr_lock); - ret = idr_alloc_cyclic(&glink->lcids, channel, - RPM_GLINK_CID_MIN, RPM_GLINK_CID_MAX, GFP_KERNEL); - mutex_unlock(&glink->idr_lock); - if (ret < 0) - return ret; - - channel->lcid = ret; - - req.msg.cmd = cpu_to_le16(RPM_CMD_OPEN); - req.msg.param1 = cpu_to_le16(channel->lcid); - req.msg.param2 = cpu_to_le32(name_len); - strcpy(req.name, channel->name); - - ret = glink_rpm_tx(glink, &req, req_len, NULL, 0, true); - if (ret) - goto remove_idr; - - return 0; - -remove_idr: - mutex_lock(&glink->idr_lock); - idr_remove(&glink->lcids, channel->lcid); - channel->lcid = 0; - mutex_unlock(&glink->idr_lock); - - return ret; -} - -static void glink_rpm_send_close_req(struct glink_rpm *glink, - struct glink_channel *channel) -{ - struct glink_msg req; - - req.cmd = cpu_to_le16(RPM_CMD_CLOSE); - req.param1 = cpu_to_le16(channel->lcid); - req.param2 = 0; - - glink_rpm_tx(glink, &req, sizeof(req), NULL, 0, true); -} - -static void glink_rpm_send_close_ack(struct glink_rpm *glink, unsigned int rcid) -{ - struct glink_msg req; - - req.cmd = cpu_to_le16(RPM_CMD_CLOSE_ACK); - req.param1 = cpu_to_le16(rcid); - req.param2 = 0; - - glink_rpm_tx(glink, &req, sizeof(req), NULL, 0, true); -} - -static int glink_rpm_rx_defer(struct glink_rpm *glink, size_t extra) -{ - struct glink_defer_cmd *dcmd; - - extra = ALIGN(extra, 8); - - if (glink_rpm_rx_avail(glink) < sizeof(struct glink_msg) + extra) { - dev_dbg(glink->dev, "Insufficient data in rx fifo"); - return -ENXIO; - } - - dcmd = kzalloc(sizeof(*dcmd) + extra, GFP_ATOMIC); - if (!dcmd) - return -ENOMEM; - - INIT_LIST_HEAD(&dcmd->node); - - glink_rpm_rx_peak(glink, &dcmd->msg, sizeof(dcmd->msg) + extra); - - spin_lock(&glink->rx_lock); - list_add_tail(&dcmd->node, &glink->rx_queue); - spin_unlock(&glink->rx_lock); - - schedule_work(&glink->rx_work); - glink_rpm_rx_advance(glink, sizeof(dcmd->msg) + extra); - - return 0; -} - -static int glink_rpm_rx_data(struct glink_rpm *glink, size_t avail) -{ - struct glink_channel *channel; - struct { - struct glink_msg msg; - __le32 chunk_size; - __le32 left_size; - } __packed hdr; - unsigned int chunk_size; - unsigned int left_size; - unsigned int rcid; - - if (avail < sizeof(hdr)) { - dev_dbg(glink->dev, "Not enough data in fifo\n"); - return -EAGAIN; - } - - glink_rpm_rx_peak(glink, &hdr, sizeof(hdr)); - chunk_size = le32_to_cpu(hdr.chunk_size); - left_size = le32_to_cpu(hdr.left_size); - - if (avail < sizeof(hdr) + chunk_size) { - dev_dbg(glink->dev, "Payload not yet in fifo\n"); - return -EAGAIN; - } - - if (WARN(chunk_size % 4, "Incoming data must be word aligned\n")) - return -EINVAL; - - rcid = le16_to_cpu(hdr.msg.param1); - channel = idr_find(&glink->rcids, rcid); - if (!channel) { - dev_dbg(glink->dev, "Data on non-existing channel\n"); - - /* Drop the message */ - glink_rpm_rx_advance(glink, ALIGN(sizeof(hdr) + chunk_size, 8)); - return 0; - } - - /* Might have an ongoing, fragmented, message to append */ - if (!channel->buf) { - channel->buf = kmalloc(chunk_size + left_size, GFP_ATOMIC); - if (!channel->buf) - return -ENOMEM; - - channel->buf_size = chunk_size + left_size; - channel->buf_offset = 0; - } - - glink_rpm_rx_advance(glink, sizeof(hdr)); - - if 
(channel->buf_size - channel->buf_offset < chunk_size) { - dev_err(glink->dev, "Insufficient space in input buffer\n"); - - /* The packet header lied, drop payload */ - glink_rpm_rx_advance(glink, chunk_size); - return -ENOMEM; - } - - glink_rpm_rx_peak(glink, channel->buf + channel->buf_offset, chunk_size); - channel->buf_offset += chunk_size; - - /* Handle message when no fragments remain to be received */ - if (!left_size) { - spin_lock(&channel->recv_lock); - if (channel->ept.cb) { - channel->ept.cb(channel->ept.rpdev, - channel->buf, - channel->buf_offset, - channel->ept.priv, - RPMSG_ADDR_ANY); - } - spin_unlock(&channel->recv_lock); - - kfree(channel->buf); - channel->buf = NULL; - channel->buf_size = 0; - } - - /* Each message starts at 8 byte aligned address */ - glink_rpm_rx_advance(glink, ALIGN(chunk_size, 8)); - - return 0; -} - -static int glink_rpm_rx_open_ack(struct glink_rpm *glink, unsigned int lcid) -{ - struct glink_channel *channel; - - channel = idr_find(&glink->lcids, lcid); - if (!channel) { - dev_err(glink->dev, "Invalid open ack packet\n"); - return -EINVAL; - } - - complete(&channel->open_ack); - - return 0; -} - -static irqreturn_t glink_rpm_intr(int irq, void *data) -{ - struct glink_rpm *glink = data; - struct glink_msg msg; - unsigned int param1; - unsigned int param2; - unsigned int avail; - unsigned int cmd; - int ret; - - for (;;) { - avail = glink_rpm_rx_avail(glink); - if (avail < sizeof(msg)) - break; - - glink_rpm_rx_peak(glink, &msg, sizeof(msg)); - - cmd = le16_to_cpu(msg.cmd); - param1 = le16_to_cpu(msg.param1); - param2 = le32_to_cpu(msg.param2); - - switch (cmd) { - case RPM_CMD_VERSION: - case RPM_CMD_VERSION_ACK: - case RPM_CMD_CLOSE: - case RPM_CMD_CLOSE_ACK: - ret = glink_rpm_rx_defer(glink, 0); - break; - case RPM_CMD_OPEN_ACK: - ret = glink_rpm_rx_open_ack(glink, param1); - glink_rpm_rx_advance(glink, ALIGN(sizeof(msg), 8)); - break; - case RPM_CMD_OPEN: - ret = glink_rpm_rx_defer(glink, param2); - break; - case RPM_CMD_TX_DATA: - case RPM_CMD_TX_DATA_CONT: - ret = glink_rpm_rx_data(glink, avail); - break; - case RPM_CMD_READ_NOTIF: - glink_rpm_rx_advance(glink, ALIGN(sizeof(msg), 8)); - - mbox_send_message(glink->mbox_chan, NULL); - mbox_client_txdone(glink->mbox_chan, 0); - - ret = 0; - break; - default: - dev_err(glink->dev, "unhandled rx cmd: %d\n", cmd); - ret = -EINVAL; - break; - } - - if (ret) - break; - } - - return IRQ_HANDLED; -} - -/* Locally initiated rpmsg_create_ept */ -static struct glink_channel *glink_rpm_create_local(struct glink_rpm *glink, - const char *name) -{ - struct glink_channel *channel; - int ret; - - channel = glink_rpm_alloc_channel(glink, name); - if (IS_ERR(channel)) - return ERR_CAST(channel); - - ret = glink_rpm_send_open_req(glink, channel); - if (ret) - goto release_channel; - - ret = wait_for_completion_timeout(&channel->open_ack, 5 * HZ); - if (!ret) - goto err_timeout; - - ret = wait_for_completion_timeout(&channel->open_req, 5 * HZ); - if (!ret) - goto err_timeout; - - glink_rpm_send_open_ack(glink, channel); - - return channel; - -err_timeout: - /* glink_rpm_send_open_req() did register the channel in lcids*/ - mutex_lock(&glink->idr_lock); - idr_remove(&glink->lcids, channel->lcid); - mutex_unlock(&glink->idr_lock); - -release_channel: - /* Release glink_rpm_send_open_req() reference */ - kref_put(&channel->refcount, glink_rpm_channel_release); - /* Release glink_rpm_alloc_channel() reference */ - kref_put(&channel->refcount, glink_rpm_channel_release); - - return ERR_PTR(-ETIMEDOUT); -} - -/* Remote 
initiated rpmsg_create_ept */ -static int glink_rpm_create_remote(struct glink_rpm *glink, - struct glink_channel *channel) -{ - int ret; - - glink_rpm_send_open_ack(glink, channel); - - ret = glink_rpm_send_open_req(glink, channel); - if (ret) - goto close_link; - - ret = wait_for_completion_timeout(&channel->open_ack, 5 * HZ); - if (!ret) { - ret = -ETIMEDOUT; - goto close_link; - } - - return 0; - -close_link: - /* - * Send a close request to "undo" our open-ack. The close-ack will - * release the last reference. - */ - glink_rpm_send_close_req(glink, channel); - - /* Release glink_rpm_send_open_req() reference */ - kref_put(&channel->refcount, glink_rpm_channel_release); - - return ret; -} - -static struct rpmsg_endpoint *glink_rpm_create_ept(struct rpmsg_device *rpdev, - rpmsg_rx_cb_t cb, void *priv, - struct rpmsg_channel_info chinfo) -{ - struct glink_channel *parent = to_glink_channel(rpdev->ept); - struct glink_channel *channel; - struct glink_rpm *glink = parent->glink; - struct rpmsg_endpoint *ept; - const char *name = chinfo.name; - int cid; - int ret; - - idr_for_each_entry(&glink->rcids, channel, cid) { - if (!strcmp(channel->name, name)) - break; - } - - if (!channel) { - channel = glink_rpm_create_local(glink, name); - if (IS_ERR(channel)) - return NULL; - } else { - ret = glink_rpm_create_remote(glink, channel); - if (ret) - return NULL; - } - - ept = &channel->ept; - ept->rpdev = rpdev; - ept->cb = cb; - ept->priv = priv; - ept->ops = &glink_endpoint_ops; - - return ept; -} - -static void glink_rpm_destroy_ept(struct rpmsg_endpoint *ept) -{ - struct glink_channel *channel = to_glink_channel(ept); - struct glink_rpm *glink = channel->glink; - unsigned long flags; - - spin_lock_irqsave(&channel->recv_lock, flags); - channel->ept.cb = NULL; - spin_unlock_irqrestore(&channel->recv_lock, flags); - - /* Decouple the potential rpdev from the channel */ - channel->rpdev = NULL; - - glink_rpm_send_close_req(glink, channel); -} - -static int __glink_rpm_send(struct glink_channel *channel, - void *data, int len, bool wait) -{ - struct glink_rpm *glink = channel->glink; - struct { - struct glink_msg msg; - __le32 chunk_size; - __le32 left_size; - } __packed req; - - if (WARN(len % 8, "RPM GLINK expects 8 byte aligned messages\n")) - return -EINVAL; - - req.msg.cmd = cpu_to_le16(RPM_CMD_TX_DATA); - req.msg.param1 = cpu_to_le16(channel->lcid); - req.msg.param2 = cpu_to_le32(channel->rcid); - req.chunk_size = cpu_to_le32(len); - req.left_size = cpu_to_le32(0); - - return glink_rpm_tx(glink, &req, sizeof(req), data, len, wait); -} - -static int glink_rpm_send(struct rpmsg_endpoint *ept, void *data, int len) -{ - struct glink_channel *channel = to_glink_channel(ept); - - return __glink_rpm_send(channel, data, len, true); -} - -static int glink_rpm_trysend(struct rpmsg_endpoint *ept, void *data, int len) -{ - struct glink_channel *channel = to_glink_channel(ept); - - return __glink_rpm_send(channel, data, len, false); -} - -/* - * Finds the device_node for the glink child interested in this channel. 
- */ -static struct device_node *glink_rpm_match_channel(struct device_node *node, - const char *channel) -{ - struct device_node *child; - const char *name; - const char *key; - int ret; - - for_each_available_child_of_node(node, child) { - key = "qcom,glink-channels"; - ret = of_property_read_string(child, key, &name); - if (ret) - continue; - - if (strcmp(name, channel) == 0) - return child; - } - - return NULL; -} - -static const struct rpmsg_device_ops glink_device_ops = { - .create_ept = glink_rpm_create_ept, -}; - -static const struct rpmsg_endpoint_ops glink_endpoint_ops = { - .destroy_ept = glink_rpm_destroy_ept, - .send = glink_rpm_send, - .trysend = glink_rpm_trysend, -}; - -static void glink_rpm_rpdev_release(struct device *dev) -{ - struct rpmsg_device *rpdev = to_rpmsg_device(dev); - struct glink_channel *channel = to_glink_channel(rpdev->ept); - - channel->rpdev = NULL; - kfree(rpdev); -} - -static int glink_rpm_rx_open(struct glink_rpm *glink, unsigned int rcid, - char *name) -{ - struct glink_channel *channel; - struct rpmsg_device *rpdev; - bool create_device = false; - int lcid; - int ret; - - idr_for_each_entry(&glink->lcids, channel, lcid) { - if (!strcmp(channel->name, name)) - break; - } - - if (!channel) { - channel = glink_rpm_alloc_channel(glink, name); - if (IS_ERR(channel)) - return PTR_ERR(channel); - - /* The opening dance was initiated by the remote */ - create_device = true; - } - - mutex_lock(&glink->idr_lock); - ret = idr_alloc(&glink->rcids, channel, rcid, rcid + 1, GFP_KERNEL); - if (ret < 0) { - dev_err(glink->dev, "Unable to insert channel into rcid list\n"); - mutex_unlock(&glink->idr_lock); - goto free_channel; - } - channel->rcid = ret; - mutex_unlock(&glink->idr_lock); - - complete(&channel->open_req); - - if (create_device) { - rpdev = kzalloc(sizeof(*rpdev), GFP_KERNEL); - if (!rpdev) { - ret = -ENOMEM; - goto rcid_remove; - } - - rpdev->ept = &channel->ept; - strncpy(rpdev->id.name, name, RPMSG_NAME_SIZE); - rpdev->src = RPMSG_ADDR_ANY; - rpdev->dst = RPMSG_ADDR_ANY; - rpdev->ops = &glink_device_ops; - - rpdev->dev.of_node = glink_rpm_match_channel(glink->dev->of_node, name); - rpdev->dev.parent = glink->dev; - rpdev->dev.release = glink_rpm_rpdev_release; - - ret = rpmsg_register_device(rpdev); - if (ret) - goto free_rpdev; - - channel->rpdev = rpdev; - } - - return 0; - -free_rpdev: - kfree(rpdev); -rcid_remove: - mutex_lock(&glink->idr_lock); - idr_remove(&glink->rcids, channel->rcid); - channel->rcid = 0; - mutex_unlock(&glink->idr_lock); -free_channel: - /* Release the reference, iff we took it */ - if (create_device) - kref_put(&channel->refcount, glink_rpm_channel_release); - - return ret; -} - -static void glink_rpm_rx_close(struct glink_rpm *glink, unsigned int rcid) -{ - struct rpmsg_channel_info chinfo; - struct glink_channel *channel; - - channel = idr_find(&glink->rcids, rcid); - if (WARN(!channel, "close request on unknown channel\n")) - return; - - if (channel->rpdev) { - strncpy(chinfo.name, channel->name, sizeof(chinfo.name)); - chinfo.src = RPMSG_ADDR_ANY; - chinfo.dst = RPMSG_ADDR_ANY; - - rpmsg_unregister_device(glink->dev, &chinfo); - } - - glink_rpm_send_close_ack(glink, channel->rcid); - - mutex_lock(&glink->idr_lock); - idr_remove(&glink->rcids, channel->rcid); - channel->rcid = 0; - mutex_unlock(&glink->idr_lock); - - kref_put(&channel->refcount, glink_rpm_channel_release); -} - -static void glink_rpm_rx_close_ack(struct glink_rpm *glink, unsigned int lcid) -{ - struct glink_channel *channel; - - channel = 
idr_find(&glink->lcids, lcid); - if (WARN(!channel, "close ack on unknown channel\n")) - return; - - mutex_lock(&glink->idr_lock); - idr_remove(&glink->lcids, channel->lcid); - channel->lcid = 0; - mutex_unlock(&glink->idr_lock); - - kref_put(&channel->refcount, glink_rpm_channel_release); -} - -static void glink_rpm_work(struct work_struct *work) -{ - struct glink_rpm *glink = container_of(work, struct glink_rpm, rx_work); - struct glink_defer_cmd *dcmd; - struct glink_msg *msg; - unsigned long flags; - unsigned int param1; - unsigned int param2; - unsigned int cmd; - - for (;;) { - spin_lock_irqsave(&glink->rx_lock, flags); - if (list_empty(&glink->rx_queue)) { - spin_unlock_irqrestore(&glink->rx_lock, flags); - break; - } - dcmd = list_first_entry(&glink->rx_queue, struct glink_defer_cmd, node); - list_del(&dcmd->node); - spin_unlock_irqrestore(&glink->rx_lock, flags); - - msg = &dcmd->msg; - cmd = le16_to_cpu(msg->cmd); - param1 = le16_to_cpu(msg->param1); - param2 = le32_to_cpu(msg->param2); - - switch (cmd) { - case RPM_CMD_VERSION: - glink_rpm_send_version_ack(glink); - break; - case RPM_CMD_VERSION_ACK: - break; - case RPM_CMD_OPEN: - glink_rpm_rx_open(glink, param1, msg->data); - break; - case RPM_CMD_CLOSE: - glink_rpm_rx_close(glink, param1); - break; - case RPM_CMD_CLOSE_ACK: - glink_rpm_rx_close_ack(glink, param1); - break; - default: - WARN(1, "Unknown defer object %d\n", cmd); - break; - } - - kfree(dcmd); - } } static int glink_rpm_parse_toc(struct device *dev, @@ -1067,14 +233,14 @@ static int glink_rpm_parse_toc(struct device *dev, switch (id) { case RPM_RX_FIFO_ID: - rx->length = size; + rx->native.length = size; rx->tail = msg_ram + offset; rx->head = msg_ram + offset + sizeof(u32); rx->fifo = msg_ram + offset + 2 * sizeof(u32); break; case RPM_TX_FIFO_ID: - tx->length = size; + tx->native.length = size; tx->tail = msg_ram + offset; tx->head = msg_ram + offset + sizeof(u32); @@ -1098,38 +264,21 @@ static int glink_rpm_parse_toc(struct device *dev, static int glink_rpm_probe(struct platform_device *pdev) { - struct glink_rpm *glink; + struct qcom_glink *glink; + struct glink_rpm_pipe *rx_pipe; + struct glink_rpm_pipe *tx_pipe; struct device_node *np; void __iomem *msg_ram; size_t msg_ram_size; struct device *dev = &pdev->dev; struct resource r; - int irq; int ret; - glink = devm_kzalloc(dev, sizeof(*glink), GFP_KERNEL); - if (!glink) + rx_pipe = devm_kzalloc(&pdev->dev, sizeof(*rx_pipe), GFP_KERNEL); + tx_pipe = devm_kzalloc(&pdev->dev, sizeof(*tx_pipe), GFP_KERNEL); + if (!rx_pipe || !tx_pipe) return -ENOMEM; - glink->dev = dev; - - mutex_init(&glink->tx_lock); - spin_lock_init(&glink->rx_lock); - INIT_LIST_HEAD(&glink->rx_queue); - INIT_WORK(&glink->rx_work, glink_rpm_work); - - mutex_init(&glink->idr_lock); - idr_init(&glink->lcids); - idr_init(&glink->rcids); - - glink->mbox_client.dev = &pdev->dev; - glink->mbox_chan = mbox_request_channel(&glink->mbox_client, 0); - if (IS_ERR(glink->mbox_chan)) { - if (PTR_ERR(glink->mbox_chan) != -EPROBE_DEFER) - dev_err(&pdev->dev, "failed to acquire IPC channel\n"); - return PTR_ERR(glink->mbox_chan); - } - np = of_parse_phandle(dev->of_node, "qcom,rpm-msg-ram", 0); ret = of_address_to_resource(np, 0, &r); of_node_put(np); @@ -1142,61 +291,38 @@ static int glink_rpm_probe(struct platform_device *pdev) return -ENOMEM; ret = glink_rpm_parse_toc(dev, msg_ram, msg_ram_size, - &glink->rx_pipe, &glink->tx_pipe); + rx_pipe, tx_pipe); if (ret) return ret; - writel(0, glink->tx_pipe.head); - writel(0, glink->rx_pipe.tail); + /* Pipe 
specific accessors */ + rx_pipe->native.avail = glink_rpm_rx_avail; + rx_pipe->native.peak = glink_rpm_rx_peak; + rx_pipe->native.advance = glink_rpm_rx_advance; + tx_pipe->native.avail = glink_rpm_tx_avail; + tx_pipe->native.write = glink_rpm_tx_write; - irq = platform_get_irq(pdev, 0); - ret = devm_request_irq(dev, irq, - glink_rpm_intr, - IRQF_NO_SUSPEND | IRQF_SHARED, - "glink-rpm", glink); - if (ret) { - dev_err(dev, "Failed to request IRQ\n"); - return ret; - } + writel(0, tx_pipe->head); + writel(0, rx_pipe->tail); - glink->irq = irq; - - ret = glink_rpm_send_version(glink); - if (ret) - return ret; + glink = qcom_glink_native_probe(&pdev->dev, + 0, + &rx_pipe->native, + &tx_pipe->native, + true); + if (IS_ERR(glink)) + return PTR_ERR(glink); platform_set_drvdata(pdev, glink); return 0; } -static int glink_rpm_remove_device(struct device *dev, void *data) -{ - device_unregister(dev); - - return 0; -} - static int glink_rpm_remove(struct platform_device *pdev) { - struct glink_rpm *glink = platform_get_drvdata(pdev); - struct glink_channel *channel; - int cid; - int ret; + struct qcom_glink *glink = platform_get_drvdata(pdev); - disable_irq(glink->irq); - cancel_work_sync(&glink->rx_work); - - ret = device_for_each_child(glink->dev, NULL, glink_rpm_remove_device); - if (ret) - dev_warn(glink->dev, "Can't remove GLINK devices: %d\n", ret); - - /* Release any defunct local channels, waiting for close-ack */ - idr_for_each_entry(&glink->lcids, channel, cid) - kref_put(&channel->refcount, glink_rpm_channel_release); - - idr_destroy(&glink->lcids); - idr_destroy(&glink->rcids); + qcom_glink_native_remove(glink); return 0; } diff --git a/drivers/rpmsg/qcom_glink_smem.c b/drivers/rpmsg/qcom_glink_smem.c new file mode 100644 index 000000000000..5cdaa5f8fb61 --- /dev/null +++ b/drivers/rpmsg/qcom_glink_smem.c @@ -0,0 +1,316 @@ +/* + * Copyright (c) 2016, Linaro Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "qcom_glink_native.h" + +#define FIFO_FULL_RESERVE 8 +#define FIFO_ALIGNMENT 8 +#define TX_BLOCKED_CMD_RESERVE 8 /* size of struct read_notif_request */ + +#define SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR 478 +#define SMEM_GLINK_NATIVE_XPRT_FIFO_0 479 +#define SMEM_GLINK_NATIVE_XPRT_FIFO_1 480 + +struct glink_smem_pipe { + struct qcom_glink_pipe native; + + __le32 *tail; + __le32 *head; + + void *fifo; + + int remote_pid; +}; + +#define to_smem_pipe(p) container_of(p, struct glink_smem_pipe, native) + +static size_t glink_smem_rx_avail(struct qcom_glink_pipe *np) +{ + struct glink_smem_pipe *pipe = to_smem_pipe(np); + size_t len; + void *fifo; + u32 head; + u32 tail; + + if (!pipe->fifo) { + fifo = qcom_smem_get(pipe->remote_pid, + SMEM_GLINK_NATIVE_XPRT_FIFO_1, &len); + if (IS_ERR(fifo)) { + pr_err("failed to acquire RX fifo handle: %ld\n", + PTR_ERR(fifo)); + return 0; + } + + pipe->fifo = fifo; + pipe->native.length = len; + } + + head = le32_to_cpu(*pipe->head); + tail = le32_to_cpu(*pipe->tail); + + if (head < tail) + return pipe->native.length - tail + head; + else + return head - tail; +} + +static void glink_smem_rx_peak(struct qcom_glink_pipe *np, + void *data, unsigned int offset, size_t count) +{ + struct glink_smem_pipe *pipe = to_smem_pipe(np); + size_t len; + u32 tail; + + tail = le32_to_cpu(*pipe->tail); + tail += offset; + if (tail >= pipe->native.length) + tail -= pipe->native.length; + + len = min_t(size_t, count, pipe->native.length - tail); + if (len) { + __ioread32_copy(data, pipe->fifo + tail, + len / sizeof(u32)); + } + + if (len != count) { + __ioread32_copy(data + len, pipe->fifo, + (count - len) / sizeof(u32)); + } +} + +static void glink_smem_rx_advance(struct qcom_glink_pipe *np, + size_t count) +{ + struct glink_smem_pipe *pipe = to_smem_pipe(np); + u32 tail; + + tail = le32_to_cpu(*pipe->tail); + + tail += count; + if (tail > pipe->native.length) + tail -= pipe->native.length; + + *pipe->tail = cpu_to_le32(tail); +} + +static size_t glink_smem_tx_avail(struct qcom_glink_pipe *np) +{ + struct glink_smem_pipe *pipe = to_smem_pipe(np); + u32 head; + u32 tail; + u32 avail; + + head = le32_to_cpu(*pipe->head); + tail = le32_to_cpu(*pipe->tail); + + if (tail <= head) + avail = pipe->native.length - head + tail; + else + avail = tail - head; + + if (avail < (FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE)) + avail = 0; + else + avail -= FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE; + + return avail; +} + +static unsigned int glink_smem_tx_write_one(struct glink_smem_pipe *pipe, + unsigned int head, + const void *data, size_t count) +{ + size_t len; + + len = min_t(size_t, count, pipe->native.length - head); + if (len) + memcpy(pipe->fifo + head, data, len); + + if (len != count) + memcpy(pipe->fifo, data + len, count - len); + + head += count; + if (head >= pipe->native.length) + head -= pipe->native.length; + + return head; +} + +static void glink_smem_tx_write(struct qcom_glink_pipe *glink_pipe, + const void *hdr, size_t hlen, + const void *data, size_t dlen) +{ + struct glink_smem_pipe *pipe = to_smem_pipe(glink_pipe); + unsigned int head; + + head = le32_to_cpu(*pipe->head); + + head = glink_smem_tx_write_one(pipe, head, hdr, hlen); + head = glink_smem_tx_write_one(pipe, head, data, dlen); + + /* Ensure head is always aligned to 8 bytes */ + head = 
ALIGN(head, 8); + if (head >= pipe->native.length) + head -= pipe->native.length; + + *pipe->head = cpu_to_le32(head); +} + +static void qcom_glink_smem_release(struct device *dev) +{ + kfree(dev); +} + +struct qcom_glink *qcom_glink_smem_register(struct device *parent, + struct device_node *node) +{ + struct glink_smem_pipe *rx_pipe; + struct glink_smem_pipe *tx_pipe; + struct qcom_glink *glink; + struct device *dev; + u32 remote_pid; + __le32 *descs; + size_t size; + int ret; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return ERR_PTR(-ENOMEM); + + dev->parent = parent; + dev->of_node = node; + dev->release = qcom_glink_smem_release; + dev_set_name(dev, "%s:%s", node->parent->name, node->name); + ret = device_register(dev); + if (ret) { + pr_err("failed to register glink edge\n"); + return ERR_PTR(ret); + } + + ret = of_property_read_u32(dev->of_node, "qcom,remote-pid", + &remote_pid); + if (ret) { + dev_err(dev, "failed to parse qcom,remote-pid\n"); + goto err_put_dev; + } + + rx_pipe = devm_kzalloc(dev, sizeof(*rx_pipe), GFP_KERNEL); + tx_pipe = devm_kzalloc(dev, sizeof(*tx_pipe), GFP_KERNEL); + if (!rx_pipe || !tx_pipe) { + ret = -ENOMEM; + goto err_put_dev; + } + + ret = qcom_smem_alloc(remote_pid, + SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR, 32); + if (ret && ret != -EEXIST) { + dev_err(dev, "failed to allocate glink descriptors\n"); + goto err_put_dev; + } + + descs = qcom_smem_get(remote_pid, + SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR, &size); + if (IS_ERR(descs)) { + dev_err(dev, "failed to acquire xprt descriptor\n"); + ret = PTR_ERR(descs); + goto err_put_dev; + } + + if (size != 32) { + dev_err(dev, "glink descriptor of invalid size\n"); + ret = -EINVAL; + goto err_put_dev; + } + + tx_pipe->tail = &descs[0]; + tx_pipe->head = &descs[1]; + rx_pipe->tail = &descs[2]; + rx_pipe->head = &descs[3]; + + ret = qcom_smem_alloc(remote_pid, SMEM_GLINK_NATIVE_XPRT_FIFO_0, + SZ_16K); + if (ret && ret != -EEXIST) { + dev_err(dev, "failed to allocate TX fifo\n"); + goto err_put_dev; + } + + tx_pipe->fifo = qcom_smem_get(remote_pid, SMEM_GLINK_NATIVE_XPRT_FIFO_0, + &tx_pipe->native.length); + if (IS_ERR(tx_pipe->fifo)) { + dev_err(dev, "failed to acquire TX fifo\n"); + ret = PTR_ERR(tx_pipe->fifo); + goto err_put_dev; + } + + rx_pipe->native.avail = glink_smem_rx_avail; + rx_pipe->native.peak = glink_smem_rx_peak; + rx_pipe->native.advance = glink_smem_rx_advance; + rx_pipe->remote_pid = remote_pid; + + tx_pipe->native.avail = glink_smem_tx_avail; + tx_pipe->native.write = glink_smem_tx_write; + tx_pipe->remote_pid = remote_pid; + + *rx_pipe->tail = 0; + *tx_pipe->head = 0; + + glink = qcom_glink_native_probe(dev, + GLINK_FEATURE_INTENT_REUSE, + &rx_pipe->native, &tx_pipe->native, + false); + if (IS_ERR(glink)) { + ret = PTR_ERR(glink); + goto err_put_dev; + } + + return glink; + +err_put_dev: + put_device(dev); + + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(qcom_glink_smem_register); + +void qcom_glink_smem_unregister(struct qcom_glink *glink) +{ + qcom_glink_native_remove(glink); + qcom_glink_native_unregister(glink); +} +EXPORT_SYMBOL_GPL(qcom_glink_smem_unregister); + +MODULE_AUTHOR("Bjorn Andersson "); +MODULE_DESCRIPTION("Qualcomm GLINK SMEM driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c index a0a39a8821a3..b01774e9fac0 100644 --- a/drivers/rpmsg/qcom_smd.c +++ b/drivers/rpmsg/qcom_smd.c @@ -1368,6 +1368,7 @@ struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent, edge->dev.parent = parent; edge->dev.release = 
qcom_smd_edge_release; + edge->dev.of_node = node; edge->dev.groups = qcom_smd_edge_groups; dev_set_name(&edge->dev, "%s:%s", dev_name(parent), node->name); ret = device_register(&edge->dev); diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c index eee2a9f77d37..82b83002fcba 100644 --- a/drivers/rpmsg/virtio_rpmsg_bus.c +++ b/drivers/rpmsg/virtio_rpmsg_bus.c @@ -45,6 +45,7 @@ * @rbufs: kernel address of rx buffers * @sbufs: kernel address of tx buffers * @num_bufs: total number of buffers for rx and tx + * @buf_size: size of one rx or tx buffer * @last_sbuf: index of last tx buffer used * @bufs_dma: dma base addr of the buffers * @tx_lock: protects svq, sbufs and sleepers, to allow concurrent senders. @@ -65,6 +66,7 @@ struct virtproc_info { struct virtqueue *rvq, *svq; void *rbufs, *sbufs; unsigned int num_bufs; + unsigned int buf_size; int last_sbuf; dma_addr_t bufs_dma; struct mutex tx_lock; @@ -158,7 +160,7 @@ struct virtio_rpmsg_channel { * processor. */ #define MAX_RPMSG_NUM_BUFS (512) -#define RPMSG_BUF_SIZE (512) +#define MAX_RPMSG_BUF_SIZE (512) /* * Local addresses are dynamically allocated on-demand. @@ -192,6 +194,28 @@ static const struct rpmsg_endpoint_ops virtio_endpoint_ops = { .trysend_offchannel = virtio_rpmsg_trysend_offchannel, }; +/** + * rpmsg_sg_init - initialize scatterlist according to cpu address location + * @sg: scatterlist to fill + * @cpu_addr: virtual address of the buffer + * @len: buffer length + * + * An internal function filling scatterlist according to virtual address + * location (in vmalloc or in kernel). + */ +static void +rpmsg_sg_init(struct scatterlist *sg, void *cpu_addr, unsigned int len) +{ + if (is_vmalloc_addr(cpu_addr)) { + sg_init_table(sg, 1); + sg_set_page(sg, vmalloc_to_page(cpu_addr), len, + offset_in_page(cpu_addr)); + } else { + WARN_ON(!virt_addr_valid(cpu_addr)); + sg_init_one(sg, cpu_addr, len); + } +} + /** * __ept_release() - deallocate an rpmsg endpoint * @kref: the ept's reference count @@ -435,7 +459,7 @@ static void *get_a_tx_buf(struct virtproc_info *vrp) * (half of our buffers are used for sending messages) */ if (vrp->last_sbuf < vrp->num_bufs / 2) - ret = vrp->sbufs + RPMSG_BUF_SIZE * vrp->last_sbuf++; + ret = vrp->sbufs + vrp->buf_size * vrp->last_sbuf++; /* or recycle a used one */ else ret = virtqueue_get_buf(vrp->svq, &len); @@ -561,7 +585,7 @@ static int rpmsg_send_offchannel_raw(struct rpmsg_device *rpdev, * messaging), or to improve the buffer allocator, to support * variable-length buffer sizes. */ - if (len > RPMSG_BUF_SIZE - sizeof(struct rpmsg_hdr)) { + if (len > vrp->buf_size - sizeof(struct rpmsg_hdr)) { dev_err(dev, "message is too big (%d)\n", len); return -EMSGSIZE; } @@ -610,7 +634,7 @@ static int rpmsg_send_offchannel_raw(struct rpmsg_device *rpdev, msg, sizeof(*msg) + msg->len, true); #endif - sg_init_one(&sg, msg, sizeof(*msg) + len); + rpmsg_sg_init(&sg, msg, sizeof(*msg) + len); mutex_lock(&vrp->tx_lock); @@ -632,7 +656,6 @@ static int rpmsg_send_offchannel_raw(struct rpmsg_device *rpdev, mutex_unlock(&vrp->tx_lock); return err; } -EXPORT_SYMBOL(rpmsg_send_offchannel_raw); static int virtio_rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len) { @@ -702,7 +725,7 @@ static int rpmsg_recv_single(struct virtproc_info *vrp, struct device *dev, * We currently use fixed-sized buffers, so trivially sanitize * the reported payload length. 
*/ - if (len > RPMSG_BUF_SIZE || + if (len > vrp->buf_size || msg->len > (len - sizeof(struct rpmsg_hdr))) { dev_warn(dev, "inbound msg too big: (%d, %d)\n", len, msg->len); return -EINVAL; @@ -735,7 +758,7 @@ static int rpmsg_recv_single(struct virtproc_info *vrp, struct device *dev, dev_warn(dev, "msg received with no recipient\n"); /* publish the real size of the buffer */ - sg_init_one(&sg, msg, RPMSG_BUF_SIZE); + rpmsg_sg_init(&sg, msg, vrp->buf_size); /* add the buffer back to the remote processor's virtqueue */ err = virtqueue_add_inbuf(vrp->rvq, &sg, 1, msg, GFP_KERNEL); @@ -892,7 +915,9 @@ static int rpmsg_probe(struct virtio_device *vdev) else vrp->num_bufs = MAX_RPMSG_NUM_BUFS; - total_buf_space = vrp->num_bufs * RPMSG_BUF_SIZE; + vrp->buf_size = MAX_RPMSG_BUF_SIZE; + + total_buf_space = vrp->num_bufs * vrp->buf_size; /* allocate coherent memory for the buffers */ bufs_va = dma_alloc_coherent(vdev->dev.parent->parent, @@ -915,9 +940,9 @@ static int rpmsg_probe(struct virtio_device *vdev) /* set up the receive buffers */ for (i = 0; i < vrp->num_bufs / 2; i++) { struct scatterlist sg; - void *cpu_addr = vrp->rbufs + i * RPMSG_BUF_SIZE; + void *cpu_addr = vrp->rbufs + i * vrp->buf_size; - sg_init_one(&sg, cpu_addr, RPMSG_BUF_SIZE); + rpmsg_sg_init(&sg, cpu_addr, vrp->buf_size); err = virtqueue_add_inbuf(vrp->rvq, &sg, 1, cpu_addr, GFP_KERNEL); @@ -982,7 +1007,7 @@ static int rpmsg_remove_device(struct device *dev, void *data) static void rpmsg_remove(struct virtio_device *vdev) { struct virtproc_info *vrp = vdev->priv; - size_t total_buf_space = vrp->num_bufs * RPMSG_BUF_SIZE; + size_t total_buf_space = vrp->num_bufs * vrp->buf_size; int ret; vdev->config->reset(vdev); diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 72419ac2c52a..e0e58f3b1420 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -227,14 +227,14 @@ config RTC_DRV_AS3722 will be called rtc-as3722. config RTC_DRV_DS1307 - tristate "Dallas/Maxim DS1307/37/38/39/40, ST M41T00, EPSON RX-8025, ISL12057" + tristate "Dallas/Maxim DS1307/37/38/39/40/41, ST M41T00, EPSON RX-8025, ISL12057" help If you say yes here you get support for various compatible RTC chips (often with battery backup) connected with I2C. This driver - should handle DS1307, DS1337, DS1338, DS1339, DS1340, ST M41T00, - EPSON RX-8025, Intersil ISL12057 and probably other chips. In some - cases the RTC must already have been initialized (by manufacturing or - a bootloader). + should handle DS1307, DS1337, DS1338, DS1339, DS1340, DS1341, + ST M41T00, EPSON RX-8025, Intersil ISL12057 and probably other chips. + In some cases the RTC must already have been initialized (by + manufacturing or a bootloader). The first seven registers on these chips hold an RTC, and other registers may add features such as NVRAM, a trickle charger for @@ -371,11 +371,11 @@ config RTC_DRV_MAX77686 will be called rtc-max77686. config RTC_DRV_RK808 - tristate "Rockchip RK808/RK818 RTC" + tristate "Rockchip RK805/RK808/RK818 RTC" depends on MFD_RK808 help If you say yes here you will get support for the - RTC of RK808 and RK818 PMIC. + RTC of RK805, RK808 and RK818 PMIC. This driver can also be built as a module. If so, the module will be called rk808-rtc. @@ -1765,6 +1765,14 @@ config RTC_DRV_CPCAP Say y here for CPCAP rtc found on some Motorola phones and tablets such as Droid 4. 
+config RTC_DRV_RTD119X + bool "Realtek RTD129x RTC" + depends on ARCH_REALTEK || COMPILE_TEST + default ARCH_REALTEK + help + If you say yes here, you get support for the RTD1295 SoC + Real Time Clock. + comment "HID Sensor RTC drivers" config RTC_DRV_HID_SENSOR_TIME @@ -1780,5 +1788,13 @@ config RTC_DRV_HID_SENSOR_TIME If this driver is compiled as a module, it will be named rtc-hid-sensor-time. +config RTC_DRV_GOLDFISH + tristate "Goldfish Real Time Clock" + depends on MIPS && (GOLDFISH || COMPILE_TEST) + help + Say yes to enable RTC driver for the Goldfish based virtual platform. + + Goldfish is a code name for the virtual platform developed by Google + for Android emulation. endif # RTC_CLASS diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index acd366b41c85..7230014c92af 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -131,6 +131,7 @@ obj-$(CONFIG_RTC_DRV_RP5C01) += rtc-rp5c01.o obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o +obj-$(CONFIG_RTC_DRV_RTD119X) += rtc-rtd119x.o obj-$(CONFIG_RTC_DRV_RV3029C2) += rtc-rv3029c2.o obj-$(CONFIG_RTC_DRV_RV8803) += rtc-rv8803.o obj-$(CONFIG_RTC_DRV_RX4581) += rtc-rx4581.o @@ -170,3 +171,4 @@ obj-$(CONFIG_RTC_DRV_WM8350) += rtc-wm8350.o obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o obj-$(CONFIG_RTC_DRV_XGENE) += rtc-xgene.o obj-$(CONFIG_RTC_DRV_ZYNQMP) += rtc-zynqmp.o +obj-$(CONFIG_RTC_DRV_GOLDFISH) += rtc-goldfish.o diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c index 794bc4fa4937..00efe24a6063 100644 --- a/drivers/rtc/rtc-dev.c +++ b/drivers/rtc/rtc-dev.c @@ -24,28 +24,19 @@ static dev_t rtc_devt; static int rtc_dev_open(struct inode *inode, struct file *file) { - int err; struct rtc_device *rtc = container_of(inode->i_cdev, struct rtc_device, char_dev); - const struct rtc_class_ops *ops = rtc->ops; if (test_and_set_bit_lock(RTC_DEV_BUSY, &rtc->flags)) return -EBUSY; file->private_data = rtc; - err = ops->open ? ops->open(rtc->dev.parent) : 0; - if (err == 0) { - spin_lock_irq(&rtc->irq_lock); - rtc->irq_data = 0; - spin_unlock_irq(&rtc->irq_lock); + spin_lock_irq(&rtc->irq_lock); + rtc->irq_data = 0; + spin_unlock_irq(&rtc->irq_lock); - return 0; - } - - /* something has gone wrong */ - clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags); - return err; + return 0; } #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL @@ -438,9 +429,6 @@ static int rtc_dev_release(struct inode *inode, struct file *file) rtc_update_irq_enable(rtc, 0); rtc_irq_set_state(rtc, NULL, 0); - if (rtc->ops->release) - rtc->ops->release(rtc->dev.parent); - clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags); return 0; } diff --git a/drivers/rtc/rtc-dm355evm.c b/drivers/rtc/rtc-dm355evm.c index f225cd873ff6..97d8259b9494 100644 --- a/drivers/rtc/rtc-dm355evm.c +++ b/drivers/rtc/rtc-dm355evm.c @@ -13,7 +13,7 @@ #include #include -#include +#include #include diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c index 4b43aa62fbc7..e7d9215c9201 100644 --- a/drivers/rtc/rtc-ds1307.c +++ b/drivers/rtc/rtc-ds1307.c @@ -39,6 +39,7 @@ enum ds_type { ds_1338, ds_1339, ds_1340, + ds_1341, ds_1388, ds_3231, m41t0, @@ -50,7 +51,6 @@ enum ds_type { /* rs5c372 too? different address... 
*/ }; - /* RTC registers don't differ much, except for the century flag */ #define DS1307_REG_SECS 0x00 /* 00-59 */ # define DS1307_BIT_CH 0x80 @@ -113,11 +113,7 @@ enum ds_type { # define RX8025_BIT_VDET 0x40 # define RX8025_BIT_XST 0x20 - struct ds1307 { - u8 offset; /* register's offset */ - u8 regs[11]; - u16 nvram_offset; struct nvmem_config nvmem_cfg; enum ds_type type; unsigned long flags; @@ -126,7 +122,6 @@ struct ds1307 { struct device *dev; struct regmap *regmap; const char *name; - int irq; struct rtc_device *rtc; #ifdef CONFIG_COMMON_CLK struct clk_hw clks[2]; @@ -137,18 +132,47 @@ struct chip_desc { unsigned alarm:1; u16 nvram_offset; u16 nvram_size; + u8 offset; /* register's offset */ u8 century_reg; u8 century_enable_bit; u8 century_bit; + u8 bbsqi_bit; + irq_handler_t irq_handler; + const struct rtc_class_ops *rtc_ops; u16 trickle_charger_reg; - u8 trickle_charger_setup; - u8 (*do_trickle_setup)(struct ds1307 *, uint32_t, + u8 (*do_trickle_setup)(struct ds1307 *, u32, bool); }; -static u8 do_trickle_setup_ds1339(struct ds1307 *, uint32_t ohms, bool diode); +static int ds1307_get_time(struct device *dev, struct rtc_time *t); +static int ds1307_set_time(struct device *dev, struct rtc_time *t); +static u8 do_trickle_setup_ds1339(struct ds1307 *, u32 ohms, bool diode); +static irqreturn_t rx8130_irq(int irq, void *dev_id); +static int rx8130_read_alarm(struct device *dev, struct rtc_wkalrm *t); +static int rx8130_set_alarm(struct device *dev, struct rtc_wkalrm *t); +static int rx8130_alarm_irq_enable(struct device *dev, unsigned int enabled); +static irqreturn_t mcp794xx_irq(int irq, void *dev_id); +static int mcp794xx_read_alarm(struct device *dev, struct rtc_wkalrm *t); +static int mcp794xx_set_alarm(struct device *dev, struct rtc_wkalrm *t); +static int mcp794xx_alarm_irq_enable(struct device *dev, unsigned int enabled); -static struct chip_desc chips[last_ds_type] = { +static const struct rtc_class_ops rx8130_rtc_ops = { + .read_time = ds1307_get_time, + .set_time = ds1307_set_time, + .read_alarm = rx8130_read_alarm, + .set_alarm = rx8130_set_alarm, + .alarm_irq_enable = rx8130_alarm_irq_enable, +}; + +static const struct rtc_class_ops mcp794xx_rtc_ops = { + .read_time = ds1307_get_time, + .set_time = ds1307_set_time, + .read_alarm = mcp794xx_read_alarm, + .set_alarm = mcp794xx_set_alarm, + .alarm_irq_enable = mcp794xx_alarm_irq_enable, +}; + +static const struct chip_desc chips[last_ds_type] = { [ds_1307] = { .nvram_offset = 8, .nvram_size = 56, @@ -170,6 +194,7 @@ static struct chip_desc chips[last_ds_type] = { .alarm = 1, .century_reg = DS1307_REG_MONTH, .century_bit = DS1337_BIT_CENTURY, + .bbsqi_bit = DS1339_BIT_BBSQI, .trickle_charger_reg = 0x10, .do_trickle_setup = &do_trickle_setup_ds1339, }, @@ -179,25 +204,36 @@ static struct chip_desc chips[last_ds_type] = { .century_bit = DS1340_BIT_CENTURY, .trickle_charger_reg = 0x08, }, + [ds_1341] = { + .century_reg = DS1307_REG_MONTH, + .century_bit = DS1337_BIT_CENTURY, + }, [ds_1388] = { + .offset = 1, .trickle_charger_reg = 0x0a, }, [ds_3231] = { .alarm = 1, .century_reg = DS1307_REG_MONTH, .century_bit = DS1337_BIT_CENTURY, + .bbsqi_bit = DS3231_BIT_BBSQW, }, [rx_8130] = { .alarm = 1, /* this is battery backed SRAM */ .nvram_offset = 0x20, .nvram_size = 4, /* 32bit (4 word x 8 bit) */ + .offset = 0x10, + .irq_handler = rx8130_irq, + .rtc_ops = &rx8130_rtc_ops, }, [mcp794xx] = { .alarm = 1, /* this is battery backed SRAM */ .nvram_offset = 0x20, .nvram_size = 0x40, + .irq_handler = mcp794xx_irq, + .rtc_ops = 
&mcp794xx_rtc_ops, }, }; @@ -209,6 +245,7 @@ static const struct i2c_device_id ds1307_id[] = { { "ds1339", ds_1339 }, { "ds1388", ds_1388 }, { "ds1340", ds_1340 }, + { "ds1341", ds_1341 }, { "ds3231", ds_3231 }, { "m41t0", m41t0 }, { "m41t00", m41t00 }, @@ -252,6 +289,10 @@ static const struct of_device_id ds1307_of_match[] = { .compatible = "dallas,ds1340", .data = (void *)ds_1340 }, + { + .compatible = "dallas,ds1341", + .data = (void *)ds_1341 + }, { .compatible = "maxim,ds3231", .data = (void *)ds_3231 @@ -298,6 +339,7 @@ static const struct acpi_device_id ds1307_acpi_ids[] = { { .id = "DS1339", .driver_data = ds_1339 }, { .id = "DS1388", .driver_data = ds_1388 }, { .id = "DS1340", .driver_data = ds_1340 }, + { .id = "DS1341", .driver_data = ds_1341 }, { .id = "DS3231", .driver_data = ds_3231 }, { .id = "M41T0", .driver_data = m41t0 }, { .id = "M41T00", .driver_data = m41t00 }, @@ -352,34 +394,36 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t) struct ds1307 *ds1307 = dev_get_drvdata(dev); int tmp, ret; const struct chip_desc *chip = &chips[ds1307->type]; + u8 regs[7]; /* read the RTC date and time registers all at once */ - ret = regmap_bulk_read(ds1307->regmap, ds1307->offset, ds1307->regs, 7); + ret = regmap_bulk_read(ds1307->regmap, chip->offset, regs, + sizeof(regs)); if (ret) { dev_err(dev, "%s error %d\n", "read", ret); return ret; } - dev_dbg(dev, "%s: %7ph\n", "read", ds1307->regs); + dev_dbg(dev, "%s: %7ph\n", "read", regs); /* if oscillator fail bit is set, no data can be trusted */ if (ds1307->type == m41t0 && - ds1307->regs[DS1307_REG_MIN] & M41T0_BIT_OF) { + regs[DS1307_REG_MIN] & M41T0_BIT_OF) { dev_warn_once(dev, "oscillator failed, set time!\n"); return -EINVAL; } - t->tm_sec = bcd2bin(ds1307->regs[DS1307_REG_SECS] & 0x7f); - t->tm_min = bcd2bin(ds1307->regs[DS1307_REG_MIN] & 0x7f); - tmp = ds1307->regs[DS1307_REG_HOUR] & 0x3f; + t->tm_sec = bcd2bin(regs[DS1307_REG_SECS] & 0x7f); + t->tm_min = bcd2bin(regs[DS1307_REG_MIN] & 0x7f); + tmp = regs[DS1307_REG_HOUR] & 0x3f; t->tm_hour = bcd2bin(tmp); - t->tm_wday = bcd2bin(ds1307->regs[DS1307_REG_WDAY] & 0x07) - 1; - t->tm_mday = bcd2bin(ds1307->regs[DS1307_REG_MDAY] & 0x3f); - tmp = ds1307->regs[DS1307_REG_MONTH] & 0x1f; + t->tm_wday = bcd2bin(regs[DS1307_REG_WDAY] & 0x07) - 1; + t->tm_mday = bcd2bin(regs[DS1307_REG_MDAY] & 0x3f); + tmp = regs[DS1307_REG_MONTH] & 0x1f; t->tm_mon = bcd2bin(tmp) - 1; - t->tm_year = bcd2bin(ds1307->regs[DS1307_REG_YEAR]) + 100; + t->tm_year = bcd2bin(regs[DS1307_REG_YEAR]) + 100; - if (ds1307->regs[chip->century_reg] & chip->century_bit && + if (regs[chip->century_reg] & chip->century_bit && IS_ENABLED(CONFIG_RTC_DRV_DS1307_CENTURY)) t->tm_year += 100; @@ -399,7 +443,7 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t) const struct chip_desc *chip = &chips[ds1307->type]; int result; int tmp; - u8 *buf = ds1307->regs; + u8 regs[7]; dev_dbg(dev, "%s secs=%d, mins=%d, " "hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n", @@ -418,35 +462,36 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t) return -EINVAL; #endif - buf[DS1307_REG_SECS] = bin2bcd(t->tm_sec); - buf[DS1307_REG_MIN] = bin2bcd(t->tm_min); - buf[DS1307_REG_HOUR] = bin2bcd(t->tm_hour); - buf[DS1307_REG_WDAY] = bin2bcd(t->tm_wday + 1); - buf[DS1307_REG_MDAY] = bin2bcd(t->tm_mday); - buf[DS1307_REG_MONTH] = bin2bcd(t->tm_mon + 1); + regs[DS1307_REG_SECS] = bin2bcd(t->tm_sec); + regs[DS1307_REG_MIN] = bin2bcd(t->tm_min); + regs[DS1307_REG_HOUR] = bin2bcd(t->tm_hour); + 
regs[DS1307_REG_WDAY] = bin2bcd(t->tm_wday + 1); + regs[DS1307_REG_MDAY] = bin2bcd(t->tm_mday); + regs[DS1307_REG_MONTH] = bin2bcd(t->tm_mon + 1); /* assume 20YY not 19YY */ tmp = t->tm_year - 100; - buf[DS1307_REG_YEAR] = bin2bcd(tmp); + regs[DS1307_REG_YEAR] = bin2bcd(tmp); if (chip->century_enable_bit) - buf[chip->century_reg] |= chip->century_enable_bit; + regs[chip->century_reg] |= chip->century_enable_bit; if (t->tm_year > 199 && chip->century_bit) - buf[chip->century_reg] |= chip->century_bit; + regs[chip->century_reg] |= chip->century_bit; if (ds1307->type == mcp794xx) { /* * these bits were cleared when preparing the date/time * values and need to be set again before writing the - * buffer out to the device. + * regs out to the device. */ - buf[DS1307_REG_SECS] |= MCP794XX_BIT_ST; - buf[DS1307_REG_WDAY] |= MCP794XX_BIT_VBATEN; + regs[DS1307_REG_SECS] |= MCP794XX_BIT_ST; + regs[DS1307_REG_WDAY] |= MCP794XX_BIT_VBATEN; } - dev_dbg(dev, "%s: %7ph\n", "write", buf); + dev_dbg(dev, "%s: %7ph\n", "write", regs); - result = regmap_bulk_write(ds1307->regmap, ds1307->offset, buf, 7); + result = regmap_bulk_write(ds1307->regmap, chip->offset, regs, + sizeof(regs)); if (result) { dev_err(dev, "%s error %d\n", "write", result); return result; @@ -458,33 +503,34 @@ static int ds1337_read_alarm(struct device *dev, struct rtc_wkalrm *t) { struct ds1307 *ds1307 = dev_get_drvdata(dev); int ret; + u8 regs[9]; if (!test_bit(HAS_ALARM, &ds1307->flags)) return -EINVAL; /* read all ALARM1, ALARM2, and status registers at once */ ret = regmap_bulk_read(ds1307->regmap, DS1339_REG_ALARM1_SECS, - ds1307->regs, 9); + regs, sizeof(regs)); if (ret) { dev_err(dev, "%s error %d\n", "alarm read", ret); return ret; } dev_dbg(dev, "%s: %4ph, %3ph, %2ph\n", "alarm read", - &ds1307->regs[0], &ds1307->regs[4], &ds1307->regs[7]); + &regs[0], &regs[4], &regs[7]); /* * report alarm time (ALARM1); assume 24 hour and day-of-month modes, * and that all four fields are checked matches */ - t->time.tm_sec = bcd2bin(ds1307->regs[0] & 0x7f); - t->time.tm_min = bcd2bin(ds1307->regs[1] & 0x7f); - t->time.tm_hour = bcd2bin(ds1307->regs[2] & 0x3f); - t->time.tm_mday = bcd2bin(ds1307->regs[3] & 0x3f); + t->time.tm_sec = bcd2bin(regs[0] & 0x7f); + t->time.tm_min = bcd2bin(regs[1] & 0x7f); + t->time.tm_hour = bcd2bin(regs[2] & 0x3f); + t->time.tm_mday = bcd2bin(regs[3] & 0x3f); /* ... 
and status */ - t->enabled = !!(ds1307->regs[7] & DS1337_BIT_A1IE); - t->pending = !!(ds1307->regs[8] & DS1337_BIT_A1I); + t->enabled = !!(regs[7] & DS1337_BIT_A1IE); + t->pending = !!(regs[8] & DS1337_BIT_A1I); dev_dbg(dev, "%s secs=%d, mins=%d, " "hours=%d, mday=%d, enabled=%d, pending=%d\n", @@ -498,7 +544,7 @@ static int ds1337_read_alarm(struct device *dev, struct rtc_wkalrm *t) static int ds1337_set_alarm(struct device *dev, struct rtc_wkalrm *t) { struct ds1307 *ds1307 = dev_get_drvdata(dev); - unsigned char *buf = ds1307->regs; + unsigned char regs[9]; u8 control, status; int ret; @@ -512,33 +558,35 @@ static int ds1337_set_alarm(struct device *dev, struct rtc_wkalrm *t) t->enabled, t->pending); /* read current status of both alarms and the chip */ - ret = regmap_bulk_read(ds1307->regmap, DS1339_REG_ALARM1_SECS, buf, 9); + ret = regmap_bulk_read(ds1307->regmap, DS1339_REG_ALARM1_SECS, regs, + sizeof(regs)); if (ret) { dev_err(dev, "%s error %d\n", "alarm write", ret); return ret; } - control = ds1307->regs[7]; - status = ds1307->regs[8]; + control = regs[7]; + status = regs[8]; dev_dbg(dev, "%s: %4ph, %3ph, %02x %02x\n", "alarm set (old status)", - &ds1307->regs[0], &ds1307->regs[4], control, status); + &regs[0], &regs[4], control, status); /* set ALARM1, using 24 hour and day-of-month modes */ - buf[0] = bin2bcd(t->time.tm_sec); - buf[1] = bin2bcd(t->time.tm_min); - buf[2] = bin2bcd(t->time.tm_hour); - buf[3] = bin2bcd(t->time.tm_mday); + regs[0] = bin2bcd(t->time.tm_sec); + regs[1] = bin2bcd(t->time.tm_min); + regs[2] = bin2bcd(t->time.tm_hour); + regs[3] = bin2bcd(t->time.tm_mday); /* set ALARM2 to non-garbage */ - buf[4] = 0; - buf[5] = 0; - buf[6] = 0; + regs[4] = 0; + regs[5] = 0; + regs[6] = 0; /* disable alarms */ - buf[7] = control & ~(DS1337_BIT_A1IE | DS1337_BIT_A2IE); - buf[8] = status & ~(DS1337_BIT_A1I | DS1337_BIT_A2I); + regs[7] = control & ~(DS1337_BIT_A1IE | DS1337_BIT_A2IE); + regs[8] = status & ~(DS1337_BIT_A1I | DS1337_BIT_A2I); - ret = regmap_bulk_write(ds1307->regmap, DS1339_REG_ALARM1_SECS, buf, 9); + ret = regmap_bulk_write(ds1307->regmap, DS1339_REG_ALARM1_SECS, regs, + sizeof(regs)); if (ret) { dev_err(dev, "can't set alarm time\n"); return ret; @@ -547,8 +595,8 @@ static int ds1337_set_alarm(struct device *dev, struct rtc_wkalrm *t) /* optionally enable ALARM1 */ if (t->enabled) { dev_dbg(dev, "alarm IRQ armed\n"); - buf[7] |= DS1337_BIT_A1IE; /* only ALARM1 is used */ - regmap_write(ds1307->regmap, DS1337_REG_CONTROL, buf[7]); + regs[7] |= DS1337_BIT_A1IE; /* only ALARM1 is used */ + regmap_write(ds1307->regmap, DS1337_REG_CONTROL, regs[7]); } return 0; @@ -584,11 +632,11 @@ static const struct rtc_class_ops ds13xx_rtc_ops = { #define RX8130_REG_ALARM_HOUR 0x08 #define RX8130_REG_ALARM_WEEK_OR_DAY 0x09 #define RX8130_REG_EXTENSION 0x0c -#define RX8130_REG_EXTENSION_WADA (1 << 3) +#define RX8130_REG_EXTENSION_WADA BIT(3) #define RX8130_REG_FLAG 0x0d -#define RX8130_REG_FLAG_AF (1 << 3) +#define RX8130_REG_FLAG_AF BIT(3) #define RX8130_REG_CONTROL0 0x0e -#define RX8130_REG_CONTROL0_AIE (1 << 3) +#define RX8130_REG_CONTROL0_AIE BIT(3) static irqreturn_t rx8130_irq(int irq, void *dev_id) { @@ -600,7 +648,8 @@ static irqreturn_t rx8130_irq(int irq, void *dev_id) mutex_lock(lock); /* Read control registers. 
*/ - ret = regmap_bulk_read(ds1307->regmap, RX8130_REG_EXTENSION, ctl, 3); + ret = regmap_bulk_read(ds1307->regmap, RX8130_REG_EXTENSION, ctl, + sizeof(ctl)); if (ret < 0) goto out; if (!(ctl[1] & RX8130_REG_FLAG_AF)) @@ -608,7 +657,8 @@ static irqreturn_t rx8130_irq(int irq, void *dev_id) ctl[1] &= ~RX8130_REG_FLAG_AF; ctl[2] &= ~RX8130_REG_CONTROL0_AIE; - ret = regmap_bulk_write(ds1307->regmap, RX8130_REG_EXTENSION, ctl, 3); + ret = regmap_bulk_write(ds1307->regmap, RX8130_REG_EXTENSION, ctl, + sizeof(ctl)); if (ret < 0) goto out; @@ -630,12 +680,14 @@ static int rx8130_read_alarm(struct device *dev, struct rtc_wkalrm *t) return -EINVAL; /* Read alarm registers. */ - ret = regmap_bulk_read(ds1307->regmap, RX8130_REG_ALARM_MIN, ald, 3); + ret = regmap_bulk_read(ds1307->regmap, RX8130_REG_ALARM_MIN, ald, + sizeof(ald)); if (ret < 0) return ret; /* Read control registers. */ - ret = regmap_bulk_read(ds1307->regmap, RX8130_REG_EXTENSION, ctl, 3); + ret = regmap_bulk_read(ds1307->regmap, RX8130_REG_EXTENSION, ctl, + sizeof(ctl)); if (ret < 0) return ret; @@ -676,7 +728,8 @@ static int rx8130_set_alarm(struct device *dev, struct rtc_wkalrm *t) t->enabled, t->pending); /* Read control registers. */ - ret = regmap_bulk_read(ds1307->regmap, RX8130_REG_EXTENSION, ctl, 3); + ret = regmap_bulk_read(ds1307->regmap, RX8130_REG_EXTENSION, ctl, + sizeof(ctl)); if (ret < 0) return ret; @@ -684,7 +737,8 @@ static int rx8130_set_alarm(struct device *dev, struct rtc_wkalrm *t) ctl[1] |= RX8130_REG_FLAG_AF; ctl[2] &= ~RX8130_REG_CONTROL0_AIE; - ret = regmap_bulk_write(ds1307->regmap, RX8130_REG_EXTENSION, ctl, 3); + ret = regmap_bulk_write(ds1307->regmap, RX8130_REG_EXTENSION, ctl, + sizeof(ctl)); if (ret < 0) return ret; @@ -693,7 +747,8 @@ static int rx8130_set_alarm(struct device *dev, struct rtc_wkalrm *t) ald[1] = bin2bcd(t->time.tm_hour); ald[2] = bin2bcd(t->time.tm_mday); - ret = regmap_bulk_write(ds1307->regmap, RX8130_REG_ALARM_MIN, ald, 3); + ret = regmap_bulk_write(ds1307->regmap, RX8130_REG_ALARM_MIN, ald, + sizeof(ald)); if (ret < 0) return ret; @@ -702,7 +757,8 @@ static int rx8130_set_alarm(struct device *dev, struct rtc_wkalrm *t) ctl[2] |= RX8130_REG_CONTROL0_AIE; - return regmap_bulk_write(ds1307->regmap, RX8130_REG_EXTENSION, ctl, 3); + return regmap_bulk_write(ds1307->regmap, RX8130_REG_EXTENSION, ctl, + sizeof(ctl)); } static int rx8130_alarm_irq_enable(struct device *dev, unsigned int enabled) @@ -725,14 +781,6 @@ static int rx8130_alarm_irq_enable(struct device *dev, unsigned int enabled) return regmap_write(ds1307->regmap, RX8130_REG_CONTROL0, reg); } -static const struct rtc_class_ops rx8130_rtc_ops = { - .read_time = ds1307_get_time, - .set_time = ds1307_set_time, - .read_alarm = rx8130_read_alarm, - .set_alarm = rx8130_set_alarm, - .alarm_irq_enable = rx8130_alarm_irq_enable, -}; - /*----------------------------------------------------------------------*/ /* @@ -748,11 +796,11 @@ static const struct rtc_class_ops rx8130_rtc_ops = { #define MCP794XX_REG_ALARM0_CTRL 0x0d #define MCP794XX_REG_ALARM1_BASE 0x11 #define MCP794XX_REG_ALARM1_CTRL 0x14 -# define MCP794XX_BIT_ALMX_IF (1 << 3) -# define MCP794XX_BIT_ALMX_C0 (1 << 4) -# define MCP794XX_BIT_ALMX_C1 (1 << 5) -# define MCP794XX_BIT_ALMX_C2 (1 << 6) -# define MCP794XX_BIT_ALMX_POL (1 << 7) +# define MCP794XX_BIT_ALMX_IF BIT(3) +# define MCP794XX_BIT_ALMX_C0 BIT(4) +# define MCP794XX_BIT_ALMX_C1 BIT(5) +# define MCP794XX_BIT_ALMX_C2 BIT(6) +# define MCP794XX_BIT_ALMX_POL BIT(7) # define MCP794XX_MSK_ALMX_MATCH (MCP794XX_BIT_ALMX_C0 
| \ MCP794XX_BIT_ALMX_C1 | \ MCP794XX_BIT_ALMX_C2) @@ -793,37 +841,38 @@ static irqreturn_t mcp794xx_irq(int irq, void *dev_id) static int mcp794xx_read_alarm(struct device *dev, struct rtc_wkalrm *t) { struct ds1307 *ds1307 = dev_get_drvdata(dev); - u8 *regs = ds1307->regs; + u8 regs[10]; int ret; if (!test_bit(HAS_ALARM, &ds1307->flags)) return -EINVAL; /* Read control and alarm 0 registers. */ - ret = regmap_bulk_read(ds1307->regmap, MCP794XX_REG_CONTROL, regs, 10); + ret = regmap_bulk_read(ds1307->regmap, MCP794XX_REG_CONTROL, regs, + sizeof(regs)); if (ret) return ret; t->enabled = !!(regs[0] & MCP794XX_BIT_ALM0_EN); /* Report alarm 0 time assuming 24-hour and day-of-month modes. */ - t->time.tm_sec = bcd2bin(ds1307->regs[3] & 0x7f); - t->time.tm_min = bcd2bin(ds1307->regs[4] & 0x7f); - t->time.tm_hour = bcd2bin(ds1307->regs[5] & 0x3f); - t->time.tm_wday = bcd2bin(ds1307->regs[6] & 0x7) - 1; - t->time.tm_mday = bcd2bin(ds1307->regs[7] & 0x3f); - t->time.tm_mon = bcd2bin(ds1307->regs[8] & 0x1f) - 1; + t->time.tm_sec = bcd2bin(regs[3] & 0x7f); + t->time.tm_min = bcd2bin(regs[4] & 0x7f); + t->time.tm_hour = bcd2bin(regs[5] & 0x3f); + t->time.tm_wday = bcd2bin(regs[6] & 0x7) - 1; + t->time.tm_mday = bcd2bin(regs[7] & 0x3f); + t->time.tm_mon = bcd2bin(regs[8] & 0x1f) - 1; t->time.tm_year = -1; t->time.tm_yday = -1; t->time.tm_isdst = -1; dev_dbg(dev, "%s, sec=%d min=%d hour=%d wday=%d mday=%d mon=%d " - "enabled=%d polarity=%d irq=%d match=%d\n", __func__, + "enabled=%d polarity=%d irq=%d match=%lu\n", __func__, t->time.tm_sec, t->time.tm_min, t->time.tm_hour, t->time.tm_wday, t->time.tm_mday, t->time.tm_mon, t->enabled, - !!(ds1307->regs[6] & MCP794XX_BIT_ALMX_POL), - !!(ds1307->regs[6] & MCP794XX_BIT_ALMX_IF), - (ds1307->regs[6] & MCP794XX_MSK_ALMX_MATCH) >> 4); + !!(regs[6] & MCP794XX_BIT_ALMX_POL), + !!(regs[6] & MCP794XX_BIT_ALMX_IF), + (regs[6] & MCP794XX_MSK_ALMX_MATCH) >> 4); return 0; } @@ -831,7 +880,7 @@ static int mcp794xx_read_alarm(struct device *dev, struct rtc_wkalrm *t) static int mcp794xx_set_alarm(struct device *dev, struct rtc_wkalrm *t) { struct ds1307 *ds1307 = dev_get_drvdata(dev); - unsigned char *regs = ds1307->regs; + unsigned char regs[10]; int ret; if (!test_bit(HAS_ALARM, &ds1307->flags)) @@ -844,7 +893,8 @@ static int mcp794xx_set_alarm(struct device *dev, struct rtc_wkalrm *t) t->enabled, t->pending); /* Read control and alarm 0 registers. */ - ret = regmap_bulk_read(ds1307->regmap, MCP794XX_REG_CONTROL, regs, 10); + ret = regmap_bulk_read(ds1307->regmap, MCP794XX_REG_CONTROL, regs, + sizeof(regs)); if (ret) return ret; @@ -863,7 +913,8 @@ static int mcp794xx_set_alarm(struct device *dev, struct rtc_wkalrm *t) /* Disable interrupt. We will not enable until completely programmed */ regs[0] &= ~MCP794XX_BIT_ALM0_EN; - ret = regmap_bulk_write(ds1307->regmap, MCP794XX_REG_CONTROL, regs, 10); + ret = regmap_bulk_write(ds1307->regmap, MCP794XX_REG_CONTROL, regs, + sizeof(regs)); if (ret) return ret; @@ -885,22 +936,15 @@ static int mcp794xx_alarm_irq_enable(struct device *dev, unsigned int enabled) enabled ? 
MCP794XX_BIT_ALM0_EN : 0); } -static const struct rtc_class_ops mcp794xx_rtc_ops = { - .read_time = ds1307_get_time, - .set_time = ds1307_set_time, - .read_alarm = mcp794xx_read_alarm, - .set_alarm = mcp794xx_set_alarm, - .alarm_irq_enable = mcp794xx_alarm_irq_enable, -}; - /*----------------------------------------------------------------------*/ static int ds1307_nvram_read(void *priv, unsigned int offset, void *val, size_t bytes) { struct ds1307 *ds1307 = priv; + const struct chip_desc *chip = &chips[ds1307->type]; - return regmap_bulk_read(ds1307->regmap, ds1307->nvram_offset + offset, + return regmap_bulk_read(ds1307->regmap, chip->nvram_offset + offset, val, bytes); } @@ -908,15 +952,16 @@ static int ds1307_nvram_write(void *priv, unsigned int offset, void *val, size_t bytes) { struct ds1307 *ds1307 = priv; + const struct chip_desc *chip = &chips[ds1307->type]; - return regmap_bulk_write(ds1307->regmap, ds1307->nvram_offset + offset, + return regmap_bulk_write(ds1307->regmap, chip->nvram_offset + offset, val, bytes); } /*----------------------------------------------------------------------*/ static u8 do_trickle_setup_ds1339(struct ds1307 *ds1307, - uint32_t ohms, bool diode) + u32 ohms, bool diode) { u8 setup = (diode) ? DS1307_TRICKLE_CHARGER_DIODE : DS1307_TRICKLE_CHARGER_NO_DIODE; @@ -939,23 +984,23 @@ static u8 do_trickle_setup_ds1339(struct ds1307 *ds1307, return setup; } -static void ds1307_trickle_init(struct ds1307 *ds1307, - struct chip_desc *chip) +static u8 ds1307_trickle_init(struct ds1307 *ds1307, + const struct chip_desc *chip) { - uint32_t ohms = 0; + u32 ohms; bool diode = true; if (!chip->do_trickle_setup) - goto out; + return 0; + if (device_property_read_u32(ds1307->dev, "trickle-resistor-ohms", &ohms)) - goto out; + return 0; + if (device_property_read_bool(ds1307->dev, "trickle-diode-disable")) diode = false; - chip->trickle_charger_setup = chip->do_trickle_setup(ds1307, - ohms, diode); -out: - return; + + return chip->do_trickle_setup(ds1307, ohms, diode); } /*----------------------------------------------------------------------*/ @@ -995,7 +1040,7 @@ static int ds3231_hwmon_read_temp(struct device *dev, s32 *mC) } static ssize_t ds3231_hwmon_show_temp(struct device *dev, - struct device_attribute *attr, char *buf) + struct device_attribute *attr, char *buf) { int ret; s32 temp; @@ -1006,8 +1051,8 @@ static ssize_t ds3231_hwmon_show_temp(struct device *dev, return sprintf(buf, "%d\n", temp); } -static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, ds3231_hwmon_show_temp, - NULL, 0); +static SENSOR_DEVICE_ATTR(temp1_input, 0444, ds3231_hwmon_show_temp, + NULL, 0); static struct attribute *ds3231_hwmon_attrs[] = { &sensor_dev_attr_temp1_input.dev_attr.attr, @@ -1023,7 +1068,8 @@ static void ds1307_hwmon_register(struct ds1307 *ds1307) return; dev = devm_hwmon_device_register_with_groups(ds1307->dev, ds1307->name, - ds1307, ds3231_hwmon_groups); + ds1307, + ds3231_hwmon_groups); if (IS_ERR(dev)) { dev_warn(ds1307->dev, "unable to register hwmon device %ld\n", PTR_ERR(dev)); @@ -1095,7 +1141,7 @@ static unsigned long ds3231_clk_sqw_recalc_rate(struct clk_hw *hw, } static long ds3231_clk_sqw_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *prate) + unsigned long *prate) { int i; @@ -1108,7 +1154,7 @@ static long ds3231_clk_sqw_round_rate(struct clk_hw *hw, unsigned long rate, } static int ds3231_clk_sqw_set_rate(struct clk_hw *hw, unsigned long rate, - unsigned long parent_rate) + unsigned long parent_rate) { struct ds1307 *ds1307 = 
clk_sqw_to_ds1307(hw); int control = 0; @@ -1168,7 +1214,7 @@ static const struct clk_ops ds3231_clk_sqw_ops = { }; static unsigned long ds3231_clk_32khz_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate) + unsigned long parent_rate) { return 32768; } @@ -1259,7 +1305,7 @@ static int ds3231_clks_register(struct ds1307 *ds1307) /* optional override of the clockname */ of_property_read_string_index(node, "clock-output-names", i, - &init.name); + &init.name); ds1307->clks[i].init = &init; onecell->clks[i] = devm_clk_register(ds1307->dev, @@ -1309,22 +1355,14 @@ static int ds1307_probe(struct i2c_client *client, struct ds1307 *ds1307; int err = -ENODEV; int tmp, wday; - struct chip_desc *chip; - bool want_irq = false; + const struct chip_desc *chip; + bool want_irq; bool ds1307_can_wakeup_device = false; - unsigned char *buf; + unsigned char regs[8]; struct ds1307_platform_data *pdata = dev_get_platdata(&client->dev); struct rtc_time tm; unsigned long timestamp; - - irq_handler_t irq_handler = ds1307_irq; - - static const int bbsqi_bitpos[] = { - [ds_1337] = 0, - [ds_1339] = DS1339_BIT_BBSQI, - [ds_3231] = DS3231_BIT_BBSQW, - }; - const struct rtc_class_ops *rtc_ops = &ds13xx_rtc_ops; + u8 trickle_charger_setup = 0; ds1307 = devm_kzalloc(&client->dev, sizeof(struct ds1307), GFP_KERNEL); if (!ds1307) @@ -1333,7 +1371,6 @@ static int ds1307_probe(struct i2c_client *client, dev_set_drvdata(&client->dev, ds1307); ds1307->dev = &client->dev; ds1307->name = client->name; - ds1307->irq = client->irq; ds1307->regmap = devm_regmap_init_i2c(client, ®map_config); if (IS_ERR(ds1307->regmap)) { @@ -1361,23 +1398,22 @@ static int ds1307_probe(struct i2c_client *client, ds1307->type = acpi_id->driver_data; } - if (!pdata) - ds1307_trickle_init(ds1307, chip); - else if (pdata->trickle_charger_setup) - chip->trickle_charger_setup = pdata->trickle_charger_setup; + want_irq = client->irq > 0 && chip->alarm; - if (chip->trickle_charger_setup && chip->trickle_charger_reg) { + if (!pdata) + trickle_charger_setup = ds1307_trickle_init(ds1307, chip); + else if (pdata->trickle_charger_setup) + trickle_charger_setup = pdata->trickle_charger_setup; + + if (trickle_charger_setup && chip->trickle_charger_reg) { + trickle_charger_setup |= DS13XX_TRICKLE_CHARGER_MAGIC; dev_dbg(ds1307->dev, "writing trickle charger info 0x%x to 0x%x\n", - DS13XX_TRICKLE_CHARGER_MAGIC | chip->trickle_charger_setup, - chip->trickle_charger_reg); + trickle_charger_setup, chip->trickle_charger_reg); regmap_write(ds1307->regmap, chip->trickle_charger_reg, - DS13XX_TRICKLE_CHARGER_MAGIC | - chip->trickle_charger_setup); + trickle_charger_setup); } - buf = ds1307->regs; - #ifdef CONFIG_OF /* * For devices with no IRQ directly connected to the SoC, the RTC chip @@ -1387,31 +1423,27 @@ static int ds1307_probe(struct i2c_client *client, * This will guarantee the 'wakealarm' sysfs entry is available on the device, * if supported by the RTC. */ - if (of_property_read_bool(client->dev.of_node, "wakeup-source")) { + if (chip->alarm && of_property_read_bool(client->dev.of_node, + "wakeup-source")) ds1307_can_wakeup_device = true; - } - /* Intersil ISL12057 DT backward compatibility */ - if (of_property_read_bool(client->dev.of_node, - "isil,irq2-can-wakeup-machine")) { - ds1307_can_wakeup_device = true; - } #endif switch (ds1307->type) { case ds_1337: case ds_1339: + case ds_1341: case ds_3231: /* get registers that the "rtc" read below won't read... 
*/ err = regmap_bulk_read(ds1307->regmap, DS1337_REG_CONTROL, - buf, 2); + regs, 2); if (err) { dev_dbg(ds1307->dev, "read error %d\n", err); goto exit; } /* oscillator off? turn it on, so clock can tick. */ - if (ds1307->regs[0] & DS1337_BIT_nEOSC) - ds1307->regs[0] &= ~DS1337_BIT_nEOSC; + if (regs[0] & DS1337_BIT_nEOSC) + regs[0] &= ~DS1337_BIT_nEOSC; /* * Using IRQ or defined as wakeup-source? @@ -1419,114 +1451,92 @@ static int ds1307_probe(struct i2c_client *client, * For some variants, be sure alarms can trigger when we're * running on Vbackup (BBSQI/BBSQW) */ - if (chip->alarm && (ds1307->irq > 0 || - ds1307_can_wakeup_device)) { - ds1307->regs[0] |= DS1337_BIT_INTCN - | bbsqi_bitpos[ds1307->type]; - ds1307->regs[0] &= ~(DS1337_BIT_A2IE | DS1337_BIT_A1IE); - - want_irq = true; + if (want_irq || ds1307_can_wakeup_device) { + regs[0] |= DS1337_BIT_INTCN | chip->bbsqi_bit; + regs[0] &= ~(DS1337_BIT_A2IE | DS1337_BIT_A1IE); } regmap_write(ds1307->regmap, DS1337_REG_CONTROL, - ds1307->regs[0]); + regs[0]); /* oscillator fault? clear flag, and warn */ - if (ds1307->regs[1] & DS1337_BIT_OSF) { + if (regs[1] & DS1337_BIT_OSF) { regmap_write(ds1307->regmap, DS1337_REG_STATUS, - ds1307->regs[1] & ~DS1337_BIT_OSF); + regs[1] & ~DS1337_BIT_OSF); dev_warn(ds1307->dev, "SET TIME!\n"); } break; case rx_8025: err = regmap_bulk_read(ds1307->regmap, - RX8025_REG_CTRL1 << 4 | 0x08, buf, 2); + RX8025_REG_CTRL1 << 4 | 0x08, regs, 2); if (err) { dev_dbg(ds1307->dev, "read error %d\n", err); goto exit; } /* oscillator off? turn it on, so clock can tick. */ - if (!(ds1307->regs[1] & RX8025_BIT_XST)) { - ds1307->regs[1] |= RX8025_BIT_XST; + if (!(regs[1] & RX8025_BIT_XST)) { + regs[1] |= RX8025_BIT_XST; regmap_write(ds1307->regmap, RX8025_REG_CTRL2 << 4 | 0x08, - ds1307->regs[1]); + regs[1]); dev_warn(ds1307->dev, "oscillator stop detected - SET TIME!\n"); } - if (ds1307->regs[1] & RX8025_BIT_PON) { - ds1307->regs[1] &= ~RX8025_BIT_PON; + if (regs[1] & RX8025_BIT_PON) { + regs[1] &= ~RX8025_BIT_PON; regmap_write(ds1307->regmap, RX8025_REG_CTRL2 << 4 | 0x08, - ds1307->regs[1]); + regs[1]); dev_warn(ds1307->dev, "power-on detected\n"); } - if (ds1307->regs[1] & RX8025_BIT_VDET) { - ds1307->regs[1] &= ~RX8025_BIT_VDET; + if (regs[1] & RX8025_BIT_VDET) { + regs[1] &= ~RX8025_BIT_VDET; regmap_write(ds1307->regmap, RX8025_REG_CTRL2 << 4 | 0x08, - ds1307->regs[1]); + regs[1]); dev_warn(ds1307->dev, "voltage drop detected\n"); } /* make sure we are running in 24hour mode */ - if (!(ds1307->regs[0] & RX8025_BIT_2412)) { + if (!(regs[0] & RX8025_BIT_2412)) { u8 hour; /* switch to 24 hour mode */ regmap_write(ds1307->regmap, RX8025_REG_CTRL1 << 4 | 0x08, - ds1307->regs[0] | RX8025_BIT_2412); + regs[0] | RX8025_BIT_2412); err = regmap_bulk_read(ds1307->regmap, RX8025_REG_CTRL1 << 4 | 0x08, - buf, 2); + regs, 2); if (err) { dev_dbg(ds1307->dev, "read error %d\n", err); goto exit; } /* correct hour */ - hour = bcd2bin(ds1307->regs[DS1307_REG_HOUR]); + hour = bcd2bin(regs[DS1307_REG_HOUR]); if (hour == 12) hour = 0; - if (ds1307->regs[DS1307_REG_HOUR] & DS1307_BIT_PM) + if (regs[DS1307_REG_HOUR] & DS1307_BIT_PM) hour += 12; regmap_write(ds1307->regmap, DS1307_REG_HOUR << 4 | 0x08, hour); } break; - case rx_8130: - ds1307->offset = 0x10; /* Seconds starts at 0x10 */ - rtc_ops = &rx8130_rtc_ops; - if (chip->alarm && ds1307->irq > 0) { - irq_handler = rx8130_irq; - want_irq = true; - } - break; - case ds_1388: - ds1307->offset = 1; /* Seconds starts at 1 */ - break; - case mcp794xx: - rtc_ops = &mcp794xx_rtc_ops; - if 
(chip->alarm && (ds1307->irq > 0 || - ds1307_can_wakeup_device)) { - irq_handler = mcp794xx_irq; - want_irq = true; - } - break; default: break; } read_rtc: /* read RTC registers */ - err = regmap_bulk_read(ds1307->regmap, ds1307->offset, buf, 8); + err = regmap_bulk_read(ds1307->regmap, chip->offset, regs, + sizeof(regs)); if (err) { dev_dbg(ds1307->dev, "read error %d\n", err); goto exit; @@ -1537,7 +1547,7 @@ static int ds1307_probe(struct i2c_client *client, * specify the extra bits as must-be-zero, but there are * still a few values that are clearly out-of-range. */ - tmp = ds1307->regs[DS1307_REG_SECS]; + tmp = regs[DS1307_REG_SECS]; switch (ds1307->type) { case ds_1307: case m41t0: @@ -1556,10 +1566,10 @@ static int ds1307_probe(struct i2c_client *client, regmap_write(ds1307->regmap, DS1307_REG_SECS, 0); /* oscillator fault? clear flag, and warn */ - if (ds1307->regs[DS1307_REG_CONTROL] & DS1338_BIT_OSF) { + if (regs[DS1307_REG_CONTROL] & DS1338_BIT_OSF) { regmap_write(ds1307->regmap, DS1307_REG_CONTROL, - ds1307->regs[DS1307_REG_CONTROL] & - ~DS1338_BIT_OSF); + regs[DS1307_REG_CONTROL] & + ~DS1338_BIT_OSF); dev_warn(ds1307->dev, "SET TIME!\n"); goto read_rtc; } @@ -1583,9 +1593,9 @@ static int ds1307_probe(struct i2c_client *client, break; case mcp794xx: /* make sure that the backup battery is enabled */ - if (!(ds1307->regs[DS1307_REG_WDAY] & MCP794XX_BIT_VBATEN)) { + if (!(regs[DS1307_REG_WDAY] & MCP794XX_BIT_VBATEN)) { regmap_write(ds1307->regmap, DS1307_REG_WDAY, - ds1307->regs[DS1307_REG_WDAY] | + regs[DS1307_REG_WDAY] | MCP794XX_BIT_VBATEN); } @@ -1602,7 +1612,7 @@ static int ds1307_probe(struct i2c_client *client, break; } - tmp = ds1307->regs[DS1307_REG_HOUR]; + tmp = regs[DS1307_REG_HOUR]; switch (ds1307->type) { case ds_1340: case m41t0: @@ -1625,9 +1635,9 @@ static int ds1307_probe(struct i2c_client *client, tmp = bcd2bin(tmp & 0x1f); if (tmp == 12) tmp = 0; - if (ds1307->regs[DS1307_REG_HOUR] & DS1307_BIT_PM) + if (regs[DS1307_REG_HOUR] & DS1307_BIT_PM) tmp += 12; - regmap_write(ds1307->regmap, ds1307->offset + DS1307_REG_HOUR, + regmap_write(ds1307->regmap, chip->offset + DS1307_REG_HOUR, bin2bcd(tmp)); } @@ -1650,19 +1660,16 @@ static int ds1307_probe(struct i2c_client *client, MCP794XX_REG_WEEKDAY_WDAY_MASK, tm.tm_wday + 1); - if (want_irq) { + if (want_irq || ds1307_can_wakeup_device) { device_set_wakeup_capable(ds1307->dev, true); set_bit(HAS_ALARM, &ds1307->flags); } ds1307->rtc = devm_rtc_allocate_device(ds1307->dev); - if (IS_ERR(ds1307->rtc)) { + if (IS_ERR(ds1307->rtc)) return PTR_ERR(ds1307->rtc); - } - if (ds1307_can_wakeup_device && ds1307->irq <= 0) { - /* Disable request for an IRQ */ - want_irq = false; + if (ds1307_can_wakeup_device && !want_irq) { dev_info(ds1307->dev, "'wakeup-source' is set, request for an IRQ is disabled!\n"); /* We cannot support UIE mode if we do not have an IRQ line */ @@ -1670,8 +1677,8 @@ static int ds1307_probe(struct i2c_client *client, } if (want_irq) { - err = devm_request_threaded_irq(ds1307->dev, - ds1307->irq, NULL, irq_handler, + err = devm_request_threaded_irq(ds1307->dev, client->irq, NULL, + chip->irq_handler ?: ds1307_irq, IRQF_SHARED | IRQF_ONESHOT, ds1307->name, ds1307); if (err) { @@ -1679,8 +1686,9 @@ static int ds1307_probe(struct i2c_client *client, device_set_wakeup_capable(ds1307->dev, false); clear_bit(HAS_ALARM, &ds1307->flags); dev_err(ds1307->dev, "unable to request IRQ!\n"); - } else + } else { dev_dbg(ds1307->dev, "got IRQ %d\n", client->irq); + } } if (chip->nvram_size) { @@ -1691,13 +1699,12 @@ 
static int ds1307_probe(struct i2c_client *client, ds1307->nvmem_cfg.reg_read = ds1307_nvram_read; ds1307->nvmem_cfg.reg_write = ds1307_nvram_write; ds1307->nvmem_cfg.priv = ds1307; - ds1307->nvram_offset = chip->nvram_offset; ds1307->rtc->nvmem_config = &ds1307->nvmem_cfg; ds1307->rtc->nvram_old_abi = true; } - ds1307->rtc->ops = rtc_ops; + ds1307->rtc->ops = chip->rtc_ops ?: &ds13xx_rtc_ops; err = rtc_register_device(ds1307->rtc); if (err) return err; diff --git a/drivers/rtc/rtc-ds1672.c b/drivers/rtc/rtc-ds1672.c index 7bf46bfe11a4..9caaccccaa57 100644 --- a/drivers/rtc/rtc-ds1672.c +++ b/drivers/rtc/rtc-ds1672.c @@ -190,7 +190,7 @@ static int ds1672_probe(struct i2c_client *client, return 0; } -static struct i2c_device_id ds1672_id[] = { +static const struct i2c_device_id ds1672_id[] = { { "ds1672", 0 }, { } }; diff --git a/drivers/rtc/rtc-em3027.c b/drivers/rtc/rtc-em3027.c index 4f4930a2004c..b0ef8cfe742d 100644 --- a/drivers/rtc/rtc-em3027.c +++ b/drivers/rtc/rtc-em3027.c @@ -132,7 +132,7 @@ static int em3027_probe(struct i2c_client *client, return 0; } -static struct i2c_device_id em3027_id[] = { +static const struct i2c_device_id em3027_id[] = { { "em3027", 0 }, { } }; diff --git a/drivers/rtc/rtc-goldfish.c b/drivers/rtc/rtc-goldfish.c new file mode 100644 index 000000000000..d67769265185 --- /dev/null +++ b/drivers/rtc/rtc-goldfish.c @@ -0,0 +1,237 @@ +/* drivers/rtc/rtc-goldfish.c + * + * Copyright (C) 2007 Google, Inc. + * Copyright (C) 2017 Imagination Technologies Ltd. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include + +#define TIMER_TIME_LOW 0x00 /* get low bits of current time */ + /* and update TIMER_TIME_HIGH */ +#define TIMER_TIME_HIGH 0x04 /* get high bits of time at last */ + /* TIMER_TIME_LOW read */ +#define TIMER_ALARM_LOW 0x08 /* set low bits of alarm and */ + /* activate it */ +#define TIMER_ALARM_HIGH 0x0c /* set high bits of next alarm */ +#define TIMER_IRQ_ENABLED 0x10 +#define TIMER_CLEAR_ALARM 0x14 +#define TIMER_ALARM_STATUS 0x18 +#define TIMER_CLEAR_INTERRUPT 0x1c + +struct goldfish_rtc { + void __iomem *base; + int irq; + struct rtc_device *rtc; +}; + +static int goldfish_rtc_read_alarm(struct device *dev, + struct rtc_wkalrm *alrm) +{ + u64 rtc_alarm; + u64 rtc_alarm_low; + u64 rtc_alarm_high; + void __iomem *base; + struct goldfish_rtc *rtcdrv; + + rtcdrv = dev_get_drvdata(dev); + base = rtcdrv->base; + + rtc_alarm_low = readl(base + TIMER_ALARM_LOW); + rtc_alarm_high = readl(base + TIMER_ALARM_HIGH); + rtc_alarm = (rtc_alarm_high << 32) | rtc_alarm_low; + + do_div(rtc_alarm, NSEC_PER_SEC); + memset(alrm, 0, sizeof(struct rtc_wkalrm)); + + rtc_time_to_tm(rtc_alarm, &alrm->time); + + if (readl(base + TIMER_ALARM_STATUS)) + alrm->enabled = 1; + else + alrm->enabled = 0; + + return 0; +} + +static int goldfish_rtc_set_alarm(struct device *dev, + struct rtc_wkalrm *alrm) +{ + struct goldfish_rtc *rtcdrv; + unsigned long rtc_alarm; + u64 rtc_alarm64; + u64 rtc_status_reg; + void __iomem *base; + int ret = 0; + + rtcdrv = dev_get_drvdata(dev); + base = rtcdrv->base; + + if (alrm->enabled) { + ret = rtc_tm_to_time(&alrm->time, &rtc_alarm); + if (ret != 0) + return ret; + + rtc_alarm64 = rtc_alarm * NSEC_PER_SEC; + writel((rtc_alarm64 >> 32), base + TIMER_ALARM_HIGH); + writel(rtc_alarm64, base + TIMER_ALARM_LOW); + } else { + /* + * if this function was called with enabled=0 + * then it could mean that the application is + * trying to cancel an ongoing alarm + */ + rtc_status_reg = readl(base + TIMER_ALARM_STATUS); + if (rtc_status_reg) + writel(1, base + TIMER_CLEAR_ALARM); + } + + return ret; +} + +static int goldfish_rtc_alarm_irq_enable(struct device *dev, + unsigned int enabled) +{ + void __iomem *base; + struct goldfish_rtc *rtcdrv; + + rtcdrv = dev_get_drvdata(dev); + base = rtcdrv->base; + + if (enabled) + writel(1, base + TIMER_IRQ_ENABLED); + else + writel(0, base + TIMER_IRQ_ENABLED); + + return 0; +} + +static irqreturn_t goldfish_rtc_interrupt(int irq, void *dev_id) +{ + struct goldfish_rtc *rtcdrv = dev_id; + void __iomem *base = rtcdrv->base; + + writel(1, base + TIMER_CLEAR_INTERRUPT); + + rtc_update_irq(rtcdrv->rtc, 1, RTC_IRQF | RTC_AF); + + return IRQ_HANDLED; +} + +static int goldfish_rtc_read_time(struct device *dev, struct rtc_time *tm) +{ + struct goldfish_rtc *rtcdrv; + void __iomem *base; + u64 time_high; + u64 time_low; + u64 time; + + rtcdrv = dev_get_drvdata(dev); + base = rtcdrv->base; + + time_low = readl(base + TIMER_TIME_LOW); + time_high = readl(base + TIMER_TIME_HIGH); + time = (time_high << 32) | time_low; + + do_div(time, NSEC_PER_SEC); + + rtc_time_to_tm(time, tm); + + return 0; +} + +static int goldfish_rtc_set_time(struct device *dev, struct rtc_time *tm) +{ + struct goldfish_rtc *rtcdrv; + void __iomem *base; + unsigned long now; + u64 now64; + int ret; + + rtcdrv = dev_get_drvdata(dev); + base = rtcdrv->base; + + ret = rtc_tm_to_time(tm, &now); + if (ret == 0) { + now64 = now * NSEC_PER_SEC; + writel((now64 >> 32), base + TIMER_TIME_HIGH); + writel(now64, base + TIMER_TIME_LOW); + } + + return 
ret; +} + +static const struct rtc_class_ops goldfish_rtc_ops = { + .read_time = goldfish_rtc_read_time, + .set_time = goldfish_rtc_set_time, + .read_alarm = goldfish_rtc_read_alarm, + .set_alarm = goldfish_rtc_set_alarm, + .alarm_irq_enable = goldfish_rtc_alarm_irq_enable +}; + +static int goldfish_rtc_probe(struct platform_device *pdev) +{ + struct goldfish_rtc *rtcdrv; + struct resource *r; + int err; + + rtcdrv = devm_kzalloc(&pdev->dev, sizeof(*rtcdrv), GFP_KERNEL); + if (!rtcdrv) + return -ENOMEM; + + platform_set_drvdata(pdev, rtcdrv); + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r) + return -ENODEV; + + rtcdrv->base = devm_ioremap_resource(&pdev->dev, r); + if (IS_ERR(rtcdrv->base)) + return -ENODEV; + + rtcdrv->irq = platform_get_irq(pdev, 0); + if (rtcdrv->irq < 0) + return -ENODEV; + + rtcdrv->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, + &goldfish_rtc_ops, + THIS_MODULE); + if (IS_ERR(rtcdrv->rtc)) + return PTR_ERR(rtcdrv->rtc); + + err = devm_request_irq(&pdev->dev, rtcdrv->irq, + goldfish_rtc_interrupt, + 0, pdev->name, rtcdrv); + if (err) + return err; + + return 0; +} + +static const struct of_device_id goldfish_rtc_of_match[] = { + { .compatible = "google,goldfish-rtc", }, + {}, +}; +MODULE_DEVICE_TABLE(of, goldfish_rtc_of_match); + +static struct platform_driver goldfish_rtc = { + .probe = goldfish_rtc_probe, + .driver = { + .name = "goldfish_rtc", + .of_match_table = goldfish_rtc_of_match, + } +}; + +module_platform_driver(goldfish_rtc); diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c index 8940e9e43ea0..f4c070ea8384 100644 --- a/drivers/rtc/rtc-m41t80.c +++ b/drivers/rtc/rtc-m41t80.c @@ -440,28 +440,6 @@ static int m41t80_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(m41t80_pm, m41t80_suspend, m41t80_resume); -static ssize_t flags_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct i2c_client *client = to_i2c_client(dev); - int val; - - val = i2c_smbus_read_byte_data(client, M41T80_REG_FLAGS); - if (val < 0) - return val; - return sprintf(buf, "%#x\n", val); -} -static DEVICE_ATTR_RO(flags); - -static struct attribute *attrs[] = { - &dev_attr_flags.attr, - NULL, -}; - -static struct attribute_group attr_group = { - .attrs = attrs, -}; - #ifdef CONFIG_COMMON_CLK #define sqw_to_m41t80_data(_hw) container_of(_hw, struct m41t80_data, sqw) @@ -912,13 +890,6 @@ static struct notifier_block wdt_notifier = { ***************************************************************************** */ -static void m41t80_remove_sysfs_group(void *_dev) -{ - struct device *dev = _dev; - - sysfs_remove_group(&dev->kobj, &attr_group); -} - static int m41t80_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -927,6 +898,7 @@ static int m41t80_probe(struct i2c_client *client, struct rtc_device *rtc = NULL; struct rtc_time tm; struct m41t80_data *m41t80_data = NULL; + bool wakeup_source = false; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK | I2C_FUNC_SMBUS_BYTE_DATA)) { @@ -947,6 +919,10 @@ static int m41t80_probe(struct i2c_client *client, m41t80_data->features = id->driver_data; i2c_set_clientdata(client, m41t80_data); +#ifdef CONFIG_OF + wakeup_source = of_property_read_bool(client->dev.of_node, + "wakeup-source"); +#endif if (client->irq > 0) { rc = devm_request_threaded_irq(&client->dev, client->irq, NULL, m41t80_handle_irq, @@ -955,14 +931,16 @@ static int m41t80_probe(struct i2c_client *client, if (rc) { dev_warn(&client->dev, "unable to request IRQ, alarms 
disabled\n"); client->irq = 0; - } else { - m41t80_rtc_ops.read_alarm = m41t80_read_alarm; - m41t80_rtc_ops.set_alarm = m41t80_set_alarm; - m41t80_rtc_ops.alarm_irq_enable = m41t80_alarm_irq_enable; - /* Enable the wakealarm */ - device_init_wakeup(&client->dev, true); + wakeup_source = false; } } + if (client->irq > 0 || wakeup_source) { + m41t80_rtc_ops.read_alarm = m41t80_read_alarm; + m41t80_rtc_ops.set_alarm = m41t80_set_alarm; + m41t80_rtc_ops.alarm_irq_enable = m41t80_alarm_irq_enable; + /* Enable the wakealarm */ + device_init_wakeup(&client->dev, true); + } rtc = devm_rtc_device_register(&client->dev, client->name, &m41t80_rtc_ops, THIS_MODULE); @@ -970,6 +948,10 @@ static int m41t80_probe(struct i2c_client *client, return PTR_ERR(rtc); m41t80_data->rtc = rtc; + if (client->irq <= 0) { + /* We cannot support UIE mode if we do not have an IRQ line */ + rtc->uie_unsupported = 1; + } /* Make sure HT (Halt Update) bit is cleared */ rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_HOUR); @@ -1004,21 +986,6 @@ static int m41t80_probe(struct i2c_client *client, return rc; } - /* Export sysfs entries */ - rc = sysfs_create_group(&(&client->dev)->kobj, &attr_group); - if (rc) { - dev_err(&client->dev, "Failed to create sysfs group: %d\n", rc); - return rc; - } - - rc = devm_add_action_or_reset(&client->dev, m41t80_remove_sysfs_group, - &client->dev); - if (rc) { - dev_err(&client->dev, - "Failed to add sysfs cleanup action: %d\n", rc); - return rc; - } - #ifdef CONFIG_RTC_DRV_M41T80_WDT if (m41t80_data->features & M41T80_FEATURE_HT) { save_client = client; diff --git a/drivers/rtc/rtc-max6900.c b/drivers/rtc/rtc-max6900.c index 48b6b411f8b2..cbdc86a560ba 100644 --- a/drivers/rtc/rtc-max6900.c +++ b/drivers/rtc/rtc-max6900.c @@ -226,7 +226,7 @@ max6900_probe(struct i2c_client *client, const struct i2c_device_id *id) return 0; } -static struct i2c_device_id max6900_id[] = { +static const struct i2c_device_id max6900_id[] = { { "max6900", 0 }, { } }; diff --git a/drivers/rtc/rtc-max8925.c b/drivers/rtc/rtc-max8925.c index 16d129a0bb3b..67d6fc2d23e6 100644 --- a/drivers/rtc/rtc-max8925.c +++ b/drivers/rtc/rtc-max8925.c @@ -234,8 +234,6 @@ static int max8925_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) ret = max8925_reg_write(info->rtc, MAX8925_ALARM0_CNTL, 0x77); else ret = max8925_reg_write(info->rtc, MAX8925_ALARM0_CNTL, 0x0); - if (ret < 0) - goto out; out: return ret; } diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c index 401f46d8f21b..bce427d202ee 100644 --- a/drivers/rtc/rtc-mxc.c +++ b/drivers/rtc/rtc-mxc.c @@ -238,26 +238,6 @@ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -/* - * Clear all interrupts and release the IRQ - */ -static void mxc_rtc_release(struct device *dev) -{ - struct platform_device *pdev = to_platform_device(dev); - struct rtc_plat_data *pdata = platform_get_drvdata(pdev); - void __iomem *ioaddr = pdata->ioaddr; - - spin_lock_irq(&pdata->rtc->irq_lock); - - /* Disable all rtc interrupts */ - writew(0, ioaddr + RTC_RTCIENR); - - /* Clear all interrupt status */ - writew(0xffffffff, ioaddr + RTC_RTCISR); - - spin_unlock_irq(&pdata->rtc->irq_lock); -} - static int mxc_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) { mxc_rtc_irq_enable(dev, RTC_ALM_BIT, enabled); @@ -343,7 +323,6 @@ static int mxc_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) /* RTC layer */ static const struct rtc_class_ops mxc_rtc_ops = { - .release = mxc_rtc_release, .read_time = mxc_rtc_read_time, 
.set_mmss64 = mxc_rtc_set_mmss, .read_alarm = mxc_rtc_read_alarm, diff --git a/drivers/rtc/rtc-puv3.c b/drivers/rtc/rtc-puv3.c index c0a6e638c672..9e83be32ff43 100644 --- a/drivers/rtc/rtc-puv3.c +++ b/drivers/rtc/rtc-puv3.c @@ -157,49 +157,7 @@ static int puv3_rtc_proc(struct device *dev, struct seq_file *seq) return 0; } -static int puv3_rtc_open(struct device *dev) -{ - struct platform_device *pdev = to_platform_device(dev); - struct rtc_device *rtc_dev = platform_get_drvdata(pdev); - int ret; - - ret = request_irq(puv3_rtc_alarmno, puv3_rtc_alarmirq, - 0, "pkunity-rtc alarm", rtc_dev); - - if (ret) { - dev_err(dev, "IRQ%d error %d\n", puv3_rtc_alarmno, ret); - return ret; - } - - ret = request_irq(puv3_rtc_tickno, puv3_rtc_tickirq, - 0, "pkunity-rtc tick", rtc_dev); - - if (ret) { - dev_err(dev, "IRQ%d error %d\n", puv3_rtc_tickno, ret); - goto tick_err; - } - - return ret; - - tick_err: - free_irq(puv3_rtc_alarmno, rtc_dev); - return ret; -} - -static void puv3_rtc_release(struct device *dev) -{ - struct platform_device *pdev = to_platform_device(dev); - struct rtc_device *rtc_dev = platform_get_drvdata(pdev); - - /* do not clear AIE here, it may be needed for wake */ - puv3_rtc_setpie(dev, 0); - free_irq(puv3_rtc_alarmno, rtc_dev); - free_irq(puv3_rtc_tickno, rtc_dev); -} - static const struct rtc_class_ops puv3_rtcops = { - .open = puv3_rtc_open, - .release = puv3_rtc_release, .read_time = puv3_rtc_gettime, .set_time = puv3_rtc_settime, .read_alarm = puv3_rtc_getalarm, @@ -222,10 +180,6 @@ static void puv3_rtc_enable(struct device *dev, int en) static int puv3_rtc_remove(struct platform_device *dev) { - struct rtc_device *rtc = platform_get_drvdata(dev); - - rtc_device_unregister(rtc); - puv3_rtc_setpie(&dev->dev, 0); puv3_rtc_setaie(&dev->dev, 0); @@ -259,6 +213,24 @@ static int puv3_rtc_probe(struct platform_device *pdev) dev_dbg(&pdev->dev, "PKUnity_rtc: tick irq %d, alarm irq %d\n", puv3_rtc_tickno, puv3_rtc_alarmno); + rtc = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(rtc)) + return PTR_ERR(rtc); + + ret = devm_request_irq(&pdev->dev, puv3_rtc_alarmno, puv3_rtc_alarmirq, + 0, "pkunity-rtc alarm", rtc); + if (ret) { + dev_err(&pdev->dev, "IRQ%d error %d\n", puv3_rtc_alarmno, ret); + return ret; + } + + ret = devm_request_irq(&pdev->dev, puv3_rtc_tickno, puv3_rtc_tickirq, + 0, "pkunity-rtc tick", rtc); + if (ret) { + dev_err(&pdev->dev, "IRQ%d error %d\n", puv3_rtc_tickno, ret); + return ret; + } + /* get the memory region */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { @@ -278,12 +250,10 @@ static int puv3_rtc_probe(struct platform_device *pdev) puv3_rtc_enable(&pdev->dev, 1); /* register RTC and exit */ - rtc = rtc_device_register("pkunity", &pdev->dev, &puv3_rtcops, - THIS_MODULE); - - if (IS_ERR(rtc)) { + rtc->ops = &puv3_rtcops; + ret = rtc_register_device(rtc); + if (ret) { dev_err(&pdev->dev, "cannot attach rtc\n"); - ret = PTR_ERR(rtc); goto err_nortc; } diff --git a/drivers/rtc/rtc-pxa.c b/drivers/rtc/rtc-pxa.c index fe4985b54608..47304f5664d8 100644 --- a/drivers/rtc/rtc-pxa.c +++ b/drivers/rtc/rtc-pxa.c @@ -348,7 +348,7 @@ static int __init pxa_rtc_probe(struct platform_device *pdev) dev_err(dev, "No alarm IRQ resource defined\n"); return -ENXIO; } - pxa_rtc_open(dev); + pxa_rtc->base = devm_ioremap(dev, pxa_rtc->ress->start, resource_size(pxa_rtc->ress)); if (!pxa_rtc->base) { @@ -356,6 +356,8 @@ static int __init pxa_rtc_probe(struct platform_device *pdev) return -ENOMEM; } + pxa_rtc_open(dev); + sa1100_rtc->rcnr = pxa_rtc->base + 
0x0; sa1100_rtc->rtsr = pxa_rtc->base + 0x8; sa1100_rtc->rtar = pxa_rtc->base + 0x4; diff --git a/drivers/rtc/rtc-rtd119x.c b/drivers/rtc/rtc-rtd119x.c new file mode 100644 index 000000000000..b233559d950b --- /dev/null +++ b/drivers/rtc/rtc-rtd119x.c @@ -0,0 +1,242 @@ +/* + * Realtek RTD129x RTC + * + * Copyright (c) 2017 Andreas Färber + * + * SPDX-License-Identifier: GPL-2.0+ + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define RTD_RTCSEC 0x00 +#define RTD_RTCMIN 0x04 +#define RTD_RTCHR 0x08 +#define RTD_RTCDATE1 0x0c +#define RTD_RTCDATE2 0x10 +#define RTD_RTCACR 0x28 +#define RTD_RTCEN 0x2c +#define RTD_RTCCR 0x30 + +#define RTD_RTCSEC_RTCSEC_MASK 0x7f + +#define RTD_RTCMIN_RTCMIN_MASK 0x3f + +#define RTD_RTCHR_RTCHR_MASK 0x1f + +#define RTD_RTCDATE1_RTCDATE1_MASK 0xff + +#define RTD_RTCDATE2_RTCDATE2_MASK 0x7f + +#define RTD_RTCACR_RTCPWR BIT(7) + +#define RTD_RTCEN_RTCEN_MASK 0xff + +#define RTD_RTCCR_RTCRST BIT(6) + +struct rtd119x_rtc { + void __iomem *base; + struct clk *clk; + struct rtc_device *rtcdev; + unsigned int base_year; +}; + +static inline int rtd119x_rtc_days_in_year(int year) +{ + return 365 + (is_leap_year(year) ? 1 : 0); +} + +static void rtd119x_rtc_reset(struct device *dev) +{ + struct rtd119x_rtc *data = dev_get_drvdata(dev); + u32 val; + + val = readl_relaxed(data->base + RTD_RTCCR); + val |= RTD_RTCCR_RTCRST; + writel_relaxed(val, data->base + RTD_RTCCR); + + val &= ~RTD_RTCCR_RTCRST; + writel(val, data->base + RTD_RTCCR); +} + +static void rtd119x_rtc_set_enabled(struct device *dev, bool enable) +{ + struct rtd119x_rtc *data = dev_get_drvdata(dev); + u32 val; + + val = readl_relaxed(data->base + RTD_RTCEN); + if (enable) { + if ((val & RTD_RTCEN_RTCEN_MASK) == 0x5a) + return; + writel_relaxed(0x5a, data->base + RTD_RTCEN); + } else { + writel_relaxed(0, data->base + RTD_RTCEN); + } +} + +static int rtd119x_rtc_read_time(struct device *dev, struct rtc_time *tm) +{ + struct rtd119x_rtc *data = dev_get_drvdata(dev); + s32 day; + u32 sec; + unsigned int year; + int tries = 0; + + while (true) { + tm->tm_sec = (readl_relaxed(data->base + RTD_RTCSEC) & RTD_RTCSEC_RTCSEC_MASK) >> 1; + tm->tm_min = readl_relaxed(data->base + RTD_RTCMIN) & RTD_RTCMIN_RTCMIN_MASK; + tm->tm_hour = readl_relaxed(data->base + RTD_RTCHR) & RTD_RTCHR_RTCHR_MASK; + day = readl_relaxed(data->base + RTD_RTCDATE1) & RTD_RTCDATE1_RTCDATE1_MASK; + day |= (readl_relaxed(data->base + RTD_RTCDATE2) & RTD_RTCDATE2_RTCDATE2_MASK) << 8; + sec = (readl_relaxed(data->base + RTD_RTCSEC) & RTD_RTCSEC_RTCSEC_MASK) >> 1; + tries++; + + if (sec == tm->tm_sec) + break; + + if (tries >= 3) + return -EINVAL; + } + if (tries > 1) + dev_dbg(dev, "%s: needed %i tries\n", __func__, tries); + + year = data->base_year; + while (day >= rtd119x_rtc_days_in_year(year)) { + day -= rtd119x_rtc_days_in_year(year); + year++; + } + tm->tm_year = year - 1900; + tm->tm_yday = day; + + tm->tm_mon = 0; + while (day >= rtc_month_days(tm->tm_mon, year)) { + day -= rtc_month_days(tm->tm_mon, year); + tm->tm_mon++; + } + tm->tm_mday = day + 1; + + return 0; +} + +static int rtd119x_rtc_set_time(struct device *dev, struct rtc_time *tm) +{ + struct rtd119x_rtc *data = dev_get_drvdata(dev); + unsigned int day; + int i; + + if (1900 + tm->tm_year < data->base_year) + return -EINVAL; + + day = 0; + for (i = data->base_year; i < 1900 + tm->tm_year; i++) + day += rtd119x_rtc_days_in_year(i); + + day += tm->tm_yday; + if (day > 0x7fff) + return -EINVAL; + + rtd119x_rtc_set_enabled(dev, false); + + 
writel_relaxed((tm->tm_sec << 1) & RTD_RTCSEC_RTCSEC_MASK, data->base + RTD_RTCSEC); + writel_relaxed(tm->tm_min & RTD_RTCMIN_RTCMIN_MASK, data->base + RTD_RTCMIN); + writel_relaxed(tm->tm_hour & RTD_RTCHR_RTCHR_MASK, data->base + RTD_RTCHR); + writel_relaxed(day & RTD_RTCDATE1_RTCDATE1_MASK, data->base + RTD_RTCDATE1); + writel_relaxed((day >> 8) & RTD_RTCDATE2_RTCDATE2_MASK, data->base + RTD_RTCDATE2); + + rtd119x_rtc_set_enabled(dev, true); + + return 0; +} + +static const struct rtc_class_ops rtd119x_rtc_ops = { + .read_time = rtd119x_rtc_read_time, + .set_time = rtd119x_rtc_set_time, +}; + +static const struct of_device_id rtd119x_rtc_dt_ids[] = { + { .compatible = "realtek,rtd1295-rtc" }, + { } +}; + +static int rtd119x_rtc_probe(struct platform_device *pdev) +{ + struct rtd119x_rtc *data; + struct resource *res; + u32 val; + int ret; + + data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + platform_set_drvdata(pdev, data); + data->base_year = 2014; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + data->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(data->base)) + return PTR_ERR(data->base); + + data->clk = of_clk_get(pdev->dev.of_node, 0); + if (IS_ERR(data->clk)) + return PTR_ERR(data->clk); + + ret = clk_prepare_enable(data->clk); + if (ret) { + clk_put(data->clk); + return ret; + } + + val = readl_relaxed(data->base + RTD_RTCACR); + if (!(val & RTD_RTCACR_RTCPWR)) { + writel_relaxed(RTD_RTCACR_RTCPWR, data->base + RTD_RTCACR); + + rtd119x_rtc_reset(&pdev->dev); + + writel_relaxed(0, data->base + RTD_RTCMIN); + writel_relaxed(0, data->base + RTD_RTCHR); + writel_relaxed(0, data->base + RTD_RTCDATE1); + writel_relaxed(0, data->base + RTD_RTCDATE2); + } + + rtd119x_rtc_set_enabled(&pdev->dev, true); + + data->rtcdev = devm_rtc_device_register(&pdev->dev, "rtc", + &rtd119x_rtc_ops, THIS_MODULE); + if (IS_ERR(data->rtcdev)) { + dev_err(&pdev->dev, "failed to register rtc device"); + clk_disable_unprepare(data->clk); + clk_put(data->clk); + return PTR_ERR(data->rtcdev); + } + + return 0; +} + +static int rtd119x_rtc_remove(struct platform_device *pdev) +{ + struct rtd119x_rtc *data = platform_get_drvdata(pdev); + + rtd119x_rtc_set_enabled(&pdev->dev, false); + + clk_disable_unprepare(data->clk); + clk_put(data->clk); + + return 0; +} + +static struct platform_driver rtd119x_rtc_driver = { + .probe = rtd119x_rtc_probe, + .remove = rtd119x_rtc_remove, + .driver = { + .name = "rtd1295-rtc", + .of_match_table = rtd119x_rtc_dt_ids, + }, +}; +builtin_platform_driver(rtd119x_rtc_driver); diff --git a/drivers/rtc/rtc-rv3029c2.c b/drivers/rtc/rtc-rv3029c2.c index 85fa1da03762..aa09771de04f 100644 --- a/drivers/rtc/rtc-rv3029c2.c +++ b/drivers/rtc/rtc-rv3029c2.c @@ -868,7 +868,7 @@ static int rv3029_i2c_probe(struct i2c_client *client, return rv3029_probe(&client->dev, regmap, client->irq, client->name); } -static struct i2c_device_id rv3029_id[] = { +static const struct i2c_device_id rv3029_id[] = { { "rv3029", 0 }, { "rv3029c2", 0 }, { } diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c index 449820eeefe8..7067bca5c20d 100644 --- a/drivers/rtc/rtc-s35390a.c +++ b/drivers/rtc/rtc-s35390a.c @@ -106,33 +106,12 @@ static int s35390a_get_reg(struct s35390a *s35390a, int reg, char *buf, int len) return 0; } -/* - * Returns <0 on error, 0 if rtc is setup fine and 1 if the chip was reset. - * To keep the information if an irq is pending, pass the value read from - * STATUS1 to the caller. 
- */ -static int s35390a_reset(struct s35390a *s35390a, char *status1) +static int s35390a_init(struct s35390a *s35390a) { char buf; int ret; unsigned initcount = 0; - ret = s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, status1, 1); - if (ret < 0) - return ret; - - if (*status1 & S35390A_FLAG_POC) - /* - * Do not communicate for 0.5 seconds since the power-on - * detection circuit is in operation. - */ - msleep(500); - else if (!(*status1 & S35390A_FLAG_BLD)) - /* - * If both POC and BLD are unset everything is fine. - */ - return 0; - /* * At least one of POC and BLD are set, so reinitialise chip. Keeping * this information in the hardware to know later that the time isn't @@ -142,7 +121,6 @@ static int s35390a_reset(struct s35390a *s35390a, char *status1) * The 24H bit is kept over reset, so set it already here. */ initialize: - *status1 = S35390A_FLAG_24H; buf = S35390A_FLAG_RESET | S35390A_FLAG_24H; ret = s35390a_set_reg(s35390a, S35390A_CMD_STATUS1, &buf, 1); @@ -165,6 +143,34 @@ static int s35390a_reset(struct s35390a *s35390a, char *status1) return 1; } +/* + * Returns <0 on error, 0 if rtc is setup fine and 1 if the chip was reset. + * To keep the information if an irq is pending, pass the value read from + * STATUS1 to the caller. + */ +static int s35390a_read_status(struct s35390a *s35390a, char *status1) +{ + int ret; + + ret = s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, status1, 1); + if (ret < 0) + return ret; + + if (*status1 & S35390A_FLAG_POC) { + /* + * Do not communicate for 0.5 seconds since the power-on + * detection circuit is in operation. + */ + msleep(500); + return 1; + } else if (*status1 & S35390A_FLAG_BLD) + return 1; + /* + * If both POC and BLD are unset everything is fine. + */ + return 0; +} + static int s35390a_disable_test_mode(struct s35390a *s35390a) { char buf[1]; @@ -208,13 +214,16 @@ static int s35390a_set_datetime(struct i2c_client *client, struct rtc_time *tm) { struct s35390a *s35390a = i2c_get_clientdata(client); int i, err; - char buf[7]; + char buf[7], status; dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d mday=%d, " "mon=%d, year=%d, wday=%d\n", __func__, tm->tm_sec, tm->tm_min, tm->tm_hour, tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday); + if (s35390a_read_status(s35390a, &status) == 1) + s35390a_init(s35390a); + buf[S35390A_BYTE_YEAR] = bin2bcd(tm->tm_year - 100); buf[S35390A_BYTE_MONTH] = bin2bcd(tm->tm_mon + 1); buf[S35390A_BYTE_DAY] = bin2bcd(tm->tm_mday); @@ -235,9 +244,12 @@ static int s35390a_set_datetime(struct i2c_client *client, struct rtc_time *tm) static int s35390a_get_datetime(struct i2c_client *client, struct rtc_time *tm) { struct s35390a *s35390a = i2c_get_clientdata(client); - char buf[7]; + char buf[7], status; int i, err; + if (s35390a_read_status(s35390a, &status) == 1) + return -EINVAL; + err = s35390a_get_reg(s35390a, S35390A_CMD_TIME1, buf, sizeof(buf)); if (err < 0) return err; @@ -392,12 +404,42 @@ static int s35390a_rtc_set_time(struct device *dev, struct rtc_time *tm) return s35390a_set_datetime(to_i2c_client(dev), tm); } +static int s35390a_rtc_ioctl(struct device *dev, unsigned int cmd, + unsigned long arg) +{ + struct i2c_client *client = to_i2c_client(dev); + struct s35390a *s35390a = i2c_get_clientdata(client); + char sts; + int err; + + switch (cmd) { + case RTC_VL_READ: + /* s35390a_reset set lowvoltage flag and init RTC if needed */ + err = s35390a_read_status(s35390a, &sts); + if (err < 0) + return err; + if (copy_to_user((void __user *)arg, &err, sizeof(int))) + return -EFAULT; + break; 
+ case RTC_VL_CLR: + /* update flag and clear register */ + err = s35390a_init(s35390a); + if (err < 0) + return err; + break; + default: + return -ENOIOCTLCMD; + } + + return 0; +} + static const struct rtc_class_ops s35390a_rtc_ops = { .read_time = s35390a_rtc_read_time, .set_time = s35390a_rtc_set_time, .set_alarm = s35390a_rtc_set_alarm, .read_alarm = s35390a_rtc_read_alarm, - + .ioctl = s35390a_rtc_ioctl, }; static struct i2c_driver s35390a_driver; @@ -405,7 +447,7 @@ static struct i2c_driver s35390a_driver; static int s35390a_probe(struct i2c_client *client, const struct i2c_device_id *id) { - int err, err_reset; + int err, err_read; unsigned int i; struct s35390a *s35390a; struct rtc_time tm; @@ -438,9 +480,9 @@ static int s35390a_probe(struct i2c_client *client, } } - err_reset = s35390a_reset(s35390a, &status1); - if (err_reset < 0) { - err = err_reset; + err_read = s35390a_read_status(s35390a, &status1); + if (err_read < 0) { + err = err_read; dev_err(&client->dev, "error resetting chip\n"); goto exit_dummy; } @@ -466,7 +508,7 @@ static int s35390a_probe(struct i2c_client *client, } } - if (err_reset > 0 || s35390a_get_datetime(client, &tm) < 0) + if (err_read > 0 || s35390a_get_datetime(client, &tm) < 0) dev_warn(&client->dev, "clock needs to be set\n"); device_set_wakeup_capable(&client->dev, 1); diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c index c2187bf6c7e4..ed71d1113627 100644 --- a/drivers/rtc/rtc-sa1100.c +++ b/drivers/rtc/rtc-sa1100.c @@ -95,46 +95,6 @@ static irqreturn_t sa1100_rtc_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static int sa1100_rtc_open(struct device *dev) -{ - struct sa1100_rtc *info = dev_get_drvdata(dev); - struct rtc_device *rtc = info->rtc; - int ret; - - ret = request_irq(info->irq_1hz, sa1100_rtc_interrupt, 0, "rtc 1Hz", dev); - if (ret) { - dev_err(dev, "IRQ %d already in use.\n", info->irq_1hz); - goto fail_ui; - } - ret = request_irq(info->irq_alarm, sa1100_rtc_interrupt, 0, "rtc Alrm", dev); - if (ret) { - dev_err(dev, "IRQ %d already in use.\n", info->irq_alarm); - goto fail_ai; - } - rtc->max_user_freq = RTC_FREQ; - rtc_irq_set_freq(rtc, NULL, RTC_FREQ); - - return 0; - - fail_ai: - free_irq(info->irq_1hz, dev); - fail_ui: - clk_disable_unprepare(info->clk); - return ret; -} - -static void sa1100_rtc_release(struct device *dev) -{ - struct sa1100_rtc *info = dev_get_drvdata(dev); - - spin_lock_irq(&info->lock); - writel_relaxed(0, info->rtsr); - spin_unlock_irq(&info->lock); - - free_irq(info->irq_alarm, dev); - free_irq(info->irq_1hz, dev); -} - static int sa1100_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) { u32 rtsr; @@ -216,8 +176,6 @@ static int sa1100_rtc_proc(struct device *dev, struct seq_file *seq) } static const struct rtc_class_ops sa1100_rtc_ops = { - .open = sa1100_rtc_open, - .release = sa1100_rtc_release, .read_time = sa1100_rtc_read_time, .set_time = sa1100_rtc_set_time, .read_alarm = sa1100_rtc_read_alarm, @@ -265,6 +223,9 @@ int sa1100_rtc_init(struct platform_device *pdev, struct sa1100_rtc *info) } info->rtc = rtc; + rtc->max_user_freq = RTC_FREQ; + rtc_irq_set_freq(rtc, NULL, RTC_FREQ); + /* Fix for a nasty initialization problem the in SA11xx RTSR register. * See also the comments in sa1100_rtc_interrupt(). 
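The new s35390a .ioctl handler above maps the chip's power-on/low-battery condition onto the generic RTC_VL_READ and RTC_VL_CLR ioctls. A minimal user-space sketch of how a consumer could check and acknowledge that condition; the /dev/rtc0 path and the bare-bones error handling are only illustrative:

/* Standalone user-space sketch: query/clear the RTC voltage-low flag
 * via the generic RTC ioctls the handler above implements. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/rtc.h>

int main(void)
{
	int fd, vl = 0;

	fd = open("/dev/rtc0", O_RDONLY);	/* assumed device node */
	if (fd < 0) {
		perror("open /dev/rtc0");
		return 1;
	}

	if (ioctl(fd, RTC_VL_READ, &vl) == 0 && vl) {
		printf("power-on/low-voltage flag set: clock is not trustworthy\n");
		ioctl(fd, RTC_VL_CLR);	/* acknowledge; driver reinitialises the chip */
	}

	close(fd);
	return 0;
}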
* @@ -299,6 +260,7 @@ static int sa1100_rtc_probe(struct platform_device *pdev) struct resource *iores; void __iomem *base; int irq_1hz, irq_alarm; + int ret; irq_1hz = platform_get_irq_byname(pdev, "rtc 1Hz"); irq_alarm = platform_get_irq_byname(pdev, "rtc alarm"); @@ -311,6 +273,19 @@ static int sa1100_rtc_probe(struct platform_device *pdev) info->irq_1hz = irq_1hz; info->irq_alarm = irq_alarm; + ret = devm_request_irq(&pdev->dev, irq_1hz, sa1100_rtc_interrupt, 0, + "rtc 1Hz", &pdev->dev); + if (ret) { + dev_err(&pdev->dev, "IRQ %d already in use.\n", irq_1hz); + return ret; + } + ret = devm_request_irq(&pdev->dev, irq_alarm, sa1100_rtc_interrupt, 0, + "rtc Alrm", &pdev->dev); + if (ret) { + dev_err(&pdev->dev, "IRQ %d already in use.\n", irq_alarm); + return ret; + } + iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); base = devm_ioremap_resource(&pdev->dev, iores); if (IS_ERR(base)) @@ -339,8 +314,12 @@ static int sa1100_rtc_remove(struct platform_device *pdev) { struct sa1100_rtc *info = platform_get_drvdata(pdev); - if (info) + if (info) { + spin_lock_irq(&info->lock); + writel_relaxed(0, info->rtsr); + spin_unlock_irq(&info->lock); clk_disable_unprepare(info->clk); + } return 0; } diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c index 39cbc1238b92..3d2216ccd860 100644 --- a/drivers/rtc/rtc-sun6i.c +++ b/drivers/rtc/rtc-sun6i.c @@ -73,6 +73,9 @@ #define SUN6I_ALARM_CONFIG 0x0050 #define SUN6I_ALARM_CONFIG_WAKEUP BIT(0) +#define SUN6I_LOSC_OUT_GATING 0x0060 +#define SUN6I_LOSC_OUT_GATING_EN BIT(0) + /* * Get date values */ @@ -125,6 +128,7 @@ struct sun6i_rtc_dev { struct clk_hw hw; struct clk_hw *int_osc; struct clk *losc; + struct clk *ext_losc; spinlock_t lock; }; @@ -188,23 +192,24 @@ static void __init sun6i_rtc_clk_init(struct device_node *node) struct clk_init_data init = { .ops = &sun6i_rtc_osc_ops, }; + const char *clkout_name = "osc32k-out"; const char *parents[2]; rtc = kzalloc(sizeof(*rtc), GFP_KERNEL); if (!rtc) return; - spin_lock_init(&rtc->lock); - clk_data = kzalloc(sizeof(*clk_data) + sizeof(*clk_data->hws), + clk_data = kzalloc(sizeof(*clk_data) + (sizeof(*clk_data->hws) * 2), GFP_KERNEL); if (!clk_data) return; + spin_lock_init(&rtc->lock); rtc->base = of_io_request_and_map(node, 0, of_node_full_name(node)); if (IS_ERR(rtc->base)) { pr_crit("Can't map RTC registers"); - return; + goto err; } /* Switch to the external, more precise, oscillator */ @@ -216,7 +221,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node) /* Deal with old DTs */ if (!of_get_property(node, "clocks", NULL)) - return; + goto err; rtc->int_osc = clk_hw_register_fixed_rate_with_accuracy(NULL, "rtc-int-osc", @@ -235,7 +240,8 @@ static void __init sun6i_rtc_clk_init(struct device_node *node) init.parent_names = parents; init.num_parents = of_clk_get_parent_count(node) + 1; - of_property_read_string(node, "clock-output-names", &init.name); + of_property_read_string_index(node, "clock-output-names", 0, + &init.name); rtc->losc = clk_register(NULL, &rtc->hw); if (IS_ERR(rtc->losc)) { @@ -243,9 +249,25 @@ static void __init sun6i_rtc_clk_init(struct device_node *node) return; } - clk_data->num = 1; + of_property_read_string_index(node, "clock-output-names", 1, + &clkout_name); + rtc->ext_losc = clk_register_gate(NULL, clkout_name, rtc->hw.init->name, + 0, rtc->base + SUN6I_LOSC_OUT_GATING, + SUN6I_LOSC_OUT_GATING_EN, 0, + &rtc->lock); + if (IS_ERR(rtc->ext_losc)) { + pr_crit("Couldn't register the LOSC external gate\n"); + return; + } + + clk_data->num = 2; 
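clk_data now carries two clock outputs, which is why the kzalloc() above reserves sizeof(*clk_data) plus two array slots: clk_hw_onecell_data ends in a flexible array member, so the trailing hws[] storage has to be added to the allocation size by hand. A standalone sketch of that sizing idiom, using a stand-in struct rather than the real clk_hw_onecell_data:

/* Standalone sketch: sizing an allocation for a struct that ends in a
 * flexible array member. The struct below is a stand-in only. */
#include <stdio.h>
#include <stdlib.h>

struct onecell_data {
	unsigned int num;
	void *hws[];		/* flexible array member, adds nothing to sizeof() */
};

static struct onecell_data *onecell_alloc(unsigned int n)
{
	/* header plus n array slots */
	struct onecell_data *d = calloc(1, sizeof(*d) + n * sizeof(*d->hws));

	if (d)
		d->num = n;
	return d;
}

int main(void)
{
	struct onecell_data *d = onecell_alloc(2);

	if (!d)
		return 1;
	printf("header %zu bytes, allocation %zu bytes\n",
	       sizeof(*d), sizeof(*d) + d->num * sizeof(*d->hws));
	free(d);
	return 0;
}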
clk_data->hws[0] = &rtc->hw; + clk_data->hws[1] = __clk_get_hw(rtc->ext_losc); of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); + return; + +err: + kfree(clk_data); } CLK_OF_DECLARE_DRIVER(sun6i_rtc_clk, "allwinner,sun6i-a31-rtc", sun6i_rtc_clk_init); diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c index c18c39212ce6..3472e79f2b17 100644 --- a/drivers/rtc/rtc-twl.c +++ b/drivers/rtc/rtc-twl.c @@ -31,7 +31,7 @@ #include #include -#include +#include enum twl_class { TWL_4030 = 0, diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c index e1b86bb01062..7ce22967fd16 100644 --- a/drivers/rtc/rtc-vr41xx.c +++ b/drivers/rtc/rtc-vr41xx.c @@ -119,23 +119,6 @@ static inline void write_elapsed_second(unsigned long sec) spin_unlock_irq(&rtc_lock); } -static void vr41xx_rtc_release(struct device *dev) -{ - - spin_lock_irq(&rtc_lock); - - rtc1_write(ECMPLREG, 0); - rtc1_write(ECMPMREG, 0); - rtc1_write(ECMPHREG, 0); - rtc1_write(RTCL1LREG, 0); - rtc1_write(RTCL1HREG, 0); - - spin_unlock_irq(&rtc_lock); - - disable_irq(aie_irq); - disable_irq(pie_irq); -} - static int vr41xx_rtc_read_time(struct device *dev, struct rtc_time *time) { unsigned long epoch_sec, elapsed_sec; @@ -272,7 +255,6 @@ static irqreturn_t rtclong1_interrupt(int irq, void *dev_id) } static const struct rtc_class_ops vr41xx_rtc_ops = { - .release = vr41xx_rtc_release, .ioctl = vr41xx_rtc_ioctl, .read_time = vr41xx_rtc_read_time, .set_time = vr41xx_rtc_set_time, diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 9c97ad1ee121..29f35e29d480 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -62,7 +62,6 @@ MODULE_LICENSE("GPL"); static int dasd_alloc_queue(struct dasd_block *); static void dasd_setup_queue(struct dasd_block *); static void dasd_free_queue(struct dasd_block *); -static void dasd_flush_request_queue(struct dasd_block *); static int dasd_flush_block_queue(struct dasd_block *); static void dasd_device_tasklet(struct dasd_device *); static void dasd_block_tasklet(struct dasd_block *); @@ -158,7 +157,6 @@ struct dasd_block *dasd_alloc_block(void) /* open_count = 0 means device online but not in use */ atomic_set(&block->open_count, -1); - spin_lock_init(&block->request_queue_lock); atomic_set(&block->tasklet_scheduled, 0); tasklet_init(&block->tasklet, (void (*)(unsigned long)) dasd_block_tasklet, @@ -391,7 +389,6 @@ static int dasd_state_ready_to_basic(struct dasd_device *device) device->state = DASD_STATE_READY; return rc; } - dasd_flush_request_queue(block); dasd_destroy_partitions(block); block->blocks = 0; block->bp_block = 0; @@ -1645,8 +1642,12 @@ void dasd_generic_handle_state_change(struct dasd_device *device) dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING); dasd_schedule_device_bh(device); - if (device->block) + if (device->block) { dasd_schedule_block_bh(device->block); + if (device->block->request_queue) + blk_mq_run_hw_queues(device->block->request_queue, + true); + } } EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change); @@ -2638,6 +2639,7 @@ static void dasd_block_timeout(unsigned long ptr) dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING); spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags); dasd_schedule_block_bh(block); + blk_mq_run_hw_queues(block->request_queue, true); } /* @@ -2677,115 +2679,11 @@ static void __dasd_process_erp(struct dasd_device *device, erp_fn(cqr); } -/* - * Fetch requests from the block device queue. 
- */ -static void __dasd_process_request_queue(struct dasd_block *block) -{ - struct request_queue *queue; - struct request *req; - struct dasd_ccw_req *cqr; - struct dasd_device *basedev; - unsigned long flags; - queue = block->request_queue; - basedev = block->base; - /* No queue ? Then there is nothing to do. */ - if (queue == NULL) - return; - - /* - * We requeue request from the block device queue to the ccw - * queue only in two states. In state DASD_STATE_READY the - * partition detection is done and we need to requeue requests - * for that. State DASD_STATE_ONLINE is normal block device - * operation. - */ - if (basedev->state < DASD_STATE_READY) { - while ((req = blk_fetch_request(block->request_queue))) - __blk_end_request_all(req, BLK_STS_IOERR); - return; - } - - /* - * if device is stopped do not fetch new requests - * except failfast is active which will let requests fail - * immediately in __dasd_block_start_head() - */ - if (basedev->stopped && !(basedev->features & DASD_FEATURE_FAILFAST)) - return; - - /* Now we try to fetch requests from the request queue */ - while ((req = blk_peek_request(queue))) { - if (basedev->features & DASD_FEATURE_READONLY && - rq_data_dir(req) == WRITE) { - DBF_DEV_EVENT(DBF_ERR, basedev, - "Rejecting write request %p", - req); - blk_start_request(req); - __blk_end_request_all(req, BLK_STS_IOERR); - continue; - } - if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) && - (basedev->features & DASD_FEATURE_FAILFAST || - blk_noretry_request(req))) { - DBF_DEV_EVENT(DBF_ERR, basedev, - "Rejecting failfast request %p", - req); - blk_start_request(req); - __blk_end_request_all(req, BLK_STS_TIMEOUT); - continue; - } - cqr = basedev->discipline->build_cp(basedev, block, req); - if (IS_ERR(cqr)) { - if (PTR_ERR(cqr) == -EBUSY) - break; /* normal end condition */ - if (PTR_ERR(cqr) == -ENOMEM) - break; /* terminate request queue loop */ - if (PTR_ERR(cqr) == -EAGAIN) { - /* - * The current request cannot be build right - * now, we have to try later. If this request - * is the head-of-queue we stop the device - * for 1/2 second. - */ - if (!list_empty(&block->ccw_queue)) - break; - spin_lock_irqsave( - get_ccwdev_lock(basedev->cdev), flags); - dasd_device_set_stop_bits(basedev, - DASD_STOPPED_PENDING); - spin_unlock_irqrestore( - get_ccwdev_lock(basedev->cdev), flags); - dasd_block_set_timer(block, HZ/2); - break; - } - DBF_DEV_EVENT(DBF_ERR, basedev, - "CCW creation failed (rc=%ld) " - "on request %p", - PTR_ERR(cqr), req); - blk_start_request(req); - __blk_end_request_all(req, BLK_STS_IOERR); - continue; - } - /* - * Note: callback is set to dasd_return_cqr_cb in - * __dasd_block_start_head to cover erp requests as well - */ - cqr->callback_data = (void *) req; - cqr->status = DASD_CQR_FILLED; - req->completion_data = cqr; - blk_start_request(req); - list_add_tail(&cqr->blocklist, &block->ccw_queue); - INIT_LIST_HEAD(&cqr->devlist); - dasd_profile_start(block, cqr, req); - } -} - static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr) { struct request *req; - int status; blk_status_t error = BLK_STS_OK; + int status; req = (struct request *) cqr->callback_data; dasd_profile_end(cqr->block, cqr, req); @@ -2809,7 +2707,19 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr) break; } } - __blk_end_request_all(req, error); + + /* + * We need to take care for ETIMEDOUT errors here since the + * complete callback does not get called in this case. 
+ * Take care of all errors here and avoid additional code to + * transfer the error value to the complete callback. + */ + if (error) { + blk_mq_end_request(req, error); + blk_mq_run_hw_queues(req->q, true); + } else { + blk_mq_complete_request(req); + } } /* @@ -2938,27 +2848,30 @@ static void dasd_block_tasklet(struct dasd_block *block) struct list_head final_queue; struct list_head *l, *n; struct dasd_ccw_req *cqr; + struct dasd_queue *dq; atomic_set(&block->tasklet_scheduled, 0); INIT_LIST_HEAD(&final_queue); - spin_lock(&block->queue_lock); + spin_lock_irq(&block->queue_lock); /* Finish off requests on ccw queue */ __dasd_process_block_ccw_queue(block, &final_queue); - spin_unlock(&block->queue_lock); + spin_unlock_irq(&block->queue_lock); + /* Now call the callback function of requests with final status */ - spin_lock_irq(&block->request_queue_lock); list_for_each_safe(l, n, &final_queue) { cqr = list_entry(l, struct dasd_ccw_req, blocklist); + dq = cqr->dq; + spin_lock_irq(&dq->lock); list_del_init(&cqr->blocklist); __dasd_cleanup_cqr(cqr); + spin_unlock_irq(&dq->lock); } - spin_lock(&block->queue_lock); - /* Get new request from the block device request queue */ - __dasd_process_request_queue(block); + + spin_lock_irq(&block->queue_lock); /* Now check if the head of the ccw queue needs to be started. */ __dasd_block_start_head(block); - spin_unlock(&block->queue_lock); - spin_unlock_irq(&block->request_queue_lock); + spin_unlock_irq(&block->queue_lock); + if (waitqueue_active(&shutdown_waitq)) wake_up(&shutdown_waitq); dasd_put_device(block->base); @@ -2977,14 +2890,13 @@ static int _dasd_requeue_request(struct dasd_ccw_req *cqr) { struct dasd_block *block = cqr->block; struct request *req; - unsigned long flags; if (!block) return -EINVAL; - spin_lock_irqsave(&block->request_queue_lock, flags); + spin_lock_irq(&cqr->dq->lock); req = (struct request *) cqr->callback_data; - blk_requeue_request(block->request_queue, req); - spin_unlock_irqrestore(&block->request_queue_lock, flags); + blk_mq_requeue_request(req, false); + spin_unlock_irq(&cqr->dq->lock); return 0; } @@ -2999,6 +2911,7 @@ static int dasd_flush_block_queue(struct dasd_block *block) struct dasd_ccw_req *cqr, *n; int rc, i; struct list_head flush_queue; + unsigned long flags; INIT_LIST_HEAD(&flush_queue); spin_lock_bh(&block->queue_lock); @@ -3037,11 +2950,11 @@ static int dasd_flush_block_queue(struct dasd_block *block) goto restart_cb; } /* call the callback function */ - spin_lock_irq(&block->request_queue_lock); + spin_lock_irqsave(&cqr->dq->lock, flags); cqr->endclk = get_tod_clock(); list_del_init(&cqr->blocklist); __dasd_cleanup_cqr(cqr); - spin_unlock_irq(&block->request_queue_lock); + spin_unlock_irqrestore(&cqr->dq->lock, flags); } return rc; } @@ -3069,42 +2982,114 @@ EXPORT_SYMBOL(dasd_schedule_block_bh); /* * Dasd request queue function. 
Called from ll_rw_blk.c */ -static void do_dasd_request(struct request_queue *queue) +static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx, + const struct blk_mq_queue_data *qd) { - struct dasd_block *block; + struct dasd_block *block = hctx->queue->queuedata; + struct dasd_queue *dq = hctx->driver_data; + struct request *req = qd->rq; + struct dasd_device *basedev; + struct dasd_ccw_req *cqr; + blk_status_t rc = BLK_STS_OK; - block = queue->queuedata; + basedev = block->base; + spin_lock_irq(&dq->lock); + if (basedev->state < DASD_STATE_READY) { + DBF_DEV_EVENT(DBF_ERR, basedev, + "device not ready for request %p", req); + rc = BLK_STS_IOERR; + goto out; + } + + /* + * if device is stopped do not fetch new requests + * except failfast is active which will let requests fail + * immediately in __dasd_block_start_head() + */ + if (basedev->stopped && !(basedev->features & DASD_FEATURE_FAILFAST)) { + DBF_DEV_EVENT(DBF_ERR, basedev, + "device stopped request %p", req); + rc = BLK_STS_RESOURCE; + goto out; + } + + if (basedev->features & DASD_FEATURE_READONLY && + rq_data_dir(req) == WRITE) { + DBF_DEV_EVENT(DBF_ERR, basedev, + "Rejecting write request %p", req); + rc = BLK_STS_IOERR; + goto out; + } + + if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) && + (basedev->features & DASD_FEATURE_FAILFAST || + blk_noretry_request(req))) { + DBF_DEV_EVENT(DBF_ERR, basedev, + "Rejecting failfast request %p", req); + rc = BLK_STS_IOERR; + goto out; + } + + cqr = basedev->discipline->build_cp(basedev, block, req); + if (IS_ERR(cqr)) { + if (PTR_ERR(cqr) == -EBUSY || + PTR_ERR(cqr) == -ENOMEM || + PTR_ERR(cqr) == -EAGAIN) { + rc = BLK_STS_RESOURCE; + goto out; + } + DBF_DEV_EVENT(DBF_ERR, basedev, + "CCW creation failed (rc=%ld) on request %p", + PTR_ERR(cqr), req); + rc = BLK_STS_IOERR; + goto out; + } + /* + * Note: callback is set to dasd_return_cqr_cb in + * __dasd_block_start_head to cover erp requests as well + */ + cqr->callback_data = req; + cqr->status = DASD_CQR_FILLED; + cqr->dq = dq; + req->completion_data = cqr; + blk_mq_start_request(req); spin_lock(&block->queue_lock); - /* Get new request from the block device request queue */ - __dasd_process_request_queue(block); - /* Now check if the head of the ccw queue needs to be started. */ - __dasd_block_start_head(block); + list_add_tail(&cqr->blocklist, &block->ccw_queue); + INIT_LIST_HEAD(&cqr->devlist); + dasd_profile_start(block, cqr, req); + dasd_schedule_block_bh(block); spin_unlock(&block->queue_lock); + +out: + spin_unlock_irq(&dq->lock); + return rc; } /* * Block timeout callback, called from the block layer * - * request_queue lock is held on entry. - * * Return values: * BLK_EH_RESET_TIMER if the request should be left running * BLK_EH_NOT_HANDLED if the request is handled or terminated * by the driver. */ -enum blk_eh_timer_return dasd_times_out(struct request *req) +enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved) { struct dasd_ccw_req *cqr = req->completion_data; struct dasd_block *block = req->q->queuedata; struct dasd_device *device; + unsigned long flags; int rc = 0; if (!cqr) return BLK_EH_NOT_HANDLED; + spin_lock_irqsave(&cqr->dq->lock, flags); device = cqr->startdev ? 
cqr->startdev : block->base; - if (!device->blk_timeout) + if (!device->blk_timeout) { + spin_unlock_irqrestore(&cqr->dq->lock, flags); return BLK_EH_RESET_TIMER; + } DBF_DEV_EVENT(DBF_WARNING, device, " dasd_times_out cqr %p status %x", cqr, cqr->status); @@ -3154,19 +3139,64 @@ enum blk_eh_timer_return dasd_times_out(struct request *req) } dasd_schedule_block_bh(block); spin_unlock(&block->queue_lock); + spin_unlock_irqrestore(&cqr->dq->lock, flags); return rc ? BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED; } +static int dasd_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, + unsigned int idx) +{ + struct dasd_queue *dq = kzalloc(sizeof(*dq), GFP_KERNEL); + + if (!dq) + return -ENOMEM; + + spin_lock_init(&dq->lock); + hctx->driver_data = dq; + + return 0; +} + +static void dasd_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx) +{ + kfree(hctx->driver_data); + hctx->driver_data = NULL; +} + +static void dasd_request_done(struct request *req) +{ + blk_mq_end_request(req, 0); + blk_mq_run_hw_queues(req->q, true); +} + +static struct blk_mq_ops dasd_mq_ops = { + .queue_rq = do_dasd_request, + .complete = dasd_request_done, + .timeout = dasd_times_out, + .init_hctx = dasd_init_hctx, + .exit_hctx = dasd_exit_hctx, +}; + /* * Allocate and initialize request queue and default I/O scheduler. */ static int dasd_alloc_queue(struct dasd_block *block) { - block->request_queue = blk_init_queue(do_dasd_request, - &block->request_queue_lock); - if (block->request_queue == NULL) - return -ENOMEM; + int rc; + + block->tag_set.ops = &dasd_mq_ops; + block->tag_set.nr_hw_queues = DASD_NR_HW_QUEUES; + block->tag_set.queue_depth = DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV; + block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; + + rc = blk_mq_alloc_tag_set(&block->tag_set); + if (rc) + return rc; + + block->request_queue = blk_mq_init_queue(&block->tag_set); + if (IS_ERR(block->request_queue)) + return PTR_ERR(block->request_queue); block->request_queue->queuedata = block; @@ -3229,26 +3259,11 @@ static void dasd_free_queue(struct dasd_block *block) { if (block->request_queue) { blk_cleanup_queue(block->request_queue); + blk_mq_free_tag_set(&block->tag_set); block->request_queue = NULL; } } -/* - * Flush request on the request queue. 
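dasd_alloc_queue() above switches the DASD block-layer integration from the legacy single request queue to blk-mq: fill a blk_mq_tag_set, allocate it, and derive the request_queue from it, with dasd_free_queue() undoing both steps. A kernel-side sketch of that allocate/teardown pairing, mirroring only the calls used in this hunk for the blk-mq API of this kernel series; the ops table, sizing constants and queue_rq body are placeholders, not the DASD implementation:

/* Kernel-side sketch (not a complete driver, not standalone). */
#include <linux/blk-mq.h>
#include <linux/err.h>

static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *qd)
{
	blk_mq_start_request(qd->rq);
	/* hand qd->rq to the hardware here; return BLK_STS_RESOURCE to
	 * have the block layer retry the request later */
	return BLK_STS_OK;
}

static struct blk_mq_ops my_mq_ops = {
	.queue_rq = my_queue_rq,
};

static int my_alloc_queue(struct blk_mq_tag_set *set,
			  struct request_queue **q, void *drvdata)
{
	int rc;

	set->ops = &my_mq_ops;
	set->nr_hw_queues = 1;		/* placeholder sizing */
	set->queue_depth = 64;
	set->flags = BLK_MQ_F_SHOULD_MERGE;

	rc = blk_mq_alloc_tag_set(set);
	if (rc)
		return rc;

	*q = blk_mq_init_queue(set);
	if (IS_ERR(*q)) {
		blk_mq_free_tag_set(set);
		return PTR_ERR(*q);
	}
	(*q)->queuedata = drvdata;
	return 0;
}

static void my_free_queue(struct blk_mq_tag_set *set, struct request_queue *q)
{
	blk_cleanup_queue(q);
	blk_mq_free_tag_set(set);
}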
- */ -static void dasd_flush_request_queue(struct dasd_block *block) -{ - struct request *req; - - if (!block->request_queue) - return; - - spin_lock_irq(&block->request_queue_lock); - while ((req = blk_fetch_request(block->request_queue))) - __blk_end_request_all(req, BLK_STS_IOERR); - spin_unlock_irq(&block->request_queue_lock); -} - static int dasd_open(struct block_device *bdev, fmode_t mode) { struct dasd_device *base; @@ -3744,8 +3759,12 @@ int dasd_generic_path_operational(struct dasd_device *device) return 1; } dasd_schedule_device_bh(device); - if (device->block) + if (device->block) { dasd_schedule_block_bh(device->block); + if (device->block->request_queue) + blk_mq_run_hw_queues(device->block->request_queue, + true); + } if (!device->stopped) wake_up(&generic_waitq); @@ -4008,8 +4027,12 @@ int dasd_generic_restore_device(struct ccw_device *cdev) */ device->stopped |= DASD_UNRESUMED_PM; - if (device->block) + if (device->block) { dasd_schedule_block_bh(device->block); + if (device->block->request_queue) + blk_mq_run_hw_queues(device->block->request_queue, + true); + } clear_bit(DASD_FLAG_SUSPENDED, &device->flags); dasd_put_device(device); diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index e38042ce94e6..c95a4784c191 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c @@ -1326,7 +1326,7 @@ dasd_timeout_store(struct device *dev, struct device_attribute *attr, { struct dasd_device *device; struct request_queue *q; - unsigned long val, flags; + unsigned long val; device = dasd_device_from_cdev(to_ccwdev(dev)); if (IS_ERR(device) || !device->block) @@ -1342,16 +1342,10 @@ dasd_timeout_store(struct device *dev, struct device_attribute *attr, dasd_put_device(device); return -ENODEV; } - spin_lock_irqsave(&device->block->request_queue_lock, flags); - if (!val) - blk_queue_rq_timed_out(q, NULL); - else - blk_queue_rq_timed_out(q, dasd_times_out); device->blk_timeout = val; blk_queue_rq_timeout(q, device->blk_timeout * HZ); - spin_unlock_irqrestore(&device->block->request_queue_lock, flags); dasd_put_device(device); return count; diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index f9e25fc03d6b..db470bd10175 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -56,6 +56,7 @@ #include #include #include +#include /* DASD discipline magic */ #define DASD_ECKD_MAGIC 0xC5C3D2C4 @@ -185,6 +186,7 @@ struct dasd_ccw_req { char status; /* status of this request */ short retries; /* A retry counter */ unsigned long flags; /* flags of this request */ + struct dasd_queue *dq; /* ... and how */ unsigned long starttime; /* jiffies time of request start */ @@ -248,6 +250,16 @@ struct dasd_ccw_req { #define DASD_CQR_SUPPRESS_IL 6 /* Suppress 'Incorrect Length' error */ #define DASD_CQR_SUPPRESS_CR 7 /* Suppress 'Command Reject' error */ +/* + * There is no reliable way to determine the number of available CPUs on + * LPAR but there is no big performance difference between 1 and the + * maximum CPU number. + * 64 is a good trade off performance wise. + */ +#define DASD_NR_HW_QUEUES 64 +#define DASD_MAX_LCU_DEV 256 +#define DASD_REQ_PER_DEV 4 + /* Signature for error recovery functions. 
*/ typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *); @@ -539,6 +551,7 @@ struct dasd_block { struct gendisk *gdp; struct request_queue *request_queue; spinlock_t request_queue_lock; + struct blk_mq_tag_set tag_set; struct block_device *bdev; atomic_t open_count; @@ -563,6 +576,10 @@ struct dasd_attention_data { __u8 lpum; }; +struct dasd_queue { + spinlock_t lock; +}; + /* reasons why device (ccw_device_start) was stopped */ #define DASD_STOPPED_NOT_ACC 1 /* not accessible */ #define DASD_STOPPED_QUIESCE 2 /* Quiesced */ @@ -731,7 +748,7 @@ void dasd_free_device(struct dasd_device *); struct dasd_block *dasd_alloc_block(void); void dasd_free_block(struct dasd_block *); -enum blk_eh_timer_return dasd_times_out(struct request *req); +enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved); void dasd_enable_device(struct dasd_device *); void dasd_set_target_state(struct dasd_device *, int); diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 68bae4f6bd88..7abb240847c0 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -856,14 +856,14 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio) blk_queue_split(q, &bio); bytes_done = 0; - dev_info = bio->bi_bdev->bd_disk->private_data; + dev_info = bio->bi_disk->private_data; if (dev_info == NULL) goto fail; if ((bio->bi_iter.bi_sector & 7) != 0 || (bio->bi_iter.bi_size & 4095) != 0) /* Request is not page-aligned. */ goto fail; - if (bio_end_sector(bio) > get_capacity(bio->bi_bdev->bd_disk)) { + if (bio_end_sector(bio) > get_capacity(bio->bi_disk)) { /* Request beyond end of DCSS segment. */ goto fail; } diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index 2e7fd966c515..eb51893c74a4 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c @@ -249,7 +249,7 @@ static void scm_request_requeue(struct scm_request *scmrq) static void scm_request_finish(struct scm_request *scmrq) { struct scm_blk_dev *bdev = scmrq->bdev; - int *error; + blk_status_t *error; int i; for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) { @@ -415,7 +415,7 @@ void scm_blk_irq(struct scm_device *scmdev, void *data, blk_status_t error) static void scm_blk_request_done(struct request *req) { - int *error = blk_mq_rq_to_pdu(req); + blk_status_t *error = blk_mq_rq_to_pdu(req); blk_mq_end_request(req, *error); } @@ -450,7 +450,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) atomic_set(&bdev->queued_reqs, 0); bdev->tag_set.ops = &scm_mq_ops; - bdev->tag_set.cmd_size = sizeof(int); + bdev->tag_set.cmd_size = sizeof(blk_status_t); bdev->tag_set.nr_hw_queues = nr_requests; bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests; bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index a48f0d40c1d2..571a0709e1e5 100644 --- a/drivers/s390/block/xpram.c +++ b/drivers/s390/block/xpram.c @@ -183,7 +183,7 @@ static unsigned long xpram_highest_page_index(void) */ static blk_qc_t xpram_make_request(struct request_queue *q, struct bio *bio) { - xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data; + xpram_device_t *xdev = bio->bi_disk->private_data; struct bio_vec bvec; struct bvec_iter iter; unsigned int index; diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 489b583f263d..e5c32f4b5287 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -1225,10 +1225,16 @@ static int 
device_is_disconnected(struct ccw_device *cdev) static int recovery_check(struct device *dev, void *data) { struct ccw_device *cdev = to_ccwdev(dev); + struct subchannel *sch; int *redo = data; spin_lock_irq(cdev->ccwlock); switch (cdev->private->state) { + case DEV_STATE_ONLINE: + sch = to_subchannel(cdev->dev.parent); + if ((sch->schib.pmcw.pam & sch->opm) == sch->vpm) + break; + /* fall through */ case DEV_STATE_DISCONNECTED: CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n", cdev->private->dev_id.ssid, @@ -1260,7 +1266,7 @@ static void recovery_work_func(struct work_struct *unused) } spin_unlock_irq(&recovery_lock); } else - CIO_MSG_EVENT(4, "recovery: end\n"); + CIO_MSG_EVENT(3, "recovery: end\n"); } static DECLARE_WORK(recovery_work, recovery_work_func); @@ -1274,11 +1280,11 @@ static void recovery_func(unsigned long data) schedule_work(&recovery_work); } -static void ccw_device_schedule_recovery(void) +void ccw_device_schedule_recovery(void) { unsigned long flags; - CIO_MSG_EVENT(4, "recovery: schedule\n"); + CIO_MSG_EVENT(3, "recovery: schedule\n"); spin_lock_irqsave(&recovery_lock, flags); if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) { recovery_phase = 0; diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h index ec497af99dd8..69cb70f080a5 100644 --- a/drivers/s390/cio/device.h +++ b/drivers/s390/cio/device.h @@ -134,6 +134,7 @@ void ccw_device_set_disconnected(struct ccw_device *cdev); void ccw_device_set_notoper(struct ccw_device *cdev); void ccw_device_set_timeout(struct ccw_device *, int); +void ccw_device_schedule_recovery(void); /* Channel measurement facility related */ void retry_set_schib(struct ccw_device *cdev); diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index 12016e32e519..f98ea674c3d8 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c @@ -476,6 +476,17 @@ static void create_fake_irb(struct irb *irb, int type) } } +static void ccw_device_handle_broken_paths(struct ccw_device *cdev) +{ + struct subchannel *sch = to_subchannel(cdev->dev.parent); + u8 broken_paths = (sch->schib.pmcw.pam & sch->opm) ^ sch->vpm; + + if (broken_paths && (cdev->private->path_broken_mask != broken_paths)) + ccw_device_schedule_recovery(); + + cdev->private->path_broken_mask = broken_paths; +} + void ccw_device_verify_done(struct ccw_device *cdev, int err) { struct subchannel *sch; @@ -508,6 +519,7 @@ void ccw_device_verify_done(struct ccw_device *cdev, int err) memset(&cdev->private->irb, 0, sizeof(struct irb)); } ccw_device_report_path_events(cdev); + ccw_device_handle_broken_paths(cdev); break; case -ETIME: case -EUSERS: diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h index 220f49145b2f..9a1b56b2df3e 100644 --- a/drivers/s390/cio/io_sch.h +++ b/drivers/s390/cio/io_sch.h @@ -131,6 +131,8 @@ struct ccw_device_private { not operable */ u8 path_gone_mask; /* mask of paths, that became unavailable */ u8 path_new_mask; /* mask of paths, that became available */ + u8 path_broken_mask; /* mask of paths, which were found to be + unusable */ struct { unsigned int fast:1; /* post with "channel end" */ unsigned int repall:1; /* report every interrupt status */ diff --git a/drivers/s390/crypto/ap_asm.h b/drivers/s390/crypto/ap_asm.h index 287b4ad0999e..cd350345b3d2 100644 --- a/drivers/s390/crypto/ap_asm.h +++ b/drivers/s390/crypto/ap_asm.h @@ -69,16 +69,19 @@ static inline struct ap_queue_status ap_rapq(ap_qid_t qid) } /** - * ap_aqic(): Enable interruption for a specific AP. 
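recovery_check() and ccw_device_handle_broken_paths() above decide whether to kick off path recovery by comparing the installed and operational path masks against the verified path mask. A standalone sketch of that mask arithmetic with made-up example values:

/* Standalone sketch: pam = installed paths, opm = operational paths,
 * vpm = paths verified usable; any path in (pam & opm) that is not in
 * vpm counts as broken. Values are illustrative only. */
#include <stdio.h>

int main(void)
{
	unsigned char pam = 0xf0;	/* installed on the subchannel */
	unsigned char opm = 0xd0;	/* currently operational */
	unsigned char vpm = 0x90;	/* verified usable */
	unsigned char broken = (pam & opm) ^ vpm;

	printf("broken path mask: 0x%02x\n", broken);	/* 0x40 */
	if (broken)
		printf("schedule path recovery\n");
	return 0;
}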
+ * ap_aqic(): Control interruption for a specific AP. * @qid: The AP queue number + * @qirqctrl: struct ap_qirq_ctrl (64 bit value) * @ind: The notification indicator byte * * Returns AP queue status. */ -static inline struct ap_queue_status ap_aqic(ap_qid_t qid, void *ind) +static inline struct ap_queue_status ap_aqic(ap_qid_t qid, + struct ap_qirq_ctrl qirqctrl, + void *ind) { register unsigned long reg0 asm ("0") = qid | (3UL << 24); - register unsigned long reg1_in asm ("1") = (8UL << 44) | AP_ISC; + register struct ap_qirq_ctrl reg1_in asm ("1") = qirqctrl; register struct ap_queue_status reg1_out asm ("1"); register void *reg2 asm ("2") = ind; diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 6dee598979e7..5f0be2040272 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -165,27 +165,52 @@ static int ap_configuration_available(void) return test_facility(12); } +/** + * ap_apft_available(): Test if AP facilities test (APFT) + * facility is available. + * + * Returns 1 if APFT is is available. + */ +static int ap_apft_available(void) +{ + return test_facility(15); +} + /** * ap_test_queue(): Test adjunct processor queue. * @qid: The AP queue number + * @tbit: Test facilities bit * @info: Pointer to queue descriptor * * Returns AP queue status structure. */ -static inline struct ap_queue_status -ap_test_queue(ap_qid_t qid, unsigned long *info) +struct ap_queue_status ap_test_queue(ap_qid_t qid, + int tbit, + unsigned long *info) { - if (test_facility(15)) - qid |= 1UL << 23; /* set APFT T bit*/ + if (tbit) + qid |= 1UL << 23; /* set T bit*/ return ap_tapq(qid, info); } +EXPORT_SYMBOL(ap_test_queue); -static inline int ap_query_configuration(void) +/* + * ap_query_configuration(): Fetch cryptographic config info + * + * Returns the ap configuration info fetched via PQAP(QCI). + * On success 0 is returned, on failure a negative errno + * is returned, e.g. if the PQAP(QCI) instruction is not + * available, the return value will be -EOPNOTSUPP. + */ +int ap_query_configuration(struct ap_config_info *info) { - if (!ap_configuration) + if (!ap_configuration_available()) return -EOPNOTSUPP; - return ap_qci(ap_configuration); + if (!info) + return -EINVAL; + return ap_qci(info); } +EXPORT_SYMBOL(ap_query_configuration); /** * ap_init_configuration(): Allocate and query configuration array. 
@@ -198,7 +223,7 @@ static void ap_init_configuration(void) ap_configuration = kzalloc(sizeof(*ap_configuration), GFP_KERNEL); if (!ap_configuration) return; - if (ap_query_configuration() != 0) { + if (ap_query_configuration(ap_configuration) != 0) { kfree(ap_configuration); ap_configuration = NULL; return; @@ -261,7 +286,7 @@ static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type, if (!ap_test_config_card_id(AP_QID_CARD(qid))) return -ENODEV; - status = ap_test_queue(qid, &info); + status = ap_test_queue(qid, ap_apft_available(), &info); switch (status.response_code) { case AP_RESPONSE_NORMAL: *queue_depth = (int)(info & 0xff); @@ -940,7 +965,9 @@ static int ap_select_domain(void) for (j = 0; j < AP_DEVICES; j++) { if (!ap_test_config_card_id(j)) continue; - status = ap_test_queue(AP_MKQID(j, i), NULL); + status = ap_test_queue(AP_MKQID(j, i), + ap_apft_available(), + NULL); if (status.response_code != AP_RESPONSE_NORMAL) continue; count++; @@ -993,7 +1020,7 @@ static void ap_scan_bus(struct work_struct *unused) AP_DBF(DBF_DEBUG, "ap_scan_bus running\n"); - ap_query_configuration(); + ap_query_configuration(ap_configuration); if (ap_select_domain() != 0) goto out; diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h index 4dc7c88fb054..754cf2223cfb 100644 --- a/drivers/s390/crypto/ap_bus.h +++ b/drivers/s390/crypto/ap_bus.h @@ -28,6 +28,7 @@ #include #include +#include #define AP_DEVICES 64 /* Number of AP devices. */ #define AP_DOMAINS 256 /* Number of AP domains. */ @@ -40,41 +41,6 @@ extern int ap_domain_index; extern spinlock_t ap_list_lock; extern struct list_head ap_card_list; -/** - * The ap_qid_t identifier of an ap queue. It contains a - * 6 bit card index and a 4 bit queue index (domain). - */ -typedef unsigned int ap_qid_t; - -#define AP_MKQID(_card, _queue) (((_card) & 63) << 8 | ((_queue) & 255)) -#define AP_QID_CARD(_qid) (((_qid) >> 8) & 63) -#define AP_QID_QUEUE(_qid) ((_qid) & 255) - -/** - * structy ap_queue_status - Holds the AP queue status. - * @queue_empty: Shows if queue is empty - * @replies_waiting: Waiting replies - * @queue_full: Is 1 if the queue is full - * @pad: A 4 bit pad - * @int_enabled: Shows if interrupts are enabled for the AP - * @response_code: Holds the 8 bit response code - * @pad2: A 16 bit pad - * - * The ap queue status word is returned by all three AP functions - * (PQAP, NQAP and DQAP). There's a set of flags in the first - * byte, followed by a 1 byte response code. - */ -struct ap_queue_status { - unsigned int queue_empty : 1; - unsigned int replies_waiting : 1; - unsigned int queue_full : 1; - unsigned int pad1 : 4; - unsigned int int_enabled : 1; - unsigned int response_code : 8; - unsigned int pad2 : 16; -} __packed; - - static inline int ap_test_bit(unsigned int *ptr, unsigned int nr) { return (*ptr & (0x80000000u >> nr)) != 0; @@ -238,17 +204,6 @@ struct ap_message { struct ap_message *); }; -struct ap_config_info { - unsigned int special_command:1; - unsigned int ap_extended:1; - unsigned char reserved1:6; - unsigned char reserved2[15]; - unsigned int apm[8]; /* AP ID mask */ - unsigned int aqm[8]; /* AP queue mask */ - unsigned int adm[8]; /* AP domain mask */ - unsigned char reserved4[16]; -} __packed; - /** * ap_init_message() - Initialize ap_message. * Initialize a message before using. 
Otherwise this might result in diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c index 0f1a5d02acb0..56b96edffd5b 100644 --- a/drivers/s390/crypto/ap_queue.c +++ b/drivers/s390/crypto/ap_queue.c @@ -15,6 +15,25 @@ #include "ap_bus.h" #include "ap_asm.h" +/** + * ap_queue_irq_ctrl(): Control interruption on a AP queue. + * @qirqctrl: struct ap_qirq_ctrl (64 bit value) + * @ind: The notification indicator byte + * + * Returns AP queue status. + * + * Control interruption on the given AP queue. + * Just a simple wrapper function for the low level PQAP(AQIC) + * instruction available for other kernel modules. + */ +struct ap_queue_status ap_queue_irq_ctrl(ap_qid_t qid, + struct ap_qirq_ctrl qirqctrl, + void *ind) +{ + return ap_aqic(qid, qirqctrl, ind); +} +EXPORT_SYMBOL(ap_queue_irq_ctrl); + /** * ap_queue_enable_interruption(): Enable interruption on an AP queue. * @qid: The AP queue number @@ -27,8 +46,11 @@ static int ap_queue_enable_interruption(struct ap_queue *aq, void *ind) { struct ap_queue_status status; + struct ap_qirq_ctrl qirqctrl = { 0 }; - status = ap_aqic(aq->qid, ind); + qirqctrl.ir = 1; + qirqctrl.isc = AP_ISC; + status = ap_aqic(aq->qid, qirqctrl, ind); switch (status.response_code) { case AP_RESPONSE_NORMAL: case AP_RESPONSE_OTHERWISE_CHANGED: @@ -362,7 +384,7 @@ static enum ap_wait ap_sm_setirq_wait(struct ap_queue *aq) /* Get the status with TAPQ */ status = ap_tapq(aq->qid, NULL); - if (status.int_enabled == 1) { + if (status.irq_enabled == 1) { /* Irqs are now enabled */ aq->interrupt = AP_INTR_ENABLED; aq->state = (aq->queue_count > 0) ? diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c index 2ade6131a89f..26363e0816fe 100644 --- a/drivers/s390/net/ctcm_main.c +++ b/drivers/s390/net/ctcm_main.c @@ -305,7 +305,7 @@ static long ctcm_check_irb_error(struct ccw_device *cdev, struct irb *irb) * ch The channel, the sense code belongs to. * sense The sense code to inspect. 
*/ -static inline void ccw_unit_check(struct channel *ch, __u8 sense) +static void ccw_unit_check(struct channel *ch, __u8 sense) { CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, "%s(%s): %02x", diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index 619da81dca70..d01b5c2a7760 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c @@ -327,8 +327,7 @@ lcs_set_allowed_threads(struct lcs_card *card, unsigned long threads) spin_unlock_irqrestore(&card->mask_lock, flags); wake_up(&card->wait_q); } -static inline int -lcs_threads_running(struct lcs_card *card, unsigned long threads) +static int lcs_threads_running(struct lcs_card *card, unsigned long threads) { unsigned long flags; int rc = 0; @@ -346,8 +345,7 @@ lcs_wait_for_threads(struct lcs_card *card, unsigned long threads) lcs_threads_running(card, threads) == 0); } -static inline int -lcs_set_thread_start_bit(struct lcs_card *card, unsigned long thread) +static int lcs_set_thread_start_bit(struct lcs_card *card, unsigned long thread) { unsigned long flags; @@ -373,8 +371,7 @@ lcs_clear_thread_running_bit(struct lcs_card *card, unsigned long thread) wake_up(&card->wait_q); } -static inline int -__lcs_do_run_thread(struct lcs_card *card, unsigned long thread) +static int __lcs_do_run_thread(struct lcs_card *card, unsigned long thread) { unsigned long flags; int rc = 0; @@ -444,8 +441,7 @@ lcs_setup_card(struct lcs_card *card) INIT_LIST_HEAD(&card->lancmd_waiters); } -static inline void -lcs_clear_multicast_list(struct lcs_card *card) +static void lcs_clear_multicast_list(struct lcs_card *card) { #ifdef CONFIG_IP_MULTICAST struct lcs_ipm_list *ipm; @@ -656,8 +652,7 @@ __lcs_resume_channel(struct lcs_channel *channel) /** * Make a buffer ready for processing. */ -static inline void -__lcs_ready_buffer_bits(struct lcs_channel *channel, int index) +static void __lcs_ready_buffer_bits(struct lcs_channel *channel, int index) { int prev, next; @@ -1169,8 +1164,8 @@ lcs_get_mac_for_ipm(__be32 ipm, char *mac, struct net_device *dev) /** * function called by net device to handle multicast address relevant things */ -static inline void -lcs_remove_mc_addresses(struct lcs_card *card, struct in_device *in4_dev) +static void lcs_remove_mc_addresses(struct lcs_card *card, + struct in_device *in4_dev) { struct ip_mc_list *im4; struct list_head *l; @@ -1196,8 +1191,9 @@ lcs_remove_mc_addresses(struct lcs_card *card, struct in_device *in4_dev) spin_unlock_irqrestore(&card->ipm_lock, flags); } -static inline struct lcs_ipm_list * -lcs_check_addr_entry(struct lcs_card *card, struct ip_mc_list *im4, char *buf) +static struct lcs_ipm_list *lcs_check_addr_entry(struct lcs_card *card, + struct ip_mc_list *im4, + char *buf) { struct lcs_ipm_list *tmp, *ipm = NULL; struct list_head *l; @@ -1218,8 +1214,8 @@ lcs_check_addr_entry(struct lcs_card *card, struct ip_mc_list *im4, char *buf) return ipm; } -static inline void -lcs_set_mc_addresses(struct lcs_card *card, struct in_device *in4_dev) +static void lcs_set_mc_addresses(struct lcs_card *card, + struct in_device *in4_dev) { struct ip_mc_list *im4; diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index 7e0e6a4019f3..b9c7c1e61da2 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c @@ -249,14 +249,14 @@ struct ll_header { * Compatibility macros for busy handling * of network devices. 
*/ -static inline void netiucv_clear_busy(struct net_device *dev) +static void netiucv_clear_busy(struct net_device *dev) { struct netiucv_priv *priv = netdev_priv(dev); clear_bit(0, &priv->tbusy); netif_wake_queue(dev); } -static inline int netiucv_test_and_set_busy(struct net_device *dev) +static int netiucv_test_and_set_busy(struct net_device *dev) { struct netiucv_priv *priv = netdev_priv(dev); netif_stop_queue(dev); diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 7a0ffc71b25d..59e09854c4f7 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -857,11 +857,6 @@ static inline int qeth_get_ip_version(struct sk_buff *skb) } } -static inline int qeth_get_ip_protocol(struct sk_buff *skb) -{ - return ip_hdr(skb)->protocol; -} - static inline void qeth_put_buffer_pool_entry(struct qeth_card *card, struct qeth_buffer_pool_entry *entry) { @@ -951,10 +946,13 @@ int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int); int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb, int extra_elems, int data_offset); int qeth_get_elements_for_frags(struct sk_buff *); -int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *, - struct sk_buff *, struct qeth_hdr *, int, int); -int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *, - struct sk_buff *, struct qeth_hdr *, int); +int qeth_do_send_packet_fast(struct qeth_card *card, + struct qeth_qdio_out_q *queue, struct sk_buff *skb, + struct qeth_hdr *hdr, unsigned int offset, + unsigned int hd_len); +int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, + struct sk_buff *skb, struct qeth_hdr *hdr, + unsigned int hd_len, unsigned int offset, int elements); int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); int qeth_core_get_sset_count(struct net_device *, int); void qeth_core_get_ethtool_stats(struct net_device *, @@ -987,6 +985,7 @@ int qeth_set_features(struct net_device *, netdev_features_t); int qeth_recover_features(struct net_device *); netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t); int qeth_vm_request_mac(struct qeth_card *card); +int qeth_push_hdr(struct sk_buff *skb, struct qeth_hdr **hdr, unsigned int len); /* exports for OSN */ int qeth_osn_assist(struct net_device *, void *, int); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 4792cabb862e..bae7440abc01 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -101,7 +101,7 @@ void qeth_close_dev(struct qeth_card *card) } EXPORT_SYMBOL_GPL(qeth_close_dev); -static inline const char *qeth_get_cardname(struct qeth_card *card) +static const char *qeth_get_cardname(struct qeth_card *card) { if (card->info.guestlan) { switch (card->info.type) { @@ -330,7 +330,7 @@ static struct qeth_qdio_q *qeth_alloc_qdio_queue(void) return q; } -static inline int qeth_cq_init(struct qeth_card *card) +static int qeth_cq_init(struct qeth_card *card) { int rc; @@ -352,7 +352,7 @@ static inline int qeth_cq_init(struct qeth_card *card) return rc; } -static inline int qeth_alloc_cq(struct qeth_card *card) +static int qeth_alloc_cq(struct qeth_card *card) { int rc; @@ -397,7 +397,7 @@ static inline int qeth_alloc_cq(struct qeth_card *card) goto out; } -static inline void qeth_free_cq(struct qeth_card *card) +static void qeth_free_cq(struct qeth_card *card) { if (card->qdio.c_q) { --card->qdio.no_in_queues; @@ -408,8 +408,9 @@ static inline void 
qeth_free_cq(struct qeth_card *card) card->qdio.out_bufstates = NULL; } -static inline enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15, - int delayed) { +static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15, + int delayed) +{ enum iucv_tx_notify n; switch (sbalf15) { @@ -432,8 +433,8 @@ static inline enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15, return n; } -static inline void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, - int bidx, int forced_cleanup) +static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx, + int forced_cleanup) { if (q->card->options.cq != QETH_CQ_ENABLED) return; @@ -475,8 +476,9 @@ static inline void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, } -static inline void qeth_qdio_handle_aob(struct qeth_card *card, - unsigned long phys_aob_addr) { +static void qeth_qdio_handle_aob(struct qeth_card *card, + unsigned long phys_aob_addr) +{ struct qaob *aob; struct qeth_qdio_out_buffer *buffer; enum iucv_tx_notify notification; @@ -2228,7 +2230,7 @@ static int qeth_cm_setup(struct qeth_card *card) } -static inline int qeth_get_initial_mtu_for_card(struct qeth_card *card) +static int qeth_get_initial_mtu_for_card(struct qeth_card *card) { switch (card->info.type) { case QETH_CARD_TYPE_UNKNOWN: @@ -2251,7 +2253,7 @@ static inline int qeth_get_initial_mtu_for_card(struct qeth_card *card) } } -static inline int qeth_get_mtu_outof_framesize(int framesize) +static int qeth_get_mtu_outof_framesize(int framesize) { switch (framesize) { case 0x4000: @@ -2267,7 +2269,7 @@ static inline int qeth_get_mtu_outof_framesize(int framesize) } } -static inline int qeth_mtu_is_valid(struct qeth_card *card, int mtu) +static int qeth_mtu_is_valid(struct qeth_card *card, int mtu) { switch (card->info.type) { case QETH_CARD_TYPE_OSD: @@ -2738,8 +2740,8 @@ static void qeth_initialize_working_pool_list(struct qeth_card *card) } } -static inline struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry( - struct qeth_card *card) +static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry( + struct qeth_card *card) { struct list_head *plh; struct qeth_buffer_pool_entry *entry; @@ -2870,7 +2872,7 @@ int qeth_init_qdio_queues(struct qeth_card *card) } EXPORT_SYMBOL_GPL(qeth_init_qdio_queues); -static inline __u8 qeth_get_ipa_adp_type(enum qeth_link_types link_type) +static __u8 qeth_get_ipa_adp_type(enum qeth_link_types link_type) { switch (link_type) { case QETH_LINK_TYPE_HSTR: @@ -3888,27 +3890,45 @@ int qeth_hdr_chk_and_bounce(struct sk_buff *skb, struct qeth_hdr **hdr, int len) } EXPORT_SYMBOL_GPL(qeth_hdr_chk_and_bounce); -static inline void __qeth_fill_buffer(struct sk_buff *skb, - struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill, - int offset) +/** + * qeth_push_hdr() - push a qeth_hdr onto an skb. + * @skb: skb that the qeth_hdr should be pushed onto. + * @hdr: double pointer to a qeth_hdr. When returning with >= 0, + * it contains a valid pointer to a qeth_hdr. + * @len: length of the hdr that needs to be pushed on. + * + * Returns the pushed length. If the header can't be pushed on + * (eg. because it would cross a page boundary), it is allocated from + * the cache instead and 0 is returned. + * Error to create the hdr is indicated by returning with < 0. 
+ */ +int qeth_push_hdr(struct sk_buff *skb, struct qeth_hdr **hdr, unsigned int len) { - int length = skb_headlen(skb); - int length_here; - int element; - char *data; - int first_lap, cnt; - struct skb_frag_struct *frag; - - element = *next_element_to_fill; - data = skb->data; - first_lap = (is_tso == 0 ? 1 : 0); - - if (offset >= 0) { - data = skb->data + offset; - length -= offset; - first_lap = 0; + if (skb_headroom(skb) >= len && + qeth_get_elements_for_range((addr_t)skb->data - len, + (addr_t)skb->data) == 1) { + *hdr = skb_push(skb, len); + return len; } + /* fall back */ + *hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC); + if (!*hdr) + return -ENOMEM; + return 0; +} +EXPORT_SYMBOL_GPL(qeth_push_hdr); +static void __qeth_fill_buffer(struct sk_buff *skb, + struct qeth_qdio_out_buffer *buf, + bool is_first_elem, unsigned int offset) +{ + struct qdio_buffer *buffer = buf->buffer; + int element = buf->next_element_to_fill; + int length = skb_headlen(skb) - offset; + char *data = skb->data + offset; + int length_here, cnt; + + /* map linear part into buffer element(s) */ while (length > 0) { /* length_here is the remaining amount of data in this page */ length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE); @@ -3918,34 +3938,28 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb, buffer->element[element].addr = data; buffer->element[element].length = length_here; length -= length_here; - if (!length) { - if (first_lap) - if (skb_shinfo(skb)->nr_frags) - buffer->element[element].eflags = - SBAL_EFLAGS_FIRST_FRAG; - else - buffer->element[element].eflags = 0; - else + if (is_first_elem) { + is_first_elem = false; + if (length || skb_is_nonlinear(skb)) + /* skb needs additional elements */ buffer->element[element].eflags = - SBAL_EFLAGS_MIDDLE_FRAG; + SBAL_EFLAGS_FIRST_FRAG; + else + buffer->element[element].eflags = 0; } else { - if (first_lap) - buffer->element[element].eflags = - SBAL_EFLAGS_FIRST_FRAG; - else - buffer->element[element].eflags = - SBAL_EFLAGS_MIDDLE_FRAG; + buffer->element[element].eflags = + SBAL_EFLAGS_MIDDLE_FRAG; } data += length_here; element++; - first_lap = 0; } + /* map page frags into buffer element(s) */ for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { - frag = &skb_shinfo(skb)->frags[cnt]; - data = (char *)page_to_phys(skb_frag_page(frag)) + - frag->page_offset; - length = frag->size; + skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt]; + + data = skb_frag_address(frag); + length = skb_frag_size(frag); while (length > 0) { length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE); @@ -3964,48 +3978,45 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb, if (buffer->element[element - 1].eflags) buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG; - *next_element_to_fill = element; + buf->next_element_to_fill = element; } -static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue, - struct qeth_qdio_out_buffer *buf, struct sk_buff *skb, - struct qeth_hdr *hdr, int offset, int hd_len) +/** + * qeth_fill_buffer() - map skb into an output buffer + * @queue: QDIO queue to submit the buffer on + * @buf: buffer to transport the skb + * @skb: skb to map into the buffer + * @hdr: qeth_hdr for this skb. Either at skb->data, or allocated + * from qeth_core_header_cache. 
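A minimal caller sketch of the push-or-allocate pattern that qeth_push_hdr() enables, for illustration only: the wrapper name example_prepare_l2_hdr() and its hd_len out-parameter are hypothetical, while qeth_push_hdr(), skb_cow_head(), qeth_core_header_cache and kmem_cache_free() are the interfaces shown in this patch; the real consumer is the qeth_l2_xmit_osa() hunk further below.

static int example_prepare_l2_hdr(struct sk_buff *skb, struct qeth_hdr **hdr,
				  unsigned int *hd_len)
{
	unsigned int push_len = sizeof(struct qeth_hdr);
	int rc;

	/* make the headroom private/writable before pushing into it */
	rc = skb_cow_head(skb, push_len);
	if (rc)
		return rc;

	rc = qeth_push_hdr(skb, hdr, push_len);
	if (rc < 0)
		return rc;	/* -ENOMEM: not even the cache could supply a hdr */

	/*
	 * rc == 0: *hdr was taken from qeth_core_header_cache, so it needs a
	 * dedicated header element of push_len bytes and must be handed back
	 * via kmem_cache_free() on any error path.
	 * rc == push_len: the header sits in the skb headroom; no extra
	 * element and no separate free are needed.
	 */
	*hd_len = rc ? 0 : push_len;
	return 0;
}

The split return value is the point of the interface: from 0 versus the pushed length the caller can tell both whether a dedicated header element is required and whether the header has to be returned to the cache when transmission fails.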
+ * @offset: when mapping the skb, start at skb->data + offset + * @hd_len: if > 0, build a dedicated header element of this size + */ +static int qeth_fill_buffer(struct qeth_qdio_out_q *queue, + struct qeth_qdio_out_buffer *buf, + struct sk_buff *skb, struct qeth_hdr *hdr, + unsigned int offset, unsigned int hd_len) { - struct qdio_buffer *buffer; - int flush_cnt = 0, hdr_len, large_send = 0; + struct qdio_buffer *buffer = buf->buffer; + bool is_first_elem = true; + int flush_cnt = 0; - buffer = buf->buffer; refcount_inc(&skb->users); skb_queue_tail(&buf->skb_list, skb); - /*check first on TSO ....*/ - if (hdr->hdr.l3.id == QETH_HEADER_TYPE_TSO) { + /* build dedicated header element */ + if (hd_len) { int element = buf->next_element_to_fill; + is_first_elem = false; - hdr_len = sizeof(struct qeth_hdr_tso) + - ((struct qeth_hdr_tso *)hdr)->ext.dg_hdr_len; - /*fill first buffer entry only with header information */ - buffer->element[element].addr = skb->data; - buffer->element[element].length = hdr_len; - buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG; - buf->next_element_to_fill++; - skb->data += hdr_len; - skb->len -= hdr_len; - large_send = 1; - } - - if (offset >= 0) { - int element = buf->next_element_to_fill; buffer->element[element].addr = hdr; - buffer->element[element].length = sizeof(struct qeth_hdr) + - hd_len; + buffer->element[element].length = hd_len; buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG; - buf->is_header[element] = 1; + /* remember to free cache-allocated qeth_hdr: */ + buf->is_header[element] = ((void *)hdr != skb->data); buf->next_element_to_fill++; } - __qeth_fill_buffer(skb, buffer, large_send, - (int *)&buf->next_element_to_fill, offset); + __qeth_fill_buffer(skb, buf, is_first_elem, offset); if (!queue->do_pack) { QETH_CARD_TEXT(queue->card, 6, "fillbfnp"); @@ -4030,8 +4041,9 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue, } int qeth_do_send_packet_fast(struct qeth_card *card, - struct qeth_qdio_out_q *queue, struct sk_buff *skb, - struct qeth_hdr *hdr, int offset, int hd_len) + struct qeth_qdio_out_q *queue, struct sk_buff *skb, + struct qeth_hdr *hdr, unsigned int offset, + unsigned int hd_len) { struct qeth_qdio_out_buffer *buffer; int index; @@ -4061,8 +4073,9 @@ int qeth_do_send_packet_fast(struct qeth_card *card, EXPORT_SYMBOL_GPL(qeth_do_send_packet_fast); int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, - struct sk_buff *skb, struct qeth_hdr *hdr, - int elements_needed) + struct sk_buff *skb, struct qeth_hdr *hdr, + unsigned int offset, unsigned int hd_len, + int elements_needed) { struct qeth_qdio_out_buffer *buffer; int start_index; @@ -4111,7 +4124,7 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, } } } - tmp = qeth_fill_buffer(queue, buffer, skb, hdr, -1, 0); + tmp = qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len); queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) % QDIO_MAX_BUFFERS_PER_Q; flush_count += tmp; @@ -4834,7 +4847,7 @@ int qeth_vm_request_mac(struct qeth_card *card) } EXPORT_SYMBOL_GPL(qeth_vm_request_mac); -static inline int qeth_get_qdio_q_format(struct qeth_card *card) +static int qeth_get_qdio_q_format(struct qeth_card *card) { if (card->info.type == QETH_CARD_TYPE_IQD) return QDIO_IQDIO_QFMT; @@ -4899,9 +4912,12 @@ static void qeth_determine_capabilities(struct qeth_card *card) return; } -static inline void qeth_qdio_establish_cq(struct qeth_card *card, - struct qdio_buffer **in_sbal_ptrs, - void 
(**queue_start_poll) (struct ccw_device *, int, unsigned long)) { +static void qeth_qdio_establish_cq(struct qeth_card *card, + struct qdio_buffer **in_sbal_ptrs, + void (**queue_start_poll) + (struct ccw_device *, int, + unsigned long)) +{ int i; if (card->options.cq == QETH_CQ_ENABLED) { @@ -5193,9 +5209,10 @@ int qeth_core_hardsetup_card(struct qeth_card *card) } EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card); -static inline int qeth_create_skb_frag(struct qeth_qdio_buffer *qethbuffer, - struct qdio_buffer_element *element, - struct sk_buff **pskb, int offset, int *pfrag, int data_len) +static int qeth_create_skb_frag(struct qeth_qdio_buffer *qethbuffer, + struct qdio_buffer_element *element, + struct sk_buff **pskb, int offset, int *pfrag, + int data_len) { struct page *page = virt_to_page(element->addr); if (*pskb == NULL) { diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c index 6d255c22656d..d1ee9e30c68b 100644 --- a/drivers/s390/net/qeth_core_sys.c +++ b/drivers/s390/net/qeth_core_sys.c @@ -78,7 +78,7 @@ static ssize_t qeth_dev_card_type_show(struct device *dev, static DEVICE_ATTR(card_type, 0444, qeth_dev_card_type_show, NULL); -static inline const char *qeth_get_bufsize_str(struct qeth_card *card) +static const char *qeth_get_bufsize_str(struct qeth_card *card) { if (card->qdio.in_buf_size == 16384) return "16k"; diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index ad110abfdd47..760b023eae95 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -231,13 +231,7 @@ static void qeth_l2_del_all_macs(struct qeth_card *card) spin_unlock_bh(&card->mclock); } -static inline u32 qeth_l2_mac_hash(const u8 *addr) -{ - return get_unaligned((u32 *)(&addr[2])); -} - -static inline int qeth_l2_get_cast_type(struct qeth_card *card, - struct sk_buff *skb) +static int qeth_l2_get_cast_type(struct qeth_card *card, struct sk_buff *skb) { if (card->info.type == QETH_CARD_TYPE_OSN) return RTN_UNSPEC; @@ -248,8 +242,8 @@ static inline int qeth_l2_get_cast_type(struct qeth_card *card, return RTN_UNSPEC; } -static inline void qeth_l2_hdr_csum(struct qeth_card *card, - struct qeth_hdr *hdr, struct sk_buff *skb) +static void qeth_l2_hdr_csum(struct qeth_card *card, struct qeth_hdr *hdr, + struct sk_buff *skb) { struct iphdr *iph = ip_hdr(skb); @@ -265,13 +259,14 @@ static inline void qeth_l2_hdr_csum(struct qeth_card *card, card->perf_stats.tx_csum++; } -static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, - struct sk_buff *skb, int cast_type) +static void qeth_l2_fill_header(struct qeth_hdr *hdr, struct sk_buff *skb, + int cast_type, unsigned int data_len) { struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb); memset(hdr, 0, sizeof(struct qeth_hdr)); hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2; + hdr->hdr.l2.pkt_length = data_len; /* set byte byte 3 to casting flags */ if (cast_type == RTN_MULTICAST) @@ -281,7 +276,6 @@ static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, else hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_UNICAST; - hdr->hdr.l2.pkt_length = skb->len - sizeof(struct qeth_hdr); /* VSWITCH relies on the VLAN * information to be present in * the QDIO header */ @@ -519,15 +513,6 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card) /* fall back to alternative mechanism: */ } - if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) { - rc = qeth_query_setadapterparms(card); - if (rc) { - QETH_DBF_MESSAGE(2, "could not query adapter " - 
"parameters on device %s: x%x\n", - CARD_BUS_ID(card), rc); - } - } - if (card->info.type == QETH_CARD_TYPE_IQD || card->info.type == QETH_CARD_TYPE_OSM || card->info.type == QETH_CARD_TYPE_OSX || @@ -615,13 +600,13 @@ static void qeth_promisc_to_bridge(struct qeth_card *card) * only if there is not in the hash table storage already * */ -static void -qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha, u8 is_uc) +static void qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha, + u8 is_uc) { + u32 mac_hash = get_unaligned((u32 *)(&ha->addr[2])); struct qeth_mac *mac; - hash_for_each_possible(card->mac_htable, mac, hnode, - qeth_l2_mac_hash(ha->addr)) { + hash_for_each_possible(card->mac_htable, mac, hnode, mac_hash) { if (is_uc == mac->is_uc && !memcmp(ha->addr, mac->mac_addr, OSA_ADDR_LEN)) { mac->disp_flag = QETH_DISP_ADDR_DO_NOTHING; @@ -638,9 +623,7 @@ qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha, u8 is_uc) mac->is_uc = is_uc; mac->disp_flag = QETH_DISP_ADDR_ADD; - hash_add(card->mac_htable, &mac->hnode, - qeth_l2_mac_hash(mac->mac_addr)); - + hash_add(card->mac_htable, &mac->hnode, mac_hash); } static void qeth_l2_set_rx_mode(struct net_device *dev) @@ -693,21 +676,127 @@ static void qeth_l2_set_rx_mode(struct net_device *dev) qeth_promisc_to_bridge(card); } +static int qeth_l2_xmit_iqd(struct qeth_card *card, struct sk_buff *skb, + struct qeth_qdio_out_q *queue, int cast_type) +{ + unsigned int data_offset = ETH_HLEN; + struct qeth_hdr *hdr; + int rc; + + hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC); + if (!hdr) + return -ENOMEM; + qeth_l2_fill_header(hdr, skb, cast_type, skb->len); + skb_copy_from_linear_data(skb, ((char *)hdr) + sizeof(*hdr), + data_offset); + + if (!qeth_get_elements_no(card, skb, 1, data_offset)) { + rc = -E2BIG; + goto out; + } + rc = qeth_do_send_packet_fast(card, queue, skb, hdr, data_offset, + sizeof(*hdr) + data_offset); +out: + if (rc) + kmem_cache_free(qeth_core_header_cache, hdr); + return rc; +} + +static int qeth_l2_xmit_osa(struct qeth_card *card, struct sk_buff *skb, + struct qeth_qdio_out_q *queue, int cast_type) +{ + int push_len = sizeof(struct qeth_hdr); + unsigned int elements, nr_frags; + unsigned int hdr_elements = 0; + struct qeth_hdr *hdr = NULL; + unsigned int hd_len = 0; + int rc; + + /* fix hardware limitation: as long as we do not have sbal + * chaining we can not send long frag lists + */ + if (!qeth_get_elements_no(card, skb, 0, 0)) { + rc = skb_linearize(skb); + + if (card->options.performance_stats) { + if (rc) + card->perf_stats.tx_linfail++; + else + card->perf_stats.tx_lin++; + } + if (rc) + return rc; + } + nr_frags = skb_shinfo(skb)->nr_frags; + + rc = skb_cow_head(skb, push_len); + if (rc) + return rc; + push_len = qeth_push_hdr(skb, &hdr, push_len); + if (push_len < 0) + return push_len; + if (!push_len) { + /* hdr was allocated from cache */ + hd_len = sizeof(*hdr); + hdr_elements = 1; + } + qeth_l2_fill_header(hdr, skb, cast_type, skb->len - push_len); + if (skb->ip_summed == CHECKSUM_PARTIAL) + qeth_l2_hdr_csum(card, hdr, skb); + + elements = qeth_get_elements_no(card, skb, hdr_elements, 0); + if (!elements) { + rc = -E2BIG; + goto out; + } + elements += hdr_elements; + + /* TODO: remove the skb_orphan() once TX completion is fast enough */ + skb_orphan(skb); + rc = qeth_do_send_packet(card, queue, skb, hdr, 0, hd_len, elements); +out: + if (!rc) { + if (card->options.performance_stats && nr_frags) { + card->perf_stats.sg_skbs_sent++; + /* nr_frags + skb->data */ 
+ card->perf_stats.sg_frags_sent += nr_frags + 1; + } + } else { + if (hd_len) + kmem_cache_free(qeth_core_header_cache, hdr); + if (rc == -EBUSY) + /* roll back to ETH header */ + skb_pull(skb, push_len); + } + return rc; +} + +static int qeth_l2_xmit_osn(struct qeth_card *card, struct sk_buff *skb, + struct qeth_qdio_out_q *queue) +{ + unsigned int elements; + struct qeth_hdr *hdr; + + if (skb->protocol == htons(ETH_P_IPV6)) + return -EPROTONOSUPPORT; + + hdr = (struct qeth_hdr *)skb->data; + elements = qeth_get_elements_no(card, skb, 0, 0); + if (!elements) + return -E2BIG; + if (qeth_hdr_chk_and_bounce(skb, &hdr, sizeof(*hdr))) + return -EINVAL; + return qeth_do_send_packet(card, queue, skb, hdr, 0, 0, elements); +} + static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { - int rc; - struct qeth_hdr *hdr = NULL; - int elements = 0; struct qeth_card *card = dev->ml_priv; - struct sk_buff *new_skb = skb; int cast_type = qeth_l2_get_cast_type(card, skb); struct qeth_qdio_out_q *queue; int tx_bytes = skb->len; - int data_offset = -1; - int elements_needed = 0; - int hd_len = 0; - int nr_frags; + int rc; if (card->qdio.do_prio_queueing || (cast_type && card->info.is_multicast_different)) @@ -721,118 +810,38 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, goto tx_drop; } - if ((card->info.type == QETH_CARD_TYPE_OSN) && - (skb->protocol == htons(ETH_P_IPV6))) - goto tx_drop; - if (card->options.performance_stats) { card->perf_stats.outbound_cnt++; card->perf_stats.outbound_start_time = qeth_get_micros(); } netif_stop_queue(dev); - /* fix hardware limitation: as long as we do not have sbal - * chaining we can not send long frag lists - */ - if ((card->info.type != QETH_CARD_TYPE_IQD) && - !qeth_get_elements_no(card, new_skb, 0, 0)) { - int lin_rc = skb_linearize(new_skb); - - if (card->options.performance_stats) { - if (lin_rc) - card->perf_stats.tx_linfail++; - else - card->perf_stats.tx_lin++; - } - if (lin_rc) - goto tx_drop; + switch (card->info.type) { + case QETH_CARD_TYPE_OSN: + rc = qeth_l2_xmit_osn(card, skb, queue); + break; + case QETH_CARD_TYPE_IQD: + rc = qeth_l2_xmit_iqd(card, skb, queue, cast_type); + break; + default: + rc = qeth_l2_xmit_osa(card, skb, queue, cast_type); } - if (card->info.type == QETH_CARD_TYPE_OSN) - hdr = (struct qeth_hdr *)skb->data; - else { - if (card->info.type == QETH_CARD_TYPE_IQD) { - new_skb = skb; - data_offset = ETH_HLEN; - hd_len = ETH_HLEN; - hdr = kmem_cache_alloc(qeth_core_header_cache, - GFP_ATOMIC); - if (!hdr) - goto tx_drop; - elements_needed++; - skb_reset_mac_header(new_skb); - qeth_l2_fill_header(card, hdr, new_skb, cast_type); - hdr->hdr.l2.pkt_length = new_skb->len; - memcpy(((char *)hdr) + sizeof(struct qeth_hdr), - skb_mac_header(new_skb), ETH_HLEN); - } else { - /* create a clone with writeable headroom */ - new_skb = skb_realloc_headroom(skb, - sizeof(struct qeth_hdr)); - if (!new_skb) - goto tx_drop; - hdr = skb_push(new_skb, sizeof(struct qeth_hdr)); - skb_set_mac_header(new_skb, sizeof(struct qeth_hdr)); - qeth_l2_fill_header(card, hdr, new_skb, cast_type); - if (new_skb->ip_summed == CHECKSUM_PARTIAL) - qeth_l2_hdr_csum(card, hdr, new_skb); - } - } - - elements = qeth_get_elements_no(card, new_skb, elements_needed, - (data_offset > 0) ? 
data_offset : 0); - if (!elements) { - if (data_offset >= 0) - kmem_cache_free(qeth_core_header_cache, hdr); - goto tx_drop; - } - - if (card->info.type != QETH_CARD_TYPE_IQD) { - if (qeth_hdr_chk_and_bounce(new_skb, &hdr, - sizeof(struct qeth_hdr_layer2))) - goto tx_drop; - rc = qeth_do_send_packet(card, queue, new_skb, hdr, - elements); - } else - rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr, - data_offset, hd_len); if (!rc) { card->stats.tx_packets++; card->stats.tx_bytes += tx_bytes; - if (card->options.performance_stats) { - nr_frags = skb_shinfo(new_skb)->nr_frags; - if (nr_frags) { - card->perf_stats.sg_skbs_sent++; - /* nr_frags + skb->data */ - card->perf_stats.sg_frags_sent += nr_frags + 1; - } - } - if (new_skb != skb) - dev_kfree_skb_any(skb); - rc = NETDEV_TX_OK; - } else { - if (data_offset >= 0) - kmem_cache_free(qeth_core_header_cache, hdr); - - if (rc == -EBUSY) { - if (new_skb != skb) - dev_kfree_skb_any(new_skb); - return NETDEV_TX_BUSY; - } else - goto tx_drop; - } - - netif_wake_queue(dev); - if (card->options.performance_stats) - card->perf_stats.outbound_time += qeth_get_micros() - - card->perf_stats.outbound_start_time; - return rc; + if (card->options.performance_stats) + card->perf_stats.outbound_time += qeth_get_micros() - + card->perf_stats.outbound_start_time; + netif_wake_queue(dev); + return NETDEV_TX_OK; + } else if (rc == -EBUSY) { + return NETDEV_TX_BUSY; + } /* else fall through */ tx_drop: card->stats.tx_dropped++; card->stats.tx_errors++; - if ((new_skb != skb) && new_skb) - dev_kfree_skb_any(new_skb); dev_kfree_skb_any(skb); netif_wake_queue(dev); return NETDEV_TX_OK; @@ -1010,6 +1019,12 @@ static int qeth_l2_setup_netdev(struct qeth_card *card) card->dev->vlan_features |= NETIF_F_RXCSUM; } } + if (card->info.type != QETH_CARD_TYPE_OSN && + card->info.type != QETH_CARD_TYPE_IQD) { + card->dev->priv_flags &= ~IFF_TX_SKB_SHARING; + card->dev->needed_headroom = sizeof(struct qeth_hdr); + } + card->info.broadcast_capable = 1; qeth_l2_request_initial_mac(card); card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) * @@ -1744,11 +1759,26 @@ static int qeth_bridgeport_makerc(struct qeth_card *card, return rc; } -static inline int ipa_cmd_sbp(struct qeth_card *card) +static struct qeth_cmd_buffer *qeth_sbp_build_cmd(struct qeth_card *card, + enum qeth_ipa_sbp_cmd sbp_cmd, + unsigned int cmd_length) { - return (card->info.type == QETH_CARD_TYPE_IQD) ? - IPA_CMD_SETBRIDGEPORT_IQD : - IPA_CMD_SETBRIDGEPORT_OSA; + enum qeth_ipa_cmds ipa_cmd = (card->info.type == QETH_CARD_TYPE_IQD) ? 
+ IPA_CMD_SETBRIDGEPORT_IQD : + IPA_CMD_SETBRIDGEPORT_OSA; + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + + iob = qeth_get_ipacmd_buffer(card, ipa_cmd, 0); + if (!iob) + return iob; + cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); + cmd->data.sbp.hdr.cmdlength = sizeof(struct qeth_ipacmd_sbp_hdr) + + cmd_length; + cmd->data.sbp.hdr.command_code = sbp_cmd; + cmd->data.sbp.hdr.used_total = 1; + cmd->data.sbp.hdr.seq_no = 1; + return iob; } static int qeth_bridgeport_query_support_cb(struct qeth_card *card, @@ -1778,21 +1808,13 @@ static int qeth_bridgeport_query_support_cb(struct qeth_card *card, static void qeth_bridgeport_query_support(struct qeth_card *card) { struct qeth_cmd_buffer *iob; - struct qeth_ipa_cmd *cmd; struct _qeth_sbp_cbctl cbctl; QETH_CARD_TEXT(card, 2, "brqsuppo"); - iob = qeth_get_ipacmd_buffer(card, ipa_cmd_sbp(card), 0); + iob = qeth_sbp_build_cmd(card, IPA_SBP_QUERY_COMMANDS_SUPPORTED, + sizeof(struct qeth_sbp_query_cmds_supp)); if (!iob) return; - cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); - cmd->data.sbp.hdr.cmdlength = - sizeof(struct qeth_ipacmd_sbp_hdr) + - sizeof(struct qeth_sbp_query_cmds_supp); - cmd->data.sbp.hdr.command_code = - IPA_SBP_QUERY_COMMANDS_SUPPORTED; - cmd->data.sbp.hdr.used_total = 1; - cmd->data.sbp.hdr.seq_no = 1; if (qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_support_cb, (void *)&cbctl) || qeth_bridgeport_makerc(card, &cbctl, @@ -1846,7 +1868,6 @@ int qeth_bridgeport_query_ports(struct qeth_card *card, { int rc = 0; struct qeth_cmd_buffer *iob; - struct qeth_ipa_cmd *cmd; struct _qeth_sbp_cbctl cbctl = { .data = { .qports = { @@ -1859,16 +1880,9 @@ int qeth_bridgeport_query_ports(struct qeth_card *card, QETH_CARD_TEXT(card, 2, "brqports"); if (!(card->options.sbp.supported_funcs & IPA_SBP_QUERY_BRIDGE_PORTS)) return -EOPNOTSUPP; - iob = qeth_get_ipacmd_buffer(card, ipa_cmd_sbp(card), 0); + iob = qeth_sbp_build_cmd(card, IPA_SBP_QUERY_BRIDGE_PORTS, 0); if (!iob) return -ENOMEM; - cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); - cmd->data.sbp.hdr.cmdlength = - sizeof(struct qeth_ipacmd_sbp_hdr); - cmd->data.sbp.hdr.command_code = - IPA_SBP_QUERY_BRIDGE_PORTS; - cmd->data.sbp.hdr.used_total = 1; - cmd->data.sbp.hdr.seq_no = 1; rc = qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_ports_cb, (void *)&cbctl); if (rc < 0) @@ -1900,7 +1914,6 @@ int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role) int rc = 0; int cmdlength; struct qeth_cmd_buffer *iob; - struct qeth_ipa_cmd *cmd; struct _qeth_sbp_cbctl cbctl; enum qeth_ipa_sbp_cmd setcmd; @@ -1908,32 +1921,24 @@ int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role) switch (role) { case QETH_SBP_ROLE_NONE: setcmd = IPA_SBP_RESET_BRIDGE_PORT_ROLE; - cmdlength = sizeof(struct qeth_ipacmd_sbp_hdr) + - sizeof(struct qeth_sbp_reset_role); + cmdlength = sizeof(struct qeth_sbp_reset_role); break; case QETH_SBP_ROLE_PRIMARY: setcmd = IPA_SBP_SET_PRIMARY_BRIDGE_PORT; - cmdlength = sizeof(struct qeth_ipacmd_sbp_hdr) + - sizeof(struct qeth_sbp_set_primary); + cmdlength = sizeof(struct qeth_sbp_set_primary); break; case QETH_SBP_ROLE_SECONDARY: setcmd = IPA_SBP_SET_SECONDARY_BRIDGE_PORT; - cmdlength = sizeof(struct qeth_ipacmd_sbp_hdr) + - sizeof(struct qeth_sbp_set_secondary); + cmdlength = sizeof(struct qeth_sbp_set_secondary); break; default: return -EINVAL; } if (!(card->options.sbp.supported_funcs & setcmd)) return -EOPNOTSUPP; - iob = qeth_get_ipacmd_buffer(card, ipa_cmd_sbp(card), 0); + 
iob = qeth_sbp_build_cmd(card, setcmd, cmdlength); if (!iob) return -ENOMEM; - cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); - cmd->data.sbp.hdr.cmdlength = cmdlength; - cmd->data.sbp.hdr.command_code = setcmd; - cmd->data.sbp.hdr.used_total = 1; - cmd->data.sbp.hdr.seq_no = 1; rc = qeth_send_ipa_cmd(card, iob, qeth_bridgeport_set_cb, (void *)&cbctl); if (rc < 0) diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index d42e758518ed..ab661a431f7c 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -247,7 +247,8 @@ int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) return -ENOENT; addr->ref_counter--; - if (addr->type == QETH_IP_TYPE_NORMAL && addr->ref_counter > 0) + if (addr->ref_counter > 0 && (addr->type == QETH_IP_TYPE_NORMAL || + addr->type == QETH_IP_TYPE_RXIP)) return rc; if (addr->in_progress) return -EINPROGRESS; @@ -329,8 +330,9 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) kfree(addr); } } else { - if (addr->type == QETH_IP_TYPE_NORMAL) - addr->ref_counter++; + if (addr->type == QETH_IP_TYPE_NORMAL || + addr->type == QETH_IP_TYPE_RXIP) + addr->ref_counter++; } return rc; @@ -784,11 +786,11 @@ void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto, ipaddr = qeth_l3_get_addr_buffer(proto); if (ipaddr) { if (proto == QETH_PROT_IPV4) { - QETH_CARD_TEXT(card, 2, "addrxip4"); + QETH_CARD_TEXT(card, 2, "delrxip4"); memcpy(&ipaddr->u.a4.addr, addr, 4); ipaddr->u.a4.mask = 0; } else if (proto == QETH_PROT_IPV6) { - QETH_CARD_TEXT(card, 2, "addrxip6"); + QETH_CARD_TEXT(card, 2, "delrxip6"); memcpy(&ipaddr->u.a6.addr, addr, 16); ipaddr->u.a6.pfxlen = 0; } @@ -867,7 +869,7 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card, return rc; } -static inline u8 qeth_l3_get_qeth_hdr_flags4(int cast_type) +static u8 qeth_l3_get_qeth_hdr_flags4(int cast_type) { if (cast_type == RTN_MULTICAST) return QETH_CAST_MULTICAST; @@ -876,7 +878,7 @@ static inline u8 qeth_l3_get_qeth_hdr_flags4(int cast_type) return QETH_CAST_UNICAST; } -static inline u8 qeth_l3_get_qeth_hdr_flags6(int cast_type) +static u8 qeth_l3_get_qeth_hdr_flags6(int cast_type) { u8 ct = QETH_HDR_PASSTHRU | QETH_HDR_IPV6; if (cast_type == RTN_MULTICAST) @@ -890,22 +892,10 @@ static inline u8 qeth_l3_get_qeth_hdr_flags6(int cast_type) static int qeth_l3_setadapter_parms(struct qeth_card *card) { - int rc; + int rc = 0; QETH_DBF_TEXT(SETUP, 2, "setadprm"); - if (!qeth_is_supported(card, IPA_SETADAPTERPARMS)) { - dev_info(&card->gdev->dev, - "set adapter parameters not supported.\n"); - QETH_DBF_TEXT(SETUP, 2, " notsupp"); - return 0; - } - rc = qeth_query_setadapterparms(card); - if (rc) { - QETH_DBF_MESSAGE(2, "%s couldn't set adapter parameters: " - "0x%x\n", dev_name(&card->gdev->dev), rc); - return rc; - } if (qeth_adp_supported(card, IPA_SETADP_ALTER_MAC_ADDRESS)) { rc = qeth_setadpparms_change_macaddr(card); if (rc) @@ -1656,9 +1646,8 @@ static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev, return 0; } -static inline int qeth_l3_rebuild_skb(struct qeth_card *card, - struct sk_buff *skb, struct qeth_hdr *hdr, - unsigned short *vlan_id) +static int qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, + struct qeth_hdr *hdr, unsigned short *vlan_id) { __u16 prot; struct iphdr *ip_hdr; @@ -2408,7 +2397,7 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) return rc; } -inline int qeth_l3_get_cast_type(struct qeth_card *card, 
struct sk_buff *skb) +static int qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb) { int cast_type = RTN_UNSPEC; struct neighbour *n = NULL; @@ -2546,8 +2535,8 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, rcu_read_unlock(); } -static inline void qeth_l3_hdr_csum(struct qeth_card *card, - struct qeth_hdr *hdr, struct sk_buff *skb) +static void qeth_l3_hdr_csum(struct qeth_card *card, struct qeth_hdr *hdr, + struct sk_buff *skb) { struct iphdr *iph = ip_hdr(skb); @@ -2582,7 +2571,7 @@ static void qeth_tso_fill_header(struct qeth_card *card, hdr->ext.hdr_len = 28; /*insert non-fix values */ hdr->ext.mss = skb_shinfo(skb)->gso_size; - hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4); + hdr->ext.dg_hdr_len = (__u16)(ip_hdrlen(skb) + tcp_hdrlen(skb)); hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len - sizeof(struct qeth_hdr_tso)); tcph->check = 0; @@ -2648,9 +2637,10 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, qeth_get_priority_queue(card, skb, ipv, cast_type) : card->qdio.default_out_queue]; int tx_bytes = skb->len; + unsigned int hd_len = 0; bool use_tso; int data_offset = -1; - int nr_frags; + unsigned int nr_frags; if (((card->info.type == QETH_CARD_TYPE_IQD) && (((card->options.cq != QETH_CQ_ENABLED) && !ipv) || @@ -2675,11 +2665,12 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, /* Ignore segment size from skb_is_gso(), 1 page is always used. */ use_tso = skb_is_gso(skb) && - (qeth_get_ip_protocol(skb) == IPPROTO_TCP) && (ipv == 4); + (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4); if (card->info.type == QETH_CARD_TYPE_IQD) { new_skb = skb; data_offset = ETH_HLEN; + hd_len = sizeof(*hdr); hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC); if (!hdr) goto tx_drop; @@ -2727,6 +2718,7 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, if (lin_rc) goto tx_drop; } + nr_frags = skb_shinfo(new_skb)->nr_frags; if (use_tso) { hdr = skb_push(new_skb, sizeof(struct qeth_hdr_tso)); @@ -2766,19 +2758,21 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, if (card->info.type != QETH_CARD_TYPE_IQD) { int len; - if (use_tso) - len = ((unsigned long)tcp_hdr(new_skb) + - tcp_hdrlen(new_skb)) - - (unsigned long)new_skb->data; - else + if (use_tso) { + hd_len = sizeof(struct qeth_hdr_tso) + + ip_hdrlen(new_skb) + tcp_hdrlen(new_skb); + len = hd_len; + } else { len = sizeof(struct qeth_hdr_layer3); + } if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len)) goto tx_drop; - rc = qeth_do_send_packet(card, queue, new_skb, hdr, elements); + rc = qeth_do_send_packet(card, queue, new_skb, hdr, hd_len, + hd_len, elements); } else rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr, - data_offset, 0); + data_offset, hd_len); if (!rc) { card->stats.tx_packets++; @@ -2786,7 +2780,6 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, if (new_skb != skb) dev_kfree_skb_any(skb); if (card->options.performance_stats) { - nr_frags = skb_shinfo(new_skb)->nr_frags; if (use_tso) { card->perf_stats.large_send_bytes += tx_bytes; card->perf_stats.large_send_cnt++; diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index 1a80ce41425e..e8bcc314cc5f 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c @@ -895,9 +895,26 @@ static ssize_t qeth_l3_dev_rxip_add4_show(struct device *dev, static int qeth_l3_parse_rxipe(const char *buf, enum qeth_prot_versions proto, u8 *addr) { + __be32 ipv4_addr; + struct in6_addr ipv6_addr; 
+ if (qeth_l3_string_to_ipaddr(buf, proto, addr)) { return -EINVAL; } + if (proto == QETH_PROT_IPV4) { + memcpy(&ipv4_addr, addr, sizeof(ipv4_addr)); + if (ipv4_is_multicast(ipv4_addr)) { + QETH_DBF_MESSAGE(2, "multicast rxip not supported.\n"); + return -EINVAL; + } + } else if (proto == QETH_PROT_IPV6) { + memcpy(&ipv6_addr, addr, sizeof(ipv6_addr)); + if (ipv6_addr_is_multicast(&ipv6_addr)) { + QETH_DBF_MESSAGE(2, "multicast rxip not supported.\n"); + return -EINVAL; + } + } + return 0; } diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index bcc8f3dfd4c4..82ac331d9125 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -29,7 +29,6 @@ #define KMSG_COMPONENT "zfcp" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt -#include #include #include #include diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index d5bf36ec8a75..8227076c9cbb 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -3,7 +3,7 @@ * * Debug traces for zfcp. * - * Copyright IBM Corp. 2002, 2016 + * Copyright IBM Corp. 2002, 2017 */ #define KMSG_COMPONENT "zfcp" @@ -113,8 +113,12 @@ void zfcp_dbf_hba_fsf_uss(char *tag, struct zfcp_fsf_req *req) struct zfcp_dbf *dbf = req->adapter->dbf; struct fsf_status_read_buffer *srb = req->data; struct zfcp_dbf_hba *rec = &dbf->hba_buf; + static int const level = 2; unsigned long flags; + if (unlikely(!debug_level_enabled(dbf->hba, level))) + return; + spin_lock_irqsave(&dbf->hba_lock, flags); memset(rec, 0, sizeof(*rec)); @@ -142,7 +146,7 @@ void zfcp_dbf_hba_fsf_uss(char *tag, struct zfcp_fsf_req *req) zfcp_dbf_pl_write(dbf, srb->payload.data, rec->pl_len, "fsf_uss", req->req_id); log: - debug_event(dbf->hba, 2, rec, sizeof(*rec)); + debug_event(dbf->hba, level, rec, sizeof(*rec)); spin_unlock_irqrestore(&dbf->hba_lock, flags); } @@ -156,8 +160,12 @@ void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req) struct zfcp_dbf *dbf = req->adapter->dbf; struct zfcp_dbf_hba *rec = &dbf->hba_buf; struct fsf_status_read_buffer *sr_buf = req->data; + static int const level = 1; unsigned long flags; + if (unlikely(!debug_level_enabled(dbf->hba, level))) + return; + spin_lock_irqsave(&dbf->hba_lock, flags); memset(rec, 0, sizeof(*rec)); @@ -169,7 +177,7 @@ void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req) memcpy(&rec->u.be, &sr_buf->payload.bit_error, sizeof(struct fsf_bit_error_payload)); - debug_event(dbf->hba, 1, rec, sizeof(*rec)); + debug_event(dbf->hba, level, rec, sizeof(*rec)); spin_unlock_irqrestore(&dbf->hba_lock, flags); } @@ -186,8 +194,12 @@ void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount, struct zfcp_dbf *dbf = adapter->dbf; struct zfcp_dbf_pay *payload = &dbf->pay_buf; unsigned long flags; + static int const level = 1; u16 length; + if (unlikely(!debug_level_enabled(dbf->pay, level))) + return; + if (!pl) return; @@ -202,7 +214,7 @@ void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount, while (payload->counter < scount && (char *)pl[payload->counter]) { memcpy(payload->data, (char *)pl[payload->counter], length); - debug_event(dbf->pay, 1, payload, zfcp_dbf_plen(length)); + debug_event(dbf->pay, level, payload, zfcp_dbf_plen(length)); payload->counter++; } @@ -217,15 +229,19 @@ void zfcp_dbf_hba_basic(char *tag, struct zfcp_adapter *adapter) { struct zfcp_dbf *dbf = adapter->dbf; struct zfcp_dbf_hba *rec = &dbf->hba_buf; + static int const level = 1; unsigned long flags; + if 
(unlikely(!debug_level_enabled(dbf->hba, level))) + return; + spin_lock_irqsave(&dbf->hba_lock, flags); memset(rec, 0, sizeof(*rec)); memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN); rec->id = ZFCP_DBF_HBA_BASIC; - debug_event(dbf->hba, 1, rec, sizeof(*rec)); + debug_event(dbf->hba, level, rec, sizeof(*rec)); spin_unlock_irqrestore(&dbf->hba_lock, flags); } @@ -264,9 +280,13 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter, { struct zfcp_dbf *dbf = adapter->dbf; struct zfcp_dbf_rec *rec = &dbf->rec_buf; + static int const level = 1; struct list_head *entry; unsigned long flags; + if (unlikely(!debug_level_enabled(dbf->rec, level))) + return; + spin_lock_irqsave(&dbf->rec_lock, flags); memset(rec, 0, sizeof(*rec)); @@ -283,7 +303,7 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter, rec->u.trig.want = want; rec->u.trig.need = need; - debug_event(dbf->rec, 1, rec, sizeof(*rec)); + debug_event(dbf->rec, level, rec, sizeof(*rec)); spin_unlock_irqrestore(&dbf->rec_lock, flags); } @@ -300,6 +320,9 @@ void zfcp_dbf_rec_run_lvl(int level, char *tag, struct zfcp_erp_action *erp) struct zfcp_dbf_rec *rec = &dbf->rec_buf; unsigned long flags; + if (!debug_level_enabled(dbf->rec, level)) + return; + spin_lock_irqsave(&dbf->rec_lock, flags); memset(rec, 0, sizeof(*rec)); @@ -345,8 +368,12 @@ void zfcp_dbf_rec_run_wka(char *tag, struct zfcp_fc_wka_port *wka_port, { struct zfcp_dbf *dbf = wka_port->adapter->dbf; struct zfcp_dbf_rec *rec = &dbf->rec_buf; + static int const level = 1; unsigned long flags; + if (unlikely(!debug_level_enabled(dbf->rec, level))) + return; + spin_lock_irqsave(&dbf->rec_lock, flags); memset(rec, 0, sizeof(*rec)); @@ -362,10 +389,12 @@ void zfcp_dbf_rec_run_wka(char *tag, struct zfcp_fc_wka_port *wka_port, rec->u.run.rec_action = ~0; rec->u.run.rec_count = ~0; - debug_event(dbf->rec, 1, rec, sizeof(*rec)); + debug_event(dbf->rec, level, rec, sizeof(*rec)); spin_unlock_irqrestore(&dbf->rec_lock, flags); } +#define ZFCP_DBF_SAN_LEVEL 1 + static inline void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, char *paytag, struct scatterlist *sg, u8 id, u16 len, @@ -408,7 +437,7 @@ void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, (u16)(sg->length - offset)); /* cap_len <= pay_sum < cap_len+ZFCP_DBF_PAY_MAX_REC */ memcpy(payload->data, sg_virt(sg) + offset, pay_len); - debug_event(dbf->pay, 1, payload, + debug_event(dbf->pay, ZFCP_DBF_SAN_LEVEL, payload, zfcp_dbf_plen(pay_len)); payload->counter++; offset += pay_len; @@ -418,7 +447,7 @@ void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, spin_unlock(&dbf->pay_lock); out: - debug_event(dbf->san, 1, rec, sizeof(*rec)); + debug_event(dbf->san, ZFCP_DBF_SAN_LEVEL, rec, sizeof(*rec)); spin_unlock_irqrestore(&dbf->san_lock, flags); } @@ -434,6 +463,9 @@ void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id) struct zfcp_fsf_ct_els *ct_els = fsf->data; u16 length; + if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL))) + return; + length = (u16)zfcp_qdio_real_bytes(ct_els->req); zfcp_dbf_san(tag, dbf, "san_req", ct_els->req, ZFCP_DBF_SAN_REQ, length, fsf->req_id, d_id, length); @@ -447,6 +479,7 @@ static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag, struct fc_ct_hdr *reqh = sg_virt(ct_els->req); struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1); struct scatterlist *resp_entry = ct_els->resp; + struct fc_ct_hdr *resph; struct fc_gpn_ft_resp *acc; int max_entries, x, last = 0; @@ -460,7 +493,7 @@ static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag, && reqh->ct_fs_subtype == 
FC_NS_SUBTYPE && reqh->ct_options == 0 && reqh->_ct_resvd1 == 0 - && reqh->ct_cmd == FC_NS_GPN_FT + && reqh->ct_cmd == cpu_to_be16(FC_NS_GPN_FT) /* reqh->ct_mr_size can vary so do not match but read below */ && reqh->_ct_resvd2 == 0 && reqh->ct_reason == 0 @@ -473,7 +506,15 @@ static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag, return len; /* not GPN_FT response so do not cap */ acc = sg_virt(resp_entry); - max_entries = (reqh->ct_mr_size * 4 / sizeof(struct fc_gpn_ft_resp)) + + /* cap all but accept CT responses to at least the CT header */ + resph = (struct fc_ct_hdr *)acc; + if ((ct_els->status) || + (resph->ct_cmd != cpu_to_be16(FC_FS_ACC))) + return max(FC_CT_HDR_LEN, ZFCP_DBF_SAN_MAX_PAYLOAD); + + max_entries = (be16_to_cpu(reqh->ct_mr_size) * 4 / + sizeof(struct fc_gpn_ft_resp)) + 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one * to account for header as 1st pseudo "entry" */; @@ -503,6 +544,9 @@ void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf) struct zfcp_fsf_ct_els *ct_els = fsf->data; u16 length; + if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL))) + return; + length = (u16)zfcp_qdio_real_bytes(ct_els->resp); zfcp_dbf_san(tag, dbf, "san_res", ct_els->resp, ZFCP_DBF_SAN_RES, length, fsf->req_id, ct_els->d_id, @@ -522,6 +566,9 @@ void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf) u16 length; struct scatterlist sg; + if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL))) + return; + length = (u16)(srb->length - offsetof(struct fsf_status_read_buffer, payload)); sg_init_one(&sg, srb->payload.data, length); @@ -555,8 +602,8 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc, rec->scsi_retries = sc->retries; rec->scsi_allowed = sc->allowed; rec->scsi_id = sc->device->id; - /* struct zfcp_dbf_scsi needs to be updated to handle 64bit LUNs */ rec->scsi_lun = (u32)sc->device->lun; + rec->scsi_lun_64_hi = (u32)(sc->device->lun >> 32); rec->host_scribble = (unsigned long)sc->host_scribble; memcpy(rec->scsi_opcode, sc->cmnd, @@ -564,19 +611,31 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc, if (fsf) { rec->fsf_req_id = fsf->req_id; - fcp_rsp = (struct fcp_resp_with_ext *) - &(fsf->qtcb->bottom.io.fcp_rsp); + rec->pl_len = FCP_RESP_WITH_EXT; + fcp_rsp = &(fsf->qtcb->bottom.io.fcp_rsp.iu); + /* mandatory parts of FCP_RSP IU in this SCSI record */ memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT); if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) { fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1]; rec->fcp_rsp_info = fcp_rsp_info->rsp_code; + rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_rsp_len); } if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) { - rec->pl_len = min((u16)SCSI_SENSE_BUFFERSIZE, - (u16)ZFCP_DBF_PAY_MAX_REC); - zfcp_dbf_pl_write(dbf, sc->sense_buffer, rec->pl_len, - "fcp_sns", fsf->req_id); + rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_sns_len); } + /* complete FCP_RSP IU in associated PAYload record + * but only if there are optional parts + */ + if (fcp_rsp->resp.fr_flags != 0) + zfcp_dbf_pl_write( + dbf, fcp_rsp, + /* at least one full PAY record + * but not beyond hardware response field + */ + min_t(u16, max_t(u16, rec->pl_len, + ZFCP_DBF_PAY_MAX_REC), + FSF_FCP_RSP_SIZE), + "fcp_riu", fsf->req_id); } debug_event(dbf->scsi, level, rec, sizeof(*rec)); diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h index db186d44cfaf..3508c00458f4 100644 --- a/drivers/s390/scsi/zfcp_dbf.h +++ b/drivers/s390/scsi/zfcp_dbf.h @@ -2,7 +2,7 @@ * zfcp device driver * debug feature 
declarations * - * Copyright IBM Corp. 2008, 2016 + * Copyright IBM Corp. 2008, 2017 */ #ifndef ZFCP_DBF_H @@ -204,16 +204,17 @@ enum zfcp_dbf_scsi_id { * @id: unique number of recovery record type * @tag: identifier string specifying the location of initiation * @scsi_id: scsi device id - * @scsi_lun: scsi device logical unit number + * @scsi_lun: scsi device logical unit number, low part of 64 bit, old 32 bit * @scsi_result: scsi result * @scsi_retries: current retry number of scsi request * @scsi_allowed: allowed retries - * @fcp_rsp_info: FCP response info + * @fcp_rsp_info: FCP response info code * @scsi_opcode: scsi opcode * @fsf_req_id: request id of fsf request * @host_scribble: LLD specific data attached to SCSI request - * @pl_len: length of paload stored as zfcp_dbf_pay - * @fsf_rsp: response for fsf request + * @pl_len: length of payload stored as zfcp_dbf_pay + * @fcp_rsp: response for FCP request + * @scsi_lun_64_hi: scsi device logical unit number, high part of 64 bit */ struct zfcp_dbf_scsi { u8 id; @@ -230,6 +231,7 @@ struct zfcp_dbf_scsi { u64 host_scribble; u16 pl_len; struct fcp_resp_with_ext fcp_rsp; + u32 scsi_lun_64_hi; } __packed; /** @@ -299,7 +301,7 @@ bool zfcp_dbf_hba_fsf_resp_suppress(struct zfcp_fsf_req *req) if (qtcb->prefix.qtcb_type != FSF_IO_COMMAND) return false; /* not an FCP response */ - fcp_rsp = (struct fcp_resp *)&qtcb->bottom.io.fcp_rsp; + fcp_rsp = &qtcb->bottom.io.fcp_rsp.iu.resp; rsp_flags = fcp_rsp->fr_flags; fr_status = fcp_rsp->fr_status; return (fsf_stat == FSF_FCP_RSP_AVAILABLE) && @@ -323,7 +325,11 @@ void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req) { struct fsf_qtcb *qtcb = req->qtcb; - if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) && + if (unlikely(req->status & (ZFCP_STATUS_FSFREQ_DISMISSED | + ZFCP_STATUS_FSFREQ_ERROR))) { + zfcp_dbf_hba_fsf_resp("fs_rerr", 3, req); + + } else if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) && (qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) { zfcp_dbf_hba_fsf_resp("fs_perr", 1, req); @@ -401,7 +407,8 @@ void zfcp_dbf_scsi_abort(char *tag, struct scsi_cmnd *scmd, * @flag: indicates type of reset (Target Reset, Logical Unit Reset) */ static inline -void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag) +void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag, + struct zfcp_fsf_req *fsf_req) { char tmp_tag[ZFCP_DBF_TAG_LEN]; @@ -411,7 +418,7 @@ void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag) memcpy(tmp_tag, "lr_", 3); memcpy(&tmp_tag[3], tag, 4); - _zfcp_dbf_scsi(tmp_tag, 1, scmnd, NULL); + _zfcp_dbf_scsi(tmp_tag, 1, scmnd, fsf_req); } /** diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 7ccfce559034..37408f5f81ce 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -572,9 +572,8 @@ static void zfcp_erp_memwait_handler(unsigned long data) static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action) { - init_timer(&erp_action->timer); - erp_action->timer.function = zfcp_erp_memwait_handler; - erp_action->timer.data = (unsigned long) erp_action; + setup_timer(&erp_action->timer, zfcp_erp_memwait_handler, + (unsigned long) erp_action); erp_action->timer.expires = jiffies + HZ; add_timer(&erp_action->timer); } diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 9afdbc32b23f..a9e968717dd9 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -41,7 +41,6 @@ extern void zfcp_dbf_rec_run_wka(char *, struct 
zfcp_fc_wka_port *, u64); extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *); extern void zfcp_dbf_hba_fsf_res(char *, int, struct zfcp_fsf_req *); extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *); -extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *); extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **); extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *); extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32); diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 7331eea67435..8210645c2111 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -3,7 +3,7 @@ * * Fibre Channel related functions for the zfcp device driver. * - * Copyright IBM Corp. 2008, 2010 + * Copyright IBM Corp. 2008, 2017 */ #define KMSG_COMPONENT "zfcp" @@ -29,7 +29,7 @@ static u32 zfcp_fc_rscn_range_mask[] = { }; static bool no_auto_port_rescan; -module_param_named(no_auto_port_rescan, no_auto_port_rescan, bool, 0600); +module_param(no_auto_port_rescan, bool, 0600); MODULE_PARM_DESC(no_auto_port_rescan, "no automatic port_rescan (default off)"); @@ -260,7 +260,8 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req) page = (struct fc_els_rscn_page *) head; /* see FC-FS */ - no_entries = head->rscn_plen / sizeof(struct fc_els_rscn_page); + no_entries = be16_to_cpu(head->rscn_plen) / + sizeof(struct fc_els_rscn_page); for (i = 1; i < no_entries; i++) { /* skip head and start with 1st element */ @@ -296,7 +297,7 @@ static void zfcp_fc_incoming_plogi(struct zfcp_fsf_req *req) status_buffer = (struct fsf_status_read_buffer *) req->data; plogi = (struct fc_els_flogi *) status_buffer->payload.data; - zfcp_fc_incoming_wwpn(req, plogi->fl_wwpn); + zfcp_fc_incoming_wwpn(req, be64_to_cpu(plogi->fl_wwpn)); } static void zfcp_fc_incoming_logo(struct zfcp_fsf_req *req) @@ -306,7 +307,7 @@ static void zfcp_fc_incoming_logo(struct zfcp_fsf_req *req) struct fc_els_logo *logo = (struct fc_els_logo *) status_buffer->payload.data; - zfcp_fc_incoming_wwpn(req, logo->fl_n_port_wwn); + zfcp_fc_incoming_wwpn(req, be64_to_cpu(logo->fl_n_port_wwn)); } /** @@ -335,7 +336,7 @@ static void zfcp_fc_ns_gid_pn_eval(struct zfcp_fc_req *fc_req) if (ct_els->status) return; - if (gid_pn_rsp->ct_hdr.ct_cmd != FC_FS_ACC) + if (gid_pn_rsp->ct_hdr.ct_cmd != cpu_to_be16(FC_FS_ACC)) return; /* looks like a valid d_id */ @@ -352,8 +353,8 @@ static void zfcp_fc_ct_ns_init(struct fc_ct_hdr *ct_hdr, u16 cmd, u16 mr_size) ct_hdr->ct_rev = FC_CT_REV; ct_hdr->ct_fs_type = FC_FST_DIR; ct_hdr->ct_fs_subtype = FC_NS_SUBTYPE; - ct_hdr->ct_cmd = cmd; - ct_hdr->ct_mr_size = mr_size / 4; + ct_hdr->ct_cmd = cpu_to_be16(cmd); + ct_hdr->ct_mr_size = cpu_to_be16(mr_size / 4); } static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port, @@ -376,7 +377,7 @@ static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port, zfcp_fc_ct_ns_init(&gid_pn_req->ct_hdr, FC_NS_GID_PN, ZFCP_FC_CT_SIZE_PAGE); - gid_pn_req->gid_pn.fn_wwpn = port->wwpn; + gid_pn_req->gid_pn.fn_wwpn = cpu_to_be64(port->wwpn); ret = zfcp_fsf_send_ct(&adapter->gs->ds, &fc_req->ct_els, adapter->pool.gid_pn_req, @@ -460,26 +461,26 @@ void zfcp_fc_trigger_did_lookup(struct zfcp_port *port) */ void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fc_els_flogi *plogi) { - if (plogi->fl_wwpn != port->wwpn) { + if (be64_to_cpu(plogi->fl_wwpn) != port->wwpn) { port->d_id = 0; dev_warn(&port->adapter->ccw_device->dev, "A port opened with WWPN 0x%016Lx returned data that " 
"identifies it as WWPN 0x%016Lx\n", (unsigned long long) port->wwpn, - (unsigned long long) plogi->fl_wwpn); + (unsigned long long) be64_to_cpu(plogi->fl_wwpn)); return; } - port->wwnn = plogi->fl_wwnn; - port->maxframe_size = plogi->fl_csp.sp_bb_data; + port->wwnn = be64_to_cpu(plogi->fl_wwnn); + port->maxframe_size = be16_to_cpu(plogi->fl_csp.sp_bb_data); - if (plogi->fl_cssp[0].cp_class & FC_CPC_VALID) + if (plogi->fl_cssp[0].cp_class & cpu_to_be16(FC_CPC_VALID)) port->supported_classes |= FC_COS_CLASS1; - if (plogi->fl_cssp[1].cp_class & FC_CPC_VALID) + if (plogi->fl_cssp[1].cp_class & cpu_to_be16(FC_CPC_VALID)) port->supported_classes |= FC_COS_CLASS2; - if (plogi->fl_cssp[2].cp_class & FC_CPC_VALID) + if (plogi->fl_cssp[2].cp_class & cpu_to_be16(FC_CPC_VALID)) port->supported_classes |= FC_COS_CLASS3; - if (plogi->fl_cssp[3].cp_class & FC_CPC_VALID) + if (plogi->fl_cssp[3].cp_class & cpu_to_be16(FC_CPC_VALID)) port->supported_classes |= FC_COS_CLASS4; } @@ -497,9 +498,9 @@ static void zfcp_fc_adisc_handler(void *data) } if (!port->wwnn) - port->wwnn = adisc_resp->adisc_wwnn; + port->wwnn = be64_to_cpu(adisc_resp->adisc_wwnn); - if ((port->wwpn != adisc_resp->adisc_wwpn) || + if ((port->wwpn != be64_to_cpu(adisc_resp->adisc_wwpn)) || !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)) { zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, "fcadh_2"); @@ -538,8 +539,8 @@ static int zfcp_fc_adisc(struct zfcp_port *port) /* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports without FC-AL-2 capability, so we don't set it */ - fc_req->u.adisc.req.adisc_wwpn = fc_host_port_name(shost); - fc_req->u.adisc.req.adisc_wwnn = fc_host_node_name(shost); + fc_req->u.adisc.req.adisc_wwpn = cpu_to_be64(fc_host_port_name(shost)); + fc_req->u.adisc.req.adisc_wwnn = cpu_to_be64(fc_host_node_name(shost)); fc_req->u.adisc.req.adisc_cmd = ELS_ADISC; hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost)); @@ -666,8 +667,8 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_req *fc_req, if (ct_els->status) return -EIO; - if (hdr->ct_cmd != FC_FS_ACC) { - if (hdr->ct_reason == FC_BA_RJT_UNABLE) + if (hdr->ct_cmd != cpu_to_be16(FC_FS_ACC)) { + if (hdr->ct_reason == FC_FS_RJT_UNABL) return -EAGAIN; /* might be a temporary condition */ return -EIO; } @@ -693,10 +694,11 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_req *fc_req, if (d_id >= FC_FID_WELL_KNOWN_BASE) continue; /* skip the adapter's port and known remote ports */ - if (acc->fp_wwpn == fc_host_port_name(adapter->scsi_host)) + if (be64_to_cpu(acc->fp_wwpn) == + fc_host_port_name(adapter->scsi_host)) continue; - port = zfcp_port_enqueue(adapter, acc->fp_wwpn, + port = zfcp_port_enqueue(adapter, be64_to_cpu(acc->fp_wwpn), ZFCP_STATUS_COMMON_NOESC, d_id); if (!IS_ERR(port)) zfcp_erp_port_reopen(port, 0, "fcegpf1"); diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h index df2b541c8287..41f22d3dc6d1 100644 --- a/drivers/s390/scsi/zfcp_fc.h +++ b/drivers/s390/scsi/zfcp_fc.h @@ -4,7 +4,7 @@ * Fibre Channel related definitions and inline functions for the zfcp * device driver * - * Copyright IBM Corp. 2009 + * Copyright IBM Corp. 
2009, 2017 */ #ifndef ZFCP_FC_H @@ -212,6 +212,8 @@ static inline void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi, u8 tm_flags) { + u32 datalen; + int_to_scsilun(scsi->device->lun, (struct scsi_lun *) &fcp->fc_lun); if (unlikely(tm_flags)) { @@ -228,10 +230,13 @@ void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi, memcpy(fcp->fc_cdb, scsi->cmnd, scsi->cmd_len); - fcp->fc_dl = scsi_bufflen(scsi); + datalen = scsi_bufflen(scsi); + fcp->fc_dl = cpu_to_be32(datalen); - if (scsi_get_prot_type(scsi) == SCSI_PROT_DIF_TYPE1) - fcp->fc_dl += fcp->fc_dl / scsi->device->sector_size * 8; + if (scsi_get_prot_type(scsi) == SCSI_PROT_DIF_TYPE1) { + datalen += datalen / scsi->device->sector_size * 8; + fcp->fc_dl = cpu_to_be32(datalen); + } } /** @@ -266,19 +271,23 @@ void zfcp_fc_eval_fcp_rsp(struct fcp_resp_with_ext *fcp_rsp, if (unlikely(rsp_flags & FCP_SNS_LEN_VAL)) { sense = (char *) &fcp_rsp[1]; if (rsp_flags & FCP_RSP_LEN_VAL) - sense += fcp_rsp->ext.fr_rsp_len; - sense_len = min(fcp_rsp->ext.fr_sns_len, - (u32) SCSI_SENSE_BUFFERSIZE); + sense += be32_to_cpu(fcp_rsp->ext.fr_rsp_len); + sense_len = min_t(u32, be32_to_cpu(fcp_rsp->ext.fr_sns_len), + SCSI_SENSE_BUFFERSIZE); memcpy(scsi->sense_buffer, sense, sense_len); } if (unlikely(rsp_flags & FCP_RESID_UNDER)) { - resid = fcp_rsp->ext.fr_resid; + resid = be32_to_cpu(fcp_rsp->ext.fr_resid); scsi_set_resid(scsi, resid); if (scsi_bufflen(scsi) - resid < scsi->underflow && !(rsp_flags & FCP_SNS_LEN_VAL) && fcp_rsp->resp.fr_status == SAM_STAT_GOOD) set_host_byte(scsi, DID_ERROR); + } else if (unlikely(rsp_flags & FCP_RESID_OVER)) { + /* FCP_DL was not sufficient for SCSI data length */ + if (fcp_rsp->resp.fr_status == SAM_STAT_GOOD) + set_host_byte(scsi, DID_ERROR); } } diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 27ff38f839fc..69d1dc3ec79d 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -3,7 +3,7 @@ * * Implementation of FSF commands. * - * Copyright IBM Corp. 2002, 2015 + * Copyright IBM Corp. 
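As a hedged, concrete illustration of the FCP_DL arithmetic in the zfcp_fc_scsi_to_fcp() hunk above (the helper name example_fcp_dl() and the sample numbers are hypothetical; cpu_to_be32() and the 8-bytes-per-block DIF type 1 adjustment come from the patched code):

/* assumes <linux/types.h> and the byteorder helpers the driver already uses */
static __be32 example_fcp_dl(u32 bufflen, u32 sector_size, bool dif_type1)
{
	u32 datalen = bufflen;

	if (dif_type1)
		/* DIF type 1 adds 8 bytes of protection data per logical block */
		datalen += datalen / sector_size * 8;

	/* FCP_DL is carried big-endian on the wire */
	return cpu_to_be32(datalen);
}

For a 4096-byte transfer on a 512-byte-sector device with DIF type 1 protection this yields 4096 + (4096 / 512) * 8 = 4160 bytes, stored big-endian in fc_dl, matching the two cpu_to_be32() stores in the hunk above.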
2002, 2017 */ #define KMSG_COMPONENT "zfcp" @@ -197,8 +197,6 @@ static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req) switch (sr_buf->status_subtype) { case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK: - zfcp_fsf_link_down_info_eval(req, ldi); - break; case FSF_STATUS_READ_SUB_FDISC_FAILED: zfcp_fsf_link_down_info_eval(req, ldi); break; @@ -476,8 +474,8 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) if (req->data) memcpy(req->data, bottom, sizeof(*bottom)); - fc_host_port_name(shost) = nsp->fl_wwpn; - fc_host_node_name(shost) = nsp->fl_wwnn; + fc_host_port_name(shost) = be64_to_cpu(nsp->fl_wwpn); + fc_host_node_name(shost) = be64_to_cpu(nsp->fl_wwnn); fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3; adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK; @@ -503,8 +501,8 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) switch (bottom->fc_topology) { case FSF_TOPO_P2P: adapter->peer_d_id = ntoh24(bottom->peer_d_id); - adapter->peer_wwpn = plogi->fl_wwpn; - adapter->peer_wwnn = plogi->fl_wwnn; + adapter->peer_wwpn = be64_to_cpu(plogi->fl_wwpn); + adapter->peer_wwnn = be64_to_cpu(plogi->fl_wwnn); fc_host_port_type(shost) = FC_PORTTYPE_PTP; break; case FSF_TOPO_FABRIC: @@ -928,8 +926,8 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req) switch (header->fsf_status) { case FSF_GOOD: - zfcp_dbf_san_res("fsscth2", req); ct->status = 0; + zfcp_dbf_san_res("fsscth2", req); break; case FSF_SERVICE_CLASS_NOT_SUPPORTED: zfcp_fsf_class_not_supp(req); @@ -991,8 +989,7 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, qtcb->bottom.support.resp_buf_length = zfcp_qdio_real_bytes(sg_resp); - zfcp_qdio_set_data_div(qdio, &req->qdio_req, - zfcp_qdio_sbale_count(sg_req)); + zfcp_qdio_set_data_div(qdio, &req->qdio_req, sg_nents(sg_req)); zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); zfcp_qdio_set_scount(qdio, &req->qdio_req); return 0; @@ -1109,8 +1106,8 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req) switch (header->fsf_status) { case FSF_GOOD: - zfcp_dbf_san_res("fsselh1", req); send_els->status = 0; + zfcp_dbf_san_res("fsselh1", req); break; case FSF_SERVICE_CLASS_NOT_SUPPORTED: zfcp_fsf_class_not_supp(req); @@ -1394,6 +1391,8 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req) case FSF_ADAPTER_STATUS_AVAILABLE: switch (header->fsf_status_qual.word[0]) { case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: + /* no zfcp_fc_test_link() with failed open port */ + /* fall through */ case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: case FSF_SQ_NO_RETRY_POSSIBLE: req->status |= ZFCP_STATUS_FSFREQ_ERROR; @@ -2142,7 +2141,8 @@ static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req) zfcp_scsi_dif_sense_error(scpnt, 0x3); goto skip_fsfstatus; } - fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp; + BUILD_BUG_ON(sizeof(struct fcp_resp_with_ext) > FSF_FCP_RSP_SIZE); + fcp_rsp = &req->qtcb->bottom.io.fcp_rsp.iu; zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt); skip_fsfstatus: @@ -2255,10 +2255,12 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd) if (zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction)) goto failed_scsi_cmnd; - fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd; + BUILD_BUG_ON(sizeof(struct fcp_cmnd) > FSF_FCP_CMND_SIZE); + fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu; zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0); - if (scsi_prot_sg_count(scsi_cmnd)) { + if ((scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) && + 
scsi_prot_sg_count(scsi_cmnd)) { zfcp_qdio_set_data_div(qdio, &req->qdio_req, scsi_prot_sg_count(scsi_cmnd)); retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, @@ -2299,7 +2301,7 @@ static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req) zfcp_fsf_fcp_handler_common(req); - fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp; + fcp_rsp = &req->qtcb->bottom.io.fcp_rsp.iu; rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1]; if ((rsp_info->rsp_code != FCP_TMF_CMPL) || @@ -2348,7 +2350,7 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd, zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); - fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd; + fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu; zfcp_fc_scsi_to_fcp(fcp_cmnd, scmnd, tm_flags); zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT); @@ -2392,7 +2394,6 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx) req_id, dev_name(&adapter->ccw_device->dev)); } - fsf_req->qdio_req.sbal_response = sbal_idx; zfcp_fsf_req_complete(fsf_req); if (likely(sbale->eflags & SBAL_EFLAGS_LAST_ENTRY)) diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h index ea3c76ac0de1..88feba5bfda4 100644 --- a/drivers/s390/scsi/zfcp_fsf.h +++ b/drivers/s390/scsi/zfcp_fsf.h @@ -3,7 +3,7 @@ * * Interface to the FSF support functions. * - * Copyright IBM Corp. 2002, 2016 + * Copyright IBM Corp. 2002, 2017 */ #ifndef FSF_H @@ -312,8 +312,14 @@ struct fsf_qtcb_bottom_io { u32 data_block_length; u32 prot_data_length; u8 res2[4]; - u8 fcp_cmnd[FSF_FCP_CMND_SIZE]; - u8 fcp_rsp[FSF_FCP_RSP_SIZE]; + union { + u8 byte[FSF_FCP_CMND_SIZE]; + struct fcp_cmnd iu; + } fcp_cmnd; + union { + u8 byte[FSF_FCP_RSP_SIZE]; + struct fcp_resp_with_ext iu; + } fcp_rsp; u8 res3[64]; } __attribute__ ((packed)); diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index dbf2b54703f7..9e358fc04b78 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c @@ -14,7 +14,7 @@ #include "zfcp_ext.h" #include "zfcp_qdio.h" -static bool enable_multibuffer = 1; +static bool enable_multibuffer = true; module_param_named(datarouter, enable_multibuffer, bool, 0400); MODULE_PARM_DESC(datarouter, "Enable hardware data router support (default on)"); diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h index 497cd379b0d1..7f647a90c750 100644 --- a/drivers/s390/scsi/zfcp_qdio.h +++ b/drivers/s390/scsi/zfcp_qdio.h @@ -54,7 +54,6 @@ struct zfcp_qdio { * @sbal_last: last sbal for this request * @sbal_limit: last possible sbal for this request * @sbale_curr: current sbale at creation of this request - * @sbal_response: sbal used in interrupt * @qdio_outb_usage: usage of outbound queue */ struct zfcp_qdio_req { @@ -64,7 +63,6 @@ struct zfcp_qdio_req { u8 sbal_last; u8 sbal_limit; u8 sbale_curr; - u8 sbal_response; u16 qdio_outb_usage; }; @@ -224,21 +222,6 @@ void zfcp_qdio_set_data_div(struct zfcp_qdio *qdio, sbale->length = count; } -/** - * zfcp_qdio_sbale_count - count sbale used - * @sg: pointer to struct scatterlist - */ -static inline -unsigned int zfcp_qdio_sbale_count(struct scatterlist *sg) -{ - unsigned int count = 0; - - for (; sg; sg = sg_next(sg)) - count++; - - return count; -} - /** * zfcp_qdio_real_bytes - count bytes used * @sg: pointer to struct scatterlist diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 0678cf714c0e..ec3ddd1d31d5 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -3,7 
+3,7 @@ * * Interface to Linux SCSI midlayer. * - * Copyright IBM Corp. 2002, 2016 + * Copyright IBM Corp. 2002, 2017 */ #define KMSG_COMPONENT "zfcp" @@ -28,7 +28,7 @@ static bool enable_dif; module_param_named(dif, enable_dif, bool, 0400); MODULE_PARM_DESC(dif, "Enable DIF/DIX data integrity support"); -static bool allow_lun_scan = 1; +static bool allow_lun_scan = true; module_param(allow_lun_scan, bool, 0600); MODULE_PARM_DESC(allow_lun_scan, "For NPIV, scan and attach all storage LUNs"); @@ -273,25 +273,29 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags) zfcp_erp_wait(adapter); ret = fc_block_scsi_eh(scpnt); - if (ret) + if (ret) { + zfcp_dbf_scsi_devreset("fiof", scpnt, tm_flags, NULL); return ret; + } if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_RUNNING)) { - zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags); + zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags, NULL); return SUCCESS; } } - if (!fsf_req) + if (!fsf_req) { + zfcp_dbf_scsi_devreset("reqf", scpnt, tm_flags, NULL); return FAILED; + } wait_for_completion(&fsf_req->completion); if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) { - zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags); + zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags, fsf_req); retval = FAILED; } else { - zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags); + zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags, fsf_req); zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags); } diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c index 4b3b08025ef6..6be77b3aa8a5 100644 --- a/drivers/scsi/53c700.c +++ b/drivers/scsi/53c700.c @@ -168,7 +168,6 @@ MODULE_LICENSE("GPL"); STATIC int NCR_700_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *); STATIC int NCR_700_abort(struct scsi_cmnd * SCpnt); -STATIC int NCR_700_bus_reset(struct scsi_cmnd * SCpnt); STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt); STATIC void NCR_700_chip_setup(struct Scsi_Host *host); STATIC void NCR_700_chip_reset(struct Scsi_Host *host); @@ -315,7 +314,6 @@ NCR_700_detect(struct scsi_host_template *tpnt, /* Fill in the missing routines from the host template */ tpnt->queuecommand = NCR_700_queuecommand; tpnt->eh_abort_handler = NCR_700_abort; - tpnt->eh_bus_reset_handler = NCR_700_bus_reset; tpnt->eh_host_reset_handler = NCR_700_host_reset; tpnt->can_queue = NCR_700_COMMAND_SLOTS_PER_HOST; tpnt->sg_tablesize = NCR_700_SG_SEGMENTS; @@ -1938,14 +1936,14 @@ NCR_700_abort(struct scsi_cmnd * SCp) } STATIC int -NCR_700_bus_reset(struct scsi_cmnd * SCp) +NCR_700_host_reset(struct scsi_cmnd * SCp) { DECLARE_COMPLETION_ONSTACK(complete); struct NCR_700_Host_Parameters *hostdata = (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0]; scmd_printk(KERN_INFO, SCp, - "New error handler wants BUS reset, cmd %p\n\t", SCp); + "New error handler wants HOST reset, cmd %p\n\t", SCp); scsi_print_command(SCp); /* In theory, eh_complete should always be null because the @@ -1960,6 +1958,7 @@ NCR_700_bus_reset(struct scsi_cmnd * SCp) hostdata->eh_complete = &complete; NCR_700_internal_bus_reset(SCp->device->host); + NCR_700_chip_reset(SCp->device->host); spin_unlock_irq(SCp->device->host->host_lock); wait_for_completion(&complete); @@ -1974,22 +1973,6 @@ NCR_700_bus_reset(struct scsi_cmnd * SCp) return SUCCESS; } -STATIC int -NCR_700_host_reset(struct scsi_cmnd * SCp) -{ - scmd_printk(KERN_INFO, SCp, "New error handler wants HOST reset\n\t"); - scsi_print_command(SCp); - - spin_lock_irq(SCp->device->host->host_lock); - - NCR_700_internal_bus_reset(SCp->device->host); - 
NCR_700_chip_reset(SCp->device->host); - - spin_unlock_irq(SCp->device->host->host_lock); - - return SUCCESS; -} - STATIC void NCR_700_set_period(struct scsi_target *STp, int period) { diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index d145e0d90227..41366339b950 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -283,7 +283,7 @@ config SCSI_ISCSI_ATTRS config SCSI_SAS_ATTRS tristate "SAS Transport Attributes" depends on SCSI - select BLK_DEV_BSG + select BLK_DEV_BSGLIB help If you wish to export transport-specific information about each attached SAS device to sysfs, say Y. diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c index acc33440bca0..8a0812221d72 100644 --- a/drivers/scsi/NCR5380.c +++ b/drivers/scsi/NCR5380.c @@ -2296,13 +2296,13 @@ static int NCR5380_abort(struct scsi_cmnd *cmd) /** - * NCR5380_bus_reset - reset the SCSI bus + * NCR5380_host_reset - reset the SCSI host * @cmd: SCSI command undergoing EH * * Returns SUCCESS */ -static int NCR5380_bus_reset(struct scsi_cmnd *cmd) +static int NCR5380_host_reset(struct scsi_cmnd *cmd) { struct Scsi_Host *instance = cmd->device->host; struct NCR5380_hostdata *hostdata = shost_priv(instance); diff --git a/drivers/scsi/NCR_Q720.c b/drivers/scsi/NCR_Q720.c index 05835bf1bf9c..54e7d26908ee 100644 --- a/drivers/scsi/NCR_Q720.c +++ b/drivers/scsi/NCR_Q720.c @@ -217,8 +217,7 @@ NCR_Q720_probe(struct device *dev) } if (dma_declare_coherent_memory(dev, base_addr, base_addr, - mem_size, DMA_MEMORY_MAP) - != DMA_MEMORY_MAP) { + mem_size, 0)) { printk(KERN_ERR "NCR_Q720: DMA declare memory failed\n"); goto out_release_region; } diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c index 9176bfbd5745..61aadc7acb49 100644 --- a/drivers/scsi/a2091.c +++ b/drivers/scsi/a2091.c @@ -147,22 +147,6 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, } } -static int a2091_bus_reset(struct scsi_cmnd *cmd) -{ - struct Scsi_Host *instance = cmd->device->host; - - /* FIXME perform bus-specific reset */ - - /* FIXME 2: kill this function, and let midlayer fall back - to the same action, calling wd33c93_host_reset() */ - - spin_lock_irq(instance->host_lock); - wd33c93_host_reset(cmd); - spin_unlock_irq(instance->host_lock); - - return SUCCESS; -} - static struct scsi_host_template a2091_scsi_template = { .module = THIS_MODULE, .name = "Commodore A2091/A590 SCSI", @@ -171,7 +155,6 @@ static struct scsi_host_template a2091_scsi_template = { .proc_name = "A2901", .queuecommand = wd33c93_queuecommand, .eh_abort_handler = wd33c93_abort, - .eh_bus_reset_handler = a2091_bus_reset, .eh_host_reset_handler = wd33c93_host_reset, .can_queue = CAN_QUEUE, .this_id = 7, diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c index e6375b4de79e..2427a8541247 100644 --- a/drivers/scsi/a3000.c +++ b/drivers/scsi/a3000.c @@ -162,22 +162,6 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, } } -static int a3000_bus_reset(struct scsi_cmnd *cmd) -{ - struct Scsi_Host *instance = cmd->device->host; - - /* FIXME perform bus-specific reset */ - - /* FIXME 2: kill this entire function, which should - cause mid-layer to call wd33c93_host_reset anyway? 
*/ - - spin_lock_irq(instance->host_lock); - wd33c93_host_reset(cmd); - spin_unlock_irq(instance->host_lock); - - return SUCCESS; -} - static struct scsi_host_template amiga_a3000_scsi_template = { .module = THIS_MODULE, .name = "Amiga 3000 built-in SCSI", @@ -186,7 +170,6 @@ static struct scsi_host_template amiga_a3000_scsi_template = { .proc_name = "A3000", .queuecommand = wd33c93_queuecommand, .eh_abort_handler = wd33c93_abort, - .eh_bus_reset_handler = a3000_bus_reset, .eh_host_reset_handler = wd33c93_host_reset, .can_queue = CAN_QUEUE, .this_id = 7, diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index a1a2c71e1626..af3e4d3f9735 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -594,6 +594,7 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd) aac_fib_init(cmd_fibcontext); dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext); + scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; dinfo->command = cpu_to_le32(VM_ContainerConfig); dinfo->type = cpu_to_le32(CT_READ_NAME); @@ -611,10 +612,8 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd) /* * Check that the command queued to the controller */ - if (status == -EINPROGRESS) { - scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; + if (status == -EINPROGRESS) return 0; - } printk(KERN_WARNING "aac_get_container_name: aac_fib_send failed with status: %d.\n", status); aac_fib_complete(cmd_fibcontext); @@ -700,13 +699,13 @@ static void _aac_probe_container1(void * context, struct fib * fibptr) int status; dresp = (struct aac_mount *) fib_data(fibptr); - if (!(fibptr->dev->supplement_adapter_info.supported_options2 & - AAC_OPTION_VARIABLE_BLOCK_SIZE)) + if (!aac_supports_2T(fibptr->dev)) { dresp->mnt[0].capacityhigh = 0; - if ((le32_to_cpu(dresp->status) != ST_OK) || - (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) { - _aac_probe_container2(context, fibptr); - return; + if ((le32_to_cpu(dresp->status) == ST_OK) && + (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) { + _aac_probe_container2(context, fibptr); + return; + } } scsicmd = (struct scsi_cmnd *) context; @@ -725,6 +724,7 @@ static void _aac_probe_container1(void * context, struct fib * fibptr) dinfo->count = cpu_to_le32(scmd_id(scsicmd)); dinfo->type = cpu_to_le32(FT_FILESYS); + scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; status = aac_fib_send(ContainerCommand, fibptr, @@ -736,9 +736,7 @@ static void _aac_probe_container1(void * context, struct fib * fibptr) /* * Check that the command queued to the controller */ - if (status == -EINPROGRESS) - scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; - else if (status < 0) { + if (status < 0 && status != -EINPROGRESS) { /* Inherit results from VM_NameServe, if any */ dresp->status = cpu_to_le32(ST_OK); _aac_probe_container2(context, fibptr); @@ -766,6 +764,7 @@ static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(stru dinfo->count = cpu_to_le32(scmd_id(scsicmd)); dinfo->type = cpu_to_le32(FT_FILESYS); scsicmd->SCp.ptr = (char *)callback; + scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; status = aac_fib_send(ContainerCommand, fibptr, @@ -777,10 +776,9 @@ static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(stru /* * Check that the command queued to the controller */ - if (status == -EINPROGRESS) { - scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; + if (status == -EINPROGRESS) return 0; - } + if (status < 0) { scsicmd->SCp.ptr = NULL; aac_fib_complete(fibptr); @@ -1126,6 +1124,7 @@ static int aac_get_container_serial(struct scsi_cmnd * scsicmd) dinfo->command = 
cpu_to_le32(VM_ContainerConfig); dinfo->type = cpu_to_le32(CT_CID_TO_32BITS_UID); dinfo->cid = cpu_to_le32(scmd_id(scsicmd)); + scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; status = aac_fib_send(ContainerCommand, cmd_fibcontext, @@ -1138,10 +1137,8 @@ static int aac_get_container_serial(struct scsi_cmnd * scsicmd) /* * Check that the command queued to the controller */ - if (status == -EINPROGRESS) { - scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; + if (status == -EINPROGRESS) return 0; - } printk(KERN_WARNING "aac_get_container_serial: aac_fib_send failed with status: %d.\n", status); aac_fib_complete(cmd_fibcontext); @@ -2335,16 +2332,14 @@ static int aac_read(struct scsi_cmnd * scsicmd) * Alocate and initialize a Fib */ cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); - + scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; status = aac_adapter_read(cmd_fibcontext, scsicmd, lba, count); /* * Check that the command queued to the controller */ - if (status == -EINPROGRESS) { - scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; + if (status == -EINPROGRESS) return 0; - } printk(KERN_WARNING "aac_read: aac_fib_send failed with status: %d.\n", status); /* @@ -2429,16 +2424,14 @@ static int aac_write(struct scsi_cmnd * scsicmd) * Allocate and initialize a Fib then setup a BlockWrite command */ cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); - + scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua); /* * Check that the command queued to the controller */ - if (status == -EINPROGRESS) { - scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; + if (status == -EINPROGRESS) return 0; - } printk(KERN_WARNING "aac_write: aac_fib_send failed with status: %d\n", status); /* @@ -2588,6 +2581,7 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd) synchronizecmd->cid = cpu_to_le32(scmd_id(scsicmd)); synchronizecmd->count = cpu_to_le32(sizeof(((struct aac_synchronize_reply *)NULL)->data)); + scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; /* * Now send the Fib to the adapter @@ -2603,10 +2597,8 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd) /* * Check that the command queued to the controller */ - if (status == -EINPROGRESS) { - scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; + if (status == -EINPROGRESS) return 0; - } printk(KERN_WARNING "aac_synchronize: aac_fib_send failed with status: %d.\n", status); @@ -2666,6 +2658,7 @@ static int aac_start_stop(struct scsi_cmnd *scsicmd) pmcmd->cid = cpu_to_le32(sdev_id(sdev)); pmcmd->parm = (scsicmd->cmnd[1] & 1) ? 
cpu_to_le32(CT_PM_UNIT_IMMEDIATE) : 0; + scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; /* * Now send the Fib to the adapter @@ -2681,10 +2674,8 @@ static int aac_start_stop(struct scsi_cmnd *scsicmd) /* * Check that the command queued to the controller */ - if (status == -EINPROGRESS) { - scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; + if (status == -EINPROGRESS) return 0; - } aac_fib_complete(cmd_fibcontext); aac_fib_free(cmd_fibcontext); @@ -3692,16 +3683,14 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd) * Allocate and initialize a Fib then setup a BlockWrite command */ cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); - + scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; status = aac_adapter_scsi(cmd_fibcontext, scsicmd); /* * Check that the command queued to the controller */ - if (status == -EINPROGRESS) { - scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; + if (status == -EINPROGRESS) return 0; - } printk(KERN_WARNING "aac_srb: aac_fib_send failed with status: %d\n", status); aac_fib_complete(cmd_fibcontext); @@ -3739,15 +3728,14 @@ static int aac_send_hba_fib(struct scsi_cmnd *scsicmd) if (!cmd_fibcontext) return -1; + scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; status = aac_adapter_hba(cmd_fibcontext, scsicmd); /* * Check that the command queued to the controller */ - if (status == -EINPROGRESS) { - scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; + if (status == -EINPROGRESS) return 0; - } pr_warn("aac_hba_cmd_req: aac_fib_send failed with status: %d\n", status); @@ -3763,6 +3751,8 @@ static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *psg) struct aac_dev *dev; unsigned long byte_count = 0; int nseg; + struct scatterlist *sg; + int i; dev = (struct aac_dev *)scsicmd->device->host->hostdata; // Get rid of old data @@ -3771,32 +3761,29 @@ static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *psg) psg->sg[0].count = 0; nseg = scsi_dma_map(scsicmd); - if (nseg < 0) + if (nseg <= 0) return nseg; - if (nseg) { - struct scatterlist *sg; - int i; - psg->count = cpu_to_le32(nseg); + psg->count = cpu_to_le32(nseg); - scsi_for_each_sg(scsicmd, sg, nseg, i) { - psg->sg[i].addr = cpu_to_le32(sg_dma_address(sg)); - psg->sg[i].count = cpu_to_le32(sg_dma_len(sg)); - byte_count += sg_dma_len(sg); - } - /* hba wants the size to be exact */ - if (byte_count > scsi_bufflen(scsicmd)) { - u32 temp = le32_to_cpu(psg->sg[i-1].count) - - (byte_count - scsi_bufflen(scsicmd)); - psg->sg[i-1].count = cpu_to_le32(temp); - byte_count = scsi_bufflen(scsicmd); - } - /* Check for command underflow */ - if(scsicmd->underflow && (byte_count < scsicmd->underflow)){ - printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n", - byte_count, scsicmd->underflow); - } + scsi_for_each_sg(scsicmd, sg, nseg, i) { + psg->sg[i].addr = cpu_to_le32(sg_dma_address(sg)); + psg->sg[i].count = cpu_to_le32(sg_dma_len(sg)); + byte_count += sg_dma_len(sg); } + /* hba wants the size to be exact */ + if (byte_count > scsi_bufflen(scsicmd)) { + u32 temp = le32_to_cpu(psg->sg[i-1].count) - + (byte_count - scsi_bufflen(scsicmd)); + psg->sg[i-1].count = cpu_to_le32(temp); + byte_count = scsi_bufflen(scsicmd); + } + /* Check for command underflow */ + if (scsicmd->underflow && (byte_count < scsicmd->underflow)) { + printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n", + byte_count, scsicmd->underflow); + } + return byte_count; } @@ -3807,6 +3794,8 @@ static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg) unsigned long byte_count = 0; u64 addr; int nseg; + struct scatterlist *sg; + int i; dev = (struct 
aac_dev *)scsicmd->device->host->hostdata; // Get rid of old data @@ -3816,34 +3805,31 @@ static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg) psg->sg[0].count = 0; nseg = scsi_dma_map(scsicmd); - if (nseg < 0) + if (nseg <= 0) return nseg; - if (nseg) { - struct scatterlist *sg; - int i; - scsi_for_each_sg(scsicmd, sg, nseg, i) { - int count = sg_dma_len(sg); - addr = sg_dma_address(sg); - psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff); - psg->sg[i].addr[1] = cpu_to_le32(addr>>32); - psg->sg[i].count = cpu_to_le32(count); - byte_count += count; - } - psg->count = cpu_to_le32(nseg); - /* hba wants the size to be exact */ - if (byte_count > scsi_bufflen(scsicmd)) { - u32 temp = le32_to_cpu(psg->sg[i-1].count) - - (byte_count - scsi_bufflen(scsicmd)); - psg->sg[i-1].count = cpu_to_le32(temp); - byte_count = scsi_bufflen(scsicmd); - } - /* Check for command underflow */ - if(scsicmd->underflow && (byte_count < scsicmd->underflow)){ - printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n", - byte_count, scsicmd->underflow); - } + scsi_for_each_sg(scsicmd, sg, nseg, i) { + int count = sg_dma_len(sg); + addr = sg_dma_address(sg); + psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff); + psg->sg[i].addr[1] = cpu_to_le32(addr>>32); + psg->sg[i].count = cpu_to_le32(count); + byte_count += count; } + psg->count = cpu_to_le32(nseg); + /* hba wants the size to be exact */ + if (byte_count > scsi_bufflen(scsicmd)) { + u32 temp = le32_to_cpu(psg->sg[i-1].count) - + (byte_count - scsi_bufflen(scsicmd)); + psg->sg[i-1].count = cpu_to_le32(temp); + byte_count = scsi_bufflen(scsicmd); + } + /* Check for command underflow */ + if (scsicmd->underflow && (byte_count < scsicmd->underflow)) { + printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n", + byte_count, scsicmd->underflow); + } + return byte_count; } @@ -3851,6 +3837,8 @@ static long aac_build_sgraw(struct scsi_cmnd *scsicmd, struct sgmapraw *psg) { unsigned long byte_count = 0; int nseg; + struct scatterlist *sg; + int i; // Get rid of old data psg->count = 0; @@ -3862,37 +3850,34 @@ static long aac_build_sgraw(struct scsi_cmnd *scsicmd, struct sgmapraw *psg) psg->sg[0].flags = 0; nseg = scsi_dma_map(scsicmd); - if (nseg < 0) + if (nseg <= 0) return nseg; - if (nseg) { - struct scatterlist *sg; - int i; - scsi_for_each_sg(scsicmd, sg, nseg, i) { - int count = sg_dma_len(sg); - u64 addr = sg_dma_address(sg); - psg->sg[i].next = 0; - psg->sg[i].prev = 0; - psg->sg[i].addr[1] = cpu_to_le32((u32)(addr>>32)); - psg->sg[i].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff)); - psg->sg[i].count = cpu_to_le32(count); - psg->sg[i].flags = 0; - byte_count += count; - } - psg->count = cpu_to_le32(nseg); - /* hba wants the size to be exact */ - if (byte_count > scsi_bufflen(scsicmd)) { - u32 temp = le32_to_cpu(psg->sg[i-1].count) - - (byte_count - scsi_bufflen(scsicmd)); - psg->sg[i-1].count = cpu_to_le32(temp); - byte_count = scsi_bufflen(scsicmd); - } - /* Check for command underflow */ - if(scsicmd->underflow && (byte_count < scsicmd->underflow)){ - printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n", - byte_count, scsicmd->underflow); - } + scsi_for_each_sg(scsicmd, sg, nseg, i) { + int count = sg_dma_len(sg); + u64 addr = sg_dma_address(sg); + psg->sg[i].next = 0; + psg->sg[i].prev = 0; + psg->sg[i].addr[1] = cpu_to_le32((u32)(addr>>32)); + psg->sg[i].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff)); + psg->sg[i].count = cpu_to_le32(count); + psg->sg[i].flags = 0; + byte_count += count; } + psg->count = 
cpu_to_le32(nseg); + /* hba wants the size to be exact */ + if (byte_count > scsi_bufflen(scsicmd)) { + u32 temp = le32_to_cpu(psg->sg[i-1].count) - + (byte_count - scsi_bufflen(scsicmd)); + psg->sg[i-1].count = cpu_to_le32(temp); + byte_count = scsi_bufflen(scsicmd); + } + /* Check for command underflow */ + if (scsicmd->underflow && (byte_count < scsicmd->underflow)) { + printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n", + byte_count, scsicmd->underflow); + } + return byte_count; } @@ -3901,75 +3886,77 @@ static long aac_build_sgraw2(struct scsi_cmnd *scsicmd, { unsigned long byte_count = 0; int nseg; + struct scatterlist *sg; + int i, conformable = 0; + u32 min_size = PAGE_SIZE, cur_size; nseg = scsi_dma_map(scsicmd); - if (nseg < 0) + if (nseg <= 0) return nseg; - if (nseg) { - struct scatterlist *sg; - int i, conformable = 0; - u32 min_size = PAGE_SIZE, cur_size; - scsi_for_each_sg(scsicmd, sg, nseg, i) { - int count = sg_dma_len(sg); - u64 addr = sg_dma_address(sg); + scsi_for_each_sg(scsicmd, sg, nseg, i) { + int count = sg_dma_len(sg); + u64 addr = sg_dma_address(sg); - BUG_ON(i >= sg_max); - rio2->sge[i].addrHigh = cpu_to_le32((u32)(addr>>32)); - rio2->sge[i].addrLow = cpu_to_le32((u32)(addr & 0xffffffff)); - cur_size = cpu_to_le32(count); - rio2->sge[i].length = cur_size; - rio2->sge[i].flags = 0; - if (i == 0) { - conformable = 1; - rio2->sgeFirstSize = cur_size; - } else if (i == 1) { - rio2->sgeNominalSize = cur_size; + BUG_ON(i >= sg_max); + rio2->sge[i].addrHigh = cpu_to_le32((u32)(addr>>32)); + rio2->sge[i].addrLow = cpu_to_le32((u32)(addr & 0xffffffff)); + cur_size = cpu_to_le32(count); + rio2->sge[i].length = cur_size; + rio2->sge[i].flags = 0; + if (i == 0) { + conformable = 1; + rio2->sgeFirstSize = cur_size; + } else if (i == 1) { + rio2->sgeNominalSize = cur_size; + min_size = cur_size; + } else if ((i+1) < nseg && cur_size != rio2->sgeNominalSize) { + conformable = 0; + if (cur_size < min_size) min_size = cur_size; - } else if ((i+1) < nseg && cur_size != rio2->sgeNominalSize) { - conformable = 0; - if (cur_size < min_size) - min_size = cur_size; - } - byte_count += count; } + byte_count += count; + } - /* hba wants the size to be exact */ - if (byte_count > scsi_bufflen(scsicmd)) { - u32 temp = le32_to_cpu(rio2->sge[i-1].length) - - (byte_count - scsi_bufflen(scsicmd)); - rio2->sge[i-1].length = cpu_to_le32(temp); - byte_count = scsi_bufflen(scsicmd); - } + /* hba wants the size to be exact */ + if (byte_count > scsi_bufflen(scsicmd)) { + u32 temp = le32_to_cpu(rio2->sge[i-1].length) - + (byte_count - scsi_bufflen(scsicmd)); + rio2->sge[i-1].length = cpu_to_le32(temp); + byte_count = scsi_bufflen(scsicmd); + } - rio2->sgeCnt = cpu_to_le32(nseg); - rio2->flags |= cpu_to_le16(RIO2_SG_FORMAT_IEEE1212); - /* not conformable: evaluate required sg elements */ - if (!conformable) { - int j, nseg_new = nseg, err_found; - for (i = min_size / PAGE_SIZE; i >= 1; --i) { - err_found = 0; - nseg_new = 2; - for (j = 1; j < nseg - 1; ++j) { - if (rio2->sge[j].length % (i*PAGE_SIZE)) { - err_found = 1; - break; - } - nseg_new += (rio2->sge[j].length / (i*PAGE_SIZE)); - } - if (!err_found) + rio2->sgeCnt = cpu_to_le32(nseg); + rio2->flags |= cpu_to_le16(RIO2_SG_FORMAT_IEEE1212); + /* not conformable: evaluate required sg elements */ + if (!conformable) { + int j, nseg_new = nseg, err_found; + for (i = min_size / PAGE_SIZE; i >= 1; --i) { + err_found = 0; + nseg_new = 2; + for (j = 1; j < nseg - 1; ++j) { + if (rio2->sge[j].length % (i*PAGE_SIZE)) { + err_found = 1; 
break; + } + nseg_new += (rio2->sge[j].length / (i*PAGE_SIZE)); } - if (i > 0 && nseg_new <= sg_max) - aac_convert_sgraw2(rio2, i, nseg, nseg_new); - } else - rio2->flags |= cpu_to_le16(RIO2_SGL_CONFORMANT); - - /* Check for command underflow */ - if (scsicmd->underflow && (byte_count < scsicmd->underflow)) { - printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n", - byte_count, scsicmd->underflow); + if (!err_found) + break; } + if (i > 0 && nseg_new <= sg_max) { + int ret = aac_convert_sgraw2(rio2, i, nseg, nseg_new); + + if (ret < 0) + return ret; + } + } else + rio2->flags |= cpu_to_le16(RIO2_SGL_CONFORMANT); + + /* Check for command underflow */ + if (scsicmd->underflow && (byte_count < scsicmd->underflow)) { + printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n", + byte_count, scsicmd->underflow); } return byte_count; @@ -3986,7 +3973,7 @@ static int aac_convert_sgraw2(struct aac_raw_io2 *rio2, int pages, int nseg, int sge = kmalloc(nseg_new * sizeof(struct sge_ieee1212), GFP_ATOMIC); if (sge == NULL) - return -1; + return -ENOMEM; for (i = 1, pos = 1; i < nseg-1; ++i) { for (j = 0; j < rio2->sge[i].length / (pages * PAGE_SIZE); ++j) { diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index ee2667e20e42..403a639574e5 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -1723,6 +1723,7 @@ struct aac_dev #define FIB_CONTEXT_FLAG_FASTRESP (0x00000008) #define FIB_CONTEXT_FLAG_NATIVE_HBA (0x00000010) #define FIB_CONTEXT_FLAG_NATIVE_HBA_TMF (0x00000020) +#define FIB_CONTEXT_FLAG_SCSI_CMD (0x00000040) /* * Define the command values @@ -2700,6 +2701,11 @@ static inline int aac_is_src(struct aac_dev *dev) return 0; } +static inline int aac_supports_2T(struct aac_dev *dev) +{ + return (dev->adapter_info.options & AAC_OPT_NEW_COMM_64); +} + char * get_container_type(unsigned type); extern int numacb; extern char aac_driver_version[]; diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c index 9ee025b1d0e0..97d269f16888 100644 --- a/drivers/scsi/aacraid/comminit.c +++ b/drivers/scsi/aacraid/comminit.c @@ -520,9 +520,9 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev) dev->raw_io_64 = 1; dev->sync_mode = aac_sync_mode; if (dev->a_ops.adapter_comm && - (status[1] & AAC_OPT_NEW_COMM)) { - dev->comm_interface = AAC_COMM_MESSAGE; - dev->raw_io_interface = 1; + (status[1] & AAC_OPT_NEW_COMM)) { + dev->comm_interface = AAC_COMM_MESSAGE; + dev->raw_io_interface = 1; if ((status[1] & AAC_OPT_NEW_COMM_TYPE1)) { /* driver supports TYPE1 (Tupelo) */ dev->comm_interface = AAC_COMM_MESSAGE_TYPE1; diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 1c617ccfaf12..dfe8e70f8d99 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -770,7 +770,8 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback, /* bit1 of request_id must be 0 */ hbacmd->request_id = cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1); - } else + fibptr->flags |= FIB_CONTEXT_FLAG_SCSI_CMD; + } else if (command != HBA_IU_TYPE_SCSI_TM_REQ) return -EINVAL; diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 0f277df73af0..62beb2596466 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -814,111 +814,229 @@ static int aac_eh_abort(struct scsi_cmnd* cmd) return ret; } +static u8 aac_eh_tmf_lun_reset_fib(struct aac_hba_map_info *info, + struct fib *fib, u64 tmf_lun) +{ + struct aac_hba_tm_req *tmf; + u64 
address; + + /* start a HBA_TMF_LUN_RESET TMF request */ + tmf = (struct aac_hba_tm_req *)fib->hw_fib_va; + memset(tmf, 0, sizeof(*tmf)); + tmf->tmf = HBA_TMF_LUN_RESET; + tmf->it_nexus = info->rmw_nexus; + int_to_scsilun(tmf_lun, (struct scsi_lun *)tmf->lun); + + address = (u64)fib->hw_error_pa; + tmf->error_ptr_hi = cpu_to_le32 + ((u32)(address >> 32)); + tmf->error_ptr_lo = cpu_to_le32 + ((u32)(address & 0xffffffff)); + tmf->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE); + fib->hbacmd_size = sizeof(*tmf); + + return HBA_IU_TYPE_SCSI_TM_REQ; +} + +static u8 aac_eh_tmf_hard_reset_fib(struct aac_hba_map_info *info, + struct fib *fib) +{ + struct aac_hba_reset_req *rst; + u64 address; + + /* already tried, start a hard reset now */ + rst = (struct aac_hba_reset_req *)fib->hw_fib_va; + memset(rst, 0, sizeof(*rst)); + rst->it_nexus = info->rmw_nexus; + + address = (u64)fib->hw_error_pa; + rst->error_ptr_hi = cpu_to_le32((u32)(address >> 32)); + rst->error_ptr_lo = cpu_to_le32 + ((u32)(address & 0xffffffff)); + rst->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE); + fib->hbacmd_size = sizeof(*rst); + + return HBA_IU_TYPE_SATA_REQ; +} + +void aac_tmf_callback(void *context, struct fib *fibptr) +{ + struct aac_hba_resp *err = + &((struct aac_native_hba *)fibptr->hw_fib_va)->resp.err; + struct aac_hba_map_info *info = context; + int res; + + switch (err->service_response) { + case HBA_RESP_SVCRES_TMF_REJECTED: + res = -1; + break; + case HBA_RESP_SVCRES_TMF_LUN_INVALID: + res = 0; + break; + case HBA_RESP_SVCRES_TMF_COMPLETE: + case HBA_RESP_SVCRES_TMF_SUCCEEDED: + res = 0; + break; + default: + res = -2; + break; + } + aac_fib_complete(fibptr); + + info->reset_state = res; +} + /* - * aac_eh_reset - Reset command handling + * aac_eh_dev_reset - Device reset command handling * @scsi_cmd: SCSI command block causing the reset * */ -static int aac_eh_reset(struct scsi_cmnd* cmd) +static int aac_eh_dev_reset(struct scsi_cmnd *cmd) +{ + struct scsi_device * dev = cmd->device; + struct Scsi_Host * host = dev->host; + struct aac_dev * aac = (struct aac_dev *)host->hostdata; + struct aac_hba_map_info *info; + int count; + u32 bus, cid; + struct fib *fib; + int ret = FAILED; + int status; + u8 command; + + bus = aac_logical_to_phys(scmd_channel(cmd)); + cid = scmd_id(cmd); + + if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS) + return FAILED; + + info = &aac->hba_map[bus][cid]; + + if (info->devtype != AAC_DEVTYPE_NATIVE_RAW && + info->reset_state > 0) + return FAILED; + + pr_err("%s: Host adapter reset request. SCSI hang ?\n", + AAC_DRIVERNAME); + + fib = aac_fib_alloc(aac); + if (!fib) + return ret; + + /* start a HBA_TMF_LUN_RESET TMF request */ + command = aac_eh_tmf_lun_reset_fib(info, fib, dev->lun); + + info->reset_state = 1; + + status = aac_hba_send(command, fib, + (fib_callback) aac_tmf_callback, + (void *) info); + + /* Wait up to 15 seconds for completion */ + for (count = 0; count < 15; ++count) { + if (info->reset_state == 0) { + ret = info->reset_state == 0 ? 
SUCCESS : FAILED; + break; + } + msleep(1000); + } + + return ret; +} + +/* + * aac_eh_target_reset - Target reset command handling + * @scsi_cmd: SCSI command block causing the reset + * + */ +static int aac_eh_target_reset(struct scsi_cmnd *cmd) +{ + struct scsi_device * dev = cmd->device; + struct Scsi_Host * host = dev->host; + struct aac_dev * aac = (struct aac_dev *)host->hostdata; + struct aac_hba_map_info *info; + int count; + u32 bus, cid; + int ret = FAILED; + struct fib *fib; + int status; + u8 command; + + bus = aac_logical_to_phys(scmd_channel(cmd)); + cid = scmd_id(cmd); + + if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS) + return FAILED; + + info = &aac->hba_map[bus][cid]; + + if (info->devtype != AAC_DEVTYPE_NATIVE_RAW && + info->reset_state > 0) + return FAILED; + + pr_err("%s: Host adapter reset request. SCSI hang ?\n", + AAC_DRIVERNAME); + + fib = aac_fib_alloc(aac); + if (!fib) + return ret; + + + /* already tried, start a hard reset now */ + command = aac_eh_tmf_hard_reset_fib(info, fib); + + info->reset_state = 2; + + status = aac_hba_send(command, fib, + (fib_callback) aac_tmf_callback, + (void *) info); + + /* Wait up to 15 seconds for completion */ + for (count = 0; count < 15; ++count) { + if (info->reset_state <= 0) { + ret = info->reset_state == 0 ? SUCCESS : FAILED; + break; + } + msleep(1000); + } + + return ret; +} + +/* + * aac_eh_bus_reset - Bus reset command handling + * @scsi_cmd: SCSI command block causing the reset + * + */ +static int aac_eh_bus_reset(struct scsi_cmnd* cmd) { struct scsi_device * dev = cmd->device; struct Scsi_Host * host = dev->host; struct aac_dev * aac = (struct aac_dev *)host->hostdata; int count; - u32 bus, cid; - int ret = FAILED; + u32 cmd_bus; int status = 0; - __le32 supported_options2 = 0; - bool is_mu_reset; - bool is_ignore_reset; - bool is_doorbell_reset; - bus = aac_logical_to_phys(scmd_channel(cmd)); - cid = scmd_id(cmd); - if (bus < AAC_MAX_BUSES && cid < AAC_MAX_TARGETS && - aac->hba_map[bus][cid].devtype == AAC_DEVTYPE_NATIVE_RAW) { - struct fib *fib; - int status; - u64 address; - u8 command; + cmd_bus = aac_logical_to_phys(scmd_channel(cmd)); + /* Mark the assoc. FIB to not complete, eh handler does this */ + for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) { + struct fib *fib = &aac->fibs[count]; - pr_err("%s: Host adapter reset request. 
SCSI hang ?\n", - AAC_DRIVERNAME); + if (fib->hw_fib_va->header.XferState && + (fib->flags & FIB_CONTEXT_FLAG) && + (fib->flags & FIB_CONTEXT_FLAG_SCSI_CMD)) { + struct aac_hba_map_info *info; + u32 bus, cid; - fib = aac_fib_alloc(aac); - if (!fib) - return ret; - - - if (aac->hba_map[bus][cid].reset_state == 0) { - struct aac_hba_tm_req *tmf; - - /* start a HBA_TMF_LUN_RESET TMF request */ - tmf = (struct aac_hba_tm_req *)fib->hw_fib_va; - memset(tmf, 0, sizeof(*tmf)); - tmf->tmf = HBA_TMF_LUN_RESET; - tmf->it_nexus = aac->hba_map[bus][cid].rmw_nexus; - tmf->lun[1] = cmd->device->lun; - - address = (u64)fib->hw_error_pa; - tmf->error_ptr_hi = cpu_to_le32 - ((u32)(address >> 32)); - tmf->error_ptr_lo = cpu_to_le32 - ((u32)(address & 0xffffffff)); - tmf->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE); - fib->hbacmd_size = sizeof(*tmf); - - command = HBA_IU_TYPE_SCSI_TM_REQ; - aac->hba_map[bus][cid].reset_state++; - } else if (aac->hba_map[bus][cid].reset_state >= 1) { - struct aac_hba_reset_req *rst; - - /* already tried, start a hard reset now */ - rst = (struct aac_hba_reset_req *)fib->hw_fib_va; - memset(rst, 0, sizeof(*rst)); - /* reset_type is already zero... */ - rst->it_nexus = aac->hba_map[bus][cid].rmw_nexus; - - address = (u64)fib->hw_error_pa; - rst->error_ptr_hi = cpu_to_le32((u32)(address >> 32)); - rst->error_ptr_lo = cpu_to_le32 - ((u32)(address & 0xffffffff)); - rst->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE); - fib->hbacmd_size = sizeof(*rst); - - command = HBA_IU_TYPE_SATA_REQ; - aac->hba_map[bus][cid].reset_state = 0; - } - cmd->SCp.sent_command = 0; - - status = aac_hba_send(command, fib, - (fib_callback) aac_hba_callback, - (void *) cmd); - - /* Wait up to 15 seconds for completion */ - for (count = 0; count < 15; ++count) { - if (cmd->SCp.sent_command) { - ret = SUCCESS; - break; - } - msleep(1000); - } - - if (ret == SUCCESS) - goto out; - - } else { - - /* Mark the assoc. FIB to not complete, eh handler does this */ - for (count = 0; - count < (host->can_queue + AAC_NUM_MGT_FIB); - ++count) { - struct fib *fib = &aac->fibs[count]; - - if (fib->hw_fib_va->header.XferState && - (fib->flags & FIB_CONTEXT_FLAG) && - (fib->callback_data == cmd)) { + cmd = (struct scsi_cmnd *)fib->callback_data; + bus = aac_logical_to_phys(scmd_channel(cmd)); + if (bus != cmd_bus) + continue; + cid = scmd_id(cmd); + info = &aac->hba_map[bus][cid]; + if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS || + info->devtype != AAC_DEVTYPE_NATIVE_RAW) { fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT; cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER; } @@ -935,8 +1053,24 @@ static int aac_eh_reset(struct scsi_cmnd* cmd) dev_err(&aac->pdev->dev, "Adapter health - %d\n", status); count = get_num_of_incomplete_fibs(aac); - if (count == 0) - return SUCCESS; + return (count == 0) ? 
SUCCESS : FAILED; +} + +/* + * aac_eh_host_reset - Host reset command handling + * @scsi_cmd: SCSI command block causing the reset + * + */ +int aac_eh_host_reset(struct scsi_cmnd *cmd) +{ + struct scsi_device * dev = cmd->device; + struct Scsi_Host * host = dev->host; + struct aac_dev * aac = (struct aac_dev *)host->hostdata; + int ret = FAILED; + __le32 supported_options2 = 0; + bool is_mu_reset; + bool is_ignore_reset; + bool is_doorbell_reset; /* * Check if reset is supported by the firmware @@ -954,11 +1088,24 @@ static int aac_eh_reset(struct scsi_cmnd* cmd) && aac_check_reset && (aac_check_reset != -1 || !is_ignore_reset)) { /* Bypass wait for command quiesce */ - aac_reset_adapter(aac, 2, IOP_HWSOFT_RESET); + if (aac_reset_adapter(aac, 2, IOP_HWSOFT_RESET) == 0) + ret = SUCCESS; } - ret = SUCCESS; + /* + * Reset EH state + */ + if (ret == SUCCESS) { + int bus, cid; + struct aac_hba_map_info *info; -out: + for (bus = 0; bus < AAC_MAX_BUSES; bus++) { + for (cid = 0; cid < AAC_MAX_TARGETS; cid++) { + info = &aac->hba_map[bus][cid]; + if (info->devtype == AAC_DEVTYPE_NATIVE_RAW) + info->reset_state = 0; + } + } + } return ret; } @@ -1382,7 +1529,10 @@ static struct scsi_host_template aac_driver_template = { .change_queue_depth = aac_change_queue_depth, .sdev_attrs = aac_dev_attrs, .eh_abort_handler = aac_eh_abort, - .eh_host_reset_handler = aac_eh_reset, + .eh_device_reset_handler = aac_eh_dev_reset, + .eh_target_reset_handler = aac_eh_target_reset, + .eh_bus_reset_handler = aac_eh_bus_reset, + .eh_host_reset_handler = aac_eh_host_reset, .can_queue = AAC_NUM_IO_FIB, .this_id = MAXIMUM_NUM_CONTAINERS, .sg_tablesize = 16, @@ -1457,7 +1607,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) /* * Only series 7 needs freset. */ - if (pdev->device == PMC_DEVICE_S7) + if (pdev->device == PMC_DEVICE_S7) pdev->needs_freset = 1; list_for_each_entry(aac, &aac_devices, entry) { diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c index 48c2b2b34b72..0c9361c87ec8 100644 --- a/drivers/scsi/aacraid/src.c +++ b/drivers/scsi/aacraid/src.c @@ -740,6 +740,8 @@ static void aac_send_iop_reset(struct aac_dev *dev) aac_set_intx_mode(dev); src_writel(dev, MUnit.IDR, IOP_SRC_RESET_MASK); + + msleep(5000); } static void aac_send_hardware_soft_reset(struct aac_dev *dev) diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c index ce5dc73d85bb..bc0058df31c6 100644 --- a/drivers/scsi/aha152x.c +++ b/drivers/scsi/aha152x.c @@ -1140,6 +1140,9 @@ static void free_hard_reset_SCs(struct Scsi_Host *shpnt, Scsi_Cmnd **SCs) /* * Reset the bus * + * AIC-6260 has a hard reset (MRST signal), but apparently + * one cannot trigger it via software. So live with + * a soft reset; no-one seemed to have cared. 
*/ static int aha152x_bus_reset_host(struct Scsi_Host *shpnt) { @@ -1222,15 +1225,6 @@ int aha152x_host_reset_host(struct Scsi_Host *shpnt) return SUCCESS; } -/* - * Reset the host (bus and controller) - * - */ -static int aha152x_host_reset(Scsi_Cmnd *SCpnt) -{ - return aha152x_host_reset_host(SCpnt->device->host); -} - /* * Return the "logical geometry" * @@ -2917,7 +2911,6 @@ static struct scsi_host_template aha152x_driver_template = { .eh_abort_handler = aha152x_abort, .eh_device_reset_handler = aha152x_device_reset, .eh_bus_reset_handler = aha152x_bus_reset, - .eh_host_reset_handler = aha152x_host_reset, .bios_param = aha152x_biosparam, .can_queue = 1, .this_id = 7, diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c index a23cc9ac5acd..124217927c4a 100644 --- a/drivers/scsi/aha1542.c +++ b/drivers/scsi/aha1542.c @@ -986,7 +986,7 @@ static struct isa_driver aha1542_isa_driver = { static int isa_registered; #ifdef CONFIG_PNP -static struct pnp_device_id aha1542_pnp_ids[] = { +static const struct pnp_device_id aha1542_pnp_ids[] = { { .id = "ADP1542" }, { .id = "" } }; diff --git a/drivers/scsi/aic7xxx/Makefile b/drivers/scsi/aic7xxx/Makefile index 07b60a780c06..b03ba0df7a83 100644 --- a/drivers/scsi/aic7xxx/Makefile +++ b/drivers/scsi/aic7xxx/Makefile @@ -59,7 +59,8 @@ $(obj)/aic7xxx_seq.h: $(src)/aic7xxx.seq $(src)/aic7xxx.reg $(obj)/aicasm/aicasm $(aicasm-7xxx-opts-y) -o $(obj)/aic7xxx_seq.h \ $(srctree)/$(src)/aic7xxx.seq -$(aic7xxx-gen-y): $(obj)/aic7xxx_seq.h +$(aic7xxx-gen-y): $(objtree)/$(obj)/aic7xxx_seq.h + @true else $(obj)/aic7xxx_reg_print.c: $(src)/aic7xxx_reg_print.c_shipped endif @@ -76,7 +77,8 @@ $(obj)/aic79xx_seq.h: $(src)/aic79xx.seq $(src)/aic79xx.reg $(obj)/aicasm/aicasm $(aicasm-79xx-opts-y) -o $(obj)/aic79xx_seq.h \ $(srctree)/$(src)/aic79xx.seq -$(aic79xx-gen-y): $(obj)/aic79xx_seq.h +$(aic79xx-gen-y): $(objtree)/$(obj)/aic79xx_seq.h + @true else $(obj)/aic79xx_reg_print.c: $(src)/aic79xx_reg_print.c_shipped endif diff --git a/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped b/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped index cdcead071ef6..ddcd5a7701ac 100644 --- a/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped +++ b/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped @@ -12,13 +12,6 @@ typedef struct ahd_reg_parse_entry { uint8_t mask; } ahd_reg_parse_entry_t; -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_mode_ptr_print; -#else -#define ahd_mode_ptr_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "MODE_PTR", 0x00, regvalue, cur_col, wrap) -#endif - #if AIC_DEBUG_REGISTERS ahd_reg_print_t ahd_intstat_print; #else @@ -26,27 +19,6 @@ ahd_reg_print_t ahd_intstat_print; ahd_print_register(NULL, 0, "INTSTAT", 0x01, regvalue, cur_col, wrap) #endif -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_seqintcode_print; -#else -#define ahd_seqintcode_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "SEQINTCODE", 0x02, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_error_print; -#else -#define ahd_error_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "ERROR", 0x04, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_hescb_qoff_print; -#else -#define ahd_hescb_qoff_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "HESCB_QOFF", 0x08, regvalue, cur_col, wrap) -#endif - #if AIC_DEBUG_REGISTERS ahd_reg_print_t ahd_hs_mailbox_print; #else @@ -61,27 +33,6 @@ ahd_reg_print_t ahd_seqintstat_print; ahd_print_register(NULL, 0, "SEQINTSTAT", 0x0c, regvalue, cur_col, wrap) #endif -#if 
AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_clrseqintstat_print; -#else -#define ahd_clrseqintstat_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "CLRSEQINTSTAT", 0x0c, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_swtimer_print; -#else -#define ahd_swtimer_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "SWTIMER", 0x0e, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_sescb_qoff_print; -#else -#define ahd_sescb_qoff_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "SESCB_QOFF", 0x12, regvalue, cur_col, wrap) -#endif - #if AIC_DEBUG_REGISTERS ahd_reg_print_t ahd_intctl_print; #else @@ -110,111 +61,6 @@ ahd_reg_print_t ahd_sg_cache_shadow_print; ahd_print_register(NULL, 0, "SG_CACHE_SHADOW", 0x1b, regvalue, cur_col, wrap) #endif -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_lqin_print; -#else -#define ahd_lqin_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "LQIN", 0x20, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_lunptr_print; -#else -#define ahd_lunptr_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "LUNPTR", 0x22, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_cmdlenptr_print; -#else -#define ahd_cmdlenptr_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "CMDLENPTR", 0x25, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_attrptr_print; -#else -#define ahd_attrptr_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "ATTRPTR", 0x26, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_flagptr_print; -#else -#define ahd_flagptr_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "FLAGPTR", 0x27, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_cmdptr_print; -#else -#define ahd_cmdptr_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "CMDPTR", 0x28, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_qnextptr_print; -#else -#define ahd_qnextptr_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "QNEXTPTR", 0x29, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_abrtbyteptr_print; -#else -#define ahd_abrtbyteptr_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "ABRTBYTEPTR", 0x2b, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_abrtbitptr_print; -#else -#define ahd_abrtbitptr_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "ABRTBITPTR", 0x2c, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_lunlen_print; -#else -#define ahd_lunlen_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "LUNLEN", 0x30, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_cdblimit_print; -#else -#define ahd_cdblimit_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "CDBLIMIT", 0x31, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_maxcmd_print; -#else -#define ahd_maxcmd_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "MAXCMD", 0x32, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_maxcmdcnt_print; -#else -#define ahd_maxcmdcnt_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "MAXCMDCNT", 0x33, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS 
-ahd_reg_print_t ahd_lqctl1_print; -#else -#define ahd_lqctl1_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "LQCTL1", 0x38, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_lqctl2_print; -#else -#define ahd_lqctl2_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "LQCTL2", 0x39, regvalue, cur_col, wrap) -#endif - #if AIC_DEBUG_REGISTERS ahd_reg_print_t ahd_scsiseq0_print; #else @@ -229,13 +75,6 @@ ahd_reg_print_t ahd_scsiseq1_print; ahd_print_register(NULL, 0, "SCSISEQ1", 0x3b, regvalue, cur_col, wrap) #endif -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_sxfrctl0_print; -#else -#define ahd_sxfrctl0_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "SXFRCTL0", 0x3c, regvalue, cur_col, wrap) -#endif - #if AIC_DEBUG_REGISTERS ahd_reg_print_t ahd_dffstat_print; #else @@ -243,13 +82,6 @@ ahd_reg_print_t ahd_dffstat_print; ahd_print_register(NULL, 0, "DFFSTAT", 0x3f, regvalue, cur_col, wrap) #endif -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_multargid_print; -#else -#define ahd_multargid_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "MULTARGID", 0x40, regvalue, cur_col, wrap) -#endif - #if AIC_DEBUG_REGISTERS ahd_reg_print_t ahd_scsisigi_print; #else @@ -264,13 +96,6 @@ ahd_reg_print_t ahd_scsiphase_print; ahd_print_register(NULL, 0, "SCSIPHASE", 0x42, regvalue, cur_col, wrap) #endif -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_scsidat_print; -#else -#define ahd_scsidat_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "SCSIDAT", 0x44, regvalue, cur_col, wrap) -#endif - #if AIC_DEBUG_REGISTERS ahd_reg_print_t ahd_scsibus_print; #else @@ -278,13 +103,6 @@ ahd_reg_print_t ahd_scsibus_print; ahd_print_register(NULL, 0, "SCSIBUS", 0x46, regvalue, cur_col, wrap) #endif -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_targidin_print; -#else -#define ahd_targidin_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "TARGIDIN", 0x48, regvalue, cur_col, wrap) -#endif - #if AIC_DEBUG_REGISTERS ahd_reg_print_t ahd_selid_print; #else @@ -293,10 +111,10 @@ ahd_reg_print_t ahd_selid_print; #endif #if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_sblkctl_print; +ahd_reg_print_t ahd_simode0_print; #else -#define ahd_sblkctl_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "SBLKCTL", 0x4a, regvalue, cur_col, wrap) +#define ahd_simode0_print(regvalue, cur_col, wrap) \ + ahd_print_register(NULL, 0, "SIMODE0", 0x4b, regvalue, cur_col, wrap) #endif #if AIC_DEBUG_REGISTERS @@ -306,13 +124,6 @@ ahd_reg_print_t ahd_sstat0_print; ahd_print_register(NULL, 0, "SSTAT0", 0x4b, regvalue, cur_col, wrap) #endif -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_simode0_print; -#else -#define ahd_simode0_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "SIMODE0", 0x4b, regvalue, cur_col, wrap) -#endif - #if AIC_DEBUG_REGISTERS ahd_reg_print_t ahd_sstat1_print; #else @@ -327,13 +138,6 @@ ahd_reg_print_t ahd_sstat2_print; ahd_print_register(NULL, 0, "SSTAT2", 0x4d, regvalue, cur_col, wrap) #endif -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_clrsint2_print; -#else -#define ahd_clrsint2_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "CLRSINT2", 0x4d, regvalue, cur_col, wrap) -#endif - #if AIC_DEBUG_REGISTERS ahd_reg_print_t ahd_perrdiag_print; #else @@ -341,13 +145,6 @@ ahd_reg_print_t ahd_perrdiag_print; ahd_print_register(NULL, 0, "PERRDIAG", 0x4e, regvalue, cur_col, wrap) #endif -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_lqistate_print; -#else -#define 
ahd_lqistate_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "LQISTATE", 0x4e, regvalue, cur_col, wrap) -#endif - #if AIC_DEBUG_REGISTERS ahd_reg_print_t ahd_soffcnt_print; #else @@ -355,13 +152,6 @@ ahd_reg_print_t ahd_soffcnt_print; ahd_print_register(NULL, 0, "SOFFCNT", 0x4f, regvalue, cur_col, wrap) #endif -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_lqostate_print; -#else -#define ahd_lqostate_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "LQOSTATE", 0x4f, regvalue, cur_col, wrap) -#endif - #if AIC_DEBUG_REGISTERS ahd_reg_print_t ahd_lqistat0_print; #else @@ -369,27 +159,6 @@ ahd_reg_print_t ahd_lqistat0_print; ahd_print_register(NULL, 0, "LQISTAT0", 0x50, regvalue, cur_col, wrap) #endif -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_clrlqiint0_print; -#else -#define ahd_clrlqiint0_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "CLRLQIINT0", 0x50, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_lqimode0_print; -#else -#define ahd_lqimode0_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "LQIMODE0", 0x50, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_lqimode1_print; -#else -#define ahd_lqimode1_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "LQIMODE1", 0x51, regvalue, cur_col, wrap) -#endif - #if AIC_DEBUG_REGISTERS ahd_reg_print_t ahd_lqistat1_print; #else @@ -397,13 +166,6 @@ ahd_reg_print_t ahd_lqistat1_print; ahd_print_register(NULL, 0, "LQISTAT1", 0x51, regvalue, cur_col, wrap) #endif -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_clrlqiint1_print; -#else -#define ahd_clrlqiint1_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "CLRLQIINT1", 0x51, regvalue, cur_col, wrap) -#endif - #if AIC_DEBUG_REGISTERS ahd_reg_print_t ahd_lqistat2_print; #else @@ -418,20 +180,6 @@ ahd_reg_print_t ahd_sstat3_print; ahd_print_register(NULL, 0, "SSTAT3", 0x53, regvalue, cur_col, wrap) #endif -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_simode3_print; -#else -#define ahd_simode3_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "SIMODE3", 0x53, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_clrsint3_print; -#else -#define ahd_clrsint3_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "CLRSINT3", 0x53, regvalue, cur_col, wrap) -#endif - #if AIC_DEBUG_REGISTERS ahd_reg_print_t ahd_lqostat0_print; #else @@ -439,27 +187,6 @@ ahd_reg_print_t ahd_lqostat0_print; ahd_print_register(NULL, 0, "LQOSTAT0", 0x54, regvalue, cur_col, wrap) #endif -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_clrlqoint0_print; -#else -#define ahd_clrlqoint0_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "CLRLQOINT0", 0x54, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_lqomode0_print; -#else -#define ahd_lqomode0_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "LQOMODE0", 0x54, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_lqomode1_print; -#else -#define ahd_lqomode1_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "LQOMODE1", 0x55, regvalue, cur_col, wrap) -#endif - #if AIC_DEBUG_REGISTERS ahd_reg_print_t ahd_lqostat1_print; #else @@ -467,13 +194,6 @@ ahd_reg_print_t ahd_lqostat1_print; ahd_print_register(NULL, 0, "LQOSTAT1", 0x55, regvalue, cur_col, wrap) #endif -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_clrlqoint1_print; -#else -#define ahd_clrlqoint1_print(regvalue, cur_col, wrap) \ - 
ahd_print_register(NULL, 0, "CLRLQOINT1", 0x55, regvalue, cur_col, wrap) -#endif - #if AIC_DEBUG_REGISTERS ahd_reg_print_t ahd_lqostat2_print; #else @@ -481,13 +201,6 @@ ahd_reg_print_t ahd_lqostat2_print; ahd_print_register(NULL, 0, "LQOSTAT2", 0x56, regvalue, cur_col, wrap) #endif -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_os_space_cnt_print; -#else -#define ahd_os_space_cnt_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "OS_SPACE_CNT", 0x56, regvalue, cur_col, wrap) -#endif - #if AIC_DEBUG_REGISTERS ahd_reg_print_t ahd_simode1_print; #else @@ -495,13 +208,6 @@ ahd_reg_print_t ahd_simode1_print; ahd_print_register(NULL, 0, "SIMODE1", 0x57, regvalue, cur_col, wrap) #endif -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_gsfifo_print; -#else -#define ahd_gsfifo_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "GSFIFO", 0x58, regvalue, cur_col, wrap) -#endif - #if AIC_DEBUG_REGISTERS ahd_reg_print_t ahd_dffsxfrctl_print; #else @@ -509,27 +215,6 @@ ahd_reg_print_t ahd_dffsxfrctl_print; ahd_print_register(NULL, 0, "DFFSXFRCTL", 0x5a, regvalue, cur_col, wrap) #endif -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_lqoscsctl_print; -#else -#define ahd_lqoscsctl_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "LQOSCSCTL", 0x5a, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_nextscb_print; -#else -#define ahd_nextscb_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "NEXTSCB", 0x5a, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_clrseqintsrc_print; -#else -#define ahd_clrseqintsrc_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "CLRSEQINTSRC", 0x5b, regvalue, cur_col, wrap) -#endif - #if AIC_DEBUG_REGISTERS ahd_reg_print_t ahd_seqintsrc_print; #else @@ -537,13 +222,6 @@ ahd_reg_print_t ahd_seqintsrc_print; ahd_print_register(NULL, 0, "SEQINTSRC", 0x5b, regvalue, cur_col, wrap) #endif -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_currscb_print; -#else -#define ahd_currscb_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "CURRSCB", 0x5c, regvalue, cur_col, wrap) -#endif - #if AIC_DEBUG_REGISTERS ahd_reg_print_t ahd_seqimode_print; #else @@ -558,90 +236,6 @@ ahd_reg_print_t ahd_mdffstat_print; ahd_print_register(NULL, 0, "MDFFSTAT", 0x5d, regvalue, cur_col, wrap) #endif -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_lastscb_print; -#else -#define ahd_lastscb_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "LASTSCB", 0x5e, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_negoaddr_print; -#else -#define ahd_negoaddr_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "NEGOADDR", 0x60, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_negperiod_print; -#else -#define ahd_negperiod_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "NEGPERIOD", 0x61, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_negoffset_print; -#else -#define ahd_negoffset_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "NEGOFFSET", 0x62, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_negppropts_print; -#else -#define ahd_negppropts_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "NEGPPROPTS", 0x63, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_negconopts_print; -#else -#define ahd_negconopts_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, 
"NEGCONOPTS", 0x64, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_annexcol_print; -#else -#define ahd_annexcol_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "ANNEXCOL", 0x65, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_annexdat_print; -#else -#define ahd_annexdat_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "ANNEXDAT", 0x66, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_scschkn_print; -#else -#define ahd_scschkn_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "SCSCHKN", 0x66, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_iownid_print; -#else -#define ahd_iownid_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "IOWNID", 0x67, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_shcnt_print; -#else -#define ahd_shcnt_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "SHCNT", 0x68, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_townid_print; -#else -#define ahd_townid_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "TOWNID", 0x69, regvalue, cur_col, wrap) -#endif - #if AIC_DEBUG_REGISTERS ahd_reg_print_t ahd_seloid_print; #else @@ -649,90 +243,6 @@ ahd_reg_print_t ahd_seloid_print; ahd_print_register(NULL, 0, "SELOID", 0x6b, regvalue, cur_col, wrap) #endif -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_scbhaddr_print; -#else -#define ahd_scbhaddr_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "SCBHADDR", 0x7c, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_sghaddr_print; -#else -#define ahd_sghaddr_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "SGHADDR", 0x7c, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_scbhcnt_print; -#else -#define ahd_scbhcnt_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "SCBHCNT", 0x84, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_sghcnt_print; -#else -#define ahd_sghcnt_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "SGHCNT", 0x84, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_pcixctl_print; -#else -#define ahd_pcixctl_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "PCIXCTL", 0x93, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_dchspltstat0_print; -#else -#define ahd_dchspltstat0_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "DCHSPLTSTAT0", 0x96, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_dchspltstat1_print; -#else -#define ahd_dchspltstat1_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "DCHSPLTSTAT1", 0x97, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_sgspltstat0_print; -#else -#define ahd_sgspltstat0_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "SGSPLTSTAT0", 0x9e, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_sgspltstat1_print; -#else -#define ahd_sgspltstat1_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "SGSPLTSTAT1", 0x9f, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_df0pcistat_print; -#else -#define ahd_df0pcistat_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "DF0PCISTAT", 0xa0, regvalue, cur_col, 
wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_reg0_print; -#else -#define ahd_reg0_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "REG0", 0xa0, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_reg_isr_print; -#else -#define ahd_reg_isr_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "REG_ISR", 0xa4, regvalue, cur_col, wrap) -#endif - #if AIC_DEBUG_REGISTERS ahd_reg_print_t ahd_sg_state_print; #else @@ -740,27 +250,6 @@ ahd_reg_print_t ahd_sg_state_print; ahd_print_register(NULL, 0, "SG_STATE", 0xa6, regvalue, cur_col, wrap) #endif -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_targpcistat_print; -#else -#define ahd_targpcistat_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "TARGPCISTAT", 0xa7, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_scbautoptr_print; -#else -#define ahd_scbautoptr_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "SCBAUTOPTR", 0xab, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_ccscbaddr_print; -#else -#define ahd_ccscbaddr_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "CCSCBADDR", 0xac, regvalue, cur_col, wrap) -#endif - #if AIC_DEBUG_REGISTERS ahd_reg_print_t ahd_ccscbctl_print; #else @@ -775,69 +264,6 @@ ahd_reg_print_t ahd_ccsgctl_print; ahd_print_register(NULL, 0, "CCSGCTL", 0xad, regvalue, cur_col, wrap) #endif -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_ccscbram_print; -#else -#define ahd_ccscbram_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "CCSCBRAM", 0xb0, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_brddat_print; -#else -#define ahd_brddat_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "BRDDAT", 0xb8, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_seeadr_print; -#else -#define ahd_seeadr_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "SEEADR", 0xba, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_seedat_print; -#else -#define ahd_seedat_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "SEEDAT", 0xbc, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_seectl_print; -#else -#define ahd_seectl_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "SEECTL", 0xbe, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_seestat_print; -#else -#define ahd_seestat_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "SEESTAT", 0xbe, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_dspdatactl_print; -#else -#define ahd_dspdatactl_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "DSPDATACTL", 0xc1, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_dspselect_print; -#else -#define ahd_dspselect_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "DSPSELECT", 0xc4, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_wrtbiasctl_print; -#else -#define ahd_wrtbiasctl_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "WRTBIASCTL", 0xc5, regvalue, cur_col, wrap) -#endif - #if AIC_DEBUG_REGISTERS ahd_reg_print_t ahd_seqctl0_print; #else @@ -852,62 +278,6 @@ ahd_reg_print_t ahd_seqintctl_print; ahd_print_register(NULL, 0, "SEQINTCTL", 0xd9, regvalue, cur_col, wrap) #endif -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t 
ahd_prgmcnt_print; -#else -#define ahd_prgmcnt_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "PRGMCNT", 0xde, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_none_print; -#else -#define ahd_none_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "NONE", 0xea, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_intvec1_addr_print; -#else -#define ahd_intvec1_addr_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "INTVEC1_ADDR", 0xf4, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_curaddr_print; -#else -#define ahd_curaddr_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "CURADDR", 0xf4, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_intvec2_addr_print; -#else -#define ahd_intvec2_addr_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "INTVEC2_ADDR", 0xf6, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_longjmp_addr_print; -#else -#define ahd_longjmp_addr_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "LONGJMP_ADDR", 0xf8, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_accum_save_print; -#else -#define ahd_accum_save_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "ACCUM_SAVE", 0xfa, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_waiting_scb_tails_print; -#else -#define ahd_waiting_scb_tails_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "WAITING_SCB_TAILS", 0x100, regvalue, cur_col, wrap) -#endif - #if AIC_DEBUG_REGISTERS ahd_reg_print_t ahd_sram_base_print; #else @@ -915,62 +285,6 @@ ahd_reg_print_t ahd_sram_base_print; ahd_print_register(NULL, 0, "SRAM_BASE", 0x100, regvalue, cur_col, wrap) #endif -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_waiting_tid_head_print; -#else -#define ahd_waiting_tid_head_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "WAITING_TID_HEAD", 0x120, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_waiting_tid_tail_print; -#else -#define ahd_waiting_tid_tail_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "WAITING_TID_TAIL", 0x122, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_next_queued_scb_addr_print; -#else -#define ahd_next_queued_scb_addr_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "NEXT_QUEUED_SCB_ADDR", 0x124, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_complete_scb_head_print; -#else -#define ahd_complete_scb_head_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "COMPLETE_SCB_HEAD", 0x128, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_complete_scb_dmainprog_head_print; -#else -#define ahd_complete_scb_dmainprog_head_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "COMPLETE_SCB_DMAINPROG_HEAD", 0x12a, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_complete_dma_scb_head_print; -#else -#define ahd_complete_dma_scb_head_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "COMPLETE_DMA_SCB_HEAD", 0x12c, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_complete_dma_scb_tail_print; -#else -#define ahd_complete_dma_scb_tail_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "COMPLETE_DMA_SCB_TAIL", 0x12e, regvalue, 
cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_complete_on_qfreeze_head_print; -#else -#define ahd_complete_on_qfreeze_head_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "COMPLETE_ON_QFREEZE_HEAD", 0x130, regvalue, cur_col, wrap) -#endif - #if AIC_DEBUG_REGISTERS ahd_reg_print_t ahd_qfreeze_count_print; #else @@ -992,13 +306,6 @@ ahd_reg_print_t ahd_saved_mode_print; ahd_print_register(NULL, 0, "SAVED_MODE", 0x136, regvalue, cur_col, wrap) #endif -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_msg_out_print; -#else -#define ahd_msg_out_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "MSG_OUT", 0x137, regvalue, cur_col, wrap) -#endif - #if AIC_DEBUG_REGISTERS ahd_reg_print_t ahd_seq_flags_print; #else @@ -1013,48 +320,6 @@ ahd_reg_print_t ahd_lastphase_print; ahd_print_register(NULL, 0, "LASTPHASE", 0x13c, regvalue, cur_col, wrap) #endif -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_qoutfifo_entry_valid_tag_print; -#else -#define ahd_qoutfifo_entry_valid_tag_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "QOUTFIFO_ENTRY_VALID_TAG", 0x13d, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_kernel_tqinpos_print; -#else -#define ahd_kernel_tqinpos_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "KERNEL_TQINPOS", 0x13e, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_qoutfifo_next_addr_print; -#else -#define ahd_qoutfifo_next_addr_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "QOUTFIFO_NEXT_ADDR", 0x144, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_last_msg_print; -#else -#define ahd_last_msg_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "LAST_MSG", 0x14a, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_scsiseq_template_print; -#else -#define ahd_scsiseq_template_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "SCSISEQ_TEMPLATE", 0x14b, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_initiator_tag_print; -#else -#define ahd_initiator_tag_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "INITIATOR_TAG", 0x14c, regvalue, cur_col, wrap) -#endif - #if AIC_DEBUG_REGISTERS ahd_reg_print_t ahd_seq_flags2_print; #else @@ -1062,62 +327,6 @@ ahd_reg_print_t ahd_seq_flags2_print; ahd_print_register(NULL, 0, "SEQ_FLAGS2", 0x14d, regvalue, cur_col, wrap) #endif -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_allocfifo_scbptr_print; -#else -#define ahd_allocfifo_scbptr_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "ALLOCFIFO_SCBPTR", 0x14e, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_int_coalescing_timer_print; -#else -#define ahd_int_coalescing_timer_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "INT_COALESCING_TIMER", 0x150, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_int_coalescing_maxcmds_print; -#else -#define ahd_int_coalescing_maxcmds_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "INT_COALESCING_MAXCMDS", 0x152, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_int_coalescing_mincmds_print; -#else -#define ahd_int_coalescing_mincmds_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "INT_COALESCING_MINCMDS", 0x153, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_cmds_pending_print; -#else -#define 
ahd_cmds_pending_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "CMDS_PENDING", 0x154, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_int_coalescing_cmdcount_print; -#else -#define ahd_int_coalescing_cmdcount_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "INT_COALESCING_CMDCOUNT", 0x156, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_local_hs_mailbox_print; -#else -#define ahd_local_hs_mailbox_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "LOCAL_HS_MAILBOX", 0x157, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_cmdsize_table_print; -#else -#define ahd_cmdsize_table_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "CMDSIZE_TABLE", 0x158, regvalue, cur_col, wrap) -#endif - #if AIC_DEBUG_REGISTERS ahd_reg_print_t ahd_mk_message_scb_print; #else @@ -1139,27 +348,6 @@ ahd_reg_print_t ahd_scb_base_print; ahd_print_register(NULL, 0, "SCB_BASE", 0x180, regvalue, cur_col, wrap) #endif -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_scb_residual_datacnt_print; -#else -#define ahd_scb_residual_datacnt_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "SCB_RESIDUAL_DATACNT", 0x180, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_scb_sense_busaddr_print; -#else -#define ahd_scb_sense_busaddr_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "SCB_SENSE_BUSADDR", 0x18c, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_scb_tag_print; -#else -#define ahd_scb_tag_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "SCB_TAG", 0x190, regvalue, cur_col, wrap) -#endif - #if AIC_DEBUG_REGISTERS ahd_reg_print_t ahd_scb_control_print; #else @@ -1174,69 +362,6 @@ ahd_reg_print_t ahd_scb_scsiid_print; ahd_print_register(NULL, 0, "SCB_SCSIID", 0x193, regvalue, cur_col, wrap) #endif -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_scb_lun_print; -#else -#define ahd_scb_lun_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "SCB_LUN", 0x194, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_scb_task_attribute_print; -#else -#define ahd_scb_task_attribute_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "SCB_TASK_ATTRIBUTE", 0x195, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_scb_task_management_print; -#else -#define ahd_scb_task_management_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "SCB_TASK_MANAGEMENT", 0x197, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_scb_dataptr_print; -#else -#define ahd_scb_dataptr_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "SCB_DATAPTR", 0x198, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_scb_datacnt_print; -#else -#define ahd_scb_datacnt_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "SCB_DATACNT", 0x1a0, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_scb_sgptr_print; -#else -#define ahd_scb_sgptr_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "SCB_SGPTR", 0x1a4, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_scb_busaddr_print; -#else -#define ahd_scb_busaddr_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "SCB_BUSADDR", 0x1a8, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t 
ahd_scb_next2_print; -#else -#define ahd_scb_next2_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "SCB_NEXT2", 0x1ae, regvalue, cur_col, wrap) -#endif - -#if AIC_DEBUG_REGISTERS -ahd_reg_print_t ahd_scb_disconnected_lists_print; -#else -#define ahd_scb_disconnected_lists_print(regvalue, cur_col, wrap) \ - ahd_print_register(NULL, 0, "SCB_DISCONNECTED_LISTS", 0x1b8, regvalue, cur_col, wrap) -#endif - #define MODE_PTR 0x00 #define DST_MODE 0x70 @@ -1292,15 +417,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print; #define CLRCMDINT 0x02 #define CLRSPLTINT 0x01 -#define ERROR 0x04 -#define CIOPARERR 0x80 -#define CIOACCESFAIL 0x40 -#define MPARERR 0x20 -#define DPARERR 0x10 -#define SQPARERR 0x08 -#define ILLOPCODE 0x04 -#define DSCTMOUT 0x02 - #define CLRERR 0x04 #define CLRCIOPARERR 0x80 #define CLRCIOACCESFAIL 0x40 @@ -1310,6 +426,15 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print; #define CLRILLOPCODE 0x04 #define CLRDSCTMOUT 0x02 +#define ERROR 0x04 +#define CIOPARERR 0x80 +#define CIOACCESFAIL 0x40 +#define MPARERR 0x20 +#define DPARERR 0x10 +#define SQPARERR 0x08 +#define ILLOPCODE 0x04 +#define DSCTMOUT 0x02 + #define HCNTRL 0x05 #define SEQ_RESET 0x80 #define POWRDN 0x40 @@ -1404,22 +529,22 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print; #define FIFOFULL 0x02 #define FIFOEMP 0x01 -#define SG_CACHE_SHADOW 0x1b -#define ODD_SEG 0x04 -#define LAST_SEG 0x02 -#define LAST_SEG_DONE 0x01 - #define ARBCTL 0x1b #define RESET_HARB 0x80 #define RETRY_SWEN 0x08 #define USE_TIME 0x07 +#define SG_CACHE_SHADOW 0x1b +#define ODD_SEG 0x04 +#define LAST_SEG 0x02 +#define LAST_SEG_DONE 0x01 + #define SG_CACHE_PRE 0x1b -#define LQIN 0x20 - #define TYPEPTR 0x20 +#define LQIN 0x20 + #define TAGPTR 0x21 #define LUNPTR 0x22 @@ -1479,14 +604,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print; #define SINGLECMD 0x02 #define ABORTPENDING 0x01 -#define SCSBIST0 0x39 -#define GSBISTERR 0x40 -#define GSBISTDONE 0x20 -#define GSBISTRUN 0x10 -#define OSBISTERR 0x04 -#define OSBISTDONE 0x02 -#define OSBISTRUN 0x01 - #define LQCTL2 0x39 #define LQIRETRY 0x80 #define LQICONTINUE 0x40 @@ -1497,10 +614,13 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print; #define LQOTOIDLE 0x02 #define LQOPAUSE 0x01 -#define SCSBIST1 0x3a -#define NTBISTERR 0x04 -#define NTBISTDONE 0x02 -#define NTBISTRUN 0x01 +#define SCSBIST0 0x39 +#define GSBISTERR 0x40 +#define GSBISTDONE 0x20 +#define GSBISTRUN 0x10 +#define OSBISTERR 0x04 +#define OSBISTDONE 0x02 +#define OSBISTRUN 0x01 #define SCSISEQ0 0x3a #define TEMODEO 0x80 @@ -1509,8 +629,15 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print; #define FORCEBUSFREE 0x10 #define SCSIRSTO 0x01 +#define SCSBIST1 0x3a +#define NTBISTERR 0x04 +#define NTBISTDONE 0x02 +#define NTBISTRUN 0x01 + #define SCSISEQ1 0x3b +#define BUSINITID 0x3c + #define SXFRCTL0 0x3c #define DFON 0x80 #define DFPEXP 0x40 @@ -1519,8 +646,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print; #define DLCOUNT 0x3c -#define BUSINITID 0x3c - #define SXFRCTL1 0x3d #define BITBUCKET 0x80 #define ENSACHK 0x40 @@ -1545,6 +670,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print; #define CURRFIFO_1 0x01 #define CURRFIFO_0 0x00 +#define MULTARGID 0x40 + #define SCSISIGO 0x40 #define CDO 0x80 #define IOO 0x40 @@ -1555,8 +682,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print; #define REQO 0x02 #define ACKO 0x01 -#define MULTARGID 0x40 - #define SCSISIGI 0x41 #define ATNI 0x10 #define SELI 0x08 @@ -1603,14 +728,14 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print; #define ENAB20 0x04 #define 
SELWIDE 0x02 -#define CLRSINT0 0x4b -#define CLRSELDO 0x40 -#define CLRSELDI 0x20 -#define CLRSELINGO 0x10 -#define CLRIOERR 0x08 -#define CLROVERRUN 0x04 -#define CLRSPIORDY 0x02 -#define CLRARBDO 0x01 +#define SIMODE0 0x4b +#define ENSELDO 0x40 +#define ENSELDI 0x20 +#define ENSELINGO 0x10 +#define ENIOERR 0x08 +#define ENOVERRUN 0x04 +#define ENSPIORDY 0x02 +#define ENARBDO 0x01 #define SSTAT0 0x4b #define TARGET 0x80 @@ -1622,23 +747,14 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print; #define SPIORDY 0x02 #define ARBDO 0x01 -#define SIMODE0 0x4b -#define ENSELDO 0x40 -#define ENSELDI 0x20 -#define ENSELINGO 0x10 -#define ENIOERR 0x08 -#define ENOVERRUN 0x04 -#define ENSPIORDY 0x02 -#define ENARBDO 0x01 - -#define CLRSINT1 0x4c -#define CLRSELTIMEO 0x80 -#define CLRATNO 0x40 -#define CLRSCSIRSTI 0x20 -#define CLRBUSFREE 0x08 -#define CLRSCSIPERR 0x04 -#define CLRSTRB2FAST 0x02 -#define CLRREQINIT 0x01 +#define CLRSINT0 0x4b +#define CLRSELDO 0x40 +#define CLRSELDI 0x20 +#define CLRSELINGO 0x10 +#define CLRIOERR 0x08 +#define CLROVERRUN 0x04 +#define CLRSPIORDY 0x02 +#define CLRARBDO 0x01 #define SSTAT1 0x4c #define SELTO 0x80 @@ -1650,6 +766,20 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print; #define STRB2FAST 0x02 #define REQINIT 0x01 +#define CLRSINT1 0x4c +#define CLRSELTIMEO 0x80 +#define CLRATNO 0x40 +#define CLRSCSIRSTI 0x20 +#define CLRBUSFREE 0x08 +#define CLRSCSIPERR 0x04 +#define CLRSTRB2FAST 0x02 +#define CLRREQINIT 0x01 + +#define SIMODE2 0x4d +#define ENWIDE_RES 0x04 +#define ENSDONE 0x02 +#define ENDMADONE 0x01 + #define SSTAT2 0x4d #define BUSFREETIME 0xc0 #define NONPACKREQ 0x20 @@ -1662,11 +792,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print; #define BUSFREE_DFF0 0x80 #define BUSFREE_LQO 0x40 -#define SIMODE2 0x4d -#define ENWIDE_RES 0x04 -#define ENSDONE 0x02 -#define ENDMADONE 0x01 - #define CLRSINT2 0x4d #define CLRNONPACKREQ 0x20 #define CLRWIDE_RES 0x04 @@ -1685,10 +810,10 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print; #define LQISTATE 0x4e -#define SOFFCNT 0x4f - #define LQOSTATE 0x4f +#define SOFFCNT 0x4f + #define LQISTAT0 0x50 #define LQIATNQAS 0x20 #define LQICRCT1 0x10 @@ -1697,14 +822,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print; #define LQIATNLQ 0x02 #define LQIATNCMD 0x01 -#define CLRLQIINT0 0x50 -#define CLRLQIATNQAS 0x20 -#define CLRLQICRCT1 0x10 -#define CLRLQICRCT2 0x08 -#define CLRLQIBADLQT 0x04 -#define CLRLQIATNLQ 0x02 -#define CLRLQIATNCMD 0x01 - #define LQIMODE0 0x50 #define ENLQIATNQASK 0x20 #define ENLQICRCT1 0x10 @@ -1713,6 +830,14 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print; #define ENLQIATNLQ 0x02 #define ENLQIATNCMD 0x01 +#define CLRLQIINT0 0x50 +#define CLRLQIATNQAS 0x20 +#define CLRLQICRCT1 0x10 +#define CLRLQICRCT2 0x08 +#define CLRLQIBADLQT 0x04 +#define CLRLQIATNLQ 0x02 +#define CLRLQIATNCMD 0x01 + #define LQIMODE1 0x51 #define ENLQIPHASE_LQ 0x80 #define ENLQIPHASE_NLQ 0x40 @@ -1753,25 +878,18 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print; #define LQISTOPCMD 0x02 #define LQIGSAVAIL 0x01 -#define SSTAT3 0x53 -#define NTRAMPERR 0x02 -#define OSRAMPERR 0x01 - #define SIMODE3 0x53 #define ENNTRAMPERR 0x02 #define ENOSRAMPERR 0x01 +#define SSTAT3 0x53 +#define NTRAMPERR 0x02 +#define OSRAMPERR 0x01 + #define CLRSINT3 0x53 #define CLRNTRAMPERR 0x02 #define CLROSRAMPERR 0x01 -#define LQOSTAT0 0x54 -#define LQOTARGSCBPERR 0x10 -#define LQOSTOPT2 0x08 -#define LQOATNLQ 0x04 -#define LQOATNPKT 0x02 -#define LQOTCRC 0x01 - #define CLRLQOINT0 0x54 #define CLRLQOTARGSCBPERR 0x10 #define CLRLQOSTOPT2 0x08 @@ 
-1779,6 +897,13 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print; #define CLRLQOATNPKT 0x02 #define CLRLQOTCRC 0x01 +#define LQOSTAT0 0x54 +#define LQOTARGSCBPERR 0x10 +#define LQOSTOPT2 0x08 +#define LQOATNLQ 0x04 +#define LQOATNPKT 0x02 +#define LQOTCRC 0x01 + #define LQOMODE0 0x54 #define ENLQOTARGSCBPERR 0x10 #define ENLQOSTOPT2 0x08 @@ -1793,13 +918,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print; #define ENLQOBUSFREE 0x02 #define ENLQOPHACHGINPKT 0x01 -#define LQOSTAT1 0x55 -#define LQOINITSCBPERR 0x10 -#define LQOSTOPI2 0x08 -#define LQOBADQAS 0x04 -#define LQOBUSFREE 0x02 -#define LQOPHACHGINPKT 0x01 - #define CLRLQOINT1 0x55 #define CLRLQOINITSCBPERR 0x10 #define CLRLQOSTOPI2 0x08 @@ -1807,6 +925,13 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print; #define CLRLQOBUSFREE 0x02 #define CLRLQOPHACHGINPKT 0x01 +#define LQOSTAT1 0x55 +#define LQOINITSCBPERR 0x10 +#define LQOSTOPI2 0x08 +#define LQOBADQAS 0x04 +#define LQOBUSFREE 0x02 +#define LQOPHACHGINPKT 0x01 + #define LQOSTAT2 0x56 #define LQOPKT 0xe0 #define LQOWAITFIFO 0x10 @@ -1859,8 +984,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print; #define CFG4ICMD 0x02 #define CFG4TCMD 0x01 -#define CURRSCB 0x5c - #define SEQIMODE 0x5c #define ENCTXTDONE 0x40 #define ENSAVEPTRS 0x20 @@ -1870,6 +993,11 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print; #define ENCFG4ICMD 0x02 #define ENCFG4TCMD 0x01 +#define CURRSCB 0x5c + +#define CRCCONTROL 0x5d +#define CRCVALCHKEN 0x40 + #define MDFFSTAT 0x5d #define SHCNTNEGATIVE 0x40 #define SHCNTMINUS1 0x20 @@ -1879,34 +1007,31 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print; #define DATAINFIFO 0x02 #define FIFOFREE 0x01 -#define CRCCONTROL 0x5d -#define CRCVALCHKEN 0x40 - #define DFFTAG 0x5e -#define LASTSCB 0x5e - #define SCSITEST 0x5e #define CNTRTEST 0x08 #define SEL_TXPLL_DEBUG 0x04 +#define LASTSCB 0x5e + #define IOPDNCTL 0x5f #define DISABLE_OE 0x80 #define PDN_IDIST 0x04 #define PDN_DIFFSENSE 0x01 -#define SHADDR 0x60 +#define DGRPCRCI 0x60 #define NEGOADDR 0x60 -#define DGRPCRCI 0x60 +#define SHADDR 0x60 #define NEGPERIOD 0x61 -#define PACKCRCI 0x62 - #define NEGOFFSET 0x62 +#define PACKCRCI 0x62 + #define NEGPPROPTS 0x63 #define PPROPT_PACE 0x08 #define PPROPT_QAS 0x04 @@ -1942,16 +1067,18 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print; #define SHCNT 0x68 -#define TOWNID 0x69 - #define PLL960CTL1 0x69 +#define TOWNID 0x69 + #define PLL960CNT0 0x6a #define XSIG 0x6a #define SELOID 0x6b +#define FAIRNESS 0x6c + #define PLL400CTL0 0x6c #define PLL_VCOSEL 0x80 #define PLL_PWDN 0x40 @@ -1961,8 +1088,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print; #define PLL_DLPF 0x02 #define PLL_ENFBM 0x01 -#define FAIRNESS 0x6c - #define PLL400CTL1 0x6d #define PLL_CNTEN 0x80 #define PLL_CNTCLR 0x40 @@ -1974,25 +1099,25 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print; #define HADDR 0x70 +#define HODMAADR 0x70 + #define PLLDELAY 0x70 #define SPLIT_DROP_REQ 0x80 -#define HODMAADR 0x70 +#define HCNT 0x78 #define HODMACNT 0x78 -#define HCNT 0x78 - #define HODMAEN 0x7a -#define SCBHADDR 0x7c - #define SGHADDR 0x7c -#define SCBHCNT 0x84 +#define SCBHADDR 0x7c #define SGHCNT 0x84 +#define SCBHCNT 0x84 + #define DFF_THRSH 0x88 #define WR_DFTHRSH 0x70 #define RD_DFTHRSH 0x07 @@ -2025,6 +1150,10 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print; #define CMCRXMSG0 0x90 +#define OVLYRXMSG0 0x90 + +#define DCHRXMSG0 0x90 + #define ROENABLE 0x90 #define MSIROEN 0x20 #define OVLYROEN 0x10 @@ -2033,12 +1162,12 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print; #define DCH1ROEN 0x02 
#define DCH0ROEN 0x01 -#define OVLYRXMSG0 0x90 - -#define DCHRXMSG0 0x90 - #define OVLYRXMSG1 0x91 +#define CMCRXMSG1 0x91 + +#define DCHRXMSG1 0x91 + #define NSENABLE 0x91 #define MSINSEN 0x20 #define OVLYNSEN 0x10 @@ -2047,10 +1176,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print; #define DCH1NSEN 0x02 #define DCH0NSEN 0x01 -#define CMCRXMSG1 0x91 - -#define DCHRXMSG1 0x91 - #define DCHRXMSG2 0x92 #define CMCRXMSG2 0x92 @@ -2074,24 +1199,24 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print; #define TSCSERREN 0x02 #define CMPABCDIS 0x01 +#define CMCSEQBCNT 0x94 + #define OVLYSEQBCNT 0x94 #define DCHSEQBCNT 0x94 -#define CMCSEQBCNT 0x94 - -#define CMCSPLTSTAT0 0x96 - #define DCHSPLTSTAT0 0x96 #define OVLYSPLTSTAT0 0x96 -#define CMCSPLTSTAT1 0x97 +#define CMCSPLTSTAT0 0x96 #define OVLYSPLTSTAT1 0x97 #define DCHSPLTSTAT1 0x97 +#define CMCSPLTSTAT1 0x97 + #define SGRXMSG0 0x98 #define CDNUM 0xf8 #define CFNUM 0x07 @@ -2119,18 +1244,15 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print; #define TAG_NUM 0x1f #define RLXORD 0x10 -#define SGSEQBCNT 0x9c - #define SLVSPLTOUTATTR0 0x9c #define LOWER_BCNT 0xff +#define SGSEQBCNT 0x9c + #define SLVSPLTOUTATTR1 0x9d #define CMPLT_DNUM 0xf8 #define CMPLT_FNUM 0x07 -#define SLVSPLTOUTATTR2 0x9e -#define CMPLT_BNUM 0xff - #define SGSPLTSTAT0 0x9e #define STAETERM 0x80 #define SCBCERR 0x40 @@ -2141,6 +1263,9 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print; #define RXSCEMSG 0x02 #define RXSPLTRSP 0x01 +#define SLVSPLTOUTATTR2 0x9e +#define CMPLT_BNUM 0xff + #define SGSPLTSTAT1 0x9f #define RXDATABUCKET 0x01 @@ -2177,14 +1302,14 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print; #define CLRPENDMSI 0x08 #define DPR 0x01 +#define DATA_COUNT_ODD 0xa7 + #define TARGPCISTAT 0xa7 #define DPE 0x80 #define SSE 0x40 #define STA 0x08 #define TWATERR 0x02 -#define DATA_COUNT_ODD 0xa7 - #define SCBPTR 0xa8 #define CCSCBACNT 0xab @@ -2196,10 +1321,10 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print; #define CCSGADDR 0xac -#define CCSCBADR_BK 0xac - #define CCSCBADDR 0xac +#define CCSCBADR_BK 0xac + #define CMC_RAMBIST 0xad #define SG_ELEMENT_SIZE 0x80 #define SCBRAMBIST_FAIL 0x40 @@ -2253,9 +1378,9 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print; #define SEEDAT 0xbc #define SEECTL 0xbe +#define SEEOP_EWDS 0x40 #define SEEOP_WALL 0x40 #define SEEOP_EWEN 0x40 -#define SEEOP_EWDS 0x40 #define SEEOPCODE 0x70 #define SEERST 0x02 #define SEESTART 0x01 @@ -2272,25 +1397,25 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print; #define SCBCNT 0xbf -#define DFWADDR 0xc0 - #define DSPFLTRCTL 0xc0 #define FLTRDISABLE 0x20 #define EDGESENSE 0x10 #define DSPFCNTSEL 0x0f +#define DFWADDR 0xc0 + #define DSPDATACTL 0xc1 #define BYPASSENAB 0x80 #define DESQDIS 0x10 #define RCVROFFSTDIS 0x04 #define XMITOFFSTDIS 0x02 -#define DFRADDR 0xc2 - #define DSPREQCTL 0xc2 #define MANREQCTL 0xc0 #define MANREQDLY 0x3f +#define DFRADDR 0xc2 + #define DSPACKCTL 0xc3 #define MANACKCTL 0xc0 #define MANACKDLY 0x3f @@ -2311,14 +1436,14 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print; #define WRTBIASCALC 0xc7 -#define RCVRBIASCALC 0xc8 - #define DFPTRS 0xc8 -#define SKEWCALC 0xc9 +#define RCVRBIASCALC 0xc8 #define DFBKPTR 0xc9 +#define SKEWCALC 0xc9 + #define DFDBCTL 0xcb #define DFF_CIO_WR_RDY 0x20 #define DFF_CIO_RD_RDY 0x10 @@ -2403,12 +1528,12 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print; #define ACCUM_SAVE 0xfa -#define WAITING_SCB_TAILS 0x100 - #define AHD_PCI_CONFIG_BASE 0x100 #define SRAM_BASE 0x100 +#define WAITING_SCB_TAILS 0x100 + #define WAITING_TID_HEAD 0x120 
#define WAITING_TID_TAIL 0x122 @@ -2437,8 +1562,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print; #define PRELOADEN 0x80 #define WIDEODD 0x40 #define SCSIEN 0x20 -#define SDMAEN 0x10 #define SDMAENACK 0x10 +#define SDMAEN 0x10 #define HDMAEN 0x08 #define HDMAENACK 0x08 #define DIRECTION 0x04 @@ -2536,12 +1661,12 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print; #define MK_MESSAGE_SCSIID 0x162 -#define SCB_BASE 0x180 - #define SCB_RESIDUAL_DATACNT 0x180 #define SCB_CDB_STORE 0x180 #define SCB_HOST_CDB_PTR 0x180 +#define SCB_BASE 0x180 + #define SCB_RESIDUAL_SGPTR 0x184 #define SG_ADDR_MASK 0xf8 #define SG_OVERRUN_RESID 0x02 @@ -2609,77 +1734,77 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print; #define SCB_DISCONNECTED_LISTS 0x1b8 +#define STIMESEL_SHIFT 0x03 +#define STIMESEL_MIN 0x18 +#define INVALID_ADDR 0x80 +#define CMD_GROUP_CODE_SHIFT 0x05 +#define AHD_PRECOMP_MASK 0x07 +#define TARGET_DATA_IN 0x01 +#define SEEOP_EWEN_ADDR 0xc0 +#define NUMDSPS 0x14 +#define DST_MODE_SHIFT 0x04 +#define CCSCBADDR_MAX 0x80 +#define AHD_ANNEXCOL_PER_DEV0 0x04 +#define TARGET_CMD_CMPLT 0xfe +#define SEEOP_WRAL_ADDR 0x40 +#define BUS_8_BIT 0x00 #define AHD_TIMER_MAX_US 0x18ffe7 #define AHD_TIMER_MAX_TICKS 0xffff #define AHD_SENSE_BUFSIZE 0x100 -#define BUS_8_BIT 0x00 -#define TARGET_CMD_CMPLT 0xfe -#define SEEOP_WRAL_ADDR 0x40 -#define AHD_AMPLITUDE_DEF 0x07 -#define AHD_PRECOMP_CUTBACK_37 0x07 #define AHD_PRECOMP_SHIFT 0x00 +#define AHD_PRECOMP_CUTBACK_37 0x07 #define AHD_ANNEXCOL_PRECOMP_SLEW 0x04 -#define AHD_TIMER_US_PER_TICK 0x19 -#define SCB_TRANSFER_SIZE_FULL_LUN 0x38 +#define AHD_AMPLITUDE_DEF 0x07 +#define WRTBIASCTL_HP_DEFAULT 0x00 +#define TID_SHIFT 0x04 #define STATUS_QUEUE_FULL 0x28 #define STATUS_BUSY 0x08 -#define MAX_OFFSET_NON_PACED 0x7f -#define MAX_OFFSET_PACED 0xfe -#define BUS_32_BIT 0x02 -#define CCSGADDR_MAX 0x80 -#define TID_SHIFT 0x04 -#define MK_MESSAGE_BIT_OFFSET 0x04 -#define WRTBIASCTL_HP_DEFAULT 0x00 #define SEEOP_EWDS_ADDR 0x00 -#define AHD_AMPLITUDE_SHIFT 0x00 -#define AHD_AMPLITUDE_MASK 0x07 -#define AHD_ANNEXCOL_AMPLITUDE 0x06 -#define AHD_SLEWRATE_DEF_REVA 0x08 +#define SCB_TRANSFER_SIZE_FULL_LUN 0x38 +#define MK_MESSAGE_BIT_OFFSET 0x04 +#define MAX_OFFSET_PACED 0xfe +#define MAX_OFFSET_NON_PACED 0x7f +#define LUNLEN_SINGLE_LEVEL_LUN 0x0f +#define CCSGADDR_MAX 0x80 +#define B_CURRFIFO_0 0x02 +#define BUS_32_BIT 0x02 +#define AHD_TIMER_US_PER_TICK 0x19 #define AHD_SLEWRATE_SHIFT 0x03 #define AHD_SLEWRATE_MASK 0x78 +#define AHD_SLEWRATE_DEF_REVA 0x08 #define AHD_PRECOMP_CUTBACK_29 0x06 #define AHD_NUM_PER_DEV_ANNEXCOLS 0x04 -#define B_CURRFIFO_0 0x02 -#define LUNLEN_SINGLE_LEVEL_LUN 0x0f -#define NVRAM_SCB_OFFSET 0x2c -#define STATUS_PKT_SENSE 0xff -#define CMD_GROUP_CODE_SHIFT 0x05 -#define MAX_OFFSET_PACED_BUG 0x7f +#define AHD_ANNEXCOL_AMPLITUDE 0x06 +#define AHD_AMPLITUDE_SHIFT 0x00 +#define AHD_AMPLITUDE_MASK 0x07 #define STIMESEL_BUG_ADJ 0x08 -#define STIMESEL_MIN 0x18 -#define STIMESEL_SHIFT 0x03 -#define CCSGRAM_MAXSEGS 0x10 -#define INVALID_ADDR 0x80 +#define STATUS_PKT_SENSE 0xff +#define SRC_MODE_SHIFT 0x00 #define SEEOP_ERAL_ADDR 0x80 +#define NVRAM_SCB_OFFSET 0x2c +#define MAX_OFFSET_PACED_BUG 0x7f +#define CCSGRAM_MAXSEGS 0x10 #define AHD_SLEWRATE_DEF_REVB 0x08 #define AHD_PRECOMP_CUTBACK_17 0x04 -#define AHD_PRECOMP_MASK 0x07 -#define SRC_MODE_SHIFT 0x00 -#define PKT_OVERRUN_BUFSIZE 0x200 #define SCB_TRANSFER_SIZE_1BYTE_LUN 0x30 -#define TARGET_DATA_IN 0x01 -#define HOST_MSG 0xff +#define PKT_OVERRUN_BUFSIZE 0x200 #define MAX_OFFSET 0xfe +#define 
HOST_MSG 0xff #define BUS_16_BIT 0x01 -#define CCSCBADDR_MAX 0x80 -#define NUMDSPS 0x14 -#define SEEOP_EWEN_ADDR 0xc0 -#define AHD_ANNEXCOL_PER_DEV0 0x04 -#define DST_MODE_SHIFT 0x04 /* Downloaded Constant Definitions */ +#define SG_SIZEOF 0x04 +#define SG_PREFETCH_ALIGN_MASK 0x02 +#define SG_PREFETCH_CNT_LIMIT 0x01 #define CACHELINE_MASK 0x07 #define SCB_TRANSFER_SIZE 0x06 #define PKT_OVERRUN_BUFOFFSET 0x05 -#define SG_SIZEOF 0x04 #define SG_PREFETCH_ADDR_MASK 0x03 -#define SG_PREFETCH_ALIGN_MASK 0x02 -#define SG_PREFETCH_CNT_LIMIT 0x01 #define SG_PREFETCH_CNT 0x00 #define DOWNLOAD_CONST_COUNT 0x08 /* Exported Labels */ -#define LABEL_seq_isr 0x28f #define LABEL_timer_isr 0x28b +#define LABEL_seq_isr 0x28f diff --git a/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped b/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped index f5ea715d6ac3..2e0c58905b9e 100644 --- a/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped +++ b/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped @@ -234,6 +234,23 @@ ahd_selid_print(u_int regvalue, u_int *cur_col, u_int wrap) 0x49, regvalue, cur_col, wrap)); } +static const ahd_reg_parse_entry_t SIMODE0_parse_table[] = { + { "ENARBDO", 0x01, 0x01 }, + { "ENSPIORDY", 0x02, 0x02 }, + { "ENOVERRUN", 0x04, 0x04 }, + { "ENIOERR", 0x08, 0x08 }, + { "ENSELINGO", 0x10, 0x10 }, + { "ENSELDI", 0x20, 0x20 }, + { "ENSELDO", 0x40, 0x40 } +}; + +int +ahd_simode0_print(u_int regvalue, u_int *cur_col, u_int wrap) +{ + return (ahd_print_register(SIMODE0_parse_table, 7, "SIMODE0", + 0x4b, regvalue, cur_col, wrap)); +} + static const ahd_reg_parse_entry_t SSTAT0_parse_table[] = { { "ARBDO", 0x01, 0x01 }, { "SPIORDY", 0x02, 0x02 }, @@ -252,23 +269,6 @@ ahd_sstat0_print(u_int regvalue, u_int *cur_col, u_int wrap) 0x4b, regvalue, cur_col, wrap)); } -static const ahd_reg_parse_entry_t SIMODE0_parse_table[] = { - { "ENARBDO", 0x01, 0x01 }, - { "ENSPIORDY", 0x02, 0x02 }, - { "ENOVERRUN", 0x04, 0x04 }, - { "ENIOERR", 0x08, 0x08 }, - { "ENSELINGO", 0x10, 0x10 }, - { "ENSELDI", 0x20, 0x20 }, - { "ENSELDO", 0x40, 0x40 } -}; - -int -ahd_simode0_print(u_int regvalue, u_int *cur_col, u_int wrap) -{ - return (ahd_print_register(SIMODE0_parse_table, 7, "SIMODE0", - 0x4b, regvalue, cur_col, wrap)); -} - static const ahd_reg_parse_entry_t SSTAT1_parse_table[] = { { "REQINIT", 0x01, 0x01 }, { "STRB2FAST", 0x02, 0x02 }, diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c index 64ab9eaec428..381846164003 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_core.c +++ b/drivers/scsi/aic7xxx/aic7xxx_core.c @@ -7340,7 +7340,6 @@ ahc_dump_card_state(struct ahc_softc *ahc) printk("\n"); } - ahc_platform_dump_card_state(ahc); printk("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n"); ahc_outb(ahc, SCBPTR, saved_scbptr); if (paused == 0) diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c index fc6a83188c1e..acd687f4554e 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_osm.c +++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c @@ -2329,11 +2329,6 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag) return (retval); } -void -ahc_platform_dump_card_state(struct ahc_softc *ahc) -{ -} - static void ahc_linux_set_width(struct scsi_target *starget, int width) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.h b/drivers/scsi/aic7xxx/aic7xxx_osm.h index 54c702864103..f8489078f003 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_osm.h +++ b/drivers/scsi/aic7xxx/aic7xxx_osm.h @@ -688,7 +688,6 @@ void 
ahc_done(struct ahc_softc*, struct scb*); void ahc_send_async(struct ahc_softc *, char channel, u_int target, u_int lun, ac_code); void ahc_print_path(struct ahc_softc *, struct scb *); -void ahc_platform_dump_card_state(struct ahc_softc *ahc); #ifdef CONFIG_PCI #define AHC_PCI_CONFIG 1 diff --git a/drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped b/drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped index e821082a4f47..473039df0ed5 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped +++ b/drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped @@ -244,8 +244,6 @@ ahc_reg_print_t ahc_scb_tag_print; #define SCSIDATH 0x07 -#define STCNT 0x08 - #define OPTIONMODE 0x08 #define OPTIONMODE_DEFAULTS 0x03 #define AUTORATEEN 0x80 @@ -257,6 +255,8 @@ ahc_reg_print_t ahc_scb_tag_print; #define AUTO_MSGOUT_DE 0x02 #define DIS_MSGIN_DUALEDGE 0x01 +#define STCNT 0x08 + #define TARGCRCCNT 0x0a #define CLRSINT0 0x0b @@ -365,8 +365,6 @@ ahc_reg_print_t ahc_scb_tag_print; #define ALTSTIM 0x20 #define DFLTTID 0x10 -#define TARGID 0x1b - #define SPIOCAP 0x1b #define SOFT1 0x80 #define SOFT0 0x40 @@ -377,12 +375,14 @@ ahc_reg_print_t ahc_scb_tag_print; #define ROM 0x02 #define SSPIOCPS 0x01 +#define TARGID 0x1b + #define BRDCTL 0x1d #define BRDDAT7 0x80 #define BRDDAT6 0x40 #define BRDDAT5 0x20 -#define BRDDAT4 0x10 #define BRDSTB 0x10 +#define BRDDAT4 0x10 #define BRDDAT3 0x08 #define BRDCS 0x08 #define BRDDAT2 0x04 @@ -406,8 +406,8 @@ ahc_reg_print_t ahc_scb_tag_print; #define DIAGLEDEN 0x80 #define DIAGLEDON 0x40 #define AUTOFLUSHDIS 0x20 -#define ENAB40 0x08 #define SELBUSB 0x08 +#define ENAB40 0x08 #define ENAB20 0x04 #define SELWIDE 0x02 #define XCVR 0x01 @@ -730,8 +730,8 @@ ahc_reg_print_t ahc_scb_tag_print; #define SCB_BASE 0xa0 #define SCB_CDB_PTR 0xa0 -#define SCB_RESIDUAL_DATACNT 0xa0 #define SCB_CDB_STORE 0xa0 +#define SCB_RESIDUAL_DATACNT 0xa0 #define SCB_RESIDUAL_SGPTR 0xa4 @@ -756,8 +756,8 @@ ahc_reg_print_t ahc_scb_tag_print; #define SCB_CONTROL 0xb8 #define SCB_TAG_TYPE 0x03 -#define STATUS_RCVD 0x80 #define TARGET_SCB 0x80 +#define STATUS_RCVD 0x80 #define DISCENB 0x40 #define TAG_ENB 0x20 #define MK_MESSAGE 0x10 @@ -872,40 +872,40 @@ ahc_reg_print_t ahc_scb_tag_print; #define SG_CACHE_PRE 0xfc +#define TARGET_CMD_CMPLT 0xfe #define MAX_OFFSET_ULTRA2 0x7f #define MAX_OFFSET_16BIT 0x08 #define BUS_8_BIT 0x00 -#define TARGET_CMD_CMPLT 0xfe +#define TID_SHIFT 0x04 #define STATUS_QUEUE_FULL 0x28 #define STATUS_BUSY 0x08 -#define MAX_OFFSET_8BIT 0x0f -#define BUS_32_BIT 0x02 -#define CCSGADDR_MAX 0x80 -#define TID_SHIFT 0x04 #define SCB_DOWNLOAD_SIZE_64 0x30 +#define MAX_OFFSET_8BIT 0x0f #define HOST_MAILBOX_SHIFT 0x04 +#define CCSGADDR_MAX 0x80 +#define BUS_32_BIT 0x02 +#define SG_SIZEOF 0x08 +#define SEQ_MAILBOX_SHIFT 0x00 +#define SCB_LIST_NULL 0xff +#define SCB_DOWNLOAD_SIZE 0x20 #define CMD_GROUP_CODE_SHIFT 0x05 #define CCSGRAM_MAXSEGS 0x10 -#define SCB_LIST_NULL 0xff -#define SG_SIZEOF 0x08 -#define SCB_DOWNLOAD_SIZE 0x20 -#define SEQ_MAILBOX_SHIFT 0x00 #define TARGET_DATA_IN 0x01 -#define HOST_MSG 0xff -#define MAX_OFFSET 0x7f -#define BUS_16_BIT 0x01 -#define SCB_UPLOAD_SIZE 0x20 #define STACK_SIZE 0x04 +#define SCB_UPLOAD_SIZE 0x20 +#define MAX_OFFSET 0x7f +#define HOST_MSG 0xff +#define BUS_16_BIT 0x01 /* Downloaded Constant Definitions */ #define INVERTED_CACHESIZE_MASK 0x03 -#define SG_PREFETCH_ADDR_MASK 0x06 #define SG_PREFETCH_ALIGN_MASK 0x05 +#define SG_PREFETCH_ADDR_MASK 0x06 #define QOUTFIFO_OFFSET 0x00 #define SG_PREFETCH_CNT 0x04 -#define CACHESIZE_MASK 0x02 #define QINFIFO_OFFSET 0x01 +#define 
CACHESIZE_MASK 0x02 #define DOWNLOAD_CONST_COUNT 0x07 diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c index a14ba7a6b81e..6c838865ac5a 100644 --- a/drivers/scsi/aic94xx/aic94xx_init.c +++ b/drivers/scsi/aic94xx/aic94xx_init.c @@ -70,7 +70,7 @@ static struct scsi_host_template aic94xx_sht = { .max_sectors = SCSI_DEFAULT_MAX_SECTORS, .use_clustering = ENABLE_CLUSTERING, .eh_device_reset_handler = sas_eh_device_reset_handler, - .eh_bus_reset_handler = sas_eh_bus_reset_handler, + .eh_target_reset_handler = sas_eh_target_reset_handler, .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, .track_queue_depth = 1, @@ -956,11 +956,11 @@ static int asd_scan_finished(struct Scsi_Host *shost, unsigned long time) return 1; } -static ssize_t asd_version_show(struct device_driver *driver, char *buf) +static ssize_t version_show(struct device_driver *driver, char *buf) { return snprintf(buf, PAGE_SIZE, "%s\n", ASD_DRIVER_VERSION); } -static DRIVER_ATTR(version, S_IRUGO, asd_version_show, NULL); +static DRIVER_ATTR_RO(version); static int asd_create_driver_attrs(struct device_driver *driver) { diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c index 9c86481f779f..259d9c20bf25 100644 --- a/drivers/scsi/arcmsr/arcmsr_attr.c +++ b/drivers/scsi/arcmsr/arcmsr_attr.c @@ -190,7 +190,7 @@ static ssize_t arcmsr_sysfs_iop_message_clear(struct file *filp, return 1; } -static struct bin_attribute arcmsr_sysfs_message_read_attr = { +static const struct bin_attribute arcmsr_sysfs_message_read_attr = { .attr = { .name = "mu_read", .mode = S_IRUSR , @@ -199,7 +199,7 @@ static struct bin_attribute arcmsr_sysfs_message_read_attr = { .read = arcmsr_sysfs_iop_message_read, }; -static struct bin_attribute arcmsr_sysfs_message_write_attr = { +static const struct bin_attribute arcmsr_sysfs_message_write_attr = { .attr = { .name = "mu_write", .mode = S_IWUSR, @@ -208,7 +208,7 @@ static struct bin_attribute arcmsr_sysfs_message_write_attr = { .write = arcmsr_sysfs_iop_message_write, }; -static struct bin_attribute arcmsr_sysfs_message_clear_attr = { +static const struct bin_attribute arcmsr_sysfs_message_clear_attr = { .attr = { .name = "mu_clear", .mode = S_IWUSR, diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c index 12b88294d667..421fe869a11e 100644 --- a/drivers/scsi/arm/acornscsi.c +++ b/drivers/scsi/arm/acornscsi.c @@ -2725,7 +2725,7 @@ int acornscsi_abort(struct scsi_cmnd *SCpnt) * Params : SCpnt - command causing reset * Returns : one of SCSI_RESET_ macros */ -int acornscsi_bus_reset(struct scsi_cmnd *SCpnt) +int acornscsi_host_reset(struct scsi_cmnd *SCpnt) { AS_Host *host = (AS_Host *)SCpnt->device->host->hostdata; struct scsi_cmnd *SCptr; @@ -2734,14 +2734,15 @@ int acornscsi_bus_reset(struct scsi_cmnd *SCpnt) #if (DEBUG & DEBUG_RESET) { - int asr, ssr; + int asr, ssr, devidx; asr = sbic_arm_read(host, SBIC_ASR); ssr = sbic_arm_read(host, SBIC_SSR); printk(KERN_WARNING "acornscsi_reset: "); print_sbic_status(asr, ssr, host->scsi.phase); - acornscsi_dumplog(host, SCpnt->device->id); + for (devidx = 0; devidx < 9; devidx++) + acornscsi_dumplog(host, devidx); } #endif @@ -2884,7 +2885,7 @@ static struct scsi_host_template acornscsi_template = { .info = acornscsi_info, .queuecommand = acornscsi_queuecmd, .eh_abort_handler = acornscsi_abort, - .eh_bus_reset_handler = acornscsi_bus_reset, + .eh_host_reset_handler = acornscsi_host_reset, .can_queue = 16, .this_id = 7, .sg_tablesize = SG_ALL, diff --git 
a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c index a87b99c7fb9a..ae1d809904fb 100644 --- a/drivers/scsi/arm/cumana_1.c +++ b/drivers/scsi/arm/cumana_1.c @@ -216,7 +216,7 @@ static struct scsi_host_template cumanascsi_template = { .info = cumanascsi_info, .queuecommand = cumanascsi_queue_command, .eh_abort_handler = NCR5380_abort, - .eh_bus_reset_handler = NCR5380_bus_reset, + .eh_host_reset_handler = NCR5380_host_reset, .can_queue = 16, .this_id = 7, .sg_tablesize = SG_ALL, diff --git a/drivers/scsi/arm/oak.c b/drivers/scsi/arm/oak.c index 6be6666534d4..05b7f755499b 100644 --- a/drivers/scsi/arm/oak.c +++ b/drivers/scsi/arm/oak.c @@ -105,7 +105,7 @@ static struct scsi_host_template oakscsi_template = { .info = oakscsi_info, .queuecommand = oakscsi_queue_command, .eh_abort_handler = NCR5380_abort, - .eh_bus_reset_handler = NCR5380_bus_reset, + .eh_host_reset_handler = NCR5380_host_reset, .can_queue = 16, .this_id = 7, .sg_tablesize = SG_ALL, diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c index a75feebe6ad6..89f5154c40b6 100644 --- a/drivers/scsi/atari_scsi.c +++ b/drivers/scsi/atari_scsi.c @@ -671,7 +671,7 @@ static void atari_scsi_falcon_reg_write(unsigned int reg, u8 value) #include "NCR5380.c" -static int atari_scsi_bus_reset(struct scsi_cmnd *cmd) +static int atari_scsi_host_reset(struct scsi_cmnd *cmd) { int rv; unsigned long flags; @@ -688,7 +688,7 @@ static int atari_scsi_bus_reset(struct scsi_cmnd *cmd) atari_dma_orig_addr = NULL; } - rv = NCR5380_bus_reset(cmd); + rv = NCR5380_host_reset(cmd); /* The 5380 raises its IRQ line while _RST is active but the ST DMA * "lock" has been released so this interrupt may end up handled by @@ -711,7 +711,7 @@ static struct scsi_host_template atari_scsi_template = { .info = atari_scsi_info, .queuecommand = atari_scsi_queue_command, .eh_abort_handler = atari_scsi_abort, - .eh_bus_reset_handler = atari_scsi_bus_reset, + .eh_host_reset_handler = atari_scsi_host_reset, .this_id = 7, .cmd_per_lun = 2, .use_clustering = DISABLE_CLUSTERING, diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c index 97dca4681784..43a80ce5ce6a 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.c +++ b/drivers/scsi/be2iscsi/be_iscsi.c @@ -82,8 +82,8 @@ struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep, return NULL; sess = cls_session->dd_data; beiscsi_sess = sess->dd_data; - beiscsi_sess->bhs_pool = pci_pool_create("beiscsi_bhs_pool", - phba->pcidev, + beiscsi_sess->bhs_pool = dma_pool_create("beiscsi_bhs_pool", + &phba->pcidev->dev, sizeof(struct be_cmd_bhs), 64, 0); if (!beiscsi_sess->bhs_pool) @@ -108,7 +108,7 @@ void beiscsi_session_destroy(struct iscsi_cls_session *cls_session) struct beiscsi_session *beiscsi_sess = sess->dd_data; printk(KERN_INFO "In beiscsi_session_destroy\n"); - pci_pool_destroy(beiscsi_sess->bhs_pool); + dma_pool_destroy(beiscsi_sess->bhs_pool); iscsi_session_teardown(cls_session); } diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index f862332261f8..b4542e7e2ad5 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -4257,7 +4257,7 @@ static void beiscsi_cleanup_task(struct iscsi_task *task) pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; if (io_task->cmd_bhs) { - pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, + dma_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, io_task->bhs_pa.u.a64.address); io_task->cmd_bhs = NULL; task->hdr = NULL; @@ -4374,7 +4374,7 @@ static int 
beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess; dma_addr_t paddr; - io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool, + io_task->cmd_bhs = dma_pool_alloc(beiscsi_sess->bhs_pool, GFP_ATOMIC, &paddr); if (!io_task->cmd_bhs) return -ENOMEM; @@ -4501,7 +4501,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) if (io_task->pwrb_handle) free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle); io_task->pwrb_handle = NULL; - pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, + dma_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, io_task->bhs_pa.u.a64.address); io_task->cmd_bhs = NULL; return -ENOMEM; diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h index 338dbe0800c1..81ce3ffda968 100644 --- a/drivers/scsi/be2iscsi/be_main.h +++ b/drivers/scsi/be2iscsi/be_main.h @@ -438,7 +438,7 @@ struct beiscsi_hba { test_bit(BEISCSI_HBA_ONLINE, &phba->state)) struct beiscsi_session { - struct pci_pool *bhs_pool; + struct dma_pool *bhs_pool; }; /** diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c index 7eb0eef18fdd..24e657a4ec80 100644 --- a/drivers/scsi/bfa/bfad_im.c +++ b/drivers/scsi/bfa/bfad_im.c @@ -373,32 +373,28 @@ bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd) } /* - * Scsi_Host template entry, resets the bus and abort all commands. + * Scsi_Host template entry, resets the target and abort all commands. */ static int -bfad_im_reset_bus_handler(struct scsi_cmnd *cmnd) +bfad_im_reset_target_handler(struct scsi_cmnd *cmnd) { struct Scsi_Host *shost = cmnd->device->host; + struct scsi_target *starget = scsi_target(cmnd->device); struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; struct bfad_itnim_s *itnim; unsigned long flags; - u32 i, rc, err_cnt = 0; + u32 rc, rtn = FAILED; DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); enum bfi_tskim_status task_status; spin_lock_irqsave(&bfad->bfad_lock, flags); - for (i = 0; i < MAX_FCP_TARGET; i++) { - itnim = bfad_get_itnim(im_port, i); - if (itnim) { - cmnd->SCp.ptr = (char *)&wq; - rc = bfad_im_target_reset_send(bfad, cmnd, itnim); - if (rc != BFA_STATUS_OK) { - err_cnt++; - continue; - } - + itnim = bfad_get_itnim(im_port, starget->id); + if (itnim) { + cmnd->SCp.ptr = (char *)&wq; + rc = bfad_im_target_reset_send(bfad, cmnd, itnim); + if (rc == BFA_STATUS_OK) { /* wait target reset to complete */ spin_unlock_irqrestore(&bfad->bfad_lock, flags); wait_event(wq, test_bit(IO_DONE_BIT, @@ -406,20 +402,17 @@ bfad_im_reset_bus_handler(struct scsi_cmnd *cmnd) spin_lock_irqsave(&bfad->bfad_lock, flags); task_status = cmnd->SCp.Status >> 1; - if (task_status != BFI_TSKIM_STS_OK) { + if (task_status != BFI_TSKIM_STS_OK) BFA_LOG(KERN_ERR, bfad, bfa_log_level, "target reset failure," " status: %d\n", task_status); - err_cnt++; - } + else + rtn = SUCCESS; } } spin_unlock_irqrestore(&bfad->bfad_lock, flags); - if (err_cnt) - return FAILED; - - return SUCCESS; + return rtn; } /* @@ -816,7 +809,7 @@ struct scsi_host_template bfad_im_scsi_host_template = { .eh_timed_out = fc_eh_timed_out, .eh_abort_handler = bfad_im_abort_handler, .eh_device_reset_handler = bfad_im_reset_lun_handler, - .eh_bus_reset_handler = bfad_im_reset_bus_handler, + .eh_target_reset_handler = bfad_im_reset_target_handler, .slave_alloc = bfad_im_slave_alloc, .slave_configure = bfad_im_slave_configure, @@ -839,7 +832,7 @@ struct scsi_host_template bfad_im_vport_template = { .eh_timed_out = 
fc_eh_timed_out, .eh_abort_handler = bfad_im_abort_handler, .eh_device_reset_handler = bfad_im_reset_lun_handler, - .eh_bus_reset_handler = bfad_im_reset_bus_handler, + .eh_target_reset_handler = bfad_im_reset_target_handler, .slave_alloc = bfad_im_slave_alloc, .slave_configure = bfad_im_slave_configure, diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h index 7e007e142aab..901a31632493 100644 --- a/drivers/scsi/bnx2fc/bnx2fc.h +++ b/drivers/scsi/bnx2fc/bnx2fc.h @@ -539,7 +539,6 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req, void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid); void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt); int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd); -int bnx2fc_eh_host_reset(struct scsi_cmnd *sc_cmd); int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd); int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd); void bnx2fc_rport_event_handler(struct fc_lport *lport, diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c index dad959fcf6d8..c535c52e72e5 100644 --- a/drivers/scsi/ch.c +++ b/drivers/scsi/ch.c @@ -105,6 +105,7 @@ do { \ static struct class * ch_sysfs_class; typedef struct { + struct kref ref; struct list_head list; int minor; char name[8]; @@ -563,13 +564,23 @@ static int ch_gstatus(scsi_changer *ch, int type, unsigned char __user *dest) /* ------------------------------------------------------------------------ */ +static void ch_destroy(struct kref *ref) +{ + scsi_changer *ch = container_of(ref, scsi_changer, ref); + + kfree(ch->dt); + kfree(ch); +} + static int ch_release(struct inode *inode, struct file *file) { scsi_changer *ch = file->private_data; scsi_device_put(ch->device); + ch->device = NULL; file->private_data = NULL; + kref_put(&ch->ref, ch_destroy); return 0; } @@ -588,6 +599,7 @@ ch_open(struct inode *inode, struct file *file) mutex_unlock(&ch_mutex); return -ENXIO; } + kref_get(&ch->ref); spin_unlock(&ch_index_lock); file->private_data = ch; @@ -935,8 +947,11 @@ static int ch_probe(struct device *dev) } mutex_init(&ch->lock); + kref_init(&ch->ref); ch->device = sd; - ch_readconfig(ch); + ret = ch_readconfig(ch); + if (ret) + goto destroy_dev; if (init) ch_init_elem(ch); @@ -944,6 +959,8 @@ static int ch_probe(struct device *dev) sdev_printk(KERN_INFO, sd, "Attached scsi changer %s\n", ch->name); return 0; +destroy_dev: + device_destroy(ch_sysfs_class, MKDEV(SCSI_CHANGER_MAJOR, ch->minor)); remove_idr: idr_remove(&ch_index_idr, ch->minor); free_ch: @@ -960,8 +977,7 @@ static int ch_remove(struct device *dev) spin_unlock(&ch_index_lock); device_destroy(ch_sysfs_class, MKDEV(SCSI_CHANGER_MAJOR,ch->minor)); - kfree(ch->dt); - kfree(ch); + kref_put(&ch->ref, ch_destroy); return 0; } diff --git a/drivers/scsi/csiostor/csio_hw.h b/drivers/scsi/csiostor/csio_hw.h index 9acb89538e29..667046419b19 100644 --- a/drivers/scsi/csiostor/csio_hw.h +++ b/drivers/scsi/csiostor/csio_hw.h @@ -465,7 +465,7 @@ struct csio_hw { struct csio_pport pport[CSIO_MAX_PPORTS]; /* Ports (XGMACs) */ struct csio_hw_params params; /* Hw parameters */ - struct pci_pool *scsi_pci_pool; /* PCI pool for SCSI */ + struct dma_pool *scsi_dma_pool; /* DMA pool for SCSI */ mempool_t *mb_mempool; /* Mailbox memory pool*/ mempool_t *rnode_mempool; /* rnode memory pool */ diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c index dcd074169aa9..28a9c7d706cb 100644 --- a/drivers/scsi/csiostor/csio_init.c +++ b/drivers/scsi/csiostor/csio_init.c @@ -485,9 +485,10 @@ csio_resource_alloc(struct csio_hw *hw) if (!hw->rnode_mempool) goto 
err_free_mb_mempool; - hw->scsi_pci_pool = pci_pool_create("csio_scsi_pci_pool", hw->pdev, - CSIO_SCSI_RSP_LEN, 8, 0); - if (!hw->scsi_pci_pool) + hw->scsi_dma_pool = dma_pool_create("csio_scsi_dma_pool", + &hw->pdev->dev, CSIO_SCSI_RSP_LEN, + 8, 0); + if (!hw->scsi_dma_pool) goto err_free_rn_pool; return 0; @@ -505,8 +506,8 @@ csio_resource_alloc(struct csio_hw *hw) static void csio_resource_free(struct csio_hw *hw) { - pci_pool_destroy(hw->scsi_pci_pool); - hw->scsi_pci_pool = NULL; + dma_pool_destroy(hw->scsi_dma_pool); + hw->scsi_dma_pool = NULL; mempool_destroy(hw->rnode_mempool); hw->rnode_mempool = NULL; mempool_destroy(hw->mb_mempool); diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c index a1ff75f1384f..dab0d3f9bee1 100644 --- a/drivers/scsi/csiostor/csio_scsi.c +++ b/drivers/scsi/csiostor/csio_scsi.c @@ -2445,7 +2445,7 @@ csio_scsim_init(struct csio_scsim *scm, struct csio_hw *hw) /* Allocate Dma buffers for Response Payload */ dma_buf = &ioreq->dma_buf; - dma_buf->vaddr = pci_pool_alloc(hw->scsi_pci_pool, GFP_KERNEL, + dma_buf->vaddr = dma_pool_alloc(hw->scsi_dma_pool, GFP_KERNEL, &dma_buf->paddr); if (!dma_buf->vaddr) { csio_err(hw, @@ -2485,7 +2485,7 @@ csio_scsim_init(struct csio_scsim *scm, struct csio_hw *hw) ioreq = (struct csio_ioreq *)tmp; dma_buf = &ioreq->dma_buf; - pci_pool_free(hw->scsi_pci_pool, dma_buf->vaddr, + dma_pool_free(hw->scsi_dma_pool, dma_buf->vaddr, dma_buf->paddr); kfree(ioreq); @@ -2516,7 +2516,7 @@ csio_scsim_exit(struct csio_scsim *scm) ioreq = (struct csio_ioreq *)tmp; dma_buf = &ioreq->dma_buf; - pci_pool_free(scm->hw->scsi_pci_pool, dma_buf->vaddr, + dma_pool_free(scm->hw->scsi_dma_pool, dma_buf->vaddr, dma_buf->paddr); kfree(ioreq); diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index 1a4cfa562a60..512c8f1ea5b0 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -585,19 +585,21 @@ static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev) static struct rtable *find_route_ipv4(struct flowi4 *fl4, __be32 saddr, __be32 daddr, - __be16 sport, __be16 dport, u8 tos) + __be16 sport, __be16 dport, u8 tos, + int ifindex) { struct rtable *rt; rt = ip_route_output_ports(&init_net, fl4, NULL, daddr, saddr, - dport, sport, IPPROTO_TCP, tos, 0); + dport, sport, IPPROTO_TCP, tos, ifindex); if (IS_ERR(rt)) return NULL; return rt; } -static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr) +static struct cxgbi_sock * +cxgbi_check_route(struct sockaddr *dst_addr, int ifindex) { struct sockaddr_in *daddr = (struct sockaddr_in *)dst_addr; struct dst_entry *dst; @@ -611,7 +613,8 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr) int port = 0xFFFF; int err = 0; - rt = find_route_ipv4(&fl4, 0, daddr->sin_addr.s_addr, 0, daddr->sin_port, 0); + rt = find_route_ipv4(&fl4, 0, daddr->sin_addr.s_addr, 0, + daddr->sin_port, 0, ifindex); if (!rt) { pr_info("no route to ipv4 0x%x, port %u.\n", be32_to_cpu(daddr->sin_addr.s_addr), @@ -693,11 +696,13 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr) #if IS_ENABLED(CONFIG_IPV6) static struct rt6_info *find_route_ipv6(const struct in6_addr *saddr, - const struct in6_addr *daddr) + const struct in6_addr *daddr, + int ifindex) { struct flowi6 fl; memset(&fl, 0, sizeof(fl)); + fl.flowi6_oif = ifindex; if (saddr) memcpy(&fl.saddr, saddr, sizeof(struct in6_addr)); if (daddr) @@ -705,7 +710,8 @@ static struct rt6_info *find_route_ipv6(const struct in6_addr *saddr, return 
(struct rt6_info *)ip6_route_output(&init_net, NULL, &fl); } -static struct cxgbi_sock *cxgbi_check_route6(struct sockaddr *dst_addr) +static struct cxgbi_sock * +cxgbi_check_route6(struct sockaddr *dst_addr, int ifindex) { struct sockaddr_in6 *daddr6 = (struct sockaddr_in6 *)dst_addr; struct dst_entry *dst; @@ -719,7 +725,7 @@ static struct cxgbi_sock *cxgbi_check_route6(struct sockaddr *dst_addr) int port = 0xFFFF; int err = 0; - rt = find_route_ipv6(NULL, &daddr6->sin6_addr); + rt = find_route_ipv6(NULL, &daddr6->sin6_addr, ifindex); if (!rt) { pr_info("no route to ipv6 %pI6 port %u\n", @@ -2536,6 +2542,7 @@ struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost, struct cxgbi_endpoint *cep; struct cxgbi_hba *hba = NULL; struct cxgbi_sock *csk; + int ifindex = 0; int err = -EINVAL; log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK, @@ -2548,13 +2555,15 @@ struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost, pr_info("shost 0x%p, priv NULL.\n", shost); goto err_out; } + + ifindex = hba->ndev->ifindex; } if (dst_addr->sa_family == AF_INET) { - csk = cxgbi_check_route(dst_addr); + csk = cxgbi_check_route(dst_addr, ifindex); #if IS_ENABLED(CONFIG_IPV6) } else if (dst_addr->sa_family == AF_INET6) { - csk = cxgbi_check_route6(dst_addr); + csk = cxgbi_check_route6(dst_addr, ifindex); #endif } else { pr_info("address family 0x%x NOT supported.\n", diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index 6a4367cc9caa..76b8b7eed0c0 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -820,8 +820,7 @@ static void term_afu(struct cxlflash_cfg *cfg) for (k = cfg->afu->num_hwqs - 1; k >= 0; k--) term_intr(cfg, UNMAP_THREE, k); - if (cfg->afu) - stop_afu(cfg); + stop_afu(cfg); for (k = cfg->afu->num_hwqs - 1; k >= 0; k--) term_mc(cfg, k); diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c index ad0f9968ccfb..ed46e8df2e42 100644 --- a/drivers/scsi/cxlflash/superpipe.c +++ b/drivers/scsi/cxlflash/superpipe.c @@ -1390,6 +1390,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev, if (unlikely(!ctxi)) { dev_err(dev, "%s: Failed to create context ctxid=%d\n", __func__, ctxid); + rc = -ENOMEM; goto err; } @@ -1650,6 +1651,7 @@ static int cxlflash_afu_recover(struct scsi_device *sdev, u64 ctxid = DECODE_CTXID(recover->context_id), rctxid = recover->context_id; long reg; + bool locked = true; int lretry = 20; /* up to 2 seconds */ int new_adap_fd = -1; int rc = 0; @@ -1658,8 +1660,11 @@ static int cxlflash_afu_recover(struct scsi_device *sdev, up_read(&cfg->ioctl_rwsem); rc = mutex_lock_interruptible(mutex); down_read(&cfg->ioctl_rwsem); - if (rc) + if (rc) { + locked = false; goto out; + } + rc = check_state(cfg); if (rc) { dev_err(dev, "%s: Failed state rc=%d\n", __func__, rc); @@ -1693,8 +1698,10 @@ static int cxlflash_afu_recover(struct scsi_device *sdev, mutex_unlock(mutex); msleep(100); rc = mutex_lock_interruptible(mutex); - if (rc) + if (rc) { + locked = false; goto out; + } goto retry_recover; } @@ -1738,7 +1745,8 @@ static int cxlflash_afu_recover(struct scsi_device *sdev, out: if (likely(ctxi)) put_context(ctxi); - mutex_unlock(mutex); + if (locked) + mutex_unlock(mutex); atomic_dec_if_positive(&cfg->recovery_threads); return rc; } diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c index bdfb93061460..703bf1e9a64a 100644 --- a/drivers/scsi/cxlflash/vlun.c +++ b/drivers/scsi/cxlflash/vlun.c @@ -694,11 +694,7 @@ static int shrink_lxt(struct afu *afu, /* Free LBAs 
allocated to freed chunks */ mutex_lock(&blka->mutex); for (i = delta - 1; i >= 0; i--) { - /* Mask the higher 48 bits before shifting, even though - * it is a noop - */ - aun = (lxt_old[my_new_size + i].rlba_base & SISL_ASTATUS_MASK); - aun = (aun >> MC_CHUNK_SHIFT); + aun = lxt_old[my_new_size + i].rlba_base >> MC_CHUNK_SHIFT; if (needs_ws) write_same16(sdev, aun, MC_CHUNK_SIZE); ba_free(&blka->ba_lun, aun); diff --git a/drivers/scsi/dmx3191d.c b/drivers/scsi/dmx3191d.c index 6af3394d051d..003c3d726238 100644 --- a/drivers/scsi/dmx3191d.c +++ b/drivers/scsi/dmx3191d.c @@ -58,7 +58,7 @@ static struct scsi_host_template dmx3191d_driver_template = { .info = NCR5380_info, .queuecommand = NCR5380_queue_command, .eh_abort_handler = NCR5380_abort, - .eh_bus_reset_handler = NCR5380_bus_reset, + .eh_host_reset_handler = NCR5380_host_reset, .can_queue = 32, .this_id = 7, .sg_tablesize = SG_ALL, diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c index 256dd6791fcc..fd172b0890d3 100644 --- a/drivers/scsi/dpt_i2o.c +++ b/drivers/scsi/dpt_i2o.c @@ -1169,11 +1169,6 @@ static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u6 if(chan < 0 || chan >= MAX_CHANNEL) return NULL; - if( pHba->channel[chan].device == NULL){ - printk(KERN_DEBUG"Adaptec I2O RAID: Trying to find device before they are allocated\n"); - return NULL; - } - d = pHba->channel[chan].device[id]; if(!d || d->tid == 0) { return NULL; diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c index 227dd2c2ec2f..6501c330d8c8 100644 --- a/drivers/scsi/eata.c +++ b/drivers/scsi/eata.c @@ -1899,7 +1899,6 @@ static int eata2x_eh_abort(struct scsi_cmnd *SCarg) static int eata2x_eh_host_reset(struct scsi_cmnd *SCarg) { unsigned int i, time, k, c, limit = 0; - int arg_done = 0; struct scsi_cmnd *SCpnt; struct Scsi_Host *shost = SCarg->device->host; struct hostdata *ha = (struct hostdata *)shost->hostdata; @@ -1967,9 +1966,6 @@ static int eata2x_eh_host_reset(struct scsi_cmnd *SCarg) if (SCpnt->scsi_done == NULL) panic("%s: reset, mbox %d, SCpnt->scsi_done == NULL.\n", ha->board_name, i); - - if (SCpnt == SCarg) - arg_done = 1; } if (do_dma(shost->io_port, 0, RESET_PIO)) { @@ -2037,10 +2033,7 @@ static int eata2x_eh_host_reset(struct scsi_cmnd *SCarg) ha->in_reset = 0; do_trace = 0; - if (arg_done) - printk("%s: reset, exit, done.\n", ha->board_name); - else - printk("%s: reset, exit.\n", ha->board_name); + printk("%s: reset, exit.\n", ha->board_name); spin_unlock_irq(shost->host_lock); return SUCCESS; diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c index f2e9d8aa979c..81f226be3e3b 100644 --- a/drivers/scsi/esas2r/esas2r_main.c +++ b/drivers/scsi/esas2r/esas2r_main.c @@ -309,7 +309,7 @@ MODULE_PARM_DESC(interrupt_mode, "Defines the interrupt mode to use. 0 for legacy" ", 1 for MSI. Default is MSI (1)."); -static struct pci_device_id +static const struct pci_device_id esas2r_pci_table[] = { { ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x0049, 0, diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c index 71cb05b1c3eb..c3fc34b9964d 100644 --- a/drivers/scsi/esp_scsi.c +++ b/drivers/scsi/esp_scsi.c @@ -597,14 +597,12 @@ static int esp_alloc_lun_tag(struct esp_cmd_entry *ent, lp->non_tagged_cmd = ent; return 0; - } else { - /* Tagged command, see if blocked by a - * non-tagged one. - */ - if (lp->non_tagged_cmd || lp->hold) - return -EBUSY; } + /* Tagged command. Check that it isn't blocked by a non-tagged one. 
*/ + if (lp->non_tagged_cmd || lp->hold) + return -EBUSY; + BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]); lp->tagged_cmds[ent->orig_tag[1]] = ent; @@ -1210,12 +1208,6 @@ static int esp_reconnect(struct esp *esp) esp->active_cmd = ent; - if (ent->flags & ESP_CMD_FLAG_ABORT) { - esp->msg_out[0] = ABORT_TASK_SET; - esp->msg_out_len = 1; - scsi_esp_cmd(esp, ESP_CMD_SATN); - } - esp_event(esp, ESP_EVENT_CHECK_PHASE); esp_restore_pointers(esp, ent); esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; @@ -1230,9 +1222,6 @@ static int esp_finish_select(struct esp *esp) { struct esp_cmd_entry *ent; struct scsi_cmnd *cmd; - u8 orig_select_state; - - orig_select_state = esp->select_state; /* No longer selecting. */ esp->select_state = ESP_SELECT_NONE; @@ -1496,9 +1485,8 @@ static void esp_msgin_reject(struct esp *esp) return; } - esp->msg_out[0] = ABORT_TASK_SET; - esp->msg_out_len = 1; - scsi_esp_cmd(esp, ESP_CMD_SATN); + shost_printk(KERN_INFO, esp->host, "Unexpected MESSAGE REJECT\n"); + esp_schedule_reset(esp); } static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp) @@ -1621,7 +1609,7 @@ static void esp_msgin_extended(struct esp *esp) shost_printk(KERN_INFO, esp->host, "Unexpected extended msg type %x\n", esp->msg_in[2]); - esp->msg_out[0] = ABORT_TASK_SET; + esp->msg_out[0] = MESSAGE_REJECT; esp->msg_out_len = 1; scsi_esp_cmd(esp, ESP_CMD_SATN); } @@ -1745,7 +1733,6 @@ static int esp_process_event(struct esp *esp) return 0; } goto again; - break; case ESP_EVENT_DATA_IN: write = 1; @@ -1956,12 +1943,16 @@ static int esp_process_event(struct esp *esp) } else { if (esp->msg_out_len > 1) esp->ops->dma_invalidate(esp); - } - if (!(esp->ireg & ESP_INTR_DC)) { - if (esp->rev != FASHME) + /* XXX if the chip went into disconnected mode, + * we can't run the phase state machine anyway. + */ + if (!(esp->ireg & ESP_INTR_DC)) scsi_esp_cmd(esp, ESP_CMD_NULL); } + + esp->msg_out_len = 0; + esp_event(esp, ESP_EVENT_CHECK_PHASE); goto again; case ESP_EVENT_MSGIN: @@ -1998,6 +1989,10 @@ static int esp_process_event(struct esp *esp) scsi_esp_cmd(esp, ESP_CMD_MOK); + /* Check whether a bus reset is to be done next */ + if (esp->event == ESP_EVENT_RESET) + return 0; + if (esp->event != ESP_EVENT_FREE_BUS) esp_event(esp, ESP_EVENT_CHECK_PHASE); } else { @@ -2022,7 +2017,6 @@ static int esp_process_event(struct esp *esp) } esp_schedule_reset(esp); return 0; - break; case ESP_EVENT_RESET: scsi_esp_cmd(esp, ESP_CMD_RS); @@ -2033,7 +2027,6 @@ static int esp_process_event(struct esp *esp) "Unexpected event %x, resetting\n", esp->event); esp_schedule_reset(esp); return 0; - break; } return 1; } @@ -2170,14 +2163,14 @@ static void __esp_interrupt(struct esp *esp) esp_schedule_reset(esp); } else { - if (!(esp->ireg & ESP_INTR_RSEL)) { - /* Some combination of FDONE, BSERV, DC. */ - if (esp->select_state != ESP_SELECT_NONE) - intr_done = esp_finish_select(esp); - } else if (esp->ireg & ESP_INTR_RSEL) { + if (esp->ireg & ESP_INTR_RSEL) { if (esp->active_cmd) (void) esp_finish_select(esp); intr_done = esp_reconnect(esp); + } else { + /* Some combination of FDONE, BSERV, DC. 
*/ + if (esp->select_state != ESP_SELECT_NONE) + intr_done = esp_finish_select(esp); } } while (!intr_done) diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h index 84dcbe4a6268..7e8932ae91f8 100644 --- a/drivers/scsi/esp_scsi.h +++ b/drivers/scsi/esp_scsi.h @@ -281,7 +281,6 @@ struct esp_cmd_entry { u8 flags; #define ESP_CMD_FLAG_WRITE 0x01 /* DMA is a write */ -#define ESP_CMD_FLAG_ABORT 0x02 /* being aborted */ #define ESP_CMD_FLAG_AUTOSENSE 0x04 /* Doing automatic REQUEST_SENSE */ #define ESP_CMD_FLAG_RESIDUAL 0x08 /* AM53c974 BLAST residual */ diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c index 9cf3d56296ab..5c8310bade61 100644 --- a/drivers/scsi/fcoe/fcoe_sysfs.c +++ b/drivers/scsi/fcoe/fcoe_sysfs.c @@ -659,13 +659,13 @@ static void fcoe_fcf_device_release(struct device *dev) kfree(fcf); } -static struct device_type fcoe_ctlr_device_type = { +static const struct device_type fcoe_ctlr_device_type = { .name = "fcoe_ctlr", .groups = fcoe_ctlr_attr_groups, .release = fcoe_ctlr_device_release, }; -static struct device_type fcoe_fcf_device_type = { +static const struct device_type fcoe_fcf_device_type = { .name = "fcoe_fcf", .groups = fcoe_fcf_attr_groups, .release = fcoe_fcf_device_release, diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c index b87ab38a4530..ebbe5a3e665d 100644 --- a/drivers/scsi/fdomain.c +++ b/drivers/scsi/fdomain.c @@ -933,7 +933,7 @@ struct Scsi_Host *__fdomain_16x0_detect(struct scsi_host_template *tpnt ) } } - fdomain_16x0_bus_reset(NULL); + fdomain_16x0_host_reset(NULL); if (fdomain_test_loopback()) { printk(KERN_ERR "scsi: Detection failed (loopback test failed at port base 0x%x)\n", port_base); @@ -1568,7 +1568,7 @@ static int fdomain_16x0_abort(struct scsi_cmnd *SCpnt) return SUCCESS; } -int fdomain_16x0_bus_reset(struct scsi_cmnd *SCpnt) +int fdomain_16x0_host_reset(struct scsi_cmnd *SCpnt) { unsigned long flags; @@ -1758,7 +1758,7 @@ struct scsi_host_template fdomain_driver_template = { .info = fdomain_16x0_info, .queuecommand = fdomain_16x0_queue, .eh_abort_handler = fdomain_16x0_abort, - .eh_bus_reset_handler = fdomain_16x0_bus_reset, + .eh_host_reset_handler = fdomain_16x0_host_reset, .bios_param = fdomain_16x0_biosparam, .release = fdomain_16x0_release, .can_queue = 1, diff --git a/drivers/scsi/fdomain.h b/drivers/scsi/fdomain.h index 47021d9d4fe4..5cbe86b573ae 100644 --- a/drivers/scsi/fdomain.h +++ b/drivers/scsi/fdomain.h @@ -21,4 +21,4 @@ extern struct scsi_host_template fdomain_driver_template; extern int fdomain_setup(char *str); extern struct Scsi_Host *__fdomain_16x0_detect(struct scsi_host_template *tpnt ); -extern int fdomain_16x0_bus_reset(struct scsi_cmnd *SCpnt); +extern int fdomain_16x0_host_reset(struct scsi_cmnd *SCpnt); diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index 67aab965c0f4..d094ba59ed15 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h @@ -180,7 +180,7 @@ enum fnic_msix_intr_index { struct fnic_msix_entry { int requested; - char devname[IFNAMSIZ]; + char devname[IFNAMSIZ + 11]; irqreturn_t (*isr)(int, void *); void *devid; }; diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index 6c0646d62dfb..242e2ee494a1 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -1990,10 +1990,6 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "Issuing Host reset due to out of order IO\n"); - if (fnic_host_reset(sc) == FAILED) { - FNIC_SCSI_DBG(KERN_DEBUG, 
fnic->lport->host, - "fnic_host_reset failed.\n"); - } ret = FAILED; goto fnic_abort_cmd_end; } diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c index c34fc91ba486..fc538181f8df 100644 --- a/drivers/scsi/g_NCR5380.c +++ b/drivers/scsi/g_NCR5380.c @@ -1,17 +1,17 @@ /* * Generic Generic NCR5380 driver - * + * * Copyright 1993, Drew Eckhardt - * Visionary Computing - * (Unix and Linux consulting and custom programming) - * drew@colorado.edu - * +1 (303) 440-4894 + * Visionary Computing + * (Unix and Linux consulting and custom programming) + * drew@colorado.edu + * +1 (303) 440-4894 * * NCR53C400 extensions (c) 1994,1995,1996, Kevin Lentin - * K.Lentin@cs.monash.edu.au + * K.Lentin@cs.monash.edu.au * * NCR53C400A extensions (c) 1996, Ingmar Baumgart - * ingmar@gonzo.schwaben.de + * ingmar@gonzo.schwaben.de * * DTC3181E extensions (c) 1997, Ronald van Cuijlenborg * ronald.van.cuijlenborg@tip.nl or nutty@dds.nl @@ -44,17 +44,19 @@ int c400_ctl_status; \ int c400_blk_cnt; \ int c400_host_buf; \ - int io_width + int io_width; \ + int pdma_residual; \ + int board #define NCR5380_dma_xfer_len generic_NCR5380_dma_xfer_len -#define NCR5380_dma_recv_setup generic_NCR5380_pread -#define NCR5380_dma_send_setup generic_NCR5380_pwrite -#define NCR5380_dma_residual NCR5380_dma_residual_none +#define NCR5380_dma_recv_setup generic_NCR5380_precv +#define NCR5380_dma_send_setup generic_NCR5380_psend +#define NCR5380_dma_residual generic_NCR5380_dma_residual #define NCR5380_intr generic_NCR5380_intr #define NCR5380_queue_command generic_NCR5380_queue_command #define NCR5380_abort generic_NCR5380_abort -#define NCR5380_bus_reset generic_NCR5380_bus_reset +#define NCR5380_host_reset generic_NCR5380_host_reset #define NCR5380_info generic_NCR5380_info #define NCR5380_io_delay(x) udelay(x) @@ -76,6 +78,7 @@ #define IRQ_AUTO 254 #define MAX_CARDS 8 +#define DMA_MAX_SIZE 32768 /* old-style parameters for compatibility */ static int ncr_irq = -1; @@ -314,6 +317,7 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt, } hostdata = shost_priv(instance); + hostdata->board = board; hostdata->io = iomem; hostdata->region_size = region_size; @@ -478,180 +482,210 @@ static void generic_NCR5380_release_resources(struct Scsi_Host *instance) release_mem_region(base, region_size); } -/** - * generic_NCR5380_pread - pseudo DMA read - * @hostdata: scsi host private data - * @dst: buffer to read into - * @len: buffer length +/* wait_for_53c80_access - wait for 53C80 registers to become accessible + * @hostdata: scsi host private data * - * Perform a pseudo DMA mode read from an NCR53C400 or equivalent - * controller + * The registers within the 53C80 logic block are inaccessible until + * bit 7 in the 53C400 control status register gets asserted. 
*/ - -static inline int generic_NCR5380_pread(struct NCR5380_hostdata *hostdata, + +static void wait_for_53c80_access(struct NCR5380_hostdata *hostdata) +{ + int count = 10000; + + do { + if (hostdata->board == BOARD_DTC3181E) + udelay(4); /* DTC436 chip hangs without this */ + if (NCR5380_read(hostdata->c400_ctl_status) & CSR_53C80_REG) + return; + } while (--count > 0); + + scmd_printk(KERN_ERR, hostdata->connected, + "53c80 registers not accessible, device will be reset\n"); + NCR5380_write(hostdata->c400_ctl_status, CSR_RESET); + NCR5380_write(hostdata->c400_ctl_status, CSR_BASE); +} + +/** + * generic_NCR5380_precv - pseudo DMA receive + * @hostdata: scsi host private data + * @dst: buffer to write into + * @len: transfer size + * + * Perform a pseudo DMA mode receive from a 53C400 or equivalent device. + */ + +static inline int generic_NCR5380_precv(struct NCR5380_hostdata *hostdata, unsigned char *dst, int len) { - int blocks = len / 128; + int residual; int start = 0; NCR5380_write(hostdata->c400_ctl_status, CSR_BASE | CSR_TRANS_DIR); - NCR5380_write(hostdata->c400_blk_cnt, blocks); - while (1) { - if (NCR5380_read(hostdata->c400_blk_cnt) == 0) - break; - if (NCR5380_read(hostdata->c400_ctl_status) & CSR_GATED_53C80_IRQ) { - printk(KERN_ERR "53C400r: Got 53C80_IRQ start=%d, blocks=%d\n", start, blocks); - return -1; + NCR5380_write(hostdata->c400_blk_cnt, len / 128); + + do { + if (start == len - 128) { + /* Ignore End of DMA interrupt for the final buffer */ + if (NCR5380_poll_politely(hostdata, hostdata->c400_ctl_status, + CSR_HOST_BUF_NOT_RDY, 0, HZ / 64) < 0) + break; + } else { + if (NCR5380_poll_politely2(hostdata, hostdata->c400_ctl_status, + CSR_HOST_BUF_NOT_RDY, 0, + hostdata->c400_ctl_status, + CSR_GATED_53C80_IRQ, + CSR_GATED_53C80_IRQ, HZ / 64) < 0 || + NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY) + break; } - while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY) - ; /* FIXME - no timeout */ if (hostdata->io_port && hostdata->io_width == 2) insw(hostdata->io_port + hostdata->c400_host_buf, - dst + start, 64); + dst + start, 64); else if (hostdata->io_port) insb(hostdata->io_port + hostdata->c400_host_buf, - dst + start, 128); + dst + start, 128); else memcpy_fromio(dst + start, hostdata->io + NCR53C400_host_buffer, 128); - start += 128; - blocks--; + } while (start < len); + + residual = len - start; + + if (residual != 0) { + /* 53c80 interrupt or transfer timeout. Reset 53c400 logic. 
*/ + NCR5380_write(hostdata->c400_ctl_status, CSR_RESET); + NCR5380_write(hostdata->c400_ctl_status, CSR_BASE); } + wait_for_53c80_access(hostdata); - if (blocks) { - while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY) - ; /* FIXME - no timeout */ + if (residual == 0 && NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, + BASR_END_DMA_TRANSFER, + BASR_END_DMA_TRANSFER, + HZ / 64) < 0) + scmd_printk(KERN_ERR, hostdata->connected, "%s: End of DMA timeout\n", + __func__); - if (hostdata->io_port && hostdata->io_width == 2) - insw(hostdata->io_port + hostdata->c400_host_buf, - dst + start, 64); - else if (hostdata->io_port) - insb(hostdata->io_port + hostdata->c400_host_buf, - dst + start, 128); - else - memcpy_fromio(dst + start, - hostdata->io + NCR53C400_host_buffer, 128); + hostdata->pdma_residual = residual; - start += 128; - blocks--; - } - - if (!(NCR5380_read(hostdata->c400_ctl_status) & CSR_GATED_53C80_IRQ)) - printk("53C400r: no 53C80 gated irq after transfer"); - - /* wait for 53C80 registers to be available */ - while (!(NCR5380_read(hostdata->c400_ctl_status) & CSR_53C80_REG)) - ; - - if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_END_DMA_TRANSFER)) - printk(KERN_ERR "53C400r: no end dma signal\n"); - return 0; } /** - * generic_NCR5380_pwrite - pseudo DMA write - * @hostdata: scsi host private data - * @dst: buffer to read into - * @len: buffer length + * generic_NCR5380_psend - pseudo DMA send + * @hostdata: scsi host private data + * @src: buffer to read from + * @len: transfer size * - * Perform a pseudo DMA mode read from an NCR53C400 or equivalent - * controller + * Perform a pseudo DMA mode send to a 53C400 or equivalent device. */ -static inline int generic_NCR5380_pwrite(struct NCR5380_hostdata *hostdata, - unsigned char *src, int len) +static inline int generic_NCR5380_psend(struct NCR5380_hostdata *hostdata, + unsigned char *src, int len) { - int blocks = len / 128; + int residual; int start = 0; NCR5380_write(hostdata->c400_ctl_status, CSR_BASE); - NCR5380_write(hostdata->c400_blk_cnt, blocks); - while (1) { - if (NCR5380_read(hostdata->c400_ctl_status) & CSR_GATED_53C80_IRQ) { - printk(KERN_ERR "53C400w: Got 53C80_IRQ start=%d, blocks=%d\n", start, blocks); - return -1; + NCR5380_write(hostdata->c400_blk_cnt, len / 128); + + do { + if (NCR5380_poll_politely2(hostdata, hostdata->c400_ctl_status, + CSR_HOST_BUF_NOT_RDY, 0, + hostdata->c400_ctl_status, + CSR_GATED_53C80_IRQ, + CSR_GATED_53C80_IRQ, HZ / 64) < 0 || + NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY) { + /* Both 128 B buffers are in use */ + if (start >= 128) + start -= 128; + if (start >= 128) + start -= 128; + break; } - if (NCR5380_read(hostdata->c400_blk_cnt) == 0) + if (start >= len && NCR5380_read(hostdata->c400_blk_cnt) == 0) break; - while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY) - ; // FIXME - timeout + + if (NCR5380_read(hostdata->c400_ctl_status) & CSR_GATED_53C80_IRQ) { + /* Host buffer is empty, other one is in use */ + if (start >= 128) + start -= 128; + break; + } + + if (start >= len) + continue; if (hostdata->io_port && hostdata->io_width == 2) outsw(hostdata->io_port + hostdata->c400_host_buf, - src + start, 64); + src + start, 64); else if (hostdata->io_port) outsb(hostdata->io_port + hostdata->c400_host_buf, - src + start, 128); + src + start, 128); else memcpy_toio(hostdata->io + NCR53C400_host_buffer, src + start, 128); - start += 128; - blocks--; + } while (1); + + residual = len - start; + + if (residual != 0) { + /* 53c80 
interrupt or transfer timeout. Reset 53c400 logic. */ + NCR5380_write(hostdata->c400_ctl_status, CSR_RESET); + NCR5380_write(hostdata->c400_ctl_status, CSR_BASE); } - if (blocks) { - while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY) - ; // FIXME - no timeout + wait_for_53c80_access(hostdata); - if (hostdata->io_port && hostdata->io_width == 2) - outsw(hostdata->io_port + hostdata->c400_host_buf, - src + start, 64); - else if (hostdata->io_port) - outsb(hostdata->io_port + hostdata->c400_host_buf, - src + start, 128); - else - memcpy_toio(hostdata->io + NCR53C400_host_buffer, - src + start, 128); + if (residual == 0) { + if (NCR5380_poll_politely(hostdata, TARGET_COMMAND_REG, + TCR_LAST_BYTE_SENT, TCR_LAST_BYTE_SENT, + HZ / 64) < 0) + scmd_printk(KERN_ERR, hostdata->connected, + "%s: Last Byte Sent timeout\n", __func__); - start += 128; - blocks--; + if (NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, + BASR_END_DMA_TRANSFER, BASR_END_DMA_TRANSFER, + HZ / 64) < 0) + scmd_printk(KERN_ERR, hostdata->connected, "%s: End of DMA timeout\n", + __func__); } - /* wait for 53C80 registers to be available */ - while (!(NCR5380_read(hostdata->c400_ctl_status) & CSR_53C80_REG)) { - udelay(4); /* DTC436 chip hangs without this */ - /* FIXME - no timeout */ - } + hostdata->pdma_residual = residual; - if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_END_DMA_TRANSFER)) { - printk(KERN_ERR "53C400w: no end dma signal\n"); - } - - while (!(NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT)) - ; // TIMEOUT return 0; } static int generic_NCR5380_dma_xfer_len(struct NCR5380_hostdata *hostdata, struct scsi_cmnd *cmd) { - int transfersize = cmd->transfersize; + int transfersize = cmd->SCp.this_residual; if (hostdata->flags & FLAG_NO_PSEUDO_DMA) return 0; - /* Limit transfers to 32K, for xx400 & xx406 - * pseudoDMA that transfers in 128 bytes blocks. - */ - if (transfersize > 32 * 1024 && cmd->SCp.this_residual && - !(cmd->SCp.this_residual % transfersize)) - transfersize = 32 * 1024; - /* 53C400 datasheet: non-modulo-128-byte transfers should use PIO */ if (transfersize % 128) - transfersize = 0; + return 0; - return transfersize; + /* Limit PDMA send to 512 B to avoid random corruption on DTC3181E */ + if (hostdata->board == BOARD_DTC3181E && + cmd->sc_data_direction == DMA_TO_DEVICE) + transfersize = min(cmd->SCp.this_residual, 512); + + return min(transfersize, DMA_MAX_SIZE); } -/* - * Include the NCR5380 core code that we build our driver around - */ - +static int generic_NCR5380_dma_residual(struct NCR5380_hostdata *hostdata) +{ + return hostdata->pdma_residual; +} + +/* Include the core driver code. 
*/ + #include "NCR5380.c" static struct scsi_host_template driver_template = { @@ -661,7 +695,7 @@ static struct scsi_host_template driver_template = { .info = generic_NCR5380_info, .queuecommand = generic_NCR5380_queue_command, .eh_abort_handler = generic_NCR5380_abort, - .eh_bus_reset_handler = generic_NCR5380_bus_reset, + .eh_host_reset_handler = generic_NCR5380_host_reset, .can_queue = 16, .this_id = 7, .sg_tablesize = SG_ALL, @@ -671,11 +705,10 @@ static struct scsi_host_template driver_template = { .max_sectors = 128, }; - static int generic_NCR5380_isa_match(struct device *pdev, unsigned int ndev) { int ret = generic_NCR5380_init_one(&driver_template, pdev, base[ndev], - irq[ndev], card[ndev]); + irq[ndev], card[ndev]); if (ret) { if (base[ndev]) printk(KERN_WARNING "Card not found at address 0x%03x\n", @@ -687,7 +720,7 @@ static int generic_NCR5380_isa_match(struct device *pdev, unsigned int ndev) } static int generic_NCR5380_isa_remove(struct device *pdev, - unsigned int ndev) + unsigned int ndev) { generic_NCR5380_release_resources(dev_get_drvdata(pdev)); dev_set_drvdata(pdev, NULL); @@ -703,14 +736,14 @@ static struct isa_driver generic_NCR5380_isa_driver = { }; #ifdef CONFIG_PNP -static struct pnp_device_id generic_NCR5380_pnp_ids[] = { +static const struct pnp_device_id generic_NCR5380_pnp_ids[] = { { .id = "DTC436e", .driver_data = BOARD_DTC3181E }, { .id = "" } }; MODULE_DEVICE_TABLE(pnp, generic_NCR5380_pnp_ids); static int generic_NCR5380_pnp_probe(struct pnp_dev *pdev, - const struct pnp_device_id *id) + const struct pnp_device_id *id) { int base, irq; @@ -721,7 +754,7 @@ static int generic_NCR5380_pnp_probe(struct pnp_dev *pdev, irq = pnp_irq(pdev, 0); return generic_NCR5380_init_one(&driver_template, &pdev->dev, base, irq, - id->driver_data); + id->driver_data); } static void generic_NCR5380_pnp_remove(struct pnp_dev *pdev) diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c index facc7271f932..a4473356a9dc 100644 --- a/drivers/scsi/gdth.c +++ b/drivers/scsi/gdth.c @@ -2354,7 +2354,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp) inq.resp_aenc = 2; inq.add_length= 32; strcpy(inq.vendor,ha->oem_name); - sprintf(inq.product,"Host Drive #%02d",t); + snprintf(inq.product, sizeof(inq.product), "Host Drive #%02d",t); strcpy(inq.revision," "); gdth_copy_internal_data(ha, scp, (char*)&inq, sizeof(gdth_inq_data)); break; diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c index be609db66807..d08b2716752c 100644 --- a/drivers/scsi/gdth_proc.c +++ b/drivers/scsi/gdth_proc.c @@ -147,7 +147,7 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host) gdth_cmd_str *gdtcmd; gdth_evt_str *estr; - char hrec[161]; + char hrec[277]; char *buf; gdth_dskstat_str *pds; diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c index 3b6f83ffddc4..a27fc49ebd3a 100644 --- a/drivers/scsi/gvp11.c +++ b/drivers/scsi/gvp11.c @@ -171,23 +171,6 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, } } -static int gvp11_bus_reset(struct scsi_cmnd *cmd) -{ - struct Scsi_Host *instance = cmd->device->host; - - /* FIXME perform bus-specific reset */ - - /* FIXME 2: shouldn't we no-op this function (return - FAILED), and fall back to host reset function, - wd33c93_host_reset ? 
*/ - - spin_lock_irq(instance->host_lock); - wd33c93_host_reset(cmd); - spin_unlock_irq(instance->host_lock); - - return SUCCESS; -} - static struct scsi_host_template gvp11_scsi_template = { .module = THIS_MODULE, .name = "GVP Series II SCSI", @@ -196,7 +179,6 @@ static struct scsi_host_template gvp11_scsi_template = { .proc_name = "GVP11", .queuecommand = wd33c93_queuecommand, .eh_abort_handler = wd33c93_abort, - .eh_bus_reset_handler = gvp11_bus_reset, .eh_host_reset_handler = wd33c93_host_reset, .can_queue = CAN_QUEUE, .this_id = 7, diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h index a722f2bd72ab..07f4a4cfbec1 100644 --- a/drivers/scsi/hisi_sas/hisi_sas.h +++ b/drivers/scsi/hisi_sas/hisi_sas.h @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -25,14 +26,13 @@ #include #include -#define DRV_VERSION "v1.6" - #define HISI_SAS_MAX_PHYS 9 #define HISI_SAS_MAX_QUEUES 32 #define HISI_SAS_QUEUE_SLOTS 512 #define HISI_SAS_MAX_ITCT_ENTRIES 2048 #define HISI_SAS_MAX_DEVICES HISI_SAS_MAX_ITCT_ENTRIES #define HISI_SAS_RESET_BIT 0 +#define HISI_SAS_REJECT_CMD_BIT 1 #define HISI_SAS_STATUS_BUF_SZ (sizeof(struct hisi_sas_status_buffer)) #define HISI_SAS_COMMAND_TABLE_SZ (sizeof(union hisi_sas_command_table)) @@ -90,6 +90,14 @@ enum hisi_sas_dev_type { HISI_SAS_DEV_TYPE_SATA, }; +struct hisi_sas_hw_error { + u32 irq_msk; + u32 msk; + int shift; + const char *msg; + int reg; +}; + struct hisi_sas_phy { struct hisi_hba *hisi_hba; struct hisi_sas_port *port; @@ -132,6 +140,7 @@ struct hisi_sas_dq { struct hisi_sas_device { struct hisi_hba *hisi_hba; struct domain_device *sas_device; + struct completion *completion; struct hisi_sas_dq *dq; struct list_head list; u64 attached_phy; @@ -192,6 +201,7 @@ struct hisi_sas_hw { void (*phy_enable)(struct hisi_hba *hisi_hba, int phy_no); void (*phy_disable)(struct hisi_hba *hisi_hba, int phy_no); void (*phy_hard_reset)(struct hisi_hba *hisi_hba, int phy_no); + void (*get_events)(struct hisi_hba *hisi_hba, int phy_no); void (*phy_set_linkrate)(struct hisi_hba *hisi_hba, int phy_no, struct sas_phy_linkrates *linkrates); enum sas_linkrate (*phy_get_max_linkrate)(void); @@ -201,6 +211,7 @@ struct hisi_sas_hw { void (*dereg_device)(struct hisi_hba *hisi_hba, struct domain_device *device); int (*soft_reset)(struct hisi_hba *hisi_hba); + u32 (*get_phys_state)(struct hisi_hba *hisi_hba); int max_command_entries; int complete_hdr_size; }; @@ -390,6 +401,7 @@ struct hisi_sas_slot_buf_table { extern struct scsi_transport_template *hisi_sas_stt; extern struct scsi_host_template *hisi_sas_sht; +extern void hisi_sas_stop_phys(struct hisi_hba *hisi_hba); extern void hisi_sas_init_add(struct hisi_hba *hisi_hba); extern int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost); extern void hisi_sas_free(struct hisi_hba *hisi_hba); @@ -408,6 +420,4 @@ extern void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, struct hisi_sas_slot *slot); extern void hisi_sas_init_mem(struct hisi_hba *hisi_hba); -extern void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state, - u32 state); #endif diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 4022c3f8295f..16664f2e15fb 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -61,6 +61,7 @@ u8 hisi_sas_get_ata_protocol(u8 cmd, int direction) case ATA_CMD_WRITE_QUEUED: case ATA_CMD_WRITE_LOG_DMA_EXT: case ATA_CMD_WRITE_STREAM_DMA_EXT: + case 
ATA_CMD_ZAC_MGMT_IN: return HISI_SAS_SATA_PROTOCOL_DMA; case ATA_CMD_CHK_POWER: @@ -73,6 +74,7 @@ u8 hisi_sas_get_ata_protocol(u8 cmd, int direction) case ATA_CMD_SET_FEATURES: case ATA_CMD_STANDBY: case ATA_CMD_STANDBYNOW1: + case ATA_CMD_ZAC_MGMT_OUT: return HISI_SAS_SATA_PROTOCOL_NONDATA; default: if (direction == DMA_NONE) @@ -125,6 +127,15 @@ struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port) } EXPORT_SYMBOL_GPL(to_hisi_sas_port); +void hisi_sas_stop_phys(struct hisi_hba *hisi_hba) +{ + int phy_no; + + for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) + hisi_hba->hw->phy_disable(hisi_hba, phy_no); +} +EXPORT_SYMBOL_GPL(hisi_sas_stop_phys); + static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx) { void *bitmap = hisi_hba->slot_index_tags; @@ -433,7 +444,7 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags, struct hisi_sas_device *sas_dev = device->lldd_dev; struct hisi_sas_dq *dq = sas_dev->dq; - if (unlikely(test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))) + if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) return -EINVAL; /* protect task_prep and start_delivery sequence */ @@ -716,7 +727,6 @@ static void hisi_sas_dev_gone(struct domain_device *device) struct hisi_sas_device *sas_dev = device->lldd_dev; struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); struct device *dev = hisi_hba->dev; - int dev_id = sas_dev->device_id; dev_info(dev, "found dev[%d:%x] is gone\n", sas_dev->device_id, sas_dev->dev_type); @@ -729,9 +739,7 @@ static void hisi_sas_dev_gone(struct domain_device *device) hisi_hba->hw->free_device(hisi_hba, sas_dev); device->lldd_dev = NULL; memset(sas_dev, 0, sizeof(*sas_dev)); - sas_dev->device_id = dev_id; sas_dev->dev_type = SAS_PHY_UNUSED; - sas_dev->dev_status = HISI_SAS_DEV_NORMAL; } static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags) @@ -764,7 +772,12 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, case PHY_FUNC_SET_LINK_RATE: hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, funcdata); break; - + case PHY_FUNC_GET_EVENTS: + if (hisi_hba->hw->get_events) { + hisi_hba->hw->get_events(hisi_hba, phy_no); + break; + } + /* fallthru */ case PHY_FUNC_RELEASE_SPINUP_HOLD: default: return -EOPNOTSUPP; @@ -967,37 +980,117 @@ static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device, sizeof(ssp_task), tmf); } +static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba, + struct asd_sas_port *sas_port, enum sas_linkrate linkrate) +{ + struct hisi_sas_device *sas_dev; + struct domain_device *device; + int i; + + for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { + sas_dev = &hisi_hba->devices[i]; + device = sas_dev->sas_device; + if ((sas_dev->dev_type == SAS_PHY_UNUSED) + || !device || (device->port != sas_port)) + continue; + + hisi_hba->hw->free_device(hisi_hba, sas_dev); + + /* Update linkrate of directly attached device. 
*/ + if (!device->parent) + device->linkrate = linkrate; + + hisi_hba->hw->setup_itct(hisi_hba, sas_dev); + } +} + +static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state, + u32 state) +{ + struct sas_ha_struct *sas_ha = &hisi_hba->sha; + struct asd_sas_port *_sas_port = NULL; + int phy_no; + + for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + struct asd_sas_port *sas_port = sas_phy->port; + struct hisi_sas_port *port = to_hisi_sas_port(sas_port); + bool do_port_check = !!(_sas_port != sas_port); + + if (!sas_phy->phy->enabled) + continue; + + /* Report PHY state change to libsas */ + if (state & (1 << phy_no)) { + if (do_port_check && sas_port) { + struct domain_device *dev = sas_port->port_dev; + + _sas_port = sas_port; + port->id = phy->port_id; + hisi_sas_refresh_port_id(hisi_hba, + sas_port, sas_phy->linkrate); + + if (DEV_IS_EXPANDER(dev->dev_type)) + sas_ha->notify_port_event(sas_phy, + PORTE_BROADCAST_RCVD); + } + } else if (old_state & (1 << phy_no)) + /* PHY down but was up before */ + hisi_sas_phy_down(hisi_hba, phy_no, 0); + + } + + drain_workqueue(hisi_hba->shost->work_q); +} + static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba) { + struct sas_ha_struct *sas_ha = &hisi_hba->sha; + struct device *dev = hisi_hba->dev; + struct Scsi_Host *shost = hisi_hba->shost; + u32 old_state, state; + unsigned long flags; int rc; if (!hisi_hba->hw->soft_reset) return -1; - if (!test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) { - struct device *dev = hisi_hba->dev; - struct sas_ha_struct *sas_ha = &hisi_hba->sha; - unsigned long flags; - - dev_dbg(dev, "controller reset begins!\n"); - scsi_block_requests(hisi_hba->shost); - rc = hisi_hba->hw->soft_reset(hisi_hba); - if (rc) { - dev_warn(dev, "controller reset failed (%d)\n", rc); - goto out; - } - spin_lock_irqsave(&hisi_hba->lock, flags); - hisi_sas_release_tasks(hisi_hba); - spin_unlock_irqrestore(&hisi_hba->lock, flags); - - sas_ha->notify_ha_event(sas_ha, HAE_RESET); - dev_dbg(dev, "controller reset successful!\n"); - } else + if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) return -1; + dev_dbg(dev, "controller resetting...\n"); + old_state = hisi_hba->hw->get_phys_state(hisi_hba); + + scsi_block_requests(shost); + set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); + rc = hisi_hba->hw->soft_reset(hisi_hba); + if (rc) { + dev_warn(dev, "controller reset failed (%d)\n", rc); + clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); + goto out; + } + spin_lock_irqsave(&hisi_hba->lock, flags); + hisi_sas_release_tasks(hisi_hba); + spin_unlock_irqrestore(&hisi_hba->lock, flags); + + sas_ha->notify_ha_event(sas_ha, HAE_RESET); + clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); + + /* Init and wait for PHYs to come up and all libsas event finished. 
*/ + hisi_hba->hw->phys_init(hisi_hba); + msleep(1000); + drain_workqueue(hisi_hba->wq); + drain_workqueue(shost->work_q); + + state = hisi_hba->hw->get_phys_state(hisi_hba); + hisi_sas_rescan_topology(hisi_hba, old_state, state); + dev_dbg(dev, "controller reset complete\n"); + out: - scsi_unblock_requests(hisi_hba->shost); + scsi_unblock_requests(shost); clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags); + return rc; } @@ -1241,7 +1334,7 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id, int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx; unsigned long flags, flags_dq; - if (unlikely(test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))) + if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) return -EINVAL; if (!device->port) @@ -1279,12 +1372,21 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id, slot->port = port; task->lldd_task = slot; + slot->buf = dma_pool_alloc(hisi_hba->buffer_pool, + GFP_ATOMIC, &slot->buf_dma); + if (!slot->buf) { + rc = -ENOMEM; + goto err_out_tag; + } + memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr)); + memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ); + memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ); rc = hisi_sas_task_prep_abort(hisi_hba, slot, device_id, abort_flag, task_tag); if (rc) - goto err_out_tag; + goto err_out_buf; list_add_tail(&slot->entry, &sas_dev->list); @@ -1302,6 +1404,9 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id, return 0; +err_out_buf: + dma_pool_free(hisi_hba->buffer_pool, slot->buf, + slot->buf_dma); err_out_tag: spin_lock_irqsave(&hisi_hba->lock, flags); hisi_sas_slot_index_free(hisi_hba, slot_idx); @@ -1437,36 +1542,6 @@ void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy) } EXPORT_SYMBOL_GPL(hisi_sas_phy_down); -void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state, - u32 state) -{ - struct sas_ha_struct *sas_ha = &hisi_hba->sha; - int phy_no; - - for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { - struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; - struct asd_sas_phy *sas_phy = &phy->sas_phy; - struct asd_sas_port *sas_port = sas_phy->port; - struct domain_device *dev; - - if (sas_phy->enabled) { - /* Report PHY state change to libsas */ - if (state & (1 << phy_no)) - continue; - - if (old_state & (1 << phy_no)) - /* PHY down but was up before */ - hisi_sas_phy_down(hisi_hba, phy_no, 0); - } - if (!sas_port) - continue; - dev = sas_port->port_dev; - - if (DEV_IS_EXPANDER(dev->dev_type)) - sas_ha->notify_phy_event(sas_phy, PORTE_BROADCAST_RCVD); - } -} -EXPORT_SYMBOL_GPL(hisi_sas_rescan_topology); struct scsi_transport_template *hisi_sas_stt; EXPORT_SYMBOL_GPL(hisi_sas_stt); @@ -1487,7 +1562,7 @@ static struct scsi_host_template _hisi_sas_sht = { .max_sectors = SCSI_DEFAULT_MAX_SECTORS, .use_clustering = ENABLE_CLUSTERING, .eh_device_reset_handler = sas_eh_device_reset_handler, - .eh_bus_reset_handler = sas_eh_bus_reset_handler, + .eh_target_reset_handler = sas_eh_target_reset_handler, .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, }; @@ -1825,7 +1900,7 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, return shost; err_out: - kfree(shost); + scsi_host_put(shost); dev_err(dev, "shost alloc failed\n"); return NULL; } @@ -1916,7 +1991,7 @@ int hisi_sas_probe(struct platform_device *pdev, scsi_remove_host(shost); err_out_ha: hisi_sas_free(hisi_hba); - kfree(shost); + scsi_host_put(shost); return rc; } 
EXPORT_SYMBOL_GPL(hisi_sas_probe); @@ -1931,15 +2006,13 @@ int hisi_sas_remove(struct platform_device *pdev) sas_remove_host(sha->core.shost); hisi_sas_free(hisi_hba); - kfree(shost); + scsi_host_put(shost); return 0; } EXPORT_SYMBOL_GPL(hisi_sas_remove); static __init int hisi_sas_init(void) { - pr_info("hisi_sas: driver version %s\n", DRV_VERSION); - hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops); if (!hisi_sas_stt) return -ENOMEM; @@ -1955,7 +2028,6 @@ static __exit void hisi_sas_exit(void) module_init(hisi_sas_init); module_exit(hisi_sas_exit); -MODULE_VERSION(DRV_VERSION); MODULE_LICENSE("GPL"); MODULE_AUTHOR("John Garry "); MODULE_DESCRIPTION("HISILICON SAS controller driver"); diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index 2bfea7082e3a..779af979b6db 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -256,6 +256,8 @@ #define LINK_DFX2_RCVR_HOLD_STS_MSK (0x1 << LINK_DFX2_RCVR_HOLD_STS_OFF) #define LINK_DFX2_SEND_HOLD_STS_OFF 10 #define LINK_DFX2_SEND_HOLD_STS_MSK (0x1 << LINK_DFX2_SEND_HOLD_STS_OFF) +#define SAS_ERR_CNT4_REG (PORT_BASE + 0x290) +#define SAS_ERR_CNT6_REG (PORT_BASE + 0x298) #define PHY_CTRL_RDY_MSK (PORT_BASE + 0x2b0) #define PHYCTRL_NOT_RDY_MSK (PORT_BASE + 0x2b4) #define PHYCTRL_DWS_RESET_MSK (PORT_BASE + 0x2b8) @@ -399,6 +401,172 @@ struct hisi_sas_err_record_v2 { __le32 dma_rx_err_type; }; +static const struct hisi_sas_hw_error one_bit_ecc_errors[] = { + { + .irq_msk = BIT(SAS_ECC_INTR_DQE_ECC_1B_OFF), + .msk = HGC_DQE_ECC_1B_ADDR_MSK, + .shift = HGC_DQE_ECC_1B_ADDR_OFF, + .msg = "hgc_dqe_acc1b_intr found: \ + Ram address is 0x%08X\n", + .reg = HGC_DQE_ECC_ADDR, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_IOST_ECC_1B_OFF), + .msk = HGC_IOST_ECC_1B_ADDR_MSK, + .shift = HGC_IOST_ECC_1B_ADDR_OFF, + .msg = "hgc_iost_acc1b_intr found: \ + Ram address is 0x%08X\n", + .reg = HGC_IOST_ECC_ADDR, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_ITCT_ECC_1B_OFF), + .msk = HGC_ITCT_ECC_1B_ADDR_MSK, + .shift = HGC_ITCT_ECC_1B_ADDR_OFF, + .msg = "hgc_itct_acc1b_intr found: \ + Ram address is 0x%08X\n", + .reg = HGC_ITCT_ECC_ADDR, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF), + .msk = HGC_LM_DFX_STATUS2_IOSTLIST_MSK, + .shift = HGC_LM_DFX_STATUS2_IOSTLIST_OFF, + .msg = "hgc_iostl_acc1b_intr found: \ + memory address is 0x%08X\n", + .reg = HGC_LM_DFX_STATUS2, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF), + .msk = HGC_LM_DFX_STATUS2_ITCTLIST_MSK, + .shift = HGC_LM_DFX_STATUS2_ITCTLIST_OFF, + .msg = "hgc_itctl_acc1b_intr found: \ + memory address is 0x%08X\n", + .reg = HGC_LM_DFX_STATUS2, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_CQE_ECC_1B_OFF), + .msk = HGC_CQE_ECC_1B_ADDR_MSK, + .shift = HGC_CQE_ECC_1B_ADDR_OFF, + .msg = "hgc_cqe_acc1b_intr found: \ + Ram address is 0x%08X\n", + .reg = HGC_CQE_ECC_ADDR, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF), + .msk = HGC_RXM_DFX_STATUS14_MEM0_MSK, + .shift = HGC_RXM_DFX_STATUS14_MEM0_OFF, + .msg = "rxm_mem0_acc1b_intr found: \ + memory address is 0x%08X\n", + .reg = HGC_RXM_DFX_STATUS14, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF), + .msk = HGC_RXM_DFX_STATUS14_MEM1_MSK, + .shift = HGC_RXM_DFX_STATUS14_MEM1_OFF, + .msg = "rxm_mem1_acc1b_intr found: \ + memory address is 0x%08X\n", + .reg = HGC_RXM_DFX_STATUS14, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF), + .msk = HGC_RXM_DFX_STATUS14_MEM2_MSK, + .shift = HGC_RXM_DFX_STATUS14_MEM2_OFF, + .msg = 
"rxm_mem2_acc1b_intr found: \ + memory address is 0x%08X\n", + .reg = HGC_RXM_DFX_STATUS14, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF), + .msk = HGC_RXM_DFX_STATUS15_MEM3_MSK, + .shift = HGC_RXM_DFX_STATUS15_MEM3_OFF, + .msg = "rxm_mem3_acc1b_intr found: \ + memory address is 0x%08X\n", + .reg = HGC_RXM_DFX_STATUS15, + }, +}; + +static const struct hisi_sas_hw_error multi_bit_ecc_errors[] = { + { + .irq_msk = BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF), + .msk = HGC_DQE_ECC_MB_ADDR_MSK, + .shift = HGC_DQE_ECC_MB_ADDR_OFF, + .msg = "hgc_dqe_accbad_intr (0x%x) found: \ + Ram address is 0x%08X\n", + .reg = HGC_DQE_ECC_ADDR, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF), + .msk = HGC_IOST_ECC_MB_ADDR_MSK, + .shift = HGC_IOST_ECC_MB_ADDR_OFF, + .msg = "hgc_iost_accbad_intr (0x%x) found: \ + Ram address is 0x%08X\n", + .reg = HGC_IOST_ECC_ADDR, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF), + .msk = HGC_ITCT_ECC_MB_ADDR_MSK, + .shift = HGC_ITCT_ECC_MB_ADDR_OFF, + .msg = "hgc_itct_accbad_intr (0x%x) found: \ + Ram address is 0x%08X\n", + .reg = HGC_ITCT_ECC_ADDR, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF), + .msk = HGC_LM_DFX_STATUS2_IOSTLIST_MSK, + .shift = HGC_LM_DFX_STATUS2_IOSTLIST_OFF, + .msg = "hgc_iostl_accbad_intr (0x%x) found: \ + memory address is 0x%08X\n", + .reg = HGC_LM_DFX_STATUS2, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF), + .msk = HGC_LM_DFX_STATUS2_ITCTLIST_MSK, + .shift = HGC_LM_DFX_STATUS2_ITCTLIST_OFF, + .msg = "hgc_itctl_accbad_intr (0x%x) found: \ + memory address is 0x%08X\n", + .reg = HGC_LM_DFX_STATUS2, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF), + .msk = HGC_CQE_ECC_MB_ADDR_MSK, + .shift = HGC_CQE_ECC_MB_ADDR_OFF, + .msg = "hgc_cqe_accbad_intr (0x%x) found: \ + Ram address is 0x%08X\n", + .reg = HGC_CQE_ECC_ADDR, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF), + .msk = HGC_RXM_DFX_STATUS14_MEM0_MSK, + .shift = HGC_RXM_DFX_STATUS14_MEM0_OFF, + .msg = "rxm_mem0_accbad_intr (0x%x) found: \ + memory address is 0x%08X\n", + .reg = HGC_RXM_DFX_STATUS14, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF), + .msk = HGC_RXM_DFX_STATUS14_MEM1_MSK, + .shift = HGC_RXM_DFX_STATUS14_MEM1_OFF, + .msg = "rxm_mem1_accbad_intr (0x%x) found: \ + memory address is 0x%08X\n", + .reg = HGC_RXM_DFX_STATUS14, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF), + .msk = HGC_RXM_DFX_STATUS14_MEM2_MSK, + .shift = HGC_RXM_DFX_STATUS14_MEM2_OFF, + .msg = "rxm_mem2_accbad_intr (0x%x) found: \ + memory address is 0x%08X\n", + .reg = HGC_RXM_DFX_STATUS14, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF), + .msk = HGC_RXM_DFX_STATUS15_MEM3_MSK, + .shift = HGC_RXM_DFX_STATUS15_MEM3_OFF, + .msg = "rxm_mem3_accbad_intr (0x%x) found: \ + memory address is 0x%08X\n", + .reg = HGC_RXM_DFX_STATUS15, + }, +}; + enum { HISI_SAS_PHY_PHY_UPDOWN, HISI_SAS_PHY_CHNL_INT, @@ -806,12 +974,14 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba, static void free_device_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_device *sas_dev) { + DECLARE_COMPLETION_ONSTACK(completion); u64 dev_id = sas_dev->device_id; - struct device *dev = hisi_hba->dev; struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id]; u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3); int i; + sas_dev->completion = &completion; + /* SoC bug workaround */ if (dev_is_sata(sas_dev->sas_device)) clear_bit(sas_dev->sata_idx, hisi_hba->sata_dev_bitmap); @@ -821,28 +991,12 @@ static void free_device_v2_hw(struct hisi_hba *hisi_hba, 
hisi_sas_write32(hisi_hba, ENT_INT_SRC3, ENT_INT_SRC3_ITC_INT_MSK); - /* clear the itct int*/ for (i = 0; i < 2; i++) { - /* clear the itct table*/ - reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR); - reg_val |= ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK); + reg_val = ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK); hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val); + wait_for_completion(sas_dev->completion); - udelay(10); - reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3); - if (ENT_INT_SRC3_ITC_INT_MSK & reg_val) { - dev_dbg(dev, "got clear ITCT done interrupt\n"); - - /* invalid the itct state*/ - memset(itct, 0, sizeof(struct hisi_sas_itct)); - hisi_sas_write32(hisi_hba, ENT_INT_SRC3, - ENT_INT_SRC3_ITC_INT_MSK); - - /* clear the itct */ - hisi_sas_write32(hisi_hba, ITCT_CLR, 0); - dev_dbg(dev, "clear ITCT ok\n"); - break; - } + memset(itct, 0, sizeof(struct hisi_sas_itct)); } } @@ -1023,7 +1177,7 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba) hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff); hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0x7efefefe); hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0x7efefefe); - hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0x7ffffffe); + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0x7ffe20fe); hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xfff00c30); for (i = 0; i < hisi_hba->queue_count; i++) hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0); @@ -1332,25 +1486,12 @@ static void start_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no) enable_phy_v2_hw(hisi_hba, phy_no); } -static void stop_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no) -{ - disable_phy_v2_hw(hisi_hba, phy_no); -} - -static void stop_phys_v2_hw(struct hisi_hba *hisi_hba) -{ - int i; - - for (i = 0; i < hisi_hba->n_phy; i++) - stop_phy_v2_hw(hisi_hba, i); -} - static void phy_hard_reset_v2_hw(struct hisi_hba *hisi_hba, int phy_no) { struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; u32 txid_auto; - stop_phy_v2_hw(hisi_hba, phy_no); + disable_phy_v2_hw(hisi_hba, phy_no); if (phy->identify.device_type == SAS_END_DEVICE) { txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO); hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO, @@ -1360,17 +1501,38 @@ static void phy_hard_reset_v2_hw(struct hisi_hba *hisi_hba, int phy_no) start_phy_v2_hw(hisi_hba, phy_no); } -static void start_phys_v2_hw(struct hisi_hba *hisi_hba) +static void phy_get_events_v2_hw(struct hisi_hba *hisi_hba, int phy_no) { - int i; + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + struct sas_phy *sphy = sas_phy->phy; + u32 err4_reg_val, err6_reg_val; - for (i = 0; i < hisi_hba->n_phy; i++) - start_phy_v2_hw(hisi_hba, i); + /* loss dword syn, phy reset problem */ + err4_reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, SAS_ERR_CNT4_REG); + + /* disparity err, invalid dword */ + err6_reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, SAS_ERR_CNT6_REG); + + sphy->loss_of_dword_sync_count += (err4_reg_val >> 16) & 0xFFFF; + sphy->phy_reset_problem_count += err4_reg_val & 0xFFFF; + sphy->invalid_dword_count += (err6_reg_val & 0xFF0000) >> 16; + sphy->running_disparity_error_count += err6_reg_val & 0xFF; } static void phys_init_v2_hw(struct hisi_hba *hisi_hba) { - start_phys_v2_hw(hisi_hba); + int i; + + for (i = 0; i < hisi_hba->n_phy; i++) { + struct hisi_sas_phy *phy = &hisi_hba->phy[i]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + + if (!sas_phy->phy->enabled) + continue; + + start_phy_v2_hw(hisi_hba, i); + } } static void sl_notify_v2_hw(struct hisi_hba *hisi_hba, int 
phy_no) @@ -1965,7 +2127,7 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba, } case DMA_RX_DATA_LEN_UNDERFLOW: { - ts->residual = dma_rx_err_type; + ts->residual = trans_tx_fail_type; ts->stat = SAS_DATA_UNDERRUN; break; } @@ -2091,7 +2253,7 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba, } case DMA_RX_DATA_LEN_UNDERFLOW: { - ts->residual = dma_rx_err_type; + ts->residual = trans_tx_fail_type; ts->stat = SAS_DATA_UNDERRUN; break; } @@ -2599,6 +2761,7 @@ static irqreturn_t int_phy_updown_v2_hw(int irq_no, void *p) struct hisi_hba *hisi_hba = p; u32 irq_msk; int phy_no = 0; + irqreturn_t res = IRQ_NONE; irq_msk = (hisi_sas_read32(hisi_hba, HGC_INVLD_DQE_INFO) >> HGC_INVLD_DQE_INFO_FB_CH0_OFF) & 0x1ff; @@ -2613,15 +2776,15 @@ static irqreturn_t int_phy_updown_v2_hw(int irq_no, void *p) case CHL_INT0_SL_PHY_ENABLE_MSK: /* phy up */ if (phy_up_v2_hw(phy_no, hisi_hba) == - IRQ_NONE) - return IRQ_NONE; + IRQ_HANDLED) + res = IRQ_HANDLED; break; case CHL_INT0_NOT_RDY_MSK: /* phy down */ if (phy_down_v2_hw(phy_no, hisi_hba) == - IRQ_NONE) - return IRQ_NONE; + IRQ_HANDLED) + res = IRQ_HANDLED; break; case (CHL_INT0_NOT_RDY_MSK | @@ -2631,13 +2794,13 @@ static irqreturn_t int_phy_updown_v2_hw(int irq_no, void *p) if (reg_value & BIT(phy_no)) { /* phy up */ if (phy_up_v2_hw(phy_no, hisi_hba) == - IRQ_NONE) - return IRQ_NONE; + IRQ_HANDLED) + res = IRQ_HANDLED; } else { /* phy down */ if (phy_down_v2_hw(phy_no, hisi_hba) == - IRQ_NONE) - return IRQ_NONE; + IRQ_HANDLED) + res = IRQ_HANDLED; } break; @@ -2650,7 +2813,7 @@ static irqreturn_t int_phy_updown_v2_hw(int irq_no, void *p) phy_no++; } - return IRQ_HANDLED; + return res; } static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba) @@ -2733,194 +2896,38 @@ static void one_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba, u32 irq_value) { struct device *dev = hisi_hba->dev; - u32 reg_val; + const struct hisi_sas_hw_error *ecc_error; + u32 val; + int i; - if (irq_value & BIT(SAS_ECC_INTR_DQE_ECC_1B_OFF)) { - reg_val = hisi_sas_read32(hisi_hba, HGC_DQE_ECC_ADDR); - dev_warn(dev, "hgc_dqe_acc1b_intr found: \ - Ram address is 0x%08X\n", - (reg_val & HGC_DQE_ECC_1B_ADDR_MSK) >> - HGC_DQE_ECC_1B_ADDR_OFF); + for (i = 0; i < ARRAY_SIZE(one_bit_ecc_errors); i++) { + ecc_error = &one_bit_ecc_errors[i]; + if (irq_value & ecc_error->irq_msk) { + val = hisi_sas_read32(hisi_hba, ecc_error->reg); + val &= ecc_error->msk; + val >>= ecc_error->shift; + dev_warn(dev, ecc_error->msg, val); + } } - - if (irq_value & BIT(SAS_ECC_INTR_IOST_ECC_1B_OFF)) { - reg_val = hisi_sas_read32(hisi_hba, HGC_IOST_ECC_ADDR); - dev_warn(dev, "hgc_iost_acc1b_intr found: \ - Ram address is 0x%08X\n", - (reg_val & HGC_IOST_ECC_1B_ADDR_MSK) >> - HGC_IOST_ECC_1B_ADDR_OFF); - } - - if (irq_value & BIT(SAS_ECC_INTR_ITCT_ECC_1B_OFF)) { - reg_val = hisi_sas_read32(hisi_hba, HGC_ITCT_ECC_ADDR); - dev_warn(dev, "hgc_itct_acc1b_intr found: \ - Ram address is 0x%08X\n", - (reg_val & HGC_ITCT_ECC_1B_ADDR_MSK) >> - HGC_ITCT_ECC_1B_ADDR_OFF); - } - - if (irq_value & BIT(SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF)) { - reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2); - dev_warn(dev, "hgc_iostl_acc1b_intr found: \ - memory address is 0x%08X\n", - (reg_val & HGC_LM_DFX_STATUS2_IOSTLIST_MSK) >> - HGC_LM_DFX_STATUS2_IOSTLIST_OFF); - } - - if (irq_value & BIT(SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF)) { - reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2); - dev_warn(dev, "hgc_itctl_acc1b_intr found: \ - memory address is 0x%08X\n", - (reg_val & HGC_LM_DFX_STATUS2_ITCTLIST_MSK) >> 
- HGC_LM_DFX_STATUS2_ITCTLIST_OFF); - } - - if (irq_value & BIT(SAS_ECC_INTR_CQE_ECC_1B_OFF)) { - reg_val = hisi_sas_read32(hisi_hba, HGC_CQE_ECC_ADDR); - dev_warn(dev, "hgc_cqe_acc1b_intr found: \ - Ram address is 0x%08X\n", - (reg_val & HGC_CQE_ECC_1B_ADDR_MSK) >> - HGC_CQE_ECC_1B_ADDR_OFF); - } - - if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF)) { - reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14); - dev_warn(dev, "rxm_mem0_acc1b_intr found: \ - memory address is 0x%08X\n", - (reg_val & HGC_RXM_DFX_STATUS14_MEM0_MSK) >> - HGC_RXM_DFX_STATUS14_MEM0_OFF); - } - - if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF)) { - reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14); - dev_warn(dev, "rxm_mem1_acc1b_intr found: \ - memory address is 0x%08X\n", - (reg_val & HGC_RXM_DFX_STATUS14_MEM1_MSK) >> - HGC_RXM_DFX_STATUS14_MEM1_OFF); - } - - if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF)) { - reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14); - dev_warn(dev, "rxm_mem2_acc1b_intr found: \ - memory address is 0x%08X\n", - (reg_val & HGC_RXM_DFX_STATUS14_MEM2_MSK) >> - HGC_RXM_DFX_STATUS14_MEM2_OFF); - } - - if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF)) { - reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS15); - dev_warn(dev, "rxm_mem3_acc1b_intr found: \ - memory address is 0x%08X\n", - (reg_val & HGC_RXM_DFX_STATUS15_MEM3_MSK) >> - HGC_RXM_DFX_STATUS15_MEM3_OFF); - } - } static void multi_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba, u32 irq_value) { - u32 reg_val; struct device *dev = hisi_hba->dev; + const struct hisi_sas_hw_error *ecc_error; + u32 val; + int i; - if (irq_value & BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF)) { - reg_val = hisi_sas_read32(hisi_hba, HGC_DQE_ECC_ADDR); - dev_warn(dev, "hgc_dqe_accbad_intr (0x%x) found: \ - Ram address is 0x%08X\n", - irq_value, - (reg_val & HGC_DQE_ECC_MB_ADDR_MSK) >> - HGC_DQE_ECC_MB_ADDR_OFF); - queue_work(hisi_hba->wq, &hisi_hba->rst_work); - } - - if (irq_value & BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF)) { - reg_val = hisi_sas_read32(hisi_hba, HGC_IOST_ECC_ADDR); - dev_warn(dev, "hgc_iost_accbad_intr (0x%x) found: \ - Ram address is 0x%08X\n", - irq_value, - (reg_val & HGC_IOST_ECC_MB_ADDR_MSK) >> - HGC_IOST_ECC_MB_ADDR_OFF); - queue_work(hisi_hba->wq, &hisi_hba->rst_work); - } - - if (irq_value & BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF)) { - reg_val = hisi_sas_read32(hisi_hba, HGC_ITCT_ECC_ADDR); - dev_warn(dev,"hgc_itct_accbad_intr (0x%x) found: \ - Ram address is 0x%08X\n", - irq_value, - (reg_val & HGC_ITCT_ECC_MB_ADDR_MSK) >> - HGC_ITCT_ECC_MB_ADDR_OFF); - queue_work(hisi_hba->wq, &hisi_hba->rst_work); - } - - if (irq_value & BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF)) { - reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2); - dev_warn(dev, "hgc_iostl_accbad_intr (0x%x) found: \ - memory address is 0x%08X\n", - irq_value, - (reg_val & HGC_LM_DFX_STATUS2_IOSTLIST_MSK) >> - HGC_LM_DFX_STATUS2_IOSTLIST_OFF); - queue_work(hisi_hba->wq, &hisi_hba->rst_work); - } - - if (irq_value & BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF)) { - reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2); - dev_warn(dev, "hgc_itctl_accbad_intr (0x%x) found: \ - memory address is 0x%08X\n", - irq_value, - (reg_val & HGC_LM_DFX_STATUS2_ITCTLIST_MSK) >> - HGC_LM_DFX_STATUS2_ITCTLIST_OFF); - queue_work(hisi_hba->wq, &hisi_hba->rst_work); - } - - if (irq_value & BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF)) { - reg_val = hisi_sas_read32(hisi_hba, HGC_CQE_ECC_ADDR); - dev_warn(dev, "hgc_cqe_accbad_intr (0x%x) found: \ - Ram address is 0x%08X\n", - 
irq_value, - (reg_val & HGC_CQE_ECC_MB_ADDR_MSK) >> - HGC_CQE_ECC_MB_ADDR_OFF); - queue_work(hisi_hba->wq, &hisi_hba->rst_work); - } - - if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF)) { - reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14); - dev_warn(dev, "rxm_mem0_accbad_intr (0x%x) found: \ - memory address is 0x%08X\n", - irq_value, - (reg_val & HGC_RXM_DFX_STATUS14_MEM0_MSK) >> - HGC_RXM_DFX_STATUS14_MEM0_OFF); - queue_work(hisi_hba->wq, &hisi_hba->rst_work); - } - - if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF)) { - reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14); - dev_warn(dev, "rxm_mem1_accbad_intr (0x%x) found: \ - memory address is 0x%08X\n", - irq_value, - (reg_val & HGC_RXM_DFX_STATUS14_MEM1_MSK) >> - HGC_RXM_DFX_STATUS14_MEM1_OFF); - queue_work(hisi_hba->wq, &hisi_hba->rst_work); - } - - if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF)) { - reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14); - dev_warn(dev, "rxm_mem2_accbad_intr (0x%x) found: \ - memory address is 0x%08X\n", - irq_value, - (reg_val & HGC_RXM_DFX_STATUS14_MEM2_MSK) >> - HGC_RXM_DFX_STATUS14_MEM2_OFF); - queue_work(hisi_hba->wq, &hisi_hba->rst_work); - } - - if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF)) { - reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS15); - dev_warn(dev, "rxm_mem3_accbad_intr (0x%x) found: \ - memory address is 0x%08X\n", - irq_value, - (reg_val & HGC_RXM_DFX_STATUS15_MEM3_MSK) >> - HGC_RXM_DFX_STATUS15_MEM3_OFF); - queue_work(hisi_hba->wq, &hisi_hba->rst_work); + for (i = 0; i < ARRAY_SIZE(multi_bit_ecc_errors); i++) { + ecc_error = &multi_bit_ecc_errors[i]; + if (irq_value & ecc_error->irq_msk) { + val = hisi_sas_read32(hisi_hba, ecc_error->reg); + val &= ecc_error->msk; + val >>= ecc_error->shift; + dev_warn(dev, ecc_error->msg, irq_value, val); + queue_work(hisi_hba->wq, &hisi_hba->rst_work); + } } return; @@ -3053,8 +3060,20 @@ static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p) irq_value); queue_work(hisi_hba->wq, &hisi_hba->rst_work); } + + if (irq_value & BIT(ENT_INT_SRC3_ITC_INT_OFF)) { + u32 reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR); + u32 dev_id = reg_val & ITCT_DEV_MSK; + struct hisi_sas_device *sas_dev = + &hisi_hba->devices[dev_id]; + + hisi_sas_write32(hisi_hba, ITCT_CLR, 0); + dev_dbg(dev, "clear ITCT ok\n"); + complete(sas_dev->completion); + } } + hisi_sas_write32(hisi_hba, ENT_INT_SRC3, irq_value); hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk); return IRQ_HANDLED; @@ -3251,97 +3270,92 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba) { struct platform_device *pdev = hisi_hba->platform_dev; struct device *dev = &pdev->dev; - int i, irq, rc, irq_map[128]; - + int irq, rc, irq_map[128]; + int i, phy_no, fatal_no, queue_no, k; for (i = 0; i < 128; i++) irq_map[i] = platform_get_irq(pdev, i); for (i = 0; i < HISI_SAS_PHY_INT_NR; i++) { - int idx = i; - - irq = irq_map[idx + 1]; /* Phy up/down is irq1 */ - if (!irq) { - dev_err(dev, "irq init: fail map phy interrupt %d\n", - idx); - return -ENOENT; - } - + irq = irq_map[i + 1]; /* Phy up/down is irq1 */ rc = devm_request_irq(dev, irq, phy_interrupts[i], 0, DRV_NAME " phy", hisi_hba); if (rc) { dev_err(dev, "irq init: could not request " "phy interrupt %d, rc=%d\n", irq, rc); - return -ENOENT; + rc = -ENOENT; + goto free_phy_int_irqs; } } - for (i = 0; i < hisi_hba->n_phy; i++) { - struct hisi_sas_phy *phy = &hisi_hba->phy[i]; - int idx = i + 72; /* First SATA interrupt is irq72 */ - - irq = irq_map[idx]; - if (!irq) { - dev_err(dev, "irq 
init: fail map phy interrupt %d\n", - idx); - return -ENOENT; - } + for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + irq = irq_map[phy_no + 72]; rc = devm_request_irq(dev, irq, sata_int_v2_hw, 0, DRV_NAME " sata", phy); if (rc) { dev_err(dev, "irq init: could not request " "sata interrupt %d, rc=%d\n", irq, rc); - return -ENOENT; + rc = -ENOENT; + goto free_sata_int_irqs; } } - for (i = 0; i < HISI_SAS_FATAL_INT_NR; i++) { - int idx = i; - - irq = irq_map[idx + 81]; - if (!irq) { - dev_err(dev, "irq init: fail map fatal interrupt %d\n", - idx); - return -ENOENT; - } - - rc = devm_request_irq(dev, irq, fatal_interrupts[i], 0, + for (fatal_no = 0; fatal_no < HISI_SAS_FATAL_INT_NR; fatal_no++) { + irq = irq_map[fatal_no + 81]; + rc = devm_request_irq(dev, irq, fatal_interrupts[fatal_no], 0, DRV_NAME " fatal", hisi_hba); if (rc) { dev_err(dev, "irq init: could not request fatal interrupt %d, rc=%d\n", irq, rc); - return -ENOENT; + rc = -ENOENT; + goto free_fatal_int_irqs; } } - for (i = 0; i < hisi_hba->queue_count; i++) { - int idx = i + 96; /* First cq interrupt is irq96 */ - struct hisi_sas_cq *cq = &hisi_hba->cq[i]; + for (queue_no = 0; queue_no < hisi_hba->queue_count; queue_no++) { + struct hisi_sas_cq *cq = &hisi_hba->cq[queue_no]; struct tasklet_struct *t = &cq->tasklet; - irq = irq_map[idx]; - if (!irq) { - dev_err(dev, - "irq init: could not map cq interrupt %d\n", - idx); - return -ENOENT; - } + irq = irq_map[queue_no + 96]; rc = devm_request_irq(dev, irq, cq_interrupt_v2_hw, 0, - DRV_NAME " cq", &hisi_hba->cq[i]); + DRV_NAME " cq", cq); if (rc) { dev_err(dev, "irq init: could not request cq interrupt %d, rc=%d\n", irq, rc); - return -ENOENT; + rc = -ENOENT; + goto free_cq_int_irqs; } tasklet_init(t, cq_tasklet_v2_hw, (unsigned long)cq); } return 0; + +free_cq_int_irqs: + for (k = 0; k < queue_no; k++) { + struct hisi_sas_cq *cq = &hisi_hba->cq[k]; + + free_irq(irq_map[k + 96], cq); + tasklet_kill(&cq->tasklet); + } +free_fatal_int_irqs: + for (k = 0; k < fatal_no; k++) + free_irq(irq_map[k + 81], hisi_hba); +free_sata_int_irqs: + for (k = 0; k < phy_no; k++) { + struct hisi_sas_phy *phy = &hisi_hba->phy[k]; + + free_irq(irq_map[k + 72], phy); + } +free_phy_int_irqs: + for (k = 0; k < i; k++) + free_irq(irq_map[k + 1], hisi_hba); + return rc; } static int hisi_sas_v2_init(struct hisi_hba *hisi_hba) @@ -3383,19 +3397,21 @@ static void interrupt_disable_v2_hw(struct hisi_hba *hisi_hba) synchronize_irq(platform_get_irq(pdev, i)); } + +static u32 get_phys_state_v2_hw(struct hisi_hba *hisi_hba) +{ + return hisi_sas_read32(hisi_hba, PHY_STATE); +} + static int soft_reset_v2_hw(struct hisi_hba *hisi_hba) { struct device *dev = hisi_hba->dev; - u32 old_state, state; int rc, cnt; - int phy_no; - - old_state = hisi_sas_read32(hisi_hba, PHY_STATE); interrupt_disable_v2_hw(hisi_hba); hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0); - stop_phys_v2_hw(hisi_hba); + hisi_sas_stop_phys(hisi_hba); mdelay(10); @@ -3425,22 +3441,6 @@ static int soft_reset_v2_hw(struct hisi_hba *hisi_hba) phys_reject_stp_links_v2_hw(hisi_hba); - /* Re-enable the PHYs */ - for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { - struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; - struct asd_sas_phy *sas_phy = &phy->sas_phy; - - if (sas_phy->enabled) - start_phy_v2_hw(hisi_hba, phy_no); - } - - /* Wait for the PHYs to come up and read the PHY state */ - msleep(1000); - - state = hisi_sas_read32(hisi_hba, PHY_STATE); - - hisi_sas_rescan_topology(hisi_hba, 
old_state, state); - return 0; } @@ -3463,11 +3463,13 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = { .phy_enable = enable_phy_v2_hw, .phy_disable = disable_phy_v2_hw, .phy_hard_reset = phy_hard_reset_v2_hw, + .get_events = phy_get_events_v2_hw, .phy_set_linkrate = phy_set_linkrate_v2_hw, .phy_get_max_linkrate = phy_get_max_linkrate_v2_hw, .max_command_entries = HISI_SAS_COMMAND_ENTRIES_V2_HW, .complete_hdr_size = sizeof(struct hisi_sas_complete_v2_hdr), .soft_reset = soft_reset_v2_hw, + .get_phys_state = get_phys_state_v2_hw, }; static int hisi_sas_v2_probe(struct platform_device *pdev) @@ -3491,10 +3493,17 @@ static int hisi_sas_v2_remove(struct platform_device *pdev) { struct sas_ha_struct *sha = platform_get_drvdata(pdev); struct hisi_hba *hisi_hba = sha->lldd_ha; + int i; if (timer_pending(&hisi_hba->timer)) del_timer(&hisi_hba->timer); + for (i = 0; i < hisi_hba->queue_count; i++) { + struct hisi_sas_cq *cq = &hisi_hba->cq[i]; + + tasklet_kill(&cq->tasklet); + } + return hisi_sas_remove(pdev); } diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index 83d2dca1c650..2e5fa9717be8 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -23,14 +23,11 @@ #define PHY_STATE 0x24 #define PHY_PORT_NUM_MA 0x28 #define PHY_CONN_RATE 0x30 -#define AXI_AHB_CLK_CFG 0x3c #define ITCT_CLR 0x44 #define ITCT_CLR_EN_OFF 16 #define ITCT_CLR_EN_MSK (0x1 << ITCT_CLR_EN_OFF) #define ITCT_DEV_OFF 0 #define ITCT_DEV_MSK (0x7ff << ITCT_DEV_OFF) -#define AXI_USER1 0x48 -#define AXI_USER2 0x4c #define IO_SATA_BROKEN_MSG_ADDR_LO 0x58 #define IO_SATA_BROKEN_MSG_ADDR_HI 0x5c #define SATA_INITI_D2H_STORE_ADDR_LO 0x60 @@ -137,6 +134,7 @@ #define TX_HARDRST_MSK (0x1 << TX_HARDRST_OFF) #define RX_IDAF_DWORD0 (PORT_BASE + 0xc4) #define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc) +#define STP_LINK_TIMER (PORT_BASE + 0x120) #define SAS_SSP_CON_TIMER_CFG (PORT_BASE + 0x134) #define SAS_SMP_CON_TIMER_CFG (PORT_BASE + 0x138) #define SAS_STP_CON_TIMER_CFG (PORT_BASE + 0x13c) @@ -167,6 +165,31 @@ #define PHYCTRL_PHY_ENA_MSK (PORT_BASE + 0x2bc) #define SL_RX_BCAST_CHK_MSK (PORT_BASE + 0x2c0) #define PHYCTRL_OOB_RESTART_MSK (PORT_BASE + 0x2c4) +#define DMA_TX_STATUS (PORT_BASE + 0x2d0) +#define DMA_TX_STATUS_BUSY_OFF 0 +#define DMA_TX_STATUS_BUSY_MSK (0x1 << DMA_TX_STATUS_BUSY_OFF) +#define DMA_RX_STATUS (PORT_BASE + 0x2e8) +#define DMA_RX_STATUS_BUSY_OFF 0 +#define DMA_RX_STATUS_BUSY_MSK (0x1 << DMA_RX_STATUS_BUSY_OFF) + +#define MAX_ITCT_HW 4096 /* max the hw can support */ +#define DEFAULT_ITCT_HW 2048 /* reset value, not reprogrammed */ +#if (HISI_SAS_MAX_DEVICES > DEFAULT_ITCT_HW) +#error Max ITCT exceeded +#endif + +#define AXI_MASTER_CFG_BASE (0x5000) +#define AM_CTRL_GLOBAL (0x0) +#define AM_CURR_TRANS_RETURN (0x150) + +#define AM_CFG_MAX_TRANS (0x5010) +#define AM_CFG_SINGLE_PORT_MAX_TRANS (0x5014) +#define AXI_CFG (0x5100) +#define AM_ROB_ECC_ERR_ADDR (0x510c) +#define AM_ROB_ECC_ONEBIT_ERR_ADDR_OFF 0 +#define AM_ROB_ECC_ONEBIT_ERR_ADDR_MSK (0xff << AM_ROB_ECC_ONEBIT_ERR_ADDR_OFF) +#define AM_ROB_ECC_MULBIT_ERR_ADDR_OFF 8 +#define AM_ROB_ECC_MULBIT_ERR_ADDR_MSK (0xff << AM_ROB_ECC_MULBIT_ERR_ADDR_OFF) /* HW dma structures */ /* Delivery queue header */ @@ -354,8 +377,6 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba) /* Global registers init */ hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, (u32)((1ULL << hisi_hba->queue_count) - 1)); - hisi_sas_write32(hisi_hba, AXI_USER1, 0x0); - hisi_sas_write32(hisi_hba, AXI_USER2, 
0x40000060); hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108); hisi_sas_write32(hisi_hba, CFG_1US_TIMER_TRSH, 0xd); hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1); @@ -371,15 +392,14 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba) hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0); hisi_sas_write32(hisi_hba, CHNL_ENT_INT_MSK, 0x0); hisi_sas_write32(hisi_hba, HGC_COM_INT_MSK, 0x0); - hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xfff00c30); + hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0x0); hisi_sas_write32(hisi_hba, AWQOS_AWCACHE_CFG, 0xf0f0); hisi_sas_write32(hisi_hba, ARQOS_ARCACHE_CFG, 0xf0f0); for (i = 0; i < hisi_hba->queue_count; i++) hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0); - hisi_sas_write32(hisi_hba, AXI_AHB_CLK_CFG, 1); hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1); - hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff07fff); + hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE, 0x30000); for (i = 0; i < hisi_hba->n_phy; i++) { hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, 0x801); @@ -389,7 +409,6 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba) hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000); hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff); hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff); - hisi_sas_phy_write32(hisi_hba, i, SL_CFG, 0x83f801fc); hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0); hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0); hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0); @@ -398,9 +417,11 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba) hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x0); hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL, 0x199b4fa); hisi_sas_phy_write32(hisi_hba, i, SAS_SSP_CON_TIMER_CFG, - 0xa0064); + 0xa03e8); hisi_sas_phy_write32(hisi_hba, i, SAS_STP_CON_TIMER_CFG, - 0xa0064); + 0xa03e8); + hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, + 0x7f7a120); } for (i = 0; i < hisi_hba->queue_count; i++) { /* Delivery queue */ @@ -578,8 +599,6 @@ static void free_device_v3_hw(struct hisi_hba *hisi_hba, memset(itct, 0, sizeof(struct hisi_sas_itct)); hisi_sas_write32(hisi_hba, ENT_INT_SRC3, ENT_INT_SRC3_ITC_INT_MSK); - hisi_hba->devices[dev_id].dev_type = SAS_PHY_UNUSED; - hisi_hba->devices[dev_id].dev_status = HISI_SAS_DEV_NORMAL; /* clear the itct */ hisi_sas_write32(hisi_hba, ITCT_CLR, 0); @@ -610,8 +629,52 @@ static void dereg_device_v3_hw(struct hisi_hba *hisi_hba, 1 << CFG_ABT_SET_IPTT_DONE_OFF); } +static int reset_hw_v3_hw(struct hisi_hba *hisi_hba) +{ + struct device *dev = hisi_hba->dev; + int ret; + u32 val; + + hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0); + + /* Disable all of the PHYs */ + hisi_sas_stop_phys(hisi_hba); + udelay(50); + + /* Ensure axi bus idle */ + ret = readl_poll_timeout(hisi_hba->regs + AXI_CFG, val, !val, + 20000, 1000000); + if (ret) { + dev_err(dev, "axi bus is not idle, ret = %d!\n", ret); + return -EIO; + } + + if (ACPI_HANDLE(dev)) { + acpi_status s; + + s = acpi_evaluate_object(ACPI_HANDLE(dev), "_RST", NULL, NULL); + if (ACPI_FAILURE(s)) { + dev_err(dev, "Reset failed\n"); + return -EIO; + } + } else + dev_err(dev, "no reset method!\n"); + + return 0; +} + static int hw_init_v3_hw(struct hisi_hba *hisi_hba) { + struct device *dev = hisi_hba->dev; + int rc; + + rc = reset_hw_v3_hw(hisi_hba); + if (rc) { + dev_err(dev, "hisi_sas_reset_hw failed, rc=%d", rc); + return rc; + } + + msleep(100); init_reg_v3_hw(hisi_hba); return 0; @@ -640,25 +703,12 @@ static void 
start_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no) enable_phy_v3_hw(hisi_hba, phy_no); } -static void stop_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no) -{ - disable_phy_v3_hw(hisi_hba, phy_no); -} - -static void start_phys_v3_hw(struct hisi_hba *hisi_hba) -{ - int i; - - for (i = 0; i < hisi_hba->n_phy; i++) - start_phy_v3_hw(hisi_hba, i); -} - static void phy_hard_reset_v3_hw(struct hisi_hba *hisi_hba, int phy_no) { struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; u32 txid_auto; - stop_phy_v3_hw(hisi_hba, phy_no); + disable_phy_v3_hw(hisi_hba, phy_no); if (phy->identify.device_type == SAS_END_DEVICE) { txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO); hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO, @@ -675,7 +725,17 @@ enum sas_linkrate phy_get_max_linkrate_v3_hw(void) static void phys_init_v3_hw(struct hisi_hba *hisi_hba) { - start_phys_v3_hw(hisi_hba); + int i; + + for (i = 0; i < hisi_hba->n_phy; i++) { + struct hisi_sas_phy *phy = &hisi_hba->phy[i]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + + if (!sas_phy->phy->enabled) + continue; + + start_phy_v3_hw(hisi_hba, i); + } } static void sl_notify_v3_hw(struct hisi_hba *hisi_hba, int phy_no) @@ -1140,7 +1200,6 @@ static int phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba) static int phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba) { - int res = 0; u32 phy_state, sl_ctrl, txid_auto; struct device *dev = hisi_hba->dev; @@ -1161,7 +1220,7 @@ static int phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba) hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_NOT_RDY_MSK); hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 0); - return res; + return 0; } static void phy_bcast_v3_hw(int phy_no, struct hisi_hba *hisi_hba) @@ -1259,7 +1318,7 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p) if (irq_msk & (2 << (phy_no * 4)) && irq_value0) { hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, irq_value0 - & (~CHL_INT0_HOTPLUG_TOUT_MSK) + & (~CHL_INT0_SL_RX_BCST_ACK_MSK) & (~CHL_INT0_SL_PHY_ENABLE_MSK) & (~CHL_INT0_NOT_RDY_MSK)); } @@ -1620,6 +1679,104 @@ static int hisi_sas_v3_init(struct hisi_hba *hisi_hba) return 0; } +static void phy_set_linkrate_v3_hw(struct hisi_hba *hisi_hba, int phy_no, + struct sas_phy_linkrates *r) +{ + u32 prog_phy_link_rate = + hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE); + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + int i; + enum sas_linkrate min, max; + u32 rate_mask = 0; + + if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) { + max = sas_phy->phy->maximum_linkrate; + min = r->minimum_linkrate; + } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) { + max = r->maximum_linkrate; + min = sas_phy->phy->minimum_linkrate; + } else + return; + + sas_phy->phy->maximum_linkrate = max; + sas_phy->phy->minimum_linkrate = min; + + min -= SAS_LINK_RATE_1_5_GBPS; + max -= SAS_LINK_RATE_1_5_GBPS; + + for (i = 0; i <= max; i++) + rate_mask |= 1 << (i * 2); + + prog_phy_link_rate &= ~0xff; + prog_phy_link_rate |= rate_mask; + + hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE, + prog_phy_link_rate); + + phy_hard_reset_v3_hw(hisi_hba, phy_no); +} + +static void interrupt_disable_v3_hw(struct hisi_hba *hisi_hba) +{ + struct pci_dev *pdev = hisi_hba->pci_dev; + int i; + + synchronize_irq(pci_irq_vector(pdev, 1)); + synchronize_irq(pci_irq_vector(pdev, 2)); + synchronize_irq(pci_irq_vector(pdev, 11)); + for (i = 0; i < hisi_hba->queue_count; i++) { + hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 
0x4 * i, 0x1); + synchronize_irq(pci_irq_vector(pdev, i + 16)); + } + + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xffffffff); + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xffffffff); + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffffffff); + hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xffffffff); + + for (i = 0; i < hisi_hba->n_phy; i++) { + hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff); + hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffffff); + hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x1); + hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x1); + hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x1); + } +} + +static u32 get_phys_state_v3_hw(struct hisi_hba *hisi_hba) +{ + return hisi_sas_read32(hisi_hba, PHY_STATE); +} + +static int soft_reset_v3_hw(struct hisi_hba *hisi_hba) +{ + struct device *dev = hisi_hba->dev; + int rc; + u32 status; + + interrupt_disable_v3_hw(hisi_hba); + hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0); + + hisi_sas_stop_phys(hisi_hba); + + mdelay(10); + + hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + AM_CTRL_GLOBAL, 0x1); + + /* wait until bus idle */ + rc = readl_poll_timeout(hisi_hba->regs + AXI_MASTER_CFG_BASE + + AM_CURR_TRANS_RETURN, status, status == 0x3, 10, 100); + if (rc) { + dev_err(dev, "axi bus is not idle, rc = %d\n", rc); + return rc; + } + + hisi_sas_init_mem(hisi_hba); + + return hw_init_v3_hw(hisi_hba); +} + static const struct hisi_sas_hw hisi_sas_v3_hw = { .hw_init = hisi_sas_v3_init, .setup_itct = setup_itct_v3_hw, @@ -1640,7 +1797,10 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = { .phy_disable = disable_phy_v3_hw, .phy_hard_reset = phy_hard_reset_v3_hw, .phy_get_max_linkrate = phy_get_max_linkrate_v3_hw, + .phy_set_linkrate = phy_set_linkrate_v3_hw, .dereg_device = dereg_device_v3_hw, + .soft_reset = soft_reset_v3_hw, + .get_phys_state = get_phys_state_v3_hw, }; static struct Scsi_Host * @@ -1651,8 +1811,10 @@ hisi_sas_shost_alloc_pci(struct pci_dev *pdev) struct device *dev = &pdev->dev; shost = scsi_host_alloc(hisi_sas_sht, sizeof(*hisi_hba)); - if (!shost) - goto err_out; + if (!shost) { + dev_err(dev, "shost alloc failed\n"); + return NULL; + } hisi_hba = shost_priv(shost); hisi_hba->hw = &hisi_sas_v3_hw; @@ -1673,6 +1835,7 @@ hisi_sas_shost_alloc_pci(struct pci_dev *pdev) return shost; err_out: + scsi_host_put(shost); dev_err(dev, "shost alloc failed\n"); return NULL; } @@ -1781,7 +1944,7 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) err_out_register_ha: scsi_remove_host(shost); err_out_ha: - kfree(shost); + scsi_host_put(shost); err_out_regions: pci_release_regions(pdev); err_out_disable_device: @@ -1801,6 +1964,7 @@ hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba) struct hisi_sas_cq *cq = &hisi_hba->cq[i]; free_irq(pci_irq_vector(pdev, i+16), cq); + tasklet_kill(&cq->tasklet); } pci_free_irq_vectors(pdev); } @@ -1810,14 +1974,16 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev) struct device *dev = &pdev->dev; struct sas_ha_struct *sha = dev_get_drvdata(dev); struct hisi_hba *hisi_hba = sha->lldd_ha; + struct Scsi_Host *shost = sha->core.shost; sas_unregister_ha(sha); sas_remove_host(sha->core.shost); - hisi_sas_free(hisi_hba); hisi_sas_v3_destroy_irqs(pdev, hisi_hba); pci_release_regions(pdev); pci_disable_device(pdev); + hisi_sas_free(hisi_hba); + scsi_host_put(shost); } enum { @@ -1839,7 +2005,6 @@ static struct pci_driver sas_v3_pci_driver = { module_pci_driver(sas_v3_pci_driver); 
-MODULE_VERSION(DRV_VERSION); MODULE_LICENSE("GPL"); MODULE_AUTHOR("John Garry "); MODULE_DESCRIPTION("HISILICON SAS controller v3 hw driver based on pci device"); diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index 831a1c8b9f89..fe3a0da3ec97 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -315,8 +315,6 @@ static void scsi_host_dev_release(struct device *dev) { struct Scsi_Host *shost = dev_to_shost(dev); struct device *parent = dev->parent; - struct request_queue *q; - void *queuedata; scsi_proc_hostdir_rm(shost->hostt); @@ -326,12 +324,6 @@ static void scsi_host_dev_release(struct device *dev) kthread_stop(shost->ehandler); if (shost->work_q) destroy_workqueue(shost->work_q); - q = shost->uspace_req_q; - if (q) { - queuedata = q->queuedata; - blk_cleanup_queue(q); - kfree(queuedata); - } if (shost->shost_state == SHOST_CREATED) { /* diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 4f7cdb28bd38..9abe81021484 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -81,11 +81,8 @@ MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \ MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers"); MODULE_VERSION(HPSA_DRIVER_VERSION); MODULE_LICENSE("GPL"); +MODULE_ALIAS("cciss"); -static int hpsa_allow_any; -module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR); -MODULE_PARM_DESC(hpsa_allow_any, - "Allow hpsa driver to access unknown HP Smart Array hardware"); static int hpsa_simple_mode; module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(hpsa_simple_mode, @@ -148,6 +145,8 @@ static const struct pci_device_id hpsa_pci_device_id[] = { {PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f}, {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, + {PCI_VENDOR_ID_COMPAQ, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, {0,} }; @@ -158,6 +157,26 @@ MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id); * access = Address of the struct of function pointers */ static struct board_type products[] = { + {0x40700E11, "Smart Array 5300", &SA5A_access}, + {0x40800E11, "Smart Array 5i", &SA5B_access}, + {0x40820E11, "Smart Array 532", &SA5B_access}, + {0x40830E11, "Smart Array 5312", &SA5B_access}, + {0x409A0E11, "Smart Array 641", &SA5A_access}, + {0x409B0E11, "Smart Array 642", &SA5A_access}, + {0x409C0E11, "Smart Array 6400", &SA5A_access}, + {0x409D0E11, "Smart Array 6400 EM", &SA5A_access}, + {0x40910E11, "Smart Array 6i", &SA5A_access}, + {0x3225103C, "Smart Array P600", &SA5A_access}, + {0x3223103C, "Smart Array P800", &SA5A_access}, + {0x3234103C, "Smart Array P400", &SA5A_access}, + {0x3235103C, "Smart Array P400i", &SA5A_access}, + {0x3211103C, "Smart Array E200i", &SA5A_access}, + {0x3212103C, "Smart Array E200", &SA5A_access}, + {0x3213103C, "Smart Array E200i", &SA5A_access}, + {0x3214103C, "Smart Array E200i", &SA5A_access}, + {0x3215103C, "Smart Array E200i", &SA5A_access}, + {0x3237103C, "Smart Array E500", &SA5A_access}, + {0x323D103C, "Smart Array P700m", &SA5A_access}, {0x3241103C, "Smart Array P212", &SA5_access}, {0x3243103C, "Smart Array P410", &SA5_access}, {0x3245103C, "Smart Array P410i", &SA5_access}, @@ -278,7 +297,8 @@ static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr, u64 *cfg_offset); static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev, unsigned long *memory_bar); -static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id); +static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id, + bool 
*legacy_board); static int wait_for_device_to_become_ready(struct ctlr_info *h, unsigned char lunaddr[], int reply_queue); @@ -866,6 +886,16 @@ static ssize_t host_show_ctlr_num(struct device *dev, return snprintf(buf, 20, "%d\n", h->ctlr); } +static ssize_t host_show_legacy_board(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ctlr_info *h; + struct Scsi_Host *shost = class_to_shost(dev); + + h = shost_to_hba(shost); + return snprintf(buf, 20, "%d\n", h->legacy_board ? 1 : 0); +} + static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL); static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL); static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL); @@ -891,6 +921,8 @@ static DEVICE_ATTR(lockup_detected, S_IRUGO, host_show_lockup_detected, NULL); static DEVICE_ATTR(ctlr_num, S_IRUGO, host_show_ctlr_num, NULL); +static DEVICE_ATTR(legacy_board, S_IRUGO, + host_show_legacy_board, NULL); static struct device_attribute *hpsa_sdev_attrs[] = { &dev_attr_raid_level, @@ -912,6 +944,7 @@ static struct device_attribute *hpsa_shost_attrs[] = { &dev_attr_raid_offload_debug, &dev_attr_lockup_detected, &dev_attr_ctlr_num, + &dev_attr_legacy_board, NULL, }; @@ -3565,7 +3598,7 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical, memset(scsi3addr, 0, sizeof(scsi3addr)); if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h, buf, bufsize, 0, scsi3addr, TYPE_CMD)) { - rc = -1; + rc = -EAGAIN; goto out; } if (extended_response) @@ -3578,16 +3611,19 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical, if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { hpsa_scsi_interpret_error(h, c); - rc = -1; + rc = -EIO; } else { struct ReportLUNdata *rld = buf; if (rld->extended_response_flag != extended_response) { - dev_err(&h->pdev->dev, - "report luns requested format %u, got %u\n", - extended_response, - rld->extended_response_flag); - rc = -1; + if (!h->legacy_board) { + dev_err(&h->pdev->dev, + "report luns requested format %u, got %u\n", + extended_response, + rld->extended_response_flag); + rc = -EINVAL; + } else + rc = -EOPNOTSUPP; } } out: @@ -3603,7 +3639,7 @@ static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h, rc = hpsa_scsi_do_report_luns(h, 0, buf, bufsize, HPSA_REPORT_PHYS_EXTENDED); - if (!rc || !hpsa_allow_any) + if (!rc || rc != -EOPNOTSUPP) return rc; /* REPORT PHYS EXTENDED is not supported */ @@ -3791,7 +3827,7 @@ static int hpsa_update_device_info(struct ctlr_info *h, memset(this_device->device_id, 0, sizeof(this_device->device_id)); if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8, - sizeof(this_device->device_id))) + sizeof(this_device->device_id)) < 0) dev_err(&h->pdev->dev, "hpsa%d: %s: can't get device id for host %d:C0:T%d:L%d\t%s\t%.16s\n", h->ctlr, __func__, @@ -3809,6 +3845,16 @@ static int hpsa_update_device_info(struct ctlr_info *h, if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC) hpsa_get_ioaccel_status(h, scsi3addr, this_device); volume_offline = hpsa_volume_offline(h, scsi3addr); + if (volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED && + h->legacy_board) { + /* + * Legacy boards might not support volume status + */ + dev_info(&h->pdev->dev, + "C0:T%d:L%d Volume status not available, assuming online.\n", + this_device->target, this_device->lun); + volume_offline = 0; + } this_device->volume_offline = volume_offline; if (volume_offline == HPSA_LV_FAILED) { rc = HPSA_LV_FAILED; @@ -6571,7 +6617,6 @@ static int fill_cmd(struct CommandList *c, u8 cmd, 
struct ctlr_info *h, default: dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd); BUG(); - return -1; } } else if (cmd_type == TYPE_MSG) { switch (cmd) { @@ -7232,7 +7277,8 @@ static int hpsa_interrupt_mode(struct ctlr_info *h) return 0; } -static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id) +static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id, + bool *legacy_board) { int i; u32 subsystem_vendor_id, subsystem_device_id; @@ -7242,17 +7288,24 @@ static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id) *board_id = ((subsystem_device_id << 16) & 0xffff0000) | subsystem_vendor_id; + if (legacy_board) + *legacy_board = false; for (i = 0; i < ARRAY_SIZE(products); i++) - if (*board_id == products[i].board_id) + if (*board_id == products[i].board_id) { + if (products[i].access != &SA5A_access && + products[i].access != &SA5B_access) + return i; + dev_warn(&pdev->dev, + "legacy board ID: 0x%08x\n", + *board_id); + if (legacy_board) + *legacy_board = true; return i; + } - if ((subsystem_vendor_id != PCI_VENDOR_ID_HP && - subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) || - !hpsa_allow_any) { - dev_warn(&pdev->dev, "unrecognized board ID: " - "0x%08x, ignoring.\n", *board_id); - return -ENODEV; - } + dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x\n", *board_id); + if (legacy_board) + *legacy_board = true; return ARRAY_SIZE(products) - 1; /* generic unknown smart array */ } @@ -7555,13 +7608,14 @@ static void hpsa_free_pci_init(struct ctlr_info *h) static int hpsa_pci_init(struct ctlr_info *h) { int prod_index, err; + bool legacy_board; - prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id); + prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id, &legacy_board); if (prod_index < 0) return prod_index; h->product_name = products[prod_index].product_name; h->access = *(products[prod_index].access); - + h->legacy_board = legacy_board; pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); @@ -8241,7 +8295,7 @@ static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (number_of_controllers == 0) printk(KERN_INFO DRIVER_NAME "\n"); - rc = hpsa_lookup_board_id(pdev, &board_id); + rc = hpsa_lookup_board_id(pdev, &board_id, NULL); if (rc < 0) { dev_warn(&pdev->dev, "Board ID not found\n"); return rc; @@ -9443,14 +9497,6 @@ hpsa_sas_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates) return -EINVAL; } -/* SMP = Serial Management Protocol */ -static int -hpsa_sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, -struct request *req) -{ - return -EINVAL; -} - static struct sas_function_template hpsa_sas_transport_functions = { .get_linkerrors = hpsa_sas_get_linkerrors, .get_enclosure_identifier = hpsa_sas_get_enclosure_identifier, @@ -9460,7 +9506,6 @@ static struct sas_function_template hpsa_sas_transport_functions = { .phy_setup = hpsa_sas_phy_setup, .phy_release = hpsa_sas_phy_release, .set_phy_speed = hpsa_sas_phy_speed, - .smp_handler = hpsa_sas_smp_handler, }; /* diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h index 1c49741bc639..018f980a701c 100644 --- a/drivers/scsi/hpsa.h +++ b/drivers/scsi/hpsa.h @@ -293,6 +293,7 @@ struct ctlr_info { int drv_req_rescan; int raid_offload_debug; int discovery_polling; + int legacy_board; struct ReportLUNdata *lastlogicals; int needs_abort_tags_swizzled; struct workqueue_struct *resubmit_wq; @@ -447,6 +448,23 @@ static void SA5_intr_mask(struct ctlr_info *h, unsigned long val) } } +/* + * Variant of the above; 0x04 
turns interrupts off... + */ +static void SA5B_intr_mask(struct ctlr_info *h, unsigned long val) +{ + if (val) { /* Turn interrupts on */ + h->interrupts_enabled = 1; + writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); + (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); + } else { /* Turn them off */ + h->interrupts_enabled = 0; + writel(SA5B_INTR_OFF, + h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); + (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); + } +} + static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val) { if (val) { /* turn on interrupts */ @@ -549,6 +567,14 @@ static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h) true : false; } +/* + * Returns true if an interrupt is pending.. + */ +static bool SA5B_intr_pending(struct ctlr_info *h) +{ + return readl(h->vaddr + SA5_INTR_STATUS) & SA5B_INTR_PENDING; +} + #define IOACCEL_MODE1_REPLY_QUEUE_INDEX 0x1A0 #define IOACCEL_MODE1_PRODUCER_INDEX 0x1B8 #define IOACCEL_MODE1_CONSUMER_INDEX 0x1BC @@ -581,38 +607,53 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q) } static struct access_method SA5_access = { - .submit_command = SA5_submit_command, - .set_intr_mask = SA5_intr_mask, - .intr_pending = SA5_intr_pending, - .command_completed = SA5_completed, + .submit_command = SA5_submit_command, + .set_intr_mask = SA5_intr_mask, + .intr_pending = SA5_intr_pending, + .command_completed = SA5_completed, +}; + +/* Duplicate entry of the above to mark unsupported boards */ +static struct access_method SA5A_access = { + .submit_command = SA5_submit_command, + .set_intr_mask = SA5_intr_mask, + .intr_pending = SA5_intr_pending, + .command_completed = SA5_completed, +}; + +static struct access_method SA5B_access = { + .submit_command = SA5_submit_command, + .set_intr_mask = SA5B_intr_mask, + .intr_pending = SA5B_intr_pending, + .command_completed = SA5_completed, }; static struct access_method SA5_ioaccel_mode1_access = { - .submit_command = SA5_submit_command, - .set_intr_mask = SA5_performant_intr_mask, - .intr_pending = SA5_ioaccel_mode1_intr_pending, - .command_completed = SA5_ioaccel_mode1_completed, + .submit_command = SA5_submit_command, + .set_intr_mask = SA5_performant_intr_mask, + .intr_pending = SA5_ioaccel_mode1_intr_pending, + .command_completed = SA5_ioaccel_mode1_completed, }; static struct access_method SA5_ioaccel_mode2_access = { - .submit_command = SA5_submit_command_ioaccel2, - .set_intr_mask = SA5_performant_intr_mask, - .intr_pending = SA5_performant_intr_pending, - .command_completed = SA5_performant_completed, + .submit_command = SA5_submit_command_ioaccel2, + .set_intr_mask = SA5_performant_intr_mask, + .intr_pending = SA5_performant_intr_pending, + .command_completed = SA5_performant_completed, }; static struct access_method SA5_performant_access = { - .submit_command = SA5_submit_command, - .set_intr_mask = SA5_performant_intr_mask, - .intr_pending = SA5_performant_intr_pending, - .command_completed = SA5_performant_completed, + .submit_command = SA5_submit_command, + .set_intr_mask = SA5_performant_intr_mask, + .intr_pending = SA5_performant_intr_pending, + .command_completed = SA5_performant_completed, }; static struct access_method SA5_performant_access_no_read = { - .submit_command = SA5_submit_command_no_read, - .set_intr_mask = SA5_performant_intr_mask, - .intr_pending = SA5_performant_intr_pending, - .command_completed = SA5_performant_completed, + .submit_command = SA5_submit_command_no_read, + .set_intr_mask = SA5_performant_intr_mask, + .intr_pending = 
SA5_performant_intr_pending, + .command_completed = SA5_performant_completed, }; struct board_type { diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c index 7226226f7383..2fad7f03aa02 100644 --- a/drivers/scsi/hptiop.c +++ b/drivers/scsi/hptiop.c @@ -1106,12 +1106,10 @@ static int hptiop_reset_hba(struct hptiop_hba *hba) static int hptiop_reset(struct scsi_cmnd *scp) { - struct Scsi_Host * host = scp->device->host; - struct hptiop_hba * hba = (struct hptiop_hba *)host->hostdata; + struct hptiop_hba * hba = (struct hptiop_hba *)scp->device->host->hostdata; - printk(KERN_WARNING "hptiop_reset(%d/%d/%d) scp=%p\n", - scp->device->host->host_no, scp->device->channel, - scp->device->id, scp); + printk(KERN_WARNING "hptiop_reset(%d/%d/%d)\n", + scp->device->host->host_no, -1, -1); return hptiop_reset_hba(hba)? FAILED : SUCCESS; } @@ -1179,8 +1177,7 @@ static struct scsi_host_template driver_template = { .module = THIS_MODULE, .name = driver_name, .queuecommand = hptiop_queuecommand, - .eh_device_reset_handler = hptiop_reset, - .eh_bus_reset_handler = hptiop_reset, + .eh_host_reset_handler = hptiop_reset, .info = hptiop_info, .emulated = 0, .use_clustering = ENABLE_CLUSTERING, diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index cc4e05be8d4a..b491af31a5f8 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -2528,16 +2528,12 @@ static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd) **/ static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd) { - int rc, block_rc; + int rc; struct ibmvfc_host *vhost = shost_priv(cmd->device->host); - block_rc = fc_block_scsi_eh(cmd); dev_err(vhost->dev, "Resetting connection due to error recovery\n"); rc = ibmvfc_issue_fc_host_lip(vhost->host); - if (block_rc == FAST_IO_FAIL) - return FAST_IO_FAIL; - return rc ? FAILED : SUCCESS; } @@ -4929,7 +4925,7 @@ static unsigned long ibmvfc_get_desired_dma(struct vio_dev *vdev) return pool_dma + ((512 * 1024) * driver_template.cmd_per_lun); } -static struct vio_device_id ibmvfc_device_table[] = { +static const struct vio_device_id ibmvfc_device_table[] = { {"fcp", "IBM,vfc-client"}, { "", "" } }; diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index da22b3665cb0..7d156b161482 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -2330,7 +2330,7 @@ static int ibmvscsi_resume(struct device *dev) * ibmvscsi_device_table: Used by vio.c to match devices in the device tree we * support. 
*/ -static struct vio_device_id ibmvscsi_device_table[] = { +static const struct vio_device_id ibmvscsi_device_table[] = { {"vscsi", "IBM,v-scsi"}, { "", "" } }; diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c index 1f75d0380516..2799a6b08f73 100644 --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c @@ -3767,7 +3767,7 @@ static int ibmvscsis_write_pending(struct se_cmd *se_cmd) */ if ((vscsi->flags & (CLIENT_FAILED | RESPONSE_Q_DOWN))) { pr_err("write_pending failed since: %d\n", vscsi->flags); - return 0; + return -EIO; } rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, @@ -4086,7 +4086,7 @@ static struct class ibmvscsis_class = { .dev_groups = ibmvscsis_dev_groups, }; -static struct vio_device_id ibmvscsis_device_table[] = { +static const struct vio_device_id ibmvscsis_device_table[] = { { "v-scsi-host", "IBM,v-scsi-host" }, { "", "" } }; diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c index 9164ce1249c1..87c94191033b 100644 --- a/drivers/scsi/imm.c +++ b/drivers/scsi/imm.c @@ -1106,7 +1106,6 @@ static struct scsi_host_template imm_template = { .name = "Iomega VPI2 (imm) interface", .queuecommand = imm_queuecommand, .eh_abort_handler = imm_abort, - .eh_bus_reset_handler = imm_reset, .eh_host_reset_handler = imm_reset, .bios_param = imm_biosparam, .this_id = 7, diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c index 45371179ab87..922e3e56c90d 100644 --- a/drivers/scsi/isci/init.c +++ b/drivers/scsi/isci/init.c @@ -166,7 +166,7 @@ static struct scsi_host_template isci_sht = { .use_clustering = ENABLE_CLUSTERING, .eh_abort_handler = sas_eh_abort_handler, .eh_device_reset_handler = sas_eh_device_reset_handler, - .eh_bus_reset_handler = sas_eh_bus_reset_handler, + .eh_target_reset_handler = sas_eh_target_reset_handler, .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, .shost_attrs = isci_host_attrs, diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 4842fc0e809d..4d934d6c3e13 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -163,7 +163,6 @@ static void iscsi_sw_tcp_state_change(struct sock *sk) struct iscsi_tcp_conn *tcp_conn; struct iscsi_sw_tcp_conn *tcp_sw_conn; struct iscsi_conn *conn; - struct iscsi_session *session; void (*old_state_change)(struct sock *); read_lock_bh(&sk->sk_callback_lock); @@ -172,7 +171,6 @@ static void iscsi_sw_tcp_state_change(struct sock *sk) read_unlock_bh(&sk->sk_callback_lock); return; } - session = conn->session; iscsi_sw_sk_state_check(sk); diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 234352da5c3c..772c35a5c49e 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -2222,8 +2222,6 @@ int fc_eh_host_reset(struct scsi_cmnd *sc_cmd) FC_SCSI_DBG(lport, "Resetting host\n"); - fc_block_scsi_eh(sc_cmd); - fc_lport_reset(lport); wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT; while (!fc_fcp_lport_queue_ready(lport) && time_before(jiffies, diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 42381adf0769..c62e8d111fd9 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -1078,7 +1078,7 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr, if (opcode != ISCSI_OP_NOOP_OUT) return 0; - if (rejected_pdu.itt == cpu_to_be32(ISCSI_RESERVED_TAG)) { + if (rejected_pdu.itt == cpu_to_be32(ISCSI_RESERVED_TAG)) { /* * nop-out in response to target's nop-out rejected. * Just resend. 
@@ -2851,9 +2851,6 @@ EXPORT_SYMBOL_GPL(iscsi_session_setup); /** * iscsi_session_teardown - destroy session, host, and cls_session * @cls_session: iscsi session - * - * The driver must have called iscsi_remove_session before - * calling this. */ void iscsi_session_teardown(struct iscsi_cls_session *cls_session) { @@ -2863,6 +2860,8 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session) iscsi_pool_free(&session->cmdpool); + iscsi_remove_session(cls_session); + kfree(session->password); kfree(session->password_in); kfree(session->username); @@ -2877,7 +2876,8 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session) kfree(session->portal_type); kfree(session->discovery_parent_type); - iscsi_destroy_session(cls_session); + iscsi_free_session(cls_session); + iscsi_host_dec_session_cnt(shost); module_put(owner); } diff --git a/drivers/scsi/libsas/Kconfig b/drivers/scsi/libsas/Kconfig index 9dafe64e7c7a..13739bfacc67 100644 --- a/drivers/scsi/libsas/Kconfig +++ b/drivers/scsi/libsas/Kconfig @@ -26,6 +26,7 @@ config SCSI_SAS_LIBSAS tristate "SAS Domain Transport Attributes" depends on SCSI select SCSI_SAS_ATTRS + select BLK_DEV_BSGLIB help This provides transport specific helpers for SAS drivers which use the domain device construct (like the aic94xxx). diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index 87f5e694dbed..70be4425ae0b 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -343,6 +343,7 @@ static int smp_ata_check_ready(struct ata_link *link) case SAS_END_DEVICE: if (ex_phy->attached_sata_dev) return sas_ata_clear_pending(dev, ex_phy); + /* fall through */ default: return -ENODEV; } diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index 570b2cb2da43..6b4fd2375178 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -64,8 +64,8 @@ static void smp_task_done(struct sas_task *task) /* Give it some long enough timeout. In seconds. 
*/ #define SMP_TIMEOUT 10 -static int smp_execute_task(struct domain_device *dev, void *req, int req_size, - void *resp, int resp_size) +static int smp_execute_task_sg(struct domain_device *dev, + struct scatterlist *req, struct scatterlist *resp) { int res, retry; struct sas_task *task = NULL; @@ -86,8 +86,8 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size, } task->dev = dev; task->task_proto = dev->tproto; - sg_init_one(&task->smp_task.smp_req, req, req_size); - sg_init_one(&task->smp_task.smp_resp, resp, resp_size); + task->smp_task.smp_req = *req; + task->smp_task.smp_resp = *resp; task->task_done = smp_task_done; @@ -151,6 +151,17 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size, return res; } +static int smp_execute_task(struct domain_device *dev, void *req, int req_size, + void *resp, int resp_size) +{ + struct scatterlist req_sg; + struct scatterlist resp_sg; + + sg_init_one(&req_sg, req, req_size); + sg_init_one(&resp_sg, resp, resp_size); + return smp_execute_task_sg(dev, &req_sg, &resp_sg); +} + /* ---------- Allocations ---------- */ static inline void *alloc_smp_req(int size) @@ -2130,57 +2141,50 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev) return res; } -int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, - struct request *req) +void sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, + struct sas_rphy *rphy) { struct domain_device *dev; - int ret, type; - struct request *rsp = req->next_rq; - - if (!rsp) { - printk("%s: space for a smp response is missing\n", - __func__); - return -EINVAL; - } + unsigned int reslen = 0; + int ret = -EINVAL; /* no rphy means no smp target support (ie aic94xx host) */ if (!rphy) - return sas_smp_host_handler(shost, req, rsp); + return sas_smp_host_handler(job, shost); - type = rphy->identify.device_type; - - if (type != SAS_EDGE_EXPANDER_DEVICE && - type != SAS_FANOUT_EXPANDER_DEVICE) { + switch (rphy->identify.device_type) { + case SAS_EDGE_EXPANDER_DEVICE: + case SAS_FANOUT_EXPANDER_DEVICE: + break; + default: printk("%s: can we send a smp request to a device?\n", __func__); - return -EINVAL; + goto out; } dev = sas_find_dev_by_rphy(rphy); if (!dev) { printk("%s: fail to find a domain_device?\n", __func__); - return -EINVAL; + goto out; } /* do we need to support multiple segments? 
*/ - if (bio_multiple_segments(req->bio) || - bio_multiple_segments(rsp->bio)) { + if (job->request_payload.sg_cnt > 1 || + job->reply_payload.sg_cnt > 1) { printk("%s: multiple segments req %u, rsp %u\n", - __func__, blk_rq_bytes(req), blk_rq_bytes(rsp)); - return -EINVAL; + __func__, job->request_payload.payload_len, + job->reply_payload.payload_len); + goto out; } - ret = smp_execute_task(dev, bio_data(req->bio), blk_rq_bytes(req), - bio_data(rsp->bio), blk_rq_bytes(rsp)); + ret = smp_execute_task_sg(dev, job->request_payload.sg_list, + job->reply_payload.sg_list); if (ret > 0) { /* positive number is the untransferred residual */ - scsi_req(rsp)->resid_len = ret; - scsi_req(req)->resid_len = 0; + reslen = ret; ret = 0; - } else if (ret == 0) { - scsi_req(rsp)->resid_len = 0; - scsi_req(req)->resid_len = 0; } - return ret; +out: + bsg_job_done(job, ret, reslen); } diff --git a/drivers/scsi/libsas/sas_host_smp.c b/drivers/scsi/libsas/sas_host_smp.c index 45cbbc44f4d7..9ead93df3a6e 100644 --- a/drivers/scsi/libsas/sas_host_smp.c +++ b/drivers/scsi/libsas/sas_host_smp.c @@ -225,47 +225,36 @@ static void sas_phy_control(struct sas_ha_struct *sas_ha, u8 phy_id, resp_data[2] = SMP_RESP_FUNC_ACC; } -int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req, - struct request *rsp) +void sas_smp_host_handler(struct bsg_job *job, struct Scsi_Host *shost) { - u8 *req_data = NULL, *resp_data = NULL, *buf; struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); + u8 *req_data, *resp_data; + unsigned int reslen = 0; int error = -EINVAL; /* eight is the minimum size for request and response frames */ - if (blk_rq_bytes(req) < 8 || blk_rq_bytes(rsp) < 8) + if (job->request_payload.payload_len < 8 || + job->reply_payload.payload_len < 8) goto out; - if (bio_offset(req->bio) + blk_rq_bytes(req) > PAGE_SIZE || - bio_offset(rsp->bio) + blk_rq_bytes(rsp) > PAGE_SIZE) { - shost_printk(KERN_ERR, shost, - "SMP request/response frame crosses page boundary"); + error = -ENOMEM; + req_data = kzalloc(job->request_payload.payload_len, GFP_KERNEL); + if (!req_data) goto out; - } - - req_data = kzalloc(blk_rq_bytes(req), GFP_KERNEL); + sg_copy_to_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, req_data, + job->request_payload.payload_len); /* make sure frame can always be built ... we copy * back only the requested length */ - resp_data = kzalloc(max(blk_rq_bytes(rsp), 128U), GFP_KERNEL); - - if (!req_data || !resp_data) { - error = -ENOMEM; - goto out; - } - - local_irq_disable(); - buf = kmap_atomic(bio_page(req->bio)); - memcpy(req_data, buf, blk_rq_bytes(req)); - kunmap_atomic(buf - bio_offset(req->bio)); - local_irq_enable(); + resp_data = kzalloc(max(job->reply_payload.payload_len, 128U), + GFP_KERNEL); + if (!resp_data) + goto out_free_req; + error = -EINVAL; if (req_data[0] != SMP_REQUEST) - goto out; - - /* always succeeds ... 
even if we can't process the request - * the result is in the response frame */ - error = 0; + goto out_free_resp; /* set up default don't know response */ resp_data[0] = SMP_RESPONSE; @@ -274,20 +263,18 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req, switch (req_data[1]) { case SMP_REPORT_GENERAL: - scsi_req(req)->resid_len -= 8; - scsi_req(rsp)->resid_len -= 32; resp_data[2] = SMP_RESP_FUNC_ACC; resp_data[9] = sas_ha->num_phys; + reslen = 32; break; case SMP_REPORT_MANUF_INFO: - scsi_req(req)->resid_len -= 8; - scsi_req(rsp)->resid_len -= 64; resp_data[2] = SMP_RESP_FUNC_ACC; memcpy(resp_data + 12, shost->hostt->name, SAS_EXPANDER_VENDOR_ID_LEN); memcpy(resp_data + 20, "libsas virt phy", SAS_EXPANDER_PRODUCT_ID_LEN); + reslen = 64; break; case SMP_READ_GPIO_REG: @@ -295,14 +282,10 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req, break; case SMP_DISCOVER: - scsi_req(req)->resid_len -= 16; - if ((int)scsi_req(req)->resid_len < 0) { - scsi_req(req)->resid_len = 0; - error = -EINVAL; - goto out; - } - scsi_req(rsp)->resid_len -= 56; + if (job->request_payload.payload_len < 16) + goto out_free_resp; sas_host_smp_discover(sas_ha, resp_data, req_data[9]); + reslen = 56; break; case SMP_REPORT_PHY_ERR_LOG: @@ -311,14 +294,10 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req, break; case SMP_REPORT_PHY_SATA: - scsi_req(req)->resid_len -= 16; - if ((int)scsi_req(req)->resid_len < 0) { - scsi_req(req)->resid_len = 0; - error = -EINVAL; - goto out; - } - scsi_req(rsp)->resid_len -= 60; + if (job->request_payload.payload_len < 16) + goto out_free_resp; sas_report_phy_sata(sas_ha, resp_data, req_data[9]); + reslen = 60; break; case SMP_REPORT_ROUTE_INFO: @@ -330,16 +309,15 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req, const int base_frame_size = 11; int to_write = req_data[4]; - if (blk_rq_bytes(req) < base_frame_size + to_write * 4 || - scsi_req(req)->resid_len < base_frame_size + to_write * 4) { + if (job->request_payload.payload_len < + base_frame_size + to_write * 4) { resp_data[2] = SMP_RESP_INV_FRM_LEN; break; } to_write = sas_host_smp_write_gpio(sas_ha, resp_data, req_data[2], req_data[3], to_write, &req_data[8]); - scsi_req(req)->resid_len -= base_frame_size + to_write * 4; - scsi_req(rsp)->resid_len -= 8; + reslen = 8; break; } @@ -348,16 +326,12 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req, break; case SMP_PHY_CONTROL: - scsi_req(req)->resid_len -= 44; - if ((int)scsi_req(req)->resid_len < 0) { - scsi_req(req)->resid_len = 0; - error = -EINVAL; - goto out; - } - scsi_req(rsp)->resid_len -= 8; + if (job->request_payload.payload_len < 44) + goto out_free_resp; sas_phy_control(sas_ha, req_data[9], req_data[10], req_data[32] >> 4, req_data[33] >> 4, resp_data); + reslen = 8; break; case SMP_PHY_TEST_FUNCTION: @@ -369,15 +343,15 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req, break; } - local_irq_disable(); - buf = kmap_atomic(bio_page(rsp->bio)); - memcpy(buf, resp_data, blk_rq_bytes(rsp)); - flush_kernel_dcache_page(bio_page(rsp->bio)); - kunmap_atomic(buf - bio_offset(rsp->bio)); - local_irq_enable(); + sg_copy_from_buffer(job->reply_payload.sg_list, + job->reply_payload.sg_cnt, resp_data, + job->reply_payload.payload_len); - out: - kfree(req_data); + error = 0; +out_free_resp: kfree(resp_data); - return error; +out_free_req: + kfree(req_data); +out: + bsg_job_done(job, error, reslen); } diff --git a/drivers/scsi/libsas/sas_internal.h 
b/drivers/scsi/libsas/sas_internal.h index a216c957b639..c07e08136491 100644 --- a/drivers/scsi/libsas/sas_internal.h +++ b/drivers/scsi/libsas/sas_internal.h @@ -81,6 +81,8 @@ int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw); int sas_notify_lldd_dev_found(struct domain_device *); void sas_notify_lldd_dev_gone(struct domain_device *); +void sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, + struct sas_rphy *rphy); int sas_smp_phy_control(struct domain_device *dev, int phy_id, enum phy_func phy_func, struct sas_phy_linkrates *); int sas_smp_get_phy_events(struct sas_phy *phy); @@ -98,16 +100,14 @@ void sas_hae_reset(struct work_struct *work); void sas_free_device(struct kref *kref); #ifdef CONFIG_SCSI_SAS_HOST_SMP -extern int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req, - struct request *rsp); +extern void sas_smp_host_handler(struct bsg_job *job, struct Scsi_Host *shost); #else -static inline int sas_smp_host_handler(struct Scsi_Host *shost, - struct request *req, - struct request *rsp) +static inline void sas_smp_host_handler(struct bsg_job *job, + struct Scsi_Host *shost) { shost_printk(KERN_ERR, shost, "Cannot send SMP to a sas host (not enabled in CONFIG)\n"); - return -EINVAL; + bsg_job_done(job, -EINVAL, 0); } #endif diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index 87e5079d816b..ea8ad06ff582 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c @@ -526,7 +526,7 @@ int sas_eh_device_reset_handler(struct scsi_cmnd *cmd) return FAILED; } -int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd) +int sas_eh_target_reset_handler(struct scsi_cmnd *cmd) { int res; struct Scsi_Host *host = cmd->device->host; @@ -554,15 +554,15 @@ static int try_to_reset_cmd_device(struct scsi_cmnd *cmd) struct Scsi_Host *shost = cmd->device->host; if (!shost->hostt->eh_device_reset_handler) - goto try_bus_reset; + goto try_target_reset; res = shost->hostt->eh_device_reset_handler(cmd); if (res == SUCCESS) return res; -try_bus_reset: - if (shost->hostt->eh_bus_reset_handler) - return shost->hostt->eh_bus_reset_handler(cmd); +try_target_reset: + if (shost->hostt->eh_target_reset_handler) + return shost->hostt->eh_target_reset_handler(cmd); return FAILED; } @@ -855,7 +855,6 @@ int sas_target_alloc(struct scsi_target *starget) int sas_slave_configure(struct scsi_device *scsi_dev) { struct domain_device *dev = sdev_to_domain_dev(scsi_dev); - struct sas_ha_struct *sas_ha; BUG_ON(dev->rphy->identify.device_type != SAS_END_DEVICE); @@ -864,8 +863,6 @@ int sas_slave_configure(struct scsi_device *scsi_dev) return 0; } - sas_ha = dev->port->ha; - sas_read_port_mode_page(scsi_dev); if (scsi_dev->tagged_supported) { @@ -996,6 +993,6 @@ EXPORT_SYMBOL_GPL(sas_bios_param); EXPORT_SYMBOL_GPL(sas_task_abort); EXPORT_SYMBOL_GPL(sas_phy_reset); EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler); -EXPORT_SYMBOL_GPL(sas_eh_bus_reset_handler); +EXPORT_SYMBOL_GPL(sas_eh_target_reset_handler); EXPORT_SYMBOL_GPL(sas_target_destroy); EXPORT_SYMBOL_GPL(sas_ioctl); diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 562dc0139735..8eb3f96fe068 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -733,7 +733,6 @@ struct lpfc_hba { uint32_t fc_rttov; /* R_T_TOV timer value */ uint32_t fc_altov; /* AL_TOV timer value */ uint32_t fc_crtov; /* C_R_TOV timer value */ - uint32_t fc_citov; /* C_I_TOV timer value */ struct serv_parm fc_fabparam; /* fabric service parameters buffer */ 
uint8_t alpa_map[128]; /* AL_PA map from READ_LA */ @@ -757,6 +756,7 @@ struct lpfc_hba { #define LPFC_NVMET_MAX_PORTS 32 uint8_t mds_diags_support; uint32_t initial_imax; + uint8_t bbcredit_support; /* HBA Config Parameters */ uint32_t cfg_ack0; @@ -836,6 +836,7 @@ struct lpfc_hba { uint32_t cfg_enable_SmartSAN; uint32_t cfg_enable_mds_diags; uint32_t cfg_enable_fc4_type; + uint32_t cfg_enable_bbcr; /*Enable BB Credit Recovery*/ uint32_t cfg_xri_split; #define LPFC_ENABLE_FCP 1 #define LPFC_ENABLE_NVME 2 @@ -946,14 +947,14 @@ struct lpfc_hba { struct list_head active_rrq_list; spinlock_t hbalock; - /* pci_mem_pools */ - struct pci_pool *lpfc_sg_dma_buf_pool; - struct pci_pool *lpfc_mbuf_pool; - struct pci_pool *lpfc_hrb_pool; /* header receive buffer pool */ - struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */ - struct pci_pool *lpfc_nvmet_drb_pool; /* data receive buffer pool */ - struct pci_pool *lpfc_hbq_pool; /* SLI3 hbq buffer pool */ - struct pci_pool *txrdy_payload_pool; + /* dma_mem_pools */ + struct dma_pool *lpfc_sg_dma_buf_pool; + struct dma_pool *lpfc_mbuf_pool; + struct dma_pool *lpfc_hrb_pool; /* header receive buffer pool */ + struct dma_pool *lpfc_drb_pool; /* data receive buffer pool */ + struct dma_pool *lpfc_nvmet_drb_pool; /* data receive buffer pool */ + struct dma_pool *lpfc_hbq_pool; /* SLI3 hbq buffer pool */ + struct dma_pool *txrdy_payload_pool; struct lpfc_dma_pool lpfc_mbuf_safety_pool; mempool_t *mbox_mem_pool; diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 7ee1a94c0b33..c17677f494af 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -247,13 +247,10 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, atomic_read(&tgtp->xmt_abort_rsp), atomic_read(&tgtp->xmt_abort_rsp_error)); - spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock); - spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock); - tot = phba->sli4_hba.nvmet_xri_cnt - - (phba->sli4_hba.nvmet_ctx_get_cnt + - phba->sli4_hba.nvmet_ctx_put_cnt); - spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock); - spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock); + /* Calculate outstanding IOs */ + tot = atomic_read(&tgtp->rcv_fcp_cmd_drop); + tot += atomic_read(&tgtp->xmt_fcp_release); + tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot; len += snprintf(buf + len, PAGE_SIZE - len, "IO_CTX: %08x WAIT: cur %08x tot %08x\n" @@ -1893,6 +1890,36 @@ static inline bool lpfc_rangecheck(uint val, uint min, uint max) return val >= min && val <= max; } +/** + * lpfc_enable_bbcr_set: Sets an attribute value. + * @phba: pointer the the adapter structure. + * @val: integer attribute value. + * + * Description: + * Validates the min and max values then sets the + * adapter config field if in the valid range. prints error message + * and does not set the parameter if invalid. 
+ * + * Returns: + * zero on success + * -EINVAL if val is invalid + */ +static ssize_t +lpfc_enable_bbcr_set(struct lpfc_hba *phba, uint val) +{ + if (lpfc_rangecheck(val, 0, 1) && phba->sli_rev == LPFC_SLI_REV4) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3068 %s_enable_bbcr changed from %d to %d\n", + LPFC_DRIVER_NAME, phba->cfg_enable_bbcr, val); + phba->cfg_enable_bbcr = val; + return 0; + } + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0451 %s_enable_bbcr cannot set to %d, range is 0, 1\n", + LPFC_DRIVER_NAME, val); + return -EINVAL; +} + /** * lpfc_param_show - Return a cfg attribute value in decimal * @@ -5116,6 +5143,14 @@ LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT, */ LPFC_ATTR_R(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics"); +/* + * lpfc_enable_bbcr: Enable BB Credit Recovery + * 0 = BB Credit Recovery disabled + * 1 = BB Credit Recovery enabled (default) + * Value range is [0,1]. Default value is 1. + */ +LPFC_BBCR_ATTR_RW(enable_bbcr, 1, 0, 1, "Enable BBC Recovery"); + struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_nvme_info, &dev_attr_bg_info, @@ -5223,6 +5258,7 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_protocol, &dev_attr_lpfc_xlane_supported, &dev_attr_lpfc_enable_mds_diags, + &dev_attr_lpfc_enable_bbcr, NULL, }; @@ -6234,11 +6270,13 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size); lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel); lpfc_nvme_io_channel_init(phba, lpfc_nvme_io_channel); + lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr); if (phba->sli_rev != LPFC_SLI_REV4) { /* NVME only supported on SLI4 */ phba->nvmet_support = 0; phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP; + phba->cfg_enable_bbcr = 0; } else { /* We MUST have FCP support */ if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) diff --git a/drivers/scsi/lpfc/lpfc_attr.h b/drivers/scsi/lpfc/lpfc_attr.h index d56dafcdd563..931db52692f5 100644 --- a/drivers/scsi/lpfc/lpfc_attr.h +++ b/drivers/scsi/lpfc/lpfc_attr.h @@ -46,6 +46,16 @@ lpfc_param_store(name)\ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ lpfc_##name##_show, lpfc_##name##_store) +#define LPFC_BBCR_ATTR_RW(name, defval, minval, maxval, desc) \ +static uint lpfc_##name = defval;\ +module_param(lpfc_##name, uint, 0444);\ +MODULE_PARM_DESC(lpfc_##name, desc);\ +lpfc_param_show(name)\ +lpfc_param_init(name, defval, minval, maxval)\ +lpfc_param_store(name)\ +static DEVICE_ATTR(lpfc_##name, 0444 | 0644,\ + lpfc_##name##_show, lpfc_##name##_store) + #define LPFC_ATTR_HEX_R(name, defval, minval, maxval, desc) \ static uint lpfc_##name = defval;\ module_param(lpfc_##name, uint, S_IRUGO);\ diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index a1686c2d863c..fe9e1c079c20 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -2384,20 +2384,17 @@ lpfc_sli4_bsg_link_diag_test(struct bsg_job *job) goto job_error; pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!pmboxq) { - rc = -ENOMEM; + if (!pmboxq) goto link_diag_test_exit; - } req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) - sizeof(struct lpfc_sli4_cfg_mhdr)); alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE, LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE, req_len, LPFC_SLI4_MBX_EMBED); - if (alloc_len != req_len) { - rc = -ENOMEM; + if (alloc_len != req_len) goto link_diag_test_exit; - } + run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test; bf_set(lpfc_mbx_run_diag_test_link_num, 
&run_link_diag_test->u.req, phba->sli4_hba.lnk_info.lnk_no); diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index da669dce12fe..7e300734b345 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -556,9 +556,8 @@ int lpfc_nvmet_update_targetport(struct lpfc_hba *phba); void lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba); void lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_iocbq *piocb); -void lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba, - struct lpfc_sli_ring *pring, - struct rqb_dmabuf *nvmebuf, uint64_t isr_ts); +void lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba, uint32_t idx, + struct rqb_dmabuf *nvmebuf, uint64_t isr_ts); void lpfc_nvme_mod_param_dep(struct lpfc_hba *phba); void lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 9c0c1463057d..33417681f5d4 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -955,7 +955,7 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, CTrsp = (struct lpfc_sli_ct_request *)outp->virt; fc4_data_0 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[0]); fc4_data_1 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[1]); - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "3062 DID x%06x GFT Wd0 x%08x Wd1 x%08x\n", did, fc4_data_0, fc4_data_1); @@ -969,7 +969,7 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, ndlp->nlp_fc4_type |= NLP_FC4_FCP; if (fc4_data_1 & LPFC_FC4_TYPE_BITMASK) ndlp->nlp_fc4_type |= NLP_FC4_NVME; - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "3064 Setting ndlp %p, DID x%06x with " "FC4 x%08x, Data: x%08x x%08x\n", ndlp, did, ndlp->nlp_fc4_type, diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 744f3f395b64..d50c481ec41c 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -851,13 +851,10 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); } - spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock); - spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock); - tot = phba->sli4_hba.nvmet_xri_cnt - - (phba->sli4_hba.nvmet_ctx_get_cnt + - phba->sli4_hba.nvmet_ctx_put_cnt); - spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock); - spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock); + /* Calculate outstanding IOs */ + tot = atomic_read(&tgtp->rcv_fcp_cmd_drop); + tot += atomic_read(&tgtp->xmt_fcp_release); + tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot; len += snprintf(buf + len, size - len, "IO_CTX: %08x WAIT: cur %08x tot %08x\n" diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h index 7b7d314af0e0..c4edd87bfc65 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.h +++ b/drivers/scsi/lpfc/lpfc_debugfs.h @@ -478,16 +478,16 @@ lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx) return; for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++) { - eq = phba->sli4_hba.hba_eq[eqidx]; - if (cq->assoc_qid == eq->queue_id) + if (cq->assoc_qid == phba->sli4_hba.hba_eq[eqidx]->queue_id) break; } if (eqidx == phba->io_channel_irqs) { pr_err("Couldn't find EQ for CQ. 
Using EQ[0]\n"); eqidx = 0; - eq = phba->sli4_hba.hba_eq[0]; } + eq = phba->sli4_hba.hba_eq[eqidx]; + if (qtype == DUMP_FCP || qtype == DUMP_NVME) pr_err("%s CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]" "->EQ[Idx:%d|Qid:%d]:\n", diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h index 094c97b9e5f7..f9a566eaef04 100644 --- a/drivers/scsi/lpfc/lpfc_disc.h +++ b/drivers/scsi/lpfc/lpfc_disc.h @@ -159,6 +159,7 @@ struct lpfc_node_rrq { #define NLP_RNID_SND 0x00000400 /* sent RNID request for this entry */ #define NLP_ELS_SND_MASK 0x000007e0 /* sent ELS request for this entry */ #define NLP_NVMET_RECOV 0x00001000 /* NVMET auditing node for recovery. */ +#define NLP_FCP_PRLI_RJT 0x00002000 /* Rport does not support FCP PRLI. */ #define NLP_DEFER_RM 0x00010000 /* Remove this ndlp if no longer used */ #define NLP_DELAY_TMO 0x00020000 /* delay timeout is running for node */ #define NLP_NPR_2B_DISC 0x00040000 /* node is included in num_disc_nodes */ diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 6d1d6f691df4..468a66371de9 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -1527,6 +1527,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, uint8_t name[sizeof(struct lpfc_name)]; uint32_t rc, keepDID = 0, keep_nlp_flag = 0; uint16_t keep_nlp_state; + struct lpfc_nvme_rport *keep_nrport = NULL; int put_node; int put_rport; unsigned long *active_rrqs_xri_bitmap = NULL; @@ -1624,6 +1625,10 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, keep_nlp_state = new_ndlp->nlp_state; lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state); + /* interchange the nvme remoteport structs */ + keep_nrport = new_ndlp->nrport; + new_ndlp->nrport = ndlp->nrport; + /* Move this back to NPR state */ if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) { /* The new_ndlp is replacing ndlp totally, so we need @@ -1646,6 +1651,13 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, } new_ndlp->nlp_type = ndlp->nlp_type; } + + /* Fix up the nvme rport */ + if (ndlp->nrport) { + ndlp->nrport = NULL; + lpfc_nlp_put(ndlp); + } + /* We shall actually free the ndlp with both nlp_DID and * nlp_portname fields equals 0 to avoid any ndlp on the * nodelist never to be used. @@ -1690,6 +1702,14 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, keep_nlp_state = NLP_STE_NPR_NODE; lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); + /* Previous ndlp no longer active with nvme host transport. + * Remove reference from earlier registration unless the + * nvme host took care of it. 
+ */ + if (ndlp->nrport) + lpfc_nlp_put(ndlp); + ndlp->nrport = keep_nrport; + /* Fix up the rport accordingly */ rport = ndlp->rport; if (rport) { @@ -1966,6 +1986,7 @@ int lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) { struct lpfc_hba *phba = vport->phba; + struct Scsi_Host *shost; struct serv_parm *sp; struct lpfc_nodelist *ndlp; struct lpfc_iocbq *elsiocb; @@ -1984,6 +2005,11 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) if (!elsiocb) return 1; + shost = lpfc_shost_from_vport(vport); + spin_lock_irq(shost->host_lock); + ndlp->nlp_flag &= ~NLP_FCP_PRLI_RJT; + spin_unlock_irq(shost->host_lock); + pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); /* For PLOGI request, remainder of payload is service parameters */ @@ -2007,6 +2033,7 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) sp->cmn.valid_vendor_ver_level = 0; memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); + sp->cmn.bbRcvSizeMsb &= 0xF; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "Issue PLOGI: did:x%x", @@ -2151,6 +2178,16 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, uint16_t cmdsize; u32 local_nlp_type, elscmd; + /* + * If we are in RSCN mode, the FC4 types supported from a + * previous GFT_ID command may not be accurate. So, if we + * are a NVME Initiator, always look for the possibility of + * the remote NPort beng a NVME Target. + */ + if (phba->sli_rev == LPFC_SLI_REV4 && + vport->fc_flag & FC_RSCN_MODE && + vport->nvmei_support) + ndlp->nlp_fc4_type |= NLP_FC4_NVME; local_nlp_type = ndlp->nlp_fc4_type; send_next_prli: @@ -3420,8 +3457,18 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, maxretry = 3; delay = 1000; retry = 1; - break; + } else if (cmd == ELS_CMD_FLOGI && + stat.un.b.lsRjtRsnCodeExp == + LSEXP_NOTHING_MORE) { + vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf; + retry = 1; + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + "0820 FLOGI Failed (x%x). " + "BBCredit Not Supported\n", + stat.un.lsRjtError); } + break; + case LSRJT_PROTOCOL_ERR: if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && (cmd == ELS_CMD_FDISC) && @@ -3442,6 +3489,21 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, goto out_retry; } break; + case LSRJT_CMD_UNSUPPORTED: + /* lpfc nvmet returns this type of LS_RJT when it + * receives an FCP PRLI because lpfc nvmet only + * support NVME. ELS request is terminated for FCP4 + * on this rport. + */ + if (stat.un.b.lsRjtRsnCodeExp == + LSEXP_REQ_UNSUPPORTED && cmd == ELS_CMD_PRLI) { + spin_lock_irq(shost->host_lock); + ndlp->nlp_flag |= NLP_FCP_PRLI_RJT; + spin_unlock_irq(shost->host_lock); + retry = 0; + goto out_retry; + } + break; } break; @@ -3930,7 +3992,25 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if (mbox) { if ((rspiocb->iocb.ulpStatus == 0) && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { - lpfc_unreg_rpi(vport, ndlp); + if (!lpfc_unreg_rpi(vport, ndlp) && + (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || + ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE)) { + lpfc_printf_vlog(vport, KERN_INFO, + LOG_DISCOVERY, + "0314 PLOGI recov DID x%x " + "Data: x%x x%x x%x\n", + ndlp->nlp_DID, ndlp->nlp_state, + ndlp->nlp_rpi, ndlp->nlp_flag); + mp = mbox->context1; + if (mp) { + lpfc_mbuf_free(phba, mp->virt, + mp->phys); + kfree(mp); + } + mempool_free(mbox, phba->mbox_mem_pool); + goto out; + } + /* Increment reference count to ndlp to hold the * reference to ndlp for the callback function. 
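The bbRcvSizeMsb masking added in the PLOGI and FLOGI-retry hunks above exists because the upper nibble of that common-service-parameter byte is reused to carry BB_SC_N for BB Credit Recovery; clearing it with & 0xF strips a stale value, and the config-link path later in this patch ORs in (bbscn << 4) to advertise one. A small sketch of the nibble handling (the example_ helpers are illustrative, not driver code):

    #include <linux/types.h>

    /* Sketch: BB_SC_N lives in the high nibble of bbRcvSizeMsb; the low
     * nibble keeps the most-significant bits of the receive data field size. */
    static inline u8 example_set_bbscn(u8 bb_rcv_size_msb, u8 bbscn)
    {
            return (bb_rcv_size_msb & 0xF) | ((bbscn & 0xF) << 4);
    }

    static inline u8 example_get_bbscn(u8 bb_rcv_size_msb)
    {
            return (bb_rcv_size_msb >> 4) & 0xF;
    }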
*/ @@ -4132,6 +4212,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, sp->cmn.valid_vendor_ver_level = 0; memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); + sp->cmn.bbRcvSizeMsb &= 0xF; /* If our firmware supports this feature, convey that * info to the target using the vendor specific field. @@ -7989,6 +8070,13 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, rjt_exp = LSEXP_NOTHING_MORE; break; } + + /* NVMET accepts NVME PRLI only. Reject FCP PRLI */ + if (cmd == ELS_CMD_PRLI && phba->nvmet_support) { + rjt_err = LSRJT_CMD_UNSUPPORTED; + rjt_exp = LSEXP_REQ_UNSUPPORTED; + break; + } lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI); break; case ELS_CMD_LIRR: @@ -8784,6 +8872,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, pcmd += sizeof(uint32_t); /* Node Name */ pcmd += sizeof(uint32_t); /* Node Name */ memcpy(pcmd, &vport->fc_nodename, 8); + sp->cmn.valid_vendor_ver_level = 0; memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); lpfc_set_disctmo(vport); diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index aa5e5ff56dfb..20808349a80e 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -1108,6 +1108,7 @@ void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { struct lpfc_vport *vport = pmb->vport; + uint8_t bbscn = 0; if (pmb->u.mb.mbxStatus) goto out; @@ -1134,10 +1135,17 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) /* Start discovery by sending a FLOGI. port_state is identically * LPFC_FLOGI while waiting for FLOGI cmpl */ - if (vport->port_state != LPFC_FLOGI) + if (vport->port_state != LPFC_FLOGI) { + if (phba->bbcredit_support && phba->cfg_enable_bbcr) { + bbscn = bf_get(lpfc_bbscn_def, + &phba->sli4_hba.bbscn_params); + vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf; + vport->fc_sparam.cmn.bbRcvSizeMsb |= (bbscn << 4); + } lpfc_initial_flogi(vport); - else if (vport->fc_flag & FC_PT2PT) + } else if (vport->fc_flag & FC_PT2PT) { lpfc_disc_start(vport); + } return; out: diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 26a5647e057e..bdc1f184f67a 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -2293,15 +2293,27 @@ typedef struct { uint32_t rttov; uint32_t altov; uint32_t crtov; - uint32_t citov; + +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t rsvd4:19; + uint32_t cscn:1; + uint32_t bbscn:4; + uint32_t rsvd3:8; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint32_t rsvd3:8; + uint32_t bbscn:4; + uint32_t cscn:1; + uint32_t rsvd4:19; +#endif + #ifdef __BIG_ENDIAN_BITFIELD uint32_t rrq_enable:1; uint32_t rrq_immed:1; - uint32_t rsvd4:29; + uint32_t rsvd5:29; uint32_t ack0_enable:1; #else /* __LITTLE_ENDIAN_BITFIELD */ uint32_t ack0_enable:1; - uint32_t rsvd4:29; + uint32_t rsvd5:29; uint32_t rrq_immed:1; uint32_t rrq_enable:1; #endif diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index bb4715705fa3..1db0a38683f4 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -2217,9 +2217,15 @@ struct lpfc_mbx_reg_vfi { uint32_t e_d_tov; uint32_t r_a_tov; uint32_t word10; -#define lpfc_reg_vfi_nport_id_SHIFT 0 -#define lpfc_reg_vfi_nport_id_MASK 0x00FFFFFF -#define lpfc_reg_vfi_nport_id_WORD word10 +#define lpfc_reg_vfi_nport_id_SHIFT 0 +#define lpfc_reg_vfi_nport_id_MASK 0x00FFFFFF +#define lpfc_reg_vfi_nport_id_WORD word10 +#define lpfc_reg_vfi_bbcr_SHIFT 27 +#define 
lpfc_reg_vfi_bbcr_MASK 0x00000001 +#define lpfc_reg_vfi_bbcr_WORD word10 +#define lpfc_reg_vfi_bbscn_SHIFT 28 +#define lpfc_reg_vfi_bbscn_MASK 0x0000000F +#define lpfc_reg_vfi_bbscn_WORD word10 }; struct lpfc_mbx_init_vpi { @@ -2646,7 +2652,16 @@ struct lpfc_mbx_read_config { #define lpfc_mbx_rd_conf_link_speed_MASK 0x0000FFFF #define lpfc_mbx_rd_conf_link_speed_WORD word6 uint32_t rsvd_7; - uint32_t rsvd_8; + uint32_t word8; +#define lpfc_mbx_rd_conf_bbscn_min_SHIFT 0 +#define lpfc_mbx_rd_conf_bbscn_min_MASK 0x0000000F +#define lpfc_mbx_rd_conf_bbscn_min_WORD word8 +#define lpfc_mbx_rd_conf_bbscn_max_SHIFT 4 +#define lpfc_mbx_rd_conf_bbscn_max_MASK 0x0000000F +#define lpfc_mbx_rd_conf_bbscn_max_WORD word8 +#define lpfc_mbx_rd_conf_bbscn_def_SHIFT 8 +#define lpfc_mbx_rd_conf_bbscn_def_MASK 0x0000000F +#define lpfc_mbx_rd_conf_bbscn_def_WORD word8 uint32_t word9; #define lpfc_mbx_rd_conf_lmt_SHIFT 0 #define lpfc_mbx_rd_conf_lmt_MASK 0x0000FFFF diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 491aa95eb0f6..100bc4c8798d 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include @@ -1253,6 +1254,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) unsigned long time_elapsed; uint32_t tick_cqe, max_cqe, val; uint64_t tot, data1, data2, data3; + struct lpfc_nvmet_tgtport *tgtp; struct lpfc_register reg_data; void __iomem *eqdreg = phba->sli4_hba.u.if_type2.EQDregaddr; @@ -1281,13 +1283,11 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) /* Check outstanding IO count */ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { if (phba->nvmet_support) { - spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock); - spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock); - tot = phba->sli4_hba.nvmet_xri_cnt - - (phba->sli4_hba.nvmet_ctx_get_cnt + - phba->sli4_hba.nvmet_ctx_put_cnt); - spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock); - spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock); + tgtp = phba->targetport->private; + /* Calculate outstanding IOs */ + tot = atomic_read(&tgtp->rcv_fcp_cmd_drop); + tot += atomic_read(&tgtp->xmt_fcp_release); + tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot; } else { tot = atomic_read(&phba->fc4NvmeIoCmpls); data1 = atomic_read( @@ -3048,7 +3048,7 @@ lpfc_online(struct lpfc_hba *phba) { struct lpfc_vport *vport; struct lpfc_vport **vports; - int i; + int i, error = 0; bool vpis_cleared = false; if (!phba) @@ -3072,6 +3072,18 @@ lpfc_online(struct lpfc_hba *phba) if (!phba->sli4_hba.max_cfg_param.vpi_used) vpis_cleared = true; spin_unlock_irq(&phba->hbalock); + + /* Reestablish the local initiator port. + * The offline process destroyed the previous lport. + */ + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME && + !phba->nvmet_support) { + error = lpfc_nvme_create_localport(phba->pport); + if (error) + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6132 NVME restore reg failed " + "on nvmei error x%x\n", error); + } } else { lpfc_sli_queue_init(phba); if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ @@ -3226,6 +3238,13 @@ lpfc_offline(struct lpfc_hba *phba) /* stop port and all timers associated with this hba */ lpfc_stop_port(phba); + + /* Tear down the local and target port registrations. The + * nvme transports need to cleanup. 
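The outstanding-IO accounting rewritten in the heartbeat handler above (and in the matching lpfc_attr.c and lpfc_debugfs.c hunks earlier) derives the count from three per-targetport atomics instead of summing the context get/put lists under two locks. A small sketch of the arithmetic, with illustrative names:

    #include <linux/atomic.h>
    #include <linux/types.h>

    /* Sketch: outstanding NVMET IOs = commands received
     *                               - (commands dropped + contexts released) */
    static inline u32 example_outstanding_ios(atomic_t *rcv_fcp_cmd_in,
                                              atomic_t *rcv_fcp_cmd_drop,
                                              atomic_t *xmt_fcp_release)
    {
            u32 tot = atomic_read(rcv_fcp_cmd_drop) +
                      atomic_read(xmt_fcp_release);

            return atomic_read(rcv_fcp_cmd_in) - tot;
    }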
+ */ + lpfc_nvmet_destroy_targetport(phba); + lpfc_nvme_destroy_localport(phba->pport); + vports = lpfc_create_vport_work_array(phba); if (vports != NULL) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) @@ -3275,7 +3294,7 @@ lpfc_scsi_free(struct lpfc_hba *phba) list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put, list) { list_del(&sb->list); - pci_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, + dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, sb->dma_handle); kfree(sb); phba->total_scsi_bufs--; @@ -3286,7 +3305,7 @@ lpfc_scsi_free(struct lpfc_hba *phba) list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get, list) { list_del(&sb->list); - pci_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, + dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, sb->dma_handle); kfree(sb); phba->total_scsi_bufs--; @@ -3317,7 +3336,7 @@ lpfc_nvme_free(struct lpfc_hba *phba) list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, &phba->lpfc_nvme_buf_list_put, list) { list_del(&lpfc_ncmd->list); - pci_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data, + dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data, lpfc_ncmd->dma_handle); kfree(lpfc_ncmd); phba->total_nvme_bufs--; @@ -3328,7 +3347,7 @@ lpfc_nvme_free(struct lpfc_hba *phba) list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, &phba->lpfc_nvme_buf_list_get, list) { list_del(&lpfc_ncmd->list); - pci_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data, + dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data, lpfc_ncmd->dma_handle); kfree(lpfc_ncmd); phba->total_nvme_bufs--; @@ -3640,7 +3659,7 @@ lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba) list_remove_head(&scsi_sgl_list, psb, struct lpfc_scsi_buf, list); if (psb) { - pci_pool_free(phba->lpfc_sg_dma_buf_pool, + dma_pool_free(phba->lpfc_sg_dma_buf_pool, psb->data, psb->dma_handle); kfree(psb); } @@ -3710,9 +3729,7 @@ lpfc_get_wwpn(struct lpfc_hba *phba) if (phba->sli_rev == LPFC_SLI_REV4) return be64_to_cpu(wwn); else - return (((wwn & 0xffffffff00000000) >> 32) | - ((wwn & 0x00000000ffffffff) << 32)); - + return rol64(wwn, 32); } /** @@ -3774,7 +3791,7 @@ lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba) list_remove_head(&nvme_sgl_list, lpfc_ncmd, struct lpfc_nvme_buf, list); if (lpfc_ncmd) { - pci_pool_free(phba->lpfc_sg_dma_buf_pool, + dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data, lpfc_ncmd->dma_handle); kfree(lpfc_ncmd); @@ -5930,8 +5947,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock); INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list); INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); - INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_get_list); - INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_put_list); INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list); /* Fast-path XRI aborted CQ Event work queue list */ @@ -5940,8 +5955,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) /* This abort list used by worker thread */ spin_lock_init(&phba->sli4_hba.sgl_list_lock); - spin_lock_init(&phba->sli4_hba.nvmet_ctx_get_lock); - spin_lock_init(&phba->sli4_hba.nvmet_ctx_put_lock); spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock); /* @@ -6118,6 +6131,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) "Extents and RPI headers enabled.\n"); } mempool_free(mboxq, phba->mbox_mem_pool); + rc = -EIO; goto out_free_bsmbx; } @@ -6516,6 +6530,12 @@ lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba) lpfc_nvmet_buf_free(phba, sglq_entry->virt, 
sglq_entry->phys); kfree(sglq_entry); } + + /* Update the nvmet_xri_cnt to reflect no current sgls. + * The next initialization cycle sets the count and allocates + * the sgls over again. + */ + phba->sli4_hba.nvmet_xri_cnt = 0; } /** @@ -6846,8 +6866,8 @@ lpfc_create_shost(struct lpfc_hba *phba) if (phba->nvmet_support) { /* Only 1 vport (pport) will support NVME target */ if (phba->txrdy_payload_pool == NULL) { - phba->txrdy_payload_pool = pci_pool_create( - "txrdy_pool", phba->pcidev, + phba->txrdy_payload_pool = dma_pool_create( + "txrdy_pool", &phba->pcidev->dev, TXRDY_PAYLOAD_LEN, 16, 0); if (phba->txrdy_payload_pool) { phba->targetport = NULL; @@ -7605,6 +7625,11 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "3082 Mailbox (x%x) returned ldv:x0\n", bf_get(lpfc_mqe_command, &pmb->u.mqe)); + if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) { + phba->bbcredit_support = 1; + phba->sli4_hba.bbscn_params.word0 = rd_config->word8; + } + phba->sli4_hba.extents_in_use = bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config); phba->sli4_hba.max_cfg_param.max_xri = @@ -8301,6 +8326,9 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) goto out_error; } + /* Put list in known state in case driver load fails. */ + INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list); + /* Create NVMET Receive Queue for data */ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index ce25a18367b5..81fb92967b11 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -376,7 +376,12 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) mb->un.varCfgLnk.rttov = phba->fc_rttov; mb->un.varCfgLnk.altov = phba->fc_altov; mb->un.varCfgLnk.crtov = phba->fc_crtov; - mb->un.varCfgLnk.citov = phba->fc_citov; + mb->un.varCfgLnk.cscn = 0; + if (phba->bbcredit_support && phba->cfg_enable_bbcr) { + mb->un.varCfgLnk.cscn = 1; + mb->un.varCfgLnk.bbscn = bf_get(lpfc_bbscn_def, + &phba->sli4_hba.bbscn_params); + } if (phba->cfg_ack0 && (phba->sli_rev < LPFC_SLI_REV4)) mb->un.varCfgLnk.ack0_enable = 1; @@ -2139,6 +2144,7 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys) { struct lpfc_mbx_reg_vfi *reg_vfi; struct lpfc_hba *phba = vport->phba; + uint8_t bbscn_fabric = 0, bbscn_max = 0, bbscn_def = 0; memset(mbox, 0, sizeof(*mbox)); reg_vfi = &mbox->u.mqe.un.reg_vfi; @@ -2168,16 +2174,39 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys) bf_set(lpfc_reg_vfi_vp, reg_vfi, 0); bf_set(lpfc_reg_vfi_upd, reg_vfi, 1); } + + bf_set(lpfc_reg_vfi_bbcr, reg_vfi, 0); + bf_set(lpfc_reg_vfi_bbscn, reg_vfi, 0); + bbscn_fabric = (phba->fc_fabparam.cmn.bbRcvSizeMsb >> 4) & 0xF; + + if (phba->bbcredit_support && phba->cfg_enable_bbcr && + bbscn_fabric != 0) { + bbscn_max = bf_get(lpfc_bbscn_max, + &phba->sli4_hba.bbscn_params); + if (bbscn_fabric <= bbscn_max) { + bbscn_def = bf_get(lpfc_bbscn_def, + &phba->sli4_hba.bbscn_params); + + if (bbscn_fabric > bbscn_def) + bf_set(lpfc_reg_vfi_bbscn, reg_vfi, + bbscn_fabric); + else + bf_set(lpfc_reg_vfi_bbscn, reg_vfi, bbscn_def); + + bf_set(lpfc_reg_vfi_bbcr, reg_vfi, 1); + } + } lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX, "3134 Register VFI, mydid:x%x, fcfi:%d, " " vfi:%d, vpi:%d, fc_pname:%x%x fc_flag:x%x" - " port_state:x%x topology chg:%d\n", + " port_state:x%x topology chg:%d bbscn_fabric :%d\n", vport->fc_myDID, phba->fcf.fcfi, phba->sli4_hba.vfi_ids[vport->vfi], phba->vpi_ids[vport->vpi], 
reg_vfi->wwn[0], reg_vfi->wwn[1], vport->fc_flag, - vport->port_state, phba->fc_topology_changed); + vport->port_state, phba->fc_topology_changed, + bbscn_fabric); } /** diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index fcc05a1517c2..56faeb049b4a 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c @@ -97,8 +97,8 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align) i = SLI4_PAGE_SIZE; phba->lpfc_sg_dma_buf_pool = - pci_pool_create("lpfc_sg_dma_buf_pool", - phba->pcidev, + dma_pool_create("lpfc_sg_dma_buf_pool", + &phba->pcidev->dev, phba->cfg_sg_dma_buf_size, i, 0); if (!phba->lpfc_sg_dma_buf_pool) @@ -106,15 +106,15 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align) } else { phba->lpfc_sg_dma_buf_pool = - pci_pool_create("lpfc_sg_dma_buf_pool", - phba->pcidev, phba->cfg_sg_dma_buf_size, + dma_pool_create("lpfc_sg_dma_buf_pool", + &phba->pcidev->dev, phba->cfg_sg_dma_buf_size, align, 0); if (!phba->lpfc_sg_dma_buf_pool) goto fail; } - phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev, + phba->lpfc_mbuf_pool = dma_pool_create("lpfc_mbuf_pool", &phba->pcidev->dev, LPFC_BPL_SIZE, align, 0); if (!phba->lpfc_mbuf_pool) @@ -128,7 +128,7 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align) pool->max_count = 0; pool->current_count = 0; for ( i = 0; i < LPFC_MBUF_POOL_SIZE; i++) { - pool->elements[i].virt = pci_pool_alloc(phba->lpfc_mbuf_pool, + pool->elements[i].virt = dma_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, &pool->elements[i].phys); if (!pool->elements[i].virt) goto fail_free_mbuf_pool; @@ -152,21 +152,21 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align) sizeof(struct lpfc_node_rrq)); if (!phba->rrq_pool) goto fail_free_nlp_mem_pool; - phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool", - phba->pcidev, + phba->lpfc_hrb_pool = dma_pool_create("lpfc_hrb_pool", + &phba->pcidev->dev, LPFC_HDR_BUF_SIZE, align, 0); if (!phba->lpfc_hrb_pool) goto fail_free_rrq_mem_pool; - phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool", - phba->pcidev, + phba->lpfc_drb_pool = dma_pool_create("lpfc_drb_pool", + &phba->pcidev->dev, LPFC_DATA_BUF_SIZE, align, 0); if (!phba->lpfc_drb_pool) goto fail_free_hrb_pool; phba->lpfc_hbq_pool = NULL; } else { - phba->lpfc_hbq_pool = pci_pool_create("lpfc_hbq_pool", - phba->pcidev, LPFC_BPL_SIZE, align, 0); + phba->lpfc_hbq_pool = dma_pool_create("lpfc_hbq_pool", + &phba->pcidev->dev, LPFC_BPL_SIZE, align, 0); if (!phba->lpfc_hbq_pool) goto fail_free_nlp_mem_pool; phba->lpfc_hrb_pool = NULL; @@ -185,10 +185,10 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align) return 0; fail_free_drb_pool: - pci_pool_destroy(phba->lpfc_drb_pool); + dma_pool_destroy(phba->lpfc_drb_pool); phba->lpfc_drb_pool = NULL; fail_free_hrb_pool: - pci_pool_destroy(phba->lpfc_hrb_pool); + dma_pool_destroy(phba->lpfc_hrb_pool); phba->lpfc_hrb_pool = NULL; fail_free_rrq_mem_pool: mempool_destroy(phba->rrq_pool); @@ -201,14 +201,14 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align) phba->mbox_mem_pool = NULL; fail_free_mbuf_pool: while (i--) - pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt, + dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt, pool->elements[i].phys); kfree(pool->elements); fail_free_lpfc_mbuf_pool: - pci_pool_destroy(phba->lpfc_mbuf_pool); + dma_pool_destroy(phba->lpfc_mbuf_pool); phba->lpfc_mbuf_pool = NULL; fail_free_dma_buf_pool: - pci_pool_destroy(phba->lpfc_sg_dma_buf_pool); + dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); phba->lpfc_sg_dma_buf_pool = NULL; fail: return -ENOMEM; @@ 
-218,8 +218,8 @@ int lpfc_nvmet_mem_alloc(struct lpfc_hba *phba) { phba->lpfc_nvmet_drb_pool = - pci_pool_create("lpfc_nvmet_drb_pool", - phba->pcidev, LPFC_NVMET_DATA_BUF_SIZE, + dma_pool_create("lpfc_nvmet_drb_pool", + &phba->pcidev->dev, LPFC_NVMET_DATA_BUF_SIZE, SGL_ALIGN_SZ, 0); if (!phba->lpfc_nvmet_drb_pool) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, @@ -248,20 +248,20 @@ lpfc_mem_free(struct lpfc_hba *phba) /* Free HBQ pools */ lpfc_sli_hbqbuf_free_all(phba); if (phba->lpfc_nvmet_drb_pool) - pci_pool_destroy(phba->lpfc_nvmet_drb_pool); + dma_pool_destroy(phba->lpfc_nvmet_drb_pool); phba->lpfc_nvmet_drb_pool = NULL; if (phba->lpfc_drb_pool) - pci_pool_destroy(phba->lpfc_drb_pool); + dma_pool_destroy(phba->lpfc_drb_pool); phba->lpfc_drb_pool = NULL; if (phba->lpfc_hrb_pool) - pci_pool_destroy(phba->lpfc_hrb_pool); + dma_pool_destroy(phba->lpfc_hrb_pool); phba->lpfc_hrb_pool = NULL; if (phba->txrdy_payload_pool) - pci_pool_destroy(phba->txrdy_payload_pool); + dma_pool_destroy(phba->txrdy_payload_pool); phba->txrdy_payload_pool = NULL; if (phba->lpfc_hbq_pool) - pci_pool_destroy(phba->lpfc_hbq_pool); + dma_pool_destroy(phba->lpfc_hbq_pool); phba->lpfc_hbq_pool = NULL; if (phba->rrq_pool) @@ -282,15 +282,15 @@ lpfc_mem_free(struct lpfc_hba *phba) /* Free MBUF memory pool */ for (i = 0; i < pool->current_count; i++) - pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt, + dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt, pool->elements[i].phys); kfree(pool->elements); - pci_pool_destroy(phba->lpfc_mbuf_pool); + dma_pool_destroy(phba->lpfc_mbuf_pool); phba->lpfc_mbuf_pool = NULL; /* Free DMA buffer memory pool */ - pci_pool_destroy(phba->lpfc_sg_dma_buf_pool); + dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); phba->lpfc_sg_dma_buf_pool = NULL; /* Free Device Data memory pool */ @@ -379,7 +379,7 @@ lpfc_mem_free_all(struct lpfc_hba *phba) * @handle: used to return the DMA-mapped address of the mbuf * * Description: Allocates a DMA-mapped buffer from the lpfc_mbuf_pool PCI pool. - * Allocates from generic pci_pool_alloc function first and if that fails and + * Allocates from generic dma_pool_alloc function first and if that fails and * mem_flags has MEM_PRI set (the only defined flag), returns an mbuf from the * HBA's pool. * @@ -397,7 +397,7 @@ lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle) unsigned long iflags; void *ret; - ret = pci_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle); + ret = dma_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle); spin_lock_irqsave(&phba->hbalock, iflags); if (!ret && (mem_flags & MEM_PRI) && pool->current_count) { @@ -433,7 +433,7 @@ __lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma) pool->elements[pool->current_count].phys = dma; pool->current_count++; } else { - pci_pool_free(phba->lpfc_mbuf_pool, virt, dma); + dma_pool_free(phba->lpfc_mbuf_pool, virt, dma); } return; } @@ -470,7 +470,7 @@ lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma) * @handle: used to return the DMA-mapped address of the nvmet_buf * * Description: Allocates a DMA-mapped buffer from the lpfc_sg_dma_buf_pool - * PCI pool. Allocates from generic pci_pool_alloc function. + * PCI pool. Allocates from generic dma_pool_alloc function. 
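The pci_pool_* calls replaced throughout these memory hunks were thin wrappers around the dma_pool_* API, so the conversion is mechanical: pass &pdev->dev instead of the struct pci_dev and rename the call. A minimal sketch of the new form, with an illustrative pool name and helpers:

    #include <linux/pci.h>
    #include <linux/dmapool.h>
    #include <linux/gfp.h>

    /* Sketch: create a coherent DMA pool and carve a buffer out of it. */
    static struct dma_pool *example_create_pool(struct pci_dev *pdev,
                                                size_t size, size_t align)
    {
            /* dma_pool_create() takes the generic device, not the PCI device */
            return dma_pool_create("example_pool", &pdev->dev, size, align, 0);
    }

    static void *example_alloc_buf(struct dma_pool *pool, dma_addr_t *phys)
    {
            return dma_pool_alloc(pool, GFP_KERNEL, phys);
    }

Freeing is symmetric: dma_pool_free(pool, virt, phys) returns a buffer and dma_pool_destroy(pool) tears the pool down, exactly as the converted call sites above do.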
* * Returns: * pointer to the allocated nvmet_buf on success @@ -481,7 +481,7 @@ lpfc_nvmet_buf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle) { void *ret; - ret = pci_pool_alloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, handle); + ret = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, handle); return ret; } @@ -497,7 +497,7 @@ lpfc_nvmet_buf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle) void lpfc_nvmet_buf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma) { - pci_pool_free(phba->lpfc_sg_dma_buf_pool, virt, dma); + dma_pool_free(phba->lpfc_sg_dma_buf_pool, virt, dma); } /** @@ -522,7 +522,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba) if (!hbqbp) return NULL; - hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL, + hbqbp->dbuf.virt = dma_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL, &hbqbp->dbuf.phys); if (!hbqbp->dbuf.virt) { kfree(hbqbp); @@ -547,7 +547,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba) void lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp) { - pci_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys); + dma_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys); kfree(hbqbp); return; } @@ -574,16 +574,16 @@ lpfc_sli4_rb_alloc(struct lpfc_hba *phba) if (!dma_buf) return NULL; - dma_buf->hbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL, + dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL, &dma_buf->hbuf.phys); if (!dma_buf->hbuf.virt) { kfree(dma_buf); return NULL; } - dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL, + dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL, &dma_buf->dbuf.phys); if (!dma_buf->dbuf.virt) { - pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, + dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, dma_buf->hbuf.phys); kfree(dma_buf); return NULL; @@ -607,8 +607,8 @@ lpfc_sli4_rb_alloc(struct lpfc_hba *phba) void lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab) { - pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys); - pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys); + dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys); + dma_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys); kfree(dmab); } @@ -634,16 +634,16 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba) if (!dma_buf) return NULL; - dma_buf->hbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL, + dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL, &dma_buf->hbuf.phys); if (!dma_buf->hbuf.virt) { kfree(dma_buf); return NULL; } - dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_nvmet_drb_pool, + dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_nvmet_drb_pool, GFP_KERNEL, &dma_buf->dbuf.phys); if (!dma_buf->dbuf.virt) { - pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, + dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, dma_buf->hbuf.phys); kfree(dma_buf); return NULL; @@ -667,8 +667,8 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba) void lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab) { - pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys); - pci_pool_free(phba->lpfc_nvmet_drb_pool, + dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys); + dma_pool_free(phba->lpfc_nvmet_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys); kfree(dmab); } diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index f74cb0142fd4..f3ad7cac355d 100644 --- 
a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -1724,6 +1724,9 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, lpfc_nvme_update_localport(vport); } + } else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { + ndlp->nlp_fc4_type |= NLP_FC4_FCP; + } else if (ndlp->nlp_fc4_type == 0) { rc = lpfc_ns_cmd(vport, SLI_CTNS_GFT_ID, 0, ndlp->nlp_DID); @@ -1892,6 +1895,15 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, goto out; } + /* When the rport rejected the FCP PRLI as unsupported. + * This should only happen in Pt2Pt so an NVME PRLI + * should be outstanding still. + */ + if (npr && ndlp->nlp_flag & NLP_FCP_PRLI_RJT) { + ndlp->nlp_fc4_type &= ~NLP_FC4_FCP; + goto out_err; + } + /* The LS Req had some error. Don't let this be a * target. */ @@ -2189,12 +2201,15 @@ lpfc_device_rm_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { /* - * Take no action. If a LOGO is outstanding, then possibly DevLoss has - * timed out and is calling for Device Remove. In this case, the LOGO - * must be allowed to complete in state LOGO_ISSUE so that the rpi - * and other NLP flags are correctly cleaned up. + * DevLoss has timed out and is calling for Device Remove. + * In this case, abort the LOGO and cleanup the ndlp */ - return ndlp->nlp_state; + + lpfc_unreg_rpi(vport, ndlp); + /* software abort outstanding PLOGI */ + lpfc_els_abort(vport->phba, ndlp); + lpfc_drop_node(vport, ndlp); + return NLP_STE_FREED_NODE; } static uint32_t diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index 0a0a1b92d01d..23bdb1ca106e 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -110,7 +110,7 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport, qhandle->index = qidx; } - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, "6073 Binding %s HdwQueue %d (cpu %d) to " "io_channel %d qhandle %p\n", str, qidx, qhandle->cpu_id, qhandle->index, qhandle); @@ -364,7 +364,7 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID); rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, genwqe); - if (rc == WQE_ERROR) { + if (rc) { lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, "6045 Issue GEN REQ WQE to NPORT x%x " "Data: x%x x%x\n", @@ -884,7 +884,7 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, wcqe->total_data_placed); nCmd->transferred_length = 0; nCmd->rcv_rsplen = 0; - nCmd->status = NVME_SC_FC_TRANSPORT_ERROR; + nCmd->status = NVME_SC_INTERNAL; } } @@ -1270,7 +1270,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, * not exceed the programmed depth. 
*/ if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) { - ret = -EAGAIN; + ret = -EBUSY; goto out_fail; } @@ -1279,7 +1279,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, "6065 driver's buffer pool is empty, " "IO failed\n"); - ret = -ENOMEM; + ret = -EBUSY; goto out_fail; } #ifdef CONFIG_SCSI_LPFC_DEBUG_FS @@ -1332,7 +1332,6 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, "sid: x%x did: x%x oxid: x%x\n", ret, vport->fc_myDID, ndlp->nlp_DID, lpfc_ncmd->cur_iocbq.sli4_xritag); - ret = -EBUSY; goto out_free_nvme_buf; } @@ -1576,7 +1575,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, abts_buf->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl; ret_val = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_buf); spin_unlock_irqrestore(&phba->hbalock, flags); - if (ret_val == IOCB_ERROR) { + if (ret_val) { lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, "6137 Failed abts issue_wqe with status x%x " "for nvme_fcreq %p.\n", @@ -1939,7 +1938,7 @@ lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc) * pci bus space for an I/O. The DMA buffer includes the * number of SGE's necessary to support the sg_tablesize. */ - lpfc_ncmd->data = pci_pool_alloc(phba->lpfc_sg_dma_buf_pool, + lpfc_ncmd->data = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, &lpfc_ncmd->dma_handle); if (!lpfc_ncmd->data) { @@ -1950,7 +1949,7 @@ lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc) lxri = lpfc_sli4_next_xritag(phba); if (lxri == NO_XRI) { - pci_pool_free(phba->lpfc_sg_dma_buf_pool, + dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data, lpfc_ncmd->dma_handle); kfree(lpfc_ncmd); break; @@ -1961,7 +1960,7 @@ lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc) /* Allocate iotag for lpfc_ncmd->cur_iocbq. */ iotag = lpfc_sli_next_iotag(phba, pwqeq); if (iotag == 0) { - pci_pool_free(phba->lpfc_sg_dma_buf_pool, + dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data, lpfc_ncmd->dma_handle); kfree(lpfc_ncmd); lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, @@ -2182,8 +2181,15 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport) vport->localport = localport; lport->vport = vport; vport->nvmei_support = 1; - len = lpfc_new_nvme_buf(vport, phba->sli4_hba.nvme_xri_max); - vport->phba->total_nvme_bufs += len; + + /* Don't post more new bufs if repost already recovered + * the nvme sgls. + */ + if (phba->sli4_hba.nvme_xri_cnt == 0) { + len = lpfc_new_nvme_buf(vport, + phba->sli4_hba.nvme_xri_max); + vport->phba->total_nvme_bufs += len; + } } return ret; @@ -2296,6 +2302,9 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) ndlp->nlp_DID, ndlp->nlp_type); localport = vport->localport; + if (!localport) + return 0; + lport = (struct lpfc_nvme_lport *)localport->private; /* NVME rports are not preserved across devloss. 
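In the lpfc_nvme_fcp_io_submit hunks above, transient shortages (queue depth reached, empty driver buffer pool) are now reported as -EBUSY rather than a mix of -EAGAIN and -ENOMEM. A hedged sketch of the depth-check pattern; the helper below is illustrative, not a driver function:

    #include <linux/atomic.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    /* Sketch: reject a new command with -EBUSY once the per-rport outstanding
     * count reaches the allowed depth; otherwise account for it and proceed. */
    static int example_depth_check(atomic_t *cmd_pending, u32 cmd_qdepth)
    {
            if (atomic_read(cmd_pending) >= cmd_qdepth)
                    return -EBUSY;

            atomic_inc(cmd_pending);
            return 0;
    }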
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index bbbd0f84160d..0b7c1a49e203 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -170,12 +170,14 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf) struct lpfc_nvmet_tgtport *tgtp; struct fc_frame_header *fc_hdr; struct rqb_dmabuf *nvmebuf; + struct lpfc_nvmet_ctx_info *infop; uint32_t *payload; uint32_t size, oxid, sid, rc; + int cpu; unsigned long iflag; if (ctxp->txrdy) { - pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy, + dma_pool_free(phba->txrdy_payload_pool, ctxp->txrdy, ctxp->txrdy_phys); ctxp->txrdy = NULL; ctxp->txrdy_phys = 0; @@ -267,11 +269,16 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf) } spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag); - spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_put_lock, iflag); - list_add_tail(&ctx_buf->list, - &phba->sli4_hba.lpfc_nvmet_ctx_put_list); - phba->sli4_hba.nvmet_ctx_put_cnt++; - spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_put_lock, iflag); + /* + * Use the CPU context list, from the MRQ the IO was received on + * (ctxp->idx), to save context structure. + */ + cpu = smp_processor_id(); + infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx); + spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag); + list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list); + infop->nvmet_ctx_list_cnt++; + spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag); #endif } @@ -552,7 +559,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, /* lpfc_nvmet_xmt_fcp_release() will recycle the context */ } else { ctxp->entry_cnt++; - start_clean = offsetof(struct lpfc_iocbq, wqe); + start_clean = offsetof(struct lpfc_iocbq, iocb_flag); memset(((char *)cmdwqe) + start_clean, 0, (sizeof(struct lpfc_iocbq) - start_clean)); #ifdef CONFIG_SCSI_LPFC_DEBUG_FS @@ -879,51 +886,54 @@ static struct nvmet_fc_target_template lpfc_tgttemplate = { }; static void -lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba) +__lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba, + struct lpfc_nvmet_ctx_info *infop) { struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf; unsigned long flags; - spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_get_lock, flags); - spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock); + spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags); list_for_each_entry_safe(ctx_buf, next_ctx_buf, - &phba->sli4_hba.lpfc_nvmet_ctx_get_list, list) { + &infop->nvmet_ctx_list, list) { spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock); list_del_init(&ctx_buf->list); spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); - __lpfc_clear_active_sglq(phba, - ctx_buf->sglq->sli4_lxritag); + + __lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag); ctx_buf->sglq->state = SGL_FREED; ctx_buf->sglq->ndlp = NULL; spin_lock(&phba->sli4_hba.sgl_list_lock); list_add_tail(&ctx_buf->sglq->list, - &phba->sli4_hba.lpfc_nvmet_sgl_list); + &phba->sli4_hba.lpfc_nvmet_sgl_list); spin_unlock(&phba->sli4_hba.sgl_list_lock); lpfc_sli_release_iocbq(phba, ctx_buf->iocbq); kfree(ctx_buf->context); } - list_for_each_entry_safe(ctx_buf, next_ctx_buf, - &phba->sli4_hba.lpfc_nvmet_ctx_put_list, list) { - spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock); - list_del_init(&ctx_buf->list); - spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); - __lpfc_clear_active_sglq(phba, - ctx_buf->sglq->sli4_lxritag); - ctx_buf->sglq->state = SGL_FREED; - ctx_buf->sglq->ndlp = NULL; + 
spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags); +} - spin_lock(&phba->sli4_hba.sgl_list_lock); - list_add_tail(&ctx_buf->sglq->list, - &phba->sli4_hba.lpfc_nvmet_sgl_list); - spin_unlock(&phba->sli4_hba.sgl_list_lock); +static void +lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba) +{ + struct lpfc_nvmet_ctx_info *infop; + int i, j; - lpfc_sli_release_iocbq(phba, ctx_buf->iocbq); - kfree(ctx_buf->context); + /* The first context list, MRQ 0 CPU 0 */ + infop = phba->sli4_hba.nvmet_ctx_info; + if (!infop) + return; + + /* Cycle the the entire CPU context list for every MRQ */ + for (i = 0; i < phba->cfg_nvmet_mrq; i++) { + for (j = 0; j < phba->sli4_hba.num_present_cpu; j++) { + __lpfc_nvmet_clean_io_for_cpu(phba, infop); + infop++; /* next */ + } } - spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock); - spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_get_lock, flags); + kfree(phba->sli4_hba.nvmet_ctx_info); + phba->sli4_hba.nvmet_ctx_info = NULL; } static int @@ -932,15 +942,71 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) struct lpfc_nvmet_ctxbuf *ctx_buf; struct lpfc_iocbq *nvmewqe; union lpfc_wqe128 *wqe; - int i; + struct lpfc_nvmet_ctx_info *last_infop; + struct lpfc_nvmet_ctx_info *infop; + int i, j, idx; lpfc_printf_log(phba, KERN_INFO, LOG_NVME, "6403 Allocate NVMET resources for %d XRIs\n", phba->sli4_hba.nvmet_xri_cnt); + phba->sli4_hba.nvmet_ctx_info = kcalloc( + phba->sli4_hba.num_present_cpu * phba->cfg_nvmet_mrq, + sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL); + if (!phba->sli4_hba.nvmet_ctx_info) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6419 Failed allocate memory for " + "nvmet context lists\n"); + return -ENOMEM; + } + + /* + * Assuming X CPUs in the system, and Y MRQs, allocate some + * lpfc_nvmet_ctx_info structures as follows: + * + * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0 + * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1 + * ... + * cpuX/mrqY cpuX/mrqY ... cpuX/mrqY + * + * Each line represents a MRQ "silo" containing an entry for + * every CPU. + * + * MRQ X is initially assumed to be associated with CPU X, thus + * contexts are initially distributed across all MRQs using + * the MRQ index (N) as follows cpuN/mrqN. When contexts are + * freed, the are freed to the MRQ silo based on the CPU number + * of the IO completion. Thus a context that was allocated for MRQ A + * whose IO completed on CPU B will be freed to cpuB/mrqA. + */ + infop = phba->sli4_hba.nvmet_ctx_info; + for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { + for (j = 0; j < phba->cfg_nvmet_mrq; j++) { + INIT_LIST_HEAD(&infop->nvmet_ctx_list); + spin_lock_init(&infop->nvmet_ctx_list_lock); + infop->nvmet_ctx_list_cnt = 0; + infop++; + } + } + + /* + * Setup the next CPU context info ptr for each MRQ. + * MRQ 0 will cycle thru CPUs 0 - X separately from + * MRQ 1 cycling thru CPUs 0 - X, and so on. + */ + for (j = 0; j < phba->cfg_nvmet_mrq; j++) { + last_infop = lpfc_get_ctx_list(phba, 0, j); + for (i = phba->sli4_hba.num_present_cpu - 1; i >= 0; i--) { + infop = lpfc_get_ctx_list(phba, i, j); + infop->nvmet_ctx_next_cpu = last_infop; + last_infop = infop; + } + } + /* For all nvmet xris, allocate resources needed to process a * received command on a per xri basis. 
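The comment above describes a flat array of per-CPU, per-MRQ free lists; the lpfc_get_ctx_list() macro added in lpfc_nvmet.h later in this patch is just two-dimensional indexing into that array. A minimal sketch of the equivalent lookup, using a simplified stand-in type:

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    /* Simplified stand-in for struct lpfc_nvmet_ctx_info (sketch only). */
    struct example_ctx_info {
            struct list_head ctx_list;
            spinlock_t ctx_list_lock;
            u16 ctx_list_cnt;
    };

    /* Equivalent of lpfc_get_ctx_list(): index (cpu, mrq) in the flat
     * kcalloc'ed array of num_cpu * num_mrq entries. */
    static inline struct example_ctx_info *
    example_get_ctx_list(struct example_ctx_info *base, int cpu, int mrq,
                         int num_mrq)
    {
            return base + (cpu * num_mrq) + mrq;
    }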
*/ + idx = 0; for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) { ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL); if (!ctx_buf) { @@ -977,7 +1043,6 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) /* Word 7 */ bf_set(wqe_ct, &wqe->generic.wqe_com, SLI4_CT_RPI); bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3); - bf_set(wqe_pu, &wqe->generic.wqe_com, 1); /* Word 10 */ bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1); bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0); @@ -995,12 +1060,35 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) "6407 Ran out of NVMET XRIs\n"); return -ENOMEM; } - spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock); - list_add_tail(&ctx_buf->list, - &phba->sli4_hba.lpfc_nvmet_ctx_get_list); - spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock); + + /* + * Add ctx to MRQidx context list. Our initial assumption + * is MRQidx will be associated with CPUidx. This association + * can change on the fly. + */ + infop = lpfc_get_ctx_list(phba, idx, idx); + spin_lock(&infop->nvmet_ctx_list_lock); + list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list); + infop->nvmet_ctx_list_cnt++; + spin_unlock(&infop->nvmet_ctx_list_lock); + + /* Spread ctx structures evenly across all MRQs */ + idx++; + if (idx >= phba->cfg_nvmet_mrq) + idx = 0; + } + + infop = phba->sli4_hba.nvmet_ctx_info; + for (j = 0; j < phba->cfg_nvmet_mrq; j++) { + for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { + lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT, + "6408 TOTAL NVMET ctx for CPU %d " + "MRQ %d: cnt %d nextcpu %p\n", + i, j, infop->nvmet_ctx_list_cnt, + infop->nvmet_ctx_next_cpu); + infop++; + } } - phba->sli4_hba.nvmet_ctx_get_cnt = phba->sli4_hba.nvmet_xri_cnt; return 0; } @@ -1365,10 +1453,65 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, #endif } +static struct lpfc_nvmet_ctxbuf * +lpfc_nvmet_replenish_context(struct lpfc_hba *phba, + struct lpfc_nvmet_ctx_info *current_infop) +{ + struct lpfc_nvmet_ctxbuf *ctx_buf = NULL; + struct lpfc_nvmet_ctx_info *get_infop; + int i; + + /* + * The current_infop for the MRQ a NVME command IU was received + * on is empty. Our goal is to replenish this MRQs context + * list from a another CPUs. + * + * First we need to pick a context list to start looking on. + * nvmet_ctx_start_cpu has available context the last time + * we needed to replenish this CPU where nvmet_ctx_next_cpu + * is just the next sequential CPU for this MRQ. 
+ */ + if (current_infop->nvmet_ctx_start_cpu) + get_infop = current_infop->nvmet_ctx_start_cpu; + else + get_infop = current_infop->nvmet_ctx_next_cpu; + + for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { + if (get_infop == current_infop) { + get_infop = get_infop->nvmet_ctx_next_cpu; + continue; + } + spin_lock(&get_infop->nvmet_ctx_list_lock); + + /* Just take the entire context list, if there are any */ + if (get_infop->nvmet_ctx_list_cnt) { + list_splice_init(&get_infop->nvmet_ctx_list, + ¤t_infop->nvmet_ctx_list); + current_infop->nvmet_ctx_list_cnt = + get_infop->nvmet_ctx_list_cnt - 1; + get_infop->nvmet_ctx_list_cnt = 0; + spin_unlock(&get_infop->nvmet_ctx_list_lock); + + current_infop->nvmet_ctx_start_cpu = get_infop; + list_remove_head(¤t_infop->nvmet_ctx_list, + ctx_buf, struct lpfc_nvmet_ctxbuf, + list); + return ctx_buf; + } + + /* Otherwise, move on to the next CPU for this MRQ */ + spin_unlock(&get_infop->nvmet_ctx_list_lock); + get_infop = get_infop->nvmet_ctx_next_cpu; + } + + /* Nothing found, all contexts for the MRQ are in-flight */ + return NULL; +} + /** * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer * @phba: pointer to lpfc hba data structure. - * @pring: pointer to a SLI ring. + * @idx: relative index of MRQ vector * @nvmebuf: pointer to lpfc nvme command HBQ data structure. * * This routine is used for processing the WQE associated with a unsolicited @@ -1380,22 +1523,26 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, **/ static void lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, - struct lpfc_sli_ring *pring, + uint32_t idx, struct rqb_dmabuf *nvmebuf, uint64_t isr_timestamp) { -#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) struct lpfc_nvmet_rcv_ctx *ctxp; struct lpfc_nvmet_tgtport *tgtp; struct fc_frame_header *fc_hdr; struct lpfc_nvmet_ctxbuf *ctx_buf; + struct lpfc_nvmet_ctx_info *current_infop; uint32_t *payload; uint32_t size, oxid, sid, rc, qno; unsigned long iflag; + int current_cpu; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS uint32_t id; #endif + if (!IS_ENABLED(CONFIG_NVME_TARGET_FC)) + return; + ctx_buf = NULL; if (!nvmebuf || !phba->targetport) { lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, @@ -1407,31 +1554,24 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, goto dropit; } - spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_get_lock, iflag); - if (phba->sli4_hba.nvmet_ctx_get_cnt) { - list_remove_head(&phba->sli4_hba.lpfc_nvmet_ctx_get_list, + /* + * Get a pointer to the context list for this MRQ based on + * the CPU this MRQ IRQ is associated with. If the CPU association + * changes from our initial assumption, the context list could + * be empty, thus it would need to be replenished with the + * context list from another CPU for this MRQ. 
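lpfc_nvmet_replenish_context() above takes a donor CPU's entire free list in one splice rather than migrating contexts one at a time (the real code also pops one context for immediate use, which is why the count is set to cnt - 1). A sketch of the bulk move, with locking omitted and illustrative names:

    #include <linux/list.h>
    #include <linux/types.h>

    /* Sketch: list_splice_init() moves every entry from 'donor' onto 'dst'
     * and leaves 'donor' empty and reinitialized, so the cost is O(1) no
     * matter how many contexts change hands. */
    static void example_steal_all(struct list_head *dst, u16 *dst_cnt,
                                  struct list_head *donor, u16 *donor_cnt)
    {
            list_splice_init(donor, dst);
            *dst_cnt += *donor_cnt;
            *donor_cnt = 0;
    }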
+ */ + current_cpu = smp_processor_id(); + current_infop = lpfc_get_ctx_list(phba, current_cpu, idx); + spin_lock_irqsave(¤t_infop->nvmet_ctx_list_lock, iflag); + if (current_infop->nvmet_ctx_list_cnt) { + list_remove_head(¤t_infop->nvmet_ctx_list, ctx_buf, struct lpfc_nvmet_ctxbuf, list); - phba->sli4_hba.nvmet_ctx_get_cnt--; + current_infop->nvmet_ctx_list_cnt--; } else { - spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock); - if (phba->sli4_hba.nvmet_ctx_put_cnt) { - list_splice(&phba->sli4_hba.lpfc_nvmet_ctx_put_list, - &phba->sli4_hba.lpfc_nvmet_ctx_get_list); - INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_put_list); - phba->sli4_hba.nvmet_ctx_get_cnt = - phba->sli4_hba.nvmet_ctx_put_cnt; - phba->sli4_hba.nvmet_ctx_put_cnt = 0; - spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock); - - list_remove_head( - &phba->sli4_hba.lpfc_nvmet_ctx_get_list, - ctx_buf, struct lpfc_nvmet_ctxbuf, list); - phba->sli4_hba.nvmet_ctx_get_cnt--; - } else { - spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock); - } + ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop); } - spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_get_lock, iflag); + spin_unlock_irqrestore(¤t_infop->nvmet_ctx_list_lock, iflag); fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt); oxid = be16_to_cpu(fc_hdr->fh_ox_id); @@ -1483,6 +1623,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, ctxp->size = size; ctxp->oxid = oxid; ctxp->sid = sid; + ctxp->idx = idx; ctxp->state = LPFC_NVMET_STE_RCV; ctxp->entry_cnt = 1; ctxp->flag = 0; @@ -1556,7 +1697,6 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, if (nvmebuf) lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */ -#endif } /** @@ -1591,7 +1731,7 @@ lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, /** * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport * @phba: pointer to lpfc hba data structure. - * @pring: pointer to a SLI ring. + * @idx: relative index of MRQ vector * @nvmebuf: pointer to received nvme data structure. 
* * This routine is used to process an unsolicited event received from a SLI @@ -1602,7 +1742,7 @@ lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, **/ void lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba, - struct lpfc_sli_ring *pring, + uint32_t idx, struct rqb_dmabuf *nvmebuf, uint64_t isr_timestamp) { @@ -1610,7 +1750,7 @@ lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba, lpfc_rq_buf_free(phba, &nvmebuf->hbuf); return; } - lpfc_nvmet_unsol_fcp_buffer(phba, pring, nvmebuf, + lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf, isr_timestamp); } @@ -1863,6 +2003,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, nvmewqe->sli4_xritag); /* Word 7 */ + bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, 1); bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE); /* Word 8 */ @@ -1939,7 +2080,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, case NVMET_FCOP_WRITEDATA: /* Words 0 - 2 : The first sg segment */ - txrdy = pci_pool_alloc(phba->txrdy_payload_pool, + txrdy = dma_pool_alloc(phba->txrdy_payload_pool, GFP_KERNEL, &physaddr); if (!txrdy) { lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, @@ -1971,6 +2112,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, nvmewqe->sli4_xritag); /* Word 7 */ + bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, 1); bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0); bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, CMD_FCP_TRECEIVE64_WQE); @@ -2054,6 +2196,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, nvmewqe->sli4_xritag); /* Word 7 */ + bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, 0); bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1); bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE); diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h index 48a76788b003..25a65b0bb7f3 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.h +++ b/drivers/scsi/lpfc/lpfc_nvmet.h @@ -74,6 +74,19 @@ struct lpfc_nvmet_tgtport { atomic_t xmt_abort_rsp_error; }; +struct lpfc_nvmet_ctx_info { + struct list_head nvmet_ctx_list; + spinlock_t nvmet_ctx_list_lock; /* lock per CPU */ + struct lpfc_nvmet_ctx_info *nvmet_ctx_next_cpu; + struct lpfc_nvmet_ctx_info *nvmet_ctx_start_cpu; + uint16_t nvmet_ctx_list_cnt; + char pad[16]; /* pad to a cache-line */ +}; + +/* This retrieves the context info associated with the specified cpu / mrq */ +#define lpfc_get_ctx_list(phba, cpu, mrq) \ + (phba->sli4_hba.nvmet_ctx_info + ((cpu * phba->cfg_nvmet_mrq) + mrq)) + struct lpfc_nvmet_rcv_ctx { union { struct nvmefc_tgt_ls_req ls_req; @@ -92,6 +105,7 @@ struct lpfc_nvmet_rcv_ctx { uint16_t size; uint16_t entry_cnt; uint16_t cpu; + uint16_t idx; uint16_t state; /* States */ #define LPFC_NVMET_STE_LS_RCV 1 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index adc784539061..1a6f122bb25d 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -416,7 +416,7 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) * struct fcp_cmnd, struct fcp_rsp and the number of bde's * necessary to support the sg_tablesize. */ - psb->data = pci_pool_zalloc(phba->lpfc_sg_dma_buf_pool, + psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, &psb->dma_handle); if (!psb->data) { kfree(psb); @@ -427,7 +427,7 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) /* Allocate iotag for psb->cur_iocbq. 
*/ iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); if (iotag == 0) { - pci_pool_free(phba->lpfc_sg_dma_buf_pool, + dma_pool_free(phba->lpfc_sg_dma_buf_pool, psb->data, psb->dma_handle); kfree(psb); break; @@ -826,7 +826,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) * for the struct fcp_cmnd, struct fcp_rsp and the number * of bde's necessary to support the sg_tablesize. */ - psb->data = pci_pool_zalloc(phba->lpfc_sg_dma_buf_pool, + psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, &psb->dma_handle); if (!psb->data) { kfree(psb); @@ -839,7 +839,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) */ if (phba->cfg_enable_bg && (((unsigned long)(psb->data) & (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) { - pci_pool_free(phba->lpfc_sg_dma_buf_pool, + dma_pool_free(phba->lpfc_sg_dma_buf_pool, psb->data, psb->dma_handle); kfree(psb); break; @@ -848,7 +848,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) lxri = lpfc_sli4_next_xritag(phba); if (lxri == NO_XRI) { - pci_pool_free(phba->lpfc_sg_dma_buf_pool, + dma_pool_free(phba->lpfc_sg_dma_buf_pool, psb->data, psb->dma_handle); kfree(psb); break; @@ -857,7 +857,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) /* Allocate iotag for psb->cur_iocbq. */ iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); if (iotag == 0) { - pci_pool_free(phba->lpfc_sg_dma_buf_pool, + dma_pool_free(phba->lpfc_sg_dma_buf_pool, psb->data, psb->dma_handle); kfree(psb); lpfc_printf_log(phba, KERN_ERR, LOG_FCP, diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index e948ea05fd33..8b119f87b51d 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -80,8 +80,8 @@ static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *, struct lpfc_cqe *); static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *, int); -static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *, - uint32_t); +static int lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, + struct lpfc_eqe *eqe, uint32_t qidx); static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba); static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba); static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, @@ -106,7 +106,7 @@ lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq) * -ENOMEM. * The caller is expected to hold the hbalock when calling this routine. **/ -static uint32_t +static int lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe) { union lpfc_wqe *temp_wqe; @@ -123,7 +123,7 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe) idx = ((q->host_index + 1) % q->entry_count); if (idx == q->hba_index) { q->WQ_overflow++; - return -ENOMEM; + return -EBUSY; } q->WQ_posted++; /* set consumption flag every once in a while */ @@ -10741,7 +10741,7 @@ lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, abtsiocbp->vport = vport; abtsiocbp->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl; retval = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abtsiocbp); - if (retval == IOCB_ERROR) { + if (retval) { lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, "6147 Failed abts issue_wqe with status x%x " "for oxid x%x\n", @@ -13010,7 +13010,7 @@ lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, * completion queue, and then return. 
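The lpfc_sli.c return-type changes just below let each EQE handler report how many CQ entries it consumed, so the interrupt handler can cap the work done per invocation (LPFC_MAX_ISR_CQE). A compilable model of that budget logic, with illustrative names rather than driver symbols:

        /* Model of the bounded ISR polling loop: handle_eqe() returns the
         * number of CQEs it processed; the loop stops at the repost interval
         * or once the cumulative CQE count exceeds the per-interrupt budget.
         * entry_repost must be non-zero.
         */
        #define EXAMPLE_MAX_ISR_CQE 64

        static int example_poll_eq(int (*handle_eqe)(void *arg), void *arg,
                                   int pending, int entry_repost)
        {
                int ecount = 0, ccount = 0;

                while (ecount < pending) {
                        ccount += handle_eqe(arg);
                        if (!(++ecount % entry_repost) ||
                            ccount > EXAMPLE_MAX_ISR_CQE)
                                break;
                }
                return ecount;  /* EQEs actually consumed this pass */
        }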
* **/ -static void +static int lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, struct lpfc_queue *speq) { @@ -13034,7 +13034,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0365 Slow-path CQ identifier " "(%d) does not exist\n", cqid); - return; + return 0; } /* Save EQ associated with this CQ */ @@ -13071,7 +13071,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0370 Invalid completion queue type (%d)\n", cq->type); - return; + return 0; } /* Catch the no cq entry condition, log an error */ @@ -13086,6 +13086,8 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, /* wake up worker thread if there are works to be done */ if (workposted) lpfc_worker_wake_up(phba); + + return ecount; } /** @@ -13289,7 +13291,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, if (fc_hdr->fh_type == FC_TYPE_FCP) { dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe); lpfc_nvmet_unsol_fcp_event( - phba, phba->sli4_hba.els_wq->pring, dma_buf, + phba, idx, dma_buf, cq->assoc_qp->isr_timestamp); return false; } @@ -13393,7 +13395,7 @@ lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, * queue and process all the entries on the completion queue, rearm the * completion queue, and then return. **/ -static void +static int lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, uint32_t qidx) { @@ -13409,7 +13411,7 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, "event: majorcode=x%x, minorcode=x%x\n", bf_get_le32(lpfc_eqe_major_code, eqe), bf_get_le32(lpfc_eqe_minor_code, eqe)); - return; + return 0; } /* Get the reference to the corresponding CQ */ @@ -13446,8 +13448,9 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, /* Otherwise this is a Slow path event */ if (cq == NULL) { - lpfc_sli4_sp_handle_eqe(phba, eqe, phba->sli4_hba.hba_eq[qidx]); - return; + ecount = lpfc_sli4_sp_handle_eqe(phba, eqe, + phba->sli4_hba.hba_eq[qidx]); + return ecount; } process_cq: @@ -13456,7 +13459,7 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, "0368 Miss-matched fast-path completion " "queue identifier: eqcqid=%d, fcpcqid=%d\n", cqid, cq->queue_id); - return; + return 0; } /* Save EQ associated with this CQ */ @@ -13486,6 +13489,8 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, /* wake up worker thread if there are works to be done */ if (workposted) lpfc_worker_wake_up(phba); + + return ecount; } static void @@ -13706,6 +13711,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id) struct lpfc_eqe *eqe; unsigned long iflag; int ecount = 0; + int ccount = 0; int hba_eqidx; /* Get the driver's phba structure from the dev_id */ @@ -13757,8 +13763,9 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id) if (eqe == NULL) break; - lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx); - if (!(++ecount % fpeq->entry_repost)) + ccount += lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx); + if (!(++ecount % fpeq->entry_repost) || + ccount > LPFC_MAX_ISR_CQE) break; fpeq->EQ_processed++; } @@ -17051,7 +17058,7 @@ lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_dmabuf *pcmd = cmdiocb->context2; if (pcmd && pcmd->virt) - pci_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys); + dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys); kfree(pcmd); lpfc_sli_release_iocbq(phba, 
cmdiocb); } @@ -17079,7 +17086,7 @@ lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport, /* Allocate buffer for command payload */ pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); if (pcmd) - pcmd->virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL, + pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL, &pcmd->phys); if (!pcmd || !pcmd->virt) goto exit; @@ -17128,7 +17135,7 @@ lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport, lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "2023 Unable to process MDS loopback frame\n"); if (pcmd && pcmd->virt) - pci_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys); + dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys); kfree(pcmd); lpfc_sli_release_iocbq(phba, iocbq); lpfc_in_buf_free(phba, &dmabuf->dbuf); @@ -18888,6 +18895,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number, struct lpfc_sglq *sglq; struct lpfc_sli_ring *pring; unsigned long iflags; + uint32_t ret = 0; /* NVME_LS and NVME_LS ABTS requests. */ if (pwqe->iocb_flag & LPFC_IO_NVME_LS) { @@ -18906,10 +18914,12 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number, } bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com, pwqe->sli4_xritag); - if (lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe)) { + ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe); + if (ret) { spin_unlock_irqrestore(&pring->ring_lock, iflags); - return WQE_ERROR; + return ret; } + lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); spin_unlock_irqrestore(&pring->ring_lock, iflags); return 0; @@ -18924,9 +18934,10 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number, wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]; bf_set(wqe_cqid, &wqe->generic.wqe_com, phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id); - if (lpfc_sli4_wq_put(wq, wqe)) { + ret = lpfc_sli4_wq_put(wq, wqe); + if (ret) { spin_unlock_irqrestore(&pring->ring_lock, iflags); - return WQE_ERROR; + return ret; } lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); spin_unlock_irqrestore(&pring->ring_lock, iflags); @@ -18950,9 +18961,10 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number, wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]; bf_set(wqe_cqid, &wqe->generic.wqe_com, phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id); - if (lpfc_sli4_wq_put(wq, wqe)) { + ret = lpfc_sli4_wq_put(wq, wqe); + if (ret) { spin_unlock_irqrestore(&pring->ring_lock, iflags); - return WQE_ERROR; + return ret; } lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); spin_unlock_irqrestore(&pring->ring_lock, iflags); diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 7a1d74e9e877..60200385fe00 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -158,6 +158,7 @@ struct lpfc_queue { #define LPFC_MQ_REPOST 8 #define LPFC_CQ_REPOST 64 #define LPFC_RQ_REPOST 64 +#define LPFC_MAX_ISR_CQE 64 #define LPFC_RELEASE_NOTIFICATION_INTERVAL 32 /* For WQs */ uint32_t queue_id; /* Queue ID assigned by the hardware */ uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */ @@ -419,6 +420,20 @@ struct lpfc_hba_eq_hdl { #define LPFC_MULTI_CPU_AFFINITY 0xffffffff }; +/*BB Credit recovery value*/ +struct lpfc_bbscn_params { + uint32_t word0; +#define lpfc_bbscn_min_SHIFT 0 +#define lpfc_bbscn_min_MASK 0x0000000F +#define lpfc_bbscn_min_WORD word0 +#define lpfc_bbscn_max_SHIFT 4 +#define lpfc_bbscn_max_MASK 0x0000000F +#define lpfc_bbscn_max_WORD word0 +#define lpfc_bbscn_def_SHIFT 8 +#define lpfc_bbscn_def_MASK 0x0000000F +#define lpfc_bbscn_def_WORD word0 +}; + /* Port 
Capabilities for SLI4 Parameters */ struct lpfc_pc_sli4_params { uint32_t supported; @@ -550,6 +565,7 @@ struct lpfc_sli4_hba { uint32_t ue_to_rp; struct lpfc_register sli_intf; struct lpfc_pc_sli4_params pc_sli4_params; + struct lpfc_bbscn_params bbscn_params; struct lpfc_hba_eq_hdl *hba_eq_hdl; /* HBA per-WQ handle */ /* Pointers to the constructed SLI4 queues */ @@ -621,8 +637,6 @@ struct lpfc_sli4_hba { uint16_t scsi_xri_start; uint16_t els_xri_cnt; uint16_t nvmet_xri_cnt; - uint16_t nvmet_ctx_get_cnt; - uint16_t nvmet_ctx_put_cnt; uint16_t nvmet_io_wait_cnt; uint16_t nvmet_io_wait_total; struct list_head lpfc_els_sgl_list; @@ -631,9 +645,8 @@ struct lpfc_sli4_hba { struct list_head lpfc_abts_nvmet_ctx_list; struct list_head lpfc_abts_scsi_buf_list; struct list_head lpfc_abts_nvme_buf_list; - struct list_head lpfc_nvmet_ctx_get_list; - struct list_head lpfc_nvmet_ctx_put_list; struct list_head lpfc_nvmet_io_wait_list; + struct lpfc_nvmet_ctx_info *nvmet_ctx_info; struct lpfc_sglq **lpfc_sglq_active_list; struct list_head lpfc_rpi_hdr_list; unsigned long *rpi_bmask; @@ -664,8 +677,6 @@ struct lpfc_sli4_hba { spinlock_t abts_nvme_buf_list_lock; /* list of aborted SCSI IOs */ spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ spinlock_t sgl_list_lock; /* list of aborted els IOs */ - spinlock_t nvmet_ctx_get_lock; /* list of avail XRI contexts */ - spinlock_t nvmet_ctx_put_lock; /* list of avail XRI contexts */ spinlock_t nvmet_io_wait_lock; /* IOs waiting for ctx resources */ uint32_t physical_port; diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index c6a24c3e2d5e..6aa192b3e4bf 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -20,7 +20,7 @@ * included with this package. 
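The lpfc_bbscn_params structure added just above packs three 4-bit BB credit recovery fields (min, max, def) into word0 via the usual SHIFT/MASK/WORD triplets. A standalone illustration of that layout, with hypothetical helper names:

        /* Bits [3:0] = min, [7:4] = max, [11:8] = def, matching the
         * lpfc_bbscn_* SHIFT/MASK definitions; names are illustrative.
         */
        static inline unsigned int example_bbscn_pack(unsigned int min,
                                                      unsigned int max,
                                                      unsigned int def)
        {
                return (min & 0xF) | ((max & 0xF) << 4) | ((def & 0xF) << 8);
        }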
* *******************************************************************/ -#define LPFC_DRIVER_VERSION "11.4.0.1" +#define LPFC_DRIVER_VERSION "11.4.0.3" #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c index a6682c508c4c..8c4d3003b68b 100644 --- a/drivers/scsi/mac53c94.c +++ b/drivers/scsi/mac53c94.c @@ -448,15 +448,14 @@ static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *mat ioremap(macio_resource_start(mdev, 1), 0x1000); state->dmaintr = macio_irq(mdev, 1); if (state->regs == NULL || state->dma == NULL) { - printk(KERN_ERR "mac53c94: ioremap failed for %s\n", - node->full_name); + printk(KERN_ERR "mac53c94: ioremap failed for %pOF\n", node); goto out_free; } clkprop = of_get_property(node, "clock-frequency", &proplen); if (clkprop == NULL || proplen != sizeof(int)) { - printk(KERN_ERR "%s: can't get clock frequency, " - "assuming 25MHz\n", node->full_name); + printk(KERN_ERR "%pOF: can't get clock frequency, " + "assuming 25MHz\n", node); state->clk_freq = 25000000; } else state->clk_freq = *(int *)clkprop; @@ -469,7 +468,7 @@ static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *mat sizeof(struct dbdma_cmd), GFP_KERNEL); if (dma_cmd_space == 0) { printk(KERN_ERR "mac53c94: couldn't allocate dma " - "command space for %s\n", node->full_name); + "command space for %pOF\n", node); rc = -ENOMEM; goto out_free; } @@ -481,8 +480,8 @@ static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *mat mac53c94_init(state); if (request_irq(state->intr, do_mac53c94_interrupt, 0, "53C94",state)) { - printk(KERN_ERR "mac53C94: can't get irq %d for %s\n", - state->intr, node->full_name); + printk(KERN_ERR "mac53C94: can't get irq %d for %pOF\n", + state->intr, node); goto out_free_dma; } diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c index cdb61eaa2d1f..eb551f3cc471 100644 --- a/drivers/scsi/mac_esp.c +++ b/drivers/scsi/mac_esp.c @@ -348,26 +348,24 @@ static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count, u32 dma_count, int write, u8 cmd) { struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp); - u8 *fifo = esp->regs + ESP_FDATA * 16; + u8 __iomem *fifo = esp->regs + ESP_FDATA * 16; + u8 phase = esp->sreg & ESP_STAT_PMASK; cmd &= ~ESP_CMD_DMA; mep->error = 0; if (write) { + u8 *dst = (u8 *)addr; + u8 mask = ~(phase == ESP_MIP ? 
ESP_INTR_FDONE : ESP_INTR_BSERV); + scsi_esp_cmd(esp, cmd); while (1) { - unsigned int n; - - n = mac_esp_wait_for_fifo(esp); - if (!n) + if (!mac_esp_wait_for_fifo(esp)) break; - if (n > esp_count) - n = esp_count; - esp_count -= n; - - MAC_ESP_PIO_LOOP("%2@,%0@+", n); + *dst++ = esp_read8(ESP_FDATA); + --esp_count; if (!esp_count) break; @@ -375,14 +373,17 @@ static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count, if (mac_esp_wait_for_intr(esp)) break; - if (((esp->sreg & ESP_STAT_PMASK) != ESP_DIP) && - ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP)) + if ((esp->sreg & ESP_STAT_PMASK) != phase) break; esp->ireg = esp_read8(ESP_INTRPT); - if ((esp->ireg & (ESP_INTR_DC | ESP_INTR_BSERV)) != - ESP_INTR_BSERV) + if (esp->ireg & mask) { + mep->error = 1; break; + } + + if (phase == ESP_MIP) + scsi_esp_cmd(esp, ESP_CMD_MOK); scsi_esp_cmd(esp, ESP_CMD_TI); } @@ -402,14 +403,14 @@ static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count, if (mac_esp_wait_for_intr(esp)) break; - if (((esp->sreg & ESP_STAT_PMASK) != ESP_DOP) && - ((esp->sreg & ESP_STAT_PMASK) != ESP_MOP)) + if ((esp->sreg & ESP_STAT_PMASK) != phase) break; esp->ireg = esp_read8(ESP_INTRPT); - if ((esp->ireg & (ESP_INTR_DC | ESP_INTR_BSERV)) != - ESP_INTR_BSERV) + if (esp->ireg & ~ESP_INTR_BSERV) { + mep->error = 1; break; + } n = MAC_ESP_FIFO_SIZE - (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES); diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c index 196acc79714b..dd6057359d7c 100644 --- a/drivers/scsi/mac_scsi.c +++ b/drivers/scsi/mac_scsi.c @@ -41,7 +41,7 @@ #define NCR5380_intr macscsi_intr #define NCR5380_queue_command macscsi_queue_command #define NCR5380_abort macscsi_abort -#define NCR5380_bus_reset macscsi_bus_reset +#define NCR5380_host_reset macscsi_host_reset #define NCR5380_info macscsi_info #include "NCR5380.h" @@ -328,7 +328,7 @@ static struct scsi_host_template mac_scsi_template = { .info = macscsi_info, .queuecommand = macscsi_queue_command, .eh_abort_handler = macscsi_abort, - .eh_bus_reset_handler = macscsi_bus_reset, + .eh_host_reset_handler = macscsi_host_reset, .can_queue = 16, .this_id = 7, .sg_tablesize = 1, diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c index 3c63c292cb92..7195cff51d4c 100644 --- a/drivers/scsi/megaraid.c +++ b/drivers/scsi/megaraid.c @@ -311,13 +311,15 @@ mega_query_adapter(adapter_t *adapter) right 8 bits making them zero. This 0 value was hardcoded to fix sparse warnings. 
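The megaraid, megasas and mpt3sas hunks that follow (like the lpfc ones earlier) mechanically convert pci_pool_*() calls to the underlying dma_pool_*() API: the pool is created against &pdev->dev rather than the pci_dev, and dma_pool_destroy() is NULL-safe, which is why several "if (pool)" checks are dropped. A minimal sketch of the new-style calls, with an illustrative pool name and sizes:

        #include <linux/dmapool.h>
        #include <linux/errno.h>
        #include <linux/gfp.h>
        #include <linux/pci.h>

        /* Create a pool, carve one buffer out of it, then release both. */
        static int example_dma_pool_roundtrip(struct pci_dev *pdev)
        {
                struct dma_pool *pool;
                dma_addr_t dma;
                void *vaddr;

                pool = dma_pool_create("example pool", &pdev->dev, 128, 4, 0);
                if (!pool)
                        return -ENOMEM;

                vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
                if (vaddr)
                        dma_pool_free(pool, vaddr, dma);

                dma_pool_destroy(pool); /* accepts NULL, no check needed */
                return vaddr ? 0 : -ENOMEM;
        }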
*/ if (adapter->product_info.subsysvid == PCI_VENDOR_ID_HP) { - sprintf (adapter->fw_version, "%c%d%d.%d%d", + snprintf(adapter->fw_version, sizeof(adapter->fw_version), + "%c%d%d.%d%d", adapter->product_info.fw_version[2], 0, adapter->product_info.fw_version[1] & 0x0f, 0, adapter->product_info.fw_version[0] & 0x0f); - sprintf (adapter->bios_version, "%c%d%d.%d%d", + snprintf(adapter->bios_version, sizeof(adapter->fw_version), + "%c%d%d.%d%d", adapter->product_info.bios_version[2], 0, adapter->product_info.bios_version[1] & 0x0f, diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c index f0987f22ea70..ec3c43854978 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.c +++ b/drivers/scsi/megaraid/megaraid_mbox.c @@ -341,8 +341,6 @@ static struct scsi_host_template megaraid_template_g = { .proc_name = "megaraid", .queuecommand = megaraid_queue_command, .eh_abort_handler = megaraid_abort_handler, - .eh_device_reset_handler = megaraid_reset_handler, - .eh_bus_reset_handler = megaraid_reset_handler, .eh_host_reset_handler = megaraid_reset_handler, .change_queue_depth = scsi_change_queue_depth, .use_clustering = ENABLE_CLUSTERING, @@ -1153,8 +1151,8 @@ megaraid_mbox_setup_dma_pools(adapter_t *adapter) // Allocate memory for 16-bytes aligned mailboxes - raid_dev->mbox_pool_handle = pci_pool_create("megaraid mbox pool", - adapter->pdev, + raid_dev->mbox_pool_handle = dma_pool_create("megaraid mbox pool", + &adapter->pdev->dev, sizeof(mbox64_t) + 16, 16, 0); @@ -1164,7 +1162,7 @@ megaraid_mbox_setup_dma_pools(adapter_t *adapter) mbox_pci_blk = raid_dev->mbox_pool; for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) { - mbox_pci_blk[i].vaddr = pci_pool_alloc( + mbox_pci_blk[i].vaddr = dma_pool_alloc( raid_dev->mbox_pool_handle, GFP_KERNEL, &mbox_pci_blk[i].dma_addr); @@ -1181,8 +1179,8 @@ megaraid_mbox_setup_dma_pools(adapter_t *adapter) * share common memory pool. Passthru structures piggyback on memory * allocted to extended passthru since passthru is smaller of the two */ - raid_dev->epthru_pool_handle = pci_pool_create("megaraid mbox pthru", - adapter->pdev, sizeof(mraid_epassthru_t), 128, 0); + raid_dev->epthru_pool_handle = dma_pool_create("megaraid mbox pthru", + &adapter->pdev->dev, sizeof(mraid_epassthru_t), 128, 0); if (raid_dev->epthru_pool_handle == NULL) { goto fail_setup_dma_pool; @@ -1190,7 +1188,7 @@ megaraid_mbox_setup_dma_pools(adapter_t *adapter) epthru_pci_blk = raid_dev->epthru_pool; for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) { - epthru_pci_blk[i].vaddr = pci_pool_alloc( + epthru_pci_blk[i].vaddr = dma_pool_alloc( raid_dev->epthru_pool_handle, GFP_KERNEL, &epthru_pci_blk[i].dma_addr); @@ -1202,8 +1200,8 @@ megaraid_mbox_setup_dma_pools(adapter_t *adapter) // Allocate memory for each scatter-gather list. 
Request for 512 bytes // alignment for each sg list - raid_dev->sg_pool_handle = pci_pool_create("megaraid mbox sg", - adapter->pdev, + raid_dev->sg_pool_handle = dma_pool_create("megaraid mbox sg", + &adapter->pdev->dev, sizeof(mbox_sgl64) * MBOX_MAX_SG_SIZE, 512, 0); @@ -1213,7 +1211,7 @@ megaraid_mbox_setup_dma_pools(adapter_t *adapter) sg_pci_blk = raid_dev->sg_pool; for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) { - sg_pci_blk[i].vaddr = pci_pool_alloc( + sg_pci_blk[i].vaddr = dma_pool_alloc( raid_dev->sg_pool_handle, GFP_KERNEL, &sg_pci_blk[i].dma_addr); @@ -1249,29 +1247,29 @@ megaraid_mbox_teardown_dma_pools(adapter_t *adapter) sg_pci_blk = raid_dev->sg_pool; for (i = 0; i < MBOX_MAX_SCSI_CMDS && sg_pci_blk[i].vaddr; i++) { - pci_pool_free(raid_dev->sg_pool_handle, sg_pci_blk[i].vaddr, + dma_pool_free(raid_dev->sg_pool_handle, sg_pci_blk[i].vaddr, sg_pci_blk[i].dma_addr); } if (raid_dev->sg_pool_handle) - pci_pool_destroy(raid_dev->sg_pool_handle); + dma_pool_destroy(raid_dev->sg_pool_handle); epthru_pci_blk = raid_dev->epthru_pool; for (i = 0; i < MBOX_MAX_SCSI_CMDS && epthru_pci_blk[i].vaddr; i++) { - pci_pool_free(raid_dev->epthru_pool_handle, + dma_pool_free(raid_dev->epthru_pool_handle, epthru_pci_blk[i].vaddr, epthru_pci_blk[i].dma_addr); } if (raid_dev->epthru_pool_handle) - pci_pool_destroy(raid_dev->epthru_pool_handle); + dma_pool_destroy(raid_dev->epthru_pool_handle); mbox_pci_blk = raid_dev->mbox_pool; for (i = 0; i < MBOX_MAX_SCSI_CMDS && mbox_pci_blk[i].vaddr; i++) { - pci_pool_free(raid_dev->mbox_pool_handle, + dma_pool_free(raid_dev->mbox_pool_handle, mbox_pci_blk[i].vaddr, mbox_pci_blk[i].dma_addr); } if (raid_dev->mbox_pool_handle) - pci_pool_destroy(raid_dev->mbox_pool_handle); + dma_pool_destroy(raid_dev->mbox_pool_handle); return; } diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c index 544d6f7e6138..65b6f6ace3a5 100644 --- a/drivers/scsi/megaraid/megaraid_mm.c +++ b/drivers/scsi/megaraid/megaraid_mm.c @@ -574,7 +574,7 @@ mraid_mm_attach_buf(mraid_mmadp_t *adp, uioc_t *kioc, int xferlen) kioc->pool_index = right_pool; kioc->free_buf = 1; - kioc->buf_vaddr = pci_pool_alloc(pool->handle, GFP_ATOMIC, + kioc->buf_vaddr = dma_pool_alloc(pool->handle, GFP_ATOMIC, &kioc->buf_paddr); spin_unlock_irqrestore(&pool->lock, flags); @@ -658,7 +658,7 @@ mraid_mm_dealloc_kioc(mraid_mmadp_t *adp, uioc_t *kioc) * not in use */ if (kioc->free_buf == 1) - pci_pool_free(pool->handle, kioc->buf_vaddr, + dma_pool_free(pool->handle, kioc->buf_vaddr, kioc->buf_paddr); else pool->in_use = 0; @@ -940,8 +940,8 @@ mraid_mm_register_adp(mraid_mmadp_t *lld_adp) GFP_KERNEL); adapter->mbox_list = kmalloc(sizeof(mbox64_t) * lld_adp->max_kioc, GFP_KERNEL); - adapter->pthru_dma_pool = pci_pool_create("megaraid mm pthru pool", - adapter->pdev, + adapter->pthru_dma_pool = dma_pool_create("megaraid mm pthru pool", + &adapter->pdev->dev, sizeof(mraid_passthru_t), 16, 0); @@ -970,7 +970,7 @@ mraid_mm_register_adp(mraid_mmadp_t *lld_adp) kioc = adapter->kioc_list + i; kioc->cmdbuf = (uint64_t)(unsigned long)(mbox_list + i); - kioc->pthru32 = pci_pool_alloc(adapter->pthru_dma_pool, + kioc->pthru32 = dma_pool_alloc(adapter->pthru_dma_pool, GFP_KERNEL, &kioc->pthru32_h); if (!kioc->pthru32) { @@ -1006,7 +1006,7 @@ mraid_mm_register_adp(mraid_mmadp_t *lld_adp) for (i = 0; i < lld_adp->max_kioc; i++) { kioc = adapter->kioc_list + i; if (kioc->pthru32) { - pci_pool_free(adapter->pthru_dma_pool, kioc->pthru32, + dma_pool_free(adapter->pthru_dma_pool, kioc->pthru32, 
kioc->pthru32_h); } } @@ -1017,7 +1017,7 @@ mraid_mm_register_adp(mraid_mmadp_t *lld_adp) kfree(adapter->mbox_list); if (adapter->pthru_dma_pool) - pci_pool_destroy(adapter->pthru_dma_pool); + dma_pool_destroy(adapter->pthru_dma_pool); kfree(adapter); @@ -1086,14 +1086,15 @@ mraid_mm_setup_dma_pools(mraid_mmadp_t *adp) pool->buf_size = bufsize; spin_lock_init(&pool->lock); - pool->handle = pci_pool_create("megaraid mm data buffer", - adp->pdev, bufsize, 16, 0); + pool->handle = dma_pool_create("megaraid mm data buffer", + &adp->pdev->dev, bufsize, + 16, 0); if (!pool->handle) { goto dma_pool_setup_error; } - pool->vaddr = pci_pool_alloc(pool->handle, GFP_KERNEL, + pool->vaddr = dma_pool_alloc(pool->handle, GFP_KERNEL, &pool->paddr); if (!pool->vaddr) @@ -1163,14 +1164,14 @@ mraid_mm_free_adp_resources(mraid_mmadp_t *adp) kioc = adp->kioc_list + i; - pci_pool_free(adp->pthru_dma_pool, kioc->pthru32, + dma_pool_free(adp->pthru_dma_pool, kioc->pthru32, kioc->pthru32_h); } kfree(adp->kioc_list); kfree(adp->mbox_list); - pci_pool_destroy(adp->pthru_dma_pool); + dma_pool_destroy(adp->pthru_dma_pool); return; @@ -1194,10 +1195,10 @@ mraid_mm_teardown_dma_pools(mraid_mmadp_t *adp) if (pool->handle) { if (pool->vaddr) - pci_pool_free(pool->handle, pool->vaddr, + dma_pool_free(pool->handle, pool->vaddr, pool->paddr); - pci_pool_destroy(pool->handle); + dma_pool_destroy(pool->handle); pool->handle = NULL; } } diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 2b209bbb4c91..a6722c93a295 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -35,8 +35,8 @@ /* * MegaRAID SAS Driver meta data */ -#define MEGASAS_VERSION "07.701.17.00-rc1" -#define MEGASAS_RELDATE "March 2, 2017" +#define MEGASAS_VERSION "07.702.06.00-rc1" +#define MEGASAS_RELDATE "June 21, 2017" /* * Device IDs @@ -2115,7 +2115,6 @@ struct megasas_instance { u32 *crash_dump_buf; dma_addr_t crash_dump_h; void *crash_buf[MAX_CRASH_DUMP_SIZE]; - u32 crash_buf_pages; unsigned int fw_crash_buffer_size; unsigned int fw_crash_state; unsigned int fw_crash_buffer_offset; diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 71c4746341ea..e518dadc8161 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -49,6 +49,7 @@ #include #include #include +#include #include #include @@ -1995,9 +1996,12 @@ static void megasas_complete_outstanding_ioctls(struct megasas_instance *instanc if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) { cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; if (cmd_mfi->sync_cmd && - cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) + (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) { + cmd_mfi->frame->hdr.cmd_status = + MFI_STAT_WRONG_STATE; megasas_complete_cmd(instance, cmd_mfi, DID_OK); + } } } } else { @@ -2791,7 +2795,7 @@ static int megasas_reset_bus_host(struct scsi_cmnd *scmd) cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr; if (cmd) megasas_dump_frame(cmd->io_request, - sizeof(struct MPI2_RAID_SCSI_IO_REQUEST)); + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE); ret = megasas_reset_fusion(scmd->device->host, SCSIIO_TIMEOUT_OCR); } else @@ -3862,19 +3866,19 @@ static void megasas_teardown_frame_pool(struct megasas_instance *instance) cmd = instance->cmd_list[i]; if (cmd->frame) - pci_pool_free(instance->frame_dma_pool, cmd->frame, + dma_pool_free(instance->frame_dma_pool, cmd->frame, cmd->frame_phys_addr); if (cmd->sense) - 
pci_pool_free(instance->sense_dma_pool, cmd->sense, + dma_pool_free(instance->sense_dma_pool, cmd->sense, cmd->sense_phys_addr); } /* * Now destroy the pool itself */ - pci_pool_destroy(instance->frame_dma_pool); - pci_pool_destroy(instance->sense_dma_pool); + dma_pool_destroy(instance->frame_dma_pool); + dma_pool_destroy(instance->sense_dma_pool); instance->frame_dma_pool = NULL; instance->sense_dma_pool = NULL; @@ -3925,22 +3929,23 @@ static int megasas_create_frame_pool(struct megasas_instance *instance) /* * Use DMA pool facility provided by PCI layer */ - instance->frame_dma_pool = pci_pool_create("megasas frame pool", - instance->pdev, instance->mfi_frame_size, - 256, 0); + instance->frame_dma_pool = dma_pool_create("megasas frame pool", + &instance->pdev->dev, + instance->mfi_frame_size, 256, 0); if (!instance->frame_dma_pool) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n"); return -ENOMEM; } - instance->sense_dma_pool = pci_pool_create("megasas sense pool", - instance->pdev, 128, 4, 0); + instance->sense_dma_pool = dma_pool_create("megasas sense pool", + &instance->pdev->dev, 128, + 4, 0); if (!instance->sense_dma_pool) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n"); - pci_pool_destroy(instance->frame_dma_pool); + dma_pool_destroy(instance->frame_dma_pool); instance->frame_dma_pool = NULL; return -ENOMEM; @@ -3955,10 +3960,10 @@ static int megasas_create_frame_pool(struct megasas_instance *instance) cmd = instance->cmd_list[i]; - cmd->frame = pci_pool_alloc(instance->frame_dma_pool, + cmd->frame = dma_pool_alloc(instance->frame_dma_pool, GFP_KERNEL, &cmd->frame_phys_addr); - cmd->sense = pci_pool_alloc(instance->sense_dma_pool, + cmd->sense = dma_pool_alloc(instance->sense_dma_pool, GFP_KERNEL, &cmd->sense_phys_addr); /* @@ -3966,7 +3971,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance) * whatever has been allocated */ if (!cmd->frame || !cmd->sense) { - dev_printk(KERN_DEBUG, &instance->pdev->dev, "pci_pool_alloc failed\n"); + dev_printk(KERN_DEBUG, &instance->pdev->dev, "dma_pool_alloc failed\n"); megasas_teardown_frame_pool(instance); return -ENOMEM; } @@ -5478,7 +5483,8 @@ static int megasas_init_fw(struct megasas_instance *instance) instance->throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH; - if (resetwaittime > MEGASAS_RESET_WAIT_TIME) + if ((resetwaittime < 1) || + (resetwaittime > MEGASAS_RESET_WAIT_TIME)) resetwaittime = MEGASAS_RESET_WAIT_TIME; if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT)) @@ -5649,6 +5655,14 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num, prev_aen.word = le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]); + if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) || + (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) { + dev_info(&instance->pdev->dev, + "%s %d out of range class %d send by application\n", + __func__, __LINE__, curr_aen.members.class); + return 0; + } + /* * A class whose enum value is smaller is inclusive of all * higher values. 
If a PROGRESS (= -1) was previously @@ -6096,14 +6110,12 @@ static int megasas_probe_one(struct pci_dev *pdev, instance->pd_info = pci_alloc_consistent(pdev, sizeof(struct MR_PD_INFO), &instance->pd_info_h); - instance->pd_info = pci_alloc_consistent(pdev, - sizeof(struct MR_PD_INFO), &instance->pd_info_h); - instance->tgt_prop = pci_alloc_consistent(pdev, - sizeof(struct MR_TARGET_PROPERTIES), &instance->tgt_prop_h); - if (!instance->pd_info) dev_err(&instance->pdev->dev, "Failed to alloc mem for pd_info\n"); + instance->tgt_prop = pci_alloc_consistent(pdev, + sizeof(struct MR_TARGET_PROPERTIES), &instance->tgt_prop_h); + if (!instance->tgt_prop) dev_err(&instance->pdev->dev, "Failed to alloc mem for tgt_prop\n"); @@ -6663,9 +6675,14 @@ static void megasas_detach_one(struct pci_dev *pdev) fusion->max_map_sz, fusion->ld_map[i], fusion->ld_map_phys[i]); - if (fusion->ld_drv_map[i]) - free_pages((ulong)fusion->ld_drv_map[i], - fusion->drv_map_pages); + if (fusion->ld_drv_map[i]) { + if (is_vmalloc_addr(fusion->ld_drv_map[i])) + vfree(fusion->ld_drv_map[i]); + else + free_pages((ulong)fusion->ld_drv_map[i], + fusion->drv_map_pages); + } + if (fusion->pd_seq_sync[i]) dma_free_coherent(&instance->pdev->dev, pd_seq_map_sz, @@ -6866,6 +6883,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, void *sense = NULL; dma_addr_t sense_handle; unsigned long *sense_ptr; + u32 opcode; memset(kbuff_arr, 0, sizeof(kbuff_arr)); @@ -6893,15 +6911,16 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_IEEE | MFI_FRAME_SGL64 | MFI_FRAME_SENSE64)); + opcode = le32_to_cpu(cmd->frame->dcmd.opcode); - if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_SHUTDOWN) { + if (opcode == MR_DCMD_CTRL_SHUTDOWN) { if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) { megasas_return_cmd(instance, cmd); return -1; } } - if (cmd->frame->dcmd.opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) { + if (opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) { error = megasas_set_crash_dump_params_ioctl(cmd); megasas_return_cmd(instance, cmd); return error; @@ -6975,8 +6994,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, cmd->sync_cmd = 0; dev_err(&instance->pdev->dev, "return -EBUSY from %s %d opcode 0x%x cmd->cmd_status_drv 0x%x\n", - __func__, __LINE__, cmd->frame->dcmd.opcode, - cmd->cmd_status_drv); + __func__, __LINE__, opcode, cmd->cmd_status_drv); return -EBUSY; } @@ -7323,49 +7341,39 @@ static struct pci_driver megasas_pci_driver = { /* * Sysfs driver attributes */ -static ssize_t megasas_sysfs_show_version(struct device_driver *dd, char *buf) +static ssize_t version_show(struct device_driver *dd, char *buf) { return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n", MEGASAS_VERSION); } +static DRIVER_ATTR_RO(version); -static DRIVER_ATTR(version, S_IRUGO, megasas_sysfs_show_version, NULL); - -static ssize_t -megasas_sysfs_show_release_date(struct device_driver *dd, char *buf) +static ssize_t release_date_show(struct device_driver *dd, char *buf) { return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n", MEGASAS_RELDATE); } +static DRIVER_ATTR_RO(release_date); -static DRIVER_ATTR(release_date, S_IRUGO, megasas_sysfs_show_release_date, NULL); - -static ssize_t -megasas_sysfs_show_support_poll_for_event(struct device_driver *dd, char *buf) +static ssize_t support_poll_for_event_show(struct device_driver *dd, char *buf) { return sprintf(buf, "%u\n", support_poll_for_event); } +static DRIVER_ATTR_RO(support_poll_for_event); -static DRIVER_ATTR(support_poll_for_event, 
S_IRUGO, - megasas_sysfs_show_support_poll_for_event, NULL); - - static ssize_t -megasas_sysfs_show_support_device_change(struct device_driver *dd, char *buf) +static ssize_t support_device_change_show(struct device_driver *dd, char *buf) { return sprintf(buf, "%u\n", support_device_change); } +static DRIVER_ATTR_RO(support_device_change); -static DRIVER_ATTR(support_device_change, S_IRUGO, - megasas_sysfs_show_support_device_change, NULL); - -static ssize_t -megasas_sysfs_show_dbg_lvl(struct device_driver *dd, char *buf) +static ssize_t dbg_lvl_show(struct device_driver *dd, char *buf) { return sprintf(buf, "%u\n", megasas_dbg_lvl); } -static ssize_t -megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t count) +static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf, + size_t count) { int retval = count; @@ -7375,9 +7383,7 @@ megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t coun } return retval; } - -static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl, - megasas_sysfs_set_dbg_lvl); +static DRIVER_ATTR_RW(dbg_lvl); static inline void megasas_remove_scsi_device(struct scsi_device *sdev) { diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c index 62affa76133d..ecc699a65bac 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fp.c +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c @@ -67,16 +67,6 @@ MODULE_PARM_DESC(lb_pending_cmds, "Change raid-1 load balancing outstanding " #define ABS_DIFF(a, b) (((a) > (b)) ? ((a) - (b)) : ((b) - (a))) #define MR_LD_STATE_OPTIMAL 3 -#ifdef FALSE -#undef FALSE -#endif -#define FALSE 0 - -#ifdef TRUE -#undef TRUE -#endif -#define TRUE 1 - #define SPAN_ROW_SIZE(map, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowSize) #define SPAN_ROW_DATA_SIZE(map_, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowDataSize) #define SPAN_INVALID 0xff @@ -709,7 +699,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld, u32 pd, arRef, r1_alt_pd; u8 physArm, span; u64 row; - u8 retval = TRUE; + u8 retval = true; u64 *pdBlock = &io_info->pdBlock; __le16 *pDevHandle = &io_info->devHandle; u8 *pPdInterface = &io_info->pd_interface; @@ -727,7 +717,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld, if (raid->level == 6) { logArm = get_arm_from_strip(instance, ld, stripRow, map); if (logArm == -1U) - return FALSE; + return false; rowMod = mega_mod64(row, SPAN_ROW_SIZE(map, ld, span)); armQ = SPAN_ROW_SIZE(map, ld, span) - 1 - rowMod; arm = armQ + 1 + logArm; @@ -738,7 +728,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld, /* Calculate the arm */ physArm = get_arm(instance, ld, span, stripRow, map); if (physArm == 0xFF) - return FALSE; + return false; arRef = MR_LdSpanArrayGet(ld, span, map); pd = MR_ArPdGet(arRef, physArm, map); @@ -812,7 +802,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow, u32 pd, arRef, r1_alt_pd; u8 physArm, span; u64 row; - u8 retval = TRUE; + u8 retval = true; u64 *pdBlock = &io_info->pdBlock; __le16 *pDevHandle = &io_info->devHandle; u8 *pPdInterface = &io_info->pd_interface; @@ -829,7 +819,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow, u32 rowMod, armQ, arm; if (raid->rowSize == 0) - return FALSE; + return false; /* get logical row mod */ rowMod = mega_mod64(row, raid->rowSize); armQ = raid->rowSize-1-rowMod; /* index of Q drive */ @@ -839,7 +829,7 @@ u8 
MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow, physArm = (u8)arm; } else { if (raid->modFactor == 0) - return FALSE; + return false; physArm = MR_LdDataArmGet(ld, mega_mod64(stripRow, raid->modFactor), map); @@ -851,7 +841,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow, } else { span = (u8)MR_GetSpanBlock(ld, row, pdBlock, map); if (span == SPAN_INVALID) - return FALSE; + return false; } /* Get the array on which this span is present */ @@ -954,7 +944,7 @@ MR_BuildRaidContext(struct megasas_instance *instance, */ if (raid->rowDataSize == 0) { if (MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize == 0) - return FALSE; + return false; else if (instance->UnevenSpanSupport) { io_info->IoforUnevenSpan = 1; } else { @@ -963,7 +953,7 @@ MR_BuildRaidContext(struct megasas_instance *instance, "rowDataSize = 0x%0x," "but there is _NO_ UnevenSpanSupport\n", MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize); - return FALSE; + return false; } } @@ -988,7 +978,7 @@ MR_BuildRaidContext(struct megasas_instance *instance, dev_info(&instance->pdev->dev, "return from %s %d." "Send IO w/o region lock.\n", __func__, __LINE__); - return FALSE; + return false; } if (raid->spanDepth == 1) { @@ -1004,7 +994,7 @@ MR_BuildRaidContext(struct megasas_instance *instance, (unsigned long long)start_row, (unsigned long long)start_strip, (unsigned long long)endStrip); - return FALSE; + return false; } io_info->start_span = startlba_span; io_info->start_row = start_row; @@ -1038,7 +1028,7 @@ MR_BuildRaidContext(struct megasas_instance *instance, raid->capability. fpWriteAcrossStripe)); } else - io_info->fpOkForIo = FALSE; + io_info->fpOkForIo = false; if (numRows == 1) { /* single-strip IOs can always lock only the data needed */ @@ -1124,7 +1114,7 @@ MR_BuildRaidContext(struct megasas_instance *instance, pRAID_Context, map); /* If IO on an invalid Pd, then FP is not possible.*/ if (io_info->devHandle == MR_DEVHANDLE_INVALID) - io_info->fpOkForIo = FALSE; + io_info->fpOkForIo = false; return retval; } else if (isRead) { uint stripIdx; @@ -1138,10 +1128,10 @@ MR_BuildRaidContext(struct megasas_instance *instance, start_strip + stripIdx, ref_in_start_stripe, io_info, pRAID_Context, map); if (!retval) - return TRUE; + return true; } } - return TRUE; + return true; } /* diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 985510628f56..11bd2e698b84 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -313,20 +313,20 @@ megasas_free_cmds_fusion(struct megasas_instance *instance) cmd = fusion->cmd_list[i]; if (cmd) { if (cmd->sg_frame) - pci_pool_free(fusion->sg_dma_pool, cmd->sg_frame, + dma_pool_free(fusion->sg_dma_pool, cmd->sg_frame, cmd->sg_frame_phys_addr); if (cmd->sense) - pci_pool_free(fusion->sense_dma_pool, cmd->sense, + dma_pool_free(fusion->sense_dma_pool, cmd->sense, cmd->sense_phys_addr); } } if (fusion->sg_dma_pool) { - pci_pool_destroy(fusion->sg_dma_pool); + dma_pool_destroy(fusion->sg_dma_pool); fusion->sg_dma_pool = NULL; } if (fusion->sense_dma_pool) { - pci_pool_destroy(fusion->sense_dma_pool); + dma_pool_destroy(fusion->sense_dma_pool); fusion->sense_dma_pool = NULL; } @@ -343,11 +343,11 @@ megasas_free_cmds_fusion(struct megasas_instance *instance) fusion->request_alloc_sz, fusion->req_frames_desc, fusion->req_frames_desc_phys); if (fusion->io_request_frames) - pci_pool_free(fusion->io_request_frames_pool, + 
dma_pool_free(fusion->io_request_frames_pool, fusion->io_request_frames, fusion->io_request_frames_phys); if (fusion->io_request_frames_pool) { - pci_pool_destroy(fusion->io_request_frames_pool); + dma_pool_destroy(fusion->io_request_frames_pool); fusion->io_request_frames_pool = NULL; } @@ -376,12 +376,12 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance) fusion->sg_dma_pool = - pci_pool_create("mr_sg", instance->pdev, + dma_pool_create("mr_sg", &instance->pdev->dev, instance->max_chain_frame_sz, MR_DEFAULT_NVME_PAGE_SIZE, 0); /* SCSI_SENSE_BUFFERSIZE = 96 bytes */ fusion->sense_dma_pool = - pci_pool_create("mr_sense", instance->pdev, + dma_pool_create("mr_sense", &instance->pdev->dev, SCSI_SENSE_BUFFERSIZE, 64, 0); if (!fusion->sense_dma_pool || !fusion->sg_dma_pool) { @@ -395,10 +395,10 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance) */ for (i = 0; i < max_cmd; i++) { cmd = fusion->cmd_list[i]; - cmd->sg_frame = pci_pool_alloc(fusion->sg_dma_pool, + cmd->sg_frame = dma_pool_alloc(fusion->sg_dma_pool, GFP_KERNEL, &cmd->sg_frame_phys_addr); - cmd->sense = pci_pool_alloc(fusion->sense_dma_pool, + cmd->sense = dma_pool_alloc(fusion->sense_dma_pool, GFP_KERNEL, &cmd->sense_phys_addr); if (!cmd->sg_frame || !cmd->sense) { dev_err(&instance->pdev->dev, @@ -410,7 +410,7 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance) /* create sense buffer for the raid 1/10 fp */ for (i = max_cmd; i < instance->max_mpt_cmds; i++) { cmd = fusion->cmd_list[i]; - cmd->sense = pci_pool_alloc(fusion->sense_dma_pool, + cmd->sense = dma_pool_alloc(fusion->sense_dma_pool, GFP_KERNEL, &cmd->sense_phys_addr); if (!cmd->sense) { dev_err(&instance->pdev->dev, @@ -479,7 +479,7 @@ megasas_alloc_request_fusion(struct megasas_instance *instance) } fusion->io_request_frames_pool = - pci_pool_create("mr_ioreq", instance->pdev, + dma_pool_create("mr_ioreq", &instance->pdev->dev, fusion->io_frames_alloc_sz, 16, 0); if (!fusion->io_request_frames_pool) { @@ -489,7 +489,7 @@ megasas_alloc_request_fusion(struct megasas_instance *instance) } fusion->io_request_frames = - pci_pool_alloc(fusion->io_request_frames_pool, + dma_pool_alloc(fusion->io_request_frames_pool, GFP_KERNEL, &fusion->io_request_frames_phys); if (!fusion->io_request_frames) { dev_err(&instance->pdev->dev, @@ -509,7 +509,7 @@ megasas_alloc_reply_fusion(struct megasas_instance *instance) count = instance->msix_vectors > 0 ? instance->msix_vectors : 1; fusion->reply_frames_desc_pool = - pci_pool_create("mr_reply", instance->pdev, + dma_pool_create("mr_reply", &instance->pdev->dev, fusion->reply_alloc_sz * count, 16, 0); if (!fusion->reply_frames_desc_pool) { @@ -519,7 +519,7 @@ megasas_alloc_reply_fusion(struct megasas_instance *instance) } fusion->reply_frames_desc[0] = - pci_pool_alloc(fusion->reply_frames_desc_pool, + dma_pool_alloc(fusion->reply_frames_desc_pool, GFP_KERNEL, &fusion->reply_frames_desc_phys[0]); if (!fusion->reply_frames_desc[0]) { dev_err(&instance->pdev->dev, @@ -562,8 +562,10 @@ megasas_alloc_rdpq_fusion(struct megasas_instance *instance) memset(fusion->rdpq_virt, 0, sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION); count = instance->msix_vectors > 0 ? 
instance->msix_vectors : 1; - fusion->reply_frames_desc_pool = pci_pool_create("mr_rdpq", - instance->pdev, fusion->reply_alloc_sz, 16, 0); + fusion->reply_frames_desc_pool = dma_pool_create("mr_rdpq", + &instance->pdev->dev, + fusion->reply_alloc_sz, + 16, 0); if (!fusion->reply_frames_desc_pool) { dev_err(&instance->pdev->dev, @@ -573,7 +575,7 @@ megasas_alloc_rdpq_fusion(struct megasas_instance *instance) for (i = 0; i < count; i++) { fusion->reply_frames_desc[i] = - pci_pool_alloc(fusion->reply_frames_desc_pool, + dma_pool_alloc(fusion->reply_frames_desc_pool, GFP_KERNEL, &fusion->reply_frames_desc_phys[i]); if (!fusion->reply_frames_desc[i]) { dev_err(&instance->pdev->dev, @@ -601,13 +603,13 @@ megasas_free_rdpq_fusion(struct megasas_instance *instance) { for (i = 0; i < MAX_MSIX_QUEUES_FUSION; i++) { if (fusion->reply_frames_desc[i]) - pci_pool_free(fusion->reply_frames_desc_pool, + dma_pool_free(fusion->reply_frames_desc_pool, fusion->reply_frames_desc[i], fusion->reply_frames_desc_phys[i]); } if (fusion->reply_frames_desc_pool) - pci_pool_destroy(fusion->reply_frames_desc_pool); + dma_pool_destroy(fusion->reply_frames_desc_pool); if (fusion->rdpq_virt) pci_free_consistent(instance->pdev, @@ -623,12 +625,12 @@ megasas_free_reply_fusion(struct megasas_instance *instance) { fusion = instance->ctrl_context; if (fusion->reply_frames_desc[0]) - pci_pool_free(fusion->reply_frames_desc_pool, + dma_pool_free(fusion->reply_frames_desc_pool, fusion->reply_frames_desc[0], fusion->reply_frames_desc_phys[0]); if (fusion->reply_frames_desc_pool) - pci_pool_destroy(fusion->reply_frames_desc_pool); + dma_pool_destroy(fusion->reply_frames_desc_pool); } @@ -914,7 +916,6 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) ret = 1; goto fail_fw_init; } - dev_info(&instance->pdev->dev, "Init cmd success\n"); ret = 0; @@ -925,6 +926,10 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) sizeof(struct MPI2_IOC_INIT_REQUEST), IOCInitMessage, ioc_init_handle); fail_get_cmd: + dev_err(&instance->pdev->dev, + "Init cmd return status %s for SCSI host %d\n", + ret ? 
"FAILED" : "SUCCESS", instance->host->host_no); + return ret; } @@ -1260,6 +1265,80 @@ megasas_display_intel_branding(struct megasas_instance *instance) } } +/** + * megasas_allocate_raid_maps - Allocate memory for RAID maps + * @instance: Adapter soft state + * + * return: if success: return 0 + * failed: return -ENOMEM + */ +static inline int megasas_allocate_raid_maps(struct megasas_instance *instance) +{ + struct fusion_context *fusion; + int i = 0; + + fusion = instance->ctrl_context; + + fusion->drv_map_pages = get_order(fusion->drv_map_sz); + + for (i = 0; i < 2; i++) { + fusion->ld_map[i] = NULL; + + fusion->ld_drv_map[i] = (void *) + __get_free_pages(__GFP_ZERO | GFP_KERNEL, + fusion->drv_map_pages); + + if (!fusion->ld_drv_map[i]) { + fusion->ld_drv_map[i] = vzalloc(fusion->drv_map_sz); + + if (!fusion->ld_drv_map[i]) { + dev_err(&instance->pdev->dev, + "Could not allocate memory for local map" + " size requested: %d\n", + fusion->drv_map_sz); + goto ld_drv_map_alloc_fail; + } + } + } + + for (i = 0; i < 2; i++) { + fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev, + fusion->max_map_sz, + &fusion->ld_map_phys[i], + GFP_KERNEL); + if (!fusion->ld_map[i]) { + dev_err(&instance->pdev->dev, + "Could not allocate memory for map info %s:%d\n", + __func__, __LINE__); + goto ld_map_alloc_fail; + } + } + + return 0; + +ld_map_alloc_fail: + for (i = 0; i < 2; i++) { + if (fusion->ld_map[i]) + dma_free_coherent(&instance->pdev->dev, + fusion->max_map_sz, + fusion->ld_map[i], + fusion->ld_map_phys[i]); + } + +ld_drv_map_alloc_fail: + for (i = 0; i < 2; i++) { + if (fusion->ld_drv_map[i]) { + if (is_vmalloc_addr(fusion->ld_drv_map[i])) + vfree(fusion->ld_drv_map[i]); + else + free_pages((ulong)fusion->ld_drv_map[i], + fusion->drv_map_pages); + } + } + + return -ENOMEM; +} + /** * megasas_init_adapter_fusion - Initializes the FW * @instance: Adapter soft state @@ -1379,45 +1458,14 @@ megasas_init_adapter_fusion(struct megasas_instance *instance) instance->r1_ldio_hint_default = MR_R1_LDIO_PIGGYBACK_DEFAULT; fusion->fast_path_io = 0; - fusion->drv_map_pages = get_order(fusion->drv_map_sz); - for (i = 0; i < 2; i++) { - fusion->ld_map[i] = NULL; - fusion->ld_drv_map[i] = (void *)__get_free_pages(GFP_KERNEL, - fusion->drv_map_pages); - if (!fusion->ld_drv_map[i]) { - dev_err(&instance->pdev->dev, "Could not allocate " - "memory for local map info for %d pages\n", - fusion->drv_map_pages); - if (i == 1) - free_pages((ulong)fusion->ld_drv_map[0], - fusion->drv_map_pages); - goto fail_ioc_init; - } - memset(fusion->ld_drv_map[i], 0, - ((1 << PAGE_SHIFT) << fusion->drv_map_pages)); - } - - for (i = 0; i < 2; i++) { - fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev, - fusion->max_map_sz, - &fusion->ld_map_phys[i], - GFP_KERNEL); - if (!fusion->ld_map[i]) { - dev_err(&instance->pdev->dev, "Could not allocate memory " - "for map info\n"); - goto fail_map_info; - } - } + if (megasas_allocate_raid_maps(instance)) + goto fail_ioc_init; if (!megasas_get_map_info(instance)) megasas_sync_map_info(instance); return 0; -fail_map_info: - if (i == 1) - dma_free_coherent(&instance->pdev->dev, fusion->max_map_sz, - fusion->ld_map[0], fusion->ld_map_phys[0]); fail_ioc_init: megasas_free_cmds_fusion(instance); fail_alloc_cmds: @@ -3287,7 +3335,7 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance, mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR; - mpi25_ieee_chain->Length = cpu_to_le32(instance->max_chain_frame_sz); + mpi25_ieee_chain->Length 
= cpu_to_le32(instance->mfi_frame_size); } /** @@ -3369,17 +3417,13 @@ megasas_alloc_host_crash_buffer(struct megasas_instance *instance) { unsigned int i; - instance->crash_buf_pages = get_order(CRASH_DMA_BUF_SIZE); for (i = 0; i < MAX_CRASH_DUMP_SIZE; i++) { - instance->crash_buf[i] = (void *)__get_free_pages(GFP_KERNEL, - instance->crash_buf_pages); + instance->crash_buf[i] = vzalloc(CRASH_DMA_BUF_SIZE); if (!instance->crash_buf[i]) { dev_info(&instance->pdev->dev, "Firmware crash dump " "memory allocation failed at index %d\n", i); break; } - memset(instance->crash_buf[i], 0, - ((1 << PAGE_SHIFT) << instance->crash_buf_pages)); } instance->drv_buf_alloc = i; } @@ -3391,12 +3435,10 @@ megasas_alloc_host_crash_buffer(struct megasas_instance *instance) void megasas_free_host_crash_buffer(struct megasas_instance *instance) { - unsigned int i -; + unsigned int i; for (i = 0; i < instance->drv_buf_alloc; i++) { if (instance->crash_buf[i]) - free_pages((ulong)instance->crash_buf[i], - instance->crash_buf_pages); + vfree(instance->crash_buf[i]); } instance->drv_buf_index = 0; instance->drv_buf_alloc = 0; @@ -3556,6 +3598,7 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance, } } + megasas_complete_cmd_dpc_fusion((unsigned long)instance); outstanding = atomic_read(&instance->fw_outstanding); if (!outstanding) goto out; @@ -3564,8 +3607,6 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance, dev_notice(&instance->pdev->dev, "[%2d]waiting for %d " "commands to complete for scsi%d\n", i, outstanding, instance->host->host_no); - megasas_complete_cmd_dpc_fusion( - (unsigned long)instance); } msleep(1000); } @@ -3623,6 +3664,15 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance) if (!smid) continue; + + /* Do not refire shutdown command */ + if (le32_to_cpu(cmd_mfi->frame->dcmd.opcode) == + MR_DCMD_CTRL_SHUTDOWN) { + cmd_mfi->frame->dcmd.cmd_status = MFI_STAT_OK; + megasas_complete_cmd(instance, cmd_mfi, DID_OK); + continue; + } + req_desc = megasas_get_request_descriptor (instance, smid - 1); refire_cmd = req_desc && ((cmd_mfi->frame->dcmd.opcode != @@ -3750,7 +3800,7 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle, struct megasas_cmd_fusion *cmd_fusion; struct megasas_cmd *cmd_mfi; union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; - struct fusion_context *fusion; + struct fusion_context *fusion = NULL; struct megasas_cmd_fusion *scsi_lookup; int rc; struct MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply; @@ -3777,8 +3827,6 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle, cmd_fusion->request_desc = req_desc; req_desc->Words = 0; - scsi_lookup = fusion->cmd_list[smid_task - 1]; - mr_request = (struct MR_TASK_MANAGE_REQUEST *) cmd_fusion->io_request; memset(mr_request, 0, sizeof(struct MR_TASK_MANAGE_REQUEST)); mpi_request = (struct MPI2_SCSI_TASK_MANAGE_REQUEST *) &mr_request->TmRequest; @@ -3825,13 +3873,13 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle, rc = SUCCESS; switch (type) { case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK: + scsi_lookup = fusion->cmd_list[smid_task - 1]; + if (scsi_lookup->scmd == NULL) break; else { instance->instancet->disable_intr(instance); megasas_sync_irqs((unsigned long)instance); - megasas_complete_cmd_dpc_fusion - ((unsigned long)instance); instance->instancet->enable_intr(instance); if (scsi_lookup->scmd == NULL) break; @@ -3843,9 +3891,7 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle, if ((channel == 0xFFFFFFFF) 
&& (id == 0xFFFFFFFF)) break; instance->instancet->disable_intr(instance); - msleep(1000); - megasas_complete_cmd_dpc_fusion - ((unsigned long)instance); + megasas_sync_irqs((unsigned long)instance); rc = megasas_track_scsiio(instance, id, channel); instance->instancet->enable_intr(instance); @@ -4271,9 +4317,6 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason) megasas_fusion_update_can_queue(instance, OCR_CONTEXT); if (megasas_ioc_init_fusion(instance)) { - dev_warn(&instance->pdev->dev, - "megasas_ioc_init_fusion() failed! for " - "scsi%d\n", instance->host->host_no); if (instance->requestorId && !reason) goto fail_kill_adapter; else @@ -4319,6 +4362,10 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason) instance->instancet->enable_intr(instance); atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); + dev_info(&instance->pdev->dev, "Interrupts are enabled and" + " controller is OPERATIONAL for scsi:%d\n", + instance->host->host_no); + /* Restart SR-IOV heartbeat */ if (instance->requestorId) { if (!megasas_sriov_start_heartbeat(instance, 0)) @@ -4330,11 +4377,6 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason) instance->skip_heartbeat_timer_del = 1; } - /* Adapter reset completed successfully */ - dev_warn(&instance->pdev->dev, "Reset " - "successful for scsi%d.\n", - instance->host->host_no); - if (instance->crash_dump_drv_support && instance->crash_dump_app_support) megasas_set_crash_dump_params(instance, @@ -4344,6 +4386,12 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason) MR_CRASH_BUF_TURN_OFF); retval = SUCCESS; + + /* Adapter reset completed successfully */ + dev_warn(&instance->pdev->dev, + "Reset successful for scsi%d.\n", + instance->host->host_no); + goto out; } fail_kill_adapter: diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 18039bba26c4..87999905bca3 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -615,9 +615,9 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc, (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ? 
"start" : "stop"); if (event_data->DiscoveryStatus) - pr_info("discovery_status(0x%08x)", + pr_cont(" discovery_status(0x%08x)", le32_to_cpu(event_data->DiscoveryStatus)); - pr_info("\n"); + pr_cont("\n"); return; } case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE: @@ -3198,9 +3198,8 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc) } if (ioc->sense) { - pci_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma); - if (ioc->sense_dma_pool) - pci_pool_destroy(ioc->sense_dma_pool); + dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma); + dma_pool_destroy(ioc->sense_dma_pool); dexitprintk(ioc, pr_info(MPT3SAS_FMT "sense_pool(0x%p): free\n", ioc->name, ioc->sense)); @@ -3208,9 +3207,8 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc) } if (ioc->reply) { - pci_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma); - if (ioc->reply_dma_pool) - pci_pool_destroy(ioc->reply_dma_pool); + dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma); + dma_pool_destroy(ioc->reply_dma_pool); dexitprintk(ioc, pr_info(MPT3SAS_FMT "reply_pool(0x%p): free\n", ioc->name, ioc->reply)); @@ -3218,10 +3216,9 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc) } if (ioc->reply_free) { - pci_pool_free(ioc->reply_free_dma_pool, ioc->reply_free, + dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free, ioc->reply_free_dma); - if (ioc->reply_free_dma_pool) - pci_pool_destroy(ioc->reply_free_dma_pool); + dma_pool_destroy(ioc->reply_free_dma_pool); dexitprintk(ioc, pr_info(MPT3SAS_FMT "reply_free_pool(0x%p): free\n", ioc->name, ioc->reply_free)); @@ -3232,7 +3229,7 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc) do { rps = &ioc->reply_post[i]; if (rps->reply_post_free) { - pci_pool_free( + dma_pool_free( ioc->reply_post_free_dma_pool, rps->reply_post_free, rps->reply_post_free_dma); @@ -3244,8 +3241,7 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc) } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count)); - if (ioc->reply_post_free_dma_pool) - pci_pool_destroy(ioc->reply_post_free_dma_pool); + dma_pool_destroy(ioc->reply_post_free_dma_pool); kfree(ioc->reply_post); } @@ -3266,12 +3262,11 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc) if (ioc->chain_lookup) { for (i = 0; i < ioc->chain_depth; i++) { if (ioc->chain_lookup[i].chain_buffer) - pci_pool_free(ioc->chain_dma_pool, + dma_pool_free(ioc->chain_dma_pool, ioc->chain_lookup[i].chain_buffer, ioc->chain_lookup[i].chain_buffer_dma); } - if (ioc->chain_dma_pool) - pci_pool_destroy(ioc->chain_dma_pool); + dma_pool_destroy(ioc->chain_dma_pool); free_pages((ulong)ioc->chain_lookup, ioc->chain_pages); ioc->chain_lookup = NULL; } @@ -3446,23 +3441,23 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) ioc->name); goto out; } - ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool", - ioc->pdev, sz, 16, 0); + ioc->reply_post_free_dma_pool = dma_pool_create("reply_post_free pool", + &ioc->pdev->dev, sz, 16, 0); if (!ioc->reply_post_free_dma_pool) { pr_err(MPT3SAS_FMT - "reply_post_free pool: pci_pool_create failed\n", + "reply_post_free pool: dma_pool_create failed\n", ioc->name); goto out; } i = 0; do { ioc->reply_post[i].reply_post_free = - pci_pool_alloc(ioc->reply_post_free_dma_pool, + dma_pool_alloc(ioc->reply_post_free_dma_pool, GFP_KERNEL, &ioc->reply_post[i].reply_post_free_dma); if (!ioc->reply_post[i].reply_post_free) { pr_err(MPT3SAS_FMT - "reply_post_free pool: pci_pool_alloc failed\n", + "reply_post_free pool: dma_pool_alloc failed\n", ioc->name); goto out; 
} @@ -3577,15 +3572,15 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) ioc->name); goto out; } - ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev, + ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev, ioc->chain_segment_sz, 16, 0); if (!ioc->chain_dma_pool) { - pr_err(MPT3SAS_FMT "chain_dma_pool: pci_pool_create failed\n", + pr_err(MPT3SAS_FMT "chain_dma_pool: dma_pool_create failed\n", ioc->name); goto out; } for (i = 0; i < ioc->chain_depth; i++) { - ioc->chain_lookup[i].chain_buffer = pci_pool_alloc( + ioc->chain_lookup[i].chain_buffer = dma_pool_alloc( ioc->chain_dma_pool , GFP_KERNEL, &ioc->chain_lookup[i].chain_buffer_dma); if (!ioc->chain_lookup[i].chain_buffer) { @@ -3630,17 +3625,17 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) /* sense buffers, 4 byte align */ sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE; - ioc->sense_dma_pool = pci_pool_create("sense pool", ioc->pdev, sz, 4, - 0); + ioc->sense_dma_pool = dma_pool_create("sense pool", &ioc->pdev->dev, sz, + 4, 0); if (!ioc->sense_dma_pool) { - pr_err(MPT3SAS_FMT "sense pool: pci_pool_create failed\n", + pr_err(MPT3SAS_FMT "sense pool: dma_pool_create failed\n", ioc->name); goto out; } - ioc->sense = pci_pool_alloc(ioc->sense_dma_pool , GFP_KERNEL, + ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL, &ioc->sense_dma); if (!ioc->sense) { - pr_err(MPT3SAS_FMT "sense pool: pci_pool_alloc failed\n", + pr_err(MPT3SAS_FMT "sense pool: dma_pool_alloc failed\n", ioc->name); goto out; } @@ -3654,17 +3649,17 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) /* reply pool, 4 byte align */ sz = ioc->reply_free_queue_depth * ioc->reply_sz; - ioc->reply_dma_pool = pci_pool_create("reply pool", ioc->pdev, sz, 4, - 0); + ioc->reply_dma_pool = dma_pool_create("reply pool", &ioc->pdev->dev, sz, + 4, 0); if (!ioc->reply_dma_pool) { - pr_err(MPT3SAS_FMT "reply pool: pci_pool_create failed\n", + pr_err(MPT3SAS_FMT "reply pool: dma_pool_create failed\n", ioc->name); goto out; } - ioc->reply = pci_pool_alloc(ioc->reply_dma_pool , GFP_KERNEL, + ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL, &ioc->reply_dma); if (!ioc->reply) { - pr_err(MPT3SAS_FMT "reply pool: pci_pool_alloc failed\n", + pr_err(MPT3SAS_FMT "reply pool: dma_pool_alloc failed\n", ioc->name); goto out; } @@ -3680,17 +3675,17 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) /* reply free queue, 16 byte align */ sz = ioc->reply_free_queue_depth * 4; - ioc->reply_free_dma_pool = pci_pool_create("reply_free pool", - ioc->pdev, sz, 16, 0); + ioc->reply_free_dma_pool = dma_pool_create("reply_free pool", + &ioc->pdev->dev, sz, 16, 0); if (!ioc->reply_free_dma_pool) { - pr_err(MPT3SAS_FMT "reply_free pool: pci_pool_create failed\n", + pr_err(MPT3SAS_FMT "reply_free pool: dma_pool_create failed\n", ioc->name); goto out; } - ioc->reply_free = pci_pool_alloc(ioc->reply_free_dma_pool , GFP_KERNEL, + ioc->reply_free = dma_pool_alloc(ioc->reply_free_dma_pool, GFP_KERNEL, &ioc->reply_free_dma); if (!ioc->reply_free) { - pr_err(MPT3SAS_FMT "reply_free pool: pci_pool_alloc failed\n", + pr_err(MPT3SAS_FMT "reply_free pool: dma_pool_alloc failed\n", ioc->name); goto out; } @@ -3708,7 +3703,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) ioc->config_page_sz, &ioc->config_page_dma); if (!ioc->config_page) { pr_err(MPT3SAS_FMT - "config page: pci_pool_alloc failed\n", + "config page: dma_pool_alloc failed\n", ioc->name); goto out; } @@ -5499,10 +5494,10 @@ mpt3sas_base_attach(struct 
MPT3SAS_ADAPTER *ioc) ioc->ctl_cmds.status = MPT3_CMD_NOT_USED; mutex_init(&ioc->ctl_cmds.mutex); - if (!ioc->base_cmds.reply || !ioc->transport_cmds.reply || - !ioc->scsih_cmds.reply || !ioc->tm_cmds.reply || - !ioc->config_cmds.reply || !ioc->ctl_cmds.reply || - !ioc->ctl_cmds.sense) { + if (!ioc->base_cmds.reply || !ioc->port_enable_cmds.reply || + !ioc->transport_cmds.reply || !ioc->scsih_cmds.reply || + !ioc->tm_cmds.reply || !ioc->config_cmds.reply || + !ioc->ctl_cmds.reply || !ioc->ctl_cmds.sense) { r = -ENOMEM; goto out_free_resources; } diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index 099ab4ca7edf..a77bb7dc12b1 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -970,7 +970,7 @@ struct MPT3SAS_ADAPTER { u8 id; int cpu_count; char name[MPT_NAME_LENGTH]; - char driver_name[MPT_NAME_LENGTH]; + char driver_name[MPT_NAME_LENGTH - 8]; char tmp_string[MPT_STRING_LENGTH]; struct pci_dev *pdev; Mpi2SystemInterfaceRegs_t __iomem *chip; diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c index e7a7a704a315..d3940c5d079d 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_transport.c +++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c @@ -1870,6 +1870,38 @@ _transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates) return rc; } +static int +_transport_map_smp_buffer(struct device *dev, struct bsg_buffer *buf, + dma_addr_t *dma_addr, size_t *dma_len, void **p) +{ + /* Check if the request is split across multiple segments */ + if (buf->sg_cnt > 1) { + *p = dma_alloc_coherent(dev, buf->payload_len, dma_addr, + GFP_KERNEL); + if (!*p) + return -ENOMEM; + *dma_len = buf->payload_len; + } else { + if (!dma_map_sg(dev, buf->sg_list, 1, DMA_BIDIRECTIONAL)) + return -ENOMEM; + *dma_addr = sg_dma_address(buf->sg_list); + *dma_len = sg_dma_len(buf->sg_list); + *p = NULL; + } + + return 0; +} + +static void +_transport_unmap_smp_buffer(struct device *dev, struct bsg_buffer *buf, + dma_addr_t dma_addr, void *p) +{ + if (p) + dma_free_coherent(dev, buf->payload_len, p, dma_addr); + else + dma_unmap_sg(dev, buf->sg_list, 1, DMA_BIDIRECTIONAL); +} + /** * _transport_smp_handler - transport portal for smp passthru * @shost: shost object @@ -1880,9 +1912,9 @@ _transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates) * Example: * smp_rep_general /sys/class/bsg/expander-5:0 */ -static int -_transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, - struct request *req) +static void +_transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, + struct sas_rphy *rphy) { struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); Mpi2SmpPassthroughRequest_t *mpi_request; @@ -1891,33 +1923,25 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, u16 smid; u32 ioc_state; void *psge; - u8 issue_reset = 0; - dma_addr_t dma_addr_in = 0; - dma_addr_t dma_addr_out = 0; - dma_addr_t pci_dma_in = 0; - dma_addr_t pci_dma_out = 0; - void *pci_addr_in = NULL; - void *pci_addr_out = NULL; + dma_addr_t dma_addr_in; + dma_addr_t dma_addr_out; + void *addr_in = NULL; + void *addr_out = NULL; + size_t dma_len_in; + size_t dma_len_out; u16 wait_state_count; - struct request *rsp = req->next_rq; - struct bio_vec bvec; - struct bvec_iter iter; - - if (!rsp) { - pr_err(MPT3SAS_FMT "%s: the smp response space is missing\n", - ioc->name, __func__); - return -EINVAL; - } + unsigned int reslen = 0; if (ioc->shost_recovery || ioc->pci_error_recovery) { 
pr_info(MPT3SAS_FMT "%s: host reset in progress!\n", __func__, ioc->name); - return -EFAULT; + rc = -EFAULT; + goto out; } rc = mutex_lock_interruptible(&ioc->transport_cmds.mutex); if (rc) - return rc; + goto out; if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) { pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n", ioc->name, @@ -1927,58 +1951,20 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, } ioc->transport_cmds.status = MPT3_CMD_PENDING; - /* Check if the request is split across multiple segments */ - if (bio_multiple_segments(req->bio)) { - u32 offset = 0; - - /* Allocate memory and copy the request */ - pci_addr_out = pci_alloc_consistent(ioc->pdev, - blk_rq_bytes(req), &pci_dma_out); - if (!pci_addr_out) { - pr_info(MPT3SAS_FMT "%s(): PCI Addr out = NULL\n", - ioc->name, __func__); - rc = -ENOMEM; - goto out; - } - - bio_for_each_segment(bvec, req->bio, iter) { - memcpy(pci_addr_out + offset, - page_address(bvec.bv_page) + bvec.bv_offset, - bvec.bv_len); - offset += bvec.bv_len; - } - } else { - dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio), - blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(ioc->pdev, dma_addr_out)) { - pr_info(MPT3SAS_FMT "%s(): DMA Addr out = NULL\n", - ioc->name, __func__); - rc = -ENOMEM; - goto free_pci; - } + rc = _transport_map_smp_buffer(&ioc->pdev->dev, &job->request_payload, + &dma_addr_out, &dma_len_out, &addr_out); + if (rc) + goto out; + if (addr_out) { + sg_copy_to_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, addr_out, + job->request_payload.payload_len); } - /* Check if the response needs to be populated across - * multiple segments */ - if (bio_multiple_segments(rsp->bio)) { - pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp), - &pci_dma_in); - if (!pci_addr_in) { - pr_info(MPT3SAS_FMT "%s(): PCI Addr in = NULL\n", - ioc->name, __func__); - rc = -ENOMEM; - goto unmap; - } - } else { - dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio), - blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(ioc->pdev, dma_addr_in)) { - pr_info(MPT3SAS_FMT "%s(): DMA Addr in = NULL\n", - ioc->name, __func__); - rc = -ENOMEM; - goto unmap; - } - } + rc = _transport_map_smp_buffer(&ioc->pdev->dev, &job->reply_payload, + &dma_addr_in, &dma_len_in, &addr_in); + if (rc) + goto unmap_out; wait_state_count = 0; ioc_state = mpt3sas_base_get_iocstate(ioc, 1); @@ -1988,7 +1974,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, "%s: failed due to ioc not operational\n", ioc->name, __func__); rc = -EFAULT; - goto unmap; + goto unmap_in; } ssleep(1); ioc_state = mpt3sas_base_get_iocstate(ioc, 1); @@ -2005,7 +1991,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", ioc->name, __func__); rc = -EAGAIN; - goto unmap; + goto unmap_in; } rc = 0; @@ -2018,15 +2004,11 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, mpi_request->SASAddress = (rphy) ? 
cpu_to_le64(rphy->identify.sas_address) : cpu_to_le64(ioc->sas_hba.sas_address); - mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4); + mpi_request->RequestDataLength = cpu_to_le16(dma_len_out - 4); psge = &mpi_request->SGL; - if (bio_multiple_segments(req->bio)) - ioc->build_sg(ioc, psge, pci_dma_out, (blk_rq_bytes(req) - 4), - pci_dma_in, (blk_rq_bytes(rsp) + 4)); - else - ioc->build_sg(ioc, psge, dma_addr_out, (blk_rq_bytes(req) - 4), - dma_addr_in, (blk_rq_bytes(rsp) + 4)); + ioc->build_sg(ioc, psge, dma_addr_out, dma_len_out - 4, dma_addr_in, + dma_len_in - 4); dtransportprintk(ioc, pr_info(MPT3SAS_FMT "%s - sending smp request\n", ioc->name, __func__)); @@ -2040,83 +2022,51 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, __func__, ioc->name); _debug_dump_mf(mpi_request, sizeof(Mpi2SmpPassthroughRequest_t)/4); - if (!(ioc->transport_cmds.status & MPT3_CMD_RESET)) - issue_reset = 1; - goto issue_host_reset; + if (!(ioc->transport_cmds.status & MPT3_CMD_RESET)) { + mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + rc = -ETIMEDOUT; + goto unmap_in; + } } dtransportprintk(ioc, pr_info(MPT3SAS_FMT "%s - complete\n", ioc->name, __func__)); - if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) { - - mpi_reply = ioc->transport_cmds.reply; - - dtransportprintk(ioc, pr_info(MPT3SAS_FMT - "%s - reply data transfer size(%d)\n", - ioc->name, __func__, - le16_to_cpu(mpi_reply->ResponseDataLength))); - - memcpy(scsi_req(req)->sense, mpi_reply, sizeof(*mpi_reply)); - scsi_req(req)->sense_len = sizeof(*mpi_reply); - scsi_req(req)->resid_len = 0; - scsi_req(rsp)->resid_len -= - le16_to_cpu(mpi_reply->ResponseDataLength); - - /* check if the resp needs to be copied from the allocated - * pci mem */ - if (bio_multiple_segments(rsp->bio)) { - u32 offset = 0; - u32 bytes_to_copy = - le16_to_cpu(mpi_reply->ResponseDataLength); - bio_for_each_segment(bvec, rsp->bio, iter) { - if (bytes_to_copy <= bvec.bv_len) { - memcpy(page_address(bvec.bv_page) + - bvec.bv_offset, pci_addr_in + - offset, bytes_to_copy); - break; - } else { - memcpy(page_address(bvec.bv_page) + - bvec.bv_offset, pci_addr_in + - offset, bvec.bv_len); - bytes_to_copy -= bvec.bv_len; - } - offset += bvec.bv_len; - } - } - } else { + if (!(ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID)) { dtransportprintk(ioc, pr_info(MPT3SAS_FMT "%s - no reply\n", ioc->name, __func__)); rc = -ENXIO; + goto unmap_in; } - issue_host_reset: - if (issue_reset) { - mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); - rc = -ETIMEDOUT; + mpi_reply = ioc->transport_cmds.reply; + + dtransportprintk(ioc, + pr_info(MPT3SAS_FMT "%s - reply data transfer size(%d)\n", + ioc->name, __func__, + le16_to_cpu(mpi_reply->ResponseDataLength))); + + memcpy(job->reply, mpi_reply, sizeof(*mpi_reply)); + job->reply_len = sizeof(*mpi_reply); + reslen = le16_to_cpu(mpi_reply->ResponseDataLength); + + if (addr_in) { + sg_copy_to_buffer(job->reply_payload.sg_list, + job->reply_payload.sg_cnt, addr_in, + job->reply_payload.payload_len); } - unmap: - if (dma_addr_out) - pci_unmap_single(ioc->pdev, dma_addr_out, blk_rq_bytes(req), - PCI_DMA_BIDIRECTIONAL); - if (dma_addr_in) - pci_unmap_single(ioc->pdev, dma_addr_in, blk_rq_bytes(rsp), - PCI_DMA_BIDIRECTIONAL); - - free_pci: - if (pci_addr_out) - pci_free_consistent(ioc->pdev, blk_rq_bytes(req), pci_addr_out, - pci_dma_out); - - if (pci_addr_in) - pci_free_consistent(ioc->pdev, blk_rq_bytes(rsp), pci_addr_in, - pci_dma_in); - + rc = 0; + unmap_in: + 
_transport_unmap_smp_buffer(&ioc->pdev->dev, &job->reply_payload, + dma_addr_in, addr_in); + unmap_out: + _transport_unmap_smp_buffer(&ioc->pdev->dev, &job->request_payload, + dma_addr_out, addr_out); out: ioc->transport_cmds.status = MPT3_CMD_NOT_USED; mutex_unlock(&ioc->transport_cmds.mutex); - return rc; + bsg_job_done(job, rc, reslen); } struct sas_function_template mpt3sas_transport_functions = { diff --git a/drivers/scsi/mvme147.c b/drivers/scsi/mvme147.c index e7f6661a8862..4f515700bdc3 100644 --- a/drivers/scsi/mvme147.c +++ b/drivers/scsi/mvme147.c @@ -121,21 +121,6 @@ int mvme147_detect(struct scsi_host_template *tpnt) return 0; } -static int mvme147_bus_reset(struct scsi_cmnd *cmd) -{ - /* FIXME perform bus-specific reset */ - - /* FIXME 2: kill this function, and let midlayer fallback to - the same result, calling wd33c93_host_reset() */ - - spin_lock_irq(cmd->device->host->host_lock); - wd33c93_host_reset(cmd); - spin_unlock_irq(cmd->device->host->host_lock); - - return SUCCESS; -} - - static struct scsi_host_template driver_template = { .proc_name = "MVME147", .name = "MVME147 built-in SCSI", @@ -143,7 +128,6 @@ static struct scsi_host_template driver_template = { .release = mvme147_release, .queuecommand = wd33c93_queuecommand, .eh_abort_handler = wd33c93_abort, - .eh_bus_reset_handler = mvme147_bus_reset, .eh_host_reset_handler = wd33c93_host_reset, .can_queue = CAN_QUEUE, .this_id = 7, diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c index 4e047b5001a6..718c88de328b 100644 --- a/drivers/scsi/mvsas/mv_init.c +++ b/drivers/scsi/mvsas/mv_init.c @@ -61,7 +61,7 @@ static struct scsi_host_template mvs_sht = { .max_sectors = SCSI_DEFAULT_MAX_SECTORS, .use_clustering = ENABLE_CLUSTERING, .eh_device_reset_handler = sas_eh_device_reset_handler, - .eh_bus_reset_handler = sas_eh_bus_reset_handler, + .eh_target_reset_handler = sas_eh_target_reset_handler, .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, .shost_attrs = mvst_host_attrs, @@ -125,8 +125,7 @@ static void mvs_free(struct mvs_info *mvi) else slot_nr = MVS_CHIP_SLOT_SZ; - if (mvi->dma_pool) - pci_pool_destroy(mvi->dma_pool); + dma_pool_destroy(mvi->dma_pool); if (mvi->tx) dma_free_coherent(mvi->dev, @@ -296,7 +295,8 @@ static int mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost) goto err_out; sprintf(pool_name, "%s%d", "mvs_dma_pool", mvi->id); - mvi->dma_pool = pci_pool_create(pool_name, mvi->pdev, MVS_SLOT_BUF_SZ, 16, 0); + mvi->dma_pool = dma_pool_create(pool_name, &mvi->pdev->dev, + MVS_SLOT_BUF_SZ, 16, 0); if (!mvi->dma_pool) { printk(KERN_DEBUG "failed to create dma pool %s.\n", pool_name); goto err_out; @@ -557,14 +557,14 @@ static int mvs_pci_init(struct pci_dev *pdev, const struct pci_device_id *ent) SHOST_TO_SAS_HA(shost) = kcalloc(1, sizeof(struct sas_ha_struct), GFP_KERNEL); if (!SHOST_TO_SAS_HA(shost)) { - kfree(shost); + scsi_host_put(shost); rc = -ENOMEM; goto err_out_regions; } rc = mvs_prep_sas_ha_init(shost, chip); if (rc) { - kfree(shost); + scsi_host_put(shost); rc = -ENOMEM; goto err_out_regions; } diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c index c7cc8035eacb..ee81d10252e0 100644 --- a/drivers/scsi/mvsas/mv_sas.c +++ b/drivers/scsi/mvsas/mv_sas.c @@ -790,7 +790,7 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf slot->n_elem = n_elem; slot->slot_tag = tag; - slot->buf = pci_pool_alloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma); + slot->buf = dma_pool_alloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma); if 
(!slot->buf) { rc = -ENOMEM; goto err_out_tag; @@ -840,7 +840,7 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf return rc; err_out_slot_buf: - pci_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma); + dma_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma); err_out_tag: mvs_tag_free(mvi, tag); err_out: @@ -918,7 +918,7 @@ static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task, } if (slot->buf) { - pci_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma); + dma_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma); slot->buf = NULL; } list_del_init(&slot->entry); diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c index 53c84771f0e8..107e191bf023 100644 --- a/drivers/scsi/nsp32.c +++ b/drivers/scsi/nsp32.c @@ -201,7 +201,6 @@ static int nsp32_release (struct Scsi_Host *); /* SCSI error handler */ static int nsp32_eh_abort (struct scsi_cmnd *); -static int nsp32_eh_bus_reset (struct scsi_cmnd *); static int nsp32_eh_host_reset(struct scsi_cmnd *); /* generate SCSI message */ @@ -276,8 +275,7 @@ static struct scsi_host_template nsp32_template = { .max_sectors = 128, .this_id = NSP32_HOST_SCSIID, .use_clustering = DISABLE_CLUSTERING, - .eh_abort_handler = nsp32_eh_abort, - .eh_bus_reset_handler = nsp32_eh_bus_reset, + .eh_abort_handler = nsp32_eh_abort, .eh_host_reset_handler = nsp32_eh_host_reset, /* .highmem_io = 1, */ }; @@ -2845,24 +2843,6 @@ static int nsp32_eh_abort(struct scsi_cmnd *SCpnt) return SUCCESS; } -static int nsp32_eh_bus_reset(struct scsi_cmnd *SCpnt) -{ - nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; - unsigned int base = SCpnt->device->host->io_port; - - spin_lock_irq(SCpnt->device->host->host_lock); - - nsp32_msg(KERN_INFO, "Bus Reset"); - nsp32_dbg(NSP32_DEBUG_BUSRESET, "SCpnt=0x%x", SCpnt); - - nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK); - nsp32_do_bus_reset(data); - nsp32_write2(base, IRQ_CONTROL, 0); - - spin_unlock_irq(SCpnt->device->host->host_lock); - return SUCCESS; /* SCSI bus reset is succeeded at any time. 
*/ -} - static void nsp32_do_bus_reset(nsp32_hw_data *data) { unsigned int base = data->BaseAddress; diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c index 929ee7e88120..20ec1c01dbd5 100644 --- a/drivers/scsi/osst.c +++ b/drivers/scsi/osst.c @@ -619,7 +619,7 @@ static int osst_verify_frame(struct osst_tape * STp, int frame_seq_number, int q os_aux_t * aux = STp->buffer->aux; os_partition_t * par = &(aux->partition); struct st_partstat * STps = &(STp->ps[STp->partition]); - int blk_cnt, blk_sz, i; + unsigned int blk_cnt, blk_sz, i; if (STp->raw) { if (STp->buffer->syscall_result) { @@ -5434,7 +5434,7 @@ static int append_to_buffer(const char __user *ubp, struct osst_buffer *st_bp, i for (i=0, offset=st_bp->buffer_bytes; i < st_bp->sg_segs && offset >= st_bp->sg[i].length; i++) - offset -= st_bp->sg[i].length; + offset -= st_bp->sg[i].length; if (i == st_bp->sg_segs) { /* Should never happen */ printk(KERN_WARNING "osst :A: Append_to_buffer offset overflow.\n"); return (-EIO); @@ -5667,12 +5667,12 @@ static struct osst_support_data support_list[] = { * sysfs support for osst driver parameter information */ -static ssize_t osst_version_show(struct device_driver *ddd, char *buf) +static ssize_t version_show(struct device_driver *ddd, char *buf) { return snprintf(buf, PAGE_SIZE, "%s\n", osst_version); } -static DRIVER_ATTR(version, S_IRUGO, osst_version_show, NULL); +static DRIVER_ATTR_RO(version); static int osst_create_sysfs_files(struct device_driver *sysfs) { diff --git a/drivers/scsi/pcmcia/fdomain_stub.c b/drivers/scsi/pcmcia/fdomain_stub.c index 714b248f5d5e..953a792150ae 100644 --- a/drivers/scsi/pcmcia/fdomain_stub.c +++ b/drivers/scsi/pcmcia/fdomain_stub.c @@ -173,7 +173,7 @@ static void fdomain_release(struct pcmcia_device *link) static int fdomain_resume(struct pcmcia_device *link) { - fdomain_16x0_bus_reset(NULL); + fdomain_16x0_host_reset(NULL); return 0; } diff --git a/drivers/scsi/pcmcia/qlogic_stub.c b/drivers/scsi/pcmcia/qlogic_stub.c index c670dc704c74..0556054764dc 100644 --- a/drivers/scsi/pcmcia/qlogic_stub.c +++ b/drivers/scsi/pcmcia/qlogic_stub.c @@ -67,7 +67,7 @@ static struct scsi_host_template qlogicfas_driver_template = { .info = qlogicfas408_info, .queuecommand = qlogicfas408_queuecommand, .eh_abort_handler = qlogicfas408_abort, - .eh_bus_reset_handler = qlogicfas408_bus_reset, + .eh_host_reset_handler = qlogicfas408_host_reset, .bios_param = qlogicfas408_biosparam, .can_queue = 1, .this_id = -1, @@ -264,7 +264,7 @@ static int qlogic_resume(struct pcmcia_device *link) outb(0x04, link->resource[0]->start + 0xd); } /* Ugggglllyyyy!!! 
*/ - qlogicfas408_bus_reset(NULL); + qlogicfas408_host_reset(NULL); return 0; } diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index 034b2f7d1135..0e013f76b582 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -86,7 +86,7 @@ static struct scsi_host_template pm8001_sht = { .max_sectors = SCSI_DEFAULT_MAX_SECTORS, .use_clustering = ENABLE_CLUSTERING, .eh_device_reset_handler = sas_eh_device_reset_handler, - .eh_bus_reset_handler = sas_eh_bus_reset_handler, + .eh_target_reset_handler = sas_eh_target_reset_handler, .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, .shost_attrs = pm8001_host_attrs, @@ -160,8 +160,6 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha) } } PM8001_CHIP_DISP->chip_iounmap(pm8001_ha); - if (pm8001_ha->shost) - scsi_host_put(pm8001_ha->shost); flush_workqueue(pm8001_wq); kfree(pm8001_ha->tags); kfree(pm8001_ha); @@ -1073,7 +1071,7 @@ static int pm8001_pci_probe(struct pci_dev *pdev, err_out_free: kfree(SHOST_TO_SAS_HA(shost)); err_out_free_host: - kfree(shost); + scsi_host_put(shost); err_out_regions: pci_release_regions(pdev); err_out_disable: @@ -1112,6 +1110,7 @@ static void pm8001_pci_remove(struct pci_dev *pdev) for (j = 0; j < PM8001_MAX_MSIX_VEC; j++) tasklet_kill(&pm8001_ha->tasklet[j]); #endif + scsi_host_put(pm8001_ha->shost); pm8001_free(pm8001_ha); kfree(sha->sas_phy); kfree(sha->sas_port); diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index 1cc814f1505a..b4d6cd8cd1ad 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c @@ -1595,12 +1595,7 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance) if (pinstance->ccn.hcam->notification_type == NOTIFICATION_TYPE_ENTRY_CHANGED && cfg_entry->resource_type == RES_TYPE_VSET) { - - if (fw_version <= PMCRAID_FW_VERSION_1) - hidden_entry = (cfg_entry->unique_flags1 & 0x80) != 0; - else - hidden_entry = (cfg_entry->unique_flags1 & 0x80) != 0; - + hidden_entry = (cfg_entry->unique_flags1 & 0x80) != 0; } else if (!pmcraid_expose_resource(fw_version, cfg_entry)) { goto out_notify_apps; } @@ -4655,13 +4650,13 @@ pmcraid_release_control_blocks( return; for (i = 0; i < max_index; i++) { - pci_pool_free(pinstance->control_pool, + dma_pool_free(pinstance->control_pool, pinstance->cmd_list[i]->ioa_cb, pinstance->cmd_list[i]->ioa_cb_bus_addr); pinstance->cmd_list[i]->ioa_cb = NULL; pinstance->cmd_list[i]->ioa_cb_bus_addr = 0; } - pci_pool_destroy(pinstance->control_pool); + dma_pool_destroy(pinstance->control_pool); pinstance->control_pool = NULL; } @@ -4718,8 +4713,8 @@ static int pmcraid_allocate_control_blocks(struct pmcraid_instance *pinstance) pinstance->host->unique_id); pinstance->control_pool = - pci_pool_create(pinstance->ctl_pool_name, - pinstance->pdev, + dma_pool_create(pinstance->ctl_pool_name, + &pinstance->pdev->dev, sizeof(struct pmcraid_control_block), PMCRAID_IOARCB_ALIGNMENT, 0); @@ -4728,7 +4723,7 @@ static int pmcraid_allocate_control_blocks(struct pmcraid_instance *pinstance) for (i = 0; i < PMCRAID_MAX_CMD; i++) { pinstance->cmd_list[i]->ioa_cb = - pci_pool_alloc( + dma_pool_alloc( pinstance->control_pool, GFP_KERNEL, &(pinstance->cmd_list[i]->ioa_cb_bus_addr)); diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h index 01eb2bc16dc1..8bfac72a242b 100644 --- a/drivers/scsi/pmcraid.h +++ b/drivers/scsi/pmcraid.h @@ -755,7 +755,7 @@ struct pmcraid_instance { /* structures related to command blocks */ struct kmem_cache *cmd_cachep; /* cache for cmd blocks */ - 
struct pci_pool *control_pool; /* pool for control blocks */ + struct dma_pool *control_pool; /* pool for control blocks */ char cmd_pool_name[64]; /* name of cmd cache */ char ctl_pool_name[64]; /* name of control cache */ diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c index f6ad579280d4..7be5823ab036 100644 --- a/drivers/scsi/ppa.c +++ b/drivers/scsi/ppa.c @@ -970,7 +970,6 @@ static struct scsi_host_template ppa_template = { .name = "Iomega VPI0 (ppa) interface", .queuecommand = ppa_queuecommand, .eh_abort_handler = ppa_abort, - .eh_bus_reset_handler = ppa_reset, .eh_host_reset_handler = ppa_reset, .bios_param = ppa_biosparam, .this_id = -1, diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h index 351f06dfc5a0..9bf7b227e69a 100644 --- a/drivers/scsi/qedf/qedf.h +++ b/drivers/scsi/qedf/qedf.h @@ -300,7 +300,6 @@ struct qedf_ctx { #define QEDF_FALLBACK_VLAN 1002 #define QEDF_DEFAULT_PRIO 3 int vlan_id; - uint vlan_hw_insert:1; struct qed_dev *cdev; struct qed_dev_fcoe_info dev_info; struct qed_int_info int_info; @@ -443,7 +442,6 @@ extern void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr); extern int qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd); extern void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb); -extern void qedf_update_src_mac(struct fc_lport *lport, u8 *addr); extern u8 *qedf_get_src_mac(struct fc_lport *lport); extern void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb); extern void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf); diff --git a/drivers/scsi/qedf/qedf_fip.c b/drivers/scsi/qedf/qedf_fip.c index aefd24ca9604..773558fc0697 100644 --- a/drivers/scsi/qedf/qedf_fip.c +++ b/drivers/scsi/qedf/qedf_fip.c @@ -108,7 +108,6 @@ void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) { struct qedf_ctx *qedf = container_of(fip, struct qedf_ctx, ctlr); struct ethhdr *eth_hdr; - struct vlan_ethhdr *vlan_hdr; struct fip_header *fiph; u16 op, vlan_tci = 0; u8 sub; @@ -124,16 +123,14 @@ void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) op = ntohs(fiph->fip_op); sub = fiph->fip_subcode; - if (!qedf->vlan_hw_insert) { - vlan_hdr = skb_push(skb, sizeof(*vlan_hdr) - sizeof(*eth_hdr)); - memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN); - vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q); - vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto; - vlan_hdr->h_vlan_TCI = vlan_tci = htons(qedf->vlan_id); - } + /* + * Add VLAN tag to non-offload FIP frame based on current stored VLAN + * for FIP/FCoE traffic. 
+ */ + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), qedf->vlan_id); - /* Update eth_hdr since we added a VLAN tag */ - eth_hdr = (struct ethhdr *)skb_mac_header(skb); + /* Get VLAN ID from skb for printing purposes */ + __vlan_hwaccel_get_tag(skb, &vlan_tci); QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FIP frame send: " "dest=%pM op=%x sub=%x vlan=%04x.", eth_hdr->h_dest, op, sub, @@ -174,7 +171,6 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb) /* Handle FIP VLAN resp in the driver */ if (op == FIP_OP_VLAN && sub == FIP_SC_VL_NOTE) { qedf_fcoe_process_vlan_resp(qedf, skb); - qedf->vlan_hw_insert = 0; kfree_skb(skb); } else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) { QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Clear virtual " @@ -242,26 +238,9 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb) } } -void qedf_update_src_mac(struct fc_lport *lport, u8 *addr) -{ - struct qedf_ctx *qedf = lport_priv(lport); - - QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, - "Setting data_src_addr=%pM.\n", addr); - ether_addr_copy(qedf->data_src_addr, addr); -} - u8 *qedf_get_src_mac(struct fc_lport *lport) { - u8 mac[ETH_ALEN]; - u8 port_id[3]; struct qedf_ctx *qedf = lport_priv(lport); - /* We need to use the lport port_id to create the data_src_addr */ - if (is_zero_ether_addr(qedf->data_src_addr)) { - hton24(port_id, lport->port_id); - fc_fcoe_set_mac(mac, port_id); - qedf->ctlr.update_mac(lport, mac); - } return qedf->data_src_addr; } diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index 1d13c9ca517d..7c0064500cc5 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -42,7 +43,7 @@ MODULE_PARM_DESC(dev_loss_tmo, " dev_loss_tmo setting for attached " uint qedf_debug = QEDF_LOG_INFO; module_param_named(debug, qedf_debug, uint, S_IRUGO); -MODULE_PARM_DESC(qedf_debug, " Debug mask. Pass '1' to enable default debugging" +MODULE_PARM_DESC(debug, " Debug mask. Pass '1' to enable default debugging" " mask"); static uint qedf_fipvlan_retries = 30; @@ -163,7 +164,7 @@ static void qedf_handle_link_update(struct work_struct *work) QEDF_WARN(&(qedf->dbg_ctx), "Did not receive FIP VLAN " "response, falling back to default VLAN %d.\n", qedf_fallback_vlan); - qedf_set_vlan_id(qedf, QEDF_FALLBACK_VLAN); + qedf_set_vlan_id(qedf, qedf_fallback_vlan); /* * Zero out data_src_addr so we'll update it with the new @@ -187,6 +188,50 @@ static void qedf_handle_link_update(struct work_struct *work) } } +#define QEDF_FCOE_MAC_METHOD_GRANGED_MAC 1 +#define QEDF_FCOE_MAC_METHOD_FCF_MAP 2 +#define QEDF_FCOE_MAC_METHOD_FCOE_SET_MAC 3 +static void qedf_set_data_src_addr(struct qedf_ctx *qedf, struct fc_frame *fp) +{ + u8 *granted_mac; + struct fc_frame_header *fh = fc_frame_header_get(fp); + u8 fc_map[3]; + int method = 0; + + /* Get granted MAC address from FIP FLOGI payload */ + granted_mac = fr_cb(fp)->granted_mac; + + /* + * We set the source MAC for FCoE traffic based on the Granted MAC + * address from the switch. + * + * If granted_mac is non-zero, we use that. + * If granted_mac is zeroed out, we create the FCoE MAC based on + * the sel_fcf->fc_map and the d_id of the FLOGI frame. + * If sel_fcf->fc_map is 0 then we use the default FCF-MAC plus the + * d_id of the FLOGI frame.
+ */ + if (!is_zero_ether_addr(granted_mac)) { + ether_addr_copy(qedf->data_src_addr, granted_mac); + method = QEDF_FCOE_MAC_METHOD_GRANGED_MAC; + } else if (qedf->ctlr.sel_fcf->fc_map != 0) { + hton24(fc_map, qedf->ctlr.sel_fcf->fc_map); + qedf->data_src_addr[0] = fc_map[0]; + qedf->data_src_addr[1] = fc_map[1]; + qedf->data_src_addr[2] = fc_map[2]; + qedf->data_src_addr[3] = fh->fh_d_id[0]; + qedf->data_src_addr[4] = fh->fh_d_id[1]; + qedf->data_src_addr[5] = fh->fh_d_id[2]; + method = QEDF_FCOE_MAC_METHOD_FCF_MAP; + } else { + fc_fcoe_set_mac(qedf->data_src_addr, fh->fh_d_id); + method = QEDF_FCOE_MAC_METHOD_FCOE_SET_MAC; + } + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "QEDF data_src_mac=%pM method=%d.\n", qedf->data_src_addr, method); +} + static void qedf_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) { @@ -212,6 +257,10 @@ static void qedf_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, /* Log stats for FLOGI reject */ if (fc_frame_payload_op(fp) == ELS_LS_RJT) qedf->flogi_failed++; + else if (fc_frame_payload_op(fp) == ELS_LS_ACC) { + /* Set the source MAC we will use for FCoE traffic */ + qedf_set_data_src_addr(qedf, fp); + } /* Complete flogi_compl so we can proceed to sending ADISCs */ complete(&qedf->flogi_compl); @@ -312,8 +361,9 @@ static void qedf_link_recovery(struct work_struct *work) /* Since the link went down and came back up, verify which VLAN we're on */ qedf->fipvlan_retries = qedf_fipvlan_retries; rc = qedf_initiate_fipvlan_req(qedf); + /* If getting the VLAN fails, set the VLAN to the fallback one */ if (!rc) - return; + qedf_set_vlan_id(qedf, qedf_fallback_vlan); /* * We need to wait for an FCF to be selected due to the @@ -629,16 +679,6 @@ static int qedf_eh_device_reset(struct scsi_cmnd *sc_cmd) return qedf_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET); } -static int qedf_eh_bus_reset(struct scsi_cmnd *sc_cmd) -{ - QEDF_ERR(NULL, "BUS RESET Issued...\n"); - /* - * Essentially a no-op but return SUCCESS to prevent - * unnecessary escalation to the host reset handler. - */ - return SUCCESS; -} - void qedf_wait_for_upload(struct qedf_ctx *qedf) { while (1) { @@ -716,7 +756,6 @@ static struct scsi_host_template qedf_host_template = { .eh_abort_handler = qedf_eh_abort, .eh_device_reset_handler = qedf_eh_device_reset, /* lun reset */ .eh_target_reset_handler = qedf_eh_target_reset, /* target reset */ - .eh_bus_reset_handler = qedf_eh_bus_reset, .eh_host_reset_handler = qedf_eh_host_reset, .slave_configure = qedf_slave_configure, .dma_boundary = QED_HW_DMA_BOUNDARY, @@ -915,6 +954,10 @@ static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp) skb->mac_len = elen; skb->protocol = htons(ETH_P_FCOE); + /* + * Add VLAN tag to non-offload FCoE frame based on current stored VLAN + * for FIP/FCoE traffic.
+ */ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), qedf->vlan_id); /* fill up mac and fcoe headers */ @@ -927,7 +970,7 @@ static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp) ether_addr_copy(eh->h_dest, qedf->ctlr.dest_addr); /* Set the source MAC address */ - fc_fcoe_set_mac(eh->h_source, fh->fh_s_id); + ether_addr_copy(eh->h_source, qedf->data_src_addr); hp = (struct fcoe_hdr *)(eh + 1); memset(hp, 0, sizeof(*hp)); @@ -1025,7 +1068,6 @@ static int qedf_offload_connection(struct qedf_ctx *qedf, { struct qed_fcoe_params_offload conn_info; u32 port_id; - u8 lport_src_id[3]; int rval; uint16_t total_sqe = (fcport->sq_mem_size / sizeof(struct fcoe_wqe)); @@ -1054,11 +1096,7 @@ static int qedf_offload_connection(struct qedf_ctx *qedf, (dma_addr_t)(*(u64 *)(fcport->sq_pbl + 8)); /* Need to use our FCoE MAC for the offload session */ - port_id = fc_host_port_id(qedf->lport->host); - lport_src_id[2] = (port_id & 0x000000FF); - lport_src_id[1] = (port_id & 0x0000FF00) >> 8; - lport_src_id[0] = (port_id & 0x00FF0000) >> 16; - fc_fcoe_set_mac(conn_info.src_mac, lport_src_id); + ether_addr_copy(conn_info.src_mac, qedf->data_src_addr); ether_addr_copy(conn_info.dst_mac, qedf->ctlr.dest_addr); @@ -1347,7 +1385,6 @@ static void qedf_fcoe_ctlr_setup(struct qedf_ctx *qedf) fcoe_ctlr_init(&qedf->ctlr, FIP_ST_AUTO); qedf->ctlr.send = qedf_fip_send; - qedf->ctlr.update_mac = qedf_update_src_mac; qedf->ctlr.get_src_addr = qedf_get_src_mac; ether_addr_copy(qedf->ctlr.ctl_src_addr, qedf->mac); } @@ -2939,7 +2976,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) sprintf(host_buf, "qedf_%u_link", qedf->lport->host->host_no); - qedf->link_update_wq = create_singlethread_workqueue(host_buf); + qedf->link_update_wq = create_workqueue(host_buf); INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update); INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery); @@ -3056,9 +3093,24 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "MAC address is %pM.\n", qedf->mac); - /* Set the WWNN and WWPN based on the MAC address */ - qedf->wwnn = fcoe_wwn_from_mac(qedf->mac, 1, 0); - qedf->wwpn = fcoe_wwn_from_mac(qedf->mac, 2, 0); + /* + * Set the WWNN and WWPN in the following way: + * + * If the info we get from qed is non-zero then use that to set the + * WWPN and WWNN. Otherwise fall back to use fcoe_wwn_from_mac() based + * on the MAC address. 
+ */ + if (qedf->dev_info.wwnn != 0 && qedf->dev_info.wwpn != 0) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "Setting WWPN and WWNN from qed dev_info.\n"); + qedf->wwnn = qedf->dev_info.wwnn; + qedf->wwpn = qedf->dev_info.wwpn; + } else { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "Setting WWPN and WWNN using fcoe_wwn_from_mac().\n"); + qedf->wwnn = fcoe_wwn_from_mac(qedf->mac, 1, 0); + qedf->wwpn = fcoe_wwn_from_mac(qedf->mac, 2, 0); + } QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "WWNN=%016llx " "WWPN=%016llx.\n", qedf->wwnn, qedf->wwpn); @@ -3094,7 +3146,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) /* Start LL2 processing thread */ snprintf(host_buf, 20, "qedf_%d_ll2", host->host_no); qedf->ll2_recv_wq = - create_singlethread_workqueue(host_buf); + create_workqueue(host_buf); if (!qedf->ll2_recv_wq) { QEDF_ERR(&(qedf->dbg_ctx), "Failed to LL2 workqueue.\n"); goto err7; @@ -3114,8 +3166,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) } set_bit(QEDF_LL2_STARTED, &qedf->flags); - /* hw will be insterting vlan tag*/ - qedf->vlan_hw_insert = 1; + /* Set initial FIP/FCoE VLAN to NULL */ qedf->vlan_id = 0; /* @@ -3137,7 +3188,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) sprintf(host_buf, "qedf_%u_timer", qedf->lport->host->host_no); qedf->timer_work_queue = - create_singlethread_workqueue(host_buf); + create_workqueue(host_buf); if (!qedf->timer_work_queue) { QEDF_ERR(&(qedf->dbg_ctx), "Failed to start timer " "workqueue.\n"); @@ -3148,7 +3199,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) if (mode != QEDF_MODE_RECOVERY) { sprintf(host_buf, "qedf_%u_dpc", qedf->lport->host->host_no); - qedf->dpc_wq = create_singlethread_workqueue(host_buf); + qedf->dpc_wq = create_workqueue(host_buf); } /* diff --git a/drivers/scsi/qedf/qedf_version.h b/drivers/scsi/qedf/qedf_version.h index 6fa442061c32..397b3b8ee51a 100644 --- a/drivers/scsi/qedf/qedf_version.h +++ b/drivers/scsi/qedf/qedf_version.h @@ -7,9 +7,9 @@ * this source tree. 
*/ -#define QEDF_VERSION "8.18.22.0" +#define QEDF_VERSION "8.20.5.0" #define QEDF_DRIVER_MAJOR_VER 8 -#define QEDF_DRIVER_MINOR_VER 18 -#define QEDF_DRIVER_REV_VER 22 +#define QEDF_DRIVER_MINOR_VER 20 +#define QEDF_DRIVER_REV_VER 5 #define QEDF_DRIVER_ENG_VER 0 diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h index 91d2f51c351b..b8b22ce60ecc 100644 --- a/drivers/scsi/qedi/qedi.h +++ b/drivers/scsi/qedi/qedi.h @@ -54,8 +54,8 @@ struct qedi_endpoint; /* MAX Length for cached SGL */ #define MAX_SGLEN_FOR_CACHESGL ((1U << 16) - 1) -#define MAX_NUM_MSIX_PF 8 -#define MIN_NUM_CPUS_MSIX(x) min((x)->msix_count, num_online_cpus()) +#define MIN_NUM_CPUS_MSIX(x) min_t(u32, x->dev_info.num_cqs, \ + num_online_cpus()) #define QEDI_LOCAL_PORT_MIN 60000 #define QEDI_LOCAL_PORT_MAX 61024 @@ -301,7 +301,6 @@ struct qedi_ctx { u16 bdq_prod_idx; u16 rq_num_entries; - u32 msix_count; u32 max_sqes; u8 num_queues; u32 max_active_conns; diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c index 37da9a8b43b1..a02b34ea5cab 100644 --- a/drivers/scsi/qedi/qedi_iscsi.c +++ b/drivers/scsi/qedi/qedi_iscsi.c @@ -534,7 +534,7 @@ static int qedi_iscsi_offload_conn(struct qedi_endpoint *qedi_ep) SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_DA_CNT_EN, 1); SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_KA_EN, 1); - conn_info->default_cq = (qedi_ep->fw_cid % 8); + conn_info->default_cq = (qedi_ep->fw_cid % qedi->num_queues); conn_info->ka_max_probe_cnt = DEF_KA_MAX_PROBE_COUNT; conn_info->dup_ack_theshold = 3; diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index 2c3783684815..cccc34adc0e0 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c @@ -794,13 +794,14 @@ static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi) u32 log_page_size; int rval = 0; - QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, "Min number of MSIX %d\n", - MIN_NUM_CPUS_MSIX(qedi)); num_sq_pages = (MAX_OUSTANDING_TASKS_PER_CON * 8) / PAGE_SIZE; qedi->num_queues = MIN_NUM_CPUS_MSIX(qedi); + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "Number of CQ count is %d\n", qedi->num_queues); + memset(&qedi->pf_params.iscsi_pf_params, 0, sizeof(qedi->pf_params.iscsi_pf_params)); @@ -1575,7 +1576,7 @@ struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid) { struct qedi_cmd *cmd = NULL; - if (tid > MAX_ISCSI_TASK_ENTRIES) + if (tid >= MAX_ISCSI_TASK_ENTRIES) return NULL; cmd = qedi->itt_map[tid].p_cmd; @@ -2179,9 +2180,12 @@ static int __qedi_probe(struct pci_dev *pdev, int mode) goto free_host; } - qedi->msix_count = MAX_NUM_MSIX_PF; atomic_set(&qedi->link_state, QEDI_LINK_DOWN); + rc = qedi_ops->fill_dev_info(qedi->cdev, &qedi->dev_info); + if (rc) + goto free_host; + if (mode != QEDI_MODE_RECOVERY) { rc = qedi_set_iscsi_pf_param(qedi); if (rc) { diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 08a1feb3a195..9ce28c4f9812 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -318,6 +318,8 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, return -EINVAL; if (start > ha->optrom_size) return -EINVAL; + if (size > ha->optrom_size - start) + size = ha->optrom_size - start; mutex_lock(&ha->optrom_mutex); switch (val) { @@ -343,8 +345,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, } ha->optrom_region_start = start; - ha->optrom_region_size = start + size > ha->optrom_size ? 
- ha->optrom_size - start : size; + ha->optrom_region_size = start + size; ha->optrom_state = QLA_SREADING; ha->optrom_buffer = vmalloc(ha->optrom_region_size); @@ -417,8 +418,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, } ha->optrom_region_start = start; - ha->optrom_region_size = start + size > ha->optrom_size ? - ha->optrom_size - start : size; + ha->optrom_region_size = start + size; ha->optrom_state = QLA_SWRITING; ha->optrom_buffer = vmalloc(ha->optrom_region_size); @@ -565,47 +565,17 @@ qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj, { struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, struct device, kobj))); - struct qla_hw_data *ha = vha->hw; - uint16_t iter, addr, offset; int rval; - if (!capable(CAP_SYS_ADMIN) || off != 0 || count != SFP_DEV_SIZE * 2) + if (!capable(CAP_SYS_ADMIN) || off != 0 || count < SFP_DEV_SIZE) return 0; - if (ha->sfp_data) - goto do_read; - - ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, - &ha->sfp_data_dma); - if (!ha->sfp_data) { - ql_log(ql_log_warn, vha, 0x706c, - "Unable to allocate memory for SFP read-data.\n"); + if (qla2x00_reset_active(vha)) return 0; - } -do_read: - memset(ha->sfp_data, 0, SFP_BLOCK_SIZE); - addr = 0xa0; - for (iter = 0, offset = 0; iter < (SFP_DEV_SIZE * 2) / SFP_BLOCK_SIZE; - iter++, offset += SFP_BLOCK_SIZE) { - if (iter == 4) { - /* Skip to next device address. */ - addr = 0xa2; - offset = 0; - } - - rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data, - addr, offset, SFP_BLOCK_SIZE, BIT_1); - if (rval != QLA_SUCCESS) { - ql_log(ql_log_warn, vha, 0x706d, - "Unable to read SFP data (%x/%x/%x).\n", rval, - addr, offset); - - return -EIO; - } - memcpy(buf, ha->sfp_data, SFP_BLOCK_SIZE); - buf += SFP_BLOCK_SIZE; - } + rval = qla2x00_read_sfp_dev(vha, buf, count); + if (rval) + return -EIO; return count; } @@ -615,7 +585,7 @@ static struct bin_attribute sysfs_sfp_attr = { .name = "sfp", .mode = S_IRUSR | S_IWUSR, }, - .size = SFP_DEV_SIZE * 2, + .size = SFP_DEV_SIZE, .read = qla2x00_sysfs_read_sfp, }; @@ -1511,6 +1481,38 @@ qla2x00_pep_version_show(struct device *dev, struct device_attribute *attr, ha->pep_version[0], ha->pep_version[1], ha->pep_version[2]); } +static ssize_t +qla2x00_min_link_speed_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + struct qla_hw_data *ha = vha->hw; + + if (!IS_QLA27XX(ha)) + return scnprintf(buf, PAGE_SIZE, "\n"); + + return scnprintf(buf, PAGE_SIZE, "%s\n", + ha->min_link_speed == 5 ? "32Gps" : + ha->min_link_speed == 4 ? "16Gps" : + ha->min_link_speed == 3 ? "8Gps" : + ha->min_link_speed == 2 ? "4Gps" : + ha->min_link_speed != 0 ? "unknown" : ""); +} + +static ssize_t +qla2x00_max_speed_sup_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + struct qla_hw_data *ha = vha->hw; + + if (!IS_QLA27XX(ha)) + return scnprintf(buf, PAGE_SIZE, "\n"); + + return scnprintf(buf, PAGE_SIZE, "%s\n", + ha->max_speed_sup ? 
"32Gps" : "16Gps"); +} + static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL); static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL); static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL); @@ -1556,6 +1558,8 @@ static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR, qla2x00_allow_cna_fw_dump_show, qla2x00_allow_cna_fw_dump_store); static DEVICE_ATTR(pep_version, S_IRUGO, qla2x00_pep_version_show, NULL); +static DEVICE_ATTR(min_link_speed, S_IRUGO, qla2x00_min_link_speed_show, NULL); +static DEVICE_ATTR(max_speed_sup, S_IRUGO, qla2x00_max_speed_sup_show, NULL); struct device_attribute *qla2x00_host_attrs[] = { &dev_attr_driver_version, @@ -1590,6 +1594,8 @@ struct device_attribute *qla2x00_host_attrs[] = { &dev_attr_fw_dump_size, &dev_attr_allow_cna_fw_dump, &dev_attr_pep_version, + &dev_attr_min_link_speed, + &dev_attr_max_speed_sup, NULL, }; diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 26751d34bcf2..3e9dc54b89a3 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -14,7 +14,7 @@ * | Module Init and Probe | 0x0193 | 0x0146 | * | | | 0x015b-0x0160 | * | | | 0x016e | - * | Mailbox commands | 0x1199 | 0x1193 | + * | Mailbox commands | 0x1205 | 0x11a2-0x11ff | * | Device Discovery | 0x2134 | 0x210e-0x2116 | * | | | 0x211a | * | | | 0x211c-0x2128 | @@ -41,7 +41,7 @@ * | | | 0x70ad-0x70ae | * | | | 0x70d0-0x70d6 | * | | | 0x70d7-0x70db | - * | Task Management | 0x8042 | 0x8000,0x800b | + * | Task Management | 0x8042 | 0x8000 | * | | | 0x8019 | * | | | 0x8025,0x8026 | * | | | 0x8031,0x8032 | @@ -2520,8 +2520,6 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) static inline int ql_mask_match(uint32_t level) { - if (ql2xextended_error_logging == 1) - ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK; return (level & ql2xextended_error_logging) == level; } @@ -2738,9 +2736,9 @@ ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id) mbx_reg = MAILBOX_REG(ha, reg, 0); ql_dbg(level, vha, id, "Mailbox registers:\n"); - for (i = 0; i < 6; i++) + for (i = 0; i < 6; i++, mbx_reg++) ql_dbg(level, vha, id, - "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++)); + "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg)); } diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 0730b10b4280..486c075998f6 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -427,6 +427,7 @@ struct srb_iocb { enum nvmefc_fcp_datadir dir; uint32_t dl; uint32_t timeout_sec; + struct list_head entry; } nvme; } u; @@ -470,7 +471,7 @@ typedef struct srb { uint8_t cmd_type; uint8_t pad[3]; atomic_t ref_count; - wait_queue_head_t nvme_ls_waitQ; + wait_queue_head_t nvme_ls_waitq; struct fc_port *fcport; struct scsi_qla_host *vha; uint32_t handle; @@ -901,6 +902,7 @@ struct mbx_cmd_32 { #define MBA_SHUTDOWN_REQUESTED 0x8062 /* Shutdown Requested */ #define MBA_TEMPERATURE_ALERT 0x8070 /* Temperature Alert */ #define MBA_DPORT_DIAGNOSTICS 0x8080 /* D-port Diagnostics */ +#define MBA_TRANS_INSERT 0x8130 /* Transceiver Insertion */ #define MBA_FW_INIT_FAILURE 0x8401 /* Firmware initialization failure */ #define MBA_MIRROR_LUN_CHANGE 0x8402 /* Mirror LUN State Change Notification */ @@ -977,6 +979,7 @@ struct mbx_cmd_32 { #define MBC_ABORT_TARGET 0x17 /* Abort target (ID). */ #define MBC_RESET 0x18 /* Reset. */ #define MBC_GET_ADAPTER_LOOP_ID 0x20 /* Get loop id of ISP2200. */ +#define MBC_GET_SET_ZIO_THRESHOLD 0x21 /* Get/SET ZIO THRESHOLD. 
*/ #define MBC_GET_RETRY_COUNT 0x22 /* Get f/w retry cnt/delay. */ #define MBC_DISABLE_VI 0x24 /* Disable VI operation. */ #define MBC_ENABLE_VI 0x25 /* Enable VI operation. */ @@ -2301,8 +2304,7 @@ typedef struct fc_port { unsigned int login_succ:1; struct work_struct nvme_del_work; - atomic_t nvme_ref_count; - wait_queue_head_t nvme_waitQ; + struct completion nvme_del_done; uint32_t nvme_prli_service_param; #define NVME_PRLI_SP_CONF BIT_7 #define NVME_PRLI_SP_INITIATOR BIT_5 @@ -3338,6 +3340,7 @@ struct qla_qpair { struct work_struct q_work; struct list_head qp_list_elem; /* vha->qp_list */ struct list_head hints_list; + struct list_head nvme_done_list; uint16_t cpuid; struct qla_tgt_counters tgt_counters; }; @@ -3463,8 +3466,15 @@ struct qla_hw_data { uint32_t n2n_ae:1; uint32_t fw_started:1; uint32_t fw_init_done:1; + + uint32_t detected_lr_sfp:1; + uint32_t using_lr_setting:1; } flags; + uint16_t long_range_distance; /* 32G & above */ +#define LR_DISTANCE_5K 1 +#define LR_DISTANCE_10K 0 + /* This spinlock is used to protect "io transactions", you must * acquire it before doing any IO to the card, eg with RD_REG*() and * WRT_REG*() for the duration of your entire commandtransaction. @@ -3712,7 +3722,7 @@ struct qla_hw_data { struct sns_cmd_pkt *sns_cmd; dma_addr_t sns_cmd_dma; -#define SFP_DEV_SIZE 256 +#define SFP_DEV_SIZE 512 #define SFP_BLOCK_SIZE 64 void *sfp_data; dma_addr_t sfp_data_dma; @@ -4017,8 +4027,20 @@ struct qla_hw_data { struct qlt_hw_data tgt; int allow_cna_fw_dump; + uint32_t fw_ability_mask; + uint16_t min_link_speed; + uint16_t max_speed_sup; + + atomic_t nvme_active_aen_cnt; + uint16_t nvme_last_rptd_aen; /* Last recorded aen count */ }; +#define FW_ABILITY_MAX_SPEED_MASK 0xFUL +#define FW_ABILITY_MAX_SPEED_16G 0x0 +#define FW_ABILITY_MAX_SPEED_32G 0x1 +#define FW_ABILITY_MAX_SPEED(ha) \ + (ha->fw_ability_mask & FW_ABILITY_MAX_SPEED_MASK) + /* * Qlogic scsi host structure */ @@ -4089,6 +4111,8 @@ typedef struct scsi_qla_host { #define FX00_CRITEMP_RECOVERY 25 #define FX00_HOST_INFO_RESEND 26 #define QPAIR_ONLINE_CHECK_NEEDED 27 +#define SET_ZIO_THRESHOLD_NEEDED 28 +#define DETECT_SFP_CHANGE 29 unsigned long pci_flags; #define PFLG_DISCONNECTED 0 /* PCI device removed */ @@ -4129,8 +4153,7 @@ typedef struct scsi_qla_host { uint8_t fabric_node_name[WWN_SIZE]; struct nvme_fc_local_port *nvme_local_port; - atomic_t nvme_ref_count; - wait_queue_head_t nvme_waitQ; + struct completion nvme_del_done; struct list_head nvme_rport_list; atomic_t nvme_active_aen_cnt; uint16_t nvme_last_rptd_aen; @@ -4199,6 +4222,7 @@ typedef struct scsi_qla_host { int fcport_count; wait_queue_head_t fcport_waitQ; wait_queue_head_t vref_waitq; + uint8_t min_link_speed_feat; } scsi_qla_host_t; struct qla27xx_image_status { @@ -4373,6 +4397,88 @@ enum nexus_wait_type { WAIT_LUN, }; +/* Refer to SNIA SFF 8247 */ +struct sff_8247_a0 { + u8 txid; /* transceiver id */ + u8 ext_txid; + u8 connector; + /* compliance code */ + u8 eth_infi_cc3; /* ethernet, inifiband */ + u8 sonet_cc4[2]; + u8 eth_cc6; + /* link length */ +#define FC_LL_VL BIT_7 /* very long */ +#define FC_LL_S BIT_6 /* Short */ +#define FC_LL_I BIT_5 /* Intermidiate*/ +#define FC_LL_L BIT_4 /* Long */ +#define FC_LL_M BIT_3 /* Medium */ +#define FC_LL_SA BIT_2 /* ShortWave laser */ +#define FC_LL_LC BIT_1 /* LongWave laser */ +#define FC_LL_EL BIT_0 /* Electrical inter enclosure */ + u8 fc_ll_cc7; + /* FC technology */ +#define FC_TEC_EL BIT_7 /* Electrical inter enclosure */ +#define FC_TEC_SN BIT_6 /* short wave w/o OFC */ +#define 
FC_TEC_SL BIT_5 /* short wave with OFC */ +#define FC_TEC_LL BIT_4 /* Longwave Laser */ +#define FC_TEC_ACT BIT_3 /* Active cable */ +#define FC_TEC_PAS BIT_2 /* Passive cable */ + u8 fc_tec_cc8; + /* Transmission Media */ +#define FC_MED_TW BIT_7 /* Twin Ax */ +#define FC_MED_TP BIT_6 /* Twited Pair */ +#define FC_MED_MI BIT_5 /* Min Coax */ +#define FC_MED_TV BIT_4 /* Video Coax */ +#define FC_MED_M6 BIT_3 /* Multimode, 62.5um */ +#define FC_MED_M5 BIT_2 /* Multimode, 50um */ +#define FC_MED_SM BIT_0 /* Single Mode */ + u8 fc_med_cc9; + /* speed FC_SP_12: 12*100M = 1200 MB/s */ +#define FC_SP_12 BIT_7 +#define FC_SP_8 BIT_6 +#define FC_SP_16 BIT_5 +#define FC_SP_4 BIT_4 +#define FC_SP_32 BIT_3 +#define FC_SP_2 BIT_2 +#define FC_SP_1 BIT_0 + u8 fc_sp_cc10; + u8 encode; + u8 bitrate; + u8 rate_id; + u8 length_km; /* offset 14/eh */ + u8 length_100m; + u8 length_50um_10m; + u8 length_62um_10m; + u8 length_om4_10m; + u8 length_om3_10m; +#define SFF_VEN_NAME_LEN 16 + u8 vendor_name[SFF_VEN_NAME_LEN]; /* offset 20/14h */ + u8 tx_compat; + u8 vendor_oui[3]; +#define SFF_PART_NAME_LEN 16 + u8 vendor_pn[SFF_PART_NAME_LEN]; /* part number */ + u8 vendor_rev[4]; + u8 wavelength[2]; + u8 resv; + u8 cc_base; + u8 options[2]; /* offset 64 */ + u8 br_max; + u8 br_min; + u8 vendor_sn[16]; + u8 date_code[8]; + u8 diag; + u8 enh_options; + u8 sff_revision; + u8 cc_ext; + u8 vendor_specific[32]; + u8 resv2[128]; +}; + +#define AUTO_DETECT_SFP_SUPPORT(_vha)\ + (ql2xautodetectsfp && !_vha->vp_idx && \ + (IS_QLA25XX(_vha->hw) || IS_QLA81XX(_vha->hw) ||\ + IS_QLA83XX(_vha->hw) || IS_QLA27XX(_vha->hw))) + #define USER_CTRL_IRQ(_ha) (ql2xuctrlirq && QLA_TGT_MODE_ENABLED() && \ (IS_QLA27XX(_ha) || IS_QLA83XX(_ha))) diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index b9c9886e8b1d..bec641aae7b3 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h @@ -1699,6 +1699,15 @@ struct access_chip_rsp_84xx { #define FAC_OPT_CMD_UNLOCK_SEMAPHORE 0x04 #define FAC_OPT_CMD_GET_SECTOR_SIZE 0x05 +/* enhanced features bit definitions */ +#define NEF_LR_DIST_ENABLE BIT_0 + +/* LR Distance bit positions */ +#define LR_DIST_NV_POS 2 +#define LR_DIST_FW_POS 12 +#define LR_DIST_FW_SHIFT (LR_DIST_FW_POS - LR_DIST_NV_POS) +#define LR_DIST_FW_FIELD(x) ((x) << LR_DIST_FW_SHIFT & 0xf000) + struct nvram_81xx { /* NVRAM header. */ uint8_t id[4]; @@ -1745,7 +1754,9 @@ struct nvram_81xx { uint16_t reserved_6_3[14]; /* Offset 192. */ - uint16_t reserved_7[32]; + uint8_t min_link_speed; + uint8_t reserved_7_0; + uint16_t reserved_7[31]; /* * BIT 0 = Enable spinup delay @@ -1839,16 +1850,13 @@ struct nvram_81xx { uint8_t reserved_21[16]; uint16_t reserved_22[3]; - /* - * BIT 0 = Extended BB credits for LR - * BIT 1 = Virtual Fabric Enable - * BIT 2 = Enhanced Features Unused - * BIT 3-7 = Enhanced Features Reserved + /* Offset 406 (0x196) Enhanced Features + * BIT 0 = Extended BB credits for LR + * BIT 1 = Virtual Fabric Enable + * BIT 2-5 = Distance Support if BIT 0 is on + * BIT 6-15 = Unused */ - /* Enhanced Features */ - uint8_t enhanced_features; - - uint8_t reserved_23; + uint16_t enhanced_features; uint16_t reserved_24[4]; /* Offset 416. */ diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index cadb6e3baacc..f852ca60c49f 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -9,17 +9,6 @@ #include -/* - * Global functions prototype in qla_nvme.c source file. 
- */ -extern void qla_nvme_register_hba(scsi_qla_host_t *); -extern int qla_nvme_register_remote(scsi_qla_host_t *, fc_port_t *); -extern void qla_nvme_delete(scsi_qla_host_t *); -extern void qla_nvme_abort(struct qla_hw_data *, srb_t *sp); -extern void qla24xx_nvme_ls4_iocb(scsi_qla_host_t *, struct pt_ls4_request *, - struct req_que *); -extern void qla24xx_async_gffid_sp_done(void *, int); - /* * Global Function Prototypes in qla_init.c source file. */ @@ -116,13 +105,15 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *, int qla24xx_post_newsess_work(struct scsi_qla_host *, port_id_t *, u8 *, void *); int qla24xx_fcport_handle_login(struct scsi_qla_host *, fc_port_t *); - +int qla24xx_detect_sfp(scsi_qla_host_t *vha); +int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8); /* * Global Data in qla_os.c source file. */ extern char qla2x00_version_str[]; extern struct kmem_cache *srb_cachep; +extern struct kmem_cache *qla_tgt_plogi_cachep; extern int ql2xlogintimeout; extern int qlport_down_retry; @@ -153,6 +144,7 @@ extern int ql2xfwholdabts; extern int ql2xmvasynctoatio; extern int ql2xuctrlirq; extern int ql2xnvmeenable; +extern int ql2xautodetectsfp; extern int qla2x00_loop_reset(scsi_qla_host_t *); extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); @@ -495,6 +487,9 @@ int qla24xx_gidlist_wait(struct scsi_qla_host *, void *, dma_addr_t, int __qla24xx_parse_gpdb(struct scsi_qla_host *, fc_port_t *, struct port_database_24xx *); +extern int qla27xx_get_zio_threshold(scsi_qla_host_t *, uint16_t *); +extern int qla27xx_set_zio_threshold(scsi_qla_host_t *, uint16_t); + /* * Global Function Prototypes in qla_isr.c source file. */ @@ -804,6 +799,7 @@ extern char *qdev_state(uint32_t); extern void qla82xx_clear_pending_mbx(scsi_qla_host_t *); extern int qla82xx_read_temperature(scsi_qla_host_t *); extern int qla8044_read_temperature(scsi_qla_host_t *); +extern int qla2x00_read_sfp_dev(struct scsi_qla_host *, char *, int); /* BSG related functions */ extern int qla24xx_bsg_request(struct bsg_job *); @@ -873,4 +869,6 @@ void qlt_update_host_map(struct scsi_qla_host *, port_id_t); void qlt_remove_target_resources(struct qla_hw_data *); void qlt_clr_qp_table(struct scsi_qla_host *vha); +void qla_nvme_cmpl_io(struct srb_iocb *); + #endif /* _QLA_GBL_H */ diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index b323a7c71eda..bc3db6abc9a0 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -2816,13 +2816,19 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea) case MODE_INITIATOR: case MODE_DUAL: default: + ql_dbg(ql_dbg_disc, vha, 0x201f, + "%s %d %8phC post %s\n", __func__, + __LINE__, fcport->port_name, + (atomic_read(&fcport->state) == + FCS_ONLINE) ? 
"gpdb" : "gnl"); + if (atomic_read(&fcport->state) == FCS_ONLINE) - break; - ql_dbg(ql_dbg_disc, vha, 0x201f, - "%s %d %8phC post gnl\n", - __func__, __LINE__, fcport->port_name); - qla24xx_post_gnl_work(vha, fcport); + qla24xx_post_gpdb_work(vha, + fcport, PDO_FORCE_ADISC); + else + qla24xx_post_gnl_work(vha, + fcport); break; } } else { /* fcport->d_id.b24 != ea->id.b24 */ @@ -3080,7 +3086,7 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport) GPSC_RSP_SIZE); /* GPSC req */ - memcpy(ct_req->req.gpsc.port_name, fcport->port_name, + memcpy(ct_req->req.gpsc.port_name, fcport->fabric_port_name, WWN_SIZE); sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns; diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 072ad1aa5505..b5b48ddca962 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -36,7 +36,6 @@ static int qla2x00_restart_isp(scsi_qla_host_t *); static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *); static int qla84xx_init_chip(scsi_qla_host_t *); static int qla25xx_init_queues(struct qla_hw_data *); -static int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8); static int qla24xx_post_prli_work(struct scsi_qla_host*, fc_port_t *); static void qla24xx_handle_plogi_done_event(struct scsi_qla_host *, struct event_arg *); @@ -774,8 +773,7 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport) return rval; } -static int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, - u8 opt) +int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) { struct qla_work_evt *e; @@ -808,6 +806,12 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) if (!sp) goto done; + sp->type = SRB_MB_IOCB; + sp->name = "gpdb"; + sp->gen1 = fcport->rscn_gen; + sp->gen2 = fcport->login_gen; + qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); if (pd == NULL) { ql_log(ql_log_warn, vha, 0xd043, @@ -816,12 +820,6 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) } memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE)); - sp->type = SRB_MB_IOCB; - sp->name = "gpdb"; - sp->gen1 = fcport->rscn_gen; - sp->gen2 = fcport->login_gen; - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); - mb = sp->u.iocb_cmd.u.mbx.out_mb; mb[0] = MBC_GET_PORT_DATABASE; mb[1] = fcport->loop_id; @@ -1466,6 +1464,7 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) __func__, __LINE__, ea->fcport->port_name); ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset; ea->fcport->logout_on_delete = 1; + ea->fcport->send_els_logo = 0; qla24xx_post_gpdb_work(vha, ea->fcport, 0); } break; @@ -2823,6 +2822,147 @@ qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req) return QLA_SUCCESS; } +#define PRINT_FIELD(_field, _flag, _str) { \ + if (a0->_field & _flag) {\ + if (p) {\ + strcat(ptr, "|");\ + ptr++;\ + leftover--;\ + } \ + len = snprintf(ptr, leftover, "%s", _str); \ + p = 1;\ + leftover -= len;\ + ptr += len; \ + } \ +} + +static void qla2xxx_print_sfp_info(struct scsi_qla_host *vha) +{ +#define STR_LEN 64 + struct sff_8247_a0 *a0 = (struct sff_8247_a0 *)vha->hw->sfp_data; + u8 str[STR_LEN], *ptr, p; + int leftover, len; + + memset(str, 0, STR_LEN); + snprintf(str, SFF_VEN_NAME_LEN+1, a0->vendor_name); + ql_dbg(ql_dbg_init, vha, 0x015a, + "SFP MFG Name: %s\n", str); + + memset(str, 0, 
STR_LEN); + snprintf(str, SFF_PART_NAME_LEN+1, a0->vendor_pn); + ql_dbg(ql_dbg_init, vha, 0x015c, + "SFP Part Name: %s\n", str); + + /* media */ + memset(str, 0, STR_LEN); + ptr = str; + leftover = STR_LEN; + p = len = 0; + PRINT_FIELD(fc_med_cc9, FC_MED_TW, "Twin AX"); + PRINT_FIELD(fc_med_cc9, FC_MED_TP, "Twisted Pair"); + PRINT_FIELD(fc_med_cc9, FC_MED_MI, "Min Coax"); + PRINT_FIELD(fc_med_cc9, FC_MED_TV, "Video Coax"); + PRINT_FIELD(fc_med_cc9, FC_MED_M6, "MultiMode 62.5um"); + PRINT_FIELD(fc_med_cc9, FC_MED_M5, "MultiMode 50um"); + PRINT_FIELD(fc_med_cc9, FC_MED_SM, "SingleMode"); + ql_dbg(ql_dbg_init, vha, 0x0160, + "SFP Media: %s\n", str); + + /* link length */ + memset(str, 0, STR_LEN); + ptr = str; + leftover = STR_LEN; + p = len = 0; + PRINT_FIELD(fc_ll_cc7, FC_LL_VL, "Very Long"); + PRINT_FIELD(fc_ll_cc7, FC_LL_S, "Short"); + PRINT_FIELD(fc_ll_cc7, FC_LL_I, "Intermediate"); + PRINT_FIELD(fc_ll_cc7, FC_LL_L, "Long"); + PRINT_FIELD(fc_ll_cc7, FC_LL_M, "Medium"); + ql_dbg(ql_dbg_init, vha, 0x0196, + "SFP Link Length: %s\n", str); + + memset(str, 0, STR_LEN); + ptr = str; + leftover = STR_LEN; + p = len = 0; + PRINT_FIELD(fc_ll_cc7, FC_LL_SA, "Short Wave (SA)"); + PRINT_FIELD(fc_ll_cc7, FC_LL_LC, "Long Wave (LC)"); + PRINT_FIELD(fc_tec_cc8, FC_TEC_SN, "Short Wave (SN)"); + PRINT_FIELD(fc_tec_cc8, FC_TEC_SL, "Short Wave (SL)"); + PRINT_FIELD(fc_tec_cc8, FC_TEC_LL, "Long Wave (LL)"); + ql_dbg(ql_dbg_init, vha, 0x016e, + "SFP FC Link Tech: %s\n", str); + + if (a0->length_km) + ql_dbg(ql_dbg_init, vha, 0x016f, + "SFP Distance: %d km\n", a0->length_km); + if (a0->length_100m) + ql_dbg(ql_dbg_init, vha, 0x0170, + "SFP Distance: %d m\n", a0->length_100m*100); + if (a0->length_50um_10m) + ql_dbg(ql_dbg_init, vha, 0x0189, + "SFP Distance (WL=50um): %d m\n", a0->length_50um_10m * 10); + if (a0->length_62um_10m) + ql_dbg(ql_dbg_init, vha, 0x018a, + "SFP Distance (WL=62.5um): %d m\n", a0->length_62um_10m * 10); + if (a0->length_om4_10m) + ql_dbg(ql_dbg_init, vha, 0x0194, + "SFP Distance (OM4): %d m\n", a0->length_om4_10m * 10); + if (a0->length_om3_10m) + ql_dbg(ql_dbg_init, vha, 0x0195, + "SFP Distance (OM3): %d m\n", a0->length_om3_10m * 10); +} + + +/* + * Return Code: + * QLA_SUCCESS: no action + * QLA_INTERFACE_ERROR: SFP is not there. + * QLA_FUNCTION_FAILED: detected New SFP + */ +int +qla24xx_detect_sfp(scsi_qla_host_t *vha) +{ + int rc = QLA_SUCCESS; + struct sff_8247_a0 *a; + struct qla_hw_data *ha = vha->hw; + + if (!AUTO_DETECT_SFP_SUPPORT(vha)) + goto out; + + rc = qla2x00_read_sfp_dev(vha, NULL, 0); + if (rc) + goto out; + + a = (struct sff_8247_a0 *)vha->hw->sfp_data; + qla2xxx_print_sfp_info(vha); + + if (a->fc_ll_cc7 & FC_LL_VL || a->fc_ll_cc7 & FC_LL_L) { + /* long range */ + ha->flags.detected_lr_sfp = 1; + + if (a->length_km > 5 || a->length_100m > 50) + ha->long_range_distance = LR_DISTANCE_10K; + else + ha->long_range_distance = LR_DISTANCE_5K; + + if (ha->flags.detected_lr_sfp != ha->flags.using_lr_setting) + ql_dbg(ql_dbg_async, vha, 0x507b, + "Detected Long Range SFP.\n"); + } else { + /* short range */ + ha->flags.detected_lr_sfp = 0; + if (ha->flags.using_lr_setting) + ql_dbg(ql_dbg_async, vha, 0x5084, + "Detected Short Range SFP.\n"); + } + + if (!vha->flags.init_done) + rc = QLA_SUCCESS; +out: + return rc; +} + /** * qla2x00_setup_chip() - Load and start RISC firmware. * @ha: HA context @@ -2879,6 +3019,8 @@ qla2x00_setup_chip(scsi_qla_host_t *vha) rval = qla2x00_execute_fw(vha, srisc_address); /* Retrieve firmware information. 
*/ if (rval == QLA_SUCCESS) { + qla24xx_detect_sfp(vha); + rval = qla2x00_set_exlogins_buffer(vha); if (rval != QLA_SUCCESS) goto failed; @@ -4609,24 +4751,16 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) qla2x00_fdmi_register(vha); /* Ensure we are logged into the SNS. */ - if (IS_FWI2_CAPABLE(ha)) - loop_id = NPH_SNS; - else - loop_id = SIMPLE_NAME_SERVER; + loop_id = NPH_SNS_LID(ha); rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff, 0xfc, mb, BIT_1|BIT_0); - if (rval != QLA_SUCCESS) { + if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) { + ql_dbg(ql_dbg_disc, vha, 0x20a1, + "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x (%x).\n", + loop_id, mb[0], mb[1], mb[2], mb[6], mb[7], rval); set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); return rval; } - if (mb[0] != MBS_COMMAND_COMPLETE) { - ql_dbg(ql_dbg_disc, vha, 0x20a1, - "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x " - "mb[6]=%x mb[7]=%x.\n", loop_id, mb[0], mb[1], - mb[2], mb[6], mb[7]); - return (QLA_SUCCESS); - } - if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) { if (qla2x00_rft_id(vha)) { /* EMPTY */ @@ -4804,6 +4938,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha) new_fcport->fc4_type = swl[swl_idx].fc4_type; new_fcport->nvme_flag = 0; + new_fcport->fc4f_nvme = 0; if (vha->flags.nvme_enabled && swl[swl_idx].fc4f_nvme) { new_fcport->fc4f_nvme = @@ -5913,7 +6048,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) if (!status) { ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__); - + qla2x00_configure_hba(vha); spin_lock_irqsave(&ha->vport_slock, flags); list_for_each_entry(vp, &ha->vp_list, list) { if (vp->vp_idx) { @@ -7804,7 +7939,9 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, ha->queue_pair_map[qpair_id] = qpair; qpair->id = qpair_id; qpair->vp_idx = vp_idx; + qpair->fw_started = ha->flags.fw_started; INIT_LIST_HEAD(&qpair->hints_list); + INIT_LIST_HEAD(&qpair->nvme_done_list); qpair->chip_reset = ha->base_qpair->chip_reset; qpair->enable_class_2 = ha->base_qpair->enable_class_2; qpair->enable_explicit_conf = diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index a36c485fae50..2f94159186d7 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -2682,12 +2682,12 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb) uint32_t *cur_dsd; struct scatterlist *sg; int index; - uint16_t tot_dsds; + uint16_t cmd_dsds, rsp_dsds; scsi_qla_host_t *vha = sp->vha; struct qla_hw_data *ha = vha->hw; struct bsg_job *bsg_job = sp->u.bsg_job; - int loop_iterartion = 0; int entry_count = 1; + cont_a64_entry_t *cont_pkt = NULL; ct_iocb->entry_type = CT_IOCB_TYPE; ct_iocb->entry_status = 0; @@ -2698,30 +2698,46 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb) ct_iocb->vp_index = sp->vha->vp_idx; ct_iocb->comp_status = cpu_to_le16(0); - ct_iocb->cmd_dsd_count = - cpu_to_le16(bsg_job->request_payload.sg_cnt); + cmd_dsds = bsg_job->request_payload.sg_cnt; + rsp_dsds = bsg_job->reply_payload.sg_cnt; + + ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds); ct_iocb->timeout = 0; - ct_iocb->rsp_dsd_count = - cpu_to_le16(bsg_job->reply_payload.sg_cnt); - ct_iocb->rsp_byte_count = - cpu_to_le32(bsg_job->reply_payload.payload_len); + ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds); ct_iocb->cmd_byte_count = cpu_to_le32(bsg_job->request_payload.payload_len); - ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address - (bsg_job->request_payload.sg_list))); - 
ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address - (bsg_job->request_payload.sg_list))); - ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len - (bsg_job->request_payload.sg_list)); - avail_dsds = 1; - cur_dsd = (uint32_t *)ct_iocb->dseg_1_address; + avail_dsds = 2; + cur_dsd = (uint32_t *)ct_iocb->dseg_0_address; index = 0; - tot_dsds = bsg_job->reply_payload.sg_cnt; - for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) { + for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) { + dma_addr_t sle_dma; + + /* Allocate additional continuation packets? */ + if (avail_dsds == 0) { + /* + * Five DSDs are available in the Cont. + * Type 1 IOCB. + */ + cont_pkt = qla2x00_prep_cont_type1_iocb( + vha, ha->req_q_map[0]); + cur_dsd = (uint32_t *) cont_pkt->dseg_0_address; + avail_dsds = 5; + entry_count++; + } + + sle_dma = sg_dma_address(sg); + *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); + *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); + *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); + avail_dsds--; + } + + index = 0; + + for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) { dma_addr_t sle_dma; - cont_a64_entry_t *cont_pkt; /* Allocate additional continuation packets? */ if (avail_dsds == 0) { @@ -2740,7 +2756,6 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb) *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); - loop_iterartion++; avail_dsds--; } ct_iocb->entry_count = entry_count; diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 7b3b702ef622..9d9668aac6f6 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -14,6 +14,8 @@ #include #include #include +#include +#include static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t); static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *); @@ -452,7 +454,7 @@ qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb) uint16_t peg_fw_state, nw_interface_link_up; uint16_t nw_interface_signal_detect, sfp_status; uint16_t htbt_counter, htbt_monitor_enable; - uint16_t sfp_additonal_info, sfp_multirate; + uint16_t sfp_additional_info, sfp_multirate; uint16_t sfp_tx_fault, link_speed, dcbx_status; /* @@ -492,7 +494,7 @@ qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb) sfp_status = ((mb[2] & 0x0c00) >> 10); htbt_counter = ((mb[2] & 0x7000) >> 12); htbt_monitor_enable = ((mb[2] & 0x8000) >> 15); - sfp_additonal_info = (mb[6] & 0x0003); + sfp_additional_info = (mb[6] & 0x0003); sfp_multirate = ((mb[6] & 0x0004) >> 2); sfp_tx_fault = ((mb[6] & 0x0008) >> 3); link_speed = ((mb[6] & 0x0070) >> 4); @@ -507,9 +509,9 @@ qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb) sfp_status); ql_log(ql_log_warn, vha, 0x5067, "htbt_counter=0x%x, htbt_monitor_enable=0x%x, " - "sfp_additonal_info=0x%x, sfp_multirate=0x%x.\n ", + "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n ", htbt_counter, htbt_monitor_enable, - sfp_additonal_info, sfp_multirate); + sfp_additional_info, sfp_multirate); ql_log(ql_log_warn, vha, 0x5068, "sfp_tx_fault=0x%x, link_state=0x%x, " "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed, @@ -799,6 +801,11 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) vha->flags.management_server_logged_in = 0; qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate); + + if (AUTO_DETECT_SFP_SUPPORT(vha)) { + set_bit(DETECT_SFP_CHANGE, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } break; case MBA_LOOP_DOWN: /* Loop Down 
Event */ @@ -1228,6 +1235,11 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) schedule_work(&ha->board_disable); break; + case MBA_TRANS_INSERT: + ql_dbg(ql_dbg_async, vha, 0x5091, + "Transceiver Insertion: %04x\n", mb[1]); + break; + default: ql_dbg(ql_dbg_async, vha, 0x5057, "Unknown AEN:%04x %04x %04x %04x\n", @@ -1537,8 +1549,6 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); if (!sp) return; - bsg_job = sp->u.bsg_job; - bsg_reply = bsg_job->reply; type = NULL; switch (sp->type) { @@ -1577,6 +1587,8 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT * fc payload to the caller */ + bsg_job = sp->u.bsg_job; + bsg_reply = bsg_job->reply; bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status); @@ -1823,7 +1835,7 @@ qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk) nvme = &sp->u.iocb_cmd; if (unlikely(nvme->u.nvme.aen_op)) - atomic_dec(&sp->vha->nvme_active_aen_cnt); + atomic_dec(&sp->vha->hw->nvme_active_aen_cnt); /* * State flags: Bit 6 and 0. @@ -1856,17 +1868,42 @@ qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk) fd->transferred_length = fd->payload_length - le32_to_cpu(sts->residual_len); + /* + * If transport error then Failure (HBA rejects request) + * otherwise transport will handle. + */ if (sts->entry_status) { ql_log(ql_log_warn, fcport->vha, 0x5038, "NVME-%s error - hdl=%x entry-status(%x).\n", sp->name, sp->handle, sts->entry_status); ret = QLA_FUNCTION_FAILED; - } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) { - ql_log(ql_log_warn, fcport->vha, 0x5039, - "NVME-%s error - hdl=%x completion status(%x) resid=%x ox_id=%x\n", - sp->name, sp->handle, sts->comp_status, - le32_to_cpu(sts->residual_len), sts->ox_id); - ret = QLA_FUNCTION_FAILED; + } else { + switch (le16_to_cpu(sts->comp_status)) { + case CS_COMPLETE: + ret = 0; + break; + + case CS_ABORTED: + case CS_RESET: + case CS_PORT_UNAVAILABLE: + case CS_PORT_LOGGED_OUT: + case CS_PORT_BUSY: + ql_log(ql_log_warn, fcport->vha, 0x5060, + "NVME-%s ERR Handling - hdl=%x completion status(%x) resid=%x ox_id=%x\n", + sp->name, sp->handle, sts->comp_status, + le32_to_cpu(sts->residual_len), sts->ox_id); + fd->transferred_length = fd->payload_length; + ret = QLA_ABORTED; + break; + + default: + ql_log(ql_log_warn, fcport->vha, 0x5060, + "NVME-%s error - hdl=%x completion status(%x) resid=%x ox_id=%x\n", + sp->name, sp->handle, sts->comp_status, + le32_to_cpu(sts->residual_len), sts->ox_id); + ret = QLA_FUNCTION_FAILED; + break; + } } sp->done(sp, ret); } @@ -2827,8 +2864,8 @@ qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, sp->done(sp, 0); } -void qla24xx_nvme_ls4_iocb(scsi_qla_host_t *vha, struct pt_ls4_request *pkt, - struct req_que *req) +void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha, + struct pt_ls4_request *pkt, struct req_que *req) { srb_t *sp; const char func[] = "LS4_IOCB"; @@ -3132,7 +3169,6 @@ qla24xx_msix_rsp_q(int irq, void *dev_id) struct device_reg_24xx __iomem *reg; struct scsi_qla_host *vha; unsigned long flags; - uint32_t stat = 0; rsp = (struct rsp_que *) dev_id; if (!rsp) { @@ -3146,19 +3182,11 @@ qla24xx_msix_rsp_q(int irq, void *dev_id) spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); - /* - * Use host_status register to check to PCI 
disconnection before we - * we process the response queue. - */ - stat = RD_REG_DWORD(®->host_status); - if (qla2x00_check_reg32_for_disconnect(vha, stat)) - goto out; qla24xx_process_response_queue(vha, rsp); if (!ha->flags.disable_msix_handshake) { WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); RD_REG_DWORD_RELAXED(®->hccr); } -out: spin_unlock_irqrestore(&ha->hardware_lock, flags); return IRQ_HANDLED; @@ -3429,7 +3457,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) } /* Enable MSI-X vector for response queue update for queue 0 */ - if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) { + if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { if (ha->msixbase && ha->mqiobase && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 || ql2xmqsupport)) diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 7c6d1a404011..99502fa90810 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -54,6 +54,10 @@ static struct rom_cmd { { MBC_GET_MEM_OFFLOAD_CNTRL_STAT }, { MBC_GET_RETRY_COUNT }, { MBC_TRACE_CONTROL }, + { MBC_INITIALIZE_MULTIQ }, + { MBC_IOCB_COMMAND_A64 }, + { MBC_GET_ADAPTER_LOOP_ID }, + { MBC_READ_SFP }, }; static int is_rom_cmd(uint16_t cmd) @@ -102,7 +106,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) uint16_t __iomem *optr; uint32_t cnt; uint32_t mboxes; - uint16_t __iomem *mbx_reg; unsigned long wait_time; struct qla_hw_data *ha = vha->hw; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); @@ -486,21 +489,24 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) mbx_done: if (rval) { - ql_dbg(ql_dbg_disc, base_vha, 0x1020, - "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n", - mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command); - + if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) { + pr_warn("%s [%s]-%04x:%ld: **** Failed", QL_MSGHDR, + dev_name(&ha->pdev->dev), 0x1020+0x800, + vha->host_no); + mboxes = mcp->in_mb; + cnt = 4; + for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1) + if (mboxes & BIT_0) { + printk(" mb[%u]=%x", i, mcp->mb[i]); + cnt--; + } + pr_warn(" cmd=%x ****\n", command); + } ql_dbg(ql_dbg_mbx, vha, 0x1198, - "host status: 0x%x, flags:0x%lx, intr ctrl reg:0x%x, intr status:0x%x\n", + "host_status=%#x intr_ctrl=%#x intr_status=%#x\n", RD_REG_DWORD(®->isp24.host_status), - ha->fw_dump_cap_flags, RD_REG_DWORD(®->isp24.ictrl), RD_REG_DWORD(®->isp24.istatus)); - - mbx_reg = ®->isp24.mailbox0; - for (i = 0; i < 6; i++) - ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1199, - "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++)); } else { ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__); } @@ -561,6 +567,28 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr, #define EXTENDED_BB_CREDITS BIT_0 #define NVME_ENABLE_FLAG BIT_3 +static inline uint16_t qla25xx_set_sfp_lr_dist(struct qla_hw_data *ha) +{ + uint16_t mb4 = BIT_0; + + if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) + mb4 |= ha->long_range_distance << LR_DIST_FW_POS; + + return mb4; +} + +static inline uint16_t qla25xx_set_nvr_lr_dist(struct qla_hw_data *ha) +{ + uint16_t mb4 = BIT_0; + + if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { + struct nvram_81xx *nv = ha->nvram; + + mb4 |= LR_DIST_FW_FIELD(nv->enhanced_features); + } + + return mb4; +} /* * qla2x00_execute_fw @@ -595,17 +623,44 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr) mcp->mb[1] = MSW(risc_addr); mcp->mb[2] = LSW(risc_addr); mcp->mb[3] = 0; + mcp->mb[4] = 0; + 
ha->flags.using_lr_setting = 0; if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) { - struct nvram_81xx *nv = ha->nvram; - mcp->mb[4] = (nv->enhanced_features & - EXTENDED_BB_CREDITS); - } else - mcp->mb[4] = 0; + if (ql2xautodetectsfp) { + if (ha->flags.detected_lr_sfp) { + mcp->mb[4] |= + qla25xx_set_sfp_lr_dist(ha); + ha->flags.using_lr_setting = 1; + } + } else { + struct nvram_81xx *nv = ha->nvram; + /* set LR distance if specified in nvram */ + if (nv->enhanced_features & + NEF_LR_DIST_ENABLE) { + mcp->mb[4] |= + qla25xx_set_nvr_lr_dist(ha); + ha->flags.using_lr_setting = 1; + } + } + } if (ql2xnvmeenable && IS_QLA27XX(ha)) mcp->mb[4] |= NVME_ENABLE_FLAG; + if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { + struct nvram_81xx *nv = ha->nvram; + /* set minimum speed if specified in nvram */ + if (nv->min_link_speed >= 2 && + nv->min_link_speed <= 5) { + mcp->mb[4] |= BIT_4; + mcp->mb[11] = nv->min_link_speed; + mcp->out_mb |= MBX_11; + mcp->in_mb |= BIT_5; + vha->min_link_speed_feat = nv->min_link_speed; + } + } + if (ha->flags.exlogins_enabled) mcp->mb[4] |= ENABLE_EXTENDED_LOGIN; @@ -613,7 +668,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr) mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD; mcp->out_mb |= MBX_4|MBX_3|MBX_2|MBX_1; - mcp->in_mb |= MBX_1; + mcp->in_mb |= MBX_3 | MBX_2 | MBX_1; } else { mcp->mb[1] = LSW(risc_addr); mcp->out_mb |= MBX_1; @@ -632,12 +687,30 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr) "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { if (IS_FWI2_CAPABLE(ha)) { - ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1027, - "Done exchanges=%x.\n", mcp->mb[1]); - } else { - ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028, - "Done %s.\n", __func__); + ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2]; + ql_dbg(ql_dbg_mbx, vha, 0x119a, + "fw_ability_mask=%x.\n", ha->fw_ability_mask); + ql_dbg(ql_dbg_mbx, vha, 0x1027, + "exchanges=%x.\n", mcp->mb[1]); + if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { + ha->max_speed_sup = mcp->mb[2] & BIT_0; + ql_dbg(ql_dbg_mbx, vha, 0x119b, + "Maximum speed supported=%s.\n", + ha->max_speed_sup ? "32Gps" : "16Gps"); + if (vha->min_link_speed_feat) { + ha->min_link_speed = mcp->mb[5]; + ql_dbg(ql_dbg_mbx, vha, 0x119c, + "Minimum speed set=%s.\n", + mcp->mb[5] == 5 ? "32Gps" : + mcp->mb[5] == 4 ? "16Gps" : + mcp->mb[5] == 3 ? "8Gps" : + mcp->mb[5] == 2 ? "4Gps" : + "unknown"); + } + } } + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028, + "Done.\n"); } return rval; @@ -947,20 +1020,12 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha) "%s: Firmware supports Exchange Offload 0x%x\n", __func__, ha->fw_attributes_h); - /* bit 26 of fw_attributes */ - if ((ha->fw_attributes_h & 0x400) && ql2xnvmeenable) { - struct init_cb_24xx *icb; - - icb = (struct init_cb_24xx *)ha->init_cb; - /* - * fw supports nvme and driver load - * parameter requested nvme - */ + /* + * FW supports nvme and driver load parameter requested nvme. + * BIT 26 of fw_attributes indicates NVMe support. 
+ */ + if ((ha->fw_attributes_h & 0x400) && ql2xnvmeenable) vha->flags.nvme_enabled = 1; - icb->firmware_options_2 &= cpu_to_le32(~0xf); - ha->zio_mode = 0; - ha->zio_timer = 0; - } } @@ -1673,7 +1738,11 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size) "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x,.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]); } else { - /*EMPTY*/ + if (IS_QLA27XX(ha)) { + if (mcp->mb[2] == 6 || mcp->mb[3] == 2) + ql_dbg(ql_dbg_mbx, vha, 0x119d, + "Invalid SFP/Validation Failed\n"); + } ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e, "Done %s.\n", __func__); } @@ -1878,6 +1947,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states) int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; + struct qla_hw_data *ha = vha->hw; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054, "Entered %s.\n", __func__); @@ -1906,7 +1976,11 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states) /*EMPTY*/ ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval); } else { - /*EMPTY*/ + if (IS_QLA27XX(ha)) { + if (mcp->mb[2] == 6 || mcp->mb[3] == 2) + ql_dbg(ql_dbg_mbx, vha, 0x119e, + "Invalid SFP/Validation Failed\n"); + } ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056, "Done %s.\n", __func__); } @@ -3689,7 +3763,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, if (qla_ini_mode_enabled(vha) && ha->flags.fawwpn_enabled && (rptid_entry->u.f1.flags & - VP_FLAGS_NAME_VALID)) { + BIT_6)) { memcpy(vha->port_name, rptid_entry->u.f1.port_name, WWN_SIZE); @@ -4590,6 +4664,10 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10e9, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + if (mcp->mb[0] == MBS_COMMAND_ERROR && + mcp->mb[1] == 0x22) + /* sfp is not there */ + rval = QLA_INTERFACE_ERROR; } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea, "Done %s.\n", __func__); @@ -5817,7 +5895,7 @@ qla26xx_dport_diagnostics(scsi_qla_host_t *vha, dd_dma = dma_map_single(&vha->hw->pdev->dev, dd_buf, size, DMA_FROM_DEVICE); - if (!dd_dma) { + if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) { ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n"); return QLA_MEMORY_ALLOC_FAILED; } @@ -6085,3 +6163,108 @@ int qla24xx_gidlist_wait(struct scsi_qla_host *vha, done: return rval; } + +int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200, + "Entered %s\n", __func__); + + memset(mcp->mb, 0 , sizeof(mcp->mb)); + mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD; + mcp->mb[1] = cpu_to_le16(1); + mcp->mb[2] = cpu_to_le16(value); + mcp->out_mb = MBX_2 | MBX_1 | MBX_0; + mcp->in_mb = MBX_2 | MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + + rval = qla2x00_mailbox_command(vha, mcp); + + ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n", + (rval != QLA_SUCCESS) ? 
"Failed" : "Done", rval); + + return rval; +} + +int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203, + "Entered %s\n", __func__); + + memset(mcp->mb, 0, sizeof(mcp->mb)); + mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD; + mcp->mb[1] = cpu_to_le16(0); + mcp->out_mb = MBX_1 | MBX_0; + mcp->in_mb = MBX_2 | MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + + rval = qla2x00_mailbox_command(vha, mcp); + if (rval == QLA_SUCCESS) + *value = mc.mb[2]; + + ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n", + (rval != QLA_SUCCESS) ? "Failed" : "Done", rval); + + return rval; +} + +int +qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count) +{ + struct qla_hw_data *ha = vha->hw; + uint16_t iter, addr, offset; + dma_addr_t phys_addr; + int rval, c; + u8 *sfp_data; + + memset(ha->sfp_data, 0, SFP_DEV_SIZE); + addr = 0xa0; + phys_addr = ha->sfp_data_dma; + sfp_data = ha->sfp_data; + offset = c = 0; + + for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) { + if (iter == 4) { + /* Skip to next device address. */ + addr = 0xa2; + offset = 0; + } + + rval = qla2x00_read_sfp(vha, phys_addr, sfp_data, + addr, offset, SFP_BLOCK_SIZE, BIT_1); + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x706d, + "Unable to read SFP data (%x/%x/%x).\n", rval, + addr, offset); + + return rval; + } + + if (buf && (c < count)) { + u16 sz; + + if ((count - c) >= SFP_BLOCK_SIZE) + sz = SFP_BLOCK_SIZE; + else + sz = count - c; + + memcpy(buf, sfp_data, sz); + buf += SFP_BLOCK_SIZE; + c += sz; + } + phys_addr += SFP_BLOCK_SIZE; + sfp_data += SFP_BLOCK_SIZE; + offset += SFP_BLOCK_SIZE; + } + + return rval; +} diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index f0605cd196fb..c0f8f6c17b79 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -74,7 +74,7 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha) * ensures no active vp_list traversal while the vport is removed * from the queue) */ - wait_event_timeout(vha->vref_waitq, atomic_read(&vha->vref_count), + wait_event_timeout(vha->vref_waitq, !atomic_read(&vha->vref_count), 10*HZ); spin_lock_irqsave(&ha->vport_slock, flags); @@ -187,6 +187,11 @@ qla24xx_enable_vp(scsi_qla_host_t *vha) !(ha->current_topology & ISP_CFG_F)) { vha->vp_err_state = VP_ERR_PORTDWN; fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN); + ql_dbg(ql_dbg_taskm, vha, 0x800b, + "%s skip enable. 
loop_state %x topo %x\n", + __func__, base_vha->loop_state.counter, + ha->current_topology); + goto enable_failed; } @@ -759,11 +764,18 @@ static void qla_do_work(struct work_struct *work) struct qla_qpair *qpair = container_of(work, struct qla_qpair, q_work); struct scsi_qla_host *vha; struct qla_hw_data *ha = qpair->hw; + struct srb_iocb *nvme, *nxt_nvme; spin_lock_irqsave(&qpair->qp_lock, flags); vha = pci_get_drvdata(ha->pdev); qla24xx_process_response_queue(vha, qpair->rsp); spin_unlock_irqrestore(&qpair->qp_lock, flags); + + list_for_each_entry_safe(nvme, nxt_nvme, &qpair->nvme_done_list, + u.nvme.entry) { + list_del_init(&nvme->u.nvme.entry); + qla_nvme_cmpl_io(nvme); + } } /* create response queue */ diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c index 10b742d27e16..e23a3d4c36f3 100644 --- a/drivers/scsi/qla2xxx/qla_mr.c +++ b/drivers/scsi/qla2xxx/qla_mr.c @@ -1819,6 +1819,10 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type) if (!sp) goto done; + sp->type = SRB_FXIOCB_DCMD; + sp->name = "fxdisc"; + qla2x00_init_timer(sp, FXDISC_TIMEOUT); + fdisc = &sp->u.iocb_cmd; switch (fx_type) { case FXDISC_GET_CONFIG_INFO: @@ -1920,9 +1924,6 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type) goto done_unmap_req; } - sp->type = SRB_FXIOCB_DCMD; - sp->name = "fxdisc"; - qla2x00_init_timer(sp, FXDISC_TIMEOUT); fdisc->timeout = qla2x00_fxdisc_iocb_timeout; fdisc->u.fxiocb.req_func_type = cpu_to_le16(fx_type); sp->done = qla2x00_fxdisc_sp_done; diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index f3710a75fe1f..6b33a1f24f56 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -5,7 +5,6 @@ * See LICENSE.qla2xxx for copyright and licensing details. 
*/ #include "qla_nvme.h" -#include "qla_def.h" #include #include #include @@ -15,7 +14,7 @@ static struct nvme_fc_port_template qla_nvme_fc_transport; static void qla_nvme_unregister_remote_port(struct work_struct *); -int qla_nvme_register_remote(scsi_qla_host_t *vha, fc_port_t *fcport) +int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport) { struct nvme_rport *rport; int ret; @@ -61,8 +60,8 @@ int qla_nvme_register_remote(scsi_qla_host_t *vha, fc_port_t *fcport) rport->req.port_id = fcport->d_id.b24; ql_log(ql_log_info, vha, 0x2102, - "%s: traddr=pn-0x%016llx:nn-0x%016llx PortID:%06x\n", - __func__, rport->req.port_name, rport->req.node_name, + "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n", + __func__, rport->req.node_name, rport->req.port_name, rport->req.port_id); ret = nvme_fc_register_remoteport(vha->nvme_local_port, &rport->req, @@ -76,16 +75,14 @@ int qla_nvme_register_remote(scsi_qla_host_t *vha, fc_port_t *fcport) fcport->nvme_remote_port->private = fcport; fcport->nvme_flag |= NVME_FLAG_REGISTERED; - atomic_set(&fcport->nvme_ref_count, 1); - init_waitqueue_head(&fcport->nvme_waitQ); rport->fcport = fcport; list_add_tail(&rport->list, &vha->nvme_rport_list); return 0; } /* Allocate a queue for NVMe traffic */ -static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport, unsigned int qidx, - u16 qsize, void **handle) +static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport, + unsigned int qidx, u16 qsize, void **handle) { struct scsi_qla_host *vha; struct qla_hw_data *ha; @@ -157,6 +154,16 @@ static void qla_nvme_sp_ls_done(void *ptr, int res) qla2x00_rel_sp(sp); } +void qla_nvme_cmpl_io(struct srb_iocb *nvme) +{ + srb_t *sp; + struct nvmefc_fcp_req *fd = nvme->u.nvme.desc; + + sp = container_of(nvme, srb_t, u.iocb_cmd); + fd->done(fd); + qla2xxx_rel_qpair_sp(sp->qpair, sp); +} + static void qla_nvme_sp_done(void *ptr, int res) { srb_t *sp = ptr; @@ -172,13 +179,14 @@ static void qla_nvme_sp_done(void *ptr, int res) if (!(sp->fcport->nvme_flag & NVME_FLAG_REGISTERED)) goto rel; - if (unlikely(nvme->u.nvme.comp_status || res)) - fd->status = -EINVAL; + if (unlikely(res == QLA_FUNCTION_FAILED)) + fd->status = NVME_SC_INTERNAL; else fd->status = 0; fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len; - fd->done(fd); + list_add_tail(&nvme->u.nvme.entry, &sp->qpair->nvme_done_list); + return; rel: qla2xxx_rel_qpair_sp(sp->qpair, sp); } @@ -193,13 +201,11 @@ static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport, struct qla_hw_data *ha = fcport->vha->hw; rval = ha->isp_ops->abort_command(sp); - if (rval != QLA_SUCCESS) - ql_log(ql_log_warn, fcport->vha, 0x2125, - "%s: failed to abort LS command for SP:%p rval=%x\n", - __func__, sp, rval); ql_dbg(ql_dbg_io, fcport->vha, 0x212b, - "%s: aborted sp:%p on fcport:%p\n", __func__, sp, fcport); + "%s: %s LS command for sp=%p on fcport=%p rval=%x\n", __func__, + (rval != QLA_SUCCESS) ? 
"Failed to abort" : "Aborted", + sp, fcport, rval); } static void qla_nvme_ls_complete(struct work_struct *work) @@ -214,7 +220,7 @@ static void qla_nvme_ls_complete(struct work_struct *work) static int qla_nvme_ls_req(struct nvme_fc_local_port *lport, struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd) { - fc_port_t *fcport = (fc_port_t *)rport->private; + fc_port_t *fcport = rport->private; struct srb_iocb *nvme; struct nvme_private *priv = fd->private; struct scsi_qla_host *vha; @@ -236,7 +242,6 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport, sp->name = "nvme_ls"; sp->done = qla_nvme_sp_ls_done; atomic_set(&sp->ref_count, 1); - init_waitqueue_head(&sp->nvme_ls_waitQ); nvme = &sp->u.iocb_cmd; priv->sp = sp; priv->fd = fd; @@ -258,7 +263,7 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport, ql_log(ql_log_warn, vha, 0x700e, "qla2x00_start_sp failed = %d\n", rval); atomic_dec(&sp->ref_count); - wake_up(&sp->nvme_ls_waitQ); + wake_up(&sp->nvme_ls_waitq); return rval; } @@ -276,20 +281,18 @@ static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport, struct qla_hw_data *ha = fcport->vha->hw; rval = ha->isp_ops->abort_command(sp); - if (!rval) - ql_log(ql_log_warn, fcport->vha, 0x2127, - "%s: failed to abort command for SP:%p rval=%x\n", - __func__, sp, rval); - ql_dbg(ql_dbg_io, fcport->vha, 0x2126, - "%s: aborted sp:%p on fcport:%p\n", __func__, sp, fcport); + ql_dbg(ql_dbg_io, fcport->vha, 0x2127, + "%s: %s command for sp=%p on fcport=%p rval=%x\n", __func__, + (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted", + sp, fcport, rval); } static void qla_nvme_poll(struct nvme_fc_local_port *lport, void *hw_queue_handle) { struct scsi_qla_host *vha = lport->private; unsigned long flags; - struct qla_qpair *qpair = (struct qla_qpair *)hw_queue_handle; + struct qla_qpair *qpair = hw_queue_handle; /* Acquire ring specific lock */ spin_lock_irqsave(&qpair->qp_lock, flags); @@ -310,6 +313,7 @@ static int qla2x00_start_nvme_mq(srb_t *sp) uint16_t avail_dsds; uint32_t *cur_dsd; struct req_que *req = NULL; + struct rsp_que *rsp = NULL; struct scsi_qla_host *vha = sp->fcport->vha; struct qla_hw_data *ha = vha->hw; struct qla_qpair *qpair = sp->qpair; @@ -318,13 +322,15 @@ static int qla2x00_start_nvme_mq(srb_t *sp) struct nvmefc_fcp_req *fd = nvme->u.nvme.desc; uint32_t rval = QLA_SUCCESS; - /* Setup qpair pointers */ - req = qpair->req; tot_dsds = fd->sg_cnt; /* Acquire qpair specific lock */ spin_lock_irqsave(&qpair->qp_lock, flags); + /* Setup qpair pointers */ + req = qpair->req; + rsp = qpair->rsp; + /* Check for room in outstanding command list. */ handle = req->current_outstanding_cmd; for (index = 1; index < req->num_outstanding_cmds; index++) { @@ -359,7 +365,7 @@ static int qla2x00_start_nvme_mq(srb_t *sp) struct nvme_fc_cmd_iu *cmd = fd->cmdaddr; if (cmd->sqe.common.opcode == nvme_admin_async_event) { nvme->u.nvme.aen_op = 1; - atomic_inc(&vha->nvme_active_aen_cnt); + atomic_inc(&vha->hw->nvme_active_aen_cnt); } } @@ -472,6 +478,11 @@ static int qla2x00_start_nvme_mq(srb_t *sp) /* Set chip new ring index. */ WRT_REG_DWORD(req->req_q_in, req->ring_index); + /* Manage unprocessed RIO/ZIO commands in response queue. 
*/ + if (vha->flags.process_response_queue && + rsp->ring_ptr->signature != RESPONSE_PROCESSED) + qla24xx_process_response_queue(vha, rsp); + queuing_error: spin_unlock_irqrestore(&qpair->qp_lock, flags); return rval; @@ -487,7 +498,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport, struct scsi_qla_host *vha; int rval = QLA_FUNCTION_FAILED; srb_t *sp; - struct qla_qpair *qpair = (struct qla_qpair *)hw_queue_handle; + struct qla_qpair *qpair = hw_queue_handle; struct nvme_private *priv; if (!fd) { @@ -496,7 +507,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport, } priv = fd->private; - fcport = (fc_port_t *)rport->private; + fcport = rport->private; if (!fcport) { ql_log(ql_log_warn, NULL, 0x210e, "No fcport ptr\n"); return rval; @@ -512,7 +523,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport, return -EIO; atomic_set(&sp->ref_count, 1); - init_waitqueue_head(&sp->nvme_ls_waitQ); + init_waitqueue_head(&sp->nvme_ls_waitq); priv->sp = sp; sp->type = SRB_NVME_CMD; sp->name = "nvme_cmd"; @@ -526,7 +537,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport, ql_log(ql_log_warn, vha, 0x212d, "qla2x00_start_nvme_mq failed = %d\n", rval); atomic_dec(&sp->ref_count); - wake_up(&sp->nvme_ls_waitQ); + wake_up(&sp->nvme_ls_waitq); return -EIO; } @@ -537,12 +548,10 @@ static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport) { struct scsi_qla_host *vha = lport->private; - atomic_dec(&vha->nvme_ref_count); - wake_up_all(&vha->nvme_waitQ); - ql_log(ql_log_info, vha, 0x210f, "localport delete of %p completed.\n", vha->nvme_local_port); vha->nvme_local_port = NULL; + complete(&vha->nvme_del_done); } static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport) @@ -550,11 +559,9 @@ static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport) fc_port_t *fcport; struct nvme_rport *r_port, *trport; - fcport = (fc_port_t *)rport->private; + fcport = rport->private; fcport->nvme_remote_port = NULL; fcport->nvme_flag &= ~NVME_FLAG_REGISTERED; - atomic_dec(&fcport->nvme_ref_count); - wake_up_all(&fcport->nvme_waitQ); list_for_each_entry_safe(r_port, trport, &fcport->vha->nvme_rport_list, list) { @@ -564,6 +571,7 @@ static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport) } } kfree(r_port); + complete(&fcport->nvme_del_done); ql_log(ql_log_info, fcport->vha, 0x2110, "remoteport_delete of %p completed.\n", fcport); @@ -594,7 +602,7 @@ static int qla_nvme_wait_on_command(srb_t *sp) { int ret = QLA_SUCCESS; - wait_event_timeout(sp->nvme_ls_waitQ, (atomic_read(&sp->ref_count) > 1), + wait_event_timeout(sp->nvme_ls_waitq, (atomic_read(&sp->ref_count) > 1), NVME_ABORT_POLLING_PERIOD*HZ); if (atomic_read(&sp->ref_count) > 1) @@ -606,12 +614,11 @@ static int qla_nvme_wait_on_command(srb_t *sp) static int qla_nvme_wait_on_rport_del(fc_port_t *fcport) { int ret = QLA_SUCCESS; + int timeout; - wait_event_timeout(fcport->nvme_waitQ, - atomic_read(&fcport->nvme_ref_count), - NVME_ABORT_POLLING_PERIOD*HZ); - - if (atomic_read(&fcport->nvme_ref_count)) { + timeout = wait_for_completion_timeout(&fcport->nvme_del_done, + msecs_to_jiffies(2000)); + if (!timeout) { ret = QLA_FUNCTION_FAILED; ql_log(ql_log_info, fcport->vha, 0x2111, "timed out waiting for fcport=%p to delete\n", fcport); @@ -620,50 +627,14 @@ static int qla_nvme_wait_on_rport_del(fc_port_t *fcport) return ret; } -void qla_nvme_abort(struct qla_hw_data *ha, srb_t *sp) +void qla_nvme_abort(struct qla_hw_data *ha, struct srb *sp) { int rval; rval = 
ha->isp_ops->abort_command(sp); - if (!rval) { - if (!qla_nvme_wait_on_command(sp)) - ql_log(ql_log_warn, NULL, 0x2112, - "nvme_wait_on_command timed out waiting on sp=%p\n", - sp); - } -} - -static void qla_nvme_abort_all(fc_port_t *fcport) -{ - int que, cnt; - unsigned long flags; - srb_t *sp; - struct qla_hw_data *ha = fcport->vha->hw; - struct req_que *req; - - spin_lock_irqsave(&ha->hardware_lock, flags); - for (que = 0; que < ha->max_req_queues; que++) { - req = ha->req_q_map[que]; - if (!req) - continue; - if (!req->outstanding_cmds) - continue; - for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { - sp = req->outstanding_cmds[cnt]; - if ((sp) && ((sp->type == SRB_NVME_CMD) || - (sp->type == SRB_NVME_LS)) && - (sp->fcport == fcport)) { - atomic_inc(&sp->ref_count); - spin_unlock_irqrestore(&ha->hardware_lock, - flags); - qla_nvme_abort(ha, sp); - spin_lock_irqsave(&ha->hardware_lock, flags); - req->outstanding_cmds[cnt] = NULL; - sp->done(sp, 1); - } - } - } - spin_unlock_irqrestore(&ha->hardware_lock, flags); + if (!rval && !qla_nvme_wait_on_command(sp)) + ql_log(ql_log_warn, NULL, 0x2112, + "nvme_wait_on_comand timed out waiting on sp=%p\n", sp); } static void qla_nvme_unregister_remote_port(struct work_struct *work) @@ -675,18 +646,23 @@ static void qla_nvme_unregister_remote_port(struct work_struct *work) if (!IS_ENABLED(CONFIG_NVME_FC)) return; + ql_log(ql_log_warn, NULL, 0x2112, + "%s: unregister remoteport on %p\n",__func__, fcport); + list_for_each_entry_safe(rport, trport, &fcport->vha->nvme_rport_list, list) { if (rport->fcport == fcport) { ql_log(ql_log_info, fcport->vha, 0x2113, "%s: fcport=%p\n", __func__, fcport); + init_completion(&fcport->nvme_del_done); nvme_fc_unregister_remoteport( fcport->nvme_remote_port); + qla_nvme_wait_on_rport_del(fcport); } } } -void qla_nvme_delete(scsi_qla_host_t *vha) +void qla_nvme_delete(struct scsi_qla_host *vha) { struct nvme_rport *rport, *trport; fc_port_t *fcport; @@ -701,12 +677,13 @@ void qla_nvme_delete(scsi_qla_host_t *vha) ql_log(ql_log_info, fcport->vha, 0x2114, "%s: fcport=%p\n", __func__, fcport); + init_completion(&fcport->nvme_del_done); nvme_fc_unregister_remoteport(fcport->nvme_remote_port); qla_nvme_wait_on_rport_del(fcport); - qla_nvme_abort_all(fcport); } if (vha->nvme_local_port) { + init_completion(&vha->nvme_del_done); nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port); if (nv_ret == 0) ql_log(ql_log_info, vha, 0x2116, @@ -715,10 +692,12 @@ void qla_nvme_delete(scsi_qla_host_t *vha) else ql_log(ql_log_info, vha, 0x2115, "Unregister of localport failed\n"); + wait_for_completion_timeout(&vha->nvme_del_done, + msecs_to_jiffies(5000)); } } -void qla_nvme_register_hba(scsi_qla_host_t *vha) +void qla_nvme_register_hba(struct scsi_qla_host *vha) { struct nvme_fc_port_template *tmpl; struct qla_hw_data *ha; @@ -744,8 +723,8 @@ void qla_nvme_register_hba(scsi_qla_host_t *vha) pinfo.port_id = vha->d_id.b24; ql_log(ql_log_info, vha, 0xffff, - "register_localport: host-traddr=pn-0x%llx:nn-0x%llx on portID:%x\n", - pinfo.port_name, pinfo.node_name, pinfo.port_id); + "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n", + pinfo.node_name, pinfo.port_name, pinfo.port_id); qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary; ret = nvme_fc_register_localport(&pinfo, tmpl, @@ -755,7 +734,5 @@ void qla_nvme_register_hba(scsi_qla_host_t *vha) "register_localport failed: ret=%x\n", ret); return; } - atomic_set(&vha->nvme_ref_count, 1); vha->nvme_local_port->private = vha; - 
init_waitqueue_head(&vha->nvme_waitQ); } diff --git a/drivers/scsi/qla2xxx/qla_nvme.h b/drivers/scsi/qla2xxx/qla_nvme.h index dfe56f207b28..7f05fa1c77db 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.h +++ b/drivers/scsi/qla2xxx/qla_nvme.h @@ -12,12 +12,18 @@ #include #include +#include "qla_def.h" + #define NVME_ATIO_CMD_OFF 32 #define NVME_FIRST_PACKET_CMDLEN (64 - NVME_ATIO_CMD_OFF) #define Q2T_NVME_NUM_TAGS 2048 #define QLA_MAX_FC_SEGMENTS 64 +struct scsi_qla_host; +struct qla_hw_data; +struct req_que; struct srb; + struct nvme_private { struct srb *sp; struct nvmefc_ls_req *fd; @@ -129,4 +135,15 @@ struct pt_ls4_rx_unsol { uint32_t desc_len; uint32_t payload[3]; }; + +/* + * Global functions prototype in qla_nvme.c source file. + */ +void qla_nvme_register_hba(struct scsi_qla_host *); +int qla_nvme_register_remote(struct scsi_qla_host *, struct fc_port *); +void qla_nvme_delete(struct scsi_qla_host *); +void qla_nvme_abort(struct qla_hw_data *, struct srb *sp); +void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *, struct pt_ls4_request *, + struct req_que *); +void qla24xx_async_gffid_sp_done(void *, int); #endif diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index df57655779ed..5b2437a5ea44 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -113,12 +113,12 @@ MODULE_PARM_DESC(ql2xfdmienable, "Enables FDMI registrations. " "0 - no FDMI. Default is 1 - perform FDMI."); -#define MAX_Q_DEPTH 32 +#define MAX_Q_DEPTH 64 static int ql2xmaxqdepth = MAX_Q_DEPTH; module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql2xmaxqdepth, "Maximum queue depth to set for each LUN. " - "Default is 32."); + "Default is 64."); #if (IS_ENABLED(CONFIG_NVME_FC)) int ql2xenabledif; @@ -200,7 +200,7 @@ MODULE_PARM_DESC(ql2xgffidenable, "Enables GFF_ID checks of port type. " "Default is 0 - Do not use GFF_ID information."); -int ql2xasynctmfenable; +int ql2xasynctmfenable = 1; module_param(ql2xasynctmfenable, int, S_IRUGO); MODULE_PARM_DESC(ql2xasynctmfenable, "Enables issue of TM IOCBs asynchronously via IOCB mechanism" @@ -262,6 +262,12 @@ MODULE_PARM_DESC(ql2xmvasynctoatio, "0 (Default). Do not move IOCBs" "1 - Move IOCBs."); +int ql2xautodetectsfp = 1; +module_param(ql2xautodetectsfp, int, 0444); +MODULE_PARM_DESC(ql2xautodetectsfp, + "Detect SFP range and set appropriate distance.\n" + "1 (Default): Enable\n"); + /* * SCSI host template entry points */ @@ -379,6 +385,7 @@ static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req, ha->base_qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0; ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q]; INIT_LIST_HEAD(&ha->base_qpair->hints_list); + INIT_LIST_HEAD(&ha->base_qpair->nvme_done_list); ha->base_qpair->enable_class_2 = ql2xenableclass2; /* init qpair to this cpu. Will adjust at run time. 
*/ qla_cpu_update(rsp->qpair, smp_processor_id()); @@ -710,7 +717,7 @@ qla2x00_sp_free_dma(void *ptr) } end: - if ((sp->type != SRB_NVME_CMD) && (sp->type != SRB_NVME_LS)) { + if (sp->type != SRB_NVME_CMD && sp->type != SRB_NVME_LS) { CMD_SP(cmd) = NULL; qla2x00_rel_sp(sp); } @@ -735,7 +742,7 @@ qla2x00_sp_compl(void *ptr, int res) if (!atomic_dec_and_test(&sp->ref_count)) return; - qla2x00_sp_free_dma(sp); + sp->free(sp); cmd->scsi_done(cmd); } @@ -807,7 +814,7 @@ qla2xxx_qpair_sp_compl(void *ptr, int res) if (!atomic_dec_and_test(&sp->ref_count)) return; - qla2xxx_qpair_sp_free_dma(sp); + sp->free(sp); cmd->scsi_done(cmd); } @@ -928,7 +935,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) return 0; qc24_host_busy_free_sp: - qla2x00_sp_free_dma(sp); + sp->free(sp); qc24_host_busy: return SCSI_MLQUEUE_HOST_BUSY; @@ -1017,7 +1024,7 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd, return 0; qc24_host_busy_free_sp: - qla2xxx_qpair_sp_free_dma(sp); + sp->free(sp); qc24_host_busy: return SCSI_MLQUEUE_HOST_BUSY; @@ -1134,7 +1141,7 @@ qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha) { qla2x00_mark_all_devices_lost(vha, 0); - wait_event(vha->fcport_waitQ, test_fcport_count(vha)); + wait_event_timeout(vha->fcport_waitQ, test_fcport_count(vha), 10*HZ); } /* @@ -1715,8 +1722,8 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) if (sp) { req->outstanding_cmds[cnt] = NULL; if (sp->cmd_type == TYPE_SRB) { - if ((sp->type == SRB_NVME_CMD) || - (sp->type == SRB_NVME_LS)) { + if (sp->type == SRB_NVME_CMD || + sp->type == SRB_NVME_LS) { sp_get(sp); spin_unlock_irqrestore( &ha->hardware_lock, flags); @@ -1725,6 +1732,8 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) &ha->hardware_lock, flags); } else if (GET_CMD_SP(sp) && !ha->flags.eeh_busy && + (!test_bit(ABORT_ISP_ACTIVE, + &vha->dpc_flags)) && (sp->type == SRB_SCSI_CMD)) { /* * Don't abort commands in @@ -2751,6 +2760,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) spin_lock_init(&ha->tgt.sess_lock); spin_lock_init(&ha->tgt.atio_lock); + atomic_set(&ha->nvme_active_aen_cnt, 0); /* Clear our data area */ ha->bars = bars; @@ -3168,7 +3178,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) if (ha->mqenable) { bool mq = false; bool startit = false; - ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1); + ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 0); if (QLA_TGT_MODE_ENABLED()) { mq = true; @@ -3328,6 +3338,13 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) if (test_bit(UNLOADING, &base_vha->dpc_flags)) return -ENODEV; + if (ha->flags.detected_lr_sfp) { + ql_log(ql_log_info, base_vha, 0xffff, + "Reset chip to pick up LR SFP setting\n"); + set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); + qla2xxx_wake_dpc(base_vha); + } + return 0; probe_init_failed: @@ -3383,12 +3400,22 @@ qla2x00_shutdown(struct pci_dev *pdev) scsi_qla_host_t *vha; struct qla_hw_data *ha; - if (!atomic_read(&pdev->enable_cnt)) - return; - vha = pci_get_drvdata(pdev); ha = vha->hw; + ql_log(ql_log_info, vha, 0xfffa, + "Adapter shutdown\n"); + + /* + * Prevent future board_disable and wait + * until any pending board_disable has completed. 
+ */ + set_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags); + cancel_work_sync(&ha->board_disable); + + if (!atomic_read(&pdev->enable_cnt)) + return; + /* Notify ISPFX00 firmware */ if (IS_QLAFX00(ha)) qlafx00_driver_shutdown(vha, 20); @@ -3419,8 +3446,9 @@ qla2x00_shutdown(struct pci_dev *pdev) qla2x00_free_fw_dump(ha); - pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); + ql_log(ql_log_info, vha, 0xfffe, + "Adapter shutdown successfully.\n"); } /* Deletes all the virtual ports for a given ha */ @@ -4006,8 +4034,18 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, "loop_id_map=%p.\n", ha->loop_id_map); } + ha->sfp_data = dma_alloc_coherent(&ha->pdev->dev, + SFP_DEV_SIZE, &ha->sfp_data_dma, GFP_KERNEL); + if (!ha->sfp_data) { + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b, + "Unable to allocate memory for SFP read-data.\n"); + goto fail_sfp_data; + } + return 0; +fail_sfp_data: + kfree(ha->loop_id_map); fail_loop_id_map: dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma); fail_async_pd: @@ -4345,7 +4383,8 @@ qla2x00_mem_free(struct qla_hw_data *ha) ha->ct_sns, ha->ct_sns_dma); if (ha->sfp_data) - dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma); + dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE, ha->sfp_data, + ha->sfp_data_dma); if (ha->ms_iocb) dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); @@ -4638,9 +4677,10 @@ static void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) { unsigned long flags; - fc_port_t *fcport = NULL; + fc_port_t *fcport = NULL, *tfcp; struct qlt_plogi_ack_t *pla = (struct qlt_plogi_ack_t *)e->u.new_sess.pla; + uint8_t free_fcport = 0; spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); fcport = qla2x00_find_fcport_by_wwpn(vha, e->u.new_sess.port_name, 1); @@ -4655,6 +4695,7 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) pla->ref_count--; } } else { + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); if (fcport) { fcport->d_id = e->u.new_sess.id; @@ -4664,6 +4705,29 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) memcpy(fcport->port_name, e->u.new_sess.port_name, WWN_SIZE); + } else { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phC mem alloc fail.\n", + __func__, e->u.new_sess.port_name); + + if (pla) + kmem_cache_free(qla_tgt_plogi_cachep, pla); + return; + } + + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + /* search again to make sure no one else got ahead */ + tfcp = qla2x00_find_fcport_by_wwpn(vha, + e->u.new_sess.port_name, 1); + if (tfcp) { + /* should rarely happen */ + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phC found existing fcport b4 add. 
DS %d LS %d\n", + __func__, tfcp->port_name, tfcp->disc_state, + tfcp->fw_login_state); + + free_fcport = 1; + } else { list_add_tail(&fcport->list, &vha->vp_fcports); if (pla) { @@ -4681,6 +4745,12 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) else qla24xx_async_gnl(vha, fcport); } + + if (free_fcport) { + qla2x00_free_fcport(fcport); + if (pla) + kmem_cache_free(qla_tgt_plogi_cachep, pla); + } } void @@ -5493,6 +5563,13 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work) ql_log(ql_log_warn, base_vha, 0x015b, "Disabling adapter.\n"); + if (!atomic_read(&pdev->enable_cnt)) { + ql_log(ql_log_info, base_vha, 0xfffc, + "PCI device disabled, no action req for PCI error=%lx\n", + base_vha->pci_flags); + return; + } + qla2x00_wait_for_sess_deletion(base_vha); set_bit(UNLOADING, &base_vha->dpc_flags); @@ -5687,6 +5764,16 @@ qla2x00_do_dpc(void *data) } } + if (test_and_clear_bit(DETECT_SFP_CHANGE, + &base_vha->dpc_flags) && + !test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) { + qla24xx_detect_sfp(base_vha); + + if (ha->flags.detected_lr_sfp != + ha->flags.using_lr_setting) + set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); + } + if (test_and_clear_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) { @@ -5828,6 +5915,17 @@ qla2x00_do_dpc(void *data) mutex_unlock(&ha->mq_lock); } + if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED, &base_vha->dpc_flags)) { + ql_log(ql_log_info, base_vha, 0xffffff, + "nvme: SET ZIO Activity exchange threshold to %d.\n", + ha->nvme_last_rptd_aen); + if (qla27xx_set_zio_threshold(base_vha, ha->nvme_last_rptd_aen)) { + ql_log(ql_log_info, base_vha, 0xffffff, + "nvme: Unable to SET ZIO Activity exchange threshold to %d.\n", + ha->nvme_last_rptd_aen); + } + } + if (!IS_QLAFX00(ha)) qla2x00_do_dpc_all_vps(base_vha); @@ -6025,12 +6123,15 @@ qla2x00_timer(scsi_qla_host_t *vha) * FC-NVME * see if the active AEN count has changed from what was last reported. 
*/ - if (atomic_read(&vha->nvme_active_aen_cnt) != vha->nvme_last_rptd_aen) { - vha->nvme_last_rptd_aen = - atomic_read(&vha->nvme_active_aen_cnt); + if (!vha->vp_idx && + atomic_read(&ha->nvme_active_aen_cnt) != ha->nvme_last_rptd_aen && + ha->zio_mode == QLA_ZIO_MODE_6) { ql_log(ql_log_info, vha, 0x3002, - "reporting new aen count of %d to the fw\n", - vha->nvme_last_rptd_aen); + "nvme: Sched: Set ZIO exchange threshold to %d.\n", + ha->nvme_last_rptd_aen); + ha->nvme_last_rptd_aen = atomic_read(&ha->nvme_active_aen_cnt); + set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags); + start_dpc++; } /* Schedule the DPC routine if needed */ @@ -6181,6 +6282,12 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) ql_dbg(ql_dbg_aer, vha, 0x9000, "PCI error detected, state %x.\n", state); + if (!atomic_read(&pdev->enable_cnt)) { + ql_log(ql_log_info, vha, 0xffff, + "PCI device is disabled,state %x\n", state); + return PCI_ERS_RESULT_NEED_RESET; + } + switch (state) { case pci_channel_io_normal: ha->flags.eeh_busy = 0; @@ -6574,6 +6681,8 @@ qla2x00_module_init(void) strcpy(qla2x00_version_str, QLA2XXX_VERSION); if (ql2xextended_error_logging) strcat(qla2x00_version_str, "-debug"); + if (ql2xextended_error_logging == 1) + ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK; qla2xxx_transport_template = fc_attach_transport(&qla2xxx_transport_functions); diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index e101cd3043b9..f05cfc83c9c8 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -145,7 +145,7 @@ static void qlt_send_busy(struct qla_qpair *, struct atio_from_isp *, * Global Variables */ static struct kmem_cache *qla_tgt_mgmt_cmd_cachep; -static struct kmem_cache *qla_tgt_plogi_cachep; +struct kmem_cache *qla_tgt_plogi_cachep; static mempool_t *qla_tgt_mgmt_cmd_mempool; static struct workqueue_struct *qla_tgt_wq; static DEFINE_MUTEX(qla_tgt_mutex); @@ -585,11 +585,13 @@ void qla2x00_async_nack_sp_done(void *s, int res) sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP; sp->fcport->logout_on_delete = 1; sp->fcport->plogi_nack_done_deadline = jiffies + HZ; + sp->fcport->send_els_logo = 0; break; case SRB_NACK_PRLI: sp->fcport->fw_login_state = DSC_LS_PRLI_COMP; sp->fcport->deleted = 0; + sp->fcport->send_els_logo = 0; if (!sp->fcport->login_succ && !IS_SW_RESV_ADDR(sp->fcport->d_id)) { @@ -1479,7 +1481,7 @@ int qlt_stop_phase1(struct qla_tgt *tgt) ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a, "Waiting for tgt %p: sess_count=%d\n", tgt, tgt->sess_count); - wait_event(tgt->waitQ, test_tgt_sess_count(tgt)); + wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ); /* Big hammer */ if (!ha->flags.host_shutting_down && @@ -1487,7 +1489,7 @@ int qlt_stop_phase1(struct qla_tgt *tgt) qlt_disable_vha(vha); /* Wait for sessions to clear out (just in case) */ - wait_event(tgt->waitQ, test_tgt_sess_count(tgt)); + wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ); return 0; } EXPORT_SYMBOL(qlt_stop_phase1); @@ -1528,6 +1530,7 @@ static void qlt_release(struct qla_tgt *tgt) u64 key = 0; u16 i; struct qla_qpair_hint *h; + struct qla_hw_data *ha = vha->hw; if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stop && !tgt->tgt_stopped) @@ -1548,12 +1551,18 @@ static void qlt_release(struct qla_tgt *tgt) } } kfree(tgt->qphints); + mutex_lock(&qla_tgt_mutex); + list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry); + mutex_unlock(&qla_tgt_mutex); btree_for_each_safe64(&tgt->lun_qpair_map, key, node) 
btree_remove64(&tgt->lun_qpair_map, key); btree_destroy64(&tgt->lun_qpair_map); + if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->remove_target) + ha->tgt.tgt_ops->remove_target(vha); + vha->vha_tgt.qla_tgt = NULL; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d, @@ -1901,6 +1910,7 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, mcmd->reset_count = ha->base_qpair->chip_reset; mcmd->tmr_func = QLA_TGT_ABTS; mcmd->qpair = ha->base_qpair; + mcmd->vha = vha; /* * LUN is looked up by target-core internally based on the passed @@ -2003,7 +2013,7 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha, static void qlt_24xx_send_task_mgmt_ctio(struct qla_qpair *qpair, struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code) { - struct scsi_qla_host *ha = qpair->vha; + struct scsi_qla_host *ha = mcmd->vha; struct atio_from_isp *atio = &mcmd->orig_iocb.atio; struct ctio7_to_24xx *ctio; uint16_t temp; @@ -3464,6 +3474,9 @@ static int __qlt_send_term_exchange(struct qla_qpair *qpair, ql_dbg(ql_dbg_tgt, vha, 0xe009, "Sending TERM EXCH CTIO (ha=%p)\n", ha); + if (cmd) + vha = cmd->vha; + pkt = (request_t *)qla2x00_alloc_iocbs_ready(qpair, NULL); if (pkt == NULL) { ql_dbg(ql_dbg_tgt, vha, 0xe050, @@ -4379,6 +4392,7 @@ static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun, mcmd->flags = flags; mcmd->reset_count = ha->base_qpair->chip_reset; mcmd->qpair = ha->base_qpair; + mcmd->vha = vha; switch (fn) { case QLA_TGT_LUN_RESET: @@ -6170,10 +6184,6 @@ int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha) /* free left over qfull cmds */ qlt_init_term_exchange(vha); - mutex_lock(&qla_tgt_mutex); - list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry); - mutex_unlock(&qla_tgt_mutex); - ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)", vha->host_no, ha); qlt_release(vha->vha_tgt.qla_tgt); @@ -6530,7 +6540,6 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked) /* Adjust ring index */ WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index); - RD_REG_DWORD_RELAXED(ISP_ATIO_Q_OUT(vha)); } void @@ -6796,7 +6805,7 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) if (!QLA_TGT_MODE_ENABLED()) return; - if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) { + if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in; ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out; } else { diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index 7fe02d036bdf..aba58d3848a6 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -705,6 +705,7 @@ struct qla_tgt_func_tmpl { int (*get_dif_tags)(struct qla_tgt_cmd *cmd, uint16_t *pfw_prot_opts); int (*chk_dif_tags)(uint32_t tag); void (*add_target)(struct scsi_qla_host *); + void (*remove_target)(struct scsi_qla_host *); }; int qla2x00_wait_for_hba_online(struct scsi_qla_host *); @@ -959,6 +960,7 @@ struct qla_tgt_mgmt_cmd { uint8_t fc_tm_rsp; struct fc_port *sess; struct qla_qpair *qpair; + struct scsi_qla_host *vha; struct se_cmd se_cmd; struct work_struct free_work; unsigned int flags; diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c index b18646d6057f..733e8dcccf5c 100644 --- a/drivers/scsi/qla2xxx/qla_tmpl.c +++ b/drivers/scsi/qla2xxx/qla_tmpl.c @@ -443,8 +443,12 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha, qla27xx_skip_entry(ent, buf); } - if (buf) - ent->t263.num_queues = count; + if (buf) { + if (count) + ent->t263.num_queues = 
count; + else + qla27xx_skip_entry(ent, buf); + } return false; } @@ -692,11 +696,12 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha, qla27xx_skip_entry(ent, buf); } - if (buf) - ent->t274.num_queues = count; - - if (!count) - qla27xx_skip_entry(ent, buf); + if (buf) { + if (count) + ent->t274.num_queues = count; + else + qla27xx_skip_entry(ent, buf); + } return false; } diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index 005a378f7fab..8c4b505c9f66 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -7,7 +7,7 @@ /* * Driver version */ -#define QLA2XXX_VERSION "10.00.00.00-k" +#define QLA2XXX_VERSION "10.00.00.01-k" #define QLA_DRIVER_MAJOR_VER 10 #define QLA_DRIVER_MINOR_VER 0 diff --git a/drivers/scsi/qlogicfas.c b/drivers/scsi/qlogicfas.c index 840823b99e51..95431d605c24 100644 --- a/drivers/scsi/qlogicfas.c +++ b/drivers/scsi/qlogicfas.c @@ -188,7 +188,7 @@ static struct scsi_host_template qlogicfas_driver_template = { .info = qlogicfas408_info, .queuecommand = qlogicfas408_queuecommand, .eh_abort_handler = qlogicfas408_abort, - .eh_bus_reset_handler = qlogicfas408_bus_reset, + .eh_host_reset_handler = qlogicfas408_host_reset, .bios_param = qlogicfas408_biosparam, .can_queue = 1, .this_id = -1, diff --git a/drivers/scsi/qlogicfas408.c b/drivers/scsi/qlogicfas408.c index c3a9151ca823..8b471a925b43 100644 --- a/drivers/scsi/qlogicfas408.c +++ b/drivers/scsi/qlogicfas408.c @@ -496,13 +496,13 @@ int qlogicfas408_abort(struct scsi_cmnd *cmd) return SUCCESS; } -/* +/* * Reset SCSI bus * FIXME: This function is invoked with cmd = NULL directly by * the PCMCIA qlogic_stub code. This wants fixing */ -int qlogicfas408_bus_reset(struct scsi_cmnd *cmd) +int qlogicfas408_host_reset(struct scsi_cmnd *cmd) { struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd); unsigned long flags; @@ -607,7 +607,7 @@ module_exit(qlogicfas408_exit); EXPORT_SYMBOL(qlogicfas408_info); EXPORT_SYMBOL(qlogicfas408_queuecommand); EXPORT_SYMBOL(qlogicfas408_abort); -EXPORT_SYMBOL(qlogicfas408_bus_reset); +EXPORT_SYMBOL(qlogicfas408_host_reset); EXPORT_SYMBOL(qlogicfas408_biosparam); EXPORT_SYMBOL(qlogicfas408_ihandl); EXPORT_SYMBOL(qlogicfas408_get_chip_type); diff --git a/drivers/scsi/qlogicfas408.h b/drivers/scsi/qlogicfas408.h index 2f6c0a166200..f6b1216af79f 100644 --- a/drivers/scsi/qlogicfas408.h +++ b/drivers/scsi/qlogicfas408.h @@ -108,7 +108,7 @@ int qlogicfas408_biosparam(struct scsi_device * disk, struct block_device *dev, sector_t capacity, int ip[]); int qlogicfas408_abort(struct scsi_cmnd * cmd); -int qlogicfas408_bus_reset(struct scsi_cmnd * cmd); +extern int qlogicfas408_host_reset(struct scsi_cmnd *cmd); const char *qlogicfas408_info(struct Scsi_Host *host); int qlogicfas408_get_chip_type(int qbase, int int_type); void qlogicfas408_setup(int qbase, int id, int int_type); diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c index 69bfc0a1aea3..cec9a14982e6 100644 --- a/drivers/scsi/qlogicpti.c +++ b/drivers/scsi/qlogicpti.c @@ -1283,7 +1283,7 @@ static struct scsi_host_template qpti_template = { .queuecommand = qlogicpti_queuecommand, .slave_configure = qlogicpti_slave_configure, .eh_abort_handler = qlogicpti_abort, - .eh_bus_reset_handler = qlogicpti_reset, + .eh_host_reset_handler = qlogicpti_reset, .can_queue = QLOGICPTI_REQ_QUEUE_LEN, .this_id = 7, .sg_tablesize = QLOGICPTI_MAX_SG(QLOGICPTI_REQ_QUEUE_LEN), diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 1bf274e3b2b6..a7e4fba724b7 100644 --- 
a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -411,6 +411,57 @@ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf, } EXPORT_SYMBOL_GPL(scsi_get_vpd_page); +/** + * scsi_get_vpd_buf - Get Vital Product Data from a SCSI device + * @sdev: The device to ask + * @page: Which Vital Product Data to return + * + * Returns %NULL upon failure. + */ +static struct scsi_vpd *scsi_get_vpd_buf(struct scsi_device *sdev, u8 page) +{ + struct scsi_vpd *vpd_buf; + int vpd_len = SCSI_VPD_PG_LEN, result; + +retry_pg: + vpd_buf = kmalloc(sizeof(*vpd_buf) + vpd_len, GFP_KERNEL); + if (!vpd_buf) + return NULL; + + result = scsi_vpd_inquiry(sdev, vpd_buf->data, page, vpd_len); + if (result < 0) { + kfree(vpd_buf); + return NULL; + } + if (result > vpd_len) { + vpd_len = result; + kfree(vpd_buf); + goto retry_pg; + } + + vpd_buf->len = result; + + return vpd_buf; +} + +static void scsi_update_vpd_page(struct scsi_device *sdev, u8 page, + struct scsi_vpd __rcu **sdev_vpd_buf) +{ + struct scsi_vpd *vpd_buf; + + vpd_buf = scsi_get_vpd_buf(sdev, page); + if (!vpd_buf) + return; + + mutex_lock(&sdev->inquiry_mutex); + rcu_swap_protected(*sdev_vpd_buf, vpd_buf, + lockdep_is_held(&sdev->inquiry_mutex)); + mutex_unlock(&sdev->inquiry_mutex); + + if (vpd_buf) + kfree_rcu(vpd_buf, rcu); +} + /** * scsi_attach_vpd - Attach Vital Product Data to a SCSI device structure * @sdev: The device to ask @@ -422,95 +473,24 @@ EXPORT_SYMBOL_GPL(scsi_get_vpd_page); */ void scsi_attach_vpd(struct scsi_device *sdev) { - int result, i; - int vpd_len = SCSI_VPD_PG_LEN; - int pg80_supported = 0; - int pg83_supported = 0; - unsigned char __rcu *vpd_buf, *orig_vpd_buf = NULL; + int i; + struct scsi_vpd *vpd_buf; if (!scsi_device_supports_vpd(sdev)) return; -retry_pg0: - vpd_buf = kmalloc(vpd_len, GFP_KERNEL); + /* Ask for all the pages supported by this device */ + vpd_buf = scsi_get_vpd_buf(sdev, 0); if (!vpd_buf) return; - /* Ask for all the pages supported by this device */ - result = scsi_vpd_inquiry(sdev, vpd_buf, 0, vpd_len); - if (result < 0) { - kfree(vpd_buf); - return; - } - if (result > vpd_len) { - vpd_len = result; - kfree(vpd_buf); - goto retry_pg0; - } - - for (i = 4; i < result; i++) { - if (vpd_buf[i] == 0x80) - pg80_supported = 1; - if (vpd_buf[i] == 0x83) - pg83_supported = 1; + for (i = 4; i < vpd_buf->len; i++) { + if (vpd_buf->data[i] == 0x80) + scsi_update_vpd_page(sdev, 0x80, &sdev->vpd_pg80); + if (vpd_buf->data[i] == 0x83) + scsi_update_vpd_page(sdev, 0x83, &sdev->vpd_pg83); } kfree(vpd_buf); - vpd_len = SCSI_VPD_PG_LEN; - - if (pg80_supported) { -retry_pg80: - vpd_buf = kmalloc(vpd_len, GFP_KERNEL); - if (!vpd_buf) - return; - - result = scsi_vpd_inquiry(sdev, vpd_buf, 0x80, vpd_len); - if (result < 0) { - kfree(vpd_buf); - return; - } - if (result > vpd_len) { - vpd_len = result; - kfree(vpd_buf); - goto retry_pg80; - } - mutex_lock(&sdev->inquiry_mutex); - orig_vpd_buf = sdev->vpd_pg80; - sdev->vpd_pg80_len = result; - rcu_assign_pointer(sdev->vpd_pg80, vpd_buf); - mutex_unlock(&sdev->inquiry_mutex); - synchronize_rcu(); - if (orig_vpd_buf) { - kfree(orig_vpd_buf); - orig_vpd_buf = NULL; - } - vpd_len = SCSI_VPD_PG_LEN; - } - - if (pg83_supported) { -retry_pg83: - vpd_buf = kmalloc(vpd_len, GFP_KERNEL); - if (!vpd_buf) - return; - - result = scsi_vpd_inquiry(sdev, vpd_buf, 0x83, vpd_len); - if (result < 0) { - kfree(vpd_buf); - return; - } - if (result > vpd_len) { - vpd_len = result; - kfree(vpd_buf); - goto retry_pg83; - } - mutex_lock(&sdev->inquiry_mutex); - orig_vpd_buf = 
sdev->vpd_pg83; - sdev->vpd_pg83_len = result; - rcu_assign_pointer(sdev->vpd_pg83, vpd_buf); - mutex_unlock(&sdev->inquiry_mutex); - synchronize_rcu(); - if (orig_vpd_buf) - kfree(orig_vpd_buf); - } } /** diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 3be980d47268..09ba494f8896 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -2261,7 +2261,7 @@ static int resp_ie_l_pg(unsigned char * arr) static int resp_log_sense(struct scsi_cmnd * scp, struct sdebug_dev_info * devip) { - int ppc, sp, pcontrol, pcode, subpcode, alloc_len, len, n; + int ppc, sp, pcode, subpcode, alloc_len, len, n; unsigned char arr[SDEBUG_MAX_LSENSE_SZ]; unsigned char *cmd = scp->cmnd; @@ -2272,7 +2272,6 @@ static int resp_log_sense(struct scsi_cmnd * scp, mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0); return check_condition_result; } - pcontrol = (cmd[2] & 0xc0) >> 6; pcode = cmd[2] & 0x3f; subpcode = cmd[3] & 0xff; alloc_len = get_unaligned_be16(cmd + 7); @@ -5466,7 +5465,7 @@ static int sdebug_driver_probe(struct device * dev) return error; } if (submit_queues > nr_cpu_ids) { - pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%d\n", + pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n", my_name, submit_queues, nr_cpu_ids); submit_queues = nr_cpu_ids; } diff --git a/drivers/scsi/scsi_debugfs.c b/drivers/scsi/scsi_debugfs.c index a97c9507103d..5e9755008aed 100644 --- a/drivers/scsi/scsi_debugfs.c +++ b/drivers/scsi/scsi_debugfs.c @@ -6,8 +6,10 @@ void scsi_show_rq(struct seq_file *m, struct request *rq) { struct scsi_cmnd *cmd = container_of(scsi_req(rq), typeof(*cmd), req); + int msecs = jiffies_to_msecs(jiffies - cmd->jiffies_at_alloc); char buf[80]; __scsi_format_command(buf, sizeof(buf), cmd->cmnd, cmd->cmd_len); - seq_printf(m, ", .cmd=%s", buf); + seq_printf(m, ", .cmd=%s, .retries=%d, allocated %d.%03d s ago", buf, + cmd->retries, msecs / 1000, msecs % 1000); } diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index ea9f40e51f68..dab876c65473 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -259,7 +259,7 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd) */ enum blk_eh_timer_return scsi_times_out(struct request *req) { - struct scsi_cmnd *scmd = req->special; + struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req); enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED; struct Scsi_Host *host = scmd->device->host; @@ -552,6 +552,7 @@ int scsi_check_sense(struct scsi_cmnd *scmd) set_host_byte(scmd, DID_ALLOC_FAILURE); return SUCCESS; } + /* FALLTHROUGH */ case COPY_ABORTED: case VOLUME_OVERFLOW: case MISCOMPARE: @@ -573,12 +574,14 @@ int scsi_check_sense(struct scsi_cmnd *scmd) return ADD_TO_MLQUEUE; else set_host_byte(scmd, DID_TARGET_FAILURE); + /* FALLTHROUGH */ case ILLEGAL_REQUEST: if (sshdr.asc == 0x20 || /* Invalid command operation code */ sshdr.asc == 0x21 || /* Logical block address out of range */ sshdr.asc == 0x24 || /* Invalid field in cdb */ - sshdr.asc == 0x26) { /* Parameter value invalid */ + sshdr.asc == 0x26 || /* Parameter value invalid */ + sshdr.asc == 0x27) { /* Write protected */ set_host_byte(scmd, DID_TARGET_FAILURE); } return SUCCESS; @@ -683,6 +686,7 @@ static int scsi_eh_completed_normally(struct scsi_cmnd *scmd) switch (status_byte(scmd->result)) { case GOOD: scsi_handle_queue_ramp_up(scmd->device); + /* FALLTHROUGH */ case COMMAND_TERMINATED: return SUCCESS; case CHECK_CONDITION: @@ -1734,6 +1738,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd) set_host_byte(scmd, 
DID_TIME_OUT); return SUCCESS; } + /* FALLTHROUGH */ case DID_NO_CONNECT: case DID_BAD_TARGET: /* @@ -1819,6 +1824,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd) * the case of trying to send too many commands to a * tagged queueing device. */ + /* FALLTHROUGH */ case BUSY: /* * device can't talk to us at the moment. Should only @@ -1831,6 +1837,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd) if (scmd->cmnd[0] == REPORT_LUNS) scmd->device->sdev_target->expecting_lun_change = 0; scsi_handle_queue_ramp_up(scmd->device); + /* FALLTHROUGH */ case COMMAND_TERMINATED: return SUCCESS; case TASK_ABORTED: @@ -2320,8 +2327,8 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg) rtn = scsi_try_host_reset(scmd); if (rtn == SUCCESS) break; - default: /* FALLTHROUGH */ + default: rtn = FAILED; break; } diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c index b6bf3f29a12a..0a875491f5a7 100644 --- a/drivers/scsi/scsi_ioctl.c +++ b/drivers/scsi/scsi_ioctl.c @@ -116,13 +116,15 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd, case NOT_READY: /* This happens if there is no disc in drive */ if (sdev->removable) break; + /* FALLTHROUGH */ case UNIT_ATTENTION: if (sdev->removable) { sdev->changed = 1; result = 0; /* This is no longer considered an error */ break; } - default: /* Fall through for non-removable media */ + /* FALLTHROUGH -- for non-removable media */ + default: sdev_printk(KERN_INFO, sdev, "ioctl_internal_command return code = %x\n", result); diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index f6097b89d5d3..9cf6a80fe297 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -44,6 +44,8 @@ static struct kmem_cache *scsi_sense_cache; static struct kmem_cache *scsi_sense_isadma_cache; static DEFINE_MUTEX(scsi_sense_cache_mutex); +static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd); + static inline struct kmem_cache * scsi_select_sense_cache(bool unchecked_isa_dma) { @@ -140,6 +142,12 @@ static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd) { struct scsi_device *sdev = cmd->device; + if (cmd->request->rq_flags & RQF_DONTPREP) { + cmd->request->rq_flags &= ~RQF_DONTPREP; + scsi_mq_uninit_cmd(cmd); + } else { + WARN_ON_ONCE(true); + } blk_mq_requeue_request(cmd->request, true); put_device(&sdev->sdev_gendev); } @@ -627,7 +635,7 @@ static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd) static bool scsi_end_request(struct request *req, blk_status_t error, unsigned int bytes, unsigned int bidi_bytes) { - struct scsi_cmnd *cmd = req->special; + struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req); struct scsi_device *sdev = cmd->device; struct request_queue *q = sdev->request_queue; @@ -642,6 +650,11 @@ static bool scsi_end_request(struct request *req, blk_status_t error, if (blk_queue_add_random(q)) add_disk_randomness(req->rq_disk); + if (!blk_rq_is_scsi(req)) { + WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED)); + cmd->flags &= ~SCMD_INITIALIZED; + } + if (req->mq_ctx) { /* * In the MQ case the command gets freed by __blk_mq_end_request, @@ -977,8 +990,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) * A new command will be prepared and issued. 
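/*
 * Minimal standalone sketch of the refactor visible in this hunk: instead
 * of every caller clearing the "prepared" state before requeueing, the
 * requeue helper itself unprepares the command and warns if it is handed a
 * command that was never prepared (compare scsi_mq_requeue_cmd() above,
 * which now clears RQF_DONTPREP and calls scsi_mq_uninit_cmd() itself).
 * cmd_t, CMD_PREPARED, unprep() and requeue() are invented for the sketch.
 */
#include <stdio.h>

enum { CMD_PREPARED = 1u << 0 };

typedef struct { unsigned int flags; } cmd_t;

static void unprep(cmd_t *c)
{
	c->flags &= ~CMD_PREPARED;
	printf("resources released\n");
}

static void requeue(cmd_t *c)
{
	if (c->flags & CMD_PREPARED)
		unprep(c);		/* single place that undoes prep */
	else
		fprintf(stderr, "WARN: requeue of unprepared command\n");
	printf("requeued\n");
}

int main(void)
{
	cmd_t c = { .flags = CMD_PREPARED };

	requeue(&c);	/* releases resources, then requeues */
	return 0;
}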
*/ if (q->mq_ops) { - cmd->request->rq_flags &= ~RQF_DONTPREP; - scsi_mq_uninit_cmd(cmd); scsi_mq_requeue_cmd(cmd); } else { scsi_release_buffers(cmd); @@ -1107,15 +1118,23 @@ int scsi_init_io(struct scsi_cmnd *cmd) EXPORT_SYMBOL(scsi_init_io); /** - * scsi_initialize_rq - initialize struct scsi_cmnd.req + * scsi_initialize_rq - initialize struct scsi_cmnd partially + * @rq: Request associated with the SCSI command to be initialized. * - * Called from inside blk_get_request(). + * This function initializes the members of struct scsi_cmnd that must be + * initialized before request processing starts and that won't be + * reinitialized if a SCSI command is requeued. + * + * Called from inside blk_get_request() for pass-through requests and from + * inside scsi_init_command() for filesystem requests. */ void scsi_initialize_rq(struct request *rq) { struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); scsi_req_init(&cmd->req); + cmd->jiffies_at_alloc = jiffies; + cmd->retries = 0; } EXPORT_SYMBOL(scsi_initialize_rq); @@ -1153,8 +1172,18 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd) { void *buf = cmd->sense_buffer; void *prot = cmd->prot_sdb; - unsigned int unchecked_isa_dma = cmd->flags & SCMD_UNCHECKED_ISA_DMA; + struct request *rq = blk_mq_rq_from_pdu(cmd); + unsigned int flags = cmd->flags & SCMD_PRESERVED_FLAGS; + unsigned long jiffies_at_alloc; + int retries; + if (!blk_rq_is_scsi(rq) && !(flags & SCMD_INITIALIZED)) { + flags |= SCMD_INITIALIZED; + scsi_initialize_rq(rq); + } + + jiffies_at_alloc = cmd->jiffies_at_alloc; + retries = cmd->retries; /* zero out the cmd, except for the embedded scsi_request */ memset((char *)cmd + sizeof(cmd->req), 0, sizeof(*cmd) - sizeof(cmd->req) + dev->host->hostt->cmd_size); @@ -1162,16 +1191,17 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd) cmd->device = dev; cmd->sense_buffer = buf; cmd->prot_sdb = prot; - cmd->flags = unchecked_isa_dma; + cmd->flags = flags; INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler); - cmd->jiffies_at_alloc = jiffies; + cmd->jiffies_at_alloc = jiffies_at_alloc; + cmd->retries = retries; scsi_add_cmd_to_list(cmd); } static int scsi_setup_scsi_cmnd(struct scsi_device *sdev, struct request *req) { - struct scsi_cmnd *cmd = req->special; + struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req); /* * Passthrough requests may transfer data, in which case they must @@ -1202,7 +1232,7 @@ static int scsi_setup_scsi_cmnd(struct scsi_device *sdev, struct request *req) */ static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req) { - struct scsi_cmnd *cmd = req->special; + struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req); if (unlikely(sdev->handler && sdev->handler->prep_fn)) { int ret = sdev->handler->prep_fn(sdev, req); @@ -1217,7 +1247,7 @@ static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req) static int scsi_setup_cmnd(struct scsi_device *sdev, struct request *req) { - struct scsi_cmnd *cmd = req->special; + struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req); if (!blk_rq_bytes(req)) cmd->sc_data_direction = DMA_NONE; @@ -1349,12 +1379,14 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req) ret = scsi_setup_cmnd(sdev, req); out: + if (ret != BLKPREP_OK) + cmd->flags &= ~SCMD_INITIALIZED; return scsi_prep_return(q, req, ret); } static void scsi_unprep_fn(struct request_queue *q, struct request *req) { - scsi_uninit_cmd(req->special); + scsi_uninit_cmd(blk_mq_rq_to_pdu(req)); } /* @@ -1545,7 +1577,7 @@ static int scsi_lld_busy(struct 
request_queue *q) */ static void scsi_kill_request(struct request *req, struct request_queue *q) { - struct scsi_cmnd *cmd = req->special; + struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req); struct scsi_device *sdev; struct scsi_target *starget; struct Scsi_Host *shost; @@ -1576,7 +1608,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q) static void scsi_softirq_done(struct request *rq) { - struct scsi_cmnd *cmd = rq->special; + struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); unsigned long wait_for = (cmd->allowed + 1) * rq->timeout; int disposition; @@ -1764,8 +1796,8 @@ static void scsi_request_fn(struct request_queue *q) blk_start_request(req); spin_unlock_irq(q->queue_lock); - cmd = req->special; - if (unlikely(cmd == NULL)) { + cmd = blk_mq_rq_to_pdu(req); + if (cmd != req->special) { printk(KERN_CRIT "impossible request in %s.\n" "please mail a stack trace to " "linux-scsi@vger.kernel.org\n", @@ -1868,6 +1900,7 @@ static int scsi_mq_prep_fn(struct request *req) struct scsi_device *sdev = req->q->queuedata; struct Scsi_Host *shost = sdev->host; struct scatterlist *sg; + int ret; scsi_init_command(sdev, cmd); @@ -1901,7 +1934,10 @@ static int scsi_mq_prep_fn(struct request *req) blk_mq_start_request(req); - return scsi_setup_cmnd(sdev, req); + ret = scsi_setup_cmnd(sdev, req); + if (ret != BLK_STS_OK) + cmd->flags &= ~SCMD_INITIALIZED; + return ret; } static void scsi_mq_done(struct scsi_cmnd *cmd) @@ -2001,8 +2037,8 @@ static enum blk_eh_timer_return scsi_timeout(struct request *req, return scsi_times_out(req); } -static int scsi_init_request(struct blk_mq_tag_set *set, struct request *rq, - unsigned int hctx_idx, unsigned int numa_node) +static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq, + unsigned int hctx_idx, unsigned int numa_node) { struct Scsi_Host *shost = set->driver_data; const bool unchecked_isa_dma = shost->unchecked_isa_dma; @@ -2026,8 +2062,8 @@ static int scsi_init_request(struct blk_mq_tag_set *set, struct request *rq, return 0; } -static void scsi_exit_request(struct blk_mq_tag_set *set, struct request *rq, - unsigned int hctx_idx) +static void scsi_mq_exit_request(struct blk_mq_tag_set *set, struct request *rq, + unsigned int hctx_idx) { struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); @@ -2104,7 +2140,8 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q) } EXPORT_SYMBOL_GPL(__scsi_init_queue); -static int scsi_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp) +static int scsi_old_init_rq(struct request_queue *q, struct request *rq, + gfp_t gfp) { struct Scsi_Host *shost = q->rq_alloc_data; const bool unchecked_isa_dma = shost->unchecked_isa_dma; @@ -2134,7 +2171,7 @@ static int scsi_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp) return -ENOMEM; } -static void scsi_exit_rq(struct request_queue *q, struct request *rq) +static void scsi_old_exit_rq(struct request_queue *q, struct request *rq) { struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); @@ -2144,7 +2181,7 @@ static void scsi_exit_rq(struct request_queue *q, struct request *rq) cmd->sense_buffer); } -struct request_queue *scsi_alloc_queue(struct scsi_device *sdev) +struct request_queue *scsi_old_alloc_queue(struct scsi_device *sdev) { struct Scsi_Host *shost = sdev->host; struct request_queue *q; @@ -2155,8 +2192,8 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev) q->cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size; q->rq_alloc_data = shost; q->request_fn = scsi_request_fn; - 
q->init_rq_fn = scsi_init_rq; - q->exit_rq_fn = scsi_exit_rq; + q->init_rq_fn = scsi_old_init_rq; + q->exit_rq_fn = scsi_old_exit_rq; q->initialize_rq_fn = scsi_initialize_rq; if (blk_init_allocated_queue(q) < 0) { @@ -2180,8 +2217,8 @@ static const struct blk_mq_ops scsi_mq_ops = { #ifdef CONFIG_BLK_DEBUG_FS .show_rq = scsi_show_rq, #endif - .init_request = scsi_init_request, - .exit_request = scsi_exit_request, + .init_request = scsi_mq_init_request, + .exit_request = scsi_mq_exit_request, .initialize_rq_fn = scsi_initialize_rq, .map_queues = scsi_map_queues, }; @@ -2547,7 +2584,7 @@ EXPORT_SYMBOL(scsi_test_unit_ready); * @sdev: scsi device to change the state of. * @state: state to change to. * - * Returns zero if unsuccessful or an error if the requested + * Returns zero if successful or an error if the requested * transition is illegal. */ int @@ -2654,6 +2691,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) } sdev->sdev_state = state; + sysfs_notify(&sdev->sdev_gendev.kobj, NULL, "state"); return 0; illegal: @@ -3073,19 +3111,26 @@ int scsi_internal_device_unblock_nowait(struct scsi_device *sdev, * Try to transition the scsi device to SDEV_RUNNING or one of the * offlined states and goose the device queue if successful. */ - if ((sdev->sdev_state == SDEV_BLOCK) || - (sdev->sdev_state == SDEV_TRANSPORT_OFFLINE)) + switch (sdev->sdev_state) { + case SDEV_BLOCK: + case SDEV_TRANSPORT_OFFLINE: sdev->sdev_state = new_state; - else if (sdev->sdev_state == SDEV_CREATED_BLOCK) { + sysfs_notify(&sdev->sdev_gendev.kobj, NULL, "state"); + break; + case SDEV_CREATED_BLOCK: if (new_state == SDEV_TRANSPORT_OFFLINE || new_state == SDEV_OFFLINE) sdev->sdev_state = new_state; else sdev->sdev_state = SDEV_CREATED; - } else if (sdev->sdev_state != SDEV_CANCEL && - sdev->sdev_state != SDEV_OFFLINE) + sysfs_notify(&sdev->sdev_gendev.kobj, NULL, "state"); + break; + case SDEV_CANCEL: + case SDEV_OFFLINE: + break; + default: return -EINVAL; - + } scsi_start_queue(sdev); return 0; @@ -3262,8 +3307,8 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len) { u8 cur_id_type = 0xff; u8 cur_id_size = 0; - unsigned char *d, *cur_id_str; - unsigned char __rcu *vpd_pg83; + const unsigned char *d, *cur_id_str; + const struct scsi_vpd *vpd_pg83; int id_size = -EINVAL; rcu_read_lock(); @@ -3294,8 +3339,8 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len) } memset(id, 0, id_len); - d = vpd_pg83 + 4; - while (d < vpd_pg83 + sdev->vpd_pg83_len) { + d = vpd_pg83->data + 4; + while (d < vpd_pg83->data + vpd_pg83->len) { /* Skip designators not referring to the LUN */ if ((d[1] & 0x30) != 0x00) goto next_desig; @@ -3411,8 +3456,8 @@ EXPORT_SYMBOL(scsi_vpd_lun_id); */ int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id) { - unsigned char *d; - unsigned char __rcu *vpd_pg83; + const unsigned char *d; + const struct scsi_vpd *vpd_pg83; int group_id = -EAGAIN, rel_port = -1; rcu_read_lock(); @@ -3422,8 +3467,8 @@ int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id) return -ENXIO; } - d = sdev->vpd_pg83 + 4; - while (d < sdev->vpd_pg83 + sdev->vpd_pg83_len) { + d = vpd_pg83->data + 4; + while (d < vpd_pg83->data + vpd_pg83->len) { switch (d[1] & 0xf) { case 0x4: /* Relative target port */ diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h index c11c1f9c912c..5c6d016a5ae9 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h @@ -88,7 +88,7 @@ extern void scsi_queue_insert(struct scsi_cmnd *cmd, int reason); extern 
void scsi_io_completion(struct scsi_cmnd *, unsigned int); extern void scsi_run_host_queues(struct Scsi_Host *shost); extern void scsi_requeue_run_queue(struct work_struct *work); -extern struct request_queue *scsi_alloc_queue(struct scsi_device *sdev); +extern struct request_queue *scsi_old_alloc_queue(struct scsi_device *sdev); extern struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev); extern void scsi_start_queue(struct scsi_device *sdev); extern int scsi_mq_setup_tags(struct Scsi_Host *shost); diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index fd88dabd599d..15590a063ad9 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -268,7 +268,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, if (shost_use_blk_mq(shost)) sdev->request_queue = scsi_mq_alloc_queue(sdev); else - sdev->request_queue = scsi_alloc_queue(sdev); + sdev->request_queue = scsi_old_alloc_queue(sdev); if (!sdev->request_queue) { /* release fn is set up in scsi_sysfs_device_initialise, so * have to free and put manually here */ @@ -956,6 +956,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, if (*bflags & BLIST_NO_DIF) sdev->no_dif = 1; + if (*bflags & BLIST_UNMAP_LIMIT_WS) + sdev->unmap_limit_for_ws = 1; + sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT; if (*bflags & BLIST_TRY_VPD_PAGES) diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index d6984df71f1c..bf53356f41f0 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -114,7 +114,7 @@ static int check_set(unsigned long long *val, char *src) { char *last; - if (strncmp(src, "-", 20) == 0) { + if (strcmp(src, "-") == 0) { *val = SCAN_WILD_CARD; } else { /* @@ -303,6 +303,8 @@ store_host_reset(struct device *dev, struct device_attribute *attr, if (sht->host_reset) ret = sht->host_reset(shost, type); + else + ret = -EOPNOTSUPP; exit_store_host_reset: if (ret == 0) @@ -426,6 +428,7 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work) struct scsi_device *sdev; struct device *parent; struct list_head *this, *tmp; + struct scsi_vpd *vpd_pg80 = NULL, *vpd_pg83 = NULL; unsigned long flags; sdev = container_of(work, struct scsi_device, ew.work); @@ -454,8 +457,17 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work) /* NULL queue means the device can't be used */ sdev->request_queue = NULL; - kfree(sdev->vpd_pg83); - kfree(sdev->vpd_pg80); + mutex_lock(&sdev->inquiry_mutex); + rcu_swap_protected(sdev->vpd_pg80, vpd_pg80, + lockdep_is_held(&sdev->inquiry_mutex)); + rcu_swap_protected(sdev->vpd_pg83, vpd_pg83, + lockdep_is_held(&sdev->inquiry_mutex)); + mutex_unlock(&sdev->inquiry_mutex); + + if (vpd_pg83) + kfree_rcu(vpd_pg83, rcu); + if (vpd_pg80) + kfree_rcu(vpd_pg80, rcu); kfree(sdev->inquiry); kfree(sdev); @@ -793,15 +805,16 @@ show_vpd_##_page(struct file *filp, struct kobject *kobj, \ { \ struct device *dev = container_of(kobj, struct device, kobj); \ struct scsi_device *sdev = to_scsi_device(dev); \ - int ret; \ - if (!sdev->vpd_##_page) \ - return -EINVAL; \ + struct scsi_vpd *vpd_page; \ + int ret = -EINVAL; \ + \ rcu_read_lock(); \ - ret = memory_read_from_buffer(buf, count, &off, \ - rcu_dereference(sdev->vpd_##_page), \ - sdev->vpd_##_page##_len); \ + vpd_page = rcu_dereference(sdev->vpd_##_page); \ + if (vpd_page) \ + ret = memory_read_from_buffer(buf, count, &off, \ + vpd_page->data, vpd_page->len); \ rcu_read_unlock(); \ - return ret; \ + return ret; \ } \ static struct 
bin_attribute dev_attr_vpd_##_page = { \ .attr = {.name = __stringify(vpd_##_page), .mode = S_IRUGO }, \ diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 892fbd9800d9..cbd4495d0ff9 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -2739,7 +2739,8 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel, list_for_each_entry(rport, &fc_host->rports, peers) { - if ((rport->port_state == FC_PORTSTATE_BLOCKED) && + if ((rport->port_state == FC_PORTSTATE_BLOCKED || + rport->port_state == FC_PORTSTATE_NOTPRESENT) && (rport->channel == channel)) { switch (fc_host->tgtid_bind_type) { @@ -2876,7 +2877,6 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel, memcpy(&rport->port_name, &ids->port_name, sizeof(rport->port_name)); rport->port_id = ids->port_id; - rport->roles = ids->roles; rport->port_state = FC_PORTSTATE_ONLINE; rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT; @@ -2885,15 +2885,7 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel, fci->f->dd_fcrport_size); spin_unlock_irqrestore(shost->host_lock, flags); - if (ids->roles & FC_PORT_ROLE_FCP_TARGET) { - scsi_target_unblock(&rport->dev, SDEV_RUNNING); - - /* initiate a scan of the target */ - spin_lock_irqsave(shost->host_lock, flags); - rport->flags |= FC_RPORT_SCAN_PENDING; - scsi_queue_work(shost, &rport->scan_work); - spin_unlock_irqrestore(shost->host_lock, flags); - } + fc_remote_port_rolechg(rport, ids->roles); return rport; } } @@ -3276,8 +3268,8 @@ fc_scsi_scan_rport(struct work_struct *work) } /** - * fc_block_scsi_eh - Block SCSI eh thread for blocked fc_rport - * @cmnd: SCSI command that scsi_eh is trying to recover + * fc_block_rport() - Block SCSI eh thread for blocked fc_rport. + * @rport: Remote port that scsi_eh is trying to recover. * * This routine can be called from a FC LLD scsi_eh callback. It * blocks the scsi_eh thread until the fc_rport leaves the @@ -3289,10 +3281,9 @@ fc_scsi_scan_rport(struct work_struct *work) * FAST_IO_FAIL if the fast_io_fail_tmo fired, this should be * passed back to scsi_eh. */ -int fc_block_scsi_eh(struct scsi_cmnd *cmnd) +int fc_block_rport(struct fc_rport *rport) { - struct Scsi_Host *shost = cmnd->device->host; - struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); + struct Scsi_Host *shost = rport_to_shost(rport); unsigned long flags; spin_lock_irqsave(shost->host_lock, flags); @@ -3309,6 +3300,28 @@ int fc_block_scsi_eh(struct scsi_cmnd *cmnd) return 0; } +EXPORT_SYMBOL(fc_block_rport); + +/** + * fc_block_scsi_eh - Block SCSI eh thread for blocked fc_rport + * @cmnd: SCSI command that scsi_eh is trying to recover + * + * This routine can be called from a FC LLD scsi_eh callback. It + * blocks the scsi_eh thread until the fc_rport leaves the + * FC_PORTSTATE_BLOCKED, or the fast_io_fail_tmo fires. This is + * necessary to avoid the scsi_eh failing recovery actions for blocked + * rports which would lead to offlined SCSI devices. + * + * Returns: 0 if the fc_rport left the state FC_PORTSTATE_BLOCKED. + * FAST_IO_FAIL if the fast_io_fail_tmo fired, this should be + * passed back to scsi_eh. 
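/*
 * Hedged usage sketch, not taken from any in-tree LLD and not standalone
 * compilable (kernel context assumed): with the new fc_block_rport() an FC
 * driver that already holds the rport can wait for it to leave
 * FC_PORTSTATE_BLOCKED without needing a scsi_cmnd, for example from a
 * reset handler.  my_lld_device_reset() is an invented name.
 */
static int my_lld_device_reset(struct scsi_cmnd *cmnd)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	int ret;

	ret = fc_block_rport(rport);	/* 0 or FAST_IO_FAIL */
	if (ret)
		return ret;		/* fast_io_fail_tmo fired */

	/* ... issue the actual reset to the hardware here ... */
	return SUCCESS;
}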
+ */ +int fc_block_scsi_eh(struct scsi_cmnd *cmnd) +{ + struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); + + return fc_block_rport(rport); +} EXPORT_SYMBOL(fc_block_scsi_eh); /** @@ -3550,7 +3563,7 @@ fc_vport_sched_delete(struct work_struct *work) static enum blk_eh_timer_return fc_bsg_job_timeout(struct request *req) { - struct bsg_job *job = (void *) req->special; + struct bsg_job *job = blk_mq_rq_to_pdu(req); struct Scsi_Host *shost = fc_bsg_to_shost(job); struct fc_rport *rport = fc_bsg_to_rport(job); struct fc_internal *i = to_fc_internal(shost->transportt); @@ -3763,7 +3776,8 @@ fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host) snprintf(bsg_name, sizeof(bsg_name), "fc_host%d", shost->host_no); - q = bsg_setup_queue(dev, bsg_name, fc_bsg_dispatch, i->f->dd_bsg_size); + q = bsg_setup_queue(dev, bsg_name, fc_bsg_dispatch, i->f->dd_bsg_size, + NULL); if (IS_ERR(q)) { dev_err(dev, "fc_host%d: bsg interface failed to initialize - setup queue\n", @@ -3808,7 +3822,8 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport) if (!i->f->bsg_request) return -ENOTSUPP; - q = bsg_setup_queue(dev, NULL, fc_bsg_dispatch, i->f->dd_bsg_size); + q = bsg_setup_queue(dev, NULL, fc_bsg_dispatch, i->f->dd_bsg_size, + NULL); if (IS_ERR(q)) { dev_err(dev, "failed to setup bsg queue\n"); return PTR_ERR(q); diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index a424eaeafeb0..7404d26895f5 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -1009,7 +1009,7 @@ static void iscsi_flashnode_sess_release(struct device *dev) kfree(fnode_sess); } -static struct device_type iscsi_flashnode_sess_dev_type = { +static const struct device_type iscsi_flashnode_sess_dev_type = { .name = "iscsi_flashnode_sess_dev_type", .groups = iscsi_flashnode_sess_attr_groups, .release = iscsi_flashnode_sess_release, @@ -1195,7 +1195,7 @@ static void iscsi_flashnode_conn_release(struct device *dev) kfree(fnode_conn); } -static struct device_type iscsi_flashnode_conn_dev_type = { +static const struct device_type iscsi_flashnode_conn_dev_type = { .name = "iscsi_flashnode_conn_dev_type", .groups = iscsi_flashnode_conn_attr_groups, .release = iscsi_flashnode_conn_release, @@ -1542,7 +1542,7 @@ iscsi_bsg_host_add(struct Scsi_Host *shost, struct iscsi_cls_host *ihost) return -ENOTSUPP; snprintf(bsg_name, sizeof(bsg_name), "iscsi_host%d", shost->host_no); - q = bsg_setup_queue(dev, bsg_name, iscsi_bsg_host_dispatch, 0); + q = bsg_setup_queue(dev, bsg_name, iscsi_bsg_host_dispatch, 0, NULL); if (IS_ERR(q)) { shost_printk(KERN_ERR, shost, "bsg interface failed to " "initialize - no request queue\n"); @@ -2210,22 +2210,6 @@ void iscsi_free_session(struct iscsi_cls_session *session) } EXPORT_SYMBOL_GPL(iscsi_free_session); -/** - * iscsi_destroy_session - destroy iscsi session - * @session: iscsi_session - * - * Can be called by a LLD or iscsi_transport. There must not be - * any running connections. 
- */ -int iscsi_destroy_session(struct iscsi_cls_session *session) -{ - iscsi_remove_session(session); - ISCSI_DBG_TRANS_SESSION(session, "Completing session destruction\n"); - iscsi_free_session(session); - return 0; -} -EXPORT_SYMBOL_GPL(iscsi_destroy_session); - /** * iscsi_create_conn - create iscsi class connection * @session: iscsi cls session @@ -3689,7 +3673,7 @@ iscsi_if_rx(struct sk_buff *skb) uint32_t group; nlh = nlmsg_hdr(skb); - if (nlh->nlmsg_len < sizeof(*nlh) || + if (nlh->nlmsg_len < sizeof(*nlh) + sizeof(*ev) || skb->len < nlh->nlmsg_len) { break; } diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index 5006a656e16a..319dff970237 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c @@ -169,39 +169,22 @@ static struct sas_end_device *sas_sdev_to_rdev(struct scsi_device *sdev) return rdev; } -static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost, - struct sas_rphy *rphy) +static int sas_smp_dispatch(struct bsg_job *job) { - struct request *req; - blk_status_t ret; - int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *); + struct Scsi_Host *shost = dev_to_shost(job->dev); + struct sas_rphy *rphy = NULL; - while ((req = blk_fetch_request(q)) != NULL) { - spin_unlock_irq(q->queue_lock); + if (!scsi_is_host_device(job->dev)) + rphy = dev_to_rphy(job->dev); - scsi_req(req)->resid_len = blk_rq_bytes(req); - if (req->next_rq) - scsi_req(req->next_rq)->resid_len = - blk_rq_bytes(req->next_rq); - handler = to_sas_internal(shost->transportt)->f->smp_handler; - ret = handler(shost, rphy, req); - scsi_req(req)->result = ret; - - blk_end_request_all(req, 0); - - spin_lock_irq(q->queue_lock); + if (!job->req->next_rq) { + dev_warn(job->dev, "space for a smp response is missing\n"); + bsg_job_done(job, -EINVAL, 0); + return 0; } -} -static void sas_host_smp_request(struct request_queue *q) -{ - sas_smp_request(q, (struct Scsi_Host *)q->queuedata, NULL); -} - -static void sas_non_host_smp_request(struct request_queue *q) -{ - struct sas_rphy *rphy = q->queuedata; - sas_smp_request(q, rphy_to_shost(rphy), rphy); + to_sas_internal(shost->transportt)->f->smp_handler(job, shost, rphy); + return 0; } static void sas_host_release(struct device *dev) @@ -217,81 +200,36 @@ static void sas_host_release(struct device *dev) static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy) { struct request_queue *q; - int error; - struct device *dev; - char namebuf[20]; - const char *name; - void (*release)(struct device *); if (!to_sas_internal(shost->transportt)->f->smp_handler) { printk("%s can't handle SMP requests\n", shost->hostt->name); return 0; } - q = blk_alloc_queue(GFP_KERNEL); - if (!q) - return -ENOMEM; - q->initialize_rq_fn = scsi_initialize_rq; - q->cmd_size = sizeof(struct scsi_request); - if (rphy) { - q->request_fn = sas_non_host_smp_request; - dev = &rphy->dev; - name = dev_name(dev); - release = NULL; + q = bsg_setup_queue(&rphy->dev, dev_name(&rphy->dev), + sas_smp_dispatch, 0, NULL); + if (IS_ERR(q)) + return PTR_ERR(q); + rphy->q = q; } else { - q->request_fn = sas_host_smp_request; - dev = &shost->shost_gendev; - snprintf(namebuf, sizeof(namebuf), - "sas_host%d", shost->host_no); - name = namebuf; - release = sas_host_release; + char name[20]; + + snprintf(name, sizeof(name), "sas_host%d", shost->host_no); + q = bsg_setup_queue(&shost->shost_gendev, name, + sas_smp_dispatch, 0, sas_host_release); + if (IS_ERR(q)) + return PTR_ERR(q); + 
to_sas_host_attrs(shost)->q = q; } - error = blk_init_allocated_queue(q); - if (error) - goto out_cleanup_queue; /* * by default assume old behaviour and bounce for any highmem page */ blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); - - error = bsg_register_queue(q, dev, name, release); - if (error) - goto out_cleanup_queue; - - if (rphy) - rphy->q = q; - else - to_sas_host_attrs(shost)->q = q; - - if (rphy) - q->queuedata = rphy; - else - q->queuedata = shost; - queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q); queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q); return 0; - -out_cleanup_queue: - blk_cleanup_queue(q); - return error; -} - -static void sas_bsg_remove(struct Scsi_Host *shost, struct sas_rphy *rphy) -{ - struct request_queue *q; - - if (rphy) - q = rphy->q; - else - q = to_sas_host_attrs(shost)->q; - - if (!q) - return; - - bsg_unregister_queue(q); } /* @@ -321,9 +259,10 @@ static int sas_host_remove(struct transport_container *tc, struct device *dev, struct device *cdev) { struct Scsi_Host *shost = dev_to_shost(dev); + struct request_queue *q = to_sas_host_attrs(shost)->q; - sas_bsg_remove(shost, NULL); - + if (q) + bsg_unregister_queue(q); return 0; } @@ -421,6 +360,9 @@ sas_tlr_supported(struct scsi_device *sdev) char *buffer = kzalloc(vpd_len, GFP_KERNEL); int ret = 0; + if (!buffer) + goto out; + if (scsi_get_vpd_page(sdev, 0x90, buffer, vpd_len)) goto out; @@ -1710,7 +1652,8 @@ sas_rphy_remove(struct sas_rphy *rphy) } sas_rphy_unlink(rphy); - sas_bsg_remove(NULL, rphy); + if (rphy->q) + bsg_unregister_queue(rphy->q); transport_remove_device(dev); device_del(dev); } diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c index f617021c94f7..4f6f01cf9968 100644 --- a/drivers/scsi/scsi_transport_srp.c +++ b/drivers/scsi/scsi_transport_srp.c @@ -78,7 +78,7 @@ static inline struct srp_rport *shost_to_rport(struct Scsi_Host *shost) * parameters must be such that multipath can detect failed paths timely. * Hence do not allow all three parameters to be disabled simultaneously. 
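/*
 * Standalone sketch of the rule stated in the comment above: at least one
 * of the three timeouts must stay enabled so multipath can still detect a
 * failed path in time.  It mirrors only the check visible in this hunk of
 * srp_tmo_valid(); the test harness around it is invented.
 */
#include <errno.h>
#include <stdio.h>

static int tmo_valid(int reconnect_delay, int fast_io_fail_tmo,
		     long dev_loss_tmo)
{
	/* all three disabled (negative) -> reject */
	if (reconnect_delay < 0 && fast_io_fail_tmo < 0 && dev_loss_tmo < 0)
		return -EINVAL;
	return 0;
}

int main(void)
{
	printf("%d\n", tmo_valid(-1, -1, -1));	/* -EINVAL */
	printf("%d\n", tmo_valid(10, -1, -1));	/* 0 */
	return 0;
}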
*/ -int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo, int dev_loss_tmo) +int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo, long dev_loss_tmo) { if (reconnect_delay < 0 && fast_io_fail_tmo < 0 && dev_loss_tmo < 0) return -EINVAL; @@ -556,8 +556,11 @@ int srp_reconnect_rport(struct srp_rport *rport) */ shost_for_each_device(sdev, shost) { mutex_lock(&sdev->state_mutex); - if (sdev->sdev_state == SDEV_OFFLINE) + if (sdev->sdev_state == SDEV_OFFLINE) { sdev->sdev_state = SDEV_RUNNING; + sysfs_notify(&sdev->sdev_gendev.kobj, + NULL, "state"); + } mutex_unlock(&sdev->state_mutex); } } else if (rport->state == SRP_RPORT_RUNNING) { diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index e2647f2d4430..d175c5c5ccf8 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -312,7 +312,7 @@ protection_type_store(struct device *dev, struct device_attribute *attr, if (err) return err; - if (val >= 0 && val <= T10_PI_TYPE3_PROTECTION) + if (val <= T10_PI_TYPE3_PROTECTION) sdkp->protection_type = val; return count; @@ -715,13 +715,21 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode) break; case SD_LBP_WS16: - max_blocks = min_not_zero(sdkp->max_ws_blocks, - (u32)SD_MAX_WS16_BLOCKS); + if (sdkp->device->unmap_limit_for_ws) + max_blocks = sdkp->max_unmap_blocks; + else + max_blocks = sdkp->max_ws_blocks; + + max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS16_BLOCKS); break; case SD_LBP_WS10: - max_blocks = min_not_zero(sdkp->max_ws_blocks, - (u32)SD_MAX_WS10_BLOCKS); + if (sdkp->device->unmap_limit_for_ws) + max_blocks = sdkp->max_unmap_blocks; + else + max_blocks = sdkp->max_ws_blocks; + + max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS10_BLOCKS); break; case SD_LBP_ZERO: @@ -1013,7 +1021,7 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt) ret = scsi_init_io(SCpnt); if (ret != BLKPREP_OK) goto out; - SCpnt = rq->special; + WARN_ON_ONCE(SCpnt != rq->special); /* from here on until we're complete, any goto out * is used for a killable error condition */ @@ -2915,8 +2923,6 @@ static void sd_read_block_limits(struct scsi_disk *sdkp) sd_config_discard(sdkp, SD_LBP_WS16); else if (sdkp->lbpws10) sd_config_discard(sdkp, SD_LBP_WS10); - else if (sdkp->lbpu && sdkp->max_unmap_blocks) - sd_config_discard(sdkp, SD_LBP_UNMAP); else sd_config_discard(sdkp, SD_LBP_DISABLE); } @@ -3101,8 +3107,6 @@ static int sd_revalidate_disk(struct gendisk *disk) sd_read_security(sdkp, buffer); } - sdkp->first_scan = 0; - /* * We now have all cache related info, determine how we deal * with flush requests. @@ -3117,7 +3121,7 @@ static int sd_revalidate_disk(struct gendisk *disk) q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max); /* - * Use the device's preferred I/O size for reads and writes + * Determine the device's preferred I/O size for reads and writes * unless the reported value is unreasonably small, large, or * garbage. */ @@ -3131,8 +3135,19 @@ static int sd_revalidate_disk(struct gendisk *disk) rw_max = min_not_zero(logical_to_sectors(sdp, dev_max), (sector_t)BLK_DEF_MAX_SECTORS); - /* Combine with controller limits */ - q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q)); + /* Do not exceed controller limit */ + rw_max = min(rw_max, queue_max_hw_sectors(q)); + + /* + * Only update max_sectors if previously unset or if the current value + * exceeds the capabilities of the hardware. 
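/*
 * Userspace sketch of the policy described in the comment above: rw_max is
 * the newly computed limit, while max_sectors is what the queue currently
 * advertises.  On the first scan, or when the current value exceeds what
 * the device or controller allows, the computed limit is applied;
 * otherwise a value an administrator may have lowered via sysfs survives
 * revalidation.  The struct and helper names are invented.
 */
#include <stdbool.h>
#include <stdio.h>

struct limits {
	unsigned int max_sectors;	/* currently effective limit */
	unsigned int max_dev_sectors;	/* device capability */
	unsigned int max_hw_sectors;	/* controller capability */
};

static void update_max_sectors(struct limits *l, unsigned int rw_max,
			       bool first_scan)
{
	if (first_scan ||
	    l->max_sectors > l->max_dev_sectors ||
	    l->max_sectors > l->max_hw_sectors)
		l->max_sectors = rw_max;	/* (re)apply computed limit */
	/* else: keep the possibly user-lowered value */
}

int main(void)
{
	struct limits l = { .max_sectors = 512, .max_dev_sectors = 2048,
			    .max_hw_sectors = 2048 };

	update_max_sectors(&l, 1024, false);	/* user value preserved */
	printf("%u\n", l.max_sectors);		/* 512 */
	update_max_sectors(&l, 1024, true);	/* first scan: applied */
	printf("%u\n", l.max_sectors);		/* 1024 */
	return 0;
}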
+ */ + if (sdkp->first_scan || + q->limits.max_sectors > q->limits.max_dev_sectors || + q->limits.max_sectors > q->limits.max_hw_sectors) + q->limits.max_sectors = rw_max; + + sdkp->first_scan = 0; set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity)); sd_config_write_same(sdkp); @@ -3226,7 +3241,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie) gd->major = sd_major((index & 0xf0) >> 4); gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00); - gd->minors = SD_MINORS; gd->fops = &sd_fops; gd->private_data = &sdkp->driver; @@ -3540,7 +3554,7 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors) * doesn't support sync. There's not much to do and * suspend shouldn't fail. */ - ret = 0; + ret = 0; } } diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index 8927f9f54ad9..11826c5c2dd4 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c @@ -51,6 +51,13 @@ struct ses_component { u64 addr; }; +static bool ses_page2_supported(struct enclosure_device *edev) +{ + struct ses_device *ses_dev = edev->scratch; + + return (ses_dev->page2 != NULL); +} + static int ses_probe(struct device *dev) { struct scsi_device *sdev = to_scsi_device(dev); @@ -179,7 +186,8 @@ static unsigned char *ses_get_page2_descriptor(struct enclosure_device *edev, unsigned char *type_ptr = ses_dev->page1_types; unsigned char *desc_ptr = ses_dev->page2 + 8; - ses_recv_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len); + if (ses_recv_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len) < 0) + return NULL; for (i = 0; i < ses_dev->page1_num_types; i++, type_ptr += 4) { for (j = 0; j < type_ptr[1]; j++) { @@ -203,6 +211,10 @@ static void ses_get_fault(struct enclosure_device *edev, { unsigned char *desc; + if (!ses_page2_supported(edev)) { + ecomp->fault = 0; + return; + } desc = ses_get_page2_descriptor(edev, ecomp); if (desc) ecomp->fault = (desc[3] & 0x60) >> 4; @@ -215,6 +227,9 @@ static int ses_set_fault(struct enclosure_device *edev, unsigned char desc[4]; unsigned char *desc_ptr; + if (!ses_page2_supported(edev)) + return -EINVAL; + desc_ptr = ses_get_page2_descriptor(edev, ecomp); if (!desc_ptr) @@ -242,6 +257,10 @@ static void ses_get_status(struct enclosure_device *edev, { unsigned char *desc; + if (!ses_page2_supported(edev)) { + ecomp->status = 0; + return; + } desc = ses_get_page2_descriptor(edev, ecomp); if (desc) ecomp->status = (desc[0] & 0x0f); @@ -252,6 +271,10 @@ static void ses_get_locate(struct enclosure_device *edev, { unsigned char *desc; + if (!ses_page2_supported(edev)) { + ecomp->locate = 0; + return; + } desc = ses_get_page2_descriptor(edev, ecomp); if (desc) ecomp->locate = (desc[2] & 0x02) ? 1 : 0; @@ -264,6 +287,9 @@ static int ses_set_locate(struct enclosure_device *edev, unsigned char desc[4]; unsigned char *desc_ptr; + if (!ses_page2_supported(edev)) + return -EINVAL; + desc_ptr = ses_get_page2_descriptor(edev, ecomp); if (!desc_ptr) @@ -292,6 +318,9 @@ static int ses_set_active(struct enclosure_device *edev, unsigned char desc[4]; unsigned char *desc_ptr; + if (!ses_page2_supported(edev)) + return -EINVAL; + desc_ptr = ses_get_page2_descriptor(edev, ecomp); if (!desc_ptr) @@ -328,6 +357,11 @@ static void ses_get_power_status(struct enclosure_device *edev, { unsigned char *desc; + if (!ses_page2_supported(edev)) { + ecomp->power_status = 0; + return; + } + desc = ses_get_page2_descriptor(edev, ecomp); if (desc) ecomp->power_status = (desc[3] & 0x10) ? 
0 : 1; @@ -340,6 +374,9 @@ static int ses_set_power_status(struct enclosure_device *edev, unsigned char desc[4]; unsigned char *desc_ptr; + if (!ses_page2_supported(edev)) + return -EINVAL; + desc_ptr = ses_get_page2_descriptor(edev, ecomp); if (!desc_ptr) @@ -601,7 +638,7 @@ static int ses_intf_add(struct device *cdev, { struct scsi_device *sdev = to_scsi_device(cdev->parent); struct scsi_device *tmp_sdev; - unsigned char *buf = NULL, *hdr_buf, *type_ptr; + unsigned char *buf = NULL, *hdr_buf, *type_ptr, page; struct ses_device *ses_dev; u32 result; int i, types, len, components = 0; @@ -630,7 +667,8 @@ static int ses_intf_add(struct device *cdev, if (!hdr_buf || !ses_dev) goto err_init_free; - result = ses_recv_diag(sdev, 1, hdr_buf, INIT_ALLOC_SIZE); + page = 1; + result = ses_recv_diag(sdev, page, hdr_buf, INIT_ALLOC_SIZE); if (result) goto recv_failed; @@ -639,7 +677,7 @@ static int ses_intf_add(struct device *cdev, if (!buf) goto err_free; - result = ses_recv_diag(sdev, 1, buf, len); + result = ses_recv_diag(sdev, page, buf, len); if (result) goto recv_failed; @@ -669,9 +707,10 @@ static int ses_intf_add(struct device *cdev, ses_dev->page1_len = len; buf = NULL; - result = ses_recv_diag(sdev, 2, hdr_buf, INIT_ALLOC_SIZE); + page = 2; + result = ses_recv_diag(sdev, page, hdr_buf, INIT_ALLOC_SIZE); if (result) - goto recv_failed; + goto page2_not_supported; len = (hdr_buf[2] << 8) + hdr_buf[3] + 4; buf = kzalloc(len, GFP_KERNEL); @@ -688,7 +727,8 @@ static int ses_intf_add(struct device *cdev, /* The additional information page --- allows us * to match up the devices */ - result = ses_recv_diag(sdev, 10, hdr_buf, INIT_ALLOC_SIZE); + page = 10; + result = ses_recv_diag(sdev, page, hdr_buf, INIT_ALLOC_SIZE); if (!result) { len = (hdr_buf[2] << 8) + hdr_buf[3] + 4; @@ -696,13 +736,14 @@ static int ses_intf_add(struct device *cdev, if (!buf) goto err_free; - result = ses_recv_diag(sdev, 10, buf, len); + result = ses_recv_diag(sdev, page, buf, len); if (result) goto recv_failed; ses_dev->page10 = buf; ses_dev->page10_len = len; buf = NULL; } +page2_not_supported: scomp = kzalloc(sizeof(struct ses_component) * components, GFP_KERNEL); if (!scomp) goto err_free; @@ -734,7 +775,7 @@ static int ses_intf_add(struct device *cdev, recv_failed: sdev_printk(KERN_ERR, sdev, "Failed to get diagnostic page 0x%x\n", - result); + page); err = -ENODEV; err_free: kfree(buf); @@ -777,8 +818,6 @@ static void ses_intf_remove_enclosure(struct scsi_device *sdev) if (!edev) return; - enclosure_unregister(edev); - ses_dev = edev->scratch; edev->scratch = NULL; @@ -790,6 +829,7 @@ static void ses_intf_remove_enclosure(struct scsi_device *sdev) kfree(edev->component[0].scratch); put_device(&edev->edev); + enclosure_unregister(edev); } static void ses_intf_remove(struct device *cdev, diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 84e782d8e7c3..0419c2298eab 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -828,6 +828,39 @@ static int max_sectors_bytes(struct request_queue *q) return max_sectors << 9; } +static void +sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo) +{ + Sg_request *srp; + int val; + unsigned int ms; + + val = 0; + list_for_each_entry(srp, &sfp->rq_list, entry) { + if (val > SG_MAX_QUEUE) + break; + rinfo[val].req_state = srp->done + 1; + rinfo[val].problem = + srp->header.masked_status & + srp->header.host_status & + srp->header.driver_status; + if (srp->done) + rinfo[val].duration = + srp->header.duration; + else { + ms = jiffies_to_msecs(jiffies); + rinfo[val].duration = + 
(ms > srp->header.duration) ? + (ms - srp->header.duration) : 0; + } + rinfo[val].orphan = srp->orphan; + rinfo[val].sg_io_owned = srp->sg_io_owned; + rinfo[val].pack_id = srp->header.pack_id; + rinfo[val].usr_ptr = srp->header.usr_ptr; + val++; + } +} + static long sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) { @@ -1012,38 +1045,13 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) return -EFAULT; else { sg_req_info_t *rinfo; - unsigned int ms; - rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE, - GFP_KERNEL); + rinfo = kzalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE, + GFP_KERNEL); if (!rinfo) return -ENOMEM; read_lock_irqsave(&sfp->rq_list_lock, iflags); - val = 0; - list_for_each_entry(srp, &sfp->rq_list, entry) { - if (val >= SG_MAX_QUEUE) - break; - memset(&rinfo[val], 0, SZ_SG_REQ_INFO); - rinfo[val].req_state = srp->done + 1; - rinfo[val].problem = - srp->header.masked_status & - srp->header.host_status & - srp->header.driver_status; - if (srp->done) - rinfo[val].duration = - srp->header.duration; - else { - ms = jiffies_to_msecs(jiffies); - rinfo[val].duration = - (ms > srp->header.duration) ? - (ms - srp->header.duration) : 0; - } - rinfo[val].orphan = srp->orphan; - rinfo[val].sg_io_owned = srp->sg_io_owned; - rinfo[val].pack_id = srp->header.pack_id; - rinfo[val].usr_ptr = srp->header.usr_ptr; - val++; - } + sg_fill_request_table(sfp, rinfo); read_unlock_irqrestore(&sfp->rq_list_lock, iflags); result = __copy_to_user(p, rinfo, SZ_SG_REQ_INFO * SG_MAX_QUEUE); @@ -1081,8 +1089,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) return blk_trace_setup(sdp->device->request_queue, sdp->disk->disk_name, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), - NULL, - (char *)arg); + NULL, p); case BLKTRACESTART: return blk_trace_startstop(sdp->device->request_queue, 1); case BLKTRACESTOP: @@ -1233,6 +1240,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma) unsigned long req_sz, len, sa; Sg_scatter_hold *rsv_schp; int k, length; + int ret = 0; if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data))) return -ENXIO; @@ -1243,8 +1251,11 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma) if (vma->vm_pgoff) return -EINVAL; /* want no offset */ rsv_schp = &sfp->reserve; - if (req_sz > rsv_schp->bufflen) - return -ENOMEM; /* cannot map more than reserved buffer */ + mutex_lock(&sfp->f_mutex); + if (req_sz > rsv_schp->bufflen) { + ret = -ENOMEM; /* cannot map more than reserved buffer */ + goto out; + } sa = vma->vm_start; length = 1 << (PAGE_SHIFT + rsv_schp->page_order); @@ -1258,7 +1269,9 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma) vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; vma->vm_private_data = sfp; vma->vm_ops = &sg_mmap_vm_ops; - return 0; +out: + mutex_unlock(&sfp->f_mutex); + return ret; } static void @@ -1735,9 +1748,12 @@ sg_start_req(Sg_request *srp, unsigned char *cmd) !sfp->res_in_use) { sfp->res_in_use = 1; sg_link_reserve(sfp, srp, dxfer_len); - } else if ((hp->flags & SG_FLAG_MMAP_IO) && sfp->res_in_use) { + } else if (hp->flags & SG_FLAG_MMAP_IO) { + res = -EBUSY; /* sfp->res_in_use == 1 */ + if (dxfer_len > rsv_schp->bufflen) + res = -ENOMEM; mutex_unlock(&sfp->f_mutex); - return -EBUSY; + return res; } else { res = sg_build_indirect(req_schp, sfp, dxfer_len); if (res) { diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c index 80cfa93e407c..5ed696dc9bbd 100644 --- a/drivers/scsi/sgiwd93.c +++ b/drivers/scsi/sgiwd93.c @@ -192,20 +192,6 @@ static inline void init_hpc_chain(struct 
ip22_hostdata *hdata) hcp->desc.pnext = hdata->dma; } -static int sgiwd93_bus_reset(struct scsi_cmnd *cmd) -{ - /* FIXME perform bus-specific reset */ - - /* FIXME 2: kill this function, and let midlayer fallback - to the same result, calling wd33c93_host_reset() */ - - spin_lock_irq(cmd->device->host->host_lock); - wd33c93_host_reset(cmd); - spin_unlock_irq(cmd->device->host->host_lock); - - return SUCCESS; -} - /* * Kludge alert - the SCSI code calls the abort and reset method with int * arguments not with pointers. So this is going to blow up beautyfully @@ -217,7 +203,6 @@ static struct scsi_host_template sgiwd93_template = { .name = "SGI WD93", .queuecommand = wd33c93_queuecommand, .eh_abort_handler = wd33c93_abort, - .eh_bus_reset_handler = sgiwd93_bus_reset, .eh_host_reset_handler = wd33c93_host_reset, .can_queue = 16, .this_id = 7, diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h index e164ffade38a..dc3a0542a2e8 100644 --- a/drivers/scsi/smartpqi/smartpqi.h +++ b/drivers/scsi/smartpqi/smartpqi.h @@ -688,6 +688,28 @@ struct pqi_config_table_heartbeat { __le32 heartbeat_counter; }; +union pqi_reset_register { + struct { + u32 reset_type : 3; + u32 reserved : 2; + u32 reset_action : 3; + u32 hold_in_pd1 : 1; + u32 reserved2 : 23; + } bits; + u32 all_bits; +}; + +#define PQI_RESET_ACTION_RESET 0x1 + +#define PQI_RESET_TYPE_NO_RESET 0x0 +#define PQI_RESET_TYPE_SOFT_RESET 0x1 +#define PQI_RESET_TYPE_FIRM_RESET 0x2 +#define PQI_RESET_TYPE_HARD_RESET 0x3 + +#define PQI_RESET_ACTION_COMPLETED 0x2 + +#define PQI_RESET_POLL_INTERVAL_MSECS 100 + #define PQI_MAX_OUTSTANDING_REQUESTS ((u32)~0) #define PQI_MAX_OUTSTANDING_REQUESTS_KDUMP 32 #define PQI_MAX_TRANSFER_SIZE (1024U * 1024U) @@ -995,6 +1017,7 @@ struct pqi_ctrl_info { u8 inbound_spanning_supported : 1; u8 outbound_spanning_supported : 1; u8 pqi_mode_enabled : 1; + u8 pqi_reset_quiesce_supported : 1; struct list_head scsi_device_list; spinlock_t scsi_device_list_lock; @@ -1056,9 +1079,9 @@ enum pqi_ctrl_mode { #define BMIC_SENSE_CONTROLLER_PARAMETERS 0x64 #define BMIC_SENSE_SUBSYSTEM_INFORMATION 0x66 #define BMIC_WRITE_HOST_WELLNESS 0xa5 -#define BMIC_CACHE_FLUSH 0xc2 +#define BMIC_FLUSH_CACHE 0xc2 -#define SA_CACHE_FLUSH 0x1 +#define SA_FLUSH_CACHE 0x1 #define MASKED_DEVICE(lunid) ((lunid)[3] & 0xc0) #define CISS_GET_LEVEL_2_BUS(lunid) ((lunid)[7] & 0x3f) @@ -1164,6 +1187,23 @@ struct bmic_identify_physical_device { u8 padding_to_multiple_of_512[9]; }; +struct bmic_flush_cache { + u8 disable_flag; + u8 system_power_action; + u8 ndu_flush; + u8 shutdown_event; + u8 reserved[28]; +}; + +/* for shutdown_event member of struct bmic_flush_cache */ +enum bmic_flush_cache_shutdown_event { + NONE_CACHE_FLUSH_ONLY = 0, + SHUTDOWN = 1, + HIBERNATE = 2, + SUSPEND = 3, + RESTART = 4 +}; + #pragma pack() int pqi_add_sas_host(struct Scsi_Host *shost, struct pqi_ctrl_info *ctrl_info); diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index cb8f886e705c..83bdbd84eb01 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -40,11 +40,11 @@ #define BUILD_TIMESTAMP #endif -#define DRIVER_VERSION "1.0.4-100" +#define DRIVER_VERSION "1.1.2-125" #define DRIVER_MAJOR 1 -#define DRIVER_MINOR 0 -#define DRIVER_RELEASE 4 -#define DRIVER_REVISION 100 +#define DRIVER_MINOR 1 +#define DRIVER_RELEASE 2 +#define DRIVER_REVISION 125 #define DRIVER_NAME "Microsemi PQI Driver (v" \ DRIVER_VERSION BUILD_TIMESTAMP ")" @@ -431,10 +431,10 @@ static int 
pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info, cdb[1] = CISS_GET_RAID_MAP; put_unaligned_be32(buffer_length, &cdb[6]); break; - case SA_CACHE_FLUSH: + case SA_FLUSH_CACHE: request->data_direction = SOP_WRITE_FLAG; cdb[0] = BMIC_WRITE; - cdb[6] = BMIC_CACHE_FLUSH; + cdb[6] = BMIC_FLUSH_CACHE; put_unaligned_be16(buffer_length, &cdb[7]); break; case BMIC_IDENTIFY_CONTROLLER: @@ -585,14 +585,13 @@ static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info, return rc; } -#define SA_CACHE_FLUSH_BUFFER_LENGTH 4 - -static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info) +static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info, + enum bmic_flush_cache_shutdown_event shutdown_event) { int rc; struct pqi_raid_path_request request; int pci_direction; - u8 *buffer; + struct bmic_flush_cache *flush_cache; /* * Don't bother trying to flush the cache if the controller is @@ -601,13 +600,15 @@ static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info) if (pqi_ctrl_offline(ctrl_info)) return -ENXIO; - buffer = kzalloc(SA_CACHE_FLUSH_BUFFER_LENGTH, GFP_KERNEL); - if (!buffer) + flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL); + if (!flush_cache) return -ENOMEM; + flush_cache->shutdown_event = shutdown_event; + rc = pqi_build_raid_path_request(ctrl_info, &request, - SA_CACHE_FLUSH, RAID_CTLR_LUNID, buffer, - SA_CACHE_FLUSH_BUFFER_LENGTH, 0, &pci_direction); + SA_FLUSH_CACHE, RAID_CTLR_LUNID, flush_cache, + sizeof(*flush_cache), 0, &pci_direction); if (rc) goto out; @@ -618,7 +619,7 @@ static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info) pci_direction); out: - kfree(buffer); + kfree(flush_cache); return rc; } @@ -3007,11 +3008,9 @@ static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info, break; case IRQ_MODE_INTX: pqi_configure_legacy_intx(ctrl_info, true); - sis_disable_msix(ctrl_info); sis_enable_intx(ctrl_info); break; case IRQ_MODE_NONE: - sis_disable_msix(ctrl_info); break; } break; @@ -3019,14 +3018,12 @@ static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info, switch (new_mode) { case IRQ_MODE_MSIX: pqi_configure_legacy_intx(ctrl_info, false); - sis_disable_intx(ctrl_info); sis_enable_msix(ctrl_info); break; case IRQ_MODE_INTX: break; case IRQ_MODE_NONE: pqi_configure_legacy_intx(ctrl_info, false); - sis_disable_intx(ctrl_info); break; } break; @@ -5498,6 +5495,7 @@ static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) case XFER_NONE: case XFER_WRITE: case XFER_READ: + case XFER_READ | XFER_WRITE: break; default: return -EINVAL; @@ -5538,6 +5536,9 @@ static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) case XFER_READ: request.data_direction = SOP_READ_FLAG; break; + case XFER_READ | XFER_WRITE: + request.data_direction = SOP_BIDIRECTIONAL; + break; } request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; @@ -5889,28 +5890,62 @@ static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info) scsi_host_put(shost); } -#define PQI_RESET_ACTION_RESET 0x1 +static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info) +{ + int rc = 0; + struct pqi_device_registers __iomem *pqi_registers; + unsigned long timeout; + unsigned int timeout_msecs; + union pqi_reset_register reset_reg; -#define PQI_RESET_TYPE_NO_RESET 0x0 -#define PQI_RESET_TYPE_SOFT_RESET 0x1 -#define PQI_RESET_TYPE_FIRM_RESET 0x2 -#define PQI_RESET_TYPE_HARD_RESET 0x3 + pqi_registers = ctrl_info->pqi_registers; + timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100; + timeout = 
msecs_to_jiffies(timeout_msecs) + jiffies; + + while (1) { + msleep(PQI_RESET_POLL_INTERVAL_MSECS); + reset_reg.all_bits = readl(&pqi_registers->device_reset); + if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED) + break; + pqi_check_ctrl_health(ctrl_info); + if (pqi_ctrl_offline(ctrl_info)) { + rc = -ENXIO; + break; + } + if (time_after(jiffies, timeout)) { + rc = -ETIMEDOUT; + break; + } + } + + return rc; +} static int pqi_reset(struct pqi_ctrl_info *ctrl_info) { int rc; - u32 reset_params; + union pqi_reset_register reset_reg; - reset_params = (PQI_RESET_ACTION_RESET << 5) | - PQI_RESET_TYPE_HARD_RESET; + if (ctrl_info->pqi_reset_quiesce_supported) { + rc = sis_pqi_reset_quiesce(ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "PQI reset failed during quiesce with error %d\n", + rc); + return rc; + } + } - writel(reset_params, - &ctrl_info->pqi_registers->device_reset); + reset_reg.all_bits = 0; + reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET; + reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET; - rc = pqi_wait_for_pqi_mode_ready(ctrl_info); + writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset); + + rc = pqi_wait_for_pqi_reset_completion(ctrl_info); if (rc) dev_err(&ctrl_info->pci_dev->dev, - "PQI reset failed\n"); + "PQI reset failed with error %d\n", rc); return rc; } @@ -6007,7 +6042,12 @@ static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info) rc = pqi_reset(ctrl_info); if (rc) return rc; - sis_reenable_sis_mode(ctrl_info); + rc = sis_reenable_sis_mode(ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "re-enabling SIS mode failed with error %d\n", rc); + return rc; + } pqi_save_ctrl_mode(ctrl_info, SIS_MODE); return 0; @@ -6659,7 +6699,8 @@ static void pqi_shutdown(struct pci_dev *pci_dev) * Write all data in the controller's battery-backed cache to * storage. 
*/ - rc = pqi_flush_cache(ctrl_info); + rc = pqi_flush_cache(ctrl_info, SHUTDOWN); + pqi_reset(ctrl_info); if (rc == 0) return; @@ -6703,7 +6744,7 @@ static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t stat pqi_cancel_rescan_worker(ctrl_info); pqi_wait_until_scan_finished(ctrl_info); pqi_wait_until_lun_reset_finished(ctrl_info); - pqi_flush_cache(ctrl_info); + pqi_flush_cache(ctrl_info, SUSPEND); pqi_ctrl_block_requests(ctrl_info); pqi_ctrl_wait_until_quiesced(ctrl_info); pqi_wait_until_inbound_queues_empty(ctrl_info); @@ -6781,7 +6822,7 @@ static const struct pci_device_id pqi_pci_id_table[] = { }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - PCI_VENDOR_ID_ADAPTEC2, 0x0605) + PCI_VENDOR_ID_ADAPTEC2, 0x0608) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, @@ -6811,6 +6852,10 @@ static const struct pci_device_id pqi_pci_id_table[] = { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0806) }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x0807) + }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0900) @@ -6847,6 +6892,10 @@ static const struct pci_device_id pqi_pci_id_table[] = { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0908) }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x090a) + }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1200) @@ -6879,6 +6928,10 @@ static const struct pci_device_id pqi_pci_id_table[] = { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1380) }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_DELL, 0x1fe0) + }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0600) @@ -6897,11 +6950,7 @@ static const struct pci_device_id pqi_pci_id_table[] = { }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - PCI_VENDOR_ID_HP, 0x0604) - }, - { - PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - PCI_VENDOR_ID_HP, 0x0606) + PCI_VENDOR_ID_HP, 0x0609) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, @@ -6927,14 +6976,6 @@ static const struct pci_device_id pqi_pci_id_table[] = { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0655) }, - { - PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - PCI_VENDOR_ID_HP, 0x0656) - }, - { - PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - PCI_VENDOR_ID_HP, 0x0657) - }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0700) @@ -6955,14 +6996,6 @@ static const struct pci_device_id pqi_pci_id_table[] = { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x1101) }, - { - PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - PCI_VENDOR_ID_HP, 0x1102) - }, - { - PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - PCI_VENDOR_ID_HP, 0x1150) - }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_ANY_ID, PCI_ANY_ID) diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c index 0d89d3728b43..b209a35e482e 100644 --- a/drivers/scsi/smartpqi/smartpqi_sas_transport.c +++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c @@ -329,14 +329,6 @@ static int pqi_sas_phy_speed(struct sas_phy *phy, return -EINVAL; } -/* SMP = Serial Management Protocol */ - -static int pqi_sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, - struct request *req) -{ - return -EINVAL; -} - struct sas_function_template pqi_sas_transport_functions = { .get_linkerrors = pqi_sas_get_linkerrors, 
.get_enclosure_identifier = pqi_sas_get_enclosure_identifier, @@ -346,5 +338,4 @@ struct sas_function_template pqi_sas_transport_functions = { .phy_setup = pqi_sas_phy_setup, .phy_release = pqi_sas_phy_release, .set_phy_speed = pqi_sas_phy_speed, - .smp_handler = pqi_sas_smp_handler, }; diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c index e55dfcf200e5..5141bd4c9f06 100644 --- a/drivers/scsi/smartpqi/smartpqi_sis.c +++ b/drivers/scsi/smartpqi/smartpqi_sis.c @@ -34,11 +34,13 @@ #define SIS_REENABLE_SIS_MODE 0x1 #define SIS_ENABLE_MSIX 0x40 #define SIS_ENABLE_INTX 0x80 -#define SIS_SOFT_RESET 0x100 -#define SIS_TRIGGER_SHUTDOWN 0x800000 #define SIS_CMD_READY 0x200 +#define SIS_TRIGGER_SHUTDOWN 0x800000 +#define SIS_PQI_RESET_QUIESCE 0x1000000 + #define SIS_CMD_COMPLETE 0x1000 #define SIS_CLEAR_CTRL_TO_HOST_DOORBELL 0x1000 + #define SIS_CMD_STATUS_SUCCESS 0x1 #define SIS_CMD_COMPLETE_TIMEOUT_SECS 30 #define SIS_CMD_COMPLETE_POLL_INTERVAL_MSECS 10 @@ -47,6 +49,7 @@ #define SIS_EXTENDED_PROPERTIES_SUPPORTED 0x800000 #define SIS_SMARTARRAY_FEATURES_SUPPORTED 0x2 #define SIS_PQI_MODE_SUPPORTED 0x4 +#define SIS_PQI_RESET_QUIESCE_SUPPORTED 0x8 #define SIS_REQUIRED_EXTENDED_PROPERTIES \ (SIS_SMARTARRAY_FEATURES_SUPPORTED | SIS_PQI_MODE_SUPPORTED) @@ -258,6 +261,9 @@ int sis_get_ctrl_properties(struct pqi_ctrl_info *ctrl_info) SIS_REQUIRED_EXTENDED_PROPERTIES) return -ENODEV; + if (extended_properties & SIS_PQI_RESET_QUIESCE_SUPPORTED) + ctrl_info->pqi_reset_quiesce_supported = true; + return 0; } @@ -336,9 +342,10 @@ int sis_init_base_struct_addr(struct pqi_ctrl_info *ctrl_info) #define SIS_DOORBELL_BIT_CLEAR_TIMEOUT_SECS 30 -static void sis_wait_for_doorbell_bit_to_clear( +static int sis_wait_for_doorbell_bit_to_clear( struct pqi_ctrl_info *ctrl_info, u32 bit) { + int rc = 0; u32 doorbell_register; unsigned long timeout; @@ -350,78 +357,38 @@ static void sis_wait_for_doorbell_bit_to_clear( if ((doorbell_register & bit) == 0) break; if (readl(&ctrl_info->registers->sis_firmware_status) & - SIS_CTRL_KERNEL_PANIC) + SIS_CTRL_KERNEL_PANIC) { + rc = -ENODEV; break; + } if (time_after(jiffies, timeout)) { dev_err(&ctrl_info->pci_dev->dev, "doorbell register bit 0x%x not cleared\n", bit); + rc = -ETIMEDOUT; break; } usleep_range(1000, 2000); } + + return rc; } -/* Enable MSI-X interrupts on the controller. */ +static inline int sis_set_doorbell_bit(struct pqi_ctrl_info *ctrl_info, u32 bit) +{ + writel(bit, &ctrl_info->registers->sis_host_to_ctrl_doorbell); + + return sis_wait_for_doorbell_bit_to_clear(ctrl_info, bit); +} void sis_enable_msix(struct pqi_ctrl_info *ctrl_info) { - u32 doorbell_register; - - doorbell_register = - readl(&ctrl_info->registers->sis_host_to_ctrl_doorbell); - doorbell_register |= SIS_ENABLE_MSIX; - - writel(doorbell_register, - &ctrl_info->registers->sis_host_to_ctrl_doorbell); - - sis_wait_for_doorbell_bit_to_clear(ctrl_info, SIS_ENABLE_MSIX); -} - -/* Disable MSI-X interrupts on the controller. 
*/ - -void sis_disable_msix(struct pqi_ctrl_info *ctrl_info) -{ - u32 doorbell_register; - - doorbell_register = - readl(&ctrl_info->registers->sis_host_to_ctrl_doorbell); - doorbell_register &= ~SIS_ENABLE_MSIX; - - writel(doorbell_register, - &ctrl_info->registers->sis_host_to_ctrl_doorbell); + sis_set_doorbell_bit(ctrl_info, SIS_ENABLE_MSIX); } void sis_enable_intx(struct pqi_ctrl_info *ctrl_info) { - u32 doorbell_register; - - doorbell_register = - readl(&ctrl_info->registers->sis_host_to_ctrl_doorbell); - doorbell_register |= SIS_ENABLE_INTX; - - writel(doorbell_register, - &ctrl_info->registers->sis_host_to_ctrl_doorbell); - - sis_wait_for_doorbell_bit_to_clear(ctrl_info, SIS_ENABLE_INTX); -} - -void sis_disable_intx(struct pqi_ctrl_info *ctrl_info) -{ - u32 doorbell_register; - - doorbell_register = - readl(&ctrl_info->registers->sis_host_to_ctrl_doorbell); - doorbell_register &= ~SIS_ENABLE_INTX; - - writel(doorbell_register, - &ctrl_info->registers->sis_host_to_ctrl_doorbell); -} - -void sis_soft_reset(struct pqi_ctrl_info *ctrl_info) -{ - writel(SIS_SOFT_RESET, - &ctrl_info->registers->sis_host_to_ctrl_doorbell); + sis_set_doorbell_bit(ctrl_info, SIS_ENABLE_INTX); } void sis_shutdown_ctrl(struct pqi_ctrl_info *ctrl_info) @@ -434,38 +401,14 @@ void sis_shutdown_ctrl(struct pqi_ctrl_info *ctrl_info) &ctrl_info->registers->sis_host_to_ctrl_doorbell); } -#define SIS_MODE_READY_TIMEOUT_SECS 30 +int sis_pqi_reset_quiesce(struct pqi_ctrl_info *ctrl_info) +{ + return sis_set_doorbell_bit(ctrl_info, SIS_PQI_RESET_QUIESCE); +} int sis_reenable_sis_mode(struct pqi_ctrl_info *ctrl_info) { - int rc; - unsigned long timeout; - struct pqi_ctrl_registers __iomem *registers; - u32 doorbell; - - registers = ctrl_info->registers; - - writel(SIS_REENABLE_SIS_MODE, - &registers->sis_host_to_ctrl_doorbell); - - rc = 0; - timeout = (SIS_MODE_READY_TIMEOUT_SECS * HZ) + jiffies; - - while (1) { - doorbell = readl(&registers->sis_ctrl_to_host_doorbell); - if ((doorbell & SIS_REENABLE_SIS_MODE) == 0) - break; - if (time_after(jiffies, timeout)) { - rc = -ETIMEDOUT; - break; - } - } - - if (rc) - dev_err(&ctrl_info->pci_dev->dev, - "re-enabling SIS mode failed\n"); - - return rc; + return sis_set_doorbell_bit(ctrl_info, SIS_REENABLE_SIS_MODE); } void sis_write_driver_scratch(struct pqi_ctrl_info *ctrl_info, u32 value) diff --git a/drivers/scsi/smartpqi/smartpqi_sis.h b/drivers/scsi/smartpqi/smartpqi_sis.h index 983184b69373..2bf889dbf5ab 100644 --- a/drivers/scsi/smartpqi/smartpqi_sis.h +++ b/drivers/scsi/smartpqi/smartpqi_sis.h @@ -27,11 +27,9 @@ int sis_get_ctrl_properties(struct pqi_ctrl_info *ctrl_info); int sis_get_pqi_capabilities(struct pqi_ctrl_info *ctrl_info); int sis_init_base_struct_addr(struct pqi_ctrl_info *ctrl_info); void sis_enable_msix(struct pqi_ctrl_info *ctrl_info); -void sis_disable_msix(struct pqi_ctrl_info *ctrl_info); void sis_enable_intx(struct pqi_ctrl_info *ctrl_info); -void sis_disable_intx(struct pqi_ctrl_info *ctrl_info); -void sis_soft_reset(struct pqi_ctrl_info *ctrl_info); void sis_shutdown_ctrl(struct pqi_ctrl_info *ctrl_info); +int sis_pqi_reset_quiesce(struct pqi_ctrl_info *ctrl_info); int sis_reenable_sis_mode(struct pqi_ctrl_info *ctrl_info); void sis_write_driver_scratch(struct pqi_ctrl_info *ctrl_info, u32 value); u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info); diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index a8f630213a1a..9be34d37c356 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -393,7 +393,7 @@ static int sr_init_command(struct 
scsi_cmnd *SCpnt) ret = scsi_init_io(SCpnt); if (ret != BLKPREP_OK) goto out; - SCpnt = rq->special; + WARN_ON_ONCE(SCpnt != rq->special); cd = scsi_cd(rq->rq_disk); /* from here on until we're complete, any goto out diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 3cc8d67783a1..5e7200f05873 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1640,6 +1640,8 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) put_cpu(); if (ret == -EAGAIN) { + if (payload_sz > sizeof(cmd_request->mpb)) + kfree(payload); /* no more space */ return SCSI_MLQUEUE_DEVICE_BUSY; } diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c index e64b0c542f95..9492638296c8 100644 --- a/drivers/scsi/sun3_scsi.c +++ b/drivers/scsi/sun3_scsi.c @@ -46,7 +46,7 @@ #define NCR5380_write(reg, value) out_8(hostdata->io + (reg), value) #define NCR5380_queue_command sun3scsi_queue_command -#define NCR5380_bus_reset sun3scsi_bus_reset +#define NCR5380_host_reset sun3scsi_host_reset #define NCR5380_abort sun3scsi_abort #define NCR5380_info sun3scsi_info @@ -495,7 +495,7 @@ static struct scsi_host_template sun3_scsi_template = { .info = sun3scsi_info, .queuecommand = sun3scsi_queue_command, .eh_abort_handler = sun3scsi_abort, - .eh_bus_reset_handler = sun3scsi_bus_reset, + .eh_host_reset_handler = sun3scsi_host_reset, .can_queue = 16, .this_id = 7, .sg_tablesize = SG_NONE, diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c index 6b349e301869..ca360daa6a25 100644 --- a/drivers/scsi/sym53c8xx_2/sym_hipd.c +++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c @@ -759,7 +759,7 @@ static int sym_prepare_setting(struct Scsi_Host *shost, struct sym_hcb *np, stru /* * Maximum synchronous period factor supported by the chip. */ - period = (11 * div_10M[np->clock_divn - 1]) / (4 * np->clock_khz); + period = div64_ul(11 * div_10M[np->clock_divn - 1], 4 * np->clock_khz); np->maxsync = period > 2540 ? 254 : period / 10; /* @@ -4985,13 +4985,10 @@ struct sym_lcb *sym_alloc_lcb (struct sym_hcb *np, u_char tn, u_char ln) * Compute the bus address of this table. */ if (ln && !tp->luntbl) { - int i; - tp->luntbl = sym_calloc_dma(256, "LUNTBL"); if (!tp->luntbl) goto fail; - for (i = 0 ; i < 64 ; i++) - tp->luntbl[i] = cpu_to_scr(vtobus(&np->badlun_sa)); + memset32(tp->luntbl, cpu_to_scr(vtobus(&np->badlun_sa)), 64); tp->head.luntbl_sa = cpu_to_scr(vtobus(tp->luntbl)); } @@ -5077,8 +5074,7 @@ static void sym_alloc_lcb_tags (struct sym_hcb *np, u_char tn, u_char ln) /* * Initialize the task table with invalid entries. */ - for (i = 0 ; i < SYM_CONF_MAX_TASK ; i++) - lp->itlq_tbl[i] = cpu_to_scr(np->notask_ba); + memset32(lp->itlq_tbl, cpu_to_scr(np->notask_ba), SYM_CONF_MAX_TASK); /* * Fill up the tag buffer with tag numbers. 
@@ -5764,8 +5760,7 @@ int sym_hcb_attach(struct Scsi_Host *shost, struct sym_fw *fw, struct sym_nvram goto attach_failed; np->badlun_sa = cpu_to_scr(SCRIPTB_BA(np, resel_bad_lun)); - for (i = 0 ; i < 64 ; i++) /* 64 luns/target, no less */ - np->badluntbl[i] = cpu_to_scr(vtobus(&np->badlun_sa)); + memset32(np->badluntbl, cpu_to_scr(vtobus(&np->badlun_sa)), 64); /* * Prepare the bus address array that contains the bus diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 5bc9dc14e075..794a4600e952 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -438,7 +438,7 @@ static void ufshcd_print_host_state(struct ufs_hba *hba) { dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state); dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n", - hba->lrb_in_use, hba->outstanding_tasks, hba->outstanding_reqs); + hba->lrb_in_use, hba->outstanding_reqs, hba->outstanding_tasks); dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n", hba->saved_err, hba->saved_uic_err); dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n", diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 9be211d68b15..7c28e8d4955a 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -818,7 +818,6 @@ static struct scsi_host_template virtscsi_host_template_single = { .eh_timed_out = virtscsi_eh_timed_out, .slave_alloc = virtscsi_device_alloc, - .can_queue = 1024, .dma_boundary = UINT_MAX, .use_clustering = ENABLE_CLUSTERING, .target_alloc = virtscsi_target_alloc, @@ -839,7 +838,6 @@ static struct scsi_host_template virtscsi_host_template_multi = { .eh_timed_out = virtscsi_eh_timed_out, .slave_alloc = virtscsi_device_alloc, - .can_queue = 1024, .dma_boundary = UINT_MAX, .use_clustering = ENABLE_CLUSTERING, .target_alloc = virtscsi_target_alloc, @@ -972,6 +970,8 @@ static int virtscsi_probe(struct virtio_device *vdev) if (err) goto virtscsi_init_failed; + shost->can_queue = virtqueue_get_vring_size(vscsi->req_vqs[0].vq); + cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1; shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue); shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF; diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c index 9e09da412b92..74be04f2357c 100644 --- a/drivers/scsi/wd33c93.c +++ b/drivers/scsi/wd33c93.c @@ -1578,6 +1578,7 @@ wd33c93_host_reset(struct scsi_cmnd * SCpnt) int i; instance = SCpnt->device->host; + spin_lock_irq(instance->host_lock); hostdata = (struct WD33C93_hostdata *) instance->hostdata; printk("scsi%d: reset. 
", instance->host_no); @@ -1603,6 +1604,7 @@ wd33c93_host_reset(struct scsi_cmnd * SCpnt) reset_wd33c93(instance); SCpnt->result = DID_RESET << 16; enable_irq(instance->irq); + spin_unlock_irq(instance->host_lock); return SUCCESS; } diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig index 07fc0ac51c52..fc9e98047421 100644 --- a/drivers/soc/Kconfig +++ b/drivers/soc/Kconfig @@ -1,6 +1,7 @@ menu "SOC (System On Chip) specific Drivers" source "drivers/soc/actions/Kconfig" +source "drivers/soc/amlogic/Kconfig" source "drivers/soc/atmel/Kconfig" source "drivers/soc/bcm/Kconfig" source "drivers/soc/fsl/Kconfig" diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile index 9241125416ba..2fcaff864584 100644 --- a/drivers/soc/Makefile +++ b/drivers/soc/Makefile @@ -9,7 +9,9 @@ obj-$(CONFIG_ARCH_DOVE) += dove/ obj-$(CONFIG_MACH_DOVE) += dove/ obj-y += fsl/ obj-$(CONFIG_ARCH_MXC) += imx/ +obj-$(CONFIG_SOC_XWAY) += lantiq/ obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/ +obj-$(CONFIG_ARCH_MESON) += amlogic/ obj-$(CONFIG_ARCH_QCOM) += qcom/ obj-y += renesas/ obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/ diff --git a/drivers/soc/amlogic/Kconfig b/drivers/soc/amlogic/Kconfig new file mode 100644 index 000000000000..22acf064531f --- /dev/null +++ b/drivers/soc/amlogic/Kconfig @@ -0,0 +1,12 @@ +menu "Amlogic SoC drivers" + +config MESON_GX_SOCINFO + bool "Amlogic Meson GX SoC Information driver" + depends on ARCH_MESON || COMPILE_TEST + default ARCH_MESON + select SOC_BUS + help + Say yes to support decoding of Amlogic Meson GX SoC family + information about the type, package and version. + +endmenu diff --git a/drivers/soc/amlogic/Makefile b/drivers/soc/amlogic/Makefile new file mode 100644 index 000000000000..3e85fc462c21 --- /dev/null +++ b/drivers/soc/amlogic/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_MESON_GX_SOCINFO) += meson-gx-socinfo.o diff --git a/drivers/soc/amlogic/meson-gx-socinfo.c b/drivers/soc/amlogic/meson-gx-socinfo.c new file mode 100644 index 000000000000..89f4cf507be6 --- /dev/null +++ b/drivers/soc/amlogic/meson-gx-socinfo.c @@ -0,0 +1,177 @@ +/* + * Copyright (c) 2017 BayLibre, SAS + * Author: Neil Armstrong + * + * SPDX-License-Identifier: GPL-2.0+ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define AO_SEC_SD_CFG8 0xe0 +#define AO_SEC_SOCINFO_OFFSET AO_SEC_SD_CFG8 + +#define SOCINFO_MAJOR GENMASK(31, 24) +#define SOCINFO_MINOR GENMASK(23, 16) +#define SOCINFO_PACK GENMASK(15, 8) +#define SOCINFO_MISC GENMASK(7, 0) + +static const struct meson_gx_soc_id { + const char *name; + unsigned int id; +} soc_ids[] = { + { "GXBB", 0x1f }, + { "GXTVBB", 0x20 }, + { "GXL", 0x21 }, + { "GXM", 0x22 }, + { "TXL", 0x23 }, +}; + +static const struct meson_gx_package_id { + const char *name; + unsigned int major_id; + unsigned int pack_id; +} soc_packages[] = { + { "S905", 0x1f, 0 }, + { "S905M", 0x1f, 0x20 }, + { "S905D", 0x21, 0 }, + { "S905X", 0x21, 0x80 }, + { "S905L", 0x21, 0xc0 }, + { "S905M2", 0x21, 0xe0 }, + { "S912", 0x22, 0 }, +}; + +static inline unsigned int socinfo_to_major(u32 socinfo) +{ + return FIELD_GET(SOCINFO_MAJOR, socinfo); +} + +static inline unsigned int socinfo_to_minor(u32 socinfo) +{ + return FIELD_GET(SOCINFO_MINOR, socinfo); +} + +static inline unsigned int socinfo_to_pack(u32 socinfo) +{ + return FIELD_GET(SOCINFO_PACK, socinfo); +} + +static inline unsigned int socinfo_to_misc(u32 socinfo) +{ + return FIELD_GET(SOCINFO_MISC, socinfo); +} + +static const char *socinfo_to_package_id(u32 socinfo) +{ + unsigned int pack = 
socinfo_to_pack(socinfo) & 0xf0; + unsigned int major = socinfo_to_major(socinfo); + int i; + + for (i = 0 ; i < ARRAY_SIZE(soc_packages) ; ++i) { + if (soc_packages[i].major_id == major && + soc_packages[i].pack_id == pack) + return soc_packages[i].name; + } + + return "Unknown"; +} + +static const char *socinfo_to_soc_id(u32 socinfo) +{ + unsigned int id = socinfo_to_major(socinfo); + int i; + + for (i = 0 ; i < ARRAY_SIZE(soc_ids) ; ++i) { + if (soc_ids[i].id == id) + return soc_ids[i].name; + } + + return "Unknown"; +} + +int __init meson_gx_socinfo_init(void) +{ + struct soc_device_attribute *soc_dev_attr; + struct soc_device *soc_dev; + struct device_node *np; + struct regmap *regmap; + unsigned int socinfo; + struct device *dev; + int ret; + + /* look up for chipid node */ + np = of_find_compatible_node(NULL, NULL, "amlogic,meson-gx-ao-secure"); + if (!np) + return -ENODEV; + + /* check if interface is enabled */ + if (!of_device_is_available(np)) + return -ENODEV; + + /* check if chip-id is available */ + if (!of_property_read_bool(np, "amlogic,has-chip-id")) + return -ENODEV; + + /* node should be a syscon */ + regmap = syscon_node_to_regmap(np); + of_node_put(np); + if (IS_ERR(regmap)) { + pr_err("%s: failed to get regmap\n", __func__); + return -ENODEV; + } + + ret = regmap_read(regmap, AO_SEC_SOCINFO_OFFSET, &socinfo); + if (ret < 0) + return ret; + + if (!socinfo) { + pr_err("%s: invalid chipid value\n", __func__); + return -EINVAL; + } + + soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); + if (!soc_dev_attr) + return -ENODEV; + + soc_dev_attr->family = "Amlogic Meson"; + + np = of_find_node_by_path("/"); + of_property_read_string(np, "model", &soc_dev_attr->machine); + of_node_put(np); + + soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%x:%x - %x:%x", + socinfo_to_major(socinfo), + socinfo_to_minor(socinfo), + socinfo_to_pack(socinfo), + socinfo_to_misc(socinfo)); + soc_dev_attr->soc_id = kasprintf(GFP_KERNEL, "%s (%s)", + socinfo_to_soc_id(socinfo), + socinfo_to_package_id(socinfo)); + + soc_dev = soc_device_register(soc_dev_attr); + if (IS_ERR(soc_dev)) { + kfree(soc_dev_attr->revision); + kfree_const(soc_dev_attr->soc_id); + kfree(soc_dev_attr); + return PTR_ERR(soc_dev); + } + dev = soc_device_to_device(soc_dev); + + dev_info(dev, "Amlogic Meson %s Revision %x:%x (%x:%x) Detected\n", + soc_dev_attr->soc_id, + socinfo_to_major(socinfo), + socinfo_to_minor(socinfo), + socinfo_to_pack(socinfo), + socinfo_to_misc(socinfo)); + + return 0; +} +device_initcall(meson_gx_socinfo_init); diff --git a/drivers/soc/fsl/qbman/bman_ccsr.c b/drivers/soc/fsl/qbman/bman_ccsr.c index a8e8389a6894..eaa9585c7347 100644 --- a/drivers/soc/fsl/qbman/bman_ccsr.c +++ b/drivers/soc/fsl/qbman/bman_ccsr.c @@ -177,8 +177,8 @@ static int fsl_bman_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { - dev_err(dev, "Can't get %s property 'IORESOURCE_MEM'\n", - node->full_name); + dev_err(dev, "Can't get %pOF property 'IORESOURCE_MEM'\n", + node); return -ENXIO; } bm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res)); @@ -205,14 +205,14 @@ static int fsl_bman_probe(struct platform_device *pdev) err_irq = platform_get_irq(pdev, 0); if (err_irq <= 0) { - dev_info(dev, "Can't get %s IRQ\n", node->full_name); + dev_info(dev, "Can't get %pOF IRQ\n", node); return -ENODEV; } ret = devm_request_irq(dev, err_irq, bman_isr, IRQF_SHARED, "bman-err", dev); if (ret) { - dev_err(dev, "devm_request_irq() failed %d for '%s'\n", - ret, 
node->full_name); + dev_err(dev, "devm_request_irq() failed %d for '%pOF'\n", + ret, node); return ret; } /* Disable Buffer Pool State Change */ diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c index 8354d4dabdad..39b39c8f1399 100644 --- a/drivers/soc/fsl/qbman/bman_portal.c +++ b/drivers/soc/fsl/qbman/bman_portal.c @@ -103,16 +103,14 @@ static int bman_portal_probe(struct platform_device *pdev) addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM, DPAA_PORTAL_CE); if (!addr_phys[0]) { - dev_err(dev, "Can't get %s property 'reg::CE'\n", - node->full_name); + dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node); return -ENXIO; } addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM, DPAA_PORTAL_CI); if (!addr_phys[1]) { - dev_err(dev, "Can't get %s property 'reg::CI'\n", - node->full_name); + dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node); return -ENXIO; } @@ -120,7 +118,7 @@ static int bman_portal_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq <= 0) { - dev_err(dev, "Can't get %s IRQ'\n", node->full_name); + dev_err(dev, "Can't get %pOF IRQ'\n", node); return -ENXIO; } pcfg->irq = irq; diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c index 90bc40c48675..835ce947ffca 100644 --- a/drivers/soc/fsl/qbman/qman_ccsr.c +++ b/drivers/soc/fsl/qbman/qman_ccsr.c @@ -695,8 +695,8 @@ static int fsl_qman_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { - dev_err(dev, "Can't get %s property 'IORESOURCE_MEM'\n", - node->full_name); + dev_err(dev, "Can't get %pOF property 'IORESOURCE_MEM'\n", + node); return -ENXIO; } qm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res)); @@ -740,15 +740,15 @@ static int fsl_qman_probe(struct platform_device *pdev) err_irq = platform_get_irq(pdev, 0); if (err_irq <= 0) { - dev_info(dev, "Can't get %s property 'interrupts'\n", - node->full_name); + dev_info(dev, "Can't get %pOF property 'interrupts'\n", + node); return -ENODEV; } ret = devm_request_irq(dev, err_irq, qman_isr, IRQF_SHARED, "qman-err", dev); if (ret) { - dev_err(dev, "devm_request_irq() failed %d for '%s'\n", - ret, node->full_name); + dev_err(dev, "devm_request_irq() failed %d for '%pOF'\n", + ret, node); return ret; } diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c index adbaa30d3c5a..cbacdf4f98ed 100644 --- a/drivers/soc/fsl/qbman/qman_portal.c +++ b/drivers/soc/fsl/qbman/qman_portal.c @@ -237,30 +237,27 @@ static int qman_portal_probe(struct platform_device *pdev) addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM, DPAA_PORTAL_CE); if (!addr_phys[0]) { - dev_err(dev, "Can't get %s property 'reg::CE'\n", - node->full_name); + dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node); return -ENXIO; } addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM, DPAA_PORTAL_CI); if (!addr_phys[1]) { - dev_err(dev, "Can't get %s property 'reg::CI'\n", - node->full_name); + dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node); return -ENXIO; } err = of_property_read_u32(node, "cell-index", &val); if (err) { - dev_err(dev, "Can't get %s property 'cell-index'\n", - node->full_name); + dev_err(dev, "Can't get %pOF property 'cell-index'\n", node); return err; } pcfg->channel = val; pcfg->cpu = -1; irq = platform_get_irq(pdev, 0); if (irq <= 0) { - dev_err(dev, "Can't get %s IRQ\n", node->full_name); + dev_err(dev, "Can't get %pOF IRQ\n", node); return -ENXIO; } pcfg->irq = irq; 
diff --git a/drivers/soc/fsl/qe/gpio.c b/drivers/soc/fsl/qe/gpio.c index 0aaf429f31d5..3b27075c21a7 100644 --- a/drivers/soc/fsl/qe/gpio.c +++ b/drivers/soc/fsl/qe/gpio.c @@ -304,8 +304,8 @@ static int __init qe_add_gpiochips(void) goto err; continue; err: - pr_err("%s: registration failed with status %d\n", - np->full_name, ret); + pr_err("%pOF: registration failed with status %d\n", + np, ret); kfree(qe_gc); /* try others anyway */ } diff --git a/drivers/soc/lantiq/Makefile b/drivers/soc/lantiq/Makefile new file mode 100644 index 000000000000..be9e866d53e5 --- /dev/null +++ b/drivers/soc/lantiq/Makefile @@ -0,0 +1,2 @@ +obj-y += fpi-bus.o +obj-$(CONFIG_XRX200_PHY_FW) += gphy.o diff --git a/drivers/soc/lantiq/fpi-bus.c b/drivers/soc/lantiq/fpi-bus.c new file mode 100644 index 000000000000..a671c9984c4c --- /dev/null +++ b/drivers/soc/lantiq/fpi-bus.c @@ -0,0 +1,87 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * Copyright (C) 2011-2015 John Crispin + * Copyright (C) 2015 Martin Blumenstingl + * Copyright (C) 2017 Hauke Mehrtens + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define XBAR_ALWAYS_LAST 0x430 +#define XBAR_FPI_BURST_EN BIT(1) +#define XBAR_AHB_BURST_EN BIT(2) + +#define RCU_VR9_BE_AHB1S 0x00000008 + +static int ltq_fpi_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct resource *res_xbar; + struct regmap *rcu_regmap; + void __iomem *xbar_membase; + u32 rcu_ahb_endianness_reg_offset; + int ret; + + res_xbar = platform_get_resource(pdev, IORESOURCE_MEM, 0); + xbar_membase = devm_ioremap_resource(dev, res_xbar); + if (IS_ERR(xbar_membase)) + return PTR_ERR(xbar_membase); + + /* RCU configuration is optional */ + rcu_regmap = syscon_regmap_lookup_by_phandle(np, "lantiq,rcu"); + if (IS_ERR(rcu_regmap)) + return PTR_ERR(rcu_regmap); + + ret = device_property_read_u32(dev, "lantiq,offset-endianness", + &rcu_ahb_endianness_reg_offset); + if (ret) { + dev_err(&pdev->dev, "Failed to get RCU reg offset\n"); + return ret; + } + + ret = regmap_update_bits(rcu_regmap, rcu_ahb_endianness_reg_offset, + RCU_VR9_BE_AHB1S, RCU_VR9_BE_AHB1S); + if (ret) { + dev_warn(&pdev->dev, + "Failed to configure RCU AHB endianness\n"); + return ret; + } + + /* disable fpi burst */ + ltq_w32_mask(XBAR_FPI_BURST_EN, 0, xbar_membase + XBAR_ALWAYS_LAST); + + return of_platform_populate(dev->of_node, NULL, NULL, dev); +} + +static const struct of_device_id ltq_fpi_match[] = { + { .compatible = "lantiq,xrx200-fpi" }, + {}, +}; +MODULE_DEVICE_TABLE(of, ltq_fpi_match); + +static struct platform_driver ltq_fpi_driver = { + .probe = ltq_fpi_probe, + .driver = { + .name = "fpi-xway", + .of_match_table = ltq_fpi_match, + }, +}; + +module_platform_driver(ltq_fpi_driver); + +MODULE_DESCRIPTION("Lantiq FPI bus driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/soc/lantiq/gphy.c b/drivers/soc/lantiq/gphy.c new file mode 100644 index 000000000000..8d8659463b3e --- /dev/null +++ b/drivers/soc/lantiq/gphy.c @@ -0,0 +1,260 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + * Copyright (C) 2012 John Crispin + * Copyright (C) 2016 Martin Blumenstingl + * Copyright (C) 2017 Hauke Mehrtens + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define XRX200_GPHY_FW_ALIGN (16 * 1024) + +struct xway_gphy_priv { + struct clk *gphy_clk_gate; + struct reset_control *gphy_reset; + struct reset_control *gphy_reset2; + struct notifier_block gphy_reboot_nb; + void __iomem *membase; + char *fw_name; +}; + +struct xway_gphy_match_data { + char *fe_firmware_name; + char *ge_firmware_name; +}; + +static const struct xway_gphy_match_data xrx200a1x_gphy_data = { + .fe_firmware_name = "lantiq/xrx200_phy22f_a14.bin", + .ge_firmware_name = "lantiq/xrx200_phy11g_a14.bin", +}; + +static const struct xway_gphy_match_data xrx200a2x_gphy_data = { + .fe_firmware_name = "lantiq/xrx200_phy22f_a22.bin", + .ge_firmware_name = "lantiq/xrx200_phy11g_a22.bin", +}; + +static const struct xway_gphy_match_data xrx300_gphy_data = { + .fe_firmware_name = "lantiq/xrx300_phy22f_a21.bin", + .ge_firmware_name = "lantiq/xrx300_phy11g_a21.bin", +}; + +static const struct of_device_id xway_gphy_match[] = { + { .compatible = "lantiq,xrx200a1x-gphy", .data = &xrx200a1x_gphy_data }, + { .compatible = "lantiq,xrx200a2x-gphy", .data = &xrx200a2x_gphy_data }, + { .compatible = "lantiq,xrx300-gphy", .data = &xrx300_gphy_data }, + { .compatible = "lantiq,xrx330-gphy", .data = &xrx300_gphy_data }, + {}, +}; +MODULE_DEVICE_TABLE(of, xway_gphy_match); + +static struct xway_gphy_priv *to_xway_gphy_priv(struct notifier_block *nb) +{ + return container_of(nb, struct xway_gphy_priv, gphy_reboot_nb); +} + +static int xway_gphy_reboot_notify(struct notifier_block *reboot_nb, + unsigned long code, void *unused) +{ + struct xway_gphy_priv *priv = to_xway_gphy_priv(reboot_nb); + + if (priv) { + reset_control_assert(priv->gphy_reset); + reset_control_assert(priv->gphy_reset2); + } + + return NOTIFY_DONE; +} + +static int xway_gphy_load(struct device *dev, struct xway_gphy_priv *priv, + dma_addr_t *dev_addr) +{ + const struct firmware *fw; + void *fw_addr; + dma_addr_t dma_addr; + size_t size; + int ret; + + ret = request_firmware(&fw, priv->fw_name, dev); + if (ret) { + dev_err(dev, "failed to load firmware: %s, error: %i\n", + priv->fw_name, ret); + return ret; + } + + /* + * GPHY cores need the firmware code in a persistent and contiguous + * memory area with a 16 kB boundary aligned start address. 
+ */ + size = fw->size + XRX200_GPHY_FW_ALIGN; + + fw_addr = dmam_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL); + if (fw_addr) { + fw_addr = PTR_ALIGN(fw_addr, XRX200_GPHY_FW_ALIGN); + *dev_addr = ALIGN(dma_addr, XRX200_GPHY_FW_ALIGN); + memcpy(fw_addr, fw->data, fw->size); + } else { + dev_err(dev, "failed to alloc firmware memory\n"); + ret = -ENOMEM; + } + + release_firmware(fw); + + return ret; +} + +static int xway_gphy_of_probe(struct platform_device *pdev, + struct xway_gphy_priv *priv) +{ + struct device *dev = &pdev->dev; + const struct xway_gphy_match_data *gphy_fw_name_cfg; + u32 gphy_mode; + int ret; + struct resource *res_gphy; + + gphy_fw_name_cfg = of_device_get_match_data(dev); + + priv->gphy_clk_gate = devm_clk_get(dev, NULL); + if (IS_ERR(priv->gphy_clk_gate)) { + dev_err(dev, "Failed to lookup gate clock\n"); + return PTR_ERR(priv->gphy_clk_gate); + } + + res_gphy = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->membase = devm_ioremap_resource(dev, res_gphy); + if (IS_ERR(priv->membase)) + return PTR_ERR(priv->membase); + + priv->gphy_reset = devm_reset_control_get(dev, "gphy"); + if (IS_ERR(priv->gphy_reset)) { + if (PTR_ERR(priv->gphy_reset) != -EPROBE_DEFER) + dev_err(dev, "Failed to lookup gphy reset\n"); + return PTR_ERR(priv->gphy_reset); + } + + priv->gphy_reset2 = devm_reset_control_get_optional(dev, "gphy2"); + if (IS_ERR(priv->gphy_reset2)) + return PTR_ERR(priv->gphy_reset2); + + ret = device_property_read_u32(dev, "lantiq,gphy-mode", &gphy_mode); + /* Default to GE mode */ + if (ret) + gphy_mode = GPHY_MODE_GE; + + switch (gphy_mode) { + case GPHY_MODE_FE: + priv->fw_name = gphy_fw_name_cfg->fe_firmware_name; + break; + case GPHY_MODE_GE: + priv->fw_name = gphy_fw_name_cfg->ge_firmware_name; + break; + default: + dev_err(dev, "Unknown GPHY mode %d\n", gphy_mode); + return -EINVAL; + } + + return 0; +} + +static int xway_gphy_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct xway_gphy_priv *priv; + dma_addr_t fw_addr = 0; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + ret = xway_gphy_of_probe(pdev, priv); + if (ret) + return ret; + + ret = clk_prepare_enable(priv->gphy_clk_gate); + if (ret) + return ret; + + ret = xway_gphy_load(dev, priv, &fw_addr); + if (ret) { + clk_disable_unprepare(priv->gphy_clk_gate); + return ret; + } + + reset_control_assert(priv->gphy_reset); + reset_control_assert(priv->gphy_reset2); + + iowrite32be(fw_addr, priv->membase); + + reset_control_deassert(priv->gphy_reset); + reset_control_deassert(priv->gphy_reset2); + + /* assert the gphy reset because it can hang after a reboot: */ + priv->gphy_reboot_nb.notifier_call = xway_gphy_reboot_notify; + priv->gphy_reboot_nb.priority = -1; + + ret = register_reboot_notifier(&priv->gphy_reboot_nb); + if (ret) + dev_warn(dev, "Failed to register reboot notifier\n"); + + platform_set_drvdata(pdev, priv); + + return ret; +} + +static int xway_gphy_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct xway_gphy_priv *priv = platform_get_drvdata(pdev); + int ret; + + reset_control_assert(priv->gphy_reset); + reset_control_assert(priv->gphy_reset2); + + iowrite32be(0, priv->membase); + + clk_disable_unprepare(priv->gphy_clk_gate); + + ret = unregister_reboot_notifier(&priv->gphy_reboot_nb); + if (ret) + dev_warn(dev, "Failed to unregister reboot notifier\n"); + + return 0; +} + +static struct platform_driver xway_gphy_driver = { + .probe = xway_gphy_probe, + .remove = 
xway_gphy_remove, + .driver = { + .name = "xway-rcu-gphy", + .of_match_table = xway_gphy_match, + }, +}; + +module_platform_driver(xway_gphy_driver); + +MODULE_FIRMWARE("lantiq/xrx300_phy11g_a21.bin"); +MODULE_FIRMWARE("lantiq/xrx300_phy22f_a21.bin"); +MODULE_FIRMWARE("lantiq/xrx200_phy11g_a14.bin"); +MODULE_FIRMWARE("lantiq/xrx200_phy11g_a22.bin"); +MODULE_FIRMWARE("lantiq/xrx200_phy22f_a14.bin"); +MODULE_FIRMWARE("lantiq/xrx200_phy22f_a22.bin"); +MODULE_AUTHOR("Martin Blumenstingl "); +MODULE_DESCRIPTION("Lantiq XWAY GPHY Firmware Loader"); +MODULE_LICENSE("GPL"); diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c index c80a04e1b2b1..c2048382830f 100644 --- a/drivers/soc/mediatek/mtk-pmic-wrap.c +++ b/drivers/soc/mediatek/mtk-pmic-wrap.c @@ -1067,7 +1067,7 @@ static const struct pmic_wrapper_type pwrap_mt2701 = { .init_soc_specific = pwrap_mt2701_init_soc_specific, }; -static struct pmic_wrapper_type pwrap_mt8135 = { +static const struct pmic_wrapper_type pwrap_mt8135 = { .regs = mt8135_regs, .type = PWRAP_MT8135, .arb_en_all = 0x1ff, @@ -1079,7 +1079,7 @@ static struct pmic_wrapper_type pwrap_mt8135 = { .init_soc_specific = pwrap_mt8135_init_soc_specific, }; -static struct pmic_wrapper_type pwrap_mt8173 = { +static const struct pmic_wrapper_type pwrap_mt8173 = { .regs = mt8173_regs, .type = PWRAP_MT8173, .arb_en_all = 0x3f, @@ -1091,7 +1091,7 @@ static struct pmic_wrapper_type pwrap_mt8173 = { .init_soc_specific = pwrap_mt8173_init_soc_specific, }; -static struct of_device_id of_pwrap_match_tbl[] = { +static const struct of_device_id of_pwrap_match_tbl[] = { { .compatible = "mediatek,mt2701-pwrap", .data = &pwrap_mt2701, @@ -1233,8 +1233,8 @@ static int pwrap_probe(struct platform_device *pdev) ret = of_platform_populate(np, NULL, NULL, wrp->dev); if (ret) { - dev_dbg(wrp->dev, "failed to create child devices at %s\n", - np->full_name); + dev_dbg(wrp->dev, "failed to create child devices at %pOF\n", + np); goto err_out2; } diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c index ceb2cc495cd0..e1ce8b1b5090 100644 --- a/drivers/soc/mediatek/mtk-scpsys.c +++ b/drivers/soc/mediatek/mtk-scpsys.c @@ -22,6 +22,7 @@ #include #include +#include #include #define SPM_VDE_PWR_CON 0x0210 @@ -39,6 +40,11 @@ #define SPM_MFG_2D_PWR_CON 0x02c0 #define SPM_MFG_ASYNC_PWR_CON 0x02c4 #define SPM_USB_PWR_CON 0x02cc +#define SPM_ETHSYS_PWR_CON 0x02e0 /* MT7622 */ +#define SPM_HIF0_PWR_CON 0x02e4 /* MT7622 */ +#define SPM_HIF1_PWR_CON 0x02e8 /* MT7622 */ +#define SPM_WB_PWR_CON 0x02ec /* MT7622 */ + #define SPM_PWR_STATUS 0x060c #define SPM_PWR_STATUS_2ND 0x0610 @@ -64,6 +70,10 @@ #define PWR_STATUS_MFG_ASYNC BIT(23) #define PWR_STATUS_AUDIO BIT(24) #define PWR_STATUS_USB BIT(25) +#define PWR_STATUS_ETHSYS BIT(24) /* MT7622 */ +#define PWR_STATUS_HIF0 BIT(25) /* MT7622 */ +#define PWR_STATUS_HIF1 BIT(26) /* MT7622 */ +#define PWR_STATUS_WB BIT(27) /* MT7622 */ enum clk_id { CLK_NONE, @@ -73,6 +83,7 @@ enum clk_id { CLK_VENC_LT, CLK_ETHIF, CLK_VDEC, + CLK_HIFSEL, CLK_MAX, }; @@ -84,6 +95,7 @@ static const char * const clk_names[] = { "venc_lt", "ethif", "vdec", + "hif_sel", NULL, }; @@ -124,6 +136,19 @@ struct scp { struct scp_ctrl_reg ctrl_reg; }; +struct scp_subdomain { + int origin; + int subdomain; +}; + +struct scp_soc_data { + const struct scp_domain_data *domains; + int num_domains; + const struct scp_subdomain *subdomains; + int num_subdomains; + const struct scp_ctrl_reg regs; +}; + static int scpsys_domain_is_on(struct scp_domain 
*scpd) { struct scp *scp = scpd->scp; @@ -357,7 +382,7 @@ static void init_clks(struct platform_device *pdev, struct clk **clk) static struct scp *init_scp(struct platform_device *pdev, const struct scp_domain_data *scp_domain_data, int num, - struct scp_ctrl_reg *scp_ctrl_reg) + const struct scp_ctrl_reg *scp_ctrl_reg) { struct genpd_onecell_data *pd_data; struct resource *res; @@ -565,26 +590,6 @@ static const struct scp_domain_data scp_domain_data_mt2701[] = { }, }; -#define NUM_DOMAINS_MT2701 ARRAY_SIZE(scp_domain_data_mt2701) - -static int __init scpsys_probe_mt2701(struct platform_device *pdev) -{ - struct scp *scp; - struct scp_ctrl_reg scp_reg; - - scp_reg.pwr_sta_offs = SPM_PWR_STATUS; - scp_reg.pwr_sta2nd_offs = SPM_PWR_STATUS_2ND; - - scp = init_scp(pdev, scp_domain_data_mt2701, NUM_DOMAINS_MT2701, - &scp_reg); - if (IS_ERR(scp)) - return PTR_ERR(scp); - - mtk_register_power_domains(pdev, scp, NUM_DOMAINS_MT2701); - - return 0; -} - /* * MT6797 power domain support */ @@ -649,51 +654,62 @@ static const struct scp_domain_data scp_domain_data_mt6797[] = { }, }; -#define NUM_DOMAINS_MT6797 ARRAY_SIZE(scp_domain_data_mt6797) #define SPM_PWR_STATUS_MT6797 0x0180 #define SPM_PWR_STATUS_2ND_MT6797 0x0184 -static int __init scpsys_probe_mt6797(struct platform_device *pdev) -{ - struct scp *scp; - struct genpd_onecell_data *pd_data; - int ret; - struct scp_ctrl_reg scp_reg; +static const struct scp_subdomain scp_subdomain_mt6797[] = { + {MT6797_POWER_DOMAIN_MM, MT6797_POWER_DOMAIN_VDEC}, + {MT6797_POWER_DOMAIN_MM, MT6797_POWER_DOMAIN_ISP}, + {MT6797_POWER_DOMAIN_MM, MT6797_POWER_DOMAIN_VENC}, + {MT6797_POWER_DOMAIN_MM, MT6797_POWER_DOMAIN_MJC}, +}; - scp_reg.pwr_sta_offs = SPM_PWR_STATUS_MT6797; - scp_reg.pwr_sta2nd_offs = SPM_PWR_STATUS_2ND_MT6797; +/* + * MT7622 power domain support + */ - scp = init_scp(pdev, scp_domain_data_mt6797, NUM_DOMAINS_MT6797, - &scp_reg); - if (IS_ERR(scp)) - return PTR_ERR(scp); - - mtk_register_power_domains(pdev, scp, NUM_DOMAINS_MT6797); - - pd_data = &scp->pd_data; - - ret = pm_genpd_add_subdomain(pd_data->domains[MT6797_POWER_DOMAIN_MM], - pd_data->domains[MT6797_POWER_DOMAIN_VDEC]); - if (ret && IS_ENABLED(CONFIG_PM)) - dev_err(&pdev->dev, "Failed to add subdomain: %d\n", ret); - - ret = pm_genpd_add_subdomain(pd_data->domains[MT6797_POWER_DOMAIN_MM], - pd_data->domains[MT6797_POWER_DOMAIN_ISP]); - if (ret && IS_ENABLED(CONFIG_PM)) - dev_err(&pdev->dev, "Failed to add subdomain: %d\n", ret); - - ret = pm_genpd_add_subdomain(pd_data->domains[MT6797_POWER_DOMAIN_MM], - pd_data->domains[MT6797_POWER_DOMAIN_VENC]); - if (ret && IS_ENABLED(CONFIG_PM)) - dev_err(&pdev->dev, "Failed to add subdomain: %d\n", ret); - - ret = pm_genpd_add_subdomain(pd_data->domains[MT6797_POWER_DOMAIN_MM], - pd_data->domains[MT6797_POWER_DOMAIN_MJC]); - if (ret && IS_ENABLED(CONFIG_PM)) - dev_err(&pdev->dev, "Failed to add subdomain: %d\n", ret); - - return 0; -} +static const struct scp_domain_data scp_domain_data_mt7622[] = { + [MT7622_POWER_DOMAIN_ETHSYS] = { + .name = "ethsys", + .sta_mask = PWR_STATUS_ETHSYS, + .ctl_offs = SPM_ETHSYS_PWR_CON, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(15, 12), + .clk_id = {CLK_NONE}, + .bus_prot_mask = MT7622_TOP_AXI_PROT_EN_ETHSYS, + .active_wakeup = true, + }, + [MT7622_POWER_DOMAIN_HIF0] = { + .name = "hif0", + .sta_mask = PWR_STATUS_HIF0, + .ctl_offs = SPM_HIF0_PWR_CON, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(15, 12), + .clk_id = {CLK_HIFSEL}, + .bus_prot_mask = 
MT7622_TOP_AXI_PROT_EN_HIF0, + .active_wakeup = true, + }, + [MT7622_POWER_DOMAIN_HIF1] = { + .name = "hif1", + .sta_mask = PWR_STATUS_HIF1, + .ctl_offs = SPM_HIF1_PWR_CON, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(15, 12), + .clk_id = {CLK_HIFSEL}, + .bus_prot_mask = MT7622_TOP_AXI_PROT_EN_HIF1, + .active_wakeup = true, + }, + [MT7622_POWER_DOMAIN_WB] = { + .name = "wb", + .sta_mask = PWR_STATUS_WB, + .ctl_offs = SPM_WB_PWR_CON, + .sram_pdn_bits = 0, + .sram_pdn_ack_bits = 0, + .clk_id = {CLK_NONE}, + .bus_prot_mask = MT7622_TOP_AXI_PROT_EN_WB, + .active_wakeup = true, + }, +}; /* * MT8173 power domain support @@ -789,39 +805,50 @@ static const struct scp_domain_data scp_domain_data_mt8173[] = { }, }; -#define NUM_DOMAINS_MT8173 ARRAY_SIZE(scp_domain_data_mt8173) +static const struct scp_subdomain scp_subdomain_mt8173[] = { + {MT8173_POWER_DOMAIN_MFG_ASYNC, MT8173_POWER_DOMAIN_MFG_2D}, + {MT8173_POWER_DOMAIN_MFG_2D, MT8173_POWER_DOMAIN_MFG}, +}; -static int __init scpsys_probe_mt8173(struct platform_device *pdev) -{ - struct scp *scp; - struct genpd_onecell_data *pd_data; - int ret; - struct scp_ctrl_reg scp_reg; +static const struct scp_soc_data mt2701_data = { + .domains = scp_domain_data_mt2701, + .num_domains = ARRAY_SIZE(scp_domain_data_mt2701), + .regs = { + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND + } +}; - scp_reg.pwr_sta_offs = SPM_PWR_STATUS; - scp_reg.pwr_sta2nd_offs = SPM_PWR_STATUS_2ND; +static const struct scp_soc_data mt6797_data = { + .domains = scp_domain_data_mt6797, + .num_domains = ARRAY_SIZE(scp_domain_data_mt6797), + .subdomains = scp_subdomain_mt6797, + .num_subdomains = ARRAY_SIZE(scp_subdomain_mt6797), + .regs = { + .pwr_sta_offs = SPM_PWR_STATUS_MT6797, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND_MT6797 + } +}; - scp = init_scp(pdev, scp_domain_data_mt8173, NUM_DOMAINS_MT8173, - &scp_reg); - if (IS_ERR(scp)) - return PTR_ERR(scp); +static const struct scp_soc_data mt7622_data = { + .domains = scp_domain_data_mt7622, + .num_domains = ARRAY_SIZE(scp_domain_data_mt7622), + .regs = { + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND + } +}; - mtk_register_power_domains(pdev, scp, NUM_DOMAINS_MT8173); - - pd_data = &scp->pd_data; - - ret = pm_genpd_add_subdomain(pd_data->domains[MT8173_POWER_DOMAIN_MFG_ASYNC], - pd_data->domains[MT8173_POWER_DOMAIN_MFG_2D]); - if (ret && IS_ENABLED(CONFIG_PM)) - dev_err(&pdev->dev, "Failed to add subdomain: %d\n", ret); - - ret = pm_genpd_add_subdomain(pd_data->domains[MT8173_POWER_DOMAIN_MFG_2D], - pd_data->domains[MT8173_POWER_DOMAIN_MFG]); - if (ret && IS_ENABLED(CONFIG_PM)) - dev_err(&pdev->dev, "Failed to add subdomain: %d\n", ret); - - return 0; -} +static const struct scp_soc_data mt8173_data = { + .domains = scp_domain_data_mt8173, + .num_domains = ARRAY_SIZE(scp_domain_data_mt8173), + .subdomains = scp_subdomain_mt8173, + .num_subdomains = ARRAY_SIZE(scp_subdomain_mt8173), + .regs = { + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND + } +}; /* * scpsys driver init @@ -830,13 +857,16 @@ static int __init scpsys_probe_mt8173(struct platform_device *pdev) static const struct of_device_id of_scpsys_match_tbl[] = { { .compatible = "mediatek,mt2701-scpsys", - .data = scpsys_probe_mt2701, + .data = &mt2701_data, }, { .compatible = "mediatek,mt6797-scpsys", - .data = scpsys_probe_mt6797, + .data = &mt6797_data, + }, { + .compatible = "mediatek,mt7622-scpsys", + .data = &mt7622_data, }, { .compatible = "mediatek,mt8173-scpsys", - .data 
= scpsys_probe_mt8173, + .data = &mt8173_data, }, { /* sentinel */ } @@ -844,16 +874,33 @@ static const struct of_device_id of_scpsys_match_tbl[] = { static int scpsys_probe(struct platform_device *pdev) { - int (*probe)(struct platform_device *); - const struct of_device_id *of_id; + const struct of_device_id *match; + const struct scp_subdomain *sd; + const struct scp_soc_data *soc; + struct scp *scp; + struct genpd_onecell_data *pd_data; + int i, ret; - of_id = of_match_node(of_scpsys_match_tbl, pdev->dev.of_node); - if (!of_id || !of_id->data) - return -EINVAL; + match = of_match_device(of_scpsys_match_tbl, &pdev->dev); + soc = (const struct scp_soc_data *)match->data; - probe = of_id->data; + scp = init_scp(pdev, soc->domains, soc->num_domains, &soc->regs); + if (IS_ERR(scp)) + return PTR_ERR(scp); - return probe(pdev); + mtk_register_power_domains(pdev, scp, soc->num_domains); + + pd_data = &scp->pd_data; + + for (i = 0, sd = soc->subdomains ; i < soc->num_subdomains ; i++) { + ret = pm_genpd_add_subdomain(pd_data->domains[sd->origin], + pd_data->domains[sd->subdomain]); + if (ret && IS_ENABLED(CONFIG_PM)) + dev_err(&pdev->dev, "Failed to add subdomain: %d\n", + ret); + } + + return 0; } static struct platform_driver scpsys_drv = { diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index 9fca977ef18d..b00bccddcd3b 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -1,6 +1,17 @@ # # QCOM Soc drivers # +menu "Qualcomm SoC drivers" + +config QCOM_GLINK_SSR + tristate "Qualcomm Glink SSR driver" + depends on RPMSG + depends on QCOM_RPROC_COMMON + help + Say y here to enable GLINK SSR support. The GLINK SSR driver + implements the SSR protocol for notifying the remote processor about + neighboring subsystems going up or down. + config QCOM_GSBI tristate "QCOM General Serial Bus Interface" depends on ARCH_QCOM @@ -74,3 +85,5 @@ config QCOM_WCNSS_CTRL help Client driver for the WCNSS_CTRL SMD channel, used to download nv firmware to a newly booted WCNSS chip. + +endmenu diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index 414f0de274fa..f151de41eb93 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile @@ -1,3 +1,4 @@ +obj-$(CONFIG_QCOM_GLINK_SSR) += glink_ssr.o obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o obj-$(CONFIG_QCOM_MDT_LOADER) += mdt_loader.o obj-$(CONFIG_QCOM_PM) += spm.o diff --git a/drivers/soc/qcom/glink_ssr.c b/drivers/soc/qcom/glink_ssr.c new file mode 100644 index 000000000000..19c7399eddb5 --- /dev/null +++ b/drivers/soc/qcom/glink_ssr.c @@ -0,0 +1,164 @@ +/* + * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2017, Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
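The scpsys rework above replaces the per-SoC probe callbacks formerly stored in of_device_id .data with constant per-SoC scp_soc_data tables, so a single scpsys_probe() can serve every supported chip and register subdomains from a generic list. A minimal sketch of that match-data pattern, with invented names ("vendor,demo-*", demo_soc_data) rather than the real scpsys structures:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct demo_soc_data {
	unsigned int num_domains;		/* per-SoC parameters */
};

static const struct demo_soc_data demo_v1_data = { .num_domains = 4 };
static const struct demo_soc_data demo_v2_data = { .num_domains = 8 };

static const struct of_device_id demo_of_match[] = {
	{ .compatible = "vendor,demo-v1", .data = &demo_v1_data },
	{ .compatible = "vendor,demo-v2", .data = &demo_v2_data },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, demo_of_match);

static int demo_probe(struct platform_device *pdev)
{
	/* wraps of_match_device() and returns the matched .data pointer */
	const struct demo_soc_data *soc = of_device_get_match_data(&pdev->dev);

	if (!soc)
		return -EINVAL;

	dev_info(&pdev->dev, "probed, %u power domains\n", soc->num_domains);
	return 0;
}

static struct platform_driver demo_driver = {
	.probe	= demo_probe,
	.driver	= {
		.name		= "demo-scpsys",
		.of_match_table	= demo_of_match,
	},
};
module_platform_driver(demo_driver);
MODULE_LICENSE("GPL v2");

Because the per-SoC data is const and reached through the match entry, adding a new SoC such as MT7622 becomes a data-only change plus one new compatible line.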
+ */ + +#include +#include +#include +#include +#include + +/** + * struct do_cleanup_msg - The data structure for an SSR do_cleanup message + * version: The G-Link SSR protocol version + * command: The G-Link SSR command - do_cleanup + * seq_num: Sequence number + * name_len: Length of the name of the subsystem being restarted + * name: G-Link edge name of the subsystem being restarted + */ +struct do_cleanup_msg { + __le32 version; + __le32 command; + __le32 seq_num; + __le32 name_len; + char name[32]; +}; + +/** + * struct cleanup_done_msg - The data structure for an SSR cleanup_done message + * version: The G-Link SSR protocol version + * response: The G-Link SSR response to a do_cleanup command, cleanup_done + * seq_num: Sequence number + */ +struct cleanup_done_msg { + __le32 version; + __le32 response; + __le32 seq_num; +}; + +/** + * G-Link SSR protocol commands + */ +#define GLINK_SSR_DO_CLEANUP 0 +#define GLINK_SSR_CLEANUP_DONE 1 + +struct glink_ssr { + struct device *dev; + struct rpmsg_endpoint *ept; + + struct notifier_block nb; + + u32 seq_num; + struct completion completion; +}; + +static int qcom_glink_ssr_callback(struct rpmsg_device *rpdev, + void *data, int len, void *priv, u32 addr) +{ + struct cleanup_done_msg *msg = data; + struct glink_ssr *ssr = dev_get_drvdata(&rpdev->dev); + + if (len < sizeof(*msg)) { + dev_err(ssr->dev, "message too short\n"); + return -EINVAL; + } + + if (le32_to_cpu(msg->version) != 0) + return -EINVAL; + + if (le32_to_cpu(msg->response) != GLINK_SSR_CLEANUP_DONE) + return 0; + + if (le32_to_cpu(msg->seq_num) != ssr->seq_num) { + dev_err(ssr->dev, "invalid sequence number of response\n"); + return -EINVAL; + } + + complete(&ssr->completion); + + return 0; +} + +static int qcom_glink_ssr_notify(struct notifier_block *nb, unsigned long event, + void *data) +{ + struct glink_ssr *ssr = container_of(nb, struct glink_ssr, nb); + struct do_cleanup_msg msg; + char *ssr_name = data; + int ret; + + ssr->seq_num++; + reinit_completion(&ssr->completion); + + memset(&msg, 0, sizeof(msg)); + msg.command = cpu_to_le32(GLINK_SSR_DO_CLEANUP); + msg.seq_num = cpu_to_le32(ssr->seq_num); + msg.name_len = cpu_to_le32(strlen(ssr_name)); + strlcpy(msg.name, ssr_name, sizeof(msg.name)); + + ret = rpmsg_send(ssr->ept, &msg, sizeof(msg)); + if (ret < 0) + dev_err(ssr->dev, "failed to send cleanup message\n"); + + ret = wait_for_completion_timeout(&ssr->completion, HZ); + if (!ret) + dev_err(ssr->dev, "timeout waiting for cleanup done message\n"); + + return NOTIFY_DONE; +} + +static int qcom_glink_ssr_probe(struct rpmsg_device *rpdev) +{ + struct glink_ssr *ssr; + + ssr = devm_kzalloc(&rpdev->dev, sizeof(*ssr), GFP_KERNEL); + if (!ssr) + return -ENOMEM; + + init_completion(&ssr->completion); + + ssr->dev = &rpdev->dev; + ssr->ept = rpdev->ept; + ssr->nb.notifier_call = qcom_glink_ssr_notify; + + dev_set_drvdata(&rpdev->dev, ssr); + + return qcom_register_ssr_notifier(&ssr->nb); +} + +static void qcom_glink_ssr_remove(struct rpmsg_device *rpdev) +{ + struct glink_ssr *ssr = dev_get_drvdata(&rpdev->dev); + + qcom_unregister_ssr_notifier(&ssr->nb); +} + +static const struct rpmsg_device_id qcom_glink_ssr_match[] = { + { "glink_ssr" }, + {} +}; + +static struct rpmsg_driver qcom_glink_ssr_driver = { + .probe = qcom_glink_ssr_probe, + .remove = qcom_glink_ssr_remove, + .callback = qcom_glink_ssr_callback, + .id_table = qcom_glink_ssr_match, + .drv = { + .name = "qcom_glink_ssr", + }, +}; +module_rpmsg_driver(qcom_glink_ssr_driver); + +MODULE_ALIAS("rpmsg:glink_ssr"); 
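The glink_ssr driver above subscribes to Qualcomm's subsystem-restart notifications via qcom_register_ssr_notifier(), and on each event sends a little-endian do_cleanup message over the "glink_ssr" rpmsg channel, waiting up to one second (HZ jiffies) for a cleanup_done reply with the matching sequence number. The plumbing underneath it is the stock kernel notifier-chain API; a self-contained sketch with invented demo_* names (not the actual qcom implementation, which lives behind qcom_register_ssr_notifier()):

#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/printk.h>

/* Provider side: the chain a remoteproc core would fire on a restart. */
static BLOCKING_NOTIFIER_HEAD(demo_ssr_chain);

static void demo_notify_ssr(const char *edge_name)
{
	/* the void * argument arrives as 'data' in every subscriber */
	blocking_notifier_call_chain(&demo_ssr_chain, 0, (void *)edge_name);
}

/* Consumer side: the same shape glink_ssr uses. */
static int demo_ssr_notify(struct notifier_block *nb, unsigned long event,
			   void *data)
{
	pr_info("remote \"%s\" went down, cleaning up\n", (char *)data);
	return NOTIFY_DONE;
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_ssr_notify,
};

static int __init demo_init(void)
{
	int ret;

	ret = blocking_notifier_chain_register(&demo_ssr_chain, &demo_nb);
	if (ret)
		return ret;

	demo_notify_ssr("modem");	/* pretend a remote just restarted */
	return 0;
}
module_init(demo_init);

static void __exit demo_exit(void)
{
	blocking_notifier_chain_unregister(&demo_ssr_chain, &demo_nb);
}
module_exit(demo_exit);
MODULE_LICENSE("GPL v2");

Returning NOTIFY_DONE even when the send or the wait times out, as glink_ssr does, keeps one misbehaving remote from stalling the rest of the chain.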
+MODULE_DESCRIPTION("Qualcomm GLINK SSR notifier"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c index bd63df0d14e0..08bd8549242a 100644 --- a/drivers/soc/qcom/mdt_loader.c +++ b/drivers/soc/qcom/mdt_loader.c @@ -178,14 +178,13 @@ int qcom_mdt_load(struct device *dev, const struct firmware *fw, if (phdr->p_filesz) { sprintf(fw_name + fw_name_len - 3, "b%02d", i); - ret = request_firmware(&seg_fw, fw_name, dev); + ret = request_firmware_into_buf(&seg_fw, fw_name, dev, + ptr, phdr->p_filesz); if (ret) { dev_err(dev, "failed to load %s\n", fw_name); break; } - memcpy(ptr, seg_fw->data, seg_fw->size); - release_firmware(seg_fw); } diff --git a/drivers/soc/qcom/smsm.c b/drivers/soc/qcom/smsm.c index dc540ea92e9d..403bea9d546b 100644 --- a/drivers/soc/qcom/smsm.c +++ b/drivers/soc/qcom/smsm.c @@ -496,7 +496,8 @@ static int qcom_smsm_probe(struct platform_device *pdev) if (!smsm->hosts) return -ENOMEM; - local_node = of_find_node_with_property(pdev->dev.of_node, "#qcom,smem-state-cells"); + local_node = of_find_node_with_property(of_node_get(pdev->dev.of_node), + "#qcom,smem-state-cells"); if (!local_node) { dev_err(&pdev->dev, "no state entry\n"); return -EINVAL; diff --git a/drivers/soc/qcom/wcnss_ctrl.c b/drivers/soc/qcom/wcnss_ctrl.c index b9069184df19..d008e5b82db4 100644 --- a/drivers/soc/qcom/wcnss_ctrl.c +++ b/drivers/soc/qcom/wcnss_ctrl.c @@ -347,6 +347,7 @@ static const struct of_device_id wcnss_ctrl_of_match[] = { { .compatible = "qcom,wcnss", }, {} }; +MODULE_DEVICE_TABLE(of, wcnss_ctrl_of_match); static struct rpmsg_driver wcnss_ctrl_driver = { .probe = wcnss_ctrl_probe, diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig index 87a4be46bd98..567414cb42ba 100644 --- a/drivers/soc/renesas/Kconfig +++ b/drivers/soc/renesas/Kconfig @@ -3,7 +3,7 @@ config SOC_RENESAS default y if ARCH_RENESAS select SOC_BUS select RST_RCAR if ARCH_RCAR_GEN1 || ARCH_RCAR_GEN2 || \ - ARCH_R8A7795 || ARCH_R8A7796 + ARCH_R8A7795 || ARCH_R8A7796 || ARCH_R8A77995 select SYSC_R8A7743 if ARCH_R8A7743 select SYSC_R8A7745 if ARCH_R8A7745 select SYSC_R8A7779 if ARCH_R8A7779 @@ -13,6 +13,7 @@ config SOC_RENESAS select SYSC_R8A7794 if ARCH_R8A7794 select SYSC_R8A7795 if ARCH_R8A7795 select SYSC_R8A7796 if ARCH_R8A7796 + select SYSC_R8A77995 if ARCH_R8A77995 if SOC_RENESAS @@ -53,6 +54,10 @@ config SYSC_R8A7796 bool "R-Car M3-W System Controller support" if COMPILE_TEST select SYSC_RCAR +config SYSC_R8A77995 + bool "R-Car D3 System Controller support" if COMPILE_TEST + select SYSC_RCAR + # Family config RST_RCAR bool "R-Car Reset Controller support" if COMPILE_TEST diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile index 1a1a297b26a7..6b6e7f16104c 100644 --- a/drivers/soc/renesas/Makefile +++ b/drivers/soc/renesas/Makefile @@ -11,6 +11,7 @@ obj-$(CONFIG_SYSC_R8A7792) += r8a7792-sysc.o obj-$(CONFIG_SYSC_R8A7794) += r8a7794-sysc.o obj-$(CONFIG_SYSC_R8A7795) += r8a7795-sysc.o obj-$(CONFIG_SYSC_R8A7796) += r8a7796-sysc.o +obj-$(CONFIG_SYSC_R8A77995) += r8a77995-sysc.o # Family obj-$(CONFIG_RST_RCAR) += rcar-rst.o diff --git a/drivers/soc/renesas/r8a77995-sysc.c b/drivers/soc/renesas/r8a77995-sysc.c new file mode 100644 index 000000000000..f718429cab02 --- /dev/null +++ b/drivers/soc/renesas/r8a77995-sysc.c @@ -0,0 +1,31 @@ +/* + * Renesas R-Car D3 System Controller + * + * Copyright (C) 2017 Glider bvba + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public 
License as published by + * the Free Software Foundation; version 2 of the License. + */ + +#include +#include +#include + +#include + +#include "rcar-sysc.h" + +static struct rcar_sysc_area r8a77995_areas[] __initdata = { + { "always-on", 0, 0, R8A77995_PD_ALWAYS_ON, -1, PD_ALWAYS_ON }, + { "ca53-scu", 0x140, 0, R8A77995_PD_CA53_SCU, R8A77995_PD_ALWAYS_ON, + PD_SCU }, + { "ca53-cpu0", 0x200, 0, R8A77995_PD_CA53_CPU0, R8A77995_PD_CA53_SCU, + PD_CPU_NOCR }, +}; + + +const struct rcar_sysc_info r8a77995_sysc_info __initconst = { + .areas = r8a77995_areas, + .num_areas = ARRAY_SIZE(r8a77995_areas), +}; diff --git a/drivers/soc/renesas/rcar-rst.c b/drivers/soc/renesas/rcar-rst.c index a6d1c26d3167..baa47014e96b 100644 --- a/drivers/soc/renesas/rcar-rst.c +++ b/drivers/soc/renesas/rcar-rst.c @@ -41,6 +41,7 @@ static const struct of_device_id rcar_rst_matches[] __initconst = { /* R-Car Gen3 is handled like R-Car Gen2 */ { .compatible = "renesas,r8a7795-rst", .data = &rcar_rst_gen2 }, { .compatible = "renesas,r8a7796-rst", .data = &rcar_rst_gen2 }, + { .compatible = "renesas,r8a77995-rst", .data = &rcar_rst_gen2 }, { /* sentinel */ } }; @@ -61,7 +62,7 @@ static int __init rcar_rst_init(void) base = of_iomap(np, 0); if (!base) { - pr_warn("%s: Cannot map regs\n", np->full_name); + pr_warn("%pOF: Cannot map regs\n", np); error = -ENOMEM; goto out_put; } @@ -70,7 +71,7 @@ static int __init rcar_rst_init(void) cfg = match->data; saved_mode = ioread32(base + cfg->modemr); - pr_debug("%s: MODE = 0x%08x\n", np->full_name, saved_mode); + pr_debug("%pOF: MODE = 0x%08x\n", np, saved_mode); out_put: of_node_put(np); diff --git a/drivers/soc/renesas/rcar-sysc.c b/drivers/soc/renesas/rcar-sysc.c index 7c8da3c90011..c8406e81640f 100644 --- a/drivers/soc/renesas/rcar-sysc.c +++ b/drivers/soc/renesas/rcar-sysc.c @@ -283,6 +283,9 @@ static const struct of_device_id rcar_sysc_matches[] = { #endif #ifdef CONFIG_SYSC_R8A7796 { .compatible = "renesas,r8a7796-sysc", .data = &r8a7796_sysc_info }, +#endif +#ifdef CONFIG_SYSC_R8A77995 + { .compatible = "renesas,r8a77995-sysc", .data = &r8a77995_sysc_info }, #endif { /* sentinel */ } }; @@ -323,7 +326,7 @@ static int __init rcar_sysc_pd_init(void) base = of_iomap(np, 0); if (!base) { - pr_warn("%s: Cannot map regs\n", np->full_name); + pr_warn("%pOF: Cannot map regs\n", np); error = -ENOMEM; goto out_put; } @@ -348,13 +351,13 @@ static int __init rcar_sysc_pd_init(void) */ syscimr = ioread32(base + SYSCIMR); syscimr |= syscier; - pr_debug("%s: syscimr = 0x%08x\n", np->full_name, syscimr); + pr_debug("%pOF: syscimr = 0x%08x\n", np, syscimr); iowrite32(syscimr, base + SYSCIMR); /* * SYSC needs all interrupt sources enabled to control power. 
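The rcar-rst and rcar-sysc hunks above (and the Samsung and Rockchip power-domain changes later in this series) also convert device-tree diagnostics from np->full_name to the %pOF printk specifier for device_node pointers. A before/after sketch, assuming np is a node the caller already holds:

#include <linux/of.h>
#include <linux/printk.h>

static void demo_report(struct device_node *np)
{
	/* old style: reaches into the node and assumes full_name is a path */
	pr_warn("%s: cannot map regs\n", np->full_name);

	/* new style: hand the node to printk and let %pOF format its path */
	pr_warn("%pOF: cannot map regs\n", np);
}

Beyond being shorter, this decouples callers from how (or whether) the full path is stored in struct device_node, which is why the conversion shows up across so many drivers in this cycle.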
*/ - pr_debug("%s: syscier = 0x%08x\n", np->full_name, syscier); + pr_debug("%pOF: syscier = 0x%08x\n", np, syscier); iowrite32(syscier, base + SYSCIER); for (i = 0; i < info->num_areas; i++) { diff --git a/drivers/soc/renesas/rcar-sysc.h b/drivers/soc/renesas/rcar-sysc.h index 1a5bebaf54ba..2f524922c4d2 100644 --- a/drivers/soc/renesas/rcar-sysc.h +++ b/drivers/soc/renesas/rcar-sysc.h @@ -58,6 +58,7 @@ extern const struct rcar_sysc_info r8a7792_sysc_info; extern const struct rcar_sysc_info r8a7794_sysc_info; extern const struct rcar_sysc_info r8a7795_sysc_info; extern const struct rcar_sysc_info r8a7796_sysc_info; +extern const struct rcar_sysc_info r8a77995_sysc_info; /* diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c index ca26f13d399c..90d6b7a4340a 100644 --- a/drivers/soc/renesas/renesas-soc.c +++ b/drivers/soc/renesas/renesas-soc.c @@ -144,6 +144,11 @@ static const struct renesas_soc soc_rcar_m3_w __initconst __maybe_unused = { .id = 0x52, }; +static const struct renesas_soc soc_rcar_d3 __initconst __maybe_unused = { + .family = &fam_rcar_gen3, + .id = 0x58, +}; + static const struct renesas_soc soc_shmobile_ag5 __initconst __maybe_unused = { .family = &fam_shmobile, .id = 0x37, @@ -199,6 +204,9 @@ static const struct of_device_id renesas_socs[] __initconst = { #ifdef CONFIG_ARCH_R8A7796 { .compatible = "renesas,r8a7796", .data = &soc_rcar_m3_w }, #endif +#ifdef CONFIG_ARCH_R8A77995 + { .compatible = "renesas,r8a77995", .data = &soc_rcar_d3 }, +#endif #ifdef CONFIG_ARCH_SH73A0 { .compatible = "renesas,sh73a0", .data = &soc_shmobile_ag5 }, #endif diff --git a/drivers/soc/rockchip/grf.c b/drivers/soc/rockchip/grf.c index d61db34ad6dd..15e71fd6c513 100644 --- a/drivers/soc/rockchip/grf.c +++ b/drivers/soc/rockchip/grf.c @@ -54,6 +54,17 @@ static const struct rockchip_grf_info rk3288_grf __initconst = { .num_values = ARRAY_SIZE(rk3288_defaults), }; +#define RK3328_GRF_SOC_CON4 0x410 + +static const struct rockchip_grf_value rk3328_defaults[] __initconst = { + { "jtag switching", RK3328_GRF_SOC_CON4, HIWORD_UPDATE(0, 1, 12) }, +}; + +static const struct rockchip_grf_info rk3328_grf __initconst = { + .values = rk3328_defaults, + .num_values = ARRAY_SIZE(rk3328_defaults), +}; + #define RK3368_GRF_SOC_CON15 0x43c static const struct rockchip_grf_value rk3368_defaults[] __initconst = { @@ -83,6 +94,9 @@ static const struct of_device_id rockchip_grf_dt_match[] __initconst = { }, { .compatible = "rockchip,rk3288-grf", .data = (void *)&rk3288_grf, + }, { + .compatible = "rockchip,rk3328-grf", + .data = (void *)&rk3328_grf, }, { .compatible = "rockchip,rk3368-grf", .data = (void *)&rk3368_grf, diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c index 796c46a6cbe7..40b75748835f 100644 --- a/drivers/soc/rockchip/pm_domains.c +++ b/drivers/soc/rockchip/pm_domains.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -730,6 +731,16 @@ static const struct rockchip_domain_info rk3328_pm_domains[] = { [RK3328_PD_VPU] = DOMAIN_RK3328(-1, 9, 9, false), }; +static const struct rockchip_domain_info rk3366_pm_domains[] = { + [RK3366_PD_PERI] = DOMAIN_RK3368(10, 10, 6, true), + [RK3366_PD_VIO] = DOMAIN_RK3368(14, 14, 8, false), + [RK3366_PD_VIDEO] = DOMAIN_RK3368(13, 13, 7, false), + [RK3366_PD_RKVDEC] = DOMAIN_RK3368(11, 11, 7, false), + [RK3366_PD_WIFIBT] = DOMAIN_RK3368(8, 8, 9, false), + [RK3366_PD_VPU] = DOMAIN_RK3368(12, 12, 7, false), + [RK3366_PD_GPU] = DOMAIN_RK3368(15, 15, 2, false), +}; + static const struct 
rockchip_domain_info rk3368_pm_domains[] = { [RK3368_PD_PERI] = DOMAIN_RK3368(13, 12, 6, true), [RK3368_PD_VIO] = DOMAIN_RK3368(15, 14, 8, false), @@ -794,6 +805,23 @@ static const struct rockchip_pmu_info rk3328_pmu = { .domain_info = rk3328_pm_domains, }; +static const struct rockchip_pmu_info rk3366_pmu = { + .pwr_offset = 0x0c, + .status_offset = 0x10, + .req_offset = 0x3c, + .idle_offset = 0x40, + .ack_offset = 0x40, + + .core_pwrcnt_offset = 0x48, + .gpu_pwrcnt_offset = 0x50, + + .core_power_transition_time = 24, + .gpu_power_transition_time = 24, + + .num_domains = ARRAY_SIZE(rk3366_pm_domains), + .domain_info = rk3366_pm_domains, +}; + static const struct rockchip_pmu_info rk3368_pmu = { .pwr_offset = 0x0c, .status_offset = 0x10, @@ -833,6 +861,10 @@ static const struct of_device_id rockchip_pm_domain_dt_match[] = { .compatible = "rockchip,rk3328-power-controller", .data = (void *)&rk3328_pmu, }, + { + .compatible = "rockchip,rk3366-power-controller", + .data = (void *)&rk3366_pmu, + }, { .compatible = "rockchip,rk3368-power-controller", .data = (void *)&rk3368_pmu, diff --git a/drivers/soc/samsung/pm_domains.c b/drivers/soc/samsung/pm_domains.c index a6a5d807cc2b..7c4fec1f93b5 100644 --- a/drivers/soc/samsung/pm_domains.c +++ b/drivers/soc/samsung/pm_domains.c @@ -147,7 +147,7 @@ static __init const char *exynos_get_domain_name(struct device_node *node) const char *name; if (of_property_read_string(node, "label", &name) < 0) - name = strrchr(node->full_name, '/') + 1; + name = kbasename(node->full_name); return kstrdup_const(name, GFP_KERNEL); } @@ -237,11 +237,11 @@ static __init int exynos4_pm_init_power_domain(void) continue; if (of_genpd_add_subdomain(&parent, &child)) - pr_warn("%s failed to add subdomain: %s\n", - parent.np->full_name, child.np->full_name); + pr_warn("%pOF failed to add subdomain: %pOF\n", + parent.np, child.np); else - pr_info("%s has as child subdomain: %s.\n", - parent.np->full_name, child.np->full_name); + pr_info("%pOF has as child subdomain: %pOF.\n", + parent.np, child.np); } return 0; diff --git a/drivers/soc/sunxi/sunxi_sram.c b/drivers/soc/sunxi/sunxi_sram.c index 99e354c8f53f..882be5ed7e84 100644 --- a/drivers/soc/sunxi/sunxi_sram.c +++ b/drivers/soc/sunxi/sunxi_sram.c @@ -23,6 +23,7 @@ struct sunxi_sram_func { char *func; u8 val; + u32 reg_val; }; struct sunxi_sram_data { @@ -39,10 +40,11 @@ struct sunxi_sram_desc { bool claimed; }; -#define SUNXI_SRAM_MAP(_val, _func) \ +#define SUNXI_SRAM_MAP(_reg_val, _val, _func) \ { \ .func = _func, \ .val = _val, \ + .reg_val = _reg_val, \ } #define SUNXI_SRAM_DATA(_name, _reg, _off, _width, ...) 
\ @@ -57,14 +59,20 @@ struct sunxi_sram_desc { static struct sunxi_sram_desc sun4i_a10_sram_a3_a4 = { .data = SUNXI_SRAM_DATA("A3-A4", 0x4, 0x4, 2, - SUNXI_SRAM_MAP(0, "cpu"), - SUNXI_SRAM_MAP(1, "emac")), + SUNXI_SRAM_MAP(0, 0, "cpu"), + SUNXI_SRAM_MAP(1, 1, "emac")), }; static struct sunxi_sram_desc sun4i_a10_sram_d = { .data = SUNXI_SRAM_DATA("D", 0x4, 0x0, 1, - SUNXI_SRAM_MAP(0, "cpu"), - SUNXI_SRAM_MAP(1, "usb-otg")), + SUNXI_SRAM_MAP(0, 0, "cpu"), + SUNXI_SRAM_MAP(1, 1, "usb-otg")), +}; + +static struct sunxi_sram_desc sun50i_a64_sram_c = { + .data = SUNXI_SRAM_DATA("C", 0x4, 24, 1, + SUNXI_SRAM_MAP(0, 1, "cpu"), + SUNXI_SRAM_MAP(1, 0, "de2")), }; static const struct of_device_id sunxi_sram_dt_ids[] = { @@ -76,6 +84,10 @@ static const struct of_device_id sunxi_sram_dt_ids[] = { .compatible = "allwinner,sun4i-a10-sram-d", .data = &sun4i_a10_sram_d.data, }, + { + .compatible = "allwinner,sun50i-a64-sram-c", + .data = &sun50i_a64_sram_c.data, + }, {} }; @@ -121,7 +133,8 @@ static int sunxi_sram_show(struct seq_file *s, void *data) for (func = sram_data->func; func->func; func++) { seq_printf(s, "\t\t%s%c\n", func->func, - func->val == val ? '*' : ' '); + func->reg_val == val ? + '*' : ' '); } } @@ -149,10 +162,13 @@ static inline struct sunxi_sram_desc *to_sram_desc(const struct sunxi_sram_data } static const struct sunxi_sram_data *sunxi_sram_of_parse(struct device_node *node, - unsigned int *value) + unsigned int *reg_value) { const struct of_device_id *match; + const struct sunxi_sram_data *data; + struct sunxi_sram_func *func; struct of_phandle_args args; + u8 val; int ret; ret = of_parse_phandle_with_fixed_args(node, "allwinner,sram", 1, 0, @@ -165,8 +181,7 @@ static const struct sunxi_sram_data *sunxi_sram_of_parse(struct device_node *nod goto err; } - if (value) - *value = args.args[0]; + val = args.args[0]; match = of_match_node(sunxi_sram_dt_ids, args.np); if (!match) { @@ -174,6 +189,26 @@ static const struct sunxi_sram_data *sunxi_sram_of_parse(struct device_node *nod goto err; } + data = match->data; + if (!data) { + ret = -EINVAL; + goto err; + }; + + for (func = data->func; func->func; func++) { + if (val == func->val) { + if (reg_value) + *reg_value = func->reg_val; + + break; + } + } + + if (!func->func) { + ret = -EINVAL; + goto err; + } + of_node_put(args.np); return match->data; @@ -190,6 +225,9 @@ int sunxi_sram_claim(struct device *dev) u32 val, mask; if (IS_ERR(base)) + return PTR_ERR(base); + + if (!base) return -EPROBE_DEFER; if (!dev || !dev->of_node) @@ -267,6 +305,7 @@ static int sunxi_sram_probe(struct platform_device *pdev) static const struct of_device_id sunxi_sram_dt_match[] = { { .compatible = "allwinner,sun4i-a10-sram-controller" }, + { .compatible = "allwinner,sun50i-a64-sram-controller" }, { }, }; MODULE_DEVICE_TABLE(of, sunxi_sram_dt_match); diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig index 1beb7c347344..e9e277178c94 100644 --- a/drivers/soc/tegra/Kconfig +++ b/drivers/soc/tegra/Kconfig @@ -107,6 +107,11 @@ config ARCH_TEGRA_186_SOC endif endif +config SOC_TEGRA_FUSE + def_bool y + depends on ARCH_TEGRA + select SOC_BUS + config SOC_TEGRA_FLOWCTRL bool diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c index 7413f60fa855..b7c552e3133c 100644 --- a/drivers/soc/tegra/fuse/fuse-tegra.c +++ b/drivers/soc/tegra/fuse/fuse-tegra.c @@ -19,10 +19,12 @@ #include #include #include -#include +#include #include #include -#include +#include +#include +#include #include #include @@ -210,6 +212,31 @@ static 
void tegra_enable_fuse_clk(void __iomem *base) writel(reg, base + 0x14); } +struct device * __init tegra_soc_device_register(void) +{ + struct soc_device_attribute *attr; + struct soc_device *dev; + + attr = kzalloc(sizeof(*attr), GFP_KERNEL); + if (!attr) + return NULL; + + attr->family = kasprintf(GFP_KERNEL, "Tegra"); + attr->revision = kasprintf(GFP_KERNEL, "%d", tegra_sku_info.revision); + attr->soc_id = kasprintf(GFP_KERNEL, "%u", tegra_get_chip_id()); + + dev = soc_device_register(attr); + if (IS_ERR(dev)) { + kfree(attr->soc_id); + kfree(attr->revision); + kfree(attr->family); + kfree(attr); + return ERR_CAST(dev); + } + + return soc_device_to_device(dev); +} + static int __init tegra_init_fuse(void) { const struct of_device_id *match; @@ -311,6 +338,31 @@ static int __init tegra_init_fuse(void) pr_debug("Tegra CPU Speedo ID %d, SoC Speedo ID %d\n", tegra_sku_info.cpu_speedo_id, tegra_sku_info.soc_speedo_id); + return 0; } early_initcall(tegra_init_fuse); + +#ifdef CONFIG_ARM64 +static int __init tegra_init_soc(void) +{ + struct device_node *np; + struct device *soc; + + /* make sure we're running on Tegra */ + np = of_find_matching_node(NULL, tegra_fuse_match); + if (!np) + return 0; + + of_node_put(np); + + soc = tegra_soc_device_register(); + if (IS_ERR(soc)) { + pr_err("failed to register SoC device: %ld\n", PTR_ERR(soc)); + return PTR_ERR(soc); + } + + return 0; +} +device_initcall(tegra_init_soc); +#endif diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c index e233dd5dcab3..0453ff6839a7 100644 --- a/drivers/soc/tegra/pmc.c +++ b/drivers/soc/tegra/pmc.c @@ -918,10 +918,8 @@ static void tegra_powergate_init(struct tegra_pmc *pmc, if (!np) return; - for_each_child_of_node(np, child) { + for_each_child_of_node(np, child) tegra_powergate_add(pmc, child); - of_node_put(child); - } of_node_put(np); } diff --git a/drivers/soc/versatile/soc-realview.c b/drivers/soc/versatile/soc-realview.c index 282e371378ce..caf698e5f0b0 100644 --- a/drivers/soc/versatile/soc-realview.c +++ b/drivers/soc/versatile/soc-realview.c @@ -85,7 +85,7 @@ static struct device_attribute realview_build_attr = static int realview_soc_probe(struct platform_device *pdev) { - static struct regmap *syscon_regmap; + struct regmap *syscon_regmap; struct soc_device *soc_dev; struct soc_device_attribute *soc_dev_attr; struct device_node *np = pdev->dev.of_node; diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index cf8ee5e48f73..6e65524cbfd9 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -41,6 +41,7 @@ #include #include #include +#include #define CREATE_TRACE_POINTS #include @@ -1692,6 +1693,35 @@ static void of_register_spi_devices(struct spi_controller *ctlr) { } #endif #ifdef CONFIG_ACPI +static void acpi_spi_parse_apple_properties(struct spi_device *spi) +{ + struct acpi_device *dev = ACPI_COMPANION(&spi->dev); + const union acpi_object *obj; + + if (!x86_apple_machine) + return; + + if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj) + && obj->buffer.length >= 4) + spi->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer; + + if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj) + && obj->buffer.length == 8) + spi->bits_per_word = *(u64 *)obj->buffer.pointer; + + if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj) + && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer) + spi->mode |= SPI_LSB_FIRST; + + if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj) + && obj->buffer.length == 8 && *(u64 
*)obj->buffer.pointer) + spi->mode |= SPI_CPOL; + + if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj) + && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer) + spi->mode |= SPI_CPHA; +} + static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) { struct spi_device *spi = data; @@ -1765,6 +1795,8 @@ static acpi_status acpi_register_spi_device(struct spi_controller *ctlr, acpi_spi_add_resource, spi); acpi_dev_free_resource_list(&resource_list); + acpi_spi_parse_apple_properties(spi); + if (ret < 0 || !spi->max_speed_hz) { spi_dev_put(spi); return AE_OK; diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index 1b0a1bed8e11..554683912cff 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -24,6 +24,8 @@ menuconfig STAGING if STAGING +source "drivers/staging/irda/net/Kconfig" + source "drivers/staging/wlan-ng/Kconfig" source "drivers/staging/comedi/Kconfig" diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index 2b61cbd44d13..8951c37d8d80 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -2,6 +2,8 @@ obj-y += media/ obj-y += typec/ +obj-$(CONFIG_IRDA) += irda/net/ +obj-$(CONFIG_IRDA) += irda/drivers/ obj-$(CONFIG_PRISM2_USB) += wlan-ng/ obj-$(CONFIG_COMEDI) += comedi/ obj-$(CONFIG_FB_OLPC_DCON) += olpc_dcon/ diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c index 6ba270e0494d..0f695df14c9d 100644 --- a/drivers/staging/android/ashmem.c +++ b/drivers/staging/android/ashmem.c @@ -294,19 +294,9 @@ static int ashmem_release(struct inode *ignored, struct file *file) return 0; } -/** - * ashmem_read() - Reads a set of bytes from an Ashmem-enabled file - * @file: The associated backing file. - * @buf: The buffer of data being written to - * @len: The number of bytes being read - * @pos: The position of the first byte to read. - * - * Return: 0 if successful, or another return code if not. - */ -static ssize_t ashmem_read(struct file *file, char __user *buf, - size_t len, loff_t *pos) +static ssize_t ashmem_read_iter(struct kiocb *iocb, struct iov_iter *iter) { - struct ashmem_area *asma = file->private_data; + struct ashmem_area *asma = iocb->ki_filp->private_data; int ret = 0; mutex_lock(&ashmem_mutex); @@ -320,20 +310,17 @@ static ssize_t ashmem_read(struct file *file, char __user *buf, goto out_unlock; } - mutex_unlock(&ashmem_mutex); - /* * asma and asma->file are used outside the lock here. We assume * once asma->file is set it will never be changed, and will not * be destroyed until all references to the file are dropped and * ashmem_release is called. 
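The spi.c change above probes Apple-specific ACPI buffer properties (spiSclkPeriod, spiWordSize, and friends) only when x86_apple_machine indicates the firmware will actually provide them, then interprets each property's raw buffer. The same acpi_dev_get_property() pattern generalizes to any fixed-size buffer property; a sketch with an invented property name ("demoClockPolarity") and helper:

#include <linux/acpi.h>
#include <linux/types.h>

/* Return the 8-byte buffer property 'name', or 'defval' if absent/malformed. */
static u64 demo_get_u64_prop(struct acpi_device *adev, const char *name,
			     u64 defval)
{
	const union acpi_object *obj;

	if (acpi_dev_get_property(adev, name, ACPI_TYPE_BUFFER, &obj))
		return defval;		/* property not present */

	if (obj->buffer.length != 8)
		return defval;		/* unexpected encoding */

	return *(u64 *)obj->buffer.pointer;
}

/*
 * usage, e.g. with adev = ACPI_COMPANION(&spi->dev) as in the hunk above:
 *	invert = demo_get_u64_prop(adev, "demoClockPolarity", 0);
 */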
*/ - ret = __vfs_read(asma->file, buf, len, pos); - if (ret >= 0) - /** Update backing file pos, since f_ops->read() doesn't */ - asma->file->f_pos = *pos; - return ret; - + mutex_unlock(&ashmem_mutex); + ret = vfs_iter_read(asma->file, iter, &iocb->ki_pos, 0); + mutex_lock(&ashmem_mutex); + if (ret > 0) + asma->file->f_pos = iocb->ki_pos; out_unlock: mutex_unlock(&ashmem_mutex); return ret; @@ -834,7 +821,7 @@ static const struct file_operations ashmem_fops = { .owner = THIS_MODULE, .open = ashmem_open, .release = ashmem_release, - .read = ashmem_read, + .read_iter = ashmem_read_iter, .llseek = ashmem_llseek, .mmap = ashmem_mmap, .unlocked_ioctl = ashmem_ioctl, diff --git a/drivers/staging/comedi/drivers/serial2002.c b/drivers/staging/comedi/drivers/serial2002.c index 0d33e520f635..cc18e25103ca 100644 --- a/drivers/staging/comedi/drivers/serial2002.c +++ b/drivers/staging/comedi/drivers/serial2002.c @@ -106,16 +106,8 @@ static long serial2002_tty_ioctl(struct file *f, unsigned int op, static int serial2002_tty_write(struct file *f, unsigned char *buf, int count) { - const char __user *p = (__force const char __user *)buf; - int result; - loff_t offset = 0; - mm_segment_t oldfs; - - oldfs = get_fs(); - set_fs(KERNEL_DS); - result = __vfs_write(f, p, count, &offset); - set_fs(oldfs); - return result; + loff_t pos = 0; + return kernel_write(f, buf, count, &pos); } static void serial2002_tty_read_poll_wait(struct file *f, int timeout) @@ -148,19 +140,14 @@ static int serial2002_tty_read(struct file *f, int timeout) { unsigned char ch; int result; + loff_t pos = 0; result = -1; if (!IS_ERR(f)) { - mm_segment_t oldfs; - char __user *p = (__force char __user *)&ch; - loff_t offset = 0; - - oldfs = get_fs(); - set_fs(KERNEL_DS); if (f->f_op->poll) { serial2002_tty_read_poll_wait(f, timeout); - if (__vfs_read(f, p, 1, &offset) == 1) + if (kernel_read(f, &ch, 1, &pos) == 1) result = ch; } else { /* Device does not support poll, busy wait */ @@ -171,14 +158,13 @@ static int serial2002_tty_read(struct file *f, int timeout) if (retries >= timeout) break; - if (__vfs_read(f, p, 1, &offset) == 1) { + if (kernel_read(f, &ch, 1, &pos) == 1) { result = ch; break; } usleep_range(100, 1000); } } - set_fs(oldfs); } return result; } diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c index 861a249e6ef1..3f4148c92308 100644 --- a/drivers/staging/greybus/light.c +++ b/drivers/staging/greybus/light.c @@ -58,6 +58,7 @@ struct gb_light { bool ready; #if IS_REACHABLE(CONFIG_V4L2_FLASH_LED_CLASS) struct v4l2_flash *v4l2_flash; + struct v4l2_flash *v4l2_flash_ind; #endif }; @@ -534,26 +535,21 @@ static int gb_lights_light_v4l2_register(struct gb_light *light) { struct gb_connection *connection = get_conn_from_light(light); struct device *dev = &connection->bundle->dev; - struct v4l2_flash_config *sd_cfg; + struct v4l2_flash_config sd_cfg = { {0} }, sd_cfg_ind = { {0} }; struct led_classdev_flash *fled; - struct led_classdev_flash *iled = NULL; + struct led_classdev *iled = NULL; struct gb_channel *channel_torch, *channel_ind, *channel_flash; - int ret = 0; - - sd_cfg = kcalloc(1, sizeof(*sd_cfg), GFP_KERNEL); - if (!sd_cfg) - return -ENOMEM; channel_torch = get_channel_from_mode(light, GB_CHANNEL_MODE_TORCH); if (channel_torch) __gb_lights_channel_v4l2_config(&channel_torch->intensity_uA, - &sd_cfg->torch_intensity); + &sd_cfg.intensity); channel_ind = get_channel_from_mode(light, GB_CHANNEL_MODE_INDICATOR); if (channel_ind) { __gb_lights_channel_v4l2_config(&channel_ind->intensity_uA, - 
&sd_cfg->indicator_intensity); - iled = &channel_ind->fled; + &sd_cfg_ind.intensity); + iled = &channel_ind->fled.led_cdev; } channel_flash = get_channel_from_mode(light, GB_CHANNEL_MODE_FLASH); @@ -561,31 +557,37 @@ static int gb_lights_light_v4l2_register(struct gb_light *light) fled = &channel_flash->fled; - snprintf(sd_cfg->dev_name, sizeof(sd_cfg->dev_name), "%s", light->name); + snprintf(sd_cfg.dev_name, sizeof(sd_cfg.dev_name), "%s", light->name); + snprintf(sd_cfg_ind.dev_name, sizeof(sd_cfg_ind.dev_name), + "%s indicator", light->name); /* Set the possible values to faults, in our case all faults */ - sd_cfg->flash_faults = LED_FAULT_OVER_VOLTAGE | LED_FAULT_TIMEOUT | + sd_cfg.flash_faults = LED_FAULT_OVER_VOLTAGE | LED_FAULT_TIMEOUT | LED_FAULT_OVER_TEMPERATURE | LED_FAULT_SHORT_CIRCUIT | LED_FAULT_OVER_CURRENT | LED_FAULT_INDICATOR | LED_FAULT_UNDER_VOLTAGE | LED_FAULT_INPUT_VOLTAGE | LED_FAULT_LED_OVER_TEMPERATURE; - light->v4l2_flash = v4l2_flash_init(dev, NULL, fled, iled, - &v4l2_flash_ops, sd_cfg); - if (IS_ERR_OR_NULL(light->v4l2_flash)) { - ret = PTR_ERR(light->v4l2_flash); - goto out_free; + light->v4l2_flash = v4l2_flash_init(dev, NULL, fled, &v4l2_flash_ops, + &sd_cfg); + if (IS_ERR(light->v4l2_flash)) + return PTR_ERR(light->v4l2_flash); + + if (channel_ind) { + light->v4l2_flash_ind = + v4l2_flash_indicator_init(dev, NULL, iled, &sd_cfg_ind); + if (IS_ERR(light->v4l2_flash_ind)) { + v4l2_flash_release(light->v4l2_flash); + return PTR_ERR(light->v4l2_flash_ind); + } } - return ret; - -out_free: - kfree(sd_cfg); - return ret; + return 0; } static void gb_lights_light_v4l2_unregister(struct gb_light *light) { + v4l2_flash_release(light->v4l2_flash_ind); v4l2_flash_release(light->v4l2_flash); } #else diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c index d11c6de9c777..6150d2780e22 100644 --- a/drivers/staging/iio/adc/ad7192.c +++ b/drivers/staging/iio/adc/ad7192.c @@ -223,11 +223,9 @@ static int ad7192_setup(struct ad7192_state *st, struct iio_dev *indio_dev = spi_get_drvdata(st->sd.spi); unsigned long long scale_uv; int i, ret, id; - u8 ones[6]; /* reset the serial interface */ - memset(&ones, 0xFF, 6); - ret = spi_write(st->sd.spi, &ones, 6); + ret = ad_sd_reset(&st->sd, 48); if (ret < 0) goto out; usleep_range(500, 1000); /* Wait for at least 500us */ diff --git a/drivers/staging/irda/TODO b/drivers/staging/irda/TODO new file mode 100644 index 000000000000..7d98a5cffaff --- /dev/null +++ b/drivers/staging/irda/TODO @@ -0,0 +1,4 @@ +The irda code will be removed soon from the kernel tree as it is old and +obsolete and broken. + +Don't worry about fixing up anything here, it's not needed. diff --git a/drivers/net/irda/Kconfig b/drivers/staging/irda/drivers/Kconfig similarity index 100% rename from drivers/net/irda/Kconfig rename to drivers/staging/irda/drivers/Kconfig diff --git a/drivers/net/irda/Makefile b/drivers/staging/irda/drivers/Makefile similarity index 96% rename from drivers/net/irda/Makefile rename to drivers/staging/irda/drivers/Makefile index 4c344433dae5..e2901b135528 100644 --- a/drivers/net/irda/Makefile +++ b/drivers/staging/irda/drivers/Makefile @@ -5,6 +5,8 @@ # Rewritten to use lists instead of if-statements. 
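The serial2002 and ashmem conversions earlier in this series (and the lustre tracefile one near the end) drop the get_fs()/set_fs(KERNEL_DS) dance: kernel_read() and kernel_write() take a kernel pointer plus an explicit position, and ashmem goes one step further to ->read_iter with vfs_iter_read(). A minimal caller-side sketch, assuming filp is an already-opened struct file and buf a kernel buffer (short-I/O handling omitted for brevity):

#include <linux/fs.h>
#include <linux/types.h>

/* Write a kernel buffer to a file and read it back, no set_fs() needed. */
static int demo_roundtrip(struct file *filp, void *buf, size_t len)
{
	loff_t pos = 0;
	ssize_t ret;

	ret = kernel_write(filp, buf, len, &pos);	/* pos advances */
	if (ret < 0)
		return ret;

	pos = 0;
	ret = kernel_read(filp, buf, len, &pos);
	return ret < 0 ? ret : 0;
}

Keeping the position explicit is what lets these drivers delete the mm_segment_t save/restore they used to carry around every I/O call.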
# +subdir-ccflags-y += -I$(srctree)/drivers/staging/irda/include + # FIR drivers obj-$(CONFIG_USB_IRDA) += irda-usb.o obj-$(CONFIG_SIGMATEL_FIR) += stir4200.o diff --git a/drivers/net/irda/act200l-sir.c b/drivers/staging/irda/drivers/act200l-sir.c similarity index 100% rename from drivers/net/irda/act200l-sir.c rename to drivers/staging/irda/drivers/act200l-sir.c diff --git a/drivers/net/irda/actisys-sir.c b/drivers/staging/irda/drivers/actisys-sir.c similarity index 100% rename from drivers/net/irda/actisys-sir.c rename to drivers/staging/irda/drivers/actisys-sir.c diff --git a/drivers/net/irda/ali-ircc.c b/drivers/staging/irda/drivers/ali-ircc.c similarity index 100% rename from drivers/net/irda/ali-ircc.c rename to drivers/staging/irda/drivers/ali-ircc.c diff --git a/drivers/net/irda/ali-ircc.h b/drivers/staging/irda/drivers/ali-ircc.h similarity index 100% rename from drivers/net/irda/ali-ircc.h rename to drivers/staging/irda/drivers/ali-ircc.h diff --git a/drivers/net/irda/au1k_ir.c b/drivers/staging/irda/drivers/au1k_ir.c similarity index 100% rename from drivers/net/irda/au1k_ir.c rename to drivers/staging/irda/drivers/au1k_ir.c diff --git a/drivers/net/irda/bfin_sir.c b/drivers/staging/irda/drivers/bfin_sir.c similarity index 100% rename from drivers/net/irda/bfin_sir.c rename to drivers/staging/irda/drivers/bfin_sir.c diff --git a/drivers/net/irda/bfin_sir.h b/drivers/staging/irda/drivers/bfin_sir.h similarity index 100% rename from drivers/net/irda/bfin_sir.h rename to drivers/staging/irda/drivers/bfin_sir.h diff --git a/drivers/net/irda/donauboe.c b/drivers/staging/irda/drivers/donauboe.c similarity index 100% rename from drivers/net/irda/donauboe.c rename to drivers/staging/irda/drivers/donauboe.c diff --git a/drivers/net/irda/donauboe.h b/drivers/staging/irda/drivers/donauboe.h similarity index 100% rename from drivers/net/irda/donauboe.h rename to drivers/staging/irda/drivers/donauboe.h diff --git a/drivers/net/irda/esi-sir.c b/drivers/staging/irda/drivers/esi-sir.c similarity index 100% rename from drivers/net/irda/esi-sir.c rename to drivers/staging/irda/drivers/esi-sir.c diff --git a/drivers/net/irda/girbil-sir.c b/drivers/staging/irda/drivers/girbil-sir.c similarity index 100% rename from drivers/net/irda/girbil-sir.c rename to drivers/staging/irda/drivers/girbil-sir.c diff --git a/drivers/net/irda/irda-usb.c b/drivers/staging/irda/drivers/irda-usb.c similarity index 99% rename from drivers/net/irda/irda-usb.c rename to drivers/staging/irda/drivers/irda-usb.c index 6f3c805f7211..723e49bc4baa 100644 --- a/drivers/net/irda/irda-usb.c +++ b/drivers/staging/irda/drivers/irda-usb.c @@ -72,7 +72,7 @@ static int qos_mtt_bits = 0; /* These are the currently known IrDA USB dongles. 
Add new dongles here */ -static struct usb_device_id dongles[] = { +static const struct usb_device_id dongles[] = { /* ACTiSYS Corp., ACT-IR2000U FIR-USB Adapter */ { USB_DEVICE(0x9c4, 0x011), .driver_info = IUC_SPEED_BUG | IUC_NO_WINDOW }, /* Look like ACTiSYS, Report : IBM Corp., IBM UltraPort IrDA */ diff --git a/drivers/net/irda/irda-usb.h b/drivers/staging/irda/drivers/irda-usb.h similarity index 100% rename from drivers/net/irda/irda-usb.h rename to drivers/staging/irda/drivers/irda-usb.h diff --git a/drivers/net/irda/irtty-sir.c b/drivers/staging/irda/drivers/irtty-sir.c similarity index 100% rename from drivers/net/irda/irtty-sir.c rename to drivers/staging/irda/drivers/irtty-sir.c diff --git a/drivers/net/irda/irtty-sir.h b/drivers/staging/irda/drivers/irtty-sir.h similarity index 100% rename from drivers/net/irda/irtty-sir.h rename to drivers/staging/irda/drivers/irtty-sir.h diff --git a/drivers/net/irda/kingsun-sir.c b/drivers/staging/irda/drivers/kingsun-sir.c similarity index 99% rename from drivers/net/irda/kingsun-sir.c rename to drivers/staging/irda/drivers/kingsun-sir.c index 24c0f169a7b1..4fd4ac2fe09f 100644 --- a/drivers/net/irda/kingsun-sir.c +++ b/drivers/staging/irda/drivers/kingsun-sir.c @@ -85,7 +85,7 @@ #define KING_PRODUCT_ID 0x4200 /* These are the currently known USB ids */ -static struct usb_device_id dongles[] = { +static const struct usb_device_id dongles[] = { /* KingSun Co,Ltd IrDA/USB Bridge */ { USB_DEVICE(KING_VENDOR_ID, KING_PRODUCT_ID) }, { } diff --git a/drivers/net/irda/ks959-sir.c b/drivers/staging/irda/drivers/ks959-sir.c similarity index 99% rename from drivers/net/irda/ks959-sir.c rename to drivers/staging/irda/drivers/ks959-sir.c index 3affded3e30d..8025741e7586 100644 --- a/drivers/net/irda/ks959-sir.c +++ b/drivers/staging/irda/drivers/ks959-sir.c @@ -133,7 +133,7 @@ #define KS959_PRODUCT_ID 0x4959 /* These are the currently known USB ids */ -static struct usb_device_id dongles[] = { +static const struct usb_device_id dongles[] = { /* KingSun Co,Ltd IrDA/USB Bridge */ {USB_DEVICE(KS959_VENDOR_ID, KS959_PRODUCT_ID)}, {} diff --git a/drivers/net/irda/ksdazzle-sir.c b/drivers/staging/irda/drivers/ksdazzle-sir.c similarity index 99% rename from drivers/net/irda/ksdazzle-sir.c rename to drivers/staging/irda/drivers/ksdazzle-sir.c index 741452c7ce35..d2a0755df596 100644 --- a/drivers/net/irda/ksdazzle-sir.c +++ b/drivers/staging/irda/drivers/ksdazzle-sir.c @@ -97,7 +97,7 @@ #define KSDAZZLE_PRODUCT_ID 0x4100 /* These are the currently known USB ids */ -static struct usb_device_id dongles[] = { +static const struct usb_device_id dongles[] = { /* KingSun Co,Ltd IrDA/USB Bridge */ {USB_DEVICE(KSDAZZLE_VENDOR_ID, KSDAZZLE_PRODUCT_ID)}, {} diff --git a/drivers/net/irda/litelink-sir.c b/drivers/staging/irda/drivers/litelink-sir.c similarity index 100% rename from drivers/net/irda/litelink-sir.c rename to drivers/staging/irda/drivers/litelink-sir.c diff --git a/drivers/net/irda/ma600-sir.c b/drivers/staging/irda/drivers/ma600-sir.c similarity index 100% rename from drivers/net/irda/ma600-sir.c rename to drivers/staging/irda/drivers/ma600-sir.c diff --git a/drivers/net/irda/mcp2120-sir.c b/drivers/staging/irda/drivers/mcp2120-sir.c similarity index 100% rename from drivers/net/irda/mcp2120-sir.c rename to drivers/staging/irda/drivers/mcp2120-sir.c diff --git a/drivers/net/irda/mcs7780.c b/drivers/staging/irda/drivers/mcs7780.c similarity index 99% rename from drivers/net/irda/mcs7780.c rename to drivers/staging/irda/drivers/mcs7780.c index 
765de3bedb88..c3f0b254b344 100644 --- a/drivers/net/irda/mcs7780.c +++ b/drivers/staging/irda/drivers/mcs7780.c @@ -66,7 +66,7 @@ #define MCS_VENDOR_ID 0x9710 #define MCS_PRODUCT_ID 0x7780 -static struct usb_device_id mcs_table[] = { +static const struct usb_device_id mcs_table[] = { /* MosChip Corp., MCS7780 FIR-USB Adapter */ {USB_DEVICE(MCS_VENDOR_ID, MCS_PRODUCT_ID)}, {}, diff --git a/drivers/net/irda/mcs7780.h b/drivers/staging/irda/drivers/mcs7780.h similarity index 100% rename from drivers/net/irda/mcs7780.h rename to drivers/staging/irda/drivers/mcs7780.h diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/staging/irda/drivers/nsc-ircc.c similarity index 100% rename from drivers/net/irda/nsc-ircc.c rename to drivers/staging/irda/drivers/nsc-ircc.c diff --git a/drivers/net/irda/nsc-ircc.h b/drivers/staging/irda/drivers/nsc-ircc.h similarity index 100% rename from drivers/net/irda/nsc-ircc.h rename to drivers/staging/irda/drivers/nsc-ircc.h diff --git a/drivers/net/irda/old_belkin-sir.c b/drivers/staging/irda/drivers/old_belkin-sir.c similarity index 100% rename from drivers/net/irda/old_belkin-sir.c rename to drivers/staging/irda/drivers/old_belkin-sir.c diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/staging/irda/drivers/pxaficp_ir.c similarity index 100% rename from drivers/net/irda/pxaficp_ir.c rename to drivers/staging/irda/drivers/pxaficp_ir.c diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/staging/irda/drivers/sa1100_ir.c similarity index 100% rename from drivers/net/irda/sa1100_ir.c rename to drivers/staging/irda/drivers/sa1100_ir.c diff --git a/drivers/net/irda/sh_sir.c b/drivers/staging/irda/drivers/sh_sir.c similarity index 100% rename from drivers/net/irda/sh_sir.c rename to drivers/staging/irda/drivers/sh_sir.c diff --git a/drivers/net/irda/sir-dev.h b/drivers/staging/irda/drivers/sir-dev.h similarity index 100% rename from drivers/net/irda/sir-dev.h rename to drivers/staging/irda/drivers/sir-dev.h diff --git a/drivers/net/irda/sir_dev.c b/drivers/staging/irda/drivers/sir_dev.c similarity index 100% rename from drivers/net/irda/sir_dev.c rename to drivers/staging/irda/drivers/sir_dev.c diff --git a/drivers/net/irda/sir_dongle.c b/drivers/staging/irda/drivers/sir_dongle.c similarity index 100% rename from drivers/net/irda/sir_dongle.c rename to drivers/staging/irda/drivers/sir_dongle.c diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/staging/irda/drivers/smsc-ircc2.c similarity index 100% rename from drivers/net/irda/smsc-ircc2.c rename to drivers/staging/irda/drivers/smsc-ircc2.c diff --git a/drivers/net/irda/smsc-ircc2.h b/drivers/staging/irda/drivers/smsc-ircc2.h similarity index 100% rename from drivers/net/irda/smsc-ircc2.h rename to drivers/staging/irda/drivers/smsc-ircc2.h diff --git a/drivers/net/irda/smsc-sio.h b/drivers/staging/irda/drivers/smsc-sio.h similarity index 100% rename from drivers/net/irda/smsc-sio.h rename to drivers/staging/irda/drivers/smsc-sio.h diff --git a/drivers/net/irda/stir4200.c b/drivers/staging/irda/drivers/stir4200.c similarity index 99% rename from drivers/net/irda/stir4200.c rename to drivers/staging/irda/drivers/stir4200.c index 7ee514879531..ee2cb70b688d 100644 --- a/drivers/net/irda/stir4200.c +++ b/drivers/staging/irda/drivers/stir4200.c @@ -183,7 +183,7 @@ struct stir_cb { /* These are the currently known USB ids */ -static struct usb_device_id dongles[] = { +static const struct usb_device_id dongles[] = { /* SigmaTel, Inc, STIr4200 IrDA/USB Bridge */ { USB_DEVICE(0x066f, 0x4200) }, { } diff --git 
a/drivers/net/irda/tekram-sir.c b/drivers/staging/irda/drivers/tekram-sir.c similarity index 100% rename from drivers/net/irda/tekram-sir.c rename to drivers/staging/irda/drivers/tekram-sir.c diff --git a/drivers/net/irda/toim3232-sir.c b/drivers/staging/irda/drivers/toim3232-sir.c similarity index 100% rename from drivers/net/irda/toim3232-sir.c rename to drivers/staging/irda/drivers/toim3232-sir.c diff --git a/drivers/net/irda/via-ircc.c b/drivers/staging/irda/drivers/via-ircc.c similarity index 100% rename from drivers/net/irda/via-ircc.c rename to drivers/staging/irda/drivers/via-ircc.c diff --git a/drivers/net/irda/via-ircc.h b/drivers/staging/irda/drivers/via-ircc.h similarity index 100% rename from drivers/net/irda/via-ircc.h rename to drivers/staging/irda/drivers/via-ircc.h diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/staging/irda/drivers/vlsi_ir.c similarity index 100% rename from drivers/net/irda/vlsi_ir.c rename to drivers/staging/irda/drivers/vlsi_ir.c diff --git a/drivers/net/irda/vlsi_ir.h b/drivers/staging/irda/drivers/vlsi_ir.h similarity index 100% rename from drivers/net/irda/vlsi_ir.h rename to drivers/staging/irda/drivers/vlsi_ir.h diff --git a/drivers/net/irda/w83977af.h b/drivers/staging/irda/drivers/w83977af.h similarity index 100% rename from drivers/net/irda/w83977af.h rename to drivers/staging/irda/drivers/w83977af.h diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/staging/irda/drivers/w83977af_ir.c similarity index 100% rename from drivers/net/irda/w83977af_ir.c rename to drivers/staging/irda/drivers/w83977af_ir.c diff --git a/drivers/net/irda/w83977af_ir.h b/drivers/staging/irda/drivers/w83977af_ir.h similarity index 100% rename from drivers/net/irda/w83977af_ir.h rename to drivers/staging/irda/drivers/w83977af_ir.h diff --git a/include/net/irda/af_irda.h b/drivers/staging/irda/include/net/irda/af_irda.h similarity index 100% rename from include/net/irda/af_irda.h rename to drivers/staging/irda/include/net/irda/af_irda.h diff --git a/include/net/irda/crc.h b/drivers/staging/irda/include/net/irda/crc.h similarity index 100% rename from include/net/irda/crc.h rename to drivers/staging/irda/include/net/irda/crc.h diff --git a/include/net/irda/discovery.h b/drivers/staging/irda/include/net/irda/discovery.h similarity index 100% rename from include/net/irda/discovery.h rename to drivers/staging/irda/include/net/irda/discovery.h diff --git a/include/net/irda/ircomm_core.h b/drivers/staging/irda/include/net/irda/ircomm_core.h similarity index 100% rename from include/net/irda/ircomm_core.h rename to drivers/staging/irda/include/net/irda/ircomm_core.h diff --git a/include/net/irda/ircomm_event.h b/drivers/staging/irda/include/net/irda/ircomm_event.h similarity index 100% rename from include/net/irda/ircomm_event.h rename to drivers/staging/irda/include/net/irda/ircomm_event.h diff --git a/include/net/irda/ircomm_lmp.h b/drivers/staging/irda/include/net/irda/ircomm_lmp.h similarity index 100% rename from include/net/irda/ircomm_lmp.h rename to drivers/staging/irda/include/net/irda/ircomm_lmp.h diff --git a/include/net/irda/ircomm_param.h b/drivers/staging/irda/include/net/irda/ircomm_param.h similarity index 100% rename from include/net/irda/ircomm_param.h rename to drivers/staging/irda/include/net/irda/ircomm_param.h diff --git a/include/net/irda/ircomm_ttp.h b/drivers/staging/irda/include/net/irda/ircomm_ttp.h similarity index 100% rename from include/net/irda/ircomm_ttp.h rename to drivers/staging/irda/include/net/irda/ircomm_ttp.h diff --git 
a/include/net/irda/ircomm_tty.h b/drivers/staging/irda/include/net/irda/ircomm_tty.h similarity index 100% rename from include/net/irda/ircomm_tty.h rename to drivers/staging/irda/include/net/irda/ircomm_tty.h diff --git a/include/net/irda/ircomm_tty_attach.h b/drivers/staging/irda/include/net/irda/ircomm_tty_attach.h similarity index 100% rename from include/net/irda/ircomm_tty_attach.h rename to drivers/staging/irda/include/net/irda/ircomm_tty_attach.h diff --git a/include/net/irda/irda.h b/drivers/staging/irda/include/net/irda/irda.h similarity index 100% rename from include/net/irda/irda.h rename to drivers/staging/irda/include/net/irda/irda.h diff --git a/include/net/irda/irda_device.h b/drivers/staging/irda/include/net/irda/irda_device.h similarity index 100% rename from include/net/irda/irda_device.h rename to drivers/staging/irda/include/net/irda/irda_device.h diff --git a/include/net/irda/iriap.h b/drivers/staging/irda/include/net/irda/iriap.h similarity index 100% rename from include/net/irda/iriap.h rename to drivers/staging/irda/include/net/irda/iriap.h diff --git a/include/net/irda/iriap_event.h b/drivers/staging/irda/include/net/irda/iriap_event.h similarity index 100% rename from include/net/irda/iriap_event.h rename to drivers/staging/irda/include/net/irda/iriap_event.h diff --git a/include/net/irda/irias_object.h b/drivers/staging/irda/include/net/irda/irias_object.h similarity index 100% rename from include/net/irda/irias_object.h rename to drivers/staging/irda/include/net/irda/irias_object.h diff --git a/include/net/irda/irlan_client.h b/drivers/staging/irda/include/net/irda/irlan_client.h similarity index 100% rename from include/net/irda/irlan_client.h rename to drivers/staging/irda/include/net/irda/irlan_client.h diff --git a/include/net/irda/irlan_common.h b/drivers/staging/irda/include/net/irda/irlan_common.h similarity index 100% rename from include/net/irda/irlan_common.h rename to drivers/staging/irda/include/net/irda/irlan_common.h diff --git a/include/net/irda/irlan_eth.h b/drivers/staging/irda/include/net/irda/irlan_eth.h similarity index 100% rename from include/net/irda/irlan_eth.h rename to drivers/staging/irda/include/net/irda/irlan_eth.h diff --git a/include/net/irda/irlan_event.h b/drivers/staging/irda/include/net/irda/irlan_event.h similarity index 100% rename from include/net/irda/irlan_event.h rename to drivers/staging/irda/include/net/irda/irlan_event.h diff --git a/include/net/irda/irlan_filter.h b/drivers/staging/irda/include/net/irda/irlan_filter.h similarity index 100% rename from include/net/irda/irlan_filter.h rename to drivers/staging/irda/include/net/irda/irlan_filter.h diff --git a/include/net/irda/irlan_provider.h b/drivers/staging/irda/include/net/irda/irlan_provider.h similarity index 100% rename from include/net/irda/irlan_provider.h rename to drivers/staging/irda/include/net/irda/irlan_provider.h diff --git a/include/net/irda/irlap.h b/drivers/staging/irda/include/net/irda/irlap.h similarity index 100% rename from include/net/irda/irlap.h rename to drivers/staging/irda/include/net/irda/irlap.h diff --git a/include/net/irda/irlap_event.h b/drivers/staging/irda/include/net/irda/irlap_event.h similarity index 100% rename from include/net/irda/irlap_event.h rename to drivers/staging/irda/include/net/irda/irlap_event.h diff --git a/include/net/irda/irlap_frame.h b/drivers/staging/irda/include/net/irda/irlap_frame.h similarity index 100% rename from include/net/irda/irlap_frame.h rename to drivers/staging/irda/include/net/irda/irlap_frame.h 
diff --git a/include/net/irda/irlmp.h b/drivers/staging/irda/include/net/irda/irlmp.h similarity index 100% rename from include/net/irda/irlmp.h rename to drivers/staging/irda/include/net/irda/irlmp.h diff --git a/include/net/irda/irlmp_event.h b/drivers/staging/irda/include/net/irda/irlmp_event.h similarity index 100% rename from include/net/irda/irlmp_event.h rename to drivers/staging/irda/include/net/irda/irlmp_event.h diff --git a/include/net/irda/irlmp_frame.h b/drivers/staging/irda/include/net/irda/irlmp_frame.h similarity index 100% rename from include/net/irda/irlmp_frame.h rename to drivers/staging/irda/include/net/irda/irlmp_frame.h diff --git a/include/net/irda/irmod.h b/drivers/staging/irda/include/net/irda/irmod.h similarity index 100% rename from include/net/irda/irmod.h rename to drivers/staging/irda/include/net/irda/irmod.h diff --git a/include/net/irda/irqueue.h b/drivers/staging/irda/include/net/irda/irqueue.h similarity index 100% rename from include/net/irda/irqueue.h rename to drivers/staging/irda/include/net/irda/irqueue.h diff --git a/include/net/irda/irttp.h b/drivers/staging/irda/include/net/irda/irttp.h similarity index 100% rename from include/net/irda/irttp.h rename to drivers/staging/irda/include/net/irda/irttp.h diff --git a/include/net/irda/parameters.h b/drivers/staging/irda/include/net/irda/parameters.h similarity index 100% rename from include/net/irda/parameters.h rename to drivers/staging/irda/include/net/irda/parameters.h diff --git a/include/net/irda/qos.h b/drivers/staging/irda/include/net/irda/qos.h similarity index 100% rename from include/net/irda/qos.h rename to drivers/staging/irda/include/net/irda/qos.h diff --git a/include/net/irda/timer.h b/drivers/staging/irda/include/net/irda/timer.h similarity index 100% rename from include/net/irda/timer.h rename to drivers/staging/irda/include/net/irda/timer.h diff --git a/include/net/irda/wrapper.h b/drivers/staging/irda/include/net/irda/wrapper.h similarity index 100% rename from include/net/irda/wrapper.h rename to drivers/staging/irda/include/net/irda/wrapper.h diff --git a/net/irda/Kconfig b/drivers/staging/irda/net/Kconfig similarity index 94% rename from net/irda/Kconfig rename to drivers/staging/irda/net/Kconfig index c8671a7ffb3c..6abeae6c666a 100644 --- a/net/irda/Kconfig +++ b/drivers/staging/irda/net/Kconfig @@ -27,11 +27,11 @@ menuconfig IRDA comment "IrDA protocols" depends on IRDA -source "net/irda/irlan/Kconfig" +source "drivers/staging/irda/net/irlan/Kconfig" -source "net/irda/irnet/Kconfig" +source "drivers/staging/irda/net/irnet/Kconfig" -source "net/irda/ircomm/Kconfig" +source "drivers/staging/irda/net/ircomm/Kconfig" config IRDA_ULTRA bool "Ultra (connectionless) protocol" @@ -92,5 +92,5 @@ config IRDA_DEBUG If unsure, say Y (since it makes it easier to find the bugs). -source "drivers/net/irda/Kconfig" +source "drivers/staging/irda/drivers/Kconfig" diff --git a/net/irda/Makefile b/drivers/staging/irda/net/Makefile similarity index 88% rename from net/irda/Makefile rename to drivers/staging/irda/net/Makefile index 187f6c563a4b..bd1a635b88cf 100644 --- a/net/irda/Makefile +++ b/drivers/staging/irda/net/Makefile @@ -2,6 +2,8 @@ # Makefile for the Linux IrDA protocol layer. 
# +subdir-ccflags-y += -I$(srctree)/drivers/staging/irda/include + obj-$(CONFIG_IRDA) += irda.o obj-$(CONFIG_IRLAN) += irlan/ obj-$(CONFIG_IRNET) += irnet/ diff --git a/net/irda/af_irda.c b/drivers/staging/irda/net/af_irda.c similarity index 100% rename from net/irda/af_irda.c rename to drivers/staging/irda/net/af_irda.c diff --git a/net/irda/discovery.c b/drivers/staging/irda/net/discovery.c similarity index 100% rename from net/irda/discovery.c rename to drivers/staging/irda/net/discovery.c diff --git a/net/irda/ircomm/Kconfig b/drivers/staging/irda/net/ircomm/Kconfig similarity index 100% rename from net/irda/ircomm/Kconfig rename to drivers/staging/irda/net/ircomm/Kconfig diff --git a/net/irda/ircomm/Makefile b/drivers/staging/irda/net/ircomm/Makefile similarity index 100% rename from net/irda/ircomm/Makefile rename to drivers/staging/irda/net/ircomm/Makefile diff --git a/net/irda/ircomm/ircomm_core.c b/drivers/staging/irda/net/ircomm/ircomm_core.c similarity index 100% rename from net/irda/ircomm/ircomm_core.c rename to drivers/staging/irda/net/ircomm/ircomm_core.c diff --git a/net/irda/ircomm/ircomm_event.c b/drivers/staging/irda/net/ircomm/ircomm_event.c similarity index 100% rename from net/irda/ircomm/ircomm_event.c rename to drivers/staging/irda/net/ircomm/ircomm_event.c diff --git a/net/irda/ircomm/ircomm_lmp.c b/drivers/staging/irda/net/ircomm/ircomm_lmp.c similarity index 100% rename from net/irda/ircomm/ircomm_lmp.c rename to drivers/staging/irda/net/ircomm/ircomm_lmp.c diff --git a/net/irda/ircomm/ircomm_param.c b/drivers/staging/irda/net/ircomm/ircomm_param.c similarity index 100% rename from net/irda/ircomm/ircomm_param.c rename to drivers/staging/irda/net/ircomm/ircomm_param.c diff --git a/net/irda/ircomm/ircomm_ttp.c b/drivers/staging/irda/net/ircomm/ircomm_ttp.c similarity index 100% rename from net/irda/ircomm/ircomm_ttp.c rename to drivers/staging/irda/net/ircomm/ircomm_ttp.c diff --git a/net/irda/ircomm/ircomm_tty.c b/drivers/staging/irda/net/ircomm/ircomm_tty.c similarity index 100% rename from net/irda/ircomm/ircomm_tty.c rename to drivers/staging/irda/net/ircomm/ircomm_tty.c diff --git a/net/irda/ircomm/ircomm_tty_attach.c b/drivers/staging/irda/net/ircomm/ircomm_tty_attach.c similarity index 100% rename from net/irda/ircomm/ircomm_tty_attach.c rename to drivers/staging/irda/net/ircomm/ircomm_tty_attach.c diff --git a/net/irda/ircomm/ircomm_tty_ioctl.c b/drivers/staging/irda/net/ircomm/ircomm_tty_ioctl.c similarity index 100% rename from net/irda/ircomm/ircomm_tty_ioctl.c rename to drivers/staging/irda/net/ircomm/ircomm_tty_ioctl.c diff --git a/net/irda/irda_device.c b/drivers/staging/irda/net/irda_device.c similarity index 100% rename from net/irda/irda_device.c rename to drivers/staging/irda/net/irda_device.c diff --git a/net/irda/iriap.c b/drivers/staging/irda/net/iriap.c similarity index 100% rename from net/irda/iriap.c rename to drivers/staging/irda/net/iriap.c diff --git a/net/irda/iriap_event.c b/drivers/staging/irda/net/iriap_event.c similarity index 100% rename from net/irda/iriap_event.c rename to drivers/staging/irda/net/iriap_event.c diff --git a/net/irda/irias_object.c b/drivers/staging/irda/net/irias_object.c similarity index 100% rename from net/irda/irias_object.c rename to drivers/staging/irda/net/irias_object.c diff --git a/net/irda/irlan/Kconfig b/drivers/staging/irda/net/irlan/Kconfig similarity index 100% rename from net/irda/irlan/Kconfig rename to drivers/staging/irda/net/irlan/Kconfig diff --git a/net/irda/irlan/Makefile 
b/drivers/staging/irda/net/irlan/Makefile similarity index 100% rename from net/irda/irlan/Makefile rename to drivers/staging/irda/net/irlan/Makefile diff --git a/net/irda/irlan/irlan_client.c b/drivers/staging/irda/net/irlan/irlan_client.c similarity index 100% rename from net/irda/irlan/irlan_client.c rename to drivers/staging/irda/net/irlan/irlan_client.c diff --git a/net/irda/irlan/irlan_client_event.c b/drivers/staging/irda/net/irlan/irlan_client_event.c similarity index 100% rename from net/irda/irlan/irlan_client_event.c rename to drivers/staging/irda/net/irlan/irlan_client_event.c diff --git a/net/irda/irlan/irlan_common.c b/drivers/staging/irda/net/irlan/irlan_common.c similarity index 100% rename from net/irda/irlan/irlan_common.c rename to drivers/staging/irda/net/irlan/irlan_common.c diff --git a/net/irda/irlan/irlan_eth.c b/drivers/staging/irda/net/irlan/irlan_eth.c similarity index 100% rename from net/irda/irlan/irlan_eth.c rename to drivers/staging/irda/net/irlan/irlan_eth.c diff --git a/net/irda/irlan/irlan_event.c b/drivers/staging/irda/net/irlan/irlan_event.c similarity index 100% rename from net/irda/irlan/irlan_event.c rename to drivers/staging/irda/net/irlan/irlan_event.c diff --git a/net/irda/irlan/irlan_filter.c b/drivers/staging/irda/net/irlan/irlan_filter.c similarity index 100% rename from net/irda/irlan/irlan_filter.c rename to drivers/staging/irda/net/irlan/irlan_filter.c diff --git a/net/irda/irlan/irlan_provider.c b/drivers/staging/irda/net/irlan/irlan_provider.c similarity index 100% rename from net/irda/irlan/irlan_provider.c rename to drivers/staging/irda/net/irlan/irlan_provider.c diff --git a/net/irda/irlan/irlan_provider_event.c b/drivers/staging/irda/net/irlan/irlan_provider_event.c similarity index 100% rename from net/irda/irlan/irlan_provider_event.c rename to drivers/staging/irda/net/irlan/irlan_provider_event.c diff --git a/net/irda/irlap.c b/drivers/staging/irda/net/irlap.c similarity index 100% rename from net/irda/irlap.c rename to drivers/staging/irda/net/irlap.c diff --git a/net/irda/irlap_event.c b/drivers/staging/irda/net/irlap_event.c similarity index 100% rename from net/irda/irlap_event.c rename to drivers/staging/irda/net/irlap_event.c diff --git a/net/irda/irlap_frame.c b/drivers/staging/irda/net/irlap_frame.c similarity index 100% rename from net/irda/irlap_frame.c rename to drivers/staging/irda/net/irlap_frame.c diff --git a/net/irda/irlmp.c b/drivers/staging/irda/net/irlmp.c similarity index 100% rename from net/irda/irlmp.c rename to drivers/staging/irda/net/irlmp.c diff --git a/net/irda/irlmp_event.c b/drivers/staging/irda/net/irlmp_event.c similarity index 100% rename from net/irda/irlmp_event.c rename to drivers/staging/irda/net/irlmp_event.c diff --git a/net/irda/irlmp_frame.c b/drivers/staging/irda/net/irlmp_frame.c similarity index 100% rename from net/irda/irlmp_frame.c rename to drivers/staging/irda/net/irlmp_frame.c diff --git a/net/irda/irmod.c b/drivers/staging/irda/net/irmod.c similarity index 99% rename from net/irda/irmod.c rename to drivers/staging/irda/net/irmod.c index c5e35b85c477..4319f4ff66b0 100644 --- a/net/irda/irmod.c +++ b/drivers/staging/irda/net/irmod.c @@ -190,7 +190,7 @@ static void __exit irda_cleanup(void) * * Jean II */ -subsys_initcall(irda_init); +device_initcall(irda_init); module_exit(irda_cleanup); MODULE_AUTHOR("Dag Brattli & Jean Tourrilhes "); diff --git a/net/irda/irnet/Kconfig b/drivers/staging/irda/net/irnet/Kconfig similarity index 100% rename from net/irda/irnet/Kconfig rename to 
drivers/staging/irda/net/irnet/Kconfig diff --git a/net/irda/irnet/Makefile b/drivers/staging/irda/net/irnet/Makefile similarity index 100% rename from net/irda/irnet/Makefile rename to drivers/staging/irda/net/irnet/Makefile diff --git a/net/irda/irnet/irnet.h b/drivers/staging/irda/net/irnet/irnet.h similarity index 100% rename from net/irda/irnet/irnet.h rename to drivers/staging/irda/net/irnet/irnet.h diff --git a/net/irda/irnet/irnet_irda.c b/drivers/staging/irda/net/irnet/irnet_irda.c similarity index 100% rename from net/irda/irnet/irnet_irda.c rename to drivers/staging/irda/net/irnet/irnet_irda.c diff --git a/net/irda/irnet/irnet_irda.h b/drivers/staging/irda/net/irnet/irnet_irda.h similarity index 100% rename from net/irda/irnet/irnet_irda.h rename to drivers/staging/irda/net/irnet/irnet_irda.h diff --git a/net/irda/irnet/irnet_ppp.c b/drivers/staging/irda/net/irnet/irnet_ppp.c similarity index 100% rename from net/irda/irnet/irnet_ppp.c rename to drivers/staging/irda/net/irnet/irnet_ppp.c diff --git a/net/irda/irnet/irnet_ppp.h b/drivers/staging/irda/net/irnet/irnet_ppp.h similarity index 100% rename from net/irda/irnet/irnet_ppp.h rename to drivers/staging/irda/net/irnet/irnet_ppp.h diff --git a/net/irda/irnetlink.c b/drivers/staging/irda/net/irnetlink.c similarity index 100% rename from net/irda/irnetlink.c rename to drivers/staging/irda/net/irnetlink.c diff --git a/net/irda/irproc.c b/drivers/staging/irda/net/irproc.c similarity index 100% rename from net/irda/irproc.c rename to drivers/staging/irda/net/irproc.c diff --git a/net/irda/irqueue.c b/drivers/staging/irda/net/irqueue.c similarity index 100% rename from net/irda/irqueue.c rename to drivers/staging/irda/net/irqueue.c diff --git a/net/irda/irsysctl.c b/drivers/staging/irda/net/irsysctl.c similarity index 100% rename from net/irda/irsysctl.c rename to drivers/staging/irda/net/irsysctl.c diff --git a/net/irda/irttp.c b/drivers/staging/irda/net/irttp.c similarity index 100% rename from net/irda/irttp.c rename to drivers/staging/irda/net/irttp.c diff --git a/net/irda/parameters.c b/drivers/staging/irda/net/parameters.c similarity index 100% rename from net/irda/parameters.c rename to drivers/staging/irda/net/parameters.c diff --git a/net/irda/qos.c b/drivers/staging/irda/net/qos.c similarity index 100% rename from net/irda/qos.c rename to drivers/staging/irda/net/qos.c diff --git a/net/irda/timer.c b/drivers/staging/irda/net/timer.c similarity index 100% rename from net/irda/timer.c rename to drivers/staging/irda/net/timer.c diff --git a/net/irda/wrapper.c b/drivers/staging/irda/net/wrapper.c similarity index 100% rename from net/irda/wrapper.c rename to drivers/staging/irda/net/wrapper.c diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.c b/drivers/staging/lustre/lnet/libcfs/tracefile.c index 68f283a2744c..f916b475e767 100644 --- a/drivers/staging/lustre/lnet/libcfs/tracefile.c +++ b/drivers/staging/lustre/lnet/libcfs/tracefile.c @@ -731,8 +731,7 @@ int cfs_tracefile_dump_all_pages(char *filename) __LASSERT_TAGE_INVARIANT(tage); buf = kmap(tage->page); - rc = vfs_write(filp, (__force const char __user *)buf, - tage->used, &filp->f_pos); + rc = kernel_write(filp, buf, tage->used, &filp->f_pos); kunmap(tage->page); if (rc != (int)tage->used) { @@ -976,7 +975,6 @@ static int tracefiled(void *arg) struct tracefiled_ctl *tctl = arg; struct cfs_trace_page *tage; struct cfs_trace_page *tmp; - mm_segment_t __oldfs; struct file *filp; char *buf; int last_loop = 0; @@ -1014,8 +1012,6 @@ static int tracefiled(void *arg) 
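The lustre tracefile hunks around this point, and the libcfs_kkuc_msg_put() change further down, drop the get_fs()/set_fs(KERNEL_DS) juggling and the forced __user cast in favour of kernel_write(), which accepts a kernel-space buffer directly and advances the passed-in offset. A minimal sketch of the new pattern, with a hypothetical dump_buffer() helper:

```c
#include <linux/fs.h>
#include <linux/kernel.h>

/*
 * Hypothetical helper: write a kernel buffer to an already-opened file.
 * kernel_write() takes a kernel pointer, so the mm_segment_t save/restore
 * and the (__force const char __user *) cast are no longer needed.
 */
static int dump_buffer(struct file *filp, const void *buf, size_t len,
		       loff_t *pos)
{
	ssize_t rc;

	while (len > 0) {
		rc = kernel_write(filp, buf, len, pos);	/* advances *pos */
		if (rc < 0)
			return rc;
		if (rc == 0)
			return -EIO;	/* avoid spinning on a zero-length write */
		buf += rc;
		len -= rc;
	}
	return 0;
}
```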
__LASSERT(list_empty(&pc.pc_pages)); goto end_loop; } - __oldfs = get_fs(); - set_fs(get_ds()); list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) { static loff_t f_pos; @@ -1028,8 +1024,7 @@ static int tracefiled(void *arg) f_pos = i_size_read(file_inode(filp)); buf = kmap(tage->page); - rc = vfs_write(filp, (__force const char __user *)buf, - tage->used, &f_pos); + rc = kernel_write(filp, buf, tage->used, &f_pos); kunmap(tage->page); if (rc != (int)tage->used) { @@ -1040,7 +1035,6 @@ static int tracefiled(void *arg) break; } } - set_fs(__oldfs); filp_close(filp, NULL); put_pages_on_daemon_list(&pc); diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c index d571b8b050d8..cb826e9e840e 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c @@ -596,7 +596,7 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data) default: getlk->fl_type = F_UNLCK; } - getlk->fl_pid = (pid_t)lock->l_policy_data.l_flock.pid; + getlk->fl_pid = -(pid_t)lock->l_policy_data.l_flock.pid; getlk->fl_start = (loff_t)lock->l_policy_data.l_flock.start; getlk->fl_end = (loff_t)lock->l_policy_data.l_flock.end; } else { diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c index 2c30e422a47e..be665454f407 100644 --- a/drivers/staging/lustre/lustre/llite/file.c +++ b/drivers/staging/lustre/lustre/llite/file.c @@ -2364,7 +2364,7 @@ int ll_fsync(struct file *file, loff_t start, loff_t end, int datasync) PFID(ll_inode2fid(inode)), inode); ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FSYNC, 1); - rc = filemap_write_and_wait_range(inode->i_mapping, start, end); + rc = file_write_and_wait_range(file, start, end); inode_lock(inode); /* catch async errors that were recorded back when async writeback diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c index d855129768f8..25393e3a0fe8 100644 --- a/drivers/staging/lustre/lustre/llite/llite_lib.c +++ b/drivers/staging/lustre/lustre/llite/llite_lib.c @@ -210,7 +210,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, data->ocd_ibits_known = MDS_INODELOCK_FULL; data->ocd_version = LUSTRE_VERSION_CODE; - if (sb->s_flags & MS_RDONLY) + if (sb_rdonly(sb)) data->ocd_connect_flags |= OBD_CONNECT_RDONLY; if (sbi->ll_flags & LL_SBI_USER_XATTR) data->ocd_connect_flags |= OBD_CONNECT_XATTR; @@ -2031,7 +2031,7 @@ int ll_remount_fs(struct super_block *sb, int *flags, char *data) int err; __u32 read_only; - if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) { + if ((bool)(*flags & MS_RDONLY) != sb_rdonly(sb)) { read_only = *flags & MS_RDONLY; err = obd_set_info_async(NULL, sbi->ll_md_exp, sizeof(KEY_READ_ONLY), diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c index 4897dbd3286d..5cc2b3255207 100644 --- a/drivers/staging/lustre/lustre/llite/namei.c +++ b/drivers/staging/lustre/lustre/llite/namei.c @@ -561,8 +561,7 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry, } } - if (it->it_op & IT_OPEN && it->it_flags & FMODE_WRITE && - dentry->d_sb->s_flags & MS_RDONLY) + if (it->it_op & IT_OPEN && it->it_flags & FMODE_WRITE && sb_rdonly(dentry->d_sb)) return ERR_PTR(-EROFS); if (it->it_op & IT_CREAT) diff --git a/drivers/staging/lustre/lustre/obdclass/kernelcomm.c b/drivers/staging/lustre/lustre/obdclass/kernelcomm.c index 
8f0707a27a83..4f0a42633d5a 100644 --- a/drivers/staging/lustre/lustre/obdclass/kernelcomm.c +++ b/drivers/staging/lustre/lustre/obdclass/kernelcomm.c @@ -52,7 +52,6 @@ int libcfs_kkuc_msg_put(struct file *filp, void *payload) struct kuc_hdr *kuch = (struct kuc_hdr *)payload; ssize_t count = kuch->kuc_msglen; loff_t offset = 0; - mm_segment_t fs; int rc = -ENXIO; if (IS_ERR_OR_NULL(filp)) @@ -63,18 +62,14 @@ int libcfs_kkuc_msg_put(struct file *filp, void *payload) return rc; } - fs = get_fs(); - set_fs(KERNEL_DS); while (count > 0) { - rc = vfs_write(filp, (void __force __user *)payload, - count, &offset); + rc = kernel_write(filp, payload, count, &offset); if (rc < 0) break; count -= rc; payload += rc; rc = 0; } - set_fs(fs); if (rc < 0) CWARN("message send failed (%d)\n", rc); diff --git a/drivers/staging/media/atomisp/i2c/ap1302.c b/drivers/staging/media/atomisp/i2c/ap1302.c index bacffbe962d4..2f772a020c8b 100644 --- a/drivers/staging/media/atomisp/i2c/ap1302.c +++ b/drivers/staging/media/atomisp/i2c/ap1302.c @@ -11,11 +11,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA. - * */ #include "../include/linux/atomisp.h" @@ -1098,7 +1093,7 @@ static const struct v4l2_ctrl_config ctrls[] = { }, }; -static struct v4l2_subdev_sensor_ops ap1302_sensor_ops = { +static const struct v4l2_subdev_sensor_ops ap1302_sensor_ops = { .g_skip_frames = ap1302_g_skip_frames, }; diff --git a/drivers/staging/media/atomisp/i2c/gc0310.c b/drivers/staging/media/atomisp/i2c/gc0310.c index 350fd7fd5b86..35ed51ffe944 100644 --- a/drivers/staging/media/atomisp/i2c/gc0310.c +++ b/drivers/staging/media/atomisp/i2c/gc0310.c @@ -118,9 +118,8 @@ static int gc0310_write_reg(struct i2c_client *client, u16 data_length, /* high byte goes out first */ *wreg = (u8)(reg & 0xff); - if (data_length == GC0310_8BIT) { + if (data_length == GC0310_8BIT) data[1] = (u8)(val); - } ret = gc0310_i2c_write(client, len, data); if (ret) @@ -1453,7 +1452,7 @@ static int gc0310_probe(struct i2c_client *client, return ret; } -static struct acpi_device_id gc0310_acpi_match[] = { +static const struct acpi_device_id gc0310_acpi_match[] = { {"XXGC0310"}, {"INT0310"}, {}, diff --git a/drivers/staging/media/atomisp/i2c/gc2235.c b/drivers/staging/media/atomisp/i2c/gc2235.c index 50f431729b6c..e43d31ea9676 100644 --- a/drivers/staging/media/atomisp/i2c/gc2235.c +++ b/drivers/staging/media/atomisp/i2c/gc2235.c @@ -480,7 +480,7 @@ static const struct v4l2_ctrl_ops ctrl_ops = { .g_volatile_ctrl = gc2235_g_volatile_ctrl }; -struct v4l2_ctrl_config gc2235_controls[] = { +static struct v4l2_ctrl_config gc2235_controls[] = { { .ops = &ctrl_ops, .id = V4L2_CID_EXPOSURE_ABSOLUTE, @@ -1183,7 +1183,7 @@ static int gc2235_probe(struct i2c_client *client, return ret; } -static struct acpi_device_id gc2235_acpi_match[] = { +static const struct acpi_device_id gc2235_acpi_match[] = { { "INT33F8" }, {}, }; diff --git a/drivers/staging/media/atomisp/i2c/gc2235.h b/drivers/staging/media/atomisp/i2c/gc2235.h index 7c3d994180cc..a8d6aa9c9a5d 100644 --- a/drivers/staging/media/atomisp/i2c/gc2235.h +++ b/drivers/staging/media/atomisp/i2c/gc2235.h @@ -530,7 +530,7 @@ static struct gc2235_reg const gc2235_1616_1216_30fps[] = { { GC2235_TOK_TERM, 0, 0 } }; -struct gc2235_resolution 
gc2235_res_preview[] = { +static struct gc2235_resolution gc2235_res_preview[] = { { .desc = "gc2235_1600_900_30fps", @@ -582,7 +582,7 @@ struct gc2235_resolution gc2235_res_preview[] = { }; #define N_RES_PREVIEW (ARRAY_SIZE(gc2235_res_preview)) -struct gc2235_resolution gc2235_res_still[] = { +static struct gc2235_resolution gc2235_res_still[] = { { .desc = "gc2235_1600_900_30fps", .width = 1600, @@ -632,7 +632,7 @@ struct gc2235_resolution gc2235_res_still[] = { }; #define N_RES_STILL (ARRAY_SIZE(gc2235_res_still)) -struct gc2235_resolution gc2235_res_video[] = { +static struct gc2235_resolution gc2235_res_video[] = { { .desc = "gc2235_1296_736_30fps", .width = 1296, diff --git a/drivers/staging/media/atomisp/i2c/imx/ad5816g.c b/drivers/staging/media/atomisp/i2c/imx/ad5816g.c index d68ebb49f002..558dcdf135d9 100644 --- a/drivers/staging/media/atomisp/i2c/imx/ad5816g.c +++ b/drivers/staging/media/atomisp/i2c/imx/ad5816g.c @@ -136,7 +136,7 @@ int ad5816g_vcm_power_down(struct v4l2_subdev *sd) } -int ad5816g_t_focus_vcm(struct v4l2_subdev *sd, u16 val) +static int ad5816g_t_focus_vcm(struct v4l2_subdev *sd, u16 val) { struct i2c_client *client = v4l2_get_subdevdata(sd); u16 data = val & VCM_CODE_MASK; @@ -214,12 +214,3 @@ int ad5816g_t_vcm_timing(struct v4l2_subdev *sd, s32 value) { return 0; } - -int ad5816g_vcm_init(struct v4l2_subdev *sd) -{ - ad5816g_dev.platform_data = camera_get_af_platform_data(); - return (NULL == ad5816g_dev.platform_data) ? -ENODEV : 0; - -} - - diff --git a/drivers/staging/media/atomisp/i2c/imx/drv201.c b/drivers/staging/media/atomisp/i2c/imx/drv201.c index 915e4019cfeb..6d9d4c968722 100644 --- a/drivers/staging/media/atomisp/i2c/imx/drv201.c +++ b/drivers/staging/media/atomisp/i2c/imx/drv201.c @@ -128,7 +128,7 @@ int drv201_vcm_power_down(struct v4l2_subdev *sd) } -int drv201_t_focus_vcm(struct v4l2_subdev *sd, u16 val) +static int drv201_t_focus_vcm(struct v4l2_subdev *sd, u16 val) { struct i2c_client *client = v4l2_get_subdevdata(sd); u16 data = val & VCM_CODE_MASK; @@ -207,12 +207,3 @@ int drv201_t_vcm_timing(struct v4l2_subdev *sd, s32 value) { return 0; } - -int drv201_vcm_init(struct v4l2_subdev *sd) -{ - drv201_dev.platform_data = camera_get_af_platform_data(); - return (NULL == drv201_dev.platform_data) ? -ENODEV : 0; -} - - - diff --git a/drivers/staging/media/atomisp/i2c/imx/dw9714.c b/drivers/staging/media/atomisp/i2c/imx/dw9714.c index b7dee1b6bb37..6397a7ee0af6 100644 --- a/drivers/staging/media/atomisp/i2c/imx/dw9714.c +++ b/drivers/staging/media/atomisp/i2c/imx/dw9714.c @@ -56,7 +56,7 @@ int dw9714_vcm_power_down(struct v4l2_subdev *sd) } -int dw9714_t_focus_vcm(struct v4l2_subdev *sd, u16 val) +static int dw9714_t_focus_vcm(struct v4l2_subdev *sd, u16 val) { struct i2c_client *client = v4l2_get_subdevdata(sd); int ret = -EINVAL; @@ -221,15 +221,3 @@ int dw9714_t_vcm_timing(struct v4l2_subdev *sd, s32 value) return 0; } - -int dw9714_vcm_init(struct v4l2_subdev *sd) -{ - - /* set VCM to home position and vcm mode to direct*/ - dw9714_dev.vcm_mode = DW9714_DIRECT; - dw9714_dev.vcm_settings.update = false; - dw9714_dev.platform_data = camera_get_af_platform_data(); - return (NULL == dw9714_dev.platform_data) ? 
-ENODEV : 0; - -} - diff --git a/drivers/staging/media/atomisp/i2c/imx/dw9718.c b/drivers/staging/media/atomisp/i2c/imx/dw9718.c index 65a1fcf187d5..c02b9f0a2440 100644 --- a/drivers/staging/media/atomisp/i2c/imx/dw9718.c +++ b/drivers/staging/media/atomisp/i2c/imx/dw9718.c @@ -204,11 +204,6 @@ int dw9718_q_focus_status(struct v4l2_subdev *sd, s32 *value) return 0; } -int dw9718_t_focus_vcm(struct v4l2_subdev *sd, u16 val) -{ - return -EINVAL; -} - int dw9718_t_focus_rel(struct v4l2_subdev *sd, s32 value) { return dw9718_t_focus_abs(sd, dw9718_dev.focus + value); diff --git a/drivers/staging/media/atomisp/i2c/imx/dw9719.c b/drivers/staging/media/atomisp/i2c/imx/dw9719.c index eca2d7640030..565237796bb4 100644 --- a/drivers/staging/media/atomisp/i2c/imx/dw9719.c +++ b/drivers/staging/media/atomisp/i2c/imx/dw9719.c @@ -161,11 +161,6 @@ int dw9719_q_focus_status(struct v4l2_subdev *sd, s32 *value) return 0; } -int dw9719_t_focus_vcm(struct v4l2_subdev *sd, u16 val) -{ - return -EINVAL; -} - int dw9719_t_focus_abs(struct v4l2_subdev *sd, s32 value) { struct i2c_client *client = v4l2_get_subdevdata(sd); @@ -201,9 +196,3 @@ int dw9719_t_vcm_timing(struct v4l2_subdev *sd, s32 value) { return 0; } - -int dw9719_vcm_init(struct v4l2_subdev *sd) -{ - dw9719_dev.platform_data = camera_get_af_platform_data(); - return (NULL == dw9719_dev.platform_data) ? -ENODEV : 0; -} diff --git a/drivers/staging/media/atomisp/i2c/imx/imx.c b/drivers/staging/media/atomisp/i2c/imx/imx.c index 408a7b945153..49ab0af87096 100644 --- a/drivers/staging/media/atomisp/i2c/imx/imx.c +++ b/drivers/staging/media/atomisp/i2c/imx/imx.c @@ -1084,46 +1084,15 @@ static int imx_g_bin_factor_y(struct v4l2_subdev *sd, s32 *val) return 0; } -int imx_vcm_power_up(struct v4l2_subdev *sd) -{ - struct imx_device *dev = to_imx_sensor(sd); - if (dev->vcm_driver && dev->vcm_driver->power_up) - return dev->vcm_driver->power_up(sd); - return 0; -} - -int imx_vcm_power_down(struct v4l2_subdev *sd) -{ - struct imx_device *dev = to_imx_sensor(sd); - if (dev->vcm_driver && dev->vcm_driver->power_down) - return dev->vcm_driver->power_down(sd); - return 0; -} - -int imx_vcm_init(struct v4l2_subdev *sd) -{ - struct imx_device *dev = to_imx_sensor(sd); - if (dev->vcm_driver && dev->vcm_driver->init) - return dev->vcm_driver->init(sd); - return 0; -} - -int imx_t_focus_vcm(struct v4l2_subdev *sd, u16 val) -{ - struct imx_device *dev = to_imx_sensor(sd); - if (dev->vcm_driver && dev->vcm_driver->t_focus_vcm) - return dev->vcm_driver->t_focus_vcm(sd, val); - return 0; -} - -int imx_t_focus_abs(struct v4l2_subdev *sd, s32 value) +static int imx_t_focus_abs(struct v4l2_subdev *sd, s32 value) { struct imx_device *dev = to_imx_sensor(sd); if (dev->vcm_driver && dev->vcm_driver->t_focus_abs) return dev->vcm_driver->t_focus_abs(sd, value); return 0; } -int imx_t_focus_rel(struct v4l2_subdev *sd, s32 value) + +static int imx_t_focus_rel(struct v4l2_subdev *sd, s32 value) { struct imx_device *dev = to_imx_sensor(sd); if (dev->vcm_driver && dev->vcm_driver->t_focus_rel) @@ -1131,7 +1100,7 @@ int imx_t_focus_rel(struct v4l2_subdev *sd, s32 value) return 0; } -int imx_q_focus_status(struct v4l2_subdev *sd, s32 *value) +static int imx_q_focus_status(struct v4l2_subdev *sd, s32 *value) { struct imx_device *dev = to_imx_sensor(sd); if (dev->vcm_driver && dev->vcm_driver->q_focus_status) @@ -1139,7 +1108,7 @@ int imx_q_focus_status(struct v4l2_subdev *sd, s32 *value) return 0; } -int imx_q_focus_abs(struct v4l2_subdev *sd, s32 *value) +static int imx_q_focus_abs(struct 
v4l2_subdev *sd, s32 *value) { struct imx_device *dev = to_imx_sensor(sd); if (dev->vcm_driver && dev->vcm_driver->q_focus_abs) @@ -1147,7 +1116,7 @@ int imx_q_focus_abs(struct v4l2_subdev *sd, s32 *value) return 0; } -int imx_t_vcm_slew(struct v4l2_subdev *sd, s32 value) +static int imx_t_vcm_slew(struct v4l2_subdev *sd, s32 value) { struct imx_device *dev = to_imx_sensor(sd); if (dev->vcm_driver && dev->vcm_driver->t_vcm_slew) @@ -1155,7 +1124,7 @@ int imx_t_vcm_slew(struct v4l2_subdev *sd, s32 value) return 0; } -int imx_t_vcm_timing(struct v4l2_subdev *sd, s32 value) +static int imx_t_vcm_timing(struct v4l2_subdev *sd, s32 value) { struct imx_device *dev = to_imx_sensor(sd); if (dev->vcm_driver && dev->vcm_driver->t_vcm_timing) @@ -2105,8 +2074,7 @@ imx_s_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *param) return 0; } -int -imx_g_frame_interval(struct v4l2_subdev *sd, +static int imx_g_frame_interval(struct v4l2_subdev *sd, struct v4l2_subdev_frame_interval *interval) { struct imx_device *dev = to_imx_sensor(sd); diff --git a/drivers/staging/media/atomisp/i2c/imx/imx.h b/drivers/staging/media/atomisp/i2c/imx/imx.h index 41b4133ca995..30beb2a0ed93 100644 --- a/drivers/staging/media/atomisp/i2c/imx/imx.h +++ b/drivers/staging/media/atomisp/i2c/imx/imx.h @@ -222,8 +222,6 @@ struct imx_vcm { int (*power_up)(struct v4l2_subdev *sd); int (*power_down)(struct v4l2_subdev *sd); - int (*init)(struct v4l2_subdev *sd); - int (*t_focus_vcm)(struct v4l2_subdev *sd, u16 val); int (*t_focus_abs)(struct v4l2_subdev *sd, s32 value); int (*t_focus_abs_init)(struct v4l2_subdev *sd); int (*t_focus_rel)(struct v4l2_subdev *sd, s32 value); @@ -549,9 +547,6 @@ static const struct imx_reg imx219_param_update[] = { extern int ad5816g_vcm_power_up(struct v4l2_subdev *sd); extern int ad5816g_vcm_power_down(struct v4l2_subdev *sd); -extern int ad5816g_vcm_init(struct v4l2_subdev *sd); - -extern int ad5816g_t_focus_vcm(struct v4l2_subdev *sd, u16 val); extern int ad5816g_t_focus_abs(struct v4l2_subdev *sd, s32 value); extern int ad5816g_t_focus_rel(struct v4l2_subdev *sd, s32 value); extern int ad5816g_q_focus_status(struct v4l2_subdev *sd, s32 *value); @@ -561,9 +556,6 @@ extern int ad5816g_t_vcm_timing(struct v4l2_subdev *sd, s32 value); extern int drv201_vcm_power_up(struct v4l2_subdev *sd); extern int drv201_vcm_power_down(struct v4l2_subdev *sd); -extern int drv201_vcm_init(struct v4l2_subdev *sd); - -extern int drv201_t_focus_vcm(struct v4l2_subdev *sd, u16 val); extern int drv201_t_focus_abs(struct v4l2_subdev *sd, s32 value); extern int drv201_t_focus_rel(struct v4l2_subdev *sd, s32 value); extern int drv201_q_focus_status(struct v4l2_subdev *sd, s32 *value); @@ -573,9 +565,6 @@ extern int drv201_t_vcm_timing(struct v4l2_subdev *sd, s32 value); extern int dw9714_vcm_power_up(struct v4l2_subdev *sd); extern int dw9714_vcm_power_down(struct v4l2_subdev *sd); -extern int dw9714_vcm_init(struct v4l2_subdev *sd); - -extern int dw9714_t_focus_vcm(struct v4l2_subdev *sd, u16 val); extern int dw9714_t_focus_abs(struct v4l2_subdev *sd, s32 value); extern int dw9714_t_focus_abs_init(struct v4l2_subdev *sd); extern int dw9714_t_focus_rel(struct v4l2_subdev *sd, s32 value); @@ -586,9 +575,6 @@ extern int dw9714_t_vcm_timing(struct v4l2_subdev *sd, s32 value); extern int dw9719_vcm_power_up(struct v4l2_subdev *sd); extern int dw9719_vcm_power_down(struct v4l2_subdev *sd); -extern int dw9719_vcm_init(struct v4l2_subdev *sd); - -extern int dw9719_t_focus_vcm(struct v4l2_subdev *sd, u16 val); extern int 
dw9719_t_focus_abs(struct v4l2_subdev *sd, s32 value); extern int dw9719_t_focus_rel(struct v4l2_subdev *sd, s32 value); extern int dw9719_q_focus_status(struct v4l2_subdev *sd, s32 *value); @@ -598,9 +584,6 @@ extern int dw9719_t_vcm_timing(struct v4l2_subdev *sd, s32 value); extern int dw9718_vcm_power_up(struct v4l2_subdev *sd); extern int dw9718_vcm_power_down(struct v4l2_subdev *sd); -extern int dw9718_vcm_init(struct v4l2_subdev *sd); - -extern int dw9718_t_focus_vcm(struct v4l2_subdev *sd, u16 val); extern int dw9718_t_focus_abs(struct v4l2_subdev *sd, s32 value); extern int dw9718_t_focus_rel(struct v4l2_subdev *sd, s32 value); extern int dw9718_q_focus_status(struct v4l2_subdev *sd, s32 *value); @@ -615,8 +598,6 @@ struct imx_vcm imx_vcms[] = { [IMX175_MERRFLD] = { .power_up = drv201_vcm_power_up, .power_down = drv201_vcm_power_down, - .init = drv201_vcm_init, - .t_focus_vcm = drv201_t_focus_vcm, .t_focus_abs = drv201_t_focus_abs, .t_focus_abs_init = NULL, .t_focus_rel = drv201_t_focus_rel, @@ -628,8 +609,6 @@ struct imx_vcm imx_vcms[] = { [IMX175_VALLEYVIEW] = { .power_up = dw9714_vcm_power_up, .power_down = dw9714_vcm_power_down, - .init = dw9714_vcm_init, - .t_focus_vcm = dw9714_t_focus_vcm, .t_focus_abs = dw9714_t_focus_abs, .t_focus_abs_init = NULL, .t_focus_rel = dw9714_t_focus_rel, @@ -641,8 +620,6 @@ struct imx_vcm imx_vcms[] = { [IMX135_SALTBAY] = { .power_up = ad5816g_vcm_power_up, .power_down = ad5816g_vcm_power_down, - .init = ad5816g_vcm_init, - .t_focus_vcm = ad5816g_t_focus_vcm, .t_focus_abs = ad5816g_t_focus_abs, .t_focus_abs_init = NULL, .t_focus_rel = ad5816g_t_focus_rel, @@ -654,8 +631,6 @@ struct imx_vcm imx_vcms[] = { [IMX135_VICTORIABAY] = { .power_up = dw9719_vcm_power_up, .power_down = dw9719_vcm_power_down, - .init = dw9719_vcm_init, - .t_focus_vcm = dw9719_t_focus_vcm, .t_focus_abs = dw9719_t_focus_abs, .t_focus_abs_init = NULL, .t_focus_rel = dw9719_t_focus_rel, @@ -667,8 +642,6 @@ struct imx_vcm imx_vcms[] = { [IMX134_VALLEYVIEW] = { .power_up = dw9714_vcm_power_up, .power_down = dw9714_vcm_power_down, - .init = dw9714_vcm_init, - .t_focus_vcm = dw9714_t_focus_vcm, .t_focus_abs = dw9714_t_focus_abs, .t_focus_abs_init = dw9714_t_focus_abs_init, .t_focus_rel = dw9714_t_focus_rel, @@ -680,8 +653,6 @@ struct imx_vcm imx_vcms[] = { [IMX219_MFV0_PRH] = { .power_up = dw9718_vcm_power_up, .power_down = dw9718_vcm_power_down, - .init = dw9718_vcm_init, - .t_focus_vcm = dw9718_t_focus_vcm, .t_focus_abs = dw9718_t_focus_abs, .t_focus_abs_init = NULL, .t_focus_rel = dw9718_t_focus_rel, diff --git a/drivers/staging/media/atomisp/i2c/lm3554.c b/drivers/staging/media/atomisp/i2c/lm3554.c index 2b170c07aaba..679176f7c542 100644 --- a/drivers/staging/media/atomisp/i2c/lm3554.c +++ b/drivers/staging/media/atomisp/i2c/lm3554.c @@ -974,7 +974,7 @@ static const struct dev_pm_ops lm3554_pm_ops = { .resume = lm3554_resume, }; -static struct acpi_device_id lm3554_acpi_match[] = { +static const struct acpi_device_id lm3554_acpi_match[] = { { "INTCF1C" }, {}, }; diff --git a/drivers/staging/media/atomisp/i2c/mt9m114.c b/drivers/staging/media/atomisp/i2c/mt9m114.c index 3fa915313e53..3c837cb8859c 100644 --- a/drivers/staging/media/atomisp/i2c/mt9m114.c +++ b/drivers/staging/media/atomisp/i2c/mt9m114.c @@ -1209,10 +1209,10 @@ static int mt9m114_s_exposure_selection(struct v4l2_subdev *sd, return -EINVAL; } - clamp_t(int, win_left, 0, 4); - clamp_t(int, win_top, 0, 4); - clamp_t(int, win_right, 0, 4); - clamp_t(int, win_bottom, 0, 4); + win_left = clamp_t(int, win_left, 0, 4); + 
win_top = clamp_t(int, win_top, 0, 4); + win_right = clamp_t(int, win_right, 0, 4); + win_bottom = clamp_t(int, win_bottom, 0, 4); ret = mt9m114_write_reg_array(client, mt9m114_exp_average, NO_POLLING); if (ret) { @@ -1806,7 +1806,7 @@ static const struct v4l2_subdev_video_ops mt9m114_video_ops = { .g_frame_interval = mt9m114_g_frame_interval, }; -static struct v4l2_subdev_sensor_ops mt9m114_sensor_ops = { +static const struct v4l2_subdev_sensor_ops mt9m114_sensor_ops = { .g_skip_frames = mt9m114_g_skip_frames, }; @@ -1928,7 +1928,7 @@ static int mt9m114_probe(struct i2c_client *client, MODULE_DEVICE_TABLE(i2c, mt9m114_id); -static struct acpi_device_id mt9m114_acpi_match[] = { +static const struct acpi_device_id mt9m114_acpi_match[] = { { "INT33F0" }, { "CRMT1040" }, {}, diff --git a/drivers/staging/media/atomisp/i2c/ov2680.c b/drivers/staging/media/atomisp/i2c/ov2680.c index 3cabfe54c669..51b7d61df0f5 100644 --- a/drivers/staging/media/atomisp/i2c/ov2680.c +++ b/drivers/staging/media/atomisp/i2c/ov2680.c @@ -89,7 +89,7 @@ static int ov2680_read_reg(struct i2c_client *client, "read from offset 0x%x error %d", reg, err); return err; } - + *val = 0; /* high byte comes first */ if (data_length == OV2680_8BIT) @@ -285,7 +285,6 @@ static int ov2680_g_fnumber(struct v4l2_subdev *sd, s32 *val) static int ov2680_g_fnumber_range(struct v4l2_subdev *sd, s32 *val) { - *val = (OV2680_F_NUMBER_DEFAULT_NUM << 24) | (OV2680_F_NUMBER_DEM << 16) | (OV2680_F_NUMBER_DEFAULT_NUM << 8) | OV2680_F_NUMBER_DEM; @@ -306,7 +305,7 @@ static int ov2680_g_bin_factor_y(struct v4l2_subdev *sd, s32 *val) { struct ov2680_device *dev = to_ov2680_sensor(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); - + *val = ov2680_res[dev->fmt_idx].bin_factor_y; dev_dbg(&client->dev, "++++ov2680_g_bin_factor_y\n"); return 0; @@ -399,7 +398,7 @@ static long __ov2680_set_exposure(struct v4l2_subdev *sd, int coarse_itg, struct ov2680_device *dev = to_ov2680_sensor(sd); u16 vts,hts; int ret,exp_val; - + dev_dbg(&client->dev, "+++++++__ov2680_set_exposure coarse_itg %d, gain %d, digitgain %d++\n",coarse_itg, gain, digitgain); hts = ov2680_res[dev->fmt_idx].pixels_per_line; @@ -542,7 +541,7 @@ static long ov2680_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg) switch (cmd) { case ATOMISP_IOC_S_EXPOSURE: return ov2680_s_exposure(sd, arg); - + default: return -EINVAL; } @@ -983,7 +982,7 @@ static int ov2680_s_power(struct v4l2_subdev *sd, int on) if (on == 0){ ret = power_down(sd); } else { - ret = power_up(sd); + ret = power_up(sd); if (!ret) return ov2680_init(sd); } @@ -1207,7 +1206,7 @@ static int ov2680_s_stream(struct v4l2_subdev *sd, int enable) dev_dbg(&client->dev, "ov2680_s_stream one \n"); else dev_dbg(&client->dev, "ov2680_s_stream off \n"); - + ret = ov2680_write_reg(client, OV2680_8BIT, OV2680_SW_STREAM, enable ? 
OV2680_START_STREAMING : OV2680_STOP_STREAMING); @@ -1267,7 +1266,7 @@ static int ov2680_s_config(struct v4l2_subdev *sd, dev_err(&client->dev, "ov2680_detect err s_config.\n"); goto fail_csi_cfg; } - + /* turn off sensor, after probed */ ret = power_down(sd); if (ret) { @@ -1385,7 +1384,7 @@ static int ov2680_enum_frame_size(struct v4l2_subdev *sd, static int ov2680_g_skip_frames(struct v4l2_subdev *sd, u32 *frames) { struct ov2680_device *dev = to_ov2680_sensor(sd); - + mutex_lock(&dev->input_lock); *frames = ov2680_res[dev->fmt_idx].skip_frames; mutex_unlock(&dev->input_lock); @@ -1517,7 +1516,7 @@ static int ov2680_probe(struct i2c_client *client, return ret; } -static struct acpi_device_id ov2680_acpi_match[] = { +static const struct acpi_device_id ov2680_acpi_match[] = { {"XXOV2680"}, {"OVTI2680"}, {}, diff --git a/drivers/staging/media/atomisp/i2c/ov2722.c b/drivers/staging/media/atomisp/i2c/ov2722.c index b7afadebdf89..10094ac56561 100644 --- a/drivers/staging/media/atomisp/i2c/ov2722.c +++ b/drivers/staging/media/atomisp/i2c/ov2722.c @@ -1337,7 +1337,7 @@ static int ov2722_probe(struct i2c_client *client, MODULE_DEVICE_TABLE(i2c, ov2722_id); -static struct acpi_device_id ov2722_acpi_match[] = { +static const struct acpi_device_id ov2722_acpi_match[] = { { "INT33FB" }, {}, }; diff --git a/drivers/staging/media/atomisp/i2c/ov5693/ov5693.c b/drivers/staging/media/atomisp/i2c/ov5693/ov5693.c index d6447398f5ef..123642557aa8 100644 --- a/drivers/staging/media/atomisp/i2c/ov5693/ov5693.c +++ b/drivers/staging/media/atomisp/i2c/ov5693/ov5693.c @@ -146,7 +146,7 @@ static int ov5693_read_reg(struct i2c_client *client, return -EINVAL; } - memset(msg, 0 , sizeof(msg)); + memset(msg, 0, sizeof(msg)); msg[0].addr = client->addr; msg[0].flags = 0; @@ -702,7 +702,7 @@ static long ov5693_s_exposure(struct v4l2_subdev *sd, } static int ov5693_read_otp_reg_array(struct i2c_client *client, u16 size, - u16 addr, u8 * buf) + u16 addr, u8 *buf) { u16 index; int ret; @@ -720,7 +720,7 @@ static int ov5693_read_otp_reg_array(struct i2c_client *client, u16 size, return 0; } -static int __ov5693_otp_read(struct v4l2_subdev *sd, u8 * buf) +static int __ov5693_otp_read(struct v4l2_subdev *sd, u8 *buf) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct ov5693_device *dev = to_ov5693_sensor(sd); @@ -913,7 +913,7 @@ static int ov5693_q_exposure(struct v4l2_subdev *sd, s32 *value) return ret; } -int ad5823_t_focus_vcm(struct v4l2_subdev *sd, u16 val) +static int ad5823_t_focus_vcm(struct v4l2_subdev *sd, u16 val) { struct i2c_client *client = v4l2_get_subdevdata(sd); int ret = -EINVAL; @@ -2032,7 +2032,7 @@ static int ov5693_probe(struct i2c_client *client, MODULE_DEVICE_TABLE(i2c, ov5693_id); -static struct acpi_device_id ov5693_acpi_match[] = { +static const struct acpi_device_id ov5693_acpi_match[] = { {"INT33BE"}, {}, }; diff --git a/drivers/staging/media/atomisp/i2c/ov8858.c b/drivers/staging/media/atomisp/i2c/ov8858.c index 9574bc49113c..43e1638fd674 100644 --- a/drivers/staging/media/atomisp/i2c/ov8858.c +++ b/drivers/staging/media/atomisp/i2c/ov8858.c @@ -2189,7 +2189,7 @@ static const struct i2c_device_id ov8858_id[] = { MODULE_DEVICE_TABLE(i2c, ov8858_id); -static struct acpi_device_id ov8858_acpi_match[] = { +static const struct acpi_device_id ov8858_acpi_match[] = { {"INT3477"}, {}, }; diff --git a/drivers/staging/media/atomisp/i2c/ov8858.h b/drivers/staging/media/atomisp/i2c/ov8858.h index d3fde200c013..638d1a803a2b 100644 --- a/drivers/staging/media/atomisp/i2c/ov8858.h +++ 
b/drivers/staging/media/atomisp/i2c/ov8858.h @@ -164,7 +164,6 @@ struct ov8858_vcm { int (*power_up)(struct v4l2_subdev *sd); int (*power_down)(struct v4l2_subdev *sd); int (*init)(struct v4l2_subdev *sd); - int (*t_focus_vcm)(struct v4l2_subdev *sd, u16 val); int (*t_focus_abs)(struct v4l2_subdev *sd, s32 value); int (*t_focus_rel)(struct v4l2_subdev *sd, s32 value); int (*q_focus_status)(struct v4l2_subdev *sd, s32 *value); @@ -312,7 +311,6 @@ static const struct ov8858_reg ov8858_param_update[] = { extern int dw9718_vcm_power_up(struct v4l2_subdev *sd); extern int dw9718_vcm_power_down(struct v4l2_subdev *sd); extern int dw9718_vcm_init(struct v4l2_subdev *sd); -extern int dw9718_t_focus_vcm(struct v4l2_subdev *sd, u16 val); extern int dw9718_t_focus_abs(struct v4l2_subdev *sd, s32 value); extern int dw9718_t_focus_rel(struct v4l2_subdev *sd, s32 value); extern int dw9718_q_focus_status(struct v4l2_subdev *sd, s32 *value); @@ -328,7 +326,6 @@ static struct ov8858_vcm ov8858_vcms[] = { .power_up = dw9718_vcm_power_up, .power_down = dw9718_vcm_power_down, .init = dw9718_vcm_init, - .t_focus_vcm = dw9718_t_focus_vcm, .t_focus_abs = dw9718_t_focus_abs, .t_focus_rel = dw9718_t_focus_rel, .q_focus_status = dw9718_q_focus_status, diff --git a/drivers/staging/media/atomisp/i2c/ov8858_btns.h b/drivers/staging/media/atomisp/i2c/ov8858_btns.h index f9a3cf8fbf1a..7d74a8899fae 100644 --- a/drivers/staging/media/atomisp/i2c/ov8858_btns.h +++ b/drivers/staging/media/atomisp/i2c/ov8858_btns.h @@ -164,7 +164,6 @@ struct ov8858_vcm { int (*power_up)(struct v4l2_subdev *sd); int (*power_down)(struct v4l2_subdev *sd); int (*init)(struct v4l2_subdev *sd); - int (*t_focus_vcm)(struct v4l2_subdev *sd, u16 val); int (*t_focus_abs)(struct v4l2_subdev *sd, s32 value); int (*t_focus_rel)(struct v4l2_subdev *sd, s32 value); int (*q_focus_status)(struct v4l2_subdev *sd, s32 *value); @@ -312,7 +311,6 @@ static const struct ov8858_reg ov8858_param_update[] = { extern int dw9718_vcm_power_up(struct v4l2_subdev *sd); extern int dw9718_vcm_power_down(struct v4l2_subdev *sd); extern int dw9718_vcm_init(struct v4l2_subdev *sd); -extern int dw9718_t_focus_vcm(struct v4l2_subdev *sd, u16 val); extern int dw9718_t_focus_abs(struct v4l2_subdev *sd, s32 value); extern int dw9718_t_focus_rel(struct v4l2_subdev *sd, s32 value); extern int dw9718_q_focus_status(struct v4l2_subdev *sd, s32 *value); @@ -328,7 +326,6 @@ static struct ov8858_vcm ov8858_vcms[] = { .power_up = dw9718_vcm_power_up, .power_down = dw9718_vcm_power_down, .init = dw9718_vcm_init, - .t_focus_vcm = dw9718_t_focus_vcm, .t_focus_abs = dw9718_t_focus_abs, .t_focus_rel = dw9718_t_focus_rel, .q_focus_status = dw9718_q_focus_status, diff --git a/drivers/staging/media/atomisp/include/linux/atomisp.h b/drivers/staging/media/atomisp/include/linux/atomisp.h index 35865462ccf9..d67dd658cff9 100644 --- a/drivers/staging/media/atomisp/include/linux/atomisp.h +++ b/drivers/staging/media/atomisp/include/linux/atomisp.h @@ -28,12 +28,6 @@ #include #include -/* struct media_device_info.driver_version */ -#define ATOMISP_CSS_VERSION_MASK 0x00ffffff -#define ATOMISP_CSS_VERSION_15 KERNEL_VERSION(1, 5, 0) -#define ATOMISP_CSS_VERSION_20 KERNEL_VERSION(2, 0, 0) -#define ATOMISP_CSS_VERSION_21 KERNEL_VERSION(2, 1, 0) - /* struct media_device_info.hw_revision */ #define ATOMISP_HW_REVISION_MASK 0x0000ff00 #define ATOMISP_HW_REVISION_SHIFT 8 diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.c index 
97093baf28ac..f48bf451c1f5 100644 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.c +++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.c @@ -82,48 +82,6 @@ union host { } ptr; }; -/* - * atomisp_kernel_malloc: chooses whether kmalloc() or vmalloc() is preferable. - * - * It is also a wrap functions to pass into css framework. - */ -void *atomisp_kernel_malloc(size_t bytes) -{ - /* vmalloc() is preferable if allocating more than 1 page */ - if (bytes > PAGE_SIZE) - return vmalloc(bytes); - - return kmalloc(bytes, GFP_KERNEL); -} - -/* - * atomisp_kernel_zalloc: chooses whether set 0 to the allocated memory. - * - * It is also a wrap functions to pass into css framework. - */ -void *atomisp_kernel_zalloc(size_t bytes, bool zero_mem) -{ - void *ptr = atomisp_kernel_malloc(bytes); - - if (ptr && zero_mem) - memset(ptr, 0, bytes); - - return ptr; -} - -/* - * Free buffer allocated with atomisp_kernel_malloc()/atomisp_kernel_zalloc - * helper - */ -void atomisp_kernel_free(void *ptr) -{ - /* Verify if buffer was allocated by vmalloc() or kmalloc() */ - if (is_vmalloc_addr(ptr)) - vfree(ptr); - else - kfree(ptr); -} - /* * get sensor:dis71430/ov2720 related info from v4l2_subdev->priv data field. * subdev->priv is set in mrst.c @@ -785,7 +743,7 @@ void atomisp_flush_params_queue(struct atomisp_video_pipe *pipe) struct atomisp_css_params_with_list, list); list_del(¶m->list); atomisp_free_css_parameters(¶m->params); - atomisp_kernel_free(param); + kvfree(param); } } @@ -1132,7 +1090,7 @@ void atomisp_buf_done(struct atomisp_sub_device *asd, int error, asd->params.dvs_6axis = NULL; atomisp_free_css_parameters( &pipe->frame_params[vb->i]->params); - atomisp_kernel_free(pipe->frame_params[vb->i]); + kvfree(pipe->frame_params[vb->i]); pipe->frame_params[vb->i] = NULL; } @@ -4329,7 +4287,7 @@ int atomisp_set_parameters(struct video_device *vdev, * are ready, the parameters will be set to CSS. * per-frame setting only works for the main output frame. 
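The atomisp_cmd.c hunks here delete the driver-private atomisp_kernel_malloc()/zalloc()/free() wrappers, which hand-picked kmalloc() or vmalloc() depending on size, and move callers to the generic kvzalloc()/kvmalloc()/kvfree() helpers that implement the same fallback in core code. A minimal sketch of the replacement pattern, using a hypothetical structure name:

```c
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/types.h>

struct frame_params {		/* hypothetical per-frame parameter blob */
	u32 data[4096];
};

static struct frame_params *alloc_frame_params(void)
{
	/*
	 * kvzalloc() tries kmalloc() first and falls back to vzalloc()
	 * for large allocations, so the size-based branching done by the
	 * old atomisp_kernel_zalloc() wrapper is no longer needed.
	 */
	return kvzalloc(sizeof(struct frame_params), GFP_KERNEL);
}

static void free_frame_params(struct frame_params *p)
{
	kvfree(p);	/* handles both kmalloc()- and vmalloc()-backed memory */
}
```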
*/ - param = atomisp_kernel_zalloc(sizeof(*param), true); + param = kvzalloc(sizeof(*param), GFP_KERNEL); if (!param) { dev_err(asd->isp->dev, "%s: failed to alloc params buffer\n", __func__); @@ -4375,7 +4333,7 @@ int atomisp_set_parameters(struct video_device *vdev, if (css_param) atomisp_free_css_parameters(css_param); if (param) - atomisp_kernel_free(param); + kvfree(param); return ret; } diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.h index 8e6d9df7ad1a..31ba4e613d13 100644 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.h +++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.h @@ -78,9 +78,6 @@ static inline void __iomem *atomisp_get_io_virt_addr(unsigned int address) return ret; } */ -void *atomisp_kernel_malloc(size_t bytes); -void *atomisp_kernel_zalloc(size_t bytes, bool zero_mem); -void atomisp_kernel_free(void *ptr); /* * Interrupt functions diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.c index ad2c610d2ce3..05897b747349 100644 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.c +++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.c @@ -1671,12 +1671,12 @@ int atomisp_alloc_metadata_output_buf(struct atomisp_sub_device *asd) /* We allocate the cpu-side buffer used for communication with user * space */ for (i = 0; i < ATOMISP_METADATA_TYPE_NUM; i++) { - asd->params.metadata_user[i] = atomisp_kernel_malloc( + asd->params.metadata_user[i] = kvmalloc( asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]. - stream_info.metadata_info.size); + stream_info.metadata_info.size, GFP_KERNEL); if (!asd->params.metadata_user[i]) { while (--i >= 0) { - atomisp_kernel_free(asd->params.metadata_user[i]); + kvfree(asd->params.metadata_user[i]); asd->params.metadata_user[i] = NULL; } return -ENOMEM; @@ -1692,7 +1692,7 @@ void atomisp_free_metadata_output_buf(struct atomisp_sub_device *asd) for (i = 0; i < ATOMISP_METADATA_TYPE_NUM; i++) { if (asd->params.metadata_user[i]) { - atomisp_kernel_free(asd->params.metadata_user[i]); + kvfree(asd->params.metadata_user[i]); asd->params.metadata_user[i] = NULL; } } diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c index c151c848cf8f..d8cfed358d55 100644 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c +++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c @@ -643,14 +643,14 @@ static void atomisp_buf_release_output(struct videobuf_queue *vq, vb->state = VIDEOBUF_NEEDS_INIT; } -static struct videobuf_queue_ops videobuf_qops = { +static const struct videobuf_queue_ops videobuf_qops = { .buf_setup = atomisp_buf_setup, .buf_prepare = atomisp_buf_prepare, .buf_queue = atomisp_buf_queue, .buf_release = atomisp_buf_release, }; -static struct videobuf_queue_ops videobuf_qops_output = { +static const struct videobuf_queue_ops videobuf_qops_output = { .buf_setup = atomisp_buf_setup_output, .buf_prepare = atomisp_buf_prepare_output, .buf_queue = atomisp_buf_queue_output, diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h index c8e0c4fe3717..7542a72f1d0f 100644 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h +++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h @@ -75,15 +75,6 @@ #define ATOMISP_PCI_REV_MRFLD_A0_MAX 0 
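Several hunks in this region only add const: the ACPI match tables in the sensor drivers, the v4l2_subdev_sensor_ops, and the videobuf_queue_ops a little above are never written at runtime, so constifying them lets the compiler place them in read-only data. A minimal sketch of the pattern for a hypothetical I2C sensor driver (names and IDs are illustrative only):

```c
#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/module.h>

/* Hypothetical ACPI IDs; const moves the table into .rodata. */
static const struct acpi_device_id foo_acpi_match[] = {
	{ "ACPI0001" },
	{ }
};
MODULE_DEVICE_TABLE(acpi, foo_acpi_match);

static int foo_probe(struct i2c_client *client,
		     const struct i2c_device_id *id)
{
	return 0;	/* probe body omitted in this sketch */
}

static struct i2c_driver foo_driver = {
	.driver = {
		.name = "foo",
		.acpi_match_table = ACPI_PTR(foo_acpi_match),
	},
	.probe = foo_probe,
};
module_i2c_driver(foo_driver);

MODULE_LICENSE("GPL");
```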
#define ATOMISP_PCI_REV_BYT_A0_MAX 4 -#define ATOMISP_MAJOR 0 -#define ATOMISP_MINOR 5 -#define ATOMISP_PATCHLEVEL 1 - -#define DRIVER_VERSION_STR __stringify(ATOMISP_MAJOR) \ - "." __stringify(ATOMISP_MINOR) "." __stringify(ATOMISP_PATCHLEVEL) -#define DRIVER_VERSION KERNEL_VERSION(ATOMISP_MAJOR, \ - ATOMISP_MINOR, ATOMISP_PATCHLEVEL) - #define ATOM_ISP_STEP_WIDTH 2 #define ATOM_ISP_STEP_HEIGHT 2 @@ -310,10 +301,6 @@ struct atomisp_device { extern struct device *atomisp_dev; -extern void *atomisp_kernel_malloc(size_t bytes); - -extern void atomisp_kernel_free(void *ptr); - #define atomisp_is_wdt_running(a) timer_pending(&(a)->wdt) #ifdef ISP2401 extern void atomisp_wdt_refresh_pipe(struct atomisp_video_pipe *pipe, diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.c index aa0526ebaff1..717647951fb6 100644 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.c +++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.c @@ -51,7 +51,6 @@ static const char *DRIVER = "atomisp"; /* max size 15 */ static const char *CARD = "ATOM ISP"; /* max size 31 */ static const char *BUS_INFO = "PCI-3"; /* max size 31 */ -static const u32 VERSION = DRIVER_VERSION; /* * FIXME: ISP should not know beforehand all CIDs supported by sensor. @@ -562,8 +561,6 @@ static int atomisp_querycap(struct file *file, void *fh, strncpy(cap->card, CARD, sizeof(cap->card) - 1); strncpy(cap->bus_info, BUS_INFO, sizeof(cap->card) - 1); - cap->version = VERSION; - cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT; cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.c index 3d6bb166927c..744ab6eb42a0 100644 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.c +++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.c @@ -1253,8 +1253,7 @@ int atomisp_create_pads_links(struct atomisp_device *isp) { struct atomisp_sub_device *asd; int i, j, ret = 0; - isp->num_of_streams = isp->media_dev.driver_version >= - ATOMISP_CSS_VERSION_20 ? 2 : 1; + isp->num_of_streams = 2; for (i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++) { for (j = 0; j < isp->num_of_streams; j++) { ret = @@ -1414,8 +1413,7 @@ int atomisp_subdev_init(struct atomisp_device *isp) * CSS2.0 running ISP2400 support * multiple streams */ - isp->num_of_streams = isp->media_dev.driver_version >= - ATOMISP_CSS_VERSION_20 ? 
2 : 1; + isp->num_of_streams = 2; isp->asd = devm_kzalloc(isp->dev, sizeof(struct atomisp_sub_device) * isp->num_of_streams, GFP_KERNEL); if (!isp->asd) diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.c index a543def739fc..663aa916e3ca 100644 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.c +++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.c @@ -1083,27 +1083,23 @@ atomisp_load_firmware(struct atomisp_device *isp) if (skip_fwload) return NULL; - if (isp->media_dev.driver_version == ATOMISP_CSS_VERSION_21) { - if (isp->media_dev.hw_revision == - ((ATOMISP_HW_REVISION_ISP2401 << ATOMISP_HW_REVISION_SHIFT) - | ATOMISP_HW_STEPPING_A0)) - fw_path = "shisp_2401a0_v21.bin"; + if (isp->media_dev.hw_revision == + ((ATOMISP_HW_REVISION_ISP2401 << ATOMISP_HW_REVISION_SHIFT) + | ATOMISP_HW_STEPPING_A0)) + fw_path = "shisp_2401a0_v21.bin"; - if (isp->media_dev.hw_revision == - ((ATOMISP_HW_REVISION_ISP2401_LEGACY << ATOMISP_HW_REVISION_SHIFT) - | ATOMISP_HW_STEPPING_A0)) - fw_path = "shisp_2401a0_legacy_v21.bin"; + if (isp->media_dev.hw_revision == + ((ATOMISP_HW_REVISION_ISP2401_LEGACY << ATOMISP_HW_REVISION_SHIFT) + | ATOMISP_HW_STEPPING_A0)) + fw_path = "shisp_2401a0_legacy_v21.bin"; - if (isp->media_dev.hw_revision == - ((ATOMISP_HW_REVISION_ISP2400 << ATOMISP_HW_REVISION_SHIFT) - | ATOMISP_HW_STEPPING_B0)) - fw_path = "shisp_2400b0_v21.bin"; - } + if (isp->media_dev.hw_revision == + ((ATOMISP_HW_REVISION_ISP2400 << ATOMISP_HW_REVISION_SHIFT) + | ATOMISP_HW_STEPPING_B0)) + fw_path = "shisp_2400b0_v21.bin"; if (!fw_path) { - dev_err(isp->dev, - "Unsupported driver_version 0x%x, hw_revision 0x%x\n", - isp->media_dev.driver_version, + dev_err(isp->dev, "Unsupported hw_revision 0x%x\n", isp->media_dev.hw_revision); return NULL; } @@ -1251,7 +1247,6 @@ static int atomisp_pci_probe(struct pci_dev *dev, /* This is not a true PCI device on SoC, so the delay is not needed. */ isp->pdev->d3_delay = 0; - isp->media_dev.driver_version = ATOMISP_CSS_VERSION_21; switch (id->device & ATOMISP_PCI_DEVICE_SOC_MASK) { case ATOMISP_PCI_DEVICE_SOC_MRFLD: isp->media_dev.hw_revision = diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/ibuf_ctrl_rmgr.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/ibuf_ctrl_rmgr.c index 76d9142fd37e..faef97672eac 100644 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/ibuf_ctrl_rmgr.c +++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/ibuf_ctrl_rmgr.c @@ -14,18 +14,18 @@ */ #else /** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. -*/ + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2015, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
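With the driver_version plumbing removed, atomisp_load_firmware() in the hunk a little above now selects the firmware file purely from media_dev.hw_revision, which packs the ISP revision and stepping into one value. The following sketch restates that selection using the macro names visible in the hunk; the helper itself is hypothetical, not the exact function body:

```c
/*
 * Illustration of the hw_revision-keyed firmware lookup. Macro names are
 * taken from the diff above; pick_fw_path() is a hypothetical helper.
 */
static const char *pick_fw_path(unsigned int hw_revision)
{
	if (hw_revision == ((ATOMISP_HW_REVISION_ISP2401 << ATOMISP_HW_REVISION_SHIFT)
			    | ATOMISP_HW_STEPPING_A0))
		return "shisp_2401a0_v21.bin";

	if (hw_revision == ((ATOMISP_HW_REVISION_ISP2401_LEGACY << ATOMISP_HW_REVISION_SHIFT)
			    | ATOMISP_HW_STEPPING_A0))
		return "shisp_2401a0_legacy_v21.bin";

	if (hw_revision == ((ATOMISP_HW_REVISION_ISP2400 << ATOMISP_HW_REVISION_SHIFT)
			    | ATOMISP_HW_STEPPING_B0))
		return "shisp_2400b0_v21.bin";

	return NULL;	/* unsupported revision: caller logs it and bails out */
}
```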
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ #endif #include "system_global.h" @@ -130,8 +130,7 @@ void ia_css_isys_ibuf_rmgr_release( for (i = 0; i < ibuf_rsrc.num_allocated; i++) { handle = getHandle(i); - if ((handle->start_addr == *start_addr) - && ( true == handle->active)) { + if (handle->active && handle->start_addr == *start_addr) { handle->active = false; ibuf_rsrc.num_active--; break; diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css.c index 471f2be974e2..e882b5596813 100644 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css.c +++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css.c @@ -1939,6 +1939,7 @@ void *sh_css_calloc(size_t N, size_t size) p = sh_css_malloc(N*size); if (p) memset(p, 0, size); + return p; } return NULL; } diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.c index eecd8cf71951..63582161050a 100644 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.c +++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.c @@ -131,14 +131,10 @@ sh_css_load_blob_info(const char *fw, const struct ia_css_fw_info *bi, struct ia if (bi->type == ia_css_isp_firmware || bi->type == ia_css_sp_firmware) { char *namebuffer; - int namelength = (int)strlen(name); - namebuffer = (char *) kmalloc(namelength + 1, GFP_KERNEL); - if (namebuffer == NULL) + namebuffer = kstrdup(name, GFP_KERNEL); + if (!namebuffer) return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - - memcpy(namebuffer, name, namelength + 1); - bd->name = fw_minibuffer[index].name = namebuffer; } else { bd->name = name; diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm.c b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm.c index 05eeff58a229..b8aae4ba5a78 100644 --- a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm.c +++ b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm.c @@ -46,14 +46,16 @@ static ia_css_ptr dummy_ptr; static bool hmm_initialized; struct _hmm_mem_stat hmm_mem_stat; -/* p: private - s: shared - u: user - i: ion */ +/* + * p: private + * s: shared + * u: user + * i: ion + */ static const char hmm_bo_type_string[] = "psui"; static ssize_t bo_show(struct device *dev, struct device_attribute *attr, - char *buf, struct list_head *bo_list, bool active) + char *buf, struct list_head *bo_list, bool active) { ssize_t ret = 0; struct hmm_buffer_object *bo; @@ -73,10 +75,10 @@ static ssize_t bo_show(struct device *dev, struct device_attribute *attr, spin_lock_irqsave(&bo_device.list_lock, flags); list_for_each_entry(bo, bo_list, list) { if ((active && (bo->status & HMM_BO_ALLOCED)) || - (!active && !(bo->status & HMM_BO_ALLOCED))) { + (!active && !(bo->status & HMM_BO_ALLOCED))) { ret = scnprintf(buf + index1, PAGE_SIZE - index1, - "%c %d\n", - hmm_bo_type_string[bo->type], bo->pgnr); + "%c %d\n", + hmm_bo_type_string[bo->type], bo->pgnr); total[bo->type] += bo->pgnr; count[bo->type]++; @@ -89,9 +91,10 @@ static ssize_t bo_show(struct device *dev, struct device_attribute *attr, for (i = 0; i < HMM_BO_LAST; i++) { if (count[i]) { ret = scnprintf(buf + index1 + index2, - PAGE_SIZE - index1 - index2, - "%ld %c buffer objects: %ld KB\n", - count[i], 
hmm_bo_type_string[i], total[i] * 4); + PAGE_SIZE - index1 - index2, + "%ld %c buffer objects: %ld KB\n", + count[i], hmm_bo_type_string[i], + total[i] * 4); if (ret > 0) index2 += ret; } @@ -101,23 +104,21 @@ static ssize_t bo_show(struct device *dev, struct device_attribute *attr, return index1 + index2 + 1; } -static ssize_t active_bo_show(struct device *dev, - struct device_attribute *attr, - char *buf) +static ssize_t active_bo_show(struct device *dev, struct device_attribute *attr, + char *buf) { return bo_show(dev, attr, buf, &bo_device.entire_bo_list, true); } -static ssize_t free_bo_show(struct device *dev, - struct device_attribute *attr, - char *buf) +static ssize_t free_bo_show(struct device *dev, struct device_attribute *attr, + char *buf) { return bo_show(dev, attr, buf, &bo_device.entire_bo_list, false); } static ssize_t reserved_pool_show(struct device *dev, - struct device_attribute *attr, - char *buf) + struct device_attribute *attr, + char *buf) { ssize_t ret = 0; @@ -129,7 +130,7 @@ static ssize_t reserved_pool_show(struct device *dev, spin_lock_irqsave(&pinfo->list_lock, flags); ret = scnprintf(buf, PAGE_SIZE, "%d out of %d pages available\n", - pinfo->index, pinfo->pgnr); + pinfo->index, pinfo->pgnr); spin_unlock_irqrestore(&pinfo->list_lock, flags); if (ret > 0) @@ -139,8 +140,8 @@ static ssize_t reserved_pool_show(struct device *dev, }; static ssize_t dynamic_pool_show(struct device *dev, - struct device_attribute *attr, - char *buf) + struct device_attribute *attr, + char *buf) { ssize_t ret = 0; @@ -152,7 +153,7 @@ static ssize_t dynamic_pool_show(struct device *dev, spin_lock_irqsave(&pinfo->list_lock, flags); ret = scnprintf(buf, PAGE_SIZE, "%d (max %d) pages available\n", - pinfo->pgnr, pinfo->pool_size); + pinfo->pgnr, pinfo->pool_size); spin_unlock_irqrestore(&pinfo->list_lock, flags); if (ret > 0) @@ -200,7 +201,7 @@ int hmm_init(void) if (!ret) { ret = sysfs_create_group(&atomisp_dev->kobj, - atomisp_attribute_group); + atomisp_attribute_group); if (ret) dev_err(atomisp_dev, "%s Failed to create sysfs\n", __func__); @@ -213,9 +214,7 @@ void hmm_cleanup(void) { sysfs_remove_group(&atomisp_dev->kobj, atomisp_attribute_group); - /* - * free dummy memory first - */ + /* free dummy memory first */ hmm_free(dummy_ptr); dummy_ptr = 0; @@ -224,36 +223,37 @@ void hmm_cleanup(void) } ia_css_ptr hmm_alloc(size_t bytes, enum hmm_bo_type type, - int from_highmem, void *userptr, bool cached) + int from_highmem, void *userptr, bool cached) { unsigned int pgnr; struct hmm_buffer_object *bo; int ret; - /* Check if we are initialized. In the ideal world we wouldn't need - this but we can tackle it once the driver is a lot cleaner */ + /* + * Check if we are initialized. 
In the ideal world we wouldn't need + * this but we can tackle it once the driver is a lot cleaner + */ if (!hmm_initialized) hmm_init(); - /*Get page number from size*/ + /* Get page number from size */ pgnr = size_to_pgnr_ceil(bytes); - /*Buffer object structure init*/ + /* Buffer object structure init */ bo = hmm_bo_alloc(&bo_device, pgnr); if (!bo) { dev_err(atomisp_dev, "hmm_bo_create failed.\n"); goto create_bo_err; } - /*Allocate pages for memory*/ + /* Allocate pages for memory */ ret = hmm_bo_alloc_pages(bo, type, from_highmem, userptr, cached); if (ret) { - dev_err(atomisp_dev, - "hmm_bo_alloc_pages failed.\n"); + dev_err(atomisp_dev, "hmm_bo_alloc_pages failed.\n"); goto alloc_page_err; } - /*Combind the virtual address and pages togather*/ + /* Combind the virtual address and pages togather */ ret = hmm_bo_bind(bo); if (ret) { dev_err(atomisp_dev, "hmm_bo_bind failed.\n"); @@ -282,8 +282,8 @@ void hmm_free(ia_css_ptr virt) if (!bo) { dev_err(atomisp_dev, - "can not find buffer object start with " - "address 0x%x\n", (unsigned int)virt); + "can not find buffer object start with address 0x%x\n", + (unsigned int)virt); return; } @@ -298,29 +298,29 @@ static inline int hmm_check_bo(struct hmm_buffer_object *bo, unsigned int ptr) { if (!bo) { dev_err(atomisp_dev, - "can not find buffer object contains " - "address 0x%x\n", ptr); + "can not find buffer object contains address 0x%x\n", + ptr); return -EINVAL; } if (!hmm_bo_page_allocated(bo)) { dev_err(atomisp_dev, - "buffer object has no page allocated.\n"); + "buffer object has no page allocated.\n"); return -EINVAL; } if (!hmm_bo_allocated(bo)) { dev_err(atomisp_dev, - "buffer object has no virtual address" - " space allocated.\n"); + "buffer object has no virtual address space allocated.\n"); return -EINVAL; } return 0; } -/*Read function in ISP memory management*/ -static int load_and_flush_by_kmap(ia_css_ptr virt, void *data, unsigned int bytes) +/* Read function in ISP memory management */ +static int load_and_flush_by_kmap(ia_css_ptr virt, void *data, + unsigned int bytes) { struct hmm_buffer_object *bo; unsigned int idx, offset, len; @@ -362,7 +362,7 @@ static int load_and_flush_by_kmap(ia_css_ptr virt, void *data, unsigned int byte return 0; } -/*Read function in ISP memory management*/ +/* Read function in ISP memory management */ static int load_and_flush(ia_css_ptr virt, void *data, unsigned int bytes) { struct hmm_buffer_object *bo; @@ -397,24 +397,24 @@ static int load_and_flush(ia_css_ptr virt, void *data, unsigned int bytes) return 0; } -/*Read function in ISP memory management*/ +/* Read function in ISP memory management */ int hmm_load(ia_css_ptr virt, void *data, unsigned int bytes) { if (!data) { dev_err(atomisp_dev, - "hmm_load NULL argument\n"); + "hmm_load NULL argument\n"); return -EINVAL; } return load_and_flush(virt, data, bytes); } -/*Flush hmm data from the data cache*/ +/* Flush hmm data from the data cache */ int hmm_flush(ia_css_ptr virt, unsigned int bytes) { return load_and_flush(virt, NULL, bytes); } -/*Write function in ISP memory management*/ +/* Write function in ISP memory management */ int hmm_store(ia_css_ptr virt, const void *data, unsigned int bytes) { struct hmm_buffer_object *bo; @@ -460,8 +460,8 @@ int hmm_store(ia_css_ptr virt, const void *data, unsigned int bytes) if (!des) { dev_err(atomisp_dev, - "kmap buffer object page failed: " - "pg_idx = %d\n", idx); + "kmap buffer object page failed: pg_idx = %d\n", + idx); return -EINVAL; } @@ -496,7 +496,7 @@ int hmm_store(ia_css_ptr virt, 
const void *data, unsigned int bytes) return 0; } -/*memset function in ISP memory management*/ +/* memset function in ISP memory management */ int hmm_set(ia_css_ptr virt, int c, unsigned int bytes) { struct hmm_buffer_object *bo; @@ -556,7 +556,7 @@ int hmm_set(ia_css_ptr virt, int c, unsigned int bytes) return 0; } -/*Virtual address to physical address convert*/ +/* Virtual address to physical address convert */ phys_addr_t hmm_virt_to_phys(ia_css_ptr virt) { unsigned int idx, offset; @@ -591,7 +591,7 @@ int hmm_mmap(struct vm_area_struct *vma, ia_css_ptr virt) return hmm_bo_mmap(vma, bo); } -/*Map ISP virtual address into IA virtual address*/ +/* Map ISP virtual address into IA virtual address */ void *hmm_vmap(ia_css_ptr virt, bool cached) { struct hmm_buffer_object *bo; @@ -600,8 +600,8 @@ void *hmm_vmap(ia_css_ptr virt, bool cached) bo = hmm_bo_device_search_in_range(&bo_device, virt); if (!bo) { dev_err(atomisp_dev, - "can not find buffer object contains address 0x%x\n", - virt); + "can not find buffer object contains address 0x%x\n", + virt); return NULL; } @@ -620,8 +620,8 @@ void hmm_flush_vmap(ia_css_ptr virt) bo = hmm_bo_device_search_in_range(&bo_device, virt); if (!bo) { dev_warn(atomisp_dev, - "can not find buffer object contains address 0x%x\n", - virt); + "can not find buffer object contains address 0x%x\n", + virt); return; } @@ -635,26 +635,25 @@ void hmm_vunmap(ia_css_ptr virt) bo = hmm_bo_device_search_in_range(&bo_device, virt); if (!bo) { dev_warn(atomisp_dev, - "can not find buffer object contains address 0x%x\n", - virt); + "can not find buffer object contains address 0x%x\n", + virt); return; } - return hmm_bo_vunmap(bo); + hmm_bo_vunmap(bo); } -int hmm_pool_register(unsigned int pool_size, - enum hmm_pool_type pool_type) +int hmm_pool_register(unsigned int pool_size, enum hmm_pool_type pool_type) { switch (pool_type) { case HMM_POOL_TYPE_RESERVED: reserved_pool.pops = &reserved_pops; return reserved_pool.pops->pool_init(&reserved_pool.pool_info, - pool_size); + pool_size); case HMM_POOL_TYPE_DYNAMIC: dynamic_pool.pops = &dynamic_pops; return dynamic_pool.pops->pool_init(&dynamic_pool.pool_info, - pool_size); + pool_size); default: dev_err(atomisp_dev, "invalid pool type.\n"); return -EINVAL; @@ -703,10 +702,10 @@ ia_css_ptr hmm_host_vaddr_to_hrt_vaddr(const void *ptr) void hmm_show_mem_stat(const char *func, const int line) { trace_printk("tol_cnt=%d usr_size=%d res_size=%d res_cnt=%d sys_size=%d dyc_thr=%d dyc_size=%d.\n", - hmm_mem_stat.tol_cnt, - hmm_mem_stat.usr_size, hmm_mem_stat.res_size, - hmm_mem_stat.res_cnt, hmm_mem_stat.sys_size, - hmm_mem_stat.dyc_thr, hmm_mem_stat.dyc_size); + hmm_mem_stat.tol_cnt, + hmm_mem_stat.usr_size, hmm_mem_stat.res_size, + hmm_mem_stat.res_cnt, hmm_mem_stat.sys_size, + hmm_mem_stat.dyc_thr, hmm_mem_stat.dyc_size); } void hmm_init_mem_stat(int res_pgnr, int dyc_en, int dyc_pgnr) diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c index 38f72d069e27..58adaea44eb5 100644 --- a/drivers/staging/media/bcm2048/radio-bcm2048.c +++ b/drivers/staging/media/bcm2048/radio-bcm2048.c @@ -48,7 +48,6 @@ /* driver definitions */ #define BCM2048_DRIVER_AUTHOR "Eero Nurkkala " #define BCM2048_DRIVER_NAME BCM2048_NAME -#define BCM2048_DRIVER_VERSION KERNEL_VERSION(0, 0, 1) #define BCM2048_DRIVER_CARD "Broadcom bcm2048 FM Radio Receiver" #define BCM2048_DRIVER_DESC "I2C driver for BCM2048 FM Radio Receiver" @@ -2565,7 +2564,7 @@ static const struct v4l2_ioctl_ops bcm2048_ioctl_ops = { /* * 
bcm2048_viddev_template - video device interface */ -static struct video_device bcm2048_viddev_template = { +static const struct video_device bcm2048_viddev_template = { .fops = &bcm2048_fops, .name = BCM2048_DRIVER_NAME, .release = video_device_release_empty, diff --git a/drivers/staging/media/cxd2099/cxd2099.c b/drivers/staging/media/cxd2099/cxd2099.c index f28916ea69f1..3e30f4864e2b 100644 --- a/drivers/staging/media/cxd2099/cxd2099.c +++ b/drivers/staging/media/cxd2099/cxd2099.c @@ -33,8 +33,9 @@ #include "cxd2099.h" -/* comment this line to deactivate the cxd2099ar buffer mode */ -#define BUFFER_MODE 1 +static int buffermode; +module_param(buffermode, int, 0444); +MODULE_PARM_DESC(buffermode, "Enable use of the CXD2099AR buffer mode (default: disabled)"); static int read_data(struct dvb_ca_en50221 *ca, int slot, u8 *ebuf, int ecount); @@ -221,7 +222,6 @@ static int write_reg(struct cxd *ci, u8 reg, u8 val) return write_regm(ci, reg, val, 0xff); } -#ifdef BUFFER_MODE static int write_block(struct cxd *ci, u8 adr, u8 *data, u16 n) { int status = 0; @@ -248,7 +248,6 @@ static int write_block(struct cxd *ci, u8 adr, u8 *data, u16 n) } return status; } -#endif static void set_mode(struct cxd *ci, int mode) { @@ -642,8 +641,6 @@ static int read_data(struct dvb_ca_en50221 *ca, int slot, u8 *ebuf, int ecount) return len; } -#ifdef BUFFER_MODE - static int write_data(struct dvb_ca_en50221 *ca, int slot, u8 *ebuf, int ecount) { struct cxd *ci = ca->data; @@ -658,7 +655,6 @@ static int write_data(struct dvb_ca_en50221 *ca, int slot, u8 *ebuf, int ecount) mutex_unlock(&ci->lock); return ecount; } -#endif static struct dvb_ca_en50221 en_templ = { .read_attribute_mem = read_attribute_mem, @@ -669,11 +665,8 @@ static struct dvb_ca_en50221 en_templ = { .slot_shutdown = slot_shutdown, .slot_ts_enable = slot_ts_enable, .poll_slot_status = poll_slot_status, -#ifdef BUFFER_MODE .read_data = read_data, .write_data = write_data, -#endif - }; struct dvb_ca_en50221 *cxd2099_attach(struct cxd2099_cfg *cfg, @@ -703,6 +696,14 @@ struct dvb_ca_en50221 *cxd2099_attach(struct cxd2099_cfg *cfg, ci->en.data = ci; init(ci); dev_info(&i2c->dev, "Attached CXD2099AR at %02x\n", ci->cfg.adr); + + if (!buffermode) { + ci->en.read_data = NULL; + ci->en.write_data = NULL; + } else { + dev_info(&i2c->dev, "Using CXD2099AR buffer mode"); + } + return &ci->en; } EXPORT_SYMBOL(cxd2099_attach); diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c index 8b2117ee0f60..155e8c758e4b 100644 --- a/drivers/staging/media/davinci_vpfe/vpfe_video.c +++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c @@ -1304,7 +1304,7 @@ static void vpfe_buf_cleanup(struct vb2_buffer *vb) list_del_init(&buf->list); } -static struct vb2_ops video_qops = { +static const struct vb2_ops video_qops = { .queue_setup = vpfe_buffer_queue_setup, .buf_init = vpfe_buffer_init, .buf_prepare = vpfe_buffer_prepare, diff --git a/drivers/staging/media/imx/Kconfig b/drivers/staging/media/imx/Kconfig index 7eff50bcea39..2be921cd0d55 100644 --- a/drivers/staging/media/imx/Kconfig +++ b/drivers/staging/media/imx/Kconfig @@ -1,6 +1,8 @@ config VIDEO_IMX_MEDIA tristate "i.MX5/6 V4L2 media core driver" depends on MEDIA_CONTROLLER && VIDEO_V4L2 && ARCH_MXC && IMX_IPUV3_CORE + depends on VIDEO_V4L2_SUBDEV_API + select VIDEOBUF2_DMA_CONTIG select V4L2_FWNODE ---help--- Say yes here to enable support for video4linux media controller @@ -12,7 +14,6 @@ menu "i.MX5/6 Media Sub devices" config VIDEO_IMX_CSI tristate "i.MX5/6 
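The cxd2099 hunks above swap the compile-time BUFFER_MODE #define for a run-time buffermode module parameter and simply clear the optional ops when it is off. A minimal sketch of that pattern; featuremode and struct example_ops are illustrative names, not part of the patch.

#include <linux/module.h>
#include <linux/types.h>

static int featuremode;
module_param(featuremode, int, 0444);
MODULE_PARM_DESC(featuremode, "Enable the optional buffer path (default: disabled)");

struct example_ops {				/* hypothetical ops table */
	int (*read_data)(void *priv, u8 *buf, int count);
	int (*write_data)(void *priv, u8 *buf, int count);
};

static void example_attach(struct example_ops *ops)
{
	/* handlers stay compiled in, but are exposed only on request */
	if (!featuremode) {
		ops->read_data = NULL;
		ops->write_data = NULL;
	}
}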
Camera Sensor Interface driver" depends on VIDEO_IMX_MEDIA && VIDEO_DEV && I2C - select VIDEOBUF2_DMA_CONTIG default y ---help--- A video4linux camera sensor interface driver for i.MX5/6. diff --git a/drivers/staging/media/imx/imx-ic-prpencvf.c b/drivers/staging/media/imx/imx-ic-prpencvf.c index ed363fe3b3d0..0790b3d9e255 100644 --- a/drivers/staging/media/imx/imx-ic-prpencvf.c +++ b/drivers/staging/media/imx/imx-ic-prpencvf.c @@ -134,19 +134,19 @@ static inline struct prp_priv *sd_to_priv(struct v4l2_subdev *sd) static void prp_put_ipu_resources(struct prp_priv *priv) { - if (!IS_ERR_OR_NULL(priv->ic)) + if (priv->ic) ipu_ic_put(priv->ic); priv->ic = NULL; - if (!IS_ERR_OR_NULL(priv->out_ch)) + if (priv->out_ch) ipu_idmac_put(priv->out_ch); priv->out_ch = NULL; - if (!IS_ERR_OR_NULL(priv->rot_in_ch)) + if (priv->rot_in_ch) ipu_idmac_put(priv->rot_in_ch); priv->rot_in_ch = NULL; - if (!IS_ERR_OR_NULL(priv->rot_out_ch)) + if (priv->rot_out_ch) ipu_idmac_put(priv->rot_out_ch); priv->rot_out_ch = NULL; } @@ -154,43 +154,46 @@ static void prp_put_ipu_resources(struct prp_priv *priv) static int prp_get_ipu_resources(struct prp_priv *priv) { struct imx_ic_priv *ic_priv = priv->ic_priv; + struct ipu_ic *ic; + struct ipuv3_channel *out_ch, *rot_in_ch, *rot_out_ch; int ret, task = ic_priv->task_id; priv->ipu = priv->md->ipu[ic_priv->ipu_id]; - priv->ic = ipu_ic_get(priv->ipu, task); - if (IS_ERR(priv->ic)) { + ic = ipu_ic_get(priv->ipu, task); + if (IS_ERR(ic)) { v4l2_err(&ic_priv->sd, "failed to get IC\n"); - ret = PTR_ERR(priv->ic); + ret = PTR_ERR(ic); goto out; } + priv->ic = ic; - priv->out_ch = ipu_idmac_get(priv->ipu, - prp_channel[task].out_ch); - if (IS_ERR(priv->out_ch)) { + out_ch = ipu_idmac_get(priv->ipu, prp_channel[task].out_ch); + if (IS_ERR(out_ch)) { v4l2_err(&ic_priv->sd, "could not get IDMAC channel %u\n", prp_channel[task].out_ch); - ret = PTR_ERR(priv->out_ch); + ret = PTR_ERR(out_ch); goto out; } + priv->out_ch = out_ch; - priv->rot_in_ch = ipu_idmac_get(priv->ipu, - prp_channel[task].rot_in_ch); - if (IS_ERR(priv->rot_in_ch)) { + rot_in_ch = ipu_idmac_get(priv->ipu, prp_channel[task].rot_in_ch); + if (IS_ERR(rot_in_ch)) { v4l2_err(&ic_priv->sd, "could not get IDMAC channel %u\n", prp_channel[task].rot_in_ch); - ret = PTR_ERR(priv->rot_in_ch); + ret = PTR_ERR(rot_in_ch); goto out; } + priv->rot_in_ch = rot_in_ch; - priv->rot_out_ch = ipu_idmac_get(priv->ipu, - prp_channel[task].rot_out_ch); - if (IS_ERR(priv->rot_out_ch)) { + rot_out_ch = ipu_idmac_get(priv->ipu, prp_channel[task].rot_out_ch); + if (IS_ERR(rot_out_ch)) { v4l2_err(&ic_priv->sd, "could not get IDMAC channel %u\n", prp_channel[task].rot_out_ch); - ret = PTR_ERR(priv->rot_out_ch); + ret = PTR_ERR(rot_out_ch); goto out; } + priv->rot_out_ch = rot_out_ch; return 0; out: @@ -374,6 +377,17 @@ static int prp_setup_channel(struct prp_priv *priv, image.phys0 = addr0; image.phys1 = addr1; + if (channel == priv->out_ch || channel == priv->rot_out_ch) { + switch (image.pix.pixelformat) { + case V4L2_PIX_FMT_YUV420: + case V4L2_PIX_FMT_YVU420: + case V4L2_PIX_FMT_NV12: + /* Skip writing U and V components to odd rows */ + ipu_cpmem_skip_odd_chroma_rows(channel); + break; + } + } + ret = ipu_cpmem_set_image(channel, &image); if (ret) return ret; @@ -1278,9 +1292,8 @@ static int prp_init(struct imx_ic_priv *ic_priv) priv->ic_priv = ic_priv; spin_lock_init(&priv->irqlock); - init_timer(&priv->eof_timeout_timer); - priv->eof_timeout_timer.data = (unsigned long)priv; - priv->eof_timeout_timer.function = prp_eof_timeout; + 
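The prp_get_ipu_resources()/prp_put_ipu_resources() rework above acquires each resource into a local, tests IS_ERR() on the local, and stores it in the private struct only on success, so the release path can use a plain NULL check instead of IS_ERR_OR_NULL(). A sketch of that shape with an illustrative example_priv container:

#include <linux/err.h>
#include <video/imx-ipu-v3.h>

struct example_priv {			/* illustrative container */
	struct ipu_soc *ipu;
	struct ipu_ic *ic;
};

static int example_get_ic(struct example_priv *priv, enum ipu_ic_task task)
{
	struct ipu_ic *ic = ipu_ic_get(priv->ipu, task);

	if (IS_ERR(ic))
		return PTR_ERR(ic);	/* priv->ic is left NULL, never an ERR_PTR */
	priv->ic = ic;
	return 0;
}

static void example_put_ic(struct example_priv *priv)
{
	if (priv->ic)			/* plain NULL check is now sufficient */
		ipu_ic_put(priv->ic);
	priv->ic = NULL;
}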
setup_timer(&priv->eof_timeout_timer, prp_eof_timeout, + (unsigned long)priv); priv->vdev = imx_media_capture_device_init(&ic_priv->sd, PRPENCVF_SRC_PAD); diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c index ddab4c249da2..ea145bafb880 100644 --- a/drivers/staging/media/imx/imx-media-capture.c +++ b/drivers/staging/media/imx/imx-media-capture.c @@ -62,7 +62,7 @@ struct capture_priv { /* In bytes, per queue */ #define VID_MEM_LIMIT SZ_64M -static struct vb2_ops capture_qops; +static const struct vb2_ops capture_qops; /* * Video ioctls follow @@ -503,7 +503,7 @@ static void capture_stop_streaming(struct vb2_queue *vq) spin_unlock_irqrestore(&priv->q_lock, flags); } -static struct vb2_ops capture_qops = { +static const struct vb2_ops capture_qops = { .queue_setup = capture_queue_setup, .buf_init = capture_buf_init, .buf_prepare = capture_buf_prepare, diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c index a2d26693912e..6d856118c223 100644 --- a/drivers/staging/media/imx/imx-media-csi.c +++ b/drivers/staging/media/imx/imx-media-csi.c @@ -122,11 +122,11 @@ static inline struct csi_priv *sd_to_dev(struct v4l2_subdev *sdev) static void csi_idmac_put_ipu_resources(struct csi_priv *priv) { - if (!IS_ERR_OR_NULL(priv->idmac_ch)) + if (priv->idmac_ch) ipu_idmac_put(priv->idmac_ch); priv->idmac_ch = NULL; - if (!IS_ERR_OR_NULL(priv->smfc)) + if (priv->smfc) ipu_smfc_put(priv->smfc); priv->smfc = NULL; } @@ -134,23 +134,27 @@ static void csi_idmac_put_ipu_resources(struct csi_priv *priv) static int csi_idmac_get_ipu_resources(struct csi_priv *priv) { int ch_num, ret; + struct ipu_smfc *smfc; + struct ipuv3_channel *idmac_ch; ch_num = IPUV3_CHANNEL_CSI0 + priv->smfc_id; - priv->smfc = ipu_smfc_get(priv->ipu, ch_num); - if (IS_ERR(priv->smfc)) { + smfc = ipu_smfc_get(priv->ipu, ch_num); + if (IS_ERR(smfc)) { v4l2_err(&priv->sd, "failed to get SMFC\n"); - ret = PTR_ERR(priv->smfc); + ret = PTR_ERR(smfc); goto out; } + priv->smfc = smfc; - priv->idmac_ch = ipu_idmac_get(priv->ipu, ch_num); - if (IS_ERR(priv->idmac_ch)) { + idmac_ch = ipu_idmac_get(priv->ipu, ch_num); + if (IS_ERR(idmac_ch)) { v4l2_err(&priv->sd, "could not get IDMAC channel %u\n", ch_num); - ret = PTR_ERR(priv->idmac_ch); + ret = PTR_ERR(idmac_ch); goto out; } + priv->idmac_ch = idmac_ch; return 0; out: @@ -357,6 +361,8 @@ static int csi_idmac_setup_channel(struct csi_priv *priv) passthrough = (sensor_ep->bus_type != V4L2_MBUS_CSI2 && sensor_ep->bus.parallel.bus_width >= 16); passthrough_bits = 16; + /* Skip writing U and V components to odd rows */ + ipu_cpmem_skip_odd_chroma_rows(priv->idmac_ch); break; case V4L2_PIX_FMT_YUYV: case V4L2_PIX_FMT_UYVY: @@ -1583,6 +1589,7 @@ static int csi_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh, static int csi_registered(struct v4l2_subdev *sd) { struct csi_priv *priv = v4l2_get_subdevdata(sd); + struct ipu_csi *csi; int i, ret; u32 code; @@ -1590,11 +1597,12 @@ static int csi_registered(struct v4l2_subdev *sd) priv->md = dev_get_drvdata(sd->v4l2_dev->dev); /* get handle to IPU CSI */ - priv->csi = ipu_csi_get(priv->ipu, priv->csi_id); - if (IS_ERR(priv->csi)) { + csi = ipu_csi_get(priv->ipu, priv->csi_id); + if (IS_ERR(csi)) { v4l2_err(&priv->sd, "failed to get CSI%d\n", priv->csi_id); - return PTR_ERR(priv->csi); + return PTR_ERR(csi); } + priv->csi = csi; for (i = 0; i < CSI_NUM_PADS; i++) { priv->pad[i].flags = (i == CSI_SINK_PAD) ? 
@@ -1663,7 +1671,7 @@ static void csi_unregistered(struct v4l2_subdev *sd) if (priv->fim) imx_media_fim_free(priv->fim); - if (!IS_ERR_OR_NULL(priv->csi)) + if (priv->csi) ipu_csi_put(priv->csi); } @@ -1731,9 +1739,8 @@ static int imx_csi_probe(struct platform_device *pdev) priv->csi_id = pdata->csi; priv->smfc_id = (priv->csi_id == 0) ? 0 : 2; - init_timer(&priv->eof_timeout_timer); - priv->eof_timeout_timer.data = (unsigned long)priv; - priv->eof_timeout_timer.function = csi_idmac_eof_timeout; + setup_timer(&priv->eof_timeout_timer, csi_idmac_eof_timeout, + (unsigned long)priv); spin_lock_init(&priv->irqlock); v4l2_subdev_init(&priv->sd, &csi_subdev_ops); diff --git a/drivers/staging/media/imx/imx-media-dev.c b/drivers/staging/media/imx/imx-media-dev.c index 48cbc7716758..d96f4512224f 100644 --- a/drivers/staging/media/imx/imx-media-dev.c +++ b/drivers/staging/media/imx/imx-media-dev.c @@ -87,11 +87,11 @@ imx_media_add_async_subdev(struct imx_media_dev *imxmd, if (pdev) devname = dev_name(&pdev->dev); - /* return NULL if this subdev already added */ + /* return -EEXIST if this subdev already added */ if (imx_media_find_async_subdev(imxmd, np, devname)) { dev_dbg(imxmd->md.dev, "%s: already added %s\n", __func__, np ? np->name : devname); - imxsd = NULL; + imxsd = ERR_PTR(-EEXIST); goto out; } diff --git a/drivers/staging/media/imx/imx-media-of.c b/drivers/staging/media/imx/imx-media-of.c index b026fe66467c..12df09f52490 100644 --- a/drivers/staging/media/imx/imx-media-of.c +++ b/drivers/staging/media/imx/imx-media-of.c @@ -100,9 +100,9 @@ static void of_get_remote_pad(struct device_node *epnode, } } -static struct imx_media_subdev * +static int of_parse_subdev(struct imx_media_dev *imxmd, struct device_node *sd_np, - bool is_csi_port) + bool is_csi_port, struct imx_media_subdev **subdev) { struct imx_media_subdev *imxsd; int i, num_pads, ret; @@ -110,13 +110,25 @@ of_parse_subdev(struct imx_media_dev *imxmd, struct device_node *sd_np, if (!of_device_is_available(sd_np)) { dev_dbg(imxmd->md.dev, "%s: %s not enabled\n", __func__, sd_np->name); - return NULL; + *subdev = NULL; + /* unavailable is not an error */ + return 0; } /* register this subdev with async notifier */ imxsd = imx_media_add_async_subdev(imxmd, sd_np, NULL); - if (IS_ERR_OR_NULL(imxsd)) - return imxsd; + ret = PTR_ERR_OR_ZERO(imxsd); + if (ret) { + if (ret == -EEXIST) { + /* already added, everything is fine */ + *subdev = NULL; + return 0; + } + + /* other error, can't continue */ + return ret; + } + *subdev = imxsd; if (is_csi_port) { /* @@ -137,10 +149,11 @@ of_parse_subdev(struct imx_media_dev *imxmd, struct device_node *sd_np, } else { num_pads = of_get_port_count(sd_np); if (num_pads != 1) { + /* confused, but no reason to give up here */ dev_warn(imxmd->md.dev, "%s: unknown device %s with %d ports\n", __func__, sd_np->name, num_pads); - return NULL; + return 0; } /* @@ -151,7 +164,7 @@ of_parse_subdev(struct imx_media_dev *imxmd, struct device_node *sd_np, } if (imxsd->num_sink_pads >= num_pads) - return ERR_PTR(-EINVAL); + return -EINVAL; imxsd->num_src_pads = num_pads - imxsd->num_sink_pads; @@ -191,20 +204,15 @@ of_parse_subdev(struct imx_media_dev *imxmd, struct device_node *sd_np, ret = of_add_pad_link(imxmd, pad, sd_np, remote_np, i, remote_pad); - if (ret) { - imxsd = ERR_PTR(ret); + if (ret) break; - } if (i < imxsd->num_sink_pads) { /* follow sink endpoints upstream */ - remote_imxsd = of_parse_subdev(imxmd, - remote_np, - false); - if (IS_ERR(remote_imxsd)) { - imxsd = remote_imxsd; + ret = 
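Both timer conversions above (prp_init and imx_csi_probe) replace open-coded init_timer() plus manual .function/.data assignment with a single setup_timer() call. A minimal sketch of the equivalent, with illustrative names:

#include <linux/timer.h>

struct example_priv {			/* illustrative */
	struct timer_list eof_timeout_timer;
};

static void example_eof_timeout(unsigned long data)
{
	struct example_priv *priv = (struct example_priv *)data;

	/* handle the EOF timeout for priv ... */
}

static void example_init_timer(struct example_priv *priv)
{
	/* one call replaces init_timer() + .data + .function assignments */
	setup_timer(&priv->eof_timeout_timer, example_eof_timeout,
		    (unsigned long)priv);
}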
of_parse_subdev(imxmd, remote_np, + false, &remote_imxsd); + if (ret) break; - } } of_node_put(remote_np); @@ -212,14 +220,14 @@ of_parse_subdev(struct imx_media_dev *imxmd, struct device_node *sd_np, if (port != sd_np) of_node_put(port); - if (IS_ERR(imxsd)) { + if (ret) { of_node_put(remote_np); of_node_put(epnode); break; } } - return imxsd; + return ret; } int imx_media_of_parse(struct imx_media_dev *imxmd, @@ -236,11 +244,9 @@ int imx_media_of_parse(struct imx_media_dev *imxmd, if (!csi_np) break; - lcsi = of_parse_subdev(imxmd, csi_np, true); - if (IS_ERR(lcsi)) { - ret = PTR_ERR(lcsi); + ret = of_parse_subdev(imxmd, csi_np, true, &lcsi); + if (ret) goto err_put; - } ret = of_property_read_u32(csi_np, "reg", &csi_id); if (ret) { diff --git a/drivers/staging/media/imx/imx-media-vdic.c b/drivers/staging/media/imx/imx-media-vdic.c index 7eabdc4aa79f..433474d58e3e 100644 --- a/drivers/staging/media/imx/imx-media-vdic.c +++ b/drivers/staging/media/imx/imx-media-vdic.c @@ -126,15 +126,15 @@ struct vdic_priv { static void vdic_put_ipu_resources(struct vdic_priv *priv) { - if (!IS_ERR_OR_NULL(priv->vdi_in_ch_p)) + if (priv->vdi_in_ch_p) ipu_idmac_put(priv->vdi_in_ch_p); priv->vdi_in_ch_p = NULL; - if (!IS_ERR_OR_NULL(priv->vdi_in_ch)) + if (priv->vdi_in_ch) ipu_idmac_put(priv->vdi_in_ch); priv->vdi_in_ch = NULL; - if (!IS_ERR_OR_NULL(priv->vdi_in_ch_n)) + if (priv->vdi_in_ch_n) ipu_idmac_put(priv->vdi_in_ch_n); priv->vdi_in_ch_n = NULL; @@ -146,40 +146,43 @@ static void vdic_put_ipu_resources(struct vdic_priv *priv) static int vdic_get_ipu_resources(struct vdic_priv *priv) { int ret, err_chan; + struct ipuv3_channel *ch; + struct ipu_vdi *vdi; priv->ipu = priv->md->ipu[priv->ipu_id]; - priv->vdi = ipu_vdi_get(priv->ipu); - if (IS_ERR(priv->vdi)) { + vdi = ipu_vdi_get(priv->ipu); + if (IS_ERR(vdi)) { v4l2_err(&priv->sd, "failed to get VDIC\n"); - ret = PTR_ERR(priv->vdi); + ret = PTR_ERR(vdi); goto out; } + priv->vdi = vdi; if (!priv->csi_direct) { - priv->vdi_in_ch_p = ipu_idmac_get(priv->ipu, - IPUV3_CHANNEL_MEM_VDI_PREV); - if (IS_ERR(priv->vdi_in_ch_p)) { + ch = ipu_idmac_get(priv->ipu, IPUV3_CHANNEL_MEM_VDI_PREV); + if (IS_ERR(ch)) { err_chan = IPUV3_CHANNEL_MEM_VDI_PREV; - ret = PTR_ERR(priv->vdi_in_ch_p); + ret = PTR_ERR(ch); goto out_err_chan; } + priv->vdi_in_ch_p = ch; - priv->vdi_in_ch = ipu_idmac_get(priv->ipu, - IPUV3_CHANNEL_MEM_VDI_CUR); - if (IS_ERR(priv->vdi_in_ch)) { + ch = ipu_idmac_get(priv->ipu, IPUV3_CHANNEL_MEM_VDI_CUR); + if (IS_ERR(ch)) { err_chan = IPUV3_CHANNEL_MEM_VDI_CUR; - ret = PTR_ERR(priv->vdi_in_ch); + ret = PTR_ERR(ch); goto out_err_chan; } + priv->vdi_in_ch = ch; - priv->vdi_in_ch_n = ipu_idmac_get(priv->ipu, - IPUV3_CHANNEL_MEM_VDI_NEXT); + ch = ipu_idmac_get(priv->ipu, IPUV3_CHANNEL_MEM_VDI_NEXT); if (IS_ERR(priv->vdi_in_ch_n)) { err_chan = IPUV3_CHANNEL_MEM_VDI_NEXT; - ret = PTR_ERR(priv->vdi_in_ch_n); + ret = PTR_ERR(ch); goto out_err_chan; } + priv->vdi_in_ch_n = ch; } return 0; diff --git a/drivers/staging/media/lirc/lirc_zilog.c b/drivers/staging/media/lirc/lirc_zilog.c index 015e41bd036e..71af13bd0ebd 100644 --- a/drivers/staging/media/lirc/lirc_zilog.c +++ b/drivers/staging/media/lirc/lirc_zilog.c @@ -288,7 +288,7 @@ static void release_ir_tx(struct kref *ref) struct IR_tx *tx = container_of(ref, struct IR_tx, ref); struct IR *ir = tx->ir; - ir->l.features &= ~LIRC_CAN_SEND_PULSE; + ir->l.features &= ~LIRC_CAN_SEND_LIRCCODE; /* Don't put_ir_device(tx->ir) here, so our lock doesn't get freed */ ir->tx = NULL; kfree(tx); @@ -1249,7 +1249,7 @@ static 
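The of_parse_subdev() rework above turns an ERR_PTR-returning helper into "int status plus output parameter", which lets callers treat -EEXIST (subdev already registered) as success via PTR_ERR_OR_ZERO(). A sketch of the call-site pattern; it reuses the driver's own types, so the local header include is an assumption:

#include <linux/err.h>
#include <linux/of.h>
#include "imx-media.h"	/* assumed driver-local header, as in imx-media-of.c */

static int example_add(struct imx_media_dev *imxmd, struct device_node *np,
		       struct imx_media_subdev **subdev)
{
	struct imx_media_subdev *imxsd;
	int ret;

	imxsd = imx_media_add_async_subdev(imxmd, np, NULL);
	ret = PTR_ERR_OR_ZERO(imxsd);
	if (ret == -EEXIST) {
		*subdev = NULL;		/* already registered: not an error */
		return 0;
	}
	if (ret)
		return ret;		/* real failure, propagate */

	*subdev = imxsd;
	return 0;
}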
long ioctl(struct file *filep, unsigned int cmd, unsigned long arg) break; case LIRC_GET_REC_MODE: if (!(features & LIRC_CAN_REC_MASK)) - return -ENOSYS; + return -ENOTTY; result = put_user(LIRC_REC2MODE (features & LIRC_CAN_REC_MASK), @@ -1257,24 +1257,24 @@ static long ioctl(struct file *filep, unsigned int cmd, unsigned long arg) break; case LIRC_SET_REC_MODE: if (!(features & LIRC_CAN_REC_MASK)) - return -ENOSYS; + return -ENOTTY; result = get_user(mode, uptr); if (!result && !(LIRC_MODE2REC(mode) & features)) - result = -EINVAL; + result = -ENOTTY; break; case LIRC_GET_SEND_MODE: if (!(features & LIRC_CAN_SEND_MASK)) - return -ENOSYS; + return -ENOTTY; - result = put_user(LIRC_MODE_PULSE, uptr); + result = put_user(LIRC_MODE_LIRCCODE, uptr); break; case LIRC_SET_SEND_MODE: if (!(features & LIRC_CAN_SEND_MASK)) - return -ENOSYS; + return -ENOTTY; result = get_user(mode, uptr); - if (!result && mode != LIRC_MODE_PULSE) + if (!result && mode != LIRC_MODE_LIRCCODE) return -EINVAL; break; default: @@ -1512,7 +1512,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id) kref_init(&tx->ref); ir->tx = tx; - ir->l.features |= LIRC_CAN_SEND_PULSE; + ir->l.features |= LIRC_CAN_SEND_LIRCCODE; mutex_init(&tx->client_lock); tx->c = client; tx->need_boot = 1; diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c index 0bac58241a22..9e2f0421a01e 100644 --- a/drivers/staging/media/omap4iss/iss_video.c +++ b/drivers/staging/media/omap4iss/iss_video.c @@ -1199,7 +1199,7 @@ static int iss_video_mmap(struct file *file, struct vm_area_struct *vma) return vb2_mmap(&vfh->queue, vma); } -static struct v4l2_file_operations iss_video_fops = { +static const struct v4l2_file_operations iss_video_fops = { .owner = THIS_MODULE, .unlocked_ioctl = video_ioctl2, .open = iss_video_open, diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.c b/drivers/staging/mt29f_spinand/mt29f_spinand.c index a4e3ae8f0c85..87595c594b12 100644 --- a/drivers/staging/mt29f_spinand/mt29f_spinand.c +++ b/drivers/staging/mt29f_spinand/mt29f_spinand.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #include "mt29f_spinand.h" @@ -496,8 +496,12 @@ static int spinand_program_page(struct spi_device *spi_nand, if (!wbuf) return -ENOMEM; - enable_read_hw_ecc = 0; - spinand_read_page(spi_nand, page_id, 0, CACHE_BUF, wbuf); + enable_read_hw_ecc = 1; + retval = spinand_read_page(spi_nand, page_id, 0, CACHE_BUF, wbuf); + if (retval < 0) { + dev_err(&spi_nand->dev, "ecc error on read page!!!\n"); + return retval; + } for (i = offset, j = 0; i < len; i++, j++) wbuf[i] &= buf[j]; diff --git a/drivers/staging/pi433/rf69.c b/drivers/staging/pi433/rf69.c index c4b1b218ea38..290b419aa9dd 100644 --- a/drivers/staging/pi433/rf69.c +++ b/drivers/staging/pi433/rf69.c @@ -570,12 +570,6 @@ int rf69_set_dio_mapping(struct spi_device *spi, u8 DIONumber, u8 value) dev_dbg(&spi->dev, "set: DIO mapping"); #endif - // check DIO number - if (DIONumber > 5) { - dev_dbg(&spi->dev, "set: illegal input param"); - return -EINVAL; - } - switch (DIONumber) { case 0: mask=MASK_DIO0; shift=SHIFT_DIO0; regaddr=REG_DIOMAPPING1; break; case 1: mask=MASK_DIO1; shift=SHIFT_DIO1; regaddr=REG_DIOMAPPING1; break; @@ -583,6 +577,9 @@ int rf69_set_dio_mapping(struct spi_device *spi, u8 DIONumber, u8 value) case 3: mask=MASK_DIO3; shift=SHIFT_DIO3; regaddr=REG_DIOMAPPING1; break; case 4: mask=MASK_DIO4; shift=SHIFT_DIO4; regaddr=REG_DIOMAPPING2; break; case 5: mask=MASK_DIO5; 
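The lirc_zilog ioctl hunks above follow the convention that an ioctl the device cannot service returns -ENOTTY rather than -ENOSYS. A small sketch of one such branch, using the LIRC macros already referenced in the hunk (the header include reflects what the lirc drivers of this era used and is an assumption here):

#include <linux/types.h>
#include <linux/uaccess.h>
#include <media/lirc.h>

static long example_get_rec_mode(unsigned int features, u32 __user *uptr)
{
	if (!(features & LIRC_CAN_REC_MASK))
		return -ENOTTY;		/* capability absent: "wrong ioctl" */

	return put_user(LIRC_REC2MODE(features & LIRC_CAN_REC_MASK), uptr);
}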
shift=SHIFT_DIO5; regaddr=REG_DIOMAPPING2; break; + default: + dev_dbg(&spi->dev, "set: illegal input param"); + return -EINVAL; } // read reg diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme.c b/drivers/staging/rtl8723bs/core/rtw_mlme.c index 6b778206a1a3..cb8a95aabd6c 100644 --- a/drivers/staging/rtl8723bs/core/rtw_mlme.c +++ b/drivers/staging/rtl8723bs/core/rtw_mlme.c @@ -119,9 +119,8 @@ void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv) void _rtw_free_mlme_priv(struct mlme_priv *pmlmepriv) { - rtw_free_mlme_priv_ie_data(pmlmepriv); - if (pmlmepriv) { + rtw_free_mlme_priv_ie_data(pmlmepriv); if (pmlmepriv->free_bss_buf) { vfree(pmlmepriv->free_bss_buf); } diff --git a/drivers/staging/rtl8723bs/os_dep/rtw_proc.c b/drivers/staging/rtl8723bs/os_dep/rtw_proc.c index 92277457aba4..ce1dd6f9036f 100644 --- a/drivers/staging/rtl8723bs/os_dep/rtw_proc.c +++ b/drivers/staging/rtl8723bs/os_dep/rtw_proc.c @@ -311,6 +311,8 @@ static ssize_t proc_set_cam(struct file *file, const char __user *buffer, size_t if (num < 2) return count; + if (id >= TOTAL_CAM_ENTRY) + return -EINVAL; if (strcmp("c", cmd) == 0) { _clear_cam_entry(adapter, id); diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.c b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.c index 5f84526cb5b5..edbf6af1c8b7 100644 --- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.c +++ b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.c @@ -2901,11 +2901,11 @@ halmac_update_datapack_88xx(struct halmac_adapter *halmac_adapter, if (halmac_adapter->fw_version.h2c_version < 4) return HALMAC_RET_FW_NO_SUPPORT; + driver_adapter = halmac_adapter->driver_adapter; + HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG, "[TRACE]%s ==========>\n", __func__); - driver_adapter = halmac_adapter->driver_adapter; - HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG, "[TRACE]%s <==========\n", __func__); diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c index f33024e4d853..544f638ed3ef 100644 --- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c +++ b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c @@ -1618,10 +1618,11 @@ halmac_send_h2c_set_pwr_mode_88xx(struct halmac_adapter *halmac_adapter, void *driver_adapter = NULL; enum halmac_ret_status status = HALMAC_RET_SUCCESS; + driver_adapter = halmac_adapter->driver_adapter; + HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG, "%s!!\n", __func__); - driver_adapter = halmac_adapter->driver_adapter; h2c_header = h2c_buff; h2c_cmd = h2c_header + HALMAC_H2C_CMD_HDR_SIZE_88XX; @@ -1713,10 +1714,11 @@ halmac_media_status_rpt_88xx(struct halmac_adapter *halmac_adapter, u8 op_mode, void *driver_adapter = NULL; enum halmac_ret_status status = HALMAC_RET_SUCCESS; + driver_adapter = halmac_adapter->driver_adapter; + HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG, "halmac_send_h2c_set_pwr_mode_88xx!!\n"); - driver_adapter = halmac_adapter->driver_adapter; h2c_header = H2c_buff; h2c_cmd = h2c_header + HALMAC_H2C_CMD_HDR_SIZE_88XX; @@ -2143,10 +2145,11 @@ halmac_func_ctrl_ch_switch_88xx(struct halmac_adapter *halmac_adapter, enum halmac_cmd_process_status *process_status = &halmac_adapter->halmac_state.scan_state_set.process_status; + driver_adapter = halmac_adapter->driver_adapter; + HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG, "halmac_ctrl_ch_switch!!\n"); - driver_adapter = 
halmac_adapter->driver_adapter; halmac_api = (struct halmac_api *)halmac_adapter->halmac_api; if (halmac_transition_scan_state_88xx( @@ -2276,15 +2279,13 @@ enum halmac_ret_status halmac_send_h2c_update_bcn_parse_info_88xx( { u8 h2c_buff[HALMAC_H2C_CMD_SIZE_88XX] = {0}; u16 h2c_seq_mum = 0; - void *driver_adapter = NULL; + void *driver_adapter = halmac_adapter->driver_adapter; struct halmac_h2c_header_info h2c_header_info; enum halmac_ret_status status = HALMAC_RET_SUCCESS; HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG, "%s!!\n", __func__); - driver_adapter = halmac_adapter->driver_adapter; - UPDATE_BEACON_PARSING_INFO_SET_FUNC_EN(h2c_buff, bcn_ie_info->func_en); UPDATE_BEACON_PARSING_INFO_SET_SIZE_TH(h2c_buff, bcn_ie_info->size_th); UPDATE_BEACON_PARSING_INFO_SET_TIMEOUT(h2c_buff, bcn_ie_info->timeout); diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c index 53748d61e20e..89e2cfe7d1cc 100644 --- a/drivers/staging/rts5208/rtsx.c +++ b/drivers/staging/rts5208/rtsx.c @@ -205,16 +205,6 @@ static int device_reset(struct scsi_cmnd *srb) return SUCCESS; } -/* Simulate a SCSI bus reset by resetting the device's USB port. */ -static int bus_reset(struct scsi_cmnd *srb) -{ - struct rtsx_dev *dev = host_to_rtsx(srb->device->host); - - dev_info(&dev->pci->dev, "%s called\n", __func__); - - return SUCCESS; -} - /* * this defines our host template, with which we'll allocate hosts */ @@ -231,7 +221,6 @@ static struct scsi_host_template rtsx_host_template = { /* error and abort handlers */ .eh_abort_handler = command_abort, .eh_device_reset_handler = device_reset, - .eh_bus_reset_handler = bus_reset, /* queue commands only, only one command per LUN */ .can_queue = 1, diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c index 67956e24779c..56f7be6af1f6 100644 --- a/drivers/staging/speakup/main.c +++ b/drivers/staging/speakup/main.c @@ -1376,6 +1376,8 @@ static void reset_highlight_buffers(struct vc_data *); static int read_all_key; +static int in_keyboard_notifier; + static void start_read_all_timer(struct vc_data *vc, int command); enum { @@ -1408,7 +1410,10 @@ static void read_all_doc(struct vc_data *vc) cursor_track = read_all_mode; spk_reset_index_count(0); if (get_sentence_buf(vc, 0) == -1) { - kbd_fakekey2(vc, RA_DOWN_ARROW); + del_timer(&cursor_timer); + if (!in_keyboard_notifier) + speakup_fake_down_arrow(); + start_read_all_timer(vc, RA_DOWN_ARROW); } else { say_sentence_num(0, 0); synth_insert_next_index(0); @@ -2212,8 +2217,10 @@ static int keyboard_notifier_call(struct notifier_block *nb, int ret = NOTIFY_OK; static int keycode; /* to hold the current keycode */ + in_keyboard_notifier = 1; + if (vc->vc_mode == KD_GRAPHICS) - return ret; + goto out; /* * First, determine whether we are handling a fake keypress on @@ -2225,7 +2232,7 @@ static int keyboard_notifier_call(struct notifier_block *nb, */ if (speakup_fake_key_pressed()) - return ret; + goto out; switch (code) { case KBD_KEYCODE: @@ -2266,6 +2273,8 @@ static int keyboard_notifier_call(struct notifier_block *nb, break; } } +out: + in_keyboard_notifier = 0; return ret; } diff --git a/drivers/staging/unisys/visorbus/visorchipset.c b/drivers/staging/unisys/visorbus/visorchipset.c index 74cce4f1a7bd..27ecf6fb49fd 100644 --- a/drivers/staging/unisys/visorbus/visorchipset.c +++ b/drivers/staging/unisys/visorbus/visorchipset.c @@ -1826,7 +1826,7 @@ static __init int visorutil_spar_detect(void) return 0; } -static int init_unisys(void) +static int __init init_unisys(void) { int 
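The speakup hunks above bracket the keyboard notifier body with an in_keyboard_notifier flag, so read_all_doc() can tell it is running from notifier context and restart the cursor timer instead of injecting a fake down-arrow key press. The shape of that guard, heavily simplified with illustrative names:

#include <linux/notifier.h>

static int in_keyboard_notifier_sketch;

static int example_keyboard_notifier(unsigned long code, void *param)
{
	int ret = NOTIFY_OK;

	in_keyboard_notifier_sketch = 1;
	/*
	 * ... keyboard handling; helpers test the flag and avoid faking
	 * key presses while it is set ...
	 */
	in_keyboard_notifier_sketch = 0;
	return ret;
}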
result; @@ -1841,7 +1841,7 @@ static int init_unisys(void) return 0; }; -static void exit_unisys(void) +static void __exit exit_unisys(void) { acpi_bus_unregister_driver(&unisys_acpi_driver); } diff --git a/drivers/staging/unisys/visorhba/visorhba_main.c b/drivers/staging/unisys/visorhba/visorhba_main.c index 8567e447891e..419dba89af06 100644 --- a/drivers/staging/unisys/visorhba/visorhba_main.c +++ b/drivers/staging/unisys/visorhba/visorhba_main.c @@ -47,9 +47,8 @@ MODULE_DEVICE_TABLE(visorbus, visorhba_channel_types); MODULE_ALIAS("visorbus:" VISOR_VHBA_CHANNEL_GUID_STR); struct visordisk_info { + struct scsi_device *sdev; u32 valid; - /* Disk Path */ - u32 channel, id, lun; atomic_t ios_threshold; atomic_t error_count; struct visordisk_info *next; @@ -105,12 +104,6 @@ struct visorhba_devices_open { struct visorhba_devdata *devdata; }; -#define for_each_vdisk_match(iter, list, match) \ - for (iter = &list->head; iter->next; iter = iter->next) \ - if ((iter->channel == match->channel) && \ - (iter->id == match->id) && \ - (iter->lun == match->lun)) - /* * visor_thread_start - Starts a thread for the device * @threadfn: Function the thread starts @@ -313,10 +306,9 @@ static void cleanup_scsitaskmgmt_handles(struct idr *idrtable, * Return: Int representing whether command was queued successfully or not */ static int forward_taskmgmt_command(enum task_mgmt_types tasktype, - struct scsi_cmnd *scsicmd) + struct scsi_device *scsidev) { struct uiscmdrsp *cmdrsp; - struct scsi_device *scsidev = scsicmd->device; struct visorhba_devdata *devdata = (struct visorhba_devdata *)scsidev->host->hostdata; int notifyresult = 0xffff; @@ -364,12 +356,6 @@ static int forward_taskmgmt_command(enum task_mgmt_types tasktype, dev_dbg(&scsidev->sdev_gendev, "visorhba: taskmgmt type=%d success; result=0x%x\n", tasktype, notifyresult); - if (tasktype == TASK_MGMT_ABORT_TASK) - scsicmd->result = DID_ABORT << 16; - else - scsicmd->result = DID_RESET << 16; - - scsicmd->scsi_done(scsicmd); cleanup_scsitaskmgmt_handles(&devdata->idr, cmdrsp); return SUCCESS; @@ -392,17 +378,20 @@ static int visorhba_abort_handler(struct scsi_cmnd *scsicmd) /* issue TASK_MGMT_ABORT_TASK */ struct scsi_device *scsidev; struct visordisk_info *vdisk; - struct visorhba_devdata *devdata; + int rtn; scsidev = scsicmd->device; - devdata = (struct visorhba_devdata *)scsidev->host->hostdata; - for_each_vdisk_match(vdisk, devdata, scsidev) { - if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT) - atomic_inc(&vdisk->error_count); - else - atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD); + vdisk = scsidev->hostdata; + if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT) + atomic_inc(&vdisk->error_count); + else + atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD); + rtn = forward_taskmgmt_command(TASK_MGMT_ABORT_TASK, scsidev); + if (rtn == SUCCESS) { + scsicmd->result = DID_ABORT << 16; + scsicmd->scsi_done(scsicmd); } - return forward_taskmgmt_command(TASK_MGMT_ABORT_TASK, scsicmd); + return rtn; } /* @@ -416,17 +405,20 @@ static int visorhba_device_reset_handler(struct scsi_cmnd *scsicmd) /* issue TASK_MGMT_LUN_RESET */ struct scsi_device *scsidev; struct visordisk_info *vdisk; - struct visorhba_devdata *devdata; + int rtn; scsidev = scsicmd->device; - devdata = (struct visorhba_devdata *)scsidev->host->hostdata; - for_each_vdisk_match(vdisk, devdata, scsidev) { - if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT) - atomic_inc(&vdisk->error_count); - else - atomic_set(&vdisk->ios_threshold, 
IOS_ERROR_THRESHOLD); + vdisk = scsidev->hostdata; + if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT) + atomic_inc(&vdisk->error_count); + else + atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD); + rtn = forward_taskmgmt_command(TASK_MGMT_LUN_RESET, scsidev); + if (rtn == SUCCESS) { + scsicmd->result = DID_RESET << 16; + scsicmd->scsi_done(scsicmd); } - return forward_taskmgmt_command(TASK_MGMT_LUN_RESET, scsicmd); + return rtn; } /* @@ -440,17 +432,22 @@ static int visorhba_bus_reset_handler(struct scsi_cmnd *scsicmd) { struct scsi_device *scsidev; struct visordisk_info *vdisk; - struct visorhba_devdata *devdata; + int rtn; scsidev = scsicmd->device; - devdata = (struct visorhba_devdata *)scsidev->host->hostdata; - for_each_vdisk_match(vdisk, devdata, scsidev) { + shost_for_each_device(scsidev, scsidev->host) { + vdisk = scsidev->hostdata; if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT) atomic_inc(&vdisk->error_count); else atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD); } - return forward_taskmgmt_command(TASK_MGMT_BUS_RESET, scsicmd); + rtn = forward_taskmgmt_command(TASK_MGMT_BUS_RESET, scsidev); + if (rtn == SUCCESS) { + scsicmd->result = DID_RESET << 16; + scsicmd->scsi_done(scsicmd); + } + return rtn; } /* @@ -604,27 +601,24 @@ static int visorhba_slave_alloc(struct scsi_device *scsidev) * LLD can alloc any struct & do init if needed. */ struct visordisk_info *vdisk; - struct visordisk_info *tmpvdisk; struct visorhba_devdata *devdata; struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host; + /* already allocated return success */ + if (scsidev->hostdata) + return 0; + /* even though we errored, treat as success */ devdata = (struct visorhba_devdata *)scsihost->hostdata; if (!devdata) return 0; - /* already allocated return success */ - for_each_vdisk_match(vdisk, devdata, scsidev) - return 0; - - tmpvdisk = kzalloc(sizeof(*tmpvdisk), GFP_ATOMIC); - if (!tmpvdisk) + vdisk = kzalloc(sizeof(*vdisk), GFP_ATOMIC); + if (!vdisk) return -ENOMEM; - tmpvdisk->channel = scsidev->channel; - tmpvdisk->id = scsidev->id; - tmpvdisk->lun = scsidev->lun; - vdisk->next = tmpvdisk; + vdisk->sdev = scsidev; + scsidev->hostdata = vdisk; return 0; } @@ -637,17 +631,11 @@ static void visorhba_slave_destroy(struct scsi_device *scsidev) /* midlevel calls this after device has been quiesced and * before it is to be deleted. */ - struct visordisk_info *vdisk, *delvdisk; - struct visorhba_devdata *devdata; - struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host; + struct visordisk_info *vdisk; - devdata = (struct visorhba_devdata *)scsihost->hostdata; - for_each_vdisk_match(vdisk, devdata, scsidev) { - delvdisk = vdisk->next; - vdisk->next = delvdisk->next; - kfree(delvdisk); - return; - } + vdisk = scsidev->hostdata; + scsidev->hostdata = NULL; + kfree(vdisk); } static struct scsi_host_template visorhba_driver_template = { @@ -823,7 +811,6 @@ static int visorhba_serverdown(struct visorhba_devdata *devdata) static void do_scsi_linuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd) { - struct visorhba_devdata *devdata; struct visordisk_info *vdisk; struct scsi_device *scsidev; @@ -836,12 +823,10 @@ static void do_scsi_linuxstat(struct uiscmdrsp *cmdrsp, (cmdrsp->scsi.addlstat == ADDL_SEL_TIMEOUT)) return; /* Okay see what our error_count is here.... 
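The visorhba rework above drops the hand-rolled per-disk list and for_each_vdisk_match() macro in favour of the hostdata pointer the SCSI midlayer already provides on each scsi_device. A minimal sketch of that pattern; struct example_disk_info is an illustrative stand-in for visordisk_info:

#include <linux/slab.h>
#include <scsi/scsi_device.h>

struct example_disk_info {
	struct scsi_device *sdev;
	atomic_t error_count;
};

static int example_slave_alloc(struct scsi_device *sdev)
{
	struct example_disk_info *di;

	if (sdev->hostdata)		/* already set up for this device */
		return 0;

	di = kzalloc(sizeof(*di), GFP_ATOMIC);
	if (!di)
		return -ENOMEM;
	di->sdev = sdev;
	sdev->hostdata = di;
	return 0;
}

static void example_slave_destroy(struct scsi_device *sdev)
{
	kfree(sdev->hostdata);
	sdev->hostdata = NULL;
}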
*/ - devdata = (struct visorhba_devdata *)scsidev->host->hostdata; - for_each_vdisk_match(vdisk, devdata, scsidev) { - if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT) { - atomic_inc(&vdisk->error_count); - atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD); - } + vdisk = scsidev->hostdata; + if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT) { + atomic_inc(&vdisk->error_count); + atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD); } } @@ -881,7 +866,6 @@ static void do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, char *this_page_orig; int bufind = 0; struct visordisk_info *vdisk; - struct visorhba_devdata *devdata; scsidev = scsicmd->device; if ((cmdrsp->scsi.cmnd[0] == INQUIRY) && @@ -918,13 +902,11 @@ static void do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, } kfree(buf); } else { - devdata = (struct visorhba_devdata *)scsidev->host->hostdata; - for_each_vdisk_match(vdisk, devdata, scsidev) { - if (atomic_read(&vdisk->ios_threshold) > 0) { - atomic_dec(&vdisk->ios_threshold); - if (atomic_read(&vdisk->ios_threshold) == 0) - atomic_set(&vdisk->error_count, 0); - } + vdisk = scsidev->hostdata; + if (atomic_read(&vdisk->ios_threshold) > 0) { + atomic_dec(&vdisk->ios_threshold); + if (atomic_read(&vdisk->ios_threshold) == 0) + atomic_set(&vdisk->error_count, 0); } } } diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c index 0159ca4407d8..be08849175ea 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c @@ -612,18 +612,20 @@ free_pagelist(struct vchiq_pagelist_info *pagelistinfo, if (head_bytes > actual) head_bytes = actual; - memcpy((char *)page_address(pages[0]) + + memcpy((char *)kmap(pages[0]) + pagelist->offset, fragments, head_bytes); + kunmap(pages[0]); } if ((actual >= 0) && (head_bytes < actual) && (tail_bytes != 0)) { - memcpy((char *)page_address(pages[num_pages - 1]) + + memcpy((char *)kmap(pages[num_pages - 1]) + ((pagelist->offset + actual) & (PAGE_SIZE - 1) & ~(g_cache_line_size - 1)), fragments + g_cache_line_size, tail_bytes); + kunmap(pages[num_pages - 1]); } down(&g_free_fragments_mutex); diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index a91b7c25ffd4..928127642574 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -896,13 +896,14 @@ static int core_alua_write_tpg_metadata( u32 md_buf_len) { struct file *file = filp_open(path, O_RDWR | O_CREAT | O_TRUNC, 0600); + loff_t pos = 0; int ret; if (IS_ERR(file)) { pr_err("filp_open(%s) for ALUA metadata failed\n", path); return -ENODEV; } - ret = kernel_write(file, md_buf, md_buf_len, 0); + ret = kernel_write(file, md_buf, md_buf_len, &pos); if (ret < 0) pr_err("Error writing ALUA metadata file: %s\n", path); fput(file); diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 24cf11d9e50a..c629817a8854 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -443,7 +443,7 @@ fd_do_prot_fill(struct se_device *se_dev, sector_t lba, sector_t nolb, for (prot = 0; prot < prot_length;) { sector_t len = min_t(sector_t, bufsize, prot_length - prot); - ssize_t ret = kernel_write(prot_fd, buf, len, pos + prot); + ssize_t ret = kernel_write(prot_fd, buf, len, &pos); if (ret != len) { pr_err("vfs_write to prot file failed: %zd\n", ret); diff --git 
a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index ee7c7fa55dad..07c814c42648 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -338,7 +338,7 @@ iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num, int op, return NULL; } - bio->bi_bdev = ib_dev->ibd_bd; + bio_set_dev(bio, ib_dev->ibd_bd); bio->bi_private = cmd; bio->bi_end_io = &iblock_bio_done; bio->bi_iter.bi_sector = lba; @@ -395,7 +395,7 @@ iblock_execute_sync_cache(struct se_cmd *cmd) bio = bio_alloc(GFP_KERNEL, 0); bio->bi_end_io = iblock_end_io_flush; - bio->bi_bdev = ib_dev->ibd_bd; + bio_set_dev(bio, ib_dev->ibd_bd); bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; if (!immed) bio->bi_private = cmd; diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 6d5def64db61..dd2cd8048582 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -1974,6 +1974,7 @@ static int __core_scsi3_write_aptpl_to_file( char path[512]; u32 pr_aptpl_buf_len; int ret; + loff_t pos = 0; memset(path, 0, 512); @@ -1993,7 +1994,7 @@ static int __core_scsi3_write_aptpl_to_file( pr_aptpl_buf_len = (strlen(buf) + 1); /* Add extra for NULL */ - ret = kernel_write(file, buf, pr_aptpl_buf_len, 0); + ret = kernel_write(file, buf, pr_aptpl_buf_len, &pos); if (ret < 0) pr_debug("Error writing APTPL metadata file: %s\n", path); diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c index 58169e519422..7952357df9c8 100644 --- a/drivers/tee/optee/core.c +++ b/drivers/tee/optee/core.c @@ -224,13 +224,14 @@ static void optee_release(struct tee_context *ctx) if (!IS_ERR(shm)) { arg = tee_shm_get_va(shm, 0); /* - * If va2pa fails for some reason, we can't call - * optee_close_session(), only free the memory. Secure OS - * will leak sessions and finally refuse more sessions, but - * we will at least let normal world reclaim its memory. + * If va2pa fails for some reason, we can't call into + * secure world, only free the memory. Secure OS will leak + * sessions and finally refuse more sessions, but we will + * at least let normal world reclaim its memory. 
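The three target_core call sites above (ALUA, file backend, PR APTPL) all adapt to the kernel_write() signature that takes a loff_t *pos and advances it, instead of a by-value offset. A minimal sketch of the updated call pattern, with an illustrative wrapper name:

#include <linux/fs.h>

static int example_write_metadata(struct file *file, const void *buf,
				  size_t len)
{
	loff_t pos = 0;
	ssize_t ret;

	ret = kernel_write(file, buf, len, &pos);	/* pos advances by ret */
	return ret < 0 ? ret : 0;
}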
*/ if (!IS_ERR(arg)) - tee_shm_va2pa(shm, arg, &parg); + if (tee_shm_va2pa(shm, arg, &parg)) + arg = NULL; /* prevent usage of parg below */ } list_for_each_entry_safe(sess, sess_tmp, &ctxdata->sess_list, @@ -258,7 +259,7 @@ static void optee_release(struct tee_context *ctx) } } -static struct tee_driver_ops optee_ops = { +static const struct tee_driver_ops optee_ops = { .get_version = optee_get_version, .open = optee_open, .release = optee_release, @@ -268,13 +269,13 @@ static struct tee_driver_ops optee_ops = { .cancel_req = optee_cancel_req, }; -static struct tee_desc optee_desc = { +static const struct tee_desc optee_desc = { .name = DRIVER_NAME "-clnt", .ops = &optee_ops, .owner = THIS_MODULE, }; -static struct tee_driver_ops optee_supp_ops = { +static const struct tee_driver_ops optee_supp_ops = { .get_version = optee_get_version, .open = optee_open, .release = optee_release, @@ -282,7 +283,7 @@ static struct tee_driver_ops optee_supp_ops = { .supp_send = optee_supp_send, }; -static struct tee_desc optee_supp_desc = { +static const struct tee_desc optee_supp_desc = { .name = DRIVER_NAME "-supp", .ops = &optee_supp_ops, .owner = THIS_MODULE, diff --git a/drivers/tee/optee/optee_smc.h b/drivers/tee/optee/optee_smc.h index 13b7c98cdf25..069c8e1429de 100644 --- a/drivers/tee/optee/optee_smc.h +++ b/drivers/tee/optee/optee_smc.h @@ -298,7 +298,7 @@ struct optee_smc_disable_shm_cache_result { OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_ENABLE_SHM_CACHE) /* - * Resume from RPC (for example after processing an IRQ) + * Resume from RPC (for example after processing a foreign interrupt) * * Call register usage: * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC @@ -383,19 +383,19 @@ struct optee_smc_disable_shm_cache_result { OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_FREE) /* - * Deliver an IRQ in normal world. + * Deliver foreign interrupt to normal world. * * "Call" register usage: - * a0 OPTEE_SMC_RETURN_RPC_IRQ + * a0 OPTEE_SMC_RETURN_RPC_FOREIGN_INTR * a1-7 Resume information, must be preserved * * "Return" register usage: * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC. * a1-7 Preserved */ -#define OPTEE_SMC_RPC_FUNC_IRQ 4 -#define OPTEE_SMC_RETURN_RPC_IRQ \ - OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_IRQ) +#define OPTEE_SMC_RPC_FUNC_FOREIGN_INTR 4 +#define OPTEE_SMC_RETURN_RPC_FOREIGN_INTR \ + OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_FOREIGN_INTR) /* * Do an RPC request. 
The supplied struct optee_msg_arg tells which diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c index 8814eca06021..cef417f4f4d2 100644 --- a/drivers/tee/optee/rpc.c +++ b/drivers/tee/optee/rpc.c @@ -140,11 +140,8 @@ static void handle_rpc_func_cmd_wait(struct optee_msg_arg *arg) msec_to_wait = arg->params[0].u.value.a; - /* set task's state to interruptible sleep */ - set_current_state(TASK_INTERRUPTIBLE); - - /* take a nap */ - msleep(msec_to_wait); + /* Go to interruptible sleep */ + msleep_interruptible(msec_to_wait); arg->ret = TEEC_SUCCESS; return; @@ -374,11 +371,11 @@ void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param) shm = reg_pair_to_ptr(param->a1, param->a2); tee_shm_free(shm); break; - case OPTEE_SMC_RPC_FUNC_IRQ: + case OPTEE_SMC_RPC_FUNC_FOREIGN_INTR: /* - * An IRQ was raised while secure world was executing, - * since all IRQs are handled in Linux a dummy RPC is - * performed to let Linux take the IRQ through the normal + * A foreign interrupt was raised while secure world was + * executing, since they are handled in Linux a dummy RPC is + * performed to let Linux take the interrupt through the normal * vector. */ break; diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c index 5c60bf4423e6..58a5009eacc3 100644 --- a/drivers/tee/tee_core.c +++ b/drivers/tee/tee_core.c @@ -90,8 +90,13 @@ static int tee_ioctl_version(struct tee_context *ctx, struct tee_ioctl_version_data vers; ctx->teedev->desc->ops->get_version(ctx->teedev, &vers); + + if (ctx->teedev->desc->flags & TEE_DESC_PRIVILEGED) + vers.gen_caps |= TEE_GEN_CAP_PRIVILEGED; + if (copy_to_user(uvers, &vers, sizeof(vers))) return -EFAULT; + return 0; } diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c index d356d7f025eb..4bc7956cefc4 100644 --- a/drivers/tee/tee_shm.c +++ b/drivers/tee/tee_shm.c @@ -80,7 +80,7 @@ static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) size, vma->vm_page_prot); } -static struct dma_buf_ops tee_shm_dma_buf_ops = { +static const struct dma_buf_ops tee_shm_dma_buf_ops = { .map_dma_buf = tee_shm_op_map_dma_buf, .unmap_dma_buf = tee_shm_op_unmap_dma_buf, .release = tee_shm_op_release, diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index b5b5facb8747..07002df4f83a 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig @@ -342,7 +342,7 @@ config X86_PKG_TEMP_THERMAL config INTEL_SOC_DTS_IOSF_CORE tristate - depends on X86 + depends on X86 && PCI select IOSF_MBI help This is becoming a common feature for Intel SoCs to expose the additional @@ -352,7 +352,7 @@ config INTEL_SOC_DTS_IOSF_CORE config INTEL_SOC_DTS_THERMAL tristate "Intel SoCs DTS thermal driver" - depends on X86 + depends on X86 && PCI select INTEL_SOC_DTS_IOSF_CORE select THERMAL_WRITABLE_TRIPS help @@ -473,4 +473,12 @@ config ZX2967_THERMAL the primitive temperature sensor embedded in zx2967 SoCs. This sensor generates the real time die temperature. +config UNIPHIER_THERMAL + tristate "Socionext UniPhier thermal driver" + depends on ARCH_UNIPHIER || COMPILE_TEST + depends on THERMAL_OF && MFD_SYSCON + help + Enable this to plug in UniPhier on-chip PVT thermal driver into the + thermal framework. The driver supports CPU thermal zone temperature + reporting and a couple of trip points. 
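The optee RPC wait hunk above replaces set_current_state(TASK_INTERRUPTIBLE) followed by msleep() with msleep_interruptible(): msleep() puts the task into uninterruptible sleep regardless of any prior state change, so only msleep_interruptible() actually lets a signal cut the wait short. A small sketch:

#include <linux/delay.h>
#include <linux/printk.h>

static void example_wait(unsigned int msec_to_wait)
{
	/* returns the remaining time if a signal interrupted the sleep */
	unsigned long left = msleep_interruptible(msec_to_wait);

	if (left)
		pr_debug("wait cut short, %lu ms remaining\n", left);
}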
endif diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile index 094d7039981c..8b79bca23536 100644 --- a/drivers/thermal/Makefile +++ b/drivers/thermal/Makefile @@ -59,3 +59,4 @@ obj-$(CONFIG_HISI_THERMAL) += hisi_thermal.o obj-$(CONFIG_MTK_THERMAL) += mtk_thermal.o obj-$(CONFIG_GENERIC_ADC_THERMAL) += thermal-generic-adc.o obj-$(CONFIG_ZX2967_THERMAL) += zx2967_thermal.o +obj-$(CONFIG_UNIPHIER_THERMAL) += uniphier_thermal.o diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c index e6863c841662..a4d6a0e2e993 100644 --- a/drivers/thermal/broadcom/bcm2835_thermal.c +++ b/drivers/thermal/broadcom/bcm2835_thermal.c @@ -145,7 +145,7 @@ static void bcm2835_thermal_debugfs(struct platform_device *pdev) debugfs_create_regset32("regset", 0444, data->debugfsdir, regset); } -static struct thermal_zone_of_device_ops bcm2835_thermal_ops = { +static const struct thermal_zone_of_device_ops bcm2835_thermal_ops = { .get_temp = bcm2835_thermal_get_temp, }; diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c index 9c3ce341eb97..bd3572c41585 100644 --- a/drivers/thermal/hisi_thermal.c +++ b/drivers/thermal/hisi_thermal.c @@ -206,7 +206,7 @@ static int hisi_thermal_get_temp(void *_sensor, int *temp) return 0; } -static struct thermal_zone_of_device_ops hisi_of_thermal_ops = { +static const struct thermal_zone_of_device_ops hisi_of_thermal_ops = { .get_temp = hisi_thermal_get_temp, }; diff --git a/drivers/thermal/int340x_thermal/acpi_thermal_rel.c b/drivers/thermal/int340x_thermal/acpi_thermal_rel.c index 51ceb80212a7..c719167e9f28 100644 --- a/drivers/thermal/int340x_thermal/acpi_thermal_rel.c +++ b/drivers/thermal/int340x_thermal/acpi_thermal_rel.c @@ -228,7 +228,7 @@ static void get_single_name(acpi_handle handle, char *name) struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER}; if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer))) - pr_warn("Failed get name from handle\n"); + pr_warn("Failed to get device name from acpi handle\n"); else { memcpy(name, buffer.pointer, ACPI_NAME_SIZE); kfree(buffer.pointer); diff --git a/drivers/thermal/int340x_thermal/acpi_thermal_rel.h b/drivers/thermal/int340x_thermal/acpi_thermal_rel.h index f00700bc9d79..65075b174329 100644 --- a/drivers/thermal/int340x_thermal/acpi_thermal_rel.h +++ b/drivers/thermal/int340x_thermal/acpi_thermal_rel.h @@ -34,10 +34,10 @@ struct trt { acpi_handle target; u64 influence; u64 sample_period; - u64 reverved1; - u64 reverved2; - u64 reverved3; - u64 reverved4; + u64 reserved1; + u64 reserved2; + u64 reserved3; + u64 reserved4; } __packed; #define ACPI_NR_ART_ELEMENTS 13 diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c index a9ec94ed7a42..8ee38f55c7f3 100644 --- a/drivers/thermal/int340x_thermal/int3400_thermal.c +++ b/drivers/thermal/int340x_thermal/int3400_thermal.c @@ -16,6 +16,8 @@ #include #include "acpi_thermal_rel.h" +#define INT3400_THERMAL_TABLE_CHANGED 0x83 + enum int3400_thermal_uuid { INT3400_THERMAL_PASSIVE_1, INT3400_THERMAL_ACTIVE, @@ -104,7 +106,7 @@ static struct attribute *uuid_attrs[] = { NULL }; -static struct attribute_group uuid_attribute_group = { +static const struct attribute_group uuid_attribute_group = { .attrs = uuid_attrs, .name = "uuids" }; @@ -185,6 +187,35 @@ static int int3400_thermal_run_osc(acpi_handle handle, return result; } +static void int3400_notify(acpi_handle handle, + u32 event, + void *data) +{ + struct int3400_thermal_priv *priv = data; + 
char *thermal_prop[5]; + + if (!priv) + return; + + switch (event) { + case INT3400_THERMAL_TABLE_CHANGED: + thermal_prop[0] = kasprintf(GFP_KERNEL, "NAME=%s", + priv->thermal->type); + thermal_prop[1] = kasprintf(GFP_KERNEL, "TEMP=%d", + priv->thermal->temperature); + thermal_prop[2] = kasprintf(GFP_KERNEL, "TRIP="); + thermal_prop[3] = kasprintf(GFP_KERNEL, "EVENT=%d", + THERMAL_TABLE_CHANGED); + thermal_prop[4] = NULL; + kobject_uevent_env(&priv->thermal->device.kobj, KOBJ_CHANGE, + thermal_prop); + break; + default: + dev_err(&priv->adev->dev, "Unsupported event [0x%x]\n", event); + break; + } +} + static int int3400_thermal_get_temp(struct thermal_zone_device *thermal, int *temp) { @@ -290,6 +321,12 @@ static int int3400_thermal_probe(struct platform_device *pdev) if (result) goto free_zone; + result = acpi_install_notify_handler( + priv->adev->handle, ACPI_DEVICE_NOTIFY, int3400_notify, + (void *)priv); + if (result) + goto free_zone; + return 0; free_zone: @@ -306,6 +343,10 @@ static int int3400_thermal_remove(struct platform_device *pdev) { struct int3400_thermal_priv *priv = platform_get_drvdata(pdev); + acpi_remove_notify_handler( + priv->adev->handle, ACPI_DEVICE_NOTIFY, + int3400_notify); + if (!priv->rel_misc_dev_res) acpi_thermal_rel_misc_device_remove(priv->adev->handle); diff --git a/drivers/thermal/int340x_thermal/int3406_thermal.c b/drivers/thermal/int340x_thermal/int3406_thermal.c index 1891f34ab7fc..f69ab026ba24 100644 --- a/drivers/thermal/int340x_thermal/int3406_thermal.c +++ b/drivers/thermal/int340x_thermal/int3406_thermal.c @@ -21,39 +21,33 @@ struct int3406_thermal_data { int upper_limit; - int upper_limit_index; int lower_limit; - int lower_limit_index; acpi_handle handle; struct acpi_video_device_brightness *br; struct backlight_device *raw_bd; struct thermal_cooling_device *cooling_dev; }; -static int int3406_thermal_to_raw(int level, struct int3406_thermal_data *d) -{ - int max_level = d->br->levels[d->br->count - 1]; - int raw_max = d->raw_bd->props.max_brightness; - - return level * raw_max / max_level; -} - -static int int3406_thermal_to_acpi(int level, struct int3406_thermal_data *d) -{ - int raw_max = d->raw_bd->props.max_brightness; - int max_level = d->br->levels[d->br->count - 1]; - - return level * max_level / raw_max; -} +/* + * According to the ACPI spec, + * "Each brightness level is represented by a number between 0 and 100, + * and can be thought of as a percentage. For example, 50 can be 50% + * power consumption or 50% brightness, as defined by the OEM." + * + * As int3406 device uses this value to communicate with the native + * graphics driver, we make the assumption that it represents + * the percentage of brightness only + */ +#define ACPI_TO_RAW(v, d) (d->raw_bd->props.max_brightness * v / 100) +#define RAW_TO_ACPI(v, d) (v * 100 / d->raw_bd->props.max_brightness) static int int3406_thermal_get_max_state(struct thermal_cooling_device *cooling_dev, unsigned long *state) { struct int3406_thermal_data *d = cooling_dev->devdata; - int index = d->lower_limit_index ? 
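The int3400 hunks above register an ACPI notify handler at probe time and remove it in the driver's remove path, forwarding table-change events to user space as uevents. A sketch of that install/remove pairing; example_priv, example_notify and the ACPI_FAILURE-based error handling are illustrative, not lifted from the patch:

#include <linux/acpi.h>

struct example_priv {			/* illustrative, holds the ACPI device */
	struct acpi_device *adev;
};

static void example_notify(acpi_handle handle, u32 event, void *data)
{
	struct example_priv *priv = data;

	/* e.g. forward the event as a uevent, as int3400_notify() does */
}

static int example_probe(struct example_priv *priv)
{
	acpi_status status;

	status = acpi_install_notify_handler(priv->adev->handle,
					     ACPI_DEVICE_NOTIFY,
					     example_notify, priv);
	return ACPI_FAILURE(status) ? -ENODEV : 0;
}

static void example_remove(struct example_priv *priv)
{
	acpi_remove_notify_handler(priv->adev->handle, ACPI_DEVICE_NOTIFY,
				   example_notify);
}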
d->lower_limit_index : 2; - *state = d->br->count - 1 - index; + *state = d->upper_limit - d->lower_limit; return 0; } @@ -62,19 +56,15 @@ int3406_thermal_set_cur_state(struct thermal_cooling_device *cooling_dev, unsigned long state) { struct int3406_thermal_data *d = cooling_dev->devdata; - int level, raw_level; + int acpi_level, raw_level; - if (state > d->br->count - 3) + if (state > d->upper_limit - d->lower_limit) return -EINVAL; - state = d->br->count - 1 - state; - level = d->br->levels[state]; + acpi_level = d->br->levels[d->upper_limit - state]; - if ((d->upper_limit && level > d->upper_limit) || - (d->lower_limit && level < d->lower_limit)) - return -EINVAL; + raw_level = ACPI_TO_RAW(acpi_level, d); - raw_level = int3406_thermal_to_raw(level, d); return backlight_device_set_brightness(d->raw_bd, raw_level); } @@ -83,27 +73,22 @@ int3406_thermal_get_cur_state(struct thermal_cooling_device *cooling_dev, unsigned long *state) { struct int3406_thermal_data *d = cooling_dev->devdata; - int raw_level, level, i; - int *levels = d->br->levels; + int acpi_level; + int index; - raw_level = d->raw_bd->props.brightness; - level = int3406_thermal_to_acpi(raw_level, d); + acpi_level = RAW_TO_ACPI(d->raw_bd->props.brightness, d); /* - * There is no 1:1 mapping between the firmware interface level with the - * raw interface level, we will have to find one that is close enough. + * There is no 1:1 mapping between the firmware interface level + * with the raw interface level, we will have to find one that is + * right above it. */ - for (i = 2; i < d->br->count; i++) { - if (level < levels[i]) { - if (i == 2) - break; - if ((level - levels[i - 1]) < (levels[i] - level)) - i--; + for (index = d->lower_limit; index < d->upper_limit; index++) { + if (acpi_level <= d->br->levels[index]) break; - } } - *state = d->br->count - 1 - i; + *state = d->upper_limit - index; return 0; } @@ -117,7 +102,7 @@ static int int3406_thermal_get_index(int *array, int nr, int value) { int i; - for (i = 0; i < nr; i++) { + for (i = 2; i < nr; i++) { if (array[i] == value) break; } @@ -128,27 +113,20 @@ static void int3406_thermal_get_limit(struct int3406_thermal_data *d) { acpi_status status; unsigned long long lower_limit, upper_limit; - int index; status = acpi_evaluate_integer(d->handle, "DDDL", NULL, &lower_limit); - if (ACPI_SUCCESS(status)) { - index = int3406_thermal_get_index(d->br->levels, d->br->count, - lower_limit); - if (index > 0) { - d->lower_limit = (int)lower_limit; - d->lower_limit_index = index; - } - } + if (ACPI_SUCCESS(status)) + d->lower_limit = int3406_thermal_get_index(d->br->levels, + d->br->count, lower_limit); status = acpi_evaluate_integer(d->handle, "DDPC", NULL, &upper_limit); - if (ACPI_SUCCESS(status)) { - index = int3406_thermal_get_index(d->br->levels, d->br->count, - upper_limit); - if (index > 0) { - d->upper_limit = (int)upper_limit; - d->upper_limit_index = index; - } - } + if (ACPI_SUCCESS(status)) + d->upper_limit = int3406_thermal_get_index(d->br->levels, + d->br->count, upper_limit); + + /* lower_limit and upper_limit should be always set */ + d->lower_limit = d->lower_limit > 0 ? d->lower_limit : 2; + d->upper_limit = d->upper_limit > 0 ? 
d->upper_limit : d->br->count - 1; } static void int3406_notify(acpi_handle handle, u32 event, void *data) diff --git a/drivers/thermal/int340x_thermal/processor_thermal_device.c b/drivers/thermal/int340x_thermal/processor_thermal_device.c index ff3b36f339e3..f02341f7134d 100644 --- a/drivers/thermal/int340x_thermal/processor_thermal_device.c +++ b/drivers/thermal/int340x_thermal/processor_thermal_device.c @@ -127,7 +127,7 @@ static struct attribute *power_limit_attrs[] = { NULL }; -static struct attribute_group power_limit_attribute_group = { +static const struct attribute_group power_limit_attribute_group = { .attrs = power_limit_attrs, .name = "power_limits" }; diff --git a/drivers/thermal/intel_pch_thermal.c b/drivers/thermal/intel_pch_thermal.c index 2b49e8d0fe9e..c60b1cfcc64e 100644 --- a/drivers/thermal/intel_pch_thermal.c +++ b/drivers/thermal/intel_pch_thermal.c @@ -49,7 +49,7 @@ #define WPT_TSGPEN 0x84 /* General Purpose Event Enables */ /* Wildcat Point-LP PCH Thermal Register bit definitions */ -#define WPT_TEMP_TSR 0x00ff /* Temp TS Reading */ +#define WPT_TEMP_TSR 0x01ff /* Temp TS Reading */ #define WPT_TSC_CPDE 0x01 /* Catastrophic Power-Down Enable */ #define WPT_TSS_TSDSS 0x10 /* Thermal Sensor Dynamic Shutdown Status */ #define WPT_TSS_GPES 0x08 /* GPE status */ @@ -125,7 +125,7 @@ static int pch_wpt_init(struct pch_thermal_device *ptd, int *nr_trips) *nr_trips = 0; /* Check if BIOS has already enabled thermal sensor */ - if (WPT_TSS_TSDSS & readb(ptd->hw_base + WPT_TSS)) { + if (WPT_TSEL_ETS & readb(ptd->hw_base + WPT_TSEL)) { ptd->bios_enabled = true; goto read_trips; } @@ -141,7 +141,7 @@ static int pch_wpt_init(struct pch_thermal_device *ptd, int *nr_trips) } writeb(tsel|WPT_TSEL_ETS, ptd->hw_base + WPT_TSEL); - if (!(WPT_TSS_TSDSS & readb(ptd->hw_base + WPT_TSS))) { + if (!(WPT_TSEL_ETS & readb(ptd->hw_base + WPT_TSEL))) { dev_err(&ptd->pdev->dev, "Sensor can't be enabled\n"); return -ENODEV; } @@ -174,9 +174,9 @@ static int pch_wpt_init(struct pch_thermal_device *ptd, int *nr_trips) static int pch_wpt_get_temp(struct pch_thermal_device *ptd, int *temp) { - u8 wpt_temp; + u16 wpt_temp; - wpt_temp = WPT_TEMP_TSR & readl(ptd->hw_base + WPT_TEMP); + wpt_temp = WPT_TEMP_TSR & readw(ptd->hw_base + WPT_TEMP); /* Resolution of 1/2 degree C and an offset of -50C */ *temp = (wpt_temp * 1000 / 2 - 50000); @@ -387,7 +387,7 @@ static int intel_pch_thermal_resume(struct device *device) return ptd->ops->resume(ptd); } -static struct pci_device_id intel_pch_thermal_id[] = { +static const struct pci_device_id intel_pch_thermal_id[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_HSW_1), .driver_data = board_hsw, }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_HSW_2), diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c index 7737f14846f9..1e61c09153c9 100644 --- a/drivers/thermal/mtk_thermal.c +++ b/drivers/thermal/mtk_thermal.c @@ -3,6 +3,7 @@ * Author: Hanyi Wu * Sascha Hauer * Dawei Chien + * Louis Yu * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -111,9 +112,10 @@ /* * Layout of the fuses providing the calibration data - * These macros could be used for both MT8173 and MT2701. - * MT8173 has five sensors and need five VTS calibration data, - * and MT2701 has three sensors and need three VTS calibration data. + * These macros could be used for MT8173, MT2701, and MT2712. + * MT8173 has 5 sensors and needs 5 VTS calibration data. 
+ * MT2701 has 3 sensors and needs 3 VTS calibration data. + * MT2712 has 4 sensors and needs 4 VTS calibration data. */ #define MT8173_CALIB_BUF0_VALID BIT(0) #define MT8173_CALIB_BUF1_ADC_GE(x) (((x) >> 22) & 0x3ff) @@ -124,6 +126,8 @@ #define MT8173_CALIB_BUF2_VTS_TSABB(x) (((x) >> 14) & 0x1ff) #define MT8173_CALIB_BUF0_DEGC_CALI(x) (((x) >> 1) & 0x3f) #define MT8173_CALIB_BUF0_O_SLOPE(x) (((x) >> 26) & 0x3f) +#define MT8173_CALIB_BUF0_O_SLOPE_SIGN(x) (((x) >> 7) & 0x1) +#define MT8173_CALIB_BUF1_ID(x) (((x) >> 9) & 0x1) /* MT2701 thermal sensors */ #define MT2701_TS1 0 @@ -136,11 +140,26 @@ /* The total number of temperature sensors in the MT2701 */ #define MT2701_NUM_SENSORS 3 -#define THERMAL_NAME "mtk-thermal" - /* The number of sensing points per bank */ #define MT2701_NUM_SENSORS_PER_ZONE 3 +/* MT2712 thermal sensors */ +#define MT2712_TS1 0 +#define MT2712_TS2 1 +#define MT2712_TS3 2 +#define MT2712_TS4 3 + +/* AUXADC channel 11 is used for the temperature sensors */ +#define MT2712_TEMP_AUXADC_CHANNEL 11 + +/* The total number of temperature sensors in the MT2712 */ +#define MT2712_NUM_SENSORS 4 + +/* The number of sensing points per bank */ +#define MT2712_NUM_SENSORS_PER_ZONE 4 + +#define THERMAL_NAME "mtk-thermal" + struct mtk_thermal; struct thermal_bank_cfg { @@ -215,6 +234,21 @@ static const int mt2701_adcpnp[MT2701_NUM_SENSORS_PER_ZONE] = { static const int mt2701_mux_values[MT2701_NUM_SENSORS] = { 0, 1, 16 }; +/* MT2712 thermal sensor data */ +static const int mt2712_bank_data[MT2712_NUM_SENSORS] = { + MT2712_TS1, MT2712_TS2, MT2712_TS3, MT2712_TS4 +}; + +static const int mt2712_msr[MT2712_NUM_SENSORS_PER_ZONE] = { + TEMP_MSR0, TEMP_MSR1, TEMP_MSR2, TEMP_MSR3 +}; + +static const int mt2712_adcpnp[MT2712_NUM_SENSORS_PER_ZONE] = { + TEMP_ADCPNP0, TEMP_ADCPNP1, TEMP_ADCPNP2, TEMP_ADCPNP3 +}; + +static const int mt2712_mux_values[MT2712_NUM_SENSORS] = { 0, 1, 2, 3 }; + /** * The MT8173 thermal controller has four banks. Each bank can read up to * four temperature sensors simultaneously. The MT8173 has a total of 5 @@ -277,6 +311,31 @@ static const struct mtk_thermal_data mt2701_thermal_data = { .sensor_mux_values = mt2701_mux_values, }; +/** + * The MT2712 thermal controller has one bank, which can read up to + * four temperature sensors simultaneously. The MT2712 has a total of 4 + * temperature sensors. + * + * The thermal core only gets the maximum temperature of this one bank, + * so the bank concept wouldn't be necessary here. However, the SVS (Smart + * Voltage Scaling) unit makes its decisions based on the same bank + * data. 
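+ * The single bank therefore still carries the full four-sensor list and
+ * the per-sensor MSR/ADCPNP register tables defined above.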
+ */ +static const struct mtk_thermal_data mt2712_thermal_data = { + .auxadc_channel = MT2712_TEMP_AUXADC_CHANNEL, + .num_banks = 1, + .num_sensors = MT2712_NUM_SENSORS, + .bank_data = { + { + .num_sensors = 4, + .sensors = mt2712_bank_data, + }, + }, + .msr = mt2712_msr, + .adcpnp = mt2712_adcpnp, + .sensor_mux_values = mt2712_mux_values, +}; + /** * raw_to_mcelsius - convert a raw ADC value to mcelsius * @mt: The thermal controller @@ -552,7 +611,11 @@ static int mtk_thermal_get_calibration_data(struct device *dev, mt->vts[MT8173_TS4] = MT8173_CALIB_BUF2_VTS_TS4(buf[2]); mt->vts[MT8173_TSABB] = MT8173_CALIB_BUF2_VTS_TSABB(buf[2]); mt->degc_cali = MT8173_CALIB_BUF0_DEGC_CALI(buf[0]); - mt->o_slope = MT8173_CALIB_BUF0_O_SLOPE(buf[0]); + if (MT8173_CALIB_BUF1_ID(buf[1]) & + MT8173_CALIB_BUF0_O_SLOPE_SIGN(buf[0])) + mt->o_slope = -MT8173_CALIB_BUF0_O_SLOPE(buf[0]); + else + mt->o_slope = MT8173_CALIB_BUF0_O_SLOPE(buf[0]); } else { dev_info(dev, "Device not calibrated, using default calibration values\n"); } @@ -571,6 +634,10 @@ static const struct of_device_id mtk_thermal_of_match[] = { { .compatible = "mediatek,mt2701-thermal", .data = (void *)&mt2701_thermal_data, + }, + { + .compatible = "mediatek,mt2712-thermal", + .data = (void *)&mt2712_thermal_data, }, { }, }; @@ -645,16 +712,16 @@ static int mtk_thermal_probe(struct platform_device *pdev) return -EINVAL; } + ret = device_reset(&pdev->dev); + if (ret) + return ret; + ret = clk_prepare_enable(mt->clk_auxadc); if (ret) { dev_err(&pdev->dev, "Can't enable auxadc clk: %d\n", ret); return ret; } - ret = device_reset(&pdev->dev); - if (ret) - goto err_disable_clk_auxadc; - ret = clk_prepare_enable(mt->clk_peri_therm); if (ret) { dev_err(&pdev->dev, "Can't enable peri clk: %d\n", ret); @@ -705,6 +772,7 @@ static struct platform_driver mtk_thermal_driver = { module_platform_driver(mtk_thermal_driver); +MODULE_AUTHOR("Louis Yu "); MODULE_AUTHOR("Dawei Chien "); MODULE_AUTHOR("Sascha Hauer "); MODULE_AUTHOR("Hanyi Wu "); diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c index 4362a69ac88d..c866cc165960 100644 --- a/drivers/thermal/qoriq_thermal.c +++ b/drivers/thermal/qoriq_thermal.c @@ -188,7 +188,7 @@ static void qoriq_tmu_init_device(struct qoriq_tmu_data *data) tmu_write(data, TMR_DISABLE, &data->regs->tmr); } -static struct thermal_zone_of_device_ops tmu_tz_ops = { +static const struct thermal_zone_of_device_ops tmu_tz_ops = { .get_temp = tmu_get_temp, }; diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c index 37fcefd06d9f..203aca44a2bb 100644 --- a/drivers/thermal/rcar_gen3_thermal.c +++ b/drivers/thermal/rcar_gen3_thermal.c @@ -225,7 +225,7 @@ static int rcar_gen3_thermal_set_trips(void *devdata, int low, int high) return 0; } -static struct thermal_zone_of_device_ops rcar_gen3_tz_of_ops = { +static const struct thermal_zone_of_device_ops rcar_gen3_tz_of_ops = { .get_temp = rcar_gen3_thermal_get_temp, .set_trips = rcar_gen3_thermal_set_trips, }; diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c index 4c7796512453..206035139110 100644 --- a/drivers/thermal/rockchip_thermal.c +++ b/drivers/thermal/rockchip_thermal.c @@ -320,6 +320,44 @@ static const struct tsadc_table rk3288_code_table[] = { {0, 125000}, }; +static const struct tsadc_table rk3328_code_table[] = { + {0, -40000}, + {296, -40000}, + {304, -35000}, + {313, -30000}, + {331, -20000}, + {340, -15000}, + {349, -10000}, + {359, -5000}, + {368, 0}, + {378, 5000}, + {388, 10000}, + 
{398, 15000}, + {408, 20000}, + {418, 25000}, + {429, 30000}, + {440, 35000}, + {451, 40000}, + {462, 45000}, + {473, 50000}, + {485, 55000}, + {496, 60000}, + {508, 65000}, + {521, 70000}, + {533, 75000}, + {546, 80000}, + {559, 85000}, + {572, 90000}, + {586, 95000}, + {600, 100000}, + {614, 105000}, + {629, 110000}, + {644, 115000}, + {659, 120000}, + {675, 125000}, + {TSADCV2_DATA_MASK, 125000}, +}; + static const struct tsadc_table rk3368_code_table[] = { {0, -40000}, {106, -40000}, @@ -790,6 +828,29 @@ static const struct rockchip_tsadc_chip rk3288_tsadc_data = { }, }; +static const struct rockchip_tsadc_chip rk3328_tsadc_data = { + .chn_id[SENSOR_CPU] = 0, /* cpu sensor is channel 0 */ + .chn_num = 1, /* one channels for tsadc */ + + .tshut_mode = TSHUT_MODE_CRU, /* default TSHUT via CRU */ + .tshut_temp = 95000, + + .initialize = rk_tsadcv2_initialize, + .irq_ack = rk_tsadcv3_irq_ack, + .control = rk_tsadcv3_control, + .get_temp = rk_tsadcv2_get_temp, + .set_alarm_temp = rk_tsadcv2_alarm_temp, + .set_tshut_temp = rk_tsadcv2_tshut_temp, + .set_tshut_mode = rk_tsadcv2_tshut_mode, + + .table = { + .id = rk3328_code_table, + .length = ARRAY_SIZE(rk3328_code_table), + .data_mask = TSADCV2_DATA_MASK, + .mode = ADC_INCREMENT, + }, +}; + static const struct rockchip_tsadc_chip rk3366_tsadc_data = { .chn_id[SENSOR_CPU] = 0, /* cpu sensor is channel 0 */ .chn_id[SENSOR_GPU] = 1, /* gpu sensor is channel 1 */ @@ -874,6 +935,10 @@ static const struct of_device_id of_rockchip_thermal_match[] = { .compatible = "rockchip,rk3288-tsadc", .data = (void *)&rk3288_tsadc_data, }, + { + .compatible = "rockchip,rk3328-tsadc", + .data = (void *)&rk3328_tsadc_data, + }, { .compatible = "rockchip,rk3366-tsadc", .data = (void *)&rk3366_tsadc_data, diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c index 7b8ef09d2b3c..ed805c7c5ace 100644 --- a/drivers/thermal/samsung/exynos_tmu.c +++ b/drivers/thermal/samsung/exynos_tmu.c @@ -1286,7 +1286,7 @@ static int exynos_map_dt_data(struct platform_device *pdev) return 0; } -static struct thermal_zone_of_device_ops exynos_sensor_ops = { +static const struct thermal_zone_of_device_ops exynos_sensor_ops = { .get_temp = exynos_get_temp, .set_emul_temp = exynos_tmu_set_emulation, }; diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 5a51c740e372..2b1b0ba393a4 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -390,7 +390,7 @@ static void handle_critical_trips(struct thermal_zone_device *tz, if (trip_type == THERMAL_TRIP_CRITICAL) { dev_emerg(&tz->device, - "critical temperature reached(%d C),shutting down\n", + "critical temperature reached (%d C), shutting down\n", tz->temperature / 1000); mutex_lock(&poweroff_lock); if (!power_off_triggered) { @@ -836,11 +836,7 @@ static void thermal_release(struct device *dev) if (!strncmp(dev_name(dev), "thermal_zone", sizeof("thermal_zone") - 1)) { tz = to_thermal_zone(dev); - kfree(tz->trip_type_attrs); - kfree(tz->trip_temp_attrs); - kfree(tz->trip_hyst_attrs); - kfree(tz->trips_attribute_group.attrs); - kfree(tz->device.groups); + thermal_zone_destroy_device_groups(tz); kfree(tz); } else if (!strncmp(dev_name(dev), "cooling_device", sizeof("cooling_device") - 1)) { @@ -1213,10 +1209,8 @@ thermal_zone_device_register(const char *type, int trips, int mask, ida_init(&tz->ida); mutex_init(&tz->lock); result = ida_simple_get(&thermal_tz_ida, 0, 0, GFP_KERNEL); - if (result < 0) { - kfree(tz); - return ERR_PTR(result); - } + if (result 
< 0) + goto free_tz; tz->id = result; strlcpy(tz->type, type, sizeof(tz->type)); @@ -1232,18 +1226,15 @@ thermal_zone_device_register(const char *type, int trips, int mask, /* Add nodes that are always present via .groups */ result = thermal_zone_create_device_groups(tz, mask); if (result) - goto unregister; + goto remove_id; /* A new thermal zone needs to be updated anyway. */ atomic_set(&tz->need_update, 1); dev_set_name(&tz->device, "thermal_zone%d", tz->id); result = device_register(&tz->device); - if (result) { - ida_simple_remove(&thermal_tz_ida, tz->id); - kfree(tz); - return ERR_PTR(result); - } + if (result) + goto remove_device_groups; for (count = 0; count < trips; count++) { if (tz->ops->get_trip_type(tz, count, &trip_type)) @@ -1297,6 +1288,14 @@ thermal_zone_device_register(const char *type, int trips, int mask, ida_simple_remove(&thermal_tz_ida, tz->id); device_unregister(&tz->device); return ERR_PTR(result); + +remove_device_groups: + thermal_zone_destroy_device_groups(tz); +remove_id: + ida_simple_remove(&thermal_tz_ida, tz->id); +free_tz: + kfree(tz); + return ERR_PTR(result); } EXPORT_SYMBOL_GPL(thermal_zone_device_register); diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h index 2412b3759e16..27e3b1df7360 100644 --- a/drivers/thermal/thermal_core.h +++ b/drivers/thermal/thermal_core.h @@ -71,6 +71,7 @@ int thermal_build_list_of_policies(char *buf); /* sysfs I/F */ int thermal_zone_create_device_groups(struct thermal_zone_device *, int); +void thermal_zone_destroy_device_groups(struct thermal_zone_device *); void thermal_cooling_device_setup_sysfs(struct thermal_cooling_device *); /* used only at binding time */ ssize_t diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c index a694de907a26..fb80c96d8f73 100644 --- a/drivers/thermal/thermal_sysfs.c +++ b/drivers/thermal/thermal_sysfs.c @@ -605,6 +605,24 @@ static int create_trip_attrs(struct thermal_zone_device *tz, int mask) return 0; } +/** + * destroy_trip_attrs() - destroy attributes for trip points + * @tz: the thermal zone device + * + * helper function to free resources allocated by create_trip_attrs() + */ +static void destroy_trip_attrs(struct thermal_zone_device *tz) +{ + if (!tz) + return; + + kfree(tz->trip_type_attrs); + kfree(tz->trip_temp_attrs); + if (tz->ops->get_trip_hyst) + kfree(tz->trip_hyst_attrs); + kfree(tz->trips_attribute_group.attrs); +} + int thermal_zone_create_device_groups(struct thermal_zone_device *tz, int mask) { @@ -637,6 +655,17 @@ int thermal_zone_create_device_groups(struct thermal_zone_device *tz, return 0; } +void thermal_zone_destroy_device_groups(struct thermal_zone_device *tz) +{ + if (!tz) + return; + + if (tz->trips) + destroy_trip_attrs(tz); + + kfree(tz->device.groups); +} + /* sys I/F for cooling device */ static ssize_t thermal_cooling_device_type_show(struct device *dev, diff --git a/drivers/thermal/uniphier_thermal.c b/drivers/thermal/uniphier_thermal.c new file mode 100644 index 000000000000..95704732f760 --- /dev/null +++ b/drivers/thermal/uniphier_thermal.c @@ -0,0 +1,384 @@ +/** + * uniphier_thermal.c - Socionext UniPhier thermal driver + * + * Copyright 2014 Panasonic Corporation + * Copyright 2016-2017 Socionext Inc. + * All rights reserved. + * + * Author: + * Kunihiko Hayashi + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 of + * the License as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "thermal_core.h" + +/* + * block registers + * addresses are the offset from .block_base + */ +#define PVTCTLEN 0x0000 +#define PVTCTLEN_EN BIT(0) + +#define PVTCTLMODE 0x0004 +#define PVTCTLMODE_MASK 0xf +#define PVTCTLMODE_TEMPMON 0x5 + +#define EMONREPEAT 0x0040 +#define EMONREPEAT_ENDLESS BIT(24) +#define EMONREPEAT_PERIOD GENMASK(3, 0) +#define EMONREPEAT_PERIOD_1000000 0x9 + +/* + * common registers + * addresses are the offset from .map_base + */ +#define PVTCTLSEL 0x0900 +#define PVTCTLSEL_MASK GENMASK(2, 0) +#define PVTCTLSEL_MONITOR 0 + +#define SETALERT0 0x0910 +#define SETALERT1 0x0914 +#define SETALERT2 0x0918 +#define SETALERT_TEMP_OVF (GENMASK(7, 0) << 16) +#define SETALERT_TEMP_OVF_VALUE(val) (((val) & GENMASK(7, 0)) << 16) +#define SETALERT_EN BIT(0) + +#define PMALERTINTCTL 0x0920 +#define PMALERTINTCTL_CLR(ch) BIT(4 * (ch) + 2) +#define PMALERTINTCTL_SET(ch) BIT(4 * (ch) + 1) +#define PMALERTINTCTL_EN(ch) BIT(4 * (ch) + 0) +#define PMALERTINTCTL_MASK (GENMASK(10, 8) | GENMASK(6, 4) | \ + GENMASK(2, 0)) + +#define TMOD 0x0928 +#define TMOD_WIDTH 9 + +#define TMODCOEF 0x0e5c + +#define TMODSETUP0_EN BIT(30) +#define TMODSETUP0_VAL(val) (((val) & GENMASK(13, 0)) << 16) +#define TMODSETUP1_EN BIT(15) +#define TMODSETUP1_VAL(val) ((val) & GENMASK(14, 0)) + +/* SoC critical temperature */ +#define CRITICAL_TEMP_LIMIT (120 * 1000) + +/* Max # of alert channels */ +#define ALERT_CH_NUM 3 + +/* SoC specific thermal sensor data */ +struct uniphier_tm_soc_data { + u32 map_base; + u32 block_base; + u32 tmod_setup_addr; +}; + +struct uniphier_tm_dev { + struct regmap *regmap; + struct device *dev; + bool alert_en[ALERT_CH_NUM]; + struct thermal_zone_device *tz_dev; + const struct uniphier_tm_soc_data *data; +}; + +static int uniphier_tm_initialize_sensor(struct uniphier_tm_dev *tdev) +{ + struct regmap *map = tdev->regmap; + u32 val; + u32 tmod_calib[2]; + int ret; + + /* stop PVT */ + regmap_write_bits(map, tdev->data->block_base + PVTCTLEN, + PVTCTLEN_EN, 0); + + /* + * Since SoC has a calibrated value that was set in advance, + * TMODCOEF shows non-zero and PVT refers the value internally. + * + * If TMODCOEF shows zero, the boards don't have the calibrated + * value, and the driver has to set default value from DT. 
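+ * In that case the driver reads the two-cell "socionext,tmod-calibration"
+ * property from DT and writes it into the SoC-specific tmod_setup_addr
+ * register.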
+ */ + ret = regmap_read(map, tdev->data->map_base + TMODCOEF, &val); + if (ret) + return ret; + if (!val) { + /* look for the default values in DT */ + ret = of_property_read_u32_array(tdev->dev->of_node, + "socionext,tmod-calibration", + tmod_calib, + ARRAY_SIZE(tmod_calib)); + if (ret) + return ret; + + regmap_write(map, tdev->data->tmod_setup_addr, + TMODSETUP0_EN | TMODSETUP0_VAL(tmod_calib[0]) | + TMODSETUP1_EN | TMODSETUP1_VAL(tmod_calib[1])); + } + + /* select temperature mode */ + regmap_write_bits(map, tdev->data->block_base + PVTCTLMODE, + PVTCTLMODE_MASK, PVTCTLMODE_TEMPMON); + + /* set monitoring period */ + regmap_write_bits(map, tdev->data->block_base + EMONREPEAT, + EMONREPEAT_ENDLESS | EMONREPEAT_PERIOD, + EMONREPEAT_ENDLESS | EMONREPEAT_PERIOD_1000000); + + /* set monitor mode */ + regmap_write_bits(map, tdev->data->map_base + PVTCTLSEL, + PVTCTLSEL_MASK, PVTCTLSEL_MONITOR); + + return 0; +} + +static void uniphier_tm_set_alert(struct uniphier_tm_dev *tdev, u32 ch, + u32 temp) +{ + struct regmap *map = tdev->regmap; + + /* set alert temperature */ + regmap_write_bits(map, tdev->data->map_base + SETALERT0 + (ch << 2), + SETALERT_EN | SETALERT_TEMP_OVF, + SETALERT_EN | + SETALERT_TEMP_OVF_VALUE(temp / 1000)); +} + +static void uniphier_tm_enable_sensor(struct uniphier_tm_dev *tdev) +{ + struct regmap *map = tdev->regmap; + int i; + u32 bits = 0; + + for (i = 0; i < ALERT_CH_NUM; i++) + if (tdev->alert_en[i]) + bits |= PMALERTINTCTL_EN(i); + + /* enable alert interrupt */ + regmap_write_bits(map, tdev->data->map_base + PMALERTINTCTL, + PMALERTINTCTL_MASK, bits); + + /* start PVT */ + regmap_write_bits(map, tdev->data->block_base + PVTCTLEN, + PVTCTLEN_EN, PVTCTLEN_EN); + + usleep_range(700, 1500); /* The spec note says at least 700us */ +} + +static void uniphier_tm_disable_sensor(struct uniphier_tm_dev *tdev) +{ + struct regmap *map = tdev->regmap; + + /* disable alert interrupt */ + regmap_write_bits(map, tdev->data->map_base + PMALERTINTCTL, + PMALERTINTCTL_MASK, 0); + + /* stop PVT */ + regmap_write_bits(map, tdev->data->block_base + PVTCTLEN, + PVTCTLEN_EN, 0); + + usleep_range(1000, 2000); /* The spec note says at least 1ms */ +} + +static int uniphier_tm_get_temp(void *data, int *out_temp) +{ + struct uniphier_tm_dev *tdev = data; + struct regmap *map = tdev->regmap; + int ret; + u32 temp; + + ret = regmap_read(map, tdev->data->map_base + TMOD, &temp); + if (ret) + return ret; + + /* MSB of the TMOD field is a sign bit */ + *out_temp = sign_extend32(temp, TMOD_WIDTH - 1) * 1000; + + return 0; +} + +static const struct thermal_zone_of_device_ops uniphier_of_thermal_ops = { + .get_temp = uniphier_tm_get_temp, +}; + +static void uniphier_tm_irq_clear(struct uniphier_tm_dev *tdev) +{ + u32 mask = 0, bits = 0; + int i; + + for (i = 0; i < ALERT_CH_NUM; i++) { + mask |= (PMALERTINTCTL_CLR(i) | PMALERTINTCTL_SET(i)); + bits |= PMALERTINTCTL_CLR(i); + } + + /* clear alert interrupt */ + regmap_write_bits(tdev->regmap, + tdev->data->map_base + PMALERTINTCTL, mask, bits); +} + +static irqreturn_t uniphier_tm_alarm_irq(int irq, void *_tdev) +{ + struct uniphier_tm_dev *tdev = _tdev; + + disable_irq_nosync(irq); + uniphier_tm_irq_clear(tdev); + + return IRQ_WAKE_THREAD; +} + +static irqreturn_t uniphier_tm_alarm_irq_thread(int irq, void *_tdev) +{ + struct uniphier_tm_dev *tdev = _tdev; + + thermal_zone_device_update(tdev->tz_dev, THERMAL_EVENT_UNSPECIFIED); + + return IRQ_HANDLED; +} + +static int uniphier_tm_probe(struct platform_device *pdev) +{ + struct device *dev = 
&pdev->dev; + struct regmap *regmap; + struct device_node *parent; + struct uniphier_tm_dev *tdev; + const struct thermal_trip *trips; + int i, ret, irq, ntrips, crit_temp = INT_MAX; + + tdev = devm_kzalloc(dev, sizeof(*tdev), GFP_KERNEL); + if (!tdev) + return -ENOMEM; + tdev->dev = dev; + + tdev->data = of_device_get_match_data(dev); + if (WARN_ON(!tdev->data)) + return -EINVAL; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + /* get regmap from syscon node */ + parent = of_get_parent(dev->of_node); /* parent should be syscon node */ + regmap = syscon_node_to_regmap(parent); + of_node_put(parent); + if (IS_ERR(regmap)) { + dev_err(dev, "failed to get regmap (error %ld)\n", + PTR_ERR(regmap)); + return PTR_ERR(regmap); + } + tdev->regmap = regmap; + + ret = uniphier_tm_initialize_sensor(tdev); + if (ret) { + dev_err(dev, "failed to initialize sensor\n"); + return ret; + } + + ret = devm_request_threaded_irq(dev, irq, uniphier_tm_alarm_irq, + uniphier_tm_alarm_irq_thread, + 0, "thermal", tdev); + if (ret) + return ret; + + platform_set_drvdata(pdev, tdev); + + tdev->tz_dev = devm_thermal_zone_of_sensor_register(dev, 0, tdev, + &uniphier_of_thermal_ops); + if (IS_ERR(tdev->tz_dev)) { + dev_err(dev, "failed to register sensor device\n"); + return PTR_ERR(tdev->tz_dev); + } + + /* get trip points */ + trips = of_thermal_get_trip_points(tdev->tz_dev); + ntrips = of_thermal_get_ntrips(tdev->tz_dev); + if (ntrips > ALERT_CH_NUM) { + dev_err(dev, "thermal zone has too many trips\n"); + return -E2BIG; + } + + /* set alert temperatures */ + for (i = 0; i < ntrips; i++) { + if (trips[i].type == THERMAL_TRIP_CRITICAL && + trips[i].temperature < crit_temp) + crit_temp = trips[i].temperature; + uniphier_tm_set_alert(tdev, i, trips[i].temperature); + tdev->alert_en[i] = true; + } + if (crit_temp > CRITICAL_TEMP_LIMIT) { + dev_err(dev, "critical trip is over limit(>%d), or not set\n", + CRITICAL_TEMP_LIMIT); + return -EINVAL; + } + + uniphier_tm_enable_sensor(tdev); + + return 0; +} + +static int uniphier_tm_remove(struct platform_device *pdev) +{ + struct uniphier_tm_dev *tdev = platform_get_drvdata(pdev); + + /* disable sensor */ + uniphier_tm_disable_sensor(tdev); + + return 0; +} + +static const struct uniphier_tm_soc_data uniphier_pxs2_tm_data = { + .map_base = 0xe000, + .block_base = 0xe000, + .tmod_setup_addr = 0xe904, +}; + +static const struct uniphier_tm_soc_data uniphier_ld20_tm_data = { + .map_base = 0xe000, + .block_base = 0xe800, + .tmod_setup_addr = 0xe938, +}; + +static const struct of_device_id uniphier_tm_dt_ids[] = { + { + .compatible = "socionext,uniphier-pxs2-thermal", + .data = &uniphier_pxs2_tm_data, + }, + { + .compatible = "socionext,uniphier-ld20-thermal", + .data = &uniphier_ld20_tm_data, + }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, uniphier_tm_dt_ids); + +static struct platform_driver uniphier_tm_driver = { + .probe = uniphier_tm_probe, + .remove = uniphier_tm_remove, + .driver = { + .name = "uniphier-thermal", + .of_match_table = uniphier_tm_dt_ids, + }, +}; +module_platform_driver(uniphier_tm_driver); + +MODULE_AUTHOR("Kunihiko Hayashi "); +MODULE_DESCRIPTION("UniPhier thermal driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/thermal/zx2967_thermal.c b/drivers/thermal/zx2967_thermal.c index a5670ad2cfc8..6acce0bce7c0 100644 --- a/drivers/thermal/zx2967_thermal.c +++ b/drivers/thermal/zx2967_thermal.c @@ -111,7 +111,7 @@ static int zx2967_thermal_get_temp(void *data, int *temp) return ret; } -static struct thermal_zone_of_device_ops 
zx2967_of_thermal_ops = { +static const struct thermal_zone_of_device_ops zx2967_of_thermal_ops = { .get_temp = zx2967_thermal_get_temp, }; diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c index bdaac1ff00a5..53250fc057e1 100644 --- a/drivers/thunderbolt/icm.c +++ b/drivers/thunderbolt/icm.c @@ -13,9 +13,9 @@ */ #include -#include #include #include +#include #include #include #include @@ -102,11 +102,6 @@ static inline u64 get_route(u32 route_hi, u32 route_lo) return (u64)route_hi << 32 | route_lo; } -static inline bool is_apple(void) -{ - return dmi_match(DMI_BOARD_VENDOR, "Apple Inc."); -} - static bool icm_match(const struct tb_cfg_request *req, const struct ctl_pkg *pkg) { @@ -176,7 +171,7 @@ static int icm_request(struct tb *tb, const void *request, size_t request_size, static bool icm_fr_is_supported(struct tb *tb) { - return !is_apple(); + return !x86_apple_machine; } static inline int icm_fr_get_switch_index(u32 port) @@ -517,7 +512,7 @@ static bool icm_ar_is_supported(struct tb *tb) * Starting from Alpine Ridge we can use ICM on Apple machines * as well. We just need to reset and re-enable it first. */ - if (!is_apple()) + if (!x86_apple_machine) return true; /* @@ -1011,7 +1006,7 @@ static int icm_start(struct tb *tb) * don't provide images publicly either. To be on the safe side * prevent root switch NVM upgrade on Macs for now. */ - tb->root_switch->no_nvm_upgrade = is_apple(); + tb->root_switch->no_nvm_upgrade = x86_apple_machine; ret = tb_switch_add(tb->root_switch); if (ret) diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c index 1b02ca0b6129..0b22ad9d68b4 100644 --- a/drivers/thunderbolt/tb.c +++ b/drivers/thunderbolt/tb.c @@ -7,7 +7,7 @@ #include #include #include -#include +#include #include "tb.h" #include "tb_regs.h" @@ -453,7 +453,7 @@ struct tb *tb_probe(struct tb_nhi *nhi) struct tb_cm *tcm; struct tb *tb; - if (!dmi_match(DMI_BOARD_VENDOR, "Apple Inc.")) + if (!x86_apple_machine) return NULL; tb = tb_domain_alloc(nhi, sizeof(*tcm)); diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig index 873e0ba89737..cc2b4d9433ed 100644 --- a/drivers/tty/Kconfig +++ b/drivers/tty/Kconfig @@ -458,4 +458,9 @@ config MIPS_EJTAG_FDC_KGDB_CHAN help FDC channel number to use for KGDB. +config VCC + tristate "Sun Virtual Console Concentrator" + depends on SUN_LDOMS + help + Support for Sun logical domain consoles. endif # TTY diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile index 8689279afdf1..16330a819685 100644 --- a/drivers/tty/Makefile +++ b/drivers/tty/Makefile @@ -33,5 +33,6 @@ obj-$(CONFIG_PPC_EPAPR_HV_BYTECHAN) += ehv_bytechan.o obj-$(CONFIG_GOLDFISH_TTY) += goldfish.o obj-$(CONFIG_DA_TTY) += metag_da.o obj-$(CONFIG_MIPS_EJTAG_FDC_TTY) += mips_ejtag_fdc.o +obj-$(CONFIG_VCC) += vcc.o obj-y += ipwireless/ diff --git a/drivers/tty/hvc/hvc_vio.c b/drivers/tty/hvc/hvc_vio.c index 653f99271865..a1d272ac82bb 100644 --- a/drivers/tty/hvc/hvc_vio.c +++ b/drivers/tty/hvc/hvc_vio.c @@ -442,6 +442,14 @@ void __init hvc_vio_init_early(void) #ifdef CONFIG_PPC_EARLY_DEBUG_LPAR void __init udbg_init_debug_lpar(void) { + /* + * If we're running as a hypervisor then we definitely can't call the + * hypervisor to print debug output (we *are* the hypervisor), so don't + * register if we detect that MSR_HV=1. 
+ */ + if (mfmsr() & MSR_HV) + return; + hvterm_privs[0] = &hvterm_priv0; hvterm_priv0.termno = 0; hvterm_priv0.proto = HV_PROTOCOL_RAW; @@ -455,6 +463,10 @@ void __init udbg_init_debug_lpar(void) #ifdef CONFIG_PPC_EARLY_DEBUG_LPAR_HVSI void __init udbg_init_debug_lpar_hvsi(void) { + /* See comment above in udbg_init_debug_lpar() */ + if (mfmsr() & MSR_HV) + return; + hvterm_privs[0] = &hvterm_priv0; hvterm_priv0.termno = CONFIG_PPC_EARLY_DEBUG_HVSI_VTERMNO; hvterm_priv0.proto = HV_PROTOCOL_HVSI; diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c index 1c0c9553bc05..7dd38047ba23 100644 --- a/drivers/tty/mxser.c +++ b/drivers/tty/mxser.c @@ -246,11 +246,11 @@ struct mxser_port { unsigned char err_shadow; struct async_icount icount; /* kernel counters for 4 input interrupts */ - int timeout; + unsigned int timeout; int read_status_mask; int ignore_status_mask; - int xmit_fifo_size; + unsigned int xmit_fifo_size; int xmit_head; int xmit_tail; int xmit_cnt; @@ -572,8 +572,9 @@ static void mxser_dtr_rts(struct tty_port *port, int on) static int mxser_set_baud(struct tty_struct *tty, long newspd) { struct mxser_port *info = tty->driver_data; - int quot = 0, baud; + unsigned int quot = 0, baud; unsigned char cval; + u64 timeout; if (!info->ioaddr) return -1; @@ -594,8 +595,13 @@ static int mxser_set_baud(struct tty_struct *tty, long newspd) quot = 0; } - info->timeout = ((info->xmit_fifo_size * HZ * 10 * quot) / info->baud_base); - info->timeout += HZ / 50; /* Add .02 seconds of slop */ + /* + * worst case (128 * 1000 * 10 * 18432) needs 35 bits, so divide in the + * u64 domain + */ + timeout = (u64)info->xmit_fifo_size * HZ * 10 * quot; + do_div(timeout, info->baud_base); + info->timeout = timeout + HZ / 50; /* Add .02 seconds of slop */ if (quot) { info->MCR |= UART_MCR_DTR; diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c index 583c9a0c7ecc..8c48c3784831 100644 --- a/drivers/tty/serial/bcm63xx_uart.c +++ b/drivers/tty/serial/bcm63xx_uart.c @@ -507,9 +507,14 @@ static void bcm_uart_set_termios(struct uart_port *port, { unsigned int ctl, baud, quot, ier; unsigned long flags; + int tries; spin_lock_irqsave(&port->lock, flags); + /* Drain the hot tub fully before we power it off for the winter. 
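In other words, wait up to 30 ms (3 x 10 ms polls) for the TX FIFO to drain, so queued characters are not lost when the UART is disabled and flushed for the new speed.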
*/ + for (tries = 3; !bcm_uart_tx_empty(port) && tries; tries--) + mdelay(10); + /* disable uart while changing speed */ bcm_uart_disable(port); bcm_uart_flush(port); diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index 849c1f9991ce..f0252184291e 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -1276,7 +1276,6 @@ static void rx_dma_timer_init(struct lpuart_port *sport) static int lpuart_startup(struct uart_port *port) { struct lpuart_port *sport = container_of(port, struct lpuart_port, port); - int ret; unsigned long flags; unsigned char temp; @@ -1291,11 +1290,6 @@ static int lpuart_startup(struct uart_port *port) sport->rxfifo_size = 0x1 << (((temp >> UARTPFIFO_RXSIZE_OFF) & UARTPFIFO_FIFOSIZE_MASK) + 1); - ret = devm_request_irq(port->dev, port->irq, lpuart_int, 0, - DRIVER_NAME, sport); - if (ret) - return ret; - spin_lock_irqsave(&sport->port.lock, flags); lpuart_setup_watermark(sport); @@ -1333,7 +1327,6 @@ static int lpuart_startup(struct uart_port *port) static int lpuart32_startup(struct uart_port *port) { struct lpuart_port *sport = container_of(port, struct lpuart_port, port); - int ret; unsigned long flags; unsigned long temp; @@ -1346,11 +1339,6 @@ static int lpuart32_startup(struct uart_port *port) sport->rxfifo_size = 0x1 << (((temp >> UARTFIFO_RXSIZE_OFF) & UARTFIFO_FIFOSIZE_MASK) - 1); - ret = devm_request_irq(port->dev, port->irq, lpuart32_int, 0, - DRIVER_NAME, sport); - if (ret) - return ret; - spin_lock_irqsave(&sport->port.lock, flags); lpuart32_setup_watermark(sport); @@ -1380,8 +1368,6 @@ static void lpuart_shutdown(struct uart_port *port) spin_unlock_irqrestore(&port->lock, flags); - devm_free_irq(port->dev, port->irq, sport); - if (sport->lpuart_dma_rx_use) { del_timer_sync(&sport->lpuart_timer); lpuart_dma_rx_free(&sport->port); @@ -1400,7 +1386,6 @@ static void lpuart_shutdown(struct uart_port *port) static void lpuart32_shutdown(struct uart_port *port) { - struct lpuart_port *sport = container_of(port, struct lpuart_port, port); unsigned long temp; unsigned long flags; @@ -1413,8 +1398,6 @@ static void lpuart32_shutdown(struct uart_port *port) lpuart32_write(port, temp, UARTCTRL); spin_unlock_irqrestore(&port->lock, flags); - - devm_free_irq(port->dev, port->irq, sport); } static void @@ -2212,16 +2195,22 @@ static int lpuart_probe(struct platform_device *pdev) platform_set_drvdata(pdev, &sport->port); - if (lpuart_is_32(sport)) + if (lpuart_is_32(sport)) { lpuart_reg.cons = LPUART32_CONSOLE; - else + ret = devm_request_irq(&pdev->dev, sport->port.irq, lpuart32_int, 0, + DRIVER_NAME, sport); + } else { lpuart_reg.cons = LPUART_CONSOLE; + ret = devm_request_irq(&pdev->dev, sport->port.irq, lpuart_int, 0, + DRIVER_NAME, sport); + } + + if (ret) + goto failed_irq_request; ret = uart_add_one_port(&lpuart_reg, &sport->port); - if (ret) { - clk_disable_unprepare(sport->clk); - return ret; - } + if (ret) + goto failed_attach_port; sport->dma_tx_chan = dma_request_slave_channel(sport->port.dev, "tx"); if (!sport->dma_tx_chan) @@ -2240,6 +2229,11 @@ static int lpuart_probe(struct platform_device *pdev) } return 0; + +failed_attach_port: +failed_irq_request: + clk_disable_unprepare(sport->clk); + return ret; } static int lpuart_remove(struct platform_device *pdev) diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c index ae8cfc81ffc5..d9123f995705 100644 --- a/drivers/tty/serial/pch_uart.c +++ b/drivers/tty/serial/pch_uart.c @@ -371,7 +371,7 @@ static const struct file_operations 
port_regs_ops = { }; #endif /* CONFIG_DEBUG_FS */ -static struct dmi_system_id pch_uart_dmi_table[] = { +static const struct dmi_system_id pch_uart_dmi_table[] = { { .ident = "CM-iTC", { diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c index cdd2f942317c..b9c7a904c1ea 100644 --- a/drivers/tty/serial/sccnxp.c +++ b/drivers/tty/serial/sccnxp.c @@ -889,7 +889,16 @@ static int sccnxp_probe(struct platform_device *pdev) goto err_out; uartclk = 0; } else { - clk_prepare_enable(clk); + ret = clk_prepare_enable(clk); + if (ret) + goto err_out; + + ret = devm_add_action_or_reset(&pdev->dev, + (void(*)(void *))clk_disable_unprepare, + clk); + if (ret) + goto err_out; + uartclk = clk_get_rate(clk); } @@ -988,7 +997,7 @@ static int sccnxp_probe(struct platform_device *pdev) uart_unregister_driver(&s->uart); err_out: if (!IS_ERR(s->regulator)) - return regulator_disable(s->regulator); + regulator_disable(s->regulator); return ret; } diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c new file mode 100644 index 000000000000..ef01d24858cd --- /dev/null +++ b/drivers/tty/vcc.c @@ -0,0 +1,1155 @@ +/* vcc.c: sun4v virtual channel concentrator + * + * Copyright (C) 2017 Oracle. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRV_MODULE_NAME "vcc" +#define DRV_MODULE_VERSION "1.1" +#define DRV_MODULE_RELDATE "July 1, 2017" + +static char version[] = + DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")"; + +MODULE_DESCRIPTION("Sun LDOM virtual console concentrator driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_MODULE_VERSION); + +struct vcc_port { + struct vio_driver_state vio; + + spinlock_t lock; + char *domain; + struct tty_struct *tty; /* only populated while dev is open */ + unsigned long index; /* index into the vcc_table */ + + u64 refcnt; + bool excl_locked; + + bool removed; + + /* This buffer is required to support the tty write_room interface + * and guarantee that any characters that the driver accepts will + * be eventually sent, either immediately or later. + */ + int chars_in_buffer; + struct vio_vcc buffer; + + struct timer_list rx_timer; + struct timer_list tx_timer; +}; + +/* Microseconds that thread will delay waiting for a vcc port ref */ +#define VCC_REF_DELAY 100 + +#define VCC_MAX_PORTS 1024 +#define VCC_MINOR_START 0 /* must be zero */ +#define VCC_BUFF_LEN VIO_VCC_MTU_SIZE + +#define VCC_CTL_BREAK -1 +#define VCC_CTL_HUP -2 + +static const char vcc_driver_name[] = "vcc"; +static const char vcc_device_node[] = "vcc"; +static struct tty_driver *vcc_tty_driver; + +static struct vcc_port *vcc_table[VCC_MAX_PORTS]; +static DEFINE_SPINLOCK(vcc_table_lock); + +int vcc_dbg; +int vcc_dbg_ldc; +int vcc_dbg_vio; + +module_param(vcc_dbg, uint, 0664); +module_param(vcc_dbg_ldc, uint, 0664); +module_param(vcc_dbg_vio, uint, 0664); + +#define VCC_DBG_DRV 0x1 +#define VCC_DBG_LDC 0x2 +#define VCC_DBG_PKT 0x4 + +#define vccdbg(f, a...) \ + do { \ + if (vcc_dbg & VCC_DBG_DRV) \ + pr_info(f, ## a); \ + } while (0) \ + +#define vccdbgl(l) \ + do { \ + if (vcc_dbg & VCC_DBG_LDC) \ + ldc_print(l); \ + } while (0) \ + +#define vccdbgp(pkt) \ + do { \ + if (vcc_dbg & VCC_DBG_PKT) { \ + int i; \ + for (i = 0; i < pkt.tag.stype; i++) \ + pr_info("[%c]", pkt.data[i]); \ + } \ + } while (0) \ + +/* Note: Be careful when adding flags to this line discipline. 
Don't + * add anything that will cause echoing or we'll go into recursive + * loop echoing chars back and forth with the console drivers. + */ +static const struct ktermios vcc_tty_termios = { + .c_iflag = IGNBRK | IGNPAR, + .c_oflag = OPOST, + .c_cflag = B38400 | CS8 | CREAD | HUPCL, + .c_cc = INIT_C_CC, + .c_ispeed = 38400, + .c_ospeed = 38400 +}; + +/** + * vcc_table_add() - Add VCC port to the VCC table + * @port: pointer to the VCC port + * + * Return: index of the port in the VCC table on success, + * -1 on failure + */ +static int vcc_table_add(struct vcc_port *port) +{ + unsigned long flags; + int i; + + spin_lock_irqsave(&vcc_table_lock, flags); + for (i = VCC_MINOR_START; i < VCC_MAX_PORTS; i++) { + if (!vcc_table[i]) { + vcc_table[i] = port; + break; + } + } + spin_unlock_irqrestore(&vcc_table_lock, flags); + + if (i < VCC_MAX_PORTS) + return i; + else + return -1; +} + +/** + * vcc_table_remove() - Removes a VCC port from the VCC table + * @index: Index into the VCC table + */ +static void vcc_table_remove(unsigned long index) +{ + unsigned long flags; + + if (WARN_ON(index >= VCC_MAX_PORTS)) + return; + + spin_lock_irqsave(&vcc_table_lock, flags); + vcc_table[index] = NULL; + spin_unlock_irqrestore(&vcc_table_lock, flags); +} + +/** + * vcc_get() - Gets a reference to VCC port + * @index: Index into the VCC table + * @excl: Indicates if an exclusive access is requested + * + * Return: reference to the VCC port, if found + * NULL, if port not found + */ +static struct vcc_port *vcc_get(unsigned long index, bool excl) +{ + struct vcc_port *port; + unsigned long flags; + +try_again: + spin_lock_irqsave(&vcc_table_lock, flags); + + port = vcc_table[index]; + if (!port) { + spin_unlock_irqrestore(&vcc_table_lock, flags); + return NULL; + } + + if (!excl) { + if (port->excl_locked) { + spin_unlock_irqrestore(&vcc_table_lock, flags); + udelay(VCC_REF_DELAY); + goto try_again; + } + port->refcnt++; + spin_unlock_irqrestore(&vcc_table_lock, flags); + return port; + } + + if (port->refcnt) { + spin_unlock_irqrestore(&vcc_table_lock, flags); + /* Threads wanting exclusive access will wait half the time, + * probably giving them higher priority in the case of + * multiple waiters. 
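+ * (They poll in VCC_REF_DELAY/2 = 50 us steps instead of the full
+ * 100 us used by the non-exclusive path above.)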
+ */ + udelay(VCC_REF_DELAY/2); + goto try_again; + } + + port->refcnt++; + port->excl_locked = true; + spin_unlock_irqrestore(&vcc_table_lock, flags); + + return port; +} + +/** + * vcc_put() - Returns a reference to VCC port + * @port: pointer to VCC port + * @excl: Indicates if the returned reference is an exclusive reference + * + * Note: It's the caller's responsibility to ensure the correct value + * for the excl flag + */ +static void vcc_put(struct vcc_port *port, bool excl) +{ + unsigned long flags; + + if (!port) + return; + + spin_lock_irqsave(&vcc_table_lock, flags); + + /* check if caller attempted to put with the wrong flags */ + if (WARN_ON((excl && !port->excl_locked) || + (!excl && port->excl_locked))) + goto done; + + port->refcnt--; + + if (excl) + port->excl_locked = false; + +done: + spin_unlock_irqrestore(&vcc_table_lock, flags); +} + +/** + * vcc_get_ne() - Get a non-exclusive reference to VCC port + * @index: Index into the VCC table + * + * Gets a non-exclusive reference to VCC port, if it's not removed + * + * Return: pointer to the VCC port, if found + * NULL, if port not found + */ +static struct vcc_port *vcc_get_ne(unsigned long index) +{ + struct vcc_port *port; + + port = vcc_get(index, false); + + if (port && port->removed) { + vcc_put(port, false); + return NULL; + } + + return port; +} + +static void vcc_kick_rx(struct vcc_port *port) +{ + struct vio_driver_state *vio = &port->vio; + + assert_spin_locked(&port->lock); + + if (!timer_pending(&port->rx_timer) && !port->removed) { + disable_irq_nosync(vio->vdev->rx_irq); + port->rx_timer.expires = (jiffies + 1); + add_timer(&port->rx_timer); + } +} + +static void vcc_kick_tx(struct vcc_port *port) +{ + assert_spin_locked(&port->lock); + + if (!timer_pending(&port->tx_timer) && !port->removed) { + port->tx_timer.expires = (jiffies + 1); + add_timer(&port->tx_timer); + } +} + +static int vcc_rx_check(struct tty_struct *tty, int size) +{ + if (WARN_ON(!tty || !tty->port)) + return 1; + + /* tty_buffer_request_room won't sleep because it uses + * GFP_ATOMIC flag to allocate buffer + */ + if (test_bit(TTY_THROTTLED, &tty->flags) || + (tty_buffer_request_room(tty->port, VCC_BUFF_LEN) < VCC_BUFF_LEN)) + return 0; + + return 1; +} + +static int vcc_rx(struct tty_struct *tty, char *buf, int size) +{ + int len = 0; + + if (WARN_ON(!tty || !tty->port)) + return len; + + len = tty_insert_flip_string(tty->port, buf, size); + if (len) + tty_flip_buffer_push(tty->port); + + return len; +} + +static int vcc_ldc_read(struct vcc_port *port) +{ + struct vio_driver_state *vio = &port->vio; + struct tty_struct *tty; + struct vio_vcc pkt; + int rv = 0; + + tty = port->tty; + if (!tty) { + rv = ldc_rx_reset(vio->lp); + vccdbg("VCC: reset rx q: rv=%d\n", rv); + goto done; + } + + /* Read as long as LDC has incoming data. 
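The loop stops once the TTY flip buffer cannot accept a full MTU (vcc_rx_check() arms the rx timer for a retry) or ldc_read() stops returning data.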
*/ + while (1) { + if (!vcc_rx_check(tty, VIO_VCC_MTU_SIZE)) { + vcc_kick_rx(port); + break; + } + + vccdbgl(vio->lp); + + rv = ldc_read(vio->lp, &pkt, sizeof(pkt)); + if (rv <= 0) + break; + + vccdbg("VCC: ldc_read()=%d\n", rv); + vccdbg("TAG [%02x:%02x:%04x:%08x]\n", + pkt.tag.type, pkt.tag.stype, + pkt.tag.stype_env, pkt.tag.sid); + + if (pkt.tag.type == VIO_TYPE_DATA) { + vccdbgp(pkt); + /* vcc_rx_check ensures memory availability */ + vcc_rx(tty, pkt.data, pkt.tag.stype); + } else { + pr_err("VCC: unknown msg [%02x:%02x:%04x:%08x]\n", + pkt.tag.type, pkt.tag.stype, + pkt.tag.stype_env, pkt.tag.sid); + rv = -ECONNRESET; + break; + } + + WARN_ON(rv != LDC_PACKET_SIZE); + } + +done: + return rv; +} + +static void vcc_rx_timer(unsigned long index) +{ + struct vio_driver_state *vio; + struct vcc_port *port; + unsigned long flags; + int rv; + + port = vcc_get_ne(index); + if (!port) + return; + + spin_lock_irqsave(&port->lock, flags); + port->rx_timer.expires = 0; + + vio = &port->vio; + + enable_irq(vio->vdev->rx_irq); + + if (!port->tty || port->removed) + goto done; + + rv = vcc_ldc_read(port); + if (rv == -ECONNRESET) + vio_conn_reset(vio); + +done: + spin_unlock_irqrestore(&port->lock, flags); + vcc_put(port, false); +} + +static void vcc_tx_timer(unsigned long index) +{ + struct vcc_port *port; + struct vio_vcc *pkt; + unsigned long flags; + int tosend = 0; + int rv; + + port = vcc_get_ne(index); + if (!port) + return; + + spin_lock_irqsave(&port->lock, flags); + port->tx_timer.expires = 0; + + if (!port->tty || port->removed) + goto done; + + tosend = min(VCC_BUFF_LEN, port->chars_in_buffer); + if (!tosend) + goto done; + + pkt = &port->buffer; + pkt->tag.type = VIO_TYPE_DATA; + pkt->tag.stype = tosend; + vccdbgl(port->vio.lp); + + rv = ldc_write(port->vio.lp, pkt, (VIO_TAG_SIZE + tosend)); + WARN_ON(!rv); + + if (rv < 0) { + vccdbg("VCC: ldc_write()=%d\n", rv); + vcc_kick_tx(port); + } else { + struct tty_struct *tty = port->tty; + + port->chars_in_buffer = 0; + if (tty) + tty_wakeup(tty); + } + +done: + spin_unlock_irqrestore(&port->lock, flags); + vcc_put(port, false); +} + +/** + * vcc_event() - LDC event processing engine + * @arg: VCC private data + * @event: LDC event + * + * Handles LDC events for VCC + */ +static void vcc_event(void *arg, int event) +{ + struct vio_driver_state *vio; + struct vcc_port *port; + unsigned long flags; + int rv; + + port = arg; + vio = &port->vio; + + spin_lock_irqsave(&port->lock, flags); + + switch (event) { + case LDC_EVENT_RESET: + case LDC_EVENT_UP: + vio_link_state_change(vio, event); + break; + + case LDC_EVENT_DATA_READY: + rv = vcc_ldc_read(port); + if (rv == -ECONNRESET) + vio_conn_reset(vio); + break; + + default: + pr_err("VCC: unexpected LDC event(%d)\n", event); + } + + spin_unlock_irqrestore(&port->lock, flags); +} + +static struct ldc_channel_config vcc_ldc_cfg = { + .event = vcc_event, + .mtu = VIO_VCC_MTU_SIZE, + .mode = LDC_MODE_RAW, + .debug = 0, +}; + +/* Ordered from largest major to lowest */ +static struct vio_version vcc_versions[] = { + { .major = 1, .minor = 0 }, +}; + +static struct tty_port_operations vcc_port_ops = { 0 }; + +static ssize_t vcc_sysfs_domain_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct vcc_port *port; + int rv; + + port = dev_get_drvdata(dev); + if (!port) + return -ENODEV; + + rv = scnprintf(buf, PAGE_SIZE, "%s\n", port->domain); + + return rv; +} + +static int vcc_send_ctl(struct vcc_port *port, int ctl) +{ + struct vio_vcc pkt; + int rv; + + pkt.tag.type = 
VIO_TYPE_CTRL; + pkt.tag.sid = ctl; + pkt.tag.stype = 0; + + rv = ldc_write(port->vio.lp, &pkt, sizeof(pkt.tag)); + WARN_ON(!rv); + vccdbg("VCC: ldc_write(%ld)=%d\n", sizeof(pkt.tag), rv); + + return rv; +} + +static ssize_t vcc_sysfs_break_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct vcc_port *port; + unsigned long flags; + int rv = count; + int brk; + + port = dev_get_drvdata(dev); + if (!port) + return -ENODEV; + + spin_lock_irqsave(&port->lock, flags); + + if (sscanf(buf, "%ud", &brk) != 1 || brk != 1) + rv = -EINVAL; + else if (vcc_send_ctl(port, VCC_CTL_BREAK) < 0) + vcc_kick_tx(port); + + spin_unlock_irqrestore(&port->lock, flags); + + return rv; +} + +static DEVICE_ATTR(domain, 0400, vcc_sysfs_domain_show, NULL); +static DEVICE_ATTR(break, 0200, NULL, vcc_sysfs_break_store); + +static struct attribute *vcc_sysfs_entries[] = { + &dev_attr_domain.attr, + &dev_attr_break.attr, + NULL +}; + +static struct attribute_group vcc_attribute_group = { + .name = NULL, + .attrs = vcc_sysfs_entries, +}; + +/** + * vcc_probe() - Initialize VCC port + * @vdev: Pointer to VIO device of the new VCC port + * @id: VIO device ID + * + * Initializes a VCC port to receive serial console data from + * the guest domain. Sets up a TTY end point on the control + * domain. Sets up VIO/LDC link between the guest & control + * domain endpoints. + * + * Return: status of the probe + */ +static int vcc_probe(struct vio_dev *vdev, const struct vio_device_id *id) +{ + struct mdesc_handle *hp; + struct vcc_port *port; + struct device *dev; + const char *domain; + char *name; + u64 node; + int rv; + + vccdbg("VCC: name=%s\n", dev_name(&vdev->dev)); + + if (!vcc_tty_driver) { + pr_err("VCC: TTY driver not registered\n"); + return -ENODEV; + } + + port = kzalloc(sizeof(struct vcc_port), GFP_KERNEL); + if (!port) + return -ENOMEM; + + name = kstrdup(dev_name(&vdev->dev), GFP_KERNEL); + + rv = vio_driver_init(&port->vio, vdev, VDEV_CONSOLE_CON, vcc_versions, + ARRAY_SIZE(vcc_versions), NULL, name); + if (rv) + goto free_port; + + port->vio.debug = vcc_dbg_vio; + vcc_ldc_cfg.debug = vcc_dbg_ldc; + + rv = vio_ldc_alloc(&port->vio, &vcc_ldc_cfg, port); + if (rv) + goto free_port; + + spin_lock_init(&port->lock); + + port->index = vcc_table_add(port); + if (port->index == -1) { + pr_err("VCC: no more TTY indices left for allocation\n"); + goto free_ldc; + } + + /* Register the device using VCC table index as TTY index */ + dev = tty_register_device(vcc_tty_driver, port->index, &vdev->dev); + if (IS_ERR(dev)) { + rv = PTR_ERR(dev); + goto free_table; + } + + hp = mdesc_grab(); + + node = vio_vdev_node(hp, vdev); + if (node == MDESC_NODE_NULL) { + rv = -ENXIO; + mdesc_release(hp); + goto unreg_tty; + } + + domain = mdesc_get_property(hp, node, "vcc-domain-name", NULL); + if (!domain) { + rv = -ENXIO; + mdesc_release(hp); + goto unreg_tty; + } + port->domain = kstrdup(domain, GFP_KERNEL); + + mdesc_release(hp); + + rv = sysfs_create_group(&vdev->dev.kobj, &vcc_attribute_group); + if (rv) + goto free_domain; + + init_timer(&port->rx_timer); + port->rx_timer.function = vcc_rx_timer; + port->rx_timer.data = port->index; + + init_timer(&port->tx_timer); + port->tx_timer.function = vcc_tx_timer; + port->tx_timer.data = port->index; + + dev_set_drvdata(&vdev->dev, port); + + /* It's possible to receive IRQs in the middle of vio_port_up. Disable + * IRQs until the port is up. 
+ */ + disable_irq_nosync(vdev->rx_irq); + vio_port_up(&port->vio); + enable_irq(vdev->rx_irq); + + return 0; + +free_domain: + kfree(port->domain); +unreg_tty: + tty_unregister_device(vcc_tty_driver, port->index); +free_table: + vcc_table_remove(port->index); +free_ldc: + vio_ldc_free(&port->vio); +free_port: + kfree(name); + kfree(port); + + return rv; +} + +/** + * vcc_remove() - Terminate a VCC port + * @vdev: Pointer to VIO device of the VCC port + * + * Terminates a VCC port. Sets up the teardown of TTY and + * VIO/LDC link between guest and primary domains. + * + * Return: status of removal + */ +static int vcc_remove(struct vio_dev *vdev) +{ + struct vcc_port *port = dev_get_drvdata(&vdev->dev); + + if (!port) + return -ENODEV; + + del_timer_sync(&port->rx_timer); + del_timer_sync(&port->tx_timer); + + /* If there's a process with the device open, do a synchronous + * hangup of the TTY. This *may* cause the process to call close + * asynchronously, but it's not guaranteed. + */ + if (port->tty) + tty_vhangup(port->tty); + + /* Get exclusive reference to VCC, ensures that there are no other + * clients to this port + */ + port = vcc_get(port->index, true); + + if (WARN_ON(!port)) + return -ENODEV; + + tty_unregister_device(vcc_tty_driver, port->index); + + del_timer_sync(&port->vio.timer); + vio_ldc_free(&port->vio); + sysfs_remove_group(&vdev->dev.kobj, &vcc_attribute_group); + dev_set_drvdata(&vdev->dev, NULL); + if (port->tty) { + port->removed = true; + vcc_put(port, true); + } else { + vcc_table_remove(port->index); + + kfree(port->vio.name); + kfree(port->domain); + kfree(port); + } + + return 0; +} + +static const struct vio_device_id vcc_match[] = { + { + .type = "vcc-port", + }, + {}, +}; +MODULE_DEVICE_TABLE(vio, vcc_match); + +static struct vio_driver vcc_driver = { + .id_table = vcc_match, + .probe = vcc_probe, + .remove = vcc_remove, + .name = "vcc", +}; + +static int vcc_open(struct tty_struct *tty, struct file *vcc_file) +{ + struct vcc_port *port; + + if (unlikely(!tty)) { + pr_err("VCC: open: Invalid TTY handle\n"); + return -ENXIO; + } + + if (tty->count > 1) + return -EBUSY; + + port = vcc_get_ne(tty->index); + if (unlikely(!port)) { + pr_err("VCC: open: Failed to find VCC port\n"); + return -ENODEV; + } + + if (unlikely(!port->vio.lp)) { + pr_err("VCC: open: LDC channel not configured\n"); + vcc_put(port, false); + return -EPIPE; + } + vccdbgl(port->vio.lp); + + vcc_put(port, false); + + if (unlikely(!tty->port)) { + pr_err("VCC: open: TTY port not found\n"); + return -ENXIO; + } + + if (unlikely(!tty->port->ops)) { + pr_err("VCC: open: TTY ops not defined\n"); + return -ENXIO; + } + + return tty_port_open(tty->port, tty, vcc_file); +} + +static void vcc_close(struct tty_struct *tty, struct file *vcc_file) +{ + if (unlikely(!tty)) { + pr_err("VCC: close: Invalid TTY handle\n"); + return; + } + + if (unlikely(tty->count > 1)) + return; + + if (unlikely(!tty->port)) { + pr_err("VCC: close: TTY port not found\n"); + return; + } + + tty_port_close(tty->port, tty, vcc_file); +} + +static void vcc_ldc_hup(struct vcc_port *port) +{ + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + + if (vcc_send_ctl(port, VCC_CTL_HUP) < 0) + vcc_kick_tx(port); + + spin_unlock_irqrestore(&port->lock, flags); +} + +static void vcc_hangup(struct tty_struct *tty) +{ + struct vcc_port *port; + + if (unlikely(!tty)) { + pr_err("VCC: hangup: Invalid TTY handle\n"); + return; + } + + port = vcc_get_ne(tty->index); + if (unlikely(!port)) { + pr_err("VCC: hangup: Failed to find 
VCC port\n"); + return; + } + + if (unlikely(!tty->port)) { + pr_err("VCC: hangup: TTY port not found\n"); + vcc_put(port, false); + return; + } + + vcc_ldc_hup(port); + + vcc_put(port, false); + + tty_port_hangup(tty->port); +} + +static int vcc_write(struct tty_struct *tty, const unsigned char *buf, + int count) +{ + struct vcc_port *port; + struct vio_vcc *pkt; + unsigned long flags; + int total_sent = 0; + int tosend = 0; + int rv = -EINVAL; + + if (unlikely(!tty)) { + pr_err("VCC: write: Invalid TTY handle\n"); + return -ENXIO; + } + + port = vcc_get_ne(tty->index); + if (unlikely(!port)) { + pr_err("VCC: write: Failed to find VCC port"); + return -ENODEV; + } + + spin_lock_irqsave(&port->lock, flags); + + pkt = &port->buffer; + pkt->tag.type = VIO_TYPE_DATA; + + while (count > 0) { + /* Minimum of data to write and space available */ + tosend = min(count, (VCC_BUFF_LEN - port->chars_in_buffer)); + + if (!tosend) + break; + + memcpy(&pkt->data[port->chars_in_buffer], &buf[total_sent], + tosend); + port->chars_in_buffer += tosend; + pkt->tag.stype = tosend; + + vccdbg("TAG [%02x:%02x:%04x:%08x]\n", pkt->tag.type, + pkt->tag.stype, pkt->tag.stype_env, pkt->tag.sid); + vccdbg("DATA [%s]\n", pkt->data); + vccdbgl(port->vio.lp); + + /* Since we know we have enough room in VCC buffer for tosend + * we record that it was sent regardless of whether the + * hypervisor actually took it because we have it buffered. + */ + rv = ldc_write(port->vio.lp, pkt, (VIO_TAG_SIZE + tosend)); + vccdbg("VCC: write: ldc_write(%d)=%d\n", + (VIO_TAG_SIZE + tosend), rv); + + total_sent += tosend; + count -= tosend; + if (rv < 0) { + vcc_kick_tx(port); + break; + } + + port->chars_in_buffer = 0; + } + + spin_unlock_irqrestore(&port->lock, flags); + + vcc_put(port, false); + + vccdbg("VCC: write: total=%d rv=%d", total_sent, rv); + + return total_sent ? 
total_sent : rv; +} + +static int vcc_write_room(struct tty_struct *tty) +{ + struct vcc_port *port; + u64 num; + + if (unlikely(!tty)) { + pr_err("VCC: write_room: Invalid TTY handle\n"); + return -ENXIO; + } + + port = vcc_get_ne(tty->index); + if (unlikely(!port)) { + pr_err("VCC: write_room: Failed to find VCC port\n"); + return -ENODEV; + } + + num = VCC_BUFF_LEN - port->chars_in_buffer; + + vcc_put(port, false); + + return num; +} + +static int vcc_chars_in_buffer(struct tty_struct *tty) +{ + struct vcc_port *port; + u64 num; + + if (unlikely(!tty)) { + pr_err("VCC: chars_in_buffer: Invalid TTY handle\n"); + return -ENXIO; + } + + port = vcc_get_ne(tty->index); + if (unlikely(!port)) { + pr_err("VCC: chars_in_buffer: Failed to find VCC port\n"); + return -ENODEV; + } + + num = port->chars_in_buffer; + + vcc_put(port, false); + + return num; +} + +static int vcc_break_ctl(struct tty_struct *tty, int state) +{ + struct vcc_port *port; + unsigned long flags; + + if (unlikely(!tty)) { + pr_err("VCC: break_ctl: Invalid TTY handle\n"); + return -ENXIO; + } + + port = vcc_get_ne(tty->index); + if (unlikely(!port)) { + pr_err("VCC: break_ctl: Failed to find VCC port\n"); + return -ENODEV; + } + + /* Turn off break */ + if (state == 0) { + vcc_put(port, false); + return 0; + } + + spin_lock_irqsave(&port->lock, flags); + + if (vcc_send_ctl(port, VCC_CTL_BREAK) < 0) + vcc_kick_tx(port); + + spin_unlock_irqrestore(&port->lock, flags); + + vcc_put(port, false); + + return 0; +} + +static int vcc_install(struct tty_driver *driver, struct tty_struct *tty) +{ + struct vcc_port *port_vcc; + struct tty_port *port_tty; + int ret; + + if (unlikely(!tty)) { + pr_err("VCC: install: Invalid TTY handle\n"); + return -ENXIO; + } + + if (tty->index >= VCC_MAX_PORTS) + return -EINVAL; + + ret = tty_standard_install(driver, tty); + if (ret) + return ret; + + port_tty = kzalloc(sizeof(struct tty_port), GFP_KERNEL); + if (!port_tty) + return -ENOMEM; + + port_vcc = vcc_get(tty->index, true); + if (!port_vcc) { + pr_err("VCC: install: Failed to find VCC port\n"); + tty->port = NULL; + kfree(port_tty); + return -ENODEV; + } + + tty_port_init(port_tty); + port_tty->ops = &vcc_port_ops; + tty->port = port_tty; + + port_vcc->tty = tty; + + vcc_put(port_vcc, true); + + return 0; +} + +static void vcc_cleanup(struct tty_struct *tty) +{ + struct vcc_port *port; + + if (unlikely(!tty)) { + pr_err("VCC: cleanup: Invalid TTY handle\n"); + return; + } + + port = vcc_get(tty->index, true); + if (port) { + port->tty = NULL; + + if (port->removed) { + vcc_table_remove(tty->index); + kfree(port->vio.name); + kfree(port->domain); + kfree(port); + } else { + vcc_put(port, true); + } + } + + tty_port_destroy(tty->port); + kfree(tty->port); + tty->port = NULL; +} + +static const struct tty_operations vcc_ops = { + .open = vcc_open, + .close = vcc_close, + .hangup = vcc_hangup, + .write = vcc_write, + .write_room = vcc_write_room, + .chars_in_buffer = vcc_chars_in_buffer, + .break_ctl = vcc_break_ctl, + .install = vcc_install, + .cleanup = vcc_cleanup, +}; + +#define VCC_TTY_FLAGS (TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_REAL_RAW) + +static int vcc_tty_init(void) +{ + int rv; + + pr_info("VCC: %s\n", version); + + vcc_tty_driver = tty_alloc_driver(VCC_MAX_PORTS, VCC_TTY_FLAGS); + if (IS_ERR(vcc_tty_driver)) { + pr_err("VCC: TTY driver alloc failed\n"); + return PTR_ERR(vcc_tty_driver); + } + + vcc_tty_driver->driver_name = vcc_driver_name; + vcc_tty_driver->name = vcc_device_node; + + vcc_tty_driver->minor_start = VCC_MINOR_START; + 
vcc_tty_driver->type = TTY_DRIVER_TYPE_SYSTEM; + vcc_tty_driver->init_termios = vcc_tty_termios; + + tty_set_operations(vcc_tty_driver, &vcc_ops); + + rv = tty_register_driver(vcc_tty_driver); + if (rv) { + pr_err("VCC: TTY driver registration failed\n"); + put_tty_driver(vcc_tty_driver); + vcc_tty_driver = NULL; + return rv; + } + + vccdbg("VCC: TTY driver registered\n"); + + return 0; +} + +static void vcc_tty_exit(void) +{ + tty_unregister_driver(vcc_tty_driver); + put_tty_driver(vcc_tty_driver); + vccdbg("VCC: TTY driver unregistered\n"); + + vcc_tty_driver = NULL; +} + +static int __init vcc_init(void) +{ + int rv; + + rv = vcc_tty_init(); + if (rv) { + pr_err("VCC: TTY init failed\n"); + return rv; + } + + rv = vio_register_driver(&vcc_driver); + if (rv) { + pr_err("VCC: VIO driver registration failed\n"); + vcc_tty_exit(); + } else { + vccdbg("VCC: VIO driver registered successfully\n"); + } + + return rv; +} + +static void __exit vcc_exit(void) +{ + vio_unregister_driver(&vcc_driver); + vccdbg("VCC: VIO driver unregistered\n"); + vcc_tty_exit(); + vccdbg("VCC: TTY driver unregistered\n"); +} + +module_init(vcc_init); +module_exit(vcc_exit); diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index 5aacea1978a5..3e865dbf878c 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -190,8 +190,10 @@ static void wdm_in_callback(struct urb *urb) /* * only set a new error if there is no previous error. * Errors are only cleared during read/open + * Avoid propagating -EPIPE (stall) to userspace since it is + * better handled as an empty read */ - if (desc->rerr == 0) + if (desc->rerr == 0 && status != -EPIPE) desc->rerr = status; if (length + desc->length > desc->wMaxCommand) { diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 4be52c602e9b..68b54bd88d1e 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -643,15 +643,23 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx, } else if (header->bDescriptorType == USB_DT_INTERFACE_ASSOCIATION) { + struct usb_interface_assoc_descriptor *d; + + d = (struct usb_interface_assoc_descriptor *)header; + if (d->bLength < USB_DT_INTERFACE_ASSOCIATION_SIZE) { + dev_warn(ddev, + "config %d has an invalid interface association descriptor of length %d, skipping\n", + cfgno, d->bLength); + continue; + } + if (iad_num == USB_MAXIADS) { dev_warn(ddev, "found more Interface " "Association Descriptors " "than allocated for in " "configuration %d\n", cfgno); } else { - config->intf_assoc[iad_num] = - (struct usb_interface_assoc_descriptor - *)header; + config->intf_assoc[iad_num] = d; iad_num++; } @@ -852,7 +860,7 @@ int usb_get_configuration(struct usb_device *dev) } if (dev->quirks & USB_QUIRK_DELAY_INIT) - msleep(100); + msleep(200); result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno, bigbuffer, length); diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 318bb3b96687..4664e543cf2f 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -140,6 +140,9 @@ module_param(usbfs_memory_mb, uint, 0644); MODULE_PARM_DESC(usbfs_memory_mb, "maximum MB allowed for usbfs buffers (0 = no limit)"); +/* Hard limit, necessary to avoid arithmetic overflow */ +#define USBFS_XFER_MAX (UINT_MAX / 2 - 1000000) + static atomic64_t usbfs_memory_usage; /* Total memory currently allocated */ /* Check whether it's okay to allocate more memory for a transfer */ @@ -1460,6 +1463,8 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct 
usbdevfs_urb *uurb USBDEVFS_URB_ZERO_PACKET | USBDEVFS_URB_NO_INTERRUPT)) return -EINVAL; + if ((unsigned int)uurb->buffer_length >= USBFS_XFER_MAX) + return -EINVAL; if (uurb->buffer_length > 0 && !uurb->buffer) return -EINVAL; if (!(uurb->type == USBDEVFS_URB_TYPE_CONTROL && @@ -1571,7 +1576,11 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb totlen += isopkt[u].length; } u *= sizeof(struct usb_iso_packet_descriptor); - uurb->buffer_length = totlen; + if (totlen <= uurb->buffer_length) + uurb->buffer_length = totlen; + else + WARN_ONCE(1, "uurb->buffer_length is too short %d vs %d", + totlen, uurb->buffer_length); break; default: diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 41eaf0b52518..b5c733613823 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -4838,7 +4838,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, goto loop; if (udev->quirks & USB_QUIRK_DELAY_INIT) - msleep(1000); + msleep(2000); /* consecutive bus-powered hubs aren't reliable; they can * violate the voltage drop budget. if the new child has diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index 4c38ea41ae96..371a07d874a3 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -2069,6 +2069,10 @@ int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr, elength = 1; goto next_desc; } + if ((buflen < elength) || (elength < 3)) { + dev_err(&intf->dev, "invalid descriptor buffer length\n"); + break; + } if (buffer[1] != USB_DT_CS_INTERFACE) { dev_err(&intf->dev, "skipping garbage\n"); goto next_desc; diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c index 4cef7d4f9cd0..a26d1fde0f5e 100644 --- a/drivers/usb/dwc3/dwc3-of-simple.c +++ b/drivers/usb/dwc3/dwc3-of-simple.c @@ -177,6 +177,7 @@ static const struct of_device_id of_dwc3_simple_match[] = { { .compatible = "rockchip,rk3399-dwc3" }, { .compatible = "xlnx,zynqmp-dwc3" }, { .compatible = "cavium,octeon-7130-usb-uctl" }, + { .compatible = "sprd,sc9860-dwc3" }, { /* Sentinel */ } }; MODULE_DEVICE_TABLE(of, of_dwc3_simple_match); diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c index 827e376bfa97..75e6cb044eb2 100644 --- a/drivers/usb/dwc3/ep0.c +++ b/drivers/usb/dwc3/ep0.c @@ -990,6 +990,8 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc, DWC3_TRBCTL_CONTROL_DATA, true); + req->trb = &dwc->ep0_trb[dep->trb_enqueue - 1]; + /* Now prepare one extra TRB to align transfer size */ dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr, maxpacket - rem, @@ -1015,6 +1017,8 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc, DWC3_TRBCTL_CONTROL_DATA, true); + req->trb = &dwc->ep0_trb[dep->trb_enqueue - 1]; + /* Now prepare one extra TRB to align transfer size */ dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr, 0, DWC3_TRBCTL_CONTROL_DATA, @@ -1029,6 +1033,9 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc, dwc3_ep0_prepare_one_trb(dep, req->request.dma, req->request.length, DWC3_TRBCTL_CONTROL_DATA, false); + + req->trb = &dwc->ep0_trb[dep->trb_enqueue]; + ret = dwc3_ep0_start_trans(dep); } diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 9990944a7245..8b342587f8ad 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -46,7 +46,8 @@ static void ffs_data_get(struct ffs_data *ffs); static void ffs_data_put(struct ffs_data *ffs); /* Creates new ffs_data object. 
*/ -static struct ffs_data *__must_check ffs_data_new(void) __attribute__((malloc)); +static struct ffs_data *__must_check ffs_data_new(const char *dev_name) + __attribute__((malloc)); /* Opened counter handling. */ static void ffs_data_opened(struct ffs_data *ffs); @@ -780,11 +781,12 @@ static void ffs_epfile_async_io_complete(struct usb_ep *_ep, struct usb_request *req) { struct ffs_io_data *io_data = req->context; + struct ffs_data *ffs = io_data->ffs; ENTER(); INIT_WORK(&io_data->work, ffs_user_copy_worker); - schedule_work(&io_data->work); + queue_work(ffs->io_completion_wq, &io_data->work); } static void __ffs_epfile_read_buffer_free(struct ffs_epfile *epfile) @@ -1500,7 +1502,7 @@ ffs_fs_mount(struct file_system_type *t, int flags, if (unlikely(ret < 0)) return ERR_PTR(ret); - ffs = ffs_data_new(); + ffs = ffs_data_new(dev_name); if (unlikely(!ffs)) return ERR_PTR(-ENOMEM); ffs->file_perms = data.perms; @@ -1610,6 +1612,7 @@ static void ffs_data_put(struct ffs_data *ffs) BUG_ON(waitqueue_active(&ffs->ev.waitq) || waitqueue_active(&ffs->ep0req_completion.wait) || waitqueue_active(&ffs->wait)); + destroy_workqueue(ffs->io_completion_wq); kfree(ffs->dev_name); kfree(ffs); } @@ -1642,7 +1645,7 @@ static void ffs_data_closed(struct ffs_data *ffs) ffs_data_put(ffs); } -static struct ffs_data *ffs_data_new(void) +static struct ffs_data *ffs_data_new(const char *dev_name) { struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL); if (unlikely(!ffs)) @@ -1650,6 +1653,12 @@ static struct ffs_data *ffs_data_new(void) ENTER(); + ffs->io_completion_wq = alloc_ordered_workqueue("%s", 0, dev_name); + if (!ffs->io_completion_wq) { + kfree(ffs); + return NULL; + } + refcount_set(&ffs->ref, 1); atomic_set(&ffs->opened, 0); ffs->state = FFS_READ_DESCRIPTORS; diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c index f95bddd6513f..5153e29870c3 100644 --- a/drivers/usb/gadget/function/f_mass_storage.c +++ b/drivers/usb/gadget/function/f_mass_storage.c @@ -307,8 +307,6 @@ struct fsg_common { struct completion thread_notifier; struct task_struct *thread_task; - /* Callback functions. */ - const struct fsg_operations *ops; /* Gadget's private data. 
*/ void *private_data; @@ -686,9 +684,8 @@ static int do_read(struct fsg_common *common) /* Perform the read */ file_offset_tmp = file_offset; - nread = vfs_read(curlun->filp, - (char __user *)bh->buf, - amount, &file_offset_tmp); + nread = kernel_read(curlun->filp, bh->buf, amount, + &file_offset_tmp); VLDBG(curlun, "file read %u @ %llu -> %d\n", amount, (unsigned long long)file_offset, (int)nread); if (signal_pending(current)) @@ -883,8 +880,8 @@ static int do_write(struct fsg_common *common) /* Perform the write */ file_offset_tmp = file_offset; - nwritten = vfs_write(curlun->filp, (char __user *)bh->buf, - amount, &file_offset_tmp); + nwritten = kernel_write(curlun->filp, bh->buf, amount, + &file_offset_tmp); VLDBG(curlun, "file write %u @ %llu -> %d\n", amount, (unsigned long long)file_offset, (int)nwritten); if (signal_pending(current)) @@ -1021,9 +1018,8 @@ static int do_verify(struct fsg_common *common) /* Perform the read */ file_offset_tmp = file_offset; - nread = vfs_read(curlun->filp, - (char __user *) bh->buf, - amount, &file_offset_tmp); + nread = kernel_read(curlun->filp, bh->buf, amount, + &file_offset_tmp); VLDBG(curlun, "file read %u @ %llu -> %d\n", amount, (unsigned long long) file_offset, (int) nread); @@ -2440,6 +2436,7 @@ static void handle_exception(struct fsg_common *common) static int fsg_main_thread(void *common_) { struct fsg_common *common = common_; + int i; /* * Allow the thread to be killed by a signal, but set the signal mask @@ -2453,13 +2450,6 @@ static int fsg_main_thread(void *common_) /* Allow the thread to be frozen */ set_freezable(); - /* - * Arrange for userspace references to be interpreted as kernel - * pointers. That way we can pass a kernel pointer to a routine - * that expects a __user pointer and it will work okay. 
- */ - set_fs(get_ds()); - /* The main loop */ while (common->state != FSG_STATE_TERMINATED) { if (exception_in_progress(common) || signal_pending(current)) { @@ -2485,21 +2475,16 @@ static int fsg_main_thread(void *common_) common->thread_task = NULL; spin_unlock_irq(&common->lock); - if (!common->ops || !common->ops->thread_exits - || common->ops->thread_exits(common) < 0) { - int i; + /* Eject media from all LUNs */ - down_write(&common->filesem); - for (i = 0; i < ARRAY_SIZE(common->luns); i++) { - struct fsg_lun *curlun = common->luns[i]; - if (!curlun || !fsg_lun_is_open(curlun)) - continue; + down_write(&common->filesem); + for (i = 0; i < ARRAY_SIZE(common->luns); i++) { + struct fsg_lun *curlun = common->luns[i]; + if (curlun && fsg_lun_is_open(curlun)) fsg_lun_close(curlun); - curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT; - } - up_write(&common->filesem); } + up_write(&common->filesem); /* Let fsg_unbind() know the thread has exited */ complete_and_exit(&common->thread_notifier, 0); @@ -2690,13 +2675,6 @@ void fsg_common_remove_luns(struct fsg_common *common) } EXPORT_SYMBOL_GPL(fsg_common_remove_luns); -void fsg_common_set_ops(struct fsg_common *common, - const struct fsg_operations *ops) -{ - common->ops = ops; -} -EXPORT_SYMBOL_GPL(fsg_common_set_ops); - void fsg_common_free_buffers(struct fsg_common *common) { _fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers); diff --git a/drivers/usb/gadget/function/f_mass_storage.h b/drivers/usb/gadget/function/f_mass_storage.h index d3902313b8ac..dc05ca0c4359 100644 --- a/drivers/usb/gadget/function/f_mass_storage.h +++ b/drivers/usb/gadget/function/f_mass_storage.h @@ -60,17 +60,6 @@ struct fsg_module_parameters { struct fsg_common; /* FSF callback functions */ -struct fsg_operations { - /* - * Callback function to call when thread exits. If no - * callback is set or it returns value lower then zero MSF - * will force eject all LUNs it operates on (including those - * marked as non-removable or with prevent_medium_removal flag - * set). - */ - int (*thread_exits)(struct fsg_common *common); -}; - struct fsg_lun_opts { struct config_group group; struct fsg_lun *lun; @@ -142,9 +131,6 @@ void fsg_common_remove_lun(struct fsg_lun *lun); void fsg_common_remove_luns(struct fsg_common *common); -void fsg_common_set_ops(struct fsg_common *common, - const struct fsg_operations *ops); - int fsg_common_create_lun(struct fsg_common *common, struct fsg_lun_config *cfg, unsigned int id, const char *name, const char **name_pfx); diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c index 8df244fc9d80..ea0da35a44e2 100644 --- a/drivers/usb/gadget/function/f_printer.c +++ b/drivers/usb/gadget/function/f_printer.c @@ -555,6 +555,7 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr) size_t size; /* Amount of data in a TX request. */ size_t bytes_copied = 0; struct usb_request *req; + int value; DBG(dev, "printer_write trying to send %d bytes\n", (int)len); @@ -634,7 +635,11 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr) return -EAGAIN; } - if (usb_ep_queue(dev->in_ep, req, GFP_ATOMIC)) { + /* here, we unlock, and only unlock, to avoid deadlock. 
*/ + spin_unlock(&dev->lock); + value = usb_ep_queue(dev->in_ep, req, GFP_ATOMIC); + spin_lock(&dev->lock); + if (value) { list_add(&req->list, &dev->tx_reqs); spin_unlock_irqrestore(&dev->lock, flags); mutex_unlock(&dev->lock_printer_io); diff --git a/drivers/usb/gadget/function/u_fs.h b/drivers/usb/gadget/function/u_fs.h index 540f1c48c1a8..79f70ebf85dc 100644 --- a/drivers/usb/gadget/function/u_fs.h +++ b/drivers/usb/gadget/function/u_fs.h @@ -279,6 +279,7 @@ struct ffs_data { } file_perms; struct eventfd_ctx *ffs_eventfd; + struct workqueue_struct *io_completion_wq; bool no_disconnect; struct work_struct reset_work; diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index 684900fcfe24..5c28bee327e1 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c @@ -28,7 +28,7 @@ #include #include #include - +#include #include #include @@ -116,6 +116,7 @@ enum ep0_state { struct dev_data { spinlock_t lock; refcount_t count; + int udc_usage; enum ep0_state state; /* P: lock */ struct usb_gadgetfs_event event [N_EVENT]; unsigned ev_next; @@ -513,9 +514,9 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req) INIT_WORK(&priv->work, ep_user_copy_worker); schedule_work(&priv->work); } - spin_unlock(&epdata->dev->lock); usb_ep_free_request(ep, req); + spin_unlock(&epdata->dev->lock); put_ep(epdata); } @@ -939,9 +940,11 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr) struct usb_request *req = dev->req; if ((retval = setup_req (ep, req, 0)) == 0) { + ++dev->udc_usage; spin_unlock_irq (&dev->lock); retval = usb_ep_queue (ep, req, GFP_KERNEL); spin_lock_irq (&dev->lock); + --dev->udc_usage; } dev->state = STATE_DEV_CONNECTED; @@ -983,11 +986,14 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr) retval = -EIO; else { len = min (len, (size_t)dev->req->actual); -// FIXME don't call this with the spinlock held ... 
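/*
 * Editor's sketch, not part of the patch: the hunks below (and the
 * matching callback_usage hunks in dummy_hcd further down) all apply the
 * same idiom -- bump a usage counter under the spinlock before dropping
 * it around a call that may sleep or re-enter the driver, drop the
 * counter after re-locking, and have the teardown path poll until the
 * counter drains.  The helper names here are hypothetical; the fields
 * (dev->lock, dev->udc_usage, dev->req, dev->gadget) are the ones the
 * patch itself adds or already uses.  Roughly:
 */
static int ep0_queue_unlocked(struct dev_data *dev)	/* hypothetical helper */
{
	int value;

	/* caller holds dev->lock */
	++dev->udc_usage;
	spin_unlock_irq(&dev->lock);
	value = usb_ep_queue(dev->gadget->ep0, dev->req, GFP_KERNEL);
	spin_lock_irq(&dev->lock);
	--dev->udc_usage;
	return value;
}

static void wait_for_udc_idle(struct dev_data *dev)	/* hypothetical helper */
{
	/* called with dev->lock held, e.g. from the unbind path */
	while (dev->udc_usage > 0) {
		spin_unlock_irq(&dev->lock);
		usleep_range(1000, 2000);
		spin_lock_irq(&dev->lock);
	}
}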
+ ++dev->udc_usage; + spin_unlock_irq(&dev->lock); if (copy_to_user (buf, dev->req->buf, len)) retval = -EFAULT; else retval = len; + spin_lock_irq(&dev->lock); + --dev->udc_usage; clean_req (dev->gadget->ep0, dev->req); /* NOTE userspace can't yet choose to stall */ } @@ -1131,6 +1137,7 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) retval = setup_req (dev->gadget->ep0, dev->req, len); if (retval == 0) { dev->state = STATE_DEV_CONNECTED; + ++dev->udc_usage; spin_unlock_irq (&dev->lock); if (copy_from_user (dev->req->buf, buf, len)) retval = -EFAULT; @@ -1142,6 +1149,7 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) GFP_KERNEL); } spin_lock_irq(&dev->lock); + --dev->udc_usage; if (retval < 0) { clean_req (dev->gadget->ep0, dev->req); } else @@ -1243,9 +1251,21 @@ static long dev_ioctl (struct file *fd, unsigned code, unsigned long value) struct usb_gadget *gadget = dev->gadget; long ret = -ENOTTY; - if (gadget->ops->ioctl) + spin_lock_irq(&dev->lock); + if (dev->state == STATE_DEV_OPENED || + dev->state == STATE_DEV_UNBOUND) { + /* Not bound to a UDC */ + } else if (gadget->ops->ioctl) { + ++dev->udc_usage; + spin_unlock_irq(&dev->lock); + ret = gadget->ops->ioctl (gadget, code, value); + spin_lock_irq(&dev->lock); + --dev->udc_usage; + } + spin_unlock_irq(&dev->lock); + return ret; } @@ -1463,10 +1483,12 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) if (value < 0) break; + ++dev->udc_usage; spin_unlock (&dev->lock); value = usb_ep_queue (gadget->ep0, dev->req, GFP_KERNEL); spin_lock (&dev->lock); + --dev->udc_usage; if (value < 0) { clean_req (gadget->ep0, dev->req); break; @@ -1490,8 +1512,12 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) req->length = value; req->zero = value < w_length; + ++dev->udc_usage; spin_unlock (&dev->lock); value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL); + spin_lock(&dev->lock); + --dev->udc_usage; + spin_unlock(&dev->lock); if (value < 0) { DBG (dev, "ep_queue --> %d\n", value); req->status = 0; @@ -1518,21 +1544,24 @@ static void destroy_ep_files (struct dev_data *dev) /* break link to FS */ ep = list_first_entry (&dev->epfiles, struct ep_data, epfiles); list_del_init (&ep->epfiles); + spin_unlock_irq (&dev->lock); + dentry = ep->dentry; ep->dentry = NULL; parent = d_inode(dentry->d_parent); /* break link to controller */ + mutex_lock(&ep->lock); if (ep->state == STATE_EP_ENABLED) (void) usb_ep_disable (ep->ep); ep->state = STATE_EP_UNBOUND; usb_ep_free_request (ep->ep, ep->req); ep->ep = NULL; + mutex_unlock(&ep->lock); + wake_up (&ep->wait); put_ep (ep); - spin_unlock_irq (&dev->lock); - /* break link to dcache */ inode_lock(parent); d_delete (dentry); @@ -1603,6 +1632,11 @@ gadgetfs_unbind (struct usb_gadget *gadget) spin_lock_irq (&dev->lock); dev->state = STATE_DEV_UNBOUND; + while (dev->udc_usage > 0) { + spin_unlock_irq(&dev->lock); + usleep_range(1000, 2000); + spin_lock_irq(&dev->lock); + } spin_unlock_irq (&dev->lock); destroy_ep_files (dev); diff --git a/drivers/usb/gadget/legacy/mass_storage.c b/drivers/usb/gadget/legacy/mass_storage.c index e99ab57ee3e5..fcba59782f26 100644 --- a/drivers/usb/gadget/legacy/mass_storage.c +++ b/drivers/usb/gadget/legacy/mass_storage.c @@ -107,15 +107,6 @@ static unsigned int fsg_num_buffers = CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS; FSG_MODULE_PARAMETERS(/* no prefix */, mod_data); -static unsigned long msg_registered; -static void msg_cleanup(void); - -static int 
msg_thread_exits(struct fsg_common *common) -{ - msg_cleanup(); - return 0; -} - static int msg_do_config(struct usb_configuration *c) { struct fsg_opts *opts; @@ -154,9 +145,6 @@ static struct usb_configuration msg_config_driver = { static int msg_bind(struct usb_composite_dev *cdev) { - static const struct fsg_operations ops = { - .thread_exits = msg_thread_exits, - }; struct fsg_opts *opts; struct fsg_config config; int status; @@ -173,8 +161,6 @@ static int msg_bind(struct usb_composite_dev *cdev) if (status) goto fail; - fsg_common_set_ops(opts->common, &ops); - status = fsg_common_set_cdev(opts->common, cdev, config.can_stall); if (status) goto fail_set_cdev; @@ -256,18 +242,12 @@ MODULE_LICENSE("GPL"); static int __init msg_init(void) { - int ret; - - ret = usb_composite_probe(&msg_driver); - set_bit(0, &msg_registered); - - return ret; + return usb_composite_probe(&msg_driver); } module_init(msg_init); -static void msg_cleanup(void) +static void __exit msg_cleanup(void) { - if (test_and_clear_bit(0, &msg_registered)) - usb_composite_unregister(&msg_driver); + usb_composite_unregister(&msg_driver); } module_exit(msg_cleanup); diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig index 7cd5c969fcbe..1e9567091d86 100644 --- a/drivers/usb/gadget/udc/Kconfig +++ b/drivers/usb/gadget/udc/Kconfig @@ -273,6 +273,7 @@ config USB_SNP_CORE config USB_SNP_UDC_PLAT tristate "Synopsys USB 2.0 Device controller" depends on USB_GADGET && OF && HAS_DMA + depends on EXTCON || EXTCON=n select USB_GADGET_DUALSPEED select USB_SNP_CORE default ARCH_BCM_IPROC diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c index 98d71400f8a1..a884c022df7a 100644 --- a/drivers/usb/gadget/udc/atmel_usba_udc.c +++ b/drivers/usb/gadget/udc/atmel_usba_udc.c @@ -29,6 +29,8 @@ #include #include "atmel_usba_udc.h" +#define USBA_VBUS_IRQFLAGS (IRQF_ONESHOT \ + | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING) #ifdef CONFIG_USB_GADGET_DEBUG_FS #include @@ -2361,7 +2363,7 @@ static int usba_udc_probe(struct platform_device *pdev) IRQ_NOAUTOEN); ret = devm_request_threaded_irq(&pdev->dev, gpio_to_irq(udc->vbus_pin), NULL, - usba_vbus_irq_thread, IRQF_ONESHOT, + usba_vbus_irq_thread, USBA_VBUS_IRQFLAGS, "atmel_usba_udc", udc); if (ret) { udc->vbus_pin = -ENODEV; diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index 75c51ca4ee0f..d41d07aae0ce 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c @@ -1320,8 +1320,7 @@ static int udc_bind_to_driver(struct usb_udc *udc, struct usb_gadget_driver *dri udc->dev.driver = &driver->driver; udc->gadget->dev.driver = &driver->driver; - if (driver->max_speed < udc->gadget->max_speed) - usb_gadget_udc_set_speed(udc, driver->max_speed); + usb_gadget_udc_set_speed(udc, driver->max_speed); ret = driver->bind(udc->gadget, driver); if (ret) diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c index a030d7923d7d..b17618a55f1b 100644 --- a/drivers/usb/gadget/udc/dummy_hcd.c +++ b/drivers/usb/gadget/udc/dummy_hcd.c @@ -237,6 +237,8 @@ struct dummy_hcd { struct usb_device *udev; struct list_head urbp_list; + struct urbp *next_frame_urbp; + u32 stream_en_ep; u8 num_stream[30 / 2]; @@ -253,11 +255,13 @@ struct dummy { */ struct dummy_ep ep[DUMMY_ENDPOINTS]; int address; + int callback_usage; struct usb_gadget gadget; struct usb_gadget_driver *driver; struct dummy_request fifo_req; u8 fifo_buf[FIFO_SIZE]; u16 devstatus; + unsigned ints_enabled:1; unsigned 
udc_suspended:1; unsigned pullup:1; @@ -375,11 +379,10 @@ static void set_link_state_by_speed(struct dummy_hcd *dum_hcd) USB_PORT_STAT_CONNECTION) == 0) dum_hcd->port_status |= (USB_PORT_STAT_C_CONNECTION << 16); - if ((dum_hcd->port_status & - USB_PORT_STAT_ENABLE) == 1 && - (dum_hcd->port_status & - USB_SS_PORT_LS_U0) == 1 && - dum_hcd->rh_state != DUMMY_RH_SUSPENDED) + if ((dum_hcd->port_status & USB_PORT_STAT_ENABLE) && + (dum_hcd->port_status & + USB_PORT_STAT_LINK_STATE) == USB_SS_PORT_LS_U0 && + dum_hcd->rh_state != DUMMY_RH_SUSPENDED) dum_hcd->active = 1; } } else { @@ -440,18 +443,27 @@ static void set_link_state(struct dummy_hcd *dum_hcd) (~dum_hcd->old_status) & dum_hcd->port_status; /* Report reset and disconnect events to the driver */ - if (dum->driver && (disconnect || reset)) { + if (dum->ints_enabled && (disconnect || reset)) { stop_activity(dum); + ++dum->callback_usage; + spin_unlock(&dum->lock); if (reset) usb_gadget_udc_reset(&dum->gadget, dum->driver); else dum->driver->disconnect(&dum->gadget); + spin_lock(&dum->lock); + --dum->callback_usage; } - } else if (dum_hcd->active != dum_hcd->old_active) { + } else if (dum_hcd->active != dum_hcd->old_active && + dum->ints_enabled) { + ++dum->callback_usage; + spin_unlock(&dum->lock); if (dum_hcd->old_active && dum->driver->suspend) dum->driver->suspend(&dum->gadget); else if (!dum_hcd->old_active && dum->driver->resume) dum->driver->resume(&dum->gadget); + spin_lock(&dum->lock); + --dum->callback_usage; } dum_hcd->old_status = dum_hcd->port_status; @@ -972,8 +984,11 @@ static int dummy_udc_start(struct usb_gadget *g, * can't enumerate without help from the driver we're binding. */ + spin_lock_irq(&dum->lock); dum->devstatus = 0; dum->driver = driver; + dum->ints_enabled = 1; + spin_unlock_irq(&dum->lock); return 0; } @@ -984,6 +999,16 @@ static int dummy_udc_stop(struct usb_gadget *g) struct dummy *dum = dum_hcd->dum; spin_lock_irq(&dum->lock); + dum->ints_enabled = 0; + stop_activity(dum); + + /* emulate synchronize_irq(): wait for callbacks to finish */ + while (dum->callback_usage > 0) { + spin_unlock_irq(&dum->lock); + usleep_range(1000, 2000); + spin_lock_irq(&dum->lock); + } + dum->driver = NULL; spin_unlock_irq(&dum->lock); @@ -1037,7 +1062,12 @@ static int dummy_udc_probe(struct platform_device *pdev) memzero_explicit(&dum->gadget, sizeof(struct usb_gadget)); dum->gadget.name = gadget_name; dum->gadget.ops = &dummy_ops; - dum->gadget.max_speed = USB_SPEED_SUPER; + if (mod_data.is_super_speed) + dum->gadget.max_speed = USB_SPEED_SUPER; + else if (mod_data.is_high_speed) + dum->gadget.max_speed = USB_SPEED_HIGH; + else + dum->gadget.max_speed = USB_SPEED_FULL; dum->gadget.dev.parent = &pdev->dev; init_dummy_udc_hw(dum); @@ -1246,6 +1276,8 @@ static int dummy_urb_enqueue( list_add_tail(&urbp->urbp_list, &dum_hcd->urbp_list); urb->hcpriv = urbp; + if (!dum_hcd->next_frame_urbp) + dum_hcd->next_frame_urbp = urbp; if (usb_pipetype(urb->pipe) == PIPE_CONTROL) urb->error_count = 1; /* mark as a new urb */ @@ -1521,6 +1553,8 @@ static struct dummy_ep *find_endpoint(struct dummy *dum, u8 address) if (!is_active((dum->gadget.speed == USB_SPEED_SUPER ? 
dum->ss_hcd : dum->hs_hcd))) return NULL; + if (!dum->ints_enabled) + return NULL; if ((address & ~USB_DIR_IN) == 0) return &dum->ep[0]; for (i = 1; i < DUMMY_ENDPOINTS; i++) { @@ -1762,6 +1796,7 @@ static void dummy_timer(unsigned long _dum_hcd) spin_unlock_irqrestore(&dum->lock, flags); return; } + dum_hcd->next_frame_urbp = NULL; for (i = 0; i < DUMMY_ENDPOINTS; i++) { if (!ep_info[i].name) @@ -1778,6 +1813,10 @@ static void dummy_timer(unsigned long _dum_hcd) int type; int status = -EINPROGRESS; + /* stop when we reach URBs queued after the timer interrupt */ + if (urbp == dum_hcd->next_frame_urbp) + break; + urb = urbp->urb; if (urb->unlinked) goto return_urb; @@ -1857,10 +1896,12 @@ static void dummy_timer(unsigned long _dum_hcd) * until setup() returns; no reentrancy issues etc. */ if (value > 0) { + ++dum->callback_usage; spin_unlock(&dum->lock); value = dum->driver->setup(&dum->gadget, &setup); spin_lock(&dum->lock); + --dum->callback_usage; if (value >= 0) { /* no delays (max 64KB data stage) */ @@ -2561,8 +2602,6 @@ static struct hc_driver dummy_hcd = { .product_desc = "Dummy host controller", .hcd_priv_size = sizeof(struct dummy_hcd), - .flags = HCD_USB3 | HCD_SHARED, - .reset = dummy_setup, .start = dummy_start, .stop = dummy_stop, @@ -2591,8 +2630,12 @@ static int dummy_hcd_probe(struct platform_device *pdev) dev_info(&pdev->dev, "%s, driver " DRIVER_VERSION "\n", driver_desc); dum = *((void **)dev_get_platdata(&pdev->dev)); - if (!mod_data.is_super_speed) + if (mod_data.is_super_speed) + dummy_hcd.flags = HCD_USB3 | HCD_SHARED; + else if (mod_data.is_high_speed) dummy_hcd.flags = HCD_USB2; + else + dummy_hcd.flags = HCD_USB11; hs_hcd = usb_create_hcd(&dummy_hcd, &pdev->dev, dev_name(&pdev->dev)); if (!hs_hcd) return -ENOMEM; diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c index df37c1e6e9d5..63a206122058 100644 --- a/drivers/usb/gadget/udc/renesas_usb3.c +++ b/drivers/usb/gadget/udc/renesas_usb3.c @@ -1038,7 +1038,7 @@ static int usb3_write_pipe(struct renesas_usb3_ep *usb3_ep, usb3_ep->ep.maxpacket); u8 *buf = usb3_req->req.buf + usb3_req->req.actual; u32 tmp = 0; - bool is_last; + bool is_last = !len ? 
true : false; if (usb3_wait_pipe_status(usb3_ep, PX_STA_BUFSTS) < 0) return -EBUSY; @@ -1059,7 +1059,8 @@ static int usb3_write_pipe(struct renesas_usb3_ep *usb3_ep, usb3_write(usb3, tmp, fifo_reg); } - is_last = usb3_is_transfer_complete(usb3_ep, usb3_req); + if (!is_last) + is_last = usb3_is_transfer_complete(usb3_ep, usb3_req); /* Send the data */ usb3_set_px_con_send(usb3_ep, len, is_last); @@ -1150,7 +1151,8 @@ static void usb3_start_pipe0(struct renesas_usb3_ep *usb3_ep, usb3_set_p0_con_for_ctrl_read_data(usb3); } else { usb3_clear_bit(usb3, P0_MOD_DIR, USB3_P0_MOD); - usb3_set_p0_con_for_ctrl_write_data(usb3); + if (usb3_req->req.length) + usb3_set_p0_con_for_ctrl_write_data(usb3); } usb3_p0_xfer(usb3_ep, usb3_req); @@ -2053,7 +2055,16 @@ static u32 usb3_calc_ramarea(int ram_size) static u32 usb3_calc_rammap_val(struct renesas_usb3_ep *usb3_ep, const struct usb_endpoint_descriptor *desc) { - return usb3_ep->rammap_val | PN_RAMMAP_MPKT(usb_endpoint_maxp(desc)); + int i; + const u32 max_packet_array[] = {8, 16, 32, 64, 512}; + u32 mpkt = PN_RAMMAP_MPKT(1024); + + for (i = 0; i < ARRAY_SIZE(max_packet_array); i++) { + if (usb_endpoint_maxp(desc) <= max_packet_array[i]) + mpkt = PN_RAMMAP_MPKT(max_packet_array[i]); + } + + return usb3_ep->rammap_val | mpkt; } static int usb3_enable_pipe_n(struct renesas_usb3_ep *usb3_ep, diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c index a4d814b7f380..91393ec7d850 100644 --- a/drivers/usb/host/ohci-omap.c +++ b/drivers/usb/host/ohci-omap.c @@ -53,7 +53,7 @@ #define DRIVER_DESC "OHCI OMAP driver" #ifdef CONFIG_TPS65010 -#include +#include #else #define LOW 0 diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c index a8b8d8b8d9f3..d4e0f7cd96fa 100644 --- a/drivers/usb/host/ohci-sm501.c +++ b/drivers/usb/host/ohci-sm501.c @@ -123,13 +123,12 @@ static int ohci_hcd_sm501_drv_probe(struct platform_device *pdev) * regular memory. The HCD_LOCAL_MEM flag does just that. */ - if (!dma_declare_coherent_memory(dev, mem->start, + retval = dma_declare_coherent_memory(dev, mem->start, mem->start - mem->parent->start, resource_size(mem), - DMA_MEMORY_MAP | - DMA_MEMORY_EXCLUSIVE)) { + DMA_MEMORY_EXCLUSIVE); + if (retval) { dev_err(dev, "cannot declare coherent memory\n"); - retval = -ENXIO; goto err1; } diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c index cfcfadfc94fc..16d081a093bb 100644 --- a/drivers/usb/host/ohci-tmio.c +++ b/drivers/usb/host/ohci-tmio.c @@ -227,13 +227,10 @@ static int ohci_hcd_tmio_drv_probe(struct platform_device *dev) goto err_ioremap_regs; } - if (!dma_declare_coherent_memory(&dev->dev, sram->start, - sram->start, - resource_size(sram), - DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE)) { - ret = -EBUSY; + ret = dma_declare_coherent_memory(&dev->dev, sram->start, sram->start, + resource_size(sram), DMA_MEMORY_EXCLUSIVE); + if (ret) goto err_dma_declare; - } if (cell->enable) { ret = cell->enable(dev); diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index 658d9d1f9ea3..6dda3623a276 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -447,7 +447,7 @@ static int usb_asmedia_wait_write(struct pci_dev *pdev) if ((value & ASMT_CONTROL_WRITE_BIT) == 0) return 0; - usleep_range(40, 60); + udelay(50); } dev_warn(&pdev->dev, "%s: check_write_ready timeout", __func__); @@ -1022,7 +1022,7 @@ EXPORT_SYMBOL_GPL(usb_disable_xhci_ports); * * Takes care of the handoff between the Pre-OS (i.e. BIOS) and the OS. 
* It signals to the BIOS that the OS wants control of the host controller, - * and then waits 5 seconds for the BIOS to hand over control. + * and then waits 1 second for the BIOS to hand over control. * If we timeout, assume the BIOS is broken and take control anyway. */ static void quirk_usb_handoff_xhci(struct pci_dev *pdev) @@ -1069,9 +1069,9 @@ static void quirk_usb_handoff_xhci(struct pci_dev *pdev) if (val & XHCI_HC_BIOS_OWNED) { writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset); - /* Wait for 5 seconds with 10 microsecond polling interval */ + /* Wait for 1 second with 10 microsecond polling interval */ timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED, - 0, 5000, 10); + 0, 1000000, 10); /* Assume a buggy BIOS and take HC ownership anyway */ if (timeout) { @@ -1100,7 +1100,7 @@ static void quirk_usb_handoff_xhci(struct pci_dev *pdev) * operational or runtime registers. Wait 5 seconds and no more. */ timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0, - 5000, 10); + 5000000, 10); /* Assume a buggy HC and start HC initialization anyway */ if (timeout) { val = readl(op_reg_base + XHCI_STS_OFFSET); diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index ad89a6d4111b..da9158f171cb 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -112,7 +112,7 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf, /* If PSI table exists, add the custom speed attributes from it */ if (usb3_1 && xhci->usb3_rhub.psi_count) { - u32 ssp_cap_base, bm_attrib, psi; + u32 ssp_cap_base, bm_attrib, psi, psi_mant, psi_exp; int offset; ssp_cap_base = USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE; @@ -139,6 +139,15 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf, for (i = 0; i < xhci->usb3_rhub.psi_count; i++) { psi = xhci->usb3_rhub.psi[i]; psi &= ~USB_SSP_SUBLINK_SPEED_RSVD; + psi_exp = XHCI_EXT_PORT_PSIE(psi); + psi_mant = XHCI_EXT_PORT_PSIM(psi); + + /* Shift to Gbps and set SSP Link BIT(14) if 10Gpbs */ + for (; psi_exp < 3; psi_exp++) + psi_mant /= 1000; + if (psi_mant >= 10) + psi |= BIT(14); + if ((psi & PLT_MASK) == PLT_SYM) { /* Symmetric, create SSA RX and TX from one PSI entry */ put_unaligned_le32(psi, &buf[offset]); @@ -1506,9 +1515,6 @@ int xhci_bus_suspend(struct usb_hcd *hcd) t2 |= PORT_WKOC_E | PORT_WKCONN_E; t2 &= ~PORT_WKDISC_E; } - if ((xhci->quirks & XHCI_U2_DISABLE_WAKE) && - (hcd->speed < HCD_USB3)) - t2 &= ~PORT_WAKE_BITS; } else t2 &= ~PORT_WAKE_BITS; diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 8071c8fdd15e..76f392954733 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -54,11 +54,6 @@ #define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8 #define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0 -#define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9 -#define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba -#define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb -#define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc - #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142 static const char hcd_name[] = "xhci_hcd"; @@ -142,13 +137,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) if (pdev->vendor == PCI_VENDOR_ID_AMD) xhci->quirks |= XHCI_TRUST_TX_LENGTH; - if ((pdev->vendor == PCI_VENDOR_ID_AMD) && - ((pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4) || - (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_3) || - (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_2) || - (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_1))) - xhci->quirks |= 
XHCI_U2_DISABLE_WAKE; - if (pdev->vendor == PCI_VENDOR_ID_INTEL) { xhci->quirks |= XHCI_LPM_SUPPORT; xhci->quirks |= XHCI_INTEL_HOST; diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 163bafde709f..1cb6eaef4ae1 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -178,14 +178,18 @@ static int xhci_plat_probe(struct platform_device *pdev) * 2. xhci_plat is child of a device from firmware (dwc3-plat) * 3. xhci_plat is grandchild of a pci device (dwc3-pci) */ - sysdev = &pdev->dev; - if (sysdev->parent && !sysdev->of_node && sysdev->parent->of_node) - sysdev = sysdev->parent; + for (sysdev = &pdev->dev; sysdev; sysdev = sysdev->parent) { + if (is_of_node(sysdev->fwnode) || + is_acpi_device_node(sysdev->fwnode)) + break; #ifdef CONFIG_PCI - else if (sysdev->parent && sysdev->parent->parent && - sysdev->parent->parent->bus == &pci_bus_type) - sysdev = sysdev->parent->parent; + else if (sysdev->bus == &pci_bus_type) + break; #endif + } + + if (!sysdev) + sysdev = &pdev->dev; /* Try to set 64-bit DMA first */ if (WARN_ON(!sysdev->dma_mask)) diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index b2ff1ff1a02f..ee198ea47f49 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -1703,7 +1703,8 @@ static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, if (xhci->quirks & XHCI_MTK_HOST) { ret = xhci_mtk_add_ep_quirk(hcd, udev, ep); if (ret < 0) { - xhci_free_endpoint_ring(xhci, virt_dev, ep_index); + xhci_ring_free(xhci, virt_dev->eps[ep_index].new_ring); + virt_dev->eps[ep_index].new_ring = NULL; return ret; } } diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 2abaa4d6d39d..2b48aa4f6b76 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -735,6 +735,8 @@ struct xhci_ep_ctx { #define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK) /* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */ #define EP_HAS_LSA (1 << 15) +/* hosts with LEC=1 use bits 31:24 as ESIT high bits. */ +#define CTX_TO_MAX_ESIT_PAYLOAD_HI(p) (((p) >> 24) & 0xff) /* ep_info2 bitmasks */ /* @@ -1681,7 +1683,7 @@ struct xhci_bus_state { static inline unsigned int hcd_index(struct usb_hcd *hcd) { - if (hcd->speed == HCD_USB3) + if (hcd->speed >= HCD_USB3) return 0; else return 1; @@ -1826,7 +1828,7 @@ struct xhci_hcd { /* For controller with a broken Port Disable implementation */ #define XHCI_BROKEN_PORT_PED (1 << 25) #define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26) -#define XHCI_U2_DISABLE_WAKE (1 << 27) +/* Reserved. 
It was XHCI_U2_DISABLE_WAKE */ #define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28) unsigned int num_active_eps; @@ -2540,8 +2542,8 @@ static inline const char *xhci_decode_ep_context(u32 info, u32 info2, u64 deq, u8 lsa; u8 hid; - esit = EP_MAX_ESIT_PAYLOAD_HI(info) << 16 | - EP_MAX_ESIT_PAYLOAD_LO(tx_info); + esit = CTX_TO_MAX_ESIT_PAYLOAD_HI(info) << 16 | + CTX_TO_MAX_ESIT_PAYLOAD(tx_info); ep_state = info & EP_STATE_MASK; max_pstr = info & EP_MAXPSTREAMS_MASK; diff --git a/drivers/usb/phy/phy-isp1301-omap.c b/drivers/usb/phy/phy-isp1301-omap.c index 042c5a8fd423..c6052c814bcc 100644 --- a/drivers/usb/phy/phy-isp1301-omap.c +++ b/drivers/usb/phy/phy-isp1301-omap.c @@ -96,7 +96,7 @@ struct isp1301 { #if IS_REACHABLE(CONFIG_TPS65010) -#include +#include #else diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c index 628b600b02b1..b5dc077ed7d3 100644 --- a/drivers/usb/phy/phy-twl6030-usb.c +++ b/drivers/usb/phy/phy-twl6030-usb.c @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index d1af831f43eb..68f26904c316 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c @@ -282,11 +282,26 @@ static void usbhsf_fifo_clear(struct usbhs_pipe *pipe, struct usbhs_fifo *fifo) { struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); + int ret = 0; - if (!usbhs_pipe_is_dcp(pipe)) - usbhsf_fifo_barrier(priv, fifo); + if (!usbhs_pipe_is_dcp(pipe)) { + /* + * This driver checks the pipe condition first to avoid -EBUSY + * from usbhsf_fifo_barrier() with about 10 msec delay in + * the interrupt handler if the pipe is RX direction and empty. + */ + if (usbhs_pipe_is_dir_in(pipe)) + ret = usbhs_pipe_is_accessible(pipe); + if (!ret) + ret = usbhsf_fifo_barrier(priv, fifo); + } - usbhs_write(priv, fifo->ctr, BCLR); + /* + * if non-DCP pipe, this driver should set BCLR when + * usbhsf_fifo_barrier() returns 0. + */ + if (!ret) + usbhs_write(priv, fifo->ctr, BCLR); } static int usbhsf_fifo_rcv_len(struct usbhs_priv *priv, diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c index 1a59f335b063..a3ccb899df60 100644 --- a/drivers/usb/storage/transport.c +++ b/drivers/usb/storage/transport.c @@ -834,13 +834,25 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us) if (result == USB_STOR_TRANSPORT_GOOD) { srb->result = SAM_STAT_GOOD; srb->sense_buffer[0] = 0x0; + } + + /* + * ATA-passthru commands use sense data to report + * the command completion status, and often devices + * return Check Condition status when nothing is + * wrong. + */ + else if (srb->cmnd[0] == ATA_16 || + srb->cmnd[0] == ATA_12) { + /* leave the data alone */ + } /* * If there was a problem, report an unspecified * hardware error to prevent the higher layers from * entering an infinite retry loop. 
*/ - } else { + else { srb->result = DID_ERROR << 16; if ((sshdr.response_code & 0x72) == 0x72) srb->sense_buffer[1] = HARDWARE_ERROR; diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h index f58caa9e6a27..a155cd02bce2 100644 --- a/drivers/usb/storage/uas-detect.h +++ b/drivers/usb/storage/uas-detect.h @@ -9,7 +9,8 @@ static int uas_is_interface(struct usb_host_interface *intf) intf->desc.bInterfaceProtocol == USB_PR_UAS); } -static int uas_find_uas_alt_setting(struct usb_interface *intf) +static struct usb_host_interface *uas_find_uas_alt_setting( + struct usb_interface *intf) { int i; @@ -17,10 +18,10 @@ static int uas_find_uas_alt_setting(struct usb_interface *intf) struct usb_host_interface *alt = &intf->altsetting[i]; if (uas_is_interface(alt)) - return alt->desc.bAlternateSetting; + return alt; } - return -ENODEV; + return NULL; } static int uas_find_endpoints(struct usb_host_interface *alt, @@ -58,14 +59,14 @@ static int uas_use_uas_driver(struct usb_interface *intf, struct usb_device *udev = interface_to_usbdev(intf); struct usb_hcd *hcd = bus_to_hcd(udev->bus); unsigned long flags = id->driver_info; - int r, alt; - + struct usb_host_interface *alt; + int r; alt = uas_find_uas_alt_setting(intf); - if (alt < 0) + if (!alt) return 0; - r = uas_find_endpoints(&intf->altsetting[alt], eps); + r = uas_find_endpoints(alt, eps); if (r < 0) return 0; diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index 5ef014ba6ae8..63cf981ed81c 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -737,7 +737,7 @@ static int uas_eh_abort_handler(struct scsi_cmnd *cmnd) return FAILED; } -static int uas_eh_bus_reset_handler(struct scsi_cmnd *cmnd) +static int uas_eh_device_reset_handler(struct scsi_cmnd *cmnd) { struct scsi_device *sdev = cmnd->device; struct uas_dev_info *devinfo = sdev->hostdata; @@ -848,7 +848,7 @@ static struct scsi_host_template uas_host_template = { .slave_alloc = uas_slave_alloc, .slave_configure = uas_slave_configure, .eh_abort_handler = uas_eh_abort_handler, - .eh_bus_reset_handler = uas_eh_bus_reset_handler, + .eh_device_reset_handler = uas_eh_device_reset_handler, .this_id = -1, .sg_tablesize = SG_NONE, .skip_settle_delay = 1, @@ -873,14 +873,14 @@ MODULE_DEVICE_TABLE(usb, uas_usb_ids); static int uas_switch_interface(struct usb_device *udev, struct usb_interface *intf) { - int alt; + struct usb_host_interface *alt; alt = uas_find_uas_alt_setting(intf); - if (alt < 0) - return alt; + if (!alt) + return -ENODEV; - return usb_set_interface(udev, - intf->altsetting[0].desc.bInterfaceNumber, alt); + return usb_set_interface(udev, alt->desc.bInterfaceNumber, + alt->desc.bAlternateSetting); } static int uas_configure_endpoints(struct uas_dev_info *devinfo) diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 5a70c33ef0e0..eb06d88b41d6 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -1459,6 +1459,13 @@ UNUSUAL_DEV( 0x0bc2, 0x3010, 0x0000, 0x0000, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_SANE_SENSE ), +/* Reported by Kris Lindgren */ +UNUSUAL_DEV( 0x0bc2, 0x3332, 0x0000, 0x9999, + "Seagate", + "External", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_WP_DETECT ), + UNUSUAL_DEV( 0x0d49, 0x7310, 0x0000, 0x9999, "Maxtor", "USB to SATA", diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c index 35a1e777b449..9a53912bdfe9 100644 --- a/drivers/uwb/hwa-rc.c +++ b/drivers/uwb/hwa-rc.c @@ -825,6 +825,8 @@ static int hwarc_probe(struct 
usb_interface *iface, if (iface->cur_altsetting->desc.bNumEndpoints < 1) return -ENODEV; + if (!usb_endpoint_xfer_int(&iface->cur_altsetting->endpoint[0].desc)) + return -ENODEV; result = -ENOMEM; uwb_rc = uwb_rc_alloc(); diff --git a/drivers/uwb/uwbd.c b/drivers/uwb/uwbd.c index 01c20a260a8b..39dd4ef53c77 100644 --- a/drivers/uwb/uwbd.c +++ b/drivers/uwb/uwbd.c @@ -302,18 +302,22 @@ static int uwbd(void *param) /** Start the UWB daemon */ void uwbd_start(struct uwb_rc *rc) { - rc->uwbd.task = kthread_run(uwbd, rc, "uwbd"); - if (rc->uwbd.task == NULL) + struct task_struct *task = kthread_run(uwbd, rc, "uwbd"); + if (IS_ERR(task)) { + rc->uwbd.task = NULL; printk(KERN_ERR "UWB: Cannot start management daemon; " "UWB won't work\n"); - else + } else { + rc->uwbd.task = task; rc->uwbd.pid = rc->uwbd.task->pid; + } } /* Stop the UWB daemon and free any unprocessed events */ void uwbd_stop(struct uwb_rc *rc) { - kthread_stop(rc->uwbd.task); + if (rc->uwbd.task) + kthread_stop(rc->uwbd.task); uwbd_flush(rc); } diff --git a/drivers/vfio/platform/vfio_amba.c b/drivers/vfio/platform/vfio_amba.c index 31372fbf6c5b..62dfbfeaabfc 100644 --- a/drivers/vfio/platform/vfio_amba.c +++ b/drivers/vfio/platform/vfio_amba.c @@ -93,7 +93,7 @@ static int vfio_amba_remove(struct amba_device *adev) return -EINVAL; } -static struct amba_id pl330_ids[] = { +static const struct amba_id pl330_ids[] = { { 0, 0 }, }; diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index 330d50582f40..f5a86f651f38 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c @@ -85,6 +85,7 @@ struct vfio_group { struct list_head unbound_list; struct mutex unbound_lock; atomic_t opened; + wait_queue_head_t container_q; bool noiommu; struct kvm *kvm; struct blocking_notifier_head notifier; @@ -138,9 +139,10 @@ struct iommu_group *vfio_iommu_group_get(struct device *dev) iommu_group_set_name(group, "vfio-noiommu"); iommu_group_set_iommudata(group, &noiommu, NULL); ret = iommu_group_add_device(group, dev); - iommu_group_put(group); - if (ret) + if (ret) { + iommu_group_put(group); return NULL; + } /* * Where to taint? At this point we've added an IOMMU group for a @@ -337,6 +339,7 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group) mutex_init(&group->unbound_lock); atomic_set(&group->container_users, 0); atomic_set(&group->opened, 0); + init_waitqueue_head(&group->container_q); group->iommu_group = iommu_group; #ifdef CONFIG_VFIO_NOIOMMU group->noiommu = (iommu_group_get_iommudata(iommu_group) == &noiommu); @@ -993,6 +996,23 @@ void *vfio_del_group_dev(struct device *dev) } } while (ret <= 0); + /* + * In order to support multiple devices per group, devices can be + * plucked from the group while other devices in the group are still + * in use. The container persists with this group and those remaining + * devices still attached. If the user creates an isolation violation + * by binding this device to another driver while the group is still in + * use, that's their fault. However, in the case of removing the last, + * or potentially the only, device in the group there can be no other + * in-use devices in the group. The user has done their due diligence + * and we should lay no claims to those devices. In order to do that, + * we need to make sure the group is detached from the container. + * Without this stall, we're potentially racing with a user process + * that may attempt to immediately bind this device to another driver. 
+ */ + if (list_empty(&group->device_list)) + wait_event(group->container_q, !group->container); + vfio_group_put(group); return device_data; @@ -1298,6 +1318,7 @@ static void __vfio_group_unset_container(struct vfio_group *group) group->iommu_group); group->container = NULL; + wake_up(&group->container_q); list_del(&group->container_next); /* Detaching the last group deprivileges a container, remove iommu */ diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 8549cb111627..92155cce926d 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -1169,13 +1169,21 @@ static bool vfio_iommu_has_sw_msi(struct iommu_group *group, phys_addr_t *base) INIT_LIST_HEAD(&group_resv_regions); iommu_get_group_resv_regions(group, &group_resv_regions); list_for_each_entry(region, &group_resv_regions, list) { + /* + * The presence of any 'real' MSI regions should take + * precedence over the software-managed one if the + * IOMMU driver happens to advertise both types. + */ + if (region->type == IOMMU_RESV_MSI) { + ret = false; + break; + } + if (region->type == IOMMU_RESV_SW_MSI) { *base = region->start; ret = true; - goto out; } } -out: list_for_each_entry_safe(region, next, &group_resv_regions, list) kfree(region); return ret; @@ -1265,8 +1273,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data, INIT_LIST_HEAD(&domain->group_list); list_add(&group->next, &domain->group_list); - msi_remap = resv_msi ? irq_domain_check_msi_remap() : - iommu_capable(bus, IOMMU_CAP_INTR_REMAP); + msi_remap = irq_domain_check_msi_remap() || + iommu_capable(bus, IOMMU_CAP_INTR_REMAP); if (!allow_unsafe_interrupts && !msi_remap) { pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n", diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 06d044862e58..58585ec8699e 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -533,6 +533,7 @@ static void handle_tx(struct vhost_net *net) ubuf->callback = vhost_zerocopy_callback; ubuf->ctx = nvq->ubufs; ubuf->desc = nvq->upend_idx; + refcount_set(&ubuf->refcnt, 1); msg.msg_control = ubuf; msg.msg_controllen = sizeof(ubuf); ubufs = nvq->ubufs; @@ -634,8 +635,13 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk) preempt_enable(); - if (vhost_enable_notify(&net->dev, vq)) + if (!vhost_vq_avail_empty(&net->dev, vq)) vhost_poll_queue(&vq->poll); + else if (unlikely(vhost_enable_notify(&net->dev, vq))) { + vhost_disable_notify(&net->dev, vq); + vhost_poll_queue(&vq->poll); + } + mutex_unlock(&vq->mutex); len = peek_head_len(rvq, sk); diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 9cb3f722dce1..d6dbb28245e6 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -1271,7 +1271,7 @@ static struct vhost_umem *vhost_umem_alloc(void) if (!umem) return NULL; - umem->umem_tree = RB_ROOT; + umem->umem_tree = RB_ROOT_CACHED; umem->numem = 0; INIT_LIST_HEAD(&umem->umem_list); diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index bb7c29b8b9fc..d59a9cc65f9d 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h @@ -71,7 +71,7 @@ struct vhost_umem_node { }; struct vhost_umem { - struct rb_root umem_tree; + struct rb_root_cached umem_tree; struct list_head umem_list; int numem; }; diff --git a/drivers/video/backlight/gpio_backlight.c b/drivers/video/backlight/gpio_backlight.c index 18134416b154..e470da95d806 100644 --- a/drivers/video/backlight/gpio_backlight.c 
+++ b/drivers/video/backlight/gpio_backlight.c @@ -9,7 +9,8 @@ #include #include #include -#include +#include /* Only for legacy support */ +#include #include #include #include @@ -23,8 +24,7 @@ struct gpio_backlight { struct device *dev; struct device *fbdev; - int gpio; - int active; + struct gpio_desc *gpiod; int def_value; }; @@ -38,8 +38,7 @@ static int gpio_backlight_update_status(struct backlight_device *bl) bl->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK)) brightness = 0; - gpio_set_value_cansleep(gbl->gpio, - brightness ? gbl->active : !gbl->active); + gpiod_set_value_cansleep(gbl->gpiod, brightness); return 0; } @@ -61,22 +60,24 @@ static const struct backlight_ops gpio_backlight_ops = { static int gpio_backlight_probe_dt(struct platform_device *pdev, struct gpio_backlight *gbl) { - struct device_node *np = pdev->dev.of_node; - enum of_gpio_flags gpio_flags; - - gbl->gpio = of_get_gpio_flags(np, 0, &gpio_flags); - - if (!gpio_is_valid(gbl->gpio)) { - if (gbl->gpio != -EPROBE_DEFER) { - dev_err(&pdev->dev, - "Error: The gpios parameter is missing or invalid.\n"); - } - return gbl->gpio; - } - - gbl->active = (gpio_flags & OF_GPIO_ACTIVE_LOW) ? 0 : 1; + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + enum gpiod_flags flags; + int ret; gbl->def_value = of_property_read_bool(np, "default-on"); + flags = gbl->def_value ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW; + + gbl->gpiod = devm_gpiod_get(dev, NULL, flags); + if (IS_ERR(gbl->gpiod)) { + ret = PTR_ERR(gbl->gpiod); + + if (ret != -EPROBE_DEFER) { + dev_err(dev, + "Error: The gpios parameter is missing or invalid.\n"); + } + return ret; + } return 0; } @@ -89,7 +90,6 @@ static int gpio_backlight_probe(struct platform_device *pdev) struct backlight_device *bl; struct gpio_backlight *gbl; struct device_node *np = pdev->dev.of_node; - unsigned long flags = GPIOF_DIR_OUT; int ret; if (!pdata && !np) { @@ -109,22 +109,26 @@ static int gpio_backlight_probe(struct platform_device *pdev) if (ret) return ret; } else { + /* + * Legacy platform data GPIO retrieveal. Do not expand + * the use of this code path, currently only used by one + * SH board. + */ + unsigned long flags = GPIOF_DIR_OUT; + gbl->fbdev = pdata->fbdev; - gbl->gpio = pdata->gpio; - gbl->active = pdata->active_low ? 0 : 1; gbl->def_value = pdata->def_value; - } - - if (gbl->active) flags |= gbl->def_value ? GPIOF_INIT_HIGH : GPIOF_INIT_LOW; - else - flags |= gbl->def_value ? GPIOF_INIT_LOW : GPIOF_INIT_HIGH; - ret = devm_gpio_request_one(gbl->dev, gbl->gpio, flags, - pdata ? pdata->name : "backlight"); - if (ret < 0) { - dev_err(&pdev->dev, "unable to request GPIO\n"); - return ret; + ret = devm_gpio_request_one(gbl->dev, pdata->gpio, flags, + pdata ? 
pdata->name : "backlight"); + if (ret < 0) { + dev_err(&pdev->dev, "unable to request GPIO\n"); + return ret; + } + gbl->gpiod = gpio_to_desc(pdata->gpio); + if (!gbl->gpiod) + return -EINVAL; } memset(&props, 0, sizeof(props)); diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c index 84a110a719cb..96312c3afc07 100644 --- a/drivers/video/backlight/kb3886_bl.c +++ b/drivers/video/backlight/kb3886_bl.c @@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo; static unsigned long kb3886bl_flags; #define KB3886BL_SUSPENDED 0x01 -static struct dmi_system_id kb3886bl_device_table[] __initdata = { +static const struct dmi_system_id kb3886bl_device_table[] __initconst = { { .ident = "Sahara Touch-iT", .matches = { diff --git a/drivers/video/backlight/lm3630a_bl.c b/drivers/video/backlight/lm3630a_bl.c index 60d6c2ac87aa..2030a6b77a09 100644 --- a/drivers/video/backlight/lm3630a_bl.c +++ b/drivers/video/backlight/lm3630a_bl.c @@ -31,7 +31,8 @@ #define REG_FAULT 0x0B #define REG_PWM_OUTLOW 0x12 #define REG_PWM_OUTHIGH 0x13 -#define REG_MAX 0x1F +#define REG_FILTER_STRENGTH 0x50 +#define REG_MAX 0x50 #define INT_DEBOUNCE_MSEC 10 struct lm3630a_chip { @@ -80,7 +81,7 @@ static int lm3630a_chip_init(struct lm3630a_chip *pchip) usleep_range(1000, 2000); /* set Filter Strength Register */ - rval = lm3630a_write(pchip, 0x50, 0x03); + rval = lm3630a_write(pchip, REG_FILTER_STRENGTH, 0x03); /* set Cofig. register */ rval |= lm3630a_update(pchip, REG_CONFIG, 0x07, pdata->pwm_ctrl); /* set boost control */ diff --git a/drivers/video/backlight/pandora_bl.c b/drivers/video/backlight/pandora_bl.c index 5d8bb8b20183..a186bc677c7d 100644 --- a/drivers/video/backlight/pandora_bl.c +++ b/drivers/video/backlight/pandora_bl.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #define TWL_PWM0_ON 0x00 diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c index 002f1ce22bd0..9bd17682655a 100644 --- a/drivers/video/backlight/pwm_bl.c +++ b/drivers/video/backlight/pwm_bl.c @@ -178,7 +178,7 @@ static int pwm_backlight_parse_dt(struct device *dev, return 0; } -static struct of_device_id pwm_backlight_of_match[] = { +static const struct of_device_id pwm_backlight_of_match[] = { { .compatible = "pwm-backlight" }, { } }; diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig index 2111d06f8c81..7f1f1fbcef9e 100644 --- a/drivers/video/console/Kconfig +++ b/drivers/video/console/Kconfig @@ -117,7 +117,7 @@ config DUMMY_CONSOLE_ROWS Select 25 if you use a 640x480 resolution by default. 
config FRAMEBUFFER_CONSOLE - tristate "Framebuffer Console support" + bool "Framebuffer Console support" depends on FB && !UML select VT_HW_CONSOLE_BINDING select CRC32 diff --git a/drivers/video/console/Makefile b/drivers/video/console/Makefile index 43bfa485db96..eb2cbec52643 100644 --- a/drivers/video/console/Makefile +++ b/drivers/video/console/Makefile @@ -7,13 +7,5 @@ obj-$(CONFIG_SGI_NEWPORT_CONSOLE) += newport_con.o obj-$(CONFIG_STI_CONSOLE) += sticon.o sticore.o obj-$(CONFIG_VGA_CONSOLE) += vgacon.o obj-$(CONFIG_MDA_CONSOLE) += mdacon.o -obj-$(CONFIG_FRAMEBUFFER_CONSOLE) += fbcon.o bitblit.o softcursor.o -ifeq ($(CONFIG_FB_TILEBLITTING),y) -obj-$(CONFIG_FRAMEBUFFER_CONSOLE) += tileblit.o -endif -ifeq ($(CONFIG_FRAMEBUFFER_CONSOLE_ROTATION),y) -obj-$(CONFIG_FRAMEBUFFER_CONSOLE) += fbcon_rotate.o fbcon_cw.o fbcon_ud.o \ - fbcon_ccw.o -endif obj-$(CONFIG_FB_STI) += sticore.o diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index dc06cb6a15dc..445b1dc5d441 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -398,9 +398,8 @@ static const char *vgacon_startup(void) #endif } - /* boot_params.screen_info initialized? */ - if ((screen_info.orig_video_mode == 0) && - (screen_info.orig_video_lines == 0) && + /* boot_params.screen_info reasonably initialized? */ + if ((screen_info.orig_video_lines == 0) || (screen_info.orig_video_cols == 0)) goto no_vga; diff --git a/drivers/video/fbdev/68328fb.c b/drivers/video/fbdev/68328fb.c index c0c6b88d3839..d48e96088f76 100644 --- a/drivers/video/fbdev/68328fb.c +++ b/drivers/video/fbdev/68328fb.c @@ -72,7 +72,7 @@ static struct fb_var_screeninfo mc68x328fb_default __initdata = { .vmode = FB_VMODE_NONINTERLACED, }; -static struct fb_fix_screeninfo mc68x328fb_fix __initdata = { +static const struct fb_fix_screeninfo mc68x328fb_fix __initconst = { .id = "68328fb", .type = FB_TYPE_PACKED_PIXELS, .xpanstep = 1, diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index 5c6696bb56da..5e58f5ec0a28 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -2173,7 +2173,7 @@ config FB_PS3_DEFAULT_SIZE_M config FB_XILINX tristate "Xilinx frame buffer support" - depends on FB && (XILINX_VIRTEX || MICROBLAZE || ARCH_ZYNQ) + depends on FB && (XILINX_VIRTEX || MICROBLAZE || ARCH_ZYNQ || ARCH_ZYNQMP) select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c index ffc2c33c6cef..36d25190b48c 100644 --- a/drivers/video/fbdev/amba-clcd.c +++ b/drivers/video/fbdev/amba-clcd.c @@ -1035,7 +1035,7 @@ static struct clcd_vendor_data vendor_nomadik = { .init_panel = nomadik_clcd_init_panel, }; -static struct amba_id clcdfb_id_table[] = { +static const struct amba_id clcdfb_id_table[] = { { .id = 0x00041110, .mask = 0x000ffffe, diff --git a/drivers/video/fbdev/arkfb.c b/drivers/video/fbdev/arkfb.c index 6a317de7082c..13ba371e70aa 100644 --- a/drivers/video/fbdev/arkfb.c +++ b/drivers/video/fbdev/arkfb.c @@ -1157,7 +1157,7 @@ static int ark_pci_resume (struct pci_dev* dev) /* List of boards that we are trying to support */ -static struct pci_device_id ark_devices[] = { +static const struct pci_device_id ark_devices[] = { {PCI_DEVICE(0xEDD8, 0xA099)}, {0, 0, 0, 0, 0, 0, 0} }; diff --git a/drivers/video/fbdev/asiliantfb.c b/drivers/video/fbdev/asiliantfb.c index 91eea4583382..ea31054a28ca 100644 --- a/drivers/video/fbdev/asiliantfb.c +++ b/drivers/video/fbdev/asiliantfb.c @@ -592,7 +592,7 @@ 
static void asiliantfb_remove(struct pci_dev *dp) framebuffer_release(p); } -static struct pci_device_id asiliantfb_pci_tbl[] = { +static const struct pci_device_id asiliantfb_pci_tbl[] = { { PCI_VENDOR_ID_CT, PCI_DEVICE_ID_CT_69000, PCI_ANY_ID, PCI_ANY_ID }, { 0 } }; diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c index 669ecc755fa9..e06358da4b99 100644 --- a/drivers/video/fbdev/atmel_lcdfb.c +++ b/drivers/video/fbdev/atmel_lcdfb.c @@ -320,7 +320,7 @@ static inline void atmel_lcdfb_power_control(struct atmel_lcdfb_info *sinfo, int } } -static struct fb_fix_screeninfo atmel_lcdfb_fix __initdata = { +static const struct fb_fix_screeninfo atmel_lcdfb_fix __initconst = { .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_TRUECOLOR, .xpanstep = 0, diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c index fa07242a78d2..db18474607c9 100644 --- a/drivers/video/fbdev/aty/aty128fb.c +++ b/drivers/video/fbdev/aty/aty128fb.c @@ -116,7 +116,7 @@ static const struct fb_var_screeninfo default_var = { /* default modedb mode */ /* 640x480, 60 Hz, Non-Interlaced (25.172 MHz dotclock) */ -static struct fb_videomode defaultmode = { +static const struct fb_videomode defaultmode = { .refresh = 60, .xres = 640, .yres = 480, @@ -166,7 +166,7 @@ static int aty128_pci_resume(struct pci_dev *pdev); static int aty128_do_resume(struct pci_dev *pdev); /* supported Rage128 chipsets */ -static struct pci_device_id aty128_pci_tbl[] = { +static const struct pci_device_id aty128_pci_tbl[] = { { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_LE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_M3_pci }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_LF, diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c index b55fdac9c9f5..3ec72f19114b 100644 --- a/drivers/video/fbdev/aty/atyfb_base.c +++ b/drivers/video/fbdev/aty/atyfb_base.c @@ -274,7 +274,7 @@ static struct fb_var_screeninfo default_var = { 0, FB_VMODE_NONINTERLACED }; -static struct fb_videomode defmode = { +static const struct fb_videomode defmode = { /* 640x480 @ 60 Hz, 31.5 kHz hsync */ NULL, 60, 640, 480, 39721, 40, 24, 32, 11, 96, 2, 0, FB_VMODE_NONINTERLACED @@ -1855,7 +1855,7 @@ static int atyfb_ioctl(struct fb_info *info, u_int cmd, u_long arg) #if defined(DEBUG) && defined(CONFIG_FB_ATY_CT) case ATYIO_CLKR: if (M64_HAS(INTEGRATED)) { - struct atyclk clk; + struct atyclk clk = { 0 }; union aty_pll *pll = &par->pll; u32 dsp_config = pll->ct.dsp_config; u32 dsp_on_off = pll->ct.dsp_on_off; @@ -3756,7 +3756,7 @@ static void atyfb_pci_remove(struct pci_dev *pdev) atyfb_remove(info); } -static struct pci_device_id atyfb_pci_tbl[] = { +static const struct pci_device_id atyfb_pci_tbl[] = { #ifdef CONFIG_FB_ATY_GX { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GX) }, { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64CX) }, diff --git a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c index 6b4c7872b375..1e2ec360f8c1 100644 --- a/drivers/video/fbdev/aty/radeon_base.c +++ b/drivers/video/fbdev/aty/radeon_base.c @@ -96,7 +96,7 @@ #define CHIP_DEF(id, family, flags) \ { PCI_VENDOR_ID_ATI, id, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (flags) | (CHIP_FAMILY_##family) } -static struct pci_device_id radeonfb_pci_table[] = { +static const struct pci_device_id radeonfb_pci_table[] = { /* Radeon Xpress 200m */ CHIP_DEF(PCI_CHIP_RS480_5955, RS480, CHIP_HAS_CRTC2 | CHIP_IS_IGP | CHIP_IS_MOBILITY), CHIP_DEF(PCI_CHIP_RS482_5975, RS480, CHIP_HAS_CRTC2 | CHIP_IS_IGP | 
CHIP_IS_MOBILITY), @@ -2241,7 +2241,7 @@ static ssize_t radeon_show_edid2(struct file *filp, struct kobject *kobj, return radeon_show_one_edid(buf, off, count, rinfo->mon2_EDID); } -static struct bin_attribute edid1_attr = { +static const struct bin_attribute edid1_attr = { .attr = { .name = "edid1", .mode = 0444, @@ -2250,7 +2250,7 @@ static struct bin_attribute edid1_attr = { .read = radeon_show_edid1, }; -static struct bin_attribute edid2_attr = { +static const struct bin_attribute edid2_attr = { .attr = { .name = "edid2", .mode = 0444, diff --git a/drivers/video/fbdev/bfin-lq035q1-fb.c b/drivers/video/fbdev/bfin-lq035q1-fb.c index b594a58ff21d..b459354ad940 100644 --- a/drivers/video/fbdev/bfin-lq035q1-fb.c +++ b/drivers/video/fbdev/bfin-lq035q1-fb.c @@ -841,7 +841,7 @@ static int bfin_lq035q1_resume(struct device *dev) return 0; } -static struct dev_pm_ops bfin_lq035q1_dev_pm_ops = { +static const struct dev_pm_ops bfin_lq035q1_dev_pm_ops = { .suspend = bfin_lq035q1_suspend, .resume = bfin_lq035q1_resume, }; diff --git a/drivers/video/fbdev/bw2.c b/drivers/video/fbdev/bw2.c index 8c5b281f0b29..7aa972072357 100644 --- a/drivers/video/fbdev/bw2.c +++ b/drivers/video/fbdev/bw2.c @@ -333,8 +333,8 @@ static int bw2_probe(struct platform_device *op) dev_set_drvdata(&op->dev, info); - printk(KERN_INFO "%s: bwtwo at %lx:%lx\n", - dp->full_name, par->which_io, info->fix.smem_start); + printk(KERN_INFO "%pOF: bwtwo at %lx:%lx\n", + dp, par->which_io, info->fix.smem_start); return 0; diff --git a/drivers/video/fbdev/cg14.c b/drivers/video/fbdev/cg14.c index 43e915eaf606..8de88b129b62 100644 --- a/drivers/video/fbdev/cg14.c +++ b/drivers/video/fbdev/cg14.c @@ -553,8 +553,8 @@ static int cg14_probe(struct platform_device *op) dev_set_drvdata(&op->dev, info); - printk(KERN_INFO "%s: cgfourteen at %lx:%lx, %dMB\n", - dp->full_name, + printk(KERN_INFO "%pOF: cgfourteen at %lx:%lx, %dMB\n", + dp, par->iospace, info->fix.smem_start, par->ramsize >> 20); diff --git a/drivers/video/fbdev/cg3.c b/drivers/video/fbdev/cg3.c index 716391f22e75..6c334260cf53 100644 --- a/drivers/video/fbdev/cg3.c +++ b/drivers/video/fbdev/cg3.c @@ -412,8 +412,8 @@ static int cg3_probe(struct platform_device *op) dev_set_drvdata(&op->dev, info); - printk(KERN_INFO "%s: cg3 at %lx:%lx\n", - dp->full_name, par->which_io, info->fix.smem_start); + printk(KERN_INFO "%pOF: cg3 at %lx:%lx\n", + dp, par->which_io, info->fix.smem_start); return 0; diff --git a/drivers/video/fbdev/cg6.c b/drivers/video/fbdev/cg6.c index bdf901ed5291..0296c21acc78 100644 --- a/drivers/video/fbdev/cg6.c +++ b/drivers/video/fbdev/cg6.c @@ -810,8 +810,8 @@ static int cg6_probe(struct platform_device *op) dev_set_drvdata(&op->dev, info); - printk(KERN_INFO "%s: CGsix [%s] at %lx:%lx\n", - dp->full_name, info->fix.id, + printk(KERN_INFO "%pOF: CGsix [%s] at %lx:%lx\n", + dp, info->fix.id, par->which_io, info->fix.smem_start); return 0; diff --git a/drivers/video/fbdev/chipsfb.c b/drivers/video/fbdev/chipsfb.c index 59abdc6a97f6..f103665cad43 100644 --- a/drivers/video/fbdev/chipsfb.c +++ b/drivers/video/fbdev/chipsfb.c @@ -292,7 +292,7 @@ static void chips_hw_init(void) write_fr(chips_init_fr[i].addr, chips_init_fr[i].data); } -static struct fb_fix_screeninfo chipsfb_fix = { +static const struct fb_fix_screeninfo chipsfb_fix = { .id = "C&T 65550", .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_PSEUDOCOLOR, @@ -309,7 +309,7 @@ static struct fb_fix_screeninfo chipsfb_fix = { .smem_len = 0x100000, /* 1MB */ }; -static struct fb_var_screeninfo chipsfb_var = { 
+static const struct fb_var_screeninfo chipsfb_var = { .xres = 800, .yres = 600, .xres_virtual = 800, diff --git a/drivers/video/fbdev/cobalt_lcdfb.c b/drivers/video/fbdev/cobalt_lcdfb.c index 9da90bd242f4..0ef633e278a1 100644 --- a/drivers/video/fbdev/cobalt_lcdfb.c +++ b/drivers/video/fbdev/cobalt_lcdfb.c @@ -126,7 +126,7 @@ static void lcd_clear(struct fb_info *info) lcd_write_control(info, LCD_RESET); } -static struct fb_fix_screeninfo cobalt_lcdfb_fix = { +static const struct fb_fix_screeninfo cobalt_lcdfb_fix = { .id = "cobalt-lcd", .type = FB_TYPE_TEXT, .type_aux = FB_AUX_TEXT_MDA, diff --git a/drivers/video/fbdev/core/Makefile b/drivers/video/fbdev/core/Makefile index 9e3ddf225393..73493bbd7a15 100644 --- a/drivers/video/fbdev/core/Makefile +++ b/drivers/video/fbdev/core/Makefile @@ -4,6 +4,20 @@ obj-$(CONFIG_FB) += fb.o fb-y := fbmem.o fbmon.o fbcmap.o fbsysfs.o \ modedb.o fbcvt.o fb-$(CONFIG_FB_DEFERRED_IO) += fb_defio.o + +ifeq ($(CONFIG_FRAMEBUFFER_CONSOLE),y) +fb-y += fbcon.o bitblit.o softcursor.o +ifeq ($(CONFIG_FB_TILEBLITTING),y) +fb-y += tileblit.o +endif +ifeq ($(CONFIG_FRAMEBUFFER_CONSOLE_ROTATION),y) +fb-y += fbcon_rotate.o fbcon_cw.o fbcon_ud.o \ + fbcon_ccw.o +endif +ifeq ($(CONFIG_DMI),y) +fb-y += fbcon_dmi_quirks.o +endif +endif fb-objs := $(fb-y) obj-$(CONFIG_FB_CFB_FILLRECT) += cfbfillrect.o diff --git a/drivers/video/console/bitblit.c b/drivers/video/fbdev/core/bitblit.c similarity index 98% rename from drivers/video/console/bitblit.c rename to drivers/video/fbdev/core/bitblit.c index dbfe4eecf12e..790900d646c0 100644 --- a/drivers/video/console/bitblit.c +++ b/drivers/video/fbdev/core/bitblit.c @@ -203,7 +203,7 @@ static void bit_putcs(struct vc_data *vc, struct fb_info *info, } static void bit_clear_margins(struct vc_data *vc, struct fb_info *info, - int bottom_only) + int color, int bottom_only) { unsigned int cw = vc->vc_font.width; unsigned int ch = vc->vc_font.height; @@ -213,7 +213,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info, unsigned int bs = info->var.yres - bh; struct fb_fillrect region; - region.color = 0; + region.color = color; region.rop = ROP_COPY; if (rw && !bottom_only) { @@ -416,7 +416,3 @@ void fbcon_set_bitops(struct fbcon_ops *ops) EXPORT_SYMBOL(fbcon_set_bitops); -MODULE_AUTHOR("Antonino Daplas "); -MODULE_DESCRIPTION("Bit Blitting Operation"); -MODULE_LICENSE("GPL"); - diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c index 37f69c061210..487d5e336e1b 100644 --- a/drivers/video/fbdev/core/fb_defio.c +++ b/drivers/video/fbdev/core/fb_defio.c @@ -69,7 +69,7 @@ int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasy { struct fb_info *info = file->private_data; struct inode *inode = file_inode(file); - int err = filemap_write_and_wait_range(inode->i_mapping, start, end); + int err = file_write_and_wait_range(file, start, end); if (err) return err; diff --git a/drivers/video/console/fbcon.c b/drivers/video/fbdev/core/fbcon.c similarity index 99% rename from drivers/video/console/fbcon.c rename to drivers/video/fbdev/core/fbcon.c index 12ded23f1aaf..04612f938bab 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -68,6 +68,7 @@ #include #include #include +#include #include #include #include @@ -135,8 +136,9 @@ static char fontname[40]; static int info_idx = -1; /* console rotation */ -static int initial_rotation; +static int initial_rotation = -1; static int fbcon_has_sysfs; +static int margin_color; static const struct 
consw fb_con; @@ -491,6 +493,13 @@ static int __init fb_console_setup(char *this_opt) initial_rotation = 0; continue; } + + if (!strncmp(options, "margin:", 7)) { + options += 7; + if (*options) + margin_color = simple_strtoul(options, &options, 0); + continue; + } } return 1; } @@ -563,7 +572,7 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info, unsigned short *save = NULL, *r, *q; int logo_height; - if (info->flags & FBINFO_MODULE) { + if (info->fbops->owner) { logo_shown = FBCON_LOGO_DONTSHOW; return; } @@ -954,7 +963,10 @@ static const char *fbcon_startup(void) ops->cur_rotate = -1; ops->cur_blink_jiffies = HZ / 5; info->fbcon_par = ops; - p->con_rotate = initial_rotation; + if (initial_rotation != -1) + p->con_rotate = initial_rotation; + else + p->con_rotate = fbcon_platform_get_rotate(info); set_blitting_type(vc, info); if (info->fix.type != FB_TYPE_TEXT) { @@ -1091,7 +1103,10 @@ static void fbcon_init(struct vc_data *vc, int init) ops = info->fbcon_par; ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms); - p->con_rotate = initial_rotation; + if (initial_rotation != -1) + p->con_rotate = initial_rotation; + else + p->con_rotate = fbcon_platform_get_rotate(info); set_blitting_type(vc, info); cols = vc->vc_cols; @@ -1299,7 +1314,7 @@ static void fbcon_clear_margins(struct vc_data *vc, int bottom_only) struct fbcon_ops *ops = info->fbcon_par; if (!fbcon_is_inactive(vc, info)) - ops->clear_margins(vc, info, bottom_only); + ops->clear_margins(vc, info, margin_color, bottom_only); } static void fbcon_cursor(struct vc_data *vc, int mode) @@ -3606,7 +3621,7 @@ static void fbcon_exit(void) fbcon_has_exited = 1; } -static int __init fb_console_init(void) +void __init fb_console_init(void) { int i; @@ -3628,11 +3643,8 @@ static int __init fb_console_init(void) console_unlock(); fbcon_start(); - return 0; } -fs_initcall(fb_console_init); - #ifdef MODULE static void __exit fbcon_deinit_device(void) @@ -3647,7 +3659,7 @@ static void __exit fbcon_deinit_device(void) } } -static void __exit fb_console_exit(void) +void __exit fb_console_exit(void) { console_lock(); fb_unregister_client(&fbcon_event_notifier); @@ -3657,9 +3669,4 @@ static void __exit fb_console_exit(void) do_unregister_con_driver(&fb_con); console_unlock(); } - -module_exit(fb_console_exit); - #endif - -MODULE_LICENSE("GPL"); diff --git a/drivers/video/console/fbcon.h b/drivers/video/fbdev/core/fbcon.h similarity index 97% rename from drivers/video/console/fbcon.h rename to drivers/video/fbdev/core/fbcon.h index 7aaa4eabbba0..18f3ac144237 100644 --- a/drivers/video/console/fbcon.h +++ b/drivers/video/fbdev/core/fbcon.h @@ -60,7 +60,7 @@ struct fbcon_ops { const unsigned short *s, int count, int yy, int xx, int fg, int bg); void (*clear_margins)(struct vc_data *vc, struct fb_info *info, - int bottom_only); + int color, int bottom_only); void (*cursor)(struct vc_data *vc, struct fb_info *info, int mode, int softback_lines, int fg, int bg); int (*update_start)(struct fb_info *info); @@ -261,5 +261,10 @@ extern void fbcon_set_rotate(struct fbcon_ops *ops); #define fbcon_set_rotate(x) do {} while(0) #endif /* CONFIG_FRAMEBUFFER_CONSOLE_ROTATION */ -#endif /* _VIDEO_FBCON_H */ +#ifdef CONFIG_DMI +int fbcon_platform_get_rotate(struct fb_info *info); +#else +#define fbcon_platform_get_rotate(i) FB_ROTATE_UR +#endif /* CONFIG_DMI */ +#endif /* _VIDEO_FBCON_H */ diff --git a/drivers/video/console/fbcon_ccw.c b/drivers/video/fbdev/core/fbcon_ccw.c similarity index 98% rename from 
drivers/video/console/fbcon_ccw.c rename to drivers/video/fbdev/core/fbcon_ccw.c index 5a3cbf6dff4d..37a8b0b22566 100644 --- a/drivers/video/console/fbcon_ccw.c +++ b/drivers/video/fbdev/core/fbcon_ccw.c @@ -189,7 +189,7 @@ static void ccw_putcs(struct vc_data *vc, struct fb_info *info, } static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info, - int bottom_only) + int color, int bottom_only) { unsigned int cw = vc->vc_font.width; unsigned int ch = vc->vc_font.height; @@ -198,7 +198,7 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info, unsigned int bs = vc->vc_rows*ch; struct fb_fillrect region; - region.color = 0; + region.color = color; region.rop = ROP_COPY; if (rw && !bottom_only) { @@ -418,7 +418,3 @@ void fbcon_rotate_ccw(struct fbcon_ops *ops) ops->update_start = ccw_update_start; } EXPORT_SYMBOL(fbcon_rotate_ccw); - -MODULE_AUTHOR("Antonino Daplas "); -MODULE_DESCRIPTION("Console Rotation (270 degrees) Support"); -MODULE_LICENSE("GPL"); diff --git a/drivers/video/console/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c similarity index 98% rename from drivers/video/console/fbcon_cw.c rename to drivers/video/fbdev/core/fbcon_cw.c index e7ee44db4e98..1888f8c866e8 100644 --- a/drivers/video/console/fbcon_cw.c +++ b/drivers/video/fbdev/core/fbcon_cw.c @@ -172,7 +172,7 @@ static void cw_putcs(struct vc_data *vc, struct fb_info *info, } static void cw_clear_margins(struct vc_data *vc, struct fb_info *info, - int bottom_only) + int color, int bottom_only) { unsigned int cw = vc->vc_font.width; unsigned int ch = vc->vc_font.height; @@ -181,7 +181,7 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info, unsigned int rs = info->var.yres - rw; struct fb_fillrect region; - region.color = 0; + region.color = color; region.rop = ROP_COPY; if (rw && !bottom_only) { @@ -401,7 +401,3 @@ void fbcon_rotate_cw(struct fbcon_ops *ops) ops->update_start = cw_update_start; } EXPORT_SYMBOL(fbcon_rotate_cw); - -MODULE_AUTHOR("Antonino Daplas "); -MODULE_DESCRIPTION("Console Rotation (90 degrees) Support"); -MODULE_LICENSE("GPL"); diff --git a/drivers/video/fbdev/core/fbcon_dmi_quirks.c b/drivers/video/fbdev/core/fbcon_dmi_quirks.c new file mode 100644 index 000000000000..6904e47d1e51 --- /dev/null +++ b/drivers/video/fbdev/core/fbcon_dmi_quirks.c @@ -0,0 +1,145 @@ +/* + * fbcon_dmi_quirks.c -- DMI based quirk detection for fbcon + * + * Copyright (C) 2017 Hans de Goede + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive for + * more details. + */ + +#include +#include +#include +#include "fbcon.h" + +/* + * Some x86 clamshell design devices use portrait tablet screens and a display + * engine which cannot rotate in hardware, so we need to rotate the fbcon to + * compensate. Unfortunately these (cheap) devices also typically have quite + * generic DMI data, so we match on a combination of DMI data, screen resolution + * and a list of known BIOS dates to avoid false positives. 
+ */ + +struct fbcon_dmi_rotate_data { + int width; + int height; + const char * const *bios_dates; + int rotate; +}; + +static const struct fbcon_dmi_rotate_data rotate_data_asus_t100ha = { + .width = 800, + .height = 1280, + .rotate = FB_ROTATE_CCW, +}; + +static const struct fbcon_dmi_rotate_data rotate_data_gpd_pocket = { + .width = 1200, + .height = 1920, + .bios_dates = (const char * const []){ "05/26/2017", "06/28/2017", + "07/05/2017", "08/07/2017", NULL }, + .rotate = FB_ROTATE_CW, +}; + +static const struct fbcon_dmi_rotate_data rotate_data_gpd_win = { + .width = 720, + .height = 1280, + .bios_dates = (const char * const []){ + "10/25/2016", "11/18/2016", "12/23/2016", "12/26/2016", + "02/21/2017", "03/20/2017", "05/25/2017", NULL }, + .rotate = FB_ROTATE_CW, +}; + +static const struct fbcon_dmi_rotate_data rotate_data_itworks_tw891 = { + .width = 800, + .height = 1280, + .bios_dates = (const char * const []){ "10/16/2015", NULL }, + .rotate = FB_ROTATE_CW, +}; + +static const struct fbcon_dmi_rotate_data rotate_data_vios_lth17 = { + .width = 800, + .height = 1280, + .rotate = FB_ROTATE_CW, +}; + +static const struct dmi_system_id rotate_data[] = { + { /* Asus T100HA */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100HAN"), + }, + .driver_data = (void *)&rotate_data_asus_t100ha, + }, { /* + * GPD Pocket, note that the DMI data is less generic than + * it seems; devices with a board-vendor of "AMI Corporation" + * are quite rare, as are devices which have both board- *and* + * product-id set to "Default String" + */ + .matches = { + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"), + DMI_EXACT_MATCH(DMI_BOARD_SERIAL, "Default string"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"), + }, + .driver_data = (void *)&rotate_data_gpd_pocket, + }, { /* GPD Win (same note on DMI match as GPD Pocket) */ + .matches = { + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"), + DMI_EXACT_MATCH(DMI_BOARD_SERIAL, "Default string"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"), + }, + .driver_data = (void *)&rotate_data_gpd_win, + }, { /* I.T.Works TW891 */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "TW891"), + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "To be filled by O.E.M."), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "TW891"), + }, + .driver_data = (void *)&rotate_data_itworks_tw891, + }, { /* VIOS LTH17 */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "LTH17"), + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "VIOS"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "LTH17"), + }, + .driver_data = (void *)&rotate_data_vios_lth17, + }, + {} +}; + +int fbcon_platform_get_rotate(struct fb_info *info) +{ + const struct dmi_system_id *match; + const struct fbcon_dmi_rotate_data *data; + const char *bios_date; + int i; + + for (match = dmi_first_match(rotate_data); + match; + match = dmi_first_match(match + 1)) { + data = match->driver_data; + + if (data->width != info->var.xres || + data->height != info->var.yres) + continue; + + if (!data->bios_dates) + return data->rotate; + + bios_date = dmi_get_system_info(DMI_BIOS_DATE); + if (!bios_date) + continue; + + for (i = 0; data->bios_dates[i]; i++) { + if (!strcmp(data->bios_dates[i], bios_date)) + return data->rotate; + } + } + + return FB_ROTATE_UR; +} diff --git 
a/drivers/video/console/fbcon_rotate.c b/drivers/video/fbdev/core/fbcon_rotate.c similarity index 95% rename from drivers/video/console/fbcon_rotate.c rename to drivers/video/fbdev/core/fbcon_rotate.c index db6528f2d3f2..8a51e4d95cc5 100644 --- a/drivers/video/console/fbcon_rotate.c +++ b/drivers/video/fbdev/core/fbcon_rotate.c @@ -110,7 +110,3 @@ void fbcon_set_rotate(struct fbcon_ops *ops) } } EXPORT_SYMBOL(fbcon_set_rotate); - -MODULE_AUTHOR("Antonino Daplas "); -MODULE_DESCRIPTION("Console Rotation Support"); -MODULE_LICENSE("GPL"); diff --git a/drivers/video/console/fbcon_rotate.h b/drivers/video/fbdev/core/fbcon_rotate.h similarity index 100% rename from drivers/video/console/fbcon_rotate.h rename to drivers/video/fbdev/core/fbcon_rotate.h diff --git a/drivers/video/console/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c similarity index 98% rename from drivers/video/console/fbcon_ud.c rename to drivers/video/fbdev/core/fbcon_ud.c index 19e3714abfe8..f98eee263597 100644 --- a/drivers/video/console/fbcon_ud.c +++ b/drivers/video/fbdev/core/fbcon_ud.c @@ -220,7 +220,7 @@ static void ud_putcs(struct vc_data *vc, struct fb_info *info, } static void ud_clear_margins(struct vc_data *vc, struct fb_info *info, - int bottom_only) + int color, int bottom_only) { unsigned int cw = vc->vc_font.width; unsigned int ch = vc->vc_font.height; @@ -228,7 +228,7 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info, unsigned int bh = info->var.yres - (vc->vc_rows*ch); struct fb_fillrect region; - region.color = 0; + region.color = color; region.rop = ROP_COPY; if (rw && !bottom_only) { @@ -446,7 +446,3 @@ void fbcon_rotate_ud(struct fbcon_ops *ops) ops->update_start = ud_update_start; } EXPORT_SYMBOL(fbcon_rotate_ud); - -MODULE_AUTHOR("Antonino Daplas "); -MODULE_DESCRIPTION("Console Rotation (180 degrees) Support"); -MODULE_LICENSE("GPL"); diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index 25e862c487f6..f741ba8df01b 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include @@ -316,7 +317,7 @@ static void fb_set_logo(struct fb_info *info, for (i = 0; i < logo->height; i++) { for (j = 0; j < logo->width; src++) { d = *src ^ xor; - for (k = 7; k >= 0; k--) { + for (k = 7; k >= 0 && j < logo->width; k--) { *dst++ = ((d >> k) & 1) ? 
fg : 0; j++; } @@ -463,7 +464,7 @@ static int fb_show_logo_line(struct fb_info *info, int rotate, /* Return if the frame buffer is not mapped or suspended */ if (logo == NULL || info->state != FBINFO_STATE_RUNNING || - info->flags & FBINFO_MODULE) + info->fbops->owner) return 0; image.depth = 8; @@ -601,7 +602,7 @@ int fb_prepare_logo(struct fb_info *info, int rotate) memset(&fb_logo, 0, sizeof(struct logo_data)); if (info->flags & FBINFO_MISC_TILEBLITTING || - info->flags & FBINFO_MODULE) + info->fbops->owner) return 0; if (info->fix.visual == FB_VISUAL_DIRECTCOLOR) { @@ -1892,6 +1893,9 @@ fbmem_init(void) fb_class = NULL; goto err_class; } + + fb_console_init(); + return 0; err_class: @@ -1906,6 +1910,8 @@ module_init(fbmem_init); static void __exit fbmem_exit(void) { + fb_console_exit(); + remove_proc_entry("fb", NULL); class_destroy(fb_class); unregister_chrdev(FB_MAJOR, "fb"); diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c index 41d7979d81c5..2b2d67328514 100644 --- a/drivers/video/fbdev/core/fbmon.c +++ b/drivers/video/fbdev/core/fbmon.c @@ -1479,8 +1479,8 @@ int of_get_fb_videomode(struct device_node *np, struct fb_videomode *fb, if (ret) return ret; - pr_debug("%s: got %dx%d display mode from %s\n", - of_node_full_name(np), vm.hactive, vm.vactive, np->name); + pr_debug("%pOF: got %dx%d display mode from %s\n", + np, vm.hactive, vm.vactive, np->name); dump_fb_videomode(fb); return 0; diff --git a/drivers/video/console/softcursor.c b/drivers/video/fbdev/core/softcursor.c similarity index 93% rename from drivers/video/console/softcursor.c rename to drivers/video/fbdev/core/softcursor.c index 46dd8f5d2e9e..fc93f254498e 100644 --- a/drivers/video/console/softcursor.c +++ b/drivers/video/fbdev/core/softcursor.c @@ -76,7 +76,3 @@ int soft_cursor(struct fb_info *info, struct fb_cursor *cursor) } EXPORT_SYMBOL(soft_cursor); - -MODULE_AUTHOR("James Simmons "); -MODULE_DESCRIPTION("Generic software cursor"); -MODULE_LICENSE("GPL"); diff --git a/drivers/video/console/tileblit.c b/drivers/video/fbdev/core/tileblit.c similarity index 96% rename from drivers/video/console/tileblit.c rename to drivers/video/fbdev/core/tileblit.c index 15e8e1a89c45..93390312957f 100644 --- a/drivers/video/console/tileblit.c +++ b/drivers/video/fbdev/core/tileblit.c @@ -74,7 +74,7 @@ static void tile_putcs(struct vc_data *vc, struct fb_info *info, } static void tile_clear_margins(struct vc_data *vc, struct fb_info *info, - int bottom_only) + int color, int bottom_only) { return; } @@ -152,8 +152,3 @@ void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info) } EXPORT_SYMBOL(fbcon_set_tileops); - -MODULE_AUTHOR("Antonino Daplas "); -MODULE_DESCRIPTION("Tile Blitting Operation"); -MODULE_LICENSE("GPL"); - diff --git a/drivers/video/fbdev/cyber2000fb.c b/drivers/video/fbdev/cyber2000fb.c index 99acf538a8b8..9a5751cb4e16 100644 --- a/drivers/video/fbdev/cyber2000fb.c +++ b/drivers/video/fbdev/cyber2000fb.c @@ -1336,7 +1336,7 @@ static void cyber2000fb_i2c_unregister(struct cfb_info *cfb) * These parameters give * 640x480, hsync 31.5kHz, vsync 60Hz */ -static struct fb_videomode cyber2000fb_default_mode = { +static const struct fb_videomode cyber2000fb_default_mode = { .refresh = 60, .xres = 640, .yres = 480, diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c index c229b1a0d13b..a74096c53cb5 100644 --- a/drivers/video/fbdev/da8xx-fb.c +++ b/drivers/video/fbdev/da8xx-fb.c @@ -1341,7 +1341,7 @@ static int fb_probe(struct platform_device *device) { struct 
da8xx_lcdc_platform_data *fb_pdata = dev_get_platdata(&device->dev); - static struct resource *lcdc_regs; + struct resource *lcdc_regs; struct lcd_ctrl_config *lcd_cfg; struct fb_videomode *lcdc_info; struct fb_info *da8xx_fb_info; diff --git a/drivers/video/fbdev/dnfb.c b/drivers/video/fbdev/dnfb.c index 3526899da61b..7b1492d34e98 100644 --- a/drivers/video/fbdev/dnfb.c +++ b/drivers/video/fbdev/dnfb.c @@ -126,7 +126,7 @@ struct fb_var_screeninfo dnfb_var = { .vmode = FB_VMODE_NONINTERLACED, }; -static struct fb_fix_screeninfo dnfb_fix = { +static const struct fb_fix_screeninfo dnfb_fix = { .id = "Apollo Mono", .smem_start = (FRAME_BUFFER_START + IO_BASE), .smem_len = FRAME_BUFFER_LEN, diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c index 1e784adb89b1..3a010641f630 100644 --- a/drivers/video/fbdev/efifb.c +++ b/drivers/video/fbdev/efifb.c @@ -149,6 +149,10 @@ ATTRIBUTE_GROUPS(efifb); static bool pci_dev_disabled; /* FB base matches BAR of a disabled device */ +static struct pci_dev *efifb_pci_dev; /* dev with BAR covering the efifb */ +static struct resource *bar_resource; +static u64 bar_offset; + static int efifb_probe(struct platform_device *dev) { struct fb_info *info; @@ -203,6 +207,13 @@ static int efifb_probe(struct platform_device *dev) efifb_fix.smem_start |= ext_lfb_base; } + if (bar_resource && + bar_resource->start + bar_offset != efifb_fix.smem_start) { + dev_info(&efifb_pci_dev->dev, + "BAR has moved, updating efifb address\n"); + efifb_fix.smem_start = bar_resource->start + bar_offset; + } + efifb_defined.bits_per_pixel = screen_info.lfb_depth; efifb_defined.xres = screen_info.lfb_width; efifb_defined.yres = screen_info.lfb_height; @@ -370,15 +381,13 @@ static struct platform_driver efifb_driver = { builtin_platform_driver(efifb_driver); -#if defined(CONFIG_PCI) && !defined(CONFIG_X86) +#if defined(CONFIG_PCI) -static bool pci_bar_found; /* did we find a BAR matching the efifb base? 
*/ - -static void claim_efifb_bar(struct pci_dev *dev, int idx) +static void record_efifb_bar_resource(struct pci_dev *dev, int idx, u64 offset) { u16 word; - pci_bar_found = true; + efifb_pci_dev = dev; pci_read_config_word(dev, PCI_COMMAND, &word); if (!(word & PCI_COMMAND_MEMORY)) { @@ -389,12 +398,8 @@ static void claim_efifb_bar(struct pci_dev *dev, int idx) return; } - if (pci_claim_resource(dev, idx)) { - pci_dev_disabled = true; - dev_err(&dev->dev, - "BAR %d: failed to claim resource for efifb!\n", idx); - return; - } + bar_resource = &dev->resource[idx]; + bar_offset = offset; dev_info(&dev->dev, "BAR %d: assigned to efifb\n", idx); } @@ -405,7 +410,7 @@ static void efifb_fixup_resources(struct pci_dev *dev) u64 size = screen_info.lfb_size; int i; - if (pci_bar_found || screen_info.orig_video_isVGA != VIDEO_TYPE_EFI) + if (efifb_pci_dev || screen_info.orig_video_isVGA != VIDEO_TYPE_EFI) return; if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE) @@ -421,7 +426,7 @@ static void efifb_fixup_resources(struct pci_dev *dev) continue; if (res->start <= base && res->end >= base + size - 1) { - claim_efifb_bar(dev, i); + record_efifb_bar_resource(dev, i, base - res->start); break; } } diff --git a/drivers/video/fbdev/fb-puv3.c b/drivers/video/fbdev/fb-puv3.c index 88fa2e70a0bb..d9e816d53531 100644 --- a/drivers/video/fbdev/fb-puv3.c +++ b/drivers/video/fbdev/fb-puv3.c @@ -69,7 +69,7 @@ static const struct fb_videomode unifb_modes[] = { 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, }; -static struct fb_var_screeninfo unifb_default = { +static const struct fb_var_screeninfo unifb_default = { .xres = 640, .yres = 480, .xres_virtual = 640, diff --git a/drivers/video/fbdev/ffb.c b/drivers/video/fbdev/ffb.c index dda31e0a45af..6b1915872af1 100644 --- a/drivers/video/fbdev/ffb.c +++ b/drivers/video/fbdev/ffb.c @@ -997,9 +997,9 @@ static int ffb_probe(struct platform_device *op) dev_set_drvdata(&op->dev, info); - printk(KERN_INFO "%s: %s at %016lx, type %d, " + printk(KERN_INFO "%pOF: %s at %016lx, type %d, " "DAC pnum[%x] rev[%d] manuf_rev[%d]\n", - dp->full_name, + dp, ((par->flags & FFB_FLAG_AFB) ? 
"AFB" : "FFB"), par->physbase, par->board_type, dac_pnum, dac_rev, dac_mrev); diff --git a/drivers/video/fbdev/fm2fb.c b/drivers/video/fbdev/fm2fb.c index e69d47af9932..ac7a4ebfd390 100644 --- a/drivers/video/fbdev/fm2fb.c +++ b/drivers/video/fbdev/fm2fb.c @@ -213,7 +213,7 @@ static int fm2fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, static int fm2fb_probe(struct zorro_dev *z, const struct zorro_device_id *id); -static struct zorro_device_id fm2fb_devices[] = { +static const struct zorro_device_id fm2fb_devices[] = { { ZORRO_PROD_BSC_FRAMEMASTER_II }, { ZORRO_PROD_HELFRICH_RAINBOW_II }, { 0 } diff --git a/drivers/video/fbdev/geode/gxfb_core.c b/drivers/video/fbdev/geode/gxfb_core.c index ec9fc9ac23de..f4f76373b2a8 100644 --- a/drivers/video/fbdev/geode/gxfb_core.c +++ b/drivers/video/fbdev/geode/gxfb_core.c @@ -474,7 +474,7 @@ static void gxfb_remove(struct pci_dev *pdev) framebuffer_release(info); } -static struct pci_device_id gxfb_id_table[] = { +static const struct pci_device_id gxfb_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_GX_VIDEO) }, { 0, } }; diff --git a/drivers/video/fbdev/grvga.c b/drivers/video/fbdev/grvga.c index b471f92969b1..8fc8f46dadeb 100644 --- a/drivers/video/fbdev/grvga.c +++ b/drivers/video/fbdev/grvga.c @@ -70,7 +70,7 @@ static const struct fb_videomode grvga_modedb[] = { } }; -static struct fb_fix_screeninfo grvga_fix = { +static const struct fb_fix_screeninfo grvga_fix = { .id = "AG SVGACTRL", .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_PSEUDOCOLOR, diff --git a/drivers/video/fbdev/i810/i810_main.c b/drivers/video/fbdev/i810/i810_main.c index 2488baab7c89..d18f7b31932c 100644 --- a/drivers/video/fbdev/i810/i810_main.c +++ b/drivers/video/fbdev/i810/i810_main.c @@ -107,7 +107,7 @@ static const char * const i810_pci_list[] = { "Intel(R) 815 (Internal Graphics with AGP) Framebuffer Device" }; -static struct pci_device_id i810fb_pci_tbl[] = { +static const struct pci_device_id i810fb_pci_tbl[] = { { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82810_IG1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82810_IG3, @@ -1542,7 +1542,7 @@ static int i810fb_cursor(struct fb_info *info, struct fb_cursor *cursor) return 0; } -static struct fb_ops i810fb_ops = { +static const struct fb_ops i810fb_ops = { .owner = THIS_MODULE, .fb_open = i810fb_open, .fb_release = i810fb_release, diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c index 4363c64d74e8..ecdcf358ad5e 100644 --- a/drivers/video/fbdev/imsttfb.c +++ b/drivers/video/fbdev/imsttfb.c @@ -1318,7 +1318,7 @@ imsttfb_ioctl(struct fb_info *info, u_int cmd, u_long arg) } } -static struct pci_device_id imsttfb_pci_tbl[] = { +static const struct pci_device_id imsttfb_pci_tbl[] = { { PCI_VENDOR_ID_IMS, PCI_DEVICE_ID_IMS_TT128, PCI_ANY_ID, PCI_ANY_ID, 0, 0, IBM }, { PCI_VENDOR_ID_IMS, PCI_DEVICE_ID_IMS_TT3D, diff --git a/drivers/video/fbdev/intelfb/intelfbdrv.c b/drivers/video/fbdev/intelfb/intelfbdrv.c index ffc391208b27..d7463a2a5d83 100644 --- a/drivers/video/fbdev/intelfb/intelfbdrv.c +++ b/drivers/video/fbdev/intelfb/intelfbdrv.c @@ -173,7 +173,7 @@ static int intelfb_set_fbinfo(struct intelfb_info *dinfo); #define INTELFB_CLASS_MASK 0 #endif -static struct pci_device_id intelfb_pci_table[] = { +static const struct pci_device_id intelfb_pci_table[] = { { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_830M, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_830M }, { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_845G, 
PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_845G }, { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_85XGM, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_85XGM }, diff --git a/drivers/video/fbdev/kyro/fbdev.c b/drivers/video/fbdev/kyro/fbdev.c index f77478fb3d14..a7bd9f25911b 100644 --- a/drivers/video/fbdev/kyro/fbdev.c +++ b/drivers/video/fbdev/kyro/fbdev.c @@ -633,7 +633,7 @@ static int kyrofb_ioctl(struct fb_info *info, return 0; } -static struct pci_device_id kyrofb_pci_tbl[] = { +static const struct pci_device_id kyrofb_pci_tbl[] = { { PCI_VENDOR_ID_ST, PCI_DEVICE_ID_STG4000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { 0, } diff --git a/drivers/video/fbdev/leo.c b/drivers/video/fbdev/leo.c index 62e59dc90ee6..71862188f528 100644 --- a/drivers/video/fbdev/leo.c +++ b/drivers/video/fbdev/leo.c @@ -619,8 +619,8 @@ static int leo_probe(struct platform_device *op) dev_set_drvdata(&op->dev, info); - printk(KERN_INFO "%s: leo at %lx:%lx\n", - dp->full_name, + printk(KERN_INFO "%pOF: leo at %lx:%lx\n", + dp, par->which_io, info->fix.smem_start); return 0; diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c index f6a0b9af97a9..b9b284d79631 100644 --- a/drivers/video/fbdev/matrox/matroxfb_base.c +++ b/drivers/video/fbdev/matrox/matroxfb_base.c @@ -1198,7 +1198,7 @@ static int matroxfb_blank(int blank, struct fb_info *info) return 0; } -static struct fb_ops matroxfb_ops = { +static const struct fb_ops matroxfb_ops = { .owner = THIS_MODULE, .fb_open = matroxfb_open, .fb_release = matroxfb_release, @@ -1573,14 +1573,14 @@ static struct board { NULL}}; #ifndef MODULE -static struct fb_videomode defaultmode = { +static const struct fb_videomode defaultmode = { /* 640x480 @ 60Hz, 31.5 kHz */ NULL, 60, 640, 480, 39721, 40, 24, 32, 11, 96, 2, 0, FB_VMODE_NONINTERLACED }; -#endif /* !MODULE */ static int hotplug = 0; +#endif /* !MODULE */ static void setDefaultOutputs(struct matrox_fb_info *minfo) { @@ -1623,7 +1623,7 @@ static int initMatrox2(struct matrox_fb_info *minfo, struct board *b) unsigned int memsize; int err; - static struct pci_device_id intel_82437[] = { + static const struct pci_device_id intel_82437[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437) }, { }, }; @@ -1794,9 +1794,7 @@ static int initMatrox2(struct matrox_fb_info *minfo, struct board *b) minfo->fbops = matroxfb_ops; minfo->fbcon.fbops = &minfo->fbops; minfo->fbcon.pseudo_palette = minfo->cmap; - /* after __init time we are like module... no logo */ - minfo->fbcon.flags = hotplug ? 
FBINFO_FLAG_MODULE : FBINFO_FLAG_DEFAULT; - minfo->fbcon.flags |= FBINFO_PARTIAL_PAN_OK | /* Prefer panning for scroll under MC viewer/edit */ + minfo->fbcon.flags = FBINFO_PARTIAL_PAN_OK | /* Prefer panning for scroll under MC viewer/edit */ FBINFO_HWACCEL_COPYAREA | /* We have hw-assisted bmove */ FBINFO_HWACCEL_FILLRECT | /* And fillrect */ FBINFO_HWACCEL_IMAGEBLIT | /* And imageblit */ @@ -2116,7 +2114,7 @@ static void pci_remove_matrox(struct pci_dev* pdev) { matroxfb_remove(minfo, 1); } -static struct pci_device_id matroxfb_devices[] = { +static const struct pci_device_id matroxfb_devices[] = { #ifdef CONFIG_FB_MATROX_MILLENIUM {PCI_VENDOR_ID_MATROX, PCI_DEVICE_ID_MATROX_MIL, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, diff --git a/drivers/video/fbdev/maxinefb.c b/drivers/video/fbdev/maxinefb.c index cab7333208ea..5bb1b5c308a7 100644 --- a/drivers/video/fbdev/maxinefb.c +++ b/drivers/video/fbdev/maxinefb.c @@ -39,7 +39,7 @@ static struct fb_info fb_info; -static struct fb_var_screeninfo maxinefb_defined = { +static const struct fb_var_screeninfo maxinefb_defined = { .xres = 1024, .yres = 768, .xres_virtual = 1024, diff --git a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c index f9ec5c0484fa..cd372527c9e4 100644 --- a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c +++ b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c @@ -982,7 +982,7 @@ static inline int mb862xx_pci_gdc_init(struct mb862xxfb_par *par) #define CHIP_ID(id) \ { PCI_DEVICE(PCI_VENDOR_ID_FUJITSU_LIMITED, id) } -static struct pci_device_id mb862xx_pci_tbl[] = { +static const struct pci_device_id mb862xx_pci_tbl[] = { /* MB86295/MB86296 */ CHIP_ID(PCI_DEVICE_ID_FUJITSU_CORALP), CHIP_ID(PCI_DEVICE_ID_FUJITSU_CORALPA), diff --git a/drivers/video/fbdev/mbx/mbxfb.c b/drivers/video/fbdev/mbx/mbxfb.c index 698df9543e30..539b85da0897 100644 --- a/drivers/video/fbdev/mbx/mbxfb.c +++ b/drivers/video/fbdev/mbx/mbxfb.c @@ -79,7 +79,7 @@ struct mbxfb_info { }; -static struct fb_var_screeninfo mbxfb_default = { +static const struct fb_var_screeninfo mbxfb_default = { .xres = 640, .yres = 480, .xres_virtual = 640, @@ -102,7 +102,7 @@ static struct fb_var_screeninfo mbxfb_default = { .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, }; -static struct fb_fix_screeninfo mbxfb_fix = { +static const struct fb_fix_screeninfo mbxfb_fix = { .id = "MBX", .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_TRUECOLOR, diff --git a/drivers/video/fbdev/neofb.c b/drivers/video/fbdev/neofb.c index db023a97d1ea..5d3a444083f7 100644 --- a/drivers/video/fbdev/neofb.c +++ b/drivers/video/fbdev/neofb.c @@ -2138,7 +2138,7 @@ static void neofb_remove(struct pci_dev *dev) } } -static struct pci_device_id neofb_devices[] = { +static const struct pci_device_id neofb_devices[] = { {PCI_VENDOR_ID_NEOMAGIC, PCI_CHIP_NM2070, PCI_ANY_ID, PCI_ANY_ID, 0, 0, FB_ACCEL_NEOMAGIC_NM2070}, diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c index ce7dab7299fe..418a2d0d06a9 100644 --- a/drivers/video/fbdev/nvidia/nvidia.c +++ b/drivers/video/fbdev/nvidia/nvidia.c @@ -55,7 +55,7 @@ /* HW cursor parameters */ #define MAX_CURS 32 -static struct pci_device_id nvidiafb_pci_tbl[] = { +static const struct pci_device_id nvidiafb_pci_tbl[] = { {PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY << 16, 0xff0000, 0}, { 0, } diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c index 9be884b0c778..90d38de34479 100644 --- a/drivers/video/fbdev/offb.c +++ b/drivers/video/fbdev/offb.c @@ 
-383,7 +383,7 @@ static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_STATIC_PSEUDOCOLOR; } -static void __init offb_init_fb(const char *name, const char *full_name, +static void __init offb_init_fb(const char *name, int width, int height, int depth, int pitch, unsigned long address, int foreign_endian, struct device_node *dp) @@ -402,14 +402,13 @@ static void __init offb_init_fb(const char *name, const char *full_name, "Using unsupported %dx%d %s at %lx, depth=%d, pitch=%d\n", width, height, name, address, depth, pitch); if (depth != 8 && depth != 15 && depth != 16 && depth != 32) { - printk(KERN_ERR "%s: can't use depth = %d\n", full_name, - depth); + printk(KERN_ERR "%pOF: can't use depth = %d\n", dp, depth); release_mem_region(res_start, res_size); return; } info = framebuffer_alloc(sizeof(u32) * 16, NULL); - + if (info == 0) { release_mem_region(res_start, res_size); return; @@ -515,7 +514,7 @@ static void __init offb_init_fb(const char *name, const char *full_name, if (register_framebuffer(info) < 0) goto out_err; - fb_info(info, "Open Firmware frame buffer device on %s\n", full_name); + fb_info(info, "Open Firmware frame buffer device on %pOF\n", dp); return; out_err: @@ -644,7 +643,6 @@ static void __init offb_init_nodriver(struct device_node *dp, int no_real_node) if (strcmp(dp->name, "valkyrie") == 0) address += 0x1000; offb_init_fb(no_real_node ? "bootx" : dp->name, - no_real_node ? "display" : dp->full_name, width, height, depth, pitch, address, foreign_endian, no_real_node ? NULL : dp); } diff --git a/drivers/video/fbdev/omap/lcd_h3.c b/drivers/video/fbdev/omap/lcd_h3.c index 9d2da146813e..796f4634c4c6 100644 --- a/drivers/video/fbdev/omap/lcd_h3.c +++ b/drivers/video/fbdev/omap/lcd_h3.c @@ -21,7 +21,7 @@ #include #include -#include +#include #include #include "omapfb.h" diff --git a/drivers/video/fbdev/omap/lcd_mipid.c b/drivers/video/fbdev/omap/lcd_mipid.c index df9e6ebcfad5..e3a85432f926 100644 --- a/drivers/video/fbdev/omap/lcd_mipid.c +++ b/drivers/video/fbdev/omap/lcd_mipid.c @@ -496,7 +496,7 @@ static void mipid_cleanup(struct lcd_panel *panel) mipid_esd_stop_check(md); } -static struct lcd_panel mipid_panel = { +static const struct lcd_panel mipid_panel = { .config = OMAP_LCDC_PANEL_TFT, .bpp = 16, diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c index f14691ce8d02..6cd759c01037 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c @@ -18,7 +18,7 @@ #include